# SPDX-License-Identifier: GPL-2.0
# Carsten Haitzler <[email protected]>, 2021

# This is sourced from a driver script, so there is no need for a #!/bin/...
# line at the top - the assumption below is that it runs as part of sourcing
# after the test sets up some basic env vars to say what it is.

# This currently works with ETMv4 / ETF, not with any other packet types at
# this point. This will need changes if that changes.

# perf record options for the perf tests to use
PERFRECMEM="-m ,16M"
PERFRECOPT="$PERFRECMEM -e cs_etm//u"
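
# As a rough illustration (not part of this library), a driver test that
# sources this file would normally record with the options above, roughly:
#   perf record $PERFRECOPT -o "$PERFDATA" -- "$BIN" <test args>
# where PERFDATA is a hypothetical output file name chosen by the calling
# test script and BIN is the test binary set up below.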

TOOLS=$(dirname "$0")
DIR="$TOOLS/$TEST"
BIN="$DIR/$TEST"
# If the test tool/binary does not exist or is not executable then skip the test
if ! test -x "$BIN"; then exit 2; fi
DATD="."
# If the data dir env is set then make the data dir use that instead of ./
if test -n "$PERF_TEST_CORESIGHT_DATADIR"; then
	DATD="$PERF_TEST_CORESIGHT_DATADIR";
fi
# If the stat dir env is set then make the stat dir use that instead of ./
STATD="."
if test -n "$PERF_TEST_CORESIGHT_STATDIR"; then
	STATD="$PERF_TEST_CORESIGHT_STATDIR";
fi
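
# For example (hypothetical paths, purely illustrative), a CI wrapper could
# keep recorded data and the accumulated statistics out of the source tree:
#   export PERF_TEST_CORESIGHT_DATADIR=/tmp/coresight-data
#   export PERF_TEST_CORESIGHT_STATDIR=/tmp/coresight-stats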

# Called if the test fails - error code 1
err() {
	echo "$1"
	exit 1
}

# Check that a statistic counted from our perf data meets the given minimum;
# args are: name, actual count, minimum count, stats file to log to
check_val_min() {
	STATF="$4"
	if test "$2" -lt "$3"; then
		echo ", FAILED" >> "$STATF"
		err "Sanity check number of $1 is too low ($2 < $3)"
	fi
}
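
# Illustrative call (the numbers are made up): fail unless at least 100 ATOM
# packets were counted, recording the result in the given stats file:
#   check_val_min "ATOM_FX" "$ATOM_FX_NUM" 100 "$STATF"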

perf_dump_aux_verify() {
	# Some basic checking that the AUX chunk contains some sensible data
	# to see that we are recording something and at least a minimum
	# amount of it. We should almost always see atom (Fn) packets in just
	# about anything, and we will certainly see some trace info and async
	# packets
	DUMP="$DATD/perf-tmp-aux-dump.txt"
	perf report --stdio --dump -i "$1" | \
		grep -o -e I_ATOM_F -e I_ASYNC -e I_TRACE_INFO > "$DUMP"
	# Simply count how many of these packets we find to see that we are
	# producing a reasonable amount of data - exact checks are not sane
	# as this is a lossy process where we may lose some blocks, and the
	# compiler may produce different code depending on the compiler
	# version and optimization options, so this is a rough check just to
	# catch the case where we are missing almost all of the data, or all
	# of it
	ATOM_FX_NUM=`grep I_ATOM_F "$DUMP" | wc -l`
	ASYNC_NUM=`grep I_ASYNC "$DUMP" | wc -l`
	TRACE_INFO_NUM=`grep I_TRACE_INFO "$DUMP" | wc -l`
	rm -f "$DUMP"

	# Arguments provide minimums for a pass
	CHECK_FX_MIN="$2"
	CHECK_ASYNC_MIN="$3"
	CHECK_TRACE_INFO_MIN="$4"

	# Write out statistics, so over time you can track results to see if
	# there is a pattern - for example whether we get less "noisy" results
	# that produce more consistent amounts of data each run, and to see if
	# over time any techniques to minimize data loss are having an effect
	# or not
	STATF="$STATD/stats-$TEST-$DATV.csv"
	if ! test -f "$STATF"; then
		echo "ATOM Fx Count, Minimum, ASYNC Count, Minimum, TRACE INFO Count, Minimum" > "$STATF"
	fi
	echo -n "$ATOM_FX_NUM, $CHECK_FX_MIN, $ASYNC_NUM, $CHECK_ASYNC_MIN, $TRACE_INFO_NUM, $CHECK_TRACE_INFO_MIN" >> "$STATF"

	# Actually check to see if we passed or failed.
	check_val_min "ATOM_FX" "$ATOM_FX_NUM" "$CHECK_FX_MIN" "$STATF"
	check_val_min "ASYNC" "$ASYNC_NUM" "$CHECK_ASYNC_MIN" "$STATF"
	check_val_min "TRACE_INFO" "$TRACE_INFO_NUM" "$CHECK_TRACE_INFO_MIN" "$STATF"
	echo ", Ok" >> "$STATF"
}
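
# A minimal sketch of how a driver test might use the above, assuming it has
# already run "perf record $PERFRECOPT -o <file> -- <test binary>"; the three
# minimum packet counts here are made-up values that a real test would pick
# to suit its workload:
#   perf_dump_aux_verify "<file>" 10 10 2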

perf_dump_aux_tid_verify() {
	# A specifically crafted test will produce a list of Thread IDs on
	# stdout that need to be checked to see that they have had trace
	# info collected in AUX blocks in the perf data. This will go
	# through all the TIDs that are listed as CID=0xabcdef and see
	# that all the Thread IDs the test tool reports are in the perf
	# data AUX chunks

	# The TID test tools will print one TID per line on stdout for each
	# thread being tested
	TIDS=`cat "$2"`
	# Scan the perf report to find the TIDs, which actually appear as
	# CIDs in hex, and build a list of the ones found
	FOUND_TIDS=`perf report --stdio --dump -i "$1" | \
			grep -o "CID=0x[0-9a-z]\+" | sed 's/CID=//g' | \
			uniq | sort | uniq`
	# No CID=xxx found - maybe your kernel is reporting these as
	# VMID=xxx so look there
	if test -z "$FOUND_TIDS"; then
		FOUND_TIDS=`perf report --stdio --dump -i "$1" | \
				grep -o "VMID=0x[0-9a-z]\+" | sed 's/VMID=//g' | \
				uniq | sort | uniq`
	fi
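
	# Note that the IDs found above are hex strings (e.g. 0x2b67) while
	# the test reports decimal TIDs, so printf is used below to convert
	# each one, e.g. `printf "%i" 0x2b67` prints 11111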

	# Iterate over the list of TIDs that the test says it has and find
	# them in the TIDs found in the perf report
	MISSING=""
	for TID2 in $TIDS; do
		FOUND=""
		for TIDHEX in $FOUND_TIDS; do
			TID=`printf "%i" $TIDHEX`
			if test "$TID" -eq "$TID2"; then
				FOUND="y"
				break
			fi
		done
		if test -z "$FOUND"; then
			MISSING="$MISSING $TID2"
		fi
	done
	if test -n "$MISSING"; then
		err "Thread IDs $MISSING not found in perf AUX data"
	fi
}
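
# Expected usage (file names here are only illustrative): the test binary
# writes one TID per line to a file while perf records it, then the driver
# checks that every reported thread shows up in the AUX trace:
#   perf_dump_aux_tid_verify "<perf data file>" "<file of TIDs>"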