#!/usr/bin/env python

from __future__ import print_function

import argparse
import json
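
# Hypothetical usage sketch (script and JSON file names are just placeholders):
#
#   python analyze_request_durations.py request_durations.json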

# This value was empirically determined on an iMac Pro with a fairly small sample size.
# All measurements in seconds should be taken with extreme care.
INSTRUCTIONS_PER_SECOND = 6693088549.09358

# The length of the bars in the histograms
HISTOGRAM_BAR_LENGTH = 40


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument('request_durations_file')
    return parser.parse_args()


def print_stats(entries):
    """
    entries are dictionaries taken from the JSON file under the
    files.<file-name>.<request-name> path, each containing the following keys:
    - average
    - logHistogram
    - requestsExecuted
    """
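    # For illustration only, an assumed shape of a single entry (all numbers
    # are made up):
    #   {'average': 12000000, 'requestsExecuted': 42,
    #    'logHistogram': {'23': 30, '24': 12}}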

    # Create a log histogram for all entries
    aggregated_completion_log_histogram = {}
    # Total number of instructions executed (adds up average * requestsExecuted)
    instruction_sum = 0
    # Adds requestsExecuted for each entry
    total_requests_executed = 0
    for entry in entries:
        average = int(entry['average'])
        requests_executed = int(entry['requestsExecuted'])
        instruction_sum += average * requests_executed
        total_requests_executed += requests_executed
        for (bucket, count) in entry['logHistogram'].items():
            bucket = int(bucket)
            count = int(count)
            if bucket not in aggregated_completion_log_histogram:
                aggregated_completion_log_histogram[bucket] = 0
            aggregated_completion_log_histogram[bucket] += count

    average_instructions_per_request = instruction_sum / total_requests_executed
    seconds_estimate = average_instructions_per_request / INSTRUCTIONS_PER_SECOND
    print('Average: {:.0e} instr (ca. {:.3f}s)'
          .format(average_instructions_per_request, seconds_estimate))

    print('\nHistogram:')

    min_bucket = min(aggregated_completion_log_histogram)
    max_bucket = max(aggregated_completion_log_histogram)
    max_count = max(aggregated_completion_log_histogram.values())

    # Sums up the percentages for each entry to make it easier to spot the
    # n-th percentile in the histogram
    running_percentage_sum = 0
    for bucket in range(min_bucket, max_bucket + 1):
        count = aggregated_completion_log_histogram.get(bucket, 0)
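        # Each log-histogram bucket b presumably holds requests that executed
        # fewer than 2^b instructions, so 2^b is used as the bucket's upper bound.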
        executed_instructions = pow(2, bucket)
        estimated_seconds = executed_instructions / INSTRUCTIONS_PER_SECOND
        bar_fill = count * HISTOGRAM_BAR_LENGTH // max_count
        bar = '*' * bar_fill + ' ' * (HISTOGRAM_BAR_LENGTH - bar_fill)
        percentage = float(count) / total_requests_executed * 100
        running_percentage_sum += percentage
        print('< {:5.0e} instr (ca. {:6.3f}s): {} {:4.1f}% (< {:5.1f}%)'
              .format(executed_instructions, estimated_seconds, bar, percentage,
                      running_percentage_sum))


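# The JSON file is assumed to look roughly like this (the file name and all
# numbers are made up for illustration):
#   {"files": {"foo.swift": {"CodeComplete": {"average": ...,
#                                             "requestsExecuted": ...,
#                                             "logHistogram": {...}}}}}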
def print_stats_for_all_files(request_durations, request_name):
    print('\n---------- All {} requests ----------\n'.format(request_name))
    all_request_entries = []
    for (file_name, file_stats) in request_durations['files'].items():
        all_request_entries.append(file_stats[request_name])
    print_stats(all_request_entries)


def print_slowest_files(request_durations, request_name, num_files):
    print('\n---------- {} slowest files for {} requests ----------\n'
          .format(num_files, request_name))

    file_and_average = []
    for (file_name, file_stats) in request_durations['files'].items():
        file_and_average.append((file_name, file_stats[request_name]['average']))
    file_and_average = sorted(file_and_average, key=lambda x: x[1], reverse=True)

    for (file_name, average_request_duration) in file_and_average[0:num_files]:
        print('-- {} --\n'.format(file_name))
        print_stats([request_durations['files'][file_name][request_name]])
        print('\n')


def print_seconds_disclaimer():
    print('''IMPORTANT:
All measurements printed by this script are in numbers of
instructions. Measurements in seconds are computed assuming that all machines
execute the same number of instructions per second at all times, which is not
correct. Thus all measurements in seconds should be taken with extreme care.
As they scale linearly with the number of instructions, it is safe to compare
values in seconds output by this script.
''')


def analyze_request_durations(file):
    print_seconds_disclaimer()
    print('\n---------- Raw durations for further analysis ----------\n')
    with open(file, 'r') as json_file:
        print(json_file.read())

    with open(file, 'r') as json_file:
        request_durations = json.load(json_file)
    print_stats_for_all_files(request_durations, 'CodeComplete')
    print_slowest_files(request_durations, 'CodeComplete', 10)


def main():
    args = parse_args()
    analyze_request_durations(args.request_durations_file)


if __name__ == '__main__':
    main()