[Python] Fix "indentation is not a multiple of four" #1290

Merged · 1 commit · Feb 13, 2016
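
The warning quoted in the title is the pep8 check E111 ("indentation is not a multiple of four"; the tool was later renamed pycodestyle). The commit reindents three Python helper scripts so that every indentation level is a multiple of four spaces. A minimal sketch of how that check could be run against one of the touched files, assuming the pep8 package is installed and the path is run from the repository root:

import pep8

# Limit the run to E111, the check named in the PR title.
style = pep8.StyleGuide(select=['E111'])
report = style.check_files(
    ['test/Driver/Dependencies/Inputs/update-dependencies.py'])
print(report.total_errors)  # expected to be 0 after this change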
24 changes: 12 additions & 12 deletions test/Driver/Dependencies/Inputs/modify-non-primary-files.py
@@ -24,19 +24,19 @@
assert sys.argv[1] == '-frontend'

if '-primary-file' in sys.argv:
primaryFileIndex = sys.argv.index('-primary-file') + 1
primaryFile = sys.argv[primaryFileIndex]
primaryFileIndex = sys.argv.index('-primary-file') + 1
primaryFile = sys.argv[primaryFileIndex]

# Modify all files after the primary file.
# Ideally this would modify every non-primary file, but that's harder to
# infer without actually parsing the arguments.
for file in sys.argv[primaryFileIndex + 1:]:
if file.startswith('-'):
break
os.utime(file, None)
# Modify all files after the primary file.
# Ideally this would modify every non-primary file, but that's harder to
# infer without actually parsing the arguments.
for file in sys.argv[primaryFileIndex + 1:]:
if file.startswith('-'):
break
os.utime(file, None)

else:
primaryFile = None
primaryFile = None

outputFile = sys.argv[sys.argv.index('-o') + 1]

@@ -46,6 +46,6 @@
os.utime(outputFile, None)

if primaryFile:
print("Handled", os.path.basename(primaryFile))
print("Handled", os.path.basename(primaryFile))
else:
print("Produced", os.path.basename(outputFile))
print("Produced", os.path.basename(outputFile))
15 changes: 8 additions & 7 deletions test/Driver/Dependencies/Inputs/update-dependencies.py
@@ -36,13 +36,14 @@
assert sys.argv[1] == '-frontend'

if '-primary-file' in sys.argv:
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
depsFile = sys.argv[sys.argv.index('-emit-reference-dependencies-path') + 1]
primaryFile = sys.argv[sys.argv.index('-primary-file') + 1]
depsFile = sys.argv[sys.argv.index(
'-emit-reference-dependencies-path') + 1]

# Replace the dependencies file with the input file.
shutil.copyfile(primaryFile, depsFile)
# Replace the dependencies file with the input file.
shutil.copyfile(primaryFile, depsFile)
else:
primaryFile = None
primaryFile = None

outputFile = sys.argv[sys.argv.index('-o') + 1]

@@ -52,6 +53,6 @@
os.utime(outputFile, None)

if primaryFile:
print("Handled", os.path.basename(primaryFile))
print("Handled", os.path.basename(primaryFile))
else:
print("Produced", os.path.basename(outputFile))
print("Produced", os.path.basename(outputFile))
235 changes: 120 additions & 115 deletions utils/submit-benchmark-results
File mode changed from 100755 to 100644 (executable bit removed).
@@ -22,6 +22,7 @@ XFAIL = 2

###


def capture_with_result(args, include_stderr=False):
"""capture_with_result(command) -> (output, exit code)

@@ -40,16 +41,19 @@ def capture_with_result(args, include_stderr=False):
out, _ = p.communicate()
return out, p.wait()


def capture(args, include_stderr=False):
"""capture(command) - Run the given command (or argv list) in a shell and
return the standard output."""
return capture_with_result(args, include_stderr)[0]


def timestamp():
return datetime.datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S')

###


def submit_results_to_server(results_data, submit_url):
# Submit the URL encoded data.
data = urllib.urlencode({'input_data': results_data,
@@ -73,121 +77,122 @@ def submit_results_to_server(results_data, submit_url):

###


def main():
parser = optparse.OptionParser("""%prog [options] <results>""")
parser.add_option("", "--submit", dest="submit_url", metavar="URL",
help="Submit results to the given URL",
action="store", type=str, default=None)
parser.add_option("", "--output", dest="output", metavar="PATH",
help="Write raw report data to PATH",
action="store", type=str, default=None)
parser.add_option("", "--machine-name", dest="machine_name", metavar="NAME",
help="Set the machine name to embed in the report",
action="store", type=str, default=None)
parser.add_option("", "--run-order", dest="run_order", metavar="REVISION",
help="Set the run order to embed in the report",
action="store", type=int, default=None)
opts, args = parser.parse_args()

# At least one of --submit or --output is required.
if len(args) != 1:
parser.error("incorrect number of arguments")
if opts.submit_url is None and opts.output is None:
parser.error("no action given (provide --submit or --output)")
if opts.machine_name is None:
parser.error("--machine-name is required")
if opts.run_order is None:
parser.error("--run-order is required")

# Load the results data.
results_path, = args
with open(results_path) as f:
data = json.load(f)

# Compute some data not present in the 'lit' report.
machine_name = opts.machine_name
run_order = str(opts.run_order)

# Estimate the end time as being now, and the start time as being that minus
# the elapsed testing time.
utcnow = datetime.datetime.utcnow()
start_time = utcnow - datetime.timedelta(seconds=data['elapsed'])
end_time = utcnow

# Create the LNT report format.
lnt_results = {}
lnt_results['Machine'] = {
'Name': machine_name,
'Info': {
'hardware': capture(["uname", "-m"], include_stderr=True).strip(),
'name': capture(["uname", "-n"], include_stderr=True).strip(),
'os': capture(["uname", "-sr"], include_stderr=True).strip(),
'uname': capture(["uname", "-a"], include_stderr=True).strip(),
}
}

# FIXME: Record source versions for LLVM, Swift, etc.?
lnt_results['Run'] = {
'Start Time': start_time.strftime('%Y-%m-%d %H:%M:%S'),
'End Time': end_time.strftime('%Y-%m-%d %H:%M:%S'),
'Info': {
'__report_version__': '1',
'tag': 'nts',
'inferred_run_order': run_order,
'run_order': run_order,
'sw_vers': capture(['sw_vers'], include_stderr=True).strip(),
}
}

lnt_results['Tests'] = lnt_tests = []
for test in data['tests']:
# Ignore tests which have unexpected status.
code = test['code']
if code not in ('PASS', 'XPASS', 'FAIL', 'XFAIL'):
sys.stderr.write("ignoring test %r with result code %r" % (
test['name'], code))
continue

# Extract the test name, which is encoded as 'suite :: name'.
test_name = 'nts.%s' % (test['name'].split('::', 1)[1][1:],)

# Convert this test to the 'nts' schema.
compile_success = test['metrics'].get('compile_success', 1)
compile_time = test['metrics']['compile_time']
exec_success = test['metrics'].get('exec_success', 1)
exec_time = test['metrics']['exec_time']

# FIXME: Ensure the test success flags matches the result code.
# FIXME: The XFAIL handling here isn't going to be right.

if not compile_success:
lnt_tests.append({'Name': '%s.compile.status' % (test_name,),
'Info': {},
'Data': [FAIL]})
if not exec_success:
lnt_tests.append({'Name': '%s.exec.status' % (test_name,),
'Info': {},
'Data': [FAIL]})
lnt_tests.append({'Name': '%s.compile' % (test_name,),
'Info': {},
'Data': [compile_time]})
lnt_tests.append({'Name': '%s.exec' % (test_name,),
'Info': {},
'Data': [exec_time]})

# Create the report data.
lnt_result_data = json.dumps(lnt_results, indent=2) + '\n'

# Write the results, if requested.
if opts.output:
sys.stderr.write('%s: generating report: %r\n' % (
timestamp(), opts.output))
with open(opts.output, 'w') as f:
f.write(lnt_result_data)

# Submit the results to an LNT server, if requested.
if opts.submit_url:
submit_results_to_server(lnt_result_data, opts.submit_url)
parser = optparse.OptionParser("""%prog [options] <results>""")
parser.add_option("", "--submit", dest="submit_url", metavar="URL",
help="Submit results to the given URL",
action="store", type=str, default=None)
parser.add_option("", "--output", dest="output", metavar="PATH",
help="Write raw report data to PATH",
action="store", type=str, default=None)
parser.add_option("", "--machine-name", dest="machine_name", metavar="NAME",
help="Set the machine name to embed in the report",
action="store", type=str, default=None)
parser.add_option("", "--run-order", dest="run_order", metavar="REVISION",
help="Set the run order to embed in the report",
action="store", type=int, default=None)
opts, args = parser.parse_args()

# At least one of --submit or --output is required.
if len(args) != 1:
parser.error("incorrect number of arguments")
if opts.submit_url is None and opts.output is None:
parser.error("no action given (provide --submit or --output)")
if opts.machine_name is None:
parser.error("--machine-name is required")
if opts.run_order is None:
parser.error("--run-order is required")

# Load the results data.
results_path, = args
with open(results_path) as f:
data = json.load(f)

# Compute some data not present in the 'lit' report.
machine_name = opts.machine_name
run_order = str(opts.run_order)

# Estimate the end time as being now, and the start time as being that minus
# the elapsed testing time.
utcnow = datetime.datetime.utcnow()
start_time = utcnow - datetime.timedelta(seconds=data['elapsed'])
end_time = utcnow

# Create the LNT report format.
lnt_results = {}
lnt_results['Machine'] = {
'Name': machine_name,
'Info': {
'hardware': capture(["uname", "-m"], include_stderr=True).strip(),
'name': capture(["uname", "-n"], include_stderr=True).strip(),
'os': capture(["uname", "-sr"], include_stderr=True).strip(),
'uname': capture(["uname", "-a"], include_stderr=True).strip(),
}
}

# FIXME: Record source versions for LLVM, Swift, etc.?
lnt_results['Run'] = {
'Start Time': start_time.strftime('%Y-%m-%d %H:%M:%S'),
'End Time': end_time.strftime('%Y-%m-%d %H:%M:%S'),
'Info': {
'__report_version__': '1',
'tag': 'nts',
'inferred_run_order': run_order,
'run_order': run_order,
'sw_vers': capture(['sw_vers'], include_stderr=True).strip(),
}
}

lnt_results['Tests'] = lnt_tests = []
for test in data['tests']:
# Ignore tests which have unexpected status.
code = test['code']
if code not in ('PASS', 'XPASS', 'FAIL', 'XFAIL'):
sys.stderr.write("ignoring test %r with result code %r" % (
test['name'], code))
continue

# Extract the test name, which is encoded as 'suite :: name'.
test_name = 'nts.%s' % (test['name'].split('::', 1)[1][1:],)

# Convert this test to the 'nts' schema.
compile_success = test['metrics'].get('compile_success', 1)
compile_time = test['metrics']['compile_time']
exec_success = test['metrics'].get('exec_success', 1)
exec_time = test['metrics']['exec_time']

# FIXME: Ensure the test success flags matches the result code.
# FIXME: The XFAIL handling here isn't going to be right.

if not compile_success:
lnt_tests.append({'Name': '%s.compile.status' % (test_name,),
'Info': {},
'Data': [FAIL]})
if not exec_success:
lnt_tests.append({'Name': '%s.exec.status' % (test_name,),
'Info': {},
'Data': [FAIL]})
lnt_tests.append({'Name': '%s.compile' % (test_name,),
'Info': {},
'Data': [compile_time]})
lnt_tests.append({'Name': '%s.exec' % (test_name,),
'Info': {},
'Data': [exec_time]})

# Create the report data.
lnt_result_data = json.dumps(lnt_results, indent=2) + '\n'

# Write the results, if requested.
if opts.output:
sys.stderr.write('%s: generating report: %r\n' % (
timestamp(), opts.output))
with open(opts.output, 'w') as f:
f.write(lnt_result_data)

# Submit the results to an LNT server, if requested.
if opts.submit_url:
submit_results_to_server(lnt_result_data, opts.submit_url)

if __name__ == '__main__':
main()
main()
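
utils/submit-benchmark-results turns a lit-style JSON results file into an LNT report and optionally submits it. A minimal sketch of the input shape it expects, inferred from the fields it reads above; the test name, metric values, and file names are invented:

import json

results = {
    'elapsed': 42.0,  # used to estimate the run's start time
    'tests': [
        {
            # Encoded as 'suite :: name'; the part after '::' becomes nts.<name>.
            'name': 'benchmarks :: Ackermann',
            'code': 'PASS',  # PASS, XPASS, FAIL, or XFAIL; others are ignored
            'metrics': {
                'compile_time': 1.25,
                'exec_time': 0.75,
                # 'compile_success' and 'exec_success' default to 1 if absent.
            },
        },
    ],
}

with open('results.json', 'w') as f:
    json.dump(results, f)

# Hypothetical invocation, using the options defined by the parser above:
#   utils/submit-benchmark-results --machine-name my-machine \
#       --run-order 12345 --output report.json results.json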