from __future__ import absolute_import
-import json
-import math
import os
+import re
import shlex
+import subprocess
import sys

import lit.Test
@@ -25,19 +25,74 @@ def __init__(self, test_sub_dirs, test_suffix, run_under=[]):
        self.test_suffixes = {exe_suffix, test_suffix + '.py'}
        self.run_under = run_under

-    def get_num_tests(self, path, localConfig):
-        cmd = [path, '--gtest_list_tests', '--gtest_filter=-*DISABLED_*']
-        if cmd[0].endswith('.py'):
-            cmd = [sys.executable] + cmd
-        out, _, exitCode = lit.util.executeCommand(cmd, env=localConfig.environment)
-        if exitCode == 0:
-            return sum(map(lambda line: line.startswith('  '), out.splitlines()))
-        return None
+    def getGTestTests(self, path, litConfig, localConfig):
+        """getGTestTests(path) - [name]
+
+        Return the tests available in gtest executable.
+
+        Args:
+          path: String path to a gtest executable
+          litConfig: LitConfig instance
+          localConfig: TestingConfig instance"""
+
+        list_test_cmd = self.prepareCmd([path, '--gtest_list_tests'])
+
+        try:
+            output = subprocess.check_output(list_test_cmd,
+                                             env=localConfig.environment)
+        except subprocess.CalledProcessError as exc:
+            litConfig.warning(
+                "unable to discover google-tests in %r: %s. Process output: %s"
+                % (path, sys.exc_info()[1], exc.output))
+            # This doesn't look like a valid gtest file. This can
+            # have a number of causes, none of them good. For
+            # instance, we could have created a broken executable.
+            # Alternatively, someone has cruft in their test
+            # directory. If we don't return a test here, then no
+            # failures will get reported, so return a dummy test name
+            # so that the failure is reported later.
+            yield 'failed_to_discover_tests_from_gtest'
+            return
+
+        upstream_prefix = re.compile('Running main\(\) from .*gtest_main\.cc')
+        nested_tests = []
+        for ln in output.splitlines(False):  # Don't keep newlines.
+            ln = lit.util.to_string(ln)
+
+            if upstream_prefix.fullmatch(ln):
+                # Upstream googletest prints this to stdout prior to running
+                # tests. LLVM removed that print statement in r61540, but we
+                # handle it here in case upstream googletest is being used.
+                continue
+
+            # The test name list includes trailing comments beginning with
+            # a '#' on some lines, so skip those. We don't support test names
+            # that use escaping to embed '#' into their name as the names come
+            # from C++ class and method names where such things are hard and
+            # uninteresting to support.
+            ln = ln.split('#', 1)[0].rstrip()
+            if not ln.lstrip():
+                continue
+
+            index = 0
+            while ln[index * 2:index * 2 + 2] == '  ':
+                index += 1
+            while len(nested_tests) > index:
+                nested_tests.pop()
+
+            ln = ln[index * 2:]
+            if ln.endswith('.'):
+                nested_tests.append(ln)
+            elif any([name.startswith('DISABLED_')
+                      for name in nested_tests + [ln]]):
+                # Gtest will internally skip these tests. No need to launch a
+                # child process for it.
+                continue
+            else:
+                yield ''.join(nested_tests) + ln

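Illustration (not part of the diff): `--gtest_list_tests` prints each suite name terminated by '.', with its tests indented two spaces beneath it, and the loop above rebuilds qualified names from that indentation. A minimal standalone sketch of the same parsing, using made-up suite and test names:

    # Hypothetical --gtest_list_tests output.
    sample = ("FirstTest.\n"
              "  subTestA\n"
              "  subTestB\n"
              "  DISABLED_subTestC\n"
              "ParameterizedTest/0.\n"
              "  subTest\n")

    nested, names = [], []
    for ln in sample.splitlines():
        ln = ln.split('#', 1)[0].rstrip()           # drop trailing '#' comments
        if not ln.lstrip():
            continue
        depth = 0
        while ln[depth * 2:depth * 2 + 2] == '  ':  # two spaces per nesting level
            depth += 1
        del nested[depth:]                          # pop suite names deeper than this line
        ln = ln[depth * 2:]
        if ln.endswith('.'):                        # suite line, e.g. 'FirstTest.'
            nested.append(ln)
        elif any(n.startswith('DISABLED_') for n in nested + [ln]):
            continue                                # gtest skips these internally
        else:
            names.append(''.join(nested) + ln)

    # names == ['FirstTest.subTestA', 'FirstTest.subTestB',
    #           'ParameterizedTest/0.subTest']
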
    def getTestsInDirectory(self, testSuite, path_in_suite,
                            litConfig, localConfig):
-        init_shard_size = 512  # number of tests in a shard
-        core_count = lit.util.usable_core_count()
        source_path = testSuite.getSourcePath(path_in_suite)
        for subdir in self.test_sub_dirs:
            dir_path = os.path.join(source_path, subdir)
@@ -47,97 +102,52 @@ def getTestsInDirectory(self, testSuite, path_in_suite,
                                             suffixes=self.test_suffixes):
                # Discover the tests in this executable.
                execpath = os.path.join(source_path, subdir, fn)
-                num_tests = self.get_num_tests(execpath, localConfig)
-                if num_tests is not None:
-                    # Compute the number of shards.
-                    shard_size = init_shard_size
-                    nshard = int(math.ceil(num_tests / shard_size))
-                    while nshard < core_count and shard_size > 1:
-                        shard_size = shard_size // 2
-                        nshard = int(math.ceil(num_tests / shard_size))
-
-                    # Create one lit test for each shard.
-                    for idx in range(nshard):
-                        testPath = path_in_suite + (subdir, fn,
-                                                    str(idx), str(nshard))
-                        json_file = '-'.join([execpath, testSuite.config.name,
-                                              str(os.getpid()), str(idx),
-                                              str(nshard)]) + '.json'
-                        yield lit.Test.Test(testSuite, testPath, localConfig,
-                                            file_path=execpath,
-                                            gtest_json_file=json_file)
-                else:
-                    # This doesn't look like a valid gtest file. This can
-                    # have a number of causes, none of them good. For
-                    # instance, we could have created a broken executable.
-                    # Alternatively, someone has cruft in their test
-                    # directory. If we don't return a test here, then no
-                    # failures will get reported, so return a dummy test name
-                    # so that the failure is reported later.
-                    testPath = path_in_suite + (subdir, fn, 'failed_to_discover_tests_from_gtest')
-                    yield lit.Test.Test(testSuite, testPath, localConfig, file_path=execpath)
+                testnames = self.getGTestTests(execpath, litConfig, localConfig)
+                for testname in testnames:
+                    testPath = path_in_suite + (subdir, fn, testname)
+                    yield lit.Test.Test(testSuite, testPath, localConfig,
+                                        file_path=execpath)

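Illustration (not part of the diff): each discovered name becomes its own lit test whose path appends the subdirectory, the executable's file name, and the gtest name. A short sketch with hypothetical values for the names used in the loop above:

    path_in_suite = ('unittests',)             # assumed suite-relative directory
    subdir, fn = 'DummySubDir', 'OneTest'      # assumed discovered gtest executable
    testname = 'FirstTest.subTestA'            # name yielded by getGTestTests
    testPath = path_in_suite + (subdir, fn, testname)
    # lit reports this roughly as:
    #   <suite> :: unittests/DummySubDir/OneTest/FirstTest.subTestA
    print('/'.join(testPath))
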
    def execute(self, test, litConfig):
-        if test.gtest_json_file is None:
-            return lit.Test.FAIL, ''
-
        testPath,testName = os.path.split(test.getSourcePath())
        while not os.path.exists(testPath):
            # Handle GTest parametrized and typed tests, whose name includes
            # some '/'s.
            testPath, namePrefix = os.path.split(testPath)
            testName = namePrefix + '/' + testName

-        testName,total_shards = os.path.split(testName)
-        testName,shard_idx = os.path.split(testName)
-        shard_env = {'GTEST_COLOR': 'no', 'GTEST_TOTAL_SHARDS': total_shards, 'GTEST_SHARD_INDEX': shard_idx, 'GTEST_OUTPUT': 'json:' + test.gtest_json_file}
-        test.config.environment.update(shard_env)
-
-        cmd = [testPath]
+        cmd = [testPath, '--gtest_filter=' + testName]
        cmd = self.prepareCmd(cmd)
        if litConfig.useValgrind:
            cmd = litConfig.valgrindArgs + cmd

        if litConfig.noExecute:
            return lit.Test.PASS, ''

-        shard_envs = '\n'.join([k + '=' + v for k, v in shard_env.items()])
-        shard_header = f"Script(shard):\n--\n{shard_envs}\n{' '.join(cmd)}\n--\n"
+        header = f"Script:\n--\n{' '.join(cmd)}\n--\n"

        try:
-            _, _, exitCode = lit.util.executeCommand(
+            out, err, exitCode = lit.util.executeCommand(
                cmd, env=test.config.environment,
                timeout=litConfig.maxIndividualTestTime)
        except lit.util.ExecuteCommandTimeoutException:
            return (lit.Test.TIMEOUT,
-                    f'{shard_header}Reached timeout of '
+                    f'{header}Reached timeout of '
                    f'{litConfig.maxIndividualTestTime} seconds')

-        if not os.path.exists(test.gtest_json_file):
-            errmsg = f"shard JSON output does not exist: %s" % (test.gtest_json_file)
-            return lit.Test.FAIL, shard_header + errmsg
-
        if exitCode:
-            output = shard_header + '\n'
-            with open(test.gtest_json_file, encoding='utf-8') as f:
-                testsuites = json.load(f)['testsuites']
-                for testcase in testsuites:
-                    for testinfo in testcase['testsuite']:
-                        if testinfo['result'] == 'SUPPRESSED' or testinfo['result'] == 'SKIPPED':
-                            continue
-                        testname = testcase['name'] + '.' + testinfo['name']
-                        header = f"Script:\n--\n{' '.join(cmd)} --gtest_filter={testname}\n--\n"
-                        if 'failures' in testinfo:
-                            output += header
-                            for fail in testinfo['failures']:
-                                output += fail['failure'] + '\n'
-                            output += '\n'
-                        elif testinfo['result'] != 'COMPLETED':
-                            output += header
-                            output += 'unresolved test result\n'
-            return lit.Test.FAIL, output
-        else:
-            return lit.Test.PASS, ''
+            return lit.Test.FAIL, header + out + err
+
+        if '[  SKIPPED ] 1 test,' in out:
+            return lit.Test.SKIPPED, ''
+
+        passing_test_line = '[  PASSED  ] 1 test.'
+        if passing_test_line not in out:
+            return (lit.Test.UNRESOLVED,
+                    f'{header}Unable to find {passing_test_line} '
+                    f'in gtest output:\n\n{out}{err}')
+
+        return lit.Test.PASS,''

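Illustration (not part of the diff): with the filter in place, each lit test runs exactly one gtest case, so the summary googletest prints at the end of the run is enough to classify the result. A hedged sketch assuming a hypothetical executable path and test name:

    # Command built by execute() for one discovered test (hypothetical paths):
    #   /path/to/OneTest --gtest_filter=FirstTest.subTestA
    # The decision mirrors the checks above; plain strings stand in for the
    # lit.Test result codes.
    def classify(exit_code, out):
        if exit_code:
            return 'FAIL'
        if '[  SKIPPED ] 1 test,' in out:
            return 'SKIPPED'
        if '[  PASSED  ] 1 test.' not in out:
            return 'UNRESOLVED'
        return 'PASS'

    print(classify(0, '[  PASSED  ] 1 test.\n'))  # -> PASS
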
    def prepareCmd(self, cmd):
        """Insert interpreter if needed.
@@ -156,61 +166,3 @@ def prepareCmd(self, cmd):
            else:
                cmd = shlex.split(self.run_under) + cmd
        return cmd
-
-    @staticmethod
-    def post_process_shard_results(selected_tests, discovered_tests):
-        def remove_gtest(tests):
-            idxs = []
-            for idx, t in enumerate(tests):
-                if t.gtest_json_file:
-                    idxs.append(idx)
-            for i in range(len(idxs)):
-                del tests[idxs[i] - i]
-
-        remove_gtest(discovered_tests)
-        gtests = [t for t in selected_tests if t.gtest_json_file]
-        remove_gtest(selected_tests)
-        for test in gtests:
-            # In case gtest has bugs such that no JSON file was emitted.
-            if not os.path.exists(test.gtest_json_file):
-                selected_tests.append(test)
-                discovered_tests.append(test)
-                continue
-
-            # Load json file to retrieve results.
-            with open(test.gtest_json_file, encoding='utf-8') as f:
-                testsuites = json.load(f)['testsuites']
-                for testcase in testsuites:
-                    for testinfo in testcase['testsuite']:
-                        # Ignore disabled tests.
-                        if testinfo['result'] == 'SUPPRESSED':
-                            continue
-
-                        testPath = test.path_in_suite[:-2] + (testcase['name'], testinfo['name'])
-                        subtest = lit.Test.Test(test.suite, testPath,
-                                                test.config, test.file_path)
-
-                        testname = testcase['name'] + '.' + testinfo['name']
-                        header = f"Script:\n--\n{test.file_path} --gtest_filter={testname}\n--\n"
-
-                        output = ''
-                        if testinfo['result'] == 'SKIPPED':
-                            returnCode = lit.Test.SKIPPED
-                        elif 'failures' in testinfo:
-                            returnCode = lit.Test.FAIL
-                            output = header
-                            for fail in testinfo['failures']:
-                                output += fail['failure'] + '\n'
-                        elif testinfo['result'] == 'COMPLETED':
-                            returnCode = lit.Test.PASS
-                        else:
-                            returnCode = lit.Test.UNRESOLVED
-                            output = header + 'unresolved test result\n'
-
-                        subtest.setResult(lit.Test.Result(returnCode, output, float(testinfo['time'][:-1])))
-
-                        selected_tests.append(subtest)
-                        discovered_tests.append(subtest)
-            os.remove(test.gtest_json_file)
-
-        return selected_tests, discovered_tests
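Illustration (not part of the diff): the removed code relied on googletest's built-in sharding, which is driven entirely by environment variables and can dump results as JSON. A minimal standalone sketch of that mechanism, with hypothetical paths:

    import json
    import os
    import subprocess

    env = dict(os.environ)
    env.update({
        'GTEST_COLOR': 'no',
        'GTEST_TOTAL_SHARDS': '4',                # split the run into 4 shards
        'GTEST_SHARD_INDEX': '0',                 # this invocation runs shard 0
        'GTEST_OUTPUT': 'json:/tmp/shard0.json',  # write results as JSON
    })
    subprocess.run(['/path/to/OneTest'], env=env, check=False)

    # The JSON layout below is what post_process_shard_results iterated over.
    with open('/tmp/shard0.json', encoding='utf-8') as f:
        testsuites = json.load(f)['testsuites']
    for testcase in testsuites:
        for testinfo in testcase['testsuite']:
            print(testcase['name'] + '.' + testinfo['name'], testinfo['result'])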