|
20 | 20 | import sys |
21 | 21 | import unittest |
22 | 22 | from collections import OrderedDict |
| 23 | +from concurrent.futures import ThreadPoolExecutor |
| 24 | +from pathlib import Path |
| 25 | +import queue |
| 26 | +import io |
| 27 | +import threading |
| 28 | +from functools import partial |
23 | 29 | |
24 | 30 | from scripts.test import binaryenjs |
25 | 31 | from scripts.test import lld |
@@ -175,73 +181,124 @@ def run_wasm_reduce_tests(): |
175 | 181 | assert after < 0.85 * before, [before, after] |
176 | 182 | |
177 | 183 | |
178 | | -def run_spec_tests(): |
179 | | - print('\n[ checking wasm-shell spec testcases... ]\n') |
| 184 | +def run_spec_test(wast, stdout=None, stderr=None): |
| 185 | + cmd = shared.WASM_SHELL + [wast] |
| 186 | + output = support.run_command(cmd, stdout=stdout, stderr=stderr) |
| 187 | + # filter out binaryen interpreter logging that the spec suite |
| 188 | + # doesn't expect |
| 189 | + filtered = [line for line in output.splitlines() if not line.startswith('[trap')] |
| 190 | + return '\n'.join(filtered) + '\n' |
180 | 191 | |
181 | | - for wast in shared.options.spec_tests: |
182 | | - base = os.path.basename(wast) |
183 | | - print('..', base) |
184 | | - # windows has some failures that need to be investigated |
185 | | - if base == 'names.wast' and shared.skip_if_on_windows('spec: ' + base): |
186 | | - continue |
187 | 192 | |
188 | | - def run_spec_test(wast): |
189 | | - cmd = shared.WASM_SHELL + [wast] |
190 | | - output = support.run_command(cmd, stderr=subprocess.PIPE) |
191 | | - # filter out binaryen interpreter logging that the spec suite |
192 | | - # doesn't expect |
193 | | - filtered = [line for line in output.splitlines() if not line.startswith('[trap')] |
194 | | - return '\n'.join(filtered) + '\n' |
195 | | - |
196 | | - def run_opt_test(wast): |
197 | | - # check optimization validation |
198 | | - cmd = shared.WASM_OPT + [wast, '-O', '-all', '-q'] |
199 | | - support.run_command(cmd) |
| 193 | +def run_opt_test(wast, stdout=None, stderr=None): |
| 194 | + # check optimization validation |
| 195 | + cmd = shared.WASM_OPT + [wast, '-O', '-all', '-q'] |
| 196 | + support.run_command(cmd, stdout=stdout, stderr=stderr) |
| 197 | + |
| 198 | + |
| 199 | +def check_expected(actual, expected, stdout=None): |
| 200 | + if expected and os.path.exists(expected): |
| 201 | + expected = open(expected).read() |
| 202 | + print(' (using expected output)', file=stdout) |
| 203 | + actual = actual.strip() |
| 204 | + expected = expected.strip() |
| 205 | + if actual != expected: |
| 206 | + shared.fail(actual, expected) |
| 207 | + |
| 208 | + |
| 209 | +def run_one_spec_test(wast: Path, stdout=None, stderr=None): |
| 210 | + test_name = wast.name |
| 211 | + base_name = "-".join(wast.parts[-3:]) |
| 212 | + |
| 213 | + print('..', test_name, file=stdout) |
| 214 | + # windows has some failures that need to be investigated |
| 215 | + if test_name == 'names.wast' and shared.skip_if_on_windows('spec: ' + test_name): |
| 216 | + return |
| 217 | + |
| 218 | + expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', test_name + '.log') |
| 219 | + |
| 220 | + # some spec tests should fail (actual process failure, not just assert_invalid) |
| 221 | + try: |
| 222 | + actual = run_spec_test(str(wast), stdout=stdout, stderr=stderr) |
| 223 | + except Exception as e: |
| 224 | + if ('wasm-validator error' in str(e) or 'error: ' in str(e)) and '.fail.' in test_name: |
| 225 | + print('<< test failed as expected >>', file=stdout) |
| 226 | + return # don't try all the binary format stuff TODO |
| 227 | + else: |
| 228 | + shared.fail_with_error(str(e)) |
| 229 | + |
| 230 | + check_expected(actual, expected, stdout=stdout) |
| 231 | + |
| 232 | + # check binary format. here we can verify execution of the final |
| 233 | + # result, no need for an output verification |
| 234 | + actual = '' |
| 235 | + with open(base_name + ".transformed", 'w') as transformed_spec_file: |
| 236 | + for i, (module, asserts) in enumerate(support.split_wast(str(wast))): |
| 237 | + if not module: |
| 238 | + # Skip any initial assertions that don't have a module |
| 239 | + continue |
| 240 | + print(f' testing split module {i}', file=stdout) |
| 241 | + split_name = base_name + f'_split{i}.wast' |
| 242 | + support.write_wast(split_name, module) |
| 243 | + run_opt_test(split_name, stdout=stdout, stderr=stderr) # also that our optimizer doesn't break on it |
| 244 | + |
| 245 | + result_wast_file = shared.binary_format_check(split_name, verify_final_result=False, base_name=base_name, stdout=stdout, stderr=stderr) |
| 246 | + with open(result_wast_file) as f: |
| 247 | + result_wast = f.read() |
| 248 | + # add the asserts, and verify that the test still passes |
| 249 | + transformed_spec_file.write(result_wast + '\n' + '\n'.join(asserts)) |
| 250 | + |
| 251 | + # compare all the outputs to the expected output |
| 252 | + actual = run_spec_test(base_name + ".transformed", stdout=stdout, stderr=stderr) |
| 253 | + check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', test_name + '.log'), stdout=stdout) |
| 254 | + |
| 255 | + |
| 256 | +def run_spec_test_with_wrapped_stdout(output_queue, wast: Path): |
| 257 | + out = io.StringIO() |
| 258 | + try: |
| 259 | + ret = run_one_spec_test(wast, stdout=out, stderr=out) |
| 260 | + except Exception as e: |
| 261 | + print(e, file=out) |
| 262 | + raise |
| 263 | + finally: |
| 264 | + # If a test fails, it's important to keep its output |
| 265 | + output_queue.put(out.getvalue()) |
| 266 | + return ret |
| 267 | + |
| 268 | + |
| 269 | +def run_spec_tests(): |
| 270 | + print('\n[ checking wasm-shell spec testcases... ]\n') |
200 | 271 | |
201 | | - def check_expected(actual, expected): |
202 | | - if expected and os.path.exists(expected): |
203 | | - expected = open(expected).read() |
204 | | - print(' (using expected output)') |
205 | | - actual = actual.strip() |
206 | | - expected = expected.strip() |
207 | | - if actual != expected: |
208 | | - shared.fail(actual, expected) |
209 | | - |
210 | | - expected = os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log') |
211 | | - |
212 | | - # some spec tests should fail (actual process failure, not just assert_invalid) |
213 | | - try: |
214 | | - actual = run_spec_test(wast) |
215 | | - except Exception as e: |
216 | | - if ('wasm-validator error' in str(e) or 'error: ' in str(e)) and '.fail.' in base: |
217 | | - print('<< test failed as expected >>') |
218 | | - continue # don't try all the binary format stuff TODO |
219 | | - else: |
220 | | - shared.fail_with_error(str(e)) |
221 | | - |
222 | | - check_expected(actual, expected) |
223 | | - |
224 | | - # check binary format. here we can verify execution of the final |
225 | | - # result, no need for an output verification |
226 | | - actual = '' |
227 | | - with open(base, 'w') as transformed_spec_file: |
228 | | - for i, (module, asserts) in enumerate(support.split_wast(wast)): |
229 | | - if not module: |
230 | | - # Skip any initial assertions that don't have a module |
231 | | - continue |
232 | | - print(f' testing split module {i}') |
233 | | - split_name = os.path.splitext(base)[0] + f'_split{i}.wast' |
234 | | - support.write_wast(split_name, module) |
235 | | - run_opt_test(split_name) # also that our optimizer doesn't break on it |
236 | | - result_wast_file = shared.binary_format_check(split_name, verify_final_result=False) |
237 | | - with open(result_wast_file) as f: |
238 | | - result_wast = f.read() |
239 | | - # add the asserts, and verify that the test still passes |
240 | | - transformed_spec_file.write(result_wast + '\n' + '\n'.join(asserts)) |
241 | | - |
242 | | - # compare all the outputs to the expected output |
243 | | - actual = run_spec_test(base) |
244 | | - check_expected(actual, os.path.join(shared.get_test_dir('spec'), 'expected-output', base + '.log')) |
| 272 | + output_queue = queue.Queue() |
| 273 | + |
| 274 | + def printer(): |
| 275 | + while True: |
| 276 | + try: |
| 277 | + string = output_queue.get() |
| 278 | + except queue.ShutDown: |
| 279 | + break |
| 280 | + print(string, end="") |
| 281 | + |
| 282 | + printing_thread = threading.Thread(target=printer) |
| 283 | + printing_thread.start() |
| 284 | + |
| 285 | + worker_count = (os.cpu_count() or 1) * 2 |
| 286 | + print("Running with", worker_count, "workers") |
| 287 | + executor = ThreadPoolExecutor(max_workers=worker_count) |
| 288 | + try: |
| 289 | + results = executor.map(partial(run_spec_test_with_wrapped_stdout, output_queue), map(Path, shared.options.spec_tests)) |
| 290 | + for _ in results: |
| 291 | + # Iterating waits for the tests to finish and propagates any worker exceptions; there is no return value to collect. |
| 292 | + pass |
| 293 | + except KeyboardInterrupt: |
| 294 | + # Hard exit to avoid threads continuing to run after Ctrl-C. |
| 295 | + # There's no concern of deadlocking during shutdown here. |
| 296 | + os._exit(1) |
| 297 | + finally: |
| 298 | + executor.shutdown(cancel_futures=True) |
| 299 | + |
| 300 | + output_queue.shutdown() |
| 301 | + printing_thread.join() |
245 | 302 | |
246 | 303 | |
247 | 304 | def run_validator_tests(): |
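
For reference, here is a minimal, standalone sketch of the output-serialization pattern the new `run_spec_tests` relies on (not part of the patch itself): each worker buffers its prints in an `io.StringIO` and pushes the finished text onto a `queue.Queue`, and a single printer thread drains that queue so output from concurrent tests is never interleaved. Note that `Queue.shutdown()` and `queue.ShutDown` require Python 3.13 or newer; `fake_test` and the test names below are placeholders, not part of the Binaryen test harness.

```python
import io
import queue
import threading
from concurrent.futures import ThreadPoolExecutor
from functools import partial


def fake_test(output_queue, name):
    # Buffer everything this "test" prints so it can be emitted atomically.
    out = io.StringIO()
    print('..', name, file=out)
    print('   ok', file=out)
    output_queue.put(out.getvalue())


def printer(output_queue):
    # Single consumer: the only place that writes to the real stdout.
    while True:
        try:
            text = output_queue.get()
        except queue.ShutDown:
            break  # the queue was shut down and fully drained (Python 3.13+)
        print(text, end='')


def main():
    output_queue = queue.Queue()
    printing_thread = threading.Thread(target=printer, args=(output_queue,))
    printing_thread.start()

    with ThreadPoolExecutor(max_workers=4) as executor:
        # Iterating the map results waits for completion and re-raises
        # any exception from a worker.
        for _ in executor.map(partial(fake_test, output_queue),
                              ['f32.wast', 'names.wast', 'binary.wast']):
            pass

    # Tell the printer there is nothing more to print, then wait for it.
    output_queue.shutdown()
    printing_thread.join()


if __name__ == '__main__':
    main()
```

Because each test's output is flushed as one string, a failing test's log stays contiguous on the console, which is why the patch routes every `print` through the per-test `stdout=out` buffer rather than writing to the real stdout directly.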