1 | 1 | # --------------------------------------------------------- |
2 | 2 | # Copyright (c) Microsoft Corporation. All rights reserved. |
3 | 3 | # --------------------------------------------------------- |
4 | | - |
| 4 | +import argparse |
| 5 | +import contextlib |
| 6 | +import json |
| 7 | +import os |
| 8 | +import re |
5 | 9 | import subprocess |
6 | 10 | import sys |
| 11 | +from collections import defaultdict |
7 | 12 | from pathlib import Path |
8 | 13 |
| 14 | +import dotenv |
9 | 15 |
10 | | -def run_tests(input_file): |
11 | | -    """Run tests listed in a file. Lines starting with # or ; are ignored.
12 | 16 |
13 | | -    :param input_file: Path to a file containing a list of tests to run.
14 | | -    :type input_file: str
15 | | -    """
16 | | -    tests_to_run = []
| 17 | +def normalize_test_name(test_name): |
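| | +    """Strip the pytest parametrize suffix from a test name, e.g. "test_foo[case1]" -> "test_foo"."""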
| 18 | +    if "[" in test_name:
| 19 | +        test_name = test_name.split("[")[0]
| 20 | +    return test_name.strip()
| 21 | + |
| 22 | + |
| 23 | +def extract_test_location(location): |
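| | +    """Convert a pytest report-log location tuple (path, line, "Class.test[param]") into (test class path, test name, parameter id)."""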
| 24 | +    test_path, line_no, test_func = location
| 25 | +    test_class_name, test_func_name = test_func.split(".", 1)
| 26 | +    test_class = test_path.split(os.path.sep, 3)[-1] + "::" + test_class_name
| 27 | +    m = re.match(r"(\w+)\[(.+)]", test_func_name)  # parameter ids may contain "-", ".", etc.
| 28 | +    if m:
| 29 | +        test_func_name, test_param = m.groups()
| 30 | +    else:
| 31 | +        test_param = None
| 32 | +    return test_class, test_func_name, test_param
| 33 | + |
| 34 | + |
| 35 | +def load_tests_from_file(input_file): |
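| | +    """Load test names from a file, one per line; lines starting with "#" or ";" are comments."""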
| 36 | +    tests_to_run = set()
17 | 37 |     with open(input_file, "r") as f:
18 | 38 |
19 | 39 |         for line in f:
20 | 40 |             if not line.strip() or line.strip()[0] in ["#", ";"]:
21 | 41 |                 continue
22 | | -            if "[" in line:
23 | | -                line = line.split("[")[0]
24 | | -            line = line.strip()
25 | | -            if line not in tests_to_run:
26 | | -                tests_to_run.append(line)
| 42 | +            line = normalize_test_name(line)
| 43 | +            tests_to_run.add(line)
| 44 | +    return tests_to_run
| 45 | + |
| 46 | + |
| 47 | +@contextlib.contextmanager |
| 48 | +def update_dot_env_file(env_override): |
| 49 | + """Update env file with env_override, and restore it after the context is exited. |
| 50 | + Support bool variable only for now. |
| 51 | + """ |
| 52 | +    env_file = dotenv.find_dotenv(raise_error_if_not_found=True)
| 53 | +    print(f"Updating env file: {env_file}")
| 54 | +    origin_env_content = None
| 55 | +    try:
| 56 | +        with open(env_file, "r") as f:
| 57 | +            origin_env_content = f.read()
| 58 | +        env_vars = [line.strip() for line in origin_env_content.splitlines() if line.strip()]
| 59 | +        for key, value in env_override.items():
| 60 | +            if isinstance(value, bool):
| 61 | +                target_line = f"{key}='true'"
| 62 | +                for i, line in enumerate(env_vars):
| 63 | +                    if line == target_line and not value:
| 64 | +                        env_vars[i] = f"#{target_line}"
| 65 | +                    elif re.match(rf"# *{target_line}", line) and value:
| 66 | +                        env_vars[i] = f"{target_line}"
| 67 | +        with open(env_file, "w") as f:
| 68 | +            f.write("\n".join(env_vars))
| 69 | +        yield
| 70 | +    finally:
| 71 | +        if origin_env_content is not None:
| 72 | +            with open(env_file, "w") as f:
| 73 | +                f.write(origin_env_content)
| 74 | + |
| 75 | + |
| 76 | +def run_simple(tests_to_run, working_dir, extra_params, is_live_and_recording): |
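| | +    """Run each test with pytest under working_dir, toggling the .env file for live & recording vs playback mode."""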
| 77 | +    print(f"Running {len(tests_to_run)} tests under {working_dir}:")
27 | 78 |     for test_name in tests_to_run:
28 | 79 |         print(test_name)
29 | 80 |
30 | | -    for test_name in tests_to_run:
31 | | -        print(f"Running test: {test_name}")
32 | | -        subprocess.call(
33 | | -            [
34 | | -                sys.executable,
35 | | -                "-m",
36 | | -                "pytest",
37 | | -                "--disable-warnings",
38 | | -                "--disable-pytest-warnings",
39 | | -                test_name,
40 | | -            ],
41 | | -            cwd=Path(__file__).parent.parent,
42 | | -        )
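| | +    # AZURE_TEST_RUN_LIVE / AZURE_SKIP_LIVE_RECORDING are the azure-sdk test settings that switch between live and playback runs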
| 81 | +    with update_dot_env_file(
| 82 | +        {"AZURE_TEST_RUN_LIVE": is_live_and_recording, "AZURE_SKIP_LIVE_RECORDING": not is_live_and_recording},
| 83 | +    ):
| 84 | +        for test_name in tests_to_run:
| 85 | +            print(
| 86 | +                f"pytest {test_name} {' '.join(extra_params)} in {'live' if is_live_and_recording else 'playback'} mode..."
| 87 | +            )
| 88 | +            subprocess.run(
| 89 | +                [
| 90 | +                    sys.executable,
| 91 | +                    "-m",
| 92 | +                    "pytest",
| 93 | +                    test_name,
| 94 | +                ]
| 95 | +                + extra_params,
| 96 | +                cwd=working_dir,
| 97 | +            )
| 98 | + |
| 99 | + |
| 100 | +def run_tests(tests_to_run, extras, *, skip_first_run=False, record_mismatch=False, is_live_and_recording=False): |
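| | +    """Run tests in playback mode by default; with record_mismatch, re-record tests whose recordings are missing, then verify."""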
| 101 | +    working_dir = Path(__file__).parent.parent
| 102 | +    if record_mismatch:
| 103 | +        log_file_path = working_dir / "scripts" / "tmp" / "pytest_log.json"
| 104 | +        log_file_path.parent.mkdir(parents=True, exist_ok=True)
| 105 | + |
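| | +        # first pass: playback mode, with results captured via the pytest-reportlog plugin (--report-log)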
| 106 | +        if not skip_first_run:
| 107 | +            run_simple(
| 108 | +                tests_to_run,
| 109 | +                working_dir,
| 110 | +                extra_params=[
| 111 | +                    "--disable-warnings",
| 112 | +                    "--disable-pytest-warnings",
| 113 | +                    "--report-log",
| 114 | +                    log_file_path.as_posix(),
| 115 | +                ]
| 116 | +                + extras,
| 117 | +                is_live_and_recording=False,
| 118 | +            )
| 119 | + |
| 120 | +        tests_failed_with_recording_mismatch = defaultdict(dict)
| 121 | +        with open(log_file_path, "r") as f:
| 122 | +            for line in f:
| 123 | +                node = json.loads(line)
| 124 | +                if "outcome" not in node:
| 125 | +                    continue
| 126 | +                if node["outcome"] != "failed":
| 127 | +                    continue
| 128 | +                test_class, test_name, test_param = extract_test_location(node["location"])
| 129 | + |
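| | +                # a recording missing from the test proxy surfaces as a ResourceNotFoundError in playback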
| 130 | +                msg = node["longrepr"]["reprcrash"]["message"]
| 131 | +                if "ResourceNotFoundError" in msg:
| 132 | +                    if test_param is None:
| 133 | +                        tests_failed_with_recording_mismatch[test_class][test_name] = None
| 134 | +                    elif test_name not in tests_failed_with_recording_mismatch[test_class]:
| 135 | +                        tests_failed_with_recording_mismatch[test_class][test_name] = [test_param]
| 136 | +                    else:
| 137 | +                        tests_failed_with_recording_mismatch[test_class][test_name].append(test_param)
| 138 | + |
| 139 | +        if tests_failed_with_recording_mismatch:
| 140 | +            # re-run the tests whose recordings were missing, in live & recording mode
| 141 | +            for test_class, test_info in tests_failed_with_recording_mismatch.items():
| 142 | +                keys = []
| 143 | +                for test_name, test_params in test_info.items():
| 144 | +                    if test_params is not None:
| 145 | +                        keys.extend(f"{test_name}[{param}]" for param in test_params)  # one -k key per failed parametrization
| 146 | +                    else:
| 147 | +                        keys.append(test_name)
| 148 | +                run_simple(
| 149 | +                    [test_class],
| 150 | +                    working_dir,
| 151 | +                    ["-k", " or ".join(keys), "--tb=line"],
| 152 | +                    is_live_and_recording=True,
| 153 | +                )
| 154 | + |
| 155 | +            # re-run the original tests in playback mode to confirm they pass with the new recordings
| 156 | +            run_simple(tests_to_run, working_dir, extras, is_live_and_recording=False)
| 157 | +    else:
| 158 | +        run_simple(tests_to_run, working_dir, extras, is_live_and_recording=is_live_and_recording)
43 | 159 |
44 | 160 |
45 | 161 | if __name__ == "__main__": |
46 | | -    run_tests(sys.argv[1])
| 162 | +    parser = argparse.ArgumentParser()
| 163 | +    parser.add_argument(
| 164 | +        "--file",
| 165 | +        type=str,
| 166 | +        help="File containing tests to run, one test name per line",
| 167 | +    )
| 168 | +    parser.add_argument(
| 169 | +        "--name",
| 170 | +        type=str,
| 171 | +        help="Test to run, in pytest format, e.g. 'tests/pipeline_job/'.",
| 172 | +    )
| 173 | +    parser.add_argument(
| 174 | +        "--record-mismatch",
| 175 | +        "-r",
| 176 | +        action="store_true",
| 177 | +        help="If specified, the pytest log will be written to scripts/tmp/pytest_log.json, "
| 178 | +        "and tests that failed with a recording-not-found error will be rerun in live & recording mode. "
| 179 | +        "Note that the .env file is updated during the process, so please revert the change manually "
| 180 | +        "if the script is stopped early.",
| 181 | +    )
| 182 | +    parser.add_argument(
| 183 | +        "--skip-first-run",
| 184 | +        "-s",
| 185 | +        action="store_true",
| 186 | +        help="If specified, skip the initial playback run in record-mismatch mode.",
| 187 | +    )
| 188 | + |
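| | +    # unrecognized arguments (e.g. extra pytest flags) are forwarded to pytest verbatim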
| 189 | +    _args, _extras = parser.parse_known_args()
| 190 | +
| 191 | +    if _args.file:
| 192 | +        _tests = load_tests_from_file(_args.file)
| 193 | +    elif _args.name:
| 194 | +        _tests = [_args.name]
| 195 | +    else:
| 196 | +        raise ValueError("Must specify either --file or --name")
| 197 | +    run_tests(
| 198 | +        _tests,
| 199 | +        _extras,
| 200 | +        skip_first_run=_args.skip_first_run,
| 201 | +        record_mismatch=_args.record_mismatch,
| 202 | +    )