|
7 | 7 |
|
8 | 8 | import json |
9 | 9 | import os |
| 10 | +import platform |
10 | 11 | from collections.abc import Callable |
11 | 12 | from pathlib import Path |
12 | 13 | from typing import Any |
|
81 | 82 | not transformer_imports_successful(), reason='transformers not available' |
82 | 83 | ) |
83 | 84 |
|
84 | | -# We only run this on the latest Python as the llama_cpp tests have been regularly failing in CI with `Fatal Python error: Illegal instruction`: |
| 85 | + |
| 86 | +def _has_avx2_support() -> bool: |
| 87 | + """Check if the CPU supports AVX2 instructions required by llama_cpp. |
| 88 | +
|
| 89 | + The llama_cpp library crashes with 'Fatal Python error: Illegal instruction' on CPUs without AVX2. |
| 90 | + This check allows us to skip the tests gracefully on such machines (e.g., some GitHub Actions runners). |
| 91 | + """ |
| 92 | + if platform.system() == 'Linux': |
| 93 | + try: |
| 94 | + with open('/proc/cpuinfo', encoding='utf-8') as f: |
| 95 | + return 'avx2' in f.read().lower() |
| 96 | + except Exception: |
| 97 | + return False |
| 98 | + return True |
| 99 | + |
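Because `/proc/cpuinfo` only exists on Linux, the helper simply assumes support on other platforms. For reference, a rough Intel-macOS equivalent would query `sysctl`. This is a sketch only, not part of this diff: `_has_avx2_support_macos` is a hypothetical name, and the `machdep.cpu.leaf7_features` key is absent on Apple Silicon.

    import subprocess

    def _has_avx2_support_macos() -> bool:
        """Best-effort AVX2 check for Intel macOS via sysctl (sketch only)."""
        try:
            out = subprocess.run(
                ['sysctl', '-n', 'machdep.cpu.leaf7_features'],
                capture_output=True,
                text=True,
                check=True,
            ).stdout
        except (subprocess.CalledProcessError, FileNotFoundError):
            # Key missing (e.g. on Apple Silicon, where AVX2 does not apply).
            return False
        return 'avx2' in out.lower()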
| 100 | + |
| 101 | +# The llama_cpp tests have been regularly failing in CI with `Fatal Python error: Illegal instruction` |
| 102 | +# due to AVX2 instructions not being supported on some GitHub Actions runners: |
85 | 103 | # https://github.com/pydantic/pydantic-ai/actions/runs/19547773220/job/55970947389 |
86 | 104 | skip_if_llama_cpp_imports_unsuccessful = pytest.mark.skipif( |
87 | | - not llama_cpp_imports_successful() or os.getenv('RUN_LLAMA_CPP_TESTS', 'true').lower() == 'false', |
88 | | - reason='llama_cpp not available', |
| 105 | + not llama_cpp_imports_successful() |
| 106 | + or os.getenv('RUN_LLAMA_CPP_TESTS', 'true').lower() == 'false' |
| 107 | + or not _has_avx2_support(), |
|  | 108 | +    reason='llama_cpp not available, disabled via RUN_LLAMA_CPP_TESTS, or AVX2 not supported',
89 | 109 | ) |
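The marker is used like the other skip markers in this file, as a decorator on the affected tests, so incompatible machines report a skip instead of a hard interpreter crash. A minimal usage sketch (the test name and body are hypothetical):

    @skip_if_llama_cpp_imports_unsuccessful
    def test_llamacpp_smoke(llamacpp_model: OutlinesModel) -> None:
        assert llamacpp_model is not None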
90 | 110 |
|
91 | 111 | skip_if_vllm_imports_unsuccessful = pytest.mark.skipif(not vllm_imports_successful(), reason='vllm not available') |
@@ -156,6 +176,12 @@ def transformers_multimodal_model() -> OutlinesModel: |
156 | 176 |
|
157 | 177 | @pytest.fixture |
158 | 178 | def llamacpp_model() -> OutlinesModel: |
| 179 | + if ( |
| 180 | + not llama_cpp_imports_successful() |
| 181 | + or os.getenv('RUN_LLAMA_CPP_TESTS', 'true').lower() == 'false' |
| 182 | + or not _has_avx2_support() |
| 183 | + ): |
|  | 184 | +        pytest.skip('llama_cpp not available, disabled via RUN_LLAMA_CPP_TESTS, or AVX2 not supported')
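    # Note: pytest marks apply only to test functions, never to fixtures, so
    # the fixture repeats the skip condition here rather than relying on the
    # `skip_if_llama_cpp_imports_unsuccessful` marker, which cannot guard it.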
159 | 185 | outlines_model_llamacpp = outlines.models.llamacpp.from_llamacpp( |
160 | 186 | llama_cpp.Llama.from_pretrained( |
161 | 187 | repo_id='M4-ai/TinyMistral-248M-v2-Instruct-GGUF', |
|