Commit d73000d

dsfaccini and claude committed
fix: Skip llama_cpp tests on CPUs without AVX2 support
The llama_cpp tests have been flaky in CI because they crash with 'Fatal Python error: Illegal instruction' on GitHub Actions runners whose CPUs don't support AVX2 instructions. This adds an AVX2 detection check on Linux via /proc/cpuinfo to gracefully skip the tests on incompatible machines instead of crashing.

🤖 Generated with [Claude Code](https://claude.com/claude-code)

Co-Authored-By: Claude <noreply@anthropic.com>
1 parent 212f935 commit d73000d

File tree

1 file changed: +29 −3 lines changed

tests/models/test_outlines.py

Lines changed: 29 additions & 3 deletions
@@ -7,6 +7,7 @@
 
 import json
 import os
+import platform
 from collections.abc import Callable
 from pathlib import Path
 from typing import Any
@@ -81,11 +82,30 @@
     not transformer_imports_successful(), reason='transformers not available'
 )
 
-# We only run this on the latest Python as the llama_cpp tests have been regularly failing in CI with `Fatal Python error: Illegal instruction`:
+
+def _has_avx2_support() -> bool:
+    """Check if the CPU supports AVX2 instructions required by llama_cpp.
+
+    The llama_cpp library crashes with 'Fatal Python error: Illegal instruction' on CPUs without AVX2.
+    This check allows us to skip the tests gracefully on such machines (e.g., some GitHub Actions runners).
+    """
+    if platform.system() == 'Linux':
+        try:
+            with open('/proc/cpuinfo', encoding='utf-8') as f:
+                return 'avx2' in f.read().lower()
+        except Exception:
+            return False
+    return True
+
+
+# The llama_cpp tests have been regularly failing in CI with `Fatal Python error: Illegal instruction`
+# due to AVX2 instructions not being supported on some GitHub Actions runners:
 # https://github.com/pydantic/pydantic-ai/actions/runs/19547773220/job/55970947389
 skip_if_llama_cpp_imports_unsuccessful = pytest.mark.skipif(
-    not llama_cpp_imports_successful() or os.getenv('RUN_LLAMA_CPP_TESTS', 'true').lower() == 'false',
-    reason='llama_cpp not available',
+    not llama_cpp_imports_successful()
+    or os.getenv('RUN_LLAMA_CPP_TESTS', 'true').lower() == 'false'
+    or not _has_avx2_support(),
+    reason='llama_cpp not available or AVX2 not supported',
 )
 
 skip_if_vllm_imports_unsuccessful = pytest.mark.skipif(not vllm_imports_successful(), reason='vllm not available')
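For reviewers who want to check their own machine, here is a minimal standalone sketch that mirrors the _has_avx2_support helper added above; the has_avx2 name and the __main__ wrapper are illustrative additions, not part of this change.

# Standalone mirror of the _has_avx2_support helper from this diff.
# Run it directly to see whether the llama_cpp tests would be skipped here.
import platform


def has_avx2() -> bool:
    if platform.system() == 'Linux':
        # On Linux, /proc/cpuinfo lists CPU feature flags; 'avx2' appears when supported.
        try:
            with open('/proc/cpuinfo', encoding='utf-8') as f:
                return 'avx2' in f.read().lower()
        except Exception:
            # If the file is unreadable, err on the side of skipping the tests.
            return False
    # Non-Linux platforms: assume support, matching the helper's behavior.
    return True


if __name__ == '__main__':
    print(f'AVX2 supported: {has_avx2()}')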
@@ -156,6 +176,12 @@ def transformers_multimodal_model() -> OutlinesModel:
 
 @pytest.fixture
 def llamacpp_model() -> OutlinesModel:
+    if (
+        not llama_cpp_imports_successful()
+        or os.getenv('RUN_LLAMA_CPP_TESTS', 'true').lower() == 'false'
+        or not _has_avx2_support()
+    ):
+        pytest.skip('llama_cpp not available or AVX2 not supported')
     outlines_model_llamacpp = outlines.models.llamacpp.from_llamacpp(
         llama_cpp.Llama.from_pretrained(
             repo_id='M4-ai/TinyMistral-248M-v2-Instruct-GGUF',