Skip to content

Commit 2913fe2

Browse files
feat(cicd): codspeed benchmarks (#688)
1 parent 184ed73 commit 2913fe2

File tree

6 files changed

+386
-3
lines changed

6 files changed

+386
-3
lines changed

.github/workflows/codspeed.yml

Lines changed: 39 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,39 @@
name: CodSpeed Benchmarks

on:
  push:
    branches:
      - master
      - '[0-9].[0-9]+'
  pull_request:
    branches:
      - master
      - '[0-9].[0-9]+'

jobs:
  benchmark:
    # Fix: the original name interpolated ${{ matrix.python-version }}, but
    # no strategy.matrix is defined in this job, so the placeholder rendered
    # empty. Hard-code the version actually installed by setup-python below.
    name: Run CodSpeed Benchmarks (Python 3.13)
    runs-on: ubuntu-latest
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: '3.13'
          cache: 'pip'
          cache-dependency-path: '**/requirements*.txt'

      - name: Install dependencies
        run: pip install -r requirements-benchmarks.txt

      # An empty config keeps repo-level pytest settings (addopts, plugins)
      # from interfering with the benchmark run.
      - name: Create empty pytest config
        run: echo "[pytest]" > .empty-pytest.ini

      - name: Run the benchmarks
        uses: CodSpeedHQ/action@v4
        with:
          mode: instrumentation
          run: pytest -c .empty-pytest.ini --codspeed benchmark.py --timeout=0
          token: ${{ secrets.CODSPEED_TOKEN }}

README.rst

Lines changed: 26 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -92,6 +92,32 @@ The library supports explicit invalidation for specific function call by
9292
The method returns `True` if corresponding arguments set was cached already, `False`
9393
otherwise.
9494

95+
Benchmarks
96+
----------
97+
98+
async-lru uses `CodSpeed <https://codspeed.io/>`_ for performance regression testing.
99+
100+
To run the benchmarks locally:
101+
102+
.. code-block:: shell
103+
104+
pip install -r requirements-benchmarks.txt
105+
pytest --codspeed benchmark.py
106+
107+
The benchmark suite covers both bounded (with maxsize) and unbounded (no maxsize) cache configurations. Scenarios include:
108+
109+
- Cache hit
110+
- Cache miss
111+
- Cache fill/eviction (cycling through more keys than maxsize)
112+
- Cache clear
113+
- TTL expiry
114+
- Cache invalidation
115+
- Cache info retrieval
116+
- Concurrent cache hits
117+
- Baseline (uncached async function)
118+
119+
On CI, benchmarks are run automatically via GitHub Actions on Python 3.13, and results are uploaded to CodSpeed (if a `CODSPEED_TOKEN` is configured). You can view performance history and detect regressions on the CodSpeed dashboard.
120+
95121
Thanks
96122
------
97123

benchmark.py

Lines changed: 313 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,313 @@
1+
import asyncio
2+
from typing import Any, Callable
3+
4+
import pytest
5+
6+
from async_lru import _LRUCacheWrapper, alru_cache
7+
8+
9+
try:
10+
from pytest_codspeed import BenchmarkFixture
11+
except ImportError: # pragma: no branch # only hit in cibuildwheel
12+
pytestmark = pytest.mark.skip("pytest-codspeed needs to be installed")
13+
else:
14+
pytestmark = pytest.mark.benchmark
15+
16+
17+
@pytest.fixture
def loop():
    """Yield a fresh event loop for one test; close it on teardown."""
    event_loop = asyncio.new_event_loop()
    asyncio.set_event_loop(event_loop)
    yield event_loop
    event_loop.close()
23+
24+
25+
@pytest.fixture
def run_loop(loop):
    """Return a callable that runs ``fn(*args, **kwargs)`` to completion on ``loop``."""

    async def _as_coro(awaitable):
        # run_until_complete needs a coroutine/future; this wraps plain
        # awaitables (e.g. wrapper objects returned by cached methods).
        return await awaitable

    def _runner(fn, *args, **kwargs):
        result = fn(*args, **kwargs)
        if not asyncio.iscoroutine(result):
            result = _as_coro(result)
        return loop.run_until_complete(result)

    return _runner
37+
38+
39+
# --- Module-level cached coroutine functions used by the benchmark matrix ---

# Bounded cache: LRU eviction kicks in past 128 entries.
@alru_cache(maxsize=128)
async def cached_func(x):
    return x


# Bounded cache with a very short ttl, for expiry benchmarks.
@alru_cache(maxsize=16, ttl=0.01)
async def cached_func_ttl(x):
    return x


# Unbounded cache: no maxsize, never evicts.
@alru_cache()
async def cached_func_unbounded(x):
    return x


# Unbounded cache with a very short ttl.
@alru_cache(ttl=0.01)
async def cached_func_unbounded_ttl(x):
    return x
59+
60+
61+
class Methods:
    """Method-flavoured counterparts of the module-level cached functions.

    Covers the instance-method code path of ``alru_cache`` with the same
    four configurations (bounded/unbounded, with/without ttl).
    """

    @alru_cache(maxsize=128)
    async def cached_meth(self, x):
        return x

    @alru_cache(maxsize=16, ttl=0.01)
    async def cached_meth_ttl(self, x):
        return x

    @alru_cache()
    async def cached_meth_unbounded(self, x):
        return x

    @alru_cache(ttl=0.01)
    async def cached_meth_unbounded_ttl(self, x):
        return x
77+
78+
79+
async def uncached_func(x):
    """Baseline coroutine with no caching: return the argument unchanged."""
    return x
81+
82+
83+
# Shared parametrization: each benchmark body runs against plain functions
# and methods, in bounded and unbounded flavours. ``ids`` labels the four
# entries of ``funcs``/``funcs_ttl`` position-for-position.
ids = ["func-bounded", "func-unbounded", "meth-bounded", "meth-unbounded"]
funcs = [
    cached_func,
    cached_func_unbounded,
    Methods.cached_meth,
    Methods.cached_meth_unbounded,
]
funcs_ttl = [
    cached_func_ttl,
    cached_func_unbounded_ttl,
    Methods.cached_meth_ttl,
    Methods.cached_meth_unbounded_ttl,
]
96+
97+
98+
@pytest.mark.parametrize("func", funcs, ids=ids)
def test_cache_hit_benchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func: _LRUCacheWrapper[Any],
) -> None:
    """Benchmark repeated awaits of already-cached keys (pure hits)."""
    keys = list(range(10))
    # Warm the cache so every call inside the measured loop is a hit.
    for key in keys:
        run_loop(func, key)

    async def hit_loop() -> None:
        for _ in range(100):
            for key in keys:
                await func(key)

    benchmark(run_loop, hit_loop)
115+
116+
117+
@pytest.mark.parametrize("func", funcs, ids=ids)
def test_cache_miss_benchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func: _LRUCacheWrapper[Any],
) -> None:
    """Benchmark 128 cache misses per measured round.

    Fix: the original cleared the cache once, outside the measured function.
    ``benchmark`` may invoke its target several times, so every round after
    the first re-used the same 128 keys and measured cache *hits* instead of
    misses. Clearing inside ``run`` guarantees each round starts cold.
    """
    # Fresh objects are never equal to prior keys, so each is a miss.
    unique_objects = [object() for _ in range(128)]

    async def run() -> None:
        func.cache_clear()  # start every round from an empty cache
        for obj in unique_objects:
            await func(obj)

    benchmark(run_loop, run)
131+
132+
133+
@pytest.mark.parametrize("func", funcs, ids=ids)
def test_cache_clear_benchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func: _LRUCacheWrapper[Any],
) -> None:
    """Benchmark ``cache_clear`` on a cache populated with 100 entries.

    NOTE(review): if ``benchmark`` invokes the target more than once, rounds
    after the first clear an already-empty cache — confirm that is intended.
    """
    for key in range(100):
        run_loop(func, key)

    benchmark(func.cache_clear)
143+
144+
145+
@pytest.mark.parametrize("func_ttl", funcs_ttl, ids=ids)
def test_cache_ttl_expiry_benchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func_ttl: _LRUCacheWrapper[Any],
) -> None:
    """Benchmark a call whose cached entry has already expired."""
    run_loop(func_ttl, 99)
    # Sleep past the 0.01s ttl so the entry is stale when measured.
    run_loop(asyncio.sleep, 0.02)

    benchmark(run_loop, func_ttl, 99)
155+
156+
157+
@pytest.mark.parametrize("func", funcs, ids=ids)
def test_cache_invalidate_benchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func: _LRUCacheWrapper[Any],
) -> None:
    """Benchmark invalidating ~200 previously cached keys."""
    # Warm the cache with the keys that will be invalidated.
    keys = list(range(123, 321))
    for key in keys:
        run_loop(func, key)

    invalidate = func.cache_invalidate  # hoist the bound-method lookup

    @benchmark
    def run() -> None:
        for key in keys:
            invalidate(key)
174+
175+
176+
@pytest.mark.parametrize("func", funcs, ids=ids)
def test_cache_info_benchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func: _LRUCacheWrapper[Any],
) -> None:
    """Benchmark 1000 ``cache_info()`` calls against a populated cache."""
    # Populate so the reported stats reflect a non-trivial cache.
    keys = list(range(1000))
    for key in keys:
        run_loop(func, key)

    cache_info = func.cache_info  # hoist the bound-method lookup

    @benchmark
    def run() -> None:
        for _ in keys:
            cache_info()
193+
194+
195+
@pytest.mark.parametrize("func", funcs, ids=ids)
def test_concurrent_cache_hit_benchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func: _LRUCacheWrapper[Any],
) -> None:
    """Benchmark 10 rounds of 100 concurrent cache hits.

    Fix: the original body was ``for _ in range(10): return await
    gather(...)`` — the ``return`` exited on the first iteration, so only
    1 of the intended 10 rounds ever ran. Await each round instead.
    """
    # Warm the cache so every gathered call is a hit.
    keys = list(range(600, 700))
    for key in keys:
        run_loop(func, key)

    async def gather_coros() -> None:
        gather = asyncio.gather  # hoist the attribute lookup
        for _ in range(10):
            await gather(*map(func, keys))

    benchmark(run_loop, gather_coros)
212+
213+
214+
def test_cache_fill_eviction_benchmark(
    benchmark: BenchmarkFixture, run_loop: Callable[..., Any]
) -> None:
    """Benchmark cycling 5000 keys through the bounded (maxsize=128) cache."""
    # Pre-fill with keys (disjoint from those below) that the measured
    # loop will progressively evict.
    for key in range(-128, 0):
        run_loop(cached_func, key)

    keys = list(range(5000))

    async def churn() -> None:
        for key in keys:
            await cached_func(key)

    benchmark(run_loop, churn)
228+
229+
230+
# ===========================
# Internal Microbenchmarks
# ===========================
# These directly exercise internal (sync) methods and data structures that
# the async public-API benchmarks above do not reach.
#
# The internal methods in question do not exist on
# _LRUCacheWrapperInstanceMethod, so the method-flavoured entries are
# dropped: keep only the first two (plain-function) parametrizations.
only_funcs, func_ids = funcs[:2], ids[:2]
240+
241+
242+
@pytest.mark.parametrize("func", only_funcs, ids=func_ids)
def test_internal_cache_hit_microbenchmark(
    benchmark: BenchmarkFixture,
    run_loop: Callable[..., Any],
    func: _LRUCacheWrapper[Any],
) -> None:
    """Directly benchmark _cache_hit (internal, sync) using parameterized funcs."""
    # Warm the cache so every _cache_hit call below finds its key.
    keys = list(range(128))
    for key in keys:
        run_loop(func, key)

    cache_hit = func._cache_hit  # hoist the bound-method lookup

    @benchmark
    def run() -> None:
        for key in keys:
            cache_hit(key)
260+
261+
262+
@pytest.mark.parametrize("func", only_funcs, ids=func_ids)
def test_internal_cache_miss_microbenchmark(
    benchmark: BenchmarkFixture, func: _LRUCacheWrapper[Any]
) -> None:
    """Directly benchmark _cache_miss (internal, sync) using parameterized funcs."""
    cache_miss = func._cache_miss  # hoist the bound-method lookup

    @benchmark
    def run() -> None:
        for key in range(128):
            cache_miss(key)
273+
274+
275+
@pytest.mark.parametrize("func", only_funcs, ids=func_ids)
@pytest.mark.parametrize("task_state", ["finished", "cancelled", "exception"])
def test_internal_task_done_callback_microbenchmark(
    benchmark: BenchmarkFixture,
    loop: asyncio.BaseEventLoop,
    func: _LRUCacheWrapper[Any],
    task_state: str,
) -> None:
    """Directly benchmark _task_done_callback (internal, sync) using parameterized funcs and task states."""

    async def outcome_coro():
        # Coroutine whose result/exception depends on the requested state.
        if task_state == "exception":
            raise ValueError("test exception")
        return 123

    # Drive the task into the requested terminal state before measuring.
    task = loop.create_task(outcome_coro())
    if task_state == "finished":
        loop.run_until_complete(task)
    elif task_state == "cancelled":
        task.cancel()
        try:
            loop.run_until_complete(task)
        except asyncio.CancelledError:
            pass
    elif task_state == "exception":
        try:
            loop.run_until_complete(task)
        except Exception:
            pass

    # Hoist all lookups so the measured loop is pure callback dispatch.
    iterations = range(1000)
    create_future = loop.create_future
    callback = func._task_done_callback

    @benchmark
    def run() -> None:
        for key in iterations:
            callback(create_future(), key, task)

requirements-benchmarks.txt

Lines changed: 4 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,4 @@
1+
-e .
2+
-r requirements-test.txt
3+
4+
pytest-codspeed==4.0.0

requirements-test.txt

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,3 @@
1+
pytest==8.4.2
2+
pytest-asyncio==1.2.0
3+
pytest-timeout==2.4.0

0 commit comments

Comments
 (0)