Skip to content
Open
Show file tree
Hide file tree
Changes from 6 commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
40 changes: 40 additions & 0 deletions .github/workflows/codspeed.yml
Copy link
Member

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

Probably also want this in a separate PR in order to see the change this PR produces..

Original file line number Diff line number Diff line change
@@ -0,0 +1,40 @@
name: CodSpeed Benchmarks

on:
  push:
    branches:
      - master
      - '[0-9].[0-9]+'
  pull_request:
    branches:
      - master
      - '[0-9].[0-9]+'

jobs:
  benchmark:
    name: Run CodSpeed Benchmarks (Python ${{ matrix.python-version }})
    runs-on: ubuntu-latest
    strategy:
      matrix:
        python-version: ['3.9', '3.10', '3.11', '3.12', '3.13']
    steps:
      - name: Checkout code
        uses: actions/checkout@v4

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: ${{ matrix.python-version }}
          cache: 'pip'
          cache-dependency-path: '**/requirements*.txt'

      - name: Install dependencies
        run: |
          pip install -r requirements-dev.txt
          pip install .

      # Benchmarks must run under the CodSpeed runner action so they are
      # instrumented and their results uploaded; `--codspeed-upload` is not
      # a pytest-codspeed CLI flag, and a bare `pytest --codspeed` run would
      # measure nothing on CI.
      - name: Run CodSpeed benchmarks
        uses: CodSpeedHQ/action@v3
        with:
          token: ${{ secrets.CODSPEED_TOKEN }}
          run: pytest --codspeed
26 changes: 26 additions & 0 deletions README.rst
Original file line number Diff line number Diff line change
Expand Up @@ -92,6 +92,32 @@ The library supports explicit invalidation for specific function call by
The method returns `True` if corresponding arguments set was cached already, `False`
otherwise.

Benchmarks
----------

async-lru uses `CodSpeed <https://codspeed.io/>`_ for performance regression testing.

To run the benchmarks locally:

.. code-block:: shell

pip install -r requirements-dev.txt
pytest --codspeed

The benchmark suite covers both bounded (with maxsize) and unbounded (no maxsize) cache configurations. Scenarios include:

- Cache hit
- Cache miss
- Cache fill/eviction (cycling through more keys than maxsize)
- Cache clear
- TTL expiry
- Cache invalidation
- Cache info retrieval
- Concurrent cache hits
- Baseline (uncached async function)

On CI, benchmarks run automatically via GitHub Actions on all supported Python versions (3.9–3.13), and results are uploaded to CodSpeed (if a ``CODSPEED_TOKEN`` secret is configured). You can view performance history and detect regressions on the CodSpeed dashboard.

Thanks
------

Expand Down
19 changes: 12 additions & 7 deletions async_lru/__init__.py
Original file line number Diff line number Diff line change
Expand Up @@ -200,25 +200,30 @@ async def __call__(self, /, *fn_args: Any, **fn_kwargs: Any) -> _R:

key = _make_key(fn_args, fn_kwargs, self.__typed)

cache_item = self.__cache.get(key)
cache = self.__cache

cache_item = cache.get(key)

if cache_item is not None:
self._cache_hit(key)
if not cache_item.fut.done():
return await asyncio.shield(cache_item.fut)

return cache_item.fut.result()
fut = cache_item.fut
if not fut.done():
return await asyncio.shield(fut)

return fut.result()

fut = loop.create_future()
coro = self.__wrapped__(*fn_args, **fn_kwargs)
task: asyncio.Task[_R] = loop.create_task(coro)
self.__tasks.add(task)
task.add_done_callback(partial(self._task_done_callback, fut, key))

self.__cache[key] = _CacheItem(fut, None)
cache[key] = _CacheItem(fut, None)

if self.__maxsize is not None and len(self.__cache) > self.__maxsize:
dropped_key, cache_item = self.__cache.popitem(last=False)
maxsize = self.__maxsize
if maxsize is not None and len(cache) > maxsize:
dropped_key, cache_item = cache.popitem(last=False)
cache_item.cancel()

self._cache_miss(key)
Expand Down
1 change: 1 addition & 0 deletions requirements.txt
Original file line number Diff line number Diff line change
Expand Up @@ -3,5 +3,6 @@
coverage==7.10.4
pytest==8.4.1
pytest-asyncio==1.1.0
pytest-codspeed==2.3.0
pytest-cov==6.2.1
pytest-timeout==2.4.0
140 changes: 140 additions & 0 deletions tests/benchmarks/test_bench_async_lru.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,140 @@
import asyncio
import pytest
from async_lru import alru_cache

# Shared benchmark fixtures.  Defined at module level so each alru_cache's
# state persists across benchmark iterations within a single test.

# Apply the codspeed marker to every test in this module.
pytestmark = pytest.mark.codspeed


# Bounded cache (LRU): evicts least-recently-used entries beyond 128 keys.
@alru_cache(maxsize=128)
async def cached_func(x):
    return x


# Bounded cache with a short TTL, exercised by the TTL-expiry benchmark.
@alru_cache(maxsize=16, ttl=0.01)
async def cached_func_ttl(x):
    return x


# Unbounded cache (no maxsize): entries are never evicted.
@alru_cache()
async def cached_func_unbounded(x):
    return x


# Unbounded cache with a short TTL.
@alru_cache(ttl=0.01)
async def cached_func_unbounded_ttl(x):
    return x


# Plain coroutine with no caching — the baseline the cached variants are
# compared against.
async def uncached_func(x):
    return x

# --- Bounded cache (maxsize=128) benchmarks ---
#
# NOTE(review): pytest-codspeed's ``benchmark`` fixture is documented as a
# plain callable (``benchmark(fn, *args)``); confirm that the
# ``benchmark.asyncio(...)`` helper used below exists in the pinned
# pytest-codspeed version.


@pytest.mark.asyncio
async def test_cache_hit_benchmark(benchmark):
    """Benchmark a call that is always served from the cache."""
    # Prime the cache so every measured call is a hit.
    await cached_func(42)

    async def hit():
        await cached_func(42)

    await benchmark.asyncio(hit)


@pytest.mark.asyncio
async def test_cache_miss_benchmark(benchmark):
    """Benchmark calls that always miss the cache."""

    async def miss():
        # A fresh object() per call guarantees a unique cache key, i.e. a miss.
        await cached_func(object())

    await benchmark.asyncio(miss)


@pytest.mark.asyncio
async def test_cache_fill_eviction_benchmark(benchmark):
    """Benchmark filling the cache past maxsize, forcing LRU eviction."""
    # 256 distinct keys > maxsize=128, so each pass evicts ~half the entries.
    keys = list(range(256))

    async def fill():
        for k in keys:
            await cached_func(k)

    await benchmark.asyncio(fill)


@pytest.mark.asyncio
async def test_cache_clear_benchmark(benchmark):
    """Benchmark clearing the cache."""
    await cached_func(1)

    async def clear():
        # cache_clear() is a synchronous method in async-lru; awaiting its
        # None return value would raise TypeError.
        cached_func.cache_clear()

    await benchmark.asyncio(clear)


@pytest.mark.asyncio
async def test_cache_ttl_expiry_benchmark(benchmark):
    """Benchmark a call whose cached entry has passed its TTL."""
    await cached_func_ttl(99)
    # Sleep past ttl=0.01 so the first measured call hits an expired entry.
    # NOTE(review): subsequent benchmark iterations re-cache the key, so
    # they may be hits unless 0.01 s elapses between iterations — confirm
    # this measures what's intended.
    await asyncio.sleep(0.02)

    async def ttl_expire():
        await cached_func_ttl(99)

    await benchmark.asyncio(ttl_expire)


@pytest.mark.asyncio
async def test_cache_invalidate_benchmark(benchmark):
    """Benchmark invalidating a specific cached call."""
    await cached_func(123)

    async def invalidate():
        # cache_invalidate() is synchronous and returns a bool (see README);
        # awaiting the bool would raise TypeError.
        cached_func.cache_invalidate(123)

    await benchmark.asyncio(invalidate)


@pytest.mark.asyncio
async def test_cache_info_benchmark(benchmark):
    """Benchmark retrieving cache statistics."""
    await cached_func(1)

    async def info():
        cached_func.cache_info()

    await benchmark.asyncio(info)


@pytest.mark.asyncio
async def test_uncached_func_benchmark(benchmark):
    """Baseline: benchmark the uncached coroutine for comparison."""

    async def raw():
        await uncached_func(42)

    await benchmark.asyncio(raw)


@pytest.mark.asyncio
async def test_concurrent_cache_hit_benchmark(benchmark):
    """Benchmark ten concurrent hits on the same cached key."""
    await cached_func(77)

    async def concurrent_hit():
        await asyncio.gather(*(cached_func(77) for _ in range(10)))

    await benchmark.asyncio(concurrent_hit)

# --- Unbounded cache (no maxsize) benchmarks ---
# Mirrors the bounded suite minus fill/eviction (nothing ever evicts here).


@pytest.mark.asyncio
async def test_cache_hit_unbounded_benchmark(benchmark):
    """Benchmark a call always served from the unbounded cache."""
    # Prime the cache so every measured call is a hit.
    await cached_func_unbounded(42)

    async def hit():
        await cached_func_unbounded(42)

    await benchmark.asyncio(hit)


@pytest.mark.asyncio
async def test_cache_miss_unbounded_benchmark(benchmark):
    """Benchmark calls that always miss the unbounded cache."""

    async def miss():
        # A fresh object() per call is a unique key, i.e. always a miss.
        # NOTE(review): with no maxsize the cache grows for the whole
        # benchmark run — memory growth may skew timings.
        await cached_func_unbounded(object())

    await benchmark.asyncio(miss)


@pytest.mark.asyncio
async def test_cache_clear_unbounded_benchmark(benchmark):
    """Benchmark clearing the unbounded cache."""
    await cached_func_unbounded(1)

    async def clear():
        # cache_clear() is a synchronous method in async-lru; awaiting its
        # None return value would raise TypeError.
        cached_func_unbounded.cache_clear()

    await benchmark.asyncio(clear)


@pytest.mark.asyncio
async def test_cache_ttl_expiry_unbounded_benchmark(benchmark):
    """Benchmark a call whose unbounded-cache entry has passed its TTL."""
    await cached_func_unbounded_ttl(99)
    # Sleep past ttl=0.01 so the first measured call hits an expired entry.
    await asyncio.sleep(0.02)

    async def ttl_expire():
        await cached_func_unbounded_ttl(99)

    await benchmark.asyncio(ttl_expire)


@pytest.mark.asyncio
async def test_cache_invalidate_unbounded_benchmark(benchmark):
    """Benchmark invalidating a specific call in the unbounded cache."""
    await cached_func_unbounded(123)

    async def invalidate():
        # cache_invalidate() is synchronous and returns a bool (see README);
        # awaiting the bool would raise TypeError.
        cached_func_unbounded.cache_invalidate(123)

    await benchmark.asyncio(invalidate)


@pytest.mark.asyncio
async def test_cache_info_unbounded_benchmark(benchmark):
    """Benchmark retrieving unbounded-cache statistics."""
    await cached_func_unbounded(1)

    async def info():
        cached_func_unbounded.cache_info()

    await benchmark.asyncio(info)


@pytest.mark.asyncio
async def test_concurrent_cache_hit_unbounded_benchmark(benchmark):
    """Benchmark ten concurrent hits on one unbounded-cache key."""
    await cached_func_unbounded(77)

    async def concurrent_hit():
        await asyncio.gather(*(cached_func_unbounded(77) for _ in range(10)))

    await benchmark.asyncio(concurrent_hit)
Loading