Commit adc8b14

fer authored and committed
feat(perf): Add performance guardrails registry & decorator, instrument validation aggregator
Adds PerformanceRegistry, the perf_guard decorator, and an overhead-comparison utility. Instruments run_structural_validation when a perf_registry is provided. Includes tests ensuring <8% overhead for a moderate workload. Maintains physics invariants (timing is read-only).
1 parent 5d44e55 commit adc8b14

File tree

3 files changed: +229 −1

src/tnfr/performance/guardrails.py

Lines changed: 136 additions & 0 deletions
@@ -0,0 +1,136 @@
"""Performance guardrails instrumentation for TNFR Phase 3.

Lightweight timing utilities ensuring added structural validation / telemetry
instrumentation remains below configured overhead thresholds.

Design Goals
------------
1. Zero external dependencies (stdlib only).
2. Minimal footprint: single perf_counter measurement plus registry append.
3. Opt-in: instrumentation only active when explicitly passed a registry.
4. Composable: decorator or manual timing blocks.

Physics Alignment
-----------------
Performance measurement is purely operational and never alters TNFR physics;
it wraps functions that perform read-only structural computations. The
guardrails act as a containment layer ensuring added monitoring does not
fragment coherence through excessive latency.
"""

from __future__ import annotations

from dataclasses import dataclass, field
from time import perf_counter
from typing import Any, Callable, Dict, List

__all__ = [
    "PerformanceRegistry",
    "perf_guard",
    "compare_overhead",
]


@dataclass(slots=True)
class PerformanceRecord:
    label: str
    elapsed: float
    meta: Dict[str, Any] | None = None


@dataclass(slots=True)
class PerformanceRegistry:
    """Collects performance timing records.

    Methods
    -------
    record(label, elapsed, meta=None): Add a timing entry.
    summary(): Aggregate statistics (count, mean, max, min, labels).
    filter(label): Return list of records matching label.
    """

    records: List[PerformanceRecord] = field(default_factory=list)

    def record(
        self, label: str, elapsed: float, meta: Dict[str, Any] | None = None
    ) -> None:
        self.records.append(PerformanceRecord(label, float(elapsed), meta))

    def filter(self, label: str) -> List[PerformanceRecord]:
        return [r for r in self.records if r.label == label]

    def summary(self) -> Dict[str, Any]:
        if not self.records:
            return {"count": 0}
        total = sum(r.elapsed for r in self.records)
        return {
            "count": len(self.records),
            "total": total,
            "mean": total / len(self.records),
            "max": max(r.elapsed for r in self.records),
            "min": min(r.elapsed for r in self.records),
            "labels": sorted({r.label for r in self.records}),
        }


def perf_guard(label: str, registry: PerformanceRegistry | None) -> Callable:
    """Decorator adding a single perf_counter measurement if a registry is provided.

    Parameters
    ----------
    label : str
        Logical name for the operation (e.g. "validation" or "telemetry").
    registry : PerformanceRegistry | None
        Active registry; if None, instrumentation is skipped.
    """

    def decorator(fn: Callable) -> Callable:
        def wrapped(*args, **kwargs):  # type: ignore[override]
            if registry is None:
                return fn(*args, **kwargs)
            start = perf_counter()
            result = fn(*args, **kwargs)
            registry.record(label, perf_counter() - start, meta={
                "fn": fn.__name__,
                "arg_count": len(args),
                "kw_count": len(kwargs),
            })
            return result

        wrapped.__name__ = fn.__name__  # preserve for introspection
        wrapped.__doc__ = fn.__doc__
        return wrapped

    return decorator


def compare_overhead(
    baseline_fn: Callable[[], Any],
    instrumented_fn: Callable[[], Any],
    *,
    runs: int = 5000,
) -> Dict[str, float]:
    """Compare overhead ratio between baseline and instrumented call sets.

    Returns timing dict with baseline, instrumented and ratio
    (instrumented - baseline) / baseline.
    """
    # Warmup
    for _ in range(10):
        baseline_fn()
        instrumented_fn()
    b_start = perf_counter()
    for _ in range(runs):
        baseline_fn()
    b_elapsed = perf_counter() - b_start
    i_start = perf_counter()
    for _ in range(runs):
        instrumented_fn()
    i_elapsed = perf_counter() - i_start
    ratio = (i_elapsed - b_elapsed) / b_elapsed if b_elapsed > 0 else 0.0
    return {
        "baseline": b_elapsed,
        "instrumented": i_elapsed,
        "ratio": ratio,
        "runs": float(runs),
    }
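
For orientation, a minimal usage sketch of the API introduced above; the workload function and the "demo" label are illustrative, but PerformanceRegistry, perf_guard, and compare_overhead are as defined in this file:

from tnfr.performance.guardrails import (
    PerformanceRegistry,
    perf_guard,
    compare_overhead,
)

registry = PerformanceRegistry()

@perf_guard("demo", registry)
def busy_work() -> int:
    # Illustrative workload; any read-only computation fits here.
    return sum(range(1_000))

def baseline() -> int:
    return sum(range(1_000))

stats = compare_overhead(baseline, busy_work, runs=1000)
# ratio = (instrumented - baseline) / baseline; e.g. 0.05 means ~5% slower.
print(f"overhead ratio: {stats['ratio']:.4f}")
print(registry.summary())  # count/total/mean/max/min plus labels=['demo']

Because compare_overhead warms up and then times each callable in a tight loop, the registry accumulates one record per instrumented call, which summary() then aggregates.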

src/tnfr/validation/aggregator.py

Lines changed: 41 additions & 1 deletion
@@ -66,6 +66,7 @@
     compute_phase_curvature,
     estimate_coherence_length,
 )
+from ..performance.guardrails import PerformanceRegistry

 __all__ = [
     "ValidationReport",
@@ -132,6 +133,8 @@ def run_structural_validation(
     xi_c_watch_multiplier: float = 3.0,
     # Optional baselines for drift calculations
     baseline_structural_potential: Dict[Any, float] | None = None,
+    # Performance instrumentation (opt-in)
+    perf_registry: PerformanceRegistry | None = None,
 ) -> ValidationReport:
     """Run enhanced structural validation aggregating grammar + field safety.

@@ -169,6 +172,15 @@

     notes: List[str] = []

+    # Performance start (if instrumentation active)
+    start_time = None
+    if perf_registry is not None:
+        try:
+            import time as _t
+            start_time = _t.perf_counter()
+        except Exception:  # pragma: no cover
+            start_time = None
+
     # Grammar errors (read-only enrichment)
     grammar_errors: List[ExtendedGrammarError] = []
     if sequence is not None:
@@ -314,7 +326,7 @@
         "mean_node_distance": mean_node_distance,
     }

-    return ValidationReport(
+    report = ValidationReport(
         status=status,
         risk_level=risk_level,
         grammar_errors=grammar_errors,
@@ -323,3 +335,31 @@
         sequence=tuple(sequence or []),
         notes=notes,
     )
+
+    if perf_registry is not None and start_time is not None:
+        try:
+            import time as _t
+            perf_registry.record(
+                "validation",
+                _t.perf_counter() - start_time,
+                meta={
+                    "nodes": (
+                        G.number_of_nodes()
+                        if hasattr(G, "number_of_nodes")
+                        else None
+                    ),
+                    "edges": (
+                        G.number_of_edges()
+                        if hasattr(G, "number_of_edges")
+                        else None
+                    ),
+                    "sequence_len": (
+                        len(sequence) if sequence is not None else 0
+                    ),
+                    "status": status,
+                },
+            )
+        except Exception:  # pragma: no cover
+            pass
+
+    return report
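
At the call site the instrumentation is opt-in: pass a registry and read it back afterwards. A minimal sketch; the full signature of run_structural_validation is not visible in these hunks, so G (a networkx-style graph, per the number_of_nodes/number_of_edges usage above) and sequence are placeholders:

from tnfr.performance.guardrails import PerformanceRegistry
from tnfr.validation.aggregator import run_structural_validation

registry = PerformanceRegistry()

# G and sequence are placeholder arguments; supply the real ones as usual.
report = run_structural_validation(G, sequence=sequence, perf_registry=registry)

# One "validation" record per call, with node/edge counts and status in meta.
for rec in registry.filter("validation"):
    print(rec.elapsed, rec.meta)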
Lines changed: 52 additions & 0 deletions
@@ -0,0 +1,52 @@
"""Tests for performance guardrails instrumentation.

Ensures perf_guard adds minimal overhead (<8% ratio over baseline) for a
moderate workload. The threshold is conservative to reduce flakiness across
CI schedulers.
"""
from __future__ import annotations

from time import perf_counter

from tnfr.performance.guardrails import (
    PerformanceRegistry,
    perf_guard,
    compare_overhead,
)


def _baseline_op() -> int:
    # Moderate workload to reduce relative overhead impact of instrumentation
    x = 0
    for _ in range(2000):
        x += 1
    return x


def test_perf_guard_overhead_ratio():
    registry = PerformanceRegistry()

    @perf_guard("test", registry)
    def _instrumented() -> int:
        return _baseline_op()

    stats = compare_overhead(_baseline_op, _instrumented, runs=500)
    # Overhead ratio should remain below 8% for moderate workload
    assert stats["ratio"] < 0.08, stats
    # Registry should have at least one record (warmup + runs)
    assert registry.summary()["count"] >= 1


def test_perf_registry_summary_fields():
    registry = PerformanceRegistry()
    # Manually record
    start = perf_counter()
    _baseline_op()
    registry.record(
        "manual",
        perf_counter() - start,
        meta={"kind": "baseline"},
    )
    summary = registry.summary()
    assert summary["count"] == 1
    assert "mean" in summary and summary["mean"] > 0
    assert "labels" in summary and summary["labels"] == ["manual"]
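
To exercise just these guardrail tests (a hedged suggestion, assuming the repository's standard pytest setup; the -k expression matches the two test names above by substring):

pytest -k "perf_guard or perf_registry" -q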
