From a3d2755c4b88c4bdbf58925cc83524c8f9ab0c5b Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Thu, 28 Aug 2025 14:24:13 +0200 Subject: [PATCH 01/22] chore: bump version to 5.0.0 --- lerna.json | 6 ++++-- packages/benchmark.js-plugin/package.json | 4 ++-- packages/core/package.json | 2 +- packages/tinybench-plugin/package.json | 4 ++-- packages/vitest-plugin/package.json | 4 ++-- pnpm-lock.yaml | 6 +++--- 6 files changed, 14 insertions(+), 12 deletions(-) diff --git a/lerna.json b/lerna.json index d9d77fcd..9819cb20 100644 --- a/lerna.json +++ b/lerna.json @@ -1,7 +1,9 @@ { "npmClient": "pnpm", "useWorkspaces": true, - "packages": ["packages/*"], + "packages": [ + "packages/*" + ], "$schema": "node_modules/lerna/schemas/lerna-schema.json", - "version": "4.0.1" + "version": "5.0.0" } diff --git a/packages/benchmark.js-plugin/package.json b/packages/benchmark.js-plugin/package.json index a99ed43c..9b7ca25b 100644 --- a/packages/benchmark.js-plugin/package.json +++ b/packages/benchmark.js-plugin/package.json @@ -1,6 +1,6 @@ { "name": "@codspeed/benchmark.js-plugin", - "version": "4.0.1", + "version": "5.0.0", "description": "Benchmark.js compatibility layer for CodSpeed", "keywords": [ "codspeed", @@ -27,7 +27,7 @@ "jest-mock-extended": "^3.0.4" }, "dependencies": { - "@codspeed/core": "workspace:^4.0.1", + "@codspeed/core": "workspace:^5.0.0", "lodash": "^4.17.10", "stack-trace": "1.0.0-pre2" }, diff --git a/packages/core/package.json b/packages/core/package.json index ad5dba6a..1fc2345f 100644 --- a/packages/core/package.json +++ b/packages/core/package.json @@ -1,6 +1,6 @@ { "name": "@codspeed/core", - "version": "4.0.1", + "version": "5.0.0", "description": "The core Node library used to integrate with Codspeed runners", "keywords": [ "codspeed", diff --git a/packages/tinybench-plugin/package.json b/packages/tinybench-plugin/package.json index db43c514..f5c41f57 100644 --- a/packages/tinybench-plugin/package.json +++ b/packages/tinybench-plugin/package.json @@ -1,6 +1,6 @@ { "name": "@codspeed/tinybench-plugin", - "version": "4.0.1", + "version": "5.0.0", "description": "tinybench compatibility layer for CodSpeed", "keywords": [ "codspeed", @@ -31,7 +31,7 @@ "vitest": "^3.2.4" }, "dependencies": { - "@codspeed/core": "workspace:^4.0.1", + "@codspeed/core": "workspace:^5.0.0", "stack-trace": "1.0.0-pre2" }, "peerDependencies": { diff --git a/packages/vitest-plugin/package.json b/packages/vitest-plugin/package.json index f26c53e7..3a4c336c 100644 --- a/packages/vitest-plugin/package.json +++ b/packages/vitest-plugin/package.json @@ -1,6 +1,6 @@ { "name": "@codspeed/vitest-plugin", - "version": "4.0.1", + "version": "5.0.0", "description": "vitest plugin for CodSpeed", "keywords": [ "codspeed", @@ -28,7 +28,7 @@ "bench": "vitest bench" }, "dependencies": { - "@codspeed/core": "workspace:^4.0.1" + "@codspeed/core": "workspace:^5.0.0" }, "peerDependencies": { "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0", diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 5b8bb92e..98e6f532 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -213,7 +213,7 @@ importers: packages/benchmark.js-plugin: dependencies: '@codspeed/core': - specifier: workspace:^4.0.1 + specifier: workspace:^5.0.0 version: link:../core lodash: specifier: ^4.17.10 @@ -275,7 +275,7 @@ importers: packages/tinybench-plugin: dependencies: '@codspeed/core': - specifier: workspace:^4.0.1 + specifier: workspace:^5.0.0 version: link:../core stack-trace: specifier: 1.0.0-pre2 @@ -297,7 +297,7 @@ importers: packages/vitest-plugin: 
dependencies: '@codspeed/core': - specifier: workspace:^4.0.1 + specifier: workspace:^5.0.0 version: link:../core devDependencies: '@total-typescript/shoehorn': From 102e9c1df25edb1ea059e3ee4454b8ce50cc02a4 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 19 Sep 2025 10:26:32 +0200 Subject: [PATCH 02/22] ci: pin action to v3 and remove moon's --concurrency usage v3 was the action version of the previous base job. We want to do changes one by one, and v4 (current main) does not work without the concurrency change. This just brings the concurrency change that we will need to set the action version to the latest (v4) again. --- .github/workflows/ci.yml | 4 ++-- .github/workflows/codspeed.yml | 13 ++++++++----- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index b26f64db..699e530f 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,7 +62,7 @@ jobs: - name: Run benchmarks with tinybench-plugin # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@main + uses: CodSpeedHQ/action@v3 with: run: pnpm --filter ${{ matrix.example }} bench-tinybench env: CODSPEED_SKIP_UPLOAD: true CODSPEED_DEBUG: true - name: Run benchmarks with benchmark.js-plugin # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@main + uses: CodSpeedHQ/action@v3 with: run: pnpm --filter ${{ matrix.example }} bench-benchmark-js env: CODSPEED_SKIP_UPLOAD: true CODSPEED_DEBUG: true diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml index 1695e38b..d0c77b2d 100644 --- a/.github/workflows/codspeed.yml +++ b/.github/workflows/codspeed.yml @@ -25,10 +25,12 @@ jobs: - name: Run benchmarks # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@main + uses: CodSpeedHQ/action@v3 with: run: | - pnpm moon run --concurrency 1 :bench + pnpm moon run tinybench-plugin:bench + pnpm moon run vitest-plugin:bench + pnpm moon run benchmark.js-plugin:bench pnpm --workspace-concurrency 1 -r bench-tinybench pnpm --workspace-concurrency 1 -r bench-benchmark-js pnpm --workspace-concurrency 1 -r bench-vitest @@ -51,10 +53,11 @@ jobs: - name: Run benchmarks # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@main + uses: CodSpeedHQ/action@v3 with: - # Only tinybench supports walltime for now run: | - pnpm moon run --concurrency 1 :bench + pnpm moon run tinybench-plugin:bench + pnpm moon run vitest-plugin:bench + pnpm moon run benchmark.js-plugin:bench pnpm --workspace-concurrency 1 -r bench-tinybench pnpm --workspace-concurrency 1 -r bench-vitest From c3ddfff2dc84d7eecd5d7a42c1a4b262f2f4e0f9 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 19 Sep 2025 11:07:38 +0200 Subject: [PATCH 03/22] ci: re-follow main version for action to use v4 Enables perf by default on walltime runs, and requires explicit mode.
--- .github/workflows/ci.yml | 6 ++++-- .github/workflows/codspeed.yml | 6 ++++-- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 699e530f..720726b7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -62,16 +62,18 @@ jobs: - name: Run benchmarks with tinybench-plugin # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@v3 + uses: CodSpeedHQ/action@main with: + mode: instrumentation run: pnpm --filter ${{ matrix.example }} bench-tinybench env: CODSPEED_SKIP_UPLOAD: true CODSPEED_DEBUG: true - name: Run benchmarks with benchmark.js-plugin # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@v3 + uses: CodSpeedHQ/action@main with: + mode: instrumentation run: pnpm --filter ${{ matrix.example }} bench-benchmark-js env: CODSPEED_SKIP_UPLOAD: true diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml index d0c77b2d..9c002e5f 100644 --- a/.github/workflows/codspeed.yml +++ b/.github/workflows/codspeed.yml @@ -25,8 +25,9 @@ jobs: - name: Run benchmarks # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@v3 + uses: CodSpeedHQ/action@main with: + mode: instrumentation run: | pnpm moon run tinybench-plugin:bench pnpm moon run vitest-plugin:bench @@ -53,8 +54,9 @@ jobs: - name: Run benchmarks # use version from `main` branch to always test the latest version, in real projects, use a tag, like `@v2` - uses: CodSpeedHQ/action@v3 + uses: CodSpeedHQ/action@main with: + mode: walltime run: | pnpm moon run tinybench-plugin:bench pnpm moon run vitest-plugin:bench From 81f0dc9f93de801fe713d4b33c14091686b88eb6 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 19 Sep 2025 11:45:59 +0200 Subject: [PATCH 04/22] fix(core): split node flags to be more minimalistic in walltime These flags will be tweaked in the PR adding proper perf management. 
--- packages/core/src/introspection.ts | 41 +++++++++++-------- .../vitest-plugin/src/__tests__/index.test.ts | 4 +- 2 files changed, 26 insertions(+), 19 deletions(-) diff --git a/packages/core/src/introspection.ts b/packages/core/src/introspection.ts index 1be58ab1..ce965922 100644 --- a/packages/core/src/introspection.ts +++ b/packages/core/src/introspection.ts @@ -1,28 +1,35 @@ import { writeFileSync } from "fs"; +import { getCodspeedRunnerMode } from "."; const CUSTOM_INTROSPECTION_EXIT_CODE = 0; export const getV8Flags = () => { const nodeVersionMajor = parseInt(process.version.slice(1).split(".")[0]); + const codspeedRunnerMode = getCodspeedRunnerMode(); - const flags = [ - "--hash-seed=1", - "--random-seed=1", - "--no-opt", - "--predictable", - "--predictable-gc-schedule", - "--interpreted-frames-native-stack", - "--allow-natives-syntax", - "--expose-gc", - "--no-concurrent-sweeping", - "--max-old-space-size=4096", - ]; - if (nodeVersionMajor < 18) { - flags.push("--no-randomize-hashes"); - } - if (nodeVersionMajor < 20) { - flags.push("--no-scavenge-task"); + const flags = ["--interpreted-frames-native-stack", "--allow-natives-syntax"]; + + if (codspeedRunnerMode === "instrumented") { + flags.push( + ...[ + "--hash-seed=1", + "--random-seed=1", + "--no-opt", + "--predictable", + "--predictable-gc-schedule", + "--expose-gc", + "--no-concurrent-sweeping", + "--max-old-space-size=4096", + ] + ); + if (nodeVersionMajor < 18) { + flags.push("--no-randomize-hashes"); + } + if (nodeVersionMajor < 20) { + flags.push("--no-scavenge-task"); + } } + return flags; }; diff --git a/packages/vitest-plugin/src/__tests__/index.test.ts b/packages/vitest-plugin/src/__tests__/index.test.ts index 290b0dde..b0322819 100644 --- a/packages/vitest-plugin/src/__tests__/index.test.ts +++ b/packages/vitest-plugin/src/__tests__/index.test.ts @@ -93,13 +93,13 @@ describe("codSpeedPlugin", () => { poolOptions: { forks: { execArgv: [ + "--interpreted-frames-native-stack", + "--allow-natives-syntax", "--hash-seed=1", "--random-seed=1", "--no-opt", "--predictable", "--predictable-gc-schedule", - "--interpreted-frames-native-stack", - "--allow-natives-syntax", "--expose-gc", "--no-concurrent-sweeping", "--max-old-space-size=4096", From bbc9bffdada67ad86890408a2dd1f4d4fdc630e9 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Thu, 11 Sep 2025 17:19:33 +0200 Subject: [PATCH 05/22] feat(core): add native __codspeed_root_frame__ function This is not used yet, but keeping the implementation just in case. 
If untouched for a long time, do not hesitate to remove --- packages/core/binding.gyp | 1 + packages/core/src/native_core/index.ts | 3 +++ .../core/src/native_core/instruments/hooks.ts | 6 +++++ .../native_core/instruments/hooks_wrapper.cc | 23 +++++++++++++++++++ .../src/native_core/linux_perf/linux_perf.cc | 2 +- 5 files changed, 34 insertions(+), 1 deletion(-) diff --git a/packages/core/binding.gyp b/packages/core/binding.gyp index fb60fa44..b04df35c 100644 --- a/packages/core/binding.gyp +++ b/packages/core/binding.gyp @@ -9,6 +9,7 @@ "-fno-exceptions" ], "cflags": [ + "-g", "-Wno-maybe-uninitialized", "-Wno-unused-variable", "-Wno-unused-parameter", diff --git a/packages/core/src/native_core/index.ts b/packages/core/src/native_core/index.ts index f56417be..5b35f6a3 100644 --- a/packages/core/src/native_core/index.ts +++ b/packages/core/src/native_core/index.ts @@ -46,6 +46,9 @@ try { setIntegration: (_name: string, _version: string) => { return 0; }, + __codspeed_root_frame__: <T>(callback: () => T): T => { + return callback(); + }, }, isBound: false, }; diff --git a/packages/core/src/native_core/instruments/hooks.ts b/packages/core/src/native_core/instruments/hooks.ts index e71457e1..3d321a5f 100644 --- a/packages/core/src/native_core/instruments/hooks.ts +++ b/packages/core/src/native_core/instruments/hooks.ts @@ -31,4 +31,10 @@ export interface InstrumentHooks { * @returns 0 on success, non-zero on error */ setIntegration(name: string, version: string): number; + + /** + * Execute a callback function with __codspeed_root_frame__ in its stack trace + * @param callback Function to execute + */ + __codspeed_root_frame__<T>(callback: () => T): T; } diff --git a/packages/core/src/native_core/instruments/hooks_wrapper.cc b/packages/core/src/native_core/instruments/hooks_wrapper.cc index f760b370..3b6013bf 100644 --- a/packages/core/src/native_core/instruments/hooks_wrapper.cc +++ b/packages/core/src/native_core/instruments/hooks_wrapper.cc @@ -81,6 +81,27 @@ Napi::Number SetIntegration(const Napi::CallbackInfo &info) { return Napi::Number::New(env, result); } +Napi::Value __attribute__ ((noinline)) __codspeed_root_frame__(const Napi::CallbackInfo &info) { + Napi::Env env = info.Env(); + + if (info.Length() != 1) { + Napi::TypeError::New(env, "Expected 1 argument: callback function") + .ThrowAsJavaScriptException(); + return env.Undefined(); + } + + if (!info[0].IsFunction()) { + Napi::TypeError::New(env, "Expected function argument") + .ThrowAsJavaScriptException(); + return env.Undefined(); + } + + Napi::Function callback = info[0].As<Napi::Function>(); + Napi::Value result = callback.Call(env.Global(), {}); + + return result; +} + Napi::Object Initialize(Napi::Env env, Napi::Object exports) { Napi::Object instrumentHooksObj = Napi::Object::New(env); @@ -96,6 +117,8 @@ Napi::Object Initialize(Napi::Env env, Napi::Object exports) { Napi::Function::New(env, SetExecutedBenchmark)); instrumentHooksObj.Set(Napi::String::New(env, "setIntegration"), Napi::Function::New(env, SetIntegration)); + instrumentHooksObj.Set(Napi::String::New(env, "__codspeed_root_frame__"), + Napi::Function::New(env, __codspeed_root_frame__)); exports.Set(Napi::String::New(env, "InstrumentHooks"), instrumentHooksObj); diff --git a/packages/core/src/native_core/linux_perf/linux_perf.cc b/packages/core/src/native_core/linux_perf/linux_perf.cc index 31ace3f8..40e5cbec 100644 --- a/packages/core/src/native_core/linux_perf/linux_perf.cc +++ b/packages/core/src/native_core/linux_perf/linux_perf.cc @@ -39,4 +39,4 @@ Napi::Value
LinuxPerf::Stop(const Napi::CallbackInfo &info) { return Napi::Boolean::New(info.Env(), false); } -} // namespace codspeed_native \ No newline at end of file +} // namespace codspeed_native From bf330c3123c234107ba402a3190c73bdab7b2891 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Thu, 11 Sep 2025 17:21:36 +0200 Subject: [PATCH 06/22] refactor(tinybench-plugin): call setupCore no matter the codspeed mode if not disabled --- packages/tinybench-plugin/src/index.ts | 2 ++ packages/tinybench-plugin/src/instrumented.ts | 2 -- packages/tinybench-plugin/tests/index.integ.test.ts | 4 ++-- 3 files changed, 4 insertions(+), 4 deletions(-) diff --git a/packages/tinybench-plugin/src/index.ts b/packages/tinybench-plugin/src/index.ts index 329575ef..e6bbf91a 100644 --- a/packages/tinybench-plugin/src/index.ts +++ b/packages/tinybench-plugin/src/index.ts @@ -3,6 +3,7 @@ import { getGitDir, InstrumentHooks, mongoMeasurement, + setupCore, SetupInstrumentsRequestBody, SetupInstrumentsResponse, tryIntrospect, @@ -22,6 +23,7 @@ export function withCodSpeed(bench: Bench): Bench { if (codspeedRunnerMode === "disabled") { return bench; } + setupCore(); const rootCallingFile = getCallingFile(); diff --git a/packages/tinybench-plugin/src/instrumented.ts b/packages/tinybench-plugin/src/instrumented.ts index 80955247..66633e60 100644 --- a/packages/tinybench-plugin/src/instrumented.ts +++ b/packages/tinybench-plugin/src/instrumented.ts @@ -2,7 +2,6 @@ import { InstrumentHooks, mongoMeasurement, optimizeFunction, - setupCore, teardownCore, } from "@codspeed/core"; import { Bench, Fn, FnOptions } from "tinybench"; @@ -18,7 +17,6 @@ export function runInstrumentedBench( console.log( `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (instrumented mode)` ); - setupCore(); for (const task of bench.tasks) { const uri = getTaskUri(bench, task.name, rootCallingFile); diff --git a/packages/tinybench-plugin/tests/index.integ.test.ts b/packages/tinybench-plugin/tests/index.integ.test.ts index fe467d7b..1b513703 100644 --- a/packages/tinybench-plugin/tests/index.integ.test.ts +++ b/packages/tinybench-plugin/tests/index.integ.test.ts @@ -205,7 +205,7 @@ describe("Benchmark.Suite", () => { expect(afterAll).toHaveBeenCalledTimes(2); }); - it("should call setupCore and teardownCore only once after run()", async () => { + it("should call setupCore and teardownCore only once", async () => { mockCore.InstrumentHooks.isInstrumented.mockReturnValue(true); const bench = withCodSpeed(new Bench()) .add("RegExp", function () { @@ -215,7 +215,7 @@ describe("Benchmark.Suite", () => { /o/.test("Hello World!"); }); - expect(mockCore.setupCore).not.toHaveBeenCalled(); + expect(mockCore.setupCore).toHaveBeenCalledTimes(1); expect(mockCore.teardownCore).not.toHaveBeenCalled(); await bench.run(); From 04d3abfb5b0ff5eca3b1121dfb275a2565a5b988 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Thu, 11 Sep 2025 17:29:26 +0200 Subject: [PATCH 07/22] feat(tinybench-plugin): add support for perf profiling Support is still far from perfect for async/heavy code. 
--- .gitignore | 3 + packages/tinybench-plugin/benches/sample.ts | 4 +- packages/tinybench-plugin/benches/timing.ts | 6 +- packages/tinybench-plugin/src/walltime.ts | 100 ++++++++++++++++++-- 4 files changed, 101 insertions(+), 12 deletions(-) diff --git a/.gitignore b/.gitignore index 4abc61c4..3dd48dfe 100644 --- a/.gitignore +++ b/.gitignore @@ -7,6 +7,9 @@ yarn-error.log* lerna-debug.log* .pnpm-debug.log* +# JIT dumps +jit-*.dump + # Diagnostic reports (https://nodejs.org/api/report.html) report.[0-9]*.[0-9]*.[0-9]*.[0-9]*.json diff --git a/packages/tinybench-plugin/benches/sample.ts b/packages/tinybench-plugin/benches/sample.ts index 19633d62..4cc40ecf 100644 --- a/packages/tinybench-plugin/benches/sample.ts +++ b/packages/tinybench-plugin/benches/sample.ts @@ -35,7 +35,7 @@ bench }); (async () => { - await bench.run(); + bench.runSync(); console.table(bench.table()); const timingBench = withCodSpeed( @@ -44,6 +44,6 @@ bench registerTimingBenchmarks(timingBench); - await timingBench.run(); + timingBench.runSync(); console.table(timingBench.table()); })(); diff --git a/packages/tinybench-plugin/benches/timing.ts b/packages/tinybench-plugin/benches/timing.ts index b9a139c6..79130428 100644 --- a/packages/tinybench-plugin/benches/timing.ts +++ b/packages/tinybench-plugin/benches/timing.ts @@ -8,15 +8,15 @@ const busySleep = (ms: number): void => { }; export function registerTimingBenchmarks(bench: Bench) { - bench.add("wait 1ms", async () => { + bench.add("wait 1ms", () => { busySleep(1); }); - bench.add("wait 500ms", async () => { + bench.add("wait 500ms", () => { busySleep(500); }); - bench.add("wait 1sec", async () => { + bench.add("wait 1sec", () => { busySleep(1000); }); } diff --git a/packages/tinybench-plugin/src/walltime.ts b/packages/tinybench-plugin/src/walltime.ts index 2c22c539..22aa5097 100644 --- a/packages/tinybench-plugin/src/walltime.ts +++ b/packages/tinybench-plugin/src/walltime.ts @@ -1,5 +1,6 @@ import { calculateQuantiles, + InstrumentHooks, mongoMeasurement, msToNs, msToS, @@ -7,7 +8,7 @@ import { type Benchmark, type BenchmarkStats, } from "@codspeed/core"; -import { Bench, TaskResult } from "tinybench"; +import { Bench, Fn, TaskResult } from "tinybench"; import { getTaskUri } from "./uri"; declare const __VERSION__: string; @@ -28,21 +29,27 @@ export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { const benchmarks: Benchmark[] = []; - // Run the bench naturally to collect TaskResult data - const results = []; - // Collect and report walltime data for (const task of bench.tasks) { const uri = getTaskUri(bench, task.name, rootCallingFile); + // Override the function under test to add a static frame + const { fn } = task as unknown as { fn: Fn }; + async function __codspeed_root_frame__() { + await fn(); + } + (task as any).fn = __codspeed_root_frame__; + // run the warmup of the task right before its actual run if (bench.opts.warmup) { await task.warmup(); } + await mongoMeasurement.start(uri); - const taskResult = await task.run(); + InstrumentHooks.startBenchmark(); + await task.run(); + InstrumentHooks.stopBenchmark(); await mongoMeasurement.stop(uri); - results.push(taskResult); if (task.result) { // Convert tinybench result to BenchmarkStats format @@ -67,8 +74,87 @@ export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { }; benchmarks.push(benchmark); + console.log(` ✔ Collected walltime data for ${uri}`); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); + } else { + console.warn(` ⚠ No result data 
available for ${uri}`); + } + } + + // Write results to JSON file using core function + if (benchmarks.length > 0) { + writeWalltimeResults(benchmarks); + } + + console.log( + `[CodSpeed] Done collecting walltime data for ${bench.tasks.length} benches.` + ); + // Restore our custom run method + bench.run = originalRun; + + return bench.tasks; + }; + + bench.runSync = () => { + console.log( + `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (walltime mode)` + ); + + // Store the original run method before we override it + const originalRun = bench.run; + + // Temporarily restore the original run to get actual benchmark results + const benchProto = Object.getPrototypeOf(bench); + const prototypeRun = benchProto.run; + bench.run = prototypeRun; + + const benchmarks: Benchmark[] = []; + + // Collect and report walltime data + for (const task of bench.tasks) { + const uri = getTaskUri(bench, task.name, rootCallingFile); + + // run the warmup of the task right before its actual run + if (bench.opts.warmup) { + task.warmup(); + } + // Override the function under test to add a static frame + const { fn } = task as unknown as { fn: Fn }; + function __codspeed_root_frame__() { + fn(); + } + (task as any).fn = __codspeed_root_frame__; + + InstrumentHooks.startBenchmark(); + task.runSync(); + InstrumentHooks.stopBenchmark(); + + if (task.result) { + // Convert tinybench result to BenchmarkStats format + const stats = convertTinybenchResultToBenchmarkStats( + task.result, + bench.opts.warmup ? bench.opts.warmupIterations ?? 0 : 0 + ); + + const benchmark: Benchmark = { + name: task.name, + uri, + config: { + max_rounds: bench.opts.iterations ?? null, + max_time_ns: bench.opts.time ? msToNs(bench.opts.time) : null, + min_round_time_ns: null, // tinybench does not have an option for this + warmup_time_ns: + bench.opts.warmup && bench.opts.warmupTime + ? 
msToNs(bench.opts.warmupTime) + : null, + }, + stats, + }; + + benchmarks.push(benchmark); console.log(` ✔ Collected walltime data for ${uri}`); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); } else { console.warn(` ⚠ No result data available for ${uri}`); } @@ -85,7 +171,7 @@ export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { // Restore our custom run method bench.run = originalRun; - return results; + return bench.tasks; }; } From 7edb085cfabed3cdfeaaa392417e47fd969931f3 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Thu, 11 Sep 2025 17:30:53 +0200 Subject: [PATCH 08/22] feat(core): add a warning in results.json about walltime profiling code --- packages/core/src/walltime/index.ts | 50 ++++++++++++++++------- packages/core/src/walltime/interfaces.ts | 1 + packages/tinybench-plugin/src/walltime.ts | 2 +- 3 files changed, 38 insertions(+), 15 deletions(-) diff --git a/packages/core/src/walltime/index.ts b/packages/core/src/walltime/index.ts index 5f06d671..7632fa80 100644 --- a/packages/core/src/walltime/index.ts +++ b/packages/core/src/walltime/index.ts @@ -8,19 +8,34 @@ export function getProfileFolder(): string | null { return process.env.CODSPEED_PROFILE_FOLDER || null; } -export function writeWalltimeResults(benchmarks: Benchmark[]) { +export function writeWalltimeResults( + benchmarks: Benchmark[], + asyncWarning = false +): void { const profileFolder = getProfileFolder(); - let resultPath: string; - - if (profileFolder) { - const resultsDir = path.join(profileFolder, "results"); - fs.mkdirSync(resultsDir, { recursive: true }); - resultPath = path.join(resultsDir, `${process.pid}.json`); - } else { - // Fallback: write to .codspeed in current working directory - const codspeedDir = path.join(process.cwd(), ".codspeed"); - fs.mkdirSync(codspeedDir, { recursive: true }); - resultPath = path.join(codspeedDir, `results_${Date.now()}.json`); + + const resultDir = (() => { + if (profileFolder) { + return path.join(profileFolder, "results"); + } else { + // Fallback: write to .codspeed in current working directory + return path.join(process.cwd(), ".codspeed"); + } + })(); + fs.mkdirSync(resultDir, { recursive: true }); + const resultPath = path.join(resultDir, `${process.pid}.json`); + + // Check if file already exists and merge benchmarks + let existingBenchmarks: Benchmark[] = []; + if (fs.existsSync(resultPath)) { + try { + const existingData = JSON.parse( + fs.readFileSync(resultPath, "utf-8") + ) as ResultData; + existingBenchmarks = existingData.benchmarks || []; + } catch (error) { + console.warn(`[CodSpeed] Failed to read existing results file: ${error}`); + } } const data: ResultData = { @@ -30,11 +45,18 @@ export function writeWalltimeResults(benchmarks: Benchmark[]) { pid: process.pid, }, instrument: { type: "walltime" }, - benchmarks: benchmarks, + benchmarks: [...existingBenchmarks, ...benchmarks], + metadata: asyncWarning + ? 
{ + async_warning: "Profiling is inaccurate due to async operations", + } + : undefined, }; fs.writeFileSync(resultPath, JSON.stringify(data, null, 2)); - console.log(`[CodSpeed] Results written to ${resultPath}`); + console.log( + `[CodSpeed] Results written to ${resultPath} (${data.benchmarks.length} total benchmarks)` + ); } export * from "./interfaces"; diff --git a/packages/core/src/walltime/interfaces.ts b/packages/core/src/walltime/interfaces.ts index 126a442a..a0518837 100644 --- a/packages/core/src/walltime/interfaces.ts +++ b/packages/core/src/walltime/interfaces.ts @@ -40,4 +40,5 @@ export interface ResultData { }; instrument: { type: "walltime" }; benchmarks: Benchmark[]; + metadata?: Record; } diff --git a/packages/tinybench-plugin/src/walltime.ts b/packages/tinybench-plugin/src/walltime.ts index 22aa5097..cdddce63 100644 --- a/packages/tinybench-plugin/src/walltime.ts +++ b/packages/tinybench-plugin/src/walltime.ts @@ -83,7 +83,7 @@ export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { // Write results to JSON file using core function if (benchmarks.length > 0) { - writeWalltimeResults(benchmarks); + writeWalltimeResults(benchmarks, true); } console.log( From 725aee4c916d91c9474d3bb8f3d572fee0476490 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 12 Sep 2025 11:44:45 +0200 Subject: [PATCH 09/22] feat(tinybench-plugin): support bench runSync in instrumented mode --- packages/tinybench-plugin/src/instrumented.ts | 51 +++++++++++++++++++ 1 file changed, 51 insertions(+) diff --git a/packages/tinybench-plugin/src/instrumented.ts b/packages/tinybench-plugin/src/instrumented.ts index 66633e60..7d4a158f 100644 --- a/packages/tinybench-plugin/src/instrumented.ts +++ b/packages/tinybench-plugin/src/instrumented.ts @@ -63,4 +63,55 @@ export function runInstrumentedBench( console.log(`[CodSpeed] Done running ${bench.tasks.length} benches.`); return bench.tasks; }; + + bench.runSync = () => { + console.log( + `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (instrumented mode)` + ); + + for (const task of bench.tasks) { + const uri = getTaskUri(bench, task.name, rootCallingFile); + + // Access private fields + const { fnOpts, fn } = task as unknown as { fnOpts?: FnOptions; fn: Fn }; + + // Call beforeAll hook if it exists + fnOpts?.beforeAll?.call(task, "run"); + + // run optimizations + optimizeFunction(async () => { + fnOpts?.beforeEach?.call(task, "run"); + fn(); + fnOpts?.afterEach?.call(task, "run"); + }); + + // run instrumented benchmark + fnOpts?.beforeEach?.call(task, "run"); + + // await mongoMeasurement.start(uri); + global.gc?.(); + (function __codspeed_root_frame__() { + InstrumentHooks.startBenchmark(); + fn(); + InstrumentHooks.stopBenchmark(); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); + })(); + mongoMeasurement.stop(uri); + + fnOpts?.afterEach?.call(task, "run"); + + fnOpts?.afterAll?.call(task, "run"); + + // print results + console.log( + ` ✔ ${ + InstrumentHooks.isInstrumented() ? 
"Measured" : "Checked" + } ${uri}` + ); + } + + teardownCore(); + console.log(`[CodSpeed] Done running ${bench.tasks.length} benches.`); + return bench.tasks; + }; } From ab65f70c6a119a9d429ddff74647afcc38553fac Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 12 Sep 2025 12:05:14 +0200 Subject: [PATCH 10/22] chore: fix eslint warnings --- packages/core/src/native_core/index.ts | 2 ++ packages/tinybench-plugin/src/walltime.ts | 4 ++++ 2 files changed, 6 insertions(+) diff --git a/packages/core/src/native_core/index.ts b/packages/core/src/native_core/index.ts index 5b35f6a3..9cbb4a90 100644 --- a/packages/core/src/native_core/index.ts +++ b/packages/core/src/native_core/index.ts @@ -40,9 +40,11 @@ try { stopBenchmark: () => { return 0; }, + // eslint-disable-next-line @typescript-eslint/no-unused-vars setExecutedBenchmark: (_pid: number, _uri: string) => { return 0; }, + // eslint-disable-next-line @typescript-eslint/no-unused-vars setIntegration: (_name: string, _version: string) => { return 0; }, diff --git a/packages/tinybench-plugin/src/walltime.ts b/packages/tinybench-plugin/src/walltime.ts index cdddce63..6ff932db 100644 --- a/packages/tinybench-plugin/src/walltime.ts +++ b/packages/tinybench-plugin/src/walltime.ts @@ -35,9 +35,11 @@ export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { // Override the function under test to add a static frame const { fn } = task as unknown as { fn: Fn }; + // eslint-disable-next-line no-inner-declarations async function __codspeed_root_frame__() { await fn(); } + // eslint-disable-next-line @typescript-eslint/no-explicit-any (task as any).fn = __codspeed_root_frame__; // run the warmup of the task right before its actual run @@ -121,9 +123,11 @@ export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { // Override the function under test to add a static frame const { fn } = task as unknown as { fn: Fn }; + // eslint-disable-next-line no-inner-declarations function __codspeed_root_frame__() { fn(); } + // eslint-disable-next-line @typescript-eslint/no-explicit-any (task as any).fn = __codspeed_root_frame__; InstrumentHooks.startBenchmark(); From d3e40bbac60fd4669bea5294073fbb8022199116 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Mon, 15 Sep 2025 14:46:06 +0200 Subject: [PATCH 11/22] refactor(tinybench-plugin): commonize logic between `run` and `runSync` --- packages/tinybench-plugin/src/instrumented.ts | 142 +++++------ packages/tinybench-plugin/src/walltime.ts | 231 ++++++++---------- 2 files changed, 169 insertions(+), 204 deletions(-) diff --git a/packages/tinybench-plugin/src/instrumented.ts b/packages/tinybench-plugin/src/instrumented.ts index 7d4a158f..e4a33953 100644 --- a/packages/tinybench-plugin/src/instrumented.ts +++ b/packages/tinybench-plugin/src/instrumented.ts @@ -4,7 +4,7 @@ import { optimizeFunction, teardownCore, } from "@codspeed/core"; -import { Bench, Fn, FnOptions } from "tinybench"; +import { Bench, Fn, FnOptions, Task } from "tinybench"; import { getTaskUri } from "./uri"; declare const __VERSION__: string; @@ -13,103 +13,89 @@ export function runInstrumentedBench( bench: Bench, rootCallingFile: string ): void { - bench.run = async () => { - console.log( - `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (instrumented mode)` - ); - - for (const task of bench.tasks) { - const uri = getTaskUri(bench, task.name, rootCallingFile); + const runTaskAsync = async (task: Task, uri: string): Promise => { + const { fnOpts, fn } = task as unknown as { 
fnOpts?: FnOptions; fn: Fn }; - // Access private fields - const { fnOpts, fn } = task as unknown as { fnOpts?: FnOptions; fn: Fn }; + await fnOpts?.beforeAll?.call(task, "run"); + await optimizeFunction(async () => { + await fnOpts?.beforeEach?.call(task, "run"); + await fn(); + await fnOpts?.afterEach?.call(task, "run"); + }); + await fnOpts?.beforeEach?.call(task, "run"); + await mongoMeasurement.start(uri); - // Call beforeAll hook if it exists - await fnOpts?.beforeAll?.call(task, "run"); + await (async function __codspeed_root_frame__() { + global.gc?.(); + InstrumentHooks.startBenchmark(); + await fn(); + InstrumentHooks.stopBenchmark(); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); + })(); + + await mongoMeasurement.stop(uri); + await fnOpts?.afterEach?.call(task, "run"); + await fnOpts?.afterAll?.call(task, "run"); + }; - // run optimizations - await optimizeFunction(async () => { - await fnOpts?.beforeEach?.call(task, "run"); - await fn(); - await fnOpts?.afterEach?.call(task, "run"); - }); + // Sync task runner + const runTaskSync = (task: Task, uri: string): void => { + const { fnOpts, fn } = task as unknown as { fnOpts?: FnOptions; fn: Fn }; - // run instrumented benchmark - await fnOpts?.beforeEach?.call(task, "run"); + fnOpts?.beforeAll?.call(task, "run"); + fnOpts?.beforeEach?.call(task, "run"); - await mongoMeasurement.start(uri); + (function __codspeed_root_frame__() { global.gc?.(); - await (async function __codspeed_root_frame__() { - InstrumentHooks.startBenchmark(); - await fn(); - InstrumentHooks.stopBenchmark(); - InstrumentHooks.setExecutedBenchmark(process.pid, uri); - })(); - await mongoMeasurement.stop(uri); - - await fnOpts?.afterEach?.call(task, "run"); + InstrumentHooks.startBenchmark(); + fn(); + InstrumentHooks.stopBenchmark(); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); + })(); + + fnOpts?.afterEach?.call(task, "run"); + fnOpts?.afterAll?.call(task, "run"); + }; - await fnOpts?.afterAll?.call(task, "run"); + bench.run = async () => { + logStart(); - // print results - console.log( - ` ✔ ${ - InstrumentHooks.isInstrumented() ? 
"Measured" : "Checked" - } ${uri}` - ); + for (const task of bench.tasks) { + const uri = getTaskUri(bench, task.name, rootCallingFile); + await runTaskAsync(task, uri); + logTaskCompletion(uri); } - teardownCore(); - console.log(`[CodSpeed] Done running ${bench.tasks.length} benches.`); - return bench.tasks; + return logEnd(); }; bench.runSync = () => { - console.log( - `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (instrumented mode)` - ); + logStart(); for (const task of bench.tasks) { const uri = getTaskUri(bench, task.name, rootCallingFile); + runTaskSync(task, uri); + logTaskCompletion(uri); + } - // Access private fields - const { fnOpts, fn } = task as unknown as { fnOpts?: FnOptions; fn: Fn }; - - // Call beforeAll hook if it exists - fnOpts?.beforeAll?.call(task, "run"); - - // run optimizations - optimizeFunction(async () => { - fnOpts?.beforeEach?.call(task, "run"); - fn(); - fnOpts?.afterEach?.call(task, "run"); - }); + return logEnd(); + }; - // run instrumented benchmark - fnOpts?.beforeEach?.call(task, "run"); + const logStart = () => { + console.log( + `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (instrumented mode)` + ); + }; - // await mongoMeasurement.start(uri); - global.gc?.(); - (function __codspeed_root_frame__() { - InstrumentHooks.startBenchmark(); - fn(); - InstrumentHooks.stopBenchmark(); - InstrumentHooks.setExecutedBenchmark(process.pid, uri); - })(); - mongoMeasurement.stop(uri); - - fnOpts?.afterEach?.call(task, "run"); - - fnOpts?.afterAll?.call(task, "run"); - - // print results - console.log( - ` ✔ ${ - InstrumentHooks.isInstrumented() ? "Measured" : "Checked" - } ${uri}` - ); - } + const logTaskCompletion = (uri: string) => { + console.log( + ` ✔ ${ + InstrumentHooks.isInstrumented() ? 
"Measured" : "Checked" + } ${uri}` + ); + }; + const logEnd = () => { teardownCore(); console.log(`[CodSpeed] Done running ${bench.tasks.length} benches.`); return bench.tasks; diff --git a/packages/tinybench-plugin/src/walltime.ts b/packages/tinybench-plugin/src/walltime.ts index 6ff932db..7418eb29 100644 --- a/packages/tinybench-plugin/src/walltime.ts +++ b/packages/tinybench-plugin/src/walltime.ts @@ -5,42 +5,25 @@ import { msToNs, msToS, writeWalltimeResults, - type Benchmark, + type Benchmark as CodspeedBenchmark, type BenchmarkStats, } from "@codspeed/core"; -import { Bench, Fn, TaskResult } from "tinybench"; +import { Bench, Fn, Task, TaskResult } from "tinybench"; import { getTaskUri } from "./uri"; declare const __VERSION__: string; export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { bench.run = async () => { - console.log( - `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (walltime mode)` - ); - - // Store the original run method before we override it - const originalRun = bench.run; - - // Temporarily restore the original run to get actual benchmark results - const benchProto = Object.getPrototypeOf(bench); - const prototypeRun = benchProto.run; - bench.run = prototypeRun; - - const benchmarks: Benchmark[] = []; + logStart(); + const codspeedBenchmarks: CodspeedBenchmark[] = []; // Collect and report walltime data for (const task of bench.tasks) { const uri = getTaskUri(bench, task.name, rootCallingFile); // Override the function under test to add a static frame - const { fn } = task as unknown as { fn: Fn }; - // eslint-disable-next-line no-inner-declarations - async function __codspeed_root_frame__() { - await fn(); - } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (task as any).fn = __codspeed_root_frame__; + wrapTaskFunction(task, true); // run the warmup of the task right before its actual run if (bench.opts.warmup) { @@ -53,130 +36,126 @@ export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { InstrumentHooks.stopBenchmark(); await mongoMeasurement.stop(uri); - if (task.result) { - // Convert tinybench result to BenchmarkStats format - const stats = convertTinybenchResultToBenchmarkStats( - task.result, - bench.opts.warmup ? bench.opts.warmupIterations ?? 0 : 0 - ); - - const benchmark: Benchmark = { - name: task.name, - uri, - config: { - max_rounds: bench.opts.iterations ?? null, - max_time_ns: bench.opts.time ? msToNs(bench.opts.time) : null, - min_round_time_ns: null, // tinybench does not have an option for this - warmup_time_ns: - bench.opts.warmup && bench.opts.warmupTime - ? 
msToNs(bench.opts.warmupTime) - : null, - }, - stats, - }; - - benchmarks.push(benchmark); - console.log(` ✔ Collected walltime data for ${uri}`); - InstrumentHooks.setExecutedBenchmark(process.pid, uri); - } else { - console.warn(` ⚠ No result data available for ${uri}`); - } - } - - // Write results to JSON file using core function - if (benchmarks.length > 0) { - writeWalltimeResults(benchmarks, true); + registerCodspeedBenchmarkFromTask( + codspeedBenchmarks, + task, + bench, + rootCallingFile + ); } - console.log( - `[CodSpeed] Done collecting walltime data for ${bench.tasks.length} benches.` - ); - // Restore our custom run method - bench.run = originalRun; - - return bench.tasks; + return finalizeWalltimeRun(bench, codspeedBenchmarks, true); }; bench.runSync = () => { - console.log( - `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (walltime mode)` - ); + logStart(); + const codspeedBenchmarks: CodspeedBenchmark[] = []; - // Store the original run method before we override it - const originalRun = bench.run; - - // Temporarily restore the original run to get actual benchmark results - const benchProto = Object.getPrototypeOf(bench); - const prototypeRun = benchProto.run; - bench.run = prototypeRun; - - const benchmarks: Benchmark[] = []; - - // Collect and report walltime data for (const task of bench.tasks) { - const uri = getTaskUri(bench, task.name, rootCallingFile); + // Override the function under test to add a static frame + wrapTaskFunction(task, false); - // run the warmup of the task right before its actual run if (bench.opts.warmup) { task.warmup(); } - // Override the function under test to add a static frame - const { fn } = task as unknown as { fn: Fn }; - // eslint-disable-next-line no-inner-declarations - function __codspeed_root_frame__() { - fn(); - } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (task as any).fn = __codspeed_root_frame__; - InstrumentHooks.startBenchmark(); task.runSync(); InstrumentHooks.stopBenchmark(); - if (task.result) { - // Convert tinybench result to BenchmarkStats format - const stats = convertTinybenchResultToBenchmarkStats( - task.result, - bench.opts.warmup ? bench.opts.warmupIterations ?? 0 : 0 - ); - - const benchmark: Benchmark = { - name: task.name, - uri, - config: { - max_rounds: bench.opts.iterations ?? null, - max_time_ns: bench.opts.time ? msToNs(bench.opts.time) : null, - min_round_time_ns: null, // tinybench does not have an option for this - warmup_time_ns: - bench.opts.warmup && bench.opts.warmupTime - ? 
msToNs(bench.opts.warmupTime) - : null, - }, - stats, - }; - - benchmarks.push(benchmark); - console.log(` ✔ Collected walltime data for ${uri}`); - InstrumentHooks.setExecutedBenchmark(process.pid, uri); - } else { - console.warn(` ⚠ No result data available for ${uri}`); - } + registerCodspeedBenchmarkFromTask( + codspeedBenchmarks, + task, + bench, + rootCallingFile + ); } - // Write results to JSON file using core function - if (benchmarks.length > 0) { - writeWalltimeResults(benchmarks); - } + return finalizeWalltimeRun(bench, codspeedBenchmarks, false); + }; +} - console.log( - `[CodSpeed] Done collecting walltime data for ${bench.tasks.length} benches.` - ); - // Restore our custom run method - bench.run = originalRun; +function logStart() { + console.log( + `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (walltime mode)` + ); +} - return bench.tasks; - }; +const TINYBENCH_WARMUP_DEFAULT = 16; + +function registerCodspeedBenchmarkFromTask( + codspeedBenchmarks: CodspeedBenchmark[], + task: Task, + bench: Bench, + rootCallingFile: string +): void { + const uri = getTaskUri(bench, task.name, rootCallingFile); + + if (!task.result) { + console.warn(` ⚠ No result data available for ${uri}`); + return; + } + + const warmupIterations = bench.opts.warmup + ? bench.opts.warmupIterations ?? TINYBENCH_WARMUP_DEFAULT + : 0; + const stats = convertTinybenchResultToBenchmarkStats( + task.result, + warmupIterations + ); + + codspeedBenchmarks.push({ + name: task.name, + uri, + config: { + max_rounds: bench.opts.iterations ?? null, + max_time_ns: bench.opts.time ? msToNs(bench.opts.time) : null, + min_round_time_ns: null, // tinybench does not have an option for this + warmup_time_ns: + bench.opts.warmup && bench.opts.warmupTime + ? msToNs(bench.opts.warmupTime) + : null, + }, + stats, + }); + + console.log(` ✔ Collected walltime data for ${uri}`); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); +} + +function wrapTaskFunction(task: Task, isAsync: boolean): void { + const { fn } = task as unknown as { fn: Fn }; + if (isAsync) { + // eslint-disable-next-line no-inner-declarations + async function __codspeed_root_frame__() { + await fn(); + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (task as any).fn = __codspeed_root_frame__; + } else { + // eslint-disable-next-line no-inner-declarations + function __codspeed_root_frame__() { + fn(); + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (task as any).fn = __codspeed_root_frame__; + } +} + +function finalizeWalltimeRun( + bench: Bench, + benchmarks: CodspeedBenchmark[], + isAsync: boolean +) { + // Write results to JSON file using core function + if (benchmarks.length > 0) { + writeWalltimeResults(benchmarks, isAsync); + } + + console.log( + `[CodSpeed] Done collecting walltime data for ${bench.tasks.length} benches.` + ); + return bench.tasks; } function convertTinybenchResultToBenchmarkStats( From e31ee8b5d9c6d9a309eb11c407198e580f4825b7 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Wed, 17 Sep 2025 11:09:05 +0200 Subject: [PATCH 12/22] refactor(tinybench): share structure across walltime and instrumented --- packages/tinybench-plugin/src/index.ts | 10 +- .../tinybench-plugin/src/index.unit.test.ts | 14 +- packages/tinybench-plugin/src/instrumented.ts | 111 ++++----- packages/tinybench-plugin/src/shared.ts | 89 +++++++ packages/tinybench-plugin/src/walltime.ts | 222 ++++++++---------- .../__snapshots__/index.integ.test.ts.snap | 8 +- .../tests/index.integ.test.ts | 2 +- 7 
files changed, 249 insertions(+), 207 deletions(-) create mode 100644 packages/tinybench-plugin/src/shared.ts diff --git a/packages/tinybench-plugin/src/index.ts b/packages/tinybench-plugin/src/index.ts index e6bbf91a..06241a91 100644 --- a/packages/tinybench-plugin/src/index.ts +++ b/packages/tinybench-plugin/src/index.ts @@ -3,7 +3,6 @@ import { getGitDir, InstrumentHooks, mongoMeasurement, - setupCore, SetupInstrumentsRequestBody, SetupInstrumentsResponse, tryIntrospect, @@ -12,9 +11,9 @@ import path from "path"; import { get as getStackTrace } from "stack-trace"; import { Bench } from "tinybench"; import { fileURLToPath } from "url"; -import { runInstrumentedBench } from "./instrumented"; +import { setupCodspeedInstrumentedBench } from "./instrumented"; import { getOrCreateUriMap } from "./uri"; -import { runWalltimeBench } from "./walltime"; +import { setupCodspeedWalltimeBench } from "./walltime"; tryIntrospect(); @@ -23,7 +22,6 @@ export function withCodSpeed(bench: Bench): Bench { if (codspeedRunnerMode === "disabled") { return bench; } - setupCore(); const rootCallingFile = getCallingFile(); @@ -42,9 +40,9 @@ export function withCodSpeed(bench: Bench): Bench { }; if (codspeedRunnerMode === "instrumented") { - runInstrumentedBench(bench, rootCallingFile); + setupCodspeedInstrumentedBench(bench, rootCallingFile); } else if (codspeedRunnerMode === "walltime") { - runWalltimeBench(bench, rootCallingFile); + setupCodspeedWalltimeBench(bench, rootCallingFile); } return bench; diff --git a/packages/tinybench-plugin/src/index.unit.test.ts b/packages/tinybench-plugin/src/index.unit.test.ts index 7bb6954b..0cf303d8 100644 --- a/packages/tinybench-plugin/src/index.unit.test.ts +++ b/packages/tinybench-plugin/src/index.unit.test.ts @@ -3,7 +3,7 @@ import { beforeEach, describe, expect, it, vi } from "vitest"; import { withCodSpeed } from "."; const mockInstrumented = vi.hoisted(() => ({ - runInstrumentedBench: vi.fn(), + setupCodspeedInstrumentedBench: vi.fn(), })); vi.mock("./instrumented", () => ({ @@ -11,7 +11,7 @@ vi.mock("./instrumented", () => ({ })); const mockWalltime = vi.hoisted(() => ({ - runWalltimeBench: vi.fn(), + setupCodspeedWalltimeBench: vi.fn(), })); vi.mock("./walltime", () => ({ @@ -44,8 +44,8 @@ describe("withCodSpeed behavior without different codspeed modes", () => { withCodSpeed(new Bench()); - expect(mockInstrumented.runInstrumentedBench).toHaveBeenCalled(); - expect(mockWalltime.runWalltimeBench).not.toHaveBeenCalled(); + expect(mockInstrumented.setupCodspeedInstrumentedBench).toHaveBeenCalled(); + expect(mockWalltime.setupCodspeedWalltimeBench).not.toHaveBeenCalled(); }); it("should run in walltime mode when CODSPEED_RUNNER_MODE=walltime", async () => { @@ -54,7 +54,9 @@ describe("withCodSpeed behavior without different codspeed modes", () => { withCodSpeed(new Bench()); - expect(mockInstrumented.runInstrumentedBench).not.toHaveBeenCalled(); - expect(mockWalltime.runWalltimeBench).toHaveBeenCalled(); + expect( + mockInstrumented.setupCodspeedInstrumentedBench + ).not.toHaveBeenCalled(); + expect(mockWalltime.setupCodspeedWalltimeBench).toHaveBeenCalled(); }); }); diff --git a/packages/tinybench-plugin/src/instrumented.ts b/packages/tinybench-plugin/src/instrumented.ts index e4a33953..7462aa87 100644 --- a/packages/tinybench-plugin/src/instrumented.ts +++ b/packages/tinybench-plugin/src/instrumented.ts @@ -2,18 +2,40 @@ import { InstrumentHooks, mongoMeasurement, optimizeFunction, - teardownCore, } from "@codspeed/core"; import { Bench, Fn, FnOptions, Task } from 
"tinybench"; -import { getTaskUri } from "./uri"; +import { BaseBenchRunner } from "./shared"; -declare const __VERSION__: string; - -export function runInstrumentedBench( +export function setupCodspeedInstrumentedBench( bench: Bench, rootCallingFile: string ): void { - const runTaskAsync = async (task: Task, uri: string): Promise => { + const runner = new InstrumentedBenchRunner(bench, rootCallingFile); + runner.setupBenchMethods(); +} + +class InstrumentedBenchRunner extends BaseBenchRunner { + protected getModeName(): string { + return "instrumented mode"; + } + + private taskCompletionMessage() { + return InstrumentHooks.isInstrumented() ? "Measured" : "Checked"; + } + + private wrapFunctionWithFrame(fn: Fn, isAsync: boolean): Fn { + if (isAsync) { + return async function __codspeed_root_frame__() { + await fn(); + }; + } else { + return function __codspeed_root_frame__() { + fn(); + }; + } + } + + protected async runTaskAsync(task: Task, uri: string): Promise { const { fnOpts, fn } = task as unknown as { fnOpts?: FnOptions; fn: Fn }; await fnOpts?.beforeAll?.call(task, "run"); @@ -25,79 +47,38 @@ export function runInstrumentedBench( await fnOpts?.beforeEach?.call(task, "run"); await mongoMeasurement.start(uri); - await (async function __codspeed_root_frame__() { - global.gc?.(); - InstrumentHooks.startBenchmark(); - await fn(); - InstrumentHooks.stopBenchmark(); - InstrumentHooks.setExecutedBenchmark(process.pid, uri); - })(); + global.gc?.(); + await this.wrapWithInstrumentHooksAsync( + this.wrapFunctionWithFrame(fn, true), + uri + ); await mongoMeasurement.stop(uri); await fnOpts?.afterEach?.call(task, "run"); await fnOpts?.afterAll?.call(task, "run"); - }; - // Sync task runner - const runTaskSync = (task: Task, uri: string): void => { + this.logTaskCompletion(uri, this.taskCompletionMessage()); + } + + protected runTaskSync(task: Task, uri: string): void { const { fnOpts, fn } = task as unknown as { fnOpts?: FnOptions; fn: Fn }; fnOpts?.beforeAll?.call(task, "run"); fnOpts?.beforeEach?.call(task, "run"); - (function __codspeed_root_frame__() { - global.gc?.(); - InstrumentHooks.startBenchmark(); - fn(); - InstrumentHooks.stopBenchmark(); - InstrumentHooks.setExecutedBenchmark(process.pid, uri); - })(); + this.wrapWithInstrumentHooks(this.wrapFunctionWithFrame(fn, false), uri); fnOpts?.afterEach?.call(task, "run"); fnOpts?.afterAll?.call(task, "run"); - }; - - bench.run = async () => { - logStart(); - for (const task of bench.tasks) { - const uri = getTaskUri(bench, task.name, rootCallingFile); - await runTaskAsync(task, uri); - logTaskCompletion(uri); - } - - return logEnd(); - }; - - bench.runSync = () => { - logStart(); - - for (const task of bench.tasks) { - const uri = getTaskUri(bench, task.name, rootCallingFile); - runTaskSync(task, uri); - logTaskCompletion(uri); - } + this.logTaskCompletion(uri, this.taskCompletionMessage()); + } - return logEnd(); - }; - - const logStart = () => { - console.log( - `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (instrumented mode)` - ); - }; - - const logTaskCompletion = (uri: string) => { - console.log( - ` ✔ ${ - InstrumentHooks.isInstrumented() ? 
"Measured" : "Checked" - } ${uri}` - ); - }; + protected finalizeAsyncRun(): Task[] { + return this.finalizeBenchRun(); + } - const logEnd = () => { - teardownCore(); - console.log(`[CodSpeed] Done running ${bench.tasks.length} benches.`); - return bench.tasks; - }; + protected finalizeSyncRun(): Task[] { + return this.finalizeBenchRun(); + } } diff --git a/packages/tinybench-plugin/src/shared.ts b/packages/tinybench-plugin/src/shared.ts new file mode 100644 index 00000000..bc38e3b6 --- /dev/null +++ b/packages/tinybench-plugin/src/shared.ts @@ -0,0 +1,89 @@ +import { InstrumentHooks, setupCore, teardownCore } from "@codspeed/core"; +import { Bench, Fn, Task } from "tinybench"; +import { getTaskUri } from "./uri"; + +declare const __VERSION__: string; + +export abstract class BaseBenchRunner { + protected bench: Bench; + protected rootCallingFile: string; + + constructor(bench: Bench, rootCallingFile: string) { + this.bench = bench; + this.rootCallingFile = rootCallingFile; + } + + private setupBenchRun(): void { + setupCore(); + this.logStart(); + } + + private logStart(): void { + console.log( + `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (${this.getModeName()})` + ); + } + + protected getTaskUri(task: Task): string { + return getTaskUri(this.bench, task.name, this.rootCallingFile); + } + + protected logTaskCompletion(uri: string, status: string): void { + console.log(`[CodSpeed] ${status} ${uri}`); + } + + protected finalizeBenchRun(): Task[] { + teardownCore(); + console.log(`[CodSpeed] Done running ${this.bench.tasks.length} benches.`); + return this.bench.tasks; + } + + protected wrapWithInstrumentHooks(fn: () => T, uri: string): T { + InstrumentHooks.startBenchmark(); + const result = fn(); + InstrumentHooks.stopBenchmark(); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); + return result; + } + + protected async wrapWithInstrumentHooksAsync( + fn: Fn, + uri: string + ): Promise { + InstrumentHooks.startBenchmark(); + const result = await fn(); + InstrumentHooks.stopBenchmark(); + InstrumentHooks.setExecutedBenchmark(process.pid, uri); + return result; + } + + protected abstract getModeName(): string; + protected abstract runTaskAsync(task: Task, uri: string): Promise; + protected abstract runTaskSync(task: Task, uri: string): void; + protected abstract finalizeAsyncRun(): Task[]; + protected abstract finalizeSyncRun(): Task[]; + + public setupBenchMethods(): void { + this.bench.run = async () => { + this.setupBenchRun(); + + for (const task of this.bench.tasks) { + const uri = this.getTaskUri(task); + await this.runTaskAsync(task, uri); + } + + return this.finalizeAsyncRun(); + }; + + this.bench.runSync = () => { + this.setupBenchRun(); + + for (const task of this.bench.tasks) { + const uri = this.getTaskUri(task); + this.runTaskSync(task, uri); + } + + return this.finalizeSyncRun(); + }; + } +} diff --git a/packages/tinybench-plugin/src/walltime.ts b/packages/tinybench-plugin/src/walltime.ts index 7418eb29..0c86e1b3 100644 --- a/packages/tinybench-plugin/src/walltime.ts +++ b/packages/tinybench-plugin/src/walltime.ts @@ -1,6 +1,5 @@ import { calculateQuantiles, - InstrumentHooks, mongoMeasurement, msToNs, msToS, @@ -9,155 +8,128 @@ import { type BenchmarkStats, } from "@codspeed/core"; import { Bench, Fn, Task, TaskResult } from "tinybench"; -import { getTaskUri } from "./uri"; +import { BaseBenchRunner } from "./shared"; -declare const __VERSION__: string; - -export function runWalltimeBench(bench: Bench, rootCallingFile: string): void { - bench.run = 
async () => { - logStart(); - const codspeedBenchmarks: CodspeedBenchmark[] = []; +export function setupCodspeedWalltimeBench( + bench: Bench, + rootCallingFile: string +): void { + const runner = new WalltimeBenchRunner(bench, rootCallingFile); + runner.setupBenchMethods(); +} - // Collect and report walltime data - for (const task of bench.tasks) { - const uri = getTaskUri(bench, task.name, rootCallingFile); +class WalltimeBenchRunner extends BaseBenchRunner { + private codspeedBenchmarks: CodspeedBenchmark[] = []; - // Override the function under test to add a static frame - wrapTaskFunction(task, true); + protected getModeName(): string { + return "walltime mode"; + } - // run the warmup of the task right before its actual run - if (bench.opts.warmup) { - await task.warmup(); - } + protected async runTaskAsync(task: Task, uri: string): Promise { + // Override the function under test to add a static frame + this.wrapTaskFunction(task, true); - await mongoMeasurement.start(uri); - InstrumentHooks.startBenchmark(); - await task.run(); - InstrumentHooks.stopBenchmark(); - await mongoMeasurement.stop(uri); - - registerCodspeedBenchmarkFromTask( - codspeedBenchmarks, - task, - bench, - rootCallingFile - ); + // run the warmup of the task right before its actual run + if (this.bench.opts.warmup) { + await task.warmup(); } - return finalizeWalltimeRun(bench, codspeedBenchmarks, true); - }; - - bench.runSync = () => { - logStart(); - const codspeedBenchmarks: CodspeedBenchmark[] = []; + await mongoMeasurement.start(uri); + await this.wrapWithInstrumentHooksAsync(() => task.run(), uri); + await mongoMeasurement.stop(uri); - for (const task of bench.tasks) { - // Override the function under test to add a static frame - wrapTaskFunction(task, false); - - if (bench.opts.warmup) { - task.warmup(); - } + this.registerCodspeedBenchmarkFromTask(task); + } - InstrumentHooks.startBenchmark(); - task.runSync(); - InstrumentHooks.stopBenchmark(); + protected runTaskSync(task: Task, uri: string): void { + // Override the function under test to add a static frame + this.wrapTaskFunction(task, false); - registerCodspeedBenchmarkFromTask( - codspeedBenchmarks, - task, - bench, - rootCallingFile - ); + if (this.bench.opts.warmup) { + task.warmup(); } - return finalizeWalltimeRun(bench, codspeedBenchmarks, false); - }; -} + this.wrapWithInstrumentHooks(() => task.runSync(), uri); -function logStart() { - console.log( - `[CodSpeed] running with @codspeed/tinybench v${__VERSION__} (walltime mode)` - ); -} + this.registerCodspeedBenchmarkFromTask(task); + } -const TINYBENCH_WARMUP_DEFAULT = 16; + protected finalizeAsyncRun(): Task[] { + return this.finalizeWalltimeRun(true); + } -function registerCodspeedBenchmarkFromTask( - codspeedBenchmarks: CodspeedBenchmark[], - task: Task, - bench: Bench, - rootCallingFile: string -): void { - const uri = getTaskUri(bench, task.name, rootCallingFile); + protected finalizeSyncRun(): Task[] { + return this.finalizeWalltimeRun(false); + } - if (!task.result) { - console.warn(` ⚠ No result data available for ${uri}`); - return; + private wrapTaskFunction(task: Task, isAsync: boolean): void { + const { fn } = task as unknown as { fn: Fn }; + if (isAsync) { + // eslint-disable-next-line no-inner-declarations + async function __codspeed_root_frame__() { + await fn(); + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (task as any).fn = __codspeed_root_frame__; + } else { + // eslint-disable-next-line no-inner-declarations + function __codspeed_root_frame__() { + 
fn(); + } + // eslint-disable-next-line @typescript-eslint/no-explicit-any + (task as any).fn = __codspeed_root_frame__; + } } - const warmupIterations = bench.opts.warmup - ? bench.opts.warmupIterations ?? TINYBENCH_WARMUP_DEFAULT - : 0; - const stats = convertTinybenchResultToBenchmarkStats( - task.result, - warmupIterations - ); - - codspeedBenchmarks.push({ - name: task.name, - uri, - config: { - max_rounds: bench.opts.iterations ?? null, - max_time_ns: bench.opts.time ? msToNs(bench.opts.time) : null, - min_round_time_ns: null, // tinybench does not have an option for this - warmup_time_ns: - bench.opts.warmup && bench.opts.warmupTime - ? msToNs(bench.opts.warmupTime) - : null, - }, - stats, - }); - - console.log(` ✔ Collected walltime data for ${uri}`); - InstrumentHooks.setExecutedBenchmark(process.pid, uri); -} + private registerCodspeedBenchmarkFromTask(task: Task): void { + const uri = this.getTaskUri(task); -function wrapTaskFunction(task: Task, isAsync: boolean): void { - const { fn } = task as unknown as { fn: Fn }; - if (isAsync) { - // eslint-disable-next-line no-inner-declarations - async function __codspeed_root_frame__() { - await fn(); - } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (task as any).fn = __codspeed_root_frame__; - } else { - // eslint-disable-next-line no-inner-declarations - function __codspeed_root_frame__() { - fn(); + if (!task.result) { + console.warn(` ⚠ No result data available for ${uri}`); + return; } - // eslint-disable-next-line @typescript-eslint/no-explicit-any - (task as any).fn = __codspeed_root_frame__; - } -} -function finalizeWalltimeRun( - bench: Bench, - benchmarks: CodspeedBenchmark[], - isAsync: boolean -) { - // Write results to JSON file using core function - if (benchmarks.length > 0) { - writeWalltimeResults(benchmarks, isAsync); + const warmupIterations = this.bench.opts.warmup + ? this.bench.opts.warmupIterations ?? TINYBENCH_WARMUP_DEFAULT + : 0; + const stats = convertTinybenchResultToBenchmarkStats( + task.result, + warmupIterations + ); + + this.codspeedBenchmarks.push({ + name: task.name, + uri, + config: { + max_rounds: this.bench.opts.iterations ?? null, + max_time_ns: this.bench.opts.time ? msToNs(this.bench.opts.time) : null, + min_round_time_ns: null, // tinybench does not have an option for this + warmup_time_ns: + this.bench.opts.warmup && this.bench.opts.warmupTime + ? 
msToNs(this.bench.opts.warmupTime) + : null, + }, + stats, + }); + + this.logTaskCompletion(uri, "Collected walltime data for"); } - console.log( - `[CodSpeed] Done collecting walltime data for ${bench.tasks.length} benches.` - ); - return bench.tasks; + private finalizeWalltimeRun(isAsync: boolean): Task[] { + // Write results to JSON file using core function + if (this.codspeedBenchmarks.length > 0) { + writeWalltimeResults(this.codspeedBenchmarks, isAsync); + } + + console.log( + `[CodSpeed] Done collecting walltime data for ${this.bench.tasks.length} benches.` + ); + return this.bench.tasks; + } } +const TINYBENCH_WARMUP_DEFAULT = 16; + function convertTinybenchResultToBenchmarkStats( result: TaskResult, warmupIterations: number diff --git a/packages/tinybench-plugin/tests/__snapshots__/index.integ.test.ts.snap b/packages/tinybench-plugin/tests/__snapshots__/index.integ.test.ts.snap index 5a41b12c..bed46639 100644 --- a/packages/tinybench-plugin/tests/__snapshots__/index.integ.test.ts.snap +++ b/packages/tinybench-plugin/tests/__snapshots__/index.integ.test.ts.snap @@ -7,10 +7,10 @@ exports[`Benchmark.Suite > check console output(instrumented=%p) false 1`] = ` "[CodSpeed] running with @codspeed/tinybench v1.0.0 (instrumented mode)", ], [ - " ✔ Checked packages/tinybench-plugin/tests/index.integ.test.ts::RegExp", + "[CodSpeed] Checked packages/tinybench-plugin/tests/index.integ.test.ts::RegExp", ], [ - " ✔ Checked packages/tinybench-plugin/tests/index.integ.test.ts::RegExp2", + "[CodSpeed] Checked packages/tinybench-plugin/tests/index.integ.test.ts::RegExp2", ], [ "[CodSpeed] Done running 2 benches.", @@ -24,10 +24,10 @@ exports[`Benchmark.Suite > check console output(instrumented=%p) true 1`] = ` { "log": [ [ - " ✔ Measured packages/tinybench-plugin/tests/index.integ.test.ts::RegExp", + "[CodSpeed] Measured packages/tinybench-plugin/tests/index.integ.test.ts::RegExp", ], [ - " ✔ Measured packages/tinybench-plugin/tests/index.integ.test.ts::RegExp2", + "[CodSpeed] Measured packages/tinybench-plugin/tests/index.integ.test.ts::RegExp2", ], [ "[CodSpeed] Done running 2 benches.", diff --git a/packages/tinybench-plugin/tests/index.integ.test.ts b/packages/tinybench-plugin/tests/index.integ.test.ts index 1b513703..b0d44751 100644 --- a/packages/tinybench-plugin/tests/index.integ.test.ts +++ b/packages/tinybench-plugin/tests/index.integ.test.ts @@ -215,7 +215,7 @@ describe("Benchmark.Suite", () => { /o/.test("Hello World!"); }); - expect(mockCore.setupCore).toHaveBeenCalledTimes(1); + expect(mockCore.setupCore).not.toHaveBeenCalled(); expect(mockCore.teardownCore).not.toHaveBeenCalled(); await bench.run(); From f00a51213e14a2c45838969bfcdc156e51461714 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Sun, 14 Sep 2025 18:09:22 +0200 Subject: [PATCH 13/22] refactor(vitest-plugin): make the walltimerunner class less cluttered --- packages/vitest-plugin/rollup.config.ts | 2 +- packages/vitest-plugin/src/walltime.ts | 186 ------------------- packages/vitest-plugin/src/walltime/index.ts | 37 ++++ packages/vitest-plugin/src/walltime/utils.ts | 130 +++++++++++++ 4 files changed, 168 insertions(+), 187 deletions(-) delete mode 100644 packages/vitest-plugin/src/walltime.ts create mode 100644 packages/vitest-plugin/src/walltime/index.ts create mode 100644 packages/vitest-plugin/src/walltime/utils.ts diff --git a/packages/vitest-plugin/rollup.config.ts b/packages/vitest-plugin/rollup.config.ts index 03e611ec..5fbbc655 100644 --- a/packages/vitest-plugin/rollup.config.ts +++ 
b/packages/vitest-plugin/rollup.config.ts @@ -27,7 +27,7 @@ export default defineConfig([ external: ["@codspeed/core", /^vitest/], }, { - input: "src/walltime.ts", + input: "src/walltime/index.ts", output: { file: "dist/walltime.mjs", format: "es" }, plugins: jsPlugins(pkg.version), external: ["@codspeed/core", /^vitest/], diff --git a/packages/vitest-plugin/src/walltime.ts b/packages/vitest-plugin/src/walltime.ts deleted file mode 100644 index 9eda348b..00000000 --- a/packages/vitest-plugin/src/walltime.ts +++ /dev/null @@ -1,186 +0,0 @@ -import { - calculateQuantiles, - msToNs, - msToS, - writeWalltimeResults, - type Benchmark, - type BenchmarkStats, -} from "@codspeed/core"; -import { - type Benchmark as VitestBenchmark, - type RunnerTaskResult, - type RunnerTestSuite, -} from "vitest"; -import { NodeBenchmarkRunner } from "vitest/runners"; -import { getBenchOptions } from "vitest/suite"; -import { - isVitestTaskBenchmark, - patchRootSuiteWithFullFilePath, -} from "./common"; - -declare const __VERSION__: string; - -/** - * WalltimeRunner uses Vitest's default benchmark execution - * and extracts results from the suite after completion - */ -export class WalltimeRunner extends NodeBenchmarkRunner { - async runSuite(suite: RunnerTestSuite): Promise { - patchRootSuiteWithFullFilePath(suite); - - console.log( - `[CodSpeed] running with @codspeed/vitest-plugin v${__VERSION__} (walltime mode)` - ); - - // Let Vitest's default benchmark runner handle execution - await super.runSuite(suite); - - // Extract benchmark results from the completed suite - const benchmarks = await this.extractBenchmarkResults(suite); - - if (benchmarks.length > 0) { - writeWalltimeResults(benchmarks); - console.log( - `[CodSpeed] Done collecting walltime data for ${benchmarks.length} benches.` - ); - } else { - console.warn( - `[CodSpeed] No benchmark results found after suite execution` - ); - } - } - - private async extractBenchmarkResults( - suite: RunnerTestSuite, - parentPath = "" - ): Promise { - const benchmarks: Benchmark[] = []; - const currentPath = parentPath - ? `${parentPath}::${suite.name}` - : suite.name; - - for (const task of suite.tasks) { - if (isVitestTaskBenchmark(task) && task.result?.state === "pass") { - const benchmark = await this.processBenchmarkTask(task, currentPath); - if (benchmark) { - benchmarks.push(benchmark); - } - } else if (task.type === "suite") { - const nestedBenchmarks = await this.extractBenchmarkResults( - task, - currentPath - ); - benchmarks.push(...nestedBenchmarks); - } - } - - return benchmarks; - } - - private async processBenchmarkTask( - task: VitestBenchmark, - suitePath: string - ): Promise { - const uri = `${suitePath}::${task.name}`; - - const result = task.result; - if (!result) { - console.warn(` ⚠ No result data available for ${uri}`); - return null; - } - - try { - // Get tinybench configuration options from vitest - const benchOptions = getBenchOptions(task); - - const stats = this.convertVitestResultToBenchmarkStats( - result, - benchOptions - ); - - if (stats === null) { - console.log(` ✔ No walltime data to collect for ${uri}`); - return null; - } - - const coreBenchmark: Benchmark = { - name: task.name, - uri, - config: { - max_rounds: benchOptions.iterations ?? null, - max_time_ns: benchOptions.time ? msToNs(benchOptions.time) : null, - min_round_time_ns: null, // tinybench does not have an option for this - warmup_time_ns: - benchOptions.warmupIterations !== 0 && benchOptions.warmupTime - ? 
msToNs(benchOptions.warmupTime) - : null, - }, - stats, - }; - - console.log(` ✔ Collected walltime data for ${uri}`); - return coreBenchmark; - } catch (error) { - console.warn( - ` ⚠ Failed to process benchmark result for ${uri}:`, - error - ); - return null; - } - } - - private convertVitestResultToBenchmarkStats( - result: RunnerTaskResult, - benchOptions: { - time?: number; - warmupTime?: number; - warmupIterations?: number; - iterations?: number; - } - ): BenchmarkStats | null { - const benchmark = result.benchmark; - - if (!benchmark) { - throw new Error("No benchmark data available in result"); - } - - const { totalTime, min, max, mean, sd, samples } = benchmark; - - // Get individual sample times in nanoseconds and sort them - const sortedTimesNs = samples.map(msToNs).sort((a, b) => a - b); - const meanNs = msToNs(mean); - const stdevNs = msToNs(sd); - - if (sortedTimesNs.length == 0) { - // Sometimes the benchmarks can be completely optimized out and not even run, but its beforeEach and afterEach hooks are still executed, and the task is still considered a success. - // This is the case for the hooks.bench.ts example in this package - return null; - } - - const { - q1_ns, - q3_ns, - median_ns, - iqr_outlier_rounds, - stdev_outlier_rounds, - } = calculateQuantiles({ meanNs, stdevNs, sortedTimesNs }); - - return { - min_ns: msToNs(min), - max_ns: msToNs(max), - mean_ns: meanNs, - stdev_ns: stdevNs, - q1_ns, - median_ns, - q3_ns, - total_time: msToS(totalTime), - iter_per_round: 1, // as there is only one round in tinybench, we define that there were n rounds of 1 iteration - rounds: sortedTimesNs.length, - iqr_outlier_rounds, - stdev_outlier_rounds, - warmup_iters: benchOptions.warmupIterations ?? 0, - }; - } -} - -export default WalltimeRunner; diff --git a/packages/vitest-plugin/src/walltime/index.ts b/packages/vitest-plugin/src/walltime/index.ts new file mode 100644 index 00000000..96f8a923 --- /dev/null +++ b/packages/vitest-plugin/src/walltime/index.ts @@ -0,0 +1,37 @@ +import { setupCore, writeWalltimeResults } from "@codspeed/core"; +import { type RunnerTestSuite } from "vitest"; +import { NodeBenchmarkRunner } from "vitest/runners"; +import { patchRootSuiteWithFullFilePath } from "../common"; +import { extractBenchmarkResults } from "./utils"; + +/** + * WalltimeRunner uses Vitest's default benchmark execution + * and extracts results from the suite after completion + */ +export class WalltimeRunner extends NodeBenchmarkRunner { + private isTinybenchHookedWithCodspeed = false; + private benchmarkUris = new Map(); + + async runSuite(suite: RunnerTestSuite): Promise { + patchRootSuiteWithFullFilePath(suite); + + setupCore(); + + await super.runSuite(suite); + + const benchmarks = await extractBenchmarkResults(suite); + + if (benchmarks.length > 0) { + writeWalltimeResults(benchmarks); + console.log( + `[CodSpeed] Done collecting walltime data for ${benchmarks.length} benches.` + ); + } else { + console.warn( + `[CodSpeed] No benchmark results found after suite execution` + ); + } + } +} + +export default WalltimeRunner; diff --git a/packages/vitest-plugin/src/walltime/utils.ts b/packages/vitest-plugin/src/walltime/utils.ts new file mode 100644 index 00000000..db31d962 --- /dev/null +++ b/packages/vitest-plugin/src/walltime/utils.ts @@ -0,0 +1,130 @@ +import { + calculateQuantiles, + msToNs, + msToS, + type Benchmark, + type BenchmarkStats, +} from "@codspeed/core"; +import { + type Benchmark as VitestBenchmark, + type RunnerTaskResult, + type RunnerTestSuite, +} from 
"vitest"; +import { getBenchOptions } from "vitest/suite"; +import { isVitestTaskBenchmark } from "../common"; + +export async function extractBenchmarkResults( + suite: RunnerTestSuite, + parentPath = "" +): Promise { + const benchmarks: Benchmark[] = []; + const currentPath = parentPath ? `${parentPath}::${suite.name}` : suite.name; + + for (const task of suite.tasks) { + if (isVitestTaskBenchmark(task) && task.result?.state === "pass") { + const benchmark = await processBenchmarkTask(task, currentPath); + if (benchmark) { + benchmarks.push(benchmark); + } + } else if (task.type === "suite") { + const nestedBenchmarks = await extractBenchmarkResults(task, currentPath); + benchmarks.push(...nestedBenchmarks); + } + } + + return benchmarks; +} + +async function processBenchmarkTask( + task: VitestBenchmark, + suitePath: string +): Promise { + const uri = `${suitePath}::${task.name}`; + + const result = task.result; + if (!result) { + console.warn(` ⚠ No result data available for ${uri}`); + return null; + } + + try { + // Get tinybench configuration options from vitest + const benchOptions = getBenchOptions(task); + + const stats = convertVitestResultToBenchmarkStats(result, benchOptions); + + if (stats === null) { + console.log(` ✔ No walltime data to collect for ${uri}`); + return null; + } + + const coreBenchmark: Benchmark = { + name: task.name, + uri, + config: { + max_rounds: benchOptions.iterations ?? null, + max_time_ns: benchOptions.time ? msToNs(benchOptions.time) : null, + min_round_time_ns: null, // tinybench does not have an option for this + warmup_time_ns: + benchOptions.warmupIterations !== 0 && benchOptions.warmupTime + ? msToNs(benchOptions.warmupTime) + : null, + }, + stats, + }; + + console.log(` ✔ Collected walltime data for ${uri}`); + return coreBenchmark; + } catch (error) { + console.warn(` ⚠ Failed to process benchmark result for ${uri}:`, error); + return null; + } +} + +function convertVitestResultToBenchmarkStats( + result: RunnerTaskResult, + benchOptions: { + time?: number; + warmupTime?: number; + warmupIterations?: number; + iterations?: number; + } +): BenchmarkStats | null { + const benchmark = result.benchmark; + + if (!benchmark) { + throw new Error("No benchmark data available in result"); + } + + const { totalTime, min, max, mean, sd, samples } = benchmark; + + // Get individual sample times in nanoseconds and sort them + const sortedTimesNs = samples.map(msToNs).sort((a, b) => a - b); + const meanNs = msToNs(mean); + const stdevNs = msToNs(sd); + + if (sortedTimesNs.length == 0) { + // Sometimes the benchmarks can be completely optimized out and not even run, but its beforeEach and afterEach hooks are still executed, and the task is still considered a success. + // This is the case for the hooks.bench.ts example in this package + return null; + } + + const { q1_ns, q3_ns, median_ns, iqr_outlier_rounds, stdev_outlier_rounds } = + calculateQuantiles({ meanNs, stdevNs, sortedTimesNs }); + + return { + min_ns: msToNs(min), + max_ns: msToNs(max), + mean_ns: meanNs, + stdev_ns: stdevNs, + q1_ns, + median_ns, + q3_ns, + total_time: msToS(totalTime), + iter_per_round: 1, // as there is only one round in tinybench, we define that there were n rounds of 1 iteration + rounds: sortedTimesNs.length, + iqr_outlier_rounds, + stdev_outlier_rounds, + warmup_iters: benchOptions.warmupIterations ?? 
0, + }; +} From 453da47cb2c90340f0aa95443b510721ba1235a9 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Sun, 14 Sep 2025 18:10:03 +0200 Subject: [PATCH 14/22] feat(vitest-plugin): add perf profiling for vitest plugin --- packages/vitest-plugin/package.json | 2 + packages/vitest-plugin/src/walltime/index.ts | 84 +++++++++++++++++++- pnpm-lock.yaml | 3 + 3 files changed, 86 insertions(+), 3 deletions(-) diff --git a/packages/vitest-plugin/package.json b/packages/vitest-plugin/package.json index 3a4c336c..d3535bf2 100644 --- a/packages/vitest-plugin/package.json +++ b/packages/vitest-plugin/package.json @@ -31,12 +31,14 @@ "@codspeed/core": "workspace:^5.0.0" }, "peerDependencies": { + "tinybench": "^2.9.0", "vite": "^4.2.0 || ^5.0.0 || ^6.0.0 || ^7.0.0", "vitest": ">=3.2" }, "devDependencies": { "@total-typescript/shoehorn": "^0.1.1", "execa": "^8.0.1", + "tinybench": "^2.9.0", "vite": "^7.0.0", "vitest": "^3.2.4" } diff --git a/packages/vitest-plugin/src/walltime/index.ts b/packages/vitest-plugin/src/walltime/index.ts index 96f8a923..fa4d8524 100644 --- a/packages/vitest-plugin/src/walltime/index.ts +++ b/packages/vitest-plugin/src/walltime/index.ts @@ -1,5 +1,14 @@ -import { setupCore, writeWalltimeResults } from "@codspeed/core"; -import { type RunnerTestSuite } from "vitest"; +import { + InstrumentHooks, + setupCore, + writeWalltimeResults, +} from "@codspeed/core"; +import { Fn } from "tinybench"; +import { + RunnerTaskEventPack, + RunnerTaskResultPack, + type RunnerTestSuite, +} from "vitest"; import { NodeBenchmarkRunner } from "vitest/runners"; import { patchRootSuiteWithFullFilePath } from "../common"; import { extractBenchmarkResults } from "./utils"; @@ -10,10 +19,13 @@ import { extractBenchmarkResults } from "./utils"; */ export class WalltimeRunner extends NodeBenchmarkRunner { private isTinybenchHookedWithCodspeed = false; - private benchmarkUris = new Map(); + private suiteUris = new Map(); + /// Suite ID of the currently running suite, to allow constructing the URI in the context of tinybench tasks + private currentSuiteId: string | null = null; async runSuite(suite: RunnerTestSuite): Promise { patchRootSuiteWithFullFilePath(suite); + this.populateBenchmarkUris(suite); setupCore(); @@ -32,6 +44,72 @@ export class WalltimeRunner extends NodeBenchmarkRunner { ); } } + + private populateBenchmarkUris(suite: RunnerTestSuite, parentPath = ""): void { + const currentPath = + parentPath !== "" ? 
`${parentPath}::${suite.name}` : suite.name; + + for (const task of suite.tasks) { + if (task.type === "suite") { + this.suiteUris.set(task.id, `${currentPath}::${task.name}`); + this.populateBenchmarkUris(task, currentPath); + } + } + } + + async importTinybench(): Promise { + const tinybench = await super.importTinybench(); + + if (this.isTinybenchHookedWithCodspeed) { + return tinybench; + } + this.isTinybenchHookedWithCodspeed = true; + + const originalRun = tinybench.Task.prototype.run; + + const getSuiteUri = (): string => { + if (this.currentSuiteId === null) { + throw new Error("currentSuiteId is null - something went wrong"); + } + return this.suiteUris.get(this.currentSuiteId) || ""; + }; + + tinybench.Task.prototype.run = async function () { + const { fn } = this as { fn: Fn }; + const suiteUri = getSuiteUri(); + + function __codspeed_root_frame__() { + return fn(); + } + (this as { fn: Fn }).fn = __codspeed_root_frame__; + + InstrumentHooks.startBenchmark(); + await originalRun.call(this); + InstrumentHooks.stopBenchmark(); + + // Look up the URI by task name + const uri = `${suiteUri}::${this.name}`; + InstrumentHooks.setExecutedBenchmark(process.pid, uri); + + return this; + }; + + return tinybench; + } + + // Allow tinybench to retrieve the path to the currently running suite + async onTaskUpdate( + _: RunnerTaskResultPack[], + events: RunnerTaskEventPack[] + ): Promise { + events.map((event) => { + const [id, eventName] = event; + + if (eventName === "suite-prepare") { + this.currentSuiteId = id; + } + }); + } } export default WalltimeRunner; diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 98e6f532..4bbeeab1 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -306,6 +306,9 @@ importers: execa: specifier: ^8.0.1 version: 8.0.1 + tinybench: + specifier: ^2.9.0 + version: 2.9.0 vite: specifier: ^7.0.0 version: 7.1.3(@types/node@20.19.11) From 32fa63e3e2ca8e93c165add1547daf577a0fd420 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Wed, 27 Aug 2025 15:44:15 +0200 Subject: [PATCH 15/22] ci: allow publishing alpha releases from tag --- .github/workflows/release.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 0bddb0cb..a7ed1904 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -29,7 +29,12 @@ jobs: run: pnpm moon run :build - name: Publish the libraries - run: pnpm publish -r --access=public --no-git-checks + run: | + if [[ "${{ github.ref }}" == *"-alpha"* ]]; then + pnpm publish -r --access=public --no-git-checks --tag=alpha + else + pnpm publish -r --access=public --no-git-checks + fi env: NPM_CONFIG_PROVENANCE: true NODE_AUTH_TOKEN: ${{ secrets.NPM_TOKEN }} From 835fb0242a3945e5a7dc1b4e7fb1fc5f46e4b915 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Mon, 15 Sep 2025 16:52:12 +0200 Subject: [PATCH 16/22] chore(core): print error when failing to load native core --- packages/core/src/native_core/index.ts | 3 +++ 1 file changed, 3 insertions(+) diff --git a/packages/core/src/native_core/index.ts b/packages/core/src/native_core/index.ts index 9cbb4a90..64459a13 100644 --- a/packages/core/src/native_core/index.ts +++ b/packages/core/src/native_core/index.ts @@ -1,4 +1,5 @@ import path from "path"; +import { logDebug } from "../utils"; import { InstrumentHooks } from "./instruments/hooks"; import { LinuxPerf } from "./linux_perf/linux_perf"; interface NativeCore { @@ -21,6 +22,8 @@ try { isBound: true, }; } catch (e) { + logDebug("Failed 
to bind native core, instruments will not work."); + logDebug(e); native_core = { LinuxPerf: class LinuxPerf { start() { From 471a62f9f603196344dcb3b41f7dad19564fe64a Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Tue, 16 Sep 2025 12:18:23 +0200 Subject: [PATCH 17/22] ci(core): build artifacts for both x86 and arm64 during release --- .github/workflows/release.yml | 29 +++++++++++++++++++++++++++++ 1 file changed, 29 insertions(+) diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index a7ed1904..37997c60 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -10,8 +10,31 @@ permissions: contents: write jobs: + build-native-arm: + runs-on: codspeedhq-arm64-ubuntu-22.04 + + steps: + - uses: actions/checkout@v4 + with: + fetch-depth: 0 + submodules: true + - uses: pnpm/action-setup@v2 + - uses: actions/setup-node@v3 + with: + cache: pnpm + node-version-file: .nvmrc + - run: pnpm install --frozen-lockfile --prefer-offline + - name: Build native code on ARM + run: pnpm moon core:build-native-addon + - name: Upload ARM prebuilds + uses: actions/upload-artifact@v4 + with: + name: arm-prebuilds + path: packages/core/prebuilds + build: runs-on: ubuntu-latest + needs: build-native-arm steps: - uses: actions/checkout@v4 @@ -28,6 +51,12 @@ jobs: - name: Build the libraries run: pnpm moon run :build + - name: Download ARM prebuilds + uses: actions/download-artifact@v4 + with: + name: arm-prebuilds + path: packages/core/prebuilds + - name: Publish the libraries run: | if [[ "${{ github.ref }}" == *"-alpha"* ]]; then From bb7c9d9f81c515b5876bb102053207578023551d Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Tue, 16 Sep 2025 16:40:38 +0200 Subject: [PATCH 18/22] ci(core): run tests on arm as well --- .github/workflows/ci.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 720726b7..21d941a0 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -8,7 +8,10 @@ on: jobs: check: - runs-on: "ubuntu-latest" + strategy: + matrix: + os: ["ubuntu-latest", "codspeedhq-arm64-ubuntu-22.04"] + runs-on: ${{ matrix.os }} steps: - uses: "actions/checkout@v4" with: From 613c6a095beb9066200a350b234b462f6b2da305 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Tue, 16 Sep 2025 16:42:44 +0200 Subject: [PATCH 19/22] ci: fix deprecation warning about set-output --- .github/workflows/ci.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 21d941a0..e48aff26 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -38,7 +38,7 @@ jobs: # list the directories in ./examples and output them to a github action workflow variables as a JSON array - run: | examples=$(find ./examples -maxdepth 1 -mindepth 1 -type d -printf '%f\n' | jq -R -s -c 'split("\n") | map(select(length > 0))') - echo "::set-output name=examples::$examples" + echo "examples=$examples" >> $GITHUB_OUTPUT id: list-examples node-versions: From e8bd881c0a4ae5220151588567961da6d09359af Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 26 Sep 2025 12:00:50 +0200 Subject: [PATCH 20/22] feat(core): throw when trying to call setupCore with no native core --- packages/core/src/index.ts | 6 ++++++ packages/core/tests/index.integ.test.ts | 5 +++++ 2 files changed, 11 insertions(+) diff --git a/packages/core/src/index.ts b/packages/core/src/index.ts index ec89e15f..4e31f38d 100644 --- 
a/packages/core/src/index.ts +++ b/packages/core/src/index.ts @@ -33,6 +33,12 @@ export function getCodspeedRunnerMode(): CodSpeedRunnerMode { } export const setupCore = () => { + if (!native_core.isBound) { + throw new Error( + "Native core module is not bound, CodSpeed integration will not work properly" + ); + } + native_core.InstrumentHooks.setIntegration("codspeed-node", __VERSION__); linuxPerf.start(); checkV8Flags(); diff --git a/packages/core/tests/index.integ.test.ts b/packages/core/tests/index.integ.test.ts index 1b803844..d9b8dc83 100644 --- a/packages/core/tests/index.integ.test.ts +++ b/packages/core/tests/index.integ.test.ts @@ -24,4 +24,9 @@ describe("without bindings", () => { const isBound = require("..").isBound as boolean; expect(isBound).toBe(false); }); + + it("should throw when calling setupCore", () => { + const setupCore = require("..").setupCore as () => unknown; + expect(setupCore).toThrowError("Native core module is not bound"); + }); }); From 69c156b69b8bd7a7e3020d3457057f7a27341b46 Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 26 Sep 2025 12:48:10 +0200 Subject: [PATCH 21/22] fix(core): fix tests failing after running `pnpm install` `node-gyp` was being too smart for its own good and was actually falling back to the local build, which should not be allowed for this test! --- packages/core/tests/index.integ.test.ts | 2 ++ 1 file changed, 2 insertions(+) diff --git a/packages/core/tests/index.integ.test.ts b/packages/core/tests/index.integ.test.ts index d9b8dc83..46f01baa 100644 --- a/packages/core/tests/index.integ.test.ts +++ b/packages/core/tests/index.integ.test.ts @@ -16,6 +16,8 @@ describe("without bindings", () => { const initialEnv = process.env; beforeAll(() => { process.env.npm_config_arch = "unknown"; + // Prevent node-gyp from falling back to a local version of the native core in packages/core/build + process.env.PREBUILDS_ONLY = "1"; }); afterAll(() => { process.env = initialEnv; From 8d03921a9a04e4538328c5a49cabae7e48b3a3ff Mon Sep 17 00:00:00 2001 From: Guillaume Lagrange Date: Fri, 26 Sep 2025 17:03:20 +0200 Subject: [PATCH 22/22] ci: use staging environment --- .github/workflows/codspeed.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/codspeed.yml b/.github/workflows/codspeed.yml index 9c002e5f..489845cd 100644 --- a/.github/workflows/codspeed.yml +++ b/.github/workflows/codspeed.yml @@ -28,6 +28,7 @@ jobs: uses: CodSpeedHQ/action@main with: mode: instrumentation + upload-url: ${{ secrets.STAGING_CODSPEED_UPLOAD_URL }} run: | pnpm moon run tinybench-plugin:bench pnpm moon run vitest-plugin:bench @@ -57,6 +58,7 @@ jobs: uses: CodSpeedHQ/action@main with: mode: walltime + upload-url: ${{ secrets.STAGING_CODSPEED_UPLOAD_URL }} run: | pnpm moon run tinybench-plugin:bench pnpm moon run vitest-plugin:bench
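
For readers following the walltime conversion in convertTinybenchResultToBenchmarkStats and convertVitestResultToBenchmarkStats above, the sketch below shows the same idea in isolation: take per-iteration sample times in milliseconds, convert them to nanoseconds, and derive the quartiles, median, and outlier counts that end up in the reported stats. This is a minimal, standalone illustration only, not the plugins' implementation; the helper names (toWalltimeStats, nsQuantile), the linear-interpolation quantile, and the exact outlier thresholds are assumptions, and the real code delegates this step to calculateQuantiles from @codspeed/core.

// Minimal sketch: derive walltime-style stats from tinybench samples (in ms).
// Helper names and outlier conventions are assumed for illustration.

interface WalltimeStatsSketch {
  min_ns: number;
  max_ns: number;
  mean_ns: number;
  stdev_ns: number;
  q1_ns: number;
  median_ns: number;
  q3_ns: number;
  rounds: number;
  iqr_outlier_rounds: number;
  stdev_outlier_rounds: number;
}

const msToNs = (ms: number): number => ms * 1e6;

// Linear-interpolation quantile over an already-sorted array.
function nsQuantile(sorted: number[], q: number): number {
  const pos = (sorted.length - 1) * q;
  const base = Math.floor(pos);
  const rest = pos - base;
  return sorted[base] + rest * ((sorted[base + 1] ?? sorted[base]) - sorted[base]);
}

function toWalltimeStats(samplesMs: number[]): WalltimeStatsSketch | null {
  // Mirrors the guard in the patches above: a benchmark that was optimized
  // out entirely produces no samples, so there is nothing to report.
  if (samplesMs.length === 0) return null;

  const sortedNs = samplesMs.map(msToNs).sort((a, b) => a - b);
  const mean = sortedNs.reduce((acc, v) => acc + v, 0) / sortedNs.length;
  const stdev = Math.sqrt(
    sortedNs.reduce((acc, v) => acc + (v - mean) ** 2, 0) / sortedNs.length
  );

  const q1 = nsQuantile(sortedNs, 0.25);
  const median = nsQuantile(sortedNs, 0.5);
  const q3 = nsQuantile(sortedNs, 0.75);
  const iqr = q3 - q1;

  return {
    min_ns: sortedNs[0],
    max_ns: sortedNs[sortedNs.length - 1],
    mean_ns: mean,
    stdev_ns: stdev,
    q1_ns: q1,
    median_ns: median,
    q3_ns: q3,
    rounds: sortedNs.length,
    // Rounds outside [Q1 - 1.5*IQR, Q3 + 1.5*IQR]; a common convention, assumed here.
    iqr_outlier_rounds: sortedNs.filter(
      (v) => v < q1 - 1.5 * iqr || v > q3 + 1.5 * iqr
    ).length,
    // Rounds more than 3 standard deviations from the mean; also an assumed convention.
    stdev_outlier_rounds: sortedNs.filter(
      (v) => Math.abs(v - mean) > 3 * stdev
    ).length,
  };
}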
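
The other pattern these patches share, renaming the function under test to __codspeed_root_frame__ so profiles get a stable root frame and bracketing the run with start/stop instrument hooks, can also be shown as a small standalone sketch. The InstrumentHooksLike interface and the runWithRootFrame helper below are assumptions modelled on the calls visible in the diffs (startBenchmark, stopBenchmark, setExecutedBenchmark); the try/finally is a defensive choice of this sketch, and in the real plugins the wrapping happens on tinybench's Task with hooks provided by @codspeed/core.

// Minimal sketch of the root-frame + instrument-hooks pattern used by the
// walltime runners. The hooks interface is an assumption for illustration.

interface InstrumentHooksLike {
  startBenchmark(): void;
  stopBenchmark(): void;
  setExecutedBenchmark(pid: number, uri: string): void;
}

async function runWithRootFrame(
  hooks: InstrumentHooksLike,
  uri: string,
  fn: () => unknown | Promise<unknown>
): Promise<void> {
  // Named wrapper so the benchmark appears under a stable frame in profiles.
  async function __codspeed_root_frame__() {
    await fn();
  }

  hooks.startBenchmark();
  try {
    await __codspeed_root_frame__();
  } finally {
    hooks.stopBenchmark();
  }

  // Record which benchmark this process just executed, keyed by its URI.
  hooks.setExecutedBenchmark(process.pid, uri);
}

// Usage sketch with a no-op hooks implementation:
const noopHooks: InstrumentHooksLike = {
  startBenchmark: () => undefined,
  stopBenchmark: () => undefined,
  setExecutedBenchmark: () => undefined,
};

void runWithRootFrame(noopHooks, "examples/sketch.ts::RegExp", () =>
  /o/.test("Hello World!")
);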