diff --git a/packages/vitest/package.json b/packages/vitest/package.json index fd6b7d53e42f..5edbfee0b2b5 100644 --- a/packages/vitest/package.json +++ b/packages/vitest/package.json @@ -188,7 +188,7 @@ "pathe": "catalog:", "picomatch": "^4.0.3", "std-env": "catalog:", - "tinybench": "^2.9.0", + "tinybench": "^6.0.0", "tinyexec": "^1.0.2", "tinyglobby": "catalog:", "tinyrainbow": "catalog:", diff --git a/packages/vitest/src/node/reporters/base.ts b/packages/vitest/src/node/reporters/base.ts index edd1fd63b5e2..729a5eb67a6f 100644 --- a/packages/vitest/src/node/reporters/base.ts +++ b/packages/vitest/src/node/reporters/base.ts @@ -883,7 +883,7 @@ export abstract class BaseReporter implements Reporter { .sort((a, b) => a.result!.benchmark!.rank - b.result!.benchmark!.rank) for (const sibling of siblings) { - const number = (sibling.result!.benchmark!.mean / bench.result!.benchmark!.mean).toFixed(2) + const number = (sibling.result!.benchmark!.latency.mean / bench.result!.benchmark!.latency.mean).toFixed(2) this.log(c.green(` ${number}x `) + c.gray('faster than ') + sibling.name) } diff --git a/packages/vitest/src/node/reporters/benchmark/json-formatter.ts b/packages/vitest/src/node/reporters/benchmark/json-formatter.ts index 074d68b0713e..8081a753a5d3 100644 --- a/packages/vitest/src/node/reporters/benchmark/json-formatter.ts +++ b/packages/vitest/src/node/reporters/benchmark/json-formatter.ts @@ -32,7 +32,7 @@ export function createBenchmarkJsonReport(files: File[]): Report { const benchmark = t.meta.benchmark && t.result?.benchmark if (benchmark) { - benchmarks.push({ id: t.id, ...benchmark, samples: [] }) + benchmarks.push({ id: t.id, ...benchmark }) } } diff --git a/packages/vitest/src/node/reporters/benchmark/reporter.ts b/packages/vitest/src/node/reporters/benchmark/reporter.ts index 1abc7f376a4b..4e7963daed13 100644 --- a/packages/vitest/src/node/reporters/benchmark/reporter.ts +++ b/packages/vitest/src/node/reporters/benchmark/reporter.ts @@ -39,8 +39,8 @@ 
export class BenchmarkReporter extends DefaultReporter { const task = this.ctx.state.idMap.get(pack[0]) if (task?.type === 'suite' && task.result?.state !== 'run') { - task.tasks.filter(task => task.result?.benchmark) - .sort((benchA, benchB) => benchA.result!.benchmark!.mean - benchB.result!.benchmark!.mean) + task.tasks.filter(task => task.result?.benchmark?.latency) + .sort((benchA, benchB) => benchA.result!.benchmark!.latency.mean - benchB.result!.benchmark!.latency.mean) .forEach((bench, idx) => { bench.result!.benchmark!.rank = Number(idx) + 1 }) diff --git a/packages/vitest/src/node/reporters/benchmark/tableRender.ts b/packages/vitest/src/node/reporters/benchmark/tableRender.ts index a613ac909546..f54fc633af54 100644 --- a/packages/vitest/src/node/reporters/benchmark/tableRender.ts +++ b/packages/vitest/src/node/reporters/benchmark/tableRender.ts @@ -18,10 +18,10 @@ function formatNumber(number: number) { const tableHead = [ 'name', - 'hz', + 'mean', 'min', 'max', - 'mean', + 'p50/median', 'p75', 'p99', 'p995', @@ -33,16 +33,16 @@ const tableHead = [ function renderBenchmarkItems(result: BenchmarkResult) { return [ result.name, - formatNumber(result.hz || 0), - formatNumber(result.min || 0), - formatNumber(result.max || 0), - formatNumber(result.mean || 0), - formatNumber(result.p75 || 0), - formatNumber(result.p99 || 0), - formatNumber(result.p995 || 0), - formatNumber(result.p999 || 0), - `±${(result.rme || 0).toFixed(2)}%`, - (result.sampleCount || 0).toString(), + formatNumber(result.latency.mean || 0), + formatNumber(result.latency.min || 0), + formatNumber(result.latency.max || 0), + formatNumber(result.latency.p50 || 0), + formatNumber(result.latency.p75 || 0), + formatNumber(result.latency.p99 || 0), + formatNumber(result.latency.p995 || 0), + formatNumber(result.latency.p999 || 0), + `±${(result.latency.rme || 0).toFixed(2)}%`, + (result.samplesCount || 0).toString(), ] } @@ -67,16 +67,16 @@ function renderBenchmark(result: BenchmarkResult, widths: 
number[]) { const padded = padRow(renderBenchmarkItems(result), widths) return [ padded[0], // name - c.blue(padded[1]), // hz + c.blue(padded[1]), // mean c.cyan(padded[2]), // min c.cyan(padded[3]), // max - c.cyan(padded[4]), // mean + c.cyan(padded[4]), // p50/median c.cyan(padded[5]), // p75 c.cyan(padded[6]), // p99 c.cyan(padded[7]), // p995 c.cyan(padded[8]), // p999 c.dim(padded[9]), // rem - c.dim(padded[10]), // sample + c.dim(padded[10]), // samples ].join(' ') } @@ -151,15 +151,14 @@ export function renderTable( let body = renderBenchmark(bench.current, columnWidths) if (options.compare && bench.baseline) { - if (bench.current.hz) { - const diff = bench.current.hz / bench.baseline.hz + if (bench.current.throughput.mean) { + const diff = bench.current.throughput.mean / bench.baseline.throughput.mean const diffFixed = diff.toFixed(2) - if (diffFixed === '1.0.0') { + if (diffFixed === '1.00') { body += c.gray(` [${diffFixed}x]`) } - - if (diff > 1) { + else if (diff > 1) { body += c.blue(` [${diffFixed}x] ⇑`) } else { diff --git a/packages/vitest/src/public/index.ts b/packages/vitest/src/public/index.ts index 09aab798d971..270ea7d77d36 100644 --- a/packages/vitest/src/public/index.ts +++ b/packages/vitest/src/public/index.ts @@ -46,14 +46,14 @@ export { VitestEvaluatedModules as EvaluatedModules } from '../runtime/moduleRun export { NodeBenchmarkRunner as BenchmarkRunner } from '../runtime/runners/benchmark' export { TestRunner } from '../runtime/runners/test' export type { - BenchFactory, + Bench as BenchFactory, BenchFunction, Benchmark, BenchmarkAPI, BenchmarkResult, BenchOptions, - BenchTask, - BenchTaskResult, + TinybenchTask as BenchTask, + TinybenchTaskResult as BenchTaskResult, } from '../runtime/types/benchmark' export { assertType } from '../typecheck/assertType' diff --git a/packages/vitest/src/runtime/benchmark.ts b/packages/vitest/src/runtime/benchmark.ts index 5d173501659c..9a5a2ded3e13 100644 --- a/packages/vitest/src/runtime/benchmark.ts 
+++ b/packages/vitest/src/runtime/benchmark.ts @@ -1,4 +1,5 @@ import type { Test } from '@vitest/runner' +import type { SerializedConfig } from './config' import type { BenchFunction, BenchmarkAPI, BenchOptions } from './types/benchmark' import { getCurrentSuite } from '@vitest/runner' import { createChainable } from '@vitest/runner/utils' @@ -6,10 +7,13 @@ import { noop } from '@vitest/utils/helpers' import { getWorkerState } from './utils' const benchFns = new WeakMap() -const benchOptsMap = new WeakMap() +const benchOptsMap = new WeakMap() -export function getBenchOptions(key: Test): BenchOptions { - return benchOptsMap.get(key) +export function getBenchOptions(key: Test, config: SerializedConfig): BenchOptions { + return { + ...benchOptsMap.get(key)!, + retainSamples: !!config.benchmark?.includeSamples, + } } export function getBenchFn(key: Test): BenchFunction { @@ -17,7 +21,7 @@ export function getBenchFn(key: Test): BenchFunction { } export const bench: BenchmarkAPI = createBenchmark(function ( - name, + name: string | Function, fn: BenchFunction = noop, options: BenchOptions = {}, ) { @@ -32,7 +36,7 @@ export const bench: BenchmarkAPI = createBenchmark(function ( }, }) benchFns.set(task, fn) - benchOptsMap.set(task, options) + benchOptsMap.set(task, { ...options, name: formatName(name) }) // vitest runner sets mode to `todo` if handler is not passed down // but we store handler separetly if (!this.todo && task.mode === 'todo') { diff --git a/packages/vitest/src/runtime/runners/benchmark.ts b/packages/vitest/src/runtime/runners/benchmark.ts index 0d773910fdd9..6c5c2b47ff72 100644 --- a/packages/vitest/src/runtime/runners/benchmark.ts +++ b/packages/vitest/src/runtime/runners/benchmark.ts @@ -7,11 +7,11 @@ import type { } from '@vitest/runner' import type { ModuleRunner } from 'vite/module-runner' import type { SerializedConfig } from '../config' -// import type { VitestExecutor } from '../execute' import type { Benchmark, BenchmarkResult, - BenchTask, + 
BenchmarkStatistics, + TinybenchTask, } from '../types/benchmark' import { updateTask as updateRunnerTask } from '@vitest/runner' import { createDefer } from '@vitest/utils/helpers' @@ -19,24 +19,47 @@ import { getSafeTimers } from '@vitest/utils/timers' import { getBenchFn, getBenchOptions } from '../benchmark' import { getWorkerState } from '../utils' +function createEmptyStatistics(): BenchmarkStatistics { + return { + aad: 0, + critical: 0, + df: 0, + mad: 0, + max: 0, + mean: 0, + min: 0, + moe: 0, + p50: 0, + p75: 0, + p99: 0, + p995: 0, + p999: 0, + rme: 0, + samples: undefined, + samplesCount: 0, + sd: 0, + sem: 0, + variance: 0, + } +} + function createBenchmarkResult(name: string): BenchmarkResult { return { name, rank: 0, - rme: 0, - samples: [] as number[], - } as BenchmarkResult + samplesCount: 0, + latency: createEmptyStatistics(), + throughput: createEmptyStatistics(), + period: 0, + totalTime: 0, + } } -const benchmarkTasks = new WeakMap() - async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { - const { Task, Bench } = await runner.importTinybench() - const start = performance.now() const benchmarkGroup: Benchmark[] = [] - const benchmarkSuiteGroup = [] + const benchmarkSuiteGroup: Suite[] = [] for (const task of suite.tasks) { if (task.mode !== 'run' && task.mode !== 'queued') { continue @@ -50,12 +73,15 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { } } - // run sub suites sequentially - for (const subSuite of benchmarkSuiteGroup) { - await runBenchmarkSuite(subSuite, runner) + if (benchmarkSuiteGroup.length) { + for (const subSuite of benchmarkSuiteGroup) { + await runBenchmarkSuite(subSuite, runner) + } } if (benchmarkGroup.length) { + const { Bench } = await runner.importTinybench() + const defer = createDefer() suite.result = { state: 'run', @@ -65,37 +91,51 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { updateTask('suite-prepare', suite) const 
addBenchTaskListener = ( - task: InstanceType, + task: TinybenchTask, benchmark: Benchmark, ) => { + let hasErrored = false + task.addEventListener( - 'complete', + 'error', (e) => { - const task = e.task - const taskRes = task.result! - const result = benchmark.result!.benchmark! - benchmark.result!.state = 'pass' - Object.assign(result, taskRes) - // compute extra stats and free raw samples as early as possible - const samples = result.samples - result.sampleCount = samples.length - result.median = samples.length % 2 - ? samples[Math.floor(samples.length / 2)] - : (samples[samples.length / 2] + samples[samples.length / 2 - 1]) / 2 - if (!runner.config.benchmark?.includeSamples) { - result.samples.length = 0 - } - updateTask('test-finished', benchmark) + hasErrored = true + defer.reject(e.error ?? e) }, { once: true, }, ) task.addEventListener( - 'error', + 'complete', (e) => { - const task = e.task - defer.reject(benchmark ? task.result!.error : e) + const task = e.task! + const taskResult = task.result + + if (hasErrored || taskResult.state !== 'completed') { + benchmark.result!.state = 'fail' + updateTask('test-finished', benchmark) + return + } + + const result = benchmark.result!.benchmark! + result.latency = { + ...taskResult.latency, + samples: taskResult.latency.samples + ? [...taskResult.latency.samples] + : undefined, + } + result.throughput = { + ...taskResult.throughput, + samples: taskResult.throughput.samples + ? 
[...taskResult.throughput.samples] + : undefined, + } + result.period = taskResult.period + result.totalTime = taskResult.totalTime + result.samplesCount = taskResult.latency.samplesCount + benchmark.result!.state = 'pass' + updateTask('test-finished', benchmark) }, { once: true, @@ -103,38 +143,24 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) { ) } - benchmarkGroup.forEach((benchmark) => { - const options = getBenchOptions(benchmark) - const benchmarkInstance = new Bench(options) - - const benchmarkFn = getBenchFn(benchmark) - + const { setTimeout } = getSafeTimers() + for (const benchmark of benchmarkGroup) { + const benchOptions = getBenchOptions(benchmark, runner.config) + const bench = new Bench(benchOptions) + bench.add(benchmark.name, getBenchFn(benchmark)) benchmark.result = { state: 'run', startTime: start, benchmark: createBenchmarkResult(benchmark.name), } - - const task = new Task(benchmarkInstance, benchmark.name, benchmarkFn) - benchmarkTasks.set(benchmark, task) - addBenchTaskListener(task, benchmark) - }) - - const { setTimeout } = getSafeTimers() - const tasks: [BenchTask, Benchmark][] = [] - - for (const benchmark of benchmarkGroup) { - const task = benchmarkTasks.get(benchmark)! 
+    addBenchTaskListener(bench.getTask(benchmark.name)!, benchmark)        updateTask('test-prepare', benchmark) -      await task.warmup() -      tasks.push([ -        await new Promise<BenchTask>(resolve => -          setTimeout(async () => { -            resolve(await task.run()) -          }), -        ), -        benchmark, -      ]) +      await new Promise<void>(resolve => +        setTimeout(async () => { +          await bench.run() +          resolve() +        }), +      ) }      suite.result!.duration = performance.now() - start diff --git a/packages/vitest/src/runtime/types/benchmark.ts b/packages/vitest/src/runtime/types/benchmark.ts index 9ffd62c4f83b..edc6212848b9 100644 --- a/packages/vitest/src/runtime/types/benchmark.ts +++ b/packages/vitest/src/runtime/types/benchmark.ts @@ -1,28 +1,35 @@ import type { Test } from '@vitest/runner' import type { ChainableFunction } from '@vitest/runner/utils' import type { -  Bench as BenchFactory, -  Options as BenchOptions, -  Task as BenchTask, -  TaskResult as BenchTaskResult, -  TaskResult as TinybenchResult, +  Bench, +  Fn as BenchFunction, +  BenchOptions, +  Statistics as TinybenchStatistics, +  Task as TinybenchTask, +  TaskResult as TinybenchTaskResult, } from 'tinybench' +export type BenchmarkStatistics = Omit<TinybenchStatistics, 'samples'> & { +  samples: number[] | undefined +} + export interface Benchmark extends Test {   meta: {     benchmark: true -    result?: BenchTaskResult +    result?: TinybenchTaskResult   } } -export interface BenchmarkResult extends TinybenchResult { +export interface BenchmarkResult {   name: string   rank: number -  sampleCount: number -  median: number +  samplesCount: number +  latency: BenchmarkStatistics +  throughput: BenchmarkStatistics +  period: number +  totalTime: number } -export type BenchFunction = (this: BenchFactory) => Promise<void> | void type ChainableBenchmarkAPI = ChainableFunction<   'skip' | 'only' | 'todo',   (name: string | Function, fn?: BenchFunction, options?: BenchOptions) => void @@ -32,4 +39,4 @@ export type BenchmarkAPI = ChainableBenchmarkAPI & {   runIf: (condition: any) => ChainableBenchmarkAPI } -export { BenchFactory, BenchOptions, BenchTask,
BenchTaskResult } +export { Bench, BenchFunction, BenchOptions, TinybenchTask, TinybenchTaskResult } diff --git a/pnpm-lock.yaml b/pnpm-lock.yaml index 7867258373e7..8d063a14b114 100644 --- a/pnpm-lock.yaml +++ b/pnpm-lock.yaml @@ -1048,8 +1048,8 @@ importers: specifier: 'catalog:' version: 4.0.0-rc.1 tinybench: - specifier: ^2.9.0 - version: 2.9.0 + specifier: ^6.0.0 + version: 6.0.0 tinyexec: specifier: ^1.0.2 version: 1.0.2 @@ -9640,8 +9640,9 @@ packages: thread-stream@3.1.0: resolution: {integrity: sha512-OqyPZ9u96VohAyMfJykzmivOrY2wfMSf3C5TtFJVgN+Hm6aj+voFhlK+kZEIv2FBh1X6Xp3DlnCOfEQ3B2J86A==} - tinybench@2.9.0: - resolution: {integrity: sha512-0+DUvqWMValLmha6lr4kD8iAMK1HzV0/aKnCtWb9v9641TnP/MFb7Pc2bxoxQjTXAErryXVgUOfv2YqNllqGeg==} + tinybench@6.0.0: + resolution: {integrity: sha512-BWlWpVbbZXaYjRV0twGLNQO00Zj4HA/sjLOQP2IvzQqGwRGp+2kh7UU3ijyJ3ywFRogYDRbiHDMrUOfaMnN56g==} + engines: {node: '>=20.0.0'} tinyexec@0.3.2: resolution: {integrity: sha512-KQQR9yN7R5+OSwaK0XQoj22pwHoTlgYqmUscPYoknOoWCWfj/5/ABTMRi69FrKU5ffPVh5QcFikpWJI/P1ocHA==} @@ -18926,7 +18927,7 @@ snapshots: dependencies: real-require: 0.2.0 - tinybench@2.9.0: {} + tinybench@6.0.0: {} tinyexec@0.3.2: {} diff --git a/test/cli/test/__snapshots__/benchmarking.test.ts.snap b/test/cli/test/__snapshots__/benchmarking.test.ts.snap index 3ebbc8a64d42..cc43bdb8e9fd 100644 --- a/test/cli/test/__snapshots__/benchmarking.test.ts.snap +++ b/test/cli/test/__snapshots__/benchmarking.test.ts.snap @@ -25,8 +25,6 @@ exports[`summary 1`] = ` good - summary.bench.ts > suite-a (?) 
faster than bad - good - summary.bench.ts > suite-b - good - summary.bench.ts > suite-b > suite-b-nested " diff --git a/test/cli/test/benchmarking.test.ts b/test/cli/test/benchmarking.test.ts index acd0e8457d16..e731f337cc4c 100644 --- a/test/cli/test/benchmarking.test.ts +++ b/test/cli/test/benchmarking.test.ts @@ -54,12 +54,12 @@ it.for([true, false])('includeSamples %s', async (includeSamples) => { assert(result.ctx) const allSamples = [...result.ctx.state.idMap.values()] .filter(t => t.meta.benchmark) - .map(t => t.result?.benchmark?.samples) + .map(t => t.result?.benchmark?.latency.samples) if (includeSamples) { - expect(allSamples[0]).not.toEqual([]) + expect(allSamples[0]).not.toEqual(undefined) } else { - expect(allSamples[0]).toEqual([]) + expect(allSamples[0]).toEqual(undefined) } })