Skip to content
Open
Show file tree
Hide file tree
Changes from 9 commits
Commits
Show all changes
21 commits
Select commit Hold shift + click to select a range
98f0b8b
chore(deps): update tinybench to 3.x.x
jerome-benoit Dec 18, 2024
cd23ff1
fix: import the proper FnOptions tinybench type
jerome-benoit Dec 18, 2024
7a6a69f
fix: import the right BenchOptions type
jerome-benoit Dec 18, 2024
3a4d73d
refactor: remove console.log debugging
jerome-benoit Dec 18, 2024
5aa3de5
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Dec 20, 2024
5de0335
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Dec 20, 2024
1df2a31
Merge remote-tracking branch 'upstream/main' into chore/tinybench-upg…
jerome-benoit Mar 4, 2026
d8c20de
fix: adapt benchmark reporter and runner for tinybench 3 API after up…
jerome-benoit Mar 4, 2026
0609c92
chore(deps): upgrade tinybench to v6
jerome-benoit Mar 4, 2026
682332b
fix: address review comments — type-safe defaults, remove unused arra…
jerome-benoit Mar 4, 2026
d64d9b9
fix: handle errored benchmark tasks and prevent result pollution
jerome-benoit Mar 4, 2026
4c164cc
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 5, 2026
896e03b
refactor: centralize bench options mapping in getBenchOptions
jerome-benoit Mar 5, 2026
87cd6a7
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 6, 2026
cbfc1d1
refactor: align BenchmarkResult.samplesCount with tinybench naming
jerome-benoit Mar 6, 2026
16064f3
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 6, 2026
9fbad46
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 6, 2026
bb4df4d
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 8, 2026
37a2e76
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 11, 2026
05afd1d
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 14, 2026
f48c916
Merge branch 'main' into chore/tinybench-upgrade
jerome-benoit Mar 16, 2026
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion packages/vitest/package.json
Original file line number Diff line number Diff line change
Expand Up @@ -188,7 +188,7 @@
"pathe": "catalog:",
"picomatch": "^4.0.3",
"std-env": "catalog:",
"tinybench": "^2.9.0",
"tinybench": "^6.0.0",
"tinyexec": "^1.0.2",
"tinyglobby": "catalog:",
"tinyrainbow": "catalog:",
Expand Down
2 changes: 1 addition & 1 deletion packages/vitest/src/node/reporters/base.ts
Original file line number Diff line number Diff line change
Expand Up @@ -867,7 +867,7 @@ export abstract class BaseReporter implements Reporter {
.sort((a, b) => a.result!.benchmark!.rank - b.result!.benchmark!.rank)

for (const sibling of siblings) {
const number = (sibling.result!.benchmark!.mean / bench.result!.benchmark!.mean).toFixed(2)
const number = (sibling.result!.benchmark!.latency.mean / bench.result!.benchmark!.latency.mean).toFixed(2)
this.log(c.green(` ${number}x `) + c.gray('faster than ') + sibling.name)
}

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ export function createBenchmarkJsonReport(files: File[]): Report {
const benchmark = t.meta.benchmark && t.result?.benchmark

if (benchmark) {
benchmarks.push({ id: t.id, ...benchmark, samples: [] })
benchmarks.push({ id: t.id, ...benchmark })
}
}

Expand Down
4 changes: 2 additions & 2 deletions packages/vitest/src/node/reporters/benchmark/reporter.ts
Original file line number Diff line number Diff line change
Expand Up @@ -39,8 +39,8 @@ export class BenchmarkReporter extends DefaultReporter {
const task = this.ctx.state.idMap.get(pack[0])

if (task?.type === 'suite' && task.result?.state !== 'run') {
task.tasks.filter(task => task.result?.benchmark)
.sort((benchA, benchB) => benchA.result!.benchmark!.mean - benchB.result!.benchmark!.mean)
task.tasks.filter(task => task.result?.benchmark?.latency)
.sort((benchA, benchB) => benchA.result!.benchmark!.latency.mean - benchB.result!.benchmark!.latency.mean)
.forEach((bench, idx) => {
bench.result!.benchmark!.rank = Number(idx) + 1
})
Expand Down
34 changes: 17 additions & 17 deletions packages/vitest/src/node/reporters/benchmark/tableRender.ts
Original file line number Diff line number Diff line change
Expand Up @@ -18,10 +18,10 @@ function formatNumber(number: number) {

const tableHead = [
'name',
'hz',
'mean',
'min',
'max',
'mean',
'p50/median',
'p75',
'p99',
'p995',
Expand All @@ -33,16 +33,16 @@ const tableHead = [
function renderBenchmarkItems(result: BenchmarkResult) {
return [
result.name,
formatNumber(result.hz || 0),
formatNumber(result.min || 0),
formatNumber(result.max || 0),
formatNumber(result.mean || 0),
formatNumber(result.p75 || 0),
formatNumber(result.p99 || 0),
formatNumber(result.p995 || 0),
formatNumber(result.p999 || 0),
`±${(result.rme || 0).toFixed(2)}%`,
(result.sampleCount || 0).toString(),
formatNumber(result.latency.mean || 0),
formatNumber(result.latency.min || 0),
formatNumber(result.latency.max || 0),
formatNumber(result.latency.p50 || 0),
formatNumber(result.latency.p75 || 0),
formatNumber(result.latency.p99 || 0),
formatNumber(result.latency.p995 || 0),
formatNumber(result.latency.p999 || 0),
`±${(result.latency.rme || 0).toFixed(2)}%`,
(result.numberOfSamples || 0).toString(),
]
}

Expand All @@ -67,16 +67,16 @@ function renderBenchmark(result: BenchmarkResult, widths: number[]) {
const padded = padRow(renderBenchmarkItems(result), widths)
return [
padded[0], // name
c.blue(padded[1]), // hz
c.blue(padded[1]), // mean
c.cyan(padded[2]), // min
c.cyan(padded[3]), // max
c.cyan(padded[4]), // mean
c.cyan(padded[4]), // p50/median
c.cyan(padded[5]), // p75
c.cyan(padded[6]), // p99
c.cyan(padded[7]), // p995
c.cyan(padded[8]), // p999
c.dim(padded[9]), // rem
c.dim(padded[10]), // sample
c.dim(padded[10]), // samples
].join(' ')
}

Expand Down Expand Up @@ -151,8 +151,8 @@ export function renderTable(
let body = renderBenchmark(bench.current, columnWidths)

if (options.compare && bench.baseline) {
if (bench.current.hz) {
const diff = bench.current.hz / bench.baseline.hz
if (bench.current.throughput.mean) {
const diff = bench.current.throughput.mean / bench.baseline.throughput.mean
const diffFixed = diff.toFixed(2)

if (diffFixed === '1.0.0') {
Expand Down
6 changes: 3 additions & 3 deletions packages/vitest/src/public/index.ts
Original file line number Diff line number Diff line change
Expand Up @@ -46,14 +46,14 @@ export { VitestEvaluatedModules as EvaluatedModules } from '../runtime/moduleRun
export { NodeBenchmarkRunner as BenchmarkRunner } from '../runtime/runners/benchmark'
export { TestRunner } from '../runtime/runners/test'
export type {
BenchFactory,
Bench as BenchFactory,
BenchFunction,
Benchmark,
BenchmarkAPI,
BenchmarkResult,
BenchOptions,
BenchTask,
BenchTaskResult,
TinybenchTask as BenchTask,
TinybenchTaskResult as BenchTaskResult,
} from '../runtime/types/benchmark'
export { assertType } from '../typecheck/assertType'

Expand Down
8 changes: 4 additions & 4 deletions packages/vitest/src/runtime/benchmark.ts
Original file line number Diff line number Diff line change
Expand Up @@ -6,18 +6,18 @@ import { noop } from '@vitest/utils/helpers'
import { getWorkerState } from './utils'

const benchFns = new WeakMap<Test, BenchFunction>()
const benchOptsMap = new WeakMap()
const benchOptsMap = new WeakMap<Test, BenchOptions>()

export function getBenchOptions(key: Test): BenchOptions {
return benchOptsMap.get(key)
return benchOptsMap.get(key)!
}

export function getBenchFn(key: Test): BenchFunction {
return benchFns.get(key)!
}

export const bench: BenchmarkAPI = createBenchmark(function (
name,
name: string | Function,
fn: BenchFunction = noop,
options: BenchOptions = {},
) {
Expand All @@ -32,7 +32,7 @@ export const bench: BenchmarkAPI = createBenchmark(function (
},
})
benchFns.set(task, fn)
benchOptsMap.set(task, options)
benchOptsMap.set(task, { ...options, name: formatName(name) })
// vitest runner sets mode to `todo` if handler is not passed down
// but we store handler separately
if (!this.todo && task.mode === 'todo') {
Expand Down
84 changes: 30 additions & 54 deletions packages/vitest/src/runtime/runners/benchmark.ts
Original file line number Diff line number Diff line change
Expand Up @@ -7,11 +7,10 @@ import type {
} from '@vitest/runner'
import type { ModuleRunner } from 'vite/module-runner'
import type { SerializedConfig } from '../config'
// import type { VitestExecutor } from '../execute'
import type {
Benchmark,
BenchmarkResult,
BenchTask,
TinybenchTask,
} from '../types/benchmark'
import { updateTask as updateRunnerTask } from '@vitest/runner'
import { createDefer } from '@vitest/utils/helpers'
Expand All @@ -23,20 +22,15 @@ function createBenchmarkResult(name: string): BenchmarkResult {
return {
name,
rank: 0,
rme: 0,
samples: [] as number[],
numberOfSamples: 0,
} as BenchmarkResult
}

const benchmarkTasks = new WeakMap<Benchmark, import('tinybench').Task>()

async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
const { Task, Bench } = await runner.importTinybench()

const start = performance.now()

const benchmarkGroup: Benchmark[] = []
const benchmarkSuiteGroup = []
const benchmarkSuiteGroup: Suite[] = []
for (const task of suite.tasks) {
if (task.mode !== 'run' && task.mode !== 'queued') {
continue
Expand All @@ -50,12 +44,15 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
}
}

// run sub suites sequentially
for (const subSuite of benchmarkSuiteGroup) {
await runBenchmarkSuite(subSuite, runner)
if (benchmarkSuiteGroup.length) {
for (const subSuite of benchmarkSuiteGroup) {
await runBenchmarkSuite(subSuite, runner)
}
}

if (benchmarkGroup.length) {
const { Bench } = await runner.importTinybench()

const defer = createDefer()
suite.result = {
state: 'run',
Expand All @@ -65,26 +62,17 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
updateTask('suite-prepare', suite)

const addBenchTaskListener = (
task: InstanceType<typeof Task>,
task: InstanceType<typeof TinybenchTask>,
benchmark: Benchmark,
) => {
task.addEventListener(
'complete',
(e) => {
const task = e.task
const taskRes = task.result!
const result = benchmark.result!.benchmark!
const task = e.task!
benchmark.result!.state = 'pass'
Object.assign(result, taskRes)
// compute extra stats and free raw samples as early as possible
const samples = result.samples
result.sampleCount = samples.length
result.median = samples.length % 2
? samples[Math.floor(samples.length / 2)]
: (samples[samples.length / 2] + samples[samples.length / 2 - 1]) / 2
if (!runner.config.benchmark?.includeSamples) {
result.samples.length = 0
}
const result = benchmark.result!.benchmark!
Object.assign(result, task.result)
result.numberOfSamples = result.latency.samplesCount
updateTask('test-finished', benchmark)
},
{
Expand All @@ -94,47 +82,35 @@ async function runBenchmarkSuite(suite: Suite, runner: NodeBenchmarkRunner) {
task.addEventListener(
'error',
(e) => {
const task = e.task
defer.reject(benchmark ? task.result!.error : e)
defer.reject(e.error ?? e)
},
{
once: true,
},
)
}

benchmarkGroup.forEach((benchmark) => {
const options = getBenchOptions(benchmark)
const benchmarkInstance = new Bench(options)

const benchmarkFn = getBenchFn(benchmark)

const { setTimeout } = getSafeTimers()
const tasks: [TinybenchTask, Benchmark][] = []
for (const benchmark of benchmarkGroup) {
const benchOptions = getBenchOptions(benchmark)
const bench = new Bench({
...benchOptions,
retainSamples: !!runner.config.benchmark?.includeSamples,
})
bench.add(benchmark.name, getBenchFn(benchmark))
benchmark.result = {
state: 'run',
startTime: start,
benchmark: createBenchmarkResult(benchmark.name),
}

const task = new Task(benchmarkInstance, benchmark.name, benchmarkFn)
benchmarkTasks.set(benchmark, task)
addBenchTaskListener(task, benchmark)
})

const { setTimeout } = getSafeTimers()
const tasks: [BenchTask, Benchmark][] = []

for (const benchmark of benchmarkGroup) {
const task = benchmarkTasks.get(benchmark)!
addBenchTaskListener(bench.getTask(benchmark.name)!, benchmark)
updateTask('test-prepare', benchmark)
await task.warmup()
tasks.push([
await new Promise<BenchTask>(resolve =>
setTimeout(async () => {
resolve(await task.run())
}),
),
benchmark,
])
tasks.push([await new Promise<TinybenchTask>(resolve =>
setTimeout(async () => {
resolve((await bench.run())[0])
}),
), benchmark])
}

suite.result!.duration = performance.now() - start
Expand Down
29 changes: 18 additions & 11 deletions packages/vitest/src/runtime/types/benchmark.ts
Original file line number Diff line number Diff line change
@@ -1,28 +1,35 @@
import type { Test } from '@vitest/runner'
import type { ChainableFunction } from '@vitest/runner/utils'
import type {
Bench as BenchFactory,
Options as BenchOptions,
Task as BenchTask,
TaskResult as BenchTaskResult,
TaskResult as TinybenchResult,
Bench,
Fn as BenchFunction,
BenchOptions,
Statistics as TinybenchStatistics,
Task as TinybenchTask,
TaskResult as TinybenchTaskResult,
} from 'tinybench'

export type BenchmarkStatistics = Omit<TinybenchStatistics, 'samples'> & {
samples: number[] | undefined
}

export interface Benchmark extends Test {
meta: {
benchmark: true
result?: BenchTaskResult
result?: TinybenchTaskResult
}
}

export interface BenchmarkResult extends TinybenchResult {
export interface BenchmarkResult {
name: string
rank: number
sampleCount: number
median: number
numberOfSamples: number
latency: BenchmarkStatistics
throughput: BenchmarkStatistics
period: number
totalTime: number
}

export type BenchFunction = (this: BenchFactory) => Promise<void> | void
type ChainableBenchmarkAPI = ChainableFunction<
'skip' | 'only' | 'todo',
(name: string | Function, fn?: BenchFunction, options?: BenchOptions) => void
Expand All @@ -32,4 +39,4 @@ export type BenchmarkAPI = ChainableBenchmarkAPI & {
runIf: (condition: any) => ChainableBenchmarkAPI
}

export { BenchFactory, BenchOptions, BenchTask, BenchTaskResult }
export { Bench, BenchFunction, BenchOptions, TinybenchTask, TinybenchTaskResult }
Loading
Loading