-
Notifications
You must be signed in to change notification settings - Fork 7
Expand file tree
/
Copy pathwalltime.ts
More file actions
186 lines (162 loc) · 5.13 KB
/
walltime.ts
File metadata and controls
186 lines (162 loc) · 5.13 KB
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
import {
calculateQuantiles,
msToNs,
msToS,
writeWalltimeResults,
type Benchmark,
type BenchmarkStats,
} from "@codspeed/core";
import {
type Benchmark as VitestBenchmark,
type RunnerTaskResult,
type RunnerTestSuite,
} from "vitest";
import { NodeBenchmarkRunner } from "vitest/runners";
import { getBenchOptions } from "vitest/suite";
import {
isVitestTaskBenchmark,
patchRootSuiteWithFullFilePath,
} from "./common";
// Ambient declaration: __VERSION__ has no runtime definition in this file —
// presumably substituted at build time (bundler define/replace); confirm in build config.
declare const __VERSION__: string;
/**
 * WalltimeRunner uses Vitest's default benchmark execution
 * and extracts results from the suite after completion.
 */
export class WalltimeRunner extends NodeBenchmarkRunner {
  /**
   * Runs the suite with Vitest's stock benchmark runner, then walks the
   * completed task tree and writes any collected walltime results.
   *
   * @param suite - root suite handed over by the Vitest runner
   */
  async runSuite(suite: RunnerTestSuite): Promise<void> {
    patchRootSuiteWithFullFilePath(suite);
    console.log(
      `[CodSpeed] running with @codspeed/vitest-plugin v${__VERSION__} (walltime mode)`
    );
    // Let Vitest's default benchmark runner handle execution
    await super.runSuite(suite);
    // Extract benchmark results from the completed suite
    const benchmarks = await this.extractBenchmarkResults(suite);
    if (benchmarks.length > 0) {
      writeWalltimeResults(benchmarks);
      console.log(
        `[CodSpeed] Done collecting walltime data for ${benchmarks.length} benches.`
      );
    } else {
      console.warn(
        `[CodSpeed] No benchmark results found after suite execution`
      );
    }
  }

  /**
   * Recursively collects results from every *passed* benchmark task under
   * `suite`. Nested suites contribute to a `::`-separated path used to build
   * each benchmark's URI.
   *
   * @param suite - suite whose tasks are inspected
   * @param parentPath - accumulated path of enclosing suites ("" at the root)
   * @returns benchmarks in task order (depth-first)
   */
  private async extractBenchmarkResults(
    suite: RunnerTestSuite,
    parentPath = ""
  ): Promise<Benchmark[]> {
    const benchmarks: Benchmark[] = [];
    const currentPath = parentPath
      ? `${parentPath}::${suite.name}`
      : suite.name;
    for (const task of suite.tasks) {
      if (isVitestTaskBenchmark(task) && task.result?.state === "pass") {
        const benchmark = await this.processBenchmarkTask(task, currentPath);
        if (benchmark) {
          benchmarks.push(benchmark);
        }
      } else if (task.type === "suite") {
        const nestedBenchmarks = await this.extractBenchmarkResults(
          task,
          currentPath
        );
        benchmarks.push(...nestedBenchmarks);
      }
    }
    return benchmarks;
  }

  /**
   * Converts a single passed benchmark task into a core `Benchmark`.
   *
   * @param task - the Vitest benchmark task (already known to have passed)
   * @param suitePath - `::`-separated path of the enclosing suites
   * @returns the benchmark, or `null` when the task carries no result, has no
   *   walltime samples, or the conversion throws (a warning is logged then)
   */
  private async processBenchmarkTask(
    task: VitestBenchmark,
    suitePath: string
  ): Promise<Benchmark | null> {
    const uri = `${suitePath}::${task.name}`;
    const result = task.result;
    if (!result) {
      console.warn(` ⚠ No result data available for ${uri}`);
      return null;
    }
    try {
      // Get tinybench configuration options from vitest
      const benchOptions = getBenchOptions(task);
      const stats = this.convertVitestResultToBenchmarkStats(
        result,
        benchOptions
      );
      if (stats === null) {
        console.log(` ✔ No walltime data to collect for ${uri}`);
        return null;
      }
      const coreBenchmark: Benchmark = {
        name: task.name,
        uri,
        config: {
          max_rounds: benchOptions.iterations ?? null,
          max_time_ns: benchOptions.time ? msToNs(benchOptions.time) : null,
          min_round_time_ns: null, // tinybench does not have an option for this
          warmup_time_ns:
            benchOptions.warmupIterations !== 0 && benchOptions.warmupTime
              ? msToNs(benchOptions.warmupTime)
              : null,
        },
        stats,
      };
      console.log(` ✔ Collected walltime data for ${uri}`);
      return coreBenchmark;
    } catch (error) {
      console.warn(
        ` ⚠ Failed to process benchmark result for ${uri}:`,
        error
      );
      return null;
    }
  }

  /**
   * Maps a Vitest/tinybench task result onto CodSpeed `BenchmarkStats`.
   *
   * All tinybench timings are milliseconds and are converted to nanoseconds
   * here (total time to seconds).
   *
   * @returns `null` when the benchmark produced no samples
   * @throws Error when the result carries no benchmark data at all
   */
  private convertVitestResultToBenchmarkStats(
    result: RunnerTaskResult,
    benchOptions: {
      time?: number;
      warmupTime?: number;
      warmupIterations?: number;
      iterations?: number;
    }
  ): BenchmarkStats | null {
    const benchmark = result.benchmark;
    if (!benchmark) {
      throw new Error("No benchmark data available in result");
    }
    const { totalTime, min, max, mean, sd, samples } = benchmark;
    if (samples.length === 0) {
      // Sometimes the benchmarks can be completely optimized out and not even
      // run, but their beforeEach and afterEach hooks are still executed, and
      // the task is still considered a success.
      // This is the case for the hooks.bench.ts example in this package.
      return null;
    }
    // Individual sample times in nanoseconds, sorted ascending for quantiles.
    const sortedTimesNs = samples.map(msToNs).sort((a, b) => a - b);
    const meanNs = msToNs(mean);
    const stdevNs = msToNs(sd);
    const {
      q1_ns,
      q3_ns,
      median_ns,
      iqr_outlier_rounds,
      stdev_outlier_rounds,
    } = calculateQuantiles({ meanNs, stdevNs, sortedTimesNs });
    return {
      min_ns: msToNs(min),
      max_ns: msToNs(max),
      mean_ns: meanNs,
      stdev_ns: stdevNs,
      q1_ns,
      median_ns,
      q3_ns,
      total_time: msToS(totalTime),
      iter_per_round: 1, // as there is only one round in tinybench, we define that there were n rounds of 1 iteration
      rounds: sortedTimesNs.length,
      iqr_outlier_rounds,
      stdev_outlier_rounds,
      warmup_iters: benchOptions.warmupIterations ?? 0,
    };
  }
}
// Default export mirrors the named `WalltimeRunner` export above.
export default WalltimeRunner;