1 change: 1 addition & 0 deletions .github/workflows/test.yml
@@ -10,6 +10,7 @@ on:
jobs:
test:
runs-on: ubuntu-22.04
timeout-minutes: 10

steps:
- uses: actions/checkout@b4ffde65f46336ab88eb53be808477a3936bae11 # v4.1.1
1 change: 1 addition & 0 deletions docs/articles/about-benchmarking.md
@@ -20,3 +20,4 @@ A micro benchmark is simply a function you supply, that will be run for a number of iterations

Instantiate the `Bench` class to create a benchmark suite. Use `bench.group` to group bench cases and add headers. Use `bench.add` to register individual benchmarks.

By default, each test case now runs a single iteration before reporting results. You can control this by passing an `iterations` option (or `minIterations` for backwards compatibility) to the `Bench` constructor or to individual test cases. Supplying `iterations` (the default) runs a fixed number of passes and skips the adaptive run-until-time behavior; explicitly setting `iterations` to `undefined` restores the adaptive timing based on `minIterations` and `time`. Increasing the iteration count re-runs the benchmark that many times and aggregates the results, at the cost of longer total execution time.
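
A minimal usage sketch of both modes follows (a sketch only: the import path `@probe.gl/bench` and the suite ids and test bodies are assumptions, not part of this PR):

```ts
import {Bench} from '@probe.gl/bench';

// Fixed-pass mode: each case runs exactly 5 times and the results are aggregated,
// skipping the adaptive run-until-time behavior.
const suite = new Bench({id: 'example-suite', iterations: 5, log: () => {}});

suite.add('sum 1k numbers', () => {
  // Hypothetical workload
  let sum = 0;
  for (let i = 0; i < 1000; i++) {
    sum += i;
  }
  return sum;
});

// Adaptive mode: explicitly clearing `iterations` restores timing driven by
// `minIterations` and `time`.
const adaptive = new Bench({
  id: 'adaptive-suite',
  iterations: undefined,
  minIterations: 3,
  time: 80,
  log: () => {}
});

suite.run();
```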
42 changes: 37 additions & 5 deletions modules/bench/src/bench.ts
@@ -28,6 +28,8 @@ export type BenchProps = {
delay?: number;
/** Increase if OK to let slow benchmarks take long time, potentially produces more stable results */
minIterations?: number;
/** Number of iterations to run for each benchmark test */
iterations?: number;
};

export type BenchTestFunction = <T>(testArgs?: T) => T | Promise<T>;
@@ -49,6 +51,8 @@ export type BenchTestCaseProps = {
delay?: number;
/** Increase if OK to let slow benchmarks take long time, potentially produces more stable results */
minIterations?: number;
/** Number of iterations to run for each benchmark test */
iterations?: number;
multiplier?: number;
unit?: string;
_throughput?: number;
@@ -99,6 +103,8 @@ type BenchTestCase = {
delay: number;
/** Increase if OK to let slow benchmarks take long time, potentially produces more stable results */
minIterations: number;
/** Number of iterations to run for each benchmark test */
iterations: number;
multiplier: number;
unit: string;
_throughput: number;
@@ -112,7 +118,8 @@ const DEFAULT_BENCH_OPTIONS: Required<BenchProps> = {
log: undefined!,
time: 80,
delay: 5,
minIterations: 3
minIterations: 1,
iterations: 1
};

const DEFAULT_BENCH_TEST_CASE: BenchTestCase = {
@@ -126,6 +133,7 @@ const DEFAULT_BENCH_TEST_CASE: BenchTestCase = {
once: false,
time: 0,
minIterations: 1,
iterations: 1,
multiplier: 1, // multiplier per test case
unit: '',
delay: 0,
@@ -147,7 +155,7 @@ export class Bench {

constructor(props: BenchProps = {}) {
this.props = {...DEFAULT_BENCH_OPTIONS, ...props};
const {id, time, delay, minIterations} = this.props;
const {id, time, delay, minIterations, iterations} = this.props;

let log = this.props.log;
if (!log) {
@@ -156,7 +164,7 @@ }
}

this.id = id;
this.props = {id, log, time, delay, minIterations};
this.props = {id, log, time, delay, minIterations, iterations};
autobind(this);
Object.seal(this);
}
@@ -374,9 +382,10 @@ async function runBenchTestCaseAsync(testCase: BenchTestCase) {
let totalTime = 0;
let totalIterations = 0;

const minIterations: number = testCase.minIterations || 1;
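// Prefer an explicit `iterations` count when provided; otherwise fall back
// to the adaptive minIterations/time behavior.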
const iterationCount: number = testCase.iterations ?? testCase.minIterations ?? 1;
const useExplicitIterationCount = testCase.iterations !== undefined;

for (let i = 0; i < minIterations; i++) {
for (let i = 0; i < iterationCount; i++) {
let time;
let iterations;
// Runs `testCase._throughput` test cases in parallel
@@ -385,6 +394,8 @@
testCase,
testCase._throughput
));
} else if (useExplicitIterationCount) {
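// Each pass times exactly one fixed iteration; the loop over
// `iterationCount` above supplies the configured repetition.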
({time, iterations} = await runBenchTestCaseFixedIterationsAsync(testCase, 1));
} else {
({time, iterations} = await runBenchTestCaseForMinimumTimeAsync(
testCase,
@@ -442,6 +453,27 @@ async function runBenchTestCaseForMinimumTimeAsync(
};
}

// Run a test function for a specific number of iterations (and measure the elapsed time)
async function runBenchTestCaseFixedIterationsAsync(
testCase: BenchTestCase,
iterations: number
): Promise<{time: number; iterations: number}> {
const timeStart = getHiResTimestamp();

if (testCase.async) {
await runBenchTestCaseIterationsAsync(testCase, iterations);
} else {
runBenchTestCaseIterations(testCase, iterations);
}

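// getHiResTimestamp() reports milliseconds; convert the elapsed time to seconds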
const time = (getHiResTimestamp() - timeStart) / 1000;

return {
time,
iterations
};
}

// Run a test function for a specific number of parallel iterations
async function runBenchTestCaseParallelIterationsAsync(
testCase: BenchTestCase,
43 changes: 43 additions & 0 deletions modules/bench/test/bench.spec.ts
@@ -34,3 +34,46 @@ test('Bench#run', (t) => {
t.ok(suite instanceof Bench, 'suite created successfully');
suite.run().then(() => t.end());
});

test('Bench#iterations option', (t) => {
const suite = new Bench({
id: 'iteration-control',
iterations: 2,
time: 1,
log: () => {}
});

let callCount = 0;

suite.addAsync('respects iteration count', {_throughput: 1}, async () => {
callCount++;
});

suite.run().then(() => {
t.equals(callCount, 2, 'runs configured number of iterations');
t.end();
Review comment on lines +54 to +56:

P1: Account for calibration pass when asserting iteration counts

These new assertions assume the benchmarked test runs exactly the configured iterations (2 here and 1 in the later test), but `runTests` still performs a calibration warm-up via `runCalibrationTests` before the measured run. That warm-up invokes each test once, so `callCount` will be 3 in this test and 2 in the single-iteration case, causing `yarn test bench` to fail and the iteration option to execute more times than the test claims.

});
});
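
If the calibration warm-up the review comment describes does invoke each test body once, the assertion could be adjusted along these lines (a sketch only; the constant name is hypothetical and the warm-up count is assumed from the review comment, not verified against `runCalibrationTests`):

```ts
// Assumed: the calibration warm-up runs the test body once before the
// measured passes, per the review comment above.
const CALIBRATION_PASSES = 1;
t.equals(
  callCount,
  CALIBRATION_PASSES + 2,
  'runs one calibration pass plus the two configured iterations'
);
```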

test('Bench#iterations runs fixed passes', async (t) => {
const suite = new Bench({
id: 'fixed-iteration-count',
iterations: 1,
time: 10000,
log: () => {}
});

let callCount = 0;

suite.add('single invocation', () => {
callCount++;
if (callCount > 1) {
throw new Error('Benchmark ran more than once');
}
});

await suite.run();

t.equals(callCount, 1, 'runs exactly one pass even if time threshold is high');
t.end();
});