We believe in a future in which the web is a preferred environment for numerical computation. To help realize this future, we’ve built stdlib. stdlib is a standard library, with an emphasis on numerical and scientific computation, written in JavaScript (and C) for execution in browsers and in Node.js.
The library is fully decomposable, being architected in such a way that you can swap out and mix and match APIs and functionality to cater to your exact preferences and use cases.
When you use stdlib, you can be absolutely certain that you are using the most thorough, rigorous, well-written, studied, documented, tested, measured, and high-quality code out there.
To join us in bringing numerical computing to the web, get started by checking us out on GitHub, and please consider financially supporting stdlib. We greatly appreciate your continued support!
[![NPM version][npm-image]][npm-url] [![Build Status][test-image]][test-url] [![Coverage Status][coverage-image]][coverage-url]
Benchmark harness.
Install the package via npm:
npm install @stdlib/bench-harness
Alternatively,

-   To load the package in a website via a `script` tag without installation and bundlers, use the [ES Module][es-module] available on the [`esm`][esm-url] branch (see [README][esm-readme]).
-   If you are using Deno, load the package via the [`deno`][deno-url] branch (see [README][deno-readme] for usage instructions).
-   For use in browser environments without a bundler, use the Universal Module Definition (UMD) build available on the [`umd`][umd-url] branch (see [README][umd-readme]).

To use the package in Node.js,
var bench = require( '@stdlib/bench-harness' );
Calling `bench( name[, options], benchmark )` queues a `benchmark` to be run during a subsequent turn of the event loop. After running the `benchmark`, the function outputs benchmark results as Test Anything Protocol ([TAP][tap]) output. For example,
bench( 'Math.sin', function benchmark( b ) {
var x;
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
x = Math.sin( Math.random() );
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
}
b.toc();
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
b.pass( 'benchmark finished' );
b.end();
});
The `benchmark` function has the following signature:
function benchmark( b ) {
// Benchmark code...
}
where `b` is a `Benchmark` instance. Synchronous benchmarks should, at minimum, have the following structure:
function benchmark( b ) {
var x;
var i;
// [1] Start timing:
b.tic();
// [2] Loop containing code to time...
for ( i = 0; i < b.iterations; i++ ) {
// [3] Code to time...
// [4] A conditional verifying results to prevent certain compiler optimizations:
if ( x !== x ) {
b.fail( 'something went wrong!' );
}
}
// [5] Stop timing:
b.toc();
// [6] Another conditional verifying results to prevent certain compiler optimizations:
if ( x !== x ) {
b.fail( 'something went wrong!' );
}
// [7] End the benchmark:
b.end();
}
Asynchronous benchmarks should have a similar structure, calling `b.toc()` and `b.end()` once all asynchronous tasks have completed:
function benchmark( b ) {
var i = 0;
// [1] Start timing:
b.tic();
// [2] Asynchronous code to time:
return next();
function next( error ) {
if ( error ) {
return b.fail( error.message );
}
i += 1;
// [3] Exit condition:
if ( i <= b.iterations ) {
// Asynchronous task...
return;
}
// [4] Stop timing:
b.toc();
// [5] End the benchmark:
b.end();
}
}
Calling `b.end()` is mandatory, as failing to do so will cause the benchmark harness to hang. For example, the following benchmark will never complete:
function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
// Synchronous task...
}
b.toc();
}
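For completeness, here is a corrected sketch of the above (using only APIs already shown), which ends the benchmark after stopping the timer:

function benchmark( b ) {
    var i;
    b.tic();
    for ( i = 0; i < b.iterations; i++ ) {
        // Synchronous task...
    }
    b.toc();

    // Ending the benchmark allows the harness to proceed:
    b.end();
}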
Avoid making assertions within the loop being timed, as the assertion overhead will be included in the timing results; instead, make assertions after calling `b.toc()`. For example,
function benchmark( b ) {
var x;
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
x = Math.sin( Math.random() );
b.equal( x, x, 'does not return NaN' ); // Avoid doing this!
}
b.toc();
b.equal( x, x, 'does not return NaN' ); // This is fine.
b.end();
}
Ensure that all setup code executes before calling `b.tic()` and that all cleanup code executes after calling `b.toc()`. For example, avoid the following:
function benchmark( b ) {
var x;
var y;
var i;
// Start timing:
b.tic();
// Setup code:
x = new Array( b.iterations ); // Should be before b.tic()!
for ( i = 0; i < b.iterations; i++ ) {
x[ i ] = Math.random();
}
// Code to be timed...
for ( i = 0; i < b.iterations; i++ ) {
y = Math.sin( x[ i ] );
if ( y !== y ) {
b.fail( 'should not return NaN' );
}
}
// Verify results:
b.equal( y, y, 'does not return NaN' ); // Should be after b.toc()!
// Stop timing:
b.toc();
b.end();
}
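A corrected arrangement, sketched using the same operations, performs setup before `b.tic()` and verification after `b.toc()`:

function benchmark( b ) {
    var x;
    var y;
    var i;

    // Setup code (before starting the timer):
    x = new Array( b.iterations );
    for ( i = 0; i < b.iterations; i++ ) {
        x[ i ] = Math.random();
    }

    // Start timing:
    b.tic();

    // Code to be timed...
    for ( i = 0; i < b.iterations; i++ ) {
        y = Math.sin( x[ i ] );
        if ( y !== y ) {
            b.fail( 'should not return NaN' );
        }
    }

    // Stop timing:
    b.toc();

    // Verify results (after stopping the timer):
    b.equal( y, y, 'does not return NaN' );

    b.end();
}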
The function accepts the following `options`:

-   **iterations**: number of iterations. If `null`, the number of iterations is determined by trying successive powers of `10` until the total time is at least `0.1` seconds. Default: `null`.
-   **repeats**: number of times to repeat a benchmark. Default: `3`.
-   **timeout**: number of milliseconds before a benchmark automatically fails. Default: `300000` (5 minutes).
-   **skip**: `boolean` indicating whether to skip a benchmark.

To require a specific number of iterations, set the `iterations` option.
var opts = {
'iterations': 1e6
};
bench( 'require a specific number of iterations', opts, function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
// Code to be benchmarked...
}
b.toc();
b.end();
});
To repeat a benchmark multiple times, set the `repeats` option.
var opts = {
'repeats': 5
};
bench( 'repeat a benchmark multiple times', opts, function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
// Code to be benchmarked...
}
b.toc();
b.end();
});
To skip a benchmark, set the `skip` option.
var opts = {
'skip': true
};
bench( 'skipped benchmark', opts, function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
// Code to be benchmarked...
}
b.toc();
b.end();
});
To specify a timeout for an asynchronous benchmark, set the `timeout` option (in milliseconds).
var opts = {
'timeout': 5000 // 5 seconds
};
bench( 'async benchmark', opts, function benchmark( b ) {
var i = 0;
b.tic();
return next();
function next( error ) {
if ( error ) {
return b.fail( error.message );
}
i += 1;
if ( i <= b.iterations ) {
// Asynchronous task...
return;
}
b.toc();
b.end();
}
});
To register a callback which is invoked once the harness finishes running all benchmarks, use `bench.onFinish()`:
function onFinish() {
console.log( 'Done!' );
}
bench.onFinish( onFinish );
To create a results stream, use `bench.createStream()`:
var stdout = require( '@stdlib/streams-node-stdout' );
var stream = bench.createStream();
// Direct all results to `stdout`:
stream.pipe( stdout );
var opts = {
'iterations': 1,
'repeats': 1
};
bench( 'beep', opts, function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
b.equal( 3.14, 3.14, 'should be equal' );
}
b.toc();
b.end();
});
Sample output:
TAP version 13
# beep
ok 1 should be equal
---
iterations: 1
elapsed: 0.002985193
rate: 334.98671610177297
...
#
1..1
# total 1
# pass 1
#
# ok
A results stream may be directed to any [`Writable`][nodejs-writable-stream] stream (e.g., a network connection, a file, `stdout`, etc.). The method accepts the same `options` as [@stdlib/streams/node/transform][@stdlib/streams/node/transform]. For example, by default, the method returns a stream which produces [TAP][tap] output as text. To return an object stream, set the `objectMode` option:
var opts = {
'objectMode': true
};
var stream = bench.createStream( opts );
stream.on( 'data', onRow );
function onRow( row ) {
console.log( JSON.stringify( row ) );
}
opts = {
'iterations': 1,
'repeats': 1
};
bench( 'beep', opts, function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
b.equal( 3.14, 3.14, 'should be equal' );
}
b.toc();
b.end();
});
Sample output:
{"type":"benchmark","name":"beep","id":0}
{"id":0,"ok":true,"name":"should be equal","operator":"equal","actual":3.14,"expected":3.14,"benchmark":0,"type":"assert"}
{"ok":true,"operator":"result","iterations":1,"elapsed":0.00283753,"rate":352.41918147120913,"benchmark":0,"type":"result"}
{"benchmark":0,"type":"end"}
To create a separate benchmark harness, use `bench.createHarness()`:
var harness = bench.createHarness();
harness( 'beep', function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
// Synchronous task...
}
b.toc();
b.end();
});
To be notified once a harness finishes running all benchmarks, provide a callback `function`:
var harness = bench.createHarness( onFinish );
function onFinish() {
harness.close();
}
harness( 'beep', function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
// Synchronous task...
}
b.toc();
b.end();
});
The function accepts the following `options`:

-   **autoclose**: `boolean` indicating whether to automatically close a harness after running all benchmarks.

By default, a `harness` does not automatically close. To automatically close a harness once the harness finishes running all benchmarks, set the `autoclose` option to `true`:
var harness = bench.createHarness({
'autoclose': true
});
harness( 'beep', function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
// Synchronous task...
}
b.toc();
b.end();
});
A `harness` has the following properties and methods…

To create a results stream which is bound to a specific harness, use `harness.createStream()`:
var stdout = require( '@stdlib/streams-node-stdout' );
var harness = bench.createHarness();
var stream = harness.createStream();
// Direct all results to `stdout`:
stream.pipe( stdout );
var opts = {
'iterations': 1,
'repeats': 1
};
harness( 'beep', opts, function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
b.equal( 'beep', 'beep', 'should be equal' );
}
b.toc();
b.end();
});
Sample output:
TAP version 13
# beep
ok 1 should be equal
---
iterations: 1
elapsed: 0.00166768
rate: 599.6354216636286
...
#
1..1
# total 1
# pass 1
#
# ok
The method accepts the same `options` as [@stdlib/streams/node/transform][@stdlib/streams/node/transform].

To close a benchmark harness, use `harness.close()`. Closing a harness before all queued benchmarks have finished generates a warning in the results output; for example,
var stdout = require( '@stdlib/streams-node-stdout' );
var harness = bench.createHarness();
var stream = harness.createStream();
stream.pipe( stdout );
var opts = {
'iterations': 5,
'repeats': 5
};
harness( 'early close', opts, function benchmark( b ) {
var i = 0;
b.tic();
setTimeout( next, 0 );
function next() {
i += 1;
if ( i <= b.iterations ) {
b.ok( true, 'should be truthy' );
return setTimeout( next, 10 );
}
b.toc();
b.end();
}
});
// Early close:
setTimeout( onTimeout, 50 );
function onTimeout() {
harness.close();
}
Sample output:
TAP version 13
# early close
ok 1 should be truthy
ok 2 should be truthy
# WARNING: harness closed before completion.
ok 3 should be truthy
ok 4 should be truthy
ok 5 should be truthy
---
iterations: 5
elapsed: 0.05940291
rate: 84.17096064822414
...
To forcefully exit a benchmark harness, use `harness.exit()`. Forcing a harness to exit causes any benchmark which has not yet ended to fail; for example,
var stdout = require( '@stdlib/streams-node-stdout' );
var harness = bench.createHarness();
var stream = harness.createStream();
stream.pipe( stdout );
var opts = {
'iterations': 5
};
harness( 'force exit', opts, function benchmark( b ) {
var i = 0;
b.tic();
return next();
function next() {
i += 1;
if ( i <= b.iterations ) {
b.ok( true, 'should be truthy' );
return setTimeout( next, 10 );
}
b.toc();
b.end();
}
});
// Forcefully exit:
setTimeout( onTimeout, 20 );
function onTimeout() {
harness.exit();
}
Sample output:
TAP version 13
# force exit
ok 1 should be truthy
not ok 2 benchmark exited without ending
---
operator: fail
TODO: include stack
...
not ok 3 benchmark exited without ending
---
operator: fail
TODO: include stack
...
ok 4 should be truthy
ok 5 should be truthy
ok 6 should be truthy
ok 7 should be truthy
---
iterations: 5
elapsed: 0.061504862
rate: 81.29438612511642
...
The `harness.exitCode` property is the harness exit code. When all benchmarks succeed, the exit code is `0`; otherwise, the exit code is `1`. For example,
var harness = bench.createHarness( onFinish );
// Benchmarks only start running when results have a destination:
var stream = harness.createStream();
function onFinish() {
console.log( harness.exitCode );
// => 1
}
var opts = {
'iterations': 1,
'repeats': 1
};
harness( 'exit code', opts, function benchmark( b ) {
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
b.fail( 'failing assertion' );
}
b.toc();
b.end();
});
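When driving benchmarks programmatically (e.g., from a CI script), one might mirror the harness exit code onto the process. A minimal sketch, assuming a Node.js environment and using only APIs shown above:

var stdout = require( '@stdlib/streams-node-stdout' );
var bench = require( '@stdlib/bench-harness' );

var harness = bench.createHarness( done );

// Benchmarks only start running when results have a destination:
var stream = harness.createStream();
stream.pipe( stdout );

harness( 'example', function benchmark( b ) {
    var i;
    b.tic();
    for ( i = 0; i < b.iterations; i++ ) {
        // Code to be benchmarked...
    }
    b.toc();
    b.end();
});

function done() {
    // Propagate benchmark failures to the calling process (e.g., for CI):
    process.exitCode = harness.exitCode;
}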
A `Benchmark` instance has the following properties and methods…

The `b.name` property is the benchmark `name`:
var str = b.name;
// returns <string>
The `b.iterations` property is the number of iterations:
var iter = b.iterations;
// returns <number>
The `b.tic()` method starts a benchmark timer and should be called only once within a `benchmark` function:
function benchmark( b ) {
var x;
var i;
// Start a timer:
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
x = Math.sin( Math.random() );
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
}
b.toc();
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
b.end();
}
The `b.toc()` method stops a benchmark timer and should be called only once within a `benchmark` function:
function benchmark( b ) {
var x;
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
x = Math.sin( Math.random() );
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
}
// Stop a timer:
b.toc();
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
b.end();
}
The `b.end()` method explicitly ends a benchmark and should be called only once within a `benchmark` function:
function benchmark( b ) {
var x;
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
x = Math.sin( Math.random() );
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
}
b.toc();
if ( x !== x ) {
b.fail( 'should not return NaN' );
}
// Explicitly end the benchmark:
b.end();
}
No assertions should follow a call to `b.end()`. Including assertions after `b.end()` may result in interleaved [TAP][tap] output or an output stream closing before a benchmark executes pending assertions.

The `b.comment( msg )` method writes a message as a [TAP][tap] comment:
b.comment( 'This is a comment.' );
Output:
# This is a comment.
The `b.skip( value, msg )` method generates an assertion which will be skipped:
b.skip( false, 'This is skipped.' );
b.skip( true, 'This is skipped.' );
Output:
ok 1 This is skipped. # SKIP
ok 2 This is skipped. # SKIP
The `b.todo( value, msg )` method generates a `todo` assertion (i.e., an assertion for functionality yet to be implemented):
b.todo( false, 'This is a todo.' );
b.todo( true, 'This is a todo.' );
Output:
not ok 3 This is a todo. # TODO
---
operator: todo
TODO: include stack
...
ok 4 This is a todo. # TODO
While `b.todo()` assertions typically fail, they do not contribute to the failed assertion count. If a benchmark includes `b.todo()` assertions and no other failing assertions, the benchmark is considered successful.

The `b.fail( msg )` method generates a failing assertion:
b.fail( 'This is a failing assertion.' );
Output:
not ok 5 This is a failing assertion.
---
operator: fail
TODO: include stack
...
The `b.pass( msg )` method generates a passing assertion:
b.pass( 'This is a passing assertion.' );
Output:
ok 6 This is a passing assertion.
The `b.ok( value[, msg] )` method asserts that a `value` is truthy:
b.ok( [] );
Output:
ok 7 should be truthy
To override the default assertion message, provide a `msg` argument:
b.ok( true, 'This asserts a value is truthy.' );
b.ok( false, 'This asserts a value is truthy.' );
Output:
ok 8 This asserts a value is truthy.
not ok 9 This asserts a value is truthy.
---
operator: ok
TODO: include stack
...
The `b.notOk( value[, msg] )` method asserts that a `value` is falsy:
b.notOk( null );
Output:
ok 10 should be falsy
To override the default assertion message, provide a `msg` argument:
b.notOk( false, 'This asserts a value is falsy.' );
b.notOk( true, 'This asserts a value is falsy.' );
Output:
ok 11 This asserts a value is falsy.
not ok 12 This asserts a value is falsy.
---
operator: notOk
TODO: include stack
...
The `b.equal( actual, expected[, msg] )` method asserts that `actual` is strictly equal to `expected`:
var expected = [];
var actual = expected;
b.equal( actual, expected );
Output:
ok 13 should be equal
To override the default assertion message, provide a `msg` argument:
var expected = [];
var actual = expected;
b.equal( actual, expected, 'This asserts two values are strictly equal.' );
b.equal( 1.0, 2.0, 'This asserts two values are strictly equal.' );
Output:
ok 14 This asserts two values are strictly equal.
not ok 15 This asserts two values are strictly equal.
---
operator: equal
TODO: include stack
...
The `b.notEqual( actual, expected[, msg] )` method asserts that `actual` is not strictly equal to `expected`:
var expected = [];
var actual = [];
b.notEqual( actual, expected );
Output:
ok 16 should not be equal
To override the default assertion message, provide a `msg` argument:
var expected = [];
var actual = expected;
b.notEqual( 1.0, 2.0, 'This asserts two values are not equal.' );
b.notEqual( actual, expected, 'This asserts two values are not equal.' );
Output:
ok 17 This asserts two values are not equal.
not ok 18 This asserts two values are not equal.
---
operator: notEqual
TODO: include stack
...
The `b.deepEqual( actual, expected[, msg] )` method asserts that `actual` is deeply equal to `expected`:
var expected = {
'a': 'b'
};
var actual = {
'a': 'b'
};
b.deepEqual( actual, expected );
Output:
ok 19 should be deeply equal
To override the default assertion message, provide a `msg` argument:
var expected = {
'a': 'b'
};
var actual = {
'a': 'b'
};
b.deepEqual( actual, expected, 'This asserts two values are deeply equal.' );
actual.a = 'c';
b.deepEqual( actual, expected, 'This asserts two values are deeply equal.' );
Output:
TODO
The `b.notDeepEqual( actual, expected[, msg] )` method asserts that `actual` is not deeply equal to `expected`:
var expected = {
'a': 'b'
};
var actual = {
'a': 'c'
};
b.notDeepEqual( actual, expected );
Output:
ok 22 should not be deeply equal
To override the default assertion message, provide a `msg` argument:
var expected = {
'a': 'b'
};
var actual = {
'a': 'c'
};
b.notDeepEqual( actual, expected, 'This asserts two values are not deeply equal.' );
actual.a = 'b';
b.notDeepEqual( actual, expected, 'This asserts two values are not deeply equal.' );
Output:
TODO
A few additional notes:

-   If not provided a `benchmark` function, a benchmark is considered a `todo` and `options.repeats` is ignored.
-   If a benchmark fails to call `b.tic()` and/or `b.toc()` during pretests (even if due to an intermittent failure), the benchmark is only run once (i.e., `options.repeats` is ignored). Similarly, if `options.iterations` is `null` and a benchmark fails during iteration number determination, the benchmark is only run once and for one iteration. Accordingly, if a benchmark does not run an expected number of repetitions and/or iterations, this behavior is likely attributable to a benchmark failure during pretesting.
-   Each benchmark must be provided a `name`. If a `name` is not provided, the harness will throw an `Error`.
-   Benchmarks should be given a unique `name`. Unique names ensure easier identification and assignment of benchmark results.
-   If `b.tic()`, `b.toc()`, or `b.end()` is called more than once within a benchmark, the benchmark will fail.

Sample [TAP][tap] output for two benchmarks (`Math.hypot` and `hypot`), each repeated `3` times:
TAP version 13
# Math.hypot
---
iterations: 1000000
elapsed: 0.457849215
rate: 2184125.181911691
...
ok 1 benchmark finished
# Math.hypot
---
iterations: 1000000
elapsed: 0.454676639
rate: 2199365.250432407
...
ok 2 benchmark finished
# Math.hypot
---
iterations: 1000000
elapsed: 0.472378014
rate: 2116948.652059831
...
ok 3 benchmark finished
# hypot
---
iterations: 1000000
elapsed: 0.13120811
rate: 7621480.105155086
...
ok 4 benchmark finished
# hypot
---
iterations: 1000000
elapsed: 0.129308984
rate: 7733414.717727579
...
ok 5 benchmark finished
# hypot
---
iterations: 1000000
elapsed: 0.12404053
rate: 8061881.064197323
...
ok 6 benchmark finished
#
1..6
# total 6
# pass 6
#
# ok
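For instance, in the first result block above, `1000000` iterations completed in `0.457849215` seconds, which corresponds to the reported rate of roughly `2184125` iterations per second (iterations divided by elapsed time).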
Here, `elapsed` is the total time measured from the call to `b.tic()` to the call to `b.toc()` (in seconds).

The following example benchmarks the built-in `Math.sin` against `@stdlib/math-base-special-sin`:
var randu = require( '@stdlib/random-base-randu' );
var isnan = require( '@stdlib/math-base-assert-is-nan' );
var sin = require( '@stdlib/math-base-special-sin' );
var bench = require( '@stdlib/bench-harness' );
var opts = {
'iterations': 1e6,
'repeats': 3
};
bench( 'Math.sin', opts, function benchmark( b ) {
var x;
var y;
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
x = (randu()*100.0) - 50.0;
y = Math.sin( x );
if ( y < -1.0 || y > 1.0 ) {
b.fail( 'something went wrong!' );
}
}
b.toc();
if ( isnan( y ) ) {
b.fail( 'something went wrong!' );
}
b.pass( 'benchmark finished' );
b.end();
});
bench( 'sin', opts, function benchmark( b ) {
var x;
var y;
var i;
b.tic();
for ( i = 0; i < b.iterations; i++ ) {
x = (randu()*100.0) - 50.0;
y = sin( x );
if ( y < -1.0 || y > 1.0 ) {
b.fail( 'something went wrong!' );
}
}
b.toc();
if ( isnan( y ) ) {
b.fail( 'something went wrong!' );
}
b.pass( 'benchmark finished' );
b.end();
});
The package additionally provides a command-line interface (CLI). To use the CLI as a general utility, install the CLI package globally:
npm install -g @stdlib/bench-harness-cli
Once installed, the `bench` command has the following usage:
Usage: bench [options] <glob> ...
Options:
-h, --help Print this message.
-V, --version Print the package version.
-r, --require module Load module before running benchmarks.
A benchmark file is an ordinary Node.js script and may also be run directly; e.g.,
$ node /path/to/benchmark.js
To load one or more modules before running benchmarks, set the `-r` flag one or more times. For example,
$ bench -r foo -r bar 'benchmark/*.js'
where `foo` and `bar` will both be loaded before any benchmarks matching the glob `benchmark/*.js`. The `-r` flag behaves exactly like `require`, and modules are resolved relative to the current working directory. To load local modules, use relative paths; e.g.,
$ bench -r ./foo/bar.js -r ./beep/boop/bap 'benchmark/*.js'
Note that `-r` modules are loaded before running benchmarks regardless of order. Hence,
$ bench -r foo -r bar 'benchmark/*.js'
is equivalent to
$ bench -r foo 'benchmark/*.js' -r bar
To prevent the shell from expanding a glob pattern before it reaches the CLI, enclose the pattern in quotation marks; i.e., rather than
$ bench benchmark/*.js
use
$ bench 'benchmark/*.js'
$ bench "benchmark/*.js"
For example,
$ bench ./examples/index.js
Sample output:
TAP version 13
# Math.sin
---
iterations: 1000000
elapsed: 0.107631765
rate: 9290937.484858675
...
ok 1 benchmark finished
# Math.sin
---
iterations: 1000000
elapsed: 0.100319363
rate: 9968165.368035682
...
ok 2 benchmark finished
# Math.sin
---
iterations: 1000000
elapsed: 0.095116262
rate: 10513449.31952856
...
ok 3 benchmark finished
# sin
---
iterations: 1000000
elapsed: 0.173696195
rate: 5757178.503536016
...
ok 4 benchmark finished
# sin
---
iterations: 1000000
elapsed: 0.158544701
rate: 6307369.42762912
...
ok 5 benchmark finished
# sin
---
iterations: 1000000
elapsed: 0.157709895
rate: 6340756.234730865
...
ok 6 benchmark finished
#
1..6
# total 6
# pass 6
#
# ok