Online regression via stochastic gradient descent (SGD).
We believe in a future in which the web is a preferred environment for numerical computation. To help realize this future, we’ve built stdlib. stdlib is a standard library, with an emphasis on numerical and scientific computation, written in JavaScript (and C) for execution in browsers and in Node.js.
The library is fully decomposable, being architected in such a way that you can swap out and mix and match APIs and functionality to cater to your exact preferences and use cases.
When you use stdlib, you can be absolutely certain that you are using the most thorough, rigorous, well-written, studied, documented, tested, measured, and high-quality code out there.
To join us in bringing numerical computing to the web, get started by checking us out on GitHub, and please consider financially supporting stdlib. We greatly appreciate your continued support!
[![NPM version][npm-image]][npm-url] [![Build Status][test-image]][test-url] [![Coverage Status][coverage-image]][coverage-url]
Online regression via [Stochastic Gradient Descent][stochastic-gradient-descent].
bash
npm install @stdlib/ml-incr-sgd-regression
To use the package in a website via a script tag without installation and bundlers, use the [ES Module][es-module] available on the [esm][esm-url] branch (see [README][esm-readme]). If you are using Deno, install the package from the [deno][deno-url] branch (see [README][deno-readme] for usage instructions). For browser usage via a Universal Module Definition, use the build available on the [umd][umd-url] branch (see [README][umd-readme]).
var incrSGDRegression = require( '@stdlib/ml-incr-sgd-regression' );
javascript
var randu = require( '@stdlib/random-base-randu' );
var normal = require( '@stdlib/random-base-normal' );

var accumulator = incrSGDRegression();

// Stream simulated observations drawn from y = 3*x1 - 3*x2 + 2 plus Gaussian noise,
// updating the model one data point at a time...
var x1;
var x2;
var y;
var i;
for ( i = 0; i < 100000; i++ ) {
    x1 = randu();
    x2 = randu();
    y = (3.0 * x1) + (-3.0 * x2) + 2.0 + normal( 0.0, 1.0 );
    accumulator( [ x1, x2 ], y );
}
The function accepts the following options:

-   **learningRate**: string denoting the learning rate to use. Can be `constant`, `pegasos`, or `basic`. Default: `basic`.
-   **loss**: string denoting the loss function to use. Can be `squaredError`, `epsilonInsensitive`, or `huber`. Default: `squaredError`.
-   **epsilon**: insensitivity parameter. Default: `0.1`.
-   **lambda**: regularization parameter. Default: `1e-3`.
-   **eta0**: constant learning rate. Default: `0.02`.
-   **intercept**: boolean indicating whether to include an intercept. Default: `true`.

For example:
// Configure the model at creation time via an options object:
var accumulator = incrSGDRegression({
    'lambda': 1e-4,
    'loss': 'squaredError'
});
The `learningRate` decides how fast or slow the weights are updated towards the optimal weights. Let `i` denote the current iteration of the algorithm (i.e., the number of data points having arrived so far). The possible learning rates are `constant`, `pegasos`, and `basic`. The used loss function is specified via the `loss` option; the available options are `squaredError`, `epsilonInsensitive`, and `huber`. The `lambda` parameter determines the amount of shrinkage inflicted on the model coefficients:
// Demonstrate the shrinkage effect of `lambda` by fitting two models on the
// exact same data stream (identically seeded PRNG):
var createRandom = require( '@stdlib/random-base-randu' ).factory;

var opts = {
    'seed': 23
};

// Fit with (nearly) no regularization:
var rand = createRandom( opts );
var accumulator = incrSGDRegression({
    'lambda': 1e-5
});

var x1;
var x2;
var y;
var i;
for ( i = 0; i < 100; i++ ) {
    x1 = rand();
    x2 = rand();
    y = (3.0 * x1) + (-3.0 * x2) + 2.0;
    accumulator( [ x1, x2 ], y );
}
var coefs = accumulator.coefs;
// returns [ ~3.007, ~-3.002, ~2 ]

// Refit on the same stream with much stronger regularization:
rand = createRandom( opts );
accumulator = incrSGDRegression({
    'lambda': 1e-2
});
for ( i = 0; i < 100; i++ ) {
    x1 = rand();
    x2 = rand();
    y = (3.0 * x1) + (-3.0 * x2) + 2.0;
    accumulator( [ x1, x2 ], y );
}
coefs = accumulator.coefs;
// returns [ ~2.893, ~-2.409, ~1.871 ]
Higher values of `lambda` reduce the variance of the model coefficient estimates at the expense of introducing bias. By default, the model contains an `intercept` term. To omit the intercept, set the corresponding option to `false`:
// Without an intercept, the model estimates one coefficient per predictor:
var accumulator = incrSGDRegression({
    'intercept': false
});
accumulator( [ 1.4, 0.5 ], 2.0 );

var dim = accumulator.coefs.length;
// returns 2

// With the (default) intercept, one extra coefficient is estimated:
accumulator = incrSGDRegression();
accumulator( [ 1.4, 0.5 ], 2.0 );

dim = accumulator.coefs.length;
// returns 3
If `intercept` is `true`, an element equal to one is implicitly added to each `x` vector. Hence, this module performs regularization of the intercept term.

The accumulator accepts a numeric response value `y` and a numeric `array` of predictors `x`. The number of predictors is decided upon the first invocation of this method; all subsequent calls must supply `x` vectors of the same dimensionality.
// First invocation fixes the number of predictors (here: two) for all subsequent calls:
accumulator( [ 1.0, 0.0 ], 5.0 );
The `predict` method computes the predicted response for a feature vector `x`, where `x` must be a numeric `array` of predictors. Given a feature vector `x = [x_0, x_1, ...]` and model coefficients `c = [c_0, c_1, ...]`, the prediction is equal to `x_0*c_0 + x_1*c_1 + ... + c_intercept`.
// Predict the response for a new feature vector:
var yhat = accumulator.predict( [ 0.5, 2.0 ] );
// returns <number>
The model coefficients are exposed as an `array`. The coefficients are ordered as `[c_0, c_1, ..., c_intercept]`, where `c_0` corresponds to the first feature in `x` and so on.
// Retrieve the current model coefficients, ordered as [ c_0, c_1, ..., c_intercept ]:
var coefs = accumulator.coefs;
// returns <Array>
For best results, it is recommended to scale all predictors to the interval `[0,1]` or `[-1,1]`, or to transform them into z-scores with zero mean and unit variance. Keep in mind that the same scaling has to be applied to test vectors in order to obtain accurate predictions.
var randu = require( '@stdlib/random-base-randu' );
var normal = require( '@stdlib/random-base-normal' );
var incrSGDRegression = require( '@stdlib/ml-incr-sgd-regression' );

// Zero-mean, unit-variance Gaussian noise generator:
var rnorm = normal.factory( 0.0, 1.0 );

// Create model:
var accumulator = incrSGDRegression({
    'lambda': 1e-7,
    'loss': 'squaredError',
    'intercept': true
});

// Update model as data comes in...
var x1;
var x2;
var y;
var i;
for ( i = 0; i < 10000; i++ ) {
    x1 = randu();
    x2 = randu();
    y = (3.0 * x1) + (-3.0 * x2) + 2.0 + rnorm();
    accumulator( [ x1, x2 ], y );
}

// Extract model coefficients:
console.log( accumulator.coefs );

// Predict new observations:
console.log( 'y_hat = %d; x1 = %d; x2 = %d', accumulator.predict( [0.9, 0.1] ), 0.9, 0.1 );
console.log( 'y_hat = %d; x1 = %d; x2 = %d', accumulator.predict( [0.1, 0.9] ), 0.1, 0.9 );
console.log( 'y_hat = %d; x1 = %d; x2 = %d', accumulator.predict( [0.9, 0.9] ), 0.9, 0.9 );