Latent Dirichlet Allocation via collapsed Gibbs sampling.
We believe in a future in which the web is a preferred environment for numerical computation. To help realize this future, we’ve built stdlib. stdlib is a standard library, with an emphasis on numerical and scientific computation, written in JavaScript (and C) for execution in browsers and in Node.js.
The library is fully decomposable, being architected in such a way that you can swap out and mix and match APIs and functionality to cater to your exact preferences and use cases.
When you use stdlib, you can be absolutely certain that you are using the most thorough, rigorous, well-written, studied, documented, tested, measured, and high-quality code out there.
To join us in bringing numerical computing to the web, get started by checking us out on GitHub, and please consider financially supporting stdlib. We greatly appreciate your continued support!
[![NPM version][npm-image]][npm-url] [![Build Status][test-image]][test-url] [![Coverage Status][coverage-image]][coverage-url]
[Latent Dirichlet Allocation][lda] via collapsed Gibbs sampling.
bash
npm install @stdlib/nlp-lda
To load the package in a website via a `script` tag without installation and bundlers, use the [ES Module][es-module] available on the [esm][esm-url] branch (see [README][esm-readme]).
If you are using Deno, load the package from the [deno][deno-url] branch (see [README][deno-readme] for usage instructions).
For use in browser environments without bundlers, use the UMD build available on the [umd][umd-url] branch (see [README][umd-readme]).
var lda = require( '@stdlib/nlp-lda' );
Use the `lda` function by passing it an array of strings (the documents) and the number of topics `K` that should be identified:
// Corpus: one string per document.
var docs = [
	'I loved you first',
	'For one is both and both are one in love',
	'You never see my pain',
	'My love is such that rivers cannot quench',
	'See a lot of pain, a lot of tears'
];

// Create a model for identifying two topics within the corpus:
var model = lda( docs, 2 );
// returns {}
After initialization, run the model's `.fit( iter, burnin, thin )` method, which performs collapsed Gibbs sampling:
// Sample for 1000 iterations, discarding the first 100 estimates (burn-in) and keeping only every 10th estimate thereafter (thinning):
model.fit( 1000, 100, 10 );
The `iter` parameter denotes the number of sampling iterations. While a common choice, one thousand iterations might not always be appropriate. Empirical diagnostics can be used to assess whether the constructed Markov Chain has converged. `burnin` denotes the number of estimates that are thrown away at the beginning, whereas `thin` controls the number of estimates discarded in-between iterations.
To obtain the `no` terms with the highest probabilities for a chosen topic `k`, use the model's `getTerms` method:
// Retrieve the three highest-probability terms for the first topic (k = 0):
var words = model.getTerms( 0, 3 );
/* returns
[
{ 'word': 'both', 'prob': 0.06315008476532499 },
{ 'word': 'pain', 'prob': 0.05515729517235543 },
{ 'word': 'one', 'prob': 0.05486669737616135 }
]
*/
The following example fits a three-topic model to U.S. State of the Union addresses:
var sotu = require( '@stdlib/datasets-sotu' );
var roundn = require( '@stdlib/math-base-special-roundn' );
var stopwords = require( '@stdlib/datasets-stopwords-en' );
var lowercase = require( '@stdlib/string-lowercase' );
var lda = require( '@stdlib/nlp-lda' );

var speeches;
var words;
var model;
var str;
var i;
var j;

/**
* Prints the `n` most probable words for a given topic.
*
* @param {string} label - human-readable topic label (e.g., 'first')
* @param {number} topic - topic index
* @param {number} n - number of terms to print
*/
function printTopTerms( label, topic, n ) {
	var terms;
	var k;
	terms = model.getTerms( topic, n );
	for ( k = 0; k < terms.length; k++ ) {
		terms[ k ] = terms[ k ].word;
	}
	console.log( 'Words most associated with ' + label + ' topic:\n ' + terms.join( ', ' ) );
}

// Compile a word-boundary, case-insensitive regular expression for each stopword:
words = stopwords();
for ( i = 0; i < words.length; i++ ) {
	words[ i ] = new RegExp( '\\b'+words[ i ]+'\\b', 'gi' );
}

// Load the State of the Union addresses for 1930 through 2010:
speeches = sotu({
	'range': [ 1930, 2010 ]
});

// Normalize each speech: lowercase the text and strip stopwords:
for ( i = 0; i < speeches.length; i++ ) {
	str = lowercase( speeches[ i ].text );
	for ( j = 0; j < words.length; j++ ) {
		str = str.replace( words[ j ], '' );
	}
	speeches[ i ] = str;
}

// Fit a three-topic model via collapsed Gibbs sampling (1000 iterations, 100 burn-in, thinning of 10):
model = lda( speeches, 3 );
model.fit( 1000, 100, 10 );

// Print the average topic proportions per year. NOTE: iterate over `speeches.length` rather than a hard-coded `80` so the loop stays correct if the `range` option above changes:
for ( i = 0; i < speeches.length; i++ ) {
	str = 'Year: ' + (1930+i) + '\t';
	str += 'Topic 1: ' + roundn( model.avgTheta.get( i, 0 ), -3 ) + '\t';
	str += 'Topic 2: ' + roundn( model.avgTheta.get( i, 1 ), -3 ) + '\t';
	str += 'Topic 3: ' + roundn( model.avgTheta.get( i, 2 ), -3 );
	console.log( str );
}

// Print the twenty most probable words for each topic:
printTopTerms( 'first', 0, 20 );
printTopTerms( 'second', 1, 20 );
printTopTerms( 'third', 2, 20 );