void drwnGaussian::train(const drwnSuffStats& stats, double lambda)
{
    DRWN_ASSERT_MSG(stats.size() == _n, stats.size() << " != " << _n);

    // normalize sufficient statistics to obtain the mean
    _mu = stats.firstMoments() / stats.count();

    // covariance is E[x x^T] - mu mu^T, with lambda added to the diagonal as regularization
    if (stats.isDiagonal()) {
        _mSigma = stats.secondMoments() / stats.count();
        _mSigma -= _mu.array().square().matrix().asDiagonal();
        _mSigma += lambda * MatrixXd::Identity(_n, _n);
    } else {
        _mSigma = stats.secondMoments() / stats.count() -
            _mu * _mu.transpose() + lambda * MatrixXd::Identity(_n, _n);
    }

    updateCachedParameters();
}
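// A minimal sketch (not part of the Darwin API) of the same estimator computed
// directly from raw samples with Eigen: the first and second moments play the
// role of drwnSuffStats, the mean is the normalized first moment, and the
// covariance is E[x x^T] - mu mu^T plus lambda on the diagonal. The helper
// name estimateGaussianSketch is hypothetical.
#include <Eigen/Dense>
#include <vector>

static void estimateGaussianSketch(const std::vector<Eigen::VectorXd>& samples,
    double lambda, Eigen::VectorXd& mu, Eigen::MatrixXd& sigma)
{
    const int n = (int)samples.front().size();
    const double count = (double)samples.size();

    // accumulate first and second moments (what drwnSuffStats stores)
    Eigen::VectorXd first = Eigen::VectorXd::Zero(n);
    Eigen::MatrixXd second = Eigen::MatrixXd::Zero(n, n);
    for (const Eigen::VectorXd& x : samples) {
        first += x;
        second += x * x.transpose();
    }

    // normalize and regularize, mirroring drwnGaussian::train
    mu = first / count;
    sigma = second / count - mu * mu.transpose() +
        lambda * Eigen::MatrixXd::Identity(n, n);
}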
// training
double drwnFeatureWhitener::train(const drwnSuffStats& stats)
{
    _nFeatures = stats.size();
    _mu = VectorXd::Zero(_nFeatures);
    _beta = VectorXd::Zero(_nFeatures);

    for (int i = 0; i < _nFeatures; i++) {
        // per-feature mean and variance from the sufficient statistics
        _mu[i] = stats.sum(i) / (stats.count() + DRWN_DBL_MIN);
        double sigma = stats.sum2(i, i) / (stats.count() + DRWN_DBL_MIN) - _mu[i] * _mu[i];
        if (sigma < DRWN_DBL_MIN) {
            // (near-)constant feature: scale by its magnitude rather than its spread
            _beta[i] = (fabs(_mu[i]) > DRWN_DBL_MIN) ? (1.0 / _mu[i]) : 1.0;
            _mu[i] = 0.0;
        } else {
            _beta[i] = 1.0 / sqrt(sigma);
        }
    }

    DRWN_LOG_DEBUG("drwnFeatureWhitener::_mu = " << _mu.transpose());
    DRWN_LOG_DEBUG("drwnFeatureWhitener::_beta = " << _beta.transpose());

    _bValid = true;
    return _beta.sum();
}
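// A minimal sketch (not the Darwin API) of how the trained parameters would
// typically be applied: each feature is shifted by mu and scaled by beta so
// that non-degenerate features end up with zero mean and unit variance, while
// degenerate (constant) features are only rescaled. The helper name
// whitenSketch is hypothetical.
#include <Eigen/Dense>

static Eigen::VectorXd whitenSketch(const Eigen::VectorXd& x,
    const Eigen::VectorXd& mu, const Eigen::VectorXd& beta)
{
    // y_i = beta_i * (x_i - mu_i), applied component-wise
    return ((x - mu).array() * beta.array()).matrix();
}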