// Example #1
// 0
//! Estimates the Gaussian's mean and covariance from sufficient statistics,
//! adding \p lambda to the covariance diagonal as a regularizer.
void drwnGaussian::train(const drwnSuffStats& stats, double lambda)
{
    DRWN_ASSERT_MSG(stats.size() == _n, stats.size() << " != " << _n);

    const double count = stats.count();

    // maximum-likelihood mean: normalized first moments
    _mu = stats.firstMoments() / count;

    // covariance: E[x x^T] - mu mu^T + lambda I
    if (stats.isDiagonal()) {
        // diagonal statistics: only the diagonal needs correcting
        _mSigma = stats.secondMoments().array() / count;
        _mSigma -= _mu.array().square().matrix().asDiagonal();
        _mSigma += lambda * MatrixXd::Identity(_n, _n);
    } else {
        _mSigma = stats.secondMoments() / count;
        _mSigma -= _mu * _mu.transpose();
        _mSigma += lambda * MatrixXd::Identity(_n, _n);
    }

    // refresh cached derived quantities
    updateCachedParameters();
}
// Example #2
// 0
//! Fits a per-feature whitening transform (shift by mean, scale by inverse
//! standard deviation) from sufficient statistics. Features with (near-)zero
//! variance are instead scaled toward one without centering. Returns the sum
//! of the per-feature scale factors.
double drwnFeatureWhitener::train(const drwnSuffStats& stats)
{
    _nFeatures = stats.size();
    _mu = VectorXd::Zero(_nFeatures);
    _beta = VectorXd::Zero(_nFeatures);

    // guard against a zero sample count
    const double safeCount = stats.count() + DRWN_DBL_MIN;

    for (int j = 0; j < _nFeatures; j++) {
        const double mean = stats.sum(j) / safeCount;
        const double variance = stats.sum2(j, j) / safeCount - mean * mean;

        _mu[j] = mean;
        if (variance < DRWN_DBL_MIN) {
            // (near-)constant feature: map its value to one rather than
            // centering it away to zero
            _beta[j] = (fabs(mean) > DRWN_DBL_MIN) ? (1.0 / mean) : 1.0;
            _mu[j] = 0.0;
        } else {
            _beta[j] = 1.0 / sqrt(variance);
        }
    }

    DRWN_LOG_DEBUG("drwnFeatureWhitener::_mu = " << _mu.transpose());
    DRWN_LOG_DEBUG("drwnFeatureWhitener::_beta = " << _beta.transpose());

    _bValid = true;
    return _beta.sum();
}
// Example #3
// 0
//! Constructs a Gaussian whose dimensionality matches the supplied
//! sufficient statistics and fits its parameters from them.
drwnGaussian::drwnGaussian(const drwnSuffStats& stats) :
    _n(stats.size()), _invSigma(NULL), _logZ(0.0), _mL(NULL)
{
    // allocate internal state for an _n-dimensional model, then estimate
    // mean and covariance from the statistics (DRWN_EPSILON regularizer)
    initialize(_n);
    train(stats, DRWN_EPSILON);
}
// Example #4
// 0
double drwnGaussian::klDivergence(const drwnSuffStats& stats) const
{
    DRWN_ASSERT(stats.size() == _n);
    return klDivergence(drwnGaussian(stats));
}