// Analytic gradient of the (half) mean squared error of a four-parameter
// logistic curve with respect to its parameters.
//
// The partial derivatives used below imply the model
//     f(x) = beta(1) + (beta(0) - beta(1)) * s(x),
//     s(x) = 1 / (1 + exp(-(x - beta(2)) / |beta(3)|)),
// i.e. beta(0)/beta(1) are the upper/lower plateaus, beta(2) the center and
// |beta(3)| the width.  model_fun(beta, x) evaluates f (defined elsewhere).
//
// Parameters:
//   beta - current parameter vector (upper, lower, center, width)
//   x    - abscissa values
//   y    - observations; must have the same length as x
// Returns the 4-vector grad(k) = mean_i( df/dbeta_k (x_i) * (f(x_i) - y_i) ).
Vector4d compute_grad(Vector4d beta, VectorXd x, VectorXd y)
{
    // Check the precondition before any data are used (previously the model
    // was already evaluated before the assert ran).
    assert(x.size() == y.size());

    const double n = static_cast<double>(x.size());
    // std::abs guarantees the double overload; unqualified abs may bind to
    // the integer ::abs depending on included headers.
    const double width = std::abs(beta(3));

    const ArrayXd pred = model_fun(beta, x);
    const ArrayXd residual = pred - y.array();   // common factor dL/df

    // Logistic response s and the exponential E = exp((beta(2)-x)/width),
    // for which s = 1/(1+E).  Hoisted: the original recomputed the sigmoid
    // separately for grad(0) and grad(1) and E twice more.
    const ArrayXd s = 1. / (1. + exp(-(x.array() - beta(2)) / width));
    const ArrayXd E = exp((beta(2) - x.array()) / width);
    const ArrayXd onePlusESq = (1. + E).pow(2);

    Vector4d grad;

    // df/dbeta(0) = s
    grad(0) = (s * residual).sum() / n;

    // df/dbeta(1) = 1 - s
    grad(1) = ((1. - s) * residual).sum() / n;

    // df/dbeta(2) = -(beta(0)-beta(1)) * E / (width * (1+E)^2)
    grad(2) = (-(beta(0) - beta(1)) * (E / width) / onePlusESq * residual).sum() / n;

    // df/dbeta(3): with u = (x-beta(2))/|beta(3)| and
    // du/dbeta(3) = (beta(2)-x)*sgn(beta(3))/beta(3)^2, the chain rule gives
    //   df/dbeta(3) = (beta(0)-beta(1)) * E/(1+E)^2 * (beta(2)-x)*sgn(beta(3))/beta(3)^2.
    // BUG FIX: the previous expression used (beta(2)-x)^2 / |beta(3)|^3 and
    // omitted the exponential factor E, which is inconsistent with the
    // derivative used for grad(2) above (same s'(u), different inner factor).
    grad(3) = ((beta(0) - beta(1)) * (beta(2) - x.array()) * sgn(beta(3)) * E
               / (width * width * onePlusESq) * residual).sum() / n;

    return grad;
}
//@{ double gammaDist::aic (const ArrayXd& y, const ArrayXd& n, const ArrayXd& mu, const ArrayXd& wt, double dev) const { double nn(wt.sum()); double disp(dev/nn); double ans(0), invdisp(1./disp); for (int i = 0; i < mu.size(); ++i) ans += wt[i] * ::Rf_dgamma(y[i], invdisp, mu[i] * disp, true); return -2. * ans + 2.; }
/**
 * Log-likelihood of the observations under the mean normal likelihood,
 * evaluated for one point in parameter space.
 *
 * The model is evaluated at the given parameters, the weighted sum of squared
 * residuals is formed, and the (analytically marginalized) log-likelihood is
 * returned as a normalization term minus (N/2) * log of that sum.
 *
 * @param modelParameters parameter values at which to evaluate the model
 * @return the natural logarithm of the likelihood value
 */
double MeanNormalLikelihood::logValue(RefArrayXd modelParameters)
{
    const unsigned long nPoints = observations.size();

    // Evaluate the model predictions for the current parameter set.
    ArrayXd theoreticalValues = ArrayXd::Zero(nPoints);
    model.predict(theoreticalValues, modelParameters);

    // Weighted squared residuals between data and model.
    ArrayXd weightedSquares = (observations - theoreticalValues).square() * weights;

    // Parameter-independent normalization term (same term order as the
    // published formula).
    const double normalization = lgammal(nPoints/2.) - log(2)
                               - (nPoints/2.)*log(Functions::PI)
                               + 0.5*weights.log().sum();

    return normalization - (nPoints/2.)*log(weightedSquares.sum());
}
double Functions::logGaussLikelihood(const RefArrayXd observations, const RefArrayXd predictions, const RefArrayXd uncertainties) { if ((observations.size() != predictions.size()) || (observations.size() != uncertainties.size())) { cerr << "Array dimensions do not match. Quitting program." << endl; exit(EXIT_FAILURE); } ArrayXd delta; ArrayXd lambda0; ArrayXd lambda; delta = ((observations - predictions)*(observations - predictions)) / (uncertainties*uncertainties); lambda0 = -1.*log(sqrt(2.*PI) * uncertainties); lambda = lambda0 -0.5*delta; return lambda.sum(); }
//@{ double inverseGaussianDist::aic (const ArrayXd& y, const ArrayXd& n, const ArrayXd& mu, const ArrayXd& wt, double dev) const { double wtsum(wt.sum()); return wtsum * (std::log(dev/wtsum * 2. * M_PI) + 1.) + 3. * (y.log() * wt).sum() + 2.; }