Vector4d compute_grad(const Vector4d& beta, const VectorXd& x, const VectorXd& y) {
    assert(x.size() == y.size());
    Vector4d grad;
    ArrayXd tmp;
    ArrayXd pred = model_fun(beta, x);
    // beta(0)
    tmp = 1 / (1 + exp(-(x.array() - beta(2)) / abs(beta(3))));
    tmp *= pred - y.array();
    grad(0) = tmp.sum() / x.size();
    // beta(1)
    tmp = 1 / (1 + exp(-(x.array() - beta(2)) / abs(beta(3))));
    tmp = 1 - tmp;
    tmp *= pred - y.array();
    grad(1) = tmp.sum() / x.size();
    // beta(2)
    tmp = -(beta(0) - beta(1)) * (exp((beta(2) - x.array()) / abs(beta(3))) / abs(beta(3)))
          / (1 + exp((beta(2) - x.array()) / abs(beta(3)))).pow(2);
    tmp *= pred - y.array();
    grad(2) = tmp.sum() / x.size();
    // beta(3): with u = (beta(2)-x)/|beta(3)|,
    // df/dbeta(3) = (beta(0)-beta(1)) * exp(u) * (beta(2)-x) * sgn(beta(3))
    //               / (beta(3)^2 * (1+exp(u))^2)
    tmp = (beta(0) - beta(1)) * (beta(2) - x.array()) * sgn(beta(3))
          * exp((beta(2) - x.array()) / abs(beta(3)))
          / (pow(beta(3), 2) * (1 + exp((beta(2) - x.array()) / abs(beta(3)))).pow(2));
    tmp *= pred - y.array();
    grad(3) = tmp.sum() / x.size();
    return grad;
}
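// For reference, a minimal sketch of the model the gradient above assumes (model_fun
// is not shown here, so this is an inferred reconstruction, not the original code):
// a four-parameter logistic with asymptotes beta(0)/beta(1), midpoint beta(2),
// and width |beta(3)|.
ArrayXd model_fun_sketch(const Vector4d& beta, const VectorXd& x) {
    ArrayXd s = 1 / (1 + exp(-(x.array() - beta(2)) / abs(beta(3))));
    return beta(0) * s + beta(1) * (1 - s);
}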
// Pseudo-inverse of a non-negative array d: entries below a relative threshold
// are zeroed, the rest inverted; m_r records the resulting rank.
inline ArrayXd lm::Dplus(const ArrayXd& d) {
    ArrayXd di(d.size());
    double comp(d.maxCoeff() * threshold());
    for (int j = 0; j < d.size(); ++j)
        di[j] = (d[j] < comp) ? 0. : 1. / d[j];
    m_r = (di != 0.).count();
    return di;
}
double gammaDist::aic(const ArrayXd& y, const ArrayXd& n, const ArrayXd& mu,
                      const ArrayXd& wt, double dev) const {
    double nn(wt.sum());
    double disp(dev / nn);
    double ans(0), invdisp(1. / disp);
    for (int i = 0; i < mu.size(); ++i)
        ans += wt[i] * ::Rf_dgamma(y[i], invdisp, mu[i] * disp, true);
    return -2. * ans + 2.;  // +2 for the estimated dispersion parameter
}
double computeBinWidth(const MatrixXd& positions) {
    // Assumes the first column of positions corresponds to the dominant eigenvector
    ArrayXd firstCol = positions.col(0).array();
    firstCol -= firstCol.mean();
    double SSE = firstCol.matrix().squaredNorm();
    double variance = SSE / firstCol.size();
    double stdDev = sqrt(variance);
    double targetBinsPerStd = (MAX_HASH_VALUE - HASH_VALUE_OFFSET) / TARGET_HASH_SPREAD_STDS;
    return stdDev / targetBinsPerStd;
}
VectorXd probutils::logsumexp(const MatrixXd& X) {
    const VectorXd mx = X.rowwise().maxCoeff();  // Get max of each row
    // Perform the sum(exp(x - mx)) part
    ArrayXd se = ((X.colwise() - mx).array().exp()).rowwise().sum();
    // return total log(sum(exp(x))) - hoping for return value optimisation
    return (se.log()).matrix() + mx;
}
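// Usage sketch (hypothetical data): each row of X holds unnormalised log-weights;
// logsumexp returns the per-row log normalising constant without underflow, so the
// normalised weights can be recovered even when exp() of the raw values underflows.
MatrixXd X(2, 3);
X << -1000., -1001., -1002.,
        -1.,    -2.,    -3.;
VectorXd lse = probutils::logsumexp(X);           // finite, despite exp(-1000) == 0
MatrixXd W = (X.colwise() - lse).array().exp();   // each row now sums to 1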
ArrayXd GoSUM::CModelVariables::hcPoint2ModelPoint(const ArrayXd &x) {
    if (x.size() != mvs.size())
        throw "GoSUM::CModelVariables::hcPoint2ModelPoint error: wrong dimension";
    int j, dim = int(x.size());
    ArrayXd X(dim);
    for (j = 0; j < dim; j++)
        X(j) = mvs[j].generateSampleValue(x(j));
    return X;
}
void Functions::modeProfileSinc(RefArrayXd predictions, const RefArrayXd covariates,
                                const double centroid, const double height,
                                const double resolution) {
    ArrayXd sincFunctionArgument = Functions::PI * (covariates - centroid) / resolution;
    // Note: sin(x)/x evaluates to NaN if a covariate coincides exactly with the centroid
    ArrayXd sincFunction = sincFunctionArgument.sin() / sincFunctionArgument;
    // Multiply the profile by the height in the PSD
    predictions = height * sincFunction.square();
}
void CMATLAB::matPut(string filename, const ArrayXd &X, string Xname) {
    MATFile *pmat = matOpen(filename.c_str(), "w");
    if (!pmat) throw "CMATLAB::matPut error: matOpen failed";
    mxArray *pa = mxCreateDoubleMatrix((mwSize)X.size(), 1, mxREAL);
    if (!pa) throw "CMATLAB::matPut error: mxCreateDoubleMatrix failed";
    memcpy((void *)mxGetPr(pa), (const void *)X.data(), X.size() * sizeof(double));
    if (matPutVariable(pmat, Xname.c_str(), pa) != 0)  // returns 0 on success
        throw "CMATLAB::matPut error: matPutVariable failed";
    mxDestroyArray(pa);
    if (matClose(pmat) != 0)  // returns 0 on success
        throw "CMATLAB::matPut error: matClose failed";
}
AFI::AFI(const bool prompt) : SteadyState() {
    if (prompt) cout << "Enter flip-angle (degrees): " << flush;
    double inFlip;
    QI::Read(cin, inFlip);
    m_flip = ArrayXd::Ones(1) * inFlip * M_PI / 180.;
    if (prompt) cout << "Enter TR1 & TR2 (seconds): " << flush;
    ArrayXd temp;
    QI::ReadArray(cin, temp);
    if (temp.rows() != 2) QI_EXCEPTION("Must enter 2 TR values.");
    m_TR1 = temp[0];
    m_TR2 = temp[1];
}
SSFPSimple::SSFPSimple(const ArrayXd &flip, const double TR, const ArrayXd &phi) :
    SteadyState()
{
    m_TR = TR;
    m_flip = (flip * M_PI / 180.).replicate(phi.rows(), 1);
    m_nphi = phi.size();
    m_phi = ArrayXd::Zero(m_flip.size());
    // One block of flip-angles per phase-increment
    int start = 0;
    for (int i = 0; i < phi.size(); i++) {
        m_phi.segment(start, flip.size()).setConstant(phi[i] * M_PI / 180.);
        start += flip.size();
    }
}
void CMATLAB::matGet(string filename, ArrayXd &X, string Xname) {
    MATFile *pmat = matOpen(filename.c_str(), "r");
    if (!pmat) throw "CMATLAB::matGet error: matOpen failed";
    mxArray *pa = matGetVariable(pmat, Xname.c_str());
    if (!pa) throw "CMATLAB::matGet error: matGetVariable failed";
    int N = (int)mxGetNumberOfElements(pa);
    if (N <= 0) throw "CMATLAB::matGet error: mxGetNumberOfElements failed";
    X.resize(N);
    // Copy out of the mxArray into X (N doubles)
    memcpy((void *)X.data(), (const void *)mxGetPr(pa), N * sizeof(double));
    mxDestroyArray(pa);
    if (matClose(pmat) != 0)  // returns 0 on success
        throw "CMATLAB::matGet error: matClose failed";
}
int gesdd(MatrixXd& A, ArrayXd& S, MatrixXd& Vt) {
    int info, mone = -1, m = A.rows(), n = A.cols();
    std::vector<int> iwork(8 * n);
    double wrk;
    if (m < n || S.size() != n || Vt.rows() != n || Vt.cols() != n)
        throw std::invalid_argument("dimension mismatch in gesdd");
    // Workspace query (lwork = -1), then the actual decomposition
    F77_CALL(dgesdd)("O", &m, &n, A.data(), &m, S.data(), A.data(), &m,
                     Vt.data(), &n, &wrk, &mone, &iwork[0], &info);
    int lwork(wrk);
    std::vector<double> work(lwork);
    F77_CALL(dgesdd)("O", &m, &n, A.data(), &m, S.data(), A.data(), &m,
                     Vt.data(), &n, &work[0], &lwork, &iwork[0], &info);
    return info;
}
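// Usage sketch: with jobz = "O" and m >= n, LAPACK's dgesdd overwrites A with the
// first n columns of U, so afterwards A * S.asDiagonal() * Vt reconstructs the input.
MatrixXd A = MatrixXd::Random(5, 3);
MatrixXd Acopy = A;                                 // keep the original; A is overwritten
ArrayXd  S(3);
MatrixXd Vt(3, 3);
int info = gesdd(A, S, Vt);                         // info == 0 on success
MatrixXd recon = A * S.matrix().asDiagonal() * Vt;  // ~ Acopy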
NOMAD::Point CMADS::ArrayXd2NOMADPoint(const ArrayXd &x) {
    int i, n = int(x.size());
    NOMAD::Point p(n);
    for (i = 0; i < n; i++) p[i] = x(i);
    return p;
}
void GoSUM::CModelVariables::setNTuple(const ArrayXd &X, int _at) {
    if (X.size() != mvs.size())
        throw "GoSUM::CModelVariables::setNTuple error: bad nTuple size";
    int i, N = int(mvs.size());
    for (i = 0; i < N; i++) mvs[i].setSampleValue(X(i), _at);
}
double negativeBinomialDist::aic(const ArrayXd& y, const ArrayXd& n, const ArrayXd& mu,
                                 const ArrayXd& wt, double dev) const {
    return 2. * (wt * (y + d_theta) * (mu + d_theta).log()
                 - y * mu.log()
                 + (y + 1).unaryExpr(Lgamma<double>())
                 - d_theta * std::log(d_theta)
                 + lgamma(d_theta)
                 - (d_theta + y).unaryExpr(Lgamma<double>())).sum();
}
void CMT::HistogramNonlinearity::setParameters(const ArrayXd& parameters) {
    if (parameters.size() != mHistogram.size())
        throw Exception("Wrong number of parameters.");
    for (int i = 0; i < mHistogram.size(); ++i)
        mHistogram[i] = parameters[i];
}
double glmDist::aic(const ArrayXd& y, const ArrayXd& n, const ArrayXd& mu,
                    const ArrayXd& wt, double dev) const {
    int nn = mu.size();
    // Evaluate the family's aic() function in R on copies of the C++ arrays
    double ans = ::Rf_asReal(::Rf_eval(::Rf_lang6(as<SEXP>(d_aic),
                                as<SEXP>(NumericVector(y.data(), y.data() + nn)),
                                as<SEXP>(NumericVector(n.data(), n.data() + nn)),
                                as<SEXP>(NumericVector(mu.data(), mu.data() + nn)),
                                as<SEXP>(NumericVector(wt.data(), wt.data() + nn)),
                                PROTECT(::Rf_ScalarReal(dev))),
                             d_rho));
    UNPROTECT(1);
    return ans;
}
Trajectory Trajectory::generateMinJerkTrajectory(const VectorXd& ts, const VectorXd& y_from,
                                                 const VectorXd& y_to) {
    int n_time_steps = ts.size();
    int n_dims = y_from.size();
    MatrixXd ys(n_time_steps, n_dims), yds(n_time_steps, n_dims), ydds(n_time_steps, n_dims);
    double D = ts[n_time_steps - 1];   // total duration (assumes ts[0] == 0)
    ArrayXd tss = (ts / D).array();    // normalised time in [0, 1]
    ArrayXd A = y_to.array() - y_from.array();
    for (int i_dim = 0; i_dim < n_dims; i_dim++) {
        // http://noisyaccumulation.blogspot.fr/2012/02/how-to-decompose-2d-trajectory-data.html
        ys.col(i_dim)   = y_from[i_dim] + A[i_dim] * (6 * tss.pow(5) - 15 * tss.pow(4) + 10 * tss.pow(3));
        yds.col(i_dim)  = (A[i_dim] / D)       * (30 * tss.pow(4) - 60 * tss.pow(3) + 30 * tss.pow(2));
        ydds.col(i_dim) = (A[i_dim] / (D * D)) * (120 * tss.pow(3) - 180 * tss.pow(2) + 60 * tss);
    }
    return Trajectory(ts, ys, yds, ydds);
}
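// Usage sketch: a 1-D minimum-jerk reach from 0 to 1 over 0.5 s at 100 Hz. Note the
// polynomials above implicitly assume ts starts at 0, since D is its last element.
VectorXd ts     = VectorXd::LinSpaced(51, 0.0, 0.5);
VectorXd y_from = VectorXd::Zero(1);
VectorXd y_to   = VectorXd::Ones(1);
Trajectory traj = Trajectory::generateMinJerkTrajectory(ts, y_from, y_to);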
const ArrayXd glmDist::devResid(const ArrayXd &y, const ArrayXd &mu, const ArrayXd &wt) const {
    int n = mu.size();
    return as<ArrayXd>(::Rf_eval(::Rf_lang4(as<SEXP>(d_devRes),
                           as<SEXP>(NumericVector(y.data(), y.data() + n)),
                           as<SEXP>(NumericVector(mu.data(), mu.data() + n)),
                           as<SEXP>(NumericVector(wt.data(), wt.data() + n))),
                        d_rho));
}
double binomialDist::aic(const ArrayXd& y, const ArrayXd& n, const ArrayXd& mu,
                         const ArrayXd& wt, double dev) const {
    ArrayXd m((n > 1).any() ? n : wt);               // binomial sample sizes
    ArrayXd yy((m * y).unaryExpr(Round<double>()));  // success counts, rounded to integers
    m = m.unaryExpr(Round<double>());
    double ans(0.);
    for (int i = 0; i < mu.size(); ++i)
        ans += (m[i] <= 0. ? 0. : wt[i] / m[i]) * ::Rf_dbinom(yy[i], m[i], mu[i], true);
    return -2. * ans;
}
double Functions::logGaussLikelihood(const RefArrayXd observations, const RefArrayXd predictions,
                                     const RefArrayXd uncertainties) {
    if ((observations.size() != predictions.size()) || (observations.size() != uncertainties.size())) {
        cerr << "Array dimensions do not match. Quitting program." << endl;
        exit(EXIT_FAILURE);
    }
    ArrayXd delta = ((observations - predictions) * (observations - predictions))
                    / (uncertainties * uncertainties);
    ArrayXd lambda0 = -1. * log(sqrt(2. * PI) * uncertainties);
    ArrayXd lambda = lambda0 - 0.5 * delta;
    return lambda.sum();
}
ArrayXd GoSUM::CModelVariables::expandNTuple(const ArrayXd &X) const {
    if (X.size() != mvs.size())
        throw "GoSUM::CModelVariables::expandNTuple error: wrong X size";
    int i, j, k, N = int(mvs.size()), eN = expandedSize(), exsize;
    ArrayXd eX = ArrayXd::Zero(eN);
    for (i = j = 0; i < N; i++, j += exsize) {
        exsize = mvs[i].expandedSize();
        if (exsize == 1) {
            eX(j) = X(i);                 // continuous variable: copy through
        } else {
            for (k = 0; k < exsize; k++)  // categorical variable: one-hot encode X(i)
                if (k == X(i)) { eX(j + k) = 1.; break; }
        }
    }
    return eX;
}
void merPredD::updateXwts(const ArrayXd& sqrtXwt) {
    if (d_Xwts.size() != sqrtXwt.size())
        throw invalid_argument("updateXwts: dimension mismatch");
    std::copy(sqrtXwt.data(), sqrtXwt.data() + sqrtXwt.size(), d_Xwts.data());
    if (sqrtXwt.size() == d_V.rows()) {  // W is diagonal
        d_V = d_Xwts.asDiagonal() * d_X;
        for (int j = 0; j < d_N; ++j)
            for (MSpMatrixd::InnerIterator Utj(d_Ut, j), Ztj(d_Zt, j);
                 Utj && Ztj; ++Utj, ++Ztj)
                Utj.valueRef() = Ztj.value() * d_Xwts.data()[j];
    } else {
        SpMatrixd W(d_V.rows(), sqrtXwt.size());
        const double *pt = sqrtXwt.data();
        W.reserve(sqrtXwt.size());
        for (Index j = 0; j < W.cols(); ++j, ++pt) {
            W.startVec(j);
            W.insertBack(j % d_V.rows(), j) = *pt;
        }
        W.finalize();
        d_V = W * d_X;
        SpMatrixd Ut(d_Zt * W.adjoint());
        if (Ut.cols() != d_Ut.cols())
            throw std::runtime_error("Size mismatch in updateXwts");
        // More complex code to handle the pruning of zeros
        MVec(d_Ut.valuePtr(), d_Ut.nonZeros()).setZero();
        for (int j = 0; j < d_Ut.outerSize(); ++j) {
            MSpMatrixd::InnerIterator lhsIt(d_Ut, j);
            for (SpMatrixd::InnerIterator rhsIt(Ut, j); rhsIt; ++rhsIt, ++lhsIt) {
                Index k(rhsIt.index());
                while (lhsIt && lhsIt.index() != k) ++lhsIt;
                if (lhsIt.index() != k)
                    throw std::runtime_error("Pattern mismatch in updateXwts");
                lhsIt.valueRef() = rhsIt.value();
            }
        }
    }
    d_VtV.setZero().selfadjointView<Eigen::Upper>().rankUpdate(d_V.adjoint());
    updateL();
}
const ArrayXd binomialDist::devResid(const ArrayXd& y, const ArrayXd& mu, const ArrayXd& wt) const {
    int debug = 0;
    if (debug) {
        for (int i = 0; i < mu.size(); ++i) {
            double r = 2. * wt[i] * (Y_log_Y(y[i], mu[i]) + Y_log_Y(1. - y[i], 1. - mu[i]));
            if (r != r) {
                // attempt to detect `nan` (needs cross-platform testing, but should compile
                // everywhere whether or not it actually works)
                Rcpp::Rcout << "(bD) " << "nan @ pos " << i << ": y= " << y[i]
                            << "; mu=" << mu[i] << "; wt=" << wt[i]
                            << "; 1-y=" << 1. - y[i] << "; 1-mu=" << 1. - mu[i]
                            << "; ylogy=" << Y_log_Y(y[i], mu[i])
                            << "; cylogy=" << Y_log_Y(1. - y[i], 1. - mu[i]) << std::endl;
            }
        }
    }
    return 2. * wt * (Y_log_Y(y, mu) + Y_log_Y(1. - y, 1. - mu));
}
double MeanNormalLikelihood::logValue(RefArrayXd modelParameters) {
    unsigned long n = observations.size();
    ArrayXd predictions(n);
    predictions.setZero();
    model.predict(predictions, modelParameters);
    ArrayXd argument = (observations - predictions).square() * weights;
    double lambda0 = lgammal(n / 2.) - log(2.) - (n / 2.) * log(Functions::PI)
                     + 0.5 * weights.log().sum();
    double lambda = lambda0 - (n / 2.) * log(argument.sum());
    return lambda;
}
void NestedSampler::setLogWeightOfPosteriorSample(ArrayXd newLogWeightOfPosteriorSample) {
    int Nsamples = newLogWeightOfPosteriorSample.size();
    logWeightOfPosteriorSample.resize(Nsamples);
    logWeightOfPosteriorSample = newLogWeightOfPosteriorSample;
}
const ArrayXd glmLink::muEta(const ArrayXd &eta) const {
    return as<ArrayXd>(::Rf_eval(::Rf_lang2(as<SEXP>(d_muEta),
                           as<SEXP>(Rcpp::NumericVector(eta.data(), eta.data() + eta.size()))),
                        d_rho));
}
const ArrayXd glmDist::variance(const ArrayXd &mu) const {
    return as<ArrayXd>(::Rf_eval(::Rf_lang2(as<SEXP>(d_variance),
                           as<SEXP>(Rcpp::NumericVector(mu.data(), mu.data() + mu.size()))),
                        d_rho));
}
MatrixXd NumInt::GuassLobattoQaudrature(const int &N) {
    int N0 = N;
    double a = 1.0, b = 1.0;    // Jacobi (1,1): interior Lobatto points are its roots
    double a1 = 2.0, b1 = 2.0;  // Jacobi (2,2): used for the derivative
    if (N0 >= 1) { N0 = N0 - 2; }
    // Build the pointers
    int* _N = &N0;
    double* _a = &a;   double* _b = &b;
    double* _a1 = &a1; double* _b1 = &b1;
    // Initial guess - Chebyshev-Gauss-Lobatto points
    ArrayXd xu;
    xu.setLinSpaced(N0 + 2, 0.0, N0 + 1);
    ArrayXd x = -cos(xu / (N0 + 1) * M_PI);
    // Allocate space for points and weights
    VectorXd z = VectorXd::Zero(x.size());
    VectorXd w = VectorXd::Zero(x.size());
    double x0, x1, del;
    double* _x0 = &x0;
    double* _x1 = &x1;
    double deps = std::numeric_limits<double>::epsilon();
    for (int k = 0; k <= x.size() - 1; k++) {
        x0 = x(k);
        del = 2.0;
        while (fabs(del) > deps) {
            // Polynomial deflation: exclude the already determined roots
            VectorXd s1 = x.head(k);
            VectorXd ones = VectorXd::Constant(s1.size(), 1);
            double s = (ones.cwiseQuotient((x0 - s1.array()).matrix())).sum();
            // Compute Jacobi polynomial p(a,b)
            JacobiPolynomials J(_N, _a, _b, _x0, false);
            VectorXd p = J.getJacobiPolynomials();
            // Compute Jacobi polynomial p(a+1,b+1) for derivative dp(a,b)
            JacobiPolynomials J1(_N, _a1, _b1, _x0, false);
            VectorXd p1 = J1.getJacobiPolynomials();
            VectorXd dp = VectorXd::Zero(p1.size());
            dp(0) = 0;
            // Compute derivative of Jacobi polynomial p(a,b)
            for (int j = 0; j <= *_N - 1; j++)
                dp(j + 1) = 0.5 * (*_a + *_b + j + 2) * p1(j);
            // Gauss-Lobatto points are roots of (1-x^2)*dp, hence
            double nom  = (1 - x0 * x0) * p(N0);
            double dnom = -2 * x0 * p(N0) + (1 - x0 * x0) * dp(N0);
            del = -nom / (dnom - nom * s);
            x1 = x0 + del;
            x0 = x1;
        }
        z(k) = x1;
        // Weight w_k = 2 / (n(n+1) P_n(x_k)^2), with P_n the Legendre (Jacobi (0,0)) polynomial
        double a2 = 0, b2 = 0;
        int N1 = N0 + 1;
        double* _a2 = &a2; double* _b2 = &b2;
        int* _N1 = &N1;
        JacobiPolynomials J(_N1, _a2, _b2, _x1, false);
        VectorXd p = J.getJacobiPolynomials();
        w(k) = 2.0 / ((N1) * (N1 + 1) * p(N1) * p(N1));
    }
    // Store points and weights in the two columns of the result
    MatrixXd zw(N0 + 2, 2);
    zw.col(0) = z;
    zw.col(1) = w;
    return zw;
}
MatrixXd NumInt::GuassQaudrature(const int& N, double& a, double& b) {
    int N0 = N - 1;
    const int N1 = N0 + 1;
    const int N2 = N0 + 2;
    VectorXd xu;
    xu.setLinSpaced(N1, -1.0, 1.0);
    // Legendre-Gauss-Vandermonde matrix and its derivative
    MatrixXd L = MatrixXd::Zero(N1, N2);
    VectorXd Lp = VectorXd::Zero(N1);
    // Initial guess for the nodes
    VectorXd dum;
    dum.setLinSpaced(N1, 0.0, N0);
    ArrayXd y = cos((2 * dum.array() + 1) * M_PI / (2 * N0 + 2))
                + (0.27 / N1) * sin(M_PI * xu.array() * N0 / N2);
    double deps = std::numeric_limits<double>::epsilon();
    // Newton iteration on all nodes at once (y0 = 2 forces at least one pass)
    ArrayXd y0 = ArrayXd::Constant(N1, 2);
    while ((y - y0).abs().matrix().maxCoeff() > deps) {
        L.col(0) = VectorXd::Constant(N1, 1);
        Lp = VectorXd::Zero(N1);
        L.col(1) = y;
        // Three-term recurrence for the Legendre polynomials
        for (int k = 1; k != N1; k++)
            L.col(k + 1) = ((2 * k + 1) * L.col(k).cwiseProduct(y.matrix()) - k * L.col(k - 1)) / (k + 1);
        Lp = (N2) * (L.col(N0) - L.col(N1).cwiseProduct(y.matrix()))
                 .cwiseQuotient((1 - y.square()).matrix());
        y0 = y;
        y = y0 - (L.col(N1).cwiseQuotient(Lp)).array();
    }
    // Gauss points mapped from [-1,1] to [a,b]
    VectorXd z(N1);
    z = ((a * (1 - y) + b * (1 + y)) / 2).matrix();
    // Gauss weights
    VectorXd w(N1);
    w = (b - a) / (((1 - y.square()).matrix()).cwiseProduct(Lp.cwiseProduct(Lp))).array()
        * pow((double)N2 / N1, 2);
    // Store points and weights in the two columns of the result
    MatrixXd zw(N1, 2);
    zw.col(0) = z;
    zw.col(1) = w;
    return zw;
}
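// Usage sketch (assuming NumInt is default-constructible; adjust to however NumInt is
// instantiated): integrate f(x) = x^2 over [0, 2] (exact value 8/3) with the Gauss rule
// above; column 0 of the returned matrix holds the nodes and column 1 the weights.
double lo = 0.0, hi = 2.0;
NumInt quad;
MatrixXd zw = quad.GuassQaudrature(5, lo, hi);
double integral = (zw.col(0).array().square() * zw.col(1).array()).sum();  // ~ 8/3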