// [[Rcpp::export]]
double alphapost(colvec& y,mat& X, int k, colvec t, colvec& betadraw, 
                 mat& Rdraw, mat& S, double gammadraw, double alpha, 
                 double delta, double eta){
  
  int n = X.n_rows, l = n/k;
  
  mat invR = inv(Rdraw);
  mat invS = inv(S);
  mat invSig = invS*invR*invS;
  
  double empSS = 0;
  
  for (int j=0; j<n; j+=k){
    
    colvec res = y.rows(j,j+k-1)-X.rows(j,j+k-1)*betadraw;
    empSS += as_scalar(trans(res)*invSig*res);
    
  }
  
  //colvec alphat = -alpha*t, gammat = - gammadraw/t;
  //double maxalphat = max(alphat), maxgammat = max(gammat);
  double zalpha = max(-alpha*t)/delta, 
    zgamma = max(-gammadraw/t)/eta;
  
  double dalpha = R::dnorm(zalpha,0.0,1.0,0);
  double dgamma = R::dnorm(zgamma,0.0,1.0,0);
  
  double logval = -0.5*l*log(det(S*Rdraw*S)) - 0.5*empSS
                  - 0.5*pow(alpha/eta, 2)
                  - log(1 - dalpha) - log(1 - dgamma);
  
  return logval;
  
}
Example #2
// [[Rcpp::export]]
colvec ST3a(colvec z, double gam)
{
  int n = z.size();
  colvec z1(n);
  for (int i = 0; i < n; ++i)
  {
    double z11 = z(i);
    z1(i) = ST1a(z11, gam);
  }
  return z1;
}
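// ST1a is not shown in this snippet; a minimal sketch, assuming it is the usual
// scalar soft-thresholding operator sign(x) * max(|x| - gam, 0) used in lasso
// coordinate updates (this implementation is an assumption, not the original):
double ST1a(double x, double gam)
{
  if (x > gam)  return x - gam;
  if (x < -gam) return x + gam;
  return 0.0;
}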
Example #3
// [[Rcpp::export]]
cube gamloopP(NumericVector beta_,const mat Y,const mat Z, const colvec gammgrid, const double lassothresh,const colvec YMean2,const colvec ZMean2,const colvec znorm,mat BFoo){

  // Data is read in from R as Armadillo objects directly (I did not initially know this was possible).

  // set the number of OpenMP threads
  omp_set_num_threads(4);

  // set the OpenMP stack size; not strictly necessary unless you run into memory issues
  setenv("OMP_STACKSIZE", "64M", 1);


const int n2=BFoo.n_rows;
const int k2=BFoo.n_cols;

// coefficient matrices
mat B1 = BFoo;
mat B1F2 = BFoo;
mat b2 = B1;
const int ngridpts = gammgrid.size();

// These are typically defined inside the "lassocore" function, but doing so causes
// problems with OpenMP, so they are defined out here and passed in to the function.
// const int ngridpts=gamm.size();
cube bcube(beta_.begin(), n2, k2, ngridpts, false);
cube bcube2(n2, k2 + 1, ngridpts);
bcube2.fill(0.0);
// const colvec ZN = as<colvec>(znorm2);
colvec nu = zeros<colvec>(n2);
uvec m = ind(k2, 1);   // ind() is a helper defined elsewhere
double gam = 0;
int i;
// B1, B1F2, m, and nu must keep their initial values in each thread, so they are
// declared firstprivate rather than private
#pragma omp parallel for shared(bcube,bcube2,b2) firstprivate(B1F2,B1,m,nu) private(i,gam) default(none) schedule(auto)
  for (i = 0; i < ngridpts; ++i) {
    gam = gammgrid(i);
    // the previous coefficient matrix is read in as a "warm start"
    B1F2 = bcube.slice(i);
    // the actual algorithm is applied here
    B1 = lassocore(Y, Z, B1F2, b2, gam, lassothresh, znorm, m, k2, n2);
    // the intercept is calculated
    nu = YMean2 - B1 * ZMean2;
    bcube2.slice(i) = mat(join_horiz(nu, B1));
  }
  return(bcube2);
}
// [[Rcpp::export]]
double Rpost(colvec& y, mat& X, int k, colvec& betadraw, 
             mat& Sdraw, mat& R){
  
  int n = X.n_rows, l = n/k;
  
  mat invR = inv(R);
  mat invS = inv(Sdraw);
  mat invSig = invS*invR*invS;
  
  double empSS = 0;
  
  for (int j=0; j<n; j+=k){
    
    colvec res = y.rows(j,j+k-1)-X.rows(j,j+k-1)*betadraw;
    empSS += as_scalar(trans(res)*invSig*res);
    
  }
  
  return (-0.5*l*log(det(R))-0.5*empSS);
  
}
// [[Rcpp::export]]
colvec betaUpdate(colvec& y, mat& X, int k, mat& Sdraw, 
                  mat& Rdraw, colvec& B0, mat& sigB){
  
  int n = X.n_rows, p = X.n_cols;
  mat VB = zeros<mat>(p,p);
  colvec muB = zeros<colvec>(p);
  
  mat invR = inv(Rdraw);
  mat invS = inv(Sdraw);
  mat invSig = invS*invR*invS;
  
  for(int j=0; j<n; j+=k){
    VB += trans(X.rows(j,j+k-1))*invSig*X.rows(j,j+k-1);
    muB += trans(X.rows(j,j+k-1))*invSig*y.rows(j,j+k-1);
  }
  
  mat V1 = inv(inv(sigB)+VB);
  colvec mu1 = V1*(inv(sigB)*B0+muB);
  
  colvec betadraw = vectorise(mvrnorm(1, mu1,V1));
  
  return betadraw;
  
}
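// mvrnorm is not shown in this snippet; a minimal sketch, assuming it returns an
// n x p matrix whose rows are draws from N(mu, Sigma), generated via a Cholesky
// factor (the name and signature are assumptions based on the call above):
mat mvrnorm(int n, const colvec& mu, const mat& Sigma){
  
  int p = mu.n_elem;
  mat Z = randn<mat>(n, p);                        // n x p standard normal draws
  return repmat(mu.t(), n, 1) + Z * chol(Sigma);   // chol() gives R with R.t()*R = Sigma
  
}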
// 
// Written by Conrad Sanderson - http://conradsanderson.id.au


#include <armadillo>
#include "catch.hpp"

using namespace arma;


TEST_CASE("fn_cumsum_1")
  {
  colvec a = linspace<colvec>(1,5,6);
  rowvec b = linspace<rowvec>(1,5,6);
  
  colvec c = { 1.0000, 2.8000, 5.4000, 8.8000, 13.0000, 18.0000 };
  
  REQUIRE( accu(abs(cumsum(a) - c    )) == Approx(0.0) );
  REQUIRE( accu(abs(cumsum(b) - c.t())) == Approx(0.0) );
  
  REQUIRE_THROWS( b = cumsum(a) );
  }



TEST_CASE("fn_cumsum_2")
  {
  mat A =
    {
    { -0.78838,  0.69298,  0.41084,  0.90142 },
    {  0.49345, -0.12020,  0.78987,  0.53124 },
Example #7
// [[Rcpp::export]]
mat randWalkTrain(const mat& X, const mat& A, const colvec& Z,  
                  const colvec& theta, int trainit, 
                  const colvec& sigma, const colvec& etaRange,
                  const mat& V, bool verbose)
{
    double logAccept;
    int p = theta.size() - 1;
    int n = X.n_rows;
    colvec eigval;
    mat eigvec;
    eig_sym(eigval, eigvec, V);
    mat R = eigvec * diagmat(sqrt(eigval));
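    // R satisfies R * R.t() = V, so R * normvec below yields proposal increments
    // with covariance V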
    colvec Y = rautologistic_(X, A, theta);
    colvec Ynew(n);
    colvec thetaold = theta;
    colvec thetanew = theta;
    colvec normvec(p + 1);
    mat estimates(trainit + 1, p + 1);
    estimates.row(0) = theta.t();
    colvec beta = theta.subvec(0, p - 1);
    double eta = theta[p];
    colvec Xbeta = X * beta;
    colvec mu = exp(Xbeta);
    mu = mu / (1 + mu);
    double Zsum = as_scalar(Z.t() * A * Z);
    int onepct = 0.01 * trainit;
    if (onepct == 0)
        onepct = 1;   // avoid modulo by zero below when trainit < 100
    int pct = 1;
    RNGScope scope;
    Function rnorm("rnorm"),
             runif("runif");
    for (int i = 1; i < trainit + 1; i++)
    {
        do
        {
            normvec = as<colvec>(rnorm(p + 1));
            thetanew = thetaold + R * normvec;
        }
        while(thetanew[p] < etaRange[0] || thetanew[p] > etaRange[1]);
        Ynew = rautologistic_(X, A, thetanew);
        colvec betaold = thetaold.subvec(0, p - 1);
        colvec betanew = thetanew.subvec(0, p - 1);
        double etaold = thetaold[p];
        double etanew = thetanew[p];
        colvec Xbetaold = X * betaold;
        colvec Xbetanew = X * betanew;
        colvec muold = exp(Xbetaold);
        muold = muold / (1 + muold);
        colvec munew = exp(Xbetanew);
        munew = munew / (1 + munew);
        double Ynewsum = as_scalar(Ynew.t() * A * Ynew);
        double Ysum = as_scalar(Y.t() * A * Y);
        logAccept = as_scalar(0.5 * (eta * (Ynewsum - Ysum) + etanew * (Zsum - Ynewsum) + etaold * (Ysum - Zsum))
                            + trans(Ynew - Y) * Xbeta + trans(Z - Ynew) * Xbetanew + trans(Y - Z) * Xbetaold
                            + etaold * trans(Z - Y) * A * muold + etanew * trans(Ynew - Z) * A * munew + eta * trans(Y - Ynew) * A * mu)
                            + accu((square(betaold) - square(betanew)) / (2 * sigma));
        if (as_scalar(log(as<double>(runif(1)))) < logAccept)
        {
            estimates.row(i) = thetanew.t();
            thetaold = thetanew;
            Y = Ynew;
        }
        else
            estimates.row(i) = thetaold.t();
        if (verbose && i % onepct == 0)
        {
            Rcout << "\rTraining progress: |";
			for (int j = 1; j <= pct; j++)
				Rcout << "+";
			for (int j = 1; j < (100 - pct); j++)
				Rcout << " ";
			Rcout << "| " << pct << "%";
			pct++;
            R_FlushConsole();
        }
    }
    if (verbose)
        Rcout << std::endl << std::endl;
    return estimates.rows(1, trainit);
} 
Example #8
// How is that for a descriptive function name???
colvec symmetricTridiagonalFrancisAlgorithmWithWilkinsonShift(colvec a, colvec b) {
    float epsilon = 0.001f; // turned off currently
    int passes = 3;
    int pass = 0;
    unsigned long long int count = 0;

    int m = a.n_elem - 1;
    while (0<m) {
        ++pass;
        float s, c;
        float d = (a(m-1) - a(m)) * 0.5f;
        if (d == 0) {
            s = a(m) - abs(b(m));
        } else {
            s = a(m) - pow(b(m),2) / (d + sgn(d)*sqrt(d*d + b(m)*b(m)));
        }
        float x = a(0) - s;
        float y = b(1);
        for (int k=0; k<=m-1; ++k) {
            if (1 < m) {
                // givens rotation
                float r = hypot(y,x);
                c = x/r;
                s = y/r;
            } else {
                // Orthogonal diagonalization of a symmetric 2x2 matrix
                // http://scipp.ucsc.edu/~haber/ph116A/diag2x2_11.pdf
                float theta;
                theta = 0.5f * atan2(2*b(1),a(0)-a(1));
                s = sin(theta);
                c = cos(theta);
            }
            float w = c*x - s*y;
            d = a(k) - a(k+1);
            float z = (2*c*b(k+1) + d*s)*s;
            a(k)   -= z;
            a(k+1) += z;
            b(k+1) = d*c*s + (c*c - s*s)*b(k);
            x = b(k+1);
            if (0 < k) {
                b(k) = w;
            }
            if (k < m-1) {
                y = -s*b(k+2);
                b(k+2) *= c;
                if (abs(c) > 1) {
                    cout << c << endl;
                }
            }
            // Do not need to calculate Q. Do not need eigenvectors.
        }
        count += m;
//        cout << abs(b(m)) << "       " << epsilon*(abs(a(m-1)) + abs(a(m))) << endl;
        float smallThing = epsilon*(abs(a(m-1)) + abs(a(m)));
        if (isnan(smallThing)) {
            cout << "Not a number!" << endl;
        }
        if (abs(b(m)) < smallThing) { // check for convergence
//            --m;
//            temporarily turned off, to make iterations until m is decreased constant
        }
        if (pass == passes) {
            --m;
            pass = 0;
        }
    }
//    cout << "diagonal" << endl << a.t() << endl;
    cout << "off diagonal" << endl << b.t() << endl;
    cout << "inner loop iterations : " << count << endl; // total number of inner loop iterations performed
    return a;
}
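// sgn is not defined in this snippet; a minimal sketch of the sign function assumed
// by the Wilkinson shift above (declare it before the function above if compiling
// this snippet on its own):
float sgn(float x) {
    return (x < 0.0f) ? -1.0f : 1.0f;
}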
// [[Rcpp::export]]
double logLik(colvec& y, mat& X, int k, int nblock, List t,
              colvec& betadraw, mat& Sdraw, mat& Rdraw, 
              colvec& B0, mat& sigB, double delta, double eta,
              colvec& gammadraw, colvec& alphadraw){
  
  int n = X.n_rows, p = X.n_cols, l = n/k;
  
  double logy, logbeta, logprior, logfull;
  
  int tlen = k/nblock;
  double alphaj, gammaj, maxalphat, maxgammat;
  
  colvec alphat = zeros<colvec>(tlen),
         gammat = zeros<colvec>(tlen);
  
  colvec loggamma(nblock), logalpha(nblock);
  colvec valpha(nblock), vgamma(nblock),
         dalpha(nblock), dgamma(nblock);
  
  colvec tj(tlen);   // use an arma vector so the elementwise arithmetic below compiles
  
  mat invR = inv(Rdraw);
  mat invS = inv(Sdraw);
  mat invSig = invS*invR*invS;
  
  double empSS = 0;
  
  for (int j=0; j<n; j+=k){
    
    colvec res = y.rows(j,j+k-1)-X.rows(j,j+k-1)*betadraw;
    empSS += as_scalar(trans(res)*invSig*res);
    
  }
  
  // log likelihood of data y, pi = M_PI
  logy = as_scalar(-0.5*k*l*log(2*M_PI)-0.5*l*log(det(Sdraw*Rdraw*Sdraw))-0.5*empSS);
  
  // log prior for beta
  logbeta = as_scalar(-0.5*p*log(2*M_PI)-0.5*log(det(sigB))-0.5*(trans(betadraw-B0)*inv(sigB)*(betadraw-B0)));
  
  
  //RNGScope scope;
  
  for(int j=0; j<nblock; j++){
    
    tj = as<colvec>(t[j]);
    
    alphaj = alphadraw(j);
    alphat = -alphaj*tj;
    maxalphat = max(alphat);
    
    gammaj = gammadraw(j);
    gammat = - gammaj/tj;
    maxgammat = max(gammat);
    
    valpha(j) = maxalphat/delta;
    vgamma(j) = maxgammat/eta;
    dalpha(j) = R::dnorm(valpha(j),0.0,1.0,0);
    dgamma(j) = R::dnorm(vgamma(j),0.0,1.0,0);
    
    loggamma(j) = -0.5*log(2*M_PI)-log(delta)-0.5*pow(gammadraw(j)/delta,2)-log(1-dalpha(j));
    logalpha(j) = -0.5*log(2*M_PI)-log(eta)-0.5*pow(alphadraw(j)/eta,2)-log(1-dgamma(j));
    
  }
  
  logprior = logbeta + sum(loggamma) + sum(logalpha);
  
  logfull = logy + logprior;
  
  return logfull;
} 
Example #10
PieceWiseConstant::PieceWiseConstant(const colvec& x, const colvec& y)
	: VolatilityFunction(x.size()), xs(x), ys(y), lastPos(0)
{
	if (xs.size() != ys.size())
		throw("x and y do not match in piecewiseconstant");	
}
Example #11
void PieceWiseConstant::setY(const colvec& y){
	if (y.size() != xs.size()) throw("new y size does not match in piecewiseconstant");
	for (int i = 0; i < n; i++)
		ys[i] = y[i];
	lastPos = 0;
}
Example #12
void PieceWiseConstant::setX(const colvec& x){
	if (x.size() != ys.size()) throw("new x size does not match in piecewiseconstant");
	for (int i = 0; i < n; i++)
		xs[i] = x[i];
	lastPos = 0;
}
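// The class declaration for these member functions is not shown; a minimal sketch of
// the header they appear to assume (member names inferred from the code above; the
// VolatilityFunction base class is assumed to store the size n used in the loops):
class PieceWiseConstant : public VolatilityFunction {
public:
	PieceWiseConstant(const colvec& x, const colvec& y);
	void setX(const colvec& x);
	void setY(const colvec& y);
private:
	colvec xs, ys;	// breakpoints and corresponding levels of the piecewise-constant function
	int lastPos;	// cached search position, reset whenever x or y changes
};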
Example #13
// update vector of cluster membership indicators, s(i),....,s(N)
SEXP clusterstep(const cube& B, mat& kappa_star, mat& B1, const uvec& o,
             const field<mat>& C, const mat& D, ucolvec& s, 
             //const field<sp_mat>& C,
             ucolvec& num, unsigned int& M, double& conc, int a, int b,
             const vec& ipr, colvec& Num)
    {
        BEGIN_RCPP
      
        // sample cluster assignments, s(1), ..., s(N)
        // B = (B_1,...,B_K), where B_k is N x T matrix for iGMRF term k
        // Q = (Q_1,...,Q_K), where Q_k is a T x T de-scaled iGMRF precision matrix
        // C = (C_1,...,C_K), where C_k = D_k^-1 * Omega_k, 
        // where Omega_k is the T x T adjacency matrix for iGMRF term, k
        // D is a K x T matrix where row k contains T diagonal elements of Q_k
        // K x M matrix, kappa_star records locations for each iGMRF term
        // o = (o_1,...,o_K) is a vector where each entry denotes the order of term k.
        // e.g. RW(1) -> o = 2, RW(2) -> o = 3, seas(3) -> o = 3
        int N = B.slice(0).n_rows;
        int T = B.slice(0).n_cols;
        int K = C.n_rows;
        double sweights = 0;
        // zro is a T x 1 vector of zeros
        colvec zro(T); zro.zeros();
        uvec o_adjust = o; 
        //o_adjust.zeros();
        // capture quadratic product for rate kernel of posterior gamma
        // posterior for kappa_star(k,i).
        // save B1 so the posterior for kappa_star can be computed later (in another function)
        // mat B1(K,N); 
        double a1k; /* posterior shape for kappa_star(k,i) under 1 obs */
        B1.zeros();
        int i, j, k;
	      unsigned int l;
           
        /* 
        mat D_k(T,T), Omega_k(T,T);
        cube Q(T,T,K);
        for(k = 0; k < k; k++)
        {
           D_k.zeros(); D_k.diag()        = D.row(k);
           Omega_k                      = D_k * C(k,0); 
           Q.slice(k)                   = D_k - Omega_k;
        } // end loop K over iGMRF terms 
        */
        
        for(i = 0; i < N; i++)
        {
            // check if _i assigned to singleton cluster
            // if so, remove the cluster associated to _i
            // and decrement the cluster labels for m > s(i)
            if(num(s(i)) == 1) /* remove singleton cluster */
            {
                kappa_star.shed_col(s(i));
                num.shed_row(s(i));
                Num.shed_row(s(i));
                M -= 1; /* decrement cluster count */

                //decrement cluster tracking values by 1 for tossed cluster
                s( find(s > s(i)) )          -= 1;
                
            } /* end cluster accounting adjustment for singleton cluster */
            else /* cluster contains more than one unit */
            {
                num(s(i))                    -= 1;
                /* scale up num to population totals, Num, based on H-T inverse probability estimator */
                Num(s(i))                    -= 1/ipr(i);
            } /* decrement non-singleton cluster count by one */

            // construct normalization constant, q0i, to sample s(i)
            // build logq0i and exponentiate
            colvec bki(T), bbar_ki(T); /* T x 1, D_k^-1*Omega_k*b_ki = C(k,0)*b_ki */
            mat bbar_i(K,T); bbar_i.zeros();
            double logd_dk = 0; /* set of T 0 mean gaussian densities for term k */
            double logq0ki = 0, logq0i = 0, q0i = 0;
            // accumulate weight, q0i, for s(i) over K iGMRF terms  
            for( k = 0; k < K; k++)
            {
                 logq0ki       = 0; /* reset k-indexed log-like on each k */
                 //a1k           = 0.5*(double(T)) + a;
                 a1k           = 0.5*(double(T)-double(o_adjust(k))) + a;
                 bki           = B.slice(k).row(i).t();
                 bbar_ki       = C(k,0) * bki; /* T x 1 */
                 bbar_i.row(k) = bbar_ki.t();
                 B1(k,i)       = 0.5*dot( D.row(k), pow((bki-bbar_ki),2) ); /* no b */
                 logd_dk       = 0; /* set of T gaussian densities for term k */
                 /* dmvn(zro|m,Q.slice(k),true) */
                 for( j = 0; j < T; j++ )
                 {
                    logd_dk   += R::dnorm(0.0,0.0,double(1/sqrt(D(k,j))),true);
                 }
                 logq0ki      = logd_dk + lgamma(a1k) + a*log(b) -
                                   lgamma(a) - a1k*trunc_log(B1(k,i)+b);
                 logq0i       += logq0ki;
            } /* end loop k over iGMRF terms */
            q0i = trunc_exp(logq0i);

            // construct posterior sampling weights to sample s(i)
            colvec weights(M+1); weights.zeros();
            /* evaluate likelihood under kappa_star(k,i) */
            double lweights_l;
            for(l = 0; l < M; l++) /* cycle through all clusters for s(i) */
            {
                s(i)          = l; /* will compute likelihoods for every cluster */  
                lweights_l = 0; /* hold log densities for K computations */
                for(k = 0; k < K; k++)
                {
                    bki            = B.slice(k).row(i).t();
                    for( j = 0; j < T; j++ )
                    {
                      /* effectively making assignment, s(i) = l */
                      lweights_l   += trunc_log(R::dnorm(bki(j),bbar_i(k,j),
                                    double(1/sqrt(kappa_star(k,l)*D(k,j))),false));
                    } /* end loop j over time index */
                } /* end loop k over iGMRF terms */
                //if(lweights_l < -300){lweights_l = -300;}
                weights(l)          = trunc_exp(lweights_l);
                weights(l)          *= double(Num(s(i)))/(double(N) - 1/ipr(i) + conc);
            } /* end loop l over existing or populated clusters */
            /* M+1 or new component sampled from F_{0} */
            weights(M)              = conc/(double(N) - 1/ipr(i) + conc)*q0i;

            // normalize weights
            sweights = sum(weights);
            if(sweights == 0)
            {
                weights.ones(); weights *= 1/(double(M)+1);
            }
            else
            {
                weights /= sweights;
            }

            // conduct discrete posterior draw for s(i)
            unsigned long MplusOne = M + 1;
            s(i) = rdrawone(weights, MplusOne);

            // if new cluster chosen, generate new location
            if(s(i) == M)
            {
                /* sample posterior of kappa_star[k,m] for 1 (vs. n_m) observation */
                double a_star_k; /* shape for 1 obs */
                double bstar_ki;
                kappa_star.insert_cols(M,1); /* add K vector new location to kappa_star */
                num.insert_rows(M,1);
                Num.insert_rows(M,1);
                for(k = 0; k < K; k++)
                {
                     a_star_k         = 0.5*(double(T) - double(o_adjust(k))) + a; /* shape for 1 obs */
                     bstar_ki         = B1(k,i) + b; /* B1(k,i) is a scalar quadratic product */
                     /*
                     bki              = B.slice(k).row(i).t();
                     bstar_ki         = 0.5*( as_scalar(bki.t()*symmatl(Q.slice(k))*bki) ) + b;
                     */
                     kappa_star(k,M)  = rgamma(1, a_star_k, (1/bstar_ki))[0];
                }
                num(M)   = 1;
                Num(M)   = 1/ipr(i);
                M        = MplusOne;
            }
            else
            {
                num(s(i)) += 1;
                Num(s(i)) += 1/ipr(i);
            }
            
        } /* end loop i for cluster assignment to unit i = 1,...,N */
        END_RCPP
    } /* end function clusterstep for cluster assignments, s, and computing B1 */
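// rdrawone is not defined in this snippet; a minimal sketch, assuming it draws a
// single index in 0,...,n-1 from the discrete distribution given by the (already
// normalized) weights vector (the name and signature are assumptions based on the
// call above):
unsigned long rdrawone(const colvec& weights, unsigned long n)
{
    double u = R::runif(0.0, 1.0);
    double cdf = 0.0;
    for (unsigned long l = 0; l < n; l++)
    {
        cdf += weights(l);
        if (u <= cdf)
            return l;
    }
    return n - 1;   // guard against rounding when the weights sum to slightly under 1
}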