Example #1
// [[Rcpp::export]]
colvec ST3a(colvec z, double gam)
{
    int n = z.size();
    colvec z1(n);
    // Apply the scalar soft-threshold ST1a to each element of z.
    for (int i = 0; i < n; ++i)
    {
        double z11 = z(i);
        z1(i) = ST1a(z11, gam);
    }
    return z1;
}
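ST1a is not defined in this excerpt. Assuming it is the usual scalar soft-thresholding operator sign(z) * max(|z| - gam, 0), a minimal sketch of what ST3a relies on might look like this (the real definition may differ):

// Assumed sketch of the scalar helper used by ST3a; not part of the original excerpt.
double ST1a(double z, double gam)
{
    // Shrink z toward zero by gam, truncating at zero.
    if (z > gam)  return z - gam;
    if (z < -gam) return z + gam;
    return 0.0;
}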
Example #2
// [[Rcpp::export]]
cube gamloopP(NumericVector beta_, const mat Y, const mat Z, const colvec gammgrid, const double lassothresh,
              const colvec YMean2, const colvec ZMean2, const colvec znorm, mat BFoo) {

    // Data is read in from R as Armadillo objects directly; initially I did not know this was possible.

    // Set the number of OpenMP threads.
    omp_set_num_threads(4);

    // Set the per-thread stack size; not really necessary unless you run into memory issues.
    setenv("OMP_STACKSIZE", "64M", 1);

    const int n2 = BFoo.n_rows;
    const int k2 = BFoo.n_cols;

    // Coefficient matrices
    mat B1 = BFoo;
    mat B1F2 = BFoo;
    mat b2 = B1;
    const int ngridpts = gammgrid.size();

    // These objects are typically defined inside the "lassocore" function, but doing so causes
    // problems with OpenMP, so they are defined out here and passed into the function.
    cube bcube(beta_.begin(), n2, k2, ngridpts, false);
    cube bcube2(n2, k2 + 1, ngridpts);
    bcube2.fill(0.0);
    colvec nu = zeros<colvec>(n2);
    uvec m = ind(k2, 1);  // ind() is a helper defined elsewhere in this file
    double gam = 0;
    int i;
#pragma omp parallel for shared(bcube,bcube2,b2) private(i,gam,B1F2,B1,m,nu) default(none) schedule(auto)
    for (i = 0; i < ngridpts; ++i) {
        gam = gammgrid(i);
        // The previous coefficient matrix is read in as a "warm start".
        B1F2 = bcube.slice(i);
        // The actual algorithm is applied here.
        B1 = lassocore(Y, Z, B1F2, b2, gam, lassothresh, znorm, m, k2, n2);
        // The intercept is calculated.
        nu = YMean2 - B1 * ZMean2;
        bcube2.slice(i) = mat(join_horiz(nu, B1));
    }
    return bcube2;
}
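gamloopP uses RcppArmadillo types and OpenMP and calls helpers (ind, lassocore) defined elsewhere in the same source file. A sketch of the file preamble it assumes, if compiled with Rcpp::sourceCpp(), would be roughly:

// Assumed preamble (not shown in the excerpt above).
// [[Rcpp::depends(RcppArmadillo)]]
// [[Rcpp::plugins(openmp)]]
#include <RcppArmadillo.h>
#include <omp.h>
using namespace Rcpp;
using namespace arma;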
Example #3
// [[Rcpp::export]]
mat randWalkTrain(const mat& X, const mat& A, const colvec& Z,  
                  const colvec& theta, int trainit, 
                  const colvec& sigma, const colvec& etaRange,
                  const mat& V, bool verbose)
{
    double logAccept;
    int p = theta.size() - 1;
    int n = X.n_rows;
    colvec eigval;
    mat eigvec;
    eig_sym(eigval, eigvec, V);
    mat R = eigvec * diagmat(sqrt(eigval));
    colvec Y = rautologistic_(X, A, theta);
    colvec Ynew(n);
    colvec thetaold = theta;
    colvec thetanew = theta;
    colvec normvec(p + 1);
    mat estimates(trainit + 1, p + 1);
    estimates.row(0) = theta.t();
    colvec beta = theta.subvec(0, p - 1);
    double eta = theta[p];
    colvec Xbeta = X * beta;
    colvec mu = exp(Xbeta);
    mu = mu / (1 + mu);
    double Zsum = as_scalar(Z.t() * A * Z);
    int onepct = 0.01 * trainit;
    if (onepct == 0)
        onepct = 1; // guard the progress check below when trainit < 100
    int pct = 1;
    RNGScope scope;
    Function rnorm("rnorm"), runif("runif");
    for (int i = 1; i < trainit + 1; i++)
    {
        // Propose thetanew with a correlated Gaussian random-walk step,
        // redrawing until the proposed eta falls inside etaRange.
        do
        {
            normvec = as<colvec>(rnorm(p + 1));
            thetanew = thetaold + R * normvec;
        }
        while (thetanew[p] < etaRange[0] || thetanew[p] > etaRange[1]);
        Ynew = rautologistic_(X, A, thetanew);
        colvec betaold = thetaold.subvec(0, p - 1);
        colvec betanew = thetanew.subvec(0, p - 1);
        double etaold = thetaold[p];
        double etanew = thetanew[p];
        colvec Xbetaold = X * betaold;
        colvec Xbetanew = X * betanew;
        colvec muold = exp(Xbetaold);
        muold = muold / (1 + muold);
        colvec munew = exp(Xbetanew );
        munew = munew / (1 + munew);
        double Ynewsum = as_scalar(Ynew.t() * A * Ynew);
        double Ysum = as_scalar(Y.t() * A * Y);
        // Log acceptance probability for the proposed theta, using the auxiliary draw Ynew
        // and independent mean-zero Gaussian priors on beta with variances sigma.
        logAccept = as_scalar(0.5 * (eta * (Ynewsum - Ysum) + etanew * (Zsum - Ynewsum) + etaold * (Ysum - Zsum))
                            + trans(Ynew - Y) * Xbeta + trans(Z - Ynew) * Xbetanew + trans(Y - Z) * Xbetaold
                            + etaold * trans(Z - Y) * A * muold + etanew * trans(Ynew - Z) * A * munew + eta * trans(Y - Ynew) * A * mu)
                            + accu((square(betaold) - square(betanew)) / (2 * sigma));
        if (as_scalar(log(as<double>(runif(1)))) < logAccept)
        {
            estimates.row(i) = thetanew.t();
            thetaold = thetanew;
            Y = Ynew;
        }
        else
            estimates.row(i) = thetaold.t();
        // Update a simple text progress bar roughly every 1% of training iterations.
        if (verbose && i % onepct == 0)
        {
            Rcout << "\rTraining progress: |";
            for (int j = 1; j <= pct; j++)
                Rcout << "+";
            for (int j = 1; j < (100 - pct); j++)
                Rcout << " ";
            Rcout << "| " << pct << "%";
            pct++;
            R_FlushConsole();
        }
    }
    if (verbose)
        Rcout << std::endl << std::endl;
    return estimates.rows(1, trainit);
}
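randWalkTrain calls rautologistic_, which is not shown here. From the call sites above, its declaration is presumably along these lines, drawing one realization of the autologistic model from the design matrix X, adjacency matrix A, and parameter vector theta = (beta, eta):

// Assumed declaration of the sampler used above; the definition lives elsewhere in the source.
colvec rautologistic_(const mat& X, const mat& A, const colvec& theta);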
Example #4
PieceWiseConstant::PieceWiseConstant(const colvec& x, const colvec& y)
	: VolatilityFunction(x.size()), xs(x), ys(y), lastPos(0)
{
	// Throw a proper exception type (requires <stdexcept>) rather than a raw string literal.
	if (xs.size() != ys.size())
		throw std::invalid_argument("x and y do not match in piecewiseconstant");
}
Example #5
void PieceWiseConstant::setY(const colvec& y){
	if (y.size() != xs.size()) throw std::invalid_argument("new y size does not match in piecewiseconstant");
	// n presumably comes from the VolatilityFunction base class (initialized with the vector length).
	for (int i = 0; i < n; i++)
		ys[i] = y[i];
	lastPos = 0;
}
Example #6
void PieceWiseConstant::setX(const colvec& x){
	if (x.size() != ys.size()) throw std::invalid_argument("new x size does not match in piecewiseconstant");
	for (int i = 0; i < n; i++)
		xs[i] = x[i];
	// Reset the cached lookup position since the breakpoints changed.
	lastPos = 0;
}
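The member definitions in examples #4 to #6 imply a class layout roughly like the following. This is a hypothetical reconstruction for reference only; the real header very likely declares additional evaluation methods, and n is assumed to be stored by the VolatilityFunction base class:

// Hypothetical declaration consistent with the member definitions above.
class PieceWiseConstant : public VolatilityFunction {
public:
    PieceWiseConstant(const colvec& x, const colvec& y);
    void setX(const colvec& x);
    void setY(const colvec& y);
private:
    colvec xs;    // breakpoints
    colvec ys;    // constant value on each piece
    int lastPos;  // cached index of the most recent lookup
};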