Code example #1
//Draws xaya = t(xa) %*% ya, where ya = xag %*% Bg + sd*Z and Z ~ N(0, I)
void draw_uncollapsed_xaya(std::vector<double> &xaya, std::vector<double> &xa, std::vector<double> &xag, std::vector<double> Bg,  double phi,  int na, int p, int p_gamma)
{
	double sd=sqrt(1/phi); //conditional standard deviation implied by the precision phi
	std::vector<double> Z(na);
	for(std::vector<double>::iterator it=Z.begin(); it!=Z.end(); ++it) *it=Rf_rnorm(0,1); //iid N(0,1) draws; the RNG state is managed by the caller

	if(p_gamma!=0){
		dgemv_(&transN , &na, &p_gamma, &unity, &*xag.begin(), &na, &*Bg.begin(), &inc, &inputscale0, &*xaya.begin(), &inc); //xaya = xag %*% Bg
		daxpy_(&p, &sd, &*Z.begin(), &inc, &*xaya.begin(), &inc); //xaya += sd*Z
		//dtrmv_(&uplo, &transT, &unit_tri, &na, &*xa.begin(), &na, &*xaya.begin(), &inc);
		dtpmv_(&uplo, &transT, &unit_tri, &na, &*xa.begin(), &*xaya.begin(), &inc); //xaya = t(xa) %*% xaya, xa in packed triangular storage
	}else{
		for(size_t i=0; i!=xaya.size(); ++i) xaya[i]=sd*Z[i]; //no columns selected: xaya = sd*Z
		//dtrmv_(&uplo, &transT, &unit_tri, &na, &*xa.begin(), &na, &*xaya.begin(), &inc);
		dtpmv_(&uplo, &transT, &unit_tri, &na, &*xa.begin(), &*xaya.begin(), &inc); //xaya = t(xa) %*% xaya, xa in packed triangular storage
	}
}
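Note: Rf_rnorm draws from R's random number stream, and code example #1 does not manage that stream itself; any entry point called from R that uses Rf_rnorm must bracket the calls with GetRNGstate()/PutRNGstate(), as code examples #2 and #4 below do. A minimal sketch of that pattern (fill_std_normal is a hypothetical name, not taken from any project on this page):

#include <R.h>
#include <Rmath.h>

// Hypothetical entry point callable from R via .C(): fills z with *n
// standard normal draws taken from R's own RNG stream.
extern "C" void fill_std_normal(double *z, int *n)
{
	GetRNGstate();                   // read .Random.seed from R
	for (int i = 0; i < *n; ++i)
		z[i] = Rf_rnorm(0.0, 1.0);   // one N(0,1) draw per element
	PutRNGstate();                   // write the updated state back to R
}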
Code example #2
File: glmtest.cpp  Project: eddelbuettel/mvabund
int GlmTest::resampNonCase(glm *model, gsl_matrix *bT, unsigned int i)
{
   unsigned int j, k, id;
   double bt, score, yij, mij;
   gsl_vector_view yj;
   unsigned int nRows=tm->nRows, nVars=tm->nVars;

   // note that residuals have got means subtracted
   switch (tm->resamp) {
   case RESIBOOT: 
       if (tm->reprand!=TRUE) GetRNGstate();
       for (j=0; j<nRows; j++) {
           if (bootID!=NULL)
               id = (unsigned int) gsl_matrix_get(bootID, i, j);
           else if (tm->reprand==TRUE)
               id = (unsigned int) gsl_rng_uniform_int(rnd, nRows);
           else id = (unsigned int) nRows * Rf_runif(0, 1);
           // bY = mu+(bootr*sqrt(variance))
           for (k=0; k<nVars; k++) { 
               bt=gsl_matrix_get(model->Mu,j,k)+sqrt(gsl_matrix_get(model->Var,j,k))*gsl_matrix_get(model->Res, id, k);  
               bt = MAX(bt, 0.0);
               bt = MIN(bt, model->maxtol);
               gsl_matrix_set(bT, j, k, bt);
           }
       }
       if (tm->reprand!=TRUE) PutRNGstate();
       break;
   case SCOREBOOT: 
        for (j=0; j<nRows; j++) {
           if (bootID!=NULL)
               score = (double) gsl_matrix_get(bootID, i, j);
           else if (tm->reprand==TRUE)
               score = gsl_ran_ugaussian (rnd); 
           else score = Rf_rnorm(0.0, 1.0);
           // bY = mu + score*sqrt(variance)  
           for (k=0; k<nVars; k++) {
               bt=gsl_matrix_get(model->Mu, j, k)+sqrt(gsl_matrix_get(model->Var, j, k))*gsl_matrix_get(model->Res, j, k)*score;
               bt = MAX(bt, 0.0);
               bt = MIN(bt, model->maxtol);
               gsl_matrix_set(bT, j, k, bt);
           }
        }
        break;
   case PERMUTE: 
        if (bootID==NULL) 
            gsl_ran_shuffle(rnd,permid,nRows,sizeof(unsigned int));
        for (j=0; j<nRows; j++) {
            if (bootID==NULL) id = permid[j];
            else id = (unsigned int) gsl_matrix_get(bootID, i, j);
            // bY = mu + bootr * sqrt(var)
            for (k=0; k<nVars; k++) {
                bt=gsl_matrix_get(model->Mu,j,k)+sqrt(gsl_matrix_get(model->Var,j,k))*gsl_matrix_get(model->Res, id, k);
                bt = MAX(bt, 0.0);
                bt = MIN(bt, model->maxtol);
                gsl_matrix_set(bT, j, k, bt);
            }
        }
        break;
   case FREEPERM:
         if (bootID==NULL) 
             gsl_ran_shuffle(rnd,permid,nRows,sizeof(unsigned int));
         for (j=0; j<nRows; j++) {
              if (bootID==NULL)  id = permid[j];
              else id = (unsigned int) gsl_matrix_get(bootID, i, j);
              yj=gsl_matrix_row(model->Yref, id);
              gsl_matrix_set_row (bT, j, &yj.vector);
         }
         break;
   case MONTECARLO:
        McSample(model, rnd, XBeta, Sigma, bT);
        break;
    case PITSBOOT:
       if (tm->reprand!=TRUE) GetRNGstate();
       for (j=0; j<nRows; j++) {
           if (bootID!=NULL) 
               id = (unsigned int) gsl_matrix_get(bootID, i, j);
           else if (tm->reprand==TRUE) 
               id = (unsigned int) gsl_rng_uniform_int(rnd, nRows);
           else id = (unsigned int) Rf_runif(0, nRows);
           for (k=0; k<nVars; k++) {
               bt = gsl_matrix_get(model->PitRes, id, k); 
               mij = gsl_matrix_get(model->Mu, j, k);                
               yij = model->cdfinv(bt, mij, model->theta[k]); 
               gsl_matrix_set(bT, j, k, yij);
           }
       }
       if (tm->reprand!=TRUE) PutRNGstate();
       break;
    default: GSL_ERROR("The resampling method is not supported", GSL_ERANGE); break;
    }
    return SUCCESS;
} 
Code example #3
Type rnorm(Type mu, Type sigma)
{
  return Rf_rnorm(asDouble(mu), asDouble(sigma));
}
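Note: code example #3 simply forwards to Rf_rnorm(mu, sigma), which returns a single N(mu, sigma) draw; the Type/asDouble pair suggests a templated wrapper (for example a TMB-style template, though that is an assumption here). The same sampler can also be used outside an R session through the standalone Rmath library, where the generator is seeded with set_seed() instead of being synchronized with .Random.seed. A minimal standalone sketch, assuming libRmath is installed and the program is linked with -lRmath -lm:

#define MATHLIB_STANDALONE
#include <Rmath.h>
#include <cstdio>

int main()
{
	set_seed(123u, 456u);            // standalone replacement for GetRNGstate()
	double x = Rf_rnorm(1.5, 0.5);   // one draw from N(mean = 1.5, sd = 0.5)
	std::printf("%f\n", x);
	return 0;
}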
Code example #4
File: glm_gibbs.cpp  Project: michaellindon/oda
extern "C" void glm_gibbs(double * rZ, double * rxo,  double * rlam, int * rmodelprior, double * rpriorprob, double * rbeta1, double * rbeta2, int * rburnin, int * rniter, int * rscalemixture, double * ralpha,  int * rno, int * rna, int * rp, double * B_mcmc, double * prob_mcmc, int * gamma_mcmc, double * phi_mcmc, double * lam_mcmc, double * B_rb, double * prob_rb, double * intercept_mcmc, double * xo_scale)
{
	GetRNGstate();
	//MCMC Variables//
	int burnin=*rburnin;
	int niter=*rniter;

	//Dimensions//
	int p=*rp;
	int no=*rno;
	int na=*rna;

	//Phi Variables//
	double phi=1.0;

	//Yo Variables//
	std::vector<double> Z(rZ, rZ+no); 
	std::vector<double> xo(rxo, rxo+no*p);
	standardize_xo(xo,xo_scale,no,p);
	std::vector<double> xoyo(p);
	double yobar=0;

	std::vector<double> xoxo(p*p);
	dgemm_( &transT, &transN, &p, &p, &no, &unity, &*xo.begin(), &no, &*xo.begin(), &no, &inputscale0, &*xoxo.begin(), &p );

	//Construct Xa//
	std::vector<double> xa(p*(p+1)/2); //Triangular Packed Storage
	std::vector<double> d(p);
	chol_xa(xa,xoxo,d,p);


	//Reserve Memory for Submatrices//
	std::vector<double> xog; xog.reserve(no*p);
	std::vector<double> xogyo; xogyo.reserve(p);
	std::vector<double> xogxog_Lamg; xogxog_Lamg.reserve(p*p);
	std::vector<double> xag; xag.reserve(na*p);

	//Ya Variables//
	std::vector<double> xaya(p);

	//Beta Variables//
	double intercept=0;
	std::vector<double> Bols(p);
	std::vector<double> B(p,0.0);
	std::vector<double> Bg; Bg.reserve(p);

	//Lambda Variables//
	int scalemixture=*rscalemixture;
	double alpha=*ralpha;
	std::vector<double> lam(rlam,rlam+p);
	std::vector<double> lamg; lamg.reserve(p); //vector instead of diagonal pxp matrix

	//Gamma Variables//
	std::vector<int> gamma(p,1);
	int p_gamma=std::accumulate(gamma.begin(),gamma.end(),0);
	bool gamma_diff=true;
	int modelprior=*rmodelprior;

	//Probability Variables//
	std::vector<double> prob(p);
	std::vector<double> odds(p);
	std::vector<double> priorprob(rpriorprob,rpriorprob+p);

	//Theta Variables//
	double theta=0.5;
	double beta1=*rbeta1;
	double beta2=*rbeta2;

	//Store Initial Values//
	std::copy(B.begin(),B.end(),B_mcmc);
	std::copy(prob.begin(),prob.end(),prob_mcmc);
	std::copy(gamma.begin(),gamma.end(),gamma_mcmc);
	std::copy(lam.begin(),lam.end(),lam_mcmc);

	//Run Gibbs Sampler//
	for (int t = 1; t < niter; ++t)
	{

		//Form Submatrices//
		if(p_gamma) submatrices_uncollapsed(gamma_diff,B,xog,xag,lamg,Bg,gamma,lam,xo,xa,p_gamma,p,no,na);

		//Draw xoyo//
		draw_xoyo(Z,xoyo,yobar,xo,xog,Bg,phi,no,p,p_gamma,intercept);

		//Draw xaya//
		draw_uncollapsed_xaya(xaya,xa,xag,Bg,phi,na,p,p_gamma);

		//Compute Probabilities//
		if(modelprior==1)
		{
			bernoulli_probabilities(prob,odds,Bols,d,xoyo,xaya,priorprob,lam,phi);
		}else if(modelprior==2)
		{
			betabinomial_probabilities(prob,odds,Bols,d,xoyo,xaya,theta,lam,phi);
		}else
		{
			uniform_probabilities(prob,odds,Bols,d,xoyo,xaya,lam,phi);
		}

		//Draw Gamma//
		draw_gamma(gamma,p_gamma,prob);


		//Draw Theta//
		if(modelprior==2) theta=Rf_rbeta(beta1+p_gamma,p-p_gamma+beta2);


		//Draw Beta//
		draw_beta(gamma,B,Bols,d,lam,phi);

		//Draw Intercept//
		intercept=yobar+sqrt(1/(no*phi))*Rf_rnorm(0,1);

		//Draw Lambda//
		if(scalemixture) draw_lambda_t(lam,gamma,alpha,B,phi);


		//Store Draws//
		intercept_mcmc[t]=intercept;
		std::copy(gamma.begin(),gamma.end(),(gamma_mcmc+p*t));
		std::copy(prob.begin(),prob.end(),(prob_mcmc+p*t));
		std::copy(B.begin(),B.end(),(B_mcmc+p*t));
		std::copy(lam.begin(),lam.end(),(lam_mcmc+p*t));

		//Rao Blackwell//
		if(t>=burnin) rao_blackwell(B_rb,prob_rb,B,prob,burnin,niter);

		//Has Gamma Changed?//
		gamma_diff=gamma_change(gamma_mcmc,t,p);

	}
	PutRNGstate();
}
Code example #5
File: mcmcpkpg.cpp  Project: myajima/bppkgx
int MCMCPkPg::sampleTheta( PkPgModel& model, PkPgResult& Result ){
  //int i, v, j;
  double fx0, fx1, ratio, u;
  mat proplogTheta( model.N, model.V ); // Proposal for Theta, stored as a matrix with a view to parallelizing in the future
  mat propTheta( model.N, model.V ); 
  // Loop subjects ---------------------------------------------------
  for( size_t i = 0; i < model.N; i++ ){
    // current posterior log likelihood
    colvec CurrentlogTheta = Result.logTheta.row( i ).t();
    fx0 = f_theta( CurrentlogTheta,     Result.mTheta.row( i ).t(), 
                   Result.Otheta,    1.0, //Result.Tau( i ), 
                   model.Y.slice( i ), Result.Fit.slice( i ), 
                   Result.Sigma.row( i ) , log(Result.EHRTime(i,0)));              
    // proposal ----------- start -----------
    colvec theta_bar_i = Result.logTheta_bar.row( i ).t();
    mat sigma_theta_i  = Result.Sigma_theta.slice( i );
    //colvec ProplogThetaCenter = CurrentlogTheta + 2 * ( Result.logTheta_MLE.row(i).t() - CurrentlogTheta );
    //colvec logtemp = MVNORM( 0, ProplogThetaCenter , sigma_theta_i ) ;      // proposal

    colvec logtemp = MVNORM( 0, CurrentlogTheta , sigma_theta_i ) ;      // proposal
    proplogTheta.row( i ) = logtemp.t();
    //logtemp.print("logtemp");
    propTheta.row( i )   = exp( proplogTheta.row( i ) );
    rowvec tempPThetaRow(15);
    for( int j = 0; j< 10; j++)tempPThetaRow(j) = propTheta( i, j );
    double proplogEHRTime = Rf_rnorm(log(Result.EHRTime(i,0)),0.1);
    double propEHRTime = exp( proplogEHRTime );
    tempPThetaRow(10) =  propEHRTime; //Result.EHRTime(i,0);
    for( int j = 10; j< 14; j++) tempPThetaRow(j+1) = propTheta( i, j );
    rowvec param        = tempPThetaRow;
    mat TempFit         = fev( model.PkModel, model.env, model.K, model.T, param, i ); // update the likelihood
    if( TempFit.n_cols==model.K){
      uvec idx            = find( TempFit < 0.00001 );
      TempFit.elem( idx ) = ones<vec>( idx.n_elem ) * 0.00001;
      // proposal ----------- end -----------
      // proposed posterior log likelihood  
      fx1 = f_theta( proplogTheta.row( i ).t(), Result.mTheta.row( i ).t(), 
                     Result.Otheta,    1.0, //Result.Tau(i),
                     model.Y.slice( i ), TempFit, 
                     Result.Sigma.row( i ), proplogEHRTime ); 
      // Accept or Reject
      ratio = fx1 - fx0;     // M-H ratio                    
      u = Rf_runif( 0, 1 ); 
      if( log( u ) < ratio ){
        // for( v = 0; v < model.V; v++ ) {
        //   Result.Theta( i, v ) = propTheta( i, v );   // Update theta_v
        // }
        acceptTheta(i,cIter)=1;
        Result.logTheta_old.row( i ) = Result.logTheta.row( i );
        Result.logTheta.row( i )     = proplogTheta.row( i );
        Result.Theta.row( i )        = propTheta.row( i );
        Result.Fit.slice( i )        = TempFit;
        Result.EHRTime(i,0)          = propEHRTime;
      }  else {
        Result.logTheta_old.row( i ) = Result.logTheta.row( i );
        Result.logTheta.row( i )     = Result.logTheta.row( i );
        Result.Theta.row( i )        = Result.Theta.row( i );
        Result.Fit.slice( i )        = Result.Fit.slice( i );
      }
    } else{
      acceptTheta(i,cIter)=-1;
      Result.logTheta_old.row( i ) = Result.logTheta.row( i );
      Result.logTheta.row( i )     = Result.logTheta.row( i );
      Result.Theta.row( i )        = Result.Theta.row( i );
      Result.Fit.slice( i )        = Result.Fit.slice( i );
    }

  } // end loop i

  return 0;
}
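Note: the accept/reject logic in code example #5 is a standard random-walk Metropolis step: propose with Rf_rnorm centred at the current value (here on the log scale), form the log posterior difference, and accept when log(u) < ratio with u drawn by Rf_runif(0, 1). A stripped-down sketch of one such scalar update (log_post is a hypothetical target log-density, not part of the project above; the caller is assumed to have called GetRNGstate() if running inside R):

#include <cmath>
#include <Rmath.h>

// One random-walk Metropolis update for a scalar parameter.
// log_post: hypothetical target log-density; step_sd: proposal standard deviation.
double mh_step(double current, double step_sd, double (*log_post)(double))
{
	double proposal = Rf_rnorm(current, step_sd);              // symmetric random-walk proposal
	double ratio    = log_post(proposal) - log_post(current);  // M-H log acceptance ratio
	double u        = Rf_runif(0.0, 1.0);
	return (std::log(u) < ratio) ? proposal : current;         // accept, otherwise keep current value
}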