void ModelWPAMGPU::setUNoise() { fmat E = eye<fmat>(3,3); fmat Q_Row(0,0); fmat Q(9,9); Q_Row.insert_cols(0,(pow(T,4)/4)*E); Q_Row.insert_cols(3,(pow(T,3)/2)*E); Q_Row.insert_cols(6,(pow(T,2)/2)*E); Q.insert_rows(0,Q_Row); Q_Row = fmat(0,0); Q_Row.insert_cols(0,(pow(T,3)/2)*E); Q_Row.insert_cols(3,(pow(T,2))*E); Q_Row.insert_cols(6,T*E); Q.insert_rows(0,Q_Row); Q_Row = fmat(0,0); Q_Row.insert_cols(0,(pow(T,2)/2)*E); Q_Row.insert_cols(3,T*E); Q_Row.insert_cols(6,E); Q.insert_rows(0,Q_Row); for (unsigned int i=0; i< Q.n_cols;++i) { NoiseGaussianGPU* u = new NoiseGaussianGPU(0,Q(i,i)*variance); U.addNoise(u); } }
// Compute the FAS (Full Approximation Scheme) tau correction coupling a
// coarse sweeper (`dst`) to a fine sweeper (`src`) over one time step.
//
// Steps:
//   1. integrate the '0 to node' integrals on both levels,
//   2. restrict the fine-level integrals onto the coarse nodes,
//   3. tau = restricted-fine-integral - coarse-integral, converted from
//      '0 to node' into 'node to node' form via the cached matrix `fmat`.
//
// dt  width of the current time step
// dst coarse-level sweeper; its tau encapsulations are overwritten in place
// src fine-level sweeper (read-only)
void PolyInterpMixin<time>::fas(time dt, shared_ptr<ISweeper<time>> dst, shared_ptr<const ISweeper<time>> src)
{
  auto& crse = pfasst::encap::as_encap_sweeper(dst);
  auto& fine = pfasst::encap::as_encap_sweeper(src);

  auto const ncrse = crse.get_nodes().size(); assert(ncrse >= 1);
  auto const nfine = fine.get_nodes().size(); assert(nfine >= 1);

  auto crse_factory = crse.get_factory();
  auto fine_factory = fine.get_factory();

  // Work buffers: one encapsulation per quadrature node on each level.
  EncapVecT crse_int(ncrse), fine_int(nfine), rstr_int(ncrse);
  for (size_t m = 0; m < ncrse; m++) { crse_int[m] = crse_factory->create(solution); }
  for (size_t m = 0; m < ncrse; m++) { rstr_int[m] = crse_factory->create(solution); }
  for (size_t m = 0; m < nfine; m++) { fine_int[m] = fine_factory->create(solution); }

  // compute '0 to node' integral on the coarse level
  crse.integrate(dt, crse_int);
  // compute '0 to node' integral on the fine level
  fine.integrate(dt, fine_int);

  // restrict '0 to node' fine integral
  // NOTE(review): assumes coarse nodes are a constant-stride subset of the
  // fine nodes (nfine-1 divisible by ncrse-1) and ncrse > 1 -- ncrse == 1
  // would divide by zero here.  Confirm against the node generators used.
  int trat = (int(nfine) - 1) / (int(ncrse) - 1);
  for (size_t m = 0; m < ncrse; m++) { this->restrict(rstr_int[m], fine_int[m * trat]); }

  // compute 'node to node' tau correction
  EncapVecT tau(ncrse), rstr_and_crse(2 * ncrse);
  // Attention: tau is getting filled with pointers to the crse's member
  for (size_t m = 0; m < ncrse; m++) { tau[m] = crse.get_tau(m); }
  // Stack [restricted integrals ; coarse integrals] into one vector so a
  // single matrix application below can form their differences.
  for (size_t m = 0; m < ncrse; m++) { rstr_and_crse[m] = rstr_int[m]; }
  for (size_t m = 0; m < ncrse; m++) { rstr_and_crse[ncrse + m] = crse_int[m]; }

  // Lazily build the (ncrse x 2*ncrse) combination matrix once; it is cached
  // in the member `fmat` and reused on later calls (node count is fixed).
  if (fmat.rows() == 0) {
    fmat.resize(ncrse, 2 * ncrse);
    fmat.fill(0.0);
    for (size_t m = 0; m < ncrse; m++) {
      // +restricted integral, -coarse integral at node m
      fmat(m, m) = 1.0;
      fmat(m, ncrse + m) = -1.0;
      // subtract 0-to-(m-1) FAS so resulting FAS is (m-1)-to-m FAS,
      // which will be required in the sweeper logic
      for (size_t n = 0; n < m; n++) {
        fmat(m, n) = -1.0;
        fmat(m, ncrse + n) = 1.0;
      }
    }
  }

  // tau[m] <- row m of fmat applied to rstr_and_crse; writes through the
  // pointers obtained from crse.get_tau above.
  tau[0]->mat_apply(tau, 1.0, fmat, rstr_and_crse, true);
}
// Compute the 2n+1 UKF sigma points around a reference point.
//
// Inputs:
//   x: reference point (n x 1)
//   P: covariance (n x n, must be positive definite for chol() to succeed)
//   c: spread coefficient
// Output:
//   X: n x (2n+1) sigma-point matrix, X = [x, Y+A, Y-A] where each column of
//      Y is a copy of x and A = c * chol(P)'.
fmat BFilterUKF::sigmas(fvec x, fmat P, float c)
{
    fmat cholP = chol(P);       // upper triangular: P = cholP.t() * cholP
    fmat A = c * cholP.t();

    // Y = x(:,ones(1,numel(x))): replicate x into every column.
    fmat Y = zeros<fmat>(x.n_rows, x.n_elem);
    for (unsigned int j = 0; j < x.n_elem; ++j) {
        for (unsigned int i = 0; i < x.n_rows; ++i) {
            Y(i, j) = x(i);     // FIX: was x1(i), which does not exist
        }
    }

    // X = [x Y+A Y-A];
    fmat X = fmat(x);
    X.insert_cols(X.n_cols, Y + A);
    X.insert_cols(X.n_cols, Y - A);
    return X;
}
bool Connectome::read(const QString &fileName, bool quiet) { if (!this->isEmpty()) clear(); QFile file(fileName); if (!file.open(QIODevice::ReadOnly)) { if (!quiet) QMessageBox::critical(0, "Warning!", QString("Cannot read file %1:\n%2.") .arg(file.fileName()).arg(file.errorString())); return false; } qint64 sz = sizeof(header); if ( file.read( (char *) &header, sz ) != sz ) { file.close(); return false; } // test it is a Connectome file QString test = QString(header.fileTag); test.resize(4); if ( test != "Cnct" ) { QMessageBox::critical(0, "Warning!", "This is not a Connectome file."); file.close(); return false; } // move file pointer 64, where data starts file.seek(64); NW = mat(header.nRegions,header.nRegions); sz = sizeof(double)*header.nRegions*header.nRegions; if (file.read( (char *) NW.memptr(), sz ) != sz) { file.close(); return false; } centroids = fmat(header.nRegions,3); sz = sizeof(float)*header.nRegions*3; if (file.read( (char *) centroids.memptr(), sz ) != sz) { file.close(); return false; } sz = header.nR*header.nC*header.nS; maskImage = uchar_cube(header.nR,header.nC,header.nS); if (file.read( (char *) maskImage.memptr(), sz ) != sz) { file.close(); return false; } // assuming the rest of the file is a ; separated string list sz = file.size() - file.pos(); char *tmp = new char[sz]; if (file.read( (char *) tmp, sz ) != sz) { file.close(); return false; } file.close(); names = QString(tmp).split(";"); delete tmp; if (names.count() != header.nRegions) return false; computeMetrics(); return true; }
// Construct an SPF (social Poisson factorization) model: allocate every
// variational-parameter matrix sized from the dataset, seed the GSL RNG,
// and initialize the parameters.
//
// model_set  algorithm settings (latent dimension k, hyperparameters, seed,
//            SVI options); the pointer is stored, not copied -- it must
//            outlive this object
// dataset    training data (supplies user/item counts); pointer stored,
//            not copied
SPF::SPF(model_settings* model_set, Data* dataset)
{
    settings = model_set;
    data = dataset;

    // user influence: sparse users-by-users matrices (tau, its log, and its
    // shape/rate variational parameters)
    printf("\tinitializing user influence (tau)\n");
    tau = sp_fmat(data->user_count(), data->user_count());
    logtau = sp_fmat(data->user_count(), data->user_count());
    a_tau = sp_fmat(data->user_count(), data->user_count());
    b_tau = sp_fmat(data->user_count(), data->user_count());

    // user preferences: dense k-by-users matrices
    printf("\tinitializing user preferences (theta)\n");
    theta = fmat(settings->k, data->user_count());
    logtheta = fmat(settings->k, data->user_count());
    a_theta = fmat(settings->k, data->user_count());
    b_theta = fmat(settings->k, data->user_count());

    // item attributes: dense k-by-items matrices
    printf("\tinitializing item attributes (beta)\n");
    printf("\t%d users and %d items\n", data->user_count(), data->item_count());
    beta = fmat(settings->k, data->item_count());
    logbeta = fmat(settings->k, data->item_count());
    a_beta = fmat(settings->k, data->item_count());
    a_beta_user = fmat(settings->k, data->item_count());
    b_beta = fmat(settings->k, data->item_count());

    // per-item bias terms; b_delta is a scalar (prior rate + user count)
    delta = fvec(data->item_count());
    a_delta = fvec(data->item_count());
    b_delta = settings->b_delta + data->user_count();
    a_delta_user = fvec(data->item_count());

    // keep track of old a_beta and a_delta for SVI
    a_beta_old = fmat(settings->k, data->item_count());
    a_beta_old.fill(settings->a_beta);
    a_delta_old = fvec(data->item_count());
    a_delta_old.fill(settings->a_delta);

    printf("\tsetting random seed\n");
    rand_gen = gsl_rng_alloc(gsl_rng_taus);
    gsl_rng_set(rand_gen, (long) settings->seed); // init the seed

    initialize_parameters();

    // SVI rescaling factor: full data size over minibatch size.
    // NOTE(review): if user_count() and sample_size are both integral this
    // is integer division -- confirm the declared type of `scale` and the
    // intended rounding behavior.
    scale = settings->svi ? data->user_count() / settings->sample_size : 1;
}
// Multinomial resampling: draw N particles with replacement, picking particle
// i with probability proportional to its weight, and return the new set with
// uniform weights 1/N.
//
// particles  input particle set (samples are column vectors; one weight per
//            column); not modified
Particles ResamplingMultinomialCUDA::resample(Particles* particles)
{
    Particles resampledSet;
    const unsigned int N = particles->weights.n_cols;
    resampledSet.samples = fmat(particles->samples.n_rows, particles->samples.n_cols);
    resampledSet.weights = frowvec(N);

    // Nothing to resample from an empty set (the original would have
    // indexed cumsum(-1) below).
    if (N == 0) {
        return resampledSet;
    }

    // Cumulative sum of the weights.
    // FIX: the first weight was previously dropped (cumsum(0) stayed 0), so
    // particle 0 could never be selected and every bin was shifted.
    fvec cumsum = zeros<fvec>(N);
    cumsum(0) = particles->weights(0);
    for (unsigned int i = 1; i < N; ++i) {
        cumsum(i) = cumsum(i - 1) + particles->weights(i);
    }

    // N uniform draws scaled to the total weight, sorted so the scan below
    // walks cumsum monotonically.
    fvec random = randu<fvec>(N);
    random = random * cumsum(cumsum.n_rows - 1);
    random = sort(random);

    for (unsigned int j = 0; j < random.n_rows; ++j) {
        // Select the first bin i with random(j) <= cumsum(i); since cumsum is
        // non-decreasing this is exactly the interval containing the draw.
        for (unsigned int i = 0; i < cumsum.n_rows; ++i) {
            if (random(j) <= cumsum(i)) {
                for (unsigned int k = 0; k < particles->samples.n_rows; ++k) {
                    resampledSet.samples(k, j) = particles->samples(k, i);
                }
                break;
            }
        }
        // Normalize weights: resampled particles carry uniform weight 1/N.
        resampledSet.weights(j) = 1.0f / N;
    }
    return resampledSet;
}
// Construct the solver from raw PCA flow-basis and coefficient-covariance
// buffers (all column-major float arrays owned by the caller).
//
// p_flow_bases_u/v  (width*height) x (nc/2) basis images for the u and v
//                   flow components
// p_cov_matrix      (2*p_nc_max_per)^2 covariance of basis coefficients
// p_nc              number of coefficients actually used (u + v halves)
// p_nc_max_per      basis vectors per component stored in the covariance
//                   (may exceed p_nc/2, triggering the sub-block path below)
// p_sigma           data-term standard deviation (stored squared)
// p_lambda          regularization weight on the inverse covariance
// p_debug           nonzero enables debug printing
// n_iters           iteration count stored for later use by the solver
ArmadilloSolver::ArmadilloSolver(
        float *p_flow_bases_u,
        float *p_flow_bases_v,
        float *p_cov_matrix,
        int p_pc_width,
        int p_pc_height,
        int p_nc,
        int p_nc_max_per,
        double p_sigma,
        double p_lambda,
        int p_debug,
        int n_iters)
{
    // Copy parameters
    this->pc_width = p_pc_width;
    this->pc_height = p_pc_height;
    this->nc = p_nc;
    this->sigma_sq = p_sigma * p_sigma;  // pre-squared for the data term
    this->lambda = p_lambda;
    this->debug = p_debug;
    this->n_iters = n_iters;

    // Copy pointers to raw data of the flow bases.
    //this->flow_bases_u = flow_bases_u;
    //this->flow_bases_v = flow_bases_v;

    // These matrices are of size (width*height) x n_bases.
    this->flow_bases_u_t = fmat(p_flow_bases_u, p_pc_width*p_pc_height, p_nc/2);
    this->flow_bases_v_t = fmat(p_flow_bases_v, p_pc_width*p_pc_height, p_nc/2);

    // If we want less cols, remove them here.
    // NOTE(review): head_cols(p_nc/2) on a matrix constructed with exactly
    // p_nc/2 columns is a no-op as written -- presumably kept for the case
    // where fewer coefficients than stored bases are requested; confirm.
    this->flow_bases_u_t = this->flow_bases_u_t.head_cols(p_nc/2);
    this->flow_bases_v_t = this->flow_bases_v_t.head_cols(p_nc/2);

    // Generate covariance matrix
    fmat cov(p_cov_matrix, 2*p_nc_max_per, 2*p_nc_max_per);
    // Armadillo stores the matrices as column-major. But cov is symmetric,
    // so this is not a problem.

    if (p_nc_max_per == p_nc/2) {
        // We use the full covariance matrix.
        // Generate Q as inverse of covariance matrix with some slight
        // regularization (adding the identity before inverting).
        mat Q_ = this->lambda * (conv_to<mat>::from(cov) + eye<mat>(p_nc,p_nc)).i();
        this->Q = conv_to<fmat>::from(Q_);
    } else {
        if (0) {
            // Old-style inversion (dead code, kept for reference): invert the
            // full covariance first, then select the used rows/columns.
            mat Q_ = this->lambda * (conv_to<mat>::from(cov) + eye<mat>(2*p_nc_max_per,2*p_nc_max_per)).i();
            uvec inds(p_nc);
            inds.subvec(0,p_nc/2-1) = linspace<uvec>(0,p_nc/2-1,p_nc/2);
            inds.subvec(p_nc/2,p_nc-1) = linspace<uvec>(0,p_nc/2-1,p_nc/2)+p_nc_max_per;
            this->Q = conv_to<fmat>::from(Q_(inds,inds));
        } else {
            // New style: select the used u and v coefficient indices first,
            // then invert only that sub-block of the covariance.
            uvec inds = join_cols(linspace<uvec>(0,p_nc/2-1,p_nc/2),
                                  linspace<uvec>(p_nc_max_per, p_nc_max_per+p_nc/2-1,p_nc/2));
            fmat cov2 = cov(inds,inds);
            mat Q_ = this->lambda * (conv_to<mat>::from(cov2) + eye<mat>(p_nc,p_nc)).i();
            this->Q = conv_to<fmat>::from(Q_);
        }
    }

    // fvec indices_ = kp0tr.col(1) * this->pc_width + kp0tr.col(0);
    // uvec indices = conv_to<uvec>::from(indices_);
    // if (this->debug)
    //     std::cout << "[DEBUG] \t Filling A ..." << std::endl;
    // fmat Au = this->flow_bases_u_t.rows(indices);
    // fmat Av = this->flow_bases_v_t.rows(indices);
    // this->Q = this->lambda * (cov + eye<fmat>(p_nc,p_nc)).i();

    if (this->debug) {
        std::cout << "[DEBUG] Initialized ArmadilloSolver." << std::endl;
        std::cout << "[DEBUG] Using " << this->nc << " basis vectors." << std::endl;
    }
}
// Residual resampling: each particle i is deterministically replicated
// N_i = floor(N * w_i) times (stage 1); the residual probability mass
// w_i - N_i/N is then resampled multinomially.  The origin index of every
// drawn particle is appended to assignmentVec.
//
// particles  input particle set (samples are column vectors, one weight per
//            column); not modified
Particles ResamplingResidual::resample(Particles* particles)
{
    Particles resampledSet;
    Particles stage1;
    int count = 0;
    resampledSet.samples = fmat(particles->samples.n_rows, particles->samples.n_cols);
    resampledSet.weights = frowvec(particles->weights.n_cols);
    unsigned int number = particles->samples.n_cols;

    // Deterministic copy counts N_i = floor(N * w_i).
    // FIX: the old code wrote `copies = ...` (assigning to the whole vector)
    // instead of `copies(i) = ...`, so every copy count was wrong.
    ivec copies = zeros<ivec>(particles->samples.n_cols);
    for (unsigned int i = 0; i < particles->samples.n_cols; ++i) {
        copies(i) = (int) floor(number * particles->weights(i));
    }
    unsigned int numberOfStage1 = sum(copies);

    stage1.samples = fmat(particles->samples.n_rows, numberOfStage1);
    stage1.weights = frowvec(numberOfStage1);

    // Picking N_i = N*w_i copies of i-th particle.
    // FIX: this loop previously started at i = 1, silently dropping all
    // copies of particle 0.
    for (unsigned int i = 0; i < copies.n_rows; ++i) {
        for (int j = 0; j < copies(i); ++j) {
            for (unsigned int k = 0; k < particles->samples.n_rows; ++k) {
                stage1.samples(k, count) = particles->samples(k, i);
            }
            // residual weight w_i - N_i/N carried into stage 2
            stage1.weights(count) = particles->weights(i) - (float) copies(i) / number;
            count++;
        }
    }

    // multinomial resampling with residuum weights w_i = w_i - N_i/N
    // FIX: cumsum(0) previously stayed 0, so the first stage-1 particle
    // could never be drawn.
    fvec cumsum = zeros<fvec>(numberOfStage1);
    if (numberOfStage1 > 0) {
        cumsum(0) = stage1.weights(0);
        for (unsigned int i = 1; i < stage1.weights.n_cols; ++i) {
            cumsum(i) = cumsum(i - 1) + stage1.weights(i);
        }
    }

    // generate sorted random set scaled to the total residual mass
    fvec random = randu<fvec>(numberOfStage1);
    if (numberOfStage1 > 0) {
        random = random * cumsum(cumsum.n_rows - 1);
    }
    random = sort(random);

    // NOTE(review): only numberOfStage1 columns of resampledSet are filled
    // here and the stage-1 copies are never merged into the output; this
    // mirrors the original control flow -- confirm the intended contract.
    for (unsigned int j = 0; j < random.n_rows; ++j) {
        for (unsigned int i = 0; i < cumsum.n_rows; ++i) {
            if (random(j) <= cumsum(i)) {
                for (unsigned int k = 0; k < stage1.samples.n_rows; ++k) {
                    resampledSet.samples(k, j) = stage1.samples(k, i);
                }
                assignmentVec.push_back(i);
                break;
            }
        }
        // Normalize weights
        resampledSet.weights(j) = 1.0f / particles->weights.n_cols;
    }
    return resampledSet;
}
// Default constructor: a one-dimensional particle set holding a single
// (uninitialised) sample column and a single weight.
Particles::Particles()
{
    samples = fmat(1, 1);
    weights = frowvec(1);
    dim = 1;
}
// Compute Monte Carlo impulse-response functions for a Markov-switching
// Bayesian SVAR from Gibbs-sampler output.
//
// gibbs    R list whose first four elements are the posterior draw matrices:
//          b (structural A0 parameters), F (VAR coefficients), xi (regime
//          variances) and Q (regime transition matrix)
// msbsvar  R model object; element 7 holds Ui (identification mapping used
//          by b2a)
// nsteps   number of IRF horizons
//
// Returns an R list with one [N2 x nsteps x m^2] array per regime, each
// classed "mc.irf.BSVAR".
SEXP msbsvar_irf(SEXP gibbs, SEXP msbsvar, SEXP nsteps)
{
  int i, k, n, N2, h, m, p, n0max, ns=INTEGER(nsteps)[0];
  int *db, *dF, *dxi, *dQ, N210pct, pctct=0;
  SEXP bR, FR, xiR, QR, Ui, IRFlist, IRFtmp;
  // Rprintf("ns = %d\n",ns);

  // Get b, F, xi, Q, SS, dims from gibbs object
  PROTECT(bR = VECTOR_ELT(gibbs,0)); db=getdims(bR);
  // Rprintf("b(%d,%d)\n",db[0],db[1]);
  PROTECT(FR = VECTOR_ELT(gibbs,1)); dF=getdims(FR);
  // Rprintf("F(%d,%d)\n",dF[0],dF[1]);
  PROTECT(xiR= VECTOR_ELT(gibbs,2)); dxi=getdims(xiR);
  // Rprintf("xi(%d,%d)\n",dxi[0],dxi[1]);
  PROTECT(QR = VECTOR_ELT(gibbs,3)); dQ=getdims(QR);
  // NOTE(review): QR is unprotected immediately after its dims are read, and
  // dQ points into QR's attributes.  This relies on `gibbs` keeping QR alive
  // for the rest of the call -- confirm against R's GC rules.
  UNPROTECT(1);
  // Rprintf("Q(%d,%d)\n",dQ[0],dQ[1]);
  // Rprintf("Gibbs Objects and Dimensions Assigned\n");

  // Reconstruct constants from the draw dimensions:
  //   N2 = number of posterior draws, h = number of regimes,
  //   n0max = free A0 parameters per regime, m = number of equations,
  //   p = lag length.
  N2=db[0];
  h=(int)sqrt((double)dQ[1]);
  n0max=db[1]/h;
  m=dxi[1]/h;
  p=((dF[1]/(h*m))-1)/m;
  N210pct=N2/10;   // progress-report interval: every 10% of the draws
  // Rprintf("N2=%d\nh=%d\nm=%d\np=%d\nn0max=%d\n",N2,h,m,p,n0max);

  // Get Ui from msbsvar
  PROTECT(Ui=VECTOR_ELT(msbsvar,7));

  // Copy the R draw matrices into C Matrix objects (one row per draw).
  Matrix bsample=R2Cmat(bR,N2,n0max*h);
  Matrix Fsample=R2Cmat(FR,N2,m*(m*p+1)*h);
  Matrix xisample=R2Cmat(xiR,N2,m*h);

  // Per-draw work objects.  Note: `fmat` here is an m x (m*p+1) coefficient
  // matrix (newmat Matrix), not a matrix-library typedef.
  ColumnVector bk(n0max), Fk(m*(m*p+1)), bvec(m*m*p);
  bk=0.0; Fk=0.0; bvec=0.0;
  DiagonalMatrix xik(m), sqrtxik(m);
  xik=0.0; sqrtxik=0.0;
  Matrix Q(h,h), A0(m,m), A0i(m,m), fmat(m,m*p+1), sqrtwish, impulse(N2,m*m*ns);
  double *pFk;
  int IRFdims[]={N2,ns,m*m};

  PROTECT(IRFlist=allocVector(VECSXP,h));

  // Loop over regimes
  for(k=1;k<=h;k++){
    // Rprintf("\n==========\nRegime %d\n==========\n",k);
    pctct=0;

    // Compute impulse responses for every draw of regime k
    for(n=1;n<=N2;n++){
      // Rprintf("\nDraw %d:\n",n);

      // Get values for draw 'n', regime 'k' (1-based SubMatrix indexing)
      bk=bsample.SubMatrix(n,n,(k-1)*n0max+1,k*n0max).t();
      // Rprintf("--bk(%d): ",bk.Storage()); //printCVector(bk);
      Fk=Fsample.SubMatrix(n,n,(k-1)*m*(m*p+1)+1,k*m*(m*p+1)).t();
      pFk=Fk.Store();
      // Rprintf("--Fk(%d): ",Fk.Storage()); //printCVector(Fk);
      for(i=1;i<=m;i++) xik(i)=sqrt(xisample(n,(k-1)*m+i));
      // Rprintf("--xik(%d)/sqrtxik(%d) defined\n",m,m);

      // Compute A0/A0^-1/sqrtwish for regime k
      A0=b2a(bk,Ui);
      //Rprintf("--A0(%d,%d):",m,m); //printMatrix(A0);
      A0i=A0.i();
      //Rprintf("--A0^-1(%d,%d):",m,m); //printMatrix(A0i);
      sqrtwish=(A0*xik).i();
      //Rprintf("--sqrtwish(%d,%d):",m,m); //printMatrix(sqrtwish);

      // Compute beta vector: reshape Fk into m x (m*p+1), translate the
      // structural coefficients to reduced form via A0^-1, stack as a column.
      fmat.ReSize(m,m*p+1);
      fmat<<pFk;
      fmat=fmat.t();
      fmat=(fmat.Rows(1,m*p)*A0i).t();
      bvec=fmat.AsColumn();
      // Rprintf("--fmat(%d,%d):",m,m*p+1); printMatrix(fmat);
      // Rprintf("bvec_%d:", n); printCVector(bvec);

      // Compute IRF for this draw; store it as row n of `impulse`.
      impulse.Row(n)=irf_var_from_beta(sqrtwish.t(), bvec, ns).t();

      if (!(n%N210pct)) Rprintf("Regime %d: Monte Carlo IRF %d percent complete (Iteration %d)\n",k,++pctct*10,n);
    }

    // Create and class Robj for impulses, load into IRFlist
    PROTECT(IRFtmp=C2R3D(impulse,IRFdims));
    setclass(IRFtmp,"mc.irf.BSVAR");
    SET_VECTOR_ELT(IRFlist, k-1, IRFtmp);
    UNPROTECT(1);
  }

  // Balance: bR, FR, xiR, Ui, IRFlist remain protected here.
  UNPROTECT(5);
  return IRFlist;
}