Example #1
void LinearGaussian::init_and_weight(NumericMatrix & xparticles, NumericVector & logweights){
  RNGScope scope;
  int nparticles = xparticles.nrow();
  std::fill(xparticles.begin(), xparticles.end(), 0);
  // draw the initial states from the stationary distribution N(0, sigma^2 / (1 - rho^2))
  xparticles(_,0) = rnorm(nparticles, 0, sigma / sqrt(1 - rho*rho));
  for (int iparticle = 0; iparticle < nparticles; iparticle++){
    // Gaussian measurement log-density of the first observation, up to an additive constant
    logweights(iparticle) = Minus1Div2SdMeasurSquared * (xparticles(iparticle,0) - observations(0, 0)) *
      (xparticles(iparticle,0) - observations(0, 0));
  }
}
Example #2
// [[Rcpp::export]]
NumericMatrix matrixSqrt(NumericMatrix orig) {

  // allocate the matrix we will return
  NumericMatrix mat(orig.nrow(), orig.ncol());

  // transform it
  std::transform(orig.begin(), orig.end(), mat.begin(), ::sqrt);

  // return the new matrix
  return mat;
}
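A closely related sketch (mine, not part of the example; the name matrixSqrtLambda is hypothetical): the same element-wise transform written with a C++11 lambda, which avoids the overload ambiguity that a bare ::sqrt can cause on some compilers.

#include <Rcpp.h>
#include <algorithm>
#include <cmath>
using namespace Rcpp;

// [[Rcpp::plugins(cpp11)]]
// [[Rcpp::export]]
NumericMatrix matrixSqrtLambda(NumericMatrix orig) {

  // allocate a result matrix of the same shape
  NumericMatrix mat(orig.nrow(), orig.ncol());

  // element-wise square root via a lambda
  std::transform(orig.begin(), orig.end(), mat.begin(),
                 [](double x) { return std::sqrt(x); });

  return mat;
}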
Example #3
// sqrt_double is assumed by this example; a minimal definition so the snippet is self-contained
inline static double sqrt_double(double x) { return ::sqrt(x); }

// [[Rcpp::export]]
List MatrixExample(NumericMatrix orig) {
    NumericMatrix mat(orig.nrow(), orig.ncol());

    // we could query the size via
    //   int n = mat.nrow(), k = mat.ncol();
    // and loop over the elements, but using the STL is much nicer,
    // so we apply an STL transform() algorithm to each element
    std::transform(orig.begin(), orig.end(), mat.begin(), sqrt_double );

    List result = List::create(
        Named( "result" ) = mat, 
        Named( "original" ) = orig
    );
    return result ;
}
Example #4
// Scan a single chromosome with additive covariates and weights (binary phenotypes)
//
// genoprobs = 3d array of genotype probabilities (individuals x genotypes x positions)
// pheno     = matrix of numeric phenotypes (individuals x phenotypes)
//             (no missing data allowed, values should be in [0,1])
// addcovar  = additive covariates (an intercept, at least)
// weights   = vector of weights
//
// output    = matrix of log likelihood values (phenotypes x positions)
//
// [[Rcpp::export]]
NumericMatrix scan_binary_onechr_weighted(const NumericVector& genoprobs,
                                          const NumericMatrix& pheno,
                                          const NumericMatrix& addcovar,
                                          const NumericVector& weights,
                                          const int maxit=100,
                                          const double tol=1e-6,
                                          const double qr_tol=1e-12,
                                          const double eta_max=30.0)
{
    const int n_ind = pheno.rows();
    if(Rf_isNull(genoprobs.attr("dim")))
        throw std::invalid_argument("genoprobs should be a 3d array but has no dim attribute");
    const Dimension d = genoprobs.attr("dim");
    if(d.size() != 3)
        throw std::invalid_argument("genoprobs should be a 3d array");
    if(n_ind != d[0])
        throw std::range_error("nrow(pheno) != nrow(genoprobs)");
    if(n_ind != addcovar.rows())
        throw std::range_error("nrow(pheno) != nrow(addcovar)");
    if(n_ind != weights.size())
        throw std::range_error("nrow(pheno) != length(weights)");
    const int n_pos = d[2];
    const int n_gen = d[1];
    const int n_add = addcovar.cols();
    const int g_size = n_ind * n_gen;
    const int n_phe = pheno.cols();

    NumericMatrix result(n_phe, n_pos);
    NumericMatrix X(n_ind, n_gen+n_add);

    if(n_add > 0) // paste in covariates, if present
        std::copy(addcovar.begin(), addcovar.end(), X.begin() + g_size);

    for(int pos=0, offset=0; pos<n_pos; pos++, offset += g_size) {
        Rcpp::checkUserInterrupt();  // check for ^C from user

        // copy genoprobs for this pos into a matrix
        std::copy(genoprobs.begin() + offset, genoprobs.begin() + offset + g_size, X.begin());

        for(int phe=0; phe<n_phe; phe++) {
            // calculate the log likelihood and store it in result(phe, pos)
            result(phe,pos) = calc_ll_binreg_weighted(X, pheno(_,phe), weights, maxit, tol, qr_tol, eta_max);
        }
    }

    return result;
}
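A stripped-down sketch of the column-major layout the loop above relies on (my illustration; bind_probs_covar is a hypothetical name): NumericMatrix stores columns contiguously, so the covariate block can be copied once at offset g_size = n_ind * n_gen, and only the first n_gen columns need to be overwritten at each position.

#include <Rcpp.h>
#include <algorithm>
using namespace Rcpp;

// [[Rcpp::export]]
NumericMatrix bind_probs_covar(NumericMatrix probs, NumericMatrix addcovar) {
    // assumes addcovar has the same number of rows as probs
    int n_ind = probs.nrow();
    NumericMatrix X(n_ind, probs.ncol() + addcovar.ncol());
    // genotype-probability columns occupy the first probs.ncol() columns
    std::copy(probs.begin(), probs.end(), X.begin());
    // covariate columns follow, starting at element offset n_ind * probs.ncol()
    std::copy(addcovar.begin(), addcovar.end(), X.begin() + probs.size());
    return X;
}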
Example #5
// [[Rcpp::export]]
NumericMatrix samplehouseholds_format2(NumericMatrix phi, NumericMatrix w, NumericVector pi,
                               NumericVector d, List lambda,
                               int currrentbatch, int nHouseholds,  int householdsize) {

  int K = w.nrow();
  int L = w.ncol();
  int p = d.length();
  int n_lambdas = lambda.length();
  int *lambda_columns = new int[n_lambdas];
  double **lambdas = new double*[n_lambdas];
  int maxDDtp = phi.nrow();
  int maxdd = maxDDtp / p;

  //int ncol = householdsize * DIM + 1 + householdsize;
  //output data layout (zero-based columns):
  //  column 0: household unique index
  //  column 1: member index within the household (pernum: person number?)
  //  columns 2 to 2+p-1: individual-level data
  //  columns 2+p to 2+p+n_lambdas-2: household-level data
  //  column 2+p+n_lambdas-1: household group indicator
  //  last householdsize columns: individual group indicators
  int DIM = 2 + p + n_lambdas - 1; //not output the household size
  int ncol = DIM * householdsize + householdsize  + 1;
  NumericMatrix data(nHouseholds, ncol);

  //copy data from list of matrices to C++
  for (int i = 0; i < n_lambdas; i++) {
    NumericMatrix l = lambda[i];
    lambda_columns[i] = l.ncol();
    lambdas[i] = new double[l.length()];
    std::copy(l.begin(), l.end(), lambdas[i]);
  }
  //printf("in samplehouseholds\n");
  NumericVector rand = runif(nHouseholds * ncol); //at most this many
  sampleHouseholds_imp_format2(data.begin(), rand.begin(), lambdas, lambda_columns, w.begin(),
                       phi.begin(), pi.begin(),d.begin(),
                       nHouseholds, householdsize, K, L,maxdd,p, currrentbatch,n_lambdas);

  //clean up
  delete [] lambda_columns;
  for (int i = 0; i < n_lambdas; i++) {
    delete [] lambdas[i];
  }
  delete [] lambdas;
  //printf("done samplehouseholds\n");
  return data;
}
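A hedged alternative for the manual memory management above (a sketch, not the package's code; copy_lambdas is a hypothetical helper): std::vector releases its storage automatically, so the lambdas buffers cannot leak if an Rcpp allocation throws before the delete block runs.

#include <Rcpp.h>
#include <vector>
using namespace Rcpp;

// copy the list of matrices into C++ buffers without raw new/delete
void copy_lambdas(List lambda,
                  std::vector<int>& lambda_columns,
                  std::vector< std::vector<double> >& lambdas) {
  int n_lambdas = lambda.length();
  lambda_columns.resize(n_lambdas);
  lambdas.resize(n_lambdas);
  for (int i = 0; i < n_lambdas; i++) {
    NumericMatrix l = lambda[i];
    lambda_columns[i] = l.ncol();
    lambdas[i].assign(l.begin(), l.end());   // deep copy of the matrix contents
  }
}

The per-matrix column counts and raw pointers (lambdas[i].data()) could then be handed to sampleHouseholds_imp_format2 unchanged.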
Example #6
    double nlsResp::updateMu(const VectorXd& gamma) {
	int             n = d_y.size();
	if (gamma.size() != d_gamma.size())
	    throw invalid_argument("size mismatch in updateMu");
	std::copy(gamma.data(), gamma.data() + gamma.size(), d_gamma.data());
	const VectorXd lp(d_gamma + d_offset); // linear predictor
	const double  *gg = lp.data();

	for (int p = 0; p < d_pnames.size(); ++p) {
	    std::string pn(d_pnames[p]);
	    NumericVector pp = d_nlenv.get(pn);
	    std::copy(gg + n * p, gg + n * (p + 1), pp.begin());
	}
	NumericVector  rr = d_nlmod.eval(SEXP(d_nlenv));
	if (rr.size() != n)
	    throw invalid_argument("dimension mismatch");
	std::copy(rr.begin(), rr.end(), d_mu.data());
	NumericMatrix  gr = rr.attr("gradient");
	std::copy(gr.begin(), gr.end(), d_sqrtXwt.data());
	return updateWrss();
    }
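A standalone sketch of the pattern used in updateMu (my illustration; eval_gradient and its arguments are hypothetical): assign a parameter vector into an R environment, evaluate the stored nonlinear-model call in that environment, and read the "gradient" attribute of the fitted values.

#include <Rcpp.h>
using namespace Rcpp;

NumericMatrix eval_gradient(Language model, Environment env,
                            std::string pname, NumericVector value) {
    env.assign(pname, value);                     // push the parameter into the environment
    NumericVector fitted = model.eval(env);       // evaluate the model call in that environment
    NumericMatrix grad = fitted.attr("gradient"); // gradient attribute, as set by deriv()-style models
    return grad;
}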
Example #7
    /** 
     * Update L, Ut and cu for new weights.
     *
     * Update Ut from Zt and sqrtXwt, then L from Lambda and Ut
     * Update cu from wtres, Lambda and Ut.
     * 
     * @param Xwt Matrix of weights for the model matrix
     * @param wtres weighted residuals
     */
    void reModule::reweight(Rcpp::NumericMatrix const&   Xwt,
			    Rcpp::NumericVector const& wtres) {
	double mone = -1., one = 1.; 
	int Wnc = Xwt.ncol();
	if (d_Ut) M_cholmod_free_sparse(&d_Ut, &c);
	if (Wnc == 1) {
	    d_Ut = M_cholmod_copy_sparse(&d_Zt, &c);
	    chmDn csqrtX(Xwt);
	    M_cholmod_scale(&csqrtX, CHOLMOD_COL, d_Ut, &c);
	} else {
	    int n = Xwt.nrow();
	    CHM_TR tr = M_cholmod_sparse_to_triplet(&d_Zt, &c);
	    int *j = (int*)tr->j, nnz = tr->nnz;
	    double *x = (double*)tr->x, *W = Xwt.begin();
	    for (int k = 0; k < nnz; k++) {
		x[k] *= W[j[k]];
		j[k] = j[k] % n;
	    }
	    tr->ncol = (size_t)n;

	    d_Ut = M_cholmod_triplet_to_sparse(tr, nnz, &c);
	    M_cholmod_free_triplet(&tr, &c);
	}
				// update the factor L
	CHM_SP LambdatUt = d_Lambda.crossprod(d_Ut);
	d_L.update(*LambdatUt, 1.);
	d_ldL2 = d_L.logDet2();
				// update cu
	chmDn ccu(d_cu), cwtres(wtres);
	copy(d_u0.begin(), d_u0.end(), d_cu.begin());
	M_cholmod_sdmult(LambdatUt, 0/*trans*/, &one, &mone, &cwtres, &ccu, &c);
	M_cholmod_free_sparse(&LambdatUt, &c);
	NumericMatrix
	    ans = d_L.solve(CHOLMOD_L, d_L.solve(CHOLMOD_P, d_cu));
	copy(ans.begin(), ans.end(), d_cu.begin());
	d_CcNumer = inner_product(d_cu.begin(), d_cu.end(), d_cu.begin(), double());
    }
Example #8
    double nlmerResp::updateMu(Rcpp::NumericVector const &gamma) throw(std::runtime_error) {
	int n = d_y.size();
#ifdef USE_RCPP_SUGAR
	Rcpp::NumericVector gam = gamma + d_offset;
#else
	NumericVector gam(d_offset.size());
	std::transform(gamma.begin(), gamma.end(), d_offset.begin(),
		       gam.begin(), std::plus<double>());
#endif
	double *gg = gam.begin();

	for (int p = 0; p < d_pnames.size(); p++) {
	    std::string pn(d_pnames[p]);
	    Rcpp::NumericVector pp = d_nlenv.get(pn);
	    std::copy(gg + n * p, gg + n * (p + 1), pp.begin());
	}
	NumericVector rr = d_nlmod.eval(SEXP(d_nlenv));
	if (rr.size() != n)
	    throw std::runtime_error("dimension mismatch");
	std::copy(rr.begin(), rr.end(), d_mu.begin());
	NumericMatrix rrg = rr.attr("gradient");
	std::copy(rrg.begin(), rrg.end(), d_sqrtXwt.begin());
	return updateWrss();
    }
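The #ifdef above contrasts Rcpp sugar with the plain STL form; a minimal standalone sketch of the same two ways to add an offset vector (add_offset is a hypothetical name, and equal lengths are assumed):

#include <Rcpp.h>
#include <algorithm>
#include <functional>
using namespace Rcpp;

// [[Rcpp::export]]
NumericVector add_offset(NumericVector gamma, NumericVector offset) {
    // sugar form: element-wise addition in a single expression
    NumericVector gam = gamma + offset;

    // equivalent STL form (what the #else branch above spells out):
    // NumericVector gam(offset.size());
    // std::transform(gamma.begin(), gamma.end(), offset.begin(),
    //                gam.begin(), std::plus<double>());

    return gam;
}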
Example #9
// [[Rcpp::export]]
NumericMatrix getRandomMatrix_AbundanceQuantMatrix(NumericVector mAbundance, NumericVector nAbundance, NumericMatrix quantInteractions, NumericMatrix probInteractions) {
	//TODO make sure all lengths are okay
	NumericVector m_abundance = clone(mAbundance);
	NumericVector n_abundance = clone(nAbundance);

	NumericMatrix to_return(probInteractions.nrow(), probInteractions.ncol());
	// accumulate with a double initial value (0.0) so the sums are not truncated to int
	double total_interactions = std::accumulate(quantInteractions.begin(), quantInteractions.end(), 0.0);

	double num_cur_interactions = 0;
	double num_m_left = std::accumulate(m_abundance.begin(), m_abundance.end(), 0.0);
	double num_n_left = std::accumulate(n_abundance.begin(), n_abundance.end(), 0.0);

	while(num_cur_interactions < total_interactions) {
		//pick an M
		int random_m = round(runif(1, 0, num_m_left - 1)[0]);
		int random_m_idx = -1;

		int m_sum = 0;
		for (int m_idx = 0; m_idx < m_abundance.size(); m_idx++) {
			m_sum += m_abundance[m_idx];
			if (m_sum >= random_m) {
				random_m_idx = m_idx;
				break;
			}
		}

		if (random_m_idx < 0) {
			// failed to pick an m index; warn and retry rather than indexing with -1
			Rcpp::Rcerr << "getRandomMatrix_AbundanceQuantMatrix: failed to pick an m index" << std::endl;
			continue;
		}

		//pick an N
		int random_n = round(runif(1, 0, num_n_left - 1)[0]);
		int random_n_idx = -1;

		int n_sum = 0;
		for (int n_idx = 0; n_idx < n_abundance.size(); n_idx++) {
			n_sum += n_abundance[n_idx];
			if (n_sum >= random_n) {
				random_n_idx = n_idx;
				break;
			}
		}

		if (random_n_idx < 0) {
			// failed to pick an n index; warn and retry rather than indexing with -1
			Rcpp::Rcerr << "getRandomMatrix_AbundanceQuantMatrix: failed to pick an n index" << std::endl;
			continue;
		}
		
		//can they interact? If so, add the edge
		if(runif(1, 0, 1)[0] < probInteractions(random_m_idx, random_n_idx)) {
			//update mAbundances, nAbundances, num_cur_interactions, and num_left
			num_cur_interactions += 1;

			m_abundance[random_m_idx] -= 1;
			n_abundance[random_n_idx] -= 1;

			num_m_left -= 1;
			num_n_left -= 1;

			to_return(random_m_idx, random_n_idx) += 1;
		}
	}
	return to_return;
}
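The two index-picking loops above walk a cumulative sum against a uniform draw; a compact sketch of that selection step (my illustration; sample_index_by_abundance is hypothetical), drawing an index with probability proportional to the remaining abundances:

#include <Rcpp.h>
#include <numeric>
using namespace Rcpp;

int sample_index_by_abundance(const NumericVector& abundance) {
	double total = std::accumulate(abundance.begin(), abundance.end(), 0.0);
	double u = R::runif(0.0, total);          // single uniform draw on (0, total)
	double cum = 0.0;
	for (int i = 0; i < abundance.size(); i++) {
		cum += abundance[i];
		if (u <= cum) return i;               // first index whose cumulative sum covers u
	}
	return abundance.size() - 1;              // guard against floating-point round-off
}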
Example #10
// [[Rcpp::export]]
NumericVector getMonotonicTimeseries(NumericMatrix originalMatrix, int timeSteps, int nodfFrequency, bool sortFirst=true) {
	RNGScope scope;
	std::vector<double> nodf_timeseries;

	NumericMatrix random_mat(originalMatrix.nrow(), originalMatrix.ncol());
	int num_edges = std::accumulate(originalMatrix.begin(), originalMatrix.end(), 0);
	int num_m = originalMatrix.nrow();
	int num_n = originalMatrix.ncol();

	double lambda_edge = (double(num_edges) - 1) /timeSteps;
	double lambda_m = (double(num_m) - 1)/timeSteps;
	double lambda_n = (double(num_n) - 1)/timeSteps;

	int cur_edges = 1;
	int cur_m = 1;
	int cur_n = 1;

	int time_step = 0;

	while(cur_m < num_m || cur_n < num_n || cur_edges < num_edges) {
		if(time_step > 0 && time_step % nodfFrequency == 0) {
			NumericMatrix to_test = random_mat(Range(0, cur_m - 1), Range(0, cur_n - 1));
			if(sortFirst) to_test = sortMatrix(to_test);
			List nodf_result = calculateNODF(to_test);
			nodf_timeseries.push_back(Rcpp::as<double>(nodf_result["NODF"]));
		}
		//add m (hosts)
		if (cur_m < num_m) cur_m += rpois(1, lambda_m)[0];
		cur_m = std::min(cur_m, num_m);

		//add n (parasites)
		if (cur_n < num_n) cur_n += rpois(1, lambda_n)[0];
		cur_n = std::min(cur_n, num_n);

		//TODO: make sure cur_n and cur_m never go > m and n, since in this model,
		//we have a max size on our matrix from the start. 

		//add edges 
		if(cur_edges < num_edges) {
			int edge_event = rpois(1, lambda_edge)[0];

			if(cur_m*cur_n - cur_edges < edge_event) {
				//not enough space for edges...
				//max out how many edges we should add then
				edge_event = cur_m * cur_n - cur_edges;
			}

			int num_edges_left = edge_event;

			while(num_edges_left > 0) {
				//better algorithm : count how many non-edges there are, use that as prob of picking 1 of them
				//iterate through matrix until we find a 0, then pull from ^ prob to add edge or not...			
				int num_vacant_edges = cur_m * cur_n - cur_edges;
				double prob_add_edge = 1 / double(num_vacant_edges);

				for(int i = 0; i < cur_m; i++) {
					for(int j=0; j < cur_n; j++) {
						double ran_draw = runif(1, 0, 1)[0];

						if(random_mat(i,j) == 0 && ran_draw < prob_add_edge && num_edges_left > 0) {
							num_edges_left -= 1;
							cur_edges += 1;
							random_mat(i, j) = 1;
						}
					}
				}
			}
		}
	time_step += 1;
	}

	return wrap(nodf_timeseries);
}
Example #11
// [[Rcpp::export]]
NumericMatrix getRandomMatrix_GrowMonotonic(NumericMatrix originalMatrix, int timeSteps=100) {
	RNGScope scope;
	NumericMatrix random_mat(originalMatrix.nrow(), originalMatrix.ncol());
	int num_edges = std::accumulate(originalMatrix.begin(), originalMatrix.end(), 0);
	int num_m = originalMatrix.nrow();
	int num_n = originalMatrix.ncol();

	double lambda_edge = (double(num_edges) - 1) /timeSteps;
	double lambda_m = (double(num_m) - 1)/timeSteps;
	double lambda_n = (double(num_n) - 1)/timeSteps;

	int cur_edges = 1;
	int cur_m = 1;
	int cur_n = 1;

	while(cur_m < num_m || cur_n < num_n || cur_edges < num_edges) {
		//add m (hosts)
		if (cur_m < num_m) cur_m += rpois(1, lambda_m)[0];
		cur_m = std::min(cur_m, num_m);

		//add n (parasites)
		if (cur_n < num_n) cur_n += rpois(1, lambda_n)[0];
		cur_n = std::min(cur_n, num_n);

		//TODO: make sure cur_n and cur_m never go > m and n, since in this model,
		//we have a max size on our matrix from the start. 

		//add edges 
		if(cur_edges < num_edges) {
			int edge_event = rpois(1, lambda_edge)[0];

			if(cur_m*cur_n - cur_edges < edge_event) {
				//not enough space for edges...
				//max out how many edges we should add then
				edge_event = cur_m * cur_n - cur_edges;
			}

			int num_edges_left = edge_event;

			while(num_edges_left > 0) {
				//better algorithm : count how many non-edges there are, use that as prob of picking 1 of them
				//iterate through matrix until we find a 0, then pull from ^ prob to add edge or not...			
				int num_vacant_edges = cur_m * cur_n - cur_edges;
				double prob_add_edge = 1 / double(num_vacant_edges);

				for(int i = 0; i < cur_m; i++) {
					for(int j=0; j < cur_n; j++) {
						double ran_draw = runif(1, 0, 1)[0];

						if(random_mat(i,j) == 0 && ran_draw < prob_add_edge && num_edges_left > 0) {
							num_edges_left -= 1;
							cur_edges += 1;
							random_mat(i, j) = 1;
						}
					}
				}
			}
		}
	}

	return random_mat;
}
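One small note on the growth steps in the two examples above: rpois(1, lambda)[0] allocates a length-one vector for every draw, while R::rpois returns a single scalar. A sketch of the per-step growth with that substitution (grow_dimension is a hypothetical helper):

#include <Rcpp.h>
#include <algorithm>
using namespace Rcpp;

// advance a dimension by a Poisson increment, never exceeding its target size
int grow_dimension(int cur, int target, double lambda) {
	if (cur < target) cur += (int) R::rpois(lambda);
	return std::min(cur, target);
}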
Example #12
NumericMatrix stl_sort_matrix(NumericMatrix x) {
  NumericMatrix y = clone(x);
  std::sort(y.begin(), y.end());

  return y;
}
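std::sort over y.begin()..y.end() above treats the matrix as one flat, column-major vector, so row/column structure is lost. If per-column sorting is what is wanted, here is a sketch (stl_sort_columns is a hypothetical name) that uses the fact that each column is stored contiguously:

#include <Rcpp.h>
#include <algorithm>
using namespace Rcpp;

// [[Rcpp::export]]
NumericMatrix stl_sort_columns(NumericMatrix x) {
  NumericMatrix y = clone(x);
  int nr = y.nrow();
  for (int j = 0; j < y.ncol(); j++) {
    // column j occupies elements [j * nr, (j + 1) * nr) in column-major storage
    std::sort(y.begin() + j * nr, y.begin() + (j + 1) * nr);
  }
  return y;
}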
Example #13
    /** 
     * Solve for the increment given the updated cu
     * 
     * @param cu 
     */
    void reModule::updateIncr(const Rcpp::NumericVector& cu) {
	NumericMatrix nu = d_L.solve(CHOLMOD_Pt, d_L.solve(CHOLMOD_Lt, cu));
	copy(nu.begin(), nu.end(), d_incr.begin());
    }
Example #14
    /** 
     * Solve for the increment only.
     * 
     */  
    double reModule::solveIncr() {
	NumericMatrix mm = d_L.solve(CHOLMOD_Pt, d_L.solve(CHOLMOD_Lt, d_cu));
	copy(mm.begin(), mm.end(), d_incr.begin());
	return d_CcNumer;
    }
Example #15
RcppExport SEXP R_glmApply2( SEXP expression_, SEXP data_, SEXP pBigMat_, SEXP env_, SEXP terms_, SEXP nthreads_, SEXP useMean_, SEXP useIdentityLink_, SEXP univariate_, SEXP multivariate_, SEXP quiet_, SEXP cincl_){

BEGIN_RCPP
		
	CharacterVector expression( expression_ );
	Environment env( env_ );
	std::vector<int> terms = as<std::vector<int> >( terms_ ); 
	int nthreads = as<int>( nthreads_ );
	bool useMean = (bool) as<int>( useMean_ );
	bool useIdentityLink = as<int>( useIdentityLink_ );
	bool univariate = as<int>( univariate_ );
	bool multivariate = as<int>( multivariate_ );
	bool quiet = as<int>( quiet_ );	
	std::vector<int> cincl = as<std::vector<int> >( cincl_ ); 
		
	regressionType regressType = useIdentityLink ? LINEAR : LOGISTIC;

	featureBatcher fbatch( data_, pBigMat_, 10000, cincl);

	// Get original number of threads
	int n_threads_original = omp_get_max_threads();

	// Set the requested number of OpenMP threads
	omp_set_num_threads( nthreads );
	// Restrict Intel MKL to a single thread per call
	#ifdef INTEL
	mkl_set_num_threads( 1 );
	#endif
	// disable nested OpenMP parallelism
	omp_set_nested(0);

	// Process the expression, X_loop and env
	Rexpress expr( as<string>( expression ), fbatch.get_N_indivs(), env );			

	RcppGSL::matrix<double> Y = expr.get_response_m(); 

	int n_indivs = Y->size1;
	int n_pheno = Y->size2;

	if( n_pheno > 1 && regressType == LOGISTIC ){
		Y.free();
	
		throw invalid_argument("Cannot do multivariate logistic regression\n");
	}

	RcppGSL::matrix<double> Xn = expr.get_model_matrix_null();

	if( n_pheno == 1 ) multivariate = false;

	// Check that the sizes of Y and X_loop match
	if( n_indivs != fbatch.get_N_indivs() ){
		Y.free();
		Xn.free();
	
		throw invalid_argument("Dimensions of response and design matrix do not match\n");
	}
	
	NumericMatrix pValues;
	NumericMatrix pValues_multivariate;

	// Define the p-value matrices to be returned, but only if the corresponding test is performed
	// Initialize values to NAN
	if( univariate ){
		pValues = NumericMatrix( fbatch.get_N_features(), n_pheno );
		std::fill(pValues.begin(), pValues.end(), NAN);
	}

	if( multivariate ){
		pValues_multivariate = NumericMatrix( fbatch.get_N_features(), 2 );
		std::fill(pValues_multivariate.begin(), pValues_multivariate.end(), NAN);
	}


	// X_clean = model.matrix.default( y ~ sex:One + age )
	// Replace the marker with 1's so that the design matrix for marker j can be created by multiplication
	RcppGSL::matrix<double> X_clean = expr.get_model_matrix_clean(); 

	// If trying to access index beyond range
	// Exit gracefully
	if( terms[which_max(terms)] >= X_clean->size2 ){
		Y.free();
		Xn.free();
		X_clean.free();
	
		throw range_error("Element in \"terms\" is out of range");		
	}

	// get indices of the columns that depend on the SNP
	vector<int> loopIndex = expr.get_loop_terms();

	// indices of columns that don't depend on the SNP, i.e. the complementary set
	vector<int> loopCompl;

	for(int i=0; i<X_clean->size2; i++){
		if( ! is_in_vector(loopIndex, i) ){
			loopCompl.push_back(i);
		}
	}

	// Extract covariate terms
	gsl_matrix *X_clean_cov = gsl_matrix_alloc(X_clean->size1, loopCompl.size());
	gsl_matrix_sub_col(X_clean, loopCompl, X_clean_cov);

	LM_preproc *preproc = LM_preproc_alloc( Y, X_clean_cov);

	gsl_matrix_free(X_clean_cov);
	
	long batch_size = MAX( 1, fbatch.getBatchSize()/100.0 ); 

	long tests_completed = 0;
	Progress p(0, false);

	time_t start_time;
	time(&start_time);

	bool useThisChunk;

	for(int i_set=0; i_set<fbatch.get_N_features(); i_set+=fbatch.getBatchSize()){

		// Load data from the binary matrix (or do nothing if a NumericMatrix is used)
		useThisChunk = fbatch.loadNextChunk();

		if( ! useThisChunk ) continue;

		#pragma omp parallel
		{
			Preproc_workspace *proc_work = Preproc_workspace_alloc( Y, loopIndex.size(), preproc);

			// Variables local to each thread
			gsl_matrix *X = gsl_matrix_alloc( X_clean->size1, X_clean->size2 );

			gsl_vector_view col_view, col_view_y;

			gsl_vector *marker_j = gsl_vector_alloc( fbatch.get_N_indivs() ); 

			GLM_MV_workspace *workmv;
			GLM_IRLS_workspace *irls_workspace;

			gsl_vector *y, *beta;

			double sig_e, log_L;
			int rank;

			if( regressType == LINEAR ){
				workmv = GLM_MV_workspace_alloc(Y->size1, X->size2, Y->size2, true, false);
			}else{
				y = gsl_vector_attach_array( Y->data, Y->size1 );
				beta = gsl_vector_calloc( X->size2 );

				irls_workspace = GLM_IRLS_workspace_alloc( X->size1, X->size2, true);
			}

			double Hotelling, Pillai;

			bool isMissing;

			#pragma omp for //schedule(static, batch_size)			
			for(int j=0; j<fbatch.getBatchSize(); j++){	

				// If element j+i_set is not in the include list, then skip
				if( ! fbatch.contains_element(j+i_set) ){
					continue;
				}

				#pragma omp critical
				tests_completed++;

				fbatch.getFeautureInChunk( j, marker_j );	

				if( useMean ) 	isMissing = gsl_vector_set_missing_mean( marker_j );		
				else 			isMissing = any_is_na( marker_j->data, marker_j->size );

				// Check if X has an NA value
				// If it does, set the p-values to NA and skip this marker
				if( isMissing && ! useMean){
					if( univariate ){
						#pragma omp critical
						for(int k=0; k<n_pheno; k++){
							pValues(j+i_set,k) = NA_REAL;
						}
					}
					#pragma omp critical
					if( multivariate ){  
						pValues_multivariate(j+i_set,0) = NA_REAL;
						pValues_multivariate(j+i_set,1) = NA_REAL;
					}
					continue;
				}

				// Copy clean version of matrices to active variables
				gsl_matrix_memcpy( X, X_clean );

				for(int k=0; k<loopIndex.size(); k++){			
					// X_design[,loopIndex[k]] = X_design[,loopIndex[k]] * marker_j
					col_view = gsl_matrix_column( (gsl_matrix*)(X), loopIndex[k] );
					gsl_vector_mul( &col_view.vector, marker_j );
				}		

				// Evaluate regression
				if( regressType == LINEAR ){

					// Extract marker terms
					gsl_matrix *X_markers = gsl_matrix_alloc(X->size1, loopIndex.size());
					gsl_matrix_sub_col(X, loopIndex, X_markers);

					gsl_vector *pvals = GLM_regression_preproc( Y, X_markers, workmv, preproc, proc_work);

					gsl_matrix_free(X_markers);

					#pragma omp critical
	 				for(int k=0; k<n_pheno; k++){
						pValues(j+i_set,k) = gsl_vector_get(pvals, k);
					}
					gsl_vector_free( pvals );


					/*if( multivariate ){
						GLM_HotellingPillai_test( X, workmv, terms, &Hotelling, &Pillai );

						#pragma omp critical
						{
							pValues_multivariate(j+i_set,0) = Hotelling;
							pValues_multivariate(j+i_set,1) = Pillai;
						}							
					}*/

					/*if( univariate ){
						// Perform Wald test
						gsl_vector *pvals = GLM_wald_test( workmv, terms );

						#pragma omp critical
		 				for(int k=0; k<n_pheno; k++){
							pValues(j+i_set,k) = gsl_vector_get(pvals, k);
						}
						gsl_vector_free( pvals );
					}*/

				}else{						

					// LOGISTIC
					GLM_unpenalized_regression_IRLS(y, X, beta, &sig_e, &log_L, &rank, regressType, false, irls_workspace);

					#pragma omp critical
					pValues(j+i_set,0) = GLM_wald_test( beta, irls_workspace->Sigma, X->size1, terms, regressType, irls_workspace->design_is_singular);
				}

			} // END for
						
			Preproc_workspace_free(proc_work);
			gsl_matrix_free( X );
			gsl_vector_free( marker_j );

			if( regressType == LINEAR ){
				GLM_MV_workspace_free( workmv );
			}else{
				GLM_IRLS_workspace_free( irls_workspace );

				gsl_vector_free(y);
				gsl_vector_free(beta);
			}

		} // End parallel

		if( ! quiet )  Rcpp::Rcout << print_progress( tests_completed, fbatch.get_N_features_included(), 25, start_time);

		if( Progress::check_abort() ){
			Y.free();
			Xn.free();
			X_clean.free();

			return wrap(0);
		}
	} // End set loop

	// Restore original number of threads
	omp_set_num_threads( n_threads_original );

	if( ! quiet ) Rcpp::Rcout << endl;

	Y.free();
	Xn.free();
	X_clean.free();
	LM_preproc_free( preproc );

	Rcpp::List res = Rcpp::List::create(Rcpp::Named("pValues") 	= pValues,
										Rcpp::Named("pValues_mv") = pValues_multivariate);

	return( res );

END_RCPP
}
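The function above saves omp_get_max_threads(), sets the requested thread count, and restores the original value before returning; a small RAII sketch of that bookkeeping (OmpThreadGuard is a hypothetical helper), which would also restore the count if an exception unwinds past BEGIN_RCPP/END_RCPP:

#include <omp.h>

// restore the previous OpenMP thread count automatically, even on early return or exception
struct OmpThreadGuard {
    int saved;
    explicit OmpThreadGuard(int nthreads) : saved(omp_get_max_threads()) {
        omp_set_num_threads(nthreads);
    }
    ~OmpThreadGuard() { omp_set_num_threads(saved); }
};

Declared at the top of the function body, a single OmpThreadGuard guard(nthreads); would replace the explicit save/restore pair.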
Example #16
void LinearGaussian::rinit(NumericMatrix & xparticles){
  RNGScope scope;
  int nparticles = xparticles.nrow();
  std::fill(xparticles.begin(), xparticles.end(), 0);
  xparticles(_,0) = rnorm(nparticles, 0, sigma / sqrt(1 - rho*rho));
}
Example #17
// [[Rcpp::export]]
List auxGAPbbDpMulthreadKPs(IntegerMatrix cost, NumericMatrix profitOrLoss, IntegerVector budget,
                int maxCore = 7, double tlimit = 60, bool greedyBranching = true,
                String optim = "max")
{
  int Ntask = cost.ncol(), Nagent = cost.nrow();
  vec<signed char> Bcontainer(cost.size() + Ntask, -1);
  vec<WV<double, int> > costprofitInfo(cost.size());
  double C = -std::numeric_limits<double>::max();
  if(optim == "max")
  {
    for(int i = 0, iend = cost.size(); i < iend; ++i)
    {
      costprofitInfo[i].weight = cost[i];
      costprofitInfo[i].value = profitOrLoss[i];
    }
  }
  else
  {
    C = *std::max_element(profitOrLoss.begin(), profitOrLoss.end()) + 1;
    for(int i = 0, iend = cost.size(); i < iend; ++i)
    {
      costprofitInfo[i].weight = cost[i];
      costprofitInfo[i].value = C - profitOrLoss[i];
    }
  }
  vec<WV<double, int>*> wvptr(Ntask);
  for(int j = 0; j < Ntask; ++j)
  {
    wvptr[j] = &costprofitInfo[0] + j * INT(Nagent);
  }
  WV<double, int> **info = &wvptr[0];
  vec<int> residualBudget(budget.begin(), budget.end());
  vec<signed char> solution;


  double solutionRevenue = 0;
  std::time_t timer; time(&timer);
  int Nnode = 0, Nkp = 0;
  if(greedyBranching)
  {
    solutionRevenue = gapBabDp<double, int, true> (
      solution, Bcontainer, Nagent, Ntask, info, &residualBudget[0], maxCore,
      timer, tlimit, Nnode, Nkp);
  }
  else
  {
    solutionRevenue = gapBabDp<double, int, false> (
      solution, Bcontainer, Nagent, Ntask, info, &residualBudget[0], maxCore,
      timer, tlimit, Nnode, Nkp);
  }


  if(solutionRevenue == -std::numeric_limits<double>::max()) return List::create();
  if(C != -std::numeric_limits<double>::max())
  {
    solutionRevenue = C * Ntask - solutionRevenue;
  }


  IntegerVector agentCost(Nagent);
  IntegerVector assignment(Ntask);
  for(int i = 0; i < Nagent; ++i)
  {
    agentCost[i] = 0;
    for(int j = 0; j < Ntask; ++j)
    {
      if(solution[j * (Nagent + 1) + i] <= 0) continue;
      agentCost[i] += cost[j * Nagent + i];
      assignment[j] = i + 1;
    }
  }
  return List::create(Named("totalProfitOrLoss") = solutionRevenue,
                      Named("agentCost") = agentCost,
                      Named("assignment") = assignment,
                      Named("nodes") = Nnode,
                      Named("bkpSolved") = Nkp);
}
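A worked sketch of the min-to-max transform used above (my illustration; recover_min_objective and its argument are hypothetical, and C++11 is assumed): with C larger than every entry, maximising the sum of C - v over one chosen entry per task is equivalent to minimising the sum of v, and the original objective is recovered as C * Ntask minus the transformed optimum, which is exactly the adjustment applied after the solver returns.

#include <vector>

// given the losses actually chosen by the solver (one per task), show that
// C * Ntask - sum(C - v) equals the plain sum of the losses
double recover_min_objective(const std::vector<double>& chosenLoss, double C) {
  double transformedRevenue = 0.0;
  for (double v : chosenLoss) transformedRevenue += C - v;       // what the maximiser sees
  return C * (double) chosenLoss.size() - transformedRevenue;    // equals the sum of chosenLoss
}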