// [[Rcpp::export]]
NumericVector computeAllLogLkhd(NumericVector observedCases, NumericVector expectedCases,
                                List nearestNeighborsList, int nZones, String logLkhdType) {
  NumericVector allLogLkhd(nZones);
  int nAreas = expectedCases.size(),
      C = sum(observedCases),
      N = sum(expectedCases),
      index = 0;
  int nNeighbors = 0;
  double cz = 0.0, nz = 0.0;

  for (int i = 0; i < nAreas; ++i) {
    cz = 0;
    nz = 0;
    Rcpp::NumericVector nearestNeighbors = nearestNeighborsList[i];
    nNeighbors = nearestNeighbors.size();

    // For each area's nearest neighbors
    for (int j = 0; j < nNeighbors; ++j) {
      // Watch off by 1 vector indexing in C as opposed to R
      cz += observedCases[nearestNeighbors[j] - 1];
      nz += expectedCases[nearestNeighbors[j] - 1];

      if (logLkhdType == "poisson") {
        allLogLkhd[index] = poissonLogLkhd(cz, nz, N, C);
      } else if (logLkhdType == "binomial") {
        allLogLkhd[index] = binomialLogLkhd(cz, nz, N, C);
      }
      index++;
    }
  }
  return allLogLkhd;
}
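// computeAllLogLkhd() above delegates the per-zone score to poissonLogLkhd()
// and binomialLogLkhd(), neither of which appears in this listing. The sketch
// below is only a minimal illustration of the Poisson variant, assuming the
// usual Kulldorff likelihood-ratio form (zero when the zone is not elevated);
// the name and signature simply mirror the call above.
#include <cmath>

double poissonLogLkhd(double cz, double nz, double N, double C) {
  // cz/nz: observed/expected counts inside the zone; C/N: study-wide totals.
  // Score only zones whose inside rate exceeds the outside rate.
  if (cz / nz <= (C - cz) / (N - nz)) return 0.0;
  return cz * std::log(cz / nz)
       + (C - cz) * std::log((C - cz) / (N - nz))
       - C * std::log(C / N);
}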
// [[Rcpp::export]]
Rcpp::NumericVector LearnJ(Rcpp::NumericVector x, Rcpp::NumericVector y,
                           Rcpp::NumericVector extent,
                           Rcpp::NumericVector image_labels) {
  assert(x.size() == y.size());
  assert(y.size() == image_labels.size());

  const int size_x = extent[1] - extent[0] + 1;
  const int size_y = extent[3] - extent[2] + 1;
  int x0 = extent[0];
  int y0 = extent[2];
  Rcpp::Rcout << "Size of grid: " << size_x << " " << size_y << std::endl;

  // Construct the grid of image_labels from the 1D data
  std::vector<std::vector<int> > grid(size_y, std::vector<int>(size_x, 0));
  for (int i = 0; i < x.size(); i++) {
    int posy = y[i] - y0;
    int posx = x[i] - x0;
    grid[posy][posx] = image_labels[i];
  }

  // The grid is built but not yet used; return an empty vector for now
  return Rcpp::NumericVector(0);
}
void eigs_sym_shift_c(
    mat_op op, int n, int k, double sigma,
    const spectra_opts *opts, void *data,
    int *nconv, int *niter, int *nops,
    double *evals, double *evecs, int *info
)
{
    BEGIN_RCPP

    CRealShift cmat_op(op, n, data);
    Rcpp::List res;
    try {
        res = run_eigs_shift_sym((RealShift*) &cmat_op, n, k,
                                 opts->ncv, opts->rule, sigma,
                                 opts->maxitr, opts->tol, opts->retvec != 0);
        *info = 0;
    } catch(...) {
        *info = 1;  // indicates error
    }

    *nconv = Rcpp::as<int>(res["nconv"]);
    *niter = Rcpp::as<int>(res["niter"]);
    *nops  = Rcpp::as<int>(res["nops"]);

    Rcpp::NumericVector val = res["values"];
    std::copy(val.begin(), val.end(), evals);
    if(opts->retvec != 0)
    {
        Rcpp::NumericMatrix vec = res["vectors"];
        std::copy(vec.begin(), vec.end(), evecs);
    }

    VOID_END_RCPP
}
SEXP rawSymmetricMatrixToDist(SEXP object)
{
BEGIN_RCPP
    Rcpp::S4 rawSymmetric = object;
    Rcpp::NumericVector levels = Rcpp::as<Rcpp::NumericVector>(rawSymmetric.slot("levels"));
    Rcpp::CharacterVector markers = Rcpp::as<Rcpp::CharacterVector>(rawSymmetric.slot("markers"));
    Rcpp::RawVector data = Rcpp::as<Rcpp::RawVector>(rawSymmetric.slot("data"));
    R_xlen_t size = markers.size(), levelsSize = levels.size();
    Rcpp::NumericVector result(size*(size - 1)/2, 0);
    int counter = 0;
    for(R_xlen_t row = 0; row < size; row++)
    {
        for(R_xlen_t column = row+1; column < size; column++)
        {
            int byte = data(column * (column + (R_xlen_t)1)/(R_xlen_t)2 + row);
            if(byte == 255) result(counter) = std::numeric_limits<double>::quiet_NaN();
            else result(counter) = levels(byte);
            counter++;
        }
    }
    result.attr("Size") = (int)size;
    result.attr("Labels") = markers;
    result.attr("Diag") = false;
    result.attr("Upper") = false;
    result.attr("class") = "dist";
    return result;
END_RCPP
}
//[[Rcpp::export]]
extern "C" SEXP notbayesEstimation(SEXP Xs, SEXP Ys, SEXP Zs, SEXP Vs) {
  Rcpp::NumericVector Xr(Xs);
  Rcpp::NumericMatrix Yr(Ys);
  Rcpp::NumericMatrix Zr(Zs);
  Rcpp::NumericMatrix Vr(Vs);
  int n = Yr.nrow(), k = Yr.ncol();
  int l = Vr.nrow(), m = Vr.ncol();

  arma::mat x(Xr.begin(), n, k, false);   // fit2$sigma
  arma::mat y(Yr.begin(), n, k, false);   // fit2b$stdev.unscaled
  arma::mat z(Zr.begin(), 1, 1, false);   // min.variance.factor
  arma::mat v(Vr.begin(), l, m, false);   // fit2$df.residual

  arma::mat sda(n, 1);
  arma::vec dof(n);
  arma::vec sd(n);

  sd  = sqrt((y % x) % (y % x) + as_scalar(z));
  sda = sd / (x % y);
  dof = v;

  Rcpp::NumericVector Sd  = Rcpp::wrap(sd);
  Rcpp::NumericVector Sda = Rcpp::wrap(sda);
  Rcpp::NumericVector DOF = Rcpp::wrap(dof);
  Sd.names()  = Rcpp::List(Yr.attr("dimnames"))[0];
  Sda.names() = Rcpp::List(Yr.attr("dimnames"))[0];

  return Rcpp::List::create(Rcpp::Named("SD")       = Sd,
                            Rcpp::Named("DOF")      = DOF,
                            Rcpp::Named("sd.alpha") = Sda);
}
// [[Rcpp::export]]
Rcpp::NumericVector randomSample(Rcpp::NumericVector a, int n) {
  // allocate a fresh vector b of length n; a itself is left untouched
  Rcpp::NumericVector b(n);
  __gnu_cxx::random_sample(a.begin(), a.end(), b.begin(), b.end(), randWrapper);
  return b;
}
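// randomSample() above expects a randWrapper() helper that is not part of this
// listing. A minimal sketch, assuming it adapts R's own uniform generator so
// that results remain reproducible via set.seed() on the R side; the exact
// definition in the original source may differ.
#include <cmath>
#include <Rcpp.h>

inline int randWrapper(const int n) {
  // uniform draw in [0, n) using R's RNG (unif_rand is provided by R's C API)
  return static_cast<int>(std::floor(unif_rand() * n));
}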
void MapTematikQuantile::createMapTematikQuantile()
{
  Rcpp::NumericVector quantile;
  QString command;

  try {
    command = QString("q <- quantile(dframe[[\"%1\"]], prob = seq(0, 1, length = (%2 + 1)), type = 7);")
                  .arg(var).arg(typeMap);
    rconn.parseEvalQ(command.toStdString());
    quantile = rconn["q"];
  } catch (...) {
  }

  // Bucket each observation's row id into the quantile interval it falls in
  std::vector<QList<int> > temp(quantile.size() - 1);
  for (int i = 0; i < numvar.size(); i++) {
    if (numvar[i] <= quantile[1]) {
      temp[0].append(table->verticalHeaderItem(i)->text().toInt());
    } else {
      for (int j = 2; j < quantile.size(); j++) {
        if (numvar[i] > quantile[j-1] && numvar[i] <= quantile[j]) {
          temp[j-1].append(table->verticalHeaderItem(i)->text().toInt());
        }
      }
    }
  }

  QList<QList<int> > temp2;
  for (int i = 0; i < quantile.size() - 1; i++) {
    temp2.append(temp[i]);
  }

  MapTematikConfig* configWidget =
      new MapTematikConfig(mviewResult, vv, rconn, temp2, var, typeMap.toInt());
  setupResultViewVariableTypeChooser("Quantile Map", var, temp2, quantile, configWidget);
}
//' rcpp_neutral_hotspots_ntests
//'
//' Performs repeated neutral tests to yield average distributions of both
//' hotspot values and spatial autocorrelation statistics.
//'
//' @param nbs An \code{spdep} \code{nb} object listing all neighbours of each
//' point.
//' @param wts Weighting factors for each neighbour; must have same length as
//' nbs.
//' @param nbsi List of matrices as returned from \code{get_nbsi}, each element
//' of which contains the i-th nearest neighbour to each point.
//' @param alpha Strength of spatial autocorrelation.
//' @param sd0 Standard deviation of truncated normal distribution used to model
//' environmental variation (with mean of 1).
//' @param niters Number of successive layers of temporal and spatial
//' autocorrelation used to generate final modelled values.
//' @param ac_type Character string specifying type of autocorrelation
//' (\code{moran}, \code{geary}, or \code{getis-ord}).
//' @param log_scale Logical; whether modelled values are generated on a log scale.
//' @param ntests Number of tests used to obtain average values.
//'
//' @return A matrix of dimension (size, 2), with first column containing
//' sorted and re-scaled hotspot values, and second column containing sorted and
//' re-scaled spatial autocorrelation statistics.
//'
// [[Rcpp::export]]
Rcpp::NumericMatrix rcpp_neutral_hotspots_ntests (Rcpp::List nbs, Rcpp::List wts,
        Rcpp::List nbsi, double alpha, double sd0, int niters,
        std::string ac_type, bool log_scale, int ntests)
{
    const int size = nbs.size ();

    Rcpp::NumericMatrix hs1;
    Rcpp::NumericVector z (size), z1 (size), ac (size), ac1 (size);
    std::fill (ac.begin (), ac.end (), 0.0);
    std::fill (z.begin (), z.end (), 0.0);

    for (int n=0; n<ntests; n++)
    {
        hs1 = rcpp_neutral_hotspots (nbs, wts, nbsi, alpha, sd0, log_scale,
                niters, ac_type);

        z += hs1 (Rcpp::_, 0);
        ac += hs1 (Rcpp::_, 1);
    }

    Rcpp::NumericMatrix result (size, 2);
    result (Rcpp::_, 0) = z / (double) ntests;
    result (Rcpp::_, 1) = ac / (double) ntests;
    Rcpp::colnames (result) = Rcpp::CharacterVector::create ("z", "ac");

    return result;
}
lmResp::lmResp(Rcpp::NumericVector y)
    throw (std::runtime_error)
    : d_y(y), d_weights(y.size(), 1.0), d_offset(y.size()),
      d_mu(y.size()), d_sqrtrwt(y.size()), d_sqrtXwt(y.size(), 1.0) {
    init();
}
// [[Rcpp::export]]
Rcpp::NumericMatrix update_sigma2_batch(Rcpp::S4 xmod) {
    Rcpp::RNGScope scope;

    // get model
    Rcpp::S4 model(xmod);

    // get parameter estimates
    Rcpp::NumericMatrix theta = model.slot("theta");
    Rcpp::IntegerVector z = model.slot("z");
    double nu_0 = model.slot("nu.0");
    double sigma2_0 = model.slot("sigma2.0");

    // get data and size attributes
    Rcpp::NumericVector x = model.slot("data");
    int n = x.size();
    int K = theta.ncol();
    int B = theta.nrow();

    // get batch info
    Rcpp::NumericMatrix tabz = tableBatchZ(xmod);
    Rcpp::IntegerVector batch = model.slot("batch");
    Rcpp::IntegerVector ub = uniqueBatch(batch);
    Rcpp::NumericMatrix ss(B, K);

    // accumulate within-batch, within-component sums of squares
    for (int i = 0; i < n; ++i) {
        for (int b = 0; b < B; ++b) {
            if (batch[i] != ub[b]) {
                continue;
            }
            for (int k = 0; k < K; ++k) {
                if (z[i] == k + 1) {
                    ss(b, k) += pow(x[i] - theta(b, k), 2);
                }
            }
        }
    }

    double shape;
    double rate;
    double sigma2_nh;
    double nu_n;
    Rcpp::NumericMatrix sigma2_tilde(B, K);
    Rcpp::NumericMatrix sigma2_(B, K);
    for (int b = 0; b < B; ++b) {
        for (int k = 0; k < K; ++k) {
            nu_n = nu_0 + tabz(b, k);
            sigma2_nh = 1.0 / nu_n * (nu_0 * sigma2_0 + ss(b, k));
            shape = 0.5 * nu_n;
            rate = shape * sigma2_nh;
            sigma2_tilde(b, k) = Rcpp::as<double>(rgamma(1, shape, 1.0 / rate));
            sigma2_(b, k) = 1.0 / sigma2_tilde(b, k);
        }
    }

    return sigma2_;
}
//' Compute sequences of transition probability matrix products
//'
//' @param tpm_prods array in which the transition probability matrix products
//'   are stored (updated in place)
//' @param tpms array of transition probability matrices
//' @param obs_time_inds indices of the observation times
//'
//' @return None; \code{tpm_prods} is updated in place.
// [[Rcpp::export]]
void tpmProdSeqs(Rcpp::NumericVector& tpm_prods, Rcpp::NumericVector& tpms,
                 const Rcpp::IntegerVector obs_time_inds) {

    // Get indices
    int n_obs = obs_time_inds.size();
    Rcpp::IntegerVector tpmDims = tpms.attr("dim");

    // Ensure obs_time_inds starts at 0
    Rcpp::IntegerVector obs_inds = obs_time_inds - 1;

    // Set array pointers
    arma::cube prod_arr(tpm_prods.begin(), tpmDims[0], tpmDims[1], tpmDims[2], false);
    arma::cube tpm_arr(tpms.begin(), tpmDims[0], tpmDims[1], tpmDims[2], false);

    // Generate tpm products and store them in the appropriate spots in the tpm product array
    for(int j = 0; j < (n_obs - 1); ++j) {

        prod_arr.slice(obs_inds[j+1] - 1) = tpm_arr.slice(obs_inds[j+1] - 1);

        for(int k = (obs_inds[j+1] - 2); k > (obs_inds[j] - 1); --k) {
            prod_arr.slice(k) = tpm_arr.slice(k) * prod_arr.slice(k + 1);
        }
    }
}
// Bellman recursion using row rearrangement
//[[Rcpp::export]]
Rcpp::List Bellman(const arma::mat& grid,
                   Rcpp::NumericVector reward_,
                   const arma::cube& scrap,
                   Rcpp::NumericVector control_,
                   const arma::cube& disturb,
                   const arma::vec& weight) {
  // Passing R objects to C++
  const std::size_t n_grid = grid.n_rows;
  const std::size_t n_dim = grid.n_cols;
  const arma::ivec r_dims = reward_.attr("dim");
  const std::size_t n_pos = r_dims(3);
  const std::size_t n_action = r_dims(2);
  const std::size_t n_dec = r_dims(4) + 1;
  const arma::cube reward(reward_.begin(), n_grid, n_dim * n_action * n_pos,
                          n_dec - 1, false);
  const arma::ivec c_dims = control_.attr("dim");
  arma::cube control2;
  arma::imat control;
  bool full_control;
  if (c_dims.n_elem == 3) {
    full_control = false;
    arma::cube temp_control2(control_.begin(), n_pos, n_action, n_pos, false);
    control2 = temp_control2;
  } else {
    full_control = true;
    arma::mat temp_control(control_.begin(), n_pos, n_action, false);
    control = arma::conv_to<arma::imat>::from(temp_control);
  }
  const std::size_t n_disturb = disturb.n_slices;
  // Bellman recursion
  arma::cube value(n_grid, n_dim * n_pos, n_dec);
  arma::cube cont(n_grid, n_dim * n_pos, n_dec - 1, arma::fill::zeros);
  arma::mat d_value(n_grid, n_dim);
  Rcpp::Rcout << "At dec: " << n_dec - 1 << "...";
  for (std::size_t pp = 0; pp < n_pos; pp++) {
    value.slice(n_dec - 1).cols(n_dim * pp, n_dim * (pp + 1) - 1) = scrap.slice(pp);
  }
  for (int tt = (n_dec - 2); tt >= 0; tt--) {
    Rcpp::Rcout << tt;
    // Approximating the continuation value
    for (std::size_t pp = 0; pp < n_pos; pp++) {
      cont.slice(tt).cols(n_dim * pp, n_dim * (pp + 1) - 1) =
          Expected(grid,
                   value.slice(tt + 1).cols(pp * n_dim, n_dim * (pp + 1) - 1),
                   disturb, weight);
    }
    Rcpp::Rcout << "..";
    // Optimise value function
    if (full_control) {
      BellmanOptimal(grid, control, value, reward, cont, tt);
    } else {
      BellmanOptimal2(grid, control2, value, reward, cont, tt);
    }
    Rcpp::Rcout << ".";
  }
  return Rcpp::List::create(Rcpp::Named("value") = value,
                            Rcpp::Named("expected") = cont);
}
// kernel Dist function on a Grid
// [[Rcpp::export]]
Rcpp::NumericVector KdeDist(const Rcpp::NumericMatrix & X,
                            const Rcpp::NumericMatrix & Grid,
                            const double h,
                            const Rcpp::NumericVector & weight,
                            const bool printProgress) {
  const unsigned sampleNum = X.nrow();
  const unsigned dimension = Grid.ncol();
  const unsigned gridNum = Grid.nrow();
  // first = sum K_h(X_i, X_j), second = K_h(x, x), third = sum K_h(x, X_i)
  std::vector< double > firstValue;
  const double second = 1.0;
  std::vector< double > thirdValue;
  double firstmean;
  Rcpp::NumericVector kdeDistValue(gridNum);
  int counter = 0, percentageFloor = 0;
  int totalCount = sampleNum + gridNum;

  if (printProgress) {
    printProgressFrame(Rprintf);
  }

  firstValue = computeKernel< std::vector< double > >(
      X, X, h, weight, printProgress, Rprintf, counter, totalCount,
      percentageFloor);

  if (dimension <= 1) {
    thirdValue = computeKernel< std::vector< double > >(
        X, Grid, h, weight, printProgress, Rprintf, counter, totalCount,
        percentageFloor);
  } else {
    thirdValue = computeGaussOuter< std::vector< double > >(
        X, Grid, h, weight, printProgress, Rprintf, counter, totalCount,
        percentageFloor);
  }

  if (weight.size() == 1) {
    firstmean = std::accumulate(firstValue.begin(), firstValue.end(), 0.0) / sampleNum;
  } else {
    firstmean = std::inner_product(
        firstValue.begin(), firstValue.end(), weight.begin(), 0.0) /
        std::accumulate(weight.begin(), weight.end(), 0.0);
  }

  for (unsigned gridIdx = 0; gridIdx < gridNum; ++gridIdx) {
    kdeDistValue[gridIdx] = std::sqrt(firstmean + second - 2 * thirdValue[gridIdx]);
  }

  if (printProgress) {
    Rprintf("\n");
  }

  return kdeDistValue;
}
// redis "set a vector" -- without R serialization, without attributes, ... // this is somewhat experimental std::string setVector(std::string key, Rcpp::NumericVector x) { redisReply *reply = static_cast<redisReply*>(redisCommand(prc_, "SET %s %b", key.c_str(), x.begin(), x.size()*szdb)); std::string res(reply->str); freeReplyObject(reply); return(res); }
double lmerResp::updateMu(const Rcpp::NumericVector& gamma) {
#ifdef USE_RCPP_SUGAR
    d_mu = d_offset + gamma;
#else
    std::transform(gamma.begin(), gamma.end(), d_offset.begin(),
                   d_mu.begin(), std::plus<double>());
#endif
    return updateWrss();
}
// [[Rcpp::export]]
arma::mat rmvnorm_arma(int n, const arma::vec& mu, const arma::mat& Sigma) {
  unsigned int p = Sigma.n_cols;
  // draw n*p standard normals with R's RNG, then view that memory as an n x p matrix
  Rcpp::NumericVector draw = Rcpp::rnorm(n * p);
  arma::mat Z = arma::mat(draw.begin(), n, p, false, true);
  // shift by mu and scale by the Cholesky factor of Sigma
  arma::mat Y = arma::repmat(mu, 1, n).t() + Z * arma::chol(Sigma);
  return Y;
}
glmResp::glmResp(Rcpp::List fam, Rcpp::NumericVector y,
                 Rcpp::NumericVector weights, Rcpp::NumericVector offset,
                 Rcpp::NumericVector n)
    throw (std::runtime_error)
    : lmResp(y, weights, offset), d_fam(fam), d_n(n), d_eta(y.size()) {
    if (n.size() != y.size())
        throw std::runtime_error("lengths of y and n must agree");
}
// Calculate lk, k=1, ..., M with m0=0;
// where h=(h0, h1, ..., hM) with h0=0 and d=(d0, d1, ..., dM) with d0=0, dM=R_PosInf
void Getlk(Rcpp::NumericVector& lk, const Rcpp::IntegerVector& Mt, int M1,
           Rcpp::NumericVector d, const Rcpp::NumericVector& t,
           const Rcpp::NumericVector& Xbeta) {
  int n = Mt.size();
  std::fill(lk.begin(), lk.end(), 0);
  for (int k = 1; k < M1; ++k) {
    for (int i = 0; i < n; ++i) {
      if (Mt[i] >= k)
        lk[k] += (std::min(d[k], t[i]) - d[k-1]) * std::exp(Xbeta[i]);
    }
  }
}
// [[Rcpp::export]]
NumericVector freqsempiricalC(NumericVector x_) {
  Rcpp::NumericVector x = Rcpp::clone(x_);
  int n = x.length();
  double sum = 0;
  for (int i = 0; i < n; i++) {
    sum += x[i];
  }
  x = x / sum;
  return x;
}
// gradient-descent ------------------------------------------------------------
//' @title Regular gradient descent for latent factor model with covariates
//' @param X The ratings matrix. Unobserved entries must be marked NA. Users
//'   must be along rows, and tracks must be along columns.
//' @param Z_vec The covariates associated with each pair. This must be shaped as an
//'   array with users along rows, tracks along columns, and features along
//'   slices.
//' @param k_factors The number of latent factors for the problem.
//' @param lambdas The regularization parameters for P, Q, and beta, respectively.
//' @param n_iter The number of gradient descent iterations to run.
//' @param batch_samples The proportion of the observed entries to sample in the
//'   gradient for each factor in the gradient descent.
//' @param batch_factors The proportion of latent factors to update at each
//'   iteration.
//' @param gamma_pq The step-size in the gradient descent for the latent factors.
//' @param gamma_beta The step-size in the gradient descent for the regression
//'   coefficients.
//' @param verbose Print the objective at each iteration of the descent?
//' @return A list with the following elements, \cr
//'   $P The learned user latent factors. \cr
//'   $Q The learned track latent factors. \cr
//'   $beta The learned regression coefficients.
//' @export
// [[Rcpp::export]]
Rcpp::List svd_cov(Rcpp::NumericMatrix X, Rcpp::NumericVector Z_vec,
                   int k_factors, Rcpp::NumericVector lambdas, int n_iter,
                   double batch_samples, double batch_factors,
                   double gamma_pq, double gamma_beta, bool verbose) {
  // convert to arma
  Rcpp::IntegerVector Z_dim = Z_vec.attr("dim");
  arma::cube Z(Z_vec.begin(), Z_dim[0], Z_dim[1], Z_dim[2], false);

  // initialize results
  int m = X.nrow();
  int n = X.ncol();
  int l = Z_dim[2];
  arma::mat P = 1.0 / sqrt(m) * arma::randn(m, k_factors);
  arma::mat Q = 1.0 / sqrt(n) * arma::randn(n, k_factors);
  arma::vec beta = 1.0 / sqrt(l) * arma::randn(l);
  arma::vec objs = arma::zeros(n_iter);

  // perform gradient descent steps
  for(int cur_iter = 0; cur_iter < n_iter; cur_iter++) {
    // update user factors
    arma::uvec cur_factors = get_batch_ix(m, batch_factors);
    for(int i = 0; i < cur_factors.n_elem; i++) {
      int cur_ix = cur_factors(i);
      P.row(cur_ix) = update_factor(X.row(cur_ix),
                                    Z(arma::span(cur_ix), arma::span(), arma::span()),
                                    arma::conv_to<arma::vec>::from(P.row(cur_ix)),
                                    Q, beta, batch_samples,
                                    lambdas[0], gamma_pq).t();
    }

    // update track factors
    cur_factors = get_batch_ix(n, batch_factors);
    for(int j = 0; j < cur_factors.n_elem; j++) {
      int cur_ix = cur_factors(j);
      Q.row(cur_ix) = update_factor(X.column(cur_ix),
                                    Z(arma::span(), arma::span(cur_ix), arma::span()),
                                    arma::conv_to<arma::vec>::from(Q.row(cur_ix)),
                                    P, beta, batch_samples,
                                    lambdas[1], gamma_pq).t();
    }

    // update regression coefficients
    beta = update_beta(Rcpp::as<arma::mat>(X), Z, P, Q, beta, lambdas[2], gamma_beta);

    objs[cur_iter] = objective_fun(Rcpp::as<arma::mat>(X), Z, P, Q, beta, lambdas);
    if(verbose) {
      Rprintf("iteration %d \n", cur_iter);
      Rprintf("Objective: %g \n", objs[cur_iter]);
    }
  }

  return Rcpp::List::create(Rcpp::Named("P") = P,
                            Rcpp::Named("Q") = Q,
                            Rcpp::Named("beta") = beta,
                            Rcpp::Named("objs") = objs);
}
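// svd_cov() above relies on helpers (get_batch_ix, update_factor, update_beta,
// objective_fun) that are not shown in this listing. As one illustration, a
// minimal sketch of what get_batch_ix() might look like, assuming it returns a
// uniformly sampled subset of the indices 0..n-1 whose size is roughly the
// given proportion of n; the original definition may differ.
#include <RcppArmadillo.h>
#include <cmath>
#include <algorithm>

arma::uvec get_batch_ix(int n, double proportion) {
  arma::uvec all_ix = arma::regspace<arma::uvec>(0, n - 1);      // 0, 1, ..., n-1
  int n_keep = std::max(1, (int) std::round(proportion * n));    // batch size
  return arma::shuffle(all_ix).head(n_keep);                     // random subset
}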
// redis "prepend to list" -- without R serialization // as above: pure vector, no attributes, ... std::string listLPush(std::string key, Rcpp::NumericVector x) { // uses binary protocol, see hiredis doc at github redisReply *reply = static_cast<redisReply*>(redisCommand(prc_, "LPUSH %s %b", key.c_str(), x.begin(), x.size()*szdb)); //std::string res(reply->str); std::string res = ""; freeReplyObject(reply); return(res); }
// [[Rcpp::export(.cpp_convolve)]]
Rcpp::NumericVector cpp_convolve(Rcpp::NumericVector xa, Rcpp::NumericVector xb) {
  int n_xa = xa.size(), n_xb = xb.size();
  Rcpp::NumericVector xab(n_xa + n_xb - 1);
  typedef Rcpp::NumericVector::iterator vec_iterator;
  vec_iterator ia = xa.begin(), ib = xb.begin();
  vec_iterator iab = xab.begin();
  for (int i = 0; i < n_xa; i++)
    for (int j = 0; j < n_xb; j++)
      iab[i + j] += ia[i] * ib[j];
  return xab;
}
glmResp::glmResp(Rcpp::List fam, Rcpp::NumericVector y,
                 Rcpp::NumericVector weights, Rcpp::NumericVector offset,
                 Rcpp::NumericVector n, Rcpp::NumericVector eta)
    throw (std::runtime_error)
    : lmResp(y, weights, offset), d_fam(fam), d_n(n), d_eta(eta) {
    int nn = y.size();
    if (n.size() != nn || eta.size() != nn)
        throw std::runtime_error("lengths of y, n and eta must agree");
}
// Returns the value of any((object@data >= length(object@levels)) & object@data != as.raw(255))
SEXP checkRawSymmetricMatrix(SEXP rawSymmetric_)
{
BEGIN_RCPP
    Rcpp::S4 rawSymmetric = rawSymmetric_;
    Rcpp::NumericVector levels = Rcpp::as<Rcpp::NumericVector>(rawSymmetric.slot("levels"));
    Rcpp::RawVector data = Rcpp::as<Rcpp::RawVector>(rawSymmetric.slot("data"));
    R_xlen_t size = data.size(), levelsSize = levels.size();
    for(R_xlen_t i = 0; i < size; i++)
    {
        if(data[i] >= levelsSize && data[i] != 0xff) return Rcpp::wrap(true);
    }
    return Rcpp::wrap(false);
END_RCPP
}
// [[Rcpp::export]]
NumericVector freqsshrinkC(NumericVector y_, double lambda) {
  Rcpp::NumericVector y = Rcpp::clone(y_);
  int m = y.length();
  double n = 0;
  for (int i = 0; i < m; i++) {
    n += y(i);
  }
  y = y / n;
  NumericVector add(m, lambda / m);
  y = y * (1 - lambda);
  y += add;
  return y;
}
// Returns the linear predictor from the full step
Rcpp::NumericVector densePred::gamma(const Rcpp::NumericVector& xwts,
                                     const Rcpp::NumericVector& wtres)
    throw (std::runtime_error) {
    int n = d_X.nrow();
    if (xwts.size() != n || wtres.size() != n)
        throw std::runtime_error("length(xwts) or length(wtres) != nrow(X)");
    arma::vec a_xwts = arma::vec(xwts.begin(), n, false, true),
              a_wtres = arma::vec(wtres.begin(), n, false, true);
    arma::vec tmp = solve(diagmat(a_xwts) * a_X, a_wtres);
    std::copy(tmp.begin(), tmp.end(), a_delta.begin());
    d_coef = d_coef0 + d_delta;
    return NumericVector(wrap(a_X * a_coef));
}
// [[Rcpp::export]]
Rcpp::List worstcase_l1(Rcpp::NumericVector z, Rcpp::NumericVector q, double t) {
    // resulting probability
    craam::numvec p;
    // resulting objective value
    double objective;

    craam::numvec vz(z.begin(), z.end()), vq(q.begin(), q.end());
    std::tie(p, objective) = craam::worstcase_l1(vz, vq, t);

    Rcpp::List result;
    result["p"] = Rcpp::NumericVector(p.cbegin(), p.cend());
    result["obj"] = objective;

    return result;
}
//' Evaluate an h-function corresponding to a copula density estimate
//'
//' @param uev m x 2 matrix of evaluation points.
//' @param cond_var either 1 or 2; the variable to condition on.
//' @param vals matrix of density estimate evaluated on a k x k grid.
//' @param grid the grid points (1-dim) on which vals has been computed.
//'
//' @return H-function estimate evaluated at uev.
//'
//' @noRd
// [[Rcpp::export]]
Rcpp::NumericVector eval_hfunc_2d(const Rcpp::NumericMatrix& uev,
                                  const int& cond_var,
                                  const Rcpp::NumericMatrix& vals,
                                  const Rcpp::NumericVector& grid)
{
    int N = uev.nrow();
    int m = grid.size();
    NumericVector tmpvals(m), out(N), tmpa(4), tmpb(4);
    NumericMatrix tmpgrid(m, 2);
    double upr = 0.0;
    double tmpint, int1;
    tmpint = 0.0;

    for (int n = 0; n < N; ++n) {
        if (cond_var == 1) {
            upr = uev(n, 1);
            tmpgrid(_, 0) = rep(uev(n, 0), m);
            tmpgrid(_, 1) = grid;
        } else if (cond_var == 2) {
            upr = uev(n, 0);
            tmpgrid(_, 0) = grid;
            tmpgrid(_, 1) = rep(uev(n, 1), m);
        }
        tmpvals = interp_2d(tmpgrid, vals, grid, tmpa, tmpb);
        tmpint = int_on_grid(upr, tmpvals, grid);
        int1 = int_on_grid(1.0, tmpvals, grid);
        out[n] = tmpint / int1;
        out[n] = fmax(out[n], 1e-10);
        out[n] = fmin(out[n], 1 - 1e-10);
    }
    return out;
}
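// eval_hfunc_2d() above delegates the one-dimensional integration to
// int_on_grid(), which is not part of this listing. A minimal sketch, assuming
// a trapezoidal rule over vals (evaluated at the points in grid) from the first
// grid point up to the upper limit upr; the signature simply mirrors the calls
// above and the original implementation may differ.
double int_on_grid(const double upr, const Rcpp::NumericVector& vals,
                   const Rcpp::NumericVector& grid) {
    double out = 0.0;
    for (int i = 1; i < grid.size(); ++i) {
        if (grid[i] <= upr) {
            // full trapezoid between grid[i-1] and grid[i]
            out += 0.5 * (vals[i - 1] + vals[i]) * (grid[i] - grid[i - 1]);
        } else if (grid[i - 1] < upr) {
            // partial cell: interpolate vals at upr and integrate up to upr
            double w = (upr - grid[i - 1]) / (grid[i] - grid[i - 1]);
            double v_upr = vals[i - 1] + w * (vals[i] - vals[i - 1]);
            out += 0.5 * (vals[i - 1] + v_upr) * (upr - grid[i - 1]);
        }
    }
    return out;
}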
//' Construct a copula using uniform sampling from the unit simplex
//'
//' Given two families of parallel hyperplanes intersecting the canonical simplex, this function uniformly samples from the canonical simplex and constructs an approximation of the bivariate probability distribution, called a copula.
//'
//' @param h1 A \eqn{d}-dimensional vector that describes the direction of the first family of parallel hyperplanes.
//' @param h2 A \eqn{d}-dimensional vector that describes the direction of the second family of parallel hyperplanes.
//' @param numSlices The number of the slices for the copula. Default value is 100.
//' @param N The number of points to sample. Default value is \eqn{4\cdot 10^6}.
//'
//' @references \cite{L. Cales, A. Chalkis, I.Z. Emiris, V. Fisikopoulos,
//' \dQuote{Practical volume computation of structured convex bodies, and an application to modeling portfolio dependencies and financial crises,} \emph{Proc. of Symposium on Computational Geometry, Budapest, Hungary,} 2018.}
//'
//' @return A \eqn{numSlices\times numSlices} numerical matrix that corresponds to a copula.
//' @examples
//' # compute a copula for two random families of parallel hyperplanes
//' h1 = runif(n = 10, min = 1, max = 1000)
//' h1 = h1 / 1000
//' h2 = runif(n = 10, min = 1, max = 1000)
//' h2 = h2 / 1000
//' cop = copula1(h1 = h1, h2 = h2, numSlices = 10, N = 100000)
//' @export
// [[Rcpp::export]]
Rcpp::NumericMatrix copula1(Rcpp::NumericVector h1, Rcpp::NumericVector h2,
                            Rcpp::Nullable<unsigned int> numSlices,
                            Rcpp::Nullable<unsigned int> N) {

    typedef double NT;
    typedef Cartesian<NT> Kernel;
    typedef typename Kernel::Point Point;
    typedef boost::mt19937 RNGType;

    unsigned int num_slices = 100, numpoints = 4000000;

    if (numSlices.isNotNull()) {
        num_slices = Rcpp::as<unsigned int>(numSlices);
    }

    if (N.isNotNull()) {
        numpoints = Rcpp::as<unsigned int>(N);
    }

    Rcpp::NumericMatrix copula(num_slices, num_slices);
    std::vector<std::vector<NT> > StdCopula;
    unsigned int dim = h1.size(), i, j;

    std::vector<NT> hyp1 = Rcpp::as<std::vector<NT> >(h1);
    std::vector<NT> hyp2 = Rcpp::as<std::vector<NT> >(h2);

    StdCopula = twoParHypFam<Point, RNGType>(dim, numpoints, num_slices, hyp1, hyp2);

    for (i = 0; i < num_slices; i++) {
        for (j = 0; j < num_slices; j++) {
            copula(i, j) = StdCopula[i][j];
        }
    }

    return copula;
}
glmResp::glmResp(Rcpp::List fam, Rcpp::NumericVector y,
                 Rcpp::NumericVector weights, Rcpp::NumericVector offset)
    throw (std::runtime_error)
    : lmResp(y, weights, offset), d_fam(fam), d_eta(y.size()) {
}