Example #1
File: glmfun.cpp  Project: paasim/glmproj
/**
 * Returns the value of the penalty term in the elastic net regularization.
 */
double elnet_penalty(vec beta, // coefficients
                     double lambda, // regularization parameter
                     double alpha, // elastic net mixing parameter
                     vec penalty) // relative penalties for the variables 
{
  // variables with a non-finite relative penalty are excluded from the sum
  uvec fin = find_finite(penalty);
  double value = lambda*sum(penalty.elem(fin) % (0.5*(1-alpha)*square(beta.elem(fin))
                                                   + alpha*abs(beta.elem(fin))));
  return value;
}
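For reference, the quantity computed above is the standard elastic net penalty lambda * sum_k w_k * ((1-alpha)/2 * beta_k^2 + alpha*|beta_k|), where w_k is the relative penalty of variable k; non-finite entries of w are skipped via find_finite. A minimal standalone sketch with made-up values (plain Armadillo rather than Rcpp, so it compiles and runs on its own):

#include <armadillo>
#include <iostream>
using namespace arma;

int main() {
  vec beta    = {0.5, -1.0, 2.0};        // hypothetical coefficients
  vec penalty = {1.0, 1.0, datum::inf};  // the non-finite entry is excluded below
  double lambda = 0.1, alpha = 0.9;

  uvec fin = find_finite(penalty);       // indices {0, 1}; the inf entry is skipped
  double value = lambda*sum(penalty.elem(fin) % (0.5*(1-alpha)*square(beta.elem(fin))
                                                   + alpha*abs(beta.elem(fin))));
  std::cout << value << std::endl;       // 0.1*(0.4625 + 0.95) = 0.14125
  return 0;
}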
//FUNCTION SPECIFIC TO MAIN FUNCTION------------------------------------------------------
//[[Rcpp::export]]
double llmnl_con(vec const& betastar, vec const& y, mat const& X, vec const& SignRes = NumericVector::create(0)){
  
  // Wayne Taylor 7/8/2016
  
  // Evaluates the log-likelihood for the multinomial logit model WITH SIGN CONSTRAINTS
  // NOTE: this is exported only because it is used in the shell .R function; it will not be available to users
  
  //Reparameterize betastar to beta to allow for sign restrictions
  vec beta = betastar;
  
  //The default SignRes vector is a single element vector containing a zero
  //any() returns true if any elements of SignRes are non-zero
  if(any(SignRes)){ 
    uvec signInd = find(SignRes != 0);
    beta.elem(signInd) = SignRes.elem(signInd) % exp(beta.elem(signInd));  //% performs element-wise multiplication
  }
  
  int n = y.size();    // number of observations
  int j = X.n_rows/n;  // number of alternatives per observation
  vec Xbeta = X*beta;  // stacked utilities: rows i*j ... i*j+j-1 belong to observation i
  
  vec xby = zeros<vec>(n);
  vec denom = zeros<vec>(n);
  
  for(int i = 0; i < n; i++){
    // denominator: sum of exp(utility) over the j alternatives for observation i
    for(int p = 0; p < j; p++) denom[i] += exp(Xbeta[i*j+p]);
    // numerator: utility of the chosen alternative (y is 1-indexed)
    xby[i] = Xbeta[i*j+y[i]-1];
  }
  
  return(sum(xby - log(denom)));
}
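The reparameterization above maps the unconstrained betastar to a sign-constrained beta: for every restricted coefficient k, beta_k = SignRes_k * exp(betastar_k), which is strictly positive when SignRes_k = 1 and strictly negative when SignRes_k = -1. A minimal standalone sketch of the mapping and the likelihood loop with made-up data (plain Armadillo rather than Rcpp; all names and values here are illustrative only):

#include <armadillo>
#include <iostream>
using namespace arma;

int main() {
  // hypothetical toy problem: n = 2 observations, j = 3 alternatives, 2 covariates
  int n = 2, j = 3;
  arma_rng::set_seed(42);
  mat X = randn<mat>(n*j, 2);        // rows i*j ... i*j+j-1 hold observation i
  vec y = {1, 3};                    // chosen alternatives, 1-indexed as in llmnl_con
  vec betastar = {0.5, 0.3};
  vec SignRes  = {0.0, -1.0};        // second coefficient constrained to be negative

  // reparameterize: beta_k = SignRes_k * exp(betastar_k) for the constrained k
  vec beta = betastar;
  if(any(SignRes != 0)){
    uvec signInd = find(SignRes != 0);
    beta.elem(signInd) = SignRes.elem(signInd) % exp(beta.elem(signInd));
  }
  beta.print("beta:");               // {0.5, -exp(0.3)}, i.e. about {0.5, -1.35}

  // same likelihood loop as llmnl_con
  vec Xbeta = X*beta, xby = zeros<vec>(n), denom = zeros<vec>(n);
  for(int i = 0; i < n; i++){
    for(int p = 0; p < j; p++) denom[i] += exp(Xbeta[i*j+p]);
    xby[i] = Xbeta[i*j + (int)y[i] - 1];
  }
  std::cout << "loglik = " << sum(xby - log(denom)) << std::endl;
  return 0;
}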
Example #3
File: PGS.cpp  Project: YinanZheng/PGS
//-----------------------------------------------------
// Cross-validation over a grid of lambda values; picks the lambda with the
// smallest average held-out error.
List CV_lam_grid_cpp(vec y_vect, mat x_mat, vec id_vect, mat hat_R_full, vec beta_ini,
                     int fold, int n, vec m, int obs_n, int p,
                     uvec start, uvec end, vec lam_vect,
                     double eps_tozero, double eps_stop, int iter_try){
  
  int lam_length = lam_vect.n_elem;
  double lam_temp, cv_sum, flag_stop_sum, iter_n_sum, cv_min = datum::inf, lam_min = -1;
  uvec cvgrps_seq = linspace<uvec>(0, (n-1), n);             // subject indices 0..n-1
  uvec cvgrps_subsets = shuffle(cvgrps_seq);                 // random permutation of the subjects
  uvec cvgrps_which = cvgrps_seq - (cvgrps_seq/fold)*fold;   // element-wise i mod fold: fold label of position i
  uvec index_cv_train, index_cv_test;
  vec cv_vect(lam_length), flag_stop_vect(lam_length), iter_n_vect(lam_length);
  uvec idx_train, idx_test;
  vec y_train, y_test, id_train, m_train, beta_train;
  mat x_train, x_test;
  List indGen_res, beta_shrink_res;
  
  for(int lam_iter = 0; lam_iter < lam_length; lam_iter++)
  {
    lam_temp = lam_vect(lam_iter);
    cv_sum = 0; 
    flag_stop_sum = 0; 
    iter_n_sum = 0;
    
    for(int k = 0; k < fold; k++)
    {
      index_cv_train = cvgrps_subsets.elem(find(cvgrps_which != k));
      index_cv_test = cvgrps_subsets.elem(find(cvgrps_which == k));
      
      idx_train = unique(seqJoin_vec(start.elem(index_cv_train), end.elem(index_cv_train), m.elem(index_cv_train)));
      idx_test = unique(seqJoin_vec(start.elem(index_cv_test), end.elem(index_cv_test), m.elem(index_cv_test)));
      
      y_train = y_vect.elem(idx_train);
      x_train = x_mat.rows(idx_train);
      id_train = id_vect.elem(idx_train);
      indGen_res = indGen_cpp(id_train);
      
      y_test = y_vect.elem(idx_test);
      x_test = x_mat.rows(idx_test);
      
      beta_shrink_res = beta_shrink_normal_cpp(y_train, x_train, id_train, hat_R_full, beta_ini,
                                               as<int>(indGen_res[0]), as<vec>(indGen_res[1]),
                                               as<int>(indGen_res[2]), p, as<uvec>(indGen_res[3]),
                                               as<uvec>(indGen_res[4]), as<vec>(indGen_res[5]),
                                               as<uvec>(indGen_res[6]), lam_temp, eps_tozero,
                                               eps_stop, iter_try);
      cv_sum += sqrt(mean(pow(y_test - x_test * as<vec>(beta_shrink_res[0]), 2)));  // RMSE on the held-out fold
      flag_stop_sum += as<double>(beta_shrink_res[2]);
      iter_n_sum += as<double>(beta_shrink_res[3]);
    }
    
    // Average across the k folds
    cv_vect(lam_iter) = cv_sum / fold;
    flag_stop_vect(lam_iter) = flag_stop_sum / fold;
    iter_n_vect(lam_iter) = iter_n_sum / fold;
    
    // track the lambda with the smallest average CV error
    // (compare the fold average so cv_min is on the same scale as cv_vect)
    if(cv_vect(lam_iter) < cv_min)
    {
      lam_min = lam_temp;
      cv_min = cv_vect(lam_iter);
    }
  }
  
  return List::create(Named("lam.vect") = lam_vect,
                      Named("cv.vect") = cv_vect,
                      Named("flag_stop_vect") = flag_stop_vect,
                      Named("iter_n_vect") = iter_n_vect,
                      Named("lam.min") = lam_min,
                      Named("cv.min") = cv_min
  );
}
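The fold split above relies on cvgrps_seq - (cvgrps_seq/fold)*fold being the element-wise remainder i mod fold: position i of the shuffled index vector gets fold label i mod fold, so the n subjects land in roughly equal random folds. A minimal standalone sketch of just that split, with made-up sizes (plain Armadillo rather than Rcpp):

#include <armadillo>
#include <iostream>
#include <string>
using namespace arma;

int main() {
  arma_rng::set_seed(1);                    // reproducible shuffle
  int n = 10, fold = 3;
  uvec seq = linspace<uvec>(0, n-1, n);     // subject indices 0..9
  uvec subsets = shuffle(seq);              // random permutation of the subjects
  uvec which = seq - (seq/fold)*fold;       // i mod fold: 0,1,2,0,1,2,...

  for(int k = 0; k < fold; k++){
    uvec test  = subsets.elem(find(which == k));   // held-out subjects for fold k
    uvec train = subsets.elem(find(which != k));   // training subjects for fold k
    test.t().print("test fold " + std::to_string(k) + ":");
    train.t().print("train fold " + std::to_string(k) + ":");
  }
  return 0;
}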