// Arguments "opt_param" and "grad" are updated by nlopt.
// "opt_param" corresponds to the optimization parameters.
// "grad" corresponds to the gradient.
double CollaborativeFiltering::ComputeCost(
  const std::vector<double> &opt_param,std::vector<double> &grad,
  const DataUnlabeled &ratings_data,const DataUnlabeled &indicator_data,
  int num_users,int num_movies,int num_features) {
  assert(num_users >= 1);
  assert(num_movies >= 1);
  assert(num_features >= 1);
  const int kTotalNumFeatures = num_features*(num_users+num_movies);
  arma::vec nlopt_theta = arma::zeros<arma::vec>(kTotalNumFeatures,1);
  arma::mat features_cvec = arma::zeros<arma::mat>(num_movies*num_features,1);
  arma::mat params_cvec = arma::zeros<arma::mat>(num_users*num_features,1);

  // Uses current value of "theta" from nlopt.
  for(int feature_index=0; feature_index<kTotalNumFeatures; feature_index++)
  {
    nlopt_theta(feature_index) = opt_param[feature_index];
    if (feature_index < (num_users*num_features)) {
      params_cvec(feature_index) = opt_param[feature_index];
    }
    else {
      features_cvec(feature_index-(num_users*num_features)) = \
        opt_param[feature_index];
    }
  }
  set_theta(nlopt_theta);
  arma::mat features_cvec_squared = arma::square(features_cvec);
  arma::mat params_cvec_squared = arma::square(params_cvec);
  arma::mat params_mat = arma::reshape(params_cvec,num_users,num_features);
  arma::mat features_mat = \
    arma::reshape(features_cvec,num_movies,num_features);

  // Computes cost function given current value of "theta".
  const arma::mat kSubsetIndicatorMat = \
    indicator_data.training_features().submat(0,0,num_movies-1,num_users-1);
  const arma::mat kSubsetRatingsMat = \
    ratings_data.training_features().submat(0,0,num_movies-1,num_users-1);
  const arma::mat kYMat = \
    (arma::ones<arma::mat>(num_users,num_movies)-kSubsetIndicatorMat.t()) % \
    (params_mat*features_mat.t())+kSubsetRatingsMat.t();
  const arma::mat kCostFunctionMat = params_mat*features_mat.t()-kYMat;
  const arma::vec kCostFunctionVec = arma::vectorise(kCostFunctionMat);
  const double kJTheta = 0.5*arma::as_scalar(sum(square(kCostFunctionVec)));

  // Adds regularization term.
  const double kRegTerm = 0.5*lambda_*\
    (as_scalar(sum(params_cvec_squared))+\
    as_scalar(sum(features_cvec_squared)));
  const double kJThetaReg = kJTheta+kRegTerm;

  // Updates "grad" for nlopt.
  const int kReturnCode = this->ComputeGradient(ratings_data,indicator_data,\
    num_users,num_movies,num_features);
  const arma::vec kCurrentGradient = gradient();
  for(int feature_index=0; feature_index<kTotalNumFeatures; feature_index++)
  {
    grad[feature_index] = kCurrentGradient(feature_index);
  }

  return kJThetaReg;
}
Exemple #2
0
/* Conditional on other individual */
// Weighted conditional CDF contribution F1 = pi * Phi(alpgam; cond moments),
// conditioning on the random effects u and on the partner individual.
double F1(unsigned row, unsigned cause, unsigned indiv, unsigned cond_cause, const DataPairs &data, const gmat &sigma, vec u){

  // Index of the partner individual (pairs, so 0 <-> 1).
  const unsigned cond_indiv = (indiv==0) ? 1 : 0;

  // alpha - gamma of the partner individual.
  const double cond_alpgam = data.alphaMarg_get(row, cond_cause, cond_indiv)
                           - data.gammaMarg_get(row, cond_cause, cond_indiv);

  vec vcond_alpgam(1);
  vcond_alpgam(0) = cond_alpgam;

  // Stack the partner's alpgam on top of the random-effects vector u.
  vec alpgamu = join_cols(vcond_alpgam, u);

  // Conditional moments given u and the partner individual.
  vmat cond_sig = sigma(cause,cond_cause);
  const double cond_mean = as_scalar(cond_sig.proj*alpgamu);

  // alpha - gamma of this individual and cause.
  const double alpgam = data.alphaMarg_get(row, cause, indiv)
                      - data.gammaMarg_get(row, cause, indiv);

  // Mixing probability times the conditional normal CDF.
  return data.piMarg_get(row, cause, indiv)*pn(alpgam, cond_mean, as_scalar(cond_sig.vcov));
};
Exemple #3
0
// Computes E[(x'w)^2] under the posterior: x' * Sigma * x + (x' * mu)^2.
// Debug prints of both intermediate quantities are intentionally kept.
double get_xi_sqr(arma::colvec &x, arma::mat &post_sigma, arma::colvec &post_mu){
  arma::mat quad_form = x.t()*post_sigma*x;  // variance contribution
  arma::mat proj = x.t()*post_mu;            // mean contribution
  std::cout << "x.t()*post_sigma*x = " << std::endl;
  quad_form.print();
  std::cout << "x.t()*post_mu = " << std::endl;
  proj.print();
  const double mean_part = as_scalar(proj);
  return as_scalar(quad_form) + mean_part*mean_part;
}
Exemple #4
0
/* Marginal */
// Weighted conditional CDF contribution F1 = pi * Phi(alpgam; cond moments),
// conditioning only on the random effects u.
double F1(unsigned row, unsigned cause, unsigned indiv, const DataPairs &data, const gmat &sigma, vec u){

  // Conditional moments given the random effects u.
  vmat cond_sig = sigma(cause);
  const double cond_mean = as_scalar(cond_sig.proj*u);

  // alpha - gamma for this individual and cause.
  const double alpgam = data.alphaMarg_get(row, cause, indiv)
                      - data.gammaMarg_get(row, cause, indiv);

  // Mixing probability times the conditional normal CDF.
  return data.piMarg_get(row, cause, indiv)*pn(alpgam, cond_mean, as_scalar(cond_sig.vcov));
};
Exemple #5
0
// Gradient of log dF1 with respect to the random effects u:
// d(log pi)/du plus the Gaussian inner-term derivative.
rowvec dlogdF1du(unsigned row, const unsigned &cause, const unsigned &indiv, const DataPairs &data, const gmat &sigma, vec u){

  // Conditional moments given the random effects u.
  vmat cond_sig = sigma(cause);
  const double cond_mean = as_scalar(cond_sig.proj*u);

  // Residual of alpha - gamma around the conditional mean.
  const double alpgam = data.alphaMarg_get(row, cause, indiv)
                      - data.gammaMarg_get(row, cause, indiv);

  // Derivative of the Gaussian exponent: (alpgam - mean) * precision * proj.
  rowvec dinnerdu = (alpgam - cond_mean)*as_scalar(cond_sig.inv)*cond_sig.proj;

  // Chain rule: log-pi derivative plus the Gaussian part.
  return data.dlogpiduMarg_get(row, cause, indiv) + dinnerdu;
};
Exemple #6
0
// Log density contribution log dF1 = log(pi) + log normal density
// (conditional on u) + log Jacobian term d(alpha)/dt.
double logdF1(unsigned row, const unsigned &cause, const unsigned &indiv, const DataPairs &data, const gmat &sigma, vec u){

  // Conditional moments given the random effects u.
  vmat cond_sig = sigma(cause);
  const double cond_mean = as_scalar(cond_sig.proj*u);

  const double alpgam = data.alphaMarg_get(row, cause, indiv)
                      - data.gammaMarg_get(row, cause, indiv);

  // Squared standardised residual (scalar conditional precision).
  const double resid = alpgam - cond_mean;
  const double inner = resid*resid*as_scalar(cond_sig.inv);

  // Normal log-density plus the log Jacobian d(alpha)/dt.
  const double logpdf = loginvsqtwopi + cond_sig.loginvsqdet
    + log(data.dalphaMarg_get(row, cause, indiv)) - 0.5*inner;

  // Add the log mixing probability.
  return log(data.piMarg_get(row, cause, indiv)) + logpdf;
};
Exemple #7
0
  /*********************************************
   * Greedily decode the latent-value sequence
   * for a document: for each sentence, score
   * every latent value with its own computation
   * graph and keep the argmax of the
   * (log-normalized) posterior p(R|cvec, sent).
   *
   * doc: sequence of sentences to decode.
   * Returns one latent value index per sentence.
   *********************************************/
  LatentSeq DecodeGraph(const Doc doc){
    // ----------------------------------------
    // init
    // NOTE(review): nsent is int while sidx is unsigned — signed/unsigned
    // comparison below; fine as long as doc.size() fits in int.
    int nsent = doc.size();
    LatentSeq latseq;
    // ----------------------------------------
    // for each sentence in doc, each latval, compute
    // the posterior prob p(R|cvec, sent)
    vector<float> U;
    for (unsigned sidx = 0; sidx < nsent; sidx ++){
      // final_hlist is a member; BuildSentGraph presumably appends the
      // context vector for each latent value — TODO confirm.
      final_hlist.clear();
      for (int val = 0; val < nlatvar; val ++){
	// A fresh graph per (sentence, latval) candidate.
	ComputationGraph cg;
	BuildSentGraph(doc[sidx], sidx, cg, val);
	float prob = as_scalar(cg.forward());
	U.push_back(prob);
	cg.clear();
      }
      // normalize and get the argmax
      log_normalize(U);
      // greedy decoding
      int max_idx = argmax(U);
      // get the corresponding context vector
      final_h = final_hlist[max_idx];
      // Reset scores for the next sentence.
      U.clear();
      // cerr << "max_latval = " << max_idx << endl;
      latseq.push_back(max_idx);
    }
    // cerr << "====" << endl;
    return latseq;
  }
Exemple #8
0
//' @title Removal of Boundary Wavelet Coefficients
//' @description Removes the first n wavelet coefficients.
//' @param x           A \code{field<vec>} that contains the nlevel decomposition using either modwt or dwt.
//' @param wave_filter A \code{field<vec>} containing filter information. Only "haar" is implemented.
//' @param method      A \code{string} to describe the mode. Choose between "modwt" and "dwt"
//' @return A \code{field<vec>} with boundary modwt or dwt taken care of.
//' @keywords internal
//' @details 
//' The vectors are truncated by removing the first n wavelet coefficients. 
//' These vectors are then stored into the field that is returned.
//' Note: As a result, there are no NA's introduced and hence the na.omit is not needed.
//' @examples
//' x = rnorm(100)
//' me = modwt_cpp(x, filter_name = "haar", nlevels = 4, boundary = "periodic", brickwall = FALSE)
//' brick_wall(me, select_filter("haar"), "modwt")
// [[Rcpp::export]]
// Drops the boundary-affected wavelet coefficients at each decomposition
// scale of x (modwt or dwt), returning the truncated field.
arma::field<arma::vec> brick_wall(arma::field<arma::vec> x,  
                                  arma::field<arma::vec> wave_filter, 
                                  std::string method) 
{
    // Filter length (first element of the filter info field).
    const int m = as_scalar(wave_filter(0));

    for(unsigned int scale = 0; scale < x.n_elem; scale++)
    {
        const double two_pow = pow(2.0, double(scale) + 1.0);

        // Number of boundary coefficients to remove at this scale.
        int drop = (two_pow - 1.0) * (m - 1.0);
        if (method == "dwt") {
            drop = ceil((m - 2.0) * (1.0 - 1.0/two_pow));
        }

        arma::vec coeffs = x(scale);
        const int last = coeffs.n_elem - 1; // indices run 0,...,(N-1)
        drop = std::min(drop, last);

        // When every coefficient would be removed, store an empty vector.
        if (drop != last) {
            x(scale) = coeffs.rows(drop, last);
        } else {
            x(scale) = arma::zeros<arma::vec>(0);
        }
    }

    return x;
}
Exemple #9
0
arma_warn_unused
inline
sword
randi(const distr_param& param)
  {
  // Scalar convenience overload: draws a single random integer by
  // generating a 1x1 ivec and extracting its only element.
  return as_scalar( randi<ivec>(uword(1), uword(1), param) );
  }
Exemple #10
0
arma_warn_unused
inline
typename arma_scalar_only<eT>::result
randi(const distr_param& param)
  {
  // Scalar overload for an arbitrary element type eT (the template header
  // is declared outside this excerpt): draw a 1x1 column vector of eT and
  // convert its single element to eT.
  return eT( as_scalar( randi< Col<eT> >(uword(1), uword(1), param) ) );
  }
Exemple #11
0
// Gradient of F1 (conditional on the partner individual) with respect to
// the random effects u, via the product rule on pi * Phi(...).
rowvec dF1du(unsigned row, unsigned cause, unsigned indiv, unsigned cond_cause, const DataPairs &data, const gmat &sigma, vec u){

  // Other individual
  unsigned cond_indiv;
  if (indiv==0){
    cond_indiv=1;
  }
  else {
    cond_indiv=0;
  }

  // Alpgam of other individual
  double cond_alp = data.alphaMarg_get(row, cond_cause, cond_indiv);
  double cond_gam = data.gammaMarg_get(row, cond_cause, cond_indiv);
  double cond_alpgam = cond_alp - cond_gam;

  vec vcond_alpgam(1); vcond_alpgam(0) = cond_alpgam;

  // Joining u vector and alpgam from other individual
  vec alpgamu = join_cols(vcond_alpgam, u);

  // Attaining variance covariance matrix etc. (conditional on u and other individual)
  vmat cond_sig = sigma(cause,cond_cause);
  double cond_mean = as_scalar(cond_sig.proj*alpgamu);

  double alp = data.alphaMarg_get(row, cause, indiv);
  double gam = data.gammaMarg_get(row, cause, indiv);
  double alpgam = alp - gam;
  double c_alpgam = alpgam - cond_mean;

  // Squared standardised residual (scalar conditional precision).
  double inner = pow((c_alpgam),2)*as_scalar(cond_sig.inv);

  // Conditional normal CDF and density evaluated at alpgam.
  double cdf = pn(alpgam, cond_mean, as_scalar(cond_sig.vcov));
  double pdf = invsqtwopi*1/sqrt(as_scalar(cond_sig.vcov))*exp(-0.5*inner);

  // Column indices of the projection belonging to u: skips column 0,
  // which corresponds to the conditioning individual's alpgam.
  uvec upos = zeros<uvec>(u.n_rows);
  for (unsigned h=0; h<u.n_rows; h++){
    upos(h) = h + 1;
  };

  // d(cdf)/du via the chain rule through the conditional mean,
  // restricted to the u-columns of the projection.
  mat proj = cond_sig.proj;
  rowvec dcdfdu = pdf*(-proj.cols(upos));
  // Product rule: dpi/du * cdf + pi * dcdf/du.
  rowvec dF1du = data.dpiduMarg_get(row, cause, indiv)*cdf + data.piMarg_get(row, cause, indiv)*dcdfdu;

  return(dF1du);
};
Exemple #12
0
double HMM::getProbability(const colvec& sample)
{
    double P = 0.0;
    // See Eq. (2.0.2) in doc/TechnicalReport.pdf
    for(uint k=0; k<nSTATES; k++)
        P += alpha[k] * as_scalar( this->gmm->getCOMPONENTS(k).getPDFValue(sample) ); // See Eq. (2.0.3) for "getPDFValue" in doc/TechnicalReport.pdf
    return P;
}
inline
typename T1::pod_type
op_norm::mat_norm_1(const SpProxy<T1>& P)
  {
  arma_extra_debug_sigprint();
  
  // Matrix 1-norm of a sparse matrix: the maximum absolute column sum
  // (sum over rows per column, then max over columns).
  // TODO: this can be sped up with a dedicated implementation
  return as_scalar( max( sum(abs(P.Q), 0), 1) );
  }
Exemple #14
0
// Variational-Bayes binary classification: evaluates the evidence bound
// for class s=0 and s=1 (differing only through the posterior mean mu_t)
// and returns the class with the larger value.
// NOTE(review): the term mu_t.t()*sigma_t.t()*mu_t uses the TRANSPOSE of
// the posterior covariance; the usual bound uses the posterior precision
// (sigma_t_inv) here — confirm the transpose is not a typo for the inverse.
// NOTE(review): log_0 and log_1 are textually identical expressions; they
// differ only via mu_t recomputed with s=0 vs s=1 — confirm intended.
int classify(double xi, arma::mat &sigma, arma::mat &sigma_inv, arma::colvec &x, arma::colvec &mu){
  arma::mat sigma_t_inv = get_post_sigma_inv(xi, x, sigma_inv);
  arma::mat sigma_t = sigma_t_inv.i();
  arma::colvec mu_t; 

  // Evidence bound for class 0.
  int s = 0;
  mu_t = get_post_mu(x, sigma_inv, sigma_t, mu, s);
  
  double log_0 = log(sigmoid(xi)) - xi/2 - lambda(xi)*xi*xi - 0.5*as_scalar(mu.t()*sigma_inv*mu) + 0.5*as_scalar(mu_t.t()*sigma_t.t()*mu_t) + 0.5*log(det(sigma_t)/det(sigma));

  // Evidence bound for class 1.
  s = 1;
  mu_t = get_post_mu(x, sigma_inv, sigma_t, mu, s);
  
  double log_1 = log(sigmoid(xi)) - xi/2 - lambda(xi)*xi*xi - 0.5*as_scalar(mu.t()*sigma_inv*mu) + 0.5*as_scalar(mu_t.t()*sigma_t.t()*mu_t) + 0.5*log(det(sigma_t)/det(sigma));

  if(log_1 > log_0) return 1;
  else return 0;

}
Exemple #15
0
// Fits a polynomial of the given order to (x, y) samples via recursive
// least squares (RLS).
//   x:      pointer to the abscissa samples
//   y:      pointer to the ordinate samples
//   length: number of samples
//   order:  polynomial order; order+1 coefficients are estimated
// Returns the fitted Polynomial.
inline Polynomial<eT>
polyfit(const eT* x, const eT* y, const uword length, const uword order)
{
    const uword size = order + 1;
    Col<eT> Q(size);   // parameter vector to be estimated.
    Q.fill(eT(0));     // initialized with zeros.
    Col<eT> phi(size); // regressor vector [1, x, x^2, ...].
    Mat<eT> P = eye(size, size) * 1e6;  // large initial covariance.
    Col<eT> K;         // RLS gain vector.
    Mat<eT> I = eye(size, size);

    for(uword i = 0; i < length; ++i)
    {
        detail::gen_phi(phi, x[i]);
        K = P * phi / (1 + as_scalar(phi.st() * P * phi)); //K(t) = P(t-1)*phi(t)*inv(I+phi'*P(t-1)*phi).
        P = (I - K * phi.st()) * P; //P(t) = (I-K(t)*phi')*P(t-1).
        Q = Q + K * (y[i] - as_scalar(phi.st() * Q)); //Q(t) = Q(t-1) + K(t)*(y(t)-phi'Q(t-1)).
    }
    // Return the prvalue directly: wrapping it in std::move() is a
    // pessimizing move that blocks copy elision (-Wpessimizing-move).
    return Polynomial<eT>(Q);
}
Exemple #16
0
// Posterior Density Function to sample Theta 
// Log conditional posterior of log(Theta): Gaussian prior quadratic form,
// weighted squared log-residual likelihood, and a standard-normal
// log-density term for logEHRTime.
double f_theta( colvec logTheta, colvec mTheta, mat Otheta, double Tau, mat Y, mat Fit, rowvec Sigma, double logEHRTime ) 
{
  // Prior: -0.5 * Tau * (x - m)' * O * (x - m).
  colvec cTheta = ( logTheta - mTheta ) ;
  const double prior = - 0.5 * Tau * as_scalar( cTheta.t() * Otheta * cTheta );

  // Likelihood: squared log-residuals weighted by 1/Sigma.
  mat D =  diagmat( 1 / Sigma );
  const double like  = - 0.5 * accu( pow ( log( Y ) - log( Fit ), 2 ) * D ); // Need to figure out what to do with Sigma

  // Conditional Posterior -----------------------------------------------------------
  return prior + like + Rf_dnorm4( logEHRTime, 0, 1, 1 );
}
Exemple #17
0
// Gradient of the marginal F1 with respect to the random effects u,
// via the product rule on pi * Phi(...).
rowvec dF1du(unsigned row, unsigned cause, unsigned indiv, const DataPairs &data, const gmat &sigma, vec u){

  // Conditional moments given the random effects u.
  vmat cond_sig = sigma(cause);
  const double cond_mean = as_scalar(cond_sig.proj*u);

  const double alpgam = data.alphaMarg_get(row, cause, indiv)
                      - data.gammaMarg_get(row, cause, indiv);
  const double c_alpgam = alpgam - cond_mean;

  // Squared standardised residual (scalar conditional precision).
  const double inner = pow((c_alpgam),2)*as_scalar(cond_sig.inv);

  // Conditional normal CDF and density at alpgam.
  const double cdf = pn(alpgam, cond_mean, as_scalar(cond_sig.vcov));
  const double pdf = invsqtwopi*1/sqrt(as_scalar(cond_sig.vcov))*exp(-0.5*inner);

  // d(cdf)/du through the conditional mean: -pdf * projection.
  rowvec dcdfdu = pdf*(-cond_sig.proj);

  // Product rule: dpi/du * cdf + pi * dcdf/du.
  return data.dpiduMarg_get(row, cause, indiv)*cdf + data.piMarg_get(row, cause, indiv)*dcdfdu;
};
Exemple #18
0
inline
typename T1::pod_type
arma_mat_norm_inf(const Proxy<T1>& A)
  {
  arma_extra_debug_sigprint();
  
  typedef typename T1::elem_type eT;
  typedef typename T1::pod_type   T;
  
  // Materialise the proxy expression into a concrete matrix reference.
  const unwrap<typename Proxy<T1>::stored_type> tmp(A.Q);
  const Mat<eT>& X = tmp.M;
  
  // Matrix infinity-norm: the maximum absolute row sum
  // (sum over columns per row, then max over rows).
  // TODO: this can be sped up with a dedicated implementation
  return as_scalar( max( sum(abs(X),1) ) );
  }
Exemple #19
0
// Joint log density contribution for a pair with the given causes:
// log mixing probabilities + bivariate normal log-density (conditional
// on u) + log Jacobian terms for both individuals.
double logdF2(unsigned row, const irowvec &causes, const DataPairs &data, const gmat &sigma, vec u){

  // Joint conditional moments given the random effects u.
  vmat cond_sig = sigma(causes);
  vec cond_mean = cond_sig.proj*u;

  rowvec alp = data.alpha_get(row, causes);
  rowvec gam = data.gamma_get(row, causes);

  // Residual of (alpha - gamma) around the conditional mean.
  colvec c_alpgam = (alp.t() - gam.t()) - cond_mean;

  // Mahalanobis quadratic form of the residual.
  const double inner = as_scalar(c_alpgam.t()*cond_sig.inv*c_alpgam);

  // Bivariate normal log-density with both Jacobian terms.
  const double logpdf = loginvtwopi + cond_sig.loginvsqdet
    + log(data.dalphaMarg_get(row, causes(0), 0))
    + log(data.dalphaMarg_get(row, causes(1), 1))
    - 0.5*inner;

  // Add the log mixing probabilities of both individuals.
  return log(data.piMarg_get(row, causes(0), 0))
    + log(data.piMarg_get(row, causes(1), 1))
    + logpdf;
};
// One random-walk Metropolis step for the constrained MNL coefficients.
// Returns the (possibly unchanged) draw, the stay indicator and the
// log-likelihood of the returned draw.
mnlMetropOnceOut mnlMetropOnce_con(vec const& y, mat const& X, vec const& oldbeta, 
                                            double oldll,double s, mat const& incroot, 
                                            vec const& betabar, mat const& rootpi,vec const& SignRes = NumericVector::create(2)){ 
  // Wayne Taylor 10/01/2014
  
  // function to execute rw metropolis for the MNL
  // y is n vector with element = 1,...,j indicating which alt chosen
  // X is nj x k matrix of xvalues for each of j alt on each of n occasions
  // RW increments are N(0,s^2*t(inc.root)%*%inc.root)
  // prior on beta is N(betabar,Sigma)  Sigma^-1=rootpi*t(rootpi)
  //  inc.root, rootpi are upper triangular
  //  this means that we are using the UL decomp of Sigma^-1 for prior 
  // oldbeta is the current
  
  
  mnlMetropOnceOut out_struct;
  
  double unif;
  vec betadraw, alphaminv;
  
  int stay = 0;
  // Random-walk proposal: betac = oldbeta + s * t(incroot) * N(0, I).
  vec betac = oldbeta + s*trans(incroot)*as<vec>(rnorm(X.n_cols));
  // Candidate log-likelihood and log-posterior (likelihood + MVN prior).
  double cll = llmnl_con(betac,y,X,SignRes);
  double clpost = cll+lndMvn(betac,betabar,rootpi);
  double ldiff = clpost-oldll-lndMvn(oldbeta,betabar,rootpi);
  // Metropolis acceptance probability: min(1, exp(ldiff)).
  alphaminv << 1 << exp(ldiff);
  double alpha = min(alphaminv);
  
  if(alpha < 1) {
    unif = as_scalar(vec(runif(1)));
  } else { 
    unif=0;}
  if (unif <= alpha) {
    // Accept: move to the candidate and store its log-likelihood.
    betadraw = betac;
    oldll = cll;
  } else {
    // Reject: keep the current draw.
    betadraw = oldbeta;
    stay = 1;
  }
  
  out_struct.betadraw = betadraw;
  out_struct.stay = stay;  
  out_struct.oldll = oldll;
  
  return (out_struct);
}
Exemple #21
0
//--------------------------------------------------------------------------------------------------
// Kriging sensitivity index for the factor combination "comb":
// the quadratic form KF' * W * KF, where W is the Hadamard product of
// the Gamma slices selected by comb (1-based indices).
double Krigidx( const arma::colvec& KF,
                 const arma::colvec& comb,
                 const arma::mat& X,
                 const arma::cube& Gamma ) {
  const int n = X.n_rows;
  const int c = comb.size();

  // Element-wise product of the selected correlation slices.
  arma::mat W = arma::ones( n, n );
  for ( int k = 0; k < c; k++ ) {
    W = W % Gamma.slice( comb( k ) - 1 );
  }

  return as_scalar( KF.t() * W * KF );
}
Exemple #22
0
//--------------------------------------------------------------------------------------------------
// Total Kriging variance: the quadratic form KF' * V * KF, where
// V = prod_i (1 + Gamma_i) - 1, taken element-wise over all slices.
double Krigvar( const arma::colvec& KF, 
                const arma::cube& Gamma ) {
  const int n = Gamma.n_rows;
  const int m = Gamma.n_slices;

  // Element-wise product over slices of (ones + Gamma_i).
  arma::mat V = arma::ones( n, n );
  for( int i = 0; i < m; i++ ) {
    V = V % ( arma::ones( n, n ) + Gamma.slice( i ) );
  }
  // Subtract the all-ones matrix.
  V = V - arma::ones( n, n );

  return as_scalar( KF.t() * V * KF );
}
Exemple #23
0
// Laplace approximation via damped Newton-Raphson for the three-factor
// model: maximizes h(eta) (evaluated by hb) over eta and returns the
// row vector [logI, eta0, eta1, eta2], where logI is the log integral
// approximation. Dtol is the squared-gradient stopping tolerance, niter
// the maximum number of iterations, lambda the step damping factor.
rowvec laNRb(const rowvec &data, const mat &iS, const double &detS,
	    const rowvec &mu0, const rowvec &mu1, const rowvec &mu2,
	    const rowvec &lambda0, const rowvec &lambda1, const rowvec &lambda2,
	    const rowvec &beta0, const rowvec &beta1, const rowvec &beta2,
	    const rowvec &gamma,  const rowvec &gamma2,
	    const double &Dtol, const unsigned &niter, const double &lambda) {  
  rowvec eta = zeros(1,3);
  for (unsigned i=0; i<niter; i++) {
    hObj K = hb(eta,data,iS,
		mu0,mu1,mu2,lambda0,lambda1,lambda2,beta0,beta1,beta2,gamma,gamma2);
    // Squared gradient norm; stop when below tolerance.
    double Sabs = as_scalar(trans(K.grad)*K.grad);      
    if (Sabs<Dtol) break;
      //      mat Delta = trans(K.grad)*inv(K.hess + 0.1*eye(K.hess.n_cols,K.hess.n_cols));
    // Damped Newton step: eta <- eta - lambda * grad' * H^-1.
    mat Delta = trans(K.grad)*inv(K.hess);
    eta = eta-lambda*Delta;  
      //      hObj K = h(eta1,data,iS,
      //		mu1,mu2,lambda1,lambda2,beta,gamma);
    
  }

  // Re-evaluate at the optimum to obtain the Hessian for the Laplace term.
  hObj K = hb(eta,data,iS,
	      mu0,mu1,mu2,lambda0,lambda1,lambda2,beta0,beta1,beta2,gamma,gamma2);
  //  cerr << "K.grad=" << K.grad << endl;
  double logHdet;
  double sign;
  log_det(logHdet,sign,K.hess); // log(det(-K.hess))
  // Guard against numerically singular Hessians.
  if (std::isnan(logHdet)) logHdet = -1000;
  double logI = K.hSh - 0.5*(logHdet+log(detS));
  //  double logI = K.hSh - 0.5*log(detS);
  //  cerr << "logI" << logI << endl;
  //  cerr << "hess" << -K.hess << endl;
  //  cerr << "hSh" << -K.hSh << endl;
  rowvec res(4);
  res(0) = logI;  for (unsigned i=0; i<3; i++) res(i+1) = eta(i);
  return(res);
}
Exemple #24
0
/*!
* \brief convolutional layer forward
*             X:        [N, C, Hx, Wx]
*             weight:   [F, C, Hw, Ww]
*             bias:     [F, 1, 1, 1]
*             out:      [N, F, (Hx+pad*2-Hw)/stride+1, (Wx+pad*2-Ww)/stride+1]
* \param[in]  const vector<Blob*>& in       in[0]:X, in[1]:weights, in[2]:bias
* \param[in]  const ConvParam* param        conv params: stride, pad
* \param[out] Blob** out                    Y
*/
void ConvLayer::forward(const vector<shared_ptr<Blob>>& in,
                        shared_ptr<Blob>& out,
                        Param& param) {
    // Release any previously allocated output before recomputing.
    if (out) {
        out.reset();
    }
    // Input and weight blobs must agree on channel count.
    assert(in[0]->get_C() == in[1]->get_C());
    int N = in[0]->get_N();
    int F = in[1]->get_N();
    int C = in[0]->get_C();  // NOTE(review): unused below; kept for symmetry.
    int Hx = in[0]->get_H();
    int Wx = in[0]->get_W();
    int Hw = in[1]->get_H();
    int Ww = in[1]->get_W();

    // calc Hy, Wy
    int Hy = (Hx + param.conv_pad*2 -Hw) / param.conv_stride + 1;
    int Wy = (Wx + param.conv_pad*2 -Ww) / param.conv_stride + 1;

    out.reset(new Blob(N, F, Hy, Wy));
    // Zero-pad the input once up front.
    Blob padX = (*in[0]).pad(param.conv_pad);

    // Naive convolution: for each sample and filter, slide the window over
    // the padded input; output = sum(window .* filter) + bias.
    for (int n = 0; n < N; ++n) {
        for (int f = 0; f < F; ++f) {
            for (int hh = 0; hh < Hy; ++hh) {
                for (int ww = 0; ww < Wy; ++ww) {
                    cube window = padX[n](span(hh * param.conv_stride, hh * param.conv_stride + Hw - 1),
                                            span(ww * param.conv_stride, ww * param.conv_stride + Ww - 1),
                                            span::all);
                    (*out)[n](hh, ww, f) = accu(window % (*in[1])[f]) + as_scalar((*in[2])[f]);
                }
            }
        }
    }
    return;
}
Exemple #25
0
//--------------------------------------------------------------------------------------------------
// Linear kernel: k(x, y) = alpha * <x, y>.
double linear_kernel( const arma::colvec& x, const arma::colvec& y, const double& alpha ) {
  return as_scalar( alpha * x.t() * y );
}
Exemple #26
0
//--------------------------------------------------------------------------------------------------
// Polynomial kernel: k(x, y) = (alpha * <x, y> + beta)^n.
double polynomial_kernel( const arma::colvec& x, const arma::colvec& y, 
                          const double& alpha, const double& beta, const double& n ) {
  return pow( as_scalar( alpha * x.t() * y ) + beta, n );
}
// The number of training examples should always be a positive integer.
// Backpropagation for a one-hidden-layer network: accumulates per-example
// gradients (steps 1-4), averages and regularizes them (step 5), and
// stores the stacked result via set_gradient. Returns 0 on success.
int NeuralNetwork::ComputeGradient(const DataMulti &data_multi) {
    const int kNumTrainEx = data_multi.num_train_ex();
    assert(kNumTrainEx >= 1);
    arma::mat accum_hidden_layer_grad = \
                                        arma::zeros<arma::mat>(theta_.at(0).n_rows,\
                                                data_multi.training_features().n_cols);
    arma::mat accum_output_layer_grad = \
                                        arma::zeros<arma::mat>(theta_.at(1).n_rows,\
                                                theta_.at(0).n_rows+1);

    // Iterates over the training examples.
    for(int example_index=0; example_index<kNumTrainEx; example_index++)
    {

        // Performs step 1: forward pass through both layers.
        arma::rowvec example_features = \
                                        data_multi.training_features().row(example_index);
        arma::mat sigmoid_arg = example_features*theta_.at(0).t();
        arma::mat hidden_layer_activation = ComputeSigmoid(sigmoid_arg);
        const arma::mat kOnesMat = arma::ones(hidden_layer_activation.n_rows,1);
        arma::mat hidden_layer_activation_mod = \
                                                arma::join_horiz(kOnesMat,hidden_layer_activation);
        sigmoid_arg = hidden_layer_activation_mod*theta_.at(1).t();
        arma::mat output_layer_activation = ComputeSigmoid(sigmoid_arg);

        // Performs step 2: one-hot encode the label (labels are 1-based)
        // and compute the output-layer error.
        arma::rowvec training_labels = \
                                       arma::zeros<arma::rowvec>(1,output_layer_size_);
        int column_index = \
                           (int)as_scalar(data_multi.training_labels().row(example_index));
        training_labels(column_index-1) = 1;
        arma::colvec output_layer_error = \
                                          (output_layer_activation-training_labels).t();

        // Performs step 3: backpropagate the error to the hidden layer,
        // dropping the bias row before applying the sigmoid gradient.
        arma::colvec hidden_layer_error_term = theta_.at(1).t()*output_layer_error;
        arma::colvec hidden_layer_error = \
                                          hidden_layer_error_term.rows(1,hidden_layer_size_) % \
                                          ComputeSigmoidGradient((example_features*theta_.at(0).t()).t());

        // Performs step 4: accumulate the per-example gradients.
        accum_hidden_layer_grad = \
                                  accum_hidden_layer_grad+hidden_layer_error*example_features;
        accum_output_layer_grad = \
                                  accum_output_layer_grad+output_layer_error*\
                                  arma::join_horiz(arma::ones<arma::mat>(1,1),hidden_layer_activation);
    }

    // Performs step 5 (without regularization).
    arma::mat hidden_layer_grad = (1.0/kNumTrainEx)*accum_hidden_layer_grad;
    arma::mat output_layer_grad = (1.0/kNumTrainEx)*accum_output_layer_grad;

    // Performs step 5 (with regularization); bias columns are not regularized.
    hidden_layer_grad.cols(1,theta_.at(0).n_cols-1) = \
            hidden_layer_grad.cols(1,theta_.at(0).n_cols-1)+\
            (lambda_/kNumTrainEx)*theta_.at(0).cols(1,theta_.at(0).n_cols-1);
    output_layer_grad.cols(1,theta_.at(1).n_cols-1) = \
            output_layer_grad.cols(1,theta_.at(1).n_cols-1)+\
            (lambda_/kNumTrainEx)*theta_.at(1).cols(1,theta_.at(1).n_cols-1);

    // Sets gradient: vectorise both layers and stack them.
    arma::vec hidden_layer_grad_stack = arma::vectorise(hidden_layer_grad);
    arma::vec output_layer_grad_stack = arma::vectorise(output_layer_grad);
    arma::vec gradient_stack = \
                               arma::join_vert(hidden_layer_grad_stack,output_layer_grad_stack);
    set_gradient(gradient_stack);

    return 0;
}
Exemple #28
0
/**
  * The optimal community structure is a subdivision of the network into
  * nonoverlapping groups of nodes in a way that maximizes the number of
  * within-group edges, and minimizes the number of between-group edges.
  * The modularity is a statistic that quantifies the degree to which the
  * network may be subdivided into such clearly delineated groups.
  *
  * The Louvain algorithm is a fast and accurate community detection
  * algorithm (as of writing). The algorithm may also be used to detect
  * hierarchical community structure.
  *
  *     Input:      W       undirected (weighted or binary) connection matrix.
  *                 gamma,  modularity resolution parameter (optional)
  *                             gamma>1     detects smaller modules
  *                             0<=gamma<1  detects larger modules
  *                             gamma=1     (default) classic modularity
  *
  *     Outputs:    Ci,     community structure
  *                 Q,      modularity

  * Note: Ci and Q may vary from run to run, due to heuristics in the
  * algorithm. Consequently, it may be worth to compare multiple runs.
  *
  * Reference: Blondel et al. (2008)  J. Stat. Mech. P10008.
  *           Reichardt and Bornholdt (2006) Phys Rev E 74:016110.
  */
// Louvain community detection (see the header comment above): returns the
// community assignment and writes the optimal modularity to *Qopt.
// Note: results vary run to run because the node order is shuffled with a
// random seed each pass.
urowvec Connectome::modularity_louvain(mat W, double *Qopt, double gamma)
{
    uint N = W.n_rows, h = 1, n = N, u =0, ma =0, mb =0;
    double s = accu(W), wm = 0, max_dQ = -1;
    uvec M, t;
    rowvec dQ;
    // Hierarchy of community assignments, one level per outer pass.
    field<urowvec> Ci(20);
    Ci(0) = urowvec(); Ci(1) = linspace<urowvec>(0,n-1,n);
    rowvec Q = "-1,0";

    // Outer loop: repeat until modularity stops improving.
    while (Q(h)-Q(h-1) > 1e-10) {
        // Node strengths (K) and per-community strengths (Km, Knm).
        rowvec K = sum(W,0), Km = K;
        mat Knm = W;

        M = linspace<uvec>(0,n-1,n);

        bool flag = true;

        // Local-move phase: relocate nodes while any move improves dQ.
        while (flag) {
            flag = false;

            arma_rng::set_seed_random();
            t = shuffle(linspace<uvec>(0,n-1,n));
            for (uint i =0;i<n;++i) {
                u = t(i);
                ma = M(u);
                // Modularity gain of moving node u into each community.
                dQ = Knm.row(u) - Knm(u,ma)+W(u,u)-
                        gamma*K(u)*(Km-Km(ma)+K(u))/s;
                dQ(ma) = 0;
                max_dQ = dQ.max();
                mb = as_scalar(find(dQ == max_dQ,1));
                if (max_dQ > 1e-10) {
                    flag = true;
                    M(u) = mb;

                    // Update community strength bookkeeping for the move.
                    Knm.col(mb) += W.col(u);
                    Knm.col(ma) -= W.col(u);
                    Km(mb) += K(u);
                    Km(ma) -= K(u);
                }
            }
        }
        // Aggregation phase: relabel and collapse communities into nodes.
        Ci(++h) = zeros<urowvec>(1,N);
        M = matlabUnique(M);
        for (uint u=0;u<n;++u) {
            Ci(h)(find(Ci(h-1) == u)).fill(M(u));
        }
        n = M.max()+1;
        // Build the coarse-grained weight matrix between communities.
        mat w = zeros(n,n);
        for (uint u =0;u<n;++u)
            for (uint v=u;v<n;++v) {
                wm = accu(W(find(M==u),find(M==v)));
                w(u,v) = wm;
                w(v,u) = wm;
            }
        W = w;
        Q.resize(h+1);
        Q(h) = trace(W)/s - gamma*accu((W*W)/(s*s));
    }
    *Qopt = Q(h);
    return Ci(h);
}
Exemple #29
0
// Copyright Hien Duy Nguyen - University of Queensland 2016/06/02
// Log-likelihood of a GG-component mixture of Gaussian autoregressions.
//   XX/YY:  R lists of per-series design matrices / responses (length NN)
//   BETA:   (PP+1) x GG coefficient matrix
//   PII:    length-GG mixing proportions
//   VAR:    length-GG component variances
//   MM/PP:  series length and AR order; MAXPP aligns the likelihood window
// Returns the total log-likelihood over all NN series.
double LOGLIKE_FUN(SEXP XX, SEXP YY, SEXP BETA, SEXP PII, SEXP VAR, int MM, int NN, int GG, int PP, int MAXPP) {
  
  // Load Primary Variables 
  Rcpp::List XX_R(XX);
  Rcpp::List YY_R(YY);
  int MM_LEFT = MM-PP;
  
  // The parameter wrappers do not depend on the series index, so build
  // them once here instead of inside the nn loop (no-copy views over the
  // underlying R memory; behavior is unchanged).
  Rcpp::NumericVector PI_C(PII);
  arma::colvec PI_A(PI_C.begin(),GG,false);
  
  Rcpp::NumericVector VAR_C(VAR);
  arma::colvec VAR_A(VAR_C.begin(),GG,false);
  
  Rcpp::NumericMatrix BETA_C(BETA);
  arma::mat BETA_A(BETA_C.begin(),PP+1,GG,false);
  
  // Initiate Likelihood Value
  double LL = 0;
  
  // Initiate Loop
  
  for (int nn = 0; nn < NN; nn++) {
    
    // Mixture density accumulator for series nn.
    double INNER_1 = 0;
    
    // Load the series-specific design matrix and response (no-copy views).
    SEXP XX_S(XX_R[nn]);
    Rcpp::NumericMatrix XX_C(XX_S);
    arma::mat XX_A(XX_C.begin(),MM_LEFT,PP+1,false);
    
    SEXP YY_S(YY_R[nn]);
    Rcpp::NumericVector YY_C(YY_S);
    arma::colvec YY_A(YY_C.begin(),MM_LEFT,false);

    for (int gg = 0; gg < GG; gg++) {
      
      // Component weight, log-density accumulator and component scale.
      double INNER_2 = PI_A(gg);
      double IN_PLUS = 0;
      double VAR_IN = VAR_C(gg);
      double SD_C = sqrt(VAR_IN);
      arma::colvec BETA_IN = BETA_A.col(gg);
      
      for (int mm = MAXPP-PP; mm < MM_LEFT; mm++) {
        
        // IN_PLUS = IN_PLUS - 0.5*log(2*M_PI) - log(SD_C) - 0.5*pow(YY_A(mm) - as_scalar(XX_A.row(mm)*BETA_IN),2)/VAR_IN;
        
        // Gaussian log-density of the AR residual for observation mm.
        IN_PLUS = IN_PLUS + R::dnorm(YY_A(mm), as_scalar(XX_A.row(mm)*BETA_IN), SD_C, TRUE);// - log(MM_LEFT);
      }
      
      // Combine Dummies
      INNER_1 = INNER_1 + INNER_2*exp(IN_PLUS); 
    }
    
    // Add to Log Likelihood
    LL = LL + log(INNER_1);// + (MM_LEFT-MAXPP+PP)*log(MM_LEFT);
  }
  
  return LL;
}
Exemple #30
0
// Builds the residual vector h(eta) for the three-factor measurement model,
// its weighted quadratic form hSh = -0.5 * h' * iS * h, and (unless
// fast==1) the gradient and Hessian of hSh with respect to eta.
// Layout of h: the first three entries are the structural residuals
// (zeta0..zeta2), followed by the measurement residuals for mu0, mu1, mu2.
hObj hb(const rowvec &eta,
       const rowvec &data, const mat &iS,
       const rowvec &mu0, const rowvec &mu1, const rowvec &mu2,
       const rowvec &lambda0, 
       const rowvec &lambda1, 
       const rowvec &lambda2,
       const rowvec &beta0,
       const rowvec &beta1,
       const rowvec &beta2,
       const rowvec &gamma,
       const rowvec &gamma2,
       int fast=0) {  

  unsigned ny=mu0.n_elem+mu1.n_elem+mu2.n_elem;
  int k=3+ny;
  colvec h(k);
  // pos walks the measurement rows of h; dpos walks the data columns.
  unsigned pos=2;
  unsigned dpos = 0;
  //  cerr << "mu0" << endl;
  for (unsigned i=0; i<mu0.n_elem; i++) {
    pos++;
    //    cerr << data(dpos) << "; ";
    h(pos) = data(dpos)-mu0(i)-lambda0(i)*eta(0);
    dpos++;
  }
  //  cerr << "mu1" << endl;
  for (unsigned i=0; i<mu1.n_elem; i++) {
    pos++;
    //    cerr << data(dpos) << "; ";
    h(pos) = data(dpos)-mu1(i)-lambda1(i)*eta(1);
    dpos++;
  }
  //  cerr << "mu2" << endl;
  for (unsigned i=0; i<mu2.n_elem; i++) {
    pos++;
    //    cerr << data(dpos) << "; ";
    h(pos) = data(dpos)-mu2(i)-lambda2(i)*eta(2);
    dpos++;
  }
  // Structural residuals: eta2 and eta1 depend on eta0 both linearly and
  // quadratically (gamma/gamma2 terms).
  double zeta2 = eta(2)-gamma(0)*eta(0)-gamma(1)*eta(0)*eta(0);
  double zeta1 = eta(1)-gamma2(0)*eta(0)-gamma2(1)*eta(0)*eta(0);
  double zeta0 = eta(0);
  // cerr << "dpos=" << dpos << endl;
  // cerr << "mu0=" << mu0 << endl;
  // cerr << "mu1=" << mu1 << endl;
  // cerr << "mu2=" << mu2 << endl;
  //  cerr << "beta" << endl;
  // Covariate adjustments: remaining data columns enter via beta0..beta2.
  for (unsigned i=0; i<beta0.n_elem; i++) {
    //    cerr << data(dpos) << "; ";
    zeta0 -= beta0(i)*data(dpos);
    dpos++;
  }
  for (unsigned i=0; i<beta1.n_elem; i++) {
    zeta1 -= beta1(i)*data(dpos);
    dpos++;
  }
  for (unsigned i=0; i<beta2.n_elem; i++) {
    zeta2 -= beta2(i)*data(dpos);
    dpos++;
  }
  h(0) = zeta0;
  h(1) = zeta1;
  h(2) = zeta2;

  mat iSh = iS*h;
  hObj res;
  res.h = h;
  res.hSh = -0.5*as_scalar(trans(h)*iSh);
  // fast mode: skip gradient/Hessian computation.
  if (fast==1) 
    return(res);
  
  // D = dh/deta: Jacobian of the residual vector with respect to eta.
  mat D = zeros(k,3);
  D(0,0) = 1;  D(1,0) = -gamma2(0)-2*gamma2(1)*eta(0); D(2,0) = -gamma(0)-2*gamma(1)*eta(0);
  D(1,1) = 1; D(2,2) = 1;
  //  D[1,1] = 1; D[2,2] = 1;
  pos = 3;
  for (unsigned i=0; i<mu0.n_elem; i++) {
    D(pos,0) = -lambda0(i);
    pos++;
  }
  for (unsigned i=0; i<mu1.n_elem; i++) {
    D(pos,1) = -lambda1(i);
    pos++;
  }
  for (unsigned i=0; i<mu2.n_elem; i++) {
    D(pos,2) = -lambda2(i);
    pos++;
  }
  // cerr << "D=\n" << D << endl;
  // Gauss-Newton style gradient and Hessian of hSh.
  mat dS = -trans(D)*iS;  
  res.grad = dS*h;
  res.hess = dS*D;
  //mat dummy = trans(iSh)*d2eta0;
  //res.hess(0,0) -= dummy[0];
  // Second-derivative correction from the quadratic eta0 terms in zeta1/zeta2.
  res.hess(0,0) -= -2*gamma(1)*iSh(2) - 2*gamma2(1)*iSh(1);
  return(res);
}
}