Example #1
rowvec dF2du(unsigned row, irowvec causes, const DataPairs &data, const gmat &sigma, vec u){

  // Attaining variance covariance matrix etc. (conditional on u)
  vmat cond_sig = sigma(causes);
  vec cond_mean = cond_sig.proj*u;

  rowvec alp = data.alpha_get(row, causes);
  rowvec gam = data.gamma_get(row, causes);
  colvec alpgam = alp.t() - gam.t();

  double cdf = pn(alpgam, cond_mean, cond_sig.vcov);

  vec ll = sqrt(diagvec(cond_sig.vcov));
  mat Lambda = diagmat(ll);
  mat iLambda = diagmat(1/ll);
  mat R = iLambda*cond_sig.vcov*iLambda;
  mat LR = Lambda*R;
  double r = R(0,1);

  vec ytilde = iLambda*(alpgam - cond_mean);
  vecmat D = Dbvn(ytilde(0),ytilde(1),r);
  mat M = -LR*D.V;

  vec dcdfdu = trans(cond_sig.proj)*cond_sig.inv*M;

  rowvec dF2du_1 = data.dpiduMarg_get(row, causes(0), 0)*data.piMarg_get(row, causes(1), 1)*cdf;
  rowvec dF2du_2 = data.dpiduMarg_get(row, causes(1), 1)*data.piMarg_get(row, causes(0), 0)*cdf;
  vec dF2du_3 = data.piMarg_get(row, causes(0), 0)*data.piMarg_get(row, causes(1), 1)*dcdfdu;

  rowvec dF2du = dF2du_1 + dF2du_2 + dF2du_3.t();
  return(dF2du);
};
Example #2
arma::mat grassmannQ::retract(double stepSize, std::string method, bool first){
  if(retraction==1){  // QR retraction
    Yt = Y + stepSize*descD;
    arma::qr_econ(retract_U, retract_V, Yt);
    if(retract_V(0,0) < 0){
      retract_U = -retract_U;
    }
    Yt = retract_U;
  }else if(retraction==0){  // SVD-based retraction: cos/sin formula built from the thin SVD of -xi
    if(first){
      arma::mat U_svd;
      arma::svd_econ(U_svd, retract_Sigma, retract_V, -xi);
      retract_U = arma::mat(n, 2*p, arma::fill::zeros);
      retract_U(arma::span::all, arma::span(0, p-1))   = Y*retract_V;
      retract_U(arma::span::all, arma::span(p, 2*p-1)) = U_svd;
    }
    arma::mat retract_middle(2*p, p, arma::fill::zeros);
    retract_middle(arma::span(0, p-1),   arma::span::all) = diagmat(cos(retract_Sigma*stepSize));
    retract_middle(arma::span(p, 2*p-1), arma::span::all) = diagmat(sin(retract_Sigma*stepSize));
    Yt = retract_U*retract_middle*retract_V.t();
  }
  return Yt;
}
Example #3
inline
mat
CovMeans_mat_kth::mehrtash_suggestion(mat cov_i)
{
  // Following Mehrtash's suggestion as per email dated June 26th, 2014:
  // symmetrise the covariance and floor small/negative eigenvalues so that
  // the returned matrix is positive definite.
  mat new_covi;

  double THRESH = 0.000001;
  new_covi = 0.5*(cov_i + cov_i.t());
  vec D;
  mat V;
  eig_sym(D, V, new_covi);
  uvec q1 = find(D < THRESH);

  if (q1.n_elem > 0)
  {
    for (uword pos = 0; pos < q1.n_elem; ++pos)
    {
      D( q1(pos) ) = THRESH;
    }

    new_covi = V*diagmat(D)*V.t();
  }

  return new_covi;
  // end suggestions
}
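A standalone sketch of the eigenvalue-flooring idea used above (a free function written here for illustration, not part of CovMeans_mat_kth): symmetrise a rank-deficient covariance-like matrix, floor its small eigenvalues, and confirm that the result now admits a Cholesky factorisation.

#include <armadillo>
#include <iostream>
using namespace arma;

// Floor eigenvalues below `thresh` so the matrix becomes positive definite.
mat floor_eigenvalues(mat C, double thresh = 1e-6)
{
  C = 0.5*(C + C.t());                      // symmetrise
  vec D;
  mat V;
  eig_sym(D, V, C);
  D.elem(find(D < thresh)).fill(thresh);    // floor small/negative eigenvalues
  return V*diagmat(D)*V.t();
}

int main()
{
  mat A = randu<mat>(5, 3);
  mat C = A*A.t();                          // rank-deficient 5x5 matrix

  mat R;
  // the two-argument chol() returns false instead of throwing on failure
  std::cout << "chol before flooring ok: " << chol(R, C) << std::endl;
  std::cout << "chol after  flooring ok: " << chol(R, floor_eigenvalues(C)) << std::endl;
  return 0;
}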
Example #4
//------------------------------------------------------------------------------
void OrbitalEquation::computeOneParticleReducedDensity()
{
    // All possible spatial orbitals
    for(int i=0; i< nOrbitals; i++){
        for(int j=i; j<nOrbitals; j++){
            invRho(i,j)  = reducedOneParticleOperator(2*i,2*j);
            invRho(i,j)  += reducedOneParticleOperator(2*i+1,2*j+1);
            invRho(j,i) = conj(invRho(i,j));
        }
    }

#ifdef DEBUG
//#if 1
    cout << "OrbitalEquation::computeOneParticleReducedDensity()" << endl;
    cout << "Trace(rho) = " << real(trace(invRho)) << " nParticles = " << nParticles << endl;
#endif


#if 0
    //---------------------------
    // Regularization
    //---------------------------
    const double e = 1e-10;
    cx_mat rho_U;
    vec rho_lambda;
    eig_sym(rho_lambda, rho_U, invRho);

    rho_lambda = rho_lambda + e*exp(-rho_lambda*(1.0/e));
    cx_mat X = rho_U*diagmat(1.0/rho_lambda)*rho_U.t();

    invRho += e*X;
    //---------------------------
#endif

}
Example #5
void QUIC_SVD::ExtractSVD(arma::mat& u,
                          arma::mat& v,
                          arma::mat& sigma)
{
  // Calculate A * V_hat, necessary for further calculations.
  arma::mat projectedMat;
  if (dataset.n_cols > dataset.n_rows)
    projectedMat = dataset.t() * basis;
  else
    projectedMat = dataset * basis;

  // Calculate the squared projected matrix.
  arma::mat projectedMatSquared = projectedMat.t() * projectedMat;

  // Calculate the SVD of the above matrix.
  arma::mat uBar, vBar;
  arma::vec sigmaBar;
  arma::svd(uBar, sigmaBar, vBar, projectedMatSquared);

  // Calculate the approximate SVD of the original matrix, using the SVD of the
  // squared projected matrix.
  v = basis * vBar;
  sigma = arma::sqrt(diagmat(sigmaBar));
  u = projectedMat * vBar * sigma.i();

  // Since columns are sampled, the unitary matrices have to be exchanged, if
  // the transposed matrix is not passed.
  if (dataset.n_cols > dataset.n_rows)
  {
    arma::mat tempMat = u;
    u = v;
    v = tempMat;
  }
}
Example #6
        // Reconstruct block in the image sequence after thresholding
        arma::cube Reconstruct(double lambda) {
            arma::cube v = arma::zeros<arma::cube>(Nx, Ny, T);
            arma::cube weights = arma::zeros<arma::cube>(Nx, Ny, T);

            arma::mat block, Ublock, Vblock;
            arma::vec Sblock;

            block.set_size(Bs*Bs, T);
            Ublock.set_size(Bs*Bs, T);
            Sblock.set_size(T);
            Vblock.set_size(T, T);

            #pragma omp parallel for shared(v, weights) \
                        private(block, Ublock, Sblock, Vblock)
            for (int it = 0; it < newVecSize; it++) {
                Ublock = U[it];
                Sblock = S[it];
                Vblock = V[it];

                // Basic singular value thresholding
                // arma::vec Snew = arma::sign(Sblock)
                //                      % arma::max(
                //                          arma::abs(Sblock) - lambda,
                //                          arma::zeros<arma::vec>(T));

                // Gaussian-weighted singular value thresholding
                arma::vec wvec = arma::abs(Sblock.max()
                                           * arma::exp(-1
                                                * lambda
                                                * arma::square(Sblock)/2));

                // Apply threshold
                arma::vec Snew = arma::sign(Sblock)
                                   % arma::max(
                                        arma::abs(Sblock) - wvec,
                                        arma::zeros<arma::vec>(T));

                // Reconstruct from SVD
                block = Ublock * diagmat(Snew) * Vblock.t();

                // Deal with block weights (TODO: currently all weights = 1).
                // Accumulation into the shared cubes is serialised because
                // blocks processed by different threads may overlap.
                #pragma omp critical
                for (int k = 0; k < T; k++) {
                    int newy = patches(0, actualpatches(it), k);
                    int newx = patches(1, actualpatches(it), k);
                    v(arma::span(newy, newy+Bs-1),
                      arma::span(newx, newx+Bs-1),
                      arma::span(k, k)) += arma::reshape(block.col(k), Bs, Bs);
                    weights(arma::span(newy, newy+Bs-1),
                            arma::span(newx, newx+Bs-1),
                            arma::span(k, k)) += arma::ones<arma::mat>(Bs, Bs);
                }
            }

            // Include the weighting
            v /= weights;
            v.elem(find_nonfinite(v)).zeros();
            return v;
        }
Example #7
void WhitenFeatureMajorMatrix(const mat& matX,
            mat& matXWhitened,
            mat& matWhitening)
{
  mat matU, matV;
  vec s;
  svd(matU, s, matV, cov(matX));
  matWhitening = matU * diagmat(1 / sqrt(s)) * trans(matV);
  matXWhitened = matX * matWhitening;
}
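A minimal usage sketch for the function above (my example, assuming the function declaration and `using namespace arma;` are in scope): whiten a correlated feature-major matrix and check that the empirical covariance of the result is close to the identity.

#include <armadillo>
using namespace arma;

int main()
{
  arma_rng::set_seed(1);
  mat X = randn<mat>(500, 4) * randu<mat>(4, 4);   // 500 samples, 4 correlated features

  mat Xw, W;
  WhitenFeatureMajorMatrix(X, Xw, W);

  mat C = cov(Xw);          // should be close to the 4x4 identity matrix
  C.print("cov(Xw):");
  return 0;
}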
Example #8
//update gradient and hessian
void el_sem_naive::set_gradient_hessian(vec &grad, mat &hessian)
{
    int i;
    hessian.zeros();
    d_ = (constraints_.t() * dual_) + 1.0;
    grad = -sum( constraints_ * diagmat( 1.0 / d_), 1);

    for(i = 0; i < n_ ; i ++) {
        hessian += constraints_.col(i) * constraints_.col(i).t() / pow(d_(i),2);
    }
}
Example #9
// Posterior Density Function to sample Theta 
double f_theta( colvec logTheta, colvec mTheta, mat Otheta, double Tau, mat Y, mat Fit, rowvec Sigma, double logEHRTime ) 
{
  double prior, like, post;
  colvec cTheta = ( logTheta - mTheta ) ;
  prior = - 0.5 * Tau * as_scalar( cTheta.t() * Otheta * cTheta );
  mat D =  diagmat( 1 / Sigma );
  like  = - 0.5 * accu( pow ( log( Y ) - log( Fit ), 2 ) * D ); // Need to figure out what to do with Sigma
  // Conditional Posterior -----------------------------------------------------------
  post = prior + like + Rf_dnorm4( logEHRTime, 0, 1, 1 );
  return post;
}
Example #10
void origNISim::initialize() {
    mat W_t(maxVertexId, maxVertexId, fill::zeros);
    for (int i = 0; i < maxVertexId; i++) {
        int e = origGraphSrc[i + 1], s = origGraphSrc[i];
        for (int j = s; j < e; j++) {
            W_t(i, origGraphDst[j]) = 1.0;
        }
    }
    double sumRow;
    double Degr = 0;
    for (int i = 0; i < maxVertexId; i++) {
        sumRow = 0;
        for (int j = 0; j < maxVertexId; j++)
            sumRow += W_t(i, j);
        if (sumRow == 0.0) {
            printf("node %d has no ingoing edges\n", i);
        } else {
            for (int j = 0; j < maxVertexId; j++)
                W_t(i, j) = W_t(i, j) / sumRow;
        }
        Degr += sumRow;
    }
    //start svd
    Mat<double> U;
    Col<double> s;
    Mat<double> V_t;
    svd(U, s, V_t, W_t);
    mat V = V_t.t();

    U = U.submat(0, 0, maxVertexId - 1, Rank - 1);
    V = V.submat(0, 0, Rank - 1, maxVertexId - 1);
    s = s.submat(0, 0, Rank - 1, 0);
    Ku = kron(U, U);  // Kronecker product
    mat sigma = kron(s, s);//one column
    mat K_sigma = diagmat(sigma);
    Kv = kron(V, V);

    mat K_vu = Kv * Ku;

    mat I(maxVertexId, maxVertexId);
    I.eye();
    A = inv(inv(K_sigma) - decayFactor * K_vu);
    V_r = Kv * vectorise(I);

    A.save(Apath);
    V_r.save(V_rpath);
    Kv.save(Kvpath);
    Ku.save(Kupath);
}
Example #11
    // Returns the linear predictor from the full step
    Rcpp::NumericVector
    densePred::gamma(const Rcpp::NumericVector& xwts,
		     const Rcpp::NumericVector& wtres)
	throw (std::runtime_error) {
	int n = d_X.nrow();
	if (xwts.size() != n || wtres.size() != n)
	    throw
		std::runtime_error("length(xwts) or length(wtres) != nrow(X)");
	arma::vec a_xwts = arma::vec(xwts.begin(), n, false, true),
	    a_wtres = arma::vec(wtres.begin(), n, false, true);
	arma::vec tmp = solve(diagmat(a_xwts) * a_X, a_wtres);
	std::copy(tmp.begin(), tmp.end(), a_delta.begin());
	d_coef = d_coef0 + d_delta;
	return NumericVector(wrap(a_X * a_coef));
    }
Example #12
arma::vec QRPMMCPP(arma::mat xr, arma::vec yr, arma::vec betar, arma::vec betaoldr,
                   double to, int m, double ta, double lamdar){

  double toler = to;
  int maxit = m;
  double tau = ta, lamda = lamdar;
  arma::mat x = xr, product, xt;
  arma::vec W, newX, z, signw, v, r, y = yr, E, delta;
  arma::vec betaold, beta = betar, qrbeta = betaoldr;
  arma::uvec order, index;

  int n = x.n_rows;
  x.insert_cols( 0, arma::ones(n) );
  int p = x.n_cols;
  double error = 10000, epsilon = 0.9999, epsilonprime;
  //toler = 1e-3;
  int iteration = 1;
  //beta = solve(x.t()*x, x.t()*y);
  product.ones(p, n);
  epsilonprime = toler*p/2;
  xt = x.t();
  qrbeta.insert_rows(0, 1);

  while (iteration <= maxit && error > toler)
  {
    betaold = beta;

    r = y - x*beta;
    v = 1 - 2*tau - r/(arma::abs(r) + epsilon);
    E = lamda*arma::abs(beta)/beta/(epsilonprime + arma::abs(beta))/qrbeta/qrbeta;
    E(0) = 0;
    W = 1/(epsilon + abs(r));

    for (int i = 0; i < n; i++)
      { product.col(i) = xt.col(i)*W(i); }

    delta = arma::solve(product*x - diagmat(E), xt*v - E%beta);
    beta = beta - delta;

    error = sum(abs(delta));
    iteration++;
  }
  arma::vec betanew = beta;
  return betanew;
}
Example #13
inline
bool
op_sqrtmat_sympd::apply_direct(Mat<typename T1::elem_type>& out, const Base<typename T1::elem_type,T1>& expr)
  {
  arma_extra_debug_sigprint();
  
  #if defined(ARMA_USE_LAPACK)
    {
    typedef typename T1::pod_type   T;
    typedef typename T1::elem_type eT;
    
    const unwrap<T1>   U(expr.get_ref());
    const Mat<eT>& X = U.M;
    
    arma_debug_check( (X.is_square() == false), "sqrtmat_sympd(): given matrix must be square sized" );
    
    Col< T> eigval;
    Mat<eT> eigvec;
    
    const bool status = auxlib::eig_sym_dc(eigval, eigvec, X);
    
    if(status == false)  { return false; }
    
    const uword N          = eigval.n_elem;
    const T*    eigval_mem = eigval.memptr();
    
    bool all_pos = true;
    
    for(uword i=0; i<N; ++i)  { all_pos = (eigval_mem[i] < T(0)) ? false : all_pos; }
    
    if(all_pos == false)  { return false; }
    
    eigval = sqrt(eigval);
    
    out = eigvec * diagmat(eigval) * eigvec.t();
    
    return true;
    }
  #else
    {
    arma_ignore(out);
    arma_ignore(expr);
    arma_stop_logic_error("sqrtmat_sympd(): use of LAPACK must be enabled");
    return false;
    }
  #endif
  }
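For reference, a small usage sketch of the user-facing sqrtmat_sympd() that this operator backs (my own example, not part of the Armadillo sources): build a symmetric positive definite matrix and verify that the returned square root reproduces it.

#include <armadillo>
#include <iostream>
using namespace arma;

int main()
{
  mat A = randu<mat>(5, 5);
  mat X = A*A.t() + 5*eye(5, 5);   // symmetric positive definite

  mat S = sqrtmat_sympd(X);        // symmetric matrix square root

  std::cout << "||S*S - X||_fro = " << norm(S*S - X, "fro") << std::endl;
  return 0;
}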
Example #14
    arma::mat Mahalanobis::getDecomposition() {

        mat Z;
        mat U, V;
        vec s;

        svd_econ(U, s, V, M);

        for(int i = 0; i < s.n_elem; ++i)
            s(i) = sqrt(s(i));

        mat matS = diagmat(s);

        Z = U * matS;

        return Z;

    }
Example #15
/**
  * Participation coefficient is a measure of diversity of intermodular
  * connections of individual nodes.
  *  Inputs:     W,      binary/weighted, directed/undirected
  *                      connection matrix
  *              Ci,     community affiliation vector
  *  Output:     P,      participation coefficient

  * Note: The output for directed graphs is the "out-neighbor"
  *   participation coefficient.
  * Reference: Guimera R, Amaral L. Nature (2005) 433:895-900.
  */
rowvec Connectome::participationCoefficient(const mat &W, const urowvec &Ci)
{
    uint n = W.n_rows;
    mat binaryW = zeros(n,n);
    binaryW(find(W!=0)).fill(1);
    rowvec Ko = sum(binaryW,0);
    // A^T = (B C)^T = C^T B^T = C B^T since C = C^T
    mat Gc = diagmat(conv_to<rowvec>::from(Ci))*strans(abs(sign(W)));
    rowvec Kc2 = zeros(1,n);

    for (uint i=0;i<=Ci.max();++i) {
        mat c = zeros(n,n);
        c(find(Gc==i)).fill(1);
        Kc2 += arma::pow(sum((W%c),0),2);
    }

    rowvec P = ones(1,n) - Kc2/arma::pow(Ko,2);
    P(find(Ko == 0)).fill(0);
    return P;
}
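A standalone sketch of the formula described in the comment above, P_i = 1 - sum_s (K_is / K_i)^2, written with explicit loops for a toy binary graph (my illustration, independent of the Connectome class):

#include <armadillo>
using namespace arma;

int main()
{
  mat W = { {0,1,1,0,0},
            {1,0,1,0,0},
            {1,1,0,1,0},
            {0,0,1,0,1},
            {0,0,0,1,0} };
  urowvec Ci = {0, 0, 0, 1, 1};        // two communities

  uword n = W.n_rows;
  rowvec P(n, fill::zeros);
  for (uword i = 0; i < n; ++i) {
    double Ki = accu(W.row(i));        // (out-)degree of node i
    if (Ki == 0) { continue; }         // isolated node: P stays 0
    double sumsq = 0;
    for (uword c = 0; c <= Ci.max(); ++c) {
      double Kic = 0;                  // links from i into community c
      for (uword j = 0; j < n; ++j)
        if (Ci(j) == c) Kic += W(i, j);
      sumsq += (Kic/Ki)*(Kic/Ki);
    }
    P(i) = 1.0 - sumsq;
  }
  P.print("participation coefficient:");
  return 0;
}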
Example #16
    arma::mat TogersonMetricLearner::generateY(arma::mat B) {

        mat Y;
        mat U, V;
        vec s;

        U.fill(0.0);
        V.fill(0.0);
        s.fill(0.0);

        svd(U, s, V, B);

        for(int i = 0; i < s.n_elem; ++i)
            s(i) = sqrt(s(i));

        mat matS = diagmat(s);

        Y = U * matS;

        return Y;

    }
Example #17
inline
bool
op_princomp::direct_princomp
  (
         Mat< std::complex<typename T1::pod_type> >&     coeff_out,
         Mat< std::complex<typename T1::pod_type> >&     score_out,
         Col<              typename T1::pod_type  >&     latent_out,
         Col< std::complex<typename T1::pod_type> >&     tsquared_out,
  const Base< std::complex<typename T1::pod_type>, T1 >& X,
  const typename arma_cx_only<typename T1::elem_type>::result* junk
  )
  {
  arma_extra_debug_sigprint();
  arma_ignore(junk);
  
  typedef typename T1::pod_type     T;
  typedef          std::complex<T> eT;
  
  const unwrap_check<T1> Y( X.get_ref(), score_out );
  const Mat<eT>& in    = Y.M;
  
  const uword n_rows = in.n_rows;
  const uword n_cols = in.n_cols;
  
  if(n_rows > 1) // more than one sample
    {
    // subtract the mean - use score_out as temporary matrix
    score_out = in;  score_out.each_row() -= mean(in);
 	  
    // singular value decomposition
    Mat<eT> U;
    Col< T> s;
    
    const bool svd_ok = svd(U, s, coeff_out, score_out); 
    
    if(svd_ok == false)  { return false; }
    
    // normalize the eigenvalues
    s /= std::sqrt( double(n_rows - 1) );
    
    // project the samples to the principals
    score_out *= coeff_out;
    
    if(n_rows <= n_cols) // number of samples is less than their dimensionality
      {
      score_out.cols(n_rows-1,n_cols-1).zeros();
      
      Col<T> s_tmp = zeros< Col<T> >(n_cols);
      s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
      s = s_tmp;
          
      // compute the Hotelling's T-squared   
      s_tmp.rows(0,n_rows-2) = 1.0 / s_tmp.rows(0,n_rows-2);
      const Mat<eT> S = score_out * diagmat(Col<T>(s_tmp));                     
      tsquared_out = sum(S%S,1); 
      }
    else
      {
      // compute the Hotelling's T-squared   
      const Mat<eT> S = score_out * diagmat(Col<T>(T(1) / s));                     
      tsquared_out = sum(S%S,1);
      }
    
    // compute the eigenvalues of the principal vectors
    latent_out = s%s;
    
    }
  else // 0 or 1 samples
    {
    coeff_out.eye(n_cols, n_cols);
    
    score_out.copy_size(in);
    score_out.zeros();
      
    latent_out.set_size(n_cols);
    latent_out.zeros();
      
    tsquared_out.set_size(n_rows);
    tsquared_out.zeros();
    }
  
  return true;
  }
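A short usage sketch of the user-facing princomp() implemented by these operators (my own example, assuming a reasonably recent Armadillo): run PCA on random data and confirm that scores times coefficients reproduce the mean-centred input.

#include <armadillo>
#include <iostream>
using namespace arma;

int main()
{
  mat X = randu<mat>(100, 5);       // 100 samples, 5 variables

  mat coeff, score;
  vec latent, tsquared;
  princomp(coeff, score, latent, tsquared, X);

  latent.print("variance of each principal component:");

  mat recon = score * coeff.t();    // reconstruct the mean-centred data
  recon.each_row() += mean(X);      // add the column means back
  std::cout << "||recon - X||_fro = " << norm(recon - X, "fro") << std::endl;
  return 0;
}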
Example #18
inline
void
op_princomp::direct_princomp
  (
        Mat< std::complex<T> >& coeff_out,
        Mat< std::complex<T> >& score_out,
        Col<T>&                 latent_out,
        Col< std::complex<T> >& tsquared_out,
  const Mat< std::complex<T> >& in
  )
  {
  arma_extra_debug_sigprint();
  
  typedef std::complex<T> eT;
  
  const u32 n_rows = in.n_rows;
  const u32 n_cols = in.n_cols;
  
  if(n_rows > 1) // more than one sample
    {
    // subtract the mean - use score_out as temporary matrix
    score_out = in - repmat(mean(in), n_rows, 1);
 	  
    // singular value decomposition
    Mat<eT> U;
    Col<T> s;
    
    const bool svd_ok = svd(U,s,coeff_out,score_out); 
    
    if(svd_ok == false)
      {
      arma_print("princomp(): singular value decomposition failed");
      
      coeff_out.reset();
      score_out.reset();
      latent_out.reset();
      tsquared_out.reset();
      
      return;
      }
    
    
    //U.reset();
    
    // normalize the eigenvalues
    s /= std::sqrt(n_rows - 1);
    
    // project the samples to the principals
    score_out *= coeff_out;
    
    if(n_rows <= n_cols) // number of samples is less than their dimensionality
      {
      score_out.cols(n_rows-1,n_cols-1).zeros();
      
      Col<T> s_tmp = zeros< Col<T> >(n_cols);
      s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
      s = s_tmp;
          
      // compute the Hotelling's T-squared   
      s_tmp.rows(0,n_rows-2) = 1.0 / s_tmp.rows(0,n_rows-2);
      const Mat<eT> S = score_out * diagmat(Col<T>(s_tmp));                     
      tsquared_out = sum(S%S,1); 
      }
    else
      {
      // compute the Hotelling's T-squared   
      const Mat<eT> S = score_out * diagmat(Col<T>(T(1) / s));                     
      tsquared_out = sum(S%S,1);
      }
    
    // compute the eigenvalues of the principal vectors
    latent_out = s%s;
    
    }
  else // single sample - row
    {
    if(n_rows == 1)
      {
      coeff_out = eye< Mat<eT> >(n_cols, n_cols);
      
      score_out.copy_size(in);
      score_out.zeros();
      
      latent_out.set_size(n_cols);
      latent_out.zeros();
      
      tsquared_out.set_size(1);
      tsquared_out.zeros();    
      }
    else
      {
      coeff_out.reset();
      score_out.reset();
      latent_out.reset();
      tsquared_out.reset();
      }
    }
  }
Example #19
void uhfsolve::setupUnitMatrices(){
    //Bring overlap matrix to unit form, set up V
    eig_sym(s_diag,U,Bs.S);           //following Thijssen, p38-39
    V = U*diagmat(1.0/sqrt(s_diag));
}
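The V built above is the canonical orthogonalisation of the overlap matrix: V.t()*S*V should equal the identity. A quick standalone check of that identity, with a random symmetric positive definite matrix standing in for Bs.S (my sketch, not part of uhfsolve):

#include <armadillo>
#include <iostream>
using namespace arma;

int main()
{
  mat A = randu<mat>(6, 6);
  mat S = A*A.t() + eye(6, 6);               // stand-in overlap matrix (sym. pos. def.)

  vec s_diag;
  mat U;
  eig_sym(s_diag, U, S);
  mat V = U*diagmat(1.0/sqrt(s_diag));

  std::cout << "||V.t()*S*V - I||_fro = " << norm(V.t()*S*V - eye(6, 6), "fro") << std::endl;
  return 0;
}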
Example #20
//' @title Master Wrapper for the GMWM Estimator
//' @description This function generates WV, GMWM Estimator, and an initial test estimate.
//' @param data A \code{vec} containing the data.
//' @param theta A \code{vec} with dimensions N x 1 that contains user-supplied initial values for parameters
//' @param desc A \code{vector<string>} indicating the models that should be considered.
//' @param objdesc A \code{field<vec>} containing a list of parameters (e.g. AR(1) = c(1,1), ARMA(p,q) = c(p,q,1))
//' @param model_type A \code{string} that represents the model transformation
//' @param starting A \code{bool} that indicates whether the supplied values are guessed (T) or are user-based (F).
//' @param alpha A \code{double} that handles the alpha level of the confidence interval (1-alpha)*100
//' @param compute_v A \code{string} that describes what kind of covariance matrix should be computed.
//' @param K An \code{int} that controls how many times theta is updated.
//' @param H An \code{int} that controls how many bootstrap replications are done.
//' @param G An \code{int} that controls how many guesses at different parameters are made.
//' @param robust A \code{bool} that indicates whether the estimation should be robust or not.
//' @param eff A \code{double} that specifies the amount of efficiency required by the robust estimator.
//' @return A \code{field<mat>} that contains a list of ever-changing estimates...
//' @author JJB
//' @references Wavelet variance based estimation for composite stochastic processes, S. Guerrier and Robust Inference for Time Series Models: a Wavelet-Based Framework, S. Guerrier
//' @keywords internal
//' @export
//' @backref src/gmwm_logic.cpp
//' @backref src/gmwm_logic.h
// [[Rcpp::export]]
arma::field<arma::mat> gmwm_master_cpp(arma::vec& data, 
                                      arma::vec theta,
                                      const std::vector<std::string>& desc, const arma::field<arma::vec>& objdesc, 
                                      std::string model_type, bool starting,
                                      double alpha, 
                                      std::string compute_v, unsigned int K, unsigned int H,
                                      unsigned int G, 
                                      bool robust, double eff){
  
  // Obtain counts of the different models we need to work with
  std::map<std::string, int> models = count_models(desc);
  
  // HACK METHOD (todo: formalize it)
  // Determine if we need to difference
  if(models["SARIMA"] > 0){
    
    // Note: s, i, si are 6,7,8 => 5,6,7
    for(unsigned int i = 0; i < desc.size(); i ++){
      if(objdesc(i).n_elem > 3){
        arma::vec sarima_desc = objdesc(i);
        // Do we need to difference? 
        if(sarima_desc(6) > 0 || sarima_desc(7) > 0){
          // Perform differencing in specific order...
          // First non-seasonal and then seasonal.
          
          if(sarima_desc(6) > 0){
            // Lag is always 1, number of differences is (i)
            data = diff_cpp(data, 1, sarima_desc(6));
          }
          
          if(sarima_desc(7) > 0){
            // Lag is always seasonality (s) and differences is (si).
            data = diff_cpp(data, sarima_desc(5), sarima_desc(7));
          }
          
          // Kill loop. We only handle the tsmodel object with the first difference.
          break;
        }
      }
    }
  }
  
  // ------ Variable Declarations
  
  // Length of the Time Series
  unsigned int N = data.n_elem;
  
  // Number of Scales (J)
  unsigned int nlevels = floor(log2(N));
  
  // Number of parameters
  unsigned int np = theta.n_elem;
  
  // Take the mean of the first difference
  double expect_diff = mean_diff(data);
  
  // Guessed values of Theta (user supplied or generated)
  arma::vec guessed_theta = theta;
  
  // MODWT decomp
  arma::field<arma::vec> modwt_decomp = modwt_cpp(data, "haar", nlevels, "periodic", true);
  
  // Obtain WV and confidence intervals
  arma::mat wvar = wvar_cpp(modwt_decomp, robust, eff, alpha, "eta3");
  
  // Extract
  arma::vec wv_empir = wvar.col(0);
  arma::vec ci_lo = wvar.col(1);
  arma::vec ci_hi = wvar.col(2);
  
  //-------------------------
  // Obtain Covariance Matrix
  //-------------------------
  
  arma::mat V;
  
  // compute_cov_cpp is the computationally heavy core function. It can only be improved by parallelization.
  if(compute_v == "diag" || compute_v == "full"){
    arma::field<arma::mat> Vout = compute_cov_cpp(modwt_decomp, nlevels, compute_v, robust, eff);
    if(robust){
      V = Vout(1);
    }else{
      V = Vout(0);
    }
  }else{
     V = fast_cov_cpp(wvar.col(2), wvar.col(1));
  }

  // Obtain the Omega matrix
  arma::mat omega = arma::inv(diagmat(V));
  
  // Store the original V matrix (in case of bootstrapping) for use in the update function
  arma::mat orgV = V;
  
  // Calculate the values of the Scales 
  arma::vec scales = scales_cpp(nlevels);
  
  // Min-Max / N
  double ranged = dr_slope(data);
  
  // Guess starting values for the theta parameters
  if(starting){
    
    // Always run guessing algorithm
    theta = guess_initial(desc, objdesc, model_type, np, expect_diff, N, wvar, scales, ranged, G);
    
    // If we are in the ARMA case and only an ARMA model is present,
    // then check how good these values are.
    if(desc.size() == 1 && (desc[0] == "SARIMA" || desc[0] == "ARMA11") && N <= 1000){
      
      // Use R's ARIMA function to estimate parameter space
      arma::vec theta2 = Rcpp_ARIMA(data, objdesc(0)); // Only 1 objdesc in the available.
      
      // Obtain the obj function under omega with these initial guesses
      // DO >>NOT<< USE Yannick's to optimize starting values!!!!
      double mle_css_obj = getObjFun(theta2, desc, objdesc,  model_type, omega, wv_empir, scales); 
      
      // Obtain the objective function under Yannick's starting algorithm
      double init_guess_obj = getObjFunStarting(theta, desc, objdesc,  model_type, wv_empir, scales);
      
      // What performs better? 
      if(mle_css_obj < init_guess_obj){
        // Disable starting value optimization if using MLE. 
        theta = theta2;
        starting = false;
      }

    }
    
    guessed_theta = theta;
  }
  
  // Obtain the GMWM estimator's estimates.
  theta = gmwm_engine(theta, desc, objdesc, model_type, 
                      wv_empir, omega, scales, starting);
  
  // Optim may return a very small value. In this case, instead of saying it's zero (yielding a transform issue), make it EPSILON.
  theta = code_zero(theta);
  
  // Enable bootstrapping
  if(compute_v == "bootstrap"){
    for(unsigned int k = 0; k < K; k++){
        // Create the full V matrix
        V = cov_bootstrapper(theta, desc, objdesc, N, robust, eff, H, false);
      
        // Update the omega matrix
        omega = arma::inv(diagmat(V));
        
        // The theta update in this case MUST not use Yannick's starting algorithm. Hence, the false value.
        theta = gmwm_engine(theta, desc, objdesc, model_type, wv_empir, omega, scales, false);
        
        // Optim may return a very small value. In this case, instead of saying it's zero (yielding a transform issue), make it EPSILON.
        theta = code_zero(theta);
    }
  }

  
  if(desc[0] == "SARIMA" && desc.size() == 1){
    
    arma::vec temp = objdesc(0);
    unsigned int p = temp(0);
    if(p != 0 && invert_check(arma::join_cols(arma::ones<arma::vec>(1), -theta.rows(0, p - 1))) == false){
      Rcpp::Rcout << "WARNING: This ARMA model contains AR coefficients that are NON-STATIONARY!" << std::endl;
    }
  } 
  
  // Order AR1s / GM so largest phi is first!
   if(models["AR1"] > 1 || models["GM"] > 1){
     theta = order_AR1s(theta, desc, objdesc);
   }

  // Obtain the objective value function
  arma::vec obj_value(1);
  obj_value(0) = getObjFun(theta, desc, objdesc,  model_type, omega, wv_empir, scales); 
  
  arma::vec dr_s(1);
  dr_s(0) = ranged;
  
  // Decomposition of the WV.
  arma::mat decomp_theo = decomp_theoretical_wv(theta, desc, objdesc, scales);
  arma::vec theo = decomp_to_theo_wv(decomp_theo);

  // Export information back
  arma::field<arma::mat> out(13);
  out(0) = theta;
  out(1) = guessed_theta;
  out(2) = wv_empir;
  out(3) = ci_lo;
  out(4) = ci_hi;
  out(5) = V;
  out(6) = orgV;
  out(7) = expect_diff;
  out(8) = theo;
  out(9) = decomp_theo;
  out(10) = obj_value;
  out(11) = omega;
  out(12) = dr_s;
  return out;
}
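A side note on the omega construction used above and in the bootstrap loop (arma::inv(diagmat(V))): since diagmat(V) keeps only the main diagonal of V, the same matrix can be formed from the element-wise reciprocal of that diagonal, which avoids a general inverse. A minimal sketch of the equivalence (my example, not from the gmwm sources):

#include <armadillo>
#include <iostream>
using namespace arma;

int main()
{
  mat V = randu<mat>(4, 4) + eye(4, 4);    // stand-in covariance matrix

  mat omega1 = inv(diagmat(V));            // as written in gmwm_master_cpp

  vec d = V.diag();
  mat omega2 = diagmat(1.0/d);             // reciprocal of the diagonal, no inverse needed

  std::cout << "||omega1 - omega2||_fro = " << norm(omega1 - omega2, "fro") << std::endl;
  return 0;
}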
Example #21
inline
bool
op_princomp::direct_princomp
  (
        Mat<eT>& coeff_out,
        Mat<eT>& score_out,
        Col<eT>& latent_out, 
        Col<eT>& tsquared_out,
  const Mat<eT>& in
  )
  {
  arma_extra_debug_sigprint();

  const u32 n_rows = in.n_rows;
  const u32 n_cols = in.n_cols;
  
  if(n_rows > 1) // more than one sample
    {
    // subtract the mean - use score_out as temporary matrix
    score_out = in - repmat(mean(in), n_rows, 1);
 	  
    // singular value decomposition
    Mat<eT> U;
    Col<eT> s;
    
    const bool svd_ok = svd(U,s,coeff_out,score_out);
    
    if(svd_ok == false)
      {
      return false;
      }
    
    
    //U.reset();  // TODO: do we need this ?  U will get automatically deleted anyway
    
    // normalize the eigenvalues
    s /= std::sqrt( double(n_rows - 1) );
    
    // project the samples to the principals
    score_out *= coeff_out;
    
    if(n_rows <= n_cols) // number of samples is less than their dimensionality
      {
      score_out.cols(n_rows-1,n_cols-1).zeros();
      
      //Col<eT> s_tmp = zeros< Col<eT> >(n_cols);
      Col<eT> s_tmp(n_cols);
      s_tmp.zeros();
      
      s_tmp.rows(0,n_rows-2) = s.rows(0,n_rows-2);
      s = s_tmp;
          
      // compute the Hotelling's T-squared
      s_tmp.rows(0,n_rows-2) = eT(1) / s_tmp.rows(0,n_rows-2);
      
      const Mat<eT> S = score_out * diagmat(Col<eT>(s_tmp));   
      tsquared_out = sum(S%S,1); 
      }
    else
      {
      // compute the Hotelling's T-squared   
      const Mat<eT> S = score_out * diagmat(Col<eT>( eT(1) / s));
      tsquared_out = sum(S%S,1);
      }
            
    // compute the eigenvalues of the principal vectors
    latent_out = s%s;
    }
  else // 0 or 1 samples
    {
    coeff_out.eye(n_cols, n_cols);
    
    score_out.copy_size(in);
    score_out.zeros();
    
    latent_out.set_size(n_cols);
    latent_out.zeros();
    
    tsquared_out.set_size(n_rows);
    tsquared_out.zeros();
    }
  
  return true;
  }
Example #22
inline
void
CovMeans_mat_kth::gmm_one_video( std::string load_feat_video_i, std::string load_labels_video_i, int sc, int pe, int act, const int Ng )
{
  
  //cout <<  all_people (pe) << "_" << actions(act)  << " ";
  mat mat_features_video_i;
  mat_features_video_i.load( load_feat_video_i, hdf5_binary );
  int n_vec = mat_features_video_i.n_cols;
  
  //cout << " " << n_vec;
  
  
  bool is_finite = mat_features_video_i.is_finite();
  
  if (!is_finite )
  {
    cout << "is_finite?? " << is_finite << endl;
    cout << mat_features_video_i.n_rows << " " << mat_features_video_i.n_cols << endl;
    getchar();
  }
  
  
  gmm_diag gmm_model;
  gmm_diag bg_model;
  
  
  bool status_em = false;
  int rep_em=0;
  
  int km_iter = 10;
  int em_iter  = 5;
  double var_floor = 1e-10;
  bool print_mode = false;
  
  
  while (!status_em)
  {
    bool status_kmeans = false;
    //int rep_km = 0;
    
    while (!status_kmeans)
    {
      arma_rng::set_seed_random();
      
      status_kmeans = gmm_model.learn(mat_features_video_i, Ng, eucl_dist, random_subset, km_iter, 0, var_floor, print_mode);   //Only Kmeans
      bg_model = gmm_model;
      //rep_km++;
    }
    
    
    status_em = gmm_model.learn(mat_features_video_i, Ng, eucl_dist, keep_existing, 0, em_iter, var_floor, print_mode);   
    rep_em++;
    
    if (rep_em==9)
    {
      status_em = true;
      gmm_model = bg_model;
      
    }
    
  }
  
  //cout <<"EM was repeated " << rep_em << endl << endl;
  
  mat dcov = gmm_model.dcovs;
  mat means = gmm_model.means; 
 
  mat cov_i, logM_cov_i, V; 
  vec mean_i, D;
  
  std::stringstream save_folder;
  save_folder << "./CovMeans/sc" << sc << "/scale" << scale_factor << "-shift"<< shift ;
  
  
  for (int i=1; i<=Ng; ++i)
  {
     
    //cout << i << " out " << Ng << endl;
    cov_i = diagmat( dcov.col(i-1) );
    cov_i = mehrtash_suggestion(cov_i);
    
    mean_i = means.col(i -1);
    
    eig_sym(D, V, cov_i);
    logM_cov_i = V*diagmat( log(D) )*V.t();  // reuse the matrix declared above instead of shadowing it
    
    
    std::stringstream save_Covs;
    save_Covs << save_folder.str() << "/Cov_"<< i << "_out_" << Ng << "_" <<  all_people (pe) << "_" << actions(act) <<  ".h5";
    
    std::stringstream save_logMCovs;
    save_logMCovs << save_folder.str() << "/logM_Cov_" << i << "_out_" << Ng << "_"  <<  all_people (pe) << "_" << actions(act) <<  ".h5";
    
    std::stringstream save_Means;
    save_Means << save_folder.str() << "/Means_" << i << "_out_" << Ng << "_"  <<  all_people (pe) << "_" << actions(act) <<  ".h5";
    
    
    //cout << save_Covs.str() << endl;
    cov_i.save(  save_Covs.str(), hdf5_binary ); 
    logM_cov_i.save( save_logMCovs.str(), hdf5_binary ); 
    mean_i.save( save_Means.str(), hdf5_binary ); 

    
  }
  
}
Example #23
inline
void
CovMeans_mat_kth::one_video_one_cov( std::string load_feat_video_i, std::string load_labels_video_i, int sc, int pe, int act )
{
  //   #pragma omp critical
  //   {
    //   cout << load_feat_video_i << endl;
    //   getchar();
    //   }
    
    mat mat_features_video_i;
    
    mat_features_video_i.load( load_feat_video_i, hdf5_binary );
    
    int n_vec = mat_features_video_i.n_cols;
    
    
    std::stringstream save_folder;
    
    //Shifting both
    save_folder << "./CovMeans/sc" << sc << "/scale" << scale_factor << "-shift"<< shift ;
    
    
    {
      //      If you want to use. You have to add the  flag_shift in this method. 
      //	   if (flag_shift) //Horizontal Shift
      //       {
	// 
	// 	save_folder << "./kth-one-CovsMeans-mat/sc" << sc << "/scale" << scale_factor <<  "-horshift"<< shift ;
	// 	
	//       }
	//       
	//       if (!flag_shift)//Vertical Shift
	//       {
	  // 	 save_folder << "./kth-one-CovsMeans-mat/sc" << sc << "/scale" << scale_factor << "-vershift"<< shift ;
	  //       }
	  //       
    }   
    
    running_stat_vec<rowvec> stat_seg(true);
    
    for (int l=0; l<n_vec; ++l)
    {
      
      vec sample = mat_features_video_i.col(l);
      stat_seg (sample);
      
    }
    
    
    std::stringstream save_Covs;
    save_Covs << save_folder.str() << "/Cov_" <<  all_people (pe) << "_" << actions(act) <<  ".h5";
    
    std::stringstream save_logMCovs;
    save_logMCovs << save_folder.str() << "/logM_Cov_" <<  all_people (pe) << "_" << actions(act) <<  ".h5";
    
    
    std::stringstream save_Means;
    save_Means << save_folder.str() << "/Means_" <<  all_people (pe) << "_" << actions(act) <<  ".h5";
    
    
    
    double THRESH = 0.000001;
    mat cov_i = stat_seg.cov();
    vec mean_i = stat_seg.mean();
    
    
    
    //Following Mehrtash suggestions as per email dated June26th 2014
    cov_i = 0.5*(cov_i + cov_i.t());
    vec D;
    mat V;
    eig_sym(D, V, cov_i);
    uvec q1 = find(D < THRESH);
    
    if (q1.n_elem>0)
    {
      for (uword pos = 0; pos < q1.n_elem; ++pos)
      {
	D( q1(pos) ) = THRESH;
      }
      cov_i = V*diagmat(D)*V.t();  
      
    }  
    //end suggestions
    
    
    eig_sym(D, V, cov_i);
    mat logM_cov_i = V*diagmat( log(D) )*V.t();
    
    
    
    #pragma omp critical
    {
      cout << "saving " <<  all_people (pe) << endl;
      
      cov_i.save(  save_Covs.str(), hdf5_binary ); 
      logM_cov_i.save( save_logMCovs.str(), hdf5_binary ); 
      mean_i.save( save_Means.str(), hdf5_binary ); 
    }
    
    
    
}
Example #24
arma::mat Vespucci::Math::Smoothing::SVDDenoise(const arma::mat &X, arma::uword k, arma::mat &U, arma::vec &s, arma::mat &V)
{
    arma::svds(U, s, V, arma::sp_mat(X), k);
    return -1 * U * diagmat(s) * V.t();
}
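For comparison, a dense rank-k truncation sketch of the same idea (my example, using svd_econ instead of the sparse svds call above): keep the k largest singular triplets and rebuild the matrix.

#include <armadillo>
#include <algorithm>
#include <iostream>
using namespace arma;

// Rank-k reconstruction of X from its thin SVD.
mat SVDTruncate(const mat& X, uword k)
{
  mat U, V;
  vec s;
  svd_econ(U, s, V, X);
  k = std::min(k, s.n_elem);
  return U.cols(0, k-1) * diagmat(s.subvec(0, k-1)) * V.cols(0, k-1).t();
}

int main()
{
  mat clean = randu<mat>(50, 2) * randu<mat>(2, 40);   // rank-2 signal
  mat noisy = clean + 0.01*randn<mat>(50, 40);

  mat denoised = SVDTruncate(noisy, 2);
  std::cout << "||denoised - clean||_fro = " << norm(denoised - clean, "fro") << std::endl;
  return 0;
}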
Example #25
// Dictionary step for optimization.
double SparseCoding::OptimizeDictionary(const arma::mat& data,
                                        const arma::mat& codes,
                                        const arma::uvec& adjacencies)
{
    // Count the number of atomic neighbors for each point x^i.
    arma::uvec neighborCounts = arma::zeros<arma::uvec>(data.n_cols, 1);

    if (adjacencies.n_elem > 0)
    {
        // This gets the column index.  Intentional integer division.
        size_t curPointInd = (size_t) (adjacencies(0) / atoms);

        size_t nextColIndex = (curPointInd + 1) * atoms;
        for (size_t l = 1; l < adjacencies.n_elem; ++l)
        {
            // If l no longer refers to an element in this column, advance the column
            // number accordingly.
            if (adjacencies(l) >= nextColIndex)
            {
                curPointInd = (size_t) (adjacencies(l) / atoms);
                nextColIndex = (curPointInd + 1) * atoms;
            }

            ++neighborCounts(curPointInd);
        }
    }

    // Handle the case of inactive atoms (atoms not used in the given coding).
    std::vector<size_t> inactiveAtoms;

    for (size_t j = 0; j < atoms; ++j)
    {
        if (arma::accu(codes.row(j) != 0) == 0)
            inactiveAtoms.push_back(j);
    }

    const size_t nInactiveAtoms = inactiveAtoms.size();
    const size_t nActiveAtoms = atoms - nInactiveAtoms;

    // Efficient construction of Z restricted to active atoms.
    arma::mat matActiveZ;
    if (nInactiveAtoms > 0)
    {
        math::RemoveRows(codes, inactiveAtoms, matActiveZ);
    }

    if (nInactiveAtoms > 0)
    {
        Log::Warn << "There are " << nInactiveAtoms
                  << " inactive atoms. They will be re-initialized randomly.\n";
    }

    Log::Debug << "Solving Dual via Newton's Method.\n";

    // Solve using Newton's method in the dual - note that the final dot
    // multiplication with inv(A) seems to be unavoidable. Although more
    // expensive, the code written this way (we use solve()) should be more
    // numerically stable than just using inv(A) for everything.
    arma::vec dualVars = arma::zeros<arma::vec>(nActiveAtoms);

    //vec dualVars = 1e-14 * ones<vec>(nActiveAtoms);

    // Method used by feature sign code - fails miserably here.  Perhaps the
    // MATLAB optimizer fmincon does something clever?
    //vec dualVars = 10.0 * randu(nActiveAtoms, 1);

    //vec dualVars = diagvec(solve(dictionary, data * trans(codes))
    //    - codes * trans(codes));
    //for (size_t i = 0; i < dualVars.n_elem; i++)
    //  if (dualVars(i) < 0)
    //    dualVars(i) = 0;

    bool converged = false;

    // If we have any inactive atoms, we must construct these differently.
    arma::mat codesXT;
    arma::mat codesZT;

    if (inactiveAtoms.empty())
    {
        codesXT = codes * trans(data);
        codesZT = codes * trans(codes);
    }
    else
    {
        codesXT = matActiveZ * trans(data);
        codesZT = matActiveZ * trans(matActiveZ);
    }

    double normGradient = 0;
    double improvement = 0;
    for (size_t t = 1; (t != maxIterations) && !converged; ++t)
    {
        arma::mat A = codesZT + diagmat(dualVars);

        arma::mat matAInvZXT = solve(A, codesXT);

        arma::vec gradient = -arma::sum(arma::square(matAInvZXT), 1);
        gradient += 1;

        arma::mat hessian = -(-2 * (matAInvZXT * trans(matAInvZXT)) % inv(A));

        arma::vec searchDirection = -solve(hessian, gradient);
        //printf("%e\n", norm(searchDirection, 2));

        // Armijo line search.
        const double c = 1e-4;
        double alpha = 1.0;
        const double rho = 0.9;
        double sufficientDecrease = c * dot(gradient, searchDirection);

        // A maxIterations parameter for the Armijo line search may be a good idea,
        // but it doesn't seem to be causing any problems for now.
        while (true)
        {
            // Calculate objective.
            double sumDualVars = arma::sum(dualVars);
            double fOld = -(-trace(trans(codesXT) * matAInvZXT) - sumDualVars);
            double fNew = -(-trace(trans(codesXT) * solve(codesZT +
                                   diagmat(dualVars + alpha * searchDirection), codesXT)) -
                            (sumDualVars + alpha * arma::sum(searchDirection)));

            if (fNew <= fOld + alpha * sufficientDecrease)
            {
                searchDirection = alpha * searchDirection;
                improvement = fOld - fNew;
                break;
            }

            alpha *= rho;
        }

        // Take step and print useful information.
        dualVars += searchDirection;
        normGradient = arma::norm(gradient, 2);
        Log::Debug << "Newton Method iteration " << t << ":" << std::endl;
        Log::Debug << "  Gradient norm: " << std::scientific << normGradient
                   << "." << std::endl;
        Log::Debug << "  Improvement: " << std::scientific << improvement << ".\n";

        if (normGradient < newtonTolerance)
            converged = true;
    }

    if (inactiveAtoms.empty())
    {
        // Directly update dictionary.
        dictionary = trans(solve(codesZT + diagmat(dualVars), codesXT));
    }
    else
    {
        arma::mat activeDictionary = trans(solve(codesZT +
                                           diagmat(dualVars), codesXT));

        // Update all atoms.
        size_t currentInactiveIndex = 0;
        for (size_t i = 0; i < atoms; ++i)
        {
            if (inactiveAtoms[currentInactiveIndex] == i)
            {
                // This atom is inactive.  Reinitialize it randomly.
                dictionary.col(i) = (data.col(math::RandInt(data.n_cols)) +
                                     data.col(math::RandInt(data.n_cols)) +
                                     data.col(math::RandInt(data.n_cols)));

                dictionary.col(i) /= arma::norm(dictionary.col(i), 2);

                // Increment inactive index counter.
                ++currentInactiveIndex;
            }
            else
            {
                // Update estimate.
                dictionary.col(i) = activeDictionary.col(i - currentInactiveIndex);
            }
        }
    }

    return normGradient;
}
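The dictionary update above relies on a backtracking (Armijo) line search. A generic, self-contained sketch of that rule on a small quadratic objective, reusing the same constants c = 1e-4 and rho = 0.9 (my illustration, not mlpack code):

#include <armadillo>
#include <iostream>
using namespace arma;

int main()
{
  // minimise f(x) = 0.5*x'Qx - b'x along the steepest-descent direction
  mat Q = { {3.0, 1.0},
            {1.0, 2.0} };
  vec b = {1.0, 1.0};
  vec x = {5.0, -5.0};

  auto f = [&](const vec& z) { return as_scalar(0.5*z.t()*Q*z - b.t()*z); };

  vec grad = Q*x - b;
  vec dir  = -grad;                       // descent direction

  const double c = 1e-4, rho = 0.9;
  double alpha = 1.0;
  const double slope = dot(grad, dir);    // directional derivative (negative here)

  // shrink alpha until the sufficient-decrease (Armijo) condition holds
  while (f(x + alpha*dir) > f(x) + c*alpha*slope)
    alpha *= rho;

  std::cout << "accepted step size: " << alpha << std::endl;
  return 0;
}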
Example #26
inline
void
op_pinv::apply(Mat<typename T1::elem_type>& out, const Op<T1,op_pinv>& in)
  {
  arma_extra_debug_sigprint();
  
  typedef typename T1::elem_type            eT;
  typedef typename get_pod_type<eT>::result  T;
  
  const bool use_divide_and_conquer = (in.aux_uword_a == 1);
  
  T tol = access::tmp_real(in.aux);
  
  arma_debug_check((tol < T(0)), "pinv(): tolerance must be >= 0");
  
  const Proxy<T1> P(in.m);
  
  const uword n_rows = P.get_n_rows();
  const uword n_cols = P.get_n_cols();
  
  if( (n_rows*n_cols) == 0 )
    {
    out.set_size(n_cols,n_rows);
    return;
    }
  
  
  // economical SVD decomposition 
  Mat<eT> U;
  Col< T> s;
  Mat<eT> V;
  
  bool status = false;
  
  if(use_divide_and_conquer)
    {
    status = (n_cols > n_rows) ? auxlib::svd_dc_econ(U, s, V, trans(P.Q)) : auxlib::svd_dc_econ(U, s, V, P.Q);
    }
  else
    {
    status = (n_cols > n_rows) ? auxlib::svd_econ(U, s, V, trans(P.Q), 'b') : auxlib::svd_econ(U, s, V, P.Q, 'b');
    }
  
  if(status == false)
    {
    out.reset();
    arma_bad("pinv(): svd failed");
    return;
    }
  
  const uword s_n_elem = s.n_elem;
  const T*    s_mem    = s.memptr();
  
  // set tolerance to default if it hasn't been specified
  if( (tol == T(0)) && (s_n_elem > 0) )
    {
    tol = (std::max)(n_rows, n_cols) * s_mem[0] * std::numeric_limits<T>::epsilon();
    }
  
  
  uword count = 0;
  
  for(uword i = 0; i < s_n_elem; ++i)
    {
    count += (s_mem[i] >= tol) ? uword(1) : uword(0);
    }
  
  
  if(count > 0)
    {
    Col<T> s2(count);
    
    T* s2_mem = s2.memptr();
    
    uword count2 = 0;
    
    for(uword i=0; i < s_n_elem; ++i)
      {
      const T val = s_mem[i];
      
      if(val >= tol)  {  s2_mem[count2] = T(1) / val;  ++count2; }
      }
    
    
    if(n_rows >= n_cols)
      {
      out = ( (V.n_cols > count) ? V.cols(0,count-1) : V ) * diagmat(s2) * trans( (U.n_cols > count) ? U.cols(0,count-1) : U );
      }
    else
      {
      out = ( (U.n_cols > count) ? U.cols(0,count-1) : U ) * diagmat(s2) * trans( (V.n_cols > count) ? V.cols(0,count-1) : V );
      }
    }
  else
    {
    out.zeros(n_cols, n_rows);
    }
  }
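A short usage sketch of the user-facing pinv() served by this operator (my own example, not part of the Armadillo sources): compute the pseudo-inverse of a tall matrix and check two of the Moore-Penrose conditions.

#include <armadillo>
#include <iostream>
using namespace arma;

int main()
{
  mat A = randu<mat>(6, 3);     // tall, full column rank with probability one

  mat Ap = pinv(A);             // Moore-Penrose pseudo-inverse, 3x6

  std::cout << "||A*Ap*A - A||_fro   = " << norm(A*Ap*A - A, "fro")   << std::endl;
  std::cout << "||Ap*A*Ap - Ap||_fro = " << norm(Ap*A*Ap - Ap, "fro") << std::endl;
  return 0;
}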
Example #27
inline
void
op_pinv::direct_pinv(Mat<eT>& out, const Mat<eT>& A, const eT in_tol)
  {
  arma_extra_debug_sigprint();
  
  typedef typename get_pod_type<eT>::result T;
  
  T tol = access::tmp_real(in_tol);
  
  arma_debug_check((tol < T(0)), "pinv(): tolerance must be >= 0");
  
  const uword n_rows = A.n_rows;
  const uword n_cols = A.n_cols;
  
  // economical SVD decomposition 
  Mat<eT> U;
  Col< T> s;
  Mat<eT> V;
  
  const bool status = (n_cols > n_rows) ? auxlib::svd_econ(U,s,V,trans(A),'b') : auxlib::svd_econ(U,s,V,A,'b');
  
  if(status == false)
    {
    out.reset();
    arma_bad("pinv(): svd failed");
    return;
    }
  
  const uword s_n_elem = s.n_elem;
  const T*    s_mem    = s.memptr();
  
  // set tolerance to default if it hasn't been specified as an argument 
  if( (tol == T(0)) && (s_n_elem > 0) )
    {
    tol = (std::max)(n_rows, n_cols) * eop_aux::direct_eps( op_max::direct_max(s_mem, s_n_elem) );
    }
  
  
  // count non zero valued elements in s
  
  uword count = 0;
  
  for(uword i = 0; i < s_n_elem; ++i)
    {
    if(s_mem[i] > tol)
      {
      ++count;
      }
    }
  
  if(count != 0)
    {
    Col<T> s2(count);
    
    T* s2_mem = s2.memptr();
    
    uword count2 = 0;
    
    for(uword i=0; i < s_n_elem; ++i)
      {
      const T val = s_mem[i];
      
      if(val > tol)
        {
        s2_mem[count2] = T(1) / val;
        ++count2;
        }
      }
    
    
    if(n_rows >= n_cols)
      {
      out = ( V.n_cols > count ? V.cols(0,count-1) : V ) * diagmat(s2) * trans( U.n_cols > count ? U.cols(0,count-1) : U );
      }
    else
      {
      out = ( U.n_cols > count ? U.cols(0,count-1) : U ) * diagmat(s2) * trans( V.n_cols > count ? V.cols(0,count-1) : V );
      }
    }
  else
    {
    out.zeros(n_cols, n_rows);
    }
  }
Example #28
//' @title Update Wrapper for the GMWM Estimator
//' @description This function uses information obtained previously (e.g. WV covariance matrix) to re-estimate a different model parameterization
//' @param theta A \code{vec} with dimensions N x 1 that contains user-supplied initial values for parameters
//' @param desc A \code{vector<string>} indicating the models that should be considered.
//' @param objdesc A \code{field<vec>} containing a list of parameters (e.g. AR(1) = c(1,1), ARMA(p,q) = c(p,q,1))
//' @param model_type A \code{string} that represents the model transformation
//' @param wv_empir A \code{vec} that contains the empirical wavelet variance
//' @param omega A \code{mat} that represents the covariance matrix.
//' @param scales A \code{vec} that contains the scales or taus (2^(1:J))
//' @param starting A \code{bool} that indicates whether we guessed starting (T) or the user supplied estimates (F).
//' @return A \code{field<mat>} that contains the parameter estimates from GMWM estimator.
//' @author JJB
//' @references Wavelet variance based estimation for composite stochastic processes, S. Guerrier and Robust Inference for Time Series Models: a Wavelet-Based Framework, S. Guerrier
//' @keywords internal
//' @backref src/gmwm_logic.cpp
//' @backref src/gmwm_logic.h
// [[Rcpp::export]]
arma::field<arma::mat> gmwm_update_cpp(arma::vec theta,
                                      const std::vector<std::string>& desc, const arma::field<arma::vec>& objdesc, 
                                      std::string model_type, unsigned int N, double expect_diff, double ranged, 
                                      const arma::mat& orgV, const arma::vec& scales, const arma::mat& wv,
                                      bool starting, 
                                      std::string compute_v, unsigned int K, unsigned int H,
                                      unsigned int G, 
                                      bool robust, double eff){
  
  // Number of parameters
  unsigned int np = theta.n_elem;
    
  // Guessed Values
  arma::vec guessed_theta = theta;
  
  // V matrix
  arma::mat V = orgV;
  
  // Diagonal Omega Matrix
  arma::mat omega = arma::inv(diagmat(V));
  
  arma::vec wv_empir = wv.col(0);
  
  // Do we need to run a guessing algorithm?
  if(starting){

    theta = guess_initial(desc, objdesc, model_type, np, expect_diff, N, wv, scales, ranged, G);
    
    guessed_theta = theta;
  }

  // Obtain the GMWM estimator estimates.
  theta = gmwm_engine(theta, desc, objdesc, model_type, 
                      wv_empir, omega, scales, starting);

  theta = code_zero(theta);
    
  // Bootstrap the V matrix
  if(compute_v == "bootstrap"){
    for(unsigned int k = 0; k < K; k++){
        // False here means we create the "full V" matrix
        V = cov_bootstrapper(theta, desc, objdesc, N, robust, eff, H, false);
        omega = arma::inv(diagmat(V));
        
        // The theta update in this case MUST not use Yannick's starting algorithm. Hence, the false value.
        theta = gmwm_engine(theta, desc, objdesc, model_type, wv_empir, omega, scales, false);
        
        // Optim may return a very small value. In this case, instead of saying it's zero (yielding a transform issue), make it EPSILON.
        theta = code_zero(theta);
    }
  }
  
  
  std::map<std::string, int> models = count_models(desc);
  
  // Order AR1s so largest phi is first!
  if(models["AR1"] > 1  || models["GM"] > 1){
    theta = order_AR1s(theta, desc, objdesc);
  }
  
  // Obtain the theoretical WV.
  arma::mat decomp_theo = decomp_theoretical_wv(theta, desc, objdesc, scales);
  arma::vec theo = decomp_to_theo_wv(decomp_theo);
  
  // Obtain the objective value function
  arma::vec obj_value(1);
  obj_value(0) = getObjFun(theta, desc, objdesc,  model_type, omega, wv_empir, scales); 
  
  // Export calculations to R.
  arma::field<arma::mat> out(6);
  out(0) = theta;
  out(1) = guessed_theta;
  out(2) = V;
  out(3) = theo;
  out(4) = decomp_theo;
  out(5) = obj_value;
  
  return out;
}