Example #1
File: util.cpp Project: mnievesc/eems
double wishpdfln(const MatrixXd &X, const MatrixXd &Sigma, const double df) {
  double ldX = logdet(X);
  double ldS = logdet(Sigma);
  int n = X.rows();
  return (0.5*(-df*ldS - Sigma.selfadjointView<Lower>().llt().solve(X).trace() + 
	       (df-n-1.0)*ldX - df*n*log_2) - mvgammaln(0.5*df,n));
}
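
For reference, this matches the standard Wishart log-density evaluated at X (a reading of the code above, with nu = df and the multivariate gamma term handled by mvgammaln):

  \log p(X \mid \Sigma, \nu) = \tfrac{1}{2}\bigl[(\nu - n - 1)\log|X| - \operatorname{tr}(\Sigma^{-1}X) - \nu\log|\Sigma| - \nu n \log 2\bigr] - \log\Gamma_n(\tfrac{\nu}{2})

The selfadjointView<Lower>().llt().solve(X).trace() chain computes \operatorname{tr}(\Sigma^{-1}X) without explicitly forming \Sigma^{-1}.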
Example #2
File: eems.cpp Project: dipetkov/eems
void EEMS::initialize_diffs( ) {
  cerr << "[Diffs::initialize]" << endl;
  n_2 = (double)n/2.0; nmin1 = n-1; logn = log(n);
  J = MatrixXd::Zero(n,o);
  cvec = VectorXd::Zero(o);
  cinv = VectorXd::Zero(o);
  for ( int i = 0 ; i < n ; i ++ ) {
    J(i,graph.get_deme_of_indiv(i)) = 1;
    cvec(graph.get_deme_of_indiv(i)) += 1;
  }
  cinv = pow(cvec.array(),-1.0).matrix();  // cinv is the vector of inverse counts
  cmin1 = cvec; cmin1.array() -= 1;        // cmin1 is the vector of counts - 1
  Diffs = readMatrixXd(params.datapath + ".diffs");
  if ((Diffs.rows()!=n)||(Diffs.cols()!=n)) {
    cerr << "  Error reading dissimilarities matrix " << params.datapath + ".diffs" << endl
	 << "  Expect a " << n << "x" << n << " matrix of pairwise differences" << endl; exit(1);      
  }
  cerr << "  Loaded dissimilarities matrix from " << params.datapath + ".diffs" << endl;
  if (!isdistmat(Diffs)) {
    cerr << "  The dissimilarity matrix is not a full-rank distance matrix" << endl; exit(1);
  }
  L = MatrixXd::Constant(nmin1,n,-1.0);
  L.topRightCorner(nmin1,nmin1).setIdentity();
  JtDhatJ = MatrixXd::Zero(o,o);
  JtDobsJ = J.transpose()*Diffs*J;
  ldLLt = logdet(L*L.transpose());
  ldLDLt = logdet(-L*Diffs*L.transpose());
  ldDiQ = ldLLt - ldLDLt;
  if (cvec.maxCoeff()>1) {
    cerr << "[Diffs::initialize] Use this version only if there is at most one sample in every deme" << endl;
    exit(1);
  }
  cerr << "[Diffs::initialize] Done." << endl << endl;
}
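
A note on the contrast matrix built above (read directly from the code, not from the project's documentation): L = [-1 | I_{n-1}] is the (n-1) x n matrix whose first column is all -1 with the identity to its right, so (Lx)_i = x_{i+1} - x_1, and ldDiQ stores \log\det(LL^\top) - \log\det(-L\,\mathrm{Diffs}\,L^\top).

Example #3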
    // Conversion from a moment Gaussian.
    canonical_gaussian_param& operator=(const moment_gaussian_param<T>& mg) {
        Eigen::LLT<mat_type> chol(mg.cov);
        if (chol.info() != Eigen::Success) {
            throw numerical_error(
                "canonical_gaussian: Cannot invert the covariance matrix. "
                "Are you passing in a non-singular moment Gaussian distribution?"
            );
        }
        mat_type sol_xy = chol.solve(mg.coef);

        std::size_t m = mg.head_size();
        std::size_t n = mg.tail_size();
        resize(m + n);

        eta.segment(0, m) = chol.solve(mg.mean);
        eta.segment(m, n).noalias() = -sol_xy.transpose() * mg.mean;

        lambda.block(0, 0, m, m) = chol.solve(mat_type::Identity(m, m));
        lambda.block(0, m, m, n) = -sol_xy;
        lambda.block(m, 0, n, m) = -sol_xy.transpose();
        lambda.block(m, m, n, n).noalias() = mg.coef.transpose() * sol_xy;

        lm = mg.lm - T(0.5) * (eta.segment(0, m).dot(mg.mean)
                               + logdet(chol) + m * std::log(two_pi<T>()));
        return *this;
    }
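
If I read this conversion correctly, it implements the usual moment-to-canonical mapping for a conditional Gaussian p(x | y) = N(x; Ay + b, \Sigma), with A = mg.coef, b = mg.mean, \Sigma = mg.cov:

  \Lambda = \begin{pmatrix} \Sigma^{-1} & -\Sigma^{-1}A \\ -A^\top\Sigma^{-1} & A^\top\Sigma^{-1}A \end{pmatrix}, \qquad
  \eta = \begin{pmatrix} \Sigma^{-1}b \\ -A^\top\Sigma^{-1}b \end{pmatrix},

and the log-multiplier is adjusted by -\tfrac{1}{2}\bigl(b^\top\Sigma^{-1}b + \log|\Sigma| + m\log 2\pi\bigr). All \Sigma^{-1}(\cdot) products come from chol.solve(...), so the covariance is never inverted explicitly.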
Example #4
/** An example of the Taylor expansion of logdet(X). */
void taylorSample() {

  // Initialize the matrices.
  symbolic_matrix_type X("X", ROW, COL);
  symbolic_matrix_type X0("X0", ROW, COL);
  symbolic_matrix_type Delta("(X-X0)", ROW, COL);
    
  AMD::SymbolicScalarMatlab a2("1/2!");
  AMD::SymbolicScalarMatlab a3("1/3!"); 
  SymbolicSMFunc r2(a2,ROW,COL);
  SymbolicSMFunc r3(a3,ROW, COL);

  // Initialize MatrixMatrixFunction. 
  SymbolicMMFunc fX(X, false);
  SymbolicMMFunc fX0(X0, false);
  SymbolicMMFunc fDelta(Delta, true);

  // Compute Taylor series iteratively. 
  SymbolicSMFunc f0 =  logdet(fX0);
  SymbolicSMFunc f1 = trace(fDelta * transpose(*f0.derivativeFuncVal));
  SymbolicSMFunc f2 = trace(fDelta * transpose(*f1.derivativeFuncVal));
  SymbolicSMFunc f3 = trace(fDelta * transpose(*f2.derivativeFuncVal));
  // Taylor Expansion. 
  SymbolicSMFunc func = f0 + f1 + r2*f2 + r3*f3;

  std::cout << "The first 4 terms of the Taylor expansion of logdet(X) around X0 are:";
  std::cout << std::endl;
  std::cout << func.functionVal.getString() << std::endl;

}
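
For context, the series assembled here is the matrix Taylor expansion evaluated symbolically: with f(X) = \log\det X,

  f(X_0 + \Delta) \approx f(X_0) + \operatorname{tr}\bigl(\Delta^\top \nabla f(X_0)\bigr) + \tfrac{1}{2!} f_2 + \tfrac{1}{3!} f_3,

where each f_{k+1} = \operatorname{tr}(\Delta^\top \nabla f_k) is obtained by differentiating the previous term again, which is what the repeated trace(fDelta * transpose(*f_k.derivativeFuncVal)) calls do. For \log\det this reproduces \log\det X_0 + \operatorname{tr}(X_0^{-1}\Delta) - \tfrac{1}{2}\operatorname{tr}\bigl((X_0^{-1}\Delta)^2\bigr) + \tfrac{1}{3}\operatorname{tr}\bigl((X_0^{-1}\Delta)^3\bigr).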
Example #5
File: util.cpp Project: mnievesc/eems
// Not a general pseudowishpdfln (because L*Diff*L' at a single locus has rank 1)
double pseudowishpdfln(const MatrixXd &X, const MatrixXd &Sigma, const int df) {
  int rank = 1;
  double ldX = pseudologdet(X,rank);
  double ldS = logdet(Sigma);
  int n = X.rows();
  int q = (df<n) ? df : n;
  return (0.5*(-df*ldS - Sigma.selfadjointView<Lower>().llt().solve(X).trace() +
	       (df-n-1.0)*ldX - df*n*log_2 - df*(n-q)*log_pi) - mvgammaln(0.5*df,q));
}
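
Compared with wishpdfln in Example #1, the differences visible in the code are: a pseudo-determinant of X (rank 1 here, per the comment), an extra -\tfrac{\nu(n-q)}{2}\log\pi term, and q = \min(\nu, n) in place of n inside the multivariate gamma, which lines up with the usual density adjustments for a rank-deficient (singular) Wishart argument.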
Example #6
 //----------------------------------------------------------------------
 // log posterior if R(i_,j_) is replaced by r.  performs work needed
 // for slice sampling in draw_R(i,j)
 double SepStratSampler::logp_slice_R(double r){
   set_R(r);
   fill_siginv(false);  // false means we need to compute Rinv
   const Spd & Siginv(cand_);
   double ans =  .5 * n_ * logdet(Siginv);  // positive .5
   ans +=  -.5 * traceAB(Siginv, sumsq_);
   ans += Rpri_->logp(R_);
   // skip the jacobian because it only has products of sigma^2's in it
   return ans;
 }
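
Written out, the quantity accumulated here is the Gaussian log-likelihood in precision form plus the prior on R (the Jacobian is deliberately skipped, per the comment):

  \log p \propto \tfrac{n}{2}\log|\Sigma^{-1}| - \tfrac{1}{2}\operatorname{tr}(\Sigma^{-1}S) + \log p_{\mathrm{prior}}(R),

where \Sigma^{-1} is the candidate inverse covariance (cand_) and S is the accumulated sum of squares (sumsq_).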
Example #7
void CParam::S5_MuSigma(CData &Data, double f_Sigma,double h_Mu) {
  vector<Matrix> X = vector<Matrix>(K);
  int *Counts = new int[K];
  for (int k =0; k < K; k++) {
    if (n_z(k+1) > 0) {
      X[k] = Matrix(n_z(k+1),n_var_independent); X[k] = 0;
      Counts[k] = 0;
    }
  }
  for (int i=1; i<=Y_aug_compact.nrows(); i++){
    int k = z_aug(i);
    X[k-1].row(++Counts[k-1]) = Y_aug_compact.row(i) -  X_bar.column(k).t();
  }
  SymmetricMatrix SqMatrix;
  for (int k=1; k<=K ; k++) {
    // propose Sigma_k_q
    double f_Sigma_tilde_k = f_Sigma + n_z(k);
    double h_k = h_Mu + n_z(k);
    SymmetricMatrix Phi_tilde_k = Phi;
    ColumnVector mu_tilde_k = mu_bar;
    if ( n_z(k) > 0) {
      mu_tilde_k = (h_Mu * mu_bar +  X_bar.column(k) * n_z(k)) / h_k;
      SqMatrix << X[k-1].t() * X[k-1];
      Phi_tilde_k += SqMatrix; //can be further optimized
      ColumnVector xbar_mubar = X_bar.column(k) - mu_bar;
      SqMatrix << (h_Mu*n_z(k)/h_k) * ( xbar_mubar * xbar_mubar.t());
      Phi_tilde_k += SqMatrix ;
    }
    LowerTriangularMatrix LPhi_tilde_k = Cholesky(Phi_tilde_k);
    LowerTriangularMatrix LSigma_k_q = rIW_w_pd_check_fn( f_Sigma_tilde_k, LPhi_tilde_k );

    // propose mu_k_q
    LowerTriangularMatrix LSigma_k_tilde = (1.0/sqrt(h_k)) * LSigma_k_q ;
    ColumnVector mu_k_q = rMVN_fn( mu_tilde_k, LSigma_k_tilde ); // Modified

    // Gibbs update
    Mu.column(k) = mu_k_q;
    LSIGMA[k-1] = LSigma_k_q;
    LSIGMA_i[k-1] = LSigma_k_q.i();
    SIGMA[k-1] << LSigma_k_q * LSigma_k_q.t();
    logdet_and_more(k) = -0.5*n_var* LOG_2_PI + logdet(LSIGMA_i[k-1]);

    // S = L * L.t() ;   S.i() = (L.i()).t() * L.i() ;
    Matrix Sigma_k_inv = LSIGMA_i[k-1].t() * LSIGMA_i[k-1];
    for (int i_var=1; i_var<= n_var_independent; i_var++) {
      Sigma_k_inv_ll(k,i_var) = Sigma_k_inv(i_var,i_var);
    }
  }
  delete [] Counts;
}
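
For orientation, the proposal step matches the standard conjugate Normal-inverse-Wishart update (notation read from the code: \bar{\mu} = mu_bar, \Phi = Phi, h_\mu = h_Mu, n_k = n_z(k), \bar{x}_k = X_bar.column(k)):

  \tilde{f}_k = f_\Sigma + n_k, \quad \tilde{h}_k = h_\mu + n_k, \quad \tilde{\mu}_k = \frac{h_\mu\bar{\mu} + n_k\bar{x}_k}{\tilde{h}_k},
  \tilde{\Phi}_k = \Phi + \sum_{i \in k}(x_i - \bar{x}_k)(x_i - \bar{x}_k)^\top + \frac{h_\mu n_k}{\tilde{h}_k}(\bar{x}_k - \bar{\mu})(\bar{x}_k - \bar{\mu})^\top,

after which \Sigma_k is drawn from an inverse Wishart with parameters (\tilde{f}_k, \tilde{\Phi}_k) and \mu_k \sim N(\tilde{\mu}_k, \Sigma_k/\tilde{h}_k), presumably what rIW_w_pd_check_fn and rMVN_fn return here.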
Example #8
void CParam::init_sigmamu(CData &Data) {
  mu_bar = ColumnVector(n_var_independent);
  ColumnVector log_obs_mean(n_var), log_obs_sd(n_var) ;
  Matrix logD_editpassing(n_sample-Data.n_faulty,n_var) ;
  for (int i_sample=1, count = 1; i_sample<=n_sample; i_sample++){
    if (Data.is_case(i_sample,0)) {
      logD_editpassing.row(count++) = Data.log_D_Observed.row(i_sample);
    }
  }

  for (int i_var=1; i_var<=n_var; i_var++){
    ColumnVector temp_col = logD_editpassing.column(i_var) ;
    log_obs_mean(i_var) = 1.0/(n_sample-Data.n_faulty)*(temp_col.sum()) ;
    ColumnVector temp_mean(n_sample-Data.n_faulty) ; temp_mean = log_obs_mean(i_var) ;
    Matrix SumSq = (temp_col-temp_mean).t()*(temp_col-temp_mean) ;
    log_obs_sd(i_var) = sqrt( 1.0/(n_sample-Data.n_faulty-1)*SumSq(1,1) ) ;
  }

  Data.UpdateCompactVector(mu_bar,log_obs_mean);
  DiagonalMatrix LSigma_temp(n_var_independent);
  for (int i=1; i<= n_var_independent; i++){
    LSigma_temp(i) = log_obs_sd(i);
  }
  DiagonalMatrix LSigma_temp_i = LSigma_temp.i();

  LSIGMA = vector<LowerTriangularMatrix>(K);
  LSIGMA_i = vector<LowerTriangularMatrix>(K);
  SIGMA = vector<SymmetricMatrix>(K);
  for (int k=0 ; k<K; k++) {
    LSIGMA[k] = LSigma_temp;
    LSIGMA_i[k] = LSigma_temp_i;
    SIGMA[k] << LSigma_temp * LSigma_temp; // LSigma_temp is diagonal, so this equals LSigma_temp * LSigma_temp.t()
  }
  logdet_and_more = ColumnVector(K); logdet_and_more = -0.5*n_var* LOG_2_PI + logdet(LSIGMA_i[0]);

  Mu = Matrix((n_var_independent),K);      // Note that mu_k is Mu.column(k)
  for (int k=1; k<=K; k++){
    Mu.column(k) = mu_bar ;  // starting value
  }
}
Example #9
void testMatrixMatrixFunc() {
  typedef AMD::MatrixMatrixFunc<AMD::SymbolicMatrixMatlab,
				AMD::SymbolicScalarMatlab> MMFunc;
  typedef AMD::ScalarMatrixFunc<AMD::SymbolicMatrixMatlab,
				AMD::SymbolicScalarMatlab> SMFunc;

  std::string ans;
  AMD::SymbolicMatrixMatlab x("X",3,3);
  MMFunc fx(x,true); // a matrix variable
  AMD::SymbolicMatrixMatlab y("Y",3,3);
  MMFunc fy(y); // a matrix variable
  SMFunc func;

  ans = "Y'";
  // d/dX trace(X*Y)=Y^T
  func = trace(fx*fy);
  assert(func.derivativeVal.getString()==ans);

  // d/dX trace(X^T*Y^T)=Y^T
  func = trace(transpose(fx)*transpose(fy));
  assert(func.derivativeVal.getString()==ans);

  // d/dX trace((X*Y)^T)=Y^T
  func = trace(transpose(fx*fy));
  assert(func.derivativeVal.getString()==ans);


  ans = "Y";
  // d/dX trace(X*Y^T) = Y
  func = trace(fx*transpose(fy));
  assert(func.derivativeVal.getString()==ans);

  // d/dX trace(Y*X^T) = Y
  func = trace(fy*transpose(fx));
  assert(func.derivativeVal.getString()==ans);

  ans = "eye(3)";
  // d/dX trace(X) = I
  func = trace(fx);
  assert(func.derivativeVal.getString()==ans);

  // d/dX trace(Y+X^T+Y) = I
  func = trace(fy+transpose(fx)+fy);
  assert(func.derivativeVal.getString()==ans);

  func = trace(fy*inv(fx));
  ans = "(((-inv(X))*Y)*inv(X))'";
  assert(func.derivativeVal.getString()==ans);

  func = trace(fy-fx);
  ans = "(-eye(3))";
  assert(func.derivativeVal.getString()==ans);

  func = logdet(fx);
  ans = "inv(X)'";
  assert(func.derivativeVal.getString()==ans);

  func = logdet(transpose(fx));
  assert(func.derivativeVal.getString()==ans);

  func = logdet(fy+fx);
  ans = "inv(Y+X)'";
  assert(func.derivativeVal.getString()==ans);

  func = logdet(fy-fx);
  ans = "(-inv(Y-X))'";
  assert(func.derivativeVal.getString()==ans);

  func = logdet(inv(transpose(fx)));
  ans = "(-inv(X)')";
  assert(func.derivativeVal.getString()==ans);

  std::cout << "d/dX " << func.functionVal.getString() 
	    << " = " << func.derivativeVal.getString() << std::endl;

}
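
For reference, the identities exercised by these asserts are standard matrix-calculus results, written in the MATLAB-style strings the test expects ("Y'", "Y", "eye(3)", "inv(X)'", ...):

  \frac{\partial}{\partial X}\operatorname{tr}(XY) = Y^\top, \quad
  \frac{\partial}{\partial X}\operatorname{tr}(XY^\top) = Y, \quad
  \frac{\partial}{\partial X}\operatorname{tr}(X) = I,
  \frac{\partial}{\partial X}\operatorname{tr}(YX^{-1}) = -\bigl(X^{-1}YX^{-1}\bigr)^\top, \quad
  \frac{\partial}{\partial X}\log\det X = X^{-\top}.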