void pade (gf_view<refreq> &gr, gf_view<imfreq> const &gw, int n_points, double freq_offset) { // make sure the GFs have the same structure //assert(gw.shape() == gr.shape()); // copy the tail. it doesn't need to conform to the pade approximant gr.singularity() = gw.singularity(); auto sh = gw.data().shape().front_pop(); int N1 = sh[0], N2 = sh[1]; for (int n1=0; n1<N1; n1++) { for (int n2=0; n2<N2; n2++) { arrays::vector<dcomplex> z_in(n_points); // complex points arrays::vector<dcomplex> u_in(n_points); // values at these points arrays::vector<dcomplex> a(n_points); // corresponding Pade coefficients for (int i=0; i < n_points; ++i) z_in(i) = gw.mesh()[i]; for (int i=0; i < n_points; ++i) u_in(i) = gw.on_mesh(i)(n1,n2); triqs::utility::pade_approximant PA(z_in,u_in); gr() = 0.0; for (auto om : gr.mesh()) { dcomplex e = om + dcomplex(0.0,1.0)*freq_offset; gr[om](n1,n2) = PA(e); } } } }
void CParam::S2_add(Uniform &randUnif,CData &Data) { int n_needtoupdate = 0; for (int i_faulty=1; i_faulty<=Data.n_faulty; i_faulty++){ int i_original = Data.Faulty2Original[i_faulty-1]; ColumnVector item_by_bal; ColumnVector s_i = S_Mat.column(i_faulty); ColumnVector item_by_rnorm = Data.get_item_by_norm_indicator(s_i,item_by_bal); //Generate from normal distribution if ( item_by_rnorm.sum() >= 1 ) { // if no random number, other values by balanc edits remain same n_needtoupdate++; ColumnVector mu_z_i = Mu.column(z_in(i_original)) ; ColumnVector tilde_y_i = Data.log_D_Observed.row(i_original).t(); ColumnVector s_1_compact = Data.get_compact_vector(item_by_rnorm); ColumnVector Mu_1i = subvector(mu_z_i,s_1_compact); LowerTriangularMatrix LSigma_1i_i; ColumnVector y_q(n_var); double log_cond_norm_q = calculate_log_cond_norm(Data, i_original, item_by_rnorm, tilde_y_i, y_q, true, LSigma_1i_i, s_i); // MODIFIED 2015/02/16 ColumnVector y_i = (Y_in.row(i_original)).t() ; // ColumnVector y_part_i = subvector(y_i,item_by_rnorm); // Put values from balance edits ColumnVector x_q = exp_ColumnVector(y_q) ; Data.set_balance_edit_values_for_x_q(s_i, x_q, item_by_bal); // CHANGED by Hang, 2014/12/29 // double log_cond_norm_i = log_MVN_fn(y_part_i,Mu_1i,LSigma_1i_i); double log_cond_norm_i = calculate_log_cond_norm(Data, i_original, item_by_rnorm, tilde_y_i, y_q, false, LSigma_1i_i, s_i); // CHANGED 2015/01/27 , // MODIFIED 2015/02/16 // Acceptance/Rejection if (Data.PassEdits(x_q)) { // Check constraints y_q = log_ColumnVector(x_q) ; ColumnVector y_compact_q = Data.get_compact_vector(y_q); ColumnVector y_compact_i = Data.get_compact_vector(y_i); double log_full_norm_q = log_MVN_fn(y_compact_q,mu_z_i,LSIGMA_i[z_in(i_original)-1],logdet_and_more(z_in(i_original))); double log_full_norm_i = log_MVN_fn(y_compact_i,mu_z_i,LSIGMA_i[z_in(i_original)-1],logdet_and_more(z_in(i_original))); // Calculate acceptance ratio double logNum = log_full_norm_q - log_cond_norm_q; double logDen = 
log_full_norm_i - log_cond_norm_i; accept_rate(2) = exp( logNum - logDen ); if (randUnif.Next() < accept_rate(2)){ Y_in.row(i_original) = y_q.t(); is_accept(2)++; } } } } is_accept(2) = is_accept(2) / n_needtoupdate; }
// Tally how many samples are currently assigned to each of the K mixture
// components, based on the 1-based latent labels stored in z_in.
ColumnVector CParam::GetComponentCounts() {
  ColumnVector counts(K);
  counts = 0;
  for (int i_sample = 1; i_sample <= n_sample; ++i_sample) {
    counts(z_in(i_sample)) += 1;
  }
  return counts;
}
// Evaluate the Pade continued fraction at complex energy e using the
// standard three-term recurrence for the convergents A_n / B_n, with
// interpolation nodes z_in and precomputed coefficients a.
COMPLEX Pade_approximant::operator()(COMPLEX e) const {
  int n_coef = a.size();
  COMPLEX A_prev(0), B_prev(1.0);   // A_{n-1}, B_{n-1}
  COMPLEX A_cur = a(0);             // A_n
  COMPLEX B_cur(1.0);               // B_n
  for (int i = 0; i + 1 < n_coef; ++i) {
    COMPLEX w = (e - z_in(i)) * a(i + 1);
    COMPLEX A_next = A_cur + w * A_prev;
    COMPLEX B_next = B_cur + w * B_prev;
    A_prev = A_cur;
    A_cur  = A_next;
    B_prev = B_cur;
    B_cur  = B_next;
  }
  return A_cur / B_cur;
}
// Analytic continuation of the Matsubara Green's function Gw to the real
// axis (output Ge) via Pade, using the first N_Matsubara_Frequencies points;
// frequencies beyond the stored mesh are filled from the tail expansion.
void pade(GF_Bloc_ImFreq const & Gw, GF_Bloc_ReFreq & Ge, int N_Matsubara_Frequencies, double Freq_Offset) {
  check_have_same_structure (Gw,Ge,false,true);
  assert (Gw.mesh.index_min==0);
  assert (Ge.mesh.index_min==0);

  double Beta = Gw.Beta;
  // Fermionic Matsubara frequencies are odd multiples of pi/Beta,
  // bosonic ones even.
  double omegaShift = (Gw.Statistic==Fermion ? 1 : 0);

  Array<COMPLEX,1> z_in(N_Matsubara_Frequencies); // Matsubara nodes i*w_n
  firstIndex i;
  z_in = I*Pi/Beta*(2*i+omegaShift);

  // BUG FIX: copy the tail FROM the input Gw TO the output Ge. The original
  // assignment ran the other way (Gw.tail = Ge.tail), overwriting the
  // input's tail with the uninitialized output tail; compare the newer
  // gf_view overload, which does gr.singularity() = gw.singularity().
  // The tail doesn't need to conform to the Pade approximant.
  Ge.tail = Gw.tail;

  int N1 = Gw.N1, N2 = Gw.N2;

  // BUG FIX: zero the output once, up front. The original called Ge.zero()
  // inside the (n1,n2) loops, erasing the data written on every previous
  // iteration so only the last component survived.
  Ge.zero();

  for (int n1=1; n1<=N1; n1++)
    for (int n2=1; n2<=N2; n2++) {
      int N = Gw.mesh.len();
      Array<COMPLEX,1> u_in(N_Matsubara_Frequencies); // Values at the Matsubara frequencies
      // (removed unused local 'a' — Pade_approximant computes its own
      //  coefficients internally)
      for(int i=0; i < N_Matsubara_Frequencies; ++i){
        // Past the stored mesh, evaluate the high-frequency tail instead.
        u_in(i) = (i < N ? Gw.data_const(n1,n2,i) : Gw.tail.eval(z_in(i))(n1,n2));
      }

      Pade_approximant PA(z_in,u_in);

      int Ne = Ge.mesh.len();
      for (int i=0; i < Ne; ++i) {
        // Evaluate slightly above the real axis.
        COMPLEX e = Ge.mesh[i] + I*Freq_Offset;
        Ge.data(n1,n2,i) = PA(e);
      }
    }
}
void CParam::init_z_in() { z_in = ColumnVector(n_sample) ; for (int i_sample=1; i_sample<=n_sample; i_sample++) { // calculate pi_tilde_in ColumnVector logN_unnorm(K); ColumnVector y_compact = Y_in_compact.row(i_sample).t() ; for (int k=1; k<=K; k++) { ColumnVector mu_k = Mu.column(k); logN_unnorm(k) = log_MVN_fn( y_compact, mu_k, LSIGMA_i[k-1],logdet_and_more(k)); } double max_logN_unnorm = logN_unnorm.maximum(); ColumnVector pi_tilde_in_unnorm(K); pi_tilde_in_unnorm = 0.0 ; for (int k=1; k<=K; k++){ pi_tilde_in_unnorm(k) = pi(k) * exp(logN_unnorm(k)-max_logN_unnorm); } ColumnVector pi_tilde_in = (1.0/pi_tilde_in_unnorm.sum()) * pi_tilde_in_unnorm ; z_in(i_sample) = rdiscrete_fn(pi_tilde_in); } }
// One Metropolis-Hastings sweep over the faulty records: for each record,
// jointly propose a new error-location vector s_q (via the feasibility map)
// and new imputed values y_q, then accept or reject. On exit, is_accept(1)
// holds the acceptance fraction over all faulty records.
// NOTE(review): y_q is reused across both calculate_log_cond_norm calls and
// then re-derived from x_q — the statement order below is load-bearing.
void CParam::S1(int iter, Uniform &randUnif, CData &Data, CFeasibilityMap &FM, int n_simul) {
  is_accept(1) = 0;
  for (int i_faulty=1; i_faulty<=Data.n_faulty; i_faulty++) {
    if (FM.useMap) {FM.pmm->iter = iter;}
    int i_original = Data.Faulty2Original[i_faulty-1];
    // Propose a move of the error-indicator; g_mode/g_option are the
    // proposal-density factors returned by the feasibility map.
    double g_option_q, g_mode_q; int what_type_move;
    ColumnVector s_i = S_Mat.column(i_faulty);
    int tau_q = FM.EvaluateMove(i_original, Data, s_i, iter, what_type_move, g_option_q, g_mode_q, true);
    // calculate g_mode_i & g_option_i (reverse-direction proposal density)
    ColumnVector s_q = CFeasibilityMap::tau_to_s_fn(tau_q, n_var);
    double g_option_i, g_mode_i;
    FM.EvaluateMove(i_original, Data, s_q, iter, what_type_move, g_option_i, g_mode_i, false);
    // calculate g_s_q & g_s_i
    double g_s_q = g_mode_q * g_option_q;
    double g_s_i = g_mode_i * g_option_i;
    // whether drawn by item_by_rnorm and balance edits
    ColumnVector item_by_bal;
    ColumnVector item_by_rnorm;
    if (Data.is_case(i_original,1)) {
      item_by_rnorm = Data.get_item_by_norm_indicator(s_q,item_by_bal);
    } else {
      item_by_rnorm = Data.copy_non_balance_edit(s_q);
    }
    //Generate from normal distribution
    ColumnVector mu_z_i = Mu.column(z_in(i_original)) ;
    ColumnVector tilde_y_i = Data.log_D_Observed.row(i_original).t();
    ColumnVector y_q(n_var);
    LowerTriangularMatrix LSigma_i;
    // is_q=true: draws the free components into y_q and returns the
    // proposal's conditional-normal log-density.
    double log_cond_norm_q = calculate_log_cond_norm(Data, i_original, item_by_rnorm, tilde_y_i, y_q, true, LSigma_i, s_q); // MODIFIED 2015/02/16
    // Put values from balance edits
    ColumnVector x_q = exp_ColumnVector(y_q);
    if (Data.is_case(i_original,1)) {
      Data.set_balance_edit_values_for_x_q(s_q, x_q, item_by_bal); // CHANGED by Hang, 2014/12/29
    } else {
      Data.update_full_x_for_balance_edit(x_q);
    }
    // Acceptance/Rejection
    if (Data.PassEdits(x_q)) { // Check constraints
      ColumnVector item_i_by_rnorm;
      if (Data.is_case(i_original,1)) {
        item_i_by_rnorm = Data.get_item_by_norm_indicator(s_i,item_by_bal);
      } else {
        item_i_by_rnorm = Data.copy_non_balance_edit(s_i);
      }
      // is_q=false: scores the CURRENT record values (y_q not modified here).
      double log_cond_norm_i = calculate_log_cond_norm(Data, i_original, item_i_by_rnorm, tilde_y_i, y_q, false, LSigma_i, s_i); // MODIFIED 2015/02/16
      // Re-derive y_q from x_q so it contains the balance-edit entries too.
      y_q = log_ColumnVector(x_q) ;
      ColumnVector y_i = (Y_in.row(i_original)).t();
      // Calculate acceptance ratio
      ColumnVector y_compact_q = Data.get_compact_vector(y_q);
      ColumnVector y_compact_i = Data.get_compact_vector(y_i);
      double log_full_norm_q = log_MVN_fn(y_compact_q,mu_z_i,LSIGMA_i[z_in(i_original)-1],logdet_and_more(z_in(i_original)));
      double log_full_norm_i = log_MVN_fn(y_compact_i,mu_z_i,LSIGMA_i[z_in(i_original)-1],logdet_and_more(z_in(i_original)));
      double log_f_y_tilde_q = 0;
      if (FM.useMap && Data.is_case(i_original,2)) {
        log_f_y_tilde_q = FM.Simulate_logUnif_case2(CFeasibilityMap::s_to_tau_fn(s_q),i_original,n_simul,randUnif);
      } else {
        log_f_y_tilde_q = Data.get_logUnif_y_tilde(s_q, CFeasibilityMap::s_to_tau_fn(s_q),i_original);
      }
      // MH ratio in log space: target density with proposal corrections for
      // both the indicator move (g_s_*) and the value draw (log_cond_norm_*).
      double logNum = log_full_norm_q + log_f_y_tilde_q - log_cond_norm_q - log(g_s_q);
      double logDen = log_full_norm_i + logUnif_y_tilde(i_faulty) - log_cond_norm_i - log(g_s_i);
      accept_rate(1) = exp( logNum - logDen ) ;
      if ( randUnif.Next() < accept_rate(1) ){
        // Accept: commit the new imputation, indicator, and cached density.
        Y_in.row(i_original) = y_q.t() ;
        S_Mat.column(i_faulty) = s_q;
        logUnif_y_tilde(i_faulty) = log_f_y_tilde_q;
        is_accept(1)++;
      }
    }
  }
  // Convert the acceptance count into a rate over all faulty records.
  is_accept(1) = is_accept(1) / Data.n_faulty;
}
double CParam::calculate_log_cond_norm(CData &Data, int i_original, ColumnVector &item_by_rnorm, ColumnVector &tilde_y_i, ColumnVector &y_q, bool is_q, LowerTriangularMatrix &LSigma_1_i, ColumnVector &s_q) { // MODIFIED 2015/02/16 double log_cond_norm; if ( item_by_rnorm.sum() >= 1 ) { ColumnVector mu_z_i = Mu.column(z_in(i_original)); ColumnVector s_1_compact = Data.get_compact_vector(item_by_rnorm); ColumnVector Mu_1 = subvector(mu_z_i,s_1_compact); Matrix Sigma_1 = Submatrix_elem_2(SIGMA[z_in(i_original)-1],s_1_compact,s_1_compact); // ADDED 2015/01/27 ColumnVector s_q_compact = Data.get_compact_vector(s_q) ; // MODIFIED 2015/02/16 ColumnVector VectorOne = s_q_compact ; VectorOne = 1 ; // MODIFIED 2015/02/16 ColumnVector s_0_compact = VectorOne - s_q_compact ; // MODIFIED 2015/02/16 int sum_s_0_comp = s_0_compact.sum() ; LowerTriangularMatrix LSigma_cond ; ColumnVector Mu_cond ; if ( sum_s_0_comp>0 ){ ColumnVector Mu_0 = subvector(mu_z_i,s_0_compact); // (s_1_compact.sum()) vector Matrix Sigma_0 = Submatrix_elem_2(SIGMA[z_in(i_original)-1],s_0_compact,s_0_compact); Matrix Sigma_10 = Submatrix_elem_2(SIGMA[z_in(i_original)-1],s_1_compact,s_0_compact); ColumnVector y_tilde_compact = Data.get_compact_vector(tilde_y_i) ; ColumnVector y_tilde_0 = subvector(y_tilde_compact,s_0_compact) ; SymmetricMatrix Sigma_0_symm ; Sigma_0_symm << Sigma_0 ; LowerTriangularMatrix LSigma_0 = Cholesky(Sigma_0_symm) ; Mu_cond = Mu_1 + Sigma_10 * (LSigma_0.i()).t()*LSigma_0.i() * ( y_tilde_0-Mu_0 ) ; Matrix Sigma_cond = Sigma_1 - Sigma_10 * (LSigma_0.i()).t()*LSigma_0.i() * Sigma_10.t() ; SymmetricMatrix Sigma_cond_symm ; Sigma_cond_symm << Sigma_cond ; int sum_s_1_comp = s_1_compact.sum() ; DiagonalMatrix D(sum_s_1_comp) ; Matrix V(sum_s_1_comp,sum_s_1_comp) ; Jacobi(Sigma_cond_symm,D,V) ; int is_zero_exist = 0 ; for (int i_var=1; i_var<=sum_s_1_comp; i_var++){ if ( D(i_var) < 1e-9 ){ D(i_var) = 1e-9 ; is_zero_exist = 1 ; } } // for (int i_var=1; i_var<=sum_s_1_comp; i_var++) if ( 
is_zero_exist == 1 ){ Sigma_cond_symm << V * D * V.t() ; if ( msg_level >= 1 ) { Rprintf( " Warning: When generating y_j from conditional normal(Mu_-j,Sigma_-j), Sigma_-j is non-positive definite because of computation precision. The eigenvalues D(j,j) smaller than 1e-9 is replaced with 1e-9, and let Sigma_-j = V D V.t().\n"); } } // LSigma_cond = Cholesky(Sigma_cond_symm); // y_part = rMVN_fn(Mu_cond,LSigma_cond); // log_cond_norm = log_MVN_fn(y_part,Mu_cond,LSigma_cond) ; } else { Mu_cond = Mu_1 ; SymmetricMatrix Sigma_1_symm = Submatrix_elem(SIGMA[z_in(i_original)-1],s_1_compact); LSigma_cond = Cholesky(Sigma_1_symm) ; // SymmetricMatrix Sigma_1_symm ; Sigma_1_symm << Sigma_1 ; // LowerTriangularMatrix LSigma_1 = Cholesky_Sigma_star_symm(Sigma_1_symm); // y_part = rMVN_fn(Mu_1,LSigma_1); // log_cond_norm = log_MVN_fn(y_part,Mu_1,LSigma_1) ; } // if ( sum_s_0_comp>0 ) else ... // ADDED 2015/01/26 LowerTriangularMatrix LSigma_cond_i = LSigma_cond.i() ; // LowerTriangularMatrix LSigma_1 = Cholesky(Sigma_1); // LSigma_1_i = LSigma_1.i(); ColumnVector y_part; if (is_q) { y_part = rMVN_fn(Mu_cond,LSigma_cond); } else { ColumnVector y_i = (Y_in.row(i_original)).t(); y_part = subvector(y_i,item_by_rnorm); } log_cond_norm = log_MVN_fn(y_part,Mu_cond,LSigma_cond_i); if (is_q) { y_q = tilde_y_i; for ( int temp_j = 1,temp_count1 = 0; temp_j<=n_var; temp_j++ ){ if ( item_by_rnorm(temp_j)==1 ){ y_q(temp_j) = y_part(++temp_count1); } } } // if (is_q) } else { log_cond_norm = 0; if (is_q) { y_q = tilde_y_i;} } // if ( item_by_rnorm.sum() > = 1 ) else .. return log_cond_norm; }
// Map a desired Cartesian twist v_in to joint velocities qdot_out (arm) and
// qdot_base_out (platform), using a damped weighted least-squares inversion
// of the combined arm+base Jacobian. Returns the status of svd.calculate
// (the SVD path below is legacy; its solution is overwritten — see notes).
int augmented_solver::CartToJnt(const JntArray& q_in, Twist& v_in, JntArray& qdot_out, JntArray& qdot_base_out)
{
    double damping_factor = 0.01;
    //Let the ChainJntToJacSolver calculate the jacobian "jac" for
    //the current joint positions "q_in"
    jnt2jac.JntToJac(q_in,jac);
    //v_in.vel.z(0.01);
    //Create standard platform jacobian
    // Base contributes x/y translation and z rotation, scaled by
    // base_to_arm_factor_; zero when the base is not active.
    Eigen::Matrix<double,6,3> jac_base;
    jac_base.setZero();
    if(base_is_actived_)
    {
        jac_base(0,0) = base_to_arm_factor_;
        jac_base(1,1) = base_to_arm_factor_;
        jac_base(5,2) = base_to_arm_factor_;
    }
    //Put full jacobian matrix together
    Eigen::Matrix<double, 6, Eigen::Dynamic> jac_full;
    jac_full.resize(6,chain.getNrOfJoints() + jac_base.cols());
    jac_full << jac.data, jac_base;
    int num_dof = chain.getNrOfJoints() + jac_base.cols();
    if(DEBUG)
        std::cout << "Combined jacobian:\n " << jac_full << "\n";
    //Weighting Matrices
    // W_v: damping on joint velocities; W_e: task-space weights (identity).
    Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> W_v;
    W_v.resize(num_dof,num_dof);
    W_v.setZero();
    for(int i=0 ; i<num_dof ; i++)
        W_v(i,i) = damping_factor;
    Eigen::Matrix<double, 6,6> W_e;
    W_e.setZero();
    for(unsigned int i=0 ; i<6 ; i++)
        W_e(i,i) = 1;
    if(DEBUG)
        std::cout << "Weight matrix defined\n";
    //W_e.setIdentity(6,6);
    //Definition of additional task
    // NOTE(review): dimensions 7 and 10 are hard-coded here (presumably a
    // 7-joint arm plus 3 base DOF); if chain.getNrOfJoints() != 7 the
    // jac_c^T * W_c * jac_c term (10x10) will not match damped_inversion
    // (num_dof x num_dof) — confirm against the robot configuration.
    // NOTE(review): W_c is all zeros, so this additional task currently
    // contributes nothing to the solution.
    Eigen::Matrix<double, 7, 1> z_in;
    z_in.setZero();
    z_in(0,0) = 1.57;
    z_in(1,0) = 1.57;
    z_in(2,0) = 1.57;
    z_in(3,0) = 1.57;
    z_in(4,0) = 1.57;
    z_in(5,0) = 1.57;
    z_in(6,0) = 1.57;
    Eigen::Matrix<double, 7 , 10> jac_c;
    jac_c.setZero();
    for(unsigned int i=0 ; i<7 ; i++)
        jac_c(i,i) = 1;
    Eigen::Matrix<double, 7, 7> W_c;
    W_c.setZero();
    //Inversion
    // qdot_out = (jac_full^T * W_e * jac_full + jac_augmented^T * W_c * jac_augmented + W_v)^-1(jac_full^T * W_e * v_in + jac_augmented^T * W_c * z_in)
    Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> damped_inversion;
    damped_inversion.resize(num_dof,num_dof);
    damped_inversion = (jac_full.transpose() * W_e * jac_full) + (jac_c.transpose() * W_c * jac_c) + W_v;
    if(DEBUG)
        std::cout << "Inversion done\n";
    Eigen::Matrix<double, Eigen::Dynamic, Eigen::Dynamic> q_dot_conf_control;
    // Pack the KDL twist into an Eigen vector (linear then angular parts).
    Eigen::Matrix<double, 6, 1> v_in_eigen;
    v_in_eigen.setZero();
    v_in_eigen(0,0) = v_in.vel.x();
    v_in_eigen(1,0) = v_in.vel.y();
    v_in_eigen(2,0) = v_in.vel.z();
    v_in_eigen(3,0) = v_in.rot.x();
    v_in_eigen(4,0) = v_in.rot.y();
    v_in_eigen(5,0) = v_in.rot.z();
    q_dot_conf_control = damped_inversion.inverse() * ((jac_full.transpose() * W_e * v_in_eigen) + (jac_c.transpose() * W_c * z_in));
    if(DEBUG)
        std::cout << "Endergebnis: \n" << q_dot_conf_control << "\n";
    //Do a singular value decomposition of "jac" with maximum
    //iterations "maxiter", put the results in "U", "S" and "V"
    //jac = U*S*Vt
    // NOTE(review): everything from here to "New calculation" computes a
    // legacy truncated-SVD solution into qdot_out that is then overwritten
    // below; only the SVD return code 'ret' is still used. Dead work on
    // every call — candidate for removal once 'ret' is sourced elsewhere.
    int ret = svd.calculate(jac,U,S,V,maxiter);
    double sum;
    unsigned int i,j;
    // We have to calculate qdot_out = jac_pinv*v_in
    // Using the svd decomposition this becomes(jac_pinv=V*S_pinv*Ut):
    // qdot_out = V*S_pinv*Ut*v_in
    //first we calculate Ut*v_in
    for (i=0;i<jac.columns();i++) {
        sum = 0.0;
        for (j=0;j<jac.rows();j++) {
            sum+= U[j](i)*v_in(j);
        }
        //If the singular value is too small (<eps), don't invert it but
        //set the inverted singular value to zero (truncated svd)
        tmp(i) = sum*(fabs(S(i))<eps?0.0:1.0/S(i));
        //tmp(i) = sum*1.0/S(i);
    }
    //tmp is now: tmp=S_pinv*Ut*v_in, we still have to premultiply
    //it with V to get qdot_out
    for (i=0;i<jac.columns();i++) {
        sum = 0.0;
        for (j=0;j<jac.columns();j++) {
            sum+=V[i](j)*tmp(j);
        }
        //Put the result in qdot_out
        qdot_out(i)=sum;
    }
    if(DEBUG)
        std::cout << "Solution SVD: " << qdot_out(0) << " " << qdot_out(1) << " " << qdot_out(2) << " " << qdot_out(3) << " " << qdot_out(4) << " " << qdot_out(5) << " " << qdot_out(6) << "\n====\n";
    //return the return value of the svd decomposition
    //New calculation
    // NOTE(review): assumes exactly 7 arm joints here as well.
    for(unsigned int i=0;i<7;i++)
    {
        qdot_out(i)=q_dot_conf_control(i,0);
    }
    if(base_is_actived_)
    {
        // Rows 7..9 of the solution are the base DOF velocities.
        for(unsigned int i = 7; i<7+3; i++)
            qdot_base_out(i-7) = q_dot_conf_control(i,0);
    }
    if(DEBUG)
        std::cout << "Solution ConfControl: " << qdot_out(0) << " " << qdot_out(1) << " " << qdot_out(2) << " " << qdot_out(3) << " " << qdot_out(4) << " " << qdot_out(5) << " " << qdot_out(6) << "\n====\n";
    return ret;
}