void ContactDynamics::applySolution() {
    const int c = getNumContacts();

    // First compute the external forces
    VectorXd f_n = mX.head(c);
    VectorXd f_d = mX.segment(c, c * mNumDir);
    VectorXd lambda = mX.tail(c);
    VectorXd forces = mN * f_n;
    forces.noalias() += mB * f_d;

    // Next, apply the external forces skeleton by skeleton.
    int startRow = 0;
    for (int i = 0; i < getNumSkels(); i++) {
        if (mSkels[i]->getImmobileState())
            continue;
        int nDof = mSkels[i]->getNumDofs();
        mConstrForces[i] = forces.segment(startRow, nDof);
        startRow += nDof;
    }

    for (int i = 0; i < c; i++) {
        Contact& contact = mCollisionChecker->getContact(i);
        contact.force.noalias() =
            getTangentBasisMatrix(contact.point, contact.normal) * f_d.segment(i * mNumDir, mNumDir);
        contact.force += contact.normal * f_n[i];
    }
}
double probutils::eigpower (const MatrixXd& A, VectorXd& eigvec) {
    // Check if A is square
    if (A.rows() != A.cols())
        throw invalid_argument("Matrix A must be square!");

    // Check if A is a scalar
    if (A.rows() == 1) {
        eigvec.setOnes(1);
        return A(0,0);
    }

    // Initialise working vectors
    VectorXd v = VectorXd::LinSpaced(A.rows(), -1, 1);
    VectorXd oeigvec(A.rows());

    // Initialise eigenvalue and eigenvectors etc
    double eigval = v.norm();
    double vdist = numeric_limits<double>::infinity();
    eigvec = v/eigval;

    // Loop until eigenvector converges or we reach max iterations
    for (int i=0; (vdist > EIGCONTHRESH) && (i < MAXITER); ++i) {
        oeigvec = eigvec;
        v.noalias() = A * oeigvec;
        eigval = v.norm();
        eigvec = v/eigval;
        vdist = (eigvec - oeigvec).norm();
    }

    return eigval;
}
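A minimal usage sketch of eigpower above, assuming a "probutils.h" header that declares it along with the EIGCONTHRESH and MAXITER constants it relies on; the header name and the test matrix are illustrative, not taken from the library itself.

// Usage sketch (illustrative only): run the power iteration on a small
// symmetric positive-definite matrix and cross-check the dominant
// eigenvalue against Eigen's dense solver.
#include <iostream>
#include <Eigen/Dense>
#include "probutils.h"   // hypothetical header exposing probutils::eigpower

int main() {
    Eigen::MatrixXd A(3, 3);
    A << 3, 1, 0,
         1, 2, 1,
         0, 1, 1;                    // symmetric positive definite
    Eigen::VectorXd v;
    double lambda = probutils::eigpower(A, v);

    Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> es(A);
    std::cout << "power iteration: " << lambda
              << ", dense solver: " << es.eigenvalues().maxCoeff() << std::endl;
    return 0;
}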
template <class FType>
void MacauOnePrior<FType>::sample_latents(
    Eigen::MatrixXd &U,
    const Eigen::SparseMatrix<double> &Ymat,
    double mean_value,
    const Eigen::MatrixXd &V,
    double alpha,
    const int num_latent)
{
  const int N = U.cols();
  const int D = U.rows();

#pragma omp parallel for schedule(dynamic, 4)
  for (int i = 0; i < N; i++) {
    const int nnz = Ymat.outerIndexPtr()[i + 1] - Ymat.outerIndexPtr()[i];
    VectorXd Yhat(nnz);

    // precalculating Yhat and Qi
    int idx = 0;
    VectorXd Qi = lambda;
    for (SparseMatrix<double>::InnerIterator it(Ymat, i); it; ++it, idx++) {
      Qi.noalias() += alpha * V.col(it.row()).cwiseAbs2();
      Yhat(idx) = mean_value + U.col(i).dot( V.col(it.row()) );
    }

    VectorXd rnorms(num_latent);
    bmrandn_single(rnorms);

    for (int d = 0; d < D; d++) {
      // computing Lid
      const double uid = U(d, i);
      double Lid = lambda(d) * (mu(d) + Uhat(d, i));

      idx = 0;
      for (SparseMatrix<double>::InnerIterator it(Ymat, i); it; ++it, idx++) {
        const double vjd = V(d, it.row());
        // L_id += alpha * (Y_ij - k_ijd) * v_jd
        Lid += alpha * (it.value() - (Yhat(idx) - uid*vjd)) * vjd;
      }

      // Now use Lid and Qid to update uid
      double uid_old = U(d, i);
      double uid_var = 1.0 / Qi(d);

      // sampling new u_id ~ Norm(Lid / Qid, 1/Qid)
      U(d, i) = Lid * uid_var + sqrt(uid_var) * rnorms(d);

      // updating Yhat
      double uid_delta = U(d, i) - uid_old;
      idx = 0;
      for (SparseMatrix<double>::InnerIterator it(Ymat, i); it; ++it, idx++) {
        Yhat(idx) += uid_delta * V(d, it.row());
      }
    }
  }
}
void ContactDynamics::updateTauStar() {
    int startRow = 0;
    for (int i = 0; i < getNumSkels(); i++) {
        if (mSkels[i]->getImmobileState())
            continue;
        VectorXd tau = mSkels[i]->getExternalForces() + mSkels[i]->getInternalForces();
        VectorXd tauStar = mSkels[i]->getMassMatrix() * mSkels[i]->get_dq();
        tauStar.noalias() -= (mDt * (mSkels[i]->getCombinedVector() - tau));
        mTauStar.segment(startRow, tauStar.rows()) = tauStar;
        startRow += tauStar.rows();
    }
}
bool Cepstrum::process(Ports<InputBuffer*>& inp, Ports<OutputBuffer*>& outp) {
    assert(inp.size() == 1);
    InputBuffer* in = inp[0].data;
    if (in->empty())
        return false;

    assert(outp.size() == 1);
    OutputBuffer* out = outp[0].data;

    safeLogOp<double> slop;
    VectorXd outDct;
    while (!in->empty()) {
        // Map the next input token, take a safe log of each coefficient,
        // and apply the DCT plan.
        Map<VectorXd> inData(in->readToken(), in->info().size);
        outDct.noalias() = m_dctPlan * inData.unaryExpr(slop);
        // Copy the cepstral coefficients to the output token, skipping the
        // first m_ignoreFirst values.
        memcpy(out->writeToken(), outDct.data() + m_ignoreFirst,
               out->info().size * sizeof(double));
        in->consumeToken();
    }
    return true;
}
// Append a single element to the end of an Eigen vector.
void insertElem(VectorXd &vec, const double &elem) {
    int elems = vec.size();
    // The comma initialiser fills a temporary (elems+1)-vector with the old
    // contents of vec followed by elem; .finished() evaluates it before the
    // assignment, so reading vec on the right-hand side is safe.
    vec.noalias() = (VectorXd(elems+1) << vec, elem).finished();
}
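An alternative sketch of the same append operation, not part of the code above: grow the vector in place with Eigen's conservativeResize, which preserves the existing coefficients and avoids building the temporary used by the comma initialiser.

#include <Eigen/Dense>
using Eigen::VectorXd;

// Hypothetical in-place variant of insertElem: resize by one (keeping the
// existing coefficients) and write the new element into the last slot.
void insertElemInPlace(VectorXd &vec, double elem) {
    vec.conservativeResize(vec.size() + 1);
    vec(vec.size() - 1) = elem;
}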
// Validate gradients with finite differences.
void GPCMOptimization::validateGradients(
    GPCMOptimizable *model              // Model to validate gradients for.
    )
{
    // Compute gradient.
    clearGradients();
    double center = model->recompute(true);
    double centerc = 0.0;
    packGradients(x,g);
    // std::cout << x << std::endl;

    // Optionally compute the constraint gradient.
    VectorXd cg(g.rows());
    if (model->hasConstraint()) {
        clearGradients();
        centerc = model->recomputeConstraint(true);
        packGradients(x,cg);
    }

    // Take samples to evaluate finite differences.
    VectorXd pt = x;
    VectorXd fdg(params);
    VectorXd fdgc(params);
    for (int i = 0; i < params; i++) {
        // Evaluate upper and lower values.
        pt.noalias() = x + VectorXd::Unit(params,i)*FD_EPSILON;
        unpackVariables(pt);
        double valp = model->recompute(false);
        double valpc = model->recomputeConstraint(false);
        pt.noalias() = x - VectorXd::Unit(params,i)*FD_EPSILON;
        unpackVariables(pt);
        double valm = model->recompute(false);
        double valmc = model->recomputeConstraint(false);
        fdg(i) = 0.5*(valp-valm)/FD_EPSILON;
        fdgc(i) = 0.5*(valpc-valmc)/FD_EPSILON;
        DBPRINTLN("Computed finite difference for dimension " << i << " of " << params << ": " << fdg(i));
    }
    // std::cout << x << std::endl;

    // Reset variables.
    unpackVariables(x);

    // Construct gradient names.
    std::vector<std::string> varname(x.rows());
    for (std::vector<GPCMOptVariable>::iterator itr = variables.begin();
         itr != variables.end(); itr++) {
        for (int i = itr->getIndex(); i < itr->getIndex()+itr->getParamCount(); i++) {
            varname[i] = itr->getName();
            if (itr->getParamCount() > 1)
                varname[i] += std::string(" ") + boost::lexical_cast<std::string>(i-itr->getIndex());
        }
    }

    // Print gradients.
    int idx;
    DBPRINTLN("True gradient / finite-difference gradient:");
    for (int i = 0; i < params; i++) {
        if (model->hasConstraint()) {
            DBPRINTLN(std::setw(10) << g(i) << " " << std::setw(10) << fdg(i)
                << std::setw(10) << cg(i) << " " << std::setw(10) << fdgc(i)
                << std::setw(10) << "(" << x(i) << ")" << " " << varname[i]);
        } else {
            DBPRINTLN(std::setw(10) << g(i) << " " << std::setw(10) << fdg(i)
                << std::setw(10) << "(" << x(i) << ")" << " " << varname[i]);
        }
    }

    // Check objective gradient.
    double maxDiff = (g-fdg).array().abs().matrix().maxCoeff(&idx);
    if (maxDiff >= 0.1)
        DBWARNING("Gradients appear significantly different!");
    DBPRINTLN("Max difference: " << maxDiff);
    DBPRINTLN("Max difference at index " << idx << ":" << std::endl
        << std::setw(10) << g(idx) << " " << std::setw(10) << fdg(idx) << " " << varname[idx]);

    if (model->hasConstraint()) {
        // Check constraint gradient.
        maxDiff = (cg-fdgc).array().abs().matrix().maxCoeff(&idx);
        if (maxDiff >= 0.1)
            DBWARNING("Constraint gradients appear significantly different!");
        DBPRINTLN("Max constraint difference: " << maxDiff);
        DBPRINTLN("Max constraint difference at index " << idx << ":" << std::endl
            << std::setw(10) << cg(idx) << " " << std::setw(10) << fdgc(idx) << " " << varname[idx]);
    }
}