/// A trajectory is a transition path iff it connects basin A to basin B,
/// i.e. it satisfies both the hA and the hB indicator functions.
/// \param trajectory  the trajectory to classify
/// \return true iff the trajectory starts in A (hA) and ends in B (hB)
bool TpsOrderParameter::isTransitionPath(TpsTrajectory& trajectory) {
    // The boolean expression is the result; no need for if/else with
    // literal true/false returns. Note: && short-circuits, so hB is not
    // evaluated when hA already fails.
    return hA(trajectory) && hB(trajectory);
}
/// Dispatch to the configured B-basin indicator: the history-dependent
/// HB variant when _use_HB is set, otherwise the plain hB test.
/// \param traj  trajectory to test
/// \return result of HB(traj) or hB(traj) depending on the _use_HB flag
bool TpsOrderParameter::hBorHB(TpsTrajectory& traj) {
    return _use_HB ? HB(traj) : hB(traj);
}
// Smoke test for bolt::cl::device_vector: fills matching host and device
// vectors with 0..aSize-1, then computes the same inner product on the host
// (std::inner_product) and on the device (bolt::cl::inner_product).
void testDeviceVector() {
    const int aSize = 64;
    // hA/hB live in host memory; dA/dB are Bolt device-side vectors.
    std::vector<int> hA(aSize), hB(aSize);
    bolt::cl::device_vector<int> dA(aSize), dB(aSize);
    // Chained assignment goes through device_vector's proxy reference for
    // dA/dB — writes are forwarded to device storage.
    for(int i=0; i<aSize; i++) { hA[i] = hB[i] = dB[i] = dA[i] = i; };
    // Both reductions start from the same initial value (1) so the results
    // are directly comparable.
    int hSum = std::inner_product(hA.begin(), hA.end(), hB.begin(), 1);
    // Device-side inner product with explicit plus/multiplies functors,
    // mirroring std::inner_product's default operations.
    int sum = bolt::cl::inner_product( dA.begin(), dA.end(), dB.begin(), 1, bolt::cl::plus<int>(), bolt::cl::multiplies<int>() );
    // NOTE(review): hSum and sum are computed but never compared or
    // reported, so this test verifies nothing beyond "it runs" — presumably
    // an assertion hSum == sum is intended; confirm against the test
    // harness this file belongs to.
};
/// History-dependent B-basin test: checks whether any prefix of the
/// trajectory satisfies hB. Works on a throwaway clone, repeatedly
/// truncating the final timeslice (popBack) until hB holds or the
/// trajectory has been shortened n times.
/// \param traj  the trajectory to test (not modified; a clone is used)
/// \return true as soon as some truncation of the clone satisfies hB
bool TpsOrderParameter::HB(TpsTrajectory& traj) {
    int n_slices = traj.getNumberOfTimeslices();
    assert(n_slices > 0);

    // Work on a clone so the caller's trajectory is left untouched;
    // -10001 marks it as a scratch object.
    TpsTrajectory* scratch = traj.clone();
    scratch->setID(-10001);
    scratch->copy(traj);

    // Single-exit form: one delete site instead of one per return path.
    bool reached_B = false;
    for (int i = 0; i < n_slices && !reached_B; i++) {
        if (hB(*scratch)) {
            reached_B = true;   // prefix satisfies hB — done
        } else {
            scratch->popBack(); // drop last timeslice and retry
        }
    }
    delete scratch;
    return reached_B;
}
/**
 * Refine the generalized Cauchy point by (restricted) conjugate gradient.
 *
 * Minimizes the quadratic model q(x) = gk'(x-xk) + 1/2 (x-xk)'Bk(x-xk)
 * over the variables NOT fixed by the active set I, inside the box
 * `region`, starting from x_gcp (steps 3.0-3.2 of the algorithm).
 *
 * \param gk      gradient at the current iterate xk
 * \param Bk      (approximate) Hessian at xk
 * \param xk      current iterate
 * \param x_gcp   generalized Cauchy point (starting point)
 * \param region  current trust-region box
 * \param I       active set: I[i] set means variable i is fixed
 * \return        the improved point (fixed components kept from x_gcp)
 */
Vector UnconstrainedLocalSearch::conj_grad(const Vector& gk, const Matrix& Bk, const Vector& xk, const Vector& x_gcp, const IntervalVector& region, const BitSet& I) {
	int hn = n-I.size(); // the restricted dimension

	if (hn==0) return x_gcp; // we are in a corner: nothing to do

	// ====================== STEP 3.0 : Initialization ======================
	Vector x=x_gcp; // next point, initialized to gcp
	// gradient of the quadratic model at x
	Vector r = -gk-Bk*(x-xk);
	// required accuracy on the restricted gradient norm
	double eta = get_eta(gk,xk,region,I);

	// Conjugate-gradient data restricted to the free directions (!I[i]).
	Vector hp(hn);              // the restricted conjugate direction
	Vector hx(hn);              // the restricted iterate
	Vector hr(hn);              // the restricted gradient
	Vector hy(hn);              // temporary vector (hB*hp)
	Matrix hB(hn,hn);           // the restricted hessian matrix
	IntervalVector hregion(hn); // the restricted region

	// initialization of \hat{B}: extract the rows/columns of Bk
	// corresponding to the free variables
	int p=0, q=0;
	for (int i=0; i<n; i++) {
		if (!I[i]) {
			for (int j=0; j<n; j++) {
				if (!I[j]) {
					hB[p][q] = Bk[i][j];
					q++;
				}
			}
			p++;
			q=0;
		}
	}

	// initialization of \hat{r}, \hat{x} and \hat{region}
	p=0;
	for (int i=0; i<n; i++) {
		if (!I[i]) {
			hregion[p] = region[i];
			hr[p] = r[i];
			hx[p] = x[i];
			p++;
		}
	}

	double rho1 = 1.0;   // ||\hat{r}||^2 at the previous iterate
	// Squared norm of the restricted gradient. Computed as hr*hr (as in the
	// loop body below) instead of ::pow(hr.norm(),2), which took a sqrt
	// only to square it again.
	double rho2 = hr*hr;
	const double eta2 = eta*eta; // loop-invariant accuracy threshold

	// ====================== STEP 3.1 : Test for the required accuracy in c.g. iteration ======================
	bool cond = (rho2>eta2);
	try {
		while (cond) {
			// ====================== STEP 3.2 : Conjugate gradient recurrences ======================
			// Update the restricted conjugate direction:
			// \hat{p} = \hat{r} + beta*\hat{p}  with beta = rho2/rho1
			hp = hr + (rho2/rho1)*hp;
			// \hat{y} = \hat{B}*\hat{p}
			hy = hB*hp;

			LineSearch ls(hregion,hx,hp,data,sigma);
			double alpha1=ls.alpha_max(); // largest feasible step inside hregion

			// First termination condition: if \hat{p}'\hat{B}\hat{p} <= 0
			// the hessian is not "positive definite along \hat{p}", the
			// quadratic model is concave in that direction and the
			// minimizer lies on the border of the region.
			double aux = hp*hy;
			if ( aux <= 0) {
				cond = false;
				hx = ls.endpoint();
			} else {
				// Second termination condition: the unconstrained CG step
				// alpha2 would leave the region (alpha2>alpha1).
				double alpha2 = rho2/aux;
				if (alpha2>alpha1) {
					cond = false;
					hx = ls.endpoint();
				} else {
					// Otherwise accept the step and update the recurrences:
					// x, r=\hat{r}, rho1 and rho2 = \hat{r}*\hat{r}
					hx += alpha2*hp;
					ls.proj(hx); // keep hx inside the box despite rounding
					hr -= alpha2*hy;
					rho1 = rho2;
					rho2 = hr*hr;
					cond = (rho2>eta2);
				}
			}
		}
	} catch(LineSearch::NullDirectionException&) {
		// A null search direction simply ends the CG iteration; hx holds
		// the last accepted iterate.
	}

	// Copy the restricted iterate back into the full-dimensional point
	// (fixed components keep their x_gcp values).
	p=0;
	for (int i=0; i<n; i++) {
		if (!I[i]) {
			x[i] = hx[p];
			p++;
		}
	}
	return x;
}