// Apply one step of L1 regularization (truncated-gradient style):
// shrink every weight toward zero by lambda_hat, and drop entries whose
// pre-shrink magnitude is below lambda_hat (they clip to exactly zero),
// keeping the weight map sparse.
//
// Improvement over the previous version: iterate over `w` directly and use
// the erase(it++) idiom instead of copying the whole map (erase on an
// unordered_map invalidates only the erased iterator, so this is safe),
// and skip the pointless write into entries that are erased right after.
void l1_regularize(const int iter) {
  // Effective shrinkage amount for this iteration.
  float lambda_hat = get_eta(iter) * lambda;
  std::tr1::unordered_map<int, float>::iterator it = w.begin();
  while (it != w.end()) {
    if (fabsf(it->second) < lambda_hat) {
      // Weight would be clipped to exactly zero: remove it entirely.
      // erase(it++) advances first, then erases the old position.
      w.erase(it++);
    } else {
      it->second = clip_by_zero(it->second, lambda_hat);
      ++it;
    }
  }
}
// Smallest stable time step for a zone of width dr at radius r, whose face
// moves at speed w. The advective (CFL) limit dr/max|signal speed| always
// applies; when radiative transport is enabled (USE_RT), the diffusive
// limit dr^2/eta is imposed as well.
double mindt( double * prim , double w , double r , double dr ){
   double rho = prim[RHO];
   double Pp  = prim[PPP];
   double vr  = prim[VRR];

   // Adiabatic sound speed of the gas (fabs guards against a negative
   // pressure/density ratio from round-off).
   double cs = sqrt( fabs( GAMMA_LAW*Pp/rho ) );

   // Fastest signal speed relative to the moving face.
   double maxvr = cs + fabs( vr - w );

   // Advective (CFL) time step.
   double dt = dr/maxvr;

   // Diffusive time step, active only when radiative transport is on.
   double eta = get_eta( prim , NULL , r );
   double dt_eta = dr*dr/eta;
   if( USE_RT && dt_eta < dt ) dt = dt_eta;

   return dt;
}
/**
 * Refine the generalized Cauchy point x_gcp by a conjugate-gradient
 * minimization of the quadratic model, restricted to the coordinates
 * that are NOT in the active set I (Step 3 of the local search).
 *
 * @param gk      gradient of the objective at the iterate xk
 * @param Bk      (approximate) Hessian of the quadratic model
 * @param xk      current iterate
 * @param x_gcp   generalized Cauchy point, used as the starting point
 * @param region  box the search is confined to
 * @param I       active set; I[i] set means coordinate i is fixed,
 *                so only the n-I.size() free coordinates are optimized
 * @return the refined point (x_gcp itself when every coordinate is fixed)
 */
Vector UnconstrainedLocalSearch::conj_grad(const Vector& gk, const Matrix& Bk, const Vector& xk, const Vector& x_gcp, const IntervalVector& region, const BitSet& I) {
	int hn = n-I.size(); // the restricted dimension (number of free coordinates)

	if (hn==0) return x_gcp; // we are in a corner: nothing to do

	// ====================== STEP 3.0 : Initialization ======================
	Vector x=x_gcp; // next point, initialized to the generalized Cauchy point

	// gradient of the quadratic model at the starting point x
	Vector r = -gk-Bk*(x-xk);

	// required accuracy: the c.g. loop stops once ||hr||^2 <= eta^2
	double eta = get_eta(gk,xk,region,I);

	// Restricted ("hatted") objects of the conjugate gradient: only the
	// free coordinates (those with !I[i]) are kept.
	Vector hp(hn);              // the restricted conjugate direction
	Vector hx(hn);              // the restricted iterate
	Vector hr(hn);              // the restricted gradient
	Vector hy(hn);              // temporary vector (holds hB*hp)
	Matrix hB(hn,hn);           // the restricted hessian matrix
	IntervalVector hregion(hn); // the restricted region

	// initialization of \hat{B}: keep the rows/columns of Bk with free indices
	int p=0, q=0;
	for (int i=0; i<n; i++) {
		if (!I[i]) {
			for (int j=0; j<n; j++) {
				if (!I[j]) {
					hB[p][q] = Bk[i][j];
					q++;
				}
			}
			p++;
			q=0;
		}
	}

	// initialization of \hat{r}, \hat{x} and \hat{region}
	p=0;
	for (int i=0; i<n; i++) {
		if (!I[i]) {
			hregion[p] = region[i];
			hr[p] = r[i];
			hx[p] = x[i];
			p++;
		}
	}

	double rho1 = 1.0;                // ||hr||^2 at the previous iterate
	                                  // (1.0 so the first direction is hr itself, since hp starts at 0)
	double rho2 = ::pow(hr.norm(),2); // ||hr||^2 at the current iterate

	// ====================== STEP 3.1 : Test for the required accuracy in c.g. iteration ======================
	bool cond = (rho2>::pow(eta,2));

	try {
		while (cond) {
			// ====================== STEP 3.2 : Conjugate gradient recurrences ======================

			// Update the restricted conjugate direction:
			// \hat{p} = \hat{r} + beta*\hat{p}, with beta = rho2/rho1
			hp = hr + (rho2/rho1)*hp;

			// Update the temporary vector: \hat{y} = \hat{Bk}*\hat{p}
			hy = hB*hp;

			// Longest step along hp that keeps hx inside the restricted region.
			LineSearch ls(hregion,hx,hp,data,sigma);
			double alpha1=ls.alpha_max();

			// Curvature of the model along hp.
			double aux = hp*hy;
			if ( aux <= 0) {
				// First termination condition: the hessian is not
				// "positive definite for \hat{p}", so the quadratic
				// approximation is concave along this direction and the
				// solution lies on the border of the region.
				cond = false;
				hx = ls.endpoint();
			} else {
				// Exact minimizer of the model along hp.
				double alpha2 = rho2/aux;
				if (alpha2>alpha1) {
					// Second termination condition: the unconstrained step
					// alpha2 would leave the region, so stop on its border.
					cond = false;
					hx = ls.endpoint();
				} else {
					// Regular c.g. update: advance the iterate, update the
					// restricted gradient and rotate rho1/rho2.
					hx += alpha2*hp;
					ls.proj(hx); // re-project to guard against round-off drifting outside the region
					hr -= alpha2*hy;
					rho1 = rho2;
					rho2 = hr*hr;
					cond = (rho2>(::pow(eta,2)));
				}
			}
		}
	} catch(LineSearch::NullDirectionException&) {
		// The search direction became (numerically) null: keep the current hx.
	}

	// Copy the restricted iterate back into the free coordinates of x.
	p=0;
	for (int i=0; i<n; i++) {
		if (!I[i]) {
			x[i] = hx[p];
			p++;
		}
	}

	return x;
}