/// Renormalize the weights
void
SvmSgd::renorm()
{
  if (wDivisor != 1.0)
    {
      w.scale(1.0 / wDivisor);
      wDivisor = 1.0;
    }
}
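// --- Illustration (not from the original sources) ---
// renorm() suggests that training elsewhere defers the l2 shrinkage into
// wDivisor: the true weight vector is w / wDivisor until renorm() folds
// the divisor back in, so shrinking never touches every coefficient.
// A minimal sketch of that invariant on a plain std::vector (the real
// FVector API differs):
#include <vector>

struct LazyDividedVector
{
  std::vector<double> w;   // stored coefficients
  double wDivisor = 1.0;   // true vector is w / wDivisor

  void shrinkTrue(double s)        // multiply the true vector by s
  { wDivisor /= s; }               // in O(1), deferring the real work
  double trueAt(size_t i) const
  { return w[i] / wDivisor; }
  void renorm()                    // fold the divisor in, as above
  {
    for (double &wi : w)
      wi /= wDivisor;
    wDivisor = 1.0;
  }
};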
/// Renormalize the weights
void
SvmSag::renorm()
{
  if (wb != 0)
    w.combine(wa, g, wb);
  else if (wa != 1)
    w.scale(wa);
  wa = 1;
  wb = 0;
}
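// --- Illustration (not from the original sources) ---
// SvmSag appears to keep its weights in the affine form wa*w + wb*g,
// where g accumulates the averaged gradient; renorm() collapses that
// form. The combine() call above is read here as this = c1*this +
// c2*other, which is an assumption about the FVector API. A sketch of
// that semantic on plain std::vector:
#include <vector>

void
combine(std::vector<double> &v, double c1,
        const std::vector<double> &other, double c2)
{
  for (size_t i = 0; i < v.size(); i++)
    v[i] = c1 * v[i] + c2 * other[i];
}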
/// Renormalize the weights
void
SvmAisgd::renorm()
{
  if (wDivisor != 1.0 || aDivisor != 1.0 || wFraction != 0)
    {
      a.combine(1/aDivisor, w, wFraction/aDivisor);
      w.scale(1/wDivisor);
      wDivisor = aDivisor = 1;
      wFraction = 0;
    }
}
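// --- Illustration (not from the original sources) ---
// SvmAisgd tracks an averaged iterate on top of the divided weights.
// Reading the renorm() above backwards, the true pair is w/wDivisor for
// the weights and a/aDivisor + (wFraction/aDivisor)*w for the average;
// keeping the average in this factored form keeps the otherwise dense
// averaging update cheap per sparse example. A sketch of reading the
// true values back (hypothetical helper, plain std::vector, not FVector):
#include <vector>

struct AveragedState
{
  std::vector<double> w, a;
  double wDivisor = 1.0, aDivisor = 1.0, wFraction = 0.0;

  double trueW(size_t i) const
  { return w[i] / wDivisor; }
  double trueA(size_t i) const   // mirrors a.combine(1/aDivisor, w, wFraction/aDivisor)
  { return a[i] / aDivisor + (wFraction / aDivisor) * w[i]; }
};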
/// Perform a training epoch
void
SvmSgd::train(int imin, int imax, const xvec_t &xp, const yvec_t &yp,
              const char *prefix)
{
  cout << prefix << "Training on [" << imin << ", " << imax << "]." << endl;
  assert(imin <= imax);
  count = skip;
  for (int i = imin; i <= imax; i++)
    {
      const SVector &x = xp.at(i);
      double y = yp.at(i);
      double wx = dot(w, x);
      double z = y * (wx + bias);
      double eta = 1.0 / (lambda * t);
#if LOSS < LOGLOSS
      if (z < 1)
#endif
        {
          double etd = eta * dloss(z);
          w.add(x, etd * y);
#if BIAS
#if REGULARIZEBIAS
          bias *= 1 - eta * lambda * bscale;
#endif
          bias += etd * y * bscale;
#endif
        }
      // Delayed regularization: shrink w only every skip examples,
      // falling back to the exact product when the one-step
      // approximation degrades.
      if (--count <= 0)
        {
          double r = 1 - eta * lambda * skip;
          if (r < 0.8)
            r = pow(1 - eta * lambda, skip);
          w.scale(r);
          count = skip;
        }
      t += 1;
    }
  cout << prefix << setprecision(6)
       << "Norm: " << dot(w, w) << ", Bias: " << bias << endl;
}
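// --- Illustration (not from the original sources) ---
// train() relies on a loss selected at compile time through numeric LOSS
// tags ("LOSS < LOGLOSS" restricts the update to examples inside the
// margin for hinge-type losses). The sketch below uses hypothetical tag
// values, and the usual convention that dloss(z) = -d loss / d z, so the
// update w += eta*dloss(z)*y*x above steps along the negative gradient:
#include <cmath>

#define HINGELOSS 1     // hypothetical tag values; the package's
#define LOGLOSS   10    // loss header assigns its own

#ifndef LOSS
# define LOSS HINGELOSS
#endif

#if LOSS == HINGELOSS
inline double loss(double z)  { return (z < 1) ? 1 - z : 0; }
inline double dloss(double z) { return (z < 1) ? 1 : 0; }
#elif LOSS == LOGLOSS
inline double loss(double z)  { return std::log(1 + std::exp(-z)); }
inline double dloss(double z) { return 1 / (1 + std::exp(z)); }
#endif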
/// Perform a training epoch. This variant keeps the weights factored as
/// wscale * w, so the l2 shrinkage is a single scalar multiply, and it
/// renormalizes whenever wscale threatens to underflow.
void
SvmSgd::train(int imin, int imax, const xvec_t &xp, const yvec_t &yp,
              const char *prefix)
{
  cout << prefix << "Training on [" << imin << ", " << imax << "]." << endl;
  assert(imin <= imax);
  for (int i = imin; i <= imax; i++)
    {
      double eta = 1.0 / (lambda * t);
      double s = 1 - eta * lambda;
      wscale *= s;
      if (wscale < 1e-9)
        {
          w.scale(wscale);
          wscale = 1;
        }
      const SVector &x = xp.at(i);
      double y = yp.at(i);
      double wx = dot(w, x) * wscale;
      double z = y * (wx + bias);
#if LOSS < LOGLOSS
      if (z < 1)
#endif
        {
          double etd = eta * dloss(z);
          w.add(x, etd * y / wscale);
#if BIAS
          // Slower rate on the bias because
          // it learns at each iteration.
          bias += etd * y * 0.01;
#endif
        }
      t += 1;
    }
  double wnorm = dot(w, w) * wscale * wscale;
  cout << prefix << setprecision(6)
       << "Norm: " << wnorm << ", Bias: " << bias << endl;
}
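// --- Illustration (not from the original sources) ---
// A hedged usage sketch: how either train() variant would typically be
// driven, running several epochs over a loaded data set. The SvmSgd
// constructor signature and the commented-out loader call are assumptions
// about the surrounding code, not a verbatim API.
int
main()
{
  xvec_t xtrain;               // sparse examples
  yvec_t ytrain;               // +1/-1 labels
  // load_datafile("train.dat", xtrain, ytrain);   // hypothetical loader
  int n = (int) ytrain.size();
  SvmSgd svm(/* dim */ 1000, /* lambda */ 1e-5);   // assumed constructor
  for (int epoch = 1; epoch <= 5 && n > 0; epoch++)
    svm.train(0, n - 1, xtrain, ytrain, "[train] ");
  return 0;
}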