Example #1
/**  
 *  Compute loss and gradient of Huber hinge loss. 
 *  CAUTION: f is passed by reference and is changed within this
 *  function. This is done for efficiency reasons, otherwise we would
 *  have had to create a new copy of f.
 *   
 *  @param loss [write] loss value computed.
 *  @param f [read/write] prediction vector. 
 *  @param l [write] partial derivative of loss function w.r.t. f
 */
void CHuberHingeLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
   f.ElementWiseMult(_data->labels());
   double* yf = f.Data();
   double* Y = _data->labels().Data();
   int len = f.Length();
   loss = 0.0;
   l.Zero();

   for(int i=0; i < len; i++) 
   {
      double v = 1-yf[i];
      if(h < v)
      {
         loss += v;
         l.Set(i,-Y[i]);
      }
      else if(-h > v) {}  // v < -h: no loss, no gradient
      else
      {
         loss += (v+h)*(v+h)/(4.0*h);
         l.Set(i, -Y[i]*(v+h)/(2.0*h));
      }
   }
}
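For reference, here is a minimal standalone sketch of the pointwise computation on the margin v = 1 - y*f, mirroring the three branches above (the helper name is hypothetical and not part of bmrm):

/* Hypothetical sketch: pointwise Huber hinge loss with smoothing parameter h.
 * Returns the loss and writes the derivative w.r.t. f into dloss_df. */
double huber_hinge_point(double y, double f, double h, double& dloss_df)
{
   double v = 1.0 - y*f;
   if(v > h)                          // linear (hinge) region
   {
      dloss_df = -y;
      return v;
   }
   if(v < -h)                         // flat region: no loss, no gradient
   {
      dloss_df = 0.0;
      return 0.0;
   }
   dloss_df = -y*(v + h)/(2.0*h);     // quadratic transition region
   return (v + h)*(v + h)/(4.0*h);
}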
Example #2
/**  
 *  Compute loss and partial derivative of logistic loss w.r.t. f
 *   
 *  @param loss [write] loss value computed.
 *  @param f [r/w] = X*w
 *  @param l [write] partial derivative of loss w.r.t. f
 */
void CLogisticLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0.0;
    l.Zero();  // for gradient computation i.e. grad := l'*X
    f.ElementWiseMult(_data->labels());
    double* f_array = f.Data();  // pointer to memory location of f (faster element access)
    int len = f.Length();	
    double exp_yf = 0.0;

    for(int i=0; i < len; i++)
    {
        if(fabs(f_array[i]) == 0.0)  // y*f == 0: loss = ln 2, derivative = -0.5
        {
            loss += LN2;
            l.Set(i,-0.5);
        }
        else if (f_array[i] > 0.0)
        {
            exp_yf = exp(-f_array[i]);
            loss += log(1+exp_yf);
            l.Set(i,-exp_yf/(1+exp_yf));
        }
        else
        {
            exp_yf = exp(f_array[i]);
            loss += log(1+exp_yf) - f_array[i];
            l.Set(i,-1.0/(1+exp_yf));
        }
    }	
    l.ElementWiseMult(_data->labels());
}
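The two exp branches keep the computation stable: exp is only ever taken of a non-positive argument. A hedged standalone sketch of the same pointwise formula, log(1 + exp(-y*f)), with the derivative taken directly w.r.t. f (the helper name is hypothetical, not bmrm code):

#include <cmath>

/* Hypothetical sketch: numerically stable pointwise logistic loss
 * log(1 + exp(-y*f)) and its derivative w.r.t. f, mirroring the
 * branching in CLogisticLoss::LossAndGrad above. */
double logistic_point(double y, double f, double& dloss_df)
{
    double yf = y*f;
    if(yf > 0.0)
    {
        double e = std::exp(-yf);          // e <= 1, cannot overflow
        dloss_df = -y*e/(1.0 + e);
        return std::log1p(e);
    }
    double e = std::exp(yf);               // e <= 1, cannot overflow
    dloss_df = -y/(1.0 + e);
    return std::log1p(e) - yf;             // equals log(1 + exp(-y*f))
}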
Example #3
File: l1n1.cpp Project: funkey/bmrm
/** The subgradient is chosen as sgn(w)
 */
void CL1N1::ComputeRegAndGradient(CModel& model, double& reg, TheMatrix& grad)
{
   reg = 0;
   TheMatrix &w = model.GetW();
   w.Norm1(reg);
   grad.Zero();
   for(int i=0; i<w.Length(); i++)
   {
      double val = 0;
      w.Get(i,val);
      grad.Set(i,SML::sgn(val));
   }
}
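SML::sgn is presumably the usual elementwise sign; under that assumption the chosen subgradient of |w_i| at w_i = 0 is 0. A minimal sketch of that convention (the name sgn_sketch is hypothetical):

/* Hypothetical sketch of the sign convention assumed above:
 * +1 for positive, -1 for negative, 0 at zero. */
double sgn_sketch(double x)
{
   if(x > 0.0) return  1.0;
   if(x < 0.0) return -1.0;
   return 0.0;
}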
Example #4
/**
 *  Compute loss and gradient of Least Absolute Deviation loss w.r.t f
 *
 *  @param loss [write] loss value computed.
 *  @param f [r/w] = X*w
 *  @param l [write] partial derivative of loss w.r.t. f
 */
void CLeastAbsDevLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0;
    l.Zero();
    double *Y_array = _data->labels().Data();
    double* f_array = f.Data();
    int len = f.Length();
    for(int i=0; i < len; i++)
    {
        double f_minus_y = f_array[i] - Y_array[i];
        loss += fabs(f_minus_y);
        l.Set(i, SML::sgn(f_minus_y));
    }
}
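Pointwise, the loss is |f_i - y_i| with subgradient sgn(f_i - y_i); a minimal standalone sketch (hypothetical helper, not bmrm code):

#include <cmath>

/* Hypothetical sketch: pointwise least absolute deviation loss and the
 * subgradient used above; sgn(0) = 0 is assumed. */
double lad_point(double f, double y, double& dloss_df)
{
    double r = f - y;
    dloss_df = (r > 0.0) - (r < 0.0);   // -1, 0 or +1
    return std::fabs(r);
}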
Example #5
/**  
 *  Compute loss and gradient of novelty detection loss. 
 *  CAUTION: f is passed by reference and is changed within this
 *  function. This is done for efficiency reasons, otherwise we would
 *  have had to create a new copy of f.
 *   
 *  @param loss [write] loss value computed.
 *  @param f [read/write] prediction vector. 
 *  @param l [write] partial derivative of loss function w.r.t. f
 */
void CNoveltyLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
   double* f_array = f.Data();  // pointer to memory location of f (faster element access)
   int len = f.Length();
   loss = 0.0;
   l.Zero();  // grad := l'*X
   
   for(int i=0; i < len; i++) 
   {
      if(rho > f_array[i])
      {
         loss += rho - f_array[i];
         l.Set(i, -1.0);
      }
   }
}
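Pointwise this is the one-class hinge max(0, rho - f_i); a minimal standalone sketch (hypothetical helper, assuming rho is the margin member used above):

/* Hypothetical sketch: pointwise novelty detection loss max(0, rho - f)
 * and its derivative w.r.t. f, mirroring the branch above. */
double novelty_point(double f, double rho, double& dloss_df)
{
   if(f < rho)
   {
      dloss_df = -1.0;
      return rho - f;
   }
   dloss_df = 0.0;
   return 0.0;
}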
/** Accumulate `value` into entry (offset + current_ideal_pi[i]) of l. */
void CNDCGRankLoss::add(TheMatrix &l, int offset, int i, double value)
{
  Scalar temp;
  l.Get(offset + current_ideal_pi[i], temp);
  l.Set(offset + current_ideal_pi[i], temp + value);
}
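Stripped of TheMatrix, the helper simply accumulates a value into the entry of l addressed through the ideal permutation; a plain-array analogue (hypothetical, for illustration only):

/* Hypothetical plain-array analogue of CNDCGRankLoss::add above:
 * accumulate `value` into l[offset + pi[i]] instead of overwriting it. */
void add_sketch(double* l, const int* pi, int offset, int i, double value)
{
   l[offset + pi[i]] += value;
}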