Example #1
double test6() {
	string t0[] = {"101", 
 "011", 
 "101", 
 "010"};
	vector <string> p0(t0, t0+sizeof(t0)/sizeof(string));
	TheMatrix * obj = new TheMatrix();
	clock_t start = clock();
	int my_answer = obj->MaxArea(p0);
	clock_t end = clock();
	delete obj;
	cout <<"Time: " <<(double)(end-start)/CLOCKS_PER_SEC <<" seconds" <<endl;
	int p1 = 8;
	cout <<"Desired answer: " <<endl;
	cout <<"\t" << p1 <<endl;
	cout <<"Your answer: " <<endl;
	cout <<"\t" << my_answer <<endl;
	if (p1 != my_answer) {
		cout <<"DOESN'T MATCH!!!!" <<endl <<endl;
		return -1;
	}
	else {
		cout <<"Match :-)" <<endl <<endl;
		return (double)(end-start)/CLOCKS_PER_SEC;
	}
}
Example #2
/** The subgradient is chosen as sgn(w)
 */
void CL1N1::ComputeRegAndGradient(CModel& model, double& reg, TheMatrix& grad)
{
   reg = 0;
   TheMatrix &w = model.GetW();
   w.Norm1(reg);
   grad.Zero();
   for(int i=0; i<w.Length(); i++)
   {
      double val = 0;
      w.Get(i,val);
      grad.Set(i,SML::sgn(val));
   }
}
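For reference, the regularizer and subgradient computed above are (summary added for clarity; not part of the original source):

\[ \Omega(w) = \|w\|_1 = \sum_i |w_i|, \qquad [\partial \Omega(w)]_i = \operatorname{sgn}(w_i), \]

with the convention sgn(0) = 0, which is a valid choice of subgradient at the kink.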
Example #3
/**  
 *  Compute loss and partial derivative of logistic loss w.r.t. f
 *   
 *  @param loss [write] loss value computed.
 *  @param f [r/w] = X*w
 *  @param l [write] partial derivative of loss w.r.t. f
 */
void CLogisticLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0.0;
    l.Zero();  // for gradient computation i.e. grad := l'*X
    f.ElementWiseMult(_data->labels());  // f := y.*f, elementwise
    double* f_array = f.Data();  // pointer to memory location of f (faster element access)
    int len = f.Length();
    double exp_yf = 0.0;

    for(int i=0; i < len; i++)
    {
        if(fabs(f_array[i]) == 0.0)
        {
            loss += LN2;
            l.Set(i,-0.5);
        }
        else if (f_array[i] > 0.0)
        {
            exp_yf = exp(-f_array[i]);
            loss += log(1+exp_yf);
            l.Set(i,-exp_yf/(1+exp_yf));
        }
        else
        {
            exp_yf = exp(f_array[i]);
            loss += log(1+exp_yf) - f_array[i];
            l.Set(i,-1.0/(1+exp_yf));
        }
    }
    l.ElementWiseMult(_data->labels());  // chain rule: multiply back by the labels y
}
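The three branches above are the standard numerically stable evaluation of the logistic loss; written out for the margin m = y*f (summary added for clarity; not from the original source):

\[ \ell(m) = \log(1 + e^{-m}) = \begin{cases} \log 2 & m = 0 \\ \log(1 + e^{-m}) & m > 0 \\ \log(1 + e^{m}) - m & m < 0, \end{cases} \qquad \ell'(m) = \frac{-e^{-m}}{1 + e^{-m}} = \frac{-1}{1 + e^{m}}. \]

Each branch only ever exponentiates a non-positive argument, so exp() cannot overflow; at m = 0 the loss is log 2 (the LN2 constant) and the derivative is -1/2, matching the l.Set(i,-0.5) case.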
Example #4
/**
 *  Compute loss and gradient of Least Absolute Deviation loss w.r.t f
 *
 *  @param loss [write] loss value computed.
 *  @param f [r/w] = X*w
 *  @param l [write] partial derivative of loss w.r.t. f
 */
void CLeastAbsDevLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
    loss = 0;
    l.Zero();
    double *Y_array = _data->labels().Data();
    double* f_array = f.Data();
    int len = f.Length();
    for(int i=0; i < len; i++)
    {
        double f_minus_y = f_array[i] - Y_array[i];
        loss += fabs(f_minus_y);
        l.Set(i, SML::sgn(f_minus_y));
    }
}
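In symbols, the loop implements (summary added for clarity):

\[ \ell(f, y) = |f - y|, \qquad \frac{\partial \ell}{\partial f} = \operatorname{sgn}(f - y), \]

accumulated over all training points, with sgn(0) = 0 serving as the subgradient at the non-differentiable point.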
Example #5
/**  
 *  Compute loss and gradient of novelty detection loss. 
 *  CAUTION: f is passed by reference and is changed within this
 *  function. This is done for efficiency reasons, otherwise we would
 *  have had to create a new copy of f.
 *   
 *  @param loss [write] loss value computed.
 *  @param f [read/write] prediction vector. 
 *  @param l [write] partial derivative of loss function w.r.t. f
 */
void CNoveltyLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
   double* f_array = f.Data();  // pointer to memory location of f (faster element access)
   int len = f.Length();
   loss = 0.0;
   l.Zero();  // grad := l'*X
   
   for(int i=0; i < len; i++) 
   {
      if(rho > f_array[i])
      {
         loss += rho - f_array[i];
         l.Set(i, -1.0);
      }
   }
}
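The loop above is a one-sided hinge around the margin rho (summary added for clarity):

\[ \ell(f) = \max(0, \rho - f), \qquad \frac{\partial \ell}{\partial f} = \begin{cases} -1 & f < \rho \\ 0 & \text{otherwise.} \end{cases} \]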
Example #6
/**  
 *  Compute NDCGRank loss. CAUTION: f is passed by reference and is
 *  changed within this function. This is done for efficiency reasons,
 *  otherwise we would have had to create a new copy of f. 
 *   
 *  @param loss [write] loss value computed.
 *  @param f [read/write] prediction vector. 
 */
void CNDCGRankLoss::Loss(Scalar& loss, TheMatrix& f)
{
  // chteo: here we make use of the subset information 
        
  loss = 0.0;	
  Scalar* f_array = f.Data();  
  for(int q=0; q < _data->NumOfSubset(); q++)
    {
      int offset = _data->subset[q].startIndex;
      int subsetsize = _data->subset[q].size;
      current_ideal_pi = sort_vectors[q];
      vector<double> b = bs[q];

      //compute_coefficients(offset, subsetsize, y_array, current_ideal_pi, a, b);
      
      /* find the best permutation */
      find_permutation(subsetsize, offset, a, b, c, f_array, pi);
      
      /* compute the loss */
      double value;
      delta(subsetsize, a, b, pi, value);
      
      loss += value;
      
      for (int i=0;i<subsetsize;i++){
	loss = loss + c[i]*(get(f_array, offset, pi[i]) - get(f_array, offset, i));
      }
      //free(c);
      //free(a);
      //free(b);
      //free(pi);
      
    }

}
Example #7
/**  
 *  Compute logistic loss. CAUTION: f is passed by reference and is
 *  changed within this function. This is done for efficiency reasons,
 *  otherwise we would have had to create a new copy of f. 
 *   
 *  @param loss [write] loss value computed.
 *  @param f [read/write] prediction vector. 
 */
void CLogisticLoss::Loss(double& loss, TheMatrix& f)
{
	loss = 0;
	f.ElementWiseMult(_data->labels());  // f = y*f
	double* f_array = f.Data();  // pointer to memory location of f (faster element access)
	int len = f.Length();
	for(int i=0; i < len; i++)
	{
		if(fabs(f_array[i]) == 0.0)
			loss += LN2;
		else if (f_array[i] > 0.0)
			loss += log(1+exp(-f_array[i]));
		else
			loss += log(1+exp(f_array[i])) - f_array[i];
	}
}
Example #8
void CL2N2::ComputeRegAndGradient(CModel& model, double& reg, TheMatrix& grad)
{
   reg = 0;
   TheMatrix &w = model.GetW();
   w.Norm2(reg);
   reg = 0.5*reg*reg;
   grad.Assign(w); 	
}
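This is the standard quadratic regularizer; in symbols (summary added for clarity):

\[ \Omega(w) = \tfrac{1}{2}\|w\|_2^2, \qquad \nabla \Omega(w) = w, \]

which is why the gradient is simply a copy of w.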
Example #9
/**  
 *  Compute loss and partial derivative of NDCGRank loss w.r.t f
 *   
 *  @param loss [write] loss value computed.
 *  @param f [r/w] = X*w
 *  @param l [write] partial derivative of loss w.r.t. f
 */
void CNDCGRankLoss::LossAndGrad(Scalar& loss, TheMatrix& f, TheMatrix& l)
{
  // chteo: here we make use of the subset information 
        
  loss = 0.0;	
  l.Zero();  
  Scalar* f_array = f.Data();  
  for(int q=0; q < _data->NumOfSubset(); q++)
    {
      //cout << "q = "<< q <<endl;
      int offset = _data->subset[q].startIndex;
      int subsetsize = _data->subset[q].size;
      current_ideal_pi = sort_vectors[q];
      vector<double> b = bs[q];

      //compute_coefficients(offset, subsetsize, y_array, current_ideal_pi, a, b);
      
      //cout << "before finding permutation\n";
      /* find the best permutation */
      find_permutation(subsetsize, offset, a, b, c, f_array, pi);
      //cout << "after finding permutation\n";

      //cout << "before finding delta\n";
      /* compute the loss */
      double value;
      delta(subsetsize, a, b, pi, value);
      //cout << "before finding delta\n";

      loss += value;
      
      for (int i=0;i<subsetsize;i++){
	loss = loss + c[i]*(get(f_array, offset, pi[i]) - get(f_array, offset, i));
      }
      
      for (int i=0;i<subsetsize;i++){
	//add(l, offset, i, c[pi[i]] - c[i]);
	add(l, offset, i, - c[i]);
	add(l, offset, pi[i], c[i]);
      }
    }
}
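Schematically, per query subset q with best-scoring permutation pi (a reading of the code added for clarity; all indices are relative to the subset's offset):

\[ \text{loss} = \sum_q \Big( \Delta(a, b, \pi) + \sum_i c_i \big( f_{\pi(i)} - f_i \big) \Big), \]

and the gradient update adds -c_i at position i and +c_i at position pi(i), routed through the ideal ordering by the add() helper shown in Example #16.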
Example #10
/** Flag = 0: margin loss only, no label loss (the label loss will always be zero).
 *       = 1: margin loss and label loss.
 */
void CSMMMulticlassLoss::ComputeLoss(vector<unsigned int> y, vector<unsigned int> ylabel, vector<unsigned int> ybar, vector<unsigned int> ybarlabel, const CSeqMulticlassFeature::seqfeature_struct &x, const TheMatrix &w, double & marginloss, double & labelloss, int flag)
{
    unsigned int i;
    double w_dot_phi1 = 0;
    double w_dot_phi2 = 0;
    marginloss = 0;

    unsigned int start;
    if(is_first_phi1_used)
	start = 0;
    else
	start = 1;
    for(i=start; i < ybar.size(); i++)
    {
       _data->TensorPhi1(x.phi_1[ybar[i]],ybarlabel[i],0,tphi_1);
       //tphi_1->Print();
       w.Dot(*(tphi_1), w_dot_phi1);
       marginloss += w_dot_phi1;
       //printf("%d(%d):%2.4f\t",ybar[i],ybarlabel[i],marginloss);
    }	
    for(i=1;i<ybar.size();i++)
    {
       int vb = 0;
       _data->TensorPhi2(x.phi_2[ybar[i-1]][ybar[i]-ybar[i-1]-1], ybarlabel[i-1], ybarlabel[i], 0,vb,tphi_2);
       w.Dot(*(tphi_2), w_dot_phi2);
       marginloss += w_dot_phi2;
    }
    
    if(ybar.size() > 0)
    {
       
       //grad.Add(*(X[i].phi_2[ybar[ybar.size()-1]][X[i].len-1 - ybar[ybar.size()-1]-1]));////       
       _data->TensorPhi2(x.phi_2[ybar[ybar.size()-1]][x.len - ybar[ybar.size()-1]-1 ], ybarlabel[ybar.size()-1], 0, 0,0,tphi_2);
       w.Dot(*(tphi_2), w_dot_phi2);
       marginloss += w_dot_phi2;
    }
    
    //vector <unsigned int> yss = Boundry2StatSequence(y,ylabel,x.len);
    //vector <unsigned int> ybarss = Boundry2StatSequence(ybar,ybarlabel,x.len);
    //labelloss = Labelloss(yss,ybarss);
    labelloss = AllDelta(ybar,y,ybarlabel,ylabel,x.len);
}
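In summary (a paraphrase added for clarity, not a comment from the original source), the function scores a candidate segmentation ybar by

\[ \text{marginloss} = \langle w, \Phi(x, \bar y) \rangle = \sum_i \langle w, \phi_1(\cdot) \rangle + \sum_i \langle w, \phi_2(\cdot) \rangle, \qquad \text{labelloss} = \Delta(\bar y, y), \]

where phi_1 covers per-segment features, phi_2 covers transitions between consecutive segment boundaries, and the final phi_2 term closes off the last segment.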
Example #11
// BEGIN KAWIGIEDIT TESTING
// Generated by KawigiEdit 2.1.4 (beta) modified by pivanof
bool KawigiEdit_RunTest(int testNum, vector <string> p0, bool hasAnswer, int p1) {
    cout << "Test " << testNum << ": [" << "{";
    for (int i = 0; int(p0.size()) > i; ++i) {
        if (i > 0) {
            cout << ",";
        }
        cout << "\"" << p0[i] << "\"";
    }
    cout << "}";
    cout << "]" << endl;
    TheMatrix *obj;
    int answer;
    obj = new TheMatrix();
    clock_t startTime = clock();
    answer = obj->MaxArea(p0);
    clock_t endTime = clock();
    delete obj;
    bool res;
    res = true;
    cout << "Time: " << double(endTime - startTime) / CLOCKS_PER_SEC << " seconds" << endl;
    if (hasAnswer) {
        cout << "Desired answer:" << endl;
        cout << "\t" << p1 << endl;
    }
    cout << "Your answer:" << endl;
    cout << "\t" << answer << endl;
    if (hasAnswer) {
        res = answer == p1;
    }
    if (!res) {
        cout << "DOESN'T MATCH!!!!" << endl;
    } else if (double(endTime - startTime) / CLOCKS_PER_SEC >= 2) {
        cout << "FAIL the timeout" << endl;
        res = false;
    } else if (hasAnswer) {
        cout << "Match :-)" << endl;
    } else {
        cout << "OK, but is it right?" << endl;
    }
    cout << "" << endl;
    return res;
}
Example #12
void CBMRM::DisplayAfterTrainingInfo(unsigned int iter, double finalExactObjVal, 
                                      double approxObjVal, double loss, 
                                      TheMatrix& w_best, CTimer& lossAndGradientTime,
                                      CTimer& innerSolverTime, CTimer& totalTime)
{
   // legends
   if(verbosity >= 1) 
   {
      printf("\n[Legends]\n");
      if(verbosity > 1)
         printf("pobj: primal objective function value"
                "\naobj: approximate objective function value\n");

      printf("gam: gamma (approximation error) "
             "\neps: lower bound on gam "
             "\nloss: loss function value "
             "\nreg: regularizer value\n");
   }
   
   double norm1 = 0, norm2 = 0, norminf = 0;
   w_best.Norm1(norm1);
   w_best.Norm2(norm2);
   w_best.NormInf(norminf);
   
   printf("\nNote: the final w is the w_t where J(w_t) is the smallest.\n");
   printf("No. of iterations:  %d\n",iter);
   printf("Primal obj. val.: %.6e\n",finalExactObjVal);
   printf("Approx obj. val.: %.6e\n",approxObjVal);
   printf("Primal - Approx.: %.6e\n",finalExactObjVal-approxObjVal);
   printf("Loss:             %.6e\n",loss);
   printf("|w|_1:            %.6e\n",norm1);
   printf("|w|_2:            %.6e\n",norm2);
   printf("|w|_oo:           %.6e\n",norminf);
   
   
   // display timing profile
   printf("\nCPU seconds in:\n");
   printf("1. loss and gradient: %8.2f\n", lossAndGradientTime.CPUTotal());
   printf("2. solver:            %8.2f\n", innerSolverTime.CPUTotal()); 
   printf("               Total: %8.2f\n", totalTime.CPUTotal());
   printf("Wall-clock total:     %8.2f\n", totalTime.WallclockTotal());
}
Example #13
/**  
 *  Compute loss and gradient of Huber hinge loss. 
 *  CAUTION: f is passed by reference and is changed within this
 *  function. This is done for efficiency reasons, otherwise we would
 *  have had to create a new copy of f.
 *   
 *  @param loss [write] loss value computed.
 *  @param f [read/write] prediction vector. 
 *  @param l [write] partial derivative of loss function w.r.t. f
 */
void CHuberHingeLoss::LossAndGrad(double& loss, TheMatrix& f, TheMatrix& l)
{
   f.ElementWiseMult(_data->labels());
   double* yf = f.Data();
   double* Y = _data->labels().Data();
   int len = f.Length();
   loss = 0.0;
   l.Zero();

   for(int i=0; i < len; i++) 
   {
      double v = 1-yf[i];
      if(h < v)
      {
         loss += v;
         l.Set(i,-Y[i]);
      }
      else if(-h > v) {}
      else
      {
         loss += (v+h)*(v+h)/4/h;
         l.Set(i, -Y[i]*(v+h)/2/h);
      }
   }
}
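With v = 1 - y*f and smoothing parameter h, the branches implement the Huber-smoothed hinge (summary added for clarity):

\[ \ell(v) = \begin{cases} v & v > h \\ \dfrac{(v+h)^2}{4h} & -h \le v \le h \\ 0 & v < -h, \end{cases} \qquad \frac{\partial \ell}{\partial f} = \begin{cases} -y & v > h \\ -y\,\dfrac{v+h}{2h} & -h \le v \le h \\ 0 & v < -h. \end{cases} \]

The quadratic piece meets the linear piece with matching value and slope at v = h, which is the point of the smoothing.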
Example #14
void CGenericLoss::ComputeLossAndGradient(double& loss, TheMatrix& grad)
{
  loss = 0;
  grad.Zero();
  TheMatrix &w = _model->GetW();
  double* dat = w.Data();
  double* raw_g = grad.Data();

  {
    double* resy;
    double* resybar;

    map<int,int> ybar;

    resy = new double [data->dim()];
    resybar = new double [data->dim()];

    minimize(data->nodeFeatures, &(data->nodeLabels), data->edgeFeatures, dat, dat + data->nNodeFeatures, ybar, data->nNodeFeatures, data->nEdgeFeatures, data->lossPositive, data->lossNegative, data->indexEdge, NULL, 1, data->firstOrderResponses);

    Phi(data->nodeFeatures, &(data->nodeLabels), data->edgeFeatures, data->nNodeFeatures, data->nEdgeFeatures, resy,    resy    + data->nNodeFeatures, data->indexEdge);
    Phi(data->nodeFeatures, &ybar,               data->edgeFeatures, data->nNodeFeatures, data->nEdgeFeatures, resybar, resybar + data->nNodeFeatures, data->indexEdge);
    
    loss += LabelLoss(data->nodeLabels, ybar, data->lossPositive, data->lossNegative, LOSS);

    for (int j = 0; j < (int) data->dim(); j ++)
    {
      loss += dat[j]*(resybar[j]-resy[j]);
      raw_g[j] += (1.0/data->N)*(resybar[j]-resy[j]);
    }

    delete [] resy;
    delete [] resybar;
  }

  loss = loss/data->N;
}
Example #15
/**   Compute loss and gradient
 */
void CSMMMulticlassLoss::ComputeLossAndGradient(double& loss, TheMatrix& grad)
{
   iterNum ++;
   TheMatrix &w = _model->GetW();
   loss = 0;
   grad.Zero();
   TheMatrix g(grad, SML::DENSE);
   
   const vector<CSeqMulticlassLabel::seqlabel_struct> &Y = _data->labels();
   const vector<CSeqMulticlassFeature::seqfeature_struct> &X = _data->features();
   
   unsigned int trainExNum = 0;
   vector <int > cvmark = _data->Getcvmark();	
   for(unsigned int i=0; i < m; i++)
   {
      if(cvmark.size()!=0)			
      {
         if(cvmark[i]!=SMM::TRAIN_DATA)
            continue;
      }
      trainExNum ++;
      
      //if(cvmark)
      vector<unsigned int> ybar(X[i].len,0);
      vector<unsigned int> ybarlabel(X[i].len,0);
      double labelloss = 0;
      double marginloss = 0;
      double w_dot_g = 0.0;
      
      // find best label y' and return the score wrt to y'
      if(verbosity>=2)
      {
         cout <<"ex:"<< i<< endl;fflush(stdout);
      }
      
      if(is_single_action_persequence)
         find_best_label_grammer(Y[i].pos,Y[i].type, X[i], w, ybar, ybarlabel, marginloss, labelloss, 0, _data->getNumOfClass());
      else
         find_best_label(Y[i].pos,Y[i].type, X[i], w, ybar, ybarlabel, marginloss, labelloss, 0, _data->getNumOfClass());
      
      double labelloss_y = 0;
      double marginloss_y = 0;
      double labelloss_ybar = 0;
      double marginloss_ybar = 0;
      
      
      ComputeLoss(Y[i].pos,Y[i].type,ybar,ybarlabel,X[i],w,marginloss_ybar,labelloss_ybar,1);
      if(lossw[0]!=0)
         labelloss+=lossw[0];
      
      if(lastDuration>0)
      {
         marginloss = marginloss_ybar;
         labelloss = labelloss_ybar;
      }
      if(verbosity>=3)
      {					
         ComputeLoss(Y[i].pos,Y[i].type,Y[i].pos,Y[i].type,X[i],w,marginloss_y,labelloss_y,1);
         printf("dp------marginloss:%2.4f---labelloss:%2.4f------\n",marginloss,labelloss);	
         printf("ybar----marginloss:%2.4f---labelloss:%2.4f------\n",marginloss_ybar,labelloss_ybar);
         printf("y-------marginloss:%2.4f---labelloss:%2.4f------\n",marginloss_y,labelloss_y);			
         if(abs(labelloss_ybar-labelloss)>1e-5)
         {
            printf("labelloss doesn't match!\n");
            //exit(0);
         }
         if(abs(marginloss_ybar-marginloss)>1e-5)
         {
            printf("marginloss_ybar_dp:%2.4f != marginloss_ybar_computeLoss:%2.4f\n",marginloss,marginloss_ybar);
            printf("marginloss doesn't match!\n");
         }
      }
      
      // construct the gradient vector for the part of true y
      const vector<unsigned int> &y = Y[i].pos;
      const vector<unsigned int> &ylabel = Y[i].type;
      g.Zero();
      
      for(unsigned int j=0; j < y.size(); j++)
      {
         //g.Add(*(X[i].phi_1[y[j]]));
         //g.Add(*(X[i].phi_2[y[j-1]][y[j]-y[j-1]-1]));
         _data->TensorPhi1(X[i].phi_1[y[j]],ylabel[j],0,tphi_1);
         g.Add(*tphi_1);
         if(j > 0)
         {
            _data->TensorPhi2(X[i].phi_2[y[j-1]][y[j]-y[j-1]-1], ylabel[j-1], ylabel[j], 0,0,tphi_2);
            g.Add(*tphi_2);			
         }
      }
      if(y.size() > 0)
      {
         //g.Add(*(X[i].phi_2[y[y.size()-1]][X[i].len-1 - y[y.size()-1]-1]));////
         _data->TensorPhi2(X[i].phi_2[y[y.size()-1]][X[i].len - y[y.size()-1]-1 ], ylabel[y.size()-1], 0,0,0,tphi_2);
         g.Add(*tphi_2);
      }
      
      // for predicted y'
      for(unsigned int j=0; j < ybar.size(); j++)
      {  
         //grad.Add(*(X[i].phi_1[ybar[j]]));                         
         //grad.Add(*(X[i].phi_2[ybar[j-1]][ybar[j]-ybar[j-1]-1]));
         _data->TensorPhi1(X[i].phi_1[ybar[j]],ybarlabel[j],0,tphi_1);
         grad.Add(*tphi_1);
         if(j>0)			
         {
            _data->TensorPhi2(X[i].phi_2[ybar[j-1]][ybar[j]-ybar[j-1]-1], ybarlabel[j-1], ybarlabel[j], 0,0,tphi_2);
            grad.Add(*tphi_2); ////			
         }
      }
      if(ybar.size() > 0)
      {
         //grad.Add(*(X[i].phi_2[ybar[ybar.size()-1]][X[i].len-1 - ybar[ybar.size()-1]-1]));
         _data->TensorPhi2(X[i].phi_2[ybar[ybar.size()-1]][X[i].len - ybar[ybar.size()-1]-1 ], ybarlabel[ybar.size()-1], 0, 0,0,tphi_2);
         grad.Add(*tphi_2);
      }
      grad.Minus(g);
      
      
      // accumulate the loss
      w.Dot(g, w_dot_g);	
      loss = loss - w_dot_g + marginloss + labelloss;    
      
   }
   scalingFactor = 1.0/trainExNum;
   grad.Scale(scalingFactor);	
   loss *= scalingFactor;        
   
   if(verbosity)
   {
      double gnorm = 0.0;
      grad.Norm2(gnorm);
      cout << "gradient norm=" << gnorm << endl;
   }
   //Evaluate(_model);
}
Example #16
void CNDCGRankLoss::add(TheMatrix &l, int offset, int i, double value){
  Scalar temp;
  l.Get(offset + current_ideal_pi[i], temp);
  l.Set(offset + current_ideal_pi[i], temp + value);
}
Example #17
/** find best label with a grammar (with label loss): g(w) := max_y' <w,\phi(x,y')> + Delta(y', y)
 *
 *  @param x [read] sequence
 *  @param y [read] actual label for x
 *  @param w [read] weight vector
 *  @param ybar [write] found best label
 *  @param marginloss [write] margin loss <w,\Phi(x,y')> w.r.t. the best y'
 *  @param labelloss [write] label loss \Delta(y',y) w.r.t. the best y'
 *
 */
void CSMMMulticlassLoss::find_best_label_grammer(const vector<unsigned int> &y,const vector<unsigned int> &ylabel, const CSeqMulticlassFeature::seqfeature_struct &x, const TheMatrix &w, vector<unsigned int> &ybar,vector<unsigned int> &ybarlabel, double &marginloss, double &labelloss, unsigned int personid, unsigned int classNum)
{
    // reset return values
    marginloss = 0;
    labelloss = 0;
    ybar.clear();
    ybarlabel.clear();
    
    /** The margin value vector used in dynamic programming
     */
    
    vector< vector<double> > M (x.len+1,vector<double> (classNum,0));
    
    /** The label loss value vector used in dynamic programming
     */
    vector< vector<double> > L (x.len+1,vector<double> (classNum,0));
    
    /** The back pointers vector used in dynamic programming to retrieve the optimal path
     */
    // The positions
    vector< vector<int> > A (x.len+1,vector<int> (classNum,-1));
    // The class labels
    vector< vector<int> > C (x.len+1,vector<int> (classNum,0));
    
    
    double maxval = -SML::INFTY;
    double w_dot_phi1 = 0;
    double w_dot_phi2 = 0;
    double marginval = 0;
    double labelval = 0;
    unsigned int right = 0;
    unsigned int left = 0;
    unsigned int start = 0;
    unsigned int end = 0;
    unsigned int classID = 0;
    unsigned int classIDPrev = 0;
    
    double sum = 0;
    
    // compute DP statistics for positions 1 to len-1
//         L[0] += y.size()-2;
//         A[1] = 0;
    for(classID=0;classID<classNum;classID++)
    {
	A[1][classID] = 0;
	//C[1][classID] = 0;
    }
    
    //debug
    
    
    //printf("x.len:%d",x.len);
    if(is_first_phi1_used)
    {
	right =0;
	for(classID=0;classID<classNum;classID++)
	{
	    maxval = -SML::INFTY;
	    w_dot_phi1 = 0.0;
	    _data->TensorPhi1(x.phi_1[right],classID,0,tphi_1);
	    //tphi_1->Print();
	    w.Dot(*(tphi_1), w_dot_phi1);
	    marginval = w_dot_phi1;   					
	    sum = marginval;
	    if(sum > maxval)
	    {
		M[right][classID] = marginval;			
		maxval = sum;
	    }
	}
    }
    
    for(right=1; right < x.len+1; right++)        
    {
	
	for(classID=0;classID<classNum;classID++)
	{		
	    // \Phi = (phi1, phi2[left,right])
	    // <w, \Phi> = <w,phi1> + <w,phi[left,right]>                
	    maxval = -SML::INFTY;
	    w_dot_phi1 = 0.0;
	    
	    //w.Dot(*(x.phi_1[right]), w_dot_phi1);
	    //printf("pos:%d,classid:%d ",right,classID);fflush(stdout);
	    //x.phi_1[right]->Print();
	    if(right<x.len)
	    {
		_data->TensorPhi1(x.phi_1[right],classID,0,tphi_1);
		//tphi_1->Print();
		w.Dot(*(tphi_1), w_dot_phi1);
	    }		
	    
	    start = max(0,int(right-maxDuration));
	    //end = right;//-minDuration+1;
	    
	    if(lastDuration>0)
	    {			
		unsigned int lastpos = x.len-lastDuration+1 ;
		end = MIN(right,lastpos);
	    }
	    else
		end = right;
	    for(left=start; left < end; left++)
	    {
		classIDPrev = classID;
		labelval = PartialDelta(left,right,y,ylabel,classIDPrev,x.len);
		assert( (labelval<=x.len) && (labelval>=0) );
		int vb = 0;
		_data->TensorPhi2(x.phi_2[left][right-left-1], classIDPrev, classID, 0,vb,tphi_2);
		w.Dot(*(tphi_2), w_dot_phi2); 
		marginval = w_dot_phi1 + w_dot_phi2;   
		sum = M[left][classIDPrev]+marginval + L[left][classIDPrev]+labelval;
		if(sum > maxval)
		{
		    A[right][classID] = left;
		    C[right][classID] = classIDPrev;
		    M[right][classID] = M[left][classIDPrev] + marginval;
		    L[right][classID] = L[left][classIDPrev] + labelval;
		    maxval = sum;
		}
		
		
	    }
	    
	}
    }
    
    // get optimal path (i.e. segmentation)        
    unsigned int pos,prepos,classid,preclassid;
    
    int maxclassid = 0;
    maxval = -SML::INFTY;
    for(unsigned int i=0;i<classNum;i++)
    {
	sum = M[x.len][i] + L[x.len][i];
	if(sum>maxval)
	{
	    maxval = sum;
	    maxclassid = i;
	}
    }
    
    pos = A[x.len][maxclassid];
    classid = C[x.len][maxclassid];
    
    if(lastDuration>0)
    {	
	pos = x.len-lastDuration;
	classid = 0;
    }
    ybar.push_back(pos);
    ybarlabel.push_back(classid);

    prepos = pos;
    preclassid = classid;
    
    
    while(A[pos][classid] >= 0)
    {                
	pos = A[prepos][preclassid];
	classid = C[prepos][preclassid];
	ybar.push_back(pos);//positions
	ybarlabel.push_back(classid);//class labels		
	
	//printf("%d(%d):%2.4f ",pos,classid,L[pos][classid]);fflush(stdout);
	prepos = pos;
	preclassid = classid;
    }
    
    
    marginloss = M[x.len][maxclassid];
    labelloss = L[x.len][maxclassid];
    //printf("finished back track\n labelloss:%3.4f,marginloss:%3.4f\n",labelloss,marginloss);fflush(stdout);
    reverse(ybar.begin(), ybar.end());
    reverse(ybarlabel.begin(), ybarlabel.end());
    
    //printf("reversed\n");fflush(stdout);
    unsigned int i;
    if(verbosity>=2)
    {
	printf("y:   ");
	for(i=0;i<y.size();i++)
	{
	    printf("%d(%d) ",y[i],ylabel[i]);
	}
	fflush(stdout);
	printf("\nybar:");
	for(i=0;i<ybar.size();i++)
	{
	    printf("%d(%d) ",ybar[i],ybarlabel[i]);
	}
	fflush(stdout);
	printf("\nmargin:%f, loss:%f, totalloss:%f\n",marginloss,labelloss,marginloss+labelloss);
    }
}
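In outline, the dynamic program fills a margin table M and a label-loss table L over segment end positions and class labels, with back pointers A (best previous boundary) and C (best previous class). The recurrence is, schematically (a paraphrase of the code added for clarity; note this snippet sets classIDPrev = classID, so only the current class is considered as predecessor):

\[ (M + L)[r][c] = \max_{l} \Big( M[l][c'] + L[l][c'] + \langle w, \phi_1(r, c) \rangle + \langle w, \phi_2(l, r, c', c) \rangle + \Delta(l, r, c') \Big), \]

after which the best final class is chosen at position x.len and the optimal segmentation ybar is recovered by following the back pointers and reversing.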
Example #18
/** find best label (without label loss): g(w) := max_y' <w,\phi(x,y')>
 *
 *  @param x [read] sequence
 *  @param w [read] weight vector
 *  @param ybar [write] found best label
 *  @param marginloss [write] margin loss <w,\Phi(x,y')> w.r.t. the best y'
 */
void CSMMMulticlassLoss::find_best_label_grammer(const CSeqMulticlassFeature::seqfeature_struct &x, const TheMatrix &w, vector<unsigned int> &ybar, vector<unsigned int> &ybarlabel, double &marginloss, unsigned int personid, unsigned int classNum)
{
    using namespace std;
    
    // reset return values
    marginloss = 0;        
    ybar.clear();
    ybarlabel.clear();
    
    /** The margin value vector used in dynamic programming
     */	
    vector< vector<double> > M (x.len+1,vector<double> (classNum,0));
    
    /** The back pointers vector used in dynamic programming to retrieve the optimal path
     */
    // The positions
    vector< vector<int> > A (x.len+1,vector<int> (classNum,-1));
    // The class labels
    vector< vector<int> > C (x.len+1,vector<int> (classNum,0));
    
    
    double maxval = -SML::INFTY;
    double w_dot_phi1 = 0;
    double w_dot_phi2 = 0;
    double marginval = 0;
    unsigned int right = 0;
    unsigned int left = 0;
    unsigned int start = 0;
    unsigned int end = 0;
    unsigned int classID = 0;
    unsigned int classIDPrev = 0;
    
    double sum = 0;
    
    // compute DP statistics for positions 1 to len-1
    for(classID=0;classID<classNum;classID++)
    {
	A[1][classID] = 0;
	//C[1][classID] = 0;
    }
    
    if(is_first_phi1_used)
    {
	right =0;
	for(classID=0;classID<classNum;classID++)
	{
	    maxval = -SML::INFTY;
	    w_dot_phi1 = 0.0;
	    _data->TensorPhi1(x.phi_1[right],classID,0,tphi_1);
	    //tphi_1->Print();
	    w.Dot(*(tphi_1), w_dot_phi1);
	    marginval = w_dot_phi1;   					
	    sum = marginval;
	    if(sum > maxval)
	    {
		M[right][classID] = marginval;			
		maxval = sum;
	    }
	}
    }
    for(right=1; right < x.len+1; right++)
    {
	for(classID=0;classID<classNum;classID++)
	{		
	    // \Phi = (phi1, phi2[left,right])
	    // <w, \Phi> = <w,phi1> + <w,phi[left,right]>                
	    maxval = -SML::INFTY;
	    w_dot_phi1 = 0.0;
	    
	    if(right<x.len)
	    {
		_data->TensorPhi1(x.phi_1[right],classID,0,tphi_1);
		w.Dot(*(tphi_1), w_dot_phi1);
	    }		

	    start = max(0,int(right-maxDuration));
	    //end = right;//-minDuration+1;
	    if(lastDuration>0)
	    {			
		unsigned int lastpos = x.len-lastDuration+1 ;
		end = MIN(right,lastpos);
	    }
	    else
		end = right;
	    for(left=start; left < end; left++)
	    {
		classIDPrev = classID;
		int vb = 0;
		_data->TensorPhi2(x.phi_2[left][right-left-1], classIDPrev, classID, 0,vb,tphi_2);
		w.Dot(*(tphi_2), w_dot_phi2); 
		marginval = w_dot_phi1 + w_dot_phi2;   
		sum = M[left][classIDPrev]+marginval;
		if(sum > maxval)
		{
		    A[right][classID] = left;
		    C[right][classID] = classIDPrev;
		    M[right][classID] = M[left][classIDPrev] + marginval;						
		    maxval = sum;
		}
	    }
	    
	}
    }
        
    // get optimal path (i.e. segmentation)        
    unsigned int pos,prepos,classid,preclassid;
    int maxclassid = 0;
    maxval = -SML::INFTY;
    for(unsigned int i=0;i<classNum;i++)
    {
	sum = M[x.len][i];
	if(sum>maxval)
	{
	    maxval = sum;
	    maxclassid = i;
	}
    }
    
    pos = A[x.len][maxclassid];
    classid = C[x.len][maxclassid];
    
    if(lastDuration>0)
    {	
	pos = x.len-lastDuration;
	classid = 0;
    }
    ybar.push_back(pos);
    ybarlabel.push_back(classid);
	
    prepos = pos;
    preclassid = classid;
    while(A[pos][classid] >= 0)
    {                
	pos = A[prepos][preclassid];
	classid = C[prepos][preclassid];
	ybar.push_back(pos);//positions
	ybarlabel.push_back(classid);//class labels		
	prepos = pos;
	preclassid = classid;
    }
        
    marginloss = M[x.len][maxclassid];
    reverse(ybar.begin(), ybar.end());
    reverse(ybarlabel.begin(), ybarlabel.end());
}
Example #19
int main(int argc, char* argv[])
{

    if (argc<4) {
        printf("usage: %s foundkey bitpos framecount (framecount2 burst2)\n", argv[0]);
        return -1;
    }

    unsigned framecount = 0;
    uint64_t stop;
    sscanf(argv[1], "%" SCNx64, &stop);  // parse the found key as 64-bit hex (SCNx64 from <cinttypes>); the original "%lux" format cannot read a 64-bit hex key
    int pos;
    sscanf(argv[2], "%i", &pos);
    Bidirectional back;
    TheMatrix tm;
    back.doPrintCand(false);
    sscanf(argv[3], "%u", &framecount);  // framecount is unsigned, so use %u

    uint64_t stop_val = Bidirectional::ReverseBits(stop);
    printf("#### Found potential key (bits: %i)####\n", pos);
    stop_val = back.Forwards(stop_val, 100, NULL);
    back.ClockBack( stop_val, 101+pos );
    uint64_t tst;
    unsigned char bytes[16];
    char out[115];
    out[114]='\0';
    int x = 0;
    printf("Framecount is %i\n", framecount);

    int framecount2 = -1;  // negative means no second frame count was given; as unsigned, the framecount2>=0 check below would always be true
    if (argc>=6) {
	if (strlen(argv[5]) != 114) {
		fprintf(stderr, "burst2 must be a 114 digit bitstring\n");
		exit(1);
	}
        sscanf(argv[4],"%i",&framecount2);
    }

    while (back.PopCandidate(tst)) {
        uint64_t orig = tm.CountUnmix(tst, framecount);
        orig = tm.KeyUnmix(orig);
        printf("KC(%i): ", x);
        for(int i=7; i>=0; i--) {
            printf("%02x ",(unsigned)(orig>>(8*i))&0xff);
        }
        x++;

        if (framecount2>=0) {
            uint64_t mix = tm.KeyMix(orig);
            mix = tm.CountMix(mix,framecount2);
            mix = back.Forwards(mix, 101, NULL);
            back.Forwards(mix, 114, bytes);
            int ok = 0;
            for (int bit=0;bit<114;bit++) {
                int byte = bit / 8;
                int b = bit & 0x7;
                int v = bytes[byte] & (1<<(7-b));
                char check = v ? '1' : '0';
                if (check==argv[5][bit]) ok++;
            }
            if (ok>104) {
                printf(" *** MATCHED ***");
            } else {
                printf(" mismatch");
            }
        }

        printf("\n");

#if 0
        uint64_t mixed = back.Forwards(tst, 101, NULL);
        back.Forwards(mixed, 114, bytes);
        for (int bit=0;bit<114;bit++) {
            int byte = bit / 8;
            int b = bit & 0x7;
            int v = bytes[byte] & (1<<(7-b));
            out[bit] = v ? '1' : '0';
        }
        printf("cipher %s\n", out);
#endif
    }
    return 0;
}