Code example #1
File: SmoTutor.cpp  Project: icaoberg/murphylab139
void SmoTutor::sequentialMinimalOptimisation()
{
   int numberChanged;
   int examineAll    = 0;
   int epoch         = 1;

   do
   {
      numberChanged = 0;

      if (examineAll == 1)
      {
         for (int i = 0; i < ntp; i++)
         {
            numberChanged += examineExample(i);
         }

         examineAll = 0;
      }
      else
      {
         for (int i = 0; i < ntp; i++)
         {
            if (alpha[i] > 0 && alpha[i] < C[i])
            {
               numberChanged += examineExample(i);
            }
         }

         if (numberChanged == 0)
         {
            examineAll = 1;
         }
      }

      /*
      mexPrintf("epoch %d number of changes %d/%d\n",
                epoch++,
                numberChanged,
                nonZeroLagrangeMultipliers());
      */
   }
   while (numberChanged > 0 || examineAll);
}
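All seven listings share Platt's outer-loop heuristic: alternate a full sweep over every Lagrange multiplier with sweeps restricted to the non-bound multipliers (0 < alpha_i < C), and stop once a full sweep changes nothing. Here is a minimal, project-neutral sketch of that shared skeleton in C; examine_example is a stand-in for each project's own pair-optimization step, not a function from any of the sources:

/* Project-neutral sketch of Platt's SMO outer loop.  n is the number of
   training points, alpha/C are the multipliers and their box bounds, and
   examine_example() returns 1 if it changed a multiplier, 0 otherwise. */
void smo_outer_loop(int n, const double *alpha, const double *C,
                    int (*examine_example)(int))
{
    int num_changed = 0;
    int examine_all = 1;

    while (num_changed > 0 || examine_all) {
        num_changed = 0;
        if (examine_all) {
            /* full sweep over every multiplier */
            for (int k = 0; k < n; k++)
                num_changed += examine_example(k);
        } else {
            /* sweep only the non-bound multipliers, 0 < alpha[k] < C[k] */
            for (int k = 0; k < n; k++)
                if (alpha[k] > 0 && alpha[k] < C[k])
                    num_changed += examine_example(k);
        }
        /* after a full sweep, switch to non-bound sweeps; if a non-bound
           sweep changes nothing, fall back to one more full sweep */
        if (examine_all)
            examine_all = 0;
        else if (num_changed == 0)
            examine_all = 1;
    }
}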
Code example #2
File: ltiSVM.cpp  Project: mvancompernolle/ai_project
  // Calls the same method of the superclass.
  bool svm::genericTrain(const dmatrix& input, const ivector& ids) {

    char buffer[80];

    if (validProgressObject()) {
      getProgressObject().reset();
      getProgressObject().setTitle("SVM: Training");
      getProgressObject().setMaxSteps(nClasses);
    }

    bias.resize(nClasses,getParameters().bias,false,true);
    trainData=new dmatrix(input);
    alpha.resize(nClasses,input.rows(),0,false,true);
    makeTargets(ids);
    errorCache.resize(input.rows());

    const parameters& param=getParameters();

    C=param.C;
    tolerance=param.tolerance;
    epsilon=param.epsilon;
    bool abort=false;

    // train one SVM for each class
    for (int cid=0; cid<nClasses && !abort; cid++) {
      int numChanged=0;
      bool examineAll=true;

      currentTarget=&target->getRow(cid);
      currentClass=cid;
      currentAlpha=&alpha.getRow(cid);

      _lti_debug("Training class " << cid << "\n");

      fillErrorCache();

      while ((numChanged > 0 || examineAll) && !abort) {
        numChanged=0;
        if (examineAll) {
          // iterate over all alphas
          for (int i=0; i<trainData->rows(); i++) {
            if (examineExample(i)) {
              numChanged++;
            }
          }
          // next turn, look only at non-bound alphas
          examineAll=false;
        } else {
          // iterate over all non-0 and non-C alphas
          int *tmpAlpha=new int[alpha.getRow(cid).size()];
          int j=0,i=0;
          for (i=0; i<alpha.getRow(cid).size(); i++) {
            if (alpha.getRow(cid).at(i) != 0.0 &&
                alpha.getRow(cid).at(i) != C) {
              tmpAlpha[j++]=i;
            }
          }
          for (i=0; i<j; i++) {
            if (examineExample(tmpAlpha[i])) {
              numChanged++;
            }
          }
          delete[] tmpAlpha;
          // next turn, examine all if we did not succeed this time
          if (numChanged == 0) {
            examineAll=true;
          }
        }
      }
      // update progress info object
      if (validProgressObject()) {
        sprintf(buffer,"numChanged=%d, error=%f",numChanged,errorSum);
        getProgressObject().step(buffer);
        abort=abort || getProgressObject().breakRequested();
      }

      // now limit the number of support vectors
      // does not work yet, so disable it
      if (0) {
        int supnum=0;
        ivector index(currentAlpha->size());
        ivector newindex(currentAlpha->size());
        dvector newkey(currentAlpha->size());
        for (int i=0; i<currentAlpha->size(); i++) {
          if (currentAlpha->at(i) > 0) {
            supnum++;
          }
          index[i]=i;
        }
        if (supnum > param.nSupport && param.nSupport > 0) {
          lti::sort2<double> sorter;
          sorter.apply(*currentAlpha,index,newkey,newindex);

          int i;
          for (i=0; i<newkey.size() &&
                 lti::abs(newkey[i]) > std::numeric_limits<double>::epsilon(); i++) {
          }
          for (int j=i; j<currentAlpha->size()-param.nSupport; j++) {
            currentAlpha->at(newindex[j])=0;
          }
          _lti_debug("Final alpha: " << *currentAlpha << std::endl);
        }
      }
    }

    defineOutputTemplate();

    _lti_debug("alpha:\n" << alpha << "\n");

    // make sure that no Lagrange multiplier is negative,
    // otherwise we might get into trouble later
    alpha.apply(rectify);

    if (abort) {
      setStatusString("Training aborted by user!");
    }
    return !abort;
  }
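The errorCache filled above stores the residual of the current decision function on each training point. In the standard SMO formulation (general background, not LTI-Lib-specific; the sign convention for the bias varies between the listings here),

E_i = f(x_i) - y_i, \qquad f(x) = \sum_j \alpha_j y_j K(x_j, x) - b,

and the KKT conditions explain why only the non-bound multipliers are swept between full passes:

\alpha_i = 0 \;\Rightarrow\; y_i f(x_i) \ge 1, \qquad 0 < \alpha_i < C \;\Rightarrow\; y_i f(x_i) = 1, \qquad \alpha_i = C \;\Rightarrow\; y_i f(x_i) \le 1.

A non-bound multiplier must satisfy an exact equality, so after other multipliers move it is the most likely place for a violation to appear.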
Code example #3
File: svm.c  Project: citterio/CRCDocker
static int compute_svm_adaboost(ESupportVectorMachine *esvm,int n,int d,
				double *x[],int y[],int nmodels,int kernel,
				double kp,double C,double tol,double eps,
				int maxloops,int verbose)
{
  int i,b;
  int *samples;
  double **trx;
  int *try;
  double *prob;
  double *prob_copy;
  double sumalpha;
  double epsilon;
  int *pred;
  double *margin;
  double sumprob;
  int nclasses;
  int *classes; 

  if(nmodels<1){
    fprintf(stderr,"compute_svm_adaboost: nmodels must be greater than 0\n");
    return 1;
  }

  if(C<=0){
    fprintf(stderr,"compute_svm_adaboost: regularization parameter C must be > 0\n");
    return 1;
  }
  if(eps<=0){
    fprintf(stderr,"compute_svm_adaboost: parameter eps must be > 0\n");
    return 1;
  }
  if(tol<=0){
    fprintf(stderr,"compute_svm_adaboost: parameter tol must be > 0\n");
    return 1;
  }
  if(maxloops<=0){
    fprintf(stderr,"compute_svm_adaboost: parameter maxloops must be > 0\n");
    return 1;
  }

  switch(kernel){
  case SVM_KERNEL_LINEAR:
    break;
  case SVM_KERNEL_GAUSSIAN:
    if(kp <=0){
      fprintf(stderr,"compute_svm_adaboost: parameter kp must be > 0\n");
      return 1;
    }
    break;
  case SVM_KERNEL_POLINOMIAL:
    if(kp <=0){
      fprintf(stderr,"compute_svm_adaboost: parameter kp must be > 0\n");
      return 1;
    }
    break;
  default:
    fprintf(stderr,"compute_svm_adaboost: kernel not recognized\n");
    return 1;
  }

  nclasses=iunique(y,n, &classes);

  if(nclasses<=0){
    fprintf(stderr,"compute_svm_adaboost: iunique error\n");
    return 1;
  }
  if(nclasses==1){
    fprintf(stderr,"compute_svm_adaboost: only 1 class recognized\n");
    return 1;
  }
  if(nclasses==2)
    if(classes[0] != -1 || classes[1] != 1){
      fprintf(stderr,"compute_svm_adaboost: for binary classification classes must be -1,1\n");
      return 1;
    }
  if(nclasses>2){
    fprintf(stderr,"compute_svm_adaboost: multiclass classification not allowed\n");
    return 1;
  }

  if(!(esvm->svm=(SupportVectorMachine *)
       calloc(nmodels,sizeof(SupportVectorMachine)))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  if(!(esvm->weights=dvector(nmodels))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  if(!(trx=(double **)calloc(n,sizeof(double*)))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }
  if(!(try=ivector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }
  
  if(!(prob_copy=dvector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }
  if(!(prob=dvector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  if(!(pred=ivector(n))){
    fprintf(stderr,"compute_svm_adaboost: out of memory\n");
    return 1;
  }

  for(i =0;i<n;i++)
    prob[i]=1.0/(double)n;

  esvm->nmodels=nmodels;
  sumalpha=0.0;
  for(b=0;b<nmodels;b++){

    for(i =0;i<n;i++)
      prob_copy[i]=prob[i];
    if(sample(n, prob_copy, n, &samples, TRUE,b)!=0){
      fprintf(stderr,"compute_svm_adaboost: sample error\n");
      return 1;
    }

    for(i =0;i<n;i++){
      trx[i] = x[samples[i]];
      try[i] = y[samples[i]];
    }
    
    if(compute_svm(&(esvm->svm[b]),n,d,trx,try,kernel,kp,C,
		   tol,eps,maxloops,verbose,NULL)!=0){
      fprintf(stderr,"compute_svm_adaboost: compute_svm error\n");
      return 1;
    }
    free_ivector(samples);

    epsilon=0.0;
    for(i=0;i<n;i++){
      pred[i]=predict_svm(&(esvm->svm[b]),x[i],&margin);
      if(pred[i] < -1 ){
	fprintf(stderr,"compute_svm_adaboost: predict_svm error\n");
	return 1;
      }
      if(pred[i]==0 || pred[i] != y[i])
	epsilon += prob[i];
      free_dvector(margin);
    }
    
    if(epsilon > 0 && epsilon < 0.5){
      esvm->weights[b]=0.5 *log((1.0-epsilon)/epsilon);
      sumalpha+=esvm->weights[b];
    }else{
      esvm->nmodels=b;
      break;
    }
      
    sumprob=0.0;
    for(i=0;i<n;i++){
      prob[i]=prob[i]*exp(-esvm->weights[b]*y[i]*pred[i]);
      sumprob+=prob[i];
    }

    if(sumprob <=0){
      fprintf(stderr,"compute_svm_adaboost: sumprob = 0\n");
      return 1;
    }
    for(i=0;i<n;i++)
      prob[i] /= sumprob;
    
  }
  
  if(esvm->nmodels<=0){
    fprintf(stderr,"compute_svm_adaboost: no models produced\n");
    return 1;
  }

  if(sumalpha <=0){
      fprintf(stderr,"compute_svm_adaboost: sumalpha = 0\n");
      return 1;
  }
  for(b=0;b<esvm->nmodels;b++)
    esvm->weights[b] /= sumalpha;
  
  free(trx);
  free_ivector(classes);
  free_ivector(try);
  free_ivector(pred);
  free_dvector(prob);
  free_dvector(prob_copy);
  return 0;

}





static void svm_smo(SupportVectorMachine *svm)
{
  int i,k;
  int numChanged;
  int examineAll;
  int nloops=0;


  svm->end_support_i=svm->n;

  if(svm->kernel_type==SVM_KERNEL_LINEAR){
    svm->kernel_func=dot_product_func;
    svm->learned_func=learned_func_linear;
  }

  if(svm->kernel_type==SVM_KERNEL_POLINOMIAL){
    svm->kernel_func=polinomial_kernel;
    svm->learned_func=learned_func_nonlinear;
  }

  if(svm->kernel_type==SVM_KERNEL_GAUSSIAN){
    /*
    svm->precomputed_self_dot_product=(double *)calloc(svm->n,sizeof(double));
    */
    for(i=0;i<svm->n;i++)
      svm->precomputed_self_dot_product[i] = dot_product_func(i,i,svm);
    svm->kernel_func=rbf_kernel;
    svm->learned_func=learned_func_nonlinear;
  }

  numChanged=0;
  examineAll=1;

  svm->convergence=1;
  while(svm->convergence==1 &&(numChanged>0 || examineAll)){
    numChanged=0;
    if(examineAll){
      for(k=0;k<svm->n;k++)
	numChanged += examineExample(k,svm);
    }else{
      for(k=0;k<svm->n;k++)
	if(svm->alph[k] > 0 && svm->alph[k] < svm->Cw[k])
	  numChanged += examineExample(k,svm);
    }
    if(examineAll==1)
      examineAll=0;
    else if(numChanged==0)
      examineAll=1;

    nloops+=1;
    if(nloops==svm->maxloops)
      svm->convergence=0;
    if(svm->verbose==1)
      fprintf(stdout,"%6d\b\b\b\b\b\b\b",nloops);
  }

}
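For reference, the boosting loop in compute_svm_adaboost is discrete AdaBoost with resampling. Writing p_i for prob[i] and h_b for round b's SVM, the weighted error and updates implemented by the code are

\varepsilon_b = \sum_{i:\, h_b(x_i) \ne y_i} p_i, \qquad w_b = \tfrac{1}{2}\ln\frac{1-\varepsilon_b}{\varepsilon_b}, \qquad p_i \leftarrow \frac{p_i\, e^{-w_b y_i h_b(x_i)}}{\sum_k p_k\, e^{-w_b y_k h_b(x_k)}}.

Training stops early when \varepsilon_b leaves (0, 0.5), and the final division by sumalpha normalizes the ensemble weights to sum to one (this implementation's choice; classic AdaBoost leaves them unnormalized).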
Code example #4
alphaRet* getAlphaFromTrainSet(int N, F2D* trn1, F2D* trn2, int iterations)
{
    float tolerance, C, eps, *b;
    F2D *a_result, *b_result;
    int NumChanged, r, ExamineAll, cnt, d, dim, ret, iter, i;
    F2D *X, *Y;
    F2D *a, *e;

    b = malloc(sizeof(float));
    alphaRet* alpha;
    alpha = (alphaRet*)malloc(sizeof(alphaRet));
    tolerance = 0.001;
    C = 0.05;
    d = -1;
    dim = 256;
    eps = 0.001;
    a_result = fSetArray(iterations, N, 0);
    b_result = fSetArray(iterations, 1, 0);
    ret = 0;
    
    X = usps_read_partial( trn1, trn2, 0, 1, (N/iterations), iterations);
    
    for(iter=0; iter<iterations; iter++)
    {
        Y = usps_read_partial( trn1, trn2, iter, 0, N/iterations, iterations);
        
        a = fSetArray(N, 1, 0);
        arrayref(b,0) = 0;                  /** check if ptr **/
        e = fSetArray(N, 1, 0);
        ExamineAll = 1;
        cnt = 0;
        NumChanged = 0;

        while(NumChanged>0 || ExamineAll == 1)
        {
            cnt = cnt + 1;
            NumChanged = 0;
            if(ExamineAll == 1)
            {
                for(i=0; i<N; i++)
                {
                    ret = examineExample(i, a, b, C, e, X, Y, tolerance, N, eps, dim);
                    NumChanged = NumChanged + ret;
                }
            }
            else
            {
                for(i=0; i<N; i++)
                {
                    if( asubsref(a,i) > 0 && asubsref(a,i) <C )
                    {
                        ret = examineExample(i, a, b, C, e, X, Y, tolerance, N, eps, dim);
                        NumChanged = NumChanged + ret;
                    }
                }
            }
            if(ExamineAll == 1)
                ExamineAll = 0;
            else if(NumChanged == 0)
                ExamineAll = 1;
        }

        for(r=0; r<N; r++)
            subsref(a_result,iter,r) = asubsref(a,r);   /** a_result has size iteration,N .. Check **/
        asubsref(b_result,iter) = arrayref(b,0);

        fFreeHandle(Y);
        fFreeHandle(e);
        if(iter < iterations-1)
            fFreeHandle(a);     /* keep the last iteration's a alive; it is returned as alpha->a below */
    }
  
    alpha->C = C;
    alpha->d = d;
    alpha->dim = dim;
    alpha->eps = eps;
    alpha->a_result = a_result;
    alpha->b_result = b_result;
    alpha->a = a;
    alpha->b = arrayref(b,0);
    alpha->X = X;
    alpha->tolerance = tolerance;
    alpha->ret = ret;
    
    free(b);
   
    return alpha; 

}
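Each row of (a_result, b_result) returned here parameterizes one trained binary classifier. Under the usual SMO convention (an assumption, since the helper routines usps_read_partial and examineExample are not shown), the resulting decision function is

f(x) = \sum_i \alpha_i y_i K(x_i, x) - b,

with a test point classified by \operatorname{sign}(f(x)).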
Code example #5
svmModel SMO::train()
{
    int passes = 0;
    int maxPasses = 25;
    int numChanged = 0;
    int examineAll = 1;
    alpha.resize(points.size(), 0);
    errorCache.resize(points.size(), 0);
    w.resize(points.size(), 0);
    threshold = 0;
    // Normalize the input points
    normalizeFeatures();
    // SMO outer loop:
    // Each iteration alternates between a sweep through all points (examineAll = 1)
    // and a sweep through only the non-boundary points (examineAll = 0).
    while ((numChanged > 0 || examineAll) && (passes < maxPasses)) {
        numChanged = 0;
        if (examineAll) { 
            for (unsigned int i = 0; i < points.size(); i++)
            {
                if (plugin->isAborted() == true)
                {
                    plugin->progress.report("User Aborted", 0, ABORT, true);
                    return svmModel();
                }
                plugin->progress.report("Training SVM for class " + className, passes*100/maxPasses + (i+1)*100/maxPasses/points.size(), NORMAL);
                numChanged += examineExample (i);
            }
        }
        else { 
            for (unsigned int i = 0; i < points.size(); i++)
                if (alpha[i] != 0 && alpha[i] != C)
                {
                    if (plugin->isAborted() == true)
                    {
                        plugin->progress.report("User Aborted", 0, ABORT, true);
                        return svmModel();
                    }
                    plugin->progress.report("Training SVM for class " + className, passes*100/maxPasses + (i+1)*100/maxPasses/points.size(), NORMAL);
                    numChanged += examineExample (i);
                }
        }
        if (examineAll == 1)
            examineAll = 0;
        else if (numChanged == 0)
            examineAll = 1;
        /*       
        double s = 0.0;
        for (unsigned int i=0; i<points.size(); i++)
        s += alpha[i];
        double t = 0.;
        for (unsigned int i=0; i<points.size(); i++)
        for (unsigned int j=0; j<points.size(); j++)
        t += alpha[i]*alpha[j]*target[i]*target[j]*kernel(points[i],points[j]);
        double objFunc = (s - t/2.0); 
        plugin->progress.report(QString("The value of objective function should increase with each iteration.\n The value of objective function = %1").arg(objFunc).toStdString(), (passes*100)/maxPasses, NORMAL);
        */
        passes++;
    }
    plugin->progress.report("Finished training SVM for class " + className, 100, NORMAL, true);

    // Get the model for this class
    vector<double> m_alpha;
    vector<int> m_target;
    int numberOfsupportVectors = 0;
    vector<point> supportVectors;
    int attributes = points[0].size();
    for (unsigned int i = 0; i < alpha.size(); i++)
    {
        if (alpha[i] > 0)
        {
            m_alpha.push_back(alpha[i]);
            numberOfsupportVectors++;
            supportVectors.push_back(points[i]);
            m_target.push_back(target[i]);
        }
    }
    svmModel model = svmModel(className, kernelType, threshold, attributes, w, sigma, numberOfsupportVectors, m_alpha, supportVectors, m_target, mu, stdv);

    // Compute the error rates.
    plugin->progress.report("Computing Error Rates using the model for class " + className, 0, NORMAL, true);
    double trainErrorRate = 0;
    double testErrorRate = 0;
    double crossValidationErrorRate = 0;

    for (unsigned int i = 0; i < points.size(); i++)
    {
        plugin->progress.report("Comuting Error Rates using the model for class " + className, (i+1)*60.0/points.size(), NORMAL, true);

        if (model.predict(points[i]) > 0 != target[i] > 0)
            trainErrorRate++;
    }
    trainErrorRate = trainErrorRate*100/points.size();

    for (unsigned int i = 0; i < testSet.size(); i++)
    {
        plugin->progress.report("Comuting Error Rates using the model for class " + className, 60 + (i+1)*20.0/testSet.size(), NORMAL, true);
        if (model.predict(testSet[i]) > 0 != yTest[i] > 0)
            testErrorRate++;
    }
    if (testSet.size())
        testErrorRate = testErrorRate*100/testSet.size();

    for (unsigned int i = 0; i < crossValidationSet.size(); i++)
    {
        plugin->progress.report("Comuting Error Rates using the model for class " + className, 80 + (i+1)*20/crossValidationSet.size(), NORMAL, true);
        if (model.predict(crossValidationSet[i]) > 0 != yCV[i] > 0)
            crossValidationErrorRate++;
    }
    if (crossValidationSet.size())
        crossValidationErrorRate = crossValidationErrorRate*100/crossValidationSet.size();

	if (testSet.size() && crossValidationSet.size())
		plugin->progress.report(QString("%1\nTrain error = %2\nCrossValidation error = %3\nTest error = %4\n").arg(className.c_str()).arg(trainErrorRate).arg(crossValidationErrorRate).arg(testErrorRate).toStdString(), 100, WARNING, true);
	else
		plugin->progress.report(QString("%1\nTrain error = %2").arg(className.c_str()).arg(trainErrorRate).toStdString(), 100, WARNING, true);

    return model;
}
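The commented-out diagnostic inside the outer loop evaluates the SVM dual objective,

W(\alpha) = \sum_i \alpha_i - \tfrac{1}{2} \sum_{i,j} \alpha_i \alpha_j y_i y_j K(x_i, x_j),

which SMO increases monotonically, so re-enabling that block gives a cheap sanity check on a training run.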
Code example #6
File: smo_mex.c  Project: lambday/svc
/* --------------------------------------------------------------
 Finds the second Lagrange multiplier to optimize.
-------------------------------------------------------------- */
long examineExample( long i1 )
{
   double y1, alpha1, E1, r1;
   double tmax;
   double E2, temp;
   long k, i2;
   long k0;

   y1 = target[i1];
   alpha1 = alpha[i1];

   if( alpha1 > 0 && alpha1 < C(i1) )
      E1 = error_cache[i1];
   else
      E1 = learned_func(i1) - y1;

   r1 = y1 * E1;
   if(( r1 < -tolerance && alpha1 < C(i1) )
      || (r1 > tolerance && alpha1 > 0)) {
    /* Try to pick i2 in three ways; on the first successful joint step, return 1 immediately. */

      for( i2 = (-1), tmax = 0, k = 0; k < N; k++ ) {
         if( alpha[k] > 0 && alpha[k] < C(k) ) {
            E2 = error_cache[k];
            temp = fabs(E1 - E2);
            if( temp > tmax ) {
               tmax = temp;
               i2 = k;
            }
         }
      }
      if( i2 >= 0 ) {
         if( takeStep(i1,i2) )
            return( 1 );
      }

#ifdef RANDOM
      for( k0 = rand(), k = k0; k < N + k0; k++ ) {
         i2 = k % N;
#else
      for( k = 0; k < N; k++) {
         i2 = k;
#endif
         if( alpha[i2] > 0 && alpha[i2] < C(i2) ) {
            if( takeStep(i1,i2) )
               return( 1 );
         }
      }

#ifdef RANDOM
      for( k0 = rand(), k = k0; k < N + k0; k++ ) {
         i2 = k % N;
#else
      for( k = 0; k < N; k++) {
         i2 = k;
#endif
         if( takeStep(i1,i2) )
            return( 1 );
      }

   } /* if( ... ) */

   return( 0 );
}
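The first of the three attempts above is Platt's second-choice heuristic: among the non-bound multipliers, pick the partner whose cached error differs most from E1,

i_2 = \arg\max_{k:\, 0 < \alpha_k < C(k)} |E_1 - E_k|,

which approximately maximizes the step size at no extra kernel cost; the two sweeps that follow are fallbacks for when that best candidate fails to make positive progress.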

/* --------------------------------------------------------------
 Main SMO optimization cycle.
-------------------------------------------------------------- */
void runSMO( void )
{
   long numChanged = 0;
   long examineAll = 1;
   long k;

   while( numChanged > 0 || examineAll ) {
      numChanged = 0;

      if( examineAll ) {
         for( k = 0; k < N; k++ ) {
            numChanged += examineExample( k );
         }
      }
      else {
         for( k = 0; k < N; k++ ) {
            if( alpha[k] != 0 && alpha[k] != C(k) )
               numChanged += examineExample( k );
         }
      }

      if( examineAll == 1 )
         examineAll = 0;
      else if( numChanged == 0 )
         examineAll = 1;
   }
}

/* ==============================================================
 Main MEX function - interface to Matlab.
============================================================== */
void mexFunction( int nlhs, mxArray *plhs[],
		  int nrhs, const mxArray*prhs[] )
{
   long i,j ;
   double *labels12, *initAlpha, *nsv, *tmp, *trn_err, *margin;
   double nerr;
   double C1, C2;


   /* ---- get input arguments  ----------------------- */
   if(nrhs < 5)
      mexErrMsgTxt("Not enough input arguments.");

   /* data matrix [dim x N ] */
   if( !mxIsNumeric(prhs[0]) || !mxIsDouble(prhs[0]) ||
       mxIsEmpty(prhs[0])    || mxIsComplex(prhs[0]) )
      mexErrMsgTxt("Input X must be a real matrix.");

   /* vector of labels (1,2) */
   if( !mxIsNumeric(prhs[1]) || !mxIsDouble(prhs[1]) ||
       mxIsEmpty(prhs[1])    || mxIsComplex(prhs[1]) ||
       (mxGetN(prhs[1]) != 1 && mxGetM(prhs[1]) != 1))
      mexErrMsgTxt("Input I must be a real vector.");

   labels12 = mxGetPr(prhs[1]);    /* labels (1,2) */
   dataA = mxGetPr(prhs[0]);  /* pointer at patterns */
   dataB = dataA;
   dim = mxGetM(prhs[0]);     /* data dimension */
   N = mxGetN(prhs[0]);       /* number of data */

   /* kernel identifier */
   ker = kernel_id( prhs[2] );
   if( ker == -1 )
     mexErrMsgTxt("Improper kernel identifier.");

   /*  get pointer to arguments  */
   arg1 = mxGetPr(prhs[3]);

   /*  one or two real trade-off constant(s)  */
   if( !mxIsNumeric(prhs[4]) || !mxIsDouble(prhs[4]) ||
       mxIsEmpty(prhs[4])    || mxIsComplex(prhs[4]) ||
       (mxGetN(prhs[4]) != 1  && mxGetM(prhs[4]) != 1 ))
      mexErrMsgTxt("Improper input argument C.");
   else {
      /* allocate memory for constant C */
      if( (const_C = mxCalloc(N, sizeof(double) )) == NULL) {
        mexErrMsgTxt("Not enough memory.");
      }

      if( MAX( mxGetN(prhs[4]), mxGetM(prhs[4])) == 1 ) {
        C1 = mxGetScalar(prhs[4]);
        for( i=0; i < N; i++ ) const_C[i] = C1;
      } else
      if( MAX( mxGetN(prhs[4]), mxGetM(prhs[4])) == 2 ) {
         tmp = mxGetPr(prhs[4]);
         C1 = tmp[0];
         C2 = tmp[1];
         for( i=0; i < N; i++ ) {
           if( labels12[i]==1) const_C[i] = C1; else const_C[i] = C2;
         }
      } else
      if( MAX( mxGetN(prhs[4]), mxGetM(prhs[4])) == N ) {
         tmp = mxGetPr(prhs[4]);
         for( i=0; i < N; i++ ) const_C[i] = tmp[i];
      } else {
        mexErrMsgTxt("Improper argument C.");
      }
   }

   /* real parameter eps */
   if( nrhs >= 6 ) {
      if( !mxIsNumeric(prhs[5]) || !mxIsDouble(prhs[5]) ||
         mxIsEmpty(prhs[5])    || mxIsComplex(prhs[5]) ||
         mxGetN(prhs[5]) != 1  || mxGetM(prhs[5]) != 1 )
         mexErrMsgTxt("Input eps must be a scalar.");
      else
         eps = mxGetScalar(prhs[5]);   /* take eps argument */
   }

   /* real parameter tol */
   if(nrhs >= 7) {
      if( !mxIsNumeric(prhs[6]) || !mxIsDouble(prhs[6]) ||
         mxIsEmpty(prhs[6])    || mxIsComplex(prhs[6]) ||
         mxGetN(prhs[6]) != 1  || mxGetM(prhs[6]) != 1 )
         mexErrMsgTxt("Input tol must be a scalar.");
      else
         tolerance = mxGetScalar(prhs[6]);  /* take tolerance argument */
   }

   /* real vector of Lagrange multipliers */
   if(nrhs >= 8) {
      if( !mxIsNumeric(prhs[7]) || !mxIsDouble(prhs[7]) ||
          mxIsEmpty(prhs[7])    || mxIsComplex(prhs[7]) ||
          (mxGetN(prhs[7]) != 1  && mxGetM(prhs[7]) != 1 ))
          mexErrMsgTxt("Input Alpha must be a vector.");
   }

   /* real scalar - bias */
   if( nrhs >= 9 ) {
      if( !mxIsNumeric(prhs[8]) || !mxIsDouble(prhs[8]) ||
         mxIsEmpty(prhs[8])    || mxIsComplex(prhs[8]) ||
         mxGetN(prhs[8]) != 1  || mxGetM(prhs[8]) != 1 )
         mexErrMsgTxt("Input bias must be a scalar.");
   }

   /* ---- init variables ------------------------------- */

   ker_cnt = 0;

   /* allocate memory for targets (labels) (1,-1) */
   if( (target = mxCalloc(N, sizeof(double) )) == NULL) {
      mexErrMsgTxt("Not enough memory.");
   }

   /* transform labels12 (1,2) into targets (1,-1) */
   for( i = 0; i < N; i++ ) {
      target[i] = - labels12[i]*2 + 3;
   }

   /* create output variable for bias */
   plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL);
   b = mxGetPr(plhs[1]);

   /* take init value of bias if given */
   if( nrhs >= 9 ) {
      *b = -mxGetScalar(prhs[8]);
   }

   /* allocate memory for error_cache */
   if( (error_cache = mxCalloc(N, sizeof(double) )) == NULL) {
      mexErrMsgTxt("Not enough memory for error cache.");
   }

   /* create vector for Lagrange multipliers */
   plhs[0] = mxCreateDoubleMatrix(N,1,mxREAL);
   alpha = mxGetPr(plhs[0]);

   /* if Lagrange multipliers are given then use them as initial values */
   if( nrhs >= 8 ) {
      initAlpha = mxGetPr(prhs[7]);
      for( i = 0; i < N; i++ ) {
         alpha[i] = initAlpha[i];
      }

      /* Init error cache for non-bound multipliers. */
      for( i = 0; i < N; i++ ) {
         if( alpha[i] != 0 && alpha[i] != C(i) ) {
            error_cache[i] = learned_func(i) - target[i];
         }
      }
   }

   /* ---- run SMO ------------------------------------------- */
   runSMO();

   /* ---- outputs  --------------------------------- */
   if( nlhs >= 3 ) {

      /* count number of support vectors */
      plhs[2] = mxCreateDoubleMatrix(1,1,mxREAL);
      nsv = mxGetPr(plhs[2]);
      *nsv = 0;

      for( i = 0; i < N; i++ ) {
         if( alpha[i] > ZERO_LIM ) (*nsv)++; else alpha[i] = 0;
      }
   }

   if( nlhs >= 4 ) {
     plhs[3] = mxCreateDoubleMatrix(1,1,mxREAL);
     (*mxGetPr(plhs[3])) = (double)ker_cnt;
   }

   if( nlhs >= 5) {

     /* evaluates classification error on training patterns */
     plhs[4] = mxCreateDoubleMatrix(1,1,mxREAL);
     trn_err = mxGetPr(plhs[4]);
     nerr = 0;

     for( i = 0; i < N; i++ ) {
        if( target[i] == 1 ) {
           if( learned_func(i) < 0 ) nerr++;
        }
        else
           if( learned_func(i) >= 0 ) nerr++;
     }

     *trn_err = nerr/N;
   }

   if( nlhs >= 6) {

      /* compute margin */
      plhs[5] = mxCreateDoubleMatrix(1,1,mxREAL);
      margin = mxGetPr(plhs[5]);
      *margin = 0;
      for( i = 0; i < N; i++ ) {
        for( j = 0; j < N; j++ ) {
           if( alpha[i] > 0 && alpha[j] > 0 )
              *margin += alpha[i]*alpha[j]*target[i]*target[j]*kernel(i,j);
        }
      }

      *margin = 1/sqrt(*margin);
   }

   /* decision function of type <w,x>+b is used */
   *b = -*b;

   /* ----- free memory --------------------------------------- */
   mxFree( error_cache );
   mxFree( target );
}
Code example #7
File: smo1d_mex.c  Project: Aura-zx/sharat-cbcl
/* --------------------------------------------------------------
 Finds the second Lagrange multiplier to optimize.
-------------------------------------------------------------- */
long examineExample( long i1 )
{
   double y1, alpha1, E1, r1;
   double tmax;
   double E2, temp;
   long k, i2;
   long k0;

   y1 = target[i1];
   alpha1 = alpha[i1];

   E1 = w*data[i1] - *b - y1;

   r1 = y1 * E1;
   if(( r1 < -tolerance && alpha1 < C )
      || (r1 > tolerance && alpha1 > 0)) {
    /* Try to pick i2 in three ways; on the first successful joint step, return 1 immediately. */

      for( i2 = (-1), tmax = 0, k = 0; k < num_data; k++ ) {
         if( alpha[k] > 0 && alpha[k] < C ) {
            E2 = w*data[k] - *b - target[k];

            temp = fabs(E1 - E2);
            if( temp > tmax ) {
               tmax = temp;
               i2 = k;
            }
         }
      }
      if( i2 >= 0 ) {
         if( takeStep(i1,i2) )
            return( 1 );
      }

#ifdef RANDOM
      for( k0 = rand(), k = k0; k < num_data + k0; k++ ) {
         i2 = k % num_data;
#else
      for( k = 0; k < num_data; k++) {
         i2 = k;
#endif
         if( alpha[i2] > 0 && alpha[i2] < C ) {
            if( takeStep(i1,i2) )
               return( 1 );
         }
      }

#ifdef RANDOM
      for( k0 = rand(), k = k0; k < num_data + k0; k++ ) {
         i2 = k % num_data;
#else
      for( k = 0; k < num_data; k++) {
         i2 = k;
#endif
         if( takeStep(i1,i2) )
            return( 1 );
      }

   } /* if( ... ) */

   return( 0 );
}

/* --------------------------------------------------------------
 Main SMO optimization cycle.
-------------------------------------------------------------- */
void runSMO( void )
{
   long numChanged = 0;
   long examineAll = 1;
   long k;

   while( numChanged > 0 || examineAll ) {
      numChanged = 0;

      if( examineAll ) {
         for( k = 0; k < num_data; k++ ) {
            numChanged += examineExample( k );
         }
      }
      else {
         for( k = 0; k < num_data; k++ ) {
            if( alpha[k] != 0 && alpha[k] != C )
               numChanged += examineExample( k );
         }
      }

      if( examineAll == 1 )
         examineAll = 0;
      else if( numChanged == 0 )
         examineAll = 1;
   }
}

/* ==============================================================
 Main MEX function - interface to Matlab.
============================================================== */
void mexFunction( int nlhs, mxArray *plhs[],
		  int nrhs, const mxArray*prhs[] )
{
   long i,j ;
   double *labels12, *nsv, *trn_err, *margin;
   double nerr;

   /* ---- check number of input arguments  ------------- */

   if(nrhs != 5 )
      mexErrMsgTxt("Incorrect number of input arguments.");
   if(nlhs < 2)
      mexErrMsgTxt("Not enough output arguments.");


   /* ---- get input arguments  ----------------------- */
   labels12 = mxGetPr(prhs[1]);    /* labels (1,2) */
   data = mxGetPr(prhs[0]);  /* pointer at data */
   dim = mxGetM(prhs[0]);     /* data dimension */
   num_data = mxGetN(prhs[0]);       /* number of data */
   C = mxGetScalar( prhs[2] );
   eps = mxGetScalar( prhs[3] );
   tolerance = mxGetScalar( prhs[4] );

   /* ---- init variables ------------------------------- */   
   ker_cnt=0;      /* num of dot product evaluations  */

   /* allocate memory for targets (labels) (1,-1) */
   if( (target = (double*)mxCalloc(num_data, sizeof(double) )) == NULL) {
      mexErrMsgTxt("Not enough memory.");
   }

   /* transform labels12 (1,2) into targets (1,-1) */
   for( i = 0; i < num_data; i++ ) {
      target[i] = - labels12[i]*2 + 3;
   }

   /* create output variable for bias */
   plhs[1] = mxCreateDoubleMatrix(1,1,mxREAL);
   b = mxGetPr(plhs[1]); *b= 0;

   /* create vector for Lagrangeians */
   plhs[0] = mxCreateDoubleMatrix(num_data,1,mxREAL);
   alpha = mxGetPr(plhs[0]);

   /* initialize alpha */
   for( i = 0; i < num_data; i++ ) {
     alpha[i] = 0;
   }
   w=0;

   /* ---- run SMO ------------------------------------------- */
   runSMO();


   /* ---- outputs ---------------------------------- */
   if( nlhs >= 3 ) {

      /* count number of support vectors */
      plhs[2] = mxCreateDoubleMatrix(1,1,mxREAL);
      nsv = mxGetPr(plhs[2]);
      *nsv = 0;

      for( i = 0; i < num_data; i++ ) {
         if( alpha[i] > 0) (*nsv)++; 
      }
   }

   if( nlhs >= 4 ) {
     /* number of kernel evaluations */
     plhs[3] = mxCreateDoubleMatrix(1,1,mxREAL);
     (*mxGetPr(plhs[3])) = (double)ker_cnt;
   }

   if( nlhs >= 5) {

     /* evaluates classification error on training patterns */
     plhs[4] = mxCreateDoubleMatrix(1,1,mxREAL);
     trn_err = mxGetPr(plhs[4]);
     *trn_err = 0;

     for( i = 0; i < num_data; i++ ) {
       if( target[i]*(w*data[i]-*b) < 0) (*trn_err)++;
     }

     *trn_err = (*trn_err)/(double)num_data;
   }

   if( nlhs >= 6) {
     
      /* compute margin */
      plhs[5] = mxCreateDoubleMatrix(1,1,mxREAL);
      margin = mxGetPr(plhs[5]);
      *margin = 1/sqrt(w*w);
   }

   /* decision function of type <w,x>+b is used */
   *b = -*b;

   /* ----- free memory --------------------------------------- */
   mxFree( target );
}
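Since this variant is linear and the data are one-dimensional, the scalar w maintained by takeStep (not shown here) plays the role of \mathbf{w} = \sum_i \alpha_i y_i x_i, so the reported margin reduces to

\text{margin} = \frac{1}{\lVert \mathbf{w} \rVert} = \frac{1}{\sqrt{w^2}},

the same quantity example #6 computes in the kernelized form \lVert \mathbf{w} \rVert^2 = \sum_{i,j} \alpha_i \alpha_j y_i y_j K(x_i, x_j).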