// Example #1
// 0
/**
 * Classify a feature vector with the trained liblinear model.
 *
 * @param v  feature vector; must have at least prob.n elements
 *           (fewer is fatal; extra elements are ignored with a warning).
 * @return   predicted class label (passed through y_scaling when enabled).
 *
 * Builds a sparse feature_node array (zero-valued and degenerate-scaled
 * features are skipped), appends the bias node when the model uses one,
 * terminates the array with index == -1 as liblinear requires, and calls
 * predict() / predict_probability().
 */
int LINEAR::recognize( const ColumnVector &v ) const {
  int label;
  if( v.length() < liblinear->prob.n ){
    ERR_PRINT("Dimension of feature vector is too small. %d < %d\n",v.length(),liblinear->prob.n );
    exit(1);
  }
  else if( v.length() > liblinear->prob.n )
    ERR_PRINT("Warning: Dimension of feature vector is too large. %d > %d\n",v.length(),liblinear->prob.n );

  // Set the feature vector.
  // +2 leaves room for the optional bias node and the index==-1 terminator.
  struct feature_node *x = new struct feature_node[ liblinear->prob.n + 2 ];
  int idx = 0;
  for( int i = 0; i < liblinear->prob.n; i++ ){
    x[ idx ].index = i;
    x[ idx ].value = v( i );

    if( is_scaling ){
      // Skip this feature (do not advance idx) unless the condition below holds.
      if( ( feature_max[ x[ idx ].index ] != feature_min[ x[ idx ].index ] ) && ( x[ idx ].value != 0 ) ){
	x[ idx ].value = scaling( x[ idx ].index, x[ idx ].value );
	if( liblinear->model->bias < 0 ) x[ idx ].index++; // indices start at 1
	++idx;
      }
    }
    else{
      // Skip this feature (do not advance idx) unless the condition below holds.
      if( x[ idx ].value != 0 ){
	if( liblinear->model->bias < 0 ) x[ idx ].index++; // indices start at 1
	++idx;
      }
    }
  }

  // Append the bias node when the model was trained with a bias term.
  if(liblinear->model->bias>=0){
    x[ idx ].index = liblinear->prob.n;
    x[ idx ].value = liblinear->model->bias;
    idx++;
  }
  x[ idx ].index = -1; // liblinear sentinel terminating the sparse vector

  // predict
  if( probability ){
    if( liblinear->model->param.solver_type != L2R_LR ){
      ERR_PRINT( "probability output is only supported for logistic regression\n" );
      exit(1);
    }
    label = predict_probability(liblinear->model,x,prob_estimates);
    //     for(j=0;j<nclass;j++)
    //       printf(" %g",prob_estimates[j]);
    //     printf("\n");
  }
  else
    label = predict(liblinear->model,x);

  if( is_y_scaling )
    label = y_scaling( label );

  delete [] x; // was `delete x` — array form required for new[] (UB otherwise)
  return label;
}
/* Solve Ax = b using the Jacobi Method.
 *
 * @param A  square coefficient matrix (must be diagonally dominant for
 *           convergence; diagonal entries must be non-zero).
 * @param b  right-hand-side column vector.
 * @return   the approximate solution x, as a Matrix.
 */
Matrix jacobi(Matrix& A, Matrix& b)
{
    ColumnVector x0(A.rows()); // current iterate
    ColumnVector x1(A.rows()); // next iterate

    // STEP 1: Choose an initial guess (all ones).
    fill(x0.begin(),x0.end(),1);

    // STEP 2: While convergence is not reached, iterate.
    // NOTE(review): this assumes r.length() is the residual's Euclidean norm;
    // elsewhere in this file v.length() is used as an element COUNT, which
    // would make this loop never terminate for systems larger than 1x1 — confirm.
    ColumnVector r = static_cast<ColumnVector>(A*x0 - b);
    while (r.length() > 1)
    {
        for (int i=0;i<A.cols();i++)
        {
            // x1_i = (b_i - sum_{j != i} A_ij * x0_j) / A_ii
            double sum = 0;
            for (int j=0;j<A.cols();j++)
            {
                if (j==i) continue;
                sum += A(i,j) * x0(j,0);
            }
            x1(i,0) = (b(i,0) - sum) / A(i,i);
        }
        x0 = x1;
        r = static_cast<ColumnVector>(A*x0 - b);
    }

    // Return by value — the original heap-allocated a shared_ptr<Matrix> only
    // to dereference and copy it out, which was a needless allocation.
    return static_cast<Matrix>(x0);
}
/* Solve Ax = b using the Method of Steepest Descent.
 *
 * @param A  symmetric coefficient matrix (required by the method;
 *           a non-symmetric A yields an empty 0x0 result as an error signal).
 * @param b  right-hand-side column vector.
 * @return   the approximate solution x, or a 0x0 Matrix if A is not symmetric.
 */
Matrix steepestDescent(Matrix& A, Matrix& b)
{
    // The Method of Steepest Descent *requires* a symmetric matrix.
    // Return an empty matrix by value — no heap allocation needed.
    if (!isSymmetric(A))
        return Matrix(0,0);

    /* STEP 1: Start with a guess. Our guess is all ones. */
    ColumnVector x(A.cols());
    fill(x.begin(),x.end(),1);

    /* Iterate until the residual is small enough. */
    for (;;)
    {
        /* STEP 2: Calculate the residual r = b - Ax */
        ColumnVector r = static_cast<ColumnVector>(b - A*x);

        // NOTE(review): assumes r.length() is the residual norm; elsewhere in
        // this file length() is used as an element count — confirm semantics.
        if (r.length() < .01) break;

        /* STEP 3: step size alpha = (r.r) / (r.Ar) */
        double alpha = (r.transpose() * r)(0,0) / (r.transpose() * A * r)(0,0);

        /* STEP 4: x_{k+1} = x_k + alpha * r */
        x = x + alpha * r;
    }

    // Return by value — the original wrapped the result in a shared_ptr only
    // to copy it out on return, which was a needless allocation.
    return static_cast<Matrix>(x);
}