Example #1
void precondition(SparseMatrixTypeT const & A,
                  std::vector< std::map<SizeT, NumericT> > & output,
                  ilut_tag const & tag)
{
  typedef std::map<SizeT, NumericT>                             SparseVector;
  typedef typename SparseVector::iterator                       SparseVectorIterator;
  typedef typename std::map<SizeT, NumericT>::const_iterator    OutputRowConstIterator;
  typedef std::multimap<NumericT, std::pair<SizeT, NumericT> >  TemporarySortMap;

  assert(viennacl::traits::size1(A) == output.size() && bool("Output matrix size mismatch") );

  SparseVector w;
  TemporarySortMap temp_map;

  for (SizeT i=0; i<viennacl::traits::size1(A); ++i)  // Line 1 of the ILUT pseudocode (the 'line N' comments below refer to it)
  {

    //line 2: set up w
    NumericT row_norm = setup_w(A, i, w);
    NumericT tau_i = static_cast<NumericT>(tag.get_drop_tolerance()) * row_norm;

    //line 3:
    for (SparseVectorIterator w_k = w.begin(); w_k != w.end(); ++w_k)
    {
      SizeT k = w_k->first;
      if (k >= i)
        break;

      //line 4:
      NumericT a_kk = output[k][k];
      if (a_kk <= 0 && a_kk >= 0) // exact check for a_kk == 0, written this way to avoid float-equality warnings
      {
        std::cerr << "ViennaCL: FATAL ERROR in ILUT(): Diagonal entry is zero in row " << k
                  << " while processing line " << i << "!" << std::endl;
        throw "ILUT zero diagonal!";
      }

      NumericT w_k_entry = w_k->second / a_kk;
      w_k->second = w_k_entry;

      //line 5: (dropping rule to w_k)
      if ( std::fabs(w_k_entry) > tau_i)
      {
        //line 7:
        for (OutputRowConstIterator u_k = output[k].begin(); u_k != output[k].end(); ++u_k)
        {
          if (u_k->first > k)
            w[u_k->first] -= w_k_entry * u_k->second;
        }
      }
      // Note: a dropped w_k is kept in w here; the global dropping rule below (line 10) decides its fate.

    } //for w_k

    //Line 10: Apply a dropping rule to w
    //Sort entries which are kept
    temp_map.clear();
    for (SparseVectorIterator w_k = w.begin(); w_k != w.end(); ++w_k)
    {
      SizeT k = w_k->first;
      NumericT w_k_entry = w_k->second;

      NumericT abs_w_k = std::fabs(w_k_entry);
      if ( (abs_w_k > tau_i) || (k == i) ) // do not drop the diagonal element!
      {
        if (abs_w_k <= 0) // this can only happen for diagonal entry
          throw "Triangular factor in ILUT singular!";

        temp_map.insert(std::make_pair(abs_w_k, std::make_pair(k, w_k_entry)));
      }
    }

    //Lines 10-12: write the largest p values to L and U
    SizeT written_L = 0;
    SizeT written_U = 0;
    std::map<SizeT, NumericT> & row_i = output[i]; // row i of the combined L/U output
    for (typename TemporarySortMap::reverse_iterator iter = temp_map.rbegin(); iter != temp_map.rend(); ++iter)
    {
      SizeT j = (iter->second).first;
      NumericT w_j_entry = (iter->second).second;

      if (j < i) // Line 11: entry for L
      {
        if (written_L < tag.get_entries_per_row())
        {
          row_i[j] = w_j_entry;
          ++written_L;
        }
      }
      else if (j == i)  // Diagonal entry is always kept
      {
        row_i[j] = w_j_entry;
      }
      else //Line 12: entry for U
      {
        if (written_U < tag.get_entries_per_row())
        {
          row_i[j] = w_j_entry;
          ++written_U;
        }
      }
    }

    w.clear(); //Line 13

  } //for i
}
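
A minimal usage sketch for context, assuming ViennaCL's documented public interface (constructing an ilut_precond object is what drives this internal precondition() routine); the tag values below are illustrative, not recommendations:

#include "viennacl/compressed_matrix.hpp"
#include "viennacl/vector.hpp"
#include "viennacl/linalg/bicgstab.hpp"
#include "viennacl/linalg/ilut.hpp"

// Solve A*x = rhs with BiCGStab, preconditioned by the ILUT factorization above.
viennacl::vector<double> solve_with_ilut(viennacl::compressed_matrix<double> const & A,
                                         viennacl::vector<double> const & rhs)
{
  viennacl::linalg::ilut_tag tag(10, 1e-5); // <= 10 entries per row in each factor, drop tolerance 1e-5
  viennacl::linalg::ilut_precond< viennacl::compressed_matrix<double> > precond(A, tag);
  return viennacl::linalg::solve(A, rhs, viennacl::linalg::bicgstab_tag(), precond);
}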
Example #2
void GPHIKRawClassifier::classify ( const NICE::SparseVector * _xstar,
                                    uint & _result,
                                    SparseVector & _scores
                                  ) const
{
  if ( ! this->b_isTrained )
     fthrow(Exception, "Classifier not trained yet -- aborting!" );
  _scores.clear();


    // classification with quantization of test inputs
    if ( this->q != NULL )
    {
        for ( std::map< uint, double * >::const_iterator itT = this->precomputedT.begin() ;
              itT != this->precomputedT.end();
              itT++
            )
        {
          uint classno = itT->first;
          double beta  = 0;
          double *T    = itT->second;

          for (SparseVector::const_iterator i = _xstar->begin(); i != _xstar->end(); i++ )
          {
            uint dim  = i->first;
            double v  = i->second;
            uint qBin = this->q->quantize( v, dim );

            // T is a dimension-major lookup table: one precomputed score
            // contribution per (dimension, quantization bin) pair
            beta += T[dim * this->q->getNumberOfBins() + qBin];
          }//for-loop over dimensions of test input

          _scores[ classno ] = beta;

        }//for-loop over 1-vs-all models
    }
    // classification with exact test inputs, i.e., no quantization involved
    else
    {
        for ( std::map<uint, PrecomputedType>::const_iterator i = this->precomputedA.begin() ; i != this->precomputedA.end(); i++ )
        {
          uint classno = i->first;
          double beta  = 0;
          GMHIKernelRaw::sparseVectorElement **dataMatrix = this->gm->getDataMatrix();

          const PrecomputedType & A = i->second;
          std::map<uint, PrecomputedType>::const_iterator j = this->precomputedB.find ( classno );
          const PrecomputedType & B = j->second; // an entry in precomputedB is assumed to exist for every class in precomputedA

          for (SparseVector::const_iterator itX = _xstar->begin(); itX != _xstar->end(); itX++)
          {
            uint dim    = itX->first;
            double fval = itX->second;

            uint nnz = this->nnz_per_dimension[dim];

            if ( nnz == 0 ) continue;
            // optionally, tiny values could be skipped: if ( fval < this->f_tolerance ) continue;

            // binary search among the sorted training values of this dimension:
            // 'position' = number of training values less than or equal to fval
            GMHIKernelRaw::sparseVectorElement fval_element;
            fval_element.value = fval;

            GMHIKernelRaw::sparseVectorElement *it = std::upper_bound ( dataMatrix[dim], dataMatrix[dim] + nnz, fval_element );
            uint position = static_cast<uint>( std::distance ( dataMatrix[dim], it ) );
            
            // special case 1: the test value is smaller than all training values
            // in this dimension -> contribution = fval * sum_{l=1}^{n} alpha_l
            if ( position == 0 )
            {
              beta += fval * B[ dim ][ nnz - 1 ];
            }
            // special case 2: the test value is equal to or larger than the largest
            // training value in this dimension -> the fval term vanishes, since
            // B[ dim ][ nnz - 1 ] - B[ dim ][ position - 1 ] is zero, and only the
            // weighted sum of all training values, A[ dim ][ nnz - 1 ], remains
            else if ( position == nnz )
            {
              beta += A[ dim ][ nnz - 1 ];
            }
            // standard case: the test value is at least as large as the smallest
            // training value and smaller than the largest one in the current dimension
            else
            {
              beta += A[ dim ][ position - 1 ] + fval * ( B[ dim ][ nnz - 1 ] - B[ dim ][ position - 1 ] );
            }
            
          }//for-loop over dimensions of test input

          _scores[ classno ] = beta;

        }//for-loop over 1-vs-all models

    } // if-condition wrt quantization
  _scores.setDim ( *this->knownClasses.rbegin() + 1 );


  if ( this->knownClasses.size() > 2 )
  { // multi-class classification
    _result = _scores.maxElement();
  }
  else if ( this->knownClasses.size() == 2 ) // binary setting
  {
    uint class1 = *(this->knownClasses.begin());
    uint class2 = *(this->knownClasses.rbegin());

    // since we erased the binary label vector corresponding to the smaller class number,
    // we only have scores for the larger class number
    uint class_for_which_we_have_a_score          = class2;
    uint class_for_which_we_dont_have_a_score     = class1;

    _scores[class_for_which_we_dont_have_a_score] = - _scores[class_for_which_we_have_a_score];

    _result = _scores[class_for_which_we_have_a_score] > 0.0 ? class_for_which_we_have_a_score : class_for_which_we_dont_have_a_score;
  }

}
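
For context, the unquantized branch evaluates sum_l alpha_l * min(x_l, fval) per dimension in O(log nnz): the training values of each dimension are kept sorted, A holds prefix sums of alpha_l * x_l, and B holds prefix sums of alpha_l. Below is a self-contained toy sketch of that lookup; all names and numbers are illustrative and not taken from the NICE codebase:

#include <algorithm>
#include <cstddef>
#include <iostream>
#include <vector>

int main()
{
  // Sorted training values of one dimension and their dual coefficients.
  std::vector<double> values = { 0.1, 0.4, 0.7, 0.9 };
  std::vector<double> alpha  = { 0.5, -1.0, 2.0, 0.25 };
  std::size_t nnz = values.size();

  // Prefix sums: A[l] = sum_{k<=l} alpha[k]*values[k], B[l] = sum_{k<=l} alpha[k].
  std::vector<double> A(nnz), B(nnz);
  double sumAx = 0.0, sumA = 0.0;
  for (std::size_t l = 0; l < nnz; ++l)
  {
    sumAx += alpha[l] * values[l];
    sumA  += alpha[l];
    A[l] = sumAx;
    B[l] = sumA;
  }

  double fval = 0.5; // test value in this dimension
  std::size_t position = std::upper_bound(values.begin(), values.end(), fval) - values.begin();

  // sum_l alpha[l] * min(values[l], fval), read off the prefix sums,
  // with the same three cases as in classify() above.
  double beta;
  if (position == 0)
    beta = fval * B[nnz - 1];   // fval smaller than all training values
  else if (position == nnz)
    beta = A[nnz - 1];          // fval at least as large as all training values
  else
    beta = A[position - 1] + fval * (B[nnz - 1] - B[position - 1]);

  std::cout << beta << std::endl; // prints 0.775
  return 0;
}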