Example #1
void multadd_ns(DenseVector &w, const SparseVector &b, base_type factor, size_t offset)
{
    for (SparseVector::const_iterator iter = b.begin(); iter!=b.end(); ++iter)
    {
        w[iter->first+offset] += factor*iter->second;
    }
}
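
For context, here is a minimal, self-contained sketch of how multadd_ns might be exercised. The typedefs are illustrative assumptions (the real DenseVector and SparseVector come from the surrounding project); SparseVector is modeled as a sequence of (index, value) pairs:

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

typedef double base_type;
typedef std::vector<base_type> DenseVector;
typedef std::vector<std::pair<std::size_t, base_type> > SparseVector;

// w[i + offset] += factor * b[i] for every stored entry of b
void multadd_ns(DenseVector &w, const SparseVector &b, base_type factor, std::size_t offset)
{
    for (SparseVector::const_iterator iter = b.begin(); iter != b.end(); ++iter)
        w[iter->first + offset] += factor * iter->second;
}

int main()
{
    DenseVector w(6, 1.0);
    SparseVector b;
    b.push_back(std::make_pair(0u, 2.0));   // b[0] = 2
    b.push_back(std::make_pair(3u, -1.0));  // b[3] = -1
    multadd_ns(w, b, 0.5, 1);               // w[1] += 1.0, w[4] -= 0.5
    for (std::size_t i = 0; i < w.size(); ++i)
        std::cout << w[i] << ' ';           // prints: 1 2 1 1 0.5 1
    std::cout << '\n';
}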
Example #2
SparseVector<T_Element,T_Alloc>::SparseVector(
    const SparseVector<T_Element,T_Alloc>& sp_vec )
  :
      alloc_(sp_vec.alloc_), size_(sp_vec.size_), max_nz_(sp_vec.max_nz_)
    , assume_sorted_(sp_vec.assume_sorted_)
    , know_is_sorted_(sp_vec.know_is_sorted_)
{
  // Allocate memory for the elements and hand it to the sparse vector's index lookup.
  index_lookup_.set_sp_vec(
#ifdef _PG_CXX
    new element_type[max_nz_]
#else
    alloc_.allocate(max_nz_,NULL)
#endif
    ,sp_vec.nz(),sp_vec.offset());
  // Perform an uninitialized copy of the elements
  iterator		ele_to_itr		= index_lookup_.ele();
  const_iterator	ele_from_itr	= sp_vec.begin();
  while(ele_from_itr != sp_vec.end()) {
#ifdef _PG_CXX
    new (ele_to_itr++) element_type(*ele_from_itr++);
#else
    alloc_.construct(ele_to_itr++,*ele_from_itr++);
#endif
  }
}
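
The #ifdef above switches between raw placement new and the allocator's construct() for the uninitialized copy. A standalone sketch of the placement-new branch of that idiom, with std::string chosen purely for illustration:

#include <cstddef>
#include <new>
#include <string>
#include <vector>

int main()
{
    std::vector<std::string> src;
    src.push_back("a");
    src.push_back("b");

    // Allocate raw, uninitialized storage for src.size() strings.
    void *raw = ::operator new(src.size() * sizeof(std::string));
    std::string *dst = static_cast<std::string*>(raw);

    // Copy-construct each element in place, mirroring the _PG_CXX branch above.
    for (std::size_t i = 0; i < src.size(); ++i)
        new (dst + i) std::string(src[i]);

    // Raw storage gets no automatic cleanup: destroy, then deallocate.
    for (std::size_t i = 0; i < src.size(); ++i)
        dst[i].~basic_string();
    ::operator delete(raw);
}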
Example #3
Real MonotonicSpline::TDeriv2(Real u) const
{
  SparseVector v;
  basis.Deriv2(u,v);
  Real sum=Zero;
  for(SparseVector::const_iterator i=v.begin();i!=v.end();i++) 
    sum += t[i->first]*i->second;
  return sum;
}
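
This is a sparse dot product: only the stored basis entries contribute to the sum, and UtoT in Example #6 below performs the same accumulation. A standalone sketch of the pattern, with std::map standing in for the spline's SparseVector (an assumption, not the library's actual layout):

#include <iostream>
#include <map>
#include <vector>

typedef std::map<int, double> SparseCoeffs;

// sum of t[i] * v[i] over the stored entries of v only
double sparse_dot(const std::vector<double> &t, const SparseCoeffs &v)
{
    double sum = 0;
    for (SparseCoeffs::const_iterator i = v.begin(); i != v.end(); ++i)
        sum += t[i->first] * i->second;
    return sum;
}

int main()
{
    std::vector<double> t;
    t.push_back(0.0); t.push_back(1.0); t.push_back(4.0);
    SparseCoeffs v;
    v[1] = 0.5; v[2] = 0.25;
    std::cout << sparse_dot(t, v) << '\n';  // 1*0.5 + 4*0.25 = 1.5
}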
Example #4
base_type sprod_ns(const DenseVector &w, const SparseVector &b, size_t offset)
{
    base_type ans=0;
    for (SparseVector::const_iterator iter = b.begin(); iter!=b.end(); ++iter)
    {
        ans += w[iter->first+offset]*iter->second;
    }
    return ans;
}
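
A usage sketch under the same illustrative typedefs as the sketch in Example #1 (assumptions, not the project's real types):

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

typedef double base_type;
typedef std::vector<base_type> DenseVector;
typedef std::vector<std::pair<std::size_t, base_type> > SparseVector;

// Inner product of the dense w (shifted by offset) with sparse b.
base_type sprod_ns(const DenseVector &w, const SparseVector &b, std::size_t offset)
{
    base_type ans = 0;
    for (SparseVector::const_iterator iter = b.begin(); iter != b.end(); ++iter)
        ans += w[iter->first + offset] * iter->second;
    return ans;
}

int main()
{
    DenseVector w(4);
    w[0] = 1; w[1] = 2; w[2] = 3; w[3] = 4;
    SparseVector b;
    b.push_back(std::make_pair(0u, 10.0));
    b.push_back(std::make_pair(2u, 1.0));
    std::cout << sprod_ns(w, b, 1) << '\n';  // 2*10 + 4*1 = 24
}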
Example #5
SparseVector multadd_ss(const SparseVector &a, const SparseVector &b, base_type factor)
{
    vector<pair<size_t, base_type>> words;

    SparseVector::const_iterator iter_a = a.begin();
    SparseVector::const_iterator iter_b = b.begin();

    while (iter_a!=a.end() && iter_b!=b.end())
    {
        if (iter_a->first > iter_b->first)
        {
            words.push_back(make_pair(iter_b->first, factor*iter_b->second));
            iter_b++;
        } else {
            if (iter_a->first < iter_b->first)
            {
                words.push_back(*iter_a);
                iter_a++;
            } else {
                // indices equal
                base_type weight = iter_a->second + factor*iter_b->second;
                if (weight!=0)
                    words.push_back(make_pair(iter_a->first, weight));
                iter_a++;
                iter_b++;
            }
        }
    }

    while (iter_b!=b.end())
    {
        words.push_back(make_pair(iter_b->first, factor*iter_b->second));
        iter_b++;
    }

    while (iter_a!=a.end())
    {
        words.push_back(*iter_a);
        iter_a++;
    }

    return(SparseVector(words));
}
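
The merge relies on both inputs being sorted by index, so one two-pointer pass yields the index-sorted result a + factor*b, dropping entries that cancel exactly. A self-contained sketch of the same merge over plain pair vectors (merge_multadd is an illustrative name, not the project's API):

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

typedef double base_type;
typedef std::vector<std::pair<std::size_t, base_type> > Pairs;

// Merge two index-sorted sparse vectors: result = a + factor*b.
Pairs merge_multadd(const Pairs &a, const Pairs &b, base_type factor)
{
    Pairs out;
    Pairs::const_iterator ia = a.begin(), ib = b.begin();
    while (ia != a.end() && ib != b.end())
    {
        if (ia->first < ib->first) {
            out.push_back(*ia);
            ++ia;
        } else if (ib->first < ia->first) {
            out.push_back(std::make_pair(ib->first, factor * ib->second));
            ++ib;
        } else {  // equal indices: combine, dropping exact zeros as above
            base_type weight = ia->second + factor * ib->second;
            if (weight != 0)
                out.push_back(std::make_pair(ia->first, weight));
            ++ia; ++ib;
        }
    }
    // Copy whichever tail remains.
    for (; ia != a.end(); ++ia) out.push_back(*ia);
    for (; ib != b.end(); ++ib) out.push_back(std::make_pair(ib->first, factor * ib->second));
    return out;
}

int main()
{
    Pairs a, b;
    a.push_back(std::make_pair(1u, 1.0)); a.push_back(std::make_pair(4u, 2.0));
    b.push_back(std::make_pair(1u, 0.5)); b.push_back(std::make_pair(2u, 3.0));
    Pairs r = merge_multadd(a, b, 2.0);
    for (Pairs::const_iterator i = r.begin(); i != r.end(); ++i)
        std::cout << i->first << ":" << i->second << " ";  // 1:2 2:6 4:2
    std::cout << '\n';
}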
Example #6
Real MonotonicSpline::UtoT(Real u) const
{
  if(u < basis.knots[basis.Degree()]) return t.front();
  if(u >= basis.knots[basis.knots.size()-basis.Degree()]) return t.back();
  SparseVector v;
  basis.Evaluate(u,v);
  Assert(v.numEntries()!=0);
  Real sum=Zero;
  for(SparseVector::const_iterator i=v.begin();i!=v.end();i++) 
    sum += t[i->first]*i->second;
  return sum;
}
Example #7
File: dftable.cpp Project: fhieber/cclir
/*
 * Iterate over the given vector's keys, adding 1 once for each type.
 */
void DfTable::update(const SparseVector<prob_t>& v) {
	for(SparseVector<prob_t>::const_iterator it = v.begin(); it != v.end(); ++ it)
		this->add_weight(it->first, 1);
}
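
A minimal sketch of the document-frequency idea, with a plain std::map standing in for DfTable and its add_weight accumulation (illustrative names and types only):

#include <iostream>
#include <map>
#include <utility>
#include <vector>

typedef std::vector<std::pair<int, double> > Doc;  // (feature id, weight)

// Each feature that occurs in a document counts once, regardless of weight.
void update_df(std::map<int, int> &df, const Doc &doc)
{
    for (Doc::const_iterator it = doc.begin(); it != doc.end(); ++it)
        df[it->first] += 1;
}

int main()
{
    std::map<int, int> df;
    Doc d1, d2;
    d1.push_back(std::make_pair(7, 0.3)); d1.push_back(std::make_pair(9, 2.0));
    d2.push_back(std::make_pair(7, 5.0));
    update_df(df, d1);
    update_df(df, d2);
    std::cout << "df[7]=" << df[7] << " df[9]=" << df[9] << '\n';  // df[7]=2 df[9]=1
}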
Example #8
START_SECTION((void erase(SparseVectorIterator it)))
{
	sv.erase(sv.begin()+5);
	TEST_EQUAL(sv.size(),7)

	//real test
	SparseVector<double> sv2;
	sv2.push_back(1.0);
	sv2.push_back(1.1);
	sv2.push_back(1.2);
	sv2.push_back(1.3);
	sv2.push_back(1.4);

	sv2.erase(sv2.begin());
	TEST_EQUAL(sv2.size(),4)
	TEST_EQUAL(sv2.at(0),1.1)
	TEST_EQUAL(sv2.at(1),1.2)
	TEST_EQUAL(sv2.at(2),1.3)
	TEST_EQUAL(sv2.at(3),1.4)

	sv2.erase(sv2.begin()+2);
	TEST_EQUAL(sv2.size(),3)
	TEST_EQUAL(sv2.at(0),1.1)
	TEST_EQUAL(sv2.at(1),1.2)
	TEST_EQUAL(sv2.at(2),1.4)

	sv2.erase(sv2.end()-1);
	TEST_EQUAL(sv2.size(),2)
	TEST_EQUAL(sv2.at(0),1.1)
	TEST_EQUAL(sv2.at(1),1.2)
}
END_SECTION
Example #9
void runLIBSVM_cv( const char* topic,
                       RowSetMem & trainData,
                       const class ModelType& modelType,
                       HyperParamPlan& hyperParamPlan,
                       IRowSet & testData,
                       std::ostream& modelFile,
                       std::ostream& result)
{
#ifdef LAUNCH_SVM
    const char* learnApp = "svmtrain-cv.exe";
    const char* classifyApp = "svmpredictscore.exe";
    const char* trainFName = "SVM_Train.dat";
    const char* testFName = "SVM_Test.dat";
    const char* modelFName = "SVM.Model";
    const char* predictFName = "SVM_Predict.dat";
    int ret;

    // write training file for SVMlight
    BoolVector y( false, trainData.n() );
    ofstream ftrain(trainFName);
    ftrain<<setprecision(10);
    unsigned r=0;
    while( trainData.next() ) {
        ftrain<<( trainData.y() ? 1 : -1 );
        const SparseVector x = trainData.xsparse();
        for( SparseVector::const_iterator ix=x.begin(); ix!=x.end(); ix++ )
            ftrain<<" "<<ix->first<<":"<<ix->second;
        ftrain<<endl;
        y[r++] = trainData.y();
    }
    ftrain.close();

    //additional parameter(s)
    std::ostrstream sparam;
    double odds = double(ntrue(y)) / (trainData.n()-ntrue(y));
    if( 0==strcmp("balance",modelType.StringParam().c_str()) ) {
        // http://www.cs.cornell.edu/People/tj/publications/morik_etal_99a.pdf - ref from SVMlight:
        //  C+ / C- = number of negative training examples / number of positive training examples
        //sparam<<" -w1 "<< 1/odds <<std::ends;
        //should we treat it as inverse?
        sparam<<" -w1 "<< odds <<std::ends;
        Log(3)<<"\nBalanced training: w1=(odds of positive)= "<<odds;
    }
    else
        sparam<<modelType.StringParam()<<std::ends;

    // Hyperparameter loop
    unsigned bestParamIndex = unsigned(-1);
    if( hyperParamPlan.plan().size() > 1 ) {
        vector<double> cvres;
        for( unsigned iparam=0; iparam<hyperParamPlan.plan().size(); iparam++ )//hyper-parameter loop
        {
            double hpvalue = hyperParamPlan.plan()[iparam];
            Log(5)<<"\nHyperparameter plan #"<<iparam+1<<" value="<<hpvalue;
            std::ostrstream cvparam;
            cvparam<<sparam.str()<<" -v "<<hyperParamPlan.nfolds()<<" -c "<<hpvalue<<std::ends;
            Log(5)<<"\n\nLaunch LIBSVM cv learning - Time "<<Log.time()
                <<"\n  command line: "<<learnApp<<" "<<cvparam.str()<<" "<<trainFName<<" "<<modelFName<<"\n";
            Log(5).flush();
            try{
            ret = _spawnlp( _P_WAIT, learnApp, learnApp, cvparam.str(), trainFName, modelFName, NULL );
            }catch(...){
                Log(1)<<"\nLIBSVM learning exception";
                continue;
            }
            if( 0!=ret ){
                Log(1)<<"\nLIBSVM learning run-time error, return value="<<ret;
                continue;
            }
            Log(3)<<"\nEnd LIBSVM cv learning - Time "<<Log.time();
            
            ifstream accuracyfile(modelFName);
            double accuracy;
            accuracyfile>>accuracy;
            cvres.push_back(accuracy);
            accuracyfile.close();
        }
        // best by cv
        double bestEval = - numeric_limits<double>::max();
        for( unsigned i=0; i<cvres.size(); i++ )
            if( cvres[i]>bestEval ) {
                bestEval = cvres[i];
                bestParamIndex = i;
            }
        if( bestParamIndex==unsigned(-1) )
            throw runtime_error("No good hyperparameter value found");
        Log(5)<<"\nBest parameter value "<<hyperParamPlan.plan()[bestParamIndex]<<" cv average accuracy "<<bestEval;
    }
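
The training file written above follows the usual SVMlight/libsvm line format: a label followed by index:value pairs for the nonzero features, exactly what the ix->first/ix->second loop emits. A small standalone writer for that format (illustrative code, not the project's):

#include <fstream>
#include <iomanip>
#include <utility>
#include <vector>

typedef std::vector<std::pair<unsigned, double> > SparseRow;

// Write one labeled example per line: "<+1|-1> idx:val idx:val ..."
void write_libsvm_line(std::ofstream &out, bool label, const SparseRow &x)
{
    out << (label ? 1 : -1);
    for (SparseRow::const_iterator ix = x.begin(); ix != x.end(); ++ix)
        out << " " << ix->first << ":" << ix->second;
    out << "\n";
}

int main()
{
    std::ofstream ftrain("SVM_Train.dat");
    ftrain << std::setprecision(10);
    SparseRow x;
    x.push_back(std::make_pair(3u, 0.5));
    x.push_back(std::make_pair(17u, 2.0));
    write_libsvm_line(ftrain, true, x);  // writes "1 3:0.5 17:2"
    return 0;
}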
Example #10
void precondition(SparseMatrixTypeT const & A,
                  std::vector< std::map<SizeT, NumericT> > & output,
                  ilut_tag const & tag)
{
  typedef std::map<SizeT, NumericT>                             SparseVector;
  typedef typename SparseVector::iterator                       SparseVectorIterator;
  typedef typename std::map<SizeT, NumericT>::const_iterator    OutputRowConstIterator;
  typedef std::multimap<NumericT, std::pair<SizeT, NumericT> >  TemporarySortMap;

  assert(viennacl::traits::size1(A) == output.size() && bool("Output matrix size mismatch") );

  SparseVector w;
  TemporarySortMap temp_map;

  for (SizeT i=0; i<viennacl::traits::size1(A); ++i)  // Line 1
  {
/*    if (i%10 == 0)
  std::cout << i << std::endl;*/

    //line 2: set up w
    NumericT row_norm = setup_w(A, i, w);
    NumericT tau_i = static_cast<NumericT>(tag.get_drop_tolerance()) * row_norm;

    //line 3:
    for (SparseVectorIterator w_k = w.begin(); w_k != w.end(); ++w_k)
    {
      SizeT k = w_k->first;
      if (k >= i)
        break;

      //line 4:
      NumericT a_kk = output[k][k];
      if (a_kk <= 0 && a_kk >= 0) // a_kk == 0
      {
        std::cerr << "ViennaCL: FATAL ERROR in ILUT(): Diagonal entry is zero in row " << k
                  << " while processing line " << i << "!" << std::endl;
        throw "ILUT zero diagonal!";
      }

      NumericT w_k_entry = w_k->second / a_kk;
      w_k->second = w_k_entry;

      //line 5: (dropping rule to w_k)
      if ( std::fabs(w_k_entry) > tau_i)
      {
        //line 7:
        for (OutputRowConstIterator u_k = output[k].begin(); u_k != output[k].end(); ++u_k)
        {
          if (u_k->first > k)
            w[u_k->first] -= w_k_entry * u_k->second;
        }
      }
      //else
      //  w.erase(k);

    } //for w_k

    //Line 10: Apply a dropping rule to w
    //Sort entries which are kept
    temp_map.clear();
    for (SparseVectorIterator w_k = w.begin(); w_k != w.end(); ++w_k)
    {
      SizeT k = w_k->first;
      NumericT w_k_entry = w_k->second;

      NumericT abs_w_k = std::fabs(w_k_entry);
      if ( (abs_w_k > tau_i) || (k == i) )//do not drop diagonal element!
      {

        if (abs_w_k <= 0) // this can only happen for diagonal entry
          throw "Triangular factor in ILUT singular!";

        temp_map.insert(std::make_pair(abs_w_k, std::make_pair(k, w_k_entry)));
      }
    }

    //Lines 10-12: write the largest p values to L and U
    SizeT written_L = 0;
    SizeT written_U = 0;
    for (typename TemporarySortMap::reverse_iterator iter = temp_map.rbegin(); iter != temp_map.rend(); ++iter)
    {
      std::map<SizeT, NumericT> & row_i = output[i];
      SizeT j = (iter->second).first;
      NumericT w_j_entry = (iter->second).second;

      if (j < i) // Line 11: entry for L
      {
        if (written_L < tag.get_entries_per_row())
        {
          row_i[j] = w_j_entry;
          ++written_L;
        }
      }
      else if (j == i)  // Diagonal entry is always kept
      {
        row_i[j] = w_j_entry;
      }
      else //Line 12: entry for U
      {
        if (written_U < tag.get_entries_per_row())
        {
          row_i[j] = w_j_entry;
          ++written_U;
        }
      }
    }

    w.clear(); //Line 13

  } //for i
}
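
The heart of ILUT is its dual dropping rule: entries below the per-row threshold tau_i are discarded, and of the survivors only the largest p per row are written to L and U (the diagonal is always kept). A standalone sketch of the magnitude-based selection using the same multimap trick as above (drop_rule is an illustrative name):

#include <cmath>
#include <cstddef>
#include <iostream>
#include <map>
#include <utility>

typedef std::map<std::size_t, double> SparseRow;

// Keep at most p entries of w with |value| > tau, preferring the largest
// magnitudes. (The ILUT code above additionally always keeps the diagonal.)
SparseRow drop_rule(const SparseRow &w, double tau, std::size_t p)
{
    // Sort surviving entries by magnitude via a multimap keyed on |value|.
    std::multimap<double, std::pair<std::size_t, double> > by_magnitude;
    for (SparseRow::const_iterator it = w.begin(); it != w.end(); ++it)
        if (std::fabs(it->second) > tau)
            by_magnitude.insert(std::make_pair(std::fabs(it->second),
                                               std::make_pair(it->first, it->second)));

    // Walk in reverse (largest first) and keep the first p entries.
    SparseRow kept;
    std::multimap<double, std::pair<std::size_t, double> >::reverse_iterator it;
    std::size_t written = 0;
    for (it = by_magnitude.rbegin(); it != by_magnitude.rend() && written < p; ++it, ++written)
        kept[it->second.first] = it->second.second;
    return kept;
}

int main()
{
    SparseRow w;
    w[0] = 0.01; w[2] = -3.0; w[5] = 1.5; w[7] = 0.2;
    SparseRow kept = drop_rule(w, 0.05, 2);  // keeps indices 2 and 5
    for (SparseRow::const_iterator i = kept.begin(); i != kept.end(); ++i)
        std::cout << i->first << ":" << i->second << " ";  // 2:-3 5:1.5
    std::cout << '\n';
}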