Example #1
// w += factor * b, with b's indices shifted by offset
void multadd_ns(DenseVector &w, const SparseVector &b, base_type factor, size_t offset)
{
    for (SparseVector::const_iterator iter = b.begin(); iter!=b.end(); ++iter)
    {
        w[iter->first+offset] += factor*iter->second;
    }
}
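A minimal usage sketch for the pattern above, assuming base_type is double, DenseVector is std::vector<double>, and SparseVector is an index-sorted std::vector<std::pair<size_t, double>> (none of these typedefs appear in the original snippet):

#include <cstddef>
#include <iostream>
#include <utility>
#include <vector>

typedef double base_type;                                             // assumed
typedef std::vector<double> DenseVector;                              // assumed
typedef std::vector<std::pair<std::size_t, base_type> > SparseVector; // assumed

// Same body as multadd_ns above: w += factor * b, indices shifted by offset.
void multadd_ns(DenseVector &w, const SparseVector &b, base_type factor, std::size_t offset)
{
    for (SparseVector::const_iterator iter = b.begin(); iter != b.end(); ++iter)
        w[iter->first + offset] += factor * iter->second;
}

int main()
{
    DenseVector w(6, 0.0);
    SparseVector b;
    b.push_back(std::make_pair(1, 2.0));   // b[1] = 2
    b.push_back(std::make_pair(3, -1.0));  // b[3] = -1
    multadd_ns(w, b, 0.5, 1);              // w[2] += 1.0, w[4] -= 0.5
    std::cout << w[2] << " " << w[4] << "\n";  // prints: 1 -0.5
    return 0;
}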
Example #2
SparseVector<T_Element,T_Alloc>::SparseVector(
    const SparseVector<T_Element,T_Alloc>& sp_vec )
  :
      alloc_(sp_vec.alloc_), size_(sp_vec.size_), max_nz_(sp_vec.max_nz_)
    , assume_sorted_(sp_vec.assume_sorted_)
    , know_is_sorted_(sp_vec.know_is_sorted_)
{
  // Allocate memory for the elements and hand it to the sparse vector's index lookup.
  index_lookup_.set_sp_vec(
#ifdef _PG_CXX
    new element_type[max_nz_]
#else
    alloc_.allocate(max_nz_,NULL)
#endif
    ,sp_vec.nz(),sp_vec.offset());
  // Perform an uninitialized copy of the elements
  iterator       ele_to_itr   = index_lookup_.ele();
  const_iterator ele_from_itr = sp_vec.begin();
  while(ele_from_itr != sp_vec.end()) {
#ifdef _PG_CXX
    new (ele_to_itr++) element_type(*ele_from_itr++);
#else
    alloc_.construct(ele_to_itr++,*ele_from_itr++);
#endif
  }
}
Example #3
//update all rows of Z^t
void M3LLinear::update_all_rows( SparseVector& x, const M3LFloat* Rrow, const M3LFloat& scale)
{
	M3LFloat* Zk=Z;
	M3LFloat mk;
	int nnz=x.get_nnz();
	for(int k=0;k<L;k++)
	{
		if(Rrow[k]==0)
		{
			// nothing to add for this row; keep the row pointer in step
			Zk+=d;
			continue;
		}
		mk=Rrow[k]*scale;
		for(int i=0;i<nnz;i++)
		{
			Zk[x.get_ith_index(i)]+=mk*(x.get_ith_value(i));
		}
		b[k]+=mk*bias;
		Zk+=d;
	}
}
Example #4
void MiraFeatureVector::InitSparse(const SparseVector& sparse, size_t ignoreLimit)
{
  vector<size_t> sparseFeats = sparse.feats();
  bool bFirst = true;
  size_t lastFeat = 0;
  m_sparseFeats.reserve(sparseFeats.size());
  m_sparseVals.reserve(sparseFeats.size());
  for(size_t i=0; i<sparseFeats.size(); i++) {
    if (sparseFeats[i] < ignoreLimit) continue;
    size_t feat = m_dense.size() + sparseFeats[i];
    m_sparseFeats.push_back(feat);
    m_sparseVals.push_back(sparse.get(sparseFeats[i]));

    // Check ordered property
    if(bFirst) {
      bFirst = false;
    } else {
      if(lastFeat>=feat) {
        cerr << "Error: Feature indeces must be strictly ascending coming out of SparseVector" << endl;
        exit(1);
      }
    }
    lastFeat = feat;
  }
}
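Note the index shift: sparse feature ids are bumped by m_dense.size(), so in the flattened feature space the dense block occupies ids [0, num_dense) and sparse features follow it (see the MiraFeatureVector constructor in Example #19). The strictly-ascending check then guarantees m_sparseFeats comes out sorted and duplicate-free.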
Example #5
// Dot product <w, b>, with b's indices shifted by offset.
base_type sprod_ns(const DenseVector &w, const SparseVector &b, size_t offset)
{
    base_type ans=0;
    for (SparseVector::const_iterator iter = b.begin(); iter!=b.end(); ++iter)
    {
        ans += w[iter->first+offset]*iter->second;
    }
    return ans;
}
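sprod_ns is the dot-product counterpart of multadd_ns and compiles unchanged against the typedefs assumed in the sketch under Example #1; with the w and b built there, sprod_ns(w, b, 1) returns w[2]*2.0 + w[4]*(-1.0) = 2.5 after the multadd_ns call.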
Example #6
Real MonotonicSpline::TDeriv2(Real u) const
{
  SparseVector v;
  basis.Deriv2(u,v);
  Real sum=Zero;
  for(SparseVector::const_iterator i=v.begin();i!=v.end();i++) 
    sum += t[i->first]*i->second;
  return sum;
}
Example #7
//helper function: s += scale * a
void add(M3LFloat* s, SparseVector a, M3LFloat scale)
{
	int nnz=a.get_nnz();
	for(int i=0;i<nnz;i++)
	{
		s[a.get_ith_index(i)]+=scale*(a.get_ith_value(i));
	}
}
Example #8
double SparseRow::timesColumn(const SparseVector &v) const
{
    double sum = 0;
    for (int cnt = 0; cnt < this->size; ++cnt) {
        int loc = nb[cnt].getIx();
        if (v.isNonZero(loc))
            sum += v.getValue(loc) * nb[cnt].getWeight();
    }
    return sum;
}
Example #9
//update only current row of Z^t
void M3LLinear::update_current_row(const int& l, SparseVector& x, const M3LFloat& scale)
{
	M3LFloat* Zl=getZrow(l);
	int nnz=x.get_nnz();
	for(int i=0;i<nnz;i++)
	{
		Zl[x.get_ith_index(i)]+=scale*(x.get_ith_value(i));
	}
	b[l]+=scale*bias;
}
Example #10
void SparseVector::addTimes(const SparseVector &v, const double &w)  {
    for (int cnt = 0; cnt < v.getNzEntries(); ++cnt)  {
        int ix = v.getIx(cnt);
        this->plusAt( ix, v.getValue(ix) * w );
    }
}
Example #11
    void runTest() {
        std::stringstream oss;
        m_A.marshal_out(oss);

        SparseVector<Pairing<GA, GB>> B;

        std::stringstream iss(oss.str());
        checkPass(B.marshal_in(iss));

        checkPass(m_A == B);
    }
Example #12
Real MonotonicSpline::UtoT(Real u) const
{
  if(u < basis.knots[basis.Degree()]) return t.front();
  if(u >= basis.knots[basis.knots.size()-basis.Degree()]) return t.back();
  SparseVector v;
  basis.Evaluate(u,v);
  Assert(v.numEntries()!=0);
  Real sum=Zero;
  for(SparseVector::const_iterator i=v.begin();i!=v.end();i++) 
    sum += t[i->first]*i->second;
  return sum;
}
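Both spline routines reduce evaluation to a sparse dot product: the basis object fills v with the few basis functions that are nonzero at u, and the loop accumulates sum over t[i->first]*i->second, i.e. the sum of t_i*N_i(u) for UtoT and of t_i*N_i''(u) for TDeriv2 (Example #6); UtoT additionally clamps u outside the valid knot range to t.front()/t.back().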
Example #13
void accumQuery(const SparseVector<Pairing<GA, GB>>& query,
                const std::size_t reserveTune,
                ProgressCallback* callback) {
    m_val = m_val
        + (*m_random_d) * query.getElementForIndex(Z_INDEX)
        + query.getElementForIndex(3)
        + multiExp01(query,
                     *m_witness,
                     4,
                     4 + m_numVariables,
                     0 == reserveTune ? reserveTune : m_numVariables / reserveTune,
                     callback);
}
Example #14
	double computeLoss() {
		// compute loss w.r.t. the oracle hypothesis and current weights w
		assert(oracle);
		if (hope_select==1)
			loss = (features.dot(w) + cost) - (oracle->features.dot(w) - oracle->cost);
		else
			loss = (features.dot(w) + cost) - (oracle->features.dot(w));
		if (loss < 0) {
			cerr << "Warning! Loss < 0! this_score=" << features.dot(w) << " oracle_score=" << oracle->features.dot(w) << " this_cost=" << cost << " oracle_cost=" << oracle->cost << endl;
			loss = 0;
		}
		return loss;
	}
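The quantity computed here is a margin-style loss: the cost-augmented score of this hypothesis minus the oracle's score, where hope_select==1 additionally subtracts the oracle's cost (treating the oracle as a cost-adjusted "hope" hypothesis). A negative value means the current weights already separate the pair, and it is clipped to zero with a warning.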
Example #15
M3LFloat M3LLinear::score(const int& l, SparseVector& x)
{
	M3LFloat* Zl=getZrow(l);
	int nnz=x.get_nnz();
	M3LFloat sum=0;
	for(int i=0;i<nnz;i++)
	{
		sum+=Zl[x.get_ith_index(i)]*x.get_ith_value(i);
	}
	sum+=bias*b[l];
	return sum;
}
Example #16
static void outputSample(ostream& out, const FeatureDataItem& f1, const FeatureDataItem& f2) {
  // difference in score in regular features
  for(unsigned int j=0; j<f1.dense.size(); j++)
    if (abs(f1.dense[j]-f2.dense[j]) > 0.00001)
      out << " F" << j << " " << (f1.dense[j]-f2.dense[j]);

  if (f1.sparse.size() || f2.sparse.size()) {
    out << " ";

    // sparse features
    const SparseVector &s1 = f1.sparse;
    const SparseVector &s2 = f2.sparse;
    SparseVector diff = s1 - s2;
    diff.write(out);
  }
}
Example #17
void SparseSystem::add_row(const SparseVector& new_row, double new_r) {
  ensure_num_cols(new_row.last()+1);

  append_new_rows(1);
  int row = num_rows() - 1;
  _rhs(row) = new_r;
  set_row(row, new_row);
}
Example #18
void Data::outputSample( ostream &out, const FeatureStats &f1, const FeatureStats &f2 )
{
  // difference in score in regular features
  for(unsigned int j=0; j<f1.size(); j++)
    if (abs(f1.get(j)-f2.get(j)) > 0.00001)
      out << " F" << j << " " << (f1.get(j)-f2.get(j));

  if (!hasSparseFeatures())
    return;

  out << " ";

  // sparse features
  const SparseVector &s1 = f1.getSparse();
  const SparseVector &s2 = f2.getSparse();
  SparseVector diff = s1 - s2;
  diff.write(out);
}
Example #19
MiraFeatureVector::MiraFeatureVector(const SparseVector& sparse, size_t num_dense)
{
  m_dense.resize(num_dense);
  //Assume that features with id [0,num_dense) are the dense features
  for (size_t id = 0; id < num_dense; ++id) {
    m_dense[id] = sparse.get(id);
  }
  InitSparse(sparse,num_dense);
}
Example #20
// NOTE: does not pass tests
SparseMatrix BSplineBasis::evalBasisJacobian(DenseVector &x) const
{
    // Jacobian basis matrix
    SparseMatrix J(getNumBasisFunctions(), numVariables);
    //J.setZero(numBasisFunctions(), numInputs);

    // Calculate partial derivatives
    for (unsigned int i = 0; i < numVariables; ++i)
    {
        // One column in basis jacobian
        std::vector<SparseVector> values(numVariables);

        for (unsigned int j = 0; j < numVariables; ++j)
        {
            if (j == i)
            {
                // Differentiated basis
                values.at(j) = bases.at(j).evaluateDerivative(x(j),1);
            }
            else
            {
                // Normal basis
                values.at(j) = bases.at(j).evaluate(x(j));
            }
        }

        SparseVector Ji = kroneckerProductVectors(values);

        // Fill out column
        for (int k = 0; k < Ji.outerSize(); ++k)
        for (SparseMatrix::InnerIterator it(Ji,k); it; ++it)
        {
            if (it.value() != 0)
                J.insert(it.row(),i) = it.value();
        }
        //J.block(0,i,Ji.rows(),1) = bi.block(0,0,Ji.rows(),1);
    }

    J.makeCompressed();

    return J;
}
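Structurally, column i of the Jacobian is a Kronecker product of the per-dimension basis vectors with dimension i replaced by its first derivative, which is the product rule for a tensor-product B-spline basis. Per the author's own note above, the routine does not pass tests, so treat it as illustrating the structure rather than as a verified implementation.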
Example #21
SparseVector SparseVector::operator+(SparseVector & sv) {
	if (getLength() != sv.getLength()) {
		throw "Vector lengths differ!";
	} else {
		SparseVector sum = *this;
		for (int i = 0; i < getLength(); i++) {
			sum[i] = sum[i] + sv[i];
		}
		return sum;
	}
}
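Looping over every index up to getLength() makes this addition O(length) rather than O(nnz) and defeats the point of a sparse representation; a merge over the two sorted nonzero lists, as in multadd_ss (Example #30 below), touches only the stored entries.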
Example #22
    void runTest() {
        std::stringstream oss;
        m_A.marshal_out(
            oss,
            [] (std::ostream& o, const Pairing<GA, GB>& a) {
                a.marshal_out_raw(o);
            });

        SparseVector<Pairing<GA, GB>> B;

        std::stringstream iss(oss.str());
        checkPass(
            B.marshal_in(
                iss,
                [] (std::istream& i, Pairing<GA, GB>& a) {
                    return a.marshal_in_raw(i);
                }));

        checkPass(m_A == B);
    }
Example #23
Vec RQR_Multiply(const VECTOR &v,
                 const SparseKalmanMatrix &RQR,
                 const SparseVector &Z,
                 double H) {
    int state_dim = Z.size();
    if(v.size() != state_dim + 2) {
        report_error("wrong sizes in RQR_Multiply");
    }
    // Partition v = [eta, epsilon, 0]
    ConstVectorView eta(v, 0, state_dim);
    double epsilon = v[state_dim];

    // Partition this
    Vec RQRZ = RQR * Z.dense();
    double ZRQRZ_plus_H = Z.dot(RQRZ) + H;

    Vec ans(v.size());
    VectorView(ans, 0, state_dim) = (RQR * eta).axpy(RQRZ, epsilon);
    ans[state_dim] = RQRZ.dot(eta) + ZRQRZ_plus_H * epsilon;
    return ans;
}
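Read alongside the comment, and assuming RQR is symmetric, this applies the block matrix [[RQR, RQR*Z], [Z^T*RQR, Z^T*RQR*Z + H]] to the [eta, epsilon] part of v; the trailing component of v is taken to be zero, and the corresponding entry of ans is left at its default value.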
Example #24
double ComputeDelta(const vector<boost::shared_ptr<HypothesisInfo> >& pair) {
	const double loss0 = pair[0]->features.dot(w) + pair[0]->cost - (pair[0]->oracle->features.dot(w) - pair[0]->oracle->cost);
	const double loss1 = pair[1]->features.dot(w) + pair[1]->cost - (pair[1]->oracle->features.dot(w) - pair[1]->oracle->cost);
	const double num = loss0 - loss1;
	//const double num = pair[0]->loss - pair[1]->loss;
	//cerr << "loss_0=" << pair[0]->loss << " loss_1=" << pair[1]->loss << endl;
	cerr << " ComputeDelta: loss_0=" << loss0 << " loss_1=" << loss1;
	SparseVector<double> diff = pair[0]->features;
	diff -= pair[1]->features;
	double diffsqnorm = diff.l2norm_sq();
	double delta;
	if (diffsqnorm > 0)
		delta = num / (diffsqnorm * lr);
	else
		delta = 0;
	cerr << " delta1=" << delta;
	// clip
	delta = max(-pair[0]->alpha, min(delta, pair[1]->alpha));
	cerr << " delta2=" << delta << endl;
	return delta;

}
Example #25
// Read an event line. An event line consists of:
//
// - The event frequency/weight.
// - The number of non-zero features.
// - Feature/value pairs.
//
pair<double, SparseVector<double> > DataSet::readEvent(string const &eventLine)
{
	std::vector<std::string> lineParts = stringSplit(eventLine);
	
	if (lineParts.size() < 2)
		throw runtime_error(ERR_INCORRECT_EVENT + eventLine);
	
	double eventProb = parseString<double>(lineParts[0]);
	size_t nFeatures = parseString<size_t>(lineParts[1]);

	if ((nFeatures * 2) + 2 != lineParts.size())
		throw runtime_error(ERR_INCORRECT_NFEATURES + eventLine);

	SparseVector<double> fVals;
	for (size_t i = 0; i < (2 * nFeatures); i += 2)
	{
		size_t fId = parseString<size_t>(lineParts[i + 2]);
		double fVal = parseString<double>(lineParts[i + 3]);
		fVals.coeffRef(fId) = fVal;
	}
	
	return make_pair(eventProb, fVals);
}
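As a hypothetical example of the format described above, the line "2.0 2 13 0.5 21 1.0" parses to eventProb = 2.0 and nFeatures = 2, with fVals[13] = 0.5 and fVals[21] = 1.0; any line whose token count is not 2 + 2*nFeatures is rejected with ERR_INCORRECT_NFEATURES.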
Example #26
id_t DataSet::Read (const char *file_name)
{
  std::ifstream ifs (file_name);
  std::string line;
  id_t max_id = 0;
  int line_count = 0;

  while (getline (ifs, line))
  {
    line_count++;
    SparseVector temp;
    const char *pos = sdf_parse_line (line.c_str (), temp);
    if (*pos && (*pos != '#'))
    {
      FATAL << "Error in input:" << line_count << ':' 
        << pos - line.c_str () + 1 << std::endl; 
      return 0;
    }
    data_set_.push_back (temp);
    if (temp.max_id () > max_id)
      max_id = temp.max_id ();
  }   
  return max_id;
}
Example #27
void report_multiplication_error(const SparseKalmanMatrix *T,
                                 const SparseVector &Z,
                                 bool new_time,
                                 double fraction_in_initial_period,
                                 const VEC &v) {
    ostringstream err;
    int state_dim = T->nrow();
    err << "incompatible sizes in AccumulatorTransitionMatrix multiplication"
        << endl
        << "T.nrow() = " << state_dim << endl
        << "Z.size() = " << Z.size() << endl
        << "v.size() = " << v.size() << endl
        << "The first two should match.  The last should be two more "
        << "than the others" << endl;
    report_error(err.str());
}
Example #28
double SparseVector::product(const SparseVector &a)  const  {
    double ss = 0;
    // Iterate over the shorter nonzero list.
    if (this->nzEntries > a.nzEntries)  return a.product(*this);
    for (int cnt = 0; cnt < this->nzEntries; ++cnt)  {
        int ix = this->nz[ cnt ];
        if ( a.isNz[ ix ] )
            ss += a.val[ ix ] * this->val[ ix ];
    }
    return ss;
}
Example #29
SparseVector<double> SparseSolverEigenCustom::cgSolveSparse(const SparseMatrix<double> & A, const SparseVector<double> & b, int iter, double residual)
{
	SparseVector<double> r(b.rows());
	SparseVector<double> p(b.rows());
	SparseVector<double> Ap(b.rows());
	SparseVector<double> x(b.rows());

	r = b - A *x;
	p = r;

	double rTr,pTAp,alpha,beta,rTrnew,rnorm;
	SparseVector<double> vtemp;
	bool isConverged = false;
	for(int k=0;k<iter;k++)
	{
		Ap = A*p;
		vtemp = r.transpose()*r;
		rTr = vtemp.coeff(0);

		vtemp = p.transpose()*Ap;
		pTAp = vtemp.coeff(0);
		alpha = rTr/pTAp;

		x = x + (alpha * p);
		r = r - (alpha * Ap);
		rnorm = r.norm();
		if(rnorm<residual)
		{
			isConverged = true;
			break;
		}

		vtemp = r.transpose()*r;
		rTrnew = vtemp.coeff(0);

		beta = rTrnew / rTr;
		p = r + (beta * p);
	}

	return x;
}
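A minimal sketch of driving this conjugate-gradient routine on a 2x2 symmetric positive-definite system, assuming the SparseSolverEigenCustom header is available and the class is default-constructible (neither is shown in the snippet):

#include <Eigen/Sparse>
#include <iostream>
// #include "SparseSolverEigenCustom.h"  // assumed header name

int main()
{
    // Small SPD system A*x = b.
    Eigen::SparseMatrix<double> A(2, 2);
    A.insert(0, 0) = 4.0;  A.insert(0, 1) = 1.0;
    A.insert(1, 0) = 1.0;  A.insert(1, 1) = 3.0;
    A.makeCompressed();

    Eigen::SparseVector<double> b(2);
    b.coeffRef(0) = 1.0;
    b.coeffRef(1) = 2.0;

    SparseSolverEigenCustom solver;  // assumed default-constructible
    Eigen::SparseVector<double> x = solver.cgSolveSparse(A, b, 100, 1e-10);
    std::cout << x.coeff(0) << " " << x.coeff(1) << "\n";  // ~0.0909 ~0.6364
    return 0;
}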
Example #30
// Returns a + factor*b by merging two index-sorted sparse vectors.
SparseVector multadd_ss(const SparseVector &a, const SparseVector &b, base_type factor)
{
    vector<pair<size_t, base_type>> words;

    SparseVector::const_iterator iter_a = a.begin();
    SparseVector::const_iterator iter_b = b.begin();

    while (iter_a!=a.end() && iter_b!=b.end())
    {
        if (iter_a->first > iter_b->first)
        {
            words.push_back(make_pair(iter_b->first, factor*iter_b->second));
            iter_b++;
        } else {
            if (iter_a->first < iter_b->first)
            {
                words.push_back(*iter_a);
                iter_a++;
            } else {
                // indices equal
                base_type weight = iter_a->second + factor*iter_b->second;
                if (weight!=0)
                    words.push_back(make_pair(iter_a->first, weight));
                iter_a++;
                iter_b++;
            }
        }
    }

    while (iter_b!=b.end())
    {
        words.push_back(make_pair(iter_b->first, factor*iter_b->second));
        iter_b++;
    }

    while (iter_a!=a.end())
    {
        words.push_back(*iter_a);
        iter_a++;
    }

    return SparseVector(words);
}
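Because a and b are kept sorted by index, this two-pointer merge computes a + factor*b in O(nnz(a) + nnz(b)) time, and the weight != 0 test drops entries that cancel exactly, so the result stays as sparse as possible. The final SparseVector(words) presumes a constructor taking an index-sorted pair list, which matches the order the merge produces.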