Example #1
/**
 * The basic calibration algorithm initially used in OpenPilot: a six-point
 * calibration that makes no attempt to account for white noise in the
 * sensors.
 * The measurement equation is
 * B_k = D^-1 * (A_k * H_k - b)
 * where B_k is the measurement of the field at time k,
 *       D is the diagonal matrix of scale factors,
 *       A_k is the attitude direction cosine matrix at time k,
 *       H_k is the reference field at the kth sample, and
 *       b is the vector of biases.
 *
 * After computing the scale factor and bias, the optimized measurement is
 * \tilde{B}_k = D * (B_k + b)
 *
 * @param[out] bias The computed bias of the sensor
 * @param[out] scale The computed scale factor of the sensor
 * @param samples An array of sample data points.  Only the first 6 are
 *      used.
 * @param n_samples The number of sample data points.  Must be at least 6.
 * @param referenceField The field being measured by the sensor.
 */
void openpilot_bias_scale(Vector3f & bias,
                          Vector3f & scale,
                          const Vector3f samples[],
                          const size_t n_samples,
                          const Vector3f & referenceField)
{
    // TODO: Add error handling, and return error codes through the return
    // value.
    if (n_samples < 6) {
        bias  = Vector3f::Zero();
        scale = Vector3f::Zero();
        return;
    }
    // mag**2 = (S*x + b)**2              (per axis, summed over axes)
    //        = S**2 * x**2 + 2*S*b*x + b**2
    // differencing two samples of equal magnitude:
    // 0 = S**2 * (x1**2 - x2**2) + 2*S*b*(x1 - x2)
    // Fill in matrix A -
    // write six difference-in-magnitude equations of the form
    // Sx^2(x2^2-x1^2) + 2*Sx*bx*(x2-x1) + Sy^2(y2^2-y1^2) +
    // 2*Sy*by*(y2-y1) + Sz^2(z2^2-z1^2) + 2*Sz*bz*(z2-z1) = 0
    //
    // or in other words
    // 2*Sx*bx*(x2-x1)/Sx^2  + Sy^2(y2^2-y1^2)/Sx^2  +
    // 2*Sy*by*(y2-y1)/Sx^2  + Sz^2(z2^2-z1^2)/Sx^2  + 2*Sz*bz*(z2-z1)/Sx^2  =
    // (x1^2-x2^2)
    Matrix<float, 5, 5> A;
    Matrix<float, 5, 1> f;
    for (unsigned i = 0; i < 5; i++) {
        A.row(i)[0] = 2.0 * (samples[i + 1].x() - samples[i].x());
        A.row(i)[1] = samples[i + 1].y() * samples[i + 1].y() - samples[i].y() * samples[i].y();
        A.row(i)[2] = 2.0 * (samples[i + 1].y() - samples[i].y());
        A.row(i)[3] = samples[i + 1].z() * samples[i + 1].z() - samples[i].z() * samples[i].z();
        A.row(i)[4] = 2.0 * (samples[i + 1].z() - samples[i].z());
        f[i] = samples[i].x() * samples[i].x() - samples[i + 1].x() * samples[i + 1].x();
    }
    Matrix<float, 5, 1> c = A.lu().solve(f);

    // use one magnitude equation and c's to find Sx - doesn't matter which - all give the same answer
    float xp = samples[0].x();
    float yp = samples[0].y();
    float zp = samples[0].z();

    float Sx = sqrt(referenceField.squaredNorm() /
                    (xp * xp + 2 * c[0] * xp + c[0] * c[0]
                     + c[1] * yp * yp + 2 * c[2] * yp + c[2] * c[2] / c[1]
                     + c[3] * zp * zp + 2 * c[4] * zp + c[4] * c[4] / c[3]));

    scale[0] = Sx;
    bias[0]  = Sx * c[0];
    scale[1] = sqrt(c[1] * Sx * Sx);
    bias[1]  = c[2] * Sx * Sx / scale[1];
    scale[2] = sqrt(c[3] * Sx * Sx);
    bias[2]  = c[4] * Sx * Sx / scale[2];

    for (int i = 0; i < 3; ++i) {
        // Fixup difference between openpilot measurement model and twostep
        // version
        bias[i] = -bias[i] / scale[i];
    }
}
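
A minimal usage sketch (hypothetical values, assuming Eigen's Vector3f). After
the sign fix-up at the end of the function, a raw reading is corrected per
axis as scale[i] * (raw[i] - bias[i]):

void example_calibration()
{
    // Six samples taken at roughly orthogonal attitudes (made-up values).
    Vector3f samples[6] = {
        Vector3f(0.41f, 0.03f, 0.02f), Vector3f(-0.45f, 0.02f, 0.01f),
        Vector3f(0.02f, 0.39f, 0.03f), Vector3f(0.01f, -0.43f, 0.02f),
        Vector3f(0.03f, 0.02f, 0.42f), Vector3f(0.02f, 0.01f, -0.40f)
    };
    Vector3f referenceField(0.0f, 0.0f, 0.42f); // hypothetical local field
    Vector3f bias, scale;
    openpilot_bias_scale(bias, scale, samples, 6, referenceField);

    // Correct a raw reading using the convention of the fix-up loop above.
    Vector3f corrected = scale.cwiseProduct(samples[0] - bias);
}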
Example #2
 void LiuWestParticleFilter::set_particles(const Matrix &state,
                                           const Matrix &parameters) {
   if (state.nrow() != parameters.nrow()) {
     report_error("state and parameters must have the same number of rows.");
   }
   if (state.ncol() != hmm_->state_dimension()) {
     report_error("State matrix should have state_dimension() columns.");
   }
   if (parameters.ncol() != parameter_particles_[0].size()) {
     std::ostringstream err;
     err << "Parameter matrix had " << parameters.ncol()
         << " columns, but " << parameter_particles_[0].size()
         << " were expected.";
     report_error(err.str());
   }
   int nparticles = state.nrow();
   state_particles_.resize(nparticles);
   parameter_particles_.resize(nparticles);
   log_weights_.resize(nparticles);
   for (int i = 0; i < nparticles; ++i) {
     state_particles_[i] = state.row(i);
     parameter_particles_[i] = parameters.row(i);
     log_weights_[i] = 0;
   }
 }
Example #3
Vector4 triangulateHomogeneousDLT(
    const std::vector<Matrix34>& projection_matrices,
    const std::vector<Point2>& measurements, double rank_tol) {

  // number of cameras
  size_t m = projection_matrices.size();

  // Allocate DLT matrix
  Matrix A = Matrix::Zero(m * 2, 4);

  for (size_t i = 0; i < m; i++) {
    size_t row = i * 2;
    const Matrix34& projection = projection_matrices.at(i);
    const Point2& p = measurements.at(i);

    // build system of equations
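    // Each image point contributes two rows of the homogeneous system
    // A * X = 0, obtained by eliminating the unknown depth from
    // p ~ projection * X (the cross product p x (projection * X) = 0).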
    A.row(row) = p.x() * projection.row(2) - projection.row(0);
    A.row(row + 1) = p.y() * projection.row(2) - projection.row(1);
  }
  int rank;
  double error;
  Vector v;
  boost::tie(rank, error, v) = DLT(A, rank_tol);

  if (rank < 3)
    throw(TriangulationUnderconstrainedException());

  return v;
}
Example #4
double BackProp::measureMse(Matrix& validationSet, Matrix& validationSetLabels)
{
	assert(validationSetLabels.cols() == 1);

	size_t valueCount = validationSetLabels.valueCount(0);
	double sumSquaredError = 0.0;
	double nErrors = 0.0;
	for(size_t i = 0; i < validationSet.rows(); i++)
	{
		std::vector<double> targetOutput = targetNetworkOutput(validationSetLabels.row(i), valueCount);
		std::vector<double> actualOutput;
		_layers[0].predict(validationSet.row(i), actualOutput);

		assert(targetOutput.size() == actualOutput.size());
		for(size_t j = 0; j < targetOutput.size(); j++)
		{
			double error = targetOutput[j] - actualOutput[j];
			nErrors += 1;
			sumSquaredError += error * error;
		}
	}

	double meanSquaredError = sumSquaredError / nErrors;
	return meanSquaredError;
}
Example #5
double calc_cost
(std::list< std::vector<int> > const &clusters,
 Matrix const &alpha,
 Matrix const &x,
 SymNoDiag *W,
 SymNoDiag const &diffs,
 double lambda
 ){
  std::list< std::vector<int> >::const_iterator it,cj;
  std::vector<int> rows,rowsj;
  unsigned int i,j,k,l;
  double loss=0,penalty=0;
  for(it=clusters.begin();it!=clusters.end();it++){
    //first calc loss
    rows = *it;
    i=rows[0]; //index to use for alpha/diffs
    Array a=alpha.row(i);
    for(k=0;k<rows.size();k++){ //index to use for x/W
      loss += nrm2sq(a-x.row(rows[k]));
      //then calc penalty
      for(cj=it,cj++;cj!=clusters.end();cj++){
        rowsj = *cj;
        j=rowsj[0]; //alpha/diffs
        for(l=0;l<rowsj.size();l++){ //x/W
          penalty += W->getval(rows[k],rowsj[l]) * diffs.getval(i,j);
        }
      }
    }
  }
  //printf("loss %f lambda %f penalty %f\n",loss,lambda,penalty);
  //return 0.5 * loss + lambda / ((double)(W->N-1)) * penalty;
  //TDH alternate parameterization
  return 0.5 * loss + lambda * penalty;
}
Example #6
double BackProp::measureAccuracy(Matrix& validationSet, Matrix& validationSetLabels, bool showNetwork)
{
	if(showNetwork)
	{
		std::cout << "Network... " << std::endl;
		BackPropLayer* itr = &_layers[0];
		while(itr != NULL)
		{
			std::cout << itr->toString() << std::endl;
			itr = itr->getNextLayer();
		}
	}

	double nRight = 0;
	double total = validationSet.rows();
	std::vector<double> prediction(1);
	for(size_t i = 0; i < validationSet.rows(); i++)
	{
		double actualLabel = validationSetLabels.row(i)[0];
		predict(validationSet.row(i), prediction);

		if(actualLabel == prediction[0])
			nRight += 1;
	}

	double percentRight = nRight / total;
	return percentRight;
}
Example #7
  pyPolynomialLoads(int nodes, const Vector &nP_np, const Matrix &Px_np, const Matrix &Py_np, const Matrix &Pz_np,
		    const Vector &Fx_np, const Vector &Fy_np, const Vector &Fz_np,
		    const Vector &Mx_np, const Vector &My_np, const Vector &Mz_np){

    loads.nodes = nodes;

    loads.Px.resize(loads.nodes-1);
    loads.Py.resize(loads.nodes-1);
    loads.Pz.resize(loads.nodes-1);
    loads.Fx = Fx_np;
    loads.Fy = Fy_np;
    loads.Fz = Fz_np;
    loads.Mx = Mx_np;
    loads.My = My_np;
    loads.Mz = Mz_np;

    for (int i = 0; i < loads.nodes-1; i++){
      int nP = nP_np[i];

      loads.Px[i] = Poly(nP, Px_np.row(i));
      loads.Py[i] = Poly(nP, Py_np.row(i));
      loads.Pz[i] = Poly(nP, Pz_np.row(i));
    }

  }
Example #8
Eigen::Matrix<typename Derived::Scalar, 4, 1> rotmat2quat(const Eigen::MatrixBase<Derived>& M)
{
  EIGEN_STATIC_ASSERT_MATRIX_SPECIFIC_SIZE(Eigen::MatrixBase<Derived>, 3, 3);
  using namespace std;

  Matrix<typename Derived::Scalar, 4, 3> A;
  A.row(0) << 1.0, 1.0, 1.0;
  A.row(1) << 1.0, -1.0, -1.0;
  A.row(2) << -1.0, 1.0, -1.0;
  A.row(3) << -1.0, -1.0, 1.0;
  Matrix<typename Derived::Scalar, 4, 1> B = A * M.diagonal();
  typename Matrix<typename Derived::Scalar, 4, 1>::Index ind, max_col;
  typename Derived::Scalar val = B.maxCoeff(&ind, &max_col);

  typename Derived::Scalar w, x, y, z;
  switch (ind) {
  case 0: {
    // val = trace(M)
    w = sqrt(1.0 + val) / 2.0;
    typename Derived::Scalar w4 = w * 4.0;
    x = (M(2, 1) - M(1, 2)) / w4;
    y = (M(0, 2) - M(2, 0)) / w4;
    z = (M(1, 0) - M(0, 1)) / w4;
    break;
  }
  case 1: {
    // val = M(1,1) - M(2,2) - M(3,3)
    double s = 2.0 * sqrt(1.0 + val);
    w = (M(2, 1) - M(1, 2)) / s;
    x = 0.25 * s;
    y = (M(0, 1) + M(1, 0)) / s;
    z = (M(0, 2) + M(2, 0)) / s;
    break;
  }
  case 2: {
    // val = M(2,2) - M(1,1) - M(3,3)
    double s = 2.0 * (sqrt(1.0 + val));
    w = (M(0, 2) - M(2, 0)) / s;
    x = (M(0, 1) + M(1, 0)) / s;
    y = 0.25 * s;
    z = (M(1, 2) + M(2, 1)) / s;
    break;
  }
  default: {
    // val = M(3,3) - M(2,2) - M(1,1)
    double s = 2.0 * (sqrt(1.0 + val));
    w = (M(1, 0) - M(0, 1)) / s;
    x = (M(0, 2) + M(2, 0)) / s;
    y = (M(1, 2) + M(2, 1)) / s;
    z = 0.25 * s;
    break;
  }
  }

  Eigen::Matrix<typename Derived::Scalar, 4, 1> q;
  q << w, x, y, z;
  return q;
}
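
A quick sanity check (hypothetical driver code, assuming Eigen): the
quaternion is returned in [w, x, y, z] order, so the identity rotation should
map to [1, 0, 0, 0].

Eigen::Matrix3d R = Eigen::Matrix3d::Identity();
Eigen::Vector4d q = rotmat2quat(R); // expect (1, 0, 0, 0)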
Example #9
 void DRSM::add_forecast_data(const Matrix &predictors) {
   if (ncol(predictors) != xdim_) {
     report_error("Forecast data has the wrong number of columns");
   }
   for (int i = 0; i < nrow(predictors); ++i) {
     sparse_predictor_vectors_.push_back(SparseVector(predictors.row(i)));
     sparse_predictor_matrices_.push_back(
         new DenseMatrix(Matrix(1, xdim_, predictors.row(i))));
   }
 }
Example #10
    typename Matrix::value_type 
    norm( const Matrix& A )
    {
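        // Induced infinity norm of A: the maximum absolute row sum.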
        typedef typename Matrix::value_type value_type;

        std::vector<value_type> m( A.row() );

        for ( std::size_t i = 0; i != A.row(); ++i )
            m[i] = std::accumulate( A.row_cbegin(i), A.row_cend(i), value_type(0), []( value_type u, value_type v ) { return u + std::abs(v); } );

        return *(std::max_element( m.begin(), m.end() ));
    }
Example #11
template <typename num_t> void Tensor<num_t>::setBlock(Matrix<num_t>& mat) {
    if (all_elem_.data() != mat.data()) {
        // std::cout << "ENTER setBlock" << std::endl;
        if (mat.row() == getInSize() && mat.col() == getOutSize()) {
            std::copy(mat.data(), mat.data() + mat.row() * mat.col(), all_elem_.data());
        } else {
            std::cout << "Matrix dimensions do not match the tensor bond sizes;"
                         " skipping the operation" << std::endl;
        }
    }
    return;
}
Example #12
double
MLP<Model, Tuple>::getLossAndUpdateModel(
        model_type           &model,
        const Matrix         &x_batch,
        const Matrix         &y_true_batch,
        const double         &stepsize) {

    uint16_t num_layers = model.u.size(); // assuming the number of layers >= 1
    size_t num_rows_in_batch = x_batch.rows();
    size_t i, k;
    double total_loss = 0.;

    // gradient added over the batch
    std::vector<Matrix> total_gradient_per_layer(num_layers);
    for (k=0; k < num_layers; ++k)
        total_gradient_per_layer[k] = Matrix::Zero(model.u[k].rows(),
                                                   model.u[k].cols());

    for (i=0; i < num_rows_in_batch; i++){
        ColumnVector x = x_batch.row(i);
        ColumnVector y_true = y_true_batch.row(i);

        std::vector<ColumnVector> net, o, delta;
        feedForward(model, x, net, o);
        backPropogate(y_true, o.back(), net, model, delta);

        for (k=0; k < num_layers; k++){
            total_gradient_per_layer[k] += o[k] * delta[k].transpose();
        }

        // loss computation
        ColumnVector y_estimated = o.back();
        if(model.is_classification){
            double clip = 1.e-10;
            y_estimated = y_estimated.cwiseMax(clip).cwiseMin(1.-clip);
            total_loss += - (y_true.array()*y_estimated.array().log()
                   + (-y_true.array()+1)*(-y_estimated.array()+1).log()).sum();
        }
        else{
            total_loss += 0.5 * (y_estimated - y_true).squaredNorm();
        }
    }
    for (k=0; k < num_layers; k++){
        Matrix regularization = MLP<Model, Tuple>::lambda * model.u[k];
        regularization.row(0).setZero(); // Do not update bias
        model.u[k] -= stepsize * (total_gradient_per_layer[k] / \
                                  num_rows_in_batch + \
                                  regularization);
    }
    return total_loss;
}
Example #13
void CLogistic::GradientDescent(const Matrix<double, Dynamic, Dynamic, RowMajor>& X, const VectorXd& y_class, VectorXd& theta, double lambda)
{
	// Performs one pass of stochastic gradient descent to learn theta.
	// Note: `lambda` here acts as the learning rate (step size).
	double prediction, hypothesis, error;
	for(int iter = 0; iter < X.rows(); iter++)
	{
		prediction = X.row(iter) * theta;
		hypothesis = sigmoid(prediction);
		error = hypothesis - y_class(iter);
		theta -= (lambda * error) * X.row(iter);
	}
}
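
A hypothetical driver (not from the original source), assuming Eigen types.
Note that `lambda` plays the role of the learning rate here; running several
epochs just means calling the routine repeatedly:

Matrix<double, Dynamic, Dynamic, RowMajor> X; // one row per training example
VectorXd y_class;                             // 0/1 labels, one per row of X
// ... fill X and y_class ...
VectorXd theta = VectorXd::Zero(X.cols());
CLogistic classifier;
for (int epoch = 0; epoch < 100; ++epoch)
    classifier.GradientDescent(X, y_class, theta, 0.01);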
Example #14
 DRSM::DynamicRegressionStateModel(const Matrix &X)
     : xdim_(ncol(X)),
       initial_state_mean_(xdim_, 0.0),
       initial_state_variance_(xdim_, 1.0),
       transition_matrix_(new IdentityMatrix(xdim_)) {
   setup_models_and_transition_variance_matrix();
   sparse_predictor_vectors_.reserve(nrow(X));
   for (int i = 0; i < nrow(X); ++i) {
     sparse_predictor_vectors_.push_back(SparseVector(X.row(i)));
     sparse_predictor_matrices_.push_back(
         new DenseMatrix(Matrix(1, xdim_, X.row(i))));
   }
   compute_predictor_variance();
 }
Example #15
Eigen::Matrix<double, 4, 8> dquatProduct(const Eigen::Vector4d& q1, const Eigen::Vector4d& q2)
{
  double w1 = q1(0);
  double w2 = q2(0);
  const auto& v1 = q1.tail<3>();
  const auto& v2 = q2.tail<3>();

  Matrix<double, 4, 8> dr;
  dr.row(0) << w2, -v2.transpose(), w1, -v1.transpose();
  dr.row(1) << q2(1), q2(0), q2(3), -q2(2), q1(1), q1(0), -q1(3), q1(2);
  dr.row(2) << q2(2), -q2(3), q2(0), q2(1), q1(2), q1(3), q1(0), -q1(1);
  dr.row(3) << q2(3), q2(2), -q2(1), q2(0), q1(3), -q1(2), q1(1), q1(0);
  return dr;
}
Example #16
 SSLM::StateSpaceLogitModel(const Vector &successes,
                            const Vector &trials,
                            const Matrix &design,
                            const std::vector<bool> &observed)
     : StateSpaceNormalMixture(ncol(design)),
       observation_model_(new BinomialLogitModel(ncol(design)))
 {
   setup();
   bool all_observed = observed.empty();
   if (successes.size() != trials.size()
       || successes.size() != nrow(design)
       || (!all_observed && successes.size() != observed.size())) {
     report_error("Data sizes do not match in StateSpaceLogitModel "
                  "constructor");
   }
   for (int i = 0; i < successes.size(); ++i) {
     NEW(ABRD, dp)(successes[i],
                   trials[i],
                   design.row(i));
     if (!(all_observed || observed[i])) {
       dp->set_missing_status(Data::missing_status::completely_missing);
     }
     add_data(dp);
   }
 }
Example #17
MatrixXd
PatchFit::parab (Matrix<double, Dynamic, 3> q, VectorXd a)
{   
  Vector3d k3, rv; 
  k3 << a(0), a(1), 0.0;
  rv << a.segment(2,3);
  
  MatrixXd rr; 
  rr = p_->rexp(rv);
  
  Vector3d c;
  if (ccon_)
    c = plane_c+a(5)*plane_n;
  else
    c = a.segment(5,3);
  MatrixXd qc(q.rows(),q.cols());

  for (int i=0; i<q.rows(); i++)
    qc.row(i) = q.row(i) - c.transpose();
  
  MatrixXd ql(q.rows(),3);
  ql = qc*rr;
  
  MatrixXd ql_sq(q.rows(),3);
  ql_sq = ql.array().square();

  return ((ql_sq * k3) - (2.0 * ql * zh));

  //TBD: enmo
}
Example #18
  void firbank(
    Matrix<T, Block1> inputs,
    Matrix<T, Block2> filters,
    Matrix<T, Block3> outputs,
    Matrix<T, Block4> expected,
    length_type       loop,
    float&            time)
  {
    this->verify_views(inputs, filters, outputs, expected);

    // Create fir filters
    length_type local_M = LOCAL(inputs).size(0);
    length_type N = inputs.row(0).size();

    typedef Fir<T, nonsym, state_no_save, 1> fir_type;
    fir_vector<T> fir(local_M);
    for ( length_type i = 0; i < local_M; ++i )
      fir[i] = new fir_type(LOCAL(filters).row(i), N, 1);


    vsip::impl::profile::Timer t1;
    
    t1.start();
    for (index_type l=0; l<loop; ++l)
    {
      // Perform FIR convolutions
      for ( length_type i = 0; i < local_M; ++i )
        (*fir[i])( LOCAL(inputs).row(i), LOCAL(outputs).row(i) );
    }
    t1.stop();
    time = t1.delta();

    // Verify data
    assert( view_equal(LOCAL(outputs), LOCAL(expected)) );
  }
Example #19
 double PDM::pdf(const Matrix &Pi, bool logscale) const {
   double ans(0);
   for (uint i = 0; i < Pi.nrow(); ++i) {
     ans += ddirichlet(Pi.row(i), Nu().row(i), true);
   }
   return logscale ? ans : exp(ans);
 }
Example #20
 SymmetryHandle
 rows_interchange(const Matrix<A>& m) {
   typename Matrix<A>::ArgsType xs;
   for (int r = 0 ; r < m.height() ; r++)
     xs << m.row(r);
   return VariableSequenceSymmetry(xs, m.width());
 }
Example #21
  // Take the design matrix and a vector of group names and group
  // values.  Picks out the information for each group, and then creates
  // and stores a Group.
  void AggregatedRegressionModel::initialize_groups(
      const Matrix &X,
      const std::vector<string> & group_names,
      const Vector & group_values){
    if(nrow(X) != group_names.size() || nrow(X) != group_values.size()){
      ostringstream err;
      err << "The number of rows in the design matrix (" << nrow(X)
          << ") should match the length of the group_names vector (" << group_names.size()
          << ") and the length of the group_values vector (" << group_values.size() << ")."
          << endl;
      report_error(err.str());
    }

    for(int i = 0; i < group_names.size(); ++i){
      string group_name = group_names[i];
      int pos = find_group(group_name, group_values[i]);
      Ptr<RegressionData> dp(new RegressionData(0, X.row(i)));
      dat()[pos]->add_unit(dp);
      model_->add_data(dp);
    }

    for(int i = 0; i < dat().size(); ++i){
      dat()[i]->initialize_unit_values();
    }

    refresh_suf();
  }
Example #22
 double fwd_1(Vector &pi, Matrix &P, const Matrix &logQ, const Vector &logd,
              const Vector &one) {
   /*----------------------------------------------------------------------
    * Input: pi[0..S-1] is the conditional distribution of h[t-1]|Y[t-1]
    *        one[0..S-1] is a vector of 1's
    *        logd[s] is logp(y[t] | model[s])
    *        logQ[0..S-1] is the log of the square transition
    *                     probability matrix (rows of Q sum to 1)
    *
    * Output:  pi[0..S-1]  is prob(h[t] | Y[t])
    *          P[0..S-1]^2 is prob(h[t-1], h[t] | Y[t])
    *
    * Return:  logp(y[t] | y[1]..y[t-1])
    * --------------------------------------------------------------------*/
   uint S = pi.size();
   P = logQ;
   pi = log(pi);
   for (uint r = 0; r < S; ++r) P.row(r) += logd;  // P(r,s) += logd[s]
   for (uint s = 0; s < S; ++s) P.col(s) += pi;    // P(r,s) += pi[r]
   double m = max(P);
   P -= m;
   P.exp();
   double nc = P.abs_norm();
   P /= nc;
   pi = one * P;
   return m + log(nc);
 }
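
The numerically delicate step above is shifting by the maximum before
exponentiating (m = max(P); P -= m; P.exp(); ...; return m + log(nc)). A
standalone sketch of the same log-sum-exp idea (hypothetical helper, plain
C++):

#include <algorithm>
#include <cmath>
#include <vector>

double log_sum_exp(const std::vector<double>& log_terms) {
  // Shift by the maximum so the largest term becomes exp(0) = 1, avoiding
  // overflow/underflow; the shift is undone outside the log.
  double m = *std::max_element(log_terms.begin(), log_terms.end());
  double sum = 0.0;
  for (double lt : log_terms) sum += std::exp(lt - m);
  return m + std::log(sum);
}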
Example #23
  static void exec(DstBlock& dst, SrcBlock const& src)
  {
    length_type rows = dst.size(2, 0);
    length_type cols = dst.size(2, 1);
    Matrix<T> tmp(1, cols);

    Matrix<T, CoeffsMatBlockT> w 
      (const_cast<CoeffsMatBlockT&>(src.functor().block().right()));
    Matrix<T, MatBlockT> in 
      (const_cast<MatBlockT&>(src.functor().block().left().functor().block()));
    Matrix<T, DstBlock> out(dst);

    Workspace2T const& fwd_workspace(
      src.functor().block().left().functor().workspace());

    Backend2T&         fwd_backend  (const_cast<Backend2T&>(
      src.functor().block().left().functor().backend()) );

    Workspace1T const& inv_workspace(src.functor().workspace());
    Backend1T&         inv_backend  (const_cast<Backend1T&>(src.functor().backend()));

    for (index_type r=0; r<rows; ++r)
    {
      fwd_workspace.by_reference(&fwd_backend,
				 in (Domain<2>(Domain<1>(r, 1, 1), cols)),
				 tmp(Domain<2>(Domain<1>(0, 1, 1), cols)) );
      tmp.row(0) *= w.row(r);
      inv_workspace.by_reference(&inv_backend,
				 tmp(Domain<2>(Domain<1>(0, 1, 1), cols)),
				 out(Domain<2>(Domain<1>(r, 1, 1), cols)) );
    }
  }
Example #24
 Matrix<T> HermBandSVDiv<T>::getVt() const
 {
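     // For a Hermitian eigendecomposition A = U S U^H the SVD is U |S| V^H,
     // so Vt is U^H with the rows corresponding to negative eigenvalues
     // negated.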
     Matrix<T> temp = pimpl->U.adjoint();
     const ptrdiff_t N = pimpl->S.size();
     for(ptrdiff_t i=0;i<N;i++) 
         if (pimpl->S(i) < 0) temp.row(i) *= RT(-1);
     return temp;
 }
Example #25
valarray<double> operator*(const Matrix &m, const valarray<double> &v) {
    valarray<double> res(m.dim1());
    for (int i = 0; i < m.dim1(); i++) {
        res[i] = mul(m.row(i), v);
    }

    return res;
}
Example #26
inline Matrix<T, Dynamic, Dynamic> pick_row(const Matrix<T, Dynamic, Dynamic>& M, const VectorXi& ind)
{
	Matrix<T, Dynamic, Dynamic> N(ind.size(), M.cols());
	for(unsigned i = 0; i < ind.size(); ++i)
		N.row(i) = M.row(ind(i));
	
	return N;
}
Example #27
VALUE OR_Matrix::to_ruby()
{
  Matrix matrix;
  MArray<double> values;
  double cell;
  int i, number_of_values;
  VALUE argv[2];
  
  matrix = octave_val.matrix_value();
  int number_of_rows = matrix.rows();
  int number_of_columns = matrix.columns();
  
  if ((number_of_rows == 0) && (number_of_columns == 0)) {
    return rb_ary_new2(0);
  } else if (number_of_columns == 1) {
    values = matrix.column(0);
  } else {
    argv[0] = INT2FIX(number_of_rows);
    argv[1] = INT2FIX(number_of_columns);
    ruby_val = rb_class_new_instance(2, argv, rb_path2class("Octave::Matrix"));
    
    int row_index, column_index = 0;
    VALUE cells, row;
    cells = rb_ary_new2(number_of_rows);
    for (row_index = 0; row_index < number_of_rows; row_index++) {
      row = rb_ary_new2(number_of_columns);
      values = matrix.row(row_index);
      
      for (column_index = 0; column_index < number_of_columns; column_index++) {
        cell = values(column_index);
        if (xisnan(cell) || octave_is_NA(cell)) {
          rb_ary_push(row, Qnil);
        } else {
          rb_ary_push(row, rb_float_new(cell));
        }
      }
      
      rb_ary_push(cells, row);
    }
    
    rb_iv_set(ruby_val, "@cells", cells);
    return ruby_val;
  }
  
  number_of_values = values.length();
  ruby_val = rb_ary_new2(number_of_values);
  for (i = 0; i < number_of_values; i++) {
    cell = values(i);
    if (xisnan(cell) || octave_is_NA(cell)) {
      rb_ary_push(ruby_val, Qnil);
    } else {
      rb_ary_push(ruby_val, rb_float_new(cell));
    }
  }
  
  return ruby_val;
}
Example #28
 SpdMatrix var(const Matrix &m){
   SpdMatrix ans(m.ncol(), 0.0);
   Vector mu = mean(m);
   for(uint i = 0; i < m.nrow(); ++i){
     Vector tmp = m.row(i) - mu;
     ans.add_outer(tmp);
   }
   ans /= (m.nrow() - 1);
   return ans;
 }
Example #29
Eigen::Matrix<double, 1, 11> dquatDiffAxisInvar(const Eigen::Vector4d& q1, const Eigen::Vector4d& q2, const Eigen::Vector3d& u)
{
  Vector4d r = quatDiff(q1, q2);
  Matrix<double, 4, 8> dr = dquatDiff(q1, q2);
  Matrix<double, 1, 11> de;
  const auto& rvec = r.tail<3>();
  de << 4.0 * r(0) * dr.row(0) + 4.0 * u.transpose() * rvec *u.transpose() * dr.block<3, 8>(1, 0), 4.0 * u.transpose() * rvec * rvec.transpose();
  return de;
}
Example #30
bool Matrix::operator==(const Matrix& m) const {
	if (m.nb_rows()!=nb_rows()) return false;
	if (m.nb_cols()!=nb_cols()) return false;

	for (int i=0; i<_nb_rows; i++) {
		if (row(i)!=m.row(i)) return false;
	}
	return true;
}