Example No. 1
void RigidObject::computeBoundingBox() {
  _bounding_box.resize(8 * 3);
  Eigen::Map<const Eigen::MatrixXf> vertices_f(getPositions().data(), 3,
                                               getNPositions());
  Eigen::MatrixXd vertices;
  vertices = vertices_f.cast<double>();

  // subtract vertices mean
  Eigen::Vector3d mean_vertices = vertices.rowwise().mean();
  vertices = vertices - mean_vertices.replicate(1, getNPositions());

  // compute eigenvectors of the covariance matrix
  Eigen::EigenSolver<Eigen::MatrixXd> eigen_solver(vertices *
                                                   vertices.transpose());
  Eigen::MatrixXd real_eigenvectors = eigen_solver.eigenvectors().real();

  // rotate centered vertices with inverse eigenvector matrix
  vertices = real_eigenvectors.transpose() * vertices;

  // compute simple bounding box
  auto mn = vertices.rowwise().minCoeff();
  auto mx = vertices.rowwise().maxCoeff();
  Eigen::Matrix<double, 3, 8> bounding_box;
  bounding_box << mn(0), mn(0), mn(0), mn(0), mx(0), mx(0), mx(0), mx(0), mn(1),
      mn(1), mx(1), mx(1), mn(1), mn(1), mx(1), mx(1), mn(2), mx(2), mn(2),
      mx(2), mn(2), mx(2), mn(2), mx(2);

  // rotate and translate bounding box back to original position
  Eigen::Matrix3d rot_back = real_eigenvectors;
  Eigen::Translation<double, 3> tra_back(mean_vertices);
  Eigen::Transform<double, 3, Eigen::Affine> t = tra_back * rot_back;
  bounding_box = t * bounding_box;

  // convert to float
  Eigen::Map<Eigen::MatrixXf> bounding_box_f(_bounding_box.data(), 3, 8);

  bounding_box_f = bounding_box.cast<float>();
}
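
The scatter matrix vertices * vertices.transpose() is symmetric, so Eigen::SelfAdjointEigenSolver (real, ascending-sorted eigenvalues) is the more idiomatic choice than the general Eigen::EigenSolver used above. A minimal, self-contained sketch of extracting the PCA axes that way; the helper below is illustrative and not part of RigidObject:

#include <Eigen/Dense>

// Sketch: principal axes of a 3xN point matrix via the symmetric scatter matrix.
Eigen::Matrix3d pcaAxes(const Eigen::MatrixXd& points /* 3 x N */) {
  Eigen::Vector3d mean = points.rowwise().mean();
  Eigen::MatrixXd centered = points.colwise() - mean;     // subtract the mean
  Eigen::Matrix3d scatter = centered * centered.transpose();
  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> solver(scatter);
  return solver.eigenvectors();  // columns are the principal axes (ascending eigenvalues)
}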
Eigen::VectorXd ExperimentalTrajectory::getVariance(double time)
{
    if (varianceStartTrigger) {
        t0_variance = time;
        varianceStartTrigger = false;
    }


    double t = time - t0_variance;

    Eigen::VectorXd variance;

    if (t <= pointToPointDurationVector.sum()) {
        Eigen::MatrixXd Ks = kernelFunction(t);
        variance = maxCovariance - (Ks * designMatrixInv * Ks.transpose()).diagonal();
    }
    else {
        variance = Eigen::VectorXd::Zero(maxCovariance.rows());
    }

    return variance;
}
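
The branch above is the usual Gaussian-process-style predictive variance, maxCovariance - diag(Ks * K^{-1} * Ks^T), with the inverse precomputed as designMatrixInv. A hedged sketch of the same quantity computed from a Cholesky factorization instead of an explicit inverse; the helper and parameter names are illustrative, not the class API:

#include <Eigen/Dense>

// Sketch only: assumes designMatrix is the symmetric positive-definite kernel
// Gram matrix and Ks is the cross-kernel row block, mirroring the snippet above.
Eigen::VectorXd predictiveVariance(const Eigen::MatrixXd& designMatrix,
                                   const Eigen::MatrixXd& Ks,
                                   const Eigen::VectorXd& maxCovariance) {
  Eigen::LLT<Eigen::MatrixXd> llt(designMatrix);      // K = L L^T
  Eigen::MatrixXd alpha = llt.solve(Ks.transpose());  // K^{-1} Ks^T without forming K^{-1}
  return maxCovariance - (Ks * alpha).diagonal();
}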
Example No. 3
void
NurbsTools::pca (const vector_vec3d &data, Eigen::Vector3d &mean, Eigen::Matrix3d &eigenvectors,
                 Eigen::Vector3d &eigenvalues)
{
  if (data.empty ())
  {
    printf ("[NurbsTools::pca] Error, data is empty\n");
    abort ();
  }

  mean = computeMean (data);

  unsigned s = unsigned (data.size ());

  Eigen::MatrixXd Q (3, s);

  for (unsigned i = 0; i < s; i++)
    Q.col (i) << (data[i] - mean);

  Eigen::Matrix3d C = Q * Q.transpose ();

  Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eigensolver (C);
  if (eigensolver.info () != Eigen::Success)
  {
    printf ("[NurbsTools::pca] Can not find eigenvalues.\n");
    abort ();
  }

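  // Note: SelfAdjointEigenSolver returns eigenvalues in ascending order, so the
  // loop below reverses them (largest first) and replaces the third eigenvector
  // with the cross product of the first two to keep a right-handed frame.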
  for (int i = 0; i < 3; ++i)
  {
    eigenvalues (i) = eigensolver.eigenvalues () (2 - i);
    if (i == 2)
      eigenvectors.col (2) = eigenvectors.col (0).cross (eigenvectors.col (1));
    else
      eigenvectors.col (i) = eigensolver.eigenvectors ().col (2 - i);
  }
}
Example No. 4
void mrpt::vision::pnp::rpnp::calcampose(
	Eigen::MatrixXd& XXc, Eigen::MatrixXd& XXw, Eigen::Matrix3d& R2,
	Eigen::Vector3d& t2)
{
	Eigen::MatrixXd X = XXc;
	Eigen::MatrixXd Y = XXw;
	Eigen::MatrixXd K =
		Eigen::MatrixXd::Identity(n, n) - Eigen::MatrixXd::Ones(n, n) * (1.0 / n);
	Eigen::VectorXd ux, uy;
	uy = X.rowwise().mean();
	ux = Y.rowwise().mean();

	// Need to verify sigmax2
	double sigmax2 =
		(((X * K).array() * (X * K).array()).colwise().sum()).mean();

	Eigen::MatrixXd SXY = Y * K * (X.transpose()) / n;

	Eigen::JacobiSVD<Eigen::MatrixXd> svd(
		SXY, Eigen::ComputeThinU | Eigen::ComputeThinV);

	Eigen::Matrix3d S = Eigen::MatrixXd::Identity(3, 3);
	if (SXY.determinant() < 0) S(2, 2) = -1;

	R2 = svd.matrixV() * S * svd.matrixU().transpose();

	double c2 = (svd.singularValues().asDiagonal() * S).trace() / sigmax2;
	t2 = uy - c2 * R2 * ux;

	Eigen::Vector3d x, y, z;
	x = R2.col(0);
	y = R2.col(1);
	z = R2.col(2);

	if ((x.cross(y) - z).norm() > 0.02) R2.col(2) = -R2.col(2);
}
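
calcampose recovers the rotation, scale, and translation aligning two 3xN correspondence sets, i.e. a Umeyama-style absolute-orientation step. Eigen ships a closed-form version of this that can serve as a cross-check; Eigen::umeyama is a real Eigen/Geometry function, while the wrapper below is only an illustrative sketch:

#include <Eigen/Dense>
#include <Eigen/Geometry>

// Sketch: similarity transform (rotation, translation, optional scale) mapping
// the columns of src onto the columns of dst, returned as a 4x4 homogeneous matrix.
Eigen::Matrix4d alignPoints(const Eigen::Matrix3Xd& src,
                            const Eigen::Matrix3Xd& dst) {
  return Eigen::umeyama(src, dst, /*with_scaling=*/true);
}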
void ExperimentalTrajectory::calculateRbfnWeights()
{
    int nC = kernelCenters.rows();
    Eigen::MatrixXd rbfnDesignMatrix = rbfnKernelFunction(kernelCenters);
    // rbfnWeights = Eigen::MatrixXd::Zero(nDoF, nC);
    Eigen::MatrixXd wayTrans = waypoints.transpose();

    // std::cout << "phi = " << rbfnDesignMatrix.rows() << " x " << rbfnDesignMatrix.cols() << std::endl;
    // std::cout << "way = " << wayTrans.rows()<< " x " << wayTrans.cols() << std::endl;
    Eigen::MatrixXd A = rbfnDesignMatrix * rbfnDesignMatrix.transpose();
    // std::cout << "A = " << A.rows()<< " x " << A.cols() << std::endl;

    Eigen::MatrixXd b = rbfnDesignMatrix * wayTrans;

    // std::cout << "b = " << b.rows()<< " x " << b.cols() << std::endl;


    // rbfnWeights = (A.inverse() * b).transpose(); // the transpose makes weights = nDoF x nCenters which is better for the output function.

    // rbfnWeights = A.fullPivLu().solve(b).transpose();
    rbfnWeights = rbfnDesignMatrix.fullPivLu().solve(wayTrans).transpose();
    // std::cout << "rbfn weights:\n" << rbfnWeights << std::endl;

}
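
The active line solves phi * W = Y with a full-pivot LU on the square kernel matrix. When that matrix is nearly singular (common for closely spaced RBF centers), a ridge-regularized solve is a standard alternative. A sketch under that assumption, with illustrative names (phi = design matrix, targets = waypoints transposed, as in the snippet above):

#include <Eigen/Dense>

// Sketch: Tikhonov-regularized RBFN weights via a Cholesky (LDLT) solve, which
// tolerates near-singular kernel matrices better than a plain inverse.
Eigen::MatrixXd solveRbfnWeights(const Eigen::MatrixXd& phi,
                                 const Eigen::MatrixXd& targets,
                                 double ridge = 1e-8) {
  Eigen::MatrixXd A = phi * phi.transpose();
  A.diagonal().array() += ridge;            // ridge term keeps A well conditioned
  Eigen::MatrixXd b = phi * targets;
  return A.ldlt().solve(b).transpose();     // nDoF x nCenters, as in the original layout
}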
Example No. 6
	void object::test<6>()
	{
		for (int i = 0;i<10;i++)
		{
			int dim = rand()%1000+2;
			int samplenum1 = rand()%1000+100;
			int samplenum2 = rand()%1000+100;

			Eigen::MatrixXd ha = Eigen::MatrixXd::Random(dim,samplenum1);
			Eigen::MatrixXd hb = Eigen::MatrixXd::Random(dim,samplenum2);



			Eigen::VectorXd haa = (ha.array()*ha.array()).colwise().sum();
			Eigen::VectorXd hbb = (hb.array()*hb.array()).colwise().sum();

			Eigen::MatrixXd hdist = -2*ha.transpose()*hb;

			hdist.colwise() += haa;

			hdist.rowwise() += hbb.transpose();

			
			Matrix<double> ga(ha),gb(hb);

			Matrix<double> gdist = -2*ga.transpose()*gb;

			Vector<double> gaa = (ga.array()*ga.array()).colwise().sum();
			Vector<double> gbb = (gb.array()*gb.array()).colwise().sum();

			gdist.colwise() += gaa;
			gdist.rowwise() += gbb;

			ensure(check_diff(hdist,gdist));
		}
	}
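
The host-side reference in this test builds the squared-distance matrix from the identity ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b. A self-contained Eigen sketch of that computation (illustrative helper, not part of the test fixture):

#include <Eigen/Dense>

// Sketch: squared Euclidean distances between the columns of A (d x n) and B (d x m).
Eigen::MatrixXd pairwiseSquaredDistances(const Eigen::MatrixXd& A,
                                         const Eigen::MatrixXd& B) {
  Eigen::VectorXd aa = A.colwise().squaredNorm().transpose();  // n column norms
  Eigen::VectorXd bb = B.colwise().squaredNorm().transpose();  // m column norms
  Eigen::MatrixXd dist = -2.0 * A.transpose() * B;             // -2 a.b, n x m
  dist.colwise() += aa;
  dist.rowwise() += bb.transpose();
  return dist;
}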
Example No. 7
void cIKSolver::StepHybrid(const Eigen::MatrixXd& cons_desc, const tProblem& prob, Eigen::MatrixXd& joint_desc,
							Eigen::VectorXd& out_pose)
{
	const int num_dof = cKinTree::GetNumDof(joint_desc);
	const int num_joints = static_cast<int>(joint_desc.rows());
	const int num_cons = static_cast<int>(cons_desc.rows());

	Eigen::VectorXd err;
	Eigen::MatrixXd J;
	Eigen::MatrixXd J_weighted_buff = Eigen::MatrixXd::Zero(num_dof, num_dof);
	Eigen::VectorXd Jt_err_weighted_buff = Eigen::VectorXd::Zero(num_dof);
	Eigen::MatrixXd N = Eigen::MatrixXd::Identity(num_dof, num_dof);
	Eigen::VectorXi chain_joints(num_joints); // keeps track of joints in Ik chain

	double clamp_dist = prob.mClampDist;
	double damp = prob.mDamp;

	int min_priority = std::numeric_limits<int>::max();
	int max_priority = std::numeric_limits<int>::min();

	for (int c = 0; c < num_cons; ++c)
	{
		int curr_priority = static_cast<int>(cons_desc(c, eConsDescPriority));
		min_priority = std::min(min_priority, curr_priority);
		max_priority = std::max(max_priority, curr_priority);
	}

	for (int p = min_priority; p <= max_priority; ++p)
	{
		int curr_num_dof = static_cast<int>(N.cols());
		auto J_weighted = J_weighted_buff.block(0, 0, curr_num_dof, curr_num_dof);
		auto Jt_err_weighted = Jt_err_weighted_buff.block(0, 0, curr_num_dof, 1);

		J_weighted.setZero();
		Jt_err_weighted.setZero();

		chain_joints.setZero();
		
		int num_valid_cons = 0;
		for (int c = 0; c < num_cons; ++c)
		{
			const tConsDesc& curr_cons = cons_desc.row(c);
			int curr_priority = static_cast<int>(curr_cons(eConsDescPriority));

			if (curr_priority == p)
			{
				++num_valid_cons;
				err = BuildErr(joint_desc, out_pose, curr_cons, clamp_dist);
				J = BuildJacob(joint_desc, out_pose, curr_cons);

#if !defined(DISABLE_LINK_SCALE)
				for (int i = 0; i < num_joints; ++i)
				{
					// use entries in the jacobian to figure out if a joint is on the
					// link chain from the root to the constrained end effectors
					// this ignores the root which should not have any scaling
					int scale_idx = gPosDims + num_joints + i;
					int theta_idx = gPosDims + i;
					double scaling = J.col(scale_idx).squaredNorm();
					if (scaling > 0)
					{
						chain_joints(i) = 1;
					}
				}
#endif
				J = J * N;

				double weight = curr_cons(eConsDescWeight);
				J_weighted += weight * J.transpose() * J;
				Jt_err_weighted += weight * J.transpose() * err;
			}
		}

		if (num_valid_cons > 0)
		{
			// apply damping
			// a little more tricky with the null space

			auto N_block = N.block(0, 0, gPosDims + num_joints, N.cols());
			J_weighted += damp * N.transpose() * N;

#if !defined(DISABLE_LINK_SCALE)
			// damp link scaling according to stiffness
			for (int i = 0; i < num_joints; ++i)
			{
				// only scale links that are part of the IK chain
				if (chain_joints(i) == 1)
				{
					int idx = gPosDims + num_joints + i;
					const Eigen::VectorXd& N_row = N.row(idx);

					double d_scale = 1.f - joint_desc(i, cKinTree::eJointDescScale);
					double link_stiffness = joint_desc(i, cKinTree::eJointDescLinkStiffness);
					J_weighted += link_stiffness * N_row * N_row.transpose();
					Jt_err_weighted += link_stiffness * d_scale * N_row;
				}
			}
#endif

			Eigen::VectorXd y = J_weighted.lu().solve(Jt_err_weighted);
			Eigen::VectorXd x = N * y;
			cKinTree::ApplyStep(joint_desc, x, out_pose);
			
			bool is_last = p == max_priority;

			if (!is_last)
			{
				Eigen::MatrixXd cons_mat = Eigen::MatrixXd(num_valid_cons, cons_desc.cols());
				int r = 0;
				for (int c = 0; c < num_cons; ++c)
				{
					const tConsDesc& curr_cons = cons_desc.row(c);
					int curr_priority = static_cast<int>(curr_cons(eConsDescPriority));

					if (curr_priority == p)
					{
						cons_mat.row(r) = curr_cons;
						++r;
					}
				}

				J = BuildJacob(joint_desc, out_pose, cons_mat);
				J = J * N;

				Eigen::MatrixXd curr_N = BuildKernel(J);
				if (curr_N.cols() == 0)
				{
					break;
				}

				N = N * curr_N;
			}
		}
	}
}
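
StepHybrid accumulates weighted normal equations per priority level and solves them inside the running null space N. For reference, a stripped-down damped least-squares step without priorities or null-space projection; this is a sketch for illustration only, not the project's API:

#include <Eigen/Dense>

// Sketch: solves (w * J^T J + damp * I) dq = w * J^T err, the same
// normal-equation structure StepHybrid builds for each priority level.
Eigen::VectorXd dampedLeastSquaresStep(const Eigen::MatrixXd& J,
                                       const Eigen::VectorXd& err,
                                       double weight, double damp) {
  const int n = static_cast<int>(J.cols());
  Eigen::MatrixXd A = weight * J.transpose() * J
                    + damp * Eigen::MatrixXd::Identity(n, n);
  Eigen::VectorXd b = weight * J.transpose() * err;
  return A.lu().solve(b);   // PartialPivLU solve, as in the original code
}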
Example No. 8
void marginalize(
			std::vector<aslam::backend::DesignVariable*>& inDesignVariables,
			std::vector<aslam::backend::ErrorTerm*>& inErrorTerms,
			int numberOfInputDesignVariablesToRemove,
			bool useMEstimator,
			boost::shared_ptr<aslam::backend::MarginalizationPriorErrorTerm>& outPriorErrorTermPtr,
			Eigen::MatrixXd& outCov,
			std::vector<aslam::backend::DesignVariable*>& outDesignVariablesInRTop,
			size_t numTopRowsInCov,
			size_t /* numThreads */)
{
		  SM_WARN_STREAM_COND(inDesignVariables.size() == 0, "Zero input design variables in the marginalizer!");

		  // check for duplicates!
		  std::unordered_set<aslam::backend::DesignVariable*> inDvSetHT;
		  for(auto it = inDesignVariables.begin(); it != inDesignVariables.end(); ++it)
		  {
			  auto ret = inDvSetHT.insert(*it);
			  SM_ASSERT_TRUE(aslam::Exception, ret.second, "Error! Duplicate design variables in input list!");
		  }
		  std::unordered_set<aslam::backend::ErrorTerm*> inEtSetHT;
		  for(auto it = inErrorTerms.begin(); it != inErrorTerms.end(); ++it)
		  {
			  auto ret = inEtSetHT.insert(*it);
			  SM_ASSERT_TRUE(aslam::Exception, ret.second, "Error! Duplicate error term in input list!");
		  }
		  SM_DEBUG_STREAM("NO duplicates in input design variables or input error terms found.");

          // Partition the design variables into removed/remaining.
		  int dimOfDesignVariablesToRemove = 0;
		  std::vector<aslam::backend::DesignVariable*> remainingDesignVariables;
		  int k = 0;
		  size_t dimOfDvsInTopBlock = 0;
		  for(std::vector<aslam::backend::DesignVariable*>::const_iterator it = inDesignVariables.begin(); it != inDesignVariables.end(); ++it)
		  {
			  if (k < numberOfInputDesignVariablesToRemove)
			  {
				  dimOfDesignVariablesToRemove += (*it)->minimalDimensions();
			  } else
			  {
				  remainingDesignVariables.push_back(*it);
			  }

			  if(dimOfDvsInTopBlock < numTopRowsInCov)
			  {
				  outDesignVariablesInRTop.push_back(*it);
			  }
			  dimOfDvsInTopBlock += (*it)->minimalDimensions();
			  k++;
		  }

		  // store original block indices to prevent side effects
		  std::vector<int> originalBlockIndices;
		  std::vector<int> originalColumnBase;
			// assign block indices
			int columnBase = 0;
			for (size_t i = 0; i < inDesignVariables.size(); ++i) {
				originalBlockIndices.push_back(inDesignVariables[i]->blockIndex());
				originalColumnBase.push_back(inDesignVariables[i]->columnBase());

				inDesignVariables[i]->setBlockIndex(i);
				inDesignVariables[i]->setColumnBase(columnBase);
			  columnBase += inDesignVariables[i]->minimalDimensions();
			}

			int dim = 0;
			std::vector<size_t> originalRowBase;
			for(std::vector<aslam::backend::ErrorTerm*>::iterator it = inErrorTerms.begin(); it != inErrorTerms.end(); ++it)
			{
				originalRowBase.push_back((*it)->rowBase());
				(*it)->setRowBase(dim);
				dim += (*it)->dimension();
			}

		  aslam::backend::DenseQrLinearSystemSolver qrSolver;
          qrSolver.initMatrixStructure(inDesignVariables, inErrorTerms, false);

		  SM_INFO_STREAM("Marginalization optimization problem initialized with " << inDesignVariables.size() << " design variables and " << inErrorTerms.size() << " error terms");
		  SM_INFO_STREAM("The Jacobian matrix is " << dim << " x " << columnBase);

		  qrSolver.evaluateError(1, useMEstimator);
		  qrSolver.buildSystem(1, useMEstimator);


		  const Eigen::MatrixXd& jacobian = qrSolver.getJacobian();
		  const Eigen::VectorXd& b = qrSolver.e();

		  // check dimension of jacobian
		  int jrows = jacobian.rows();
		  int jcols = jacobian.cols();

		  int dimOfRemainingDesignVariables = jcols - dimOfDesignVariablesToRemove;
		  //int dimOfPriorErrorTerm = jrows;

		  // check the rank
		  Eigen::FullPivLU<Eigen::MatrixXd> lu_decomp(jacobian);
		  //lu_decomp.setThreshold(1e-20);
		  double threshold = lu_decomp.threshold();
		  int rank = lu_decomp.rank();
		  int fullRank = std::min(jacobian.rows(), jacobian.cols());
		  SM_DEBUG_STREAM("Rank of jacobian: " << rank << " (full rank: " << fullRank << ", threshold: " << threshold << ")");
		  bool rankDeficient = rank < fullRank;
		  if(rankDeficient)
		  {
			  SM_WARN("Marginalization jacobian is rank deficient!");
		  }
		  //SM_ASSERT_FALSE(aslam::Exception, rankDeficient, "Right now, we don't want the jacobian to be rank deficient - ever...");

		  Eigen::MatrixXd R_reduced;
		  Eigen::VectorXd d_reduced;
		  if (jrows < jcols)
		  {
			  SM_THROW(aslam::Exception, "underdetermined LSE!");
			  // underdetermined LSE, don't do QR
			  R_reduced = jacobian.block(0, dimOfDesignVariablesToRemove, jrows, jcols - dimOfDesignVariablesToRemove);
			  d_reduced = b;

		  } else {
			  // PTF: Do we know what will happen when the jacobian matrix is rank deficient?
			  // MB: yes, bad things!

              // do QR decomposition
			  sm::timing::Timer myTimer("QR Decomposition");
              Eigen::HouseholderQR<Eigen::MatrixXd> qr(jacobian);
			  Eigen::MatrixXd Q = qr.householderQ();
			  Eigen::MatrixXd R = qr.matrixQR().triangularView<Eigen::Upper>();
			  Eigen::VectorXd d = Q.transpose()*b;
			  myTimer.stop();

			  if(numTopRowsInCov > 0)
			  {
				sm::timing::Timer myTimer("Covariance computation");
				Eigen::FullPivLU<Eigen::MatrixXd> lu_decomp(R);
				Eigen::MatrixXd Rinv = lu_decomp.inverse();
				Eigen::MatrixXd covariance = Rinv * Rinv.transpose();
				outCov = covariance.block(0, 0, numTopRowsInCov, numTopRowsInCov);
				myTimer.stop();
			  }

//			  size_t numRowsToKeep = rank - dimOfDesignVariablesToRemove;
//			  SM_ASSERT_TRUE_DBG(aslam::Exception, rankDeficient || (numRowsToKeep == dimOfRemainingDesignVariables), "must be the same if full rank!");

			  // get the top left block
			  SM_ASSERT_GE(aslam::Exception, R.rows(), numTopRowsInCov, "Cannot extract " << numTopRowsInCov << " rows of R because it only has " << R.rows() << " rows.");
			  SM_ASSERT_GE(aslam::Exception, R.cols(), numTopRowsInCov, "Cannot extract " << numTopRowsInCov << " cols of R because it only has " << R.cols() << " cols.");
			  //outRtop = R.block(0, 0, numTopRowsInRtop, numTopRowsInRtop);

              // cut off the zero rows at the bottom
              R_reduced = R.block(dimOfDesignVariablesToRemove, dimOfDesignVariablesToRemove, dimOfRemainingDesignVariables, dimOfRemainingDesignVariables);
			  //R_reduced = R.block(dimOfDesignVariablesToRemove, dimOfDesignVariablesToRemove, numRowsToKeep, dimOfRemainingDesignVariables);

              d_reduced = d.segment(dimOfDesignVariablesToRemove, dimOfRemainingDesignVariables);
              //d_reduced = d.segment(dimOfDesignVariablesToRemove, numRowsToKeep);
              //dimOfPriorErrorTerm = dimOfRemainingDesignVariables;
		  }

		  // now create the new error term
		  boost::shared_ptr<aslam::backend::MarginalizationPriorErrorTerm> err(new aslam::backend::MarginalizationPriorErrorTerm(remainingDesignVariables, d_reduced, R_reduced));

		  outPriorErrorTermPtr.swap(err);

		  // restore initial block indices to prevent side effects
          for (size_t i = 0; i < inDesignVariables.size(); ++i) {
              inDesignVariables[i]->setBlockIndex(originalBlockIndices[i]);
              inDesignVariables[i]->setColumnBase(originalColumnBase[i]);
          }
          int index = 0;
          for(std::vector<aslam::backend::ErrorTerm*>::iterator it = inErrorTerms.begin(); it != inErrorTerms.end(); ++it)
          {
              (*it)->setRowBase(originalRowBase[index++]);
          }
}
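
The overdetermined branch above is a QR-based marginalization: factor the stacked Jacobian, rotate the residual into the R basis, and keep the lower-right block as a linear prior on the remaining variables. A condensed sketch of just that step, assuming rows >= cols; the function and argument names are illustrative, not the marginalizer's API:

#include <Eigen/Dense>

// Sketch: reduce (J, b) to a prior (R_reduced, d_reduced) on the non-marginalized
// block, mirroring the HouseholderQR path used in the code above.
void reduceByQr(const Eigen::MatrixXd& J, const Eigen::VectorXd& b,
                int dimToRemove,
                Eigen::MatrixXd& R_reduced, Eigen::VectorXd& d_reduced) {
  Eigen::HouseholderQR<Eigen::MatrixXd> qr(J);
  Eigen::MatrixXd R = qr.matrixQR().triangularView<Eigen::Upper>();
  Eigen::MatrixXd Q = qr.householderQ();        // dense Q, as in the code above
  Eigen::VectorXd d = Q.transpose() * b;        // residual in the R basis
  const int keep = static_cast<int>(J.cols()) - dimToRemove;
  R_reduced = R.block(dimToRemove, dimToRemove, keep, keep);
  d_reduced = d.segment(dimToRemove, keep);
}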
  void Ekf::correct(const Measurement &measurement)
  {
    if (getDebug())
    {
      *debugStream_ << "---------------------- Ekf::correct ----------------------\n";
      *debugStream_ << "Measurement is:\n";
      *debugStream_ << measurement.measurement_ << "\n";
      *debugStream_ << "Measurement covariance is:\n";
      *debugStream_ << measurement.covariance_ << "\n";
    }

    // We don't want to update everything, so we need to build matrices that only update
    // the measured parts of our state vector

    // First, determine how many state vector values we're updating
    std::vector<size_t> updateIndices;
    for (size_t i = 0; i < measurement.updateVector_.size(); ++i)
    {
      if (measurement.updateVector_[i])
      {
        // Handle nan and inf values in measurements
        if (std::isnan(measurement.measurement_(i)) || std::isnan(measurement.covariance_(i,i)))
        {
          if (getDebug())
          {
            *debugStream_ << "Value at index " << i << " was nan. Excluding from update.\n";
          }
        }
        else if (std::isinf(measurement.measurement_(i)) || std::isinf(measurement.covariance_(i,i)))
        {
          if (getDebug())
          {
            *debugStream_ << "Value at index " << i << " was inf. Excluding from update.\n";
          }
        }
        else
        {
          updateIndices.push_back(i);
        }
      }
    }

    if (getDebug())
    {
      *debugStream_ << "Update indices are:\n";
      *debugStream_ << updateIndices << "\n";
    }

    size_t updateSize = updateIndices.size();

    // Now set up the relevant matrices
    Eigen::VectorXd stateSubset(updateSize);                             // x (in most literature)
    Eigen::VectorXd measurementSubset(updateSize);                       // z
    Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize); // R
    Eigen::MatrixXd stateToMeasurementSubset(updateSize, state_.rows()); // H
    Eigen::MatrixXd kalmanGainSubset(state_.rows(), updateSize);         // K
    Eigen::VectorXd innovationSubset(updateSize);                        // z - Hx

    stateSubset.setZero();
    measurementSubset.setZero();
    measurementCovarianceSubset.setZero();
    stateToMeasurementSubset.setZero();
    kalmanGainSubset.setZero();
    innovationSubset.setZero();

    // Now build the sub-matrices from the full-sized matrices
    for (size_t i = 0; i < updateSize; ++i)
    {
      measurementSubset(i) = measurement.measurement_(updateIndices[i]);
      stateSubset(i) = state_(updateIndices[i]);

      for (size_t j = 0; j < updateSize; ++j)
      {
        measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]);
      }

      // Handle negative (read: bad) covariances in the measurement. Rather
      // than exclude the measurement or make up a covariance, just take
      // the absolute value.
      if (measurementCovarianceSubset(i, i) < 0)
      {
        if (getDebug())
        {
          *debugStream_ << "WARNING: Negative covariance for index " << i << " of measurement (value is " << measurementCovarianceSubset(i, i)
            << "). Using absolute value...\n";
        }

        measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i));
      }

      // If the measurement variance for a given variable is very
      // near 0 (as in e-50 or so) and the variance for that
      // variable in the covariance matrix is also near zero, then
      // the Kalman gain computation will blow up. Really, no
      // measurement can be completely without error, so enforce a
      // small lower bound on the variance in that case.
      if (measurementCovarianceSubset(i, i) < 1e-6)
      {
        measurementCovarianceSubset(i, i) = 1e-6;

        if (getDebug())
        {
          *debugStream_ << "WARNING: measurement had very small error covariance for index " << i << ". Adding some noise to maintain filter stability.\n";
        }
      }
    }

    // The state-to-measurement function, h, will now be a measurement_size x full_state_size
    // matrix, with ones in the (i, i) locations of the values to be updated
    for (size_t i = 0; i < updateSize; ++i)
    {
      stateToMeasurementSubset(i, updateIndices[i]) = 1;
    }

    if (getDebug())
    {
      *debugStream_ << "Current state subset is:\n";
      *debugStream_ << stateSubset << "\n";
      *debugStream_ << "Measurement subset is:\n";
      *debugStream_ << measurementSubset << "\n";
      *debugStream_ << "Measurement covariance subset is:\n";
      *debugStream_ << measurementCovarianceSubset << "\n";
      *debugStream_ << "State-to-measurement subset is:\n";
      *debugStream_ << stateToMeasurementSubset << "\n";
    }

    // (1) Compute the Kalman gain: K = (PH') / (HPH' + R)
    kalmanGainSubset = estimateErrorCovariance_ * stateToMeasurementSubset.transpose()
      * (stateToMeasurementSubset * estimateErrorCovariance_ * stateToMeasurementSubset.transpose() + measurementCovarianceSubset).inverse();

    // (2) Apply the gain to the difference between the state and measurement: x = x + K(z - Hx)
    innovationSubset = (measurementSubset - stateSubset);

    // Wrap angles in the innovation
    for (size_t i = 0; i < updateSize; ++i)
    {
      if (updateIndices[i] == StateMemberRoll || updateIndices[i] == StateMemberPitch || updateIndices[i] == StateMemberYaw)
      {
        if (innovationSubset(i) < -pi_)
        {
          innovationSubset(i) += tau_;
        }
        else if (innovationSubset(i) > pi_)
        {
          innovationSubset(i) -= tau_;
        }
      }
    }

    state_ = state_ + kalmanGainSubset * innovationSubset;

    // (3) Update the estimate error covariance using the Joseph form: (I - KH)P(I - KH)' + KRK'
    Eigen::MatrixXd gainResidual = identity_ - kalmanGainSubset * stateToMeasurementSubset;
    estimateErrorCovariance_ = gainResidual * estimateErrorCovariance_ * gainResidual.transpose() +
        kalmanGainSubset * measurementCovarianceSubset * kalmanGainSubset.transpose();

    // Handle wrapping of angles
    wrapStateAngles();

    if (getDebug())
    {
      *debugStream_ << "Kalman gain subset is:\n";
      *debugStream_ << kalmanGainSubset << "\n";
      *debugStream_ << "Innovation:\n";
      *debugStream_ << innovationSubset << "\n\n";
      *debugStream_ << "Corrected full state is:\n";
      *debugStream_ << state_ << "\n";
      *debugStream_ << "Corrected full estimate error covariance is:\n";
      *debugStream_ << estimateErrorCovariance_ << "\n";
      *debugStream_ << "Last measurement time is:\n";
      *debugStream_ << std::setprecision(20) << lastMeasurementTime_ << "\n";
      *debugStream_ << "\n---------------------- /Ekf::correct ----------------------\n";
    }
  }
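
The correction above follows the textbook EKF equations with a Joseph-form covariance update. A compact, self-contained sketch of just those equations; the names are illustrative and this is not the original class API:

#include <Eigen/Dense>

// Sketch of the update used above (x: state, P: covariance, z: measurement subset,
// H: state-to-measurement subset, R: measurement covariance):
//   K = P H^T (H P H^T + R)^{-1}
//   x <- x + K (z - H x)
//   P <- (I - K H) P (I - K H)^T + K R K^T   (Joseph form keeps P symmetric PSD)
void ekfCorrect(Eigen::VectorXd& x, Eigen::MatrixXd& P,
                const Eigen::VectorXd& z, const Eigen::MatrixXd& H,
                const Eigen::MatrixXd& R) {
  const Eigen::MatrixXd S = H * P * H.transpose() + R;
  const Eigen::MatrixXd K = P * H.transpose() * S.inverse();
  x += K * (z - H * x);
  const Eigen::MatrixXd IKH =
      Eigen::MatrixXd::Identity(x.size(), x.size()) - K * H;
  P = IKH * P * IKH.transpose() + K * R * K.transpose();
}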
Example No. 10
File: svm.cpp Project: cerdogan/Job
/// Find the plane parameters by computing the Lagrangian coefficients from the solution of the
/// quadratic program
Eigen::Vector3d lagrangian () {

	bool dbg = false;

	// Create the cost function
	int N = data.size();
	double H_ [N * N], f_ [N];
	for(int i = 0; i < N; i++) {
		for(int j = 0; j < N; j++) 
			H_[i * N + j] = data[i].second * data[j].second * data[i].first.dot(data[j].first);
		f_[i] = -1.0;
		H_[i * (N+1)] += 1e-8;
	}
	QuadProgPP::Matrix <double> H (&(H_[0]), N, N);
	QuadProgPP::Vector <double> f (&(f_[0]), N);
	if(dbg) cout << "H: " << H << endl;
	if(dbg) cout << "f: " << f << endl;

	// Create the inequality constraints (alpha_i >= 0)
	double I_ [N * N], i0_ [N];
	for(int i = 0; i < N * N; i++) I_[i] = 0.0;  // zero-initialize; only the diagonal is set below
	for(int i = 0; i < N; i++) {
		I_[i * (N+1)] = 1.0;
		i0_[i] = 0.0;
	}
	QuadProgPP::Matrix <double> I (&(I_[0]), N, N);
	QuadProgPP::Vector <double> i0 (&(i0_[0]), N);
	if(dbg) cout << "I: " << I << endl;
	if(dbg) cout << "i0: " << i0 << endl;

	// Create the equality constraint ((sum_i alpha_i * y_i) = 0)
	double E_ [N], e0_ [1];
	for(int i = 0; i < N; i++) E_[i] = data[i].second;
	e0_[0] = 0.0;
	QuadProgPP::Matrix <double> E (&(E_[0]), N, 1);
	QuadProgPP::Vector <double> e0 (&(e0_[0]), 1);
	if(dbg) cout << "E: " << E << endl;
	if(dbg) cout << "e0: " << e0 << endl;

	// Solve the problem
	QuadProgPP::Vector <double> x;
	solve_quadprog(H, f, E, e0, I, i0, x);
	if(dbg) cout << "x: " << x << endl;

	// Compute the line direction 
	Eigen::Vector2d w (0, 0);
	Eigen::VectorXd x_ (N), y(N);
	for(int i = 0; i < N; i++) {
		w += x[i] * data[i].second * data[i].first;
		x_(i) = x[i];
		y(i) = data[i].second;
	}
	if(dbg) cout << "w: " << w.transpose() << endl;

	// Compute the line intersection
	int minPos = 0;  // index of a positive support vector (alpha_i > 0.1, y_i > 0)
	for(int i = 0; i < N; i++) {
		if((x[i] > 0.1) && (data[i].second > 0)) {
			minPos = i;
			break;
		}
	}
	Eigen::MatrixXd X (N,2), G(N,N);
	for(int i = 0; i < N; i++) X.row(i) = data[i].first;
	G = X * X.transpose();
	double b = 1 - (G.row(minPos) * (x_.cwiseProduct(y)));
	if(dbg) printf("b: %lf\n", b);

	return Eigen::Vector3d(w(0), w(1), b);
}
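
For reference, the quadratic program assembled above is the standard hard-margin SVM dual; in the code's notation H_ij = y_i y_j x_i.x_j and f_i = -1. A sketch of the problem and the recovery of (w, b), stated in LaTeX rather than quoted from the source:

% Hard-margin SVM dual solved by the QuadProg++ call above:
\min_{\alpha}\ \tfrac{1}{2}\,\alpha^{\top} H \alpha - \mathbf{1}^{\top}\alpha
\quad \text{s.t.}\quad \sum_i \alpha_i y_i = 0,\ \ \alpha_i \ge 0;
\qquad
w = \sum_i \alpha_i y_i x_i,
\qquad
b = 1 - w^{\top} x_{i^{*}} \ \text{for a support vector } i^{*} \text{ with } y_{i^{*}} = +1.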
Example No. 11
int main() {
	
	/* Set up module test environment */
	
	std::ofstream  ofs ("./output/mt_results.txt", std::ofstream::out);
	
	/* TC_1: Open two random MIT-BIH measurements */
	ofs<<"**************** MT TC 1 ****************"<<std::endl;
	ofs<<"testcase 1: open two random MIT-BIH measurements"<<std::endl;
	
	EcgSigPrep esp_1(MT_MIT_BIH_MEASUREMENT_NAME_1_STR, MT_NUM_OF_LEADS_INT, MT_NUM_OF_SAMPLES_1_INT);
	EcgSigPrep esp_2(MT_MIT_BIH_MEASUREMENT_NAME_2_STR, MT_NUM_OF_LEADS_INT, MT_NUM_OF_SAMPLES_2_INT);
	
	ofs<<"testcase 1: DONE"<<std::endl<<std::endl;
	
	/* TC_2: Print out the first loaded beat from the first recording. Then load the next beat and print that out as well. */
	
	ofs<<"**************** MT TC 2 ****************"<<std::endl;
	ofs<<"testcase 2: Print out the first loaded beat from the first recording"<<std::endl;
	
	Eigen::MatrixXd signal = *(esp_1.getSignal());
	
	ofs<<signal.transpose()<<std::endl;
	
	ofs<<"testcase 2: Load next beat and print it"<<std::endl;
	
	esp_1.getNextSegment();
	signal = *(esp_1.getSignal());
	ofs<<signal.transpose()<<std::endl;
	
	ofs<<"testcase 2: DONE"<<std::endl<<std::endl;	
	
	/* TC_3: Set the dilatation and translation of the second beat. Print out results. */
	
	ofs<<"**************** MT TC 3 ****************"<<std::endl;
	ofs<<"testcase 3: Apply dilatation and translation to signal. Print results."<<std::endl;
	
	Hermite H(signal.cols());
	Eigen::MatrixXd dilatTransSig = signal;
	dilatTransSig = esp_1.setDilatTrans(MT_SAMPLE_DILATATION_DOUBLE, MT_SAMPLE_TRANSLATION_DOUBLE, H.get_ort_fun_roots(), dilatTransSig);
	
	ofs<<dilatTransSig.transpose()<<std::endl;
	
	ofs<<"testcase 3: DONE"<<std::endl<<std::endl;	
	
	/* TC_4: Test getter functions. Print results. */
	
	ofs<<"**************** MT TC 4 ****************"<<std::endl;
	ofs<<"testcase 4: Test getter functions."<<std::endl;
	
	ofs<<"sigPrep::getSignal()"<<std::endl;
	ofs<<esp_1.getSignal()->transpose()<<std::endl<<std::endl;
	
	ofs<<"sigPrep::getEntireSignal()"<<std::endl;
	ofs<<esp_1.getEntireSignal()->transpose()<<std::endl<<std::endl;
	
	ofs<<"sigPrep::getSigFirstVal()"<<std::endl;
	ofs<<esp_1.getSigFirstVal()<<std::endl<<std::endl;
	
	ofs<<"sigPrep::getSigLastVal()"<<std::endl;
	ofs<<esp_1.getSigLastVal()<<std::endl<<std::endl;
	
	ofs<<"sigPrep::getSigMaxVal()"<<std::endl;
	ofs<<esp_1.getSigMaxVal()<<std::endl<<std::endl;
	
	ofs<<"EcgsigPrep::getDilat()"<<std::endl;
	ofs<<esp_1.getDilat()<<std::endl<<std::endl;
	
	ofs<<"EcgsigPrep::getTrans()"<<std::endl;
	ofs<<esp_1.getTrans()<<std::endl<<std::endl;
	
	ofs<<"testcase 4: DONE"<<std::endl<<std::endl;	
	
	/* Clean up module test environment */
	ofs.close();
	
	return 0;
}
Example No. 12
Eigen::MatrixXd pseudoInverse( Eigen::MatrixXd &A)
{
    Eigen::MatrixXd B = (A.transpose()*A).inverse()*A.transpose();
    return B;
}
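
The normal-equations form above only exists when A has full column rank and roughly squares the condition number of A. A common, more robust alternative is the SVD-based Moore-Penrose pseudoinverse; the sketch below is illustrative and not part of the original project:

#include <Eigen/Dense>

// Sketch: Moore-Penrose pseudoinverse via SVD. Singular values below tol are
// treated as zero, so rank-deficient A is handled gracefully.
Eigen::MatrixXd pseudoInverseSvd(const Eigen::MatrixXd& A, double tol = 1e-10) {
  Eigen::JacobiSVD<Eigen::MatrixXd> svd(A, Eigen::ComputeThinU | Eigen::ComputeThinV);
  Eigen::VectorXd sv = svd.singularValues();
  Eigen::VectorXd invSv = Eigen::VectorXd::Zero(sv.size());
  for (int i = 0; i < sv.size(); ++i)
    if (sv(i) > tol) invSv(i) = 1.0 / sv(i);
  return svd.matrixV() * invSv.asDiagonal() * svd.matrixU().transpose();
}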
Example No. 13
		void KPLSModel::train()
		{	
			if (descriptor_matrix_.cols() == 0)
			{
				throw Exception::InconsistentUsage(__FILE__, __LINE__, "Data must be read into the model before training!"); 
			}
		// 	if (descriptor_matrix_.cols() < no_components_)
		// 	{
		// 		throw Exception::TooManyPLSComponents(__FILE__, __LINE__, no_components_, descriptor_matrix_.cols());
		// 	}
			
			kernel->calculateKernelMatrix(descriptor_matrix_, K_);

			Eigen::MatrixXd P;  // Matrix P saves all vectors p

			int cols = K_.cols();
			
			// determine the number of components that are to be created.
			// no_components_ contains the number of components desired by the user, 
			// but obviously we cannot calculate more PLS components than features
			int features = descriptor_matrix_.cols();
			unsigned int components_to_create = no_components_;
			if (features < no_components_) components_to_create = features; 

			U_.resize(K_.rows(), components_to_create);
			loadings_.resize(cols, components_to_create);
			weights_.resize(Y_.cols(), components_to_create);
			latent_variables_.resize(K_.rows(), components_to_create);
			P.resize(cols, components_to_create);
			
			Eigen::VectorXd w;
			Eigen::VectorXd t;
			Eigen::VectorXd c;
			Eigen::VectorXd u = Y_.col(0);

			Eigen::VectorXd u_old;
			
			for (unsigned int j = 0; j < components_to_create; j++)
			{
				for (int i = 0; i < 10000 ; i++)
				{	
					w = K_.transpose()*u / Statistics::scalarProduct(u);
					w = w / Statistics::euclNorm(w);
					t = K_*w;
					c = Y_.transpose()*t / Statistics::scalarProduct(t);
					u_old = u;
					u = Y_*c / Statistics::scalarProduct(c); 
				
					if (Statistics::euclDistance(u, u_old)/Statistics::euclNorm(u) > 0.0000001) 
					{ 
						continue;				
					}
					else  // if u has converged
					{
						break;
					}
				}

				Eigen::VectorXd p = K_.transpose() * t / Statistics::scalarProduct(t);
				K_ -= t * p.transpose();

				U_.col(j) = u;
				loadings_.col(j) = w;
				weights_.col(j) = c;
				P.col(j) = p;
				latent_variables_.col(j) = t;
			}

		// 	try // p's are not orthogonal to each other, so that in rare cases P.t()*loadings_ is not invertible
		// 	{
		// 		loadings_ = loadings_*(P.t()*loadings_).i();
		// 	}
		// 	catch(BALL::Exception::MatrixIsSingular e)
		// 	{
		// 		Eigen::MatrixXd I; I.setToIdentity(P.cols());
		// 		I *= 0.001;
		// 		loadings_ = loadings_*(P.t()*loadings_+I).i();
		// 	}

			Eigen::MatrixXd m = P.transpose()*loadings_;
			training_result_ = loadings_*m.colPivHouseholderQr().solve(weights_.transpose());

			calculateOffsets();
		}
Example No. 14
// Get the total hamiltonian matrix element between phi1 and phi2
double hamiltonianElement(int N, BasisFunction& phi1, BasisFunction& phi2,
						  Eigen::MatrixXd& R, Eigen::MatrixXd& L2, Eigen::MatrixXd& M,
						  std::vector<double> q, Eigen::MatrixXd& Smat, int s_i, int s_j)
{

	// Construct the double-primed matrices
	Eigen::MatrixXd A_dp, B_dp, C_dp, k_dp;
	A_dp = phi1.getA() + phi2.getA();
	B_dp = phi1.getB() + phi2.getB();
	C_dp = phi1.getC() + phi2.getC();
	k_dp = phi1.getk() + phi2.getk();

	// Diagonalise A''
	Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> solver(A_dp);
	Eigen::MatrixXd U = solver.eigenvectors(); // Eigenvectors
	Eigen::MatrixXd D = solver.eigenvalues().asDiagonal(); // Eigenvalues

	// Compute determinant and inverse of A''
	Eigen::MatrixXd D_inv = D;
	double detA_dp = 1.0;
	for (int i = 0; i < N; i++){
		D_inv(i, i) = 1.0/D(i, i);
		detA_dp *= D(i, i);
	}
	Eigen::MatrixXd A_dp_inv = U * D_inv * U.transpose();

	// Calculate overlap integral
	
	double S = overlap(N, A_dp_inv, B_dp, C_dp, R, detA_dp, phi1.getNorm(), phi2.getNorm(),
					   phi1.getJR(), phi2.getJR());
	Smat(s_i, s_j) = S;
	Smat(s_j, s_i) = S;
	
	// Kinetic energy integral
	double T = kinetic(N, phi1.getA(), phi2.getA(), A_dp, A_dp_inv, phi1.getk(), phi2.getk(),  k_dp, S, L2);

	// Drude potential energy integral
	double VD = drudePotential(A_dp_inv, B_dp, R, S, M, N);

	// Now calculate the coulombic potential integrals
	double VC = 0.0;
	double Rij = 0.0;
	std::vector<double> Rij_vec(3);
	
	for (int i = 0; i < N-1; i++){
		Rij_vec[0] = R(i, 0); Rij_vec[1] = R(i, 1); Rij_vec[2] = R(i, 2);
		Rij = R(i, 0)*R(i, 0) + R(i, 1)*R(i, 1) + R(i, 2)*R(i, 2);
		Rij = std::sqrt(Rij);
		
		// Get gi
		double gi = 1.0/A_dp_inv(i, i);
		
		for (int j = i+1; j < N; j++){
			// Add the Rij term
			double VC_temp = S/Rij;

			std::vector<double> Pa_vec(3), Pb_vec(3), Pc_vec(3);
			double Pa = 0, Pb = 0, Pc = 0;
			for (int m = 0; m < 3; m++){
				Pa_vec[m] = Rij_vec[m];
				Pb_vec[m] = Rij_vec[m];
				Pc_vec[m] = Rij_vec[m];
				for (int n = 0; n < N; n++){
					Pa_vec[m] += 0.5*A_dp_inv(i, n) * k_dp(n, m);
					Pb_vec[m] -= 0.5*A_dp_inv(j, n) * k_dp(n, m);
					Pc_vec[m] -= 0.5*(A_dp_inv(j, n) - A_dp_inv(i, n))*k_dp(n, m);
				}
				Pa += Pa_vec[m]*Pa_vec[m];
				Pb += Pb_vec[m]*Pb_vec[m];
				Pc += Pc_vec[m]*Pc_vec[m];
			}
			Pa = std::sqrt(Pa);
			Pb = std::sqrt(Pb);
			Pc = std::sqrt(Pc);

			// Get  gj, and gij
			double gj = 1.0/A_dp_inv(j, j);
			double gij = A_dp_inv(i, i) + A_dp_inv(j, j) - 2.0*A_dp_inv(i, j);
			gij = 1.0/gij;

			// Add the aij, bji, cij terms
			VC_temp += coulombPotential(Pc, gij, S);
			VC_temp -= coulombPotential(Pa, gi, S);
			VC_temp -= coulombPotential(Pb, gj, S);

			VC += q[i]*q[j]*VC_temp;
			// Update Rij
			if (j != N-1) { 
				Rij_vec[0] += R(j, 0); Rij_vec[1] += R(j, 1); Rij_vec[2] += R(j, 2);
				double temp = R(j, 0)*R(j, 0) +R(j, 1)*R(j, 1) + R(j, 2)*R(j, 2);
				Rij += std::sqrt(temp);
			}

		}
	}

	double h_el = T + VD + VC;
	
	if (std::isnan(h_el) || std::isinf(h_el)) {  h_el = 0.0; std::cout << "Dodgy hamiltonian element.\n"; }
	
	return h_el;
}
TEST(SparseMatrixFunctionTests, testSchurComplement1)
{
  try {
    using namespace aslam::backend;
    typedef sparse_block_matrix::SparseBlockMatrix<Eigen::MatrixXd> SparseBlockMatrix;
    // Create the sparse Hessian. Two dense blocks. Three sparse.
    int structure[5] = {2, 2, 3, 3, 3};
    std::partial_sum(structure, structure + 5, structure);
    int marginalizedStartingBlock = 2;
    int marginalizedStartingIndex = structure[ marginalizedStartingBlock - 1 ];
    double lambda = 1;
    SparseBlockMatrix H(structure, structure, 5, 5, true);
    Eigen::VectorXd e(H.rows());
    e.setRandom();
    Eigen::VectorXd b(H.rowBaseOfBlock(marginalizedStartingBlock));
    b.setZero();
    boost::shared_ptr<SparseBlockMatrix> A(H.slice(0, marginalizedStartingBlock, 0, marginalizedStartingBlock, true));
    ASSERT_EQ(marginalizedStartingBlock, A->bRows());
    ASSERT_EQ(marginalizedStartingBlock, A->bCols());
    A->clear(false);
    std::vector<Eigen::MatrixXd> invVi;
    invVi.resize(H.bRows() - marginalizedStartingBlock);
    // Fill in H.
    *H.block(0, 0, true) = sm::eigen::randomCovariance<2>() * 100;
    *H.block(1, 1, true) = sm::eigen::randomCovariance<2>() * 100;
    *H.block(2, 2, true) = sm::eigen::randomCovariance<3>() * 100;
    *H.block(3, 3, true) = sm::eigen::randomCovariance<3>() * 100;
    *H.block(4, 4, true) = sm::eigen::randomCovariance<3>() * 100;
    // Start with two off diagonals.
    H.block(0, 4, true)->setRandom();
    H.block(0, 4, true)->array() *= 100;
    H.block(1, 4, true)->setRandom();
    H.block(1, 4, true)->array() *= 100;
    //std::cout << "H:\n" << H << std::endl;
    applySchurComplement(H,
                         e,
                         lambda,
                         marginalizedStartingBlock,
                         true,
                         *A,
                         invVi,
                         b);
    Eigen::MatrixXd Hd = H.toDense();
    Eigen::MatrixXd U = Hd.topLeftCorner(marginalizedStartingIndex, marginalizedStartingIndex);
    Eigen::MatrixXd V = Hd.bottomRightCorner(H.rows() - marginalizedStartingIndex, H.rows() - marginalizedStartingIndex);
    Eigen::MatrixXd W = Hd.topRightCorner(marginalizedStartingIndex, H.rows() - marginalizedStartingIndex);
    V.diagonal().array() += lambda;
    Eigen::MatrixXd AA = U - W * V.inverse() * W.transpose();
    AA.diagonal().array() += lambda;
    Eigen::VectorXd epsSparse = e.tail(e.size() - marginalizedStartingIndex);
    Eigen::VectorXd epsDense = e.head(marginalizedStartingIndex);
    Eigen::VectorXd bb = epsDense - W * V.inverse() * epsSparse;
    {
      SCOPED_TRACE("");
      Eigen::MatrixXd Asa = A->toDense().selfadjointView<Eigen::Upper>();
      sm::eigen::assertNear(Asa, AA, 1e-12, SM_SOURCE_FILE_POS, "Testing the lhs schur complement");
    }
    {
      SCOPED_TRACE("");
      sm::eigen::assertNear(b, bb, 1e-12, SM_SOURCE_FILE_POS, "Testing the rhs schur complement");
    }
    // Let's try it again to make sure stuff gets initialized correctly.
    applySchurComplement(H,
                         e,
                         lambda,
                         marginalizedStartingBlock,
                         true,
                         *A,
                         invVi,
                         b);
    {
      SCOPED_TRACE("");
      Eigen::MatrixXd Asa = A->toDense().selfadjointView<Eigen::Upper>();
      sm::eigen::assertNear(Asa, AA, 1e-12, SM_SOURCE_FILE_POS, "Testing the lhs schur complement");
    }
    {
      SCOPED_TRACE("");
      sm::eigen::assertNear(b, bb, 1e-12, SM_SOURCE_FILE_POS, "Testing the rhs schur complement");
    }
    // Now we check the update function.
    Eigen::VectorXd dx(marginalizedStartingIndex);
    dx.setRandom();
    Eigen::VectorXd denseDs = V.inverse() * (epsSparse - W.transpose() * dx);
    for (int i = 0; i < H.bRows() - marginalizedStartingBlock; ++i) {
      Eigen::VectorXd outDsi;
      buildDsi(i, H, e, marginalizedStartingBlock, invVi[i], dx, outDsi);
      Eigen::VectorXd dsi = denseDs.segment(H.rowBaseOfBlock(i + marginalizedStartingBlock) - marginalizedStartingIndex, H.rowsOfBlock(i + marginalizedStartingBlock));
      sm::eigen::assertNear(outDsi, dsi, 1e-12, SM_SOURCE_FILE_POS, "Checking the update step calculation");
    }
  } catch (const std::exception& e) {
    FAIL() << "Exception: " << e.what();
  }
}
void propa_2d()
{
    trace.beginBlock("2d propagation");

    typedef DiscreteExteriorCalculus<2, 2, EigenLinearAlgebraBackend> Calculus;
    typedef DiscreteExteriorCalculusFactory<EigenLinearAlgebraBackend> CalculusFactory;

    {
        trace.beginBlock("solving time dependent equation");

        const Calculus::Scalar cc = 8; // px/s
        trace.info() << "cc = " << cc << endl;

        const Z2i::Domain domain(Z2i::Point(0,0), Z2i::Point(29,29));
        const Calculus calculus = CalculusFactory::createFromDigitalSet(generateDiskSet(domain), false);

        //! [time_laplace]
        const Calculus::DualIdentity0 laplace = calculus.laplace<DUAL>() + 1e-8 * calculus.identity<0, DUAL>();
        //! [time_laplace]
        trace.info() << "laplace = " << laplace << endl;

        trace.beginBlock("finding eigen pairs");
        //! [time_eigen]
        typedef Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> EigenSolverMatrix;
        const EigenSolverMatrix eigen_solver(laplace.myContainer);

        const Eigen::VectorXd eigen_values = eigen_solver.eigenvalues();
        const Eigen::MatrixXd eigen_vectors = eigen_solver.eigenvectors();
        //! [time_eigen]
        trace.info() << "eigen_values_range = " << eigen_values.minCoeff() << "/" << eigen_values.maxCoeff() << endl;
        trace.endBlock();

        //! [time_omega]
        const Eigen::VectorXd angular_frequencies = cc * eigen_values.array().sqrt();
        //! [time_omega]

        Eigen::VectorXcd initial_wave = Eigen::VectorXcd::Zero(calculus.kFormLength(0, DUAL));
        //set_initial_phase_null(calculus, initial_wave);
        set_initial_phase_dir_yy(calculus, initial_wave);

        {
            Board2D board;
            board << domain;
            board << CustomStyle("KForm", new KFormStyle2D(-1, 1));
            board << Calculus::DualForm0(calculus, initial_wave.real());
            board.saveSVG("propagation_time_wave_initial_coarse.svg");
        }

        //! [time_init_proj]
        Eigen::VectorXcd initial_projections = eigen_vectors.transpose() * initial_wave;
        //! [time_init_proj]

        // low pass
        //! [time_low_pass]
        const Calculus::Scalar lambda_cutoff = 4.5;
        const Calculus::Scalar angular_frequency_cutoff = 2*M_PI * cc / lambda_cutoff;
        int cutted = 0;
        for (int kk=0; kk<initial_projections.rows(); kk++)
        {
            const Calculus::Scalar angular_frequency = angular_frequencies(kk);
            if (angular_frequency < angular_frequency_cutoff) continue;
            initial_projections(kk) = 0;
            cutted ++;
        }
        //! [time_low_pass]
        trace.info() << "cutted = " << cutted << "/" << initial_projections.rows() << endl;

        {
            const Eigen::VectorXcd wave = eigen_vectors * initial_projections;
            Board2D board;
            board << domain;
            board << CustomStyle("KForm", new KFormStyle2D(-1, 1));
            board << Calculus::DualForm0(calculus, wave.real());
            board.saveSVG("propagation_time_wave_initial_smoothed.svg");
        }

        const int kk_max = 60;
        trace.progressBar(0,kk_max);
        for (int kk=0; kk<kk_max; kk++)
        {
            //! [time_solve_time]
            const Calculus::Scalar time = kk/20.;
            const Eigen::VectorXcd current_projections = (angular_frequencies * std::complex<double>(0,time)).array().exp() * initial_projections.array();
            const Eigen::VectorXcd current_wave = eigen_vectors * current_projections;
            //! [time_solve_time]

            std::stringstream ss;
            ss << "propagation_time_wave_solution_" << std::setfill('0') << std::setw(3) << kk << ".svg";

            Board2D board;
            board << domain;
            board << CustomStyle("KForm", new KFormStyle2D(-1, 1));
            board << Calculus::DualForm0(calculus, current_wave.real());
            board.saveSVG(ss.str().c_str());

            trace.progressBar(kk+1,kk_max);
        }
        trace.info() << endl;

        trace.endBlock();
    }

    {
        trace.beginBlock("forced oscillations");

        const Z2i::Domain domain(Z2i::Point(0,0), Z2i::Point(50,50));
        const Calculus calculus = CalculusFactory::createFromDigitalSet(generateDiskSet(domain), false);

        const Calculus::DualIdentity0 laplace = calculus.laplace<DUAL>();
        trace.info() << "laplace = " << laplace << endl;

        trace.beginBlock("finding eigen pairs");
        typedef Eigen::SelfAdjointEigenSolver<Eigen::MatrixXd> EigenSolverMatrix;
        const EigenSolverMatrix eigen_solver(laplace.myContainer);

        const Eigen::VectorXd laplace_eigen_values = eigen_solver.eigenvalues();
        const Eigen::MatrixXd laplace_eigen_vectors = eigen_solver.eigenvectors();
        trace.info() << "eigen_values_range = " << laplace_eigen_values.minCoeff() << "/" << laplace_eigen_values.maxCoeff() << endl;
        trace.endBlock();

        Calculus::DualForm0 concentration(calculus);
        {
            const Z2i::Point point(25,25);
            const Calculus::Cell cell = calculus.myKSpace.uSpel(point);
            const Calculus::Index index = calculus.getCellIndex(cell);
            concentration.myContainer(index) = 1;
        }

        {
            Board2D board;
            board << domain;
            board << concentration;
            board.saveSVG("propagation_forced_concentration.svg");
        }

        for (int ll=0; ll<6; ll++)
        {
            //! [forced_lambda]
            const Calculus::Scalar lambda = 4*20.75/(1+2*ll);
            //! [forced_lambda]
            trace.info() << "lambda = " << lambda << endl;

            //! [forced_dalembert_eigen]
            const Eigen::VectorXd dalembert_eigen_values = (laplace_eigen_values.array() - (2*M_PI/lambda)*(2*M_PI/lambda)).array().inverse();
            const Eigen::MatrixXd concentration_to_wave = laplace_eigen_vectors * dalembert_eigen_values.asDiagonal() * laplace_eigen_vectors.transpose();
            //! [forced_dalembert_eigen]

            //! [forced_wave]
            Calculus::DualForm0 wave(calculus, concentration_to_wave * concentration.myContainer);
            //! [forced_wave]
            wave.myContainer /= wave.myContainer(calculus.getCellIndex(calculus.myKSpace.uSpel(Z2i::Point(25,25))));

            {
                trace.info() << "saving samples" << endl;
                const Calculus::Properties properties = calculus.getProperties();
                {
                    std::stringstream ss;
                    ss << "propagation_forced_samples_hv_" << ll << ".dat";
                    std::ofstream handle(ss.str().c_str());
                    for (int kk=0; kk<=50; kk++)
                    {
                        const Z2i::Point point_horizontal(kk,25);
                        const Z2i::Point point_vertical(25,kk);
                        const Calculus::Scalar xx = 2 * (kk/50. - .5);
                        handle << xx << " ";
                        handle << sample_dual_0_form<Calculus>(properties, wave, point_horizontal) << " ";
                        handle << sample_dual_0_form<Calculus>(properties, wave, point_vertical) << endl;
                    }
                }

                {
                    std::stringstream ss;
                    ss << "propagation_forced_samples_diag_" << ll << ".dat";
                    std::ofstream handle(ss.str().c_str());
                    for (int kk=0; kk<=50; kk++)
                    {
                        const Z2i::Point point_diag_pos(kk,kk);
                        const Z2i::Point point_diag_neg(kk,50-kk);
                        const Calculus::Scalar xx = 2 * sqrt(2) * (kk/50. - .5);
                        handle << xx << " ";
                        handle << sample_dual_0_form<Calculus>(properties, wave, point_diag_pos) << " ";
                        handle << sample_dual_0_form<Calculus>(properties, wave, point_diag_neg) << endl;
                    }
                }
            }

            {
                std::stringstream ss;
                ss << "propagation_forced_wave_" << ll << ".svg";
                Board2D board;
                board << domain;
                board << CustomStyle("KForm", new KFormStyle2D(-.5, 1));
                board << wave;
                board.saveSVG(ss.str().c_str());
            }
        }

        trace.endBlock();
    }
    trace.endBlock();
}
Example No. 17
void CDKF::measurementUpdate(const ros::Time& prev_stamp,
                             const ros::Time& current_stamp,
                             const double image_angular_velocity,
                             const IMUList& imu_rotations,
                             const bool calc_offset) {
  // convert tracked points to measurement
  Eigen::VectorXd real_measurement(kMeasurementSize);
  accessM(real_measurement, MEASURED_TIMESTAMP).array() =
      (current_stamp - zero_timestamp_).toSec();

  accessM(real_measurement, ANGULAR_VELOCITY).array() = image_angular_velocity;

  if (verbose_) {
    ROS_INFO_STREAM("Measured Values: \n" << real_measurement.transpose());
  }

  // create sigma points
  MeasurementSigmaPoints sigma_points(
      state_, cov_, measurement_noise_sd_, CDKF::stateToMeasurementEstimate,
      imu_rotations, zero_timestamp_, calc_offset);

  // get mean and cov
  Eigen::VectorXd predicted_measurement;
  sigma_points.calcEstimatedMean(&predicted_measurement);
  Eigen::MatrixXd innovation;
  sigma_points.calcEstimatedCov(&innovation);

  if (verbose_) {
    ROS_INFO_STREAM("Predicted Measurements: \n"
                    << predicted_measurement.transpose());
  }

  // calc mah distance
  Eigen::VectorXd diff = real_measurement - predicted_measurement;
  double mah_dist = std::sqrt(diff.transpose() * innovation.inverse() * diff);
  if (mah_dist > mah_threshold_) {
    ROS_WARN("Outlier detected, measurement rejected");
    return;
  }

  Eigen::MatrixXd cross_cov;
  sigma_points.calcEstimatedCrossCov(&cross_cov);

  Eigen::MatrixXd gain = cross_cov * innovation.inverse();

  const Eigen::VectorXd state_diff =
      gain * (real_measurement - predicted_measurement);

  state_ += state_diff;

  cov_ -= gain * innovation * gain.transpose();

  if (verbose_) {
    ROS_INFO_STREAM("Updated State: \n" << state_.transpose());
    ROS_INFO_STREAM("Updated Cov: \n" << cov_);
  }

  // guard against precision issues
  constexpr double kMaxTime = 10000.0;
  if (accessS(state_, STATE_TIMESTAMP)[0] > kMaxTime) {
    rezeroTimestamps(current_stamp);
  }
}
Example No. 18
/**
 * Generates a bathymetryGrid by reading data from a local file.  Results are
 * dumped into topographyGrid.
 * @param topographyGrid A pointer to a zero-initialized Grid of size
 *      numRows x numCols.
 * @param inputFile The relative path to the topography file you wish to use.
 * @param inputFileType Use 'netcdf' if your file is in netcdf format.  Use
 *      'asc' if the file is a matrix of ascii values in the GIS asc format.
 * @param startRow The row to start reading data from.
 * @param startCol The col to start reading data from.
 * @param numRows The desired number of rows of data to read.
 * @param numCols The desired number of cols of data to read.
 * @param seriesName If inputFileType was set to 'netcdf', this should be set
 *      to the name of the series you wish to use.
 * @param timestamp A timestamp value used for error reporting.
 */
void getBathy(Grid* topographyGrid, std::string inputFile,
              std::string inputFileType, size_t startRow, size_t startCol,
              size_t numRows, size_t numCols, std::string seriesName,
              std::string timestamp) {
    // This will be the netCDF ID for the file and data variable.
    Eigen::MatrixXd temp;
    int ncid, varid, retval = -100;

    // NetCDF
    if (inputFileType.compare("netcdf") == 0) {
        // Data will be read in column major, so set up a matrix of inverse
        //    size to receive it.
        temp.resize(numCols, numRows);
        // Open the file. NC_NOWRITE tells netCDF we want read-only access to
        //    the file.
        if ((retval = nc_open(inputFile.c_str(), NC_NOWRITE, &ncid))) {
            printError("ERROR: Can't open NetCDF File. Invalid inputFile path.",
                        retval, timestamp);
        }

        // Get the varid of the data variable, based on its name.
        if ((retval = nc_inq_varid(ncid, seriesName.c_str(), &varid))) {
            printError("ERROR: Can't access variable id.  Invalid seriesName.",
                        retval, timestamp);
        }
        // Read the data.
        try {
            // for whatever reason, this is in column, row order.
            static size_t start[] = {startRow, startCol};
            static size_t range[] = {numRows, numCols};
            retval = nc_get_vara_double(ncid, varid, start, range,
                                        temp.data());
            // TODO(Greg) Figure out a way to read data in row wise to avoid
            //             this transposition.
            topographyGrid->data.block(border, border, numRows, numCols) =
                            temp.transpose().block(0, 0, numRows, numCols);
        }
        catch (int i) {
            printError("ERROR: Error reading data.  Invalid file format.",
                        retval, timestamp);
        }
        // Close the file, freeing all resources.
        if ((retval = nc_close(ncid))) {
            printError("ERROR: Error closing the file.", retval, timestamp);
        }
    } else if (inputFileType.compare("asc") == 0) {
        // ASC
        temp.resize(numRows, numCols);
        std::ifstream input(inputFile);
        int null = 0;
        size_t numHeaderLines = 5,
               rowsLine = 1,
               colsLine = 0,
               nullLine = 5,
               cursor = 0,
               rows = 0,
               cols = 0,
               startRowIndex = startRow+numHeaderLines,
               endRowIndex = startRowIndex+numRows,
               i = 0,
               j = 0;
        std::string line = "";
        std::vector<std::string> vLine;
        if (input.is_open()) {
            for (cursor = 0; cursor < endRowIndex; cursor ++) {
                getline(input, line);

                if (cursor <= numHeaderLines) {
                    // Get the number of columns in the file
                    if (cursor == colsLine) {
                        vLine = split(&line, ' ');
                        if (!silent) {
                            std::cout << "\nAvailable Cols: " <<
                                         vLine[vLine.size()-1];
                        }
                        cols = stoi(vLine[vLine.size() - 1]);

                        if (cols < startCol + numCols) {
                            std::cout << "\nERROR, requested bathymetry " <<
                                    " grid column coordinates are out of" <<
                                    " bounds.\n";
                            exit(1);
                        }
                    } else if (cursor == rowsLine) {
                        // Get the number of rows in the file.
                        vLine = split(&line, ' ');
                        if (!silent) {
                        std::cout << "Available Rows:" <<
                                     vLine[vLine.size() - 1];
                        }
                        rows = stoi(vLine[vLine.size() - 1]);

                        if (rows < startRow + numRows) {
                            std::cout << "\nERROR, requested bathymetry" <<
                                    " grid row coordinates are out of" <<
                                    " bounds.\n";
                            exit(1);
                        }
                    } else if (cursor == nullLine) {
                        // Get the null value substitute
                        vLine = split(&line, ' ');
                        if (debug) {
                            std::cout << "Null values =" <<
                                         vLine[vLine.size() - 1] << "\n";
                        }
                        null = stoi(vLine[vLine.size() - 1]);
                    }
                } else if (cursor >= startRowIndex) {
                    vLine = split(&line, ' ');
                    for (i = startCol; i < startCol + numCols; i ++) {
                        // std::cout<<"accessing temp(" <<
                        // cursor-startRowIndex-1 << "," <<i-startCol<<")\n";
                        // std::cout<<"Cursor:"<<cursor<<"   SRI:" <<
                        // startRowIndex << "\n";
                        temp(cursor - startRowIndex, i - startCol) =
                                stod(vLine[i]);
                    }
                }
            }

            input.close();

            for (i = 0; i < numRows; i ++) {
                for (j = 0; j < numCols; j ++) {
                    if (temp(i, j) == null) {
                        temp(i, j) = 0;
                    }
                }
            }
            topographyGrid->data.block(border, border, numRows, numCols) =
                            temp.block(0, 0, numRows, numCols);
        } else {
            std::cout << "\nUnable to open bathymetry file: \"" << inputFile <<
                         "\"\n";
            exit(0);
        }
    } else {
        // Invalid filetype
        std::cout << "Bathymetry file type not supported.  Simulating" <<
                     "Bathymetry.\n";
        simulatetopographyGrid(topographyGrid, static_cast<int>(numRows),
                               static_cast<int>(numCols));
    }
    topographyGrid->clearNA();
    topographyGrid->data =
            topographyGrid->data.unaryExpr(
                    [](double depth) { return validateDepth(depth); });
    if (acousticParams["debug"] == "1") {
        // topographyGrid->printData();
        std::cout << "startx " << startCol << "\nXDist: "<< numCols <<
                  "\nstartY:  "<< startRow << "\nYDist: " << numRows << "\n";
        std::cout << "inputFileType: " << inputFileType << "\ninputFile: " <<
                  inputFile << "\nseriesName: " << seriesName << "\n";
        std::cout << "retval: " << retval << "\n" << "ncid: " << ncid << "\n\n";
    }
}
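
A minimal, self-contained sketch of the two post-processing steps above, assuming an Eigen::MatrixXd grid: the null marker is zeroed and the parsed block is copied into the interior of a zero-padded grid. The border width and null value below are illustrative, not taken from the project.

#include <Eigen/Dense>
#include <iostream>

int main() {
    const int numRows = 3, numCols = 4, border = 2;
    const double nullValue = -9999.0;   // hypothetical "no data" marker

    // Values as they might come out of the ASCII bathymetry file.
    Eigen::MatrixXd temp(numRows, numCols);
    temp << 1, 2, nullValue, 4,
            5, nullValue, 7, 8,
            9, 10, 11, 12;

    // Replace the null marker with 0, as the loader's double loop does.
    temp = (temp.array() == nullValue)
               .select(Eigen::MatrixXd::Zero(numRows, numCols), temp);

    // Copy the parsed block into the interior of a larger zero-padded grid.
    Eigen::MatrixXd grid = Eigen::MatrixXd::Zero(numRows + 2 * border,
                                                 numCols + 2 * border);
    grid.block(border, border, numRows, numCols) = temp;

    std::cout << grid << "\n";
    return 0;
}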
Ejemplo n.º 19
0
  void Ekf::correct(const Measurement &measurement)
  {
    FB_DEBUG("---------------------- Ekf::correct ----------------------\n" <<
             "State is:\n" << state_ << "\n"
             "Topic is:\n" << measurement.topicName_ << "\n"
             "Measurement is:\n" << measurement.measurement_ << "\n"
             "Measurement topic name is:\n" << measurement.topicName_ << "\n\n"
             "Measurement covariance is:\n" << measurement.covariance_ << "\n");

    // We don't want to update everything, so we need to build matrices that only update
    // the measured parts of our state vector. Throughout prediction and correction, we
    // attempt to maximize efficiency in Eigen.

    // First, determine how many state vector values we're updating
    std::vector<size_t> updateIndices;
    for (size_t i = 0; i < measurement.updateVector_.size(); ++i)
    {
      if (measurement.updateVector_[i])
      {
        // Handle nan and inf values in measurements
        if (std::isnan(measurement.measurement_(i)))
        {
          FB_DEBUG("Value at index " << i << " was nan. Excluding from update.\n");
        }
        else if (std::isinf(measurement.measurement_(i)))
        {
          FB_DEBUG("Value at index " << i << " was inf. Excluding from update.\n");
        }
        else
        {
          updateIndices.push_back(i);
        }
      }
    }

    FB_DEBUG("Update indices are:\n" << updateIndices << "\n");

    size_t updateSize = updateIndices.size();

    // Now set up the relevant matrices
    Eigen::VectorXd stateSubset(updateSize);                              // x (in most literature)
    Eigen::VectorXd measurementSubset(updateSize);                        // z
    Eigen::MatrixXd measurementCovarianceSubset(updateSize, updateSize);  // R
    Eigen::MatrixXd stateToMeasurementSubset(updateSize, state_.rows());  // H
    Eigen::MatrixXd kalmanGainSubset(state_.rows(), updateSize);          // K
    Eigen::VectorXd innovationSubset(updateSize);                         // z - Hx

    stateSubset.setZero();
    measurementSubset.setZero();
    measurementCovarianceSubset.setZero();
    stateToMeasurementSubset.setZero();
    kalmanGainSubset.setZero();
    innovationSubset.setZero();

    // Now build the sub-matrices from the full-sized matrices
    for (size_t i = 0; i < updateSize; ++i)
    {
      measurementSubset(i) = measurement.measurement_(updateIndices[i]);
      stateSubset(i) = state_(updateIndices[i]);

      for (size_t j = 0; j < updateSize; ++j)
      {
        measurementCovarianceSubset(i, j) = measurement.covariance_(updateIndices[i], updateIndices[j]);
      }

      // Handle negative (read: bad) covariances in the measurement. Rather
      // than exclude the measurement or make up a covariance, just take
      // the absolute value.
      if (measurementCovarianceSubset(i, i) < 0)
      {
        FB_DEBUG("WARNING: Negative covariance for index " << i <<
                 " of measurement (value is" << measurementCovarianceSubset(i, i) <<
                 "). Using absolute value...\n");

        measurementCovarianceSubset(i, i) = ::fabs(measurementCovarianceSubset(i, i));
      }

      // If the measurement variance for a given variable is very
      // near 0 (as in e-50 or so) and the variance for that
      // variable in the covariance matrix is also near zero, then
      // the Kalman gain computation will blow up. Really, no
      // measurement can be completely without error, so add a small
      // amount in that case.
      if (measurementCovarianceSubset(i, i) < 1e-9)
      {
        FB_DEBUG("WARNING: measurement had very small error covariance for index " << updateIndices[i] <<
                 ". Adding some noise to maintain filter stability.\n");

        measurementCovarianceSubset(i, i) = 1e-9;
      }
    }

    // The state-to-measurement function, h, will now be a measurement_size x full_state_size
    // matrix, with ones in the (i, i) locations of the values to be updated
    for (size_t i = 0; i < updateSize; ++i)
    {
      stateToMeasurementSubset(i, updateIndices[i]) = 1;
    }

    FB_DEBUG("Current state subset is:\n" << stateSubset <<
             "\nMeasurement subset is:\n" << measurementSubset <<
             "\nMeasurement covariance subset is:\n" << measurementCovarianceSubset <<
             "\nState-to-measurement subset is:\n" << stateToMeasurementSubset << "\n");

    // (1) Compute the Kalman gain: K = (PH') / (HPH' + R)
    Eigen::MatrixXd pht = estimateErrorCovariance_ * stateToMeasurementSubset.transpose();
    Eigen::MatrixXd hphrInv  = (stateToMeasurementSubset * pht + measurementCovarianceSubset).inverse();
    kalmanGainSubset.noalias() = pht * hphrInv;

    innovationSubset = (measurementSubset - stateSubset);

    // Wrap angles in the innovation
    for (size_t i = 0; i < updateSize; ++i)
    {
      if (updateIndices[i] == StateMemberRoll  ||
          updateIndices[i] == StateMemberPitch ||
          updateIndices[i] == StateMemberYaw)
      {
        while (innovationSubset(i) < -PI)
        {
          innovationSubset(i) += TAU;
        }

        while (innovationSubset(i) > PI)
        {
          innovationSubset(i) -= TAU;
        }
      }
    }
    
    // (2) Check Mahalanobis distance between mapped measurement and state.
    if (checkMahalanobisThreshold(innovationSubset, hphrInv, measurement.mahalanobisThresh_))
    {
      // (3) Apply the gain to the difference between the state and measurement: x = x + K(z - Hx)
      state_.noalias() += kalmanGainSubset * innovationSubset;

      // (4) Update the estimate error covariance using the Joseph form: (I - KH)P(I - KH)' + KRK'
      Eigen::MatrixXd gainResidual = identity_;
      gainResidual.noalias() -= kalmanGainSubset * stateToMeasurementSubset;
      estimateErrorCovariance_ = gainResidual * estimateErrorCovariance_ * gainResidual.transpose();
      estimateErrorCovariance_.noalias() += kalmanGainSubset *
                                            measurementCovarianceSubset *
                                            kalmanGainSubset.transpose();

      // Handle wrapping of angles
      wrapStateAngles();

      FB_DEBUG("Kalman gain subset is:\n" << kalmanGainSubset <<
               "\nInnovation is:\n" << innovationSubset <<
               "\nCorrected full state is:\n" << state_ <<
               "\nCorrected full estimate error covariance is:\n" << estimateErrorCovariance_ <<
               "\n\n---------------------- /Ekf::correct ----------------------\n");
    }
  }
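
For reference, a toy version of update steps (1)-(4) above for a 2-state filter with a single measured variable; the numbers are arbitrary and only the matrix roles (P, H, R, K, Joseph form) mirror the comments in Ekf::correct.

#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::Vector2d x(0.0, 1.0);                       // state
    Eigen::Matrix2d P = Eigen::Matrix2d::Identity();   // estimate error covariance

    // Measure only the first state variable.
    Eigen::Matrix<double, 1, 2> H;
    H << 1.0, 0.0;
    Eigen::Matrix<double, 1, 1> R;
    R << 0.25;                                         // measurement covariance
    Eigen::Matrix<double, 1, 1> z;
    z << 0.8;                                          // measurement

    // (1) K = P H' (H P H' + R)^-1
    Eigen::Matrix<double, 2, 1> PHt = P * H.transpose();
    Eigen::Matrix<double, 1, 1> S = H * PHt + R;
    Eigen::Matrix<double, 2, 1> K = PHt * S.inverse();

    // (3) x = x + K (z - H x)
    Eigen::Matrix<double, 1, 1> innovation = z - H * x;
    x += K * innovation;

    // (4) Joseph form: P = (I - K H) P (I - K H)' + K R K'
    Eigen::Matrix2d IKH = Eigen::Matrix2d::Identity() - K * H;
    P = IKH * P * IKH.transpose() + K * R * K.transpose();

    std::cout << "corrected state:\n" << x << "\nupdated covariance:\n" << P << "\n";
    return 0;
}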
Ejemplo n.º 20
0
void Deformable::projectPositions()
{
    if (mNumVertices <= 1) return;
    int i;//,j,k;
    
    // center of mass
    Eigen::Vector2d cm, originalCm;
    cm.setZero(); originalCm.setZero();
    double mass = 0.0;
    
    for (i = 0; i < mNumVertices; i++) {
        double m = mMasses(i,0);
        if (mFixed(i,0)) m *= 1000.0;
        mass += m;
        cm += mNewPos.row(i).transpose() * m;
        originalCm += mOriginalPos.row(i).transpose() * m;
        
    }
    //std::cout<<std::endl;
    cm /= mass;
    
    originalCm /= mass;
    
    Eigen::Matrix2d Apq;
    Eigen::Matrix2d Aqq;
    Eigen::Vector2d p;
    Eigen::Vector2d q;
    Apq.setZero();
    Aqq.setZero();
    
    for (i = 0; i < mNumVertices; i++) {
        p(0) = mNewPos(i,0) - cm(0);
        p(1) = mNewPos(i,1) - cm(1);
        
        q(0) = mOriginalPos(i,0) - originalCm(0);
        q(1) = mOriginalPos(i,1) - originalCm(1);
        double m = mMasses(i,0);
        Apq += m*p*q.transpose();
        Aqq += m*q*q.transpose();
    }
  
    /*if (!params.allowFlip && Apq.determinant() < 0.0f) {  	// prevent from flipping
        Apq(0,1) = -Apq(0,1);
        Apq(1,1) = -Apq(1,1);
    }*/
    
    //std::cout<<Apq<<std::endl;
    
    Eigen::Matrix2d R;
    Eigen::JacobiSVD<Eigen::MatrixXd> svd(Apq, Eigen::ComputeThinU | Eigen::ComputeThinV);
    Eigen::MatrixXd U = svd.matrixU();
    Eigen::MatrixXd V = svd.matrixV();
    R = U*V.transpose();
    if (!params.quadraticMatch) {	// --------- linear match
        
        
        Eigen::Matrix2d A = Aqq;
        

        Eigen::Matrix2d A_=A.inverse();
        /*std::cout<<A_.row(0)<<std::endl;
        std::cout<<A_.row(1)<<std::endl;*/
        A = Apq*A_;
        
        if (params.volumeConservation) {
            double det = A.determinant();
            if(det<0) det=-det;
            if (det != 0.0) {
                det = 1.0 / sqrt(fabs(det));
                if (det > 2.0) det = 2.0;
                A *= det;
            }
        }
        double one_beta = 1.0 - params.beta;
        Eigen::Matrix2d T = R * one_beta + A * params.beta;
        
        
        for (i = 0; i < mNumVertices; i++) {
            if (mFixed(i)) continue;
            q(0) = mOriginalPos(i,0) - originalCm(0);
            q(1) = mOriginalPos(i,1) - originalCm(1);
            Eigen::Vector2d Tq = T*q;
            mGoalPos(i,0) = Tq(0)+cm(0);
            mGoalPos(i,1) = Tq(1)+cm(1);
            //mGoalPos.row(i)=(R * (1.0f - params.beta) + A * params.beta)*q+cm;
            mNewPos.row(i) += (mGoalPos.row(i) - mNewPos.row(i)) * params.alpha;
            
            //std::cout<<T.row(0)<<std::endl;
            //std::cout<<T.row(1)<<std::endl;
        }
    }
}
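
The rotation extraction at the heart of the shape-matching step above is a polar decomposition of Apq via SVD; it can be sanity-checked in isolation with a small sketch like the following (the 2x2 matrix is made up):

#include <Eigen/Dense>
#include <iostream>

int main() {
    // A deformation-like 2x2 matrix (a rotation plus some stretch).
    Eigen::Matrix2d Apq;
    Apq << 1.2, -0.7,
           0.5,  1.1;

    // Full U and V are required for fixed-size matrices.
    Eigen::JacobiSVD<Eigen::Matrix2d> svd(Apq, Eigen::ComputeFullU | Eigen::ComputeFullV);
    Eigen::Matrix2d R = svd.matrixU() * svd.matrixV().transpose();

    // R should be orthonormal: R^T R = I and det(R) = +1 (or -1 if Apq encodes a reflection).
    std::cout << "R =\n" << R << "\n";
    std::cout << "R^T R =\n" << R.transpose() * R << "\n";
    std::cout << "det(R) = " << R.determinant() << "\n";
    return 0;
}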
Ejemplo n.º 21
0
void MagCal::calcMagComp()
{
    /*
     * Inspired by
     * http://davidegironi.blogspot.it/2013/01/magnetometer-calibration-helper-01-for.html#.UriTqkMjulM
     *
     * Ellipsoid fit from:
     * http://www.mathworks.com/matlabcentral/fileexchange/24693-ellipsoid-fit
     *
     * To use Eigen to convert matlab code, have a look at Eigen/AsciiQuickReference.txt
     */

    if (mMagSamples.size() < 9) {
        QMessageBox::warning(this, "Magnetometer compensation",
                             "Too few points.");
        return;
    }

    int samples = mMagSamples.size();
    Eigen::VectorXd ex(samples);
    Eigen::VectorXd ey(samples);
    Eigen::VectorXd ez(samples);

    for (int i = 0;i < samples;i++) {
        ex(i) = mMagSamples.at(i).at(0);
        ey(i) = mMagSamples.at(i).at(1);
        ez(i) = mMagSamples.at(i).at(2);
    }

    Eigen::MatrixXd eD(samples, 9);

    for (int i = 0;i < samples;i++) {
        eD(i, 0) = ex(i) * ex(i);
        eD(i, 1) = ey(i) * ey(i);
        eD(i, 2) = ez(i) * ez(i);
        eD(i, 3) = 2.0 * ex(i) * ey(i);
        eD(i, 4) = 2.0 * ex(i) * ez(i);
        eD(i, 5) = 2.0 * ey(i) * ez(i);
        eD(i, 6) = 2.0 * ex(i);
        eD(i, 7) = 2.0 * ey(i);
        eD(i, 8) = 2.0 * ez(i);
    }

    Eigen::MatrixXd etmp1 = eD.transpose() * eD;
    Eigen::MatrixXd etmp2 = eD.transpose() * Eigen::MatrixXd::Ones(samples, 1);
    Eigen::VectorXd eV = etmp1.lu().solve(etmp2);

    Eigen::MatrixXd eA(4, 4);
    eA(0,0)=eV(0);   eA(0,1)=eV(3);   eA(0,2)=eV(4);   eA(0,3)=eV(6);
    eA(1,0)=eV(3);   eA(1,1)=eV(1);   eA(1,2)=eV(5);   eA(1,3)=eV(7);
    eA(2,0)=eV(4);   eA(2,1)=eV(5);   eA(2,2)=eV(2);   eA(2,3)=eV(8);
    eA(3,0)=eV(6);   eA(3,1)=eV(7);   eA(3,2)=eV(8);   eA(3,3)=-1.0;

    Eigen::MatrixXd eCenter = -eA.topLeftCorner(3, 3).lu().solve(eV.segment(6, 3));
    Eigen::MatrixXd eT = Eigen::MatrixXd::Identity(4, 4);
    eT(3, 0) = eCenter(0);
    eT(3, 1) = eCenter(1);
    eT(3, 2) = eCenter(2);

    Eigen::MatrixXd eR = eT * eA * eT.transpose();

    Eigen::SelfAdjointEigenSolver<Eigen::Matrix3d> eEv(eR.topLeftCorner(3, 3) * (-1.0 / eR(3, 3)));
    Eigen::MatrixXd eVecs = eEv.eigenvectors();
    Eigen::MatrixXd eVals = eEv.eigenvalues();

    Eigen::MatrixXd eRadii(3, 1);
    eRadii(0) = sqrt(1.0 / eVals(0));
    eRadii(1) = sqrt(1.0 / eVals(1));
    eRadii(2) = sqrt(1.0 / eVals(2));

    Eigen::MatrixXd eScale = eRadii.asDiagonal().inverse() * eRadii.minCoeff();
    Eigen::MatrixXd eComp = eVecs * eScale * eVecs.transpose();

    mMagComp.resize(9);
    mMagComp[0] = eComp(0, 0);
    mMagComp[1] = eComp(0, 1);
    mMagComp[2] = eComp(0, 2);

    mMagComp[3] = eComp(1, 0);
    mMagComp[4] = eComp(1, 1);
    mMagComp[5] = eComp(1, 2);

    mMagComp[6] = eComp(2, 0);
    mMagComp[7] = eComp(2, 1);
    mMagComp[8] = eComp(2, 2);

    mMagCompCenter.resize(3);
    mMagCompCenter[0] = eCenter(0, 0);
    mMagCompCenter[1] = eCenter(1, 0);
    mMagCompCenter[2] = eCenter(2, 0);

    QVector<double> magX, magY, magZ;

    for (int i = 0;i < mMagSamples.size();i++) {
        double mx = mMagSamples.at(i).at(0);
        double my = mMagSamples.at(i).at(1);
        double mz = mMagSamples.at(i).at(2);

        mx -= mMagCompCenter.at(0);
        my -= mMagCompCenter.at(1);
        mz -= mMagCompCenter.at(2);

        magX.append(mx * mMagComp.at(0) + my * mMagComp.at(1) + mz * mMagComp.at(2));
        magY.append(mx * mMagComp.at(3) + my * mMagComp.at(4) + mz * mMagComp.at(5));
        magZ.append(mx * mMagComp.at(6) + my * mMagComp.at(7) + mz * mMagComp.at(8));
    }

    ui->magSampXyPlot->graph(1)->setData(magX, magY);
    ui->magSampXzPlot->graph(1)->setData(magX, magZ);
    ui->magSampYzPlot->graph(1)->setData(magY, magZ);

    updateMagPlots();
}
Ejemplo n.º 22
0
Eigen::MatrixXd rcppeigen_ftrans(const Eigen::MatrixXd & A) {
  Eigen::MatrixXd m = A.transpose();
  return m;
}
// [[Rcpp::export]]
double calculateAOptimalityPseudo(const Eigen::MatrixXd& currentDesign) {
  Eigen::MatrixXd XtX = currentDesign.transpose()*currentDesign;
  return(XtX.completeOrthogonalDecomposition().pseudoInverse().trace());
}
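
A quick way to exercise calculateAOptimalityPseudo is a small orthogonal design, for which X'X = 4I and the criterion should come out as trace(I/4) = 0.5; the 4-run, 2-factor design below is invented for illustration.

#include <Eigen/Dense>
#include <iostream>

double calculateAOptimalityPseudo(const Eigen::MatrixXd& currentDesign) {
  Eigen::MatrixXd XtX = currentDesign.transpose() * currentDesign;
  return XtX.completeOrthogonalDecomposition().pseudoInverse().trace();
}

int main() {
  Eigen::MatrixXd design(4, 2);
  design <<  1, -1,
             1,  1,
            -1, -1,
            -1,  1;
  std::cout << "A-optimality value: " << calculateAOptimalityPseudo(design) << "\n";
  return 0;
}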
Ejemplo n.º 24
0
void Deformable::projectPositionsCluster(std::vector<int> cluster, int cluster_indx)
{
    int numVertices =  cluster.size();
    if (numVertices <= 1) return;
    int i;//,j,k;
    double beta_cluster = params.lbeta.at(cluster_indx);
    
    // center of mass
    Eigen::Vector2d cm, originalCm;
    cm.setZero(); originalCm.setZero();
    double mass = 0.0;
    int indx;
    for (i = 0; i < numVertices; i++) {
        indx= cluster.at(i);
        double m = mMasses(indx,0);
        if (mFixed(indx,0)) m *= 100.0;
        mass += m;
        cm += mNewPos.row(indx).transpose() * m;
        originalCm += mOriginalPos.row(indx).transpose() * m;
        //std::cout<<"before: "<<mNewPos.row(indx)<<std::endl;
    }
    
    cm /= mass;
    originalCm /= mass;
    
    Eigen::Matrix2d Apq;
    Eigen::Matrix2d Aqq;
    Eigen::Vector2d p;
    Eigen::Vector2d q;
    Apq.setZero();
    Aqq.setZero();
    
    for (i = 0; i < numVertices; i++) {
        indx= cluster.at(i);
        p(0) = mNewPos(indx,0) - cm(0);
        p(1) = mNewPos(indx,1) - cm(1);
        
        q(0) = mOriginalPos(indx,0) - originalCm(0);
        q(1) = mOriginalPos(indx,1) - originalCm(1);
        double m = mMasses(indx,0);
        Apq += m*p*q.transpose();
        Aqq += m*q*q.transpose();
    }
    

    
    if (!params.allowFlip && Apq.determinant() < 0.0f)
    {  	// prevent from flipping
        Apq(0,1) = -Apq(0,1);
        Apq(1,1) = -Apq(1,1);
    }
    
    Eigen::Matrix2d R;
    Eigen::JacobiSVD<Eigen::MatrixXd> svd(Apq, Eigen::ComputeThinU | Eigen::ComputeThinV);
    Eigen::MatrixXd U = svd.matrixU();
    Eigen::MatrixXd V = svd.matrixV();
    R = U*V.transpose();
    
    if (!params.quadraticMatch) {	// --------- linear match
        Eigen::Matrix2d A = Aqq;
        A = Apq*A.inverse();
        
        if (params.volumeConservation) {
            
            double det = A.determinant();
            if (det != 0.0) {
                det = 1.0 / sqrt(fabs(det));
                if (det > 2.0) det = 2.0;
                
                A = A*det;
            }
        }
        
        Eigen::Matrix2d T = R * (1.0 - beta_cluster) + A * beta_cluster;
        std::cout<<"cluster's beta "<<beta_cluster<<std::endl;
        
        for (i = 0; i < numVertices; i++) {
            int indx= cluster.at(i);
            indxCount.at(indx) +=1;
            
            if (mFixed(indx)) continue;
            q(0) = mOriginalPos(indx,0) - originalCm(0);
            q(1) = mOriginalPos(indx,1) - originalCm(1);
            Eigen::Vector2d Tq = T*q;
            
            mGoalPos(indx,0) = Tq(0)+cm(0);
            mGoalPos(indx,1) = Tq(1)+cm(1);
            mNewPos.row(indx) += (mGoalPos.row(indx) - mNewPos.row(indx)) * params.alpha;
            
            mGoalPos_sum.row(indx) += mNewPos.row(indx);
        }
    }
}
Ejemplo n.º 25
0
int main(int argc, char* argv[])
{
    // Output precision
    int p=9;
    // Matrix dimension
    int N=9;
    // Matrix specification
    Eigen::MatrixXd A = Eigen::MatrixXd::Zero(N,N); 
    Eigen::MatrixXd A1 = Eigen::MatrixXd::Zero(N,N); 
    for (int i=0; i<N; i++) {
        A1(i,i) = 4.0;
        if (i>=1) A1(i,i-1) = -1.0;
        if (i>=2) A1(i,i-2) = 3.0;
        if (i<=6) A1(i,i+2) = -2.0;
    }
    // Could be AT+A or AT*A
    A = A1.transpose()+A1;
    // Vector specification
    Eigen::VectorXd b = Eigen::VectorXd::Zero(N);
    for (int i=0; i<N; i++) {
        b(i) = (i*i-9.0)/(i+5.0); 
    }

    // Cholesky decomposition
    Eigen::MatrixXd U = cholesky(A);
    Eigen::MatrixXd L = U.transpose();

    // Perform forward substitution and backward substitution
    // Using Eigen's library method: Eigen::VectorXd x = A.llt().solve(b);
    Eigen::VectorXd y = forward_subst(L, b);
    Eigen::VectorXd x = backward_subst(U, y);

    // Compute residual error
    double R = (A*x-b).norm();
    std::cout << "Residual error =, "
              << std::setprecision(p)
              << R
              << std::endl;

    // Compute L-inverse, U-inverse, and A-inverse
    Eigen::MatrixXd L_inv = Eigen::MatrixXd::Zero(N,N);
    Eigen::MatrixXd U_inv = Eigen::MatrixXd::Zero(N,N);
    Eigen::MatrixXd A_inv = Eigen::MatrixXd::Zero(N,N);
    // L-inverse
    for (int i=0; i<N; i++) {
        Eigen::VectorXd ei = Eigen::VectorXd::Zero(N);
        ei(i) = 1;
        Eigen::VectorXd x = forward_subst(L, ei);
        L_inv.col(i) = x;
    }
    // U-inverse
    for (int i=0; i<N; i++) {
        Eigen::VectorXd ei = Eigen::VectorXd::Zero(N);
        ei(i) = 1;
        Eigen::VectorXd x = backward_subst(U, ei);
        U_inv.col(i) = x;
    }
    // A-inverse
    for (int i=0; i<N; i++) {
        Eigen::VectorXd ei = Eigen::VectorXd::Zero(N);
        ei(i) = 1;
        Eigen::VectorXd y = forward_subst(L, ei);
        Eigen::VectorXd x = backward_subst(U, y);
        A_inv.col(i) = x;
    }
    
    // Compute residual error by matrix inverse
    Eigen::VectorXd v = A_inv*b;
    R = (b-A*v).norm();
    std::cout << "Residual error by inverse =, "
              << std::setprecision(p)
              << R
              << std::endl;

    std::cout << "Cholesky decomposition" << std::endl;
    for (int i=0; i<N; i++) {
        std::cout << " |, ";
        for (int j=0; j<N; j++) {
            std::cout << std::right 
                      << std::setw(p+3) 
                      << std::fixed 
                      << std::setprecision(p) 
                      << A(i,j) 
                      << ", ";
        }
        std::cout << " |, =, |, ";
        for (int j=0; j<N; j++) {
            std::cout << std::right 
                      << std::setw(p+3) 
                      << std::fixed 
                      << std::setprecision(p) 
                      << L(i,j) 
                      << ", ";
        }
        std::cout << " |, ";
        for (int j=0; j<N; j++) {
            std::cout << std::right 
                      << std::setw(p+3) 
                      << std::fixed 
                      << std::setprecision(p) 
                      << U(i,j) 
                      << ", ";
        }
        std::cout << " |, " << std::endl;
    }

    std::cout << "Linear solve - x:" << std::endl;
    for (int i=0; i<N; i++) {
        std::cout << std::right 
                  << std::setw(p+3) 
                  << std::fixed 
                  << std::setprecision(p) 
                  << x(i)
                  << std::endl; 
    }
    
    std::cout << "Inverse relations:" << std::endl;
    for (int i=0; i<N; i++) {
        std::cout << " |, ";
        for (int j=0; j<N; j++) {
            std::cout << std::right 
                      << std::setw(p+3) 
                      << std::fixed 
                      << std::setprecision(p) 
                      << A_inv(i,j) 
                      << ", ";
        }
        std::cout << " |, =, |, ";
        for (int j=0; j<N; j++) {
            std::cout << std::right 
                      << std::setw(p+3) 
                      << std::fixed 
                      << std::setprecision(p) 
                      << U_inv(i,j) 
                      << ", ";
        }
        std::cout << " |, ";
        for (int j=0; j<N; j++) {
            std::cout << std::right 
                      << std::setw(p+3) 
                      << std::fixed 
                      << std::setprecision(p) 
                      << L_inv(i,j) 
                      << ", ";
        }
        std::cout << " |, " << std::endl;
    }
    
    std::cout << "Linear solve by inversion- x:" << std::endl;
    for (int i=0; i<N; i++) {
        std::cout << std::right 
                  << std::setw(p+3) 
                  << std::fixed 
                  << std::setprecision(p) 
                  << v(i)
                  << std::endl; 
    }
    
    // Verify accuracy of matrix inverse
    std::cout << "L * L-inverse residual error =, "
              << std::scientific
              << std::setprecision(p)
              << (L*L_inv-Eigen::MatrixXd::Identity(N,N)).norm() 
              << std::endl;
    std::cout << "U * U-inverse residual error =, "
              << std::scientific
              << std::setprecision(p)
              << (U*U_inv-Eigen::MatrixXd::Identity(N,N)).norm() 
              << std::endl;
    std::cout << "A * A-inverse residual error =, "
              << std::scientific
              << std::setprecision(p)
              << (A*A_inv-Eigen::MatrixXd::Identity(N,N)).norm() 
              << std::endl;

    return 0;
}
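
As the comment in the listing suggests, the hand-rolled substitution steps can be cross-checked against Eigen's built-in LLT solver; a compact sketch with the same matrix setup (the residual should again be near machine precision):

#include <Eigen/Dense>
#include <iostream>

int main() {
    const int N = 9;
    Eigen::MatrixXd A1 = Eigen::MatrixXd::Zero(N, N);
    for (int i = 0; i < N; i++) {
        A1(i, i) = 4.0;
        if (i >= 1) A1(i, i - 1) = -1.0;
        if (i >= 2) A1(i, i - 2) = 3.0;
        if (i <= 6) A1(i, i + 2) = -2.0;
    }
    Eigen::MatrixXd A = A1.transpose() + A1;

    Eigen::VectorXd b(N);
    for (int i = 0; i < N; i++) b(i) = (i * i - 9.0) / (i + 5.0);

    // Solve A x = b with Eigen's Cholesky (LLT) factorization.
    Eigen::LLT<Eigen::MatrixXd> llt(A);
    if (llt.info() != Eigen::Success) {
        std::cout << "matrix is not positive definite\n";
        return 1;
    }
    Eigen::VectorXd x = llt.solve(b);
    std::cout << "residual ||Ax - b|| = " << (A * x - b).norm() << "\n";
    return 0;
}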
Ejemplo n.º 26
0
Eigen::MatrixXd RtHPIS::fminsearch(Eigen::MatrixXd pos,int maxiter, int maxfun, int display, Eigen::MatrixXd data, struct sens sensors)
{
    double tolx, tolf, rho, chi, psi, sigma, func_evals, usual_delta, zero_term_delta, temp1, temp2;
    std::string header, how;
    int n, itercount, prnt;
    Eigen::MatrixXd onesn, two2np1, one2n, v, y, v1, tempX1, tempX2, xbar, xr, x, xe, xc, xcc, xin;
    std::vector <double> fv, fv1;
    std::vector <int> idx;

    dipError tempdip, fxr, fxe, fxc, fxcc;

    //tolx = tolf = 1e-4;
    // Seok
    tolx = tolf = 1e-9;

    switch(display)
    {
        case 0:
            prnt = 0;
            break;
        default:
            prnt = 1;
    }

    header = " Iteration   Func-count     min f(x) Procedure";

    n = pos.cols();

    // Initialize parameters
    rho = 1; chi = 2; psi = 0.5; sigma = 0.5;
    onesn = Eigen::MatrixXd::Ones(1,n);
    two2np1 = one2n = Eigen::MatrixXd::Zero(1,n);

    for(int i = 0;i < n;i++) {
        two2np1(i) = 1 + i;
        one2n(i) = i;
    }

    v = v1 = Eigen::MatrixXd::Zero(n, n+1);
    fv.resize(n+1);idx.resize(n+1);fv1.resize(n+1);

    for(int i = 0;i < n; i++) v(i,0) = pos(i);

    tempdip = dipfitError(pos, data, sensors);
    fv[0] = tempdip.error;


    func_evals = 1;itercount = 0;how = "";

    // Continue setting up the initial simplex.
    // Following improvement suggested by L.Pfeffer at Stanford
    usual_delta = 0.05;             // 5 percent deltas for non-zero terms
    zero_term_delta = 0.00025;      // Even smaller delta for zero elements of x
    xin = pos.transpose();

    for(int j = 0;j < n;j++) {
        y = xin;

        if(y(j) != 0) y(j) = (1 + usual_delta) * y(j);
        else y(j) = zero_term_delta;

        v.col(j+1).array() = y;
        pos = y.transpose();
        tempdip = dipfitError(pos, data, sensors);
        fv[j+1] = tempdip.error;
    }

    // Sort elements of fv
    base_arr = fv;
    for (int i = 0; i < n+1; i++) idx[i] = i;

    sort (idx.begin(), idx.end(), RtHPIS::compar);

    for (int i = 0;i < n+1;i++) {
        v1.col(i) = v.col(idx[i]);
        fv1[i] = fv[idx[i]];
    }

    v = v1;fv = fv1;

    how = "initial simplex";
    itercount = itercount + 1;
    func_evals = n + 1;

    tempX1 = Eigen::MatrixXd::Zero(1,n);

    while ((func_evals < maxfun) && (itercount < maxiter)) {
        for (int i = 0;i < n;i++) tempX1(i) = std::abs(fv[0] - fv[i+1]);
        temp1 = tempX1.maxCoeff();

        tempX2 = Eigen::MatrixXd::Zero(n,n);

        for(int i = 0;i < n;i++) tempX2.col(i) = v.col(i+1) -  v.col(0);

        tempX2 = tempX2.array().abs();

        temp2 = tempX2.maxCoeff();

        if((temp1 <= tolf) && (temp2 <= tolx)) break;

        xbar = v.block(0,0,n,n).rowwise().sum();
        xbar /= n;

        xr = (1+rho) * xbar - rho * v.block(0,n,v.rows(),1);

        x = xr.transpose();
        //std::cout << "Iteration Count: " << itercount << ":" << x << std::endl;

        fxr = dipfitError(x, data, sensors);

        func_evals = func_evals+1;

        if (fxr.error < fv[0]) {
            // Calculate the expansion point
            xe = (1 + rho * chi) * xbar - rho * chi * v.col(v.cols()-1);
            x = xe.transpose();
            fxe = dipfitError(x, data, sensors);
            func_evals = func_evals+1;

            if(fxe.error < fxr.error) {
                v.col(v.cols()-1) = xe;
                fv[n] = fxe.error;
                how = "expand";
            }
            else {
                v.col(v.cols()-1) = xr;
                fv[n] = fxr.error;
                how = "reflect";
            }
        }
        else {
            if(fxr.error < fv[n-1]) {
                v.col(v.cols()-1) = xr;
                fv[n] = fxr.error;
                how = "reflect";
            }
            else { // fxr.error >= fv[:,n-1]
                // Perform contraction
                if(fxr.error < fv[n]) {
                    // Perform an outside contraction
                    xc = (1 + psi * rho) * xbar - psi * rho * v.col(v.cols()-1);
                    x = xc.transpose();
                    fxc = dipfitError(x, data, sensors);
                    func_evals = func_evals + 1;

                    if(fxc.error <= fxr.error) {
                        v.col(v.cols()-1) = xc;
                        fv[n] = fxc.error;
                        how = "contract outside";
                    }
                    else {
                        // perform a shrink
                        how = "shrink";
                    }
                }
                else {
                    xcc = (1 - psi) * xbar + psi * v.col(v.cols()-1);
                    x = xcc.transpose();
                    fxcc = dipfitError(x, data, sensors);
                    func_evals = func_evals+1;
                    if(fxcc.error < fv[n]) {
                        v.col(v.cols()-1) = xcc;
                        fv[n] = fxcc.error;
                        how = "contract inside";
                    }
                    else {
                        // perform a shrink
                        how = "shrink";
                    }
                }
                if(how.compare("shrink") == 0) {
                    for(int j = 1;j < n+1;j++) {
                        v.col(j).array() = v.col(0).array() + sigma * (v.col(j).array() - v.col(0).array());
                        x = v.col(j).array().transpose();
                        tempdip = dipfitError(x,data, sensors);
                        fv[j] = tempdip.error;
                    }
                }
            }
        }
        // Sort elements of fv
        base_arr = fv;
        for (int i = 0; i < n+1; i++) idx[i] = i;
        sort (idx.begin (), idx.end (), compar);
        for (int i = 0;i < n+1;i++) {
            v1.col(i) = v.col(idx[i]);
            fv1[i] = fv[idx[i]];
        }
        v = v1;fv = fv1;
        itercount = itercount + 1;
    } // end of while loop
//    }while(dipfitError(x, data, sensors).error > 0.1);

    x = v.col(0).transpose();

    // Seok
    simplex_numitr = itercount;

    return x;
}
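
The simplex geometry inside the main loop (centroid of the n best vertices, reflection point, expansion point) is easy to verify numerically; a small sketch using rho = 1 and chi = 2 as in the code and an arbitrary 2-D simplex whose last column is treated as the worst vertex:

#include <Eigen/Dense>
#include <iostream>

int main() {
    const double rho = 1.0, chi = 2.0;

    // Simplex for a 2-parameter problem: columns are vertices, the last one the worst.
    Eigen::MatrixXd v(2, 3);
    v << 0.0, 1.0, 0.0,
         0.0, 0.0, 1.0;

    // Centroid of the n best vertices (all columns except the last).
    Eigen::VectorXd xbar = v.block(0, 0, 2, 2).rowwise().sum() / 2.0;

    // Reflection and expansion points, as in the main loop above.
    Eigen::VectorXd xr = (1.0 + rho) * xbar - rho * v.col(2);
    Eigen::VectorXd xe = (1.0 + rho * chi) * xbar - rho * chi * v.col(2);

    std::cout << "xbar = " << xbar.transpose() << "\n"
              << "xr   = " << xr.transpose() << "\n"
              << "xe   = " << xe.transpose() << "\n";
    return 0;
}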
Ejemplo n.º 27
0
void MolconvWindow::minimizeRMSD(molconv::moleculePtr refMol, molconv::moleculePtr otherMol)
{
    if (refMol->size() != otherMol->size())
    {
        QMessageBox::critical(this, tr("Alignment impossible"), tr("Only molecules with equal number of atoms can be aligned."));
        return;
    }

    int Natoms = refMol->size();

    Eigen::Vector3d center = refMol->center();
    Eigen::Vector3d shift = center - otherMol->center();
    Eigen::Vector3d newOrigin = otherMol->internalOriginPosition() + shift;

    updateActiveMolecule(otherMol);
    d->m_MoleculeSettings->setMolecule(otherMol);
    d->m_MoleculeSettings->moveMolecule(double(newOrigin(0)), double(newOrigin(1)), double(newOrigin(2)), 0.0, 0.0, 0.0);

    Eigen::MatrixXd Xr = Eigen::MatrixXd::Zero(3,Natoms);
    Eigen::MatrixXd Xo = Eigen::MatrixXd::Zero(3,Natoms);
    for (int i = 0; i < Natoms; i++)
    {
        Xr.col(i) = refMol->atom(i)->position() - center;
        Xo.col(i) = otherMol->atom(i)->position() - center;
    }

    Eigen::Matrix3d corr = Xo * Xr.transpose();

    // construct the quaternion matrix
    Eigen::Matrix4d F = Eigen::Matrix4d::Zero();
    F(0,0) =  corr(0,0) + corr(1,1) + corr(2,2);
    F(1,1) =  corr(0,0) - corr(1,1) - corr(2,2);
    F(2,2) = -corr(0,0) + corr(1,1) - corr(2,2);
    F(3,3) = -corr(0,0) - corr(1,1) + corr(2,2);
    F(0,1) =  corr(1,2) - corr(2,1);
    F(0,2) =  corr(2,0) - corr(0,2);
    F(0,3) =  corr(0,1) - corr(1,0);
    F(1,2) =  corr(0,1) + corr(1,0);
    F(1,3) =  corr(0,2) + corr(2,0);
    F(2,3) =  corr(1,2) + corr(2,1);
    F(1,0) = F(0,1);
    F(2,0) = F(0,2);
    F(3,0) = F(0,3);
    F(2,1) = F(1,2);
    F(3,1) = F(1,3);
    F(3,2) = F(2,3);

    Eigen::SelfAdjointEigenSolver<Eigen::Matrix4d> Feig(F);
    Eigen::Vector4d Feval = Feig.eigenvalues();
    Eigen::Matrix4d Fevec = Feig.eigenvectors();

    // the optimal rotation corresponds to either the first or the last eigenvector, depending on which eigenvalue is larger
    Eigen::Vector4d lQuart = std::abs(double(Feval(0))) > std::abs(double(Feval(3))) ? Fevec.block(0, 0, 4, 1) : Fevec.block(0, 3, 4, 1);

    Eigen::Matrix3d rotmat = Eigen::Matrix3d::Zero();
    rotmat(0,0) = lQuart(0) * lQuart(0) + lQuart(1) * lQuart(1) - lQuart(2) * lQuart(2) - lQuart(3) * lQuart(3);
    rotmat(1,1) = lQuart(0) * lQuart(0) - lQuart(1) * lQuart(1) + lQuart(2) * lQuart(2) - lQuart(3) * lQuart(3);
    rotmat(2,2) = lQuart(0) * lQuart(0) - lQuart(1) * lQuart(1) - lQuart(2) * lQuart(2) + lQuart(3) * lQuart(3);
    rotmat(0,1) = 2.0 * (lQuart(1) * lQuart(2) - lQuart(0) * lQuart(3));
    rotmat(0,2) = 2.0 * (lQuart(1) * lQuart(3) + lQuart(0) * lQuart(2));
    rotmat(1,2) = 2.0 * (lQuart(2) * lQuart(3) - lQuart(0) * lQuart(1));
    rotmat(1,0) = 2.0 * (lQuart(1) * lQuart(2) + lQuart(0) * lQuart(3));
    rotmat(2,0) = 2.0 * (lQuart(1) * lQuart(3) - lQuart(0) * lQuart(2));
    rotmat(2,1) = 2.0 * (lQuart(2) * lQuart(3) + lQuart(0) * lQuart(1));

    std::array<double,3> newEulers = molconv::Molecule::rot2euler(rotmat);
    double newPhi = newEulers[2];
    double newTheta = newEulers[1];
    double newPsi = newEulers[0];

    d->m_MoleculeSettings->moveMolecule(double(newOrigin(0)), double(newOrigin(1)), double(newOrigin(2)), newPhi, newTheta, newPsi);

    updateAxes();
}
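
For comparison, Eigen ships a closed-form rigid-alignment helper, Eigen::umeyama (in <Eigen/Geometry>), which solves the same least-squares superposition problem as the quaternion construction above; a sketch with two invented point sets and scaling disabled:

#include <cmath>
#include <Eigen/Dense>
#include <Eigen/Geometry>
#include <iostream>

int main() {
    // Reference points (one per column) and a rotated + translated copy of them.
    Eigen::Matrix<double, 3, 4> Xr;
    Xr << 0.0, 1.0, 0.0, 0.0,
          0.0, 0.0, 1.0, 0.0,
          0.0, 0.0, 0.0, 1.0;

    const double a = 0.5236;  // ~30 degrees
    Eigen::Matrix3d R;
    R << std::cos(a), -std::sin(a), 0.0,
         std::sin(a),  std::cos(a), 0.0,
         0.0,          0.0,         1.0;
    Eigen::Vector3d t(0.5, -0.2, 1.0);
    Eigen::Matrix<double, 3, 4> Xo = (R * Xr).colwise() + t;

    // Recover the rigid transform mapping Xo back onto Xr (scaling disabled).
    Eigen::Matrix4d T = Eigen::umeyama(Xo, Xr, false);
    std::cout << "recovered transform:\n" << T << "\n";
    return 0;
}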
Ejemplo n.º 28
0
void run ()
{
  auto input_header = Header::open (argument[0]);
  Header output_header (input_header);
  output_header.datatype() = DataType::from_command_line (DataType::from<float> ());

  // Linear
  transform_type linear_transform;
  bool linear = false;
  auto opt = get_options ("linear");
  if (opt.size()) {
    linear = true;
    linear_transform = load_transform (opt[0][0]);
  } else {
    linear_transform.setIdentity();
  }

  // Replace
  const bool replace = get_options ("replace").size();
  if (replace && !linear) {
    INFO ("no linear is supplied so replace with the default (identity) transform");
    linear = true;
  }

  // Template
  opt = get_options ("template");
  Header template_header;
  if (opt.size()) {
    if (replace)
      throw Exception ("you cannot use the -replace option with the -template option");
    template_header = Header::open (opt[0][0]);
    for (size_t i = 0; i < 3; ++i) {
      output_header.size(i) = template_header.size(i);
      output_header.spacing(i) = template_header.spacing(i);
    }
    output_header.transform() = template_header.transform();
    add_line (output_header.keyval()["comments"], std::string ("regridded to template image \"" + template_header.name() + "\""));
  }

  // Warp 5D warp
  // TODO add reference to warp format documentation
  opt = get_options ("warp_full");
  Image<default_type> warp;
  if (opt.size()) {
    warp = Image<default_type>::open (opt[0][0]).with_direct_io();
    if (warp.ndim() != 5)
      throw Exception ("the input -warp_full image must be a 5D file.");
    if (warp.size(3) != 3)
      throw Exception ("the input -warp_full image must have 3 volumes (x,y,z) in the 4th dimension.");
    if (warp.size(4) != 4)
      throw Exception ("the input -warp_full image must have 4 volumes in the 5th dimension.");
    if (linear)
      throw Exception ("the -warp_full option cannot be applied in combination with -linear since the "
                       "linear transform is already included in the warp header");
  }

  // Warp from image1 or image2
  int from = 1;
  opt = get_options ("from");
  if (opt.size()) {
    from = opt[0][0];
    if (!warp.valid())
      WARN ("-from option ignored since no 5D warp was input");
  }

  // Warp deformation field
  opt = get_options ("warp");
  if (opt.size()) {
    if (warp.valid())
      throw Exception ("only one warp field can be input with either -warp or -warp_mid");
    warp = Image<default_type>::open (opt[0][0]).with_direct_io (Stride::contiguous_along_axis(3));
    if (warp.ndim() != 4)
      throw Exception ("the input -warp file must be a 4D deformation field");
    if (warp.size(3) != 3)
      throw Exception ("the input -warp file must have 3 volumes in the 4th dimension (x,y,z positions)");
  }

  // Inverse
  const bool inverse = get_options ("inverse").size();
  if (inverse) {
    if (!(linear || warp.valid()))
      throw Exception ("no linear or warp transformation provided for option '-inverse'");
    if (warp.valid())
      if (warp.ndim() == 4)
        throw Exception ("cannot apply -inverse with the input -warp_df deformation field.");
    linear_transform = linear_transform.inverse();
  }

  // Half
  const bool half = get_options ("half").size();
  if (half) {
    if (!(linear))
      throw Exception ("no linear transformation provided for option '-half'");
    {
      Eigen::Matrix<default_type, 4, 4> temp;
      temp.row(3) << 0, 0, 0, 1.0;
      temp.topLeftCorner(3,4) = linear_transform.matrix().topLeftCorner(3,4);
      linear_transform.matrix() = temp.sqrt().topLeftCorner(3,4);
    }
  }

  // Flip
  opt = get_options ("flip");
  if (opt.size()) {
    std::vector<int> axes = opt[0][0];
    transform_type flip;
    flip.setIdentity();
    for (size_t i = 0; i < axes.size(); ++i) {
      if (axes[i] < 0 || axes[i] > 2)
        throw Exception ("axes supplied to -flip are out of bounds (" + std::string (opt[0][0]) + ")");
      flip(axes[i],3) += flip(axes[i],axes[i]) * input_header.spacing(axes[i]) * (input_header.size(axes[i])-1);
      flip(axes[i], axes[i]) *= -1.0;
    }
    if (!replace)
      flip = input_header.transform() * flip * input_header.transform().inverse();
    linear_transform = linear_transform * flip;
    linear = true;
  }

  Stride::List stride = Stride::get (input_header);

  // Detect FOD image for reorientation
  opt = get_options ("noreorientation");
  bool fod_reorientation = false;
  Eigen::MatrixXd directions_cartesian;
  if (!opt.size() && (linear || warp.valid() || template_header.valid()) && input_header.ndim() == 4 &&
      input_header.size(3) >= 6 &&
      input_header.size(3) == (int) Math::SH::NforL (Math::SH::LforN (input_header.size(3)))) {
    CONSOLE ("SH series detected, performing apodised PSF reorientation");
    fod_reorientation = true;

    Eigen::MatrixXd directions_az_el;
    opt = get_options ("directions");
    if (opt.size())
      directions_az_el = load_matrix (opt[0][0]);
    else
      directions_az_el = DWI::Directions::electrostatic_repulsion_300();
    Math::SH::spherical2cartesian (directions_az_el, directions_cartesian);

    // load with SH coeffients contiguous in RAM
    stride = Stride::contiguous_along_axis (3, input_header);
  }

  // Modulate FODs
  bool modulate = false;
  if (get_options ("modulate").size()) {
    modulate = true;
    if (!fod_reorientation)
      throw Exception ("modulation can only be performed with FOD reorientation");
  }

  // Rotate/Flip gradient directions if present
  if (linear && input_header.ndim() == 4 && !warp && !fod_reorientation) {
    try {
      auto grad = DWI::get_DW_scheme (input_header);
      if (input_header.size(3) == (ssize_t) grad.rows()) {
        INFO ("DW gradients detected and will be reoriented");
        Eigen::MatrixXd rotation = linear_transform.linear();
        Eigen::MatrixXd test = rotation.transpose() * rotation;
        test = test.array() / test.diagonal().mean();
        if (!test.isIdentity (0.001))
        WARN ("the input linear transform contains shear or anisotropic scaling and "
              "therefore should not be used to reorient diffusion gradients");
        if (replace)
          rotation = linear_transform.linear() * input_header.transform().linear().inverse();
        for (ssize_t n = 0; n < grad.rows(); ++n) {
          Eigen::Vector3 grad_vector = grad.block<1,3>(n,0);
          grad.block<1,3>(n,0) = rotation * grad_vector;
        }
        DWI::set_DW_scheme (output_header, grad);
      }
    }
    catch (Exception& e) {
      e.display (2);
    }
  }

  // Interpolator
  int interp = 2;  // cubic
  opt = get_options ("interp");
  if (opt.size()) {
    interp = opt[0][0];
    if (!warp && !template_header)
      WARN ("interpolator choice ignored since the input image will not be regridded");
  }

  // Out of bounds value
  float out_of_bounds_value = 0.0;
  opt = get_options ("nan");
  if (opt.size()) {
    out_of_bounds_value = NAN;
    if (!warp && !template_header)
      WARN ("Out of bounds value ignored since the input image will not be regridded");
  }

  auto input = input_header.get_image<float>().with_direct_io (stride);

  // Reslice the image onto template
  if (template_header.valid() && !warp) {
    INFO ("image will be regridded");

    if (get_options ("midway_space").size()) {
      INFO("regridding to midway space");
      std::vector<Header> headers;
      headers.push_back(input_header);
      headers.push_back(template_header);
      std::vector<Eigen::Transform<default_type, 3, Eigen::Projective>> void_trafo;
      auto padding = Eigen::Matrix<double, 4, 1>(1.0, 1.0, 1.0, 1.0);
      int subsampling = 1;
      auto midway_header = compute_minimum_average_header (headers, subsampling, padding, void_trafo);
      for (size_t i = 0; i < 3; ++i) {
        output_header.size(i) = midway_header.size(i);
        output_header.spacing(i) = midway_header.spacing(i);
      }
      output_header.transform() = midway_header.transform();
    }

    if (interp == 0)
      output_header.datatype() = DataType::from_command_line (input_header.datatype());
    auto output = Image<float>::create (argument[1], output_header).with_direct_io();

    switch (interp) {
      case 0:
        Filter::reslice<Interp::Nearest> (input, output, linear_transform, Adapter::AutoOverSample, out_of_bounds_value);
        break;
      case 1:
        Filter::reslice<Interp::Linear> (input, output, linear_transform, Adapter::AutoOverSample, out_of_bounds_value);
        break;
      case 2:
        Filter::reslice<Interp::Cubic> (input, output, linear_transform, Adapter::AutoOverSample, out_of_bounds_value);
        break;
      case 3:
        Filter::reslice<Interp::Sinc> (input, output, linear_transform, Adapter::AutoOverSample, out_of_bounds_value);
        break;
      default:
        assert (0);
        break;
    }

    if (fod_reorientation)
      Registration::Transform::reorient ("reorienting", output, output, linear_transform, directions_cartesian.transpose(), modulate);

  } else if (warp.valid()) {

    if (replace)
      throw Exception ("you cannot use the -replace option with the -warp or -warp_df option");

    if (!template_header) {
      for (size_t i = 0; i < 3; ++i) {
        output_header.size(i) = warp.size(i);
        output_header.spacing(i) = warp.spacing(i);
      }
      output_header.transform() = warp.transform();
      add_line (output_header.keyval()["comments"], std::string ("resliced using warp image \"" + warp.name() + "\""));
    }

    auto output = Image<float>::create(argument[1], output_header).with_direct_io();

    if (warp.ndim() == 5) {
      Image<default_type> warp_deform;

      // Warp to the midway space defined by the warp grid
      if (get_options ("midway_space").size()) {
        warp_deform = Registration::Warp::compute_midway_deformation (warp, from);
      // Use the full transform to warp from the image image to the template
      } else {
        warp_deform = Registration::Warp::compute_full_deformation (warp, template_header, from);
      }
      apply_warp (input, output, warp_deform, interp, out_of_bounds_value);
      if (fod_reorientation)
        Registration::Transform::reorient_warp ("reorienting", output, warp_deform, directions_cartesian.transpose(), modulate);

    // Compose and apply input linear and 4D deformation field
    } else if (warp.ndim() == 4 && linear) {
      auto warp_composed = Image<default_type>::scratch (warp);
      Registration::Warp::compose_linear_deformation (linear_transform, warp, warp_composed);
      apply_warp (input, output, warp_composed, interp, out_of_bounds_value);
      if (fod_reorientation)
        Registration::Transform::reorient_warp ("reorienting", output, warp_composed, directions_cartesian.transpose(), modulate);

    // Apply 4D deformation field only
    } else {
      apply_warp (input, output, warp, interp, out_of_bounds_value);
      if (fod_reorientation)
        Registration::Transform::reorient_warp ("reorienting", output, warp, directions_cartesian.transpose(), modulate);
    }

  // No reslicing required, so just modify the header and do a straight copy of the data
  } else {

    if (get_options ("midway").size())
      throw Exception ("midway option given but no template image defined");

    INFO ("image will not be regridded");
    Eigen::MatrixXd rotation = linear_transform.linear();
    Eigen::MatrixXd temp = rotation.transpose() * rotation;
    if (!temp.isIdentity (0.001))
      WARN("the input linear transform is not orthonormal and therefore applying this without the -template"
           "option will mean the output header transform will also be not orthonormal");

    add_line (output_header.keyval()["comments"], std::string ("transform modified"));
    if (replace)
      output_header.transform() = linear_transform;
    else
      output_header.transform() = linear_transform.inverse() * output_header.transform();
    auto output = Image<float>::create (argument[1], output_header).with_direct_io();
    copy_with_progress (input, output);

    if (fod_reorientation) {
      transform_type transform = linear_transform;
      if (replace)
        transform = linear_transform * output_header.transform().inverse();
      Registration::Transform::reorient ("reorienting", output, output, transform, directions_cartesian.transpose());
    }
  }
}
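
The -half branch above calls temp.sqrt(), which requires Eigen's unsupported MatrixFunctions module; a standalone sketch of halving a homogeneous transform that way (the rotation angle and translation below are arbitrary):

#include <cmath>
#include <Eigen/Dense>
#include <unsupported/Eigen/MatrixFunctions>
#include <iostream>

int main() {
    // A homogeneous transform: rotation about z by ~40 degrees plus a translation.
    const double a = 0.6981;
    Eigen::Matrix4d T = Eigen::Matrix4d::Identity();
    T.block<2, 2>(0, 0) << std::cos(a), -std::sin(a),
                           std::sin(a),  std::cos(a);
    T.block<3, 1>(0, 3) << 1.0, 2.0, 3.0;

    // The principal matrix square root is the "half-way" transform.
    Eigen::Matrix4d H = T.sqrt();
    std::cout << "H * H - T (should be ~0):\n" << H * H - T << "\n";
    return 0;
}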
Ejemplo n.º 29
0
Archivo: mls.hpp Proyecto: diegodgs/PCL
template <typename PointInT, typename PointOutT> void
pcl::MovingLeastSquares<PointInT, PointOutT>::computeMLSPointNormal (int index,
                                                                     const PointCloudIn &input,
                                                                     const std::vector<int> &nn_indices,
                                                                     std::vector<float> &nn_sqr_dists,
                                                                     PointCloudOut &projected_points,
                                                                     NormalCloud &projected_points_normals)
{
  // Compute the plane coefficients
  //pcl::computePointNormal<PointInT> (*input_, nn_indices, model_coefficients, curvature);
  EIGEN_ALIGN16 Eigen::Matrix3f covariance_matrix;
  Eigen::Vector4f xyz_centroid;

  // Estimate the XYZ centroid
  pcl::compute3DCentroid (input, nn_indices, xyz_centroid);

  // Compute the 3x3 covariance matrix
  pcl::computeCovarianceMatrix (input, nn_indices, xyz_centroid, covariance_matrix);

  EIGEN_ALIGN16 Eigen::Vector3f::Scalar eigen_value;
  EIGEN_ALIGN16 Eigen::Vector3f eigen_vector;
  Eigen::Vector4f model_coefficients;
  pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);
  model_coefficients.head<3> () = eigen_vector;
  model_coefficients[3] = 0;
  model_coefficients[3] = -1 * model_coefficients.dot (xyz_centroid);

  // Projected query point
  Eigen::Vector3f point = input[(*indices_)[index]].getVector3fMap ();
  float distance = point.dot (model_coefficients.head<3> ()) + model_coefficients[3];
  point -= distance * model_coefficients.head<3> ();

  // Compute the curvature surface change
  float curvature = covariance_matrix.trace ();
  if (curvature != 0)
    curvature = fabsf (eigen_value / curvature);


  // Get a copy of the plane normal easy access
  Eigen::Vector3d plane_normal = model_coefficients.head<3> ().cast<double> ();
  // Vector in which the polynomial coefficients will be put
  Eigen::VectorXd c_vec;
  // Local coordinate system (Darboux frame)
  Eigen::Vector3d v (0.0f, 0.0f, 0.0f), u (0.0f, 0.0f, 0.0f);



  // Perform polynomial fit to update point and normal
  ////////////////////////////////////////////////////
  if (polynomial_fit_ && static_cast<int> (nn_indices.size ()) >= nr_coeff_)
  {
    // Update neighborhood, since point was projected, and computing relative
    // positions. Note updating only distances for the weights for speed
    std::vector<Eigen::Vector3d> de_meaned (nn_indices.size ());
    for (size_t ni = 0; ni < nn_indices.size (); ++ni)
    {
      de_meaned[ni][0] = input_->points[nn_indices[ni]].x - point[0];
      de_meaned[ni][1] = input_->points[nn_indices[ni]].y - point[1];
      de_meaned[ni][2] = input_->points[nn_indices[ni]].z - point[2];
      nn_sqr_dists[ni] = static_cast<float> (de_meaned[ni].dot (de_meaned[ni]));
    }

    // Allocate matrices and vectors to hold the data used for the polynomial fit
    Eigen::VectorXd weight_vec (nn_indices.size ());
    Eigen::MatrixXd P (nr_coeff_, nn_indices.size ());
    Eigen::VectorXd f_vec (nn_indices.size ());
    Eigen::MatrixXd P_weight; // size will be (nr_coeff_, nn_indices.size ());
    Eigen::MatrixXd P_weight_Pt (nr_coeff_, nr_coeff_);

    // Get local coordinate system (Darboux frame)
    v = plane_normal.unitOrthogonal ();
    u = plane_normal.cross (v);

    // Go through neighbors, transform them in the local coordinate system,
    // save height and the evaluation of the polynome's terms
    double u_coord, v_coord, u_pow, v_pow;
    for (size_t ni = 0; ni < nn_indices.size (); ++ni)
    {
      // (re-)compute weights
      weight_vec (ni) = exp (-nn_sqr_dists[ni] / sqr_gauss_param_);

      // transforming coordinates
      u_coord = de_meaned[ni].dot (u);
      v_coord = de_meaned[ni].dot (v);
      f_vec (ni) = de_meaned[ni].dot (plane_normal);

      // compute the polynomial's terms at the current point
      int j = 0;
      u_pow = 1;
      for (int ui = 0; ui <= order_; ++ui)
      {
        v_pow = 1;
        for (int vi = 0; vi <= order_ - ui; ++vi)
        {
          P (j++, ni) = u_pow * v_pow;
          v_pow *= v_coord;
        }
        u_pow *= u_coord;
      }
    }

    // Computing coefficients
    P_weight = P * weight_vec.asDiagonal ();
    P_weight_Pt = P_weight * P.transpose ();
    c_vec = P_weight * f_vec;
    P_weight_Pt.llt ().solveInPlace (c_vec);
  }

  switch (upsample_method_)
  {
    case (NONE):
    {
      Eigen::Vector3d normal = plane_normal;

      if (polynomial_fit_ && static_cast<int> (nn_indices.size ()) >= nr_coeff_ && pcl_isfinite (c_vec[0]))
      {
        point += (c_vec[0] * plane_normal).cast<float> ();

        // Compute tangent vectors using the partial derivatives evaluated at (0,0), which are c_vec[order_+1] and c_vec[1]
        if (compute_normals_)
          normal = plane_normal - c_vec[order_ + 1] * u - c_vec[1] * v;
      }

      PointOutT aux;
      aux.x = point[0];
      aux.y = point[1];
      aux.z = point[2];
      projected_points.push_back (aux);

      if (compute_normals_)
      {
        pcl::Normal aux_normal;
        aux_normal.normal_x = static_cast<float> (normal[0]);
        aux_normal.normal_y = static_cast<float> (normal[1]);
        aux_normal.normal_z = static_cast<float> (normal[2]);
        aux_normal.curvature = curvature;
        projected_points_normals.push_back (aux_normal);
      }

      break;
    }

    case (SAMPLE_LOCAL_PLANE):
    {
      // Uniformly sample a circle around the query point using the radius and step parameters
      for (float u_disp = -static_cast<float> (upsampling_radius_); u_disp <= upsampling_radius_; u_disp += static_cast<float> (upsampling_step_))
        for (float v_disp = -static_cast<float> (upsampling_radius_); v_disp <= upsampling_radius_; v_disp += static_cast<float> (upsampling_step_))
          if (u_disp*u_disp + v_disp*v_disp < upsampling_radius_*upsampling_radius_)
          {
            PointOutT projected_point;
            pcl::Normal projected_normal;
            projectPointToMLSSurface (u_disp, v_disp, u, v, plane_normal, curvature, point, c_vec, 
                                      static_cast<int> (nn_indices.size ()),
                                      projected_point, projected_normal);

            projected_points.push_back (projected_point);
            if (compute_normals_)
              projected_points_normals.push_back (projected_normal);
          }
      break;
    }

    case (RANDOM_UNIFORM_DENSITY):
    {
      // Compute the local point density and add more samples if necessary
      int num_points_to_add = static_cast<int> (floor (desired_num_points_in_radius_ / 2.0 / static_cast<double> (nn_indices.size ())));

      // Just add the query point, because the density is good
      if (num_points_to_add <= 0)
      {
        // Just add the current point
        Eigen::Vector3d normal = plane_normal;
        if (polynomial_fit_ && static_cast<int> (nn_indices.size ()) >= nr_coeff_ && pcl_isfinite (c_vec[0]))
        {
          // Projection onto MLS surface along Darboux normal to the height at (0,0)
          point += (c_vec[0] * plane_normal).cast<float> ();
          // Compute tangent vectors using the partial derivatives evaluated at (0,0), which are c_vec[order_+1] and c_vec[1]
          if (compute_normals_)
            normal = plane_normal - c_vec[order_ + 1] * u - c_vec[1] * v;
        }
        PointOutT aux;
        aux.x = point[0];
        aux.y = point[1];
        aux.z = point[2];
        projected_points.push_back (aux);

        if (compute_normals_)
        {
          pcl::Normal aux_normal;
          aux_normal.normal_x = static_cast<float> (normal[0]);
          aux_normal.normal_y = static_cast<float> (normal[1]);
          aux_normal.normal_z = static_cast<float> (normal[2]);
          aux_normal.curvature = curvature;
          projected_points_normals.push_back (aux_normal);
        }
      }
      else
      {
        // Sample the local plane
        for (int num_added = 0; num_added < num_points_to_add;)
        {
          float u_disp = (*rng_uniform_distribution_) (),
                v_disp = (*rng_uniform_distribution_) ();
          // Check if inside circle; if not, try another coin flip
          if (u_disp * u_disp + v_disp * v_disp > search_radius_ * search_radius_/4)
            continue;


          PointOutT projected_point;
          pcl::Normal projected_normal;
          projectPointToMLSSurface (u_disp, v_disp, u, v, plane_normal, curvature, point, c_vec, 
                                    static_cast<int> (nn_indices.size ()),
                                    projected_point, projected_normal);

          projected_points.push_back (projected_point);
          if (compute_normals_)
            projected_points_normals.push_back (projected_normal);

          num_added ++;
        }
      }
      break;
    }

    case (VOXEL_GRID_DILATION):
    {
      // Take all point pairs and sample space between them in a grid-fashion
      // \note consider only point pairs with increasing indices
      MLSResult result (plane_normal, u, v, c_vec, static_cast<int> (nn_indices.size ()), curvature);
      mls_results_[index] = result;
      break;
    }
  }
}
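
The polynomial fit above reduces to a weighted least-squares solve of (P W P^T) c = P W f via Cholesky; a reduced 1-D sketch with invented samples of roughly f(u) = u^2 and Gaussian weights, mirroring the variable names in the listing:

#include <Eigen/Dense>
#include <iostream>

int main() {
    const int n_samples = 6, n_coeff = 3;        // fit c0 + c1*u + c2*u^2

    Eigen::VectorXd u(n_samples), f(n_samples), weight_vec(n_samples);
    u << -1.0, -0.6, -0.2, 0.2, 0.6, 1.0;
    f <<  1.05, 0.38, 0.02, 0.06, 0.35, 1.02;    // roughly f(u) = u^2
    weight_vec = (-u.array().square() / 0.5).exp().matrix();  // Gaussian weights

    // Columns of P hold the polynomial terms evaluated at each sample.
    Eigen::MatrixXd P(n_coeff, n_samples);
    for (int i = 0; i < n_samples; ++i)
        P.col(i) << 1.0, u(i), u(i) * u(i);

    Eigen::MatrixXd P_weight = P * weight_vec.asDiagonal();
    Eigen::MatrixXd P_weight_Pt = P_weight * P.transpose();
    Eigen::VectorXd c_vec = P_weight * f;
    P_weight_Pt.llt().solveInPlace(c_vec);       // c = (P W P^T)^-1 P W f

    std::cout << "fitted coefficients: " << c_vec.transpose() << "\n";
    return 0;
}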
Ejemplo n.º 30
0
void Controller::virtualForce(Eigen::Vector3d _force, dart::dynamics::BodyNode* _bodyNode, Eigen::Vector3d _offset) {
  Eigen::MatrixXd jacobian = mSkel->getJacobian(_bodyNode, _offset);
  mTorques += jacobian.transpose() * _force;
  //std::cout << "Printing Torques" << std::endl;
  //std::cout << mTorques << std::endl;
}
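
The Jacobian-transpose rule tau += J^T f maps a Cartesian force at a body point to generalized joint forces; a tiny numeric sketch in which a 3x4 matrix stands in for whatever mSkel->getJacobian() would return for a 4-DOF chain:

#include <Eigen/Dense>
#include <iostream>

int main() {
    Eigen::MatrixXd J(3, 4);                 // linear Jacobian of the body point
    J << 1.0, 0.5, 0.0, 0.2,
         0.0, 1.0, 0.3, 0.0,
         0.0, 0.0, 1.0, 0.1;

    Eigen::Vector3d force(0.0, 0.0, -9.8);   // virtual force at the body point
    Eigen::VectorXd torques = Eigen::VectorXd::Zero(4);

    torques += J.transpose() * force;        // generalized forces on the 4 joints
    std::cout << "joint torques: " << torques.transpose() << "\n";
    return 0;
}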