Example #1
File: geometry.hpp  Project: uf-mil/Sub8
size_t closest_point_index_rayOMP(const pcl::PointCloud<PointT>& cloud,
                                  const Eigen::Vector3f& direction_pre,
                                  const Eigen::Vector3f& line_pt,
                                  double& distance_to_ray) {
    Eigen::Vector3f direction = direction_pre / direction_pre.norm();
    std::vector<double> distances(cloud.points.size(), 10);  // Initialize to sentinel value 10
    // Generate a vector of distances
    #pragma omp parallel for
    for (size_t index = 0; index < cloud.points.size(); index++) {
        // Keep the point local to the loop body so each OpenMP thread reads its own element
        const PointT& point = cloud.points[index];

        Eigen::Vector3f cloud_pt(point.x, point.y, point.z);
        Eigen::Vector3f difference = (line_pt - cloud_pt);
        // https://en.wikipedia.org/wiki/Distance_from_a_point_to_a_line#Vector_formulation
        double distance = (difference - (difference.dot(direction) * direction)).norm();
        distances[index] = distance;
    }

    double min_distance = std::numeric_limits<double>::infinity();
    size_t min_index = 0;
    // Find index of minimum distance (TODO: Figure out how to OMP this)
    for (size_t index = 0; index < cloud.points.size(); index++) {
        const double distance = distances[index];
        if (distance < min_distance) {
            min_distance = distance;
            min_index = index;
        }
    }

    distance_to_ray = min_distance;

    return (min_index);
}
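A minimal call sketch with hypothetical data, assuming PointT resolves to pcl::PointXYZ and the header above is included:

pcl::PointCloud<pcl::PointXYZ> cloud;
cloud.points.push_back(pcl::PointXYZ(1.0f, 2.0f, 0.0f));
cloud.points.push_back(pcl::PointXYZ(0.1f, 0.0f, 5.0f));

Eigen::Vector3f direction(0.0f, 0.0f, 1.0f);   // ray along +Z
Eigen::Vector3f line_pt(0.0f, 0.0f, 0.0f);     // ray through the origin
double distance_to_ray = 0.0;
size_t idx = closest_point_index_rayOMP(cloud, direction, line_pt, distance_to_ray);
// idx == 1, distance_to_ray ≈ 0.1: the second point lies closest to the +Z ray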
Example #2
bool CommandSubscriber::getForcedTfFrame(Eigen::Affine3f & result) const
  {
  tf::StampedTransform transform;
  try
    {
    m_tf_listener.lookupTransform(m_forced_tf_frame_reference,m_forced_tf_frame_name,ros::Time(0),transform);
    }
    catch (const tf::TransformException& ex)
      {
      ROS_ERROR("kinfu: hint was forced by TF, but retrieval failed because: %s",ex.what());
      return false;
      }

  Eigen::Vector3f vec;
  vec.x() = transform.getOrigin().x();
  vec.y() = transform.getOrigin().y();
  vec.z() = transform.getOrigin().z();
  Eigen::Quaternionf quat;
  quat.x() = transform.getRotation().x();
  quat.y() = transform.getRotation().y();
  quat.z() = transform.getRotation().z();
  quat.w() = transform.getRotation().w();

  result.linear() = Eigen::AngleAxisf(quat).matrix();
  result.translation() = vec;
  return true;
  }
void NodeRendererEvent::render( const Eigen::Matrix4f& view, const Eigen::Matrix4f& model )
{
    glPushMatrix();
    // Evaluate into a concrete matrix so a contiguous float pointer can be passed to OpenGL
    Eigen::Matrix4f mv = model * view;
    glMultMatrixf( mv.data() );

    glColor3f( 1.0f, 1.0f, 0.0f );

    // render a filled ellipse
    glBegin( GL_POLYGON );
    for ( float step = 0.0f; step < 2.0f * M_PI; step += 0.1f )
    {
        float x = GEOM_WIDTH * sinf( step );
        float y = GEOM_HEIGHT * cosf( step );
        glVertex3f( x, y, 0.0f );
    }
    glEnd();

    // render the border
    if ( _isHighlighted )
    {
        Eigen::Vector3f col = RenderManager::get()->getHeighlightColour();
        glColor3f( col.x(), col.y(), col.z() );
    }
    else
    {
        glColor3f( 0.3f, 0.3f, 0.0f );
    }

    glLineWidth( 8.0f );
    glBegin( GL_LINE_LOOP );
    for ( float step = 0.0f; step < 2.0f * M_PI; step += 0.05f )
    {
        float x = ( GEOM_WIDTH + 0.9f ) * sinf( step );
        float y = ( GEOM_HEIGHT + 0.9f ) * cosf( step );
        glVertex3f( x, y, 0.0f );
    }
    glEnd();

    // render the node text
    glColor3f( 0.0f, 0.0f, 0.0f );
    std::string text( getText() );
    if ( text.length() )
    {
        Eigen::Vector3f tmin, tmax;
        RenderManager::get()->fontGetDims( text.c_str(), tmin, tmax );

        float xpos = tmax.x() - tmin.x();
        float ypos = tmax.y() - tmin.y();
        if ( getScale().x() )
            xpos /= getScale().x();

        if ( getScale().y() )
            ypos /= getScale().y();

        Eigen::Vector2f pos( -xpos / 2.0f, -ypos / 2.0f );
        RenderManager::get()->fontRender( text.c_str(), pos );
    }

    glPopMatrix();
}
	void EXPORT_API InitShapeMatching(float* restPositions, float* invMasses, bool* posLocks, int pointsCount, float* restCMs, float* invRestMat)
	{

		Eigen::Vector3f* X_0 = new Eigen::Vector3f[pointsCount];
		float* w = new float[pointsCount];

		for (int i = 0; i < pointsCount; i++)
		{
			X_0[i] = Eigen::Map<Eigen::Vector3f>(restPositions + (i * 4));
			w[i] = posLocks[i] ? 0.0f : invMasses[i];
		}

		Eigen::Vector3f restCM;
		Eigen::Matrix3f A;


		PBD::PositionBasedDynamics::initShapeMatchingRestInfo(X_0, w, pointsCount, restCM, A);

		restCMs[0] = restCM.x();
		restCMs[1] = restCM.y();
		restCMs[2] = restCM.z();
		restCMs[3] = 0.0f;

		invRestMat[0] = A(0, 0); invRestMat[1] = A(0, 1); invRestMat[2] = A(0, 2);
		invRestMat[4] = A(1, 0); invRestMat[5] = A(1, 1); invRestMat[6] = A(1, 2);
		invRestMat[8] = A(2, 0); invRestMat[9] = A(2, 1); invRestMat[10]= A(2, 2);

		delete[] X_0;
		delete[] w;
		
	}
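A minimal call sketch with hypothetical data, assuming the 4-float (xyzw) stride per rest position implied by the mapping above:

const int n = 2;
float restPositions[4 * n] = { 0.f, 0.f, 0.f, 1.f,   1.f, 0.f, 0.f, 1.f };
float invMasses[n] = { 1.0f, 1.0f };
bool  posLocks[n]  = { false, false };
float restCMs[4];
float invRestMat[16] = { 0.f };
InitShapeMatching(restPositions, invMasses, posLocks, n, restCMs, invRestMat);
// restCMs now holds the rest center of mass; invRestMat holds A in its upper-left 3x3 (row stride 4)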
	void EXPORT_API InitTetrasShapeMatchingJB(btVector3* initialPositions,float* invMasses, bool* posLocks, Tetrahedron* tetras, btVector3* restCMs, float* invRestMat, int tetrasCount)
	{
		for (int i = 0; i < tetrasCount; i++)
		{
			Tetrahedron tetra = tetras[i];

			Eigen::Vector3f x1_0(initialPositions[tetra.idA].x(), initialPositions[tetra.idA].y(), initialPositions[tetra.idA].z());
			Eigen::Vector3f x2_0(initialPositions[tetra.idB].x(), initialPositions[tetra.idB].y(), initialPositions[tetra.idB].z());
			Eigen::Vector3f x3_0(initialPositions[tetra.idC].x(), initialPositions[tetra.idC].y(), initialPositions[tetra.idC].z());
			Eigen::Vector3f x4_0(initialPositions[tetra.idD].x(), initialPositions[tetra.idD].y(), initialPositions[tetra.idD].z());


			float w1 = posLocks[tetra.idA] ? 0.0f : invMasses[tetra.idA];
			float w2 = posLocks[tetra.idB] ? 0.0f : invMasses[tetra.idB];
			float w3 = posLocks[tetra.idC] ? 0.0f : invMasses[tetra.idC];
			float w4 = posLocks[tetra.idD] ? 0.0f : invMasses[tetra.idD];

			Eigen::Vector3f restCM;
			Eigen::Matrix3f A;


			Eigen::Vector3f x0[4] = { x1_0, x2_0, x3_0, x4_0 };
			float w[4] = { w1, w2, w3, w4 };
			Eigen::Vector3f corr[4];

			PBD::PositionBasedDynamics::initShapeMatchingRestInfo(x0, w, 4, restCM, A);

			restCMs[i] = btVector3(restCM.x(), restCM.y(), restCM.z());

			invRestMat[16 * i + 0] = A(0, 0); invRestMat[16 * i + 1] = A(0, 1); invRestMat[16 * i + 2] = A(0, 2);
			invRestMat[16 * i + 4] = A(1, 0); invRestMat[16 * i + 5] = A(1, 1); invRestMat[16 * i + 6] = A(1, 2);
			invRestMat[16 * i + 8] = A(2, 0); invRestMat[16 * i + 9] = A(2, 1); invRestMat[16 * i +10] = A(2, 2);
		}
	}
Example #6
/**
 * Checks whether a plane cuts a point cloud in two
 * @param  pc      point cloud to evaluate
 * @param  coefs   plane coefficients
 * @param  p_plane a point lying on the plane
 * @return         True if the plane cuts the cloud, false otherwise.
 */
bool isPointCloudCutByPlane(PointCloud<PointXYZ>::Ptr pc, ModelCoefficients::Ptr coefs, PointXYZ p_plane){
	/*
	Algorithm:
		- Obtain the normal vector from the plane coefficients
		- Iterate over the points of the cloud
			- Compute the delta vector between the given point (on the plane) and the iterated point
			- Compute the angle between the delta vector and the normal
			- Angles smaller than PI/2 are on one side, larger ones on the other
			- If points appear on both sides, return true; otherwise, false.
	*/
	const double PI = 3.1416;
	Eigen::Vector3f normal = Eigen::Vector3f(coefs->values[0], coefs->values[1], coefs->values[2]);
	normal.normalize();
	// cout << "Normal: " << normal << endl;
	bool side;
	// Iterate over the points
	for (size_t i=0; i<pc->points.size(); i++){
		// Compute the angle between the point and the normal
		Eigen::Vector3f delta = Eigen::Vector3f (p_plane.x, p_plane.y, p_plane.z) - Eigen::Vector3f(pc->points[i].x, pc->points[i].y, pc->points[i].z);
		delta.normalize();
		double alpha = acos(normal.dot(delta));
		// printf ("Alpha: %f°\n", (alpha*180/PI));
		if (i==0){
			side = (alpha < PI/2.0);
			// printf("Lado escogido: %s", side ? "true": "false");
			continue;
		}
		if (side != (alpha < PI/2.0)){
			// printf("Nube es cortada por plano\n");
			return true;
		}
	}
	// printf("Nube NO es cortada por plano\n");
	return false;
}
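A usage sketch with hypothetical data: the plane z = 0 and a cloud with one point on each side, so the function should return true.

pcl::PointCloud<pcl::PointXYZ>::Ptr pc(new pcl::PointCloud<pcl::PointXYZ>);
pc->points.push_back(pcl::PointXYZ(0.0f, 0.0f,  1.0f));
pc->points.push_back(pcl::PointXYZ(0.0f, 0.0f, -1.0f));

pcl::ModelCoefficients::Ptr coefs(new pcl::ModelCoefficients);
coefs->values = { 0.0f, 0.0f, 1.0f, 0.0f };   // normal (0,0,1), d = 0
pcl::PointXYZ p_plane(0.0f, 0.0f, 0.0f);      // any point on the plane

bool cut = isPointCloudCutByPlane(pc, coefs, p_plane);   // true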
Example #7
void WorldDownloadManager::findExtraCubesForBoundingBox(const Eigen::Vector3f& current_cube_min,
  const Eigen::Vector3f& current_cube_max,const Eigen::Vector3f& bbox_min,const Eigen::Vector3f& bbox_max,Vector3fVector& cubes_centers,
  bool& extract_current)
{
  const Eigen::Vector3f & cube_size = current_cube_max - current_cube_min;
  cubes_centers.clear();
  extract_current = false;

  const Eigen::Vector3f relative_act_bbox_min = bbox_min - current_cube_min;
  const Eigen::Vector3f relative_act_bbox_max = bbox_max - current_cube_min;
  const Eigen::Vector3i num_cubes_plus = Eigen::Vector3f(floor3f(Eigen::Vector3f(relative_act_bbox_max.array() / cube_size.array())
    - (Eigen::Vector3f::Ones() * 0.0001))).cast<int>();
  const Eigen::Vector3i num_cubes_minus = Eigen::Vector3f(floor3f(Eigen::Vector3f(relative_act_bbox_min.array() / cube_size.array())
    + (Eigen::Vector3f::Ones() * 0.0001))).cast<int>();
  for (int z = num_cubes_minus.z(); z <= num_cubes_plus.z(); z++)
    for (int y = num_cubes_minus.y(); y <= num_cubes_plus.y(); y++)
      for (int x = num_cubes_minus.x(); x <= num_cubes_plus.x(); x++)
      {
        const Eigen::Vector3i cube_index(x,y,z);
        if ((cube_index.array() == Eigen::Vector3i::Zero().array()).all())
        {
          extract_current = true;
          continue;
        }

        const Eigen::Vector3f relative_cube_origin = cube_index.cast<float>().array() * cube_size.array();
        const Eigen::Vector3f cube_center = relative_cube_origin + current_cube_min + (cube_size * 0.5);
        cubes_centers.push_back(cube_center);
      }
}
Example #8
void SensorFusion::reset(const Eigen::Vector3f& accel, const Eigen::Vector3f& magnetom)
{
	G_Dt = 0;

	Eigen::Vector3f temp1;
	Eigen::Vector3f temp2;
	Eigen::Vector3f xAxis;
	xAxis << 1.0f, 0.0f, 0.0f;

	timestamp = highResClock.now();

	// GET PITCH
	// Using y-z-plane-component/x-component of gravity vector
	pitch = -atan2f(accel(0), sqrtf(accel(1) * accel(1) + accel(2) * accel(2)));

	// GET ROLL
	// Compensate pitch of gravity vector
	temp1 = accel.cross(xAxis);
	temp2 = xAxis.cross(temp1);

	// Normally using x-z-plane-component/y-component of compensated gravity vector
	// roll = atan2(temp2[1], sqrt(temp2[0] * temp2[0] + temp2[2] * temp2[2]));
	// Since we compensated for pitch, x-z-plane-component equals z-component:
	roll = atan2f(temp2(1), temp2(2));

	// GET YAW
	compassHeading(magnetom);
	yaw = magHeading;

	// Init rotation matrix
	init_rotation_matrix(dcmMatrix, yaw, pitch, roll);
}
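A numeric check of the pitch/roll derivation above, using a hypothetical accelerometer reading for a device lying flat (gravity along +Z):

Eigen::Vector3f accel(0.0f, 0.0f, 9.81f);
Eigen::Vector3f xAxis(1.0f, 0.0f, 0.0f);
float pitch = -atan2f(accel(0), sqrtf(accel(1)*accel(1) + accel(2)*accel(2)));  // 0
Eigen::Vector3f temp2 = xAxis.cross(accel.cross(xAxis));                        // (0, 0, 9.81)
float roll = atan2f(temp2(1), temp2(2));                                        // 0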
Example #9
void adjustNormalsToViewPoints(
		const pcl::PointCloud<pcl::PointXYZ>::Ptr & viewpoints,
		pcl::PointCloud<pcl::PointXYZRGBNormal> & cloud)
{
	if(viewpoints->size() && cloud.size())
	{
		pcl::search::KdTree<pcl::PointXYZ>::Ptr tree (new pcl::search::KdTree<pcl::PointXYZ>);
		tree->setInputCloud (viewpoints);

		for(unsigned int i=0; i<cloud.size(); ++i)
		{
			std::vector<int> indices;
			std::vector<float> dist;
			tree->nearestKSearch(pcl::PointXYZ(cloud.points[i].x, cloud.points[i].y, cloud.points[i].z), 1, indices, dist);
			UASSERT(indices.size() == 1);

			Eigen::Vector3f v = viewpoints->at(indices[0]).getVector3fMap() - cloud.points[i].getVector3fMap();
			Eigen::Vector3f n(cloud.points[i].normal_x, cloud.points[i].normal_y, cloud.points[i].normal_z);

			float result = v.dot(n);
			if(result < 0)
			{
				//reverse normal
				cloud.points[i].normal_x *= -1.0f;
				cloud.points[i].normal_y *= -1.0f;
				cloud.points[i].normal_z *= -1.0f;
			}
		}
	}
}
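The flip test in isolation, with hypothetical vectors: a normal pointing away from the viewpoint gets reversed.

Eigen::Vector3f viewpoint(0.0f, 0.0f, 1.0f);
Eigen::Vector3f point(0.0f, 0.0f, 0.0f);
Eigen::Vector3f n(0.0f, 0.0f, -1.0f);          // normal facing away from the viewpoint

Eigen::Vector3f v = viewpoint - point;
if (v.dot(n) < 0)
    n = -n;                                    // n is now (0, 0, 1), facing the viewpoint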
float robotDepth()
{
	// Two interesting points
	float alpha1 = (x + (sx-height)/2)/(float)height*2*tan(VFOV/2.);
	float alpha2 = (x - (sx+height)/2)/(float)height*2*tan(VFOV/2.);
	float beta = (y - width/2)/(float)width*2*tan(HFOV/2.);

	// Vectors
	Eigen::Vector3f v1(1,-beta,-alpha1);
	Eigen::Vector3f v2(1,-beta,-alpha2);

	// Normalization
	v1 = v1/v1.norm();
	v2 = v2/v2.norm();

	// Center
	Eigen::Vector3f c = (v1+v2)/2.;
	float c_norm = c.norm();

	// Projection
	Eigen::MatrixXf proj_mat = c.transpose()*v1;
	float proj = proj_mat(0,0);

	// Orthogonal part in v1
	Eigen::Vector3f orth = v1 - proj/c_norm*c;

	// Norm
	float orth_norm = orth.norm();

	// Approximate depth
	float d = H/2.*proj/orth_norm;
	return d;
}
Example #11
File: model.cpp  Project: Noctune/Bumpier
png::image<png::rgb_pixel_16> bumpier::model::calculate_normals(const png::image<png::gray_pixel_16>& input, double phi, space_type space) const
{
    int width = input.get_width(), height = input.get_height();
    png::image<png::rgb_pixel_16> output(width, height);

    for (int y = 0; y < height; y++)
        for (int x = 0; x < width; x++)
        {
            int xmin = (x + width  - 1) % width,
                ymin = (y + height - 1) % height,
                xmax = (x + 1) % width,
                ymax = (y + 1) % height;

            Eigen::Vector3f normal = (position(input, x, ymax) - position(input, x, ymin)).cross(
                                      position(input, xmax, y) - position(input, xmin, y));

            if (space == tangent_space)
                normal = tangentspace(Eigen::Vector2f((float)x / width, (float)y / height)) * normal;

            normal.normalize();

            normal = (normal.array() * 0.5 + 0.5) * 0xFFFF;
            output.set_pixel(x, y, png::rgb_pixel_16(normal[0], normal[1], normal[2]));
        }

    return output;
}
Example #12
void m2tq(Eigen::Vector3f& t, Eigen::Quaternionf& q, const Eigen::Matrix4f& m){
  t.x()=m(0,3);
  t.y()=m(1,3);
  t.z()=m(2,3);
  Eigen::Matrix3f R=m.block<3,3>(0,0);
  q=Eigen::Quaternionf(R);
}
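A round-trip sketch with hypothetical values: build a 4x4 pose, then extract the translation and quaternion with m2tq.

Eigen::Matrix4f m = Eigen::Matrix4f::Identity();
m.block<3,3>(0,0) = Eigen::AngleAxisf(float(M_PI / 2), Eigen::Vector3f::UnitZ()).toRotationMatrix();
m.block<3,1>(0,3) = Eigen::Vector3f(1.0f, 2.0f, 3.0f);

Eigen::Vector3f t;
Eigen::Quaternionf q;
m2tq(t, q, m);   // t == (1, 2, 3), q ≈ a 90-degree rotation about Z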
Example #13
template <typename PointNT> bool
pcl::GridProjection<PointNT>::isIntersected (const std::vector<Eigen::Vector4f, Eigen::aligned_allocator<Eigen::Vector4f> > &end_pts,
                                             std::vector<Eigen::Vector3f, Eigen::aligned_allocator<Eigen::Vector3f> > &vect_at_end_pts,
                                             std::vector <int> &pt_union_indices)
{
  assert (end_pts.size () == 2);
  assert (vect_at_end_pts.size () == 2);

  double length[2];
  for (size_t i = 0; i < 2; ++i)
  {
    length[i] = vect_at_end_pts[i].norm ();
    vect_at_end_pts[i].normalize ();
  }
  double dot_prod = vect_at_end_pts[0].dot (vect_at_end_pts[1]);
  if (dot_prod < 0)
  {
    double ratio = length[0] / (length[0] + length[1]);
    Eigen::Vector4f start_pt = end_pts[0] + (end_pts[1] - end_pts[0]) * ratio;
    Eigen::Vector4f intersection_pt = Eigen::Vector4f::Zero ();
    findIntersection (0, end_pts, vect_at_end_pts, start_pt, pt_union_indices, intersection_pt);

    Eigen::Vector3f vec;
    getVectorAtPoint (intersection_pt, pt_union_indices, vec);
    vec.normalize ();

    double d2 = getD2AtPoint (intersection_pt, vec, pt_union_indices);
    if (d2 < 0)
      return (true);
  }
  return (false);
}
Example #14
template <typename PointNT> void
pcl::GridProjection<PointNT>::getProjectionWithPlaneFit (const Eigen::Vector4f &p,
                                                         std::vector<int> (&pt_union_indices),
                                                         Eigen::Vector4f &projection)
{
  // Compute the plane coefficients
  Eigen::Vector4f model_coefficients;
  /// @remark iterative weighted least squares or sac might give better results
  Eigen::Matrix3f covariance_matrix;
  Eigen::Vector4f xyz_centroid;

  computeMeanAndCovarianceMatrix (*data_, pt_union_indices, covariance_matrix, xyz_centroid);

  // Get the plane normal
  EIGEN_ALIGN16 Eigen::Vector3f::Scalar eigen_value = -1;
  EIGEN_ALIGN16 Eigen::Vector3f eigen_vector;
  pcl::eigen33 (covariance_matrix, eigen_value, eigen_vector);

  // The normalization is not necessary, since the eigenvectors from libeigen are already normalized
  model_coefficients[0] = eigen_vector [0];
  model_coefficients[1] = eigen_vector [1];
  model_coefficients[2] = eigen_vector [2];
  model_coefficients[3] = 0;
  // Hessian form (D = nc . p_plane (centroid here) + p)
  model_coefficients[3] = -1 * model_coefficients.dot (xyz_centroid);

  // Projected point
  Eigen::Vector3f point (p.x (), p.y (), p.z ());     //= Eigen::Vector3f::MapAligned (&output.points[cp].x, 3);
  float distance = point.dot (model_coefficients.head <3> ()) + model_coefficients[3];
  point -= distance * model_coefficients.head < 3 > ();

  projection = Eigen::Vector4f (point[0], point[1], point[2], 0);
}
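The projection step at the end, written out on its own with hypothetical plane coefficients (unit normal n and offset d in Hessian form n.dot(x) + d = 0):

Eigen::Vector3f n(0.0f, 0.0f, 1.0f);           // unit plane normal
float d = -2.0f;                               // plane z = 2
Eigen::Vector3f p(1.0f, 1.0f, 5.0f);

float distance = p.dot(n) + d;                 // signed distance = 3
Eigen::Vector3f projected = p - distance * n;  // (1, 1, 2)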
Example #15
template <typename PointSource, typename PointTarget, typename NormalT, typename Scalar> int
pcl::registration::FPCSInitialAlignment <PointSource, PointTarget, NormalT, Scalar>::selectBaseTriangle (std::vector <int> &base_indices)
{
  int nr_points = static_cast <int> (target_indices_->size ());
  float best_t = 0.f;

  // choose random first point
  base_indices[0] = (*target_indices_)[rand () % nr_points];
  int *index1 = &base_indices[0];

  // random search for 2 other points (as far away as overlap allows)
  for (int i = 0; i < ransac_iterations_; i++)
  {
    int *index2 = &(*target_indices_)[rand () % nr_points];
    int *index3 = &(*target_indices_)[rand () % nr_points];

    Eigen::Vector3f u = target_->points[*index2].getVector3fMap () - target_->points[*index1].getVector3fMap ();
    Eigen::Vector3f v = target_->points[*index3].getVector3fMap () - target_->points[*index1].getVector3fMap ();
    float t = u.cross (v).squaredNorm (); // triangle area (0.5 * sqrt(t)) should be maximal

    // check for most suitable point triple
    if (t > best_t && u.squaredNorm () < max_base_diameter_sqr_ && v.squaredNorm () < max_base_diameter_sqr_)
    {
      best_t = t;
      base_indices[1] = *index2;
      base_indices[2] = *index3;
    }
  }

  // return if a triplet could be selected
  return (best_t == 0.f ? -1 : 0);
}
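A quick check of the area relation used for the triangle criterion above, with hypothetical vectors:

Eigen::Vector3f u(1.0f, 0.0f, 0.0f);
Eigen::Vector3f v(0.0f, 2.0f, 0.0f);
float t = u.cross(v).squaredNorm();        // 4
float area = 0.5f * std::sqrt(t);          // 1, the area of a right triangle with legs 1 and 2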
Example #16
File: light.cpp  Project: xanxys/construct
boost::optional<Intersection> Triangle::intersect(Ray ray) {
	Eigen::Vector3f s1 = ray.dir.cross(d2);
	const float div = s1.dot(d1);
	if(div <= 0) {  // parallel or back
		return boost::optional<Intersection>();
	}

	const float div_inv = 1 / div;
		
	Eigen::Vector3f s  = ray.org - p0;
	const float a = s.dot(s1) * div_inv;
	if(a < 0 || a > 1) {
		return boost::optional<Intersection>();
	}
				
	Eigen::Vector3f s2 = s.cross(d1);
	const float b = ray.dir.dot(s2) * div_inv;
		
	if(b < 0 || a + b > 1) {
		return boost::optional<Intersection>();
	}

	const float t = d2.dot(s2) * div_inv;
	if(t < 0) {
		return boost::optional<Intersection>();
	}

	return boost::optional<Intersection>(Intersection(
		t,
		ray.at(t),
		normal,
		(1 - a - b) * uv0 + a * uv1 + b * uv2,
		(1 - a - b) * ir0 + a * ir1 + b * ir2,
		attribute));
}
Example #17
// This algorithm comes from Sebastian O.H. Madgwick's 2010 paper:
// "An efficient orientation filter for inertial and inertial/magnetic sensor arrays"
// https://www.samba.org/tridge/UAV/madgwick_internal_report.pdf
static void _psmove_orientation_fusion_imu_update(
	PSMoveOrientation *orientation_state,
	float delta_t,
	const Eigen::Vector3f &current_omega,
	const Eigen::Vector3f &current_g)
{
	// Current orientation from earth frame to sensor frame
	Eigen::Quaternionf SEq = orientation_state->quaternion;
	Eigen::Quaternionf SEq_new = SEq;

	// Compute the quaternion derivative measured by gyroscopes
	// Eqn 12) q_dot = 0.5*q*omega
	Eigen::Quaternionf omega = Eigen::Quaternionf(0.f, current_omega.x(), current_omega.y(), current_omega.z());
	Eigen::Quaternionf SEqDot_omega = Eigen::Quaternionf(SEq.coeffs() * 0.5f) *omega;

	// Only apply the gravity-based correction when the accelerometer reading is non-zero
	if (!current_g.isApprox(Eigen::Vector3f::Zero(), k_normal_epsilon))
	{
		// Get the direction of the gravitational fields in the identity pose		
		PSMove_3AxisVector identity_g= psmove_orientation_get_gravity_calibration_direction(orientation_state);
		Eigen::Vector3f k_identity_g_direction = Eigen::Vector3f(identity_g.x, identity_g.y, identity_g.z);

		// Eqn 15) Applied to the gravity vector
		// Fill in the 3x1 objective function matrix f(SEq, Sa) =|f_g|
		Eigen::Matrix<float, 3, 1> f_g;
		psmove_alignment_compute_objective_vector(SEq, k_identity_g_direction, current_g, f_g, NULL);

		// Eqn 21) Applied to the gravity vector
		// Fill in the 4x3 objective function Jacobian matrix: J_gb(SEq)= [J_g]
		Eigen::Matrix<float, 4, 3> J_g;
		psmove_alignment_compute_objective_jacobian(SEq, k_identity_g_direction, J_g);

		// Eqn 34) gradient_F= J_g(SEq)*f(SEq, Sa)
		// Compute the gradient of the objective function
		Eigen::Matrix<float, 4, 1> gradient_f = J_g * f_g;
		Eigen::Quaternionf SEqHatDot =
			Eigen::Quaternionf(gradient_f(0, 0), gradient_f(1, 0), gradient_f(2, 0), gradient_f(3, 0));

		// normalize the gradient
		psmove_quaternion_normalize_with_default(SEqHatDot, *k_psmove_quaternion_zero);

		// Compute the estimated quaternion rate of change
		// Eqn 43) SEq_est = SEqDot_omega - beta*SEqHatDot
		Eigen::Quaternionf SEqDot_est = Eigen::Quaternionf(SEqDot_omega.coeffs() - SEqHatDot.coeffs()*beta);

		// Compute then integrate the estimated quaternion rate
		// Eqn 42) SEq_new = SEq + SEqDot_est*delta_t
		SEq_new = Eigen::Quaternionf(SEq.coeffs() + SEqDot_est.coeffs()*delta_t);
	}
	else
	{
		SEq_new = Eigen::Quaternionf(SEq.coeffs() + SEqDot_omega.coeffs()*delta_t);
	}

	// Make sure the net quaternion is a pure rotation quaternion
	SEq_new.normalize();

	// Save the new quaternion back into the orientation state
	orientation_state->quaternion = SEq_new;
}
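A minimal sketch of the gyro-only branch (Eqns 12 and 42) in isolation, with hypothetical values for the angular rate and time step:

Eigen::Quaternionf q = Eigen::Quaternionf::Identity();     // current orientation
Eigen::Vector3f omega_v(0.0f, 0.0f, 0.1f);                 // rad/s
float dt = 0.01f;

Eigen::Quaternionf omega(0.f, omega_v.x(), omega_v.y(), omega_v.z());
Eigen::Quaternionf q_dot = Eigen::Quaternionf(q.coeffs() * 0.5f) * omega;   // Eqn 12
q = Eigen::Quaternionf(q.coeffs() + q_dot.coeffs() * dt);                   // Eqn 42
q.normalize();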
void
pd_solver_advance(struct PdSolver *solver, uint32_t const n_iterations)
{
        /* set external force */
        Eigen::VectorXf ext_accel = Eigen::VectorXf::Zero(solver->positions.size());
#pragma omp parallel for
        for (uint32_t i = 0; i < solver->positions.size()/3; ++i)
                for (int j = 0; j < 3; ++j)
                        ext_accel[3*i + j] = solver->ext_force[j];
        Eigen::VectorXf const ext_force = solver->mass_mat*ext_accel;
        Eigen::VectorXf const mass_y = solver->mass_mat*(2.0f*solver->positions - solver->positions_last);

        solver->positions_last = solver->positions;

        for (uint32_t iter = 0; iter < n_iterations; ++iter) {
                /* LOCAL STEP (we account for everything except the global solve) */
                struct timespec local_start;
                clock_gettime(CLOCK_MONOTONIC, &local_start);
                uint32_t const n_constraints = solver->n_attachments + solver->n_springs;
                Eigen::VectorXf d(3*n_constraints);
                for (uint32_t i = 0; i < solver->n_attachments; ++i) {
                        struct PdConstraintAttachment c = solver->attachments[i];
                        d.block<3, 1>(3*i, 0) = Eigen::Map<Eigen::Vector3f>(c.position);
                }

                uint32_t const offset = solver->n_attachments;
                for (uint32_t i = 0; i < solver->n_springs; ++i) {
                        struct PdConstraintSpring c = solver->springs[i];
                        Eigen::Vector3f const v = solver->positions.block<3, 1>(3*c.i[1], 0)
                                - solver->positions.block<3, 1>(3*c.i[0], 0);
                        d.block<3, 1>(3*(offset + i), 0) = v.normalized()*c.rest_length;
                }

                Eigen::VectorXf const b = mass_y + solver->t2*(solver->j_mat*d + ext_force);
                struct timespec local_end;
                clock_gettime(CLOCK_MONOTONIC, &local_end);

                /* GLOBAL STEP */
                struct timespec global_start;
                clock_gettime(CLOCK_MONOTONIC, &global_start);
                solve(solver, b);
                struct timespec global_end;
                clock_gettime(CLOCK_MONOTONIC, &global_end);

                solver->global_time = pd_time_diff_ms(&global_start, &global_end);
                solver->local_time = pd_time_diff_ms(&local_start, &local_end);

                solver->global_cma = (solver->global_time + solver->n_iters*solver->global_cma)/(solver->n_iters + 1);
                solver->local_cma = (solver->local_time + solver->n_iters*solver->local_cma)/(solver->n_iters + 1);

                if (!(solver->n_iters % 1000)) {
                        printf("Local CMA: %f ms\n", solver->local_cma);
                        printf("Global CMA: %f ms\n\n", solver->global_cma);
                }

                ++solver->n_iters;
        }
}
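The spring "local step" above projects each spring difference onto its rest length; in isolation, with hypothetical endpoints:

Eigen::Vector3f p0(0.0f, 0.0f, 0.0f);
Eigen::Vector3f p1(0.0f, 3.0f, 0.0f);
float rest_length = 1.0f;
Eigen::Vector3f d = (p1 - p0).normalized() * rest_length;   // (0, 1, 0)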
Example #19
template<typename PointT, typename LeafT, typename BranchT, typename OctreeT> int
pcl::octree::OctreePointCloud<PointT, LeafT, BranchT, OctreeT>::getApproxIntersectedVoxelCentersBySegment (
    const Eigen::Vector3f& origin,
    const Eigen::Vector3f& end,
    AlignedPointTVector &voxel_center_list,
    float precision)
{
  Eigen::Vector3f direction = end - origin;
  float norm = direction.norm ();
  direction.normalize ();

  const float step_size = static_cast<const float> (resolution_) * precision;
  // Ensure we get at least one step for the first voxel.
  const int nsteps = std::max (1, static_cast<int> (norm / step_size));

  OctreeKey prev_key;

  bool bkeyDefined = false;

  // Walk along the line segment with small steps.
  for (int i = 0; i < nsteps; ++i)
  {
    Eigen::Vector3f p = origin + (direction * step_size * static_cast<const float> (i));

    PointT octree_p;
    octree_p.x = p.x ();
    octree_p.y = p.y ();
    octree_p.z = p.z ();

    OctreeKey key;
    this->genOctreeKeyforPoint (octree_p, key);

    // Not a new key, still the same voxel.
    if ((key == prev_key) && (bkeyDefined) )
      continue;

    prev_key = key;
    bkeyDefined = true;

    PointT center;
    genLeafNodeCenterFromOctreeKey (key, center);
    voxel_center_list.push_back (center);
  }

  OctreeKey end_key;
  PointT end_p;
  end_p.x = end.x ();
  end_p.y = end.y ();
  end_p.z = end.z ();
  this->genOctreeKeyforPoint (end_p, end_key);
  if (!(end_key == prev_key))
  {
    PointT center;
    genLeafNodeCenterFromOctreeKey (end_key, center);
    voxel_center_list.push_back (center);
  }

  return (static_cast<int> (voxel_center_list.size ()));
}
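A usage sketch with a hypothetical resolution and point, using the OctreePointCloudSearch subclass (which inherits this method):

pcl::octree::OctreePointCloudSearch<pcl::PointXYZ> octree(0.1);   // 10 cm voxels
pcl::PointCloud<pcl::PointXYZ>::Ptr cloud(new pcl::PointCloud<pcl::PointXYZ>);
cloud->push_back(pcl::PointXYZ(0.05f, 0.05f, 0.05f));
octree.setInputCloud(cloud);
octree.addPointsFromInputCloud();

std::vector<pcl::PointXYZ, Eigen::aligned_allocator<pcl::PointXYZ> > centers;
int n = octree.getApproxIntersectedVoxelCentersBySegment(
    Eigen::Vector3f(0.f, 0.f, 0.f), Eigen::Vector3f(1.f, 0.f, 0.f), centers, 0.5f);
// n approximate voxel centers along the segment are now in `centers`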
Example #20
void Camera::setRotate3D(const Eigen::Vector3f& rot) {
    Eigen::AngleAxis<float> aaZ(rot.z(), Eigen::Vector3f::UnitZ());
    Eigen::AngleAxis<float> aaY(rot.y(), Eigen::Vector3f::UnitY());
    Eigen::AngleAxis<float> aaX(rot.x(), Eigen::Vector3f::UnitX());

    rotation_future_ = aaZ * aaY * aaX;
    emit modified();
}
Example #21
/** Add a new point to the line string.
  */
void
MapLineString::addPoint(const Eigen::Vector3f& p)
{
    // TODO: Automatically split arcs that cross the 180 degree meridian

    m_points.push_back(p);
    setBounds(bounds().extend(Vector2f(p.x(), p.y())));
}
Example #22
File: RayTracer.cpp  Project: cthomp18/GI
Eigen::Vector3f RayTracer::findReflect(Eigen::Vector3f ray, Eigen::Vector3f normal, SceneObject* obj) {
   Eigen::Vector3f reflectRay;
   
   reflectRay = ray + (2.0*normal*(normal.dot(-ray)));
   reflectRay.normalize();
   
   return reflectRay;
}
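A quick check of the reflection formula with hypothetical values: a ray hitting a horizontal surface at 45 degrees.

Eigen::Vector3f ray(1.0f, -1.0f, 0.0f);
ray.normalize();
Eigen::Vector3f normal(0.0f, 1.0f, 0.0f);
Eigen::Vector3f reflected = ray + 2.0f * normal * (normal.dot(-ray));   // ≈ (0.707, 0.707, 0)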
Example #23
File: Camera.cpp  Project: Scoobadood/tsdf
/**
 * Convert the global coordinates into camera space
 * Multiply by pose.inverse()
 * @param world_coordinate The 3D point in world space
 * @return camera_coordinate The 3D point in camera space
 */
Eigen::Vector3f Camera::world_to_camera( const Eigen::Vector3f & world_coordinate ) const {
    using namespace Eigen;

    Vector4f world_h { world_coordinate.x(), world_coordinate.y(), world_coordinate.z(), 1.0f};
    Vector4f cam_h = m_pose_inverse * world_h;

    return cam_h.block(0, 0, 3, 1) / cam_h[3];
}
Example #24
File: Camera.cpp  Project: Scoobadood/tsdf
/**
 * Convert the camera coordinate into world space
 * Multiply by pose
 * @param camera_coordinate The 3D point in camera space
 * @return world_coordinate The 3D point in world space
 */
Eigen::Vector3f Camera::camera_to_world( const Eigen::Vector3f & camera_coordinate ) const {
    using namespace Eigen;

    Vector4f cam_h { camera_coordinate.x(), camera_coordinate.y(), camera_coordinate.z(), 1.0f};
    Vector4f world_h = m_pose * cam_h;

    return world_h.block(0, 0, 3, 1) / world_h.w();
}
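A round-trip sketch with a hypothetical Camera instance `cam`: camera_to_world should invert world_to_camera up to floating-point error.

Eigen::Vector3f p_world(1.0f, 2.0f, 3.0f);
Eigen::Vector3f p_cam  = cam.world_to_camera(p_world);
Eigen::Vector3f p_back = cam.camera_to_world(p_cam);   // ≈ (1, 2, 3)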
Example #25
Eigen::Vector2f PhotoCamera::project(const Eigen::Vector3f &point)
{
    Eigen::MatrixXf P = intrinsicMatrix*extrinsicMatrix.matrix();
    Eigen::Vector4f pointH;
    pointH << point(0), point(1), point(2), 1;
    Eigen::Vector3f pixelH = P*pointH;
    return pixelH.hnormalized();
}
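The same projection written out for a hypothetical 3x4 projection matrix P (intrinsics times extrinsics):

Eigen::Matrix<float, 3, 4> P;
P << 500.f,   0.f, 320.f, 0.f,
       0.f, 500.f, 240.f, 0.f,
       0.f,   0.f,   1.f, 0.f;

Eigen::Vector4f X(0.1f, 0.2f, 2.0f, 1.0f);
Eigen::Vector2f pixel = (P * X).hnormalized();   // divides by the homogeneous coordinate: (345, 290)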
Example #26
void stateCallback(const nav_msgs::Odometry::ConstPtr& state)
{
  
	
  vel[0] = state->twist.twist.linear.x;
  vel[1] = state->twist.twist.linear.y;
  vel[2] = state->twist.twist.linear.z;
  pos[2] = state->pose.pose.position.z;

  //q.x() = state->pose.pose.orientation.x;
  //q.y() = state->pose.pose.orientation.y;
  //q.z() = state->pose.pose.orientation.z;
  //q.w() = state->pose.pose.orientation.w;
  float q_x = state->pose.pose.orientation.x;
  float q_y = state->pose.pose.orientation.y;
  float q_z = state->pose.pose.orientation.z;
  float q_w = state->pose.pose.orientation.w;

  float yaw = atan2(2*(q_w*q_z + q_x*q_y), 1 - 2*(q_y*q_y + q_z*q_z));
  //Eigen::Matrix3d R(q);

  force(0) = offset_x + k_vxy*(vel_des(0) - vel(0)) + m*acc_des(0);
  if (pd_control == 0)
  {
    force(1) = offset_y + k_vxy*(vel_des(1) - vel(1)) + m*acc_des(1);
  }
  else if (pd_control == 1)
  {
    pos[1] = state->pose.pose.position.y;
    force(1) = offset_y + k_vxy*(vel_des(1) - vel(1)) + k_xy*(0 - pos[1]) + m*acc_des(1);
  }
  force(2) = k_z*(pos_des(2) - pos(2)) + k_vz*(vel_des(2) - vel(2)) + m*g(2) + m*acc_des(2);

  b3 = force.normalized();
  b2 = b3.cross(b1w);
  b1 = b2.cross(b3);
  R_des << b1[0], b2[0], b3[0],
           b1[1], b2[1], b3[1],
           b1[2], b2[2], b3[2];
  Eigen::Quaternionf q_des(R_des);

  quadrotor_msgs::SO3Command command;
  command.force.x = force[0];
  command.force.y = force[1];
  command.force.z = force[2];
  command.orientation.x = q_des.x();
  command.orientation.y = q_des.y();
  command.orientation.z = q_des.z();
  command.orientation.w = q_des.w();
  command.kR[0] = k_R;
  command.kR[1] = k_R;
  command.kR[2] = k_R;
  command.kOm[0] = k_omg;
  command.kOm[1] = k_omg;
  command.kOm[2] = k_omg;
  command.aux.current_yaw = yaw;
  command.aux.enable_motors = true;
  command.aux.use_external_yaw = true;
  control_pub.publish(command);
}
Example #27
File: line_info.cpp  Project: timn/fawkes
/** Update this line.
 * This also updates moving averages for all fields.
 * @param linfo new info to consume
 */
void TrackedLineInfo::update(LineInfo &linfo)
{
  if (visibility_history <= 0)
    visibility_history = 1;
  else
    visibility_history += 1;

  this->raw = linfo;
  fawkes::tf::Stamped<fawkes::tf::Point> bp_new(
	  fawkes::tf::Point(
	      linfo.base_point[0], linfo.base_point[1], linfo.base_point[2]
	  ), fawkes::Time(0,0), input_frame_id);
  try {
    transformer->transform_point(tracking_frame_id, bp_new, this->base_point_odom);
  } catch (fawkes::tf::TransformException &e) {
    logger->log_warn(plugin_name.c_str(), "Can't transform to %s. Attempting to track in %s.",
										 tracking_frame_id.c_str(), input_frame_id.c_str());
    this->base_point_odom = bp_new;
  }
  this->history.push_back(linfo);

  Eigen::Vector3f base_point_sum(0,0,0), end_point_1_sum(0,0,0),
	  end_point_2_sum(0,0,0), line_direction_sum(0,0,0), point_on_line_sum(0,0,0);
  float length_sum(0);
  for (LineInfo &l : this->history) {
	base_point_sum += l.base_point;
	end_point_1_sum += l.end_point_1;
	end_point_2_sum += l.end_point_2;
	line_direction_sum += l.line_direction;
	point_on_line_sum += l.point_on_line;
	length_sum += l.length;
  }

  size_t sz = this->history.size();
  this->smooth.base_point = base_point_sum / sz;
  this->smooth.cloud = linfo.cloud;
  this->smooth.end_point_1 = end_point_1_sum / sz;
  this->smooth.end_point_2 = end_point_2_sum / sz;
  this->smooth.length = length_sum / sz;
  this->smooth.line_direction = line_direction_sum / sz;
  this->smooth.point_on_line = point_on_line_sum / sz;

  Eigen::Vector3f x_axis(1,0,0);

  Eigen::Vector3f ld_unit = this->smooth.line_direction / this->smooth.line_direction.norm();
  Eigen::Vector3f pol_invert = Eigen::Vector3f(0,0,0) - this->smooth.point_on_line;
  Eigen::Vector3f P = this->smooth.point_on_line + pol_invert.dot(ld_unit) * ld_unit;
  this->smooth.bearing = std::acos(x_axis.dot(P) / P.norm());
  // we also want to encode the direction of the angle
  if (P[1] < 0)
    this->smooth.bearing = std::abs(this->smooth.bearing) * -1.;

  Eigen::Vector3f l_diff = raw.end_point_2 - raw.end_point_1;
  Eigen::Vector3f l_ctr = raw.end_point_1 + l_diff / 2.;
  this->bearing_center = std::acos(x_axis.dot(l_ctr) / l_ctr.norm());
  if (l_ctr[1] < 0)
    this->bearing_center = std::abs(this->bearing_center) * -1.;
}
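The bearing convention used above, in isolation with a hypothetical point: the angle to the X axis, made negative when the point lies to the right (y < 0).

Eigen::Vector3f x_axis(1.0f, 0.0f, 0.0f);
Eigen::Vector3f p(1.0f, -1.0f, 0.0f);

float bearing = std::acos(x_axis.dot(p) / p.norm());   // pi/4
if (p[1] < 0)
    bearing = std::abs(bearing) * -1.f;                // -pi/4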
Example #28
void Task::alignPointcloudAsMLS(const base::Time& ts, const std::vector< base::Vector3d >& sample_pointcloud, const envire::TransformWithUncertainty& body2odometry)
{
    if(sample_pointcloud.size() == 0)
        return;
    boost::shared_ptr<envire::Environment> pointcloud_env;
    pointcloud_env.reset(new envire::Environment());
    envire::MLSProjection* pointcloud_projection = new envire::MLSProjection();
    pointcloud_projection->useNegativeInformation(false);
    pointcloud_projection->useUncertainty(true);
    double grid_size = 200.0;
    double cell_resolution = 0.1;
    double grid_count = grid_size / cell_resolution;
    envire::MultiLevelSurfaceGrid* pointcloud_grid = new envire::MultiLevelSurfaceGrid(grid_count, grid_count, cell_resolution, cell_resolution, -0.5 * grid_size, -0.5 * grid_size);
    pointcloud_grid->getConfig().updateModel = envire::MLSConfiguration::KALMAN;
    pointcloud_grid->getConfig().gapSize = 0.5;
    pointcloud_grid->getConfig().thickness = 0.05;
    pointcloud_env->attachItem(pointcloud_grid, pointcloud_env->getRootNode());
    pointcloud_env->addOutput(pointcloud_projection, pointcloud_grid);
    
    // create envire pointcloud
    envire::Pointcloud* pc = new envire::Pointcloud();
    pc->vertices.reserve(sample_pointcloud.size());
    for(unsigned i = 0; i < sample_pointcloud.size(); i++)
        pc->vertices.push_back(sample_pointcloud[i]);
    
    // update mls
    envire::MLSGrid* grid = pointcloud_projection->getOutput<envire::MLSGrid*>();
    if(!grid)
    {
        RTT::log(RTT::Error) << "Missing mls grid in pointcloud environment." << RTT::endlog();
        return;
    }
    grid->clear();
    pointcloud_env->attachItem(pc, pointcloud_env->getRootNode());
    pointcloud_env->addInput(pointcloud_projection, pc);
    pointcloud_projection->updateAll();
        
    // remove inputs
    pointcloud_env->removeInput(pointcloud_projection, pc);
    pointcloud_env->detachItem(pc, true);

    // create pointcloud from mls_grid
    PCLPointCloudPtr pcl_pointcloud(new PCLPointCloud());
    createPointcloudFromMLS(pcl_pointcloud, grid);
    
    // create debug pointcloud
    aligned_cloud.points.clear();
    aligned_cloud.colors.clear();
    aligned_cloud.points.reserve(pcl_pointcloud->size());
    for(unsigned i = 0; i < pcl_pointcloud->size(); i++)
    {
	Eigen::Vector3f point = pcl_pointcloud->at(i).getVector3fMap();
        aligned_cloud.points.push_back(base::Vector3d(point.x(), point.y(), point.z()));
    }
    aligned_cloud.colors.resize(aligned_cloud.points.size(), base::Vector4d(1.0, 0.0, 0.0, 1.0));
    
    alignPointcloud(ts, pcl_pointcloud, body2odometry);
}
Eigen::Vector3f FindObjectOnPlane::rayPlaneInteersect(
  const cv::Point3d& ray,
  const jsk_recognition_utils::Plane::Ptr& plane)
{
  Eigen::Vector3f n = plane->getNormal();
  Eigen::Vector3f d(ray.x, ray.y, ray.z);
  double alpha = - plane->getD() / n.dot(d);
  return alpha * d;
}
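A quick check of the ray/plane intersection above, with a hypothetical plane z = 2 (so getD() would return -2) and a viewing ray along +Z:

Eigen::Vector3f n(0.0f, 0.0f, 1.0f);      // plane normal
float plane_d = -2.0f;                    // plane: n.dot(x) + d = 0, i.e. z = 2
Eigen::Vector3f d(0.0f, 0.0f, 1.0f);      // ray direction from the origin
float alpha = -plane_d / n.dot(d);        // 2
Eigen::Vector3f hit = alpha * d;          // (0, 0, 2)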
static void
write_calibration_parameter(
    const Eigen::Vector3f &in_vector,
    PSMoveProtocol::FloatVector *out_vector)
{
    out_vector->set_i(in_vector.x());
    out_vector->set_j(in_vector.y());
    out_vector->set_k(in_vector.z());
}