Example #1
0
	/* ************************************************************************* */
	Matrix StereoCamera::Dproject_to_stereo_camera1(const Point3& P) const {
		double d = 1.0 / P.z(), d2 = d*d;
		const Cal3_S2Stereo& K = *K_;
		double f_x = K.fx(), f_y = K.fy(), b = K.baseline();
		return Matrix_(3, 3,
				 f_x*d,   0.0, -d2*f_x* P.x(),
				 f_x*d,   0.0, -d2*f_x*(P.x() - b),
				   0.0, f_y*d, -d2*f_y* P.y()
		);
	}
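The rows above are the partial derivatives of the intrinsic stereo projection (uL, uR, v) = (fx*x/z, fx*(x-b)/z, fy*y/z) with respect to P = (x, y, z). Below is a minimal, self-contained sketch (plain C++, no GTSAM types; all names are illustrative) that checks the analytic rows against finite differences:

// Illustrative standalone check of the 3x3 stereo projection Jacobian.
#include <cstdio>

static void stereoProject(double fx, double fy, double b,
                          const double P[3], double out[3]) {
  const double d = 1.0 / P[2];
  out[0] = fx * d * P[0];        // uL
  out[1] = fx * d * (P[0] - b);  // uR
  out[2] = fy * d * P[1];        // v
}

int main() {
  const double fx = 500.0, fy = 480.0, b = 0.1;
  const double P[3] = {0.3, -0.2, 2.0};
  const double eps = 1e-6;
  double base[3], plus[3], J[3][3];
  stereoProject(fx, fy, b, P, base);
  for (int j = 0; j < 3; ++j) {
    double Q[3] = {P[0], P[1], P[2]};
    Q[j] += eps;
    stereoProject(fx, fy, b, Q, plus);
    for (int i = 0; i < 3; ++i) J[i][j] = (plus[i] - base[i]) / eps;
  }
  // Expected (analytic) rows, with d = 1/z:
  //   [ fx*d, 0,    -d*d*fx*x       ]
  //   [ fx*d, 0,    -d*d*fx*(x - b) ]
  //   [ 0,    fy*d, -d*d*fy*y       ]
  for (int i = 0; i < 3; ++i)
    std::printf("%g %g %g\n", J[i][0], J[i][1], J[i][2]);
  return 0;
}

Each printed row should agree with the corresponding analytic row to roughly the finite-difference step size.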
Example #2
0
void Camera::lookAt (const Point3 &eyePos, const Point3 &centerOfView, const Vector3 &_up)
{
	viewMatrix = Matrix4::Identity();
	Vector3 up = _up;

	Vector3 direction;
	direction.x() = centerOfView.x() - eyePos.x();
	direction.y() = centerOfView.y() - eyePos.y();
	direction.z() = centerOfView.z() - eyePos.z();
	direction.normalize();

	up.normalize();
	Vector3 side = direction.cross(up).normalized(); // normalize: direction and up need not be orthogonal
	Vector3 Utrue = side.cross (direction);

	viewMatrix (0, 0) = side.x();
	viewMatrix (0, 1) = side.y();
	viewMatrix (0, 2) = side.z();
	viewMatrix (1, 0) = Utrue.x();
	viewMatrix (1, 1) = Utrue.y();
	viewMatrix (1, 2) = Utrue.z();
	viewMatrix (2, 0) = -direction.x();
	viewMatrix (2, 1) = -direction.y();
	viewMatrix (2, 2) = -direction.z();

	// The translation must be expressed in the rotated (camera) frame: t = -R * eye.
	viewMatrix (0, 3) = -(side.x()*eyePos.x() + side.y()*eyePos.y() + side.z()*eyePos.z());
	viewMatrix (1, 3) = -(Utrue.x()*eyePos.x() + Utrue.y()*eyePos.y() + Utrue.z()*eyePos.z());
	viewMatrix (2, 3) = direction.x()*eyePos.x() + direction.y()*eyePos.y() + direction.z()*eyePos.z();

	std::cout << viewMatrix << std::endl;
}
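A minimal Eigen-based sketch (hypothetical standalone types, not this project's Camera/Matrix4/Vector3) of the invariant the corrected view matrix should satisfy: the eye position maps to the origin and the center of view lands on the negative Z axis.

#include <Eigen/Dense>
#include <iostream>

int main() {
  using Eigen::Matrix4d; using Eigen::Vector3d; using Eigen::Vector4d;

  const Vector3d eye(1.0, 2.0, 3.0), center(0.0, 0.0, 0.0), up(0.0, 1.0, 0.0);
  const Vector3d f = (center - eye).normalized();            // view direction
  const Vector3d s = f.cross(up.normalized()).normalized();  // side
  const Vector3d u = s.cross(f);                             // true up

  Matrix4d V = Matrix4d::Identity();
  V.block<1, 3>(0, 0) = s.transpose();
  V.block<1, 3>(1, 0) = u.transpose();
  V.block<1, 3>(2, 0) = -f.transpose();
  V(0, 3) = -s.dot(eye);   // translation in the rotated frame, t = -R * eye
  V(1, 3) = -u.dot(eye);
  V(2, 3) =  f.dot(eye);

  // The eye must map to the origin; the center must land on the negative Z axis.
  std::cout << (V * Vector4d(eye.x(), eye.y(), eye.z(), 1.0)).transpose() << std::endl;
  std::cout << (V * Vector4d(center.x(), center.y(), center.z(), 1.0)).transpose() << std::endl;
  return 0;
}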
Example #3
0
  /* ************************************************************************* */
  StereoPoint2 StereoCamera::project(const Point3& point,
      boost::optional<Matrix&> H1, boost::optional<Matrix&> H2) const {

#ifdef STEREOCAMERA_CHAIN_RULE
    const Point3 q = leftCamPose_.transform_to(point, H1, H2);
#else
    // omit derivatives
    const Point3 q = leftCamPose_.transform_to(point);
#endif

    if ( q.z() <= 0 ) throw StereoCheiralityException();

    // get calibration
    const Cal3_S2Stereo& K = *K_;
    const double fx = K.fx(), fy = K.fy(), b = K.baseline();

    // calculate scaled but not translated image coordinates
    const double d = 1.0 / q.z();
    const double x = q.x(), y = q.y();
    const double dfx = d*fx, dfy = d*fy;
    const double uL = dfx*x;
    const double uR = dfx*(x - b);
    const double v  = dfy*y;

    // check if derivatives need to be computed
    if (H1 || H2) {
#ifdef STEREOCAMERA_CHAIN_RULE
      // just implement chain rule
      Matrix D_project_point = Dproject_to_stereo_camera1(q); // 3x3 Jacobian
      if (H1) *H1 = D_project_point*(*H1);
      if (H2) *H2 = D_project_point*(*H2);
#else
      // optimized version, see StereoCamera.nb
      if (H1) {
        const double v1 = v/fy, v2 = fx*v1, dx=d*x;
        *H1 = (Matrix(3, 6) <<
                uL*v1, -fx-dx*uL,     v2, -dfx,  0.0, d*uL,
                uR*v1, -fx-dx*uR,     v2, -dfx,  0.0, d*uR,
            fy + v*v1,    -dx*v , -x*dfy,  0.0, -dfy, d*v
          ).finished();
      }
      if (H2) {
        const Matrix R(leftCamPose_.rotation().matrix());
        *H2 = d * (Matrix(3, 3) <<
             fx*R(0, 0) - R(0, 2)*uL, fx*R(1, 0) - R(1, 2)*uL, fx*R(2, 0) - R(2, 2)*uL,
             fx*R(0, 0) - R(0, 2)*uR, fx*R(1, 0) - R(1, 2)*uR, fx*R(2, 0) - R(2, 2)*uR,
             fy*R(0, 1) - R(0, 2)*v , fy*R(1, 1) - R(1, 2)*v , fy*R(2, 1) - R(2, 2)*v
         ).finished();
      }
#endif
    }

    // finally translate
    return StereoPoint2(K.px() + uL, K.px() + uR, K.py() + v);
  }
Example #4
0
/* ************************************************************************* */
TEST( dataSet, gtsam2openGL)
{
  Vector3 rotVec(0.2, 0.7, 1.1);
  Rot3 R = Rot3::Expmap(rotVec);
  Point3 t = Point3(1.0,20.0,10.0);
  Pose3 actual = Pose3(R,t);
  Pose3 poseGTSAM = openGL2gtsam(R, t.x(), t.y(), t.z());

  Pose3 expected = gtsam2openGL(poseGTSAM);
  EXPECT(assert_equal(expected,actual));
}
Example #5
0
/* ************************************************************************* */
double PoseRTV::range(const PoseRTV& other,
    OptionalJacobian<1,9> H1, OptionalJacobian<1,9> H2) const {
  Matrix36 D_t1_pose, D_t2_other;
  const Point3 t1 = pose().translation(H1 ? &D_t1_pose : 0);
  const Point3 t2 = other.pose().translation(H2 ? &D_t2_other : 0);
  Matrix13 D_d_t1, D_d_t2;
  double d = t1.distance(t2, H1 ? &D_d_t1 : 0, H2 ? &D_d_t2 : 0);
  if (H1) *H1 << D_d_t1 * D_t1_pose, 0,0,0;
  if (H2) *H2 << D_d_t2 * D_t2_other, 0,0,0;
  return d;
}
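For reference, the chain rule applied above (standard result, not extra project documentation): with $d = \lVert t_1 - t_2 \rVert$,

    \frac{\partial d}{\partial x_1} = \begin{bmatrix} D_{d,t_1}\,D_{t_1,\mathrm{pose}} & 0_{1\times 3} \end{bmatrix},

and symmetrically for H2; the three trailing zero columns appear because the range depends only on the pose part, not on the velocity part, of the 9-dimensional PoseRTV state.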
Example #6
0
void CObjectRotateCamera::SetCenter(float x, float y, float z)
{
    SetLookat(x, y, z);
    m_center = Point3(x, y, z);
    Point3 eye = Point3(vEyePt.x, vEyePt.y, vEyePt.z);
    Point3 dir = eye - m_center;
    m_radius = dir.GetLength();
    Point3 angle = Dir2Angle(eye - m_center);
    m_anglex = angle.x;
    m_anglez = angle.y;
}
Example #7
0
bool IsScaled(Matrix3& tm)
{
	Matrix3 t(1);
	t = tm - t;
	Point3 x = t.GetRow(0), y = t.GetRow(1), z = t.GetRow(2), u = t.GetRow(3);
	float v = x.LengthSquared() + y.LengthSquared() + z.LengthSquared() + u.LengthSquared();
	return v > gTolerenceEpsilon;
}
Example #8
0
            void HeightGrid::init() {
                if (m_heightImage != NULL) {
                    clog << "Generate terrain data: " << m_heightImage->getWidth() << ", " << m_heightImage->getHeight() << ", Format: " << m_heightImage->getFormat() << ", width step: " << m_heightImage->getWidthStep() << endl;

                    MyImage<BGRPixel> img(m_heightImage);

                    // TODO: Compute normals.

                    float scaleZ = m_max - m_min;
                    if (scaleZ < 0) {
                        scaleZ = 1.0f;
                    }
                    if ( (m_ground < 0) || (m_ground > 1) ) {
                        m_ground = 0.5;
                    }
                    clog << "Ground height: " << m_ground << ", scaling Z : " << scaleZ << ", translation for z-direction: " << (-1 * scaleZ * m_ground) << endl;

                    m_callList = glGenLists(1);
                    glNewList(m_callList, GL_COMPILE);
                    for (uint32_t y = 0; y < m_heightImage->getHeight() - 2; y++) {
                        glBegin(GL_TRIANGLE_STRIP);
                        for (uint32_t x = 0; x < m_heightImage->getWidth(); x++) {
                            // The red channel encodes height; use it both as a grayscale color and as elevation.
                            const float gray = static_cast<int>(img.getPixel(x, m_heightImage->getHeight() - 1 - y)->r) / 255.0f;
                            const float grayNext = static_cast<int>(img.getPixel(x, m_heightImage->getHeight() - 1 - (y + 1))->r) / 255.0f;
                            glColor3f(gray, gray, gray);

                            glVertex3f(static_cast<float>(x), static_cast<float>(y), (gray - m_ground) * scaleZ);
                            glVertex3f(static_cast<float>(x), static_cast<float>(y + 1), (grayNext - m_ground) * scaleZ);
                        }
                        glEnd();
                    }
                    glEndList();

                    // Compute translation.
                    Point3 translate;
                    translate.setX(-1 * m_originPixelXY.getX() * m_scalingPixelXY.getX());
                    translate.setY(-1 * (m_heightImage->getHeight() - m_originPixelXY.getY()) * m_scalingPixelXY.getY());

                    Point3 scale(m_scalingPixelXY);
                    // m_scalingPixelXY sets z to 0, which would disable elevation. Therefore, scale z by 1.0, i.e. keep the computed elevation.
                    scale.setZ(1.0);

                    // Set up the actual renderer.
                    m_heightImageRenderer = new HeightGridRenderer(getNodeDescriptor(), m_callList);

                    // Set up transform group.
                    m_heightImageNode = new TransformGroup();
                    m_heightImageNode->addChild(m_heightImageRenderer);

                    // Translate the height image.
                    m_heightImageNode->setTranslation(Point3(translate.getX(), translate.getY(), 0));

                    // Scale the height image.
                    m_heightImageNode->setScaling(scale);
                }
            }
Example #9
0
/// Creates a new coordinate matrix with the coordinates from
/// \p conformer.
Coordinates::Coordinates(const Conformer *conformer)
    : m_matrix(conformer->molecule()->size(), 3)
{
    int size = conformer->molecule()->size();

    for(int i = 0; i < size; i++){
        Point3 position = conformer->position(conformer->molecule()->atom(i));
        m_matrix.setValue(i, 0, position.x());
        m_matrix.setValue(i, 1, position.y());
        m_matrix.setValue(i, 2, position.z());
    }
}
Example #10
0
/// Creates a new coordinate matrix with the coordinates from
/// \p atoms.
Coordinates::Coordinates(const std::vector<Atom *> &atoms)
    : m_matrix(atoms.size(), 3)
{
    unsigned int size = atoms.size();

    for(unsigned int i = 0; i < size; i++){
        Point3 position = atoms[i]->position();
        m_matrix.setValue(i, 0, position.x());
        m_matrix.setValue(i, 1, position.y());
        m_matrix.setValue(i, 2, position.z());
    }
}
Example #11
0
/// Creates a new coordinate matrix with the coordinates from
/// \p molecule.
Coordinates::Coordinates(const Molecule *molecule)
    : m_matrix(molecule->size(), 3)
{
    int size = molecule->size();

    for(int i = 0; i < size; i++){
        Point3 position = molecule->atom(i)->position();
        m_matrix.setValue(i, 0, position.x());
        m_matrix.setValue(i, 1, position.y());
        m_matrix.setValue(i, 2, position.z());
    }
}
Example #12
0
void GCodeExport::writeMoveBFB(int x, int y, int z, double speed, double extrusion_mm3_per_mm)
{
    double extrusion_per_mm = mm3ToE(extrusion_mm3_per_mm);
    
    Point gcode_pos = getGcodePos(x,y, current_extruder);
    
    //For Bits From Bytes machines, we need to handle this completely differently, as they do not use E values but RPM values.
    float fspeed = speed * 60;
    float rpm = extrusion_per_mm * speed * 60;
    const float mm_per_rpm = 4.0; //All BFB machines have 4mm per RPM extrusion.
    rpm /= mm_per_rpm;
    if (rpm > 0)
    {
        if (extruder_attr[current_extruder].retraction_e_amount_current)
        {
            if (currentSpeed != double(rpm))
            {
                //fprintf(f, "; %f e-per-mm %d mm-width %d mm/s\n", extrusion_per_mm, lineWidth, speed);
                //fprintf(f, "M108 S%0.1f\r\n", rpm);
                *output_stream << "M108 S" << std::setprecision(1) << rpm << new_line;
                currentSpeed = double(rpm);
            }
            //Add M101 or M201 to enable the proper extruder.
            *output_stream << "M" << int((current_extruder + 1) * 100 + 1) << new_line;
            extruder_attr[current_extruder].retraction_e_amount_current = 0.0;
        }
        //Adjust the feedrate to the actual RPM we are requesting: because of rounding errors we cannot get every RPM value, but we have much more resolution in the feedrate value.
        // (Trick copied from KISSlicer, thanks Jonathan)
        fspeed *= (rpm / (roundf(rpm * 100) / 100));

        //Increase the extrusion amount to calculate the amount of filament used.
        Point3 diff = Point3(x,y,z) - getPosition();
        
        current_e_value += extrusion_per_mm * diff.vSizeMM();
    }
    else
    {
        //If we are not extruding, check if we still need to disable the extruder. This causes a retraction due to auto-retraction.
        if (!extruder_attr[current_extruder].retraction_e_amount_current)
        {
            *output_stream << "M103" << new_line;
            extruder_attr[current_extruder].retraction_e_amount_current = 1.0; // 1.0 used as stub; BFB doesn't use the actual retraction amount; it performs retraction on the firmware automatically
        }
    }
    *output_stream << std::setprecision(3) << 
        "G1 X" << INT2MM(gcode_pos.X) << 
        " Y" << INT2MM(gcode_pos.Y) << 
        " Z" << INT2MM(z) << std::setprecision(1) << " F" << fspeed << new_line;
    
    currentPosition = Point3(x, y, z);
    estimateCalculator.plan(TimeEstimateCalculator::Position(INT2MM(currentPosition.x), INT2MM(currentPosition.y), INT2MM(currentPosition.z), eToMm(current_e_value)), speed);
}
Example #13
0
	/**
	* @param includeMargin Indicates whether the algorithm operates on objects with margin
	*/
	template<class T> std::unique_ptr<GJKResult<T>> GJKAlgorithm<T>::processGJK(const CollisionConvexObject3D &convexObject1,
			const CollisionConvexObject3D &convexObject2, bool includeMargin) const
	{
		//get point which belongs to the outline of the shape (Minkowski difference)
		Vector3<T> initialDirection = Vector3<T>(1.0, 0.0, 0.0);
		Point3<T> initialSupportPointA = convexObject1.getSupportPoint(initialDirection.template cast<float>(), includeMargin).template cast<T>();
		Point3<T> initialSupportPointB = convexObject2.getSupportPoint((-initialDirection).template cast<float>(), includeMargin).template cast<T>();
		Point3<T> initialPoint = initialSupportPointA - initialSupportPointB;

		Vector3<T> direction = (-initialPoint).toVector();

		Simplex<T> simplex;
		simplex.addPoint(initialSupportPointA, initialSupportPointB);

		T minimumToleranceMultiplicator = (T)1.0;

		for(unsigned int iterationNumber=0; iterationNumber<maxIteration; ++iterationNumber)
		{
			Point3<T> supportPointA = convexObject1.getSupportPoint(direction.template cast<float>(), includeMargin).template cast<T>();
			Point3<T> supportPointB = convexObject2.getSupportPoint((-direction).template cast<float>(), includeMargin).template cast<T>();
			Point3<T> newPoint = supportPointA - supportPointB;

			const Vector3<T> &vClosestPoint = -direction; //vector from origin to closest point of simplex
			T closestPointSquareDistance = vClosestPoint.dotProduct(vClosestPoint);
			T closestPointDotNewPoint = vClosestPoint.dotProduct(newPoint.toVector());

			//check termination conditions: the new point is not more extreme than existing ones OR the new point already exists in the simplex
			T distanceTolerance = std::max(minimumTerminationTolerance*minimumToleranceMultiplicator, relativeTerminationTolerance*closestPointSquareDistance);
			if((closestPointSquareDistance-closestPointDotNewPoint) <= distanceTolerance || simplex.isPointInSimplex(newPoint))
			{
				if(closestPointDotNewPoint <= 0.0)
				{ //collision detected
					return std::make_unique<GJKResultCollide<T>>(simplex);
				}else
				{
					return std::make_unique<GJKResultNoCollide<T>>(std::sqrt(closestPointSquareDistance), simplex);
				}
			}

			simplex.addPoint(supportPointA, supportPointB);

			direction = (-simplex.getClosestPointToOrigin()).toVector();

			minimumToleranceMultiplicator += percentageIncreaseOfMinimumTolerance;
		}

		#ifdef _DEBUG
			logMaximumIterationReach();
		#endif

		return std::make_unique<GJKResultInvalid<T>>();
	}
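For reference, the support-point pairs above rely on the standard GJK identity (general background, not project documentation) that the support mapping of the Minkowski difference factors through the two objects:

    s_{A \ominus B}(d) = s_A(d) - s_B(-d),

which is why each simplex vertex is formed as supportPointA - supportPointB for the current search direction.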
Example #14
0
        /*
         * compute the angle formed by v0-v1-v2
         */
        static inline T angle(const Point3<T>& v0, 
                const Point3<T>& v1, const Point3<T>& v2)
        {
            T a2 = v0.distanceSqr(v1);
            T a  = sqrt(a2);
            T b2 = v2.distanceSqr(v1);
            T b  = sqrt(b2);

            T s = 2. * a * b;
            if ( fabs(s) < 1E-12 ) 
                fprintf(stderr, "ERROR: zero-length edge encountered\n");
            return acos((a2 + b2 - v0.distanceSqr(v2)) / s);
        }
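For reference, the return value is the law of cosines solved for the angle at v1 (a restatement of the formula used above, not extra project documentation):

    \cos\theta = \frac{a^2 + b^2 - c^2}{2ab}, \qquad a = \lVert v_0 - v_1 \rVert,\; b = \lVert v_2 - v_1 \rVert,\; c = \lVert v_0 - v_2 \rVert.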
Example #15
0
        Point3 Transformation::transformInversely(const Point3 &coordinate, const Position &position) const {
            Point3 cc = coordinate;

            // Rotate the coordinate.
            cc.rotateX(-1 * position.getRotation().getX());
            cc.rotateY(-1 * position.getRotation().getY());
            cc.rotateZ(-1 * position.getRotation().getZ());

            // Translate the coordinate.
            cc -= position.getPosition();

            return cc;
        }
Example #16
0
bool lexic(Point3 P1, Point3 P2)
{ 
    return  P1.getX() <  P2.getX() ||
           (P1.getX() == P2.getX() && P1.getY() <  P2.getY()) ||
           (P1.getX() == P2.getX() && P1.getY() == P2.getY() &&
            P1.getZ() <  P2.getZ());
}
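An equivalent formulation (a sketch only, reusing the Point3 accessors from the example above and assuming <tuple> is available) that makes the lexicographic ordering explicit:

#include <tuple>

bool lexicTuple(const Point3& P1, const Point3& P2)
{
    // std::tuple's operator< compares element-wise: x first, then y, then z.
    return std::make_tuple(P1.getX(), P1.getY(), P1.getZ())
         < std::make_tuple(P2.getX(), P2.getY(), P2.getZ());
}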
Example #17
0
void MatchWindow::updatePoseText ()
{
	Point3 worldPos = glcanvas->pose()->getWorldPosition();
	double
		heading = (glcanvas->pose()->getHeading()) * 180.0/M_PI,
		elevation = glcanvas->pose()->getElevation() * 180.0/M_PI,
		bank = glcanvas->pose()->getBank() * 180.0/M_PI;
	wposx->setText (worldPos.x());
	wposy->setText (worldPos.y());
	wposz->setText (worldPos.z());
	wyaw->setText (heading);
	wpitch->setText (elevation);
	wroll->setText (bank);
}
Example #18
0
/* ************************************************************************* */
Point3 Pose3::transform_from(const Point3& p, OptionalJacobian<3,6> Dpose,
    OptionalJacobian<3,3> Dpoint) const {
  // Only get matrix once, to avoid multiple allocations,
  // as well as multiple conversions in the Quaternion case
  const Matrix3 R = R_.matrix();
  if (Dpose) {
    Dpose->leftCols<3>() = R * skewSymmetric(-p.x(), -p.y(), -p.z());
    Dpose->rightCols<3>() = R;
  }
  if (Dpoint) {
    *Dpoint = R;
  }
  return Point3(R * p.vector()) + t_;
}
Example #19
0
Point3 getRefractionVector(Point3 view, Point3 normal, float n1, float n2)
{
	Point3 Vnormal = normal;
	float cosThetaOne = view.Dot(normal) / view.Length();
	float sinThetaTwo = (n1 / n2) * sqrt(1 - pow(cosThetaOne, 2));
	float cosThetaTwo = sqrt(1 - pow(sinThetaTwo, 2));
	Point3 Vt = (view - (view.Dot(normal)*normal)).GetNormalized();

	/******************Total Internal Reflection **************************/
	//if (sinThetaTwo > 1 || sinThetaTwo < -1) //Checking total internal reflection
	//	return(getReflectionVector(-view, normal)); //If happened returning a reflection vector
	/*************************End Total Internal Reflection***************/
	return ((cosThetaTwo * -1 * Vnormal) + (sinThetaTwo * -1 * Vt));
}
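For reference (standard Snell's law, not project documentation): n_1 \sin\theta_1 = n_2 \sin\theta_2, which is where the \sin\theta_2 above comes from; the refracted ray is then reassembled from its components along the surface normal and along the tangential direction Vt.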
Example #20
0
inline Angle direction( const Point3 & p1, const Point3 & p2 )
{

        qreal hypotenuse = distance( p1, p2 );

        if ( qFuzzyCompare( hypotenuse, 0 ) )
            return Angle();

        Angle angle( asin( ( p2.x() - p1.x() ) / hypotenuse ) );

        return ( ( p2.y() - p1.y() ) < 0 )
            ? Angle::normalize( M_PI - angle.radian() )
            : angle;
}
Example #21
0
/* ************************************************************************* */
double Pose3::range(const Point3& point, OptionalJacobian<1, 6> H1,
                    OptionalJacobian<1, 3> H2) const {
  Matrix36 D_local_pose;
  Matrix3 D_local_point;
  Point3 local = transform_to(point, H1 ? &D_local_pose : 0, H2 ? &D_local_point : 0);
  if (!H1 && !H2) {
    return local.norm();
  } else {
    Matrix13 D_r_local;
    const double r = local.norm(D_r_local);
    if (H1) *H1 = D_r_local * D_local_pose;
    if (H2) *H2 = D_r_local * D_local_point;
    return r;
  }
}
Example #22
0
// Calculate bounding sphere using average position of the points.  Better fit but slower.
void CalcCenteredSphere(Mesh& mesh, Point3& center, float& radius)
{
	int nv = mesh.getNumVerts();
	Point3 sum(0.0f, 0.0f, 0.0f);
	for (int i=0; i<nv; ++i)
		sum += mesh.getVert(i);
	center = sum / float(nv);
	float radsq = 0.0f;
	for (int i=0; i<nv; ++i){
		Point3 diff = mesh.getVert(i) - center;
		float mag = diff.LengthSquared();
		radsq = max(radsq, mag);
	}
	radius = Sqrt(radsq);
}
Example #23
0
Point3 plDistributor::IPerpAxis(const Point3& p) const
{
    const float kMinLengthSquared = 1.e-1f;

    int minAx = p.MinComponent();
    Point3 ax(0,0,0);
    ax[minAx] = 1.f;

    Point3 perp = p ^ ax;
    if( perp.LengthSquared() < kMinLengthSquared )
    {
        // hmm, think we might be screwed, but this shouldn't happen.
    }

    return perp = perp.FNormalize();
}
Example #24
0
/**
*  @brief
*    Returns the position, rotation and scale of the scene node at a given time
*/
void PLSceneNode::GetPosRotScale(Point3 &vPos, Quat &qRot, Point3 &vScale, TimeValue nTime)
{
	if (m_pIGameNode) {
		// Get the position, rotation and scale - relative to the parent node
		GMatrix mParentMatrix;
		IGameNode *pIGameNodeParent = m_pIGameNode->GetNodeParent();
		if (pIGameNodeParent)
			mParentMatrix = pIGameNodeParent->GetWorldTM(nTime);
		PLTools::GetPosRotScale(m_pIGameNode->GetWorldTM(nTime)*PLTools::Inverse(mParentMatrix), vPos, qRot, vScale, IsRotationFlipped());

		// Get the scale (NOT done for special nodes!)
		if (m_nType != TypeContainer && m_nType != TypeScene && m_nType != TypeCell &&
			m_nType != TypeCamera && m_nType != TypeLight) {
			// [TODO] Do we still need this hint?
			// Check for non-uniform scale
//			if (m_vScale.x != m_vScale.y || m_vScale.x != m_vScale.z || m_vScale.y != m_vScale.z) {
				// We have to use '%e' because otherwise we may get output like '(1 1 1) is no uniform scale'
				// g_pLog->LogFLine(PLLog::Hint, "Node '%s' has a none uniform scale. (%e %e %e) This 'may' cause problems in special situations...", m_sName.c_str(), m_vScale.x, m_vScale.y, m_vScale.z);
//			}
		} else {
			// Set scale to 1
			vScale.Set(1.0f, 1.0f, 1.0f);
		}
	}
}
Example #25
0
        void DistanceToObjectsReport::report(const odcore::wrapper::Time &t) {
            cerr << "Call to DistanceToObjectsReport for t = " << t.getSeconds() << "." << t.getPartialMicroseconds() << ", containing " << getFIFO().getSize() << " containers." << endl;

            // Get last EgoState.
            KeyValueDataStore &kvds = getKeyValueDataStore();
            Container c = kvds.get(opendlv::data::environment::EgoState::ID());
            EgoState es = c.getData<EgoState>();

            const uint32_t SIZE = getFIFO().getSize();
            for (uint32_t i = 0; i < SIZE; i++) {
                c = getFIFO().leave();
                cerr << "Received: " << c.toString() << endl;

                if (c.getDataType() == opendlv::data::environment::Obstacle::ID()) {
                    Obstacle o = c.getData<Obstacle>();

                    const float DISTANCE = (es.getPosition().getDistanceTo(o.getPosition()));
                    cerr << "DistanceToObjectsReport: Distance to object: " << DISTANCE << ", E: " << es.toString() << ", o: " << o.getPosition().toString() << endl;

                    // Continuously check distance.
                    m_correctDistance &= (DISTANCE > m_threshold);

                    vector<Point3> shape = o.getPolygon().getVertices();
                    Point3 head = shape.front();
                    shape.push_back(head);
                    const uint32_t NUMVERTICES = shape.size();
                    for(uint32_t j = 1; j < NUMVERTICES; j++) {
                        Point3 pA = shape.at(j-1);
                        Point3 pB = shape.at(j);

                        // TODO: Check polygonal data as well as perpendicular to all sides.
                        // Create line.
                        Line l(pA, pB);

                        // Compute perpendicular point.
                        Point3 perpendicularPoint = l.getPerpendicularPoint(es.getPosition());

                        // Compute distance between current position and perpendicular point.
                        const float DISTANCE_PP = (es.getPosition().getDistanceTo(perpendicularPoint));

                        cerr << "DistanceToObjectsReport: Distance to object's shape: " << DISTANCE_PP << ", E: " << es.toString() << ", o: " << o.getPosition().toString() << ", perpendicular point:" << perpendicularPoint.toString() << endl;

                        // Continuously check distance to the object's shape.
                        m_correctDistance &= (DISTANCE_PP > m_threshold);
                    }
                }

                if (c.getDataType() == opendlv::data::environment::OtherVehicleState::ID()) {
                    OtherVehicleState o = c.getData<OtherVehicleState>();

                    const float DISTANCE = (es.getPosition().getDistanceTo(o.getPosition()));

                    // Compute distance between current position and perpendicular point.
                    cerr << "DistanceToObjectsReport: Distance to other vehicle: " << DISTANCE << ", E: " << es.toString() << ", o: " << o.getPosition().toString() << endl;

                    // Continuously check distance.
                    m_correctDistance &= (DISTANCE > m_threshold);
                }
            }
        }
Example #26
0
void getOrthoNormalBasisVector(Point3 i_up, Point3 &o_out_vector /*U*/, Point3& o_vector_right /*v*/)
{
    Point3 randomVectorW;
    //bool foundRandomVector = false;
    while (true)
    {
        randomVectorW = getRandomVector();
        if ( fabs(i_up.Dot(randomVectorW)) < RANDOMCOSINEANGLE)
        {
            o_out_vector = i_up.Cross(randomVectorW);
            o_vector_right = i_up.Cross(o_out_vector).GetNormalized();
            o_out_vector.Normalize();
            break;
        }
    }
}
Example #27
0
void printBox3D(Box3D box)
{
    Point3 min = box.min();
    Point3 max = box.max();
    printf("<%f,%f,%f> to <%f,%f,%f>\n",
           min.x(), min.y(), min.z(),
           max.x(), max.y(), max.z());
    fflush(stdout);
}
Example #28
0
  static inline bool point_is_spike_or_equal(Point1 const& last_point, Point2 const& segment_a, Point3 const& segment_b)
  {
    // adapted from boost\geometry\algorithms\detail\point_is_spike_or_equal.hpp to include tolerance checking

    // segment_a is at the beginning
    // segment_b is in the middle
    // last_point is at the end

    // segment_b is being considered for deletion

    double normTol = 0.001; // 1 mm
    double tol = 0.001; // relative to 1
      
    double diff1_x = last_point.x()-segment_b.x();
    double diff1_y = last_point.y()-segment_b.y();
    double norm1 = sqrt(pow(diff1_x, 2) + pow(diff1_y, 2)); 
    if (norm1 > normTol){
      diff1_x = diff1_x/norm1;
      diff1_y = diff1_y/norm1;
    }else{
      // last point is too close to segment b
      return true;
    }

    double diff2_x = segment_b.x()-segment_a.x();
    double diff2_y = segment_b.y()-segment_a.y();
    double norm2 = sqrt(pow(diff2_x, 2) + pow(diff2_y, 2));
    if (norm2 > normTol){
      diff2_x = diff2_x/norm2;
      diff2_y = diff2_y/norm2;
    }else{
      // segment b is too close to segment a
      return true;
    }

    double crossProduct = diff1_x*diff2_y-diff1_y*diff2_x;
    if (fabs(crossProduct) < tol){
      double dotProduct = diff1_x*diff2_x+diff1_y*diff2_y;
      if (dotProduct <= -1.0 + tol){
        // reversal
        return true;
      }
    }

    return false;
  }
Example #29
0
/* ************************************************************************* */
Point3 Pose3::transform_to(const Point3& p, OptionalJacobian<3,6> Dpose,
    OptionalJacobian<3,3> Dpoint) const {
  // Only get transpose once, to avoid multiple allocations,
  // as well as multiple conversions in the Quaternion case
  const Matrix3 Rt = R_.transpose();
  const Point3 q(Rt*(p - t_).vector());
  if (Dpose) {
    const double wx = q.x(), wy = q.y(), wz = q.z();
    (*Dpose) <<
        0.0, -wz, +wy,-1.0, 0.0, 0.0,
        +wz, 0.0, -wx, 0.0,-1.0, 0.0,
        -wy, +wx, 0.0, 0.0, 0.0,-1.0;
  }
  if (Dpoint) {
    *Dpoint = Rt;
  }
  return q;
}
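For reference, the Dpose block filled in above is the standard local-coordinates Jacobian (a sketch assuming GTSAM's rotation-then-translation ordering of the 6 pose coordinates): with q = R^{\top}(p - t),

    \frac{\partial q}{\partial \xi} = \begin{bmatrix} [q]_\times & -I_3 \end{bmatrix}, \qquad
    [q]_\times = \begin{bmatrix} 0 & -q_z & q_y \\ q_z & 0 & -q_x \\ -q_y & q_x & 0 \end{bmatrix},

which matches the skew-symmetric pattern written out element by element in the code.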
Example #30
0
float movingDiskFixedLineSegmentCollisionTime(Point2 center, float radius, Vector2 velocity, const LineSegment2D& segment, Point2& collisionLocation) {
    // Ridiculously implemented in terms of existing 3D functionality 

    Point3 X;
    const float t = CollisionDetection::collisionTimeForMovingSphereFixedTriangle
        (Sphere(Point3(center, 0.0f), radius),
         Vector3(velocity, 0.0f),
         Triangle(Point3(segment.point(0), +1.0f),
                  Point3(segment.point(0), -1.0f),
                  Point3(segment.point(1),  0.0f)),
                  X);

    if (t < finf()) {
        collisionLocation = X.xy();
    }

    return t;
}