Eigen::Vector2d CircularGroundPath::RelativePositionCalculator::localCoordinates( Eigen::Vector3d const& _center, Eigen::Vector2d global_coordinates )
{
  Eigen::Vector2d local;
  local.x() = global_coordinates.x() - _center.x();
  local.y() = global_coordinates.y() - _center.y();
  return local;
}
Example #2
    /**
     * Computes the undistorted image coordinates of an input pixel via
     * bilinear interpolation of its integer-coordinate 4-neighbors.
     *
     * \param dist_uv input distorted pixel coordinates.
     * \param rect_uv output undistorted pixel coordinates.
     */
    void rectifyBilinearLookup(const Eigen::Vector2d& dist_uv,
                               Eigen::Vector2d* rect_uv) const {
      int u = (int)dist_uv.x();
      int v = (int)dist_uv.y();

      assert(u >= 0 && v >= 0 && u < _input_camera.width && v < _input_camera.height);

      // weights
      float wright  = (dist_uv.x() - u);
      float wbottom = (dist_uv.y() - v);
      float w[4] = {
        (1 - wright) * (1 - wbottom),
        wright * (1 - wbottom),
        (1 - wright) * wbottom,
        wright * wbottom
      };

      int ra_index = v * _input_camera.width + u;
      uint32_t neighbor_indices[4] = {
        ra_index,
        ra_index + 1,
        ra_index + _input_camera.width,
        ra_index + _input_camera.width + 1
      };

      rect_uv->x() = 0;
      rect_uv->y() = 0;
      for(int i = 0; i < 4; ++i) {
        rect_uv->x() += w[i] * _map_x[neighbor_indices[i]];
        rect_uv->y() += w[i] * _map_y[neighbor_indices[i]];
      }
    }
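For reference, the four weights above are the standard bilinear coefficients. A minimal standalone sketch of the same lookup, assuming a hypothetical row-major scalar map instead of the _map_x/_map_y members used above:

#include <Eigen/Core>

// Bilinearly interpolate a row-major scalar map of the given width at a
// fractional pixel coordinate (same weighting scheme as above).
inline double bilinearSample(const double* map, int width, const Eigen::Vector2d& dist_uv)
{
  const int u = static_cast<int>(dist_uv.x());
  const int v = static_cast<int>(dist_uv.y());
  const double wright  = dist_uv.x() - u;   // weight toward the right neighbor
  const double wbottom = dist_uv.y() - v;   // weight toward the bottom neighbor
  const int idx = v * width + u;
  return (1 - wright) * (1 - wbottom) * map[idx]
       + wright       * (1 - wbottom) * map[idx + 1]
       + (1 - wright) * wbottom       * map[idx + width]
       + wright       * wbottom       * map[idx + width + 1];
}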
Example #3
void drawEllipse(cv::Mat & im, const Eigen::Matrix2d & A, const Eigen::Vector2d & b, const double sigma, const cv::Scalar col, const int thickness, const bool bMarkCentre)
{
    if(bMarkCentre)
    {
        const cv::Point centre(cvRound(b.x()), cvRound(b.y()));
        cv::circle(im, centre, 2, col, -1);
    }
    
    const int nNumPoints = 100;
    
    const Eigen::Matrix2d rootA = matrixSqrt(A);
    
    cv::Point p_last;
    for(int i=0;i<nNumPoints;i++)
    {
        const double dTheta = i*(2*M_PI/nNumPoints);
        const Eigen::Vector2d p1 (sin(dTheta), cos(dTheta));
        
        const Eigen::Vector2d p_sigma = p1 * sigma;
        
        const Eigen::Vector2d p_trans = rootA*p_sigma + b;
        const cv::Point p_im(cvRound(p_trans.x()), cvRound(p_trans.y()));
        
        if(i>0)
            cv::line(im, p_last, p_im, col, thickness);
        
        p_last = p_im;
    }
}
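The matrixSqrt helper is not shown in this snippet; a minimal sketch, assuming A is a symmetric positive semi-definite covariance matrix, could use Eigen's self-adjoint eigensolver:

#include <Eigen/Eigenvalues>

// Principal square root of a symmetric PSD 2x2 matrix (hypothetical stand-in for matrixSqrt above).
inline Eigen::Matrix2d matrixSqrt(const Eigen::Matrix2d& A)
{
    Eigen::SelfAdjointEigenSolver<Eigen::Matrix2d> solver(A);
    return solver.operatorSqrt();
}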
/**
 * @brief The 'regression2D' method can be used to fit a line to a given point set.
 * @param points_begin      set begin iterator
 * @param points_end        set end iterator (one past the last point)
 * @param line              the parameterized line receiving the fit
*/
inline void regression2D(const std::vector<LaserBeam>::const_iterator &points_begin, const std::vector<LaserBeam>::const_iterator &points_end,
                         Eigen::ParametrizedLine<double, 2> &line)
{
    std::vector<LaserBeam>::const_iterator back_it = points_end;
    --back_it;
    Eigen::Vector2d front (points_begin->posX(), points_begin->posY());
    Eigen::Vector2d back (back_it->posX(), back_it->posY());

    unsigned int size = std::distance(points_begin, points_end);
    Eigen::MatrixXd A(size, 2);
    Eigen::VectorXd b(size);
    A.setOnes();

    Eigen::Vector2d dxy = (front - back).cwiseAbs();
    bool solve_for_x = dxy.x() > dxy.y();
    if(solve_for_x) {
        /// SOLVE FOR X
        int i = 0;
        for(std::vector<LaserBeam>::const_iterator it = points_begin ; it != points_end ; ++it, ++i)
        {
            A(i,1) = it->posX();
            b(i)   = it->posY();
        }
    } else {
        /// SOLVE FOR Y
        int i = 0;
        for(std::vector<LaserBeam>::const_iterator it = points_begin ; it != points_end ; ++it, ++i)
        {
            A(i,1) = it->posY();
            b(i)   = it->posX();
        }
    }

    Eigen::VectorXd weights = A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b);
    double          alpha   = weights(0);
    double          beta    = weights(1);
    Eigen::Vector2d from;
    Eigen::Vector2d to;

    if(solve_for_x) {
        from(0) = 0.0;
        from(1) = alpha;
        to(0)   = 1.0;
        to(1)   = alpha + beta;
    } else {
        from(0) = alpha;
        from(1) = 0.0;
        to(0)   = alpha + beta;
        to(1)   = 1.0;
    }

    Eigen::Vector2d fit_start;
    Eigen::Vector2d fit_end;
    line = Eigen::ParametrizedLine<double, 2>(from, (to - from).normalized());
    fit_start = line.projection(front);
    fit_end   = line.projection(back);
    line = Eigen::ParametrizedLine<double, 2>(fit_start, (fit_end - fit_start));
}
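The fit above solves the overdetermined system A*w = b (a column of ones for the intercept, a column of coordinates for the slope) by SVD. A self-contained sketch of the same step, assuming plain (x, y) samples instead of LaserBeam readings:

#include <vector>
#include <Eigen/Dense>

// Returns (intercept, slope) of the least-squares line y = intercept + slope * x.
inline Eigen::Vector2d fitLine(const std::vector<Eigen::Vector2d>& pts)
{
    Eigen::MatrixXd A(pts.size(), 2);
    Eigen::VectorXd b(pts.size());
    for (std::size_t i = 0; i < pts.size(); ++i) {
        A(i, 0) = 1.0;         // intercept column
        A(i, 1) = pts[i].x();  // slope column
        b(i)    = pts[i].y();
    }
    return A.jacobiSvd(Eigen::ComputeThinU | Eigen::ComputeThinV).solve(b);
}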
Example #5
void ElevationVisualization::visualize(
    const grid_map::GridMap& map,
    const std::string& typeNameElevation,
    const std::string& typeNameColor,
    const float lowerValueBound,
    const float upperValueBound)
{
  // Set marker info.
  marker_.header.frame_id = map.getFrameId();
  marker_.header.stamp.fromNSec(map.getTimestamp());
  marker_.scale.x = map.getResolution();
  marker_.scale.y = map.getResolution();

  // Clear points.
  marker_.points.clear();
  marker_.colors.clear();

  float markerHeightOffset = static_cast<float>(markerHeight_/2.0);

  const Eigen::Array2i buffSize = map.getBufferSize();
  const Eigen::Array2i buffStartIndex = map.getBufferStartIndex();
  const bool haveColor = map.isType("color");
  for (unsigned int i = 0; i < buffSize(0); ++i)
  {
    for (unsigned int j = 0; j < buffSize(1); ++j)
    {
      // Getting map data.
      const Eigen::Array2i cellIndex(i, j);
      const Eigen::Array2i buffIndex =
          grid_map_lib::getBufferIndexFromIndex(cellIndex, buffSize, buffStartIndex);
      const float& elevation = map.at(typeNameElevation, buffIndex);
      if(std::isnan(elevation))
        continue;
      const float color = haveColor ? map.at(typeNameColor, buffIndex) : lowerValueBound;

      // Add marker point.
      Eigen::Vector2d position;
      map.getPosition(buffIndex, position);
      geometry_msgs::Point point;
      point.x = position.x();
      point.y = position.y();
      point.z = elevation - markerHeightOffset;
      marker_.points.push_back(point);

      // Add marker color.
      if(haveColor)
      {
        std_msgs::ColorRGBA markerColor =
            grid_map_visualization::color_utils::interpolateBetweenColors(
              color, lowerValueBound, upperValueBound, lowerColor_, upperColor_);
        marker_.colors.push_back(markerColor);
      }
    }
  }

  markerPublisher_.publish(marker_);
}
Example #6
Edge * IEdgesExtractor::segmentalize(double m, double q, const Eigen::Vector2d & mi, const Eigen::Vector2d & ma) const{

	Eigen::Vector2d min;
	Eigen::Vector2d max;

	if(m < 0){
		min[0] = mi.x();
		min[1] = ma.y();
		max[0] = ma.x();
		max[1] = mi.y();
	}else{
		min[0] = mi.x();
		min[1] = mi.y();
		max[0] = ma.x();
		max[1] = ma.y();
	}
	
	float mm = m*m;
	float mq = m*q;

	float x1 = (min.x() + m*min.y() - mq) / (1+mm);
	float y1 = (min.x()*m + mm*min.y() + q) / (1+mm);
	float x2 = (max.x() + m*max.y() - mq) / (1+mm);
	float y2 = (max.x()*m + mm*max.y() + q) / (1+mm);

	Vertex* v1 = new Vertex(-1, x1, y1);
	Vertex* v2 = new Vertex(-1, x2, y2);
	return new Edge(0, v1, v2);

}
void displayopencv::DisplayBspline(const mathtools::application::Bspline<2> &bspline, cv::Mat &img, const mathtools::affine::Frame<2>::Ptr frame, const cv::Scalar &color, const unsigned int &nb_pts)
{
	double inf = bspline.getInfBound(),
		   sup = bspline.getSupBound();
	
	Eigen::Vector2d prev = mathtools::affine::Point<2>(bspline(inf)).getCoords(frame);

	for(unsigned int i=1;i<nb_pts;i++)
	{
		double t = inf + (double)i*(sup-inf)/(double)(nb_pts-1);

		Eigen::Vector2d curr = mathtools::affine::Point<2>(bspline(t)).getCoords(frame);

		cv::Point pt1(prev.x(), prev.y()),
				  pt2(curr.x(), curr.y());
		
		prev=curr;

		cv::line(img,pt1,pt2,color);
	}
}
Example #8
void drawCurve(const SCurve* in_curve, const SVar& in_Vars)
{
	Eigen::Vector2d center = Eigen::Vector2d::Zero();

	glColor3f(0.2, 0.7, 0.2);
	::glLineWidth(4.0);
	::glBegin(GL_LINE_STRIP);

	for(int i=0; i<in_curve->nVertices; i++)
		glVertex2d(in_Vars.pos.col(i).x() - center.x(), in_Vars.pos.col(i).y() - center.y());
	if(in_curve->closed)
		glVertex2d(in_Vars.pos.col(0).x() - center.x(), in_Vars.pos.col(0).y() - center.y());
	::glEnd();

	// Inserted points
	::glPointSize(5.0); //3.0
	::glBegin(GL_POINTS);
	for(int i=0; i<in_curve->nVertices; i++)
	{
		if(in_curve->vertexIDs(i) < 0)
		{
			glColor3f(0.2, 0.7, 0.2);
			glVertex2d(in_Vars.pos.col(i).x() - center.x(), in_Vars.pos.col(i).y() - center.y());
		}	
	}
	::glEnd();

	// The input feature points
	::glPointSize(10.0); //5.0
	::glBegin(GL_POINTS);
	for(int i=0; i<in_curve->nVertices; i++)
	{
		if(in_curve->vertexIDs(i) >= 0)
		{
			glColor3f(0.7, 0.2, 0.2);	
			glVertex2d(in_Vars.pos.col(i).x() - center.x(), in_Vars.pos.col(i).y() - center.y());
		}
	}
	::glEnd();
}
Example #9
void VertexCell::drawRawTopology(Time time, ViewSettings & viewSettings)
{
    bool screenRelative = viewSettings.screenRelative();
    if(screenRelative)
    {
        int n = 50;
        Eigen::Vector2d p = pos(time);
        glBegin(GL_POLYGON);
        {
            double r = 0.5 * viewSettings.vertexTopologySize() / viewSettings.zoom();
            //if(r == 0) r = 3;
            //else if (r<1) r = 1;
            for(int i=0; i<n; ++i)
            {
                double theta = 2 * (double) i * 3.14159 / (double) n ;
                glVertex2d(p.x() + r*std::cos(theta),p.y()+ r*std::sin(theta));
            }
        }
        glEnd();
    }
    else
    {
        int n = 50;
        Eigen::Vector2d p = pos(time);
        glBegin(GL_POLYGON);
        {
            double r = 0.5 * viewSettings.vertexTopologySize();
            if(r == 0) r = 3;
            else if (r<1) r = 1;
            for(int i=0; i<n; ++i)
            {
                double theta = 2 * (double) i * 3.14159 / (double) n ;
                glVertex2d(p.x() + r*std::cos(theta),p.y()+ r*std::sin(theta));
            }
        }
        glEnd();
    }
}
Example #10
void mesh_core::LineSegment2D::initialize(
      const Eigen::Vector2d& a,
      const Eigen::Vector2d& b)
{
  pt_[0] = a;
  pt_[1] = b;
  Eigen::Vector2d delta = b - a;

  if (std::abs(delta.x()) <= std::numeric_limits<double>::epsilon())
  {
    vertical_ = true;
    if (std::abs(delta.y()) <= std::numeric_limits<double>::epsilon())
      inv_dx_ = 0.0;
    else
      inv_dx_ = 1.0 / delta.y();
  }
  else
  {
    vertical_ = false;
    inv_dx_ = 1.0 / delta.x();
    slope_ = delta.y() * inv_dx_;
    y_intercept_ = a.y() - slope_ * a.x();
  }
}
/**
 * @brief Retrieve the cartesian point coordinates of a range reading.
 * @param reading           the range reading
 * @param angular_res       the angular resolution of the range reading
 * @param min_angle         the minimum angle of the range reading
 * @param min_rho           the minimum valid range
 * @param max_rho           the maximum valid range
 * @param points            the point coordinates in cartesian space
 */
inline void polarToCartesian(const std::vector<float> &reading, const float angular_res, const float min_angle,
                             const float min_rho, const float max_rho, std::vector<LaserBeam> &points)
{
    points.clear();
    double angle = min_angle;
    for(std::vector<float>::const_iterator it = reading.begin() ; it != reading.end() ; ++it, angle += angular_res) {
        float rho = *it;
        if(rho < max_rho && rho > min_rho && rho > 0.0) {
            Eigen::Vector2d pt;
            pt.x() = std::cos(angle) * rho;
            pt.y() = std::sin(angle) * rho;
            points.push_back(pt);
        }

    }
}
Example #12
void Camera::pan(Eigen::Vector2i oldPosition, Eigen::Vector2i newPosition){

  const Eigen::Vector2d delta = 
	(newPosition - oldPosition).eval().template cast<double>();
  
  const Eigen::Vector3d forwardVector = lookAt - position;
  const Eigen::Vector3d rightVector = forwardVector.cross(up);
  const Eigen::Vector3d upVector = rightVector.cross(forwardVector);
  
  const double scale = 0.0005;
  
  position += scale*(-delta.x()*rightVector +
	  delta.y()*upVector);
  

}
Example #13
void VertexCell::drawPickCustom(Time time, ViewSettings & /*viewSettings*/)
{
    if(!exists(time))
        return;

    int n = 50;
    Eigen::Vector2d p = pos(time);
    glBegin(GL_POLYGON);
    {
        double r = 0.5 * size(time);
        for(int i=0; i<n; ++i)
        {
            double theta = 2 * (double) i * 3.14159 / (double) n ;
            glVertex2d(p.x() + r*std::cos(theta),p.y()+ r*std::sin(theta));
        }
    }
    glEnd();
}
void DepthObstacleGrid::initializeStatics(NodeMap *map){
  
  Environment env = map->getEnvironment();
  
  for(std::vector<Plane>::iterator it = env.planes.begin(); it != env.planes.end(); it++){
    
    double plane_length = Eigen::Vector2d( it->span_horizontal.x(), it->span_horizontal.y() ).norm();
    Eigen::Vector2d step = Eigen::Vector2d( it->span_horizontal.x(), it->span_horizontal.y() ) / (plane_length / (0.5 * resolution) );
    double step_length = step.norm();
        
    Eigen::Vector2d lastCell(NAN, NAN);
    Eigen::Vector2d pos = Eigen::Vector2d(it->position.x(), it->position.y());
    
    //iterate through the cells
    for(double i = 0.0; i < plane_length; i += step_length){
      
      Eigen::Vector2d cell_coord = getGridCoord( pos.x(), pos.y() );
      
      //only act once we have reached a new cell (otherwise the step was not wide enough and we are still in the same cell)
      if(cell_coord != lastCell){       
      
        lastCell = cell_coord;
        
        Eigen::Vector2i ID = getCellID(cell_coord.x(), cell_coord.y());
        
        //Step was invalid, we are outside the grid
        if(ID.x() == -1){
          pos += step;
          continue;
        }
        
        //Set the cell static
        GridElement &elem = get(ID.x(), ID.y());      
        elem.static_obstacle = true;
                
      }
      
      pos += step;
      
    }
    
  }
  
}
Example #15
const Eigen::Vector2d CMiniVisionToolbox::getPointDistortedPlumbBob( const Eigen::Vector2d& p_vecPointUndistorted, const Eigen::Vector2d& p_vecPointPrincipal, const Eigen::Vector4d& p_vecDistortionCoefficients )
{
    const int32_t iUndistortedX( std::abs( p_vecPointUndistorted.x( ) ) );
    const int32_t iUndistortedY( std::abs( p_vecPointUndistorted.y( ) ) );

    //ds current ranges
    const int32_t iXStart( iUndistortedX-40 );
    const int32_t iXStop( iUndistortedX+40 );
    const int32_t iYStart( iUndistortedY-40 );
    const int32_t iYStop( iUndistortedY+40 );

    double dErrorLowest( 10.0 );
    Eigen::Vector2d vecPointDistorted( 0, 0 );

    //ds loop around the current point
    for( int32_t i = iXStart; i < iXStop; ++i )
    {
        for( int32_t j = iYStart; j < iYStop; ++j )
        {
            //ds get current error for this distorted candidate
            const Eigen::Vector2d vecError( p_vecPointUndistorted-CMiniVisionToolbox::getPointUndistortedPlumbBob( Eigen::Vector2i( i, j ), p_vecPointPrincipal, p_vecDistortionCoefficients ) );

            //ds absolute error
            const double dErrorCurrent( std::fabs( vecError(0) ) + std::fabs( vecError(1) ) );

            //ds if the error is lower keep this candidate
            if( dErrorLowest > dErrorCurrent )
            {
                dErrorLowest      = dErrorCurrent;
                vecPointDistorted = Eigen::Vector2d( i, j );
            }
        }
    }

    std::printf( "error: %f\n", dErrorLowest );

    return vecPointDistorted;
}
Example #16
 void analytical_derivative(const Camera& camera_, const Point3d& point_, const Eigen::Vector2d& obs, Eigen::Matrix<double,2,12>& j)
 {
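   // T is assumed to be an auto-diff (jet-like) type that carries 12 partial derivatives:
   // indices 0-2 rotation, 3-5 translation, 6 focal length, 7-8 radial distortion, 9-11 the 3D point.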
   const T camera[3] = {T(camera_[0],0),T(camera_[1],1),T(camera_[2],2)};
   const T point[3] = {T(point_[0],9),T(point_[1],10),T(point_[2],11)};
   T p[3];
   ceres::AngleAxisRotatePoint(camera, point, p);
   p[0] += T(camera_[3],3);
   p[1] += T(camera_[4],4);
   p[2] += T(camera_[5],5);
   auto xp = - p[0] / p[2];
   auto yp = - p[1] / p[2];
   const T l1(camera_[7],7);
   const T l2(camera_[8],8);
   auto r2 = xp*xp + yp*yp;
   auto distortion = T(1.0) + r2  * (l1 + l2  * r2);
   T focal(camera_[6],6);
   auto jacobx = focal * distortion * xp - T(obs.x());
   auto jacoby = focal * distortion * yp - T(obs.y());
   for(int i = 0 ; i < 12 ; ++i)
   {
     j(0,i) = jacobx.v[i];
     j(1,i) = jacoby.v[i];
   }
 }
  void generateVisualizationMarker(double steering_angle_rad, visualization_msgs::MarkerArray& marker_array)
  {
    double turn_radius = 0.0;

    if (std::abs(steering_angle_rad) > 0.000001){
      // use cotangent
      //turn_radius = std::tan((M_PI*0.5 - steering_angle_rad) * p_wheel_base_);
      turn_radius = (1.0/std::tan(steering_angle_rad)) * p_wheel_base_;
    }

    // (0,0) is rear axle middle
    // x axis towards front, y axis to left of car

    Eigen::Vector2d icc (0.0, turn_radius);

    Eigen::Vector2d left_wheel (p_wheel_base_, p_wheel_track_*0.5);
    Eigen::Vector2d right_wheel(p_wheel_base_, -p_wheel_track_*0.5);

    double dist_left  = (left_wheel  - icc).norm();
    double dist_right = (right_wheel - icc).norm();

    double steer_angle_left  = std::asin(p_wheel_base_/dist_left);
    double steer_angle_right = std::asin(p_wheel_base_/dist_right);

    if (turn_radius < 0){
      steer_angle_left  = -steer_angle_left;
      steer_angle_right = -steer_angle_right;
    }

    ROS_DEBUG("turn radius: %f dist left: %f right: %f", turn_radius, dist_left, dist_right);
    ROS_DEBUG("steer angle left: %f right: %f", steer_angle_left, steer_angle_right);

    marker_array.markers[0].points.resize(40);
    marker_array.markers[1].points.resize(40);


    std::vector<geometry_msgs::Point>& point_vector_left = marker_array.markers[0].points;
    std::vector<geometry_msgs::Point>& point_vector_right = marker_array.markers[1].points;

    marker_array.markers[1].color.r = 0.0;
    marker_array.markers[1].color.b = 1.0;
    marker_array.markers[1].id = 1;

    Eigen::Affine3d rot_left (Eigen::AngleAxisd(M_PI*0.5, Eigen::Vector3d::UnitX())*
                              Eigen::AngleAxisd(steer_angle_left, Eigen::Vector3d::UnitY()));

    tf::quaternionEigenToMsg(Eigen::Quaterniond(rot_left.rotation()), marker_array_.markers[LEFT_FRONT_WHEEL].pose.orientation);

    Eigen::Affine3d rot_right (Eigen::AngleAxisd(M_PI*0.5, Eigen::Vector3d::UnitX())*
                               Eigen::AngleAxisd(steer_angle_right, Eigen::Vector3d::UnitY()));

    tf::quaternionEigenToMsg(Eigen::Quaterniond(rot_right.rotation()), marker_array_.markers[RIGHT_FRONT_WHEEL].pose.orientation);


    //marker_array_.markers[LEFT_FRONT_WHEEL].pose.orientation; // = marker;
    //marker_array_.markers[RIGHT_FRONT_WHEEL].pose.orientation; // = marker;


    Eigen::Vector3d rotation_vector( Eigen::Vector3d::UnitZ() );

    if (turn_radius > 0.0){
      rotation_vector = -rotation_vector;
    }

    //std::cout << "rotation_vector:\n" << rotation_vector << "\n";

    for (size_t i = 0; i < 40; ++i)
    {
      Eigen::Affine3d o_t_i (Eigen::Affine3d::Identity());
      o_t_i.translation() = Eigen::Vector3d(icc.x(), -icc.y(), 0.0);

      //Eigen::Rotation2Dd rotation(steer_angle_left + static_cast<double>(i) * 0.05);



      Eigen::Affine3d rotation_left (Eigen::AngleAxisd(static_cast<double>(i) * 0.05,
                                     rotation_vector));

      //Eigen::Affine3d rotation_left (Eigen::AngleAxisd( static_cast<double>(i) * 0.05,
//                                     (turn_radius > 0.0) ? -(Eigen::Vector3d::UnitZ()) : Eigen::Vector3d::UnitZ() ));

      //Eigen::Vector2d tmp(o_t_i * rotation * left_wheel);
      //Eigen::Vector2d tmp(o_t_i * rotation * left_wheel).translation();
      Eigen::Vector3d tmp(o_t_i * rotation_left *Eigen::Vector3d(p_wheel_base_, (turn_radius)-p_wheel_track_*0.5, 0.0));

      point_vector_left[i].x = tmp.x();
      point_vector_left[i].y = -tmp.y();

      Eigen::Affine3d rotation_right (Eigen::AngleAxisd(static_cast<double>(i) * 0.05,
                                      rotation_vector));

      //Eigen::Affine3d rotation_right (Eigen::AngleAxisd( static_cast<double>(i) * 0.05,
      //                                 (turn_radius > 0.0) ? -(Eigen::Vector3d::UnitZ()) : Eigen::Vector3d::UnitZ() ));

      tmp = o_t_i * rotation_right*Eigen::Vector3d(p_wheel_base_, (turn_radius)+p_wheel_track_*0.5, 0.0);

      point_vector_right[i].x = tmp.x();
      point_vector_right[i].y = -tmp.y();
    }
  }
Example #18
bool BaseVH::isInsideToEdge( Eigen::Vector2d point1 , Eigen::Vector2d point2 , Eigen::Vector2d point )
{
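  // val is the z-component of the 2D cross product (point2 - point1) x (point - point1);
  // in a standard right-handed x-y frame a negative value means 'point' lies to the right of the directed edge.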
  double val = ( point.y() - point1.y() ) * ( point2.x() - point1.x() ) - ( point2.y() - point1.y() ) * ( point.x() - point1.x() );

  return ( val < 0 );
}
Example #19
void SurfaceVisualization::visualize(
    const grid_map::GridMap& map,
    const std::string& typeNameSurface,
    const std::string& typeNameColor,
    const float lowerValueBound,
    const float upperValueBound)
{
  // Set marker info.
  marker_.header.frame_id = map.getFrameId();
  marker_.header.stamp.fromNSec(map.getTimestamp());
  marker_.scale.x = map.getResolution();
  marker_.scale.y = map.getResolution();

  // Clear points.
  marker_.points.clear();
  marker_.colors.clear();

  float markerHeightOffset = static_cast<float>(markerHeight_/2.0);

  const Eigen::Array2i buffSize = map.getBufferSize();
  const Eigen::Array2i buffStartIndex = map.getBufferStartIndex();
  const bool haveColor = map.isType("class");
  for (unsigned int i = 0; i < buffSize(0); ++i)
  {
    for (unsigned int j = 0; j < buffSize(1); ++j)
    {
      // Getting map data.
      const Eigen::Array2i cellIndex(i, j);
      const Eigen::Array2i buffIndex =
          grid_map_lib::getBufferIndexFromIndex(cellIndex, buffSize, buffStartIndex);
      const float& elevation = map.at("mu", buffIndex);
      const float& class_pred = map.at("class", buffIndex);
      int class_number = 0;
      if (!std::isnan(class_pred))
        class_number = (int)class_pred;
      if (std::isnan(elevation))
        continue;

      //const float color = haveColor ? 16711680/class_pred : lowerValueBound;
      //ROS_INFO_STREAM("class_pred: "<<class_pred <<" int: "<<(int)class_pred);

      std_msgs::ColorRGBA surface_color;
      surface_color.a = 1.0;
      switch (class_number) {
        case 1:
          //long_grass
          surface_color.r = 0.0;
          surface_color.g = 1.0;
          surface_color.b = 0.0;
          break;
        case 3:
          //cobblestone
          surface_color.r = 0.62;
          surface_color.g = 0.32;
          surface_color.b = 0.17;
          break;
        case 4:
          //gravel
          surface_color.r = 0.2;
          surface_color.g = 0.2;
          surface_color.b = 0.2;
          break;
        default:
          surface_color.r = 1.0;
          surface_color.g = 1.0;
          surface_color.b = 1.0;          
          break;
        }

      // Add marker point.
      Eigen::Vector2d position;
      map.getPosition(buffIndex, position);
      geometry_msgs::Point point;
      point.x = position.x();
      point.y = position.y();
      point.z = elevation - markerHeightOffset;
      marker_.points.push_back(point);

      // Add marker color.
      if(haveColor)
      {
//        std_msgs::ColorRGBA markerColor =
//            grid_map_visualization::color_utils::interpolateBetweenColors(
//              color, lowerValueBound, upperValueBound, lowerColor_, upperColor_);
        //marker_.colors.push_back(markerColor);
        marker_.colors.push_back(surface_color);
      }
    }
  }

  markerPublisher_.publish(marker_);
}
Example #20
void glsl_program::set_uniform(unsigned loc, const Eigen::Vector2d& value) const
{
    if (used_program != this)
	throw runtime_error("glsl_program::set_uniform, program not bound!");
    glUniform2f(loc, value.x(), value.y());
}
Example #21
// Compute activation level function
level_t Dribble::computeActivationLevel()
{
	// Get ball location (don't activate if we can't see it)
	bI = SM->ballPosition.read();
	if(!bI)
		return false; // TODO: Does this have bad effect with not resetting locked_on? (timeout to reset m_locked_on?)

	// Retrieve dribble target
	bool isGoal = true;
	Eigen::Vector2d target = L->dribbleTarget(isGoal);

	// Get goal target angles
	const double goalTargetAngle = atan2(target.y(), target.x());
	const double goalTargetAngleRange = L->maxGoalPostAngle() - L->minGoalPostAngle();
	const double goalTargetAngleLBnd = L->minGoalPostAngle() + 0.1 * goalTargetAngleRange;
	const double goalTargetAngleUBnd = L->maxGoalPostAngle() - 0.1 * goalTargetAngleRange;
	
	// Lock-on constants
	const double ballTolY = 0.05;
	const double minBallX = 0.05;
	const double maxBallX = 0.65;
	const double maxBallYExtra = 0.05;
	const double maxBallYWeak = L->targetBallOffsetY + ballTolY + maxBallYExtra;
	const double targetTolYNonGoal = 1.8;
	const double targetTolAngle = 0.7;

	// Check conditions
	bool xCondition =     (bI->point.x >= minBallX) && (bI->point.x <= maxBallX);
	bool xConditionWeak = (bI->point.x >= minBallX) && (bI->point.x <= L->maxBallXWeak);
	bool yCondition = (fabs(fabs(bI->point.y) - L->targetBallOffsetY) <= ballTolY);
	bool yConditionWeak = (fabs(bI->point.y) <= maxBallYWeak);
	bool targetXCondition = (target.x() > bI->point.x);
	bool lineUpGoalCondition = goalTargetAngleLBnd < 0 && goalTargetAngleUBnd > 0;
	bool lineUpNonGoalCondition = (fabs(target.y()) <= 0.5 * targetTolYNonGoal) && (fabs(goalTargetAngle) <= 0.5 * targetTolAngle);
	bool lineUpCondition = (isGoal && lineUpGoalCondition) || (!isGoal && lineUpNonGoalCondition);
	bool timeCondition = lastToggle.hasElapsed(0.3);

	// TODO: DEBUG - Print our boolean conditions
// 	ROS_INFO_STREAM_THROTTLE(0.1, "X: " << xCondition << " | Y: " << yCondition << " | targetX: " << targetXCondition << " | lineUpNonGoal: " << lineUpNonGoalCondition << " | lineUpGoal: " << lineUpGoalCondition << " | time: " << timeCondition << " |");


	// Lock on if in narrow area plus other checks
	if(!m_locked_on && xCondition && yCondition && targetXCondition && lineUpCondition && timeCondition)
	{
// 		ROS_INFO_STREAM("DRIBBLE: X: " << xCondition << " | Y: " << yCondition << " | targetX: " << targetXCondition << " | lineUpNonGoal: " << lineUpNonGoalCondition << " | lineUpGoal: " << lineUpGoalCondition << " | time: " << timeCondition << " |");

// 		ROS_INFO_STREAM("Dribble lock toggled ON!" << endl << endl << endl << endl << endl);
		lastToggle.setMarker();
		m_locked_on = true;
		return true;
	}

	// Lock off if not in wider area (plus other checks) anymore
	if(m_locked_on && !(xConditionWeak && yConditionWeak && targetXCondition && lineUpCondition) && timeCondition)
	{
// 		ROS_INFO_STREAM("Dribble lock toggled OFF!" << endl << endl << endl << endl << endl);
		lastToggle.setMarker();
		m_locked_on = false;
		return false;
	}

	// Return current behaviour lock state if no toggle is necessary
	return m_locked_on;
}
    bool BeaconBasedPoseEstimator::m_kalmanAutocalibEstimator(LedGroup &leds,
                                                              double dt) {
        auto const beaconsSize = m_beacons.size();
        // Default measurement variance (for now, factor) per axis.
        double varianceFactor = 1;

        auto maxBoxRatio = m_params.boundingBoxFilterRatio;
        auto minBoxRatio = 1.f / m_params.boundingBoxFilterRatio;

        auto inBoundsID = std::size_t{0};
        // Default to using all the measurements we can
        auto skipBright = false;
        {
            auto totalLeds = leds.size();
            auto identified = std::size_t{0};
            auto inBoundsBright = std::size_t{0};
            auto inBoundsRound = std::size_t{0};
            for (auto const &led : leds) {
                if (!led.identified()) {
                    continue;
                }
                identified++;
                auto id = led.getID();
                if (id >= beaconsSize) {
                    continue;
                }
                inBoundsID++;
                if (led.isBright()) {
                    inBoundsBright++;
                }

                if (led.getMeasurement().knowBoundingBox) {
                    auto boundingBoxRatio =
                        led.getMeasurement().boundingBox.height /
                        led.getMeasurement().boundingBox.width;
                    if (boundingBoxRatio > minBoxRatio &&
                        boundingBoxRatio < maxBoxRatio) {
                        inBoundsRound++;
                    }
                }
            }

/// If we only see a few beacons, they may be as likely to send us spinning as
/// help us keep tracking.
#if 0
            // Now we decide if we want to cut the variance artificially to
            // reduce latency in low-beacon situations
            if (inBoundsID < LOW_BEACON_CUTOFF) {
                varianceFactor = 0.5;
            }
#endif

            if (inBoundsID - inBoundsBright >
                    DIM_BEACON_CUTOFF_TO_SKIP_BRIGHTS &&
                m_params.shouldSkipBrightLeds) {
                skipBright = true;
            }
            if (0 == inBoundsID) {
                m_framesWithoutIdentifiedBlobs++;
            } else {
                m_framesWithoutIdentifiedBlobs = 0;
            }
        }

        CameraModel cam;
        cam.focalLength = m_camParams.focalLength();
        cam.principalPoint = cvToVector(m_camParams.principalPoint());
        ImagePointMeasurement meas{cam};

        kalman::ConstantProcess<kalman::PureVectorState<>> beaconProcess;

        const auto maxSquaredResidual =
            m_params.maxResidual * m_params.maxResidual;
        const auto maxZComponent = m_params.maxZComponent;
        kalman::predict(m_state, m_model, dt);

        /// @todo should we be recalculating this for each beacon after each
        /// correction step? The order we filter them in is rather arbitrary...
        Eigen::Matrix3d rotate =
            Eigen::Matrix3d(m_state.getCombinedQuaternion());
        auto numBad = std::size_t{0};
        auto numGood = std::size_t{0};
        for (auto &led : leds) {
            if (!led.identified()) {
                continue;
            }
            auto id = led.getID();
            if (id >= beaconsSize) {
                continue;
            }

            auto &debug = m_beaconDebugData[id];
            debug.seen = true;
            debug.measurement = led.getLocation();
            if (skipBright && led.isBright()) {
                continue;
            }

            // Angle of emission checking
            // If we transform the body-local emission vector, an LED pointed
            // right at the camera will be -Z. Anything with a 0 or positive z
            // component is clearly out, and realistically, anything with a z
            // component over -0.5 is probably fairly oblique. We don't want to
            // use such beacons since they can easily introduce substantial
            // error.
            double zComponent =
                (rotate * cvToVector(m_beaconEmissionDirection[id])).z();
            if (zComponent > 0.) {
                if (m_params.extraVerbose) {
                    std::cout << "Rejecting an LED at " << led.getLocation()
                              << " claiming ID " << led.getOneBasedID()
                              << std::endl;
                }
                /// This means the LED is pointed away from us - so we shouldn't
                /// be able to see it.
                led.markMisidentified();

                /// @todo This could be a mis-identification, or it could mean
                /// we're in a totally messed up state. Do we count this against
                /// ourselves?
                numBad++;
                continue;
            } else if (zComponent > maxZComponent) {
                /// LED is too askew of the camera to provide reliable data, so
                /// skip it.
                continue;
            }

#if 0
            if (led.getMeasurement().knowBoundingBox) {
                /// @todo For right now, if we don't have a bounding box, we're
                /// assuming it's square enough (and only testing for
                /// non-squareness on those who actually do have bounding
                /// boxes). This is very much a temporary situation.
                auto boundingBoxRatio =
                    led.getMeasurement().boundingBox.height /
                    led.getMeasurement().boundingBox.width;
                if (boundingBoxRatio < minBoxRatio ||
                    boundingBoxRatio > maxBoxRatio) {
                    /// skip non-circular blobs.
                    numBad++;
                    continue;
                }
            }
#endif

            auto localVarianceFactor = varianceFactor;
            auto newIdentificationVariancePenalty =
                std::pow(2.0, led.novelty());

            /// Stick a little bit of process model uncertainty in the beacon,
            /// if it's meant to have some
            if (m_beaconFixed[id]) {
                beaconProcess.setNoiseAutocorrelation(0);
            } else {
                beaconProcess.setNoiseAutocorrelation(
                    m_params.beaconProcessNoise);
                kalman::predict(*(m_beacons[id]), beaconProcess, dt);
            }

            meas.setMeasurement(
                Eigen::Vector2d(led.getLocation().x, led.getLocation().y));
            led.markAsUsed();
            auto state = kalman::makeAugmentedState(m_state, *(m_beacons[id]));
            meas.updateFromState(state);
            Eigen::Vector2d residual = meas.getResidual(state);
            if (residual.squaredNorm() > maxSquaredResidual) {
                // probably bad
                numBad++;
                localVarianceFactor *= m_params.highResidualVariancePenalty;
            } else {
                numGood++;
            }
            debug.residual.x = residual.x();
            debug.residual.y = residual.y();
            auto effectiveVariance =
                localVarianceFactor * m_params.measurementVarianceScaleFactor *
                newIdentificationVariancePenalty *
                (led.isBright() ? BRIGHT_PENALTY : 1.) *
                m_beaconMeasurementVariance[id] / led.getMeasurement().area;
            debug.variance = effectiveVariance;
            meas.setVariance(effectiveVariance);

            /// Now, do the correction.
            auto model =
                kalman::makeAugmentedProcessModel(m_model, beaconProcess);
            kalman::correct(state, model, meas);
            m_gotMeasurement = true;
        }

        /// Probation: Dealing with ratios of bad to good residuals
        bool incrementProbation = false;
        if (0 == m_framesInProbation) {
            // Let's try to keep a 3:2 ratio of good to bad when not "in
            // probation"
            incrementProbation = (numBad * 3 > numGood * 2);

        } else {
            // Already in trouble: add a bit of hysteresis and raise the bar
            // so we don't hop out easily.
            incrementProbation = numBad * 2 > numGood;
            if (!incrementProbation) {
                // OK, we're good again
                m_framesInProbation = 0;
            }
        }
        if (incrementProbation) {
            m_framesInProbation++;
        }

        /// Frames without measurements: dealing with getting in a bad state
        if (m_gotMeasurement) {
            m_framesWithoutUtilizedMeasurements = 0;
        } else {
            if (inBoundsID > 0) {
                /// We had a measurement, we rejected it. The problem may be the
                /// plank in our own eye, not the speck in our beacon's eye.
                m_framesWithoutUtilizedMeasurements++;
            }
        }

        /// Output to the OpenCV state types so we can see the reprojection
        /// debug view.
        m_rvec = eiQuatToRotVec(m_state.getQuaternion());
        cv::eigen2cv(m_state.position().eval(), m_tvec);
        return true;
    }
Example #23
	double y () const { return translation.y(); }
Example #24
  void EdgeSE2::linearizeOplus()
  {
    const VertexSE2* vi = static_cast<const VertexSE2*>(_vertices[0]);
    const VertexSE2* vj = static_cast<const VertexSE2*>(_vertices[1]);
    double thetai = vi->estimate().rotation().angle();

    Eigen::Vector2d dt = vj->estimate().translation() - vi->estimate().translation();
    double si=sin(thetai), ci=cos(thetai);

    _jacobianOplusXi(0, 0) = -ci; _jacobianOplusXi(0, 1) = -si; _jacobianOplusXi(0, 2) = -si*dt.x()+ci*dt.y();
    _jacobianOplusXi(1, 0) =  si; _jacobianOplusXi(1, 1) = -ci; _jacobianOplusXi(1, 2) = -ci*dt.x()-si*dt.y();
    _jacobianOplusXi(2, 0) =  0;  _jacobianOplusXi(2, 1) = 0;   _jacobianOplusXi(2, 2) = -1;

    _jacobianOplusXj(0, 0) = ci; _jacobianOplusXj(0, 1)= si; _jacobianOplusXj(0, 2)= 0;
    _jacobianOplusXj(1, 0) =-si; _jacobianOplusXj(1, 1)= ci; _jacobianOplusXj(1, 2)= 0;
    _jacobianOplusXj(2, 0) = 0;  _jacobianOplusXj(2, 1)= 0;  _jacobianOplusXj(2, 2)= 1;

    const SE2& rmean = _inverseMeasurement;
    Eigen::Matrix3d z = Eigen::Matrix3d::Zero();
    z.block<2, 2>(0, 0) = rmean.rotation().toRotationMatrix();
    z(2, 2) = 1.;
    _jacobianOplusXi = z * _jacobianOplusXi;
    _jacobianOplusXj = z * _jacobianOplusXj;
  }
Example #25
bool ElevationMap::fuse(const grid_map::Index& topLeftIndex, const grid_map::Index& size)
{
  ROS_DEBUG("Fusing elevation map...");

  // Nothing to do.
  if ((size == 0).any()) return false;

  // Initializations.
  const ros::WallTime methodStartTime(ros::WallTime::now());
  boost::recursive_mutex::scoped_lock scopedLock(fusedMapMutex_);

  // Copy raw elevation map data for safe multi-threading.
  boost::recursive_mutex::scoped_lock scopedLockForRawData(rawMapMutex_);
  auto rawMapCopy = rawMap_;
  scopedLockForRawData.unlock();

  // More initializations.
  const double halfResolution = fusedMap_.getResolution() / 2.0;
  const float minimalWeight = std::numeric_limits<float>::epsilon() * (float) 2.0;
  // Conservative cell inclusion for ellipse iterator.
  const double ellipseExtension = M_SQRT2 * fusedMap_.getResolution();

  // Check if there is the need to reset out-dated data.
  if (fusedMap_.getTimestamp() != rawMapCopy.getTimestamp()) resetFusedData();

  // Align fused map with raw map.
  if (rawMapCopy.getPosition() != fusedMap_.getPosition()) fusedMap_.move(rawMapCopy.getPosition());

  // For each cell in requested area.
  for (SubmapIterator areaIterator(rawMapCopy, topLeftIndex, size); !areaIterator.isPastEnd(); ++areaIterator) {

    // Check if fusion for this cell has already been done earlier.
    if (fusedMap_.isValid(*areaIterator)) continue;

    if (!rawMapCopy.isValid(*areaIterator)) {
      // This is an empty cell (hole in the map).
      // TODO.
      continue;
    }

    // Get size of error ellipse.
    const float& sigmaXsquare = rawMapCopy.at("horizontal_variance_x", *areaIterator);
    const float& sigmaYsquare = rawMapCopy.at("horizontal_variance_y", *areaIterator);
    const float& sigmaXYsquare = rawMapCopy.at("horizontal_variance_xy", *areaIterator);

    Eigen::Matrix2d covarianceMatrix;
    covarianceMatrix << sigmaXsquare, sigmaXYsquare, sigmaXYsquare, sigmaYsquare;
    // 95.45% confidence ellipse which is 2.486-sigma for 2 dof problem.
    // http://www.reid.ai/2012/09/chi-squared-distribution-table-with.html
    const double uncertaintyFactor = 2.486; // sqrt(6.18)
    Eigen::EigenSolver<Eigen::Matrix2d> solver(covarianceMatrix);
    Eigen::Array2d eigenvalues(solver.eigenvalues().real().cwiseAbs());

    Eigen::Array2d::Index maxEigenvalueIndex;
    eigenvalues.maxCoeff(&maxEigenvalueIndex);
    Eigen::Array2d::Index minEigenvalueIndex;
    maxEigenvalueIndex == Eigen::Array2d::Index(0) ? minEigenvalueIndex = 1 : minEigenvalueIndex = 0;
    const Length ellipseLength =  2.0 * uncertaintyFactor * Length(eigenvalues(maxEigenvalueIndex), eigenvalues(minEigenvalueIndex)).sqrt() + ellipseExtension;
    const double ellipseRotation(atan2(solver.eigenvectors().col(maxEigenvalueIndex).real()(1), solver.eigenvectors().col(maxEigenvalueIndex).real()(0)));

    // Requested length and position (center) of submap in map.
    Position requestedSubmapPosition;
    rawMapCopy.getPosition(*areaIterator, requestedSubmapPosition);
    EllipseIterator ellipseIterator(rawMapCopy, requestedSubmapPosition, ellipseLength, ellipseRotation);

    // Prepare data fusion.
    Eigen::ArrayXf means, weights;
    const unsigned int maxNumberOfCellsToFuse = ellipseIterator.getSubmapSize().prod();
    means.resize(maxNumberOfCellsToFuse);
    weights.resize(maxNumberOfCellsToFuse);
    WeightedEmpiricalCumulativeDistributionFunction<float> lowerBoundDistribution;
    WeightedEmpiricalCumulativeDistributionFunction<float> upperBoundDistribution;

    float maxStandardDeviation = sqrt(eigenvalues(maxEigenvalueIndex));
    float minStandardDeviation = sqrt(eigenvalues(minEigenvalueIndex));
    Eigen::Rotation2Dd rotationMatrix(ellipseRotation);
    std::string maxEigenvalueLayer, minEigenvalueLayer;
    if (maxEigenvalueIndex == 0) {
      maxEigenvalueLayer = "horizontal_variance_x";
      minEigenvalueLayer = "horizontal_variance_y";
    } else {
      maxEigenvalueLayer = "horizontal_variance_y";
      minEigenvalueLayer = "horizontal_variance_x";
    }

    // For each cell in error ellipse.
    size_t i = 0;
    for (; !ellipseIterator.isPastEnd(); ++ellipseIterator) {
      if (!rawMapCopy.isValid(*ellipseIterator)) {
        // Empty cell in submap (cannot be center cell because we checked above).
        continue;
      }

      means[i] = rawMapCopy.at("elevation", *ellipseIterator);

      // Compute weight from probability.
      Position absolutePosition;
      rawMapCopy.getPosition(*ellipseIterator, absolutePosition);
      Eigen::Vector2d distanceToCenter = (rotationMatrix * (absolutePosition - requestedSubmapPosition)).cwiseAbs();

      float probability1 =
            cumulativeDistributionFunction(distanceToCenter.x() + halfResolution, 0.0, maxStandardDeviation)
          - cumulativeDistributionFunction(distanceToCenter.x() - halfResolution, 0.0, maxStandardDeviation);
      float probability2 =
            cumulativeDistributionFunction(distanceToCenter.y() + halfResolution, 0.0, minStandardDeviation)
          - cumulativeDistributionFunction(distanceToCenter.y() - halfResolution, 0.0, minStandardDeviation);

      const float weight = max(minimalWeight, probability1 * probability2);
      weights[i] = weight;
      const float standardDeviation = sqrt(rawMapCopy.at("variance", *ellipseIterator));
      lowerBoundDistribution.add(means[i] - 2.0 * standardDeviation, weight);
      upperBoundDistribution.add(means[i] + 2.0 * standardDeviation, weight);

      i++;
    }

    if (i == 0) {
      // Nothing to fuse.
      fusedMap_.at("elevation", *areaIterator) = rawMapCopy.at("elevation", *areaIterator);
      fusedMap_.at("lower_bound", *areaIterator) = rawMapCopy.at("elevation", *areaIterator) - 2.0 * sqrt(rawMapCopy.at("variance", *areaIterator));
      fusedMap_.at("upper_bound", *areaIterator) = rawMapCopy.at("elevation", *areaIterator) + 2.0 * sqrt(rawMapCopy.at("variance", *areaIterator));
      fusedMap_.at("color", *areaIterator) = rawMapCopy.at("color", *areaIterator);
      continue;
    }

    // Fuse.
    means.conservativeResize(i);
    weights.conservativeResize(i);

    float mean = (weights * means).sum() / weights.sum();

    if (!std::isfinite(mean)) {
      ROS_ERROR("Something went wrong when fusing the map: Mean = %f", mean);
      continue;
    }

    // Add to fused map.
    fusedMap_.at("elevation", *areaIterator) = mean;
    lowerBoundDistribution.compute();
    upperBoundDistribution.compute();
    fusedMap_.at("lower_bound", *areaIterator) = lowerBoundDistribution.quantile(0.01); // TODO
    fusedMap_.at("upper_bound", *areaIterator) = upperBoundDistribution.quantile(0.99); // TODO
    // TODO Add fusion of colors.
    fusedMap_.at("color", *areaIterator) = rawMapCopy.at("color", *areaIterator);
  }

  fusedMap_.setTimestamp(rawMapCopy.getTimestamp());

  const ros::WallDuration duration(ros::WallTime::now() - methodStartTime);
  ROS_INFO("Elevation map has been fused in %f s.", duration.toSec());

  return true;
}
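A quick sanity check for the 2.486 factor used above: for a 2-DOF chi-square distribution the quantile has the closed form chi2 = -2 * ln(1 - p), so the 95.45% ellipse scale follows directly (a standalone sketch, not part of the original class):

#include <cmath>
#include <cstdio>

int main()
{
  const double p = 0.9545;                       // 2-sigma coverage of a 1-D Gaussian
  const double chi2 = -2.0 * std::log(1.0 - p);  // ~6.18 for 2 degrees of freedom
  std::printf("chi2 = %.3f, scale = %.3f\n", chi2, std::sqrt(chi2));  // scale ~2.486
  return 0;
}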
Example #26
double great_circle::arc::bearing( double angle ) const
{
    Eigen::Vector2d t = tangent_in_navigation_frame_at( angle );
    double b = std::acos( t.x() ) * ( t.y() < 0 ? -1 : 1 );
    return b >= M_PI ? b - M_PI * 2 : b;
}
Example #27
bool Polygon::sortVertices(const Eigen::Vector2d& vector1,
                           const Eigen::Vector2d& vector2)
{
  return (vector1.x() < vector2.x()
      || (vector1.x() == vector2.x() && vector1.y() < vector2.y()));
}
Example #28
const Eigen::Vector2d CMiniVisionToolbox::getPointDistorted( const Eigen::Vector2d& p_vecPointUndistorted, const Eigen::Vector2d& p_vecPrincipalPoint, const Eigen::Vector4d& p_vecDistortionCoefficients )
{
    //ds transform to 1:1 aspect ratio
    const Eigen::Vector2d vecPointUndistorted( p_vecPointUndistorted.x( )/752, p_vecPointUndistorted.y( )/480 );
    const Eigen::Vector2d vecPrincipalPoint( p_vecPrincipalPoint.x( )/752, p_vecPrincipalPoint.y( )/480 );

    //ds get distances
    const double dDeltaX( vecPointUndistorted(0)-vecPrincipalPoint(0) );
    const double dDeltaY( vecPointUndistorted(1)-vecPrincipalPoint(1) );

    //ds if not on focal point
    if( 0.0 != dDeltaX )
    {
        //ds compute coefficients
        const double dFraction( dDeltaY/dDeltaX );
        const double dEpsilon( 1 + dFraction*dFraction );
        const double dSqrtEpsilon( std::sqrt( dEpsilon ) );
        const double dA( p_vecDistortionCoefficients(0)*dSqrtEpsilon );
        const double dB( p_vecDistortionCoefficients(1)*dEpsilon );
        const double dC( p_vecDistortionCoefficients(2)*dSqrtEpsilon*dSqrtEpsilon*dSqrtEpsilon );
        const double dD( p_vecDistortionCoefficients(3)*dEpsilon*dEpsilon );
        const double dE( vecPointUndistorted(0)-vecPrincipalPoint(0) );

        //ds sample resolution in aspect ratio at unity (from 0 to 1)
        const double dResolution( 0.001 );
        const uint64_t uSteps( 1.0/dResolution );
        double dErrorLowest( 1.0 );
        double dUBest( 0.0 );
        double dLBest( 0.0 );

        //ds sample a solution
        for( uint64_t u = 1; u < uSteps; ++u )
        {
            //ds set current sample
            const double dU( u*dResolution-vecPrincipalPoint(0) );
            const double dU2( dU*dU );
            const double dU3( dU2*dU );
            const double dU4( dU3*dU );
            const double dL( 1 + dA*dU + dB*dU2 + dC*dU3 + dD*dU4 );

            //ds compute error
            const double dError( std::fabs( dU*dL-dE ) );

            //ds if the error is lower pick the sample
            if( dErrorLowest > dError )
            {
                dErrorLowest = dError;
                dUBest       = dU;
                dLBest       = dL;
            }
        }

        //ds compute x (since u=x-x_c -> x=u+x_c)
        const double dDistortedX( dUBest+vecPrincipalPoint(0) );
        const double dDistortedY( vecPrincipalPoint(1) + dFraction*dUBest );

        const double dUndistortedX( vecPrincipalPoint(0) + dLBest*dUBest );

        std::printf( "error: %f\n", dErrorLowest );
        std::printf( "undistorted: %f %f\n", vecPointUndistorted(0)*752, vecPointUndistorted(1)*480 );
        std::printf( "distorted: %f %f\n", dDistortedX*752, dDistortedY*480 );

        const Eigen::Vector2d vecUndistortedCheck( CMiniVisionToolbox::getPointUndistorted( Eigen::Vector2d( dDistortedX*752, dDistortedY*480 ), p_vecPrincipalPoint, p_vecDistortionCoefficients ) );

        std::printf( "redistorted: %f %f\n", vecUndistortedCheck(0), vecUndistortedCheck(1) );
        std::printf( "redistorted X: %f\n", dUndistortedX*752 );

        return Eigen::Vector2d( dDistortedX*752, dDistortedY*480 );

        /*ds compute quadratic coefficients
        const double dFraction( dDeltaY/dDeltaX );
        const double dEpsilon( p_vecDistortionCoefficients(0)*std::sqrt( 1 + dFraction*dFraction ) );
        const double dA( dEpsilon );
        const double dB( 1 - 2*dEpsilon*vecPrincipalPoint(0) );
        const double dC( dEpsilon*vecPrincipalPoint(0)*vecPrincipalPoint(0) - vecPointUndistorted(0) );

        //ds squared element
        const double dRootContent( dB*dB - 4*dA*dC );

        if( 0 <= dRootContent )
        {
            //ds compute solutions
            const double dDistortedX1( 1/( 2*dA )*( -dB+std::sqrt( dRootContent ) ) );
            const double dDistortedX2( 1/( 2*dA )*( -dB-std::sqrt( dRootContent ) ) );

            //ds compute y coordinates
            const double dDistortedY1( vecPrincipalPoint(1) + dFraction*( dDistortedX1-vecPrincipalPoint(0) ) );
            const double dDistortedY2( vecPrincipalPoint(1) + dFraction*( dDistortedX2-vecPrincipalPoint(0) ) );

            //ds get distortion free
            const Eigen::Vector2d vecUndistortedCheck( CMiniVisionToolbox::getPointUndistorted( Eigen::Vector2d( dDistortedX1*752, dDistortedY1*480 ), p_vecPrincipalPoint, p_vecDistortionCoefficients ) );

            std::printf( "undistorted: %f %f\n", vecPrincipalPoint(0)*752, vecPrincipalPoint(1)*480 );
            std::printf( "distorted 1: %f %f\n", dDistortedX1*752, dDistortedY1*480 );
            std::printf( "distorted 2: %f %f\n", dDistortedX2*752, dDistortedY2*480 );
            std::printf( "redistorted: %f %f\n", vecUndistortedCheck(0), vecUndistortedCheck(1) );

            return Eigen::Vector2d( dDistortedX1*752, dDistortedY1*480 );
        }
        else
        {
            //ds compute solutions (real parts)
            const double dDistortedX( 1/( 2*dA )*( -dB ) );
            const double dDistortedY( vecPrincipalPoint(1) + dFraction*( dDistortedX-vecPrincipalPoint(0) ) );
            std::printf( "undistorted: %f %f\n", vecPrincipalPoint(0)*752, vecPrincipalPoint(1)*480 );
            std::printf( "distorted IMAGINARY: %f %f\n", dDistortedX*752, dDistortedY*480 );
            return Eigen::Vector2d( dDistortedX*752, dDistortedY*480 );
        }*/
    }
    else
    {
        return Eigen::Vector2d( p_vecPointUndistorted(0), p_vecPointUndistorted(1) );
    }
}
Example #29
double Polygon::computeCrossProduct2D(const Eigen::Vector2d& vector1,
                                      const Eigen::Vector2d& vector2)
{
  return (vector1.x() * vector2.y() - vector1.y() * vector2.x());
}
Example #30
 inline cv::Point2f eigenVecToPoint(Eigen::Vector2d const &vec) {
     return cv::Point2f(vec.x(), vec.y());
 }