void SelfRobot::selfLandmarkDataCallback(const read_omni_dataset::LRMLandmarksData::ConstPtr& landmarkData, int RobotNumber)
{
  ROS_WARN(" got landmark data from self robot (ID=%d)",RobotNumber);  
  
  uint seq = landmarkData->header.seq;

  // There are a total of 10 distinct, known and static landmarks in this dataset.
  for(int i=0;i<10; i++)
  {
    if(landmarkData->found[i])
    {
    
      /// Below is the procedure to calculate the observation covariance associated with the landmark measurement made by the robot. Caution: verify the calculations below by cross-checking the obvious properties, e.g., a covariance cannot be negative or very close to 0.
      
      Eigen::Vector2d tempLandmarkObsVec = Eigen::Vector2d(landmarkData->x[i],landmarkData->y[i]);

      double d = tempLandmarkObsVec.norm(),
	     phi = atan2(landmarkData->y[i],landmarkData->x[i]);
      
      double covDD = (K1*fabs(1.0-(landmarkData->AreaLandMarkActualinPixels[i]/landmarkData->AreaLandMarkExpectedinPixels[i])))*(d*d);
      double covPhiPhi = K2*(1/(d+1));
      
      double covXX = pow(cos(phi),2) * covDD 
				  + pow(sin(phi),2) * ( pow(d,2) * covPhiPhi + covDD * covPhiPhi );
      double covYY = pow(sin(phi),2) * covDD 
				  + pow(cos(phi),2) * ( pow(d,2) * covPhiPhi + covDD * covPhiPhi );
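      // Interpretation of the expressions above: the polar measurement noise
      // (variance covDD on the range d, covPhiPhi on the bearing phi) is
      // propagated to Cartesian coordinates via x = d*cos(phi), y = d*sin(phi):
      //   covXX ~ cos^2(phi)*covDD + sin^2(phi)*d^2*covPhiPhi
      //   covYY ~ sin^2(phi)*covDD + cos^2(phi)*d^2*covPhiPhi
      // with the additional higher-order covDD*covPhiPhi cross term retained.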
      
      ROS_INFO("Landmark %d found in the image, refer to the method to see how covariances are calculated",i);  
    }
  }
  
 }
void SelfRobot::selfTargetDataCallback(const read_omni_dataset::BallData::ConstPtr& ballData, int RobotNumber)
{
  ROS_WARN("Got ball data from self robot %d",RobotNumber);  
  Time curObservationTime = ballData->header.stamp;
  
  if(ballData->found)
  {    
    /// Below is the procedure to calculate the observation covariance associated with the ball measurement made by the robot. Caution: verify the calculations below by cross-checking the obvious properties, e.g., a covariance cannot be negative or very close to 0.
    
    Eigen::Vector2d tempBallObsVec = Eigen::Vector2d(ballData->x,ballData->y);
    
    double d = tempBallObsVec.norm(),
	   phi = atan2(ballData->y,ballData->x);
    
    double covDD = (1.0/ballData->mismatchFactor)*(K3*d + K4*(d*d));
    double covPhiPhi = K5*(1/(d+1));
    
    double covXX = pow(cos(phi),2) * covDD 
				+ pow(sin(phi),2) * ( pow(d,2) * covPhiPhi + covDD * covPhiPhi );
    double covYY = pow(sin(phi),2) * covDD 
				+ pow(cos(phi),2) * ( pow(d,2) * covPhiPhi + covDD * covPhiPhi );
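    // Same polar-to-Cartesian covariance propagation as in
    // selfLandmarkDataCallback() above, with the range variance covDD modeled
    // here from the ball's mismatch factor instead of the landmark area ratio.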
    ROS_INFO("Ball found in the image, refer to the method to see how covariances are calculated");	
    
  }
  else
  {
    ROS_INFO("Ball not found in the image");
  }
  
}
void DepthObstacleGrid::initializeStatics(NodeMap *map){
  
  Environment env = map->getEnvironment();
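
  // Rasterize each plane of the environment onto the grid: starting at the
  // plane position, walk along the plane's horizontal span in steps of half a
  // cell (0.5 * resolution) and mark every grid cell the walk enters as a
  // static obstacle.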
  
  for(std::vector<Plane>::iterator it = env.planes.begin(); it != env.planes.end(); it++){
    
    double plane_length = Eigen::Vector2d( it->span_horizontal.x(), it->span_horizontal.y() ).norm();
    Eigen::Vector2d step = Eigen::Vector2d( it->span_horizontal.x(), it->span_horizontal.y() ) / (plane_length / (0.5 * resolution) );
    double step_length = step.norm();
        
    Eigen::Vector2d lastCell(NAN, NAN);
    Eigen::Vector2d pos = Eigen::Vector2d(it->position.x(), it->position.y());
    
    //iterate through the cells
    for(double i = 0.0; i < plane_length; i += step_length){
      
      Eigen::Vector2d cell_coord = getGridCoord( pos.x(), pos.y() );
      
      //only handle the cell if this step actually moved us into a new cell
      if(cell_coord != lastCell){
      
        lastCell = cell_coord;
        
        Eigen::Vector2i ID = getCellID(cell_coord.x(), cell_coord.y());
        
        //Step was invalid, we are outside the grid
        if(ID.x() == -1){
          pos += step;
          continue;
        }
        
        //Set the cell static
        GridElement &elem = get(ID.x(), ID.y());      
        elem.static_obstacle = true;
                
      }
      
      pos += step;
      
    }
    
  }
  
}
Example #4
	double distance () const { return translation.norm(); }
/*------------------------------------------------------------------------------*/
FM_INLINE
FastMarchingVertex * UnfoldTriangle(const Eigen::MatrixXd &V, const Eigen::MatrixXi &F,
	const Eigen::MatrixXi &TT, const  Eigen::MatrixXi &TTi,
	std::vector<struct FastMarchingVertex> &vertices,
	int f, int v, int v1, int v2,
	FM_Float& dist, FM_Float& dot1, FM_Float& dot2)
{
	const Eigen::Vector3d& p = V.row(v);// vert.GetPosition();
	const Eigen::Vector3d& p1 = V.row(v1);//vert1.GetPosition();
	const Eigen::Vector3d& p2 = V.row(v2);//vert2.GetPosition();

	Eigen::Vector3d e1 = p1 - p;
	FM_Float rNorm1 = e1.norm(); //~e1
	e1.normalize(); // e1 /= rNorm1;
	Eigen::Vector3d e2 = p2 - p;
	FM_Float rNorm2 = e2.norm(); // ~e2;
	e2.normalize(); // e2 /= rNorm2;

	FM_Float dot = e1.adjoint()*e2;// e1*e2;
	FM_ASSERT(dot < 0);

	/* the equation of the lines defining the unfolding region [e.g. line 1 : {x ; <x,eq1>=0} ]*/
	Eigen::Vector2d eq1 = Eigen::Vector2d(dot, sqrt(1 - dot*dot));
	Eigen::Vector2d eq2 = Eigen::Vector2d(1, 0);

	/* position of the 2 points on the unfolding plane */
	Eigen::Vector2d x1(rNorm1, 0);
	Eigen::Vector2d x2 = eq1*rNorm2;

	/* keep track of the starting point */
	Eigen::Vector2d xstart1 = x1;
	Eigen::Vector2d xstart2 = x2;

	FastMarchingVertex* pV1 = &(vertices[v1]);
	FastMarchingVertex* pV2 = &(vertices[v2]);

	
	int cur_f, cur_v;
	
	get_oppisite_f_v(F, TT,TTi, f,v, cur_f, cur_v);

	//FM_GeodesicFace* pCurFace = (FM_GeodesicFace*)CurFace.GetFaceNeighbor(vert);


	int nNum = 0;
	while (nNum < 50 && cur_f != -1) // NULL)
	{
		//	FastMarchingVertex* pV = (FastMarchingVertex*)pCurFace->GetVertex(*pV1, *pV2); //opposite vertex to face and edge(pV1,pV2)
		//	FM_ASSERT(pV != NULL);
		FastMarchingVertex* pV = &(vertices[cur_v]); //opposite vertex to face and vert
													 /*
													 e1 = pV2->GetPosition() - pV1->GetPosition();
													 FM_Float rNorm1 = ~e1;
													 e1 /= rNorm1;
													 e2 = pV->GetPosition() - pV1->GetPosition();
													 FM_Float rNorm2 = ~e2;
													 e2 /= rNorm2;
													 */

		Eigen::Vector3d e1 = V.row(pV2->vid) - V.row(pV1->vid);
		FM_Float rNorm1 = e1.norm(); //~e1
		e1.normalize(); // e1 /= rNorm1;
		Eigen::Vector3d e2 = V.row(pV->vid) - V.row(pV1->vid);
		FM_Float rNorm2 = e2.norm(); // ~e2;
		e2.normalize(); // e2 /= rNorm2;

						/* compute the position of the new point x on the unfolding plane (via a rotation of -alpha on (x2-x1)/rNorm1 )
						| cos(alpha) sin(alpha)|
						x = |-sin(alpha) cos(alpha)| * [x2-x1]*rNorm2/rNorm1 + x1   where cos(alpha)=dot
						*/
		Eigen::Vector2d vv = (x2 - x1)*rNorm2 / rNorm1;
		dot = e1.adjoint()*e2;  //e1*e2;
								//	Eigen::Vector2d x = vv.Rotate2D();////vv.Rotate(-acos(dot)) + x1;

		Eigen::Rotation2D<double> rot2(-acos(dot));
		Eigen::Vector2d x = rot2*vv + x1;  //dhw to check


										   /* compute the intersection points.
										   We look for x=x1+lambda*(x-x1) or x=x2+lambda*(x-x2) with <x,eqi>=0, so */
		FM_Float lambda11 = -(x1.dot(eq1)) / ((x - x1).dot(eq1));	 //-(x1*eq1) / ((x - x1)*eq1);	// left most 
		FM_Float lambda12 = -(x1.dot(eq2)) / ((x - x1).dot(eq2));  //-(x1*eq2) / ((x - x1)*eq2);	// right most
		FM_Float lambda21 = -(x2.dot(eq1)) / ((x - x2).dot(eq1)); //-(x2*eq1) / ((x - x2)*eq1);	// left most 
		FM_Float lambda22 = -(x2.dot(eq2)) / ((x - x2).dot(eq2));   //-(x2*eq2) / ((x - x2)*eq2);	// right most
		bool bIntersect11 = (lambda11 >= 0) && (lambda11 <= 1);
		bool bIntersect12 = (lambda12 >= 0) && (lambda12 <= 1);
		bool bIntersect21 = (lambda21 >= 0) && (lambda21 <= 1);
		bool bIntersect22 = (lambda22 >= 0) && (lambda22 <= 1);
		if (bIntersect11 && bIntersect12)
		{
			//			FM_ASSERT( !bIntersect21 && !bIntersect22 );
			/* we should unfold on edge [x x1] */
			//	pCurFace = (FM_GeodesicFace*)pCurFace->GetFaceNeighbor(*pV2);
			f = cur_f;
			get_oppisite_f_v(F, TT,TTi, f, pV2->vid, cur_f, cur_v);

			pV2 = pV;
			x2 = x;
		}
		else if (bIntersect21 && bIntersect22)
		{
			//			FM_ASSERT( !bIntersect11 && !bIntersect12 );
			/* we should unfold on edge [x x2] */
			//	pCurFace = (FM_GeodesicFace*)pCurFace->GetFaceNeighbor(*pV1);
			f = cur_f;
			get_oppisite_f_v(F, TT,TTi, f, pV1->vid, cur_f, cur_v);

			pV1 = pV;
			x1 = x;
		}
		else
		{
			FM_ASSERT(bIntersect11 && !bIntersect12 &&
				!bIntersect21 && bIntersect22);
			/* that's it, we have found the point */
			dist = x.norm(); // ~x;
			dot1 = x.dot(xstart1) / (dist * xstart1.norm());//  ~xstart1);
			dot2 = x.dot(xstart2) / (dist * xstart2.norm());// ~xstart2);
			return pV;
		}
		nNum++;
	}

	return NULL;
}
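/*
 * The helper get_oppisite_f_v(F, TT, TTi, f, v, cur_f, cur_v) used above is not
 * shown in this snippet. Below is a minimal sketch of what it is assumed to do,
 * under the assumption that TT/TTi are libigl-style triangle-triangle adjacency
 * matrices (TT(f,i) = face adjacent to edge i of face f, TTi(f,i) = index of
 * that edge within the adjacent face): given face f and one of its vertices v,
 * return the face cur_f across the edge opposite v, and the vertex cur_v of
 * cur_f opposite that shared edge. The name and exact semantics are an
 * assumption, not the original implementation.
 */
inline void get_opposite_f_v_sketch(const Eigen::MatrixXi &F, const Eigen::MatrixXi &TT,
	const Eigen::MatrixXi &TTi, int f, int v, int &cur_f, int &cur_v)
{
	// local index of v inside face f
	int k = 0;
	while (k < 3 && F(f, k) != v) ++k;

	// the edge of face f that does not contain v starts at local index (k+1)%3
	int e = (k + 1) % 3;
	cur_f = TT(f, e);
	if (cur_f == -1) { cur_v = -1; return; } // boundary edge: no neighboring face

	// vertex of the neighboring face that lies opposite the shared edge
	cur_v = F(cur_f, (TTi(f, e) + 2) % 3);
}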
Example #6
	Eigen::Vector2d Edge::normal()const{
	    Eigen::Vector2d nn;
	    nn << -M_extr[0].y()+M_extr[1].y(), M_extr[0].x() - M_extr[1].x();
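	    // nn is the edge vector M_extr[1] - M_extr[0] rotated by -90 degrees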
	    double norm = nn.norm();
	    return nn/norm;
	}
/**
   * Compute the relative 3D pose in 6DOF using the two cameras for mutual localization
   *
   * \param lPixelA1 Position of the left LED on robot A
   * \param lPixelA2 Position of the right LED on robot A
   * \param lPixelB1 Position of the left LED on robot B
   * \param lPixelB2 Position of the right LED on robot B
   * \param position (Output) the relative position vector
   * \param rotation (Output) the rotation matrix of the relative pose
   *
   */
void MutualPoseEstimation::compute3DMutualLocalisation(const Eigen::Vector2d &lPixelA1, const Eigen::Vector2d &lPixelA2,
                                                       const Eigen::Vector2d &lPixelB1,const  Eigen::Vector2d &lPixelB2,
                                                       Eigen::Vector3d &position, Eigen::Matrix3d &rotation){

  Eigen::Vector2d fCamA = this->focalCam;
  Eigen::Vector2d fCamB = this->focalCam;
  Eigen::Vector2d ppA, pixelA1, pixelA2, pixelB1, pixelB2;
  if(ConventionUV){ // Oliver's code uses the uv convention (u points left and v points up)
      ppA = this->centerCam;
      pixelA1 = lPixelA1;
      pixelA2 = lPixelA2;
      pixelB1 = lPixelB1;
      pixelB2 = lPixelB2;
  }
  else{
      ppA = fromXYCoordinateToUVCoordinate(this->centerCam);
      pixelA1 = fromXYCoordinateToUVCoordinate(lPixelA1);
      pixelA2 = fromXYCoordinateToUVCoordinate(lPixelA2);
      pixelB1 = fromXYCoordinateToUVCoordinate(lPixelB1);
      pixelB2 = fromXYCoordinateToUVCoordinate(lPixelB2);
  }
  Eigen::Vector2d ppB = ppA;
  //Eigen::Vector2d ppA = 0;
  //Eigen::Vector2d ppB = 0;


  /*cout<<"-Parameters-"<<endl;
  cout<<"pixelA1:"<<pixelA1<<endl;
  cout<<"pixelA2:"<<pixelA2<<endl;
  cout<<"pixelB1:"<<pixelB1<<endl;
  cout<<"pixelB2:"<<pixelB2<<endl;
  cout<<"ppA:"<<ppA<<endl;
  cout<<"ppB:"<<ppB<<endl;
  cout<<"fCamA:"<<fCamA<<endl;
  cout<<"fCamB:"<<fCamB<<endl;
  cout<<"rdA:"<<rdA<<endl;
  cout<<"ldA:"<<ldA<<endl;
  cout<<"rdB:"<<rdB<<endl;
  cout<<"ldB:"<<ldB<<endl;*/

  //Eigen::Vector3d PAM1, PAM2;
  //if (ConventionUV) {
   Eigen::Vector3d PAM1((pixelB1[0]-ppB[0])/fCamB[0], (pixelB1[1]-ppB[1])/fCamB[1], 1);
   Eigen::Vector3d PAM2((pixelB2[0]-ppB[0])/fCamB[0], (pixelB2[1]-ppB[1])/fCamB[1], 1);
//  }
//  else {
//    // Convention x-y
//    PAM1((ppB[0]-pixelB1[0])/fCamB[0], (ppB[1]-pixelB1[1])/fCamB[1], 1);
//    PAM2((ppB[0]-pixelB2[0])/fCamB[0], (ppB[1]-pixelB2[1])/fCamB[1], 1);
//  }

  PAM1.normalize();
  PAM2.normalize();
  double alpha = acos(PAM1.dot(PAM2));
  //printf("Alpha: %f\n",alpha);

  double d = this->rdA + this->ldA;

  Eigen::Vector2d BLeftMarker = pixelA2;
  Eigen::Vector2d BRightMarker = pixelA1;
  
  Eigen::Vector2d PB1(BLeftMarker[0] + (this->ldB/(rdB+ldB)) * (BRightMarker[0] - BLeftMarker[0]),
                      BLeftMarker[1] + (this->ldB/(rdB+ldB)) * (BRightMarker[1] - BLeftMarker[1]));

  Eigen::Vector3d PB12((PB1[0]-ppA[0])/fCamA[0], (PB1[1]-ppA[1])/fCamA[1], 1);
  PB12.normalize();
  double phi = acos(PB12[0]);
  double beta = 0.5f * M_PI - phi;
  //printf("Beta: %f\n",beta* 180.f/M_PI);

  Eigen::Vector2d plane = MutualPoseEstimation::computePositionMutual(alpha, beta, d);

  double EstimatedDistance = plane.norm();

  position =  PB12 * EstimatedDistance;
  //====================================================================
  //=========================Axis Angle Part============================
  //Create the two planes
  //Plane in B's reference frame
  Eigen::Vector2d ALeftMarker = pixelB2;
  Eigen::Vector2d ARightMarker = pixelB1;

  Eigen::Vector3d ALM((ALeftMarker[0]-ppB[0])/fCamB[0], (ALeftMarker[1]-ppB[1])/fCamB[1], 1);
  ALM.normalize();

  Eigen::Vector3d ARM((ARightMarker[0]-ppB[0])/fCamB[0], (ARightMarker[1]-ppB[1])/fCamB[1], 1);
  ARM.normalize();
  //Plane in A's reference frame
  Eigen::Vector3d AToB = PB12;

  Eigen::Vector3d LeftMarker(1, 0, 0);

  //Align the two planes
  Eigen::Vector3d NormalPlanInB = ALM.cross(ARM);
  Eigen::Vector3d NormalPlanInA = AToB.cross(LeftMarker);
  Eigen::Vector3d AxisAlignPlans = NormalPlanInB.cross(NormalPlanInA);
  NormalPlanInB.normalize();
  NormalPlanInA.normalize();
  AxisAlignPlans.normalize();
  double AngleAlignPlans = acos(NormalPlanInB.dot(NormalPlanInA));

  Eigen::MatrixXd AlignPlans = vrrotvec2mat(AngleAlignPlans, AxisAlignPlans);

  //Align the vector of camera A seen from B with the plane
  Eigen::Vector3d CameraASeenFromB(
                      ((ALeftMarker[0] + (this->ldA/(this->rdA+this->ldA))*(ARightMarker[0] - ALeftMarker[0]))-ppB[0])/fCamB[0],
                      ((ALeftMarker[1] + (this->ldA/(this->rdA+this->ldA))*(ARightMarker[1] - ALeftMarker[1]))-ppB[1])/fCamB[1],
                      1);
  CameraASeenFromB.normalize();
  Eigen::Vector3d alignedBToA = AlignPlans * CameraASeenFromB;
  //Turn the vector BToA to make it align with AToB
  Eigen::Vector3d AxisAlignABwBA = alignedBToA.cross(AToB);
  AxisAlignABwBA.normalize();
  //Since we know that the cameras are facing each other, we rotate by pi
  double AngleAlignABwBA = acos(alignedBToA.dot(AToB)) - M_PI;
  
  Eigen::Matrix3d AlignVectors = vrrotvec2mat(AngleAlignABwBA, AxisAlignABwBA);

  rotation =  AlignVectors * AlignPlans;

}
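/*
 * vrrotvec2mat(angle, axis) is used above but not defined in this snippet. A
 * minimal sketch of what it is assumed to do, mirroring MATLAB's vrrotvec2mat:
 * convert an axis-angle rotation into a 3x3 rotation matrix. With Eigen this is
 * a one-liner (requires <Eigen/Geometry>); the *_sketch suffix marks it as an
 * assumption, not the original implementation.
 */
static Eigen::Matrix3d vrrotvec2mat_sketch(double angle, const Eigen::Vector3d &axis)
{
  // normalize the axis defensively before building the rotation
  return Eigen::AngleAxisd(angle, axis.normalized()).toRotationMatrix();
}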
Example #8
    bool updatePose( Node *root, Camera *camera )
    {
        ceres::Problem problem;
        
        Node *node = camera->node;
        
        double params[6];
        
        Eigen::Map<Eigen::Vector3d> translationvec(params);
        translationvec = node->pose.translation();
        
        ceres::RotationMatrixToAngleAxis( node->pose.so3().matrix().data(), params+3 );

        ceres::LossFunction *lossFunction = new ceres::HuberLoss( 4.0 );
        
        Calibration *calibration = camera->calibration;
        
        ElementList::iterator pointit;
        for ( pointit = root->points.begin(); pointit != root->points.end(); pointit++ )
        {
            Point *point = (Point*)pointit->second;
            if ( !point->tracked ) continue;
            
            Eigen::Vector3d XYZ = project(point->position);
            
            ReprojectionError *reproj_error = new ReprojectionError(XYZ,
                                                                    calibration->focal,
                                                                    point->location[0]-calibration->center[0],
                                                                    point->location[1]-calibration->center[1]);
            
            ceres::CostFunction* cost_function = new ceres::AutoDiffCostFunction<ReprojectionError, 2, 6>(reproj_error);
            problem.AddResidualBlock(cost_function, lossFunction, params );
        }
        
        ceres::Solver::Options options;
        options.linear_solver_type = ceres::DENSE_NORMAL_CHOLESKY;
        ceres::Solver::Summary summary;
        ceres::Solve(options, &problem, &summary);
//        std::cout << summary.FullReport() << "\n";
        bool success = ( summary.termination_type != ceres::FAILURE );
        if ( success )
        {
            node->pose.translation() = translationvec;
            
            Eigen::Matrix3d R;
            ceres::AngleAxisToRotationMatrix(params+3, R.data());
            node->pose.so3() = Sophus::SO3d(R);
        }
        
        // update tracked flag
        for ( pointit = root->points.begin(); pointit != root->points.end(); pointit++ )
        {
            Point *point = (Point*)pointit->second;
            if ( !point->tracked ) continue;
            
            // check re-projection error
            Eigen::Vector2d proj = calibration->focal * project( node->pose * project(point->position) ) + calibration->center;
            Eigen::Vector2d diff = proj - point->location.cast<double>();
            double err = diff.norm();
            if ( err > 16.0 )
            {
                point->tracked = false;
            }
        }
        return success;
    }
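    /*
     * ReprojectionError is used above but not shown in this snippet. A hedged
     * sketch of what such a functor could look like for the 6-parameter block
     * used here (params[0..2] = translation, params[3..5] = angle-axis), built
     * with ceres::AngleAxisRotatePoint from <ceres/rotation.h>. The member
     * names and exact residual convention are assumptions, not the original.
     */
    struct ReprojectionErrorSketch
    {
        ReprojectionErrorSketch( const Eigen::Vector3d &XYZ, double focal, double obs_x, double obs_y )
        : XYZ_(XYZ), focal_(focal), obs_x_(obs_x), obs_y_(obs_y) { }
        
        template <typename T>
        bool operator()( const T* params, T* residuals ) const
        {
            // rotate the fixed 3D point by the angle-axis part, then translate
            T p[3] = { T(XYZ_[0]), T(XYZ_[1]), T(XYZ_[2]) };
            T q[3];
            ceres::AngleAxisRotatePoint( params+3, p, q );
            q[0] += params[0]; q[1] += params[1]; q[2] += params[2];
            
            // pinhole projection scaled by the focal length, compared against
            // the already center-shifted observation
            residuals[0] = T(focal_) * q[0] / q[2] - T(obs_x_);
            residuals[1] = T(focal_) * q[1] / q[2] - T(obs_y_);
            return true;
        }
        
        Eigen::Vector3d XYZ_;
        double focal_, obs_x_, obs_y_;
    };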
double
FittingCurve2d::inverseMapping (const ON_NurbsCurve &nurbs, const Eigen::Vector2d &pt, const double &hint,
                                double &error, Eigen::Vector2d &p, Eigen::Vector2d &t, double rScale, int maxSteps,
                                double accuracy, bool quiet)
{
  if (nurbs.Order () == 2)
    return inverseMappingO2 (nurbs, pt, error, p, t);

  int cp_red = (nurbs.m_order - 2);
  int ncpj = (nurbs.m_cv_count - 2 * cp_red);
  double pointAndTangents[4];

  double current, delta;
  Eigen::Vector2d r;
  std::vector<double> elements = getElementVector (nurbs);
  double minU = elements[0];
  double maxU = elements[elements.size () - 1];

  current = hint;

  for (int k = 0; k < maxSteps; k++)
  {

    nurbs.Evaluate (current, 1, 2, pointAndTangents);

    p (0) = pointAndTangents[0];
    p (1) = pointAndTangents[1];

    t (0) = pointAndTangents[2];
    t (1) = pointAndTangents[3];

    r = p - pt;

    // step width control
    int E = findElement (current, elements);
    double e = elements[E + 1] - elements[E];

    delta = -(0.5 * e * rScale) * r.dot (t) / t.norm (); //  A.ldlt().solve(b);
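    // The update above steps against the gradient of 0.5*||p(u) - pt||^2,
    // whose sign is that of r.dot(t); the step length is scaled by half the
    // local element width e and the user factor rScale, normalized by |t|.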

    //    e = 0.5 * std::abs<double> (e);
    //    if (delta > e)
    //      delta = e;
    //    if (delta < -e)
    //      delta = -e;

    if (std::abs<double> (delta) < accuracy)
    {

      error = r.norm ();
      return current;

    }
    else
    {
      current = current + delta;

      if (current < minU)
        current = maxU - (minU - current);
      else if (current > maxU)
        current = minU + (current - maxU);

    }

  }

  error = r.norm ();

  if (!quiet)
  {
    printf ("[FittingCurve2d::inverseMapping] Warning: Method did not converge (%e %d).\n", accuracy, maxSteps);
    printf ("[FittingCurve2d::inverseMapping] hint: %f current: %f delta: %f error: %f\n", hint, current, delta, error);
  }

  return current;
}
template<typename BidirIter, typename Fun>
bool TimedElasticBand::initTEBtoGoal(BidirIter path_start, BidirIter path_end, Fun fun_position, double max_vel_x, double max_vel_theta,
		      boost::optional<double> max_acc_x, boost::optional<double> max_acc_theta,
		      boost::optional<double> start_orientation, boost::optional<double> goal_orientation, int min_samples) 
{
    Eigen::Vector2d start_position = fun_position( *path_start );
    Eigen::Vector2d goal_position = fun_position( *boost::prior(path_end) );
    
    double start_orient, goal_orient;
    if (start_orientation)
    {
      start_orient = *start_orientation;
    }
    else
    {
      Eigen::Vector2d start2goal =  goal_position - start_position;
      start_orient = atan2(start2goal[1],start2goal[0]);
    }
    double timestep = 1; // TODO: time
    
    
    if (goal_orientation)
    {
      goal_orient = *goal_orientation;
    }
    else
    {
      goal_orient = start_orient;
    }
    
    if (!isInit())
    {	
      addPose(start_position, start_orient, true); // add starting point and mark it as fixed for optimization		

      // we insert middle points now (increase start by 1 and decrease goal by 1)
      std::advance(path_start,1);
      std::advance(path_end,-1);
      unsigned int idx=0;
      for (; path_start != path_end; ++path_start) // insert middle-points
      {
	//Eigen::Vector2d point_to_goal = path.back()-*it;
	//double dir_to_goal = atan2(point_to_goal[1],point_to_goal[0]); // direction to goal
	// Alternative: Direction from last path
	Eigen::Vector2d curr_point = fun_position(*path_start);
	Eigen::Vector2d diff_last = curr_point - Pose(idx).position(); // we do not use boost::prior(*path_start) for those cases,
								       // where fun_position() does not return a reference or is expensive.
	double diff_norm = diff_last.norm();
	
	double timestep_vel = diff_norm/max_vel_x; // constant velocity
	double timestep_acc;
	if (max_acc_x)
	{
		timestep_acc = sqrt(2*diff_norm/(*max_acc_x)); // constant acceleration
		if (timestep_vel < timestep_acc) timestep = timestep_acc;
		else timestep = timestep_vel;
	}
	else timestep = timestep_vel;
	
	if (timestep<0) timestep=0.2; // TODO: this is an assumption
	
	addPoseAndTimeDiff(curr_point, atan2(diff_last[1],diff_last[0]) ,timestep);
	
	Eigen::Vector2d diff_next = fun_position(*boost::next(path_start))-curr_point; // TODO maybe store the boost::next for the following iteration
	double ang_diff = std::abs( g2o::normalize_theta( atan2(diff_next[1],diff_next[0])
						         -atan2(diff_last[1],diff_last[0]) ) );
	
	timestep_vel = ang_diff/max_vel_theta; // constant velocity
	if (max_acc_theta)
	{
		timestep_acc = sqrt(2*ang_diff/(*max_acc_theta)); // constant acceleration
		if (timestep_vel < timestep_acc) timestep = timestep_acc;
		else timestep = timestep_vel;
	}
	else timestep = timestep_vel;
	
	if (timestep<0) timestep=0.2; // TODO: this is an assumption
	
	addPoseAndTimeDiff(curr_point, atan2(diff_last[1],diff_last[0]) ,timestep);
	
	++idx;
      }
      Eigen::Vector2d diff = goal_position-Pose(idx).position();
      double diff_norm = diff.norm();
      double timestep_vel = diff_norm/max_vel_x; // constant velocity
      if (max_acc_x)
      {
	double timestep_acc = sqrt(2*diff_norm/(*max_acc_x)); // constant acceleration
	if (timestep_vel < timestep_acc) timestep = timestep_acc;
	else timestep = timestep_vel;
      }
      else timestep = timestep_vel;

      
      PoseSE2 goal(goal_position, goal_orient);
      
      // if number of samples is not larger than min_samples, insert manually
      if ( (int)sizePoses() < min_samples-1 )
      {
        ROS_DEBUG("initTEBtoGoal(): number of generated samples is less than specified by min_samples. Forcing the insertion of more samples...");
        while ((int)sizePoses() < min_samples-1) // subtract goal point that will be added later
        {
          // simple strategy: interpolate between the current pose and the goal
          addPoseAndTimeDiff( PoseSE2::average(BackPose(), goal), timestep ); // let the optimizer correct the timestep (TODO: better initialization)
        }
      }
      
      // now add goal
      addPoseAndTimeDiff(goal, timestep); // add goal point
      setPoseVertexFixed(sizePoses()-1,true); // GoalConf is a fixed constraint during optimization
    }
    else // size!=0
    {
      ROS_WARN("Cannot init TEB between given configuration and goal, because TEB vectors are not empty or TEB is already initialized (call this function before adding states yourself)!");
      ROS_WARN("Number of TEB configurations: %d, Number of TEB timediffs: %d",(unsigned int) sizePoses(),(unsigned int) sizeTimeDiffs());
      return false;
    }
    return true;
}
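/*
 * A minimal usage sketch for initTEBtoGoal (assumptions: an already constructed
 * TimedElasticBand instance `teb`, a plan stored as std::vector<Eigen::Vector2d>,
 * velocity limits in m/s and rad/s, and <vector>/<boost/optional.hpp> available;
 * the lambda plays the role of fun_position and simply forwards the stored point).
 */
void initTEBtoGoal_usage_sketch(TimedElasticBand& teb)
{
    std::vector<Eigen::Vector2d> plan;
    plan.push_back(Eigen::Vector2d(0.0, 0.0));
    plan.push_back(Eigen::Vector2d(1.0, 0.5));
    plan.push_back(Eigen::Vector2d(2.0, 1.0));

    teb.initTEBtoGoal(plan.begin(), plan.end(),
                      [](const Eigen::Vector2d& p) { return p; }, // fun_position
                      0.4,            // max_vel_x
                      0.3,            // max_vel_theta
                      boost::none,    // max_acc_x
                      boost::none,    // max_acc_theta
                      boost::none,    // start_orientation
                      boost::none,    // goal_orientation
                      3);             // min_samples
}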