/** Returns the estimated position of the table leg ("pata de la mesa").
 *  If the cache file "patamesa.dat" exists, the stored estimate is read back;
 *  otherwise the position is recomputed by averaging the centre of the last
 *  detected cluster over 20 laser log files, and the result is written to the
 *  cache file for later runs.
 *  Fixes: check the fscanf() result (previously uninitialized x,y could be
 *  used on a malformed file), check fopen("w") success before fprintf, and
 *  skip files that yield no clusters (back() on an empty vector is UB).
 */
CPose2D getPataMesa()
{
	CPose2D pataMesa;
	FILE* file = fopen("patamesa.dat", "r");
	if (file)
	{
		// Cache file exists: read the stored estimate.
		float x, y;
		if (fscanf(file, "x:%f,y:%f", &x, &y) == 2)
		{
			pataMesa.x(x);
			pataMesa.y(y);
		}
		// else: malformed cache, fall through with the default (0,0) pose.
		fclose(file);
		return pataMesa;
	}

	// Cache file does not exist: recompute from the recorded laser logs.
	int nDatos = 0;
	for (int i = 1; i < 21; i++)
	{
		Detector detector;
		char nombre[100];
		sprintf(nombre, "/home/jplata/Eclipse/MedidasPiernas/23Mayo/laser%i.dat", i);
		cout << "Fichero: " << nombre << endl;
		detector.abrirFichero(nombre, false);

		// Detected clusters:
		vector<Cluster> piernas = detector.clusterizar(0.1, 3);
		detector.printClusters(piernas);

		// The last cluster corresponds to the table leg in the first samples.
		if (!piernas.empty())
		{
			nDatos++;
			pataMesa += piernas.back().getCentro();
		}
	}
	if (nDatos > 0)
	{
		pataMesa.x(pataMesa.x() / nDatos);
		pataMesa.y(pataMesa.y() / nDatos);
	}

	// Cache the result; if the file cannot be created just return the estimate.
	file = fopen("patamesa.dat", "w");
	if (file)
	{
		fprintf(file, "x:%f,y:%f", pataMesa.x(), pataMesa.y());
		fclose(file);
	}
	return pataMesa;
}
void test_to_from_2d(double x,double y, double phi) { const CPose2D p2d = CPose2D(x,y,phi); const CPose3D p3d = CPose3D(p2d); const CPose2D p2d_bis = CPose2D(p3d); EXPECT_FLOAT_EQ( p2d.x(), p2d_bis.x() ) << "p2d: " << p2d << endl; EXPECT_FLOAT_EQ( p2d.y(), p2d_bis.y() ) << "p2d: " << p2d << endl; EXPECT_FLOAT_EQ( p2d.phi(), p2d_bis.phi() ) << "p2d: " << p2d << endl; EXPECT_FLOAT_EQ( p2d.phi(), p3d.yaw() ) << "p2d: " << p2d << endl; }
/** Acquires the current laser scan and robot pose, rebuilds the obstacle
 *  point map from that single scan, and updates the data shared with the
 *  plotting thread (protected by m_mutex).
 *  @param obstacles [out] Point map rebuilt from the current laser scan.
 *  @return Always true.
 */
bool MyReactiveInterface::senseObstacles(CSimplePointsMap &obstacles){
	cout << "senseObstacles" << endl;
	CObservation2DRangeScan laserScan;
	CPose2D rPose;
	CPose3D rPose3D;
	// One synchronized read of scan + odometry:
	getCurrentMeasures(laserScan,rPose);
	rPose3D=rPose;
	// Rebuild the obstacle map from scratch with a fine insertion resolution:
	obstacles.insertionOptions.minDistBetweenLaserPoints=0.005f;
	obstacles.insertionOptions.also_interpolate=false;
	obstacles.clear();
	obstacles.insertObservation(&laserScan);
	// Update data for plot thread
	pthread_mutex_lock(&m_mutex);
	path.AddVertex(rPose.x(),rPose.y());
	laserCurrentMap.loadFromRangeScan(laserScan,&rPose3D);
	newScan=true;
	pthread_mutex_unlock(&m_mutex);
	refreshPlot();
	return true;
}
/** Recomputes the globally corrected pose of every keyframe.
 *  Builds a complete spanning tree rooted at keyframe 0 and stores each
 *  keyframe as an (id, karto::Pose2) pair in corrections_.
 *  NOTE(review): this duplicates the body of GetCorrections() -- consider
 *  sharing one implementation between the two.
 */
void SRBASolver::Compute()
{
	ROS_INFO("Computing corrected poses");
	corrections_.clear();
	if(!rba_.get_rba_state().keyframes.empty())
	{
		// Use a spanning tree to estimate the global pose of every node
		// starting (root) at the given keyframe:
		//corrections_.resize(rba_.get_rba_state().keyframes.size());
		srba_t::frameid2pose_map_t spantree;
		// Nothing to do until at least one keyframe beyond the root exists:
		if(curr_kf_id_ == 0)
			return;
		TKeyFrameID root_keyframe(0);
		rba_.create_complete_spanning_tree(root_keyframe,spantree);
		for (srba_t::frameid2pose_map_t::const_iterator itP = spantree.begin();itP!=spantree.end();++itP)
		{
			// Convert each MRPT pose into a karto pose keyed by keyframe id:
			std::pair<int, karto::Pose2> id_pose;
			id_pose.first = itP->first;
			const CPose2D p = itP->second.pose;
			karto::Pose2 pos(p.x(), p.y(), p.phi());
			id_pose.second = pos;
			corrections_.push_back(id_pose);
		}
	}
	// Get the global graph and return updated poses?
	//typedef std::vector<sba::Node2d, Eigen::aligned_allocator<sba::Node2d> > NodeVector;
	//ROS_INFO("Calling SRBA compute");
	// Do nothing here?
}
/** Computes one or both of the 3x3 Jacobians of f(D) = P1*D*P2^{-1} with
 *  respect to the increments epsilon_1 / epsilon_2, useful in graph-SLAM
 *  problems. Either output pointer may be NULL to skip that Jacobian. */
void SE_traits<2>::jacobian_dP1DP2inv_depsilon(
	const CPose2D& P1DP2inv, matrix_VxV_t* df_de1, matrix_VxV_t* df_de2)
{
	if (df_de1)
	{
		// Jacobian wrt epsilon_1, with block structure:
		//          [ I_2 | -[d_t]_x ]
		// Jacob1 = [ ----+--------- ]
		//          [  0  |    1     ]
		matrix_VxV_t& jac1 = *df_de1;
		jac1.unit(VECTOR_SIZE, 1.0);
		jac1(0, 2) = -P1DP2inv.y();
		jac1(1, 2) = P1DP2inv.x();
	}
	if (df_de2)
	{
		// Jacobian wrt epsilon_2, with block structure:
		//          [ -R | 0  ]
		// Jacob2 = [ ---+--- ]
		//          [ 0  | -1 ]
		const double c = cos(P1DP2inv.phi());
		const double s = sin(P1DP2inv.phi());
		const double entries[] = {-c, s, 0, -s, -c, 0, 0, 0, -1};
		*df_de2 = CMatrixFixedNumeric<double, 3, 3>(entries);
	}
}
/*--------------------------------------------------------------- squareErrorVector ---------------------------------------------------------------*/ void TMatchingPairList::squareErrorVector( const CPose2D &q, vector_float &out_sqErrs, vector_float &xs, vector_float &ys ) const { out_sqErrs.resize( size() ); xs.resize( size() ); ys.resize( size() ); // * \f[ e_i = | x_{this} - q \oplus x_{other} |^2 \f] const float ccos = cos(q.phi()); const float csin = sin(q.phi()); const float qx = q.x(); const float qy = q.y(); const_iterator corresp; vector_float::iterator e_i, xx, yy; for (corresp=begin(), e_i = out_sqErrs.begin(), xx = xs.begin(), yy = ys.begin();corresp!=end();corresp++, e_i++, xx++,yy++) { *xx = qx + ccos * corresp->other_x - csin * corresp->other_y; *yy = qy + csin * corresp->other_x + ccos * corresp->other_y; *e_i = square( corresp->this_x - *xx ) + square( corresp->this_y - *yy ); } }
/*--------------------------------------------------------------- computeObservationLikelihood_CellsDifference ---------------------------------------------------------------*/ double COccupancyGridMap2D::computeObservationLikelihood_CellsDifference( const CObservation *obs, const CPose2D &takenFrom ) { double ret = 0.5; // This function depends on the observation type: // ----------------------------------------------------- if ( obs->GetRuntimeClass() == CLASS_ID(CObservation2DRangeScan) ) { // Observation is a laser range scan: // ------------------------------------------- const CObservation2DRangeScan *o = static_cast<const CObservation2DRangeScan*>( obs ); // Insert only HORIZONTAL scans, since the grid is supposed to // be a horizontal representation of space. if (!o->isPlanarScan(insertionOptions.horizontalTolerance)) return 0.5; // NO WAY TO ESTIMATE NON HORIZONTAL SCANS!! // Build a copy of this occupancy grid: COccupancyGridMap2D compareGrid(takenFrom.x()-10,takenFrom.x()+10,takenFrom.y()-10,takenFrom.y()+10,resolution); CPose3D robotPose(takenFrom); int Ax, Ay; // Insert in this temporary grid: compareGrid.insertionOptions.maxDistanceInsertion = insertionOptions.maxDistanceInsertion; compareGrid.insertionOptions.maxOccupancyUpdateCertainty = 0.95f; o->insertObservationInto( &compareGrid, &robotPose ); // Save Cells offset between the two grids: Ax = round((x_min - compareGrid.x_min) / resolution); Ay = round((y_min - compareGrid.y_min) / resolution); int nCellsCompared = 0; float cellsDifference = 0; int x0 = max(0,Ax); int y0 = max(0,Ay); int x1 = min(compareGrid.size_x, size_x+Ax); int y1 = min(compareGrid.size_y, size_y+Ay); for (int x=x0;x<x1;x+=1) { for (int y=y0;y<y1;y+=1) { float xx = compareGrid.idx2x(x); float yy = compareGrid.idx2y(y); float c1 = getPos(xx,yy); float c2 = compareGrid.getCell(x,y); if ( c2<0.45f || c2>0.55f ) { nCellsCompared++; if ((c1>0.5 && c2<0.5) || (c1<0.5 && c2>0.5)) cellsDifference++; } } } ret = 1 - cellsDifference / 
(nCellsCompared); } return log(ret); }
/** Rebuilds corrections_ with the globally corrected pose of every keyframe
 *  (spanning tree rooted at keyframe 0) and returns a reference to it.
 *  Returns the (cleared) vector unchanged when no keyframes exist yet or
 *  curr_kf_id_ is 0.
 *  NOTE(review): same body as Compute() -- consider sharing the code.
 */
IdPoseVector& SRBASolver::GetCorrections()
{
	ROS_INFO("Computing corrected poses up to %d", curr_kf_id_-1);
	corrections_.clear();
	if(!rba_.get_rba_state().keyframes.empty())
	{
		// Use a spanning tree to estimate the global pose of every node
		// starting (root) at the given keyframe:
		//corrections_.resize(rba_.get_rba_state().keyframes.size());
		srba_t::frameid2pose_map_t spantree;
		if(curr_kf_id_ == 0)
			return corrections_;
		TKeyFrameID root_keyframe(0);
		rba_.create_complete_spanning_tree(root_keyframe,spantree);
		for (srba_t::frameid2pose_map_t::const_iterator itP = spantree.begin();itP!=spantree.end();++itP)
		{
			// Convert each MRPT pose into a karto pose keyed by keyframe id:
			std::pair<int, karto::Pose2> id_pose;
			id_pose.first = itP->first;
			const CPose2D p = itP->second.pose;
			karto::Pose2 pos(p.x(), p.y(), p.phi());
			id_pose.second = pos;
			corrections_.push_back(id_pose);
		}
	}
	return corrections_;
}
/*---------------------------------------------------------------
						getMean
  Returns an estimate of the pose, (the mean, or mathematical
		expectation of the PDF)
 ---------------------------------------------------------------*/
void CPosePDFSOG::getMean(CPose2D &p) const
{
	size_t N = m_modes.size();

	if (N)
	{
		// Use an auxiliary particle set to manage the problematic computation
		// of the mean "PHI" (circular average); each mode becomes one
		// weighted particle. See CPosePDFParticles::getEstimatedPose.
		CPosePDFParticles auxParts( N );
		CPosePDFParticles::CParticleList::iterator itPart;
		const_iterator it;
		for (it=m_modes.begin(),itPart=auxParts.m_particles.begin();it!=m_modes.end();++it,++itPart)
		{
			itPart->log_w = (it)->log_w;
			*itPart->d = (it)->mean;
		}
		auxParts.getMean(p);
		return;
	}
	else
	{
		// No modes: return the origin pose.
		p.x(0);
		p.y(0);
		p.phi(0);
		return;
	}
}
/** Must return the action vector u.
 * \param out_u The action vector which will be passed to OnTransitionModel
 *
 * Prefers a 3D odometry action (x, y, yaw) when present; otherwise falls
 * back to the best 2D movement estimation (x, y, phi); otherwise leaves u
 * as zeros (no odometry available this step).
 */
void CRangeBearingKFSLAM2D::OnGetAction(KFArray_ACT &u) const
{
	// Get odometry estimation:
	CActionRobotMovement2DPtr actMov2D = m_action->getBestMovementEstimation();
	CActionRobotMovement3DPtr actMov3D = m_action->getActionByClass<CActionRobotMovement3D>();
	if (actMov3D)
	{
		// 3D odometry: project onto the 2D state (x, y, yaw).
		u[0]=actMov3D->poseChange.mean.x();
		u[1]=actMov3D->poseChange.mean.y();
		u[2]=actMov3D->poseChange.mean.yaw();
	}
	else if (actMov2D)
	{
		CPose2D estMovMean;
		actMov2D->poseChange->getMean(estMovMean);
		u[0]=estMovMean.x();
		u[1]=estMovMean.y();
		u[2]=estMovMean.phi();
	}
	else
	{
		// Left u as zeros
		for (size_t i=0;i<3;i++) u[i]=0;
	}
}
/** Draws one sample from the proposal distribution: a point on a ring of
 *  radius ~ N(1.0, SIGMA) centred at (1,0), with a uniform random heading.
 *  (The RNG call order is part of the behavior and is preserved.) */
void RS_drawFromProposal( CPose2D &outSample )
{
	const double theta  = randomGenerator.drawUniform(-M_PI, M_PI);
	const double radius = randomGenerator.drawGaussian1D(1.0, SIGMA);
	outSample.x( 1.0f - cos(theta) * radius );
	outSample.y( sin(theta) * radius );
	outSample.phi( randomGenerator.drawUniform(-M_PI, M_PI) );
}
/** Composes a pose with a 2D point: rotates u by the pose heading and then
 *  translates by the pose origin, returning the point in global coordinates. */
TPoint2D mrpt::poses::operator +(const CPose2D &pose, const TPoint2D &u)
{
	const double c = pose.phi_cos();
	const double s = pose.phi_sin();
	const double gx = pose.x() + u.x * c - u.y * s;
	const double gy = pose.y() + u.x * s + u.y * c;
	return TPoint2D(gx, gy);
}
/*---------------------------------------------------------------
						getMean
  Returns an estimate of the pose, (the mean, or mathematical
		expectation of the PDF), computed as a weighted
		average over all m_particles.
 ---------------------------------------------------------------*/
void CPosePDFParticles::getMean(CPose2D &est_) const
{
	TPose2D est(0,0,0);
	CPose2D p;
	size_t i,n = m_particles.size();
	double phi,w,W=0;
	// PHI is averaged separately per half-circle to avoid the +pi/-pi
	// wrap-around biasing the mean:
	double W_phi_R=0,W_phi_L=0;
	double phi_R=0,phi_L=0;

	if (!n) return;  // Empty particle set: leave est_ untouched.

	// First: XY
	// -----------------------------------
	for (i=0;i<n;i++)
	{
		p = *m_particles[i].d;
		w = exp(m_particles[i].log_w);  // log-weights -> linear weights
		W += w;
		est.x+= p.x() * w;
		est.y+= p.y() * w;
		// PHI is special:
		phi = p.phi();
		if (fabs(phi)>1.5707963267948966192313216916398f)  // |phi| > pi/2
		{
			// LEFT HALF: remap to 0..2pi before averaging
			if (phi<0) phi = (M_2PI + phi);
			phi_L += phi * w;
			W_phi_L += w;
		}
		else
		{
			// RIGHT HALF: -pi,pi
			phi_R += phi * w;
			W_phi_R += w;
		}
	}
	est_ = est;
	est_ *= (1.0/W);  // Normalize by the total weight.

	// Next: PHI
	// -----------------------------------
	// The mean value from each side:
	if (W_phi_L>0) phi_L /= W_phi_L; // [0,2pi]
	if (W_phi_R>0) phi_R /= W_phi_R; // [-pi,pi]
	// Left side to [-pi,pi] again:
	if (phi_L>M_PI) phi_L = phi_L - M_2PI;
	// The total mean:
	est_.phi( ((phi_L * W_phi_L + phi_R * W_phi_R )/(W_phi_L+W_phi_R)) );
}
/*---------------------------------------------------------------
  The operator D="this"-b is the pose inverse compounding operator.
  The resulting point "D" is this point expressed in the local
  frame of pose "b":  D = R(-phi_b) * (p - t_b)
 ---------------------------------------------------------------*/
CPoint2D CPoint2D::operator - (const CPose2D& b) const
{
	const double c = cos(b.phi());
	const double s = sin(b.phi());
	const double dx = x() - b.x();
	const double dy = y() - b.y();
	return CPoint2D( dx * c + dy * s, -dx * s + dy * c );
}
/** Reads the current laser scan and odometry pose from the ARIA robot.
 *  Ranges are converted from mm to metres, angles from degrees to radians.
 *  Ignored readings are pushed as 10 m with validRange=0.
 *  @param laserScan [out] Scan rebuilt from the laser's raw readings.
 *  @param robotPose [out] Current odometry pose in metres/radians.
 */
void MyReactiveInterface::getCurrentMeasures(CObservation2DRangeScan &laserScan,CPose2D &robotPose){
	cout << "getCurrentMeasures" << endl;
	// Clear readings
	laserScan.scan.clear();
	laserScan.validRange.clear();
	laserScan.aperture=M_PIf;
	laserScan.rightToLeft=false;
	float dist;
	char valid;
	ArPose punto;
	// Lock the laser device while reading:
	laser->lockDevice();
	ArPose pose=robot->getPose();
	vector<ArSensorReading> *readings=laser->getRawReadingsAsVector();
	// ARIA works in mm and degrees; convert to m and radians:
	robotPose.x(pose.getX()*0.001);
	robotPose.y(pose.getY()*0.001);
	robotPose.phi(DEG2RAD(pose.getTh()));
	for(vector<ArSensorReading>::iterator it=readings->begin(); it != readings->end(); it++)
	{
		if(it->getIgnoreThisReading())
		{
			// Set 10 m (10000 mm), the laser's maximum range, and mark invalid:
			dist=10000;
			valid=0;
		}
		else
		{
			punto=it->getPose();
			dist=pose.findDistanceTo(punto);
			valid=1;
		}
		laserScan.scan.push_back(dist*0.001);  // mm -> m
		laserScan.validRange.push_back(valid);
	}
	laser->unlockDevice();
}
/*---------------------------------------------------------------
						insertObservation
 ---------------------------------------------------------------*/
/** Inserts a CObservationWirelessPower reading into the grid map.
 *  The power reading is normalized with [R_min, R_max], the running
 *  mean/variance statistics are updated, and the normalized value is
 *  inserted at the sensor's world position.
 *  @return true if the observation was of the handled type, false otherwise.
 */
bool  CWirelessPowerGridMap2D::internal_insertObservation(
	const CObservation	*obs,
	const CPose3D			*robotPose )
{
	MRPT_START

	CPose2D		robotPose2D;
	CPose3D		robotPose3D;

	if (robotPose)
	{
		robotPose2D = CPose2D(*robotPose);
		robotPose3D = (*robotPose);
	}
	else
	{
		// Default values are (0,0,0)
	}

	if ( IS_CLASS(obs, CObservationWirelessPower ))
	{
		/********************************************************************
					OBSERVATION TYPE: CObservationWirelessPower
		********************************************************************/
		const CObservationWirelessPower	*o = static_cast<const CObservationWirelessPower*>( obs );
		float						sensorReading;

		// Compute the 3D sensor pose in world coordinates:
		CPose2D		sensorPose = CPose2D(robotPose3D + o->sensorPoseOnRobot );
		sensorReading = o->power;

		// Normalization:
		sensorReading = (sensorReading - insertionOptions.R_min) /( insertionOptions.R_max - insertionOptions.R_min );

		// Update the gross estimates of mean/vars for the whole reading history (see IROS2009 paper):
		m_average_normreadings_mean = (sensorReading + m_average_normreadings_count*m_average_normreadings_mean)/(1+m_average_normreadings_count);
		m_average_normreadings_var = (square(sensorReading - m_average_normreadings_mean) + m_average_normreadings_count*m_average_normreadings_var) /(1+m_average_normreadings_count);
		m_average_normreadings_count++;

		// Finally, do the actual map update with that value:
		this->insertIndividualReading(sensorReading, mrpt::math::TPoint2D(sensorPose.x(),sensorPose.y()) );
		return true;	// Done!
	} // end if "CObservationWirelessPower"

	return false;

	MRPT_END
}
/*---------------------------------------------------------------
						getEstimatedCovariance
 ---------------------------------------------------------------*/
/** Computes the 3x3 covariance (x, y, phi) and the mean of the particle set.
 *  Angular errors are wrapped to [-pi,pi] before accumulation. For fewer
 *  than 2 particles the covariance is left as all zeros.
 *  NOTE(review): the comment below says "unbiased" but no Bessel-style
 *  correction is applied to the weighted sums -- confirm intent.
 */
void  CPosePDFParticles::getCovarianceAndMean(CMatrixDouble33 &cov, CPose2D &mean) const
{
	cov.zeros();
	getMean(mean);
	size_t		i,n = m_particles.size();
	double		var_x=0,var_y=0,var_p=0,var_xy=0,var_xp=0,var_yp=0;
	double		mean_phi = mean.phi();
	// Work with the heading in [0,2pi) so differences stay consistent:
	if (mean_phi<0) mean_phi = M_2PI + mean_phi;
	double lin_w_sum = 0;
	// Normalizer for the (log) particle weights:
	for (i=0;i<n;i++) lin_w_sum+= exp( m_particles[i].log_w );
	if (lin_w_sum==0) lin_w_sum=1;
	for (i=0;i<n;i++)
	{
		double w = exp( m_particles[i].log_w ) / lin_w_sum;
		// Manage 1 PI range:
		double	err_x   = m_particles[i].d->x() - mean.x();
		double	err_y   = m_particles[i].d->y() - mean.y();
		double	err_phi = math::wrapToPi( fabs(m_particles[i].d->phi() - mean_phi) );
		var_x+= square(err_x)*w;
		var_y+= square(err_y)*w;
		var_p+= square(err_phi)*w;
		var_xy+= err_x*err_y*w;
		var_xp+= err_x*err_phi*w;
		var_yp+= err_y*err_phi*w;
	}
	if (n<2)
	{
		// Not enought information to estimate the variance:
	}
	else
	{
		// Unbiased estimation of variance:
		cov(0,0) = var_x;
		cov(1,1) = var_y;
		cov(2,2) = var_p;
		cov(1,0) = cov(0,1) = var_xy;
		cov(2,0) = cov(0,2) = var_xp;
		cov(1,2) = cov(2,1) = var_yp;
	}
}
/*---------------------------------------------------------------
					jacobiansPoseComposition
 ---------------------------------------------------------------*/
/** Computes the Jacobians of the pose composition f(x,u) = x (+) u
 *  with respect to x (df_dx) and u (df_du). Either output can be
 *  skipped via the corresponding boolean flag.
 */
void CPosePDF::jacobiansPoseComposition(
	const CPose2D &x,
	const CPose2D &u,
	CMatrixDouble33			 &df_dx,
	CMatrixDouble33			 &df_du,
	const bool compute_df_dx,
	const bool compute_df_du )
{
	const double   spx = sin(x.phi());
	const double   cpx = cos(x.phi());
	if (compute_df_dx)
	{
	/*
		df_dx =
		[ 1, 0, -sin(phi_x)*x_u-cos(phi_x)*y_u ]
		[ 0, 1,  cos(phi_x)*x_u-sin(phi_x)*y_u ]
		[ 0, 0,                              1 ]
	*/
		df_dx.unit(3,1.0);  // Start from the identity; only (0,2),(1,2) differ.
		const double xu = u.x();
		const double yu = u.y();
		df_dx.get_unsafe(0,2) = -spx*xu-cpx*yu;
		df_dx.get_unsafe(1,2) =  cpx*xu-spx*yu;
	}
	if (compute_df_du)
	{
	/*
		df_du =
		[ cos(phi_x) , -sin(phi_x) ,  0  ]
		[ sin(phi_x) ,  cos(phi_x) ,  0  ]
		[         0  ,          0  ,  1  ]
	*/
		// This is the homogeneous matrix of "x":
		df_du.get_unsafe(0,2) =
		df_du.get_unsafe(1,2) =
		df_du.get_unsafe(2,0) =
		df_du.get_unsafe(2,1) = 0;
		df_du.get_unsafe(2,2) = 1;
		df_du.get_unsafe(0,0) =  cpx;
		df_du.get_unsafe(0,1) = -spx;
		df_du.get_unsafe(1,0) =  spx;
		df_du.get_unsafe(1,1) =  cpx;
	}
}
/** Places the robot at "location" (with the heading incremented by a quarter
 *  turn) and re-centers the 3D view camera on the new position.
 *  NOTE(review): the heading uses a truncated pi (3.14159/2) rather than
 *  M_PI/2 -- confirm whether the small angular error is acceptable.
 */
void CVirtualWorld::LocateRobot(CPose2D &location)
{
	location.phi_incr(3.14159/2);
	robot->setPose(location);
	double x=location.x();
	double y=location.y();
	win.setCameraPointingToPoint(x,y,0);
	win.repaint();
}
/** Reads the current odometry pose and velocities from the ARIA robot,
 *  converting mm -> m and degrees -> radians.
 *  @return Always true. */
bool MyReactiveInterface::getCurrentPoseAndSpeeds(CPose2D &curPose, float &curV,float &curW)
{
	cout << "getCurrentPoseAndSpeeds" << endl;

	const ArPose odom = robot->getPose();
	curPose.x( odom.getX() * 0.001 );
	curPose.y( odom.getY() * 0.001 );
	curPose.phi( DEG2RAD( odom.getTh() ) );

	curV = robot->getVel() * 0.001;          // mm/s -> m/s
	curW = DEG2RAD( robot->getRotVel() );    // deg/s -> rad/s
	return true;
}
/*---------------------------------------------------------------
						drawSingleSample
 ---------------------------------------------------------------*/
/** Draws one random sample from this Gaussian pose PDF: mean plus a
 *  multivariate Gaussian perturbation from "cov"; the resulting heading is
 *  normalized to [-pi,pi]. On an exception, the covariance is dumped to a
 *  debug file by the clean-up macro.
 *  Fix: restore the function's missing closing brace.
 */
void CPosePDFGaussian::drawSingleSample(CPose2D& outPart) const
{
	MRPT_START

	CVectorDouble v;
	getRandomGenerator().drawGaussianMultivariate(v, cov);

	outPart.x(mean.x() + v[0]);
	outPart.y(mean.y() + v[1]);
	outPart.phi(mean.phi() + v[2]);

	// Range -pi,pi
	outPart.normalizePhi();

	MRPT_END_WITH_CLEAN_UP(
		cov.saveToTextFile("__DEBUG_EXC_DUMP_drawSingleSample_COV.txt"););
}
/*--------------------------------------------------------------- squareErrorVector ---------------------------------------------------------------*/ void TMatchingPairList::squareErrorVector(const CPose2D &q, vector<float> &out_sqErrs ) const { out_sqErrs.resize( size() ); // * \f[ e_i = | x_{this} - q \oplus x_{other} |^2 \f] const float ccos = cos(q.phi()); const float csin = sin(q.phi()); const float qx = q.x(); const float qy = q.y(); const_iterator corresp; vector<float>::iterator e_i; for (corresp=begin(), e_i = out_sqErrs.begin();corresp!=end();++corresp, ++e_i) { float xx = qx + ccos * corresp->other_x - csin * corresp->other_y; float yy = qy + csin * corresp->other_x + ccos * corresp->other_y; *e_i = square( corresp->this_x - xx ) + square( corresp->this_y - yy ); } }
/** Simulates the readings of a set of sonar sensors over this grid map.
 *  For each sonar in the observation, several rays are cast across the
 *  sensor's cone aperture and the shortest detected range is kept.
 *  @param inout_observation [in/out] Sensor poses in, sensed distances out.
 *  @param robotPose  Robot pose the sonars are attached to.
 *  @param threshold  Occupancy threshold for a cell to count as an obstacle.
 *  @param rangeNoiseStd / angleNoiseStd  Additive simulation noise.
 */
void  COccupancyGridMap2D::sonarSimulator(
	CObservationRange	        &inout_observation,
	const CPose2D				&robotPose,
	float						threshold,
	float						rangeNoiseStd,
	float 						angleNoiseStd) const
{
	const float free_thres = 1.0f - threshold;
	// Clip every ray to the sensor's maximum distance (in cells):
	const unsigned int max_ray_len = round(inout_observation.maxSensorDistance / resolution);

	for (CObservationRange::iterator itR=inout_observation.begin();itR!=inout_observation.end();++itR)
	{
		const CPose2D sensorAbsolutePose = CPose2D( CPose3D(robotPose) + CPose3D(itR->sensorPose) );

		// For each sonar cone, simulate several rays and keep the shortest distance:
		ASSERT_(inout_observation.sensorConeApperture>0)
		size_t nRays = round(1+ inout_observation.sensorConeApperture / DEG2RAD(1.0) );  // ~1 ray per degree

		double direction = sensorAbsolutePose.phi() - 0.5*inout_observation.sensorConeApperture;
		const double Adir = inout_observation.sensorConeApperture / nRays;

		float min_detected_obs=0;
		for (size_t i=0;i<nRays;i++, direction+=Adir )
		{
			bool valid;
			float sim_rang;
			simulateScanRay(
				sensorAbsolutePose.x(), sensorAbsolutePose.y(), direction,
				sim_rang, valid,
				max_ray_len, free_thres,
				rangeNoiseStd, angleNoiseStd );
			// Keep the first ray unconditionally (!i), then the minimum:
			if (valid && (sim_rang<min_detected_obs || !i))
				min_detected_obs = sim_rang;
		}
		// Save:
		itR->sensedDistance = min_detected_obs;
	}
}
/*----------------------------------------------------------------------------
					ICP_Method_LM
  ----------------------------------------------------------------------------*/
/** ICP registration of points map "mm1" against map "m2" using a
 *  Levenberg-Marquardt inner optimization of the kernel-weighted squared
 *  error. The outer loop alternates matching (determineMatching2D) with an
 *  inner LM refinement of the pose "q"; thresholds shrink by options.ALFA
 *  when the pose stops moving. Returns a Gaussian PDF with the final pose
 *  and the inverse LM matrix as covariance.
 */
CPosePDFPtr CICP::ICP_Method_LM(
		const mrpt::maps::CMetricMap		*mm1,
		const mrpt::maps::CMetricMap		*m2,
		const CPosePDFGaussian	&initialEstimationPDF,
		TReturnInfo				&outInfo )
{
	MRPT_START

	// The result can be either a Gaussian or a SOG:
	size_t              nCorrespondences=0;
	bool                keepIteratingICP;
	CPose2D	            grossEst = initialEstimationPDF.mean;
	TMatchingPairList   correspondences,old_correspondences;
	CPose2D             lastMeanPose;
	std::vector<float>  other_xs_trans,other_ys_trans; // temporary container of "other" map (map2) transformed by "q"
	CMatrixFloat dJ_dq;  // The jacobian
	CPose2D q;	// The updated 2D transformation estimate
	CPose2D q_new;

	const bool onlyUniqueRobust = options.onlyUniqueRobust;
	const bool onlyKeepTheClosest = options.onlyClosestCorrespondences;

	// Assure the class of the maps:
	ASSERT_(mm1->GetRuntimeClass()->derivedFrom(CLASS_ID(CPointsMap)));
	const CPointsMap	*m1 = static_cast<const CPointsMap*>(mm1);

	// Asserts:
	// -----------------
	ASSERT_( options.ALFA>0 && options.ALFA<1 );

	// The algorithm output auxiliar info:
	// -------------------------------------------------
	outInfo.nIterations		= 0;
	outInfo.goodness		= 1.0f;

	TMatchingParams matchParams;
	TMatchingExtraResults matchExtraResults;

	matchParams.maxDistForCorrespondence = options.thresholdDist;			// Distance threshold
	matchParams.maxAngularDistForCorrespondence = options.thresholdAng;	// Angular threshold
	matchParams.angularDistPivotPoint = TPoint3D(q.x(),q.y(),0); // Pivot point for angular measurements
	matchParams.onlyKeepTheClosest = onlyKeepTheClosest;
	matchParams.onlyUniqueRobust = onlyUniqueRobust;
	matchParams.decimation_other_map_points = options.corresponding_points_decimation;

	// The gaussian PDF to estimate:
	// ------------------------------------------------------
	// First gross approximation:
	q = grossEst;

	// For LM inverse
	CMatrixFixedNumeric<float,3,3>	C;
	CMatrixFixedNumeric<float,3,3>	C_inv;		// This will keep the cov. matrix at the end

	// Asure maps are not empty!
	// ------------------------------------------------------
	if ( !m2->isEmpty() )
	{
		matchParams.offset_other_map_points = 0;
		// ------------------------------------------------------
		//					The ICP loop
		// ------------------------------------------------------
		do
		{
			// ------------------------------------------------------
			//		Find the matching (for a points map)
			// ------------------------------------------------------
			m1->determineMatching2D(
				m2,						// The other map
				q,						// The other map pose
				correspondences,
				matchParams, matchExtraResults);

			nCorrespondences = correspondences.size();

			if ( !nCorrespondences )
			{
				// Nothing we can do !!
				keepIteratingICP = false;
			}
			else
			{
				// Compute the estimated pose through iterative least-squares: Levenberg-Marquardt
				// ----------------------------------------------------------------------
				dJ_dq.setSize(3,nCorrespondences);  // The jacobian of the error function wrt the transformation q

				double  lambda = options.LM_initial_lambda;		// The LM parameter
				double  ccos = cos(q.phi());
				double	csin = sin(q.phi());
				double  w1,w2,w3;
				double  q1,q2,q3;
				double  A,B;
				const double  Axy = options.Axy_aprox_derivatives;		// For approximating the derivatives

				// Compute at once the square errors for each point with the current "q" and the transformed points:
				std::vector<float> sq_errors;
				correspondences.squareErrorVector( q, sq_errors, other_xs_trans,other_ys_trans);
				double OSE_initial = math::sum( sq_errors );

				// Compute "dJ_dq"
				// ------------------------------------
				double  rho2 = square( options.kernel_rho );
				mrpt::utils::TMatchingPairList::iterator	it;
				std::vector<float>::const_iterator other_x_trans,other_y_trans;
				size_t  i;

				for (i=0,
					it=correspondences.begin(),
					other_x_trans = other_xs_trans.begin(),
					other_y_trans = other_ys_trans.begin();
					i<nCorrespondences;
					++i, ++it,++other_x_trans,++other_y_trans )
				{
					// Jacobian: dJ_dx
					// --------------------------------------
//#define ICP_DISTANCES_TO_LINE

#ifndef ICP_DISTANCES_TO_LINE
					w1= *other_x_trans-Axy;
					q1 = m1->squareDistanceToClosestCorrespondence( w1, *other_y_trans );
					q1= kernel( q1, rho2 );

					w2= *other_x_trans;
					q2 = m1->squareDistanceToClosestCorrespondence( w2, *other_y_trans );
					q2= kernel( q2, rho2 );

					w3= *other_x_trans+Axy;
					q3 = m1->squareDistanceToClosestCorrespondence( w3, *other_y_trans );
					q3= kernel( q3, rho2 );
#else
					// The distance to the line that interpolates the TWO closest points:
					float  x1,y1, x2,y2, d1,d2;
					m1->kdTreeTwoClosestPoint2D(
						*other_x_trans, *other_y_trans, 	// The query
						x1, y1,   // Closest point #1
						x2, y2,   // Closest point #2
						d1,d2);

					w1= *other_x_trans-Axy;
					q1 = math::closestSquareDistanceFromPointToLine( w1, *other_y_trans,  x1,y1, x2,y2 );
					q1= kernel( q1, rho2 );

					w2= *other_x_trans;
					q2 = math::closestSquareDistanceFromPointToLine( w2, *other_y_trans,  x1,y1, x2,y2 );
					q2= kernel( q2, rho2 );

					w3= *other_x_trans+Axy;
					q3 = math::closestSquareDistanceFromPointToLine( w3, *other_y_trans,  x1,y1, x2,y2 );
					q3= kernel( q3, rho2 );
#endif
					//interpolate
					A=( (q3-q2)/((w3-w2)*(w3-w1)) ) - ( (q1-q2)/((w1-w2)*(w3-w1)) );
					B=( (q1-q2)+(A*((w2*w2)-(w1*w1))) )/(w1-w2);

					dJ_dq.get_unsafe(0,i) = (2*A* *other_x_trans)+B;

					// Jacobian: dJ_dy
					// --------------------------------------
					w1= *other_y_trans-Axy;
#ifdef ICP_DISTANCES_TO_LINE
					q1 = math::closestSquareDistanceFromPointToLine( *other_x_trans, w1,  x1,y1, x2,y2 );
					q1= kernel( q1, rho2 );
#else
					q1= kernel( square(it->this_x - *other_x_trans)+ square( it->this_y - w1 ),  rho2 );
#endif

					w2= *other_y_trans;
					// q2 is alreay computed from above!
					//q2 = m1->squareDistanceToClosestCorrespondence( *other_x_trans, w2 );
					//q2= kernel( square(it->this_x - *other_x_trans)+ square( it->this_y - w2 ),  rho2 );

					w3= *other_y_trans+Axy;
#ifdef ICP_DISTANCES_TO_LINE
					q3 = math::closestSquareDistanceFromPointToLine( *other_x_trans, w3,  x1,y1, x2,y2 );
					q3= kernel( q3, rho2 );
#else
					q3= kernel( square(it->this_x - *other_x_trans)+ square( it->this_y - w3 ),  rho2 );
#endif

					//interpolate
					A=( (q3-q2)/((w3-w2)*(w3-w1)) ) - ( (q1-q2)/((w1-w2)*(w3-w1)) );
					B=( (q1-q2)+(A*((w2*w2)-(w1*w1))) )/(w1-w2);

					dJ_dq.get_unsafe(1,i) = (2*A* *other_y_trans)+B;

					// Jacobian: dR_dphi
					// --------------------------------------
					dJ_dq.get_unsafe(2,i) = dJ_dq.get_unsafe(0,i) * ( -csin * it->other_x - ccos * it->other_y )  +
						dJ_dq.get_unsafe(1,i) * (  ccos * it->other_x - csin * it->other_y );

				} // end for each corresp.

				// Now we have the Jacobian in dJ_dq.

				// Compute the Hessian matrix H = dJ_dq * dJ_dq^T
				CMatrixFloat  H_(3,3);
				H_.multiply_AAt(dJ_dq);

				CMatrixFixedNumeric<float,3,3>  H = CMatrixFixedNumeric<float,3,3>(H_);

				bool  keepIteratingLM = true;

				// ---------------------------------------------------
				// Iterate the inner LM loop until convergence:
				// ---------------------------------------------------
				q_new = q;

				std::vector<float>  new_sq_errors, new_other_xs_trans, new_other_ys_trans;
				size_t   nLMiters = 0;
				const size_t maxLMiters = 100;

				while ( keepIteratingLM &&  ++nLMiters<maxLMiters)
				{
					// The LM heuristic is:
					//  x_{k+1} = x_k  - ( H + \lambda diag(H) )^-1 * grad(J)
					//  grad(J) = dJ_dq * e (vector of errors)
					C = H;
					for (i=0;i<3;i++)
						C(i,i) *= (1+lambda);	// Levenberg-Maquardt heuristic

					C_inv = C.inv();

					// LM_delta = C_inv * dJ_dq * sq_errors
					Eigen::VectorXf  dJsq, LM_delta;
					dJ_dq.multiply_Ab( Eigen::Map<Eigen::VectorXf>(&sq_errors[0],sq_errors.size()), dJsq );
					C_inv.multiply_Ab(dJsq,LM_delta);

					q_new.x( q.x() - LM_delta[0] );
					q_new.y( q.y() - LM_delta[1] );
					q_new.phi( q.phi() - LM_delta[2] );

					// Compute the new square errors:
					// ---------------------------------------
					correspondences.squareErrorVector(
						q_new,
						new_sq_errors,
						new_other_xs_trans,
						new_other_ys_trans);

					float OSE_new = math::sum( new_sq_errors );

					bool improved = OSE_new < OSE_initial;

#if 0  // Debuggin'
					cout << "_____________" << endl;
					cout << "q -> q_new   : " << q << " -> " << q_new << endl;
					printf("err: %f  -> %f    lambda: %e\n", OSE_initial ,OSE_new, lambda );
					cout << "\\/J = "; utils::operator <<(cout,dJsq); cout << endl;
					mrpt::system::pause();
#endif

					keepIteratingLM =
						fabs(LM_delta[0])>options.minAbsStep_trans ||
						fabs(LM_delta[1])>options.minAbsStep_trans ||
						fabs(LM_delta[2])>options.minAbsStep_rot;

					if(improved)
					{
						//If resids have gone down, keep change and make lambda smaller by factor of 10
						lambda/=10;
						q=q_new;
						OSE_initial = OSE_new;
					}
					else
					{
						// Discard movement and try with larger lambda:
						lambda*=10;
					}

				} // end iterative LM

#if 0  // Debuggin'
				cout << "ICP loop: " << lastMeanPose  << " -> " << q << " LM iters: " << nLMiters  << " threshold: " << matchParams.maxDistForCorrespondence << endl;
#endif
				// --------------------------------------------------------
				// now the conditions for the outer ICP loop
				// --------------------------------------------------------
				keepIteratingICP = true;
				if	(fabs(lastMeanPose.x()-q.x())<options.minAbsStep_trans &&
					fabs(lastMeanPose.y()-q.y())<options.minAbsStep_trans &&
					fabs( math::wrapToPi( lastMeanPose.phi()-q.phi()) )<options.minAbsStep_rot)
				{
					matchParams.maxDistForCorrespondence		*= options.ALFA;
					matchParams.maxAngularDistForCorrespondence		*= options.ALFA;
					if (matchParams.maxDistForCorrespondence < options.smallestThresholdDist )
						keepIteratingICP = false;

					if (++matchParams.offset_other_map_points>=options.corresponding_points_decimation)
						matchParams.offset_other_map_points=0;
				}
				lastMeanPose = q;
			}	// end of "else, there are correspondences"

			// Next iteration:
			outInfo.nIterations++;

			if (outInfo.nIterations >= options.maxIterations && matchParams.maxDistForCorrespondence>options.smallestThresholdDist)
			{
				matchParams.maxDistForCorrespondence		*= options.ALFA;
			}

		} while	( (keepIteratingICP && outInfo.nIterations<options.maxIterations) ||
					(outInfo.nIterations >= options.maxIterations && matchParams.maxDistForCorrespondence>options.smallestThresholdDist) );

		outInfo.goodness = matchExtraResults.correspondencesRatio;

		//if (!options.skip_quality_calculation)
		{
			outInfo.quality= matchExtraResults.correspondencesRatio;
		}

	} // end of "if m2 is not empty"

	return CPosePDFGaussianPtr( new CPosePDFGaussian(q, C_inv.cast<double>() ) );
	MRPT_END
}
/*----------------------------------------------------------------------------
					ICP_Method_Classic
  ----------------------------------------------------------------------------*/
/** Classic iterative-closest-point alignment of map \a mm2 against \a m1.
 *
 *  Starting from \a initialEstimationPDF.mean, the loop alternates
 *  (1) point matching via m1->determineMatching2D() and
 *  (2) closed-form SE(2) estimation via mrpt::tfest::se2_l2(),
 *  shrinking the correspondence thresholds by options.ALFA whenever the
 *  mean pose stops moving, until the distance threshold falls below
 *  options.smallestThresholdDist or options.maxIterations is hit.
 *
 * \param m1   The reference (fixed) map.
 * \param mm2  The map to be aligned against m1.
 * \param initialEstimationPDF  Initial gross pose estimate (its mean is used).
 * \param outInfo  Filled with nIterations, goodness (correspondence ratio)
 *                 and a crude local-minimum "quality" measure.
 * \return A CPosePDFGaussian, or a CPosePDFSOG when options.doRANSAC is set.
 */
CPosePDFPtr CICP::ICP_Method_Classic(
		const mrpt::maps::CMetricMap		*m1,
		const mrpt::maps::CMetricMap		*mm2,
		const CPosePDFGaussian	&initialEstimationPDF,
		TReturnInfo				&outInfo )
{
	MRPT_START

	// The result can be either a Gaussian or a SOG:
	CPosePDFPtr								resultPDF;
	CPosePDFGaussianPtr						gaussPdf;
	CPosePDFSOGPtr							SOG;

	size_t  nCorrespondences=0;
	bool    keepApproaching;
	CPose2D	grossEst = initialEstimationPDF.mean;
	// NOTE: old_correspondences is declared but never used below.
	mrpt::utils::TMatchingPairList correspondences,old_correspondences;
	CPose2D lastMeanPose;

	// Assure the class of the maps:
	const mrpt::maps::CMetricMap		*m2 = mm2;

	// Asserts:
	// -----------------
	ASSERT_( options.ALFA>=0 && options.ALFA<1 );

	// The algorithm output auxiliar info:
	// -------------------------------------------------
	outInfo.nIterations		= 0;
	outInfo.goodness		= 1;
	outInfo.quality			= 0;

	// The gaussian PDF to estimate:
	// ------------------------------------------------------
	gaussPdf = CPosePDFGaussian::Create();

	// First gross approximation:
	gaussPdf->mean = grossEst;

	// Initial thresholds:
	mrpt::maps::TMatchingParams matchParams;
	mrpt::maps::TMatchingExtraResults matchExtraResults;

	matchParams.maxDistForCorrespondence = options.thresholdDist;			// Distance threshold
	matchParams.maxAngularDistForCorrespondence = options.thresholdAng;	// Angular threshold
	matchParams.onlyKeepTheClosest = options.onlyClosestCorrespondences;
	matchParams.onlyUniqueRobust = options.onlyUniqueRobust;
	matchParams.decimation_other_map_points = options.corresponding_points_decimation;

	// Asure maps are not empty!
	// ------------------------------------------------------
	if ( !m2->isEmpty() )
	{
		matchParams.offset_other_map_points = 0;

		// ------------------------------------------------------
		//					The ICP loop
		// ------------------------------------------------------
		do
		{
			// ------------------------------------------------------
			//		Find the matching (for a points map)
			// ------------------------------------------------------
			matchParams.angularDistPivotPoint = TPoint3D(gaussPdf->mean.x(),gaussPdf->mean.y(),0); // Pivot point for angular measurements

			m1->determineMatching2D(
				m2,						// The other map
				gaussPdf->mean,			// The other map pose
				correspondences,
				matchParams, matchExtraResults);

			nCorrespondences = correspondences.size();

			// ***DEBUG***
			// correspondences.dumpToFile("debug_correspondences.txt");

			if ( !nCorrespondences )
			{
				// Nothing we can do !!
				keepApproaching = false;
			}
			else
			{
				// Compute the estimated pose.
				//  (Method from paper of J.Gonzalez, Martinez y Morales)
				// ----------------------------------------------------------------------
				mrpt::math::TPose2D est_mean;
				mrpt::tfest::se2_l2(correspondences, est_mean);

				gaussPdf->mean = est_mean;

				// If matching has not changed, decrease the thresholds:
				// --------------------------------------------------------
				keepApproaching = true;
				if (!(fabs(lastMeanPose.x()-gaussPdf->mean.x())>options.minAbsStep_trans ||
					fabs(lastMeanPose.y()-gaussPdf->mean.y())>options.minAbsStep_trans ||
					fabs(math::wrapToPi(lastMeanPose.phi()-gaussPdf->mean.phi()))>options.minAbsStep_rot))
				{
					matchParams.maxDistForCorrespondence *= options.ALFA;
					matchParams.maxAngularDistForCorrespondence *= options.ALFA;
					if (matchParams.maxDistForCorrespondence < options.smallestThresholdDist )
						keepApproaching = false;

					// Cycle through the decimation offsets so that, over
					// iterations, all points of the other map get a chance:
					if (++matchParams.offset_other_map_points>=options.corresponding_points_decimation)
						matchParams.offset_other_map_points=0;
				}

				lastMeanPose = gaussPdf->mean;
			}	// end of "else, there are correspondences"

			// Next iteration:
			outInfo.nIterations++;

			// Once the iteration budget is exhausted, force the distance
			// threshold down so the loop condition below eventually fails:
			if (outInfo.nIterations >= options.maxIterations && matchParams.maxDistForCorrespondence>options.smallestThresholdDist)
			{
				matchParams.maxDistForCorrespondence *= options.ALFA;
			}

		} while ( (keepApproaching && outInfo.nIterations<options.maxIterations) ||
					(outInfo.nIterations >= options.maxIterations && matchParams.maxDistForCorrespondence>options.smallestThresholdDist) );

		// -------------------------------------------------
		//   Obtain the covariance matrix of the estimation
		// -------------------------------------------------
		if (!options.skip_cov_calculation && nCorrespondences)
		{
			switch(options.ICP_covariance_method)
			{
			// ----------------------------------------------
			// METHOD: MSE linear estimation
			// ----------------------------------------------
			case icpCovLinealMSE:
				mrpt::tfest::se2_l2(correspondences, *gaussPdf );
				// Scale covariance:
				gaussPdf->cov *= options.covariance_varPoints;
				break;

			// ----------------------------------------------
			// METHOD: Method from Oxford MRG's "OXSMV2"
			//
			//  It is the equivalent covariance resulting
			//   from a Levenberg-Maquardt optimization stage.
			// ----------------------------------------------
			case icpCovFiniteDifferences:
			{
				// Numerically estimate the Jacobian of the kernelized residual
				// w.r.t. (x,y,phi) via parabola fitting over +/- Axy offsets:
				Eigen::Matrix<double,3,Eigen::Dynamic> D(3,nCorrespondences);

				const TPose2D transf( gaussPdf->mean );

				double ccos = cos(transf.phi);
				double csin = sin(transf.phi);

				double w1,w2,w3;
				double q1,q2,q3;
				double A,B;
				double Axy = 0.01;

				// Fill out D:
				double rho2 = square( options.kernel_rho );
				mrpt::utils::TMatchingPairList::iterator it;
				size_t  i;
				for (i=0, it=correspondences.begin();i<nCorrespondences; ++i, ++it)
				{
					float other_x_trans = transf.x + ccos * it->other_x - csin * it->other_y;
					float other_y_trans = transf.y + csin * it->other_x + ccos * it->other_y;

					// Jacobian: dR2_dx
					// --------------------------------------
					w1= other_x_trans-Axy;
					q1= kernel( square(it->this_x - w1)+ square( it->this_y - other_y_trans ),  rho2 );

					w2= other_x_trans;
					q2= kernel( square(it->this_x - w2)+ square( it->this_y - other_y_trans ),  rho2 );

					w3= other_x_trans+Axy;
					q3= kernel( square(it->this_x - w3)+ square( it->this_y - other_y_trans ),  rho2 );

					//interpolate
					A=( (q3-q2)/((w3-w2)*(w3-w1)) ) - ( (q1-q2)/((w1-w2)*(w3-w1)) );
					B=( (q1-q2)+(A*((w2*w2)-(w1*w1))) )/(w1-w2);

					D(0,i) = (2*A*other_x_trans)+B;

					// Jacobian: dR2_dy
					// --------------------------------------
					w1= other_y_trans-Axy;
					q1= kernel( square(it->this_x - other_x_trans)+ square( it->this_y - w1 ),  rho2 );

					w2= other_y_trans;
					q2= kernel( square(it->this_x - other_x_trans)+ square( it->this_y - w2 ),  rho2 );

					w3= other_y_trans+Axy;
					q3= kernel( square(it->this_x - other_x_trans)+ square( it->this_y - w3 ),  rho2 );

					//interpolate
					A=( (q3-q2)/((w3-w2)*(w3-w1)) ) - ( (q1-q2)/((w1-w2)*(w3-w1)) );
					B=( (q1-q2)+(A*((w2*w2)-(w1*w1))) )/(w1-w2);

					D(1,i) = (2*A*other_y_trans)+B;

					// Jacobian: dR_dphi
					// --------------------------------------
					D(2,i) = D(0,i) * ( -csin * it->other_x - ccos * it->other_y )  +
							 D(1,i) * (  ccos * it->other_x - csin * it->other_y );

				} // end for each corresp.

				// COV = ( D*D^T + lamba*I )^-1
				CMatrixDouble33  DDt = D*D.transpose();

				for (i=0;i<3;i++) DDt( i,i ) += 1e-6;  // Just to make sure the matrix is not singular, while not changing its covariance significantly.

				DDt.inv(gaussPdf->cov);
			}
			break;
			default:
				THROW_EXCEPTION_FMT("Invalid value for ICP_covariance_method: %i", static_cast<int>(options.ICP_covariance_method));
			}
		}

		outInfo.goodness = matchExtraResults.correspondencesRatio;

		if (!nCorrespondences || options.skip_quality_calculation)
		{
			outInfo.quality = matchExtraResults.correspondencesRatio;
		}
		else
		{
			// Compute a crude estimate of the quality of scan matching at this local minimum:
			// -----------------------------------------------------------------------------------
			// Probe the correspondence ratio at the solution and at four
			// poses displaced +/- Axy along x and y; a sharp drop-off
			// (large negative quality) indicates a well-defined minimum.
			float  Axy = matchParams.maxDistForCorrespondence*0.9f;

			TPose2D  P0( gaussPdf->mean );
			TPose2D  PX1(P0);	PX1.x -= Axy;
			TPose2D  PX2(P0);	PX2.x += Axy;
			TPose2D  PY1(P0);	PY1.y -= Axy;
			TPose2D  PY2(P0);	PY2.y += Axy;

			matchParams.angularDistPivotPoint = TPoint3D( gaussPdf->mean.x(),gaussPdf->mean.y(),0);
			m1->determineMatching2D(
				m2,						// The other map
				P0,						// The other map pose
				correspondences,
				matchParams, matchExtraResults);
			const float E0 = matchExtraResults.correspondencesRatio;

			m1->determineMatching2D(
				m2,						// The other map
				PX1,					// The other map pose
				correspondences,
				matchParams, matchExtraResults);
			const float EX1 = matchExtraResults.correspondencesRatio;

			m1->determineMatching2D(
				m2,						// The other map
				PX2,					// The other map pose
				correspondences,
				matchParams, matchExtraResults);
			const float EX2 = matchExtraResults.correspondencesRatio;

			m1->determineMatching2D(
				m2,						// The other map
				PY1,					// The other map pose
				correspondences,
				matchParams, matchExtraResults);
			const float EY1 = matchExtraResults.correspondencesRatio;

			m1->determineMatching2D(
				m2,						// The other map
				PY2,					// The other map pose
				correspondences,
				matchParams, matchExtraResults);
			const float EY2 = matchExtraResults.correspondencesRatio;

			outInfo.quality= -max(EX1-E0, max(EX2-E0, max(EY1-E0 , EY2-E0 ) ) )/(E0+1e-1);
		}

	} // end of "if m2 is not empty"

	// We'll return a CPosePDFGaussian or a CPosePDFSOG if RANSAC is enabled:
	// -----------------------------------------------------------------------

	// RANSAC?
	if (options.doRANSAC)
	{
		mrpt::tfest::TSE2RobustParams params;
		params.ransac_minSetSize = options.ransac_minSetSize;
		params.ransac_maxSetSize = options.ransac_maxSetSize;
		params.ransac_mahalanobisDistanceThreshold = options.ransac_mahalanobisDistanceThreshold;
		params.ransac_nSimulations = options.ransac_nSimulations;
		params.ransac_fuseByCorrsMatch = options.ransac_fuseByCorrsMatch;
		params.ransac_fuseMaxDiffXY = options.ransac_fuseMaxDiffXY;
		params.ransac_fuseMaxDiffPhi = options.ransac_fuseMaxDiffPhi;
		params.ransac_algorithmForLandmarks = false;

		mrpt::tfest::TSE2RobustResult results;
		mrpt::tfest::se2_l2_robust(correspondences, options.normalizationStd, params, results);

		SOG = CPosePDFSOG::Create();
		*SOG = results.transformation;

		// And return the SOG:
		resultPDF = SOG;
	}
	else
	{
		// Return the gaussian distribution:
		resultPDF = gaussPdf;
	}

	return resultPDF;

	MRPT_END
}
double CPose2D::distance2DFrobeniusTo( const CPose2D & p) const { return std::sqrt(square(p.x()-x())+square(p.y()-y())+4*(1-cos(p.phi()-phi()))); }
/** Builds an RViz MarkerArray depicting the current SRBA graph:
 *  - one ARROW marker per keyframe pose (pose from a spanning tree rooted
 *    at the most recent keyframe, curr_kf_id_-1),
 *  - a TEXT_VIEW_FACING label with each keyframe id,
 *  - purple LINE_STRIP markers for keyframe-to-keyframe edges,
 *  - blue LINE_STRIP markers for keyframe-to-feature pose constraints,
 *  - DELETE markers to reclaim ids left over from the previous call.
 *
 *  Does nothing (returns early) when there are no keyframes or only the
 *  initial one (curr_kf_id_ == 0).
 *
 * \param[out] marray  Marker array appended to; caller publishes it.
 */
void SRBASolver::publishGraphVisualization(visualization_msgs::MarkerArray &marray)
{
	ROS_INFO("Visualizing");
	// Vertices are round, red spheres
	// NOTE(review): marker type is actually ARROW, contradicting this
	// legacy comment — confirm which is intended.
	visualization_msgs::Marker m;
	m.header.frame_id = relative_map_frame_;
	m.header.stamp = ros::Time::now();
	m.id = 0;
	m.ns = "karto";
	m.type = visualization_msgs::Marker::ARROW;
	m.pose.position.x = 0.0;
	m.pose.position.y = 0.0;
	m.pose.position.z = 0.0;
	m.scale.x = 0.15;
	m.scale.y = 0.15;
	m.scale.z = 0.15;
	m.color.r = 1.0;
	m.color.g = 0;
	m.color.b = 0.0;
	m.color.a = 1.0;
	m.lifetime = ros::Duration(0);

	// Odometry edges are opaque blue line strips
	visualization_msgs::Marker edge;
	edge.header.frame_id = relative_map_frame_;
	edge.header.stamp = ros::Time::now();
	edge.action = visualization_msgs::Marker::ADD;
	edge.ns = "karto";
	edge.id = 0;
	edge.type = visualization_msgs::Marker::LINE_STRIP;
	edge.scale.x = 0.1;
	edge.scale.y = 0.1;
	edge.scale.z = 0.1;
	edge.color.a = 1.0;
	edge.color.r = 0.0;
	edge.color.g = 0.0;
	edge.color.b = 1.0;

	// Loop edges are purple, opacity depends on backend state
	visualization_msgs::Marker loop_edge;
	loop_edge.header.frame_id = relative_map_frame_;
	loop_edge.header.stamp = ros::Time::now();
	loop_edge.action = visualization_msgs::Marker::ADD;
	loop_edge.ns = "spanning_tree";
	loop_edge.id = 0;
	loop_edge.type = visualization_msgs::Marker::LINE_STRIP;
	loop_edge.scale.x = 0.1;
	loop_edge.scale.y = 0.1;
	loop_edge.scale.z = 0.1;
	loop_edge.color.a = 1.0;
	loop_edge.color.r = 1.0;
	loop_edge.color.g = 0.0;
	loop_edge.color.b = 1.0;

	// White floating text labels showing each keyframe id.
	visualization_msgs::Marker node_text;
	node_text.header.frame_id = relative_map_frame_;
	node_text.header.stamp = ros::Time::now();
	node_text.action = visualization_msgs::Marker::ADD;
	node_text.ns = "karto";
	node_text.id = 0;
	node_text.type = visualization_msgs::Marker::TEXT_VIEW_FACING;
	node_text.scale.z = 0.3;
	node_text.color.a = 1.0;
	node_text.color.r = 1.0;
	node_text.color.g = 1.0;
	node_text.color.b = 1.0;

	if(!rba_.get_rba_state().keyframes.empty())
	{
		// Use a spanning tree to estimate the global pose of every node
		// starting (root) at the given keyframe:
		srba_t::frameid2pose_map_t spantree;
		if(curr_kf_id_ == 0)
			return;
		// NOTE(review): this roots the tree at the latest keyframe, while
		// Compute() roots at keyframe 0 — poses here are in a different
		// reference frame than the corrections; confirm this is intended.
		TKeyFrameID root_keyframe(curr_kf_id_ -1 );
		rba_.create_complete_spanning_tree(root_keyframe,spantree);

		int id = 0;
		for (srba_t::frameid2pose_map_t::const_iterator itP = spantree.begin();itP!=spantree.end();++itP)
		{
			if (root_keyframe==itP->first) continue;

			const CPose2D p = itP->second.pose;

			// Add the vertex to the marker array
			m.id = id;
			m.pose.position.x = p.x();
			m.pose.position.y = p.y();
			geometry_msgs::Quaternion q = tf::createQuaternionMsgFromYaw(p.phi());
			m.pose.orientation = q;
			marray.markers.push_back(visualization_msgs::Marker(m));
			id++;

			// Label the node with its keyframe id, slightly offset.
			node_text.id = id;
			node_text.text= boost::lexical_cast<std::string>(itP->first);
			node_text.pose.position.x = p.x()+0.15;
			node_text.pose.position.y = p.y()+0.15;
			marray.markers.push_back(visualization_msgs::Marker(node_text));
			id++;
		}

		// Keyframe-to-keyframe edges. The root's pose is the identity, so
		// endpoints equal to the root keep the default-constructed CPose2D.
		for (srba_t::rba_problem_state_t::k2k_edges_deque_t::const_iterator itEdge = rba_.get_rba_state().k2k_edges.begin();
				itEdge!=rba_.get_rba_state().k2k_edges.end();++itEdge)
		{
			CPose2D p1, p2;
			if(itEdge->from != root_keyframe)
			{
				srba_t::frameid2pose_map_t::const_iterator itN1 = spantree.find(itEdge->from);
				if(itN1==spantree.end())
					continue;
				p1 = itN1->second.pose;
			}
			if(itEdge->to != root_keyframe)
			{
				srba_t::frameid2pose_map_t::const_iterator itN2 = spantree.find(itEdge->to);
				if(itN2==spantree.end())
					continue;
				p2 = itN2->second.pose;
			}
			geometry_msgs::Point pt1, pt2;
			pt1.x = p1.x();
			pt1.y = p1.y();
			pt2.x = p2.x();
			pt2.y = p2.y();

			loop_edge.points.clear();
			loop_edge.points.push_back(pt1);
			loop_edge.points.push_back(pt2);
			loop_edge.id = id;
			marray.markers.push_back(visualization_msgs::Marker(loop_edge));
			id++;
		}

		// Render landmark as pose constraint
		// For each KF: check all its "observations"
		for (srba_t::frameid2pose_map_t::const_iterator it=spantree.begin();it!=spantree.end();++it)
		{
			const TKeyFrameID kf_id = it->first;
			const srba_t::pose_flag_t & pf = it->second;

			const typename srba_t::keyframe_info &kfi = rba_.get_rba_state().keyframes[kf_id];

			for (size_t i=0;i<kfi.adjacent_k2f_edges.size();i++)
			{
				const srba_t::k2f_edge_t * k2f = kfi.adjacent_k2f_edges[i];
				const TKeyFrameID other_kf_id = k2f->feat_rel_pos->id_frame_base;
				if (kf_id==other_kf_id)
					continue; // It's not an constraint with ANOTHER keyframe

				// Is the other KF in the spanning tree?
				srba_t::frameid2pose_map_t::const_iterator other_it=spantree.find(other_kf_id);
				if (other_it==spantree.end())
					continue;

				const srba_t::pose_flag_t & other_pf = other_it->second;

				// Add edge between the two KFs to represent the pose constraint:
				mrpt::poses::CPose2D p1 = mrpt::poses::CPose2D(pf.pose);  // Convert to 3D
				mrpt::poses::CPose2D p2 = mrpt::poses::CPose2D(other_pf.pose);

				geometry_msgs::Point pt1, pt2;
				pt1.x = p1.x();
				pt1.y = p1.y();
				pt2.x = p2.x();
				pt2.y = p2.y();

				edge.points.clear();
				edge.points.push_back(pt1);
				edge.points.push_back(pt2);
				edge.id = id;
				marray.markers.push_back(visualization_msgs::Marker(edge));
				id++;
			}
		} // end for each KF

		// Delete any excess markers whose ids haven't been reclaimed
		m.action = visualization_msgs::Marker::DELETE;
		for (; id < marker_count_; id++)
		{
			m.id = id;
			marray.markers.push_back(visualization_msgs::Marker(m));
		}

		marker_count_ = marray.markers.size();
	}
	else
		ROS_INFO("Graph is empty");
}
bool mrpt::poses::operator!=(const CPose2D &p1,const CPose2D &p2)
{
	// Poses differ when any component differs. Written via De Morgan as the
	// negation of component-wise equality; note phi is compared as a raw
	// double (no angle wrapping), exactly as before.
	return !( p1.x()==p2.x() && p1.y()==p2.y() && p1.phi()==p2.phi() );
}
bool mrpt::poses::operator==(const CPose2D &p1,const CPose2D &p2)
{
	// Exact component-wise equality, expressed with early-exit guards
	// instead of a single conjunction. phi is compared as a raw double,
	// without normalizing the angle (same semantics as the original).
	if (p1.x()   != p2.x())   return false;
	if (p1.y()   != p2.y())   return false;
	return p1.phi() == p2.phi();
}
/** Instantiates one holonomic-navigation object per PTG and initializes each
 *  from the given config source.
 *
 * \param method  Which holonomic algorithm to create (ND or VFF).
 * \param ini     Config file the new objects load their parameters from.
 * \throws On an unknown \a method value.
 */
void CAbstractPTGBasedReactive::setHolonomicMethod(
	const THolonomicMethod method,
	const mrpt::utils::CConfigFileBase &ini)
{
	this->deleteHolonomicObjects();
	const size_t nPTGs = this->getPTG_count();
	m_holonomicMethod.resize(nPTGs);

	for (size_t i=0; i<nPTGs; i++)
	{
		switch (method)
		{
		case hmSEARCH_FOR_BEST_GAP:  m_holonomicMethod[i] = new CHolonomicND();  break;
		case hmVIRTUAL_FORCE_FIELDS: m_holonomicMethod[i] = new CHolonomicVFF(); break;
		default: THROW_EXCEPTION_CUSTOM_MSG1("Unknown Holonomic method: %u",static_cast<unsigned int>(method))
		};
		// Load params:
		m_holonomicMethod[i]->initialize( ini );
	}
}

// The main method: executes one time-iteration of the reactive navigation algorithm.
/** One full reactive-navigation cycle:
 *  read pose/velocities -> target-reached & watchdog checks -> STEP1..STEP7
 *  (collision grids, obstacle sensing, per-PTG TP-space transform, holonomic
 *  navigation, evaluation, best-PTG selection, speed command generation) ->
 *  send command to the robot -> statistics and optional log record.
 *  All exceptions are caught and reported to stdout; the cycle never throws.
 */
void CAbstractPTGBasedReactive::performNavigationStep()
{
	// Security tests:
	if (m_closing_navigator) return;  // Are we closing in the main thread?
	if (!m_init_done) THROW_EXCEPTION("Have you called loadConfigFile() before?")

	const size_t nPTGs = this->getPTG_count();

	// Whether to worry about log files:
	const bool fill_log_record = (m_logFile!=NULL || m_enableKeepLogRecords);
	CLogFileRecord newLogRec;
	newLogRec.infoPerPTG.resize(nPTGs);

	// Lock
	mrpt::synch::CCriticalSectionLocker lock( &m_critZoneNavigating );

	CTimeLoggerEntry tle1(m_timelogger,"navigationStep");

	try
	{
		totalExecutionTime.Tic(); // Start timer

		const mrpt::system::TTimeStamp tim_start_iteration = mrpt::system::now();

		/* ----------------------------------------------------------------
			Request current robot pose and velocities
		   ---------------------------------------------------------------- */
		poses::CPose2D curPose;
		float curVL; // in m/s
		float curW;  // in rad/s

		{
			CTimeLoggerEntry tle2(m_timelogger,"navigationStep.getCurrentPoseAndSpeeds");
			if ( !m_robot.getCurrentPoseAndSpeeds(curPose, curVL, curW ) )
			{
				doEmergencyStop("ERROR calling m_robot.getCurrentPoseAndSpeeds, stopping robot and finishing navigation");
				return;
			}
		}

		/* ----------------------------------------------------------------
			Have we reached the target location?
		   ---------------------------------------------------------------- */
		const double targetDist = curPose.distance2DTo( m_navigationParams->target.x, m_navigationParams->target.y );

		// Should "End of navigation" event be sent??
		if (!navigationEndEventSent && targetDist < DIST_TO_TARGET_FOR_SENDING_EVENT)
		{
			navigationEndEventSent = true;
			m_robot.sendNavigationEndEvent();
		}

		// Have we really reached the target?
		if ( targetDist < m_navigationParams->targetAllowedDistance )
		{
			m_robot.stop();
			m_navigationState = IDLE;
			if (m_enableConsoleOutput) printf_debug("Navigation target (%.03f,%.03f) was reached\n", m_navigationParams->target.x,m_navigationParams->target.y);

			if (!navigationEndEventSent)
			{
				navigationEndEventSent = true;
				m_robot.sendNavigationEndEvent();
			}
			return;
		}

		// Check the "no approaching the target"-alarm:
		// -----------------------------------------------------------
		if (targetDist < badNavAlarm_minDistTarget )
		{
			badNavAlarm_minDistTarget = targetDist;
			badNavAlarm_lastMinDistTime = system::getCurrentTime();
		}
		else
		{
			// Too much time have passed?
			if ( system::timeDifference( badNavAlarm_lastMinDistTime, system::getCurrentTime() ) > badNavAlarm_AlarmTimeout)
			{
				std::cout << "\n--------------------------------------------\nWARNING: Timeout for approaching toward the target expired!! Aborting navigation!! \n---------------------------------\n";
				m_navigationState = NAV_ERROR;
				return;
			}
		}

		// Compute target location relative to current robot pose:
		// ---------------------------------------------------------------------
		const CPose2D relTarget = CPose2D(m_navigationParams->target.x,m_navigationParams->target.y,m_navigationParams->targetHeading) - curPose;

		// STEP1: Collision Grids Builder.
		// -----------------------------------------------------------------------------
		STEP1_CollisionGridsBuilder(); // Will only recompute if "m_collisionGridsMustBeUpdated==true"

		// STEP2: Load the obstacles and sort them in height bands.
		// -----------------------------------------------------------------------------
		if (! STEP2_SenseObstacles() )
		{
			printf_debug("Error while loading and sorting the obstacles. Robot will be stopped.\n");
			m_robot.stop();
			m_navigationState = NAV_ERROR;
			return;
		}

		// Start timer
		executionTime.Tic();

		m_infoPerPTG.resize(nPTGs);
		vector<THolonomicMovement> holonomicMovements(nPTGs);

		for (size_t indexPTG=0;indexPTG<nPTGs;indexPTG++)
		{
			CHolonomicLogFileRecordPtr HLFR;

			// STEP3(a): Transform target location into TP-Space for each PTG
			// -----------------------------------------------------------------------------
			CParameterizedTrajectoryGenerator * ptg = getPTG(indexPTG);
			ASSERT_(ptg)
			TInfoPerPTG &ipf = m_infoPerPTG[indexPTG];

			// The picked movement in TP-Space (to be determined by holonomic method below)
			THolonomicMovement &holonomicMovement = holonomicMovements[indexPTG];
			holonomicMovement.PTG = ptg;

			// If the user doesn't want to use this PTG, just mark it as invalid:
			ipf.valid_TP = true;
			{
				const TNavigationParamsPTG * navp = dynamic_cast<const TNavigationParamsPTG*>(m_navigationParams);
				if (navp && !navp->restrict_PTG_indices.empty())
				{
					bool use_this_ptg = false;
					for (size_t i=0;i<navp->restrict_PTG_indices.size() && !use_this_ptg;i++)
					{
						if (navp->restrict_PTG_indices[i]==indexPTG)
							use_this_ptg = true;
					}
					ipf.valid_TP = use_this_ptg;
				}
			}

			// Normal PTG validity filter: check if target falls into the PTG domain:
			ipf.valid_TP = ipf.valid_TP && ptg->PTG_IsIntoDomain( relTarget.x(),relTarget.y() );

			if (ipf.valid_TP)
			{
				ptg->inverseMap_WS2TP(relTarget.x(),relTarget.y(),ipf.target_k,ipf.target_dist);

				ipf.target_alpha = ptg->index2alpha(ipf.target_k);
				ipf.TP_Target.x = cos(ipf.target_alpha) * ipf.target_dist;
				ipf.TP_Target.y = sin(ipf.target_alpha) * ipf.target_dist;

				// STEP3(b): Build TP-Obstacles
				// -----------------------------------------------------------------------------
				{
					CTimeLoggerEntry tle(m_timelogger,"navigationStep.STEP3_WSpaceToTPSpace");

					// Initialize TP-Obstacles:
					const size_t Ki = ptg->getAlfaValuesCount();
					ipf.TP_Obstacles.resize( Ki );
					for (size_t k=0;k<Ki;k++)
					{
						ipf.TP_Obstacles[k] = ptg->refDistance;
						// if the robot ends the trajectory due to a >180deg turn, set that as the max. distance, not D_{max}:
						float phi = ptg->GetCPathPoint_phi(k,ptg->getPointsCountInCPath_k(k)-1);
						if (fabs(phi) >= M_PI* 0.95f )
							ipf.TP_Obstacles[k]= ptg->GetCPathPoint_d(k,ptg->getPointsCountInCPath_k(k)-1);
					}

					// Implementation-dependent conversion:
					STEP3_WSpaceToTPSpace(indexPTG,ipf.TP_Obstacles);

					// Distances in TP-Space are normalized to [0,1]:
					const double _refD = 1.0/ptg->refDistance;
					for (size_t i=0;i<Ki;i++) ipf.TP_Obstacles[i] *= _refD;
				}

				// STEP4: Holonomic navigation method
				// -----------------------------------------------------------------------------
				{
					CTimeLoggerEntry tle(m_timelogger,"navigationStep.STEP4_HolonomicMethod");

					ASSERT_(m_holonomicMethod[indexPTG])
					m_holonomicMethod[indexPTG]->navigate(
						ipf.TP_Target,
						ipf.TP_Obstacles,
						ptg->getMax_V_inTPSpace(),
						holonomicMovement.direction,
						holonomicMovement.speed,
						HLFR);

					// Security: Scale down the velocity when heading towards obstacles,
					//  such that it's assured that we never go thru an obstacle!
					const int kDirection = static_cast<int>( holonomicMovement.PTG->alpha2index( holonomicMovement.direction ) );
					const double obsFreeNormalizedDistance = ipf.TP_Obstacles[kDirection];
					double velScale = 1.0;
					ASSERT_(secureDistanceEnd>secureDistanceStart);
					if (obsFreeNormalizedDistance<secureDistanceEnd)
					{
						if (obsFreeNormalizedDistance<=secureDistanceStart)
							velScale = 0.0; // security stop
						else velScale = (obsFreeNormalizedDistance-secureDistanceStart)/(secureDistanceEnd-secureDistanceStart);
					}

					// Scale:
					holonomicMovement.speed *= velScale;
				}

				// STEP5: Evaluate each movement to assign them a "evaluation" value.
				// ---------------------------------------------------------------------
				{
					CTimeLoggerEntry tle(m_timelogger,"navigationStep.STEP5_PTGEvaluator");

					STEP5_PTGEvaluator(
						holonomicMovement,
						ipf.TP_Obstacles,
						TPose2D(relTarget),
						ipf.TP_Target,
						newLogRec.infoPerPTG[indexPTG]);
				}

			} // end "valid_TP"
			else
			{
				// Invalid PTG (target out of reachable space):
				// - holonomicMovement= Leave default values
				HLFR = CLogFileRecord_VFF::Create();
			}

			// Logging:
			if (fill_log_record)
			{
				metaprogramming::copy_container_typecasting(ipf.TP_Obstacles, newLogRec.infoPerPTG[indexPTG].TP_Obstacles);
				newLogRec.infoPerPTG[indexPTG].PTG_desc = ptg->getDescription();
				newLogRec.infoPerPTG[indexPTG].TP_Target = CPoint2D(ipf.TP_Target);
				newLogRec.infoPerPTG[indexPTG].HLFR = HLFR;
				newLogRec.infoPerPTG[indexPTG].desiredDirection = holonomicMovement.direction;
				newLogRec.infoPerPTG[indexPTG].desiredSpeed = holonomicMovement.speed;
				newLogRec.infoPerPTG[indexPTG].evaluation = holonomicMovement.evaluation;
				newLogRec.infoPerPTG[indexPTG].timeForTPObsTransformation = 0; // XXX
				newLogRec.infoPerPTG[indexPTG].timeForHolonomicMethod = 0; // XXX
			}

		} // end for each PTG

		// STEP6: After all PTGs have been evaluated, pick the best scored:
		// ---------------------------------------------------------------------
		int nSelectedPTG = 0;
		for (size_t indexPTG=0;indexPTG<nPTGs;indexPTG++)
		{
			if ( holonomicMovements[indexPTG].evaluation > holonomicMovements[nSelectedPTG].evaluation )
				nSelectedPTG = indexPTG;
		}
		const THolonomicMovement & selectedHolonomicMovement = holonomicMovements[nSelectedPTG];

		// STEP7: Get the non-holonomic movement command.
		// ---------------------------------------------------------------------
		{
			CTimeLoggerEntry tle(m_timelogger,"navigationStep.STEP7_NonHolonomicMovement");
			STEP7_GenerateSpeedCommands( selectedHolonomicMovement );
		}

		// ---------------------------------------------------------------------
		//				SEND MOVEMENT COMMAND TO THE ROBOT
		// ---------------------------------------------------------------------
		if ( new_cmd_v == 0.0 && new_cmd_w == 0.0 )
		{
			m_robot.stop();
		}
		else
		{
			if ( !m_robot.changeSpeeds( new_cmd_v, new_cmd_w ) )
			{
				doEmergencyStop("\nERROR calling RobotMotionControl::changeSpeeds!! Stopping robot and finishing navigation\n");
				return;
			}
		}

		// Statistics:
		// ----------------------------------------------------
		// Exponential smoothing (0.3 old / 0.7 new) of execution times.
		float executionTimeValue = (float) executionTime.Tac();
		meanExecutionTime= 0.3f * meanExecutionTime + 0.7f * executionTimeValue;
		meanTotalExecutionTime= 0.3f * meanTotalExecutionTime + 0.7f * ((float)totalExecutionTime.Tac() );
		meanExecutionPeriod = 0.3f * meanExecutionPeriod + 0.7f * min(1.0f, (float)timerForExecutionPeriod.Tac());

		timerForExecutionPeriod.Tic();

		if (m_enableConsoleOutput)
		{
			printf_debug(
				"CMD:%.02fm/s,%.02fd/s \t"
				"T=%.01lfms Exec:%.01lfms|%.01lfms \t"
				"E=%.01lf PTG#%i\n",
					(double)new_cmd_v,
					(double)RAD2DEG( new_cmd_w ),
					1000.0*meanExecutionPeriod,
					1000.0*meanExecutionTime,
					1000.0*meanTotalExecutionTime,
					(double)selectedHolonomicMovement.evaluation,
					nSelectedPTG);
		}

		// ---------------------------------------
		// STEP8: Generate log record
		// ---------------------------------------
		if (fill_log_record)
		{
			m_timelogger.enter("navigationStep.populate_log_info");

			this->loggingGetWSObstaclesAndShape(newLogRec);

			newLogRec.robotOdometryPose = curPose;
			newLogRec.WS_target_relative = CPoint2D(relTarget.x(), relTarget.y());
			newLogRec.v = new_cmd_v;
			newLogRec.w = new_cmd_w;
			newLogRec.nSelectedPTG = nSelectedPTG;
			newLogRec.executionTime = executionTimeValue;
			newLogRec.actual_v = curVL;
			newLogRec.actual_w = curW;
			newLogRec.estimatedExecutionPeriod = meanExecutionPeriod;
			newLogRec.timestamp = tim_start_iteration;
			newLogRec.nPTGs = nPTGs;
			newLogRec.navigatorBehavior = nSelectedPTG;

			m_timelogger.leave("navigationStep.populate_log_info");

			//  Save to log file:
			// --------------------------------------
			m_timelogger.enter("navigationStep.write_log_file");
			if (m_logFile) (*m_logFile) << newLogRec;
			m_timelogger.leave("navigationStep.write_log_file");

			// Set as last log record
			{
				mrpt::synch::CCriticalSectionLocker lock_log(&m_critZoneLastLog);    // Lock
				lastLogRecord = newLogRec; // COPY
			}
		} // if (fill_log_record)

	}
	catch (std::exception &e)
	{
		std::cout << e.what();
		std::cout << "[CAbstractPTGBasedReactive::performNavigationStep] Exceptions!!\n";
	}
	catch (...)
	{
		std::cout << "[CAbstractPTGBasedReactive::performNavigationStep] Unexpected exception!!:\n";
	}
}

/** Scores one candidate TP-space movement, combining six weighted factors
 *  (free distance, sector alignment, final heading error, distance decrease,
 *  command hysteresis, free-space speed) into holonomicMovement.evaluation,
 *  and stores the individual factors into \a log.
 *
 * \param[in,out] holonomicMovement  Candidate; its .evaluation is written.
 * \param in_TPObstacles  Normalized [0,1] TP-space obstacle distances.
 * \param WS_Target       Target pose relative to the robot (workspace).
 * \param TP_Target       Target projected into TP-space.
 * \param[out] log        Receives the six evaluation factors.
 */
void CAbstractPTGBasedReactive::STEP5_PTGEvaluator(
	THolonomicMovement         & holonomicMovement,
	const vector_double        & in_TPObstacles,
	const mrpt::math::TPose2D  & WS_Target,
	const mrpt::math::TPoint2D & TP_Target,
	CLogFileRecord::TInfoPerPTG & log )
{
	const double refDist    = holonomicMovement.PTG->refDistance;
	const double TargetDir    = (TP_Target.x!=0 || TP_Target.y!=0) ? atan2( TP_Target.y, TP_Target.x) : 0.0;
	const int TargetSector = static_cast<int>( holonomicMovement.PTG->alpha2index( TargetDir ) );
	const double TargetDist = TP_Target.norm();
	// Picked movement direction:
	const int kDirection = static_cast<int>( holonomicMovement.PTG->alpha2index( holonomicMovement.direction ) );

	// Coordinates of the trajectory end for the given PTG and "alpha":
	float x,y,phi,t,d;
	d = min( in_TPObstacles[ kDirection ], 0.90f*TargetDist);
	holonomicMovement.PTG->getCPointWhen_d_Is( d, kDirection,x,y,phi,t );

	// Factor 1: Free distance for the chosen PTG and "alpha" in the TP-Space:
	// ----------------------------------------------------------------------
	const double factor1 = in_TPObstacles[kDirection];

	// Factor 2: Distance in sectors:
	// -------------------------------------------
	double dif = std::abs(((double)( TargetSector - kDirection )));
	const double nSectors = (float)in_TPObstacles.size();
	if ( dif > (0.5*nSectors)) dif = nSectors - dif;  // wrap-around sector distance
	const double factor2 = exp(-square( dif / (in_TPObstacles.size()/3.0f))) ;

	// Factor 3: Angle between the robot at the end of the chosen trajectory and the target
	// -------------------------------------------------------------------------------------
	double t_ang = atan2( WS_Target.y - y, WS_Target.x - x );
	t_ang -= phi;
	mrpt::math::wrapToPiInPlace(t_ang);

	const double factor3 = exp(-square( t_ang / (float)(0.5f*M_PI)) );

	// Factor4:		Decrease in euclidean distance between (x,y) and the target:
	//  Moving away of the target is negatively valued
	// ---------------------------------------------------------------------------
	const double dist_eucl_final = std::sqrt(square(WS_Target.x-x)+square(WS_Target.y-y));
	const double dist_eucl_now   = std::sqrt(square(WS_Target.x)+square(WS_Target.y));

	const double factor4 = min(2.0*refDist,max(0.0,((dist_eucl_now - dist_eucl_final)+refDist)))/(2*refDist);

	// ---------
	//	float decrementDistanc = dist_eucl_now - dist_eucl_final;
	//	if (dist_eucl_now>0)
	//			factor4 = min(1.0,min(refDist*2,max(0,decrementDistanc + refDist)) / dist_eucl_now);
	//	else	factor4 = 0;
	// ---------
	//	factor4 = min(2*refDist2,max(0,decrementDistanc + refDist2)) / (2*refDist2);
	//  factor4=  (refDist2 - min( refDist2, dist_eucl ) ) / refDist2;

	// Factor5: Hysteresis:
	// -----------------------------------------------------
	float want_v,want_w;
	holonomicMovement.PTG->directionToMotionCommand( kDirection, want_v,want_w);

	float likely_v = exp( -fabs(want_v-last_cmd_v)/0.10f );
	float likely_w = exp( -fabs(want_w-last_cmd_w)/0.40f );

	const double factor5 = min( likely_v,likely_w );

	// Factor6: Fast when free space
	// -----------------------------------------------------
	float aver_obs = 0;
	for (int i=0; i<in_TPObstacles.size(); i++)
		aver_obs += in_TPObstacles[i];

	aver_obs = aver_obs/in_TPObstacles.size();

	const double factor6 = aver_obs*want_v;

	//  SAVE LOG
	log.evalFactors.resize(6);
	log.evalFactors[0] = factor1;
	log.evalFactors[1] = factor2;
	log.evalFactors[2] = factor3;
	log.evalFactors[3] = factor4;
	log.evalFactors[4] = factor5;
	log.evalFactors[5] = factor6;

	if (holonomicMovement.speed == 0)
	{
		// If no movement has been found -> the worst evaluation:
		holonomicMovement.evaluation = 0;
	}
	else
	{
		// Sum: two cases: *************************************I'm not sure about this...
		if (dif<2	&&	// Heading the target
			in_TPObstacles[kDirection]*0.95f>TargetDist // and free space towards the target
			)
		{
			//	Direct path to target:
			//	holonomicMovement.evaluation = 1.0f + (1 - TargetDist) + factor5 * weight5 + factor6*weight6;
			holonomicMovement.evaluation = 1.0f + (1 - t/15.0f) + factor5 * weights[4] + factor6*weights[5];
		}
		else
		{
			// General case:
			holonomicMovement.evaluation = (
				factor1 * weights[0] +
				factor2 * weights[1] +
				factor3 * weights[2] +
				factor4 * weights[3] +
				factor5 * weights[4] +
				factor6 * weights[5]
				) / ( math::sum(weights));
		}
	}
}

/** Converts the selected holonomic movement into robot speed commands
 *  (new_cmd_v, new_cmd_w): normalized PTG command, speed-based scaling
 *  (clamped to >= 0.5), saturation at robotMax_V_mps / robotMax_W_degps,
 *  and a first-order low-pass filter against the previous command.
 *  Exceptions are caught and only reported via printf_debug.
 *
 * \param in_movement  The winning movement from STEP6.
 */
void CAbstractPTGBasedReactive::STEP7_GenerateSpeedCommands( const THolonomicMovement &in_movement)
{
	try
	{
		// Remember previous command for the hysteresis/low-pass terms.
		last_cmd_v = new_cmd_v;
		last_cmd_w = new_cmd_w;

		if (in_movement.speed == 0)
		{
			// The robot will stop:
			new_cmd_v = new_cmd_w = 0;
		}
		else
		{
			// Take the normalized movement command:
			in_movement.PTG->directionToMotionCommand( in_movement.PTG->alpha2index( in_movement.direction ), new_cmd_v, new_cmd_w );

			// Scale holonomic speeds to real-world one:
			//const double reduction = min(1.0, in_movement.speed / sqrt( square(new_cmd_v) + square(r*new_cmd_w)));
			double reduction = in_movement.speed;
			if (reduction < 0.5)
				reduction = 0.5;

			// To scale:
			new_cmd_v*=reduction;
			new_cmd_w*=reduction;

			// Assure maximum speeds:
			if (fabs(new_cmd_v) > robotMax_V_mps)
			{
				// Scale:
				float F = fabs(robotMax_V_mps / new_cmd_v);
				new_cmd_v *= F;
				new_cmd_w *= F;
			}

			if (fabs(new_cmd_w) > DEG2RAD(robotMax_W_degps))
			{
				// Scale:
				float F = fabs((float)DEG2RAD(robotMax_W_degps) / new_cmd_w);
				new_cmd_v *= F;
				new_cmd_w *= F;
			}

			//First order low-pass filter
			float alfa = meanExecutionPeriod/( meanExecutionPeriod + SPEEDFILTER_TAU);
			new_cmd_v = alfa*new_cmd_v + (1-alfa)*last_cmd_v;
			new_cmd_w = alfa*new_cmd_w + (1-alfa)*last_cmd_w;
		}

		m_timelogger.leave("navigationStep.STEP7_NonHolonomicMovement");
	}
	catch (std::exception &e)
	{
		m_timelogger.leave("navigationStep.STEP7_NonHolonomicMovement");
		printf_debug("[CReactiveNavigationSystem::STEP7_NonHolonomicMovement] Exception:");
		printf_debug((char*)(e.what()));
	}
	catch (...)
	{
		m_timelogger.leave("navigationStep.STEP7_NonHolonomicMovement");
		printf_debug("[CReactiveNavigationSystem::STEP7_NonHolonomicMovement] Unexpected exception!\n");
	}
}