// ------------------------------------------------------
// Test_Kinect
//
// Demo entry point. Spawns a background thread ("thread_grabbing") that
// feeds RGB+D observations through thrPar.new_obs, then runs the main
// loop: KLT feature tracking between consecutive intensity images,
// back-projection of tracked features to 3D via the range image, robust
// SE(3) registration of the current features against the last key-frame's
// features, key-frame insertion into a global coloured point map, and
// live rendering in a CDisplayWindow3D. Returns when the window is
// closed, the user quits, or the grabber thread signals an error.
// ------------------------------------------------------
void Test_Kinect()
{
	// Launch grabbing thread:
	// --------------------------------------------------------
	TThreadParam thrPar;
	std::thread thHandle = std::thread(thread_grabbing, std::ref(thrPar));

	// Wait until data stream starts so we can say for sure the sensor has been
	// initialized OK:
	cout << "Waiting for sensor initialization...\n";
	do
	{
		// thrPar.new_obs is shared with the grabber thread; atomic_load gives
		// a consistent snapshot of the shared_ptr (free-function atomic
		// shared_ptr API).
		CObservation3DRangeScan::Ptr possiblyNewObs =
			std::atomic_load(&thrPar.new_obs);
		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP)
			break;
		else
			std::this_thread::sleep_for(10ms);
	} while (!thrPar.quit);

	// Check error condition: the grabber thread sets "quit" if it failed to
	// open the sensor.
	if (thrPar.quit) return;

	// Feature tracking variables:
	CFeatureList trackedFeats;
	unsigned int step_num = 0;  // Counts processed (new) observations.

	bool SHOW_FEAT_IDS = true;
	bool SHOW_RESPONSES = true;

	CGenericFeatureTrackerAutoPtr tracker;
	// "CFeatureTracker_KL" is by far the most robust implementation for now:
	tracker = CGenericFeatureTrackerAutoPtr(new CFeatureTracker_KL);
	tracker->enableTimeLogger(true);  // Do time profiling.

	// Set of parameters common to any tracker implementation:
	// To see all the existing params and documentation, see
	// mrpt::vision::CGenericFeatureTracker
	// http://reference.mrpt.org/devel/structmrpt_1_1vision_1_1_c_generic_feature_tracker.html
	tracker->extra_params["add_new_features"] =
		1;  // track, AND ALSO, add new features
	tracker->extra_params["add_new_feat_min_separation"] = 25;
	tracker->extra_params["add_new_feat_max_features"] = 150;
	tracker->extra_params["add_new_feat_patch_size"] = 21;

	tracker->extra_params["minimum_KLT_response_to_add"] = 40;
	tracker->extra_params["check_KLT_response_every"] =
		5;  // Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["minimum_KLT_response"] =
		25;  // Re-check the KLT-response to assure features are in good points.
	tracker->extra_params["update_patches_every"] = 0;  // Update patches

	// Specific params for "CFeatureTracker_KL"
	tracker->extra_params["window_width"] = 25;
	tracker->extra_params["window_height"] = 25;

	// Global points map:
	CColouredPointsMap globalPtsMap;
	globalPtsMap.colorScheme.scheme =
		CColouredPointsMap::cmFromIntensityImage;  // Take points color from
	// RGB+D observations
	// globalPtsMap.colorScheme.scheme =
	// CColouredPointsMap::cmFromHeightRelativeToSensorGray;

	// Create window and prepare OpenGL object in the scene:
	// --------------------------------------------------------
	mrpt::gui::CDisplayWindow3D win3D("kinect-3d-slam 3D view", 800, 600);

	win3D.setCameraAzimuthDeg(140);
	win3D.setCameraElevationDeg(20);
	win3D.setCameraZoom(8.0);
	win3D.setFOV(90);
	win3D.setCameraPointingToPoint(2.5, 0, 0);

	// Local (current-frame) point cloud:
	mrpt::opengl::CPointCloudColoured::Ptr gl_points =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points->setPointSize(2.5);

	// Markers for currently-visible landmarks, and for the key-frame corners:
	mrpt::opengl::CSetOfObjects::Ptr gl_curFeats =
		mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();
	mrpt::opengl::CSetOfObjects::Ptr gl_keyframes =
		mrpt::make_aligned_shared<mrpt::opengl::CSetOfObjects>();

	// Accumulated global map point cloud:
	mrpt::opengl::CPointCloudColoured::Ptr gl_points_map =
		mrpt::make_aligned_shared<mrpt::opengl::CPointCloudColoured>();
	gl_points_map->setPointSize(2.0);

	const double aspect_ratio =
		480.0 / 640.0;  // kinect.rows() / double( kinect.cols() );

	// XYZ corner marking the current camera pose:
	mrpt::opengl::CSetOfObjects::Ptr gl_cur_cam_corner =
		mrpt::opengl::stock_objects::CornerXYZSimple(0.4f, 4);

	opengl::COpenGLViewport::Ptr viewInt;
	{
		mrpt::opengl::COpenGLScene::Ptr& scene = win3D.get3DSceneAndLock();

		// Create the Opengl object for the point cloud:
		scene->insert(gl_points_map);
		scene->insert(gl_points);
		scene->insert(gl_curFeats);
		scene->insert(gl_keyframes);
		scene->insert(mrpt::make_aligned_shared<mrpt::opengl::CGridPlaneXY>());

		scene->insert(gl_cur_cam_corner);

		const int VW_WIDTH =
			350;  // Size of the viewport into the window, in pixel units.
		const int VW_HEIGHT = aspect_ratio * VW_WIDTH;

		// Create the Opengl objects for the planar images each in a separate
		// viewport:
		viewInt = scene->createViewport("view2d_int");
		viewInt->setViewportPosition(2, 2, VW_WIDTH, VW_HEIGHT);
		viewInt->setTransparent(true);

		win3D.unlockAccess3DScene();
		win3D.repaint();
	}

	CImage previous_image;  // Intensity image of the previous step.

	// 3D features (camera-frame coords) seen at the last accepted key-frame:
	map<TFeatureID, TPoint3D> lastVisibleFeats;
	std::vector<TPose3D>
		camera_key_frames_path;  // The 6D path of the Kinect camera.
	CPose3D currentCamPose_wrt_last;  // wrt last pose in
	// "camera_key_frames_path"

	bool gl_keyframes_must_refresh =
		true;  // Need to update gl_keyframes from camera_key_frames_path??

	CObservation3DRangeScan::Ptr last_obs;
	string str_status, str_status2;

	while (win3D.isOpen() && !thrPar.quit)
	{
		CObservation3DRangeScan::Ptr possiblyNewObs =
			std::atomic_load(&thrPar.new_obs);
		// Only process if it's really a NEW observation (different timestamp
		// than the last one we handled):
		if (possiblyNewObs && possiblyNewObs->timestamp != INVALID_TIMESTAMP &&
			(!last_obs || possiblyNewObs->timestamp != last_obs->timestamp))
		{
			// It IS a new observation:
			last_obs = possiblyNewObs;

			// Feature tracking -------------------------------------------
			ASSERT_(last_obs->hasIntensityImage);

			CImage theImg;  // The grabbed image:
			theImg = last_obs->intensityImage;

			// Do tracking:
			if (step_num > 1)  // we need "previous_image" to be valid.
			{
				tracker->trackFeatures(previous_image, theImg, trackedFeats);

				// Remove those now out of the image plane:
				CFeatureList::iterator itFeat = trackedFeats.begin();
				while (itFeat != trackedFeats.end())
				{
					const TFeatureTrackStatus status = (*itFeat)->track_status;
					// Drop features whose tracking status is neither TRACKED
					// nor IDLE:
					bool eras =
						(status_TRACKED != status && status_IDLE != status);
					if (!eras)
					{
						// Also, check if it's too close to the image border:
						const float x = (*itFeat)->x;
						const float y = (*itFeat)->y;
						static const float MIN_DIST_MARGIN_TO_STOP_TRACKING =
							10;
						if (x < MIN_DIST_MARGIN_TO_STOP_TRACKING ||
							y < MIN_DIST_MARGIN_TO_STOP_TRACKING ||
							x > (last_obs->cameraParamsIntensity.ncols -
								 MIN_DIST_MARGIN_TO_STOP_TRACKING) ||
							y > (last_obs->cameraParamsIntensity.nrows -
								 MIN_DIST_MARGIN_TO_STOP_TRACKING))
						{
							eras = true;
						}
					}
					if (eras)  // Erase or keep?
						itFeat = trackedFeats.erase(itFeat);
					else
						++itFeat;
				}
			}

			// Create list of 3D features in space, wrt current camera pose:
			// --------------------------------------------------------------------
			map<TFeatureID, TPoint3D> curVisibleFeats;
			for (CFeatureList::iterator itFeat = trackedFeats.begin();
				 itFeat != trackedFeats.end(); ++itFeat)
			{
				// Pixel coordinates in the intensity image:
				const int int_x = (*itFeat)->x;
				const int int_y = (*itFeat)->y;

				// Convert to pixel coords in the range image:
				// APPROXIMATION: Assume coordinates are equal (that's not
				// exact!!)
				const int x = int_x;
				const int y = int_y;

				// Does this (x,y) have valid range data?
				const float d = last_obs->rangeImage(y, x);
				if (d > 0.05 && d < 10.0)  // valid depth window, in meters
				{
					// points3D_* must hold one 3D point per range pixel
					// (row-major):
					ASSERT_(
						size_t(
							last_obs->rangeImage.cols() *
							last_obs->rangeImage.rows()) ==
						last_obs->points3D_x.size());
					const size_t nPt = last_obs->rangeImage.cols() * y + x;
					curVisibleFeats[(*itFeat)->ID] = TPoint3D(
						last_obs->points3D_x[nPt], last_obs->points3D_y[nPt],
						last_obs->points3D_z[nPt]);
				}
			}

			// Load local points map from 3D points + color:
			CColouredPointsMap localPntsMap;
			localPntsMap.colorScheme.scheme =
				CColouredPointsMap::cmFromIntensityImage;
			localPntsMap.loadFromRangeScan(*last_obs);

			// Estimate our current camera pose from feature2feature matching:
			// --------------------------------------------------------------------
			if (!lastVisibleFeats.empty())
			{
				TMatchingPairList corrs;  // pairs of correspondences

				// Pair up features by ID between the last key-frame and now:
				for (map<TFeatureID, TPoint3D>::const_iterator itCur =
						 curVisibleFeats.begin();
					 itCur != curVisibleFeats.end(); ++itCur)
				{
					map<TFeatureID, TPoint3D>::const_iterator itFound =
						lastVisibleFeats.find(itCur->first);
					if (itFound != lastVisibleFeats.end())
					{
						corrs.push_back(TMatchingPair(
							itFound->first, itCur->first, itFound->second.x,
							itFound->second.y, itFound->second.z,
							itCur->second.x, itCur->second.y,
							itCur->second.z));
					}
				}

				if (corrs.size() >= 3)  // minimum for a SE(3) fit
				{
					// Find matchings:
					mrpt::tfest::TSE3RobustParams params;
					params.ransac_minSetSize = 3;
					params.ransac_maxSetSizePct = 6.0 / corrs.size();

					mrpt::tfest::TSE3RobustResult results;
					bool register_ok = false;
					try
					{
						mrpt::tfest::se3_l2_robust(corrs, params, results);
						register_ok = true;
					}
					catch (std::exception&)
					{
						/* Cannot find a minimum number of matches,
						 * inconsistent parameters due to very reduced
						 * numberof matches,etc. */
					}

					const CPose3D relativePose =
						CPose3D(results.transformation);

					str_status = mrpt::format(
						"%d corrs | inliers: %d | rel.pose: %s ",
						int(corrs.size()), int(results.inliers_idx.size()),
						relativePose.asString().c_str());
					str_status2 = string(
						results.inliers_idx.size() == 0
							? "LOST! Please, press 'r' to restart"
							: "");

					// Accept the pose only if the robust fit succeeded and the
					// estimated scale is close to 1 (a rigid transform):
					if (register_ok && std::abs(results.scale - 1.0) < 0.1)
					{
						// Seems a good match:
						if ((relativePose.norm() > KEYFRAMES_MIN_DISTANCE ||
							 std::abs(relativePose.yaw()) >
								 KEYFRAMES_MIN_ANG ||
							 std::abs(relativePose.pitch()) >
								 KEYFRAMES_MIN_ANG ||
							 std::abs(relativePose.roll()) >
								 KEYFRAMES_MIN_ANG))
						{
							// Accept this as a new key-frame pose ------------
							// Append new global pose of this key-frame:
							const CPose3D new_keyframe_global =
								CPose3D(*camera_key_frames_path.rbegin()) +
								relativePose;

							camera_key_frames_path.push_back(
								new_keyframe_global.asTPose());

							gl_keyframes_must_refresh = true;

							currentCamPose_wrt_last =
								CPose3D();  // It's (0,0,0) since the last
							// key-frame is the current pose!
							lastVisibleFeats = curVisibleFeats;

							cout << "Adding new key-frame: pose="
								 << new_keyframe_global << endl;

							// Update global map: append another map at a given
							// position:
							globalPtsMap.insertObservation(
								last_obs.get(), &new_keyframe_global);

							win3D.get3DSceneAndLock();
							gl_points_map->loadFromPointsMap(&globalPtsMap);
							win3D.unlockAccess3DScene();
						}
						else
						{
							// Not far enough from the last key-frame: just
							// track the incremental pose.
							currentCamPose_wrt_last = relativePose;
							// cout << "cur pose: " << currentCamPose_wrt_last
							// << endl;
						}
					}
				}
			}

			if (camera_key_frames_path.empty() || lastVisibleFeats.empty())
			{
				// First iteration (or tracking was reset): bootstrap the
				// key-frame path at the origin.
				camera_key_frames_path.clear();
				camera_key_frames_path.push_back(TPose3D(0, 0, 0, 0, 0, 0));
				gl_keyframes_must_refresh = true;
				lastVisibleFeats = curVisibleFeats;

				// Update global map:
				globalPtsMap.clear();
				globalPtsMap.insertObservation(last_obs.get());

				win3D.get3DSceneAndLock();
				gl_points_map->loadFromPointsMap(&globalPtsMap);
				win3D.unlockAccess3DScene();
			}

			// Save the image for the next step:
			previous_image = theImg;

			// Draw marks on the RGB image:
			theImg.selectTextFont("10x20");
			{  // Tracked feats:
				theImg.drawFeatures(
					trackedFeats, TColor(0, 0, 255), SHOW_FEAT_IDS,
					SHOW_RESPONSES);
				theImg.textOut(
					3, 22,
					format("# feats: %u", (unsigned int)trackedFeats.size()),
					TColor(200, 20, 20));
			}

			// Update visualization ---------------------------------------

			// Show intensity image
			win3D.get3DSceneAndLock();
			viewInt->setImageView(theImg);
			win3D.unlockAccess3DScene();

			// Show 3D points & current visible feats, at the current camera
			// 3D pose "currentCamPose_wrt_last"
			// ---------------------------------------------------------------------
			if (last_obs->hasPoints3D)
			{
				const CPose3D curGlobalPose =
					CPose3D(*camera_key_frames_path.rbegin()) +
					currentCamPose_wrt_last;
				win3D.get3DSceneAndLock();
				// All 3D points:
				gl_points->loadFromPointsMap(&localPntsMap);
				gl_points->setPose(curGlobalPose);
				gl_cur_cam_corner->setPose(curGlobalPose);

				// Current visual landmarks: a small wireframe box per feature,
				// labeled with its ID.
				gl_curFeats->clear();
				for (map<TFeatureID, TPoint3D>::const_iterator it =
						 curVisibleFeats.begin();
					 it != curVisibleFeats.end(); ++it)
				{
					static double D = 0.02;  // half-side of the boxes, meters
					mrpt::opengl::CBox::Ptr box =
						mrpt::make_aligned_shared<mrpt::opengl::CBox>(
							TPoint3D(-D, -D, -D), TPoint3D(D, D, D));

					box->setWireframe(true);
					box->setName(format("%d", int(it->first)));
					box->enableShowName(true);
					box->setLocation(it->second);
					gl_curFeats->insert(box);
				}
				gl_curFeats->setPose(curGlobalPose);

				win3D.unlockAccess3DScene();
				win3D.repaint();
			}

			win3D.get3DSceneAndLock();
			win3D.addTextMessage(
				-100, -20, format("%.02f Hz", thrPar.Hz), TColorf(0, 1, 1),
				100, MRPT_GLUT_BITMAP_HELVETICA_18);
			win3D.unlockAccess3DScene();
			win3D.repaint();

			step_num++;

		}  // end update visualization:

		// Rebuild the key-frame corner markers only when the path changed:
		if (gl_keyframes_must_refresh)
		{
			gl_keyframes_must_refresh = false;
			// cout << "Updating gl_keyframes with " <<
			// camera_key_frames_path.size() << " frames.\n";
			win3D.get3DSceneAndLock();
			gl_keyframes->clear();
			for (size_t i = 0; i < camera_key_frames_path.size(); i++)
			{
				CSetOfObjects::Ptr obj =
					mrpt::opengl::stock_objects::CornerXYZSimple(0.3f, 3);
				obj->setPose(camera_key_frames_path[i]);
				gl_keyframes->insert(obj);
			}
			win3D.unlockAccess3DScene();
		}

		// Process possible keyboard commands:
		// --------------------------------------
		if (win3D.keyHit() && thrPar.pushed_key == 0)
		{
			const int key = tolower(win3D.getPushedKey());

			switch (key)
			{
				// Some of the keys are processed in this thread:
				case 'r':
					// Reset: clear tracking state and the global map.
					lastVisibleFeats.clear();
					camera_key_frames_path.clear();
					gl_keyframes_must_refresh = true;
					globalPtsMap.clear();
					win3D.get3DSceneAndLock();
					gl_points_map->loadFromPointsMap(&globalPtsMap);
					win3D.unlockAccess3DScene();
					break;
				case 's':
				{
					const std::string s = "point_cloud.txt";
					cout << "Dumping 3D point-cloud to: " << s << endl;
					globalPtsMap.save3D_to_text_file(s);
					break;
				}
				case 'o':
					win3D.setCameraZoom(win3D.getCameraZoom() * 1.2);
					win3D.repaint();
					break;
				case 'i':
					win3D.setCameraZoom(win3D.getCameraZoom() / 1.2);
					win3D.repaint();
					break;
				// ...and the rest in the kinect thread:
				default:
					thrPar.pushed_key = key;
					break;
			};
		}

		win3D.get3DSceneAndLock();
		win3D.addTextMessage(
			2, -30,
			format(
				"'s':save point cloud, 'r': reset, 'o'/'i': zoom "
				"out/in, mouse: orbit 3D, ESC: quit"),
			TColorf(1, 1, 1), 110, MRPT_GLUT_BITMAP_HELVETICA_12);
		win3D.addTextMessage(
			2, -50, str_status, TColorf(1, 1, 1), 111,
			MRPT_GLUT_BITMAP_HELVETICA_12);
		win3D.addTextMessage(
			2, -70, str_status2, TColorf(1, 1, 1), 112,
			MRPT_GLUT_BITMAP_HELVETICA_18);
		win3D.unlockAccess3DScene();

		std::this_thread::sleep_for(1ms);
	}

	// Shut down the grabber thread cleanly before returning:
	cout << "Waiting for grabbing thread to exit...\n";
	thrPar.quit = true;
	thHandle.join();
	cout << "Bye!\n";
}
// Run the EKF-SLAM pipeline over a rawlog dataset.
//
// NOTE(review): this function uses the template parameter IMPL (via
// kfslam_traits<IMPL>), so a "template <class IMPL>" header presumably
// precedes it just above this chunk — confirm. Likewise, "win3d" is not
// declared here; it appears to be a file-scope window pointer — confirm.
//
// @param cfgFile        Config file with the "MappingApplication" section
//                       (output dir, log frequencies, ground-truth files...).
// @param rawlogFileName Path to the .rawlog file with the action/observation
//                       pairs to process. Must exist.
//
// Side effects: deletes and recreates OUT_DIR, then writes per-step pose /
// covariance / MATLAB / 3Dscene files, a data-association log, and a
// data-association performance log into it. Optionally shows a live 3D view.
void Run_KF_SLAM(CConfigFile& cfgFile, const std::string& rawlogFileName)
{
	// The EKF-SLAM class:
	// Traits for this KF implementation (2D or 3D)
	using traits_t = kfslam_traits<IMPL>;
	using ekfslam_t = typename traits_t::ekfslam_t;

	ekfslam_t mapping;

	// The rawlog file:
	// ----------------------------------------
	const unsigned int rawlog_offset =
		cfgFile.read_int("MappingApplication", "rawlog_offset", 0);

	const unsigned int SAVE_LOG_FREQUENCY =
		cfgFile.read_int("MappingApplication", "SAVE_LOG_FREQUENCY", 1);

	const bool SAVE_DA_LOG =
		cfgFile.read_bool("MappingApplication", "SAVE_DA_LOG", true);

	const bool SAVE_3D_SCENES =
		cfgFile.read_bool("MappingApplication", "SAVE_3D_SCENES", true);
	const bool SAVE_MAP_REPRESENTATIONS = cfgFile.read_bool(
		"MappingApplication", "SAVE_MAP_REPRESENTATIONS", true);
	bool SHOW_3D_LIVE =
		cfgFile.read_bool("MappingApplication", "SHOW_3D_LIVE", false);
	const bool CAMERA_3DSCENE_FOLLOWS_ROBOT = cfgFile.read_bool(
		"MappingApplication", "CAMERA_3DSCENE_FOLLOWS_ROBOT", false);

#if !MRPT_HAS_WXWIDGETS
	// No GUI support compiled in: force the live view off.
	SHOW_3D_LIVE = false;
#endif

	string OUT_DIR = cfgFile.read_string(
		"MappingApplication", "logOutput_dir", "OUT_KF-SLAM");
	string ground_truth_file =
		cfgFile.read_string("MappingApplication", "ground_truth_file", "");
	string ground_truth_file_robot = cfgFile.read_string(
		"MappingApplication", "ground_truth_file_robot", "");

	string ground_truth_data_association = cfgFile.read_string(
		"MappingApplication", "ground_truth_data_association", "");

	cout << "RAWLOG FILE:" << endl << rawlogFileName << endl;
	ASSERT_FILE_EXISTS_(rawlogFileName);
	CFileGZInputStream rawlogFile(rawlogFileName);

	cout << "---------------------------------------------------" << endl
		 << endl;

	// Start from a clean output directory:
	deleteFilesInDirectory(OUT_DIR);
	createDirectory(OUT_DIR);

	// Load the config options for mapping:
	// ----------------------------------------
	mapping.loadOptions(cfgFile);
	mapping.KF_options.dumpToConsole();
	mapping.options.dumpToConsole();

	// debug:
	// mapping.KF_options.use_analytic_observation_jacobian = true;
	// mapping.KF_options.use_analytic_transition_jacobian = true;
	// mapping.KF_options.debug_verify_analytic_jacobians = true;

	// Is there ground truth of the robot poses??
	CMatrixDouble GT_PATH(0, 0);
	if (ground_truth_file_robot.size() && fileExists(ground_truth_file_robot))
	{
		GT_PATH.loadFromTextFile(ground_truth_file_robot);
		// Expected format: one row per step, 6 columns (a 6D pose).
		ASSERT_(GT_PATH.rows() > 0 && GT_PATH.cols() == 6);
	}

	// Is there a ground truth file of the data association?
	std::map<double, std::vector<int>>
		GT_DA;  // Map: timestamp -> vector(index in observation -> real index)
	mrpt::containers::bimap<int, int>
		DA2GTDA_indices;  // Landmark indices bimapping: SLAM DA <--->
	// GROUND TRUTH DA
	if (!ground_truth_data_association.empty() &&
		fileExists(ground_truth_data_association))
	{
		CMatrixDouble mGT_DA;
		mGT_DA.loadFromTextFile(ground_truth_data_association);
		ASSERT_ABOVEEQ_(mGT_DA.cols(), 3);
		// Convert the loaded matrix into a std::map in GT_DA:
		// Columns: [0]=timestamp, [1]=index in observation, [2]=real index.
		for (int i = 0; i < mGT_DA.rows(); i++)
		{
			std::vector<int>& v = GT_DA[mGT_DA(i, 0)];
			if (v.size() <= mGT_DA(i, 1)) v.resize(mGT_DA(i, 1) + 1);
			v[mGT_DA(i, 1)] = mGT_DA(i, 2);
		}
		cout << "Loaded " << GT_DA.size()
			 << " entries from DA ground truth file\n";
	}

	// Create output file for DA perf:
	std::ofstream out_da_performance_log;
	{
		const std::string f = std::string(
			OUT_DIR + std::string("/data_association_performance.log"));
		out_da_performance_log.open(f.c_str());
		ASSERTMSG_(
			out_da_performance_log.is_open(),
			std::string("Error writing to: ") + f);

		// Header:
		out_da_performance_log
			<< "% TIMESTAMP INDEX_IN_OBS TruePos "
			   "FalsePos TrueNeg FalseNeg NoGroundTruthSoIDontKnow \n"
			<< "%--------------------------------------------------------------"
			   "--------------------------------------------------\n";
	}

	if (SHOW_3D_LIVE)
	{
		win3d = mrpt::make_aligned_shared<mrpt::gui::CDisplayWindow3D>(
			"KF-SLAM live view", 800, 500);

		win3d->addTextMessage(
			0.01, 0.96, "Red: Estimated path", TColorf(0.8f, 0.8f, 0.8f), 100,
			MRPT_GLUT_BITMAP_HELVETICA_10);
		win3d->addTextMessage(
			0.01, 0.93, "Black: Ground truth path", TColorf(0.8f, 0.8f, 0.8f),
			101, MRPT_GLUT_BITMAP_HELVETICA_10);
	}

	// Create DA-log output file:
	std::ofstream out_da_log;
	if (SAVE_DA_LOG)
	{
		const std::string f =
			std::string(OUT_DIR + std::string("/data_association.log"));
		out_da_log.open(f.c_str());
		ASSERTMSG_(
			out_da_log.is_open(), std::string("Error writing to: ") + f);

		// Header:
		out_da_log << "% TIMESTAMP INDEX_IN_OBS ID "
					  " RANGE(m) YAW(rad) PITCH(rad) \n"
				   << "%-------------------------------------------------------"
					  "-------------------------------------\n";
	}

	// The main loop:
	// ---------------------------------------
	CActionCollection::Ptr action;
	CSensoryFrame::Ptr observations;
	size_t rawlogEntry = 0, step = 0;

	vector<TPose3D> meanPath;  // The estimated path
	typename traits_t::posepdf_t robotPose;
	// state_length==3 means a 2D pose (x y yaw); otherwise 3D (quaternion):
	const bool is_pose_3d = robotPose.state_length != 3;

	std::vector<typename IMPL::landmark_point_t> LMs;
	std::map<unsigned int, CLandmark::TLandmarkID> LM_IDs;
	CMatrixDouble fullCov;
	CVectorDouble fullState;
	CTicTac kftictac;  // Timer for per-iteration KF timing.

	auto rawlogArch = mrpt::serialization::archiveFrom(rawlogFile);

	for (;;)
	{
		// Allow the user to abort with ESC from the console:
		if (os::kbhit())
		{
			char pushKey = os::getch();
			if (27 == pushKey) break;
		}

		// Load action/observation pair from the rawlog:
		// --------------------------------------------------
		if (!CRawlog::readActionObservationPair(
				rawlogArch, action, observations, rawlogEntry))
			break;  // file EOF

		if (rawlogEntry >= rawlog_offset)
		{
			// Process the action and observations:
			// --------------------------------------------
			kftictac.Tic();

			mapping.processActionObservation(action, observations);

			const double tim_kf_iter = kftictac.Tac();

			// Get current state:
			// -------------------------------
			mapping.getCurrentState(
				robotPose, LMs, LM_IDs, fullState, fullCov);
			cout << "Mean pose: " << endl << robotPose.mean << endl;
			cout << "# of landmarks in the map: " << LMs.size() << endl;

			// Get the mean robot pose as 3D:
			const CPose3D robotPoseMean3D = CPose3D(robotPose.mean);

			// Build the path:
			meanPath.push_back(robotPoseMean3D.asTPose());

			// Save mean pose:
			if (!(step % SAVE_LOG_FREQUENCY))
			{
				const auto p = robotPose.mean.asVectorVal();
				p.saveToTextFile(
					OUT_DIR +
					format("/robot_pose_%05u.txt", (unsigned int)step));
			}

			// Save full cov:
			if (!(step % SAVE_LOG_FREQUENCY))
			{
				fullCov.saveToTextFile(
					OUT_DIR +
					format("/full_cov_%05u.txt", (unsigned int)step));
			}

			// Generate Data Association log?
			if (SAVE_DA_LOG)
			{
				const typename ekfslam_t::TDataAssocInfo& da =
					mapping.getLastDataAssociation();

				const CObservationBearingRange::Ptr obs =
					observations
						->getObservationByClass<CObservationBearingRange>();
				if (obs)
				{
					const CObservationBearingRange* obsRB = obs.get();
					const double tim =
						mrpt::system::timestampToDouble(obsRB->timestamp);

					for (size_t i = 0; i < obsRB->sensedData.size(); i++)
					{
						auto it = da.results.associations.find(i);
						int assoc_ID_in_SLAM;
						if (it != da.results.associations.end())
							assoc_ID_in_SLAM = it->second;
						else
						{
							// It should be a newly created LM:
							auto itNewLM =
								da.newly_inserted_landmarks.find(i);
							if (itNewLM != da.newly_inserted_landmarks.end())
								assoc_ID_in_SLAM = itNewLM->second;
							else
								assoc_ID_in_SLAM = -1;  // unknown/unmatched
						}

						out_da_log << format(
							"%35.22f %8i %10i %10f %12f %12f\n", tim, (int)i,
							assoc_ID_in_SLAM,
							(double)obsRB->sensedData[i].range,
							(double)obsRB->sensedData[i].yaw,
							(double)obsRB->sensedData[i].pitch);
					}
				}
			}

			// Save report on DA performance: classify each sensed landmark as
			// TP/FP/TN/FN against the ground-truth data association.
			{
				const typename ekfslam_t::TDataAssocInfo& da =
					mapping.getLastDataAssociation();

				const CObservationBearingRange::Ptr obs =
					observations
						->getObservationByClass<CObservationBearingRange>();
				if (obs)
				{
					const CObservationBearingRange* obsRB = obs.get();
					const double tim =
						mrpt::system::timestampToDouble(obsRB->timestamp);

					// NOTE(review): GT lookup uses exact double equality on
					// the timestamp key — relies on timestamps being written
					// and read back identically. Confirm against the GT file
					// generator.
					auto itDA = GT_DA.find(tim);

					for (size_t i = 0; i < obsRB->sensedData.size(); i++)
					{
						bool is_FP = false, is_TP = false, is_FN = false,
							 is_TN = false;

						if (itDA != GT_DA.end())
						{
							const std::vector<int>& vDA = itDA->second;
							ASSERT_BELOW_(i, vDA.size());
							const int GT_ASSOC = vDA[i];

							auto it = da.results.associations.find(i);
							if (it != da.results.associations.end())
							{
								// This observation was assigned the already
								// existing LM in the map: "it->second"
								// TruePos -> If that LM index corresponds to
								// that in the GT (with index mapping):

								// mrpt::containers::bimap<int,int>
								// DA2GTDA_indices;
								// Landmark indices bimapping: SLAM DA <--->
								// GROUND TRUTH DA
								if (DA2GTDA_indices.hasKey(it->second))
								{
									const int slam_asigned_LM_idx =
										DA2GTDA_indices.direct(it->second);
									if (slam_asigned_LM_idx == GT_ASSOC)
										is_TP = true;
									else
										is_FP = true;
								}
								else
								{
									// Is this case possible? Assigned to an
									// index not ever seen for the first time
									// with a GT....
									// Just in case:
									is_FP = true;
								}
							}
							else
							{
								// No pairing, but should be a newly created
								// LM:
								auto itNewLM =
									da.newly_inserted_landmarks.find(i);
								if (itNewLM !=
									da.newly_inserted_landmarks.end())
								{
									const int new_LM_in_SLAM =
										itNewLM->second;

									// Was this really a NEW LM not observed
									// before?
									if (DA2GTDA_indices.hasValue(GT_ASSOC))
									{
										// GT says this LM was already
										// observed, so it shouldn't appear
										// here as new:
										is_FN = true;
									}
									else
									{
										// Really observed for the first time:
										is_TN = true;
										DA2GTDA_indices.insert(
											new_LM_in_SLAM, GT_ASSOC);
									}
								}
								else
								{
									// Not associated neither inserted:
									// Shouldn't really never arrive here.
								}
							}
						}

						// "% TIMESTAMP INDEX_IN_OBS
						// TruePos FalsePos TrueNeg FalseNeg
						// NoGroundTruthSoIDontKnow \n"
						out_da_performance_log << format(
							"%35.22f %13i %8i %8i %8i %8i %8i\n", tim,
							(int)i, (int)(is_TP ? 1 : 0),
							(int)(is_FP ? 1 : 0), (int)(is_TN ? 1 : 0),
							(int)(is_FN ? 1 : 0),
							(int)(!is_FP && !is_TP && !is_FN && !is_TN
									  ? 1
									  : 0));
					}
				}
			}

			// Save map to file representations?
			if (SAVE_MAP_REPRESENTATIONS && !(step % SAVE_LOG_FREQUENCY))
			{
				mapping.saveMapAndPath2DRepresentationAsMATLABFile(
					OUT_DIR +
					format("/slam_state_%05u.m", (unsigned int)step));
			}

			// Save 3D view of the filter state:
			if (win3d || (SAVE_3D_SCENES && !(step % SAVE_LOG_FREQUENCY)))
			{
				COpenGLScene::Ptr scene3D =
					mrpt::make_aligned_shared<COpenGLScene>();
				{
					opengl::CGridPlaneXY::Ptr grid =
						mrpt::make_aligned_shared<opengl::CGridPlaneXY>(
							-1000, 1000, -1000, 1000, 0, 5);
					grid->setColor(0.4, 0.4, 0.4);
					scene3D->insert(grid);
				}

				// Robot path:
				{
					opengl::CSetOfLines::Ptr linesPath =
						mrpt::make_aligned_shared<opengl::CSetOfLines>();
					linesPath->setColor(1, 0, 0);

					TPose3D init_pose;
					if (!meanPath.empty())
						init_pose = CPose3D(meanPath[0]).asTPose();

					int path_decim = 0;  // draw a corner every ~10 poses
					for (auto& it : meanPath)
					{
						linesPath->appendLine(init_pose, it);
						init_pose = it;

						if (++path_decim > 10)
						{
							path_decim = 0;
							mrpt::opengl::CSetOfObjects::Ptr xyz =
								mrpt::opengl::stock_objects::
									CornerXYZSimple(0.3f, 2.0f);
							xyz->setPose(CPose3D(it));
							scene3D->insert(xyz);
						}
					}
					scene3D->insert(linesPath);

					// finally a big corner for the latest robot pose:
					{
						mrpt::opengl::CSetOfObjects::Ptr xyz =
							mrpt::opengl::stock_objects::CornerXYZSimple(
								1.0, 2.5);
						xyz->setPose(robotPoseMean3D);
						scene3D->insert(xyz);
					}

					// The camera pointing to the current robot pose:
					if (CAMERA_3DSCENE_FOLLOWS_ROBOT)
					{
						win3d->setCameraPointingToPoint(
							robotPoseMean3D.x(), robotPoseMean3D.y(),
							robotPoseMean3D.z());
					}
				}

				// Do we have a ground truth?
				if (GT_PATH.cols() == 6 || GT_PATH.cols() == 3)
				{
					opengl::CSetOfLines::Ptr GT_path =
						mrpt::make_aligned_shared<opengl::CSetOfLines>();
					GT_path->setColor(0, 0, 0);
					size_t N =
						std::min((int)GT_PATH.rows(), (int)meanPath.size());

					if (GT_PATH.cols() == 6)
					{
						double gtx0 = 0, gty0 = 0, gtz0 = 0;
						for (size_t i = 0; i < N; i++)
						{
							const CPose3D p(
								GT_PATH(i, 0), GT_PATH(i, 1), GT_PATH(i, 2),
								GT_PATH(i, 3), GT_PATH(i, 4),
								GT_PATH(i, 5));

							GT_path->appendLine(
								gtx0, gty0, gtz0, p.x(), p.y(), p.z());
							gtx0 = p.x();
							gty0 = p.y();
							gtz0 = p.z();
						}
					}
					else if (GT_PATH.cols() == 3)
					{
						double gtx0 = 0, gty0 = 0;
						for (size_t i = 0; i < N; i++)
						{
							const CPose2D p(
								GT_PATH(i, 0), GT_PATH(i, 1),
								GT_PATH(i, 2));

							GT_path->appendLine(
								gtx0, gty0, 0, p.x(), p.y(), 0);
							gtx0 = p.x();
							gty0 = p.y();
						}
					}
					scene3D->insert(GT_path);
				}

				// Draw latest data association:
				{
					const typename ekfslam_t::TDataAssocInfo& da =
						mapping.getLastDataAssociation();

					mrpt::opengl::CSetOfLines::Ptr lins =
						mrpt::make_aligned_shared<
							mrpt::opengl::CSetOfLines>();
					lins->setLineWidth(1.2f);
					lins->setColor(1, 1, 1);
					for (auto it = da.results.associations.begin();
						 it != da.results.associations.end(); ++it)
					{
						const prediction_index_t idxPred = it->second;
						// This index must match the internal list of
						// features in the map:
						typename ekfslam_t::KFArray_FEAT featMean;
						mapping.getLandmarkMean(idxPred, featMean);

						TPoint3D featMean3D;
						traits_t::landmark_to_3d(featMean, featMean3D);

						// Line: robot -> landmark:
						lins->appendLine(
							robotPoseMean3D.x(), robotPoseMean3D.y(),
							robotPoseMean3D.z(), featMean3D.x, featMean3D.y,
							featMean3D.z);
					}
					scene3D->insert(lins);
				}

				// The current state of KF-SLAM:
				{
					opengl::CSetOfObjects::Ptr objs =
						mrpt::make_aligned_shared<opengl::CSetOfObjects>();
					mapping.getAs3DObject(objs);
					scene3D->insert(objs);
				}

				if (win3d)
				{
					mrpt::opengl::COpenGLScene::Ptr& scn =
						win3d->get3DSceneAndLock();
					scn = scene3D;

					// Update text messages:
					win3d->addTextMessage(
						0.02, 0.02,
						format(
							"Step %u - Landmarks in the map: %u",
							(unsigned int)step, (unsigned int)LMs.size()),
						TColorf(1, 1, 1), 0, MRPT_GLUT_BITMAP_HELVETICA_12);

					win3d->addTextMessage(
						0.02, 0.06,
						format(
							is_pose_3d
								? "Estimated pose: (x y z qr qx qy qz) = %s"
								: "Estimated pose: (x y yaw) = %s",
							robotPose.mean.asString().c_str()),
						TColorf(1, 1, 1), 1, MRPT_GLUT_BITMAP_HELVETICA_12);

					// Running average of the KF iteration rate over the last
					// 50 steps:
					static vector<double> estHz_vals;
					const double curHz = 1.0 / std::max(1e-9, tim_kf_iter);
					estHz_vals.push_back(curHz);
					if (estHz_vals.size() > 50)
						estHz_vals.erase(estHz_vals.begin());
					const double meanHz = mrpt::math::mean(estHz_vals);

					win3d->addTextMessage(
						0.02, 0.10,
						format(
							"Iteration time: %7ss",
							mrpt::system::unitsFormat(tim_kf_iter).c_str()),
						TColorf(1, 1, 1), 2, MRPT_GLUT_BITMAP_HELVETICA_12);

					win3d->addTextMessage(
						0.02, 0.14,
						format(
							"Execution rate: %7sHz",
							mrpt::system::unitsFormat(meanHz).c_str()),
						TColorf(1, 1, 1), 3, MRPT_GLUT_BITMAP_HELVETICA_12);

					win3d->unlockAccess3DScene();
					win3d->repaint();
				}

				if (SAVE_3D_SCENES && !(step % SAVE_LOG_FREQUENCY))
				{
					// Save to file:
					CFileGZOutputStream f(
						OUT_DIR +
						format(
							"/kf_state_%05u.3Dscene", (unsigned int)step));
					mrpt::serialization::archiveFrom(f) << *scene3D;
				}
			}

			// Free rawlog items memory:
			// --------------------------------------------
			action.reset();
			observations.reset();

		}  // (rawlogEntry>=rawlog_offset)

		cout << format(
			"\nStep %u - Rawlog entries processed: %i\n", (unsigned int)step,
			(unsigned int)rawlogEntry);

		step++;
	};  // end "while(1)"

	// Partitioning experiment: Only for 6D SLAM:
	traits_t::doPartitioningExperiment(mapping, fullCov, OUT_DIR);

	// Is there ground truth of landmarks positions??
	if (ground_truth_file.size() && fileExists(ground_truth_file))
	{
		CMatrixFloat GT(0, 0);
		try
		{
			GT.loadFromTextFile(ground_truth_file);
		}
		catch (const std::exception& e)
		{
			cerr << "Ignoring the following error loading ground truth file: "
				 << mrpt::exception_to_str(e) << endl;
		}

		if (GT.rows() > 0 && !LMs.empty())
		{
			// Each row has:
			// [0] [1] [2] [6]
			//  x   y   z   ID
			CVectorDouble ERRS(0);
			for (size_t i = 0; i < LMs.size(); i++)
			{
				// Find the entry in the GT for this mapped LM:
				bool found = false;
				for (int r = 0; r < GT.rows(); r++)
				{
					if (LM_IDs[i] == GT(r, 6))
					{
						const CPoint3D gtPt(GT(r, 0), GT(r, 1), GT(r, 2));
						ERRS.push_back(gtPt.distanceTo(
							CPoint3D(TPoint3D(LMs[i]))));  // All these
						// conversions are to make it work with either
						// CPoint3D & TPoint2D
						found = true;
						break;
					}
				}
				if (!found)
				{
					cerr << "Ground truth entry not found for landmark ID:"
						 << LM_IDs[i] << endl;
				}
			}

			printf("ERRORS VS. GROUND TRUTH:\n");
			printf("Mean Error: %f meters\n", math::mean(ERRS));
			printf("Minimum error: %f meters\n", math::minimum(ERRS));
			printf("Maximum error: %f meters\n", math::maximum(ERRS));
		}
	}  // end if GT

	cout << "********* KF-SLAM finished! **********" << endl;
	if (win3d)
	{
		cout << "\n Close the 3D window to quit the application.\n";
		win3d->waitForKey();
	}
}