Transform Odometry::process(const SensorData & data, OdometryInfo * info)
{
    if(_pose.isNull())
    {
        _pose.setIdentity(); // initialized
    }

    UASSERT(!data.image().empty());
    if(dynamic_cast<OdometryMono*>(this) == 0)
    {
        UASSERT(!data.depthOrRightImage().empty());
    }

    if(data.fx() <= 0 || data.fyOrBaseline() <= 0)
    {
        UERROR("Rectified images required! Calibrate your camera. (fx=%f, fy/baseline=%f, cx=%f, cy=%f)",
                data.fx(), data.fyOrBaseline(), data.cx(), data.cy());
        return Transform();
    }

    UTimer time;
    Transform t = this->computeTransform(data, info);

    if(info)
    {
        info->time = time.elapsed();
        info->lost = t.isNull();
    }

    if(!t.isNull())
    {
        _resetCurrentCount = _resetCountdown;

        if(_force2D)
        {
            float x,y,z, roll,pitch,yaw;
            t.getTranslationAndEulerAngles(x, y, z, roll, pitch, yaw);
            t = Transform(x,y,0, 0,0,yaw);
        }

        return _pose *= t; // updated
    }
    else if(_resetCurrentCount > 0)
    {
        UWARN("Odometry lost! Odometry will be reset after next %d consecutive unsuccessful odometry updates...", _resetCurrentCount);

        --_resetCurrentCount;
        if(_resetCurrentCount == 0)
        {
            UWARN("Odometry automatically reset to latest pose!");
            this->reset(_pose);
        }
    }
    return Transform();
}
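// --- Illustrative sketch (not part of RTAB-Map) ---
// A minimal, self-contained version of the _force2D projection used above:
// the 3D incremental transform is flattened onto the XY plane, keeping only
// x, y and yaw. A ZYX Euler convention is assumed, so yaw is recovered from
// the rotation matrix as atan2(r10, r00). Names here are hypothetical.
#include <cmath>

struct Pose2D
{
    double x, y, yaw;
};

Pose2D projectTo2D(const double r[3][3], double x, double y, double /*z dropped*/)
{
    Pose2D p;
    p.x = x;
    p.y = y;
    p.yaw = std::atan2(r[1][0], r[0][0]); // yaw of a ZYX rotation matrix
    return p;
}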
// return a non-null transform if odometry is correctly computed
Transform OdometryDVO::computeTransform(
        SensorData & data,
        const Transform & guess,
        OdometryInfo * info)
{
    Transform t;
#ifdef RTABMAP_DVO
    UTimer timer;

    if(data.imageRaw().empty() ||
        data.imageRaw().rows != data.depthOrRightRaw().rows ||
        data.imageRaw().cols != data.depthOrRightRaw().cols)
    {
        UERROR("Not supported input!");
        return t;
    }

    if(!(data.cameraModels().size() == 1 && data.cameraModels()[0].isValidForReprojection()))
    {
        UERROR("Invalid camera model! Only a single RGB-D camera is supported by DVO. Try another odometry approach.");
        return t;
    }

    if(dvo_ == 0)
    {
        dvo::DenseTracker::Config cfg = dvo::DenseTracker::getDefaultConfig();
        dvo_ = new dvo::DenseTracker(cfg);
    }

    cv::Mat grey, grey_s16, depth_inpainted, depth_mask, depth_mono, depth_float;
    if(data.imageRaw().type() != CV_32FC1)
    {
        if(data.imageRaw().type() == CV_8UC3)
        {
            cv::cvtColor(data.imageRaw(), grey, CV_BGR2GRAY);
        }
        else
        {
            grey = data.imageRaw();
        }
        grey.convertTo(grey_s16, CV_32F);
    }
    else
    {
        grey_s16 = data.imageRaw();
    }

    // make sure all zeros are NaN
    if(data.depthRaw().type() == CV_32FC1)
    {
        depth_float = data.depthRaw();
        for(int i=0; i<depth_float.rows; ++i)
        {
            for(int j=0; j<depth_float.cols; ++j)
            {
                float & d = depth_float.at<float>(i,j);
                if(d == 0.0f)
                {
                    d = NAN;
                }
            }
        }
    }
    else if(data.depthRaw().type() == CV_16UC1)
    {
        depth_float = cv::Mat(data.depthRaw().size(), CV_32FC1);
        for(int i=0; i<data.depthRaw().rows; ++i)
        {
            for(int j=0; j<data.depthRaw().cols; ++j)
            {
                float d = float(data.depthRaw().at<unsigned short>(i,j))/1000.0f;
                depth_float.at<float>(i, j) = d==0.0f?NAN:d;
            }
        }
    }
    else
    {
        UFATAL("Unknown depth format!");
    }

    if(camera_ == 0)
    {
        dvo::core::IntrinsicMatrix intrinsics = dvo::core::IntrinsicMatrix::create(
                data.cameraModels()[0].fx(),
                data.cameraModels()[0].fy(),
                data.cameraModels()[0].cx(),
                data.cameraModels()[0].cy());
        camera_ = new dvo::core::RgbdCameraPyramid(
                data.cameraModels()[0].imageWidth(),
                data.cameraModels()[0].imageHeight(),
                intrinsics);
    }

    dvo::core::RgbdImagePyramid * current = new dvo::core::RgbdImagePyramid(*camera_, grey_s16, depth_float);

    cv::Mat covariance;
    if(reference_ == 0)
    {
        reference_ = current;
        if(!lost_)
        {
            t.setIdentity();
        }
        covariance = cv::Mat::eye(6,6,CV_64FC1) * 9999.0;
    }
    else
    {
        dvo::DenseTracker::Result result;
        dvo_->match(*reference_, *current, result);
        t = Transform::fromEigen3d(result.Transformation);

        if(result.Information(0,0) > 0.0 && result.Information(0,0) != 1.0)
        {
            lost_ = false;
            cv::Mat information = cv::Mat::eye(6,6, CV_64FC1);
            memcpy(information.data, result.Information.data(), 36*sizeof(double));
            covariance = information.inv();
            covariance *= 100.0; // to be in the same scale as loop closure detection

            Transform currentMotion = t;
            t = motionFromKeyFrame_.inverse() * t;

            // TODO make parameters?
            if(currentMotion.getNorm() > 0.01 || currentMotion.getAngle() > 0.01)
            {
                if(info)
                {
                    info->keyFrameAdded = true;
                }
                // new keyframe
                delete reference_;
                reference_ = current;
                motionFromKeyFrame_.setIdentity();
            }
            else
            {
                delete current;
                motionFromKeyFrame_ = currentMotion;
            }
        }
        else
        {
            lost_ = true;
            delete reference_;
            delete current;
            reference_ = 0; // this will make tracking restart from the next frame
            motionFromKeyFrame_.setIdentity();
            t.setNull();
            covariance = cv::Mat::eye(6,6,CV_64FC1) * 9999.0;
            UWARN("dvo failed to estimate motion, tracking will be reinitialized on next frame.");
        }
    }

    const Transform & localTransform = data.cameraModels()[0].localTransform();
    if(!t.isNull() && !t.isIdentity() && !localTransform.isIdentity() && !localTransform.isNull())
    {
        // from camera frame to base frame
        t = localTransform * t * localTransform.inverse();
    }

    if(info)
    {
        info->type = (int)kTypeDVO;
        info->covariance = covariance;
    }

    UINFO("Odom update time = %fs", timer.elapsed());
#else
    UERROR("RTAB-Map is not built with DVO support! Select another visual odometry approach.");
#endif
    return t;
}
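// --- Illustrative sketch (not part of RTAB-Map) ---
// The "camera frame to base frame" step above is a conjugation: a motion T
// estimated in the camera frame is expressed in the robot base frame through
// the static camera mounting transform L (base -> camera), i.e.
// T_base = L * T_cam * L^-1. A standalone Eigen version, assuming Eigen is
// available (it is a dependency of DVO); names are hypothetical.
#include <Eigen/Geometry>

Eigen::Isometry3d cameraToBaseMotion(
        const Eigen::Isometry3d & L,     // base_T_camera (local/mounting transform)
        const Eigen::Isometry3d & tCam)  // incremental motion in the camera frame
{
    return L * tCam * L.inverse(); // incremental motion in the base frame
}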
int main(int argc, char * argv[])
{
    signal(SIGABRT, &sighandler);
    signal(SIGTERM, &sighandler);
    signal(SIGINT, &sighandler);

    /*for(int i=0; i<argc; i++)
    {
        printf("argv[%d] = %s\n", i, argv[i]);
    }*/
    const ParametersMap & defaultParameters = Parameters::getDefaultParameters();
    if(argc < 2)
    {
        showUsage();
    }
    else if(argc == 2 && strcmp(argv[1], "-v") == 0)
    {
        printf("%s\n", Rtabmap::getVersion().c_str());
        exit(0);
    }
    else if(argc == 2 && strcmp(argv[1], "-default_params") == 0)
    {
        for(ParametersMap::const_iterator iter = defaultParameters.begin(); iter!=defaultParameters.end(); ++iter)
        {
            printf("%s=%s\n", iter->first.c_str(), iter->second.c_str());
        }
        exit(0);
    }
    printf("\n");

    std::string path;
    float timeThreshold = 0.0;
    float rate = 0.0;
    int loopDataset = 0;
    int repeat = 0;
    bool createGT = false;
    int imageWidth = 0;
    int imageHeight = 0;
    int startAt = 1;
    ParametersMap pm;
    ULogger::Level logLevel = ULogger::kError;
    ULogger::Level exitLevel = ULogger::kFatal;

    for(int i=1; i<argc; ++i)
    {
        if(i == argc-1)
        {
            // The last argument must be the path
            path = argv[i];
            if(!UDirectory::exists(path.c_str()) && !UFile::exists(path.c_str()))
            {
                printf("Path not valid : %s\n", path.c_str());
                showUsage();
                exit(1);
            }
            break;
        }
        if(strcmp(argv[i], "-t") == 0)
        {
            ++i;
            if(i < argc)
            {
                timeThreshold = std::atof(argv[i]);
                if(timeThreshold < 0) { showUsage(); }
            }
            else { showUsage(); }
            continue;
        }
        if(strcmp(argv[i], "-rate") == 0)
        {
            ++i;
            if(i < argc)
            {
                rate = std::atof(argv[i]);
                if(rate < 0) { showUsage(); }
            }
            else { showUsage(); }
            continue;
        }
        if(strcmp(argv[i], "-rateHz") == 0)
        {
            ++i;
            if(i < argc)
            {
                rate = std::atof(argv[i]);
                if(rate < 0) { showUsage(); }
                else if(rate) { rate = 1/rate; }
            }
            else { showUsage(); }
            continue;
        }
        if(strcmp(argv[i], "-repeat") == 0)
        {
            ++i;
            if(i < argc)
            {
                repeat = std::atoi(argv[i]);
                if(repeat < 1) { showUsage(); }
            }
            else { showUsage(); }
            continue;
        }
        if(strcmp(argv[i], "-image_width") == 0)
        {
            ++i;
            if(i < argc)
            {
                imageWidth = std::atoi(argv[i]);
                if(imageWidth < 0) { showUsage(); }
            }
            else { showUsage(); }
            continue;
        }
        if(strcmp(argv[i], "-image_height") == 0)
        {
            ++i;
            if(i < argc)
            {
                imageHeight = std::atoi(argv[i]);
                if(imageHeight < 0) { showUsage(); }
            }
            else { showUsage(); }
            continue;
        }
        if(strcmp(argv[i], "-start_at") == 0)
        {
            ++i;
            if(i < argc)
            {
                startAt = std::atoi(argv[i]);
                if(startAt < 0) { showUsage(); }
            }
            else { showUsage(); }
            continue;
        }
        if(strcmp(argv[i], "-createGT") == 0)
        {
            createGT = true;
            continue;
        }
        if(strcmp(argv[i], "-debug") == 0)
        {
            logLevel = ULogger::kDebug;
            continue;
        }
        if(strcmp(argv[i], "-info") == 0)
        {
            logLevel = ULogger::kInfo;
            continue;
        }
        if(strcmp(argv[i], "-warn") == 0)
        {
            logLevel = ULogger::kWarning;
            continue;
        }
        if(strcmp(argv[i], "-exit_warn") == 0)
        {
            exitLevel = ULogger::kWarning;
            continue;
        }
        if(strcmp(argv[i], "-exit_error") == 0)
        {
            exitLevel = ULogger::kError;
            continue;
        }

        // Check for RTAB-Map's parameters
        std::string key = argv[i];
        key = uSplit(key, '-').back();
        if(defaultParameters.find(key) != defaultParameters.end())
        {
            ++i;
            if(i < argc)
            {
                std::string value = argv[i];
                if(value.empty())
                {
                    showUsage();
                }
                else
                {
                    value = uReplaceChar(value, ',', ' ');
                }
                std::pair<ParametersMap::iterator, bool> inserted = pm.insert(ParametersPair(key, value));
                if(inserted.second == false)
                {
                    inserted.first->second = value;
                }
            }
            else { showUsage(); }
            continue;
        }

        printf("Unrecognized option : %s\n", argv[i]);
        showUsage();
    }

    if(repeat && createGT)
    {
        printf("Cannot create a ground truth if repeat is on.\n");
        showUsage();
    }
    else if((imageWidth && imageHeight == 0) || (imageHeight && imageWidth == 0))
    {
        printf("If imageWidth is set, imageHeight must be too.\n");
        showUsage();
    }

    UTimer timer;
    timer.start();

    std::queue<double> iterationMeanTime;

    Camera * camera = 0;
    if(UDirectory::exists(path))
    {
        camera = new CameraImages(path, startAt, false, 1/rate, imageWidth, imageHeight);
    }
    else
    {
        camera = new CameraVideo(path, 1/rate, imageWidth, imageHeight);
    }

    if(!camera || !camera->init())
    {
        printf("Camera init failed, using path \"%s\"\n", path.c_str());
        exit(1);
    }

    std::map<int, int> groundTruth;

    ULogger::setType(ULogger::kTypeConsole);
    //ULogger::setType(ULogger::kTypeFile, rtabmap.getWorkingDir()+"/LogConsole.txt", false);
    //ULogger::setBuffered(true);
    ULogger::setLevel(logLevel);
    ULogger::setExitLevel(exitLevel);

    // Create tasks : memory is deleted
    Rtabmap rtabmap;

    // Disable statistics (we don't need them)
    pm.insert(ParametersPair(Parameters::kRtabmapPublishStats(), "false"));
    // use the default working directory
    pm.insert(ParametersPair(Parameters::kRtabmapWorkingDirectory(), Parameters::defaultRtabmapWorkingDirectory()));
    pm.insert(ParametersPair(Parameters::kRGBDEnabled(), "false"));

    rtabmap.init(pm);
    rtabmap.setTimeThreshold(timeThreshold); // in ms

    printf("Avpd init time = %fs\n", timer.ticks());

    // Start thread's task
    int loopClosureId;
    int count = 0;
    int countLoopDetected=0;

    printf("\nParameters : \n");
    printf(" Data set : %s\n", path.c_str());
    printf(" Time threshold = %1.2f ms\n", timeThreshold);
    printf(" Image rate = %1.2f s (%1.2f Hz)\n", rate, 1/rate);
    printf(" Repeating data set = %s\n", repeat?"true":"false");
    printf(" Camera width=%d, height=%d (0 is default)\n", imageWidth, imageHeight);
    printf(" Camera starts at image %d (default 1)\n", startAt);
    if(createGT)
    {
        printf(" Creating the ground truth matrix.\n");
    }
    printf(" INFO: All other parameters are set to defaults\n");
    if(pm.size()>1)
    {
        printf(" Overwritten parameters :\n");
        for(ParametersMap::iterator iter = pm.begin(); iter!=pm.end(); ++iter)
        {
            printf("   %s=%s\n",iter->first.c_str(), iter->second.c_str());
        }
    }
    if(rtabmap.getWM().size() || rtabmap.getSTM().size())
    {
        printf("[Warning] RTAB-Map database is not empty (%s)\n", (rtabmap.getWorkingDir()+Parameters::getDefaultDatabaseName()).c_str());
    }
    printf("\nProcessing images...\n");

    UTimer iterationTimer;
    UTimer rtabmapTimer;
    int imagesProcessed = 0;
    std::list<std::vector<float> > teleopActions;
    while(loopDataset <= repeat && g_forever)
    {
        cv::Mat img = camera->takeImage();
        int i=0;
        double maxIterationTime = 0.0;
        int maxIterationTimeId = 0;
        while(!img.empty() && g_forever)
        {
            ++imagesProcessed;
            iterationTimer.start();
            rtabmapTimer.start();
            rtabmap.process(img);
            double rtabmapTime = rtabmapTimer.elapsed();
            loopClosureId = rtabmap.getLoopClosureId();
            if(rtabmap.getLoopClosureId())
            {
                ++countLoopDetected;
            }

            img = camera->takeImage();
            if(++count % 100 == 0)
            {
                printf(" count = %d, loop closures = %d, max time (at %d) = %fs\n",
                        count, countLoopDetected, maxIterationTimeId, maxIterationTime);
                maxIterationTime = 0.0;
                maxIterationTimeId = 0;
                std::map<int, int> wm = rtabmap.getWeights();
                printf(" WM(%d)=[", (int)wm.size());
                for(std::map<int, int>::iterator iter=wm.begin(); iter!=wm.end();++iter)
                {
                    if(iter != wm.begin())
                    {
                        printf(";");
                    }
                    printf("%d,%d", iter->first, iter->second);
                }
                printf("]\n");
            }

            // Update the generated ground truth matrix
            if(createGT)
            {
                if(loopClosureId > 0)
                {
                    groundTruth.insert(std::make_pair(i, loopClosureId-1));
                }
            }

            ++i;

            double iterationTime = iterationTimer.ticks();

            if(rtabmapTime > maxIterationTime)
            {
                maxIterationTime = rtabmapTime;
                maxIterationTimeId = count;
            }

            ULogger::flush();

            if(rtabmap.getLoopClosureId())
            {
                printf(" iteration(%d) loop(%d) hyp(%.2f) time=%fs/%fs *\n",
                        count, rtabmap.getLoopClosureId(), rtabmap.getLcHypValue(), rtabmapTime, iterationTime);
            }
            else if(rtabmap.getRetrievedId())
            {
                printf(" iteration(%d) high(%d) hyp(%.2f) time=%fs/%fs\n",
                        count, rtabmap.getRetrievedId(), rtabmap.getLcHypValue(), rtabmapTime, iterationTime);
            }
            else
            {
                printf(" iteration(%d) time=%fs/%fs\n", count, rtabmapTime, iterationTime);
            }

            if(timeThreshold && rtabmapTime > timeThreshold*100.0f)
            {
                printf(" ERROR, there is a problem, too much time taken... %fs", rtabmapTime);
                break; // there is a problem, don't continue
            }
        }
        ++loopDataset;
        if(loopDataset <= repeat)
        {
            camera->init();
            printf(" Beginning loop %d...\n", loopDataset);
        }
    }
    printf("Processing images completed. Loop closures found = %d\n", countLoopDetected);
    printf(" Total time = %fs\n", timer.ticks());

    if(imagesProcessed && createGT)
    {
        cv::Mat groundTruthMat = cv::Mat::zeros(imagesProcessed, imagesProcessed, CV_8U);

        for(std::map<int, int>::iterator iter = groundTruth.begin(); iter!=groundTruth.end(); ++iter)
        {
            groundTruthMat.at<unsigned char>(iter->first, iter->second) = 255;
        }

        // Generate the ground truth file
        printf("Generate ground truth to file %s, size of %d\n", (rtabmap.getWorkingDir()+GENERATED_GT_NAME).c_str(), groundTruthMat.rows);
        IplImage img = groundTruthMat;
        cvSaveImage((rtabmap.getWorkingDir()+GENERATED_GT_NAME).c_str(), &img);
        printf(" Creating ground truth file = %fs\n", timer.ticks());
    }

    if(camera)
    {
        delete camera;
        camera = 0;
    }

    rtabmap.close();

    printf(" Cleanup time = %fs\n", timer.ticks());

    return 0;
}
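// --- Illustrative sketch (not part of RTAB-Map) ---
// The ground-truth export above uses the legacy IplImage/cvSaveImage pair.
// A sketch of the same export with the C++ OpenCV API (cv::imwrite); the
// matrix layout matches the code above (row = query image index, column =
// matched image index, 255 = loop closure). The header paths assume
// OpenCV 2.x; "gt.png" is a hypothetical file name.
#include <map>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>

void saveGroundTruth(const std::map<int, int> & groundTruth, int imagesProcessed)
{
    cv::Mat gt = cv::Mat::zeros(imagesProcessed, imagesProcessed, CV_8U);
    for(std::map<int, int>::const_iterator iter = groundTruth.begin(); iter != groundTruth.end(); ++iter)
    {
        gt.at<unsigned char>(iter->first, iter->second) = 255;
    }
    cv::imwrite("gt.png", gt); // PNG keeps the binary matrix lossless
}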
// OpenGL thread
int RTABMapApp::Render()
{
    // should be before clearSceneOnNextRender_ in case openDatabase is called
    std::list<rtabmap::Statistics> rtabmapEvents;
    {
        boost::mutex::scoped_lock lock(rtabmapMutex_);
        rtabmapEvents = rtabmapEvents_;
        rtabmapEvents_.clear();
    }

    if(clearSceneOnNextRender_)
    {
        odomMutex_.lock();
        odomEvents_.clear();
        odomMutex_.unlock();

        poseMutex_.lock();
        poseEvents_.clear();
        poseMutex_.unlock();

        main_scene_.clear();
        clearSceneOnNextRender_ = false;
        createdMeshes_.clear();
        rawPoses_.clear();
        totalPoints_ = 0;
        totalPolygons_ = 0;
        lastDrawnCloudsCount_ = 0;
        renderingFPS_ = 0.0f;
    }

    // Process events
    rtabmap::Transform pose;
    {
        boost::mutex::scoped_lock lock(poseMutex_);
        if(poseEvents_.size())
        {
            pose = poseEvents_.back();
            poseEvents_.clear();
        }
    }
    if(!pose.isNull())
    {
        // update camera pose?
        main_scene_.SetCameraPose(pose);
    }

    bool notifyDataLoaded = false;
    if(rtabmapEvents.size())
    {
        LOGI("Process rtabmap events");

        // update buffered signatures
        std::map<int, rtabmap::SensorData> bufferedSensorData;
        if(!trajectoryMode_)
        {
            for(std::list<rtabmap::Statistics>::iterator iter=rtabmapEvents.begin(); iter!=rtabmapEvents.end(); ++iter)
            {
                for(std::map<int, rtabmap::Signature>::const_iterator jter=iter->getSignatures().begin(); jter!=iter->getSignatures().end(); ++jter)
                {
                    if(!jter->second.sensorData().imageRaw().empty() &&
                       !jter->second.sensorData().depthRaw().empty())
                    {
                        uInsert(bufferedSensorData, std::make_pair(jter->first, jter->second.sensorData()));
                        uInsert(rawPoses_, std::make_pair(jter->first, jter->second.getPose()));
                    }
                    else if(!jter->second.sensorData().imageCompressed().empty() &&
                            !jter->second.sensorData().depthOrRightCompressed().empty())
                    {
                        // uncompress
                        rtabmap::SensorData data = jter->second.sensorData();
                        cv::Mat tmpA,tmpB;
                        data.uncompressData(&tmpA, &tmpB);
                        uInsert(bufferedSensorData, std::make_pair(jter->first, data));
                        uInsert(rawPoses_, std::make_pair(jter->first, jter->second.getPose()));
                        notifyDataLoaded = true;
                    }
                }
            }
        }

        std::map<int, rtabmap::Transform> poses = rtabmapEvents.back().poses();

        // Transform the poses into the OpenGL world frame
        for(std::map<int, rtabmap::Transform>::iterator iter=poses.begin(); iter!=poses.end(); ++iter)
        {
            if(!graphOptimization_)
            {
                std::map<int, rtabmap::Transform>::iterator jter = rawPoses_.find(iter->first);
                if(jter != rawPoses_.end())
                {
                    iter->second = opengl_world_T_rtabmap_world*jter->second;
                }
            }
            else
            {
                iter->second = opengl_world_T_rtabmap_world*iter->second;
            }
        }

        const std::multimap<int, rtabmap::Link> & links = rtabmapEvents.back().constraints();
        if(poses.size())
        {
            // update the graph
            main_scene_.updateGraph(poses, links);

            // update the clouds
            boost::mutex::scoped_lock lock(meshesMutex_);
            std::set<std::string> strIds;
            for(std::map<int, rtabmap::Transform>::iterator iter=poses.begin(); iter!=poses.end(); ++iter)
            {
                int id = iter->first;
                if(!iter->second.isNull())
                {
                    if(main_scene_.hasCloud(id))
                    {
                        // just update the pose
                        main_scene_.setCloudPose(id, iter->second);
                        main_scene_.setCloudVisible(id, true);
                        std::map<int, Mesh>::iterator meshIter = createdMeshes_.find(id);
                        if(meshIter!=createdMeshes_.end())
                        {
                            meshIter->second.pose = iter->second;
                        }
                        else
                        {
                            UERROR("Mesh %d not found!?", id);
                        }
                    }
                    else if(uContains(bufferedSensorData, id))
                    {
                        rtabmap::SensorData & data = bufferedSensorData.at(id);
                        if(!data.imageRaw().empty() && !data.depthRaw().empty())
                        {
                            // Voxelize and filter depending on the previous cloud?
                            pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud;
                            pcl::IndicesPtr indices(new std::vector<int>);
                            LOGI("Creating node cloud %d (image size=%dx%d)", id, data.imageRaw().cols, data.imageRaw().rows);
                            cloud = rtabmap::util3d::cloudRGBFromSensorData(data, data.imageRaw().rows/data.depthRaw().rows, maxCloudDepth_, 0, indices.get());

                            if(cloud->size() && indices->size())
                            {
                                UTimer time;

                                // pcl::organizedFastMesh doesn't take indices, so set the points we don't need to mesh to NaN
                                pcl::PointCloud<pcl::PointXYZRGB>::Ptr output(new pcl::PointCloud<pcl::PointXYZRGB>);
                                pcl::ExtractIndices<pcl::PointXYZRGB> filter;
                                filter.setIndices(indices);
                                filter.setKeepOrganized(true);
                                filter.setInputCloud(cloud);
                                filter.filter(*output);

                                std::vector<pcl::Vertices> polygons = rtabmap::util3d::organizedFastMesh(output, meshAngleToleranceDeg_*M_PI/180.0, false, meshTrianglePix_);
                                pcl::PointCloud<pcl::PointXYZRGB>::Ptr outputCloud(new pcl::PointCloud<pcl::PointXYZRGB>);
                                std::vector<pcl::Vertices> outputPolygons;

                                outputCloud = output;
                                outputPolygons = polygons;

                                LOGI("Creating mesh, %d polygons (%fs)", (int)outputPolygons.size(), time.ticks());

                                if(outputCloud->size() && outputPolygons.size())
                                {
                                    totalPolygons_ += outputPolygons.size();
                                    main_scene_.addCloud(id, outputCloud, outputPolygons, iter->second, data.imageRaw());

                                    // protect createdMeshes_, which is also used by the exportMesh() method
                                    std::pair<std::map<int, Mesh>::iterator, bool> inserted = createdMeshes_.insert(std::make_pair(id, Mesh()));
                                    UASSERT(inserted.second);
                                    inserted.first->second.cloud = outputCloud;
                                    inserted.first->second.polygons = outputPolygons;
                                    inserted.first->second.pose = iter->second;
                                    inserted.first->second.texture = data.imageCompressed();
                                }
                                else
                                {
                                    LOGE("No mesh could be created for node %d", id);
                                }
                            }
                            totalPoints_+=indices->size();
                        }
                    }
                }
            }
        }

        // filter poses?
        if(poses.size() > 2)
        {
            if(nodesFiltering_)
            {
                for(std::multimap<int, rtabmap::Link>::const_iterator iter=links.begin(); iter!=links.end(); ++iter)
                {
                    if(iter->second.type() != rtabmap::Link::kNeighbor)
                    {
                        int oldId = iter->second.to()>iter->second.from()?iter->second.from():iter->second.to();
                        poses.erase(oldId);
                    }
                }
            }
        }

        // update cloud visibility
        std::set<int> addedClouds = main_scene_.getAddedClouds();
        for(std::set<int>::const_iterator iter=addedClouds.begin(); iter!=addedClouds.end(); ++iter)
        {
            if(*iter > 0 && poses.find(*iter) == poses.end())
            {
                main_scene_.setCloudVisible(*iter, false);
            }
        }
    }
    else
    {
        rtabmap::OdometryEvent event;
        bool set = false;
        {
            boost::mutex::scoped_lock lock(odomMutex_);
            if(odomEvents_.size())
            {
                LOGI("Process odom events");
                event = odomEvents_.back();
                odomEvents_.clear();
                set = true;
            }
        }

        main_scene_.setCloudVisible(-1, odomCloudShown_ && !trajectoryMode_);

        // just process the last one
        if(set && !event.pose().isNull())
        {
            if(odomCloudShown_ && !trajectoryMode_)
            {
                if(!event.data().imageRaw().empty() && !event.data().depthRaw().empty())
                {
                    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud;
                    cloud = rtabmap::util3d::cloudRGBFromSensorData(event.data(), event.data().imageRaw().rows/event.data().depthRaw().rows, maxCloudDepth_);
                    if(cloud->size())
                    {
                        LOGI("Created odom cloud (rgb=%dx%d depth=%dx%d cloud=%dx%d)",
                                event.data().imageRaw().cols, event.data().imageRaw().rows,
                                event.data().depthRaw().cols, event.data().depthRaw().rows,
                                (int)cloud->width, (int)cloud->height);
                        std::vector<pcl::Vertices> polygons = rtabmap::util3d::organizedFastMesh(cloud, meshAngleToleranceDeg_*M_PI/180.0, false, meshTrianglePix_);
                        main_scene_.addCloud(-1, cloud, polygons, opengl_world_T_rtabmap_world*event.pose(), event.data().imageRaw());
                        main_scene_.setCloudVisible(-1, true);
                    }
                    else
                    {
                        LOGE("Generated cloud is empty!");
                    }
                }
                else
                {
                    LOGE("Odom data images are empty!");
                }
            }
        }
    }

    UTimer fpsTime;
    lastDrawnCloudsCount_ = main_scene_.Render();
    renderingFPS_ = 1.0/fpsTime.elapsed();

    if(rtabmapEvents.size())
    {
        // send statistics to the GUI
        LOGI("Posting PostRenderEvent!");
        UEventsManager::post(new PostRenderEvent(rtabmapEvents.back()));
    }

    return notifyDataLoaded?1:0;
}
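// --- Illustrative sketch (not part of RTAB-Map) ---
// Render() drains each event list under a short lock and processes the events
// outside of it, so producer threads are never blocked by rendering. The same
// hand-off pattern in standalone form, using std::mutex instead of boost for
// self-containment:
#include <list>
#include <mutex>

template<class Event>
std::list<Event> drainEvents(std::list<Event> & shared, std::mutex & m)
{
    std::list<Event> local;
    std::lock_guard<std::mutex> lock(m);
    local.swap(shared); // O(1) hand-off; the shared list is left empty
    return local;
}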
// return a non-null transform if odometry is correctly computed
Transform OdometryF2F::computeTransform(
        SensorData & data,
        const Transform & guess,
        OdometryInfo * info)
{
    UTimer timer;
    Transform output;
    if(!data.rightRaw().empty() && !data.stereoCameraModel().isValidForProjection())
    {
        UERROR("Calibrated stereo camera required");
        return output;
    }
    if(!data.depthRaw().empty() &&
        (data.cameraModels().size() != 1 || !data.cameraModels()[0].isValidForProjection()))
    {
        UERROR("Calibrated camera required (multi-camera setups are not supported).");
        return output;
    }

    RegistrationInfo regInfo;

    UASSERT(!this->getPose().isNull());
    if(lastKeyFramePose_.isNull())
    {
        lastKeyFramePose_ = this->getPose(); // reset to current pose
    }
    Transform motionSinceLastKeyFrame = lastKeyFramePose_.inverse()*this->getPose();

    Signature newFrame(data);
    if(refFrame_.sensorData().isValid())
    {
        Signature tmpRefFrame = refFrame_;
        output = registrationPipeline_->computeTransformationMod(
                tmpRefFrame,
                newFrame,
                !guess.isNull()?motionSinceLastKeyFrame*guess:Transform(),
                &regInfo);

        if(info && this->isInfoDataFilled())
        {
            std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > > pairs;
            EpipolarGeometry::findPairsUnique(tmpRefFrame.getWords(), newFrame.getWords(), pairs);
            info->refCorners.resize(pairs.size());
            info->newCorners.resize(pairs.size());
            std::map<int, int> idToIndex;
            int i=0;
            for(std::list<std::pair<int, std::pair<cv::KeyPoint, cv::KeyPoint> > >::iterator iter=pairs.begin();
                iter!=pairs.end();
                ++iter)
            {
                info->refCorners[i] = iter->second.first.pt;
                info->newCorners[i] = iter->second.second.pt;
                idToIndex.insert(std::make_pair(iter->first, i));
                ++i;
            }
            info->cornerInliers.resize(regInfo.inliersIDs.size(), 1);
            i=0;
            for(; i<(int)regInfo.inliersIDs.size(); ++i)
            {
                info->cornerInliers[i] = idToIndex.at(regInfo.inliersIDs[i]);
            }

            Transform t = this->getPose()*motionSinceLastKeyFrame.inverse();
            for(std::multimap<int, cv::Point3f>::const_iterator iter=tmpRefFrame.getWords3().begin(); iter!=tmpRefFrame.getWords3().end(); ++iter)
            {
                info->localMap.insert(std::make_pair(iter->first, util3d::transformPoint(iter->second, t)));
            }
            info->words = newFrame.getWords();
        }
    }
    else
    {
        // return identity
        output = Transform::getIdentity();
        // a very high variance means that the new pose is not linked with the previous one
        regInfo.variance = 9999;
    }

    if(!output.isNull())
    {
        output = motionSinceLastKeyFrame.inverse() * output;

        // new key frame?
        if( (registrationPipeline_->isImageRequired() && (keyFrameThr_ == 0 || float(regInfo.inliers) <= keyFrameThr_*float(refFrame_.sensorData().keypoints().size()))) ||
            (registrationPipeline_->isScanRequired() && (scanKeyFrameThr_ == 0 || regInfo.icpInliersRatio <= scanKeyFrameThr_)))
        {
            UDEBUG("Update key frame");
            int features = newFrame.getWordsDescriptors().size();
            if(features == 0)
            {
                newFrame = Signature(data);
                // this will generate features only for the first frame or if optical flow was used (no 3D words)
                Signature dummy;
                registrationPipeline_->computeTransformationMod(
                        newFrame,
                        dummy);
                features = (int)newFrame.sensorData().keypoints().size();
            }

            if((features >= registrationPipeline_->getMinVisualCorrespondences()) &&
               (registrationPipeline_->getMinGeometryCorrespondencesRatio()==0.0f ||
                    (newFrame.sensorData().laserScanRaw().cols &&
                    (newFrame.sensorData().laserScanMaxPts() == 0 || float(newFrame.sensorData().laserScanRaw().cols)/float(newFrame.sensorData().laserScanMaxPts())>=registrationPipeline_->getMinGeometryCorrespondencesRatio()))))
            {
                refFrame_ = newFrame;

                refFrame_.setWords(std::multimap<int, cv::KeyPoint>());
                refFrame_.setWords3(std::multimap<int, cv::Point3f>());
                refFrame_.setWordsDescriptors(std::multimap<int, cv::Mat>());

                // reset motion
                lastKeyFramePose_.setNull();
            }
            else
            {
                if(!refFrame_.sensorData().isValid())
                {
                    // Don't send odometry if we don't have a keyframe yet
                    output.setNull();
                }

                if(features < registrationPipeline_->getMinVisualCorrespondences())
                {
                    UWARN("Too few 2D features (%d), keeping the last key frame...", features);
                }

                if(registrationPipeline_->getMinGeometryCorrespondencesRatio()>0.0f && newFrame.sensorData().laserScanRaw().cols==0)
                {
                    UWARN("Too few scan points (%d), keeping the last key frame...", newFrame.sensorData().laserScanRaw().cols);
                }
                else if(registrationPipeline_->getMinGeometryCorrespondencesRatio()>0.0f &&
                        newFrame.sensorData().laserScanMaxPts() != 0 &&
                        float(newFrame.sensorData().laserScanRaw().cols)/float(newFrame.sensorData().laserScanMaxPts())<registrationPipeline_->getMinGeometryCorrespondencesRatio())
                {
                    UWARN("Too low a scan points ratio (%f < %f), keeping the last key frame...",
                            float(newFrame.sensorData().laserScanRaw().cols)/float(newFrame.sensorData().laserScanMaxPts()),
                            registrationPipeline_->getMinGeometryCorrespondencesRatio());
                }
            }
        }
    }
    else if(!regInfo.rejectedMsg.empty())
    {
        UWARN("Registration failed: \"%s\"", regInfo.rejectedMsg.c_str());
    }

    data.setFeatures(newFrame.sensorData().keypoints(), newFrame.sensorData().descriptors());

    if(info)
    {
        info->type = 1;
        info->variance = regInfo.variance;
        info->inliers = regInfo.inliers;
        info->icpInliersRatio = regInfo.icpInliersRatio;
        info->matches = regInfo.matches;
        info->features = newFrame.sensorData().keypoints().size();
    }

    UINFO("Odom update time = %fs lost=%s inliers=%d, ref frame corners=%d, transform accepted=%s",
            timer.elapsed(),
            output.isNull()?"true":"false",
            (int)regInfo.inliers,
            (int)newFrame.sensorData().keypoints().size(),
            !output.isNull()?"true":"false");

    return output;
}
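// --- Illustrative sketch (not part of RTAB-Map) ---
// The visual key-frame test above in isolation: a new key frame is taken when
// the inlier count drops to (or below) a fraction of the reference frame's
// feature count, or when the threshold is disabled (0). Function and
// parameter names are hypothetical.
bool shouldAddKeyFrame(int inliers, int refFeatures, float keyFrameThr)
{
    return keyFrameThr == 0.0f || float(inliers) <= keyFrameThr * float(refFeatures);
}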
// return a non-null transform if odometry is correctly computed
Transform OdometryICP::computeTransform(const SensorData & data, OdometryInfo * info)
{
    UTimer timer;
    Transform output;
    bool hasConverged = false;
    double variance = 0;
    unsigned int minPoints = 100;
    if(!data.depth().empty())
    {
        if(data.depth().type() == CV_8UC1)
        {
            UERROR("ICP 3D cannot be done on stereo images!");
            return output;
        }

        pcl::PointCloud<pcl::PointXYZ>::Ptr newCloudXYZ = util3d::getICPReadyCloud(
                data.depth(),
                data.fx(),
                data.fy(),
                data.cx(),
                data.cy(),
                _decimation,
                this->getMaxDepth(),
                _voxelSize,
                _samples,
                data.localTransform());

        if(_pointToPlane)
        {
            pcl::PointCloud<pcl::PointNormal>::Ptr newCloud = util3d::computeNormals(newCloudXYZ);

            std::vector<int> indices;
            newCloud = util3d::removeNaNNormalsFromPointCloud<pcl::PointNormal>(newCloud);
            if(newCloudXYZ->size() != newCloud->size())
            {
                UWARN("removed nan normals...");
            }

            if(_previousCloudNormal->size() > minPoints && newCloud->size() > minPoints)
            {
                int correspondences = 0;
                Transform transform = util3d::icpPointToPlane(
                        newCloud,
                        _previousCloudNormal,
                        _maxCorrespondenceDistance,
                        _maxIterations,
                        &hasConverged,
                        &variance,
                        &correspondences);

                // verify that there are enough correspondences
                float correspondencesRatio = float(correspondences)/float(_previousCloudNormal->size()>newCloud->size()?_previousCloudNormal->size():newCloud->size());

                if(!transform.isNull() && hasConverged && correspondencesRatio >= _correspondenceRatio)
                {
                    output = transform;
                    _previousCloudNormal = newCloud;
                }
                else
                {
                    UWARN("Transform not valid (hasConverged=%s variance=%f)", hasConverged?"true":"false", variance);
                }
            }
            else if(newCloud->size() > minPoints)
            {
                output.setIdentity();
                _previousCloudNormal = newCloud;
            }
        }
        else
        {
            // point to point
            if(_previousCloud->size() > minPoints && newCloudXYZ->size() > minPoints)
            {
                int correspondences = 0;
                Transform transform = util3d::icp(
                        newCloudXYZ,
                        _previousCloud,
                        _maxCorrespondenceDistance,
                        _maxIterations,
                        &hasConverged,
                        &variance,
                        &correspondences);

                // verify that there are enough correspondences
                float correspondencesRatio = float(correspondences)/float(_previousCloud->size()>newCloudXYZ->size()?_previousCloud->size():newCloudXYZ->size());

                if(!transform.isNull() && hasConverged && correspondencesRatio >= _correspondenceRatio)
                {
                    output = transform;
                    _previousCloud = newCloudXYZ;
                }
                else
                {
                    UWARN("Transform not valid (hasConverged=%s variance=%f)", hasConverged?"true":"false", variance);
                }
            }
            else if(newCloudXYZ->size() > minPoints)
            {
                output.setIdentity();
                _previousCloud = newCloudXYZ;
            }
        }
    }
    else
    {
        UERROR("Depth is empty?!?");
    }

    if(info)
    {
        info->variance = variance;
    }

    UINFO("Odom update time = %fs hasConverged=%s variance=%f cloud=%d",
            timer.elapsed(),
            hasConverged?"true":"false",
            variance,
            (int)(_pointToPlane?_previousCloudNormal->size():_previousCloud->size()));

    return output;
}
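// --- Illustrative sketch (not part of RTAB-Map) ---
// The ICP acceptance test above in isolation: a registration result is kept
// only if ICP converged and the number of correspondences is large enough
// relative to the bigger of the two clouds. Names are hypothetical.
#include <cstddef>

bool acceptIcpResult(bool hasConverged, int correspondences,
        std::size_t previousSize, std::size_t newSize, float minRatio)
{
    const std::size_t larger = previousSize > newSize ? previousSize : newSize;
    const float ratio = larger ? float(correspondences) / float(larger) : 0.0f;
    return hasConverged && ratio >= minRatio;
}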