void CameraThread::mainLoop()
{
    UDEBUG("");
    CameraInfo info;
    SensorData data = _camera->takeImage(&info);

    if(!data.imageRaw().empty())
    {
        if(_colorOnly && !data.depthRaw().empty())
        {
            data.setDepthOrRightRaw(cv::Mat());
        }
        if(_imageDecimation>1 && !data.imageRaw().empty())
        {
            UDEBUG("");
            UTimer timer;
            if(!data.depthRaw().empty() &&
               !(data.depthRaw().rows % _imageDecimation == 0 && data.depthRaw().cols % _imageDecimation == 0))
            {
                UERROR("Decimation of depth images should be exact (decimation=%d, size=(%d,%d))! "
                       "Images won't be resized.", _imageDecimation, data.depthRaw().cols, data.depthRaw().rows);
            }
            else
            {
                data.setImageRaw(util2d::decimate(data.imageRaw(), _imageDecimation));
                data.setDepthOrRightRaw(util2d::decimate(data.depthOrRightRaw(), _imageDecimation));
                std::vector<CameraModel> models = data.cameraModels();
                for(unsigned int i=0; i<models.size(); ++i)
                {
                    if(models[i].isValidForProjection())
                    {
                        models[i] = models[i].scaled(1.0/double(_imageDecimation));
                    }
                }
                data.setCameraModels(models);
                StereoCameraModel stereoModel = data.stereoCameraModel();
                if(stereoModel.isValidForProjection())
                {
                    stereoModel.scale(1.0/double(_imageDecimation));
                    data.setStereoCameraModel(stereoModel);
                }
            }
            info.timeImageDecimation = timer.ticks();
        }
        if(_mirroring && data.cameraModels().size() == 1)
        {
            UDEBUG("");
            UTimer timer;
            cv::Mat tmpRgb;
            cv::flip(data.imageRaw(), tmpRgb, 1);
            data.setImageRaw(tmpRgb);
            UASSERT_MSG(data.cameraModels().size() <= 1 && !data.stereoCameraModel().isValidForProjection(),
                    "Only single RGBD cameras are supported for mirroring.");
            if(data.cameraModels().size() && data.cameraModels()[0].cx())
            {
                CameraModel tmpModel(
                        data.cameraModels()[0].fx(),
                        data.cameraModels()[0].fy(),
                        float(data.imageRaw().cols) - data.cameraModels()[0].cx(),
                        data.cameraModels()[0].cy(),
                        data.cameraModels()[0].localTransform());
                data.setCameraModel(tmpModel);
            }
            if(!data.depthRaw().empty())
            {
                cv::Mat tmpDepth;
                cv::flip(data.depthRaw(), tmpDepth, 1);
                data.setDepthOrRightRaw(tmpDepth);
            }
            info.timeMirroring = timer.ticks();
        }
        if(_stereoToDepth && data.stereoCameraModel().isValidForProjection() && !data.rightRaw().empty())
        {
            UDEBUG("");
            UTimer timer;
            cv::Mat depth = util2d::depthFromDisparity(
                    _stereoDense->computeDisparity(data.imageRaw(), data.rightRaw()),
                    data.stereoCameraModel().left().fx(),
                    data.stereoCameraModel().baseline());
            data.setCameraModel(data.stereoCameraModel().left());
            data.setDepthOrRightRaw(depth);
            data.setStereoCameraModel(StereoCameraModel());
            info.timeDisparity = timer.ticks();
            UDEBUG("Computing disparity = %f s", info.timeDisparity);
        }
        if(_scanFromDepth &&
            data.cameraModels().size() &&
            data.cameraModels().at(0).isValidForProjection() &&
            !data.depthRaw().empty())
        {
            UDEBUG("");
            if(data.laserScanRaw().empty())
            {
                UASSERT(_scanDecimation >= 1);
                UTimer timer;
                pcl::IndicesPtr validIndices(new std::vector<int>);
                pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = util3d::cloudFromSensorData(
                        data,
                        _scanDecimation,
                        _scanMaxDepth,
                        _scanMinDepth,
                        validIndices.get());
                float maxPoints = (data.depthRaw().rows/_scanDecimation)*(data.depthRaw().cols/_scanDecimation);
                if(_scanVoxelSize>0.0f)
                {
                    cloud = util3d::voxelize(cloud, validIndices, _scanVoxelSize);
                    float ratio = float(cloud->size()) / float(validIndices->size());
                    maxPoints = ratio * maxPoints;
                }
                else if(!cloud->is_dense)
                {
                    pcl::PointCloud<pcl::PointXYZ>::Ptr denseCloud(new pcl::PointCloud<pcl::PointXYZ>);
                    pcl::copyPointCloud(*cloud, *validIndices, *denseCloud);
                    cloud = denseCloud;
                }

                cv::Mat scan;
                if(_scanNormalsK>0)
                {
                    scan = util3d::laserScanFromPointCloud(*util3d::computeNormals(cloud, _scanNormalsK));
                }
                else
                {
                    scan = util3d::laserScanFromPointCloud(*cloud);
                }
                data.setLaserScanRaw(scan, (int)maxPoints, _scanMaxDepth);
                info.timeScanFromDepth = timer.ticks();
                UDEBUG("Computing scan from depth = %f s", info.timeScanFromDepth);
            }
            else
            {
                UWARN("Option to create laser scan from depth image is enabled, but "
                      "there is already a laser scan in the captured sensor data. Scan from "
                      "depth will not be created.");
            }
        }

        info.cameraName = _camera->getSerial();
        this->post(new CameraEvent(data, info));
    }
    else if(!this->isKilled())
    {
        UWARN("no more images...");
        this->kill();
        this->post(new CameraEvent());
    }
}
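// A minimal sketch (not taken from the RTAB-Map sources) of the intrinsics scaling
// that CameraModel::scaled(1.0/_imageDecimation) is expected to perform above: when
// an image is decimated by an integer factor, fx, fy, cx and cy must shrink by the
// same factor so that projection stays consistent with the smaller image.
// The struct and function below are hypothetical, for illustration only.
struct SimplePinholeModel
{
    double fx, fy, cx, cy;

    SimplePinholeModel scaled(double factor) const
    {
        // All four intrinsic parameters are expressed in pixels,
        // so they all scale linearly with the image size.
        return SimplePinholeModel{fx*factor, fy*factor, cx*factor, cy*factor};
    }
};
// Example: a model calibrated on 640x480 images, decimated by 2, becomes a 320x240 model:
//   SimplePinholeModel half = model.scaled(1.0/2.0);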
bool RTABMapApp::exportMesh(const std::string & filePath)
{
    bool success = false;

    //Assemble the meshes
    if(UFile::getExtension(filePath).compare("obj") == 0)
    {
        pcl::TextureMesh textureMesh;
        std::vector<cv::Mat> textures;
        int totalPolygons = 0;
        pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr mergedClouds(new pcl::PointCloud<pcl::PointXYZRGBNormal>);
        {
            boost::mutex::scoped_lock lock(meshesMutex_);

            textureMesh.tex_materials.resize(createdMeshes_.size());
            textureMesh.tex_polygons.resize(createdMeshes_.size());
            textureMesh.tex_coordinates.resize(createdMeshes_.size());
            textures.resize(createdMeshes_.size());

            int polygonsStep = 0;
            int oi = 0;
            for(std::map<int, Mesh>::iterator iter=createdMeshes_.begin();
                iter!= createdMeshes_.end();
                ++iter)
            {
                UASSERT(!iter->second.cloud->is_dense);

                if(!iter->second.texture.empty() &&
                    iter->second.cloud->size() &&
                    iter->second.polygons.size())
                {
                    // OBJ format requires normals
                    pcl::PointCloud<pcl::Normal>::Ptr normals = rtabmap::util3d::computeNormals(iter->second.cloud, 20);

                    // allocate the output cloud before concatenating (the pointer must not be null)
                    pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr cloudWithNormals(new pcl::PointCloud<pcl::PointXYZRGBNormal>);
                    pcl::concatenateFields(*iter->second.cloud, *normals, *cloudWithNormals);

                    // create dense cloud
                    pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr denseCloud(new pcl::PointCloud<pcl::PointXYZRGBNormal>);
                    std::vector<pcl::Vertices> densePolygons;
                    std::map<int, int> newToOldIndices;
                    newToOldIndices = rtabmap::util3d::filterNotUsedVerticesFromMesh(
                            *cloudWithNormals,
                            iter->second.polygons,
                            *denseCloud,
                            densePolygons);

                    // polygons
                    UASSERT(densePolygons.size());
                    unsigned int polygonSize = densePolygons.front().vertices.size();
                    textureMesh.tex_polygons[oi].resize(densePolygons.size());
                    textureMesh.tex_coordinates[oi].resize(densePolygons.size() * polygonSize);
                    for(unsigned int j=0; j<densePolygons.size(); ++j)
                    {
                        pcl::Vertices vertices = densePolygons[j];
                        UASSERT(polygonSize == vertices.vertices.size());
                        for(unsigned int k=0; k<vertices.vertices.size(); ++k)
                        {
                            //uv
                            std::map<int, int>::iterator jter = newToOldIndices.find(vertices.vertices[k]);
                            textureMesh.tex_coordinates[oi][j*vertices.vertices.size()+k] = Eigen::Vector2f(
                                    float(jter->second % iter->second.cloud->width) / float(iter->second.cloud->width),   // u
                                    float(iter->second.cloud->height - jter->second / iter->second.cloud->width) / float(iter->second.cloud->height)); // v

                            vertices.vertices[k] += polygonsStep;
                        }
                        textureMesh.tex_polygons[oi][j] = vertices;
                    }
                    totalPolygons += densePolygons.size();
                    polygonsStep += denseCloud->size();

                    pcl::PointCloud<pcl::PointXYZRGBNormal>::Ptr transformedCloud = rtabmap::util3d::transformPointCloud(denseCloud, iter->second.pose);
                    if(mergedClouds->size() == 0)
                    {
                        *mergedClouds = *transformedCloud;
                    }
                    else
                    {
                        *mergedClouds += *transformedCloud;
                    }

                    textures[oi] = iter->second.texture;
                    textureMesh.tex_materials[oi].tex_illum = 1;
                    textureMesh.tex_materials[oi].tex_name = uFormat("material_%d", iter->first);
                    ++oi;
                }
                else
                {
                    UERROR("Texture not set for mesh %d", iter->first);
                }
            }
            textureMesh.tex_materials.resize(oi);
            textureMesh.tex_polygons.resize(oi);
            textures.resize(oi);

            if(textures.size())
            {
                pcl::toPCLPointCloud2(*mergedClouds, textureMesh.cloud);

                std::string textureDirectory = uSplit(filePath, '.').front();
                UINFO("Saving %d textures to %s.", (int)textures.size(), textureDirectory.c_str());
                UDirectory::makeDir(textureDirectory);
                for(unsigned int i=0; i<textures.size(); ++i)
                {
                    cv::Mat rawImage = rtabmap::uncompressImage(textures[i]);
                    std::string texFile = textureDirectory+"/"+textureMesh.tex_materials[i].tex_name+".png";
                    cv::imwrite(texFile, rawImage);

                    UINFO("Saved %s (%d bytes).", texFile.c_str(), (int)(rawImage.total()*rawImage.channels()));

                    // relative path
                    textureMesh.tex_materials[i].tex_file = uSplit(UFile::getName(filePath), '.').front()+"/"+textureMesh.tex_materials[i].tex_name+".png";
                }

                UINFO("Saving obj (%d vertices, %d polygons) to %s.", (int)mergedClouds->size(), totalPolygons, filePath.c_str());
                success = pcl::io::saveOBJFile(filePath, textureMesh) == 0;
                if(success)
                {
                    UINFO("Saved obj to %s!", filePath.c_str());
                }
                else
                {
                    UERROR("Failed saving obj to %s!", filePath.c_str());
                }
            }
        }
    }
    else
    {
        pcl::PointCloud<pcl::PointXYZRGB>::Ptr mergedClouds(new pcl::PointCloud<pcl::PointXYZRGB>);
        std::vector<pcl::Vertices> mergedPolygons;

        {
            boost::mutex::scoped_lock lock(meshesMutex_);

            for(std::map<int, Mesh>::iterator iter=createdMeshes_.begin();
                iter!= createdMeshes_.end();
                ++iter)
            {
                pcl::PointCloud<pcl::PointXYZRGB>::Ptr denseCloud(new pcl::PointCloud<pcl::PointXYZRGB>);
                std::vector<pcl::Vertices> densePolygons;
                rtabmap::util3d::filterNotUsedVerticesFromMesh(
                        *iter->second.cloud,
                        iter->second.polygons,
                        *denseCloud,
                        densePolygons);

                pcl::PointCloud<pcl::PointXYZRGB>::Ptr transformedCloud = rtabmap::util3d::transformPointCloud(denseCloud, iter->second.pose);
                if(mergedClouds->size() == 0)
                {
                    *mergedClouds = *transformedCloud;
                    mergedPolygons = densePolygons;
                }
                else
                {
                    rtabmap::util3d::appendMesh(*mergedClouds, mergedPolygons, *transformedCloud, densePolygons);
                }
            }
        }

        if(mergedClouds->size() && mergedPolygons.size())
        {
            pcl::PolygonMesh mesh;
            pcl::toPCLPointCloud2(*mergedClouds, mesh.cloud);
            mesh.polygons = mergedPolygons;

            UINFO("Saving ply (%d vertices, %d polygons) to %s.", (int)mergedClouds->size(), (int)mergedPolygons.size(), filePath.c_str());
            success = pcl::io::savePLYFileBinary(filePath, mesh) == 0;
            if(success)
            {
                UINFO("Saved ply to %s!", filePath.c_str());
            }
            else
            {
                UERROR("Failed saving ply to %s!", filePath.c_str());
            }
        }
    }
    return success;
}
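// A small standalone sketch (hypothetical helper, not part of RTABMapApp) of the
// texture coordinate computation used in exportMesh() above: for an organized
// cloud of size width x height, vertex index idx sits at column idx % width and
// row idx / width; u is the normalized column and v is flipped vertically because
// image rows grow downward while OBJ texture coordinates grow upward.
#include <Eigen/Core>

Eigen::Vector2f uvFromOrganizedIndex(int idx, int width, int height)
{
    const int col = idx % width;
    const int row = idx / width;
    return Eigen::Vector2f(
            float(col) / float(width),            // u
            float(height - row) / float(height)); // v
}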
void CameraThread::postUpdate(SensorData * dataPtr, CameraInfo * info) const
{
    UASSERT(dataPtr!=0);
    SensorData & data = *dataPtr;
    if(_colorOnly && !data.depthRaw().empty())
    {
        data.setDepthOrRightRaw(cv::Mat());
    }

    if(_distortionModel && !data.depthRaw().empty())
    {
        UTimer timer;
        if(_distortionModel->getWidth() == data.depthRaw().cols &&
           _distortionModel->getHeight() == data.depthRaw().rows)
        {
            cv::Mat depth = data.depthRaw().clone();// make sure we are not modifying data in cached signatures.
            _distortionModel->undistort(depth);
            data.setDepthOrRightRaw(depth);
        }
        else
        {
            UERROR("Distortion model size is %dx%d but depth image is %dx%d!",
                    _distortionModel->getWidth(), _distortionModel->getHeight(),
                    data.depthRaw().cols, data.depthRaw().rows);
        }
        if(info) info->timeUndistortDepth = timer.ticks();
    }

    if(_bilateralFiltering && !data.depthRaw().empty())
    {
        UTimer timer;
        data.setDepthOrRightRaw(util2d::fastBilateralFiltering(data.depthRaw(), _bilateralSigmaS, _bilateralSigmaR));
        if(info) info->timeBilateralFiltering = timer.ticks();
    }

    if(_imageDecimation>1 && !data.imageRaw().empty())
    {
        UDEBUG("");
        UTimer timer;
        if(!data.depthRaw().empty() &&
           !(data.depthRaw().rows % _imageDecimation == 0 && data.depthRaw().cols % _imageDecimation == 0))
        {
            UERROR("Decimation of depth images should be exact (decimation=%d, size=(%d,%d))! "
                   "Images won't be resized.", _imageDecimation, data.depthRaw().cols, data.depthRaw().rows);
        }
        else
        {
            data.setImageRaw(util2d::decimate(data.imageRaw(), _imageDecimation));
            data.setDepthOrRightRaw(util2d::decimate(data.depthOrRightRaw(), _imageDecimation));
            std::vector<CameraModel> models = data.cameraModels();
            for(unsigned int i=0; i<models.size(); ++i)
            {
                if(models[i].isValidForProjection())
                {
                    models[i] = models[i].scaled(1.0/double(_imageDecimation));
                }
            }
            data.setCameraModels(models);
            StereoCameraModel stereoModel = data.stereoCameraModel();
            if(stereoModel.isValidForProjection())
            {
                stereoModel.scale(1.0/double(_imageDecimation));
                data.setStereoCameraModel(stereoModel);
            }
        }
        if(info) info->timeImageDecimation = timer.ticks();
    }

    if(_mirroring && !data.imageRaw().empty() && data.cameraModels().size() == 1)
    {
        UDEBUG("");
        UTimer timer;
        cv::Mat tmpRgb;
        cv::flip(data.imageRaw(), tmpRgb, 1);
        data.setImageRaw(tmpRgb);
        UASSERT_MSG(data.cameraModels().size() <= 1 && !data.stereoCameraModel().isValidForProjection(),
                "Only single RGBD cameras are supported for mirroring.");
        if(data.cameraModels().size() && data.cameraModels()[0].cx())
        {
            CameraModel tmpModel(
                    data.cameraModels()[0].fx(),
                    data.cameraModels()[0].fy(),
                    float(data.imageRaw().cols) - data.cameraModels()[0].cx(),
                    data.cameraModels()[0].cy(),
                    data.cameraModels()[0].localTransform());
            data.setCameraModel(tmpModel);
        }
        if(!data.depthRaw().empty())
        {
            cv::Mat tmpDepth;
            cv::flip(data.depthRaw(), tmpDepth, 1);
            data.setDepthOrRightRaw(tmpDepth);
        }
        if(info) info->timeMirroring = timer.ticks();
    }

    if(_stereoToDepth && !data.imageRaw().empty() && data.stereoCameraModel().isValidForProjection() && !data.rightRaw().empty())
    {
        UDEBUG("");
        UTimer timer;
        cv::Mat depth = util2d::depthFromDisparity(
                _stereoDense->computeDisparity(data.imageRaw(), data.rightRaw()),
                data.stereoCameraModel().left().fx(),
                data.stereoCameraModel().baseline());
        // set Tx for stereo bundle adjustment (when used)
        CameraModel model = CameraModel(
                data.stereoCameraModel().left().fx(),
                data.stereoCameraModel().left().fy(),
                data.stereoCameraModel().left().cx(),
                data.stereoCameraModel().left().cy(),
                data.stereoCameraModel().localTransform(),
                -data.stereoCameraModel().baseline()*data.stereoCameraModel().left().fx());
        data.setCameraModel(model);
        data.setDepthOrRightRaw(depth);
        data.setStereoCameraModel(StereoCameraModel());
        if(info) info->timeDisparity = timer.ticks();
    }

    if(_scanFromDepth &&
        data.cameraModels().size() &&
        data.cameraModels().at(0).isValidForProjection() &&
        !data.depthRaw().empty())
    {
        UDEBUG("");
        if(data.laserScanRaw().empty())
        {
            UASSERT(_scanDecimation >= 1);
            UTimer timer;
            pcl::IndicesPtr validIndices(new std::vector<int>);
            pcl::PointCloud<pcl::PointXYZ>::Ptr cloud = util3d::cloudFromSensorData(
                    data,
                    _scanDecimation,
                    _scanMaxDepth,
                    _scanMinDepth,
                    validIndices.get());
            float maxPoints = (data.depthRaw().rows/_scanDecimation)*(data.depthRaw().cols/_scanDecimation);
            cv::Mat scan;
            const Transform & baseToScan = data.cameraModels()[0].localTransform();
            if(validIndices->size())
            {
                if(_scanVoxelSize>0.0f)
                {
                    cloud = util3d::voxelize(cloud, validIndices, _scanVoxelSize);
                    float ratio = float(cloud->size()) / float(validIndices->size());
                    maxPoints = ratio * maxPoints;
                }
                else if(!cloud->is_dense)
                {
                    pcl::PointCloud<pcl::PointXYZ>::Ptr denseCloud(new pcl::PointCloud<pcl::PointXYZ>);
                    pcl::copyPointCloud(*cloud, *validIndices, *denseCloud);
                    cloud = denseCloud;
                }

                if(cloud->size())
                {
                    if(_scanNormalsK>0)
                    {
                        Eigen::Vector3f viewPoint(baseToScan.x(), baseToScan.y(), baseToScan.z());
                        pcl::PointCloud<pcl::Normal>::Ptr normals = util3d::computeNormals(cloud, _scanNormalsK, viewPoint);
                        pcl::PointCloud<pcl::PointNormal>::Ptr cloudNormals(new pcl::PointCloud<pcl::PointNormal>);
                        pcl::concatenateFields(*cloud, *normals, *cloudNormals);
                        scan = util3d::laserScanFromPointCloud(*cloudNormals, baseToScan.inverse());
                    }
                    else
                    {
                        scan = util3d::laserScanFromPointCloud(*cloud, baseToScan.inverse());
                    }
                }
            }
            data.setLaserScanRaw(scan, LaserScanInfo((int)maxPoints, _scanMaxDepth, baseToScan));
            if(info) info->timeScanFromDepth = timer.ticks();
        }
        else
        {
            UWARN("Option to create laser scan from depth image is enabled, but "
                  "there is already a laser scan in the captured sensor data. Scan from "
                  "depth will not be created.");
        }
    }
}
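// A hedged, standalone sketch of the standard stereo relation that
// util2d::depthFromDisparity() is expected to apply above: for a rectified pair,
// depth = fx * baseline / disparity. This is an illustrative implementation, not
// the RTAB-Map one; non-positive disparities are mapped to zero depth.
#include <opencv2/core.hpp>

cv::Mat depthFromDisparitySketch(const cv::Mat & disparity32F, float fx, float baseline)
{
    CV_Assert(disparity32F.type() == CV_32FC1);
    cv::Mat depth(disparity32F.size(), CV_32FC1, cv::Scalar(0.0f));
    for(int r=0; r<disparity32F.rows; ++r)
    {
        for(int c=0; c<disparity32F.cols; ++c)
        {
            float d = disparity32F.at<float>(r, c);
            if(d > 0.0f)
            {
                depth.at<float>(r, c) = fx * baseline / d;
            }
        }
    }
    return depth;
}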