int main(int argc, char **argv)
{
    // Parse command line options.
    arg_base::set_help_option("-h");
    arg_parse(argc, argv);

    // Set debug level to 1.
    ntk::ntk_debug_level = 1;

    // Set the current directory to the application directory.
    // This is required to find the NITE config in the config/ directory.
    QApplication app (argc, argv);
    QDir::setCurrent(QApplication::applicationDirPath());

    // Declare the global OpenNI driver. Only one can be instantiated in a program.
    OpenniDriver ni_driver;

    // Declare the frame grabber.
    OpenniGrabber grabber(ni_driver, opt::kinect_id());

    // Optionally request the high resolution 1280x1024 RGB mode.
    if (opt::high_resolution())
        grabber.setHighRgbResolution(true);

    // Start the grabber.
    grabber.connectToDevice();
    grabber.start();

    // Holder for the current image.
    RGBDImage image;

    // Image post processor. Computes the mappings when the RGB resolution is 1280x1024.
    OpenniRGBDProcessor post_processor;

    // Wait for a new frame, get a local copy and post-process it.
    grabber.waitForNextFrame();
    grabber.copyImageTo(image);
    post_processor.processImage(image);

    ntk_ensure(image.calibration(), "Uncalibrated rgbd image, cannot project to 3D!");

    // Generate a mesh out of the RGB-D image.
    MeshGenerator generator;
    generator.setUseColor(true);
    generator.setMeshType(MeshGenerator::PointCloudMesh);
    generator.generate(image,
                       *image.calibration()->depth_pose,
                       *image.calibration()->rgb_pose);

    // Save the generated mesh to a PLY file.
    ntk_dbg(0) << "Saving mesh file to output.ply...";
    generator.mesh().saveToPlyFile("output.ply");

    grabber.stop();
    return 0;
}
void MeshGenerator :: generate(const RGBDImage& image,
                               const Pose3D& depth_pose,
                               const Pose3D& rgb_pose)
{
    // Fall back to the calibration poses when the provided ones are invalid.
    const Pose3D& real_depth_pose =
        depth_pose.isValid() ? depth_pose : *(image.calibration()->depth_pose);
    const Pose3D& real_rgb_pose =
        rgb_pose.isValid() ? rgb_pose : *(image.calibration()->rgb_pose);

    switch (m_mesh_type)
    {
    case TriangleMesh:
        return generateTriangleMesh(image, real_depth_pose, real_rgb_pose);
    case PointCloudMesh:
        return generatePointCloudMesh(image, real_depth_pose, real_rgb_pose);
    case SurfelsMesh:
        return generateSurfelsMesh(image, real_depth_pose, real_rgb_pose);
    }
}
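Because generate() falls back to the calibration poses whenever the poses it receives are invalid, a caller that just wants the calibrated geometry can pass default-constructed poses. A minimal usage sketch, assuming `image` is a calibrated, post-processed RGBDImage and that a default-constructed Pose3D reports isValid() == false; if that does not hold in your nestk version, pass the calibration poses explicitly as in the main() above.

    // Hedged usage sketch for MeshGenerator::generate().
    MeshGenerator generator;
    generator.setUseColor(true);
    generator.setMeshType(MeshGenerator::TriangleMesh);
    generator.generate(image, Pose3D(), Pose3D()); // invalid poses => calibration fallback
    generator.mesh().saveToPlyFile("triangles.ply");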
void filterColor(const RGBDImage& img, vector<Mat>& hsvs, Mat1b& result,
                 cv::Point3f upleft, cv::Point2f fudge,
                 int hue_from, int hue_to,
                 int sat_from, int sat_to,
                 int val_from, int val_to)
{
    const Mat1f& depth = img.depth();
    const Mat1b& mask = img.depthMask();
    const Mat& rgb = img.rgb();
    const RGBDCalibration* calib = img.calibration();
    if (!calib || upleft.y > rgb.rows || upleft.x > rgb.cols)
        return;

    // Region of interest in the RGB image, enlarged by the fudge margin
    // and clamped to the image borders.
    int wid = result.cols * STEP_SIZE;
    int ht = result.rows * STEP_SIZE;
    cv::Range rslice((int)(upleft.y - fudge.y), (int)(upleft.y + ht + fudge.y)),
              cslice((int)(upleft.x - fudge.x), (int)(upleft.x + wid + fudge.x));
    rslice.start = max(0, rslice.start);
    rslice.end = min(rgb.rows, rslice.end);
    cslice.start = max(0, cslice.start);
    cslice.end = min(rgb.cols, cslice.end);

    // Convert the RGB slice to HSV once; the split channels are cached in hsvs.
    if (hsvs.size() == 0)
    {
        Mat sliced_rgb(rgb, rslice, cslice);
        Mat hsv;
        cvtColor(sliced_rgb, hsv, CV_BGR2HSV);
        split(hsv, hsvs);
    }

    const Pose3D* rgb_pose = calib->rgb_pose;
    const Pose3D* depth_pose = calib->depth_pose;

    for (int r = 0; r < result.rows; r++)
    for (int c = 0; c < result.cols; c++)
    {
        int x = c * STEP_SIZE + upleft.x;
        int y = r * STEP_SIZE + upleft.y;
        if (y >= mask.rows || x >= mask.cols) // scan window may exceed the image
            continue;
        if (mask(y, x) == 0)
            continue;

        // Compute the RGB image location for this particular depth pixel.
        double dv = depth(y, x);
        Point3f pu = depth_pose->unprojectFromImage(Point2f(x, y), dv);
        Point3f prgb = rgb_pose->projectToImage(pu);
        int i_x = ntk::math::rnd(prgb.x) - cslice.start;
        int i_y = ntk::math::rnd(prgb.y) - rslice.start;
        if (is_yx_in_range(hsvs[0], i_y, i_x))
        {
            uchar hue = hsvs[0].at<uchar>(i_y, i_x);
            uchar sat = hsvs[1].at<uchar>(i_y, i_x);
            uchar val = hsvs[2].at<uchar>(i_y, i_x);
            // The hue interval may wrap around the circular hue axis:
            // if hue_from > hue_to, accept hues above hue_from or below hue_to.
            result(r, c) =
                (   ((hue_from > hue_to) && ((hue > hue_from) || (hue < hue_to)))
                 || ((hue_from < hue_to) && ((hue > hue_from) && (hue < hue_to))))
                && sat > sat_from && sat < sat_to
                && val > val_from && val < val_to;
        }
    }
}
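The wraparound branch matters for red, whose hue interval straddles 0 in OpenCV's 0-179 hue scale for 8-bit images. A hedged usage sketch, assuming `img` is a calibrated, post-processed RGBDImage as produced in the main() listings; the window position, margins and thresholds are illustrative values only.

    // Select red-ish pixels: hue_from=170 > hue_to=10 makes the range wrap through 0.
    std::vector<cv::Mat> hsvs;                            // filled lazily by filterColor
    cv::Mat1b red_mask(480 / STEP_SIZE, 640 / STEP_SIZE); // one cell per STEP_SIZE block
    red_mask = 0;
    filterColor(img, hsvs, red_mask,
                cv::Point3f(0, 0, 0),   // upper-left corner of the scan window
                cv::Point2f(8, 8),      // fudge margin around the RGB slice
                170, 10,                // hue: from > to, so the range wraps through 0
                80, 256,                // saturation in (80, 256); bounds are strict
                50, 256);               // value in (50, 256)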
void RelativePoseEstimatorFromRgbFeatures::setTargetImage(const RGBDImage &image)
{
    ntk_ensure(image.calibration(), "Image must be calibrated.");
    m_target_image = &image;
    if (!m_target_pose.isValid())
    {
        m_target_pose = *m_target_image->calibration()->depth_pose;
    }
    m_target_features = toPtr(new FeatureSet);
}
bool SurfelsRGBDModeler :: addNewView(const RGBDImage& image_, Pose3D& depth_pose)
{
    ntk::TimeCount tc("SurfelsRGBDModeler::addNewView", 1);
    const float max_camera_normal_angle = ntk::deg_to_rad(90);

    RGBDImage image;
    image_.copyTo(image);
    if (!image_.normal().data)
    {
        OpenniRGBDProcessor processor;
        processor.computeNormalsPCL(image);
    }

    Pose3D rgb_pose = depth_pose;
    rgb_pose.toRightCamera(image.calibration()->rgb_intrinsics,
                           image.calibration()->R,
                           image.calibration()->T);

    // Rotation-only poses used to transform normals between camera and world frames.
    Pose3D world_to_camera_normal_pose;
    world_to_camera_normal_pose.applyTransformBefore(cv::Vec3f(0,0,0), depth_pose.cvEulerRotation());
    Pose3D camera_to_world_normal_pose = world_to_camera_normal_pose;
    camera_to_world_normal_pose.invert();

    const Mat1f& depth_im = image.depth();
    Mat1b covered_pixels (depth_im.size());
    covered_pixels = 0;

    std::list<Surfel> surfels_to_reinsert;

    // Surfel updating.
    for (SurfelMap::iterator next_it = m_surfels.begin(); next_it != m_surfels.end(); )
    {
        SurfelMap::iterator surfel_it = next_it;
        ++next_it;

        Surfel& surfel = surfel_it->second;
        if (!surfel.enabled())
            continue;

        Point3f surfel_2d = depth_pose.projectToImage(surfel.location);
        bool surfel_deleted = false;
        int r = ntk::math::rnd(surfel_2d.y);
        int c = ntk::math::rnd(surfel_2d.x);
        int d = ntk::math::rnd(surfel_2d.z);
        if (!is_yx_in_range(depth_im, r, c)
            || !image.depthMask()(r, c)
            || !image.isValidNormal(r, c))
            continue;

        const float update_max_dist = getCompatibilityDistance(depth_im(r,c));

        Vec3f camera_normal = image.normal()(r, c);
        normalize(camera_normal);

        Vec3f world_normal = camera_to_world_normal_pose.cameraTransform(camera_normal);
        normalize(world_normal);

        Vec3f eyev = camera_eye_vector(depth_pose, r, c);
        double camera_angle = acos(camera_normal.dot(-eyev));
        if (camera_angle > max_camera_normal_angle)
            continue;

        float normal_angle = acos(world_normal.dot(surfel.normal));
        // Surfels have different normals, maybe two different faces of the same object.
        if (normal_angle > (m_update_max_normal_angle * M_PI / 180.0))
        {
            // Removal check. If a surfel has a different normal and is closer to the camera
            // than the new scan, remove it.
            if ((-surfel_2d.z) < depth_im(r,c) && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            continue;
        }

        // If the existing surfel is far from the new depth value:
        // - If the existing one had a worse point of view and was seen only once, remove it.
        // - Otherwise do not include the new one.
        if (std::abs(surfel_2d.z - depth_im(r,c)) > update_max_dist)
        {
            if (surfel.min_camera_angle > camera_angle && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            else
                covered_pixels(r,c) = 1;
            continue;
        }

        // Compatible surfel found: merge the new observation into it.
        const float depth = depth_im(r,c) + m_global_depth_offset;

        Point3f p3d = depth_pose.unprojectFromImage(Point2f(c,r), depth);
        cv::Vec3b rgb_color = bgr_to_rgb(image.mappedRgb()(r, c));

        Surfel image_surfel;
        image_surfel.location = p3d;
        image_surfel.normal = world_normal;
        image_surfel.color = rgb_color;
        image_surfel.min_camera_angle = camera_angle;
        image_surfel.n_views = 1;
        image_surfel.radius = computeSurfelRadius(depth, camera_normal[2], depth_pose.meanFocal());
        mergeToLeftSurfel(surfel, image_surfel);

        covered_pixels(r,c) = 1;

        // Does the merged surfel need to move to another grid cell?
        Cell new_cell = worldToCell(surfel.location);
        if (new_cell != surfel_it->first)
        {
            surfels_to_reinsert.push_back(surfel);
            m_surfels.erase(surfel_it);
        }
    }

    foreach_const_it(it, surfels_to_reinsert, std::list<Surfel>)
    {
        Cell new_cell = worldToCell(it->location);
        m_surfels.insert(std::make_pair(new_cell, *it));
    }

    return true;
}
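mergeToLeftSurfel() is not part of this listing. Purely as an illustration of what such a merge typically does, here is a view-count-weighted running average; this is a guess at the general technique, NOT nestk's actual implementation, which may weight or clamp differently.

    #include <algorithm>
    #include <cmath>

    // Illustrative only -- not nestk's mergeToLeftSurfel(). Blends the new
    // observation into the existing surfel, weighted by supporting view counts.
    static void mergeToLeftSurfelSketch(Surfel& dest, const Surfel& src)
    {
        const float w = float(dest.n_views) / (dest.n_views + src.n_views);
        dest.location = dest.location * w + src.location * (1.0f - w);
        dest.normal   = dest.normal * w + src.normal * (1.0f - w);
        float n = std::sqrt(dest.normal.dot(dest.normal));
        if (n > 1e-6f)
            dest.normal *= 1.0f / n; // renormalize the blended normal
        dest.min_camera_angle = std::min(dest.min_camera_angle, src.min_camera_angle);
        dest.n_views += src.n_views;
    }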
void RelativePoseEstimatorFromRgbFeatures::setSourceImage(const RGBDImage &image,
                                                          ntk::Ptr<FeatureSet> features)
{
    ntk_ensure(image.calibration(), "Image must be calibrated.");
    super::setSourceImage(image);
    m_source_features = features;
}
void RGBDFrameRecorder :: writeFrame(const RGBDImage& image, const std::string& frame_dir)
{
    std::string raw_frame_dir = cv::format("%s/raw", frame_dir.c_str());

    QDir dir (frame_dir.c_str());
    dir.mkpath("raw");

    std::string filename;

    if (m_save_rgb_pose && image.calibration())
    {
        filename = cv::format("%s/rgb_pose.avs", frame_dir.c_str());
        image.rgbPose().saveToAvsFile(filename.c_str());
    }

    if (!m_only_raw)
    {
        filename = cv::format("%s/color.png", frame_dir.c_str());
        imwrite(filename, image.rgb());
    }

    if (m_save_pcl_point_cloud)
    {
        filename = cv::format("%s/cloud.pcd", frame_dir.c_str());
#ifdef NESTK_USE_PCL
        pcl::PointCloud<pcl::PointXYZ> cloud;
        rgbdImageToPointCloud(cloud, image);
        pcl::io::savePCDFileASCII(filename.c_str(), cloud);
#endif
    }

    if (m_use_compressed_format)
        filename = cv::format("%s/raw/color.png", frame_dir.c_str());
    else
        filename = cv::format("%s/raw/color.bmp", frame_dir.c_str());
    imwrite(filename, image.rawRgb());

    if (!m_only_raw && image.mappedDepth().data)
    {
        filename = cv::format("%s/mapped_depth.png", frame_dir.c_str());
        imwrite_normalized(filename, image.mappedDepth());

        filename = cv::format("%s/mapped_color.png", frame_dir.c_str());
        imwrite(filename, image.mappedRgb());

        filename = cv::format("%s/depth.yml", frame_dir.c_str());
        imwrite_yml(filename, image.mappedDepth());
    }

    if (!m_only_raw)
    {
        filename = cv::format("%s/raw/depth.png", frame_dir.c_str());
        if (image.rawDepth().data)
            imwrite_normalized(filename.c_str(), image.rawDepth());

        filename = cv::format("%s/depth.png", frame_dir.c_str());
        if (image.depth().data)
            imwrite_normalized(filename.c_str(), image.depth());

        filename = cv::format("%s/intensity.png", frame_dir.c_str());
        if (image.intensity().data)
            imwrite_normalized(filename.c_str(), image.intensity());
    }

    if (image.rawDepth().data)
    {
        if (m_use_binary_raw)
        {
            filename = cv::format("%s/raw/depth.raw", frame_dir.c_str());
            imwrite_Mat1f_raw(filename.c_str(), image.rawDepth());
        }
        else
        {
            filename = cv::format("%s/raw/depth.yml", frame_dir.c_str());
            imwrite_yml(filename.c_str(), image.rawDepth());
        }
    }

    if (m_save_intensity && image.rawIntensity().data)
    {
        if (m_use_binary_raw)
        {
            filename = cv::format("%s/raw/intensity.raw", frame_dir.c_str());
            imwrite_Mat1f_raw(filename.c_str(), image.rawIntensity());
        }
        else
        {
            filename = cv::format("%s/raw/intensity.yml", frame_dir.c_str());
            imwrite_yml(filename.c_str(), image.rawIntensity());
        }
    }

    if (!m_only_raw)
    {
        filename = cv::format("%s/raw/amplitude.png", frame_dir.c_str());
        if (image.rawAmplitude().data)
            imwrite_normalized(filename.c_str(), image.rawAmplitude());

        filename = cv::format("%s/amplitude.png", frame_dir.c_str());
        if (image.amplitude().data)
            imwrite_normalized(filename.c_str(), image.amplitude());
    }

    if (image.rawAmplitude().data)
    {
        if (m_use_binary_raw)
        {
            filename = cv::format("%s/raw/amplitude.raw", frame_dir.c_str());
            imwrite_Mat1f_raw(filename.c_str(), image.rawAmplitude());
        }
        else
        {
            filename = cv::format("%s/raw/amplitude.yml", frame_dir.c_str());
            imwrite_yml(filename.c_str(), image.rawAmplitude());
        }
    }
}
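Read off the code above, a fully-enabled writeFrame() call lays out one frame directory roughly as follows; which files actually appear depends on the m_only_raw, m_save_* and m_use_* flags and on which channels carry data.

    // frame_dir/
    //   rgb_pose.avs                  (m_save_rgb_pose && calibrated image)
    //   color.png                     (!m_only_raw)
    //   cloud.pcd                     (m_save_pcl_point_cloud && NESTK_USE_PCL)
    //   mapped_depth.png, mapped_color.png, depth.yml   (!m_only_raw && mapped depth)
    //   depth.png, intensity.png, amplitude.png         (!m_only_raw, normalized previews)
    //   raw/
    //     color.png or color.bmp      (m_use_compressed_format selects PNG)
    //     depth.png                   (!m_only_raw, normalized preview)
    //     depth.raw or depth.yml      (m_use_binary_raw selects the binary dump)
    //     intensity.raw or intensity.yml   (m_save_intensity)
    //     amplitude.png               (!m_only_raw)
    //     amplitude.raw or amplitude.yml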
void PlaneEstimator :: estimate(RGBDImage& image, Mat1b& plane_points)
{
    // Pass from 3D to the optimizer.
    const cv::Mat3f& normal_image = image.normal();
    const cv::Mat1f& distance_image = image.depth();
    cv::Mat1b& mask_image = image.depthMaskRef();

    if (!image.normal().data)
    {
        ntk_dbg(0) << "WARNING: you must activate the normal filter to get plane estimation!";
        return;
    }

    cv::Mat1b objfilter;
    mask_image.copyTo(objfilter);

    plane_points.create(normal_image.size());
    plane_points = 0;

    double min[dim];
    double max[dim];
    for (int i = 0; i < dim; i++)
    {
        max[i] = 1000.0;
        min[i] = -1000.0;
    }

    m_solver.Setup(min, max, DifferentialEvolutionSolver::stBest1Exp, 0.8, 0.75);

    // Early estimation of plane points by projecting the normal values.
    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
        if (objfilter.data && objfilter(r,c))
        {
            cv::Vec3f normal = normal_image(r,c);
            double prod = normal.dot(m_ref_plane);
            if (prod > 0.95)
                plane_points(r,c) = 255;
            else
                plane_points(r,c) = 0;
        }
    }

    // Clean up the very first surface estimation.
    dilate(plane_points, plane_points, cv::Mat());
    erode(plane_points, plane_points, cv::Mat());

    // Collect the 3D points of the plane candidates for the solver.
    std::vector<Point3f>& g = m_solver.planePointsRef();
    g.clear();
    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
        if (plane_points(r,c))
        {
            // Possible table member!
            Point3f p3d = image.calibration()->depth_pose->unprojectFromImage(Point2f(c,r), distance_image(r,c));
            g.push_back(p3d);
        }
    }

    // Run the differential evolution solver.
    m_solver.Solve(max_generations);
    double *solution = m_solver.Solution();

    // Normalize the plane coefficients so they sum to one.
    float suma = solution[0] + solution[1] + solution[2] + solution[3];
    for (int i = 0; i < 4; i++)
        solution[i] = solution[i] / suma;

    // Update the plane estimate.
    m_plane.set(solution[0], solution[1], solution[2], solution[3]);

    // Final estimation of plane points by projecting the normal values.
    cv::Vec3f diffevolnormal(solution[0], solution[1], solution[2]);
    for (int r = 1; r < plane_points.rows-1; ++r)
    for (int c = 1; c < plane_points.cols-1; ++c)
    {
        if (objfilter.data && objfilter(r,c))
        {
            cv::Vec3f normal = normal_image(r,c);
            double prod = normal.dot(diffevolnormal);
            if (prod > 0.5)
                plane_points(r,c) = 255;
            else
                plane_points(r,c) = 0;
        }
    }

    // Clean up the DE plane-pixels solution.
    dilate(plane_points, plane_points, cv::Mat());
    erode(plane_points, plane_points, cv::Mat());
}
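The DE cost function lives inside DifferentialEvolutionSolver and is not shown here; presumably it scores a candidate (a, b, c, d) against the collected points g. For reference, the standard point-to-plane distance such a cost would be built on is sketched below; the helper name is ours, not the library's.

    #include <cmath>

    // Point-to-plane distance for a candidate solution s = (a, b, c, d).
    // Illustrates the usual metric only; the actual DE cost is not shown
    // in this listing.
    static inline double planePointDistance(const double s[4], const cv::Point3f& p)
    {
        double num = std::abs(s[0]*p.x + s[1]*p.y + s[2]*p.z + s[3]);
        double den = std::sqrt(s[0]*s[0] + s[1]*s[1] + s[2]*s[2]);
        return num / den; // |ax + by + cz + d| / ||(a, b, c)||
    }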
bool RelativePoseEstimatorICP :: estimateNewPose(const RGBDImage& image)
{
    if (m_ref_cloud.points.size() < 1)
    {
        ntk_dbg(1) << "Reference cloud was empty";
        return false;
    }

    PointCloud<PointXYZ>::Ptr target = m_ref_cloud.makeShared();
    PointCloud<PointXYZ>::Ptr source (new PointCloud<PointXYZ>());
    rgbdImageToPointCloud(*source, image);

    // Downsample both clouds with a voxel grid before running ICP.
    PointCloud<PointXYZ>::Ptr filtered_source (new PointCloud<PointXYZ>());
    PointCloud<PointXYZ>::Ptr filtered_target (new PointCloud<PointXYZ>());
    pcl::VoxelGrid<pcl::PointXYZ> grid;
    grid.setLeafSize (m_voxel_leaf_size, m_voxel_leaf_size, m_voxel_leaf_size);
    grid.setInputCloud(source);
    grid.filter(*filtered_source);
    grid.setInputCloud(target);
    grid.filter(*filtered_target);

    PointCloud<PointXYZ> cloud_reg;
    IterativeClosestPoint<PointXYZ, PointXYZ> reg;
    reg.setMaximumIterations (m_max_iterations);
    reg.setTransformationEpsilon (1e-5);
    reg.setMaxCorrespondenceDistance (m_distance_threshold);
    reg.setInputCloud (filtered_source);
    reg.setInputTarget (filtered_target);
    reg.align (cloud_reg);

    if (!reg.hasConverged())
    {
        ntk_dbg(1) << "ICP did not converge, ignoring.";
        return false;
    }

    // Convert the Eigen transform into an OpenCV 4x4 matrix.
    Eigen::Matrix4f t = reg.getFinalTransformation ();
    cv::Mat1f T(4,4);
    for (int r = 0; r < 4; ++r)
        for (int c = 0; c < 4; ++c)
            T(r,c) = t(r,c);

    Pose3D icp_pose;
    icp_pose.setCameraTransform(T);
    ntk_dbg_print(icp_pose.cvTranslation(), 1);

    // Pose3D stores the transformation from 3D space to image.
    // So we have to invert everything so that unprojecting from
    // image to 3D gives the right transformation.
    // H2' = ICP * H1'
    m_current_pose.resetCameraTransform();
    m_current_pose = *image.calibration()->depth_pose;
    m_current_pose.invert();
    m_current_pose.applyTransformAfter(icp_pose);
    m_current_pose.invert();
    return true;
}
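The pose update at the end reads as H2' = ICP * H1' on camera-to-world transforms: the world-to-camera pose is inverted, the ICP correction is applied after it, and the result is inverted back. The same composition on bare 4x4 matrices, for illustration only; the function and variable names are ours.

    #include <Eigen/Geometry>

    // Illustration of the final pose composition on plain 4x4 matrices.
    // H1_world_to_cam maps world -> camera for the reference pose; `icp` aligns
    // the filtered source onto the target, as returned by getFinalTransformation().
    Eigen::Matrix4f composeIcpPose(const Eigen::Matrix4f& H1_world_to_cam,
                                   const Eigen::Matrix4f& icp)
    {
        Eigen::Matrix4f cam_to_world = H1_world_to_cam.inverse(); // invert: H1'
        Eigen::Matrix4f new_cam_to_world = icp * cam_to_world;    // H2' = ICP * H1'
        return new_cam_to_world.inverse();                        // back to world -> camera
    }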
int main(int argc, char **argv)
{
    // Parse command line options.
    arg_base::set_help_option("-h");
    arg_parse(argc, argv);

    // Set debug level to 1.
    ntk::ntk_debug_level = 1;

    // Set the current directory to the application directory.
    // This is required to find the NITE config in the config/ directory.
    QApplication app (argc, argv);
    QDir::setCurrent(QApplication::applicationDirPath());

    // Declare the global OpenNI driver. Only one can be instantiated in a program.
    OpenniDriver ni_driver;

    // Declare the frame grabber.
    OpenniGrabber grabber(ni_driver, opt::kinect_id());

    // Optionally request the high resolution 1280x1024 RGB mode.
    if (opt::high_resolution())
        grabber.setHighRgbResolution(true);

    // Start the grabber.
    grabber.connectToDevice();
    grabber.start();

    // Holder for the current image.
    RGBDImage image;

    // Image post processor. Computes the mappings when the RGB resolution is 1280x1024.
    OpenniRGBDProcessor post_processor;

    // Wait for a new frame, get a local copy and post-process it.
    grabber.waitForNextFrame();
    grabber.copyImageTo(image);
    post_processor.processImage(image);

    ntk_ensure(image.calibration(), "Uncalibrated rgbd image, cannot project to 3D!");

    // Convert the image into a point cloud and detect clusters lying on a table plane.
    PointCloud<PointXYZ> cloud;
    rgbdImageToPointCloud(cloud, image);

    TableObjectDetector<PointXYZ> detector;
    detector.setObjectVoxelSize(0.003); // 3 mm voxels.
    bool ok = detector.detect(cloud);
    ntk_throw_exception_if(!ok, "No cluster detected on a table plane!");

    // Model each detected cluster and save it as a PLY mesh.
    for (int cluster_id = 0; cluster_id < (int)detector.objectClusters().size(); ++cluster_id)
    {
        TableObjectRGBDModeler modeler;
        modeler.feedFromTableObjectDetector(detector, cluster_id);
        Pose3D pose = *image.calibration()->depth_pose;
        modeler.addNewView(image, pose);
        modeler.computeMesh();
        modeler.currentMesh().saveToPlyFile(cv::format("object%d.ply", cluster_id).c_str());
    }

    grabber.stop();
    grabber.disconnectFromDevice();
    return 0;
}