void MeshGenerator::generatePointCloudMesh(const RGBDImage& image,
                                           const Pose3D& depth_pose,
                                           const Pose3D& rgb_pose)
{
    m_mesh.clear();
    m_mesh.vertices.reserve(image.depth().rows * image.depth().cols);
    m_mesh.colors.reserve(image.depth().rows * image.depth().cols);
    m_mesh.normals.reserve(image.depth().rows * image.depth().cols);

    const cv::Mat1f& depth_im = image.depth();
    const cv::Mat1b& mask_im = image.depthMask();

    cv::Mat3f voxels (depth_im.size());
    cv::Mat3f rgb_points (depth_im.size());

    // Keep roughly m_resolution_factor^2 of the pixels by stepping through
    // the image with a fractional stride and rounding to the nearest cell.
    cv::Mat1b subsample_mask(mask_im.size());
    subsample_mask = 0;
    for (float r = 0; r < subsample_mask.rows - 1; r += 1.0 / m_resolution_factor)
        for (float c = 0; c < subsample_mask.cols - 1; c += 1.0 / m_resolution_factor)
            subsample_mask(ntk::math::rnd(r), ntk::math::rnd(c)) = 1;
    subsample_mask = mask_im & subsample_mask;

    depth_pose.unprojectFromImage(depth_im, subsample_mask, voxels);
    if (m_use_color && image.hasRgb())
        rgb_pose.projectToImage(voxels, subsample_mask, rgb_points);

    for (int r = 0; r < voxels.rows; ++r)
    {
        Vec3f* voxels_data = voxels.ptr<Vec3f>(r);
        const uchar* mask_data = subsample_mask.ptr<uchar>(r);

        for (int c = 0; c < voxels.cols; ++c)
        {
            if (!mask_data[c])
                continue;

            Vec3b color (0,0,0);
            if (m_use_color)
            {
                // Sample the RGB image at the reprojected 3D point.
                Point3f prgb = rgb_points(r,c);
                int i_y = ntk::math::rnd(prgb.y);
                int i_x = ntk::math::rnd(prgb.x);
                if (is_yx_in_range(image.rgb(), i_y, i_x))
                {
                    Vec3b bgr = image.rgb()(i_y, i_x);
                    color = Vec3b(bgr[2], bgr[1], bgr[0]);
                }
            }
            else
            {
                // Fall back to intensity, or to a gray level derived from depth.
                int g = 0;
                if (image.intensity().data)
                    g = image.intensity()(r,c);
                else
                    g = 255 * voxels_data[c][2] / 10.0;
                color = Vec3b(g,g,g);
            }

            m_mesh.vertices.push_back(voxels_data[c]);
            m_mesh.colors.push_back(color);
        }
    }
}
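// A minimal standalone sketch (not part of the original class) of the
// fractional-stride subsampling used above, assuming only OpenCV. A factor
// f in (0,1] keeps roughly f*f of the pixels by stepping through the image
// with stride 1/f; cvRound stands in for ntk::math::rnd here, and the
// helper name is illustrative.
#include <opencv2/core/core.hpp>

static cv::Mat1b makeSubsampleMask(cv::Size size, float factor)
{
    cv::Mat1b mask(size, (uchar)0);
    const float step = 1.0f / factor;            // e.g. factor 0.5 -> every 2nd pixel
    for (float r = 0; r < size.height - 1; r += step)
        for (float c = 0; c < size.width - 1; c += step)
            mask(cvRound(r), cvRound(c)) = 1;    // mark the nearest grid cell
    return mask;
}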
void MeshGenerator::generateTriangleMesh(const RGBDImage& image,
                                         const Pose3D& depth_pose,
                                         const Pose3D& rgb_pose)
{
    const Mat1f& depth_im = image.depth();
    const Mat1b& mask_im = image.depthMask();

    m_mesh.clear();

    // Pick a texture: RGB if requested, normalized intensity otherwise,
    // plain white as a last resort.
    if (m_use_color)
        image.rgb().copyTo(m_mesh.texture);
    else if (image.intensity().data)
        toMat3b(normalize_toMat1b(image.intensity())).copyTo(m_mesh.texture);
    else
    {
        m_mesh.texture.create(depth_im.size());
        m_mesh.texture = Vec3b(255,255,255);
    }

    m_mesh.vertices.reserve(depth_im.cols * depth_im.rows);
    m_mesh.texcoords.reserve(depth_im.cols * depth_im.rows);
    m_mesh.colors.reserve(depth_im.cols * depth_im.rows);

    Mat1i vertice_map(depth_im.size());
    vertice_map = -1;

    // First pass: unproject every valid depth pixel and remember its vertex index.
    for_all_rc(depth_im)
    {
        if (!mask_im(r,c))
            continue;
        double depth = depth_im(r,c);
        Point3f p3d = depth_pose.unprojectFromImage(Point3f(c,r,depth));

        Point3f p2d_rgb;
        Point2f texcoords;
        if (m_use_color)
        {
            p2d_rgb = rgb_pose.projectToImage(p3d);
            texcoords = Point2f(p2d_rgb.x/image.rgb().cols, p2d_rgb.y/image.rgb().rows);
        }
        else
        {
            p2d_rgb = Point3f(c,r,depth);
            texcoords = Point2f(p2d_rgb.x/image.intensity().cols, p2d_rgb.y/image.intensity().rows);
        }

        vertice_map(r,c) = m_mesh.vertices.size();
        m_mesh.vertices.push_back(p3d);
        // m_mesh.colors.push_back(bgr_to_rgb(im.rgb()(p2d_rgb.y, p2d_rgb.x)));
        m_mesh.texcoords.push_back(texcoords);
    }

    // Second pass: connect neighboring vertices into up to two triangles per
    // pixel, skipping edges across depth discontinuities.
    for_all_rc(vertice_map)
    {
        if (vertice_map(r,c) < 0)
            continue;

        if ((c < vertice_map.cols - 1) && (r < vertice_map.rows - 1)
            && (vertice_map(r+1,c) >= 0) && (vertice_map(r,c+1) >= 0)
            && (std::abs(depth_im(r,c) - depth_im(r+1,c)) < m_max_delta_depth)
            && (std::abs(depth_im(r,c) - depth_im(r,c+1)) < m_max_delta_depth))
        {
            Face f;
            f.indices[2] = vertice_map(r,c);
            f.indices[1] = vertice_map(r,c+1);
            f.indices[0] = vertice_map(r+1,c);
            m_mesh.faces.push_back(f);
        }

        if ((c > 0) && (r < vertice_map.rows - 1)
            && (vertice_map(r+1,c) >= 0) && (vertice_map(r+1,c-1) >= 0)
            && (std::abs(depth_im(r,c) - depth_im(r+1,c)) < m_max_delta_depth)
            && (std::abs(depth_im(r,c) - depth_im(r+1,c-1)) < m_max_delta_depth))
        {
            Face f;
            f.indices[2] = vertice_map(r,c);
            f.indices[1] = vertice_map(r+1,c);
            f.indices[0] = vertice_map(r+1,c-1);
            m_mesh.faces.push_back(f);
        }
    }

    m_mesh.computeNormalsFromFaces();
}
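// Illustrative helper (hypothetical, not in the original API): the
// discontinuity predicate applied above before joining two neighboring depth
// pixels with a triangle edge. Each valid pixel can spawn two triangles,
// (r,c)-(r,c+1)-(r+1,c) and (r,c)-(r+1,c)-(r+1,c-1), and an edge is created
// only when every depth gap along it stays below max_delta_depth.
#include <opencv2/core/core.hpp>
#include <cmath>

static bool depthCompatible(const cv::Mat1f& depth, int r1, int c1,
                            int r2, int c2, float max_delta_depth)
{
    // Neighboring pixels on the same surface have similar depths; a large
    // jump indicates an occlusion boundary that must not be triangulated.
    return std::abs(depth(r1, c1) - depth(r2, c2)) < max_delta_depth;
}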
void MeshGenerator::generateSurfelsMesh(const RGBDImage& image,
                                        const Pose3D& depth_pose,
                                        const Pose3D& rgb_pose)
{
    double min_val = 0, max_val = 0;
    if (image.amplitude().data)
        minMaxLoc(image.amplitude(), &min_val, &max_val);

    m_mesh.clear();

    const cv::Mat1f& depth_im = image.depth();
    const cv::Mat1b& mask_im = image.depthMask();
    for_all_rc(depth_im)
    {
        int i_r = r;
        int i_c = c;
        if (!is_yx_in_range(depth_im, i_r, i_c))
            continue;
        if (!mask_im(r,c))
            continue;

        double depth = depth_im(i_r,i_c);
        cv::Point3f p = depth_pose.unprojectFromImage(Point2f(c,r), depth);
        Point3f normal = image.normal().data ? image.normal()(i_r, i_c) : Vec3f(0,0,1);

        Vec3b color (0,0,0);
        if (m_use_color)
        {
            cv::Point3f prgb = rgb_pose.projectToImage(p);
            int i_y = ntk::math::rnd(prgb.y);
            int i_x = ntk::math::rnd(prgb.x);
            if (is_yx_in_range(image.rgb(), i_y, i_x))
            {
                Vec3b bgr = image.rgb()(i_y, i_x);
                color = Vec3b(bgr[2], bgr[1], bgr[0]);
            }
        }
        else
        {
            int g = 0;
            if (image.amplitude().data)
                g = 255.0 * (image.amplitude()(i_r,i_c) - min_val) / (max_val - min_val);
            else
                g = 255 * depth / 10.0;
            color = Vec3b(g,g,g);
        }

        Surfel s;
        s.color = color;
        s.confidence = 0;
        s.location = p;
        s.normal = normal;
        s.n_views = 1;
        double normal_z = std::max(normal.z, 0.5f);
        s.radius = m_resolution_factor * ntk::math::sqrt1_2 * depth
                   / (depth_pose.focalX() * normal_z);
        m_mesh.addSurfel(s);
    }
}
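// Sketch of the surfel radius heuristic used above, extracted into a free
// function for clarity (the helper name is illustrative). Under the pinhole
// model a one-pixel footprint at depth z back-projects to roughly z / fx
// meters; dividing by the normal's z component enlarges slanted surfels so
// neighbors keep overlapping, and sqrt(1/2) matches ntk::math::sqrt1_2 above.
#include <algorithm>

static float surfelRadius(float depth, float fx, float normal_z,
                          float resolution_factor)
{
    // Clamp the normal's z component to avoid huge radii at grazing angles,
    // as the code above does with std::max(normal.z, 0.5f).
    const float nz = std::max(normal_z, 0.5f);
    return resolution_factor * 0.7071067811865476f * depth / (fx * nz);
}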
void calibrate_kinect_depth(std::vector< std::vector<Point2f> >& stereo_corners,
                            std::vector< DepthCalibrationPoint >& depth_values)
{
    std::vector< std::vector<Point2f> > good_corners;
    stereo_corners.resize(global::images_list.size());

    // First pass: detect checkerboard corners in every IR/intensity image.
    for (int i_image = 0; i_image < global::images_list.size(); ++i_image)
    {
        QString filename = global::images_list[i_image];
        QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));

        std::string full_filename;
        cv::Mat3b image;
        load_intensity_file(cur_image_dir.path().toStdString(), full_filename, image);
        ntk_ensure(image.data, "Could not load color image");
        kinect_shift_ir_to_depth(image);

        std::vector<Point2f> current_view_corners;
        calibrationCorners(full_filename, "corners",
                           global::opt_pattern_width(), global::opt_pattern_height(),
                           current_view_corners, image, 1);

        if (current_view_corners.size() == global::opt_pattern_height()*global::opt_pattern_width())
        {
            good_corners.push_back(current_view_corners);
            stereo_corners[i_image] = current_view_corners;
            showCheckerboardCorners(image, current_view_corners, 1);
        }
        else
        {
            ntk_dbg(0) << "Warning: corners not detected";
            stereo_corners[i_image].resize(0);
        }
    }

    std::vector< std::vector<Point3f> > pattern_points;
    calibrationPattern(pattern_points,
                       global::opt_pattern_width(), global::opt_pattern_height(),
                       global::opt_square_size(), good_corners.size());
    ntk_assert(pattern_points.size() == good_corners.size(), "Invalid points size");

    std::vector<Mat> rvecs, tvecs;
    int flags = 0;
    if (global::opt_ignore_distortions())
        flags = CV_CALIB_ZERO_TANGENT_DIST;

    double error = calibrateCamera(pattern_points, good_corners, global::image_size,
                                   global::depth_intrinsics, global::depth_distortion,
                                   rvecs, tvecs, flags);

    if (global::opt_ignore_distortions())
        global::depth_distortion = 0.f;

    ntk_dbg_print(error, 1);

    // Second pass: re-detect corners on undistorted images and collect
    // (raw depth, expected depth) pairs for depth calibration.
    int good_i = 0;
    foreach_idx(stereo_i, stereo_corners)
    {
        if (stereo_corners[stereo_i].size() > 0)
        {
            QString filename = global::images_list[stereo_i];
            QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));

            std::string full_filename;
            cv::Mat3b image;
            load_intensity_file(cur_image_dir.path().toStdString(), full_filename, image);
            ntk_ensure(image.data, "Could not load intensity image");
            kinect_shift_ir_to_depth(image);

            cv::Mat1f depth_image;
            if (is_file(cur_image_dir.absoluteFilePath("raw/depth.yml").toStdString()))
            {
                full_filename = cur_image_dir.absoluteFilePath("raw/depth.yml").toStdString();
                depth_image = imread_yml(full_filename);
            }
            else if (is_file(cur_image_dir.absoluteFilePath("raw/depth.raw").toStdString()))
            {
                full_filename = cur_image_dir.absoluteFilePath("raw/depth.raw").toStdString();
                depth_image = imread_Mat1f_raw(full_filename);
            }
            ntk_ensure(depth_image.data, "Could not load depth image");

            cv::Mat3b undistorted_image;
            if (global::opt_ignore_distortions())
                image.copyTo(undistorted_image);
            else
                undistort(image, undistorted_image, global::depth_intrinsics, global::depth_distortion);

            std::vector<Point2f> current_view_corners;
            calibrationCorners(full_filename, "corners",
                               global::opt_pattern_width(), global::opt_pattern_height(),
                               current_view_corners, undistorted_image, 1);

            if (current_view_corners.size() == (global::opt_pattern_width()*global::opt_pattern_height()))
            {
                stereo_corners[stereo_i] = current_view_corners;
                showCheckerboardCorners(undistorted_image, stereo_corners[stereo_i], 200);
            }
            else
            {
                stereo_corners[stereo_i].resize(0);
                continue;
            }

            // Generate calibration points
            {
                // FIXME: why do rvecs and tvecs from calibrateCamera seem to be nonsense?
                // Calling findExtrinsics another time to get good chessboard estimations.
                cv::Mat1f H;
                estimate_checkerboard_pose(pattern_points[0], current_view_corners,
                                           global::depth_intrinsics, H);
                Pose3D pose;
                pose.setCameraParametersFromOpencv(global::depth_intrinsics);
                ntk_dbg_print(pose, 1);
                pose.setCameraTransform(H);

                foreach_idx(pattern_i, pattern_points[0])
                {
                    ntk_dbg_print(pattern_points[0][pattern_i], 1);
                    Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
                    ntk_dbg_print(p, 1);
                    double kinect_raw = depth_image(p.y, p.x);
                    if (!(kinect_raw < 2047)) continue; // 2047 marks invalid 11-bit disparity
                    ntk_dbg_print(kinect_raw, 1);
                    double linear_depth = 1.0 / (kinect_raw * -0.0030711016 + 3.3309495161);
                    const float k1 = 1.1863;
                    const float k2 = 2842.5;
                    const float k3 = 0.1236;
                    double tan_depth = k3 * tanf(kinect_raw/k2 + k1);
                    ntk_dbg_print(linear_depth, 1);
                    ntk_dbg_print(tan_depth, 1);
                    depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
                }
            }
            ++good_i;
        }
    }
}
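// The two 11-bit Kinect disparity-to-meters models compared above, pulled
// out into standalone helpers for clarity (the helper names are
// illustrative). The magic constants are the community-calibrated values
// already present in the function above.
#include <cmath>

static float kinectRawToMetersLinear(float raw)
{
    // Linear model in inverse depth: 1/z is affine in the raw disparity.
    return 1.0f / (raw * -0.0030711016f + 3.3309495161f);
}

static float kinectRawToMetersTan(float raw)
{
    // Tangent-based model, an alternative fit over the same raw range.
    const float k1 = 1.1863f, k2 = 2842.5f, k3 = 0.1236f;
    return k3 * std::tan(raw / k2 + k1);
}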
bool SurfelsRGBDModeler::addNewView(const RGBDImage& image_, Pose3D& depth_pose)
{
    ntk::TimeCount tc("SurfelsRGBDModeler::addNewView", 1);
    const float max_camera_normal_angle = ntk::deg_to_rad(90);

    RGBDImage image;
    image_.copyTo(image);
    if (!image_.normal().data)
    {
        OpenniRGBDProcessor processor;
        processor.computeNormalsPCL(image);
    }

    Pose3D rgb_pose = depth_pose;
    rgb_pose.toRightCamera(image.calibration()->rgb_intrinsics,
                           image.calibration()->R, image.calibration()->T);

    // Rotation-only poses used to transform normals between camera and world frames.
    Pose3D world_to_camera_normal_pose;
    world_to_camera_normal_pose.applyTransformBefore(cv::Vec3f(0,0,0), depth_pose.cvEulerRotation());
    Pose3D camera_to_world_normal_pose = world_to_camera_normal_pose;
    camera_to_world_normal_pose.invert();

    const Mat1f& depth_im = image.depth();
    Mat1b covered_pixels (depth_im.size());
    covered_pixels = 0;

    std::list<Surfel> surfels_to_reinsert;

    // Surfel updating.
    for (SurfelMap::iterator next_it = m_surfels.begin(); next_it != m_surfels.end(); )
    {
        SurfelMap::iterator surfel_it = next_it;
        ++next_it;

        Surfel& surfel = surfel_it->second;
        if (!surfel.enabled())
            continue;

        Point3f surfel_2d = depth_pose.projectToImage(surfel.location);
        bool surfel_deleted = false;
        int r = ntk::math::rnd(surfel_2d.y);
        int c = ntk::math::rnd(surfel_2d.x);
        int d = ntk::math::rnd(surfel_2d.z);
        if (!is_yx_in_range(depth_im, r, c)
            || !image.depthMask()(r, c)
            || !image.isValidNormal(r,c))
            continue;

        const float update_max_dist = getCompatibilityDistance(depth_im(r,c));

        Vec3f camera_normal = image.normal()(r, c);
        normalize(camera_normal);

        Vec3f world_normal = camera_to_world_normal_pose.cameraTransform(camera_normal);
        normalize(world_normal);

        Vec3f eyev = camera_eye_vector(depth_pose, r, c);
        double camera_angle = acos(camera_normal.dot(-eyev));

        if (camera_angle > max_camera_normal_angle)
            continue;

        float normal_angle = acos(world_normal.dot(surfel.normal));
        // Surfels have different normals, maybe two different faces of the same object.
        if (normal_angle > (m_update_max_normal_angle*M_PI/180.0))
        {
            // Removal check. If a surfel has a different normal and is closer to
            // the camera than the new scan, remove it.
            if ((-surfel_2d.z) < depth_im(r,c) && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            continue;
        }

        // If the existing surfel is far from the new depth value:
        // - If the existing one had a worse point of view and was seen only once, remove it.
        // - Otherwise do not include the new one.
        if (std::abs(surfel_2d.z - depth_im(r,c)) > update_max_dist)
        {
            if (surfel.min_camera_angle > camera_angle && surfel.n_views < 3)
            {
                m_surfels.erase(surfel_it);
                surfel_deleted = true;
            }
            else
                covered_pixels(r,c) = 1;
            continue;
        }

        // Compatible surfel found: merge the new observation into it.
        const float depth = depth_im(r,c) + m_global_depth_offset;

        Point3f p3d = depth_pose.unprojectFromImage(Point2f(c,r), depth);
        cv::Vec3b rgb_color = bgr_to_rgb(image.mappedRgb()(r, c));

        Surfel image_surfel;
        image_surfel.location = p3d;
        image_surfel.normal = world_normal;
        image_surfel.color = rgb_color;
        image_surfel.min_camera_angle = camera_angle;
        image_surfel.n_views = 1;
        image_surfel.radius = computeSurfelRadius(depth, camera_normal[2], depth_pose.meanFocal());
        mergeToLeftSurfel(surfel, image_surfel);

        covered_pixels(r,c) = 1;

        // Does the merged surfel need to move to another cell?
        Cell new_cell = worldToCell(surfel.location);
        if (new_cell != surfel_it->first)
        {
            surfels_to_reinsert.push_back(surfel);
            m_surfels.erase(surfel_it);
        }
    }

    foreach_const_it(it, surfels_to_reinsert, std::list<Surfel>)
    {
        Cell new_cell = worldToCell(it->location);
        m_surfels.insert(std::make_pair(new_cell, *it));
    }

    return true;
}
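// Hypothetical sketch of the visibility test applied above: a surfel is only
// updated when the angle between its camera-space normal and the viewing ray
// stays below a threshold. This assumes eye_vector is a normalized ray from
// the camera center through the pixel, matching how camera_eye_vector is
// used above; the helper name is illustrative.
#include <opencv2/core/core.hpp>
#include <cmath>

static bool facingCamera(const cv::Vec3f& camera_normal,
                         const cv::Vec3f& eye_vector,
                         float max_angle_rad)
{
    // -eye_vector points from the surface back toward the camera, so a small
    // angle between it and the normal means the surface faces the sensor.
    return std::acos(camera_normal.dot(-eye_vector)) <= max_angle_rad;
}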
void calibrate_kinect_depth(std::vector< DepthCalibrationPoint >& depth_values)
{
    std::vector< std::vector<Point3f> > pattern_points;
    calibrationPattern(pattern_points,
                       global::opt_pattern_width(), global::opt_pattern_height(),
                       global::opt_square_size(), global::images_list.size());

    for (int i_image = 0; i_image < global::images_list.size(); ++i_image)
    {
        // Generate depth calibration points
        QString filename = global::images_list[i_image];
        QDir cur_image_dir (global::images_dir.absoluteFilePath(filename));
        std::string full_filename = cur_image_dir.absoluteFilePath("raw/color.png").toStdString();

        RGBDImage image;
        OpenniRGBDProcessor processor;
        processor.setFilterFlag(RGBDProcessorFlags::ComputeMapping, true);
        image.loadFromDir(cur_image_dir.absolutePath().toStdString(), &global::calibration, &processor);

        imshow_normalized("mapped depth", image.mappedDepth());
        imshow("color", image.rgb());

        std::vector<Point2f> current_view_corners;
        calibrationCorners(full_filename, "corners",
                           global::opt_pattern_width(), global::opt_pattern_height(),
                           current_view_corners, image.rgb(), 1, global::pattern_type);

        if (current_view_corners.size() != (global::opt_pattern_width()*global::opt_pattern_height()))
        {
            ntk_dbg(1) << "Corners not detected in " << cur_image_dir.absolutePath().toStdString();
            continue;
        }

        // FIXME: why do rvecs and tvecs from calibrateCamera seem to be nonsense?
        // Calling findExtrinsics another time to get good chessboard estimations.
        cv::Mat1f H;
        estimate_checkerboard_pose(pattern_points[0], current_view_corners,
                                   global::calibration.rgb_intrinsics, H);
        Pose3D pose;
        pose.setCameraParametersFromOpencv(global::calibration.rgb_intrinsics);
        pose.setCameraTransform(H);
        ntk_dbg_print(pose, 1);

        // Overlay the per-corner depth error (in millimeters) on the RGB image.
        cv::Mat3b debug_img;
        image.rgb().copyTo(debug_img);
        foreach_idx(pattern_i, pattern_points[0])
        {
            Point3f p = pose.projectToImage(pattern_points[0][pattern_i]);
            ntk_dbg_print(p, 1);
            float kinect_raw = image.mappedDepth()(p.y, p.x);
            ntk_dbg_print(kinect_raw, 1);
            if (kinect_raw < 1e-5)
                continue;

            float err = kinect_raw - p.z;
            cv::putText(debug_img, format("%d", (int)(err*1000)),
                        Point(p.x, p.y), CV_FONT_NORMAL, 0.4, Scalar(255,0,0));

            ntk_dbg_print(pattern_points[0][pattern_i], 1);
            ntk_dbg_print(kinect_raw, 1);
            depth_values.push_back(DepthCalibrationPoint(kinect_raw, p.z));
        }
        imshow("errors", debug_img);
        cv::waitKey(0);
    }
}
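// A small follow-up sketch (not in the original source): summarizing the
// collected calibration points as a mean absolute depth error in millimeters.
// This assumes DepthCalibrationPoint stores the measured and expected depths
// as `kinect_raw` and `estimated`, both in meters for the mapped-depth
// variant above; the field and helper names are guesses.
#include <cmath>
#include <vector>

static double meanAbsDepthErrorMm(const std::vector<DepthCalibrationPoint>& points)
{
    if (points.empty())
        return 0.0;
    double sum = 0.0;
    for (size_t i = 0; i < points.size(); ++i)
        sum += std::abs(points[i].kinect_raw - points[i].estimated) * 1000.0;
    return sum / points.size();  // average error over all checkerboard corners
}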