int main (int argc, char** argv) { // Make sure that we have the right number of arguments if (argc != 2) { print_info ("\nVisualizes the hash table after adding the provided mesh to it.\n" "usage:\n" "./obj_rec_ransac_hash_table <mesh.vtk>\n"); return (-1); } ObjRecRANSAC::PointCloudIn points_in; ObjRecRANSAC::PointCloudN normals_in; double b[6]; if ( !vtk_to_pointcloud (argv[1], points_in, normals_in, b) ) return (-1); // Compute the bounding box diagonal float diag = static_cast<float> (sqrt (my_sqr (b[1]-b[0]) + my_sqr (b[3]-b[2]) + my_sqr (b[5]-b[4]))); // Create the recognition object (we need it only for its hash table) ObjRecRANSAC objrec (diag/8.0f, diag/60.0f); objrec.addModel (points_in, normals_in, "test_model"); // Start visualization (and the main VTK loop) visualize (objrec.getHashTable ()); return (0); }
int main (int argc, char** argv) { // Make sure that we have the right number of arguments if (argc != 2) { print_info ("\nVisualizes the hash table after adding the provided mesh to it.\n" "usage:\n" "./visualize_obj_rec_ransac_hash_table mesh.vtk\n"); return (-1); } // Parse the command line arguments for .vtk files if ( parse_file_extension_argument (argc, argv, ".vtk").size () != 1 ) { print_error ("We need a .vtk object.\n"); return (-1); } // Load the model vtkPolyDataReader *vtk_reader = vtkPolyDataReader::New (); vtk_reader->SetFileName (argv[1]); vtk_reader->Update (); vtkPolyData *vtk_mesh = vtk_reader->GetOutput (); // Get the bounds of the mesh double mb[6]; vtk_mesh->ComputeBounds (); vtk_mesh->GetBounds (mb); // Create a point cloud with normals ModelLibrary::PointCloudInPtr eigen_points (new ModelLibrary::PointCloudIn ()); ModelLibrary::PointCloudNPtr eigen_normals (new ModelLibrary::PointCloudN ()); if ( !get_points_and_normals (vtk_mesh, *eigen_points.get(), *eigen_normals.get()) ) { vtk_reader->Delete (); return (-1); } // Compute the bounding box diagonal double diag = sqrt (my_sqr (mb[1]-mb[0]) + my_sqr (mb[3]-mb[2]) + my_sqr (mb[5]-mb[4])); // Create the recognition object (we need it only for its hash table) ObjRecRANSAC objrec (diag/8.0, diag/20.0); printf("Adding the model to the library ... "); fflush (stdout); objrec.addModel (eigen_points, eigen_normals, string ("test_model")); printf("OK\n"); // Start visualization (and the main VTK loop) visualize (objrec.getHashTable ()); // Cleanup vtk_reader->Delete (); return 0; }
// Refines each detected shape with ICP: the scene points supporting a shape
// are registered against the shape's model mesh, and the refined
// model-to-scene transform is written back into the shape.
//
// Parameters:
//   objrec         - recognizer holding the input scene point set.
//   detectedShapes - shapes produced by recognition; updated in place.
void ObjRecICP::doICP(ObjRecRANSAC& objrec, list<PointSetShape*>& detectedShapes)
{
  vtkPoints* scene_points = objrec.getInputScene();
  // model2scene/scene2model are heap-allocated 4x4 matrices; freed at the end
  double p[3], mat[4][4], icp_mat[4][4], **model2scene = mat_alloc(4, 4), **scene2model = mat_alloc(4, 4);
  list<int>::iterator id;
  int i;

#ifdef OBJ_REC_RANSAC_VERBOSE
  printf("ICP refinement "); fflush(stdout);
#endif

  for ( list<PointSetShape*>::iterator it = detectedShapes.begin() ; it != detectedShapes.end() ; ++it )
  {
    // Get the shape
    PointSetShape *shape = *it;

    // This one will contain the shape points
    vtkPoints *shape_points = vtkPoints::New(VTK_DOUBLE), *out = vtkPoints::New(VTK_DOUBLE);
    shape_points->SetNumberOfPoints((*it)->getScenePointIds().size());

    // Copy the scene points that were assigned to this shape into shape_points
    for ( i = 0, id = shape->getScenePointIds().begin() ; id != shape->getScenePointIds().end() ; ++id, ++i )
    {
      // Get the scene point
      scene_points->GetPoint(*id, p);
      // Insert the scene point to the shape point set
      shape_points->SetPoint(i, p);
    }

    // Invert the model to scene transformation
    shape->getHomogeneousRigidTransform(model2scene);
    mat_invert_rigid_transform4x4((const double**)model2scene, scene2model);

    // Use the model as target
    this->setTarget(shape->getPolyData());
    // Register the shape points to the model, starting from the
    // scene-to-model transform as the initial guess
    this->doRegistration(shape_points, out, mat, INT_MAX/*max iterations*/, (const double**)scene2model);
    // Invert the result such that we have again the model to scene transformation
    mat_invert_rigid_transform4x4(mat, icp_mat);

    // Save the result in the shape
    shape->setHomogeneousRigidTransform(icp_mat);

    // Cleanup (per-shape VTK point sets)
    shape_points->Delete();
    out->Delete();

#ifdef OBJ_REC_RANSAC_VERBOSE
    printf("."); fflush(stdout);
#endif
  }

#ifdef OBJ_REC_RANSAC_VERBOSE
  printf(" done\n"); fflush(stdout);
#endif

  // Cleanup (the two heap-allocated 4x4 matrices)
  mat_dealloc(model2scene, 4);
  mat_dealloc(scene2model, 4);
}
void run (float pair_width, float voxel_size, float max_coplanarity_angle, int num_hypotheses_to_show) { PointCloud<PointXYZ>::Ptr scene_points (new PointCloud<PointXYZ> ()), model_points (new PointCloud<PointXYZ> ()); PointCloud<Normal>::Ptr scene_normals (new PointCloud<Normal> ()), model_normals (new PointCloud<Normal> ()); // Get the points and normals from the scene if ( !vtk_to_pointcloud ("../../test/tum_table_scene.vtk", *scene_points, *scene_normals) ) return; vtkPolyData *vtk_model = vtkPolyData::New (); // Get the points and normals from the scene if ( !vtk_to_pointcloud ("../../test/tum_amicelli_box.vtk", *model_points, *model_normals, vtk_model) ) return; // The recognition object ObjRecRANSAC objrec (pair_width, voxel_size); objrec.setMaxCoplanarityAngleDegrees (max_coplanarity_angle); objrec.addModel (*model_points, *model_normals, "amicelli", vtk_model); // Switch to the test mode in which only oriented point pairs from the scene are sampled objrec.enterTestModeTestHypotheses (); // The visualizer PCLVisualizer viz; CallbackParameters params(objrec, viz, *scene_points, *scene_normals, num_hypotheses_to_show); viz.registerKeyboardCallback (keyboardCB, static_cast<void*> (¶ms)); // Run the recognition and update the viewer update (¶ms); #ifdef _SHOW_SCENE_POINTS_ viz.addPointCloud (scene_points, "cloud in"); viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 2, "cloud in"); #endif #ifdef _SHOW_OCTREE_POINTS_ PointCloud<PointXYZ>::Ptr octree_points (new PointCloud<PointXYZ> ()); objrec.getSceneOctree ().getFullLeavesPoints (*octree_points); viz.addPointCloud (octree_points, "octree points"); viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 5, "octree points"); viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, 1.0, 0.0, 0.0, "octree points"); #endif #if defined _SHOW_OCTREE_NORMALS_ && defined _SHOW_OCTREE_POINTS_ PointCloud<Normal>::Ptr octree_normals 
(new PointCloud<Normal> ()); objrec.getSceneOctree ().getNormalsOfFullLeaves (*octree_normals); viz.addPointCloudNormals<PointXYZ,Normal> (octree_points, octree_normals, 1, 6.0f, "normals out"); #endif // Enter the main loop while (!viz.wasStopped ()) { //main loop of the visualizer viz.spinOnce (100); boost::this_thread::sleep (boost::posix_time::microseconds (100000)); } vtk_model->Delete (); }
void run (float pair_width, float voxel_size, float max_coplanarity_angle) { // The object recognizer ObjRecRANSAC objrec (pair_width, voxel_size); objrec.setMaxCoplanarityAngleDegrees (max_coplanarity_angle); // The models to be loaded list<string> model_names; model_names.emplace_back("tum_amicelli_box"); model_names.emplace_back("tum_rusk_box"); model_names.emplace_back("tum_soda_bottle"); list<PointCloud<PointXYZ>::Ptr> model_points_list; list<PointCloud<Normal>::Ptr> model_normals_list; list<vtkSmartPointer<vtkPolyData> > vtk_models_list; // Load the models and add them to the recognizer for (const auto &model_name : model_names) { PointCloud<PointXYZ>::Ptr model_points (new PointCloud<PointXYZ> ()); model_points_list.push_back (model_points); PointCloud<Normal>::Ptr model_normals (new PointCloud<Normal> ()); model_normals_list.push_back (model_normals); vtkSmartPointer<vtkPolyData> vtk_model = vtkSmartPointer<vtkPolyData>::New (); vtk_models_list.push_back (vtk_model); // Compose the file string file_name = string("../../test/") + model_name + string (".vtk"); // Get the points and normals from the input model if ( !vtk2PointCloud (file_name.c_str (), *model_points, *model_normals, vtk_model) ) continue; // Add the model objrec.addModel (*model_points, *model_normals, model_name, vtk_model); } // The scene in which the models are supposed to be recognized PointCloud<PointXYZ>::Ptr non_plane_points (new PointCloud<PointXYZ> ()), plane_points (new PointCloud<PointXYZ> ()); PointCloud<Normal>::Ptr non_plane_normals (new PointCloud<Normal> ()); // Detect the largest plane in the dataset if ( !loadScene ("../../test/tum_table_scene.vtk", *non_plane_points, *non_plane_normals, *plane_points) ) return; // The parameters for the callback function and the visualizer PCLVisualizer viz; CallbackParameters params(objrec, viz, *non_plane_points, *non_plane_normals); viz.registerKeyboardCallback (keyboardCB, static_cast<void*> (¶ms)); // Run the recognition and update the 
viewer. Have a look at this method, to see how to start the recognition and use the result! update (¶ms); // From this line on: visualization stuff only! #ifdef _SHOW_OCTREE_ show_octree(objrec.getSceneOctree (), viz); #endif #ifdef _SHOW_SCENE_POINTS_ viz.addPointCloud (scene_points, "scene points"); viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 2, "scene points"); #endif #ifdef _SHOW_OCTREE_POINTS_ PointCloud<PointXYZ>::Ptr octree_points (new PointCloud<PointXYZ> ()); objrec.getSceneOctree ().getFullLeavesPoints (*octree_points); viz.addPointCloud (octree_points, "octree points"); // viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_POINT_SIZE, 1, "octree points"); viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, 1.0, 0.0, 0.0, "octree points"); viz.addPointCloud (plane_points, "plane points"); viz.setPointCloudRenderingProperties (pcl::visualization::PCL_VISUALIZER_COLOR, 0.9, 0.9, 0.9, "plane points"); #endif #if defined _SHOW_OCTREE_NORMALS_ && defined _SHOW_OCTREE_POINTS_ PointCloud<Normal>::Ptr normals_octree (new PointCloud<Normal> ()); objrec.getSceneOctree ().getNormalsOfFullLeaves (*normals_octree); viz.addPointCloudNormals<PointXYZ,Normal> (points_octree, normals_octree, 1, 6.0f, "normals out"); #endif // Enter the main loop while (!viz.wasStopped ()) { //main loop of the visualizer viz.spinOnce (100); boost::this_thread::sleep (boost::posix_time::microseconds (100000)); } }