// Finds a child of the node. Returns NULL if it's not found.
Val_T * getChild(int ID)
{
    typename std::map<Id_T, Val_T*>::iterator it = m_children.find(ID);
    if(it != m_children.end())
        return it->second;
    return NULL;
}
T ConstructionFactory<T>::constructObject(const std::string& key) const {
    typename std::map<std::string, Constructor>::const_iterator it = this->_constructors.find(key);
    if (it != this->_constructors.end()) {
        if (it->second) {
            return it->second();
        }
        return fl::null;
    }
    std::ostringstream ss;
    ss << "[factory error] constructor of " + _name + " <" << key << "> not registered";
    throw fl::Exception(ss.str(), FL_AT);
}
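The factory snippet above boils down to a map from string keys to constructor callbacks. Below is a minimal, self-contained sketch of that pattern; the names (ObjectFactory, Shape, Circle, newCircle) are illustrative assumptions, not the API of the library the snippet comes from.

#include <map>
#include <stdexcept>
#include <string>

struct Shape { virtual ~Shape() {} };
struct Circle : Shape {};

// Hypothetical constructor callback matching ObjectFactory<Shape>::Constructor.
static Shape* newCircle() { return new Circle(); }

template <typename T>
class ObjectFactory {
public:
    typedef T* (*Constructor)();

    // Registering a null constructor is allowed; construct() then returns null.
    void registerConstructor(const std::string& key, Constructor ctor) {
        constructors_[key] = ctor;
    }

    // Looks the key up in the map and either calls the stored constructor
    // or reports an unregistered key, as in the snippet above.
    T* construct(const std::string& key) const {
        typename std::map<std::string, Constructor>::const_iterator it =
            constructors_.find(key);
        if (it == constructors_.end())
            throw std::runtime_error("constructor <" + key + "> not registered");
        return it->second ? it->second() : 0;
    }

private:
    std::map<std::string, Constructor> constructors_;
};

// Illustrative usage:
//   ObjectFactory<Shape> factory;
//   factory.registerConstructor("circle", &newCircle);
//   Shape* s = factory.construct("circle");   // calls newCircle()
//   delete s;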
/** Returns the id of the given name, throws on lookup failure */
int get(const T& name) const
{
    typename std::map<T, int>::const_iterator it = m_name2int.find(name);
    if (it == m_name2int.end()) {
        raise_exception(std::runtime_error, "lookup failure for: '" << name << "'");
    } else {
        return it->second;
    }
}
/** Returns the id of the given name, on lookup failure the name is put into the table */
int getput(const T& name)
{
    typename std::map<T, int>::const_iterator it = m_name2int.find(name);
    if (it == m_name2int.end()) {
        return put(name);
    } else {
        return it->second;
    }
}
bool call(const std::string& command, CommandArgumentsType const& arguments) const
{
    typename std::map<std::string, boost::function<Signature> >::const_iterator i;
    i = callbacks_.find(command);
    if (i == callbacks_.end()) {
        return BaseType::call(command, arguments);
    } else {
        return i->second(command, arguments);
    }
}
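The call() snippet dispatches a command name through a map of callbacks and falls back to a base class when the name is unknown. A minimal sketch of the same dispatch pattern follows, using std::function instead of boost::function and returning false instead of delegating to a base class; the CommandTable name and its members are assumptions for illustration.

#include <functional>
#include <map>
#include <string>

template <typename ArgumentsType>
class CommandTable {
public:
    typedef std::function<bool(const std::string&, const ArgumentsType&)> Callback;

    // Registers (or replaces) the handler for a command name.
    void add(const std::string& command, const Callback& cb) {
        callbacks_[command] = cb;
    }

    // Looks the command up and invokes its handler; unknown commands fail.
    bool call(const std::string& command, const ArgumentsType& arguments) const {
        typename std::map<std::string, Callback>::const_iterator i = callbacks_.find(command);
        if (i == callbacks_.end())
            return false;  // no base-class fallback in this sketch
        return i->second(command, arguments);
    }

private:
    std::map<std::string, Callback> callbacks_;
};

// Illustrative usage (C++11):
//   CommandTable<std::vector<std::string> > commands;
//   commands.add("echo", [](const std::string&, const std::vector<std::string>& args) {
//       for (size_t i = 0; i < args.size(); ++i) std::cout << args[i] << ' ';
//       std::cout << std::endl;
//       return true;
//   });
//   commands.call("echo", std::vector<std::string>(1, "hello"));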
void ConstitutiveModelParameters<EvalT, Traits>::
evaluateFields(typename Traits::EvalData workset)
{
  typename std::map<std::string, PHX::MDField<ScalarT, Cell, QuadPoint> >::iterator it;
  for (it = field_map_.begin(); it != field_map_.end(); ++it) {
    ScalarT constant_value = constant_value_map_[it->first];
    if (is_constant_map_[it->first]) {
      for (std::size_t cell(0); cell < workset.numCells; ++cell) {
        for (std::size_t pt(0); pt < num_pts_; ++pt) {
          it->second(cell, pt) = constant_value;
        }
      }
    } else {
      for (std::size_t cell(0); cell < workset.numCells; ++cell) {
        for (std::size_t pt(0); pt < num_pts_; ++pt) {
          Teuchos::Array<MeshScalarT> point(num_dims_);
          for (std::size_t i(0); i < num_dims_; ++i)
            point[i] = Sacado::ScalarValue<MeshScalarT>::eval(coord_vec_(cell, pt, i));
          it->second(cell, pt) =
              exp_rf_kl_map_[it->first]->evaluate(point, rv_map_[it->first]);
        }
      }
    }
    if (have_temperature_) {
      if (temp_type_map_[it->first] == "Linear") {
        RealType dPdT = dparam_dtemp_map_[it->first];
        RealType ref_temp = ref_temp_map_[it->first];
        for (std::size_t cell(0); cell < workset.numCells; ++cell) {
          for (std::size_t pt(0); pt < num_pts_; ++pt) {
            it->second(cell, pt) += dPdT * (temperature_(cell, pt) - ref_temp);
          }
        }
      } else if (temp_type_map_[it->first] == "Arrhenius") {
        RealType pre_exp_ = pre_exp_map_[it->first];
        RealType exp_param_ = exp_param_map_[it->first];
        for (std::size_t cell(0); cell < workset.numCells; ++cell) {
          for (std::size_t pt(0); pt < num_pts_; ++pt) {
            it->second(cell, pt) =
                pre_exp_ * std::exp(-exp_param_ / temperature_(cell, pt));
          }
        }
      }
    }
  }
}
void do_scan(typename betree<Key, Value>::iterator &betit,
             typename std::map<Key, Value>::iterator &refit,
             betree<Key, Value> &b,
             typename std::map<Key, Value> &reference)
{
    while (refit != reference.end()) {
        assert(betit != b.end());
        assert(betit.first == refit->first);
        assert(betit.second == refit->second);
        ++refit;
        if (refit == reference.end()) {
            debug(std::cout << "Almost done" << std::endl);
        }
        ++betit;
    }
    assert(betit == b.end());
}
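do_scan() above checks a betree against a std::map used as a reference model. A hedged, generic version of that lockstep comparison is sketched below; it assumes a container whose iterator dereferences to std::pair<const Key, Value>, which differs slightly from the betree iterator's .first/.second members used above.

#include <cassert>
#include <map>

// Walks the container under test in lockstep with a std::map holding the same
// key/value pairs; every position must match and no extra elements may remain.
template <typename Container, typename Key, typename Value>
void check_against_reference(const Container& c, const std::map<Key, Value>& reference)
{
    typename Container::const_iterator cit = c.begin();
    typename std::map<Key, Value>::const_iterator refit = reference.begin();
    while (refit != reference.end()) {
        assert(cit != c.end());
        assert(cit->first == refit->first);    // same key in the same position
        assert(cit->second == refit->second);  // same value
        ++cit;
        ++refit;
    }
    assert(cit == c.end());  // the container must not hold extra elements
}

// Illustrative usage, with std::map standing in for the container under test:
//   std::map<int, int> under_test, reference;
//   under_test[1] = reference[1] = 10;
//   under_test[2] = reference[2] = 20;
//   check_against_reference(under_test, reference);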
// Prints the node's sub-tree in a stream, using indentation
// to represent parents and children.
// d < 0 : display at real depth
// d = 0 : display as root node
// d > 0 : display at specified depth
virtual void printTree(std::ostream & os, int d = -1) const
{
    if(d < 0)
        d = getDepth();

    // Indentation
    for(int i = 0; i < d; i++)
        os << " ";

    // Printing the node
    print(os);

    // Printing children
    typename std::map<Id_T, Val_T*>::const_iterator it;
    for(it = m_children.begin(); it != m_children.end(); it++)
    {
        it->second->printTree(os, d + 1);
    }
}
// Adds a child to the node. Returns false if it fails.
// Warning: don't forget to free the child's memory if an error occurs!
bool addChild(Id_T ID, Val_T * newChild)
{
    return m_children.insert(
        typename std::pair<Id_T, Val_T*>(ID, newChild)).second;
}
// Destroys the tree node and its children.
virtual ~TreeNode()
{
    typename std::map<Id_T, Val_T*>::iterator it;
    for(it = m_children.begin(); it != m_children.end(); it++)
        delete it->second;
}
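The addChild/getChild/printTree/destructor snippets above all come from a node type that owns its children through a std::map of pointers. The sketch below is a simplified, non-template stand-in (the Node class and int keys are assumptions) showing the ownership rule the addChild warning refers to: a rejected child is not adopted and must be freed by the caller.

#include <map>
#include <string>

class Node {
public:
    explicit Node(const std::string& name) : m_name(name) {}

    // Owns its children: delete every mapped pointer, as in the destructor above.
    ~Node() {
        std::map<int, Node*>::iterator it;
        for (it = m_children.begin(); it != m_children.end(); ++it)
            delete it->second;
    }

    // Returns false (and does NOT take ownership) if the ID is already used.
    bool addChild(int id, Node* child) {
        return m_children.insert(std::pair<int, Node*>(id, child)).second;
    }

private:
    std::string m_name;
    std::map<int, Node*> m_children;
};

int main() {
    Node root("root");
    root.addChild(1, new Node("first"));

    // This insert fails because ID 1 is taken; per the warning above, the caller
    // must free the rejected child itself or it leaks.
    Node* duplicate = new Node("duplicate");
    if (!root.addChild(1, duplicate))
        delete duplicate;

    return 0;  // ~Node deletes the successfully added child
}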
bool has(const T& name) const
{
    return m_name2int.find(name) != m_name2int.end();
}
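The get()/getput()/has() snippets rely on a put() helper and an m_name2int member that are not shown here. A possible shape of the complete name-to-id table is sketched below; put() assigning ids densely in insertion order and the m_int2name reverse vector are assumptions, not the original code.

#include <map>
#include <stdexcept>
#include <vector>

template <typename T>
class NameTable {
public:
    // True if the name has already been assigned an id.
    bool has(const T& name) const {
        return m_name2int.find(name) != m_name2int.end();
    }

    // Throws on lookup failure, mirroring get() above.
    int get(const T& name) const {
        typename std::map<T, int>::const_iterator it = m_name2int.find(name);
        if (it == m_name2int.end())
            throw std::runtime_error("lookup failure");
        return it->second;
    }

    // Inserts on lookup failure, mirroring getput() above.
    int getput(const T& name) {
        typename std::map<T, int>::const_iterator it = m_name2int.find(name);
        return it == m_name2int.end() ? put(name) : it->second;
    }

    // Assumed implementation of put(): the next id is the current table size.
    int put(const T& name) {
        int id = static_cast<int>(m_int2name.size());
        m_name2int[name] = id;
        m_int2name.push_back(name);
        return id;
    }

private:
    std::map<T, int> m_name2int;
    std::vector<T> m_int2name;  // reverse lookup, id -> name
};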
void pcl::rec_3d_framework::GlobalNNCVFHRecognizer<Distance, PointInT, FeatureT>::recognize ()
{
  models_.reset (new std::vector<ModelT>);
  transforms_.reset (new std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> >);

  PointInTPtr processed (new pcl::PointCloud<PointInT>);
  PointInTPtr in (new pcl::PointCloud<PointInT>);

  std::vector<pcl::PointCloud<FeatureT>, Eigen::aligned_allocator<pcl::PointCloud<FeatureT> > > signatures;
  std::vector < Eigen::Vector3d > centroids;

  if (indices_.size ())
    pcl::copyPointCloud (*input_, indices_, *in);
  else
    in = input_;

  {
    pcl::ScopeTime t ("Estimate feature");
    micvfh_estimator_->estimate (in, processed, signatures, centroids);
  }

  std::vector<index_score> indices_scores;
  descriptor_distances_.clear ();

  if (signatures.size () > 0)
  {
    {
      pcl::ScopeTime t_matching ("Matching and roll...");

      if (use_single_categories_ && (categories_to_be_searched_.size () > 0))
      {
        //perform search of the different signatures in the categories_to_be_searched_
        for (size_t c = 0; c < categories_to_be_searched_.size (); c++)
        {
          std::cout << "Using category:" << categories_to_be_searched_[c] << std::endl;

          for (size_t idx = 0; idx < signatures.size (); idx++)
          {
            /*double* hist = signatures[idx].points[0].histogram;
            std::vector<double> std_hist (hist, hist + getHistogramLength (dummy));
            flann_model histogram ("", std_hist);
            flann::Matrix<int> indices;
            flann::Matrix<double> distances;
            std::map<std::string, int>::iterator it;
            it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);
            assert (it != category_to_vectors_indices_.end ());
            nearestKSearch (single_categories_index_[it->second], histogram, nmodels_, indices, distances);*/

            double* hist = signatures[idx].points[0].histogram;
            int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(double);
            std::vector<double> std_hist (hist, hist + size_feat);
            //ModelT empty;

            flann_model histogram;
            histogram.descr = std_hist;

            flann::Matrix<int> indices;
            flann::Matrix<double> distances;

            std::map<std::string, int>::iterator it;
            it = category_to_vectors_indices_.find (categories_to_be_searched_[c]);
            assert (it != category_to_vectors_indices_.end ());

            nearestKSearch (single_categories_index_[it->second], histogram, NN_, indices, distances);

            //gather NN-search results
            double score = 0;
            for (size_t i = 0; i < NN_; ++i)
            {
              score = distances[0][i];
              index_score is;
              is.idx_models_ = single_categories_pointers_to_models_[it->second]->at (indices[0][i]);
              is.idx_input_ = static_cast<int> (idx);
              is.score_ = score;
              indices_scores.push_back (is);
            }
          }

          //we cannot add more than nmodels per category, so sort here and remove offending ones...
          std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);
          indices_scores.resize ((c + 1) * NN_);
        }
      }
      else
      {
        for (size_t idx = 0; idx < signatures.size (); idx++)
        {
          double* hist = signatures[idx].points[0].histogram;
          int size_feat = sizeof(signatures[idx].points[0].histogram) / sizeof(double);
          std::vector<double> std_hist (hist, hist + size_feat);
          //ModelT empty;

          flann_model histogram;
          histogram.descr = std_hist;

          flann::Matrix<int> indices;
          flann::Matrix<double> distances;
          nearestKSearch (flann_index_, histogram, NN_, indices, distances);

          //gather NN-search results
          double score = 0;
          for (int i = 0; i < NN_; ++i)
          {
            score = distances[0][i];
            index_score is;
            is.idx_models_ = indices[0][i];
            is.idx_input_ = static_cast<int> (idx);
            is.score_ = score;
            indices_scores.push_back (is);
            //ModelT m = flann_models_[indices[0][i]].model;
          }
        }
      }

      std::sort (indices_scores.begin (), indices_scores.end (), sortIndexScoresOp);

      /*
       * There might be duplicated candidates; in those cases it makes sense to take
       * the closer one in descriptor space
       */
      typename std::map<flann_model, bool> found;
      typename std::map<flann_model, bool>::iterator it_map;

      for (size_t i = 0; i < indices_scores.size (); i++)
      {
        flann_model m = flann_models_[indices_scores[i].idx_models_];
        it_map = found.find (m);
        if (it_map == found.end ())
        {
          indices_scores[found.size ()] = indices_scores[i];
          found[m] = true;
        }
      }
      indices_scores.resize (found.size ());

      int num_n = std::min (NN_, static_cast<int> (indices_scores.size ()));

      /*
       * Filter some hypotheses with respect to their distance to the first neighbour
       */
      /*std::vector<index_score> indices_scores_filtered;
      indices_scores_filtered.resize (num_n);
      indices_scores_filtered[0] = indices_scores[0];

      double best_score = indices_scores[0].score_;
      int kept = 1;
      for (int i = 1; i < num_n; ++i)
      {
        //std::cout << best_score << indices_scores[i].score_ << (best_score / indices_scores[i].score_) << std::endl;
        if ((best_score / indices_scores[i].score_) > 0.65)
        {
          indices_scores_filtered[i] = indices_scores[i];
          kept++;
        }
        //best_score = indices_scores[i].score_;
      }

      indices_scores_filtered.resize (kept);
      //std::cout << indices_scores_filtered.size () << " § " << num_n << std::endl;

      indices_scores = indices_scores_filtered;
      num_n = indices_scores.size ();*/

      std::cout << "Number of object hypotheses... " << num_n << std::endl;

      std::vector<bool> valid_trans;
      std::vector < Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> > transformations;
      micvfh_estimator_->getValidTransformsVec (valid_trans);
      micvfh_estimator_->getTransformsVec (transformations);

      for (int i = 0; i < num_n; ++i)
      {
        ModelT m = flann_models_[indices_scores[i].idx_models_].model;
        int view_id = flann_models_[indices_scores[i].idx_models_].view_id;
        int desc_id = flann_models_[indices_scores[i].idx_models_].descriptor_id;

        int idx_input = indices_scores[i].idx_input_;
        std::cout << m.class_ << "/" << m.id_ << " ==> " << indices_scores[i].score_ << std::endl;

        Eigen::Matrix4d roll_view_pose;
        bool roll_pose_found = getRollPose (m, view_id, desc_id, roll_view_pose);

        if (roll_pose_found && valid_trans[idx_input])
        {
          Eigen::Matrix4d transposed = roll_view_pose.transpose ();
          //std::cout << transposed << std::endl;

          PointInTPtr view;
          getView (m, view_id, view);

          Eigen::Matrix4d model_view_pose;
          getPose (m, view_id, model_view_pose);

          Eigen::Matrix4d scale_mat;
          scale_mat.setIdentity (4, 4);

          if (compute_scale_)
          {
            //compute scale using the whole view
            PointInTPtr view_transformed (new pcl::PointCloud<PointInT>);
            Eigen::Matrix4d hom_from_OVC_to_CC;
            hom_from_OVC_to_CC = transformations[idx_input].inverse () * transposed;
            pcl::transformPointCloud (*view, *view_transformed, hom_from_OVC_to_CC);

            Eigen::Vector3d input_centroid = centroids[indices_scores[i].idx_input_];
            Eigen::Vector3d view_centroid;
            getCentroid (m, view_id, desc_id, view_centroid);

            Eigen::Vector4d cmatch4f (view_centroid[0], view_centroid[1], view_centroid[2], 0);
            Eigen::Vector4d cinput4f (input_centroid[0], input_centroid[1], input_centroid[2], 0);

            Eigen::Vector4d max_pt_input;
            pcl::getMaxDistance (*processed, cinput4f, max_pt_input);
            max_pt_input[3] = 0;
            double max_dist_input = (cinput4f - max_pt_input).norm ();

            //compute max dist for transformed model_view
            pcl::getMaxDistance (*view, cmatch4f, max_pt_input);
            max_pt_input[3] = 0;
            double max_dist_view = (cmatch4f - max_pt_input).norm ();

            cmatch4f = hom_from_OVC_to_CC * cmatch4f;
            std::cout << max_dist_view << " " << max_dist_input << std::endl;

            double scale_factor_view = max_dist_input / max_dist_view;
            std::cout << "Scale factor:" << scale_factor_view << std::endl;

            Eigen::Matrix4d center, center_inv;
            center.setIdentity (4, 4);
            center (0, 3) = -cinput4f[0];
            center (1, 3) = -cinput4f[1];
            center (2, 3) = -cinput4f[2];

            center_inv.setIdentity (4, 4);
            center_inv (0, 3) = cinput4f[0];
            center_inv (1, 3) = cinput4f[1];
            center_inv (2, 3) = cinput4f[2];

            scale_mat (0, 0) = scale_factor_view;
            scale_mat (1, 1) = scale_factor_view;
            scale_mat (2, 2) = scale_factor_view;

            scale_mat = center_inv * scale_mat * center;
          }

          Eigen::Matrix4d hom_from_OC_to_CC;
          hom_from_OC_to_CC = scale_mat * transformations[idx_input].inverse () * transposed * model_view_pose;

          models_->push_back (m);
          transforms_->push_back (hom_from_OC_to_CC);
          descriptor_distances_.push_back (static_cast<double> (indices_scores[i].score_));
        }
        else
        {
          PCL_WARN("The roll pose was not found, should use CRH here... \n");
        }
      }
    }

    std::cout << "Number of object hypotheses:" << models_->size () << std::endl;

    /**
     * POSE REFINEMENT
     **/
    if (ICP_iterations_ > 0)
    {
      pcl::ScopeTime t ("Pose refinement");

      //Prepare scene and model clouds for the pose refinement step
      double VOXEL_SIZE_ICP_ = 0.005;
      PointInTPtr cloud_voxelized_icp (new pcl::PointCloud<PointInT> ());

      {
        pcl::ScopeTime t ("Voxelize stuff...");
        pcl::VoxelGrid<PointInT> voxel_grid_icp;
        voxel_grid_icp.setInputCloud (processed);
        voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
        voxel_grid_icp.filter (*cloud_voxelized_icp);
        source_->voxelizeAllModels (VOXEL_SIZE_ICP_);
      }

#pragma omp parallel for num_threads(omp_get_num_procs())
      for (int i = 0; i < static_cast<int> (models_->size ()); i++)
      {
        ConstPointInTPtr model_cloud;
        PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);

        if (compute_scale_)
        {
          model_cloud = models_->at (i).getAssembled (-1);
          PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
          pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
          pcl::VoxelGrid<PointInT> voxel_grid_icp;
          voxel_grid_icp.setInputCloud (model_aligned_m);
          voxel_grid_icp.setLeafSize (VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_, VOXEL_SIZE_ICP_);
          voxel_grid_icp.filter (*model_aligned);
        }
        else
        {
          model_cloud = models_->at (i).getAssembled (VOXEL_SIZE_ICP_);
          pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
        }

        pcl::IterativeClosestPoint<PointInT, PointInT> reg;
        reg.setInputCloud (model_aligned);         //model
        reg.setInputTarget (cloud_voxelized_icp);  //scene
        reg.setMaximumIterations (ICP_iterations_);
        reg.setMaxCorrespondenceDistance (VOXEL_SIZE_ICP_ * 3.);
        reg.setTransformationEpsilon (1e-6);

        typename pcl::PointCloud<PointInT>::Ptr output_ (new pcl::PointCloud<PointInT> ());
        reg.align (*output_);

        Eigen::Matrix4d icp_trans = reg.getFinalTransformation ();
        transforms_->at (i) = icp_trans * transforms_->at (i);
      }
    }

    /**
     * HYPOTHESES VERIFICATION
     **/
    if (hv_algorithm_)
    {
      pcl::ScopeTime t ("HYPOTHESES VERIFICATION");

      std::vector<typename pcl::PointCloud<PointInT>::ConstPtr> aligned_models;
      aligned_models.resize (models_->size ());

      for (size_t i = 0; i < models_->size (); i++)
      {
        ConstPointInTPtr model_cloud;
        PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);

        if (compute_scale_)
        {
          model_cloud = models_->at (i).getAssembled (-1);
          PointInTPtr model_aligned_m (new pcl::PointCloud<PointInT>);
          pcl::transformPointCloud (*model_cloud, *model_aligned_m, transforms_->at (i));
          pcl::VoxelGrid<PointInT> voxel_grid_icp;
          voxel_grid_icp.setInputCloud (model_aligned_m);
          voxel_grid_icp.setLeafSize (0.005, 0.005, 0.005);
          voxel_grid_icp.filter (*model_aligned);
        }
        else
        {
          model_cloud = models_->at (i).getAssembled (0.005);
          pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));
        }

        //ConstPointInTPtr model_cloud = models_->at (i).getAssembled (0.005);
        //PointInTPtr model_aligned (new pcl::PointCloud<PointInT>);
        //pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_->at (i));

        aligned_models[i] = model_aligned;
      }

      std::vector<bool> mask_hv;
      hv_algorithm_->setSceneCloud (input_);
      hv_algorithm_->addModels (aligned_models, true);
      hv_algorithm_->verify ();
      hv_algorithm_->getMask (mask_hv);

      boost::shared_ptr < std::vector<ModelT> > models_temp;
      boost::shared_ptr < std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> > > transforms_temp;

      models_temp.reset (new std::vector<ModelT>);
      transforms_temp.reset (new std::vector<Eigen::Matrix4d, Eigen::aligned_allocator<Eigen::Matrix4d> >);

      for (size_t i = 0; i < models_->size (); i++)
      {
        if (!mask_hv[i])
          continue;

        models_temp->push_back (models_->at (i));
        transforms_temp->push_back (transforms_->at (i));
      }

      models_ = models_temp;
      transforms_ = transforms_temp;
    }
  }
}
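Inside recognize(), duplicate model candidates are removed by walking the score-sorted list and remembering already-seen models in a std::map, compacting the survivors toward the front. The sketch below isolates that pattern with simplified types; the Candidate struct and its int model_id are stand-ins for the real flann_model / index_score types.

#include <algorithm>
#include <cstddef>
#include <map>
#include <vector>

struct Candidate {
    int model_id;
    double score;
};

inline bool byScore(const Candidate& a, const Candidate& b) { return a.score < b.score; }

// Keeps, for every model id, only its best-scored candidate, compacting the
// vector in place exactly like the found-map loop in the code above.
inline void keepBestPerModel(std::vector<Candidate>& candidates)
{
    std::sort(candidates.begin(), candidates.end(), byScore);

    std::map<int, bool> found;
    for (std::size_t i = 0; i < candidates.size(); ++i) {
        if (found.find(candidates[i].model_id) == found.end()) {
            candidates[found.size()] = candidates[i];  // compact towards the front
            found[candidates[i].model_id] = true;
        }
    }
    candidates.resize(found.size());
}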