bool CubeCollider::raycastALT(const Ray & ray, RaycastHit & hitInfo){ glm::vec3 position = object->getTransform()->getPosition(); glm::vec3 dirToCenter = ray.origin - position; if(glm::dot(ray.direction, dirToCenter) >= 0){ //std::cout << "ray is going in a different direction" << std::endl; return false; } transformNormals(); Plane planes[6]; for(int i = 0; i < 6; i ++){ planes[i] = Plane(transformedNormals[i],position + center + (transformedNormals[i] * size * .5f) ); } for(int i = 0; i < 6; i ++){ bool viable = true; Plane plane = planes[i]; float enter; if(plane.raycast(ray, enter)){ glm::vec3 point = ray.getPoint(enter); for(int j = 0; j < 6; j++){ if(i == j) continue; if(glm::dot(planes[i].normal, point - planes[i].point ) >= 0){ //point is behind plane } else{ //point is in front of plane viable = false; break; } } if(viable){ hitInfo.point = point; hitInfo.distance = enter; hitInfo.normal = transformedNormals[i]; hitInfo.object = object; return true; } } else{ // a ray missed one of the faces return false; } } return false; }
// Fills the recognition service response with the verified model hypotheses:
// ids, 6-DoF transforms, aligned model clouds, free-space-violation based
// confidences, centroids and 3D bounding boxes. Also publishes the
// concatenated recognized-model cloud and an annotated image in which each
// model is labelled and framed by its projected 2D bounding rectangle.
// @param req       service request (only the cloud header frame_id is read).
// @param response  service response to populate.
// @return always true (service convention).
bool RecognizerROS<PointT>::respondSrvCall(recognition_srv_definitions::recognize::Request &req,
                                           recognition_srv_definitions::recognize::Response &response) const
{
    typename pcl::PointCloud<PointT>::Ptr pRecognizedModels (new pcl::PointCloud<PointT>);
    cv::Mat_<cv::Vec3b> annotated_img;
    ConvertPCLCloud2Image<PointT>(scene_, annotated_img);

    for (size_t j = 0; j < models_verified_.size(); j++)
    {
        // --- model id ---
        std_msgs::String ss_tmp;
        ss_tmp.data = models_verified_[j]->id_;
        response.ids.push_back(ss_tmp);

        // --- pose: translation + quaternion from the 4x4 hypothesis transform ---
        Eigen::Matrix4f trans = transforms_verified_[j];
        geometry_msgs::Transform tt;
        tt.translation.x = trans(0,3);
        tt.translation.y = trans(1,3);
        tt.translation.z = trans(2,3);
        Eigen::Matrix3f rotation = trans.block<3,3>(0,0);
        Eigen::Quaternionf q(rotation);
        tt.rotation.x = q.x();
        tt.rotation.y = q.y();
        tt.rotation.z = q.z();
        tt.rotation.w = q.w();
        response.transforms.push_back(tt);

        // --- aligned model cloud ---
        typename pcl::PointCloud<PointT>::ConstPtr model_cloud = models_verified_[j]->getAssembled ( resolution_ );
        typename pcl::PointCloud<PointT>::Ptr model_aligned (new pcl::PointCloud<PointT>);
        pcl::transformPointCloud (*model_cloud, *model_aligned, transforms_verified_[j]);
        *pRecognizedModels += *model_aligned;
        sensor_msgs::PointCloud2 rec_model;
        pcl::toROSMsg(*model_aligned, rec_model);
        response.models_cloud.push_back(rec_model);

        pcl::PointCloud<pcl::Normal>::ConstPtr normal_cloud = models_verified_[j]->getNormalsAssembled ( resolution_ );
        pcl::PointCloud<pcl::Normal>::Ptr normal_aligned (new pcl::PointCloud<pcl::Normal>);
        transformNormals(*normal_cloud, *normal_aligned, transforms_verified_[j]);

        // --- confidence: fraction of model points without free-space violation ---
        float confidence = 0;
        const float focal_length = 525.f;   // assumes Kinect-like intrinsics — TODO confirm against the actual camera
        const int img_width = 640;
        const int img_height = 480;
        VisibilityReasoning<pcl::PointXYZRGB> vr (focal_length, img_width, img_height);
        vr.setThresholdTSS (0.01f);
        /*float fsv_ratio =*/ vr.computeFSVWithNormals (scene_, model_aligned, normal_aligned);
        confidence = 1.f - vr.getFSVUsedPoints() / static_cast<float>(model_aligned->points.size());
        response.confidence.push_back(confidence);

        // --- centroid ---
        Eigen::Vector4f centroid;
        pcl::compute3DCentroid(*model_aligned, centroid);
        geometry_msgs::Point32 centroid_msg;
        centroid_msg.x = centroid[0];
        centroid_msg.y = centroid[1];
        centroid_msg.z = centroid[2];
        response.centroid.push_back(centroid_msg);

        // --- axis-aligned 3D bounding box: all eight min/max corner combinations,
        //     emitted with z varying fastest, then y, then x (same order as before) ---
        Eigen::Vector4f min;
        Eigen::Vector4f max;
        pcl::getMinMax3D (*model_aligned, min, max);
        object_perception_msgs::BBox bbox;
        for (int xi = 0; xi < 2; xi++) {
            for (int yi = 0; yi < 2; yi++) {
                for (int zi = 0; zi < 2; zi++) {
                    geometry_msgs::Point32 pt;
                    pt.x = xi ? max[0] : min[0];
                    pt.y = yi ? max[1] : min[1];
                    pt.z = zi ? max[2] : min[2];
                    bbox.point.push_back(pt);
                }
            }
        }
        response.bbox.push_back(bbox);

        // --- project the model into the image to find its 2D bounding rectangle ---
        const float cx = static_cast<float> (img_width) / 2.f; //- 0.5f;
        const float cy = static_cast<float> (img_height) / 2.f; // - 0.5f;
        int min_u, min_v, max_u, max_v;
        min_u = img_width;
        min_v = img_height;
        max_u = max_v = 0;

        for(size_t m_pt_id=0; m_pt_id < model_aligned->points.size(); m_pt_id++)
        {
            const float x = model_aligned->points[m_pt_id].x;
            const float y = model_aligned->points[m_pt_id].y;
            const float z = model_aligned->points[m_pt_id].z;
            const int u = static_cast<int> (focal_length * x / z + cx);
            const int v = static_cast<int> (focal_length * y / z + cy);

            // BUGFIX: v must be clipped against img_height (was img_width),
            // otherwise rows with 480 <= v < 640 corrupt the rectangle.
            if (u >= img_width || v >= img_height || u < 0 || v < 0)
                continue;

            if(u < min_u) min_u = u;
            if(v < min_v) min_v = v;
            if(u > max_u) max_u = u;
            if(v > max_v) max_v = v;
        }

        cv::Point text_start;
        text_start.x = min_u;
        text_start.y = std::max(0, min_v - 10);   // keep the label inside the image
        cv::putText(annotated_img, models_verified_[j]->id_, text_start,
                    cv::FONT_HERSHEY_COMPLEX_SMALL, 0.8, cv::Scalar(255,0,255), 1, CV_AA);
        cv::rectangle(annotated_img, cv::Point(min_u, min_v), cv::Point(max_u, max_v),
                      cv::Scalar( 0, 255, 255 ), 2);
    }

    sensor_msgs::PointCloud2 recognizedModelsRos;
    pcl::toROSMsg (*pRecognizedModels, recognizedModelsRos);
    recognizedModelsRos.header.frame_id = req.cloud.header.frame_id;
    vis_pc_pub_.publish(recognizedModelsRos);

    sensor_msgs::ImagePtr msg = cv_bridge::CvImage(std_msgs::Header(), "bgr8", annotated_img).toImageMsg();
    image_pub_.publish(msg);

    return true;
}
// Gathers per-point information (position, normal, noise sigmas, distance to
// the nearest depth discontinuity, and origin view index) from all input
// clouds into big_cloud_info_, after aligning each cloud and its normals into
// the global reference frame via transformations_to_global_.
void NMBasedCloudIntegration<PointT>::collectInfo ()
{
    // Upper bound on the number of points we will keep; non-finite points are
    // filtered out below, so the buffer is shrunk again at the end.
    size_t total_point_count = 0;
    for(size_t i = 0; i < input_clouds_.size(); i++)
        total_point_count += (indices_.empty() || indices_[i].empty()) ? input_clouds_[i]->size() : indices_[i].size();
    VLOG(1) << "Allocating memory for point information of " << total_point_count << "points. ";
    big_cloud_info_.resize(total_point_count);

    // Transform each cloud and its normals into the global coordinate system.
    std::vector<pcl::PointCloud<PointT> > input_clouds_aligned (input_clouds_.size());
    std::vector<pcl::PointCloud<pcl::Normal> > input_normals_aligned (input_clouds_.size());

#pragma omp parallel for schedule(dynamic)
    for(size_t i=0; i < input_clouds_.size(); i++)
    {
        pcl::transformPointCloud(*input_clouds_[i], input_clouds_aligned[i], transformations_to_global_[i]);
        transformNormals(*input_normals_[i], input_normals_aligned[i], transformations_to_global_[i]);
    }

    // Sequentially pack the kept points of every cloud into big_cloud_info_;
    // point_count is the running write offset across clouds.
    size_t point_count = 0;
    for(size_t i=0; i < input_clouds_.size(); i++)
    {
        const pcl::PointCloud<PointT> &cloud_aligned = input_clouds_aligned[i];
        const pcl::PointCloud<pcl::Normal> &normals_aligned = input_normals_aligned[i];

        size_t kept_new_pts = 0;

        // Either take all points of cloud i, or only those selected by indices_[i].
        if (indices_.empty() || indices_[i].empty())
        {
            for(size_t jj=0; jj<cloud_aligned.points.size(); jj++)
            {
                // Skip points with NaN/Inf coordinates or normals.
                if ( !pcl::isFinite(cloud_aligned.points[jj]) || !pcl::isFinite(normals_aligned.points[jj]) )
                    continue;

                PointInfo &pt = big_cloud_info_[point_count + kept_new_pts];
                pt.pt = cloud_aligned.points[jj];
                pt.normal = normals_aligned.points[jj];
                // pt_properties_ layout per point: [0]=lateral sigma, [1]=axial
                // sigma, [2]=distance to depth discontinuity.
                pt.sigma_lateral = pt_properties_[i][jj][0];
                pt.sigma_axial = pt_properties_[i][jj][1];
                pt.distance_to_depth_discontinuity = pt_properties_[i][jj][2];
                pt.pt_idx = jj;
                kept_new_pts++;
            }
        }
        else
        {
            for(int idx : indices_[i])
            {
                if ( !pcl::isFinite(cloud_aligned.points[idx]) || !pcl::isFinite(normals_aligned.points[idx]) )
                    continue;

                PointInfo &pt = big_cloud_info_[point_count + kept_new_pts];
                pt.pt = cloud_aligned.points[idx];
                pt.normal = normals_aligned.points[ idx ];
                pt.sigma_lateral = pt_properties_[i][idx][0];
                pt.sigma_axial = pt_properties_[i][idx][1];
                pt.distance_to_depth_discontinuity = pt_properties_[i][idx][2];
                pt.pt_idx = idx;
                kept_new_pts++;
            }
        }

        // compute and store remaining information
        // (i, point_count, kept_new_pts are read-only in the loop body, hence
        // firstprivate; each jj writes a distinct big_cloud_info_ slot.)
#pragma omp parallel for schedule (dynamic) firstprivate(i, point_count, kept_new_pts)
        for(size_t jj=0; jj<kept_new_pts; jj++)
        {
            PointInfo &pt = big_cloud_info_ [point_count + jj];
            pt.origin = i;

            // Build a diagonal noise covariance (lateral, lateral, axial) in
            // the sensor frame and rotate it into the global frame.
            Eigen::Matrix3f sigma = Eigen::Matrix3f::Zero(), sigma_aligned = Eigen::Matrix3f::Zero();
            sigma(0,0) = pt.sigma_lateral;
            sigma(1,1) = pt.sigma_lateral;
            sigma(2,2) = pt.sigma_axial;

            const Eigen::Matrix4f &tf = transformations_to_global_[ i ];
            Eigen::Matrix3f rotation = tf.block<3,3>(0,0); // or inverse?
            sigma_aligned = rotation * sigma * rotation.transpose();
            double det = sigma_aligned.determinant();

            //            if( std::isfinite(det) && det>0)
            //                pt.probability = 1 / sqrt(2 * M_PI * det);
            //            else
            //                pt.probability = std::numeric_limits<float>::min();

            // Weight is the covariance determinant; degenerate/invalid
            // covariances get the worst possible weight.
            if( std::isfinite(det) && det>0)
                pt.weight = det;
            else
                pt.weight = std::numeric_limits<float>::max();
        }
        point_count += kept_new_pts;
    }

    // Shrink to the number of points actually kept after the finite filter.
    big_cloud_info_.resize(point_count);
}
// Builds a winged-edge shape from an indexed face set: transforms geometry to
// world space, creates the WVertex pool, builds faces according to each face's
// triangle style (strip/fan/triangles), and finally flags vertices whose
// surrounding faces disagree on the vertex normal as non-smooth.
// Returns false if the resulting shape ends up with no faces.
bool WingedEdgeBuilder::buildWShape(WShape& shape, IndexedFaceSet& ifs)
{
    unsigned int vsize = ifs.vsize();
    unsigned int nsize = ifs.nsize();
    //soc unused - unsigned tsize = ifs.tsize();

    const real *vertices = ifs.vertices();
    const real *normals = ifs.normals();
    const real *texCoords = ifs.texCoords();

    real *new_vertices;
    real *new_normals;

    new_vertices = new real[vsize];
    new_normals = new real[nsize];

    // transform coordinates from local to world system
    if (_current_matrix) {
        transformVertices(vertices, vsize, *_current_matrix, new_vertices);
        transformNormals(normals, nsize, *_current_matrix, new_normals);
    }
    else {
        // No transform set: copy the raw coordinate arrays unchanged.
        memcpy(new_vertices, vertices, vsize * sizeof(*new_vertices));
        memcpy(new_normals, normals, nsize * sizeof(*new_normals));
    }

    const IndexedFaceSet::TRIANGLES_STYLE *faceStyle = ifs.trianglesStyle();

    // Copy the face-set materials (if any) into the shape.
    vector<FrsMaterial> frs_materials;
    if (ifs.msize()) {
        const FrsMaterial *const *mats = ifs.frs_materials();
        for (unsigned i = 0; i < ifs.msize(); ++i)
            frs_materials.push_back(*(mats[i]));
        shape.setFrsMaterials(frs_materials);
    }

#if 0
    const FrsMaterial *mat = (ifs.frs_material());
    if (mat)
        shape.setFrsMaterial(*mat);
    else if (_current_frs_material)
        shape.setFrsMaterial(*_current_frs_material);
#endif
    const IndexedFaceSet::FaceEdgeMark *faceEdgeMarks = ifs.faceEdgeMarks();

    // sets the current WShape to shape
    _current_wshape = &shape;

    // create a WVertex for each vertex
    buildWVertices(shape, new_vertices, vsize);

    const unsigned int *vindices = ifs.vindices();
    const unsigned int *nindices = ifs.nindices();
    const unsigned int *tindices = NULL;
    if (ifs.tsize()) {
        tindices = ifs.tindices();
    }

    const unsigned int *mindices = NULL;
    if (ifs.msize())
        mindices = ifs.mindices();
    const unsigned int *numVertexPerFace = ifs.numVertexPerFaces();
    const unsigned int numfaces = ifs.numFaces();

    // Build one face at a time; the index pointers act as cursors that advance
    // by the vertex count of each face after it is built.
    for (unsigned int index = 0; index < numfaces; index++) {
        switch (faceStyle[index]) {
            case IndexedFaceSet::TRIANGLE_STRIP:
                buildTriangleStrip(new_vertices, new_normals, frs_materials, texCoords, faceEdgeMarks,
                                   vindices, nindices, mindices, tindices, numVertexPerFace[index]);
                break;
            case IndexedFaceSet::TRIANGLE_FAN:
                buildTriangleFan(new_vertices, new_normals, frs_materials, texCoords, faceEdgeMarks,
                                 vindices, nindices, mindices, tindices, numVertexPerFace[index]);
                break;
            case IndexedFaceSet::TRIANGLES:
                buildTriangles(new_vertices, new_normals, frs_materials, texCoords, faceEdgeMarks,
                               vindices, nindices, mindices, tindices, numVertexPerFace[index]);
                break;
        }
        vindices += numVertexPerFace[index];
        nindices += numVertexPerFace[index];
        if (mindices)
            mindices += numVertexPerFace[index];
        if (tindices)
            tindices += numVertexPerFace[index];
        faceEdgeMarks++;
    }

    // The world-space buffers were copied into the shape's own structures above.
    delete[] new_vertices;
    delete[] new_normals;

    if (shape.GetFaceList().size() == 0) // this may happen due to degenerate triangles
        return false;

    // compute bbox
    shape.ComputeBBox();
    // compute mean edge size:
    shape.ComputeMeanEdgeSize();

    // Parse the built winged-edge shape to update post-flags
    set<Vec3r> normalsSet;
    vector<WVertex *>& wvertices = shape.getVertexList();
    for (vector<WVertex *>::iterator wv = wvertices.begin(), wvend = wvertices.end(); wv != wvend; ++wv) {
        if ((*wv)->isBoundary())
            continue;
        if ((*wv)->GetEdges().size() == 0) // This means that the WVertex has no incoming edges... (12-Sep-2011 T.K.)
            continue;
        // Collect the distinct per-face normals around this vertex; more than
        // one distinct normal means the faces disagree, i.e. a sharp feature.
        normalsSet.clear();
        WVertex::face_iterator fit = (*wv)->faces_begin();
        WVertex::face_iterator fitend = (*wv)->faces_end();
        for (; fit != fitend; ++fit) {
            WFace *face = *fit;
            normalsSet.insert(face->GetVertexNormal(*wv));
            if (normalsSet.size() != 1) {
                break;
            }
        }
        if (normalsSet.size() != 1) {
            (*wv)->setSmooth(false);
        }
    }

    // Adds the new WShape to the WingedEdge structure
    _winged_edge->addWShape(&shape);

    return true;
}
// Rotates by `angle` about the Y axis, applying the resulting rotation
// matrix to the stored normals/orientation via transformNormals().
void yaw(Angle angle){
    transformNormals(Mat4::rotation(Axis::y, angle));
}
// Rotates by `angle` about the normalized local U (right) axis, applying the
// resulting rotation matrix via transformNormals().
void pitch(Angle angle){
    transformNormals(Mat4::rotation(getU().normalize(), angle));
}
// Connection hook: refresh the cached face normals so they match the owning
// object's current orientation as soon as the collider is attached.
void CubeCollider::onConnect(){
    transformNormals();
}
// Verifies the generated model hypotheses against the scene: aligns each model
// cloud (and its normals) by its hypothesised transform, configures the
// hypothesis-verification algorithm (with GHV-specific extras such as planar
// models, when applicable), runs verification, and stores the per-hypothesis
// accept/reject mask in model_or_plane_is_verified_.
void Recognizer<PointT>::hypothesisVerification ()
{
    std::vector<typename pcl::PointCloud<PointT>::ConstPtr> aligned_models (models_.size ());
    std::vector<pcl::PointCloud<pcl::Normal>::ConstPtr> aligned_model_normals (models_.size ());

    model_or_plane_is_verified_.clear();
    planes_.clear();

    // Nothing to verify.
    if(models_.empty())
    {
        std::cout << "No generated models to verify!" << std::endl;
        return;
    }

    // Align every model cloud and its normals by the hypothesised transform.
#pragma omp parallel for schedule(dynamic)
    for(size_t i=0; i<models_.size(); i++)
    {
        typename pcl::PointCloud<PointT>::Ptr aligned_model_tmp (new pcl::PointCloud<PointT>);
        pcl::PointCloud<pcl::Normal>::Ptr aligned_normal_tmp (new pcl::PointCloud<pcl::Normal>);
        ConstPointTPtr model_cloud = models_[i]->getAssembled ( param_.resolution_mm_model_assembly_ );
        pcl::transformPointCloud (*model_cloud, *aligned_model_tmp, transforms_[i]);
        aligned_models[i] = aligned_model_tmp;
        pcl::PointCloud<pcl::Normal>::ConstPtr normal_cloud_const = models_[i]->getNormalsAssembled (hv_algorithm_->getResolution());
        transformNormals(*normal_cloud_const, *aligned_normal_tmp, transforms_[i]);
        aligned_model_normals[i] = aligned_normal_tmp;
    }

    // The GHV-specific configuration below only applies when the configured
    // algorithm actually is a GHV instance (checked via dynamic cast).
    boost::shared_ptr<GHV<PointT, PointT> > hv_algorithm_ghv;
    if( hv_algorithm_ )
        hv_algorithm_ghv = boost::dynamic_pointer_cast<GHV<PointT, PointT>> (hv_algorithm_);

    hv_algorithm_->setOcclusionCloud (scene_);
    hv_algorithm_->setSceneCloud (scene_);
    hv_algorithm_->addModels (aligned_models, true);

    if (hv_algorithm_->getRequiresNormals ())
        hv_algorithm_->addNormalsClouds (aligned_model_normals);

    if( hv_algorithm_ghv ) {
        hv_algorithm_ghv->setRequiresNormals(false);
        hv_algorithm_ghv->setNormalsForClutterTerm(scene_normals_);

        if( hv_algorithm_ghv->param_.add_planes_ )
        {
            // Extract scene planes with one of two methods:
            // 0 = multi-plane segmentation, otherwise normal-based clustering.
            if( hv_algorithm_ghv->param_.plane_method_ == 0 )
            {
                MultiPlaneSegmentation<PointT> mps;
                mps.setInputCloud( scene_ );
                mps.setMinPlaneInliers( hv_algorithm_ghv->param_.min_plane_inliers_ );
                mps.setResolution( hv_algorithm_ghv->param_.resolution_ );
                mps.setNormals( scene_normals_ );
                mps.setMergePlanes( true );
                mps.segment();
                planes_ = mps.getModels();
            }
            else
            {
                typename ClusterNormalsToPlanesPCL<PointT>::Parameter p_param;
                p_param.inlDistSmooth = 0.05f;
                p_param.minPoints = hv_algorithm_ghv->param_.min_plane_inliers_;
                p_param.inlDist = hv_algorithm_ghv->param_.plane_inlier_distance_;
                p_param.thrAngle = hv_algorithm_ghv->param_.plane_thrAngle_;
                p_param.K_ = hv_algorithm_ghv->param_.knn_plane_clustering_search_;
                p_param.normal_computation_method_ = param_.normal_computation_method_;
                ClusterNormalsToPlanesPCL<PointT> pest(p_param);
                pest.compute(scene_, *scene_normals_, planes_);
            }

            // Planes compete with model hypotheses during verification.
            hv_algorithm_ghv->addPlanarModels(planes_);
        }
    }

    hv_algorithm_->verify ();
    hv_algorithm_->getMask (model_or_plane_is_verified_);
}