void
SuperFrameParser::convertImageToPointCloud (const sensor_msgs::ImagePtr& depth_msg,
                                            const pcl::PointCloud<pcl::PointXYZ>::Ptr &cloud)
{
  cloud->height = depth_msg->height;
  cloud->width = depth_msg->width;
  cloud->resize (cloud->height * cloud->width);

  // Use correct principal point from calibration
  float center_x = depth_info_->K[2]; // c_x
  float center_y = depth_info_->K[5]; // c_y

  // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
  float unit_scaling = 0.001f; // raw depth is in millimeters
  float constant_x = unit_scaling / depth_info_->K[0]; // f_x
  float constant_y = unit_scaling / depth_info_->K[4]; // f_y
  float bad_point = std::numeric_limits<float>::quiet_NaN ();

  pcl::PointCloud<pcl::PointXYZ>::iterator pt_iter = cloud->begin ();
  const uint16_t* depth_row = reinterpret_cast<const uint16_t*> (&depth_msg->data[0]);
  int row_step = depth_msg->step / sizeof (uint16_t);

  for (int v = 0; v < (int)depth_msg->height; ++v, depth_row += row_step)
  {
    for (int u = 0; u < (int)depth_msg->width; ++u)
    {
      pcl::PointXYZ& pt = *pt_iter++;
      uint16_t depth = depth_row[u];

      // Missing points denoted by NaNs
      if (depth == 0)
      {
        pt.x = pt.y = pt.z = bad_point;
        continue;
      }

      // Fill in XYZ
      pt.x = (u - center_x) * depth * constant_x;
      pt.y = (v - center_y) * depth * constant_y;
      pt.z = depth * unit_scaling;
    }
  }
}
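// The conversion above is plain pinhole back-projection. Below is a minimal,
// self-contained sketch of the same math for a single pixel, with hypothetical
// intrinsics; it is not part of SuperFrameParser.
#include <cstdint>
#include <cstdio>

struct Intrinsics { float fx, fy, cx, cy; };

// Convert one pixel (u, v) with a raw depth in millimeters to metric XYZ.
void backProject (int u, int v, uint16_t depth_mm, const Intrinsics &K,
                  float &x, float &y, float &z)
{
  const float unit_scaling = 0.001f;    // mm -> m
  z = depth_mm * unit_scaling;
  x = (u - K.cx) * z / K.fx;            // X = (u - c_x) * Z / f_x
  y = (v - K.cy) * z / K.fy;            // Y = (v - c_y) * Z / f_y
}

int main ()
{
  Intrinsics K = {525.0f, 525.0f, 319.5f, 239.5f}; // hypothetical VGA intrinsics
  float x, y, z;
  backProject (320, 240, 1500, K, x, y, z);        // center pixel, 1.5 m away
  printf ("%f %f %f\n", x, y, z);
  return 0;
}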
void
pcl::StereoMatching::getVisualMap (pcl::PointCloud<pcl::RGB>::Ptr vMap)
{
  if (static_cast<int> (vMap->width) != width_ || static_cast<int> (vMap->height) != height_)
  {
    vMap->resize (width_ * height_);
    vMap->width = width_;
    vMap->height = height_;
  }

  vMap->is_dense = false;

  // Invalid disparities are rendered in green
  pcl::RGB invalid_val;
  invalid_val.r = 0;
  invalid_val.g = 255;
  invalid_val.b = 0;

  // Disparities are stored multiplied by 16, so fold that into the gray scale
  float scale = 255.0f / (16.0f * static_cast<float> (max_disp_));

  for (int y = 0; y < height_; y++)
  {
    for (int x = 0; x < width_; x++)
    {
      if (disp_map_[y * width_ + x] <= 0)
      {
        vMap->at (x, y) = invalid_val;
      }
      else
      {
        unsigned char val = static_cast<unsigned char> (std::floor (scale * disp_map_[y * width_ + x]));
        vMap->at (x, y).r = val;
        vMap->at (x, y).g = val;
        vMap->at (x, y).b = val;
      }
    }
  }
}
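// Hedged usage sketch for getVisualMap, assuming PCL's stereo module API
// (pcl::AdaptiveCostSOStereoMatching, compute(), medianFilter()) and two
// hypothetical, already-rectified left/right pcl::RGB clouds on disk.
#include <pcl/stereo/stereo_matching.h>
#include <pcl/io/pcd_io.h>

int main ()
{
  pcl::PointCloud<pcl::RGB>::Ptr left (new pcl::PointCloud<pcl::RGB>);
  pcl::PointCloud<pcl::RGB>::Ptr right (new pcl::PointCloud<pcl::RGB>);
  pcl::io::loadPCDFile ("left_rgb.pcd", *left);   // hypothetical input files
  pcl::io::loadPCDFile ("right_rgb.pcd", *right);

  pcl::AdaptiveCostSOStereoMatching stereo;
  stereo.setMaxDisparity (60);                    // example parameter values
  stereo.setRadius (5);
  stereo.compute (*left, *right);
  stereo.medianFilter (4);

  // Render the disparity map as the grayscale image built by getVisualMap
  pcl::PointCloud<pcl::RGB>::Ptr vmap (new pcl::PointCloud<pcl::RGB>);
  stereo.getVisualMap (vmap);
  pcl::io::savePCDFile ("disparity_vmap.pcd", *vmap);
  return 0;
}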
template <typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::detectEdgeRoberts (pcl::PointCloud<PointOutT> &output)
{
  convolution_.setInputCloud (input_);

  // Convolve with the Roberts X kernel
  pcl::PointCloud<pcl::PointXYZI>::Ptr kernel_x (new pcl::PointCloud<pcl::PointXYZI>);
  pcl::PointCloud<pcl::PointXYZI>::Ptr magnitude_x (new pcl::PointCloud<pcl::PointXYZI>);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::ROBERTS_X);
  kernel_.fetchKernel (*kernel_x);
  convolution_.setKernel (*kernel_x);
  convolution_.filter (*magnitude_x);

  // Convolve with the Roberts Y kernel
  pcl::PointCloud<pcl::PointXYZI>::Ptr kernel_y (new pcl::PointCloud<pcl::PointXYZI>);
  pcl::PointCloud<pcl::PointXYZI>::Ptr magnitude_y (new pcl::PointCloud<pcl::PointXYZI>);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::ROBERTS_Y);
  kernel_.fetchKernel (*kernel_y);
  convolution_.setKernel (*kernel_y);
  convolution_.filter (*magnitude_y);

  const int height = input_->height;
  const int width = input_->width;

  output.resize (height * width);
  output.height = height;
  output.width = width;

  // Combine the two gradient images into magnitude and direction
  for (size_t i = 0; i < output.size (); ++i)
  {
    output[i].magnitude_x = (*magnitude_x)[i].intensity;
    output[i].magnitude_y = (*magnitude_y)[i].intensity;
    output[i].magnitude =
        sqrtf ((*magnitude_x)[i].intensity * (*magnitude_x)[i].intensity +
               (*magnitude_y)[i].intensity * (*magnitude_y)[i].intensity);
    output[i].direction = atan2f ((*magnitude_y)[i].intensity, (*magnitude_x)[i].intensity);
  }
}
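// The Roberts cross operator used above convolves the image with two 2x2
// kernels, [[1,0],[0,-1]] and [[0,1],[-1,0]], estimating diagonal gradients.
// A minimal, dependency-free sketch of the same computation on a tiny image
// (the kernel<> class is assumed to produce exactly these kernels):
#include <cmath>
#include <cstdio>

int main ()
{
  // 3x3 test image with a vertical step edge
  float img[3][3] = { {0, 0, 10}, {0, 0, 10}, {0, 0, 10} };
  for (int y = 0; y < 2; ++y)
    for (int x = 0; x < 2; ++x)
    {
      float gx = img[y][x] - img[y + 1][x + 1];   // Roberts X: [[1,0],[0,-1]]
      float gy = img[y][x + 1] - img[y + 1][x];   // Roberts Y: [[0,1],[-1,0]]
      printf ("(%d,%d): |g| = %.2f, dir = %.2f\n",
              x, y, std::sqrt (gx * gx + gy * gy), std::atan2 (gy, gx));
    }
  return 0;
}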
template <typename PointInT, typename PointOutT> int
computeNormalsAndEigenvalues (const typename pcl::PointCloud<PointInT>::Ptr &in_cloud,
                              const float &radius, const int &n_threads,
                              pcl::PointCloud<PointOutT> &out_cloud)
{
  int n_points = static_cast<int> (in_cloud->size ());

  // Resize the output cloud
  out_cloud.resize (n_points);

  // Build the kd-tree
  pcl::KdTreeFLANN<PointInT> flann;
  flann.setInputCloud (in_cloud);

  // Scratch space for neighbor ids and distances (per-thread via private())
  std::vector<int> indices;
  std::vector<float> distances;

#ifdef USE_OPENMP
#pragma omp parallel for shared(out_cloud) private(indices, distances) \
    num_threads(n_threads)
#endif
  for (int i = 0; i < n_points; ++i)
  {
    // Get the neighbors of this point
    flann.radiusSearch ((*in_cloud)[i], radius, indices, distances);

    // Estimate normal and eigenvalues
    computePointNormal (*in_cloud, indices,
                        out_cloud[i].normal_x, out_cloud[i].normal_y, out_cloud[i].normal_z,
                        out_cloud[i].lam0, out_cloud[i].lam1, out_cloud[i].lam2);

    // Orient the normal toward the origin
    flipNormal<PointInT> ((*in_cloud)[i], 0.0, 0.0, 0.0,
                          out_cloud[i].normal_x, out_cloud[i].normal_y, out_cloud[i].normal_z);
  }
  return 1;
}
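// The private(indices, distances) clause above is the standard OpenMP
// per-thread scratch-buffer pattern: each thread gets its own
// default-constructed copies, so concurrent radiusSearch calls do not race.
// A minimal sketch of just that pattern (compile with -fopenmp):
#include <omp.h>
#include <vector>
#include <cstdio>

int main ()
{
  const int n = 8;
  std::vector<int> scratch;               // shared name, private storage below
  #pragma omp parallel for private(scratch) num_threads(4)
  for (int i = 0; i < n; ++i)
  {
    scratch.clear ();                     // safe: this is the thread's own copy
    scratch.push_back (i);
    printf ("thread %d handled %d\n", omp_get_thread_num (), scratch[0]);
  }
  return 0;
}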
void
FeatureFactory::extractRangeFeature (cv_bridge::CvImagePtr cv_ptr,
                                     pcl::PointCloud<feature_factory::FeaturePoint<RANGE_FEATURES_HISTOGRAM> > &range_features,
                                     FeatureType feature_type)
{
  double max_feature_val;
  if (feature_type == DEPTH)
    max_feature_val = MAX_DEPTH_FEATURE_VAL;
  else
    max_feature_val = MAX_CONF_AMP_FEATURE_VAL;

  // Partition the range image into a regular grid of square tiles
  const uint16_t n_grids_per_row = RANGE_IMAGE_WIDTH / grid_edge_size;
  const uint16_t n_grids_per_col = RANGE_IMAGE_HEIGHT / grid_edge_size;
  const uint16_t n_grids = n_grids_per_col * n_grids_per_row;

  range_features.resize (n_grids);
  for (uint16_t gri = 0; gri < n_grids_per_col; gri++)
    for (uint16_t gci = 0; gci < n_grids_per_row; gci++)
    {
      // Hint: Rect(top_left_x, top_left_y, width, height)
      range_features[gri * n_grids_per_row + gci] =
          feature_factory::FeaturePoint<RANGE_FEATURES_HISTOGRAM>::calculate (
              cv_ptr->image,
              cv::Rect (gci * grid_edge_size, gri * grid_edge_size, grid_edge_size, grid_edge_size),
              0, max_feature_val);
    }
}
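// Sketch of the grid tiling used above, assuming OpenCV: the image is split
// into grid_edge_size x grid_edge_size tiles, indexed row-major. Image
// dimensions and tile size here are hypothetical stand-ins for
// RANGE_IMAGE_WIDTH/HEIGHT and grid_edge_size.
#include <opencv2/core.hpp>
#include <cstdio>

int main ()
{
  const int width = 176, height = 144, grid_edge_size = 16; // hypothetical sizes
  cv::Mat image = cv::Mat::zeros (height, width, CV_16UC1);

  const int n_per_row = width / grid_edge_size;
  const int n_per_col = height / grid_edge_size;
  for (int gri = 0; gri < n_per_col; ++gri)
    for (int gci = 0; gci < n_per_row; ++gci)
    {
      // Rect(top_left_x, top_left_y, width, height) selects one tile;
      // the row-major index gri * n_per_row + gci matches the code above
      cv::Mat tile = image (cv::Rect (gci * grid_edge_size, gri * grid_edge_size,
                                      grid_edge_size, grid_edge_size));
      double mean = cv::mean (tile)[0]; // stand-in for the per-tile feature
      (void)mean;
    }
  printf ("%d x %d tiles\n", n_per_row, n_per_col);
  return 0;
}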
//////////////////////////////////////////////////////////////////////////////
bool
pcl::StereoMatching::getPointCloud (float u_c, float v_c, float focal, float baseline,
                                    pcl::PointCloud<pcl::PointXYZ>::Ptr cloud)
{
  // The disparity map has not been computed yet
  if (disp_map_ == NULL)
  {
    PCL_ERROR ("[pcl::StereoMatching::getPointCloud] Error: a disparity map has not been computed yet. The resulting cloud can not be computed..\n");
    return (false);
  }

  // The cloud needs to be re-allocated
  if (static_cast<int> (cloud->width) != width_ || static_cast<int> (cloud->height) != height_)
  {
    cloud->resize (width_ * height_);
    cloud->width = width_;
    cloud->height = height_;
  }
  cloud->is_dense = false;

  pcl::PointXYZ temp_point;
  pcl::PointXYZ nan_point;
  nan_point.x = std::numeric_limits<float>::quiet_NaN ();
  nan_point.y = std::numeric_limits<float>::quiet_NaN ();
  nan_point.z = std::numeric_limits<float>::quiet_NaN ();

  // All disparities are multiplied by a constant equal to 16;
  // this must be taken into account when computing z values
  float depth_scale = baseline * focal * 16.0f;

  for (int j = 0; j < height_; j++)
  {
    for (int i = 0; i < width_; i++)
    {
      if (disp_map_[j * width_ + i] > 0)
      {
        temp_point.z = depth_scale / disp_map_[j * width_ + i];
        temp_point.x = ((static_cast<float> (i) - u_c) * temp_point.z) / focal;
        temp_point.y = ((static_cast<float> (j) - v_c) * temp_point.z) / focal;
        (*cloud)[j * width_ + i] = temp_point;
      }
      // Invalid disparity: insert a NaN point
      else
      {
        (*cloud)[j * width_ + i] = nan_point;
      }
    }
  }
  return (true);
}
bool
pcl::StereoMatching::getPointCloud (float u_c, float v_c, float focal, float baseline,
                                    pcl::PointCloud<pcl::PointXYZRGB>::Ptr cloud,
                                    pcl::PointCloud<pcl::RGB>::Ptr texture)
{
  // The disparity map has not been computed yet
  if (disp_map_ == NULL)
  {
    PCL_ERROR ("[pcl::StereoMatching::getPointCloud] Error: a disparity map has not been computed yet. The resulting cloud can not be computed..\n");
    return (false);
  }

  if (static_cast<int> (texture->width) != width_ || static_cast<int> (texture->height) != height_)
  {
    PCL_ERROR ("[pcl::StereoMatching::getPointCloud] Error: the size of the texture cloud does not match that of the computed range map. The resulting cloud can not be computed..\n");
    return (false);
  }

  // The cloud needs to be re-allocated
  if (static_cast<int> (cloud->width) != width_ || static_cast<int> (cloud->height) != height_)
  {
    cloud->resize (width_ * height_);
    cloud->width = width_;
    cloud->height = height_;
    cloud->is_dense = false;
  }

  pcl::PointXYZRGB temp_point;

  // All disparities are multiplied by a constant equal to 16;
  // this must be taken into account when computing z values
  float depth_scale = baseline * focal * 16.0f;

  for (int j = 0; j < height_; j++)
  {
    for (int i = 0; i < width_; i++)
    {
      if (disp_map_[j * width_ + i] > 0)
      {
        temp_point.z = depth_scale / disp_map_[j * width_ + i];
        temp_point.x = ((static_cast<float> (i) - u_c) * temp_point.z) / focal;
        temp_point.y = ((static_cast<float> (j) - v_c) * temp_point.z) / focal;
        temp_point.r = texture->at (j * width_ + i).r;
        temp_point.g = texture->at (j * width_ + i).g;
        temp_point.b = texture->at (j * width_ + i).b;
        (*cloud)[j * width_ + i] = temp_point;
      }
      // Invalid disparity: keep the texture color but mark XYZ as NaN
      else
      {
        temp_point.x = std::numeric_limits<float>::quiet_NaN ();
        temp_point.y = std::numeric_limits<float>::quiet_NaN ();
        temp_point.z = std::numeric_limits<float>::quiet_NaN ();
        temp_point.r = texture->at (j * width_ + i).r;
        temp_point.g = texture->at (j * width_ + i).g;
        temp_point.b = texture->at (j * width_ + i).b;
        (*cloud)[j * width_ + i] = temp_point;
      }
    }
  }
  return (true);
}
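// Worked sketch of the disparity-to-depth conversion used by both overloads
// above, with hypothetical camera parameters. Disparities are stored
// multiplied by 16 (fixed point), hence the 16.0f factor in depth_scale.
#include <cstdio>

int main ()
{
  const float focal = 700.0f;    // focal length in pixels (hypothetical)
  const float baseline = 0.12f;  // stereo baseline in meters (hypothetical)
  const short raw_disp = 640;    // 16 * 40 -> true disparity of 40 px

  const float depth_scale = baseline * focal * 16.0f;
  const float z = depth_scale / raw_disp; // = baseline * focal / (raw_disp / 16)
  printf ("z = %.3f m\n", z);             // 0.12 * 700 / 40 = 2.1 m
  return 0;
}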
template <typename PointNT> void
pcl::Poisson<PointNT>::performReconstruction (pcl::PointCloud<PointNT> &points,
                                              std::vector<pcl::Vertices> &polygons)
{
  poisson::CoredVectorMeshData mesh;
  poisson::Point3D<float> center;
  float scale = 1.0f;

  // Dispatch on the B-spline degree of the solver
  switch (degree_)
  {
    case 1: { execute<1> (mesh, center, scale); break; }
    case 2: { execute<2> (mesh, center, scale); break; }
    case 3: { execute<3> (mesh, center, scale); break; }
    case 4: { execute<4> (mesh, center, scale); break; }
    case 5: { execute<5> (mesh, center, scale); break; }
    default: { PCL_ERROR ("Degree %d not supported\n", degree_); }
  }

  // Write output PolygonMesh
  // Write vertices
  points.resize (int (mesh.outOfCorePointCount () + mesh.inCorePoints.size ()));
  poisson::Point3D<float> p;
  for (int i = 0; i < int (mesh.inCorePoints.size ()); i++)
  {
    p = mesh.inCorePoints[i];
    points.points[i].x = p.coords[0] * scale + center.coords[0];
    points.points[i].y = p.coords[1] * scale + center.coords[1];
    points.points[i].z = p.coords[2] * scale + center.coords[2];
  }
  for (int i = int (mesh.inCorePoints.size ()); i < int (mesh.outOfCorePointCount () + mesh.inCorePoints.size ()); i++)
  {
    mesh.nextOutOfCorePoint (p);
    points.points[i].x = p.coords[0] * scale + center.coords[0];
    points.points[i].y = p.coords[1] * scale + center.coords[1];
    points.points[i].z = p.coords[2] * scale + center.coords[2];
  }

  polygons.resize (mesh.polygonCount ());

  // Write faces
  std::vector<poisson::CoredVertexIndex> polygon;
  for (int p_i = 0; p_i < mesh.polygonCount (); p_i++)
  {
    pcl::Vertices v;
    mesh.nextPolygon (polygon);
    v.vertices.resize (polygon.size ());

    for (int i = 0; i < static_cast<int> (polygon.size ()); ++i)
      if (polygon[i].inCore)
        v.vertices[i] = polygon[i].idx;
      else
        // Out-of-core vertices are indexed after the in-core ones
        v.vertices[i] = polygon[i].idx + int (mesh.inCorePoints.size ());

    polygons[p_i] = v;
  }
}
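// Hedged usage sketch for the reconstruction above through PCL's public
// Poisson interface (reconstruct() dispatches to performReconstruction);
// "cloud_normals.pcd" is a hypothetical input that must already contain
// points with normals.
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/surface/poisson.h>
#include <pcl/PolygonMesh.h>

int main ()
{
  pcl::PointCloud<pcl::PointNormal>::Ptr cloud (new pcl::PointCloud<pcl::PointNormal>);
  pcl::io::loadPCDFile ("cloud_normals.pcd", *cloud);

  pcl::Poisson<pcl::PointNormal> poisson;
  poisson.setDepth (8);       // octree depth: higher = more detail, more memory
  poisson.setDegree (2);      // B-spline degree, handled by the switch above
  poisson.setInputCloud (cloud);

  pcl::PolygonMesh mesh;
  poisson.reconstruct (mesh);
  pcl::io::savePLYFile ("poisson_mesh.ply", mesh);
  return 0;
}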
template <typename PointT> void
pcl::copyPointCloud (const pcl::PointCloud<PointT> &cloud_in,
                     pcl::PointCloud<PointT> &cloud_out,
                     int top, int bottom, int left, int right,
                     pcl::InterpolationType border_type, const PointT& value)
{
  if (top < 0 || left < 0 || bottom < 0 || right < 0)
  {
    std::string faulty = (top < 0) ? "top" : (left < 0) ? "left" : (bottom < 0) ? "bottom" : "right";
    PCL_THROW_EXCEPTION (pcl::BadArgumentException, "[pcl::copyPointCloud] error: " << faulty << " must be non-negative!");
    return;
  }

  if (top == 0 && left == 0 && bottom == 0 && right == 0)
    cloud_out = cloud_in;
  else
  {
    // Allocate enough space and copy the basics
    cloud_out.header = cloud_in.header;
    cloud_out.width = cloud_in.width + left + right;
    cloud_out.height = cloud_in.height + top + bottom;
    if (cloud_out.size () != cloud_out.width * cloud_out.height)
      cloud_out.resize (cloud_out.width * cloud_out.height);
    cloud_out.is_dense = cloud_in.is_dense;
    cloud_out.sensor_orientation_ = cloud_in.sensor_orientation_;
    cloud_out.sensor_origin_ = cloud_in.sensor_origin_;

    if (border_type == pcl::BORDER_TRANSPARENT)
    {
      // Copy only the interior; the border is left untouched
      const PointT* in = &(cloud_in.points[0]);
      PointT* out = &(cloud_out.points[0]);
      PointT* out_inner = out + cloud_out.width * top + left;
      for (uint32_t i = 0; i < cloud_in.height; i++, out_inner += cloud_out.width, in += cloud_in.width)
      {
        if (out_inner != in)
          memcpy (out_inner, in, cloud_in.width * sizeof (PointT));
      }
    }
    else
    {
      // Copy the data
      if (border_type != pcl::BORDER_CONSTANT)
      {
        try
        {
          std::vector<int> padding (cloud_out.width - cloud_in.width);
          int right = cloud_out.width - cloud_in.width - left;
          int bottom = cloud_out.height - cloud_in.height - top;

          // Precompute the source column for every padded column
          for (int i = 0; i < left; i++)
            padding[i] = pcl::interpolatePointIndex (i - left, cloud_in.width, border_type);
          for (int i = 0; i < right; i++)
            padding[i + left] = pcl::interpolatePointIndex (cloud_in.width + i, cloud_in.width, border_type);

          const PointT* in = &(cloud_in.points[0]);
          PointT* out = &(cloud_out.points[0]);
          PointT* out_inner = out + cloud_out.width * top + left;
          for (uint32_t i = 0; i < cloud_in.height; i++, out_inner += cloud_out.width, in += cloud_in.width)
          {
            if (out_inner != in)
              memcpy (out_inner, in, cloud_in.width * sizeof (PointT));
            for (int j = 0; j < left; j++)
              out_inner[j - left] = in[padding[j]];
            for (int j = 0; j < right; j++)
              out_inner[j + cloud_in.width] = in[padding[j + left]];
          }

          // Fill the top and bottom border rows from interpolated source rows
          for (int i = 0; i < top; i++)
          {
            int j = pcl::interpolatePointIndex (i - top, cloud_in.height, border_type);
            memcpy (out + i * cloud_out.width, out + (j + top) * cloud_out.width, sizeof (PointT) * cloud_out.width);
          }
          for (int i = 0; i < bottom; i++)
          {
            int j = pcl::interpolatePointIndex (i + cloud_in.height, cloud_in.height, border_type);
            memcpy (out + (i + cloud_in.height + top) * cloud_out.width, out + (j + top) * cloud_out.width, cloud_out.width * sizeof (PointT));
          }
        }
        catch (pcl::BadArgumentException&)
        {
          PCL_ERROR ("[pcl::copyPointCloud] Unhandled interpolation type %d!\n", border_type);
        }
      }
      else
      {
        // BORDER_CONSTANT: fill the border with the given value
        int right = cloud_out.width - cloud_in.width - left;
        int bottom = cloud_out.height - cloud_in.height - top;
        std::vector<PointT> buff (cloud_out.width, value);
        PointT* buff_ptr = &(buff[0]);
        const PointT* in = &(cloud_in.points[0]);
        PointT* out = &(cloud_out.points[0]);
        PointT* out_inner = out + cloud_out.width * top + left;
        for (uint32_t i = 0; i < cloud_in.height; i++, out_inner += cloud_out.width, in += cloud_in.width)
        {
          if (out_inner != in)
            memcpy (out_inner, in, cloud_in.width * sizeof (PointT));
          memcpy (out_inner - left, buff_ptr, left * sizeof (PointT));
          memcpy (out_inner + cloud_in.width, buff_ptr, right * sizeof (PointT));
        }
        for (int i = 0; i < top; i++)
        {
          memcpy (out + i * cloud_out.width, buff_ptr, cloud_out.width * sizeof (PointT));
        }
        for (int i = 0; i < bottom; i++)
        {
          memcpy (out + (i + cloud_in.height + top) * cloud_out.width, buff_ptr, cloud_out.width * sizeof (PointT));
        }
      }
    }
  }
}
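// Hedged usage sketch for the bordered copy above: pad an organized cloud by
// replicating its outermost rows/columns, or with a constant fill point,
// using the overload defined in this file (available as pcl::copyPointCloud
// in PCL 1.8's pcl/common/io.h).
#include <pcl/point_types.h>
#include <pcl/point_cloud.h>
#include <pcl/common/io.h>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ> in (4, 3);  // small organized cloud (w=4, h=3)
  for (size_t i = 0; i < in.size (); ++i)
    in[i].x = static_cast<float> (i);

  // Replicate the border points outward by 2 on each side
  pcl::PointCloud<pcl::PointXYZ> out;
  pcl::copyPointCloud (in, out, 2, 2, 2, 2, pcl::BORDER_REPLICATE, pcl::PointXYZ ());

  // Constant border: every padded cell becomes the given fill value
  pcl::PointXYZ fill (0.0f, 0.0f, 0.0f);
  pcl::copyPointCloud (in, out, 1, 1, 1, 1, pcl::BORDER_CONSTANT, fill);
  return 0;
}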
template <typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::canny (const pcl::PointCloud<PointInT> &input_x,
                                           const pcl::PointCloud<PointInT> &input_y,
                                           pcl::PointCloud<PointOutT> &output)
{
  float tHigh = hysteresis_threshold_high_;
  float tLow = hysteresis_threshold_low_;
  const int height = input_x.height;
  const int width = input_x.width;

  output.resize (height * width);
  output.height = height;
  output.width = width;

  // Noise reduction using Gaussian blurring
  pcl::PointCloud<pcl::PointXYZI>::Ptr gaussian_kernel (new pcl::PointCloud<pcl::PointXYZI>);
  kernel_.setKernelSize (3);
  kernel_.setKernelSigma (1.0);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::GAUSSIAN);
  kernel_.fetchKernel (*gaussian_kernel);
  convolution_.setKernel (*gaussian_kernel);

  PointCloudIn smoothed_cloud_x;
  convolution_.setInputCloud (input_x.makeShared ());
  convolution_.filter (smoothed_cloud_x);

  PointCloudIn smoothed_cloud_y;
  convolution_.setInputCloud (input_y.makeShared ());
  convolution_.filter (smoothed_cloud_y);

  // Edge detection using Sobel
  pcl::PointCloud<PointXYZIEdge>::Ptr edges (new pcl::PointCloud<PointXYZIEdge>);
  sobelMagnitudeDirection (smoothed_cloud_x, smoothed_cloud_y, *edges);

  // Edge discretization
  discretizeAngles (*edges);

  // Non-maximum suppression, keeping only maxima above tLow
  pcl::PointCloud<pcl::PointXYZI>::Ptr maxima (new pcl::PointCloud<pcl::PointXYZI>);
  suppressNonMaxima (*edges, *maxima, tLow);

  // Edge tracing: seed at strong pixels (>= tHigh), follow into all 8 neighbors
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      if ((*maxima)(j, i).intensity < tHigh ||
          (*maxima)(j, i).intensity == std::numeric_limits<float>::max ())
        continue;

      (*maxima)(j, i).intensity = std::numeric_limits<float>::max ();
      cannyTraceEdge ( 1,  0, i, j, *maxima);
      cannyTraceEdge (-1,  0, i, j, *maxima);
      cannyTraceEdge ( 1,  1, i, j, *maxima);
      cannyTraceEdge (-1, -1, i, j, *maxima);
      cannyTraceEdge ( 0, -1, i, j, *maxima);
      cannyTraceEdge ( 0,  1, i, j, *maxima);
      cannyTraceEdge (-1,  1, i, j, *maxima);
      cannyTraceEdge ( 1, -1, i, j, *maxima);
    }
  }

  // Final thresholding: traced pixels become edges
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      if ((*maxima)(j, i).intensity == std::numeric_limits<float>::max ())
        output (j, i).magnitude = 255;
      else
        output (j, i).magnitude = 0;
    }
  }
}
template <typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::detectEdgeCanny (pcl::PointCloud<PointOutT> &output)
{
  float tHigh = hysteresis_threshold_high_;
  float tLow = hysteresis_threshold_low_;
  const int height = input_->height;
  const int width = input_->width;

  output.resize (height * width);
  output.height = height;
  output.width = width;

  // Noise reduction using Gaussian blurring
  pcl::PointCloud<pcl::PointXYZI>::Ptr gaussian_kernel (new pcl::PointCloud<pcl::PointXYZI>);
  PointCloudInPtr smoothed_cloud (new PointCloudIn);
  kernel_.setKernelSize (3);
  kernel_.setKernelSigma (1.0);
  kernel_.setKernelType (kernel<pcl::PointXYZI>::GAUSSIAN);
  kernel_.fetchKernel (*gaussian_kernel);
  convolution_.setKernel (*gaussian_kernel);
  convolution_.setInputCloud (input_);
  convolution_.filter (*smoothed_cloud);

  // Edge detection using Sobel
  pcl::PointCloud<PointXYZIEdge>::Ptr edges (new pcl::PointCloud<PointXYZIEdge>);
  setInputCloud (smoothed_cloud);
  detectEdgeSobel (*edges);

  // Edge discretization
  discretizeAngles (*edges);

  // Non-maximum suppression, keeping only maxima above tLow
  pcl::PointCloud<pcl::PointXYZI>::Ptr maxima (new pcl::PointCloud<pcl::PointXYZI>);
  suppressNonMaxima (*edges, *maxima, tLow);

  // Edge tracing (hysteresis)
  for (int i = 0; i < height; i++)
  {
    for (int j = 0; j < width; j++)
    {
      if ((*maxima)(j, i).intensity < tHigh ||
          (*maxima)(j, i).intensity == std::numeric_limits<float>::max ())
        continue;

      (*maxima)(j, i).intensity = std::numeric_limits<float>::max ();
      cannyTraceEdge ( 1,  0, i, j, *maxima);
      cannyTraceEdge (-1,  0, i, j, *maxima);
      cannyTraceEdge ( 1,  1, i, j, *maxima);
      cannyTraceEdge (-1, -1, i, j, *maxima);
      cannyTraceEdge ( 0, -1, i, j, *maxima);
      cannyTraceEdge ( 0,  1, i, j, *maxima);
      cannyTraceEdge (-1,  1, i, j, *maxima);
      cannyTraceEdge ( 1, -1, i, j, *maxima);
    }
  }

  // Final thresholding
  for (size_t i = 0; i < input_->size (); ++i)
  {
    if ((*maxima)[i].intensity == std::numeric_limits<float>::max ())
      output[i].magnitude = 255;
    else
      output[i].magnitude = 0;
  }
}
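// Hedged usage sketch for detectEdgeCanny, assuming the pcl_1_8::Edge copy
// exposes the same setters and output point type as PCL's pcl::Edge (2d
// module); the header path, the pcl_1_8::PointXYZIEdge name, and the input
// PCD file are assumptions.
#include <pcl/point_types.h>
#include <pcl/io/pcd_io.h>
#include "pcl_1_8/edge.h" // assumed project-local header for pcl_1_8::Edge

int main ()
{
  pcl::PointCloud<pcl::PointXYZI>::Ptr img (new pcl::PointCloud<pcl::PointXYZI>);
  pcl::io::loadPCDFile ("organized_intensity.pcd", *img); // hypothetical input

  pcl_1_8::Edge<pcl::PointXYZI, pcl_1_8::PointXYZIEdge> edge;
  edge.setInputCloud (img);
  edge.setHysteresisThresholdLow (20.0f);  // tLow: weak-edge threshold
  edge.setHysteresisThresholdHigh (50.0f); // tHigh: strong-edge seed threshold

  pcl::PointCloud<pcl_1_8::PointXYZIEdge> edges;
  edge.detectEdgeCanny (edges);            // magnitude == 255 marks edge pixels
  return 0;
}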
template <typename PointInT, typename PointOutT> void
pcl_1_8::Edge<PointInT, PointOutT>::suppressNonMaxima (const pcl::PointCloud<PointXYZIEdge> &edges,
                                                       pcl::PointCloud<pcl::PointXYZI> &maxima,
                                                       float tLow)
{
  const int height = edges.height;
  const int width = edges.width;

  maxima.height = height;
  maxima.width = width;
  maxima.resize (height * width);

  for (size_t i = 0; i < maxima.size (); ++i)
    maxima[i].intensity = 0.0f;

  // tLow thresholding and non-maximum suppression along the quantized
  // gradient direction (0, 45, 90 or 135 degrees)
  for (int i = 1; i < height - 1; i++)
  {
    for (int j = 1; j < width - 1; j++)
    {
      const PointXYZIEdge &ptedge = edges (j, i);
      pcl::PointXYZI &ptmax = maxima (j, i);

      if (ptedge.magnitude < tLow)
        continue;

      switch (int (ptedge.direction))
      {
        case 0:
        {
          if (ptedge.magnitude >= edges (j - 1, i).magnitude &&
              ptedge.magnitude >= edges (j + 1, i).magnitude)
            ptmax.intensity = ptedge.magnitude;
          break;
        }
        case 45:
        {
          if (ptedge.magnitude >= edges (j - 1, i - 1).magnitude &&
              ptedge.magnitude >= edges (j + 1, i + 1).magnitude)
            ptmax.intensity = ptedge.magnitude;
          break;
        }
        case 90:
        {
          if (ptedge.magnitude >= edges (j, i - 1).magnitude &&
              ptedge.magnitude >= edges (j, i + 1).magnitude)
            ptmax.intensity = ptedge.magnitude;
          break;
        }
        case 135:
        {
          if (ptedge.magnitude >= edges (j + 1, i - 1).magnitude &&
              ptedge.magnitude >= edges (j - 1, i + 1).magnitude)
            ptmax.intensity = ptedge.magnitude;
          break;
        }
      }
    }
  }
}
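// The switch above relies on directions already snapped to {0, 45, 90, 135}.
// A self-contained sketch of what discretizeAngles presumably does (an
// assumption based on the case labels): fold the gradient angle into
// [0, 180) degrees and round to the nearest of the four bins.
#include <cmath>
#include <cstdio>

int discretizeAngle (float rad)
{
  float deg = rad * 180.0f / static_cast<float> (M_PI);
  if (deg < 0)
    deg += 180.0f;                                  // fold into [0, 180)
  int bin = static_cast<int> ((deg + 22.5f) / 45.0f) % 4;
  return bin * 45;                                  // 0, 45, 90 or 135
}

int main ()
{
  printf ("%d %d %d\n", discretizeAngle (0.1f), discretizeAngle (0.8f),
          discretizeAngle (1.6f));                  // -> 0 45 90
  return 0;
}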
void
get_en_image (pcl::PointCloud<pcl::PointXYZ> &cloud)
{
  char flag = 'g';
  int img_idx = 0;
  while (flag != 'q')
  {
    ostringstream conv;
    conv << img_idx;
    cout << "Capturing new calibration image from the Ensenso stereo vision camera." << endl;

    /// Read the Ensenso stereo cameras:
    try
    {
      // Initialize NxLib and enumerate cameras
      nxLibInitialize (true);

      // Reference to the first camera in the node BySerialNo
      NxLibItem root;
      NxLibItem camera = root[itmCameras][itmBySerialNo][0];

      // Open the Ensenso
      NxLibCommand open (cmdOpen);
      open.parameters ()[itmCameras] = camera[itmSerialNumber].asString ();
      open.execute ();

      // Capture an image
      NxLibCommand (cmdCapture).execute ();

      // Stereo matching task
      NxLibCommand (cmdComputeDisparityMap).execute ();

      // Convert disparity map into XYZ data for each pixel
      NxLibCommand (cmdComputePointMap).execute ();

      // Get info about the computed point map and copy it into a std::vector
      double timestamp;
      std::vector<float> pointMap;
      int width, height;
      camera[itmImages][itmRaw][itmLeft].getBinaryDataInfo (0, 0, 0, 0, 0, &timestamp); // Get raw image timestamp
      camera[itmImages][itmPointMap].getBinaryDataInfo (&width, &height, 0, 0, 0, 0);
      camera[itmImages][itmPointMap].getBinaryData (pointMap, 0);

      // Copy the point map into the cloud
      cloud.resize (height * width);
      cloud.width = width;
      cloud.height = height;
      cloud.is_dense = false;

      // Copy data into the point cloud (and convert millimeters to meters)
      for (size_t i = 0; i < pointMap.size (); i += 3)
      {
        cloud.points[i / 3].x = pointMap[i] / 1000.0;
        cloud.points[i / 3].y = pointMap[i + 1] / 1000.0;
        cloud.points[i / 3].z = pointMap[i + 2] / 1000.0;
      }

      NxLibCommand (cmdRectifyImages).execute ();

      // Save images
      NxLibCommand saveImage (cmdSaveImage);
      // raw left
      saveImage.parameters ()[itmNode] = camera[itmImages][itmRaw][itmLeft].path;
      saveImage.parameters ()[itmFilename] = "calib_en/raw_left" + conv.str () + ".png";
      saveImage.execute ();
      // (the raw right and rectified left/right images can be saved the same
      // way via camera[itmImages][itmRaw][itmRight] and
      // camera[itmImages][itmRectified][itmLeft]/[itmRight])
    }
    catch (NxLibException& e)
    {
      // Display NxLib API exceptions, if any
      printf ("An NxLib API error with code %d (%s) occurred while accessing item %s.\n",
              e.getErrorCode (), e.getErrorText ().c_str (), e.getItemPath ().c_str ());
      if (e.getErrorCode () == NxLibExecutionFailed)
        printf ("/Execute:\n%s\n", NxLibItem (itmExecute).asJson (true).c_str ());
    }
    catch (...)
    {
      // Display other exceptions
      printf ("Something, somewhere went terribly wrong!\n");
    }

    cout << "Capturing new calibration image from the Ensenso RGB camera." << endl;

    /// Read the IDS RGB camera attached to the Ensenso stereo camera
    HIDS hCam = 0;
    printf ("Success-Code: %d\n", IS_SUCCESS);

    // Open the camera
    INT nRet = is_InitCamera (&hCam, NULL);
    printf ("Status Init %d\n", nRet);

    // Set the pixel clock
    UINT nPixelClockDefault = 9;
    nRet = is_PixelClock (hCam, IS_PIXELCLOCK_CMD_SET, (void*)&nPixelClockDefault, sizeof (nPixelClockDefault));
    printf ("Status is_PixelClock %d\n", nRet);

    // Set the camera's color mode
    INT colorMode = IS_CM_BGR8_PACKED;
    nRet = is_SetColorMode (hCam, colorMode);
    printf ("Status SetColorMode %d\n", nRet);

    // Set the image format -> 2592x1944
    UINT formatID = 4;
    nRet = is_ImageFormat (hCam, IMGFRMT_CMD_SET_FORMAT, &formatID, 4);
    printf ("Status ImageFormat %d\n", nRet);

    // Allocate image memory (note: these dimensions should match the
    // configured image format)
    char* pMem = NULL;
    int memID = 0;
    nRet = is_AllocImageMem (hCam, 1280, 1024, 24, &pMem, &memID);
    printf ("Status AllocImage %d\n", nRet);

    // Activate this memory
    nRet = is_SetImageMem (hCam, pMem, memID);
    printf ("Status SetImageMem %d\n", nRet);

    // Keep the images in camera memory
    INT displayMode = IS_SET_DM_DIB;
    nRet = is_SetDisplayMode (hCam, displayMode);
    printf ("Status displayMode %d\n", nRet);

    // Capture an image
    nRet = is_FreezeVideo (hCam, IS_WAIT);
    printf ("Status is_FreezeVideo %d\n", nRet);

    // Read the image from memory and save it to a file
    std::string path = "./calib_en/snap_BGR" + conv.str () + ".png";
    std::wstring widepath;
    for (size_t c = 0; c < path.length (); ++c)
      widepath += wchar_t (path[c]);

    IMAGE_FILE_PARAMS ImageFileParams;
    ImageFileParams.pwchFileName = &widepath[0];
    ImageFileParams.pnImageID = NULL;
    ImageFileParams.ppcImageMem = NULL;
    ImageFileParams.nQuality = 0;
    ImageFileParams.nFileType = IS_IMG_PNG;
    nRet = is_ImageFile (hCam, IS_IMAGE_FILE_CMD_SAVE, (void*)&ImageFileParams, sizeof (ImageFileParams));
    printf ("Status is_ImageFile %d\n", nRet);

    // Release the camera again
    is_ExitCamera (hCam);

    cout << "To quit capturing calibration images, choose q. Else, choose any other letter." << endl;
    cin >> flag;
    img_idx++;
  }
}
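// Minimal hypothetical driver for get_en_image, assuming the function above
// is visible in the translation unit: it blocks, prompting for additional
// stereo/RGB calibration captures until the user enters 'q'.
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

int main ()
{
  pcl::PointCloud<pcl::PointXYZ> cloud;
  get_en_image (cloud); // on return, cloud holds the last captured point map
  return 0;
}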