void imageCallback(const sensor_msgs::ImageConstPtr& depth_msg)
{
  std::cout << "imageCallback" << std::endl;

  // Convert to OpenCV
  cv_bridge::CvImagePtr cv_ptr;
  try
  {
    cv_ptr = cv_bridge::toCvCopy(depth_msg, enc::TYPE_32FC1);
  }
  catch (cv_bridge::Exception& e)
  {
    ROS_ERROR("cv_bridge exception: %s", e.what());
    return;
  }
  cv::imshow("Depth Image", cv_ptr->image);
  cv::waitKey(3);

  // std::cout << this << " " << &point_cloud_pub_ << std::endl;
  PointCloud::Ptr cloud_msg(new PointCloud);
  cloud_msg->header = depth_msg->header;
  cloud_msg->height = depth_msg->height;
  cloud_msg->width = depth_msg->width;
  cloud_msg->is_dense = false;
  cloud_msg->is_bigendian = false;

  sensor_msgs::PointCloud2Modifier pcd_modifier(*cloud_msg);
  pcd_modifier.setPointCloud2FieldsByString(1, "xyz");

  if (depth_msg->encoding == enc::TYPE_16UC1)
  {
    convert<uint16_t>(depth_msg, cloud_msg);
  }
  else if (depth_msg->encoding == enc::TYPE_32FC1)
  {
    convert<float>(depth_msg, cloud_msg);
  }
  else
  {
    ROS_ERROR("Depth image has unsupported encoding [%s]", depth_msg->encoding.c_str());
    return;
  }

  pointCloudPub.publish(cloud_msg);
  newImagePub.publish(depth_msg);
}
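The callback above relies on a `PointCloud` typedef and two publishers (`pointCloudPub`, `newImagePub`) that are declared elsewhere. A minimal sketch of that surrounding ROS 1 node wiring is shown below; the node name, topic names, and the `main()` structure are assumptions for illustration, not taken from the original code.

// Hypothetical wiring for the imageCallback snippet above (topic names are placeholders).
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/PointCloud2.h>

typedef sensor_msgs::PointCloud2 PointCloud;

ros::Publisher pointCloudPub;
ros::Publisher newImagePub;

void imageCallback(const sensor_msgs::ImageConstPtr& depth_msg);  // defined above

int main(int argc, char** argv)
{
  ros::init(argc, argv, "depth_to_cloud");
  ros::NodeHandle nh;

  // Publishers referenced (but not declared) in the callback.
  pointCloudPub = nh.advertise<sensor_msgs::PointCloud2>("points", 1);
  newImagePub   = nh.advertise<sensor_msgs::Image>("depth_relay", 1);

  // Subscribe to the depth image that drives the conversion.
  ros::Subscriber sub = nh.subscribe("depth/image_raw", 1, imageCallback);

  ros::spin();
  return 0;
}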
static void depthCb(const sensor_msgs::msg::Image::SharedPtr image)
{
  // The meat of this function is a port of the code from:
  // https://github.com/ros-perception/image_pipeline/blob/92d7f6b/depth_image_proc/src/nodelets/point_cloud_xyz.cpp

  if (nullptr == g_cam_info)
  {
    // we haven't gotten the camera info yet, so just drop until we do
    RCUTILS_LOG_WARN("No camera info, skipping point cloud conversion");
    return;
  }

  sensor_msgs::msg::PointCloud2::SharedPtr cloud_msg =
    std::make_shared<sensor_msgs::msg::PointCloud2>();
  cloud_msg->header = image->header;
  cloud_msg->height = image->height;
  cloud_msg->width = image->width;
  cloud_msg->is_dense = false;
  cloud_msg->is_bigendian = false;
  cloud_msg->fields.clear();
  cloud_msg->fields.reserve(1);

  sensor_msgs::PointCloud2Modifier pcd_modifier(*cloud_msg);
  pcd_modifier.setPointCloud2FieldsByString(1, "xyz");

  // g_cam_info here is a sensor_msgs::msg::CameraInfo::SharedPtr,
  // which we get from the depth_camera_info topic.
  image_geometry::PinholeCameraModel model;
  model.fromCameraInfo(g_cam_info);

  if (image->encoding == sensor_msgs::image_encodings::TYPE_16UC1)
  {
    depthimage_to_pointcloud2::convert<uint16_t>(image, cloud_msg, model);
  }
  else if (image->encoding == sensor_msgs::image_encodings::TYPE_32FC1)
  {
    depthimage_to_pointcloud2::convert<float>(image, cloud_msg, model);
  }
  else
  {
    RCUTILS_LOG_WARN_THROTTLE(RCUTILS_STEADY_TIME, 5000,
      "Depth image has unsupported encoding [%s]", image->encoding.c_str());
    return;
  }

  g_pub_point_cloud->publish(cloud_msg);
}
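Both callbacks delegate the per-pixel work to a templated `convert<T>()` helper that is not shown here. The following is only a sketch of what such a conversion typically does: back-project every depth pixel through the pinhole intrinsics into the optical frame. The function name `convert_sketch`, the unit-scaling rule, and the invalid-pixel test are assumptions; the packaged implementations (depth_image_proc, depthimage_to_pointcloud2) differ in details such as `DepthTraits<T>` and range clamping.

// Hedged sketch, not the packaged convert<T>() implementation.
#include <cstdint>
#include <limits>
#include <type_traits>
#include <sensor_msgs/msg/image.hpp>
#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>
#include <image_geometry/pinhole_camera_model.h>

template<typename T>
void convert_sketch(
  const sensor_msgs::msg::Image::SharedPtr depth_msg,
  sensor_msgs::msg::PointCloud2::SharedPtr cloud_msg,
  const image_geometry::PinholeCameraModel & model)
{
  // Convention assumed here: 16UC1 depth is millimetres, 32FC1 is metres.
  const double unit_scaling = std::is_same<T, uint16_t>::value ? 0.001 : 1.0;
  const double cx = model.cx(), cy = model.cy();
  const double inv_fx = 1.0 / model.fx(), inv_fy = 1.0 / model.fy();

  // The modifier in the callback has already laid out x/y/z fields and sized the data.
  sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");

  const T * depth_row = reinterpret_cast<const T *>(&depth_msg->data[0]);
  const int row_step = depth_msg->step / sizeof(T);

  for (uint32_t v = 0; v < depth_msg->height; ++v, depth_row += row_step) {
    for (uint32_t u = 0; u < depth_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z) {
      const double depth = depth_row[u] * unit_scaling;
      if (!(depth > 0.0)) {  // zero or NaN means no measurement
        *iter_x = *iter_y = *iter_z = std::numeric_limits<float>::quiet_NaN();
        continue;
      }
      // Pinhole back-projection into the optical frame (x right, y down, z forward).
      *iter_x = static_cast<float>((u - cx) * depth * inv_fx);
      *iter_y = static_cast<float>((v - cy) * depth * inv_fy);
      *iter_z = static_cast<float>(depth);
    }
  }
}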
// Fill depth information
bool GazeboRosOpenniKinect::FillPointCloudHelper(
    sensor_msgs::PointCloud2 &point_cloud_msg,
    uint32_t rows_arg, uint32_t cols_arg,
    uint32_t step_arg, void* data_arg)
{
  sensor_msgs::PointCloud2Modifier pcd_modifier(point_cloud_msg);
  pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "rgb");
  // convert to flat array shape, we need to reconvert later
  pcd_modifier.resize(rows_arg * cols_arg);

  point_cloud_msg.is_dense = true;

  sensor_msgs::PointCloud2Iterator<float> iter_x(point_cloud_msg_, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(point_cloud_msg_, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(point_cloud_msg_, "z");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_rgb(point_cloud_msg_, "rgb");

  float* toCopyFrom = (float*)data_arg;
  int index = 0;

  double hfov = this->parentSensor->GetDepthCamera()->GetHFOV().Radian();
  double fl = ((double)this->width) / (2.0 * tan(hfov / 2.0));

  // convert depth to point cloud
  for (uint32_t j = 0; j < rows_arg; j++)
  {
    double pAngle;
    if (rows_arg > 1)
      pAngle = atan2((double)j - 0.5 * (double)(rows_arg - 1), fl);
    else
      pAngle = 0.0;

    for (uint32_t i = 0; i < cols_arg; i++, ++iter_x, ++iter_y, ++iter_z, ++iter_rgb)
    {
      double yAngle;
      if (cols_arg > 1)
        yAngle = atan2((double)i - 0.5 * (double)(cols_arg - 1), fl);
      else
        yAngle = 0.0;

      double depth = toCopyFrom[index++];  // + 0.0*this->myParent->GetNearClip();

      if (depth > this->point_cloud_cutoff_ &&
          depth < this->point_cloud_cutoff_max_)
      {
        // in optical frame
        // hardcoded rotation rpy(-M_PI/2, 0, -M_PI/2) is built-in
        // to urdf, where the *_optical_frame should have above relative
        // rotation from the physical camera *_frame
        *iter_x = depth * tan(yAngle);
        *iter_y = depth * tan(pAngle);
        *iter_z = depth;
      }
      else  // point in the unseeable range
      {
        *iter_x = *iter_y = *iter_z = std::numeric_limits<float>::quiet_NaN();
        point_cloud_msg.is_dense = false;
      }

      // put image color data for each point
      uint8_t* image_src = (uint8_t*)(&(this->image_msg_.data[0]));
      if (this->image_msg_.data.size() == rows_arg * cols_arg * 3)
      {
        // color
        iter_rgb[0] = image_src[i*3 + j*cols_arg*3 + 0];
        iter_rgb[1] = image_src[i*3 + j*cols_arg*3 + 1];
        iter_rgb[2] = image_src[i*3 + j*cols_arg*3 + 2];
      }
      else if (this->image_msg_.data.size() == rows_arg * cols_arg)
      {
        // mono (or bayer? @todo; fix for bayer)
        iter_rgb[0] = image_src[i + j*cols_arg];
        iter_rgb[1] = image_src[i + j*cols_arg];
        iter_rgb[2] = image_src[i + j*cols_arg];
      }
      else
      {
        // no image
        iter_rgb[0] = 0;
        iter_rgb[1] = 0;
        iter_rgb[2] = 0;
      }
    }
  }

  // reconvert to original height and width after the flat reshape
  point_cloud_msg.height = rows_arg;
  point_cloud_msg.width = cols_arg;
  point_cloud_msg.row_step = point_cloud_msg.point_step * point_cloud_msg.width;

  return true;
}
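The Gazebo plugin marks out-of-range points as NaN and appends the camera colour into the `rgb` field, so consumers have to handle both. Below is a hedged sketch of a ROS 1 subscriber that walks the resulting xyz/rgb cloud with `sensor_msgs::PointCloud2ConstIterator`; the callback name and log message are illustrative and not part of the plugin.

// Illustrative consumer of the xyz/rgb cloud produced above.
#include <cmath>
#include <ros/ros.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>

void cloudCallback(const sensor_msgs::PointCloud2ConstPtr& cloud)
{
  sensor_msgs::PointCloud2ConstIterator<float>   iter_x(*cloud, "x");
  sensor_msgs::PointCloud2ConstIterator<float>   iter_y(*cloud, "y");
  sensor_msgs::PointCloud2ConstIterator<float>   iter_z(*cloud, "z");
  sensor_msgs::PointCloud2ConstIterator<uint8_t> iter_rgb(*cloud, "rgb");

  size_t valid = 0;
  for (; iter_x != iter_x.end(); ++iter_x, ++iter_y, ++iter_z, ++iter_rgb)
  {
    if (std::isnan(*iter_z))  // points outside the cutoff range are NaN
      continue;
    ++valid;
    // iter_rgb[0..2] hold the per-point colour copied from the camera image
  }
  ROS_INFO("cloud %ux%u, %zu valid points", cloud->width, cloud->height, valid);
}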