/** Fill the point cloud with the given 3D points, all colored the same. */
void LinemodPointcloud::fill(const std::vector<cv::Vec3f>& pts, const cv::Vec3b& color)
{
  int size_old = modifier->size();
  modifier->resize(size_old + pts.size());

  sensor_msgs::PointCloud2Iterator<float> iter_x(pc_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(pc_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(pc_msg, "z");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_r(pc_msg, "r");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_g(pc_msg, "g");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_b(pc_msg, "b");

  // Skip past the points that were already in the cloud.
  iter_x += size_old;
  iter_y += size_old;
  iter_z += size_old;
  iter_r += size_old;
  iter_g += size_old;
  iter_b += size_old;

  std::vector<cv::Vec3f>::const_iterator it_data = pts.begin();
  for (; it_data != pts.end(); ++it_data, ++iter_x, ++iter_y, ++iter_z, ++iter_r, ++iter_g, ++iter_b)
  {
    *iter_x = (*it_data)(0);
    *iter_y = (*it_data)(1);
    *iter_z = (*it_data)(2);
    *iter_r = color(0);
    *iter_g = color(1);
    *iter_b = color(2);
  }
}
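// Setup sketch (an assumption, not taken from the source): fill() expects
// pc_msg and modifier to be class members already configured with separate
// "x","y","z","r","g","b" fields. One way to configure such a cloud with the
// standard sensor_msgs API:
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>

void setupRgbCloud(sensor_msgs::PointCloud2& pc_msg)
{
  sensor_msgs::PointCloud2Modifier modifier(pc_msg);
  // Six single-element fields: float32 x/y/z plus one uint8 per color channel.
  modifier.setPointCloud2Fields(6,
      "x", 1, sensor_msgs::PointField::FLOAT32,
      "y", 1, sensor_msgs::PointField::FLOAT32,
      "z", 1, sensor_msgs::PointField::FLOAT32,
      "r", 1, sensor_msgs::PointField::UINT8,
      "g", 1, sensor_msgs::PointField::UINT8,
      "b", 1, sensor_msgs::PointField::UINT8);
  modifier.resize(0);  // start empty; fill() appends a batch and skips past old points
}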
// Convert a depth image to a point cloud (one point per pixel).
template<typename T>
void convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg)
{
  // Use the principal point and focal lengths from the camera calibration.
  float center_x = cameraModel.cx();
  float center_y = cameraModel.cy();

  // Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y).
  double unit_scaling = DepthTraits<T>::toMeters(T(1));
  float constant_x = unit_scaling / cameraModel.fx();
  float constant_y = unit_scaling / cameraModel.fy();
  float bad_point = std::numeric_limits<float>::quiet_NaN();

  sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");

  const T* depth_row = reinterpret_cast<const T*>(&depth_msg->data[0]);
  int row_step = depth_msg->step / sizeof(T);
  for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step)
  {
    for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z)
    {
      T depth = depth_row[u];

      // Missing points are denoted by NaNs.
      if (!DepthTraits<T>::valid(depth))
      {
        *iter_x = *iter_y = *iter_z = bad_point;
        continue;
      }

      // Fill in XYZ via the pinhole camera model.
      *iter_x = (u - center_x) * depth * constant_x;
      *iter_y = (v - center_y) * depth * constant_y;
      *iter_z = DepthTraits<T>::toMeters(depth);
    }
  }
}
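// A minimal sketch of the DepthTraits interface convert() relies on, modeled
// on depth_image_proc's depth_traits.h: uint16_t encodes millimeters (0 marks
// a missing reading), float is already meters (NaN marks a missing reading).
// Treat this as an assumption about the traits, not the canonical definition.
#include <cmath>
#include <cstdint>

template<typename T> struct DepthTraits {};

template<> struct DepthTraits<uint16_t>
{
  static inline bool valid(uint16_t depth) { return depth != 0; }
  static inline float toMeters(uint16_t depth) { return depth * 0.001f; }  // mm -> m
};

template<> struct DepthTraits<float>
{
  static inline bool valid(float depth) { return std::isfinite(depth); }
  static inline float toMeters(float depth) { return depth; }
};

// With uint16_t, DepthTraits<T>::toMeters(T(1)) == 0.001, so constant_x and
// constant_y above fold the mm-to-m conversion into the per-pixel multiply.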
void point_containment_filter::ShapeMask::maskContainment(const sensor_msgs::PointCloud2& data_in,
                                                          const Eigen::Vector3d& sensor_origin,
                                                          const double min_sensor_dist,
                                                          const double max_sensor_dist,
                                                          std::vector<int>& mask)
{
  boost::mutex::scoped_lock _(shapes_lock_);

  const unsigned int np = data_in.data.size() / data_in.point_step;
  mask.resize(np);

  if (bodies_.empty())
    std::fill(mask.begin(), mask.end(), (int)OUTSIDE);
  else
  {
    Eigen::Isometry3d tmp;
    bspheres_.resize(bodies_.size());
    std::size_t j = 0;
    for (std::set<SeeShape>::const_iterator it = bodies_.begin(); it != bodies_.end(); ++it)
    {
      if (!transform_callback_(it->handle, tmp))
      {
        if (!it->body)
          ROS_ERROR_STREAM_NAMED("shape_mask",
                                 "Missing transform for shape with handle " << it->handle << " without a body");
        else
          ROS_ERROR_STREAM_NAMED("shape_mask", "Missing transform for shape " << it->body->getType()
                                                                              << " with handle " << it->handle);
      }
      else
      {
        it->body->setPose(tmp);
        it->body->computeBoundingSphere(bspheres_[j++]);
      }
    }

    // Compute a sphere that bounds the entire robot.
    bodies::BoundingSphere bound;
    bodies::mergeBoundingSpheres(bspheres_, bound);
    const double radius_squared = bound.radius * bound.radius;

    // Now decide which points to keep.
    sensor_msgs::PointCloud2ConstIterator<float> iter_x(data_in, "x");
    sensor_msgs::PointCloud2ConstIterator<float> iter_y(data_in, "y");
    sensor_msgs::PointCloud2ConstIterator<float> iter_z(data_in, "z");

    // The cloud iterators are indexed with (iter + i) rather than incremented in
    // the loop so the body could be parallelized with the OpenMP pragma below.
    // The pragma is commented out because the parallelization can result in
    // very high CPU consumption.
    //#pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < (int)np; ++i)
    {
      Eigen::Vector3d pt = Eigen::Vector3d(*(iter_x + i), *(iter_y + i), *(iter_z + i));
      double d = pt.norm();
      int out = OUTSIDE;
      if (d < min_sensor_dist || d > max_sensor_dist)
        out = CLIP;
      else if ((bound.center - pt).squaredNorm() < radius_squared)
        for (std::set<SeeShape>::const_iterator it = bodies_.begin(); it != bodies_.end() && out == OUTSIDE; ++it)
          if (it->body->containsPoint(pt))
            out = INSIDE;
      mask[i] = out;
    }
  }
}
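// Usage sketch (assumed, not from the source): applying the computed mask to
// copy only the points that lie outside all robot bodies into a new cloud.
// OUTSIDE is the ShapeMask enumerator used above; the function name, the
// xyz-only output layout, and the include path are illustrative assumptions.
#include <moveit/point_containment_filter/shape_mask.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>

sensor_msgs::PointCloud2 keepOutsidePoints(const sensor_msgs::PointCloud2& data_in,
                                           const std::vector<int>& mask)
{
  sensor_msgs::PointCloud2 out;
  out.header = data_in.header;
  sensor_msgs::PointCloud2Modifier modifier(out);
  modifier.setPointCloud2FieldsByString(1, "xyz");
  modifier.resize(mask.size());  // upper bound; trimmed below

  sensor_msgs::PointCloud2ConstIterator<float> in_x(data_in, "x");
  sensor_msgs::PointCloud2ConstIterator<float> in_y(data_in, "y");
  sensor_msgs::PointCloud2ConstIterator<float> in_z(data_in, "z");
  sensor_msgs::PointCloud2Iterator<float> out_x(out, "x");
  sensor_msgs::PointCloud2Iterator<float> out_y(out, "y");
  sensor_msgs::PointCloud2Iterator<float> out_z(out, "z");

  size_t kept = 0;
  for (size_t i = 0; i < mask.size(); ++i, ++in_x, ++in_y, ++in_z)
  {
    if (mask[i] != point_containment_filter::ShapeMask::OUTSIDE)
      continue;  // drop points inside a body (INSIDE) or out of range (CLIP)
    *out_x = *in_x; *out_y = *in_y; *out_z = *in_z;
    ++out_x; ++out_y; ++out_z;
    ++kept;
  }
  modifier.resize(kept);  // shrink to the points actually kept
  return out;
}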
// Fill depth and color information into the point cloud message.
bool GazeboRosOpenniKinect::FillPointCloudHelper(sensor_msgs::PointCloud2& point_cloud_msg,
                                                 uint32_t rows_arg, uint32_t cols_arg,
                                                 uint32_t step_arg, void* data_arg)
{
  sensor_msgs::PointCloud2Modifier pcd_modifier(point_cloud_msg);
  pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "rgb");
  // Flatten the cloud into a 1-D array for filling; the original height and
  // width are restored at the end.
  pcd_modifier.resize(rows_arg * cols_arg);
  point_cloud_msg.is_dense = true;

  sensor_msgs::PointCloud2Iterator<float> iter_x(point_cloud_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(point_cloud_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(point_cloud_msg, "z");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_rgb(point_cloud_msg, "rgb");

  float* toCopyFrom = (float*)data_arg;
  int index = 0;

  double hfov = this->parentSensor->GetDepthCamera()->GetHFOV().Radian();
  double fl = ((double)this->width) / (2.0 * tan(hfov / 2.0));

  // Convert depth to point cloud.
  for (uint32_t j = 0; j < rows_arg; j++)
  {
    double pAngle;
    if (rows_arg > 1)
      pAngle = atan2((double)j - 0.5 * (double)(rows_arg - 1), fl);
    else
      pAngle = 0.0;

    for (uint32_t i = 0; i < cols_arg; i++, ++iter_x, ++iter_y, ++iter_z, ++iter_rgb)
    {
      double yAngle;
      if (cols_arg > 1)
        yAngle = atan2((double)i - 0.5 * (double)(cols_arg - 1), fl);
      else
        yAngle = 0.0;

      double depth = toCopyFrom[index++];

      if (depth > this->point_cloud_cutoff_ &&
          depth < this->point_cloud_cutoff_max_)
      {
        // In the optical frame: the hardcoded rotation rpy(-M_PI/2, 0, -M_PI/2)
        // is built into the URDF, where the *_optical_frame has that relative
        // rotation from the physical camera *_frame.
        *iter_x = depth * tan(yAngle);
        *iter_y = depth * tan(pAngle);
        *iter_z = depth;
      }
      else  // point is outside the visible range
      {
        *iter_x = *iter_y = *iter_z = std::numeric_limits<float>::quiet_NaN();
        point_cloud_msg.is_dense = false;
      }

      // Put image color data for each point.
      uint8_t* image_src = (uint8_t*)(&(this->image_msg_.data[0]));
      if (this->image_msg_.data.size() == rows_arg * cols_arg * 3)
      {
        // color
        iter_rgb[0] = image_src[i * 3 + j * cols_arg * 3 + 0];
        iter_rgb[1] = image_src[i * 3 + j * cols_arg * 3 + 1];
        iter_rgb[2] = image_src[i * 3 + j * cols_arg * 3 + 2];
      }
      else if (this->image_msg_.data.size() == rows_arg * cols_arg)
      {
        // mono (or Bayer? TODO: handle Bayer)
        iter_rgb[0] = image_src[i + j * cols_arg];
        iter_rgb[1] = image_src[i + j * cols_arg];
        iter_rgb[2] = image_src[i + j * cols_arg];
      }
      else
      {
        // no image
        iter_rgb[0] = 0;
        iter_rgb[1] = 0;
        iter_rgb[2] = 0;
      }
    }
  }

  // Restore the original height and width after the flat reshape.
  point_cloud_msg.height = rows_arg;
  point_cloud_msg.width = cols_arg;
  point_cloud_msg.row_step = point_cloud_msg.point_step * point_cloud_msg.width;

  return true;
}
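// Consumer sketch (assumed, not from the source): because the helper sets
// is_dense = false whenever a depth reading falls outside the cutoff range,
// downstream code has to skip the NaN points explicitly. The function name
// and the logging are illustrative; only the iterator API is standard.
#include <cmath>
#include <ros/console.h>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>

void countValidPoints(const sensor_msgs::PointCloud2& cloud)
{
  sensor_msgs::PointCloud2ConstIterator<float> it_x(cloud, "x");
  sensor_msgs::PointCloud2ConstIterator<float> it_y(cloud, "y");
  sensor_msgs::PointCloud2ConstIterator<float> it_z(cloud, "z");
  size_t valid = 0;
  for (; it_x != it_x.end(); ++it_x, ++it_y, ++it_z)
  {
    if (!std::isfinite(*it_z))
      continue;  // depth was outside [point_cloud_cutoff_, point_cloud_cutoff_max_]
    ++valid;     // (*it_x, *it_y, *it_z) is a usable point in the optical frame
  }
  ROS_INFO("valid points: %zu / %u", valid, cloud.width * cloud.height);
}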