Code Example #1
#include <gtest/gtest.h>

#include <vector>

#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>

TEST(sensor_msgs, PointCloud2Iterator)
{
  // Create a dummy PointCloud2
  size_t n_points = 4;
  sensor_msgs::msg::PointCloud2 cloud_msg_1, cloud_msg_2;
  cloud_msg_1.height = static_cast<uint32_t>(n_points);
  cloud_msg_1.width = 1;
  sensor_msgs::PointCloud2Modifier modifier(cloud_msg_1);
  modifier.setPointCloud2FieldsByString(2, "xyz", "rgb");
  cloud_msg_2 = cloud_msg_1;

  // Fill 1 by hand
  float point_data_raw[] = {1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0};
  std::vector<float> point_data(point_data_raw, point_data_raw + 3 * n_points);
  // colors in RGB order
  uint8_t color_data_raw[] = {40, 80, 120, 160, 200, 240, 20, 40, 60, 80, 100, 120};
  std::vector<uint8_t> color_data(color_data_raw, color_data_raw + 3 * n_points);

  float * data = reinterpret_cast<float *>(&cloud_msg_1.data.front());
  for (size_t n = 0, i = 0; n < n_points; ++n) {
    for (; i < 3 * (n + 1); ++i) {
      *(data++) = point_data[i];
    }
    // Add an extra float of padding
    ++data;
    uint8_t * bgr = reinterpret_cast<uint8_t *>(data++);
    // add the colors in order BGRA like PCL
    size_t j_max = 2;
    for (size_t j = 0; j <= j_max; ++j) {
      *(bgr++) = color_data[3 * n + (j_max - j)];
    }
    // Add 3 extra floats of padding
    data += 3;
  }

  // Fill 2 using an iterator
  sensor_msgs::PointCloud2Iterator<float> iter_x(cloud_msg_2, "x");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_r(cloud_msg_2, "r");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_g(cloud_msg_2, "g");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_b(cloud_msg_2, "b");
  for (size_t i = 0; i < n_points; ++i, ++iter_x, ++iter_r, ++iter_g, ++iter_b) {
    for (size_t j = 0; j < 3; ++j) {
      iter_x[j] = point_data[j + 3 * i];
    }
    *iter_r = color_data[3 * i];
    *iter_g = color_data[3 * i + 1];
    *iter_b = color_data[3 * i + 2];
  }

  // Check the values using an iterator
  sensor_msgs::PointCloud2ConstIterator<float> iter_const_1_x(cloud_msg_1, "x"), iter_const_2_x(
    cloud_msg_2, "x");
  sensor_msgs::PointCloud2ConstIterator<float> iter_const_1_y(cloud_msg_1, "y"), iter_const_2_y(
    cloud_msg_2, "y");
  sensor_msgs::PointCloud2ConstIterator<float> iter_const_1_z(cloud_msg_1, "z"), iter_const_2_z(
    cloud_msg_2, "z");
  sensor_msgs::PointCloud2ConstIterator<uint8_t> iter_const_1_r(cloud_msg_1, "r"), iter_const_2_r(
    cloud_msg_2, "r");
  sensor_msgs::PointCloud2ConstIterator<uint8_t> iter_const_1_g(cloud_msg_1, "g"), iter_const_2_g(
    cloud_msg_2, "g");
  sensor_msgs::PointCloud2ConstIterator<uint8_t> iter_const_1_b(cloud_msg_1, "b"), iter_const_2_b(
    cloud_msg_2, "b");

  size_t i = 0;
  for (; iter_const_1_x != iter_const_1_x.end();
    ++i, ++iter_const_1_x, ++iter_const_2_x, ++iter_const_1_y, ++iter_const_2_y,
    ++iter_const_1_z, ++iter_const_2_z, ++iter_const_1_r, ++iter_const_1_g, ++iter_const_1_b)
  {
    EXPECT_EQ(*iter_const_1_x, *iter_const_2_x);
    EXPECT_EQ(*iter_const_1_x, point_data[0 + 3 * i]);
    EXPECT_EQ(*iter_const_1_y, *iter_const_2_y);
    EXPECT_EQ(*iter_const_1_y, point_data[1 + 3 * i]);
    EXPECT_EQ(*iter_const_1_z, *iter_const_2_z);
    EXPECT_EQ(*iter_const_1_z, point_data[2 + 3 * i]);
    EXPECT_EQ(*iter_const_1_r, *iter_const_2_r);
    EXPECT_EQ(*iter_const_1_r, color_data[3 * i + 0]);
    EXPECT_EQ(*iter_const_1_g, *iter_const_2_g);
    EXPECT_EQ(*iter_const_1_g, color_data[3 * i + 1]);
    EXPECT_EQ(*iter_const_1_b, *iter_const_2_b);
    EXPECT_EQ(*iter_const_1_b, color_data[3 * i + 2]);
    // This is to test the different operators
    ++iter_const_2_r;
    iter_const_2_g += 1;
    iter_const_2_b = iter_const_2_b + 1;
  }
  EXPECT_EQ(i, n_points);
}
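The hand-fill loop in example #1 only works because of the memory layout that setPointCloud2FieldsByString(2, "xyz", "rgb") produces: x, y, z as floats at byte offsets 0/4/8, one float of padding, the packed rgb field at offset 16 (written byte-wise as B, G, R, the BGRA order PCL uses), and three trailing floats of padding, for a 32-byte point_step. A minimal standalone check of that assumed layout, reusing the ROS 2 headers from the test above:

#include <cassert>

#include <sensor_msgs/msg/point_cloud2.hpp>
#include <sensor_msgs/point_cloud2_iterator.hpp>

int main()
{
  sensor_msgs::msg::PointCloud2 probe;
  sensor_msgs::PointCloud2Modifier modifier(probe);
  modifier.setPointCloud2FieldsByString(2, "xyz", "rgb");

  // x/y/z/rgb live at offsets 0/4/8/16 inside each 32-byte point; the gaps at
  // bytes 12..15 and 20..31 are the padding the hand-fill loop skips over.
  assert(probe.point_step == 32u);
  assert(probe.fields.size() == 4u);
  assert(probe.fields[3].name == "rgb");
  assert(probe.fields[3].offset == 16u);
  return 0;
}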
Code Example #2
/** Append the given 3D points to the point cloud, coloring them all with `color` */
void LinemodPointcloud::fill(const std::vector<cv::Vec3f> & pts, const cv::Vec3b & color)
{
  int size_old = modifier->size();
  modifier->resize(size_old + pts.size());
  sensor_msgs::PointCloud2Iterator<float> iter_x(pc_msg, "x");
  sensor_msgs::PointCloud2Iterator<float> iter_y(pc_msg, "y");
  sensor_msgs::PointCloud2Iterator<float> iter_z(pc_msg, "z");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_r(pc_msg, "r");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_g(pc_msg, "g");
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_b(pc_msg, "b");
  iter_x += size_old;
  iter_y += size_old;
  iter_z += size_old;
  iter_r += size_old;
  iter_g += size_old;
  iter_b += size_old;
  std::vector<cv::Vec3f>::const_iterator it_data = pts.begin();
  for(; it_data != pts.end(); ++it_data, ++iter_x, ++iter_y, ++iter_z, ++iter_r, ++iter_g, ++iter_b)
  {
    *iter_x = (*it_data)(0);
    *iter_y = (*it_data)(1);
    *iter_z = (*it_data)(2);
    *iter_r = color(0);
    *iter_g = color(1);
    *iter_b = color(2);
  }
}
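fill() relies on two members, pc_msg and modifier, that the snippet does not show. A minimal sketch of one plausible surrounding class (the constructor body is an assumption, not the project's actual code; ownership/cleanup omitted): the modifier is bound to the message once and declares the xyz+rgb fields that the iterators in fill() look up.

#include <vector>
#include <opencv2/core/core.hpp>
#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>

struct LinemodPointcloud
{
  sensor_msgs::PointCloud2 pc_msg;
  sensor_msgs::PointCloud2Modifier * modifier;

  LinemodPointcloud()
  : modifier(new sensor_msgs::PointCloud2Modifier(pc_msg))
  {
    // fill() expects "x"/"y"/"z" and the packed "rgb" field to exist
    modifier->setPointCloud2FieldsByString(2, "xyz", "rgb");
  }

  void fill(const std::vector<cv::Vec3f> & pts, const cv::Vec3b & color);
};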
Code Example #3
File: Teste.cpp Project: brunogouveia/hand_shake
template<typename T>
void convert(const sensor_msgs::ImageConstPtr& depth_msg, PointCloud::Ptr& cloud_msg)
{
	// Use correct principal point from calibration
	float center_x = cameraModel.cx();
	float center_y = cameraModel.cy();

	// Combine unit conversion (if necessary) with scaling by focal length for computing (X,Y)
	double unit_scaling = DepthTraits<T>::toMeters( T(1) );
	float constant_x = unit_scaling / cameraModel.fx();
	float constant_y = unit_scaling / cameraModel.fy();
	float bad_point = std::numeric_limits<float>::quiet_NaN();

	sensor_msgs::PointCloud2Iterator<float> iter_x(*cloud_msg, "x");
	sensor_msgs::PointCloud2Iterator<float> iter_y(*cloud_msg, "y");
	sensor_msgs::PointCloud2Iterator<float> iter_z(*cloud_msg, "z");
	const T* depth_row = reinterpret_cast<const T*>(&depth_msg->data[0]);
	int row_step = depth_msg->step / sizeof(T);
	for (int v = 0; v < (int)cloud_msg->height; ++v, depth_row += row_step)
	{
		for (int u = 0; u < (int)cloud_msg->width; ++u, ++iter_x, ++iter_y, ++iter_z)
		{
			T depth = depth_row[u];

			// Missing points denoted by NaNs
			if (!DepthTraits<T>::valid(depth))
			{
				*iter_x = *iter_y = *iter_z = bad_point;
				continue;
			}

			// Fill in XYZ
			*iter_x = (u - center_x) * depth * constant_x;
			*iter_y = (v - center_y) * depth * constant_y;
			*iter_z = DepthTraits<T>::toMeters(depth);
		}
	}
}
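DepthTraits<T> is not shown in the snippet; it abstracts over the two common depth encodings (16-bit millimeters and 32-bit float meters) so the same convert<T>() body handles both. A sketch in the spirit of depth_image_proc's depth_traits.h, reduced to the two members convert() actually calls:

#include <cmath>
#include <cstdint>

template<typename T>
struct DepthTraits {};

template<>
struct DepthTraits<uint16_t>
{
  // OpenNI-style raw depth: value in millimeters, 0 marks an invalid pixel
  static inline bool valid(uint16_t depth) { return depth != 0; }
  static inline float toMeters(uint16_t depth) { return depth * 0.001f; }
};

template<>
struct DepthTraits<float>
{
  // Floating-point depth is already in meters; NaN marks an invalid pixel
  static inline bool valid(float depth) { return std::isfinite(depth); }
  static inline float toMeters(float depth) { return depth; }
};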
Code Example #4
File: shape_mask.cpp Project: ros-planning/moveit
void point_containment_filter::ShapeMask::maskContainment(const sensor_msgs::PointCloud2& data_in,
                                                          const Eigen::Vector3d& sensor_origin,
                                                          const double min_sensor_dist, const double max_sensor_dist,
                                                          std::vector<int>& mask)
{
  boost::mutex::scoped_lock _(shapes_lock_);
  const unsigned int np = data_in.data.size() / data_in.point_step;
  mask.resize(np);

  if (bodies_.empty())
    std::fill(mask.begin(), mask.end(), (int)OUTSIDE);
  else
  {
    Eigen::Isometry3d tmp;
    bspheres_.resize(bodies_.size());
    std::size_t j = 0;
    for (std::set<SeeShape>::const_iterator it = bodies_.begin(); it != bodies_.end(); ++it)
    {
      if (!transform_callback_(it->handle, tmp))
      {
        if (!it->body)
          ROS_ERROR_STREAM_NAMED("shape_mask", "Missing transform for shape with handle " << it->handle
                                                                                          << " without a body");
        else
          ROS_ERROR_STREAM_NAMED("shape_mask", "Missing transform for shape " << it->body->getType() << " with handle "
                                                                              << it->handle);
      }
      else
      {
        it->body->setPose(tmp);
        it->body->computeBoundingSphere(bspheres_[j++]);
      }
    }

    // compute a sphere that bounds the entire robot
    bodies::BoundingSphere bound;
    bodies::mergeBoundingSpheres(bspheres_, bound);
    const double radius_squared = bound.radius * bound.radius;

    // we now decide which points we keep
    sensor_msgs::PointCloud2ConstIterator<float> iter_x(data_in, "x");
    sensor_msgs::PointCloud2ConstIterator<float> iter_y(data_in, "y");
    sensor_msgs::PointCloud2ConstIterator<float> iter_z(data_in, "z");

    // The cloud iterators are not incremented in the for loop; the body indexes
    // them with `iter + i` so each iteration is independent and safe for the
    // OpenMP pragma below, which is commented out because it can result in very
    // high CPU consumption.
    //#pragma omp parallel for schedule(dynamic)
    for (int i = 0; i < (int)np; ++i)
    {
      Eigen::Vector3d pt = Eigen::Vector3d(*(iter_x + i), *(iter_y + i), *(iter_z + i));
      double d = pt.norm();
      int out = OUTSIDE;
      if (d < min_sensor_dist || d > max_sensor_dist)
        out = CLIP;
      else if ((bound.center - pt).squaredNorm() < radius_squared)
        for (std::set<SeeShape>::const_iterator it = bodies_.begin(); it != bodies_.end() && out == OUTSIDE; ++it)
          if (it->body->containsPoint(pt))
            out = INSIDE;
      mask[i] = out;
    }
  }
}
Code Example #5
// Fill depth information
bool GazeboRosOpenniKinect::FillPointCloudHelper(
    sensor_msgs::PointCloud2 &point_cloud_msg,
    uint32_t rows_arg, uint32_t cols_arg,
    uint32_t step_arg, void* data_arg)
{
    sensor_msgs::PointCloud2Modifier pcd_modifier(point_cloud_msg);
    pcd_modifier.setPointCloud2FieldsByString(2, "xyz", "rgb");
    // fill the cloud as a flat array; the original height and width are restored at the end
    pcd_modifier.resize(rows_arg*cols_arg);
    point_cloud_msg.is_dense = true;

    sensor_msgs::PointCloud2Iterator<float> iter_x(point_cloud_msg, "x");
    sensor_msgs::PointCloud2Iterator<float> iter_y(point_cloud_msg, "y");
    sensor_msgs::PointCloud2Iterator<float> iter_z(point_cloud_msg, "z");
    sensor_msgs::PointCloud2Iterator<uint8_t> iter_rgb(point_cloud_msg, "rgb");

    float* toCopyFrom = (float*)data_arg;
    int index = 0;

    double hfov = this->parentSensor->GetDepthCamera()->GetHFOV().Radian();
    double fl = ((double)this->width) / (2.0 *tan(hfov/2.0));

    // convert depth to point cloud
    for (uint32_t j=0; j<rows_arg; j++)
    {
        double pAngle;
        if (rows_arg>1) pAngle = atan2( (double)j - 0.5*(double)(rows_arg-1), fl);
        else            pAngle = 0.0;

        for (uint32_t i=0; i<cols_arg; i++, ++iter_x, ++iter_y, ++iter_z, ++iter_rgb)
        {
            double yAngle;
            if (cols_arg>1) yAngle = atan2( (double)i - 0.5*(double)(cols_arg-1), fl);
            else            yAngle = 0.0;

            double depth = toCopyFrom[index++]; // + 0.0*this->myParent->GetNearClip();

            if(depth > this->point_cloud_cutoff_ &&
                    depth < this->point_cloud_cutoff_max_)
            {
                // in optical frame
                // hardcoded rotation rpy(-M_PI/2, 0, -M_PI/2) is built-in
                // to urdf, where the *_optical_frame should have above relative
                // rotation from the physical camera *_frame
                *iter_x = depth * tan(yAngle);
                *iter_y = depth * tan(pAngle);
                *iter_z = depth;
            }
            else //point in the unseeable range
            {
                *iter_x = *iter_y = *iter_z = std::numeric_limits<float>::quiet_NaN ();
                point_cloud_msg.is_dense = false;
            }

            // put image color data for each point
            uint8_t*  image_src = (uint8_t*)(&(this->image_msg_.data[0]));
            if (this->image_msg_.data.size() == rows_arg*cols_arg*3)
            {
                // color
                iter_rgb[0] = image_src[i*3+j*cols_arg*3+0];
                iter_rgb[1] = image_src[i*3+j*cols_arg*3+1];
                iter_rgb[2] = image_src[i*3+j*cols_arg*3+2];
            }
            else if (this->image_msg_.data.size() == rows_arg*cols_arg)
            {
                // mono (or bayer?  @todo; fix for bayer)
                iter_rgb[0] = image_src[i+j*cols_arg];
                iter_rgb[1] = image_src[i+j*cols_arg];
                iter_rgb[2] = image_src[i+j*cols_arg];
            }
            else
            {
                // no image
                iter_rgb[0] = 0;
                iter_rgb[1] = 0;
                iter_rgb[2] = 0;
            }
        }
    }

    // reconvert to original height and width after the flat reshape
    point_cloud_msg.height = rows_arg;
    point_cloud_msg.width = cols_arg;
    point_cloud_msg.row_step = point_cloud_msg.point_step * point_cloud_msg.width;

    return true;
}
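Unlike examples #1 and #2, which create named "r"/"g"/"b" iterators, this helper indexes a single PointCloud2Iterator<uint8_t> over the packed "rgb" field: iter_rgb[k] addresses the k-th byte of the current point's color. A minimal sketch (hypothetical function and message, assuming an xyz+rgb cloud) showing that the two styles reach the same bytes:

#include <sensor_msgs/PointCloud2.h>
#include <sensor_msgs/point_cloud2_iterator.h>

void color_iterator_demo(sensor_msgs::PointCloud2 & msg)
{
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_rgb(msg, "rgb");  // byte-wise view
  sensor_msgs::PointCloud2Iterator<uint8_t> iter_r(msg, "r");      // named channel
  // In the little-endian packed float the byte order is B, G, R (, A) -- the
  // BGRA layout PCL uses, which example #1's hand-fill loop writes by hand.
  iter_rgb[2] = 255;  // red byte of the first point, via the byte-wise view
  *iter_r = 255;      // the same byte, via the named-channel iterator
}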