Example #1
pcl::PointCloud<pcl::PointXYZI>::Ptr
pcl::io::OpenNI2Grabber::convertToXYZIPointCloud (const IRImage::Ptr &ir_image, const DepthImage::Ptr &depth_image)
{
  boost::shared_ptr<pcl::PointCloud<pcl::PointXYZI> > cloud (new pcl::PointCloud<pcl::PointXYZI> ());

  cloud->header.seq = depth_image->getFrameID ();
  cloud->header.stamp = depth_image->getTimestamp ();
  cloud->header.frame_id = rgb_frame_id_;
  cloud->height = depth_height_;
  cloud->width = depth_width_;
  cloud->is_dense = false;

  cloud->points.resize (cloud->height * cloud->width);


  float fx = device_->getDepthFocalLength (); // Horizontal focal length
  float fy = device_->getDepthFocalLength (); // Vertical focal length
  float cx = ((float)cloud->width - 1.f) / 2.f;  // Center x
  float cy = ((float)cloud->height - 1.f) / 2.f; // Center y

  // Load pre-calibrated camera parameters if they exist
  if (pcl_isfinite (depth_parameters_.focal_length_x))
    fx =  static_cast<float> (depth_parameters_.focal_length_x);

  if (pcl_isfinite (depth_parameters_.focal_length_y))
    fy =  static_cast<float> (depth_parameters_.focal_length_y);

  if (pcl_isfinite (depth_parameters_.principal_point_x))
    cx =  static_cast<float> (depth_parameters_.principal_point_x);

  if (pcl_isfinite (depth_parameters_.principal_point_y))
    cy =  static_cast<float> (depth_parameters_.principal_point_y);

  float fx_inv = 1.0f / fx;
  float fy_inv = 1.0f / fy;


  const uint16_t* depth_map = (const uint16_t*) depth_image->getData ();
  if (depth_image->getWidth () != depth_width_ || depth_image->getHeight () != depth_height_)
  {
    // Resize the image if necessary
    depth_resize_buffer_.resize(depth_width_ * depth_height_);
    depth_map = depth_resize_buffer_.data();
    depth_image->fillDepthImageRaw (depth_width_, depth_height_, (unsigned short*) depth_map );
  }

  const uint16_t* ir_map = (const uint16_t*) ir_image->getData ();
  if (ir_image->getWidth () != depth_width_ || ir_image->getHeight () != depth_height_)
  {
    // Resize the image if necessary
    ir_resize_buffer_.resize(depth_width_ * depth_height_);
    ir_map = ir_resize_buffer_.data();
    ir_image->fillRaw (depth_width_, depth_height_, (unsigned short*) ir_map);
  }


  int depth_idx = 0;
  float bad_point = std::numeric_limits<float>::quiet_NaN ();

  for (int v = 0; v < depth_height_; ++v)
  {
    for (int u = 0; u < depth_width_; ++u, ++depth_idx)
    {
      pcl::PointXYZI& pt = cloud->points[depth_idx];
      /// @todo Different values for these cases
      // Check for invalid measurements
      if (depth_map[depth_idx] == 0 ||
        depth_map[depth_idx] == depth_image->getNoSampleValue () ||
        depth_map[depth_idx] == depth_image->getShadowValue ())
      {
        pt.x = pt.y = pt.z = bad_point;
      }
      else
      {
        pt.z = depth_map[depth_idx] * 0.001f; // millimeters to meters
        pt.x = (static_cast<float> (u) - cx) * pt.z * fx_inv;
        pt.y = (static_cast<float> (v) - cy) * pt.z * fy_inv;
      }

      pt.data_c[0] = pt.data_c[1] = pt.data_c[2] = pt.data_c[3] = 0;
      pt.intensity = static_cast<float> (ir_map[depth_idx]);
    }
  }
  cloud->sensor_origin_.setZero ();
  cloud->sensor_orientation_.setIdentity ();
  return (cloud);
}
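
The converter above is an internal member of pcl::io::OpenNI2Grabber; application code normally reaches it by registering a callback for the XYZI point cloud signal. A minimal usage sketch, assuming an OpenNI2-compatible sensor is connected and PCL was built with OpenNI2 support:

#include <pcl/io/openni2_grabber.h>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <boost/function.hpp>

void cloud_cb (const pcl::PointCloud<pcl::PointXYZI>::ConstPtr& cloud)
{
  // process the intensity cloud produced internally by convertToXYZIPointCloud
}

int main ()
{
  pcl::io::OpenNI2Grabber grabber;
  boost::function<void (const pcl::PointCloud<pcl::PointXYZI>::ConstPtr&)> f = &cloud_cb;
  grabber.registerCallback (f);
  grabber.start ();
  // ... run until enough frames have been processed ...
  grabber.stop ();
  return 0;
}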
Example #2
pcl::PointCloud<pcl::PointXYZ>::Ptr
pcl::io::OpenNI2Grabber::convertToXYZPointCloud (const DepthImage::Ptr& depth_image)
{
  pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);

  cloud->header.seq = depth_image->getFrameID ();
  cloud->header.stamp = depth_image->getTimestamp ();
  cloud->height = depth_height_;
  cloud->width = depth_width_;
  cloud->is_dense = false;

  cloud->points.resize (cloud->height * cloud->width);

  float constant_x = 1.0f / device_->getDepthFocalLength ();
  float constant_y = 1.0f / device_->getDepthFocalLength ();
  float centerX = ((float)cloud->width - 1.f) / 2.f;
  float centerY = ((float)cloud->height - 1.f) / 2.f;

  if (pcl_isfinite (depth_parameters_.focal_length_x))
    constant_x =  1.0f / static_cast<float> (depth_parameters_.focal_length_x);

  if (pcl_isfinite (depth_parameters_.focal_length_y))
    constant_y =  1.0f / static_cast<float> (depth_parameters_.focal_length_y);

  if (pcl_isfinite (depth_parameters_.principal_point_x))
    centerX =  static_cast<float> (depth_parameters_.principal_point_x);

  if (pcl_isfinite (depth_parameters_.principal_point_y))
    centerY =  static_cast<float> (depth_parameters_.principal_point_y);

  if ( device_->isDepthRegistered() )
    cloud->header.frame_id = rgb_frame_id_;
  else
    cloud->header.frame_id = depth_frame_id_;


  float bad_point = std::numeric_limits<float>::quiet_NaN ();

  const uint16_t* depth_map = (const uint16_t*) depth_image->getData ();
  if (depth_image->getWidth () != depth_width_ || depth_image->getHeight () != depth_height_)
  {
    // Resize the image if necessary
    depth_resize_buffer_.resize(depth_width_ * depth_height_);

    depth_image->fillDepthImageRaw (depth_width_, depth_height_, (uint16_t*) depth_resize_buffer_.data() );
    depth_map = depth_resize_buffer_.data();
  }

  int depth_idx = 0;
  for (int v = 0; v < depth_height_; ++v)
  {
    for (int u = 0; u < depth_width_; ++u, ++depth_idx)
    {
      pcl::PointXYZ& pt = cloud->points[depth_idx];
      // Check for invalid measurements
      if (depth_map[depth_idx] == 0 ||
        depth_map[depth_idx] == depth_image->getNoSampleValue () ||
        depth_map[depth_idx] == depth_image->getShadowValue ())
      {
        // not valid
        pt.x = pt.y = pt.z = bad_point;
        continue;
      }
      pt.z = depth_map[depth_idx] * 0.001f;
      pt.x = (static_cast<float> (u) - centerX) * pt.z * constant_x;
      pt.y = (static_cast<float> (v) - centerY) * pt.z * constant_y;
    }
  }
  cloud->sensor_origin_.setZero ();
  cloud->sensor_orientation_.w () = 1.0f;
  cloud->sensor_orientation_.x () = 0.0f;
  cloud->sensor_orientation_.y () = 0.0f;
  cloud->sensor_orientation_.z () = 0.0f;  
  return (cloud);
}
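
The inner loop applies the inverse pinhole model: z comes from the raw millimeter depth value, and x, y are recovered from the pixel coordinates, principal point, and focal lengths. A self-contained sketch of that arithmetic with hypothetical intrinsics (not values taken from any particular device):

#include <cstdint>
#include <cstdio>

int main ()
{
  const float fx = 525.0f, fy = 525.0f;  // assumed focal lengths in pixels
  const float cx = 319.5f, cy = 239.5f;  // assumed principal point
  const int u = 400, v = 300;            // pixel coordinates
  const uint16_t raw_depth = 1500;       // raw sensor value in millimeters

  const float z = raw_depth * 0.001f;    // millimeters to meters
  const float x = (static_cast<float> (u) - cx) * z * (1.0f / fx);
  const float y = (static_cast<float> (v) - cy) * z * (1.0f / fy);
  std::printf ("x=%.3f y=%.3f z=%.3f\n", x, y, z);
  return 0;
}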
Example #3
template <typename PointT> typename pcl::PointCloud<PointT>::Ptr
pcl::io::OpenNI2Grabber::convertToXYZRGBPointCloud (const Image::Ptr &image, const DepthImage::Ptr &depth_image)
{
  boost::shared_ptr<pcl::PointCloud<PointT> > cloud (new pcl::PointCloud<PointT>);

  cloud->header.seq = depth_image->getFrameID ();
  cloud->header.stamp = depth_image->getTimestamp ();
  cloud->header.frame_id = rgb_frame_id_;
  cloud->height = std::max (image_height_, depth_height_);
  cloud->width = std::max (image_width_, depth_width_);
  cloud->is_dense = false;

  cloud->points.resize (cloud->height * cloud->width);

  // Generate default camera parameters
  float fx = device_->getDepthFocalLength (); // Horizontal focal length
  float fy = device_->getDepthFocalLength (); // Vertical focal length
  float cx = ((float)depth_width_ - 1.f) / 2.f;  // Center x
  float cy = ((float)depth_height_ - 1.f) / 2.f; // Center y

  // Load pre-calibrated camera parameters if they exist
  if (pcl_isfinite (depth_parameters_.focal_length_x))
    fx =  static_cast<float> (depth_parameters_.focal_length_x);

  if (pcl_isfinite (depth_parameters_.focal_length_y))
    fy =  static_cast<float> (depth_parameters_.focal_length_y);

  if (pcl_isfinite (depth_parameters_.principal_point_x))
    cx =  static_cast<float> (depth_parameters_.principal_point_x);

  if (pcl_isfinite (depth_parameters_.principal_point_y))
    cy =  static_cast<float> (depth_parameters_.principal_point_y);

  // Get inverse focal length for calculations below
  float fx_inv = 1.0f / fx;
  float fy_inv = 1.0f / fy;

  const uint16_t* depth_map = (const uint16_t*) depth_image->getData ();
  if (depth_image->getWidth () != depth_width_ || depth_image->getHeight () != depth_height_)
  {
    // Resize the image if necessary
    depth_resize_buffer_.resize(depth_width_ * depth_height_);
    depth_map = depth_resize_buffer_.data();
    depth_image->fillDepthImageRaw (depth_width_, depth_height_, (unsigned short*) depth_map );
  }

  const uint8_t* rgb_buffer = (const uint8_t*) image->getData ();
  if (image->getWidth () != image_width_ || image->getHeight () != image_height_)
  {
    // Resize the image if necessary
    color_resize_buffer_.resize(image_width_ * image_height_ * 3);
    rgb_buffer = color_resize_buffer_.data();
    image->fillRGB (image_width_, image_height_, (unsigned char*) rgb_buffer, image_width_ * 3);
  }


  float bad_point = std::numeric_limits<float>::quiet_NaN ();

  // set xyz to NaN and rgb to 0 (black)
  if (image_width_ != depth_width_)
  {
    PointT pt;
    pt.x = pt.y = pt.z = bad_point;
    pt.b = pt.g = pt.r = 0;
    pt.a = 255; // point has no color info yet; alpha left at its maximum
    cloud->points.assign (cloud->points.size (), pt);
  }

  // fill in XYZ values
  unsigned step = cloud->width / depth_width_;
  unsigned skip = cloud->width - (depth_width_ * step);

  int value_idx = 0;
  int point_idx = 0;
  for (int v = 0; v < depth_height_; ++v, point_idx += skip)
  {
    for (int u = 0; u < depth_width_; ++u, ++value_idx, point_idx += step)
    {
      PointT& pt = cloud->points[point_idx];
      /// @todo Different values for these cases
      // Check for invalid measurements

      OniDepthPixel pixel = depth_map[value_idx];
      if (pixel != 0 &&
        pixel != depth_image->getNoSampleValue () &&
        pixel != depth_image->getShadowValue () )
      {
        pt.z = depth_map[value_idx] * 0.001f;  // millimeters to meters
        pt.x = (static_cast<float> (u) - cx) * pt.z * fx_inv;
        pt.y = (static_cast<float> (v) - cy) * pt.z * fy_inv;
      }
      else
      {
        pt.x = pt.y = pt.z = bad_point;
      }
    }
  }

  // fill in the RGB values
  step = cloud->width / image_width_;
  skip = cloud->width - (image_width_ * step);

  value_idx = 0;
  point_idx = 0;
  RGBValue color;
  color.Alpha = 0xff;

  for (unsigned yIdx = 0; yIdx < image_height_; ++yIdx, point_idx += skip)
  {
    for (unsigned xIdx = 0; xIdx < image_width_; ++xIdx, point_idx += step, value_idx += 3)
    {
      PointT& pt = cloud->points[point_idx];

      color.Red   = rgb_buffer[value_idx];
      color.Green = rgb_buffer[value_idx + 1];
      color.Blue  = rgb_buffer[value_idx + 2];

      pt.rgba = color.long_value;
    }
  }
  cloud->sensor_origin_.setZero ();
  cloud->sensor_orientation_.setIdentity ();
  return (cloud);
}
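
The color loop packs the three RGB bytes plus alpha into the 32-bit rgba field through the RGBValue union. A roughly equivalent, layout-agnostic sketch using the public r, g, b, a fields of pcl::PointXYZRGBA (the byte values here are arbitrary placeholders):

#include <pcl/point_types.h>

int main ()
{
  pcl::PointXYZRGBA pt;
  pt.r = 200;   // red byte from the resized color buffer
  pt.g = 100;   // green byte
  pt.b = 50;    // blue byte
  pt.a = 0xff;  // alpha, as set on RGBValue above
  // pt.rgba now holds the same packed 32-bit color that the union stores in one write
  return 0;
}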
Example #4
bool LcmTranslator::
toLcm(const DepthImageView& iView, drc::map_image_t& oMessage,
      const float iQuantMax, const bool iCompress) {

  oMessage.view_id = iView.getId();

  // copy depth array
  DepthImage::Type imageType = DepthImage::TypeDepth;
  DepthImage::Ptr depthImage = iView.getDepthImage();
  float invalidValue = std::numeric_limits<float>::infinity();
  int numDepths = depthImage->getWidth() * depthImage->getHeight();
  std::vector<float> inDepths = depthImage->getData(imageType);
  std::vector<uint8_t> data((uint8_t*)(&inDepths[0]),
                            (uint8_t*)(&inDepths[0]) + numDepths*sizeof(float));
  float* outDepths = (float*)(&data[0]);

  // find extremal values and transform
  float zMin(std::numeric_limits<float>::infinity());
  float zMax(-std::numeric_limits<float>::infinity());
  for (int i = 0; i < numDepths; ++i) {
    float val = outDepths[i];
    if (val == invalidValue) continue;
    zMin = std::min(zMin, val);
    zMax = std::max(zMax, val);
  }
  if (zMin >= zMax) {
    zMin = 0;
    zMax = 1;
  }
  float zOffset(zMin), zScale(zMax-zMin);
  int bits = 11;
  if (iQuantMax == 0) {
    bits = 32;
    zScale = 1;
    zOffset = 0;
  }
  else {
    if (iQuantMax > 0) {
      bits = ceil(std::log2(zScale/iQuantMax));
      bits = std::min(bits, 16);
      bits = std::max(bits, 0);
    }
    zScale /= ((1 << bits) - 1);
  }
  for (int i = 0; i < numDepths; ++i) {
    float val = outDepths[i];
    if (val == invalidValue) continue;
    outDepths[i] = (outDepths[i]-zOffset)/zScale;
    if (bits < 32) outDepths[i] += 0.5f;  // for rounding
  }

  // store to blob
  DataBlob::Spec spec;
  spec.mDimensions.push_back(depthImage->getWidth());
  spec.mDimensions.push_back(depthImage->getHeight());
  spec.mStrideBytes.push_back(sizeof(float));
  spec.mStrideBytes.push_back(depthImage->getWidth()*sizeof(float));
  spec.mCompressionType = DataBlob::CompressionTypeNone;
  spec.mDataType = DataBlob::DataTypeFloat32;
  DataBlob blob;
  blob.setData(data, spec);

  // compress and convert
  DataBlob::CompressionType compressionType =
    iCompress ? DataBlob::CompressionTypeZlib : DataBlob::CompressionTypeNone;
  DataBlob::DataType dataType;
  if (bits <= 8) dataType = DataBlob::DataTypeUint8;
  else if (bits <= 16) dataType = DataBlob::DataTypeUint16;
  else dataType = DataBlob::DataTypeFloat32;
  blob.convertTo(compressionType, dataType);
  if (!toLcm(blob, oMessage.blob)) return false;

  // transform from reference to image
  Eigen::Projective3f xform = iView.getTransform();
  oMessage.data_scale = zScale;
  oMessage.data_shift = zOffset;
  for (int i = 0; i < 4; ++i) {
    for (int j = 0; j < 4; ++j) {
      oMessage.transform[i][j] = xform(i,j);
    }
  }

  return true;
}
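
The bit depth above is chosen so that one quantization step is roughly bounded by iQuantMax (clamped to at most 16 bits), and depths are then rescaled into that integer range before being stored in the blob. A standalone sketch of that selection with hypothetical depth bounds and tolerance:

#include <algorithm>
#include <cmath>
#include <cstdio>

int main ()
{
  const float zMin = 0.5f, zMax = 4.5f;  // assumed depth range in meters
  const float quantMax = 0.01f;          // assumed quantization tolerance in meters

  float zOffset = zMin;
  float zScale = zMax - zMin;
  int bits = static_cast<int> (std::ceil (std::log2 (zScale / quantMax)));
  bits = std::max (0, std::min (bits, 16));
  zScale /= (1 << bits) - 1;             // size of one quantization step

  // a depth sample maps to an integer code and back with bounded error
  const float depth = 2.0f;
  const int code = static_cast<int> ((depth - zOffset) / zScale + 0.5f);
  const float restored = code * zScale + zOffset;
  std::printf ("bits=%d code=%d restored=%.4f\n", bits, code, restored);
  return 0;
}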