Пример #1
0
void KeyPointsFilter::runByImageBorder( std::vector<KeyPoint>& keypoints, Size imageSize, int borderSize )
{
    if( borderSize > 0)
    {
        if (imageSize.height <= borderSize * 2 || imageSize.width <= borderSize * 2)
            keypoints.clear();
        else
            keypoints.erase( std::remove_if(keypoints.begin(), keypoints.end(),
                                       RoiPredicate(Rect(Point(borderSize, borderSize),
                                                         Point(imageSize.width - borderSize, imageSize.height - borderSize)))),
                             keypoints.end() );
    }
}
Пример #2
0
/** \brief Compute BRISK binary descriptors for the previously supplied keypoints.
  *
  * Requires an organized input cloud (pixel neighborhoods are sampled from it).
  * Keypoints whose sampling pattern would fall outside the image at their scale
  * are erased from \a keypoints_ before extraction.  One descriptor string per
  * surviving keypoint is written into \a output; its width/height/is_dense
  * fields are updated at the end.
  *
  * \param[out] output point cloud receiving the descriptors
  */
template <typename PointInT, typename PointOutT, typename KeypointT, typename IntensityT> void
pcl::BRISK2DEstimation<PointInT, PointOutT, KeypointT, IntensityT>::compute (
    PointCloudOutT &output)
{
  if (!input_cloud_->isOrganized ())
  {
    // BUGFIX: the format string previously contained two "%s" conversions but
    // only one variadic argument was passed -- undefined behavior in
    // printf-style formatting.
    PCL_ERROR ("[pcl::%s::initCompute] doesn't support non organized clouds!\n", name_.c_str ());
    return;
  }

  // image size
  const int width = int (input_cloud_->width);
  const int height = int (input_cloud_->height);

  // destination for intensity data; will be forwarded to BRISK
  std::vector<unsigned char> image_data (width*height);

  // Flatten the organized cloud into a row-major 8-bit intensity image.
  // (casts silence the signed/unsigned loop-comparison warning; width/height
  // are non-negative by construction)
  for (size_t row_index = 0; row_index < static_cast<size_t> (height); ++row_index)
  {
    for (size_t col_index = 0; col_index < static_cast<size_t> (width); ++col_index)
    {
      image_data[row_index*width + col_index] = static_cast<unsigned char> (intensity_ ((*input_cloud_) (col_index, row_index)));
    }
  }

  // Remove keypoints very close to the border
  size_t ksize = keypoints_->points.size ();
  std::vector<int> kscales; // remember the scale per keypoint
  kscales.resize (ksize);

  // initialize constants
  static const float log2 = 0.693147180559945f;
  static const float lb_scalerange = std::log (scalerange_) / (log2);

  typename std::vector<KeypointT, Eigen::aligned_allocator<KeypointT> >::iterator beginning = keypoints_->points.begin ();
  std::vector<int>::iterator beginningkscales = kscales.begin ();

  static const float basic_size_06 = basic_size_ * 0.6f;
  unsigned int basicscale = 0;

  // Without scale invariance every keypoint uses one fixed pattern scale.
  if (!scale_invariance_enabled_)
    basicscale = std::max (static_cast<int> (float (scales_) / lb_scalerange * (log (1.45f * basic_size_ / (basic_size_06)) / log2) + 0.5f), 0);

  for (size_t k = 0; k < ksize; k++)
  {
    unsigned int scale;
    if (scale_invariance_enabled_)
    {
      // Map the keypoint's size onto a discrete pattern-scale index.
      scale = std::max (static_cast<int> (float (scales_) / lb_scalerange * (log (keypoints_->points[k].size / (basic_size_06)) / log2) + 0.5f), 0);
      // saturate
      if (scale >= scales_) scale = scales_ - 1;
      kscales[k] = scale;
    }
    else
    {
      scale = basicscale;
      kscales[k] = scale;
    }

    // The full sampling pattern at this scale must fit inside the image.
    const int border   = size_list_[scale];
    const int border_x = width - border;
    const int border_y = height - border;

    if (RoiPredicate (float (border), float (border), float (border_x), float (border_y), keypoints_->points[k]))
    {
      // Erase-in-loop: vector::erase invalidates iterators at/after the erased
      // position, so the cached begin iterators must be refreshed after an
      // erase at index 0.
      keypoints_->points.erase (beginning + k);
      kscales.erase (beginningkscales + k);
      if (k == 0)
      {
        beginning = keypoints_->points.begin ();
        beginningkscales = kscales.begin ();
      }
      ksize--;
      k--;  // re-examine the element that slid into slot k (size_t wrap is intended)
    }
  }

  // first, calculate the integral image over the whole image:
  // current integral image
  // NOTE(review): 'integral' is never actually filled (the call below is
  // commented out); presumably smoothedIntensity only needs it for large
  // smoothing kernels -- confirm before relying on coarse scales.
  std::vector<int> integral;    // the integral image
  //integral (image, integral);

  // Scratch buffer for gray values sampled at the pattern points.
  // BUGFIX: was a raw new[]/delete[] pair, which leaked on any early exit or
  // exception; std::vector provides the same storage with automatic cleanup.
  std::vector<int> values (points_);

  // resize the descriptors:
  //output = zeros (ksize, strings_);

  // now do the extraction for all keypoints:

  // temporary variables containing gray values at sample points:
  int t1;
  int t2;

  // the feature orientation
  int direction0;
  int direction1;

  // NOTE(review): assumes output.points is already sized to hold one
  // descriptor per surviving keypoint -- confirm against the caller.
  unsigned char* ptr = &output.points[0].descriptor[0];
  for (size_t k = 0; k < ksize; k++)
  {
    int theta;
    KeypointT &kp    = keypoints_->points[k];
    const int& scale = kscales[k];
    int shifter = 0;
    int* pvalues = values.data ();
    const float& x = float (kp.x);
    const float& y = float (kp.y);
    if (true) // kp.angle==-1  -- angle recomputation is currently forced on
    {
      if (!rotation_invariance_enabled_)
        // don't compute the gradient direction, just assign a rotation of 0 degrees
        theta = 0;
      else
      {
        // get the gray values in the unrotated pattern
        for (unsigned int i = 0; i < points_; i++)
          *(pvalues++) = smoothedIntensity (image_data, width, height, integral, x, y, scale, 0, i);

        direction0 = 0;
        direction1 = 0;
        // now iterate through the long pairings
        const BriskLongPair* max = long_pairs_ + no_long_pairs_;

        for (BriskLongPair* iter = long_pairs_; iter < max; ++iter)
        {
          t1 = values[iter->i];
          t2 = values[iter->j];
          const int delta_t = (t1 - t2);

          // update the direction: weights are fixed-point (scaled by 1024)
          const int tmp0 = delta_t * (iter->weighted_dx) / 1024;
          const int tmp1 = delta_t * (iter->weighted_dy) / 1024;
          direction0 += tmp0;
          direction1 += tmp1;
        }
        // gradient direction in degrees, discretized into n_rot_ bins
        kp.angle = atan2 (float (direction1), float (direction0)) / float (M_PI) * 180.0f;
        theta = static_cast<int> ((float (n_rot_) * kp.angle) / (360.0f) + 0.5f);
        if (theta < 0)
          theta += n_rot_;
        if (theta >= int (n_rot_))
          theta -= n_rot_;
      }
    }
    else
    {
      // figure out the direction:
      //int theta=rotationInvariance*round((_n_rot*atan2(direction.at<int>(0,0),direction.at<int>(1,0)))/(2*M_PI));
      if (!rotation_invariance_enabled_)
        theta = 0;
      else
      {
        theta = static_cast<int> (n_rot_ * (kp.angle / (360.0)) + 0.5);
        if (theta < 0)
          theta += n_rot_;
        if (theta >= int (n_rot_))
          theta -= n_rot_;
      }
    }

    // now also extract the stuff for the actual direction:
    // let us compute the smoothed values
    shifter = 0;

    //unsigned int mean=0;
    pvalues = values.data ();
    // get the gray values in the rotated pattern
    for (unsigned int i = 0; i < points_; i++)
      *(pvalues++) = smoothedIntensity (image_data, width, height, integral, x, y, scale, theta, i);

#ifdef __GNUC__
      typedef uint32_t __attribute__ ((__may_alias__)) UINT32_ALIAS;
#endif
#ifdef _MSC_VER
      // BUGFIX: the macro was previously misnamed UCHAR_ALIAS, leaving
      // UINT32_ALIAS undefined and breaking the MSVC build.
      // Todo: find the equivalent to may_alias
      #define UINT32_ALIAS uint32_t //__declspec(noalias)
#endif

    // now iterate through all the pairings, packing one comparison bit per pair
    UINT32_ALIAS* ptr2 = reinterpret_cast<UINT32_ALIAS*> (ptr);
    const BriskShortPair* max = short_pairs_ + no_short_pairs_;

    for (BriskShortPair* iter = short_pairs_; iter < max; ++iter)
    {
      t1 = values[iter->i];
      t2 = values[iter->j];

      if (t1 > t2)
        *ptr2 |= ((1) << shifter);

      // else already initialized with zero
      // take care of the iterators:
      ++shifter;

      if (shifter == 32)
      {
        shifter = 0;
        ++ptr2;
      }
    }

    ptr += strings_;

    // Account for the scale + orientation;
    ptr += sizeof (output.points[0].scale);
    ptr += sizeof (output.points[0].orientation);
  }

  // we do not change the denseness
  output.width = int (output.points.size ());
  output.height = 1;
  output.is_dense = true;
}