Example #1
/*!
 * \brief Detect at multiple image scales
 * \note  detection parameters can be configured in `config.json`
 */
static void detectMultiScale(const JoinCascador& joincascador, const Mat& img,
                             vector<Rect>& rects, vector<double>& scores,
                             vector<Mat_<double> >& shapes) {
  const Config& c = Config::GetInstance();
  const int win_w = c.img_o_size;
  const int win_h = c.img_o_size;
  int width = img.cols;
  int height = img.rows;
  const double factor = c.fddb_scale_factor;
  double scale = 1.;
  Mat img_ = img.clone();

  rects.clear();
  scores.clear();
  shapes.clear();

  // Scan an image pyramid: the detection window size stays fixed while the
  // image is repeatedly shrunk by `factor` until it no longer fits the window.
  while ((width >= win_w) && (height >= win_h)) {
    vector<Rect> rects_;
    vector<double> scores_;
    vector<Mat_<double> > shapes_;
    detectSingleScale(joincascador, img_, rects_, scores_, shapes_);
    const int n = rects_.size();
    // Map detections from the current pyramid level back to the coordinate
    // system of the original image.
    for (int i = 0; i < n; i++) {
      Rect& r = rects_[i];
      r.x *= scale; r.y *= scale;
      r.width *= scale; r.height *= scale;
    }
    rects.insert(rects.end(), rects_.begin(), rects_.end());
    scores.insert(scores.end(), scores_.begin(), scores_.end());
    shapes.insert(shapes.end(), shapes_.begin(), shapes_.end());

    // Prepare the next (smaller) pyramid level and accumulate the scale.
    scale *= factor;
    width = int(width / factor + 0.5);
    height = int(height / factor + 0.5);
    cv::resize(img_, img_, Size(width, height));
  }
}
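For reference, a minimal calling sketch for the function above. Since the function is `static`, the sketch assumes it lives in the same translation unit, with the same OpenCV includes and using-declarations as that file; how the `JoinCascador` model is constructed and loaded is not shown in the source, so it is taken here as an already-initialized reference. The grayscale input and the file names are assumptions for illustration only.

// Hypothetical caller, assumed to sit in the same file as detectMultiScale().
static void detectAndDraw(const JoinCascador& joincascador, const std::string& path) {
  Mat img = imread(path, IMREAD_GRAYSCALE);  // assuming the cascade runs on grayscale input
  vector<Rect> rects;
  vector<double> scores;
  vector<Mat_<double> > shapes;
  detectMultiScale(joincascador, img, rects, scores, shapes);
  for (size_t i = 0; i < rects.size(); i++) {
    rectangle(img, rects[i], Scalar(255));  // draw each detection
    // scores[i] is the detector confidence, shapes[i] the predicted landmarks
  }
  imwrite("result.png", img);  // placeholder output path
}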
Example #2
void CascadeClassifier::detectMultiScaleNoGrouping( const Mat& image, std::vector<Rect>& candidates,
                                                    std::vector<int>& rejectLevels, std::vector<double>& levelWeights,
                                                    double scaleFactor, Size minObjectSize, Size maxObjectSize,
                                                    bool outputRejectLevels )
{
    candidates.clear();

    if (!maskGenerator.empty())
        maskGenerator->initializeMask(image);

    if( maxObjectSize.height == 0 || maxObjectSize.width == 0 )
        maxObjectSize = image.size();

    Mat grayImage = image;
    if( grayImage.channels() > 1 )
    {
        Mat temp;
        cvtColor(grayImage, temp, COLOR_BGR2GRAY);
        grayImage = temp;
    }

    // Single buffer reused as backing storage for every scaled image.
    Mat imageBuffer(image.rows + 1, image.cols + 1, CV_8U);

    // Scan an image pyramid: the image shrinks by scaleFactor each iteration
    // while the detection window keeps its original size.
    for( double factor = 1; ; factor *= scaleFactor )
    {
        Size originalWindowSize = getOriginalWindowSize();

        Size windowSize( cvRound(originalWindowSize.width*factor), cvRound(originalWindowSize.height*factor) );
        Size scaledImageSize( cvRound( grayImage.cols/factor ), cvRound( grayImage.rows/factor ) );
        Size processingRectSize( scaledImageSize.width - originalWindowSize.width, scaledImageSize.height - originalWindowSize.height );

        // Stop when the scaled image can no longer hold one window or the window
        // has grown past maxObjectSize; skip scales still below minObjectSize.
        if( processingRectSize.width <= 0 || processingRectSize.height <= 0 )
            break;
        if( windowSize.width > maxObjectSize.width || windowSize.height > maxObjectSize.height )
            break;
        if( windowSize.width < minObjectSize.width || windowSize.height < minObjectSize.height )
            continue;

        Mat scaledImage( scaledImageSize, CV_8U, imageBuffer.data );
        resize( grayImage, scaledImage, scaledImageSize, 0, 0, INTER_LINEAR );

        // Sliding-window step in pixels: HOG features use a fixed stride; otherwise
        // the stride tightens to 1 once the image has been shrunk past factor 2.
        int yStep;
        if( getFeatureType() == cv::FeatureEvaluator::HOG )
        {
            yStep = 4;
        }
        else
        {
            yStep = factor > 2. ? 1 : 2;
        }

        // Partition the scan region into strips of roughly PTS_PER_THREAD window
        // positions each, so detectSingleScale can process them in parallel.
        int stripCount, stripSize;

        const int PTS_PER_THREAD = 1000;
        stripCount = ((processingRectSize.width/yStep)*(processingRectSize.height + yStep-1)/yStep + PTS_PER_THREAD/2)/PTS_PER_THREAD;
        stripCount = std::min(std::max(stripCount, 1), 100);
        stripSize = (((processingRectSize.height + stripCount - 1)/stripCount + yStep-1)/yStep)*yStep;

        if( !detectSingleScale( scaledImage, stripCount, processingRectSize, stripSize, yStep, factor, candidates,
            rejectLevels, levelWeights, outputRejectLevels ) )
            break;
    }
}
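Since detectMultiScaleNoGrouping is an internal helper, applications normally call the public cv::CascadeClassifier::detectMultiScale overload, which in OpenCV's implementation drives this pyramid loop and then groups overlapping candidates. A minimal sketch follows; the cascade XML and image path are placeholders, and scaleFactor/minSize correspond to the scaleFactor/minObjectSize parameters of the helper above, while minNeighbors controls the grouping step applied afterwards.

#include <opencv2/objdetect.hpp>
#include <opencv2/imgcodecs.hpp>
#include <vector>

int main() {
  cv::CascadeClassifier cascade;
  // Placeholder model: any trained OpenCV cascade XML works here.
  if (!cascade.load("haarcascade_frontalface_default.xml"))
    return 1;

  cv::Mat gray = cv::imread("input.jpg", cv::IMREAD_GRAYSCALE);
  if (gray.empty())
    return 1;

  std::vector<cv::Rect> objects;
  // image, output rects, scaleFactor, minNeighbors, flags, minSize
  cascade.detectMultiScale(gray, objects, 1.1, 3, 0, cv::Size(30, 30));

  return 0;
}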