Example #1
0
    void prepare(cv::Rect dst_roi)
    {
        using namespace cv;
        dst_roi_final_ = dst_roi;

        // Limit the number of pyramid bands so the coarsest level is still
        // at least one pixel along the longer side of the destination ROI.
        const double longest_side = static_cast<double>(max(dst_roi.width, dst_roi.height));
        num_bands_ = min(actual_num_bands_, static_cast<int>(ceil(log(longest_side) / log(2.0))));

        // Pad the ROI so both dimensions are multiples of (1 << num_bands_);
        // every pyramid level is then an exact half-size of the previous one.
        const int align = 1 << num_bands_;
        dst_roi.width += (align - dst_roi.width % align) % align;
        dst_roi.height += (align - dst_roi.height % align) % align;

        Blender::prepare(dst_roi);

        // Level 0 of the Laplacian pyramid aliases the blender's destination image.
        dst_pyr_laplace_.resize(num_bands_ + 1);
        dst_pyr_laplace_[0] = dst_;

        dst_band_weights_.resize(num_bands_ + 1);
        dst_band_weights_[0].create(dst_roi.size(), weight_type_);
        dst_band_weights_[0].setTo(0);

        // Allocate and zero the remaining levels, each half the size
        // (rounded up) of the level above it.
        for (int lvl = 1; lvl <= num_bands_; ++lvl)
        {
            const int prev_rows = dst_pyr_laplace_[lvl - 1].rows;
            const int prev_cols = dst_pyr_laplace_[lvl - 1].cols;
            dst_pyr_laplace_[lvl].create((prev_rows + 1) / 2, (prev_cols + 1) / 2, CV_16SC3);
            dst_pyr_laplace_[lvl].setTo(Scalar::all(0));

            const int wrows = dst_band_weights_[lvl - 1].rows;
            const int wcols = dst_band_weights_[lvl - 1].cols;
            dst_band_weights_[lvl].create((wrows + 1) / 2, (wcols + 1) / 2, weight_type_);
            dst_band_weights_[lvl].setTo(0);
        }
    }
Example #2
0
// Returns the bounding rect (top-left corner + source size) of the best match
// for the vertically flipped contents of source_rect within image.
//   nsigma (out, may be NULL): how many standard deviations the best score
//     lies above the mean of the response map — a confidence measure.
//   mask (optional): non-zero pixels mark the region of the scene to search.
cv::Rect findBestMatchLocation( const cv::Mat& image, const cv::Rect& source_rect, 
        double* nsigma, const cv::Mat& mask )
{
    cv::Mat image_gray;
    // NOTE(review): the rest of this file converts with CV_BGR2GRAY; confirm
    // whether the input here really is RGB-ordered before changing this.
    cvtColor( image, image_gray, CV_RGB2GRAY, 1 );
    cv::Mat image_copy = image_gray.clone();

    // Build the template: the source region flipped about the horizontal
    // axis, so its reflection can be matched in the scene.
    cv::Mat image_template_copy = image_gray.clone();
    cv::Mat sourceTemplate = image_template_copy( source_rect );
    flip( sourceTemplate, sourceTemplate, 0 );

    // Results matrix: one score for each placement of the template's
    // top-left corner. BUGFIX: cv::Mat::create takes (rows, cols) — the
    // original passed (cols, rows).
    int result_cols = image.cols-sourceTemplate.cols+1;
    int result_rows = image.rows-sourceTemplate.rows+1;
    cv::Mat result;
    result.create( result_rows, result_cols, CV_32FC1 );

    // Restrict the search to the masked region of the scene, if given.
    if( !mask.empty() )
    {
        cv::Mat tmp;
        image_copy.copyTo( tmp, mask );
        image_copy = tmp;
    }

    //0:CV_TM_SQDIFF
    //1:CV_TM_SQDIFF_NORMED
    //2:CV_TM_CORR
    //3:CV_TM_CCOR_NORMED
    //4:CV_TM_CCOEFF
    //5:CV_TM_CCOEFF_NORMED <----Most successful at finding reflections
    
    int match_method = CV_TM_CCOEFF_NORMED; // 4 seemed good for stddev thresholding.

    // BUGFIX: match against the (possibly masked) scene. The original matched
    // image_gray here, which silently ignored the mask built above.
    matchTemplate( image_copy, sourceTemplate, result, match_method );

    double minVal, maxVal; 
    cv::Point minLoc, maxLoc, matchLoc;
    minMaxLoc( result, &minVal, &maxVal, &minLoc, &maxLoc, cv::Mat() );

    // SQDIFF variants score best matches low; all other methods score high.
    if( match_method == CV_TM_SQDIFF || match_method == CV_TM_SQDIFF_NORMED )
    {
        matchLoc = minLoc;
    }
    else 
    {
        matchLoc = maxLoc;
    }

    // Report the peak's distance from the mean response, in standard
    // deviations (guard against a NULL out-parameter).
    cv::Scalar mean, stddev;
    meanStdDev( result, mean, stddev, cv::Mat() );
    if( nsigma )
        *nsigma = ( maxVal-mean[0] )/ stddev[0];

    // matchLoc is the location of the top left corner of the reflection
    // that matchTemplate found
    return cv::Rect( matchLoc, source_rect.size() );
}
Example #3
0
//--------------------------------------------------------------------------------------
//       Class:  ARC_Pair
//      Method:  ARC_Pair :: convert_to_point
// Description:  Returns the location of the strongest trackable feature inside
// a rect of size s centered within r; (-1,-1) if no feature is found.
//--------------------------------------------------------------------------------------
    cv::Point
ARC_Pair::convert_to_point ( const cv::Rect& r, const cv::Mat& img, const cv::Size& s )
{
    // Shrink r down to a rect of size s sharing r's center.
    cv::Rect inner( r );
    inner += 0.5*cv::Point( r.size()-s );   // shift top-left halfway toward the center
    inner -= r.size() - s;                  // reduce the dimensions down to s

    // Mask off everything outside that centered region and search it for
    // the single strongest corner.
    cv::Mat gray;
    cvtColor( img, gray, CV_BGR2GRAY );
    cv::Mat roi_mask = cv::Mat::zeros( img.size(), CV_8UC1 );
    rectangle( roi_mask, inner, 255, CV_FILLED );

    std::vector<cv::Point> corners;
    goodFeaturesToTrack( gray, corners, 1, 0.01, 10, roi_mask, 3, 0, 0.04);

    // (-1,-1) signals that no trackable feature was found.
    return corners.empty() ? cv::Point( -1, -1 ) : corners[0];
}		// -----  end of method ARC_Pair::convert_to_point  ----- 
Example #4
0
MatCluster::MatCluster(const std::vector<cv::Point> &points,
				 const cv::Rect &boundsInWhole,
				 std::vector<cv::Point> deepest,
				 int smoothing)
				 : Cluster(points.size(), boundsInWhole, deepest, smoothing),
				 mat(boundsInWhole.size())
{
	// Rasterize the point set into a binary mask, translating each point
	// by -offset so the cluster's bounding box maps onto the mat's origin.
	mat.setTo(0);
	for (const Point &p : points)
		mat(p - offset) = 1;
}
Example #5
0
// Scales the landmark data so that activeRect's width maps onto destSize's
// width (no rotation is applied), optionally saving the transformed image.
const ImageLandmarkDataPtr ImageTransformer::TransformData(const ImageLandmarkDataPtr& landmarkData,
                                                           const cv::Rect& activeRect,
                                                           const cv::Size& destSize,
                                                           bool shouldSaveNewImage)
{
  // Uniform scale derived from the widths only; the height is not considered.
  const double scale = (double)destSize.width / (double)activeRect.size().width;

  // Angle of 0 makes this a pure scale about the center of activeRect
  // (integer division — center rounds toward the top-left for odd sizes).
  const cv::Point2i center(activeRect.x + activeRect.width / 2, activeRect.y + activeRect.height / 2);
  const cv::Mat transform = cv::getRotationMatrix2D(center, 0.f, scale);

  return TransformDataWithMat(landmarkData, transform, shouldSaveNewImage);
}
Example #6
0
// Builds a new collection in which each image is cropped to its detected
// face region and rescaled to kImageSize x kImageSize. Entries whose face
// cannot be detected are logged and omitted from the result.
const LandmarkCollectionDataPtr ImageTransformer::TransformCollectionUsingFaceCrop(const LandmarkCollectionDataPtr& collectionData)
{
  LandmarkCollectionDataPtr result(new LandmarkCollectionData(collectionData->CollectionSize()));
  
  CascadeClassifier faceDetector;
  collectionData->EnumerateConstColectionWithCallback([&] (const ImageLandmarkDataPtr& landmarkData, const int index, bool* stop) {

    const cv::Rect faceRect = faceDetector.DetectFace(landmarkData->ImageSource());
    if (faceRect.size().width == 0)
    {
      // No detectable face: skip this entry.
      std::cout<<"Face not found: "<<landmarkData->ImagePath()<<std::endl;
    }
    else
    {
      const ImageLandmarkDataPtr transformed = TransformData(landmarkData, faceRect, {kImageSize, kImageSize}, true);
      result->AddImageLandmarkData(transformed);
    }
    
    // Progress indicator.
    std::cout<<index<<std::endl;
  });
  
  return result;
}