Ejemplo n.º 1
0
Plane OpenCVContourMatcher::compareBruteForce(Contour contour1, Contour contour2){
	// Slice each contour's region out of its source image.
	cv::Mat region1 = sliceContour(contour1.getContourBoundary(),image1);
	cv::Mat region2 = sliceContour(contour2.getContourBoundary(),image2);
	// Locate keypoints in both regions.
	std::vector<cv::KeyPoint> keypoints1 = detectFeaturePoints(region1);
	std::vector<cv::KeyPoint> keypoints2 = detectFeaturePoints(region2);
	// Compute a descriptor matrix for each keypoint set.
	cv::Mat descriptors1 = extractDescriptors(region1,keypoints1);
	cv::Mat descriptors2 = extractDescriptors(region2,keypoints2);
	// Brute-force match the descriptors, refine the matches, and convert
	// the surviving keypoint pairs into a Plane of pixel locations.
	std::vector<cv::DMatch> rawMatches = matchBruteForce(descriptors1,descriptors2);
	return convertDMatchesToPlane(keypoints1,keypoints2,refineMatches(rawMatches));
}
Ejemplo n.º 2
0
Mat Kernel::projectProbe(Mat image){
  // Describe the probe image, project it through the kernel model, centre it
  // on the stored mean, then map it through the combined LDA/PCA basis.
  Mat descriptors = extractDescriptors(image);
  Mat ldaBasis = lda.eigenvectors().t();
  ldaBasis.convertTo(ldaBasis, CV_32F);
  Mat centered = projectProbeIntern(descriptors) - this->mean;
  return (ldaBasis * pca.eigenvectors) * centered;
}
Ejemplo n.º 3
0
// Trains the kernel prototype model from paired training photos/sketches:
// the first half of the set builds the kernel matrices Kp/Kg and the
// regression matrix R; the second half is projected and used to fit PCA
// followed by LDA. Assumes trainingPhotos and trainingSketches are the same
// size and index-aligned (pair i = photo i + sketch i).
void Kernel::compute()
{
  
  int n = trainingPhotos.size();
  
  // Cache one descriptor per training photo and per training sketch.
  for(int i=0; i<n; i++){
    trainingPhotosDescriptors.push_back(extractDescriptors(trainingPhotos[i]));
    trainingSketchesDescriptors.push_back(extractDescriptors(trainingSketches[i]));
  }
  
  // Kernel similarity matrices over the FIRST half of the training set:
  // Kg from photos (gallery side), Kp from sketches (probe side).
  Kp = Mat::zeros(n/2,n/2,CV_32F);
  Kg = Mat::zeros(n/2,n/2,CV_32F);
  
  for(int i=0; i<n/2; i++)
    for(int j=0; j<n/2; j++){
      Kg.at<float>(i,j) = this->calcKernel(trainingPhotosDescriptors[i], trainingPhotosDescriptors[j]);
      Kp.at<float>(i,j) = this->calcKernel(trainingSketchesDescriptors[i],trainingSketchesDescriptors[j]);
    }
    
    // NOTE(review): despite the indentation this statement runs ONCE, after
    // both loops complete — the outer `for` above has no braces, so its body
    // is only the inner loop. R is a least-squares-style mapping from probe-
    // kernel space to gallery-kernel space: R = Kg * (Kp'Kp)^-1 * Kp'.
    R = Kg*((Kp).t()*Kp).inv()*(Kp).t();
  
  vector<int> _classes;
  
  // Project the SECOND half of the training set. Each photo/sketch pair
  // contributes two columns to T2 that share the same class label i.
  // The first iteration seeds T2 via push_back because hconcat cannot
  // append to an empty matrix.
  for(int i=n/2; i<n; i++){
    if(T2.empty()){
      T2.push_back(this->projectProbeIntern(trainingSketchesDescriptors[i]));
      hconcat(T2,projectGalleryIntern(trainingPhotosDescriptors[i]),T2);
    }
    else{
      hconcat(T2,projectProbeIntern(trainingSketchesDescriptors[i]),T2);
      hconcat(T2,projectGalleryIntern(trainingPhotosDescriptors[i]),T2);
    }
    _classes.push_back(i);
    _classes.push_back(i);
  }
  
  // PCA retaining 99% of the variance, treating columns as observations,
  // then LDA on the transposed PCA projection. (computeVar and
  // CV_PCA_DATA_AS_COL are OpenCV 2.x APIs.)
  this->pca.computeVar(T2, Mat(), CV_PCA_DATA_AS_COL, 0.99);
  this->mean = pca.mean.clone();
  //cout << mean.size() << endl;
  //cout << pca.eigenvectors.size() << endl;
  //cout << T2.size() << endl;
  Mat T2_pca = pca.eigenvectors*T2;
  T2_pca = T2_pca.t();
  //cout << T2_pca.size() << endl; 
  lda.compute(T2_pca, _classes);
  //cout << lda.eigenvectors().size() << endl;
}
std::vector<cv::Mat> FeatureExtractor::extractDescriptors(std::vector<std::string> img_files){
	std::vector<cv::Mat> descriptors;
	for(std::string img_file : img_files){
		cv::Mat current = extractDescriptors(img_file);
		descriptors.push_back(current);
	}
		
	return descriptors;
}
Ejemplo n.º 5
0
// Constructor: stores the injected detector/extractor/reconstructor and the
// two input clouds, allocates all intermediate clouds, then immediately runs
// the full registration pipeline on source and target.
// NOTE(review): the `template <typename FeatureType>` header precedes this
// definition outside the visible chunk; the code below is unchanged.
ICCVTutorial<FeatureType>::ICCVTutorial(boost::shared_ptr<pcl::Keypoint<pcl::PointXYZRGB, pcl::PointXYZI> >keypoint_detector,
                                        typename pcl::Feature<pcl::PointXYZRGB, FeatureType>::Ptr feature_extractor,
                                        boost::shared_ptr<pcl::PCLSurfaceBase<pcl::PointXYZRGBNormal> > surface_reconstructor,
                                        typename pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr source,
                                        typename pcl::PointCloud<pcl::PointXYZRGB>::ConstPtr target)
: source_keypoints_ (new pcl::PointCloud<pcl::PointXYZI> ())
, target_keypoints_ (new pcl::PointCloud<pcl::PointXYZI> ())
, keypoint_detector_ (keypoint_detector)
, feature_extractor_ (feature_extractor)
, surface_reconstructor_ (surface_reconstructor)
, source_ (source)
, target_ (target)
, source_segmented_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, target_segmented_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, source_transformed_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, source_registered_ (new pcl::PointCloud<pcl::PointXYZRGB>)
, source_features_ (new pcl::PointCloud<FeatureType>)
, target_features_ (new pcl::PointCloud<FeatureType>)
, correspondences_ (new pcl::Correspondences)
, show_source2target_ (false)
, show_target2source_ (false)
, show_correspondences (false)
{
  // visualizer_.registerKeyboardCallback(&ICCVTutorial::keyboard_callback, *this, 0);
  
  // Pipeline, in strict order: segment both clouds, detect keypoints on the
  // segmented clouds, describe those keypoints, ...
  segmentation (source_, source_segmented_);
  segmentation (target_, target_segmented_);  
  
  detectKeypoints (source_segmented_, source_keypoints_);
  detectKeypoints (target_segmented_, target_keypoints_);
  
  extractDescriptors (source_segmented_, source_keypoints_, source_features_);
  extractDescriptors (target_segmented_, target_keypoints_, target_features_);
  
  // ... match features in BOTH directions (needed by the mutual-consistency
  // filter), filter, then estimate the initial and refined transforms.
  findCorrespondences (source_features_, target_features_, source2target_);
  findCorrespondences (target_features_, source_features_, target2source_);
  
  filterCorrespondences ();
  
  determineInitialTransformation ();
  determineFinalTransformation ();
  
  // reconstructSurface ();
}
// Handler fired when a new image arrives on the input port: refreshes the
// model data from the input streams, extracts descriptors for the models
// and for the scene, and writes both descriptor sets to the output ports.
// Any exception is logged and swallowed (best-effort, per-frame handler).
void DescriptorExtractor::onNewImage() {

    CLOG(LTRACE) << "onNewImage";
    try {

        // Change keypoint detector type (if required).
        setDescriptorExtractor();

        // Pull the current model data and scene keypoints from the input
        // streams into member state (read order matters for stream ports).
        models_imgs = in_models_imgs.read();
        models_names = in_models_names.read();
        models_keypoints = in_models_keypoints.read();
        scene_keypoints = in_scene_keypoints.read();

        cv::Mat scene_descriptors;

        // Load image containing the scene.
        cv::Mat scene_img = in_img.read().clone();

        // Fills the models_descriptors member from the model images/keypoints.
        extractDescriptors();



        // Extract features from scene.
        extractFeatures(scene_img, scene_keypoints, scene_descriptors);
        CLOG(LINFO) << "Scene features: " << scene_keypoints.size();

        // Copy the per-model descriptor sets into the output container.
        std::vector< std::vector<cv::Mat> > out_descriptors;
        //out_keypoints.push_back(scene_keypoints);
        for(int i = 0; i < models_descriptors.size(); ++i) {
            out_descriptors.push_back(models_descriptors[i]);
        }
        out_models_descriptors.write(out_descriptors);
        out_scene_descriptors.write(scene_descriptors);




    } catch (...) {
        CLOG(LERROR) << "onNewImage failed";
    }

}
Ejemplo n.º 7
0
int main(int argc, char** argv)
{
  THCState *state = (THCState*)malloc(sizeof(THCState));
  THCudaInit(state);

  if(argc < 3)
  {
    std::cout << "arguments: [network] [image1] [image2]\n";
    return 1;
  }

  const char *network_path = argv[1];
  auto net = loadNetwork(state, network_path);

  // load the images
  cv::Mat ima = cv::imread(argv[2]);
  cv::Mat imb = cv::imread(argv[3]);

  if(ima.empty() || imb.empty())
  {
    std::cout << "images not found\n";
    return 1;
  }

  cv::Mat ima_gray, imb_gray;
  cv::cvtColor(ima, ima_gray, cv::COLOR_BGR2GRAY);
  cv::cvtColor(imb, imb_gray, cv::COLOR_BGR2GRAY);

  // Here we set min_area parameter to a bigger value, like that minimal size
  // of a patch will be around 11x11, because the network was trained on bigger patches
  // this parameter is important in practice
  cv::Ptr<cv::MSER> detector = cv::MSER::create(5, 620);
  std::vector<cv::KeyPoint> kpa, kpb;
  detector->detect(ima_gray, kpa);
  detector->detect(imb_gray, kpb);
  std::cout << "image A MSER points detected: " << kpa.size() << std::endl;
  std::cout << "image B MSER points detected: " << kpb.size() << std::endl;

  std::vector<cv::Mat> patches_a, patches_b;
  extractPatches(ima_gray, kpa, patches_a);
  extractPatches(imb_gray, kpb, patches_b);

  cv::Mat descriptors_a, descriptors_b;
  extractDescriptors(state, net, patches_a, descriptors_a);
  extractDescriptors(state, net, patches_b, descriptors_b);

  cv::FlannBasedMatcher matcher;
  std::vector<cv::DMatch> matches;
  matcher.match( descriptors_a, descriptors_b, matches );

  double max_dist = 0; double min_dist = 100;

  //-- Quick calculation of max and min distances between keypoints
  for( int i = 0; i < descriptors_a.rows; i++ )
  { double dist = matches[i].distance;
    if( dist < min_dist ) min_dist = dist;
    if( dist > max_dist ) max_dist = dist;
  }

  printf("-- Max dist : %f \n", max_dist );
  printf("-- Min dist : %f \n", min_dist );


  std::vector<cv::DMatch> good_matches;
  for( int i = 0; i < descriptors_a.rows; i++ )
  { if( matches[i].distance <= std::max(4*min_dist, 0.02) )
    { good_matches.push_back( matches[i]); }
  }

  //-- Draw only "good" matches
  float f = 0.25;
  cv::resize(ima, ima, cv::Size(), f, f);
  cv::resize(imb, imb, cv::Size(), f, f);
  for(auto &it: kpa) { it.pt *= f; it.size *= f; }
  for(auto &it: kpb) { it.pt *= f; it.size *= f; }
  cv::Mat img_matches;
  cv::drawMatches( ima, kpa, imb, kpb,
               good_matches, img_matches, cv::Scalar::all(-1), cv::Scalar::all(-1),
               std::vector<char>(), cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS );

  for(auto &it : kpa)
    cv::circle(ima, cv::Point(it.pt.x, it.pt.y), it.size, cv::Scalar(255,255,0));
  for(auto &it : kpb)
    cv::circle(imb, cv::Point(it.pt.x, it.pt.y), it.size, cv::Scalar(255,255,0));

  cv::imshow("matches", img_matches);
  //cv::imshow("keypoints image 1", ima);
  //cv::imshow("keypoints image 2", imb);
  cv::waitKey();
  THCudaShutdown(state);

  return 0;
}