Example no. 1
template <typename TMat>
inline void quality_test(cv::Ptr<quality::QualityBase> ptr, const TMat& cmp, const Scalar& expected, const std::size_t quality_maps_expected = 1, const bool empty_expected = false )
{
    std::vector<cv::Mat> qMats = {};
    ptr->getQualityMaps(qMats);
    EXPECT_TRUE( qMats.empty());

    quality_expect_near( expected, ptr->compute(cmp));

    if (empty_expected)
        EXPECT_TRUE(ptr->empty());
    else
        EXPECT_FALSE(ptr->empty());

    ptr->getQualityMaps(qMats);

    EXPECT_EQ( qMats.size(), quality_maps_expected);
    for (auto& qm : qMats)
    {
        EXPECT_GT(qm.rows, 0);
        EXPECT_GT(qm.cols, 0);
    }

    ptr->clear();
    EXPECT_TRUE(ptr->empty());
}
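Hypothetical usage of the helper above, assuming the QualityMSE metric from opencv_contrib's quality module (the function name and the self-comparison are illustrative, not taken from the original test suite): comparing an image against itself should give an MSE of zero per channel and a single quality map.

#include <opencv2/quality.hpp>

void quality_test_usage_sketch(const cv::Mat& img)
{
    // Create the metric with img as the reference, then compare img against itself:
    // expected score is 0 per channel, one quality map, and a non-empty algorithm state.
    cv::Ptr<cv::quality::QualityBase> mse = cv::quality::QualityMSE::create(img);
    quality_test(mse, img, cv::Scalar(0., 0., 0., 0.));
}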
Example no. 2
void calculatePointCloud(cv::Ptr<cv::StereoSGBM> sgbm, Mat & imgU1, Mat & imgU2)
{
	Mat normalized_depth_map, depth_map;
	// Note: StereoSGBM::compute produces a CV_16S disparity map scaled by 16, not a metric depth map.
	sgbm->compute(imgU1, imgU2, depth_map);
	normalize(depth_map, normalized_depth_map, 0, 255, cv::NORM_MINMAX, CV_8U);

	imshow("depth_map", normalized_depth_map);
	waitKey(0);
}
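The function above only visualizes the raw SGBM output; it never builds the point cloud its name promises. A minimal sketch of the missing step, assuming the 4x4 reprojection matrix Q produced by cv::stereoRectify is available (the helper name is hypothetical):

#include <opencv2/calib3d.hpp>

void disparityToPointCloud(const cv::Mat& disparity16, const cv::Mat& Q, cv::Mat& xyz)
{
	// StereoSGBM returns CV_16S disparities scaled by 16; convert to float pixel disparities first.
	cv::Mat disparity32;
	disparity16.convertTo(disparity32, CV_32F, 1.0 / 16.0);
	// Reproject every pixel to a 3-channel XYZ image; missing disparities are flagged with large Z.
	cv::reprojectImageTo3D(disparity32, xyz, Q, /*handleMissingValues=*/true);
}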
Example no. 3
Mat StereoCamera::getDisparityMatrix(const Mat &left, const Mat &right) {
    Mat magic(left.rows, left.cols, CV_8UC1); // compute() reallocates this to a CV_16S disparity map

    Mat imgLeftGray, imgRightGray;
    cvtColor(left, imgLeftGray, COLOR_BGR2GRAY);
    cvtColor(right, imgRightGray, COLOR_BGR2GRAY);

    sgbm->compute(imgLeftGray, imgRightGray, magic);

    return magic;
}
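The sgbm member used above is constructed elsewhere; a minimal sketch of how it might be created (the parameter values are typical starting points, not the original project's settings):

cv::Ptr<cv::StereoSGBM> sgbm = cv::StereoSGBM::create(
    /*minDisparity=*/0,
    /*numDisparities=*/64,   // must be a multiple of 16
    /*blockSize=*/9);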
Example no. 4
  void extract(const cv::Mat &color, const cv::Rect &roi, const cv::Mat &mask, std::vector<cv::KeyPoint> &keypoints, cv::Mat &descriptors)
  {
    detector->detect(color(roi), keypoints, mask);

    for(size_t i = 0; i < keypoints.size(); ++i)
    {
      cv::KeyPoint &p = keypoints[i];
      p.pt.x += roi.x;
      p.pt.y += roi.y;
    }

    extractor->compute(color, keypoints, descriptors);
  }
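The detector and extractor members used by extract() are not shown; a minimal sketch of one possible setup (cv::ORB is chosen for illustration, any cv::Feature2D implementation works):

// ORB both detects keypoints and computes descriptors, so one instance can
// serve as the detector and the extractor of the helper above.
cv::Ptr<cv::Feature2D> detector  = cv::ORB::create(/*nfeatures=*/1000);
cv::Ptr<cv::Feature2D> extractor = detector;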
Example no. 5
Node::Node(ros::NodeHandle* nh,
        const cv::Mat& visual, const cv::Mat& depth,
        image_geometry::PinholeCameraModel cam_model,
        cv::Ptr<cv::FeatureDetector> detector,
        cv::Ptr<cv::DescriptorExtractor> extractor,
        cv::Ptr<cv::DescriptorMatcher> matcher,
        const sensor_msgs::PointCloud2ConstPtr& point_cloud,
        unsigned int msg_id,
        unsigned int id,
        const cv::Mat& detection_mask):
        nh_(nh),
        msg_id_(msg_id),
        id_(id),
        cloudMessage_(*point_cloud),
        cam_model_(cam_model),
        matcher_(matcher)
{
    std::clock_t starttime=std::clock();

    ROS_FATAL_COND(detector.empty(), "No valid detector!");
    detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );
    ROS_INFO("Found %d Keypoints", (int)feature_locations_2d_.size());

    cloud_pub = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
    cloud_pub2 = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/my_clouds",20);

    // get pcl::PointCloud to extract depth values at pixel positions
    std::clock_t starttime5=std::clock();
    // TODO: This takes 0.1 seconds and is not strictly necessary
    //pcl::fromROSMsg(*point_cloud,pc);
    pcl::fromROSMsg(*point_cloud,pc_col);
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

    // project pixels to 3dPositions and create search structures for the gicp
    projectTo3D(depth, feature_locations_2d_, feature_locations_3d_,pc_col); //takes less than 0.01 sec

    std::clock_t starttime4=std::clock();
    // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );

    std::clock_t starttime2=std::clock();
    extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
    assert(feature_locations_2d_.size() == feature_locations_3d_.size());
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );
    flannIndex = NULL;

    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");


}
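projectTo3D is not included in this snippet. A minimal sketch of the idea, under the assumption that pc_col is an organized cloud aligned with the image, so a keypoint's 3D position can be read at its pixel coordinates (the point and vector types are assumptions; the real signature also takes the depth image):

#include <cmath>
#include <vector>
#include <opencv2/core.hpp>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>
#include <Eigen/Core>

// Sketch: look up each keypoint's 3D position in the organized cloud, dropping
// keypoints without valid depth so the 2D and 3D lists stay in sync.
void projectTo3D_sketch(std::vector<cv::KeyPoint>& locations_2d,
                        std::vector<Eigen::Vector3f>& locations_3d,
                        const pcl::PointCloud<pcl::PointXYZRGB>& cloud)
{
    locations_3d.clear();
    for (auto it = locations_2d.begin(); it != locations_2d.end(); )
    {
        const int u = static_cast<int>(it->pt.x);
        const int v = static_cast<int>(it->pt.y);
        const pcl::PointXYZRGB& p = cloud.at(u, v);   // organized cloud access: (column, row)
        if (std::isnan(p.z)) { it = locations_2d.erase(it); continue; }
        locations_3d.push_back(Eigen::Vector3f(p.x, p.y, p.z));
        ++it;
    }
}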
Example no. 6
Node::Node(ros::NodeHandle& nh, const cv::Mat& visual,
    cv::Ptr<cv::FeatureDetector> detector,
    cv::Ptr<cv::DescriptorExtractor> extractor,
    cv::Ptr<cv::DescriptorMatcher> matcher,
    const sensor_msgs::PointCloud2ConstPtr point_cloud,
    const cv::Mat& detection_mask)
: id_(0), 
flannIndex(NULL),
matcher_(matcher)
{
#ifdef USE_ICP_CODE
  gicp_initialized = false;
#endif
  std::clock_t starttime=std::clock();

#ifdef USE_SIFT_GPU
  SiftGPUFeatureDetector* siftgpu = SiftGPUFeatureDetector::GetInstance();
  float* descriptors = siftgpu->detect(visual, feature_locations_2d_);
  if (descriptors == NULL) {
    ROS_FATAL("Can't run SiftGPU");
  }
#else
  ROS_FATAL_COND(detector.empty(), "No valid detector!");
  detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
#endif

  ROS_INFO("Feature detection and descriptor extraction runtime: %f", ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );

  /*
    if (id_  == 0)
        cloud_pub_ = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_base",10);
    else{
   */
  cloud_pub_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
  //   cloud_pub_ransac = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_current_ransac",10);
  //} */

  // get pcl::PointCloud to extract depth values at pixel positions
  std::clock_t starttime5=std::clock();
  // TODO: If batch sending/saving of clouds were removed, the point cloud wouldn't have to be stored,
  // which would slim down the memory requirements
  pcl::fromROSMsg(*point_cloud,pc_col);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "pc2->pcl conversion runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

  // project pixels to 3dPositions and create search structures for the gicp
#ifdef USE_SIFT_GPU
  // removes also unused descriptors from the descriptors matrix
  // build descriptor matrix
  projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, pc_col, descriptors, feature_descriptors_); //takes less than 0.01 sec

  if (descriptors != NULL) delete[] descriptors; // descriptor buffer is an array, so use array delete

#else
  projectTo3D(feature_locations_2d_, feature_locations_3d_, pc_col); //takes less than 0.01 sec
#endif

  // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
#ifdef USE_ICP_CODE
  std::clock_t starttime4=std::clock();
  createGICPStructures(); 
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "gicp runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );
#endif

  std::clock_t starttime2=std::clock();
#ifndef USE_SIFT_GPU
//  ROS_INFO("Use extractor");
  //cv::Mat topleft, topright;
  //topleft = visual.colRange(0,visual.cols/2+50);
  //topright= visual.colRange(visual.cols/2+50, visual.cols-1);
	//std::vector<cv::KeyPoint> kp1, kp2; 
  //extractor->compute(topleft, kp1, feature_descriptors_); //fill feature_descriptors_ with information 
  extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
#endif
  assert(feature_locations_2d_.size() == feature_locations_3d_.size());
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );

  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");
}
Example no. 7
/*
generate the data needed to train a codebook/vocabulary for bag-of-words methods
*/
int generateVocabTrainData(std::string trainPath,
						   std::string vocabTrainDataPath,
						   cv::Ptr<cv::FeatureDetector> &detector,
						   cv::Ptr<cv::DescriptorExtractor> &extractor)
{

	//Do not overwrite any files
	std::ifstream checker;
	checker.open(vocabTrainDataPath.c_str());
	if(checker.is_open()) {	
		std::cerr << vocabTrainDataPath << ": Training Data already present" <<
			std::endl;
		checker.close();
		return -1;
	}

	//load training movie
	cv::VideoCapture movie;
	movie.open(trainPath);
	if (!movie.isOpened()) {
		std::cerr << trainPath << ": training movie not found" << std::endl;
		return -1;
	}

	//extract data
	std::cout << "Extracting Descriptors" << std::endl;
	cv::Mat vocabTrainData;
	cv::Mat frame, descs, feats;
	std::vector<cv::KeyPoint> kpts;
	
	std::cout.setf(std::ios_base::fixed); 
	std::cout.precision(0);
	
	while(movie.read(frame)) {

		//detect & extract features
		detector->detect(frame, kpts);
		extractor->compute(frame, kpts, descs);

		//add all descriptors to the training data 
		vocabTrainData.push_back(descs);

		//show progress
		cv::drawKeypoints(frame, kpts, feats);
		cv::imshow("Training Data", feats);
		
		std::cout << 100.0*(movie.get(cv::CAP_PROP_POS_FRAMES) /
			movie.get(cv::CAP_PROP_FRAME_COUNT)) << "%. " <<
			vocabTrainData.rows << " descriptors         \r";
		std::cout.flush();
		
		if(cv::waitKey(5) == 27) {
			cv::destroyWindow("Training Data");
			std::cout << std::endl;
			return -1;
		}

	}
	cv::destroyWindow("Training Data");
	std::cout << "Done: " << vocabTrainData.rows << " Descriptors" << std::endl;

	//save the training data
	cv::FileStorage fs;	
	fs.open(vocabTrainDataPath, cv::FileStorage::WRITE);
	fs << "VocabTrainData" << vocabTrainData;
	fs.release();

	return 0;
}
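The training data written above still has to be clustered into a visual vocabulary before it can be used for bag-of-words matching. A minimal sketch of that step using OpenCV's BOWKMeansTrainer (the original project may use a different trainer; the cluster count is illustrative and the descriptors are assumed to be float, e.g. SURF):

#include <opencv2/features2d.hpp>

cv::Mat buildVocabulary(const std::string& vocabTrainDataPath, int clusterCount = 1000)
{
	// Load the descriptors saved by generateVocabTrainData
	cv::Mat vocabTrainData;
	cv::FileStorage fs(vocabTrainDataPath, cv::FileStorage::READ);
	fs["VocabTrainData"] >> vocabTrainData;
	fs.release();

	// k-means clustering: each cluster centre becomes one visual word (one row of the result)
	cv::BOWKMeansTrainer trainer(clusterCount);
	return trainer.cluster(vocabTrainData);
}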
/*****************************************************************************
 // The knn matching with k = 2
 // This code performs the matching and the refinement.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is necessary for
 //                     pose estimation.
 */
int knn_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    // variables that keep the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with the database descriptors.
    // with k-nearest neighbors with k=2
    _matcher.knnMatch(descriptorQuery , matches1, 2);
    
#ifdef DEBUG_OUT
    std::cout << "Found " << matches1.size() << " matching feature descriptors out of " << _matcher.getTrainDescriptors().size() << " database descriptors."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 3 Filter the matches.
    // Accept only matches (knn with k=2) which belong to one image.
    // The database tree within _matcher contains descriptors of all input images.
    // We expect that both nearest neighbors must belong to one image.
    // Otherwise we can remove the result.
    // Along with this, we count which reference image has the highest number of matches.
    // At this time, we consider this image as the searched image.
    
    // we init the hits vector with 0
    std::vector<int> hits(_num_ref_images);
    for (int i=0; i<_num_ref_images; i++)
    {
        hits[i] = 0;
    }
    
    // the loop runs through all matches and compares the image index
    // imgIdx. They must be equal, otherwise the matches belong to two
    // different reference images.
    for (int i=0; i<matches1.size(); i++)
    {
        // The comparison.
        if(matches1[i].size() >= 2 && matches1[i].at(0).imgIdx == matches1[i].at(1).imgIdx)
        {
            // we keep it
            matches_opt1.push_back(matches1[i]);
            // and count a hit
            hits[matches1[i].at(0).imgIdx]++;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Optimized " << matches_opt1.size() << " feature descriptors." << std::endl;
#endif
    
    // Now we search for the highest number of hits in our hit array
    // The variable max_idx keeps the image id.
    // The variable max_value the amount of hits.
    int max_idx = -1;
    int max_value = 0;
    for (int i=0; i<_num_ref_images; i++)
    {
#ifdef DEBUG_OUT
        std::cout << "for " << i << " : " << hits[i] << std::endl;
#endif
        if(hits[i]  > max_value)
        {
            max_value = hits[i];
            max_idx = i;
        }
    }
    
    
    
    ///////////////////////////////////////////////////////
    // 4. The cross-match
    // At this time, we test the database against the query descriptors.
    // The variable max_idx stores the reference image id. Thus, we test only
    // the descriptors that belong to max_idx against the query descriptors.
    if (max_idx < 0) return -1; // no reference image received any consistent matches
    _matcher.knnMatch(_descriptorsRefDB[max_idx], descriptorQuery, matches2, 2);
    
    
    ///////////////////////////////////////////////////////
    // 5. Refinement; Ratio test
    // The ratio test only accepts matches which are clear without ambiguity.
    // The best hit must be closer to the query descriptors than the second hit.
    int removed = ratioTest(matches_opt1);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matches."  << std::endl;
#endif
    
    removed = ratioTest(matches2);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matches."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; Symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query descriptors
    // or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches_opt1, matches2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after the symmetry test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; Epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_idx], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_idx]._ref_image , _keypointsRefDB[max_idx], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str = std::to_string(matches_out->size());
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_idx]._ref_image_str);
    
    putText(out, text, cv::Point(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cv::Scalar(0,255,255), 1, cv::LINE_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    
    return max_idx;
    
}
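The ratioTest and symmetryTest helpers referenced above are not part of this listing. Minimal sketches in the spirit of the comments (names suffixed with _sketch to mark them as illustrations; the 0.8 ratio threshold is an assumption):

// Ratio test: clear matches whose best hit is not clearly closer than the second
// hit; returns the number of matches removed.
int ratioTest_sketch(std::vector<std::vector<cv::DMatch> >& matches, float ratio = 0.8f)
{
    int removed = 0;
    for (auto& m : matches)
    {
        if (m.size() < 2 || m[0].distance > ratio * m[1].distance)
        {
            m.clear();
            ++removed;
        }
    }
    return removed;
}

// Symmetry test: keep only matches that survive in both matching directions.
void symmetryTest_sketch(const std::vector<std::vector<cv::DMatch> >& matches12,
                         const std::vector<std::vector<cv::DMatch> >& matches21,
                         std::vector<cv::DMatch>& symMatches)
{
    symMatches.clear();
    for (const auto& m12 : matches12)
    {
        if (m12.empty()) continue;
        for (const auto& m21 : matches21)
        {
            if (m21.empty()) continue;
            if (m12[0].queryIdx == m21[0].trainIdx && m21[0].queryIdx == m12[0].trainIdx)
            {
                symMatches.push_back(m12[0]);
                break;
            }
        }
    }
}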
/*****************************************************************************
 // This applies a brute force match without a trained datastructure.
 // It also calculates the two nearest neighbors.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is necessary for
 //                     pose estimation.
 */
int brute_force_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    
    // variables that keep the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Found " << descriptorQuery.rows << " feature descriptors in the image."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with all descriptors in the database
    // with k-nearest neighbors with k=2
    
    int max_removed = INT_MAX;
    int max_id = -1;
    
    for(int i=0; i<_descriptorsRefDB.size(); i++)
    {
        std::vector< std::vector< cv::DMatch> > matches_temp1;
        
        // Here we match all query descriptors against all db descriptors and try to find
        // matching descriptors
        _brute_force_matcher.knnMatch( descriptorQuery, _descriptorsRefDB[i],  matches_temp1, 2);
        
        
        ///////////////////////////////////////////////////////
        // 3. Refinement; Ratio test
        // The ratio test only accepts matches which are clear without ambiguity.
        // The best hit must be closer to the query descriptors than the second hit.
        int removed = ratioTest(matches_temp1);
        
        
        
        // We only accept the match with the highest number of hits / the vector with the minimum removed features
        int num_matches = matches_temp1.size();
        if(removed < max_removed)
        {
            max_removed = removed;
            max_id = i;
            matches1.clear();
            matches1 = matches_temp1;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Feature map number " << max_id << " has the highest hit with "<< matches1.size() -  max_removed << " descriptors." << std::endl;
#endif
    
    
    std::vector< std::vector< cv::DMatch> > matches_temp2;
    
    // Here we match all query descriptors against all db descriptors and try to find
    // matching descriptors
    _brute_force_matcher.knnMatch(_descriptorsRefDB[max_id],  descriptorQuery,  matches_temp2, 2);
    
    // The ratio test only accepts matches which are clear without ambiguity.
    // The best hit must be closer to the query descriptors than the second hit.
    int removed = ratioTest(matches_temp2);
    
    
    
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; Symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query descriptors
    // or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches1, matches_temp2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after the symmetry test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; Epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_id], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_id]._ref_image , _keypointsRefDB[max_id], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str = std::to_string(matches_out->size());
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_id]._ref_image_str);
    
    putText(out, text, cv::Point(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cv::Scalar(0,255,255), 1, cv::LINE_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    return max_id;
    
}
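ransacTest is likewise not shown. A minimal sketch of an epipolar filter matching the description in step 7, based on cv::findFundamentalMat with RANSAC (the threshold and confidence values, and which DMatch index maps to which keypoint set, are assumptions):

#include <opencv2/calib3d.hpp>

void ransacTest_sketch(const std::vector<cv::DMatch>& matches,
                       const std::vector<cv::KeyPoint>& kptsRef,
                       const std::vector<cv::KeyPoint>& kptsQuery,
                       std::vector<cv::DMatch>& outMatches)
{
    if (matches.size() < 8) return;   // RANSAC needs a minimum number of correspondences

    // Assumption: trainIdx indexes the reference keypoints, queryIdx the query keypoints.
    std::vector<cv::Point2f> ptsRef, ptsQuery;
    for (const auto& m : matches)
    {
        ptsRef.push_back(kptsRef[m.trainIdx].pt);
        ptsQuery.push_back(kptsQuery[m.queryIdx].pt);
    }

    // Estimate the fundamental matrix with RANSAC and keep only the inlier matches.
    std::vector<uchar> inliers(ptsRef.size(), 0);
    cv::findFundamentalMat(ptsRef, ptsQuery, cv::FM_RANSAC,
                           /*ransacReprojThreshold=*/3.0, /*confidence=*/0.99, inliers);
    outMatches.clear();
    for (size_t i = 0; i < inliers.size(); ++i)
        if (inliers[i]) outMatches.push_back(matches[i]);
}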
/*****************************************************************************
 Inits the database by loading all images from a certain directory, extracts the feature
 keypoints and descriptors and saves them in the database
 _keypointsRefDB and _descriptorsRefDB
 @param directory_path: the path of all input images for query, e.g., C:/my_database
 @param files: a std::vector object with a list of filenames.
 */
void init_database(std::string directory_path, std::vector<std::string> files)
{
    
    // This code reads the image names from the files argument, loads each image,
    // detects keypoints, extracts the descriptors and pushes everything into
    // two database objects, indicated by the ending DB.
    for (int i = 0; i<files.size(); i++)
    {
        FeatureMap fm;
        
        // Fetch the ref. image name from the input array
        fm._ref_image_str =  directory_path;
        fm._ref_image_str.append("/");
        fm._ref_image_str.append( files[i]);
        
        bool ret = exists_test(fm._ref_image_str);
        if (ret == false) continue;
        
        
        // Load the image
        fm._ref_image = cv::imread(fm._ref_image_str);
        cvtColor( fm._ref_image ,  fm._ref_image , cv::COLOR_BGR2GRAY);
        
        // Detect the keypoints
        _detector->detect(fm._ref_image,fm._keypointsRef);
        
        std::cout << "For reference image " << fm._ref_image_str << " - found " << fm._keypointsRef.size() << " keypoints" << std::endl;
        
        // If keypoints found
        if(fm._keypointsRef.size() > 0)
        {
            // extract descriptors
            _extractor->compute( fm._ref_image, fm._keypointsRef, fm._descriptorRef);
            
            // push descriptors and keypoints into database
            _keypointsRefDB.push_back(fm._keypointsRef);
            _descriptorsRefDB.push_back(fm._descriptorRef);
            
            // count the total number of feature descriptors and keypoints in the database.
            _num_ref_features_in_db += fm._keypointsRef.size();
        }
        
        // Draw the keypoints
        /*
         cv::Mat out;
         cv::drawKeypoints(fm._ref_image , fm._keypointsRef, out);
         cv::imshow("out", out );
         cv::waitKey();
         
         out.release();
         */
        
        feature_map_database.push_back(fm);
        
    }
    
    // Converting the total number of features to a string.
    _num_ref_features_in_db_str = std::to_string(_num_ref_features_in_db);
    
}
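knn_match above relies on a matcher that has already been trained with the database descriptors, so that DMatch::imgIdx identifies the reference image. init_database does not show that step; a minimal sketch of it, assuming _matcher is a cv::FlannBasedMatcher (the concrete matcher type is not visible in this listing):

// Register one descriptor matrix per reference image and build the search index.
void train_matcher_sketch(cv::FlannBasedMatcher& matcher,
                          const std::vector<cv::Mat>& descriptorsRefDB)
{
    matcher.add(descriptorsRefDB);
    matcher.train();
}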
    void callback(const sensor_msgs::ImageConstPtr& msg)
    {
        if (image_0_ == NULL)
        {
            // Take first image:
            try
            {
                image_0_ = cv_bridge::toCvCopy(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take first image: " << e.what());
                return;
            }

            ROS_INFO("First image taken");

            // Detect keypoints:
            detector_->detect(image_0_->image, keypoints_0_);
            ROS_INFO_STREAM(keypoints_0_.size() << " points found.");

            // Extract keypoints' descriptors:
            extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
        }
        else
        {
            // Take second image:
            try
            {
                image_1_ = cv_bridge::toCvShare(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take image: " << e.what());
                return;
            }

            // Detect keypoints:
            std::vector<cv::KeyPoint> keypoints_1;
            detector_->detect(image_1_->image, keypoints_1);
            ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");

            // Extract keypoints' descriptors:
            cv::Mat descriptors_1;
            extractor_->compute(image_1_->image, keypoints_1, descriptors_1);

            // Compute matches:
            std::vector<cv::DMatch> matches;
            match(descriptors_0_, descriptors_1, matches);

            // Compute homography:
            cv::Mat H;
            homography(keypoints_0_, keypoints_1, matches, H);

            // Draw matches:
            const int s = std::max(image_0_->image.rows, image_0_->image.cols);
            cv::Size size(s, s);
            cv::Mat draw_image;
            warped_image_ = boost::make_shared<cv_bridge::CvImage>(
                    image_0_->header, image_0_->encoding,
                    cv::Mat(size, image_0_->image.type()));
            if (!H.empty()) // filter outliers
            {
                std::vector<char> matchesMask(matches.size(), 0);

                const size_t N = matches.size();
                std::vector<int> queryIdxs(N), trainIdxs(N);
                for (size_t i = 0; i < N; ++i)
                {
                    queryIdxs[i] = matches[i].queryIdx;
                    trainIdxs[i] = matches[i].trainIdx;
                }

                std::vector<cv::Point2f> points1, points2;
                cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
                cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

                cv::Mat points1t;
                cv::perspectiveTransform(cv::Mat(points1), points1t, H);

                double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
                for (size_t i1 = 0; i1 < points1.size(); ++i1)
                {
                    if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                        matchesMask[i1] = 1;
                }
                // draw inliers
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // draw outliers
                for (size_t i1 = 0; i1 < matchesMask.size(); ++i1)
                    matchesMask[i1] = !matchesMask[i1];
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 0, 255), cv::Scalar(255, 0, 0),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_OVER_OUTIMG | cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

                ROS_INFO_STREAM("Number of inliers: " << cv::countNonZero(matchesMask));

                // Warp the new image using the homography,
                // so we should have something similar to the original image:
                warpPerspective(
                        image_1_->image, warped_image_->image, H, size,
                        cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);

                // Print the homography found:
                ROS_INFO_STREAM("Homography = " << H);
            }
            else
            {
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image);

                image_1_->image.copyTo(warped_image_->image);

                ROS_WARN_STREAM("No homography transformation found!");
            }

            // Publish warped image (using homography):
            warped_image_->header = image_1_->header;
            pub_.publish(warped_image_->toImageMsg());

            // Show images:
            cv::imshow("correspondences", draw_image);
            cv::imshow("homography", warped_image_->image);
            cv::waitKey(3);
        }
    }
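The match and homography helpers used by the callback are not part of this snippet. A minimal sketch of the homography step with cv::findHomography (the function name and the RANSAC threshold are assumptions), consistent with how H is later applied to keypoints_0_ via perspectiveTransform:

#include <opencv2/calib3d.hpp>

// Sketch: estimate H mapping keypoints of the first image onto the second; H is
// left empty when there are too few matches, which the caller treats as "no homography".
void homography_sketch(const std::vector<cv::KeyPoint>& keypoints_0,
                       const std::vector<cv::KeyPoint>& keypoints_1,
                       const std::vector<cv::DMatch>& matches,
                       cv::Mat& H)
{
    if (matches.size() < 4) { H.release(); return; }

    std::vector<cv::Point2f> points_0, points_1;
    for (const auto& m : matches)
    {
        points_0.push_back(keypoints_0[m.queryIdx].pt);   // query side: first image
        points_1.push_back(keypoints_1[m.trainIdx].pt);   // train side: second image
    }
    H = cv::findHomography(points_0, points_1, cv::RANSAC, /*ransacReprojThreshold=*/3.0);
}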
void reduce_measurement_g2o::compute_features(const cv::Mat & rgb,
		const cv::Mat & depth, const Eigen::Vector3f & intrinsics,
		cv::Ptr<cv::FeatureDetector> & fd,
		cv::Ptr<cv::DescriptorExtractor> & de,
		std::vector<cv::KeyPoint> & filtered_keypoints,
		pcl::PointCloud<pcl::PointXYZ> & keypoints3d, cv::Mat & descriptors) {
	cv::Mat gray;

	if (rgb.channels() != 1) {
		cv::cvtColor(rgb, gray, cv::COLOR_BGR2GRAY);
	} else {
		gray = rgb.clone(); // clone so the in-place blur below cannot modify the caller's image
	}

	cv::GaussianBlur(gray, gray, cv::Size(3, 3), 3);

	int threshold = 400;
	fd->setInt("hessianThreshold", threshold);

	//int threshold = 100;
	//fd->setInt("thres", threshold);

	std::vector<cv::KeyPoint> keypoints;

	cv::Mat mask(depth.size(), CV_8UC1);
	depth.convertTo(mask, CV_8U);

	fd->detect(gray, keypoints, mask);

	for (int i = 0; i < 5; i++) {
		if (keypoints.size() < 300) {
			threshold = threshold / 2;
			fd->setInt("hessianThreshold", threshold);
			//fd->setInt("thres", threshold);
			keypoints.clear();
			fd->detect(gray, keypoints, mask);
		} else {
			break;
		}
	}

	if (keypoints.size() > 400)
		keypoints.resize(400);

	filtered_keypoints.clear();
	keypoints3d.clear();

	for (size_t i = 0; i < keypoints.size(); i++) {
		if (depth.at<unsigned short>(keypoints[i].pt) != 0) {
			filtered_keypoints.push_back(keypoints[i]);

			pcl::PointXYZ p;
			p.z = depth.at<unsigned short>(keypoints[i].pt) / 1000.0f;
			p.x = (keypoints[i].pt.x - intrinsics[1]) * p.z / intrinsics[0];
			p.y = (keypoints[i].pt.y - intrinsics[2]) * p.z / intrinsics[0];

			//ROS_INFO("Point %f %f %f from  %f %f ", p.x, p.y, p.z, keypoints[i].pt.x, keypoints[i].pt.y);

			keypoints3d.push_back(p);

		}
	}

	de->compute(gray, filtered_keypoints, descriptors);
}
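A note on the intrinsics parameter: the indexing above implies intrinsics = (f, cx, cy) with a single focal length shared by both axes. A minimal sketch of packing that vector from a standard 3x3 camera matrix, under that assumption (the helper is hypothetical):

#include <opencv2/core.hpp>
#include <Eigen/Core>

// Pack (f, cx, cy) from a 3x3 CV_64F camera matrix K, assuming fx == fy.
Eigen::Vector3f intrinsicsFromK(const cv::Mat& K)
{
	return Eigen::Vector3f(static_cast<float>(K.at<double>(0, 0)),   // f  = fx
	                       static_cast<float>(K.at<double>(0, 2)),   // cx
	                       static_cast<float>(K.at<double>(1, 2)));  // cy
}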
Example no. 13
void keyframe::extractDescriptors(cv::Ptr<cv::DescriptorExtractor>& extractor) {
	extractor->compute(im, keypoints, descriptors);
}