Example No. 1
/*
shows the features detected in the training video
*/
int showFeatures(std::string trainPath, cv::Ptr<cv::FeatureDetector> &detector)
{
	
	//open the movie
	cv::VideoCapture movie;
	movie.open(trainPath);

	if (!movie.isOpened()) {
		std::cerr << trainPath << ": training movie not found" << std::endl;
		return -1;
	}

	std::cout << "Press Esc to Exit" << std::endl;
	cv::Mat frame, kptsImg;
	
	movie.read(frame);
	std::vector<cv::KeyPoint> kpts;
	while (movie.read(frame)) {
		detector->detect(frame, kpts);
		
		std::cout << kpts.size() << " keypoints detected...         \r" << std::flush;
		
		cv::drawKeypoints(frame, kpts, kptsImg);
		
		cv::imshow("Features", kptsImg);
		if(cv::waitKey(5) == 27) {
			break;
		}
	}
	std::cout << std::endl;

	cv::destroyWindow("Features");
	return 0;
}
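A minimal usage sketch for showFeatures (not part of the original example): it assumes OpenCV 3+, where cv::ORB::create() yields a detector compatible with cv::Ptr<cv::FeatureDetector>; the video path is a placeholder.

#include <opencv2/opencv.hpp>

int main()
{
	// Any cv::FeatureDetector would do; ORB is only a stand-in here.
	cv::Ptr<cv::FeatureDetector> detector = cv::ORB::create(1000);
	return showFeatures("train.avi", detector); // hypothetical training movie
}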
Example No. 2
  void extract(const cv::Mat &color, const cv::Rect &roi, const cv::Mat &mask, std::vector<cv::KeyPoint> &keypoints, cv::Mat &descriptors)
  {
    detector->detect(color(roi), keypoints, mask);

    for(size_t i = 0; i < keypoints.size(); ++i)
    {
      cv::KeyPoint &p = keypoints[i];
      p.pt.x += roi.x;
      p.pt.y += roi.y;
    }

    extractor->compute(color, keypoints, descriptors);
  }
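For context, a self-contained sketch of how the method above might be hosted and called (the surrounding class is not shown in the example, so the wrapper below and the ORB detector/extractor are assumptions): the mask must have the size of the ROI, because detection runs on color(roi), while descriptors are computed on the full image after the keypoints are shifted back to full-image coordinates.

#include <opencv2/opencv.hpp>

struct RoiFeatureExtractor
{
  cv::Ptr<cv::Feature2D> detector  = cv::ORB::create();  // stand-ins for the real members
  cv::Ptr<cv::Feature2D> extractor = cv::ORB::create();

  void extract(const cv::Mat &color, const cv::Rect &roi, const cv::Mat &mask,
               std::vector<cv::KeyPoint> &keypoints, cv::Mat &descriptors)
  {
    detector->detect(color(roi), keypoints, mask);  // mask must match roi.size()
    for (cv::KeyPoint &p : keypoints)               // shift ROI coordinates back to the full image
    {
      p.pt.x += roi.x;
      p.pt.y += roi.y;
    }
    extractor->compute(color, keypoints, descriptors);
  }
};

// usage:
//   RoiFeatureExtractor fe;
//   cv::Rect roi(0, 0, img.cols / 2, img.rows / 2);
//   cv::Mat mask(roi.size(), CV_8UC1, cv::Scalar(255));
//   fe.extract(img, roi, mask, keypoints, descriptors);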
Example No. 3
Node::Node(ros::NodeHandle* nh,
        const cv::Mat& visual, const cv::Mat& depth,
        image_geometry::PinholeCameraModel cam_model,
        cv::Ptr<cv::FeatureDetector> detector,
        cv::Ptr<cv::DescriptorExtractor> extractor,
        cv::Ptr<cv::DescriptorMatcher> matcher,
        const sensor_msgs::PointCloud2ConstPtr& point_cloud,
        unsigned int msg_id,
        unsigned int id,
        const cv::Mat& detection_mask):
        nh_(nh),
        msg_id_(msg_id),
        id_(id),
        cloudMessage_(*point_cloud),
        cam_model_(cam_model),
        matcher_(matcher)
{
    std::clock_t starttime=std::clock();

    ROS_FATAL_COND(detector.empty(), "No valid detector!");
    detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );
    ROS_INFO("Found %d Keypoints", (int)feature_locations_2d_.size());

    cloud_pub = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
    cloud_pub2 = nh_->advertise<sensor_msgs::PointCloud2>("/rgbdslam/my_clouds",20);

    // get pcl::PointCloud to extract depth values at pixel positions
    std::clock_t starttime5=std::clock();
    // TODO: This takes 0.1 seconds and is not strictly necessary
    //pcl::fromROSMsg(*point_cloud,pc);
    pcl::fromROSMsg(*point_cloud,pc_col);
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

    // project pixels to 3dPositions and create search structures for the gicp
    projectTo3D(depth, feature_locations_2d_, feature_locations_3d_,pc_col); //takes less than 0.01 sec

    std::clock_t starttime4=std::clock();
    // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "projection runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );

    std::clock_t starttime2=std::clock();
    extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
    assert(feature_locations_2d_.size() == feature_locations_3d_.size());
    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );
    flannIndex = NULL;

    ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > 0.01, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");


}
Example No. 4
Histogram compute_histogram(std::string imFile, cv::Ptr<cv::FeatureDetector> &detector, cv::Ptr<cv::FeatureDetector> &detector2, cv::SiftDescriptorExtractor &extractor, Quantization *quant) {
    cv::Mat img = cv::imread(imFile);

    //detect SIFT keypoints
    std::vector<cv::KeyPoint> keypoints;
    detector->detect( img, keypoints );

    //detect MSER keypoints
    std::vector<cv::KeyPoint> keypoints2;
    detector2->detect( img, keypoints2 );

    //group them together
    for(cv::KeyPoint& keypoint : keypoints2) {
        keypoints.push_back(keypoint);
    }

    std::cout << " - keypoint_ct: " << keypoints.size() << std::endl;

    //compute descriptors
    cv::Mat descriptor_uchar;
    extractor.compute(img, keypoints, descriptor_uchar);

    cv::Mat descriptor_double;
    descriptor_uchar.convertTo(descriptor_double, CV_64F);

    //convert from mat to bag of unquantized features
    BagOfFeatures unquantized_features;
    convert_mat_to_vector(descriptor_double, unquantized_features);

    //quantize to form bag of words
    Histogram bag_of_words;
    quant->quantize(unquantized_features, bag_of_words);

    //normalize

    return bag_of_words;
}
Example No. 5
    void testDetect(const cv::Mat& img)
    {
        hog->setGammaCorrection(false);
        hog->setSVMDetector(hog->getDefaultPeopleDetector());

        std::vector<cv::Point> locations;

        // Test detect
        hog->detect(loadMat(img), locations);

#ifdef DUMP
        dump(locations);
#else
        compare(locations);
#endif

        // Test detect on smaller image
        cv::Mat img2;
        cv::resize(img, img2, cv::Size(img.cols / 2, img.rows / 2));
        hog->detect(loadMat(img2), locations);

#ifdef DUMP
        dump(locations);
#else
        compare(locations);
#endif

        // Test detect on larger image
        cv::resize(img, img2, cv::Size(img.cols * 2, img.rows * 2));
        hog->detect(loadMat(img2), locations);

#ifdef DUMP
        dump(locations);
#else
        compare(locations);
#endif
    }
Example No. 6
void compute_bow_histogram(cv::Mat &sample, Histogram &feature_vector, cv::Ptr<cv::FeatureDetector> &detector, cv::SiftDescriptorExtractor &extractor, Quantization *quant){
    //detect keypoints
    std::vector<cv::KeyPoint> keypoints;
    detector->detect( sample, keypoints );

    //compute descriptor
    cv::Mat descriptor_uchar;
    extractor.compute(sample, keypoints, descriptor_uchar);

    cv::Mat descriptor_double;
    descriptor_uchar.convertTo(descriptor_double, CV_64F);

    //convert from mat to bag of unquantized features
    BagOfFeatures unquantized_features;
    convert_mat_to_vector(descriptor_double, unquantized_features);

    //quantize regions -- true BagOfFeatures
    quant->quantize(unquantized_features, feature_vector);
}
Example No. 7
/*
 * Class:     ph_edu_dlsu_akazefeaturesjni_MainActivity
 * Method:    findAKAZEFeatures
 * Signature: (JJ)V
 */
JNIEXPORT void JNICALL Java_ph_edu_dlsu_akazefeaturesjni_MainActivity_findAKAZEFeatures
  (JNIEnv *pEnv, jobject, jlong mGray, jlong mRGBA){
  
    cv::Mat& mGr  = *(cv::Mat*)mGray;
    cv::Mat& mRgb = *(cv::Mat*)mRGBA;

    std::vector<cv::KeyPoint> v;

    if (detector == NULL)
        detector = cv::AKAZE::create();
    
    detector->detect(mGr, v, cv::Mat());
    

    for( unsigned int i = 0; i < v.size(); i++ )
    {
        const cv::KeyPoint& kp = v[i];
        cv::circle(mRgb, cv::Point(kp.pt.x, kp.pt.y), 10, cv::Scalar(0,255,0,255));
    }
 
  }
Example No. 8
// gets the centroid (mean bag-of-words histogram) for a category from its training images
void LocalDescriptorAndBagOfFeature::train_category(const std::vector<cv::Mat> &samples, Histogram &centroid, const cv::Ptr<cv::FeatureDetector> &detector, const cv::SiftDescriptorExtractor &extractor, Quantization *quant){
    clock_t start = clock();
    int i = 0;
    for(const cv::Mat& sample : samples){
        i++;
        std::cout << "converting img " << i << " of " << samples.size() << " to bag of features" << std::endl;

        //detect keypoints
        std::vector<cv::KeyPoint> keypoints;
        detector->detect( sample, keypoints );

        //compute descriptor
        cv::Mat descriptor_uchar;
        extractor.compute(sample, keypoints, descriptor_uchar);

        cv::Mat descriptor_double;
        descriptor_uchar.convertTo(descriptor_double, CV_64F);

        //convert from mat to bag of unquantized features
        BagOfFeatures unquantized_features;
        convert_mat_to_vector(descriptor_double, unquantized_features);

        //quantize regions -- true BagOfFeatures
        Histogram feature_vector;
        quant->quantize(unquantized_features, feature_vector);

        //aggregate
        vector_add(centroid, feature_vector);
    }

    //divide by training category size to compute centroid
    //std::transform(centroid.begin(), centroid.end(), centroid.begin(), std::bind1st(std::divides<double>(),bikes.size()));
    for(double& d : centroid){
        d = d/samples.size();
    }
    std::cout << double( clock() - start ) / (double)CLOCKS_PER_SEC<< " seconds." << std::endl;
}
Example No. 9
Node::Node(ros::NodeHandle& nh, const cv::Mat& visual,
    cv::Ptr<cv::FeatureDetector> detector,
    cv::Ptr<cv::DescriptorExtractor> extractor,
    cv::Ptr<cv::DescriptorMatcher> matcher,
    const sensor_msgs::PointCloud2ConstPtr point_cloud,
    const cv::Mat& detection_mask)
: id_(0), 
flannIndex(NULL),
matcher_(matcher)
{
#ifdef USE_ICP_CODE
  gicp_initialized = false;
#endif
  std::clock_t starttime=std::clock();

#ifdef USE_SIFT_GPU
  SiftGPUFeatureDetector* siftgpu = SiftGPUFeatureDetector::GetInstance();
  float* descriptors = siftgpu->detect(visual, feature_locations_2d_);
  if (descriptors == NULL) {
    ROS_FATAL("Can't run SiftGPU");
  }
#else
  ROS_FATAL_COND(detector.empty(), "No valid detector!");
  detector->detect( visual, feature_locations_2d_, detection_mask);// fill 2d locations
#endif

  ROS_INFO("Feature detection and descriptor extraction runtime: %f", ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature detection runtime: " << ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC );

  /*
    if (id_  == 0)
        cloud_pub_ = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_base",10);
    else{
   */
  cloud_pub_ = nh.advertise<sensor_msgs::PointCloud2>("/rgbdslam/batch_clouds",20);
  //   cloud_pub_ransac = nh_->advertise<sensor_msgs::PointCloud2>("clouds_from_node_current_ransac",10);
  //} */

  // get pcl::PointCloud to extract depth values at pixel positions
  std::clock_t starttime5=std::clock();
  // TODO: If batch sending/saving of clouds were removed, the point cloud wouldn't have to be saved,
  // which would slim down the memory requirements
  pcl::fromROSMsg(*point_cloud,pc_col);
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime5) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "pc2->pcl conversion runtime: " << ( std::clock() - starttime5 ) / (double)CLOCKS_PER_SEC );

  // project pixels to 3dPositions and create search structures for the gicp
#ifdef USE_SIFT_GPU
  // also removes unused descriptors from the descriptor matrix
  // and builds the descriptor matrix
  projectTo3DSiftGPU(feature_locations_2d_, feature_locations_3d_, pc_col, descriptors, feature_descriptors_); //takes less than 0.01 sec

  if (descriptors != NULL) delete descriptors;

#else
  projectTo3D(feature_locations_2d_, feature_locations_3d_, pc_col); //takes less than 0.01 sec
#endif

  // projectTo3D needs a dense cloud to use the points.at(px.x, px.y) call
#ifdef USE_ICP_CODE
  std::clock_t starttime4=std::clock();
  createGICPStructures(); 
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime4) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "gicp runtime: " << ( std::clock() - starttime4 ) / (double)CLOCKS_PER_SEC );
#endif

  std::clock_t starttime2=std::clock();
#ifndef USE_SIFT_GPU
//  ROS_INFO("Use extractor");
  //cv::Mat topleft, topright;
  //topleft = visual.colRange(0,visual.cols/2+50);
  //topright= visual.colRange(visual.cols/2+50, visual.cols-1);
	//std::vector<cv::KeyPoint> kp1, kp2; 
  //extractor->compute(topleft, kp1, feature_descriptors_); //fill feature_descriptors_ with information 
  extractor->compute(visual, feature_locations_2d_, feature_descriptors_); //fill feature_descriptors_ with information 
#endif
  assert(feature_locations_2d_.size() == feature_locations_3d_.size());
  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime2) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "Feature extraction runtime: " << ( std::clock() - starttime2 ) / (double)CLOCKS_PER_SEC );

  ROS_INFO_STREAM_COND_NAMED(( (std::clock()-starttime) / (double)CLOCKS_PER_SEC) > global_min_time_reported, "timings", "constructor runtime: "<< ( std::clock() - starttime ) / (double)CLOCKS_PER_SEC  <<"sec");
}
Example No. 10
/*
generate FabMap bag-of-words data: an image descriptor for each frame
*/
int generateBOWImageDescs(std::string dataPath,
							std::string bowImageDescPath,
							std::string vocabPath,
							cv::Ptr<cv::FeatureDetector> &detector,
							cv::Ptr<cv::DescriptorExtractor> &extractor,
							int minWords)
{
	
	cv::FileStorage fs;	

	//ensure not overwriting training data
	std::ifstream checker;
	checker.open(bowImageDescPath.c_str());
	if(checker.is_open()) {	
		std::cerr << bowImageDescPath << ": FabMap Training/Testing Data "
			"already present" << std::endl;
		checker.close();
		return -1;
	}

	//load vocabulary
	std::cout << "Loading Vocabulary" << std::endl;
	fs.open(vocabPath, cv::FileStorage::READ);
	cv::Mat vocab;
	fs["Vocabulary"] >> vocab;
	if (vocab.empty()) {
		std::cerr << vocabPath << ": Vocabulary not found" << std::endl;
		return -1;
	}
	fs.release();

	//use a FLANN matcher to generate bag-of-words representations
	cv::Ptr<cv::DescriptorMatcher> matcher = 
		cv::DescriptorMatcher::create("FlannBased");
	cv::BOWImgDescriptorExtractor bide(extractor, matcher);
	bide.setVocabulary(vocab);

	//load movie
	cv::VideoCapture movie;
	movie.open(dataPath);

	if(!movie.isOpened()) {
		std::cerr << dataPath << ": movie not found" << std::endl;
		return -1;
	}

	//extract image descriptors
	cv::Mat fabmapTrainData;
	std::cout << "Extracting Bag-of-words Image Descriptors" << std::endl;
	std::cout.setf(std::ios_base::fixed);
	std::cout.precision(0);

	std::ofstream maskw;
	
	if(minWords) {
		maskw.open(std::string(bowImageDescPath + "mask.txt").c_str());
	}

	cv::Mat frame, bow;
	std::vector<cv::KeyPoint> kpts;
	
	while(movie.read(frame)) {
		detector->detect(frame, kpts);
		bide.compute(frame, kpts, bow);

		if(minWords) {
			//writing a mask file
			if(cv::countNonZero(bow) < minWords) {
				//frame masked
				maskw << "0" << std::endl;
			} else {
				//frame accepted
				maskw << "1" << std::endl;
				fabmapTrainData.push_back(bow);
			}
		} else {
			fabmapTrainData.push_back(bow);
		}
		
		std::cout << 100.0 * (movie.get(CV_CAP_PROP_POS_FRAMES) /
			movie.get(CV_CAP_PROP_FRAME_COUNT)) << "%    \r" << std::flush;
	}
	std::cout << "Done                                       " << std::endl;
	
	movie.release();

	//save training data
	fs.open(bowImageDescPath, cv::FileStorage::WRITE);
	fs << "BOWImageDescs" << fabmapTrainData;
	fs.release();

	return 0;	
}
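A hedged usage sketch for generateBOWImageDescs (not from the original source): SURF from the xfeatures2d contrib module is assumed, since the "FlannBased" matcher created inside the function expects float-valued descriptors; all file paths are placeholders.

#include <opencv2/opencv.hpp>
#include <opencv2/xfeatures2d.hpp> // assumes OpenCV built with the contrib modules

int main()
{
	cv::Ptr<cv::FeatureDetector> detector = cv::xfeatures2d::SURF::create(400);
	cv::Ptr<cv::DescriptorExtractor> extractor = cv::xfeatures2d::SURF::create(400);

	// "vocab.yml" must contain a "Vocabulary" matrix trained beforehand.
	return generateBOWImageDescs("test.avi", "bow_descs.yml", "vocab.yml",
		detector, extractor, /*minWords=*/0);
}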
Example No. 11
    void callback(const sensor_msgs::ImageConstPtr& msg)
    {
        if (image_0_ == NULL)
        {
            // Take first image:
            try
            {
                image_0_ = cv_bridge::toCvCopy(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take first image: " << e.what());
                return;
            }

            ROS_INFO("First image taken");

            // Detect keypoints:
            detector_->detect(image_0_->image, keypoints_0_);
            ROS_INFO_STREAM(keypoints_0_.size() << " points found.");

            // Extract keypoints' descriptors:
            extractor_->compute(image_0_->image, keypoints_0_, descriptors_0_);
        }
        else
        {
            // Take second image:
            try
            {
                image_1_ = cv_bridge::toCvShare(msg,
                        sensor_msgs::image_encodings::isColor(msg->encoding) ?
                        sensor_msgs::image_encodings::BGR8 :
                        sensor_msgs::image_encodings::MONO8);
            }
            catch (cv_bridge::Exception& e)
            {
                ROS_ERROR_STREAM("Failed to take image: " << e.what());
                return;
            }

            // Detect keypoints:
            std::vector<cv::KeyPoint> keypoints_1;
            detector_->detect(image_1_->image, keypoints_1);
            ROS_INFO_STREAM(keypoints_1.size() << " points found on the new image.");

            // Extract keypoints' descriptors:
            cv::Mat descriptors_1;
            extractor_->compute(image_1_->image, keypoints_1, descriptors_1);

            // Compute matches:
            std::vector<cv::DMatch> matches;
            match(descriptors_0_, descriptors_1, matches);

            // Compute homography:
            cv::Mat H;
            homography(keypoints_0_, keypoints_1, matches, H);

            // Draw matches:
            const int s = std::max(image_0_->image.rows, image_0_->image.cols);
            cv::Size size(s, s);
            cv::Mat draw_image;
            warped_image_ = boost::make_shared<cv_bridge::CvImage>(
                    image_0_->header, image_0_->encoding,
                    cv::Mat(size, image_0_->image.type()));
            if (!H.empty()) // filter outliers
            {
                std::vector<char> matchesMask(matches.size(), 0);

                const size_t N = matches.size();
                std::vector<int> queryIdxs(N), trainIdxs(N);
                for (size_t i = 0; i < N; ++i)
                {
                    queryIdxs[i] = matches[i].queryIdx;
                    trainIdxs[i] = matches[i].trainIdx;
                }

                std::vector<cv::Point2f> points1, points2;
                cv::KeyPoint::convert(keypoints_0_, points1, queryIdxs);
                cv::KeyPoint::convert(keypoints_1, points2, trainIdxs);

                cv::Mat points1t;
                cv::perspectiveTransform(cv::Mat(points1), points1t, H);

                double maxInlierDist = threshold_ < 0 ? 3 : threshold_;
                for (size_t i1 = 0; i1 < points1.size(); ++i1)
                {
                    if (cv::norm(points2[i1] - points1t.at<cv::Point2f>((int)i1,0)) <= maxInlierDist ) // inlier
                        matchesMask[i1] = 1;
                }
                // draw inliers
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 255, 0), cv::Scalar(0, 0, 255),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_RICH_KEYPOINTS);

                // draw outliers
                for (size_t i1 = 0; i1 < matchesMask.size(); ++i1)
                    matchesMask[i1] = !matchesMask[i1];
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image, cv::Scalar(0, 0, 255), cv::Scalar(255, 0, 0),
                        matchesMask,
                        cv::DrawMatchesFlags::DRAW_OVER_OUTIMG | cv::DrawMatchesFlags::NOT_DRAW_SINGLE_POINTS);

                ROS_INFO_STREAM("Number of inliers: " << cv::countNonZero(matchesMask));

                // Warp the new image using the homography,
                // so we should get something similar to the original image:
                warpPerspective(
                        image_1_->image, warped_image_->image, H, size,
                        cv::INTER_LINEAR | cv::WARP_INVERSE_MAP);

                // Print the homography found:
                ROS_INFO_STREAM("Homography = " << H);
            }
            else
            {
                cv::drawMatches(
                        image_0_->image, keypoints_0_,
                        image_1_->image, keypoints_1, matches,
                        draw_image);

                image_1_->image.copyTo(warped_image_->image);

                ROS_WARN_STREAM("No homography transformation found!");
            }

            // Publish warped image (using homography):
            warped_image_->header = image_1_->header;
            pub_.publish(warped_image_->toImageMsg());

            // Show images:
            cv::imshow("correspondences", draw_image);
            cv::imshow("homography", warped_image_->image);
            cv::waitKey(3);
        }
    }
Example No. 12
/*****************************************************************************
 // The knn matching with k = 2
 // This code performs the matching and the refinement.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is necessary for
 //                     pose estimation.
 */
int knn_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    // variables that hold the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with the database descriptors.
    // with k-nearest neighbors with k=2
    _matcher.knnMatch(descriptorQuery , matches1, 2);
    
#ifdef DEBUG_OUT
    std::cout << "Found " << matches1.size() << " matching feature descriptors out of " << _matcher.getTrainDescriptors().size() << " database descriptors."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 3. Filter the matches.
    // Accept only matches (knn with k=2) whose two nearest neighbors belong to the same image.
    // The database tree within _matcher contains descriptors of all input images.
    // We expect that both nearest neighbors belong to one image;
    // otherwise we remove the result.
    // Along with this, we count which reference image has the highest number of matches.
    // At this time, we consider this image as the searched image.
    
    // we initialize the hit counters with 0
    std::vector<int> hits(_num_ref_images);
    for (int i=0; i<_num_ref_images; i++)
    {
        hits[i] = 0;
    }
    
    // the loop runs through all matches and compares the image index
    // imgIdx. They must be equal; otherwise the matches belong to two
    // different reference images.
    for (int i=0; i<matches1.size(); i++)
    {
        // The comparison.
        if(matches1[i].at(0).imgIdx == matches1[i].at(1).imgIdx)
        {
            // we keep it
            matches_opt1.push_back(matches1[i]);
            // and count a hit
            hits[matches1[i].at(0).imgIdx]++;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Optimized " << matches_opt1.size() << " feature descriptors." << std::endl;
#endif
    
    // Now we search for the highest number of hits in our hit array
    // The variable max_idx keeps the image id.
    // The variable max_value the amount of hits.
    int max_idx = -1;
    int max_value = 0;
    for (int i=0; i<_num_ref_images; i++)
    {
#ifdef DEBUG_OUT
        std::cout << "for " << i << " : " << hits[i] << std::endl;
#endif
        if(hits[i]  > max_value)
        {
            max_value = hits[i];
            max_idx = i;
        }
    }
    
    
    
    ///////////////////////////////////////////////////////
    // 4. The cross-match
    // At this time, we test the database against the query descriptors.
    // The variable max_idx stores the reference image id. Thus, we test only
    // the descriptors that belong to max_idx against the query descriptors.
    _matcher.knnMatch(_descriptorsRefDB[max_idx], descriptorQuery, matches2, 2);
    
    
    ///////////////////////////////////////////////////////
    // 5. Refinement; Ratio test
    // The ratio test only accepts matches that are unambiguous:
    // the best hit must be clearly closer to the query descriptor than the second hit.
    int removed = ratioTest(matches_opt1);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    removed = ratioTest(matches2);
#ifdef DEBUG_OUT
    std::cout << "Removed " << removed << " matched."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; Symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query descriptors
    // or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches_opt1, matches2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after symetry test test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; Epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_idx], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_idx]._ref_image , _keypointsRefDB[max_idx], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str;
    std::stringstream conv;
    conv << matches_out->size();
    conv >> num_matches_str;
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_idx]._ref_image_str);
    
    putText(out, text, cvPoint(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,255,255), 1, CV_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    
    return max_idx;
    
}
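The ratioTest helper used above (and in the next example) is not part of this listing; the sketch below shows one common way to implement it, clearing ambiguous matches and returning how many were removed. The 0.8 threshold and the exact behavior are assumptions, not the original implementation.

#include <opencv2/opencv.hpp>
#include <vector>

// Assumed implementation, not from the original listing.
int ratioTest(std::vector< std::vector<cv::DMatch> > &matches, float ratio = 0.8f)
{
    int removed = 0;
    for (size_t i = 0; i < matches.size(); ++i)
    {
        // Drop matches that have fewer than two neighbors or whose best
        // neighbor is not clearly better than the second-best one.
        if (matches[i].size() < 2 ||
            matches[i][0].distance > ratio * matches[i][1].distance)
        {
            matches[i].clear();
            ++removed;
        }
    }
    return removed;
}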
Example No. 13
/*****************************************************************************
 // This applies a brute-force match without a trained data structure.
 // It also calculates the two nearest neighbors.
 // @param query_image: the input image
 // @param matches_out: a pointer that stores the output matches. It is necessary for
 //                     pose estimation.
 */
int brute_force_match(cv::Mat& query_image,  std::vector< cv::DMatch> * matches_out)
{
    
    // variables that hold the query keypoints and query descriptors
    std::vector<cv::KeyPoint>           keypointsQuery;
    cv::Mat                             descriptorQuery;
    
    // Temporary variables for the matching results
    std::vector< std::vector< cv::DMatch> > matches1;
    std::vector< std::vector< cv::DMatch> > matches2;
    std::vector< std::vector< cv::DMatch> > matches_opt1;
    
    
    //////////////////////////////////////////////////////////////////////
    // 1. Detect the keypoints
    // This line detects keypoints in the query image
    _detector->detect(query_image, keypointsQuery);
    
    
    
    // If keypoints were found, descriptors are extracted.
    if(keypointsQuery.size() > 0)
    {
        // extract descriptors
        _extractor->compute( query_image, keypointsQuery, descriptorQuery);
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Found " << descriptorQuery.size() << " feature descriptors in the image."  << std::endl;
#endif
    
    
    //////////////////////////////////////////////////////////////////////////////
    // 2. Here we match the descriptors with all descriptors in the database
    // with k-nearest neighbors with k=2
    
    int max_removed = INT_MAX;
    int max_id = -1;
    
    for(int i=0; i<_descriptorsRefDB.size(); i++)
    {
        std::vector< std::vector< cv::DMatch> > matches_temp1;
        
        // Here we match all query descriptors against all db descriptors and try to find
        // matching descriptors
        _brute_force_matcher.knnMatch( descriptorQuery, _descriptorsRefDB[i],  matches_temp1, 2);
        
        
        ///////////////////////////////////////////////////////
        // 3. Refinement; Ratio test
        // The ratio test only accepts matches that are unambiguous:
        // the best hit must be clearly closer to the query descriptor than the second hit.
        int removed = ratioTest(matches_temp1);
        
        
        
        // We only accept the match with the highest number of hits / the vector with the minimum removed features
        int num_matches = matches_temp1.size();
        if(removed < max_removed)
        {
            max_removed = removed;
            max_id = i;
            matches1.clear();
            matches1 = matches_temp1;
        }
    }
    
#ifdef DEBUG_OUT
    std::cout << "Feature map number " << max_id << " has the highest hit with "<< matches1.size() -  max_removed << " descriptors." << std::endl;
#endif
    
    
    std::vector< std::vector< cv::DMatch> > matches_temp2;
    
    // Here we match the descriptors of the best reference image (max_id) against
    // the query descriptors to prepare the symmetry test
    _brute_force_matcher.knnMatch(_descriptorsRefDB[max_id],  descriptorQuery,  matches_temp2, 2);
    
    // The ratio test only accepts matches that are unambiguous:
    // the best hit must be clearly closer to the query descriptor than the second hit.
    int removed = ratioTest(matches_temp2);
    
    
    
    
    ///////////////////////////////////////////////////////
    // 6. Refinement; Symmetry test
    // We only accept matches which appear in both knn-matches.
    // It should not matter whether we test the database against the query descriptors
    // or the query descriptors against the database.
    // If we do not find the same solution in both directions, we toss the match.
    std::vector<cv::DMatch> symMatches;
    symmetryTest(  matches1, matches_temp2, symMatches);
#ifdef DEBUG_OUT
    std::cout << "Kept " << symMatches.size() << " matches after symetry test test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 7. Refinement; Epipolar constraint
    // We perform an epipolar test using the RANSAC method.
    if(symMatches.size() > 25)
    {
        matches_out->clear();
        ransacTest( symMatches,  _keypointsRefDB[max_id], keypointsQuery, *matches_out);
        
        
    }
    
#ifdef DEBUG_OUT
    std::cout << "Kept " << matches_out->size() << " matches after RANSAC test."  << std::endl;
#endif
    
    ///////////////////////////////////////////////////////
    // 8.  Draw this image on screen.
    cv::Mat out;
    cv::drawMatches(feature_map_database[max_id]._ref_image , _keypointsRefDB[max_id], query_image, keypointsQuery, *matches_out, out, cv::Scalar(255,255,255), cv::Scalar(0,0,255));
    
    std::string num_matches_str;
    std::stringstream conv;
    conv << matches_out->size();
    conv >> num_matches_str;
    
    std::string text;
    text.append( num_matches_str);
    text.append("( " + _num_ref_features_in_db_str + " total)");
    text.append(" matches were found in reference image ");
    text.append( feature_map_database[max_id]._ref_image_str);
    
    putText(out, text, cvPoint(20,20),
            cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cvScalar(0,255,255), 1, CV_AA);
    
    cv::imshow("result", out);
    if (run_video) cv::waitKey(1);
    else cv::waitKey();
    
    
    
    // Delete the images
    query_image.release();
    out.release();
    
    
    return max_id;
    
}
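The symmetryTest helper is likewise not shown; a plausible sketch is given below (an assumption, not the original code). A match is kept only when the forward pass (query against database) and the backward pass (database against query) agree on the same pair of keypoints, which is why the query/train indices appear swapped.

#include <opencv2/opencv.hpp>
#include <vector>

// Assumed implementation, not from the original listing.
void symmetryTest(const std::vector< std::vector<cv::DMatch> > &matches1,
                  const std::vector< std::vector<cv::DMatch> > &matches2,
                  std::vector<cv::DMatch> &symMatches)
{
    symMatches.clear();
    for (size_t i = 0; i < matches1.size(); ++i)
    {
        if (matches1[i].empty()) continue;          // removed by the ratio test
        for (size_t j = 0; j < matches2.size(); ++j)
        {
            if (matches2[j].empty()) continue;      // removed by the ratio test
            if (matches1[i][0].queryIdx == matches2[j][0].trainIdx &&
                matches1[i][0].trainIdx == matches2[j][0].queryIdx)
            {
                symMatches.push_back(matches1[i][0]); // symmetric: keep the forward match
                break;
            }
        }
    }
}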
Example No. 14
/*****************************************************************************
 Inits the database by loading all images from a given directory, extracting the feature
 keypoints and descriptors, and saving them in the databases
 _keypointsRefDB and _descriptorsRefDB.
 @param directory_path: the path of all input images for query, e.g., C:/my_database
 @param files: a std::vector object with a list of filenames.
 */
void init_database(std::string directory_path, std::vector<std::string> files)
{
    
    // This code reads the image names from the files list, loads each image,
    // detects keypoints, extracts the descriptors, and pushes everything into
    // two database objects, indicated by the DB suffix.
    for (int i = 0; i<files.size(); i++)
    {
        FeatureMap fm;
        
        // Fetch the ref. image name from the input array
        fm._ref_image_str =  directory_path;
        fm._ref_image_str.append("/");
        fm._ref_image_str.append( files[i]);
        
        bool ret = exists_test(fm._ref_image_str);
        if (ret == false) continue;
        
        
        // Load the image
        fm._ref_image = cv::imread(fm._ref_image_str);
        cvtColor( fm._ref_image ,  fm._ref_image , CV_BGR2GRAY);
        
        // Detect the keypoints
        _detector->detect(fm._ref_image,fm._keypointsRef);
        
        std::cout << "For referemce image " << fm._ref_image_str << " - found " << fm._keypointsRef.size() << " keypoints" << std::endl;
        
        // If keypoints found
        if(fm._keypointsRef.size() > 0)
        {
            // extract descriptors
            _extractor->compute( fm._ref_image, fm._keypointsRef, fm._descriptorRef);
            
            // push descriptors and keypoints into database
            _keypointsRefDB.push_back(fm._keypointsRef);
            _descriptorsRefDB.push_back(fm._descriptorRef);
            
            // count the total number of feature descriptors and keypoints in the database.
            _num_ref_features_in_db += fm._keypointsRef.size();
        }
        
        // Draw the keypoints
        /*
         cv::Mat out;
         cv::drawKeypoints(fm._ref_image , fm._keypointsRef, out);
         cv::imshow("out", out );
         cv::waitKey();
         
         out.release();
         */
        
        feature_map_database.push_back(fm);
        
    }
    
    // Converting the total number of features to a string.
    std::stringstream conv;
    conv << _num_ref_features_in_db;
    conv >> _num_ref_features_in_db_str;
    
}
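A hypothetical call to init_database (directory and file names are placeholders; _detector, _extractor and the database containers are assumed to be module-level globals, as in the listing above):

std::vector<std::string> files;
files.push_back("ref_01.png");
files.push_back("ref_02.png");
files.push_back("ref_03.png");

// Fills _keypointsRefDB, _descriptorsRefDB and feature_map_database.
init_database("C:/my_database", files);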
Example No. 15
bool findCirclesGridAB( cv::InputArray _image, cv::Size patternSize,
		cv::OutputArray _centers, int flags, const cv::Ptr<cv::FeatureDetector> &blobDetector )
{
    bool isAsymmetricGrid = (flags & cv::CALIB_CB_ASYMMETRIC_GRID) ? true : false;
    bool isSymmetricGrid  = (flags & cv::CALIB_CB_SYMMETRIC_GRID ) ? true : false;
    CV_Assert(isAsymmetricGrid ^ isSymmetricGrid);

    cv::Mat image = _image.getMat();
    std::vector<cv::Point2f> centers;

    std::vector<cv::KeyPoint> keypoints;
    blobDetector->detect(image, keypoints);
    std::vector<cv::Point2f> points;
    for (size_t i = 0; i < keypoints.size(); i++)
    {
      points.push_back (keypoints[i].pt);
    }

    if(flags & cv::CALIB_CB_CLUSTERING)
    {
      CirclesGridClusterFinder circlesGridClusterFinder(isAsymmetricGrid);
      circlesGridClusterFinder.findGrid(points, patternSize, centers);
      cv::Mat(centers).copyTo(_centers);
      return !centers.empty();
    }

    CirclesGridFinderParameters parameters;
    parameters.vertexPenalty = -0.6f;
    parameters.vertexGain = 1;
    parameters.existingVertexGain = 10000;
    parameters.edgeGain = 1;
    parameters.edgePenalty = -0.6f;

    if(flags & cv::CALIB_CB_ASYMMETRIC_GRID)
      parameters.gridType = CirclesGridFinderParameters::ASYMMETRIC_GRID;
    if(flags & cv::CALIB_CB_SYMMETRIC_GRID)
      parameters.gridType = CirclesGridFinderParameters::SYMMETRIC_GRID;

    const int attempts = 2;
    const size_t minHomographyPoints = 4;
    cv::Mat H;
    for (int i = 0; i < attempts; i++)
    {
      centers.clear();
      CirclesGridFinder boxFinder(patternSize, points, parameters);
      bool isFound = false;
//#define BE_QUIET 1
#if BE_QUIET
      void* oldCbkData;
      //cv::ErrorCallback oldCbk = redirectError(quiet_error, 0, &oldCbkData);
#endif
      try
      {
        isFound = boxFinder.findHoles();
      }
      catch (const cv::Exception &)
      {

      }
#if BE_QUIET
      redirectError(oldCbk, oldCbkData);
#endif
      if (isFound)
      {
        switch(parameters.gridType)
        {
          case CirclesGridFinderParameters::SYMMETRIC_GRID:
            boxFinder.getHoles(centers);
            break;
          case CirclesGridFinderParameters::ASYMMETRIC_GRID:
            boxFinder.getAsymmetricHoles(centers);
            break;
          default:
            CV_Error(CV_StsBadArg, "Unknown pattern type");
        }

        if (i != 0)
        {
        	cv::Mat orgPointsMat;
        	cv::transform(centers, orgPointsMat, H.inv());
        	cv::convertPointsFromHomogeneous(orgPointsMat, centers);
        }
        cv::Mat(centers).copyTo(_centers);
        return true;
      }

      boxFinder.getHoles(centers);
      if (i != attempts - 1)
      {
        if (centers.size() < minHomographyPoints)
          break;
        H = CirclesGridFinder::rectifyGrid(boxFinder.getDetectedGridSize(), centers, points, points);
      }
    }
    cv::Mat(centers).copyTo(_centers);
    return false;
}
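A usage sketch for findCirclesGridAB, mirroring how cv::findCirclesGrid is normally called (OpenCV 3+ is assumed for cv::SimpleBlobDetector::create(); the image path and the 4x11 asymmetric pattern size are placeholders):

#include <opencv2/opencv.hpp>

int main()
{
    cv::Mat image = cv::imread("calib_target.png"); // placeholder calibration image
    if (image.empty()) return -1;

    cv::Ptr<cv::FeatureDetector> blobDetector = cv::SimpleBlobDetector::create();

    std::vector<cv::Point2f> centers;
    bool found = findCirclesGridAB(image, cv::Size(4, 11), centers,
                                   cv::CALIB_CB_ASYMMETRIC_GRID, blobDetector);
    if (found)
        cv::drawChessboardCorners(image, cv::Size(4, 11), centers, found);
    return found ? 0 : 1;
}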
Example No. 16
JNIEXPORT void JNICALL
Java_ph_edu_dlsu_orbflow_CameraActivity_process(JNIEnv *env, jobject instance,
                                                jobject pTarget, jbyteArray pSource) {
    uint32_t start;
    cv::Mat srcBGR;

    cv::RNG randnum(12345); // for random color
    cv::Scalar color;

    AndroidBitmapInfo bitmapInfo;
    uint32_t *bitmapContent;

    if (AndroidBitmap_getInfo(env, pTarget, &bitmapInfo) < 0) abort();
    if (bitmapInfo.format != ANDROID_BITMAP_FORMAT_RGBA_8888) abort();
    if (AndroidBitmap_lockPixels(env, pTarget, (void **) &bitmapContent) < 0) abort();

    /// Access source array data... OK
    jbyte *source = (jbyte *) env->GetPrimitiveArrayCritical(pSource, 0);
    if (source == NULL) abort();

    /// cv::Mat for YUV420sp source and output BGRA
    cv::Mat srcGray(bitmapInfo.height, bitmapInfo.width, CV_8UC1, (unsigned char *) source);
    cv::Mat src(bitmapInfo.height + bitmapInfo.height / 2, bitmapInfo.width, CV_8UC1,
                (unsigned char *) source);
    cv::Mat mbgra(bitmapInfo.height, bitmapInfo.width, CV_8UC4, (unsigned char *) bitmapContent);


/***********************************************************************************************/
    /// Native Image Processing HERE...

    start = static_cast<uint32_t>(cv::getTickCount());

    if (srcBGR.empty())
        srcBGR = cv::Mat(bitmapInfo.height, bitmapInfo.width, CV_8UC3);

    // RGB frame input
    cv::cvtColor(src, srcBGR, CV_YUV420sp2RGB);

    /// If previous frame doesn't exist yet, initialize to srcGray
    if (previous_gray_frame.empty()) {
        srcGray.copyTo(previous_gray_frame);
        LOGI("Initializing previous frame...");
    }

    if (detector == NULL)
        detector = cv::ORB::create();

    // create(int nfeatures=500, float scaleFactor=1.2f, int nlevels=8, int edgeThreshold=31,
    // int firstLevel=0, int WTA_K=2, int scoreType=ORB::HARRIS_SCORE, int patchSize=31, int fastThreshold=20);


    // Detect ORB keypoints in the previous gray frame.
    detector->detect(previous_gray_frame, previous_keypoints, cv::Mat());


    // Debugging
    //LOGI("previous_keypoints.size() = %d", previous_keypoints.size());

    // Convert Keypoints to Points
    keypoints2points(previous_keypoints, previous_features);

    //LOGI("previous_features.size() = %d", previous_features.size());

    // Refines the corner locations
     cornerSubPix(previous_gray_frame, previous_features,
                 cv::Size(win_size, win_size), cv::Size(-1, -1),
                cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.03));

    std::vector<uchar> features_found;
    // The output status vector. Each element of the vector is set to 1 if the flow for
    // the corresponding features has been found, 0 otherwise

    // Calculates the optical flow for a sparse feature set using the iterative Lucas-Kanade method with
    // pyramids
    cv::calcOpticalFlowPyrLK(previous_gray_frame, srcGray,
                             previous_features, current_features, features_found,
                             cv::noArray(), cv::Size(win_size * 4 + 1, win_size * 4 + 1), 0,
                             cv::TermCriteria(CV_TERMCRIT_ITER | CV_TERMCRIT_EPS, 20, 0.3));

    for (int i = 0; i < (int) previous_features.size(); i++) {
        if (features_found[i]) {
            // Randomize color and display the velocity vectors
            color = cv::Scalar(randnum.uniform(0, 255), randnum.uniform(0, 255),
                               randnum.uniform(0, 255));
            line(srcBGR, previous_features[i], current_features[i], color);
        }
    }


    LOGI("Processing took %0.2f ms.", 1000*(static_cast<uint32_t>(cv::getTickCount()) - start)/(float)cv::getTickFrequency());

    cvtColor(srcBGR, mbgra, CV_BGR2BGRA);

    // Copy the current gray frame into previous_gray_frame
    srcGray.copyTo(previous_gray_frame);

/************************************************************************************************/

    /// Release Java byte buffer and unlock backing bitmap
    //env-> ReleasePrimitiveArrayCritical(pSource,source,0);
    /*
     * If 0, then JNI should copy the modified array back into the initial Java
     * array and tell JNI to release its temporary memory buffer.
     *
     * */

    env->ReleasePrimitiveArrayCritical(pSource, source, JNI_COMMIT);
    /*
 * If JNI_COMMIT, then JNI should copy the modified array back into the
 * initial array but without releasing the memory. That way, the client code
 * can transmit the result back to Java while still pursuing its work on the
 * memory buffer
 *
 * */

    /*
     * Get<Primitive>ArrayCritical() and Release<Primitive>ArrayCritical()
     * are similar to Get<Primitive>ArrayElements() and Release<Primitive>ArrayElements()
     * but are only available to provide a direct access to the target array
     * (instead of a copy). In exchange, the caller must not perform blocking
     * or JNI calls and should not hold the array for a long time
     *
     */


    if (AndroidBitmap_unlockPixels(env, pTarget) < 0) abort();
}
Example No. 17
/*
generate the data needed to train a codebook/vocabulary for bag-of-words methods
*/
int generateVocabTrainData(std::string trainPath,
						   std::string vocabTrainDataPath,
						   cv::Ptr<cv::FeatureDetector> &detector,
						   cv::Ptr<cv::DescriptorExtractor> &extractor)
{

	//Do not overwrite any files
	std::ifstream checker;
	checker.open(vocabTrainDataPath.c_str());
	if(checker.is_open()) {	
		std::cerr << vocabTrainDataPath << ": Training Data already present" <<
			std::endl;
		checker.close();
		return -1;
	}

	//load training movie
	cv::VideoCapture movie;
	movie.open(trainPath);
	if (!movie.isOpened()) {
		std::cerr << trainPath << ": training movie not found" << std::endl;
		return -1;
	}

	//extract data
	std::cout << "Extracting Descriptors" << std::endl;
	cv::Mat vocabTrainData;
	cv::Mat frame, descs, feats;
	std::vector<cv::KeyPoint> kpts;
	
	std::cout.setf(std::ios_base::fixed); 
	std::cout.precision(0);
	
	while(movie.read(frame)) {

		//detect & extract features
		detector->detect(frame, kpts);
		extractor->compute(frame, kpts, descs);

		//add all descriptors to the training data 
		vocabTrainData.push_back(descs);

		//show progress
		cv::drawKeypoints(frame, kpts, feats);
		cv::imshow("Training Data", feats);
		
		std::cout << 100.0*(movie.get(CV_CAP_PROP_POS_FRAMES) /
			movie.get(CV_CAP_PROP_FRAME_COUNT)) << "%. " <<
			vocabTrainData.rows << " descriptors         \r" << std::flush;
		
		if(cv::waitKey(5) == 27) {
			cv::destroyWindow("Training Data");
			std::cout << std::endl;
			return -1;
		}

	}
	cv::destroyWindow("Training Data");
	std::cout << "Done: " << vocabTrainData.rows << " Descriptors" << std::endl;

	//save the training data
	cv::FileStorage fs;	
	fs.open(vocabTrainDataPath, cv::FileStorage::WRITE);
	fs << "VocabTrainData" << vocabTrainData;
	fs.release();

	return 0;
}
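The descriptors saved by the function above still have to be clustered into the vocabulary that the bag-of-words stages expect. A hedged sketch of that follow-up step is shown below, using cv::BOWKMeansTrainer (the cluster count, termination criteria and file names are assumptions; the descriptors must be CV_32F, e.g. SURF or SIFT).

#include <opencv2/opencv.hpp>

int main()
{
	// Load the descriptors written by generateVocabTrainData()
	cv::FileStorage fs("vocab_train_data.yml", cv::FileStorage::READ); // placeholder path
	cv::Mat vocabTrainData;
	fs["VocabTrainData"] >> vocabTrainData;
	fs.release();
	if (vocabTrainData.empty()) return -1;

	// k-means clustering into a visual vocabulary; k = 1000 is an arbitrary choice
	cv::BOWKMeansTrainer trainer(1000,
		cv::TermCriteria(cv::TermCriteria::COUNT + cv::TermCriteria::EPS, 10, 0.001));
	cv::Mat vocab = trainer.cluster(vocabTrainData);

	// Save under the "Vocabulary" key that generateBOWImageDescs() above reads back
	cv::FileStorage out("vocab.yml", cv::FileStorage::WRITE);
	out << "Vocabulary" << vocab;
	out.release();
	return 0;
}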
Example No. 18
void reduce_measurement_g2o::compute_features(const cv::Mat & rgb,
		const cv::Mat & depth, const Eigen::Vector3f & intrinsics,
		cv::Ptr<cv::FeatureDetector> & fd,
		cv::Ptr<cv::DescriptorExtractor> & de,
		std::vector<cv::KeyPoint> & filtered_keypoints,
		pcl::PointCloud<pcl::PointXYZ> & keypoints3d, cv::Mat & descriptors) {
	cv::Mat gray;

	if (rgb.channels() != 1) {
		cv::cvtColor(rgb, gray, cv::COLOR_BGR2GRAY);
	} else {
		gray = rgb;
	}

	cv::GaussianBlur(gray, gray, cv::Size(3, 3), 3);

	int threshold = 400;
	fd->setInt("hessianThreshold", threshold);

	//int threshold = 100;
	//fd->setInt("thres", threshold);

	std::vector<cv::KeyPoint> keypoints;

	cv::Mat mask(depth.size(), CV_8UC1);
	depth.convertTo(mask, CV_8U);

	fd->detect(gray, keypoints, mask);

	for (int i = 0; i < 5; i++) {
		if (keypoints.size() < 300) {
			threshold = threshold / 2;
			fd->setInt("hessianThreshold", threshold);
			//fd->setInt("thres", threshold);
			keypoints.clear();
			fd->detect(gray, keypoints, mask);
		} else {
			break;
		}
	}

	if (keypoints.size() > 400)
		keypoints.resize(400);

	filtered_keypoints.clear();
	keypoints3d.clear();

	for (size_t i = 0; i < keypoints.size(); i++) {
		if (depth.at<unsigned short>(keypoints[i].pt) != 0) {
			filtered_keypoints.push_back(keypoints[i]);

			pcl::PointXYZ p;
			p.z = depth.at<unsigned short>(keypoints[i].pt) / 1000.0f;
			p.x = (keypoints[i].pt.x - intrinsics[1]) * p.z / intrinsics[0];
			p.y = (keypoints[i].pt.y - intrinsics[2]) * p.z / intrinsics[0];

			//ROS_INFO("Point %f %f %f from  %f %f ", p.x, p.y, p.z, keypoints[i].pt.x, keypoints[i].pt.y);

			keypoints3d.push_back(p);

		}
	}

	de->compute(gray, filtered_keypoints, descriptors);
}
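A hedged usage sketch for compute_features (assumptions: OpenCV 2.4.x with the nonfree module, since the setInt("hessianThreshold", ...) call above implies a SURF detector created through the old Algorithm interface; the intrinsics vector is packed as (focal length, cx, cy), matching how it is indexed above; an instance rm of reduce_measurement_g2o is assumed to exist).

#include <opencv2/opencv.hpp>
#include <opencv2/nonfree/nonfree.hpp>   // assumes OpenCV 2.4.x with the nonfree module
#include <Eigen/Core>
#include <pcl/point_cloud.h>
#include <pcl/point_types.h>

void compute_features_example(const cv::Mat &rgb, const cv::Mat &depth,
		reduce_measurement_g2o &rm)
{
	cv::initModule_nonfree();        // register SURF with the string-based factory

	cv::Ptr<cv::FeatureDetector> fd = cv::FeatureDetector::create("SURF");
	cv::Ptr<cv::DescriptorExtractor> de = cv::DescriptorExtractor::create("SURF");

	// (f, cx, cy) -- placeholder intrinsics for a 640x480 depth camera
	Eigen::Vector3f intrinsics(525.0f, 319.5f, 239.5f);

	std::vector<cv::KeyPoint> keypoints;
	pcl::PointCloud<pcl::PointXYZ> keypoints3d;
	cv::Mat descriptors;

	rm.compute_features(rgb, depth, intrinsics, fd, de, keypoints, keypoints3d, descriptors);
}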