Example #1
	//--------------------------------------------------
	// As of writing this, you can use:
	// SIFT, SURF
	Mat MatchableImage::findDescriptors(string alg_name)
	{
		if(descriptorsCurrent && descriptorAlgUsed.compare(alg_name)==0) return descriptors;
		if(!featuresCurrent) findFeatures();
		
	if(empty() || features.size()==0)
	{
		log(LOG_LEVEL_ERROR, "in findDescriptors(), image is empty or there are no features in image");
		return Mat(); // bail out; computing descriptors on empty data would fail
	}
		
		Mat cvImageGray;
		cvtColor(cvImage, cvImageGray, CV_RGB2GRAY);
		
		Ptr<DescriptorExtractor> descriptorExtractor = createDescriptorExtractor(alg_name);
	if(descriptorExtractor==0)
	{
		log(LOG_LEVEL_ERROR, "Descriptor extractor '%s' not found!", alg_name.c_str());
		return Mat(); // avoid dereferencing a null extractor
	}
		
		descriptorExtractor->compute( cvImageGray, features, descriptors );
		log(LOG_LEVEL_DEBUG, "in findDescriptors(), %d x %d descriptors", descriptors.rows, descriptors.cols);
		
		descriptorAlgUsed = alg_name;
		descriptorsCurrent=true;
		matcherTrained=false;
		return descriptors;
	}
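
A minimal usage sketch for the method above (how MatchableImage is constructed and its image loaded is an assumption; the excerpt only shows findDescriptors()):

	MatchableImage img;                      // assumed: default construction plus some loading API
	// ... fill img with an RGB frame ...
	Mat desc = img.findDescriptors("SURF");  // detects features if needed, then computes descriptors
	Mat same = img.findDescriptors("SURF");  // cache hit: returns the stored matrix without recomputing
	Mat sift = img.findDescriptors("SIFT");  // different algorithm name: descriptors are recomputed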
Example #2
OpenNIListener::OpenNIListener(GraphManager* graph_mgr)
: graph_mgr_(graph_mgr),
  stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
  visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
  depth_mono8_img_(cv::Mat()),
  pause_(ParameterServer::instance()->get<bool>("start_paused")),
  getOneFrame_(false),
  first_frame_(true),
  data_id_(0),
  num_processed_(0),
  image_encoding_("rgb8")
{
  ParameterServer* ps = ParameterServer::instance();
  if(ps->get<bool>("encoding_bgr")){
    image_encoding_ = "bgr8"; // used in the conversion to QImage; the exact value does not matter, as long as it is not rgb8
  }
  detector_ = createDetector(ps->get<std::string>("feature_detector_type"));
  extractor_ = createDescriptorExtractor(ps->get<std::string>("feature_extractor_type"));
  setupSubscribers();
}
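
createDetector() and createDescriptorExtractor() are not part of the excerpt. A minimal sketch of what they plausibly wrap, assuming the stock OpenCV 2.x factories (the real project may do more, and Example #3 suggests they return raw pointers; this sketch returns cv::Ptr directly for simplicity):

#include <opencv2/features2d/features2d.hpp>
#include <opencv2/nonfree/nonfree.hpp> // SIFT/SURF live in the nonfree module as of OpenCV 2.4

cv::Ptr<cv::FeatureDetector> createDetector(const std::string& type)
{
  cv::initModule_nonfree();                 // registers SIFT/SURF with the factory
  return cv::FeatureDetector::create(type); // empty Ptr if 'type' is unknown
}

cv::Ptr<cv::DescriptorExtractor> createDescriptorExtractor(const std::string& type)
{
  cv::initModule_nonfree();
  return cv::DescriptorExtractor::create(type);
}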
Example #3
OpenNIListener::OpenNIListener(GraphManager* graph_mgr)
: graph_mgr_(graph_mgr),
  stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
  visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
  depth_mono8_img_(cv::Mat()),
  save_bag_file(false),
  pause_(ParameterServer::instance()->get<bool>("start_paused")),
  getOneFrame_(false),
  first_frame_(true),
  data_id_(0),
  num_processed_(0),
  image_encoding_("rgb8")
{
  ParameterServer* ps = ParameterServer::instance();
  int q = ps->get<int>("subscriber_queue_size");
  if(ps->get<bool>("encoding_bgr")){
    image_encoding_ = "bgr8"; // used in the conversion to QImage; the exact value does not matter, as long as it is not rgb8
  }
  std::string bagfile_name = ps->get<std::string>("bagfile_name");
  std::string visua_tpc = ps->get<std::string>("topic_image_mono");
  std::string depth_tpc = ps->get<std::string>("topic_image_depth");
  std::string cinfo_tpc = ps->get<std::string>("camera_info_topic");
  ros::NodeHandle nh;
  tflistener_ = new tf::TransformListener(nh);
  if(bagfile_name.empty()){
    std::string cloud_tpc = ps->get<std::string>("topic_points");
    std::string widev_tpc = ps->get<std::string>("wide_topic");
    std::string widec_tpc = ps->get<std::string>("wide_cloud_topic");

    //All information from Kinect
    if(!visua_tpc.empty() && !depth_tpc.empty() && !cloud_tpc.empty())
    {   
        visua_sub_ = new image_sub_type(nh, visua_tpc, q);
        depth_sub_ = new image_sub_type (nh, depth_tpc, q);
        cloud_sub_ = new pc_sub_type (nh, cloud_tpc, q);  
        kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q), *visua_sub_, *depth_sub_, *cloud_sub_);
        kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3));
        ROS_INFO_STREAM("Listening to " << visua_tpc << ", " << depth_tpc << " and " << cloud_tpc);
    } 
    //No cloud, but visual image and depth
    else if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && cloud_tpc.empty())
    {   
        visua_sub_ = new image_sub_type(nh, visua_tpc, q);
        depth_sub_ = new image_sub_type(nh, depth_tpc, q);
        cinfo_sub_ = new cinfo_sub_type(nh, cinfo_tpc, q);
        no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q),  *visua_sub_, *depth_sub_, *cinfo_sub_);
        no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
        ROS_INFO_STREAM("Listening to " << visua_tpc << " and " << depth_tpc);
    } 

    //All information from stereo                                               
    if(!widec_tpc.empty() && !widev_tpc.empty())
    {   
      visua_sub_ = new image_sub_type(nh, widev_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, widec_tpc, q);
      stereo_sync_ = new message_filters::Synchronizer<StereoSyncPolicy>(StereoSyncPolicy(q), *visua_sub_, *cloud_sub_);
      stereo_sync_->registerCallback(boost::bind(&OpenNIListener::stereoCallback, this, _1, _2));
      ROS_INFO_STREAM("Listening to " << widev_tpc << " and " << widec_tpc );
    } 
  } 
  detector_ = cv::Ptr<cv::FeatureDetector>( createDetector(ps->get<std::string>("feature_detector_type")) );
  extractor_ = cv::Ptr<cv::DescriptorExtractor>( createDescriptorExtractor(ps->get<std::string>("feature_extractor_type")) );
}
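
The subscriber and synchronizer typedefs used above (image_sub_type, pc_sub_type, KinectSyncPolicy, ...) sit outside the excerpt. A plausible reconstruction on top of message_filters, with ApproximateTime policies whose message lists match the arguments each synchronizer receives (the exact message types are assumptions):

#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <sensor_msgs/Image.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/PointCloud2.h>

typedef message_filters::Subscriber<sensor_msgs::Image>       image_sub_type;
typedef message_filters::Subscriber<sensor_msgs::CameraInfo>  cinfo_sub_type;
typedef message_filters::Subscriber<sensor_msgs::PointCloud2> pc_sub_type;

typedef message_filters::sync_policies::ApproximateTime<
    sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::PointCloud2> KinectSyncPolicy;  // visual, depth, cloud
typedef message_filters::sync_policies::ApproximateTime<
    sensor_msgs::Image, sensor_msgs::Image, sensor_msgs::CameraInfo>  NoCloudSyncPolicy; // visual, depth, camera info
typedef message_filters::sync_policies::ApproximateTime<
    sensor_msgs::Image, sensor_msgs::PointCloud2>                     StereoSyncPolicy;  // wide image, wide cloud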