void OpenNIListener::setupSubscribers(){ ParameterServer* ps = ParameterServer::instance(); int q = ps->get<int>("subscriber_queue_size"); ros::NodeHandle nh; tflistener_ = new tf::TransformListener(nh); std::string bagfile_name = ps->get<std::string>("bagfile_name"); if(bagfile_name.empty()){ std::string visua_tpc = ps->get<std::string>("topic_image_mono"); std::string depth_tpc = ps->get<std::string>("topic_image_depth"); std::string cinfo_tpc = ps->get<std::string>("camera_info_topic"); std::string cloud_tpc = ps->get<std::string>("topic_points"); std::string widev_tpc = ps->get<std::string>("wide_topic"); std::string widec_tpc = ps->get<std::string>("wide_cloud_topic"); //All information from Kinect if(!visua_tpc.empty() && !depth_tpc.empty() && !cloud_tpc.empty()) { visua_sub_ = new image_sub_type(nh, visua_tpc, q); depth_sub_ = new image_sub_type (nh, depth_tpc, q); cloud_sub_ = new pc_sub_type (nh, cloud_tpc, q); kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q), *visua_sub_, *depth_sub_, *cloud_sub_), kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3)); ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << visua_tpc << ", " << depth_tpc << " and " << cloud_tpc); } //No cloud, but visual image and depth else if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && cloud_tpc.empty()) { visua_sub_ = new image_sub_type(nh, visua_tpc, q); depth_sub_ = new image_sub_type(nh, depth_tpc, q); cinfo_sub_ = new cinfo_sub_type(nh, cinfo_tpc, q); no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q), *visua_sub_, *depth_sub_, *cinfo_sub_); no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3)); ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << visua_tpc << " and " << depth_tpc); } //All information from stereo if(!widec_tpc.empty() && !widev_tpc.empty()) { visua_sub_ = new 
image_sub_type(nh, widev_tpc, q); cloud_sub_ = new pc_sub_type(nh, widec_tpc, q); stereo_sync_ = new message_filters::Synchronizer<StereoSyncPolicy>(StereoSyncPolicy(q), *visua_sub_, *cloud_sub_); stereo_sync_->registerCallback(boost::bind(&OpenNIListener::stereoCallback, this, _1, _2)); ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to " << widev_tpc << " and " << widec_tpc ); } if(ps->get<bool>("use_robot_odom")){ odom_sub_= new odom_sub_type(nh, ps->get<std::string>("odometry_tpc"), 3); ROS_INFO_STREAM_NAMED("OpenNIListener", "Listening to odometry on " << ps->get<std::string>("odometry_tpc")); odom_sub_->registerCallback(boost::bind(&OpenNIListener::odomCallback,this,_1)); } } else { ROS_WARN("RGBDSLAM loads a bagfile - therefore doesn't subscribe to topics."); } }
/**
 * @brief Construct the listener, wire up ROS subscriptions, and create the
 *        feature detector/extractor.
 *
 * @param graph_mgr Graph manager that will receive the processed frames
 *                  (stored, not owned — TODO confirm ownership contract).
 *
 * Subscription wiring mirrors setupSubscribers(): depending on which topic
 * parameters are non-empty, one of the kinect / no-cloud / stereo
 * synchronizer pipelines is created. If "bagfile_name" is set, no topics
 * are subscribed here.
 *
 * NOTE(review): this duplicates most of setupSubscribers() (minus the
 * odometry subscription and the _NAMED logging) — consider delegating to it
 * to avoid the two copies drifting apart.
 */
OpenNIListener::OpenNIListener(GraphManager* graph_mgr)
: graph_mgr_(graph_mgr),
  stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
  visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
  depth_mono8_img_(cv::Mat()),
  save_bag_file(false),
  pause_(ParameterServer::instance()->get<bool>("start_paused")),
  getOneFrame_(false),
  first_frame_(true),
  data_id_(0),
  num_processed_(0),
  image_encoding_("rgb8")
{
  ParameterServer* ps = ParameterServer::instance();
  int q = ps->get<int>("subscriber_queue_size");
  if(ps->get<bool>("encoding_bgr")){
    image_encoding_ = "bgr8";//used in conversion to qimage. exact value does not matter, just not rgb8
  }
  std::string bagfile_name = ps->get<std::string>("bagfile_name");
  std::string visua_tpc = ps->get<std::string>("topic_image_mono");
  std::string depth_tpc = ps->get<std::string>("topic_image_depth");
  std::string cinfo_tpc = ps->get<std::string>("camera_info_topic");
  ros::NodeHandle nh;
  tflistener_ = new tf::TransformListener(nh);
  if(bagfile_name.empty()){
    std::string cloud_tpc = ps->get<std::string>("topic_points");
    std::string widev_tpc = ps->get<std::string>("wide_topic");
    std::string widec_tpc = ps->get<std::string>("wide_cloud_topic");

    //All information from Kinect
    if(!visua_tpc.empty() && !depth_tpc.empty() && !cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, cloud_tpc, q);
      // FIX: the original ended this statement with a comma operator (`),`)
      // instead of a semicolon — legal C++ but clearly a typo.
      kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q), *visua_sub_, *depth_sub_, *cloud_sub_);
      kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3));
      ROS_INFO_STREAM("Listening to " << visua_tpc << ", " << depth_tpc << " and " << cloud_tpc);
    }
    //No cloud, but visual image and depth
    else if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cinfo_sub_ = new cinfo_sub_type(nh, cinfo_tpc, q);
      no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q), *visua_sub_, *depth_sub_, *cinfo_sub_);
      no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
      ROS_INFO_STREAM("Listening to " << visua_tpc << " and " << depth_tpc);
    }

    //All information from stereo
    // NOTE(review): kept as `if` (not `else if`) to preserve original
    // behavior; configuring both kinect and wide topics reassigns
    // visua_sub_/cloud_sub_ and leaks the kinect subscriber objects.
    if(!widec_tpc.empty() && !widev_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, widev_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, widec_tpc, q);
      stereo_sync_ = new message_filters::Synchronizer<StereoSyncPolicy>(StereoSyncPolicy(q), *visua_sub_, *cloud_sub_);
      stereo_sync_->registerCallback(boost::bind(&OpenNIListener::stereoCallback, this, _1, _2));
      ROS_INFO_STREAM("Listening to " << widev_tpc << " and " << widec_tpc );
    }
  }

  //Feature machinery is created regardless of the subscription mode, so a
  //bagfile run can still extract features.
  detector_ = cv::Ptr<cv::FeatureDetector>( createDetector(ps->get<std::string>("feature_detector_type")) );
  extractor_ = cv::Ptr<cv::DescriptorExtractor>( createDescriptorExtractor(ps->get<std::string>("feature_extractor_type")) );
}