AlprImpl::AlprImpl(const std::string country, const std::string configFile, const std::string runtimeDir)
{
  timespec startTime;
  getTimeMonotonic(&startTime);

  config = new Config(country, configFile, runtimeDir);

  plateDetector = ALPR_NULL_PTR;
  stateDetector = ALPR_NULL_PTR;
  ocr = ALPR_NULL_PTR;
  prewarp = ALPR_NULL_PTR;

  // Config file or runtime dir not found.  Don't process any further.
  if (config->loaded == false)
  {
    return;
  }

  plateDetector = createDetector(config);
  ocr = new OCR(config);
  setNumThreads(0);
  setDetectRegion(DEFAULT_DETECT_REGION);
  this->topN = DEFAULT_TOPN;
  setDefaultRegion("");

  prewarp = new PreWarp(config);

  timespec endTime;
  getTimeMonotonic(&endTime);
  if (config->debugTiming)
    cout << "OpenALPR Initialization Time: " << diffclock(startTime, endTime) << "ms." << endl;
}
AlprImpl::AlprImpl(const std::string country, const std::string configFile, const std::string runtimeDir)
{
  config = new Config(country, configFile, runtimeDir);

  plateDetector = ALPR_NULL_PTR;
  stateIdentifier = ALPR_NULL_PTR;
  ocr = ALPR_NULL_PTR;
  prewarp = ALPR_NULL_PTR;

  // Config file or runtime dir not found.  Don't process any further.
  if (config->loaded == false)
  {
    return;
  }

  plateDetector = createDetector(config);
  ocr = new OCR(config);
  setNumThreads(0);
  setDetectRegion(DEFAULT_DETECT_REGION);
  this->topN = DEFAULT_TOPN;
  setDefaultRegion("");

  prewarp = new PreWarp(config);
}
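Both AlprImpl constructors above are normally reached through OpenALPR's public alpr::Alpr wrapper rather than instantiated directly. The following is a minimal usage sketch, assuming the standard public API (Alpr, isLoaded, setTopN, recognize by file path); the config file and image paths are hypothetical.

#include <alpr.h>
#include <iostream>

int main()
{
  // "us" and "openalpr.conf" mirror the country/configFile arguments above;
  // the image path is a placeholder.
  alpr::Alpr openalpr("us", "openalpr.conf");
  if (!openalpr.isLoaded())          // corresponds to config->loaded == false above
  {
    std::cerr << "Error loading OpenALPR" << std::endl;
    return 1;
  }

  openalpr.setTopN(10);              // corresponds to this->topN in the constructor

  alpr::AlprResults results = openalpr.recognize("/path/to/plate.jpg");
  for (size_t i = 0; i < results.plates.size(); i++)
    std::cout << results.plates[i].bestPlate.characters << std::endl;

  return 0;
}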
ProcPipeGuiElem* createByNo( int eno, GlobalInfo *info )
{
  switch( eno )
  {
    case 1:
      return createDetector( info );
    case 2:
      return createTransform( info );
    case 3:
      return createLogic( info );
    default:
      throw "bad elem no";
  }
}
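Since createByNo throws a C-string for unknown ids, callers have to guard against that. A minimal usage sketch, assuming a hypothetical buildPipeline helper that collects the elements into a vector:

#include <vector>
#include <iostream>

void buildPipeline(GlobalInfo *info, std::vector<ProcPipeGuiElem*> &pipeline)
{
  for (int eno = 1; eno <= 3; ++eno)
  {
    try
    {
      pipeline.push_back(createByNo(eno, info));
    }
    catch (const char *msg)   // createByNo throws a string literal on bad ids
    {
      std::cerr << "skipping element " << eno << ": " << msg << std::endl;
    }
  }
}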
OpenNIListener::OpenNIListener(GraphManager* graph_mgr)
: graph_mgr_(graph_mgr),
  stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
  visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
  depth_mono8_img_(cv::Mat()),
  pause_(ParameterServer::instance()->get<bool>("start_paused")),
  getOneFrame_(false),
  first_frame_(true),
  data_id_(0),
  num_processed_(0),
  image_encoding_("rgb8")
{
  ParameterServer* ps = ParameterServer::instance();
  if(ps->get<bool>("encoding_bgr")){
    image_encoding_ = "bgr8"; // used in conversion to qimage. exact value does not matter, just not rgb8
  }
  detector_ = createDetector(ps->get<std::string>("feature_detector_type"));
  extractor_ = createDescriptorExtractor(ps->get<std::string>("feature_extractor_type"));
  setupSubscribers();
}
OpenNIListener::OpenNIListener(GraphManager* graph_mgr)
: graph_mgr_(graph_mgr),
  stereo_sync_(NULL), kinect_sync_(NULL), no_cloud_sync_(NULL),
  visua_sub_(NULL), depth_sub_(NULL), cloud_sub_(NULL),
  depth_mono8_img_(cv::Mat()),
  save_bag_file(false),
  pause_(ParameterServer::instance()->get<bool>("start_paused")),
  getOneFrame_(false),
  first_frame_(true),
  data_id_(0),
  num_processed_(0),
  image_encoding_("rgb8")
{
  ParameterServer* ps = ParameterServer::instance();
  int q = ps->get<int>("subscriber_queue_size");
  if(ps->get<bool>("encoding_bgr")){
    image_encoding_ = "bgr8"; // used in conversion to qimage. exact value does not matter, just not rgb8
  }

  std::string bagfile_name = ps->get<std::string>("bagfile_name");
  std::string visua_tpc = ps->get<std::string>("topic_image_mono");
  std::string depth_tpc = ps->get<std::string>("topic_image_depth");
  std::string cinfo_tpc = ps->get<std::string>("camera_info_topic");

  ros::NodeHandle nh;
  tflistener_ = new tf::TransformListener(nh);

  if(bagfile_name.empty()){
    std::string cloud_tpc = ps->get<std::string>("topic_points");
    std::string widev_tpc = ps->get<std::string>("wide_topic");
    std::string widec_tpc = ps->get<std::string>("wide_cloud_topic");

    // All information from Kinect
    if(!visua_tpc.empty() && !depth_tpc.empty() && !cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, cloud_tpc, q);
      kinect_sync_ = new message_filters::Synchronizer<KinectSyncPolicy>(KinectSyncPolicy(q), *visua_sub_, *depth_sub_, *cloud_sub_);
      kinect_sync_->registerCallback(boost::bind(&OpenNIListener::kinectCallback, this, _1, _2, _3));
      ROS_INFO_STREAM("Listening to " << visua_tpc << ", " << depth_tpc << " and " << cloud_tpc);
    }
    // No cloud, but visual image and depth
    else if(!visua_tpc.empty() && !depth_tpc.empty() && !cinfo_tpc.empty() && cloud_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, visua_tpc, q);
      depth_sub_ = new image_sub_type(nh, depth_tpc, q);
      cinfo_sub_ = new cinfo_sub_type(nh, cinfo_tpc, q);
      no_cloud_sync_ = new message_filters::Synchronizer<NoCloudSyncPolicy>(NoCloudSyncPolicy(q), *visua_sub_, *depth_sub_, *cinfo_sub_);
      no_cloud_sync_->registerCallback(boost::bind(&OpenNIListener::noCloudCallback, this, _1, _2, _3));
      ROS_INFO_STREAM("Listening to " << visua_tpc << " and " << depth_tpc);
    }
    // All information from stereo
    if(!widec_tpc.empty() && !widev_tpc.empty())
    {
      visua_sub_ = new image_sub_type(nh, widev_tpc, q);
      cloud_sub_ = new pc_sub_type(nh, widec_tpc, q);
      stereo_sync_ = new message_filters::Synchronizer<StereoSyncPolicy>(StereoSyncPolicy(q), *visua_sub_, *cloud_sub_);
      stereo_sync_->registerCallback(boost::bind(&OpenNIListener::stereoCallback, this, _1, _2));
      ROS_INFO_STREAM("Listening to " << widev_tpc << " and " << widec_tpc);
    }
  }

  detector_ = cv::Ptr<cv::FeatureDetector>( createDetector(ps->get<std::string>("feature_detector_type")) );
  extractor_ = cv::Ptr<cv::DescriptorExtractor>( createDescriptorExtractor(ps->get<std::string>("feature_extractor_type")) );
}
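In both OpenNIListener constructors, createDetector takes the detector name from the parameter server and returns a raw cv::FeatureDetector* that is then wrapped in a cv::Ptr. A minimal sketch of such a string-keyed factory, assuming the OpenCV 2.x features2d API; the detector names and parameter values here are illustrative, not the project's actual defaults.

#include <opencv2/features2d/features2d.hpp>
#include <string>

cv::FeatureDetector* createDetector(const std::string& detectorType)
{
  // Dispatch on the name read from the "feature_detector_type" parameter.
  if (detectorType == "ORB")  return new cv::OrbFeatureDetector(600);            // max 600 keypoints
  if (detectorType == "FAST") return new cv::FastFeatureDetector(20, true);      // threshold 20, non-max suppression
  if (detectorType == "GFTT") return new cv::GoodFeaturesToTrackDetector(500);   // max 500 corners

  // Fall back to ORB for unrecognized names in this sketch.
  return new cv::OrbFeatureDetector(600);
}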