void ImageProcessing::createROSSubscribers()
{


    if(parameters_.source_ == "camera")
    {

        //! Transport hint: use the image transport configured in the parameters (e.g. raw or compressed)
        image_transport::TransportHints hints(parameters_.image_transport_,ros::TransportHints(),node_handle_);

        //! image subscription
        image_subscriber_ = it_.subscribe(parameters_.image_topic_,1,&ImageProcessing::imageCallback,this,hints);

    }
    else if (parameters_.source_ == "sonar")
    {

        //! Transport hint: use the sonar transport configured in the parameters
        image_transport::TransportHints hints(parameters_.sonar_transport_,ros::TransportHints(),node_handle_);

        //! sonar image subscription
        image_subscriber_ = it_.subscribe(parameters_.sonar_topic_,1,&ImageProcessing::imageCallback,this,hints);

    }


}
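For reference, the imageCallback bound above is not shown in this snippet; an image_transport subscriber callback receives a sensor_msgs::ImageConstPtr. A minimal free-function sketch of what it might look like (the cv_bridge conversion and the "bgr8" encoding are assumptions, not taken from the original class):

#include <cv_bridge/cv_bridge.h>
#include <sensor_msgs/Image.h>

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    // Convert the ROS image to an OpenCV matrix, sharing data where possible.
    cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, "bgr8");

    // ... run the actual image/sonar processing on cv_ptr->image ...
}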
Example #2
MainWindow::MainWindow(QWidget *parent) :
        QMainWindow(parent),
        ui(new Ui::MainWindow)
{
    ui->setupUi(this);
    computer_timer.setSingleShot(true);
    connect(&computer_timer, SIGNAL(timeout()), this, SLOT(computerPlay()));
    //connect(this, SIGNAL(gameOver(QString)), this, SLOT(setGameOver(QString)));
    //connect(ui->gameBoard, SIGNAL(boardLeftClicked(QString,QString)),this,SLOT(doPlay(QString)));
    connect(ui->gameBoard, SIGNAL(boardLeftClicked(QString)),this,SLOT(doPlay(QString)));
    connect(&players, SIGNAL(blackScore(QString)), this, SLOT(updateBlackScore(QString)));
    connect(&players, SIGNAL(whiteScore(QString)), this, SLOT(updateWhiteScore(QString)));
    connect(&gtp, SIGNAL(move(QString,QString)),ui->gameBoard,SLOT(placeStone(QString,QString)));
    connect(&gtp, SIGNAL(move(QString,QString)),this,SLOT(addHistory(QString,QString)));
    connect(&gtp, SIGNAL(stoneListing(QString,QStringList)), ui->gameBoard, SLOT(checkStones(QString,QStringList)));
    connect(&gtp, SIGNAL(hints(QString,QStringList)), ui->gameBoard, SLOT(showTopMoves(QString,QStringList)));
    connect(&gtp, SIGNAL(blackScore(QString)), this, SLOT(updateBlackScore(QString)));
    connect(&gtp, SIGNAL(whiteScore(QString)), this, SLOT(updateWhiteScore(QString)));
    connect(&engine.process, SIGNAL(started()), this, SLOT(engineStarted()));
    readSettings();

    engine.addProgramArg("--mode gtp");
    engine.addProgramArg("--level 1");
    engine.start();
    gtp.setEngine(engine);
    ui->labelBlack->setStyleSheet("#labelBlack { color:#000000; background: #785229;}");
    ui->labelWhite->setStyleSheet("#labelWhite { color:#FFFFFF; background: #785229;}");
}
void DepthImageOccupancyMapUpdater::start()
{
  image_transport::TransportHints hints("raw", ros::TransportHints(), nh_);
  sub_depth_image_ = input_depth_transport_.subscribeCamera(image_topic_, queue_size_, &DepthImageOccupancyMapUpdater::depthImageCallback, this, hints);
  pub_model_depth_image_ = model_depth_transport_.advertiseCamera("model_depth", 10);
  pub_filtered_depth_image_ = filtered_depth_transport_.advertiseCamera("filtered_depth", 10);
}
// Handles (un)subscribing when clients (un)subscribe
void Stereoproc::connectCb()
{
    boost::lock_guard<boost::mutex> connect_lock(connect_mutex_);
    connected_.DebayerMonoLeft = (pub_mono_left_.getNumSubscribers() > 0)?1:0;
    connected_.DebayerMonoRight = (pub_mono_right_.getNumSubscribers() > 0)?1:0;
    connected_.DebayerColorLeft = (pub_color_left_.getNumSubscribers() > 0)?1:0;
    connected_.DebayerColorRight = (pub_color_right_.getNumSubscribers() > 0)?1:0;
    connected_.RectifyMonoLeft = (pub_mono_rect_left_.getNumSubscribers() > 0)?1:0;
    connected_.RectifyMonoRight = (pub_mono_rect_right_.getNumSubscribers() > 0)?1:0;
    connected_.RectifyColorLeft = (pub_color_rect_left_.getNumSubscribers() > 0)?1:0;
    connected_.RectifyColorRight = (pub_color_rect_right_.getNumSubscribers() > 0)?1:0;
    connected_.Disparity = (pub_disparity_.getNumSubscribers() > 0)?1:0;
    connected_.DisparityVis = (pub_disparity_vis_.getNumSubscribers() > 0)?1:0;
    connected_.Pointcloud = (pub_pointcloud_.getNumSubscribers() > 0)?1:0;
    int level = connected_.level();
    if (level == 0)
    {
        sub_l_raw_image_.unsubscribe();
        sub_l_info_.unsubscribe();
        sub_r_raw_image_.unsubscribe();
        sub_r_info_.unsubscribe();
    }
    else if (!sub_l_raw_image_.getSubscriber())
    {
        ros::NodeHandle &nh = getNodeHandle();
        // Queue size 1 should be OK; the one that matters is the synchronizer queue size.
        /// @todo Allow remapping left, right?
        image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
        sub_l_raw_image_.subscribe(*it_, "left/image_raw", 1, hints);
        sub_l_info_ .subscribe(nh,   "left/camera_info", 1);
        sub_r_raw_image_.subscribe(*it_, "right/image_raw", 1, hints);
        sub_r_info_ .subscribe(nh,   "right/camera_info", 1);
    }
}
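The comment above notes that the queue size that matters is the synchronizer's, but the synchronizer itself is not part of this snippet. A minimal, self-contained sketch of the kind of setup it refers to (topic names, the ApproximateTime policy and the queue size of 5 are assumptions):

#include <boost/bind.hpp>
#include <message_filters/subscriber.h>
#include <message_filters/synchronizer.h>
#include <message_filters/sync_policies/approximate_time.h>
#include <ros/ros.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>

typedef message_filters::sync_policies::ApproximateTime<
    sensor_msgs::Image, sensor_msgs::CameraInfo,
    sensor_msgs::Image, sensor_msgs::CameraInfo> StereoSyncPolicy;

void stereoCb(const sensor_msgs::ImageConstPtr&, const sensor_msgs::CameraInfoConstPtr&,
              const sensor_msgs::ImageConstPtr&, const sensor_msgs::CameraInfoConstPtr&)
{
  // A time-matched left/right image pair with camera infos arrives here.
}

int main(int argc, char** argv)
{
  ros::init(argc, argv, "stereo_sync_sketch");
  ros::NodeHandle nh;
  message_filters::Subscriber<sensor_msgs::Image> l_img(nh, "left/image_raw", 1);
  message_filters::Subscriber<sensor_msgs::CameraInfo> l_info(nh, "left/camera_info", 1);
  message_filters::Subscriber<sensor_msgs::Image> r_img(nh, "right/image_raw", 1);
  message_filters::Subscriber<sensor_msgs::CameraInfo> r_info(nh, "right/camera_info", 1);
  // The synchronizer queue size (5 here) is the one the comment says matters.
  message_filters::Synchronizer<StereoSyncPolicy> sync(StereoSyncPolicy(5),
                                                       l_img, l_info, r_img, r_info);
  sync.registerCallback(boost::bind(&stereoCb, _1, _2, _3, _4));
  ros::spin();
  return 0;
}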
Example #5
int main(int argc, char **argv)
{
    // Qt requires that we construct the global QApplication before creating any widgets.
    QApplication app(argc, argv);

    // use an ArgumentParser object to manage the program arguments.
    osg::ArgumentParser arguments(&argc,argv);

    bool useFrameLoopThread = true;
    if (arguments.read("--no-frame-thread")) useFrameLoopThread = false;

    osg::ref_ptr<osgQt::QWebViewImage> image = new osgQt::QWebViewImage;

    if (arguments.argc()>1) image->navigateTo((arguments[1]));
    else image->navigateTo("http://www.youtube.com/");

    osgWidget::GeometryHints hints(osg::Vec3(0.0f,0.0f,0.0f),
                                   osg::Vec3(1.0f,0.0f,0.0f),
                                   osg::Vec3(0.0f,0.0f,1.0f),
                                   osg::Vec4(1.0f,1.0f,1.0f,1.0f),
                                   osgWidget::GeometryHints::RESIZE_HEIGHT_TO_MAINTAINCE_ASPECT_RATIO);


    osg::ref_ptr<osgWidget::Browser> browser = new osgWidget::Browser;
    browser->assign(image.get(), hints);

    // image->focusBrowser(true);

    osg::ref_ptr<osgViewer::Viewer> viewer = new osgViewer::Viewer(arguments);
    viewer->setSceneData(browser.get());
    viewer->setCameraManipulator(new osgGA::TrackballManipulator());
    viewer->addEventHandler(new osgViewer::StatsHandler);
    viewer->addEventHandler(new osgViewer::WindowSizeHandler);

    if (useFrameLoopThread)
    {
        // create a thread to run the viewer's frame loop
        ViewerFrameThread viewerThread(viewer.get(), true);
        viewerThread.startThread();

        // now start the standard Qt event loop; it exits when the viewer thread calls QApplication::exit().
        return QApplication::exec();

    }
    else
    {
        // run the frame loop, interleaving Qt and the main OSG frame loop
        while(!viewer->done())
        {
            // process Qt events - this handles both events and paints the browser image
            QCoreApplication::processEvents(QEventLoop::AllEvents, 100);

            viewer->frame();
        }

        return 0;
    }
}
JSValue JSInjectedScriptHost::inspect(ExecState* exec)
{
    if (exec->argumentCount() >= 2) {
        ScriptValue object(exec->globalData(), exec->argument(0));
        ScriptValue hints(exec->globalData(), exec->argument(1));
        impl()->inspectImpl(object.toInspectorValue(exec), hints.toInspectorValue(exec));
    }
    return jsUndefined();
}
void V8InjectedScriptHost::inspectMethodCustom(const v8::FunctionCallbackInfo<v8::Value>& info)
{
    if (info.Length() < 2)
        return;

    InjectedScriptHost* host = V8InjectedScriptHost::toNative(info.Holder());
    ScriptValue object(info[0], info.GetIsolate());
    ScriptValue hints(info[1], info.GetIsolate());
    host->inspectImpl(object.toJSONValue(ScriptState::current()), hints.toJSONValue(ScriptState::current()));
}
JSValue JSCommandLineAPIHost::inspect(ExecState& state)
{
    if (state.argumentCount() >= 2) {
        Deprecated::ScriptValue object(state.vm(), state.uncheckedArgument(0));
        Deprecated::ScriptValue hints(state.vm(), state.uncheckedArgument(1));
        wrapped().inspectImpl(object.toInspectorValue(&state), hints.toInspectorValue(&state));
    }

    return jsUndefined();
}
JSValue JSCommandLineAPIHost::inspect(ExecState* exec)
{
    if (exec->argumentCount() >= 2) {
        Deprecated::ScriptValue object(exec->vm(), exec->uncheckedArgument(0));
        Deprecated::ScriptValue hints(exec->vm(), exec->uncheckedArgument(1));
        impl().inspectImpl(object.toInspectorValue(exec), hints.toInspectorValue(exec));
    }

    return jsUndefined();
}
void CalibProcNode::ConnectCb() {
  std::lock_guard<std::mutex> lock(connect_mutex_);
  if (!pub_calib_.getNumSubscribers()) {
    sub_image_.shutdown();
  } else if (!sub_image_) {
    image_transport::TransportHints hints("raw", ros::TransportHints(), nh_);
    sub_image_ =
        it_.subscribe("image_raw", 2, &CalibProcNode::ImageCb, this, hints);
  }
}
Example #11
void DetectorNode::ConnectCb() {
  std::lock_guard<std::mutex> lock(connect_mutex_);
  if (!pub_tags_.getNumSubscribers())
    sub_camera_.shutdown();
  else if (!sub_camera_) {
    image_transport::TransportHints hints("raw", ros::TransportHints(), nh_);
    sub_camera_ = it_.subscribeCamera("image_raw", 2, &DetectorNode::CameraCb,
                                      this, hints);
  }
}
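In this lazy-subscription pattern, ConnectCb is typically registered as the publisher's connect and disconnect callback, so the camera is only subscribed while someone listens to the output topic. A compact sketch of that wiring (the message type is a stand-in and the connect mutex is omitted for brevity; neither detail is taken from the nodes above):

#include <boost/bind.hpp>
#include <ros/ros.h>
#include <sensor_msgs/Image.h>
#include <std_msgs/Header.h>  // stand-in for the real detection/calibration message

class LazyNodeSketch
{
public:
  explicit LazyNodeSketch(ros::NodeHandle& nh) : nh_(nh), subscribed_(false)
  {
    // Fire ConnectCb whenever a subscriber appears on or leaves "tags".
    ros::SubscriberStatusCallback connect_cb =
        boost::bind(&LazyNodeSketch::ConnectCb, this);
    pub_tags_ = nh_.advertise<std_msgs::Header>("tags", 1, connect_cb, connect_cb);
  }

private:
  void ConnectCb()
  {
    if (pub_tags_.getNumSubscribers() == 0)
    {
      sub_image_.shutdown();
      subscribed_ = false;
    }
    else if (!subscribed_)
    {
      sub_image_ = nh_.subscribe("image_raw", 2, &LazyNodeSketch::ImageCb, this);
      subscribed_ = true;
    }
  }

  void ImageCb(const sensor_msgs::ImageConstPtr&) { /* process, then publish on pub_tags_ */ }

  ros::NodeHandle& nh_;
  ros::Publisher pub_tags_;
  ros::Subscriber sub_image_;
  bool subscribed_;
};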
Example #12
void Ephemeris<Frame>::FlowWithFixedStep(
    std::vector<not_null<DiscreteTrajectory<Frame>*>> const& trajectories,
    std::vector<IntrinsicAcceleration> const& intrinsic_accelerations,
    Instant const& t,
    FixedStepParameters const& parameters) {
  VLOG(1) << __FUNCTION__ << " " << NAMED(parameters.step_) << " " << NAMED(t);
  if (empty() || t > t_max()) {
    Prolong(t);
  }

  std::vector<typename ContinuousTrajectory<Frame>::Hint> hints(bodies_.size());
  NewtonianMotionEquation massless_body_equation;
  massless_body_equation.compute_acceleration =
      std::bind(&Ephemeris::ComputeMasslessBodiesTotalAccelerations,
                this,
                std::cref(intrinsic_accelerations), _1, _2, _3, &hints);

  typename NewtonianMotionEquation::SystemState initial_state;
  for (auto const& trajectory : trajectories) {
    auto const trajectory_last = trajectory->last();
    auto const last_degrees_of_freedom = trajectory_last.degrees_of_freedom();
    // TODO(phl): why do we keep rewriting this?  Should we check consistency?
    initial_state.time = trajectory_last.time();
    initial_state.positions.push_back(last_degrees_of_freedom.position());
    initial_state.velocities.push_back(last_degrees_of_freedom.velocity());
  }

  IntegrationProblem<NewtonianMotionEquation> problem;
  problem.equation = massless_body_equation;

#if defined(WE_LOVE_228)
  typename NewtonianMotionEquation::SystemState last_state;
  problem.append_state =
      [&last_state](
          typename NewtonianMotionEquation::SystemState const& state) {
        last_state = state;
      };
#else
  problem.append_state =
      std::bind(&Ephemeris::AppendMasslessBodiesState,
                _1, std::cref(trajectories));
#endif
  problem.t_final = t;
  problem.initial_state = &initial_state;

  parameters.integrator_->Solve(problem, parameters.step_);

#if defined(WE_LOVE_228)
  // The |positions| are empty if and only if |append_state| was never called;
  // in that case there was not enough room to advance the |trajectories|.
  if (!last_state.positions.empty()) {
    AppendMasslessBodiesState(last_state, trajectories);
  }
#endif
}
void SplitXB3Nodelet::onInit() {
    ros::NodeHandle &nh = getNodeHandle();
    ros::NodeHandle &nh_priv = getPrivateNodeHandle();

    it_.reset(new image_transport::ImageTransport(nh));

    std::string name;
    std::string narrow_frame_id;
    std::string wide_frame_id;

    nh_priv.param("name", name, std::string("camera"));

    narrow_ctx_.name = name;
    wide_ctx_.name = name;

    {   //narrow
        ros::NodeHandle nh_baseline(nh.getNamespace() + "/narrow");
        ros::NodeHandle nh_left(nh_baseline.getNamespace() + "/left");
        ros::NodeHandle nh_right(nh_baseline.getNamespace() + "/right");

        nh_priv.param("narrow/frame_id", narrow_ctx_.frame_id, std::string(""));
        nh_priv.param("narrow/left/camera_info_url", narrow_ctx_.left.url, std::string(""));
        nh_priv.param("narrow/right/camera_info_url", narrow_ctx_.right.url, std::string(""));

        narrow_ctx_.left.publisher = it_->advertiseCamera(nh_left.getNamespace() + "/image_raw", 1);
        narrow_ctx_.right.publisher = it_->advertiseCamera(nh_right.getNamespace() + "/image_raw", 1);

        narrow_ctx_.left.manager.reset(new camera_info_manager::CameraInfoManager(
                                           nh_left, narrow_ctx_.name + "_narrow_left", narrow_ctx_.left.url));
        narrow_ctx_.right.manager.reset(new camera_info_manager::CameraInfoManager(
                                            nh_right, narrow_ctx_.name + "_narrow_right", narrow_ctx_.right.url));
    }

    {   //wide
        ros::NodeHandle nh_baseline(nh.getNamespace() + "/wide");
        ros::NodeHandle nh_left(nh_baseline.getNamespace() + "/left");
        ros::NodeHandle nh_right(nh_baseline.getNamespace() + "/right");

        nh_priv.param("wide/frame_id", narrow_ctx_.frame_id, std::string(""));
        nh_priv.param("wide/left/camera_info_url", wide_ctx_.left.url, std::string(""));
        nh_priv.param("wide/right/camera_info_url", wide_ctx_.right.url, std::string(""));

        wide_ctx_.left.publisher = it_->advertiseCamera(nh_left.getNamespace() + "/image_raw", 1);
        wide_ctx_.right.publisher = it_->advertiseCamera(nh_right.getNamespace() + "/image_raw", 1);

        wide_ctx_.left.manager.reset(new camera_info_manager::CameraInfoManager(
                                         nh_left, wide_ctx_.name + "_wide_left", wide_ctx_.left.url));
        wide_ctx_.right.manager.reset(new camera_info_manager::CameraInfoManager(
                                          nh_right, wide_ctx_.name + "_wide_right", wide_ctx_.right.url));
    }

    image_transport::TransportHints hints("raw", ros::TransportHints(), nh);
    sub_ = it_->subscribe("camera/image_raw", 1, &SplitXB3Nodelet::imageCb, this, hints);
}
int main(int argc, char **argv)
{
    osg::ArgumentParser arguments(&argc, argv);
    osgViewer::Viewer   viewer(arguments);

    osgWidget::GeometryHints hints(osg::Vec3(0.0f, 0.0f, 0.0f),
                                   osg::Vec3(1.0f, 0.0f, 0.0f),
                                   osg::Vec3(0.0f, 0.0f, 1.0f),
                                   osg::Vec4(1.0f, 1.0f, 1.0f, 1.0f),
                                   osgWidget::GeometryHints::RESIZE_HEIGHT_TO_MAINTAINCE_ASPECT_RATIO);

    osg::ref_ptr<osg::Group> group = new osg::Group;

    std::string password;

    while (arguments.read("--password", password))
    {}

    for (int i = 1; i < arguments.argc(); ++i)
    {
        if (!arguments.isOption(i))
        {
            std::string hostname = arguments[i];

            if (!password.empty())
            {
                if (!osgDB::Registry::instance()->getAuthenticationMap())
                    osgDB::Registry::instance()->setAuthenticationMap(new osgDB::AuthenticationMap);

                osgDB::Registry::instance()->getAuthenticationMap()->addAuthenticationDetails(hostname, new osgDB::AuthenticationDetails("", password));
            }

            osg::ref_ptr<osgWidget::VncClient> vncClient = new osgWidget::VncClient;
            if (vncClient->connect(arguments[i], hints))
            {
                group->addChild(vncClient.get());

                hints.position.x() += 1.1f;
            }
        }
    }

    viewer.setSceneData(group.get());

    viewer.addEventHandler(new osgViewer::StatsHandler);

    // add a custom escape handler, but disable the standard viewer one to enable the vnc images to handle
    // the escape without it getting caught by the viewer.
    viewer.addEventHandler(new EscapeHandler);
    viewer.setKeyEventSetsDone(0);

    return viewer.run();
}
void ImageNodelet::onInit()
{
  ros::NodeHandle nh = getNodeHandle();
  ros::NodeHandle local_nh = getPrivateNodeHandle();

  // Command line argument parsing
  const std::vector<std::string>& argv = getMyArgv();
  // First positional argument is the transport type
  std::string transport;
  local_nh.param("image_transport", transport, std::string("raw"));
  for (int i = 0; i < (int)argv.size(); ++i)
  {
    if (argv[i][0] != '-')
    {
      transport = argv[i];
      break;
    }
  }
  NODELET_INFO_STREAM("Using transport \"" << transport << "\"");
  // Internal option, should be used only by the image_view node
  bool shutdown_on_close = std::find(argv.begin(), argv.end(),
                                     "--shutdown-on-close") != argv.end();

  // Default window name is the resolved topic name
  std::string topic = nh.resolveName("image");
  local_nh.param("window_name", window_name_, topic);

  bool autosize;
  local_nh.param("autosize", autosize, false);
  
  std::string format_string;
  local_nh.param("filename_format", format_string, std::string("frame%04i.jpg"));
  filename_format_.parse(format_string);

  cv::namedWindow(window_name_, autosize ? cv::WND_PROP_AUTOSIZE : 0);
  cv::setMouseCallback(window_name_, &ImageNodelet::mouseCb, this);
  
#ifdef HAVE_GTK
  // Register appropriate handler for when user closes the display window
  GtkWidget *widget = GTK_WIDGET( cvGetWindowHandle(window_name_.c_str()) );
  if (shutdown_on_close)
    g_signal_connect(widget, "destroy", G_CALLBACK(destroyNode), NULL);
  else
    g_signal_connect(widget, "destroy", G_CALLBACK(destroyNodelet), &sub_);
#endif

  // Start the OpenCV window thread so we don't have to waitKey() somewhere
  startWindowThread();

  image_transport::ImageTransport it(nh);
  image_transport::TransportHints hints(transport, ros::TransportHints(), getPrivateNodeHandle());
  sub_ = it.subscribe(topic, 1, &ImageNodelet::imageCb, this, hints);
}
v8::Handle<v8::Value> V8InjectedScriptHost::inspectMethodCustom(const v8::Arguments& args)
{
    if (args.Length() < 2)
        return v8::Undefined();

    InjectedScriptHost* host = V8InjectedScriptHost::toNative(args.Holder());
    ScriptValue object(args[0]);
    ScriptValue hints(args[1]);
    host->inspectImpl(object.toInspectorValue(ScriptState::current()), hints.toInspectorValue(ScriptState::current()));

    return v8::Undefined();
}
int main(int argc, char **argv)
{
  ros::init(argc, argv, "image_view", ros::init_options::AnonymousName);
  if (ros::names::remap("image") == "image") {
    ROS_WARN("Topic 'image' has not been remapped! Typical command-line usage:\n"
             "\t$ rosrun image_view image_view image:=<image topic> [transport]");
  }

  ros::NodeHandle nh;
  ros::NodeHandle local_nh("~");

  // Default window name is the resolved topic name
  std::string topic = nh.resolveName("image");
  local_nh.param("window_name", g_window_name, topic);

  std::string format_string;
  local_nh.param("filename_format", format_string, std::string("frame%04i.jpg"));
  g_filename_format.parse(format_string);

  // Handle window size
  bool autosize;
  local_nh.param("autosize", autosize, false);
  cv::namedWindow(g_window_name, autosize ? (CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_EXPANDED) : 0);
  cv::setMouseCallback(g_window_name, &mouseCb);

  // Start the OpenCV window thread so we don't have to waitKey() somewhere
  cv::startWindowThread();

  // Handle transport
  // priority:
  //    1. command line argument
  //    2. rosparam '~image_transport'
  std::string transport;
  local_nh.param("image_transport", transport, std::string("raw"));
  ros::V_string myargv;
  ros::removeROSArgs(argc, argv, myargv);
  for (size_t i = 1; i < myargv.size(); ++i) {
    if (myargv[i][0] != '-') {
      transport = myargv[i];
      break;
    }
  }
  ROS_INFO_STREAM("Using transport \"" << transport << "\"");
  image_transport::ImageTransport it(nh);
  image_transport::TransportHints hints(transport, ros::TransportHints(), local_nh);
  image_transport::Subscriber sub = it.subscribe(topic, 1, imageCb, hints);

  ros::spin();

  cv::destroyWindow(g_window_name);
  return 0;
}
// Handles (un)subscribing when clients (un)subscribe
void ConvertMetricNodelet::connectCb()
{
  boost::lock_guard<boost::mutex> lock(connect_mutex_);
  if (pub_depth_.getNumSubscribers() == 0)
  {
    sub_raw_.shutdown();
  }
  else if (!sub_raw_)
  {
    image_transport::TransportHints hints("raw", ros::TransportHints(), getPrivateNodeHandle());
    sub_raw_ = it_->subscribe("image_raw", 1, &ConvertMetricNodelet::depthCb, this, hints);
  }
}
Example #19
void ImageView::onTextChanged(const QString& string) {
  subscriber_.shutdown();
  try {
    topic = string.toStdString();
    ros::NodeHandle nh;
    image_transport::ImageTransport it(nh);
    image_transport::TransportHints hints("raw");

    subscriber_ = it.subscribe(topic, 1, &ImageView::callbackImage, this, hints);
    //qDebug("ImageView::onTopicChanged() to topic '%s' with transport '%s'", topic.toStdString().c_str(), subscriber_.getTransport().c_str());
  } catch (image_transport::TransportLoadException& e) {
    QMessageBox::warning(widget_, tr("Loading image transport plugin failed"), e.what());
  }
}
Example #20
  void start(const Mode mode)
  {
    this->mode = mode;
    running = true;

    std::string topicCameraInfoColor = topicColor.substr(0, topicColor.rfind('/')) + "/camera_info";
    std::string topicCameraInfoDepth = topicDepth.substr(0, topicDepth.rfind('/')) + "/camera_info";
    // std::cout << "test1..." << std::endl;

    image_transport::TransportHints hints("raw");
    subImageColor = new image_transport::SubscriberFilter(it, topicColor, queueSize, hints);
    subImageDepth = new image_transport::SubscriberFilter(it, topicDepth, queueSize, hints);
    subCameraInfoColor = new message_filters::Subscriber<sensor_msgs::CameraInfo>(nh, topicCameraInfoColor, queueSize);
    subCameraInfoDepth = new message_filters::Subscriber<sensor_msgs::CameraInfo>(nh, topicCameraInfoDepth, queueSize);
    // std::cout << "test2..." << std::endl;

    syncExact = new message_filters::Synchronizer<ExactSyncPolicy>(ExactSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth);
    syncExact->registerCallback(boost::bind(&Receiver::callback, this, _1, _2, _3, _4));

    spinner.start();
    std::cout << "waiting for kinect2_bridge..." << std::endl;

    std::chrono::milliseconds duration(1);
    while(!updateImage || !updateCloud)
    {
      if(!ros::ok())
      {
        return;
      }
      std::this_thread::sleep_for(duration);
    }
    std::cout << "kinect2_bridge is running.." << std::endl;
    cloud = pcl::PointCloud<pcl::PointXYZRGBA>::Ptr(new pcl::PointCloud<pcl::PointXYZRGBA>());

    cloud->height = color.rows;
    cloud->width = color.cols;
    cloud->is_dense = false;
    cloud->points.resize(cloud->height * cloud->width);
    createLookup(this->color.cols, this->color.rows);
//
//     // Create the filtering object
//     // pcl::PassThrough<pcl::PointXYZRGBA> pass;
//     // pass.setInputCloud (cloud);
//     // pass.setFilterFieldName ("z");
//     // pass.setFilterLimits (0.0, 1.0);
//     // //pass.setFilterLimitsNegative (true);
//     // pass.filter (*cloud);

    cloudViewer();
  }
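ExactSyncPolicy is not declared in this snippet; given the four-message callback, it is presumably a typedef along these lines (a sketch of the assumed declaration, not the original):

#include <message_filters/sync_policies/exact_time.h>
#include <sensor_msgs/CameraInfo.h>
#include <sensor_msgs/Image.h>

// Color image, depth image and their two CameraInfo messages,
// matched only when the timestamps are exactly equal.
typedef message_filters::sync_policies::ExactTime<
    sensor_msgs::Image, sensor_msgs::Image,
    sensor_msgs::CameraInfo, sensor_msgs::CameraInfo> ExactSyncPolicy;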
Example #21
Vector<Acceleration, Frame> Ephemeris<Frame>::
ComputeGravitationalAccelerationOnMasslessBody(
    Position<Frame> const& position,
    Instant const& t) const {
  std::vector<Vector<Acceleration, Frame>> accelerations(1);
  std::vector<typename ContinuousTrajectory<Frame>::Hint> hints(bodies_.size());
  ComputeMasslessBodiesGravitationalAccelerations(
      t,
      {position},
      &accelerations,
      &hints);

  return accelerations[0];
}
Example #22
std::shared_ptr<std::vector<std::shared_ptr<RectFloat>>> MuPDFDoc::SearchText(const char* searchText)
{
	fz_text_sheet *sheet = nullptr;
	fz_text_page *text = nullptr;
	fz_device *dev  = nullptr;
	PageCache *pageCache = &m_pages[m_currentPage];
	fz_var(sheet);
	fz_var(text);
	fz_var(dev);
	std::shared_ptr<std::vector<std::shared_ptr<RectFloat>>> hints(new std::vector<std::shared_ptr<RectFloat>>());
	fz_try(m_context)
	{
		int hitCount = 0;
		fz_matrix ctm = CalcConvertMatrix();
		fz_rect mbrect = fz_transform_rect(ctm, pageCache->mediaBox);
		sheet = fz_new_text_sheet(m_context);
		text = fz_new_text_page(m_context, mbrect);
		dev = fz_new_text_device(m_context, sheet, text);
		fz_run_page(m_document, pageCache->page, dev, ctm, nullptr);
		fz_free_device(dev);
		dev = nullptr;
		int len = TextLen(text);
		for (int pos = 0; pos < len; pos++)
		{
			fz_bbox rr = fz_empty_bbox;
			int n = Match(text, searchText, pos);
			for (int i = 0; i < n; i++)
				rr = fz_union_bbox(rr, BBoxCharAt(text, pos + i));

			if (!fz_is_empty_bbox(rr) && hitCount < MAX_SEARCH_HITS)
			{
				hints->push_back(std::shared_ptr<RectFloat>(new RectFloat(rr.x0, rr.y0, rr.x1, rr.y1)));
				if (++hitCount >= MAX_SEARCH_HITS)
					break;
			}
		}
	}
	fz_always(m_context)
	{
		fz_free_text_page(m_context, text);
		fz_free_text_sheet(m_context, sheet);
		fz_free_device(dev);
	}
	fz_catch(m_context)
	{
		return std::shared_ptr<std::vector<std::shared_ptr<RectFloat>>>(nullptr);
	}
	return hints;
}
    void AudioOutputDeviceManager::addDeviceEventProc(
        int                                                 _card
        , std::multimap< int, const AudioOutputDevice * > & _devices
    )
    {
        if( _devices.find( _card ) != _devices.end() ) {
            return;
        }

        DeviceNameHints hints(
            _card
        );

        const auto &    HINTS = hints.get();

        for( int i = 0 ; HINTS[ i ] != nullptr ; i++ ) {
            const auto  HINT = HINTS[ i ];

            if( isOutputDevice( HINT ) == false ) {
                continue;
            }

            DeviceDescription   description(
                HINT
            );

            DeviceName  name(
                HINT
            );

            const auto  DEVICE = new AudioOutputDevice(
                description.get()
                , name.get()
            );

            _devices.insert(
                std::multimap< int, const AudioOutputDevice * >::value_type(
                    _card
                    , DEVICE
                )
            );

            this->callConnectEventHandler(
                *DEVICE
            );
        }
    }
int main(int argc, char **argv)
{
    ros::init(argc, argv, "image_view");

    if (ros::names::remap("image")=="image"){
        ROS_WARN("Topic 'image' has not been remapped! Typical command-line usage:\n"
                 "/t$ rosrun movesense_sensor image_view image:=<image topic>");
    }
    ros::NodeHandle nh;
    ros::NodeHandle local_nh("~");

    // Default window name is the resolved topic name
    std::string topic = nh.resolveName("image");
    local_nh.param("window_name",g_window_name,topic);

    // Handle window size
    bool autosize;
    local_nh.param("autosize",autosize, true);
    cv::namedWindow(g_window_name, autosize ? (CV_WINDOW_AUTOSIZE | CV_WINDOW_KEEPRATIO | CV_GUI_EXPANDED) : 0);

    // CreateTrackbar
    cvCreateTrackbar( "Depth_Min", g_window_name.c_str(), &MIN_DEPTH, 10000);
    cvCreateTrackbar( "Depth_Max", g_window_name.c_str(), &MAX_DEPTH, 50000);

    // Start the OpenCV window thread so we don't have to waitKey() somewhere
    cv::startWindowThread();

    // Handle transport
    std::string transport;
    local_nh.param("image_transport", transport, std::string("raw"));
    ROS_INFO_STREAM("Using transport \"" << transport << "\"");

    image_transport::ImageTransport it(nh);
    image_transport::TransportHints hints(transport, ros::TransportHints(), local_nh);
    image_transport::Subscriber sub = it.subscribe(topic, 1, imageCallback, hints);

    ros::spin();

    cv::destroyWindow(g_window_name);

    return 0;
}
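For completeness, a rough sketch of the imageCallback this subscriber expects: clamp the 16-bit depth to the trackbar band [MIN_DEPTH, MAX_DEPTH] and stretch it to 8 bits for display (the encoding and the scaling are assumptions, not taken from the original node):

#include <algorithm>
#include <string>
#include <cv_bridge/cv_bridge.h>
#include <opencv2/highgui/highgui.hpp>
#include <sensor_msgs/Image.h>

extern int MIN_DEPTH;              // globals defined elsewhere in the node,
extern int MAX_DEPTH;              // driven by the trackbars above
extern std::string g_window_name;

void imageCallback(const sensor_msgs::ImageConstPtr& msg)
{
    // 16-bit depth image; the unit depends on the driver.
    cv_bridge::CvImageConstPtr cv_ptr = cv_bridge::toCvShare(msg, "16UC1");
    cv::Mat depth = cv_ptr->image;

    // Keep only the band selected with the Depth_Min/Depth_Max trackbars.
    cv::Mat clipped = depth.clone();
    clipped.setTo(cv::Scalar(MIN_DEPTH), depth < MIN_DEPTH);
    clipped.setTo(cv::Scalar(MAX_DEPTH), depth > MAX_DEPTH);

    // Stretch the band to 8 bits and show it.
    const double range = std::max(1, MAX_DEPTH - MIN_DEPTH);
    cv::Mat display;
    clipped.convertTo(display, CV_8UC1, 255.0 / range, -255.0 * MIN_DEPTH / range);
    cv::imshow(g_window_name, display);
}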
int Results::wonLost(string perp[], string comp[],int limit,int &n, float percent)
{ 
    //if else statement to determine if the user has won or not
    if(comp[0]==perp[0]&&comp[1]==perp[1]&&comp[2]==perp[2]&&comp[3]==perp[3])
    {
        //Gives the comparison of the user to the computer choice
        limit=compare(percent,n,limit);
        return n;
    }
    else
    {
        //Gives hints if the user decides to have
        hints(comp,perp);
        cout<<endl<<"You have used up "<<n+1<<" tries, you have "<<10-(n+1)<<" ";
        cout<<"tries left before it is considered you have lost the game."<<endl;
        cout<<"You do however have "<<limit-(n+1)<<" tries left."<<endl;
        return n;
    }
   
}
FeatureExtraction::FeatureExtraction(ros::NodeHandle& n, ros::NodeHandle& np)
    : n_(n), np_(np), it_(n),
      image_feature_(new ImageFeatureMatching("GridFAST", "OpponentBRIEF")){
  /// subscriber
  image_transport::TransportHints hints("raw", ros::TransportHints(), np_);

  /// using CameraSubscriber
  sub_camera_ =
      it_.subscribeCamera("image", 1,
                          &FeatureExtraction::ImageCallback, this, hints);

  /// using sync policy
  // sub_image_.subscribe(it_, "image", 1, hints);
  // sub_info_ .subscribe(n_, "camera_info", 1);
  // approximate_sync_.reset(
  //     new ApproximateSync(ApproximatePolicy(10),
  //                         sub_image_, sub_info_));
  // approximate_sync_->registerCallback(
  //     boost::bind(&FeatureExtraction::ImageCallback, this, _1, _2));
}
Example #27
void		main2(t_all *all, t_add *add, t_window *wind, t_coords *list)
{
	create_window(&wind);
	wind->image = mlx_new_image(wind->mlx, SIZE_W, SIZE_H);
	wind->adress = (int *)mlx_get_data_addr(wind->image, \
			&wind->endian, &wind->size_line, &wind->bpp);
	add->k = fmin((SIZE_W / 2) / add->width, (SIZE_H / 2) / add->height);
	add->sdvig_x = 0;
	add->sdvig_y = 0;
	all->list = list;
	all->wind = wind;
	all->add = add;
	all->add->a = 0.4;
	all->add->b = 0.4;
	all->add->c = 3.2;
	dooo(all);
	mlx_put_image_to_window(wind->mlx, wind->window, wind->image, 0, 0);
	hints(all->wind);
	mlx_hook(all->wind->window, 2, 5, hadle_input_key, all);
	mlx_hook(all->wind->window, 17, 1L << 17, exit_x, all);
	mlx_loop(all->wind->mlx);
}
uint NotificationManager::Notify(const QString &appName, uint replacesId, const QString &appIcon, const QString &summary, const QString &body, const QStringList &actions, const QVariantHash &originalHints, int expireTimeout)
{
    uint id = replacesId != 0 ? replacesId : nextAvailableNotificationID();

    if (replacesId == 0 || notifications.contains(id)) {
        // Apply a category definition, if any, to the hints
        QVariantHash hints(originalHints);
        applyCategoryDefinition(hints);

        // Ensure the hints contain a timestamp
        addTimestamp(hints);

        if (replacesId == 0) {
            // Create a new notification
            Notification *notification = new Notification(appName, id, appIcon, summary, body, actions, hints, expireTimeout, this);
            connect(notification, SIGNAL(actionInvoked(QString)), this, SLOT(invokeAction(QString)));
            notifications.insert(id, notification);
        } else {
            // Only replace an existing notification if it really exists
            Notification *notification = notifications.value(id);
            notification->setAppName(appName);
            notification->setAppIcon(appIcon);
            notification->setSummary(summary);
            notification->setBody(body);
            notification->setActions(actions);
            notification->setHints(hints);
            notification->setExpireTimeout(expireTimeout);

            // Delete the existing notification from the database
            execSQL(QString("DELETE FROM notifications WHERE id=?"), QVariantList() << id);
            execSQL(QString("DELETE FROM actions WHERE id=?"), QVariantList() << id);
            execSQL(QString("DELETE FROM hints WHERE id=?"), QVariantList() << id);
        }

        // Add the notification, its actions and its hints to the database
        execSQL("INSERT INTO notifications VALUES (?, ?, ?, ?, ?, ?)", QVariantList() << id << appName << appIcon << summary << body << expireTimeout);
        foreach (const QString &action, actions) {
            execSQL("INSERT INTO actions VALUES (?, ?)", QVariantList() << id << action);
        }
  void startRecord()
  {
    std::cout << "Controls:" << std::endl
              << "   [ESC, q] - Exit" << std::endl
              << " [SPACE, s] - Save current frame" << std::endl
              << "        [l] - decreas min and max value for IR value rage" << std::endl
              << "        [h] - increas min and max value for IR value rage" << std::endl
              << "        [1] - decreas min value for IR value rage" << std::endl
              << "        [2] - increas min value for IR value rage" << std::endl
              << "        [3] - decreas max value for IR value rage" << std::endl
              << "        [4] - increas max value for IR value rage" << std::endl;

    image_transport::TransportHints hints("compressed");
    image_transport::TransportHints hintsIr("compressed");
    image_transport::TransportHints hintsDepth("compressedDepth");
    subImageColor = new image_transport::SubscriberFilter(it, topicColor, 4, hints);
    subImageIr = new image_transport::SubscriberFilter(it, topicIr, 4, hintsIr);
    subImageDepth = new image_transport::SubscriberFilter(it, topicDepth, 4, hintsDepth);

    sync = new message_filters::Synchronizer<ColorIrDepthSyncPolicy>(ColorIrDepthSyncPolicy(4), *subImageColor, *subImageIr, *subImageDepth);
    sync->registerCallback(boost::bind(&Recorder::callback, this, _1, _2, _3));

    spinner.start();
  }
Example #30
// start function
void Recorder::start()
{
  // get the camera info ros topics for color and depth
  topicCameraInfoColor = topicColor.substr(0, topicColor.rfind('/')) + "/camera_info";
  topicCameraInfoDepth = topicDepth.substr(0, topicDepth.rfind('/')) + "/camera_info";


  // TransportHints stores the transport settings for the image topic subscribers;
  // here the transport is hardcoded to "raw" (a useCompressed flag could switch it
  // to "compressed"; see the sketch after this example)
  image_transport::TransportHints hints("raw");

  // SubscriberFilters are used to subscribe to the kinect image topics
  subImageColor = new image_transport::SubscriberFilter(it, topicColor, queueSize, hints);
  subImageDepth = new image_transport::SubscriberFilter(it, topicDepth, queueSize, hints);

  // message filters are used to subscribe to the camera info topics
  subCameraInfoColor = new message_filters::Subscriber<sensor_msgs::CameraInfo>(nh, topicCameraInfoColor, queueSize);
  subCameraInfoDepth = new message_filters::Subscriber<sensor_msgs::CameraInfo>(nh, topicCameraInfoDepth, queueSize);

  // creating an exact synchronizer for the 4 ROS topics with the given queueSize
  // the recorder class callback function is set as the callback function
  syncExact = new message_filters::Synchronizer<ExactSyncPolicy>(ExactSyncPolicy(queueSize), *subImageColor, *subImageDepth, *subCameraInfoColor, *subCameraInfoDepth);
  syncExact->registerCallback(boost::bind(&Recorder::callback, this, _1, _2, _3, _4));
}
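As mentioned in the comment above, a useCompressed flag is the usual way to switch between the raw and compressed transports instead of hardcoding "raw"; a small sketch (the flag itself is assumed, it does not appear in this snippet):

#include <image_transport/image_transport.h>

// Build the transport hint from a boolean switch rather than a literal.
image_transport::TransportHints makeTransportHints(const bool useCompressed)
{
  return image_transport::TransportHints(useCompressed ? "compressed" : "raw");
}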