/*! Processes the latest point cloud and fills the response with the detected table
    and object clusters.
 */
bool TabletopSegmentor::serviceCallback(TabletopSegmentation::Request &request, 
                                             TabletopSegmentation::Response &response)
{
  ros::Time start_time = ros::Time::now();
  std::string topic = nh_.resolveName("cloud_in");
  //ROS_INFO("Tabletop detection service called; waiting for a point_cloud2 on topic %s", topic.c_str());

  sensor_msgs::PointCloud2::ConstPtr recent_cloud = 
    ros::topic::waitForMessage<sensor_msgs::PointCloud2>(topic, nh_, ros::Duration(10.0));

  // Bail out before dereferencing if no cloud arrived within the timeout.
  if (!recent_cloud)
  {
    ROS_ERROR("Tabletop object detector: no point_cloud2 has been received");
    response.result = response.NO_CLOUD_RECEIVED;
    return true;
  }

  pcl::PointCloud<Point>::Ptr cloud(new pcl::PointCloud<Point>);
  pcl::PointCloud<Point>::Ptr converted_cloud(new pcl::PointCloud<Point>);

  pcl::fromROSMsg(*recent_cloud, *cloud);

  if (convertFrame(cloud, converted_cloud))
  {
    processCloud(converted_cloud, response);
    //ROS_INFO_STREAM("In total, segmentation took " << ros::Time::now() - start_time << " seconds");
    return true;
  }
  else
  {
    response.result = response.OTHER_ERROR;
    //ROS_INFO_STREAM("In total, segmentation took " << ros::Time::now() - start_time << " seconds");
    return true;
  }
}
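For context, here is a minimal sketch of a client node calling the service above. The node and service names, the include path, and the SUCCESS result constant are assumptions (only NO_CLOUD_RECEIVED and OTHER_ERROR appear in the code), so treat this as an illustration rather than the package's actual client.

#include <ros/ros.h>
#include "TabletopSegmentation.h"   // generated service header; the real path depends on the package

int main(int argc, char **argv)
{
  ros::init(argc, argv, "tabletop_segmentation_client");
  ros::NodeHandle nh;

  // "tabletop_segmentation" is a placeholder for whatever name the segmentor advertises.
  ros::ServiceClient client = nh.serviceClient<TabletopSegmentation>("tabletop_segmentation");

  TabletopSegmentation srv;
  if (client.call(srv) && srv.response.result == srv.response.SUCCESS)   // SUCCESS assumed
    ROS_INFO("Segmentation returned %zu clusters", srv.response.clusters.size());
  else
    ROS_ERROR("Tabletop segmentation service call failed");

  return 0;
}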
Example #2
void WebMEncoder::encodeVideo(unsigned char *inputImage, int inputStride) {
    //cout << "Blown timeline encoding video\n";
    //cout << "Waiting to encode video\n";
    // Wait for the worker that is still encoding the previous frame.
    videoProcessorThread.join();
    //cout << "Starting to encode video\n";

    // Convert the caller's buffer into the encoder's input format.
    convertFrame(inputImage, inputStride);

    // Encode the converted frame on a fresh worker thread while the caller continues.
    videoProcessorThread = boost::thread(videoProcessor);
}
Example #3
FrameSource::FrameStatus OpenCVFrameSourceImpl::fetch(vx_image image, vx_uint32 timeout)
{
    cv::Mat frame;

    if (queue_.pop(frame, timeout))
    {
        // Describe the cv::Mat buffer layout so OpenVX can read it directly.
        vx_imagepatch_addressing_t image_addr;
        image_addr.dim_x = frame.cols;
        image_addr.dim_y = frame.rows;
        image_addr.stride_x = static_cast<vx_int32>(frame.channels());
        image_addr.stride_y = static_cast<vx_int32>(frame.step);
        image_addr.scale_x = image_addr.scale_y = VX_SCALE_UNITY;

        // Copy/convert the OpenCV data into the caller's vx_image.
        convertFrame(context_,
                     image,
                     getConfiguration(),
                     image_addr,
                     frame.data,
                     false,
                     devMem,
                     devMemPitch,
                     scaledImage);

        // Remember when the last frame was delivered; used by the timeout check below.
        lastFrameTimestamp.tic();

        return OK;
    }
    else
    {
        if (alive_)
        {
            if ((lastFrameTimestamp.toc()/1000.0) > Application::get().getSourceDefaultTimeout())
            {
                close();
                return CLOSED;
            }
            else
                return TIMEOUT;
        }
        else
        {
            close();
            return CLOSED;
        }
    }
}
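For reference, a rough sketch of the polling loop that typically drives a fetch() implementation like the one above. The image format, the timeout value, the source pointer, and processFrame() are illustrative assumptions, not part of the original source.

vx_image frame = vxCreateImage(context, width, height, VX_DF_IMAGE_RGBX);   // format assumed

for (;;)
{
    // The timeout value (in whatever units fetch() expects) is an arbitrary choice here.
    FrameSource::FrameStatus status = source->fetch(frame, 30);

    if (status == FrameSource::TIMEOUT)
        continue;                        // nothing available yet, poll again
    if (status == FrameSource::CLOSED)
        break;                           // source closed or timed out permanently

    processFrame(frame);                 // hypothetical downstream processing
}

vxReleaseImage(&frame);

Example #4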
void TabletopSegmentor::cloudCallBack(const sensor_msgs::PointCloud2::ConstPtr cloud_in)
{
  ros::Time start_time = ros::Time::now();
  pcl::PointCloud<Point>::Ptr cloud(new pcl::PointCloud<Point>);
  pcl::PointCloud<Point>::Ptr converted_cloud(new pcl::PointCloud<Point>);

  pcl::fromROSMsg(*cloud_in, *cloud);

  // Skip this cloud if the frame conversion fails, mirroring the service callback above.
  if (!convertFrame(cloud, converted_cloud))
  {
    ROS_ERROR("Tabletop object detector: failed to convert cloud to the processing frame");
    return;
  }

  // Run segmentation, then republish the result as a TabletopClusters message.
  TabletopSegmentation::Response r;
  processCloud(converted_cloud, r);

  TabletopClusters c;
  c.table = r.table;
  c.clusters = r.clusters;
  c.result = r.result;

  cluster_pub.publish(c);
}
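The callback above is the topic-driven counterpart of the service callback in the first example: instead of filling a response it publishes a TabletopClusters message. A plausible wiring in the node's constructor could look like the following; the topic names, queue sizes, and the cloud_sub_ member are assumptions.

cluster_pub = nh_.advertise<TabletopClusters>("tabletop_clusters", 1);
cloud_sub_  = nh_.subscribe("cloud_in", 1, &TabletopSegmentor::cloudCallBack, this);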
Example #5
void VideoOutput::encodeFrame(const uint8* _frame, const ImageFormat* format, bool invertY) {
    alwaysAssertM(m_isInitialized, "VideoOutput was not initialized before call to encodeAndWriteFrame.");
    alwaysAssertM(! m_isFinished, "Cannot call VideoOutput::append() after commit() or abort().");

#   ifndef G3D_NO_FFMPEG

        const uint8* frame = convertFrame(_frame, format, invertY);
        (void)frame;
        m_avInputFrame->width = m_settings.width;
        m_avInputFrame->height = m_settings.height;
        // encode frame          

        m_avInputFrame->pts = m_framecount;
        ++m_framecount;
        int encodeSize = avcodec_encode_video(m_avStream->codec, 
                                              m_avEncodingBuffer, 
                                              m_avEncodingBufferSize,
                                              m_avInputFrame);

        // write the frame
        if (encodeSize > 0) {
            AVPacket packet;
            av_init_packet(&packet);
            
            packet.stream_index = m_avStream->index;
            packet.data         = m_avEncodingBuffer;
            packet.size         = encodeSize;
            packet.pts = av_rescale_q(m_avInputFrame->pts, m_avStream->codec->time_base, m_avStream->time_base);
            packet.dts = AV_NOPTS_VALUE;
            if (m_avStream->codec->coded_frame->key_frame) {
                packet.flags |= AV_PKT_FLAG_KEY;
            }

            av_write_frame(m_avFormatContext, &packet);
        }
#   endif
}
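Note that avcodec_encode_video() and AVStream::codec belong to an FFmpeg API that has since been removed; on current FFmpeg the same encode-and-write step uses the send/receive pair. A rough sketch of the equivalent block, assuming codecCtx is the encoder's AVCodecContext and the surrounding G3D members are unchanged:

m_avInputFrame->pts = m_framecount++;

if (avcodec_send_frame(codecCtx, m_avInputFrame) == 0) {
    AVPacket* packet = av_packet_alloc();
    // One input frame may yield zero or more packets, so drain the encoder.
    while (avcodec_receive_packet(codecCtx, packet) == 0) {
        av_packet_rescale_ts(packet, codecCtx->time_base, m_avStream->time_base);
        packet->stream_index = m_avStream->index;
        av_interleaved_write_frame(m_avFormatContext, packet);
        av_packet_unref(packet);
    }
    av_packet_free(&packet);
}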