Example 1
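  // Prosilica (PvAPI) variant: stamp the image from the hardware trigger when
  // one fired, convert the frame, and fill the CameraInfo binning/ROI fields.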
  bool processFrame(tPvFrame* frame, sensor_msgs::Image &img, sensor_msgs::CameraInfo &cam_info)
  {
    /// @todo Better trigger timestamp matching
    if ((trigger_mode_ == prosilica::SyncIn1 || trigger_mode_ == prosilica::SyncIn2) && !trig_time_.isZero()) {
      img.header.stamp = cam_info.header.stamp = trig_time_;
      trig_time_ = ros::Time(); // Zero
    }
    else {
      /// @todo Match time stamp from frame to ROS time?
      img.header.stamp = cam_info.header.stamp = ros::Time::now();
    }

    /// @todo Binning values retrieved here may differ from the ones used to actually
    /// capture the frame! Maybe need to clear queue when changing binning and/or
    /// stuff binning values into context?
    tPvUint32 binning_x = 1, binning_y = 1;
    if (cam_->hasAttribute("BinningX")) {
      cam_->getAttribute("BinningX", binning_x);
      cam_->getAttribute("BinningY", binning_y);
    }
    // Binning averages bayer samples, so just call it mono8 in that case
    if (frame->Format == ePvFmtBayer8 && (binning_x > 1 || binning_y > 1))
      frame->Format = ePvFmtMono8;

    if (!frameToImage(frame, img))
      return false;

    // Set the operational parameters in CameraInfo (binning, ROI)
    cam_info.binning_x = binning_x;
    cam_info.binning_y = binning_y;
    // ROI in CameraInfo is in unbinned coordinates, need to scale up
    cam_info.roi.x_offset = frame->RegionX * binning_x;
    cam_info.roi.y_offset = frame->RegionY * binning_y;
    cam_info.roi.height = frame->Height * binning_y;
    cam_info.roi.width = frame->Width * binning_x;
    cam_info.roi.do_rectify = (frame->Height != sensor_height_ / binning_y) ||
                              (frame->Width  != sensor_width_  / binning_x);

    count_++;
    return true;
  }
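
  // FlyCapture2 (Point Grey) variant: same role, but CameraInfo travels as a
  // shared pointer and is reset when its calibration does not match this mode.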
  bool processFrame(FlyCapture2::Image* frame, sensor_msgs::Image& img, sensor_msgs::CameraInfoPtr& cam_info)
  {
    // cam_->saveImageToFile(frame); // Debugging aid: uncomment to dump frames to disk

    if (!frameToImage(frame, img))
      return false;

    /// @todo Use time from frame?
    img.header.stamp = cam_info->header.stamp = ros::Time::now();

    // Throw out any CamInfo that's not calibrated to this camera mode
    if (cam_info->K[0] != 0.0 && (img.width != cam_info->width || img.height != cam_info->height))
    {
      cam_info.reset(new sensor_msgs::CameraInfo());
    }

    // If we don't have a calibration, set the image dimensions
    if (cam_info->K[0] == 0.0)
    {
      cam_info->width = img.width = width_;
      cam_info->height = img.height = height_;
    }

    // TF frame
    img.header.frame_id = cam_info->header.frame_id = frame_id_;

    count_++;
    return true;
  }
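
// FFmpeg decoder: send one packet to the codec, then drain every frame it
// yields, pacing each against the playback clock before queueing it to render.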
bool FFmpegDecoder::handleVideoPacket(
    const AVPacket& packet,
    double& videoClock,
    VideoParseContext& context)
{
    enum { MAX_SKIPPED = 4 };
    const double MAX_DELAY = 0.2;

    const int ret = avcodec_send_packet(m_videoCodecContext, &packet);
    if (ret < 0)
        return false;

    AVFramePtr videoFrame(av_frame_alloc());
    while (avcodec_receive_frame(m_videoCodecContext, videoFrame.get()) == 0)
    {
        // best_effort_timestamp supersedes the deprecated
        // av_frame_get_best_effort_timestamp() accessor.
        const int64_t duration_stamp = videoFrame->best_effort_timestamp;

        // Update the video clock from the frame's best-effort timestamp when
        // present; otherwise keep the running clock as this picture's PTS.
        if (duration_stamp != AV_NOPTS_VALUE)
        {
            videoClock = duration_stamp * av_q2d(m_videoStream->time_base);
        }
        const double pts = videoClock;

        // update video clock for next frame
        // for MPEG2, the frame can be repeated, so we update the clock accordingly
        const double frameDelay = av_q2d(m_videoCodecContext->time_base) *
            (1. + videoFrame->repeat_pict * 0.5);
        videoClock += frameDelay;

        boost::posix_time::time_duration td(boost::posix_time::pos_infin);
        bool inNextFrame = false;
        const bool haveVideoPackets = !m_videoPacketsQueue.empty();

        {
            boost::lock_guard<boost::mutex> locker(m_isPausedMutex);

            inNextFrame = m_isPaused && m_isVideoSeekingWhilePaused;
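            // (Re)anchor the start clock so that m_videoStartClock + pts equals
            // the wall-clock moment at which this frame should be displayed.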
            if (!context.initialized || inNextFrame)
            {
                m_videoStartClock = (m_isPaused ? m_pauseTimer : GetHiResTime()) - pts;
            }

            // Skipping frames
            if (context.initialized && !inNextFrame && haveVideoPackets)
            {
                const double curTime = GetHiResTime();
                if (m_videoStartClock + pts <= curTime)
                {
                    if (m_videoStartClock + pts < curTime - MAX_DELAY)
                    {
                        InterlockedAdd(m_videoStartClock, MAX_DELAY);
                    }

                    if (++context.numSkipped > MAX_SKIPPED)
                    {
                        context.numSkipped = 0;
                    }
                    else
                    {
                        CHANNEL_LOG(ffmpeg_sync) << "Hard skip frame";

                        // pause
                        if (m_isPaused && !m_isVideoSeekingWhilePaused)
                        {
                            break;
                        }

                        continue;
                    }
                }
                else
                {
                    int speedNumerator, speedDenominator;
                    std::tie(speedNumerator, speedDenominator) = static_cast<const std::pair<int, int>&>(m_speedRational);
                    context.numSkipped = 0;
                    td = boost::posix_time::milliseconds(
                        int((m_videoStartClock + pts - curTime) * 1000.  * speedDenominator / speedNumerator) + 1);
                }
            }
        }

        context.initialized = true;

        {
            boost::unique_lock<boost::mutex> locker(m_videoFramesMutex);

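            // Wait, at most until the frame is due, for room in the output
            // queue (or for a pause); on timeout the frame is dropped and
            // decoding continues with the next one.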
            if (!m_videoFramesCV.timed_wait(locker, td, [this]
                {
                    return (m_isPaused && !m_isVideoSeekingWhilePaused) ||
                        m_videoFramesQueue.canPush();
                }))
            {
                continue;
            }
        }

        {
            boost::lock_guard<boost::mutex> locker(m_isPausedMutex);
            if (m_isPaused && !m_isVideoSeekingWhilePaused)
            {
                break;
            }

            m_isVideoSeekingWhilePaused = false;
        }

        if (inNextFrame)
        {
            m_isPausedCV.notify_all();
        }

        VideoFrame& current_frame = m_videoFramesQueue.back();
        handleDirect3dData(videoFrame.get());
        if (!frameToImage(current_frame, videoFrame, m_imageCovertContext, m_pixelFormat))
        {
            continue;
        }

        current_frame.m_pts = pts;
        current_frame.m_duration = duration_stamp;

        {
            boost::lock_guard<boost::mutex> locker(m_videoFramesMutex);
            m_videoFramesQueue.pushBack();
        }
        m_videoFramesCV.notify_all();
    }

    return true;
}
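
The FFmpeg example above is built around the send/receive decoding API
(avcodec_send_packet / avcodec_receive_frame). Below is a minimal sketch of
that pattern in isolation, with the clock and queue logic stripped out; it
assumes an already-opened codec context, and the helper name decode_packet is
ours, not from the code above.

extern "C" {
#include <libavcodec/avcodec.h>
}

// Sketch only: push one packet into the decoder, then drain every frame it
// produces. AVERROR(EAGAIN) and AVERROR_EOF are the normal "drained" results.
static int decode_packet(AVCodecContext* ctx, const AVPacket* pkt)
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;

    AVFrame* frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    while ((ret = avcodec_receive_frame(ctx, frame)) == 0)
    {
        // ... convert / display / enqueue the frame here ...
        av_frame_unref(frame); // release per-frame buffers before reuse
    }

    av_frame_free(&frame);
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}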