Example #1
bool
HardwareAccel::extractData(VideoFrame& input)
{
    try {
        auto inFrame = input.pointer();

        if (inFrame->format != format_) {
            std::stringstream buf;
            buf << "Frame format mismatch: expected " << av_get_pix_fmt_name(format_);
            buf << ", got " << av_get_pix_fmt_name((AVPixelFormat)inFrame->format);
            throw std::runtime_error(buf.str());
        }

        // FFmpeg requires a second frame into which it transfers the data
        // from the GPU buffer to main memory
        auto output = std::unique_ptr<VideoFrame>(new VideoFrame());
        auto outFrame = output->pointer();
        outFrame->format = AV_PIX_FMT_YUV420P;

        extractData(input, *output);

        // move outFrame into inFrame so the caller receives the extracted image
        // data; inFrame's current contents must be unreferenced first
        av_frame_unref(inFrame);
        av_frame_move_ref(inFrame, outFrame);
    } catch (const std::runtime_error& e) {
        fail(false);
        RING_ERR("%s", e.what());
        return false;
    }

    succeed();
    return true;
}
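
Example #1 calls a two-argument extractData(input, output) overload that is not shown. Below is a minimal, hedged sketch of what such a GPU-to-CPU transfer typically looks like with FFmpeg's hwcontext API, assuming the overload is built on av_hwframe_transfer_data() (libavutil/hwcontext.h); the real implementation is backend-specific and may differ.

void
HardwareAccel::extractData(VideoFrame& input, VideoFrame& output)
{
    auto inFrame = input.pointer();
    auto outFrame = output.pointer();

    // copy the decoded image from the GPU surface into the software frame;
    // outFrame->format (set by the caller) selects the destination pixel format
    if (av_hwframe_transfer_data(outFrame, inFrame, 0) < 0)
        throw std::runtime_error("Unable to transfer frame from GPU memory");

    // copy presentation metadata (pts, etc.) so the output keeps the input's timing
    av_frame_copy_props(outFrame, inFrame);
}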
Example #2
MediaDecoder::Status
MediaDecoder::flush(VideoFrame& result)
{
    AVPacket inpacket;
    av_init_packet(&inpacket);
    // av_init_packet() leaves data and size untouched; a null packet tells the
    // decoder to drain any frames it still has buffered
    inpacket.data = nullptr;
    inpacket.size = 0;

    int frameFinished = 0;
    auto len = avcodec_decode_video2(decoderCtx_, result.pointer(),
                                    &frameFinished, &inpacket);

    av_packet_unref(&inpacket);

    if (len <= 0)
        return Status::DecodeError;

    if (frameFinished) {
#ifdef RING_ACCEL
        // flush is called when closing the stream
        // so don't restart the media decoder
        if (accel_ && !accel_->hasFailed())
            accel_->extractData(result);
#endif // RING_ACCEL
        return Status::FrameFinished;
    }

    return Status::Success;
}
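
A hedged usage sketch of flush(): when the stream is being closed, the caller can drain whatever the decoder still buffers by flushing until no more finished frames come out. drainRemainingFrames() and publishFrame() are hypothetical names, not part of the original code.

void
drainRemainingFrames(MediaDecoder& decoder)
{
    VideoFrame frame;
    // each flush() call feeds the decoder a null packet; keep going while it
    // still produces complete frames
    while (decoder.flush(frame) == MediaDecoder::Status::FrameFinished)
        publishFrame(frame); // hypothetical consumer of the drained frame
}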
Example #3
MediaDecoder::Status
MediaDecoder::decode(VideoFrame& result)
{
    AVPacket inpacket;
    av_init_packet(&inpacket);
    int ret = av_read_frame(inputCtx_, &inpacket);
    if (ret == AVERROR(EAGAIN)) {
        return Status::Success;
    } else if (ret == AVERROR_EOF) {
        return Status::EOFError;
    } else if (ret < 0) {
        char errbuf[64];
        av_strerror(ret, errbuf, sizeof(errbuf));
        RING_ERR("Couldn't read frame: %s\n", errbuf);
        return Status::ReadError;
    }

    // is this a packet from the video stream?
    if (inpacket.stream_index != streamIndex_) {
        av_packet_unref(&inpacket);
        return Status::Success;
    }

    auto frame = result.pointer();
    int frameFinished = 0;
    int len = avcodec_decode_video2(decoderCtx_, frame,
                                    &frameFinished, &inpacket);
    av_packet_unref(&inpacket);

    if (len <= 0)
        return Status::DecodeError;

    if (frameFinished) {
        frame->format = (AVPixelFormat) correctPixFmt(frame->format);
#ifdef RING_ACCEL
        if (accel_) {
            if (!accel_->hasFailed())
                accel_->extractData(result);
            else
                return Status::RestartRequired;
        }
#endif // RING_ACCEL
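        // rate emulation (e.g. when decoding from a file): sleep until the
        // frame's presentation time so frames are delivered in real time
        // instead of as fast as they can be decoded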
        if (emulateRate_ and frame->pkt_pts != AV_NOPTS_VALUE) {
            auto frame_time = getTimeBase()*(frame->pkt_pts - avStream_->start_time);
            auto target = startTime_ + static_cast<std::int64_t>(frame_time.real() * 1e6);
            auto now = av_gettime();
            if (target > now) {
                std::this_thread::sleep_for(std::chrono::microseconds(target - now));
            }
        }
        return Status::FrameFinished;
    }

    return Status::Success;
}
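
A hedged sketch of a caller-side read/decode loop built on the Status values above; decodeLoop() and publishFrame() are hypothetical names. RestartRequired is treated as a request to reopen the decoder, e.g. to fall back from a failed hardware accelerator to software decoding.

bool
decodeLoop(MediaDecoder& decoder)
{
    VideoFrame frame;
    for (;;) {
        switch (decoder.decode(frame)) {
            case MediaDecoder::Status::FrameFinished:
                publishFrame(frame);   // hypothetical consumer of the frame
                break;
            case MediaDecoder::Status::Success:
                break;                 // no complete frame yet, keep reading
            case MediaDecoder::Status::EOFError:
                return true;           // normal end of stream
            case MediaDecoder::Status::RestartRequired:
                return false;          // caller should reopen the decoder
            default:
                return false;          // ReadError / DecodeError
        }
    }
}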
Example #4
void
VideoMixer::render_frame(VideoFrame& output, const VideoFrame& input, int index)
{
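    // lay the n sources out on a zoom x zoom grid, where zoom is the smallest
    // integer whose square holds them all (e.g. 5 sources -> a 3x3 grid);
    // index selects this source's cell, xoff/yoff its top-left corner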
    if (!width_ or !height_ or !input.pointer())
        return;

    const int n = sources_.size();
    const int zoom = ceil(sqrt(n));
    int cell_width = width_ / zoom;
    int cell_height = height_ / zoom;
    int xoff = (index % zoom) * cell_width;
    int yoff = (index / zoom) * cell_height;

    scaler_.scale_and_pad(input, output, xoff, yoff, cell_width, cell_height, true);
}