示例#1
0
void QVideoDecoder::timerEvent(QTimerEvent * te)
{
	// Ignore any timer that is not our decode tick.
	if(te->timerId() != m_decode_timer)
		return;

	// If either packet queue is running low (< 100 entries), spend this
	// tick reading more packets from the stream.
	const bool queueNeedsData =
		g_packet_queue.audio_packets.count() < 100 ||
		g_packet_queue.video_packets.count() < 100;

	if(queueNeedsData)
	{
		readFrame();
	}
	else if(g_packet_queue.video_packets.count() > 1)
	{
		// Queues are comfortably full: use the tick to decode instead.
		decodeVideoFrame();
	}
}
示例#2
0
void MovieDecoder::getScaledVideoFrame(int scaledSize, bool maintainAspectRatio, VideoFrame& videoFrame)
{
    initializeFilterGraph(m_pFormatContext->streams[m_VideoStream]->time_base, scaledSize, maintainAspectRatio);

    auto del = [] (AVFrame* f) { av_frame_free(&f); };
    std::unique_ptr<AVFrame, decltype(del)> res(av_frame_alloc(), del);

    checkRc(av_buffersrc_write_frame(m_pFilterSource, m_pFrame), "Failed to write frame to filter graph");

    int attempts = 0;
    int rc = av_buffersink_get_frame(m_pFilterSink, res.get());
    while (rc == AVERROR(EAGAIN) && attempts++ < 10)
    {
        decodeVideoFrame();
        checkRc(av_buffersrc_write_frame(m_pFilterSource, m_pFrame), "Failed to write frame to filter graph");
        rc = av_buffersink_get_frame(m_pFilterSink, res.get());
    }

    checkRc(rc, "Failed to get buffer from filter");

    videoFrame.width = res->width;
    videoFrame.height = res->height;
    videoFrame.lineSize = videoFrame.width * 4;

	if(videoFrame.frameData != nullptr)
		delete videoFrame.frameData;

	uint8_t * framedata = res->data[0];

	videoFrame.frameData = new uint8_t[videoFrame.width * 4 * videoFrame.height];
	for(int y = 0;y < videoFrame.height;y++)
	{
		memcpy(videoFrame.frameData + ((videoFrame.height - y - 1) * videoFrame.lineSize), framedata + (y * res->linesize[0]), videoFrame.lineSize);
	}

    if (m_pFilterGraph)
    {
        avfilter_graph_free(&m_pFilterGraph);
    }
}