Example #1
bool MediaEngine::stepVideo(int videoPixelMode) {
#ifdef USE_FFMPEG
	auto codecIter = m_pCodecCtxs.find(m_videoStream);
	AVCodecContext *m_pCodecCtx = codecIter == m_pCodecCtxs.end() ? nullptr : codecIter->second;

	if (!m_pFormatCtx)
		return false;
	if (!m_pCodecCtx)
		return false;
	if (!m_pFrame || !m_pFrameRGB)
		return false;

	updateSwsFormat(videoPixelMode);
	// TODO: Technically we could set this to frameWidth instead of m_desWidth for better perf.
	// Update the linesize for the new format too.  We started with the largest size, so it should fit.
	m_pFrameRGB->linesize[0] = getPixelFormatBytes(videoPixelMode) * m_desWidth;

	AVPacket packet;
	int frameFinished;
	bool bGetFrame = false;
	while (!bGetFrame) {
		bool dataEnd = av_read_frame(m_pFormatCtx, &packet) < 0;
		// Even if we've read all frames, some may have been re-ordered frames at the end.
		// Still need to decode those, so keep calling avcodec_decode_video2().
		if (dataEnd || packet.stream_index == m_videoStream) {
			// avcodec_decode_video2() gives us the re-ordered frames with a NULL packet.
			if (dataEnd)
				av_free_packet(&packet);

			int result = avcodec_decode_video2(m_pCodecCtx, m_pFrame, &frameFinished, &packet);
			if (frameFinished) {
				sws_scale(m_sws_ctx, m_pFrame->data, m_pFrame->linesize, 0,
					m_pCodecCtx->height, m_pFrameRGB->data, m_pFrameRGB->linesize);

				if (av_frame_get_best_effort_timestamp(m_pFrame) != AV_NOPTS_VALUE)
					m_videopts = av_frame_get_best_effort_timestamp(m_pFrame) + av_frame_get_pkt_duration(m_pFrame) - m_firstTimeStamp;
				else
					m_videopts += av_frame_get_pkt_duration(m_pFrame);
				bGetFrame = true;
			}
			if (result <= 0 && dataEnd) {
				// Sometimes, m_readSize is less than m_streamSize at the end, but not by much.
				// This is kinda a hack, but the ringbuffer would have to be prematurely empty too.
				m_isVideoEnd = !bGetFrame && (m_pdata->getQueueSize() == 0);
				if (m_isVideoEnd)
					m_decodingsize = 0;
				break;
			}
		}
		av_free_packet(&packet);
	}
	return bGetFrame;
#else
	// If video engine is not available, just add to the timestamp at least.
	m_videopts += 3003;
	return true;
#endif // USE_FFMPEG
}
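avcodec_decode_video2() and av_free_packet(), used above, are deprecated in newer FFmpeg releases in favor of the send/receive API. Below is a minimal sketch of the same read-decode-drain loop on the modern API; fmt, dec, and vid are placeholder names for an already-opened AVFormatContext, its video AVCodecContext, and the video stream index, none of which come from the original code:

#include <libavcodec/avcodec.h>
#include <libavformat/avformat.h>

/* Returns 0 when *frame holds a decoded picture, AVERROR_EOF once the
 * decoder is fully drained, or another negative AVERROR on failure. */
static int step_video(AVFormatContext *fmt, AVCodecContext *dec,
                      int vid, AVFrame *frame)
{
    AVPacket *pkt = av_packet_alloc();
    int ret;

    if (!pkt)
        return AVERROR(ENOMEM);
    for (;;) {
        /* Re-ordered frames buffered inside the decoder come out here,
         * including those that remain after the last packet was sent. */
        ret = avcodec_receive_frame(dec, frame);
        if (ret != AVERROR(EAGAIN))
            break;                          /* frame, EOF, or hard error */
        /* The decoder wants more input: feed it the next video packet. */
        ret = av_read_frame(fmt, pkt);
        if (ret < 0) {
            avcodec_send_packet(dec, NULL); /* no more data: start draining */
            continue;
        }
        if (pkt->stream_index == vid)
            ret = avcodec_send_packet(dec, pkt);
        av_packet_unref(pkt);
        if (ret < 0)
            break;
    }
    av_packet_free(&pkt);
    return ret;
}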
Example #2
static int filter_frame(AVFilterLink *inlink, AVFrame *frame)
{
    AVFilterContext *ctx = inlink->dst;
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int ret = 0;

    if (inlink->frame_count_out >= s->start && s->size > 0 && s->loop != 0) {
        if (s->nb_frames < s->size) {
            if (!s->nb_frames)
                s->start_pts = frame->pts;
            s->frames[s->nb_frames] = av_frame_clone(frame);
            if (!s->frames[s->nb_frames]) {
                av_frame_free(&frame);
                return AVERROR(ENOMEM);
            }
            s->nb_frames++;
            s->duration = frame->pts + av_frame_get_pkt_duration(frame);
            ret = ff_filter_frame(outlink, frame);
        } else {
            av_frame_free(&frame);
            ret = push_frame(ctx);
        }
    } else {
        frame->pts += s->duration;
        ret = ff_filter_frame(outlink, frame);
    }

    return ret;
}
Example #3
static int push_frame(AVFilterContext *ctx)
{
    AVFilterLink *outlink = ctx->outputs[0];
    LoopContext *s = ctx->priv;
    int64_t pts;
    int ret;

    AVFrame *out = av_frame_clone(s->frames[s->current_frame]);

    if (!out)
        return AVERROR(ENOMEM);
    out->pts += s->duration - s->start_pts;
    pts = out->pts + av_frame_get_pkt_duration(out);
    ret = ff_filter_frame(outlink, out);
    s->current_frame++;

    if (s->current_frame >= s->nb_frames) {
        s->duration = pts;
        s->current_frame = 0;

        if (s->loop > 0)
            s->loop--;
    }

    return ret;
}
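A quick numeric trace of the pts bookkeeping in these two functions, with made-up values: suppose the filter caches three frames with pts 1000, 1100, 1200, each with pkt_duration 100. Then start_pts = 1000 and, after the last cached frame, duration = 1200 + 100 = 1300. In push_frame() every clone is shifted by duration - start_pts = 300, so the first replay emits pts 1300, 1400, 1500; when current_frame wraps, duration is updated to 1500 + 100 = 1600, the next replay starts at 1600, and the looped output stays timestamp-contiguous.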
Example #4
int TFFmpegVideoDecoder::Decode(FF_VIDEO_FRAME *frame)
{
	FF_PACKET_LIST *pktList = NULL;
	int gotPic = 0;
	int ret = FF_OK;
	int w, h;
	AVPixelFormat fmt;
	if (!frame)
	{
		ret = FF_ERR_NOPOINTER;
		return ret;
	}
	while (!gotPic)
	{
		if (_pkter->GetVideoPacket(&pktList) >= 0)
		{
			AVPacket *pkt = (AVPacket *)pktList->pkt;
			if (_decFrame)
				avcodec_get_frame_defaults(_decFrame);
			else
				_decFrame = avcodec_alloc_frame();

			avcodec_decode_video2(_ctx->videoStream->codec,
				_decFrame, &gotPic, pkt);
			if (gotPic)
			{
				TFF_GetMutex(_settingMutex, TFF_INFINITE);
				w = _outputSetting.width;
				h = _outputSetting.height;
				fmt = _outputSetting.pixFmt;
				TFF_ReleaseMutex(_settingMutex);
				AllocSwrContextIfNeeded(_decFrame);
				frame->frame = avcodec_alloc_frame();
				int numBytes = avpicture_get_size(fmt, w, h);
				frame->buffer = (uint8_t *)av_malloc(numBytes * sizeof(uint8_t));
				avpicture_fill((AVPicture *)frame->frame, frame->buffer, fmt, w, h);

				sws_scale(_swsCtx,
					_decFrame->data,
					_decFrame->linesize,
					0,
					_decFrame->height,
					frame->frame->data,
					frame->frame->linesize);
				frame->width = w;
				frame->height = h;
				frame->frame->pts = av_frame_get_best_effort_timestamp(_decFrame);
				frame->frame->pkt_duration = av_frame_get_pkt_duration(_decFrame);
			}
			_pkter->FreeSinglePktList(&pktList);
		}
		else
		{
			ret = FF_EOF;
			break;
		}
	}
	return ret;
}
Example #5
std::shared_ptr<Frame> CFFmpegImage::ReadFrame()
{
  AVFrame* avframe = ExtractFrame();
  if (avframe == nullptr)
    return nullptr;
  std::shared_ptr<Frame> frame(new Frame());
  frame->m_delay = (unsigned int)av_frame_get_pkt_duration(avframe);
  frame->m_pitch = avframe->width * 4;
  frame->m_pImage = new unsigned char[avframe->height * frame->m_pitch];
  DecodeFrame(avframe, avframe->width, avframe->height, frame->m_pitch, frame->m_pImage);
  av_frame_free(&avframe);
  return frame;
}
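In this snippet the frame's packet duration doubles as the display delay of an animated image (GIF-style), and the width * 4 pitch implies that DecodeFrame() fills a 4-byte-per-pixel (RGBA/BGRA-style) buffer; both readings are inferred from the code rather than stated by it.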
Example #6
double ff_decoder_get_best_effort_pts(struct ff_decoder *decoder,
		AVFrame *frame)
{
	// how long this frame is displayed, including the extra delay added
	// for repeated pictures according to the codec
	double estimated_frame_delay;
	int64_t best_effort_pts;
	double d_pts;

	// This function is ffmpeg only, replace with frame->pkt_pts
	// if you are trying to compile for libav as a temporary
	// measure
	best_effort_pts = av_frame_get_best_effort_timestamp(frame);

	if (best_effort_pts != AV_NOPTS_VALUE) {
		// Fix the first pts if less than start_pts
		if (best_effort_pts < decoder->start_pts) {
			if (decoder->first_frame) {
				best_effort_pts = decoder->start_pts;
			} else {
				av_log(NULL, AV_LOG_WARNING, "multiple pts < "
						"start_pts; setting start pts "
						"to 0");
				decoder->start_pts = 0;
			}
		}

		best_effort_pts -= decoder->start_pts;

		// Since the best effort pts came from the stream, we use its
		// time base
		d_pts = best_effort_pts * av_q2d(decoder->stream->time_base);
		decoder->predicted_pts = d_pts;
	} else {
		d_pts = decoder->predicted_pts;
	}

	// Update our predicted pts to include the repeated picture count
	// Our predicted pts clock is based on the codecs time base
	estimated_frame_delay = av_frame_get_pkt_duration(frame)
			* av_q2d(decoder->codec->time_base);
	// Add repeat frame delay
	estimated_frame_delay += frame->repeat_pict
		/ (1.0L / estimated_frame_delay);

	decoder->predicted_pts += estimated_frame_delay;

	return d_pts;
}
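Two details worth flagging in this example. First, frame->repeat_pict / (1.0L / estimated_frame_delay) is algebraically just repeat_pict * estimated_frame_delay. Second, the comment in example #8 below notes that av_frame_get_pkt_duration() reports the duration in AVStream->time_base units, while this function scales it by the codec time base; the two only line up when the stream and codec time bases match. A variant keyed to the stream time base (reusing decoder->stream from the best-effort branch above) would be:

	estimated_frame_delay = av_frame_get_pkt_duration(frame)
			* av_q2d(decoder->stream->time_base);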
Example #7
static int audio_write_frame(AVFormatContext *s1, int stream_index,
                             AVFrame **frame, unsigned flags)
{
    AlsaData *s = s1->priv_data;
    AVPacket pkt;

    /* ff_alsa_open() should have accepted only supported formats */
    if ((flags & AV_WRITE_UNCODED_FRAME_QUERY))
        return av_sample_fmt_is_planar(s1->streams[stream_index]->codec->sample_fmt) ?
               AVERROR(EINVAL) : 0;
    /* set only used fields */
    pkt.data     = (*frame)->data[0];
    pkt.size     = (*frame)->nb_samples * s->frame_size;
    pkt.dts      = (*frame)->pkt_dts;
    pkt.duration = av_frame_get_pkt_duration(*frame);
    return audio_write_packet(s1, &pkt);
}
Example #8
void emit_frame(const AVFrame *frame)
{
    const int n = frame->nb_samples * av_get_channel_layout_nb_channels(av_frame_get_channel_layout(frame));
    const uint16_t *p     = (uint16_t*)frame->data[0];
    const uint16_t *p_end = p + n;

#if 0
    while (p < p_end) {
        fputc(*p    & 0xff, stdout);
        fputc(*p>>8 & 0xff, stdout);
        p++;
    }
#else
    static int a = 0; a += n;
    int64_t dur = av_frame_get_pkt_duration(frame); // in AVStream->time_base units
    double seconds = (double(dur) * timebase.num) / timebase.den;
    static double s = 0; s += seconds;
    printf("another %d samples, %f seconds, respective totals %d %f\n", n, seconds, a, s);
#endif
    fflush(stdout);
}
Example #9
static int compute_crc_of_packets(AVFormatContext *fmt_ctx, int video_stream,
                                AVCodecContext *ctx, AVFrame *fr, uint64_t ts_start, uint64_t ts_end, int no_seeking)
{
    int number_of_written_bytes;
    int got_frame = 0;
    int result;
    int end_of_stream = 0;
    int byte_buffer_size;
    uint8_t *byte_buffer;
    int64_t crc;
    AVPacket pkt;

    byte_buffer_size = av_image_get_buffer_size(ctx->pix_fmt, ctx->width, ctx->height, 16);
    byte_buffer = av_malloc(byte_buffer_size);
    if (!byte_buffer) {
        av_log(NULL, AV_LOG_ERROR, "Can't allocate buffer\n");
        return AVERROR(ENOMEM);
    }

    if (!no_seeking) {
        result = av_seek_frame(fmt_ctx, video_stream, ts_start, AVSEEK_FLAG_ANY);
        printf("Seeking to %"PRId64", computing crc for frames with pts < %"PRId64"\n", ts_start, ts_end);
        if (result < 0) {
            av_log(NULL, AV_LOG_ERROR, "Error in seeking\n");
            return result;
        }
        avcodec_flush_buffers(ctx);
    }

    av_init_packet(&pkt);
    do {
        if (!end_of_stream)
            if (av_read_frame(fmt_ctx, &pkt) < 0)
                end_of_stream = 1;
        if (end_of_stream) {
            pkt.data = NULL;
            pkt.size = 0;
        }
        if (pkt.stream_index == video_stream || end_of_stream) {
            got_frame = 0;
            if ((pkt.pts == AV_NOPTS_VALUE) && (!end_of_stream)) {
                av_log(NULL, AV_LOG_ERROR, "Error: frames don't have pts values\n");
                return -1;
            }
            result = avcodec_decode_video2(ctx, fr, &got_frame, &pkt);
            if (result < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding frame\n");
                return result;
            }
            if (got_frame) {
                number_of_written_bytes = av_image_copy_to_buffer(byte_buffer, byte_buffer_size,
                                        (const uint8_t* const *)fr->data, (const int*) fr->linesize,
                                        ctx->pix_fmt, ctx->width, ctx->height, 1);
                if (number_of_written_bytes < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Can't copy image to buffer\n");
                    return number_of_written_bytes;
                }
                if ((fr->pkt_pts > ts_end) && (!no_seeking))
                    break;
                crc = av_adler32_update(0, (const uint8_t*)byte_buffer, number_of_written_bytes);
                printf("%10"PRId64", 0x%08"PRIx64"\n", fr->pkt_pts, crc);
                if (no_seeking) {
                    if (add_crc_to_array(crc, fr->pkt_pts) < 0)
                        return -1;
                }
                else {
                    if (compare_crc_in_array(crc, fr->pkt_pts) < 0)
                        return -1;
                }
            }
        }
        av_packet_unref(&pkt);
        av_init_packet(&pkt);
    } while ((!end_of_stream || got_frame) && (no_seeking || (fr->pkt_pts + av_frame_get_pkt_duration(fr) <= ts_end)));

    av_packet_unref(&pkt);
    av_freep(&byte_buffer);

    return 0;
}
Example #10
JNIEXPORT jlong JNICALL Java_bits_jav_codec_JavFrame_nPktDuration
(JNIEnv* env, jclass clazz, jlong pointer)
{
  AVFrame* frame = *(AVFrame**)&pointer;
  return av_frame_get_pkt_duration( frame );
}
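For reference, the recurring pattern across these examples is to read the per-frame duration and convert it to seconds with the owning stream's time base. A minimal, self-contained sketch; st is a placeholder for the AVStream the frame was decoded from:

#include <libavformat/avformat.h>
#include <libavutil/frame.h>
#include <libavutil/rational.h>

/* Convert a decoded frame's packet duration to seconds.
 * Returns 0.0 if the decoder did not propagate a duration. */
static double frame_duration_seconds(const AVFrame *frame, const AVStream *st)
{
    int64_t dur = av_frame_get_pkt_duration(frame); /* stream time_base units */
    return dur > 0 ? dur * av_q2d(st->time_base) : 0.0;
}

Note that the av_frame_get_*() accessors were removed in later FFmpeg releases; current code reads the field directly as frame->duration (formerly frame->pkt_duration).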