Code example #1
File: buffersink.cpp Project: adarovsky/avcpp
bool BufferSinkFilterContext::getFrame(AVFrame *frame, int flags, OptionalErrorCode ec)
{
    clear_if(ec);
    if (!m_sink) {
        throws_if(ec, Errors::Unallocated);
        return false;
    }

    if (m_req == ReqGetSamples) {
        throws_if(ec, Errors::MixBufferSinkAccess);
        return false;
    }

    m_req = ReqGetFrame;

    int sts = av_buffersink_get_frame_flags(m_sink.raw(), frame, flags);
    if (sts < 0) {
        if (sts == AVERROR_EOF || sts == AVERROR(EAGAIN)) {
            if (ec) {
                *ec = make_ffmpeg_error(sts);
            }
        } else {
            throws_if(ec, sts, ffmpeg_category());
        }
        return false;
    }
    return true;
}
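
Note: the avcpp wrapper above maps the raw libavfilter return-code convention onto OptionalErrorCode. For reference, a minimal sketch of that raw convention in plain C; sink_ctx, frame, and consume are hypothetical stand-ins for a configured buffersink context, a caller-allocated frame, and the caller's frame handler:

#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Drain every frame currently available from a configured buffersink.
 * av_buffersink_get_frame_flags() returns >= 0 on success, AVERROR(EAGAIN)
 * when the graph needs more input, AVERROR_EOF at end of stream, and any
 * other negative value on a real error. */
static int drain_sink(AVFilterContext *sink_ctx, AVFrame *frame,
                      void (*consume)(const AVFrame *))
{
    int ret;
    while ((ret = av_buffersink_get_frame_flags(sink_ctx, frame, 0)) >= 0) {
        consume(frame);        /* hand the frame to the caller */
        av_frame_unref(frame); /* release its buffers before the next call */
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        return 0;              /* expected terminations, not errors here */
    return ret;                /* genuine error */
}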
Code example #2
File: buffersink.c Project: markjreed/vice-emu
int attribute_align_arg av_buffersink_get_samples(AVFilterContext *ctx,
                                                  AVFrame *frame, int nb_samples)
{
    BufferSinkContext *s = ctx->priv;
    AVFilterLink   *link = ctx->inputs[0];
    AVFrame *cur_frame;
    int ret = 0;
#ifdef IDE_COMPILE
    AVRational tmp;
#endif

    if (!s->audio_fifo) {
        int nb_channels = link->channels;
        if (!(s->audio_fifo = av_audio_fifo_alloc(link->format, nb_channels, nb_samples)))
            return AVERROR(ENOMEM);
    }

    while (ret >= 0) {
        if (av_audio_fifo_size(s->audio_fifo) >= nb_samples)
            return read_from_fifo(ctx, frame, nb_samples);

        if (!(cur_frame = av_frame_alloc()))
            return AVERROR(ENOMEM);
        ret = av_buffersink_get_frame_flags(ctx, cur_frame, 0);
        if (ret == AVERROR_EOF && av_audio_fifo_size(s->audio_fifo)) {
            av_frame_free(&cur_frame);
            return read_from_fifo(ctx, frame, av_audio_fifo_size(s->audio_fifo));
        } else if (ret < 0) {
            av_frame_free(&cur_frame);
            return ret;
        }

        if (cur_frame->pts != AV_NOPTS_VALUE) {
#ifdef IDE_COMPILE
            tmp.num = 1;
            tmp.den = link->sample_rate;
            s->next_pts = cur_frame->pts -
                          av_rescale_q(av_audio_fifo_size(s->audio_fifo),
                                       tmp, link->time_base);
#else
            s->next_pts = cur_frame->pts -
                          av_rescale_q(av_audio_fifo_size(s->audio_fifo),
                                       (AVRational){ 1, link->sample_rate },
                                       link->time_base);
#endif
        }

        ret = av_audio_fifo_write(s->audio_fifo, (void**)cur_frame->extended_data,
                                  cur_frame->nb_samples);
        av_frame_free(&cur_frame);
    }

    return ret;
}
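
The caller side of av_buffersink_get_samples() is typically a loop requesting a fixed number of samples per frame, which suits encoders with a fixed frame size. A minimal sketch, assuming sink_ctx comes from graph setup and 1024 is an arbitrary frame size:

#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Pull fixed-size audio frames from a configured buffersink; the last
 * frame before EOF may carry fewer than the requested samples. */
static int pull_fixed_frames(AVFilterContext *sink_ctx)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);
    while ((ret = av_buffersink_get_samples(sink_ctx, frame, 1024)) >= 0) {
        /* ... encode or process frame->nb_samples samples ... */
        av_frame_unref(frame);
    }
    av_frame_free(&frame);
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}

av_buffersink_set_frame_size() achieves a similar effect by making the plain get-frame calls themselves return fixed-size audio frames.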
Code example #3
File: buffersink.c Project: axmhari/FFmpeg
static int attribute_align_arg compat_read(AVFilterContext *ctx, AVFilterBufferRef **pbuf, int nb_samples, int flags)
{
    AVFilterBufferRef *buf;
    AVFrame *frame;
    int ret;

    if (!pbuf)
        return ff_poll_frame(ctx->inputs[0]);

    frame = av_frame_alloc();
    if (!frame)
        return AVERROR(ENOMEM);

    if (!nb_samples)
        ret = av_buffersink_get_frame_flags(ctx, frame, flags);
    else
        ret = av_buffersink_get_samples(ctx, frame, nb_samples);

    if (ret < 0)
        goto fail;

    if (ctx->inputs[0]->type == AVMEDIA_TYPE_VIDEO) {
        buf = avfilter_get_video_buffer_ref_from_arrays(frame->data, frame->linesize,
                                                        AV_PERM_READ,
                                                        frame->width, frame->height,
                                                        frame->format);
    } else {
        buf = avfilter_get_audio_buffer_ref_from_arrays(frame->extended_data,
                                                        frame->linesize[0], AV_PERM_READ,
                                                        frame->nb_samples,
                                                        frame->format,
                                                        frame->channel_layout);
    }
    if (!buf) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }

    avfilter_copy_frame_props(buf, frame);

    buf->buf->priv = frame;
    buf->buf->free = compat_free_buffer;

    *pbuf = buf;

    return 0;
fail:
    av_frame_free(&frame);
    return ret;
}
Code example #4
int reap_filter(struct liveStream *ctx)
{
	int ret = 0;
	/* pull filtered frames from the filtergraph */
	while (1)
	{
		int i = 0;
		int nb_frames = 1;
		take_filter_lock(&ctx->filter_lock);
		ret = av_buffersink_get_frame_flags(ctx->out_filter, ctx->OutFrame, AV_BUFFERSINK_FLAG_NO_REQUEST);
		give_filter_lock(&ctx->filter_lock);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		{
			ret = 0;
			break;
		}
		if (ret < 0)
		{
			av_log(NULL, AV_LOG_ERROR, "error reading from buffer sink\n");
			ret = -1;
			break;
		}
		if (ctx->OutFrame->pts != AV_NOPTS_VALUE)
		{
			ctx->OutFrame->pts = av_rescale_q(ctx->OutFrame->pts, ctx->out_filter->inputs[0]->time_base, ctx->oc->streams[0]->codec->time_base);
		}
		nb_frames += ctx->OutFrame->pts - ctx->sync_out_pts;
		/* if extra frames were provided (pts fell behind the sync point), write just one */
		if (nb_frames < 0)
			nb_frames = 1;
		/* sometimes an insanely large gap is seen; that should really be fixed in FFmpeg itself */
		if (nb_frames > 15)
			nb_frames = 1;

		for (i = 0; i < nb_frames; i++)
		{
			ctx->OutFrame->pts = ctx->sync_out_pts;
			if (ctx->OutFrame->pts != AV_NOPTS_VALUE)
			{
				ctx->OutFrame->pts = av_rescale_q(ctx->OutFrame->pts, ctx->oc->streams[0]->codec->time_base, ctx->oc->streams[0]->time_base);
			}
			write_video_frame(ctx->oc, ctx->oc->streams[0], ctx->OutFrame);
			ctx->sync_out_pts++;
		}
		av_frame_unref(ctx->OutFrame);
	}
	return ret;
}
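
The pts handling above is plain time-base conversion: av_rescale_q(a, bq, cq) computes a * bq / cq with rounding. A tiny sketch with made-up time bases (in the example they come from the filter link and the output stream):

#include <stdint.h>
#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Convert a timestamp between two time bases; the values below are
 * illustrative only. */
static int64_t convert_pts(int64_t pts)
{
    AVRational link_tb   = { 1, 25 };   /* e.g. a filter link's time base */
    AVRational stream_tb = { 1, 1000 }; /* e.g. a muxer stream's time base */
    return av_rescale_q(pts, link_tb, stream_tb); /* pts * (1/25) / (1/1000) */
}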
Code example #5
File: buffersink.c Project: axmhari/FFmpeg
int av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
    return av_buffersink_get_frame_flags(ctx, frame, 0);
}
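
The flags argument that this wrapper hardcodes to 0 is what varies across the other examples: AV_BUFFERSINK_FLAG_PEEK returns the next frame without removing it from the sink, and AV_BUFFERSINK_FLAG_NO_REQUEST returns a frame only if one is already buffered, without asking the graph to produce more. A sketch, assuming sink and frame are already set up:

#include <libavfilter/buffersink.h>
#include <libavutil/frame.h>

/* Peek at the next frame (it stays queued in the sink), then poll
 * non-blockingly for an already-buffered frame. */
static int peek_then_poll(AVFilterContext *sink, AVFrame *frame)
{
    int ret = av_buffersink_get_frame_flags(sink, frame, AV_BUFFERSINK_FLAG_PEEK);
    if (ret >= 0)
        av_frame_unref(frame); /* drop our reference; the sink keeps the frame */
    return av_buffersink_get_frame_flags(sink, frame, AV_BUFFERSINK_FLAG_NO_REQUEST);
}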
Code example #6
File: ffdecoder.cpp Project: zhenghuadai/FFmpeg
int VideoDecoder::video_thread(void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVStreamsParser* ps = is->getAVStreamsParser();
    AVFrame *frame = av_frame_alloc();
    double pts;
    double duration;
    int ret;
    AVRational tb = ps->video_st->time_base;
    AVRational frame_rate = av_guess_frame_rate(ps->ic, ps->video_st, NULL);

#if CONFIG_AVFILTER
    AVFilterGraph *graph = avfilter_graph_alloc();
    AVFilterContext *filt_out = NULL, *filt_in = NULL;
    int last_w = 0;
    int last_h = 0;
    enum AVPixelFormat last_format = (AVPixelFormat) (-2);
    int last_serial = -1;
    int last_vfilter_idx = 0;
    if (!graph) {
        av_frame_free(&frame);
        return AVERROR(ENOMEM);
    }

#endif

    if (!frame) {
#if CONFIG_AVFILTER
        avfilter_graph_free(&graph);
#endif
        return AVERROR(ENOMEM);
    }

    for (;;) {
        ret = is->viddec().get_video_frame(is, frame);
        if (ret < 0)
            goto the_end;
        if (!ret)
            continue;

#if CONFIG_AVFILTER
        if (   last_w != frame->width
            || last_h != frame->height
            || last_format != frame->format
            || last_serial != is->viddec().pkt_serial
            || last_vfilter_idx != is->vfilter_idx) {
            av_log(NULL, AV_LOG_DEBUG,
                   "Video frame changed from size:%dx%d format:%s serial:%d to size:%dx%d format:%s serial:%d\n",
                   last_w, last_h,
                   (const char *)av_x_if_null(av_get_pix_fmt_name(last_format), "none"), last_serial,
                   frame->width, frame->height,
                   (const char *)av_x_if_null(av_get_pix_fmt_name((AVPixelFormat)frame->format), "none"), is->viddec().pkt_serial);
            avfilter_graph_free(&graph);
            graph = avfilter_graph_alloc();
            if ((ret = configure_video_filters(graph, is, gOptions.vfilters_list ? gOptions.vfilters_list[is->vfilter_idx] : NULL, frame)) < 0) {
                SDL_Event event;
                event.type = FF_QUIT_EVENT;
                event.user.data1 = is;
                SDL_PushEvent(&event);
                goto the_end;
            }
            filt_in  = is->in_video_filter;
            filt_out = is->out_video_filter;
            last_w = frame->width;
            last_h = frame->height;
            last_format = (AVPixelFormat) frame->format;
            last_serial = is->viddec().pkt_serial;
            last_vfilter_idx = is->vfilter_idx;
            frame_rate = filt_out->inputs[0]->frame_rate;
        }

        ret = av_buffersrc_add_frame(filt_in, frame);
        if (ret < 0)
            goto the_end;

        while (ret >= 0) {
            is->frame_last_returned_time = av_gettime_relative() / 1000000.0;

            ret = av_buffersink_get_frame_flags(filt_out, frame, 0);
            if (ret < 0) {
                if (ret == AVERROR_EOF)
                    is->viddec().finished = is->viddec().pkt_serial;
                ret = 0;
                break;
            }

            is->frame_last_filter_delay = av_gettime_relative() / 1000000.0 - is->frame_last_returned_time;
            if (fabs(is->frame_last_filter_delay) > AV_NOSYNC_THRESHOLD / 10.0)
                is->frame_last_filter_delay = 0;
            tb = filt_out->inputs[0]->time_base;
#endif
            duration = (frame_rate.num && frame_rate.den ? av_q2d((AVRational){frame_rate.den, frame_rate.num}) : 0);
            pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
            ret = queue_picture(is, frame, pts, duration, av_frame_get_pkt_pos(frame), is->viddec().pkt_serial);
            av_frame_unref(frame);
#if CONFIG_AVFILTER
        }
#endif

        if (ret < 0)
            goto the_end;
    }
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&graph);
#endif
    av_frame_free(&frame);
    return 0;
}
Code example #7
File: ffdecoder.cpp Project: zhenghuadai/FFmpeg
int AudioDecoder::audio_thread(void *arg)
{
    VideoState *is = (VideoState *) arg;
    AVStreamsParser* ps = is->getAVStreamsParser();
    AVFrame *frame = av_frame_alloc();
    Frame *af;
#if CONFIG_AVFILTER
    int last_serial = -1;
    int64_t dec_channel_layout;
    int reconfigure;
#endif
    int got_frame = 0;
    AVRational tb;
    int ret = 0;

    if (!frame)
        return AVERROR(ENOMEM);

    do {
        if ((got_frame = is->auddec().decode_frame(frame)) < 0)
            goto the_end;

        if (got_frame) {
                tb = (AVRational){1, frame->sample_rate};

#if CONFIG_AVFILTER
                dec_channel_layout = get_valid_channel_layout(frame->channel_layout, av_frame_get_channels(frame));

                reconfigure =
                    cmp_audio_fmts(is->audio_filter_src.fmt, is->audio_filter_src.channels,
                            (AVSampleFormat)frame->format, av_frame_get_channels(frame))    ||
                    is->audio_filter_src.channel_layout != dec_channel_layout ||
                    is->audio_filter_src.freq           != frame->sample_rate ||
                    is->auddec().pkt_serial               != last_serial;

                if (reconfigure) {
                    char buf1[1024], buf2[1024];
                    av_get_channel_layout_string(buf1, sizeof(buf1), -1, is->audio_filter_src.channel_layout);
                    av_get_channel_layout_string(buf2, sizeof(buf2), -1, dec_channel_layout);
                    av_log(NULL, AV_LOG_DEBUG,
                           "Audio frame changed from rate:%d ch:%d fmt:%s layout:%s serial:%d to rate:%d ch:%d fmt:%s layout:%s serial:%d\n",
                           is->audio_filter_src.freq, is->audio_filter_src.channels, av_get_sample_fmt_name(is->audio_filter_src.fmt), buf1, last_serial,
                           frame->sample_rate, av_frame_get_channels(frame), av_get_sample_fmt_name((AVSampleFormat)frame->format), buf2, is->auddec().pkt_serial);

                    is->audio_filter_src.fmt            = (AVSampleFormat)frame->format;
                    is->audio_filter_src.channels       = av_frame_get_channels(frame);
                    is->audio_filter_src.channel_layout = dec_channel_layout;
                    is->audio_filter_src.freq           = frame->sample_rate;
                    last_serial                         = is->auddec().pkt_serial;

                    if ((ret = configure_audio_filters(is, gOptions.afilters, 1)) < 0)
                        goto the_end;
                }

            if ((ret = av_buffersrc_add_frame(is->in_audio_filter, frame)) < 0)
                goto the_end;

            while ((ret = av_buffersink_get_frame_flags(is->out_audio_filter, frame, 0)) >= 0) {
                tb = is->out_audio_filter->inputs[0]->time_base;
#endif
                if (!(af = is->sampq().peek_writable()))
                    goto the_end;

                af->pts = (frame->pts == AV_NOPTS_VALUE) ? NAN : frame->pts * av_q2d(tb);
                af->pos = av_frame_get_pkt_pos(frame);
                af->serial = is->auddec().pkt_serial;
                af->duration = av_q2d((AVRational){frame->nb_samples, frame->sample_rate});

                av_frame_move_ref(af->frame, frame);
                is->sampq().push();

#if CONFIG_AVFILTER
                if (ps->audioq.serial != is->auddec().pkt_serial)
                    break;
            }
            if (ret == AVERROR_EOF)
                is->auddec().finished = is->auddec().pkt_serial;
#endif
        }
    } while (ret >= 0 || ret == AVERROR(EAGAIN) || ret == AVERROR_EOF);
 the_end:
#if CONFIG_AVFILTER
    avfilter_graph_free(&is->agraph);
#endif
    av_frame_free(&frame);
    return ret;
}
Code example #8
File: uncoded_frame.c Project: 0day-ci/FFmpeg
int main(int argc, char **argv)
{
    char *in_graph_desc, **out_dev_name;
    int nb_out_dev = 0, nb_streams = 0;
    AVFilterGraph *in_graph = NULL;
    Stream *streams = NULL, *st;
    AVFrame *frame = NULL;
    int i, j, run = 1, ret;

    //av_log_set_level(AV_LOG_DEBUG);

    if (argc < 3) {
        av_log(NULL, AV_LOG_ERROR,
               "Usage: %s filter_graph dev:out [dev2:out2...]\n\n"
               "Examples:\n"
               "%s movie=file.nut:s=v+a xv:- alsa:default\n"
               "%s movie=file.nut:s=v+a uncodedframecrc:pipe:0\n",
               argv[0], argv[0], argv[0]);
        exit(1);
    }
    in_graph_desc = argv[1];
    out_dev_name = argv + 2;
    nb_out_dev = argc - 2;

    av_register_all();
    avdevice_register_all();
    avfilter_register_all();

    /* Create input graph */
    if (!(in_graph = avfilter_graph_alloc())) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Unable to alloc filter graph: %s\n",
               av_err2str(ret));
        goto fail;
    }
    ret = avfilter_graph_parse_ptr(in_graph, in_graph_desc, NULL, NULL, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Unable to parse graph: %s\n",
               av_err2str(ret));
        goto fail;
    }
    nb_streams = 0;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_inputs; j++) {
            if (!f->inputs[j]) {
                av_log(NULL, AV_LOG_ERROR, "Graph has unconnected inputs\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
        }
        for (j = 0; j < f->nb_outputs; j++)
            if (!f->outputs[j])
                nb_streams++;
    }
    if (!nb_streams) {
        av_log(NULL, AV_LOG_ERROR, "Graph has no output stream\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }
    if (nb_out_dev != 1 && nb_out_dev != nb_streams) {
        av_log(NULL, AV_LOG_ERROR,
               "Graph has %d output streams, %d devices given\n",
               nb_streams, nb_out_dev);
        ret = AVERROR(EINVAL);
        goto fail;
    }

    if (!(streams = av_calloc(nb_streams, sizeof(*streams)))) {
        ret = AVERROR(ENOMEM);
        av_log(NULL, AV_LOG_ERROR, "Could not allocate streams\n");
        goto fail;
    }
    st = streams;
    for (i = 0; i < in_graph->nb_filters; i++) {
        AVFilterContext *f = in_graph->filters[i];
        for (j = 0; j < f->nb_outputs; j++) {
            if (!f->outputs[j]) {
                if ((ret = create_sink(st++, in_graph, f, j)) < 0)
                    goto fail;
            }
        }
    }
    av_assert0(st - streams == nb_streams);
    if ((ret = avfilter_graph_config(in_graph, NULL)) < 0) {
        av_log(NULL, AV_LOG_ERROR, "Failed to configure graph\n");
        goto fail;
    }

    /* Create output devices */
    for (i = 0; i < nb_out_dev; i++) {
        char *fmt = NULL, *dev = out_dev_name[i];
        st = &streams[i];
        if ((dev = strchr(dev, ':'))) {
            *(dev++) = 0;
            fmt = out_dev_name[i];
        }
        ret = avformat_alloc_output_context2(&st->mux, NULL, fmt, dev);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Failed to allocate output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
        if (!(st->mux->oformat->flags & AVFMT_NOFILE)) {
            ret = avio_open2(&st->mux->pb, st->mux->filename, AVIO_FLAG_WRITE,
                             NULL, NULL);
            if (ret < 0) {
                av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                       av_err2str(ret));
                goto fail;
            }
        }
    }
    for (; i < nb_streams; i++)
        streams[i].mux = streams[0].mux;

    /* Create output device streams */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        if (!(st->stream = avformat_new_stream(st->mux, NULL))) {
            ret = AVERROR(ENOMEM);
            av_log(NULL, AV_LOG_ERROR, "Failed to create output stream\n");
            goto fail;
        }
        st->stream->codec->codec_type = st->link->type;
        st->stream->time_base = st->stream->codec->time_base =
            st->link->time_base;
        switch (st->link->type) {
        case AVMEDIA_TYPE_VIDEO:
            st->stream->codec->codec_id = AV_CODEC_ID_RAWVIDEO;
            st->stream->avg_frame_rate =
            st->stream->  r_frame_rate = av_buffersink_get_frame_rate(st->sink);
            st->stream->codec->width               = st->link->w;
            st->stream->codec->height              = st->link->h;
            st->stream->codec->sample_aspect_ratio = st->link->sample_aspect_ratio;
            st->stream->codec->pix_fmt             = st->link->format;
            break;
        case AVMEDIA_TYPE_AUDIO:
            st->stream->codec->channel_layout = st->link->channel_layout;
            st->stream->codec->channels = avfilter_link_get_channels(st->link);
            st->stream->codec->sample_rate = st->link->sample_rate;
            st->stream->codec->sample_fmt = st->link->format;
            st->stream->codec->codec_id =
                av_get_pcm_codec(st->stream->codec->sample_fmt, -1);
            break;
        default:
            av_assert0(!"reached");
        }
    }

    /* Init output devices */
    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        if ((ret = avformat_write_header(st->mux, NULL)) < 0) {
            av_log(st->mux, AV_LOG_ERROR, "Failed to init output: %s\n",
                   av_err2str(ret));
            goto fail;
        }
    }

    /* Check output devices */
    for (i = 0; i < nb_streams; i++) {
        st = &streams[i];
        ret = av_write_uncoded_frame_query(st->mux, st->stream->index);
        if (ret < 0) {
            av_log(st->mux, AV_LOG_ERROR,
                   "Uncoded frames not supported on stream #%d: %s\n",
                   i, av_err2str(ret));
            goto fail;
        }
    }

    while (run) {
        ret = avfilter_graph_request_oldest(in_graph);
        if (ret < 0) {
            if (ret == AVERROR_EOF) {
                run = 0;
            } else {
                av_log(NULL, AV_LOG_ERROR, "Error filtering: %s\n",
                       av_err2str(ret));
                break;
            }
        }
        for (i = 0; i < nb_streams; i++) {
            st = &streams[i];
            while (1) {
                if (!frame && !(frame = av_frame_alloc())) {
                    ret = AVERROR(ENOMEM);
                    av_log(NULL, AV_LOG_ERROR, "Could not allocate frame\n");
                    goto fail;
                }
                ret = av_buffersink_get_frame_flags(st->sink, frame,
                                                    AV_BUFFERSINK_FLAG_NO_REQUEST);
                if (ret < 0) {
                    if (ret != AVERROR(EAGAIN) && ret != AVERROR_EOF)
                        av_log(NULL, AV_LOG_WARNING, "Error in sink: %s\n",
                               av_err2str(ret));
                    break;
                }
                if (frame->pts != AV_NOPTS_VALUE)
                    frame->pts = av_rescale_q(frame->pts,
                                              st->link  ->time_base,
                                              st->stream->time_base);
                ret = av_interleaved_write_uncoded_frame(st->mux,
                                                         st->stream->index,
                                                         frame);
                frame = NULL;
                if (ret < 0) {
                    av_log(st->stream->codec, AV_LOG_ERROR,
                           "Error writing frame: %s\n", av_err2str(ret));
                    goto fail;
                }
            }
        }
    }
    ret = 0;

    for (i = 0; i < nb_out_dev; i++) {
        st = &streams[i];
        av_write_trailer(st->mux);
    }

fail:
    av_frame_free(&frame);
    avfilter_graph_free(&in_graph);
    if (streams) {
        for (i = 0; i < nb_out_dev; i++) {
            st = &streams[i];
            if (st->mux) {
                if (st->mux->pb)
                    avio_closep(&st->mux->pb);
                avformat_free_context(st->mux);
            }
        }
    }
    av_freep(&streams);
    return ret < 0;
}
Code example #9
File: f_lavfi.c Project: Akemi/mpv
static bool read_output_pads(struct lavfi *c)
{
    bool progress = false;

    assert(c->initialized);

    for (int n = 0; n < c->num_out_pads; n++) {
        struct lavfi_pad *pad = c->out_pads[n];

        if (!mp_pin_in_needs_data(pad->pin))
            continue;

        assert(pad->buffer);

        int r = AVERROR_EOF;
        if (!pad->buffer_is_eof)
            r = av_buffersink_get_frame_flags(pad->buffer, c->tmp_frame, 0);
        if (r >= 0) {
#if LIBAVUTIL_VERSION_MICRO >= 100
            mp_tags_copy_from_av_dictionary(pad->metadata, c->tmp_frame->metadata);
#endif
            struct mp_frame frame =
                mp_frame_from_av(pad->type, c->tmp_frame, &pad->timebase);
            if (c->emulate_audio_pts && frame.type == MP_FRAME_AUDIO) {
                AVFrame *avframe = c->tmp_frame;
                struct mp_aframe *aframe = frame.data;
                double in_time = c->in_samples * av_q2d(c->in_pads[0]->timebase);
                double out_time = avframe->pts * av_q2d(pad->timebase);
                mp_aframe_set_pts(aframe, c->in_pts +
                    (c->in_pts != MP_NOPTS_VALUE ? (out_time - in_time) : 0));
            }
            if (frame.type == MP_FRAME_VIDEO) {
                struct mp_image *vframe = frame.data;
                vframe->nominal_fps =
                    av_q2d(av_buffersink_get_frame_rate(pad->buffer));
            }
            av_frame_unref(c->tmp_frame);
            if (frame.type) {
                mp_pin_in_write(pad->pin, frame);
            } else {
                MP_ERR(c, "could not use filter output\n");
                mp_frame_unref(&frame);
            }
            progress = true;
        } else if (r == AVERROR(EAGAIN)) {
            // We expect that libavfilter will request input on one of the
            // input pads (via av_buffersrc_get_nb_failed_requests()).
        } else if (r == AVERROR_EOF) {
            if (!c->draining_recover && !pad->buffer_is_eof)
                mp_pin_in_write(pad->pin, MP_EOF_FRAME);
            if (!pad->buffer_is_eof)
                progress = true;
            pad->buffer_is_eof = true;
        } else {
            // Real error: log it, but otherwise ignore it and keep going.
            MP_ERR(c, "error on filtering (%d)\n", r);
        }
    }

    return progress;
}
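
The AVERROR(EAGAIN) branch above leans on the buffersrc/buffersink contract: when the sink has nothing buffered, input has to be pushed on a source pad before polling again. A minimal pump under that assumption; src, sink, and the two frames are stand-ins for state created during graph setup:

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* One pump iteration: push a decoded frame into the graph, then drain
 * whatever the sink can produce. Passing in == NULL signals EOF. */
static int pump_once(AVFilterContext *src, AVFilterContext *sink,
                     AVFrame *in, AVFrame *out)
{
    int ret = av_buffersrc_add_frame(src, in);
    if (ret < 0)
        return ret;
    while ((ret = av_buffersink_get_frame_flags(sink, out, 0)) >= 0) {
        /* ... consume out ... */
        av_frame_unref(out);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}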
Code example #10
int attribute_align_arg av_buffersink_get_frame(AVFilterContext *ctx, AVFrame *frame)
{
    return av_buffersink_get_frame_flags(ctx, frame, 0);
}
Code example #11
int main(int argc, char* argv[])
{
    AVFormatContext *ifmt_ctx = NULL;
    AVFormatContext *ifmt_ctx_a = NULL;
    AVFormatContext *ofmt_ctx;
    AVInputFormat* ifmt;
    AVStream* video_st;
    AVStream* audio_st;
    AVCodecContext* pCodecCtx;
    AVCodecContext* pCodecCtx_a;
    AVCodec* pCodec;
    AVCodec* pCodec_a;
    AVPacket *dec_pkt, enc_pkt;
    AVPacket *dec_pkt_a, enc_pkt_a;
    AVFrame *pframe, *pFrameYUV;
    struct SwsContext *img_convert_ctx;
    struct SwrContext *aud_convert_ctx;

    char capture_name[80] = { 0 };
    char device_name[80] = { 0 };
    char device_name_a[80] = { 0 };
    int framecnt = 0;
    int nb_samples = 0;
    int videoindex;
    int audioindex;
    int i;
    int ret;
    HANDLE  hThread;

    const char* out_path = "rtmp://localhost/live/livestream";
    int dec_got_frame, enc_got_frame;
    int dec_got_frame_a, enc_got_frame_a;

    int aud_next_pts = 0;
    int vid_next_pts = 0;
    int encode_video = 1, encode_audio = 1;

    AVRational time_base_q = { 1, AV_TIME_BASE };

    av_register_all();
    //Register Device
    avdevice_register_all();
    avformat_network_init();
#if USEFILTER
    //Register Filter
    avfilter_register_all();
    buffersrc = avfilter_get_by_name("buffer");
    buffersink = avfilter_get_by_name("buffersink");
#endif

    //Show Dshow Device  
    show_dshow_device();

    printf("\nChoose video capture device: ");
    if (fgets(capture_name, sizeof(capture_name), stdin) == NULL)
    {
        printf("Error reading device name\n");
        return -1;
    }
    capture_name[strcspn(capture_name, "\n")] = '\0'; /* strip trailing newline (strcspn needs <string.h>) */
    sprintf(device_name, "video=%s", capture_name);

	printf("\nChoose audio capture device: ");
	if (fgets(capture_name, sizeof(capture_name), stdin) == NULL)
	{
		printf("Error reading device name\n");
		return -1;
	}
	capture_name[strcspn(capture_name, "\n")] = '\0'; /* strip trailing newline */
	sprintf(device_name_a, "audio=%s", capture_name);

    //wchar_t *cam = L"video=Integrated Camera";
	//wchar_t *cam = L"video=YY伴侣";
	//char *device_name_utf8 = dup_wchar_to_utf8(cam);
    //wchar_t *cam_a = L"audio=麦克风阵列 (Realtek High Definition Audio)";
	//char *device_name_utf8_a = dup_wchar_to_utf8(cam_a);

    ifmt = av_find_input_format("dshow");
    // Set device params
    AVDictionary *device_param = 0;
    // If rtbufsize is not set, error messages appear in the console, but the stream
    // can usually still be watched or recorded correctly.
    // Setting rtbufsize suppresses those messages, but a larger rtbufsize adds latency.
    //av_dict_set(&device_param, "rtbufsize", "10M", 0);

    //Set own video device's name
    if (avformat_open_input(&ifmt_ctx, device_name, ifmt, &device_param) != 0){
        printf("Couldn't open input video stream.\n");
        return -1;
    }
    //Set own audio device's name
    if (avformat_open_input(&ifmt_ctx_a, device_name_a, ifmt, &device_param) != 0){
        printf("Couldn't open input audio stream.\n");
        return -1;
    }
    //input video initialize
    if (avformat_find_stream_info(ifmt_ctx, NULL) < 0)
    {
        printf("Couldn't find video stream information.\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        videoindex = i;
        break;
    }
    if (videoindex == -1)
    {
        printf("Couldn't find a video stream.\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec, avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open video codec.\n");
        return -1;
    }
    //input audio initialize
    if (avformat_find_stream_info(ifmt_ctx_a, NULL) < 0)
    {
        printf("Couldn't find audio stream information.\n");
        return -1;
    }
    audioindex = -1;
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++)
    if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        audioindex = i;
        break;
    }
    if (audioindex == -1)
    {
        printf("Couldn't find an audio stream.\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx_a->streams[audioindex]->codec, avcodec_find_decoder(ifmt_ctx_a->streams[audioindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open audio codec.\n");
        return -1;
    }

    //output initialize
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);
    //output video encoder initialize
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec){
        printf("Cannot find output video encoder!\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width;
    pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->bit_rate = 300000;
    pCodecCtx->gop_size = 250;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    //H264 codec param
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    //pCodecCtx->qcompress = 0.6;
    pCodecCtx->qmin = 10;
    pCodecCtx->qmax = 51;
    //Optional Param
    pCodecCtx->max_b_frames = 0;
    // Set H264 preset and tune
    AVDictionary *param = 0;
    av_dict_set(&param, "preset", "fast", 0);
    av_dict_set(&param, "tune", "zerolatency", 0);

    if (avcodec_open2(pCodecCtx, pCodec, &param) < 0){
        printf("Failed to open output video encoder!\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() for muxing
    video_st = avformat_new_stream(ofmt_ctx, pCodec);
    if (video_st == NULL){
        return -1;
    }
    video_st->time_base.num = 1;
    video_st->time_base.den = 25;
    video_st->codec = pCodecCtx;


    //output audio encoder initialize
    pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!pCodec_a){
        printf("Cannot find output audio encoder!\n");
        return -1;
    }
    pCodecCtx_a = avcodec_alloc_context3(pCodec_a);
    pCodecCtx_a->channels = 2;
    pCodecCtx_a->channel_layout = av_get_default_channel_layout(2);
	pCodecCtx_a->sample_rate = ifmt_ctx_a->streams[audioindex]->codec->sample_rate;
    pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0];
    pCodecCtx_a->bit_rate = 32000;
    pCodecCtx_a->time_base.num = 1;
	pCodecCtx_a->time_base.den = pCodecCtx_a->sample_rate;
    /** Allow the use of the experimental AAC encoder */
    pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if (avcodec_open2(pCodecCtx_a, pCodec_a, NULL) < 0){
        printf("Failed to open output audio encoder!\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() for muxing
    audio_st = avformat_new_stream(ofmt_ctx, pCodec_a);
    if (audio_st == NULL){
        return -1;
    }
    audio_st->time_base.num = 1;
	audio_st->time_base.den = pCodecCtx_a->sample_rate;
    audio_st->codec = pCodecCtx_a;

    //Open output URL,set before avformat_write_header() for muxing
    if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return -1;
    }

    //Show some Information
    av_dump_format(ofmt_ctx, 0, out_path, 1);

    //Write File Header
    avformat_write_header(ofmt_ctx, NULL);

    //prepare before decode and encode
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));

#if USEFILTER
#else
    //camera data may have a pixel format of RGB or something else; convert it to YUV420
    img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height,
        ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    
	// Initialize the resampler to be able to convert audio sample formats
	aud_convert_ctx = swr_alloc_set_opts(NULL,
		av_get_default_channel_layout(pCodecCtx_a->channels),
		pCodecCtx_a->sample_fmt,
		pCodecCtx_a->sample_rate,
		av_get_default_channel_layout(ifmt_ctx_a->streams[audioindex]->codec->channels),
		ifmt_ctx_a->streams[audioindex]->codec->sample_fmt,
		ifmt_ctx_a->streams[audioindex]->codec->sample_rate,
		0, NULL);
	
	/**
	* Perform a sanity check so that the number of converted samples is
	* not greater than the number of samples to be converted.
	* If the sample rates differ, this case has to be handled differently
	*/
	//av_assert0(pCodecCtx_a->sample_rate == ifmt_ctx_a->streams[audioindex]->codec->sample_rate);

	swr_init(aud_convert_ctx);

    
#endif
    //Initialize the buffer to store YUV frames to be encoded.
	pFrameYUV = av_frame_alloc();
    uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

	//Initialize the FIFO buffer to store audio samples to be encoded. 
    AVAudioFifo *fifo = NULL;
	fifo = av_audio_fifo_alloc(pCodecCtx_a->sample_fmt, pCodecCtx_a->channels, 1);

	//Initialize the buffer to store converted samples to be encoded.
	uint8_t **converted_input_samples = NULL;
	/**
	* Allocate as many pointers as there are audio channels.
	* Each pointer will later point to the audio samples of the corresponding
	* channels (although it may be NULL for interleaved formats).
	*/
	if (!(converted_input_samples = (uint8_t**)calloc(pCodecCtx_a->channels,
		sizeof(*converted_input_samples)))) { /* one pointer per channel */
		printf("Could not allocate converted input sample pointers\n");
		return AVERROR(ENOMEM);
	}


    printf("\n --------call started----------\n");
#if USEFILTER
    printf("\n Press a different number for a different filter:");
    printf("\n 1->Mirror");
    printf("\n 2->Add Watermark");
    printf("\n 3->Negate");
    printf("\n 4->Draw Edge");
    printf("\n 5->Split Into 4");
    printf("\n 6->Vintage");
    printf("\n Press 0 to remove filter\n");
#endif
    printf("\nPress enter to stop...\n");
    hThread = CreateThread(
        NULL,                   // default security attributes
        0,                      // use default stack size  
        MyThreadFunction,       // thread function name
        NULL,          // argument to thread function 
        0,                      // use default creation flags 
        NULL);   // returns the thread identifier 

    //start decode and encode
    int64_t start_time = av_gettime();
    while (encode_video || encode_audio)
    {
        if (encode_video &&
			(!encode_audio || av_compare_ts(vid_next_pts, time_base_q,
			aud_next_pts, time_base_q) <= 0))
        {
            if ((ret=av_read_frame(ifmt_ctx, dec_pkt)) >= 0){

                if (exit_thread)
                    break;

                av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");
                pframe = av_frame_alloc();
                if (!pframe) {
                    ret = AVERROR(ENOMEM);
                    return ret;
                }
                ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe,
                    &dec_got_frame, dec_pkt);
                if (ret < 0) {
                    av_frame_free(&pframe);
                    av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                    break;
                }
                if (dec_got_frame){
#if USEFILTER
                    pframe->pts = av_frame_get_best_effort_timestamp(pframe);

                    if (filter_change)
                        apply_filters(ifmt_ctx);
                    filter_change = 0;
                    /* push the decoded frame into the filtergraph */
                    if (av_buffersrc_add_frame(buffersrc_ctx, pframe) < 0) {
                        printf("Error while feeding the filtergraph\n");
                        break;
                    }
                    picref = av_frame_alloc();

                    /* pull filtered pictures from the filtergraph */
                    while (1) {
                        ret = av_buffersink_get_frame_flags(buffersink_ctx, picref, 0);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;
                        if (ret < 0)
                            return ret;

                        if (picref) {
                            img_convert_ctx = sws_getContext(picref->width, picref->height, (AVPixelFormat)picref->format, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                            sws_scale(img_convert_ctx, (const uint8_t* const*)picref->data, picref->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                            sws_freeContext(img_convert_ctx);
                            pFrameYUV->width = picref->width;
                            pFrameYUV->height = picref->height;
                            pFrameYUV->format = PIX_FMT_YUV420P;
#else
                    sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                    pFrameYUV->width = pframe->width;
                    pFrameYUV->height = pframe->height;
                    pFrameYUV->format = PIX_FMT_YUV420P;
#endif					
                    enc_pkt.data = NULL;
                    enc_pkt.size = 0;
                    av_init_packet(&enc_pkt);
                    ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame);
                    av_frame_free(&pframe);
                    if (enc_got_frame == 1){
                        //printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size);
                        framecnt++;
                        enc_pkt.stream_index = video_st->index;						

                        //Write PTS
						AVRational time_base = ofmt_ctx->streams[0]->time_base;//{ 1, 1000 };
                        AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate;//{ 50, 2 }; 
                        //Duration between 2 frames (us)
                        int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1)); // in internal AV_TIME_BASE units
                        //Parameters
                        //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
                        enc_pkt.dts = enc_pkt.pts;
                        enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pos = -1;
                        //printf("video pts : %d\n", enc_pkt.pts);

						vid_next_pts = framecnt*calc_duration; //general time base

                        //Delay
						int64_t pts_time = av_rescale_q(enc_pkt.pts, time_base, time_base_q);
						int64_t now_time = av_gettime() - start_time;						
						if ((pts_time > now_time) && ((vid_next_pts + pts_time - now_time)<aud_next_pts))
							av_usleep(pts_time - now_time);
						
                        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
                        av_free_packet(&enc_pkt);
                    }
#if USEFILTER
                    av_frame_unref(picref);
                }
            }
#endif
        }
        else {
            av_frame_free(&pframe);
        }
        av_free_packet(dec_pkt);
    }
    else
		if (ret == AVERROR_EOF)
			encode_video = 0;
		else
		{
			printf("Could not read video frame\n");
			return ret;
		}
    }
    else
    {
        //audio trancoding here
        const int output_frame_size = pCodecCtx_a->frame_size;

		if (exit_thread)
			break;

        /**
        * Make sure that there is one frame worth of samples in the FIFO
        * buffer so that the encoder can do its work.
        * Since the decoder's and the encoder's frame sizes may differ, we
        * need the FIFO buffer to store as many frames' worth of input samples
        * as are needed to make up at least one frame's worth of output samples.
        */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
            * Decode one frame worth of audio samples, convert it to the
            * output sample format and put it into the FIFO buffer.
            */
			AVFrame *input_frame = av_frame_alloc();
			if (!input_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}			
			
			/** Decode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket input_packet;
			av_init_packet(&input_packet);
			input_packet.data = NULL;
			input_packet.size = 0;
			
			/** Read one audio frame from the input file into a temporary packet. */
			if ((ret = av_read_frame(ifmt_ctx_a, &input_packet)) < 0) {
				/** If we are at the end of the file, flush the decoder below. */
				if (ret == AVERROR_EOF)
				{
					encode_audio = 0;
				}
				else
				{
					printf("Could not read audio frame\n");
					return ret;
				}					
			}

			/**
			* Decode the audio frame stored in the temporary packet.
			* The input audio stream decoder is used to do this.
			* If we are at the end of the file, pass an empty packet to the decoder
			* to flush it.
			*/
			if ((ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioindex]->codec, input_frame,
				&dec_got_frame_a, &input_packet)) < 0) {
				printf("Could not decode audio frame\n");
				return ret;
			}
			av_packet_unref(&input_packet);
			/** If there is decoded data, convert and store it */
			if (dec_got_frame_a) {
				/**
				* Allocate memory for the samples of all channels in one consecutive
				* block for convenience.
				*/
				if ((ret = av_samples_alloc(converted_input_samples, NULL,
					pCodecCtx_a->channels,
					input_frame->nb_samples,
					pCodecCtx_a->sample_fmt, 0)) < 0) {
					printf("Could not allocate converted input samples\n");
					av_freep(&converted_input_samples[0]);
					free(converted_input_samples);
					return ret;
				}

				/**
				* Convert the input samples to the desired output sample format.
				* This requires a temporary storage provided by converted_input_samples.
				*/
				/** Convert the samples using the resampler. */
				if ((ret = swr_convert(aud_convert_ctx,
					converted_input_samples, input_frame->nb_samples,
					(const uint8_t**)input_frame->extended_data, input_frame->nb_samples)) < 0) {
					printf("Could not convert input samples\n");
					return ret;
				}

				/** Add the converted input samples to the FIFO buffer for later processing. */
				/**
				* Make the FIFO as large as it needs to be to hold both,
				* the old and the new samples.
				*/
				if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples)) < 0) {
					printf("Could not reallocate FIFO\n");
					return ret;
				}

				/** Store the new samples in the FIFO buffer. */
				if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
					input_frame->nb_samples) < input_frame->nb_samples) {
					printf("Could not write data to FIFO\n");
					return AVERROR_EXIT;
				}				
			}
        }

        /**
        * If we have enough samples for the encoder, we encode them.
        * At the end of the file, we pass the remaining samples to
        * the encoder.
        */
        if (av_audio_fifo_size(fifo) >= output_frame_size)
            /**
            * Take one frame worth of audio samples from the FIFO buffer,
            * encode it and write it to the output file.
            */
        {
            /** Temporary storage of the output samples of the frame written to the file. */
			AVFrame *output_frame=av_frame_alloc();
			if (!output_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}
			/**
			* Use the maximum number of possible samples per frame.
			* If there is less than the maximum possible frame size in the FIFO
			* buffer use this number. Otherwise, use the maximum possible frame size
			*/
			const int frame_size = FFMIN(av_audio_fifo_size(fifo),
				pCodecCtx_a->frame_size);
			
			/** Initialize temporary storage for one output frame. */
			/**
			* Set the frame's parameters, especially its size and format.
			* av_frame_get_buffer needs this to allocate memory for the
			* audio samples of the frame.
			* Default channel layouts based on the number of channels
			* are assumed for simplicity.
			*/
			output_frame->nb_samples = frame_size;
			output_frame->channel_layout = pCodecCtx_a->channel_layout;
			output_frame->format = pCodecCtx_a->sample_fmt;
			output_frame->sample_rate = pCodecCtx_a->sample_rate;

			/**
			* Allocate the samples of the created frame. This call will make
			* sure that the audio frame can hold as many samples as specified.
			*/
			if ((ret = av_frame_get_buffer(output_frame, 0)) < 0) {
				printf("Could not allocate output frame samples\n");
				av_frame_free(&output_frame);
				return ret;
			}
			
			/**
			* Read as many samples from the FIFO buffer as required to fill the frame.
			* The samples are stored in the frame temporarily.
			*/
			if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
				printf("Could not read data from FIFO\n");
				return AVERROR_EXIT;
			}

			/** Encode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket output_packet;
			av_init_packet(&output_packet);
			output_packet.data = NULL;
			output_packet.size = 0;
			
			/** Set a timestamp based on the sample rate for the container. */
			if (output_frame) {
				nb_samples += output_frame->nb_samples;
			}

			/**
			* Encode the audio frame and store it in the temporary packet.
			* The output audio stream encoder is used to do this.
			*/
			if ((ret = avcodec_encode_audio2(pCodecCtx_a, &output_packet,
				output_frame, &enc_got_frame_a)) < 0) {
				printf("Could not encode frame\n");
				av_packet_unref(&output_packet);
				return ret;
			}

			/** Write one audio frame from the temporary packet to the output file. */
			if (enc_got_frame_a) {

				output_packet.stream_index = 1;

				AVRational time_base = ofmt_ctx->streams[1]->time_base;
				AVRational r_framerate1 = { ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 1 };// { 44100, 1};  
				int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));  // in internal AV_TIME_BASE units

				output_packet.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base);
				output_packet.dts = output_packet.pts;
				output_packet.duration = output_frame->nb_samples;

				//printf("audio pts : %d\n", output_packet.pts);
				aud_next_pts = nb_samples*calc_duration;

				int64_t pts_time = av_rescale_q(output_packet.pts, time_base, time_base_q);
				int64_t now_time = av_gettime() - start_time;
				if ((pts_time > now_time) && ((aud_next_pts + pts_time - now_time)<vid_next_pts))
					av_usleep(pts_time - now_time);

				if ((ret = av_interleaved_write_frame(ofmt_ctx, &output_packet)) < 0) {
					printf("Could not write frame\n");
					av_packet_unref(&output_packet);
					return ret;
				}

				av_packet_unref(&output_packet);
			}			
			av_frame_free(&output_frame);		
        }      
	}
  }


    //Flush Encoder
    ret = flush_encoder(ifmt_ctx, ofmt_ctx, 0, framecnt);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }
	ret = flush_encoder_a(ifmt_ctx_a, ofmt_ctx, 1, nb_samples);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}



    //Write file trailer
    av_write_trailer(ofmt_ctx);

cleanup:
    //Clean
#if USEFILTER
    if (filter_graph)
        avfilter_graph_free(&filter_graph);
#endif
    if (video_st)
        avcodec_close(video_st->codec);
    if (audio_st)
        avcodec_close(audio_st->codec);
    av_free(out_buffer);
	if (converted_input_samples) {
		av_freep(&converted_input_samples[0]);
		//free(converted_input_samples);
	}
	if (fifo)
		av_audio_fifo_free(fifo);
    avio_close(ofmt_ctx->pb);
    avformat_free_context(ifmt_ctx);
	avformat_free_context(ifmt_ctx_a);
    avformat_free_context(ofmt_ctx);
    CloseHandle(hThread);
    return 0;
}
Code example #12
static vod_status_t
audio_filter_read_filter_sink(audio_filter_state_t* state)
{
	AVPacket output_packet;
	vod_status_t rc;
	int got_packet;
	int avrc;
#ifdef AUDIO_FILTER_DEBUG
	size_t data_size;
#endif // AUDIO_FILTER_DEBUG

	for (;;)
	{
		avrc = av_buffersink_get_frame_flags(state->sink.buffer_sink, state->filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST);
		if (avrc == AVERROR(EAGAIN) || avrc == AVERROR_EOF)
		{
			break;
		}

		if (avrc < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_read_filter_sink: av_buffersink_get_frame_flags failed %d", avrc);
			return VOD_UNEXPECTED;
		}

#ifdef AUDIO_FILTER_DEBUG
		data_size = av_samples_get_buffer_size(
			NULL,
			state->sink.encoder->channels,
			state->filtered_frame->nb_samples,
			state->sink.encoder->sample_fmt,
			1);
		audio_filter_append_debug_data("sink", "pcm", state->filtered_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG

		av_init_packet(&output_packet);
		output_packet.data = NULL; // packet data will be allocated by the encoder
		output_packet.size = 0;

		got_packet = 0;
		avrc = avcodec_encode_audio2(state->sink.encoder, &output_packet, state->filtered_frame, &got_packet);

		av_frame_unref(state->filtered_frame);

		if (avrc < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_read_filter_sink: avcodec_encode_audio2 failed %d", avrc);
			return VOD_ALLOC_FAILED;
		}

		if (got_packet)
		{
			rc = audio_filter_write_frame(state, &output_packet);

			av_free_packet(&output_packet);

			if (rc != VOD_OK)
			{
				return rc;
			}
		}
	}

	return VOD_OK;
}
Code example #13
vod_status_t 
audio_filter_process_frame(void* context, input_frame_t* frame, u_char* buffer)
{
	audio_filter_state_t* state = (audio_filter_state_t*)context;
	vod_status_t rc;
	AVPacket output_packet;
	AVPacket input_packet;
	int got_packet;
	int got_frame;
	int ret;
#ifdef AUDIO_FILTER_DEBUG
	size_t data_size;
#endif // AUDIO_FILTER_DEBUG
	
	if (frame == NULL)
	{
		return audio_filter_flush_encoder(state);
	}

#ifdef AUDIO_FILTER_DEBUG
	audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_INPUT, buffer, frame->size);
#endif // AUDIO_FILTER_DEBUG
	
	vod_memzero(&input_packet, sizeof(input_packet));
	input_packet.data = buffer;
	input_packet.size = frame->size;
	input_packet.dts = state->dts;
	input_packet.pts = (state->dts + frame->pts_delay);
	input_packet.duration = frame->duration;
	input_packet.flags = AV_PKT_FLAG_KEY;
	state->dts += frame->duration;
	
	avcodec_get_frame_defaults(state->decoded_frame);

	got_frame = 0;
	ret = avcodec_decode_audio4(state->decoder, state->decoded_frame, &got_frame, &input_packet);
	if (ret < 0) 
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_filter_process_frame: avcodec_decode_audio4 failed %d", ret);
		return VOD_BAD_DATA;
	}

	if (!got_frame)
	{
		return VOD_OK;
	}

#ifdef AUDIO_FILTER_DEBUG
	data_size = av_samples_get_buffer_size(
		NULL, 
		state->decoder->channels,
		state->decoded_frame->nb_samples,
		state->decoder->sample_fmt, 
		1);
	audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_DECODED, state->decoded_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG
	
	ret = av_buffersrc_add_frame_flags(state->buffer_src, state->decoded_frame, AV_BUFFERSRC_FLAG_PUSH);
	if (ret < 0) 
	{
		vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
			"audio_filter_process_frame: av_buffersrc_add_frame_flags failed %d", ret);
		return VOD_ALLOC_FAILED;
	}

	for (;;)
	{
		ret = av_buffersink_get_frame_flags(state->buffer_sink, state->filtered_frame, AV_BUFFERSINK_FLAG_NO_REQUEST);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
		{
			break;
		}
		
		if (ret < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_process_frame: av_buffersink_get_frame_flags failed %d", ret);
			return VOD_UNEXPECTED;
		}

#ifdef AUDIO_FILTER_DEBUG
		data_size = av_samples_get_buffer_size(
			NULL, 
			state->encoder->channels,
			state->filtered_frame->nb_samples,
			state->encoder->sample_fmt, 
			1);
		audio_filter_append_debug_data(AUDIO_FILTER_DEBUG_FILENAME_FILTERED, state->filtered_frame->data[0], data_size);
#endif // AUDIO_FILTER_DEBUG

		av_init_packet(&output_packet);
		output_packet.data = NULL; // packet data will be allocated by the encoder
		output_packet.size = 0;

		got_packet = 0;
		ret = avcodec_encode_audio2(state->encoder, &output_packet, state->filtered_frame, &got_packet);
		if (ret < 0)
		{
			vod_log_error(VOD_LOG_ERR, state->request_context->log, 0,
				"audio_filter_process_frame: avcodec_encode_audio2 failed %d", ret);
			return VOD_ALLOC_FAILED;
		}
		
		if (got_packet)
		{
			rc = audio_filter_write_frame(state, &output_packet);

			av_free_packet(&output_packet);
			
			if (rc != VOD_OK)
			{
				return rc;
			}
		}
		
		av_frame_unref(state->filtered_frame);
	}
	
	return VOD_OK;
}
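
Example #13 flushes the encoder when frame == NULL, but the filter graph itself is drained by first sending EOF to the buffer source. A sketch under the same state layout; the three parameters mirror the example's buffer_src, buffer_sink, and filtered_frame fields:

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Signal EOF to the graph source so the sink gives up its tail frames. */
static int flush_graph(AVFilterContext *buffer_src, AVFilterContext *buffer_sink,
                       AVFrame *filtered_frame)
{
    int ret = av_buffersrc_add_frame_flags(buffer_src, NULL, 0); /* NULL == EOF */
    if (ret < 0)
        return ret;
    while ((ret = av_buffersink_get_frame_flags(buffer_sink, filtered_frame, 0)) >= 0) {
        /* ... encode each remaining frame ... */
        av_frame_unref(filtered_frame);
    }
    return ret == AVERROR_EOF ? 0 : ret; /* EOF is the expected terminator */
}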