Code Example #1
int configure_output_filter(struct liveStream *ctx, AVFilterInOut *out)
{
	int ret = 0;
	AVFilterContext *filter;
	int pad_idx = out->pad_idx;
	AVFilterContext *last_filter = out->filter_ctx;

	ret = avfilter_graph_create_filter(&ctx->out_filter, avfilter_get_by_name("buffersink"), "out", NULL, NULL, ctx->filter_graph);
	if (ret < 0)
		return ret;
	ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("format"), "format", "yuv420p", NULL, ctx->filter_graph);
	if (ret < 0)
		return ret;

	if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
		return ret;

	last_filter = filter;
	pad_idx = 0;
	if ((ret = avfilter_link(last_filter, pad_idx, ctx->out_filter, 0)) < 0)
		return ret;

	return ret;
}
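The function above only creates and links the sink; what follows is a minimal, hedged sketch (not part of the original project) of how a buffersink such as ctx->out_filter is typically drained after avfilter_graph_config() succeeds. The drain_buffersink() name and the on_frame callback are illustrative only.

#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Hedged sketch: pull every frame currently available from a configured
 * buffersink (e.g. ctx->out_filter above) and hand it to a callback.
 * Returns 0 when the sink is drained, a negative AVERROR code on error. */
static int drain_buffersink(AVFilterContext *sink,
                            int (*on_frame)(AVFrame *frame, void *opaque),
                            void *opaque)
{
    AVFrame *frame = av_frame_alloc();
    int ret;

    if (!frame)
        return AVERROR(ENOMEM);

    while (1) {
        ret = av_buffersink_get_frame(sink, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            ret = 0;            /* nothing more to pull for now */
            break;
        }
        if (ret < 0)
            break;              /* real error */
        ret = on_frame(frame, opaque);
        av_frame_unref(frame);  /* callback must copy/ref anything it keeps */
        if (ret < 0)
            break;
    }

    av_frame_free(&frame);
    return ret;
}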
Code Example #2
File: LibAVFilter.cpp  Project: ccf19881030/QtAV
    bool setup() {
        avfilter_graph_free(&filter_graph);
        filter_graph = avfilter_graph_alloc();
        //QString sws_flags_str;
        QString buffersrc_args = QString("video_size=%1x%2:pix_fmt=%3:time_base=%4/%5:sar=1")
                .arg(width).arg(height).arg(pixfmt).arg(1).arg(AV_TIME_BASE);
        qDebug("buffersrc_args=%s", buffersrc_args.toUtf8().constData());
        AVFilter *buffersrc  = avfilter_get_by_name("buffer");
        Q_ASSERT(buffersrc);
        int ret = avfilter_graph_create_filter(&in_filter_ctx,
                                               buffersrc,
                                               "in", buffersrc_args.toUtf8().constData(), NULL,
                                               filter_graph);
        if (ret < 0) {
            qWarning("Can not create buffer source: %s", av_err2str(ret));
            return false;
        }
        /* buffer video sink: to terminate the filter chain. */
        AVFilter *buffersink = avfilter_get_by_name("buffersink");
        Q_ASSERT(buffersink);
        if ((ret = avfilter_graph_create_filter(&out_filter_ctx, buffersink, "out",
                                           NULL, NULL, filter_graph)) < 0) {
            qWarning("Can not create buffer sink: %s", av_err2str(ret));
            return false;
        }

        /* Endpoints for the filter graph. */
        AVFilterInOut *outputs = avfilter_inout_alloc();
        AVFilterInOut *inputs  = avfilter_inout_alloc();
        outputs->name       = av_strdup("in");
        outputs->filter_ctx = in_filter_ctx;
        outputs->pad_idx    = 0;
        outputs->next       = NULL;

        inputs->name       = av_strdup("out");
        inputs->filter_ctx = out_filter_ctx;
        inputs->pad_idx    = 0;
        inputs->next       = NULL;


        //avfilter_graph_parse, avfilter_graph_parse2?
        if ((ret = avfilter_graph_parse_ptr(filter_graph, options.toUtf8().constData(),
                                        &inputs, &outputs, NULL)) < 0) {
            qWarning("avfilter_graph_parse_ptr fail: %s", av_err2str(ret));
            avfilter_inout_free(&outputs);
            avfilter_inout_free(&inputs);
            return false;
        }
        if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0) {
            qWarning("avfilter_graph_config fail: %s", av_err2str(ret));
            avfilter_inout_free(&outputs);
            avfilter_inout_free(&inputs);
            return false;
        }
        avfilter_inout_free(&outputs);
        avfilter_inout_free(&inputs);
        avframe = av_frame_alloc();
        return true;
    }
Code Example #3
void Utility::FilterApplier::initFilters() {
	int ret=0;

	filterGraph_=avfilter_graph_alloc();
	if(!filterGraph_) {
		throw std::runtime_error("Could not create filtergraph");
	}

	AVFilter *buffersrc  = avfilter_get_by_name("buffer");
	AVFilter *buffersink = avfilter_get_by_name("buffersink");

	std::string args="video_size="+std::to_string(width_)+"x"+std::to_string(
	                     height_)+":pix_fmt="+std::to_string(pixelFormat_)+":time_base=1/1";
	ret=avfilter_graph_create_filter(&buffersourceContext_, buffersrc, "in",args.c_str(), NULL,
	                                 filterGraph_);
	if(ret<0) {
		throw std::runtime_error("Could not create the buffersource for the filtergraph.");
	}

	ret = avfilter_graph_create_filter(&buffersinkContext_, buffersink, "out",NULL, NULL, filterGraph_);
	if(ret<0) {
		throw std::runtime_error("Could not create the buffersink for the filtergraph.");
	}

	enum AVPixelFormat pixelFormats[]= {AV_PIX_FMT_RGB24,AV_PIX_FMT_NONE};
	ret = av_opt_set_int_list(buffersinkContext_, "pix_fmts", pixelFormats,AV_PIX_FMT_NONE,
	                          AV_OPT_SEARCH_CHILDREN);
	if (ret < 0) {
		throw std::runtime_error("Could net set output pixelformat for the graph");
	}

	AVFilterInOut *outputs = avfilter_inout_alloc();
	AVFilterInOut *inputs  = avfilter_inout_alloc();

	outputs->name       = av_strdup("in");
	outputs->filter_ctx = buffersourceContext_;
	outputs->pad_idx    = 0;
	outputs->next       = NULL;

	inputs->name       = av_strdup("out");
	inputs->filter_ctx = buffersinkContext_;
	inputs->pad_idx    = 0;
	inputs->next       = NULL;

	ret = avfilter_graph_parse_ptr(filterGraph_, filterDescription_.c_str(), &inputs, &outputs, NULL);
	if(ret<0) {
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);
		throw std::runtime_error("Could not parse the filter descritopns.");
	}

	ret = avfilter_graph_config(filterGraph_, NULL);
	if(ret<0) {
		avfilter_inout_free(&inputs);
		avfilter_inout_free(&outputs);
		throw std::runtime_error("Could not configure filtergraph.");
	}

	/* The in/out lists are no longer needed once the graph is configured;
	 * free them on the success path as well to avoid leaking them. */
	avfilter_inout_free(&inputs);
	avfilter_inout_free(&outputs);
}
Code Example #4
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *first_filter = in->filter_ctx;
    AVFilter *filter = avfilter_get_by_name("abuffer");
    InputStream *ist = ifilter->ist;
    int pad_idx = in->pad_idx;
    char args[255], name[255];
    int ret;

    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
             ":channel_layout=0x%"PRIx64,
             1, ist->st->codec->sample_rate,
             ist->st->codec->sample_rate,
             av_get_sample_fmt_name(ist->st->codec->sample_fmt),
             ist->st->codec->channel_layout);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter,
                                            name, args, NULL,
                                            fg->graph)) < 0)
        return ret;

    if (audio_sync_method > 0) {
        AVFilterContext *async;
        char args[256];
        int  len = 0;

        av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
               "asyncts audio filter instead.\n");

        if (audio_sync_method > 1)
            len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
                            "max_comp=%d:", audio_sync_method);
        snprintf(args + len, sizeof(args) - len, "min_delta=%f",
                 audio_drift_threshold);

        snprintf(name, sizeof(name), "graph %d audio sync for input stream %d:%d",
                 fg->index, ist->file_index, ist->st->index);
        ret = avfilter_graph_create_filter(&async,
                                           avfilter_get_by_name("asyncts"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(async, 0, first_filter, pad_idx);
        if (ret < 0)
            return ret;

        first_filter = async;
        pad_idx = 0;
    }
    if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
        return ret;

    return 0;
}
Code Example #5
File: filtering_video.c  Project: TomAndCat/FFmpeg
static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret;
    AVFilter *buffersrc  = avfilter_get_by_name("buffer");
    AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };
    AVBufferSinkParams *buffersink_params;

    filter_graph = avfilter_graph_alloc();

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
            dec_ctx->time_base.num, dec_ctx->time_base.den,
            dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        return ret;
    }

    /* buffer video sink: to terminate the filter chain. */
    buffersink_params = av_buffersink_params_alloc();
    buffersink_params->pixel_fmts = pix_fmts;
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, buffersink_params, filter_graph);
    av_free(buffersink_params);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        return ret;
    }

    /* Endpoints for the filter graph. */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                    &inputs, &outputs, NULL)) < 0)
        return ret;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        return ret;
    return 0;
}
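init_filters() above only builds the graph; a hedged sketch of the push/pull loop that usually follows it is shown below. Pass it the buffersrc_ctx and buffersink_ctx created above; the filter_one_frame() name and the consume callback are illustrative, not part of the original example.

#include <libavfilter/buffersrc.h>
#include <libavfilter/buffersink.h>
#include <libavutil/error.h>
#include <libavutil/frame.h>

/* Hedged sketch: feed one decoded frame into the graph and hand every
 * filtered frame that comes out of the sink to a caller-supplied callback. */
static int filter_one_frame(AVFilterContext *src, AVFilterContext *sink,
                            AVFrame *decoded, int (*consume)(AVFrame *filtered))
{
    AVFrame *filt_frame = av_frame_alloc();
    int ret;

    if (!filt_frame)
        return AVERROR(ENOMEM);

    /* KEEP_REF leaves ownership of "decoded" with the caller. */
    ret = av_buffersrc_add_frame_flags(src, decoded, AV_BUFFERSRC_FLAG_KEEP_REF);
    if (ret < 0)
        goto end;

    while ((ret = av_buffersink_get_frame(sink, filt_frame)) >= 0) {
        ret = consume(filt_frame);
        av_frame_unref(filt_frame);
        if (ret < 0)
            goto end;
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;                /* the sink simply has no more frames yet */

end:
    av_frame_free(&filt_frame);
    return ret;
}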
Code Example #6
File: avconv_filter.c  Project: Acidburn0zzz/libav
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
                                         ist->st->time_base;
    AVRational sar;
    char args[255], name[255];
    int ret, pad_idx = 0;

    sar = ist->st->sample_aspect_ratio.num ?
          ist->st->sample_aspect_ratio :
          ist->dec_ctx->sample_aspect_ratio;
    snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->dec_ctx->width,
             ist->dec_ctx->height,
             ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->dec_ctx->pix_fmt,
             tb.num, tb.den, sar.num, sar.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
                                            args, NULL, fg->graph)) < 0)
        return ret;
    last_filter = ifilter->filter;

    if (ist->framerate.num) {
        AVFilterContext *setpts;

        snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
                 ist->file_index, ist->st->index);
        if ((ret = avfilter_graph_create_filter(&setpts,
                                                avfilter_get_by_name("setpts"),
                                                name, "N", NULL,
                                                fg->graph)) < 0)
            return ret;

        if ((ret = avfilter_link(last_filter, 0, setpts, 0)) < 0)
            return ret;

        last_filter = setpts;
    }

    snprintf(name, sizeof(name), "trim for input stream %d:%d",
             ist->file_index, ist->st->index);
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;
    return 0;
}
Code Example #7
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *first_filter = in->filter_ctx;
    AVFilter *filter = avfilter_get_by_name("buffer");
    InputStream *ist = ifilter->ist;
    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
                                         ist->st->time_base;
    AVRational sar;
    char args[255], name[255];
    int pad_idx = in->pad_idx;
    int ret;

    sar = ist->st->sample_aspect_ratio.num ?
          ist->st->sample_aspect_ratio :
          ist->st->codec->sample_aspect_ratio;
    snprintf(args, sizeof(args), "%d:%d:%d:%d:%d:%d:%d", ist->st->codec->width,
             ist->st->codec->height, ist->st->codec->pix_fmt,
             tb.num, tb.den, sar.num, sar.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, filter, name,
                                            args, NULL, fg->graph)) < 0)
        return ret;

    if (ist->framerate.num) {
        AVFilterContext *setpts;

        snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
                 ist->file_index, ist->st->index);
        if ((ret = avfilter_graph_create_filter(&setpts,
                                                avfilter_get_by_name("setpts"),
                                                name, "N", NULL,
                                                fg->graph)) < 0)
            return ret;

        if ((ret = avfilter_link(setpts, 0, first_filter, pad_idx)) < 0)
            return ret;

        first_filter = setpts;
        pad_idx = 0;
    }

    if ((ret = avfilter_link(ifilter->filter, 0, first_filter, pad_idx)) < 0)
        return ret;
    return 0;
}
Code Example #8
File: uncoded_frame.c  Project: 0day-ci/FFmpeg
static int create_sink(Stream *st, AVFilterGraph *graph,
                       AVFilterContext *f, int idx)
{
    enum AVMediaType type = avfilter_pad_get_type(f->output_pads, idx);
    const char *sink_name;
    int ret;

    switch (type) {
    case AVMEDIA_TYPE_VIDEO: sink_name =  "buffersink"; break;
    case AVMEDIA_TYPE_AUDIO: sink_name = "abuffersink"; break;
    default:
        av_log(NULL, AV_LOG_ERROR, "Stream type not supported\n");
        return AVERROR(EINVAL);
    }
    ret = avfilter_graph_create_filter(&st->sink,
                                       avfilter_get_by_name(sink_name),
                                       NULL, NULL, NULL, graph);
    if (ret < 0)
        return ret;
    ret = avfilter_link(f, idx, st->sink, 0);
    if (ret < 0)
        return ret;
    st->link = st->sink->inputs[0];
    return 0;
}
Code Example #9
File: ffmpeg_filter.c  Project: ho1iarty/ffmpeg_vs
static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
                         const char *filter_name, const char *args)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    int ret;
    
    ret = avfilter_graph_create_filter(&ctx,
                                       avfilter_get_by_name(filter_name),
                                       filter_name, args, NULL, graph);
    if(ret < 0)
    {
        return ret;
    }
    
    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if(ret < 0)
    {
        return ret;
    }
    
    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}
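As a hedged usage sketch (the filter names are only illustrations), this is how the insert_filter() helper above is typically chained, mirroring what ffmpeg_filter.c does for display-matrix autorotation (compare Code Example #26):

/* Hedged sketch: append a horizontal flip and a 90-degree clockwise
 * transpose after an existing filter, using insert_filter() from above. */
static int append_rotation(AVFilterContext **last_filter, int *pad_idx)
{
    int ret;

    ret = insert_filter(last_filter, pad_idx, "hflip", NULL);
    if (ret < 0)
        return ret;

    return insert_filter(last_filter, pad_idx, "transpose", "dir=clock");
}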
Code Example #10
void 
audio_filter_process_init(vod_log_t* log)
{
	avcodec_register_all();
	avfilter_register_all();

	buffersrc_filter = avfilter_get_by_name(BUFFERSRC_FILTER_NAME);
	if (buffersrc_filter == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get buffer source filter, audio filtering is disabled");
		return;
	}

	buffersink_filter = avfilter_get_by_name(BUFFERSINK_FILTER_NAME);
	if (buffersink_filter == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get buffer sink filter, audio filtering is disabled");
		return;
	}

	decoder_codec = avcodec_find_decoder(AV_CODEC_ID_AAC);
	if (decoder_codec == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get AAC decoder, audio filtering is disabled");
		return;
	}

	encoder_codec = avcodec_find_encoder_by_name(AAC_ENCODER_NAME);
	if (encoder_codec == NULL)
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: failed to get AAC encoder, audio filtering is disabled. recompile libavcodec with libfdk_aac to enable it");
		return;
	}

	if (!audio_filter_is_format_supported(encoder_codec, ENCODER_INPUT_SAMPLE_FORMAT))
	{
		vod_log_error(VOD_LOG_WARN, log, 0,
			"audio_filter_process_init: encoder does not support the required input format, audio filtering is disabled");
		return;
	}

	initialized = TRUE;
}
Code Example #11
File: avfiltergraph.c  Project: JDsolution/ipnc
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
    int i, j;
    int scaler_count = 0;
    char inst_name[30];

    /* ask all the sub-filters for their supported colorspaces */
    for(i = 0; i < graph->filter_count; i ++) {
        if(graph->filters[i]->filter->query_formats)
            graph->filters[i]->filter->query_formats(graph->filters[i]);
        else
            avfilter_default_query_formats(graph->filters[i]);
    }

    /* go through and merge as many format lists as possible */
    for(i = 0; i < graph->filter_count; i ++) {
        AVFilterContext *filter = graph->filters[i];

        for(j = 0; j < filter->input_count; j ++) {
            AVFilterLink *link = filter->inputs[j];
            if(link && link->in_formats != link->out_formats) {
                if(!avfilter_merge_formats(link->in_formats,
                                           link->out_formats)) {
                    AVFilterContext *scale;
                    char scale_args[256];
                    /* couldn't merge format lists. auto-insert scale filter */
                    snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
                             scaler_count++);
                    scale =
                        avfilter_open(avfilter_get_by_name("scale"),inst_name);

                    snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
                    if(!scale || scale->filter->init(scale, scale_args, NULL) ||
                            avfilter_insert_filter(link, scale, 0, 0)) {
                        avfilter_destroy(scale);
                        return -1;
                    }

                    if (avfilter_graph_add_filter(graph, scale) < 0)
                        return -1;

                    scale->filter->query_formats(scale);
                    if (((link = scale-> inputs[0]) &&
                            !avfilter_merge_formats(link->in_formats, link->out_formats)) ||
                            ((link = scale->outputs[0]) &&
                             !avfilter_merge_formats(link->in_formats, link->out_formats))) {
                        av_log(log_ctx, AV_LOG_ERROR,
                               "Impossible to convert between the formats supported by the filter "
                               "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
                        return -1;
                    }
                }
            }
        }
    }

    return 0;
}
Code Example #12
File: avfiltergraph.c  Project: roman5566/FFmpeg
static int insert_conv_filter(AVFilterGraph *graph, AVFilterLink *link,
                              const char *filt_name, const char *filt_args)
{
    static int auto_count = 0, ret;
    char inst_name[32];
    AVFilterContext *filt_ctx;

    if (graph->disable_auto_convert) {
        av_log(NULL, AV_LOG_ERROR,
               "The filters '%s' and '%s' do not have a common format "
               "and automatic conversion is disabled.\n",
               link->src->name, link->dst->name);
        return AVERROR(EINVAL);
    }

    snprintf(inst_name, sizeof(inst_name), "auto-inserted %s %d",
            filt_name, auto_count++);

    if ((ret = avfilter_graph_create_filter(&filt_ctx,
                                            avfilter_get_by_name(filt_name),
                                            inst_name, filt_args, NULL, graph)) < 0)
        return ret;
    if ((ret = avfilter_insert_filter(link, filt_ctx, 0, 0)) < 0)
        return ret;

    filter_query_formats(filt_ctx);

    if ( ((link = filt_ctx-> inputs[0]) &&
           !ff_merge_formats(link->in_formats, link->out_formats)) ||
         ((link = filt_ctx->outputs[0]) &&
           !ff_merge_formats(link->in_formats, link->out_formats))
       ) {
        av_log(NULL, AV_LOG_ERROR,
               "Impossible to convert between the formats supported by the filter "
               "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
        return AVERROR(EINVAL);
    }

    if (link->type == AVMEDIA_TYPE_AUDIO &&
         (((link = filt_ctx-> inputs[0]) &&
           !ff_merge_channel_layouts(link->in_channel_layouts, link->out_channel_layouts)) ||
         ((link = filt_ctx->outputs[0]) &&
           !ff_merge_channel_layouts(link->in_channel_layouts, link->out_channel_layouts)))
       ) {
        av_log(NULL, AV_LOG_ERROR,
               "Impossible to convert between the channel layouts formats supported by the filter "
               "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
        return AVERROR(EINVAL);
    }

    return 0;
}
Code Example #13
File: avconv_filter.c  Project: darcyg/avconv
static int insert_trim(int64_t start_time, int64_t duration,
                       AVFilterContext **last_filter, int *pad_idx,
                       const char *filter_name)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    const AVFilter *trim;
    enum AVMediaType type = avfilter_pad_get_type((*last_filter)->output_pads, *pad_idx);
    const char *name = (type == AVMEDIA_TYPE_VIDEO) ? "trim" : "atrim";
    int ret = 0;

    if (duration == INT64_MAX && start_time == AV_NOPTS_VALUE)
        return 0;

    trim = avfilter_get_by_name(name);
    if (!trim) {
        av_log(NULL, AV_LOG_ERROR, "%s filter not present, cannot limit "
               "recording time.\n", name);
        return AVERROR_FILTER_NOT_FOUND;
    }

    ctx = avfilter_graph_alloc_filter(graph, trim, filter_name);
    if (!ctx)
        return AVERROR(ENOMEM);

    if (duration != INT64_MAX) {
        ret = av_opt_set_double(ctx, "duration", (double)duration / 1e6,
                                AV_OPT_SEARCH_CHILDREN);
    }
    if (ret >= 0 && start_time != AV_NOPTS_VALUE) {
        ret = av_opt_set_double(ctx, "start", (double)start_time / 1e6,
                                AV_OPT_SEARCH_CHILDREN);
    }
    if (ret < 0) {
        av_log(ctx, AV_LOG_ERROR, "Error configuring the %s filter", name);
        return ret;
    }

    ret = avfilter_init_str(ctx, NULL);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}
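A hedged usage sketch of insert_trim() above: the output-filter code in Code Examples #21 and #22 calls it with times taken from the OutputFile, but any values in AV_TIME_BASE (microsecond) units work, matching the /1e6 conversion inside the helper. The limit_to_ten_seconds() name and the 10-second duration are illustrative only.

#include <libavutil/avutil.h>   /* AV_TIME_BASE */

/* Hedged sketch: keep only the first 10 seconds of an output chain by
 * appending a trim/atrim filter via insert_trim() from above. */
static int limit_to_ten_seconds(AVFilterContext **last_filter, int *pad_idx)
{
    int64_t start_time = 0;                   /* start trimming at 0 */
    int64_t duration   = 10LL * AV_TIME_BASE; /* 10 seconds in microseconds */

    return insert_trim(start_time, duration, last_filter, pad_idx,
                       "illustrative trim");
}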
Code Example #14
File: avfiltergraph.c  Project: roman5566/FFmpeg
static int graph_insert_fifos(AVFilterGraph *graph, AVClass *log_ctx)
{
    AVFilterContext *f;
    int i, j, ret;
    int fifo_count = 0;

    for (i = 0; i < graph->filter_count; i++) {
        f = graph->filters[i];

        for (j = 0; j < f->nb_inputs; j++) {
            AVFilterLink *link = f->inputs[j];
            AVFilterContext *fifo_ctx;
            AVFilter *fifo;
            char name[32];

            if (!link->dstpad->needs_fifo)
                continue;

            fifo = f->inputs[j]->type == AVMEDIA_TYPE_VIDEO ?
                   avfilter_get_by_name("fifo") :
                   avfilter_get_by_name("afifo");

            snprintf(name, sizeof(name), "auto-inserted fifo %d", fifo_count++);

            ret = avfilter_graph_create_filter(&fifo_ctx, fifo, name, NULL,
                                               NULL, graph);
            if (ret < 0)
                return ret;

            ret = avfilter_insert_filter(link, fifo_ctx, 0, 0);
            if (ret < 0)
                return ret;
        }
    }

    return 0;
}
Code Example #15
File: Filter.cpp  Project: avTranscoder/avTranscoder
Filter::Filter(const std::string& name, const std::string& options, const std::string& instanceName)
    : _filter(NULL)
    , _context(NULL)
    , _options(options)
    , _instanceName(instanceName.empty() ? name : instanceName)
{
    _filter = avfilter_get_by_name(name.c_str());
    if(!_filter)
    {
        std::string msg("Cannot find filter ");
        msg += name;
        msg += ". It will not be added to the filter graph.";
        throw std::runtime_error(msg);
    }
}
Code Example #16
File: graphparser.c  Project: 309746069/FFmpeg
/**
 * Create an instance of a filter, initialize and insert it in the
 * filtergraph in *ctx.
 *
 * @param filt_ctx put here a filter context in case of successful creation and configuration, NULL otherwise.
 * @param ctx the filtergraph context
 * @param index an index which is supposed to be unique for each filter instance added to the filtergraph
 * @param filt_name the name of the filter to create
 * @param args the arguments provided to the filter during its initialization
 * @param log_ctx the log context to use
 * @return >= 0 in case of success, a negative AVERROR code otherwise
 */
static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index,
                         const char *filt_name, const char *args, void *log_ctx)
{
    AVFilter *filt;
    char inst_name[30];
    char *tmp_args = NULL;
    int ret;

    snprintf(inst_name, sizeof(inst_name), "Parsed_%s_%d", filt_name, index);

    filt = avfilter_get_by_name(filt_name);

    if (!filt) {
        av_log(log_ctx, AV_LOG_ERROR,
               "No such filter: '%s'\n", filt_name);
        return AVERROR(EINVAL);
    }

    *filt_ctx = avfilter_graph_alloc_filter(ctx, filt, inst_name);
    if (!*filt_ctx) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error creating filter '%s'\n", filt_name);
        return AVERROR(ENOMEM);
    }

    if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") &&
        ctx->scale_sws_opts) {
        tmp_args = av_asprintf("%s:%s",
                 args, ctx->scale_sws_opts);
        if (!tmp_args)
            return AVERROR(ENOMEM);
        args = tmp_args;
    }

    ret = avfilter_init_str(*filt_ctx, args);
    if (ret < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error initializing filter '%s'", filt_name);
        if (args)
            av_log(log_ctx, AV_LOG_ERROR, " with args '%s'", args);
        av_log(log_ctx, AV_LOG_ERROR, "\n");
        avfilter_free(*filt_ctx);
        *filt_ctx = NULL;
    }

    av_free(tmp_args);
    return ret;
}
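The allocate-then-init pattern used by create_filter() above (avfilter_graph_alloc_filter() followed by avfilter_init_str()) also works outside the graph parser; below is a hedged, standalone sketch for a single "scale" instance. The alloc_scale_filter() name and the 640x360 arguments are illustrative only.

#include <libavfilter/avfilter.h>
#include <libavutil/error.h>

/* Hedged sketch: create and initialize one "scale" filter instance in an
 * already-allocated graph, mirroring the two-step pattern of create_filter(). */
static int alloc_scale_filter(AVFilterGraph *graph, AVFilterContext **out)
{
    const AVFilter *scale = avfilter_get_by_name("scale");
    AVFilterContext *ctx;
    int ret;

    if (!scale)
        return AVERROR_FILTER_NOT_FOUND;

    ctx = avfilter_graph_alloc_filter(graph, scale, "illustrative_scale");
    if (!ctx)
        return AVERROR(ENOMEM);

    ret = avfilter_init_str(ctx, "w=640:h=360");
    if (ret < 0) {
        avfilter_free(ctx);
        return ret;
    }

    *out = ctx;
    return 0;
}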
Code Example #17
File: graphparser.c  Project: crispino/dmr
/**
 * Create an instance of a filter, initialize and insert it in the
 * filtergraph in *ctx.
 *
 * @param ctx the filtergraph context
 * @param filt_ctx put here a filter context in case of successful creation and configuration, NULL otherwise.
 * @param index an index which is supposed to be unique for each filter instance added to the filtergraph
 * @param filt_name the name of the filter to create
 * @param args the arguments provided to the filter during its initialization
 * @param log_ctx the log context to use
 * @return 0 in case of success, a negative AVERROR code otherwise
 */
static int create_filter(AVFilterContext **filt_ctx, AVFilterGraph *ctx, int index,
                         const char *filt_name, const char *args, void *log_ctx)
{
    AVFilter *filt;
    char inst_name[30];
    char tmp_args[256];
    int ret;

    snprintf(inst_name, sizeof(inst_name), "Parsed filter %d %s", index, filt_name);

    filt = avfilter_get_by_name(filt_name);

    if (!filt) {
        av_log(log_ctx, AV_LOG_ERROR,
               "No such filter: '%s'\n", filt_name);
        return AVERROR(EINVAL);
    }

    ret = avfilter_open(filt_ctx, filt, inst_name);
    if (!*filt_ctx) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error creating filter '%s'\n", filt_name);
        return ret;
    }

    if ((ret = avfilter_graph_add_filter(ctx, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    if (!strcmp(filt_name, "scale") && args && !strstr(args, "flags") &&
        ctx->scale_sws_opts) {
        snprintf(tmp_args, sizeof(tmp_args), "%s:%s",
                 args, ctx->scale_sws_opts);
        args = tmp_args;
    }

    if ((ret = avfilter_init_filter(*filt_ctx, args, NULL)) < 0) {
        av_log(log_ctx, AV_LOG_ERROR,
               "Error initializing filter '%s' with args '%s'\n", filt_name, args);
        return ret;
    }

    return 0;
}
Code Example #18
File: UdpAudioPlayer.cpp  Project: alexliyu/MeetSDK
static int insert_filter(const char *name, const char* arg, AVFilterContext **last_filter)
{
	AVFilterContext *filt_ctx;
	int ret;
	char fltr_name[64] ={0};
	_snprintf_s(fltr_name, sizeof(fltr_name), "ffplayer_%s", name);
	ret = avfilter_graph_create_filter(&filt_ctx, avfilter_get_by_name(name),
		fltr_name, arg, NULL, filter_graph);
	if (ret < 0) {
		printf("Cannot create filter: %s\n", name);
		return -1;
	}

	ret = avfilter_link(filt_ctx, 0, *last_filter, 0);
	if (ret < 0) {
		printf("Cannot link filter: %s\n", name);
		return -1;
	}

	*last_filter = filt_ctx;
	return 0;
}
Code Example #19
int configure_input_filter(struct liveStream *ctx, long in_id, AVFilterInOut *in)
{
	AVFilter *buffer = avfilter_get_by_name("buffer");
	struct lsInput *input = NULL;
	AVStream *st;
	AVCodecContext *dec_ctx;
	AVRational tb;
	AVBPrint args;
	int ret = 0;
	char name[128];

	input = get_input_by_id(ctx->inputs,in_id);
	if(!input)
	{
		av_log(NULL,AV_LOG_ERROR,"Invalid input id for inputs list\n");
		return -1;
	}
	st = input->st;
	dec_ctx = input->dec_ctx;
	tb = st->time_base;

	av_bprint_init(&args, 0, 1);
	/* buffer video source: the decoded frames from the decoder will be inserted here. */
	av_bprintf(&args,"video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
			dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
			tb.num, tb.den,
			dec_ctx->sample_aspect_ratio.num,
			dec_ctx->sample_aspect_ratio.den);
	snprintf(name, sizeof(name), "Input %ld", in_id);
	ret = avfilter_graph_create_filter(&input->in_filter, buffer, name, args.str, NULL, ctx->filter_graph);
	if(ret < 0)
		return ret;

	if ((ret = avfilter_link(input->in_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
		return ret;


	return ret;
}
Code Example #20
static int insert_filter(ABufferSourceContext *abuffer,
                         AVFilterLink *link, AVFilterContext **filt_ctx,
                         const char *filt_name)
{
    int ret;

    if ((ret = avfilter_open(filt_ctx, avfilter_get_by_name(filt_name), NULL)) < 0)
        return ret;

    link->src->outputs[0] = NULL;
    if ((ret = avfilter_link(link->src, 0, *filt_ctx, 0)) < 0) {
        link->src->outputs[0] = link;
        return ret;
    }

    set_link_source(*filt_ctx, link);

    if ((ret = reconfigure_filter(abuffer, *filt_ctx)) < 0) {
        avfilter_free(*filt_ctx);
        return ret;
    }

    return 0;
}
Code Example #21
File: avconv_filter.c  Project: darcyg/avconv
static int configure_output_audio_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVCodecContext *codec  = ost->enc_ctx;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    char *sample_fmts, *sample_rates, *channel_layouts;
    char name[255];
    int ret;


    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("abuffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;

    if (codec->channels && !codec->channel_layout)
        codec->channel_layout = av_get_default_channel_layout(codec->channels);

    sample_fmts     = choose_sample_fmts(ost);
    sample_rates    = choose_sample_rates(ost);
    channel_layouts = choose_channel_layouts(ost);
    if (sample_fmts || sample_rates || channel_layouts) {
        AVFilterContext *format;
        char args[256];
        int len = 0;

        if (sample_fmts)
            len += snprintf(args + len, sizeof(args) - len, "sample_fmts=%s:",
                            sample_fmts);
        if (sample_rates)
            len += snprintf(args + len, sizeof(args) - len, "sample_rates=%s:",
                            sample_rates);
        if (channel_layouts)
            len += snprintf(args + len, sizeof(args) - len, "channel_layouts=%s:",
                            channel_layouts);
        args[len - 1] = 0;

        av_freep(&sample_fmts);
        av_freep(&sample_rates);
        av_freep(&channel_layouts);

        snprintf(name, sizeof(name), "audio format for output stream %d:%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&format,
                                           avfilter_get_by_name("aformat"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, pad_idx, format, 0);
        if (ret < 0)
            return ret;

        last_filter = format;
        pad_idx = 0;
    }

    snprintf(name, sizeof(name), "trim for output stream %d:%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        return ret;

    return 0;
}
Code Example #22
File: avconv_filter.c  Project: darcyg/avconv
static int configure_output_video_filter(FilterGraph *fg, OutputFilter *ofilter, AVFilterInOut *out)
{
    char *pix_fmts;
    OutputStream *ost = ofilter->ost;
    OutputFile    *of = output_files[ost->file_index];
    AVCodecContext *codec = ost->enc_ctx;
    AVFilterContext *last_filter = out->filter_ctx;
    int pad_idx = out->pad_idx;
    int ret;
    char name[255];

    snprintf(name, sizeof(name), "output stream %d:%d", ost->file_index, ost->index);
    ret = avfilter_graph_create_filter(&ofilter->filter,
                                       avfilter_get_by_name("buffersink"),
                                       name, NULL, NULL, fg->graph);
    if (ret < 0)
        return ret;

    if (codec->width || codec->height) {
        char args[255];
        AVFilterContext *filter;

        snprintf(args, sizeof(args), "%d:%d:0x%X",
                 codec->width,
                 codec->height,
                 (unsigned)ost->sws_flags);
        snprintf(name, sizeof(name), "scaler for output stream %d:%d",
                 ost->file_index, ost->index);
        if ((ret = avfilter_graph_create_filter(&filter, avfilter_get_by_name("scale"),
                                                name, args, NULL, fg->graph)) < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx = 0;
    }

    if ((pix_fmts = choose_pix_fmts(ost))) {
        AVFilterContext *filter;
        snprintf(name, sizeof(name), "pixel format for output stream %d:%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&filter,
                                           avfilter_get_by_name("format"),
                                           "format", pix_fmts, NULL, fg->graph);
        av_freep(&pix_fmts);
        if (ret < 0)
            return ret;
        if ((ret = avfilter_link(last_filter, pad_idx, filter, 0)) < 0)
            return ret;

        last_filter = filter;
        pad_idx     = 0;
    }

    if (ost->frame_rate.num) {
        AVFilterContext *fps;
        char args[255];

        snprintf(args, sizeof(args), "fps=%d/%d", ost->frame_rate.num,
                 ost->frame_rate.den);
        snprintf(name, sizeof(name), "fps for output stream %d:%d",
                 ost->file_index, ost->index);
        ret = avfilter_graph_create_filter(&fps, avfilter_get_by_name("fps"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, pad_idx, fps, 0);
        if (ret < 0)
            return ret;
        last_filter = fps;
        pad_idx = 0;
    }

    snprintf(name, sizeof(name), "trim for output stream %d:%d",
             ost->file_index, ost->index);
    ret = insert_trim(of->start_time, of->recording_time,
                      &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;


    if ((ret = avfilter_link(last_filter, pad_idx, ofilter->filter, 0)) < 0)
        return ret;

    return 0;
}
Code Example #23
File: avfiltergraph.c  Project: Fatbag/libav
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
    int i, j, ret;
    int scaler_count = 0, resampler_count = 0;

    /* ask all the sub-filters for their supported media formats */
    for (i = 0; i < graph->filter_count; i++) {
        if (graph->filters[i]->filter->query_formats)
            graph->filters[i]->filter->query_formats(graph->filters[i]);
        else
            avfilter_default_query_formats(graph->filters[i]);
    }

    /* go through and merge as many format lists as possible */
    for (i = 0; i < graph->filter_count; i++) {
        AVFilterContext *filter = graph->filters[i];

        for (j = 0; j < filter->input_count; j++) {
            AVFilterLink *link = filter->inputs[j];
            int convert_needed = 0;

            if (!link)
                continue;

            if (link->in_formats != link->out_formats &&
                    !avfilter_merge_formats(link->in_formats,
                                            link->out_formats))
                convert_needed = 1;
            if (link->type == AVMEDIA_TYPE_AUDIO) {
                if (link->in_channel_layouts != link->out_channel_layouts &&
                        !ff_merge_channel_layouts(link->in_channel_layouts,
                                                  link->out_channel_layouts))
                    convert_needed = 1;
                if (link->in_samplerates != link->out_samplerates &&
                        !ff_merge_samplerates(link->in_samplerates,
                                              link->out_samplerates))
                    convert_needed = 1;
            }

            if (convert_needed) {
                AVFilterContext *convert;
                AVFilter *filter;
                AVFilterLink *inlink, *outlink;
                char scale_args[256];
                char inst_name[30];

                /* couldn't merge format lists. auto-insert conversion filter */
                switch (link->type) {
                case AVMEDIA_TYPE_VIDEO:
                    snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
                             scaler_count++);
                    snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
                    if ((ret = avfilter_graph_create_filter(&convert,
                                                            avfilter_get_by_name("scale"),
                                                            inst_name, scale_args, NULL,
                                                            graph)) < 0)
                        return ret;
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    if (!(filter = avfilter_get_by_name("resample"))) {
                        av_log(log_ctx, AV_LOG_ERROR, "'resample' filter "
                               "not present, cannot convert audio formats.\n");
                        return AVERROR(EINVAL);
                    }

                    snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
                             resampler_count++);
                    if ((ret = avfilter_graph_create_filter(&convert,
                                                            avfilter_get_by_name("resample"),
                                                            inst_name, NULL, NULL, graph)) < 0)
                        return ret;
                    break;
                default:
                    return AVERROR(EINVAL);
                }

                if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
                    return ret;

                convert->filter->query_formats(convert);
                inlink  = convert->inputs[0];
                outlink = convert->outputs[0];
                if (!avfilter_merge_formats( inlink->in_formats,  inlink->out_formats) ||
                        !avfilter_merge_formats(outlink->in_formats, outlink->out_formats))
                    ret |= AVERROR(ENOSYS);
                if (inlink->type == AVMEDIA_TYPE_AUDIO &&
                        (!ff_merge_samplerates(inlink->in_samplerates,
                                               inlink->out_samplerates) ||
                         !ff_merge_channel_layouts(inlink->in_channel_layouts,
                                                   inlink->out_channel_layouts)))
                    ret |= AVERROR(ENOSYS);
                if (outlink->type == AVMEDIA_TYPE_AUDIO &&
                        (!ff_merge_samplerates(outlink->in_samplerates,
                                               outlink->out_samplerates) ||
                         !ff_merge_channel_layouts(outlink->in_channel_layouts,
                                                   outlink->out_channel_layouts)))
                    ret |= AVERROR(ENOSYS);

                if (ret < 0) {
                    av_log(log_ctx, AV_LOG_ERROR,
                           "Impossible to convert between the formats supported by the filter "
                           "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
                    return ret;
                }
            }
        }
    }

    return 0;
}
Code Example #24
File: DVDVideoCodecFFmpeg.cpp  Project: Kahlzarg/xbmc
int CDVDVideoCodecFFmpeg::FilterOpen(const std::string& filters, bool scale)
{
  int result;

  if (m_pFilterGraph)
    FilterClose();

  if (filters.empty() && !scale)
    return 0;

  if (m_pHardware)
  {
    CLog::Log(LOGWARNING, "CDVDVideoCodecFFmpeg::FilterOpen - skipped opening filters on hardware decode");
    return 0;
  }

  if (!(m_pFilterGraph = avfilter_graph_alloc()))
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - unable to alloc filter graph");
    return -1;
  }

  AVFilter* srcFilter = avfilter_get_by_name("buffer");
  AVFilter* outFilter = avfilter_get_by_name("buffersink"); // should be last filter in the graph for now

  std::string args = StringUtils::Format("%d:%d:%d:%d:%d:%d:%d",
                                        m_pCodecContext->width,
                                        m_pCodecContext->height,
                                        m_pCodecContext->pix_fmt,
                                        m_pCodecContext->time_base.num ? m_pCodecContext->time_base.num : 1,
                                        m_pCodecContext->time_base.num ? m_pCodecContext->time_base.den : 1,
                                        m_pCodecContext->sample_aspect_ratio.num != 0 ? m_pCodecContext->sample_aspect_ratio.num : 1,
                                        m_pCodecContext->sample_aspect_ratio.num != 0 ? m_pCodecContext->sample_aspect_ratio.den : 1);

  if ((result = avfilter_graph_create_filter(&m_pFilterIn, srcFilter, "src", args.c_str(), NULL, m_pFilterGraph)) < 0)
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_create_filter: src");
    return result;
  }

  if ((result = avfilter_graph_create_filter(&m_pFilterOut, outFilter, "out", NULL, NULL, m_pFilterGraph)) < 0)
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_create_filter: out");
    return result;
  }
  if ((result = av_opt_set_int_list(m_pFilterOut, "pix_fmts", &m_formats[0],  AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN)) < 0)
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - failed settings pix formats");
    return result;
  }

  if (!filters.empty())
  {
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs  = avfilter_inout_alloc();

    outputs->name = av_strdup("in");
    outputs->filter_ctx = m_pFilterIn;
    outputs->pad_idx = 0;
    outputs->next = nullptr;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = m_pFilterOut;
    inputs->pad_idx = 0;
    inputs->next = nullptr;

    if ((result = avfilter_graph_parse_ptr(m_pFilterGraph, (const char*)m_filters.c_str(), &inputs, &outputs, NULL)) < 0)
    {
      CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_parse");
      return result;
    }

    avfilter_inout_free(&outputs);
    avfilter_inout_free(&inputs);
  }
  else
  {
    if ((result = avfilter_link(m_pFilterIn, 0, m_pFilterOut, 0)) < 0)
    {
      CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_link");
      return result;
    }
  }

  if ((result = avfilter_graph_config(m_pFilterGraph,  nullptr)) < 0)
  {
    CLog::Log(LOGERROR, "CDVDVideoCodecFFmpeg::FilterOpen - avfilter_graph_config");
    return result;
  }

  m_filterEof = false;
  return result;
}
Code Example #25
File: avfiltergraph.c  Project: roman5566/FFmpeg
static int query_formats(AVFilterGraph *graph, AVClass *log_ctx)
{
    int i, j, ret;
    char filt_args[128];
    AVFilterFormats *formats;
    AVFilterChannelLayouts *chlayouts;
    AVFilterFormats *samplerates;
    int scaler_count = 0, resampler_count = 0;

    for (j = 0; j < 2; j++) {
    /* ask all the sub-filters for their supported media formats */
    for (i = 0; i < graph->filter_count; i++) {
        /* Call query_formats on sources first.
           This is a temporary workaround for amerge,
           until format renegotiation is implemented. */
        if (!graph->filters[i]->nb_inputs == j)
            continue;
        if (graph->filters[i]->filter->query_formats)
            ret = filter_query_formats(graph->filters[i]);
        else
            ret = ff_default_query_formats(graph->filters[i]);
        if (ret < 0)
            return ret;
    }
    }

    /* go through and merge as many format lists as possible */
    for (i = 0; i < graph->filter_count; i++) {
        AVFilterContext *filter = graph->filters[i];

        for (j = 0; j < filter->nb_inputs; j++) {
            AVFilterLink *link = filter->inputs[j];
#if 0
            if (!link) continue;

            if (!link->in_formats || !link->out_formats)
                return AVERROR(EINVAL);

            if (link->type == AVMEDIA_TYPE_VIDEO &&
                !ff_merge_formats(link->in_formats, link->out_formats)) {

                /* couldn't merge format lists, auto-insert scale filter */
                snprintf(filt_args, sizeof(filt_args), "0:0:%s",
                         graph->scale_sws_opts);
                if (ret = insert_conv_filter(graph, link, "scale", filt_args))
                    return ret;
            }
            else if (link->type == AVMEDIA_TYPE_AUDIO) {
                if (!link->in_channel_layouts || !link->out_channel_layouts)
                    return AVERROR(EINVAL);

                /* Merge all three list before checking: that way, in all
                 * three categories, aconvert will use a common format
                 * whenever possible. */
                formats     = ff_merge_formats(link->in_formats,   link->out_formats);
                chlayouts   = ff_merge_channel_layouts(link->in_channel_layouts  , link->out_channel_layouts);
                samplerates = ff_merge_samplerates    (link->in_samplerates, link->out_samplerates);

                if (!formats || !chlayouts || !samplerates)
                    if (ret = insert_conv_filter(graph, link, "aresample", NULL))
                       return ret;
#else
            int convert_needed = 0;

            if (!link)
                continue;

            if (link->in_formats != link->out_formats &&
                !ff_merge_formats(link->in_formats,
                                        link->out_formats))
                convert_needed = 1;
            if (link->type == AVMEDIA_TYPE_AUDIO) {
                if (link->in_channel_layouts != link->out_channel_layouts &&
                    !ff_merge_channel_layouts(link->in_channel_layouts,
                                              link->out_channel_layouts))
                    convert_needed = 1;
                if (link->in_samplerates != link->out_samplerates &&
                    !ff_merge_samplerates(link->in_samplerates,
                                          link->out_samplerates))
                    convert_needed = 1;
            }

            if (convert_needed) {
                AVFilterContext *convert;
                AVFilter *filter;
                AVFilterLink *inlink, *outlink;
                char scale_args[256];
                char inst_name[30];

                /* couldn't merge format lists. auto-insert conversion filter */
                switch (link->type) {
                case AVMEDIA_TYPE_VIDEO:
                    if (!(filter = avfilter_get_by_name("scale"))) {
                        av_log(log_ctx, AV_LOG_ERROR, "'scale' filter "
                               "not present, cannot convert pixel formats.\n");
                        return AVERROR(EINVAL);
                    }

                    snprintf(inst_name, sizeof(inst_name), "auto-inserted scaler %d",
                             scaler_count++);
                    snprintf(scale_args, sizeof(scale_args), "0:0:%s", graph->scale_sws_opts);
                    if ((ret = avfilter_graph_create_filter(&convert, filter,
                                                            inst_name, scale_args, NULL,
                                                            graph)) < 0)
                        return ret;
                    break;
                case AVMEDIA_TYPE_AUDIO:
                    if (!(filter = avfilter_get_by_name("aresample"))) {
                        av_log(log_ctx, AV_LOG_ERROR, "'aresample' filter "
                               "not present, cannot convert audio formats.\n");
                        return AVERROR(EINVAL);
                    }

                    snprintf(inst_name, sizeof(inst_name), "auto-inserted resampler %d",
                             resampler_count++);
                    if ((ret = avfilter_graph_create_filter(&convert, filter,
                                                            inst_name, NULL, NULL, graph)) < 0)
                        return ret;
                    break;
                default:
                    return AVERROR(EINVAL);
                }

                if ((ret = avfilter_insert_filter(link, convert, 0, 0)) < 0)
                    return ret;

                filter_query_formats(convert);
                inlink  = convert->inputs[0];
                outlink = convert->outputs[0];
                if (!ff_merge_formats( inlink->in_formats,  inlink->out_formats) ||
                    !ff_merge_formats(outlink->in_formats, outlink->out_formats))
                    ret |= AVERROR(ENOSYS);
                if (inlink->type == AVMEDIA_TYPE_AUDIO &&
                    (!ff_merge_samplerates(inlink->in_samplerates,
                                           inlink->out_samplerates) ||
                     !ff_merge_channel_layouts(inlink->in_channel_layouts,
                                               inlink->out_channel_layouts)))
                    ret |= AVERROR(ENOSYS);
                if (outlink->type == AVMEDIA_TYPE_AUDIO &&
                    (!ff_merge_samplerates(outlink->in_samplerates,
                                           outlink->out_samplerates) ||
                     !ff_merge_channel_layouts(outlink->in_channel_layouts,
                                               outlink->out_channel_layouts)))
                    ret |= AVERROR(ENOSYS);

                if (ret < 0) {
                    av_log(log_ctx, AV_LOG_ERROR,
                           "Impossible to convert between the formats supported by the filter "
                           "'%s' and the filter '%s'\n", link->src->name, link->dst->name);
                    return ret;
                }
#endif
            }
        }
    }

    return 0;
}

static int pick_format(AVFilterLink *link, AVFilterLink *ref)
{
    if (!link || !link->in_formats)
        return 0;

    if (link->type == AVMEDIA_TYPE_VIDEO) {
        if(ref && ref->type == AVMEDIA_TYPE_VIDEO){
            int has_alpha= av_pix_fmt_descriptors[ref->format].nb_components % 2 == 0;
            enum PixelFormat best= PIX_FMT_NONE;
            int i;
            for (i=0; i<link->in_formats->format_count; i++) {
                enum PixelFormat p = link->in_formats->formats[i];
                best= avcodec_find_best_pix_fmt2(best, p, ref->format, has_alpha, NULL);
            }
            av_log(link->src,AV_LOG_DEBUG, "picking %s out of %d ref:%s alpha:%d\n",
                   av_get_pix_fmt_name(best), link->in_formats->format_count,
                   av_get_pix_fmt_name(ref->format), has_alpha);
            link->in_formats->formats[0] = best;
        }
    }

    link->in_formats->format_count = 1;
    link->format = link->in_formats->formats[0];

    if (link->type == AVMEDIA_TYPE_AUDIO) {
        if (!link->in_samplerates->format_count) {
            av_log(link->src, AV_LOG_ERROR, "Cannot select sample rate for"
                   " the link between filters %s and %s.\n", link->src->name,
                   link->dst->name);
            return AVERROR(EINVAL);
        }
        link->in_samplerates->format_count = 1;
        link->sample_rate = link->in_samplerates->formats[0];

        if (!link->in_channel_layouts->nb_channel_layouts) {
            av_log(link->src, AV_LOG_ERROR, "Cannot select channel layout for"
                   "the link between filters %s and %s.\n", link->src->name,
                   link->dst->name);
            return AVERROR(EINVAL);
        }
        link->in_channel_layouts->nb_channel_layouts = 1;
        link->channel_layout = link->in_channel_layouts->channel_layouts[0];
    }

    ff_formats_unref(&link->in_formats);
    ff_formats_unref(&link->out_formats);
    ff_formats_unref(&link->in_samplerates);
    ff_formats_unref(&link->out_samplerates);
    ff_channel_layouts_unref(&link->in_channel_layouts);
    ff_channel_layouts_unref(&link->out_channel_layouts);

    return 0;
}

#define REDUCE_FORMATS(fmt_type, list_type, list, var, nb, add_format) \
do {                                                                   \
    for (i = 0; i < filter->nb_inputs; i++) {                          \
        AVFilterLink *link = filter->inputs[i];                        \
        fmt_type fmt;                                                  \
                                                                       \
        if (!link->out_ ## list || link->out_ ## list->nb != 1)        \
            continue;                                                  \
        fmt = link->out_ ## list->var[0];                              \
                                                                       \
        for (j = 0; j < filter->nb_outputs; j++) {                     \
            AVFilterLink *out_link = filter->outputs[j];               \
            list_type *fmts;                                           \
                                                                       \
            if (link->type != out_link->type ||                        \
                out_link->in_ ## list->nb == 1)                        \
                continue;                                              \
            fmts = out_link->in_ ## list;                              \
                                                                       \
            if (!out_link->in_ ## list->nb) {                          \
                add_format(&out_link->in_ ##list, fmt);                \
                break;                                                 \
            }                                                          \
                                                                       \
            for (k = 0; k < out_link->in_ ## list->nb; k++)            \
                if (fmts->var[k] == fmt) {                             \
                    fmts->var[0]  = fmt;                               \
                    fmts->nb = 1;                                      \
                    ret = 1;                                           \
                    break;                                             \
                }                                                      \
        }                                                              \
    }                                                                  \
} while (0)

static int reduce_formats_on_filter(AVFilterContext *filter)
{
    int i, j, k, ret = 0;

    REDUCE_FORMATS(int,      AVFilterFormats,        formats,         formats,
                   format_count, ff_add_format);
    REDUCE_FORMATS(int,      AVFilterFormats,        samplerates,     formats,
                   format_count, ff_add_format);
    REDUCE_FORMATS(uint64_t, AVFilterChannelLayouts, channel_layouts,
                   channel_layouts, nb_channel_layouts, ff_add_channel_layout);

    return ret;
}
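
REDUCE_FORMATS propagates a value that is already pinned on one of the filter's input links to any output link of the same media type that also supports it, and reduce_formats_on_filter() returns 1 whenever it managed to narrow a list. That makes it natural to drive in a fixed-point loop over the whole graph; below is a minimal sketch, assuming the graph exposes its filters as filters[0..nb_filters):

/* Minimal sketch (assumed driver): keep propagating single-entry lists
 * across filters until a full pass makes no further change. */
static void reduce_formats(AVFilterGraph *graph)
{
    int i, reduced;

    do {
        reduced = 0;
        for (i = 0; i < graph->nb_filters; i++)
            reduced |= reduce_formats_on_filter(graph->filters[i]);
    } while (reduced);
}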
Code example #26
0
File: avconv_filter.c Project: darcyg/avconv
static int configure_input_video_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *buffer_filt = avfilter_get_by_name("buffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    AVRational tb = ist->framerate.num ? av_inv_q(ist->framerate) :
                                         ist->st->time_base;
    AVRational sar;
    char args[255], name[255];
    int ret, pad_idx = 0;

    sar = ist->st->sample_aspect_ratio.num ?
          ist->st->sample_aspect_ratio :
          ist->dec_ctx->sample_aspect_ratio;
    snprintf(args, sizeof(args),
             "width=%d:height=%d:pix_fmt=%d:time_base=%d/%d:sar=%d/%d",
             ist->dec_ctx->width, ist->dec_ctx->height,
             ist->hwaccel_retrieve_data ? ist->hwaccel_retrieved_pix_fmt : ist->dec_ctx->pix_fmt,
             tb.num, tb.den, sar.num, sar.den);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, buffer_filt, name,
                                            args, NULL, fg->graph)) < 0)
        return ret;
    last_filter = ifilter->filter;

    if (ist->autorotate) {
        uint8_t* displaymatrix = av_stream_get_side_data(ist->st,
                                                         AV_PKT_DATA_DISPLAYMATRIX, NULL);
        if (displaymatrix) {
            double rot = av_display_rotation_get((int32_t*) displaymatrix);
            if (rot < -135 || rot > 135) {
                ret = insert_filter(&last_filter, &pad_idx, "vflip", NULL);
                if (ret < 0)
                    return ret;
                ret = insert_filter(&last_filter, &pad_idx, "hflip", NULL);
            } else if (rot < -45) {
                ret = insert_filter(&last_filter, &pad_idx, "transpose", "dir=clock");
            } else if (rot > 45) {
                ret = insert_filter(&last_filter, &pad_idx, "transpose", "dir=cclock");
            }
            if (ret < 0)
                return ret;
        }
    }

    if (ist->framerate.num) {
        AVFilterContext *setpts;

        snprintf(name, sizeof(name), "force CFR for input from stream %d:%d",
                 ist->file_index, ist->st->index);
        if ((ret = avfilter_graph_create_filter(&setpts,
                                                avfilter_get_by_name("setpts"),
                                                name, "N", NULL,
                                                fg->graph)) < 0)
            return ret;

        if ((ret = avfilter_link(last_filter, 0, setpts, 0)) < 0)
            return ret;

        last_filter = setpts;
    }

    snprintf(name, sizeof(name), "trim for input stream %d:%d",
             ist->file_index, ist->st->index);
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;
    return 0;
}
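
The insert_filter() and insert_trim() helpers used above are not included in this excerpt. For orientation only, the sketch below shows what an insert_filter-style helper would typically do (create the named filter in the same graph, link it to the current chain tail, and advance last_filter); insert_trim presumably does the same with a "trim"/"atrim" filter configured from the start/duration arguments.

/* Minimal sketch (assumed helper): append a named filter to the chain
 * ending at *last_filter and make it the new tail. */
static int insert_filter(AVFilterContext **last_filter, int *pad_idx,
                         const char *filter_name, const char *args)
{
    AVFilterGraph *graph = (*last_filter)->graph;
    AVFilterContext *ctx;
    int ret;

    ret = avfilter_graph_create_filter(&ctx,
                                       avfilter_get_by_name(filter_name),
                                       filter_name, args, NULL, graph);
    if (ret < 0)
        return ret;

    ret = avfilter_link(*last_filter, *pad_idx, ctx, 0);
    if (ret < 0)
        return ret;

    *last_filter = ctx;
    *pad_idx     = 0;
    return 0;
}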
Code example #27
0
File: filtering_video.c Project: 309746069/FFmpeg
static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret = 0;
    AVFilter *buffersrc  = avfilter_get_by_name("buffer");
    AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;
    enum AVPixelFormat pix_fmts[] = { AV_PIX_FMT_GRAY8, AV_PIX_FMT_NONE };

    filter_graph = avfilter_graph_alloc();
    if (!outputs || !inputs || !filter_graph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /* buffer video source: the decoded frames from the decoder will be inserted here. */
    snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
            time_base.num, time_base.den,
            dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);

    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
        goto end;
    }

    /* buffer video sink: to terminate the filter chain. */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
        goto end;
    }

    ret = av_opt_set_int_list(buffersink_ctx, "pix_fmts", pix_fmts,
                              AV_PIX_FMT_NONE, AV_OPT_SEARCH_CHILDREN);
    if (ret < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
        goto end;
    }

    /*
     * Set the endpoints for the filter graph. The filter_graph will
     * be linked to the graph described by filters_descr.
     */

    /*
     * The buffer source output must be connected to the input pad of
     * the first filter described by filters_descr; since the first
     * filter input label is not specified, it is set to "in" by
     * default.
     */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;

    /*
     * The buffer sink input must be connected to the output pad of
     * the last filter described by filters_descr; since the last
     * filter output label is not specified, it is set to "out" by
     * default.
     */
    inputs->name       = av_strdup("out");
    inputs->filter_ctx = buffersink_ctx;
    inputs->pad_idx    = 0;
    inputs->next       = NULL;

    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                    &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filter_graph, NULL)) < 0)
        goto end;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}
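
After init_filters() returns successfully, each decoded frame is pushed into buffersrc_ctx and the filtered output drained from buffersink_ctx. A minimal usage sketch, assuming a decoded AVFrame *frame and a pre-allocated AVFrame *filt_frame:

/* Minimal usage sketch (assumed frame variables): run one decoded frame
 * through the configured graph and drain all resulting frames. */
if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame,
                                 AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
} else {
    while (1) {
        int ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            break;
        if (ret < 0)
            break;
        /* ... use filt_frame here (display, encode, ...) ... */
        av_frame_unref(filt_frame);
    }
}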
Code example #28
0
static int init_filter(FilteringContext* fctx, AVCodecContext *dec_ctx,
    AVCodecContext *enc_ctx, const char *filter_spec)
{
    char args[512];
    int ret = 0;
    AVFilter *bufferSrc = NULL;
    AVFilter *bufferSink = NULL;
    AVFilterContext* bufferSrcCtx = NULL;
    AVFilterContext* bufferSinkCtx = NULL;
    AVFilterInOut* outputs = avfilter_inout_alloc();
    AVFilterInOut* inputs = avfilter_inout_alloc();
    AVFilterGraph* filterGraph = avfilter_graph_alloc();

    if (!outputs || !inputs || !filterGraph) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if (dec_ctx->codec_type == AVMEDIA_TYPE_VIDEO) {
        bufferSrc = avfilter_get_by_name("buffer");
        bufferSink = avfilter_get_by_name("buffersink");
        if (!bufferSrc || !bufferSink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        snprintf(args, sizeof(args),
            "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
            dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
            dec_ctx->time_base.num, dec_ctx->time_base.den,
            dec_ctx->sample_aspect_ratio.num,
            dec_ctx->sample_aspect_ratio.den);

        ret = avfilter_graph_create_filter(&bufferSrcCtx, bufferSrc, "in",
            args, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&bufferSinkCtx, bufferSink, "out",
            NULL, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "pix_fmts",
            (uint8_t*)&enc_ctx->pix_fmt, sizeof(enc_ctx->pix_fmt),
            AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output pixel format\n");
            goto end;
        }
    }
    else if (dec_ctx->codec_type == AVMEDIA_TYPE_AUDIO) {
        bufferSrc = avfilter_get_by_name("abuffer");
        bufferSink = avfilter_get_by_name("abuffersink");
        if (!bufferSrc || !bufferSink) {
            av_log(NULL, AV_LOG_ERROR, "filtering source or sink element not found\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        if (!dec_ctx->channel_layout) {
            dec_ctx->channel_layout = av_get_default_channel_layout(dec_ctx->channels);
        }
        snprintf(args, sizeof(args),
            "time_base=%d/%d:sample_rate=%d:sample_fmt=%s:channel_layout=0x%"PRIx64,
            dec_ctx->time_base.num, dec_ctx->time_base.den, dec_ctx->sample_rate,
            av_get_sample_fmt_name(dec_ctx->sample_fmt),
            dec_ctx->channel_layout);
        ret = avfilter_graph_create_filter(&bufferSrcCtx, bufferSrc, "in", args, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer source\n");
            goto end;
        }

        ret = avfilter_graph_create_filter(&bufferSinkCtx, bufferSink, "out", NULL, NULL, filterGraph);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot create audio buffer sink\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "sample_fmts", (uint8_t*)&enc_ctx->sample_fmt,
            sizeof(enc_ctx->sample_fmt), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample format\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "channel_layouts", (uint8_t*)&enc_ctx->channel_layout,
            sizeof(enc_ctx->channel_layout), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output channel layout\n");
            goto end;
        }

        ret = av_opt_set_bin(bufferSinkCtx, "sample_rates", (uint8_t*)&enc_ctx->sample_rate,
            sizeof(enc_ctx->sample_rate), AV_OPT_SEARCH_CHILDREN);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Cannot set output sample rate\n");
            goto end;
        }
    }
    else {
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    /* Endpoints for the filter graph. */
    outputs->name = av_strdup("in");
    outputs->filter_ctx = bufferSrcCtx;
    outputs->pad_idx = 0;
    outputs->next = NULL;

    inputs->name = av_strdup("out");
    inputs->filter_ctx = bufferSinkCtx;
    inputs->pad_idx = 0;
    inputs->next = NULL;

    if (!outputs->name || !inputs->name) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    if ((ret = avfilter_graph_parse_ptr(filterGraph, filter_spec,
        &inputs, &outputs, NULL)) < 0)
        goto end;

    if ((ret = avfilter_graph_config(filterGraph, NULL)) < 0)
        goto end;

    /* Fill FilteringContext */
    fctx->BuffersrcCtx = bufferSrcCtx;
    fctx->BuffersinkCtx = bufferSinkCtx;
    fctx->FilterGraph = filterGraph;

end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);

    return ret;
}
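
The FilteringContext type that receives the graph handles at the end is not defined in this excerpt; judging from the members assigned above, a matching definition would look roughly like this:

/* Assumed definition, reconstructed from the fields used above. */
typedef struct FilteringContext {
    AVFilterContext *BuffersrcCtx;
    AVFilterContext *BuffersinkCtx;
    AVFilterGraph   *FilterGraph;
} FilteringContext;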
Code example #29
0
File: avconv_filter.c Project: darcyg/avconv
static int configure_input_audio_filter(FilterGraph *fg, InputFilter *ifilter,
                                        AVFilterInOut *in)
{
    AVFilterContext *last_filter;
    const AVFilter *abuffer_filt = avfilter_get_by_name("abuffer");
    InputStream *ist = ifilter->ist;
    InputFile     *f = input_files[ist->file_index];
    char args[255], name[255];
    int ret, pad_idx = 0;

    snprintf(args, sizeof(args), "time_base=%d/%d:sample_rate=%d:sample_fmt=%s"
             ":channel_layout=0x%"PRIx64,
             1, ist->dec_ctx->sample_rate,
             ist->dec_ctx->sample_rate,
             av_get_sample_fmt_name(ist->dec_ctx->sample_fmt),
             ist->dec_ctx->channel_layout);
    snprintf(name, sizeof(name), "graph %d input from stream %d:%d", fg->index,
             ist->file_index, ist->st->index);

    if ((ret = avfilter_graph_create_filter(&ifilter->filter, abuffer_filt,
                                            name, args, NULL,
                                            fg->graph)) < 0)
        return ret;
    last_filter = ifilter->filter;

    if (audio_sync_method > 0) {
        AVFilterContext *async;
        int  len = 0;

        av_log(NULL, AV_LOG_WARNING, "-async has been deprecated. Used the "
               "asyncts audio filter instead.\n");

        if (audio_sync_method > 1)
            len += snprintf(args + len, sizeof(args) - len, "compensate=1:"
                            "max_comp=%d:", audio_sync_method);
        snprintf(args + len, sizeof(args) - len, "min_delta=%f",
                 audio_drift_threshold);

        snprintf(name, sizeof(name), "graph %d audio sync for input stream %d:%d",
                 fg->index, ist->file_index, ist->st->index);
        ret = avfilter_graph_create_filter(&async,
                                           avfilter_get_by_name("asyncts"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, 0, async, 0);
        if (ret < 0)
            return ret;

        last_filter = async;
    }
    if (audio_volume != 256) {
        AVFilterContext *volume;

        av_log(NULL, AV_LOG_WARNING, "-vol has been deprecated. Use the volume "
               "audio filter instead.\n");

        snprintf(args, sizeof(args), "volume=%f", audio_volume / 256.0);

        snprintf(name, sizeof(name), "graph %d volume for input stream %d:%d",
                 fg->index, ist->file_index, ist->st->index);
        ret = avfilter_graph_create_filter(&volume,
                                           avfilter_get_by_name("volume"),
                                           name, args, NULL, fg->graph);
        if (ret < 0)
            return ret;

        ret = avfilter_link(last_filter, 0, volume, 0);
        if (ret < 0)
            return ret;

        last_filter = volume;
    }

    snprintf(name, sizeof(name), "trim for input stream %d:%d",
             ist->file_index, ist->st->index);
    ret = insert_trim(((f->start_time == AV_NOPTS_VALUE) || !f->accurate_seek) ?
                      AV_NOPTS_VALUE : 0, f->recording_time, &last_filter, &pad_idx, name);
    if (ret < 0)
        return ret;

    if ((ret = avfilter_link(last_filter, 0, in->filter_ctx, in->pad_idx)) < 0)
        return ret;

    return 0;
}
Code example #30
0
File: vsrc_buffer.c Project: ambrero/FFmpeg
int av_vsrc_buffer_add_video_buffer_ref(AVFilterContext *buffer_filter,
                                        AVFilterBufferRef *picref, int flags)
{
    BufferSourceContext *c = buffer_filter->priv;
    AVFilterLink *outlink = buffer_filter->outputs[0];
    int ret;

    if (c->picref) {
        if (flags & AV_VSRC_BUF_FLAG_OVERWRITE) {
            avfilter_unref_buffer(c->picref);
            c->picref = NULL;
        } else {
            av_log(buffer_filter, AV_LOG_ERROR,
                   "Buffering several frames is not supported. "
                   "Please consume all available frames before adding a new one.\n");
            return AVERROR(EINVAL);
        }
    }

    if (picref->video->w != c->w || picref->video->h != c->h || picref->format != c->pix_fmt) {
        AVFilterContext *scale = buffer_filter->outputs[0]->dst;
        AVFilterLink *link;
        char scale_param[1024];

        av_log(buffer_filter, AV_LOG_INFO,
               "Buffer video input changed from size:%dx%d fmt:%s to size:%dx%d fmt:%s\n",
               c->w, c->h, av_pix_fmt_descriptors[c->pix_fmt].name,
               picref->video->w, picref->video->h, av_pix_fmt_descriptors[picref->format].name);

        if (!scale || strcmp(scale->filter->name, "scale")) {
            AVFilter *f = avfilter_get_by_name("scale");

            av_log(buffer_filter, AV_LOG_INFO, "Inserting scaler filter\n");
            if ((ret = avfilter_open(&scale, f, "Input equalizer")) < 0)
                return ret;

            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s", c->w, c->h, c->sws_param);
            if ((ret = avfilter_init_filter(scale, scale_param, NULL)) < 0) {
                avfilter_free(scale);
                return ret;
            }

            if ((ret = avfilter_insert_filter(buffer_filter->outputs[0], scale, 0, 0)) < 0) {
                avfilter_free(scale);
                return ret;
            }
            scale->outputs[0]->time_base = scale->inputs[0]->time_base;

            scale->outputs[0]->format= c->pix_fmt;
        } else if (!strcmp(scale->filter->name, "scale")) {
            snprintf(scale_param, sizeof(scale_param)-1, "%d:%d:%s",
                     scale->outputs[0]->w, scale->outputs[0]->h, c->sws_param);
            scale->filter->init(scale, scale_param, NULL);
        }

        c->pix_fmt = scale->inputs[0]->format = picref->format;
        c->w       = scale->inputs[0]->w      = picref->video->w;
        c->h       = scale->inputs[0]->h      = picref->video->h;

        link = scale->outputs[0];
        if ((ret =  link->srcpad->config_props(link)) < 0)
            return ret;
    }

    c->picref = avfilter_get_video_buffer(outlink, AV_PERM_WRITE,
                                          picref->video->w, picref->video->h);
    av_image_copy(c->picref->data, c->picref->linesize,
                  picref->data, picref->linesize,
                  picref->format, picref->video->w, picref->video->h);
    avfilter_copy_buffer_ref_props(c->picref, picref);

    return 0;
}
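
Note: this last example predates the AVFrame-based buffer-source API. AVFilterBufferRef, avfilter_open() and av_vsrc_buffer_add_video_buffer_ref() have since been removed from FFmpeg; current code feeds frames to a buffer source with av_buffersrc_add_frame() (or av_buffersrc_add_frame_flags(), as sketched after code example #27) instead.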