Code Example #1
File: bsf.c Project: chris-magic/xplayer
static int bsf_parse_single(const char *str, AVBSFList *bsf_lst)
{
    char *bsf_name, *bsf_options_str, *buf;
    AVDictionary *bsf_options = NULL;
    int ret = 0;

    if (!(buf = av_strdup(str)))
        return AVERROR(ENOMEM);

    bsf_name = av_strtok(buf, "=", &bsf_options_str);
    if (!bsf_name) {
        ret = AVERROR(EINVAL);
        goto end;
    }

    if (bsf_options_str) {
        ret = av_dict_parse_string(&bsf_options, bsf_options_str, "=", ":", 0);
        if (ret < 0)
            goto end;
    }

    ret = av_bsf_list_append2(bsf_lst, bsf_name, &bsf_options);

    av_dict_free(&bsf_options);
end:
    av_free(buf);
    return ret;
}
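
Every example in this collection follows the same basic pattern: av_dict_parse_string() fills an AVDictionary from a key/value string using caller-chosen separators, av_dict_get() with an empty key and AV_DICT_IGNORE_SUFFIX iterates the entries, and av_dict_free() releases the dictionary (safe even after a failed or partial parse). A minimal, self-contained sketch of that pattern (the option string and separators below are purely illustrative, not taken from any of the listed projects):

#include <stdio.h>
#include <libavutil/dict.h>

int main(void)
{
    AVDictionary *dict = NULL;
    AVDictionaryEntry *entry = NULL;
    /* "=" separates a key from its value, ":" separates pairs */
    int ret = av_dict_parse_string(&dict, "profile=main:preset=fast", "=", ":", 0);

    if (ret < 0) {
        fprintf(stderr, "Could not parse the option string\n");
    } else {
        while ((entry = av_dict_get(dict, "", entry, AV_DICT_IGNORE_SUFFIX)))
            printf("%s = %s\n", entry->key, entry->value);
    }

    av_dict_free(&dict); /* required even when parsing failed part-way */
    return ret < 0;
}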
Code Example #2
File: ffmpeg-mux.c Project: kustom666/obs-studio
static inline int open_output_file(struct ffmpeg_mux *ffm)
{
	AVOutputFormat *format = ffm->output->oformat;
	int ret;

	if ((format->flags & AVFMT_NOFILE) == 0) {
		ret = avio_open(&ffm->output->pb, ffm->params.file,
				AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf("Couldn't open '%s', %s",
					ffm->params.file, av_err2str(ret));
			return FFM_ERROR;
		}
	}

	strncpy(ffm->output->filename, ffm->params.file,
			sizeof(ffm->output->filename));
	ffm->output->filename[sizeof(ffm->output->filename) - 1] = 0;

	AVDictionary *dict = NULL;
	if ((ret = av_dict_parse_string(&dict, ffm->params.muxer_settings,
				"=", " ", 0))) {
		printf("Failed to parse muxer settings: %s\n%s",
				av_err2str(ret), ffm->params.muxer_settings);

		av_dict_free(&dict);
	}

	if (av_dict_count(dict) > 0) {
		printf("Using muxer settings:");

		AVDictionaryEntry *entry = NULL;
		while ((entry = av_dict_get(dict, "", entry,
						AV_DICT_IGNORE_SUFFIX)))
			printf("\n\t%s=%s", entry->key, entry->value);

		printf("\n");
	}

	ret = avformat_write_header(ffm->output, &dict);
	if (ret < 0) {
		printf("Error opening '%s': %s",
				ffm->params.file, av_err2str(ret));

		av_dict_free(&dict);

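		/* -22 is AVERROR(EINVAL) on typical platforms: the muxer rejected the settings or stream parameters */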
		return ret == -22 ? FFM_UNSUPPORTED : FFM_ERROR;
	}

	av_dict_free(&dict);

	return FFM_SUCCESS;
}
Code Example #3
File: ffmenc.c Project: 309746069/FFmpeg
static int ffm_write_recommended_config(AVIOContext *pb, AVCodecContext *ctx, unsigned tag,
                                        const char *configuration)
{
    int ret;
    const AVCodec *enc = ctx->codec ? ctx->codec : avcodec_find_encoder(ctx->codec_id);
    AVIOContext *tmp;
    AVDictionaryEntry *t = NULL;
    AVDictionary *all = NULL, *comm = NULL, *prv = NULL;
    char *buf = NULL;

    if (!enc || !enc->priv_class || !enc->priv_data_size) {
        /* codec is not known/has no private options, so save everything as common options */
        if (avio_open_dyn_buf(&tmp) < 0)
            return AVERROR(ENOMEM);
        avio_put_str(tmp, configuration);
        write_header_chunk(pb, tmp, tag);
        return 0;
    }

    if ((ret = av_dict_parse_string(&all, configuration, "=", ",", 0)) < 0)
        return ret;

    while ((t = av_dict_get(all, "", t, AV_DICT_IGNORE_SUFFIX))) {
        if (av_opt_find((void *)&enc->priv_class, t->key, NULL, 0, AV_OPT_SEARCH_FAKE_OBJ)) {
            if ((ret = av_dict_set(&prv, t->key, t->value, 0)) < 0)
                goto fail;
        } else if ((ret = av_dict_set(&comm, t->key, t->value, 0)) < 0)
            goto fail;
    }

    if (comm) {
        if ((ret = av_dict_get_string(comm, &buf, '=', ',')) < 0 ||
            (ret = avio_open_dyn_buf(&tmp)) < 0)
            goto fail;
        avio_put_str(tmp, buf);
        av_freep(&buf);
        write_header_chunk(pb, tmp, tag);
    }
    if (prv) {
        if ((ret = av_dict_get_string(prv, &buf, '=', ',')) < 0 ||
            (ret = avio_open_dyn_buf(&tmp)) < 0)
            goto fail;
        avio_put_str(tmp, buf);
        write_header_chunk(pb, tmp, MKBETAG('C', 'P', 'R', 'V'));
    }

  fail:
    av_free(buf);
    av_dict_free(&all);
    av_dict_free(&comm);
    av_dict_free(&prv);
    return ret;
}
Code Example #4
File: dict.c Project: 0day-ci/FFmpeg
static void test_separators(const AVDictionary *m, const char pair, const char val)
{
    AVDictionary *dict = NULL;
    char pairs[] = {pair , '\0'};
    char vals[]  = {val, '\0'};

    char *buffer = NULL;
    av_dict_copy(&dict, m, 0);
    print_dict(dict);
    av_dict_get_string(dict, &buffer, val, pair);
    printf("%s\n", buffer);
    av_dict_free(&dict);
    av_dict_parse_string(&dict, buffer, vals, pairs, 0);
    av_freep(&buffer);
    print_dict(dict);
    av_dict_free(&dict);
}
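
For context, the test above round-trips a dictionary: av_dict_get_string() serializes it with caller-chosen separators and av_dict_parse_string() rebuilds an equivalent dictionary from the result. A small sketch of that round trip with hypothetical keys and values (error checks omitted for brevity):

AVDictionary *src = NULL, *copy = NULL;
char *serialized = NULL;

av_dict_set(&src, "lang", "eng", 0);
av_dict_set(&src, "title", "demo", 0);

/* serialize with '=' between key and value and ',' between pairs */
av_dict_get_string(src, &serialized, '=', ',');      /* e.g. "lang=eng,title=demo" */

/* parse it back; the separator arguments must match the ones used above */
av_dict_parse_string(&copy, serialized, "=", ",", 0);

av_freep(&serialized);
av_dict_free(&src);
av_dict_free(&copy);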
Code Example #5
File: avfilter.c Project: Rodeo314/tim-libav
int avfilter_init_str(AVFilterContext *filter, const char *args)
{
    AVDictionary *options = NULL;
    AVDictionaryEntry *e;
    int ret = 0;

    if (args && *args) {
        if (!filter->filter->priv_class) {
            av_log(filter, AV_LOG_ERROR, "This filter does not take any "
                   "options, but options were provided: %s.\n", args);
            return AVERROR(EINVAL);
        }

        if (strchr(args, '=')) {
            /* assume a list of key1=value1:key2=value2:... */
            ret = av_dict_parse_string(&options, args, "=", ":", 0);
            if (ret < 0)
                goto fail;
        } else {
            ret = process_unnamed_options(filter, &options, args);
            if (ret < 0)
                goto fail;
        }
    }

    ret = avfilter_init_dict(filter, &options);
    if (ret < 0)
        goto fail;

    if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }

fail:
    av_dict_free(&options);

    return ret;
}
Code Example #6
static int parse_slave_fifo_options(const char *use_fifo,
                                    const char *fifo_options, TeeSlave *tee_slave)
{
    int ret = 0;

    if (use_fifo) {
        /*TODO - change this to use proper function for parsing boolean
         *       options when there is one */
        if (av_match_name(use_fifo, "true,y,yes,enable,enabled,on,1")) {
            tee_slave->use_fifo = 1;
        } else if (av_match_name(use_fifo, "false,n,no,disable,disabled,off,0")) {
            tee_slave->use_fifo = 0;
        } else {
            return AVERROR(EINVAL);
        }
    }

    if (fifo_options)
        ret = av_dict_parse_string(&tee_slave->fifo_options, fifo_options, "=", ":", 0);

    return ret;
}
Code Example #7
File: libkvazaar.c Project: gbeauchesne/FFmpeg
static av_cold int libkvazaar_init(AVCodecContext *avctx)
{
    int retval = 0;
    kvz_config *cfg = NULL;
    kvz_encoder *enc = NULL;
    const kvz_api *const api = kvz_api_get(8);

    LibkvazaarContext *const ctx = avctx->priv_data;

    // Kvazaar requires width and height to be multiples of eight.
    if (avctx->width % 8 || avctx->height % 8) {
        av_log(avctx, AV_LOG_ERROR, "Video dimensions are not a multiple of 8.\n");
        retval = AVERROR_INVALIDDATA;
        goto done;
    }

    cfg = api->config_alloc();
    if (!cfg) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate kvazaar config structure.\n");
        retval = AVERROR(ENOMEM);
        goto done;
    }

    if (!api->config_init(cfg)) {
        av_log(avctx, AV_LOG_ERROR, "Could not initialize kvazaar config structure.\n");
        retval = AVERROR_EXTERNAL;
        goto done;
    }

    cfg->width = avctx->width;
    cfg->height = avctx->height;
    cfg->framerate =
      avctx->time_base.den / (double)(avctx->time_base.num * avctx->ticks_per_frame);
    cfg->target_bitrate = avctx->bit_rate;
    cfg->vui.sar_width = avctx->sample_aspect_ratio.num;
    cfg->vui.sar_height = avctx->sample_aspect_ratio.den;

    if (ctx->kvz_params) {
        AVDictionary *dict = NULL;
        if (!av_dict_parse_string(&dict, ctx->kvz_params, "=", ",", 0)) {
            AVDictionaryEntry *entry = NULL;
            while ((entry = av_dict_get(dict, "", entry, AV_DICT_IGNORE_SUFFIX))) {
                if (!api->config_parse(cfg, entry->key, entry->value)) {
                    av_log(avctx, AV_LOG_WARNING,
                           "Invalid option: %s=%s.\n",
                           entry->key, entry->value);
                }
            }
            av_dict_free(&dict);
        }
    }

    enc = api->encoder_open(cfg);
    if (!enc) {
        av_log(avctx, AV_LOG_ERROR, "Could not open kvazaar encoder.\n");
        retval = AVERROR_EXTERNAL;
        goto done;
    }

    ctx->api = api;
    ctx->encoder = enc;
    ctx->config = cfg;
    enc = NULL;
    cfg = NULL;

done:
    api->config_destroy(cfg);
    api->encoder_close(enc);

    return retval;
}
Code Example #8
File: libx265.c Project: yodamaster/libav
static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
    libx265Context *ctx = avctx->priv_data;
    x265_nal *nal;
    uint8_t *buf;
    int nnal;
    int ret;
    int i;

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    ctx->params = x265_param_alloc();
    if (!ctx->params) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
        return AVERROR(ENOMEM);
    }

    x265_param_default(ctx->params);
    if (x265_param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid preset or tune.\n");
        return AVERROR(EINVAL);
    }

    ctx->params->frameNumThreads = avctx->thread_count;
    ctx->params->frameRate       = (int) (avctx->time_base.den / avctx->time_base.num);
    ctx->params->sourceWidth     = avctx->width;
    ctx->params->sourceHeight    = avctx->height;
    ctx->params->inputBitDepth   = av_pix_fmt_desc_get(avctx->pix_fmt)->comp[0].depth_minus1 + 1;

    if (avctx->bit_rate > 0) {
        ctx->params->rc.bitrate         = avctx->bit_rate / 1000;
        ctx->params->rc.rateControlMode = X265_RC_ABR;
    }

    if (ctx->x265_opts) {
        AVDictionary *dict    = NULL;
        AVDictionaryEntry *en = NULL;

        if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
                int parse_ret = x265_param_parse(ctx->params, en->key, en->value);

                switch (parse_ret) {
                case X265_PARAM_BAD_NAME:
                    av_log(avctx, AV_LOG_WARNING,
                          "Unknown option: %s.\n", en->key);
                    break;
                case X265_PARAM_BAD_VALUE:
                    av_log(avctx, AV_LOG_WARNING,
                          "Invalid value for %s: %s.\n", en->key, en->value);
                    break;
                default:
                    break;
                }
            }
            av_dict_free(&dict);
        }
    }

    ctx->encoder = x265_encoder_open(ctx->params);
    if (!ctx->encoder) {
        av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    ret = x265_encoder_headers(ctx->encoder, &nal, &nnal);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < nnal; i++)
        ctx->header_size += nal[i].sizeBytes;

    ctx->header = av_malloc(ctx->header_size);
    if (!ctx->header) {
        av_log(avctx, AV_LOG_ERROR,
               "Cannot allocate HEVC header of size %d.\n", ctx->header_size);
        libx265_encode_close(avctx);
        return AVERROR(ENOMEM);
    }

    buf = ctx->header;
    for (i = 0; i < nnal; i++) {
        memcpy(buf, nal[i].payload, nal[i].sizeBytes);
        buf += nal[i].sizeBytes;
    }

    return 0;
}
Code Example #9
File: libx265.c Project: Acidburn0zzz/libav
static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
    libx265Context *ctx = avctx->priv_data;
    x265_nal *nal;
    char sar[12];
    int sar_num, sar_den;
    int nnal;

    if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL &&
        !av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_w) {
        av_log(avctx, AV_LOG_ERROR,
               "4:2:2 and 4:4:4 support is not fully defined for HEVC yet. "
               "Set -strict experimental to encode anyway.\n");
        return AVERROR(ENOSYS);
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    ctx->params = x265_param_alloc();
    if (!ctx->params) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
        return AVERROR(ENOMEM);
    }

    if (x265_param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid preset or tune.\n");
        return AVERROR(EINVAL);
    }

    ctx->params->frameNumThreads = avctx->thread_count;
    ctx->params->fpsNum          = avctx->time_base.den;
    ctx->params->fpsDenom        = avctx->time_base.num * avctx->ticks_per_frame;
    ctx->params->sourceWidth     = avctx->width;
    ctx->params->sourceHeight    = avctx->height;

    if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
        av_reduce(&sar_num, &sar_den,
                  avctx->sample_aspect_ratio.num,
                  avctx->sample_aspect_ratio.den, 65535);
        snprintf(sar, sizeof(sar), "%d:%d", sar_num, sar_den);
        if (x265_param_parse(ctx->params, "sar", sar) == X265_PARAM_BAD_VALUE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", sar_num, sar_den);
            return AVERROR_INVALIDDATA;
        }
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV420P10:
        ctx->params->internalCsp = X265_CSP_I420;
        break;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV422P10:
        ctx->params->internalCsp = X265_CSP_I422;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
        ctx->params->internalCsp = X265_CSP_I444;
        break;
    }

    if (avctx->bit_rate > 0) {
        ctx->params->rc.bitrate         = avctx->bit_rate / 1000;
        ctx->params->rc.rateControlMode = X265_RC_ABR;
    }

    if (!(avctx->flags & CODEC_FLAG_GLOBAL_HEADER))
        ctx->params->bRepeatHeaders = 1;

    if (ctx->x265_opts) {
        AVDictionary *dict    = NULL;
        AVDictionaryEntry *en = NULL;

        if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
                int parse_ret = x265_param_parse(ctx->params, en->key, en->value);

                switch (parse_ret) {
                case X265_PARAM_BAD_NAME:
                    av_log(avctx, AV_LOG_WARNING,
                          "Unknown option: %s.\n", en->key);
                    break;
                case X265_PARAM_BAD_VALUE:
                    av_log(avctx, AV_LOG_WARNING,
                          "Invalid value for %s: %s.\n", en->key, en->value);
                    break;
                default:
                    break;
                }
            }
            av_dict_free(&dict);
        }
    }

    ctx->encoder = x265_encoder_open(ctx->params);
    if (!ctx->encoder) {
        av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        avctx->extradata_size = x265_encoder_headers(ctx->encoder, &nal, &nnal);
        if (avctx->extradata_size <= 0) {
            av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
            libx265_encode_close(avctx);
            return AVERROR_INVALIDDATA;
        }

        avctx->extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata) {
            av_log(avctx, AV_LOG_ERROR,
                   "Cannot allocate HEVC header of size %d.\n", avctx->extradata_size);
            libx265_encode_close(avctx);
            return AVERROR(ENOMEM);
        }

        memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);
    }

    return 0;
}
Code Example #10
File: libkvazaar.c Project: ZhaoliangGuo/FFmpeg
static av_cold int libkvazaar_init(AVCodecContext *avctx)
{
    LibkvazaarContext *const ctx = avctx->priv_data;
    const kvz_api *const api = ctx->api = kvz_api_get(8);
    kvz_config *cfg = NULL;
    kvz_encoder *enc = NULL;

    /* Kvazaar requires width and height to be multiples of eight. */
    if (avctx->width % 8 || avctx->height % 8) {
        av_log(avctx, AV_LOG_ERROR,
               "Video dimensions are not a multiple of 8 (%dx%d).\n",
               avctx->width, avctx->height);
        return AVERROR(ENOSYS);
    }

    ctx->config = cfg = api->config_alloc();
    if (!cfg) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not allocate kvazaar config structure.\n");
        return AVERROR(ENOMEM);
    }

    if (!api->config_init(cfg)) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not initialize kvazaar config structure.\n");
        return AVERROR_BUG;
    }

    cfg->width  = avctx->width;
    cfg->height = avctx->height;

    if (avctx->ticks_per_frame > INT_MAX / avctx->time_base.num) {
        av_log(avctx, AV_LOG_ERROR,
               "Could not set framerate for kvazaar: integer overflow\n");
        return AVERROR(EINVAL);
    }
    cfg->framerate_num   = avctx->time_base.den;
    cfg->framerate_denom = avctx->time_base.num * avctx->ticks_per_frame;
    cfg->target_bitrate = avctx->bit_rate;
    cfg->vui.sar_width  = avctx->sample_aspect_ratio.num;
    cfg->vui.sar_height = avctx->sample_aspect_ratio.den;

    if (ctx->kvz_params) {
        AVDictionary *dict = NULL;
        if (!av_dict_parse_string(&dict, ctx->kvz_params, "=", ",", 0)) {
            AVDictionaryEntry *entry = NULL;
            while ((entry = av_dict_get(dict, "", entry, AV_DICT_IGNORE_SUFFIX))) {
                if (!api->config_parse(cfg, entry->key, entry->value)) {
                    av_log(avctx, AV_LOG_WARNING, "Invalid option: %s=%s.\n",
                           entry->key, entry->value);
                }
            }
            av_dict_free(&dict);
        }
    }

    ctx->encoder = enc = api->encoder_open(cfg);
    if (!enc) {
        av_log(avctx, AV_LOG_ERROR, "Could not open kvazaar encoder.\n");
        return AVERROR_BUG;
    }

    if (avctx->flags & AV_CODEC_FLAG_GLOBAL_HEADER) {
        kvz_data_chunk *data_out = NULL;
        kvz_data_chunk *chunk = NULL;
        uint32_t len_out;
        uint8_t *p;

        if (!api->encoder_headers(enc, &data_out, &len_out))
            return AVERROR(ENOMEM);

        avctx->extradata = p = av_mallocz(len_out + AV_INPUT_BUFFER_PADDING_SIZE);
        if (!p) {
            ctx->api->chunk_free(data_out);
            return AVERROR(ENOMEM);
        }

        avctx->extradata_size = len_out;

        for (chunk = data_out; chunk != NULL; chunk = chunk->next) {
            memcpy(p, chunk->data, chunk->len);
            p += chunk->len;
        }

        ctx->api->chunk_free(data_out);
    }

    return 0;
}
Code Example #11
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVDictionary *opts = NULL;
    AVFormatContext *avfc;
    AVDictionaryEntry *t = NULL;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    char mp_filename[256]="mp:";

    stream_seek(demuxer->stream, 0);

    avfc = avformat_alloc_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (user_correct_pts != 0)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    if(opt_probesize) {
        if (av_opt_set_int(avfc, "probesize", opt_probesize, 0) < 0)
            mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        if (av_opt_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE, 0) < 0)
            mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if (rtsp_transport_http || rtsp_transport_tcp)
       av_dict_set(&opts, "rtsp_transport", rtsp_transport_http ? "http" : "tcp", 0);

    if(opt_avopt){
        if(av_dict_parse_string(&opts, opt_avopt, "=", ",", 0) < 0){
            mp_msg(MSGT_HEADER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", opt_avopt);
            return NULL;
        }
    }

    if(demuxer->stream->url) {
        if (!strncmp(demuxer->stream->url, "ffmpeg://dummy://", 17))
            av_strlcpy(mp_filename, demuxer->stream->url + 17, sizeof(mp_filename));
        else if (!strncmp(demuxer->stream->url, "ffmpeg://", 9))
            av_strlcpy(mp_filename, demuxer->stream->url + 9, sizeof(mp_filename));
        else if (!strncmp(demuxer->stream->url, "rtsp://", 7))
            av_strlcpy(mp_filename, demuxer->stream->url, sizeof(mp_filename));
        else
            av_strlcat(mp_filename, demuxer->stream->url, sizeof(mp_filename));
    } else
        av_strlcat(mp_filename, "foobar.dummy", sizeof(mp_filename));

    if (!(priv->avif->flags & AVFMT_NOFILE)) {
        uint8_t *buffer = av_mallocz(BIO_BUFFER_SIZE);
        priv->pb = avio_alloc_context(buffer, BIO_BUFFER_SIZE, 0,
                                      demuxer, mp_read, NULL, mp_seek);
        priv->pb->read_seek = mp_read_seek;
        if (!demuxer->stream->end_pos || (demuxer->stream->flags & MP_STREAM_SEEK) != MP_STREAM_SEEK)
            priv->pb->seekable = 0;
        avfc->pb = priv->pb;
    }

    if(avformat_open_input(&avfc, mp_filename, priv->avif, &opts)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }
    if (av_dict_count(opts)) {
        AVDictionaryEntry *e = NULL;
        int invalid = 0;
        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX))) {
            if (strcmp(e->key, "rtsp_transport")) {
                invalid++;
                mp_msg(MSGT_HEADER,MSGL_ERR,"Unknown option %s\n", e->key);
            }
        }
        if (invalid)
            return 0;
    }
    av_dict_free(&opts);

    priv->avfc= avfc;

    if(avformat_find_stream_info(avfc, NULL) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }

    /* Add metadata. */
    while((t = av_dict_get(avfc->metadata, "", t, AV_DICT_IGNORE_SUFFIX)))
        demux_info_add(demuxer, t->key, t->value);

    for(i=0; i < avfc->nb_chapters; i++) {
        AVChapter *c = avfc->chapters[i];
        uint64_t start = av_rescale_q(c->start, c->time_base, (AVRational){1,1000});
        uint64_t end   = av_rescale_q(c->end, c->time_base, (AVRational){1,1000});
        t = av_dict_get(c->metadata, "title", NULL, 0);
        demuxer_add_chapter(demuxer, t ? t->value : NULL, start, end);
    }

    for(i=0; i<avfc->nb_streams; i++)
        handle_stream(demuxer, avfc, i);
    priv->nb_streams_last = avfc->nb_streams;

    if(avfc->nb_programs) {
        int p;
        for (p = 0; p < avfc->nb_programs; p++) {
            AVProgram *program = avfc->programs[p];
            t = av_dict_get(program->metadata, "title", NULL, 0);
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, t ? t->value : "");
            mp_msg(MSGT_IDENTIFY, MSGL_V, "PROGRAM_ID=%d\n", program->id);
        }
    }

    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            if (!priv->sub_streams)
            return NULL;
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
Code Example #12
File: libx265.c Project: AllanDaemon/FFmpeg
static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
    libx265Context *ctx = avctx->priv_data;
    x265_nal *nal;
    uint8_t *buf;
    int sar_num, sar_den;
    int nnal;
    int ret;
    int i;

    if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL &&
        !av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_w &&
        !av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_h) {
        av_log(avctx, AV_LOG_ERROR,
               "4:4:4 support is not fully defined for HEVC yet. "
               "Set -strict experimental to encode anyway.\n");
        return AVERROR(ENOSYS);
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    ctx->params = x265_param_alloc();
    if (!ctx->params) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
        return AVERROR(ENOMEM);
    }

    if (x265_param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
        av_log(avctx, AV_LOG_ERROR, "Invalid preset or tune.\n");
        return AVERROR(EINVAL);
    }

    ctx->params->frameNumThreads = avctx->thread_count;
    ctx->params->fpsNum          = avctx->time_base.den;
    ctx->params->fpsDenom        = avctx->time_base.num * avctx->ticks_per_frame;
    ctx->params->sourceWidth     = avctx->width;
    ctx->params->sourceHeight    = avctx->height;

    av_reduce(&sar_num, &sar_den,
              avctx->sample_aspect_ratio.num,
              avctx->sample_aspect_ratio.den, 4096);
    ctx->params->vui.bEnableVuiParametersPresentFlag = 1;
    ctx->params->vui.bEnableAspectRatioIdc           = 1;
    ctx->params->vui.aspectRatioIdc                  = 255;
    ctx->params->vui.sarWidth                        = sar_num;
    ctx->params->vui.sarHeight                       = sar_den;

    if (x265_max_bit_depth == 8)
        ctx->params->internalBitDepth = 8;
    else if (x265_max_bit_depth == 12)
        ctx->params->internalBitDepth = 10;

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV420P10:
        ctx->params->internalCsp = X265_CSP_I420;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
        ctx->params->internalCsp = X265_CSP_I444;
        break;
    }

    if (avctx->bit_rate > 0) {
        ctx->params->rc.bitrate         = avctx->bit_rate / 1000;
        ctx->params->rc.rateControlMode = X265_RC_ABR;
    }

    if (ctx->x265_opts) {
        AVDictionary *dict    = NULL;
        AVDictionaryEntry *en = NULL;

        if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
                int parse_ret = x265_param_parse(ctx->params, en->key, en->value);

                switch (parse_ret) {
                case X265_PARAM_BAD_NAME:
                    av_log(avctx, AV_LOG_WARNING,
                          "Unknown option: %s.\n", en->key);
                    break;
                case X265_PARAM_BAD_VALUE:
                    av_log(avctx, AV_LOG_WARNING,
                          "Invalid value for %s: %s.\n", en->key, en->value);
                    break;
                default:
                    break;
                }
            }
            av_dict_free(&dict);
        }
    }

    ctx->encoder = x265_encoder_open(ctx->params);
    if (!ctx->encoder) {
        av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    ret = x265_encoder_headers(ctx->encoder, &nal, &nnal);
    if (ret < 0) {
        av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    for (i = 0; i < nnal; i++)
        ctx->header_size += nal[i].sizeBytes;

    ctx->header = av_malloc(ctx->header_size + FF_INPUT_BUFFER_PADDING_SIZE);
    if (!ctx->header) {
        av_log(avctx, AV_LOG_ERROR,
               "Cannot allocate HEVC header of size %d.\n", ctx->header_size);
        libx265_encode_close(avctx);
        return AVERROR(ENOMEM);
    }

    buf = ctx->header;
    for (i = 0; i < nnal; i++) {
        memcpy(buf, nal[i].payload, nal[i].sizeBytes);
        buf += nal[i].sizeBytes;
    }

    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        avctx->extradata_size = ctx->header_size;
        avctx->extradata = ctx->header;
        ctx->header_size = 0;
        ctx->header = NULL;
    }

    return 0;
}
Code Example #13
File: bmdcapture.cpp Project: grzechuj/bmdtools
int main(int argc, char *argv[])
{
    IDeckLinkIterator *deckLinkIterator = CreateDeckLinkIteratorInstance();
    DeckLinkCaptureDelegate *delegate;
    BMDDisplayMode selectedDisplayMode = bmdModeNTSC;
    int displayModeCount               = 0;
    int exitStatus                     = 1;
    int aconnection                    = 0, vconnection = 0, camera = 0, i = 0;
    int ch;
    AVDictionary *opts = NULL;
    BMDPixelFormat pix = bmdFormat8BitYUV;
    HRESULT result;
    pthread_t th;

    pthread_mutex_init(&sleepMutex, NULL);
    pthread_cond_init(&sleepCond, NULL);
    av_register_all();

    if (!deckLinkIterator) {
        fprintf(stderr,
                "This application requires the DeckLink drivers installed.\n");
        goto bail;
    }

    // Parse command line options
    while ((ch = getopt(argc, argv, "?hvc:s:f:a:m:n:p:M:F:C:A:V:o:")) != -1) {
        switch (ch) {
        case 'v':
            g_verbose = true;
            break;
        case 'm':
            g_videoModeIndex = atoi(optarg);
            break;
        case 'c':
            g_audioChannels = atoi(optarg);
            if (g_audioChannels != 2 &&
                g_audioChannels != 8 &&
                g_audioChannels != 16) {
                fprintf(
                    stderr,
                    "Invalid argument: Audio Channels must be either 2, 8 or 16\n");
                goto bail;
            }
            break;
        case 's':
            g_audioSampleDepth = atoi(optarg);
            switch (g_audioSampleDepth) {
            case 16:
                sample_fmt = AV_SAMPLE_FMT_S16;
                break;
            case 32:
                sample_fmt = AV_SAMPLE_FMT_S32;
                break;
            default:
                fprintf(stderr,
                        "Invalid argument:"
                        " Audio Sample Depth must be either 16 bits"
                        " or 32 bits\n");
                goto bail;
            }
            break;
        case 'p':
            switch (atoi(optarg)) {
            case  8:
                pix     = bmdFormat8BitYUV;
                pix_fmt = PIX_FMT_UYVY422;
                break;
            case 10:
                pix     = bmdFormat10BitYUV;
                pix_fmt = PIX_FMT_YUV422P10;
                break;
            default:
                fprintf(
                    stderr,
                    "Invalid argument: Pixel Format Depth must be either 8 bits or 10 bits\n");
                goto bail;
            }
            break;
        case 'f':
            g_videoOutputFile = optarg;
            break;
        case 'n':
            g_maxFrames = atoi(optarg);
            break;
        case 'M':
            g_memoryLimit = atoi(optarg) * 1024 * 1024 * 1024L;
            break;
        case 'F':
            fmt = av_guess_format(optarg, NULL, NULL);
            break;
        case 'A':
            aconnection = atoi(optarg);
            break;
        case 'V':
            vconnection = atoi(optarg);
            break;
        case 'C':
            camera = atoi(optarg);
            break;
        case 'S':
            serial_fd = open(optarg, O_RDWR | O_NONBLOCK);
            break;
        case 'o':
            if (av_dict_parse_string(&opts, optarg, "=", ":", 0) < 0) {
                fprintf(stderr, "Cannot parse option string %s\n",
                        optarg);
                goto bail;
            }
            break;
        case '?':
        case 'h':
            usage(0);
        }
    }

    /* Connect to the first DeckLink instance */
    do
        result = deckLinkIterator->Next(&deckLink);
    while (i++ < camera);

    if (result != S_OK) {
        fprintf(stderr, "No DeckLink PCI cards found.\n");
        goto bail;
    }

    if (deckLink->QueryInterface(IID_IDeckLinkInput,
                                 (void **)&deckLinkInput) != S_OK) {
        goto bail;
    }

    result = deckLink->QueryInterface(IID_IDeckLinkConfiguration,
                                      (void **)&deckLinkConfiguration);
    if (result != S_OK) {
        fprintf(
            stderr,
            "Could not obtain the IDeckLinkConfiguration interface - result = %08x\n",
            result);
        goto bail;
    }

    result = S_OK;
    switch (aconnection) {
    case 1:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionAnalog);
        break;
    case 2:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionEmbedded);
        break;
    case 3:
        result = DECKLINK_SET_AUDIO_CONNECTION(bmdAudioConnectionAESEBU);
        break;
    default:
        // do not change it
        break;
    }
    if (result != S_OK) {
        fprintf(stderr, "Failed to set audio input - result = %08x\n", result);
        goto bail;
    }

    result = S_OK;
    switch (vconnection) {
    case 1:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComposite);
        break;
    case 2:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionComponent);
        break;
    case 3:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionHDMI);
        break;
    case 4:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionSDI);
        break;
    case 5:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionOpticalSDI);
        break;
    case 6:
        result = DECKLINK_SET_VIDEO_CONNECTION(bmdVideoConnectionSVideo);
        break;
    default:
        // do not change it
        break;
    }
    if (result != S_OK) {
        fprintf(stderr, "Failed to set video input - result %08x\n", result);
        goto bail;
    }

    delegate = new DeckLinkCaptureDelegate();
    deckLinkInput->SetCallback(delegate);

    // Obtain an IDeckLinkDisplayModeIterator to enumerate the display modes supported on output
    result = deckLinkInput->GetDisplayModeIterator(&displayModeIterator);
    if (result != S_OK) {
        fprintf(
            stderr,
            "Could not obtain the video output display mode iterator - result = %08x\n",
            result);
        goto bail;
    }

    if (!g_videoOutputFile) {
        fprintf(stderr,
                "Missing argument: Please specify output path using -f\n");
        goto bail;
    }

    if (!fmt) {
        fmt = av_guess_format(NULL, g_videoOutputFile, NULL);
        if (!fmt) {
            fprintf(
                stderr,
                "Unable to guess output format, please specify explicitly using -F\n");
            goto bail;
        }
    }

    if (g_videoModeIndex < 0) {
        fprintf(stderr, "No video mode specified\n");
        usage(0);
    }

    selectedDisplayMode = -1;
    while (displayModeIterator->Next(&displayMode) == S_OK) {
        if (g_videoModeIndex == displayModeCount) {
            selectedDisplayMode = displayMode->GetDisplayMode();
            break;
        }
        displayModeCount++;
        displayMode->Release();
    }

    if (selectedDisplayMode < 0) {
        fprintf(stderr, "Invalid mode %d specified\n", g_videoModeIndex);
        goto bail;
    }

    result = deckLinkInput->EnableVideoInput(selectedDisplayMode, pix, 0);
    if (result != S_OK) {
        fprintf(stderr,
                "Failed to enable video input. Is another application using "
                "the card?\n");
        goto bail;
    }

    result = deckLinkInput->EnableAudioInput(bmdAudioSampleRate48kHz,
                                             g_audioSampleDepth,
                                             g_audioChannels);
    if (result != S_OK) {
        fprintf(stderr,
                "Failed to enable audio input. Is another application using "
                "the card?\n");
        goto bail;
    }

    oc          = avformat_alloc_context();
    oc->oformat = fmt;

    snprintf(oc->filename, sizeof(oc->filename), "%s", g_videoOutputFile);

    fmt->video_codec = (pix == bmdFormat8BitYUV ? AV_CODEC_ID_RAWVIDEO : AV_CODEC_ID_V210);
    fmt->audio_codec = (sample_fmt == AV_SAMPLE_FMT_S16 ? AV_CODEC_ID_PCM_S16LE : AV_CODEC_ID_PCM_S32LE);

    video_st = add_video_stream(oc, fmt->video_codec);
    audio_st = add_audio_stream(oc, fmt->audio_codec);

    if (serial_fd > 0)
        data_st = add_data_stream(oc, AV_CODEC_ID_TEXT);

    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, oc->filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", oc->filename);
            exit(1);
        }
    }

    avformat_write_header(oc, NULL);
    avpacket_queue_init(&queue);

    result = deckLinkInput->StartStreams();
    if (result != S_OK) {
        goto bail;
    }
    // All Okay.
    exitStatus = 0;

    if (pthread_create(&th, NULL, push_packet, oc))
        goto bail;

    // Block main thread until signal occurs
    pthread_mutex_lock(&sleepMutex);
    pthread_cond_wait(&sleepCond, &sleepMutex);
    pthread_mutex_unlock(&sleepMutex);
    deckLinkInput->StopStreams();
    fprintf(stderr, "Stopping Capture\n");
    avpacket_queue_end(&queue);

bail:
    if (displayModeIterator != NULL) {
        displayModeIterator->Release();
        displayModeIterator = NULL;
    }

    if (deckLinkInput != NULL) {
        deckLinkInput->Release();
        deckLinkInput = NULL;
    }

    if (deckLink != NULL) {
        deckLink->Release();
        deckLink = NULL;
    }

    if (deckLinkIterator != NULL) {
        deckLinkIterator->Release();
    }

    if (oc != NULL) {
        av_write_trailer(oc);
        if (!(fmt->flags & AVFMT_NOFILE)) {
            /* close the output file */
            avio_close(oc->pb);
        }
    }

    return exitStatus;
}
Code Example #14
static av_cold int ff_libnvenc_init(AVCodecContext *avctx)
{
    NvEncContext *nvenc_ctx = (NvEncContext*)avctx->priv_data;
    int x264_argc;
    char **x264_argv;

    // Basic
    nvenc_ctx->nvenc_cfg.width        = avctx->width;
    nvenc_ctx->nvenc_cfg.height       = avctx->height;
    nvenc_ctx->nvenc_cfg.frameRateNum = avctx->time_base.den;
    nvenc_ctx->nvenc_cfg.frameRateDen = avctx->time_base.num * avctx->ticks_per_frame;

    // Codec
    if (avctx->profile >= 0)
        nvenc_ctx->nvenc_cfg.profile      = avctx->profile;
    if (avctx->gop_size >= 0)
        nvenc_ctx->nvenc_cfg.gopLength    = avctx->gop_size;
    else if (!(avctx->flags & CODEC_FLAG_CLOSED_GOP))
        nvenc_ctx->nvenc_cfg.gopLength    = UINT_MAX;   // infinite GOP
    if (avctx->max_b_frames >= 0)
        nvenc_ctx->nvenc_cfg.numBFrames   = avctx->max_b_frames;
    if (avctx->refs >= 0)
        nvenc_ctx->nvenc_cfg.numRefFrames = avctx->refs;
    if (avctx->flags & CODEC_FLAG_INTERLACED_DCT)
        nvenc_ctx->nvenc_cfg.fieldMode    = 2;

    // Rate-control
    if (avctx->bit_rate > 0) {
        nvenc_ctx->nvenc_cfg.rateControl   = 2;
        nvenc_ctx->nvenc_cfg.avgBitRate    = avctx->bit_rate;
    }
    if (avctx->rc_max_rate >= 0) {
        nvenc_ctx->nvenc_cfg.rateControl   = 1;
        nvenc_ctx->nvenc_cfg.peakBitRate   = avctx->rc_max_rate;
    }
    if (avctx->qmin >= 0)
        nvenc_ctx->nvenc_cfg.qpMin         = avctx->qmin;
    if (avctx->qmax >= 0)
        nvenc_ctx->nvenc_cfg.qpMax         = avctx->qmax;
    if (avctx->rc_buffer_size > 0) {
        nvenc_ctx->nvenc_cfg.vbvBufferSize = avctx->rc_buffer_size;
        if (avctx->rc_initial_buffer_occupancy >= 0) {
            nvenc_ctx->nvenc_cfg.vbvInitialDelay =
                avctx->rc_initial_buffer_occupancy / avctx->rc_buffer_size;
        }
    }

    // Codec-specific
    if (avctx->level >= 0)
        nvenc_ctx->nvenc_cfg.level     = avctx->level;
    if (avctx->gop_size >= 0)
        nvenc_ctx->nvenc_cfg.idrPeriod = avctx->gop_size;
    if (avctx->slices > 0) {
        nvenc_ctx->nvenc_cfg.sliceMode = 3;
        nvenc_ctx->nvenc_cfg.sliceModeData = avctx->slices;
    }
    else if (avctx->rtp_payload_size > 0) {
        nvenc_ctx->nvenc_cfg.sliceMode = 1;
        nvenc_ctx->nvenc_cfg.sliceModeData = avctx->rtp_payload_size;
    }
    if (avctx->coder_type == FF_CODER_TYPE_AC)
        nvenc_ctx->nvenc_cfg.enableCABAC = 1;
    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER)
        nvenc_ctx->nvenc_cfg.enableRepeatSPSPPS = 1;

    // Allocate list of x264 options
    x264_argc = 0;
    x264_argv = av_calloc(255, sizeof(char*));
    if (!x264_argv)
        return -1;

    // ffmpeg-x264 parameters
    OPT_STRSTR("preset", nvenc_ctx->preset);
    OPT_STRSTR("tune", nvenc_ctx->tune);
    OPT_STRSTR("profile", nvenc_ctx->profile);
    OPT_STRSTR("level", nvenc_ctx->level);
    OPT_NUMSTR("qp", nvenc_ctx->cqp);
    OPT_NUMSTR("intra-refresh", nvenc_ctx->intra_refresh);
    OPT_NUMSTR("aud", nvenc_ctx->aud);
    OPT_STRSTR("deblock", nvenc_ctx->deblock);
    OPT_NUMSTR("direct-pred", nvenc_ctx->direct_pred);
    OPT_NUMSTR("nal_hrd", nvenc_ctx->nal_hrd);
    OPT_NUMSTR("8x8dct", nvenc_ctx->dct8x8);

    // x264-style extra parameters
    if (nvenc_ctx->x264_params) {
        AVDictionary *param_dict = NULL;
        AVDictionaryEntry *param_entry = NULL;

        if (!av_dict_parse_string(&param_dict, nvenc_ctx->x264_params, "=", ":", 0)) {
            while ((param_entry = av_dict_get(param_dict, "", param_entry, AV_DICT_IGNORE_SUFFIX))) {
                x264_argv[x264_argc++] = av_strdup(param_entry->key);
                x264_argv[x264_argc++] = av_strdup(param_entry->value);
            }
            av_dict_free(&param_dict);
        }
    }
    // x264-style extra options
    if (nvenc_ctx->x264_opts) {
        AVDictionary *param_dict = NULL;
        AVDictionaryEntry *param_entry = NULL;

        if (!av_dict_parse_string(&param_dict, nvenc_ctx->x264_opts, "=", ":", 0)) {
            while ((param_entry = av_dict_get(param_dict, "", param_entry, AV_DICT_IGNORE_SUFFIX))) {
                x264_argv[x264_argc++] = av_strdup(param_entry->key);
                x264_argv[x264_argc++] = av_strdup(param_entry->value);
            }
            av_dict_free(&param_dict);
        }
    }

    // Notify encoder to use the list of x264 options
    nvenc_ctx->nvenc_cfg.x264_paramc = x264_argc;
    nvenc_ctx->nvenc_cfg.x264_paramv = x264_argv;

    // Create and initialize nvencoder
    nvenc_ctx->nvenc = nvenc_open(&nvenc_ctx->nvenc_cfg);
    if (!nvenc_ctx->nvenc)
        return -1;

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame)
        return AVERROR(ENOMEM);

    avctx->has_b_frames = (nvenc_ctx->nvenc_cfg.numBFrames > 0) ? 1 : 0;
    if (avctx->max_b_frames < 0)
        avctx->max_b_frames = 0;
    avctx->bit_rate = nvenc_ctx->nvenc_cfg.avgBitRate;

    return 0;
}
Code Example #15
File: avfilter.c Project: VFR-maniac/libav
int avfilter_init_str(AVFilterContext *filter, const char *args)
{
    AVDictionary *options = NULL;
    AVDictionaryEntry *e;
    int ret = 0;

    if (args && *args) {
        if (!filter->filter->priv_class) {
            av_log(filter, AV_LOG_ERROR, "This filter does not take any "
                   "options, but options were provided: %s.\n", args);
            return AVERROR(EINVAL);
        }

#if FF_API_OLD_FILTER_OPTS
        if (!strcmp(filter->filter->name, "scale") &&
            strchr(args, ':') && strchr(args, ':') < strchr(args, '=')) {
            /* old w:h:flags=<flags> syntax */
            char *copy = av_strdup(args);
            char *p;

            av_log(filter, AV_LOG_WARNING, "The <w>:<h>:flags=<flags> option "
                   "syntax is deprecated. Use either <w>:<h>:<flags> or "
                   "w=<w>:h=<h>:flags=<flags>.\n");

            if (!copy) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            p = strrchr(copy, ':');
            if (p) {
                *p++ = 0;
                ret = av_dict_parse_string(&options, p, "=", ":", 0);
            }
            if (ret >= 0)
                ret = process_unnamed_options(filter, &options, copy);
            av_freep(&copy);

            if (ret < 0)
                goto fail;
        } else
#endif

        if (strchr(args, '=')) {
            /* assume a list of key1=value1:key2=value2:... */
            ret = av_dict_parse_string(&options, args, "=", ":", 0);
            if (ret < 0)
                goto fail;
#if FF_API_OLD_FILTER_OPTS
        } else if (!strcmp(filter->filter->name, "format")     ||
                   !strcmp(filter->filter->name, "noformat")   ||
                   !strcmp(filter->filter->name, "frei0r")     ||
                   !strcmp(filter->filter->name, "frei0r_src") ||
                   !strcmp(filter->filter->name, "ocv")) {
            /* a hack for compatibility with the old syntax
             * replace colons with |s */
            char *copy = av_strdup(args);
            char *p    = copy;
            int nb_leading = 0; // number of leading colons to skip

            if (!copy) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }

            if (!strcmp(filter->filter->name, "frei0r") ||
                !strcmp(filter->filter->name, "ocv"))
                nb_leading = 1;
            else if (!strcmp(filter->filter->name, "frei0r_src"))
                nb_leading = 3;

            while (nb_leading--) {
                p = strchr(p, ':');
                if (!p) {
                    p = copy + strlen(copy);
                    break;
                }
                p++;
            }

            if (strchr(p, ':')) {
                av_log(filter, AV_LOG_WARNING, "This syntax is deprecated. Use "
                       "'|' to separate the list items.\n");
            }

            while ((p = strchr(p, ':')))
                *p++ = '|';

            ret = process_unnamed_options(filter, &options, copy);
            av_freep(&copy);

            if (ret < 0)
                goto fail;
#endif
        } else {
            ret = process_unnamed_options(filter, &options, args);
            if (ret < 0)
                goto fail;
        }
    }

    ret = avfilter_init_dict(filter, &options);
    if (ret < 0)
        goto fail;

    if ((e = av_dict_get(options, "", NULL, AV_DICT_IGNORE_SUFFIX))) {
        av_log(filter, AV_LOG_ERROR, "No such option: %s.\n", e->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto fail;
    }

fail:
    av_dict_free(&options);

    return ret;
}
Code Example #16
static int tee_write_header(AVFormatContext *avf)
{
    TeeContext *tee = avf->priv_data;
    unsigned nb_slaves = 0, i;
    const char *filename = avf->filename;
    char **slaves = NULL;
    int ret;

    while (*filename) {
        char *slave = av_get_token(&filename, slave_delim);
        if (!slave) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_dynarray_add_nofree(&slaves, &nb_slaves, slave);
        if (ret < 0) {
            av_free(slave);
            goto fail;
        }
        if (strspn(filename, slave_delim))
            filename++;
    }

    if (tee->fifo_options_str) {
        ret = av_dict_parse_string(&tee->fifo_options, tee->fifo_options_str, "=", ":", 0);
        if (ret < 0)
            goto fail;
    }

    if (!(tee->slaves = av_mallocz_array(nb_slaves, sizeof(*tee->slaves)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    tee->nb_slaves = tee->nb_alive = nb_slaves;

    for (i = 0; i < nb_slaves; i++) {

        tee->slaves[i].use_fifo = tee->use_fifo;
        ret = av_dict_copy(&tee->slaves[i].fifo_options, tee->fifo_options, 0);
        if (ret < 0)
            goto fail;

        if ((ret = open_slave(avf, slaves[i], &tee->slaves[i])) < 0) {
            ret = tee_process_slave_failure(avf, i, ret);
            if (ret < 0)
                goto fail;
        } else {
            log_slave(&tee->slaves[i], avf, AV_LOG_VERBOSE);
        }
        av_freep(&slaves[i]);
    }

    for (i = 0; i < avf->nb_streams; i++) {
        int j, mapped = 0;
        for (j = 0; j < tee->nb_slaves; j++)
            if (tee->slaves[j].avf)
                mapped += tee->slaves[j].stream_map[i] >= 0;
        if (!mapped)
            av_log(avf, AV_LOG_WARNING, "Input stream #%d is not mapped "
                   "to any slave.\n", i);
    }
    av_free(slaves);
    return 0;

fail:
    for (i = 0; i < nb_slaves; i++)
        av_freep(&slaves[i]);
    close_slaves(avf);
    av_free(slaves);
    return ret;
}
Code Example #17
static av_cold int libx265_encode_init(AVCodecContext *avctx)
{
    libx265Context *ctx = avctx->priv_data;

    if (avctx->strict_std_compliance > FF_COMPLIANCE_EXPERIMENTAL &&
        !av_pix_fmt_desc_get(avctx->pix_fmt)->log2_chroma_w) {
        av_log(avctx, AV_LOG_ERROR,
               "4:2:2 and 4:4:4 support is not fully defined for HEVC yet. "
               "Set -strict experimental to encode anyway.\n");
        return AVERROR(ENOSYS);
    }

    avctx->coded_frame = av_frame_alloc();
    if (!avctx->coded_frame) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate frame.\n");
        return AVERROR(ENOMEM);
    }

    ctx->params = x265_param_alloc();
    if (!ctx->params) {
        av_log(avctx, AV_LOG_ERROR, "Could not allocate x265 param structure.\n");
        return AVERROR(ENOMEM);
    }

    if (x265_param_default_preset(ctx->params, ctx->preset, ctx->tune) < 0) {
        int i;

        av_log(avctx, AV_LOG_ERROR, "Error setting preset/tune %s/%s.\n", ctx->preset, ctx->tune);
        av_log(avctx, AV_LOG_INFO, "Possible presets:");
        for (i = 0; x265_preset_names[i]; i++)
            av_log(avctx, AV_LOG_INFO, " %s", x265_preset_names[i]);

        av_log(avctx, AV_LOG_INFO, "\n");
        av_log(avctx, AV_LOG_INFO, "Possible tunes:");
        for (i = 0; x265_tune_names[i]; i++)
            av_log(avctx, AV_LOG_INFO, " %s", x265_tune_names[i]);

        av_log(avctx, AV_LOG_INFO, "\n");

        return AVERROR(EINVAL);
    }

    ctx->params->frameNumThreads = avctx->thread_count;
    ctx->params->fpsNum          = avctx->time_base.den;
    ctx->params->fpsDenom        = avctx->time_base.num * avctx->ticks_per_frame;
    ctx->params->sourceWidth     = avctx->width;
    ctx->params->sourceHeight    = avctx->height;
    ctx->params->bEnablePsnr     = !!(avctx->flags & CODEC_FLAG_PSNR);

    if ((avctx->color_primaries <= AVCOL_PRI_BT2020 &&
         avctx->color_primaries != AVCOL_PRI_UNSPECIFIED) ||
        (avctx->color_trc <= AVCOL_TRC_BT2020_12 &&
         avctx->color_trc != AVCOL_TRC_UNSPECIFIED) ||
        (avctx->colorspace <= AVCOL_SPC_BT2020_CL &&
         avctx->colorspace != AVCOL_SPC_UNSPECIFIED)) {

        ctx->params->vui.bEnableVideoSignalTypePresentFlag  = 1;
        ctx->params->vui.bEnableColorDescriptionPresentFlag = 1;

        // x265 validates the parameters internally
        ctx->params->vui.colorPrimaries          = avctx->color_primaries;
        ctx->params->vui.transferCharacteristics = avctx->color_trc;
        ctx->params->vui.matrixCoeffs            = avctx->colorspace;
    }

    if (avctx->sample_aspect_ratio.num > 0 && avctx->sample_aspect_ratio.den > 0) {
        char sar[12];
        int sar_num, sar_den;

        av_reduce(&sar_num, &sar_den,
                  avctx->sample_aspect_ratio.num,
                  avctx->sample_aspect_ratio.den, 65535);
        snprintf(sar, sizeof(sar), "%d:%d", sar_num, sar_den);
        if (x265_param_parse(ctx->params, "sar", sar) == X265_PARAM_BAD_VALUE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid SAR: %d:%d.\n", sar_num, sar_den);
            return AVERROR_INVALIDDATA;
        }
    }

    switch (avctx->pix_fmt) {
    case AV_PIX_FMT_YUV420P:
    case AV_PIX_FMT_YUV420P10:
        ctx->params->internalCsp = X265_CSP_I420;
        break;
    case AV_PIX_FMT_YUV422P:
    case AV_PIX_FMT_YUV422P10:
        ctx->params->internalCsp = X265_CSP_I422;
        break;
    case AV_PIX_FMT_YUV444P:
    case AV_PIX_FMT_YUV444P10:
        ctx->params->internalCsp = X265_CSP_I444;
        break;
    }

    if (ctx->crf >= 0) {
        char crf[6];

        snprintf(crf, sizeof(crf), "%2.2f", ctx->crf);
        if (x265_param_parse(ctx->params, "crf", crf) == X265_PARAM_BAD_VALUE) {
            av_log(avctx, AV_LOG_ERROR, "Invalid crf: %2.2f.\n", ctx->crf);
            return AVERROR(EINVAL);
        }
    } else if (avctx->bit_rate > 0) {
        ctx->params->rc.bitrate         = avctx->bit_rate / 1000;
        ctx->params->rc.rateControlMode = X265_RC_ABR;
    }

    if (!(avctx->flags & CODEC_FLAG_GLOBAL_HEADER))
        ctx->params->bRepeatHeaders = 1;

    if (ctx->x265_opts) {
        AVDictionary *dict    = NULL;
        AVDictionaryEntry *en = NULL;

        if (!av_dict_parse_string(&dict, ctx->x265_opts, "=", ":", 0)) {
            while ((en = av_dict_get(dict, "", en, AV_DICT_IGNORE_SUFFIX))) {
                int parse_ret = x265_param_parse(ctx->params, en->key, en->value);

                switch (parse_ret) {
                case X265_PARAM_BAD_NAME:
                    av_log(avctx, AV_LOG_WARNING,
                          "Unknown option: %s.\n", en->key);
                    break;
                case X265_PARAM_BAD_VALUE:
                    av_log(avctx, AV_LOG_WARNING,
                          "Invalid value for %s: %s.\n", en->key, en->value);
                    break;
                default:
                    break;
                }
            }
            av_dict_free(&dict);
        }
    }

    ctx->encoder = x265_encoder_open(ctx->params);
    if (!ctx->encoder) {
        av_log(avctx, AV_LOG_ERROR, "Cannot open libx265 encoder.\n");
        libx265_encode_close(avctx);
        return AVERROR_INVALIDDATA;
    }

    if (avctx->flags & CODEC_FLAG_GLOBAL_HEADER) {
        x265_nal *nal;
        int nnal;

        avctx->extradata_size = x265_encoder_headers(ctx->encoder, &nal, &nnal);
        if (avctx->extradata_size <= 0) {
            av_log(avctx, AV_LOG_ERROR, "Cannot encode headers.\n");
            libx265_encode_close(avctx);
            return AVERROR_INVALIDDATA;
        }

        avctx->extradata = av_malloc(avctx->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
        if (!avctx->extradata) {
            av_log(avctx, AV_LOG_ERROR,
                   "Cannot allocate HEVC header of size %d.\n", avctx->extradata_size);
            libx265_encode_close(avctx);
            return AVERROR(ENOMEM);
        }

        memcpy(avctx->extradata, nal[0].payload, avctx->extradata_size);
    }

    return 0;
}
Code Example #18
File: ve_lavc.c Project: AungWinnHtut/mplayer
static int config(struct vf_instance *vf,
        int width, int height, int d_width, int d_height,
	unsigned int flags, unsigned int outfmt){
    int size, i;
    char *p;
    AVDictionary *opts = NULL;

    mux_v->bih->biWidth=width;
    mux_v->bih->biHeight=height;
    mux_v->bih->biSizeImage=mux_v->bih->biWidth*mux_v->bih->biHeight*(mux_v->bih->biBitCount/8);

    mp_msg(MSGT_MENCODER, MSGL_INFO,"videocodec: libavcodec (%dx%d fourcc=%x [%.4s])\n",
	mux_v->bih->biWidth, mux_v->bih->biHeight, mux_v->bih->biCompression,
	    (char *)&mux_v->bih->biCompression);

    lavc_venc_context->width = width;
    lavc_venc_context->height = height;
    if (lavc_param_vbitrate > 16000) /* != -1 */
	lavc_venc_context->bit_rate = lavc_param_vbitrate;
    else if (lavc_param_vbitrate >= 0) /* != -1 */
	lavc_venc_context->bit_rate = lavc_param_vbitrate*1000;
    else
	lavc_venc_context->bit_rate = 800000; /* default */

    mux_v->avg_rate= lavc_venc_context->bit_rate;

    lavc_venc_context->bit_rate_tolerance= lavc_param_vrate_tolerance*1000;
    lavc_venc_context->time_base= (AVRational){mux_v->h.dwScale, mux_v->h.dwRate};
    lavc_venc_context->qmin= lavc_param_vqmin;
    lavc_venc_context->qmax= lavc_param_vqmax;
    lavc_venc_context->lmin= (int)(FF_QP2LAMBDA * lavc_param_lmin + 0.5);
    lavc_venc_context->lmax= (int)(FF_QP2LAMBDA * lavc_param_lmax + 0.5);
    lavc_venc_context->mb_lmin= (int)(FF_QP2LAMBDA * lavc_param_mb_lmin + 0.5);
    lavc_venc_context->mb_lmax= (int)(FF_QP2LAMBDA * lavc_param_mb_lmax + 0.5);
    lavc_venc_context->max_qdiff= lavc_param_vqdiff;
    lavc_venc_context->qcompress= lavc_param_vqcompress;
    lavc_venc_context->qblur= lavc_param_vqblur;
    lavc_venc_context->max_b_frames= lavc_param_vmax_b_frames;
    lavc_venc_context->b_quant_factor= lavc_param_vb_qfactor;
    lavc_venc_context->rc_strategy= lavc_param_vrc_strategy;
    lavc_venc_context->b_frame_strategy= lavc_param_vb_strategy;
    lavc_venc_context->b_quant_offset= (int)(FF_QP2LAMBDA * lavc_param_vb_qoffset + 0.5);
    lavc_venc_context->rtp_payload_size= lavc_param_packet_size;
    lavc_venc_context->strict_std_compliance= lavc_param_strict;
    lavc_venc_context->i_quant_factor= lavc_param_vi_qfactor;
    lavc_venc_context->i_quant_offset= (int)(FF_QP2LAMBDA * lavc_param_vi_qoffset + 0.5);
    lavc_venc_context->rc_qsquish= lavc_param_rc_qsquish;
    lavc_venc_context->rc_qmod_amp= lavc_param_rc_qmod_amp;
    lavc_venc_context->rc_qmod_freq= lavc_param_rc_qmod_freq;
    lavc_venc_context->rc_eq= lavc_param_rc_eq;

    mux_v->max_rate=
    lavc_venc_context->rc_max_rate= lavc_param_rc_max_rate*1000;
    lavc_venc_context->rc_min_rate= lavc_param_rc_min_rate*1000;

    mux_v->vbv_size=
    lavc_venc_context->rc_buffer_size= lavc_param_rc_buffer_size*1000;

    lavc_venc_context->rc_initial_buffer_occupancy=
            lavc_venc_context->rc_buffer_size *
            lavc_param_rc_initial_buffer_occupancy;
    lavc_venc_context->rc_buffer_aggressivity= lavc_param_rc_buffer_aggressivity;
    lavc_venc_context->rc_initial_cplx= lavc_param_rc_initial_cplx;
    lavc_venc_context->debug= lavc_param_debug;
    lavc_venc_context->last_predictor_count= lavc_param_last_pred;
    lavc_venc_context->pre_me= lavc_param_pre_me;
    lavc_venc_context->me_pre_cmp= lavc_param_me_pre_cmp;
    lavc_venc_context->pre_dia_size= lavc_param_pre_dia_size;
    lavc_venc_context->me_subpel_quality= lavc_param_me_subpel_quality;
    lavc_venc_context->me_range= lavc_param_me_range;
    lavc_venc_context->intra_quant_bias= lavc_param_ibias;
    lavc_venc_context->inter_quant_bias= lavc_param_pbias;
    lavc_venc_context->coder_type= lavc_param_coder;
    lavc_venc_context->context_model= lavc_param_context;
    lavc_venc_context->scenechange_threshold= lavc_param_sc_threshold;
    lavc_venc_context->noise_reduction= lavc_param_noise_reduction;
    lavc_venc_context->nsse_weight= lavc_param_nssew;
    lavc_venc_context->frame_skip_threshold= lavc_param_skip_threshold;
    lavc_venc_context->frame_skip_factor= lavc_param_skip_factor;
    lavc_venc_context->frame_skip_exp= lavc_param_skip_exp;
    lavc_venc_context->frame_skip_cmp= lavc_param_skip_cmp;

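    /* intra/inter quant matrices are given as comma-separated lists of 64
     * integer coefficients; a list with fewer entries is discarded. */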
    if (lavc_param_intra_matrix)
    {
	char *tmp;

	lavc_venc_context->intra_matrix =
	    av_malloc(sizeof(*lavc_venc_context->intra_matrix)*64);

	i = 0;
	while ((tmp = strsep(&lavc_param_intra_matrix, ",")) && (i < 64))
	{
	    if (!tmp || !strlen(tmp))
		break;
	    lavc_venc_context->intra_matrix[i++] = atoi(tmp);
	}

	if (i != 64)
	    av_freep(&lavc_venc_context->intra_matrix);
	else
	    mp_msg(MSGT_MENCODER, MSGL_V, "Using user specified intra matrix\n");
    }
    if (lavc_param_inter_matrix)
    {
	char *tmp;

	lavc_venc_context->inter_matrix =
	    av_malloc(sizeof(*lavc_venc_context->inter_matrix)*64);

	i = 0;
	while ((tmp = strsep(&lavc_param_inter_matrix, ",")) && (i < 64))
	{
	    if (!tmp || !strlen(tmp))
		break;
	    lavc_venc_context->inter_matrix[i++] = atoi(tmp);
	}

	if (i != 64)
	    av_freep(&lavc_venc_context->inter_matrix);
	else
	    mp_msg(MSGT_MENCODER, MSGL_V, "Using user specified inter matrix\n");
    }

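    /* rc_override string format: "start,end,q" triples separated by '/';
     * a positive q is a fixed qscale, a negative q is a quality factor in percent. */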
    p= lavc_param_rc_override_string;
    for(i=0; p; i++){
        int start, end, q;
        int e=sscanf(p, "%d,%d,%d", &start, &end, &q);
        if(e!=3){
	    mp_msg(MSGT_MENCODER,MSGL_ERR,"error parsing vrc_q\n");
            return 0;
        }
        lavc_venc_context->rc_override=
            realloc(lavc_venc_context->rc_override, sizeof(RcOverride)*(i+1));
        lavc_venc_context->rc_override[i].start_frame= start;
        lavc_venc_context->rc_override[i].end_frame  = end;
        if(q>0){
            lavc_venc_context->rc_override[i].qscale= q;
            lavc_venc_context->rc_override[i].quality_factor= 1.0;
        }
        else{
            lavc_venc_context->rc_override[i].qscale= 0;
            lavc_venc_context->rc_override[i].quality_factor= -q/100.0;
        }
        p= strchr(p, '/');
        if(p) p++;
    }
    lavc_venc_context->rc_override_count=i;

    lavc_venc_context->mpeg_quant=lavc_param_mpeg_quant;

    lavc_venc_context->dct_algo= lavc_param_fdct;
    lavc_venc_context->idct_algo= lavc_param_idct;

    lavc_venc_context->lumi_masking= lavc_param_lumi_masking;
    lavc_venc_context->temporal_cplx_masking= lavc_param_temporal_cplx_masking;
    lavc_venc_context->spatial_cplx_masking= lavc_param_spatial_cplx_masking;
    lavc_venc_context->p_masking= lavc_param_p_masking;
    lavc_venc_context->dark_masking= lavc_param_dark_masking;
    lavc_venc_context->border_masking = lavc_param_border_masking;

    if (lavc_param_aspect != NULL)
    {
	int par_width, par_height, e;
	float ratio=0;

	e= sscanf (lavc_param_aspect, "%d/%d", &par_width, &par_height);
	if(e==2){
            if(par_height)
                ratio= (float)par_width / (float)par_height;
        }else{
	    e= sscanf (lavc_param_aspect, "%f", &ratio);
	}

	if (e && ratio > 0.1 && ratio < 10.0) {
	    lavc_venc_context->sample_aspect_ratio= av_d2q(ratio * height / width, 255);
	    mp_dbg(MSGT_MENCODER, MSGL_DBG2, "sample_aspect_ratio: %d/%d\n",
                lavc_venc_context->sample_aspect_ratio.num,
                lavc_venc_context->sample_aspect_ratio.den);
	    mux_v->aspect = ratio;
	    mp_dbg(MSGT_MENCODER, MSGL_DBG2, "aspect_ratio: %f\n", ratio);
	} else {
	    mp_dbg(MSGT_MENCODER, MSGL_ERR, "aspect ratio: cannot parse \"%s\"\n", lavc_param_aspect);
	    return 0;
	}
    }
    else if (lavc_param_autoaspect) {
	lavc_venc_context->sample_aspect_ratio = av_d2q((float)d_width/d_height*height / width, 255);
	mux_v->aspect = (float)d_width/d_height;
    }

    /* keyframe interval */
    if (lavc_param_keyint >= 0) /* != -1 */
	lavc_venc_context->gop_size = lavc_param_keyint;
    else
	lavc_venc_context->gop_size = 250; /* default */

    lavc_venc_context->flags = 0;
    if (lavc_param_mb_decision)
    {
	mp_msg(MSGT_MENCODER, MSGL_INFO, MSGTR_MPCODECS_HighQualityEncodingSelected);
        lavc_venc_context->mb_decision= lavc_param_mb_decision;
    }

    lavc_venc_context->me_cmp= lavc_param_me_cmp;
    lavc_venc_context->me_sub_cmp= lavc_param_me_sub_cmp;
    lavc_venc_context->mb_cmp= lavc_param_mb_cmp;
#ifdef FF_CMP_VSAD
    lavc_venc_context->ildct_cmp= lavc_param_ildct_cmp;
#endif
    lavc_venc_context->dia_size= lavc_param_dia_size;
    lavc_venc_context->flags|= lavc_param_qpel;
    lavc_venc_context->trellis = lavc_param_trell;
    lavc_venc_context->flags|= lavc_param_lowdelay;
    lavc_venc_context->flags|= lavc_param_bit_exact;
    lavc_venc_context->flags|= lavc_param_aic;
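    /* These former codec flags are private encoder options now, so they are
     * passed through the opts dictionary handed to avcodec_open2(). */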
    if (lavc_param_aiv)
        av_dict_set(&opts, "aiv", "1", 0);
    if (lavc_param_umv)
        av_dict_set(&opts, "umv", "1", 0);
    if (lavc_param_obmc)
        av_dict_set(&opts, "obmc", "1", 0);
    lavc_venc_context->flags|= lavc_param_loop;
    lavc_venc_context->flags|= lavc_param_v4mv ? CODEC_FLAG_4MV : 0;
    if (lavc_param_data_partitioning)
        av_dict_set(&opts, "data_partitioning", "1", 0);
    lavc_venc_context->flags|= lavc_param_mv0;
    if (lavc_param_ss)
        av_dict_set(&opts, "structured_slices", "1", 0);
    if (lavc_param_alt)
        av_dict_set(&opts, "alternate_scan", "1", 0);
    lavc_venc_context->flags|= lavc_param_ilme;
    lavc_venc_context->flags|= lavc_param_gmc;
#ifdef CODEC_FLAG_CLOSED_GOP
    lavc_venc_context->flags|= lavc_param_closed_gop;
#endif
    lavc_venc_context->flags|= lavc_param_gray;

    if(lavc_param_normalize_aqp) lavc_venc_context->flags|= CODEC_FLAG_NORMALIZE_AQP;
    if(lavc_param_interlaced_dct) lavc_venc_context->flags|= CODEC_FLAG_INTERLACED_DCT;
    lavc_venc_context->flags|= lavc_param_psnr;
    lavc_venc_context->intra_dc_precision = lavc_param_dc_precision - 8;
    lavc_venc_context->prediction_method= lavc_param_prediction_method;
    lavc_venc_context->brd_scale = lavc_param_brd_scale;
    lavc_venc_context->bidir_refine = lavc_param_bidir_refine;
    lavc_venc_context->scenechange_factor = lavc_param_sc_factor;
    if((lavc_param_video_global_header&1)
       /*|| (video_global_header==0 && (oc->oformat->flags & AVFMT_GLOBALHEADER))*/){
        lavc_venc_context->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    if(lavc_param_video_global_header&2){
        lavc_venc_context->flags2 |= CODEC_FLAG2_LOCAL_HEADER;
    }
    lavc_venc_context->mv0_threshold = lavc_param_mv0_threshold;
    lavc_venc_context->refs = lavc_param_refs;
    lavc_venc_context->b_sensitivity = lavc_param_b_sensitivity;
    lavc_venc_context->level = lavc_param_level;

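    /* Merge the user's free-form "key=value,key=value" option string into the
     * same dictionary that avcodec_open2() will consume. */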
    if(lavc_param_avopt){
        if(av_dict_parse_string(&opts, lavc_param_avopt, "=", ",", 0) < 0){
            mp_msg(MSGT_MENCODER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", lavc_param_avopt);
            return 0;
        }
    }

    mux_v->imgfmt = lavc_param_format;
    lavc_venc_context->pix_fmt = imgfmt2pixfmt(lavc_param_format);
    if (lavc_venc_context->pix_fmt == PIX_FMT_NONE)
        return 0;

    if(!stats_file) {
    /* lavc internal 2pass bitrate control */
    switch(lavc_param_vpass){
    case 2:
    case 3:
	lavc_venc_context->flags|= CODEC_FLAG_PASS2;
	stats_file= fopen(passtmpfile, "rb");
	if(stats_file==NULL){
	    mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: filename=%s\n", passtmpfile);
            return 0;
	}
	fseek(stats_file, 0, SEEK_END);
	size= ftell(stats_file);
	fseek(stats_file, 0, SEEK_SET);

	lavc_venc_context->stats_in= av_malloc(size + 1);
	if(!lavc_venc_context->stats_in){
	    mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: out of memory\n");
            return 0;
	}
	lavc_venc_context->stats_in[size]=0;

	if(fread(lavc_venc_context->stats_in, size, 1, stats_file)<1){
	    mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: reading from filename=%s\n", passtmpfile);
            return 0;
	}
	if(lavc_param_vpass == 2)
	    break;
	else
	    fclose(stats_file);
	    /* fall through */
    case 1:
	lavc_venc_context->flags|= CODEC_FLAG_PASS1;
	stats_file= fopen(passtmpfile, "wb");
	if(stats_file==NULL){
	    mp_msg(MSGT_MENCODER,MSGL_ERR,"2pass failed: filename=%s\n", passtmpfile);
            return 0;
	}
	if(lavc_param_turbo && (lavc_param_vpass == 1)) {
	  /* use SAD comparison functions instead of hungrier (slower) ones */
	  lavc_venc_context->me_pre_cmp = 0;
	  lavc_venc_context->me_cmp = 0;
	  lavc_venc_context->me_sub_cmp = 0;
	  lavc_venc_context->mb_cmp = 2;

	  /* Disables diamond motion estimation */
	  lavc_venc_context->pre_dia_size = 0;
	  lavc_venc_context->dia_size = 1;

	  lavc_venc_context->noise_reduction = 0; // nr=0
	  lavc_venc_context->mb_decision = 0; // mbd=0 ("realtime" encoding)

	  lavc_venc_context->flags &= ~CODEC_FLAG_QPEL;
	  lavc_venc_context->flags &= ~CODEC_FLAG_4MV;
	  lavc_venc_context->trellis = 0;
	  lavc_venc_context->flags &= ~CODEC_FLAG_MV0;
	  av_dict_set(&opts, "mpv_flags", "-qp_rd-cbp_rd", 0);
	}
	break;
    }
    }

    lavc_venc_context->me_method = ME_ZERO+lavc_param_vme;

    /* fixed qscale :p */
    if (lavc_param_vqscale >= 0.0)
    {
	mp_msg(MSGT_MENCODER, MSGL_INFO, MSGTR_MPCODECS_UsingConstantQscale, lavc_param_vqscale);
	lavc_venc_context->flags |= CODEC_FLAG_QSCALE;
        lavc_venc_context->global_quality=
	vf->priv->pic->quality = (int)(FF_QP2LAMBDA * lavc_param_vqscale + 0.5);
    }

    lavc_venc_context->thread_count = lavc_param_threads;
    lavc_venc_context->thread_type = FF_THREAD_FRAME | FF_THREAD_SLICE;

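    /* avcodec_open2() removes every option it recognizes from opts; anything
     * left over afterwards is reported as unknown and treated as an error. */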
    if (avcodec_open2(lavc_venc_context, vf->priv->codec, &opts) != 0) {
	mp_msg(MSGT_MENCODER,MSGL_ERR,MSGTR_CantOpenCodec);
	av_dict_free(&opts);
	return 0;
    }
    if (av_dict_count(opts)) {
        AVDictionaryEntry *e = NULL;
        while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
            mp_msg(MSGT_MENCODER,MSGL_ERR,"Unknown option %s\n", e->key);
        av_dict_free(&opts);
        return 0;
    }
    av_dict_free(&opts);

    /* free second pass buffer, it's not needed anymore */
    av_freep(&lavc_venc_context->stats_in);
    if(lavc_venc_context->bits_per_coded_sample)
        mux_v->bih->biBitCount= lavc_venc_context->bits_per_coded_sample;
    if(lavc_venc_context->extradata_size){
        mux_v->bih= realloc(mux_v->bih, sizeof(*mux_v->bih) + lavc_venc_context->extradata_size);
        memcpy(mux_v->bih + 1, lavc_venc_context->extradata, lavc_venc_context->extradata_size);
        mux_v->bih->biSize= sizeof(*mux_v->bih) + lavc_venc_context->extradata_size;
    }

    mux_v->decoder_delay = lavc_venc_context->max_b_frames ? 1 : 0;

    return 1;
}