bool RtspStreamWorker::openCodec(AVStream *stream, AVDictionary *options)
{
    startInterruptableOperation(5);
    AVCodec *codec = avcodec_find_decoder(stream->codec->codec_id);

    AVDictionary *optionsCopy = 0;
    av_dict_copy(&optionsCopy, options, 0);
    startInterruptableOperation(5);
    int errorCode = avcodec_open2(stream->codec, codec, &optionsCopy);
    av_dict_free(&optionsCopy);

    return 0 == errorCode;
}
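The copy in the example above matters because avcodec_open2() consumes the entries it recognizes from the dictionary it is given and leaves only the unrecognized ones behind, so handing it the caller's dictionary directly would modify it. A minimal, self-contained sketch of the same copy/open/free pattern (the function name and error handling here are illustrative, not taken from the example above):

#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

/* Open a decoder on ctx without mutating the caller's options dictionary.
 * Returns 0 on success or a negative AVERROR code on failure. */
static int open_decoder_with_options(AVCodecContext *ctx,
                                     enum AVCodecID codec_id,
                                     AVDictionary *user_opts)
{
    const AVCodec *codec = avcodec_find_decoder(codec_id);
    if (!codec)
        return AVERROR_DECODER_NOT_FOUND;

    AVDictionary *opts = NULL;            /* working copy, consumed by avcodec_open2() */
    av_dict_copy(&opts, user_opts, 0);

    int ret = avcodec_open2(ctx, codec, &opts);

    /* any entries still left in opts were not recognized by this codec */
    av_dict_free(&opts);
    return ret;
}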
Example #2
static int open_url(HLSContext *c, URLContext **uc, const char *url)
{
    AVDictionary *tmp = NULL;
    int ret;

    av_dict_copy(&tmp, c->avio_opts, 0);

    ret = ffurl_open(uc, url, AVIO_FLAG_READ, c->interrupt_callback, &tmp);

    av_dict_free(&tmp);

    return ret;
}
Example #3
	bool FFMPEGer::open_audio(AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg){
		AVCodecContext *c;
		int nb_samples;
		int ret;
		AVDictionary *opt = NULL;
		c = ost->st->codec;
		/* open it */
		av_dict_copy(&opt, opt_arg, 0);

		ret = avcodec_open2(c, codec, &opt);
		av_dict_free(&opt);
		if (ret < 0) {
			ALOGE("Could not open audio codec: %s", av_err2str(ret));
			return false;
		}

		if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
			nb_samples = 10000;
		else
			nb_samples = c->frame_size;
				
		ost->frame = alloc_audio_frame(c->sample_fmt, c->channel_layout,
									   c->sample_rate, nb_samples);
		ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
										   c->sample_rate, nb_samples);

		/* create resampler context */
        ost->swr_ctx = swr_alloc();
        if (!ost->swr_ctx) {
            ALOGE("Could not allocate resampler context");
            return false;
        }
		
        /* set options */
        av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
        av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
        av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
        av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
        av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
        av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

		/* initialize the resampling context */
		ret = swr_init(ost->swr_ctx);
        if (ret < 0){
            ALOGE("Failed to initialize the resampling context");
			return false;
		}

		return true;
	}
Example #4
static void OpenAudio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg, int sample_rate)
{
	AVCodecContext *c = NULL;
	int nb_samples = 0;
	int ret = 0;
	AVDictionary *opt = NULL;

	c = ost->st->codec;

	// Initialize the codec
	av_dict_copy(&opt, opt_arg, 0);
	ret = avcodec_open2(c, codec, &opt);
	av_dict_free(&opt);
	if (ret < 0) {
		fprintf(stderr, "Could not open audio codec: %s\n", MakeErrorString(ret));
		return;
	}

	if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
		nb_samples = 10000;
	else
		nb_samples = c->frame_size;

	ost->frame     = AllocAudioFrame(c->sample_fmt, c->channel_layout,
									   c->sample_rate, nb_samples);
	ost->tmp_frame = AllocAudioFrame(AV_SAMPLE_FMT_S16, c->channel_layout,
									   sample_rate, nb_samples/(c->sample_rate/sample_rate));

	// Create the resampler
	ost->swr_ctx = swr_alloc();
	if (!ost->swr_ctx) {
		fprintf(stderr, "Could not allocate resampler context\n");
		return;
	}

	// Set the audio format options
	av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
	av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     sample_rate,       0);
	av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
	av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
	av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
	av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);

	// Initialize the resampler
	if ((ret = swr_init(ost->swr_ctx)) < 0) {
		fprintf(stderr, "Failed to initialize the resampling context\n");
		return;
	}
}
Example #5
static int list_devices_for_context(AVFormatContext *s, AVDictionary *options,
                                    AVDeviceInfoList **device_list)
{
    AVDictionary *tmp = NULL;
    int ret;

    av_dict_copy(&tmp, options, 0);
    if ((ret = av_opt_set_dict2(s, &tmp, AV_OPT_SEARCH_CHILDREN)) < 0)
        goto fail;
    ret = avdevice_list_devices(s, device_list);
  fail:
    av_dict_free(&tmp);
    avformat_free_context(s);
    return ret;
}
Example #6
bool RtspStreamWorker::openInput(AVFormatContext **context, AVDictionary *options)
{
    AVDictionary *optionsCopy = 0;
    av_dict_copy(&optionsCopy, options, 0);
    startInterruptableOperation(20);
    int errorCode = avformat_open_input(context, qPrintable(m_url.toString()), NULL, &optionsCopy);
    av_dict_free(&optionsCopy);

    if (errorCode < 0)
    {
        emit fatalError(QString::fromLatin1("Open error: %1").arg(errorMessageFromCode(errorCode)));
        return false;
    }

    return true;
}
Example #7
static int url_connect(struct variant *var, AVDictionary *opts)
{
    AVDictionary *tmp = NULL;
    int ret;

    av_dict_copy(&tmp, opts, 0);

    if ((ret = av_opt_set_dict(var->input, &tmp)) < 0 ||
        (ret = ffurl_connect(var->input, NULL)) < 0) {
        ffurl_close(var->input);
        var->input = NULL;
    }

    av_dict_free(&tmp);
    return ret;
}
Example #8
static void open_audio(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    AVCodecContext *c;
    int nb_samples;
    int ret;
    AVDictionary *opt = NULL;
    c = ost->st->codec;
    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open audio codec: %s\n", av_err2str(ret));
        exit(1);
    }
    /* init signal generator */
    ost->t     = 0;
    ost->tincr = 2 * M_PI * 110.0 / c->sample_rate;
    /* increment frequency by 110 Hz per second */
    ost->tincr2 = 2 * M_PI * 110.0 / c->sample_rate / c->sample_rate;
    if (c->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        nb_samples = 10000;
    else
        nb_samples = c->frame_size;
    ost->frame     = alloc_audio_frame(c->sample_fmt, c->channel_layout,
                                       c->sample_rate, nb_samples);
    ost->tmp_frame = alloc_audio_frame(AV_SAMPLE_FMT_S16, c->channel_layout,
                                       c->sample_rate, nb_samples);
    /* create resampler context */
    ost->swr_ctx = swr_alloc();
    if (!ost->swr_ctx) {
        fprintf(stderr, "Could not allocate resampler context\n");
        exit(1);
    }
    /* set options */
    av_opt_set_int       (ost->swr_ctx, "in_channel_count",   c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "in_sample_rate",     c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "in_sample_fmt",      AV_SAMPLE_FMT_S16, 0);
    av_opt_set_int       (ost->swr_ctx, "out_channel_count",  c->channels,       0);
    av_opt_set_int       (ost->swr_ctx, "out_sample_rate",    c->sample_rate,    0);
    av_opt_set_sample_fmt(ost->swr_ctx, "out_sample_fmt",     c->sample_fmt,     0);
    /* initialize the resampling context */
    if ((ret = swr_init(ost->swr_ctx)) < 0) {
        fprintf(stderr, "Failed to initialize the resampling context\n");
        exit(1);
    }
}
Example #9
static void test_separators(const AVDictionary *m, const char pair, const char val)
{
    AVDictionary *dict = NULL;
    char pairs[] = {pair , '\0'};
    char vals[]  = {val, '\0'};

    char *buffer = NULL;
    av_dict_copy(&dict, m, 0);
    print_dict(dict);
    av_dict_get_string(dict, &buffer, val, pair);
    printf("%s\n", buffer);
    av_dict_free(&dict);
    av_dict_parse_string(&dict, buffer, vals, pairs, 0);
    av_freep(&buffer);
    print_dict(dict);
    av_dict_free(&dict);
}
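For reference, the test above exercises av_dict_get_string() and av_dict_parse_string() with arbitrary separator characters; a minimal standalone round trip with concrete separators (the keys and values below are made up for illustration) could look like this:

#include <stdio.h>
#include <libavutil/dict.h>
#include <libavutil/mem.h>

int main(void)
{
    AVDictionary *src = NULL, *parsed = NULL;
    char *buf = NULL;

    av_dict_set(&src, "title",  "test",    0);
    av_dict_set(&src, "artist", "someone", 0);

    /* serialize as "title=test:artist=someone" */
    if (av_dict_get_string(src, &buf, '=', ':') >= 0 && buf)
        printf("%s\n", buf);

    /* parse the string back into a second dictionary */
    av_dict_parse_string(&parsed, buf, "=", ":", 0);

    av_freep(&buf);
    av_dict_free(&src);
    av_dict_free(&parsed);
    return 0;
}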
Example #10
BOOL CVideoLivRecord::open_audio(AVStream *st, AVCodec* codec, AVDictionary* opt)
{
	AVCodecContext * avcc = st->codec;
	int nb_samples;
	int ret;
	AVDictionary* opt_dst = NULL; // must be initialized to NULL.

	av_dict_copy(&opt_dst, opt, 0);
	ret = avcodec_open2(avcc, codec, &opt_dst);
	av_dict_free(&opt_dst);
	if (ret < 0){
		log("[CVideoLivRecord::open_audio] -- avcodec_open2() error");
		return FALSE;
	}
	m_t = 0;
	m_tincr = 2 * M_PI * 110.0 / avcc->sample_rate;
	m_tincr2 = 2 * M_PI * 110.0 / avcc->sample_rate / avcc->sample_rate;

	if (avcc->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE){
		nb_samples = 10000;
	} else {
		nb_samples = avcc->frame_size;
	}
	
	m_pAudioFrame = alloc_audio_frame(avcc->sample_fmt, avcc->channel_layout, avcc->sample_rate, nb_samples);
	m_pAudioBkFrame = alloc_audio_frame(AV_SAMPLE_FMT_S16, avcc->channel_layout, avcc->sample_rate, nb_samples);

	m_pAudioSwrctx = swr_alloc();
	if (!m_pAudioSwrctx){
		log("[CVideoLivRecord::open_audio] -- swr_alloc() error");
		return FALSE;
	}
	av_opt_set_int       (m_pAudioSwrctx, "in_channel_count", avcc->channels,    0);
	av_opt_set_int       (m_pAudioSwrctx, "in_sample_rate",   avcc->sample_rate, 0);
	av_opt_set_sample_fmt(m_pAudioSwrctx, "in_sample_fmt",    AV_SAMPLE_FMT_S16, 0);
	av_opt_set_int       (m_pAudioSwrctx, "out_channel_count", avcc->channels,    0);
	av_opt_set_int       (m_pAudioSwrctx, "out_sample_rate",  avcc->sample_rate, 0);
	av_opt_set_sample_fmt(m_pAudioSwrctx, "out_sample_fmt",   avcc->sample_fmt,  0);

	if (swr_init(m_pAudioSwrctx) < 0){
		log("[CVideoLivRecord::open_audio] -- swr_init() error");
		return FALSE;
	}
	return TRUE;
}
Example #11
int avfilter_copy_frame_props(AVFilterBufferRef *dst, const AVFrame *src)
{
    dst->pts    = src->pts;
    dst->pos    = av_frame_get_pkt_pos(src);
    dst->format = src->format;

    av_dict_free(&dst->metadata);
    av_dict_copy(&dst->metadata, av_frame_get_metadata(src), 0);

    switch (dst->type) {
    case AVMEDIA_TYPE_VIDEO:
        dst->video->w                   = src->width;
        dst->video->h                   = src->height;
        dst->video->sample_aspect_ratio = src->sample_aspect_ratio;
        dst->video->interlaced          = src->interlaced_frame;
        dst->video->top_field_first     = src->top_field_first;
        dst->video->key_frame           = src->key_frame;
        dst->video->pict_type           = src->pict_type;
        av_freep(&dst->video->qp_table);
        dst->video->qp_table_linesize = 0;
        if (src->qscale_table) {
            int qsize = src->qstride ? src->qstride * ((src->height+15)/16) : (src->width+15)/16;
            dst->video->qp_table = av_malloc(qsize);
            if (!dst->video->qp_table)
                return AVERROR(ENOMEM);
            dst->video->qp_table_linesize = src->qstride;
            dst->video->qp_table_size     = qsize;
            memcpy(dst->video->qp_table, src->qscale_table, qsize);
        }
        break;
    case AVMEDIA_TYPE_AUDIO:
        dst->audio->sample_rate         = src->sample_rate;
        dst->audio->channel_layout      = src->channel_layout;
        dst->audio->channels            = src->channels;
        if(src->channels < av_get_channel_layout_nb_channels(src->channel_layout)) {
            av_log(NULL, AV_LOG_ERROR, "libavfilter does not support this channel layout\n");
            return AVERROR(EINVAL);
        }
        break;
    default:
        return AVERROR(EINVAL);
    }

    return 0;
}
Example #12
int ff_http_do_new_request(URLContext *h, const char *uri)
{
    HTTPContext *s = h->priv_data;
    AVDictionary *options = NULL;
    int ret;

    s->off           = 0;
    s->icy_data_read = 0;
    av_free(s->location);
    s->location = av_strdup(uri);
    if (!s->location)
        return AVERROR(ENOMEM);

    av_dict_copy(&options, s->chained_options, 0);
    ret = http_open_cnx(h, &options);
    av_dict_free(&options);
    return ret;
}
Example #13
AVFilterBufferRef *avfilter_ref_buffer(AVFilterBufferRef *ref, int pmask)
{
    AVFilterBufferRef *ret = av_malloc(sizeof(AVFilterBufferRef));
    if (!ret)
        return NULL;
    *ret = *ref;

    ret->metadata = NULL;
    av_dict_copy(&ret->metadata, ref->metadata, 0);

    if (ref->type == AVMEDIA_TYPE_VIDEO) {
        ret->video = av_malloc(sizeof(AVFilterBufferRefVideoProps));
        if (!ret->video) {
            av_free(ret);
            return NULL;
        }
        copy_video_props(ret->video, ref->video);
        ret->extended_data = ret->data;
    } else if (ref->type == AVMEDIA_TYPE_AUDIO) {
        ret->audio = av_malloc(sizeof(AVFilterBufferRefAudioProps));
        if (!ret->audio) {
            av_free(ret);
            return NULL;
        }
        *ret->audio = *ref->audio;

        if (ref->extended_data && ref->extended_data != ref->data) {
            int nb_channels = av_get_channel_layout_nb_channels(ref->audio->channel_layout);
            if (!(ret->extended_data = av_malloc(sizeof(*ret->extended_data) *
                                                 nb_channels))) {
                av_freep(&ret->audio);
                av_freep(&ret);
                return NULL;
            }
            memcpy(ret->extended_data, ref->extended_data,
                   sizeof(*ret->extended_data) * nb_channels);
        } else
            ret->extended_data = ret->data;
    }
    ret->perms &= pmask;
    ret->buf->refcount ++;
    return ret;
}
Example #14
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    int i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
    dst->pkt_pts                = src->pkt_pts;
    dst->pkt_dts                = src->pkt_dts;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;

    memcpy(dst->error, src->error, sizeof(dst->error));

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                                         sd_src->size);
        if (!sd_dst) {
            for (i = 0; i < dst->nb_side_data; i++) {
                av_freep(&dst->side_data[i]->data);
                av_dict_free(&dst->side_data[i]->metadata);
                av_freep(&dst->side_data[i]);
            }
            av_freep(&dst->side_data);
            return AVERROR(ENOMEM);
        }
        memcpy(sd_dst->data, sd_src->data, sd_src->size);
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

    return 0;
}
Example #15
int ff_wms_parse_sdp_a_line(AVFormatContext *s, const char *p)
{
    int ret = 0;
    if (av_strstart(p, "pgmpu:data:application/vnd.ms.wms-hdr.asfv1;base64,", &p)) {
        AVIOContext pb;
        RTSPState *rt = s->priv_data;
        AVDictionary *opts = NULL;
        int len = strlen(p) * 6 / 8;
        char *buf = av_mallocz(len);
        AVInputFormat *iformat;

        av_base64_decode(buf, p, len);

        if (rtp_asf_fix_header(buf, len) < 0)
            av_log(s, AV_LOG_ERROR,
                   "Failed to fix invalid RTSP-MS/ASF min_pktsize\n");
        init_packetizer(&pb, buf, len);
        if (rt->asf_ctx) {
            avformat_close_input(&rt->asf_ctx);
        }
        if (!(iformat = av_find_input_format("asf")))
            return AVERROR_DEMUXER_NOT_FOUND;
        if (!(rt->asf_ctx = avformat_alloc_context()))
            return AVERROR(ENOMEM);
        rt->asf_ctx->pb      = &pb;
        av_dict_set(&opts, "no_resync_search", "1", 0);

        if ((ret = ff_copy_whitelists(rt->asf_ctx, s)) < 0) {
            av_dict_free(&opts);
            return ret;
        }

        ret = avformat_open_input(&rt->asf_ctx, "", iformat, &opts);
        av_dict_free(&opts);
        if (ret < 0)
            return ret;
        av_dict_copy(&s->metadata, rt->asf_ctx->metadata, 0);
        rt->asf_pb_pos = avio_tell(&pb);
        av_free(buf);
        rt->asf_ctx->pb = NULL;
    }
    return ret;
}
Example #16
static int64_t http_seek(URLContext *h, int64_t off, int whence)
{
    HTTPContext *s = h->priv_data;
    URLContext *old_hd = s->hd;
    int64_t old_off = s->off;
    uint8_t old_buf[BUFFER_SIZE];
    int old_buf_size, ret;
    AVDictionary *options = NULL;

    if (whence == AVSEEK_SIZE)
        return s->filesize;
    else if ((whence == SEEK_CUR && off == 0) ||
             (whence == SEEK_SET && off == s->off))
        return s->off;
    else if ((s->filesize == -1 && whence == SEEK_END) || h->is_streamed)
        return AVERROR(ENOSYS);

    /* we save the old context in case the seek fails */
    old_buf_size = s->buf_end - s->buf_ptr;
    memcpy(old_buf, s->buf_ptr, old_buf_size);
    s->hd = NULL;
    if (whence == SEEK_CUR)
        off += s->off;
    else if (whence == SEEK_END)
        off += s->filesize;
    s->off = off;

    /* if it fails, continue on old connection */
    av_dict_copy(&options, s->chained_options, 0);
    if ((ret = http_open_cnx(h, &options)) < 0) {
        av_dict_free(&options);
        memcpy(s->buffer, old_buf, old_buf_size);
        s->buf_ptr = s->buffer;
        s->buf_end = s->buffer + old_buf_size;
        s->hd      = old_hd;
        s->off     = old_off;
        return ret;
    }
    av_dict_free(&options);
    ffurl_close(old_hd);
    return off;
}
Example #17
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->enc;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }

    /* If the output format is not YUV420P, then a temporary YUV420P
     * picture is needed too. It is then converted to the required
     * output format. */
    ost->tmp_frame = NULL;
    if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
        ost->tmp_frame = alloc_picture(AV_PIX_FMT_YUV420P, c->width, c->height);
        if (!ost->tmp_frame) {
            fprintf(stderr, "Could not allocate temporary picture\n");
            exit(1);
        }
    }

    /* copy the stream parameters to the muxer */
    ret = avcodec_parameters_from_context(ost->st->codecpar, c);
    if (ret < 0) {
        fprintf(stderr, "Could not copy the stream parameters\n");
        exit(1);
    }
}
Example #18
void avfilter_copy_buffer_ref_props(AVFilterBufferRef *dst, AVFilterBufferRef *src)
{
    // copy common properties
    dst->pts             = src->pts;
    dst->pos             = src->pos;

    switch (src->type) {
    case AVMEDIA_TYPE_VIDEO: {
        if (dst->video->qp_table)
            av_freep(&dst->video->qp_table);
        copy_video_props(dst->video, src->video);
        break;
    }
    case AVMEDIA_TYPE_AUDIO: *dst->audio = *src->audio; break;
    default: break;
    }

    av_dict_free(&dst->metadata);
    av_dict_copy(&dst->metadata, src->metadata, 0);
}
Example #19
int GenericAudio::init()
{
	AVCodec* codec = avcodec_find_decoder(stream()->codec->codec_id);
	
	if(!codec)
		return error("Could not find decoder");
	
	if(avcodec_open2(stream()->codec, codec, 0) != 0)
		return error("Could not open decoder");
	
	// avcodec_find_encoder does not take sample_fmt into account,
	// so we have to find the encoder ourselves...
	AVCodec* encoder = findCodec(
		stream()->codec->codec_id,
		stream()->codec->sample_fmt
	);
	
	if(!encoder)
		return error("Could not find encoder");
	
	outputStream()->disposition = stream()->disposition;
	av_dict_copy(&outputStream()->metadata, stream()->metadata, 0);
	
	outputStream()->codec = avcodec_alloc_context3(encoder);
	avcodec_copy_context(outputStream()->codec, stream()->codec);
	
	if(avcodec_open2(outputStream()->codec, encoder, 0) != 0)
		return error("Could not open encoder");
	
	// Allocate sample buffer
	m_cutout_buf = (int16_t*)av_malloc(BUFSIZE);
	m_cutin_buf = (int16_t*)av_malloc(BUFSIZE);
	if(!m_cutout_buf || !m_cutin_buf)
		return error("Could not allocate sample buffer");
	
	m_nc = cutList().nextCutPoint(0);
	m_cutout = m_nc->direction == CutPoint::IN;
	
	return 0;
}
Example #20
static void open_video(AVFormatContext *oc, AVCodec *codec, OutputStream *ost, AVDictionary *opt_arg)
{
    int ret;
    AVCodecContext *c = ost->st->codec;
    AVDictionary *opt = NULL;

    av_dict_copy(&opt, opt_arg, 0);

    /* open the codec */
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        fprintf(stderr, "Could not open video codec: %s\n", av_err2str(ret));
        exit(1);
    }

    /* allocate and init a re-usable frame */
    ost->frame = alloc_picture(c->pix_fmt, c->width, c->height);
    if (!ost->frame) {
        fprintf(stderr, "Could not allocate video frame\n");
        exit(1);
    }
}
Example #21
int av_decode_init(AVCodecContext *c,enum AVCodecID codec_id, AVDictionary *opt_arg)
{
    AVCodec * codec;
    AVDictionary *opt = NULL;
    int ret;

    codec = avcodec_find_decoder(codec_id);
    if (!codec)
    {
        av_log(NULL, AV_LOG_FATAL, "Could not find decoder '%s'\n", avcodec_get_name(codec_id));
        return -1;
    }
    /* open it */
    av_dict_copy(&opt, opt_arg, 0);
    ret = avcodec_open2(c, codec, &opt);
    av_dict_free(&opt);
    if (ret < 0) {
        char errmsg[ERROR_BUFFER_SIZE];
        av_strerror(ret, errmsg, ERROR_BUFFER_SIZE);
        av_log(NULL, AV_LOG_FATAL, "Could not open codec: %s\n", errmsg);
        return -2;
    }
    return 0;
}
Example #22
void av_metadata_copy(AVDictionary **dst, AVDictionary *src, int flags)
{
    av_dict_copy(dst, src, flags);
}
Example #23
static int init_muxer(AVFormatContext *s, AVDictionary **options)
{
    int ret = 0, i;
    AVStream *st;
    AVDictionary *tmp = NULL;
    AVCodecContext *codec = NULL;
    AVOutputFormat *of = s->oformat;
    const AVCodecDescriptor *desc;

    if (options)
        av_dict_copy(&tmp, *options, 0);

    if ((ret = av_opt_set_dict(s, &tmp)) < 0)
        goto fail;

#if FF_API_LAVF_BITEXACT
    if (s->nb_streams && s->streams[0]->codec->flags & AV_CODEC_FLAG_BITEXACT)
        s->flags |= AVFMT_FLAG_BITEXACT;
#endif

    // some sanity checks
    if (s->nb_streams == 0 && !(of->flags & AVFMT_NOSTREAMS)) {
        av_log(s, AV_LOG_ERROR, "no streams\n");
        ret = AVERROR(EINVAL);
        goto fail;
    }

    for (i = 0; i < s->nb_streams; i++) {
        st    = s->streams[i];
        codec = st->codec;

#if FF_API_LAVF_CODEC_TB
FF_DISABLE_DEPRECATION_WARNINGS
        if (!st->time_base.num && codec->time_base.num) {
            av_log(s, AV_LOG_WARNING, "Using AVStream.codec.time_base as a "
                   "timebase hint to the muxer is deprecated. Set "
                   "AVStream.time_base instead.\n");
            avpriv_set_pts_info(st, 64, codec->time_base.num, codec->time_base.den);
        }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

        if (!st->time_base.num) {
            /* fall back on the default timebase values */
            if (codec->codec_type == AVMEDIA_TYPE_AUDIO && codec->sample_rate)
                avpriv_set_pts_info(st, 64, 1, codec->sample_rate);
            else
                avpriv_set_pts_info(st, 33, 1, 90000);
        }

        switch (codec->codec_type) {
        case AVMEDIA_TYPE_AUDIO:
            if (codec->sample_rate <= 0) {
                av_log(s, AV_LOG_ERROR, "sample rate not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }
            if (!codec->block_align)
                codec->block_align = codec->channels *
                                     av_get_bits_per_sample(codec->codec_id) >> 3;
            break;
        case AVMEDIA_TYPE_VIDEO:
            if ((codec->width <= 0 || codec->height <= 0) &&
                !(of->flags & AVFMT_NODIMENSIONS)) {
                av_log(s, AV_LOG_ERROR, "dimensions not set\n");
                ret = AVERROR(EINVAL);
                goto fail;
            }

            if (av_cmp_q(st->sample_aspect_ratio,
                         codec->sample_aspect_ratio)) {
                if (st->sample_aspect_ratio.num != 0 &&
                    st->sample_aspect_ratio.den != 0 &&
                    codec->sample_aspect_ratio.num != 0 &&
                    codec->sample_aspect_ratio.den != 0) {
                    av_log(s, AV_LOG_ERROR, "Aspect ratio mismatch between muxer "
                            "(%d/%d) and encoder layer (%d/%d)\n",
                            st->sample_aspect_ratio.num, st->sample_aspect_ratio.den,
                            codec->sample_aspect_ratio.num,
                            codec->sample_aspect_ratio.den);
                    ret = AVERROR(EINVAL);
                    goto fail;
                }
            }
            break;
        }

        desc = avcodec_descriptor_get(codec->codec_id);
        if (desc && desc->props & AV_CODEC_PROP_REORDER)
            st->internal->reorder = 1;

        if (of->codec_tag) {
            if (codec->codec_tag &&
                codec->codec_id == AV_CODEC_ID_RAWVIDEO &&
                !av_codec_get_tag(of->codec_tag, codec->codec_id) &&
                !validate_codec_tag(s, st)) {
                // the current rawvideo encoding system ends up setting
                // the wrong codec_tag for avi, we override it here
                codec->codec_tag = 0;
            }
            if (codec->codec_tag) {
                if (!validate_codec_tag(s, st)) {
                    char tagbuf[32];
                    av_get_codec_tag_string(tagbuf, sizeof(tagbuf), codec->codec_tag);
                    av_log(s, AV_LOG_ERROR,
                           "Tag %s/0x%08x incompatible with output codec id '%d'\n",
                           tagbuf, codec->codec_tag, codec->codec_id);
                    ret = AVERROR_INVALIDDATA;
                    goto fail;
                }
            } else
                codec->codec_tag = av_codec_get_tag(of->codec_tag, codec->codec_id);
        }

        if (of->flags & AVFMT_GLOBALHEADER &&
            !(codec->flags & AV_CODEC_FLAG_GLOBAL_HEADER))
            av_log(s, AV_LOG_WARNING,
                   "Codec for stream %d does not use global headers "
                   "but container format requires global headers\n", i);

        if (codec->codec_type != AVMEDIA_TYPE_ATTACHMENT)
            s->internal->nb_interleaved_streams++;
    }

    if (!s->priv_data && of->priv_data_size > 0) {
        s->priv_data = av_mallocz(of->priv_data_size);
        if (!s->priv_data) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        if (of->priv_class) {
            *(const AVClass **)s->priv_data = of->priv_class;
            av_opt_set_defaults(s->priv_data);
            if ((ret = av_opt_set_dict(s->priv_data, &tmp)) < 0)
                goto fail;
        }
    }

    /* set muxer identification string */
    if (!(s->flags & AVFMT_FLAG_BITEXACT)) {
        av_dict_set(&s->metadata, "encoder", LIBAVFORMAT_IDENT, 0);
    }

    if (options) {
         av_dict_free(options);
         *options = tmp;
    }

    return 0;

fail:
    av_dict_free(&tmp);
    return ret;
}
Example #24
static 
AVFormatContext *
init_output_context(const struct transcoder_ctx_t *ctx, AVStream **video_stream, AVStream **audio_stream) {
    AVFormatContext *oc;
    AVOutputFormat *fmt;
    AVStream *input_stream, *output_stream;
    AVCodec *c;
    AVCodecContext *cc;
    int audio_copied = 0; //copy just 1 stream

    fmt = av_guess_format("mpegts", NULL, NULL);
    if (!fmt) {
        fprintf(stderr, "[DEBUG] Error guessing format, dying\n");
        exit(199);
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "[DEBUG] Error allocating context, dying\n");
        exit(200);
    }
    
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", ctx->output_filename);
    oc->debug = 1;
    oc->start_time_realtime = ctx->input_context->start_time;
    oc->start_time = ctx->input_context->start_time;
    oc->duration = 0;
    oc->bit_rate = 0;

    for (int i = 0; i < ctx->input_context->nb_streams; i++) {
        input_stream = ctx->input_context->streams[i];
        output_stream = NULL;
        if (input_stream->index == ctx->video_stream_index) {
            //copy stuff from input video index
            c = avcodec_find_encoder(CODEC_ID_H264);
            output_stream = avformat_new_stream(oc, c);
            *video_stream = output_stream;
            cc = output_stream->codec;
            cc->width = input_stream->codec->width;
            cc->height = input_stream->codec->height;
#if 0       
            cc->width = viddef->nFrameWidth;
            cc->height = viddef->nFrameHeight;
#endif
            cc->codec_id = CODEC_ID_H264;
            cc->codec_type = AVMEDIA_TYPE_VIDEO;
            cc->bit_rate = ENCODED_BITRATE;
            cc->time_base = input_stream->codec->time_base;

            output_stream->avg_frame_rate = input_stream->avg_frame_rate;
            output_stream->r_frame_rate = input_stream->r_frame_rate;
            output_stream->start_time = AV_NOPTS_VALUE;

        } else if ((input_stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) && !audio_copied)  { 
            /* I care only about audio */
            c = avcodec_find_encoder(input_stream->codec->codec_id);
            output_stream = avformat_new_stream(oc, c);
            *audio_stream = output_stream;
            avcodec_copy_context(output_stream->codec, input_stream->codec);
            /* Apparently fixes a crash on .mkvs with attachments: */
            av_dict_copy(&output_stream->metadata, input_stream->metadata, 0);
            /* Reset the codec tag so as not to cause problems with output format */
            output_stream->codec->codec_tag = 0;
            audio_copied = 1;
        }
    }
    
    for (int i = 0; i < oc->nb_streams; i++) {
        if (oc->oformat->flags & AVFMT_GLOBALHEADER)
            oc->streams[i]->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
        if (oc->streams[i]->codec->sample_rate == 0)
            oc->streams[i]->codec->sample_rate = 48000; /* ish */
    }

    if (!(fmt->flags & AVFMT_NOFILE)) {
        fprintf(stderr, "[DEBUG] AVFMT_NOFILE set, allocating output container\n");
        if (avio_open(&oc->pb, ctx->output_filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "[DEBUG] error creating the output context\n");
            exit(1);
        }
    }
    
    return oc;
}
Example #25
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;

    if ((ret = parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);

    ret = avformat_alloc_output_context2(&avf2, NULL, format, filename);
    if (ret < 0)
        goto end;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            ret = avformat_match_stream_specifier(avf, avf->streams[i], select);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' for output '%s'\n",
                       select, slave);
                goto end;
            }

            if (ret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }
        st2->id = st->id;
        st2->r_frame_rate        = st->r_frame_rate;
        st2->time_base           = st->time_base;
        st2->start_time          = st->start_time;
        st2->duration            = st->duration;
        st2->nb_frames           = st->nb_frames;
        st2->disposition         = st->disposition;
        st2->sample_aspect_ratio = st->sample_aspect_ratio;
        st2->avg_frame_rate      = st->avg_frame_rate;
        av_dict_copy(&st2->metadata, st->metadata, 0);
        if ((ret = avcodec_copy_context(st2->codec, st->codec)) < 0)
            goto end;
    }

    if (!(avf2->oformat->flags & AVFMT_NOFILE)) {
        if ((ret = avio_open(&avf2->pb, filename, AVIO_FLAG_WRITE)) < 0) {
            av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n",
                   slave, av_err2str(ret));
            goto end;
        }
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }

    tee_slave->avf = avf2;
    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(TeeSlave));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    entry = NULL;
    while (entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX)) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                return AVERROR(EINVAL);
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = parse_bsfs(avf, entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        av_dict_set(&options, entry->key, NULL, 0);
    }

    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_dict_free(&options);
    return ret;
}
Example #26
static int open_slave(AVFormatContext *avf, char *slave, TeeSlave *tee_slave)
{
    int i, ret;
    AVDictionary *options = NULL;
    AVDictionaryEntry *entry;
    char *filename;
    char *format = NULL, *select = NULL, *on_fail = NULL;
    char *use_fifo = NULL, *fifo_options_str = NULL;
    AVFormatContext *avf2 = NULL;
    AVStream *st, *st2;
    int stream_count;
    int fullret;
    char *subselect = NULL, *next_subselect = NULL, *first_subselect = NULL, *tmp_select = NULL;

    if ((ret = ff_tee_parse_slave_options(avf, slave, &options, &filename)) < 0)
        return ret;

#define STEAL_OPTION(option, field) do {                                \
        if ((entry = av_dict_get(options, option, NULL, 0))) {          \
            field = entry->value;                                       \
            entry->value = NULL; /* prevent it from being freed */      \
            av_dict_set(&options, option, NULL, 0);                     \
        }                                                               \
    } while (0)

    STEAL_OPTION("f", format);
    STEAL_OPTION("select", select);
    STEAL_OPTION("onfail", on_fail);
    STEAL_OPTION("use_fifo", use_fifo);
    STEAL_OPTION("fifo_options", fifo_options_str);

    ret = parse_slave_failure_policy_option(on_fail, tee_slave);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR,
               "Invalid onfail option value, valid options are 'abort' and 'ignore'\n");
        goto end;
    }

    ret = parse_slave_fifo_options(use_fifo, fifo_options_str, tee_slave);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR, "Error parsing fifo options: %s\n", av_err2str(ret));
        goto end;
    }

    if (tee_slave->use_fifo) {

        if (options) {
            char *format_options_str = NULL;
            ret = av_dict_get_string(options, &format_options_str, '=', ':');
            if (ret < 0)
                goto end;

            ret = av_dict_set(&tee_slave->fifo_options, "format_options", format_options_str,
                              AV_DICT_DONT_STRDUP_VAL);
            if (ret < 0)
                goto end;
        }

        if (format) {
            ret = av_dict_set(&tee_slave->fifo_options, "fifo_format", format,
                              AV_DICT_DONT_STRDUP_VAL);
            format = NULL;
            if (ret < 0)
                goto end;
        }

        av_dict_free(&options);
        options = tee_slave->fifo_options;
    }
    ret = avformat_alloc_output_context2(&avf2, NULL,
                                         tee_slave->use_fifo ? "fifo" :format, filename);
    if (ret < 0)
        goto end;
    tee_slave->avf = avf2;
    av_dict_copy(&avf2->metadata, avf->metadata, 0);
    avf2->opaque   = avf->opaque;
    avf2->io_open  = avf->io_open;
    avf2->io_close = avf->io_close;
    avf2->interrupt_callback = avf->interrupt_callback;
    avf2->flags = avf->flags;

    tee_slave->stream_map = av_calloc(avf->nb_streams, sizeof(*tee_slave->stream_map));
    if (!tee_slave->stream_map) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    stream_count = 0;
    for (i = 0; i < avf->nb_streams; i++) {
        st = avf->streams[i];
        if (select) {
            tmp_select = av_strdup(select);  // av_strtok is destructive so we regenerate it in each loop
            if (!tmp_select) {
                ret = AVERROR(ENOMEM);
                goto end;
            }
            fullret = 0;
            first_subselect = tmp_select;
            next_subselect = NULL;
            while (subselect = av_strtok(first_subselect, slave_select_sep, &next_subselect)) {
                first_subselect = NULL;

                ret = avformat_match_stream_specifier(avf, avf->streams[i], subselect);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Invalid stream specifier '%s' for output '%s'\n",
                           subselect, slave);
                    goto end;
                }
                if (ret != 0) {
                    fullret = 1; // match
                    break;
                }
            }
            av_freep(&tmp_select);

            if (fullret == 0) { /* no match */
                tee_slave->stream_map[i] = -1;
                continue;
            }
        }
        tee_slave->stream_map[i] = stream_count++;

        if (!(st2 = avformat_new_stream(avf2, NULL))) {
            ret = AVERROR(ENOMEM);
            goto end;
        }

        ret = ff_stream_encode_params_copy(st2, st);
        if (ret < 0)
            goto end;
    }

    ret = ff_format_output_open(avf2, filename, NULL);
    if (ret < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error opening: %s\n", slave,
               av_err2str(ret));
        goto end;
    }

    if ((ret = avformat_write_header(avf2, &options)) < 0) {
        av_log(avf, AV_LOG_ERROR, "Slave '%s': error writing header: %s\n",
               slave, av_err2str(ret));
        goto end;
    }
    tee_slave->header_written = 1;

    tee_slave->bsfs = av_calloc(avf2->nb_streams, sizeof(*tee_slave->bsfs));
    if (!tee_slave->bsfs) {
        ret = AVERROR(ENOMEM);
        goto end;
    }

    entry = NULL;
    while (entry = av_dict_get(options, "bsfs", NULL, AV_DICT_IGNORE_SUFFIX)) {
        const char *spec = entry->key + strlen("bsfs");
        if (*spec) {
            if (strspn(spec, slave_bsfs_spec_sep) != 1) {
                av_log(avf, AV_LOG_ERROR,
                       "Specifier separator in '%s' is '%c', but only characters '%s' "
                       "are allowed\n", entry->key, *spec, slave_bsfs_spec_sep);
                ret = AVERROR(EINVAL);
                goto end;
            }
            spec++; /* consume separator */
        }

        for (i = 0; i < avf2->nb_streams; i++) {
            ret = avformat_match_stream_specifier(avf2, avf2->streams[i], spec);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Invalid stream specifier '%s' in bsfs option '%s' for slave "
                       "output '%s'\n", spec, entry->key, filename);
                goto end;
            }

            if (ret > 0) {
                av_log(avf, AV_LOG_DEBUG, "spec:%s bsfs:%s matches stream %d of slave "
                       "output '%s'\n", spec, entry->value, i, filename);
                if (tee_slave->bsfs[i]) {
                    av_log(avf, AV_LOG_WARNING,
                           "Duplicate bsfs specification associated to stream %d of slave "
                           "output '%s', filters will be ignored\n", i, filename);
                    continue;
                }
                ret = av_bsf_list_parse_str(entry->value, &tee_slave->bsfs[i]);
                if (ret < 0) {
                    av_log(avf, AV_LOG_ERROR,
                           "Error parsing bitstream filter sequence '%s' associated to "
                           "stream %d of slave output '%s'\n", entry->value, i, filename);
                    goto end;
                }
            }
        }

        av_dict_set(&options, entry->key, NULL, 0);
    }

    for (i = 0; i < avf->nb_streams; i++){
        int target_stream = tee_slave->stream_map[i];
        if (target_stream < 0)
            continue;

        if (!tee_slave->bsfs[target_stream]) {
            /* Add pass-through bitstream filter */
            ret = av_bsf_get_null_filter(&tee_slave->bsfs[target_stream]);
            if (ret < 0) {
                av_log(avf, AV_LOG_ERROR,
                       "Failed to create pass-through bitstream filter: %s\n",
                       av_err2str(ret));
                goto end;
            }
        }

        tee_slave->bsfs[target_stream]->time_base_in = avf->streams[i]->time_base;
        ret = avcodec_parameters_copy(tee_slave->bsfs[target_stream]->par_in,
                                      avf->streams[i]->codecpar);
        if (ret < 0)
            goto end;

        ret = av_bsf_init(tee_slave->bsfs[target_stream]);
        if (ret < 0) {
            av_log(avf, AV_LOG_ERROR,
            "Failed to initialize bitstream filter(s): %s\n",
            av_err2str(ret));
            goto end;
        }
    }

    if (options) {
        entry = NULL;
        while ((entry = av_dict_get(options, "", entry, AV_DICT_IGNORE_SUFFIX)))
            av_log(avf2, AV_LOG_ERROR, "Unknown option '%s'\n", entry->key);
        ret = AVERROR_OPTION_NOT_FOUND;
        goto end;
    }

end:
    av_free(format);
    av_free(select);
    av_free(on_fail);
    av_dict_free(&options);
    av_freep(&tmp_select);
    return ret;
}
Example #27
QVector<QPair<QString, QString>> CameraDevice::getRawDeviceListGeneric()
{
    QVector<QPair<QString, QString>> devices;

    if (!getDefaultInputFormat())
        return devices;

    // Alloc an input device context
    AVFormatContext *s;
    if (!(s = avformat_alloc_context()))
        return devices;
    if (!iformat->priv_class || !AV_IS_INPUT_DEVICE(iformat->priv_class->category))
    {
        avformat_free_context(s);
        return devices;
    }
    s->iformat = iformat;
    if (s->iformat->priv_data_size > 0)
    {
        s->priv_data = av_mallocz(s->iformat->priv_data_size);
        if (!s->priv_data)
        {
            avformat_free_context(s);
            return devices;
        }
        if (s->iformat->priv_class)
        {
            *(const AVClass**)s->priv_data= s->iformat->priv_class;
            av_opt_set_defaults(s->priv_data);
        }
    }
    else
    {
        s->priv_data = NULL;
    }

    // List the devices for this context
    AVDeviceInfoList* devlist = nullptr;
    AVDictionary *tmp = nullptr;
    av_dict_copy(&tmp, nullptr, 0);
    if (av_opt_set_dict2(s, &tmp, AV_OPT_SEARCH_CHILDREN) < 0)
    {
        av_dict_free(&tmp);
        avformat_free_context(s);
        return devices; // the context was freed above, so we cannot go on to list devices
    }
    avdevice_list_devices(s, &devlist);
    if (!devlist)
    {
        qWarning() << "avdevice_list_devices failed";
        return devices;
    }

    // Convert the list to a QVector
    devices.resize(devlist->nb_devices);
    for (int i=0; i<devlist->nb_devices; i++)
    {
        AVDeviceInfo* dev = devlist->devices[i];
        devices[i].first = dev->device_name;
        devices[i].second = dev->device_description;
    }
    avdevice_free_list_devices(&devlist);
    return devices;
}
Example #28
int av_frame_copy_props(AVFrame *dst, const AVFrame *src)
{
    int i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
#if FF_API_AVFRAME_LAVC
    dst->type                   = src->type;
#endif
    dst->pkt_pts                = src->pkt_pts;
    dst->pkt_dts                = src->pkt_dts;
    dst->pkt_pos                = src->pkt_pos;
    dst->pkt_size               = src->pkt_size;
    dst->pkt_duration           = src->pkt_duration;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->best_effort_timestamp  = src->best_effort_timestamp;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;
    dst->decode_error_flags     = src->decode_error_flags;
    dst->colorspace             = src->colorspace;
    dst->color_range            = src->color_range;

    av_dict_copy(&dst->metadata, src->metadata, 0);

    memcpy(dst->error, src->error, sizeof(dst->error));

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                                         sd_src->size);
        if (!sd_dst) {
            for (i = 0; i < dst->nb_side_data; i++) {
                av_freep(&dst->side_data[i]->data);
                av_dict_free(&dst->side_data[i]->metadata);
                av_freep(&dst->side_data[i]);
            }
            av_freep(&dst->side_data);
            return AVERROR(ENOMEM);
        }
        memcpy(sd_dst->data, sd_src->data, sd_src->size);
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

    dst->qscale_table = NULL;
    dst->qstride      = 0;
    dst->qscale_type  = 0;
    if (src->qp_table_buf) {
        dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
        if (dst->qp_table_buf) {
            dst->qscale_table = dst->qp_table_buf->data;
            dst->qstride      = src->qstride;
            dst->qscale_type  = src->qscale_type;
        }
    }

    return 0;
}
Example #29
static int frame_copy_props(AVFrame *dst, const AVFrame *src, int force_copy)
{
    int i;

    dst->key_frame              = src->key_frame;
    dst->pict_type              = src->pict_type;
    dst->sample_aspect_ratio    = src->sample_aspect_ratio;
    dst->pts                    = src->pts;
    dst->repeat_pict            = src->repeat_pict;
    dst->interlaced_frame       = src->interlaced_frame;
    dst->top_field_first        = src->top_field_first;
    dst->palette_has_changed    = src->palette_has_changed;
    dst->sample_rate            = src->sample_rate;
    dst->opaque                 = src->opaque;
    dst->pkt_pts                = src->pkt_pts;
    dst->pkt_dts                = src->pkt_dts;
    dst->pkt_pos                = src->pkt_pos;
    dst->pkt_size               = src->pkt_size;
    dst->pkt_duration           = src->pkt_duration;
    dst->reordered_opaque       = src->reordered_opaque;
    dst->quality                = src->quality;
    dst->best_effort_timestamp  = src->best_effort_timestamp;
    dst->coded_picture_number   = src->coded_picture_number;
    dst->display_picture_number = src->display_picture_number;
    dst->flags                  = src->flags;
    dst->decode_error_flags     = src->decode_error_flags;
    dst->color_primaries        = src->color_primaries;
    dst->color_trc              = src->color_trc;
    dst->colorspace             = src->colorspace;
    dst->color_range            = src->color_range;
    dst->chroma_location        = src->chroma_location;

    av_dict_copy(&dst->metadata, src->metadata, 0);

#if FF_API_ERROR_FRAME
FF_DISABLE_DEPRECATION_WARNINGS
    memcpy(dst->error, src->error, sizeof(dst->error));
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    for (i = 0; i < src->nb_side_data; i++) {
        const AVFrameSideData *sd_src = src->side_data[i];
        AVFrameSideData *sd_dst;
        if (   sd_src->type == AV_FRAME_DATA_PANSCAN
            && (src->width != dst->width || src->height != dst->height))
            continue;
        if (force_copy) {
            sd_dst = av_frame_new_side_data(dst, sd_src->type,
                                            sd_src->size);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            memcpy(sd_dst->data, sd_src->data, sd_src->size);
        } else {
            sd_dst = av_frame_new_side_data(dst, sd_src->type, 0);
            if (!sd_dst) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            sd_dst->buf = av_buffer_ref(sd_src->buf);
            if (!sd_dst->buf) {
                wipe_side_data(dst);
                return AVERROR(ENOMEM);
            }
            sd_dst->data = sd_dst->buf->data;
            sd_dst->size = sd_dst->buf->size;
        }
        av_dict_copy(&sd_dst->metadata, sd_src->metadata, 0);
    }

#if FF_API_FRAME_QP
FF_DISABLE_DEPRECATION_WARNINGS
    dst->qscale_table = NULL;
    dst->qstride      = 0;
    dst->qscale_type  = 0;
    av_buffer_unref(&dst->qp_table_buf);
    if (src->qp_table_buf) {
        dst->qp_table_buf = av_buffer_ref(src->qp_table_buf);
        if (dst->qp_table_buf) {
            dst->qscale_table = dst->qp_table_buf->data;
            dst->qstride      = src->qstride;
            dst->qscale_type  = src->qscale_type;
        }
    }
FF_ENABLE_DEPRECATION_WARNINGS
#endif

    return 0;
}
Example #30
static int tee_write_header(AVFormatContext *avf)
{
    TeeContext *tee = avf->priv_data;
    unsigned nb_slaves = 0, i;
    const char *filename = avf->filename;
    char **slaves = NULL;
    int ret;

    while (*filename) {
        char *slave = av_get_token(&filename, slave_delim);
        if (!slave) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        ret = av_dynarray_add_nofree(&slaves, &nb_slaves, slave);
        if (ret < 0) {
            av_free(slave);
            goto fail;
        }
        if (strspn(filename, slave_delim))
            filename++;
    }

    if (tee->fifo_options_str) {
        ret = av_dict_parse_string(&tee->fifo_options, tee->fifo_options_str, "=", ":", 0);
        if (ret < 0)
            goto fail;
    }

    if (!(tee->slaves = av_mallocz_array(nb_slaves, sizeof(*tee->slaves)))) {
        ret = AVERROR(ENOMEM);
        goto fail;
    }
    tee->nb_slaves = tee->nb_alive = nb_slaves;

    for (i = 0; i < nb_slaves; i++) {

        tee->slaves[i].use_fifo = tee->use_fifo;
        ret = av_dict_copy(&tee->slaves[i].fifo_options, tee->fifo_options, 0);
        if (ret < 0)
            goto fail;

        if ((ret = open_slave(avf, slaves[i], &tee->slaves[i])) < 0) {
            ret = tee_process_slave_failure(avf, i, ret);
            if (ret < 0)
                goto fail;
        } else {
            log_slave(&tee->slaves[i], avf, AV_LOG_VERBOSE);
        }
        av_freep(&slaves[i]);
    }

    for (i = 0; i < avf->nb_streams; i++) {
        int j, mapped = 0;
        for (j = 0; j < tee->nb_slaves; j++)
            if (tee->slaves[j].avf)
                mapped += tee->slaves[j].stream_map[i] >= 0;
        if (!mapped)
            av_log(avf, AV_LOG_WARNING, "Input stream #%d is not mapped "
                   "to any slave.\n", i);
    }
    av_free(slaves);
    return 0;

fail:
    for (i = 0; i < nb_slaves; i++)
        av_freep(&slaves[i]);
    close_slaves(avf);
    av_free(slaves);
    return ret;
}