Example no. 1
/** Set the values of the AVCodecContext or AVFormatContext structure.
 * They are set to the defaults specified in the default_val field of the
 * corresponding AVOption in the options array.
 *
 * @param s AVCodecContext or AVFormatContext for which the defaults will be set
 */
void av_opt_set_defaults(void *s)
{
    const AVOption *opt = NULL;
    while ((opt = av_next_option(s, opt)) != NULL) {
        switch(opt->type) {
            case FF_OPT_TYPE_CONST:
                /* Nothing to be done here */
            break;
            case FF_OPT_TYPE_FLAGS:
            case FF_OPT_TYPE_INT: {
                int val;
                val = opt->default_val;
                av_set_int(s, opt->name, val);
            }
            break;
            case FF_OPT_TYPE_FLOAT: {
                double val;
                val = opt->default_val;
                av_set_double(s, opt->name, val);
            }
            break;
            case FF_OPT_TYPE_RATIONAL: {
                AVRational val;
                val = av_d2q(opt->default_val, INT_MAX);
                av_set_q(s, opt->name, val);
            }
            break;
            case FF_OPT_TYPE_STRING:
                /* Cannot set default for string as default_val is of type double */
            break;
            default:
                av_log(s, AV_LOG_DEBUG, "AVOption type %d of option %s not implemented yet\n", opt->type, opt->name);
        }
    }
}
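
A minimal usage sketch, assuming the same legacy option API as the function above (avcodec_alloc_context() and the codec option name "qmin"; header locations varied across FFmpeg versions, and avcodec_alloc_context() already applies these defaults itself, so the explicit call only illustrates the call shape):

#include <libavcodec/avcodec.h>
#include <libavcodec/opt.h>   /* old home of av_set_int()/av_opt_set_defaults(); later libavutil/opt.h */

static AVCodecContext *make_default_context(void)
{
    AVCodecContext *ctx = avcodec_alloc_context();
    if (!ctx)
        return NULL;
    av_opt_set_defaults(ctx);        /* reset every option to its default_val */
    av_set_int(ctx, "qmin", 2);      /* then override individual options by name */
    return ctx;
}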
Example no. 2
AVFormatContext *ff_rtp_chain_mux_open(AVFormatContext *s, AVStream *st,
                                       URLContext *handle, int packet_size)
{
    AVFormatContext *rtpctx;
    int ret;
    AVOutputFormat *rtp_format = av_guess_format("rtp", NULL, NULL);

    if (!rtp_format)
        return NULL;

    /* Allocate an AVFormatContext for each output stream */
    rtpctx = avformat_alloc_context();
    if (!rtpctx)
        return NULL;

    rtpctx->oformat = rtp_format;
    if (!av_new_stream(rtpctx, 0)) {
        av_free(rtpctx);
        return NULL;
    }
    /* Copy the max delay setting; the rtp muxer reads this. */
    rtpctx->max_delay = s->max_delay;
    /* Copy other stream parameters. */
    rtpctx->streams[0]->sample_aspect_ratio = st->sample_aspect_ratio;
    rtpctx->flags |= s->flags & AVFMT_FLAG_MP4A_LATM;

    av_set_parameters(rtpctx, NULL);
    /* Copy the rtpflags values straight through */
    if (s->oformat->priv_class &&
        av_find_opt(s->priv_data, "rtpflags", NULL, 0, 0))
        av_set_int(rtpctx->priv_data, "rtpflags",
                   av_get_int(s->priv_data, "rtpflags", NULL));

    /* Set the synchronized start time. */
    rtpctx->start_time_realtime = s->start_time_realtime;

    avcodec_copy_context(rtpctx->streams[0]->codec, st->codec);

    if (handle) {
        ffio_fdopen(&rtpctx->pb, handle);
    } else
        ffio_open_dyn_packet_buf(&rtpctx->pb, packet_size);
    ret = avformat_write_header(rtpctx, NULL);

    if (ret) {
        if (handle) {
            avio_close(rtpctx->pb);
        } else {
            uint8_t *ptr;
            avio_close_dyn_buf(rtpctx->pb, &ptr);
            av_free(ptr);
        }
        avformat_free_context(rtpctx);
        return NULL;
    }

    return rtpctx;
}
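
A rough sketch of how a caller might drive the chained muxer returned above (the packet and the 1472-byte packet size are placeholders; real callers also close the context later with av_write_trailer(), avio_close()/avio_close_dyn_buf() and avformat_free_context()):

static int forward_packet(AVFormatContext *s, AVStream *st,
                          URLContext *handle, AVPacket *pkt)
{
    AVFormatContext *rtpctx = ff_rtp_chain_mux_open(s, st, handle, 1472);
    if (!rtpctx)
        return -1;
    pkt->stream_index = 0;   /* the chained context has exactly one stream */
    return av_interleaved_write_frame(rtpctx, pkt);
}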
Example no. 3
void MovieDecoder::convertAndScaleFrame(PixelFormat format, int scaledSize, bool maintainAspectRatio, int& scaledWidth, int& scaledHeight)
{
    calculateDimensions(scaledSize, maintainAspectRatio, scaledWidth, scaledHeight);

#ifdef LATEST_GREATEST_FFMPEG
	// Enable this when it hits the released ffmpeg version
    SwsContext* scaleContext = sws_alloc_context();
    if (scaleContext == nullptr)
    {
		throw std::logic_error("Failed to allocate scale context");
	}
	
	av_set_int(scaleContext, "srcw", m_pVideoCodecContext->width);
    av_set_int(scaleContext, "srch", m_pVideoCodecContext->height);
    av_set_int(scaleContext, "src_format", m_pVideoCodecContext->pix_fmt);
    av_set_int(scaleContext, "dstw", scaledWidth);
    av_set_int(scaleContext, "dsth", scaledHeight);
    av_set_int(scaleContext, "dst_format", format);
	av_set_int(scaleContext, "sws_flags", SWS_BICUBIC);
	
	const int* coeff = sws_getCoefficients(SWS_CS_DEFAULT);
    if (sws_setColorspaceDetails(scaleContext, coeff, m_pVideoCodecContext->pix_fmt, coeff, format, 0, 1<<16, 1<<16) < 0)
    {
		sws_freeContext(scaleContext);
		throw std::logic_error("Failed to set colorspace details");
	}

	if (sws_init_context(scaleContext, nullptr, nullptr) < 0)
	{
		sws_freeContext(scaleContext);
		throw std::logic_error("Failed to initialise scale context");
	}
#else
    SwsContext* scaleContext = sws_getContext(m_pVideoCodecContext->width, m_pVideoCodecContext->height,
                                              m_pVideoCodecContext->pix_fmt, scaledWidth, scaledHeight,
                                              format, SWS_BICUBIC, nullptr, nullptr, nullptr);

    if (nullptr == scaleContext)
    {
        throw std::logic_error("Failed to create resize context");
    }
#endif

    AVFrame* convertedFrame = nullptr;
    uint8_t* convertedFrameBuffer = nullptr;

    createAVFrame(&convertedFrame, &convertedFrameBuffer, scaledWidth, scaledHeight, format);
    
    sws_scale(scaleContext, m_pFrame->data, m_pFrame->linesize, 0, m_pVideoCodecContext->height,
              convertedFrame->data, convertedFrame->linesize);
    sws_freeContext(scaleContext);

    av_free(m_pFrame);
    av_free(m_pFrameBuffer);
    
    m_pFrame        = convertedFrame;
    m_pFrameBuffer  = convertedFrameBuffer;
}
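
createAVFrame() is a private helper of this class and is not shown; a rough sketch of what such a helper could look like with the same-era API (the name, signature and lack of error handling below are assumptions, not the original implementation):

/* Allocate a frame plus a matching pixel buffer and bind them together. */
static void create_av_frame(AVFrame **frame, uint8_t **buffer,
                            int width, int height, enum PixelFormat format)
{
    *frame  = avcodec_alloc_frame();
    *buffer = av_malloc(avpicture_get_size(format, width, height));
    avpicture_fill((AVPicture *)*frame, *buffer, format, width, height);
}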
Example no. 4
/** Set the values of the AVCodecContext or AVFormatContext structure.
 * They are set to the defaults specified in the default_val field of the
 * corresponding AVOption in the options array.
 *
 * @param s AVCodecContext or AVFormatContext for which the defaults will be set
 */
void av_opt_set_defaults2(void *s, int mask, int flags)
{
    const AVOption *opt = NULL;
    while ((opt = av_next_option(s, opt)) != NULL) {
        if((opt->flags & mask) != flags)
            continue;
        switch(opt->type) {
            case FF_OPT_TYPE_CONST:
                /* Nothing to be done here */
            break;
            case FF_OPT_TYPE_FLAGS:
            case FF_OPT_TYPE_INT: {
                int val;
                val = opt->default_val;
                av_set_int(s, opt->name, val);
            }
            break;
            case FF_OPT_TYPE_INT64:
                if((double)(opt->default_val+0.6) == opt->default_val)
                    av_log(s, AV_LOG_DEBUG, "loss of precission in default of %s\n", opt->name);
                av_set_int(s, opt->name, opt->default_val);
            break;
            case FF_OPT_TYPE_FLOAT: {
                double val;
                val = opt->default_val;
                av_set_double(s, opt->name, val);
            }
            break;
            case FF_OPT_TYPE_RATIONAL: {
                AVRational val;
                val = av_d2q(opt->default_val, INT_MAX);
                av_set_q(s, opt->name, val);
            }
            break;
            case FF_OPT_TYPE_STRING:
            case FF_OPT_TYPE_BINARY:
                /* Cannot set default for string as default_val is of type double */
            break;
            default:
                av_log(s, AV_LOG_DEBUG, "AVOption type %d of option %s not implemented yet\n", opt->type, opt->name);
        }
    }
}
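
A small sketch of how the mask/flags pair is typically used: pass the same bits in both arguments so that only options carrying those flags are reset (the AV_OPT_FLAG_* names are the standard ones from the option headers):

/* Reset only the options marked as video encoding parameters,
 * leaving audio- and decoding-related options untouched. */
static void reset_video_encoder_defaults(AVCodecContext *ctx)
{
    int flags = AV_OPT_FLAG_ENCODING_PARAM | AV_OPT_FLAG_VIDEO_PARAM;
    av_opt_set_defaults2(ctx, flags, flags);
}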
Example no. 5
static void set_ffmpeg_property_option(AVCodecContext *c, IDProperty *prop)
{
	char name[128];
	char *param;
	const AVOption *rv = NULL;

	PRINT("FFMPEG expert option: %s: ", prop->name);

	BLI_strncpy(name, prop->name, sizeof(name));

	param = strchr(name, ':');

	if (param) {
		*param++ = 0;
	}

	switch (prop->type) {
		case IDP_STRING:
			PRINT("%s.\n", IDP_String(prop));
			av_set_string3(c, prop->name, IDP_String(prop), 1, &rv);
			break;
		case IDP_FLOAT:
			PRINT("%g.\n", IDP_Float(prop));
			rv = av_set_double(c, prop->name, IDP_Float(prop));
			break;
		case IDP_INT:
			PRINT("%d.\n", IDP_Int(prop));

			if (param) {
				if (IDP_Int(prop)) {
					av_set_string3(c, name, param, 1, &rv);
				}
				else {
					return;
				}
			}
			else {
				rv = av_set_int(c, prop->name, IDP_Int(prop));
			}
			break;
	}

	if (!rv) {
		PRINT("ffmpeg-option not supported: %s! Skipping.\n", prop->name);
	}
}
Example no. 6
SwrContext *swr_alloc2(struct SwrContext *s, int64_t out_ch_layout, enum AVSampleFormat out_sample_fmt, int out_sample_rate,
                       int64_t  in_ch_layout, enum AVSampleFormat  in_sample_fmt, int  in_sample_rate,
                       int log_offset, void *log_ctx){
    if(!s) s= swr_alloc();
    if(!s) return NULL;

    s->log_level_offset= log_offset;
    s->log_ctx= log_ctx;

    av_set_int(s, "ocl", out_ch_layout);
    av_set_int(s, "osf", out_sample_fmt);
    av_set_int(s, "osr", out_sample_rate);
    av_set_int(s, "icl", in_ch_layout);
    av_set_int(s, "isf", in_sample_fmt);
    av_set_int(s, "isr", in_sample_rate);

    s-> in.ch_count= av_get_channel_layout_nb_channels(s-> in_ch_layout);
    s->out.ch_count= av_get_channel_layout_nb_channels(s->out_ch_layout);
    s->int_sample_fmt = AV_SAMPLE_FMT_S16;

    return s;
}
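
A usage sketch for the allocator above (a hypothetical caller; the channel-layout and sample-format constants are the standard libavutil ones, and swr_init()/swr_free() are assumed to be available in the same library version):

static SwrContext *open_downsampler(void)
{
    /* 48 kHz stereo float in, 44.1 kHz stereo 16-bit out */
    SwrContext *swr = swr_alloc2(NULL,
                                 AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, 44100,
                                 AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_FLT, 48000,
                                 0, NULL);
    if (swr && swr_init(swr) < 0)
        swr_free(&swr);
    return swr;
}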
Example no. 7
void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int out_size, ret;
    AVCodecContext *c;
    static struct SwsContext *img_convert_ctx;

    //printf("Here0 \n");

    c = st->codec;

        if (c->pix_fmt != PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
               to the codec pixel format if needed */
            if (img_convert_ctx == NULL) {

		#if (LIBSWSCALE_VERSION_INT<AV_VERSION_INT(0,12,0))
            	img_convert_ctx = sws_getContext(c->width, c->height,
                                                 PIX_FMT_YUV420P,
                                                 c->width, c->height,
                                                 c->pix_fmt,
                                                 sws_flags, NULL, NULL, NULL);
		#else
            	img_convert_ctx = sws_alloc_context();

                if (img_convert_ctx == NULL) {
                    fprintf(stderr, "Cannot initialize the conversion context\n");
                    exit(1);
                }

                /* see http://permalink.gmane.org/gmane.comp.video.ffmpeg.devel/118362 */
                /* see http://ffmpeg-users.933282.n4.nabble.com/Documentation-for-sws-init-context-td2956723.html */

                av_set_int(img_convert_ctx, "srcw", c->width);
                av_set_int(img_convert_ctx, "srch", c->height);

                av_set_int(img_convert_ctx, "dstw", c->width);
                av_set_int(img_convert_ctx, "dsth", c->height);

                av_set_int(img_convert_ctx, "src_format", PIX_FMT_YUV420P);
                av_set_int(img_convert_ctx, "dst_format", c->pix_fmt);

                av_set_int(img_convert_ctx, "param0", 0);
                av_set_int(img_convert_ctx, "param1", 0);

                av_set_int(img_convert_ctx, "flags", sws_flags);

                sws_init_context(img_convert_ctx,NULL,NULL);
		#endif

            }
            sws_scale(img_convert_ctx, (const uint8_t* const *)tmp_picture->data,
            		  tmp_picture->linesize,
                      0, c->height, picture_to_encode->data, picture_to_encode->linesize);
        } else {

        }


    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* raw video case. The API will change slightly in the near
           future for that */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags |= AV_PKT_FLAG_KEY;
        pkt.stream_index= st->index;
        pkt.data= (uint8_t *)picture_to_encode;
        pkt.size= sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        /* encode the image */
    	//printf("Here1 \n");
        out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture_to_encode);
        /* if zero size, it means the image was buffered */
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);

            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }
    frame_count++;
}
Example no. 8
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    const AVOption *opt;
    AVDictionaryEntry *t = NULL;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    char mp_filename[256]="mp:";

    stream_seek(demuxer->stream, 0);

    avfc = avformat_alloc_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (user_correct_pts != 0)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    if(opt_probesize) {
        opt = av_set_int(avfc, "probesize", opt_probesize);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        opt = av_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if(opt_avopt){
        if(parse_avopts(avfc, opt_avopt) < 0){
            mp_msg(MSGT_HEADER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", opt_avopt);
            return NULL;
        }
    }

    if(demuxer->stream->url) {
        if (!strncmp(demuxer->stream->url, "ffmpeg://rtsp:", 14))
            av_strlcpy(mp_filename, demuxer->stream->url + 9, sizeof(mp_filename));
        else
            av_strlcat(mp_filename, demuxer->stream->url, sizeof(mp_filename));
    } else
        av_strlcat(mp_filename, "foobar.dummy", sizeof(mp_filename));

    if (!(priv->avif->flags & AVFMT_NOFILE)) {
        priv->pb = avio_alloc_context(priv->buffer, BIO_BUFFER_SIZE, 0,
                                      demuxer, mp_read, NULL, mp_seek);
        priv->pb->read_seek = mp_read_seek;
        priv->pb->is_streamed = !demuxer->stream->end_pos || (demuxer->stream->flags & MP_STREAM_SEEK) != MP_STREAM_SEEK;
        priv->pb->seekable = priv->pb->is_streamed ? 0 : AVIO_SEEKABLE_NORMAL;
        avfc->pb = priv->pb;
    }

    if(avformat_open_input(&avfc, mp_filename, priv->avif, NULL)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }

    priv->avfc= avfc;

    if(avformat_find_stream_info(avfc, NULL) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }

    /* Add metadata. */
    while((t = av_dict_get(avfc->metadata, "", t, AV_METADATA_IGNORE_SUFFIX)))
        demux_info_add(demuxer, t->key, t->value);

    for(i=0; i < avfc->nb_chapters; i++) {
        AVChapter *c = avfc->chapters[i];
        uint64_t start = av_rescale_q(c->start, c->time_base, (AVRational){1,1000});
        uint64_t end   = av_rescale_q(c->end, c->time_base, (AVRational){1,1000});
        t = av_dict_get(c->metadata, "title", NULL, 0);
        demuxer_add_chapter(demuxer, t ? t->value : NULL, start, end);
    }

    for(i=0; i<avfc->nb_streams; i++)
        handle_stream(demuxer, avfc, i);
    priv->nb_streams_last = avfc->nb_streams;

    if(avfc->nb_programs) {
        int p;
        for (p = 0; p < avfc->nb_programs; p++) {
            AVProgram *program = avfc->programs[p];
            t = av_dict_get(program->metadata, "title", NULL, 0);
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, t ? t->value : "");
            mp_msg(MSGT_IDENTIFY, MSGL_V, "PROGRAM_ID=%d\n", program->id);
        }
    }

    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL;
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
Example no. 9
static struct proxy_output_ctx *alloc_proxy_output_ffmpeg(
        struct anim *anim,
        AVStream *st, int proxy_size, int width, int height,
        int quality)
{
	struct proxy_output_ctx *rv = MEM_callocN(
	        sizeof(struct proxy_output_ctx), "alloc_proxy_output");
	
	char fname[FILE_MAX];
	int ffmpeg_quality;

	/* JPEG requires this */
	width = round_up(width, 8);
	height = round_up(height, 8);

	rv->proxy_size = proxy_size;
	rv->anim = anim;

	get_proxy_filename(rv->anim, rv->proxy_size, fname, TRUE);
	BLI_make_existing_file(fname);

	rv->of = avformat_alloc_context();
	rv->of->oformat = av_guess_format("avi", NULL, NULL);
	
	BLI_snprintf(rv->of->filename, sizeof(rv->of->filename), "%s", fname);

	fprintf(stderr, "Starting work on proxy: %s\n", rv->of->filename);

	rv->st = av_new_stream(rv->of, 0);
	rv->c = rv->st->codec;
	rv->c->codec_type = AVMEDIA_TYPE_VIDEO;
	rv->c->codec_id = CODEC_ID_MJPEG;
	rv->c->width = width;
	rv->c->height = height;

	rv->of->oformat->video_codec = rv->c->codec_id;
	rv->codec = avcodec_find_encoder(rv->c->codec_id);

	if (!rv->codec) {
		fprintf(stderr, "No ffmpeg MJPEG encoder available? "
		        "Proxy not built!\n");
		av_free(rv->of);
		return NULL;
	}

	if (rv->codec->pix_fmts) {
		rv->c->pix_fmt = rv->codec->pix_fmts[0];
	}
	else {
		rv->c->pix_fmt = PIX_FMT_YUVJ420P;
	}

	rv->c->sample_aspect_ratio =
	    rv->st->sample_aspect_ratio =
	        st->codec->sample_aspect_ratio;

	rv->c->time_base.den = 25;
	rv->c->time_base.num = 1;
	rv->st->time_base = rv->c->time_base;

	/* there's no way to set JPEG quality in the same way as in AVI JPEG and image sequence,
	 * but this seems to give the expected quality result */
	ffmpeg_quality = (int)(1.0f + 30.0f * (1.0f - (float)quality / 100.0f) + 0.5f);
	av_set_int(rv->c, "qmin", ffmpeg_quality);
	av_set_int(rv->c, "qmax", ffmpeg_quality);

	if (rv->of->flags & AVFMT_GLOBALHEADER) {
		rv->c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	if (avio_open(&rv->of->pb, fname, AVIO_FLAG_WRITE) < 0) {
		fprintf(stderr, "Couldn't open outputfile! "
		        "Proxy not built!\n");
		av_free(rv->of);
		return 0;
	}

	avcodec_open(rv->c, rv->codec);

	rv->video_buffersize = 2000000;
	rv->video_buffer = (uint8_t *)MEM_mallocN(
	        rv->video_buffersize, "FFMPEG video buffer");

	rv->orig_height = st->codec->height;

	if (st->codec->width != width || st->codec->height != height ||
	    st->codec->pix_fmt != rv->c->pix_fmt)
	{
		rv->frame = avcodec_alloc_frame();
		avpicture_fill((AVPicture *) rv->frame,
		               MEM_mallocN(avpicture_get_size(
		                               rv->c->pix_fmt,
		                               round_up(width, 16), height),
		                           "alloc proxy output frame"),
		               rv->c->pix_fmt, round_up(width, 16), height);

		rv->sws_ctx = sws_getContext(
		        st->codec->width,
		        st->codec->height,
		        st->codec->pix_fmt,
		        width, height,
		        rv->c->pix_fmt,
		        SWS_FAST_BILINEAR | SWS_PRINT_INFO,
		        NULL, NULL, NULL);
	}

	if (avformat_write_header(rv->of, NULL) < 0) {
		fprintf(stderr, "Couldn't set output parameters? "
		        "Proxy not built!\n");
		av_free(rv->of);
		return 0;
	}

	return rv;
}
Example no. 10
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    AVFormatParameters ap;
    const AVOption *opt;
    AVMetadataTag *t = NULL;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    //start_time = 0.0;
    char mp_filename[256]="mp:";
    memset(&ap, 0, sizeof(AVFormatParameters));

    //demux_lavf_find_geodata(demuxer);

    stream_seek(demuxer->stream, 0);

    int filepos=stream_tell(demuxer->stream);
    
    avfc = avformat_alloc_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (user_correct_pts != 0)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    /* if (index_mode == 0) */
    /*     avfc->flags |= AVFMT_FLAG_IGNIDX; */

    ap.prealloced_context = 1;
#if 0
    if(opt_probesize) {
        opt = av_set_int(avfc, "probesize", opt_probesize);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        opt = av_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if(opt_avopt){
        if(parse_avopts(avfc, opt_avopt) < 0){
            mp_msg(MSGT_HEADER,MSGL_ERR, "Your options /%s/ look like gibberish to me pal\n", opt_avopt);
            return NULL;
        }
    }
#endif
    if(demuxer->stream->url) {
        if (!strncmp(demuxer->stream->url, "ffmpeg://rtsp:", 14))
            av_strlcpy(mp_filename, demuxer->stream->url + 9, sizeof(mp_filename));
        else
            av_strlcat(mp_filename, demuxer->stream->url, sizeof(mp_filename));
    } else
        av_strlcat(mp_filename, "foobar.dummy", sizeof(mp_filename));

    priv->pb = av_alloc_put_byte(priv->buffer, BIO_BUFFER_SIZE, 0,
                                 demuxer, mp_read, NULL, mp_seek);
    priv->pb->read_seek = mp_read_seek;
    priv->pb->is_streamed = !demuxer->stream->end_pos || (demuxer->stream->flags & MP_STREAM_SEEK) != MP_STREAM_SEEK;
    filepos=stream_tell(demuxer->stream);
    if(av_open_input_stream(&avfc, priv->pb, mp_filename, priv->avif, &ap)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }
    filepos=stream_tell(demuxer->stream);
    priv->avfc= avfc;
    if(av_find_stream_info(avfc) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }
    filepos=stream_tell(demuxer->stream);
    if(!strncmp(avfc->iformat->name,"aac",4))
      get_aac_duration(demuxer,avfc);

    if(!strncmp(avfc->iformat->name,"mp3",4))
       priv->avfc->duration = get_mp3_duration(demuxer) * AV_TIME_BASE;

    /* Add metadata. */
    av_metadata_conv(avfc, NULL, avfc->iformat->metadata_conv);
    while((t = av_metadata_get(avfc->metadata, "", t, AV_METADATA_IGNORE_SUFFIX))){
        demux_info_add(demuxer, t->key, t->value);
    }

    for(i=0; i < avfc->nb_chapters; i++) {
        AVChapter *c = avfc->chapters[i];
        uint64_t start = av_rescale_q(c->start, c->time_base, (AVRational){1,1000});
        uint64_t end   = av_rescale_q(c->end, c->time_base, (AVRational){1,1000});
        t = av_metadata_get(c->metadata, "title", NULL, 0);
        demuxer_add_chapter(demuxer, t ? t->value : NULL, start, end);
    }
    demuxer->ttaflag = 0;
    for(i=0; i<avfc->nb_streams; i++)
        handle_stream(demuxer, avfc, i);
    if(demuxer->matroflag && !demuxer->ttaflag)
      return NULL;
    if(avfc->nb_programs) {
        int p;
        for (p = 0; p < avfc->nb_programs; p++) {
            AVProgram *program = avfc->programs[p];
            t = av_metadata_get(program->metadata, "title", NULL, 0);
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, t ? t->value : "");
        }
    }

    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL;
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    filepos=stream_tell(demuxer->stream);

    int j=0;
    AVCodecContext *codec;
    AVCodec *avc;
    demuxer->audio_info = malloc(100*avfc->nb_streams);
    for(i=0; i<avfc->nb_streams; i++){
      if(demuxer->a_streams[i]){
	char *info = malloc(100);
	codec= ((AVStream *)avfc->streams[i])->codec;
	avc = avcodec_find_decoder(codec->codec_id);
	const char *cn = avc ? avc->name : "unknown";
	char *codec_name = malloc(100);
	strncpy(codec_name,cn,100);
	
	if((codec_name[0]=='d')&&(codec_name[1]=='c')&&(codec_name[2]=='a'))
	  strncpy(codec_name,"dts",100);
	int a;
	for(a=0;codec_name[a];a++) 
	  if(codec_name[a]>='a'&&codec_name[a]<='z')
	    codec_name[a]-=32; 
	sprintf(info, "%s(%dHz %dCh)--%d", codec_name,codec->sample_rate, codec->channels, i);
	free(codec_name);
	memcpy(demuxer->audio_info+j*100,info, 100);
	j++;
	free(info);
	info = NULL;
      }
    }

    j = 0;
    demuxer->sub_info = malloc(100*avfc->nb_streams);
    for(i=0; i<avfc->nb_streams; i++){
      if(demuxer->s_streams[i]){
	char *info = malloc(100);
	AVStream *st= avfc->streams[i];
	AVMetadataTag *lang = av_metadata_get(st->metadata, "language", NULL, 0);
	codec= st->codec;
        if (lang && lang->value)
	  strcpy(info, lang->value);
	else
	  strcpy(info, "unknown");
	sprintf(info, "%s--%d", info,i);
	memcpy(demuxer->sub_info+j*100,info, 100);
	j++;
	free(info);
	info = NULL;
      }
    }

    if(AV_NOPTS_VALUE == priv->avfc->start_time){
        LOGW("priv->avfc->start_time = AV_NOPTS_VALUE");
        demuxer->start_time = 0;
    }else{
        demuxer->start_time = priv->avfc->start_time;
    }

    return demuxer;
}
Example no. 11
MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
    vid_st = av_new_stream(oc, 0);
    if(!vid_st) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
	// FIXME: test & make endian-neutral
	pixfmt = PIX_FMT_RGB565LE;
	break;
    case 24:
	pixfmt = PIX_FMT_RGB24;
	break;
    case 32:
    default: // should never be anything else
	pixfmt = PIX_FMT_RGBA;
	break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;
    linesize = pixsize * w;
    ctx->max_b_frames = 2;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
	ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    // make sure RGB is supported (mostly not)
    if(codec->pix_fmts) {
	const enum PixelFormat *p;
	int64_t mask = 0;
	for(p = codec->pix_fmts; *p != -1; p++) {
	    // may get complaints about 1LL; thus the cast
	    mask |= ((int64_t)1) << *p;
	    if(*p == pixfmt)
		break;
	}
	if(*p == -1) {
	    // if not supported, use a converter to the next best format
	    // this is swscale, the converter used by the output demo
	    enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
	    if(dp == -1)
		dp = codec->pix_fmts[0];
	    if(!(convpic = avcodec_alloc_frame()) ||
	       avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
		avformat_free_context(oc);
		oc = NULL;
		return MRET_ERR_NOMEM;
	    }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
	    converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC,
				       NULL, NULL, NULL);
#else
	    converter = sws_alloc_context();
	    // what a convoluted, inefficient way to set options
	    av_set_int(converter, "sws_flags", SWS_BICUBIC);
	    av_set_int(converter, "srcw", w);
	    av_set_int(converter, "srch", h);
	    av_set_int(converter, "dstw", w);
	    av_set_int(converter, "dsth", h);
	    av_set_int(converter, "src_format", pixfmt);
	    av_set_int(converter, "dst_format", dp);
	    sws_init_context(converter, NULL, NULL);
#endif
	    ctx->pix_fmt = dp;
	}
    }
    if(!codec || avcodec_open(ctx, codec)) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}
Example no. 12
static demuxer_t* demux_open_lavf(demuxer_t *demuxer){
    AVFormatContext *avfc;
    AVFormatParameters ap;
    const AVOption *opt;
    lavf_priv_t *priv= demuxer->priv;
    int i;
    char mp_filename[256]="mp:";

    memset(&ap, 0, sizeof(AVFormatParameters));

    stream_seek(demuxer->stream, 0);

    register_protocol(&mp_protocol);

    avfc = av_alloc_format_context();

    if (opt_cryptokey)
        parse_cryptokey(avfc, opt_cryptokey);
    if (correct_pts)
        avfc->flags |= AVFMT_FLAG_GENPTS;
    if (index_mode == 0)
        avfc->flags |= AVFMT_FLAG_IGNIDX;

    ap.prealloced_context = 1;
    if(opt_probesize) {
        opt = av_set_int(avfc, "probesize", opt_probesize);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option probesize to %u\n", opt_probesize);
    }
    if(opt_analyzeduration) {
        opt = av_set_int(avfc, "analyzeduration", opt_analyzeduration * AV_TIME_BASE);
        if(!opt) mp_msg(MSGT_HEADER,MSGL_ERR, "demux_lavf, couldn't set option analyzeduration to %u\n", opt_analyzeduration);
    }

    if(demuxer->stream->url)
        strncpy(mp_filename + 3, demuxer->stream->url, sizeof(mp_filename)-3);
    else
        strncpy(mp_filename + 3, "foobar.dummy", sizeof(mp_filename)-3);
    
    url_fopen(&priv->pb, mp_filename, URL_RDONLY);
    
    ((URLContext*)(priv->pb->opaque))->priv_data= demuxer->stream;
        
    if(av_open_input_stream(&avfc, priv->pb, mp_filename, priv->avif, &ap)<0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_open_input_stream() failed\n");
        return NULL;
    }

    priv->avfc= avfc;

    if(av_find_stream_info(avfc) < 0){
        mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF_header: av_find_stream_info() failed\n");
        return NULL;
    }

    if(avfc->title    [0]) demux_info_add(demuxer, "name"     , avfc->title    );
    if(avfc->author   [0]) demux_info_add(demuxer, "author"   , avfc->author   );
    if(avfc->copyright[0]) demux_info_add(demuxer, "copyright", avfc->copyright);
    if(avfc->comment  [0]) demux_info_add(demuxer, "comments" , avfc->comment  );
    if(avfc->album    [0]) demux_info_add(demuxer, "album"    , avfc->album    );
//    if(avfc->year        ) demux_info_add(demuxer, "year"     , avfc->year     );
//    if(avfc->track       ) demux_info_add(demuxer, "track"    , avfc->track    );
    if(avfc->genre    [0]) demux_info_add(demuxer, "genre"    , avfc->genre    );

    if(avfc->nb_programs) {
        int p, start=0, found=0;

        if(ts_prog) {
            for(p=0; p<avfc->nb_programs; p++) {
                if(avfc->programs[p]->id == ts_prog) {
                    start = p;
                    found = 1;
                    break;
                }
            }
            if(!found) {
                mp_msg(MSGT_HEADER,MSGL_ERR,"DEMUX_LAVF: program %d doesn't seem to be present\n", ts_prog);
                return NULL;
            }
        }
        p = start;
        do {
            AVProgram *program = avfc->programs[p];
            mp_msg(MSGT_HEADER,MSGL_INFO,"LAVF: Program %d %s\n", program->id, (program->name ? program->name : ""));
            for(i=0; i<program->nb_stream_indexes; i++)
                handle_stream(demuxer, avfc, program->stream_index[i]);
            if(!priv->cur_program && (demuxer->video->sh || demuxer->audio->sh))
                priv->cur_program = program->id;
            p = (p + 1) % avfc->nb_programs;
        } while(p!=start);
    } else
        for(i=0; i<avfc->nb_streams; i++)
            handle_stream(demuxer, avfc, i);
    
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: %d audio and %d video streams found\n",priv->audio_streams,priv->video_streams);
    mp_msg(MSGT_HEADER,MSGL_V,"LAVF: build %d\n", LIBAVFORMAT_BUILD);
    if(!priv->audio_streams) demuxer->audio->id=-2;  // nosound
//    else if(best_audio > 0 && demuxer->audio->id == -1) demuxer->audio->id=best_audio;
    if(!priv->video_streams){
        if(!priv->audio_streams){
	    mp_msg(MSGT_HEADER,MSGL_ERR,"LAVF: no audio or video headers found - broken file?\n");
            return NULL; 
        }
        demuxer->video->id=-2; // audio-only
    } //else if (best_video > 0 && demuxer->video->id == -1) demuxer->video->id = best_video;

    return demuxer;
}
Example no. 13
static pj_status_t h264_preopen(ffmpeg_private *ff)
{
    h264_data *data;
    pjmedia_h264_packetizer_cfg pktz_cfg;
    pj_status_t status;

    data = PJ_POOL_ZALLOC_T(ff->pool, h264_data);
    ff->data = data;

    /* Parse remote fmtp */
    status = pjmedia_vid_codec_h264_parse_fmtp(&ff->param.enc_fmtp,
					       &data->fmtp);
    if (status != PJ_SUCCESS)
	return status;

    /* Create packetizer */
    pktz_cfg.mtu = ff->param.enc_mtu;
#if 0
    if (data->fmtp.packetization_mode == 0)
	pktz_cfg.mode = PJMEDIA_H264_PACKETIZER_MODE_SINGLE_NAL;
    else if (data->fmtp.packetization_mode == 1)
	pktz_cfg.mode = PJMEDIA_H264_PACKETIZER_MODE_NON_INTERLEAVED;
    else
	return PJ_ENOTSUP;
#else
    if (data->fmtp.packetization_mode!=
				PJMEDIA_H264_PACKETIZER_MODE_SINGLE_NAL &&
	data->fmtp.packetization_mode!=
				PJMEDIA_H264_PACKETIZER_MODE_NON_INTERLEAVED)
    {
	return PJ_ENOTSUP;
    }
    /* Better always send in single NAL mode for better compatibility */
    pktz_cfg.mode = PJMEDIA_H264_PACKETIZER_MODE_SINGLE_NAL;
#endif

    status = pjmedia_h264_packetizer_create(ff->pool, &pktz_cfg, &data->pktz);
    if (status != PJ_SUCCESS)
	return status;

    /* Apply SDP fmtp to format in codec param */
    if (!ff->param.ignore_fmtp) {
	status = pjmedia_vid_codec_h264_apply_fmtp(&ff->param);
	if (status != PJ_SUCCESS)
	    return status;
    }

    if (ff->param.dir & PJMEDIA_DIR_ENCODING) {
	pjmedia_video_format_detail *vfd;
	AVCodecContext *ctx = ff->enc_ctx;
	const char *profile = NULL;

	vfd = pjmedia_format_get_video_format_detail(&ff->param.enc_fmt, 
						     PJ_TRUE);

	/* Override generic params after applying SDP fmtp */
	ctx->width = vfd->size.w;
	ctx->height = vfd->size.h;
	ctx->time_base.num = vfd->fps.denum;
	ctx->time_base.den = vfd->fps.num;

	/* Apply profile. */
	ctx->profile  = data->fmtp.profile_idc;
	switch (ctx->profile) {
	case PROFILE_H264_BASELINE:
	    profile = "baseline";
	    break;
	case PROFILE_H264_MAIN:
	    profile = "main";
	    break;
	default:
	    break;
	}
	if (profile &&
	    av_set_string3(ctx->priv_data, "profile", profile, 0, NULL))
	{
	    PJ_LOG(3, (THIS_FILE, "Failed to set H264 profile"));
	}

	/* Apply profile constraint bits. */
	//PJ_TODO(set_h264_constraint_bits_properly_in_ffmpeg);
	if (data->fmtp.profile_iop) {
#if defined(FF_PROFILE_H264_CONSTRAINED)
	    ctx->profile |= FF_PROFILE_H264_CONSTRAINED;
#endif
	}

	/* Apply profile level. */
	ctx->level    = data->fmtp.level;

	/* Limit NAL unit size as we prefer single NAL unit packetization */
	if (!av_set_int(ctx->priv_data, "slice-max-size", ff->param.enc_mtu))
	{
	    PJ_LOG(3, (THIS_FILE, "Failed to set H264 max NAL size to %d",
		       ff->param.enc_mtu));
	}

	/* Apply intra-refresh */
	if (!av_set_int(ctx->priv_data, "intra-refresh", 1))
	{
	    PJ_LOG(3, (THIS_FILE, "Failed to set x264 intra-refresh"));
	}

	/* Misc x264 settings (performance, quality, latency, etc).
	 * Let's just use the x264 predefined preset & tune.
	 */
	if (av_set_string3(ctx->priv_data, "preset", "veryfast", 0, NULL))
	{
	    PJ_LOG(3, (THIS_FILE, "Failed to set x264 preset 'veryfast'"));
	}
	if (av_set_string3(ctx->priv_data, "tune", "animation+zerolatency",
			   0, NULL))
	{
	    PJ_LOG(3, (THIS_FILE, "Failed to set x264 tune 'zerolatency'"));
	}
    }

    if (ff->param.dir & PJMEDIA_DIR_DECODING) {
	AVCodecContext *ctx = ff->dec_ctx;

	/* Apply the "sprop-parameter-sets" fmtp from remote SDP to
	 * extradata of ffmpeg codec context.
	 */
	if (data->fmtp.sprop_param_sets_len) {
	    ctx->extradata_size = data->fmtp.sprop_param_sets_len;
	    ctx->extradata = data->fmtp.sprop_param_sets;
	}
    }

    return PJ_SUCCESS;
}
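
For comparison, a sketch of the same private-option calls written against the newer av_opt_*() API (note the different return convention: these return 0 on success and a negative error code on failure, unlike av_set_int() above, which returns the matched AVOption or NULL):

static int apply_x264_options(AVCodecContext *ctx, int enc_mtu)
{
    int ret = 0;
    /* search_flags = 0: look only at ctx->priv_data itself */
    ret |= av_opt_set(ctx->priv_data, "preset", "veryfast", 0);
    ret |= av_opt_set(ctx->priv_data, "tune", "animation+zerolatency", 0);
    ret |= av_opt_set_int(ctx->priv_data, "slice-max-size", enc_mtu, 0);
    ret |= av_opt_set_int(ctx->priv_data, "intra-refresh", 1, 0);
    return ret;   /* zero only if every option was accepted */
}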