Example #1
AVFormatContext *
fa_libav_open_format(AVIOContext *avio, const char *url, 
		     char *errbuf, size_t errlen)
{
  AVInputFormat *fmt = NULL;
  AVFormatContext *fctx;
  int err;

  avio_seek(avio, 0, SEEK_SET);

  if((err = av_probe_input_buffer(avio, &fmt, url, NULL, 0, 0)) != 0)
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to probe file", err);

  if(fmt == NULL) {
    snprintf(errbuf, errlen, "Unknown file format");
    return NULL;
  }

  if((err = av_open_input_stream(&fctx, avio, url, fmt, NULL)) != 0)
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to open file as input format", err);

  if(av_find_stream_info(fctx) < 0) {
    av_close_input_stream(fctx);
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to handle file contents", err);
  }

  return fctx;
}
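A note on the call sequence above: av_open_input_stream(), av_find_stream_info() and av_close_input_stream() were deprecated and later removed from libavformat. The sketch below shows the same probe-then-open flow against the replacement API; it is a minimal sketch, not Showtime's code, and the name fa_libav_open_format_modern is hypothetical. It assumes the same fa_libav_open_error() helper and an AVIOContext opened elsewhere, exactly as in Example #1.

AVFormatContext *
fa_libav_open_format_modern(AVIOContext *avio, const char *url,
                            char *errbuf, size_t errlen)
{
  AVInputFormat *fmt = NULL;
  AVFormatContext *fctx;
  int err;

  avio_seek(avio, 0, SEEK_SET);

  /* Probe the custom I/O context to pick a demuxer. */
  if((err = av_probe_input_buffer(avio, &fmt, url, NULL, 0, 0)) != 0)
    return fa_libav_open_error(errbuf, errlen, "Unable to probe file", err);

  /* Attach the AVIOContext before avformat_open_input() so the demuxer
     reads from it instead of opening the URL on its own. */
  fctx = avformat_alloc_context();
  if(fctx == NULL) {
    snprintf(errbuf, errlen, "Out of memory");
    return NULL;
  }
  fctx->pb = avio;

  /* On failure avformat_open_input() frees the context itself. */
  if((err = avformat_open_input(&fctx, url, fmt, NULL)) != 0)
    return fa_libav_open_error(errbuf, errlen,
                               "Unable to open file as input format", err);

  if((err = avformat_find_stream_info(fctx, NULL)) < 0) {
    avformat_close_input(&fctx);
    return fa_libav_open_error(errbuf, errlen,
                               "Unable to handle file contents", err);
  }

  return fctx;
}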
Example #2
File: fa_libav.c Project: Allba/showtime
AVFormatContext *
fa_libav_open_format(AVIOContext *avio, const char *url, 
		     char *errbuf, size_t errlen, const char *mimetype)
{
  AVInputFormat *fmt = NULL;
  AVFormatContext *fctx;
  int err;

  avio_seek(avio, 0, SEEK_SET);
  if(mimetype != NULL) {
    int i;

    for(i = 0; i < sizeof(mimetype2fmt) / sizeof(mimetype2fmt[0]); i++) {
      if(!strcmp(mimetype, mimetype2fmt[i].mimetype)) {
	fmt = av_find_input_format(mimetype2fmt[i].fmt);
	break;
      }
    }
    if(fmt == NULL)
      TRACE(TRACE_DEBUG, "probe", "Don't know mimetype %s, probing instead",
	    mimetype);
  }

  if(fmt == NULL) {

    if((err = av_probe_input_buffer(avio, &fmt, url, NULL, 0, 0)) != 0)
      return fa_libav_open_error(errbuf, errlen,
				 "Unable to probe file", err);

    if(fmt == NULL) {
      snprintf(errbuf, errlen, "Unknown file format");
      return NULL;
    }
  }

  fctx = avformat_alloc_context();
  fctx->pb = avio;

  if((err = avformat_open_input(&fctx, url, fmt, NULL)) != 0) {
    if(mimetype != NULL)
      return fa_libav_open_format(avio, url, errbuf, errlen, NULL);
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to open file as input format", err);
  }

  if(av_find_stream_info(fctx) < 0) {
    av_close_input_stream(fctx);
    if(mimetype != NULL)
      return fa_libav_open_format(avio, url, errbuf, errlen, NULL);
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to handle file contents", err);
  }

  return fctx;
}
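Both this variant and the one in Example #12 walk a mimetype2fmt table that the snippets do not include. Its shape follows from the loop itself: each entry pairs a MIME type with a libavformat demuxer short name that is handed to av_find_input_format(). The table below is only an illustrative sketch; the concrete entries are assumptions, not the project's actual mapping.

static const struct {
  const char *mimetype;   /* MIME type reported by the transport layer */
  const char *fmt;        /* demuxer short name for av_find_input_format() */
} mimetype2fmt[] = {
  { "audio/mpeg",  "mp3"  },
  { "audio/ogg",   "ogg"  },
  { "audio/flac",  "flac" },
  { "audio/x-wav", "wav"  },
};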
Example #3
BOOL DecodeCDInit(PLAYERINFO* playerinfo)
{
	PLAYERDECODE* decode = &playerinfo->decode;
	CDTRACK* cdtrack = &playerinfo->cdtrack;
	CDTrackInit(cdtrack);
	
	decode->io = avio_alloc_context(NULL, 0, 0, cdtrack, ReadCDPacket, NULL, SeekCDPacket);
	if (decode->io != NULL)
	{
		AVInputFormat* input = NULL;
		int ret = av_probe_input_buffer(decode->io, &input, NULL, NULL, 0, 0);
		if (ret == 0)
		{
			decode->format = avformat_alloc_context();
			if (decode->format != NULL)
			{
				decode->format->pb = decode->io;
				ret = avformat_open_input(&decode->format, NULL, input, NULL);				
				if (ret == 0)
				{		
					ret = avformat_find_stream_info(decode->format, NULL);
					if (ret >= 0)
					{			
						int index = -1;
						for (unsigned int i = 0; i < decode->format->nb_streams; i++)
						{
							if (decode->format->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
							{
								decode->context = decode->format->streams[i]->codec;
								index = i;
								break;
							}
						}

						if (index > -1)
						{
							decode->codec = avcodec_find_decoder(decode->context->codec_id);
							if (decode->codec != NULL)
							{
								ret = avcodec_open2(decode->context, decode->codec, NULL);
								if (ret == 0)
								{					
									av_init_packet(&decode->packet);
									return TRUE;
								}					
							}				
						}			
					}
				}
			}
		}
	}

	return FALSE;
}
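Example #3 feeds libavformat through a fully custom AVIOContext: avio_alloc_context() receives the ReadCDPacket and SeekCDPacket callbacks plus the CDTRACK pointer as the opaque argument, and av_probe_input_buffer() then probes whatever those callbacks return. The snippet does not show the callbacks, so the sketch below only illustrates the shapes libavformat expects; the CDTrackSketch struct and its fields are assumptions, not the project's real CDTRACK.

#include <stdint.h>
#include <stdio.h>
#include <string.h>
#include <libavformat/avio.h>
#include <libavutil/error.h>

/* Hypothetical in-memory view of a ripped CD track, for illustration only. */
typedef struct {
    uint8_t *data;   /* track bytes */
    int64_t  size;   /* total byte count */
    int64_t  pos;    /* current read position */
} CDTrackSketch;

/* Read callback: fill buf with up to buf_size bytes and return the count,
   or AVERROR_EOF when the track is exhausted (older releases used 0). */
static int ReadCDPacketSketch(void *opaque, uint8_t *buf, int buf_size)
{
    CDTrackSketch *t = opaque;   /* the opaque pointer given to avio_alloc_context() */
    int64_t remaining = t->size - t->pos;
    if (remaining <= 0)
        return AVERROR_EOF;
    if (buf_size > remaining)
        buf_size = (int)remaining;
    memcpy(buf, t->data + t->pos, buf_size);
    t->pos += buf_size;
    return buf_size;
}

/* Seek callback: honour SEEK_SET/SEEK_CUR/SEEK_END plus AVSEEK_SIZE, which asks
   for the total stream size instead of moving the position. */
static int64_t SeekCDPacketSketch(void *opaque, int64_t offset, int whence)
{
    CDTrackSketch *t = opaque;
    if (whence == AVSEEK_SIZE)
        return t->size;
    if (whence == SEEK_CUR)
        offset += t->pos;
    else if (whence == SEEK_END)
        offset += t->size;
    if (offset < 0 || offset > t->size)
        return AVERROR(EINVAL);
    t->pos = offset;
    return offset;
}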
Example #4
File: hls.c Project: yfli/ffmpeg-hls
static int hls_read_header(AVFormatContext *s)
{
    HLSContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    c->interrupt_callback = &s->interrupt_callback;

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        AVProgram * program = NULL;
        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", v->segments[0]->url);
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        v->ctx->pb       = &v->pb;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;

        v->stream_offset = stream_offset;
        v->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER;
        ret = avformat_find_stream_info(v->ctx, NULL);
        if (ret < 0)
            goto fail;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);

        /* Create new AVprogram for variant i */
        program = av_new_program(s, i);
        if ( !program )
            goto fail;
        av_dict_set(&program->metadata, "variant_bitrate", bitrate_str, 0);
        program->bitrate = v->bandwidth;

        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = avformat_new_stream(s, NULL);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            ff_program_add_stream_index(s, i, st->index);
            st->id = i;
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str,
                                 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
Example #5
bool youku_demux::open(boost::any ctx)
{
	av_register_all();
	avformat_network_init();

	// Get the parameters passed in.
	m_youku_demux_data = boost::any_cast<youku_demux_data>(ctx);
	BOOST_ASSERT(m_youku_demux_data.type == MEDIA_TYPE_YK);

	// Initialize the youku source.
	m_source_ctx = alloc_media_source(MEDIA_TYPE_YK,
		m_youku_demux_data.youku_url.c_str(), m_youku_demux_data.youku_url.size(), 0);

	// Create m_format_ctx.
	m_format_ctx = avformat_alloc_context();
	if (!m_format_ctx)
		goto FAILED_FLG;

	// Set parameters.
	m_format_ctx->flags = AVFMT_FLAG_GENPTS;
	m_format_ctx->interrupt_callback.callback = decode_interrupt_cb;
	m_format_ctx->interrupt_callback.opaque = (void*)this;

	m_source_ctx->init_source = yk_init_source;
	m_source_ctx->read_data = yk_read_data;
	m_source_ctx->read_seek = yk_read_seek;
	m_source_ctx->close = yk_close;
	m_source_ctx->destory = yk_destory;

	// Bail out if initialization fails.
	if (m_source_ctx->init_source(m_source_ctx) < 0)
		goto FAILED_FLG;

	int ret = 0;
	AVInputFormat *iformat = NULL;
	m_io_buffer = (unsigned char*)av_malloc(IO_BUFFER_SIZE);
	if (!m_io_buffer)
	{
		std::cerr << "Create buffer failed!\n";
		goto FAILED_FLG;
	}

	// Allocate the I/O context.
	m_avio_ctx = avio_alloc_context(m_io_buffer,
		IO_BUFFER_SIZE, 0, (void*)this, read_data, NULL, seek_data);
	if (!m_avio_ctx)
	{
		std::cerr << "Create io context failed!\n";
		goto FAILED_FLG;
	}
	m_avio_ctx->write_flag = 0;

	ret = av_probe_input_buffer(m_avio_ctx, &iformat, "", NULL, 0, 0);
	if (ret < 0)
	{
		std::cerr << "av_probe_input_buffer call failed!\n";
		goto FAILED_FLG;
	}

	// Open the input media stream.
	m_format_ctx->pb = m_avio_ctx;
	ret = avformat_open_input(&m_format_ctx, "", iformat, NULL);
	if (ret < 0)
	{
		std::cerr << "avformat_open_input call failed!\n";
		goto FAILED_FLG;
	}

	ret = avformat_find_stream_info(m_format_ctx, NULL);
	if (ret < 0)
		goto FAILED_FLG;

	// Success: everything is initialized, so do not fall through to the cleanup below.
	return true;

FAILED_FLG:
	// On error, release all resources.
	if (m_source_ctx)
	{
		free_media_source(m_source_ctx);
		m_source_ctx = NULL;
	}
	if (m_format_ctx)
	{
		avformat_close_input(&m_format_ctx);
		m_format_ctx = NULL;
	}

	return false;
}
Example #6
bool OMXReader::open(std::string filename, bool doSkipAvProbe)
{
    
    currentPTS = DVD_NOPTS_VALUE;
    fileName    = filename; 
    speed       = DVD_PLAYSPEED_NORMAL;
    programID     = UINT_MAX;
    AVIOInterruptCB int_cb = { interrupt_cb, NULL };
    
    ClearStreams();
    
    
    int           result    = -1;
    AVInputFormat *iformat  = NULL;
    unsigned char *buffer   = NULL;
    unsigned int  flags     = READ_TRUNCATED | READ_BITRATE | READ_CHUNKED;
    
    if(fileName.substr(0, 8) == "shout://" )
        fileName.replace(0, 8, "http://");
    
    if(fileName.substr(0,6) == "mms://" || fileName.substr(0,7) == "mmsh://" || fileName.substr(0,7) == "mmst://" || fileName.substr(0,7) == "mmsu://" ||
       fileName.substr(0,7) == "http://" || 
       fileName.substr(0,7) == "rtmp://" || fileName.substr(0,6) == "udp://" ||
       fileName.substr(0,7) == "rtsp://" )
    {
        doSkipAvProbe = false;
        // ffmpeg dislikes the useragent from AirPlay urls
        //int idx = fileName.Find("|User-Agent=AppleCoreMedia");
        size_t idx = fileName.find("|");
        if(idx != string::npos)
            fileName = fileName.substr(0, idx);
        
        AVDictionary *d = NULL;
        // Enable seeking if http
        if(fileName.substr(0,7) == "http://")
        {
            av_dict_set(&d, "seekable", "1", 0);
        }
        ofLog(OF_LOG_VERBOSE, "OMXPlayer::OpenFile - avformat_open_input %s ", fileName.c_str());
        result = avformat_open_input(&avFormatContext, fileName.c_str(), iformat, &d);
        if(av_dict_count(d) == 0)
        {
            ofLog(OF_LOG_VERBOSE, "OMXPlayer::OpenFile - avformat_open_input enabled SEEKING ");
            if(fileName.substr(0,7) == "http://")
                avFormatContext->pb->seekable = AVIO_SEEKABLE_NORMAL;
        }
        av_dict_free(&d);
        if(result < 0)
        {
            ofLog(OF_LOG_ERROR, "OMXPlayer::OpenFile - avformat_open_input %s ", fileName.c_str());
            close();
            return false;
        }
    }
    else
    {
        fileObject = new File();
        
        if (!fileObject->open(fileName, flags))
        {
            ofLog(OF_LOG_ERROR, "OMXPlayer::OpenFile - %s ", fileName.c_str());
            close();
            return false;
        }
        
        buffer = (unsigned char*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
        
        avioContext = avio_alloc_context(buffer, FFMPEG_FILE_BUFFER_SIZE, 0, fileObject, read_callback, NULL, seek_callback);
        avioContext->max_packet_size = 6144;
        if(avioContext->max_packet_size)
            avioContext->max_packet_size *= FFMPEG_FILE_BUFFER_SIZE / avioContext->max_packet_size;
        
        if(fileObject->IoControl(IOCTRL_SEEK_POSSIBLE, NULL) == 0)
            avioContext->seekable = 0;
        
        
        av_probe_input_buffer(avioContext, &iformat, fileName.c_str(), NULL, 0, 0);
        
        if(!iformat)
        {
            ofLog(OF_LOG_ERROR, "OMXPlayer::OpenFile - av_probe_input_buffer %s ", fileName.c_str());
            close();
            return false;
        }
        
        //#warning experimental
        //iformat->flags |= AVFMT_SEEK_TO_PTS;
        avFormatContext     = avformat_alloc_context();
        avFormatContext->pb = avioContext;
        
        
        
        
        result = avformat_open_input(&avFormatContext, fileName.c_str(), iformat, NULL);
        if(result < 0)
        {
            close();
            return false;
        }
    }
    
    // set the interrupt callback, appeared in libavformat 53.15.0
    avFormatContext->interrupt_callback = int_cb;
    
    isMatroska = strncmp(avFormatContext->iformat->name, "matroska", 8) == 0; // for "matroska.webm"
    isAVI = strcmp(avFormatContext->iformat->name, "avi") == 0;
    
    // if format can be nonblocking, let's use that
    avFormatContext->flags |= AVFMT_FLAG_NONBLOCK;
    
    // analyse very short to speed up mjpeg playback start
    if (iformat && (strcmp(iformat->name, "mjpeg") == 0) && avioContext->seekable == 0)
        avFormatContext->max_analyze_duration = 500000;
    
    if(/*isAVI || */isMatroska)
        avFormatContext->max_analyze_duration = 0;
    
    
    if(!doSkipAvProbe)
    {
        unsigned long long startTime = ofGetElapsedTimeMillis();
        result = avformat_find_stream_info(avFormatContext, NULL);
        unsigned long long endTime = ofGetElapsedTimeMillis();
        ofLogNotice(__func__) << "avformat_find_stream_info TOOK " << endTime-startTime <<  " MS";
        
        
        
        if(result < 0)
        {
            close();
            return false;
        }
    }
    
    if(!getStreams())
    {
        close();
        return false;
    }
    
    if(fileObject)
    {
        int64_t len = fileObject->GetLength();
        int64_t tim = getStreamLength();
        
        if(len > 0 && tim > 0)
        {
            unsigned rate = len * 1000 / tim;
            unsigned maxrate = rate + 1024 * 1024 / 8;
            if(fileObject->IoControl(IOCTRL_CACHE_SETRATE, &maxrate) >= 0)
                ofLog(OF_LOG_VERBOSE, "OMXPlayer::OpenFile - set cache throttle rate to %u bytes per second", maxrate);
        }
    }
    
    speed       = DVD_PLAYSPEED_NORMAL;
    /*
     if(dump_format)
     av_dump_format(avFormatContext, 0, fileName.c_str(), 0);*/
    
    updateCurrentPTS();
    
    isOpen        = true;
    
    return true;
}
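Example #6 installs an AVIOInterruptCB ({ interrupt_cb, NULL }) on the format context, and Examples #4 and #5 rely on the same mechanism, but none of the snippets show the callback body. libavformat simply polls it during blocking I/O (including the probing above) and aborts when it returns non-zero. Below is a minimal sketch with a hypothetical abort flag and deadline; neither is part of the original code.

#include <stdatomic.h>
#include <time.h>
#include <libavformat/avformat.h>

static atomic_int g_abort;      /* set from another thread to cancel I/O */
static time_t     g_deadline;   /* absolute wall-clock timeout, 0 = none */

/* Return non-zero to make libavformat abort the current blocking operation. */
static int interrupt_cb_sketch(void *opaque)
{
    (void)opaque;               /* AVIOInterruptCB.opaque, unused in this sketch */
    if (atomic_load(&g_abort))
        return 1;
    if (g_deadline != 0 && time(NULL) > g_deadline)
        return 1;
    return 0;
}

/* Attach it before probing/opening, as the examples do. */
static void install_interrupt_cb_sketch(AVFormatContext *fctx)
{
    AVIOInterruptCB cb = { interrupt_cb_sketch, NULL };
    fctx->interrupt_callback = cb;
}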
Example #7
int udpsocket::ts_demux(void)
{
    AVCodec *pVideoCodec[VIDEO_NUM];
    AVCodec *pAudioCodec[AUDIO_NUM];
    AVCodecContext *pVideoCodecCtx[VIDEO_NUM];
    AVCodecContext *pAudioCodecCtx[AUDIO_NUM];
    AVIOContext * pb;
    AVInputFormat *piFmt;
    AVFormatContext *pFmt;
    uint8_t *buffer;
    int videoindex[VIDEO_NUM];
    int audioindex[AUDIO_NUM];
    AVStream *pVst[VIDEO_NUM];
    AVStream *pAst[AUDIO_NUM];
    AVFrame *pVideoframe[VIDEO_NUM];
    AVFrame *pAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframe[AUDIO_NUM];
    AVFrame *pOutAudioframelast[AUDIO_NUM];
    AVPacket pkt;
    int got_picture;
    int video_num[VIDEO_NUM];
    int audio_num[AUDIO_NUM];
    int frame_size;

    //transcodepool
    transcodepool*  pVideoTransPool[VIDEO_NUM];
    transcodepool*  pAudioTransPool[AUDIO_NUM];

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVideoCodec[i] = NULL;
        pVideoCodecCtx[i] =NULL;
        videoindex[i] = -1;
        pVst[i] = NULL;
        video_num[i] = 0;
        pVideoframe[i] = NULL;
        pVideoframe[i] = av_frame_alloc();
        pVideoTransPool[i] = NULL;
    }
    for( int i=0; i<AUDIO_NUM; i++ ){
        pAudioCodec[i] = NULL;
        pAudioCodecCtx[i] = NULL;
        audioindex[i] = -1;
        pAst[i] = NULL;
        audio_num[i] = 0;
        pOutAudioframe[i] = NULL;
        pOutAudioframe[i] = av_frame_alloc();
        pOutAudioframelast[i] = NULL;
        pOutAudioframelast[i] = av_frame_alloc();
        pAudioframe[i] = NULL;
        pAudioframe[i] = av_frame_alloc();
        pAudioTransPool[i] = NULL;
    }
    pb = NULL;
    piFmt = NULL;
    pFmt = NULL;
    buffer = (uint8_t*)av_mallocz(sizeof(uint8_t)*BUFFER_SIZE);
    got_picture = 0;
    frame_size = AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2;

    //encoder
    AVFormatContext *ofmt_ctx = NULL;
    AVPacket enc_pkt;
    AVStream *out_stream;
    AVCodecContext *enc_ctx;
    AVCodec *encoder;

    AVFormatContext *outAudioFormatCtx[AUDIO_NUM];
    AVPacket audio_pkt;
    AVStream *audio_stream[AUDIO_NUM];
    AVCodecContext *AudioEncodeCtx[AUDIO_NUM];
    AVCodec *AudioEncoder[AUDIO_NUM];

    fp_v = fopen("OUT.h264","wb+"); // output file
    fp_a = fopen("audio_out.aac","wb+");

    //FFMPEG
    av_register_all();
    pb = avio_alloc_context(buffer, 4096, 0, NULL, read_data, NULL, NULL);
//    printf("thread %d pid %lu tid %lu\n",index,(unsigned long)getpid(),(unsigned long)pthread_self());
    if (!pb) {
        fprintf(stderr, "avio alloc failed!\n");
        return -1;
    }

    int x = av_probe_input_buffer(pb, &piFmt, "", NULL, 0, 0);
    if (x < 0) {
        printf("probe error: %d",x);
       // fprintf(stderr, "probe failed!\n");
    } else {
        fprintf(stdout, "probe success!\n");
        fprintf(stdout, "format: %s[%s]\n", piFmt->name, piFmt->long_name);
    }
    pFmt = avformat_alloc_context();
    pFmt->pb = pb;

    if (avformat_open_input(&pFmt, "", piFmt, NULL) < 0) {
        fprintf(stderr, "avformat open failed.\n");
        return -1;
    } else {
        fprintf(stdout, "open stream success!\n");
    }
    //pFmt->probesize = 4096 * 2000;
    //pFmt->max_analyze_duration = 5 * AV_TIME_BASE;
    //pFmt->probesize = 2048;
   // pFmt->max_analyze_duration = 1000;
    pFmt->probesize = 2048 * 1000 ;
    pFmt->max_analyze_duration = 2048 * 1000;
    if (avformat_find_stream_info(pFmt,0) < 0) {
        fprintf(stderr, "could not find stream.\n");
        return -1;
    }
    printf("dump format\n");
    av_dump_format(pFmt, 0, "", 0);

    int videox = 0,audiox = 0;
    for (int i = 0; i < pFmt->nb_streams; i++) {
        if(videox == 7 && audiox == 7)
            break;
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videox < 7 ) {
            videoindex[ videox++ ] = i;
        }
        if ( pFmt->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audiox < 7 ) {
            audioindex[ audiox++ ] = i;
        }
    }

    for(int i=0; i<VIDEO_NUM; i++)
        printf("videoindex %d = %d, audioindex %d = %d\n",i , videoindex[i], i ,audioindex[i]);

    if (videoindex[6] < 0 || audioindex[6] < 0) {
        fprintf(stderr, "videoindex=%d, audioindex=%d\n", videoindex[6], audioindex[6]);
        return -1;
    }

    for( int i=0; i<VIDEO_NUM; i++ ){
        pVst[i] = pFmt->streams[videoindex[i]];
        pVideoCodecCtx[i] = pVst[i]->codec;
        pVideoCodec[i] = avcodec_find_decoder(pVideoCodecCtx[i]->codec_id);
        if (!pVideoCodec[i]) {
            fprintf(stderr, "could not find video decoder!\n");
            return -1;
        }
        if (avcodec_open2(pVideoCodecCtx[i], pVideoCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open video codec!\n");
            return -1;
        }
    }

    for( int i=0; i<AUDIO_NUM; i++ ){
        pAst[i] = pFmt->streams[audioindex[i]];
        pAudioCodecCtx[i] = pAst[i]->codec;
        pAudioCodec[i] = avcodec_find_decoder(pAudioCodecCtx[i]->codec_id);
        if (!pAudioCodec[i]) {
            fprintf(stderr, "could not find audio decoder!\n");
            return -1;
        }
        if (avcodec_open2(pAudioCodecCtx[i], pAudioCodec[i], NULL) < 0) {
            fprintf(stderr, "could not open audio codec!\n");
            return -1;
        }
    }

    //video encoder init
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "h264", NULL);
    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(1024*1000);
    AVIOContext *avio_out = NULL;
    avio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags = AVFMT_FLAG_CUSTOM_IO;
    out_stream = avformat_new_stream(ofmt_ctx, NULL);
    if(!out_stream){
        av_log(NULL, AV_LOG_ERROR, "Failed allocating output stream\n");
        return -1;
    }
    enc_ctx = out_stream->codec;
    encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
    enc_ctx->height = pVideoCodecCtx[0]->height;
    enc_ctx->width = pVideoCodecCtx[0]->width;
    enc_ctx->sample_aspect_ratio = pVideoCodecCtx[0]->sample_aspect_ratio;
    enc_ctx->pix_fmt = encoder->pix_fmts[0];
    out_stream->time_base = pVst[0]->time_base;
//    out_stream->time_base.num = 1;
//    out_stream->time_base.den = 25;
    enc_ctx->me_range = 16;
    enc_ctx->max_qdiff = 4;
    enc_ctx->qmin = 25;
    enc_ctx->qmax = 40;
    enc_ctx->qcompress = 0.6;
    enc_ctx->refs = 3;
    enc_ctx->bit_rate = 1000000;
    int re = avcodec_open2(enc_ctx, encoder, NULL);
    if (re < 0) {
        av_log(NULL, AV_LOG_ERROR, "Cannot open video encoder for stream \n");
        return re;
    }

    if(ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        enc_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
    re = avformat_write_header(ofmt_ctx, NULL);
    if(re < 0){
        av_log(NULL, AV_LOG_ERROR, "Error occurred when opening output file\n");
        return re;
    }

    //audio encoder
    for( int i=0; i<AUDIO_NUM; i++){
        outAudioFormatCtx[i] = NULL;
//        audio_pkt = NULL;
        audio_stream[i] = NULL;
        AudioEncodeCtx[i] = NULL;
        AudioEncoder[i] = NULL;
    }
    const char* out_audio_file = "transcodeaudio.aac";          //Output URL

    //Method 1.
    outAudioFormatCtx[0] = avformat_alloc_context();
    outAudioFormatCtx[0]->oformat = av_guess_format(NULL, out_audio_file, NULL);
    AVIOContext *avio_audio_out = NULL;
    avio_audio_out = avio_alloc_context(outbuffer, 1024*1000, 0, NULL, NULL, write_buffer,NULL);
    if(avio_audio_out == NULL){
        printf("avio_out error\n");
        return -1;
    }
    outAudioFormatCtx[0]->pb = avio_audio_out;
    //Method 2.
    //avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
    //fmt = pFormatCtx->oformat;

    //Open output URL
    if (avio_open(&outAudioFormatCtx[0]->pb,out_audio_file, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file!\n");
        return -1;
    }

    //Show some information
    av_dump_format(outAudioFormatCtx[0], 0, out_audio_file, 1);

    AudioEncoder[0] = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!AudioEncoder[0]){
        printf("Can not find encoder!\n");
        return -1;
    }
    audio_stream[0] = avformat_new_stream(outAudioFormatCtx[0], AudioEncoder[0]);
    if (audio_stream[0]==NULL){
        return -1;
    }
    AudioEncodeCtx[0] = audio_stream[0]->codec;
    AudioEncodeCtx[0]->codec_id =  outAudioFormatCtx[0]->oformat->audio_codec;
    AudioEncodeCtx[0]->codec_type = AVMEDIA_TYPE_AUDIO;
    AudioEncodeCtx[0]->sample_fmt = AV_SAMPLE_FMT_S16;
    AudioEncodeCtx[0]->sample_rate= 48000;//44100
    AudioEncodeCtx[0]->channel_layout=AV_CH_LAYOUT_STEREO;
    AudioEncodeCtx[0]->channels = av_get_channel_layout_nb_channels(AudioEncodeCtx[0]->channel_layout);
    AudioEncodeCtx[0]->bit_rate = 64000;//64000
    /** Allow the use of the experimental AAC encoder */
    AudioEncodeCtx[0]->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;

    /** Set the sample rate for the container. */
    audio_stream[0]->time_base.den = pAudioCodecCtx[0]->sample_rate;
    audio_stream[0]->time_base.num = 1;

    if (avcodec_open2(AudioEncodeCtx[0], AudioEncoder[0],NULL) < 0){
        printf("Failed to open encoder!\n");
        return -1;
    }

    av_samples_get_buffer_size(NULL, AudioEncodeCtx[0]->channels,AudioEncodeCtx[0]->frame_size,AudioEncodeCtx[0]->sample_fmt, 1);

    //uint8_t samples[AVCODEC_MAX_AUDIO_FRAME_SIZE*3/2];
    av_init_packet(&pkt);
    av_init_packet(&audio_pkt);
    av_init_packet(&enc_pkt);
    AVAudioFifo *af = NULL;
    SwrContext *resample_context = NULL;
    long long pts = 0;
    /** Initialize the resampler to be able to convert audio sample formats. */
//    if (init_resampler(input_codec_context, output_codec_context,
//                       &resample_context))
    for(int i=0; i<1; i++){
        printf("work \n");
        printf(" samplerate input = %d , samplerate output = %d\n",pAudioCodecCtx[i]->sample_rate, AudioEncodeCtx[i]->sample_rate);
        resample_context = swr_alloc_set_opts(NULL, av_get_default_channel_layout(AudioEncodeCtx[i]->channels),
                                                          AudioEncodeCtx[i]->sample_fmt,
                                                          AudioEncodeCtx[i]->sample_rate,
                                                          av_get_default_channel_layout(pAudioCodecCtx[i]->channels),
                                                          pAudioCodecCtx[i]->sample_fmt,
                                                          pAudioCodecCtx[i]->sample_rate,
                                                          0, NULL);
        swr_init(resample_context);
    }
    af = av_audio_fifo_alloc(AudioEncodeCtx[0]->sample_fmt, AudioEncodeCtx[0]->channels, 1);
    if(af == NULL)
    {
        printf("error af \n");
        return -1;
    }

    while(1) {
        if (av_read_frame(pFmt, &pkt) >= 0) {
            for( int i=0; i<1; i++ ){
                if (pkt.stream_index == videoindex[i]) {
//                    av_frame_free(&pframe);
                    avcodec_decode_video2(pVideoCodecCtx[i], pVideoframe[i], &got_picture, &pkt);
                    if (got_picture) {
                        if(videoindex[i] == 0){
//                            m_tsRecvPool->write_buffer(pkt.data, pkt.size);
                            pVideoframe[i]->pts = av_frame_get_best_effort_timestamp(pVideoframe[i]);
                            pVideoframe[i]->pict_type = AV_PICTURE_TYPE_NONE;
//                            printf("videoframesize0 = %d, size1 = %d, size2 = %d, size3 = %d, size4 = %d,format = %d\n",pVideoframe[i]->linesize[0],
//                                    pVideoframe[i]->linesize[1],pVideoframe[i]->linesize[2],pVideoframe[i]->linesize[3],pVideoframe[i]->linesize[4],pVideoframe[i]->format);
//                            pVideoTransPool[i]->PutFrame( pVideoframe[i] ,i);
                            int enc_got_frame = 0;
                            /*  ffmpeg encoder */
                            enc_pkt.data = NULL;
                            enc_pkt.size = 0;
                            av_init_packet(&enc_pkt);
                            re = avcodec_encode_video2(ofmt_ctx->streams[videoindex[i]]->codec, &enc_pkt,
                                    pVideoframe[i], &enc_got_frame);
//                            printf("enc_got_frame =%d, re = %d \n",enc_got_frame, re);
                            printf("video Encode 1 Packet\tsize:%d\tpts:%lld\n",enc_pkt.size,enc_pkt.pts);
                            /* prepare packet for muxing */
//                            fwrite(enc_pkt.data,enc_pkt.size, 1, fp_v);
                        }
//                        printf(" video %d decode %d num\n", i, video_num[i]++);
                        break;
                    }

                 }else if (pkt.stream_index == audioindex[i]) {
                    if (avcodec_decode_audio4(pAudioCodecCtx[i], pAudioframe[i], &frame_size, &pkt) >= 0) {
                        if (i == 0){

//                                fwrite(pAudioframe[i]->data[0],pAudioframe[i]->linesize[0], 1, fp_a);
//                                printf("index = %d audio %d decode %d num\n", index, i, audio_num[i]++);
                            uint8_t *converted_input_samples = NULL;
                            converted_input_samples = (uint8_t *)calloc(AudioEncodeCtx[i]->channels, sizeof(*converted_input_samples));
                            av_samples_alloc(&converted_input_samples, NULL, AudioEncodeCtx[i]->channels, pAudioframe[i]->nb_samples, AudioEncodeCtx[i]->sample_fmt, 0);
                                        int error = 0;
                            if((error = swr_convert(resample_context, &converted_input_samples, pAudioframe[i]->nb_samples,
                                                   (const uint8_t**)pAudioframe[i]->extended_data, pAudioframe[i]->nb_samples))<0){
                                printf("error  : %d\n",error);
                            }
//                            av_audio_fifo_realloc(af, av_audio_fifo_size(af) + pAudioframe[i]->nb_samples);
                            av_audio_fifo_write(af, (void **)&converted_input_samples, pAudioframe[i]->nb_samples);
//                            fwrite(pkt.data,pkt.size, 1, fp_a);
//                            pAudioframe[i]->data[0] = frame_buf;
//                            init_converted_samples(&converted_input_samples, output_codec_context, pAudioframe[i]->nb_samples);

                            /** Initialize temporary storage for one output frame. */
//                            printf("pkt.size = %d , pkt.pts = %d ,pkt.dts = %d\n",pkt.size, pkt.pts, pkt.dts);
//                            printf("framesize = %d, audioframesize = %d\n", pAudioframe[i]->nb_samples, frame_size);

//                            pOutAudioframe[i]->pict_type = AV_PICTURE_TYPE_NONE;
                            int got_frame=0;
                            //Encode
//                            av_init_packet(&audio_pkt);
//                            audio_pkt.data = NULL;
//                            audio_pkt.size = 0;
//                            avcodec_encode_audio2(AudioEncodeCtx[0], &audio_pkt, pOutAudioframe[i], &got_frame);
//                            printf("Encode 1 Packet\tsize:%d\tpts:%lld\n", audio_pkt.size, audio_pkt.pts);
                            while(av_audio_fifo_size(af) >= AudioEncodeCtx[i]->frame_size){
                                int frame_size = FFMIN(av_audio_fifo_size(af),AudioEncodeCtx[0]->frame_size);
                                pOutAudioframe[i]->nb_samples =  frame_size;
                                pOutAudioframe[i]->channel_layout = AudioEncodeCtx[0]->channel_layout;
                                pOutAudioframe[i]->sample_rate = AudioEncodeCtx[0]->sample_rate;
                                pOutAudioframe[i]->format = AudioEncodeCtx[0]->sample_fmt;

                                av_frame_get_buffer(pOutAudioframe[i], 0);
                                av_audio_fifo_read(af, (void **)&pOutAudioframe[i]->data, frame_size);

                                pOutAudioframe[i]->pts=pts;
                                pts += pOutAudioframe[i]->nb_samples;

                                audio_pkt.data = NULL;
                                audio_pkt.size = 0;
                                av_init_packet(&audio_pkt);
                                avcodec_encode_audio2(AudioEncodeCtx[0], &audio_pkt, pOutAudioframe[i], &got_frame);
                                printf("audio Encode 1 Packet\tsize:%d\tpts:%lld\n", audio_pkt.size, audio_pkt.pts);
                                fwrite(audio_pkt.data,audio_pkt.size, 1, fp_a);
                            }
                        }
//                        if(i == 0){
//                            fwrite(pkt.data,pkt.size, 1, fp_a);
//                        }
//                        printf("index = %d audio %d decode %d num\n", index, i, audio_num[i]++);
                        break;
                    }
                }
            }
            av_free_packet(&pkt);
            av_free_packet(&enc_pkt);
        } else {
            // No more packets (EOF or read error): stop demuxing.
            break;
        }
    }

    av_free(buffer);
    for(int i=0; i<VIDEO_NUM; i++)
        av_free(pVideoframe[i]);

    for(int i=0; i<AUDIO_NUM; i++)
        av_free(pAudioframe[i]);

    return 0;

}
Example #8
static int applehttp_read_header(AVFormatContext *s, AVFormatParameters *ap)
{
    AppleHTTPContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
    }

    /* Open the demuxer for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        if (v->n_segments == 0)
            continue;

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;

        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0)
            goto fail;
        ret = av_open_input_stream(&v->ctx, &v->pb, v->segments[0]->url,
                                   in_fmt, NULL);
        if (ret < 0)
            goto fail;
        v->stream_offset = stream_offset;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);
        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = av_new_stream(s, i);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            av_metadata_set2(&st->metadata, "variant_bitrate", bitrate_str, 0);
        }
        stream_offset += v->ctx->nb_streams;
    }

    c->first_packet = 1;

    return 0;
fail:
    free_variant_list(c);
    return ret;
}
Example #9
static int cmf_parser_next_slice(AVFormatContext *s, int index, int first)
{
    int i, j, ret=-1;
    struct cmf *bs = s->priv_data;
    struct cmfvpb *oldvpb = bs->cmfvpb;
    struct cmfvpb *newvpb = NULL;
    AVInputFormat *in_fmt = NULL;
    av_log(s, AV_LOG_INFO, "cmf_parser_next_slice:%d\n", index);
    ret = cmfvpb_dup_pb(s->pb, &newvpb, index);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "cmfvpb_dup_pb failed %s---%d [%d]\n",__FUNCTION__,__LINE__,ret);
        return ret;
    }
    if (bs->sctx) {
        avformat_free_context(bs->sctx);
    }
    bs->sctx = NULL;
    if (!(bs->sctx = avformat_alloc_context())) {
        av_log(s, AV_LOG_INFO, "cmf_parser_next_slice avformat_alloc_context failed!\n");
        return AVERROR(ENOMEM);
    }
    ret = av_probe_input_buffer(newvpb->pb, &in_fmt, "", NULL, 0, 0);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "av_probe_input_buffer failed %s---%d [%d]\n",__FUNCTION__,__LINE__,ret);
        if (bs->sctx) {
            avformat_free_context(bs->sctx);
            bs->sctx = NULL;
        }
        return ret;
    }
    bs->sctx->pb = newvpb->pb;
    ret = avformat_open_input(&bs->sctx, "", NULL, NULL);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "cmf_parser_next_slice:avformat_open_input failed \n");
        if (bs->sctx) {
            avformat_free_context(bs->sctx);
            bs->sctx = NULL;
        }
        goto fail;
    }
    if (first) {
        /* Create new AVStreams for each stream in this chip
             Add into the Best format ;
        */
        for (j = 0; j < (int)bs->sctx->nb_streams ; j++)   {
            AVStream *st = av_new_stream(s, 0);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avcodec_copy_context(st->codec, bs->sctx->streams[j]->codec);
        }

        for (i = 0; i < (int)s->nb_streams ; i++)   {
            AVStream *st = s->streams[i];
            AVStream *sst = bs->sctx->streams[i];
            st->id = sst->id;
            if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
                st->time_base.den = sst->time_base.den;
                st->time_base.num = sst->time_base.num;
            }
            if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
                st->time_base.den = sst->time_base.den;
                st->time_base.num = sst->time_base.num;
            }
        }
        s->duration = newvpb->total_duration*1000;
        av_log(s, AV_LOG_INFO, "get duration  [%lld]us [%lld]ms [%lld]s\n", s->duration,newvpb->total_duration,(newvpb->total_duration/1000));
    }
    av_log(s, AV_LOG_INFO, "seek ci->ctx->media_data_offset [%llx]\n", bs->sctx->media_dataoffset);
    ret = avio_seek(bs->sctx->pb, bs->sctx->media_dataoffset, SEEK_SET);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "avio_seek failed %s---%d \n",__FUNCTION__,__LINE__);
        return ret;
    }
    bs->cmfvpb = newvpb;
fail:
    if (oldvpb) {
        cmfvpb_pb_free(oldvpb);
    }
    return ret;
}
Example #10
int AudioConvertFunc(const char *outfilename,int sample_rate,int channels,int sec,const char *inputfilename,HWND mParentHwnd,UINT mMsg)
{
	AVCodec *aCodec =NULL;
	AVPacket *packet = NULL;
	AVFormatContext *pFormatCtx =NULL;
    AVCodecContext *aCodecCtx= NULL;
	ReSampleContext* ResampleCtx=NULL;
	AVFrame *decoded_frame = NULL;
	int datasize;
	//int tempcount = 0;
	//long total_out_size=0;
	int64_t total_in_convert_size = 0;
	int audioConvertProgress = 0;
	int tempAudioConvertProgress;
	unsigned int i;
	int len, ret, buffer_size, count, audio_stream_index = -1, totle_samplenum = 0;

	FILE *outfile = NULL;// *infile;
	head_pama pt;

	int16_t *audio_buffer = NULL;
	int16_t *resamplebuff = NULL;
	int ResampleChange=0;
	int ChannelsChange=0;
	int tempret;

	packet = (AVPacket*)malloc(sizeof(AVPacket));
	if (packet==NULL)
	{
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)(-1));
		return -1;
	}
	packet->data=NULL;

	buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;
	audio_buffer = (int16_t *)av_malloc(buffer_size);
	if (audio_buffer==NULL)
	{
		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)(-2));
		return -2;
	}
	
	av_register_all();

	av_init_packet(packet);
#if 0
	/********** Attempt to break down av_open_input_file step by step **********/
	int ret = 0;
	
	AVFormatParameters ap = { 0 };
	AVDictionary *tmp = NULL;
	AVInputFormat *fmt = NULL;
	AVDictionary **options = NULL;
	if (!pFormatCtx && !(pFormatCtx = avformat_alloc_context()))
		return AVERROR(ENOMEM);
	if (fmt)
		pFormatCtx->iformat = fmt;

	if (options)
		av_dict_copy(&tmp, *options, 0);

	if ((ret = av_opt_set_dict(pFormatCtx, &tmp)) < 0)
		goto fail;
	
	AVDictionary *tmp = NULL;

	if (!pFormatCtx && !(pFormatCtx = avformat_alloc_context()))
		return AVERROR(ENOMEM);

	int ret;
	AVProbeData pd = {inputfilename, NULL, 0};

	if (pFormatCtx->pb) {
		pFormatCtx->flags |= AVFMT_FLAG_CUSTOM_IO;
		if (!pFormatCtx->iformat)
			return av_probe_input_buffer(pFormatCtx->pb, &pFormatCtx->iformat, inputfilename, pFormatCtx, 0, 0);
		else if (pFormatCtx->iformat->flags & AVFMT_NOFILE)
			av_log(pFormatCtx, AV_LOG_WARNING, "Custom AVIOContext makes no sense and "
			"will be ignored with AVFMT_NOFILE format.\n");
		return 0;
	}

	if ( (pFormatCtx->iformat && pFormatCtx->iformat->flags & AVFMT_NOFILE) ||
		(!pFormatCtx->iformat && (pFormatCtx->iformat = av_probe_input_format(&pd, 0))))
		return 0;

	URLContext *h;
	int err;

	err = ffurl_open(&h, inputfilename, AVIO_RDONLY);
	if (err < 0)
		return err;
	err = ffio_fdopen(pFormatCtx, h);
	if (err < 0) {
		ffurl_close(h);
		return err;
	}

	if (pFormatCtx->iformat)
		return 0;
	av_probe_input_buffer(pFormatCtx->pb, &pFormatCtx->iformat, inputfilename, pFormatCtx, 0, 0);


	if (pFormatCtx->iformat->flags & AVFMT_NEEDNUMBER) {
		if (!av_filename_number_test(inputfilename)) {
			ret = AVERROR(EINVAL);
			goto fail;
		}
	}

	pFormatCtx->duration = pFormatCtx->start_time = AV_NOPTS_VALUE;
	av_strlcpy(pFormatCtx->filename, inputfilename ? inputfilename : "", sizeof(pFormatCtx->filename));

	/* allocate private data */
	if (pFormatCtx->iformat->priv_data_size > 0) {
		if (!(pFormatCtx->priv_data = av_mallocz(pFormatCtx->iformat->priv_data_size))) {
			ret = AVERROR(ENOMEM);
			goto fail;
		}
		if (pFormatCtx->iformat->priv_class) {
			*(const AVClass**)pFormatCtx->priv_data = pFormatCtx->iformat->priv_class;
			av_opt_set_defaults(pFormatCtx->priv_data);
			if ((ret = av_opt_set_dict(pFormatCtx->priv_data, &tmp)) < 0)
				goto fail;
		}
	}

	/* e.g. AVFMT_NOFILE formats will not have a AVIOContext */
	if (pFormatCtx->pb)
		ff_id3v2_read(pFormatCtx, ID3v2_DEFAULT_MAGIC);

	if (!(pFormatCtx->flags&AVFMT_FLAG_PRIV_OPT) && pFormatCtx->iformat->read_header)
		if ((ret = pFormatCtx->iformat->read_header(pFormatCtx, &ap)) < 0)
			goto fail;

	if (!(pFormatCtx->flags&AVFMT_FLAG_PRIV_OPT) && pFormatCtx->pb && !pFormatCtx->data_offset)
		pFormatCtx->data_offset = avio_tell(pFormatCtx->pb);

	pFormatCtx->raw_packet_buffer_remaining_size = RAW_PACKET_BUFFER_SIZE;

	if (options) {
		av_dict_free(options);
		*options = tmp;
	}
	return 0;

fail:
	av_dict_free(&tmp);
	if (pFormatCtx->pb && !(pFormatCtx->flags & AVFMT_FLAG_CUSTOM_IO))
		avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);
	pFormatCtx = NULL;
	return ret;

	return err;
	/********** Attempt to break down av_open_input_file step by step **********/
	//pFormatCtx = avformat_alloc_context();
#endif
	ret = av_open_input_file(&pFormatCtx, inputfilename, NULL,0, NULL);

	if(ret < 0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}
		
		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)1);
		return 1;  
	}

	ret = av_find_stream_info(pFormatCtx);

	if( ret < 0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)2);
		return 2;
	}

	audio_stream_index=-1;
	for(i=0; i< (signed)pFormatCtx->nb_streams; i++)
	{

		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audio_stream_index < 0)
		{
			audio_stream_index = i;
			break;
		}
	}

	if(audio_stream_index == -1)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)3);
		return 3;
	}

	aCodecCtx = pFormatCtx->streams[audio_stream_index]->codec;
	if (aCodecCtx==NULL)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)4);
		return 4;
	}
	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(!aCodec) 
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		/*if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}*/
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)5);
		return 5;
	}
	//resample init
	if (channels==0)
	{
		channels=aCodecCtx->channels;
	}
	if (sample_rate==0)
	{
		sample_rate=aCodecCtx->sample_rate;
	}
	//if (aCodecCtx->channels!=channels)
	//{
	//	ChannelsChange=1;
	//	ResampleChange=1;
	//}
	if (aCodecCtx->sample_rate!=sample_rate||aCodecCtx->channels!=channels)
	{
		ResampleChange=1;
	}
	if (ResampleChange==1)
	{
		ResampleCtx = av_audio_resample_init(channels,aCodecCtx->channels,sample_rate,aCodecCtx->sample_rate,SAMPLE_FMT_S16,SAMPLE_FMT_S16,16,10,0,1.0);
		if (ResampleCtx==NULL)
		{
			if (audio_buffer!=NULL)
			{
				av_free(audio_buffer);
				audio_buffer=NULL;
			}

			if (packet->data!=NULL)
			{
				av_free_packet(packet);
				packet->data=NULL;
			}
			if (packet!=NULL)
			{
				free(packet);
				packet=NULL;
			}
			if (pFormatCtx!=NULL)
			{
				av_close_input_file(pFormatCtx);
				pFormatCtx=NULL;
			}
			/*if (aCodecCtx!=NULL)
			{
				avcodec_close(aCodecCtx);
				aCodecCtx=NULL;
			}*/
			ResampleChange=0;
			PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)6);
			return 6;
		}
		resamplebuff=(int16_t *)malloc(buffer_size);
		if (resamplebuff==NULL)
		{
			if (audio_buffer!=NULL)
			{
				av_free(audio_buffer);
				audio_buffer=NULL;
			}

			if (packet->data!=NULL)
			{
				av_free_packet(packet);
				packet->data=NULL;
			}
			if (packet!=NULL)
			{
				free(packet);
				packet=NULL;
			}
			if (pFormatCtx!=NULL)
			{
				av_close_input_file(pFormatCtx);
				pFormatCtx=NULL;
			}
			/*if (aCodecCtx!=NULL)
			{
				avcodec_close(aCodecCtx);
				aCodecCtx=NULL;
			}*/
			
			if (ResampleChange==1&&ResampleCtx!=NULL)
			{
				audio_resample_close(ResampleCtx);
				ResampleCtx=NULL;
			}
			PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)7);
			return 7;
		}
	}
	//
	datasize=sec*sample_rate;
	if(avcodec_open(aCodecCtx, aCodec)<0)
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}

		if (ResampleChange==1&&ResampleCtx!=NULL&&resamplebuff!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
			free(resamplebuff);
			resamplebuff=NULL;
		}
		ResampleChange=0;
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)8);
		return 8;
	}

	pt.bits = 16;
	pt.channels = channels;
	pt.rate = sample_rate;

	outfile = fopen(outfilename, "wb");
	if (!outfile) 
	{
		if (audio_buffer!=NULL)
		{
			av_free(audio_buffer);
			audio_buffer=NULL;
		}

		if (packet->data!=NULL)
		{
			av_free_packet(packet);
			packet->data=NULL;
		}
		if (packet!=NULL)
		{
			free(packet);
			packet=NULL;
		}
		if (pFormatCtx!=NULL)
		{
			av_close_input_file(pFormatCtx);
			pFormatCtx=NULL;
		}
		if (aCodecCtx!=NULL)
		{
			avcodec_close(aCodecCtx);
			aCodecCtx=NULL;
		}

		if (ResampleChange==1&&ResampleCtx!=NULL&&resamplebuff!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
			free(resamplebuff);
			resamplebuff=NULL;
		}
		ResampleChange=0;
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)9);
		return 9;
	}

	fseek(outfile,44,SEEK_SET);
    while(av_read_frame(pFormatCtx, packet) >= 0) 
	{
		CheckMessageQueue();
	    if(packet->stream_index == audio_stream_index)
	    {
			//while(packet->size > 0)
			//{
				buffer_size = AVCODEC_MAX_AUDIO_FRAME_SIZE * 100;
				len = avcodec_decode_audio3(aCodecCtx, audio_buffer, &buffer_size, packet);

				if (len < 0) 
				{
					break;
				}

				if(buffer_size > 0)
				{
					//resample
					if (ResampleChange==1)
					{
						int samples=buffer_size/ ((aCodecCtx->channels) * 2);
						int resamplenum= 0;
						resamplenum = audio_resample(ResampleCtx, 
							resamplebuff, 
							audio_buffer, 
							samples);
						count = fwrite(resamplebuff, 2*channels, resamplenum, outfile);
					}
					
					else
					{
						count = fwrite(audio_buffer, 2*aCodecCtx->channels, buffer_size/((aCodecCtx->channels)*2), outfile);
					}
					totle_samplenum += count;
				}
				//tempcount++;
				//total_out_size += count*2*aCodecCtx->channels;
				total_in_convert_size += packet->size;
				tempAudioConvertProgress = 100*total_in_convert_size/(pFormatCtx->file_size);
				if(tempAudioConvertProgress != audioConvertProgress)
				{
					if(tempAudioConvertProgress == 100)
						tempAudioConvertProgress = 99;
					audioConvertProgress = tempAudioConvertProgress;
					tempret = PostMessage(mParentHwnd,mMsg,DECING_TAG,audioConvertProgress);
				}
				if (packet->data!=NULL)
				{
					av_free_packet(packet);
					packet->data=NULL;
				}
				//packet->size -= len;
				//packet->data += len;
			//}
			if (datasize!=0&&totle_samplenum>=datasize)
			{
				break;
			}
	    }
	}
	audioConvertProgress = 100;
	PostMessage(mParentHwnd,mMsg,DECING_TAG,audioConvertProgress);
	fseek(outfile,0,SEEK_SET);
	wav_write_header(outfile, pt, totle_samplenum);
	
	if (outfile!=NULL)
	{
		fclose(outfile);
		outfile=NULL;
	}
    
	if (audio_buffer!=NULL)
	{
		av_free(audio_buffer);
		audio_buffer=NULL;
	}
	
	if (aCodecCtx!=NULL)
	{
		avcodec_close(aCodecCtx);
		aCodecCtx=NULL;
	}
	
	if (packet!=NULL)
	{
		free(packet);//
		packet=NULL;
	}
	
	if (pFormatCtx!=NULL)
	{
		av_close_input_file(pFormatCtx);
		pFormatCtx=NULL;
	}
	
	if (ResampleChange==1)
	{
		if (resamplebuff!=NULL)
		{
			free(resamplebuff);
			resamplebuff=NULL;
		}
		if (ResampleCtx!=NULL)
		{
			audio_resample_close(ResampleCtx);
			ResampleCtx=NULL;
		}
	}
	if (totle_samplenum<=sample_rate*5)
	{
		PostMessage(mParentHwnd,mMsg,FAILED_TAG,(LPARAM)10);
		return 10;
	}
	PostMessage(mParentHwnd,mMsg,FINISH_TAG,NULL);
	return 0;
}
Example #11
static int hls_read_header(AVFormatContext *s)
{
    HLSContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    int only_parser_one_variants=1;
    int parser_start,parser_end;
    //c->interrupt_callback = &s->interrupt_callback;
    c->total_brate = 500 * 1024;
    c->latest_3file_brate = c->total_brate;
    c->latest_1file_brate = c->total_brate;
    s->bit_rate = 0;
    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0) {
        av_log(NULL, AV_LOG_WARNING, "parse_playlist failed ret=%d\n", ret);
        goto fail;
    }

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist 1\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained variants, parse each individual
     * variant playlist. */
    if (c->n_variants > 1 || c->variants[0]->n_segments == 0) {
        for (i = 0; i < c->n_variants; i++) {
            struct variant *v = c->variants[i];
            if ((ret = parse_playlist(c, v->url, v, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist 2\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->n_segments; i++)
            duration += c->variants[0]->segments[i]->duration;
        s->duration = duration * AV_TIME_BASE;
    }
 
    if (only_parser_one_variants) {
        parser_start = select_best_variant(c, c->total_brate);
        parser_end = parser_start + 1;
    } else {
        parser_start = 0;
        parser_end = c->n_variants;
    }
    /* Open the demuxer for each variant */
    for (i = parser_start; i < parser_end; i++) {
        struct variant *v = c->variants[i];
        AVInputFormat *in_fmt = NULL;
        char bitrate_str[20];
        if (v->n_segments == 0)
            continue;

        if (!(v->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        v->index  = i;
        v->needed = 1;
        v->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        v->cur_seq_no = v->start_seq_no;
        if (!v->finished && v->n_segments > 3)
            v->cur_seq_no = v->start_seq_no + v->n_segments - 3;
#ifdef USED_LP_BUF
        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        memset(&v->urllpbuf, 0, sizeof(v->urllpbuf));
        if (url_lpopen_ex(&v->urllpbuf, 0, AVIO_FLAG_READ, read_data, read_data_exseek) == 0) {
            ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, &v->urllpbuf,
                              url_lpread, NULL, NULL);
        } else {
            ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, &v->urllpbuf,
                              read_data, NULL, NULL);
        }
        v->urllpbuf.is_streamed = 1;
        v->urllpbuf.is_slowmedia = 1;
        v->urllpbuf.priv_data = v;
#else
        v->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
        ffio_init_context(&v->pb, v->read_buffer, INITIAL_BUFFER_SIZE, 0, v,
                          read_data, NULL, NULL);
#endif		
        v->pb.seekable = 0;
        ret = av_probe_input_buffer(&v->pb, &in_fmt, v->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            avformat_free_context(v->ctx);
            v->ctx = NULL;
            goto fail;
        }
        v->ctx->pb       = &v->pb;
        ret = avformat_open_input(&v->ctx, v->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;
        if (v->bandwidth <= 0 && v->segments[0]->seg_filesize > 0 && v->segments[0]->duration > 0) {
            v->bandwidth = v->segments[0]->seg_filesize / v->segments[0]->duration;
        }
        v->stream_offset = stream_offset;
        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);
        /* Create new AVStreams for each stream in this variant */
        for (j = 0; j < v->ctx->nb_streams; j++) {
            AVStream *st = av_new_stream(s, 0);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            st->id = i;
            avcodec_copy_context(st->codec, v->ctx->streams[j]->codec);
            if (v->bandwidth)
                av_dict_set(&st->metadata, "variant_bitrate", bitrate_str,
                                 0);
            if (st->codec->bit_rate <= 0)
                st->codec->bit_rate = v->bandwidth / v->ctx->nb_streams;
        }
        v->ctx->bit_rate = v->bandwidth;
        stream_offset += v->ctx->nb_streams;
        s->bit_rate += v->bandwidth;
    }

    c->first_packet = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;
    c->discontinue_pts_interval_ms = 2000;
    s->flags |= AVFMT_FLAG_FILESIZE_NOT_VALID;
    if (s->pb) {
        /* Reset the read state and drop the lp buffer to free its memory. */
        ffio_fdopen_resetlpbuf(s->pb, 0);
        s->pb->flags |= AVIO_FLAG_SIZE_NOTVALID;
        s->file_size = -1;
    }
    return 0;
fail:
    free_variant_list(c);
    
    return ret;
}
Example #12
AVFormatContext *
fa_libav_open_format(AVIOContext *avio, const char *url,
		     char *errbuf, size_t errlen, const char *mimetype,
                     int probe_size, int max_analyze_duration,
		     int fps_probe_frames)
{
  AVInputFormat *fmt = NULL;
  AVFormatContext *fctx;
  int err;

  avio_seek(avio, 0, SEEK_SET);
  if(mimetype != NULL) {
    int i;

    for(i = 0; i < sizeof(mimetype2fmt) / sizeof(mimetype2fmt[0]); i++) {
      if(!strcasecmp(mimetype, mimetype2fmt[i].mimetype)) {
	fmt = av_find_input_format(mimetype2fmt[i].fmt);
	break;
      }
    }
    if(fmt == NULL)
      TRACE(TRACE_DEBUG, "probe", "%s: Don't know mimetype %s, probing instead",
	    url, mimetype);
  }

  if(fmt == NULL) {
    if((err = av_probe_input_buffer(avio, &fmt, url, NULL, 0, probe_size)) != 0)
      return fa_libav_open_error(errbuf, errlen,
				 "Unable to probe file", err);

    if(fmt == NULL) {
      snprintf(errbuf, errlen, "Unknown file format");
      return NULL;
    }
    TRACE(TRACE_DEBUG, "probe", "%s: Probed as %s", url, fmt->name);
  }

  fctx = avformat_alloc_context();
  fctx->pb = avio;
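  /* A value of -1 for either probe tunable keeps libavformat's default. */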
  if(max_analyze_duration != -1)
    fctx->max_analyze_duration = max_analyze_duration;

  if((err = avformat_open_input(&fctx, url, fmt, NULL)) != 0) {
    if(mimetype != NULL) {
      TRACE(TRACE_DEBUG, "libav",
            "Unable to open using mimetype %s, retrying with probe",
            mimetype);
      return fa_libav_open_format(avio, url, errbuf, errlen, NULL, probe_size,
                                  max_analyze_duration, fps_probe_frames);
    }
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to open file as input format", err);
  }

  if(fps_probe_frames != -1)
    fctx->fps_probe_size = fps_probe_frames;

  if(avformat_find_stream_info(fctx, NULL) < 0) {
    avformat_close_input(&fctx);
    if(mimetype != NULL) {
      TRACE(TRACE_DEBUG, "libav",
            "Unable to find stream info using mimetype %s, retrying with probe",
            mimetype);
      return fa_libav_open_format(avio, url, errbuf, errlen, NULL, probe_size,
                                  max_analyze_duration, fps_probe_frames);
    }
    return fa_libav_open_error(errbuf, errlen,
			       "Unable to handle file contents", err);
  }

  return fctx;
}
Example #13
static int cmf_parser_next_slice(AVFormatContext *s, int *index, int first)
{
    int i, j, ret=-1;
    struct cmf *bs = s->priv_data;
    struct cmfvpb *oldvpb = bs->cmfvpb;
    struct cmfvpb *newvpb = NULL;
    AVInputFormat *in_fmt = NULL;
    av_log(s, AV_LOG_INFO, "cmf_parser_next_slice: index=%d\n", *index);
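    /* cmfvpb_dup_pb() prepares an AVIOContext that covers the slice selected
     * by *index. */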
    ret = cmfvpb_dup_pb(s->pb, &newvpb, index);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "cmfvpb_dup_pb failed %s---%d [%d]\n",__FUNCTION__,__LINE__,ret);
        return ret;
    }
    if (bs->sctx) {
        avformat_free_context(bs->sctx);
    }
    bs->sctx = NULL;
    if (!(bs->sctx = avformat_alloc_context())) {
        av_log(s, AV_LOG_INFO, "cmf_parser_next_slice avformat_alloc_context failed!\n");
        return AVERROR(ENOMEM);
    }
    ret = av_probe_input_buffer(newvpb->pb, &in_fmt, "", NULL, 0, 0);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "av_probe_input_buffer failed %s---%d [%d]\n", __FUNCTION__, __LINE__, ret);
        if (bs->sctx) {
            avformat_free_context(bs->sctx);
            bs->sctx = NULL;
        }
        return ret;
    }
    bs->sctx->pb = newvpb->pb;
    ret = avformat_open_input(&bs->sctx, "", NULL, NULL);
    if (ret < 0) {
        av_log(s, AV_LOG_INFO, "cmf_parser_next_slice:avformat_open_input failed \n");
        if (bs->sctx) {
            avformat_free_context(bs->sctx);
            bs->sctx = NULL;
        }
        goto fail;
    }
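    /* For flv slices, run a stream-info pass so codec parameters get filled in. */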
    if (!memcmp(bs->sctx->iformat->name,"flv",3)) {
        ret=av_find_stream_info(bs->sctx);
        av_log(s, AV_LOG_INFO, "xxx-------av_find_stream_info=%d",ret);
    }
    if (first) {
        /* Create new AVStreams for each stream in this slice and add them to
         * the parent format context. */
        if (!memcmp(bs->sctx->iformat->name,"mov",3))
            bs->next_mp4_flag = 1;
        for (j = 0; j < (int)bs->sctx->nb_streams ; j++)   {
            AVStream *st = av_new_stream(s, 0);
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            avcodec_copy_context(st->codec, bs->sctx->streams[j]->codec);
        }
        for (i = 0; i < (int)s->nb_streams ; i++)   {
            AVStream *st = s->streams[i];
            AVStream *sst = bs->sctx->streams[i];
            st->id=sst->id;
            if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
                st->time_base.den =  sst->time_base.den;
                st->time_base.num = sst->time_base.num;
                bs->first_slice_audio_index = i;
            }
            if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
                st->time_base.den = sst->time_base.den;
                st->time_base.num = sst->time_base.num;
                bs->first_mp4_video_base_time_num = sst->time_base.num;
                bs->first_mp4_video_base_time_den = sst->time_base.den;
                bs->first_slice_video_index = i;
            }
        }
        s->duration = newvpb->total_duration * 1000;
        av_log(s, AV_LOG_INFO, "get duration [%lld]us [%lld]ms [%lld]s\n",
               s->duration, newvpb->total_duration, newvpb->total_duration / 1000);
    } else {
        bs->stream_index_changed = 0;
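        /* Some ts slices expose audio/video in a different order than the
         * first slice did; swap the streams back so indexes stay stable. */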
        if (!memcmp(bs->sctx->iformat->name,"mpegts",6)) {
            if(bs->sctx->streams[bs->first_slice_audio_index]->codec->codec_type != s->streams[bs->first_slice_audio_index]->codec->codec_type) {
                bs->stream_index_changed = 1;
                AVStream *temp = NULL;
                AVStream *audio = bs->sctx->streams[bs->first_slice_audio_index];
                temp = bs->sctx->streams[bs->first_slice_video_index];
                bs->sctx->streams[bs->first_slice_video_index] = audio;
                bs->sctx->streams[bs->first_slice_audio_index] = temp;
            }
        }
        if (!memcmp(bs->sctx->iformat->name,"mov",3)) {
            #if 1
            /*
            int audio_index = -1;
            int video_index = -1;
            AVStream *cmf_audio = NULL;
            AVStream *cmf_video = NULL;
            for (i = 0; i < (int)bs->sctx->nb_streams ; i++) {
                if(bs->sctx->streams[i]->codec->codec_type == CODEC_TYPE_AUDIO)
                    cmf_audio = bs->sctx->streams[i];
                if(bs->sctx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO)
                    cmf_video = bs->sctx->streams[i];
            }
            if(cmf_audio && cmf_video) {*/
                for (i = 0; i < (int)s->nb_streams ; i++) {
                    AVStream *st = s->streams[i];
                    AVStream *sst = bs->sctx->streams[i];
                    if (st->codec->codec_type == CODEC_TYPE_AUDIO) {
                        st->time_base.den =  sst->time_base.den;
                        st->time_base.num = sst->time_base.num;
                        //audio_index = i;
                    }
                    if (st->codec->codec_type == CODEC_TYPE_VIDEO) {
                        st->time_base.den = sst->time_base.den;
                        st->time_base.num = sst->time_base.num;
                        //video_index = i;
                    }
                    if(st->codec->extradata)
                        av_freep(&(st->codec->extradata));
                    if(sst->codec->extradata && sst->codec->extradata_size > 0) {
                        st->codec->extradata = av_malloc(sst->codec->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE);
                        if(st->codec->extradata) {
                             memcpy(st->codec->extradata, sst->codec->extradata, sst->codec->extradata_size);
                             memset(((uint8_t *) st->codec->extradata) + sst->codec->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE);
                        }
                    }
                }
                /*
                if(audio_index >= 0 && bs->sctx->streams[bs->first_mp4_audio_index]->codec->codec_type != s->streams[audio_index]->codec->codec_type) {
                    bs->stream_index_changed = 1;
                    AVStream *temp = NULL;
                    AVStream *audio = s->streams[audio_index];
                    temp = s->streams[video_index];
                    s->streams[video_index] = audio;
                    s->streams[audio_index] = temp;
                    av_log(NULL,AV_LOG_INFO,"--------->tao_cmf_index_change first_mp4_audio_index=%d, audio_index=%d, video_index=%d",bs->first_mp4_audio_index,audio_index,video_index);
                }*/
                bs->next_mp4_flag = 1;
                #endif
        }
    }
    if (memcmp(bs->sctx->iformat->name,"flv",3)) {
        ret = avio_seek(bs->sctx->pb, bs->sctx->media_dataoffset, SEEK_SET);
        if (ret < 0) {
            av_log(s, AV_LOG_INFO, "avio_seek failed %s---%d \n",__FUNCTION__,__LINE__);
            return ret;
        }
    }
    bs->cmfvpb = newvpb;
fail:
    if (oldvpb) {
        cmfvpb_pb_free(oldvpb);
    }
    return ret;
}
Example #14
static int hls_read_header(AVFormatContext *s)
{
    URLContext *u = (s->flags & AVFMT_FLAG_CUSTOM_IO) ? NULL : s->pb->opaque;
    HLSContext *c = s->priv_data;
    int ret = 0, i, j, stream_offset = 0;

    c->interrupt_callback = &s->interrupt_callback;

    // if the URL context is good, read important options we must broker later
    if (u && u->prot->priv_data_class) {
        // get the previous user agent & set back to null if string size is zero
        av_freep(&c->user_agent);
        av_opt_get(u->priv_data, "user-agent", 0, (uint8_t**)&(c->user_agent));
        if (c->user_agent && !strlen(c->user_agent))
            av_freep(&c->user_agent);

        // get the previous cookies & set back to null if string size is zero
        av_freep(&c->cookies);
        av_opt_get(u->priv_data, "cookies", 0, (uint8_t**)&(c->cookies));
        if (c->cookies && !strlen(c->cookies))
            av_freep(&c->cookies);

        // get the previous headers & set back to null if string size is zero
        av_freep(&c->headers);
        av_opt_get(u->priv_data, "headers", 0, (uint8_t**)&(c->headers));
        if (c->headers && !strlen(c->headers))
            av_freep(&c->headers);
    }

    if ((ret = parse_playlist(c, s->filename, NULL, s->pb)) < 0)
        goto fail;

    if (c->n_variants == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }
    /* If the playlist only contained playlists (Master Playlist),
     * parse each individual playlist. */
    if (c->n_playlists > 1 || c->playlists[0]->n_segments == 0) {
        for (i = 0; i < c->n_playlists; i++) {
            struct playlist *pls = c->playlists[i];
            if ((ret = parse_playlist(c, pls->url, pls, NULL)) < 0)
                goto fail;
        }
    }

    if (c->variants[0]->playlists[0]->n_segments == 0) {
        av_log(NULL, AV_LOG_WARNING, "Empty playlist\n");
        ret = AVERROR_EOF;
        goto fail;
    }

    /* If this isn't a live stream, calculate the total duration of the
     * stream. */
    if (c->variants[0]->playlists[0]->finished) {
        int64_t duration = 0;
        for (i = 0; i < c->variants[0]->playlists[0]->n_segments; i++)
            duration += c->variants[0]->playlists[0]->segments[i]->duration;
        s->duration = duration;
    }

    /* Open the demuxer for each playlist */
    for (i = 0; i < c->n_playlists; i++) {
        struct playlist *pls = c->playlists[i];
        AVInputFormat *in_fmt = NULL;

        if (pls->n_segments == 0)
            continue;

        if (!(pls->ctx = avformat_alloc_context())) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }

        pls->index  = i;
        pls->needed = 1;
        pls->parent = s;

        /* If this is a live stream with more than 3 segments, start at the
         * third last segment. */
        pls->cur_seq_no = pls->start_seq_no;
        if (!pls->finished && pls->n_segments > 3)
            pls->cur_seq_no = pls->start_seq_no + pls->n_segments - 3;

        pls->read_buffer = av_malloc(INITIAL_BUFFER_SIZE);
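        /* Each playlist reads through its own AVIOContext; read_data() pulls
         * in segment data on demand. */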
        ffio_init_context(&pls->pb, pls->read_buffer, INITIAL_BUFFER_SIZE, 0, pls,
                          read_data, NULL, NULL);
        pls->pb.seekable = 0;
        ret = av_probe_input_buffer(&pls->pb, &in_fmt, pls->segments[0]->url,
                                    NULL, 0, 0);
        if (ret < 0) {
            /* Free the ctx - it isn't initialized properly at this point,
             * so avformat_close_input shouldn't be called. If
             * avformat_open_input fails below, it frees and zeros the
             * context, so it doesn't need any special treatment like this. */
            av_log(s, AV_LOG_ERROR, "Error when loading first segment '%s'\n", pls->segments[0]->url);
            avformat_free_context(pls->ctx);
            pls->ctx = NULL;
            goto fail;
        }
        pls->ctx->pb       = &pls->pb;
        pls->stream_offset = stream_offset;
        ret = avformat_open_input(&pls->ctx, pls->segments[0]->url, in_fmt, NULL);
        if (ret < 0)
            goto fail;

        pls->ctx->ctx_flags &= ~AVFMTCTX_NOHEADER;
        ret = avformat_find_stream_info(pls->ctx, NULL);
        if (ret < 0)
            goto fail;

        /* Create new AVStreams for each stream in this playlist */
        for (j = 0; j < pls->ctx->nb_streams; j++) {
            AVStream *st = avformat_new_stream(s, NULL);
            AVStream *ist = pls->ctx->streams[j];
            if (!st) {
                ret = AVERROR(ENOMEM);
                goto fail;
            }
            st->id = i;
            avpriv_set_pts_info(st, ist->pts_wrap_bits, ist->time_base.num, ist->time_base.den);
            avcodec_copy_context(st->codec, pls->ctx->streams[j]->codec);
        }

        stream_offset += pls->ctx->nb_streams;
    }

    /* Create a program for each variant */
    for (i = 0; i < c->n_variants; i++) {
        struct variant *v = c->variants[i];
        char bitrate_str[20];
        AVProgram *program;

        snprintf(bitrate_str, sizeof(bitrate_str), "%d", v->bandwidth);

        program = av_new_program(s, i);
        if (!program) {
            ret = AVERROR(ENOMEM);
            goto fail;
        }
        av_dict_set(&program->metadata, "variant_bitrate", bitrate_str, 0);

        for (j = 0; j < v->n_playlists; j++) {
            struct playlist *pls = v->playlists[j];
            int is_shared = playlist_in_multiple_variants(c, pls);
            int k;

            for (k = 0; k < pls->ctx->nb_streams; k++) {
                struct AVStream *st = s->streams[pls->stream_offset + k];

                ff_program_add_stream_index(s, i, pls->stream_offset + k);

                /* Set variant_bitrate for streams unique to this variant */
                if (!is_shared && v->bandwidth)
                    av_dict_set(&st->metadata, "variant_bitrate", bitrate_str, 0);
            }
        }
    }

    c->first_packet = 1;
    c->first_timestamp = AV_NOPTS_VALUE;
    c->seek_timestamp  = AV_NOPTS_VALUE;

    return 0;
fail:
    free_playlist_list(c);
    free_variant_list(c);
    return ret;
}