Code example #1
File: muxer_libav.c Project: Glandos/tvheadend
/**
 * Free all memory associated with the muxer
 */
static void
lav_muxer_destroy(muxer_t *m)
{
  int i;
  lav_muxer_t *lm = (lav_muxer_t*)m;

  if(lm->lm_h264_filter)
    av_bitstream_filter_close(lm->lm_h264_filter);

  if(lm->lm_hevc_filter)
    av_bitstream_filter_close(lm->lm_hevc_filter);

  if (lm->lm_oc) {
    for(i=0; i<lm->lm_oc->nb_streams; i++)
      av_freep(&lm->lm_oc->streams[i]->codec->extradata);
  }

  if(lm->lm_oc && lm->lm_oc->pb) {
    av_freep(&lm->lm_oc->pb->buffer);
    av_freep(&lm->lm_oc->pb);
  }

  if(lm->lm_oc) {
    avformat_free_context(lm->lm_oc);
    lm->lm_oc = NULL;
  }

  free(lm);
}
Code example #2
File: tee.c Project: 26mansi/FFmpeg
static void close_slaves(AVFormatContext *avf)
{
    TeeContext *tee = avf->priv_data;
    AVFormatContext *avf2;
    unsigned i, j;

    for (i = 0; i < tee->nb_slaves; i++) {
        avf2 = tee->slaves[i].avf;

        for (j = 0; j < avf2->nb_streams; j++) {
            AVBitStreamFilterContext *bsf_next, *bsf = tee->slaves[i].bsfs[j];
            while (bsf) {
                bsf_next = bsf->next;
                av_bitstream_filter_close(bsf);
                bsf = bsf_next;
            }
        }
        av_freep(&tee->slaves[i].stream_map);
        av_freep(&tee->slaves[i].bsfs);

        avio_close(avf2->pb);
        avf2->pb = NULL;
        avformat_free_context(avf2);
        tee->slaves[i].avf = NULL;
    }
}
Code example #3
File: ffmpeg.cpp Project: LINGQ1991/av_cache
int CFfmpeg::Close(void)
{
	if(acodec1)
	{
		avcodec_close(acodec1);
		avcodec_close(ast1->codec);
		ring_buffer_free(&adecrbuffer1);
	}
	if(acodec2)
	{
		avcodec_close(acodec2);
		avcodec_close(ast2->codec);
		ring_buffer_free(&adecrbuffer2);
	}
	if(infmt_ctx)
	{
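		// av_close_input_file() is the pre-avformat_close_input() name for this call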
		av_close_input_file(infmt_ctx);
		infmt_ctx = NULL;
	}
	if(oc)
	{
		av_write_trailer(oc);
		avformat_free_context(oc);
		oc = NULL;
	}
	if(bsfc)
	{
		av_bitstream_filter_close(bsfc);
	}
	
	return 0;
}
Code example #4
static void func_destroy(IJKFF_Pipenode *node)
{
    if (!node || !node->opaque)
        return;

    IJKFF_Pipenode_Opaque *opaque = node->opaque;

    SDL_DestroyCondP(&opaque->acodec_cond);
    SDL_DestroyMutexP(&opaque->acodec_mutex);
    SDL_DestroyCondP(&opaque->acodec_first_dequeue_output_cond);
    SDL_DestroyMutexP(&opaque->acodec_first_dequeue_output_mutex);

    SDL_AMediaCodec_decreaseReferenceP(&opaque->acodec);
    SDL_AMediaFormat_deleteP(&opaque->input_aformat);
    SDL_AMediaFormat_deleteP(&opaque->output_aformat);

#if AMC_USE_AVBITSTREAM_FILTER
    av_freep(&opaque->orig_extradata);
#endif

    ffp_packet_queue_destroy(&opaque->fake_pictq);

    if (opaque->bsfc) {
        av_bitstream_filter_close(opaque->bsfc);
        opaque->bsfc = NULL;
    }

    JNIEnv *env = NULL;
    if (JNI_OK == SDL_JNI_SetupThreadEnv(&env)) {
        SDL_JNI_DeleteGlobalRefP(env, &opaque->jsurface);
    }
}
Code example #5
PrivateDecoderCrystalHD::~PrivateDecoderCrystalHD()
{
    if (m_fetcher_thread)
    {
        m_fetcher_pause = true;
        m_fetcher_stop = true;
        int tries = 0;
        while (!m_fetcher_thread->wait(100) && (tries++ < 50))
            VERBOSE(VB_PLAYBACK, WARN + "Waited 100ms for Fetcher to stop");

        if (m_fetcher_thread->isRunning())
            VERBOSE(VB_IMPORTANT, ERR + "Failed to stop Fetcher.");
        else
            VERBOSE(VB_PLAYBACK, LOC + "Stopped frame Fetcher.");
        delete m_fetcher_thread;
    }

    if (m_filter)
        av_bitstream_filter_close(m_filter);

    Reset();
    if (!m_device)
        return;

    INIT_ST
    if (m_device_type != BC_70015)
    {
        st = DtsFlushRxCapture(m_device, false);
        CHECK_ST
    }
    st = DtsStopDecoder(m_device);
    CHECK_ST
    st = DtsCloseDecoder(m_device);
    CHECK_ST
    DtsDeviceClose(m_device);
}
Code example #6
File: crystalhd.c Project: Tjoppen/FFmpeg
static av_cold int uninit(AVCodecContext *avctx)
{
    CHDContext *priv = avctx->priv_data;
    HANDLE device;

    device = priv->dev;
    DtsStopDecoder(device);
    DtsCloseDecoder(device);
    DtsDeviceClose(device);

    av_parser_close(priv->parser);
    if (priv->bsfc) {
        av_bitstream_filter_close(priv->bsfc);
    }

    av_free(priv->sps_pps_buf);

    if (priv->pic.data[0])
        avctx->release_buffer(avctx, &priv->pic);

    if (priv->head) {
       OpaqueList *node = priv->head;
       while (node) {
          OpaqueList *next = node->next;
          av_free(node);
          node = next;
       }
    }

    return 0;
}
Code example #7
File: tee.c Project: 15806905685/FFmpeg
static int close_slave(TeeSlave *tee_slave)
{
    AVFormatContext *avf;
    unsigned i;
    int ret = 0;

    avf = tee_slave->avf;
    if (!avf)
        return 0;

    if (tee_slave->header_written)
        ret = av_write_trailer(avf);

    if (tee_slave->bsfs) {
        for (i = 0; i < avf->nb_streams; ++i) {
            AVBitStreamFilterContext *bsf_next, *bsf = tee_slave->bsfs[i];
            while (bsf) {
                bsf_next = bsf->next;
                av_bitstream_filter_close(bsf);
                bsf = bsf_next;
            }
        }
    }
    av_freep(&tee_slave->stream_map);
    av_freep(&tee_slave->bsfs);

    ff_format_io_close(avf, &avf->pb);
    avformat_free_context(avf);
    tee_slave->avf = NULL;
    return ret;
}
Code example #8
File: ffmpeg_integration.cpp Project: AmesianX/pilot
mfxStatus FFmpeg_Reader_Close()
{
    if(g_pBsfc)
        av_bitstream_filter_close(g_pBsfc);
    if(g_pFormatCtx)
        av_close_input_file(g_pFormatCtx);

    return MFX_ERR_NONE;
}
Code example #9
File: indexing.cpp Project: dwbuiten/ffms2
SharedVideoContext::~SharedVideoContext() {
	if (CodecContext) {
		avcodec_close(CodecContext);
		if (FreeCodecContext)
			av_freep(&CodecContext);
	}
	av_parser_close(Parser);
	if (BitStreamFilter)
		av_bitstream_filter_close(BitStreamFilter);
	delete TCC;
}
Code example #10
File: fwInputHandler.cpp Project: ViNi89/FLOW
void InputHandler::Close() {
    if(video_dec_ctx) avcodec_close(video_dec_ctx);
    //if(pCodecCtx) avcodec_close(pCodecCtx);
    //if(bsf) av_bitstream_filter_close(bsf);
    //if(out_ctx) avformat_free_context(out_ctx);
    if(fmt_ctx) avformat_close_input(&fmt_ctx);
    if(sws_ctx) sws_freeContext(sws_ctx);
    if(bsf) av_bitstream_filter_close(bsf);
    if(sdp_desc) av_free(sdp_desc);
    if(io_buffer) av_free(io_buffer);
    if(m_PictureBuffer) delete m_PictureBuffer;
    avcodec_free_frame(&frame);
    avcodec_free_frame(&frameRGB);
}
Code example #11
File: ffmpeg-encoder.c Project: tzlion/mgba
void FFmpegEncoderClose(struct FFmpegEncoder* encoder) {
	if (!encoder->context) {
		return;
	}
	av_write_trailer(encoder->context);
	avio_close(encoder->context->pb);

	if (encoder->audioCodec) {
		av_free(encoder->postaudioBuffer);
		if (encoder->audioBuffer) {
			av_free(encoder->audioBuffer);
		}
#if LIBAVCODEC_VERSION_MAJOR >= 55
		av_frame_free(&encoder->audioFrame);
#else
		avcodec_free_frame(&encoder->audioFrame);
#endif
		avcodec_close(encoder->audio);

		if (encoder->resampleContext) {
#ifdef USE_LIBAVRESAMPLE
			avresample_close(encoder->resampleContext);
#else
			swr_free(&encoder->resampleContext);
#endif
		}

		if (encoder->absf) {
#ifdef FFMPEG_USE_NEW_BSF
			av_bsf_free(&encoder->absf);
#else
			av_bitstream_filter_close(encoder->absf);
			encoder->absf = 0;
#endif
		}
	}

#if LIBAVCODEC_VERSION_MAJOR >= 55
	av_frame_free(&encoder->videoFrame);
#else
	avcodec_free_frame(&encoder->videoFrame);
#endif
	avcodec_close(encoder->video);

	sws_freeContext(encoder->scaleContext);
	encoder->scaleContext = NULL;

	avformat_free_context(encoder->context);
	encoder->context = 0;
}
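
The FFMPEG_USE_NEW_BSF branch above marks the API split: newer FFmpeg releases deprecate av_bitstream_filter_close() in favor of the AVBSFContext API, which is freed with av_bsf_free(). A minimal sketch of that newer lifecycle, assuming a stream with valid codecpar (the function name, filter choice, and error handling are illustrative; the declarations live in libavcodec/bsf.h on recent FFmpeg):

/* Sketch: feed one packet through a bitstream filter with the new-style API. */
static int run_new_bsf(AVStream *st, AVPacket *pkt)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    avcodec_parameters_copy(bsf->par_in, st->codecpar);
    bsf->time_base_in = st->time_base;
    if ((ret = av_bsf_init(bsf)) < 0)
        goto done;

    /* push the packet in, then drain all filtered packets */
    if ((ret = av_bsf_send_packet(bsf, pkt)) < 0)
        goto done;
    while ((ret = av_bsf_receive_packet(bsf, pkt)) == 0) {
        /* consume or write the filtered packet here */
    }
    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
        ret = 0;
done:
    av_bsf_free(&bsf); /* new-style counterpart of av_bitstream_filter_close() */
    return ret;
}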
Code example #12
File: input-stream-avf.c Project: lucabe72/GRAPES
static void avf_close(struct chunkiser_ctx *s)
{
  int i;

  for (i = 0; i < s->s->nb_streams; i++) {
    if (s->bsf[i]) {
      av_bitstream_filter_close(s->bsf[i]);
    }
  }
  avformat_close_input(&s->s);

  //free buffers
  free(s->v_data);
  free(s->a_data);

  free(s);
}
Code example #13
static av_cold int uninit(AVCodecContext *avctx)
{
    CHDContext *priv = avctx->priv_data;
    HANDLE device;

    device = priv->dev;
    DtsStopDecoder(device);
    DtsCloseDecoder(device);
    DtsDeviceClose(device);

    /*
     * Restore original extradata, so that if the decoder is
     * reinitialised, the bitstream detection and filtering
     * will work as expected.
     */
    if (priv->orig_extradata) {
        av_free(avctx->extradata);
        avctx->extradata = priv->orig_extradata;
        avctx->extradata_size = priv->orig_extradata_size;
        priv->orig_extradata = NULL;
        priv->orig_extradata_size = 0;
    }

    av_parser_close(priv->parser);
    if (priv->bsfc) {
        av_bitstream_filter_close(priv->bsfc);
    }

    av_free(priv->sps_pps_buf);

    if (priv->pic.data[0])
        avctx->release_buffer(avctx, &priv->pic);

    if (priv->head) {
       OpaqueList *node = priv->head;
       while (node) {
          OpaqueList *next = node->next;
          av_free(node);
          node = next;
       }
    }

    return 0;
}
Code example #14
File: segmenter.c Project: midnox/mediasegmenter
/**
 * @brief free segmenter context
 * @param context segmenter context
 */
void segmenter_free_context(SegmenterContext* context) {
    
    if (context->bfilter) {
        av_bitstream_filter_close(context->bfilter);
    }
    
    if (context->output) {
        avformat_free_context(context->output);
    }
    
    if (context->buf) {
        free(context->buf);
    }
    
    if (context->durations) {
        free(context->durations);
    }
    
    free(context);
}
Code example #15
File: mmaldec.c Project: OS2World/LIB-libav
static av_cold int ffmmal_close_decoder(AVCodecContext *avctx)
{
    MMALDecodeContext *ctx = avctx->priv_data;

    if (ctx->decoder)
        ffmmal_stop_decoder(avctx);

    mmal_component_destroy(ctx->decoder);
    ctx->decoder = NULL;
    mmal_queue_destroy(ctx->queue_decoded_frames);
    mmal_pool_destroy(ctx->pool_in);
    ffmmal_poolref_unref(ctx->pool_out);

    if (ctx->bsfc)
        av_bitstream_filter_close(ctx->bsfc);

    mmal_vc_deinit();

    return 0;
}
Code example #16
PrivateDecoderCrystalHD::~PrivateDecoderCrystalHD()
{
    if (m_fetcher_thread)
    {
        m_fetcher_pause = true;
        m_fetcher_stop = true;
        int tries = 0;
        while (!m_fetcher_thread->wait(100) && (tries++ < 50))
            LOG(VB_PLAYBACK, LOG_WARNING, LOC +
                "Waited 100ms for Fetcher to stop");

        if (m_fetcher_thread->isRunning())
            LOG(VB_GENERAL, LOG_ERR, LOC + "Failed to stop Fetcher.");
        else
            LOG(VB_PLAYBACK, LOG_INFO, LOC + "Stopped frame Fetcher.");
        delete m_fetcher_thread;
    }

    if (m_filter)
        av_bitstream_filter_close(m_filter);

    Reset();
    if (!m_device)
        return;

    INIT_ST;
    if (m_device_type != BC_70015)
    {
        st = DtsFlushRxCapture(m_device, false);
        CHECK_ST;
    }
    st = DtsStopDecoder(m_device);
    CHECK_ST;
    st = DtsCloseDecoder(m_device);
    CHECK_ST;
    DtsDeviceClose(m_device);
}
Code example #17
File: muxer_libav.c Project: BarDweller/tvheadend
/**
 * Close the muxer and append trailer to output
 */
static int
lav_muxer_close(muxer_t *m)
{
  int i;
  int ret = 0;
  lav_muxer_t *lm = (lav_muxer_t*)m;

  if(lm->lm_init && av_write_trailer(lm->lm_oc) < 0) {
    tvhlog(LOG_WARNING, "libav",  "Failed to write %s trailer", 
	   muxer_container_type2txt(lm->m_container));
    lm->m_errors++;
    ret = -1;
  }

  if(lm->lm_h264_filter)
    av_bitstream_filter_close(lm->lm_h264_filter);

  for(i=0; i<lm->lm_oc->nb_streams; i++)
    av_freep(&lm->lm_oc->streams[i]->codec->extradata);
 
  lm->lm_oc->nb_streams = 0;

  return ret;
}
Code example #18
File: joinmp4temp.cpp Project: xwdang/gittest
// concatenate H.264/AAC segments into one MP4
int joinmp4(char (*h264file)[400] ,char (*aacfile)[400],char * mp4,int length,int usefilter)
{
	//AVOutputFormat *ofmt = NULL;
	AVPacket pkt;
	AVStream *out_vstream = NULL;
	AVStream *out_astream = NULL;
	AVFormatContext *ofmt_ctx = NULL;
	int join_index = 0;
	AVBitStreamFilterContext* aacbsfc = NULL;
	long  last_video_pts = 0;
	long last_audio_pts = 0;
	long end_video_pts = 0;
	long end_audio_pts = 0;
	int videoindex_out = -1;
	int audioindex_out = -1;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext * ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL;

    int ret, i,retu =0,filter_ret=0;
    //	int fps;
    int videoindex_v=-1;
    int audioindex_a=-1;
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;
    //set file path
    char *in_filename_v = h264file[join_index];
    char *in_filename_a = aacfile[join_index];
    char *out_filename = mp4;
joinone:
    //Input AVFormatContext and Output AVFormatContext
    ifmt_ctx_v = NULL;
    ifmt_ctx_a = NULL;

    ret = 0; i = 0;retu =0;filter_ret=0;
    //	int fps;
    videoindex_v=-1;
    audioindex_a=-1;
    frame_index=0;
    cur_pts_v=0;cur_pts_a=0;
    //set file path
    in_filename_v = h264file[join_index];
    in_filename_a = aacfile[join_index];
    out_filename = mp4;

	//register before use
	av_register_all();
	//open Input and set avformatcontext
	if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
		retu = -1;//-1 mean audio file opened failed
		
		goto end;
	}
	if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
		retu = -2; //-2 mean video file opened failed
		
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {

		retu = -3; //-3 mean get video info failed
		goto end;
	}


	if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
		retu = -4;//-4 mean get audio info failed
		goto end;
	}

	//open Output
	if(join_index == 0)
	{
		avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
		if (!ofmt_ctx) {
			retu = -5;
			goto end;
		}
	}

	//ofmt = ofmt_ctx->oformat;
	//find all video stream input type
	for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if(ifmt_ctx_v->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			AVStream *in_stream = ifmt_ctx_v->streams[i];
			videoindex_v=i;

			if(join_index == 0)
			{
				out_vstream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				videoindex_out=out_vstream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_vstream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_vstream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_vstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			else
			{
				out_vstream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_vstream->duration);
			}
			if (!out_vstream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}

	//find all audio stream input type
	for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if(ifmt_ctx_a->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
			AVStream *in_stream = ifmt_ctx_a->streams[i];
			audioindex_a=i;

			if(join_index == 0)
			{
				out_astream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				audioindex_out=out_astream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_astream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_astream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_astream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			else
			{
				out_astream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_astream->duration);
			}
			if (!out_astream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}
	if(join_index == 0)
	{
			//Open output file
		if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
			if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
				retu = -10;
				goto end;
			}
		}
		//Write file header
		if (avformat_write_header(ofmt_ctx, NULL) < 0) {
			retu = -11;
			goto end;
		}
	}
	if(usefilter&& aacbsfc == NULL)
		aacbsfc = av_bitstream_filter_init("aac_adtstoasc");


	while (true) {
		AVFormatContext *ifmt_ctx;
		int stream_index=0;
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		if(av_compare_ts(cur_pts_v,ifmt_ctx_v->streams[videoindex_v]->time_base,cur_pts_a,
					ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0)
		{
			ifmt_ctx=ifmt_ctx_v;
			stream_index=videoindex_out;
			if(av_read_frame(ifmt_ctx, &pkt) >= 0){

				do{
					in_stream  = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_vstream;
					if(pkt.stream_index==videoindex_v){

						//Simple Write PTS
						if(pkt.pts==AV_NOPTS_VALUE){

							//Write PTS
							AVRational time_base1=in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts=pkt.pts;
							pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_v=pkt.pts;
						break;
					}
				}
				while(av_read_frame(ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				//printf("pkt.duration = %ld\n",pkt.duration);
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;

					break;
			}
		}
		else
		{
			ifmt_ctx=ifmt_ctx_a;
			stream_index=audioindex_out;
			if(av_read_frame(ifmt_ctx, &pkt) >= 0){
				do
				{
					in_stream  = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_astream;
					if(pkt.stream_index==audioindex_a)
					{
						//Simple Write PTS
						if(pkt.pts==AV_NOPTS_VALUE)
						{
							//Write PTS
							AVRational time_base1=in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts=pkt.pts;
							pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_a=pkt.pts;
						break;
					}
				}
				while(av_read_frame(ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;

				break;
			}

		}
		if(usefilter)
			filter_ret = av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data,&pkt.size, pkt.data, pkt.size, 0);
		if(filter_ret)
		{
			retu = -10;
			goto end;

		}
		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);


		pkt.pos = -1;
		pkt.stream_index=stream_index;
		if(pkt.stream_index == audioindex_out)
		{
			pkt.pts += end_audio_pts;
			pkt.dts += end_audio_pts;
			last_audio_pts = pkt.pts+pkt.duration;
		//	printf("audio pts = %lld ,audio dts = %lld\n",pkt.pts,pkt.dts);
		}
		else
		{
			pkt.pts += end_video_pts;
			pkt.dts += end_video_pts;
			last_video_pts = pkt.pts+pkt.duration;
		}


		//Write
		if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
			av_free_packet(&pkt);
			break;
		}
		//av_packet_unref(&pkt);
			//av_interleaved_write_frame(ofmt_ctx, &pkt);
		av_free_packet(&pkt);
	}


end:


	// avformat_close_input() also frees the context and NULLs the pointer
	avformat_close_input(&ifmt_ctx_v);
	avformat_close_input(&ifmt_ctx_a);

	if(join_index < length)
		goto joinone;
	
	av_write_trailer(ofmt_ctx);

	
	if(usefilter)
		av_bitstream_filter_close(aacbsfc);
	/* close output */
	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	return retu;
}
Code example #19
File: demux.cpp Project: FlowerWrong/ffmpegc
int demux(const char *in_filename, const char *out_filename_v,
		const char *out_filename_a) {
	AVOutputFormat *ofmt_a = NULL, *ofmt_v = NULL;
	// Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL;
	AVPacket pkt, enc_pkt;
	int ret, i;
	int video_index = -1, audio_index = -1;
	int frame_index = 0;

	av_register_all();
	// Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf("Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf("Failed to retrieve input stream information");
		goto end;
	}

	// Output
	avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
	if (!ofmt_ctx_v) {
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_v = ofmt_ctx_v->oformat;

	avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
	if (!ofmt_ctx_a) {
		printf("Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_a = ofmt_ctx_a->oformat;

	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		// Create output AVStream according to input AVStream
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = NULL;

		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_v,
					in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_v;
		} else if (ifmt_ctx->streams[i]->codec->codec_type
				== AVMEDIA_TYPE_AUDIO) {
			audio_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_a,
					in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_a;
		} else {
			break;
		}

		if (!out_stream) {
			printf("Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		// Copy the settings of AVCodecContext
		if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
			printf(
					"Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;

		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	// Open output file
	if (!(ofmt_v->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_v);
			goto end;
		}
	}

	if (!(ofmt_a->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_a);
			goto end;
		}
	}

	// Write file header
	if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
		printf("Error occurred when opening video output file\n");
		goto end;
	}
//	if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
//		printf("Error occurred when opening audio output file\n");
//		goto end;
//	}

#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

	while (1) {
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream, *out_stream;

		AVCodecContext *dec_ctx = NULL, *enc_ctx = NULL;
		AVCodec *dec = NULL, *encoder = NULL;

		AVFrame *frame = NULL;

		int got_frame;

		// Get an AVPacket
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
			break;
		in_stream = ifmt_ctx->streams[pkt.stream_index];

		if (pkt.stream_index == video_index) {
			ofmt_ctx = ofmt_ctx_v;
			// reuse the output stream created above instead of adding one per packet
			out_stream = ofmt_ctx_v->streams[0];

			/* find decoder for the stream */
			dec_ctx = in_stream->codec;
			dec = avcodec_find_decoder(dec_ctx->codec_id);
			if (!dec) {
				fprintf(stderr, "Failed to find %s codec\n",
						av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
				return AVERROR(EINVAL);
			}

			/* Open decoder */
			int ret = avcodec_open2(dec_ctx, dec, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR,
						"Failed to open decoder for stream #%u\n", i);
				return ret;
			}

			// decoder is MPEG-4 part 2
			printf("decoder is %s\n", dec->long_name);

			// NOTE
			frame = av_frame_alloc();

			ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt);
			if (ret < 0) {
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}

			// printf("frame duration is %d\n", frame->pkt_duration);

			// encode
			encoder = avcodec_find_encoder(AV_CODEC_ID_H264);

			if (!encoder) {
				av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
				return AVERROR_INVALIDDATA;
			}
			// avcodec_copy_context(enc_ctx, dec_ctx);
			enc_ctx = avcodec_alloc_context3(encoder);

			enc_ctx->height = dec_ctx->height;
			enc_ctx->width = dec_ctx->width;
			enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
			enc_ctx->pix_fmt = encoder->pix_fmts[0];
			enc_ctx->time_base = dec_ctx->time_base;
			//enc_ctx->time_base.num = 1;
			//enc_ctx->time_base.den = 25;
			// options required for H.264; encoding fails without them
			enc_ctx->me_range = 16;
			enc_ctx->max_qdiff = 4;
			enc_ctx->qmin = 10;
			enc_ctx->qmax = 51;
			enc_ctx->qcompress = 0.6;
			enc_ctx->refs = 3;
			enc_ctx->bit_rate = 1500;

			// "preset" is a private option and must be set before avcodec_open2()
			av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);

			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR,
						"Cannot open video encoder for stream #%u\n", i);
				return ret;
			}

			// AVOutputFormat *formatOut = av_guess_format(NULL, out_filename_v, NULL);

			enc_pkt.data = NULL;
			enc_pkt.size = 0;
			av_init_packet(&enc_pkt);
			ret = avcodec_encode_video2(enc_ctx, &enc_pkt, frame, &got_frame);

			printf("demo is %s\n", "hello");

			av_frame_free(&frame);
			avcodec_close(enc_ctx);
			avcodec_close(dec_ctx);

			// printf("Write Video Packet. size:%d\tpts:%lld\n", pkt.size, pkt.pts);
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		} else {
			continue;
		}

		// Convert PTS/DTS
		enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.duration = av_rescale_q(enc_pkt.duration, in_stream->time_base,
				out_stream->time_base);
		// enc_pkt.pos = -1;
		enc_pkt.stream_index = video_index;

		if (av_interleaved_write_frame(ofmt_ctx, &enc_pkt) < 0) {
			printf("Error muxing packet\n");
			break;
		}
		av_free_packet(&enc_pkt);
		av_free_packet(&pkt);
		frame_index++;
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif

	// Write file trailer
	//av_write_trailer(ofmt_ctx_a); // the audio header is never written above
	av_write_trailer(ofmt_ctx_v);

	end: avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_a->pb);

	if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_v->pb);

	avformat_free_context(ofmt_ctx_a);
	avformat_free_context(ofmt_ctx_v);

	if (ret < 0 && ret != AVERROR_EOF) {
		printf("Error occurred.\n");
		return -1;
	}
	return 0;
}
Code example #20
File: haalivideo.cpp Project: 1974kpkpkp/ffms2
void FFHaaliVideo::Free(bool CloseCodec) {
	if (CloseCodec)
		avcodec_close(CodecContext);
	if (BitStreamFilter)
		av_bitstream_filter_close(BitStreamFilter);
}
Code example #21
int main(int argc, char* argv[])
{
	AVFormatContext *ifmt_ctx = NULL;
	AVPacket pkt;
	int ret, i;
	int videoindex=-1,audioindex=-1;
	const char *in_filename  = "cuc_ieschool.flv";//Input file URL
	const char *out_filename_v = "cuc_ieschool.h264";//Output file URL
	const char *out_filename_a = "cuc_ieschool.mp3";

	av_register_all();
	//Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf( "Could not open input file.");
		return -1;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf( "Failed to retrieve input stream information");
		return -1;
	}

	videoindex=-1;
	for(i=0; i<ifmt_ctx->nb_streams; i++) {
		if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
		}else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
			audioindex=i;
		}
	}
	//Dump Format------------------
	printf("\nInput Video===========================\n");
	av_dump_format(ifmt_ctx, 0, in_filename, 0);
	printf("\n======================================\n");

	FILE *fp_audio=fopen(out_filename_a,"wb+");  
	FILE *fp_video=fopen(out_filename_v,"wb+");  

	/*
	FIX: H.264 in some container format (FLV, MP4, MKV etc.) need 
	"h264_mp4toannexb" bitstream filter (BSF)
	  *Add SPS,PPS in front of IDR frame
	  *Add start code ("0,0,0,1") in front of NALU
	H.264 in some container (MPEG2TS) don't need this BSF.
	*/
#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb"); 
#endif

	while(av_read_frame(ifmt_ctx, &pkt)>=0){
		if(pkt.stream_index==videoindex){
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, ifmt_ctx->streams[videoindex]->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
			printf("Write Video Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
			fwrite(pkt.data,1,pkt.size,fp_video);
		}else if(pkt.stream_index==audioindex){
			/*
			AAC in some container format (FLV, MP4, MKV etc.) need to add 7 Bytes
			ADTS Header in front of AVPacket data manually.
			Other Audio Codec (MP3...) works well.
			*/
			printf("Write Audio Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
			fwrite(pkt.data,1,pkt.size,fp_audio);
		}
		av_free_packet(&pkt);
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);  
#endif

	fclose(fp_video);
	fclose(fp_audio);

	avformat_close_input(&ifmt_ctx);

	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
Code example #22
File: ff_utils.c Project: DeYangLiu/ffmpeg-streaming
int ff_filter_close(int type)
{
	av_bitstream_filter_close(aacbsf);
	aacbsf = NULL;
	return 0;
}
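
For context, ff_filter_close() above only completes a legacy-API lifecycle: an av_bitstream_filter_init() call up front and one av_bitstream_filter_filter() call per packet, as the other examples on this page do. A minimal sketch under those assumptions (the open/packet helper names are invented for illustration):

static AVBitStreamFilterContext *aacbsf;

int ff_filter_open(void)
{
	aacbsf = av_bitstream_filter_init("aac_adtstoasc");
	return aacbsf ? 0 : -1;
}

int ff_filter_packet(AVStream *st, AVPacket *pkt)
{
	/* on success the filter may replace pkt->data with a newly allocated buffer */
	return av_bitstream_filter_filter(aacbsf, st->codec, NULL,
	                                  &pkt->data, &pkt->size,
	                                  pkt->data, pkt->size, 0);
}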
Code example #23
int main(int argc, char* argv[])
{
	AVOutputFormat *ofmt_a = NULL,*ofmt_v = NULL;
	//(Input AVFormatContext and Output AVFormatContext)
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL;
	AVPacket pkt;
	int ret, i;
	int videoindex=-1,audioindex=-1;
	int frame_index=0;

	const char *in_filename  = "cuc_ieschool.ts";//Input file URL
	//char *in_filename  = "cuc_ieschool.mkv";
	const char *out_filename_v = "cuc_ieschool.h264";//Output file URL
	//char *out_filename_a = "cuc_ieschool.mp3";
	const char *out_filename_a = "cuc_ieschool.aac";

	av_register_all();
	//Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf( "Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf( "Failed to retrieve input stream information");
		goto end;
	}

	//Output
	avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
	if (!ofmt_ctx_v) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_v = ofmt_ctx_v->oformat;

	avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
	if (!ofmt_ctx_a) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_a = ofmt_ctx_a->oformat;

	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
			//Create output AVStream according to input AVStream
			AVFormatContext *ofmt_ctx;
			AVStream *in_stream = ifmt_ctx->streams[i];
			AVStream *out_stream = NULL;
			
			if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
				videoindex=i;
				out_stream=avformat_new_stream(ofmt_ctx_v, in_stream->codec->codec);
				ofmt_ctx=ofmt_ctx_v;
			}else if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
				audioindex=i;
				out_stream=avformat_new_stream(ofmt_ctx_a, in_stream->codec->codec);
				ofmt_ctx=ofmt_ctx_a;
			}else{
				break;
			}
			
			if (!out_stream) {
				printf( "Failed allocating output stream\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			//Copy the settings of AVCodecContext
			if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
				printf( "Failed to copy context from input to output stream codec context\n");
				goto end;
			}
			out_stream->codec->codec_tag = 0;

			if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
				out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	//Dump Format------------------
	printf("\n==============Input Video=============\n");
	av_dump_format(ifmt_ctx, 0, in_filename, 0);
	printf("\n==============Output Video============\n");
	av_dump_format(ofmt_ctx_v, 0, out_filename_v, 1);
	printf("\n==============Output Audio============\n");
	av_dump_format(ofmt_ctx_a, 0, out_filename_a, 1);
	printf("\n======================================\n");
	//Open output file
	if (!(ofmt_v->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
			printf( "Could not open output file '%s'", out_filename_v);
			goto end;
		}
	}

	if (!(ofmt_a->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
			printf( "Could not open output file '%s'", out_filename_a);
			goto end;
		}
	}

	//Write file header
	if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
		printf( "Error occurred when opening video output file\n");
		goto end;
	}
	if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
		printf( "Error occurred when opening audio output file\n");
		goto end;
	}
	
#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb"); 
#endif

	while (1) {
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
			break;
		in_stream  = ifmt_ctx->streams[pkt.stream_index];

		
		if(pkt.stream_index==videoindex){
			out_stream = ofmt_ctx_v->streams[0];
			ofmt_ctx=ofmt_ctx_v;
			printf("Write Video Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		}else if(pkt.stream_index==audioindex){
			out_stream = ofmt_ctx_a->streams[0];
			ofmt_ctx=ofmt_ctx_a;
			printf("Write Audio Packet. size:%d\tpts:%lld\n",pkt.size,pkt.pts);
		}else{
			continue;
		}


		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		pkt.stream_index=0;
		//Write
		if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
			printf( "Error muxing packet\n");
			break;
		}
		//printf("Write %8d frames to output file\n",frame_index);
		av_free_packet(&pkt);
		frame_index++;
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);  
#endif

	//Write file trailer
	av_write_trailer(ofmt_ctx_a);
	av_write_trailer(ofmt_ctx_v);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_a->pb);

	if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_v->pb);

	avformat_free_context(ofmt_ctx_a);
	avformat_free_context(ofmt_ctx_v);


	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
Code example #24
File: Remux.cpp Project: bikeyTang/RemuxVideo
bool Remux::executeRemux()
{
	AVPacket readPkt;
	int ret;
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename.c_str(), 0, 0)) < 0) {
		return false;
	}

	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		return false;
	}
	string::size_type pos = out_filename.find_last_of(".");
	if (pos == string::npos)
		out_filename.append(".mp4");
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename.c_str());
	if (!writeHeader())
		return false;
	int frame_index = 0;
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
	int startFlag = 1;
	int64_t pts_start_time = 0;
	int64_t dts_start_time = 0;
	int64_t pre_pts = 0;
	int64_t pre_dts = 0;
	while (1)
	{
		
		ret = av_read_frame(ifmt_ctx, &readPkt);
		if (ret < 0)
		{
			break;
		}
		if (readPkt.stream_index == videoIndex)
		{
			++frame_index;
			// skip leading non-keyframes so the output starts on an I-frame
			if (frame_index == startFlag && !(readPkt.flags & AV_PKT_FLAG_KEY)){
				++startFlag;
				continue;
			}
			if (frame_index == startFlag){
				pts_start_time = readPkt.pts>0? readPkt.pts:0;
				dts_start_time = readPkt.dts>0? readPkt.dts:0;
				pre_dts = dts_start_time;
				pre_pts = pts_start_time;
			}

			// run the bitstream filter to get Annex-B H.264 packets
			if (isMp4)
				av_bitstream_filter_filter(h264bsfc, ifmt_ctx->streams[videoIndex]->codec, NULL, &readPkt.data, &readPkt.size, readPkt.data, readPkt.size, 0);

			if (readPkt.pts != AV_NOPTS_VALUE){
				readPkt.pts = readPkt.pts - pts_start_time;
			}
			if (readPkt.dts != AV_NOPTS_VALUE){
				if (readPkt.dts <= pre_dts&&frame_index != startFlag){
					// keep dts strictly increasing
					int64_t delta = av_rescale_q(1, ofmt_ctx->streams[0]->time_base, ifmt_ctx->streams[videoIndex]->time_base);
					readPkt.dts = pre_dts + delta + 1;
				}
				else{
					//initDts(&readPkt.dts, dts_start_time);
					readPkt.dts = readPkt.dts - dts_start_time;
				}
			}
			pre_dts = readPkt.dts;
			pre_pts = readPkt.pts;
			
			av_packet_rescale_ts(&readPkt, ifmt_ctx->streams[videoIndex]->time_base, ofmt_ctx->streams[0]->time_base);
			if (readPkt.duration < 0)
			{
				readPkt.duration = 0;
			}
			if (readPkt.pts < readPkt.dts)
			{
				readPkt.pts = readPkt.dts + 1;
			}
			readPkt.stream_index = 0;
			// av_interleaved_write_frame() here sometimes produced a video file with no data
			ret =av_write_frame(ofmt_ctx, &readPkt);
			if (ret < 0) {
				//break;
				std::cout << "write failed" << std::endl;
			}
		}
		
		av_packet_unref(&readPkt);

	}
	av_bitstream_filter_close(h264bsfc);
	av_packet_unref(&readPkt);
	av_write_trailer(ofmt_ctx);
	return true;
}
Code example #25
File: main.cpp Project: wddpeakking/MmOnLine
int Testffmpeg()
{
	AVOutputFormat *ofmt = NULL;  
	//Input AVFormatContext and Output AVFormatContext  
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;  
	AVPacket pkt;  
	const char *in_filename, *out_filename;  
	int ret, i;  
	int videoindex=-1;  
	int frame_index=0;  
	in_filename  = "rtmp://live.hkstv.hk.lxdns.com/live/hks";  
	out_filename = "receive.flv";  

	av_register_all();  
	//Network  
	avformat_network_init();  
	//Input  
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {  
		printf( "Could not open input file.");  
		goto end;  
	}  
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {  
		printf( "Failed to retrieve input stream information");  
		goto end;  
	}  

	for(i=0; i<ifmt_ctx->nb_streams; i++)
		if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}

	av_dump_format(ifmt_ctx, 0, in_filename, 0);

	//Output
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename); //RTMP

	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt = ofmt_ctx->oformat;
	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
		if (!out_stream) {
			printf( "Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		//Copy the settings of AVCodecContext
		ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (ret < 0) {
			printf( "Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	//Dump Format------------------
	av_dump_format(ofmt_ctx, 0, out_filename, 1);
	//Open output URL
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf( "Could not open output URL '%s'", out_filename);
			goto end;
		}
	}
	//Write file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		printf( "Error occurred when opening output URL\n");
		goto end;
	}

#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb");
#endif

	while (1) {
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0)
			break;

		in_stream  = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];
		/* copy packet */
		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		//Print to Screen
		if(pkt.stream_index==videoindex){
			printf("Receive %8d video frames from input URL\n",frame_index);
			frame_index++;

#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		}
		//ret = av_write_frame(ofmt_ctx, &pkt);
		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);

		if (ret < 0) {
			printf( "Error muxing packet\n");
			break;
		}

		av_free_packet(&pkt);

	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif

	//Write file trailer
	av_write_trailer(ofmt_ctx);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
Code example #26
File: muxer_mp4.cpp Project: zhifeichen/fftest
int muxer_mp4(void* noUse)
{
    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
    int videoindex_v = -1, videoindex_out = -1;
    int audioindex_a = -1, audioindex_out = -1;
    int frame_index = 0;
    int64_t cur_pts_v = 0, cur_pts_a = 0;

    //const char *in_filename_v = "cuc_ieschool.ts";//Input file URL
    const char *in_filename_v = "../testResource/bigbuckbunny_480x272.h264";
    //const char *in_filename_a = "cuc_ieschool.mp3";
    //const char *in_filename_a = "gowest.m4a";
    //const char *in_filename_a = "gowest.aac";
    const char *in_filename_a = "../testResource/WavinFlag.aac";

    const char *out_filename = "bigbuckbunny.mp4";//Output file URL
    av_register_all();
    //Input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }

    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }
    printf("===========Input Information==========\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    av_dump_format(ifmt_ctx_a, 0, in_filename_a, 0);
    printf("======================================\n");
    //Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;

    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(32768);

    AVIOContext *avio_out = avio_alloc_context(outbuffer, 32768, 0, NULL, NULL, write_buffer, NULL);
    if (avio_out == NULL)
        goto end;
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags |= AVFMT_FLAG_CUSTOM_IO;

    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *in_stream = ifmt_ctx_v->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            videoindex_v = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            videoindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }

    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            AVStream *in_stream = ifmt_ctx_a->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            audioindex_a = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            audioindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

            break;
        }
    }

    printf("==========Output Information==========\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    //Open output file (skip when a custom AVIOContext is already installed,
    //otherwise avio_open() would replace and leak it)
    if (!(ofmt->flags & AVFMT_NOFILE) && !(ofmt_ctx->flags & AVFMT_FLAG_CUSTOM_IO)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf("Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    //Write file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf("Error occurred when opening output file\n");
        goto end;
    }


    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index = 0;
        AVStream *in_stream, *out_stream;

        //Get an AVPacket
        if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base, cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            ifmt_ctx = ifmt_ctx_v;
            stream_index = videoindex_out;

            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == videoindex_v) {
                        //FIX: No PTS (Example: Raw H.264)
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }

                        cur_pts_v = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }
        }
        else {
            ifmt_ctx = ifmt_ctx_a;
            stream_index = audioindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == audioindex_a) {

                        //FIX: No PTS
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a = pkt.pts;

                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }

        }

        //FIX:Bitstream Filter
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif


        //Convert PTS/DTS
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index = stream_index;

        printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
            printf("Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);

    }
    //Write file trailer
    av_write_trailer(ofmt_ctx);

#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
Code example #27
File: crystalhd.c Project: AlexSeverinov/ffmpeg4ios
static av_cold int init(AVCodecContext *avctx)
{
    CHDContext* priv;
    BC_STATUS ret;
    BC_INFO_CRYSTAL version;
    BC_INPUT_FORMAT format = {
        .FGTEnable   = FALSE,
        .Progressive = TRUE,
        .OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
        .width       = avctx->width,
        .height      = avctx->height,
    };

    BC_MEDIA_SUBTYPE subtype;

    uint32_t mode = DTS_PLAYBACK_MODE |
                    DTS_LOAD_FILE_PLAY_FW |
                    DTS_SKIP_TX_CHK_CPB |
                    DTS_PLAYBACK_DROP_RPT_MODE |
                    DTS_SINGLE_THREADED_MODE |
                    DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
           avctx->codec->name);

    avctx->pix_fmt = PIX_FMT_YUYV422;

    /* Initialize the library */
    priv               = avctx->priv_data;
    priv->avctx        = avctx;
    priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
    priv->last_picture = -1;
    priv->decode_wait  = BASE_WAIT;

    subtype = id2subtype(priv, avctx->codec->id);
    switch (subtype) {
    case BC_MSUBTYPE_AVC1:
        {
            uint8_t *dummy_p;
            int dummy_int;
            AVBitStreamFilterContext *bsfc;

            uint32_t orig_data_size = avctx->extradata_size;
            uint8_t *orig_data = av_malloc(orig_data_size);
            if (!orig_data) {
                av_log(avctx, AV_LOG_ERROR,
                       "Failed to allocate copy of extradata\n");
                return AVERROR(ENOMEM);
            }
            memcpy(orig_data, avctx->extradata, orig_data_size);


            bsfc = av_bitstream_filter_init("h264_mp4toannexb");
            if (!bsfc) {
                av_log(avctx, AV_LOG_ERROR,
                       "Cannot open the h264_mp4toannexb BSF!\n");
                av_free(orig_data);
                return AVERROR_BSF_NOT_FOUND;
            }
            av_bitstream_filter_filter(bsfc, avctx, NULL, &dummy_p,
                                       &dummy_int, NULL, 0, 0);
            av_bitstream_filter_close(bsfc);

            priv->sps_pps_buf     = avctx->extradata;
            priv->sps_pps_size    = avctx->extradata_size;
            avctx->extradata      = orig_data;
            avctx->extradata_size = orig_data_size;

            format.pMetaData   = priv->sps_pps_buf;
            format.metaDataSz  = priv->sps_pps_size;
            format.startCodeSz = (avctx->extradata[4] & 0x03) + 1;
        }
        break;
    case BC_MSUBTYPE_H264:
        format.startCodeSz = 4;
        // Fall-through
    case BC_MSUBTYPE_VC1:
    case BC_MSUBTYPE_WVC1:
    case BC_MSUBTYPE_WMV3:
    case BC_MSUBTYPE_WMVA:
    case BC_MSUBTYPE_MPEG2VIDEO:
    case BC_MSUBTYPE_DIVX:
    case BC_MSUBTYPE_DIVX311:
        format.pMetaData  = avctx->extradata;
        format.metaDataSz = avctx->extradata_size;
        break;
    default:
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
        return AVERROR(EINVAL);
    }
    format.mSubtype = subtype;

    if (priv->sWidth) {
        format.bEnableScaling = 1;
        format.ScalingParams.sWidth = priv->sWidth;
    }

    /* Get a decoder instance */
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
    // Initialize the Link and Decoder devices
    ret = DtsDeviceOpen(&priv->dev, mode);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
        goto fail;
    }

    ret = DtsCrystalHDVersion(priv->dev, &version);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: DtsCrystalHDVersion failed\n");
        goto fail;
    }
    priv->is_70012 = version.device == 0;

    if (priv->is_70012 &&
        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311)) {
        av_log(avctx, AV_LOG_VERBOSE,
               "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
        goto fail;
    }

    ret = DtsSetInputFormat(priv->dev, &format);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
        goto fail;
    }

    ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
        goto fail;
    }

    ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
        goto fail;
    }
    ret = DtsStartDecoder(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
        goto fail;
    }
    ret = DtsStartCapture(priv->dev);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
        goto fail;
    }

    if (avctx->codec->id == CODEC_ID_H264) {
        priv->parser = av_parser_init(avctx->codec->id);
        if (!priv->parser)
            av_log(avctx, AV_LOG_WARNING,
                   "Cannot open the h.264 parser! Interlaced h.264 content "
                   "will not be detected reliably.\n");
    }
    av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

    return 0;

 fail:
    uninit(avctx);
    return -1;
}
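
The AVC1 case above relies on two packaging details worth spelling out: an MP4 "avc1" track stores its extradata as an avcC box whose fifth byte carries the NAL length-field size, and calling h264_mp4toannexb once on an empty packet has the widely used side effect of rewriting avctx->extradata into Annex-B SPS/PPS, which init() hands to the hardware as pMetaData before restoring the original extradata. A minimal sketch of the avcC parse, assuming a well-formed box; avcc_nal_length_size is a hypothetical helper, not an FFmpeg API:

#include <stdint.h>

/*
 * Sketch: recover the per-NAL length-field size from an avcC payload.
 * This is the same value init() computes inline as
 * (avctx->extradata[4] & 0x03) + 1.
 */
static int avcc_nal_length_size(const uint8_t *extradata, int size)
{
    /* avcC layout: version(1) profile(1) compat(1) level(1), then
     * 6 reserved bits and a 2-bit lengthSizeMinusOne field. */
    if (size < 5 || extradata[0] != 1)
        return -1;                    /* not avcC; likely already Annex B */
    return (extradata[4] & 0x03) + 1; /* each NAL carries 1, 2 or 4 length bytes */
}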


static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size)
{
    BC_STATUS ret;
    BC_DTS_STATUS decoder_status;
    uint8_t trust_interlaced;
    uint8_t interlaced;

    CHDContext *priv = avctx->priv_data;
    int64_t pkt_pts  = AV_NOPTS_VALUE;
    uint8_t pic_type = 0;

    uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
                           VDEC_FLAG_BOTTOMFIELD;
    uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

    int width    = output->PicInfo.width;
    int height   = output->PicInfo.height;
    int bwidth;
    uint8_t *src = output->Ybuff;
    int sStride;
    uint8_t *dst;
    int dStride;

    if (output->PicInfo.timeStamp != 0) {
        OpaqueList *node = opaque_list_pop(priv, output->PicInfo.timeStamp);
        if (node) {
            pkt_pts = node->reordered_opaque;
            pic_type = node->pic_type;
            av_free(node);
        } else {
            /*
             * We will encounter a situation where a timestamp cannot be
             * popped if a second field is being returned. In this case,
             * each field has the same timestamp and the first one will
             * cause it to be popped. To keep subsequent calculations
             * simple, pic_type should be set to a FIELD value - doesn't
             * matter which, but I chose BOTTOM.
             */
            pic_type = PICT_BOTTOM_FIELD;
        }
        av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
               output->PicInfo.timeStamp);
        av_log(avctx, AV_LOG_VERBOSE, "output picture type %d\n",
               pic_type);
    }

    ret = DtsGetDriverStatus(priv->dev, &decoder_status);
    if (ret != BC_STS_SUCCESS) {
        av_log(avctx, AV_LOG_ERROR,
               "CrystalHD: GetDriverStatus failed: %u\n", ret);
        return RET_ERROR;
    }

    /*
     * For most content, we can trust the interlaced flag returned
     * by the hardware, but sometimes we can't. These are the
     * conditions under which we can trust the flag:
     *
     * 1) It's not h.264 content
     * 2) The UNKNOWN_SRC flag is not set
     * 3) We know we're expecting a second field
     * 4) The hardware reports this picture and the next picture
     *    have the same picture number.
     *
     * Note that there can still be interlaced content that will
     * fail this check, if the hardware hasn't decoded the next
     * picture or if there is a corruption in the stream. (In either
     * case a 0 will be returned for the next picture number)
     */
    trust_interlaced = avctx->codec->id != CODEC_ID_H264 ||
                       !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ||
                       priv->need_second_field ||
                       (decoder_status.picNumFlags & ~0x40000000) ==
                       output->PicInfo.picture_number;

    /*
     * If we got a false negative for trust_interlaced on the first field,
     * we will realise our mistake here when we see that the picture number is that
     * of the previous picture. We cannot recover the frame and should discard the
     * second field to keep the correct number of output frames.
     */
    if (output->PicInfo.picture_number == priv->last_picture && !priv->need_second_field) {
        av_log(avctx, AV_LOG_WARNING,
               "Incorrectly guessed progressive frame. Discarding second field\n");
        /* Returning without providing a picture. */
        return RET_OK;
    }

    interlaced = (output->PicInfo.flags & VDEC_FLAG_INTERLACED_SRC) &&
                 trust_interlaced;

    if (!trust_interlaced && (decoder_status.picNumFlags & ~0x40000000) == 0) {
        av_log(avctx, AV_LOG_VERBOSE,
               "Next picture number unknown. Assuming progressive frame.\n");
    }

    av_log(avctx, AV_LOG_VERBOSE, "Interlaced state: %d | trust_interlaced %d\n",
           interlaced, trust_interlaced);

    if (priv->pic.data[0] && !priv->need_second_field)
        avctx->release_buffer(avctx, &priv->pic);

    priv->need_second_field = interlaced && !priv->need_second_field;

    priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
                             FF_BUFFER_HINTS_REUSABLE;
    if (!priv->pic.data[0]) {
        if (avctx->get_buffer(avctx, &priv->pic) < 0) {
            av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
            return RET_ERROR;
        }
    }

    bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
    if (priv->is_70012) {
        int pStride;

        if (width <= 720)
            pStride = 720;
        else if (width <= 1280)
            pStride = 1280;
        else
            pStride = 1920;
        sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
    } else {
        sStride = bwidth;
    }

    dStride = priv->pic.linesize[0];
    dst     = priv->pic.data[0];

    av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

    if (interlaced) {
        int dY = 0;
        int sY = 0;

        height /= 2;
        if (bottom_field) {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
            dY = 1;
        } else {
            av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
            dY = 0;
        }

        for (sY = 0; sY < height; dY++, sY++) {
            memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
            dY++;
        }
    } else {
        av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
    }

    priv->pic.interlaced_frame = interlaced;
    if (interlaced)
        priv->pic.top_field_first = !bottom_first;

    priv->pic.pkt_pts = pkt_pts;

    if (!priv->need_second_field) {
        *data_size       = sizeof(AVFrame);
        *(AVFrame *)data = priv->pic;
    }

    /*
     * Two types of PAFF content have been observed. One form causes the
     * hardware to return a field pair and the other individual fields,
     * even though the input is always individual fields. We must skip
     * copying on the next decode() call to maintain pipeline length in
     * the first case.
     */
    if (!interlaced && (output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) &&
        (pic_type == PICT_TOP_FIELD || pic_type == PICT_BOTTOM_FIELD)) {
        av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
        return RET_SKIP_NEXT_COPY;
    }

    /*
     * Testing has shown that in all cases where we don't want to return the
     * full frame immediately, VDEC_FLAG_UNKNOWN_SRC is set.
     */
    return priv->need_second_field &&
           !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC) ?
           RET_COPY_NEXT_FIELD : RET_OK;
}
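
For interlaced output the copy loop above receives one field at a time and weaves it into the frame: the destination row index advances by two per copied source row, and the bottom field starts one row down. The same weave as a standalone sketch, assuming packed field rows; copy_field is a hypothetical helper, not an FFmpeg API:

#include <stdint.h>
#include <string.h>

/* Sketch: write one field's rows into every second row of a
 * full-height destination plane. */
static void copy_field(uint8_t *dst, int dst_stride,
                       const uint8_t *src, int src_stride,
                       int row_bytes, int field_rows, int bottom_field)
{
    int dY = bottom_field ? 1 : 0; /* the bottom field fills the odd rows */
    int sY;

    for (sY = 0; sY < field_rows; sY++, dY += 2)
        memcpy(dst + dY * dst_stride, src + sY * src_stride, row_bytes);
}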
コード例 #28
0
ファイル: SaveNalu.cpp プロジェクト: enochi/ffmpeg_test
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVPacket        packet;
    int             frameFinished;

    AVDictionary    *optionsDict = NULL;

    if(argc < 2) {
        printf("Please provide a movie file\n");
        return -1;
    }
    char out_file[1024]={0};
    sprintf(out_file,"%s.nalu",argv[1]);
    FILE *fp = fopen(out_file,"wb");
    if(!fp){
        printf("can't open output file:%s\n",out_file);
        return -1;
    }
    // Register all formats and codecs
    av_register_all();

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=av_frame_alloc();
    unsigned char *dummy=NULL;   // output pointer for the filter call
    int dummy_len;
    // Calling h264_mp4toannexb once with an empty packet has the side effect
    // of rewriting pCodecCtx->extradata from avcC format into Annex-B SPS/PPS,
    // which is then written out as the stream header.
    AVBitStreamFilterContext* bsfc = av_bitstream_filter_init("h264_mp4toannexb");
    av_bitstream_filter_filter(bsfc, pCodecCtx, NULL, &dummy, &dummy_len, NULL, 0, 0);
    fwrite(pCodecCtx->extradata,pCodecCtx->extradata_size,1,fp);
    av_bitstream_filter_close(bsfc);
    av_free(dummy);

    // Read packets and write their NAL units to the output file
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                                  &packet);

            // Did we get a video frame?
            if(frameFinished) {
                static bool find_i = false;
                if(!find_i){
                    static unsigned char i_tag[] = {0x65};
                    if(memcmp(i_tag,(packet.data)+4,1) ==0) {
                        find_i = true;
                        printf("find i frame\n");
                    }
                    else {
                        av_free_packet(&packet);
                        continue;
                    }
                }
                char nal_start[]={0,0,0,1};
                fwrite(nal_start,4,1,fp);
                fwrite(packet.data+4,packet.size-4,1,fp);
                printf("write packet size:%d\n",packet.size-4);
            }
        }
        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
    }


    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);
    fclose(fp);
    return 0;
}
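
The av_bitstream_filter_* functions used in this example were later deprecated and removed; since FFmpeg 3.1 the same conversion goes through the AVBSFContext API. A sketch of the per-packet equivalent with error handling abbreviated; in real code the filter context would be created once per stream and reused rather than rebuilt on every call:

static int filter_to_annexb(const AVCodecParameters *par,
                            AVPacket *in, AVPacket *out)
{
    const AVBitStreamFilter *f = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *bsf = NULL;
    int ret;

    if (!f)
        return AVERROR_BSF_NOT_FOUND;
    if ((ret = av_bsf_alloc(f, &bsf)) < 0)
        return ret;
    /* The filter reads the avcC extradata from the stream parameters. */
    if ((ret = avcodec_parameters_copy(bsf->par_in, par)) < 0 ||
        (ret = av_bsf_init(bsf)) < 0 ||
        (ret = av_bsf_send_packet(bsf, in)) < 0)
        goto done;
    ret = av_bsf_receive_packet(bsf, out); /* 0 on success */

done:
    av_bsf_free(&bsf);
    return ret;
}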
コード例 #29
0
static av_cold int init(AVCodecContext *avctx)
{
	CHDContext* priv;
	BC_STATUS ret;
	BC_INFO_CRYSTAL version;
	BC_INPUT_FORMAT format =
	{
		.FGTEnable   = FALSE,
		.Progressive = TRUE,
		.OptFlags    = 0x80000000 | vdecFrameRate59_94 | 0x40,
		.width       = avctx->width,
		.height      = avctx->height,
	};

	BC_MEDIA_SUBTYPE subtype;

	uint32_t mode = DTS_PLAYBACK_MODE |
	                DTS_LOAD_FILE_PLAY_FW |
	                DTS_SKIP_TX_CHK_CPB |
	                DTS_PLAYBACK_DROP_RPT_MODE |
	                DTS_SINGLE_THREADED_MODE |
	                DTS_DFLT_RESOLUTION(vdecRESOLUTION_1080p23_976);

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD Init for %s\n",
	       avctx->codec->name);

	avctx->pix_fmt = PIX_FMT_YUYV422;

	/* Initialize the library */
	priv               = avctx->priv_data;
	priv->avctx        = avctx;
	priv->is_nal       = avctx->extradata_size > 0 && *(avctx->extradata) == 1;
	priv->last_picture = -1;
	priv->decode_wait  = BASE_WAIT;

	subtype = id2subtype(priv, avctx->codec->id);
	switch (subtype)
	{
	case BC_MSUBTYPE_AVC1:
	{
		uint8_t *dummy_p;
		int dummy_int;
		AVBitStreamFilterContext *bsfc;

		uint32_t orig_data_size = avctx->extradata_size;
		uint8_t *orig_data = av_malloc(orig_data_size);
		if (!orig_data)
		{
			av_log(avctx, AV_LOG_ERROR,
			       "Failed to allocate copy of extradata\n");
			return AVERROR(ENOMEM);
		}
		memcpy(orig_data, avctx->extradata, orig_data_size);


		bsfc = av_bitstream_filter_init("h264_mp4toannexb");
		if (!bsfc)
		{
			av_log(avctx, AV_LOG_ERROR,
			       "Cannot open the h264_mp4toannexb BSF!\n");
			av_free(orig_data);
			return AVERROR_BSF_NOT_FOUND;
		}
		av_bitstream_filter_filter(bsfc, avctx, NULL, &dummy_p,
		                           &dummy_int, NULL, 0, 0);
		av_bitstream_filter_close(bsfc);

		priv->sps_pps_buf     = avctx->extradata;
		priv->sps_pps_size    = avctx->extradata_size;
		avctx->extradata      = orig_data;
		avctx->extradata_size = orig_data_size;

		format.pMetaData   = priv->sps_pps_buf;
		format.metaDataSz  = priv->sps_pps_size;
		format.startCodeSz = (avctx->extradata[4] & 0x03) + 1;
	}
	break;
	case BC_MSUBTYPE_H264:
		format.startCodeSz = 4;
		// Fall-through
	case BC_MSUBTYPE_VC1:
	case BC_MSUBTYPE_WVC1:
	case BC_MSUBTYPE_WMV3:
	case BC_MSUBTYPE_WMVA:
	case BC_MSUBTYPE_MPEG2VIDEO:
	case BC_MSUBTYPE_DIVX:
	case BC_MSUBTYPE_DIVX311:
		format.pMetaData  = avctx->extradata;
		format.metaDataSz = avctx->extradata_size;
		break;
	default:
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: Unknown codec name\n");
		return AVERROR(EINVAL);
	}
	format.mSubtype = subtype;

	/* Get a decoder instance */
	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: starting up\n");
	// Initialize the Link and Decoder devices
	ret = DtsDeviceOpen(&priv->dev, mode);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: DtsDeviceOpen failed\n");
		goto fail;
	}

	ret = DtsCrystalHDVersion(priv->dev, &version);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_VERBOSE,
		       "CrystalHD: DtsCrystalHDVersion failed\n");
		goto fail;
	}
	priv->is_70012 = version.device == 0;

	if (priv->is_70012 &&
	        (subtype == BC_MSUBTYPE_DIVX || subtype == BC_MSUBTYPE_DIVX311))
	{
		av_log(avctx, AV_LOG_VERBOSE,
		       "CrystalHD: BCM70012 doesn't support MPEG4-ASP/DivX/Xvid\n");
		goto fail;
	}

	ret = DtsSetInputFormat(priv->dev, &format);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: SetInputFormat failed\n");
		goto fail;
	}

	ret = DtsOpenDecoder(priv->dev, BC_STREAM_TYPE_ES);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsOpenDecoder failed\n");
		goto fail;
	}

	ret = DtsSetColorSpace(priv->dev, OUTPUT_MODE422_YUY2);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsSetColorSpace failed\n");
		goto fail;
	}
	ret = DtsStartDecoder(priv->dev);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartDecoder failed\n");
		goto fail;
	}
	ret = DtsStartCapture(priv->dev);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR, "CrystalHD: DtsStartCapture failed\n");
		goto fail;
	}

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: Init complete.\n");

	return 0;

fail:
	uninit(avctx);
	return -1;
}


/*
 * The CrystalHD doesn't report interlaced H.264 content in a way that allows
 * us to distinguish between specific cases that require different handling.
 * So, for now, we have to hard-code the behaviour we want.
 *
 * The default behaviour is to assume MBAFF with input and output fieldpairs.
 *
 * Define ASSUME_PAFF_OVER_MBAFF to treat input as PAFF with separate input
 * and output fields.
 *
 * Define ASSUME_TWO_INPUTS_ONE_OUTPUT to treat input as separate fields but
 * output as a single fieldpair.
 *
 * Define both to mess up your playback.
 */
#define ASSUME_PAFF_OVER_MBAFF 0
#define ASSUME_TWO_INPUTS_ONE_OUTPUT 0
static inline CopyRet copy_frame(AVCodecContext *avctx,
                                 BC_DTS_PROC_OUT *output,
                                 void *data, int *data_size,
                                 uint8_t second_field)
{
	BC_STATUS ret;
	BC_DTS_STATUS decoder_status;
	uint8_t is_paff;
	uint8_t next_frame_same;
	uint8_t interlaced;

	CHDContext *priv = avctx->priv_data;

	uint8_t bottom_field = (output->PicInfo.flags & VDEC_FLAG_BOTTOMFIELD) ==
	                       VDEC_FLAG_BOTTOMFIELD;
	uint8_t bottom_first = !!(output->PicInfo.flags & VDEC_FLAG_BOTTOM_FIRST);

	int width    = output->PicInfo.width;
	int height   = output->PicInfo.height;
	int bwidth;
	uint8_t *src = output->Ybuff;
	int sStride;
	uint8_t *dst;
	int dStride;

	ret = DtsGetDriverStatus(priv->dev, &decoder_status);
	if (ret != BC_STS_SUCCESS)
	{
		av_log(avctx, AV_LOG_ERROR,
		       "CrystalHD: GetDriverStatus failed: %u\n", ret);
		return RET_ERROR;
	}

	is_paff           = ASSUME_PAFF_OVER_MBAFF ||
	                    !(output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC);
	next_frame_same   = output->PicInfo.picture_number ==
	                    (decoder_status.picNumFlags & ~0x40000000);
	interlaced        = ((output->PicInfo.flags &
	                      VDEC_FLAG_INTERLACED_SRC) && is_paff) ||
	                    next_frame_same || bottom_field || second_field;

	av_log(avctx, AV_LOG_VERBOSE, "CrystalHD: next_frame_same: %u | %u | %u\n",
	       next_frame_same, output->PicInfo.picture_number,
	       decoder_status.picNumFlags & ~0x40000000);

	if (priv->pic.data[0] && !priv->need_second_field)
		avctx->release_buffer(avctx, &priv->pic);

	priv->need_second_field = interlaced && !priv->need_second_field;

	priv->pic.buffer_hints = FF_BUFFER_HINTS_VALID | FF_BUFFER_HINTS_PRESERVE |
	                         FF_BUFFER_HINTS_REUSABLE;
	if (!priv->pic.data[0])
	{
		if (avctx->get_buffer(avctx, &priv->pic) < 0)
		{
			av_log(avctx, AV_LOG_ERROR, "get_buffer() failed\n");
			return RET_ERROR;
		}
	}

	bwidth = av_image_get_linesize(avctx->pix_fmt, width, 0);
	if (priv->is_70012)
	{
		int pStride;

		if (width <= 720)
			pStride = 720;
		else if (width <= 1280)
			pStride = 1280;
		else
			pStride = 1920;
		sStride = av_image_get_linesize(avctx->pix_fmt, pStride, 0);
	}
	else
	{
		sStride = bwidth;
	}

	dStride = priv->pic.linesize[0];
	dst     = priv->pic.data[0];

	av_log(priv->avctx, AV_LOG_VERBOSE, "CrystalHD: Copying out frame\n");

	if (interlaced)
	{
		int dY = 0;
		int sY = 0;

		height /= 2;
		if (bottom_field)
		{
			av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: bottom field\n");
			dY = 1;
		}
		else
		{
			av_log(priv->avctx, AV_LOG_VERBOSE, "Interlaced: top field\n");
			dY = 0;
		}

		for (sY = 0; sY < height; dY++, sY++)
		{
			memcpy(&(dst[dY * dStride]), &(src[sY * sStride]), bwidth);
			dY++;
		}
	}
	else
	{
		av_image_copy_plane(dst, dStride, src, sStride, bwidth, height);
	}

	priv->pic.interlaced_frame = interlaced;
	if (interlaced)
		priv->pic.top_field_first = !bottom_first;

	if (output->PicInfo.timeStamp != 0)
	{
		priv->pic.pkt_pts = opaque_list_pop(priv, output->PicInfo.timeStamp);
		av_log(avctx, AV_LOG_VERBOSE, "output \"pts\": %"PRIu64"\n",
		       priv->pic.pkt_pts);
	}

	if (!priv->need_second_field)
	{
		*data_size       = sizeof(AVFrame);
		*(AVFrame *)data = priv->pic;
	}

	if (ASSUME_TWO_INPUTS_ONE_OUTPUT &&
	        output->PicInfo.flags & VDEC_FLAG_UNKNOWN_SRC)
	{
		av_log(priv->avctx, AV_LOG_VERBOSE, "Fieldpair from two packets.\n");
		return RET_SKIP_NEXT_COPY;
	}

	return RET_OK;
}
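
Both CrystalHD variants hide the same bookkeeping behind opaque_list_pop(): a small FIFO keyed by the timestamp handed to the driver, so the packet pts can be reattached when the decoded picture comes back later, possibly out of order. A sketch of that structure, assuming unique keys per submitted packet; TsNode, ts_push and ts_pop are hypothetical names, not the crystalhd.c internals:

#include <stdint.h>
#include <stdlib.h>

typedef struct TsNode {
    uint64_t key;          /* timestamp passed to the driver */
    int64_t  pts;          /* AVPacket pts to restore on output */
    struct TsNode *next;
} TsNode;

static int ts_push(TsNode **head, uint64_t key, int64_t pts)
{
    TsNode *n = malloc(sizeof(*n));
    if (!n)
        return -1;
    n->key  = key;
    n->pts  = pts;
    n->next = *head;
    *head   = n;
    return 0;
}

static int64_t ts_pop(TsNode **head, uint64_t key)
{
    TsNode **p;

    for (p = head; *p; p = &(*p)->next) {
        if ((*p)->key == key) {
            TsNode *n   = *p;
            int64_t pts = n->pts;
            *p = n->next;
            free(n);
            return pts;
        }
    }
    return AV_NOPTS_VALUE; /* key already consumed, e.g. by the first field */
}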
コード例 #30
0
ファイル: muxer.cpp プロジェクト: jason860306/ffmpeg
int _tmain(int argc, _TCHAR* argv[])
{
	if (argc != 4)
	{
		printf("Usage: %s in_fname_v in_fname_a out_fname\n");
		return -1;
	}
	AVOutputFormat *p_ofmt = NULL;
	///< Input AVFormatContext and Output AVFormatContext
	AVFormatContext *p_ifmt_ctx_v = NULL, *p_ifmt_ctx_a = NULL, *p_ofmt_ctx = NULL;
	AVPacket pkt;

	int ret, i;
	int video_idx_v = -1, video_idx_out = -1;
	int audio_idx_a = -1, audio_idx_out = -1;
	int frame_idx = 0;
	int64_t cur_pts_v = 0, cur_pts_a = 0;

	const char *p_in_fname_v = argv[1], *p_in_fname_a = argv[2], *p_out_fname = argv[3];

	av_register_all();

	///< Input
	if ((ret = avformat_open_input(&p_ifmt_ctx_v, p_in_fname_v, NULL, NULL)) < 0)
	{
		printf("Could not open input file(: %s).\n", p_in_fname_v);
		goto end;
	}
	if ((ret = avformat_find_stream_info(p_ifmt_ctx_v, NULL)) < 0)
	{
		printf("Failed to retrieve input stream information.\n");
		goto end;
	}

	if ((ret = avformat_open_input(&p_ifmt_ctx_a, p_in_fname_a, NULL, NULL)) < 0)
	{
		printf("Could not open input file.\n");
		goto end;
	}
	if ((ret = avformat_find_stream_info(p_ifmt_ctx_a, NULL)) < 0)
	{
		printf("Failed to retrieve input stream information.\n");
		goto end;
	}
	printf("=========Input Information=========\n");
	av_dump_format(p_ifmt_ctx_v, 0, p_in_fname_v, 0);
	av_dump_format(p_ifmt_ctx_a, 0, p_in_fname_a, 0);
	printf("===================================\n");

	///< Output
	avformat_alloc_output_context2(&p_ofmt_ctx, NULL, NULL, p_out_fname);
	if (NULL == p_ofmt_ctx)
	{
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	p_ofmt = p_ofmt_ctx->oformat;

	for (i = 0; i < (int)p_ifmt_ctx_v->nb_streams; ++i)
	{
		///< Create output AVStream according to input AVStream
		if (AVMEDIA_TYPE_VIDEO == p_ifmt_ctx_v->streams[i]->codec->codec_type)
		{
			AVStream *p_in_strm = p_ifmt_ctx_v->streams[i];
			AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx,
				p_in_strm->codec->codec);
			video_idx_v = i;
			if (NULL == p_out_strm)
			{
				printf("Failed allocating output stream.\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			video_idx_out = p_out_strm->index;

			///< Copy the settings of AVCodecContext
			if ((ret = avcodec_copy_context(p_out_strm->codec, p_in_strm->codec)) < 0)
			{
				printf("Failed to copy context from input to output"
					" stream codec context.\n");
				goto end;
			}
			p_out_strm->codec->codec_tag = 0;
			if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			break;
		}
	}

	for (i = 0; i < (int)p_ifmt_ctx_a->nb_streams; ++i)
	{
		///< Create output AVStream according to input AVStream
		if (AVMEDIA_TYPE_AUDIO == p_ifmt_ctx_a->streams[i]->codec->codec_type)
		{
			AVStream *p_in_strm = p_ifmt_ctx_a->streams[i];
			AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx,
				p_in_strm->codec->codec);
			audio_idx_a = i;
			if (NULL == p_out_strm)
			{
				printf("Failed allocating output stream.\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			audio_idx_out = p_out_strm->index;

			///< Copy the settings of AVCodecContext
			if ((ret = avcodec_copy_context(p_out_strm->codec, p_in_strm->codec)) < 0)
			{
				printf("Failed to copy context from input to "
					"output stream codec context.\n");
				goto end;
			}
			p_out_strm->codec->codec_tag = 0;
			if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			break;
		}
	}

	printf("=========Output Information=========\n");
	av_dump_format(p_ofmt_ctx, 0, p_out_fname, 1);
	printf("====================================\n");

	///< Open output file
	if (!(p_ofmt->flags & AVFMT_NOFILE))
	{
		if ((ret = avio_open(&p_ofmt_ctx->pb, p_out_fname, AVIO_FLAG_WRITE)) < 0)
		{
			printf("Could not open output file '%s'.\n", p_out_fname);
			goto end;
		}
	}
	///< Write file header
	if ((ret = avformat_write_header(p_ofmt_ctx, NULL)) < 0)
	{
		printf("Error occurred when opening output file.\n");
		goto end;
	}

	///< FIX
#if USE_H264BSF
	AVBitStreamFilterContext *p_h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

#if USE_AACBSF
	AVBitStreamFilterContext *p_aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

	while (true)
	{
		AVFormatContext *p_ifmt_ctx;
		int strm_idx = 0;
		AVStream *p_in_strm, *p_out_strm;

		///< Get an AVPacket
		if (av_compare_ts(cur_pts_v, p_ifmt_ctx_v->streams[video_idx_v]->time_base,
			cur_pts_a, p_ifmt_ctx_a->streams[audio_idx_a]->time_base) <= 0)
		{
			p_ifmt_ctx = p_ifmt_ctx_v;
			strm_idx = video_idx_out;

			if (av_read_frame(p_ifmt_ctx, &pkt) >= 0)
			{
				do 
				{
					p_in_strm = p_ifmt_ctx->streams[pkt.stream_index];
					p_out_strm = p_ofmt_ctx->streams[strm_idx];

					if (pkt.stream_index == video_idx_v)
					{
						///< FIX: No PTS (Example: Raw H.264)
						///< Simple Write PTS
						if (pkt.pts == AV_NOPTS_VALUE)
						{
							///< Write PTS
							AVRational time_base1 = p_in_strm->time_base;
							///< Duration between 2 frames (us)
							int64_t calc_duration = (int64_t)((double)AV_TIME_BASE /
								av_q2d(p_in_strm->r_frame_rate));
							///< Parameters
							pkt.pts = (int64_t)((double)(frame_idx * calc_duration) /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							pkt.dts = pkt.pts;
							pkt.duration = (int)((double)calc_duration /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							++frame_idx;
						}
						cur_pts_v = pkt.pts;
						break;
					}
					av_free_packet(&pkt);   ///< Drop packets from other streams
				} while (av_read_frame(p_ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				break;
			}
		}
		else
		{
			p_ifmt_ctx = p_ifmt_ctx_a;
			strm_idx = audio_idx_out;
			if (av_read_frame(p_ifmt_ctx, &pkt) >= 0)
			{
				do 
				{
					p_in_strm = p_ifmt_ctx->streams[pkt.stream_index];
					p_out_strm = p_ofmt_ctx->streams[strm_idx];

					if (pkt.stream_index == audio_idx_a)
					{
						///< FIX: No PTS
						///< Simple Write PTS
						if (pkt.pts == AV_NOPTS_VALUE)
						{
							///< Write PTS
							AVRational time_base1 = p_in_strm->time_base;
							///< Duration between 2 frames (us)
							int64_t calc_duration = (int64_t)((double)AV_TIME_BASE /
								av_q2d(p_in_strm->r_frame_rate));
							///< Parameters
							pkt.pts = (int64_t)((double)(frame_idx * calc_duration) /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							pkt.dts = pkt.pts;
							pkt.duration = (int)((double)calc_duration /
								(double)(av_q2d(time_base1)* AV_TIME_BASE));
							++frame_idx;
						}
						cur_pts_a = pkt.pts;
						break;
					}
					av_free_packet(&pkt);   ///< Drop packets from other streams
				} while (av_read_frame(p_ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				break;
			}
		}

		///< FIX: Bitstream Filter
#if USE_H264BSF
		av_bitstream_filter_filter(p_h264bsfc, p_in_strm->codec, NULL,
			&pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

#if USE_AACBSF
		av_bitstream_filter_filter(p_aacbsfc, p_out_strm->codec, NULL,
			&pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

		///< Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, p_in_strm->time_base,
			p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, p_in_strm->time_base,
			p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.duration = (int)av_rescale_q(pkt.duration, p_in_strm->time_base, p_out_strm->time_base);
		pkt.pos = -1;
		pkt.stream_index = strm_idx;

		printf("Write 1 Packet. size: %5d\tpts: %11d\n", pkt.size, pkt.pts);
		///< Write
		if (av_interleaved_write_frame(p_ofmt_ctx, &pkt) < 0)
		{
			printf("Error muxing packet.\n");
			break;
		}
		av_free_packet(&pkt);
	}

	///< Write file trailer
	av_write_trailer(p_ofmt_ctx);

#if USE_H264BSF
	av_bitstream_filter_close(p_h264bsfc);
#endif

#if USE_AACBSF
	av_bitstream_filter_close(p_aacbsfc);
#endif

end:
	avformat_close_input(&p_ifmt_ctx_v);
	avformat_close_input(&p_ifmt_ctx_a);

	///< close output
	if (p_ofmt_ctx && !(p_ofmt->flags & AVFMT_NOFILE))
	{
		avio_close(p_ofmt_ctx->pb);
	}
	avformat_free_context(p_ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF)
	{
		printf("Error occurred.\n");
		return -1;
	}

	return 0;
}
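
The read order in the loop above reduces to one comparison: av_compare_ts() rescales both timestamps to a common base internally, so streams with different time_base values can be ordered directly. The decision as a minimal sketch; video_is_next is a hypothetical helper, not part of the sample:

///< Nonzero when the video stream should be read next to keep the output
///< interleaved; ties go to video, matching the <= 0 test in the loop.
static int video_is_next(int64_t pts_v, AVRational tb_v,
                         int64_t pts_a, AVRational tb_a)
{
	return av_compare_ts(pts_v, tb_v, pts_a, tb_a) <= 0;
}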