static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *c = s->priv_data;
    int ret, i, minvariant = -1;

    if (c->first_packet) {
        recheck_discard_flags(s, 1);
        c->first_packet = 0;
    }

start:
    c->end_of_segment = 0;
    for (i = 0; i < c->n_variants; i++) {
        struct variant *var = c->variants[i];
        /* Make sure we've got one buffered packet from each open variant
         * stream */
        if (var->needed && !var->pkt.data) {
            while (1) {
                int64_t ts_diff;
                AVStream *st;
                ret = av_read_frame(var->ctx, &var->pkt);
                if (ret < 0) {
                    if (!url_feof(&var->pb) && ret != AVERROR_EOF)
                        return ret;
                    reset_packet(&var->pkt);
                    break;
                } else {
                    if (c->first_timestamp == AV_NOPTS_VALUE)
                        c->first_timestamp = var->pkt.dts;
                }

                if (c->seek_timestamp == AV_NOPTS_VALUE)
                    break;

                if (var->pkt.dts == AV_NOPTS_VALUE) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }

                st = var->ctx->streams[var->pkt.stream_index];
                ts_diff = av_rescale_rnd(var->pkt.dts, AV_TIME_BASE,
                                         st->time_base.den, AV_ROUND_DOWN) -
                          c->seek_timestamp;
                if (ts_diff >= 0 && (c->seek_flags  & AVSEEK_FLAG_ANY ||
                                     var->pkt.flags & AV_PKT_FLAG_KEY)) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }
            }
        }
        /* Check if this stream has the packet with the lowest dts */
        if (var->pkt.data) {
            if(minvariant < 0) {
                minvariant = i;
            } else {
                struct variant *minvar = c->variants[minvariant];
                int64_t dts    =    var->pkt.dts;
                int64_t mindts = minvar->pkt.dts;
                AVStream *st   =    var->ctx->streams[   var->pkt.stream_index];
                AVStream *minst= minvar->ctx->streams[minvar->pkt.stream_index];

                if(   st->start_time != AV_NOPTS_VALUE)    dts -=    st->start_time;
                if(minst->start_time != AV_NOPTS_VALUE) mindts -= minst->start_time;

                if (av_compare_ts(dts, st->time_base, mindts, minst->time_base) < 0)
                    minvariant = i;
            }
        }
    }
    if (c->end_of_segment) {
        if (recheck_discard_flags(s, 0))
            goto start;
    }
    /* If we got a packet, return it */
    if (minvariant >= 0) {
        *pkt = c->variants[minvariant]->pkt;
        pkt->stream_index += c->variants[minvariant]->stream_offset;
        reset_packet(&c->variants[minvariant]->pkt);
        return 0;
    }
    return AVERROR_EOF;
}
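
Every example on this page ultimately leans on av_compare_ts() to order timestamps that live in different time bases. As a quick orientation, here is a minimal, self-contained sketch of its semantics (the time bases and values are illustrative only):

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    /* 1.5 s expressed in a 90 kHz MPEG-TS time base ... */
    int64_t    ts_a = 135000;
    AVRational tb_a = { 1, 90000 };
    /* ... versus 1.4 s expressed in a 48 kHz audio time base. */
    int64_t    ts_b = 67200;
    AVRational tb_b = { 1, 48000 };

    /* av_compare_ts() returns -1, 0 or 1, computed exactly in integer
     * arithmetic; here ts_a (1.5 s) is the later one, so this prints 1. */
    printf("%d\n", av_compare_ts(ts_a, tb_a, ts_b, tb_b));
    return 0;
}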
Example #2
int _tmain(int argc, _TCHAR* argv[])
{
	if (argc != 4)
	{
		printf("Usage: %s in_fname_v in_fname_a out_fname\n");
		return -1;
	}
	AVOutputFormat *p_ofmt = NULL;
	///< Input AVFormatContext and Output AVFormatContext
	AVFormatContext *p_ifmt_ctx_v = NULL, *p_ifmt_ctx_a = NULL, *p_ofmt_ctx = NULL;
	AVPacket pkt;

	int ret, i;
	int video_idx_v = -1, video_idx_out = -1;
	int audio_idx_a = -1, audio_idx_out = -1;
	int frame_idx = 0;
	int64_t cur_pts_v = 0, cur_pts_a = 0;

	const char *p_in_fname_v = argv[1], *p_in_fname_a = argv[2], *p_out_fname = argv[3];

	av_register_all();

	///< Input
	if ((ret = avformat_open_input(&p_ifmt_ctx_v, p_in_fname_v, NULL, NULL)) < 0)
	{
		printf("Could not open input file(: %s).\n", p_in_fname_v);
		goto end;
	}
	if ((ret = avformat_find_stream_info(p_ifmt_ctx_v, NULL)) < 0)
	{
		printf("Failed to retrieve input stream information.\n");
		goto end;
	}

	if ((ret = avformat_open_input(&p_ifmt_ctx_a, p_in_fname_a, NULL, NULL)) < 0)
	{
		printf("Could not open input file.\n");
		goto end;
	}
	if ((ret = avformat_find_stream_info(p_ifmt_ctx_a, NULL)) < 0)
	{
		printf("Failed to retrieve input stream information.\n");
		goto end;
	}
	printf("=========Input Information=========\n");
	av_dump_format(p_ifmt_ctx_v, 0, p_in_fname_v, 0);
	av_dump_format(p_ifmt_ctx_a, 0, p_in_fname_a, 0);
	printf("===================================\n");

	///< Output
	avformat_alloc_output_context2(&p_ofmt_ctx, NULL, NULL, p_out_fname);
	if (NULL == p_ofmt_ctx)
	{
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	p_ofmt = p_ofmt_ctx->oformat;

	for (i = 0; i < (int)p_ifmt_ctx_v->nb_streams; ++i)
	{
		///< Create output AVStream according to input AVStream
		if (AVMEDIA_TYPE_VIDEO == p_ifmt_ctx_v->streams[i]->codec->codec_type)
		{
			AVStream *p_in_strm = p_ifmt_ctx_v->streams[i];
			AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx,
				p_in_strm->codec->codec);
			video_idx_v = i;
			if (NULL == p_out_strm)
			{
				printf("Failed allocating output stream.\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			video_idx_out = p_out_strm->index;

			///< Copy the settings of AVCodecContext
			if (avcodec_copy_context(p_out_strm->codec, p_in_strm->codec) < 0)
			{
				printf("Failed to copy context from input to output"
					" stream codec context.\n");
				goto end;
			}
			p_out_strm->codec->codec_tag = 0;
			if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			break;
		}
	}

	for (i = 0; i < (int)p_ifmt_ctx_a->nb_streams; ++i)
	{
		///< Create output AVStream according to input AVStream
		if (AVMEDIA_TYPE_AUDIO == p_ifmt_ctx_a->streams[i]->codec->codec_type)
		{
			AVStream *p_in_strm = p_ifmt_ctx_a->streams[i];
			AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx,
				p_in_strm->codec->codec);
			audio_idx_a = i;
			if (NULL == p_out_strm)
			{
				printf("Failed allocating output stream.\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			audio_idx_out = p_out_strm->index;

			///< Copy the settings of AVCodecContext
			if (avcodec_copy_context(p_out_strm->codec, p_in_strm->codec) < 0)
			{
				printf("Failed to copy context from intput to "
					"output stream codec context.\n");
				goto end;
			}
			p_out_strm->codec->codec_tag = 0;
			if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			break;
		}
	}

	printf("=========Output Information=========\n");
	av_dump_format(p_ofmt_ctx, 0, p_out_fname, 1);
	printf("====================================\n");

	///< Open output file
	if (!(p_ofmt->flags & AVFMT_NOFILE))
	{
		if (avio_open(&p_ofmt_ctx->pb, p_out_fname, AVIO_FLAG_WRITE) < 0)
		{
			printf("Could not open output file '%s'", p_out_fname);
			goto end;
		}
	}
	///< Write file header
	if ((ret = avformat_write_header(p_ofmt_ctx, NULL)) < 0)
	{
		printf("Error occurred when opening output file.\n");
		goto end;
	}

	///< FIX
#if USE_H264BSF
	AVBitStreamFilterContext *p_h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

#if USE_AACBSF
	AVBitStreamFilterContext *p_aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

	while (true)
	{
		AVFormatContext *p_ifmt_ctx;
		int strm_idx = 0;
		AVStream *p_in_strm, *p_out_strm;

		///< Get an AVPacket
		if (av_compare_ts(cur_pts_v, p_ifmt_ctx_v->streams[video_idx_v]->time_base,
			cur_pts_a, p_ifmt_ctx_a->streams[audio_idx_a]->time_base) <= 0)
		{
			p_ifmt_ctx = p_ifmt_ctx_v;
			strm_idx = video_idx_out;

			if (av_read_frame(p_ifmt_ctx, &pkt) >= 0)
			{
				do 
				{
					p_in_strm = p_ifmt_ctx->streams[pkt.stream_index];
					p_out_strm = p_ofmt_ctx->streams[strm_idx];

					if (pkt.stream_index == video_idx_v)
					{
						///< FIX: No PTS (Example: Raw H.264)
						///< Simple Write PTS
						if (pkt.pts == AV_NOPTS_VALUE)
						{
							///< Write PTS
							AVRational time_base1 = p_in_strm->time_base;
							///< Duration between 2 frames (us)
							int64_t calc_duration = (int64_t)((double)AV_TIME_BASE /
								av_q2d(p_in_strm->r_frame_rate));
							///< Parameters
							pkt.pts = (int64_t)((double)(frame_idx * calc_duration) /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							pkt.dts = pkt.pts;
							pkt.duration = (int)((double)calc_duration /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							++frame_idx;
						}
						cur_pts_v = pkt.pts;
						break;
					}
				} while (av_read_frame(p_ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				break;
			}
		}
		else
		{
			p_ifmt_ctx = p_ifmt_ctx_a;
			strm_idx = audio_idx_out;
			if (av_read_frame(p_ifmt_ctx, &pkt) >= 0)
			{
				do 
				{
					p_in_strm = p_ifmt_ctx->streams[pkt.stream_index];
					p_out_strm = p_ofmt_ctx->streams[strm_idx];

					if (pkt.stream_index == audio_idx_a)
					{
						///< FIX: No PTS
						///< Simple Write PTS
						if (pkt.pts == AV_NOPTS_VALUE)
						{
							///< Write PTS
							AVRational time_base1 = p_in_strm->time_base;
							///< Duration between 2 frames (us)
							int64_t calc_duration = (int64_t)((double)AV_TIME_BASE /
								av_q2d(p_in_strm->r_frame_rate));
							///< Parameters
							pkt.pts = (int64_t)((double)(frame_idx * calc_duration) /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							pkt.dts = pkt.pts;
							pkt.duration = (int)((double)calc_duration /
								(double)(av_q2d(time_base1)* AV_TIME_BASE));
							++frame_idx;
						}
						cur_pts_a = pkt.pts;
						break;
					}
				} while (av_read_frame(p_ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				break;
			}
		}

		///< FIX: Bitstream Filter
#if USE_H264BSF
		av_bitstream_filter_filter(p_h264bsfc, p_in_strm->codec, NULL,
			&pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

#if USE_AACBSF
		av_bitstream_filter_filter(p_aacbsfc, p_out_strm->codec, NULL,
			&pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

		///< Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, p_in_strm->time_base,
			p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, p_in_strm->time_base,
			p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.duration = (int)av_rescale_q(pkt.duration, p_in_strm->time_base, p_out_strm->time_base);
		pkt.pos = -1;
		pkt.stream_index = strm_idx;

		printf("Write 1 Packet. size: %5d\tpts: %11d\n", pkt.size, pkt.pts);
		///< Write
		if (av_interleaved_write_frame(p_ofmt_ctx, &pkt) < 0)
		{
			printf("Error muxing packet.\n");
			break;
		}
		av_free_packet(&pkt);
	}

	///< Write file trailer
	av_write_trailer(p_ofmt_ctx);

#if USE_H264BSF
	av_bitstream_filter_close(p_h264bsfc);
#endif

#if USE_AACBSF
	av_bitstream_filter_close(p_aacbsfc);
#endif

end:
	avformat_close_input(&p_ifmt_ctx_v);
	avformat_close_input(&p_ifmt_ctx_a);

	///< close output
	if (p_ofmt_ctx && !(p_ofmt->flags & AVFMT_NOFILE))
	{
		avio_close(p_ofmt_ctx->pb);
	}
	avformat_free_context(p_ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF)
	{
		printf("Error occurred.\n");
		return -1;
	}

	return 0;
}
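
Note that avcodec_copy_context() and the AVStream.codec field used above are deprecated in current FFmpeg. A minimal sketch of the same stream setup with the AVCodecParameters API (the helper name is hypothetical):

#include <libavformat/avformat.h>

/* Hypothetical helper: mirror an input stream on the output side using
 * AVCodecParameters instead of the deprecated per-stream codec context. */
static int add_output_stream(AVFormatContext *p_ofmt_ctx,
                             const AVStream *p_in_strm, AVStream **p_out)
{
    int ret;
    AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx, NULL);
    if (!p_out_strm)
        return AVERROR(ENOMEM);
    ret = avcodec_parameters_copy(p_out_strm->codecpar, p_in_strm->codecpar);
    if (ret < 0)
        return ret;
    p_out_strm->codecpar->codec_tag = 0; /* let the muxer choose the tag */
    *p_out = p_out_strm;
    return 0;
}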
Example #3
static int wav_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    int ret, size;
    int64_t left;
    AVStream *st;
    WAVDemuxContext *wav = s->priv_data;

    if (CONFIG_SPDIF_DEMUXER && wav->spdif == 0 &&
        s->streams[0]->codec->codec_tag == 1) {
        enum AVCodecID codec;
        ret = ff_spdif_probe(s->pb->buffer, s->pb->buf_end - s->pb->buffer,
                             &codec);
        if (ret > AVPROBE_SCORE_EXTENSION) {
            s->streams[0]->codec->codec_id = codec;
            wav->spdif = 1;
        } else {
            wav->spdif = -1;
        }
    }
    if (CONFIG_SPDIF_DEMUXER && wav->spdif == 1)
        return ff_spdif_read_packet(s, pkt);

    if (wav->smv_data_ofs > 0) {
        int64_t audio_dts, video_dts;
smv_retry:
        audio_dts = s->streams[0]->cur_dts;
        video_dts = s->streams[1]->cur_dts;

        if (audio_dts != AV_NOPTS_VALUE && video_dts != AV_NOPTS_VALUE) {
            /*We always return a video frame first to get the pixel format first*/
            wav->smv_last_stream = wav->smv_given_first ?
                av_compare_ts(video_dts, s->streams[1]->time_base,
                              audio_dts, s->streams[0]->time_base) > 0 : 0;
            wav->smv_given_first = 1;
        }
        wav->smv_last_stream = !wav->smv_last_stream;
        wav->smv_last_stream |= wav->audio_eof;
        wav->smv_last_stream &= !wav->smv_eof;
        if (wav->smv_last_stream) {
            uint64_t old_pos = avio_tell(s->pb);
            uint64_t new_pos = wav->smv_data_ofs +
                wav->smv_block * wav->smv_block_size;
            if (avio_seek(s->pb, new_pos, SEEK_SET) < 0) {
                ret = AVERROR_EOF;
                goto smv_out;
            }
            size = avio_rl24(s->pb);
            ret  = av_get_packet(s->pb, pkt, size);
            if (ret < 0)
                goto smv_out;
            pkt->pos -= 3;
            pkt->pts = wav->smv_block * wav->smv_frames_per_jpeg + wav->smv_cur_pt;
            wav->smv_cur_pt++;
            if (wav->smv_frames_per_jpeg > 0)
                wav->smv_cur_pt %= wav->smv_frames_per_jpeg;
            if (!wav->smv_cur_pt)
                wav->smv_block++;

            pkt->stream_index = 1;
smv_out:
            avio_seek(s->pb, old_pos, SEEK_SET);
            if (ret == AVERROR_EOF) {
                wav->smv_eof = 1;
                goto smv_retry;
            }
            return ret;
        }
    }

    st = s->streams[0];

    left = wav->data_end - avio_tell(s->pb);
    if (wav->ignore_length)
        left = INT_MAX;
    if (left <= 0) {
        if (CONFIG_W64_DEMUXER && wav->w64)
            left = find_guid(s->pb, ff_w64_guid_data) - 24;
        else
            left = find_tag(s->pb, MKTAG('d', 'a', 't', 'a'));
        if (left < 0) {
            wav->audio_eof = 1;
            if (wav->smv_data_ofs > 0 && !wav->smv_eof)
                goto smv_retry;
            return AVERROR_EOF;
        }
        wav->data_end = avio_tell(s->pb) + left;
    }

    size = MAX_SIZE;
    if (st->codec->block_align > 1) {
        if (size < st->codec->block_align)
            size = st->codec->block_align;
        size = (size / st->codec->block_align) * st->codec->block_align;
    }
    size = FFMIN(size, left);
    ret  = av_get_packet(s->pb, pkt, size);
    if (ret < 0)
        return ret;
    pkt->stream_index = 0;

    return ret;
}
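
The SMV branch above alternates between the two substreams by comparing their current DTS values with av_compare_ts(). Isolated, the decision is a one-liner; this sketch assumes the same stream layout as the demuxer (audio at index 0, video at index 1):

#include <libavformat/avformat.h>

/* Return nonzero when the video stream is due no later than the audio
 * stream, judged by the last DTS seen on each. */
static int video_is_due(AVFormatContext *s, int64_t audio_dts, int64_t video_dts)
{
    return av_compare_ts(video_dts, s->streams[1]->time_base,
                         audio_dts, s->streams[0]->time_base) <= 0;
}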
Example #4
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVCodec *audio_codec, *video_codec;
    int ret;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;
    AVDictionary *opt = NULL;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc < 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "This program generates a synthetic audio and video stream, encodes and\n"
               "muxes them into a file named output_file.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename.\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];
    if (argc > 3 && !strcmp(argv[2], "-flags")) {
        av_dict_set(&opt, argv[2]+1, argv[3], 0);
    }

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc)
        return 1;

    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_stream(&video_st, oc, &video_codec, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_stream(&audio_st, oc, &audio_codec, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, video_codec, &video_st, opt);

    if (have_audio)
        open_audio(oc, audio_codec, &audio_st, opt);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open '%s': %s\n", filename,
                    av_err2str(ret));
            return 1;
        }
    }

    /* Write the stream header, if any. */
    ret = avformat_write_header(oc, &opt);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file: %s\n",
                av_err2str(ret));
        return 1;
    }

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
                (!encode_audio || av_compare_ts(video_st.next_pts, video_st.st->codec->time_base,
                                                audio_st.next_pts, audio_st.st->codec->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !write_audio_frame(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
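
The stream-selection rule in the encode loop above is worth isolating: video is encoded next while it is still wanted and its next timestamp does not trail the audio's. A sketch of that predicate (parameter names are illustrative):

#include <libavutil/mathematics.h>

/* Mirrors the if-condition of the muxing loop above. */
static int video_goes_next(int encode_video, int encode_audio,
                           int64_t v_next_pts, AVRational v_tb,
                           int64_t a_next_pts, AVRational a_tb)
{
    return encode_video &&
           (!encode_audio ||
            av_compare_ts(v_next_pts, v_tb, a_next_pts, a_tb) <= 0);
}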
Example #5
int main(int argc, char* argv[])
{
    AVFormatContext *ifmt_ctx = NULL;
    AVFormatContext *ifmt_ctx_a = NULL;
    AVFormatContext *ofmt_ctx;
    AVInputFormat* ifmt;
    AVStream* video_st;
    AVStream* audio_st;
    AVCodecContext* pCodecCtx;
    AVCodecContext* pCodecCtx_a;
    AVCodec* pCodec;
    AVCodec* pCodec_a;
    AVPacket *dec_pkt, enc_pkt;
    AVPacket *dec_pkt_a, enc_pkt_a;
    AVFrame *pframe, *pFrameYUV;
    struct SwsContext *img_convert_ctx;
    struct SwrContext *aud_convert_ctx;

    char capture_name[80] = { 0 };
	char device_name[80] = { 0 };
	char device_name_a[80] = { 0 };
    int framecnt = 0;
	int nb_samples = 0;
    int videoindex;
    int audioindex;
    int i;
    int ret;
    HANDLE  hThread;

	const char* out_path = "rtmp://localhost/live/livestream";
    int dec_got_frame, enc_got_frame;
	int dec_got_frame_a, enc_got_frame_a;

	int64_t aud_next_pts = 0;
	int64_t vid_next_pts = 0;
	int encode_video = 1, encode_audio = 1;

	AVRational time_base_q = { 1, AV_TIME_BASE };

    av_register_all();
    //Register Device
    avdevice_register_all();
    avformat_network_init();
#if USEFILTER
    //Register Filter
    avfilter_register_all();
    buffersrc = avfilter_get_by_name("buffer");
    buffersink = avfilter_get_by_name("buffersink");
#endif

    //Show Dshow Device  
    show_dshow_device();

    printf("\nChoose video capture device: ");
    if (gets(capture_name) == 0)
    {
		printf("Error in gets()\n");
		return -1;
    }
    sprintf(device_name, "video=%s", capture_name);

	printf("\nChoose audio capture device: ");
	if (gets(capture_name) == 0)
	{
		printf("Error in gets()\n");
		return -1;
	}
	sprintf(device_name_a, "audio=%s", capture_name);

    //wchar_t *cam = L"video=Integrated Camera";
	//wchar_t *cam = L"video=YY伴侣";
	//char *device_name_utf8 = dup_wchar_to_utf8(cam);
    //wchar_t *cam_a = L"audio=麦克风阵列 (Realtek High Definition Audio)";
	//char *device_name_utf8_a = dup_wchar_to_utf8(cam_a);

	ifmt = av_find_input_format("dshow");
    // Set device params
    AVDictionary *device_param = 0;
	//If rtbufsize is not set, error messages appear in the console, but the stream can usually still be watched or recorded correctly.
	//Setting rtbufsize suppresses those messages; however, a larger rtbufsize adds latency.
    //av_dict_set(&device_param, "rtbufsize", "10M", 0);

    //Set own video device's name
	if (avformat_open_input(&ifmt_ctx, device_name, ifmt, &device_param) != 0){

        printf("Couldn't open input video stream.(无法打开输入流)\n");
        return -1;
    }
	//Set own audio device's name
	if (avformat_open_input(&ifmt_ctx_a, device_name_a, ifmt, &device_param) != 0){

        printf("Couldn't open input audio stream.(无法打开输入流)\n");
        return -1;
    }
    //input video initialize
    if (avformat_find_stream_info(ifmt_ctx, NULL) < 0)
    {
        printf("Couldn't find video stream information.(无法获取流信息)\n");
        return -1;
    }
    videoindex = -1;
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)
    {
        videoindex = i;
        break;
    }
    if (videoindex == -1)
    {
        printf("Couldn't find a video stream.(没有找到视频流)\n");
        return -1;
    }
    if (avcodec_open2(ifmt_ctx->streams[videoindex]->codec, avcodec_find_decoder(ifmt_ctx->streams[videoindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open video codec.(无法打开解码器)\n");
        return -1;
    }
    //input audio initialize
    if (avformat_find_stream_info(ifmt_ctx_a, NULL) < 0)
    {
        printf("Couldn't find audio stream information.(无法获取流信息)\n");
        return -1;
    }
    audioindex = -1;
    for (i = 0; i < ifmt_ctx_a->nb_streams; i++)
    if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
    {
        audioindex = i;
        break;
    }
    if (audioindex == -1)
    {
        printf("Couldn't find a audio stream.(没有找到视频流)\n");
        return -1;
	}
    if (avcodec_open2(ifmt_ctx_a->streams[audioindex]->codec, avcodec_find_decoder(ifmt_ctx_a->streams[audioindex]->codec->codec_id), NULL) < 0)
    {
        printf("Could not open audio codec.(无法打开解码器)\n");
        return -1;
    }

    //output initialize
    avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_path);
    //output video encoder initialize
    pCodec = avcodec_find_encoder(AV_CODEC_ID_H264);
    if (!pCodec){
        printf("Can not find output video encoder! (没有找到合适的编码器!)\n");
        return -1;
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
    pCodecCtx->width = ifmt_ctx->streams[videoindex]->codec->width;
    pCodecCtx->height = ifmt_ctx->streams[videoindex]->codec->height;
    pCodecCtx->time_base.num = 1;
    pCodecCtx->time_base.den = 25;
    pCodecCtx->bit_rate = 300000;
    pCodecCtx->gop_size = 250;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    //H264 codec param
    //pCodecCtx->me_range = 16;
    //pCodecCtx->max_qdiff = 4;
    //pCodecCtx->qcompress = 0.6;
    pCodecCtx->qmin = 10;
    pCodecCtx->qmax = 51;
    //Optional Param
    pCodecCtx->max_b_frames = 0;
    // Set H264 preset and tune
    AVDictionary *param = 0;
    av_dict_set(&param, "preset", "fast", 0);
    av_dict_set(&param, "tune", "zerolatency", 0);

    if (avcodec_open2(pCodecCtx, pCodec, &param) < 0){
        printf("Failed to open output video encoder! (编码器打开失败!)\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() for muxing
    video_st = avformat_new_stream(ofmt_ctx, pCodec);
    if (video_st == NULL){
        return -1;
    }
    video_st->time_base.num = 1;
    video_st->time_base.den = 25;
    video_st->codec = pCodecCtx;


    //output audio encoder initialize
    pCodec_a = avcodec_find_encoder(AV_CODEC_ID_AAC);
    if (!pCodec_a){
        printf("Can not find output audio encoder! (没有找到合适的编码器!)\n");
        return -1;
    }
    pCodecCtx_a = avcodec_alloc_context3(pCodec_a);
    pCodecCtx_a->channels = 2;
    pCodecCtx_a->channel_layout = av_get_default_channel_layout(2);
	pCodecCtx_a->sample_rate = ifmt_ctx_a->streams[audioindex]->codec->sample_rate;
    pCodecCtx_a->sample_fmt = pCodec_a->sample_fmts[0];
    pCodecCtx_a->bit_rate = 32000;
    pCodecCtx_a->time_base.num = 1;
	pCodecCtx_a->time_base.den = pCodecCtx_a->sample_rate;
    /** Allow the use of the experimental AAC encoder */
    pCodecCtx_a->strict_std_compliance = FF_COMPLIANCE_EXPERIMENTAL;
    /* Some formats want stream headers to be separate. */
    if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
        pCodecCtx_a->flags |= CODEC_FLAG_GLOBAL_HEADER;
    if (avcodec_open2(pCodecCtx_a, pCodec_a, NULL) < 0){
        printf("Failed to open ouput audio encoder! (编码器打开失败!)\n");
        return -1;
    }

    //Add a new stream to the output; must be called before avformat_write_header() for muxing
    audio_st = avformat_new_stream(ofmt_ctx, pCodec_a);
    if (audio_st == NULL){
        return -1;
    }
    audio_st->time_base.num = 1;
	audio_st->time_base.den = pCodecCtx_a->sample_rate;
    audio_st->codec = pCodecCtx_a;

    //Open output URL,set before avformat_write_header() for muxing
    if (avio_open(&ofmt_ctx->pb, out_path, AVIO_FLAG_READ_WRITE) < 0){
        printf("Failed to open output file! (输出文件打开失败!)\n");
        return -1;
    }

    //Show some Information
    av_dump_format(ofmt_ctx, 0, out_path, 1);

    //Write File Header
    avformat_write_header(ofmt_ctx, NULL);

    //prepare before decode and encode
    dec_pkt = (AVPacket *)av_malloc(sizeof(AVPacket));

#if USEFILTER
#else
	//camera data may have a pixel format of RGB or something else; convert it to YUV420P
    img_convert_ctx = sws_getContext(ifmt_ctx->streams[videoindex]->codec->width, ifmt_ctx->streams[videoindex]->codec->height,
        ifmt_ctx->streams[videoindex]->codec->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
    
	// Initialize the resampler to be able to convert audio sample formats
	aud_convert_ctx = swr_alloc_set_opts(NULL,
		av_get_default_channel_layout(pCodecCtx_a->channels),
		pCodecCtx_a->sample_fmt,
		pCodecCtx_a->sample_rate,
		av_get_default_channel_layout(ifmt_ctx_a->streams[audioindex]->codec->channels),
		ifmt_ctx_a->streams[audioindex]->codec->sample_fmt,
		ifmt_ctx_a->streams[audioindex]->codec->sample_rate,
		0, NULL);
	
	/**
	* Perform a sanity check so that the number of converted samples is
	* not greater than the number of samples to be converted.
	* If the sample rates differ, this case has to be handled differently
	*/
	//av_assert0(pCodecCtx_a->sample_rate == ifmt_ctx_a->streams[audioindex]->codec->sample_rate);

	swr_init(aud_convert_ctx);

    
#endif
    //Initialize the buffer to store YUV frames to be encoded.
	pFrameYUV = av_frame_alloc();
    uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

	//Initialize the FIFO buffer to store audio samples to be encoded. 
    AVAudioFifo *fifo = NULL;
	fifo = av_audio_fifo_alloc(pCodecCtx_a->sample_fmt, pCodecCtx_a->channels, 1);

	//Initialize the buffer to store converted samples to be encoded.
	uint8_t **converted_input_samples = NULL;
	/**
	* Allocate as many pointers as there are audio channels.
	* Each pointer will later point to the audio samples of the corresponding
	* channels (although it may be NULL for interleaved formats).
	*/
	if (!(converted_input_samples = (uint8_t**)calloc(pCodecCtx_a->channels,
		sizeof(*converted_input_samples)))) {
		printf("Could not allocate converted input sample pointers\n");
		return AVERROR(ENOMEM);
	}


    printf("\n --------call started----------\n");
#if USEFILTER
    printf("\n Press differnet number for different filters:");
    printf("\n 1->Mirror");
    printf("\n 2->Add Watermark");
    printf("\n 3->Negate");
    printf("\n 4->Draw Edge");
    printf("\n 5->Split Into 4");
    printf("\n 6->Vintage");
    printf("\n Press 0 to remove filter\n");
#endif
    printf("\nPress enter to stop...\n");
    hThread = CreateThread(
        NULL,                   // default security attributes
        0,                      // use default stack size  
        MyThreadFunction,       // thread function name
        NULL,          // argument to thread function 
        0,                      // use default creation flags 
        NULL);   // returns the thread identifier 

    //start decode and encode
    int64_t start_time = av_gettime();
    while (encode_video || encode_audio)
    {
        if (encode_video &&
			(!encode_audio || av_compare_ts(vid_next_pts, time_base_q,
			aud_next_pts, time_base_q) <= 0))
        {
            if ((ret=av_read_frame(ifmt_ctx, dec_pkt)) >= 0){

                if (exit_thread)
                    break;

                av_log(NULL, AV_LOG_DEBUG, "Going to reencode the frame\n");
                pframe = av_frame_alloc();
                if (!pframe) {
                    ret = AVERROR(ENOMEM);
                    return ret;
                }
                ret = avcodec_decode_video2(ifmt_ctx->streams[dec_pkt->stream_index]->codec, pframe,
                    &dec_got_frame, dec_pkt);
                if (ret < 0) {
                    av_frame_free(&pframe);
                    av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                    break;
                }
                if (dec_got_frame){
#if USEFILTER
                    pframe->pts = av_frame_get_best_effort_timestamp(pframe);

                    if (filter_change)
                        apply_filters(ifmt_ctx);
                    filter_change = 0;
                    /* push the decoded frame into the filtergraph */
                    if (av_buffersrc_add_frame(buffersrc_ctx, pframe) < 0) {
                        printf("Error while feeding the filtergraph\n");
                        break;
                    }
                    picref = av_frame_alloc();

                    /* pull filtered pictures from the filtergraph */
                    while (1) {
                        ret = av_buffersink_get_frame_flags(buffersink_ctx, picref, 0);
                        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                            break;
                        if (ret < 0)
                            return ret;

                        if (picref) {
                            img_convert_ctx = sws_getContext(picref->width, picref->height, (AVPixelFormat)picref->format, pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
                            sws_scale(img_convert_ctx, (const uint8_t* const*)picref->data, picref->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                            sws_freeContext(img_convert_ctx);
                            pFrameYUV->width = picref->width;
                            pFrameYUV->height = picref->height;
                            pFrameYUV->format = PIX_FMT_YUV420P;
#else
                    sws_scale(img_convert_ctx, (const uint8_t* const*)pframe->data, pframe->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                    pFrameYUV->width = pframe->width;
                    pFrameYUV->height = pframe->height;
                    pFrameYUV->format = PIX_FMT_YUV420P;
#endif					
                    enc_pkt.data = NULL;
                    enc_pkt.size = 0;
                    av_init_packet(&enc_pkt);
                    ret = avcodec_encode_video2(pCodecCtx, &enc_pkt, pFrameYUV, &enc_got_frame);
                    av_frame_free(&pframe);
                    if (enc_got_frame == 1){
                        //printf("Succeed to encode frame: %5d\tsize:%5d\n", framecnt, enc_pkt.size);
                        framecnt++;
                        enc_pkt.stream_index = video_st->index;						

                        //Write PTS
						AVRational time_base = ofmt_ctx->streams[0]->time_base;//{ 1, 1000 };
                        AVRational r_framerate1 = ifmt_ctx->streams[videoindex]->r_frame_rate;//{ 50, 2 }; 
                        //Duration between 2 frames (us)
                        int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));	// duration of one frame in AV_TIME_BASE units (us)
                        //Parameters
                        //enc_pkt.pts = (double)(framecnt*calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pts = av_rescale_q(framecnt*calc_duration, time_base_q, time_base);
                        enc_pkt.dts = enc_pkt.pts;
                        enc_pkt.duration = av_rescale_q(calc_duration, time_base_q, time_base); //(double)(calc_duration)*(double)(av_q2d(time_base_q)) / (double)(av_q2d(time_base));
                        enc_pkt.pos = -1;
                        //printf("video pts : %d\n", enc_pkt.pts);

						vid_next_pts = framecnt*calc_duration; // in AV_TIME_BASE units

                        //Delay
						int64_t pts_time = av_rescale_q(enc_pkt.pts, time_base, time_base_q);
						int64_t now_time = av_gettime() - start_time;						
						if ((pts_time > now_time) && ((vid_next_pts + pts_time - now_time)<aud_next_pts))
							av_usleep(pts_time - now_time);
						
                        ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
                        av_free_packet(&enc_pkt);
                    }
#if USEFILTER
                    av_frame_unref(picref);
                }
            }
#endif
        }
        else {
            av_frame_free(&pframe);
        }
        av_free_packet(dec_pkt);
    }
    else
		if (ret == AVERROR_EOF)
			encode_video = 0;
		else
		{
			printf("Could not read video frame\n");
			return ret;
		}
    }
    else
    {
        //audio trancoding here
        const int output_frame_size = pCodecCtx_a->frame_size;

		if (exit_thread)
			break;

        /**
        * Make sure that there is one frame worth of samples in the FIFO
        * buffer so that the encoder can do its work.
        * Since the decoder's and the encoder's frame size may differ, we
        * need the FIFO buffer to store as many frames worth of input samples
        * as it takes to make up at least one frame worth of output samples.
        */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
            * Decode one frame worth of audio samples, convert it to the
            * output sample format and put it into the FIFO buffer.
            */
			AVFrame *input_frame = av_frame_alloc();
			if (!input_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}			
			
			/** Decode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket input_packet;
			av_init_packet(&input_packet);
			input_packet.data = NULL;
			input_packet.size = 0;
			
			/** Read one audio frame from the input file into a temporary packet. */
			if ((ret = av_read_frame(ifmt_ctx_a, &input_packet)) < 0) {
				/** If we are at the end of the file, flush the decoder below. */
				if (ret == AVERROR_EOF)
				{
					encode_audio = 0;
				}
				else
				{
					printf("Could not read audio frame\n");
					return ret;
				}					
			}

			/**
			* Decode the audio frame stored in the temporary packet.
			* The input audio stream decoder is used to do this.
			* If we are at the end of the file, pass an empty packet to the decoder
			* to flush it.
			*/
			if ((ret = avcodec_decode_audio4(ifmt_ctx_a->streams[audioindex]->codec, input_frame,
				&dec_got_frame_a, &input_packet)) < 0) {
				printf("Could not decode audio frame\n");
				return ret;
			}
			av_packet_unref(&input_packet);
			/** If there is decoded data, convert and store it */
			if (dec_got_frame_a) {
				/**
				* Allocate memory for the samples of all channels in one consecutive
				* block for convenience.
				*/
				if ((ret = av_samples_alloc(converted_input_samples, NULL,
					pCodecCtx_a->channels,
					input_frame->nb_samples,
					pCodecCtx_a->sample_fmt, 0)) < 0) {
					printf("Could not allocate converted input samples\n");
					av_freep(&(*converted_input_samples)[0]);
					free(*converted_input_samples);
					return ret;
				}

				/**
				* Convert the input samples to the desired output sample format.
				* This requires a temporary storage provided by converted_input_samples.
				*/
				/** Convert the samples using the resampler. */
				if ((ret = swr_convert(aud_convert_ctx,
					converted_input_samples, input_frame->nb_samples,
					(const uint8_t**)input_frame->extended_data, input_frame->nb_samples)) < 0) {
					printf("Could not convert input samples\n");
					return ret;
				}

				/** Add the converted input samples to the FIFO buffer for later processing. */
				/**
				* Make the FIFO as large as it needs to be to hold both,
				* the old and the new samples.
				*/
				if ((ret = av_audio_fifo_realloc(fifo, av_audio_fifo_size(fifo) + input_frame->nb_samples)) < 0) {
					printf("Could not reallocate FIFO\n");
					return ret;
				}

				/** Store the new samples in the FIFO buffer. */
				if (av_audio_fifo_write(fifo, (void **)converted_input_samples,
					input_frame->nb_samples) < input_frame->nb_samples) {
					printf("Could not write data to FIFO\n");
					return AVERROR_EXIT;
				}				
			}
        }

        /**
        * If we have enough samples for the encoder, we encode them.
        * At the end of the file, we pass the remaining samples to
        * the encoder.
        */
        if (av_audio_fifo_size(fifo) >= output_frame_size)
            /**
            * Take one frame worth of audio samples from the FIFO buffer,
            * encode it and write it to the output file.
            */
        {
            /** Temporary storage of the output samples of the frame written to the file. */
			AVFrame *output_frame=av_frame_alloc();
			if (!output_frame)
			{
				ret = AVERROR(ENOMEM);
				return ret;
			}
			/**
			* Use the maximum number of possible samples per frame.
			* If there is less than the maximum possible frame size in the FIFO
			* buffer use this number. Otherwise, use the maximum possible frame size
			*/
			const int frame_size = FFMIN(av_audio_fifo_size(fifo),
				pCodecCtx_a->frame_size);
			
			/** Initialize temporary storage for one output frame. */
			/**
			* Set the frame's parameters, especially its size and format.
			* av_frame_get_buffer needs this to allocate memory for the
			* audio samples of the frame.
			* Default channel layouts based on the number of channels
			* are assumed for simplicity.
			*/
			output_frame->nb_samples = frame_size;
			output_frame->channel_layout = pCodecCtx_a->channel_layout;
			output_frame->format = pCodecCtx_a->sample_fmt;
			output_frame->sample_rate = pCodecCtx_a->sample_rate;

			/**
			* Allocate the samples of the created frame. This call will make
			* sure that the audio frame can hold as many samples as specified.
			*/
			if ((ret = av_frame_get_buffer(output_frame, 0)) < 0) {
				printf("Could not allocate output frame samples\n");
				av_frame_free(&output_frame);
				return ret;
			}
			
			/**
			* Read as many samples from the FIFO buffer as required to fill the frame.
			* The samples are stored in the frame temporarily.
			*/
			if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
				printf("Could not read data from FIFO\n");
				return AVERROR_EXIT;
			}

			/** Encode one frame worth of audio samples. */
			/** Packet used for temporary storage. */
			AVPacket output_packet;
			av_init_packet(&output_packet);
			output_packet.data = NULL;
			output_packet.size = 0;
			
			/** Set a timestamp based on the sample rate for the container. */
			if (output_frame) {
				nb_samples += output_frame->nb_samples;
			}

			/**
			* Encode the audio frame and store it in the temporary packet.
			* The output audio stream encoder is used to do this.
			*/
			if ((ret = avcodec_encode_audio2(pCodecCtx_a, &output_packet,
				output_frame, &enc_got_frame_a)) < 0) {
				printf("Could not encode frame\n");
				av_packet_unref(&output_packet);
				return ret;
			}

			/** Write one audio frame from the temporary packet to the output file. */
			if (enc_got_frame_a) {

				output_packet.stream_index = 1;

				AVRational time_base = ofmt_ctx->streams[1]->time_base;
				AVRational r_framerate1 = { ifmt_ctx_a->streams[audioindex]->codec->sample_rate, 1 };// { 44100, 1};  
				int64_t calc_duration = (double)(AV_TIME_BASE)*(1 / av_q2d(r_framerate1));  // duration of one sample in AV_TIME_BASE units (us)

				output_packet.pts = av_rescale_q(nb_samples*calc_duration, time_base_q, time_base);
				output_packet.dts = output_packet.pts;
				output_packet.duration = output_frame->nb_samples;

				//printf("audio pts : %d\n", output_packet.pts);
				aud_next_pts = nb_samples*calc_duration;

				int64_t pts_time = av_rescale_q(output_packet.pts, time_base, time_base_q);
				int64_t now_time = av_gettime() - start_time;
				if ((pts_time > now_time) && ((aud_next_pts + pts_time - now_time)<vid_next_pts))
					av_usleep(pts_time - now_time);

				if ((ret = av_interleaved_write_frame(ofmt_ctx, &output_packet)) < 0) {
					printf("Could not write frame\n");
					av_packet_unref(&output_packet);
					return ret;
				}

				av_packet_unref(&output_packet);
			}			
			av_frame_free(&output_frame);		
        }      
	}
  }


    //Flush Encoder
    ret = flush_encoder(ifmt_ctx, ofmt_ctx, 0, framecnt);
    if (ret < 0) {
        printf("Flushing encoder failed\n");
        return -1;
    }
	ret = flush_encoder_a(ifmt_ctx_a, ofmt_ctx, 1, nb_samples);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}



    //Write file trailer
    av_write_trailer(ofmt_ctx);

cleanup:
    //Clean
#if USEFILTER
    if (filter_graph)
        avfilter_graph_free(&filter_graph);
#endif
    if (video_st)
        avcodec_close(video_st->codec);
    if (audio_st)
        avcodec_close(audio_st->codec);
    av_free(out_buffer);
	if (converted_input_samples) {
		av_freep(&converted_input_samples[0]);
		//free(converted_input_samples);
	}
	if (fifo)
		av_audio_fifo_free(fifo);
    avio_close(ofmt_ctx->pb);
    avformat_close_input(&ifmt_ctx);
	avformat_close_input(&ifmt_ctx_a);
    avformat_free_context(ofmt_ctx);
    CloseHandle(hThread);
    return 0;
}
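
The example above derives timestamps by hand (frame index times microseconds per frame, computed in doubles, then rescaled). The same value can be produced exactly in one step; a sketch, assuming a frame counter and the stream's frame rate as in the code above:

#include <libavutil/mathematics.h>
#include <libavutil/rational.h>

/* Exact PTS of frame n for a stream running at `fps`, expressed in
 * `dst_tb`, avoiding the double-precision detour through AV_TIME_BASE. */
static int64_t frame_pts(int64_t frame_index, AVRational fps, AVRational dst_tb)
{
    /* One frame lasts 1/fps seconds, i.e. the inverted frame rate. */
    return av_rescale_q(frame_index, av_inv_q(fps), dst_tb);
}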
Example #6
static int hls_read_packet(AVFormatContext *s, AVPacket *pkt)
{
    HLSContext *c = s->priv_data;
    int ret, i, minplaylist = -1;

    if (c->first_packet) {
        recheck_discard_flags(s, 1);
        c->first_packet = 0;
    }

start:
    c->end_of_segment = 0;
    for (i = 0; i < c->n_playlists; i++) {
        struct playlist *pls = c->playlists[i];
        /* Make sure we've got one buffered packet from each open playlist
         * stream */
        if (pls->needed && !pls->pkt.data) {
            while (1) {
                int64_t ts_diff;
                AVStream *st;
                ret = av_read_frame(pls->ctx, &pls->pkt);
                if (ret < 0) {
                    if (!url_feof(&pls->pb) && ret != AVERROR_EOF)
                        return ret;
                    reset_packet(&pls->pkt);
                    break;
                } else {
                    if (c->first_timestamp == AV_NOPTS_VALUE &&
                        pls->pkt.dts       != AV_NOPTS_VALUE)
                        c->first_timestamp = av_rescale_q(pls->pkt.dts,
                            pls->ctx->streams[pls->pkt.stream_index]->time_base,
                            AV_TIME_BASE_Q);
                }

                if (c->seek_timestamp == AV_NOPTS_VALUE)
                    break;

                if (pls->pkt.dts == AV_NOPTS_VALUE) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }

                st = pls->ctx->streams[pls->pkt.stream_index];
                ts_diff = av_rescale_rnd(pls->pkt.dts, AV_TIME_BASE,
                                         st->time_base.den, AV_ROUND_DOWN) -
                          c->seek_timestamp;
                if (ts_diff >= 0 && (c->seek_flags  & AVSEEK_FLAG_ANY ||
                                     pls->pkt.flags & AV_PKT_FLAG_KEY)) {
                    c->seek_timestamp = AV_NOPTS_VALUE;
                    break;
                }
                av_free_packet(&pls->pkt);
                reset_packet(&pls->pkt);
            }
        }
        /* Check if this stream still is on an earlier segment number, or
         * has the packet with the lowest dts */
        if (pls->pkt.data) {
            struct playlist *minpls = minplaylist < 0 ?
                                     NULL : c->playlists[minplaylist];
            if (minplaylist < 0 || pls->cur_seq_no < minpls->cur_seq_no) {
                minplaylist = i;
            } else if (pls->cur_seq_no == minpls->cur_seq_no) {
                int64_t dts     =    pls->pkt.dts;
                int64_t mindts  = minpls->pkt.dts;
                AVStream *st    =    pls->ctx->streams[pls->pkt.stream_index];
                AVStream *minst = minpls->ctx->streams[minpls->pkt.stream_index];

                if (dts == AV_NOPTS_VALUE) {
                    minplaylist = i;
                } else if (mindts != AV_NOPTS_VALUE) {
                    if (st->start_time    != AV_NOPTS_VALUE)
                        dts    -= st->start_time;
                    if (minst->start_time != AV_NOPTS_VALUE)
                        mindts -= minst->start_time;

                    if (av_compare_ts(dts, st->time_base,
                                      mindts, minst->time_base) < 0)
                        minplaylist = i;
                }
            }
        }
    }
    if (c->end_of_segment) {
        if (recheck_discard_flags(s, 0))
            goto start;
    }
    /* If we got a packet, return it */
    if (minplaylist >= 0) {
        *pkt = c->playlists[minplaylist]->pkt;
        pkt->stream_index += c->playlists[minplaylist]->stream_offset;
        reset_packet(&c->playlists[minplaylist]->pkt);
        return 0;
    }
    return AVERROR_EOF;
}
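
Compared with the first example, this revision prefers packets from earlier segment numbers and normalizes each DTS by its stream's start_time before comparing. That normalization, isolated as a sketch:

#include <libavformat/avformat.h>

/* Shift both DTS values to a common origin by subtracting the stream's
 * start_time when it is known, then compare across time bases exactly. */
static int cmp_normalized_dts(int64_t dts, const AVStream *st,
                              int64_t mindts, const AVStream *minst)
{
    if (st->start_time    != AV_NOPTS_VALUE) dts    -= st->start_time;
    if (minst->start_time != AV_NOPTS_VALUE) mindts -= minst->start_time;
    return av_compare_ts(dts, st->time_base, mindts, minst->time_base);
}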
Example #7
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_audio_stream(&audio_st, oc, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (have_audio)
        open_audio(oc, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !process_audio_stream(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example #8
static void mux_frames(int n)
{
    int end_frames = frames + n;
    while (1) {
        AVPacket pkt;
        uint8_t pktdata[8] = { 0 };
        av_init_packet(&pkt);

        if (av_compare_ts(audio_dts, audio_st->time_base, video_dts, video_st->time_base) < 0) {
            pkt.dts = pkt.pts = audio_dts;
            pkt.stream_index = 1;
            pkt.duration = audio_duration;
            audio_dts += audio_duration;
        } else {
            if (frames == end_frames)
                break;
            pkt.dts = video_dts;
            pkt.stream_index = 0;
            pkt.duration = duration;
            if ((frames % gop_size) == 0) {
                pkt.flags |= AV_PKT_FLAG_KEY;
                last_picture = AV_PICTURE_TYPE_I;
                pkt.pts = pkt.dts + duration;
                video_dts = pkt.pts;
            } else {
                if (last_picture == AV_PICTURE_TYPE_P) {
                    last_picture = AV_PICTURE_TYPE_B;
                    pkt.pts = pkt.dts;
                    video_dts = next_p_pts;
                } else {
                    last_picture = AV_PICTURE_TYPE_P;
                    if (((frames + 1) % gop_size) == 0) {
                        pkt.pts = pkt.dts + duration;
                        video_dts = pkt.pts;
                    } else {
                        next_p_pts = pkt.pts = pkt.dts + 2 * duration;
                        video_dts += duration;
                    }
                }
            }
            if (!bframes)
                pkt.pts = pkt.dts;
            if (fake_pkt_duration)
                pkt.duration = fake_pkt_duration;
            frames++;
        }

        if (clear_duration)
            pkt.duration = 0;
        AV_WB32(pktdata + 4, pkt.pts);
        pkt.data = pktdata;
        pkt.size = 8;
        if (skip_write)
            continue;
        if (skip_write_audio && pkt.stream_index == 1)
            continue;
        if (do_interleave)
            av_interleaved_write_frame(ctx, &pkt);
        else
            av_write_frame(ctx, &pkt);
    }
}
Example #9
/**
 * Partial search for keyframes in multiple streams.
 *
 * This routine searches in each stream for the next lower and the next higher
 * timestamp compared to the given target timestamp. The search starts at the current
 * file position and ends at the file position, where all streams have already been
 * examined (or when all higher key frames are found in the first iteration).
 *
 * This routine is called iteratively with an exponential backoff to find the lower
 * timestamp.
 *
 * @param s                 format context
 * @param timestamp         target timestamp (or position, if AVSEEK_FLAG_BYTE)
 * @param timebase          time base for timestamps
 * @param flags             seeking flags
 * @param sync              array with information per stream
 * @param keyframes_to_find count of keyframes to find in total
 * @param found_lo          ptr to the count of already found low timestamp keyframes
 * @param found_hi          ptr to the count of already found high timestamp keyframes
 * @param first_iter        flag for first iteration
 */
static void search_hi_lo_keyframes(AVFormatContext *s,
                                   int64_t timestamp,
                                   AVRational timebase,
                                   int flags,
                                   AVSyncPoint *sync,
                                   int keyframes_to_find,
                                   int *found_lo,
                                   int *found_hi,
                                   int first_iter)
{
    AVPacket pkt;
    AVSyncPoint *sp;
    AVStream *st;
    int idx;
    int flg;
    int terminated_count = 0;
    int64_t pos;
    int64_t pts, dts;   // PTS/DTS from stream
    int64_t ts;         // PTS in stream-local time base or position for byte seeking
    AVRational ts_tb;   // Time base of the stream or 1:1 for byte seeking

    for (;;) {
        if (av_read_frame(s, &pkt) < 0) {
            // EOF or error, make sure high flags are set
            for (idx = 0; idx < s->nb_streams; ++idx) {
                if (s->streams[idx]->discard < AVDISCARD_ALL) {
                    sp = &sync[idx];
                    if (sp->pos_hi == INT64_MAX) {
                        // no high frame exists for this stream
                        (*found_hi)++;
                        sp->ts_hi  = INT64_MAX;
                        sp->pos_hi = INT64_MAX - 1;
                    }
                }
            }
            break;
        }

        idx = pkt.stream_index;
        st = s->streams[idx];
        if (st->discard >= AVDISCARD_ALL)
            // this stream is not active, skip packet
            continue;

        sp = &sync[idx];

        flg = pkt.flags;
        pos = pkt.pos;
        pts = pkt.pts;
        dts = pkt.dts;
        if (pts == AV_NOPTS_VALUE)
            // some formats don't provide PTS, only DTS
            pts = dts;

        av_free_packet(&pkt);

        // Multi-frame packets only return position for the very first frame.
        // Other frames are read with position == -1. Therefore, we note down
        // last known position of a frame and use it if a frame without
        // position arrives. In this way, it's possible to seek to proper
        // position. Additionally, for parsers not providing position at all,
        // an approximation will be used (starting position of this iteration).
        if (pos < 0)
            pos = sp->last_pos;
        else
            sp->last_pos = pos;

        // Evaluate key frames with known TS (or any frames, if AVSEEK_FLAG_ANY set).
        if (pts != AV_NOPTS_VALUE &&
            ((flg & PKT_FLAG_KEY) || (flags & AVSEEK_FLAG_ANY))) {
            if (flags & AVSEEK_FLAG_BYTE) {
                // for byte seeking, use position as timestamp
                ts        = pos;
                ts_tb.num = 1;
                ts_tb.den = 1;
            } else {
                // otherwise, get stream time_base
                ts    = pts;
                ts_tb = st->time_base;
            }

            if (sp->first_ts == AV_NOPTS_VALUE) {
                // Note down termination timestamp for the next iteration - when
                // we encounter a packet with the same timestamp, we will ignore
                // any further packets for this stream in next iteration (as they
                // are already evaluated).
                sp->first_ts    = ts;
                sp->first_ts_tb = ts_tb;
            }

            if (sp->term_ts != AV_NOPTS_VALUE &&
                av_compare_ts(ts, ts_tb, sp->term_ts, sp->term_ts_tb) > 0) {
                // past the end position from last iteration, ignore packet
                if (!sp->terminated) {
                    sp->terminated = 1;
                    ++terminated_count;
                    if (sp->pos_hi == INT64_MAX) {
                        // no high frame exists for this stream
                        (*found_hi)++;
                        sp->ts_hi  = INT64_MAX;
                        sp->pos_hi = INT64_MAX - 1;
                    }
                    if (terminated_count == keyframes_to_find)
                        break;  // all terminated, iteration done
                }
                continue;
            }

            if (av_compare_ts(ts, ts_tb, timestamp, timebase) <= 0) {
                // keyframe found before target timestamp
                if (sp->pos_lo == INT64_MAX) {
                    // found first keyframe lower than target timestamp
                    (*found_lo)++;
                    sp->ts_lo  = ts;
                    sp->pos_lo = pos;
                } else if (sp->ts_lo < ts) {
                    // found a better match (closer to target timestamp)
                    sp->ts_lo  = ts;
                    sp->pos_lo = pos;
                }
            }
            if (av_compare_ts(ts, ts_tb, timestamp, timebase) >= 0) {
                // keyframe found after target timestamp
                if (sp->pos_hi == INT64_MAX) {
                    // found first keyframe higher than target timestamp
                    (*found_hi)++;
                    sp->ts_hi  = ts;
                    sp->pos_hi = pos;
                    if (*found_hi >= keyframes_to_find && first_iter) {
                        // We found high frame for all. They may get updated
                        // to TS closer to target TS in later iterations (which
                        // will stop at start position of previous iteration).
                        break;
                    }
                } else if (sp->ts_hi > ts) {
                    // found a better match (actually, shouldn't happen)
                    sp->ts_hi  = ts;
                    sp->pos_hi = pos;
                }
            }
        }
    }

    // Clean up the parser.
    ff_read_frame_flush(s);
}
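
Both the seek helper above and the muxing examples below hinge on av_compare_ts(), which compares two timestamps expressed in different time bases without first converting them to a common unit. A minimal, self-contained illustration of its return-value convention (this snippet is not part of the original sources):

#include <stdio.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational tb_90k = { 1, 90000 };    // typical MPEG-TS time base
    AVRational tb_us  = { 1, 1000000 };  // AV_TIME_BASE_Q

    // 90000 ticks * 1/90000 s = 1.0 s vs. 999999 us = 0.999999 s:
    // the first instant is later, so av_compare_ts() returns 1
    printf("%d\n", av_compare_ts(90000, tb_90k, 999999, tb_us));

    // identical instants (0.5 s vs. 0.5 s) compare equal: returns 0
    printf("%d\n", av_compare_ts(1, (AVRational){ 1, 2 },
                                 500, (AVRational){ 1, 1000 }));
    return 0;
}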
Example No. 10
int64_t ff_gen_syncpoint_search(AVFormatContext *s,
                                int stream_index,
                                int64_t pos,
                                int64_t ts_min,
                                int64_t ts,
                                int64_t ts_max,
                                int flags)
{
    AVSyncPoint *sync, *sp;
    AVStream *st;
    int i;
    int keyframes_to_find = 0;
    int64_t curpos;
    int64_t step;
    int found_lo = 0, found_hi = 0;
    int64_t min_distance, distance;
    int64_t min_pos = 0;
    int first_iter = 1;
    AVRational time_base;

    if (flags & AVSEEK_FLAG_BYTE) {
        // for byte seeking, we have exact 1:1 "timestamps" - positions
        time_base.num = 1;
        time_base.den = 1;
    } else {
        if (stream_index >= 0) {
            // we have a reference stream, so use its time base
            st = s->streams[stream_index];
            time_base = st->time_base;
        } else {
            // no reference stream, use AV_TIME_BASE as reference time base
            time_base.num = 1;
            time_base.den = AV_TIME_BASE;
        }
    }

    // Initialize syncpoint structures for each stream.
    sync = av_malloc(s->nb_streams * sizeof(AVSyncPoint));
    if (!sync)
        // cannot allocate helper structure
        return -1;

    for (i = 0; i < s->nb_streams; ++i) {
        st = s->streams[i];
        sp = &sync[i];

        sp->pos_lo     = INT64_MAX;
        sp->ts_lo      = INT64_MAX;
        sp->pos_hi     = INT64_MAX;
        sp->ts_hi      = INT64_MAX;
        sp->terminated = 0;
        sp->first_ts   = AV_NOPTS_VALUE;
        sp->term_ts    = ts_max;
        sp->term_ts_tb = time_base;
        sp->last_pos   = pos;

        st->cur_dts    = AV_NOPTS_VALUE;

        if (st->discard < AVDISCARD_ALL)
            ++keyframes_to_find;
    }

    if (!keyframes_to_find) {
        // no stream active, error
        av_free(sync);
        return -1;
    }

    // Find keyframes in all active streams with timestamp/position just before
    // and just after requested timestamp/position.
    step = s->pb->buffer_size;
    curpos = FFMAX(pos - step / 2, 0);
    for (;;) {
        url_fseek(s->pb, curpos, SEEK_SET);
        search_hi_lo_keyframes(s,
                               ts, time_base,
                               flags,
                               sync,
                               keyframes_to_find,
                               &found_lo, &found_hi,
                               first_iter);
        if (found_lo == keyframes_to_find && found_hi == keyframes_to_find)
            break;  // have all keyframes we wanted
        if (!curpos)
            break;  // cannot go back anymore

        curpos = pos - step;
        if (curpos < 0)
            curpos = 0;
        step *= 2;

        // switch termination positions
        for (i = 0; i < s->nb_streams; ++i) {
            st = s->streams[i];
            st->cur_dts = AV_NOPTS_VALUE;

            sp = &sync[i];
            if (sp->first_ts != AV_NOPTS_VALUE) {
                sp->term_ts    = sp->first_ts;
                sp->term_ts_tb = sp->first_ts_tb;
                sp->first_ts   = AV_NOPTS_VALUE;
            }
            sp->terminated = 0;
            sp->last_pos = curpos;
        }
        first_iter = 0;
    }

    // Find actual position to start decoding so that decoder synchronizes
    // closest to ts and between ts_min and ts_max.
    pos = INT64_MAX;

    for (i = 0; i < s->nb_streams; ++i) {
        st = s->streams[i];
        if (st->discard < AVDISCARD_ALL) {
            sp = &sync[i];
            min_distance = INT64_MAX;
            // Find timestamp closest to requested timestamp within min/max limits.
            if (sp->pos_lo != INT64_MAX
                && av_compare_ts(ts_min, time_base, sp->ts_lo, st->time_base) <= 0
                && av_compare_ts(sp->ts_lo, st->time_base, ts_max, time_base) <= 0) {
                // low timestamp is in range
                min_distance = ts_distance(ts, time_base, sp->ts_lo, st->time_base);
                min_pos = sp->pos_lo;
            }
            if (sp->pos_hi != INT64_MAX
                && av_compare_ts(ts_min, time_base, sp->ts_hi, st->time_base) <= 0
                && av_compare_ts(sp->ts_hi, st->time_base, ts_max, time_base) <= 0) {
                // high timestamp is in range, check distance
                distance = ts_distance(sp->ts_hi, st->time_base, ts, time_base);
                if (distance < min_distance) {
                    min_distance = distance;
                    min_pos = sp->pos_hi;
                }
            }
            if (min_distance == INT64_MAX) {
                // no timestamp is in range, cannot seek
                av_free(sync);
                return -1;
            }
            if (min_pos < pos)
                pos = min_pos;
        }
    }

    url_fseek(s->pb, pos, SEEK_SET);
    av_free(sync);
    return pos;
}
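
ts_distance() is called by ff_gen_syncpoint_search() above but is not defined in this excerpt. A sketch consistent with its call sites, which only ever compare distances built from the same pair of time bases, so the common scale factor cancels out (an assumed reconstruction, not verbatim source):

static int64_t ts_distance(int64_t ts_hi, AVRational tb_hi,
                           int64_t ts_lo, AVRational tb_lo)
{
    // (ts_hi in seconds - ts_lo in seconds), scaled by tb_hi.den * tb_lo.den;
    // meaningful only when comparing distances that share the same two bases
    return ts_hi * tb_hi.num * tb_lo.den - ts_lo * tb_lo.num * tb_hi.den;
}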
Example No. 11
int muxer_mp4(void* noUse)
{
    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
    int videoindex_v = -1, videoindex_out = -1;
    int audioindex_a = -1, audioindex_out = -1;
    int frame_index = 0;
    int64_t cur_pts_v = 0, cur_pts_a = 0;

    //const char *in_filename_v = "cuc_ieschool.ts";//Input file URL
    const char *in_filename_v = "../testResource/bigbuckbunny_480x272.h264";
    //const char *in_filename_a = "cuc_ieschool.mp3";
    //const char *in_filename_a = "gowest.m4a";
    //const char *in_filename_a = "gowest.aac";
    const char *in_filename_a = "../testResource/WavinFlag.aac";

    const char *out_filename = "bigbuckbunny.mp4";//Output file URL
    av_register_all();
    //Input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf("Could not open video input file.\n");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf("Failed to retrieve video input stream information.\n");
        goto end;
    }

    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
        printf("Could not open audio input file.\n");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
        printf("Failed to retrieve audio input stream information.\n");
        goto end;
    }
    printf("===========Input Information==========\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    av_dump_format(ifmt_ctx_a, 0, in_filename_a, 0);
    printf("======================================\n");
    //Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;

    unsigned char* outbuffer = (unsigned char*)av_malloc(32768);
    if (!outbuffer)
        goto end;

    // write_flag must be 1 for a write-only (output) AVIOContext
    AVIOContext *avio_out = avio_alloc_context(outbuffer, 32768, 1, NULL, NULL, write_buffer, NULL);
    if (avio_out == NULL)
        goto end;
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags |= AVFMT_FLAG_CUSTOM_IO;

    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *in_stream = ifmt_ctx_v->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            videoindex_v = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            videoindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }

    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            AVStream *in_stream = ifmt_ctx_a->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            audioindex_a = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            audioindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

            break;
        }
    }

    printf("==========Output Information==========\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    //Open output file (skipped here: a custom AVIOContext was installed above)
    if (!(ofmt->flags & AVFMT_NOFILE) && !(ofmt_ctx->flags & AVFMT_FLAG_CUSTOM_IO)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf("Could not open output file '%s'\n", out_filename);
            goto end;
        }
    }
    //Write file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf("Error occurred when opening output file\n");
        goto end;
    }


    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index = 0;
        AVStream *in_stream, *out_stream;

        //Get an AVPacket
        if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base, cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            ifmt_ctx = ifmt_ctx_v;
            stream_index = videoindex_out;

            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == videoindex_v) {
                        //FIX: No PTS (Example: Raw H.264)
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
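                        // Worked example: at 25 fps, calc_duration = AV_TIME_BASE/25 = 40000 (us);
                        // with time_base 1/90000, pts and duration then advance by
                        // 40000 / (av_q2d(time_base1) * AV_TIME_BASE) = 3600 ticks per frame.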

                        cur_pts_v = pkt.pts;
                        break;
                    }
                    av_free_packet(&pkt); // not the video stream: drop it and read on
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }
        }
        else {
            ifmt_ctx = ifmt_ctx_a;
            stream_index = audioindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == audioindex_a) {

                        //FIX: No PTS
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a = pkt.pts;

                        break;
                    }
                    av_free_packet(&pkt); // not the audio stream: drop it and read on
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }

        }

        //FIX:Bitstream Filter
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif


        //Convert PTS/DTS
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index = stream_index;

        printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
            printf("Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);

    }
    //Write file trailer
    av_write_trailer(ofmt_ctx);

#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output (skip for custom IO; avio_close() is only valid for
     * contexts opened with avio_open()) */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE) && !(ofmt_ctx->flags & AVFMT_FLAG_CUSTOM_IO))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
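
muxer_mp4() above writes through a custom AVIOContext whose write_buffer() callback is not shown in this excerpt. A minimal sketch of a callback with the expected signature, dumping the muxed bytes to a file (the FILE sink and its name are assumptions for illustration, not the original implementation):

#include <stdio.h>
#include <stdint.h>

static FILE *g_out_file = NULL;  // hypothetical sink, closed elsewhere

static int write_buffer(void *opaque, uint8_t *buf, int buf_size)
{
    (void)opaque;  // unused in this sketch
    if (!g_out_file)
        g_out_file = fopen("bigbuckbunny.mp4", "wb");
    if (!g_out_file)
        return -1;
    // hand the muxed bytes straight to the file
    return (int)fwrite(buf, 1, buf_size, g_out_file);
}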
Example No. 12
int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVDictionary *out_opts = NULL;
    AVPacket pkt;
    const char *fmt, *in_filename, *out_filename;
    int ret, i, cmp;
	int cnt = 0;

    if (argc < 4) {
        printf("usage: remuxing input output fmt [do_sff do_rate_emu]\n");
        return 1;
    }
	if(argc >= 5){
		do_sff = atoi(argv[4]);
	}
	if(argc >= 6){
		do_rate_emu = atoi(argv[5]);
	}

    in_filename  = argv[1];
    out_filename = argv[2];
	fmt = argv[3];

    av_register_all();
    avformat_network_init();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, fmt, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
		
		avpriv_set_pts_info(out_stream, in_stream->pts_wrap_bits, in_stream->time_base.num, in_stream->time_base.den);
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

	if(out_filename && strstr(out_filename, ".m3u8")){
		av_opt_set_int(ofmt_ctx, "hls_wrap",  6, AV_OPT_SEARCH_CHILDREN);
		av_opt_set_int(ofmt_ctx, "hls_list_size",  6, AV_OPT_SEARCH_CHILDREN); 
		av_opt_set(ofmt_ctx, "hls_time", "1.0", AV_OPT_SEARCH_CHILDREN);
	}

    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
		av_dict_set(&out_opts, "chunked_post", "0", 0);	
        ret = avio_open2(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE|AVIO_FLAG_NONBLOCK, NULL, &out_opts);

        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = ofmt_ctx->pb && do_sff ? sff_write_header(ofmt_ctx) : avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
	if(ofmt_ctx->pb){
		avio_flush(ofmt_ctx->pb);
	}

    while (1) {
        AVStream *in_stream, *out_stream;
		ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
		
		i = pkt.stream_index;
		if(curr_dts[i] == AV_NOPTS_VALUE && pkt.dts != AV_NOPTS_VALUE){
			first_dts[i] = pkt.dts;
			start_time[i] = av_gettime_relative();
		}
		if(pkt.dts != AV_NOPTS_VALUE){
			curr_dts[i] = pkt.dts; // in stream time_base units
		}

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
		
		ret = ofmt_ctx->pb && do_sff ? sff_write_packet(ofmt_ctx, &pkt) : av_interleaved_write_frame(ofmt_ctx, &pkt); 
		if(ofmt_ctx->pb){
			avio_flush(ofmt_ctx->pb);
		}
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
		++cnt;
		//printf("cnt %d\t", cnt);
		
		// Rate emulation: block until the stream time elapsed since this
		// stream's first packet has also elapsed on the wall clock.
		do{
			curr_time[i] = av_gettime_relative();
			cmp = av_compare_ts(curr_dts[i] - first_dts[i], in_stream->time_base,
					curr_time[i] - start_time[i], AV_TIME_BASE_Q);
			if(!do_rate_emu || cmp <= 0)break;

			av_usleep(10000); // wait 10 ms, then re-check
		}while(cmp > 0);
		
    }
	
	ofmt_ctx->pb && do_sff ? sff_write_packet(ofmt_ctx, NULL) : av_write_trailer(ofmt_ctx); 
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE) && ofmt_ctx->pb){
        avio_close(ofmt_ctx->pb);
		av_dict_free(&out_opts);
	}
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
	
	printf("end of remux\n");
    return 0;
}
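
For reference, the remuxing example above relies on several file-scope variables (do_sff, do_rate_emu, first_dts, curr_dts, start_time, curr_time) that are not shown in this excerpt. A sketch of plausible declarations (the array bound and initialization are assumptions); note the first-packet check in the loop only fires if curr_dts[] starts out as AV_NOPTS_VALUE:

#include <stdint.h>
#include <libavutil/avutil.h>

#define MAX_STREAMS 64                    /* hypothetical upper bound */

static int     do_sff;                    /* wrap output with the sff_* helpers */
static int     do_rate_emu;               /* pace writes to the input's real rate */
static int64_t first_dts[MAX_STREAMS];    /* first DTS seen per stream (stream tb) */
static int64_t curr_dts[MAX_STREAMS];     /* latest DTS per stream (stream tb) */
static int64_t start_time[MAX_STREAMS];   /* wall clock at first packet (us) */
static int64_t curr_time[MAX_STREAMS];    /* wall clock at latest check (us) */

/* before the read loop, e.g. near the top of main():
   for (i = 0; i < MAX_STREAMS; i++) curr_dts[i] = first_dts[i] = AV_NOPTS_VALUE; */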