Example #1
static int config_props(AVFilterLink* outlink)
{
    AVFilterContext *ctx    = outlink->src;
    AVFilterLink    *inlink = ctx->inputs[0];
    FPSContext      *s      = ctx->priv;

    outlink->time_base  = av_inv_q(s->framerate);
    outlink->frame_rate = s->framerate;

    /* Calculate the input and output pts offsets for start_time */
    if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
        double first_pts = s->start_time * AV_TIME_BASE;
        if (first_pts < INT64_MIN || first_pts > INT64_MAX) {
            av_log(ctx, AV_LOG_ERROR, "Start time %f cannot be represented in internal time base\n",
                   s->start_time);
            return AVERROR(EINVAL);
        }
        s->in_pts_off  = av_rescale_q_rnd(first_pts, AV_TIME_BASE_Q, inlink->time_base,
                                          s->rounding | AV_ROUND_PASS_MINMAX);
        s->out_pts_off = av_rescale_q_rnd(first_pts, AV_TIME_BASE_Q, outlink->time_base,
                                          s->rounding | AV_ROUND_PASS_MINMAX);
        s->next_pts = s->out_pts_off;
        av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64") from start time %f\n",
               s->in_pts_off, s->out_pts_off, s->start_time);
    }

    return 0;
}
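Since nearly every snippet on this page passes AV_ROUND_PASS_MINMAX, here is a small self-contained sketch (an illustration, not taken from any of the examples) of what that flag buys: INT64_MIN and INT64_MAX, and therefore AV_NOPTS_VALUE, pass through av_rescale_q_rnd() unscaled, so "no timestamp" survives a time-base conversion.

#include <stdio.h>
#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

int main(void)
{
    AVRational in_tb  = { 1, 90000 }; /* e.g. an MPEG-TS time base */
    AVRational out_tb = { 1, 1000 };  /* e.g. milliseconds, as FLV uses */

    /* 2 s = 180000 ticks of 1/90000 -> 2000 ticks of 1/1000 */
    printf("%lld\n", (long long)av_rescale_q_rnd(180000, in_tb, out_tb,
                         AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));

    /* AV_NOPTS_VALUE (== INT64_MIN) is returned unchanged: */
    printf("%d\n", av_rescale_q_rnd(AV_NOPTS_VALUE, in_tb, out_tb,
                       AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX) == AV_NOPTS_VALUE);
    return 0;
}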
Example #2
void CAudioEncoder::Encode(AVFrame* inputSample)
{
	AVCodecContext *pContext = m_audioStream->codec;


	int error;

	if ((error = av_audio_fifo_realloc(m_fifo, av_audio_fifo_size(m_fifo) + inputSample->nb_samples)) < 0)
	{
		fprintf(stderr, "Could not reallocate FIFO\n");
		return;
	}

	/** Store the new samples in the FIFO buffer. */
	if (av_audio_fifo_write(m_fifo, (void **)inputSample->data, inputSample->nb_samples) < inputSample->nb_samples)
	{
		fprintf(stderr, "Could not write data to FIFO\n");
		return;
	}
	do
	{
		int current_frame_size = FFMIN(av_audio_fifo_size(m_fifo), m_frame_size);
		if (current_frame_size < m_frame_size)
			break;

		m_tempFrame->nb_samples = current_frame_size;

		if (av_audio_fifo_read(m_fifo, (void **)m_tempFrame->data, current_frame_size) < current_frame_size)
		{
			fprintf(stderr, "Could not read data from FIFO\n");
			return;
		}

		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = NULL;    // packet data will be allocated by the encoder
		pkt.size = 0;
		int got_output;

		error = avcodec_encode_audio2(pContext, &pkt, m_tempFrame.get(), &got_output);
		if (error >= 0 && got_output)
		{
			pkt.stream_index = m_audioStream->index;

			pkt.pts = av_rescale_q_rnd(pkt.pts, pContext->time_base, m_audioStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pkt.dts = av_rescale_q_rnd(pkt.dts, pContext->time_base, m_audioStream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
			pkt.duration = av_rescale_q(pkt.duration, pContext->time_base, m_audioStream->time_base);
			pkt.pos = -1;

			//printf("AAC wrote %d bytes \n", pkt.size);
			auto pContext = m_pfileWriter->GetContext();
			av_interleaved_write_frame(pContext, &pkt);
			//fwrite(pkt.data, 1, pkt.size, m_fileName.get());
		}
		av_free_packet(&pkt);
	} while (1);


}
Example #3
static int write_frame(AVFormatContext *fmt_ctx, const AVRational *time_base, AVStream *st, AVPacket *pkt)
{
	/* rescale output packet timestamp values from codec to stream timebase */
	pkt->pts = av_rescale_q_rnd(pkt->pts, *time_base, st->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt->dts = av_rescale_q_rnd(pkt->dts, *time_base, st->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
	pkt->duration = av_rescale_q(pkt->duration, *time_base, st->time_base);
	pkt->stream_index = st->index;
	/* Write the compressed frame to the media file. */
	return av_interleaved_write_frame(fmt_ctx, pkt); // a return value of 0 means success
}
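A hedged sketch of how a caller might drive write_frame() above, in the style of the old avcodec_encode_video2() muxing examples; the encoder context c, stream st, format context fmt_ctx and input frame are assumptions, not part of the original snippet:

static int encode_and_write(AVFormatContext *fmt_ctx, AVCodecContext *c,
                            AVStream *st, AVFrame *frame)
{
	AVPacket pkt = { 0 };
	int got_packet = 0;
	int ret;

	av_init_packet(&pkt);
	ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
	if (ret < 0)
		return ret;
	if (got_packet)
		ret = write_frame(fmt_ctx, &c->time_base, st, &pkt); /* rescales, then muxes */
	return ret;
}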
Example #4
/* Read a frame from the input and save it in the buffer */
static int read_frame(AVFilterContext *ctx, FPSContext *s, AVFilterLink *inlink, AVFilterLink *outlink)
{
    AVFrame *frame;
    int ret;
    int64_t in_pts;

    /* Must only be called when we have buffer room available */
    av_assert1(s->frames_count < 2);

    ret = ff_inlink_consume_frame(inlink, &frame);
    /* Caller must have run ff_inlink_check_available_frame first */
    av_assert1(ret);
    if (ret < 0)
        return ret;

    /* Convert frame pts to output timebase.
     * The dance with offsets is required to match the rounding behaviour of the
     * previous version of the fps filter when using the start_time option. */
    in_pts = frame->pts;
    frame->pts = s->out_pts_off + av_rescale_q_rnd(in_pts - s->in_pts_off,
                                                   inlink->time_base, outlink->time_base,
                                                   s->rounding | AV_ROUND_PASS_MINMAX);

    av_log(ctx, AV_LOG_DEBUG, "Read frame with in pts %"PRId64", out pts %"PRId64"\n",
           in_pts, frame->pts);

    s->frames[s->frames_count++] = frame;
    s->frames_in++;

    return 1;
}
Example #5
static inline int64_t rescale_ts(struct ffmpeg_mux *ffm, int64_t val, int idx)
{
	AVStream *stream = get_stream(ffm, idx);
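	/* Note: dividing by time_base.num first cancels the numerator, so the
	 * result is (val / den) seconds rescaled into stream->time_base,
	 * whatever the codec time base numerator is. */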

	return av_rescale_q_rnd(val / stream->codec->time_base.num,
			stream->codec->time_base, stream->time_base,
			AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
}
Example #6
/* Convert status_pts to outlink timebase */
static void update_eof_pts(AVFilterContext *ctx, FPSContext *s, AVFilterLink *inlink, AVFilterLink *outlink, int64_t status_pts)
{
    int eof_rounding = (s->eof_action == EOF_ACTION_PASS) ? AV_ROUND_UP : s->rounding;
    s->status_pts = av_rescale_q_rnd(status_pts, inlink->time_base, outlink->time_base,
                                     eof_rounding | AV_ROUND_PASS_MINMAX);

    av_log(ctx, AV_LOG_DEBUG, "EOF is at pts %"PRId64"\n", s->status_pts);
}
Example #7
int remux_packet(AVFormatContext *ofmt_ctx, AVPacket *pkt){
    int ret;
    // look up the input/output stream pair to rescale between
    // (fmt_ctx is the global input format context)
    AVStream *in_stream, *out_stream;
    in_stream  = fmt_ctx->streams[pkt->stream_index];
    out_stream = ofmt_ctx->streams[pkt->stream_index];

    
    // convert pts/dts and duration to the output stream's time base
    pkt->pts = av_rescale_q_rnd(pkt->pts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt->dts = av_rescale_q_rnd(pkt->dts, in_stream->time_base, out_stream->time_base, (AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    pkt->duration = av_rescale_q(pkt->duration, in_stream->time_base, out_stream->time_base);
    pkt->pos = -1;
    // remux into the output container
    ret = av_interleaved_write_frame(ofmt_ctx, pkt);
    return ret;
}
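A hypothetical driver loop for remux_packet() (not shown in the source; fmt_ctx is assumed to be the global input context the function reads from):

AVPacket pkt;
while (av_read_frame(fmt_ctx, &pkt) >= 0) {
    if (remux_packet(ofmt_ctx, &pkt) < 0)
        break;
    av_free_packet(&pkt); /* same era and pattern as the other examples on this page */
}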
Example #8
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame) {
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
                   filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts,
                                   ofmt_ctx->streams[stream_index]->codec->time_base,
                                   ofmt_ctx->streams[stream_index]->time_base,
                                   AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts,
                                   ofmt_ctx->streams[stream_index]->codec->time_base,
                                   ofmt_ctx->streams[stream_index]->time_base,
                                   AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    enc_pkt.duration = av_rescale_q(enc_pkt.duration,
                                    ofmt_ctx->streams[stream_index]->codec->time_base,
                                    ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
Example #9
// XXX: we should probably prefer the stream duration over the format
// duration
int64_t sxpi_demuxing_probe_duration(const struct demuxing_ctx *ctx)
{
    if (!ctx->is_image) {
        int64_t probe_duration64 = ctx->fmt_ctx->duration;
        AVRational scaleq = AV_TIME_BASE_Q;

        if (probe_duration64 == AV_NOPTS_VALUE && ctx->stream->time_base.den) {
            probe_duration64 = ctx->stream->duration;
            scaleq = ctx->stream->time_base;
        }

        if (probe_duration64 != AV_NOPTS_VALUE)
            return av_rescale_q_rnd(probe_duration64, scaleq, AV_TIME_BASE_Q, 0);
    }
    return AV_NOPTS_VALUE;
}
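The probe above returns its result in AV_TIME_BASE units (microseconds); given some demuxing context ctx, a caller would presumably convert to seconds like this (illustrative only):

int64_t d = sxpi_demuxing_probe_duration(ctx);
if (d != AV_NOPTS_VALUE)
    printf("duration: %.3f s\n", d / (double)AV_TIME_BASE);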
Example #10
static void write_packet_header(AVFormatContext *ctx, StreamInfo *stream,
                                int length, int key_frame)
{
    int timestamp;
    AVIOContext *s = ctx->pb;

    stream->nb_packets++;
    stream->packet_total_size += length;
    if (length > stream->packet_max_size)
        stream->packet_max_size =  length;

    avio_wb16(s,0); /* version */
    avio_wb16(s,length + 12);
    avio_wb16(s, stream->num); /* stream number */
    timestamp = av_rescale_q_rnd(stream->nb_frames, (AVRational){1000, 1}, stream->frame_rate, AV_ROUND_ZERO);
    avio_wb32(s, timestamp); /* timestamp */
    avio_w8(s, 0); /* reserved */
    avio_w8(s, key_frame ? 2 : 0); /* flags */
}
int main_dummy(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *inVideoFmtCtx = NULL, *inAudioFmtCtx = NULL, *outFmtCtx = NULL;
    AVPacket pkt = { 0 }; /* zero-init so the cleanup at 'end:' is safe */
    const char *inVideo_filename, *inAudio_filename, *out_filename;
    int ret, i;

    if (argc < 4) {
        printf("usage: %s input_video input_audio output\n"
            "API example program to remux two media files with libavformat and libavcodec.\n"
            "The output format is guessed according to the file extension.\n"
            "\n", argv[0]);
        return 1;
    }

    inVideo_filename = argv[1];
    inAudio_filename = argv[2];
    out_filename = argv[3];

    av_register_all();

    /* =============== OPEN STREAMS ================*/

    if ((ret = open_input_file2(argv[1], &inVideoFmtCtx)) < 0)
        goto end;

    if ((ret = open_input_file2(argv[2], &inAudioFmtCtx)) < 0)
        goto end;

    /* ========== ALLOCATE OUTPUT CONTEXT ==========*/

    avformat_alloc_output_context2(&outFmtCtx, NULL, NULL, out_filename);
    if (!outFmtCtx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = outFmtCtx->oformat;

    /* =============== SETUP VIDEO CODEC ================*/
    for (i = 0; i < inVideoFmtCtx->nb_streams; i++) {
        AVStream *in_stream = inVideoFmtCtx->streams[i];
        AVStream *out_stream = avformat_new_stream(outFmtCtx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (outFmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    /* =============== SETUP AUDIO CODEC ================*/
    for (i = 0; i < inAudioFmtCtx->nb_streams; i++) {
        setup_mp3_audio_codec(outFmtCtx);
    }

    av_dump_format(outFmtCtx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&outFmtCtx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(outFmtCtx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }


    /* =============== SETUP FILTERS ================*/

    init_filters(inAudioFmtCtx, outFmtCtx);

    AVStream *in_stream, *out_stream;
    AVFrame *frame = NULL; /* NULL so av_frame_free() at 'end:' is safe */

    while (1) {        

        /* =============== VIDEO STREAM ================*/
        ret = av_read_frame(inVideoFmtCtx, &pkt);
        if (ret < 0)
            break;

        in_stream = inVideoFmtCtx->streams[pkt.stream_index];
        out_stream = outFmtCtx->streams[pkt.stream_index];

        log_packet(inVideoFmtCtx, &pkt, "in");

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = (int)av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(outFmtCtx, &pkt, "out");

        ret = av_interleaved_write_frame(outFmtCtx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);


        /* =============== AUDIO STREAM ================*/

#if 0
        ret = av_read_frame(inAudioFmtCtx, &pkt);
        if (ret < 0)
            break;
        in_stream = inAudioFmtCtx->streams[pkt.stream_index];

        pkt.stream_index++;
        out_stream = outFmtCtx->streams[pkt.stream_index];

        log_packet(inAudioFmtCtx, &pkt, "in");

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(outFmtCtx, &pkt, "out");

        ret = av_interleaved_write_frame(outFmtCtx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
#else

        if ((ret = av_read_frame(inAudioFmtCtx, &pkt)) < 0)
            break;
        int streamIndex = pkt.stream_index;
        int gotFrame;

        if (_filterCtx[streamIndex].FilterGraph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&pkt, inAudioFmtCtx->streams[streamIndex]->time_base,
                inAudioFmtCtx->streams[streamIndex]->codec->time_base);
            ret = avcodec_decode_audio4(inAudioFmtCtx->streams[streamIndex]->codec, frame,
                &gotFrame, &pkt);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (gotFrame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, inAudioFmtCtx, outFmtCtx, streamIndex);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            }
            else {
                av_frame_free(&frame);
            }
        }
        else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&pkt,
                inAudioFmtCtx->streams[streamIndex]->time_base,
                outFmtCtx->streams[streamIndex+1]->time_base);

            ret = av_interleaved_write_frame(outFmtCtx, &pkt);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&pkt);
#endif // 0

    }

    av_write_trailer(outFmtCtx);
end:

    av_free_packet(&pkt);
    av_frame_free(&frame);
    for (i = 0; inAudioFmtCtx && i < inAudioFmtCtx->nb_streams; i++) {
        avcodec_close(inAudioFmtCtx->streams[i]->codec);
        if (outFmtCtx && outFmtCtx->nb_streams > i && outFmtCtx->streams[i] && outFmtCtx->streams[i]->codec)
            avcodec_close(outFmtCtx->streams[i]->codec);
        if (_filterCtx && _filterCtx[i].FilterGraph)
            avfilter_graph_free(&_filterCtx[i].FilterGraph);
    }
    av_free(_filterCtx);

    avformat_close_input(&inVideoFmtCtx);
    avformat_close_input(&inAudioFmtCtx);

    /* close output */
    if (outFmtCtx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&outFmtCtx->pb);
    avformat_free_context(outFmtCtx);

    if (ret < 0 && ret != AVERROR_EOF) {
        //fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}
Example #12
static int request_frame(AVFilterLink *outlink)
{
    AVFilterContext *ctx = outlink->src;
    FPSContext        *s = ctx->priv;
    int ret;

    ret = ff_request_frame(ctx->inputs[0]);

    /* flush the fifo */
    if (ret == AVERROR_EOF && av_fifo_size(s->fifo)) {
        int i;
        for (i = 0; av_fifo_size(s->fifo); i++) {
            AVFrame *buf;

            av_fifo_generic_read(s->fifo, &buf, sizeof(buf), NULL);
            if (av_fifo_size(s->fifo)) {
                buf->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base,
                                        outlink->time_base) + s->frames_out;

                if ((ret = ff_filter_frame(outlink, buf)) < 0)
                    return ret;

                s->frames_out++;
            } else {
                /* This is the last frame, we may have to duplicate it to match
                 * the last frame duration */
                int j;
                int eof_rounding = (s->eof_action == EOF_ACTION_PASS) ? AV_ROUND_UP : s->rounding;
                int delta = av_rescale_q_rnd(ctx->inputs[0]->current_pts - s->first_pts,
                                             ctx->inputs[0]->time_base,
                                             outlink->time_base, eof_rounding) - s->frames_out;
                av_log(ctx, AV_LOG_DEBUG, "EOF frames_out:%d delta:%d\n", s->frames_out, delta);
                /* if the delta is equal to 1, it means we just need to output
                 * the last frame. Greater than 1 means we will need duplicate
                 * delta-1 frames */
                if (delta > 0) {
                    for (j = 0; j < delta; j++) {
                        AVFrame *dup = av_frame_clone(buf);

                        av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n");
                        dup->pts = av_rescale_q(s->first_pts, ctx->inputs[0]->time_base,
                                                outlink->time_base) + s->frames_out;

                        if ((ret = ff_filter_frame(outlink, dup)) < 0)
                            return ret;

                        s->frames_out++;
                        if (j > 0) s->dup++;
                    }
                    av_frame_free(&buf);
                } else {
                    /* for delta less or equal to 0, we should drop the frame,
                     * otherwise, we will have one or more extra frames */
                    av_frame_free(&buf);
                    s->drop++;
                }
            }
        }
        return 0;
    }

    return ret;
}
JNIEXPORT jint JNICALL Java_com_jiuan_it_ipc_utils_CombineVideo_combine(JNIEnv *env, jobject obj,
																jstring inFileName1, 
																jstring inFileName2, 
																jstring outFileName,
																jint isAudio)
{
	const char *strInName1 = (*env)->GetStringUTFChars(env, inFileName1, 0);
	const char *strInName2 = (*env)->GetStringUTFChars(env, inFileName2, 0);
	const char *strOutName = (*env)->GetStringUTFChars(env, outFileName, 0);

	in1_fmtctx = NULL;
	in2_fmtctx = NULL;
	out_fmtctx = NULL;
	out_video_stream = NULL;
	out_audio_stream = NULL;
	video_stream_index = -1;
	audio_stream_index = -1;

	int ret = -1;

	av_register_all();	
	if (0 > CombineVideo_OpenInput(strInName1, strInName2))
	{
		ret = -1;
		goto ErrLab;
	}
	
	if(0 > CombineVideo_OpenOutput(strOutName,isAudio))
	{
		ret = -1;
		goto ErrLab;
	}

	AVFormatContext *input_ctx = in1_fmtctx;
	AVPacket pkt;
	int64_t pts_v = 0;
	int64_t pts_a = 0;
	int64_t dts_v = 0;
	int64_t dts_a = 0;
	while(1)
	{
		if(0 > av_read_frame(input_ctx, &pkt))
		{
			if (input_ctx == in1_fmtctx)
			{
				float videoDuraTime, audioDuraTime;

				// compute how far the first file's streams have run (in seconds)
				if (isAudio) {
					videoDuraTime = ((float)input_ctx->streams[video_stream_index]->time_base.num /
						(float)input_ctx->streams[video_stream_index]->time_base.den) * ((float)pts_v);
					audioDuraTime = ((float)input_ctx->streams[audio_stream_index]->time_base.num /
						(float)input_ctx->streams[audio_stream_index]->time_base.den) * ((float)pts_a);
				}
				else
				{
					videoDuraTime = ((float)input_ctx->streams[video_stream_index]->time_base.num /
						(float)input_ctx->streams[video_stream_index]->time_base.den) * ((float)pts_v);
					audioDuraTime = 0;
				}

				// When the first and second files use different time bases, the
				// accumulated offsets must be rescaled into the second file's time base.
				if (input_ctx->streams[pkt.stream_index]->time_base.den != in2_fmtctx->streams[pkt.stream_index]->time_base.den)
				{
					pts_v = av_rescale_q_rnd(pts_v, input_ctx->streams[pkt.stream_index]->time_base,
						in2_fmtctx->streams[pkt.stream_index]->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					dts_v = av_rescale_q_rnd(dts_v, input_ctx->streams[pkt.stream_index]->time_base,
						in2_fmtctx->streams[pkt.stream_index]->time_base, (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
				}

				// compute the pts/dts at which the first file ends
				if (audioDuraTime > videoDuraTime)
				{
					dts_v = pts_v = audioDuraTime / ((float)input_ctx->streams[video_stream_index]->time_base.num /
						(float)input_ctx->streams[video_stream_index]->time_base.den);
					dts_a++;
					pts_a++;
				}
				else
				{
					if (isAudio) {
						dts_a = pts_a = videoDuraTime / ((float)input_ctx->streams[audio_stream_index]->time_base.num /
							(float)input_ctx->streams[audio_stream_index]->time_base.den);
						dts_v++;
						pts_v++;
					}
					else
					{
						dts_a = pts_a = videoDuraTime;
						dts_v++;
						pts_v++;
					}
				}
				input_ctx = in2_fmtctx;
				continue;
			}
			break;
		}

		if (pkt.stream_index == video_stream_index)
		{
			if (input_ctx == in2_fmtctx)
			{
				pkt.pts += pts_v;
				pkt.dts += dts_v;
			}
			else
			{
				pts_v = pkt.pts;
				dts_v = pkt.dts;
			}
		}
		else if (pkt.stream_index == audio_stream_index)
		{
			if (input_ctx == in2_fmtctx)
			{
				pkt.pts += pts_a;
				pkt.dts += dts_a;
			}
			else
			{
				pts_a = pkt.pts;
				dts_a = pkt.dts;
			}
		}

        if (pkt.stream_index == video_stream_index)
        {
			pkt.pts = av_rescale_q_rnd(pkt.pts, input_ctx->streams[pkt.stream_index]->time_base,
				out_fmtctx->streams[pkt.stream_index]->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
			pkt.dts = av_rescale_q_rnd(pkt.dts, input_ctx->streams[pkt.stream_index]->time_base,
				out_fmtctx->streams[pkt.stream_index]->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
			pkt.pos = -1;

			if (av_interleaved_write_frame(out_fmtctx, &pkt) < 0)
			{
				printf( "Error muxing packet\n");
				//break;
			}
		}
		av_free_packet(&pkt);		
	}

	av_write_trailer(out_fmtctx);
	ret = 0;

ErrLab:
	avformat_close_input(&in1_fmtctx);
	avformat_close_input(&in2_fmtctx);

	if (out_fmtctx && !(out_fmtctx->oformat->flags & AVFMT_NOFILE))
		avio_close(out_fmtctx->pb);

	avformat_free_context(out_fmtctx);
	return ret;
}
Example #14
int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    const char *in_filename, *out_filename;
    int ret, i;
    if (argc < 3) {
        printf("usage: %s input output\n"
               "API example program to remux a media file with libavformat and libavcodec.\n"
               "The output format is guessed according to the file extension.\n"
               "\n", argv[0]);
        return 1;
    }
    in_filename  = argv[1];
    out_filename = argv[2];
    av_register_all();
    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }
    av_dump_format(ifmt_ctx, 0, in_filename, 0);
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
    while (1) {
        AVStream *in_stream, *out_stream;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;
        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
        log_packet(ifmt_ctx, &pkt, "in");
        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(ofmt_ctx, &pkt, "out");
        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
    }
    av_write_trailer(ofmt_ctx);
end:
    avformat_close_input(&ifmt_ctx);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
    return 0;
}
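As an aside, the three rescale calls in the copy loop above are exactly what av_packet_rescale_ts() bundles: it rescales pts and dts with AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX and duration with av_rescale_q, so the loop body could be condensed to:

av_packet_rescale_ts(&pkt, in_stream->time_base, out_stream->time_base);
pkt.pos = -1;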
int video_thr(LPVOID lpParam)
{
	int iRet = -1;
	AVFormatContext		*	pFmtCtx				= NULL;
	AVFormatContext		*	pRtmpFmtCtx			= NULL;
	AVInputFormat		*	pVideoInputFmt		= NULL;
	AVOutputFormat		*	pVideoOutfmt		= NULL;
	struct_stream_info	*	strct_streaminfo	= NULL;
	AVCodecContext		*	pCodecContext		= NULL;
	AVCodec				*	pCodec				= NULL;
	int						iVideoIndex			= -1;
	int						iVideo_Height		= 0;
	int						iVideo_Width		= 0;
	int						iVideoPic			= 0;
	int64_t					start_time			= 0;
	int						frame_index			= 0;
	SDL_Event				event;

	CLS_DlgStreamPusher* pThis = (CLS_DlgStreamPusher*)lpParam;
	if (pThis == NULL){
		TRACE("video_thr--pThis == NULL\n");
		return iRet;
	}
	pVideoInputFmt = av_find_input_format("dshow");
	if (pVideoInputFmt == NULL){
		TRACE("pVideoInputFmt == NULL\n");
		return iRet;
	}

	char* psDevName = pThis->GetDeviceName(n_Video);
	if (psDevName == NULL){
		TRACE("video_thr--psDevName == NULL");
		return iRet;
	}

	while (1){
		if (pThis->m_cstrPushAddr != ""){
			break;
		}
	}

	// allocate the output AVFormatContext from the push (RTMP) URL
	avformat_alloc_output_context2(&pRtmpFmtCtx, NULL, "flv", pThis->m_cstrPushAddr);

	if (NULL == pRtmpFmtCtx){
		TRACE("NULL == pRtmpFmtCtx");
		return iRet;
	}
	pVideoOutfmt = pRtmpFmtCtx->oformat;

	strct_streaminfo = pThis->m_pStreamInfo;
	if (NULL == strct_streaminfo){
		TRACE("NULL == strct_streaminfo");
		return iRet;
	}

	FILE *fp_yuv = fopen("output.yuv", "wb+");

	pFmtCtx = avformat_alloc_context();
	if (avformat_open_input(&pFmtCtx, psDevName, pVideoInputFmt, NULL) != 0){
		TRACE("avformat_open_input err!\n");
		goto END;
	}
	if (avformat_find_stream_info(pFmtCtx, NULL) < 0){
		TRACE("avformat_find_stream_info(pFmtCtx, NULL) < 0\n");
		goto END;
	}

	for (int i = 0; i < pFmtCtx->nb_streams; i++){
		if (pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
			iVideoIndex = i;
			break;
		}
	}

	if (iVideoIndex < 0){
		TRACE("iVideoIndex < 0\n");
		goto END;
	}

	pCodecContext = pFmtCtx->streams[iVideoIndex]->codec;
	if (NULL == pCodecContext){
		TRACE("NULL == pCodecContext");
		goto END;
	}
	pCodec = avcodec_find_decoder(pCodecContext->codec_id);
	if (pCodec == NULL){
		TRACE("avcodec_find_decoder<0");
		goto END;
	}
	if (avcodec_open2(pCodecContext, pCodec, NULL)<0){
		TRACE("avcodec_open2<0");
		goto END;
	}

	for (int i = 0; i < pFmtCtx->nb_streams; i++) {
		// create an output AVStream for each input AVStream
		AVStream *in_stream = pFmtCtx->streams[i];
		AVStream *out_stream = avformat_new_stream(pRtmpFmtCtx, in_stream->codec->codec);
		if (!out_stream) {
			printf("Failed allocating output stream\n");
			iRet = AVERROR_UNKNOWN;
			goto END;
		}
		// copy the AVCodecContext settings
		iRet = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (iRet < 0) {
			TRACE("Failed to copy context from input to output stream codec context\n");
			goto END;
		}
		out_stream->codec->codec_tag = 0;
		if (pRtmpFmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	if (!(pVideoOutfmt->flags & AVFMT_NOFILE)) {
		iRet = avio_open(&pRtmpFmtCtx->pb, pThis->m_cstrPushAddr, AVIO_FLAG_WRITE);
		if (iRet < 0) {
			TRACE("Could not open output URL '%s'", pThis->m_cstrPushAddr);
			goto END;
		}
	}

	iRet = avformat_write_header(pRtmpFmtCtx, NULL);
	if (iRet < 0) {
		TRACE("Error occurred when opening output URL\n");
		goto END;
	}

	start_time = av_gettime();

	// get the video width and height
	iVideo_Height = pCodecContext->height;
	iVideo_Width  = pCodecContext->width;
	TRACE("video_thr--video_height[%d],video_width[%d]", iVideo_Height, iVideo_Width);

	strct_streaminfo->m_pVideoPacket = (AVPacket*)av_malloc(sizeof(AVPacket));

	strct_streaminfo->m_pVideoFrame = av_frame_alloc();
	strct_streaminfo->m_pVideoFrameYUV = av_frame_alloc();
	strct_streaminfo->m_pVideoOutBuffer = (uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, iVideo_Width, iVideo_Height));
	avpicture_fill((AVPicture *)strct_streaminfo->m_pVideoFrameYUV, strct_streaminfo->m_pVideoOutBuffer, AV_PIX_FMT_YUV420P, iVideo_Width, iVideo_Height);
	strct_streaminfo->m_video_sws_ctx = sws_getContext(iVideo_Width, iVideo_Height, pCodecContext->pix_fmt,
		iVideo_Width, iVideo_Height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	if (NULL == strct_streaminfo->m_video_sws_ctx){
		TRACE("NULL == strct_streaminfo->m_video_sws_ctx\n");
		goto END;
	}

	strct_streaminfo->m_video_refresh_tid = SDL_CreateThread(video_refresh_thread, NULL, strct_streaminfo);

	// pull frames from the camera
	for (;;){
		AVStream *in_stream, *out_stream;
		SDL_WaitEvent(&event);
		if (event.type == FF_VIDEO_REFRESH_EVENT){
			if (av_read_frame(pFmtCtx, strct_streaminfo->m_pVideoPacket) >= 0){
				if (strct_streaminfo->m_pVideoPacket->pts == AV_NOPTS_VALUE){
					//Write PTS  
					AVRational time_base1 = pFmtCtx->streams[iVideoIndex]->time_base;
					//Duration between 2 frames (us)  
					int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(pFmtCtx->streams[iVideoIndex]->r_frame_rate);
					//Parameters  
					strct_streaminfo->m_pVideoPacket->pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
					strct_streaminfo->m_pVideoPacket->dts = strct_streaminfo->m_pVideoPacket->pts;
					strct_streaminfo->m_pVideoPacket->duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
				}

				if (strct_streaminfo->m_pVideoPacket->stream_index == iVideoIndex){

					AVRational time_base = pFmtCtx->streams[iVideoIndex]->time_base;
					AVRational time_base_q = { 1, AV_TIME_BASE };
					int64_t pts_time = av_rescale_q(strct_streaminfo->m_pVideoPacket->dts, time_base, time_base_q);
					int64_t now_time = av_gettime() - start_time;
					if (pts_time > now_time)
						av_usleep(pts_time - now_time);

					in_stream = pFmtCtx->streams[strct_streaminfo->m_pVideoPacket->stream_index];
					out_stream = pRtmpFmtCtx->streams[strct_streaminfo->m_pVideoPacket->stream_index];
					/* copy packet */
					// convert PTS/DTS to the output stream time base
					strct_streaminfo->m_pVideoPacket->pts = av_rescale_q_rnd(strct_streaminfo->m_pVideoPacket->pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					strct_streaminfo->m_pVideoPacket->dts = av_rescale_q_rnd(strct_streaminfo->m_pVideoPacket->dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
					strct_streaminfo->m_pVideoPacket->duration = av_rescale_q(strct_streaminfo->m_pVideoPacket->duration, in_stream->time_base, out_stream->time_base);
					strct_streaminfo->m_pVideoPacket->pos = -1;
					TRACE("Send %8d video frames to output URL\n", frame_index);

					frame_index++;

					iRet = av_interleaved_write_frame(pRtmpFmtCtx, strct_streaminfo->m_pVideoPacket);

					if (iRet < 0) {
						TRACE("Error muxing packet\n");
						break;
					}

					if (pThis->m_blVideoShow){

						// decode for local display
						iRet = avcodec_decode_video2(pCodecContext, strct_streaminfo->m_pVideoFrame, &iVideoPic, strct_streaminfo->m_pVideoPacket);
						if (iRet < 0){
							TRACE("Decode Error.\n");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}
						if (iVideoPic <= 0){
							TRACE("iVideoPic <= 0");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}

						if (sws_scale(strct_streaminfo->m_video_sws_ctx, (const uint8_t* const*)strct_streaminfo->m_pVideoFrame->data, strct_streaminfo->m_pVideoFrame->linesize, 0, /*strct_streaminfo->m_height*/iVideo_Height,
							strct_streaminfo->m_pVideoFrameYUV->data, strct_streaminfo->m_pVideoFrameYUV->linesize) < 0){
							TRACE("sws_scale < 0");
							av_free_packet(strct_streaminfo->m_pVideoPacket);
							goto END;
						}

						if (pThis->m_blPushStream){
							// push the stream
							if (pThis->push_stream() < 0){
								TRACE("pThis->push_stream() < 0");
								goto END;
							}
						}


						if (SDL_UpdateTexture(strct_streaminfo->m_sdlTexture, NULL, strct_streaminfo->m_pVideoFrameYUV->data[0], strct_streaminfo->m_pVideoFrameYUV->linesize[0]) < 0){
							TRACE("SDL_UpdateTexture < 0\n");
							goto END;
						}

						if (SDL_RenderClear(strct_streaminfo->m_sdlRenderer) < 0){
							TRACE("SDL_RenderClear<0\n");
							goto END;
						}
						
						if (SDL_RenderCopy(strct_streaminfo->m_sdlRenderer, strct_streaminfo->m_sdlTexture, NULL, NULL) < 0){
							TRACE("SDL_RenderCopy<0\n");
							goto END;
						}

						SDL_RenderPresent(strct_streaminfo->m_sdlRenderer);
					}
				}
				av_free_packet(strct_streaminfo->m_pVideoPacket);
			}
		}
		else if (event.type == FF_BREAK_EVENT){
			break;
		}
	}
	av_write_trailer(pRtmpFmtCtx);

	iRet = 1;
END:
	if (fp_yuv) fclose(fp_yuv);
	if (strct_streaminfo->m_video_sws_ctx){
		sws_freeContext(strct_streaminfo->m_video_sws_ctx);
	}
	if (strct_streaminfo->m_pVideoFrameYUV){
		av_frame_free(&strct_streaminfo->m_pVideoFrameYUV);
	}
	avformat_close_input(&pFmtCtx);
	avformat_free_context(pRtmpFmtCtx);
	return iRet;
}
Example #16
static void *start_smoothing( void *ptr )
{
    obe_t *h = ptr;
    int num_muxed_data = 0, buffer_complete = 0;
    int64_t start_clock = -1, start_pcr, end_pcr, temporal_vbv_size = 0, cur_pcr;
    obe_muxed_data_t **muxed_data = NULL, *start_data, *end_data;
    AVFifoBuffer *fifo_data = NULL, *fifo_pcr = NULL;
    uint8_t *output_buf;

    struct sched_param param = {0};
    param.sched_priority = 99;
    pthread_setschedparam( pthread_self(), SCHED_FIFO, &param );

    /* This thread buffers one VBV worth of frames */
    fifo_data = av_fifo_alloc( TS_PACKETS_SIZE );
    if( !fifo_data )
    {
        fprintf( stderr, "[mux-smoothing] Could not allocate data fifo" );
        return NULL;
    }

    fifo_pcr = av_fifo_alloc( 7 * sizeof(int64_t) );
    if( !fifo_pcr )
    {
        fprintf( stderr, "[mux-smoothing] Could not allocate pcr fifo" );
        return NULL;
    }

    if( h->obe_system == OBE_SYSTEM_TYPE_GENERIC )
    {
        for( int i = 0; i < h->num_encoders; i++ )
        {
            if( h->encoders[i]->is_video )
            {
                pthread_mutex_lock( &h->encoders[i]->queue.mutex );
                while( !h->encoders[i]->is_ready )
                    pthread_cond_wait( &h->encoders[i]->queue.in_cv, &h->encoders[i]->queue.mutex );
                x264_param_t *params = h->encoders[i]->encoder_params;
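                /* bits initially in the VBV divided by the max bitrate gives
                 * seconds of buffering; rescale that duration into OBE_CLOCK
                 * ticks, rounding up. */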
                temporal_vbv_size = av_rescale_q_rnd(
                    (int64_t)params->rc.i_vbv_buffer_size * params->rc.f_vbv_buffer_init,
                    (AVRational){ 1, params->rc.i_vbv_max_bitrate },
                    (AVRational){ 1, OBE_CLOCK }, AV_ROUND_UP );
                pthread_mutex_unlock( &h->encoders[i]->queue.mutex );
                break;
            }
        }
    }

    while( 1 )
    {
        pthread_mutex_lock( &h->mux_smoothing_queue.mutex );

        while( h->mux_smoothing_queue.size == num_muxed_data && !h->cancel_mux_smoothing_thread )
            pthread_cond_wait( &h->mux_smoothing_queue.in_cv, &h->mux_smoothing_queue.mutex );

        if( h->cancel_mux_smoothing_thread )
        {
            pthread_mutex_unlock( &h->mux_smoothing_queue.mutex );
            break;
        }

        num_muxed_data = h->mux_smoothing_queue.size;

        /* Refill the buffer after a drop */
        pthread_mutex_lock( &h->drop_mutex );
        if( h->mux_drop )
        {
            syslog( LOG_INFO, "Mux smoothing buffer reset\n" );
            h->mux_drop = 0;
            av_fifo_reset( fifo_data );
            av_fifo_reset( fifo_pcr );
            buffer_complete = 0;
            start_clock = -1;
        }
        pthread_mutex_unlock( &h->drop_mutex );

        if( !buffer_complete )
        {
            start_data = h->mux_smoothing_queue.queue[0];
            end_data = h->mux_smoothing_queue.queue[num_muxed_data-1];

            start_pcr = start_data->pcr_list[0];
            end_pcr = end_data->pcr_list[(end_data->len / 188)-1];
            if( end_pcr - start_pcr >= temporal_vbv_size )
            {
                buffer_complete = 1;
                start_clock = -1;
            }
            else
            {
                pthread_mutex_unlock( &h->mux_smoothing_queue.mutex );
                continue;
            }
        }

        //printf("\n mux smoothed frames %i \n", num_muxed_data );

        muxed_data = malloc( num_muxed_data * sizeof(*muxed_data) );
        if( !muxed_data )
        {
            pthread_mutex_unlock( &h->output_queue.mutex );
            syslog( LOG_ERR, "Malloc failed\n" );
            return NULL;
        }
        memcpy( muxed_data, h->mux_smoothing_queue.queue, num_muxed_data * sizeof(*muxed_data) );
        pthread_mutex_unlock( &h->mux_smoothing_queue.mutex );

        for( int i = 0; i < num_muxed_data; i++ )
        {
            if( av_fifo_realloc2( fifo_data, av_fifo_size( fifo_data ) + muxed_data[i]->len ) < 0 )
            {
                syslog( LOG_ERR, "Malloc failed\n" );
                return NULL;
            }

            av_fifo_generic_write( fifo_data, muxed_data[i]->data, muxed_data[i]->len, NULL );

            if( av_fifo_realloc2( fifo_pcr, av_fifo_size( fifo_pcr ) + ((muxed_data[i]->len * sizeof(int64_t)) / 188) ) < 0 )
            {
                syslog( LOG_ERR, "Malloc failed\n" );
                return NULL;
            }

            av_fifo_generic_write( fifo_pcr, muxed_data[i]->pcr_list, (muxed_data[i]->len * sizeof(int64_t)) / 188, NULL );

            remove_from_queue( &h->mux_smoothing_queue );
            destroy_muxed_data( muxed_data[i] );
        }

        free( muxed_data );
        muxed_data = NULL;
        num_muxed_data = 0;

        while( av_fifo_size( fifo_data ) >= TS_PACKETS_SIZE )
        {
            output_buf = malloc( TS_PACKETS_SIZE + 7 * sizeof(int64_t) );
            if( !output_buf )
            {
                syslog( LOG_ERR, "Malloc failed\n" );
                return NULL;
            }
            av_fifo_generic_read( fifo_pcr, output_buf, 7 * sizeof(int64_t), NULL );
            av_fifo_generic_read( fifo_data, &output_buf[7 * sizeof(int64_t)], TS_PACKETS_SIZE, NULL );

            cur_pcr = AV_RN64( output_buf );

            if( start_clock != -1 )
            {
                sleep_input_clock( h, cur_pcr - start_pcr + start_clock );
            }

            if( start_clock == -1 )
            {
                start_clock = get_input_clock_in_mpeg_ticks( h );
                start_pcr = cur_pcr;
            }

            if( add_to_queue( &h->output_queue, output_buf ) < 0 )
                return NULL;
            output_buf = NULL;
        }
    }

    av_fifo_free( fifo_data );
    av_fifo_free( fifo_pcr );

    return NULL;
}
Example #17
int main(int argc, char **argv)
{
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVPacket pkt;

	int ret, i;

	av_register_all();

	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		fprintf(stderr, "Could not open input file '%s'", in_filename);
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		fprintf(stderr, "Failed to retrieve input stream information");
		goto end;
	}

	av_dump_format(ifmt_ctx, 0, in_filename, 0);

	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
	if (!ofmt_ctx) {
		fprintf(stderr, "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}

	ofmt = ofmt_ctx->oformat;


	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);

		if (!out_stream) {
			fprintf(stderr, "Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (ret < 0) {
			fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

	}


	av_dump_format(ofmt_ctx, 0, out_filename, 1);

	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			fprintf(stderr, "Could not open output file '%s'", out_filename);
			goto end;
		}
	}

	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		fprintf(stderr, "Error occurred when opening output file\n");
		goto end;
	}

	int frame = 0;

	while (1) {
		AVStream *in_stream, *out_stream;
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0)
			break;
		in_stream = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];
		//log_packet(ifmt_ctx, &pkt, "in");

		/* copy packet */
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;

		//log_packet(ofmt_ctx, &pkt, "out");

		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);

		if (ret < 0) {
			fprintf(stderr, "Error muxing packet\n");
			break;
		}
		av_free_packet(&pkt);
		frame++;
	}

	printf("프레임의 수 : %d\n", frame);
	av_write_trailer(ofmt_ctx);

end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_closep(&ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF) {
		char buf[256];
		av_strerror(ret, buf, sizeof(buf));
		fprintf(stderr, "Error occurred: %s\n", buf);
		return 1;
	}
	return 0;
}
Example #18
static int filter_frame(AVFilterLink *inlink, AVFrame *buf)
{
    AVFilterContext    *ctx = inlink->dst;
    FPSContext           *s = ctx->priv;
    AVFilterLink   *outlink = ctx->outputs[0];
    int64_t delta;
    int i, ret;

    s->frames_in++;
    /* discard frames until we get the first timestamp */
    if (s->first_pts == AV_NOPTS_VALUE) {
        if (buf->pts != AV_NOPTS_VALUE) {
            ret = write_to_fifo(s->fifo, buf);
            if (ret < 0)
                return ret;

            if (s->start_time != DBL_MAX && s->start_time != AV_NOPTS_VALUE) {
                double first_pts = s->start_time * AV_TIME_BASE;
                first_pts = FFMIN(FFMAX(first_pts, INT64_MIN), INT64_MAX);
                s->first_pts = av_rescale_q(first_pts, AV_TIME_BASE_Q,
                                                     inlink->time_base);
                av_log(ctx, AV_LOG_VERBOSE, "Set first pts to (in:%"PRId64" out:%"PRId64")\n",
                       s->first_pts, av_rescale_q(first_pts, AV_TIME_BASE_Q,
                                                  outlink->time_base));
            } else {
                s->first_pts = buf->pts;
            }
        } else {
            av_log(ctx, AV_LOG_WARNING, "Discarding initial frame(s) with no "
                   "timestamp.\n");
            av_frame_free(&buf);
            s->drop++;
        }
        return 0;
    }

    /* now wait for the next timestamp */
    if (buf->pts == AV_NOPTS_VALUE || av_fifo_size(s->fifo) <= 0) {
        return write_to_fifo(s->fifo, buf);
    }

    /* number of output frames */
    delta = av_rescale_q_rnd(buf->pts - s->first_pts, inlink->time_base,
                             outlink->time_base, s->rounding) - s->frames_out;

    if (delta < 1) {
        /* drop everything buffered except the last */
        int drop = av_fifo_size(s->fifo)/sizeof(AVFrame*);

        av_log(ctx, AV_LOG_DEBUG, "Dropping %d frame(s).\n", drop);
        s->drop += drop;

        flush_fifo(s->fifo);
        ret = write_to_fifo(s->fifo, buf);

        return ret;
    }

    /* can output >= 1 frames */
    for (i = 0; i < delta; i++) {
        AVFrame *buf_out;
        av_fifo_generic_read(s->fifo, &buf_out, sizeof(buf_out), NULL);

        /* duplicate the frame if needed */
        if (!av_fifo_size(s->fifo) && i < delta - 1) {
            AVFrame *dup = av_frame_clone(buf_out);

            av_log(ctx, AV_LOG_DEBUG, "Duplicating frame.\n");
            if (dup)
                ret = write_to_fifo(s->fifo, dup);
            else
                ret = AVERROR(ENOMEM);

            if (ret < 0) {
                av_frame_free(&buf_out);
                av_frame_free(&buf);
                return ret;
            }

            s->dup++;
        }

        buf_out->pts = av_rescale_q(s->first_pts, inlink->time_base,
                                    outlink->time_base) + s->frames_out;

        if ((ret = ff_filter_frame(outlink, buf_out)) < 0) {
            av_frame_free(&buf);
            return ret;
        }

        s->frames_out++;
    }
    flush_fifo(s->fifo);

    ret = write_to_fifo(s->fifo, buf);

    return ret;
}
int _tmain(int argc, _TCHAR* argv[])
{
    int ret;
    AVPacket packet;
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);
    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }
    av_register_all();
    avfilter_register_all();
    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;
    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);
        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            packet.dts = av_rescale_q_rnd(packet.dts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ifmt_ctx->streams[stream_index]->codec->time_base,
                    (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            packet.pts = av_rescale_q_rnd(packet.pts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ifmt_ctx->streams[stream_index]->codec->time_base,
                    (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }
            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            packet.dts = av_rescale_q_rnd(packet.dts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ofmt_ctx->streams[stream_index]->time_base,
                     (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            packet.pts = av_rescale_q_rnd(packet.pts,
                    ifmt_ctx->streams[stream_index]->time_base,
                    ofmt_ctx->streams[stream_index]->time_base,
                     (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&packet);
    }
    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }
        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }
    av_write_trailer(ofmt_ctx);
end:
    av_free_packet(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred\n");
    return (ret? 1:0);
}
Example #20
// internal helper implementation
static int split_media_file(char *dst, char *src, __int64 start, __int64 end, PFN_SPC spc)
{
    AVFormatContext *ifmt_ctx = NULL;
    AVFormatContext *ofmt_ctx = NULL;
    AVStream        *istream  = NULL;
    AVStream        *ostream  = NULL;
    AVRational       tbms     = { 1, 1000 };
    AVRational       tbvs     = { 1, 1    };
    int64_t          startpts = -1;
    int64_t          duration = -1;
    int64_t          total    = -1;
    int64_t          current  = -1;
    int              streamidx=  0;
    int              ret      = -1;

    av_register_all();
    avformat_network_init();

    if ((ret = avformat_open_input(&ifmt_ctx, src, 0, 0)) < 0) {
        printf("could not open input file '%s' !", src);
        goto done;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        printf("failed to retrieve input stream information ! \n");
        goto done;
    }

    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, dst);
    if (!ofmt_ctx) {
        printf("could not create output context !\n");
        goto done;
    }

    for (unsigned i=0; i<ifmt_ctx->nb_streams; i++) {
        istream = ifmt_ctx->streams[i];
        if (istream->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            streamidx = i;
            tbvs      = ifmt_ctx->streams[i]->time_base;
        }

        ostream = avformat_new_stream(ofmt_ctx, istream->codec->codec);
        if (!ostream) {
            printf("failed allocating output stream !\n");
            goto done;
        }

        ret = avcodec_copy_context(ostream->codec, istream->codec);
        if (ret < 0) {
            printf("failed to copy context from input to output stream codec context !\n");
            goto done;
        }

        ostream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER) {
            ostream->codec->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
        }
    }

    if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        ret = avio_open(&ofmt_ctx->pb, dst, AVIO_FLAG_WRITE);
        if (ret < 0) {
            printf("could not open output file '%s' !", dst);
            goto done;
        }
    }

    // calculate pts
    if (start >= 0) {
        startpts = ifmt_ctx->start_time * 1000 / AV_TIME_BASE;
        duration = ifmt_ctx->duration   * 1000 / AV_TIME_BASE;
        total    = duration - start; if (total < 0) total = 1;
        current  = 0;
        start   += startpts;
        end     += startpts;
        start    = av_rescale_q_rnd(start, tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        end      = av_rescale_q_rnd(end  , tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));

        // seek to start position
        av_seek_frame(ifmt_ctx, streamidx, start, AVSEEK_FLAG_BACKWARD);
    } else {
        startpts = ifmt_ctx->start_time * 1000 / AV_TIME_BASE;
        duration = end;
        total    = end;
        current  = 0;
        start    = startpts;
        end     += startpts;
        start    = av_rescale_q_rnd(start, tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        end      = av_rescale_q_rnd(end  , tbms, tbvs, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
    }

    // write header
    ret = avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        printf("error occurred when writing output file header !\n");
        goto done;
    }

    while (!g_exit_remux) {
        AVPacket pkt;
        ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0) {
//          fprintf(stderr, "failed to read frame !\n");
            break;
        }

        // get start pts
        if (pkt.stream_index == streamidx) {
            if (pkt.pts > end) {
                g_exit_remux = 1;
                goto next;
            }
            if (spc) {
                current = av_rescale_q_rnd(pkt.pts - start, tbvs, tbms, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
                if (current < 0    ) current = 0;
                if (current > total) current = total;
                spc(current, total);
            }
        }

        istream = ifmt_ctx->streams[pkt.stream_index];
        ostream = ofmt_ctx->streams[pkt.stream_index];
        pkt.pts = av_rescale_q_rnd(pkt.pts, istream->time_base, ostream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, istream->time_base, ostream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, istream->time_base, ostream->time_base);
        pkt.pos = -1;

        ret = av_interleaved_write_frame(ofmt_ctx, &pkt);
        if (ret < 0) {
            printf("error muxing packet !\n");
            g_exit_remux = 1;
            goto next;
        }

next:
        av_packet_unref(&pkt);
    }

    // write trailer
    av_write_trailer(ofmt_ctx);

done:
    // close input
    avformat_close_input(&ifmt_ctx);

    // close output
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
        avio_closep(&ofmt_ctx->pb);
    }

    avformat_free_context(ofmt_ctx);
    avformat_network_deinit();

    // done
    printf("\n");
    if (spc) spc(total, total);
    printf("\ndone.\n");
    return ret;
}
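A hypothetical usage sketch for split_media_file() above. The snippet does not show the PFN_SPC typedef, so this assumes void (*)(int64_t current, int64_t total), matching the spc(current, total) calls:

#include <stdint.h>
#include <stdio.h>

static void on_progress(int64_t current, int64_t total)
{
    fprintf(stderr, "\rsplitting: %3d%%", (int)(total ? 100 * current / total : 0));
}

int main(void)
{
    /* start/end are milliseconds, matching tbms = { 1, 1000 } above:
     * keep the range 10 s .. 25 s of in.mp4 */
    char dst[] = "out.mp4", src[] = "in.mp4";
    return split_media_file(dst, src, 10000, 25000, on_progress);
}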
Example #21
int segment(char *input_file, char* base_dirpath, char* output_index_file, char *base_file_name, char* base_file_extension, int segmentLength, int listlen) {
	unsigned int output_index = 1;
	AVOutputFormat *ofmt = NULL;
	AVFormatContext *ic = NULL;
	AVFormatContext *oc;
	AVStream *in_video_st = NULL;
	AVStream *in_audio_st = NULL;
	AVStream *out_video_st = NULL;
	AVStream *out_audio_st = NULL;
	AVCodec *codec;
	int ret;
	int quiet = 0;
	char currentOutputFileName[MAX_FILENAME_LENGTH] = {0};
	double segment_start_time = 0.0;
	char tmp_output_index_file[MAX_FILENAME_LENGTH] = {0};

	unsigned int actual_segment_durations[MAX_SEGMENTS+1];
	double packet_time = 0;

    sprintf(tmp_output_index_file, "%s.tmp", output_index_file);


	av_register_all();
	avformat_network_init(); // just to be safe with later versions and to handle all kinds of input URLs
	

	AVStream *in_stream = NULL; 
    AVStream *out_stream = NULL;

	ret = avformat_open_input(&ic, input_file, NULL, NULL);
	if (ret != 0) {
		fprintf(stderr, "Could not open input file %s. Error %d.\n", input_file, ret);
		exit(1);
	}

	if (avformat_find_stream_info(ic, NULL) < 0) {
		fprintf(stderr, "Could not read stream information.\n");
		avformat_close_input(&ic);
		exit(1);
	}

	oc = avformat_alloc_context();
	if (!oc) {
		fprintf(stderr, "Could not allocate output context.");
		avformat_close_input(&ic);
		exit(1);
	}

	int in_video_index = -1;
	int in_audio_index = -1;
	int out_video_index = -1;
	int out_audio_index = -1;
	int i;
	int listofs = 1;

	for (i = 0; i < ic->nb_streams; i++) {
		switch (ic->streams[i]->codec->codec_type) {
			case AVMEDIA_TYPE_VIDEO:
				if (!out_video_st) {
					in_video_st=ic->streams[i];
					in_video_index = i;
					in_video_st->discard = AVDISCARD_NONE;
					out_video_st = add_output_stream(oc, in_video_st);
					out_video_index=out_video_st->index;
				}
				break;
			case AVMEDIA_TYPE_AUDIO:
				if (!out_audio_st) {
					in_audio_st=ic->streams[i];
					in_audio_index = i;
					in_audio_st->discard = AVDISCARD_NONE;
					out_audio_st = add_output_stream(oc, in_audio_st);
					out_audio_index=out_audio_st->index;
				}
				break;
			default:
				ic->streams[i]->discard = AVDISCARD_ALL;
				break;
		}
	}

	if (in_video_index == -1) {
		fprintf(stderr, "Source stream must have video component.\n");
		avformat_close_input(&ic);
		avformat_free_context(oc);
		exit(1);
	}


	if (!ofmt) ofmt = av_guess_format("mpegts", NULL, NULL);
	if (!ofmt) {
		fprintf(stderr, "Could not find MPEG-TS muxer.\n");
		exit(1);
	}

	oc->oformat = ofmt;

	if (oc->oformat->flags & AVFMT_GLOBALHEADER) oc->flags |= CODEC_FLAG_GLOBAL_HEADER;
	
	av_dump_format(oc, 0, base_file_name, 1);


	codec = avcodec_find_decoder(in_video_st->codec->codec_id);
	if (!codec) {
		fprintf(stderr, "Could not find video decoder, key frames will not be honored.\n");
	}
	ret = avcodec_open2(in_video_st->codec, codec, NULL);
	if (ret < 0) {
		fprintf(stderr, "Could not open video decoder, key frames will not be honored.\n");
	}

	
	snprintf(currentOutputFileName, MAX_FILENAME_LENGTH, "%s/%s-%u%s", base_dirpath, base_file_name, output_index, base_file_extension);
		
	if (avio_open(&oc->pb, currentOutputFileName,AVIO_FLAG_WRITE) < 0) {
		fprintf(stderr, "Could not open '%s'.\n", currentOutputFileName);
		exit(1);
	} else if (!quiet) { 
		fprintf(stderr, "Starting segment '%s'\n", currentOutputFileName);
	}

	int r = avformat_write_header(oc, NULL);
	if (r) {
		fprintf(stderr, "Could not write mpegts header to first output file.\n");
		exit(1);
	}


    unsigned int num_segments = 0;
    int decode_done;
	int waitfirstpacket = 1; 
	time_t first_frame_sec = time(NULL);
	int iskeyframe = 0;     
	double vid_pts2time = (double)in_video_st->time_base.num / in_video_st->time_base.den;
	
#if USE_H264BSF  
    AVBitStreamFilterContext* h264bsfc =  av_bitstream_filter_init("h264_mp4toannexb");   
#endif	
	double prev_packet_time = 0;
	do {
		AVPacket packet;

		decode_done = av_read_frame(ic, &packet);

		if (decode_done < 0) {
			break;
		}

		//get the most recent packet time
		//this time is used when the time for the final segment is printed. It may not be on the edge of
		//of a keyframe!
		if (packet.stream_index == in_video_index) {
#if USE_H264BSF  
            av_bitstream_filter_filter(h264bsfc, ic->streams[in_video_index]->codec, NULL, &packet.data, &packet.size, packet.data, packet.size, 0);  
#endif
			packet.stream_index = out_video_index;
			packet_time = (double) packet.pts * vid_pts2time;
			iskeyframe = packet.flags & AV_PKT_FLAG_KEY;
			if (iskeyframe && waitfirstpacket) {
				waitfirstpacket = 0;
				prev_packet_time = packet_time;
				segment_start_time = packet_time;
				first_frame_sec = time(NULL);
			}
		} else if (packet.stream_index == in_audio_index){
			packet.stream_index = out_audio_index;
			iskeyframe=0;
		} else {
			//how did this get here?!
			av_free_packet(&packet);
			continue;
		}

		
		if (waitfirstpacket) {
			av_free_packet(&packet);
			continue;
		}
		
		//Start looking for a segment split slightly (0.25 s) before the segment duration expires. Segments
		//are split on key frames, so we cannot expect every segment to come out exactly equal in length.
		if (iskeyframe &&  ((packet_time - segment_start_time) >= (segmentLength - 0.25)) ) { //a keyframe near or past segmentLength -> SPLIT
			printf("key frame packet time=%f, start time=%f\n", packet_time, segment_start_time);
			
			avio_flush(oc->pb);
			avio_close(oc->pb);
			
			actual_segment_durations[num_segments] = (unsigned int) rint(prev_packet_time - segment_start_time);
			num_segments++;
			if (num_segments > listlen) { //slide the list window: drop the oldest segment
				snprintf(currentOutputFileName, MAX_FILENAME_LENGTH, "%s/%s-%u%s", base_dirpath, base_file_name, listofs, base_file_extension);
				unlink (currentOutputFileName);
				listofs++; num_segments--;
				memmove(actual_segment_durations,actual_segment_durations+1,num_segments*sizeof(actual_segment_durations[0]));
					
			}
			write_index_file(output_index_file, tmp_output_index_file, segmentLength, num_segments, actual_segment_durations, listofs,  base_file_name, base_file_extension, (num_segments >= MAX_SEGMENTS));

			if (num_segments == MAX_SEGMENTS) {
					fprintf(stderr, "Reached \"hard\" max segment number %u. If this is not live stream increase segment duration. If live segmenting set max list lenth (-m ...)\n", MAX_SEGMENTS);
					break;
			}
			output_index++;
			snprintf(currentOutputFileName, MAX_FILENAME_LENGTH, "%s/%s-%u%s", base_dirpath, base_file_name, output_index, base_file_extension);
			
			
			if (avio_open(&oc->pb, currentOutputFileName, AVIO_FLAG_WRITE) < 0) {
				fprintf(stderr, "Could not open '%s'\n", currentOutputFileName);
				break;
			} else if (!quiet) { 
				fprintf(stderr, "Starting segment '%s'\n", currentOutputFileName);
			}
			fflush(stderr);
 			segment_start_time = packet_time;
			first_frame_sec=time(NULL);
		}

		if (packet.stream_index == out_video_index) {
			prev_packet_time = packet_time;
		}


		in_stream = ic->streams[packet.stream_index];
        out_stream = oc->streams[packet.stream_index]; 
        packet.pts = av_rescale_q_rnd(packet.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); 
        packet.dts = av_rescale_q_rnd(packet.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX); 
        packet.duration = av_rescale_q(packet.duration, in_stream->time_base, out_stream->time_base); 
        packet.pos = -1;

		ret = av_write_frame(oc, &packet);

		if (ret < 0) {
			fprintf(stderr, "Warning: Could not write frame of stream.\n");
		} else if (ret > 0) {
			fprintf(stderr, "End of stream requested.\n");
			av_free_packet(&packet);
			break;
		}

		av_free_packet(&packet);
	} while (!decode_done);

	if (in_video_st->codec->codec !=NULL) avcodec_close(in_video_st->codec);
	if (num_segments < MAX_SEGMENTS) {
		//make sure all packets are written and then close the last file. 
		avio_flush(oc->pb);
		av_write_trailer(oc);

		for (i = 0; i < oc->nb_streams; i++) {
			av_freep(&oc->streams[i]->codec);
			av_freep(&oc->streams[i]);
		}

		avio_close(oc->pb);
		av_free(oc);
		
		if (num_segments>0){
			actual_segment_durations[num_segments] = (unsigned int) rint(packet_time - segment_start_time);
			if (actual_segment_durations[num_segments] == 0)   actual_segment_durations[num_segments] = 1;
			num_segments++;
			write_index_file(output_index_file, tmp_output_index_file, segmentLength, num_segments, actual_segment_durations, listofs,  base_file_name, base_file_extension, 1);
		} 
	}

	avformat_close_input(&ic);
	
	return 0;

}
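The segmenter above converts a packet pts to wall-clock seconds via vid_pts2time. The same conversion in isolation, as a small sketch:

#include <math.h>
#include <libavformat/avformat.h>

static double packet_time_seconds(const AVPacket *pkt, const AVStream *st)
{
    if (pkt->pts == AV_NOPTS_VALUE)
        return NAN;                            /* packet carries no timestamp */
    return pkt->pts * av_q2d(st->time_base);   /* ticks * (num/den) seconds */
}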
Example #22
int Testffmpeg()
{
	AVOutputFormat *ofmt = NULL;  
	//Input AVFormatContext and Output AVFormatContext  
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;  
	AVPacket pkt;  
	const char *in_filename, *out_filename;  
	int ret, i;  
	int videoindex=-1;  
	int frame_index=0;  
	in_filename  = "rtmp://live.hkstv.hk.lxdns.com/live/hks";  
	out_filename = "receive.flv";  

	av_register_all();  
	//Network  
	avformat_network_init();  
	//Input  
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {  
		printf( "Could not open input file.");  
		goto end;  
	}  
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {  
		printf( "Failed to retrieve input stream information");  
		goto end;  
	}  

	for(i=0; i<ifmt_ctx->nb_streams; i++)   
		if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){  
			videoindex=i;  
			break;  
		}  

	av_dump_format(ifmt_ctx, 0, in_filename, 0);

	//Output
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);

	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt = ofmt_ctx->oformat;
	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
		if (!out_stream) {
			printf( "Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		//Copy the settings of AVCodecContext
		ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (ret < 0) {
			printf( "Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	//Dump Format------------------
	av_dump_format(ofmt_ctx, 0, out_filename, 1);
	//Open output URL
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf( "Could not open output URL '%s'", out_filename);
			goto end;
		}
	}
	//Write file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		printf( "Error occurred when opening output URL\n");
		goto end;
	}

#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

	while (1) {
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0)
			break;

		in_stream  = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];
		/* copy packet */
		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		//Print to Screen
		if(pkt.stream_index==videoindex){
			printf("Receive %8d video frames from input URL\n",frame_index);
			frame_index++;

#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		}
		//ret = av_write_frame(ofmt_ctx, &pkt);
		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);

		if (ret < 0) {
			printf( "Error muxing packet\n");
			break;
		}

		av_free_packet(&pkt);
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif

	//Write file trailer
	av_write_trailer(ofmt_ctx);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
Example #23
static void write_video_frame(AVFormatContext *oc, AVStream *st)
{
    int ret;
    static struct SwsContext *sws_ctx;
    AVCodecContext *c = st->codec;

    if (frame_count >= STREAM_NB_FRAMES) {
        /* No more frames to compress. The codec has a latency of a few
         * frames if using B-frames, so we get the last frames by
         * passing the same picture again. */
    } else {
        if (c->pix_fmt != AV_PIX_FMT_YUV420P) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!sws_ctx) {
                sws_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_YUV420P,
                                         c->width, c->height, c->pix_fmt,
                                         sws_flags, NULL, NULL, NULL);
                if (!sws_ctx) {
                    fprintf(stderr,
                            "Could not initialize the conversion context\n");
                    exit(1);
                }
            }
            fill_yuv_image(&src_picture, frame_count, c->width, c->height);
            sws_scale(sws_ctx,
                      (const uint8_t * const *)src_picture.data, src_picture.linesize,
                      0, c->height, dst_picture.data, dst_picture.linesize);
        } else {
            fill_yuv_image(&dst_picture, frame_count, c->width, c->height);
        }
    }

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* Raw video case - directly store the picture in the packet */
        AVPacket pkt;
        av_init_packet(&pkt);

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = st->index;
        pkt.data          = dst_picture.data[0];
        pkt.size          = sizeof(AVPicture);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        int got_packet;
        av_init_packet(&pkt);

        /* encode the image */
        frame->pts = frame_count;
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        /* If size is zero, it means the image was buffered. */

        if (!ret && got_packet && pkt.size) {
            /* rescale output packet timestamp values from codec to stream timebase */
            pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
            pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
            pkt.stream_index = st->index;

            /* Write the compressed frame to the media file. */
            ret = av_interleaved_write_frame(oc, &pkt);
        } else {
            ret = 0;
        }
    }
    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    frame_count++;
}
Example #24
static void write_audio_frame(AVFormatContext *oc, AVStream *st)
{
    AVCodecContext *c;
    AVPacket pkt = { 0 }; // data and size must be 0;
    int got_packet, ret, dst_nb_samples;

    av_init_packet(&pkt);
    c = st->codec;

    get_audio_frame((int16_t *)src_samples_data[0], src_nb_samples, c->channels);

    /* convert samples from native format to destination codec format, using the resampler */
    if (swr_ctx) {
        /* compute destination number of samples */
        dst_nb_samples = av_rescale_rnd(swr_get_delay(swr_ctx, c->sample_rate) + src_nb_samples,
                                        c->sample_rate, c->sample_rate, AV_ROUND_UP);
        if (dst_nb_samples > max_dst_nb_samples) {
            av_free(dst_samples_data[0]);
            ret = av_samples_alloc(dst_samples_data, &dst_samples_linesize, c->channels,
                                   dst_nb_samples, c->sample_fmt, 0);
            if (ret < 0)
                exit(1);
            max_dst_nb_samples = dst_nb_samples;
            dst_samples_size = av_samples_get_buffer_size(NULL, c->channels, dst_nb_samples,
                                                          c->sample_fmt, 0);
        }

        /* convert to destination format */
        ret = swr_convert(swr_ctx,
                          dst_samples_data, dst_nb_samples,
                          (const uint8_t **)src_samples_data, src_nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error while converting\n");
            exit(1);
        }
    } else {
        dst_nb_samples = src_nb_samples;
    }

    audio_frame->nb_samples = dst_nb_samples;
    audio_frame->pts = av_rescale_q(samples_count, (AVRational){1, c->sample_rate}, c->time_base);
    avcodec_fill_audio_frame(audio_frame, c->channels, c->sample_fmt,
                             dst_samples_data[0], dst_samples_size, 0);
    samples_count += dst_nb_samples;

    ret = avcodec_encode_audio2(c, &pkt, audio_frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding audio frame: %s\n", av_err2str(ret));
        exit(1);
    }

    if (!got_packet)
        return;

    /* rescale output packet timestamp values from codec to stream timebase */
    pkt.pts = av_rescale_q_rnd(pkt.pts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    pkt.dts = av_rescale_q_rnd(pkt.dts, c->time_base, st->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
    pkt.duration = av_rescale_q(pkt.duration, c->time_base, st->time_base);
    pkt.stream_index = st->index;

    /* Write the compressed frame to the media file. */
    ret = av_interleaved_write_frame(oc, &pkt);
    if (ret != 0) {
        fprintf(stderr, "Error while writing audio frame: %s\n",
                av_err2str(ret));
        exit(1);
    }
}
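/* Sketch (not part of the original example): the pts arithmetic used in
 * write_audio_frame() above, in isolation. Samples are counted in a
 * {1, sample_rate} time base, so one tick equals one sample; rescaling the
 * running counter into the encoder time base yields the frame pts. */
static int64_t audio_pts_from_samples(int64_t samples_count, int sample_rate,
                                      AVRational enc_time_base)
{
    AVRational sample_tb = { 1, sample_rate };
    return av_rescale_q(samples_count, sample_tb, enc_time_base);
}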
int main(int argc, char* argv[])
{
	AVOutputFormat *ofmt = NULL;
	//Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVPacket pkt;
	const char *in_filename, *out_filename;
	int ret, i;
	int videoindex=-1;
	int frame_index=0;
	int64_t start_time=0;
	//in_filename  = "cuc_ieschool.mov";
	//in_filename  = "cuc_ieschool.mkv";
	//in_filename  = "cuc_ieschool.ts";
	//in_filename  = "cuc_ieschool.mp4";
	//in_filename  = "cuc_ieschool.h264";
	in_filename  = "cuc_ieschool.flv";//输入URL(Input file URL)
	//in_filename  = "shanghai03_p.h264";
	
	out_filename = "rtmp://localhost/publishlive/livestream";//输出 URL(Output URL)[RTMP]
	//out_filename = "rtp://233.233.233.233:6666";//输出 URL(Output URL)[UDP]

	av_register_all();
	//Network
	avformat_network_init();
	//Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf( "Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf( "Failed to retrieve input stream information");
		goto end;
	}

	for(i=0; i<ifmt_ctx->nb_streams; i++) 
		if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}

	av_dump_format(ifmt_ctx, 0, in_filename, 0);

	//Output
	
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_filename); //RTMP
	//avformat_alloc_output_context2(&ofmt_ctx, NULL, "mpegts", out_filename);//UDP

	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt = ofmt_ctx->oformat;
	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
		if (!out_stream) {
			printf( "Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		//Copy the settings of AVCodecContext
		ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (ret < 0) {
			printf( "Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	//Dump Format------------------
	av_dump_format(ofmt_ctx, 0, out_filename, 1);
	//Open output URL
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf( "Could not open output URL '%s'", out_filename);
			goto end;
		}
	}
	//Write file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		printf( "Error occurred when opening output URL\n");
		goto end;
	}

	start_time=av_gettime();
	while (1) {
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0)
			break;
		//FIX:No PTS (Example: Raw H.264)
		//Simple Write PTS
		if(pkt.pts==AV_NOPTS_VALUE){
			//Write PTS
			AVRational time_base1=ifmt_ctx->streams[videoindex]->time_base;
			//Duration between 2 frames (us)
			int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
			//Parameters
			pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
			pkt.dts=pkt.pts;
			pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
		}
		//Important:Delay
		if(pkt.stream_index==videoindex){
			AVRational time_base=ifmt_ctx->streams[videoindex]->time_base;
			AVRational time_base_q={1,AV_TIME_BASE};
			int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
			int64_t now_time = av_gettime() - start_time;
			if (pts_time > now_time)
				av_usleep(pts_time - now_time);

		}

		in_stream  = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];
		/* copy packet */
		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		//Print to Screen
		if(pkt.stream_index==videoindex){
			printf("Send %8d video frames to output URL\n",frame_index);
			frame_index++;
		}
		//ret = av_write_frame(ofmt_ctx, &pkt);
		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);

		if (ret < 0) {
			printf( "Error muxing packet\n");
			break;
		}
		
		av_free_packet(&pkt);
		
	}
	//Write file trailer
	av_write_trailer(ofmt_ctx);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
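The "Important:Delay" block above is what throttles a file push to real-time speed. A sketch of that step as a standalone helper (pace_to_timestamp is a made-up name):

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>
#include <libavutil/time.h>

static void pace_to_timestamp(int64_t ts, AVRational stream_tb, int64_t start_time_us)
{
    AVRational us_tb = { 1, AV_TIME_BASE };   /* microsecond time base */
    int64_t ts_us   = av_rescale_q(ts, stream_tb, us_tb);
    int64_t elapsed = av_gettime() - start_time_us;
    if (ts_us > elapsed)
        av_usleep((unsigned)(ts_us - elapsed));  /* sleep until the packet is due */
}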
Example #26
int main(int argc, char **argv)
{
	AVFormatContext *avfmt_in = NULL, *avfmt_out = NULL;
	AVCodecContext *avcodec_in = NULL, *avcodec_out = NULL;
	AVFrame *frame = NULL, *frame_out = NULL;
	AVPacket avpkt, avpkt_out;
	struct SwsContext *sws = NULL;
	int video_index_in = -1, audio_index_in = -1;
	int video_index_out = -1, audio_index_out = -1;

	av_register_all();
	avcodec_register_all();

	if(argc != 5){
		printf("Usage:\n\t%s input output width height\n", argv[0]);
	}
	assert(argc == 5);

	open_input_output_codecs(argv[1], argv[2], atoi(argv[3]), atoi(argv[4]), &avfmt_in, &avfmt_out, &avcodec_in, &avcodec_out, &video_index_in, &video_index_out, &audio_index_in, &audio_index_out, &sws);

	av_init_packet(&avpkt);
	av_init_packet(&avpkt_out);
	avpkt.data = NULL;
	avpkt.size = 0;

	frame = av_frame_alloc();
	frame_out = av_frame_alloc();
	frame_out->format = avcodec_out->pix_fmt;
	frame_out->width = avcodec_out->width;
	frame_out->height = avcodec_out->height;
	av_image_alloc(frame_out->data, frame_out->linesize, frame_out->width, frame_out->height, frame_out->format, 32);

	while(av_read_frame(avfmt_in, &avpkt) == 0){
		if(avpkt.stream_index != video_index_in && avpkt.stream_index != audio_index_in){
			av_free_packet(&avpkt);
			avpkt.data = NULL;
			avpkt.size = 0;
			continue;
		}

		if(avpkt.stream_index == video_index_in){
			avpkt.pts = av_rescale_q_rnd(avpkt.pts, avfmt_in->streams[video_index_in]->time_base, avfmt_out->streams[video_index_out]->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
			avpkt.dts = av_rescale_q_rnd(avpkt.dts, avfmt_in->streams[video_index_in]->time_base, avfmt_out->streams[video_index_out]->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
			avpkt.duration = av_rescale_q(avpkt.duration, avfmt_in->streams[video_index_in]->time_base, avfmt_out->streams[video_index_out]->time_base);

			while(avpkt.size > 0){
				int got_frame = 0;
				int len = 0;

				len = avcodec_decode_video2(avcodec_in, frame, &got_frame, &avpkt);
				assert(len >= 0);
				if(!got_frame){
					break;
				}
				avpkt.size -= len;
				avpkt.data += len;

				frame_out->pts = frame->pkt_pts;
				frame_out->pkt_pts = frame->pkt_pts;
				frame_out->pkt_dts = frame->pkt_dts;
				sws_scale(sws, (const uint8_t * const *)frame->data, frame->linesize, 0, frame->height, frame_out->data, frame_out->linesize);

				assert(avcodec_encode_video2(avcodec_out, &avpkt_out, frame_out, &got_frame) >= 0);
				if(got_frame){
					avpkt_out.stream_index = video_index_out;
					av_interleaved_write_frame(avfmt_out, &avpkt_out);
					av_free_packet(&avpkt_out);
					avpkt_out.data = NULL;
					avpkt_out.size = 0;
				}
			}
		}
		else if(avpkt.stream_index == audio_index_in){
			avpkt.pts = av_rescale_q_rnd(avpkt.pts, avfmt_in->streams[audio_index_in]->time_base, avfmt_out->streams[audio_index_out]->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
			avpkt.dts = av_rescale_q_rnd(avpkt.dts, avfmt_in->streams[audio_index_in]->time_base, avfmt_out->streams[audio_index_out]->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
			avpkt.duration = av_rescale_q(avpkt.duration, avfmt_in->streams[audio_index_in]->time_base, avfmt_out->streams[audio_index_out]->time_base);

			avpkt.stream_index = audio_index_out;
			av_interleaved_write_frame(avfmt_out, &avpkt);
		}
		av_free_packet(&avpkt);
		avpkt.data = NULL;
		avpkt.size = 0;
	}
	av_interleaved_write_frame(avfmt_out, NULL);
	av_write_trailer(avfmt_out);
	av_frame_free(&frame);
	av_freep(&frame_out->data[0]);
	av_frame_free(&frame_out);

	avformat_close_input(&avfmt_in);
	avformat_free_context(avfmt_in);
	avformat_free_context(avfmt_out);

	return 0;
}
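The read loop above exits at EOF without draining the encoder, so frames still buffered inside avcodec_out (e.g. B-frame lookahead) are dropped. A sketch of the usual flush pass for the old avcodec_encode_video2() API, under the same assumptions as the example:

#include <libavformat/avformat.h>

static void flush_video_encoder(AVCodecContext *enc, AVFormatContext *ofmt,
                                int stream_index)
{
    int got_packet;
    do {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;   /* let the encoder allocate the payload */
        pkt.size = 0;
        got_packet = 0;
        if (avcodec_encode_video2(enc, &pkt, NULL, &got_packet) < 0)
            break;                        /* encoder error: stop flushing */
        if (got_packet) {
            pkt.stream_index = stream_index;
            av_interleaved_write_frame(ofmt, &pkt);
        }
        av_free_packet(&pkt);
    } while (got_packet);
}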
Example #27
// concatenate H.264/AAC streams into an MP4
int joinmp4(char (*h264file)[400] ,char (*aacfile)[400],char * mp4,int length,int usefilter)
{
	//AVOutputFormat *ofmt = NULL;
	AVPacket pkt;
	AVStream *out_vstream = NULL;
	AVStream *out_astream = NULL;
	AVFormatContext *ofmt_ctx = NULL;
	int join_index = 0;
	AVBitStreamFilterContext* aacbsfc = NULL;
	long  last_video_pts = 0;
	long last_audio_pts = 0;
	long end_video_pts = 0;
	long end_audio_pts = 0;
	int videoindex_out = -1;
	int audioindex_out = -1;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext * ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL;

    int ret, i,retu =0,filter_ret=0;
    //	int fps;
    int videoindex_v=-1;
    int audioindex_a=-1;
    int frame_index=0;
    int64_t cur_pts_v=0,cur_pts_a=0;
    //set file path
    char *in_filename_v = h264file[join_index];
    char *in_filename_a = aacfile[join_index];
    char *out_filename = mp4;
joinone:
    //Input AVFormatContext and Output AVFormatContext
    ifmt_ctx_v = NULL;
    ifmt_ctx_a = NULL;

    ret = 0; i = 0;retu =0;filter_ret=0;
    //	int fps;
    videoindex_v=-1;
    audioindex_a=-1;
    frame_index=0;
    cur_pts_v=0;cur_pts_a=0;
    //set file path
    in_filename_v = h264file[join_index];
    in_filename_a = aacfile[join_index];
    out_filename = mp4;

	//register before use
	av_register_all();
	//open Input and set avformatcontext
	if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
		retu = -1; // -1 means the audio file could not be opened
		
		goto end;
	}
	if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
		retu = -2; // -2 means the video file could not be opened
		
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {

		retu = -3; // -3 means video stream info could not be retrieved
		goto end;
	}


	if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
		retu = -4; // -4 means audio stream info could not be retrieved
		goto end;
	}

	//open Output
	if(join_index == 0)
	{
		avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
		if (!ofmt_ctx) {
			retu = -5;
			goto end;
		}
	}

	//ofmt = ofmt_ctx->oformat;
	//find all video stream input type
	for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if(ifmt_ctx_v->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			AVStream *in_stream = ifmt_ctx_v->streams[i];
			videoindex_v=i;

			if(join_index == 0)
			{
				out_vstream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				videoindex_out=out_vstream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_vstream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_vstream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_vstream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			else
			{
				out_vstream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_vstream->duration);
			}
			if (!out_vstream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}

	//find all audio stream input type
	for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		if(ifmt_ctx_a->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
			AVStream *in_stream = ifmt_ctx_a->streams[i];
			audioindex_a=i;

			if(join_index == 0)
			{
				out_astream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
				audioindex_out=out_astream->index;
				//Copy the settings of AVCodecContext
				if (avcodec_copy_context(out_astream->codec, in_stream->codec) < 0) {
					retu = -7;
					goto end;
				}
				out_astream->codec->codec_tag = 0;
				if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
					out_astream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			else
			{
				out_astream->duration += in_stream->duration;
				//printf("duration = %ld\n",out_astream->duration);
			}
			if (!out_astream) {
				retu = -6;
				goto end;
			}
			break;
		}
	}
	if(join_index == 0)
	{
			//Open output file
		if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE)) {
			if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
				retu = -10;
				goto end;
			}
		}
		//Write file header
		if (avformat_write_header(ofmt_ctx, NULL) < 0) {
			retu = -11;
			goto end;
		}
	}
	if(usefilter&& aacbsfc == NULL)
		aacbsfc = av_bitstream_filter_init("aac_adtstoasc");


	while (true) {
		AVFormatContext *ifmt_ctx;
		int stream_index=0;
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		if(av_compare_ts(cur_pts_v,ifmt_ctx_v->streams[videoindex_v]->time_base,cur_pts_a,
					ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0)
		{
			ifmt_ctx=ifmt_ctx_v;
			stream_index=videoindex_out;
			if(av_read_frame(ifmt_ctx, &pkt) >= 0){

				do{
					in_stream  = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_vstream;
					if(pkt.stream_index==videoindex_v){

						//Simple Write PTS
						if(pkt.pts==AV_NOPTS_VALUE){

							//Write PTS
							AVRational time_base1=in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts=pkt.pts;
							pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_v=pkt.pts;
						break;
					}
				}
				while(av_read_frame(ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				//printf("pkt.duration = %ld\n",pkt.duration);
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;

					break;
			}
		}
		else
		{
			ifmt_ctx=ifmt_ctx_a;
			stream_index=audioindex_out;
			if(av_read_frame(ifmt_ctx, &pkt) >= 0){
				do
				{
					in_stream  = ifmt_ctx->streams[pkt.stream_index];
					out_stream = out_astream;
					if(pkt.stream_index==audioindex_a)
					{
						//Simple Write PTS
						if(pkt.pts==AV_NOPTS_VALUE)
						{
							//Write PTS
							AVRational time_base1=in_stream->time_base;
							//Duration between 2 frames (us)
							int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(in_stream->r_frame_rate);
							//Parameters
							pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							pkt.dts=pkt.pts;
							pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
							frame_index++;
						}
						cur_pts_a=pkt.pts;
						break;
					}
				}
				while(av_read_frame(ifmt_ctx, &pkt) >= 0);
			}
			else
			{
				join_index++;
				end_video_pts = last_video_pts;
				end_audio_pts = last_audio_pts;

				break;
			}

		}
		if(usefilter)
			filter_ret = av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data,&pkt.size, pkt.data, pkt.size, 0);
		if(filter_ret)
		{
			retu = -10;
			goto end;

		}
		//Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base,(enum AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);


		pkt.pos = -1;
		pkt.stream_index=stream_index;
		if(pkt.stream_index == audioindex_out)
		{
			pkt.pts += end_audio_pts;
			pkt.dts += end_audio_pts;
			last_audio_pts = pkt.pts+pkt.duration;
		//	printf("audio pts = %lld ,audio dts = %lld\n",pkt.pts,pkt.dts);
		}
		else
		{
			pkt.pts += end_video_pts;
			pkt.dts += end_video_pts;
			last_video_pts = pkt.pts+pkt.duration;
		}


		//Write
		if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
			av_free_packet(&pkt);
			break;
		}
		//av_packet_unref(&pkt);
			//av_interleaved_write_frame(ofmt_ctx, &pkt);
		av_free_packet(&pkt);
	}


end:


	avformat_close_input(&ifmt_ctx_v);
	avformat_close_input(&ifmt_ctx_a);


    avformat_free_context(ifmt_ctx_v);
    avformat_free_context(ifmt_ctx_a);
	if (ret < 0 && ret != AVERROR_EOF) {
	}
	if(join_index < length)
		goto joinone;
	
	av_write_trailer(ofmt_ctx);

	
	if(usefilter)
		av_bitstream_filter_close(aacbsfc);
	/* close output */
	if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	return retu;
}
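A hypothetical usage sketch for joinmp4() above: the char (*)[400] parameters are arrays of fixed-size path buffers, length is the number of segment pairs, and usefilter enables the aac_adtstoasc bitstream filter:

int main(void)
{
    char h264[2][400] = { "seg0.h264", "seg1.h264" };
    char aac[2][400]  = { "seg0.aac",  "seg1.aac"  };
    char out[]        = "joined.mp4";
    return joinmp4(h264, aac, out, 2, 1 /* apply aac_adtstoasc */);
}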
Example #28
bool StAVVideoMuxer::save(const StString& theFile) {
    if(myCtxListSrc.isEmpty()
    || theFile.isEmpty()) {
        return false;
    }

    StString aFormatName = myCtxListSrc[0]->iformat->name;
    const char* aFormatStr = formatToMetadata(myStereoFormat);

    std::vector<StRemuxContext> aSrcCtxList;
    //StArrayList<StRemuxContext> aSrcCtxList;
    unsigned int aStreamCount = 0;

    StAVOutContext aCtxOut;
    if(!aCtxOut.findFormat(NULL, theFile.toCString())) {
        signals.onError(StString("Unable to find a suitable output format for '") + theFile + "'.");
        return false;
    } else if(!aCtxOut.create(theFile)) {
        signals.onError(StString("Could not create output context."));
        return false;
    }

    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext aCtxSrc;
        aCtxSrc.Context = myCtxListSrc[aCtxId];
        if(aCtxId == 0) {
            av_dict_copy(&aCtxOut.Context->metadata, aCtxSrc.Context->metadata, AV_DICT_DONT_OVERWRITE);
            av_dict_set(&aCtxOut.Context->metadata, "STEREO_MODE", aFormatStr, 0);
        }
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            aCtxSrc.Streams.add((unsigned int )-1);
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                if(addStream(aCtxOut.Context, aStreamSrc)) {
                    aCtxSrc.Streams[aStreamId] = aStreamCount++;
                }
            }
        }
        aSrcCtxList.push_back(aCtxSrc);
    }

    // add audio streams after video
    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type == AVMEDIA_TYPE_AUDIO
            && addStream(aCtxOut.Context, aStreamSrc)) {
                aCtxSrc.Streams[aStreamId] = aStreamCount++;
            }
        }
    }

    // add other streams (subtitles) at the end
    for(size_t aCtxId = 0; aCtxId < myCtxListSrc.size(); ++aCtxId) {
        StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
        for(unsigned int aStreamId = 0; aStreamId < aCtxSrc.Context->nb_streams; ++aStreamId) {
            AVStream* aStreamSrc = aCtxSrc.Context->streams[aStreamId];
            if(aStreamSrc->codec->codec_type != AVMEDIA_TYPE_VIDEO
            && aStreamSrc->codec->codec_type != AVMEDIA_TYPE_AUDIO
            && addStream(aCtxOut.Context, aStreamSrc)) {
                aCtxSrc.Streams[aStreamId] = aStreamCount++;
            }
        }
    }

    av_dump_format(aCtxOut.Context, 0, theFile.toCString(), 1);
    if(!(aCtxOut.Context->oformat->flags & AVFMT_NOFILE)) {
        const int aState = avio_open2(&aCtxOut.Context->pb, theFile.toCString(), AVIO_FLAG_WRITE, NULL, NULL);
        if(aState < 0) {
            signals.onError(StString("Could not open output file '") + theFile + "' (" + stAV::getAVErrorDescription(aState) + ")");
            return false;
        }
    }

    int aState = avformat_write_header(aCtxOut.Context, NULL);
    if(aState < 0) {
        signals.onError(StString("Error occurred when opening output file (") + stAV::getAVErrorDescription(aState) + ").");
        return false;
    }

    AVPacket aPacket;
    for(;;) {
        size_t aNbEmpty = 0;
        for(size_t aCtxId = 0; aCtxId < aSrcCtxList.size(); ++aCtxId) {
            StRemuxContext& aCtxSrc = aSrcCtxList[aCtxId];
            if(!aCtxSrc.State) {
                ++aNbEmpty;
                continue;
            }

            if(av_read_frame(aCtxSrc.Context, &aPacket) < 0) {
                aCtxSrc.State = false;
                ++aNbEmpty;
                continue;
            }

            unsigned int aStreamOutIndex = aCtxSrc.Streams[aPacket.stream_index];
            if(aStreamOutIndex == (unsigned int )-1) {
                continue;
            }

            AVStream* aStreamIn  = aCtxSrc.Context->streams[aPacket.stream_index];
            AVStream* aStreamOut = aCtxOut.Context->streams[aStreamOutIndex];

        #ifdef ST_LIBAV_FORK
            const AVRounding aRoundParams = AV_ROUND_NEAR_INF;
        #else
            const AVRounding aRoundParams = AVRounding(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX);
        #endif
            aPacket.pts      = av_rescale_q_rnd(aPacket.pts, aStreamIn->time_base, aStreamOut->time_base, aRoundParams);
            aPacket.dts      = av_rescale_q_rnd(aPacket.dts, aStreamIn->time_base, aStreamOut->time_base, aRoundParams);
            aPacket.duration = static_cast<int >(av_rescale_q(aPacket.duration, aStreamIn->time_base, aStreamOut->time_base));
            aPacket.pos      = -1;

            aState = av_interleaved_write_frame(aCtxOut.Context, &aPacket);
            if(aState < 0) {
                signals.onError(StString("Error muxing packet (") + stAV::getAVErrorDescription(aState) + ").");
                return false;
            }
            av_free_packet(&aPacket);
        }
        if(aNbEmpty == aSrcCtxList.size()) {
            break;
        }
    }
    av_write_trailer(aCtxOut.Context);
    return true;
}
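The #ifdef above exists because AV_ROUND_PASS_MINMAX is not available everywhere (presumably missing from the targeted Libav fork). What the flag buys, shown in isolation: av_rescale_q_rnd() then passes INT64_MIN and INT64_MAX through unchanged, so AV_NOPTS_VALUE (== INT64_MIN) survives rescaling instead of turning into a bogus timestamp:

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

static int64_t rescale_keep_nopts(int64_t ts, AVRational in_tb, AVRational out_tb)
{
    /* rescale_keep_nopts(AV_NOPTS_VALUE, ...) returns AV_NOPTS_VALUE */
    return av_rescale_q_rnd(ts, in_tb, out_tb,
                            (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
}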
Example #29
static int rtp_write_header(AVFormatContext *s1)
{
    RTPMuxContext *s = s1->priv_data;
    int n;
    AVStream *st;

    if (s1->nb_streams != 1) {
        av_log(s1, AV_LOG_ERROR, "Only one stream supported in the RTP muxer\n");
        return AVERROR(EINVAL);
    }
    st = s1->streams[0];
    if (!is_supported(st->codec->codec_id)) {
        av_log(s1, AV_LOG_ERROR, "Unsupported codec %s\n", avcodec_get_name(st->codec->codec_id));

        return -1;
    }

    if (s->payload_type < 0)
        s->payload_type = ff_rtp_get_payload_type(s1, st->codec);
    s->base_timestamp = av_get_random_seed();
    s->timestamp = s->base_timestamp;
    s->cur_timestamp = 0;
    if (!s->ssrc)
        s->ssrc = av_get_random_seed();
    s->first_packet = 1;
    s->first_rtcp_ntp_time = ff_ntp_time();
    if (s1->start_time_realtime)
        /* Round the NTP time to whole milliseconds. */
        s->first_rtcp_ntp_time = (s1->start_time_realtime / 1000) * 1000 +
                                 NTP_OFFSET_US;

    if (s1->packet_size) {
        if (s1->pb->max_packet_size)
            s1->packet_size = FFMIN(s1->packet_size,
                                    s1->pb->max_packet_size);
    } else
        s1->packet_size = s1->pb->max_packet_size;
    if (s1->packet_size <= 12) {
        av_log(s1, AV_LOG_ERROR, "Max packet size %d too low\n", s1->packet_size);
        return AVERROR(EIO);
    }
    s->buf = av_malloc(s1->packet_size);
    if (s->buf == NULL) {
        return AVERROR(ENOMEM);
    }
    s->max_payload_size = s1->packet_size - 12;

    s->max_frames_per_packet = 0;
    if (s1->max_delay > 0) {
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            int frame_size = av_get_audio_frame_duration(st->codec, 0);
            if (!frame_size)
                frame_size = st->codec->frame_size;
            if (frame_size == 0) {
                av_log(s1, AV_LOG_ERROR, "Cannot respect max delay: frame size = 0\n");
            } else {
                s->max_frames_per_packet =
                        av_rescale_q_rnd(s1->max_delay,
                                         AV_TIME_BASE_Q,
                                         (AVRational){ frame_size, st->codec->sample_rate },
                                         AV_ROUND_DOWN);
            }
        }
        if (st->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            /* FIXME: We should round down here... */
            s->max_frames_per_packet = av_rescale_q(s1->max_delay, (AVRational){1, 1000000}, st->codec->time_base);
        }
    }

    avpriv_set_pts_info(st, 32, 1, 90000);
    switch(st->codec->codec_id) {
    case AV_CODEC_ID_MP2:
    case AV_CODEC_ID_MP3:
        s->buf_ptr = s->buf + 4;
        break;
    case AV_CODEC_ID_MPEG1VIDEO:
    case AV_CODEC_ID_MPEG2VIDEO:
        break;
    case AV_CODEC_ID_MPEG2TS:
        n = s->max_payload_size / TS_PACKET_SIZE;
        if (n < 1)
            n = 1;
        s->max_payload_size = n * TS_PACKET_SIZE;
        s->buf_ptr = s->buf;
        break;
    case AV_CODEC_ID_H264:
        /* check for H.264 MP4 syntax */
        if (st->codec->extradata_size > 4 && st->codec->extradata[0] == 1) {
            s->nal_length_size = (st->codec->extradata[4] & 0x03) + 1;
        }
        break;
    case AV_CODEC_ID_VORBIS:
    case AV_CODEC_ID_THEORA:
        if (!s->max_frames_per_packet) s->max_frames_per_packet = 15;
        s->max_frames_per_packet = av_clip(s->max_frames_per_packet, 1, 15);
        s->max_payload_size -= 6; // ident+frag+tdt/vdt+pkt_num+pkt_length
        s->num_frames = 0;
        goto defaultcase;
    case AV_CODEC_ID_VP8:
        av_log(s1, AV_LOG_ERROR, "RTP VP8 payload implementation is "
                                 "incompatible with the latest spec drafts.\n");
        break;
    case AV_CODEC_ID_ADPCM_G722:
        /* Due to a historical error, the clock rate for G722 in RTP is
         * 8000, even if the sample rate is 16000. See RFC 3551. */
        avpriv_set_pts_info(st, 32, 1, 8000);
        break;
    case AV_CODEC_ID_ILBC:
        if (st->codec->block_align != 38 && st->codec->block_align != 50) {
            av_log(s1, AV_LOG_ERROR, "Incorrect iLBC block size specified\n");
            goto fail;
        }
        if (!s->max_frames_per_packet)
            s->max_frames_per_packet = 1;
        s->max_frames_per_packet = FFMIN(s->max_frames_per_packet,
                                         s->max_payload_size / st->codec->block_align);
        goto defaultcase;
    case AV_CODEC_ID_AMR_NB:
    case AV_CODEC_ID_AMR_WB:
        if (!s->max_frames_per_packet)
            s->max_frames_per_packet = 12;
        if (st->codec->codec_id == AV_CODEC_ID_AMR_NB)
            n = 31;
        else
            n = 61;
        /* max_header_toc_size + the largest AMR payload must fit */
        if (1 + s->max_frames_per_packet + n > s->max_payload_size) {
            av_log(s1, AV_LOG_ERROR, "RTP max payload size too small for AMR\n");
            goto fail;
        }
        if (st->codec->channels != 1) {
            av_log(s1, AV_LOG_ERROR, "Only mono is supported\n");
            goto fail;
        }
    case AV_CODEC_ID_AAC:
        s->num_frames = 0;
    default:
defaultcase:
        if (st->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            avpriv_set_pts_info(st, 32, 1, st->codec->sample_rate);
        }
        s->buf_ptr = s->buf;
        break;
    }

    return 0;

fail:
    av_freep(&s->buf);
    return AVERROR(EINVAL);
}
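In the audio branch above, av_rescale_q_rnd() counts how many whole codec frames fit into s1->max_delay microseconds: one tick of the {frame_size, sample_rate} time base is the duration of one frame, and AV_ROUND_DOWN keeps only complete frames. The same computation as a standalone sketch:

#include <libavutil/avutil.h>
#include <libavutil/mathematics.h>

static int64_t frames_in_delay(int64_t max_delay_us, int frame_size, int sample_rate)
{
    AVRational frame_tb = { frame_size, sample_rate };  /* 1 tick == 1 frame */
    return av_rescale_q_rnd(max_delay_us, AV_TIME_BASE_Q, frame_tb, AV_ROUND_DOWN);
}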
Example #30
int _tmain(int argc, _TCHAR* argv[])
{
	if (argc != 4)
	{
		printf("Usage: %s in_fname_v in_fname_a out_fname\n");
		return -1;
	}
	AVOutputFormat *p_ofmt = NULL;
	///< Input AVFormatContext and Output AVFormatContext
	AVFormatContext *p_ifmt_ctx_v = NULL, *p_ifmt_ctx_a = NULL, *p_ofmt_ctx = NULL;
	AVPacket pkt;

	int ret, i;
	int video_idx_v = -1, video_idx_out = -1;
	int audio_idx_a = -1, audio_idx_out = -1;
	int frame_idx = 0;
	int64_t cur_pts_v = 0, cur_pts_a = 0;

	const char *p_in_fname_v = argv[1], *p_in_fname_a = argv[2], *p_out_fname = argv[3];

	av_register_all();

	///< Input
	if ((ret = avformat_open_input(&p_ifmt_ctx_v, p_in_fname_v, NULL, NULL)) < 0)
	{
		printf("Could not open input file(: %s).\n", p_in_fname_v);
		goto end;
	}
	if ((ret = avformat_find_stream_info(p_ifmt_ctx_v, NULL)) < 0)
	{
		printf("Failed to retrieve input stream information.\n");
		goto end;
	}

	if ((ret = avformat_open_input(&p_ifmt_ctx_a, p_in_fname_a, NULL, NULL)) < 0)
	{
		printf("Could not open input file.\n");
		goto end;
	}
	if ((ret = avformat_find_stream_info(p_ifmt_ctx_a, NULL)) < 0)
	{
		printf("Failed to retrieve input stream information.\n");
		goto end;
	}
	printf("=========Input Information=========\n");
	av_dump_format(p_ifmt_ctx_v, 0, p_in_fname_v, 0);
	av_dump_format(p_ifmt_ctx_a, 0, p_in_fname_a, 0);
	printf("===================================\n");

	///< Output
	avformat_alloc_output_context2(&p_ofmt_ctx, NULL, NULL, p_out_fname);
	if (NULL == p_ofmt_ctx)
	{
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	p_ofmt = p_ofmt_ctx->oformat;

	for (i = 0; i < (int)p_ifmt_ctx_v->nb_streams; ++i)
	{
		///< Create output AVStream according to input AVStream
		if (AVMEDIA_TYPE_VIDEO == p_ifmt_ctx_v->streams[i]->codec->codec_type)
		{
			AVStream *p_in_strm = p_ifmt_ctx_v->streams[i];
			AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx,
				p_in_strm->codec->codec);
			video_idx_v = i;
			if (NULL == p_out_strm)
			{
				printf("Failed allocating output stream.\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			video_idx_out = p_out_strm->index;

			///< Copy the settings of AVCodecContext
			if (avcodec_copy_context(p_out_strm->codec, p_in_strm->codec) < 0)
			{
				printf("Failed to copy context from input to output"
					" stream codec context.\n");
				goto end;
			}
			p_out_strm->codec->codec_tag = 0;
			if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			break;
		}
	}

	for (i = 0; i < (int)p_ifmt_ctx_a->nb_streams; ++i)
	{
		///< Create output AVStream according to input AVStream
		if (AVMEDIA_TYPE_AUDIO == p_ifmt_ctx_a->streams[i]->codec->codec_type)
		{
			AVStream *p_in_strm = p_ifmt_ctx_a->streams[i];
			AVStream *p_out_strm = avformat_new_stream(p_ofmt_ctx,
				p_in_strm->codec->codec);
			audio_idx_a = i;
			if (NULL == p_out_strm)
			{
				printf("Failed allocating output stream.\n");
				ret = AVERROR_UNKNOWN;
				goto end;
			}
			audio_idx_out = p_out_strm->index;

			///< Copy the settings of AVCodecContext
			if (avcodec_copy_context(p_out_strm->codec, p_in_strm->codec) < 0)
			{
				printf("Failed to copy context from intput to "
					"output stream codec context.\n");
				goto end;
			}
			p_out_strm->codec->codec_tag = 0;
			if (p_ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			{
				p_out_strm->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
			}
			break;
		}
	}

	printf("=========Output Information=========\n");
	av_dump_format(p_ofmt_ctx, 0, p_out_fname, 1);
	printf("====================================\n");

	///< Open output file
	if (!(p_ofmt->flags & AVFMT_NOFILE))
	{
		if (avio_open(&p_ofmt_ctx->pb, p_out_fname, AVIO_FLAG_WRITE) < 0)
		{
			printf("Could not open output file '%s'", p_out_fname);
			goto end;
		}
	}
	///< Write file header
	if ((ret = avformat_write_header(p_ofmt_ctx, NULL)) < 0)
	{
		printf("Error occurred when opening output file.\n");
		goto end;
	}

	///< FIX
#if USE_H264BSF
	AVBitStreamFilterContext *p_h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

#if USE_AACBSF
	AVBitStreamFilterContext *p_aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

	while (true)
	{
		AVFormatContext *p_ifmt_ctx;
		int strm_idx = 0;
		AVStream *p_in_strm, *p_out_strm;

		///< Get an AVPacket
		if (av_compare_ts(cur_pts_v, p_ifmt_ctx_v->streams[video_idx_v]->time_base,
			cur_pts_a, p_ifmt_ctx_a->streams[audio_idx_a]->time_base) <= 0)
		{
			p_ifmt_ctx = p_ifmt_ctx_v;
			strm_idx = video_idx_out;

			if (av_read_frame(p_ifmt_ctx, &pkt) >= 0)
			{
				do 
				{
					p_in_strm = p_ifmt_ctx->streams[pkt.stream_index];
					p_out_strm = p_ofmt_ctx->streams[strm_idx];

					if (pkt.stream_index == video_idx_v)
					{
						///< FIX: No PTS (Example: Raw H.264)
						///< Simple Write PTS
						if (pkt.pts == AV_NOPTS_VALUE)
						{
							///< Write PTS
							AVRational time_base1 = p_in_strm->time_base;
							///< Duration between 2 frames (us)
							int64_t calc_duration = (int64_t)((double)AV_TIME_BASE /
								av_q2d(p_in_strm->r_frame_rate));
							///< Parameters
							pkt.pts = (int64_t)((double)(frame_idx * calc_duration) /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							pkt.dts = pkt.pts;
							pkt.duration = (int)((double)calc_duration /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							++frame_idx;
						}
						cur_pts_v = pkt.pts;
						break;
					}
				} while (av_read_frame(p_ifmt_ctx, &pkt));
			}
			else
			{
				break;
			}
		}
		else
		{
			p_ifmt_ctx = p_ifmt_ctx_a;
			strm_idx = audio_idx_out;
			if (av_read_frame(p_ifmt_ctx, &pkt) >= 0)
			{
				do 
				{
					p_in_strm = p_ifmt_ctx->streams[pkt.stream_index];
					p_out_strm = p_ofmt_ctx->streams[strm_idx];

					if (pkt.stream_index == audio_idx_a)
					{
						///< FIX: No PTS
						///< Simple Write PTS
						if (pkt.pts == AV_NOPTS_VALUE)
						{
							///< Write PTS
							AVRational time_base1 = p_in_strm->time_base;
							///< Duration between 2 frames (us)
							int64_t calc_duration = (int64_t)((double)AV_TIME_BASE /
								av_q2d(p_in_strm->r_frame_rate));
							///< Parameters
							pkt.pts = (int64_t)((double)(frame_idx * calc_duration) /
								(double)(av_q2d(time_base1) * AV_TIME_BASE));
							pkt.dts = pkt.pts;
							pkt.duration = (int)((double)calc_duration /
								(double)(av_q2d(time_base1)* AV_TIME_BASE));
							++frame_idx;
						}
						cur_pts_a = pkt.pts;
						break;
					}
				} while (av_read_frame(p_ifmt_ctx, &pkt));
			}
			else
			{
				break;
			}
		}

		///< FIX: Bitstream Filter
#if USE_H264BSF
		av_bitstream_filter_filter(p_h264bsfc, p_in_strm->codec, NULL,
			&pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

#if USE_AACBSF
		av_bitstream_filter_filter(p_aacbsfc, p_out_strm->codec, NULL,
			&pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif

		///< Convert PTS/DTS
		pkt.pts = av_rescale_q_rnd(pkt.pts, p_in_strm->time_base,
			p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, p_in_strm->time_base,
			p_out_strm->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		pkt.duration = (int)av_rescale_q(pkt.duration, p_in_strm->time_base, p_out_strm->time_base);
		pkt.pos = -1;
		pkt.stream_index = strm_idx;

		printf("Write 1 Packet. size: %5d\tpts: %11d\n", pkt.size, pkt.pts);
		///< Write
		if (av_interleaved_write_frame(p_ofmt_ctx, &pkt) < 0)
		{
			printf("Error muxing packet.\n");
			break;
		}
		av_free_packet(&pkt);
	}

	///< Write file trailer
	av_write_trailer(p_ofmt_ctx);

#if USE_H264BSF
	av_bitstream_filter_close(p_h264bsfc);
#endif

#if USE_AACBSF
	av_bitstream_filter_close(p_aacbsfc);
#endif

end:
	avformat_close_input(&p_ifmt_ctx_v);
	avformat_close_input(&p_ifmt_ctx_a);

	///< close output
	if (p_ofmt_ctx && !(p_ofmt->flags & AVFMT_NOFILE))
	{
		avio_close(p_ofmt_ctx->pb);
	}
	avformat_free_context(p_ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF)
	{
		printf("Error occurred.\n");
		return -1;
	}

	return 0;
}
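The stream-selection test at the top of the loop above relies on av_compare_ts(), which compares two timestamps expressed in different time bases without losing precision. A sketch of that predicate on its own:

#include <libavutil/mathematics.h>

static int video_goes_next(int64_t video_pts, AVRational video_tb,
                           int64_t audio_pts, AVRational audio_tb)
{
    /* <= 0: the video packet is not later than the audio packet,
     * so mux video next to keep the streams interleaved */
    return av_compare_ts(video_pts, video_tb, audio_pts, audio_tb) <= 0;
}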