Example No. 1
/*
 * encode one video frame and send it to the muxer
 * return false when encoding is finished, true otherwise
 */
static bool write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;

    c = ost->st->codec;

    frame = get_video_frame(ost);

    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* a hack to avoid data copy with some raw video muxers */
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        if (!frame)
            return false;

        pkt.flags        |= AV_PKT_FLAG_KEY;
        pkt.stream_index  = ost->st->index;
        pkt.data          = (uint8_t *)frame;
        pkt.size          = sizeof(AVPicture);

        pkt.pts = pkt.dts = frame->pts;
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);

        ret = av_interleaved_write_frame(oc, &pkt);
    } else {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }

        if (got_packet) {
            ret = write_frame(oc, &c->time_base, ost->st, &pkt);
        } else {
            ret = 0;
        }
    }

    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }

    return (frame || got_packet) ? true : false;
}
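Note: avcodec_encode_video2() used above was deprecated in FFmpeg 3.1. A minimal sketch of the same encode-and-mux step with the send/receive API, assuming the same stream layout as the example (this is an illustration, not the example's actual code):

/* Hedged sketch: one encode-and-mux step with the post-3.1 API.
 * Pass frame == NULL to put the encoder into draining mode at EOF. */
static int encode_and_mux(AVFormatContext *oc, AVCodecContext *c,
                          AVStream *st, AVFrame *frame)
{
    int ret = avcodec_send_frame(c, frame);
    if (ret < 0)
        return ret;
    for (;;) {
        AVPacket pkt = { 0 };
        av_init_packet(&pkt);
        ret = avcodec_receive_packet(c, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;                    /* needs more input / fully drained */
        if (ret < 0)
            return ret;                  /* genuine encoding error */
        av_packet_rescale_ts(&pkt, c->time_base, st->time_base);
        pkt.stream_index = st->index;
        ret = av_interleaved_write_frame(oc, &pkt); /* takes ownership of pkt */
        if (ret < 0)
            return ret;
    }
}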
Example No. 2
// returns non-zero if there is more sound, -1 in case of error
static int WriteAudioFrame()
{
    if (!g_pAStream)
        return 0;

    AVPacket Packet;
    av_init_packet(&Packet);
    Packet.data = NULL;
    Packet.size = 0;

    int NumSamples = fread(g_pSamples, 2*g_Channels, g_NumSamples, g_pSoundFile);

#if LIBAVCODEC_VERSION_MAJOR >= 53
    AVFrame* pFrame = NULL;
    if (NumSamples > 0)
    {
        g_pAFrame->nb_samples = NumSamples;
        avcodec_fill_audio_frame(g_pAFrame, g_Channels, AV_SAMPLE_FMT_S16,
                                 (uint8_t*)g_pSamples, NumSamples*2*g_Channels, 1);
        pFrame = g_pAFrame;
    }
    // when NumSamples == 0 we still need to call encode_audio2 to flush
    int got_packet;
    if (avcodec_encode_audio2(g_pAudio, &Packet, pFrame, &got_packet) != 0)
        return FatalError("avcodec_encode_audio2 failed");
    if (!got_packet)
        return 0;

    av_packet_rescale_ts(&Packet, g_pAudio->time_base, g_pAStream->time_base);
#else
    if (NumSamples == 0)
        return 0;
    int BufferSize = OUTBUFFER_SIZE;
    if (g_pAudio->frame_size == 0)
        BufferSize = NumSamples*g_Channels*2;
    Packet.size = avcodec_encode_audio(g_pAudio, g_OutBuffer, BufferSize, g_pSamples);
    if (Packet.size == 0)
        return 1;
    if (g_pAudio->coded_frame && g_pAudio->coded_frame->pts != AV_NOPTS_VALUE)
        Packet.pts = av_rescale_q(g_pAudio->coded_frame->pts, g_pAudio->time_base, g_pAStream->time_base);
    Packet.flags |= AV_PKT_FLAG_KEY;
    Packet.data = g_OutBuffer;
#endif

    // Write the compressed frame to the media file.
    Packet.stream_index = g_pAStream->index;
    if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
        return FatalError("Error while writing audio frame");
    return 1;
}
Example No. 3
static void write_frame(struct ffmpeg_t *self, AVPacket *packet, AVCodecContext *codec, AVStream *stream, int stream_idx)
{
#ifdef HAVE_AV_PACKET_RESCALE_TS
  av_packet_rescale_ts(packet, codec->time_base, stream->time_base);
#else
  if (codec->coded_frame->pts != AV_NOPTS_VALUE)
    packet->pts = av_rescale_q(codec->coded_frame->pts, codec->time_base, stream->time_base);
#endif
  packet->stream_index = stream_idx;
  int err = av_interleaved_write_frame(self->fmt_ctx, packet);
  if (err < 0)
    scm_misc_error("write-frame", "Error writing video frame: ~a",
                   scm_list_1(get_error_text(err)));
}
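The pre-av_packet_rescale_ts() fallback above only adjusts pts. av_packet_rescale_ts() rescales all timestamp fields; a hedged approximation of what it does (the real implementation uses av_rescale_q_rnd with AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX):

/* Hedged sketch: roughly equivalent to av_packet_rescale_ts(). */
static void rescale_packet_ts(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
{
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale_q(pkt->pts, src_tb, dst_tb);
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
    if (pkt->duration > 0)
        pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
}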
Example No. 4
// Internally-used function
// Write all buffered video frames to the output.
// Inspiration: https://ffmpeg.org/pipermail/libav-user/2013-February/003663.html
// TODO: Reduce code duplication
int _frame_pusher_write_buffered_video(frame_pusher *fp)
{
    int ret, got_packet;
    do {
        av_free_packet(&fp->packet);
        memset(&fp->packet, 0, sizeof(fp->packet));
        av_init_packet(&fp->packet);
        if ((ret = avcodec_encode_video2(fp->vid_stream->codec, &fp->packet, NULL, &got_packet)) < 0) return ret;
        av_packet_rescale_ts(&fp->packet, fp->vid_stream->codec->time_base, fp->vid_stream->time_base);
        fp->packet.stream_index = fp->vid_stream->index;
        // FIXME: fp->packet.pts might be zero and cause errors?
        if (fp->packet.pts && (ret = av_interleaved_write_frame(fp->fmt_ctx, &fp->packet)) < 0) return ret;
    } while (got_packet);
    return 0;
}
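On the FIXME above: a pts of zero is a legitimate timestamp, so the truthiness test can silently drop the first packet. The conventional guard checks for AV_NOPTS_VALUE instead; a hedged sketch of the fixed write:

/* Hedged sketch: only skip packets whose timestamp is actually unset,
 * so a legitimate pts of 0 is still written. */
if (fp->packet.pts != AV_NOPTS_VALUE &&
    (ret = av_interleaved_write_frame(fp->fmt_ctx, &fp->packet)) < 0)
    return ret;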
Example No. 5
//Function to encode and write frames
static int encode_write_frame(AVFrame *frame, unsigned int stream_index, int *got_frame) {
    
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    AVCodecContext *c_ctx = ofmt_ctx->streams[stream_index]->codec;

    if (!got_frame)
        got_frame = &got_frame_local;

    //Encoding the frame
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    
   
    //Write SEI NAL unit
    int len = 12;
    
    //sei payload
    unsigned char buf[] ="\x00\x00\x01\x06\x05\x05hello\x80";
    unsigned char *buf2 = (unsigned char *)malloc(sizeof(char)*len);
    unsigned int i;
    for (i = 0; i < len; i++) {
        buf2[i] = buf[i];
    }
   
    //Copy the SEI NAL pointer and length into the encoder's private context.
    //NOTE: the offsets 1192/1200 are hard-coded for one specific libx264
    //build's private context layout; this is fragile and breaks across versions.
    memcpy((uint8_t *)c_ctx->priv_data + 1192, &buf2, sizeof(uint8_t *));
    memcpy((uint8_t *)c_ctx->priv_data + 1200, &len, sizeof(int));


    ret = avcodec_encode_video2(c_ctx, &enc_pkt, frame, got_frame);

    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    //Prepare packet for muxing
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,c_ctx->time_base, ofmt_ctx->streams[stream_index]->time_base);
    
    //Mux encoded frame
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
Example No. 6
BOOL CVideoLivRecord::write_audio_frame(AVStream *st, void* pBuffer, LONG len)
{
	AVCodecContext* avcc = st->codec;
	AVPacket pkt = {0};
	av_init_packet(&pkt);
	AVFrame* frame = get_audio_frame(st, pBuffer, len);
	int ret = 0;
	int dst_nb_samples = 0;
	if (frame){
		dst_nb_samples = (int)av_rescale_rnd(swr_get_delay(m_pAudioSwrctx, avcc->sample_rate) + frame->nb_samples,
			avcc->sample_rate, avcc->sample_rate, AV_ROUND_UP);
		av_assert0(dst_nb_samples == frame->nb_samples);
		ret = av_frame_make_writable(m_pAudioFrame);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_frame_make_writable() error");
			return FALSE;
		}
		ret = swr_convert(m_pAudioSwrctx, m_pAudioFrame->data, dst_nb_samples, 
			             (const uint8_t**)frame->data, frame->nb_samples);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- av_frame_make_writable() error");
			return FALSE;
		}
		frame = m_pAudioFrame;
		AVRational tmp = {1, avcc->sample_rate};

		frame->pts = av_rescale_q(m_AudioSamplesCount, tmp, avcc->time_base);
		m_AudioSamplesCount += dst_nb_samples;
	}
	int got_packet = 0;
	ret = avcodec_encode_audio2(avcc, &pkt, frame, &got_packet);
	if (ret < 0){
		log("[CVideoLivRecord::write_audio_frame] -- avcodec_encode_audio2() error");
		return FALSE;
	}
	if(got_packet){
		av_packet_rescale_ts(&pkt, avcc->time_base, st->time_base);
		pkt.stream_index = st->index;
		ret = av_interleaved_write_frame(m_pAVFormatContext, &pkt);
		//ret = write_audio_frame(m_pAudioStream, pBuffer, len);
		if (ret < 0){
			log("[CVideoLivRecord::write_audio_frame] -- write_audio_frame() error");
			return FALSE;
		}
	}
	return (frame || got_packet) ? TRUE : FALSE; // TRUE: more data may follow; FALSE: fully flushed
}
Example No. 7
static int write_frame(FFmpegStuffEnc* ff, AVPacket *pkt)
{
	if (!ff)
	{
		assert(ff);
		return 0;
	}
	
	//if (ff->codecContext->coded_frame->key_frame)
	//{
	//	pkt->flags |= AV_PKT_FLAG_KEY;
	//}

	// rescale output packet timestamp values from codec to stream timebase
    av_packet_rescale_ts(pkt, ff->codecContext->time_base, ff->videoStream->time_base);
    pkt->stream_index = ff->videoStream->index;

    // write the compressed frame to the media file
    return av_interleaved_write_frame(ff->formatContext, pkt);
}
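A typical call site for this helper, sketched under the assumption that packets come from avcodec_encode_video2() in the codec time base (the frame variable is hypothetical):

/* Hedged usage sketch for write_frame() above. */
AVPacket pkt = { 0 };
int got_packet = 0;
av_init_packet(&pkt);
if (avcodec_encode_video2(ff->codecContext, &pkt, frame, &got_packet) >= 0 &&
    got_packet) {
    if (write_frame(ff, &pkt) < 0)
        fprintf(stderr, "write_frame failed\n");
}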
Example No. 8
bool AVMuxer::writeVideo(const QtAV::Packet& packet)
{
    AVPacket *pkt = (AVPacket*)packet.asAVPacket();
    pkt->stream_index = d->video_streams[0];
    AVStream *s = d->format_ctx->streams[pkt->stream_index];
    // stream.time_base is set in avformat_write_header
    av_packet_rescale_ts(pkt, kTB, s->time_base);
    //av_write_frame
    av_interleaved_write_frame(d->format_ctx, pkt);
#if 0
    qDebug("mux packet.pts: %.3f dts:%.3f duration: %.3f, avpkt.pts: %lld,dts:%lld,duration:%lld"
           , packet.pts, packet.dts, packet.duration
           , pkt->pts, pkt->dts, pkt->duration);
    qDebug("stream: %d duration: %lld, end: %lld. tb:{%d/%d}"
           , pkt->stream_index, s->duration
            , av_stream_get_end_pts(s)
           , s->time_base.num, s->time_base.den
            );
#endif
    d->started = true;
    return true;
}
Example No. 9
static void flush_video (GeglProperties *o)
{
  Priv *p = (Priv*)o->user_data;
  int got_packet = 0;
  do {
    AVPacket  pkt = { 0 };
    int ret;
    got_packet = 0;
    av_init_packet (&pkt);
    ret = avcodec_encode_video2 (p->video_st->codec, &pkt, NULL, &got_packet);
    if (ret < 0)
      return;
      
     if (got_packet)
     {
       pkt.stream_index = p->video_st->index;
       av_packet_rescale_ts (&pkt, p->video_st->codec->time_base, p->video_st->time_base);
       av_interleaved_write_frame (p->oc, &pkt);
       av_free_packet (&pkt);
     }
  } while (got_packet);
}
Example No. 10
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(EncoderContext *ec, AVFrame *frame)
{
    OutputStream *ost = &(ec->video_st);
    AVFormatContext *oc = ec->oc;
    int ret;
    AVCodecContext *c;
    c = ost->st->codec;
    if (oc->oformat->flags & AVFMT_RAWPICTURE) {
        /* a hack to avoid data copy with some raw video muxers */
        av_init_packet(&(ec->pkt));
        if (!frame)
            return 1;
        ec->pkt.flags        |= AV_PKT_FLAG_KEY;
        ec->pkt.stream_index  = ost->st->index;
        ec->pkt.data          = (uint8_t *)frame;
        ec->pkt.size          = sizeof(AVPicture);
        ec->pkt.pts = ec->pkt.dts = frame->pts;
        av_packet_rescale_ts(&(ec->pkt), c->time_base, ost->st->time_base);
        ret = av_interleaved_write_frame(oc, &(ec->pkt));
    } else {
        /* encode the image */
        ret = avcodec_encode_video2(c, &(ec->pkt), frame, &(ec->got_output));
        if (ret < 0) {
            fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
            exit(1);
        }
        if (ec->got_output) {
            ret = write_frame(oc, &c->time_base, ost->st, &(ec->pkt));
        } else {
            ret = 0;
        }
    }
    if (ret < 0) {
        fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
        exit(1);
    }
    return (frame || ec->got_output) ? 0 : 1;
}
Example No. 11
/* if a frame is provided, send it to the encoder, otherwise flush the encoder;
 * return 1 when encoding is finished, 0 otherwise
 */
static int encode_audio_frame(AVFormatContext *oc, OutputStream *ost,
                              AVFrame *frame)
{
    AVPacket pkt = { 0 }; // data and size must be 0;
    int got_packet;

    av_init_packet(&pkt);
    avcodec_encode_audio2(ost->enc, &pkt, frame, &got_packet);

    if (got_packet) {
        pkt.stream_index = ost->st->index;

        av_packet_rescale_ts(&pkt, ost->enc->time_base, ost->st->time_base);

        /* Write the compressed frame to the media file. */
        if (av_interleaved_write_frame(oc, &pkt) != 0) {
            fprintf(stderr, "Error while writing audio frame\n");
            exit(1);
        }
    }

    return (frame || got_packet) ? 0 : 1;
}
Example No. 12
int MP4Encoder::EncodeFrame(AVCodecContext *pCodecCtx, AVFrame *pFrame, AVPacket *avPacket) {
    int ret = avcodec_send_frame(pCodecCtx, pFrame);
    if (ret < 0) {
        //failed to send frame for encoding
        return -1;
    }
    while (!ret) {
        ret = avcodec_receive_packet(pCodecCtx, avPacket);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) {
            return 0;
        } else if (ret < 0) {
            //error during encoding
            return -1;
        }
        //printf("Write frame %d, size=%d\n", avPacket->pts, avPacket->size);
        avPacket->stream_index = pStream->index;
        av_packet_rescale_ts(avPacket, pCodecCtx->time_base, pStream->time_base);
        avPacket->pos = -1;
        av_interleaved_write_frame(pFormatCtx, avPacket);
        av_packet_unref(avPacket);
    }
    return 0;
}
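Because EncodeFrame() already loops on avcodec_receive_packet() until EAGAIN/EOF, the encoder can be drained at end of stream by passing a NULL frame. A hedged usage sketch (the encoder instance and pCodecCtx names are hypothetical):

/* Hedged sketch: a NULL frame puts the encoder into draining mode; the
 * receive loop inside EncodeFrame() then runs until AVERROR_EOF. */
AVPacket *pkt = av_packet_alloc();
if (encoder.EncodeFrame(pCodecCtx, NULL, pkt) < 0)   /* NULL frame == flush */
    fprintf(stderr, "flush failed\n");
av_packet_free(&pkt);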
Example No. 13
/*
 * encode one video frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int write_video_frame(AVFormatContext *oc, OutputStream *ost)
{
    int ret;
    AVCodecContext *c;
    AVFrame *frame;
    int got_packet = 0;

    c = ost->st->codec;

    frame = get_video_frame(ost);

    AVPacket pkt = { 0 };
    av_init_packet(&pkt);

    /* encode the image */
    ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
    if (ret < 0) {
        fprintf(stderr, "Error encoding a video frame\n");
        exit(1);
    }

    if (got_packet) {
        av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
        pkt.stream_index = ost->st->index;

        /* Write the compressed frame to the media file. */
        //ret = av_interleaved_write_frame(oc, &pkt);
        ret = av_write_frame(oc, &pkt);
    }

    if (ret != 0) {
        fprintf(stderr, "Error while writing video frame\n");
        exit(1);
    }

    return (frame || got_packet) ? 0 : 1;
}
Example No. 14
    bool WriteH264VideoSample(unsigned char *sample,
                              unsigned int sample_size,
                              bool is_key_frame,
                              unsigned long long int duration)
    {
        // Convert AnnexB format to AVCC by overwriting the 4-byte start code
        // with the NAL size (assumes the sample holds a single NAL unit)
        if (sample_size >= 4) {
            unsigned int *p = (unsigned int *) sample;
            *p = htonl(sample_size - 4);
        }

        AVPacket packet = { 0 };
        av_init_packet(&packet);

        packet.stream_index = video_stream_id;
        packet.data         = sample;
        packet.size         = sample_size;
        packet.pos          = -1;

        packet.dts = packet.pts = static_cast<int64_t>(file_duration);
        packet.duration = static_cast<int>(duration);
        av_packet_rescale_ts(&packet, (AVRational){1, 1000}, format_context->streams[video_stream_id]->time_base);

        if (is_key_frame) {
            packet.flags |= AV_PKT_FLAG_KEY;
        }

        if (av_interleaved_write_frame(format_context, &packet) < 0) {
            printf("Fail to write frame\n");
            return false;
        }

        file_duration += duration;

        return true;
    }
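The in-place htonl() trick above assumes the sample is exactly one NAL unit behind a 4-byte start code. A hedged sketch of a more general converter for samples carrying several NALs (still assuming only 4-byte 00 00 00 01 start codes):

#include <stdint.h>

/* Hedged sketch: replace every 4-byte AnnexB start code in buf with a
 * big-endian NAL length prefix (AVCC framing). */
static void annexb_to_avcc(uint8_t *buf, unsigned int size)
{
    unsigned int pos = 0;
    while (pos + 4 <= size) {
        unsigned int next = pos + 4;
        /* scan for the next start code, or stop at the end of the buffer */
        while (next + 4 <= size &&
               !(buf[next] == 0 && buf[next + 1] == 0 &&
                 buf[next + 2] == 0 && buf[next + 3] == 1))
            next++;
        if (next + 4 > size)
            next = size;
        unsigned int nal_size = next - (pos + 4);
        buf[pos]     = (uint8_t)(nal_size >> 24);
        buf[pos + 1] = (uint8_t)(nal_size >> 16);
        buf[pos + 2] = (uint8_t)(nal_size >> 8);
        buf[pos + 3] = (uint8_t)(nal_size);
        pos = next;
    }
}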
Example No. 15
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index, int *got_frame)
{
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type ==
         AVMEDIA_TYPE_VIDEO) ? avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    av_log(NULL, AV_LOG_INFO, "Encoding frame\n");
    /* encode filtered frame */
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    av_init_packet(&enc_pkt);
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
                   filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* prepare packet for muxing */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         ofmt_ctx->streams[stream_index]->codec->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);

    av_log(NULL, AV_LOG_DEBUG, "Muxing frame\n");
    /* mux encoded frame */
    ret = av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
    return ret;
}
Example No. 16
int main(int argc, char** argv){
	int quadrant_line, quadrant_column;
	
	char *videoFileName = argv[1];
	char quadFileName[64];

	int i = 0, k, j;

	long unsigned int inc = 0;
	long unsigned int incaudio = 0;

	int videoStreamIndex;
	int audioStreamIndex= -1;
	int frameFinished, gotPacket;

	AVDictionary	*codecOptions = NULL;
	
	UDP_PTSframe_t PTS_frame;

	struct tm *start_time_tm;
	char start_time_str[64];
	long unsigned int start_time;
	time_t start_timer_t;
	
	//Crop env
	int tam_quad;
	int frist = 1, marginLeft = 0, marginTop = 0;
	int width , height;

    if(argc < 4){
        usage();    
        return -1;
    }

    signal (SIGTERM, handlerToFinish);
	signal (SIGINT, handlerToFinish);

    tam_quad = sqrt(amount_of_quadrants);
    quadrant_line = atoi(argv[2]);
    quadrant_column = atoi(argv[3]);
    amount_of_quadrants = (quadrant_line * quadrant_column) + 1;

    strcpy (quadFileName, argv[4]);

    //Allocate output stream contexts
    ff_output = malloc (sizeof(ff_output_t) * amount_of_quadrants);

	av_register_all();
	avformat_network_init();

	//Initialize Input
	if (avformat_open_input (&ff_input.formatCtx, videoFileName, NULL, NULL) != 0) {
		printf ("Cold not open input video file at %s\n", videoFileName);
		return -1;
	}

	if (avformat_find_stream_info(ff_input.formatCtx, NULL) < 0) {
		printf ("Cold not get stream info\n");
		return -1;
	}

	av_dump_format(ff_input.formatCtx, 0, videoFileName, 0);

	videoStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &ff_input.encoder, 0);
	if (videoStreamIndex < 0) {
		printf ("no video streams found\n");
		return -1;
	}

	audioStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_input.audioencoder, 0);
    if (audioStreamIndex < 0) {
        printf ("no audio streams found\n");
        return -1;
    }
    printf ("VIDEO ST %d, AUDIO ST %d\n", videoStreamIndex, audioStreamIndex);

    ff_input.audiocodecCtx = ff_input.formatCtx->streams[audioStreamIndex]->codec;
	ff_input.codecCtx = ff_input.formatCtx->streams[videoStreamIndex]->codec;

	if (avcodec_open2 (ff_input.audiocodecCtx, ff_input.audioencoder, NULL) < 0) {
        printf ("Could not open input codec\n");
        return -1;
    }

	if (avcodec_open2 (ff_input.codecCtx, ff_input.encoder, NULL) < 0) {
		printf ("Could not open input codec\n");
		return -1;
	}

	//Get system time and append as metadata
	getSystemTime (&PTS_frame.frameTimeVal); //Must be the same for all output contexts
	start_time = PTS_frame.frameTimeVal.tv_sec;
	start_timer_t = (time_t) start_time;
	start_time_tm = localtime (&start_timer_t);
	strftime(start_time_str, sizeof start_time_str, "%Y-%m-%d %H:%M:%S", start_time_tm);

	if (avformat_alloc_output_context2(&formatCtx, NULL, AV_OUTPUT_FORMAT, quadFileName) < 0) {
			printf ("could not create output context\n");
			return -1;
	}

	//Initialize Video Output Streams
	for (i = 0; i < amount_of_quadrants - 1; i++) {

		ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
		if (ff_output[i].outStream == NULL) {
			printf ("Could not create output stream\n");
			return -1;
		}

		ff_output[i].outStream->id = formatCtx->nb_streams - 1;

		ff_output[i].codecCtx = ff_output[i].outStream->codec;
		ff_output[i].encoder = avcodec_find_encoder_by_name (AV_OUTPUT_CODEC);
		if (ff_output[i].encoder == NULL) {
			printf ("Codec %s not found..\n", AV_OUTPUT_CODEC);
			return -1;
		}

		//Sliced sizes
		width = ff_input.codecCtx->width/quadrant_column;
		height = ff_input.codecCtx->height/quadrant_line;

		ff_output[i].codecCtx->codec_type 	= AVMEDIA_TYPE_VIDEO;
		ff_output[i].codecCtx->height 		= height;
		ff_output[i].codecCtx->width 		= width;
		ff_output[i].codecCtx->pix_fmt		= ff_input.codecCtx->pix_fmt;

		if (strcmp (AV_OUTPUT_CODEC, "libvpx") == 0) {
			//Maintain input aspect ratio for codec and stream info, and b_frames for codec info
			ff_output[i].codecCtx->sample_aspect_ratio = ff_input.codecCtx->sample_aspect_ratio;
			ff_output[i].codecCtx->max_b_frames = ff_input.codecCtx->max_b_frames;
			ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = AV_FRAMERATE;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 10000;			
		}

		if (strcmp (AV_OUTPUT_CODEC, "libx264") == 0) {
			// ff_output[i].codecCtx->profile = FF_PROFILE_H264_MAIN;
			// av_dict_set(&codecOptions, "profile","main",0);

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			ff_output[i].codecCtx->bit_rate_tolerance = 0;
			ff_output[i].codecCtx->rc_max_rate = 0;
			ff_output[i].codecCtx->rc_buffer_size = 0;
			ff_output[i].codecCtx->gop_size = 40;
			ff_output[i].codecCtx->max_b_frames = 3;
			ff_output[i].codecCtx->b_frame_strategy = 1;
			ff_output[i].codecCtx->coder_type = 1;
			ff_output[i].codecCtx->me_cmp = 1;
			ff_output[i].codecCtx->me_range = 16;
			ff_output[i].codecCtx->qmin = 10;
			ff_output[i].codecCtx->qmax = 51;
			ff_output[i].codecCtx->scenechange_threshold = 40;
			ff_output[i].codecCtx->flags |= CODEC_FLAG_LOOP_FILTER;
			ff_output[i].codecCtx->me_method = ME_HEX;
			ff_output[i].codecCtx->me_subpel_quality = 5;
			ff_output[i].codecCtx->i_quant_factor = 0.71;
			ff_output[i].codecCtx->qcompress = 0.6;
			ff_output[i].codecCtx->max_qdiff = 4;

			//Set custom timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = 24;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 90000;		
		}

		formatCtx->start_time_realtime = start_time;
		av_dict_set (&formatCtx->metadata, "service_name", start_time_str, 0);
		av_dict_set (&formatCtx->metadata, "creation_time", start_time_str, 0);

		//Open codec
		if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
			printf ("Could not open output codec...\n");
			return -1;
		}
	}

	//Initializing Audio Output
	i = amount_of_quadrants-1; //Last stream
	ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
	if (ff_output[i].outStream == NULL) {
		printf ("Could not create output stream\n");
		return -1;
	}

	ff_output[i].outStream->id = formatCtx->nb_streams - 1;

	ff_output[i].codecCtx = ff_output[i].outStream->codec;
	ff_output[i].encoder = avcodec_find_encoder (ff_input.audiocodecCtx->codec_id);
	if (ff_output[i].encoder == NULL) {
		printf ("Codec %s not found..\n", AUDIO_OUTPUT_CODEC);
		return -1;
	}
  
    ff_output[i].codecCtx = ff_output[amount_of_quadrants-1].outStream->codec;
    ff_output[i].codecCtx->codec_id = ff_input.audiocodecCtx->codec_id;
    ff_output[i].codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
    ff_output[i].codecCtx->sample_fmt = ff_input.audiocodecCtx->sample_fmt;
    ff_output[i].codecCtx->sample_rate = ff_input.audiocodecCtx->sample_rate;
    ff_output[i].codecCtx->channel_layout = ff_input.audiocodecCtx->channel_layout;
    ff_output[i].codecCtx->channels = av_get_channel_layout_nb_channels(ff_output[amount_of_quadrants-1].codecCtx->channel_layout);
    ff_output[i].codecCtx->bit_rate = ff_input.audiocodecCtx->bit_rate;  
    ff_output[i].codecCtx->sample_aspect_ratio = ff_input.audiocodecCtx->sample_aspect_ratio;
    ff_output[i].codecCtx->max_b_frames = ff_input.audiocodecCtx->max_b_frames;
    ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

    ff_output[i].outStream->time_base.num = ff_input.formatCtx->streams[audioStreamIndex]->time_base.num;
	ff_output[i].outStream->time_base.den = ff_input.formatCtx->streams[audioStreamIndex]->time_base.den;

	ff_output[i].codecCtx->time_base.num = ff_input.audiocodecCtx->time_base.num;
	ff_output[i].codecCtx->time_base.den = ff_input.audiocodecCtx->time_base.den;

	printf("sample_rate %d\n", ff_input.audiocodecCtx->sample_rate);

	//Open codec
	if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
		printf ("Could not open output codec...\n");
		return -1;
	}

	av_dump_format (formatCtx, 0, quadFileName, 1);

	//Open output context
	if (avio_open (&formatCtx->pb, quadFileName, AVIO_FLAG_WRITE)) {
		printf ("avio_open failed %s\n", quadFileName);
		return -1;
	}
	
	//Write format context header
	if (avformat_write_header (formatCtx, &formatCtx->metadata)) {
		printf ("fail to write outstream header\n");
		return -1;
	}

	printf ("OUTPUT TO %s, at %lu\n", quadFileName, start_time);


	incaudio = 0;
	printf("Generating video streams...\n");
	while(av_read_frame (ff_input.formatCtx, &ff_input.packet) >= 0 && _keepEncoder) {
		if (ff_input.packet.stream_index == audioStreamIndex)
		{
			av_packet_ref  (&ff_output[amount_of_quadrants-1].packet, &ff_input.packet); 
            ff_output[amount_of_quadrants-1].packet.stream_index = amount_of_quadrants-1;
            ff_output[amount_of_quadrants-1].packet.pts = incaudio;

            // printf("%lu\n", ff_output[amount_of_quadrants-1].packet.pts);
            if (av_write_frame(formatCtx, &ff_output[amount_of_quadrants-1].packet) < 0) {
                printf ("Unable to write to output stream..\n");
                pthread_exit(NULL);
            }
            incaudio += 2880;
		}

		if (ff_input.packet.stream_index == videoStreamIndex) {

			ff_input.frame = av_frame_alloc();
			avcodec_decode_video2 (ff_input.codecCtx, ff_input.frame, &frameFinished, &ff_input.packet);

			if (frameFinished) {
				//TODO: Slice inputFrame and fill avQuadFrames[quadrant]
				//By now, inputFrame are replicated to all quadrants

				ff_input.frame->pts = av_frame_get_best_effort_timestamp (ff_input.frame);
				
				i = 0;
				for ( k = 0; k < quadrant_line; ++k) {
                    for (j = 0; j < quadrant_column; ++j) {
            			ff_output[i].frame = av_frame_alloc();

            			//make the cut quadrant ff_output[i]!
            			av_picture_crop((AVPicture *)ff_output[i].frame, (AVPicture *)ff_input.frame,       
            							ff_input.formatCtx->streams[videoStreamIndex]->codec->pix_fmt, marginTop, marginLeft);
            			
            			ff_output[i].frame->width = width; // updates the new width
						ff_output[i].frame->height = height; // updates the new height
						ff_output[i].frame->format = ff_input.frame->format;

						ff_output[i].frame->pts = inc;

						ff_output[i].packet.data = NULL;
						ff_output[i].packet.size = 0;
						av_init_packet (&ff_output[i].packet);

						avcodec_encode_video2 (ff_output[i].codecCtx, &ff_output[i].packet, ff_output[i].frame, &gotPacket);

						if (gotPacket) {
							ff_output[i].packet.stream_index = i;
							av_packet_rescale_ts (&ff_output[i].packet,
													ff_output[i].codecCtx->time_base,
													ff_output[i].outStream->time_base);

							if (av_write_frame (formatCtx, &ff_output[i].packet) < 0) {
								printf ("Unable to write to output stream..\n");
								pthread_exit(NULL);
							}

						}

						av_frame_free (&ff_output[i].frame);	

						i++;
						marginLeft += width;	

            		}
            		marginLeft = 0;
            		marginTop += height;
            	}
            	marginTop = 0; 
            	i = 0;
            	inc++;
			}
			av_frame_free (&ff_input.frame);
		}
	}

	return 0;
}
Example No. 17
int main_dummy(int argc, char **argv)
#endif
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *inVideoFmtCtx = NULL, *inAudioFmtCtx = NULL, *outFmtCtx = NULL;
    AVPacket pkt = { 0 }; // zero-init so the cleanup path can safely free it
    const char *inVideo_filename, *inAudio_filename, *out_filename;
    int ret, i;

    if (argc < 3) {
        printf("usage: %s input output\n"
            "API example program to remux a media file with libavformat and libavcodec.\n"
            "The output format is guessed according to the file extension.\n"
            "\n", argv[0]);
        return 1;
    }

    inVideo_filename = argv[1];
    inAudio_filename = argv[2];
    out_filename = argv[3];

    av_register_all();

    /* =============== OPEN STREAMS ================*/

    if ((ret = open_input_file2(argv[1], &inVideoFmtCtx)) < 0)
        goto end;

    if ((ret = open_input_file2(argv[2], &inAudioFmtCtx)) < 0)
        goto end;

    /* ========== ALLOCATE OUTPUT CONTEXT ==========*/

    avformat_alloc_output_context2(&outFmtCtx, NULL, NULL, out_filename);
    if (!outFmtCtx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = outFmtCtx->oformat;

    /* =============== SETUP VIDEO CODEC ================*/
    for (i = 0; i < inVideoFmtCtx->nb_streams; i++) {
        AVStream *in_stream = inVideoFmtCtx->streams[i];
        AVStream *out_stream = avformat_new_stream(outFmtCtx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }

        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (outFmtCtx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

    /* =============== SETUP AUDIO CODEC ================*/
    for (i = 0; i < inAudioFmtCtx->nb_streams; i++) {
        setup_mp3_audio_codec(outFmtCtx);
    }

    av_dump_format(outFmtCtx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
        ret = avio_open(&outFmtCtx->pb, out_filename, AVIO_FLAG_WRITE);
        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = avformat_write_header(outFmtCtx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }


    /* =============== SETUP FILTERS ================*/

    init_filters(inAudioFmtCtx, outFmtCtx);

    AVStream *in_stream, *out_stream;
    AVFrame* frame = NULL; // NULL-init so the cleanup path can safely free it

    while (1) {        

        /* =============== VIDEO STREAM ================*/
        ret = av_read_frame(inVideoFmtCtx, &pkt);
        if (ret < 0)
            break;

        in_stream = inVideoFmtCtx->streams[pkt.stream_index];
        out_stream = outFmtCtx->streams[pkt.stream_index];

        log_packet(inVideoFmtCtx, &pkt, "in");

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = (int)av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(outFmtCtx, &pkt, "out");

        ret = av_interleaved_write_frame(outFmtCtx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);


        /* =============== AUDIO STREAM ================*/

#if 0
        ret = av_read_frame(inAudioFmtCtx, &pkt);
        if (ret < 0)
            break;
        in_stream = inAudioFmtCtx->streams[pkt.stream_index];

        pkt.stream_index++;
        out_stream = outFmtCtx->streams[pkt.stream_index];

        log_packet(inAudioFmtCtx, &pkt, "in");

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        log_packet(outFmtCtx, &pkt, "out");

        ret = av_interleaved_write_frame(outFmtCtx, &pkt);
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
#else

        if ((ret = av_read_frame(inAudioFmtCtx, &pkt)) < 0)
            break;
        int streamIndex = pkt.stream_index;
        int gotFrame;

        if (_filterCtx[streamIndex].FilterGraph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&pkt, inAudioFmtCtx->streams[streamIndex]->time_base,
                inAudioFmtCtx->streams[streamIndex]->codec->time_base);
            ret = avcodec_decode_audio4(inAudioFmtCtx->streams[streamIndex]->codec, frame,
                &gotFrame, &pkt);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (gotFrame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, inAudioFmtCtx, outFmtCtx, streamIndex);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            }
            else {
                av_frame_free(&frame);
            }
        }
        else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&pkt,
                inAudioFmtCtx->streams[streamIndex]->time_base,
                outFmtCtx->streams[streamIndex+1]->time_base);

            ret = av_interleaved_write_frame(outFmtCtx, &pkt);
            if (ret < 0)
                goto end;
        }
        av_free_packet(&pkt);
#endif // 0

    }

    av_write_trailer(outFmtCtx);
end:

    av_free_packet(&pkt);
    av_frame_free(&frame);
    for (i = 0; i < inAudioFmtCtx->nb_streams; i++) {
        avcodec_close(inAudioFmtCtx->streams[i]->codec);
        if (outFmtCtx && outFmtCtx->nb_streams > i && outFmtCtx->streams[i] && outFmtCtx->streams[i]->codec)
            avcodec_close(outFmtCtx->streams[i]->codec);
        if (_filterCtx && _filterCtx[i].FilterGraph)
            avfilter_graph_free(&_filterCtx[i].FilterGraph);
    }
    av_free(_filterCtx);

    avformat_close_input(&inVideoFmtCtx);
    avformat_close_input(&inAudioFmtCtx);

    /* close output */
    if (outFmtCtx && !(ofmt->flags & AVFMT_NOFILE))
        avio_closep(&outFmtCtx->pb);
    avformat_free_context(outFmtCtx);

    if (ret < 0 && ret != AVERROR_EOF) {
        //fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }

    return 0;
}
Example No. 18
void
write_audio_frame (GeglProperties *o, AVFormatContext * oc, AVStream * st)
{
  Priv *p = (Priv*)o->user_data;
  AVCodecContext *c = st->codec;
  int sample_count = 100000;
  static AVPacket  pkt = { 0 };

  if (pkt.size == 0)
  {
    av_init_packet (&pkt);
  }

  /* first we add incoming frames audio samples */
  {
    int i;
    int sample_count = gegl_audio_fragment_get_sample_count (o->audio);
    GeglAudioFragment *af = gegl_audio_fragment_new (gegl_audio_fragment_get_sample_rate (o->audio),
                                                     gegl_audio_fragment_get_channels (o->audio),
                                                     gegl_audio_fragment_get_channel_layout (o->audio),
                                                     sample_count);
    gegl_audio_fragment_set_sample_count (af, sample_count);
    for (i = 0; i < sample_count; i++)
      {
        af->data[0][i] = o->audio->data[0][i];
        af->data[1][i] = o->audio->data[1][i];
      }
    gegl_audio_fragment_set_pos (af, p->audio_pos);
    p->audio_pos += sample_count;
    p->audio_track = g_list_append (p->audio_track, af);
  }

  if (!(c->codec->capabilities & AV_CODEC_CAP_VARIABLE_FRAME_SIZE))
    sample_count = c->frame_size;

  /* then we encode as much as we can in a loop using the codec frame size */

  
  while (p->audio_pos - p->audio_read_pos > sample_count)
  {
    long i;
    int ret;
    int got_packet = 0;
    AVFrame *frame = alloc_audio_frame (c->sample_fmt, c->channel_layout,
                                        c->sample_rate, sample_count);

    switch (c->sample_fmt) {
      case AV_SAMPLE_FMT_FLT:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[c->channels*i+0] = left;
          ((float*)frame->data[0])[c->channels*i+1] = right;
        }
        break;
      case AV_SAMPLE_FMT_FLTP:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((float*)frame->data[0])[i] = left;
          ((float*)frame->data[1])[i] = right;
        }
        break;
      case AV_SAMPLE_FMT_S16:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[c->channels*i+0] = left * (1<<15);
          ((int16_t*)frame->data[0])[c->channels*i+1] = right * (1<<15);
        }
        break;
      case AV_SAMPLE_FMT_S32:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[c->channels*i+0] = left * (1<<31);
          ((int32_t*)frame->data[0])[c->channels*i+1] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S32P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int32_t*)frame->data[0])[i] = left * (1<<31);
          ((int32_t*)frame->data[1])[i] = right * (1<<31);
        }
        break;
      case AV_SAMPLE_FMT_S16P:
        for (i = 0; i < sample_count; i++)
        {
          float left = 0, right = 0;
          get_sample_data (p, i + p->audio_read_pos, &left, &right);
          ((int16_t*)frame->data[0])[i] = left * (1<<15);
          ((int16_t*)frame->data[1])[i] = right * (1<<15);
        }
        break;
      default:
        fprintf (stderr, "eeeek unhandled audio format\n");
        break;
    }
    frame->pts = p->next_apts;
    p->next_apts += sample_count;

    av_frame_make_writable (frame);
    ret = avcodec_encode_audio2 (c, &pkt, frame, &got_packet);

    av_packet_rescale_ts (&pkt, st->codec->time_base, st->time_base);
    if (ret < 0) {
      fprintf (stderr, "Error encoding audio frame: %s\n", av_err2str (ret));
    }

    if (got_packet)
    {
      pkt.stream_index = st->index;
      av_interleaved_write_frame (oc, &pkt);
      av_free_packet (&pkt);
    }

    av_frame_free (&frame);
    p->audio_read_pos += sample_count;
  }
}
Example No. 19
static void
write_video_frame (GeglProperties *o,
                   AVFormatContext *oc, AVStream *st)
{
  Priv           *p = (Priv*)o->user_data;
  int             out_size, ret;
  AVCodecContext *c;
  AVFrame        *picture_ptr;

  c = st->codec;

  if (c->pix_fmt != AV_PIX_FMT_RGB24)
    {
      struct SwsContext *img_convert_ctx;
      fill_rgb_image (o, p->tmp_picture, p->frame_count, c->width,
                      c->height);

      img_convert_ctx = sws_getContext(c->width, c->height, AV_PIX_FMT_RGB24,
                                       c->width, c->height, c->pix_fmt,
                                       SWS_BICUBIC, NULL, NULL, NULL);

      if (img_convert_ctx == NULL)
        {
          fprintf(stderr, "ff_save: Cannot initialize conversion context.");
        }
      else
        {
          sws_scale(img_convert_ctx,
                    (void*)p->tmp_picture->data,
                    p->tmp_picture->linesize,
                    0,
                    c->height,
                    p->picture->data,
                    p->picture->linesize);
         p->picture->format = c->pix_fmt;
         p->picture->width = c->width;
         p->picture->height = c->height;
        }
    }
  else
    {
      fill_rgb_image (o, p->picture, p->frame_count, c->width, c->height);
    }

  picture_ptr      = p->picture;
  picture_ptr->pts = p->frame_count;

  if (oc->oformat->flags & AVFMT_RAWPICTURE)
    {
      /* raw video case. The API will change slightly in the near
         future for that */
      AVPacket  pkt;
      av_init_packet (&pkt);

      pkt.flags |= AV_PKT_FLAG_KEY;
      pkt.stream_index = st->index;
      pkt.data = (uint8_t *) picture_ptr;
      pkt.size = sizeof (AVPicture);
      pkt.pts = picture_ptr->pts;
      av_packet_rescale_ts (&pkt, c->time_base, st->time_base);

      ret = av_write_frame (oc, &pkt);
    }
  else
    {
      /* encode the image */
      out_size =
        avcodec_encode_video (c,
                              p->video_outbuf,
                              p->video_outbuf_size, picture_ptr);

      /* if zero size, it means the image was buffered */
      if (out_size != 0)
        {
          AVPacket  pkt;
          av_init_packet (&pkt);
          if (c->coded_frame->key_frame)
            pkt.flags |= AV_PKT_FLAG_KEY;
          pkt.stream_index = st->index;
          pkt.data = p->video_outbuf;
          pkt.size = out_size;
          pkt.pts = picture_ptr->pts;
          av_packet_rescale_ts (&pkt, c->time_base, st->time_base);
          /* write the compressed frame in the media file */
          ret = av_write_frame (oc, &pkt);
        }
      else
        {
          ret = 0;
        }
    }
  if (ret != 0)
    {
      fprintf (stderr, "Error while writing video frame\n");
      exit (1);
    }
  p->frame_count++;
}
Example No. 20
static int WriteFrame(AVFrame* pFrame)
{
    double AudioTime, VideoTime;
    int ret;
    // write interleaved audio frame
    if (g_pAStream)
    {
        VideoTime = (double)g_pVFrame->pts * g_pVStream->time_base.num/g_pVStream->time_base.den;
        do
        {
            AudioTime = (double)g_pAFrame->pts * g_pAStream->time_base.num/g_pAStream->time_base.den;
            ret = WriteAudioFrame();
        }
        while (AudioTime < VideoTime && ret);
        if (ret < 0)
            return ret;
    }

    if (!g_pVStream)
        return 0;

    AVPacket Packet;
    av_init_packet(&Packet);
    Packet.data = NULL;
    Packet.size = 0;

    g_pVFrame->pts++;
#if LIBAVCODEC_VERSION_MAJOR < 58
    if (g_pFormat->flags & AVFMT_RAWPICTURE)
    {
        /* raw video case. The API will change slightly in the near
           future for that. */
        Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.stream_index = g_pVStream->index;
        Packet.data = (uint8_t*)pFrame;
        Packet.size = sizeof(AVPicture);

        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");
        return 0;
    }
    else
#endif
    {
#if LIBAVCODEC_VERSION_MAJOR >= 54
        int got_packet;
        if (avcodec_encode_video2(g_pVideo, &Packet, pFrame, &got_packet) < 0)
            return FatalError("avcodec_encode_video2 failed");
        if (!got_packet)
            return 0;

        av_packet_rescale_ts(&Packet, g_pVideo->time_base, g_pVStream->time_base);
#else
        Packet.size = avcodec_encode_video(g_pVideo, g_OutBuffer, OUTBUFFER_SIZE, pFrame);
        if (Packet.size < 0)
            return FatalError("avcodec_encode_video failed");
        if (Packet.size == 0)
            return 0;

        if( g_pVideo->coded_frame->pts != AV_NOPTS_VALUE)
            Packet.pts = av_rescale_q(g_pVideo->coded_frame->pts, g_pVideo->time_base, g_pVStream->time_base);
        if( g_pVideo->coded_frame->key_frame )
            Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.data = g_OutBuffer;
#endif
        // write the compressed frame in the media file
        Packet.stream_index = g_pVStream->index;
        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");

        return 1;
    }
}
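The interleaving test above converts both timestamps to double before comparing. libavutil's av_compare_ts() makes the same ordering decision on the exact rationals; a hedged sketch of the audio-draining loop rewritten with it:

/* Hedged sketch: rational-safe ordering test, equivalent in intent to the
 * AudioTime < VideoTime comparison above. Keeps WriteAudioFrame()'s
 * contract (0 = no more sound, <0 = error). */
if (g_pAStream) {
    int more = 1;
    while (more > 0 &&
           av_compare_ts(g_pAFrame->pts, g_pAStream->time_base,
                         g_pVFrame->pts, g_pVStream->time_base) < 0)
        more = WriteAudioFrame();
    if (more < 0)
        return more;
}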
Example No. 21
int main()
{
	int ret=0, check_yuv = 0;
	AVPacket pkt;
	AVFrame *frame = NULL;
	enum AVMediaType mediaType;
	unsigned int streamIdx;
	unsigned int i;
	int gotFrame = 0;

	check_yuv = check_file();

	//register all formats and codecs
	av_register_all();

	if (open_input_file(check_yuv) < 0) exit(1);
	if (open_output_file() < 0) exit(1);

	//initialize packet, set data to NULL
	av_init_packet(&pkt);
	pkt.data = NULL;
	pkt.size = 0;

	//read all packets
	while (av_read_frame(inFmtCtx, &pkt) >= 0)
	{
		streamIdx = pkt.stream_index;
		mediaType = inFmtCtx->streams[streamIdx]->codec->codec_type;
		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIdx);
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode \n");

		frame = av_frame_alloc();

		if (!frame)
		{
			ret = AVERROR(ENOMEM);
			break;
		}

		av_packet_rescale_ts(&pkt,inFmtCtx->streams[streamIdx]->time_base,	inFmtCtx->streams[streamIdx]->codec->time_base);

		if (mediaType == AVMEDIA_TYPE_VIDEO) {
			ret = avcodec_decode_video2(inFmtCtx->streams[streamIdx]->codec, frame, &gotFrame, &pkt);

			if (ret < 0)
			{
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}
		}

		if (mediaType == AVMEDIA_TYPE_VIDEO && gotFrame)
		{
			frame->pts = av_frame_get_best_effort_timestamp(frame);
			ret = encode_write_frame(frame, streamIdx, &gotFrame);
			//av_frame_free(&frame);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Error Encoding Frame");
				exit(1);
			}
		}
		else av_frame_free(&frame);

		av_free_packet(&pkt);
	}

	//flush encoders
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		if (inFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			ret = flush_encoder(i);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
				exit(1);
			}
		}
	}


	//free all
	av_write_trailer(outFmtCtx);
	av_free_packet(&pkt);
	//av_frame_free(&frame);
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		avcodec_close(inFmtCtx->streams[i]->codec);
		if (outFmtCtx && outFmtCtx->nb_streams > i && outFmtCtx->streams[i] && outFmtCtx->streams[i]->codec)
			avcodec_close(outFmtCtx->streams[i]->codec);
	}
	avformat_close_input(&inFmtCtx);
	if (outFmtCtx && !(outFmtCtx->oformat->flags & AVFMT_NOFILE))
		avio_closep(&outFmtCtx->pb);
	avformat_free_context(outFmtCtx);

	if (ret < 0)
		av_log(NULL, AV_LOG_ERROR, "Error occurred \n");

	return 0;


}
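flush_encoder() is referenced above but not shown; with the avcodec_encode_video2() API it is conventionally a drain loop that encodes NULL frames until no packet comes back. A hedged sketch, assuming the encode_write_frame() signature used in Examples No. 5 and 15 and the same outFmtCtx global:

/* Hedged sketch of the missing flush_encoder(): keep encoding NULL frames
 * until the encoder reports that nothing more is buffered. */
static int flush_encoder(unsigned int stream_index)
{
    int ret, got_frame;
    if (!(outFmtCtx->streams[stream_index]->codec->codec->capabilities &
          CODEC_CAP_DELAY))
        return 0; /* encoder buffers nothing, no draining needed */
    do {
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
    } while (got_frame);
    return ret;
}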
Example No. 22
int main(int argc, char* argv[])
{
  int ret;

  av_register_all();
  av_log_set_level(AV_LOG_DEBUG);

  if(argc < 3)
  {
    printf("usage : %s <input> <output>\n", argv[0]);
    return 0;
  }

  if(open_input(argv[1]) < 0)
  {
    goto main_end;
  }

  if(create_output(argv[2]) < 0)
  {
    goto main_end;
  }

  // Print information about the output file.
  av_dump_format(outputFile.fmt_ctx, 0, outputFile.fmt_ctx->filename, 1);

  AVPacket pkt;
  int out_stream_index;

  while(1)
  {
    ret = av_read_frame(inputFile.fmt_ctx, &pkt);
    if(ret == AVERROR_EOF)
    {
      printf("End of frame\n");
      break;
    }

    if(pkt.stream_index != inputFile.v_index && 
      pkt.stream_index != inputFile.a_index)
    {
      av_free_packet(&pkt);
      continue;
    }

    AVStream* in_stream = inputFile.fmt_ctx->streams[pkt.stream_index];
    out_stream_index = (pkt.stream_index == inputFile.v_index) ? 
            outputFile.v_index : outputFile.a_index;
    AVStream* out_stream = outputFile.fmt_ctx->streams[out_stream_index];

    av_packet_rescale_ts(&pkt, in_stream->time_base, out_stream->time_base);

    pkt.stream_index = out_stream_index;

    if(av_interleaved_write_frame(outputFile.fmt_ctx, &pkt) < 0)
    {
      printf("Error occurred when writing packet into file\n");
      break;
    }   
  } // while

  // Write the trailer: finalizes information that could not be completed while writing the file.
  av_write_trailer(outputFile.fmt_ctx);

main_end:
  release();

  return 0;
}
Example No. 23
static int tee_write_packet(AVFormatContext *avf, AVPacket *pkt)
{
    TeeContext *tee = avf->priv_data;
    AVFormatContext *avf2;
    AVBSFContext *bsfs;
    AVPacket pkt2;
    int ret_all = 0, ret;
    unsigned i, s;
    int s2;

    for (i = 0; i < tee->nb_slaves; i++) {
        if (!(avf2 = tee->slaves[i].avf))
            continue;

        /* Flush slave if pkt is NULL*/
        if (!pkt) {
            ret = av_interleaved_write_frame(avf2, NULL);
            if (ret < 0) {
                ret = tee_process_slave_failure(avf, i, ret);
                if (!ret_all && ret < 0)
                    ret_all = ret;
            }
            continue;
        }

        s = pkt->stream_index;
        s2 = tee->slaves[i].stream_map[s];
        if (s2 < 0)
            continue;

        memset(&pkt2, 0, sizeof(AVPacket));
        if ((ret = av_packet_ref(&pkt2, pkt)) < 0)
            if (!ret_all) {
                ret_all = ret;
                continue;
            }
        bsfs = tee->slaves[i].bsfs[s2];
        pkt2.stream_index = s2;

        ret = av_bsf_send_packet(bsfs, &pkt2);
        if (ret < 0) {
            av_log(avf, AV_LOG_ERROR, "Error while sending packet to bitstream filter: %s\n",
                   av_err2str(ret));
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }

        while(1) {
            ret = av_bsf_receive_packet(bsfs, &pkt2);
            if (ret == AVERROR(EAGAIN)) {
                ret = 0;
                break;
            } else if (ret < 0) {
                break;
            }

            av_packet_rescale_ts(&pkt2, bsfs->time_base_out,
                                 avf2->streams[s2]->time_base);
            ret = av_interleaved_write_frame(avf2, &pkt2);
            if (ret < 0)
                break;
        }

        if (ret < 0) {
            ret = tee_process_slave_failure(avf, i, ret);
            if (!ret_all && ret < 0)
                ret_all = ret;
        }
    }
    return ret_all;
}
Example No. 24
    /*
     * Class:     com_jpou_meditor_ffmpeg_trans
     * Method:    startTrans
     * Signature: (Ljava/lang/String;Ljava/lang/String;)Z
     */
    JNIEXPORT jboolean JNICALL Java_com_jpou_ffmpeg_Transcoding_startTrans
        (JNIEnv *env, jobject clazz, jstring input, jstring output) {
            int ret;
            AVPacket packet = { .data = NULL, .size = 0 };
            AVFrame *frame = NULL;
            enum AVMediaType type;
            unsigned int stream_index;
            unsigned int i;
            int got_frame;
            int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

            char *input_str, *output_str;
            input_str = (*env)->GetStringUTFChars(env, input, 0);
            output_str = (*env)->GetStringUTFChars(env, output, 0);
            LOGI("input_str ~ : %s -------------------", input_str);
            LOGI("output_str ~ : %s -------------------", output_str);


            if ((input == NULL) || (output == NULL)) {
                LOGI("input_str or output_str is null");
                return (jboolean)0;
            }

            av_register_all();
            avfilter_register_all();

            if ((ret = open_input_file(input_str)) < 0) {
                LOGI("open_input_file error");
                goto end;
            }
            if ((ret = open_output_file(output_str)) < 0) {
                LOGI("open_output_file error");
                goto end;
            }
            LOGI("init_filters ----------------");
            if ((ret = init_filters()) < 0) {
                LOGI("init_filters error");
                goto end;
            }

            /* read all packets */
            LOGI("start av_read_frame ----------------");
            while (1) {
                if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
                    break;
                stream_index = packet.stream_index;
                type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
                LOGI("Demuxer gave frame of stream_index %u\n",
                        stream_index);

                if (filter_ctx[stream_index].filter_graph) {
                    av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
                    frame = av_frame_alloc();
                    if (!frame) {
                        ret = AVERROR(ENOMEM);
                        break;
                    }
                    av_packet_rescale_ts(&packet,
                            ifmt_ctx->streams[stream_index]->time_base,
                            ifmt_ctx->streams[stream_index]->codec->time_base);
                    dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                        avcodec_decode_audio4;
                    ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                            &got_frame, &packet);
                    if (ret < 0) {
                        av_frame_free(&frame);
                        LOGI("Decoding failed\n");
                        break;
                    }

                    if (got_frame) {
                        frame->pts = av_frame_get_best_effort_timestamp(frame);
                        ret = filter_encode_write_frame(frame, stream_index);
                        av_frame_free(&frame);
                        if (ret < 0)
                            goto end;
                    } else {
                        av_frame_free(&frame);
                    }
                } else {
                    /* remux this frame without reencoding */
                    av_packet_rescale_ts(&packet,
                            ifmt_ctx->streams[stream_index]->time_base,
                            ofmt_ctx->streams[stream_index]->time_base);

                    ret = av_interleaved_write_frame(ofmt_ctx, &packet);
                    if (ret < 0)
                        goto end;
                }
                av_free_packet(&packet);
            }

            /* flush filters and encoders */
            for (i = 0; i < ifmt_ctx->nb_streams; i++) {
                /* flush filter */
                if (!filter_ctx[i].filter_graph)
                    continue;
                ret = filter_encode_write_frame(NULL, i);
                if (ret < 0) {
                    LOGI("Flushing filter failed\n");
                    goto end;
                }

                /* flush encoder */
                ret = flush_encoder(i);
                if (ret < 0) {
                    LOGI("Flushing encoder failed\n");
                    goto end;
                }
            }

            av_write_trailer(ofmt_ctx);
            return (jboolean)1;
end:
            av_free_packet(&packet);
            av_frame_free(&frame);
            for (i = 0; i < ifmt_ctx->nb_streams; i++) {
                avcodec_close(ifmt_ctx->streams[i]->codec);
                if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
                    avcodec_close(ofmt_ctx->streams[i]->codec);
                if (filter_ctx && filter_ctx[i].filter_graph)
                    avfilter_graph_free(&filter_ctx[i].filter_graph);
            }
            av_free(filter_ctx);
            avformat_close_input(&ifmt_ctx);
            if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
                avio_closep(&ofmt_ctx->pb);
            avformat_free_context(ofmt_ctx);

            /* av_err2str() expands to a compound literal, which is not valid C++,
             * so format the error message with av_strerror() instead */
            if (ret < 0) {
                char errbuf[AV_ERROR_MAX_STRING_SIZE];
                av_strerror(ret, errbuf, sizeof(errbuf));
                LOGI("Error occurred: %s\n", errbuf);
            }
            return (jboolean)0;

        }

#ifdef __cplusplus
}
#endif
Exemplo n.º 25
0
Arquivo: Muxer.cpp Projeto: acely/ssr
void Muxer::MuxerThread() {
	try {

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread started."));

		double total_time = 0.0;

		// start muxing
		for( ; ; ) {

			// get a packet from a stream that isn't done yet
			std::unique_ptr<AVPacketWrapper> packet;
			unsigned int current_stream = 0, streams_done = 0;
			for(unsigned int i = 0; i < m_format_context->nb_streams; ++i) {
				StreamLock lock(&m_stream_data[i]);
				if(lock->m_packet_queue.empty()) {
					if(lock->m_is_done)
						++streams_done;
				} else {
					current_stream = i;
					packet = std::move(lock->m_packet_queue.front());
					lock->m_packet_queue.pop_front();
					break;
				}
			}

			// if all streams are done, we can stop
			if(streams_done == m_format_context->nb_streams) {
				break;
			}

			// if there is no packet, wait and try again later
			if(packet == NULL) {
				usleep(20000);
				continue;
			}

			// try to figure out the time (the exact value is not critical, it's only used for bitrate statistics)
			AVStream *stream = m_format_context->streams[current_stream];
			double packet_time = 0.0;
			if(packet->GetPacket()->dts != AV_NOPTS_VALUE)
				packet_time = (double) packet->GetPacket()->dts * ToDouble(stream->codec->time_base);
			else if(packet->GetPacket()->pts != AV_NOPTS_VALUE)
				packet_time = (double) packet->GetPacket()->pts * ToDouble(stream->codec->time_base);
			if(packet_time > total_time)
				total_time = packet_time;

			// prepare packet
			packet->GetPacket()->stream_index = current_stream;
#if SSR_USE_AV_PACKET_RESCALE_TS
			av_packet_rescale_ts(packet->GetPacket(), stream->codec->time_base, stream->time_base);
#else
			if(packet->GetPacket()->pts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->pts = av_rescale_q(packet->GetPacket()->pts, stream->codec->time_base, stream->time_base);
			}
			if(packet->GetPacket()->dts != (int64_t) AV_NOPTS_VALUE) {
				packet->GetPacket()->dts = av_rescale_q(packet->GetPacket()->dts, stream->codec->time_base, stream->time_base);
			}
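			// note: unlike av_packet_rescale_ts(), this fallback leaves the packet
			// duration in the codec time base; rescale it too if the muxer needs it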
#endif

			// write the packet (again, why does libav/ffmpeg call this a frame?)
			if(av_interleaved_write_frame(m_format_context, packet->GetPacket()) != 0) {
				Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Error: Can't write frame to muxer!"));
				throw LibavException();
			}

			// the data is now owned by libav/ffmpeg, so don't free it
			packet->SetFreeOnDestruct(false);

			// update the byte counter
			{
				SharedLock lock(&m_shared_data);
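				// bytes already flushed to the output file (pb->pos) plus whatever
				// is still sitting in the AVIO write buffer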
				lock->m_total_bytes = m_format_context->pb->pos + (m_format_context->pb->buf_ptr - m_format_context->pb->buffer);
				if(lock->m_stats_previous_time == NOPTS_DOUBLE) {
					lock->m_stats_previous_time = total_time;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
				double timedelta = total_time - lock->m_stats_previous_time;
				if(timedelta > 0.999999) {
					lock->m_stats_actual_bit_rate = (double) ((lock->m_total_bytes - lock->m_stats_previous_bytes) * 8) / timedelta;
					lock->m_stats_previous_time = total_time;
					lock->m_stats_previous_bytes = lock->m_total_bytes;
				}
			}

		}

		// tell the others that we're done
		m_is_done = true;

		Logger::LogInfo("[Muxer::MuxerThread] " + Logger::tr("Muxer thread stopped."));

	} catch(const std::exception& e) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Exception '%1' in muxer thread.").arg(e.what()));
	} catch(...) {
		m_error_occurred = true;
		Logger::LogError("[Muxer::MuxerThread] " + Logger::tr("Unknown exception in muxer thread."));
	}
}
Exemplo n.º 26
0
bool Remux::executeRemux()
{
	AVPacket readPkt;
	int ret;
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename.c_str(), 0, 0)) < 0) {
		return false;
	}

	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		return false;
	}
	string::size_type pos = out_filename.find_last_of(".");
	if (pos == string::npos)
		out_filename.append(".mp4");
	avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename.c_str());
	if (!writeHeader())
		return false;
	int frame_index = 0;
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
	int startFlag = 1;
	int64_t pts_start_time = 0;
	int64_t dts_start_time = 0;
	int64_t pre_pts = 0;
	int64_t pre_dts = 0;
	while (1)
	{
		
		ret = av_read_frame(ifmt_ctx, &readPkt);
		if (ret < 0)
		{
			break;
		}
		if (readPkt.stream_index == videoIndex)
		{
			++frame_index;
			// skip the leading non-keyframes so the output starts on an I-frame
			if (frame_index == startFlag && !(readPkt.flags & AV_PKT_FLAG_KEY)){
				++startFlag;
				continue;
			}
			if (frame_index == startFlag){
				pts_start_time = readPkt.pts > 0 ? readPkt.pts : 0;
				dts_start_time = readPkt.dts > 0 ? readPkt.dts : 0;
				pre_dts = dts_start_time;
				pre_pts = pts_start_time;
			}

			// run the bitstream filter to convert MP4-style H.264 into Annex B packets
			if (isMp4)
				av_bitstream_filter_filter(h264bsfc, ifmt_ctx->streams[videoIndex]->codec, NULL, &readPkt.data, &readPkt.size, readPkt.data, readPkt.size, 0);

			if (readPkt.pts != AV_NOPTS_VALUE){
				readPkt.pts = readPkt.pts - pts_start_time;
			}
			if (readPkt.dts != AV_NOPTS_VALUE){
				if (readPkt.dts <= pre_dts && frame_index != startFlag){
					// keep dts strictly increasing across packets
					int64_t delta = av_rescale_q(1, ofmt_ctx->streams[0]->time_base, ifmt_ctx->streams[videoIndex]->time_base);
					readPkt.dts = pre_dts + delta + 1;
				}
				else{
					//initDts(&readPkt.dts, dts_start_time);
					readPkt.dts = readPkt.dts - dts_start_time;
				}
			}
			pre_dts = readPkt.dts;
			pre_pts = readPkt.pts;
			
			av_packet_rescale_ts(&readPkt, ifmt_ctx->streams[videoIndex]->time_base, ofmt_ctx->streams[0]->time_base);
			if (readPkt.duration < 0)
			{
				readPkt.duration = 0;
			}
			if (readPkt.pts < readPkt.dts)
			{
				readPkt.pts = readPkt.dts + 1;
			}
			readPkt.stream_index = 0;
			// using av_interleaved_write_frame() here sometimes produced output files
			// with no video data, so plain av_write_frame() is used instead
			ret = av_write_frame(ofmt_ctx, &readPkt);
			if (ret < 0) {
				//break;
				std::cout << "write failed" << std::endl;
			}
		}
		
		av_packet_unref(&readPkt);

	}
	av_bitstream_filter_close(h264bsfc);
	av_packet_unref(&readPkt);
	av_write_trailer(ofmt_ctx);
	return true;
}
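
// The writeHeader() helper called above is not part of this listing. Below is a
// minimal sketch of what it plausibly does, assuming the ifmt_ctx, ofmt_ctx,
// videoIndex and out_filename members used elsewhere in this example; this is a
// hypothetical reconstruction, not the original implementation.
bool Remux::writeHeader()
{
	videoIndex = av_find_best_stream(ifmt_ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
	if (videoIndex < 0)
		return false;
	AVStream *in_stream = ifmt_ctx->streams[videoIndex];
	AVStream *out_stream = avformat_new_stream(ofmt_ctx, NULL);
	if (!out_stream)
		return false;
	// copy codec parameters from the input video stream (deprecated
	// AVStream::codec API, matching the rest of the example)
	if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0)
		return false;
	out_stream->codec->codec_tag = 0;
	if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
		out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	if (!(ofmt_ctx->oformat->flags & AVFMT_NOFILE) &&
			avio_open(&ofmt_ctx->pb, out_filename.c_str(), AVIO_FLAG_WRITE) < 0)
		return false;
	return avformat_write_header(ofmt_ctx, NULL) >= 0;
}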
Exemplo n.º 27
0
int main(void)
{

	int ret = 0, got_picture = 0, frameFinished = 0, check_yuv = 0;
	int streamIdx = 0;
	unsigned i = 0;
	enum AVMediaType mediaType;
	struct SwsContext *sws_ctx = NULL;
	AVFrame           *pFrame = NULL;
	AVPacket          input_pkt, output_pkt;

	check_yuv = check_file();

	// Register all formats and codecs
	av_register_all();

	if (open_input_file(check_yuv) < 0) exit(1);
	if (open_output_file() < 0) exit(1);

	init_parameter(&input_pkt, &output_pkt); //init parameter function
	pictureEncoded_init();

	// initialize SWS context for software scaling
	sws_ctx = sws_getContext(inFmtCtx->streams[streamIdx]->codec->width,
	                         inFmtCtx->streams[streamIdx]->codec->height,
	                         inFmtCtx->streams[streamIdx]->codec->pix_fmt,
	                         clip_width, clip_height, AV_PIX_FMT_YUV420P,
	                         SWS_BILINEAR, NULL, NULL, NULL);

	while (av_read_frame(inFmtCtx, &input_pkt) >= 0) {

		streamIdx = input_pkt.stream_index;
		mediaType = inFmtCtx->streams[streamIdx]->codec->codec_type;

		av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n", streamIdx);
		av_log(NULL, AV_LOG_DEBUG, "Going to reencode \n");

		if (mediaType == AVMEDIA_TYPE_VIDEO){

			pFrame = av_frame_alloc();
			if (!pFrame)
			{
				ret = AVERROR(ENOMEM);
				break;
			}

			// rescale packet timestamps from the stream time base into the decoder's time base
			av_packet_rescale_ts(&input_pkt, inFmtCtx->streams[streamIdx]->time_base, inFmtCtx->streams[streamIdx]->codec->time_base);

			// Decode video frame (input_pkt -> pFrame)
			ret = avcodec_decode_video2(inFmtCtx->streams[streamIdx]->codec, pFrame, &frameFinished, &input_pkt);
			if (ret < 0)
			{
				av_frame_free(&pFrame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}

			if (frameFinished){

				frame_num++;

				// scale the decoded frame into the output size and pixel format
				sws_scale(sws_ctx, (const uint8_t * const *)pFrame->data, pFrame->linesize, 0, clip_height, pictureEncoded->data, pictureEncoded->linesize);

				pictureEncoded->pts = av_frame_get_best_effort_timestamp(pFrame);

				// encode: pictureEncoded -> output_pkt
				avcodec_encode_video2(ofmt_ctx->streams[streamIdx]->codec, &output_pkt, pictureEncoded, &got_picture);

				if (got_picture){

					printf("Encoding %d \n", frame_use);
					frame_use++;

					av_packet_rescale_ts(&output_pkt, ofmt_ctx->streams[streamIdx]->codec->time_base, ofmt_ctx->streams[streamIdx]->time_base);

					ret = av_interleaved_write_frame(ofmt_ctx, &output_pkt);
					if (ret < 0) {
						fprintf(stderr, "Error muxing packet\n");
						break;
					}
				}
			}

			// free the decoded frame even when no complete picture was produced
			av_frame_free(&pFrame);
		}

		// release the packets for every stream, not only video, to avoid leaks
		av_free_packet(&input_pkt);
		av_free_packet(&output_pkt);

	}

	//flush encoders
	for (i = 0; i < inFmtCtx->nb_streams; i++)
	{
		if (inFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			
			ret = flush_encoder(i);
			if (ret < 0)
			{
				av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
				exit(1);
			}
		}
	}

	printf("\n\n total frame_num : %d , frame_encode:  %d \n", frame_num-1, frame_use-1);


	/* Write the trailer, if any. The trailer must be written before you
	* close the CodecContexts open when you wrote the header; otherwise
	* av_write_trailer() may try to use memory that was freed on
	* av_codec_close(). */
	av_write_trailer(ofmt_ctx);

	// Free the YUV frame
	av_frame_free(&pFrame);
	av_frame_free(&pictureEncoded);

	// Close the input file
	avformat_close_input(&inFmtCtx);

	return 0;
}
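
/* pictureEncoded_init() is not shown in this listing. A minimal sketch of what
 * it plausibly does -- allocating the reusable frame that sws_scale() writes
 * into -- assuming the pictureEncoded, clip_width and clip_height globals used
 * above; a hypothetical reconstruction, not the original. */
static void pictureEncoded_init(void)
{
	pictureEncoded = av_frame_alloc();
	if (!pictureEncoded)
		exit(1);
	pictureEncoded->format = AV_PIX_FMT_YUV420P;
	pictureEncoded->width  = clip_width;
	pictureEncoded->height = clip_height;
	/* allocate the data planes that sws_scale() and the encoder will use */
	if (av_frame_get_buffer(pictureEncoded, 32) < 0)
		exit(1);
}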
Exemplo n.º 28
0
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3)
    {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1)
    {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
               stream_index);

        if (filter_ctx[stream_index].filter_graph)
        {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame)
            {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ifmt_ctx->streams[stream_index]->codec->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                       avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                           &got_frame, &packet);
            if (ret < 0)
            {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame)
            {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            }
            else
            {
                av_frame_free(&frame);
            }
        }
        else
        {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0)
        {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_packet_unref(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++)
    {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}
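
/* flush_encoder() and encode_write_frame() are referenced above but not shown
 * in these listings. The sketch below follows the shape of FFmpeg's
 * doc/examples/transcoding.c for the same deprecated API, assuming the
 * ifmt_ctx/ofmt_ctx globals used throughout; a sketch, not the original. */
static int encode_write_frame(AVFrame *filt_frame, unsigned int stream_index,
                              int *got_frame)
{
    int ret;
    int got_frame_local;
    AVPacket enc_pkt;
    int (*enc_func)(AVCodecContext *, AVPacket *, const AVFrame *, int *) =
        (ifmt_ctx->streams[stream_index]->codec->codec_type == AVMEDIA_TYPE_VIDEO) ?
            avcodec_encode_video2 : avcodec_encode_audio2;

    if (!got_frame)
        got_frame = &got_frame_local;

    /* encode the frame; the encoder allocates enc_pkt.data */
    av_init_packet(&enc_pkt);
    enc_pkt.data = NULL;
    enc_pkt.size = 0;
    ret = enc_func(ofmt_ctx->streams[stream_index]->codec, &enc_pkt,
                   filt_frame, got_frame);
    av_frame_free(&filt_frame);
    if (ret < 0)
        return ret;
    if (!(*got_frame))
        return 0;

    /* rescale from encoder time base to stream time base and mux */
    enc_pkt.stream_index = stream_index;
    av_packet_rescale_ts(&enc_pkt,
                         ofmt_ctx->streams[stream_index]->codec->time_base,
                         ofmt_ctx->streams[stream_index]->time_base);
    return av_interleaved_write_frame(ofmt_ctx, &enc_pkt);
}

static int flush_encoder(unsigned int stream_index)
{
    int ret;
    int got_frame;

    /* encoders without CODEC_CAP_DELAY have nothing buffered */
    if (!(ofmt_ctx->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
        return 0;

    /* feed NULL frames until the encoder stops producing packets */
    while (1) {
        ret = encode_write_frame(NULL, stream_index, &got_frame);
        if (ret < 0)
            break;
        if (!got_frame)
            return 0;
    }
    return ret;
}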