Example No. 1
/**
 * Load one audio frame from the FIFO buffer, encode and write it to the
 * output file.
 * @param fifo                  Buffer used for temporary storage
 * @param output_format_context Format context of the output file
 * @param output_codec_context  Codec context of the output file
 * @return Error code (0 if successful)
 */
int Transcode::load_encode_and_write(AVAudioFifo *fifo,
                                 AVFormatContext *output_format_context,
                                 AVCodecContext *output_codec_context)
{
    /* Temporary storage of the output samples of the frame written to the file. */
    AVFrame *output_frame;
    /* Use the maximum possible number of samples per frame.
     * If the FIFO buffer holds fewer samples than that, use the number
     * available; otherwise, use the full frame size. */
    const int frame_size = FFMIN(av_audio_fifo_size(fifo),
                                 output_codec_context->frame_size);
    int data_written;

    /* Initialize temporary storage for one output frame. */
    if (init_output_frame(&output_frame, output_codec_context, frame_size))
        return AVERROR_EXIT;

    /* Read as many samples from the FIFO buffer as required to fill the frame.
     * The samples are stored in the frame temporarily. */
    if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
        fprintf(stderr, "Could not read data from FIFO\n");
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }

    /* Encode one frame worth of audio samples. */
    if (encode_audio_frame(output_frame, output_format_context,
                           output_codec_context, &data_written)) {
        av_frame_free(&output_frame);
        return AVERROR_EXIT;
    }
    av_frame_free(&output_frame);
    return 0;
}
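
The init_output_frame() helper used above is not shown in this example; the sketch below follows FFmpeg's transcode_aac example and is only an assumption about what it looks like here.

/* Sketch, assuming the helper mirrors FFmpeg's transcode_aac example:
 * allocate an AVFrame sized and formatted for the output encoder. */
static int init_output_frame(AVFrame **frame,
                             AVCodecContext *output_codec_context,
                             int frame_size)
{
    int error;

    /* Create a new frame to store the audio samples. */
    if (!(*frame = av_frame_alloc())) {
        fprintf(stderr, "Could not allocate output frame\n");
        return AVERROR_EXIT;
    }

    /* The frame size, sample format, sample rate and channel layout
     * must match what the output codec context expects. */
    (*frame)->nb_samples     = frame_size;
    (*frame)->channel_layout = output_codec_context->channel_layout;
    (*frame)->format         = output_codec_context->sample_fmt;
    (*frame)->sample_rate    = output_codec_context->sample_rate;

    /* Allocate the sample buffers of the created frame. */
    if ((error = av_frame_get_buffer(*frame, 0)) < 0) {
        fprintf(stderr, "Could not allocate output frame samples\n");
        av_frame_free(frame);
        return error;
    }

    return 0;
}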
Example No. 2
	void send(const safe_ptr<core::read_frame>& frame)
	{
		encode_executor_.begin_invoke([=]
		{		
			boost::timer frame_timer;

			encode_video_frame(*frame);

			if (!key_only_)
				encode_audio_frame(*frame);

			graph_->set_value("frame-time", frame_timer.elapsed()*format_desc_.fps*0.5);			
		});
	}
Example No. 3
    /**
     * Load one audio frame from the FIFO buffer, encode and write it to the
     * output file.
     */
    int AudioDecoder::load_encode(AVPacket& output_packet)
    {
        /** Temporary storage of the output samples of the frame written to the file. */
        AVFrame *output_frame;
        /**
         * Use the maximum possible number of samples per frame.
         * If the FIFO buffer holds fewer samples than that, use the number
         * available; otherwise, use the full frame size.
         */
        const int frame_size = FFMIN(av_audio_fifo_size(fifo),
                output_codec_context->frame_size);
        /** Initialize temporary storage for one output frame. */
        if (init_output_frame(&output_frame, output_codec_context, frame_size))
        {
            ELOG_WARN(" init_output_frame failed!! frame_size=%d", frame_size);
            return 0;
        }


        /**
         * Read as many samples from the FIFO buffer as required to fill the frame.
         * The samples are stored in the frame temporarily.
         */
        if (av_audio_fifo_read(fifo, (void **)output_frame->data, frame_size) < frame_size) {
            ELOG_WARN("Could not read data from FIFO\n");
            av_frame_free(&output_frame);
            return 0;
        }
        
        ELOG_DEBUG("fifo read %d, now left %d", frame_size, av_audio_fifo_size(fifo));

        /** Encode one frame worth of audio samples. */
        int pktlen = encode_audio_frame(output_frame, output_packet);
        if (pktlen <= 0)
        {
            ELOG_WARN("Failed to encode_audio_frame!!");
        }
        av_frame_free(&output_frame);

        return pktlen;
    }
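
The encode_audio_frame() member called above is not part of this snippet; the sketch below is a plausible implementation using the legacy avcodec_encode_audio2() API, assuming output_codec_context is a class member and that the return value is the encoded packet size (0 when the encoder has buffered the frame).

    /* Sketch (assumption): encode one frame into output_packet and return
     * its size; output_codec_context is assumed to be a class member. */
    int AudioDecoder::encode_audio_frame(AVFrame *frame, AVPacket& output_packet)
    {
        int got_packet = 0;

        av_init_packet(&output_packet);
        output_packet.data = NULL; /* let the encoder allocate the payload */
        output_packet.size = 0;

        if (avcodec_encode_audio2(output_codec_context, &output_packet,
                                  frame, &got_packet) < 0) {
            ELOG_WARN("avcodec_encode_audio2 failed");
            return -1;
        }

        return got_packet ? output_packet.size : 0;
    }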
Example No. 4
int main(int argc ,char *argv[]){

	/* First, open the input file and obtain its stream information. */
	INPUT_CONTEXT *ptr_input_ctx;

	if (argc < 3) {
		printf("usage: %s <input file> <output file>\n", argv[0]);
		exit(1);
	}

	if ((ptr_input_ctx = malloc(sizeof(INPUT_CONTEXT))) == NULL) {
		printf("ptr_input_ctx malloc failed.\n");
		exit(MEMORY_MALLOC_FAIL);
	}

	/* Second, open the output file and set up its stream information. */
	OUTPUT_CONTEXT *ptr_output_ctx;
	if ((ptr_output_ctx = malloc(sizeof(OUTPUT_CONTEXT))) == NULL) {
		printf("ptr_output_ctx malloc failed.\n");
		exit(MEMORY_MALLOC_FAIL);
	}

	//init the input file and get its stream information
	init_input(ptr_input_ctx, argv[1]);

	//init the output file and set up its stream information
	init_output(ptr_output_ctx, argv[2], ptr_input_ctx);

	//open the video and audio codecs, set video_out_buf and audio_out_buf
	open_stream_codec(ptr_output_ctx);


    // open the output file, if needed
    if (!(ptr_output_ctx->fmt->flags & AVFMT_NOFILE)) {		//for mp4 or mpegts ,this must be performed
        if (avio_open(&ptr_output_ctx->ptr_format_ctx->pb, argv[2], AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", argv[2]);
            exit(OPEN_MUX_FILE_FAIL);
        }
    }

    // write the stream header, if any
    avformat_write_header(ptr_output_ctx->ptr_format_ctx ,NULL);

    printf("ptr_output_ctx->ptr_format_ctx->nb_streams = %d \n\n" ,ptr_output_ctx->ptr_format_ctx->nb_streams);  //streams number in output file

    ptr_output_ctx->img_convert_ctx = sws_getContext(
    		ptr_input_ctx->video_codec_ctx->width ,ptr_input_ctx->video_codec_ctx->height ,PIX_FMT_YUV420P,
    		 ptr_output_ctx->video_stream->codec->width ,ptr_output_ctx->video_stream->codec->height ,PIX_FMT_YUV420P ,
    		 SWS_BICUBIC ,NULL ,NULL ,NULL);


    printf("src_width = %d ,src_height = %d \n" ,ptr_input_ctx->video_codec_ctx->width ,ptr_input_ctx->video_codec_ctx->height);
    printf("dts_width = %d ,dts_height = %d \n" ,ptr_output_ctx->video_stream->codec->width ,
    		ptr_output_ctx->video_stream->codec->height);


	printf("before av_read_frame ...\n");
	/*************************************************************************
	 * decoder loop
	 *************************************************************************/
	while(av_read_frame(ptr_input_ctx->ptr_format_ctx ,&ptr_input_ctx->pkt) >= 0){

		if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->video_index) {

			#if 1

			//decode video packet
			int got_picture = 0;
			ptr_input_ctx->mark_have_frame = 0;
			avcodec_decode_video2(ptr_input_ctx->video_codec_ctx,
					ptr_input_ctx->yuv_frame, &got_picture, &ptr_input_ctx->pkt);

			if (got_picture) {
				//encode video
				ptr_output_ctx->sync_ipts = av_q2d(ptr_input_ctx->ptr_format_ctx->streams[ptr_input_ctx->video_index]->time_base) *
						(ptr_input_ctx->yuv_frame->best_effort_timestamp  )
						- (double)ptr_input_ctx->ptr_format_ctx->start_time / AV_TIME_BASE;

				//first swscale
				sws_scale(ptr_output_ctx->img_convert_ctx ,
						(const uint8_t* const*)ptr_input_ctx->yuv_frame->data ,ptr_input_ctx->yuv_frame->linesize ,
						0 ,
						ptr_input_ctx->video_codec_ctx->height ,
						ptr_output_ctx->encoded_yuv_pict->data ,ptr_output_ctx->encoded_yuv_pict->linesize);

				//then encode the scaled frame
				encode_video_frame(ptr_output_ctx , ptr_output_ctx->encoded_yuv_pict ,ptr_input_ctx);
			}
			#endif

		} else if (ptr_input_ctx->pkt.stream_index == ptr_input_ctx->audio_index) {
			#if 1
			//decode audio packet
			while (ptr_input_ctx->pkt.size > 0) {
				int got_frame = 0;
				int len = avcodec_decode_audio4(ptr_input_ctx->audio_codec_ctx,
						ptr_input_ctx->audio_decode_frame, &got_frame,
						&ptr_input_ctx->pkt);

				if (len < 0) { //decode failed ,skip frame
					fprintf(stderr, "Error while decoding audio frame\n");
					break;
				}

				if (got_frame) {
					//get the size of the decoded audio data...
					int data_size = av_samples_get_buffer_size(NULL,
							ptr_input_ctx->audio_codec_ctx->channels,
							ptr_input_ctx->audio_decode_frame->nb_samples,
							ptr_input_ctx->audio_codec_ctx->sample_fmt, 1);
					ptr_input_ctx->audio_size = data_size; //audio data size

					//encode audio
					int frame_bytes = ptr_output_ctx->audio_stream->codec->frame_size
										* av_get_bytes_per_sample(ptr_output_ctx->audio_stream->codec->sample_fmt)
										* ptr_output_ctx->audio_stream->codec->channels;
					uint8_t * audio_buf = ptr_input_ctx->audio_decode_frame->data[0];

					while (data_size >= frame_bytes) {

						encode_audio_frame(ptr_output_ctx ,audio_buf ,frame_bytes /*data_size*/);  //
						data_size -= frame_bytes;
						audio_buf += frame_bytes;
					}

				} else { //no frame decoded from this call
					printf("======>avcodec_decode_audio4, no data ..\n");
					/* do not 'continue' here: the consumed bytes still have
					 * to be subtracted from the packet below */
				}

				ptr_input_ctx->pkt.size -= len;
				ptr_input_ctx->pkt.data += len;
			}
			#endif
		}

	}//endwhile


	printf("before flush ,ptr_output_ctx->ptr_format_ctx->nb_streams = %d \n\n" ,ptr_output_ctx->ptr_format_ctx->nb_streams);
	encode_flush(ptr_output_ctx ,ptr_output_ctx->ptr_format_ctx->nb_streams);

	printf("before wirite tailer ...\n\n");

	av_write_trailer(ptr_output_ctx->ptr_format_ctx );

	/* free memory */
	free(ptr_input_ctx);
	free(ptr_output_ctx);

	return 0;
}
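
encode_flush() above is specific to this codebase and is not shown; the sketch below only illustrates, under that assumption, how delayed packets are typically drained from the video encoder before the trailer is written (the audio encoder is drained the same way with avcodec_encode_audio2()).

/* Sketch (assumption): drain buffered packets from the video encoder by
 * feeding it NULL frames until no packet comes back. */
static void flush_video_encoder(OUTPUT_CONTEXT *ptr_output_ctx)
{
	AVCodecContext *enc = ptr_output_ctx->video_stream->codec;
	AVPacket pkt;
	int got_packet = 1;

	while (got_packet) {
		av_init_packet(&pkt);
		pkt.data = NULL;
		pkt.size = 0;

		if (avcodec_encode_video2(enc, &pkt, NULL, &got_packet) < 0)
			break;

		if (got_packet) {
			pkt.stream_index = ptr_output_ctx->video_stream->index;
			/* rescale timestamps from the codec to the stream time base */
			if (pkt.pts != AV_NOPTS_VALUE)
				pkt.pts = av_rescale_q(pkt.pts, enc->time_base,
						ptr_output_ctx->video_stream->time_base);
			if (pkt.dts != AV_NOPTS_VALUE)
				pkt.dts = av_rescale_q(pkt.dts, enc->time_base,
						ptr_output_ctx->video_stream->time_base);
			av_interleaved_write_frame(ptr_output_ctx->ptr_format_ctx, &pkt);
		}
	}
}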
Example No. 5
/** Convert an audio file to an AAC file in an MP4 container. */
int compress(int argc, char **argv)
{
    AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
    AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
    SwrContext *resample_context = NULL;
    AVAudioFifo *fifo = NULL;
    int ret = AVERROR_EXIT;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(1);
    }

    const char *input_filename = argv[1];
    const char *output_filename = argv[2];

    /** Register all codecs and formats so that they can be used. */
    av_register_all();
    /** Open the input file for reading. */
    if (open_input_file(input_filename, &input_format_context, &input_codec_context))
        goto cleanup;

    /** Open the output file for writing. */
    if (open_output_file(output_filename, input_codec_context, &output_format_context, &output_codec_context))
        goto cleanup;

    /** Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context, &resample_context))
        goto cleanup;

    /** Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo, output_codec_context))
        goto cleanup;

    /** Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
        goto cleanup;

    /**
     * Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither.
     */
    while (1) {
        /** Use the encoder's desired frame size for processing. */
        const int output_frame_size = output_codec_context->frame_size;
        int finished                = 0;

        /**
         * Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame size may differ, we
         * need the FIFO buffer to store as many frames worth of input samples
         * as it takes to make up at least one frame worth of output samples.
         */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
             * Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer.
             */
            if (read_decode_convert_and_store(fifo, input_format_context,
                                              input_codec_context,
                                              output_codec_context,
                                              resample_context, &finished))
                goto cleanup;

            /**
             * If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file.
             */
            if (finished)
                break;
        }

        /**
         * If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder.
         */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished && av_audio_fifo_size(fifo) > 0))
            /**
             * Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file.
             */
            if (load_encode_and_write(fifo, output_format_context, output_codec_context))
                goto cleanup;

        /**
         * If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish.
         */
        if (finished) {
            int data_written;
            /** Flush the encoder as it may have delayed frames. */
            do {
                if (encode_audio_frame(NULL, output_format_context, output_codec_context, &data_written))
                    goto cleanup;
            } while (data_written);
            break;
        }
    }

    /** Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context))
        goto cleanup;
    ret = 0;

cleanup:
    if (fifo)
        av_audio_fifo_free(fifo);
    swr_free(&resample_context);
    if (output_codec_context)
        avcodec_close(output_codec_context);
    if (output_format_context) {
        avio_closep(&output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
        avcodec_close(input_codec_context);
    if (input_format_context)
        avformat_close_input(&input_format_context);

    return ret;
}
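
init_fifo() is not reproduced above; in FFmpeg's transcode_aac example, which this code follows, it is little more than a wrapper around av_audio_fifo_alloc(). A sketch under that assumption:

/** Sketch, assuming the helper matches FFmpeg's transcode_aac example. */
static int init_fifo(AVAudioFifo **fifo, AVCodecContext *output_codec_context)
{
    /* Create the FIFO buffer for the output sample format and channel count.
     * An initial size of 1 sample is enough because av_audio_fifo_write()
     * grows the FIFO as needed. */
    if (!(*fifo = av_audio_fifo_alloc(output_codec_context->sample_fmt,
                                      output_codec_context->channels, 1))) {
        fprintf(stderr, "Could not allocate FIFO\n");
        return AVERROR(ENOMEM);
    }
    return 0;
}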
Example No. 6
/**
 * Transcode the audio file orig into the output file dest, copying any
 * available metadata and the optional ID3 tags.
 * @param orig Path of the input audio file
 * @param dest Path of the output file
 * @param tags Optional ID3 tags to copy into the output metadata
 * @return Error code (0 if successful)
 */
int Transcode::transcode(string orig, string dest, TID3Tags *tags)
{
    AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
    AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
    SwrContext *resample_context = NULL;
    AVAudioFifo *fifo = NULL;
    int ret = AVERROR_EXIT;
    this->pts = 0;

    if (orig.empty() || dest.empty()) {
        fprintf(stderr, "Files to process not especified\n");
        return ret;
    }

    /* Register all codecs and formats so that they can be used. */
    av_register_all();
    /* Open the input file for reading. */
    if (open_input_file(orig.c_str(), &input_format_context,
                        &input_codec_context))
        goto cleanup;
    /* Open the output file for writing. */
    if (open_output_file(dest.c_str(), input_codec_context,
                         &output_format_context, &output_codec_context))
        goto cleanup;
    
    /** Copy the metadata if it exists. */
    copy_metadata(input_format_context, output_format_context, tags);
    
    
    /* Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context,
                       &resample_context))
        goto cleanup;
    /* Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo, output_codec_context))
        goto cleanup;
    /* Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
        goto cleanup;

    /* Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither. */
    while (1) {
        /* Use the encoder's desired frame size for processing. */
        const int output_frame_size = output_codec_context->frame_size;
        int finished                = 0;

        /* Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame size may differ, we
         * need the FIFO buffer to store as many frames worth of input samples
         * as it takes to make up at least one frame worth of output samples. */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /* Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer. */
            if (read_decode_convert_and_store(fifo, input_format_context,
                                              input_codec_context,
                                              output_codec_context,
                                              resample_context, &finished))
                goto cleanup;

            /* If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file. */
            if (finished)
                break;
        }

        /* If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder. */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished && av_audio_fifo_size(fifo) > 0))
            /* Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file. */
            if (load_encode_and_write(fifo, output_format_context,
                                      output_codec_context))
                goto cleanup;

        /* If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish. */
        if (finished) {
            int data_written;
            /* Flush the encoder as it may have delayed frames. */
            do {
                if (encode_audio_frame(NULL, output_format_context,
                                       output_codec_context, &data_written))
                    goto cleanup;
            } while (data_written);
            break;
        }
    }

    /* Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context))
        goto cleanup;
    ret = 0;

cleanup:
    if (fifo)
        av_audio_fifo_free(fifo);
    swr_free(&resample_context);
    if (output_codec_context)
        avcodec_free_context(&output_codec_context);
    if (output_format_context) {
        avio_closep(&output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
        avcodec_free_context(&input_codec_context);
    if (input_format_context)
        avformat_close_input(&input_format_context);

    return ret;
}
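
copy_metadata() is specific to this codebase and is not shown; the sketch below is only a guess at its shape, using av_dict_copy() for the container metadata and av_dict_set() for individual entries. The TID3Tags field names used here are hypothetical.

/* Sketch (assumption): propagate input metadata and override selected
 * entries from the supplied tags; the TID3Tags fields are hypothetical. */
void Transcode::copy_metadata(AVFormatContext *in, AVFormatContext *out,
                              TID3Tags *tags)
{
    /* Carry over whatever metadata the input container already has. */
    av_dict_copy(&out->metadata, in->metadata, 0);

    if (!tags)
        return;

    /* Overwrite the common entries with the caller-supplied values. */
    if (!tags->title.empty())
        av_dict_set(&out->metadata, "title", tags->title.c_str(), 0);
    if (!tags->artist.empty())
        av_dict_set(&out->metadata, "artist", tags->artist.c_str(), 0);
    if (!tags->album.empty())
        av_dict_set(&out->metadata, "album", tags->album.c_str(), 0);
}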
Example No. 7
/*
 * encode one audio frame and send it to the muxer
 * return 1 when encoding is finished, 0 otherwise
 */
static int process_audio_stream(AVFormatContext *oc, OutputStream *ost)
{
    AVFrame *frame;
    int got_output = 0;
    int ret;

    frame = get_audio_frame(ost);
    got_output |= !!frame;

    /* feed the data to lavr */
    if (frame) {
        ret = avresample_convert(ost->avr, NULL, 0, 0,
                                 frame->extended_data, frame->linesize[0],
                                 frame->nb_samples);
        if (ret < 0) {
            fprintf(stderr, "Error feeding audio data to the resampler\n");
            exit(1);
        }
    }

    while ((frame && avresample_available(ost->avr) >= ost->frame->nb_samples) ||
           (!frame && avresample_get_out_samples(ost->avr, 0))) {
        /* when we pass a frame to the encoder, it may keep a reference to it
         * internally;
         * make sure we do not overwrite it here
         */
        ret = av_frame_make_writable(ost->frame);
        if (ret < 0)
            exit(1);

        /* the difference between the two avresample calls here is that the
         * first one just reads the already converted data that is buffered in
         * the lavr output buffer, while the second one also flushes the
         * resampler */
        if (frame) {
            ret = avresample_read(ost->avr, ost->frame->extended_data,
                                  ost->frame->nb_samples);
        } else {
            ret = avresample_convert(ost->avr, ost->frame->extended_data,
                                     ost->frame->linesize[0], ost->frame->nb_samples,
                                     NULL, 0, 0);
        }

        if (ret < 0) {
            fprintf(stderr, "Error while resampling\n");
            exit(1);
        } else if (frame && ret != ost->frame->nb_samples) {
            fprintf(stderr, "Too few samples returned from lavr\n");
            exit(1);
        }

        ost->frame->nb_samples = ret;

        ost->frame->pts        = ost->next_pts;
        ost->next_pts         += ost->frame->nb_samples;

        got_output |= encode_audio_frame(oc, ost, ret ? ost->frame : NULL);
    }

    return !got_output;
}