ReplayPlayoutLavfSource::~ReplayPlayoutLavfSource( ) {
    av_free(lavc_frame);
    avcodec_close(video_codecctx);
    avformat_close_input(&format_ctx);
}
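
With newer FFmpeg releases the same teardown is usually written with the freeing helpers instead of avcodec_close()/av_free(). A minimal sketch of the equivalent destructor, assuming lavc_frame is an AVFrame* and the members keep their names:

ReplayPlayoutLavfSource::~ReplayPlayoutLavfSource( ) {
    av_frame_free(&lavc_frame);            // frees the frame and NULLs the pointer
    avcodec_free_context(&video_codecctx); // closes the codec and frees the context
    avformat_close_input(&format_ctx);     // closes the demuxer and frees the format context
}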
Example #2
bool StAVVideoMuxer::addFile(const StString& theFileToLoad) {
    StString aFileName, aDummy;
    StFileNode::getFolderAndFile(theFileToLoad, aDummy, aFileName);

    AVFormatContext* aFormatCtx = NULL;
#if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 2, 0))
    int avErrCode = avformat_open_input(&aFormatCtx, theFileToLoad.toCString(), NULL, NULL);
#else
    int avErrCode = av_open_input_file (&aFormatCtx, theFileToLoad.toCString(), NULL, 0, NULL);
#endif
    if(avErrCode != 0) {
        signals.onError(StString("FFmpeg: Couldn't open video file '") + theFileToLoad
                      + "'\nError: " + stAV::getAVErrorDescription(avErrCode));
        if(aFormatCtx != NULL) {
        #if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 17, 0))
            avformat_close_input(&aFormatCtx);
        #else
            av_close_input_file(aFormatCtx);
        #endif
        }
        return false;
    }

    // retrieve stream information
#if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 6, 0))
    if(avformat_find_stream_info(aFormatCtx, NULL) < 0) {
#else
    if(av_find_stream_info(aFormatCtx) < 0) {
#endif
        signals.onError(StString("FFmpeg: Couldn't find stream information in '") + theFileToLoad + "'");
        if(aFormatCtx != NULL) {
        #if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(53, 17, 0))
            avformat_close_input(&aFormatCtx);
        #else
            av_close_input_file(aFormatCtx); // fully close the video file
        #endif
        }
        return false;
    }

#ifdef ST_DEBUG
#if(LIBAVCODEC_VERSION_INT >= AV_VERSION_INT(52, 101, 0))
    av_dump_format(aFormatCtx, 0, theFileToLoad.toCString(), false);
#else
    dump_format   (aFormatCtx, 0, theFileToLoad.toCString(), false);
#endif
#endif

    myCtxListSrc.add(aFormatCtx);
    return true;
}
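
stAV::getAVErrorDescription() is not shown in this excerpt; a minimal equivalent built on av_strerror() might look like this (a sketch, the wrapper shape and StString usage are assumptions):

static StString getAVErrorDescription(int theErrCode) {
    char aBuffer[AV_ERROR_MAX_STRING_SIZE] = {0};
    if(av_strerror(theErrCode, aBuffer, sizeof(aBuffer)) != 0) {
        return StString("Unknown error");
    }
    return StString(aBuffer);
}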

class StAVOutContext {

        public:

    AVFormatContext* Context;

    /**
     * Empty constructor.
     */
    StAVOutContext() : Context(NULL), myFormat(NULL) {}

    /**
     * Determine the format.
     */
    bool findFormat(const char* theShortName,
                    const char* theFilename,
                    const char* theMimeType = NULL) {
        myFormat = av_guess_format(theShortName, theFilename, theMimeType);
        return myFormat != NULL;
    }

    /**
     * Create context.
     */
    bool create(const StString& theFile) {
        if(myFormat == NULL) {
            return false;
        }

    #if !defined(ST_LIBAV_FORK)
        avformat_alloc_output_context2(&Context, myFormat, NULL, theFile.toCString());
    #else
        Context = avformat_alloc_context();
        if(Context == NULL) {
            return false;
        }

        Context->oformat = myFormat;
        if(Context->oformat->priv_data_size > 0) {
            Context->priv_data = av_mallocz(Context->oformat->priv_data_size);
            if(Context->priv_data == NULL) {
                avformat_free_context(Context);
                Context = NULL;
                return false;
            }
            if(Context->oformat->priv_class) {
                *(const AVClass**)Context->priv_data = Context->oformat->priv_class;
                //av_opt_set_defaults(Context->priv_data);
            }
        } else {
            Context->priv_data = NULL;
        }

        const size_t aStrLen = stMin(theFile.Size + 1, size_t(1024));
        stMemCpy(Context->filename, theFile.toCString(), aStrLen);
        Context->filename[1023] = '\0';
    #endif
        return Context != NULL;
    }

    /**
     * Destructor.
     */
    ~StAVOutContext() {
        if(Context == NULL) {
            return;
        }

        if(!(Context->oformat->flags & AVFMT_NOFILE)) {
            avio_close(Context->pb);
        }
        avformat_free_context(Context);
    }

        private:

    AVOutputFormat* myFormat;

};
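
Typical usage of StAVOutContext looks like this (a hedged sketch; the file name and the calls around it are illustrative):

StAVOutContext aCtxOut;
if(!aCtxOut.findFormat(NULL, "output.mkv")
|| !aCtxOut.create(StString("output.mkv"))) {
    return false;
}
if(!(aCtxOut.Context->oformat->flags & AVFMT_NOFILE)
 && avio_open(&aCtxOut.Context->pb, "output.mkv", AVIO_FLAG_WRITE) < 0) {
    return false; // the destructor still frees the context
}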

/**
 * Return string identifier for specified stereo format.
 */
const char* formatToMetadata(const StFormat theFormat) {
    switch(theFormat) {
        case StFormat_Mono:                 return "mono";
        case StFormat_SideBySide_RL:        return "right_left";
        case StFormat_SideBySide_LR:        return "left_right";
        case StFormat_TopBottom_RL:         return "bottom_top";
        case StFormat_TopBottom_LR:         return "top_bottom";
        case StFormat_Rows:                 return "row_interleaved_lr";
        case StFormat_Columns:              return "col_interleaved_lr";
        case StFormat_FrameSequence:        return "block_lr";
        case StFormat_AnaglyphRedCyan:      return "anaglyph_cyan_red";
        case StFormat_AnaglyphGreenMagenta: return "anaglyph_green_magenta";
        case StFormat_AUTO:
        case StFormat_SeparateFrames:
        case StFormat_AnaglyphYellowBlue:
        case StFormat_Tiled4x:
        case StFormat_NB:
            return NULL;
    }
    return NULL;
}
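
The identifiers above match the Matroska stereo_mode metadata values; a sketch of attaching the tag to an output stream (hedged: "stereo_mode" is the key such muxers conventionally read):

const char* aModeStr = formatToMetadata(theFormat);
if(aModeStr != NULL) {
    av_dict_set(&aStreamOut->metadata, "stereo_mode", aModeStr, 0);
}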

bool StAVVideoMuxer::addStream(AVFormatContext* theContext,
                               AVStream*        theStream) {
    AVCodecContext* aCodecCtxSrc = stAV::getCodecCtx(theStream);
#if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 0, 0))
    AVStream* aStreamOut = avformat_new_stream(theContext, aCodecCtxSrc->codec);
#else
    AVStream* aStreamOut = avformat_new_stream(theContext, (AVCodec* )aCodecCtxSrc->codec);
#endif
    if(aStreamOut == NULL) {
        signals.onError(StString("Failed allocating output stream."));
        return false;
    }
    AVCodecContext* aCodecCtxNew = stAV::getCodecCtx(aStreamOut);
    if(avcodec_copy_context(aCodecCtxNew, aCodecCtxSrc) < 0) {
        signals.onError(StString("Failed to copy context from input to output stream codec context."));
        return false;
    }
    av_dict_copy(&aStreamOut->metadata, theStream->metadata, AV_DICT_DONT_OVERWRITE);
//#if(LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(54, 2, 100))
//    myIsAttachedPic = (theStream->disposition & AV_DISPOSITION_ATTACHED_PIC) != 0;
//#endif
    if(aCodecCtxSrc->codec_type == AVMEDIA_TYPE_VIDEO) {
        aStreamOut->sample_aspect_ratio   = theStream->sample_aspect_ratio;
        aCodecCtxNew->sample_aspect_ratio = aStreamOut->sample_aspect_ratio;
    }

    if(theContext->oformat->flags & AVFMT_GLOBALHEADER) {
        aCodecCtxNew->flags |= AV_CODEC_FLAG_GLOBAL_HEADER;
    }
    return true;
}
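
avcodec_copy_context() is deprecated since FFmpeg 3.1; stream copy is now done through AVCodecParameters instead. A minimal sketch of the equivalent setup:

AVStream* aStreamOut = avformat_new_stream(theContext, NULL);
if(aStreamOut == NULL) {
    return false;
}
if(avcodec_parameters_copy(aStreamOut->codecpar, theStream->codecpar) < 0) {
    return false;
}
aStreamOut->codecpar->codec_tag = 0; // let the muxer choose a matching tag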
Example #3
static void ffmpeg_image_free(struct ffmpeg_image *info)
{
	avcodec_close(info->decoder_ctx);
	avformat_close_input(&info->fmt_ctx);
}
Example #4
static int isffmpeg(const char *filename)
{
	AVFormatContext *pFormatCtx = NULL;
	unsigned int i;
	int videoStream;
	AVCodec *pCodec;
	AVCodecContext *pCodecCtx;

	if (BLI_testextensie(filename, ".swf") ||
	    BLI_testextensie(filename, ".jpg") ||
	    BLI_testextensie(filename, ".png") ||
	    BLI_testextensie(filename, ".dds") ||
	    BLI_testextensie(filename, ".tga") ||
	    BLI_testextensie(filename, ".bmp") ||
	    BLI_testextensie(filename, ".tif") ||
	    BLI_testextensie(filename, ".exr") ||
	    BLI_testextensie(filename, ".cin") ||
	    BLI_testextensie(filename, ".wav"))
	{
		return 0;
	}

	if (avformat_open_input(&pFormatCtx, filename, NULL, NULL) != 0) {
		if (UTIL_DEBUG) fprintf(stderr, "isffmpeg: av_open_input_file failed\n");
		return 0;
	}

	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		if (UTIL_DEBUG) fprintf(stderr, "isffmpeg: avformat_find_stream_info failed\n");
		avformat_close_input(&pFormatCtx);
		return 0;
	}

	if (UTIL_DEBUG) av_dump_format(pFormatCtx, 0, filename, 0);


	/* Find the first video stream */
	videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i] &&
		    pFormatCtx->streams[i]->codec &&
		    (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO))
		{
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		avformat_close_input(&pFormatCtx);
		return 0;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		avformat_close_input(&pFormatCtx);
		return 0;
	}

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		avformat_close_input(&pFormatCtx);
		return 0;
	}

	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 1;
}
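
The manual stream scan above can be replaced by av_find_best_stream(), which also resolves the decoder in one call; a sketch (on recent FFmpeg the decoder argument is const AVCodec **):

	const AVCodec *pCodec = NULL;
	int videoStream = av_find_best_stream(pFormatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &pCodec, 0);
	if (videoStream < 0) {
		avformat_close_input(&pFormatCtx);
		return 0; /* no decodable video stream */
	}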
Example #5
 void run() override
 {
     avformat_open_input(&m_formatCtx, m_url, m_inputFmt, &m_options);
     if (!wakeIfNotAborted() && m_formatCtx)
         avformat_close_input(&m_formatCtx);
 }
Example #6
int main( int argc, char *argv[] )
{
	AVFormatContext *pFormatCtx;
	int				i, audioStream;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVPacket		*packet;
	uint8_t			*out_buffer;
	AVFrame			*pFrame;
	SDL_AudioSpec	wanted_spec;
	int ret;
	uint32_t len = 0;
	int got_picture;
	int index = 0;
	int64_t in_channel_layout;
	struct SwrContext* au_convert_ctx;

	FILE* pFile = NULL;
	char url[] = "F:/video/6s_kapian.flv";

	av_register_all();
	pFormatCtx = avformat_alloc_context();

	//Open
	if( avformat_open_input(&pFormatCtx, url, NULL, NULL) != 0 )
	{
		printf("Couldn't open input stream.\n");
		return -1;
	}

	//Find Stream information
	if( avformat_find_stream_info(pFormatCtx, NULL) < 0 )
	{
		printf("Couldn't find stream information.\n");
		return -1;
	}
	//Dump valid information into standard error
	av_dump_format(pFormatCtx, 0, url, false);

	//Find the first audio stream
	audioStream = -1;
	for( i = 0; i < pFormatCtx->nb_streams; ++i	)
		if( pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO )
		{
			audioStream = i;
			break;
		}

	if( audioStream == -1 )
	{
		printf("Didn't find a audio stream.\n");
		return -1;
	}

	//Get a pointer to the codec context for the audio stream
	pCodecCtx = pFormatCtx->streams[audioStream]->codec;

	//Find the decoder for the audio stream
	pCodec = avcodec_find_decoder( pCodecCtx->codec_id );

	if( pCodec == NULL )
	{
		printf("Codec not found.\n");
		return -1;
	}

	//Open codec
	if( avcodec_open2(pCodecCtx, pCodec, NULL) < 0 )
	{
		printf("Could not open codec.\n");
		return -1;
	}

#if OUTPUT_PCM
	pFile=fopen("output.pcm", "wb");
#endif

	packet = (AVPacket *)av_malloc( sizeof(AVPacket) );
	av_init_packet(packet);

	//Out Audio Param
	uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
	//nb_samples: number of samples per channel in one audio frame
	int out_nb_samples = pCodecCtx->frame_size;
	AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
	int out_sample_rate = 44100;
	int out_channels = av_get_channel_layout_nb_channels(out_channel_layout);

	//Out buffer size
	int out_buffer_size = av_samples_get_buffer_size( NULL, out_channels, out_nb_samples, out_sample_fmt, 1 );

	out_buffer = (uint8_t *)av_malloc( MAX_AUDIO_FRAME_SIZE );
	pFrame = av_frame_alloc();

	//SDL------------------
#if USE_SDL
	//Init
	if(SDL_Init( SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	}
	//SDL_AudioSpec
	wanted_spec.freq = out_sample_rate; 
	wanted_spec.format = AUDIO_S16SYS; 
	wanted_spec.channels = out_channels; 
	wanted_spec.silence = 0; 
	wanted_spec.samples = out_nb_samples; 
	wanted_spec.callback = fill_audio; 
	wanted_spec.userdata = pCodecCtx; 

	if (SDL_OpenAudio(&wanted_spec, NULL)<0){ 
		printf("can't open audio.\n"); 
		return -1; 
	} 
#endif

	in_channel_layout = av_get_default_channel_layout(pCodecCtx->channels);
	//set up the software resampler
	au_convert_ctx = swr_alloc();
	au_convert_ctx = swr_alloc_set_opts( au_convert_ctx, out_channel_layout, out_sample_fmt, out_sample_rate,
		in_channel_layout, pCodecCtx->sample_fmt, pCodecCtx->sample_rate, 0, NULL);
	swr_init(au_convert_ctx);

	while( av_read_frame(pFormatCtx, packet) >= 0 ){
		if( packet->stream_index == audioStream ){
			ret = avcodec_decode_audio4(pCodecCtx, pFrame, &got_picture, packet);
			if( ret < 0 ){
				printf("Error in decoding audio frame.\n");
				return -1;
			}//end if
			if( got_picture > 0 ){
				swr_convert( au_convert_ctx, &out_buffer, MAX_AUDIO_FRAME_SIZE, (const uint8_t **)pFrame->data, pFrame->nb_samples );
#if 1
				printf("index:%5d\t pts:%lld\t packet size:%d\n",index,packet->pts,packet->size);
#endif
#if OUTPUT_PCM
				//Write PCM
				fwrite(out_buffer, 1, out_buffer_size, pFile);
#endif
				index++;
			}//end if
#if USE_SDL
			while(audio_len>0)//Wait until finish
				SDL_Delay(1); 

			//Set audio buffer (PCM data)
			audio_chunk = (Uint8 *) out_buffer; 
			//Audio buffer length
			audio_len =out_buffer_size;
			audio_pos = audio_chunk;

			//Play
			SDL_PauseAudio(0);
#endif
		}//end if
		av_free_packet(packet);
	}//end while

	swr_free(&au_convert_ctx);

#if USE_SDL
	SDL_CloseAudio();//Close SDL
	SDL_Quit();
#endif
	// Close file
#if OUTPUT_PCM
	fclose(pFile);
#endif
	av_free(out_buffer);
	// Close the codec
	avcodec_close(pCodecCtx);
	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
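
Note that swr_convert() returns the number of samples actually produced per channel, which can differ from out_nb_samples, so writing a fixed out_buffer_size may pad the PCM file. A hedged fix for the write path above:

	int got_samples = swr_convert( au_convert_ctx, &out_buffer, MAX_AUDIO_FRAME_SIZE,
		(const uint8_t **)pFrame->data, pFrame->nb_samples );
	if( got_samples > 0 ){
		int got_bytes = av_samples_get_buffer_size( NULL, out_channels, got_samples, out_sample_fmt, 1 );
		fwrite(out_buffer, 1, got_bytes, pFile);
	}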
Example #7
int muxer_mp4(void* noUse)
{
    AVOutputFormat *ofmt = NULL;
    //Input AVFormatContext and Output AVFormatContext
    AVFormatContext *ifmt_ctx_v = NULL, *ifmt_ctx_a = NULL, *ofmt_ctx = NULL;
    AVPacket pkt;
    int ret, i;
    int videoindex_v = -1, videoindex_out = -1;
    int audioindex_a = -1, audioindex_out = -1;
    int frame_index = 0;
    int64_t cur_pts_v = 0, cur_pts_a = 0;

    //const char *in_filename_v = "cuc_ieschool.ts";//Input file URL
    const char *in_filename_v = "../testResource/bigbuckbunny_480x272.h264";
    //const char *in_filename_a = "cuc_ieschool.mp3";
    //const char *in_filename_a = "gowest.m4a";
    //const char *in_filename_a = "gowest.aac";
    const char *in_filename_a = "../testResource/WavinFlag.aac";

    const char *out_filename = "bigbuckbunny.mp4";//Output file URL
    av_register_all();
    //Input
    if ((ret = avformat_open_input(&ifmt_ctx_v, in_filename_v, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_v, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }

    if ((ret = avformat_open_input(&ifmt_ctx_a, in_filename_a, 0, 0)) < 0) {
        printf("Could not open input file.");
        goto end;
    }
    if ((ret = avformat_find_stream_info(ifmt_ctx_a, 0)) < 0) {
        printf("Failed to retrieve input stream information");
        goto end;
    }
    printf("===========Input Information==========\n");
    av_dump_format(ifmt_ctx_v, 0, in_filename_v, 0);
    av_dump_format(ifmt_ctx_a, 0, in_filename_a, 0);
    printf("======================================\n");
    //Output
    avformat_alloc_output_context2(&ofmt_ctx, NULL, NULL, out_filename);
    if (!ofmt_ctx) {
        printf("Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }
    ofmt = ofmt_ctx->oformat;

    unsigned char* outbuffer = NULL;
    outbuffer = (unsigned char*)av_malloc(32768);

    AVIOContext *avio_out = avio_alloc_context(outbuffer, 32768, 1, NULL, NULL, write_buffer, NULL); // write_flag must be 1 for output
    if (avio_out == NULL)
        goto end;
    ofmt_ctx->pb = avio_out;
    ofmt_ctx->flags |= AVFMT_FLAG_CUSTOM_IO;

    for (i = 0; i < ifmt_ctx_v->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_v->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            AVStream *in_stream = ifmt_ctx_v->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            videoindex_v = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            videoindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
            break;
        }
    }

    for (i = 0; i < ifmt_ctx_a->nb_streams; i++) {
        //Create output AVStream according to input AVStream
        if (ifmt_ctx_a->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            AVStream *in_stream = ifmt_ctx_a->streams[i];
            AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
            audioindex_a = i;
            if (!out_stream) {
                printf("Failed allocating output stream\n");
                ret = AVERROR_UNKNOWN;
                goto end;
            }
            audioindex_out = out_stream->index;
            //Copy the settings of AVCodecContext
            if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
                printf("Failed to copy context from input to output stream codec context\n");
                goto end;
            }
            out_stream->codec->codec_tag = 0;
            if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
                out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

            break;
        }
    }

    printf("==========Output Information==========\n");
    av_dump_format(ofmt_ctx, 0, out_filename, 1);
    printf("======================================\n");
    //Open output file
    if (!(ofmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE) < 0) {
            printf("Could not open output file '%s'", out_filename);
            goto end;
        }
    }
    //Write file header
    if (avformat_write_header(ofmt_ctx, NULL) < 0) {
        printf("Error occurred when opening output file\n");
        goto end;
    }


    //FIX
#if USE_H264BSF
    AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif
#if USE_AACBSF
    AVBitStreamFilterContext* aacbsfc = av_bitstream_filter_init("aac_adtstoasc");
#endif

    while (1) {
        AVFormatContext *ifmt_ctx;
        int stream_index = 0;
        AVStream *in_stream, *out_stream;

        //Get an AVPacket
        if (av_compare_ts(cur_pts_v, ifmt_ctx_v->streams[videoindex_v]->time_base, cur_pts_a, ifmt_ctx_a->streams[audioindex_a]->time_base) <= 0) {
            ifmt_ctx = ifmt_ctx_v;
            stream_index = videoindex_out;

            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == videoindex_v) {
                        //FIX: No PTS (Example: Raw H.264)
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }

                        cur_pts_v = pkt.pts;
                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }
        }
        else {
            ifmt_ctx = ifmt_ctx_a;
            stream_index = audioindex_out;
            if (av_read_frame(ifmt_ctx, &pkt) >= 0) {
                do {
                    in_stream = ifmt_ctx->streams[pkt.stream_index];
                    out_stream = ofmt_ctx->streams[stream_index];

                    if (pkt.stream_index == audioindex_a) {

                        //FIX: No PTS
                        //Simple Write PTS
                        if (pkt.pts == AV_NOPTS_VALUE) {
                            //Write PTS
                            AVRational time_base1 = in_stream->time_base;
                            //Duration between 2 frames (us)
                            int64_t calc_duration = (double)AV_TIME_BASE / av_q2d(in_stream->r_frame_rate);
                            //Parameters
                            pkt.pts = (double)(frame_index*calc_duration) / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            pkt.dts = pkt.pts;
                            pkt.duration = (double)calc_duration / (double)(av_q2d(time_base1)*AV_TIME_BASE);
                            frame_index++;
                        }
                        cur_pts_a = pkt.pts;

                        break;
                    }
                } while (av_read_frame(ifmt_ctx, &pkt) >= 0);
            }
            else {
                break;
            }

        }

        //FIX:Bitstream Filter
#if USE_H264BSF
        av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
#if USE_AACBSF
        av_bitstream_filter_filter(aacbsfc, out_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif


        //Convert PTS/DTS
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
        pkt.stream_index = stream_index;

        printf("Write 1 Packet. size:%5d\tpts:%lld\n", pkt.size, pkt.pts);
        //Write
        if (av_interleaved_write_frame(ofmt_ctx, &pkt) < 0) {
            printf("Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);

    }
    //Write file trailer
    av_write_trailer(ofmt_ctx);

#if USE_H264BSF
    av_bitstream_filter_close(h264bsfc);
#endif
#if USE_AACBSF
    av_bitstream_filter_close(aacbsfc);
#endif

end:
    avformat_close_input(&ifmt_ctx_v);
    avformat_close_input(&ifmt_ctx_a);
    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
        avio_close(ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);
    if (ret < 0 && ret != AVERROR_EOF) {
        printf("Error occurred.\n");
        return -1;
    }
    return 0;
}
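
The av_bitstream_filter_*() API used above was removed in FFmpeg 4.0; its replacement is the AVBSFContext push/pull API. A minimal sketch for h264_mp4toannexb:

    const AVBitStreamFilter *bsf = av_bsf_get_by_name("h264_mp4toannexb");
    AVBSFContext *bsf_ctx = NULL;
    av_bsf_alloc(bsf, &bsf_ctx);
    avcodec_parameters_copy(bsf_ctx->par_in, in_stream->codecpar);
    av_bsf_init(bsf_ctx);

    av_bsf_send_packet(bsf_ctx, &pkt);                // ownership of pkt moves into the filter
    while (av_bsf_receive_packet(bsf_ctx, &pkt) == 0) {
        // write the filtered packet here
    }
    av_bsf_free(&bsf_ctx);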
Example #8
int main(int argc, char *argv[])
{
    std::cout << "hello\n";

    av_register_all();

    AVFormatContext *pFormatCtx = nullptr;
    if (avformat_open_input(&pFormatCtx, argv[1], nullptr, nullptr) != 0) {
        std::cerr << "Failed to open file\n";
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, nullptr) < 0) {
        std::cerr << "Failed to find stream info\n";
        return -1;
    }

    AVCodecContext *pCodecCtx = nullptr;
    AVCodecContext *pCodecCtxOriginal = nullptr;
    int videoStream = -1;
    for (int i = 0; i < pFormatCtx->nb_streams; i++) {
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    }
    if (videoStream == -1) {
        std::cerr << "Failed to find a VIDEO stream\n";
        return -1;
    } else {
        std::cout << "Video stream = " << videoStream << "\n";
    }
    pCodecCtxOriginal = pFormatCtx->streams[videoStream]->codec;

    AVCodec *pCodec = nullptr;
    pCodec = avcodec_find_decoder(pCodecCtxOriginal->codec_id);
    if (pCodec == nullptr) {
        std::cerr << "Unsupported codec!\n";
        return -1;
    }

    // Copy Context
    pCodecCtx = avcodec_alloc_context3(pCodec);
    if (avcodec_copy_context(pCodecCtx, pCodecCtxOriginal) != 0) {
        std::cerr << "Failed to copy Codec Context\n";
        return -1;
    }

    // Open Codec
    if (avcodec_open2(pCodecCtx, pCodec, nullptr) < 0) {
        std::cerr << "Failed to open Codec\n";
        return -1;
    }

    AVFrame* pFrame;
    AVFrame* pFrameBGR;
    pFrame = av_frame_alloc();
    pFrameBGR = av_frame_alloc();
    if (pFrame == nullptr || pFrameBGR == nullptr) {
        std::cerr << "Failed to allocate frame\n";
        return -1;
    }

    int numFrameBytes;
    numFrameBytes = avpicture_get_size(AV_PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
    avpicture_fill((AVPicture*)pFrameBGR,
                   (uint8_t*)av_malloc(numFrameBytes * sizeof(uint8_t)),
                   AV_PIX_FMT_BGR24,
                   pCodecCtx->width,
                   pCodecCtx->height);
    std::cout << "Picture width = " << pCodecCtx->width << " height = " << pCodecCtx->height << "\n";

    // Format Conversion
    struct SwsContext * pSwsCtx = nullptr;
    pSwsCtx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                             pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_BGR24,
                             SWS_BILINEAR,  nullptr, nullptr, nullptr);
    if (pSwsCtx == nullptr) {
        std::cerr << "Failed to get the conversion context\n";
    }

    // -----------------------------
    // Loop through the video file
    // -----------------------------

    int packet_index = 0;
    AVPacket packet;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        if (packet.stream_index == videoStream) {
            packet_index++;

            // convert to AVFrame
            int got_picture;
            if (avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, &packet) < 0) {
                std::cerr << "Failed to decode video AVPacket\n";
            }

            if (got_picture) {
                std::cout << "Successfully decoded a frame " << packet_index << "\n";

                // convert to RGB format
                sws_scale(pSwsCtx,
                          pFrame->data,
                          pFrame->linesize,
                          0,
                          pFrame->height,
                          pFrameBGR->data,
                          pFrameBGR->linesize);

                cv::Mat mat(pCodecCtx->height, pCodecCtx->width, CV_8UC3, pFrameBGR->data[0]);
                char filename[100];
                sprintf(filename, "frame%02d.jpg", packet_index);
                cv::imwrite(filename, mat);
            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);

        if (packet_index == 12) {
            break;
        }
    }

    // Free allocated resources
    av_frame_free(&pFrame);
    av_frame_free(&pFrameBGR);

    // Close the codec
    avcodec_close(pCodecCtx);
    avcodec_close(pCodecCtxOriginal);

    // Close the video file
    avformat_close_input(&pFormatCtx);
    return 0;
}
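
avcodec_decode_video2() is deprecated in current FFmpeg; the decode step above would look roughly like this with the send/receive API (a sketch):

    if (avcodec_send_packet(pCodecCtx, &packet) == 0) {
        while (avcodec_receive_frame(pCodecCtx, pFrame) == 0) {
            // one decoded frame is now available in pFrame
        }
    }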
Example #9
File: logo.cpp  Project: crubia/wt
int Logo::Load(const char* fileName)
{
	AVFormatContext *fctx = NULL;
	AVCodecContext *ctx = NULL;
	AVCodec *codec = NULL;
	AVFrame *logoRGB = NULL;
	AVFrame* logo = NULL;
	SwsContext *sws = NULL;
	AVPacket packet;
	int res = 0;
	int gotLogo = 0;
	int numpixels = 0;
	int size = 0;

	//Create context from file
	if(avformat_open_input(&fctx, fileName, NULL, NULL)<0)
		return Error("Couldn't open the logo image file [%s]\n",fileName);

	//Check it's ok
	if(avformat_find_stream_info(fctx,NULL)<0)
	{
		//Set error
		res = Error("Couldn't find stream information for the logo image file...\n");
		//Free resources
		goto end;
	}

	//Get codec from file format
	if (!(ctx = fctx->streams[0]->codec))
	{
		//Set error
		res = Error("Context codec not valid\n");
		//Free resources
		goto end;
	}

	//Get decoder for format
	if (!(codec = avcodec_find_decoder(ctx->codec_id)))
	{
		//Set error
		res = Error("Couldn't find codec for the logo image file...\n");
		//Free resources
		goto end;
	}
	//Only one thread
	ctx->thread_count	= 1;
	
	//Open codec
	if (avcodec_open2(ctx, codec, NULL)<0)
	{
		//Set error
		res = Error("Couldn't open codec for the logo image file...\n");
		//Free resources
		goto end;
	}

	//Read logo frame
	if (av_read_frame(fctx, &packet)<0)
	{
		//Set error
		res = Error("Couldn't read frame from the image file...\n");
		//Free resources
		goto end;
	}

	//Alloc frame
	if (!(logoRGB = av_frame_alloc()))
	{
		//Set error
		res = Error("Couldn't alloc frame\n");
		//Free resources
		goto end;
	}

	//Use only one thread so decoding does not run in the background and leave the logo undisplayed
	ctx->thread_count = 1;

	//Decode logo
	if (avcodec_decode_video2(ctx, logoRGB, &gotLogo, &packet)<0)
	{
		//Set error
		res = Error("Couldn't decode logo\n");
		//Free resources
		av_free_packet(&packet);
		goto end;
	}

	av_free_packet(&packet);

	//If we don't have a logo
	if (!gotLogo)
	{
		//Set error
		res = Error("No logo on file\n");
		//Free resources
		goto end;
	}

	//Allocate new one
	if (!(logo = av_frame_alloc()))
	{
		//Set error
		res = Error("Couldn't alloc frame\n");
		//Free resources
		goto end;
	}

	//Get frame sizes
	width = ctx->width;
	height = ctx->height;

	// Create YUV rescaler context
	if (!(sws = sws_alloc_context()))
	{
		//Set error
		res = Error("Couldn't alloc sws context\n");
		// Exit
		goto end;
	}

	// Set properties of the YUV rescaler context
	av_opt_set_defaults(sws);
	av_opt_set_int(sws, "srcw",       width			,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "srch",       height		,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "src_format", ctx->pix_fmt		,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "dstw",       width			,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "dsth",       height		,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "dst_format", AV_PIX_FMT_YUV420P	,AV_OPT_SEARCH_CHILDREN);
	av_opt_set_int(sws, "sws_flags",  SWS_FAST_BILINEAR	,AV_OPT_SEARCH_CHILDREN);
	
	// Init YUV rescaller context
	if (sws_init_context(sws, NULL, NULL) < 0)
	{
		//Set error
		res = Error("Couldn't init sws context\n");
		// Exit
		goto end;
	}

	//Check if we already had one
	if (frame)
		//Free memory
		free(frame);
	//Check if we already had one
	if (frameRGBA)
		//Free memory
		free(frameRGBA);

	//Get size with padding
	size = (((width/32+1)*32)*((height/32+1)*32)*3)/2+FF_INPUT_BUFFER_PADDING_SIZE+32;

	//And number of pixels
	numpixels = width*height;

	//Allocate frame
	frame = (BYTE*)malloc32(size); /* size for YUV 420 */
	frameRGBA = (BYTE*)malloc32(numpixels*4);

	//Alloc data
	logo->data[0] = frame;
	logo->data[1] = logo->data[0] + numpixels;
	logo->data[2] = logo->data[1] + numpixels / 4;

	//Set size for planes
	logo->linesize[0] = width;
	logo->linesize[1] = width/2;
	logo->linesize[2] = width/2;

	//Convert
	sws_scale(sws, logoRGB->data, logoRGB->linesize, 0, height, logo->data, logo->linesize);

	//Copy logo from rgbA to rgb
	for (int j=0;j<height;j++)
		for (int i=0;i<width;i++)
			//Copy line by line
			memcpy(frameRGBA+(width*j+i)*4,logoRGB->data[0]+logoRGB->linesize[0]*j+i*3,3);
	
	//Everything was ok
	res = 1;

end:
	if (logo)
		av_free(logo);

	if (logoRGB)
		av_free(logoRGB);

	if (ctx)
		avcodec_close(ctx);

	if (sws)
		sws_freeContext(sws);

	if (fctx)
		avformat_close_input(&fctx);

	//Exit
	return res;	
}
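
The sws_alloc_context()/av_opt_set_int()/sws_init_context() sequence above is the verbose way to build the rescaler; sws_getContext() does the same in one call. A sketch with the same parameters:

	SwsContext *sws = sws_getContext(width, height, ctx->pix_fmt,
	                                 width, height, AV_PIX_FMT_YUV420P,
	                                 SWS_FAST_BILINEAR, NULL, NULL, NULL);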
Example #10
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize,
                                      unsigned int width, unsigned int height)
{
  
  uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
  MemBuffer buf;
  buf.data = buffer;
  buf.size = bufSize;
  buf.pos = 0;

  AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf,
                                          mem_file_read, NULL, mem_file_seek);

  if (!ioctx)
  {
    av_free(fbuffer);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
    return false;
  }

  AVFormatContext* fctx = avformat_alloc_context();
  if (!fctx)
  {
    av_free(ioctx->buffer);
    av_free(ioctx);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
    return false;
  }

  fctx->pb = ioctx;
  ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

  if (avformat_open_input(&fctx, "", NULL, NULL) < 0)
  {
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVCodecContext* codec_ctx = fctx->streams[0]->codec;
  AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
  if (avcodec_open2(codec_ctx, codec, NULL) < 0)
  {
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);
    return false;
  }

  AVPacket pkt;
  AVFrame* frame = av_frame_alloc();
  av_read_frame(fctx, &pkt);
  int frame_decoded;
  int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
  if (ret < 0)
    CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, strerror(AVERROR(ret)));

  if (frame_decoded != 0)
  {
    av_frame_free(&m_pFrame);
    m_pFrame = av_frame_clone(frame);

    if (m_pFrame)
    {
      m_height = m_pFrame->height;
      m_width = m_pFrame->width;
    }    
    else
    {
      CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer");
      frame_decoded = 0;
    }
  }
  else
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame");

  av_frame_free(&frame);
  av_free_packet(&pkt);
  avcodec_close(codec_ctx);
  avformat_close_input(&fctx);
  FreeIOCtx(ioctx);

  return (frame_decoded != 0);
}
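
mem_file_read and mem_file_seek are not part of this excerpt; plausible implementations over the MemBuffer struct could look like this (a sketch, not necessarily the project's actual code):

static int mem_file_read(void* h, uint8_t* buf, int size)
{
  MemBuffer* mbuf = static_cast<MemBuffer*>(h);
  int64_t unread = mbuf->size - mbuf->pos;
  if (unread <= 0)
    return AVERROR_EOF;
  if (size > unread)
    size = (int)unread;
  memcpy(buf, mbuf->data + mbuf->pos, size);
  mbuf->pos += size;
  return size;
}

static int64_t mem_file_seek(void* h, int64_t pos, int whence)
{
  MemBuffer* mbuf = static_cast<MemBuffer*>(h);
  if (whence == AVSEEK_SIZE)
    return mbuf->size;              // let lavf query the total size
  if (whence == SEEK_SET)
    mbuf->pos = pos;
  else if (whence == SEEK_CUR)
    mbuf->pos += pos;
  else if (whence == SEEK_END)
    mbuf->pos = mbuf->size + pos;
  return mbuf->pos;
}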
Example #11
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet0, packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file | %s\n", argv[0], player);
        exit(1);
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    packet0.data = NULL;
    packet.data = NULL;
    while (1) {
        if (!packet0.data) {
            if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
                break;
            packet0 = packet;
        }

        if (packet.stream_index == audio_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_audio4(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding audio\n");
                continue;
            }
            packet.size -= ret;
            packet.data += ret;

            if (got_frame) {
                /* push the audio data from decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, 0) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the audio filtergraph\n");
                    break;
                }

                /* pull filtered audio from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    print_frame(filt_frame);
                    av_frame_unref(filt_frame);
                }
            }

            if (packet.size <= 0)
                av_free_packet(&packet0);
        } else {
            /* discard non-wanted packets */
            av_free_packet(&packet0);
        }
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
Example #12
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream, audioStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVPacket        packet;
  int             frameFinished;
  //float           aspect_ratio;
  
  AVCodecContext  *aCodecCtx = NULL;
  AVCodec         *aCodec = NULL;

  SDL_Overlay     *bmp = NULL;
  SDL_Surface     *screen = NULL;
  SDL_Rect        rect;
  SDL_Event       event;
  SDL_AudioSpec   wanted_spec, spec;

  struct SwsContext   *sws_ctx            = NULL;
  AVDictionary        *videoOptionsDict   = NULL;
  AVDictionary        *audioOptionsDict   = NULL;

  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();
  
  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
    return -1; // Couldn't open file
  
  // Retrieve stream information
  if(avformat_find_stream_info(pFormatCtx, NULL)<0)
    return -1; // Couldn't find stream information
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream=-1;
  audioStream=-1;
  for(i=0; i<pFormatCtx->nb_streams; i++) {
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO &&
       videoStream < 0) {
      videoStream=i;
    }
    if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO &&
       audioStream < 0) {
      audioStream=i;
    }
  }
  if(videoStream==-1)
    return -1; // Didn't find a video stream
  if(audioStream==-1)
    return -1;
   
  aCodecCtx=pFormatCtx->streams[audioStream]->codec;
  // Set audio settings from codec info
  wanted_spec.freq = aCodecCtx->sample_rate;
  wanted_spec.format = AUDIO_S16SYS;
  wanted_spec.channels = aCodecCtx->channels;
  wanted_spec.silence = 0;
  wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
  wanted_spec.callback = audio_callback;
  wanted_spec.userdata = aCodecCtx;
  
  if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
    fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
    return -1;
  }
  aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
  if(!aCodec) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }
  avcodec_open2(aCodecCtx, aCodec, &audioOptionsDict);

  // audio_st = pFormatCtx->streams[index]
  packet_queue_init(&audioq);
  SDL_PauseAudio(0);

  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
  if(pCodec==NULL) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1; // Codec not found
  }
  // Open codec
  if(avcodec_open2(pCodecCtx, pCodec, &videoOptionsDict)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();

  // Make a screen to put our video

#ifndef __DARWIN__
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
#else
        screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }
  
  // Allocate a place to put our YUV image on that screen
  bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
				 pCodecCtx->height,
				 SDL_YV12_OVERLAY,
				 screen);
  sws_ctx =
    sws_getContext
    (
        pCodecCtx->width,
        pCodecCtx->height,
        pCodecCtx->pix_fmt,
        pCodecCtx->width,
        pCodecCtx->height,
        PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
    );


  // Read frames and save first five frames to disk
  i=0;
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, 
			   &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	SDL_LockYUVOverlay(bmp);

	AVPicture pict;
	pict.data[0] = bmp->pixels[0];
	pict.data[1] = bmp->pixels[2];
	pict.data[2] = bmp->pixels[1];

	pict.linesize[0] = bmp->pitches[0];
	pict.linesize[1] = bmp->pitches[2];
	pict.linesize[2] = bmp->pitches[1];

	// Convert the image into YUV format that SDL uses
    sws_scale
    (
        sws_ctx, 
        (uint8_t const * const *)pFrame->data, 
        pFrame->linesize, 
        0,
        pCodecCtx->height,
        pict.data,
        pict.linesize
    );
	
	SDL_UnlockYUVOverlay(bmp);
	
	rect.x = 0;
	rect.y = 0;
	rect.w = pCodecCtx->width;
	rect.h = pCodecCtx->height;
	SDL_DisplayYUVOverlay(bmp, &rect);
	av_free_packet(&packet);
      }
    } else if(packet.stream_index==audioStream) {
      packet_queue_put(&audioq, &packet);
    } else {
      av_free_packet(&packet);
    }
    // Free the packet that was allocated by av_read_frame
    SDL_PollEvent(&event);
    switch(event.type) {
    case SDL_QUIT:
      quit = 1;
      SDL_Quit();
      exit(0);
      break;
    default:
      break;
    }

  }

  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
Example #13
JNIEXPORT jint JNICALL Java_com_frank_ffmpeg_VideoPlayer_filter
        (JNIEnv * env, jclass clazz, jstring filePath, jobject surface, jstring filterDescr){

    int ret;
    const char * file_name = (*env)->GetStringUTFChars(env, filePath, JNI_FALSE);
    const char *filter_descr = (*env)->GetStringUTFChars(env, filterDescr, JNI_FALSE);
    //open the input file
    if(!is_playing){
        LOGI("open_input...");
        if((ret = open_input(env, file_name, surface)) < 0){
            LOGE("Couldn't allocate video frame.");
            goto end;
        }
        //register all filters
        avfilter_register_all();
        filter_frame = av_frame_alloc();
        if(filter_frame == NULL) {
            LOGE("Couldn't allocate filter frame.");
            ret = -1;
            goto end;
        }
        //initialize the audio decoder
        if ((ret = init_audio(env, clazz)) < 0){
            LOGE("Couldn't init_audio.");
            goto end;
        }

    }

    //initialize the filter graph
    if ((ret = init_filters(filter_descr)) < 0){
        LOGE("init_filter error, ret=%d\n", ret);
        goto end;
    }

    is_playing = 1;
    int frameFinished;
    AVPacket packet;

    while(av_read_frame(pFormatCtx, &packet)>=0 && !release) {
        //filter switched: exit the current playback loop
        if(again){
            goto again;
        }
        //check whether this packet belongs to the video stream
        if(packet.stream_index == video_stream_index) {
            //decode this frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

            if (frameFinished) {
                //feed the decoded video frame into the filter_graph
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, pFrame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    LOGE("Error while feeding the filter_graph\n");
                    break;
                }
                //pull the filtered video frame out of the filter graph
                ret = av_buffersink_get_frame(buffersink_ctx, filter_frame);
                if (ret >= 0){
                    // lock native window
                    ANativeWindow_lock(nativeWindow, &windowBuffer, 0);
                    // pixel format conversion
                    sws_scale(sws_ctx, (uint8_t const * const *)filter_frame->data,
                              filter_frame->linesize, 0, pCodecCtx->height,
                              pFrameRGBA->data, pFrameRGBA->linesize);
                    // get the stride
                    uint8_t * dst = windowBuffer.bits;
                    int dstStride = windowBuffer.stride * 4;
                    uint8_t * src = pFrameRGBA->data[0];
                    int srcStride = pFrameRGBA->linesize[0];
                    // the window stride differs from the frame stride, so copy row by row
                    int h;
                    for (h = 0; h < pCodecCtx->height; h++) {
                        memcpy(dst + h * dstStride, src + h * srcStride, (size_t) srcStride);
                    }
                    ANativeWindow_unlockAndPost(nativeWindow);
                }
                av_frame_unref(filter_frame);
            }
            //sleep to pace video playback
            if (!playAudio){
                usleep((unsigned long) (1000 * 40));//1000 * 40
            }
        } else if(packet.stream_index == audio_stream_index){//audio frame
            if (playAudio){
                play_audio(env, &packet, pFrame);
            }
        }
        av_packet_unref(&packet);
    }
    end:
    is_playing = 0;
    //free memory and close files
    av_free(buffer);
    av_free(pFrameRGBA);
    av_frame_free(&filter_frame); // allocated with av_frame_alloc() above
    av_free(pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);
    avfilter_free(buffersrc_ctx);
    avfilter_free(buffersink_ctx);
    avfilter_graph_free(&filter_graph);
    avcodec_close(audioCodecCtx);
    sws_freeContext(sws_ctx); // SwsContext is freed with sws_freeContext(), not free()
    swr_free(&audio_swr_ctx); // SwrContext is freed with swr_free(), not free()
    free(out_buffer);
    // audio_track and audio_track_write_mid are JVM-managed JNI handles and
    // windowBuffer is a plain value; none of them may be passed to free().
    ANativeWindow_release(nativeWindow);
    (*env)->ReleaseStringUTFChars(env, filePath, file_name);
    (*env)->ReleaseStringUTFChars(env, filterDescr, filter_descr);
    LOGE("do release...");
    again:
    again = 0;
    LOGE("play again...");
    return ret;
}
Example #14
int main(int argc, char **argv)
{
    AVOutputFormat *ofmt = NULL;
    AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVDictionary *out_opts = NULL;
    AVPacket pkt;
    const char *fmt, *in_filename, *out_filename;
    int ret, i, cmp;
	int cnt = 0;

    if (argc < 4) {
        printf("usage: remuxing input output fmt [do_sff do_rate_emu]\n");
        return 1;
    }
	if(argc >= 5){
		do_sff = atoi(argv[4]);
	}
	if(argc >= 6){
		do_rate_emu = atoi(argv[5]);
	}

    in_filename  = argv[1];
    out_filename = argv[2];
	fmt = argv[3];

    av_register_all();
    avformat_network_init();

    if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
        fprintf(stderr, "Could not open input file '%s'", in_filename);
        goto end;
    }

    if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
        fprintf(stderr, "Failed to retrieve input stream information");
        goto end;
    }

    av_dump_format(ifmt_ctx, 0, in_filename, 0);

    avformat_alloc_output_context2(&ofmt_ctx, NULL, fmt, out_filename);
    if (!ofmt_ctx) {
        fprintf(stderr, "Could not create output context\n");
        ret = AVERROR_UNKNOWN;
        goto end;
    }

    ofmt = ofmt_ctx->oformat;

    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        AVStream *in_stream = ifmt_ctx->streams[i];
        AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
        if (!out_stream) {
            fprintf(stderr, "Failed allocating output stream\n");
            ret = AVERROR_UNKNOWN;
            goto end;
        }
		
		avpriv_set_pts_info(out_stream, in_stream->pts_wrap_bits, in_stream->time_base.num, in_stream->time_base.den);
        ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
        if (ret < 0) {
            fprintf(stderr, "Failed to copy context from input to output stream codec context\n");
            goto end;
        }
        out_stream->codec->codec_tag = 0;
        if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
            out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }

	if(out_filename && strstr(out_filename, ".m3u8")){
		av_opt_set_int(ofmt_ctx, "hls_wrap",  6, AV_OPT_SEARCH_CHILDREN);
		av_opt_set_int(ofmt_ctx, "hls_list_size",  6, AV_OPT_SEARCH_CHILDREN); 
		av_opt_set(ofmt_ctx, "hls_time", "1.0", AV_OPT_SEARCH_CHILDREN);
	}

    av_dump_format(ofmt_ctx, 0, out_filename, 1);

    if (!(ofmt->flags & AVFMT_NOFILE)) {
		av_dict_set(&out_opts, "chunked_post", "0", 0);	
        ret = avio_open2(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE|AVIO_FLAG_NONBLOCK, NULL, &out_opts);

        if (ret < 0) {
            fprintf(stderr, "Could not open output file '%s'", out_filename);
            goto end;
        }
    }

    ret = ofmt_ctx->pb && do_sff ? sff_write_header(ofmt_ctx) : avformat_write_header(ofmt_ctx, NULL);
    if (ret < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        goto end;
    }
	if(ofmt_ctx->pb){
		avio_flush(ofmt_ctx->pb);
	}

    while (1) {
        AVStream *in_stream, *out_stream;
		ret = av_read_frame(ifmt_ctx, &pkt);
        if (ret < 0)
            break;

        in_stream  = ifmt_ctx->streams[pkt.stream_index];
        out_stream = ofmt_ctx->streams[pkt.stream_index];
		
		i = pkt.stream_index;
		if(curr_dts[i] == AV_NOPTS_VALUE && pkt.dts != AV_NOPTS_VALUE){
			first_dts[i] = pkt.dts;
			start_time[i] = av_gettime_relative();
		}
		if(pkt.dts != AV_NOPTS_VALUE){	
			curr_dts[i] = pkt.dts; //us
		}

        /* copy packet */
        pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX);
        pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
        pkt.pos = -1;
		
		ret = ofmt_ctx->pb && do_sff ? sff_write_packet(ofmt_ctx, &pkt) : av_interleaved_write_frame(ofmt_ctx, &pkt); 
		if(ofmt_ctx->pb){
			avio_flush(ofmt_ctx->pb);
		}
        if (ret < 0) {
            fprintf(stderr, "Error muxing packet\n");
            break;
        }
        av_free_packet(&pkt);
		++cnt;
		//printf("cnt %d\t", cnt);
		
		do{
			curr_time[i] = av_gettime_relative();
			cmp = av_compare_ts(curr_dts[i] - first_dts[i], in_stream->time_base, 
					curr_time[i] - start_time[i], AV_TIME_BASE_Q); 
			if(!do_rate_emu || cmp <= 0)break;
			
			av_usleep(10000);
		}while(cmp > 0);
		
    }
	
	ofmt_ctx->pb && do_sff ? sff_write_packet(ofmt_ctx, NULL) : av_write_trailer(ofmt_ctx); 
end:

    avformat_close_input(&ifmt_ctx);

    /* close output */
    if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE) && ofmt_ctx->pb){
        avio_close(ofmt_ctx->pb);
		av_dict_free(&out_opts);
	}
    avformat_free_context(ofmt_ctx);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        return 1;
    }
	
	printf("end of remux\n");
    return 0;
}
Example #15
//Returns -1 if parsing fails, -2 if there is no blocking suggestion; any other value means a blocking suggestion
int content_analysis2(char* videofile, int mn)
{
	AVFormatContext *pFormatCtx=NULL;
	AVCodecContext *pCodecCtx=NULL;
	AVCodec *pCodec=NULL;
	AVFrame *pFrame=NULL;
	AVFrame *pFrameYUV=NULL;
	struct SwsContext *img_convert_ctx;
	AVPacket packet;
	uint8_t *buffer=NULL;
	uint8_t *base_buffer=NULL;
	int base_flag = 1;
	int frameFinished;
	int videoStream = 0;
	int matchFrameNum = 0;

	//
	//unsigned char* yuvdata = NULL;
	int numBytes;
	int total_frame = 0;
	int width,height,linesize;
	int video_id=0,hit_id=0;
	uint8_t* ptr = NULL;



	int keynum = 0;
	int skipframe = 0;
	int frameRate = 0;
	int framecount = 0;

	//av_log_set_flags(AV_LOG_SKIP_REPEATED);

	//av_log_set_level(AV_LOG_DEBUG);
	avcodec_register_all();
	av_register_all();
	avformat_network_init();

	pFormatCtx = avformat_alloc_context();
	//pFormatCtx->interrupt_callback.callback =  decode_interrupt_cb;
	if(avformat_open_input(&pFormatCtx,videofile,NULL,NULL) < 0){
		//if(av_open_input_file(&pFormatCtx, videofile, NULL, 0, NULL) < 0){
		fprintf(stderr, "Couldn't Open video file %s\n",videofile);
		return -1;
	}

	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
	//	av_close_input_file(pFormatCtx);
		fprintf(stderr, "av_find_stream_info error\n");
		return -1; // Couldn't open file
	}

	//== find video stream
	int i;
	videoStream=-1;
	for(i=0;i<pFormatCtx->nb_streams;i++){
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoStream=i;
			break;
		}
	}

	if(videoStream==-1){
	//	av_close_input_file(pFormatCtx);
		fprintf(stderr, "Didn't find a video stream and pFormatCtx->nb_streams is %d\n",pFormatCtx->nb_streams);
		return -1; // Didn't find a video stream
	}

	pCodecCtx=pFormatCtx->streams[videoStream]->codec;
	if(pCodecCtx==NULL){
	//	av_close_input_file(pFormatCtx);

		fprintf(stderr, "Codec not found\n");
		return -1; // Codec not found
	}
	printf("%d\n",pCodecCtx->codec_id);

	// 	if(pCodecCtx->codec_id == CODEC_ID_H263)
	// 	{
	// 		pCodecCtx->codec_id = CODEC_ID_H264;
	// 	}

	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		if(pCodecCtx!=NULL)
			avcodec_close(pCodecCtx);
	//	if(pFormatCtx!=NULL)
	//		av_close_input_file(pFormatCtx);
		return -1;
	}



	//  	if(pCodecCtx->codec_id != CODEC_ID_MPEG4 && pCodecCtx->codec_id!=CODEC_ID_MJPEG)
	//  	{
	// 		if(pCodec->capabilities&CODEC_CAP_TRUNCATED){
	// 			pCodecCtx->flags|=CODEC_FLAG_TRUNCATED;
	// 		}
	// 	}

	if(pCodecCtx->pix_fmt == PIX_FMT_NONE)
	{
		//avcodec_close(pCodecCtx);
		if(pCodecCtx!=NULL)
			avcodec_close(pCodecCtx);
	//	if(pFormatCtx!=NULL)
	//		av_close_input_file(pFormatCtx);
		return -1; // invalid pixel format
	}

	//== open decoder
	if(avcodec_open2(pCodecCtx,pCodec, NULL)<0){
		if(pCodecCtx!=NULL)
			avcodec_close(pCodecCtx);
	//	if(pFormatCtx!=NULL)
	//		av_close_input_file(pFormatCtx);
		return -1; // Could not open codec
	}

	// 	int frame_width = (pCodecCtx->width/4)*4;
	// 	int frame_height = (pCodecCtx->height/4)*4;
	int frame_width = pCodecCtx->width;
	int frame_height = pCodecCtx->height;
	int frame_pix_fmt = pCodecCtx->pix_fmt;

	pFrame = avcodec_alloc_frame();
	if(pFrame== NULL)
	{
		avcodec_close(pCodecCtx);
	//	av_close_input_file(pFormatCtx);
		fprintf(stderr, "pFrame==NULL\n");
		return -1;
	}

	pFrameYUV=avcodec_alloc_frame();
	if(pFrameYUV==NULL){
		av_free(pFrame);
		avcodec_close(pCodecCtx);
		fprintf(stderr, "pFrameYUV==NULL\n");
		return -1;
	}

	numBytes=avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width,pCodecCtx->height);

	buffer=(uint8_t*)calloc(numBytes,sizeof(uint8_t));
	base_buffer=(uint8_t*)calloc(numBytes,sizeof(uint8_t));
	if(buffer == NULL)
	{
		av_free(pFrame);
	//	av_close_input_file(pFormatCtx);
		return -1;
	}
	avpicture_fill((AVPicture *)pFrameYUV, buffer, PIX_FMT_YUV420P,pCodecCtx->width, pCodecCtx->height);
	img_convert_ctx = sws_getContext(pCodecCtx->width,pCodecCtx->height,pCodecCtx->pix_fmt,
		pCodecCtx->width,pCodecCtx->height,PIX_FMT_YUV420P,
		SWS_BICUBIC,NULL,NULL,NULL);


	if (mn==0) mn=0x7fffffff;
	int FlagIFrame = 0;
	while(av_read_frame(pFormatCtx,&packet)>=0)
	{
		//printf("I Frame = %d streamid = %d\n",packet.flags,packet.stream_index);
		if(packet.stream_index==videoStream/*&&packet.flags*/)
		{
			//avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,packet.data, packet.size);
			// 			if(packet.pos > 1600000)
			// 				printf("%d\n",packet.pos);
			// 			printf("%d\n",packet.pos);
			// 			if(packet.flags != 1)
			// 				continue;
			int nRet = avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,&packet);



			if(nRet < 0)
			{
				av_free_packet(&packet);
				continue;
			}
			// 			if(pFrame->pict_type != AV_PICTURE_TYPE_I)
			// 				continue;
			if(frameFinished)
			{

				if(frame_width   != pFrame->width  ||
					frame_height  != pFrame->height ||
					frame_pix_fmt != pFrame->format)
				{
					printf("error\n");
					av_free_packet(&packet);
					break;
				}


				int offset = 0;
				width = pCodecCtx->width;
				height = pCodecCtx->height;
				if(frame_height!=height||frame_width!=width)
				{
					av_free_packet(&packet);
					break;
				}
				sws_scale(img_convert_ctx,(const uint8_t* const*)pFrame->data,pFrame->linesize,0,pCodecCtx->height,pFrameYUV->data,pFrameYUV->linesize);
				// 				printf("%d*%d\n",frame_width,frame_height);
				// 				printf("%d*%d\n",pCodecCtx->width,pCodecCtx->height);
				int mi;
				for( mi=0;mi<3;mi++) {
					ptr = pFrameYUV->data[mi];
					linesize = pFrameYUV->linesize[mi];
					if (mi == 1) {
						width = (width+1)/2;
						height = (height+1)/2;
					}
					int j;
					for( j=0;j<height;j++) {
						memcpy(buffer+offset,ptr,width*sizeof(uint8_t));
						ptr += linesize;
						offset += width;
					}
				}

				if(base_flag == 1)
				{
					memcpy(base_buffer,buffer,numBytes);
					base_flag = 0;
					continue;
				}
				skipframe++;
				//roi_rect roiRect = trim_blacl_edge(buffer,pCodecCtx->width, pCodecCtx->height);
				int ret = calc_frame_diff(base_buffer,buffer,pCodecCtx->width, pCodecCtx->height);
				if(ret>10||skipframe>30)
				{
					base_flag = 1;
					total_frame++;
					printf("decode frame: %d %d\n",total_frame,ret);
					char outfilename[32];
					sprintf(outfilename,"bmp\\frame-%d-%d.bmp",total_frame,ret);

					calc_subtitle_diff(buffer,pCodecCtx->width, pCodecCtx->height);
					//SaveFrame(buffer, pCodecCtx->width, pCodecCtx->height, video_id, outfilename);
					saveYUV2BMP(buffer,pCodecCtx->width, pCodecCtx->height,outfilename);
					skipframe = 0;
				}

				//video_id = feature_match(buffer,pCodecCtx->width, pCodecCtx->height);
				//SaveFrame(buffer, pCodecCtx->width, pCodecCtx->height, video_id, videofile);
				if (video_id > 0) {
					printf("match frame: %d, %d, %d\n", hit_id, video_id, total_frame);
					//SaveFrame(buffer, pCodecCtx->width, pCodecCtx->height, video_id,videofile);
					//	hitID[i] = video_id;
					hit_id++;
				}

				//total_frame++;
				if (hit_id > mn) {
					av_free_packet(&packet);
					break;
				}


				if(total_frame>2000&&!FlagIFrame)
				{
					FlagIFrame = 1;
				}
			}
		}
		av_free_packet(&packet);

	}

	printf("decode frame:%d %d\n",hit_id,total_frame);



	if(pFrame!=NULL)
		av_free(pFrame);
	if(pFrameYUV!=NULL)
		av_free(pFrameYUV);
	if(buffer!=NULL)
		free(buffer);
	if(base_buffer!=NULL)
		free(base_buffer);
	sws_freeContext(img_convert_ctx);

	if(pCodecCtx!=NULL)
		avcodec_close(pCodecCtx);
	if(pFormatCtx!=NULL)
		avformat_close_input(&pFormatCtx);

	return hit_id;

}
int main(int argc, char* argv[])
{
	AVOutputFormat *ofmt = NULL;
	//Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx = NULL;
	AVPacket pkt;
	const char *in_filename, *out_filename;
	int ret, i;
	int videoindex=-1;
	int frame_index=0;
	int64_t start_time=0;
	//in_filename  = "cuc_ieschool.mov";
	//in_filename  = "cuc_ieschool.mkv";
	//in_filename  = "cuc_ieschool.ts";
	//in_filename  = "cuc_ieschool.mp4";
	//in_filename  = "cuc_ieschool.h264";
	in_filename  = "cuc_ieschool.flv";//Input file URL
	//in_filename  = "shanghai03_p.h264";

	//out_filename = "rtmp://localhost/publishlive/livestream";//Output URL [RTMP]
	out_filename = "http://192.168.0.53:10000";//Output URL [HTTP] (overrides the RTMP URL above)
	//out_filename = "rtp://233.233.233.233:6666";//Output URL [UDP]

	av_register_all();
	//Network
	avformat_network_init();
	//Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf( "Could not open input file.\n");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf( "Failed to retrieve input stream information\n");
		goto end;
	}

	for(i=0; i<ifmt_ctx->nb_streams; i++) 
		if(ifmt_ctx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}

	av_dump_format(ifmt_ctx, 0, in_filename, 0);

	//Output
	
	avformat_alloc_output_context2(&ofmt_ctx, NULL, "flv", out_filename); //RTMP
	//avformat_alloc_output_context2(&ofmt_ctx, NULL, "mpegts", out_filename);//UDP

	if (!ofmt_ctx) {
		printf( "Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt = ofmt_ctx->oformat;
	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		//Create output AVStream according to input AVStream
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = avformat_new_stream(ofmt_ctx, in_stream->codec->codec);
		if (!out_stream) {
			printf( "Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		//Copy the settings of AVCodecContext
		ret = avcodec_copy_context(out_stream->codec, in_stream->codec);
		if (ret < 0) {
			printf( "Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;
		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	//Dump Format------------------
	av_dump_format(ofmt_ctx, 0, out_filename, 1);
	//Open output URL
	if (!(ofmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&ofmt_ctx->pb, out_filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			printf( "Could not open output URL '%s'", out_filename);
			goto end;
		}
	}
	//Write file header
	ret = avformat_write_header(ofmt_ctx, NULL);
	if (ret < 0) {
		printf( "Error occurred when opening output URL\n");
		goto end;
	}

	start_time=av_gettime();
	while (1) {
		AVStream *in_stream, *out_stream;
		//Get an AVPacket
		ret = av_read_frame(ifmt_ctx, &pkt);
		if (ret < 0)
			break;
		//FIX:No PTS (Example: Raw H.264)
		//Simple Write PTS
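		//calc_duration below is the per-frame duration in microseconds
		//(AV_TIME_BASE / fps); dividing by av_q2d(time_base1)*AV_TIME_BASE,
		//i.e. microseconds per time_base tick, converts it into ticks of the
		//stream's own time base for pts/dts/duration.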
		if(pkt.pts==AV_NOPTS_VALUE){
			//Write PTS
			AVRational time_base1=ifmt_ctx->streams[videoindex]->time_base;
			//Duration between 2 frames (us)
			int64_t calc_duration=(double)AV_TIME_BASE/av_q2d(ifmt_ctx->streams[videoindex]->r_frame_rate);
			//Parameters
			pkt.pts=(double)(frame_index*calc_duration)/(double)(av_q2d(time_base1)*AV_TIME_BASE);
			pkt.dts=pkt.pts;
			pkt.duration=(double)calc_duration/(double)(av_q2d(time_base1)*AV_TIME_BASE);
		}
		//Important:Delay
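		//Rescale dts into the {1, AV_TIME_BASE} microsecond clock and sleep
		//until wall-clock time catches up, so packets are pushed to the output
		//URL at roughly realtime speed rather than as fast as the input file
		//can be read.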
		if(pkt.stream_index==videoindex){
			AVRational time_base=ifmt_ctx->streams[videoindex]->time_base;
			AVRational time_base_q={1,AV_TIME_BASE};
			int64_t pts_time = av_rescale_q(pkt.dts, time_base, time_base_q);
			int64_t now_time = av_gettime() - start_time;
			if (pts_time > now_time)
				av_usleep(pts_time - now_time);

		}

		in_stream  = ifmt_ctx->streams[pkt.stream_index];
		out_stream = ofmt_ctx->streams[pkt.stream_index];
		/* copy packet */
		//Convert PTS/DTS
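		//av_rescale_q_rnd() converts timestamps from the input stream's time
		//base to the output stream's; AV_ROUND_PASS_MINMAX passes
		//AV_NOPTS_VALUE through unchanged instead of rescaling it into a bogus
		//timestamp.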
		pkt.pts = av_rescale_q_rnd(pkt.pts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.dts = av_rescale_q_rnd(pkt.dts, in_stream->time_base, out_stream->time_base, (AVRounding)(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
		pkt.duration = av_rescale_q(pkt.duration, in_stream->time_base, out_stream->time_base);
		pkt.pos = -1;
		//Print to Screen
		if(pkt.stream_index==videoindex){
			printf("Send %8d video frames to output URL\n",frame_index);
			frame_index++;
		}
		//ret = av_write_frame(ofmt_ctx, &pkt);
		ret = av_interleaved_write_frame(ofmt_ctx, &pkt);

		if (ret < 0) {
			char err[1024] = { 0 };
			av_strerror(ret, err, sizeof(err));
			printf( "Error muxing packet: %s\n", err);
			break;
		}
		
		av_free_packet(&pkt);
		
	}
	//Write file trailer
	av_write_trailer(ofmt_ctx);
end:
	avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx && !(ofmt->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx->pb);
	avformat_free_context(ofmt_ctx);
	if (ret < 0 && ret != AVERROR_EOF) {
		printf( "Error occurred.\n");
		return -1;
	}
	return 0;
}
示例#17
0
File: fwServer.cpp  Project: huangyt/FLOW
int main(int argc, char **argv)
{
    /*
     int i;
     char    b[40];
     char    c[21];
     HANDLE hConsole;
     int k;
     
     #ifdef _DEBUG
     _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
     #endif
     
     for(i = 0; i < sizeof(b); ++i )
     {
     b[i] = ' ';
     }
     b[255] = '\0';
     
     system("color 5");
     for(i = 0; i < 20; ++i) {
     c[i] = '>';
     c[i+1] = '\0';
     printf("Progress |%s%*s|\r",c,19-i,&"");
     Sleep(100);
     //printf("%s\r", b);
     }
     printf("\n");
     printf("sizeof(structa_t) = %d\n", sizeof(structa_t));
     printf("sizeof(structb_t) = %d\n", sizeof(structb_t));
     printf("sizeof(structc_t) = %d\n", sizeof(structc_t));
     printf("sizeof(structd_t) = %d\n", AV_TIME_BASE);
     
     hConsole = GetStdHandle(STD_OUTPUT_HANDLE);
     
     // you can loop k higher to see more color choices
     for(k = 1; k < 255; k++)
     {
     // pick the colorattribute k you want
     SetConsoleTextAttribute(hConsole, k);
     printf("%d I want to be nice today!",k);
     }*/
    
    AVFormatContext             *in_ctx = NULL, *out_ctx = NULL;
    AVInputFormat               *file_iformat = NULL;
    AVOutputFormat              *out_fmt = NULL;
    AVFrame                     *frame = NULL, *frameRGB = NULL;
    AVStream                    *st = NULL;
    AVCodecContext              *codec_ctx = NULL, *pCodecCtx = NULL;
    AVCodec                     *codec = NULL, *pCodec = NULL;
    AVCodec                     dummy_codec = {0};
    AVPacket                    pkt, p;
    AVBitStreamFilterContext    *bsf = NULL;
    struct SwsContext           *sws_ctx = NULL;
    BOOL                        tom = TRUE;
    char                        b[1024];
    int                         err, i, ret, frameFinished, numBytes;
    const char                  *src_filename = "final.mp4";
    int64_t                     timestamp;
    uint8_t                     buf[128];
    uint8_t                     *buffer = NULL;
    int                         video_stream_idx = -1;
    int                         audio_stream_idx = -1;
    FILE*                       sdp_file;
    
#ifdef _DEBUG
    _CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
#endif
    
    /* register all formats and codecs */
    av_register_all();
    avformat_network_init();
    av_log_set_level(AV_LOG_DEBUG);

    /* open input file, and allocate format context */
    ret = avformat_open_input(&in_ctx, src_filename, NULL, NULL);
    if (ret < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        PAUSE_EXIT(1);
    }
    in_ctx->flags |= AVFMT_FLAG_GENPTS;

    ret = avformat_find_stream_info(in_ctx, NULL);
    if (ret < 0) {
        av_log(NULL, AV_LOG_FATAL, "%s: could not find codec parameters\n", src_filename);
        avformat_close_input(&in_ctx);
        PAUSE_EXIT(1);
    }
    
    av_dump_format(in_ctx, 0, src_filename, 0);
    
    for (i = 0; i < in_ctx->nb_streams; i++) {
        AVStream        *st_ptr;
        AVCodecContext  *coctx_ptr;
        
        if (in_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
	  if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_MPEG4) {
	      bsf = av_bitstream_filter_init("dump_extra");
	  } else if (in_ctx->streams[i]->codec->codec_id == CODEC_ID_H264) {
	      fprintf(stderr, "Found h264 Stream\n");
	      bsf = av_bitstream_filter_init("h264_mp4toannexb");
	  } else {
	      bsf = NULL;
	  }
	  pCodecCtx=in_ctx->streams[i]->codec;
	  pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	  if(pCodec==NULL) {
	      fprintf(stderr, "Unsupported codec!\n");
	      return -1; // Codec not found
	  }
	  // Open codec
	  if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
	      return -1; // Could not open codec
	  
	  out_ctx = avformat_alloc_context();
	  out_fmt = av_guess_format("rtp", NULL, NULL);
	  if (!out_fmt) {
	      fprintf(stderr, "Unable for find the RTP format for output\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  out_ctx->oformat = out_fmt;
	  //out_ctx->flags |= AVFMT_FLAG_NONBLOCK;
	  
	  st = avformat_new_stream(out_ctx, 0);
	  if (!st) {
	      fprintf(stderr, "Cannot allocate stream\n");
	      avformat_close_input(&in_ctx);
	      PAUSE_EXIT(1);
	  }
	  
	  dummy_codec.type = in_ctx->streams[i]->codec->codec_type;
	  codec_ctx = st->codec;
	  avcodec_get_context_defaults3(codec_ctx, &dummy_codec);
	  avcodec_open2(codec_ctx, NULL, NULL);
	  codec_ctx->codec_type = in_ctx->streams[i]->codec->codec_type;
	  
	  /* FIXME: global headers stuff... */
	  
	  snprintf(out_ctx->filename, sizeof(out_ctx->filename), "rtp://%s:%d", "127.0.0.1", 55444);
	  
	  /* open the UDP sockets for RTP and RTCP */
	  if (!software_streaming) {
	      printf("Distant Connection\n");
	      ret = avio_open(&out_ctx->pb, out_ctx->filename, AVIO_FLAG_WRITE);
	      if (ret < 0) {
		fprintf(stderr, "Cannot open '%s'\n", out_ctx->filename);
		avformat_close_input(&in_ctx);
		PAUSE_EXIT(1);
	      }
	  } else {
	      ret = avio_open_dyn_buf(&out_ctx->pb);
	      out_ctx->pb->max_packet_size = 1460;
	      printf("MAX packet size = %d\n",out_ctx->pb->max_packet_size);
	  }
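	  // With avio_open_dyn_buf() the RTP muxer writes packets into a growing
	  // in-memory buffer rather than a socket; avio_close_dyn_buf() in the
	  // read loop below hands that buffer back so the raw RTP packets can be
	  // inspected or forwarded by hand. max_packet_size keeps each RTP packet
	  // within roughly one Ethernet MTU.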
	  st_ptr = in_ctx->streams[i];
	  coctx_ptr = st_ptr->codec;
	  
	  codec_ctx->codec_id = coctx_ptr->codec_id;
	  codec_ctx->codec_type = coctx_ptr->codec_type;
	  
	  if(!codec_ctx->codec_tag) {
	      codec_ctx->codec_tag = coctx_ptr->codec_tag;
	  }
	  codec_ctx->bit_rate = coctx_ptr->bit_rate;
	  printf("\n\n\n\nFIRE!!!!! %d %d\n\n\n\n", codec_ctx->profile, codec_ctx->level);
	  if(coctx_ptr->extradata_size) {
	      codec_ctx->extradata = (uint8_t*)av_malloc(coctx_ptr->extradata_size);
	      memcpy(codec_ctx->extradata, coctx_ptr->extradata, coctx_ptr->extradata_size);
	  } else {
	      codec_ctx->extradata = NULL;
	  }
	  
	  codec_ctx->extradata_size = coctx_ptr->extradata_size;
	  /* FIXME: ExtraData ??? */
	  if (codec_ctx->codec_id == CODEC_ID_H264) {
	      printf("BINGO\n");
	      extradata_convert(codec_ctx);
	  }
	  
	  if(out_ctx->oformat->flags & AVFMT_GLOBALHEADER)
	      codec_ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;
	  
	  if(av_q2d(coctx_ptr->time_base) > av_q2d(st_ptr->time_base) && av_q2d(st_ptr->time_base) < 1.0/1000) {
	      codec_ctx->time_base = coctx_ptr->time_base;
	  } else {
	      codec_ctx->time_base = st_ptr->time_base;
	  }
	  
	  switch(codec_ctx->codec_type) {
	      case AVMEDIA_TYPE_AUDIO:
		codec_ctx->sample_rate = coctx_ptr->sample_rate;
		codec_ctx->time_base.den = 1;
		codec_ctx->time_base.num = coctx_ptr->sample_rate;
		codec_ctx->channels = coctx_ptr->channels;
		codec_ctx->frame_size = coctx_ptr->frame_size;
		codec_ctx->block_align= coctx_ptr->block_align;
		
		break;
	      case AVMEDIA_TYPE_VIDEO:
		//printf("Pixel Format %d\n", coctx_ptr->pix_fmt);
		codec_ctx->pix_fmt = coctx_ptr->pix_fmt;
		codec_ctx->width = coctx_ptr->width;
		codec_ctx->height = coctx_ptr->height;
		codec_ctx->has_b_frames = coctx_ptr->has_b_frames;
		
		break;
	      default:
		fprintf(stderr, "Strange Codec Type %d\n", codec_ctx->codec_type);
		PAUSE_EXIT(1);
	  }
	  
	  
	  ret = avformat_write_header(out_ctx, NULL);
	  if (ret < 0) {
	      fprintf(stderr, "Cannot Initialize output stream %d\n", i);
	      //close_output(rtp_c->out_s[i]);
	      
	      continue;
	  }
	  av_dump_format(out_ctx, i, out_ctx->filename, 1);
        }
    }
    
    frame = avcodec_alloc_frame();
    frameRGB = avcodec_alloc_frame();
    
    // Determine required buffer size and allocate buffer
    numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
			  pCodecCtx->height);
    buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    printf("Allocated %d", numBytes);
    
    sws_ctx = sws_getContext (
     pCodecCtx->width,
     pCodecCtx->height,
     pCodecCtx->pix_fmt,
     pCodecCtx->width,
     pCodecCtx->height,
     PIX_FMT_RGB24,
     SWS_BILINEAR,
     NULL,
     NULL,
     NULL
     );
    
    // Assign appropriate parts of buffer to image planes in pFrameRGB
    // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
    // of AVPicture
    avpicture_fill((AVPicture *)frameRGB, buffer, PIX_FMT_RGB24,
		 pCodecCtx->width, pCodecCtx->height);
    
    av_sdp_create(&out_ctx,1,b,1024);
    printf("SDP : \n%s", b);
    sdp_file = fopen("rtp.sdp","w");
    fprintf(sdp_file, "%s",b);
    fclose(sdp_file);
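    // The SDP produced by av_sdp_create() describes the RTP session (payload
    // type, address, port); a receiver generally needs it to play the stream,
    // e.g. `ffplay rtp.sdp`, assuming an ffplay build is at hand.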

    i = 0;
    av_init_packet(&pkt);
    av_init_packet(&p);
    printf("Payload Size %d\n", *(uint8_t *)out_ctx->streams[0]->codec->extradata != 1);
    while (av_read_frame(in_ctx, &pkt) >= 0) {
        if (pkt.stream_index == 0) {
	  int res;
	  uint8_t *ptr;
	  uint16_t ptr16;
	  
	  if (avcodec_decode_video2(pCodecCtx, frame, &frameFinished, &pkt) < 0 ) {
	      fprintf(stderr, "Error decoding packet\n");
	  }
	   
	  /* if(frameFinished) {
	  // Convert the image from its native format to RGB
	  sws_scale
	  (
	  sws_ctx,
	  (uint8_t const * const *)frame->data,
	  frame->linesize,
	  0,
	  pCodecCtx->height,
	  frameRGB->data,
	  frameRGB->linesize
	  );
	   
	  // Save the frame to disk
	  if(++i<=5)
	      SaveFrame(frameRGB, pCodecCtx->width, pCodecCtx->height, i);
	  }*/
	  printf("PTS %lld DTS%lld\n",pkt.pts,pkt.dts);
	  printf("Got frame %s %d %s\n",STRING_BOOL(frameFinished), pkt.size, STRING_BOOL(pkt.flags & AV_PKT_FLAG_KEY));
	  //break;
	  /*ret = av_bitstream_filter_filter(bsf,
	   in_ctx->streams[pkt.stream_index]->codec,
	   NULL, &p.data, &p.size,
	   pkt.data, pkt.size, pkt.flags & AV_PKT_FLAG_KEY);
	   if(ret > 0) {
	   av_free_packet(&pkt);
	   p.destruct = av_destruct_packet;
	   } else if (ret < 0) {
	   fprintf(stderr, "%s failed for stream %d, codec %s: ",
	   bsf->filter->name,
	   pkt.stream_index,
	   in_ctx->streams[pkt.stream_index]->codec->codec->name);
	   fprintf(stderr, "%d\n", ret);
	   }
	   pkt = p;*/
	  
	  stream_convert(&pkt);
	  printf("pkt size %d %d\n",pkt.size, pkt.flags);
	  av_usleep(4000000);
	  
	  if (av_write_frame(out_ctx, &pkt) < 0)
	      printf("av_write_frame failed\n");
	  
	  int written_size = avio_close_dyn_buf(out_ctx->pb,&ptr);
	  printf("Written Size %d\n", written_size);
	  ((uint8_t*)&ptr16)[0] = *(ptr+2);
	  ((uint8_t*)&ptr16)[1] = *(ptr+3);
	  printf("CC adsasd%d\n", ptr16 );
	  printByte(ptr);
	  printByte(ptr+1);
	  //printf("Second Byte %d\n", *(ptr+1));
	  
	  parseStream(ptr, written_size);
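	  // The prints below pick apart the fixed 12-byte RTP header: byte 0
	  // carries version (2 bits), padding, extension and CSRC count; byte 1
	  // carries the marker bit and payload type; bytes 2-3 hold the sequence
	  // number, big-endian on the wire, which is why reading it as a native
	  // uint16_t can appear byte-swapped on little-endian hosts.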
	  
	  printf("Version %d\n",(*(ptr) & 0xC0) >> 6);
	  printf("Padding %d\n",(*(ptr) & 0x20) >  0);
	  printf("Ext %d\n",(*(ptr) & 0x10) >  0);
	  printf("CC %d\n",(*(ptr) & 0xF));
	  printf("Marker %d\n",(*(ptr+1) & 0x80) > 0);
	  printf("Type %u\n",(*(ptr+1)));
	  printf("Seq %d\n",(*((uint16_t*)((uint8_t*)ptr+2))));
	  av_free(ptr); // the buffer handed back by avio_close_dyn_buf() is owned by the caller
	  ret = avio_open_dyn_buf(&out_ctx->pb);
	  out_ctx->pb->max_packet_size = 1514;
        }
        
        av_free_packet(&pkt);
    }

    avformat_close_input(&in_ctx);
    return 0;
}
int main(int argc, char* argv[]) {
	printf("Read few frame and write to image\n");
	if(argc < 2) {
		printf("Missing input video file\n");
		return -1;
	}
	int ret = -1, i = 0, v_stream_idx = -1;
	char* vf_path = argv[1];
	AVFormatContext* fmt_ctx = NULL;
	AVCodecContext* codec_ctx = NULL;
	AVCodec* codec = NULL;
	AVPacket pkt;
	AVFrame* frm = NULL;

	av_register_all();
	ret = avformat_open_input(&fmt_ctx, vf_path, NULL, NULL);
	if(ret < 0){
		printf("Open video file %s failed \n", vf_path);
		goto end;
	}

	// avformat_find_stream_info() fills in the codec parameters (width,
	// height, pixel format) that the decoder setup and sws_getContext() rely
	// on, so it has to run before they do
	if(avformat_find_stream_info(fmt_ctx, NULL) < 0)
		return -1;

    av_dump_format(fmt_ctx, 0, argv[1], 0);

	for(i = 0; i < fmt_ctx->nb_streams; i++) {
		if(fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
			v_stream_idx = i;
			break;
		}
	}
	if(v_stream_idx == -1) {
		printf("Cannot find video stream\n");
		goto end;
	}else{
		printf("Video stream %d with resolution %dx%d\n", v_stream_idx,
			fmt_ctx->streams[v_stream_idx]->codecpar->width,
			fmt_ctx->streams[v_stream_idx]->codecpar->height);
	}

	codec_ctx = avcodec_alloc_context3(NULL);
	avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[v_stream_idx]->codecpar);

	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if(codec == NULL){
		printf("Unsupported codec for video file\n");
		goto end;
	}
	ret = avcodec_open2(codec_ctx, codec, NULL);
	if(ret < 0){
		printf("Can not open codec\n");
		goto end;
	}

	frm = av_frame_alloc();

	struct SwsContext *sws_ctx = NULL;
	AVFrame *pFrameRGB = NULL;
	int numBytes;
	uint8_t *buffer = NULL;

	// Allocate an AVFrame structure
	pFrameRGB = av_frame_alloc();
	if(pFrameRGB == NULL)
		return -1;

	// Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(AV_PIX_FMT_RGB24, codec_ctx->width,
			codec_ctx->height);
	buffer = (uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

	sws_ctx = sws_getContext(
		codec_ctx->width,
		codec_ctx->height,
		codec_ctx->pix_fmt,
		codec_ctx->width,
		codec_ctx->height,
		AV_PIX_FMT_RGB24,
		SWS_BILINEAR,
		NULL,
		NULL,
		NULL
	);

	if(sws_ctx == NULL) {
		printf("Can not use sws\n");
		goto end;
	}

	avpicture_fill((AVPicture *)pFrameRGB, buffer, AV_PIX_FMT_RGB24,
		 codec_ctx->width, codec_ctx->height);



	i=0;
	int ret1 = -1, ret2 = -1, fi = -1;
	while(av_read_frame(fmt_ctx, &pkt)>=0) {
		if(pkt.stream_index == v_stream_idx) {
			ret1 = avcodec_send_packet(codec_ctx, &pkt);
			ret2 = avcodec_receive_frame(codec_ctx, frm);
			printf("ret1 %d ret2 %d\n", ret1, ret2);
			// avcodec_decode_video2(codec_ctx, frm, &fi, &pkt);
		}
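		// avcodec_send_packet()/avcodec_receive_frame() form the decoupled
		// decode API: send may queue the packet (or report AVERROR(EAGAIN)),
		// while receive returns 0 only once a full frame is ready, so the
		// ret2 >= 0 check below plays the role of the old frameFinished flag.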
		// if not check ret2, error occur [swscaler @ 0x1cb3c40] bad src image pointers
		// ret2 same as fi
		// if(fi && ++i <= 5) {
		if(ret2>= 0 && ++i <= 5) {
	        sws_scale
		        (
		            sws_ctx,
		            (uint8_t const * const *)frm->data,
		            frm->linesize,
		            0,
		            codec_ctx->height,
		            pFrameRGB->data,
		            pFrameRGB->linesize
		        );

			save_frame(pFrameRGB, codec_ctx->width, codec_ctx->height, i);
			// save_frame(frm, codec_ctx->width, codec_ctx->height, i);
		}
		av_packet_unref(&pkt);
		if(i>=5){
			break;
		}
	}

	av_frame_free(&frm);
	av_frame_free(&pFrameRGB);
	av_free(buffer);
	sws_freeContext(sws_ctx);

	avcodec_close(codec_ctx);
	avcodec_free_context(&codec_ctx);
	end:
	avformat_close_input(&fmt_ctx);
	printf("Shutdown\n");
	return 0;
}
示例#19
0
void * thread_routine(void *arg)
{
	struct mypara *recv_para = (struct mypara *)arg;  // received parameter data
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame, *pFrameYUV;
	unsigned char *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	//char filepath[]="bigbuckbunny_480x272.h265";
	char filepath[] = "rtsp://192.168.131.4/0";
	//SDL---------------------------
	int screen_w = 0, screen_h = 0;
	SDL_Window *screen;
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect, sdlRect_tmp;

	FILE *fp_yuv;

	//av_register_all();
	//avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) != 0){
		printf("Couldn't open input stream.\n");
		return NULL; // the thread routine returns void*, so -1 is not a valid return value
	}
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0){
		printf("Couldn't find stream information.\n");
		return NULL;
	}
	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO){
			videoindex = i;
			break;
		}
	if (videoindex == -1){
		printf("Didn't find a video stream.\n");
		return NULL;
	}

	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL){
		printf("Codec not found.\n");
		return NULL;
	}
	//pthread_mutex_lock(&mutex);
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0){
		printf("Could not open codec.\n");
		return NULL;
	}
	//pthread_mutex_unlock(&mutex);

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (unsigned char *)av_malloc(av_image_get_buffer_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1));
	av_image_fill_arrays(pFrameYUV->data, pFrameYUV->linesize, out_buffer,
		AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height, 1);
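	// Above: av_image_get_buffer_size() sizes a YUV420P image with alignment 1
	// (tightly packed), and av_image_fill_arrays() points pFrameYUV's
	// data[]/linesize[] at the right offsets inside out_buffer without copying
	// any pixels.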

	packet = (AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx, 0, filepath, 0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

#if OUTPUT_YUV420P 
	fp_yuv = fopen("output.yuv", "wb+");
#endif  

	//if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
	//	printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
	//	return -1;
	//} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	//screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
	//	screen_w*2, screen_h,
	//	SDL_WINDOW_OPENGL);

	screen = (*recv_para).screen; //get the screen
	if (!screen) {
		printf("SDL: could not create window - exiting:%s\n", SDL_GetError());
		return NULL;
	}

	//sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	sdlRenderer = (*recv_para).sdlRenderer;//get the sdlRenderer
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	pthread_mutex_lock(&mutex);
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING, pCodecCtx->width, pCodecCtx->height);
	pthread_mutex_unlock(&mutex);


	//temp sdlRect for render copy
	sdlRect_tmp.x = 0;
	sdlRect_tmp.y = 0;
	sdlRect_tmp.w = screen_w;
	sdlRect_tmp.h = screen_h;

	//four rect in one line
	// total 4*4 = 16 rect
	sdlRect.x = 0 + screen_w / 2 * ((*recv_para).id % 4);
	sdlRect.y = 0 + screen_h / 2 * ((*recv_para).id / 4);
	sdlRect.w = screen_w / 2;
	sdlRect.h = screen_h / 2;
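	// Grid placement above: each stream renders into a screen_w/2 x screen_h/2
	// cell, with id%4 picking the column and id/4 the row; this assumes the
	// shared window is sized to hold the 4x4 grid.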


	//SDL End----------------------
	while (thread_exit && av_read_frame(pFormatCtx, packet) >= 0){
		if (packet->stream_index == videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if (ret < 0){
				printf("Decode Error.\n");
				return NULL;
			}
			if (got_picture){
				//printf("id:%d\n",(*recv_para).id); //打印线程id
				//printf("x_pos:%d   y_pos:%d\n",sdlRect.x,sdlRect.y); //print rect position
				sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);

#if OUTPUT_YUV420P
				y_size = pCodecCtx->width*pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture(sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0]);
#else
				pthread_mutex_lock(&mutex);  //mutex or SEGFAULT
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect_tmp,//sdl tmp
					pFrameYUV->data[0], pFrameYUV->linesize[0],
					pFrameYUV->data[1], pFrameYUV->linesize[1],
					pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				//SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
				//SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect1);  
				SDL_RenderPresent(sdlRenderer);
				pthread_mutex_unlock(&mutex);
				//SDL End-----------------------
				//Delay 40ms
				//SDL_Delay(40);
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: Flush frames remaining in the codec by feeding it an empty packet
	packet->data = NULL;
	packet->size = 0;
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const unsigned char* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size = pCodecCtx->width*pCodecCtx->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
#endif
		//SDL---------------------------

		SDL_UpdateTexture(sdlTexture, &sdlRect, pFrameYUV->data[0], pFrameYUV->linesize[0]);
		SDL_RenderClear(sdlRenderer);
		SDL_RenderCopy(sdlRenderer, sdlTexture, NULL, &sdlRect);
		SDL_RenderPresent(sdlRenderer);
		//SDL End-----------------------
		//Delay 40ms
		//SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
	fclose(fp_yuv);
#endif 

	SDL_RenderClear(sdlRenderer);
	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	av_free(out_buffer);
	av_free(packet);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return NULL;
}
示例#20
0
int main(int argc, char **argv)
{
	av_register_all();
	AVFormatContext *pFormatCtx = NULL;
	AVInputFormat *file_iformat = NULL;
	
	//avio_set_interrupt_cb(decode_interrupt_cb);	
	//Open video file
	printf("open video file:%s\n", argv[1]);
	if(avformat_open_input(&pFormatCtx, argv[1], file_iformat, NULL) < 0)
	{
		printf("canot open input file: %s\n", argv[1]);
		return -1; //Cannot open file
	}
	printf("open input file: %s OK\n", argv[1]);
	//Retrieve stream information
	if(av_find_stream_info(pFormatCtx) < 0)
		return -1;//cannot find stream infomation
	//Dump information about file no to standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	int i;
	int videoStream;
	int audioStream;
	videoStream = -1;
	audioStream = -1;
	AVCodecContext *vCodecCtx;
	AVCodecContext *aCodecCtx;
	//Find the first video stream
	for(i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) 
		{
			videoStream = i;
		}
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0)
		{
			audioStream = i;	
		}
	}
	if(videoStream == -1)
	{
		printf("no video stream\n");
		return -1;//Did not find a video stream
	}
	if(audioStream == -1)
	{
		printf("no audio stream\n");
		return -1;//Did not find a audio stream
	}
	printf("find video strean: %d\n", videoStream);
	printf("find audio strean: %d\n", audioStream);

	//Get a pointer to the codec context for the video stream
	vCodecCtx = pFormatCtx->streams[videoStream]->codec;
	//set vCodecCtx for vdpau
	vCodecCtx->get_format = decoder_get_format;
	vCodecCtx->get_buffer = decoder_get_buffer;
	vCodecCtx->release_buffer = decoder_release_buffer;
	vCodecCtx->draw_horiz_band = decoder_draw_horiz_band;
	vCodecCtx->reget_buffer = decoder_get_buffer;
	vCodecCtx->slice_flags = SLICE_FLAG_CODEC_ORDER | SLICE_FLAG_ALLOW_FIELD;

	AVCodec *vCodec;
	vCodec = avcodec_find_decoder(vCodecCtx->codec_id);
	if(vCodec == NULL)
	{
		fprintf(stderr, "Unsupported video codec\n");
		return -1;//codec not find
	}
	//Open video codec
	if(avcodec_open(vCodecCtx, vCodec) < 0)
	{
		fprintf(stderr, "open video codec error\n");
		return -1;//Could not open codec
	}

	

	//Get a pointer to the codec context for the audio stream
	aCodecCtx = pFormatCtx->streams[audioStream]->codec;
	static SDL_AudioSpec wanted_spec, spec;
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;
	if(SDL_OpenAudio(&wanted_spec, &spec) < 0)
	{	
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}
	AVCodec *aCodec;
	aCodec = avcodec_find_decoder(aCodecCtx->codec_id);
	if(aCodec == NULL)
	{
		fprintf(stderr, "Unsupport audio codec\n");
		return -1;//codec not found
	}
	if(avcodec_open(aCodecCtx, aCodec) < 0)
	{
		fprintf(stderr, "open avcodec error\n");
		return -1;
	}
	packet_queue_init(&audioq);
	SDL_PauseAudio(0);

	AVFrame *pFrame;
	//Allocate video frame
	pFrame = avcodec_alloc_frame();
	AVFrame *pFrameRGB;
	//Allocate an AVFrame structure
	pFrameRGB = avcodec_alloc_frame();
	if(pFrameRGB == NULL)
		return -1;
	uint8_t *buffer;
	int numBytes;
	//Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height);
	buffer = (uint8_t*)av_malloc(numBytes * sizeof(uint8_t));
	//Assign appropriate parts of buffer to image planes in pFrameRGB
	//Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	//of AVPicture
	avpicture_fill((AVPicture*)pFrameRGB, buffer, PIX_FMT_RGB24, vCodecCtx->width, vCodecCtx->height);
	
	if((SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)))
	{
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}
	SDL_Surface *screen;
	screen = SDL_SetVideoMode(vCodecCtx->width, vCodecCtx->height, 0, 0);
	if(!screen)
	{
		fprintf(stderr, "SDL: could not set video mode\n");
		exit(1);
	}
	SDL_Overlay *bmp;
	bmp = SDL_CreateYUVOverlay(vCodecCtx->width, vCodecCtx->height, SDL_YV12_OVERLAY, screen);

	int frameFinished;
	AVPacket packet;
	SDL_Rect rect;
	i = 0;
	while(av_read_frame(pFormatCtx, &packet) >=0)
	{
		//is this a packet from video stream?
		if(packet.stream_index == videoStream)
		{
			//Decoder video frame
			avcodec_decode_video2(vCodecCtx, pFrame, &frameFinished, &packet);
			//Did we get a video frame?
			if(frameFinished)
			{
				//Convert the image to RGB for OpenGL upload; scale into the
				//pre-allocated pFrameRGB instead of an uninitialized AVPicture
				static struct SwsContext *img_convert_ctx;
				img_convert_ctx = sws_getCachedContext(img_convert_ctx,
				           vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt,
				           vCodecCtx->width, vCodecCtx->height, PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
				sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
				           0, vCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

				/*
				usleep(40 * 1000);
				SDL_LockYUVOverlay(bmp);
				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];
				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];
				//Convert the image into YUV format that SDL uses
				static struct SwsContext *img_convert_ctx;
				img_convert_ctx = sws_getCachedContext(img_convert_ctx,
		                   vCodecCtx->width, vCodecCtx->height, vCodecCtx->pix_fmt,
			               vCodecCtx->width, vCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
		        sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize,
		                   0, pFrame->height, pict.data, pict.linesize);
				SDL_UnlockYUVOverlay(bmp); 
				rect.x = 0;
				rect.y = 0;
				rect.w = vCodecCtx->width;
				rect.h = vCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
				*/
			}
			//Free the packet that was allocated by av_read_frame
			av_free_packet(&packet);	
			/*
			SDL_Event event;
			SDL_PollEvent(&event);
			switch(event.type)
			{
				case SDL_QUIT:
					quit = 1;
					SDL_Quit();
					exit(0);
					break;
				default:
					break;
			}
			*/
		}
		else if(packet.stream_index == audioStream)
		{
			packet_queue_put(&audioq, &packet);
		}
		else
		{
			av_free_packet(&packet);
		}
	}
	//Free the RGB image
	av_free(buffer);
	av_free(pFrameRGB);
	//Free the YUV frame
	av_free(pFrame);
	//Close the codec
	avcodec_close(vCodecCtx);
	//Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
示例#21
0
bool CFFmpegImage::Initialize(unsigned char* buffer, unsigned int bufSize)
{
  uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
  if (!fbuffer)
  {
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE");
    return false;
  }
  m_buf.data = buffer;
  m_buf.size = bufSize;
  m_buf.pos = 0;

  m_ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &m_buf,
    mem_file_read, NULL, mem_file_seek);

  if (!m_ioctx)
  {
    av_free(fbuffer);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
    return false;
  }

  m_fctx = avformat_alloc_context();
  if (!m_fctx)
  {
    FreeIOCtx(&m_ioctx);
    CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
    return false;
  }

  m_fctx->pb = m_ioctx;
  m_ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

  // Some clients have PNGs saved as JPEG, or ask us for PNG but supply JPEG;
  // MythTV throws all mimetypes away and asks us with application/octet-stream.
  // This is a poor man's fallback to at least identify PNG / JPEG.
  bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF);
  bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G');
  bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*');
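  // The magic-byte probes above: JPEG begins with FF D8 FF, the PNG signature
  // is 89 'P' 'N' 'G' (hence buffer[1..3]), and little-endian TIFF begins
  // with "II*".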

  AVInputFormat* inp = nullptr;
  if (is_jpeg)
    inp = av_find_input_format("jpeg_pipe");
  else if (m_strMimeType == "image/apng")
    inp = av_find_input_format("apng");
  else if (is_png)
    inp = av_find_input_format("png_pipe");
  else if (is_tiff)
    inp = av_find_input_format("tiff_pipe");
  else if (m_strMimeType == "image/jp2")
    inp = av_find_input_format("j2k_pipe");
  else if (m_strMimeType == "image/webp")
    inp = av_find_input_format("webp_pipe");
  // brute force parse if above check already failed
  else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    inp = av_find_input_format("jpeg_pipe");
  else if (m_strMimeType == "image/png")
    inp = av_find_input_format("png_pipe");
  else if (m_strMimeType == "image/tiff")
    inp = av_find_input_format("tiff_pipe");
  else if (m_strMimeType == "image/gif")
    inp = av_find_input_format("gif");

  if (avformat_open_input(&m_fctx, NULL, inp, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str());
    avformat_close_input(&m_fctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  AVCodecContext* codec_ctx = m_fctx->streams[0]->codec;
  AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
  if (avcodec_open2(codec_ctx, codec, NULL) < 0)
  {
    avformat_close_input(&m_fctx);
    FreeIOCtx(&m_ioctx);
    return false;
  }

  return true;
}
示例#22
0
File: ffdemuxer.c  Project: arkanis/smeb
int main(int argc, char** argv) {
	if (argc != 2)
		fprintf(stderr, "usage: %s webm-file\n", argv[0]), exit(1);
	
	char errmsg[512];
	// Switch stdin to non-blocking IO to test out a non-blocking av_read_frame()
	if ( fcntl(0, F_SETFL, fcntl(0, F_GETFL, NULL) | O_NONBLOCK) == -1 )
		perror("fcntl"), exit(1);
	
	av_register_all();
	
	AVInputFormat* webm_fmt = av_find_input_format("webm");
	AVFormatContext* demuxer = avformat_alloc_context();
	demuxer->flags |= AVFMT_FLAG_NONBLOCK;
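	// AVFMT_FLAG_NONBLOCK makes av_read_frame() return AVERROR(EAGAIN) rather
	// than blocking when the non-blocking input has nothing buffered; the read
	// loop below handles that case by sleeping 250 ms and retrying.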
	int error = avformat_open_input(&demuxer, argv[1], webm_fmt, NULL);
	//int error = avformat_open_input(&demuxer, "pipe:0", webm_fmt, NULL);
	if (error < 0)
		fprintf(stderr, "avformat_open_input(): %s\n", av_make_error_string(errmsg, sizeof(errmsg), error)), exit(1);
	
	printf("found %d streams:\n", demuxer->nb_streams);
	for(size_t i = 0; i < demuxer->nb_streams; i++) {
		AVStream* stream = demuxer->streams[i];
		printf("%d: time base %d/%d, codec: %s, extradata: %p, %d bytes\n",
			stream->index, stream->time_base.num, stream->time_base.den,
			stream->codec->codec_name, stream->codec->extradata, stream->codec->extradata_size);
		switch (stream->codec->codec_type) {
			case AVMEDIA_TYPE_VIDEO:
				printf("   video, w: %d, h: %d, sar: %d/%d, %dx%d\n",
					stream->codec->width, stream->codec->height, stream->sample_aspect_ratio.num, stream->sample_aspect_ratio.den,
					stream->codec->width * stream->sample_aspect_ratio.num / stream->sample_aspect_ratio.den, stream->codec->height);
				break;
			case AVMEDIA_TYPE_AUDIO:
				printf("   audio, %d channels, sampel rate: %d, bits per sample: %d\n",
					stream->codec->channels, stream->codec->sample_rate, stream->codec->bits_per_coded_sample);
				break;
			default:
				break;
		}
	}
	
	AVPacket packet;
	int ret = 0;
	while (true) {
		ret = av_read_frame(demuxer, &packet);
		if (ret == AVERROR(EAGAIN)) {
			printf("sleep\n");
			struct timespec duration = {0, 250 * 1000000};
			nanosleep(&duration, NULL);
			continue;
		} else if (ret != 0) {
			break;
		}
		
		if (packet.flags & AV_PKT_FLAG_KEY && packet.stream_index == 0)
			printf("keyframe: stream %d, pts: %lu, dts: %lu, duration: %d, buf: %p\n", packet.stream_index, packet.pts, packet.dts, packet.duration, packet.buf);
		
		av_free_packet(&packet);
	}
	
	avformat_close_input(&demuxer);
	
	return 0;
}
int main(int argc, char *argv[]) {
	// Decoder local variable declaration
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx = NULL;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	// Encoder local variable declaration
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st;
	AVCodec *video_codec;
	int ret, frame_count;
	StreamInfo sInfo;

	// Register all formats, codecs and network
	av_register_all();
	avcodec_register_all();
	avformat_network_init();

	// Open video file
	if (avformat_open_input(&pFormatCtx, "input_file.wmv", NULL, NULL) != 0)
		return -1; // Couldn't open file

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, "input_file.wmv", 0);

	// Find the first video stream
	videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1)
		return -1; // Didn't find a video stream

	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec (decoder)
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	// Setup mux
	filename = "output_file.flv";
	
	// To stream to a media server (e.g. FMS)
	// filename = "rtmp://chineseforall.org/live/beta";
	
	fmt = av_guess_format("flv", filename, NULL);
	if (fmt == NULL) {
		printf("Could not guess format.\n");
		return -1;
	}
	// allocate the output media context
	oc = avformat_alloc_context();
	if (oc == NULL) {
		printf("could not allocate context.\n");
		return -1;
	}

	// Set output format context to the format ffmpeg guessed
	oc->oformat = fmt;

	// Add the video stream using the h.264
	// codec and initialize the codec.
	video_st = NULL;
	sInfo.width = pFormatCtx->streams[videoStream]->codec->width;
	sInfo.height = pFormatCtx->streams[videoStream]->codec->height;
	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
	sInfo.frame_rate = 30;
	sInfo.bitrate = 450*1000;
	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

	// Now that all the parameters are set, we can open the audio and
	// video codecs and allocate the necessary encode buffers.
	if (video_st)
		open_video(oc, video_codec, video_st);

	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
			return 1;
		}
	}

	// dump output format
	av_dump_format(oc, 0, filename, 1);

	// Write the stream header, if any.
	ret = avformat_write_header(oc, NULL);
	if (ret < 0) {
		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
		return 1;
	}

	// Read frames, decode, and re-encode
	frame_count = 1;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if (frameFinished) {

				// Initialize a new frame
				AVFrame* newFrame = avcodec_alloc_frame();

				int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
				uint8_t* picture_buf = av_malloc(size);

				avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// Copy only the frame content without additional fields
				av_picture_copy((AVPicture*) newFrame, (AVPicture*) pFrame, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// encode the image
				AVPacket pkt;
				int got_output;
				av_init_packet(&pkt);
				pkt.data = NULL; // packet data will be allocated by the encoder
				pkt.size = 0;

				// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS'
				newFrame->pts = frame_count;

				ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
				if (ret < 0) {
					fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				if (got_output) {
					if (video_st->codec->coded_frame->key_frame)
						pkt.flags |= AV_PKT_FLAG_KEY;
					pkt.stream_index = video_st->index;
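					// The encoder stamped pts/dts in the codec time base;
					// rescale them to the stream time base below before the
					// muxer sees them.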

					if (pkt.pts != AV_NOPTS_VALUE)
						pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
					if (pkt.dts != AV_NOPTS_VALUE)
						pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

					// Write the compressed frame to the media file.
					ret = av_interleaved_write_frame(oc, &pkt);
				} else {
					ret = 0;
				}
				if (ret != 0) {
					fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;

				// Free the YUV picture frame we copied from the
				// decoder to eliminate the additional fields
				// and other packets/frames used
				av_free(picture_buf);
				av_free_packet(&pkt);
				av_free(newFrame);
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}

	/* Write the trailer, if any. The trailer must be written before you
	 * close the CodecContexts open when you wrote the header; otherwise
	 * av_write_trailer() may try to use memory that was freed on
	 * av_codec_close(). */
	av_write_trailer(oc);

	/* Close the video codec (encoder) */
	if (video_st)
		close_video(oc, video_st);
	// Free the output streams.
	for (i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}
	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);
	/* free the output format context */
	av_free(oc);

	// Free the YUV frame populated by the decoder
	av_free(pFrame);

	// Close the video codec (decoder)
	avcodec_close(pCodecCtx);

	// Close the input video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
示例#24
0
int main (int argc, char **argv)
{
    int ret = 0, got_frame;

    if (argc != 4 && argc != 5) {
        fprintf(stderr, "usage: %s [-refcount] input_file video_output_file audio_output_file\n"
                "API example program to show how to read frames from an input file.\n"
                "This program reads frames from a file, decodes them, and writes decoded\n"
                "video frames to a rawvideo file named video_output_file, and decoded\n"
                "audio frames to a rawaudio file named audio_output_file.\n\n"
                "If the -refcount option is specified, the program use the\n"
                "reference counting frame system which allows keeping a copy of\n"
                "the data for longer than one decode call.\n"
                "\n", argv[0]);
        exit(1);
    }
    if (argc == 5 && !strcmp(argv[1], "-refcount")) {
        refcount = 1;
        argv++;
    }
    src_filename = argv[1];
    video_dst_filename = argv[2];
    audio_dst_filename = argv[3];

    /* register all formats and codecs */
    av_register_all();

    /* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, src_filename, NULL, NULL) < 0) {
        fprintf(stderr, "Could not open source file %s\n", src_filename);
        exit(1);
    }

    /* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        fprintf(stderr, "Could not find stream information\n");
        exit(1);
    }


    if (open_codec_context(&video_stream_idx, fmt_ctx, AVMEDIA_TYPE_VIDEO) >= 0) {
        video_stream = fmt_ctx->streams[video_stream_idx];
        video_dec_ctx = video_stream->codec;

        video_dst_file = fopen(video_dst_filename, "wb");
        if (!video_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", video_dst_filename);
            ret = 1;
            goto end;
        }

        /* allocate image where the decoded image will be put */
        width = video_dec_ctx->width;
        height = video_dec_ctx->height;
        pix_fmt = video_dec_ctx->pix_fmt;
	printf("width:%d height:%d pix_fmt:%d\n", width, height, pix_fmt);
        ret = av_image_alloc(video_dst_data, video_dst_linesize,
                             width, height, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        video_dst_bufsize = ret;
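        /* av_image_alloc() above allocated one buffer for the whole image,
         * filled video_dst_data[] with per-plane pointers and
         * video_dst_linesize[] with strides, and returned the total size in
         * bytes. */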

     /* create scaling context */
        sws_ctx = sws_getContext(width, height, pix_fmt,
                            width*SCALE_MULTIPLE, height*SCALE_MULTIPLE, pix_fmt, SWS_BILINEAR, NULL, NULL, NULL);
        if (!sws_ctx) {
            fprintf(stderr, "Impossible to create scale context for the conversion "
              "fmt:%s s:%dx%d -> fmt:%s s:%dx%d\n",
                av_get_pix_fmt_name(pix_fmt), width,height,
                av_get_pix_fmt_name(pix_fmt), width,height);
            ret = AVERROR(EINVAL);
            goto end;
        }
        ret = av_image_alloc(scale_video_dst_data, scale_video_dst_linesize,
                             width*SCALE_MULTIPLE, height*SCALE_MULTIPLE, pix_fmt, 1);
        if (ret < 0) {
            fprintf(stderr, "Could not allocate raw video buffer\n");
            goto end;
        }
        scale_video_dst_bufsize = ret;
		AllocPic();
    }

    if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;
        audio_dst_file = fopen(audio_dst_filename, "wb");
        if (!audio_dst_file) {
            fprintf(stderr, "Could not open destination file %s\n", audio_dst_filename);
            ret = 1;
            goto end;
        }
    }

    /* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, src_filename, 0);

    if (!audio_stream && !video_stream) {
        fprintf(stderr, "Could not find audio or video stream in the input, aborting\n");
        ret = 1;
        goto end;
    }

    frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

    /*encode yuv to h264*/
    h264_frame = av_frame_alloc();
    if (!h264_frame) {
        fprintf(stderr, "Could not allocate h264_frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }
    av_init_packet(&h264_pkt);
    h264_pkt.data = NULL;
    h264_pkt.size = 0;

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    if (video_stream)
        printf("Demuxing video from file '%s' into '%s'\n", src_filename, video_dst_filename);
    if (audio_stream)
        printf("Demuxing audio from file '%s' into '%s'\n", src_filename, audio_dst_filename);

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        AVPacket orig_pkt = pkt;
        do {
            ret = decode_packet(&got_frame, 0);
            if (ret < 0)
                break;
            pkt.data += ret;
            pkt.size -= ret;
        } while (pkt.size > 0);
        av_packet_unref(&orig_pkt);
    }

    /* flush cached frames */
    pkt.data = NULL;
    pkt.size = 0;
    do {
        decode_packet(&got_frame, 1);
    } while (got_frame);

    printf("Demuxing succeeded.\n");

    if (video_stream) {
        printf("Play the output video file with the command:\n"
               "ffplay -f rawvideo -pix_fmt %s -video_size %dx%d %s\n",
               av_get_pix_fmt_name(pix_fmt), width, height,
               video_dst_filename);
    }

    if (audio_stream) {
        enum AVSampleFormat sfmt = audio_dec_ctx->sample_fmt;
        int n_channels = audio_dec_ctx->channels;
        const char *fmt;

        if (av_sample_fmt_is_planar(sfmt)) {
            const char *packed = av_get_sample_fmt_name(sfmt);
            printf("Warning: the sample format the decoder produced is planar "
                   "(%s). This example will output the first channel only.\n",
                   packed ? packed : "?");
            sfmt = av_get_packed_sample_fmt(sfmt);
            n_channels = 1;
        }

        if ((ret = get_format_from_sample_fmt(&fmt, sfmt)) < 0)
            goto end;

        printf("Play the output audio file with the command:\n"
               "ffplay -f %s -ac %d -ar %d %s\n",
               fmt, n_channels, audio_dec_ctx->sample_rate,
               audio_dst_filename);
    }
	av_write_trailer(mp4FmtCtx);

end:
    avcodec_close(video_dec_ctx);
    avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    if (video_dst_file)
        fclose(video_dst_file);
    if (audio_dst_file)
        fclose(audio_dst_file);
    av_frame_free(&frame);
    av_free(video_dst_data[0]);
    av_free(scale_video_dst_data[0]);
    if(sws_ctx)
        sws_freeContext(sws_ctx);

    return ret < 0;
}
示例#25
0
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet = { .data = NULL, .size = 0 };
    AVFrame *frame = NULL;
    enum AVMediaType type;
    unsigned int stream_index;
    unsigned int i;
    int got_frame;
    int (*dec_func)(AVCodecContext *, AVFrame *, int *, const AVPacket *);

    if (argc != 3) {
        av_log(NULL, AV_LOG_ERROR, "Usage: %s <input file> <output file>\n", argv[0]);
        return 1;
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = open_output_file(argv[2])) < 0)
        goto end;
    if ((ret = init_filters()) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(ifmt_ctx, &packet)) < 0)
            break;
        stream_index = packet.stream_index;
        type = ifmt_ctx->streams[packet.stream_index]->codec->codec_type;
        av_log(NULL, AV_LOG_DEBUG, "Demuxer gave frame of stream_index %u\n",
                stream_index);

        if (filter_ctx[stream_index].filter_graph) {
            av_log(NULL, AV_LOG_DEBUG, "Going to reencode&filter the frame\n");
            frame = av_frame_alloc();
            if (!frame) {
                ret = AVERROR(ENOMEM);
                break;
            }
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ifmt_ctx->streams[stream_index]->codec->time_base);
            dec_func = (type == AVMEDIA_TYPE_VIDEO) ? avcodec_decode_video2 :
                avcodec_decode_audio4;
            ret = dec_func(ifmt_ctx->streams[stream_index]->codec, frame,
                    &got_frame, &packet);
            if (ret < 0) {
                av_frame_free(&frame);
                av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);
                ret = filter_encode_write_frame(frame, stream_index);
                av_frame_free(&frame);
                if (ret < 0)
                    goto end;
            } else {
                av_frame_free(&frame);
            }
        } else {
            /* remux this frame without reencoding */
            av_packet_rescale_ts(&packet,
                                 ifmt_ctx->streams[stream_index]->time_base,
                                 ofmt_ctx->streams[stream_index]->time_base);

            ret = av_interleaved_write_frame(ofmt_ctx, &packet);
            if (ret < 0)
                goto end;
        }
        av_packet_unref(&packet);
    }

    /* flush filters and encoders */
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        /* flush filter */
        if (!filter_ctx[i].filter_graph)
            continue;
        ret = filter_encode_write_frame(NULL, i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing filter failed\n");
            goto end;
        }

        /* flush encoder */
        ret = flush_encoder(i);
        if (ret < 0) {
            av_log(NULL, AV_LOG_ERROR, "Flushing encoder failed\n");
            goto end;
        }
    }

    av_write_trailer(ofmt_ctx);
end:
    av_packet_unref(&packet);
    av_frame_free(&frame);
    for (i = 0; i < ifmt_ctx->nb_streams; i++) {
        avcodec_close(ifmt_ctx->streams[i]->codec);
        if (ofmt_ctx && ofmt_ctx->nb_streams > i && ofmt_ctx->streams[i] && ofmt_ctx->streams[i]->codec)
            avcodec_close(ofmt_ctx->streams[i]->codec);
        if (filter_ctx && filter_ctx[i].filter_graph)
            avfilter_graph_free(&filter_ctx[i].filter_graph);
    }
    av_free(filter_ctx);
    avformat_close_input(&ifmt_ctx);
    if (ofmt_ctx && !(ofmt_ctx->oformat->flags & AVFMT_NOFILE))
        avio_closep(&ofmt_ctx->pb);
    avformat_free_context(ofmt_ctx);

    if (ret < 0)
        av_log(NULL, AV_LOG_ERROR, "Error occurred: %s\n", av_err2str(ret));

    return ret ? 1 : 0;
}
示例#26
0
bool CFFmpegImage::LoadImageFromMemory(unsigned char* buffer, unsigned int bufSize,
                                       unsigned int width, unsigned int height)
{

    uint8_t* fbuffer = (uint8_t*)av_malloc(FFMPEG_FILE_BUFFER_SIZE);
    if (!fbuffer)
    {
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate FFMPEG_FILE_BUFFER_SIZE");
        return false;
    }
    MemBuffer buf;
    buf.data = buffer;
    buf.size = bufSize;
    buf.pos = 0;

    AVIOContext* ioctx = avio_alloc_context(fbuffer, FFMPEG_FILE_BUFFER_SIZE, 0, &buf,
                                            mem_file_read, NULL, mem_file_seek);

    if (!ioctx)
    {
        av_free(fbuffer);
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVIOContext");
        return false;
    }

    AVFormatContext* fctx = avformat_alloc_context();
    if (!fctx)
    {
        av_free(ioctx->buffer);
        av_free(ioctx);
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate AVFormatContext");
        return false;
    }

    fctx->pb = ioctx;
    ioctx->max_packet_size = FFMPEG_FILE_BUFFER_SIZE;

    // Some clients have PNGs saved as JPEG, or ask us for PNG when the data is JPEG.
    // MythTV throws all mimetypes away and asks us with application/octet-stream.
    // This is a poor man's fallback to at least identify PNG / JPEG by magic bytes.
    bool is_jpeg = (bufSize > 2 && buffer[0] == 0xFF && buffer[1] == 0xD8 && buffer[2] == 0xFF);
    bool is_png = (bufSize > 3 && buffer[1] == 'P' && buffer[2] == 'N' && buffer[3] == 'G');
    bool is_tiff = (bufSize > 2 && buffer[0] == 'I' && buffer[1] == 'I' && buffer[2] == '*');

    AVInputFormat* inp = nullptr;
    if (is_jpeg)
        inp = av_find_input_format("jpeg_pipe");
    else if (is_png)
        inp = av_find_input_format("png_pipe");
    else if (is_tiff)
        inp = av_find_input_format("tiff_pipe");
    else if (m_strMimeType == "image/jp2")
        inp = av_find_input_format("j2k_pipe");
    else if (m_strMimeType == "image/webp")
        inp = av_find_input_format("webp_pipe");
    // fall back to the declared mimetype if the magic-byte checks above failed
    else if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
        inp = av_find_input_format("jpeg_pipe");
    else if (m_strMimeType == "image/png")
        inp = av_find_input_format("png_pipe");
    else if (m_strMimeType == "image/tiff")
        inp = av_find_input_format("tiff_pipe");

    if (avformat_open_input(&fctx, "", inp, NULL) < 0)
    {
        CLog::Log(LOGERROR, "Could not find suitable input format: %s", m_strMimeType.c_str());
        avformat_close_input(&fctx);
        FreeIOCtx(ioctx);
        return false;
    }

    AVCodecContext* codec_ctx = fctx->streams[0]->codec;
    AVCodec* codec = avcodec_find_decoder(codec_ctx->codec_id);
    if (avcodec_open2(codec_ctx, codec, NULL) < 0)
    {
        avformat_close_input(&fctx);
        FreeIOCtx(ioctx);
        return false;
    }

    AVPacket pkt;
    AVFrame* frame = av_frame_alloc();
    av_read_frame(fctx, &pkt);
    int frame_decoded;
    int ret = avcodec_decode_video2(codec_ctx, frame, &frame_decoded, &pkt);
    if (ret < 0)
        CLog::Log(LOGDEBUG, "Error [%d] while decoding frame: %s\n", ret, av_err2str(ret));

    if (frame_decoded != 0)
    {
        av_frame_free(&m_pFrame);
        m_pFrame = av_frame_clone(frame);

        if (m_pFrame)
        {
            m_height = m_pFrame->height;
            m_width = m_pFrame->width;
            m_originalWidth = m_width;
            m_originalHeight = m_height;

            const AVPixFmtDescriptor* pixDescriptor = av_pix_fmt_desc_get(static_cast<AVPixelFormat>(m_pFrame->format));
            if (pixDescriptor && ((pixDescriptor->flags & (AV_PIX_FMT_FLAG_ALPHA | AV_PIX_FMT_FLAG_PAL)) != 0))
                m_hasAlpha = true;
        }
        else
        {
            CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not allocate a picture data buffer");
            frame_decoded = 0;
        }
    }
    else
        CLog::LogFunction(LOGERROR, __FUNCTION__, "Could not decode a frame");

    av_frame_free(&frame);
    av_free_packet(&pkt);
    avcodec_close(codec_ctx);
    avformat_close_input(&fctx);
    FreeIOCtx(ioctx);

    return (frame_decoded != 0);
}
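
As an alternative to the hand-rolled is_jpeg/is_png/is_tiff magic-byte tests above, libavformat can probe the buffer itself. A minimal sketch of that approach; probe_image_format() and its 4096-byte probe window are assumptions for illustration, not Kodi API.

#include <libavformat/avformat.h>
#include <libavutil/mem.h>
#include <string.h>

/* Sketch: let libavformat identify the image container from its leading
 * bytes. Returns NULL when the probe is inconclusive. */
static AVInputFormat* probe_image_format(const unsigned char* buffer, unsigned int bufSize)
{
    int probe_size = bufSize < 4096 ? (int)bufSize : 4096; /* window size is illustrative */

    /* FFmpeg requires AVPROBE_PADDING_SIZE zeroed bytes after the probe data */
    unsigned char* padded = (unsigned char*)av_mallocz(probe_size + AVPROBE_PADDING_SIZE);
    if (!padded)
        return NULL;
    memcpy(padded, buffer, probe_size);

    AVProbeData pd;
    memset(&pd, 0, sizeof(pd));
    pd.filename = "";
    pd.buf      = padded;
    pd.buf_size = probe_size;

    AVInputFormat* fmt = av_probe_input_format(&pd, 1); /* 1: treat as opened */
    av_free(padded);
    return fmt;
}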
Example #27
int main(int argc, char* argv[])
{
#ifdef _DEBUG
	_CrtSetDbgFlag ( _CRTDBG_ALLOC_MEM_DF | _CRTDBG_LEAK_CHECK_DF );
	//_CrtSetBreakAlloc(1166);
#endif

	char in_url[500]={0};
	char out_url[500]={0};
	int limit_num=0;
	int width=0;
	int height=0;
	bool limit_is=false;
	bool graphically_ti=false;
	bool graphically_si=false;
	bool isinterval=true;
	int intervalcnt=5;
	// command-line argument parsing ------------------
	extern char *optarg;
	int opt;
	//--------------------------
	if(argc==1){
		usage();
		return 0;
	}

	while ((opt = getopt(argc, argv, "i:o:g:l:n:x:y:h")) != -1)
	{
		switch (opt)
		{
		case 'h':{
			usage();
			return 0;
				 }
		case 'i':{
			strcpy(in_url,optarg);
			break;
				 }
		case 'o':{
			strcpy(out_url,optarg);
			break;
				 }
		case 'l':{
			limit_num=atoi(optarg);
			limit_is=true;
			break;
				 }
		case 'n':{
			intervalcnt=atoi(optarg);
			break;
				 }
		case 'g':{
			if(strcmp(optarg,"ti")==0){
				graphically_ti=true;
			}else if(strcmp(optarg,"si")==0){
				graphically_si=true;
			}
			break;
				 }
		case 'x':{
			width=atoi(optarg);
			break;
				 }
		case 'y':{
			height=atoi(optarg);
			break;
				 }
		default:
			printf("Unknown: %c\n", opt);
			usage();
			return 0;
		}
	}

	if(strcmp(in_url,"")==0){
		printf("Input Video's URL is not set. Exit.\n");
		return 0;
	}

	if(strcmp(out_url,"")==0){
		printf("Output .csv file is not set. Default is {Input Name}.csv\n");
		char *suffix=strchr(in_url, '.');
		*suffix='\0';
		strcpy(out_url,in_url);
		*suffix='.';
		sprintf(out_url,"%s.csv",out_url);
	}
	
	AVFormatContext	*pFormatCtx;
	int				i, video_stream,audio_stream;
	AVCodecContext	*pCodecCtx,*pCodecCtx_au;
	AVCodec			*pCodec,*pCodec_au;

	av_register_all();
	pFormatCtx = avformat_alloc_context();
	if(avformat_open_input(&pFormatCtx,in_url,NULL,NULL)!=0){
		printf("Couldn't open file.\n");
		return -1; // not FALSE: returning 0 from main would signal success
	}
	if(strcmp(pFormatCtx->iformat->name,"rawvideo")!=0)
	{
		if(av_find_stream_info(pFormatCtx)<0)
		{
			printf("Couldn't find stream information.\n");
			return -1;
		}
	}
	
	video_stream=-1;
	audio_stream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			video_stream=i;
		}else if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_AUDIO){
			audio_stream=i;
		}
	}
	if(video_stream==-1)
	{
		printf("Didn't find a video stream.\n");
		return -1;
	}
	if(video_stream!=-1){

		pCodecCtx=pFormatCtx->streams[video_stream]->codec;
		pCodecCtx->width=width;
		pCodecCtx->height=height;
		pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
		if(pCodec==NULL)
		{
			printf("Codec not found.\n");
			return -1;
		}
		if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
		{
			printf("Could not open codec.\n");
			return -1;
		}

		//------------SDL----------------
		SDLParam sdlparam={NULL,NULL,{0,0,0,0},graphically_ti,graphically_si,isinterval,NULL,NULL,0,0,0,0};
		if(graphically_ti==true||graphically_si==true){
			sdlparam.graphically_si=graphically_si;
			sdlparam.graphically_ti=graphically_ti;
			sdlparam.show_w=pCodecCtx->width-2*PADDING;
			sdlparam.show_h=pCodecCtx->height-2*PADDING;
			sdlparam.pixel_w=pCodecCtx->width-2*PADDING;
			sdlparam.pixel_h=pCodecCtx->height-2*PADDING;
			//FIX
			sdlparam.show_YBuffer=(char *)malloc(sdlparam.pixel_w*sdlparam.pixel_h);
			sdlparam.show_UVBuffer=(char *)malloc(sdlparam.pixel_w*sdlparam.pixel_h/2);
			memset(sdlparam.show_UVBuffer,0x80,sdlparam.pixel_w*sdlparam.pixel_h/2);

			SDL_Thread *video_tid = SDL_CreateThread(show_thread,&sdlparam);
		}

		//---------------
		float* silist=(float*)malloc(FRAMENUM*sizeof(float));
		float* tilist=(float*)malloc(FRAMENUM*sizeof(float)); // same size/indexing as silist: tilist[framecnt] is written too
		float* old_silist;
		float* old_tilist;

		AVFrame	*pFrame,*pFrameYUV;
		pFrame=avcodec_alloc_frame();
		pFrameYUV=avcodec_alloc_frame();
		uint8_t *out_buffer;
		out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
		avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

		int ret, got_picture;
		static struct SwsContext *img_convert_ctx;
		int y_size = pCodecCtx->width * pCodecCtx->height;

		AVPacket *packet=(AVPacket *)av_malloc(sizeof(AVPacket));
		av_new_packet(packet, y_size);
		
		// used when computing TI: keeps the previous frame's luma plane
		int prev_has=0;
		uint8_t *prev_ydata=(uint8_t *)av_malloc(pCodecCtx->width*pCodecCtx->height);

		img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL); 
		
		// open the output .csv file
		FILE *fp=fopen(out_url,"wb+");
		fprintf(fp,"TI,SI\n");
		
		// frame counter
		int framecnt=0;
		int realloc_time=1;
		while(av_read_frame(pFormatCtx, packet)>=0&&(framecnt<limit_num||!limit_is))
		{
			if(packet->stream_index==video_stream)
			{
				ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
				if(ret < 0)
				{
					printf("Decode Error.\n");
					return -1;
				}
				if(got_picture)
				{
					sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
					// TI needs a previous frame, so the very first frame only primes prev_ydata
					if(prev_has==1){
						if(framecnt%intervalcnt==0){
							sdlparam.isinterval=false;
						}else{
							sdlparam.isinterval=true;
						}
						float ti=0,si=0;
						int retval=tisi((char *)pFrameYUV->data[0],(char *)prev_ydata,pCodecCtx->width,pCodecCtx->height,sdlparam,ti,si);
						if(retval==-1)
							break;

						if(framecnt>=FRAMENUM*realloc_time)
						{
							realloc_time++;
							old_tilist=tilist;
							old_silist=silist;
							if( (tilist = (float*)realloc( tilist, (FRAMENUM*realloc_time)*sizeof(float)))==NULL)
							{
								free( old_tilist );  // free original block
								return -1;
							}
							if( (silist = (float*)realloc( silist, (FRAMENUM*realloc_time)*sizeof(float)))==NULL)								
							{
								free( old_silist );  // free original block
								return -1;
							}
						}
						tilist[framecnt]=ti;
						silist[framecnt]=si;
						printf("%f,%f\n",ti,si);
						fprintf(fp,"%f,%f\n",ti,si);
						framecnt++;
					}else{
						prev_has=1;
					}
					// copy this frame's luma data for the next TI computation
					memcpy(prev_ydata,pFrameYUV->data[0],pCodecCtx->width*pCodecCtx->height);
				}
			}
			av_free_packet(packet);
		}
		sws_freeContext(img_convert_ctx);
		
		// compute the averages and maxima
		float sum=0;
		for (int i=0;i<framecnt;i++)
			sum +=silist[i];
		float avg_si=sum/framecnt;
		qsort(silist,framecnt,sizeof(float),comp); // sort only the entries actually written
		float max_si=silist[framecnt-1];

		sum=0;
		for (int i=0;i<framecnt-1;i++)
			sum +=tilist[i];
		float avg_ti=sum/(framecnt-1);
		qsort(tilist,(FRAMENUM*realloc_time-1),sizeof(float),comp);
		float max_ti=tilist[FRAMENUM*realloc_time-2];

		fprintf(fp,"TI_AVG,SI_AVG\n");
		fprintf(fp,"%f,%f\n",avg_ti,avg_si);
		fprintf(fp,"TI_MAX,SI_MAX\n");
		fprintf(fp,"%f,%f\n",max_ti,max_si);
		fclose(fp);

		av_free(out_buffer);
		av_free(pFrameYUV);
		av_free(pFrame);      // the decode frame was never released in the original
		av_free(prev_ydata);  // nor the saved luma plane
		avcodec_close(pCodecCtx);

		if(graphically_ti==true||graphically_si==true){
			free(sdlparam.show_YBuffer);
			free(sdlparam.show_UVBuffer);
			SDL_Event event;
			event.type=SDL_QUIT;
			SDL_PushEvent(&event);
		}
		free(silist); // allocated with malloc/realloc, so free(), not delete
		free(tilist);
	}
	avformat_close_input(&pFormatCtx);
	

#ifdef _DEBUG
	_CrtDumpMemoryLeaks(); // in debug builds, dump the leak report when execution reaches this point
#endif

	return 0;
}
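
The grow-on-demand realloc of tilist/silist inside the decode loop above is easy to get wrong; the old_tilist/old_silist temporaries exist only so the original block can be freed when realloc fails. A small helper capturing that pattern, as a sketch rather than part of the original tool:

#include <stdlib.h>

/* Sketch: grow a float array to hold at least `needed` elements, doubling
 * capacity. On allocation failure the original block is freed and NULL is
 * returned, mirroring the error handling of the loop above. */
static float* grow_float_array(float* arr, size_t* capacity, size_t needed)
{
    size_t new_cap = *capacity ? *capacity : 64;
    while (new_cap < needed)
        new_cap *= 2;
    if (new_cap == *capacity)
        return arr; /* already large enough */

    float* tmp = (float*)realloc(arr, new_cap * sizeof(float));
    if (tmp == NULL) {
        free(arr); /* realloc kept the old block alive; release it */
        return NULL;
    }
    *capacity = new_cap;
    return tmp;
}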
Example #28
int
scan_metadata_ffmpeg(char *file, struct media_file_info *mfi)
{
  AVFormatContext *ctx;
  AVDictionary *options;
  const struct metadata_map *extra_md_map;
  struct http_icy_metadata *icy_metadata;
#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
  enum AVCodecID codec_id;
  enum AVCodecID video_codec_id;
  enum AVCodecID audio_codec_id;
#else
  enum CodecID codec_id;
  enum CodecID video_codec_id;
  enum CodecID audio_codec_id;
#endif
  AVStream *video_stream;
  AVStream *audio_stream;
  char *path;
  int mdcount;
  int i;
  int ret;

  ctx = NULL;
  options = NULL;
  path = strdup(file);

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
# ifndef HAVE_FFMPEG
  // Without this, libav is slow to probe some internet streams
  if (mfi->data_kind == DATA_KIND_HTTP)
    {
      ctx = avformat_alloc_context();
      ctx->probesize = 64000;
    }
# endif

  if (mfi->data_kind == DATA_KIND_HTTP)
    {
      free(path);
      ret = http_stream_setup(&path, file);
      if (ret < 0)
	return -1;

      av_dict_set(&options, "icy", "1", 0);
      mfi->artwork = ARTWORK_HTTP;
    }

  ret = avformat_open_input(&ctx, path, NULL, &options);

  if (options)
    av_dict_free(&options);
#else
  ret = av_open_input_file(&ctx, path, NULL, 0, NULL);
#endif
  if (ret != 0)
    {
      DPRINTF(E_WARN, L_SCAN, "Cannot open media file '%s': %s\n", path, err2str(ret));

      free(path);
      return -1;
    }

  free(path);

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_find_stream_info(ctx, NULL);
#else
  ret = av_find_stream_info(ctx);
#endif
  if (ret < 0)
    {
      DPRINTF(E_WARN, L_SCAN, "Cannot get stream info of '%s': %s\n", path, err2str(ret));

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
      avformat_close_input(&ctx);
#else
      av_close_input_file(ctx);
#endif
      return -1;
    }

#if 0
  /* Dump input format as determined by ffmpeg */
  av_dump_format(ctx, 0, file, 0);
#endif

  DPRINTF(E_DBG, L_SCAN, "File has %d streams\n", ctx->nb_streams);

  /* Extract codec IDs, check for video */
#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
  video_codec_id = AV_CODEC_ID_NONE;
  video_stream = NULL;

  audio_codec_id = AV_CODEC_ID_NONE;
  audio_stream = NULL;
#else
  video_codec_id = CODEC_ID_NONE;
  video_stream = NULL;

  audio_codec_id = CODEC_ID_NONE;
  audio_stream = NULL;
#endif

  for (i = 0; i < ctx->nb_streams; i++)
    {
      switch (ctx->streams[i]->codec->codec_type)
	{
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
	  case AVMEDIA_TYPE_VIDEO:
#else
	  case CODEC_TYPE_VIDEO:
#endif
#if LIBAVFORMAT_VERSION_MAJOR >= 55 || (LIBAVFORMAT_VERSION_MAJOR == 54 && LIBAVFORMAT_VERSION_MINOR >= 6)
	    if (ctx->streams[i]->disposition & AV_DISPOSITION_ATTACHED_PIC)
	      {
		DPRINTF(E_DBG, L_SCAN, "Found embedded artwork (stream %d)\n", i);
		mfi->artwork = ARTWORK_EMBEDDED;

		break;
	      }
#endif
	    // We treat these as audio no matter what
	    if (mfi->compilation || (mfi->media_kind & (MEDIA_KIND_PODCAST | MEDIA_KIND_AUDIOBOOK)))
	      break;

	    if (!video_stream)
	      {
		DPRINTF(E_DBG, L_SCAN, "File has video (stream %d)\n", i);

		mfi->has_video = 1;
		video_stream = ctx->streams[i];
		video_codec_id = video_stream->codec->codec_id;
	      }
	    break;

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
	  case AVMEDIA_TYPE_AUDIO:
#else
	  case CODEC_TYPE_AUDIO:
#endif
	    if (!audio_stream)
	      {
		audio_stream = ctx->streams[i];
		audio_codec_id = audio_stream->codec->codec_id;
	      } 
	    break;

	  default:
	    break;
	}
    }

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
  if (audio_codec_id == AV_CODEC_ID_NONE)
#else
  if (audio_codec_id == CODEC_ID_NONE)
#endif
    {
      DPRINTF(E_DBG, L_SCAN, "File has no audio streams, discarding\n");

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
      avformat_close_input(&ctx);
#else
      av_close_input_file(ctx);
#endif
      return -1;
    }

  /* Common media information */
  if (ctx->duration > 0)
    mfi->song_length = ctx->duration / (AV_TIME_BASE / 1000); /* ms */

  if (ctx->bit_rate > 0)
    mfi->bitrate = ctx->bit_rate / 1000;
  else if (ctx->duration > AV_TIME_BASE) /* guesstimate */
    mfi->bitrate = ((mfi->file_size * 8) / (ctx->duration / AV_TIME_BASE)) / 1000;

  DPRINTF(E_DBG, L_SCAN, "Duration %d ms, bitrate %d kbps\n", mfi->song_length, mfi->bitrate);

  /* Try to extract ICY metadata if http stream */
  if (mfi->data_kind == DATA_KIND_HTTP)
    {
      icy_metadata = http_icy_metadata_get(ctx, 0);
      if (icy_metadata && icy_metadata->name)
	{
	  DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, name is '%s'\n", icy_metadata->name);

	  if (mfi->title)
	    free(mfi->title);
	  if (mfi->artist)
	    free(mfi->artist);
	  if (mfi->album_artist)
	    free(mfi->album_artist);

	  mfi->title = strdup(icy_metadata->name);
	  mfi->artist = strdup(icy_metadata->name);
	  mfi->album_artist = strdup(icy_metadata->name);
	}
      if (icy_metadata && icy_metadata->description)
	{
	  DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, description is '%s'\n", icy_metadata->description);

	  if (mfi->album)
	    free(mfi->album);

	  mfi->album = strdup(icy_metadata->description);
	}
      if (icy_metadata && icy_metadata->genre)
	{
	  DPRINTF(E_DBG, L_SCAN, "Found ICY metadata, genre is '%s'\n", icy_metadata->genre);

	  if (mfi->genre)
	    free(mfi->genre);

	  mfi->genre = strdup(icy_metadata->genre);
	}
      if (icy_metadata)
	http_icy_metadata_free(icy_metadata, 0);
    }

  /* Get some more information on the audio stream */
  if (audio_stream)
    {
      if (audio_stream->codec->sample_rate != 0)
	mfi->samplerate = audio_stream->codec->sample_rate;

      /* Try sample format first */
#if LIBAVUTIL_VERSION_MAJOR >= 52 || (LIBAVUTIL_VERSION_MAJOR == 51 && LIBAVUTIL_VERSION_MINOR >= 4)
      mfi->bits_per_sample = 8 * av_get_bytes_per_sample(audio_stream->codec->sample_fmt);
#elif LIBAVCODEC_VERSION_MAJOR >= 53
      mfi->bits_per_sample = av_get_bits_per_sample_fmt(audio_stream->codec->sample_fmt);
#else
      mfi->bits_per_sample = av_get_bits_per_sample_format(audio_stream->codec->sample_fmt);
#endif
      if (mfi->bits_per_sample == 0)
	{
	  /* Try codec */
	  mfi->bits_per_sample = av_get_bits_per_sample(audio_codec_id);
	}

      DPRINTF(E_DBG, L_SCAN, "samplerate %d, bps %d\n", mfi->samplerate, mfi->bits_per_sample);
    }

  /* Check codec */
  extra_md_map = NULL;
  codec_id = (mfi->has_video) ? video_codec_id : audio_codec_id;
  switch (codec_id)
    {
#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_AAC:
#else
      case CODEC_ID_AAC:
#endif
	DPRINTF(E_DBG, L_SCAN, "AAC\n");
	mfi->type = strdup("m4a");
	mfi->codectype = strdup("mp4a");
	mfi->description = strdup("AAC audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_ALAC:
#else
      case CODEC_ID_ALAC:
#endif
	DPRINTF(E_DBG, L_SCAN, "ALAC\n");
	mfi->type = strdup("m4a");
	mfi->codectype = strdup("alac");
	mfi->description = strdup("Apple Lossless audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_FLAC:
#else
      case CODEC_ID_FLAC:
#endif
	DPRINTF(E_DBG, L_SCAN, "FLAC\n");
	mfi->type = strdup("flac");
	mfi->codectype = strdup("flac");
	mfi->description = strdup("FLAC audio file");

	extra_md_map = md_map_vorbis;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_APE:
#else
      case CODEC_ID_APE:
#endif
	DPRINTF(E_DBG, L_SCAN, "APE\n");
	mfi->type = strdup("ape");
	mfi->codectype = strdup("ape");
	mfi->description = strdup("Monkey's audio");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_MUSEPACK7:
      case AV_CODEC_ID_MUSEPACK8:
#else
      case CODEC_ID_MUSEPACK7:
      case CODEC_ID_MUSEPACK8:
#endif
	DPRINTF(E_DBG, L_SCAN, "Musepack\n");
	mfi->type = strdup("mpc");
	mfi->codectype = strdup("mpc");
	mfi->description = strdup("Musepack audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_MPEG4: /* Video */
      case AV_CODEC_ID_H264:
#else
      case CODEC_ID_MPEG4: /* Video */
      case CODEC_ID_H264:
#endif
	DPRINTF(E_DBG, L_SCAN, "MPEG4 video\n");
	mfi->type = strdup("m4v");
	mfi->codectype = strdup("mp4v");
	mfi->description = strdup("MPEG-4 video file");

	extra_md_map = md_map_tv;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_MP3:
#else
      case CODEC_ID_MP3:
#endif
	DPRINTF(E_DBG, L_SCAN, "MP3\n");
	mfi->type = strdup("mp3");
	mfi->codectype = strdup("mpeg");
	mfi->description = strdup("MPEG audio file");

	extra_md_map = md_map_id3;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_VORBIS:
#else
      case CODEC_ID_VORBIS:
#endif
	DPRINTF(E_DBG, L_SCAN, "VORBIS\n");
	mfi->type = strdup("ogg");
	mfi->codectype = strdup("ogg");
	mfi->description = strdup("Ogg Vorbis audio file");

	extra_md_map = md_map_vorbis;
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_WMAV1:
      case AV_CODEC_ID_WMAV2:
      case AV_CODEC_ID_WMAVOICE:
#else
      case CODEC_ID_WMAV1:
      case CODEC_ID_WMAV2:
      case CODEC_ID_WMAVOICE:
#endif
	DPRINTF(E_DBG, L_SCAN, "WMA Voice\n");
	mfi->type = strdup("wma");
	mfi->codectype = strdup("wmav");
	mfi->description = strdup("WMA audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_WMAPRO:
#else
      case CODEC_ID_WMAPRO:
#endif
	DPRINTF(E_DBG, L_SCAN, "WMA Pro\n");
	mfi->type = strdup("wmap");
	mfi->codectype = strdup("wma");
	mfi->description = strdup("WMA audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_WMALOSSLESS:
#else
      case CODEC_ID_WMALOSSLESS:
#endif
	DPRINTF(E_DBG, L_SCAN, "WMA Lossless\n");
	mfi->type = strdup("wma");
	mfi->codectype = strdup("wmal");
	mfi->description = strdup("WMA audio file");
	break;

#if LIBAVCODEC_VERSION_MAJOR >= 55 || (LIBAVCODEC_VERSION_MAJOR == 54 && LIBAVCODEC_VERSION_MINOR >= 35)
      case AV_CODEC_ID_PCM_S16LE ... AV_CODEC_ID_PCM_F64LE:
#else
      case CODEC_ID_PCM_S16LE ... CODEC_ID_PCM_F64LE:
#endif
	if (strcmp(ctx->iformat->name, "aiff") == 0)
	  {
	    DPRINTF(E_DBG, L_SCAN, "AIFF\n");
	    mfi->type = strdup("aif");
	    mfi->codectype = strdup("aif");
	    mfi->description = strdup("AIFF audio file");
	    break;
	  }
	else if (strcmp(ctx->iformat->name, "wav") == 0)
	  {
	    DPRINTF(E_DBG, L_SCAN, "WAV\n");
	    mfi->type = strdup("wav");
	    mfi->codectype = strdup("wav");
	    mfi->description = strdup("WAV audio file");
	    break;
	  }
	/* WARNING: will fallthrough to default case, don't move */
	/* FALLTHROUGH */

      default:
	DPRINTF(E_DBG, L_SCAN, "Unknown codec 0x%x (video: %s), format %s (%s)\n",
		codec_id, (mfi->has_video) ? "yes" : "no", ctx->iformat->name, ctx->iformat->long_name);
	mfi->type = strdup("unkn");
	mfi->codectype = strdup("unkn");
	if (mfi->has_video)
	  {
	    mfi->description = strdup("Unknown video file format");
	    extra_md_map = md_map_tv;
	  }
	else
	  mfi->description = strdup("Unknown audio file format");
	break;
    }

  mdcount = 0;

  if ((!ctx->metadata) && (!audio_stream->metadata)
      && (video_stream && !video_stream->metadata))
    {
      DPRINTF(E_WARN, L_SCAN, "ffmpeg reports no metadata\n");

      goto skip_extract;
    }

  if (extra_md_map)
    {
      ret = extract_metadata(mfi, ctx, audio_stream, video_stream, extra_md_map);
      mdcount += ret;

      DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with extra md_map\n", ret);
    }

  ret = extract_metadata(mfi, ctx, audio_stream, video_stream, md_map_generic);
  mdcount += ret;

  DPRINTF(E_DBG, L_SCAN, "Picked up %d tags with generic md_map, %d tags total\n", ret, mdcount);

  /* fix up TV metadata */
  if (mfi->media_kind == 10)
    {
      /* I have no idea why this is, but iTunes reports a media kind of 64 for stik==10 (?!) */
      mfi->media_kind = MEDIA_KIND_TVSHOW;
    }
  /* Unspecified video files are "Movies", media_kind 2 */
  else if (mfi->has_video == 1)
    {
      mfi->media_kind = MEDIA_KIND_MOVIE;
    }

 skip_extract:
#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
  avformat_close_input(&ctx);
#else
  av_close_input_file(ctx);
#endif

  if (mdcount == 0)
    DPRINTF(E_WARN, L_SCAN, "ffmpeg/libav could not extract any metadata\n");

  /* Just in case there's no title set ... */
  if (mfi->title == NULL)
    mfi->title = strdup(mfi->fname);

  /* All done */

  return 0;
}
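
On libavformat versions that provide it, av_find_best_stream() can replace the manual stream scan above. A minimal sketch; note that it does not by itself skip AV_DISPOSITION_ATTACHED_PIC streams, so the embedded-artwork check would still be needed.

#include <libavformat/avformat.h>

/* Sketch: delegate stream selection to libavformat instead of walking
 * ctx->streams by hand. The results are stream indices, or negative
 * AVERROR codes (e.g. AVERROR_STREAM_NOT_FOUND) when a type is absent. */
static void find_av_streams(AVFormatContext *ctx, int *audio_idx, int *video_idx)
{
  *audio_idx = av_find_best_stream(ctx, AVMEDIA_TYPE_AUDIO, -1, -1, NULL, 0);
  *video_idx = av_find_best_stream(ctx, AVMEDIA_TYPE_VIDEO, -1, -1, NULL, 0);
}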
Example #29
/** Convert an audio file to an AAC file in an MP4 container. */
int main(int argc, char **argv)
{
    AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
    AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
    SwrContext *resample_context = NULL;
    AVAudioFifo *fifo = NULL;
    int ret = AVERROR_EXIT;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(1);
    }

    /** Register all codecs and formats so that they can be used. */
    av_register_all();
    /** Open the input file for reading. */
    if (open_input_file(argv[1], &input_format_context,
                        &input_codec_context))
        goto cleanup;
    /** Open the output file for writing. */
    if (open_output_file(argv[2], input_codec_context,
                         &output_format_context, &output_codec_context))
        goto cleanup;
    /** Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context,
                       &resample_context))
        goto cleanup;
    /** Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo, output_codec_context))
        goto cleanup;
    /** Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
        goto cleanup;

    /**
     * Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither.
     */
    while (1) {
        /** Use the encoder's desired frame size for processing. */
        const int output_frame_size = output_codec_context->frame_size;
        int finished                = 0;

        /**
         * Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame size may differ, we
         * need to FIFO buffer to store as many frames worth of input samples
         * that they make up at least one frame worth of output samples.
         */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
             * Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer.
             */
            if (read_decode_convert_and_store(fifo, input_format_context,
                                              input_codec_context,
                                              output_codec_context,
                                              resample_context, &finished))
                goto cleanup;

            /**
             * If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file.
             */
            if (finished)
                break;
        }

        /**
         * If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder.
         */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished && av_audio_fifo_size(fifo) > 0))
            /**
             * Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file.
             */
            if (load_encode_and_write(fifo, output_format_context,
                                      output_codec_context))
                goto cleanup;

        /**
         * If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish.
         */
        if (finished) {
            int data_written;
            /** Flush the encoder as it may have delayed frames. */
            do {
                if (encode_audio_frame(NULL, output_format_context,
                                       output_codec_context, &data_written))
                    goto cleanup;
            } while (data_written);
            break;
        }
    }

    /** Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context))
        goto cleanup;
    ret = 0;

cleanup:
    if (fifo)
        av_audio_fifo_free(fifo);
    swr_free(&resample_context);
    if (output_codec_context)
        avcodec_close(output_codec_context);
    if (output_format_context) {
        avio_closep(&output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
        avcodec_close(input_codec_context);
    if (input_format_context)
        avformat_close_input(&input_format_context);

    return ret;
}
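
The FIFO that decouples the decoder's and encoder's frame sizes above is an AVAudioFifo. A stripped-down sketch of the write/read cycle it relies on; the sample format, channel count and frame sizes here are illustrative, not taken from the example.

#include <libavutil/audio_fifo.h>
#include <libavutil/samplefmt.h>
#include <libavutil/mem.h>

/* Sketch: queue decoder-sized chunks, drain encoder-sized chunks. */
static int fifo_sketch(void)
{
    const int channels = 2, dec_samples = 1152, enc_samples = 1024;
    int ret;

    AVAudioFifo *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, channels, 1);
    if (!fifo)
        return AVERROR(ENOMEM);

    uint8_t **buf = NULL;
    ret = av_samples_alloc_array_and_samples(&buf, NULL, channels,
                                             dec_samples, AV_SAMPLE_FMT_S16, 0);
    if (ret < 0)
        goto done;

    /* av_audio_fifo_write() reallocates the FIFO as needed */
    ret = av_audio_fifo_write(fifo, (void**)buf, dec_samples);
    if (ret < 0)
        goto done;

    /* drain in encoder-frame-sized chunks, exactly as the main loop does */
    while (av_audio_fifo_size(fifo) >= enc_samples)
        av_audio_fifo_read(fifo, (void**)buf, enc_samples);

done:
    if (buf) {
        av_freep(&buf[0]);
        av_freep(&buf);
    }
    av_audio_fifo_free(fifo);
    return ret < 0 ? ret : 0;
}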
Example #30
void FFmpeg_Decoder::open(const std::string &fname)
{
    close();
    mDataStream = mResourceMgr.openResource(fname);

    if((mFormatCtx=avformat_alloc_context()) == NULL)
        fail("Failed to allocate context");

    mFormatCtx->pb = avio_alloc_context(NULL, 0, 0, this, readPacket, writePacket, seek);
    if(!mFormatCtx->pb || avformat_open_input(&mFormatCtx, fname.c_str(), NULL, NULL) != 0)
    {
        // "Note that a user-supplied AVFormatContext will be freed on failure".
        if (mFormatCtx)
        {
            if (mFormatCtx->pb != NULL)
            {
                if (mFormatCtx->pb->buffer != NULL)
                {
                    av_free(mFormatCtx->pb->buffer);
                    mFormatCtx->pb->buffer = NULL;
                }
                av_free(mFormatCtx->pb);
                mFormatCtx->pb = NULL;
            }
            avformat_free_context(mFormatCtx);
        }
        mFormatCtx = NULL;
        fail("Failed to allocate input stream");
    }

    try
    {
        if(avformat_find_stream_info(mFormatCtx, NULL) < 0)
            fail("Failed to find stream info in "+fname);

        for(size_t j = 0;j < mFormatCtx->nb_streams;j++)
        {
            if(mFormatCtx->streams[j]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
            {
                mStream = &mFormatCtx->streams[j];
                break;
            }
        }
        if(!mStream)
            fail("No audio streams in "+fname);

        (*mStream)->codec->request_sample_fmt = (*mStream)->codec->sample_fmt;

        AVCodec *codec = avcodec_find_decoder((*mStream)->codec->codec_id);
        if(!codec)
        {
            std::stringstream ss; // text passed to the constructor would be overwritten by operator<<
            ss << "No codec found for id " << (*mStream)->codec->codec_id;
            fail(ss.str());
        }
        if(avcodec_open2((*mStream)->codec, codec, NULL) < 0)
            fail("Failed to open audio codec " + std::string(codec->long_name));

        mFrame = av_frame_alloc();
    }
    catch(std::exception&)
    {
        if (mFormatCtx->pb->buffer != NULL)
        {
          av_free(mFormatCtx->pb->buffer);
          mFormatCtx->pb->buffer = NULL;
        }
        av_free(mFormatCtx->pb);
        mFormatCtx->pb = NULL;

        avformat_close_input(&mFormatCtx);
        throw;
    }
}
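
The avio_alloc_context() call in FFmpeg_Decoder::open() hands libavformat the readPacket/writePacket/seek callbacks that pull data from mDataStream. A minimal sketch of what such callbacks typically look like over a plain memory buffer; MyStream and the 8 KB buffer size are assumptions standing in for the resource stream used here.

#include <stdio.h>
#include <string.h>
#include <libavformat/avio.h>
#include <libavutil/error.h>
#include <libavutil/mem.h>

/* Sketch: custom AVIO callbacks over an in-memory stream. */
typedef struct MyStream {
    const uint8_t *data;
    int64_t size;
    int64_t pos;
} MyStream;

static int my_read(void *opaque, uint8_t *buf, int buf_size)
{
    MyStream *s = (MyStream*)opaque;
    int64_t left = s->size - s->pos;
    if (left <= 0)
        return AVERROR_EOF; /* newer lavf expects AVERROR_EOF, not 0 */
    if (buf_size > left)
        buf_size = (int)left;
    memcpy(buf, s->data + s->pos, buf_size);
    s->pos += buf_size;
    return buf_size;
}

static int64_t my_seek(void *opaque, int64_t offset, int whence)
{
    MyStream *s = (MyStream*)opaque;
    switch (whence) {
        case SEEK_SET: s->pos = offset;           break;
        case SEEK_CUR: s->pos += offset;          break;
        case SEEK_END: s->pos = s->size + offset; break;
        case AVSEEK_SIZE: return s->size;         /* lavf asks for the total size */
        default: return AVERROR(EINVAL);
    }
    return s->pos;
}

/* Usage: lavf takes ownership of the buffer through the AVIOContext. */
static AVIOContext* make_avio(MyStream *s)
{
    const int bufsize = 8192; /* illustrative */
    unsigned char *buf = (unsigned char*)av_malloc(bufsize);
    if (!buf)
        return NULL;
    return avio_alloc_context(buf, bufsize, 0, s, my_read, NULL, my_seek);
}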