static void video_decode_example(const char *outfilename, const char *filename)
{
	AVCodec *codec;
	AVCodecContext *c= NULL;
	int frame, got_picture, len;
	FILE *f;
	AVFrame *picture;
	uint8_t inbuf[INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE];
	char buf[1024];
	AVPacket avpkt;

	av_init_packet(&avpkt);

	/* set end of buffer to 0 (this ensures that no overreading happens for damaged mpeg streams) */
	memset(inbuf + INBUF_SIZE, 0, FF_INPUT_BUFFER_PADDING_SIZE);

	printf("Video decoding\n");

	/* find the mpeg1 video decoder */
	codec = avcodec_find_decoder(CODEC_ID_MPEG1VIDEO);
	if (!codec)
	{
		fprintf(stderr, "codec not found\n");
		exit(1);
	}

	c= avcodec_alloc_context();
	picture= avcodec_alloc_frame();

	if(codec->capabilities&CODEC_CAP_TRUNCATED)
		c->flags|= CODEC_FLAG_TRUNCATED; /* we do not send complete frames */

	/* For some codecs, such as msmpeg4 and mpeg4, width and height
	   MUST be initialized here because this information is not
	   available in the bitstream. */

	/* open it */
	if (avcodec_open(c, codec) < 0)
	{
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}

	f = fopen(filename, "rb");
	if (!f)
	{
		fprintf(stderr, "could not open %s\n", filename);
		exit(1);
	}

	frame = 0;
	for(;;)
	{
		avpkt.size = fread(inbuf, 1, INBUF_SIZE, f);
		if (avpkt.size == 0)
			break;

		/* NOTE1: some codecs are stream based (mpegvideo, mpegaudio)
		   and this is the only method to use them because you cannot
		   know the compressed data size before analysing it.

		   BUT some other codecs (msmpeg4, mpeg4) are inherently frame
		   based, so you must call them with all the data for one
		   frame exactly. You must also initialize 'width' and
		   'height' before opening the codec. */

		/* NOTE2: some codecs allow the raw parameters (frame size,
		   sample rate) to be changed at any frame. We handle this, so
		   you should also take care of it */

		/* here, we use a stream-based decoder (mpeg1video), so we
		   feed the decoder and see if it can decode a frame */
		avpkt.data = inbuf;
		while (avpkt.size > 0)
		{
			len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
			if (len < 0)
			{
				fprintf(stderr, "Error while decoding frame %d\n", frame);
				exit(1);
			}
			if (got_picture)
			{
				printf("saving frame %3d\n", frame);
				fflush(stdout);

				/* the picture is allocated by the decoder. no need to
				   free it */
				snprintf(buf, sizeof(buf), outfilename, frame);
				pgm_save(picture->data[0], picture->linesize[0],
				         c->width, c->height, buf);
				frame++;
			}
			avpkt.size -= len;
			avpkt.data += len;
		}
	}

	/* some codecs, such as MPEG, transmit the I and P frame with a
	   latency of one frame. You must do the following to have a
	   chance to get the last frame of the video */
	avpkt.data = NULL;
	avpkt.size = 0;
	len = avcodec_decode_video2(c, picture, &got_picture, &avpkt);
	if (got_picture)
	{
		printf("saving last frame %3d\n", frame);
		fflush(stdout);

		/* the picture is allocated by the decoder. no need to
		   free it */
		snprintf(buf, sizeof(buf), outfilename, frame);
		pgm_save(picture->data[0], picture->linesize[0],
		         c->width, c->height, buf);
		frame++;
	}

	fclose(f);

	avcodec_close(c);
	av_free(c);
	av_free(picture);
	printf("\n");
}
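
The loop above uses the legacy avcodec_decode_video2() API. For reference, here is a minimal sketch of the same feed-and-drain logic with the send/receive API that replaced it in FFmpeg 3.1; the helper name and error handling are illustrative, not part of the original example.

static void decode_packet(AVCodecContext *c, AVFrame *picture, AVPacket *pkt)
{
	/* pkt == NULL enters draining mode, replacing the empty-packet flush above */
	int ret = avcodec_send_packet(c, pkt);
	if (ret < 0)
		exit(1);
	while (ret >= 0) {
		ret = avcodec_receive_frame(c, picture);
		if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
			return; /* decoder needs more input, or is fully drained */
		if (ret < 0)
			exit(1);
		/* 'picture' now holds one decoded frame; save it as above */
	}
}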
Example #2
int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx;
	AVOutputFormat* fmt;
	AVStream* audio_st;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;

	uint8_t* frame_buf;
	AVFrame* pFrame;
	AVPacket pkt;

	int got_frame=0;
	int ret=0;
	int size=0;

	FILE *in_file=NULL;	                        //Raw PCM data
	int framenum=1000;                          //Audio frame number
	const char* out_file = "tdjm.aac";          //Output URL
	int i;

	in_file= fopen("tdjm.pcm", "rb");

	av_register_all();

	//Method 1.
	pFormatCtx = avformat_alloc_context();
	fmt = av_guess_format(NULL, out_file, NULL);
	pFormatCtx->oformat = fmt;


	//Method 2.
	//avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
	//fmt = pFormatCtx->oformat;

	//Open output URL
	if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0){
		printf("Failed to open output file!\n");
		return -1;
	}

	audio_st = avformat_new_stream(pFormatCtx, 0);
	if (audio_st==NULL){
		return -1;
	}
	pCodecCtx = audio_st->codec;
	pCodecCtx->codec_id = fmt->audio_codec;
	pCodecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
	pCodecCtx->sample_fmt = AV_SAMPLE_FMT_S16;
	pCodecCtx->sample_rate= 44100;
	pCodecCtx->channel_layout=AV_CH_LAYOUT_STEREO;
	pCodecCtx->channels = av_get_channel_layout_nb_channels(pCodecCtx->channel_layout);
	pCodecCtx->bit_rate = 64000;  

	//Show some information
	av_dump_format(pFormatCtx, 0, out_file, 1);

	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
	if (!pCodec){
		printf("Can not find encoder!\n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){
		printf("Failed to open encoder!\n");
		return -1;
	}
	pFrame = av_frame_alloc();
	pFrame->nb_samples= pCodecCtx->frame_size;
	pFrame->format= pCodecCtx->sample_fmt;
	
	size = av_samples_get_buffer_size(NULL, pCodecCtx->channels,pCodecCtx->frame_size,pCodecCtx->sample_fmt, 1);
	frame_buf = (uint8_t *)av_malloc(size);
	avcodec_fill_audio_frame(pFrame, pCodecCtx->channels, pCodecCtx->sample_fmt,(const uint8_t*)frame_buf, size, 1);
	
	//Write Header
	avformat_write_header(pFormatCtx,NULL);

	av_new_packet(&pkt,size);

	for (i=0; i<framenum; i++){
		//Read PCM
		if (fread(frame_buf, 1, size, in_file) < (size_t)size){
			//a short read means end of the raw input; stop encoding
			break;
		}
		pFrame->data[0] = frame_buf;  //PCM Data

		pFrame->pts=i*100;
		got_frame=0;
		//Encode
		ret = avcodec_encode_audio2(pCodecCtx, &pkt,pFrame, &got_frame);
		if(ret < 0){
			printf("Failed to encode!\n");
			return -1;
		}
		if (got_frame==1){
			printf("Succeed to encode 1 frame! \tsize:%5d\n",pkt.size);
			pkt.stream_index = audio_st->index;
			ret = av_write_frame(pFormatCtx, &pkt);
			av_free_packet(&pkt);
		}
	}
	
	//Flush Encoder
	ret = flush_encoder(pFormatCtx,0);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}

	//Write Trailer
	av_write_trailer(pFormatCtx);

	//Clean
	if (audio_st){
		avcodec_close(audio_st->codec);
		av_free(pFrame);
		av_free(frame_buf);
	}
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);

	fclose(in_file);

	return 0;
}
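
flush_encoder() is called above but not included in this excerpt. Below is a minimal sketch of what such a helper typically looks like with the same legacy avcodec_encode_audio2() API; it is an assumption, not the original function.

int flush_encoder(AVFormatContext *fmt_ctx, unsigned int stream_index)
{
	int ret, got_frame;
	AVPacket enc_pkt;
	if (!(fmt_ctx->streams[stream_index]->codec->codec->capabilities & CODEC_CAP_DELAY))
		return 0;
	while (1) {
		enc_pkt.data = NULL;
		enc_pkt.size = 0;
		av_init_packet(&enc_pkt);
		/* a NULL frame asks the encoder to emit any buffered packets */
		ret = avcodec_encode_audio2(fmt_ctx->streams[stream_index]->codec,
			&enc_pkt, NULL, &got_frame);
		if (ret < 0)
			break;
		if (!got_frame)
			return 0; /* fully flushed */
		ret = av_write_frame(fmt_ctx, &enc_pkt);
		if (ret < 0)
			break;
	}
	return ret;
}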
static int tdav_codec_h264_set(tmedia_codec_t* self, const tmedia_param_t* param)
{
	tdav_codec_h264_t* h264 = (tdav_codec_h264_t*)self;
	if(!self->opened){
		TSK_DEBUG_ERROR("Codec not opened");
		return -1;
	}
	if(param->value_type == tmedia_pvt_int32){
		if(tsk_striequals(param->key, "action")){
			tmedia_codec_action_t action = (tmedia_codec_action_t)TSK_TO_INT32((uint8_t*)param->value);
			switch(action){
				case tmedia_codec_action_encode_idr:
					{
						h264->encoder.force_idr = tsk_true;
						break;
					}
				case tmedia_codec_action_bw_down:
					{
						h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality + 1), 31);
#if HAVE_FFMPEG
						h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
#endif
						break;
					}
				case tmedia_codec_action_bw_up:
					{
						h264->encoder.quality = TSK_CLAMP(1, (h264->encoder.quality - 1), 31);
#if HAVE_FFMPEG
						h264->encoder.context->global_quality = FF_QP2LAMBDA * h264->encoder.quality;
#endif
						break;
					}
			}
			return 0;
		}
		else if(tsk_striequals(param->key, "bypass-encoding")){
			h264->encoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
			TSK_DEBUG_INFO("[H.264] bypass-encoding = %d", h264->encoder.passthrough);
			return 0;
		}
		else if(tsk_striequals(param->key, "bypass-decoding")){
			h264->decoder.passthrough = *((int32_t*)param->value) ? tsk_true : tsk_false;
			TSK_DEBUG_INFO("[H.264] bypass-decoding = %d", h264->decoder.passthrough);
			return 0;
		}
		else if(tsk_striequals(param->key, "rotation")){
			int32_t rotation = *((int32_t*)param->value);
			if(h264->encoder.rotation != rotation){
				if(self->opened){
					int ret;
					h264->encoder.rotation = rotation;
					if((ret = tdav_codec_h264_close_encoder(h264))){
						return ret;
					}
					if((ret = tdav_codec_h264_open_encoder(h264))){
						return ret;
					}
#if 0 // Not working
					if((ret = avcodec_close(h264->encoder.context))){
						TSK_DEBUG_ERROR("Failed to close [%s] codec", TMEDIA_CODEC(h264)->plugin->desc);
						return ret;
					}
					h264->encoder.context->width = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(h264)->out.height : TMEDIA_CODEC_VIDEO(h264)->out.width;
					h264->encoder.context->height = (rotation == 90 || rotation == 270) ? TMEDIA_CODEC_VIDEO(h264)->out.width : TMEDIA_CODEC_VIDEO(h264)->out.height;
					if((ret = avcodec_open(h264->encoder.context, h264->encoder.codec)) < 0){
						TSK_DEBUG_ERROR("Failed to open [%s] codec", TMEDIA_CODEC(h264)->plugin->desc);
						return ret;
					}
					h264->encoder.force_idr = tsk_true;
#endif
				}
			}
			return 0;
		}
	}
	return -1;
}
Example #4
void
Java_tv_xormedia_AndCodec_CodecLib_EasyDecoderTest(JNIEnv* env, jobject thiz, jstring input_file, jstring output_file, int width, int height, int out_fmt, int frames, int is_writefile)
{	
	and_log_writeline_simple(0, LOG_INFO, "EasyDecoderTest()");
	and_log_init("/mnt/sdcard/easy_decoder.log", LOG_INFO);
	
	//parse input and output filename
	char str_in_file[256]	= {0};
	char str_out_file[256]	= {0};
	char str_tmp[256]	= {0};
	int str_len = 256;
	
	convert_jstring(env, str_tmp, &str_len, input_file);
	sprintf(str_in_file, "/data/data/tv.xormedia.AndCodec/files/%s", str_tmp);
	convert_jstring(env, str_tmp, &str_len, output_file);
	sprintf(str_out_file, "/data/data/tv.xormedia.AndCodec/files/%s", str_tmp);
	
	and_log_writeline_easy(0, LOG_INFO, "in: %s, out %s, frames %d, write_file %d", 
		str_in_file, str_out_file,frames, is_writefile);
	
	avcodec_register_all();
	
	int PIC_W = width;
	int PIC_H = height;
	int ret = 0;
	enum AVPixelFormat out_pix_fmt = AV_PIX_FMT_RGB565LE;

	uint8_t* video_dst_data[4] = {NULL};
	int video_dst_linesize[4];

	AVCodecContext*			dec_ctx		= NULL;
	AVCodec*				dec			= NULL;
	unsigned char*			pbuf		= NULL;
	AVFrame*				frame 		= NULL;
	AVPacket pkt;

	int to_read, readed;
	int fd_in;
	fd_in = and_sysutil_open_file(str_in_file, kANDSysUtilOpenReadOnly);
	if(fd_in < 0) {
		and_log_writeline_easy(0, LOG_ERROR, "failed to open h264(with size) file: %s", str_in_file);
		return;
	}
	
	int written;
	int fd_out;
	fd_out = and_sysutil_create_or_open_file(str_out_file, 0644);
	if(fd_out < 0) {
		and_log_writeline_easy(0, LOG_ERROR, "failed to open rgb565 file: %s", str_out_file);
		return;
	}

	do {
		dec = avcodec_find_decoder(AV_CODEC_ID_H264);
	
		dec_ctx = avcodec_alloc_context3(dec);
		dec_ctx->width	 = PIC_W;
		dec_ctx->height	 = PIC_H;
		dec_ctx->pix_fmt = AV_PIX_FMT_YUV420P;

		ret = avcodec_open2(dec_ctx, dec, NULL);
		if(ret < 0) {
			and_log_writeline_easy(0, LOG_ERROR, "failed to open video decoder .");
			break;
		}
	
        ret = av_image_alloc(video_dst_data, video_dst_linesize, dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, 1);
		struct SwsContext* img_convert_ctx = sws_getContext(dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt, PIC_W, PIC_H, out_pix_fmt, SWS_BICUBIC, NULL, NULL, NULL);

		AVFrame* rgb_pic = alloc_picture(out_pix_fmt, PIC_W, PIC_H);
		frame = avcodec_alloc_frame();

		av_init_packet(&pkt);
		pkt.data = NULL;
		pkt.size = 0;

		const int frame_len = 65536;
		pbuf = (unsigned char*)malloc(frame_len);

		int got_frame = 0;
		int dec_frames = 0; 
		while(1)
		{ 
			to_read = 4;
			readed = and_sysutil_read(fd_in, pbuf, to_read);
			if (readed != to_read) {
				and_log_writeline_easy(0, LOG_INFO, "eof. decoder done! total %d frames", dec_frames);
				break;
			}

			to_read = *((int*)pbuf);
			and_log_writeline_easy(0, LOG_DEBUG, "read h264 frame %d. %c%c%c%c", 
				to_read, pbuf[0], pbuf[1], pbuf[2], pbuf[3]);
			readed = and_sysutil_read(fd_in, pbuf, to_read);
			if (readed != to_read) {
				and_log_writeline_easy(0, LOG_INFO, "eof.");
				break;
			}

			pkt.data = pbuf;
			pkt.size = readed;
			and_log_writeline_easy(0, LOG_DEBUG, "encoder frame size:[%d] %d", dec_frames, readed);

			dec_frames++;
			while(1)
			{
				if( pkt.size == 0)
					break;

				ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt);
				if (ret < 0) 
					break;

				if(pkt.data) {
					pkt.data += ret;
					pkt.size -= ret;
				}

				if(got_frame)		{
					and_log_writeline_easy(0, LOG_DEBUG, "got pic.");
					av_image_copy(video_dst_data, video_dst_linesize,(const uint8_t **)(frame->data), frame->linesize,
						dec_ctx->pix_fmt, dec_ctx->width, dec_ctx->height);

					//to out_pix_fmt 
					sws_scale(img_convert_ctx, (uint8_t const**)video_dst_data, video_dst_linesize, 0, dec_ctx->height, rgb_pic->data, rgb_pic->linesize);

					int len = rgb_pic->linesize[0] * PIC_H;
					and_log_writeline_easy(0, LOG_DEBUG, "pic len:%d.", len);
					written = and_sysutil_write(fd_out, (void *)rgb_pic->data[0], len);
					if (written != len) {
						and_log_writeline_easy(0, LOG_ERROR, "failed to write %d - %d", len, written);
						break;
					}

					av_free_packet(&pkt);

				}
			}
		}

		pkt.data = NULL;
		pkt.size = 0;
		break;
	}while(0);

	if(pbuf)
		free(pbuf);

	if(dec_ctx)
		avcodec_close(dec_ctx);

	if(frame)
		av_free(frame);

	if(video_dst_data[0])
		av_free(video_dst_data[0]);

	and_sysutil_close(fd_in);
	and_sysutil_close(fd_out);
}
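
The reader above expects each H.264 frame to be preceded by a 4-byte native-endian length. A hypothetical helper (not part of the original code) that produces exactly that framing:

static int write_sized_frame(FILE *f, const unsigned char *frame_data, int len)
{
	/* 4-byte native-endian length prefix, then the frame payload */
	if (fwrite(&len, 4, 1, f) != 1)
		return -1;
	if (fwrite(frame_data, 1, len, f) != (size_t)len)
		return -1;
	return 0;
}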
Example #5
int main(int argc, char* argv[]) {

	int i, videoStream, audioStream;

	VideoState	*is;
	is = av_mallocz(sizeof(VideoState));

	if(argc < 2) {
		fprintf(stderr, "Usage: test <file>\n");
		exit(1);
	}

	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}


	av_register_all();

	AVFormatContext *pFormatCtx = NULL;

	av_strlcpy(is->filename, argv[1], sizeof(is->filename));
	is->pictq_mutex = SDL_CreateMutex();
	is->pictq_cond	= SDL_CreateCond();

	schedule_refresh(is, 40);

	is->parse_tid = SDL_CreateThread(decode_thread, is);
	if(!is->parse_tid) {
		av_free(is);
		return -1;
	}

	// Open video file
	if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0) {
		return -1; // Couldn't open file
	}

	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		return -1; //Couldn't find stream information
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	AVCodecContext *pCodecCtxOrig = NULL;
	AVCodecContext *pCodecCtx = NULL;

	AVCodecContext *aCodecCtxOrig = NULL;
	AVCodecContext *aCodecCtx = NULL;

	// Find the first video stream
	videoStream = -1;
	audioStream = -1;

	for(i=0; i < pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && videoStream < 0) {
			videoStream = i;
		}
	}
	
	for(i=0; i < pFormatCtx->nb_streams; i++) {
		if(pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) {
			audioStream = i;
		}
	}

	if(videoStream == -1) {
		return -1; // Didn't find a video stream
	}
	if(audioStream == -1) {
		return -1;
	}
	// Get a pointer to the codec context for the video stream
	pCodecCtxOrig = pFormatCtx->streams[videoStream]->codec;
	aCodecCtxOrig = pFormatCtx->streams[audioStream]->codec;

	AVCodec *pCodec = NULL;
	AVCodec *aCodec = NULL;

	//Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtxOrig->codec_id);
	if(pCodec == NULL) {
		return -1;
	}
	aCodec = avcodec_find_decoder(aCodecCtxOrig->codec_id);
	if(aCodec == NULL) {
		return -1;
	}
	// Copy context
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
		return -1;
	}

	aCodecCtx = avcodec_alloc_context3(aCodec);
	if(avcodec_copy_context(aCodecCtx, aCodecCtxOrig) != 0) {
		return -1;
	}
	SDL_AudioSpec wanted_spec, spec;

	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
	wanted_spec.callback = audio_callback;
	wanted_spec.userdata = aCodecCtx;

	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}

	// Open codec
	AVDictionary *optionDict = NULL;
	if(avcodec_open2(pCodecCtx, pCodec, &optionDict) < 0) {
		return -1;
	}
	
	if(avcodec_open2(aCodecCtx, aCodec, NULL) < 0) {
		return -1;
	}

	packet_queue_init(&audioq);
	SDL_PauseAudio(0);

	
	// Allocate video frame
	AVFrame *pFrame = NULL;
	pFrame = av_frame_alloc();


	SDL_Surface *screen;
	screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);
	if(!screen) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		exit(1);
	}

	SDL_Overlay *bmp = NULL;

	bmp = SDL_CreateYUVOverlay(pCodecCtx->width, 
							   pCodecCtx->height, 
							   SDL_YV12_OVERLAY, 
							   screen);


	printf("[loop]==========================\n");

	struct SwsContext *sws_ctx = NULL;
	int frameFinished;
	AVPacket packet;

	//initialize SWS context for software scaling
	sws_ctx = sws_getContext(pCodecCtx->width, 
							pCodecCtx->height,
							pCodecCtx->pix_fmt,
							pCodecCtx->width,
							pCodecCtx->height,
							AV_PIX_FMT_YUV420P,
							SWS_BILINEAR,
							NULL,
							NULL,
							NULL);

	// Read frame and display							
	i = 0;
	while(av_read_frame(pFormatCtx, &packet) >= 0) {
		if(packet.stream_index == videoStream) {
			//Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if(frameFinished) {
				SDL_LockYUVOverlay(bmp);

				AVPicture pict;
				pict.data[0] = bmp->pixels[0];
				pict.data[1] = bmp->pixels[2];
				pict.data[2] = bmp->pixels[1];

				pict.linesize[0] = bmp->pitches[0];
				pict.linesize[1] = bmp->pitches[2];
				pict.linesize[2] = bmp->pitches[1];

				// Convert the image into YUV format that SDL uses

				sws_scale(sws_ctx, 
						  (uint8_t const * const *)pFrame->data,
						  pFrame->linesize, 
						  0, 
						  pCodecCtx->height, 
						  pict.data, 
						  pict.linesize);

				SDL_UnlockYUVOverlay(bmp);

				SDL_Rect rect;
				rect.x = 0;
				rect.y = 0;
				rect.w = pCodecCtx->width;
				rect.h = pCodecCtx->height;
				SDL_DisplayYUVOverlay(bmp, &rect);
				av_free_packet(&packet);	
			}
		} else if (packet.stream_index == audioStream) {
			packet_queue_put(&audioq, &packet);

		} else {
			// Free the packet that was allocated by av_read_frame
			av_free_packet(&packet);
		}

		SDL_Event event;
		SDL_PollEvent(&event);
		switch(event.type) {
		case SDL_QUIT:
			quit = 1;
			SDL_Quit();
			exit(0);
			break;
		default:
			break;
		}
	}


	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);
	avcodec_close(pCodecCtxOrig);

	// Close the video file
	avformat_close_input(&pFormatCtx);
	return 0;
}
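
packet_queue_init() and packet_queue_put() are used above but not shown. The sketch below shows the queue such players usually wrap around an SDL mutex/condition pair; it is assumed from the tutorial lineage of this code, not taken from the excerpt.

typedef struct PacketQueue {
	AVPacketList *first_pkt, *last_pkt;
	int nb_packets;
	int size;
	SDL_mutex *mutex;
	SDL_cond *cond;
} PacketQueue;

void packet_queue_init(PacketQueue *q) {
	memset(q, 0, sizeof(PacketQueue));
	q->mutex = SDL_CreateMutex();
	q->cond = SDL_CreateCond();
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
	AVPacketList *pkt1 = av_malloc(sizeof(AVPacketList));
	if (!pkt1)
		return -1;
	pkt1->pkt = *pkt;
	pkt1->next = NULL;
	SDL_LockMutex(q->mutex);
	if (!q->last_pkt)
		q->first_pkt = pkt1;
	else
		q->last_pkt->next = pkt1;
	q->last_pkt = pkt1;
	q->nb_packets++;
	q->size += pkt1->pkt.size;
	SDL_CondSignal(q->cond); /* wake a consumer blocked in packet_queue_get() */
	SDL_UnlockMutex(q->mutex);
	return 0;
}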
Example #6
// adapted from [libav-user] list
// https://lists.libav.org/pipermail/libav-user/2010-August/005159.html
int WriteJPEG (AVCodecContext *pCodecCtx, AVFrame *pFrame, int FrameNo)
{
	AVCodecContext         *pOCodecCtx;
	AVCodec                *pOCodec;
	uint8_t                *Buffer;
	int                     BufSiz;
	enum AVPixelFormat      ImgFmt = AV_PIX_FMT_YUVJ420P;
	FILE                   *JPEGFile;
	char                    JPEGFName[256];

	BufSiz = av_image_get_buffer_size(ImgFmt, pCodecCtx->width, pCodecCtx->height, 1);

	Buffer = (uint8_t *)malloc ( BufSiz );
	if ( Buffer == NULL )
	{
		return 0; 
	}

	memset ( Buffer, 0, BufSiz );

	pOCodecCtx = avcodec_alloc_context3 ( NULL );
	if ( !pOCodecCtx ) 
	{
		printf("no pOCodecCtx");
		free ( Buffer );
		return ( 0 );
	}

	pOCodecCtx->bit_rate      = pCodecCtx->bit_rate;
	pOCodecCtx->width         = pCodecCtx->width;
	pOCodecCtx->height        = pCodecCtx->height;
	pOCodecCtx->pix_fmt       = ImgFmt;
	pOCodecCtx->codec_id      = AV_CODEC_ID_MJPEG;
	pOCodecCtx->codec_type    = AVMEDIA_TYPE_VIDEO;
	pOCodecCtx->time_base.num = pCodecCtx->time_base.num;
	pOCodecCtx->time_base.den = pCodecCtx->time_base.den;

	pOCodec = avcodec_find_encoder ( pOCodecCtx->codec_id );
	if ( !pOCodec ) 
	{
		printf("no pOCodec");
		free ( Buffer );
		return ( 0 );
	}
	if ( avcodec_open2 ( pOCodecCtx, pOCodec, NULL ) < 0 ) 
	{
		printf("avcodec_open2 failed");
		free ( Buffer );
		return ( 0 );
	}

	pOCodecCtx->mb_lmin = pOCodecCtx->qmin * FF_QP2LAMBDA;
	pOCodecCtx->mb_lmax = pOCodecCtx->qmax * FF_QP2LAMBDA;

	pOCodecCtx->flags = CODEC_FLAG_QSCALE;
	pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

	pFrame->pts = 1;
	pFrame->quality = pOCodecCtx->global_quality;

	AVPacket pOutPacket;
	av_init_packet ( &pOutPacket );   /* initialize the remaining packet fields */
	pOutPacket.data = Buffer;
	pOutPacket.size = BufSiz;

	int got_packet_ptr = 0;
	int encode_err = avcodec_encode_video2( pOCodecCtx, &pOutPacket, pFrame, &got_packet_ptr);

	if (encode_err == 0 && got_packet_ptr)
	{
		sprintf ( JPEGFName, "%06d.jpg", FrameNo );
		JPEGFile = fopen ( JPEGFName, "wb" );
		/* write only the bytes actually produced by the encoder */
		fwrite ( Buffer, 1, pOutPacket.size, JPEGFile );
		fclose ( JPEGFile );
	}

	avcodec_close ( pOCodecCtx );
	free ( Buffer );
	return ( BufSiz );
}
int main(int argc, char* argv[])
{

    AVFormatContext	*pFormatCtx;
    int				i, videoindex;
    AVCodecContext	*pCodecCtx;
    AVCodec			*pCodec;
    AVFrame	*pFrame,*pFrameYUV;
    uint8_t *out_buffer;
    AVPacket *packet;
    int ret, got_picture;

    //------------SDL----------------
    int screen_w,screen_h;
    SDL_Window *screen;
    SDL_Renderer* sdlRenderer;
    SDL_Texture* sdlTexture;
    SDL_Rect sdlRect;
    SDL_Thread *video_tid;
    SDL_Event event;

    struct SwsContext *img_convert_ctx;

    char filepath[]="bigbuckbunny_480x272.h265";

    av_register_all();
    avformat_network_init();
    pFormatCtx = avformat_alloc_context();

    if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0) {
        printf("Couldn't open input stream.\n");
        return -1;
    }
    if(avformat_find_stream_info(pFormatCtx,NULL)<0) {
        printf("Couldn't find stream information.\n");
        return -1;
    }
    videoindex=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoindex=i;
            break;
        }
    if(videoindex==-1) {
        printf("Didn't find a video stream.\n");
        return -1;
    }
    pCodecCtx=pFormatCtx->streams[videoindex]->codec;
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        printf("Codec not found.\n");
        return -1;
    }
    if(avcodec_open2(pCodecCtx, pCodec,NULL)<0) {
        printf("Could not open codec.\n");
        return -1;
    }
    pFrame=av_frame_alloc();
    pFrameYUV=av_frame_alloc();
    out_buffer=(uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
    avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);

    //Output Info-----------------------------
    printf("---------------- File Information ---------------\n");
    av_dump_format(pFormatCtx,0,filepath,0);
    printf("-------------------------------------------------\n");

    img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
                                     pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);


    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        printf( "Could not initialize SDL - %s\n", SDL_GetError());
        return -1;
    }
    //SDL 2.0 Support for multiple windows
    screen_w = pCodecCtx->width;
    screen_h = pCodecCtx->height;
    screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
                              screen_w, screen_h,SDL_WINDOW_OPENGL);

    if(!screen) {
        printf("SDL: could not create window - exiting:%s\n",SDL_GetError());
        return -1;
    }
    sdlRenderer = SDL_CreateRenderer(screen, -1, 0);
    //IYUV: Y + U + V  (3 planes)
    //YV12: Y + V + U  (3 planes)
    sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);

    sdlRect.x=0;
    sdlRect.y=0;
    sdlRect.w=screen_w;
    sdlRect.h=screen_h;

    packet=(AVPacket *)av_malloc(sizeof(AVPacket));

    video_tid = SDL_CreateThread(sfp_refresh_thread,NULL,NULL);
    //------------SDL End------------
    //Event Loop

    for (;;) {
        //Wait
        SDL_WaitEvent(&event);
        if(event.type==SFM_REFRESH_EVENT) {
            //------------------------------
            if(av_read_frame(pFormatCtx, packet)>=0) {
                if(packet->stream_index==videoindex) {
                    ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
                    if(ret < 0) {
                        printf("Decode Error.\n");
                        return -1;
                    }
                    if(got_picture) {
                        sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
                        //SDL---------------------------
                        SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );
                        SDL_RenderClear( sdlRenderer );
                        //SDL_RenderCopy( sdlRenderer, sdlTexture, &sdlRect, &sdlRect );
                        SDL_RenderCopy( sdlRenderer, sdlTexture, NULL, NULL);
                        SDL_RenderPresent( sdlRenderer );
                        //SDL End-----------------------
                    }
                }
                av_free_packet(packet);
            } else {
                //Exit Thread
                thread_exit=1;
            }
        } else if(event.type==SDL_KEYDOWN) {
            //Pause
            if(event.key.keysym.sym==SDLK_SPACE)
                thread_pause=!thread_pause;
        } else if(event.type==SDL_QUIT) {
            thread_exit=1;
        } else if(event.type==SFM_BREAK_EVENT) {
            break;
        }

    }

    sws_freeContext(img_convert_ctx);

    SDL_Quit();
    //--------------
    av_frame_free(&pFrameYUV);
    av_frame_free(&pFrame);
    avcodec_close(pCodecCtx);
    avformat_close_input(&pFormatCtx);

    return 0;
}
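
sfp_refresh_thread() is referenced above but not included in this excerpt. Here is a minimal sketch of the usual refresh thread for this player, inferred from the event loop; thread_exit, thread_pause, SFM_REFRESH_EVENT, and SFM_BREAK_EVENT are the globals/macros the loop already uses.

int sfp_refresh_thread(void *opaque) {
    thread_exit = 0;
    thread_pause = 0;
    while (!thread_exit) {
        if (!thread_pause) {
            SDL_Event event;
            event.type = SFM_REFRESH_EVENT;
            SDL_PushEvent(&event); /* ask the main loop to decode/render one frame */
        }
        SDL_Delay(40); /* ~25 fps */
    }
    /* tell the main loop to break out */
    SDL_Event event;
    event.type = SFM_BREAK_EVENT;
    SDL_PushEvent(&event);
    return 0;
}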
Example #8
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             frameFinished;
    float           aspect_ratio;
    struct SwsContext *img_convert_ctx;

    SDL_Overlay     *bmp;
    SDL_Surface     *screen;
    SDL_Rect        rect;
    SDL_Event       event;

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=av_frame_alloc();

    // Make a screen to put our video
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);

    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
            pCodecCtx->height,
            SDL_YV12_OVERLAY,
            screen);


    // Read frames and save first five frames to disk
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                    &packet);

            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses
                img_convert_ctx = sws_getContext(pCodecCtx->width,
                        pCodecCtx->height,
                        pCodecCtx->pix_fmt,
                        pCodecCtx->width,
                        pCodecCtx->height,
                        PIX_FMT_YUV420P,
                        SWS_BICUBIC,NULL,
                        NULL,NULL);
                sws_scale(img_convert_ctx, pFrame->data,
                        pFrame->linesize,
                        0,
                        pFrame->height,
                        pict.data,
                        pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch(event.type) {
            case SDL_QUIT:
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
        }

    }

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
void lwlibav_update_configuration
(
    lwlibav_decode_handler_t *dhp,
    uint32_t                  frame_number,
    int                       extradata_index,
    int64_t                   rap_pos
)
{
    lwlibav_extradata_handler_t *exhp = &dhp->exh;
    if( exhp->entry_count == 0 || extradata_index < 0 )
    {
        /* No need to update the extradata. */
        exhp->current_index = extradata_index;
        lwlibav_flush_buffers( dhp );
        return;
    }
    AVCodecContext *ctx = dhp->format->streams[ dhp->stream_index ]->codec;
    void *app_specific = ctx->opaque;
    avcodec_close( ctx );
    if( ctx->extradata )
    {
        av_freep( &ctx->extradata );
        ctx->extradata_size = 0;
    }
    /* Find an appropriate decoder. */
    char error_string[96] = { 0 };
    lwlibav_extradata_t *entry = &exhp->entries[extradata_index];
    const AVCodec *codec = find_decoder( entry->codec_id, dhp->preferred_decoder_names );
    if( !codec )
    {
        strcpy( error_string, "Failed to find the decoder.\n" );
        goto fail;
    }
    /* Get decoder default settings. */
    int thread_count = ctx->thread_count;
    if( avcodec_get_context_defaults3( ctx, codec ) < 0 )
    {
        strcpy( error_string, "Failed to get CODEC default.\n" );
        goto fail;
    }
    /* Set up decoder basic settings. */
    if( ctx->codec_type == AVMEDIA_TYPE_VIDEO )
        set_video_basic_settings( dhp, frame_number );
    else
        set_audio_basic_settings( dhp, frame_number );
    /* Update extradata. */
    if( entry->extradata_size > 0 )
    {
        ctx->extradata = (uint8_t *)av_malloc( entry->extradata_size + FF_INPUT_BUFFER_PADDING_SIZE );
        if( !ctx->extradata )
        {
            strcpy( error_string, "Failed to allocate extradata.\n" );
            goto fail;
        }
        ctx->extradata_size = entry->extradata_size;
        memcpy( ctx->extradata, entry->extradata, ctx->extradata_size );
        memset( ctx->extradata + ctx->extradata_size, 0, FF_INPUT_BUFFER_PADDING_SIZE );
    }
    /* AVCodecContext.codec_id is supposed to be set properly in avcodec_open2().
     * See lwlibav_flush_buffers() for why this is needed. */
    ctx->codec_id  = AV_CODEC_ID_NONE;
    /* This is needed by some CODECs such as UtVideo and raw video. */
    ctx->codec_tag = entry->codec_tag;
    /* Open an appropriate decoder.
     * Here, we force single-threaded decoding since some decoders don't initialize properly with multi-threaded decoding. */
    ctx->thread_count = 1;
    if( open_decoder( ctx, codec ) < 0 )
    {
        strcpy( error_string, "Failed to open decoder.\n" );
        goto fail;
    }
    exhp->current_index = extradata_index;
    exhp->delay_count   = 0;
    /* Set up decoder basic settings by actual decoding. */
    if( ctx->codec_type == AVMEDIA_TYPE_VIDEO
      ? try_decode_video_frame( dhp, frame_number, rap_pos, error_string ) < 0
      : try_decode_audio_frame( dhp, frame_number, error_string ) < 0 )
        goto fail;
    /* Reopen/flush with the requested number of threads. */
    ctx->thread_count = thread_count;
    int width  = ctx->width;
    int height = ctx->height;
    lwlibav_flush_buffers( dhp );
    ctx->get_buffer2 = exhp->get_buffer ? exhp->get_buffer : avcodec_default_get_buffer2;
    ctx->opaque      = app_specific;
    /* avcodec_open2() may have changed resolution unexpectedly. */
    ctx->width       = width;
    ctx->height      = height;
    return;
fail:
    exhp->delay_count = 0;
    dhp->error = 1;
    lw_log_show( &dhp->lh, LW_LOG_FATAL,
                 "%sIt is recommended you reopen the file.", error_string );
}
int ffmpeg_jpeg_encode(unsigned char *srcBuf,unsigned char* dstBuf,int dstBufSize,PixelFormat srcPixFmt,int srcWidth,int srcHeight,int qvalue)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    
    printf("Video encoding\n");

    /* find the MJPEG encoder */
    codec = avcodec_find_encoder(CODEC_ID_MJPEG);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        return -1;
    }

    c= avcodec_alloc_context();
	c->qmin = qvalue;
	c->qmax = qvalue;
    /* resolution must be a multiple of two */
    c->width = srcWidth;
    c->height = srcHeight;
    
	c->time_base.den = 25;
	c->time_base.num = 1;
    c->max_b_frames=0;
    c->pix_fmt = PIX_FMT_YUVJ420P;

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        return -2;
    }

	//prepare colorspace conversion
	//TODO: factor to util.
	AVPicture *pPicSrc = (AVPicture*)malloc(sizeof(AVPicture));
	int srcBufSize =  avpicture_get_size(srcPixFmt,srcWidth,srcHeight);
	avpicture_fill(pPicSrc,srcBuf,srcPixFmt,srcWidth,srcHeight);

	AVFrame *pPicScaled = (AVFrame*)malloc(sizeof(AVFrame));
	int scaleBufSize =  avpicture_get_size(c->pix_fmt,srcWidth,srcHeight);
	unsigned char *scaleBuf = (unsigned char*)malloc(scaleBufSize);
	avpicture_fill((AVPicture*)pPicScaled,scaleBuf,c->pix_fmt,srcWidth,srcHeight);

    struct SwsContext *img_convert_ctx = sws_getContext(
		srcWidth, srcHeight, srcPixFmt,
		srcWidth, srcHeight, c->pix_fmt,
		SWS_BICUBIC, NULL, NULL, NULL);

	if (img_convert_ctx == NULL)
	{
		printf("can not create colorspace converter!\n");
		return -3;
	}

	int ret = sws_scale(img_convert_ctx,
		pPicSrc->data, pPicSrc->linesize,
		0, srcHeight,
		pPicScaled->data, pPicScaled->linesize);

	if (ret < 0)
	{
		printf("color space conversion failed!\n");
		return -4;
	}

	//encode
	int out_size = avcodec_encode_video(c, dstBuf, dstBufSize, pPicScaled);
    
	if (out_size < 0)
	{
		printf("encode failed!\n");
		return -5;
	}

    avcodec_close(c);
    av_free(c);

	sws_freeContext(img_convert_ctx);
	free(pPicSrc);      /* allocated with malloc(), so release with free() */
	free(pPicScaled);
	free(scaleBuf);
 
	return out_size;
}
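
A hypothetical call site for ffmpeg_jpeg_encode(); the resolution, quality value, buffer size, and file name are illustrative assumptions, and srcBuf must already hold a 640x480 YUV420P image.

	int dstBufSize = 1024 * 1024; /* generous output buffer for one JPEG */
	unsigned char *dstBuf = (unsigned char*)malloc(dstBufSize);
	int jpeg_size = ffmpeg_jpeg_encode(srcBuf, dstBuf, dstBufSize,
		PIX_FMT_YUV420P, 640, 480, 5);
	if (jpeg_size > 0)
	{
		FILE *f = fopen("snapshot.jpg", "wb");
		fwrite(dstBuf, 1, jpeg_size, f);
		fclose(f);
	}
	free(dstBuf);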
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodecParameters       *pCodecParam = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVPacket        packet;
    int             send_packet, receive_frame;
    //float           aspect_ratio;
    AVFrame        *pict;
    /*
    std::unique_ptr<AVFrame, std::function<void(AVFrame*)>> frame_converted{
        av_frame_alloc(),
        [](AVFrame* f){ av_free(f->data[0]); } };
    if (av_frame_copy_props(frame_converted.get(),
        frame_decoded.get()) < 0) {
        throw std::runtime_error("Copying frame properties");
    }
    if (av_image_alloc(
        frame_converted->data, frame_converted->linesize,
        video_decoder_->width(), video_decoder_->height(),
        video_decoder_->pixel_format(), 1) < 0) {
        throw std::runtime_error("Allocating picture");
    }
    */
    AVDictionary    *optionsDict = NULL;
    struct SwsContext *sws_ctx = NULL;

    SDL_Texture*    pTexture = nullptr;
    SDL_Window*     pWindows = nullptr;
    SDL_Renderer*   pRenderer = nullptr;

    SDL_Event       event;

    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get the codec parameters for the video stream.
    // AVStream.codec is deprecated, so use the codecpar struct (AVCodecParameters) instead.
    pCodecParam = pFormatCtx->streams[videoStream]->codecpar;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecParam->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    // avcodec_open2() needs an AVCodecContext, so copy pCodecParam into one
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pCodecParam);

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Make a screen to put our video
#ifndef __DARWIN__
    pWindows = SDL_CreateWindow(argv[1],SDL_WINDOWPOS_CENTERED,SDL_WINDOWPOS_CENTERED,pCodecParam->width, pCodecParam->height,SDL_WINDOW_BORDERLESS|SDL_WINDOW_RESIZABLE);
#else
    screen = SDL_SetVideoMode(pCodecParam->width, pCodecParam->height, 24, 0);
#endif
    if (!pWindows) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    
    // Allocate a place to put our YUV image on that screen
    pRenderer = SDL_CreateRenderer(pWindows, -1, 0);
    if (!pRenderer) {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        exit(1);
    }
    pTexture = SDL_CreateTexture(pRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, pCodecParam->width, pCodecParam->height);
    sws_ctx =
        sws_getContext
        (
        pCodecParam->width,
        pCodecParam->height,
        (AVPixelFormat)pCodecParam->format,
        pCodecParam->width,
        pCodecParam->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
        );
    pict = av_frame_alloc();
    if (pict == nullptr){
        exit(1);
    }
    if (av_image_alloc(pict->data, pict->linesize,
        pCodecParam->width, pCodecParam->height,
        (AVPixelFormat)pCodecParam->format, 1) < 0){
        exit(1);
    }


    // Read frames and save first five frames to disk
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            //avcodec_decode_video2() is deprecated; use avcodec_send_packet() and avcodec_receive_frame().
            send_packet = avcodec_send_packet(pCodecCtx, &packet);
            receive_frame = avcodec_receive_frame(pCodecCtx, pFrame);

            // Did we get a video frame?
            if (send_packet == SEND_PACKET_SUCCESS && receive_frame == RECEIVE_FRAME_SUCCESS) {
                //SDL_LockYUVOverlay(bmp);
                //SDL_LockTexture(pTexture,NULL,);
                // Convert the image into YUV format that SDL uses
                // copy frame metadata from the decoded frame to the converted one
                if (av_frame_copy_props(pict, pFrame) < 0) {
                    exit(1);
                }

                sws_scale
                    (
                    sws_ctx,
                    pFrame->data,
                    pFrame->linesize,
                    0,
                    pCodecParam->height,
                    pict->data,
                    pict->linesize
                    );
                
                //SDL_UnlockYUVOverlay(bmp);
                SDL_UpdateYUVTexture(pTexture, NULL, pict->data[0], pict->linesize[0], pict->data[1], pict->linesize[1], pict->data[2], pict->linesize[2]);
                SDL_RenderCopy(pRenderer, pTexture, NULL, NULL);
                SDL_RenderPresent(pRenderer);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_packet_unref(&packet);
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            SDL_DestroyRenderer(pRenderer);
            SDL_DestroyTexture(pTexture);
            SDL_DestroyWindow(pWindows);
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }

    // Free the YUV frame
    av_frame_free(&pFrame);
    //free pict
    av_freep(&pict->data[0]);
    av_frame_free(&pict);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
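
SEND_PACKET_SUCCESS and RECEIVE_FRAME_SUCCESS are used above but not defined in this excerpt. Since both avcodec_send_packet() and avcodec_receive_frame() return 0 on success, a plausible definition (an assumption) is:

/* Assumed definitions: both calls return 0 on success. */
#define SEND_PACKET_SUCCESS   0
#define RECEIVE_FRAME_SUCCESS 0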
RealFFMpegCodecEncoder::~RealFFMpegCodecEncoder()
{
	avcodec_close(c);
    av_free(c);
	free(encBuf);
}
Example #13
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_packet_unref(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
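
open_input_file(), init_filters(), and the filter globals are not included in this excerpt. The sketch below reconstructs init_filters() closely following the upstream FFmpeg filtering_video.c example this code matches; the globals and the filter string are assumptions in that spirit.

/* globals assumed by main() above, as in the upstream example */
static AVFormatContext *fmt_ctx;
static AVCodecContext *dec_ctx;
AVFilterContext *buffersrc_ctx;
AVFilterContext *buffersink_ctx;
AVFilterGraph *filter_graph;
static int video_stream_index = -1;
static const char *filter_descr = "scale=78:24"; /* illustrative filter chain */

static int init_filters(const char *filters_descr)
{
    char args[512];
    int ret;
    AVFilter *buffersrc  = avfilter_get_by_name("buffer");
    AVFilter *buffersink = avfilter_get_by_name("buffersink");
    AVFilterInOut *outputs = avfilter_inout_alloc();
    AVFilterInOut *inputs  = avfilter_inout_alloc();
    AVRational time_base = fmt_ctx->streams[video_stream_index]->time_base;

    filter_graph = avfilter_graph_alloc();
    /* the buffer source: decoded frames enter the graph here */
    snprintf(args, sizeof(args),
             "video_size=%dx%d:pix_fmt=%d:time_base=%d/%d:pixel_aspect=%d/%d",
             dec_ctx->width, dec_ctx->height, dec_ctx->pix_fmt,
             time_base.num, time_base.den,
             dec_ctx->sample_aspect_ratio.num, dec_ctx->sample_aspect_ratio.den);
    ret = avfilter_graph_create_filter(&buffersrc_ctx, buffersrc, "in",
                                       args, NULL, filter_graph);
    if (ret < 0)
        goto end;
    /* the buffer sink: filtered frames leave the graph here */
    ret = avfilter_graph_create_filter(&buffersink_ctx, buffersink, "out",
                                       NULL, NULL, filter_graph);
    if (ret < 0)
        goto end;

    /* wire the parsed filter description between "in" and "out" */
    outputs->name       = av_strdup("in");
    outputs->filter_ctx = buffersrc_ctx;
    outputs->pad_idx    = 0;
    outputs->next       = NULL;
    inputs->name        = av_strdup("out");
    inputs->filter_ctx  = buffersink_ctx;
    inputs->pad_idx     = 0;
    inputs->next        = NULL;
    if ((ret = avfilter_graph_parse_ptr(filter_graph, filters_descr,
                                        &inputs, &outputs, NULL)) < 0)
        goto end;
    ret = avfilter_graph_config(filter_graph, NULL);
end:
    avfilter_inout_free(&inputs);
    avfilter_inout_free(&outputs);
    return ret;
}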
/*
 * Audio encoding example
 */
static void audio_encode_example(const char *filename)
{
	AVCodec *codec;
	AVCodecContext *c= NULL;
	int frame_size, i, j, out_size, outbuf_size;
	FILE *f;
	short *samples;
	float t, tincr;
	uint8_t *outbuf;

	printf("Audio encoding\n");

	/* find the MP2 encoder */
	codec = avcodec_find_encoder(CODEC_ID_MP2);
	if (!codec)
	{
		fprintf(stderr, "codec not found\n");
		exit(1);
	}

	c= avcodec_alloc_context();

	/* put sample parameters */
	c->bit_rate = 64000;
	c->sample_rate = 44100;
	c->channels = 2;

	/* open it */
	if (avcodec_open(c, codec) < 0)
	{
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}

	/* the codec gives us the frame size, in samples */
	frame_size = c->frame_size;
	samples = malloc(frame_size * 2 * c->channels);
	outbuf_size = 10000;
	outbuf = malloc(outbuf_size);

	f = fopen(filename, "wb");
	if (!f)
	{
		fprintf(stderr, "could not open %s\n", filename);
		exit(1);
	}

	/* encode a single tone sound */
	t = 0;
	tincr = 2 * M_PI * 440.0 / c->sample_rate;
	for(i=0; i<200; i++)
	{
		for(j=0; j<frame_size; j++)
		{
			samples[2*j] = (int)(sin(t) * 10000);
			samples[2*j+1] = samples[2*j];
			t += tincr;
		}
		/* encode the samples */
		out_size = avcodec_encode_audio(c, outbuf, outbuf_size, samples);
		fwrite(outbuf, 1, out_size, f);
	}
	fclose(f);
	free(outbuf);
	free(samples);

	avcodec_close(c);
	av_free(c);
}
Example #15
static void close_audio(AVFormatContext *oc, AVStream *st)
{
    avcodec_close(st->codec);

    av_free(samples);
}
static int close_encoder(AVCodecContext **enc_ctx)
{
    avcodec_close(*enc_ctx);
    av_freep(enc_ctx);
    return 0;
}
int main(int argc, char* argv[]) {
	printf("Play simple video\n");
	if(argc < 2) {
		printf("Miss input video");
		return -1;
	}
	int ret = -1, i = -1, v_stream_idx = -1;
	char* vf_path = argv[1];
	// note: fmt_ctx must be initialized to NULL
	AVFormatContext* fmt_ctx = NULL;
	AVCodecContext* codec_ctx = NULL;
	AVCodec* codec;
	AVFrame * frame;
	AVPacket packet;

	av_register_all();
	ret = avformat_open_input(&fmt_ctx, vf_path, NULL, NULL);
	if(ret < 0){
		printf("Open video file %s failed \n", vf_path);
		goto end;
	}
	if(avformat_find_stream_info(fmt_ctx, NULL)<0)
    	goto end;
    av_dump_format(fmt_ctx, 0, vf_path, 0);
    for(i = 0; i< fmt_ctx->nb_streams; i++) {
    	if(fmt_ctx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
    		v_stream_idx = i;
    		break;
    	}
    }
    if(v_stream_idx == -1) {
		printf("Cannot find video stream\n");
		goto end;
	}

	codec_ctx = avcodec_alloc_context3(NULL);
	avcodec_parameters_to_context(codec_ctx, fmt_ctx->streams[v_stream_idx]->codecpar);
	codec = avcodec_find_decoder(codec_ctx->codec_id);
	if(codec == NULL){
		printf("Unsupported codec for video file\n");
		goto end;
	}
	if(avcodec_open2(codec_ctx, codec, NULL) < 0){
		printf("Can not open codec\n");
		goto end;
	}

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    	printf("Could not init SDL due to %s", SDL_GetError());
    	goto end;
    }
    SDL_Window *window;
    SDL_Renderer *renderer;
    SDL_Texture *texture;
    SDL_Event event;
    SDL_Rect r;
    window = SDL_CreateWindow("SDL_CreateTexture", SDL_WINDOWPOS_UNDEFINED,
    	SDL_WINDOWPOS_UNDEFINED, codec_ctx->width, codec_ctx->height,
    	SDL_WINDOW_RESIZABLE);
    r.x = 0;
    r.y = 0;
    r.w = codec_ctx->width;
    r.h = codec_ctx->height;

    renderer = SDL_CreateRenderer(window, -1, 0);
    // texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_RGBA8888, SDL_TEXTUREACCESS_TARGET,
    // 	codec_ctx->width, codec_ctx->height);
    texture = SDL_CreateTexture(renderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING,
    	codec_ctx->width, codec_ctx->height);

    struct SwsContext      *sws_ctx = NULL;
    sws_ctx = sws_getContext(codec_ctx->width, codec_ctx->height, codec_ctx->pix_fmt,
    	codec_ctx->width, codec_ctx->height, AV_PIX_FMT_YUV420P, SWS_BILINEAR, NULL, NULL, NULL);

    frame = av_frame_alloc();

    int ret1, ret2;
    AVFrame* pict;
    pict = av_frame_alloc();


	int             numBytes;
	uint8_t         *buffer = NULL;
  	numBytes=avpicture_get_size(AV_PIX_FMT_YUV420P, codec_ctx->width,
			      codec_ctx->height);
  	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
    // required, or bad dst image pointers
	avpicture_fill((AVPicture *)pict, buffer, AV_PIX_FMT_YUV420P,
		 codec_ctx->width, codec_ctx->height);
    i = 0;
	while (1) {
        SDL_PollEvent(&event);
        if(event.type == SDL_QUIT)
                break;
        ret = av_read_frame(fmt_ctx, &packet);
        if(ret <0){
        	continue;
        }
        if(packet.stream_index == v_stream_idx) {
			ret1 = avcodec_send_packet(codec_ctx, &packet);
			ret2 = avcodec_receive_frame(codec_ctx, frame);
			if(ret2 < 0 ){
				continue;
	    	}
	    	sws_scale(sws_ctx, (uint8_t const * const *)frame->data,
			      frame->linesize, 0, codec_ctx->height,
			      pict->data, pict->linesize);
	   //  	if(++i <=5 ){
				// save_frame(pict, codec_ctx->width, codec_ctx->height, i);
	   //  	}
	        SDL_UpdateYUVTexture(texture, &r, pict->data[0], pict->linesize[0],
	        	pict->data[1], pict->linesize[1],
	        	pict->data[2], pict->linesize[2]);
	        // SDL_UpdateTexture(texture, &r, pict->data[0], pict->linesize[0]);

	        // r.x=rand()%500;
	        // r.y=rand()%500;

	        // SDL_SetRenderTarget(renderer, texture);
	        // SDL_SetRenderDrawColor(renderer, 0x00, 0x00, 0x00, 0x00);
	        SDL_RenderClear(renderer);
	        // SDL_RenderDrawRect(renderer,&r);
	        // SDL_SetRenderDrawColor(renderer, 0xFF, 0x00, 0x00, 0x00);
	        // SDL_RenderFillRect(renderer, &r);
	        // SDL_SetRenderTarget(renderer, NULL);
	        SDL_RenderCopy(renderer, texture, NULL, NULL);
	        // SDL_RenderCopy(renderer, texture, &r, &r);
	        SDL_RenderPresent(renderer);
	        // SDL_Delay(50);
        }
        av_packet_unref(&packet);
    }

    SDL_DestroyRenderer(renderer);
    SDL_Quit();
	av_frame_free(&frame);
	avcodec_close(codec_ctx);
	avcodec_free_context(&codec_ctx);
    end:
	avformat_close_input(&fmt_ctx);
	printf("Shutdown\n");
	return 0;
}
static int close_decoder(AVCodecContext **dec_ctx)
{
    avcodec_close(*dec_ctx);
    av_freep(dec_ctx);
    return 0;
}
Example #19
int main(int argc, char *argv[]) {
	// Initializing these to NULL prevents segfaults!
	AVFormatContext   *pFormatCtx = NULL;
	int               i, videoStream;
	AVCodecContext    *pCodecCtxOrig = NULL;
	AVCodecContext    *pCodecCtx = NULL;
	AVCodec           *pCodec = NULL;
	AVFrame           *pFrame = NULL;
	AVFrame           *pFrameRGB = NULL;
	AVPacket          packet;
	int               frameFinished;
	int               numBytes;
	uint8_t           *buffer = NULL;
	struct SwsContext *sws_ctx = NULL;

	if(argc < 2) {
		printf("Please provide a movie file\n");
		return -1;
	}
	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
	{
		return -1; // Couldn't open file
	}

	// Retrieve stream information
	if(avformat_find_stream_info(pFormatCtx, NULL)<0)
	{
		return -1; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		return -1; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtxOrig=pFormatCtx->streams[videoStream]->codec;
	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtxOrig->codec_id);
	if(pCodec==NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Copy context
	pCodecCtx = avcodec_alloc_context3(pCodec);
	if(avcodec_copy_context(pCodecCtx, pCodecCtxOrig) != 0) {
		fprintf(stderr, "Couldn't copy codec context");
		return -1; // Error copying codec context
	}

	// Open codec
	if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
	{
		return -1; // Could not open codec
	}

	// Allocate video frame
	pFrame=av_frame_alloc();

	// Allocate an AVFrame structure
	pFrameRGB=av_frame_alloc();
	if(pFrameRGB==NULL)
	{
		return -1;
	}

	// Determine required buffer size and allocate buffer
	numBytes=av_image_get_buffer_size(AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height, 1);
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	av_image_fill_arrays(pFrameRGB->data, pFrameRGB->linesize, buffer, 
		AV_PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height, 1);

	// initialize SWS context for software scaling
	sws_ctx = sws_getContext(pCodecCtx->width,
		pCodecCtx->height,
		pCodecCtx->pix_fmt,
		pCodecCtx->width,
		pCodecCtx->height,
		AV_PIX_FMT_RGB24,
		SWS_BILINEAR,
		NULL,
		NULL,
		NULL);

	// Read frames and save first five frames to disk
	i=0;
	while(av_read_frame(pFormatCtx, &packet)>=0) {
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if(frameFinished) {
				// Convert the image from its native format to RGB
				sws_scale(sws_ctx, (uint8_t const * const *)pFrame->data,
				pFrame->linesize, 0, pCodecCtx->height,
				pFrameRGB->data, pFrameRGB->linesize);

				// Save the frame to disk
				if(++i<=5)
				{
					WriteJPEG(pCodecCtx, pFrame, i);
				}
				//SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_packet_unref(&packet);
	}

	// Free the RGB image
	av_free(buffer);
	av_frame_free(&pFrameRGB);

	// Free the YUV frame
	av_frame_free(&pFrame);

	// Close the codecs
	avcodec_close(pCodecCtx);
	avcodec_close(pCodecCtxOrig);

	// Close the video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
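
SaveFrame() appears only in a commented-out call above. A typical PPM writer for the RGB24 frames produced by the sws_scale() call, sketched after the usual tutorial helper rather than taken from this excerpt:

void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
	FILE *pFile;
	char szFilename[32];
	int  y;

	// Open file
	sprintf(szFilename, "frame%d.ppm", iFrame);
	pFile = fopen(szFilename, "wb");
	if (pFile == NULL)
		return;

	// Write PPM header: binary RGB, 8 bits per channel
	fprintf(pFile, "P6\n%d %d\n255\n", width, height);

	// Write pixel data row by row (linesize may exceed width*3)
	for (y = 0; y < height; y++)
		fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);

	fclose(pFile);
}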
Example #20
/** Convert an audio file to an AAC file in an MP4 container. */
int main(int argc, char **argv)
{
    AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
    AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
    AVAudioResampleContext *resample_context = NULL;
    AVAudioFifo *fifo = NULL;
    int ret = AVERROR_EXIT;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(1);
    }

    /** Register all codecs and formats so that they can be used. */
    av_register_all();
    /** Open the input file for reading. */
    if (open_input_file(argv[1], &input_format_context,
                        &input_codec_context))
        goto cleanup;
    /** Open the output file for writing. */
    if (open_output_file(argv[2], input_codec_context,
                         &output_format_context, &output_codec_context))
        goto cleanup;
    /** Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context,
                       &resample_context))
        goto cleanup;
    /** Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo))
        goto cleanup;
    /** Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
        goto cleanup;

    /**
     * Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither.
     */
    while (1) {
        /** Use the encoder's desired frame size for processing. */
        const int output_frame_size = output_codec_context->frame_size;
        int finished                = 0;

        /**
         * Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame sizes may differ, we
         * use the FIFO buffer to store as many frames' worth of input samples
         * as it takes to make up at least one frame's worth of output samples.
         */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
             * Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer.
             */
            if (read_decode_convert_and_store(fifo, input_format_context,
                                              input_codec_context,
                                              output_codec_context,
                                              resample_context, &finished))
                goto cleanup;

            /**
             * If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file.
             */
            if (finished)
                break;
        }

        /**
         * If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder.
         */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished && av_audio_fifo_size(fifo) > 0))
            /**
             * Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file.
             */
            if (load_encode_and_write(fifo, output_format_context,
                                      output_codec_context))
                goto cleanup;

        /**
         * If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish.
         */
        if (finished) {
            int data_written;
            /** Flush the encoder as it may have delayed frames. */
            do {
                if (encode_audio_frame(NULL, output_format_context,
                                       output_codec_context, &data_written))
                    goto cleanup;
            } while (data_written);
            break;
        }
    }

    /** Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context))
        goto cleanup;
    ret = 0;

cleanup:
    if (fifo)
        av_audio_fifo_free(fifo);
    if (resample_context) {
        avresample_close(resample_context);
        avresample_free(&resample_context);
    }
    if (output_codec_context)
        avcodec_close(output_codec_context);
    if (output_format_context) {
        avio_close(output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
        avcodec_close(input_codec_context);
    if (input_format_context)
        avformat_close_input(&input_format_context);

    return ret;
}
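The helpers used above (open_input_file(), init_resampler(), load_encode_and_write(), ...) come from FFmpeg's transcode_aac example and are not reproduced here. As a flavor of their scale, a minimal init_fifo() might look like this; a sketch assuming a signed 16-bit stereo output format:

static int init_fifo(AVAudioFifo **fifo)
{
    /** The FIFO grows on demand, so an initial capacity of 1 sample suffices. */
    *fifo = av_audio_fifo_alloc(AV_SAMPLE_FMT_S16, 2, 1);
    if (!*fifo) {
        fprintf(stderr, "Could not allocate FIFO\n");
        return AVERROR(ENOMEM);
    }
    return 0;
}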
Example #21
ADM_AudiocodecWMA::~ADM_AudiocodecWMA()
{
        avcodec_close(_context);
        ADM_dealloc(_context);
        _contextVoid = NULL;
}
Example #22
FFwrapper::Decoder::~Decoder( ) {
    avcodec_close(ctx);
    av_free(ctx);
    av_free(frame);
}
Example #23
static ImBuf *avi_fetchibuf(struct anim *anim, int position)
{
	ImBuf *ibuf = NULL;
	int *tmp;
	int y;
	
	if (anim == NULL) return (NULL);

#if defined(_WIN32) && !defined(FREE_WINDOWS)
	if (anim->avistreams) {
		LPBITMAPINFOHEADER lpbi;

		if (anim->pgf) {
			lpbi = AVIStreamGetFrame(anim->pgf, position + AVIStreamStart(anim->pavi[anim->firstvideo]));
			if (lpbi) {
				ibuf = IMB_ibImageFromMemory((unsigned char *) lpbi, 100, IB_rect, "<avi_fetchibuf>");
//Oh brother...
			}
		}
	}
	else {
#else
	if (1) {
#endif
		ibuf = IMB_allocImBuf(anim->x, anim->y, 24, IB_rect);

		tmp = AVI_read_frame(anim->avi, AVI_FORMAT_RGB32, position,
		                     AVI_get_stream(anim->avi, AVIST_VIDEO, 0));
		
		if (tmp == NULL) {
			printf("Error reading frame from AVI");
			IMB_freeImBuf(ibuf);
			return NULL;
		}

		for (y = 0; y < anim->y; y++) {
			memcpy(&(ibuf->rect)[((anim->y - y) - 1) * anim->x],  &tmp[y * anim->x],
			       anim->x * 4);
		}
		
		MEM_freeN(tmp);
	}
	
	ibuf->profile = IB_PROFILE_SRGB;
	
	return ibuf;
}
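The memcpy() loop above flips the AVI's bottom-up rows into the top-down ImBuf rect. Factored out as a standalone helper (name and signature hypothetical), the technique is:

/* Copy `height` rows of 32-bit pixels, reversing the vertical order. */
static void flip_rows_rgba(unsigned int *dst, const unsigned int *src,
                           int width, int height)
{
	int y;

	for (y = 0; y < height; y++) {
		/* the last source row becomes the first destination row */
		memcpy(&dst[(height - 1 - y) * width], &src[y * width],
		       width * sizeof(*dst));
	}
}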

#ifdef WITH_FFMPEG

extern void do_init_ffmpeg(void);

static int startffmpeg(struct anim *anim)
{
	int i, videoStream;

	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = NULL;
	AVCodecContext *pCodecCtx;
	int frs_num;
	double frs_den;
	int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* The following for color space determination */
	int srcRange, dstRange, brightness, contrast, saturation;
	int *table;
	const int *inv_table;
#endif

	if (anim == 0) return(-1);

	streamcount = anim->streamindex;

	do_init_ffmpeg();

	if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
		return -1;
	}

	if (av_find_stream_info(pFormatCtx) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	av_dump_format(pFormatCtx, 0, anim->name, 0);


	/* Find the video stream */
	videoStream = -1;

	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			if (streamcount > 0) {
				streamcount--;
				continue;
			}
			videoStream = i;
			break;
		}

	if (videoStream == -1) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	/* Find the decoder for the video stream */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	pCodecCtx->workaround_bugs = 1;

	if (avcodec_open(pCodecCtx, pCodec) < 0) {
		av_close_input_file(pFormatCtx);
		return -1;
	}

	anim->duration = ceil(pFormatCtx->duration *
	                      av_q2d(pFormatCtx->streams[videoStream]->r_frame_rate) /
	                      AV_TIME_BASE);

	frs_num = pFormatCtx->streams[videoStream]->r_frame_rate.num;
	frs_den = pFormatCtx->streams[videoStream]->r_frame_rate.den;

	frs_den *= AV_TIME_BASE;

	while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
		frs_num /= 10;
		frs_den /= 10;
	}

	anim->frs_sec = frs_num;
	anim->frs_sec_base = frs_den;

	anim->params = 0;

	anim->x = pCodecCtx->width;
	anim->y = pCodecCtx->height;
	anim->interlacing = 0;
	anim->orientation = 0;
	anim->framesize = anim->x * anim->y * 4;

	anim->curposition = -1;
	anim->last_frame = 0;
	anim->last_pts = -1;
	anim->next_pts = -1;
	anim->next_packet.stream_index = -1;

	anim->pFormatCtx = pFormatCtx;
	anim->pCodecCtx = pCodecCtx;
	anim->pCodec = pCodec;
	anim->videoStream = videoStream;

	anim->pFrame = avcodec_alloc_frame();
	anim->pFrameComplete = FALSE;
	anim->pFrameDeinterlaced = avcodec_alloc_frame();
	anim->pFrameRGB = avcodec_alloc_frame();

	if (avpicture_get_size(PIX_FMT_RGBA, anim->x, anim->y) !=
	    anim->x * anim->y * 4)
	{
		fprintf(stderr,
		        "ffmpeg has changed alloc scheme ... ARGHHH!\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

	if (anim->ib_flags & IB_animdeinterlace) {
		avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
		               MEM_callocN(avpicture_get_size(
		                               anim->pCodecCtx->pix_fmt,
		                               anim->x, anim->y),
		                           "ffmpeg deinterlace"),
		               anim->pCodecCtx->pix_fmt, anim->x, anim->y);
	}

	if (pCodecCtx->has_b_frames) {
		anim->preseek = 25; /* FIXME: detect gopsize ... */
	}
	else {
		anim->preseek = 0;
	}
	
	anim->img_convert_ctx = sws_getContext(
	        anim->pCodecCtx->width,
	        anim->pCodecCtx->height,
	        anim->pCodecCtx->pix_fmt,
	        anim->pCodecCtx->width,
	        anim->pCodecCtx->height,
	        PIX_FMT_RGBA,
	        SWS_FAST_BILINEAR | SWS_PRINT_INFO,
	        NULL, NULL, NULL);
		
	if (!anim->img_convert_ctx) {
		fprintf(stderr,
		        "Can't transform color space??? Bailing out...\n");
		avcodec_close(anim->pCodecCtx);
		av_close_input_file(anim->pFormatCtx);
		av_free(anim->pFrameRGB);
		av_free(anim->pFrameDeinterlaced);
		av_free(anim->pFrame);
		anim->pCodecCtx = NULL;
		return -1;
	}

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
	/* Try to detect if input has 0-255 YCbCr range (JFIF JPEG / MotionJPEG) */
	if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
	                              &table, &dstRange, &brightness, &contrast, &saturation))
	{
		srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
		inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

		if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
		                             table, dstRange, brightness, contrast, saturation))
		{
			printf("Warning: Could not set libswscale colorspace details.\n");
		}
	}
	else {
		printf("Warning: Could not set libswscale colorspace details.\n");
	}
#endif
		
	return (0);
}
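do_init_ffmpeg() is only declared extern above. A minimal implementation needs little more than a one-time registration call; a sketch, with the guard variable being an assumption:

static int ffmpeg_initialized = 0;

void do_init_ffmpeg(void)
{
	if (!ffmpeg_initialized) {
		ffmpeg_initialized = 1;
		/* register all demuxers, muxers and codecs exactly once */
		av_register_all();
	}
}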
Example #24
int slimaudio_decoder_aac_process(slimaudio_t *audio) {
	char streamformat[16];
	int out_size;
	int len = 0;
	int iRC;
	u8_t *outbuf;
	u8_t *inbuf;

	/* It is not really correct to assume that all MP4 files (which were not
	 * otherwise recognized as ALAC or MOV by the scanner) are AAC, but that
	 * is the current server side status.
	 *
	 * Container type and bitstream format:
	 *
	 * '1' (adif),
	 * '2' (adts),
	 * '3' (latm within loas),
	 * '4' (rawpkts),
	 * '5' (mp4ff),
	 * '6' (latm within rawpkts)
	 * 
	 * This is a hack that assumes:
	 * (1) If the original content-type of the track is MP4 or SLS then we
	 *     are streaming an MP4 file (without any transcoding);
	 * (2) All other AAC streams will be adts.
	 *
	 * So the server will only set aac_format to '2' or '5'.
	 */

	DEBUGF ("aac: decoder_format:%d '%c'\n", audio->aac_format, audio->aac_format);

	int audioStream = 0; /* Always zero for aac decoder */

	switch ( audio->aac_format )
	{
		case '2':
			strncpy ( streamformat, "aac", sizeof (streamformat) );
			break;
		case '5':
			strncpy ( streamformat, "m4a", sizeof (streamformat) );
			break;
		default:
			fprintf (stderr, "aac: unknown container type: %c\n" ,audio->aac_format );
			return -1;
	}

	DEBUGF ("aac: play audioStream: %d\n", audioStream);

	inbuf = av_malloc(AUDIO_INBUF_SIZE + FF_INPUT_BUFFER_PADDING_SIZE);
	if ( !inbuf )
	{
		DEBUGF("aac: inbuf alloc failed.\n");
		return -1;
	}

	AVIOContext *AVIOCtx;

	AVIOCtx = avio_alloc_context(inbuf, AUDIO_CHUNK_SIZE, 0, audio, av_read_data, NULL, NULL);
	if ( AVIOCtx == NULL )
	{
		DEBUGF("aac: avio_alloc_context failed.\n");
		return -1;
	}
	else
	{
		AVIOCtx->is_streamed = 1;
	}

	AVInputFormat* pAVInputFormat = av_find_input_format(streamformat);
	if( !pAVInputFormat )
	{
		DEBUGF("aac: probe failed\n");
		return -1;
	}
	else
	{
		DEBUGF("aac: probe ok name:%s lname:%s\n", pAVInputFormat->name, pAVInputFormat->long_name);
	}

	AVFormatContext *pFormatCtx;

	pFormatCtx = avformat_alloc_context();
	if( pFormatCtx == NULL ) {
		DEBUGF("aac: avformat_alloc_context failed.\n");
	}
	else {
		pFormatCtx->pb = AVIOCtx;
	}

	AVCodecContext *pCodecCtx;
	
	iRC = avformat_open_input(&pFormatCtx, "", pAVInputFormat, NULL);

	if (iRC < 0)
	{
		DEBUGF("aac: input stream open failed:%d\n", iRC);
		return -1;
	}
	else
	{
		iRC = av_find_stream_info(pFormatCtx);
		if ( iRC < 0 )
		{
			DEBUGF("aac: find stream info failed:%d\n", iRC);
			return -1;
		}
		else
		{
			if ( pFormatCtx->nb_streams <= audioStream )
			{
				DEBUGF("aac: invalid stream.\n");
				return -1;
			}

			if ( pFormatCtx->streams[audioStream]->codec->codec_type != CODEC_TYPE_AUDIO )
			{
				DEBUGF("aac: stream: %d is not audio.\n", audioStream );
				return -1;
			}
			else
			{
				pCodecCtx = pFormatCtx->streams[audioStream]->codec;
			}
		}
	}

	AVCodec *pCodec;

	/* Find the AAC audio decoder */
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if ( !pCodec )
	{
		DEBUGF("aac: codec not found.\n");
		return -1;
	} 
	
	/* Open codec */
	iRC = avcodec_open(pCodecCtx, pCodec);
	if ( iRC < 0)
	{
		DEBUGF("aac: could not open codec:%d\n", iRC);
		return -1;
	}

	outbuf = av_malloc(AVCODEC_MAX_AUDIO_FRAME_SIZE);
	if ( !outbuf )
	{
		DEBUGF("aac: outbuf alloc failed.\n");
		return -1;
	}

	bool eos = false;
	AVPacket avpkt;

	while ( ! eos )
	{
		iRC = av_read_frame (pFormatCtx, &avpkt);

		/* Some decoders fail to read the last packet so additional handling is required */
		if (iRC < 0)
		{
			DEBUGF("aac: av_read_frame error: %d\n", iRC);

			if ( (iRC == AVERROR_EOF) )
			{
				DEBUGF("aac: AVERROR_EOF\n");
				eos=true;
			}

			if ( pFormatCtx->pb->eof_reached )
			{
				DEBUGF("aac: url_feof\n");
				eos=true;
			}

			if ( url_ferror(pFormatCtx->pb) )
			{
				DEBUGF("aac: url_ferror\n");
#if 0
		                break;
#endif
			}
		}

		out_size = AVCODEC_MAX_AUDIO_FRAME_SIZE;
		len = avcodec_decode_audio3(pCodecCtx, (int16_t *)outbuf, &out_size, &avpkt);
		if (len < 0)
		{
			DEBUGF("aac: no audio to decode\n");
			//av_free_packet (&avpkt);
			//break;
		}

		if (out_size > 0)
		{
			/* if a frame has been decoded, output it */
			slimaudio_buffer_write(audio->output_buffer, (char*)outbuf, out_size);
		}

		av_free_packet (&avpkt);
	}

	if ( inbuf != NULL )
		av_free(inbuf);

	if ( outbuf != NULL )
		av_free(outbuf);

	DEBUGF ("aac: avcodec_close\n");
	avcodec_close(pCodecCtx);

	/* Close the stream */
	DEBUGF ("aac: av_close_input_stream\n");
	av_close_input_stream(pFormatCtx);

	return 0;
}
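The av_read_data callback handed to avio_alloc_context() above is not shown. Its shape is fixed by AVIO's read_packet signature; the body below is a sketch, and the slimaudio_buffer_read() call and decoder_buffer field are assumptions about the surrounding player:

/* AVIO pull callback: fill buf with up to buf_size bytes from the input ring buffer */
static int av_read_data(void *opaque, uint8_t *buf, int buf_size)
{
	slimaudio_t *audio = (slimaudio_t *) opaque;
	int data_len = buf_size;

	slimaudio_buffer_read(audio->decoder_buffer, (char *) buf, &data_len);

	return data_len; /* number of bytes actually produced */
}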
Example #25
int main(int argc, char **argv)
{
    double prev_segment_time = 0;
    unsigned int output_index = 1;
    AVInputFormat *ifmt;
    AVOutputFormat *ofmt;
    AVFormatContext *ic = NULL;
    AVFormatContext *oc;
    AVStream *video_st = NULL;
    AVStream *audio_st = NULL;
    AVCodec *codec;
    char *output_filename;
    char *remove_filename;
    int video_index;
    int audio_index;
    unsigned int first_segment = 1;
    unsigned int last_segment = 0;
    int write_index = 1;
    int decode_done;
    char *dot;
    int ret;
    int i;
    int remove_file;
    struct sigaction act;

    int opt;
    int longindex;
    char *endptr;
    struct options_t options = {0};

    static const char *optstring = "i:d:p:m:u:n:ovh?";

    static const struct option longopts[] = {
        { "input",         required_argument, NULL, 'i' },
        { "duration",      required_argument, NULL, 'd' },
        { "output-prefix", required_argument, NULL, 'p' },
        { "m3u8-file",     required_argument, NULL, 'm' },
        { "url-prefix",    required_argument, NULL, 'u' },
        { "num-segments",  required_argument, NULL, 'n' },
        { "ondemand",      required_argument, NULL, 'o' },
        { "help",          no_argument,       NULL, 'h' },
        { 0, 0, 0, 0 }
    };


    /* Set some defaults */
    options.segment_duration = 10;

    do {
        opt = getopt_long(argc, argv, optstring, longopts, &longindex );
        switch (opt) {
            case 'i':
                options.input_file = optarg;
                if (!strcmp(options.input_file, "-")) {
                    options.input_file = "pipe:";
                }
                break;

            case 'd':
                options.segment_duration = strtol(optarg, &endptr, 10);
                if (optarg == endptr || options.segment_duration < 0 || options.segment_duration == -LONG_MAX) {
                    fprintf(stderr, "Segment duration time (%s) invalid\n", optarg);
                    exit(1);
                }
                break;

            case 'p':
                options.output_prefix = optarg;
                break;

            case 'm':
                options.m3u8_file = optarg;
                break;

            case 'u':
                options.url_prefix = optarg;
                break;

            case 'n':
                options.num_segments = strtol(optarg, &endptr, 10);
                if (optarg == endptr || options.num_segments < 0 || options.num_segments >= LONG_MAX) {
                    fprintf(stderr, "Maximum number of ts files (%s) invalid\n", optarg);
                    exit(1);
                }
                break;

            case 'o':
                options.ondemand = 1;
                break;

            case 'h':
                display_usage();
                break;
        }
    } while (opt != -1);


    /* Check required args were set */
    if (options.input_file == NULL) {
        fprintf(stderr, "Please specify an input file.\n");
        exit(1);
    }

    if (options.output_prefix == NULL) {
        fprintf(stderr, "Please specify an putput prefix.\n");
        exit(1);
    }

    if (options.m3u8_file == NULL) {
        fprintf(stderr, "Please specify an output file.\n");
        exit(1);
    }

    if (options.url_prefix == NULL) {
        fprintf(stderr, "Please specify a url prefix.\n");
        exit(1);
    }

    av_register_all();
    remove_filename = malloc(sizeof(char) * (strlen(options.output_prefix) + 15));
    if (!remove_filename) {
        fprintf(stderr, "Could not allocate space for remove filenames\n");
        exit(1);
    }

    output_filename = malloc(sizeof(char) * (strlen(options.output_prefix) + 15));
    if (!output_filename) {
        fprintf(stderr, "Could not allocate space for output filenames\n");
        exit(1);
    }

    options.tmp_m3u8_file = malloc(strlen(options.m3u8_file) + 2);
    if (!options.tmp_m3u8_file) {
        fprintf(stderr, "Could not allocate space for temporary index filename\n");
        exit(1);
    }

    // Use a dotfile as a temporary file
    strncpy(options.tmp_m3u8_file, options.m3u8_file, strlen(options.m3u8_file) + 2);
    dot = strrchr(options.tmp_m3u8_file, '/');
    dot = dot ? dot + 1 : options.tmp_m3u8_file;
    memmove(dot + 1, dot, strlen(dot));
    *dot = '.';

    ifmt = av_find_input_format("mpegts");
    if (!ifmt) {
        fprintf(stderr, "Could not find MPEG-TS demuxer\n");
        exit(1);
    }

    ret = av_open_input_file(&ic, options.input_file, ifmt, 0, NULL);
    if (ret != 0) {
        fprintf(stderr, "Could not open input file, make sure it is an mpegts file: %d\n", ret);
        exit(1);
    }

    if (av_find_stream_info(ic) < 0) {
        fprintf(stderr, "Could not read stream information\n");
        exit(1);
    }

    ofmt = av_guess_format("mpegts", NULL, NULL);
    if (!ofmt) {
        fprintf(stderr, "Could not find MPEG-TS muxer\n");
        exit(1);
    }

    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Could not allocated output context");
        exit(1);
    }
    oc->oformat = ofmt;

    video_index = -1;
    audio_index = -1;

    for (i = 0; i < ic->nb_streams && (video_index < 0 || audio_index < 0); i++) {
        switch (ic->streams[i]->codec->codec_type) {
            case CODEC_TYPE_VIDEO:
                video_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                video_st = add_output_stream(oc, ic->streams[i]);
                break;
            case CODEC_TYPE_AUDIO:
                audio_index = i;
                ic->streams[i]->discard = AVDISCARD_NONE;
                audio_st = add_output_stream(oc, ic->streams[i]);
                break;
            default:
                ic->streams[i]->discard = AVDISCARD_ALL;
                break;
        }
    }

    if (av_set_parameters(oc, NULL) < 0) {
        fprintf(stderr, "Invalid output format parameters\n");
        exit(1);
    }

    dump_format(oc, 0, options.output_prefix, 1);

    codec = avcodec_find_decoder(video_st->codec->codec_id);
    if (!codec) {
        fprintf(stderr, "Could not find video decoder, key frames will not be honored\n");
    }

    if (avcodec_open(video_st->codec, codec) < 0) {
        fprintf(stderr, "Could not open video decoder, key frames will not be honored\n");
    }

    snprintf(output_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, output_index++);
    if (url_fopen(&oc->pb, output_filename, URL_WRONLY) < 0) {
        fprintf(stderr, "Could not open '%s'\n", output_filename);
        exit(1);
    }

    if (av_write_header(oc)) {
        fprintf(stderr, "Could not write mpegts header to first output file\n");
        exit(1);
    }

    write_index = !write_index_file(options, first_segment, last_segment, 0);

    /* Setup signals */
    memset(&act, 0, sizeof(act));
    act.sa_handler = &handler;

    sigaction(SIGINT, &act, NULL);
    sigaction(SIGTERM, &act, NULL);

    do {
        double segment_time;
        AVPacket packet;

        if (terminate) {
          break;
        }

        decode_done = av_read_frame(ic, &packet);
        if (decode_done < 0) {
            break;
        }

        if (av_dup_packet(&packet) < 0) {
            fprintf(stderr, "Could not duplicate packet");
            av_free_packet(&packet);
            break;
        }

        if (packet.stream_index == video_index && (packet.flags & PKT_FLAG_KEY)) {
            segment_time = (double)video_st->pts.val * video_st->time_base.num / video_st->time_base.den;
        }
        else if (video_index < 0) {
            segment_time = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        }
        else {
            segment_time = prev_segment_time;
        }

        if (segment_time - prev_segment_time >= options.segment_duration) {
            put_flush_packet(oc->pb);
            url_fclose(oc->pb);

            if (options.num_segments && (int)(last_segment - first_segment) >= options.num_segments - 1) {
                remove_file = 1;
                first_segment++;
            }
            else {
                remove_file = 0;
            }

            if (write_index) {
                write_index = !write_index_file(options, first_segment, ++last_segment, 0);
            }

            if (remove_file) {
                snprintf(remove_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, first_segment - 1);
                remove(remove_filename);
            }

            snprintf(output_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, output_index++);
            if (url_fopen(&oc->pb, output_filename, URL_WRONLY) < 0) {
                fprintf(stderr, "Could not open '%s'\n", output_filename);
                break;
            }

            prev_segment_time = segment_time;
        }

        ret = av_interleaved_write_frame(oc, &packet);
        if (ret < 0) {
            fprintf(stderr, "Warning: Could not write frame of stream\n");
        }
        else if (ret > 0) {
            fprintf(stderr, "End of stream requested\n");
            av_free_packet(&packet);
            break;
        }

        av_free_packet(&packet);
    } while (!decode_done);

    av_write_trailer(oc);

    avcodec_close(video_st->codec);

    for(i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    url_fclose(oc->pb);
    av_free(oc);

    if (options.num_segments && (int)(last_segment - first_segment) >= options.num_segments - 1) {
        remove_file = 1;
        first_segment++;
    }
    else {
        remove_file = 0;
    }

    if (write_index) {
        write_index_file(options, first_segment, ++last_segment, 1);
    }

    if (remove_file) {
        snprintf(remove_filename, strlen(options.output_prefix) + 15, "%s-%u.ts", options.output_prefix, first_segment - 1);
        remove(remove_filename);
    }

    return 0;
}
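The terminate flag and handler() installed via sigaction() above are defined elsewhere in the segmenter; the usual shape, as a sketch:

/* Set asynchronously on SIGINT/SIGTERM; polled at the top of the read loop. */
static volatile sig_atomic_t terminate = 0;

static void handler(int signum)
{
    (void) signum;
    terminate = 1;
}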
Example #26
void AV::closeVideo(AVContext& context) {
	avcodec_close(context.vs->codec);
	av_free(context.vframe->data[0]);
	av_free(context.tmp_vframe->data[0]);
	av_free(context.vbuf);
}
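The teardown above implies how the members were allocated: both frames own separately allocated picture planes (hence av_free() on data[0]) and vbuf is a scratch buffer. A hypothetical allocation counterpart, with every field use and the VBUF_SIZE constant inferred rather than taken from the source:

context.vframe     = avcodec_alloc_frame();
context.tmp_vframe = avcodec_alloc_frame();

/* plane data sits behind data[0], matching the av_free(...->data[0]) calls above */
avpicture_alloc((AVPicture *) context.vframe, context.vs->codec->pix_fmt,
                context.vs->codec->width, context.vs->codec->height);
avpicture_alloc((AVPicture *) context.tmp_vframe, PIX_FMT_YUV420P,
                context.vs->codec->width, context.vs->codec->height);

/* encoder output scratch buffer; the size constant is hypothetical */
context.vbuf = (uint8_t *) av_malloc(VBUF_SIZE);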
Example #27
int main(int argc, char *argv[]) {
  AVFormatContext *pFormatCtx = NULL;
  int             i, videoStream;
  AVCodecContext  *pCodecCtx = NULL;
  AVCodec         *pCodec = NULL;
  AVFrame         *pFrame = NULL; 
  AVFrame         *pFrameRGB = NULL;
  AVPacket        packet;
  int             frameFinished;
  int             numBytes;
  uint8_t         *buffer= NULL;
  int ret, got_frame;
  
  if(argc < 2) {
    printf("Please provide a movie file\n");
    return -1;
  }
  // Register all formats and codecs
  //  avcodec_register_all();
  av_register_all();
  

  // Open video file
  if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) < 0 ) {
    av_log(NULL, AV_LOG_ERROR,"파일을 열 수 없습니다.\n");
    return -1;
  }

  
    
  // Retrieve stream information
  if((ret = avformat_find_stream_info(pFormatCtx,NULL)) < 0 ) {
    av_log(NULL, AV_LOG_ERROR,"stream 정보을 찾을 수 없습니다.\n");
    return ret; // Couldn't find stream information
  }
  
  
  // Dump information about file onto standard error
  av_dump_format(pFormatCtx, 0, argv[1], 0);
  
  // Find the first video stream
  videoStream = av_find_best_stream(pFormatCtx,AVMEDIA_TYPE_VIDEO,-1,-1,&pCodec,0);

  
  if(videoStream < 0 ) {
    av_log(NULL,AV_LOG_ERROR,"Cannot find a video stream in the input file\n");
    return videoStream;
  }

  
  // Get a pointer to the codec context for the video stream
  pCodecCtx=pFormatCtx->streams[videoStream]->codec;
  
  // Find the decoder for the video stream
  /* pCodec=avcodec_find_decoder(pCodecCtx->codec_id); */
  /* if(pCodec==NULL) { */
  /*   av_log(NULL, AV_LOG_ERROR,"지원되지 않는 코덱입니다.\n"); */
  /*   return -1; // Codec not found */
  /* } */
  // Open codec


  if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
    return -1; // Could not open codec
  
  // Allocate video frame
  pFrame=avcodec_alloc_frame();
  
  // Allocate an AVFrame structure
  pFrameRGB=avcodec_alloc_frame();
  if(pFrameRGB==NULL)
    return -1;

  
  // Determine required buffer size and allocate buffer
  numBytes=avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                              pCodecCtx->height);
  buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
  
  /* // Assign appropriate parts of buffer to image planes in pFrameRGB */
  /* // Note that pFrameRGB is an AVFrame, but AVFrame is a superset */
  /* // of AVPicture */
  avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
                 pCodecCtx->width, pCodecCtx->height);


  av_init_packet(&packet);
  packet.data = NULL;
  packet.size = 0;

  // Read frames and save first five frames to disk
  i=0;

  
  while(av_read_frame(pFormatCtx, &packet)>=0) {
    // Is this a packet from the video stream?
    if(packet.stream_index==videoStream) {
      // Decode video frame
      avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
      
      // Did we get a video frame?
      if(frameFinished) {
	// Copy the picture into pFrameRGB (note: av_picture_crop() only
	// crops/copies planes; it does not convert the pixel format)
	av_picture_crop((AVPicture *)pFrameRGB, (AVPicture*)pFrame, 
                        PIX_FMT_RGB24, pCodecCtx->width, pCodecCtx->height);
	
	// Save the frame to disk
	if(++i<=100)
	  SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height, i);
	if( i>100 )
	  break;
      }
    }
    
    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    
  }
  
  // Free the RGB image
  av_free(buffer);
  
  printf(" av_free ");
  av_free(pFrameRGB);
  
  // Free the YUV frame
  av_free(pFrame);
  
  // Close the codec
  avcodec_close(pCodecCtx);
  
  // Close the video file
  avformat_close_input(&pFormatCtx);
  
  return 0;
}
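One caveat in this example: av_picture_crop() only crops/copies picture planes and performs no pixel-format conversion, so pFrameRGB ends up holding the decoder's native data despite the RGB24 buffer. If a real conversion is wanted, a libswscale pass along these lines would do it; a sketch reusing the dimensions and target format from above:

struct SwsContext *sws_ctx = sws_getContext(
    pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, /* source */
    pCodecCtx->width, pCodecCtx->height, PIX_FMT_RGB24,      /* destination */
    SWS_BILINEAR, NULL, NULL, NULL);

/* inside the frameFinished branch, in place of av_picture_crop(): */
sws_scale(sws_ctx, (const uint8_t * const *) pFrame->data, pFrame->linesize,
          0, pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

/* once done with the stream: */
sws_freeContext(sws_ctx);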
Example #28
void FFmpegVideo::restart()
{
	char chVidName1[200];
	int iProcessOrder1 = this->iProcessOrder;
	strcpy(chVidName1, this->chVidName);

	// Tear down the previous decode state
	// Free the RGB image
	av_free(buffer);
	av_free(pFrameBGR);
	av_free(pFrameRGB);

	// Free the YUV frame
	av_free(pFrameOri);

	// Close the codec
	avcodec_close(pCodecCtx);

	av_close_input_file(pFormatCtx);

	if(imageFrame)
		delete imageFrame;

	// Start over
	this->iProcessOrder = iProcessOrder1;
	strcpy(chVidName, chVidName1);
	this->fRate = 0;
	iTotalFrameNum = 0;
	iNowFrameNum = 0;
	frameFinished = 0;
	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(av_open_input_file(&pFormatCtx, chVidName, NULL, 0, NULL)!=0)
	{
		bIfSuccess = false;
		return; // Couldn't open file
	}

	// Retrieve stream information
	if(av_find_stream_info(pFormatCtx)<0)
	{
		bIfSuccess = false;
		return; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, chVidName, 0);

	this->iTotalFrameNum = pFormatCtx->streams[0]->duration;
	this->fFrmRat = pFormatCtx->streams[0]->r_frame_rate.num/(float)(pFormatCtx->streams[0]->r_frame_rate.den);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		bIfSuccess = false;
		return; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		bIfSuccess = false;
		fprintf(stderr, "Unsupported codec!\n");
		return; // Codec not found
	}
	// Open codec
	while (avcodec_open(pCodecCtx, pCodec) < 0) /* this function always returns -1 here */ {
		Sleep(this->iProcessOrder);
	}

	// Allocate video frame
	pFrameOri=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameBGR=avcodec_alloc_frame();
	if(pFrameBGR==NULL)
	{
		bIfSuccess = false;
		return;
	}
	pFrameRGB=avcodec_alloc_frame();
	if(pFrameRGB==NULL)
	{
		bIfSuccess = false;
		return;
	}

	// Determine required buffer size and allocate buffer
	numBytes=avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width,pCodecCtx->height);
	imageFrame->height = pCodecCtx->height;
	imageFrame->width = pCodecCtx->width;
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	imageFrame->imageData = new uint8_t[numBytes*sizeof(uint8_t)];
	

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameBGR, buffer, PIX_FMT_BGR24,
		pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		pCodecCtx->width, pCodecCtx->height);
	
	// Note: the destination pixel format chosen here (PIX_FMT_BGR24) determines the output image format
	if(this->bIfUseHD == false)
		ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
		pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
		PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);

	this->getOneFrame();

	bIfSuccess = true;

}
Example #29
static int startffmpeg(struct anim *anim)
{
    int i, videoStream;

    AVCodec *pCodec;
    AVFormatContext *pFormatCtx = NULL;
    AVCodecContext *pCodecCtx;
    AVRational frame_rate;
    int frs_num;
    double frs_den;
    int streamcount;

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* The following for color space determination */
    int srcRange, dstRange, brightness, contrast, saturation;
    int *table;
    const int *inv_table;
#endif

    if (anim == NULL) return(-1);

    streamcount = anim->streamindex;

    if (avformat_open_input(&pFormatCtx, anim->name, NULL, NULL) != 0) {
        return -1;
    }

    if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    av_dump_format(pFormatCtx, 0, anim->name, 0);


    /* Find the video stream */
    videoStream = -1;

    for (i = 0; i < pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
            if (streamcount > 0) {
                streamcount--;
                continue;
            }
            videoStream = i;
            break;
        }

    if (videoStream == -1) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx = pFormatCtx->streams[videoStream]->codec;

    /* Find the decoder for the video stream */
    pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
    if (pCodec == NULL) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    pCodecCtx->workaround_bugs = 1;

    if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
        avformat_close_input(&pFormatCtx);
        return -1;
    }

    frame_rate = av_get_r_frame_rate_compat(pFormatCtx->streams[videoStream]);
    if (pFormatCtx->streams[videoStream]->nb_frames != 0) {
        anim->duration = pFormatCtx->streams[videoStream]->nb_frames;
    }
    else {
        anim->duration = ceil(pFormatCtx->duration *
                              av_q2d(frame_rate) /
                              AV_TIME_BASE);
    }

    frs_num = frame_rate.num;
    frs_den = frame_rate.den;

    frs_den *= AV_TIME_BASE;

    while (frs_num % 10 == 0 && frs_den >= 2.0 && frs_num > 10) {
        frs_num /= 10;
        frs_den /= 10;
    }

    anim->frs_sec = frs_num;
    anim->frs_sec_base = frs_den;

    anim->params = 0;

    anim->x = pCodecCtx->width;
    anim->y = av_get_cropped_height_from_codec(pCodecCtx);

    anim->pFormatCtx = pFormatCtx;
    anim->pCodecCtx = pCodecCtx;
    anim->pCodec = pCodec;
    anim->videoStream = videoStream;

    anim->interlacing = 0;
    anim->orientation = 0;
    anim->framesize = anim->x * anim->y * 4;

    anim->curposition = -1;
    anim->last_frame = 0;
    anim->last_pts = -1;
    anim->next_pts = -1;
    anim->next_packet.stream_index = -1;

    anim->pFrame = av_frame_alloc();
    anim->pFrameComplete = false;
    anim->pFrameDeinterlaced = av_frame_alloc();
    anim->pFrameRGB = av_frame_alloc();

    if (need_aligned_ffmpeg_buffer(anim)) {
        anim->pFrameRGB->format = AV_PIX_FMT_RGBA;
        anim->pFrameRGB->width  = anim->x;
        anim->pFrameRGB->height = anim->y;

        if (av_frame_get_buffer(anim->pFrameRGB, 32) < 0) {
            fprintf(stderr, "Could not allocate frame data.\n");
            avcodec_close(anim->pCodecCtx);
            avformat_close_input(&anim->pFormatCtx);
            av_frame_free(&anim->pFrameRGB);
            av_frame_free(&anim->pFrameDeinterlaced);
            av_frame_free(&anim->pFrame);
            anim->pCodecCtx = NULL;
            return -1;
        }
    }

    if (avpicture_get_size(AV_PIX_FMT_RGBA, anim->x, anim->y) !=
            anim->x * anim->y * 4)
    {
        fprintf(stderr,
                "ffmpeg has changed alloc scheme ... ARGHHH!\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

    if (anim->ib_flags & IB_animdeinterlace) {
        avpicture_fill((AVPicture *) anim->pFrameDeinterlaced,
                       MEM_callocN(avpicture_get_size(
                                       anim->pCodecCtx->pix_fmt,
                                       anim->pCodecCtx->width,
                                       anim->pCodecCtx->height),
                                   "ffmpeg deinterlace"),
                       anim->pCodecCtx->pix_fmt,
                       anim->pCodecCtx->width,
                       anim->pCodecCtx->height);
    }

    if (pCodecCtx->has_b_frames) {
        anim->preseek = 25; /* FIXME: detect gopsize ... */
    }
    else {
        anim->preseek = 0;
    }

    anim->img_convert_ctx = sws_getContext(
                                anim->x,
                                anim->y,
                                anim->pCodecCtx->pix_fmt,
                                anim->x,
                                anim->y,
                                AV_PIX_FMT_RGBA,
                                SWS_FAST_BILINEAR | SWS_PRINT_INFO | SWS_FULL_CHR_H_INT,
                                NULL, NULL, NULL);

    if (!anim->img_convert_ctx) {
        fprintf(stderr,
                "Can't transform color space??? Bailing out...\n");
        avcodec_close(anim->pCodecCtx);
        avformat_close_input(&anim->pFormatCtx);
        av_frame_free(&anim->pFrameRGB);
        av_frame_free(&anim->pFrameDeinterlaced);
        av_frame_free(&anim->pFrame);
        anim->pCodecCtx = NULL;
        return -1;
    }

#ifdef FFMPEG_SWSCALE_COLOR_SPACE_SUPPORT
    /* Try to detect if input has 0-255 YCbCr range (JFIF JPEG / MotionJPEG) */
    if (!sws_getColorspaceDetails(anim->img_convert_ctx, (int **)&inv_table, &srcRange,
                                  &table, &dstRange, &brightness, &contrast, &saturation))
    {
        srcRange = srcRange || anim->pCodecCtx->color_range == AVCOL_RANGE_JPEG;
        inv_table = sws_getCoefficients(anim->pCodecCtx->colorspace);

        if (sws_setColorspaceDetails(anim->img_convert_ctx, (int *)inv_table, srcRange,
                                     table, dstRange, brightness, contrast, saturation))
        {
            fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
        }
    }
    else {
        fprintf(stderr, "Warning: Could not set libswscale colorspace details.\n");
    }
#endif

    return (0);
}
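av_get_r_frame_rate_compat() above is a small Blender-side compatibility wrapper over the two frame-rate fields that different FFmpeg/libav generations expose. A plausible sketch; the exact version cutoff is an assumption:

static AVRational av_get_r_frame_rate_compat(const AVStream *stream)
{
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(55, 0, 100)
    /* older libavformat exposes the raw stream rate */
    return stream->r_frame_rate;
#else
    /* newer releases prefer the averaged rate */
    return stream->avg_frame_rate;
#endif
}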
/*
 * Video encoding example
 */
static void video_encode_example(const char *filename)
{
	AVCodec *codec;
	AVCodecContext *c= NULL;
	int i, out_size, size, x, y, outbuf_size;
	FILE *f;
	AVFrame *picture;
	uint8_t *outbuf, *picture_buf;

	printf("Video encoding\n");

	/* find the mpeg1 video encoder */
	codec = avcodec_find_encoder(CODEC_ID_MPEG1VIDEO);
	if (!codec)
	{
		fprintf(stderr, "codec not found\n");
		exit(1);
	}

	c= avcodec_alloc_context();
	picture= avcodec_alloc_frame();

	/* put sample parameters */
	c->bit_rate = 400000;
	/* resolution must be a multiple of two */
	c->width = 352;
	c->height = 288;
	/* frames per second */
	c->time_base = (AVRational){1, 25};
	c->gop_size = 10; /* emit one intra frame every ten frames */
	c->max_b_frames=1;
	c->pix_fmt = PIX_FMT_YUV420P;

	/* open it */
	if (avcodec_open(c, codec) < 0)
	{
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}

	f = fopen(filename, "wb");
	if (!f)
	{
		fprintf(stderr, "could not open %s\n", filename);
		exit(1);
	}

	/* alloc image and output buffer */
	outbuf_size = 100000;
	outbuf = malloc(outbuf_size);
	size = c->width * c->height;
	picture_buf = malloc((size * 3) / 2); /* size for YUV 420 */

	picture->data[0] = picture_buf;
	picture->data[1] = picture->data[0] + size;
	picture->data[2] = picture->data[1] + size / 4;
	picture->linesize[0] = c->width;
	picture->linesize[1] = c->width / 2;
	picture->linesize[2] = c->width / 2;

	/* encode 1 second of video */
	for(i=0; i<25; i++)
	{
		fflush(stdout);
		/* prepare a dummy image */
		/* Y */
		for(y=0; y<c->height; y++)
		{
			for(x=0; x<c->width; x++)
			{
				picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
			}
		}

		/* Cb and Cr */
		for(y=0; y<c->height/2; y++)
		{
			for(x=0; x<c->width/2; x++)
			{
				picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
				picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
			}
		}

		/* encode the image */
		out_size = avcodec_encode_video(c, outbuf, outbuf_size, picture);
		printf("encoding frame %3d (size=%5d)\n", i, out_size);
		fwrite(outbuf, 1, out_size, f);
	}

	/* get the delayed frames */
	for(; out_size; i++)
	{
		fflush(stdout);

		out_size = avcodec_encode_video(c, outbuf, outbuf_size, NULL);
		printf("write frame %3d (size=%5d)\n", i, out_size);
		fwrite(outbuf, 1, out_size, f);
	}

	/* add sequence end code to have a real mpeg file */
	outbuf[0] = 0x00;
	outbuf[1] = 0x00;
	outbuf[2] = 0x01;
	outbuf[3] = 0xb7;
	fwrite(outbuf, 1, 4, f);
	fclose(f);
	free(picture_buf);
	free(outbuf);

	avcodec_close(c);
	av_free(c);
	av_free(picture);
	printf("\n");
}