Example #1
/*	global initialization
 *
 *	XXX: in theory we can select the filters/formats we want to support, but
 *	this does not work in practice.
 */
void BarPlayerInit () {
	ao_initialize ();
	av_register_all ();
	avfilter_register_all ();
	avformat_network_init ();
}
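Note: since FFmpeg 4.0 (libavformat 58) the registration calls above are deprecated and unnecessary, and they were removed entirely in FFmpeg 5.x. A version-guarded sketch of the same initializer (BarPlayerInitCompat is a hypothetical name):

#include <ao/ao.h>
#include <libavformat/avformat.h>
#include <libavfilter/avfilter.h>

void BarPlayerInitCompat () {
	ao_initialize ();
#if LIBAVFORMAT_VERSION_MAJOR < 58
	av_register_all ();		/* needed only on pre-4.0 FFmpeg */
#endif
#if LIBAVFILTER_VERSION_MAJOR < 7
	avfilter_register_all ();	/* removed from libavfilter 8 onwards */
#endif
	avformat_network_init ();
}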
Example #2
FFmpegVideo::FFmpegVideo(void)
{
	pCodecCtx = NULL;
	av_register_all();

}
Example #3
int main(int argc, char **argv)
{
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    AVStream *audio_st, *video_st;
    AVCodec *audio_codec, *video_codec;
    double audio_pts, video_pts;
    int i;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* allocate the output media context */
    avformat_alloc_output_context2(&oc, NULL, NULL, filename);
    if (!oc) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        avformat_alloc_output_context2(&oc, NULL, "mpeg", filename);
    }
    if (!oc) {
        return 1;
    }
    fmt = oc->oformat;

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    video_st = NULL;
    audio_st = NULL;
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        video_st = add_video_stream(oc, &video_codec, fmt->video_codec);
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        audio_st = add_audio_stream(oc, &audio_codec, fmt->audio_codec);
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (video_st)
        open_video(oc, video_codec, video_st);
    if (audio_st)
        open_audio(oc, audio_codec, audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    if (avformat_write_header(oc, NULL) < 0) {
        fprintf(stderr, "Error occurred when opening output file\n");
        return 1;
    }

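    /* NOTE: 'frame' here is a file-scope AVFrame from the original example,
     * presumably allocated when the video codec is opened in open_video(). */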
    frame->pts = 0;
    for (;;) {
        /* Compute current audio and video time. */
        if (audio_st)
            audio_pts = (double)audio_st->pts.val * audio_st->time_base.num / audio_st->time_base.den;
        else
            audio_pts = 0.0;

        if (video_st)
            video_pts = (double)video_st->pts.val * video_st->time_base.num /
                        video_st->time_base.den;
        else
            video_pts = 0.0;

        if ((!audio_st || audio_pts >= STREAM_DURATION) &&
            (!video_st || video_pts >= STREAM_DURATION))
            break;

        /* write interleaved audio and video frames */
        if (!video_st || (video_st && audio_st && audio_pts < video_pts)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            frame->pts++;
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (video_st)
        close_video(oc, video_st);
    if (audio_st)
        close_audio(oc, audio_st);

    /* Free the streams. */
    for (i = 0; i < oc->nb_streams; i++) {
        av_freep(&oc->streams[i]->codec);
        av_freep(&oc->streams[i]);
    }

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    av_free(oc);

    return 0;
}
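The interleaving decision above compares the two stream clocks in floating point; the same choice can be made exactly with av_compare_ts(), as the newer muxing example (Example #16 below) does. A sketch using this example's variables:

        /* Sketch: write whichever stream is behind, compared in exact
         * rational arithmetic instead of doubles. */
        if (!video_st || (audio_st &&
            av_compare_ts(audio_st->pts.val, audio_st->time_base,
                          video_st->pts.val, video_st->time_base) <= 0)) {
            write_audio_frame(oc, audio_st);
        } else {
            write_video_frame(oc, video_st);
            frame->pts++;
        }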
Example #4
int main(int argc, char* argv[]) {
//	SetUnhandledExceptionFilter(callback);
	SDL_Event event;
	VideoState* is = NULL;
	is = (VideoState*) av_mallocz(sizeof(VideoState));

	if (argc < 2) {
		fprintf(stderr, "Usage: test <file>\n");
		exit(1);
	}

	av_register_all();

	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}

	screen = SDL_CreateWindow("Hello World", SDL_WINDOWPOS_CENTERED,
	SDL_WINDOWPOS_CENTERED, WINDOW_WIDTH, WINDOW_HEIGHT, SDL_WINDOW_OPENGL);
	if (!screen) {
		printf("Could not initialize SDL - %s\n", SDL_GetError());
		return -1;
	}
	render = SDL_CreateRenderer(screen, -1, 0);

	screen_mutex = SDL_CreateMutex();

	av_strlcpy(is->filename, argv[1], sizeof(is->filename));

	is->pictq_mutex = SDL_CreateMutex();
	is->pictq_cond = SDL_CreateCond();

	schedule_refresh(is, 40);

	is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
	is->parse_tid = SDL_CreateThread(decode_thread, "decode_thread", is);
	if (!is->parse_tid) {
		av_free(is);
		return -1;
	}

	av_init_packet(&flush_pkt);
	flush_pkt.data = (unsigned char*) "FLUSH";

	for (;;) {
		double incr, pos;
		SDL_WaitEvent(&event);
		switch (event.type) {
		case SDL_KEYDOWN:
			switch (event.key.keysym.sym) {
			case SDLK_LEFT:
				incr = -10.0;
				goto do_seek;
			case SDLK_RIGHT:
				incr = 10.0;
				goto do_seek;
			case SDLK_UP:
				incr = 60.0;
				goto do_seek;
			case SDLK_DOWN:
				incr = -60.0;
				goto do_seek;
			do_seek:
				if (global_video_state) {
					pos = get_master_clock(global_video_state);
					pos += incr;
					stream_seek(global_video_state,
							(int64_t) (pos * AV_TIME_BASE), incr);
				}
				break;
			default:
				break;
			}
			break;
		case FF_QUIT_EVENT:
		case SDL_QUIT:
			is->quit = 1;
			SDL_Quit();
			return 0;
			break;
		case FF_REFRESH_EVENT:
			video_refresh_timer(event.user.data1);
			break;
		default:
			break;
		}
	}
	return 0;
}
Example #5
LibAVC::LibAVC(void)
{
	av_register_all();
}
Example #6
///static
void QFFmpegPlayer::Init()
{
    av_register_all();
    avformat_network_init();
}
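avformat_network_init() has a matching avformat_network_deinit() for shutdown; a hypothetical counterpart (the Deinit name is not part of the original class) could be:

///static
void QFFmpegPlayer::Deinit()
{
    avformat_network_deinit();  // release what avformat_network_init() acquired
}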
Example #7
/** Convert an audio file to an AAC file in an MP4 container. */
int main(int argc, char **argv)
{
    AVFormatContext *input_format_context = NULL, *output_format_context = NULL;
    AVCodecContext *input_codec_context = NULL, *output_codec_context = NULL;
    AVAudioResampleContext *resample_context = NULL;
    AVAudioFifo *fifo = NULL;
    int ret = AVERROR_EXIT;

    if (argc < 3) {
        fprintf(stderr, "Usage: %s <input file> <output file>\n", argv[0]);
        exit(1);
    }

    /** Register all codecs and formats so that they can be used. */
    av_register_all();
    /** Open the input file for reading. */
    if (open_input_file(argv[1], &input_format_context,
                        &input_codec_context))
        goto cleanup;
    /** Open the output file for writing. */
    if (open_output_file(argv[2], input_codec_context,
                         &output_format_context, &output_codec_context))
        goto cleanup;
    /** Initialize the resampler to be able to convert audio sample formats. */
    if (init_resampler(input_codec_context, output_codec_context,
                       &resample_context))
        goto cleanup;
    /** Initialize the FIFO buffer to store audio samples to be encoded. */
    if (init_fifo(&fifo))
        goto cleanup;
    /** Write the header of the output file container. */
    if (write_output_file_header(output_format_context))
        goto cleanup;

    /**
     * Loop as long as we have input samples to read or output samples
     * to write; abort as soon as we have neither.
     */
    while (1) {
        /** Use the encoder's desired frame size for processing. */
        const int output_frame_size = output_codec_context->frame_size;
        int finished                = 0;

        /**
         * Make sure that there is one frame worth of samples in the FIFO
         * buffer so that the encoder can do its work.
         * Since the decoder's and the encoder's frame sizes may differ, we
         * need the FIFO buffer to store as many input samples as are needed
         * to make up at least one frame worth of output samples.
         */
        while (av_audio_fifo_size(fifo) < output_frame_size) {
            /**
             * Decode one frame worth of audio samples, convert it to the
             * output sample format and put it into the FIFO buffer.
             */
            if (read_decode_convert_and_store(fifo, input_format_context,
                                              input_codec_context,
                                              output_codec_context,
                                              resample_context, &finished))
                goto cleanup;

            /**
             * If we are at the end of the input file, we continue
             * encoding the remaining audio samples to the output file.
             */
            if (finished)
                break;
        }

        /**
         * If we have enough samples for the encoder, we encode them.
         * At the end of the file, we pass the remaining samples to
         * the encoder.
         */
        while (av_audio_fifo_size(fifo) >= output_frame_size ||
               (finished && av_audio_fifo_size(fifo) > 0))
            /**
             * Take one frame worth of audio samples from the FIFO buffer,
             * encode it and write it to the output file.
             */
            if (load_encode_and_write(fifo, output_format_context,
                                      output_codec_context))
                goto cleanup;

        /**
         * If we are at the end of the input file and have encoded
         * all remaining samples, we can exit this loop and finish.
         */
        if (finished) {
            int data_written;
            /** Flush the encoder as it may have delayed frames. */
            do {
                if (encode_audio_frame(NULL, output_format_context,
                                       output_codec_context, &data_written))
                    goto cleanup;
            } while (data_written);
            break;
        }
    }

    /** Write the trailer of the output file container. */
    if (write_output_file_trailer(output_format_context))
        goto cleanup;
    ret = 0;

cleanup:
    if (fifo)
        av_audio_fifo_free(fifo);
    if (resample_context) {
        avresample_close(resample_context);
        avresample_free(&resample_context);
    }
    if (output_codec_context)
        avcodec_close(output_codec_context);
    if (output_format_context) {
        avio_close(output_format_context->pb);
        avformat_free_context(output_format_context);
    }
    if (input_codec_context)
        avcodec_close(input_codec_context);
    if (input_format_context)
        avformat_close_input(&input_format_context);

    return ret;
}
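libavresample, which provides the AVAudioResampleContext used here, was later deprecated and removed in favor of libswresample. A minimal sketch of the equivalent resampler setup with SwrContext, assuming default channel layouts derived from the channel counts:

#include <libswresample/swresample.h>

/** Sketch only: SwrContext replacement for the avresample-based init_resampler(). */
static int init_swr_context(AVCodecContext *input_codec_context,
                            AVCodecContext *output_codec_context,
                            SwrContext **resample_context)
{
    *resample_context = swr_alloc_set_opts(NULL,
            av_get_default_channel_layout(output_codec_context->channels),
            output_codec_context->sample_fmt,
            output_codec_context->sample_rate,
            av_get_default_channel_layout(input_codec_context->channels),
            input_codec_context->sample_fmt,
            input_codec_context->sample_rate,
            0, NULL);
    if (!*resample_context)
        return AVERROR(ENOMEM);
    return swr_init(*resample_context);  /* returns < 0 on error */
}

Cleanup is then a single swr_free(&resample_context) instead of the avresample_close()/avresample_free() pair above.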
Example #8
void SysinfoDlg::GetSysinfo(){


//Query the formats supported by the system (protocols, container formats, codecs)

	av_register_all();
	//Initialization
	si.first_c=NULL;
	si.first_if=NULL;
	si.first_up=NULL;

	URLProtocol **pup=&(si.first_up);
	avio_enum_protocols((void **)pup,0);
	int up_index=0;
	while((*pup)!=NULL){
		CString f_index,name,isread,iswrite,priv_data_size;
		int nIndex=0;
		
#ifdef _UNICODE
		name.Format(_T("%S"),(*pup)->name);
#else
		//In a Unicode build, a capital %S treats its argument as an ANSI string
		name.Format(_T("%s"),(*pup)->name);
#endif
		priv_data_size.Format(_T("%d"),(*pup)->priv_data_size);
		f_index.Format(_T("%d"),up_index);
		//Get the current number of rows
		nIndex=sysinfosubup.m_sysinfoup.GetItemCount();
		//"Row" data structure
		LV_ITEM lvitem;
		lvitem.mask=LVIF_TEXT;
		lvitem.iItem=nIndex;
		lvitem.iSubItem=0;
		//Note: do not assign vframe_index directly!
		//Always run Format on f_index first, then assign!
		lvitem.pszText=f_index.GetBuffer();
		//lvitem.pszText=(LPWSTR)(LPCTSTR)f_index;
		//------------------------
		sysinfosubup.m_sysinfoup.InsertItem(&lvitem);
		sysinfosubup.m_sysinfoup.SetItemText(nIndex,1,name);
		//Next one
		avio_enum_protocols((void **)pup,0);
		up_index++;
	}




	si.first_if= av_iformat_next(NULL);
	si.first_c=av_codec_next(NULL);

	AVInputFormat *if_temp=si.first_if;
	AVCodec *c_temp=si.first_c;
	//InputFormat
	int if_index=0;
	while(if_temp!=NULL){
		CString f_index,name,long_name,extensions,priv_data_size;
		int nIndex=0;
#ifdef _UNICODE
		name.Format(_T("%S"),if_temp->name);
		long_name.Format(_T("%S"),if_temp->long_name);
		extensions.Format(_T("%S"),if_temp->extensions);
#else
		name.Format(_T("%s"),if_temp->name);
		long_name.Format(_T("%s"),if_temp->long_name);
		extensions.Format(_T("%s"),if_temp->extensions);
#endif
		priv_data_size.Format(_T("%d"),if_temp->priv_data_size);
		f_index.Format(_T("%d"),if_index);
		//Get the current number of rows
		nIndex=sysinfosubif.m_sysinfoif.GetItemCount();
		//"Row" data structure
		LV_ITEM lvitem;
		lvitem.mask=LVIF_TEXT;
		lvitem.iItem=nIndex;
		lvitem.iSubItem=0;
		//Note: do not assign vframe_index directly!
		//Always run Format on f_index first, then assign!
		lvitem.pszText=f_index.GetBuffer();
		//------------------------
		sysinfosubif.m_sysinfoif.InsertItem(&lvitem);
		sysinfosubif.m_sysinfoif.SetItemText(nIndex,1,name);
		sysinfosubif.m_sysinfoif.SetItemText(nIndex,2,long_name);
		sysinfosubif.m_sysinfoif.SetItemText(nIndex,3,extensions);
		sysinfosubif.m_sysinfoif.SetItemText(nIndex,4,priv_data_size);
		if_temp=if_temp->next;
		if_index++;
	}
	//Codec
	int c_index=0;
	while(c_temp!=NULL){
		CString f_index,name,long_name,priv_data_size,capabilities,
			supported_framerates,pix_fmts,supported_samplerates,sample_fmts,channel_layouts;
		int nIndex=0;

#ifdef _UNICODE
		name.Format(_T("%S"),c_temp->name);
		long_name.Format(_T("%S"),c_temp->long_name);
#else
		name.Format(_T("%s"),c_temp->name);
		long_name.Format(_T("%s"),c_temp->long_name);
#endif
		priv_data_size.Format(_T("%d"),c_temp->priv_data_size);
		f_index.Format(_T("%d"),c_index);
		//"Row" data structure
		LV_ITEM lvitem;
		lvitem.mask=LVIF_TEXT;
		lvitem.iSubItem=0;

		switch(c_temp->type){
		case AVMEDIA_TYPE_VIDEO:

			if(c_temp->supported_framerates==NULL){
				supported_framerates.Format(_T("Any"));
			}else{
				float sf_cal=0.0;
				sf_cal=(float)(c_temp->supported_framerates->num)/(c_temp->supported_framerates->den); //avoid integer division
				supported_framerates.Format(_T("%f"),sf_cal);
			}

			if(c_temp->pix_fmts==NULL){
				pix_fmts.Format(_T("Unknown"));
			}else{
				const enum AVPixelFormat *pf_temp=c_temp->pix_fmts;
				while(*pf_temp!=-1){
					pix_fmts.AppendFormat(_T("%d;"),*pf_temp);
					pf_temp++;
				}
			}

			//Get the current number of rows
			nIndex=sysinfosubvc.m_sysinfovc.GetItemCount();
			lvitem.iItem=nIndex;

			lvitem.pszText=f_index.GetBuffer();
			//------------------------
			sysinfosubvc.m_sysinfovc.InsertItem(&lvitem);
			sysinfosubvc.m_sysinfovc.SetItemText(nIndex,1,name);
			sysinfosubvc.m_sysinfovc.SetItemText(nIndex,2,long_name);
			sysinfosubvc.m_sysinfovc.SetItemText(nIndex,3,supported_framerates);
			sysinfosubvc.m_sysinfovc.SetItemText(nIndex,4,pix_fmts);
			sysinfosubvc.m_sysinfovc.SetItemText(nIndex,5,priv_data_size);

			break;
		case AVMEDIA_TYPE_AUDIO:

			if(c_temp->supported_samplerates==NULL){
				supported_samplerates.Format(_T("Unknown"));
			}else{
				const int *sr_temp=c_temp->supported_samplerates;
				while(*sr_temp!=0){
					supported_samplerates.AppendFormat(_T("%d;"),*sr_temp);
					sr_temp++;
				}
			}
			if(c_temp->sample_fmts==NULL){
				sample_fmts.Format(_T("Any"));
			}else{
				const enum AVSampleFormat *sf_temp=c_temp->sample_fmts;
				while(*sf_temp!=-1){
					sample_fmts.AppendFormat(_T("%d;"),*sf_temp);
					sf_temp++;
				}
			}

			//Get the current number of rows
			nIndex=sysinfosubac.m_sysinfoac.GetItemCount();
			lvitem.iItem=nIndex;

			lvitem.pszText=f_index.GetBuffer();
			//------------------------
			sysinfosubac.m_sysinfoac.InsertItem(&lvitem);
			sysinfosubac.m_sysinfoac.SetItemText(nIndex,1,name);
			sysinfosubac.m_sysinfoac.SetItemText(nIndex,2,long_name);
			sysinfosubac.m_sysinfoac.SetItemText(nIndex,3,supported_samplerates);
			sysinfosubac.m_sysinfoac.SetItemText(nIndex,4,sample_fmts);
			sysinfosubac.m_sysinfoac.SetItemText(nIndex,5,priv_data_size);
			break;
		default:
			//Get the current number of rows
			nIndex=sysinfosuboc.m_sysinfooc.GetItemCount();
			lvitem.iItem=nIndex;

			lvitem.pszText=f_index.GetBuffer();
			//------------------------
			sysinfosuboc.m_sysinfooc.InsertItem(&lvitem);
			sysinfosuboc.m_sysinfooc.SetItemText(nIndex,1,name);
			sysinfosuboc.m_sysinfooc.SetItemText(nIndex,2,long_name);
			sysinfosuboc.m_sysinfooc.SetItemText(nIndex,3,priv_data_size);
			break;
		}
		c_temp=c_temp->next;
		c_index++;
	}



}
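Since FFmpeg 4.0 the av_iformat_next()/av_codec_next() linked-list walk used above no longer exists, and avio_enum_protocols() returns the protocol name directly instead of exposing URLProtocol. A minimal console sketch of the same enumeration with the iterator API:

#include <stdio.h>
#include <libavformat/avformat.h>
#include <libavcodec/avcodec.h>

static void list_components(void)
{
	void *opaque;
	const char *proto;
	const AVInputFormat *ifmt;
	const AVCodec *codec;

	opaque = NULL;
	while ((proto = avio_enum_protocols(&opaque, 0)) != NULL)
		printf("input protocol: %s\n", proto);

	opaque = NULL;
	while ((ifmt = av_demuxer_iterate(&opaque)) != NULL)
		printf("demuxer: %s (%s)\n", ifmt->name, ifmt->long_name);

	opaque = NULL;
	while ((codec = av_codec_iterate(&opaque)) != NULL)
		printf("codec: %s\n", codec->name);
}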
Example #9
int main(int argc, char *argv[])
{
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	int *stream_size=NULL;
	int *stream_max=NULL;
	int *stream_min=NULL;
	double *stream_ave=NULL;
	int total_max=0, total_min=INT32_MAX;
	double total_ave=0;
	int numberStreams;
	int frame_counter=0;
	int total_size=0;
	int tave=0;

	struct settings programSettings;
	struct stat fileStat;
	off_t total_file_size;
	double framerate;

	// default settings
	programSettings.ave_len=1;
	programSettings.output_stderr=0;
	programSettings.output_interval=1;
	programSettings.output_interval_seconds=0;
	programSettings.output_progress=0;


	// parse commandline options
	const static char *legal_flags = "s:i:I:ePh";

	int c;
	char *error=NULL;
	while ((c = getopt (argc, argv, legal_flags)) != -1) {
		switch (c) {
			case 's':
				// I've been programming in Java too much recently:
				// I want to catch a number-format exception here
				// and tell the user about the error.
				programSettings.ave_len=(int)strtol(optarg, &error, 10);
				if (*error || programSettings.ave_len < 1) {
					fprintf(stderr,"Smoothing value is invalid\n");
					print_usage();
					return -1;
				}

				break;
			case 'e':
				programSettings.output_stderr=1;
				break;
			case 'P':
				programSettings.output_progress=1;
				break;

			case 'i':
				programSettings.output_interval=(int)strtol(optarg, &error, 10);
				if (*error || programSettings.output_interval<1) {
					fprintf(stderr,"Interval is invalid\n");
					print_usage();

					return -1;
				}
				break;
			case 'I':
				programSettings.output_interval_seconds=strtod(optarg, &error);

				if (*error || programSettings.output_interval_seconds <= 0) {
					fprintf(stderr,"Interval Seconds is invalid\n");
					print_usage();

					return -1;
				}

				break;
			case 'h':
			case '?':
				print_usage();
				return 0;
				break;
		}
	}


	optind--;
	// argc -= optind;
	argv += optind;

	//fprintf (stderr, "optind = %d. Trying file: %s\n",optind,argv[1]);

	// Register all formats and codecs
	av_register_all();

	if (argv[1] == NULL)
	{
		fprintf(stderr,"Error: No filename.\n");
		print_usage();
		return -1; // Couldn't open file
	}


	if (programSettings.output_progress) {
		stat(argv[1], &fileStat);
		// check for return error
		progress_init(0,fileStat.st_size);
	}


	// Open video file
#if LIBAVFORMAT_VERSION_MAJOR < 53
	if(av_open_input_file(&pFormatCtx, argv[1], NULL, 0, NULL)!=0)
#else
		if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
#endif
		{
			fprintf(stderr,"Error: could not open file.\n");
			return -1; // Couldn't open file
		}

	// Retrieve stream information
#if LIBAVFORMAT_VERSION_MAJOR < 53
	if(av_find_stream_info(pFormatCtx)<0)
#else
		if(avformat_find_stream_info(pFormatCtx,NULL)<0)
#endif
		{
			fprintf(stderr,"Error: could not interpret file.\n");
			return -1; // Couldn't find stream information
		}

	// Dump information about file onto standard error
#if LIBAVFORMAT_VERSION_MAJOR < 53
	dump_format(pFormatCtx, 0, argv[1], 0);
#else
	av_dump_format(pFormatCtx, 0, argv[1], 0);
#endif

	// This program reports per video frame, so find the first video stream
	// to determine the frame rate.
	videoStream=-1;

	numberStreams = pFormatCtx->nb_streams;

	stream_size = (int *)malloc(numberStreams * sizeof(int));
	stream_min = (int *)malloc(numberStreams * sizeof(int));
	stream_max = (int *)malloc(numberStreams * sizeof(int));
	stream_ave = (double *)malloc(numberStreams * sizeof(double));


	for(i=0; i<numberStreams; i++) {
		//	fprintf (stderr,"stream: %d = %d (%s)\n",i,pFormatCtx->streams[i]->codec->codec_type ,pFormatCtx->streams[i]->codec->codec_name);
		// Initialise statistic counters
		stream_size[i] = 0;
		stream_min[i]=INT32_MAX;
		stream_max[i]=0;
		stream_ave[i] = 0;

#if LIBAVFORMAT_VERSION_MAJOR < 53
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO)
#else
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO)
#endif
		{
			videoStream=i;
#if LIBAVFORMAT_VERSION_MAJOR < 55
			framerate = pFormatCtx->streams[i]->r_frame_rate.num;
#else 
                        framerate = pFormatCtx->streams[i]->avg_frame_rate.num;
#endif

#if LIBAVFORMAT_VERSION_MAJOR < 55
			if (pFormatCtx->streams[i]->r_frame_rate.den != 0)
				framerate /= pFormatCtx->streams[i]->r_frame_rate.den;
			//	fprintf (stderr,"Video Stream: %d Frame Rate: %d:%d\n",videoStream,pFormatCtx->streams[i]->r_frame_rate.num,pFormatCtx->streams[i]->r_frame_rate.den);
#else 
			if (pFormatCtx->streams[i]->avg_frame_rate.den != 0)
				framerate /= pFormatCtx->streams[i]->avg_frame_rate.den;
			//	fprintf (stderr,"Video Stream: %d Frame Rate: %d:%d\n",videoStream,pFormatCtx->streams[i]->avg_frame_rate.num,pFormatCtx->streams[i]->avg_frame_rate.den);
#endif
		}
	}

	if(videoStream==-1) {
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Codec not found
	}

	if (framerate == 0)
	{
		//fprintf(stderr,"frame rate %d:%d\n",pCodecCtx->time_base.num,pCodecCtx->time_base.den);
		framerate = pCodecCtx->time_base.den;
		if (pCodecCtx->time_base.den != 0)
			framerate /= pCodecCtx->time_base.num;
	}



	if (programSettings.output_interval_seconds >0)
	{
		if (INT32_MAX / framerate > programSettings.output_interval_seconds)
		{
			programSettings.output_interval = programSettings.output_interval_seconds * framerate;
		} else {
			fprintf(stderr,"Interval seconds too large\n");
			free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
			return -1;
		}
	}
	//	fprintf (stderr,"Video Stream: %d Frame Rate: %g\n",videoStream,framerate);


	// Open codec
#if LIBAVCODEC_VERSION_MAJOR < 52
	if(avcodec_open(pCodecCtx, *pCodec)<0)
#else
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0)
#endif
	{
		free (stream_size); free (stream_min); free (stream_max); free (stream_ave);
		return -1; // Could not open codec
	}

	// Allocate video frame
#if LIBAVCODEC_VERSION_MAJOR < 55
	pFrame=avcodec_alloc_frame();
#else
	pFrame=av_frame_alloc();
#endif

	int counter_interval=0;


	total_file_size=0;
	// Loop until nothing read
	while(av_read_frame(pFormatCtx, &packet)>=0)
	{
		stream_size[packet.stream_index] += packet.size;

		if (programSettings.output_progress) {
			total_file_size += packet.size;
			progress_loadBar(total_file_size);
		}
		// Is this a packet from the video stream?
		if(packet.stream_index==videoStream)
		{
			// Decode video frame
			// I'm not entirely sure when avcodec_decode_video was deprecated; most likely earlier than 53
	#if LIBAVCODEC_VERSION_MAJOR < 52
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished, packet.data, packet.size);
	#else
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);
	#endif

			if (counter_interval++ >= programSettings.output_interval) {

				//if (!(frame_counter % ave_len)) {
				// print the statistics in gnuplot friendly format...
				total_size=0;
				for(i=0; i<numberStreams; i++) { total_size += stream_size[i]; }
				// if (tave == -1) { tave = total_size; }

				tave = ((tave * (programSettings.ave_len-1)) + total_size) / programSettings.ave_len / programSettings.output_interval;

				if(total_min > total_size)
					total_min = total_size;

				if(total_max < total_size)
					total_max = total_size;

				total_ave += total_size;


				printf ("%f ",frame_counter/framerate);
				printf ("%f ",tave*8*framerate);

				for(i=0; i<numberStreams; i++) {

					// double rate = stream_size[i]*8*framerate/ programSettings.output_interval;

					if(stream_min[i] > stream_size[i])
						stream_min[i] = stream_size[i];

					if(stream_max[i] < stream_size[i])
						stream_max[i] = stream_size[i];

					stream_ave[i] += stream_size[i];


					printf ("%f ",stream_size[i]*8*framerate/ programSettings.output_interval);

					stream_size[i]=0;
				}
				printf("\n");

				//}
				counter_interval = 1;
			}
			frame_counter++;
		}

		// Free the packet that was allocated by av_read_frame
#if LIBAVCODEC_VERSION_MAJOR < 57
		av_free_packet(&packet);
#else
		av_packet_unref(&packet);
#endif
	}

	free(stream_size);


	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
#if LIBAVCODEC_VERSION_MAJOR < 53
	av_close_input_file(pFormatCtx);
#else
	avformat_close_input(&pFormatCtx);
#endif

	// Print statistics
	if (programSettings.output_stderr)
	{
		fprintf(stderr,"%20s %20s %20s %20s\n","Stream","Min Bitrate","Average bitrate","Max bitrate");

		fprintf(stderr,"%20s %20f %20f %20f\n","Total",total_min*8*framerate/ programSettings.output_interval,
				total_ave * 8*framerate/ programSettings.output_interval/(frame_counter/programSettings.output_interval),
				total_max*8*framerate/ programSettings.output_interval);
		for(i=0; i<numberStreams; i++) {
			fprintf(stderr,"%20d %20f %20f %20f\n",i,stream_min[i]*8*framerate/ programSettings.output_interval,
					stream_ave[i] *8*framerate/ programSettings.output_interval/(frame_counter/programSettings.output_interval),
					stream_max[i]*8*framerate/ programSettings.output_interval);
		}
	}
	free (stream_min); free (stream_max); free (stream_ave);

	return 0;
}
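Every statistic this program prints is the same conversion, bytes counted over an interval of output_interval frames scaled to bits per second; for reference:

	/* bits/s = bytes_in_interval * 8 * (frames per second) / (frames per interval) */
	double bitrate = stream_size[i] * 8.0 * framerate / programSettings.output_interval;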
Example #10
int phonica_init()
{
  av_register_all();
  return 0;
}
Example #11
int main(int argc, char *argv[]) {

	SDL_Event       event;

	VideoState      *is;

	is = (VideoState*)av_mallocz(sizeof(VideoState));

	if (argc < 2) {
		fprintf(stderr, "Usage: test <file>\n");
		exit(1);
	}
	// Register all formats and codecs
	av_register_all();

	if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
		fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
		exit(1);
	}

	// Make a screen to put our video
#ifndef __DARWIN__
	screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
	screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif
	if (!screen) {
		fprintf(stderr, "SDL: could not set video mode - exiting\n");
		exit(1);
	}

	screen_mutex = SDL_CreateMutex();

	av_strlcpy(is->filename, argv[1], sizeof(is->filename));

	is->pictq_mutex = SDL_CreateMutex();
	is->pictq_cond = SDL_CreateCond();

	schedule_refresh(is, 40);

	is->parse_tid = SDL_CreateThread(decode_thread, is);
	if (!is->parse_tid) {
		av_free(is);
		return -1;
	}
	for (;;) {

		SDL_WaitEvent(&event);
		switch (event.type) {
		case FF_QUIT_EVENT:
		case SDL_QUIT:
			is->quit = 1;
			SDL_Quit();
			return 0;
			break;
		case FF_REFRESH_EVENT:
			video_refresh_timer(event.user.data1);
			break;
		default:
			break;
		}
	}
	return 0;

}
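This variant still targets SDL 1.2 (SDL_SetVideoMode and the two-argument SDL_CreateThread); under SDL2 the display setup becomes a window plus renderer, as Example #4 above shows, with screen then being an SDL_Window* rather than an SDL_Surface*. Roughly:

	// Sketch: SDL2 replacements for the calls above.
	screen = SDL_CreateWindow("video", SDL_WINDOWPOS_CENTERED,
			SDL_WINDOWPOS_CENTERED, 640, 480, 0);
	render = SDL_CreateRenderer(screen, -1, 0);
	is->parse_tid = SDL_CreateThread(decode_thread, "decode_thread", is);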
Example #12
int main(int argc, char *argv[]) {

    char *filename = NULL;
    //char *filename_suffix = NULL;
    char *inputsource = NULL;
    char *outputDB = NULL;

    //Find the last / in passed filename.
    if (strrchr(argv[1],'/') == NULL) {
        if (strcmp(argv[1],"-") == 0) {
            inputsource = "/dev/stdin";
            if (argv[2] == NULL) {
                printf("Please input a name for the movie!\n");
                return -1;
            } else {
                if (strrchr(argv[2],'/') == NULL) {
                    filename = argv[2];
                } else {
                    filename = strrchr(argv[2],'/') + 1;
                }

                if (argv[3] != NULL && argc == 4) {
                    outputDB = argv[3];
                } else {
                    outputDB = "/home/gsc/videoaudiofingerprint.db";
                }

            }
        } else {
            filename = argv[1];
            inputsource = argv[1];

            if (argv[2] != NULL && argc == 3) {
                outputDB = argv[2];
            } else {
                outputDB = "/home/gsc/videoaudiofingerprint.db";
            }
        }
    } else {
        filename = strrchr(argv[1],'/') + 1;
        inputsource = argv[1];

        if (argv[3] != NULL && argc == 4) {
            outputDB = argv[3];
        } else {
            outputDB = "/home/gsc/videoaudiofingerprint.db";
        }
    }

    printf("Filename = %s Input source = %s DB output = %s argc = %d\n",filename,inputsource,outputDB, argc);

    /*** DB initialization ***/
    int retval = 0;

    // Create a handle for database connection, create a pointer to sqlite3
    sqlite3 *handle;

    //Full array init of size 5h@60fps (a.k.a large enough)
    //TO FIX: use dynamic array?
    int *fullArray = (int*) calloc ( (1080000-1), sizeof (int));

    // Create the database. If it doesn't exist, it will be created.
    // Pass a pointer to the pointer to sqlite3, i.e. sqlite3**.

    retval = sqlite3_open(outputDB,&handle);
    // If connection failed, handle returns NULL
    if(retval) {
        printf("Database connection failed\n");
        return -1;
    }

    char query1[] = "create table allmovies (allmovieskey INTEGER PRIMARY KEY,name TEXT,fps INTEGER, date INTEGER);";
    // Execute the query for creating the table
    retval = sqlite3_exec(handle,query1,0,0,0);
    char query2[] = "PRAGMA count_changes = OFF";
    retval = sqlite3_exec(handle,query2,0,0,0);
    char query3[] = "PRAGMA synchronous = OFF";
    retval = sqlite3_exec(handle,query3,0,0,0);

    //Hashluma table
    char query_hash[] = "create table hashluma (avg_range int, movies TEXT)";
    retval = sqlite3_exec(handle,query_hash,0,0,0);

    if (!retval) {
        //Populating the hash tables
        printf("Populating hashluma table\n");
        char hashquery[50];
        memset(hashquery, 0, 50);
        int i = 0;
        for(i=0; i <= 254; i++) {
            sprintf(hashquery, "insert into hashluma (avg_range) values (%d)", i);
            retval = sqlite3_exec(handle,hashquery,0,0,0);
        }
    }

    char table_query[150];
    memset(table_query, 0, 150);
    sprintf(table_query,"create table '%s' (s_end FLOAT, luma INTEGER);",filename);

    int repeated = 0;

    retval = sqlite3_exec(handle,table_query,0,0,0);
    if (retval) {
        char error [100];
        memset(error, 0, 100);
        sprintf(error,"Table for movie %s already exists! Skipping fingerprinting ... \n",filename);
        printf("%s",error);
        //Decide which is the best policy, not FP? overwrite? new file?
        repeated = 1;
        sqlite3_close(handle);
        return 0;
    }
    /*** DB init finished ***/

    printf("Analyzing video %s\n",filename);

    av_register_all();

    AVFormatContext *pFormatCtx;

    // Open video file
    if(av_open_input_file(&pFormatCtx, inputsource, NULL, 0, NULL)!=0) {
        printf("Couldn't open file %s\n", argv[1]);
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0) {
        printf("Couldn't find stream information\n");
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, filename, 0);

    int i;
    AVCodecContext *pVideoCodecCtx;
    AVCodecContext *pAudioCodecCtx;

    // Find the first video stream
    int videoStream=-1;
    int audioStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream==-1)
            videoStream=i;

        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream==-1)
            audioStream=i;

    }

    if(videoStream==-1 || audioStream==-1)
        return -1; // Didn't find both streams

    // Get a pointer to the codec context for the video stream
    pVideoCodecCtx=pFormatCtx->streams[videoStream]->codec;
    // Similar, for audio stream
    pAudioCodecCtx=pFormatCtx->streams[audioStream]->codec;

    AVCodec *pVideoCodec;
    AVCodec *pAudioCodec;

    // Find the decoder for the streams
    pVideoCodec=avcodec_find_decoder(pVideoCodecCtx->codec_id);
    pAudioCodec=avcodec_find_decoder(pAudioCodecCtx->codec_id);
    if(pVideoCodec==NULL) {
        fprintf(stderr, "Unsupported video codec!\n");
        sqlite3_close(handle);
        return -1; // Codec not found
    }
    if(pAudioCodec==NULL) {
        fprintf(stderr, "Unsupported audio codec!\n");
        sqlite3_close(handle);
        return -1; // Codec not found
    }

    // Open codecs
    if(avcodec_open(pVideoCodecCtx, pVideoCodec)<0) {
        sqlite3_close(handle);
        return -1; // Could not open codec
    }
    if(avcodec_open(pAudioCodecCtx, pAudioCodec)<0) {
        sqlite3_close(handle);
        return -1; // Could not open codec
    }

    AVFrame *pVideoFrame;
    AVFrame *pVideoFrameYUV;
    AVFrame *pAudioFrame;

    int samples = 0;

    // Allocate audio/video frame
    pVideoFrame=avcodec_alloc_frame();
    pVideoFrameYUV=avcodec_alloc_frame();
    pAudioFrame=avcodec_alloc_frame();

    if(pVideoFrameYUV==NULL || pVideoFrame==NULL || pAudioFrame==NULL) {
        sqlite3_close(handle);
        return -1;
    }

    uint8_t *videoBuffer;
    int16_t *audioBuffer;

    int numVideoBytes;
    int numAudioBytes;
    // Determine required buffer size and allocate buffer
    numVideoBytes=avpicture_get_size(PIX_FMT_YUV420P, pVideoCodecCtx->width, pVideoCodecCtx->height);
    videoBuffer=(uint8_t *)av_mallocz(numVideoBytes*sizeof(uint8_t));
    numAudioBytes = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
    audioBuffer=(int16_t *)av_mallocz(numAudioBytes);

    // Assign appropriate parts of videoBuffer to image planes in pVideoFrameYUV
    // Note that pVideoFrameYUV is an AVFrame, but AVFrame is a superset of AVPicture
    avpicture_fill((AVPicture *)pVideoFrameYUV, videoBuffer, PIX_FMT_YUV420P, pVideoCodecCtx->width, pVideoCodecCtx->height);

    int frameFinished = 0;
    AVPacket packet;
    av_init_packet(&packet);
    struct SwsContext * sws_context;
    double fps = 0.0;

    struct timeval tv;
    gettimeofday(&tv, NULL);

    char allmovies_query[150];
    memset(allmovies_query, 0, 150);
    fps = (double)pFormatCtx->streams[videoStream]->r_frame_rate.num/(double)pFormatCtx->streams[videoStream]->r_frame_rate.den;
    //if (repeated) {
    //  filename_suffix = (int)tv.tv_sec;
    //  sprintf(filename, "%s_%d", filename, filename_suffix);
    //  sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), filename_suffix);
    //} else {
    sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), (int)tv.tv_sec);
    //}
    retval = sqlite3_exec(handle,allmovies_query,0,0,0);

    printf("%d %d\n",pAudioCodecCtx->sample_rate,pAudioCodecCtx->channels);

    i = 0;
    unsigned int offset = 0; // bytes
    //fftw_complex *in;
    int totalSamples = 0;
    //in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
    int counter = 0;
    float audioTime = 0.0;

    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Decode video
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pVideoCodecCtx, pVideoFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                if (pVideoCodecCtx->pix_fmt != PIX_FMT_YUV420P) {
                    // Convert the image from its native format to YUV (PIX_FMT_YUV420P)
                    //img_convert((AVPicture *)pVideoFrameYUV, PIX_FMT_YUV420P, (AVPicture*)pVideoFrame, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height);
                    sws_context = sws_getContext(pVideoCodecCtx->width, pVideoCodecCtx->height, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

                    sws_scale(sws_context, pVideoFrame->data, pVideoFrame->linesize, 0, pVideoCodecCtx->height, pVideoFrameYUV->data, pVideoFrameYUV->linesize);
                    sws_freeContext(sws_context);

                    retval = AvgFrameImport(pVideoFrameYUV, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                } else {
                    retval = AvgFrameImport(pVideoFrame, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                }
            }
        }
        // Decode audio
        // http://qtdvd.com/guides/ffmpeg.html#decode
        if (packet.stream_index == audioStream) {

            offset = 0;
            int frameSize;
            int length = 0;
            memset(audioBuffer, 0, numAudioBytes);
            while (packet.size > 0) {
                //memset(audioBuffer, 0, sizeof(audioBuffer));
                frameSize = numAudioBytes;

                //Copy decoded information into audioBuffer
                //frameSize gets set as the decoded frameSize, in bytes
                length = avcodec_decode_audio3(pAudioCodecCtx, audioBuffer, &frameSize, &packet);
                if (length <= 0) { // Error, see if we can recover.
                    packet.size--;
                    packet.data++;
                }
                else {
                    //Slide pointer to next frame and update size
                    printf("read %d bytes\n", length);
                    packet.size -= length;
                    packet.data += length;

                    //Slide frame of audiobuffer
                    memcpy((uint16_t*)(audioBuffer+offset), audioBuffer, frameSize);
                    //Update offset
                    offset += frameSize;
                }

                //Do something with audioBuffer
                //in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
                //if (counter%2)
                //	printf("%f R: %d\n", audioTime, (int16_t)*audioBuffer);
                //else
                //	printf("%f L: %d\n", audioTime, (int16_t)*audioBuffer);
                printf("%f %d\n", audioTime, (int16_t)*audioBuffer);
                fflush(stdout);

            }


            if (offset == 0)
                samples = 0;
            else
                samples = (unsigned int)offset/sizeof(short);

            totalSamples+=samples;

            if (counter%2)
                audioTime+=samples*1.0/pAudioCodecCtx->sample_rate;

            counter++;

        }
    }

    printf("Total time (s) (per audio sample calculation): %f\n",(float)(totalSamples*1.0/pAudioCodecCtx->sample_rate/pAudioCodecCtx->channels));

    //Cut the large fullArray to the movie actual size
    int *shortArray = (int*) calloc ( i, sizeof (int));
    memcpy(shortArray, fullArray, i*sizeof(int));
    free(fullArray);

    //Do magic
    makeIndexes(shortArray, handle, filename, threshold, i, fps);

    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    // Free the YUV image
    av_free(videoBuffer);
    av_free(audioBuffer);
    av_free(pVideoFrameYUV);

    // Free the YUV frame
    av_free(pVideoFrame);
    av_free(pAudioFrame);

    // Close the codec
    avcodec_close(pVideoCodecCtx);
    avcodec_close(pAudioCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    // Close DB handler
    sqlite3_close(handle);

    // Free full array
    free(shortArray);

    return 0;
}
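avcodec_decode_audio3() and avcodec_decode_video2(), used throughout this example, were removed in FFmpeg 4.x; the modern pattern is the send/receive pair. A minimal audio-decoding sketch (decode_packet is a hypothetical helper, not part of the example):

#include <libavcodec/avcodec.h>

/* Sketch: drain all frames produced by one packet with the post-3.1 API. */
static int decode_packet(AVCodecContext *ctx, const AVPacket *pkt, AVFrame *frame)
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;
    while ((ret = avcodec_receive_frame(ctx, frame)) >= 0) {
        /* frame->nb_samples samples per channel are available here */
        av_frame_unref(frame);
    }
    return (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF) ? 0 : ret;
}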
Example #13
int main(int argc, char **argv)
{
    char fntemplate[PATH_MAX];
    char pktfilename[PATH_MAX];
    AVFormatContext *fctx;
    AVPacket pkt;
    int64_t pktnum = 0;
    int64_t maxpkts = 0;
    int donotquit = 0;
    int nowrite = 0;
    int err;

    if ((argc > 1) && !strncmp(argv[1], "-", 1)) {
        if (strchr(argv[1], 'w'))
            donotquit = 1;
        if (strchr(argv[1], 'n'))
            nowrite = 1;
        argv++;
        argc--;
    }
    if (argc < 2)
        return usage(1);
    if (argc > 2)
        maxpkts = atoi(argv[2]);
    strncpy(fntemplate, argv[1], PATH_MAX-1);
    if (strrchr(argv[1], '/'))
        strncpy(fntemplate, strrchr(argv[1], '/')+1, PATH_MAX-1);
    if (strrchr(fntemplate, '.'))
        *strrchr(fntemplate, '.') = '\0';
    if (strchr(fntemplate, '%')) {
        fprintf(stderr, "can't use filenames containing '%%'\n");
        return usage(1);
    }
    if (strlen(fntemplate) + sizeof(PKTFILESUFF) >= PATH_MAX-1) {
        fprintf(stderr, "filename too long\n");
        return usage(1);
    }
    strcat(fntemplate, PKTFILESUFF);
    printf("FNTEMPLATE: '%s'\n", fntemplate);

    // register all file formats
    av_register_all();

    err = av_open_input_file(&fctx, argv[1], NULL, 0, NULL);
    if (err < 0) {
        fprintf(stderr, "av_open_input_file: error %d\n", err);
        return 1;
    }

    err = av_find_stream_info(fctx);
    if (err < 0) {
        fprintf(stderr, "av_find_stream_info: error %d\n", err);
        return 1;
    }

    av_init_packet(&pkt);

    while ((err = av_read_frame(fctx, &pkt)) >= 0) {
        int fd;
        snprintf(pktfilename, PATH_MAX-1, fntemplate, pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
        printf(PKTFILESUFF"\n", pktnum, pkt.stream_index, pkt.pts, pkt.size, (pkt.flags & PKT_FLAG_KEY)?'K':'_');
        //printf("open(\"%s\")\n", pktfilename);
        if (!nowrite) {
            fd = open(pktfilename, O_WRONLY|O_CREAT, 0644);
            write(fd, pkt.data, pkt.size);
            close(fd);
        }
        pktnum++;
        if (maxpkts && (pktnum >= maxpkts))
            break;
    }

    while (donotquit)
        sleep(60);

    return 0;
}
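The pre-0.7 calls here map directly onto the current demuxing API: av_open_input_file() becomes avformat_open_input(), av_find_stream_info() becomes avformat_find_stream_info(), and PKT_FLAG_KEY becomes AV_PKT_FLAG_KEY. A sketch of the open sequence in the example's own error style:

    AVFormatContext *fctx = NULL;  /* must be NULL (or preallocated) for avformat_open_input */

    err = avformat_open_input(&fctx, argv[1], NULL, NULL);
    if (err < 0) {
        fprintf(stderr, "avformat_open_input: error %d\n", err);
        return 1;
    }
    err = avformat_find_stream_info(fctx, NULL);
    if (err < 0) {
        fprintf(stderr, "avformat_find_stream_info: error %d\n", err);
        return 1;
    }
    /* ...and in the read loop, test pkt.flags & AV_PKT_FLAG_KEY */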
Example #14
int main(int argc, char **argv){	
	struct 				tm start_time_tm;
	int outputPorts;

	pthread_t *audioThreads;
	pthread_attr_t custom_sched_attr;	
	int fifo_max_prio = 0;
	int fifo_min_prio = 0;
	int fifo_mid_prio = 0;	
	struct sched_param fifo_param;

	syncbuffer = 0;
	normalbuffer = 0;

	if(argc < 3){
		printf("./<audio_decoder> udp://[IP]:[PORT] [ptsDelay] [number of channels] [Channel 0] ... [Channel n]\n");
		return 0;
	}


	
	ff_ctx = malloc(sizeof(ff_ctx_t));

	av_register_all();
	avformat_network_init();

	InitFF(ff_ctx, argv[1], argv[2]);
	

	if (avformat_open_input (&ff_ctx->avInputCtx, ff_ctx->udp_address, NULL , &ff_ctx->avDic) != 0) {
		printf ("Could not open UDP input stream at %s\n", ff_ctx->udp_address);
		return -1;
	}

	if (avformat_find_stream_info(ff_ctx->avInputCtx, NULL) < 0) {
		printf ("Could not get stream info\n");
		return -1;
	}

	if ((ff_ctx->audioIndexStream = av_find_best_stream(ff_ctx->avInputCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_ctx->avCodec, 0)) < 0) {
		printf ("No audio streams found\n");
		return -1;
	}

	printf ("Audio stream found at %d\n", ff_ctx->audioIndexStream);

	ff_ctx->avDicentry = av_dict_get(ff_ctx->avInputCtx->metadata, "service_name", NULL, 0);

	if(ff_ctx->avDicentry != NULL){
		strptime( ff_ctx->avDicentry->value, "%Y-%m-%d %H:%M:%S", &start_time_tm);
		start_time = mktime(&start_time_tm);
	}
	else {
		start_time = getSystemTime(NULL);
	}
	
	ff_ctx->avCodecCtx = ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->codec;
	ff_ctx->avCodec = avcodec_find_decoder(ff_ctx->avCodecCtx->codec_id);

	av_dump_format(ff_ctx->avInputCtx, 0, ff_ctx->udp_address, 0);

	if (avcodec_open2 (ff_ctx->avCodecCtx, ff_ctx->avCodec, NULL) < 0) {
		return -1;
	}

	outputPorts = ff_ctx->avCodecCtx->channels;
	InitBF(ff_ctx->avCodecCtx->channels, &to_audio_buffer, TO_AUDIO_BUFFER_SIZE);
	InitBF(ff_ctx->avCodecCtx->channels, &to_jack_buffer, TO_JACK_BUFFER_SIZE);

	//One thread for each channel
	audioThreads = malloc (sizeof(pthread_t)*outputPorts);

	pthread_attr_init(&custom_sched_attr);	
 	pthread_attr_setinheritsched(&custom_sched_attr, PTHREAD_INHERIT_SCHED /* PTHREAD_EXPLICIT_SCHED */);

 	//The options below apply only when PTHREAD_EXPLICIT_SCHED is used!
 	pthread_attr_setscope(&custom_sched_attr, PTHREAD_SCOPE_SYSTEM );	
 	pthread_attr_setschedpolicy(&custom_sched_attr, SCHED_FIFO);	

 	fifo_max_prio = sched_get_priority_max(SCHED_FIFO);	
 	fifo_min_prio = sched_get_priority_min(SCHED_FIFO);	
 	fifo_mid_prio = (fifo_min_prio + fifo_max_prio) / 2;	
 	fifo_param.sched_priority = fifo_mid_prio;	
 	pthread_attr_setschedparam(&custom_sched_attr, &fifo_param);

 	int i;
 	threadArgs_t args[outputPorts];
 	for (i = 0; i < outputPorts; i++) {
 		args[i].channel = i;
 		args[i].process_block_size = AUDIO_PROCESS_BLOCK_SIZE;
 		if (pthread_create(&audioThreads[i], &custom_sched_attr, audioThreadFunction, &args[i])) {
 			printf ("Unable to create audio_thread %d\n", i);
 			return 0;
 		}
 	}
    
    av_init_packet(&ff_ctx->avPacket);

	static AVFrame frame;
	int frameFinished;
	int nb, ch;

	char samplebuf[30];
	av_get_sample_fmt_string (samplebuf, 30, ff_ctx->avCodecCtx->sample_fmt);
	printf ("Audio sample format is %s\n", samplebuf);

	audio_sync_sample_t **sync_samples;
	sync_samples = malloc (outputPorts*sizeof(audio_sync_sample_t*));

	long double initPTS, PTS, frame_pts_offset;
	unsigned long int frame_count, framePTS, sample_count;

	int sample_rate = ff_ctx->avCodecCtx->sample_rate;

	if (init_jack(&jackCtx, outputPorts)) {
		return 1;
	}

	while(av_read_frame (ff_ctx->avInputCtx, &ff_ctx->avPacket)>=0) {

		if(ff_ctx->avPacket.stream_index == ff_ctx->audioIndexStream ) {
			int contador = 0;
			long double time_1 = getSystemTime(NULL);

			int len = avcodec_decode_audio4 (ff_ctx->avCodecCtx, &frame, &frameFinished, &ff_ctx->avPacket);

			if (frameFinished) {
				int data_size = frame.nb_samples * av_get_bytes_per_sample(frame.format);
				int sync_size = frame.nb_samples * sizeof (audio_sync_sample_t);

				framePTS = av_frame_get_best_effort_timestamp (&frame);

				frame_count = framePTS - ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->start_time;
				frame_pts_offset = frame_count * av_q2d(ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base) ;

				initPTS = start_time + frame_pts_offset + ff_ctx->ptsDelay;

#ifdef _DBG_PTS
				printf ("frame decoded PTS %lu, frame count %lu, TB %d/%d, PTS %Lf\n", framePTS, frame_count, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.num, ff_ctx->avInputCtx->streams[ff_ctx->audioIndexStream]->time_base.den, initPTS);
#endif

				//Build sync info data, sample timing
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					sync_samples[ch] =  malloc(sync_size);

					PTS = initPTS;

					for (sample_count = 0; sample_count < frame.nb_samples; sample_count++) {
						PTS += (1/(float) sample_rate);
						sync_samples[ch][sample_count].samplePTS = PTS;
					}
				}

#ifdef _DBG_PTS
				printf ("ended samples PTS %Lf\n", PTS);
#endif
				for (ch = 0; ch < ff_ctx->avCodecCtx->channels; ch++) {
					ProduceSyncToBuffer (&to_audio_buffer, ch, (uint8_t*) sync_samples[ch], sync_size);
					ProduceAudioToBuffer(&to_audio_buffer, ch, (uint8_t*) frame.extended_data[ch], data_size);

					free(sync_samples[ch]);
				}
			}

	       	long double time_2 = getSystemTime(NULL);
	       	adaptativeSleep( (1/READ_INPUT_FRAME_RATE) - (time_2 - time_1));
		}
	}

	return 0;
}
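One caveat in the timing loop above: accumulating PTS += 1/sample_rate once per sample compounds floating-point rounding error across the frame. Deriving each sample's clock as an offset from the frame start avoids the drift:

				for (sample_count = 0; sample_count < frame.nb_samples; sample_count++) {
					/* offset from the frame start instead of accumulating */
					sync_samples[ch][sample_count].samplePTS =
							initPTS + (sample_count + 1) / (long double) sample_rate;
				}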
Example #15
int main(int argc, char **argv)
{
    int ret;
    AVPacket packet;
    AVFrame *frame = av_frame_alloc();
    AVFrame *filt_frame = av_frame_alloc();
    int got_frame;

    if (!frame || !filt_frame) {
        perror("Could not allocate frame");
        exit(1);
    }
    if (argc != 2) {
        fprintf(stderr, "Usage: %s file\n", argv[0]);
        exit(1);
    }

    av_register_all();
    avfilter_register_all();

    if ((ret = open_input_file(argv[1])) < 0)
        goto end;
    if ((ret = init_filters(filter_descr)) < 0)
        goto end;

    /* read all packets */
    while (1) {
        if ((ret = av_read_frame(fmt_ctx, &packet)) < 0)
            break;

        if (packet.stream_index == video_stream_index) {
            got_frame = 0;
            ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &packet);
            if (ret < 0) {
                av_log(NULL, AV_LOG_ERROR, "Error decoding video\n");
                break;
            }

            if (got_frame) {
                frame->pts = av_frame_get_best_effort_timestamp(frame);

                /* push the decoded frame into the filtergraph */
                if (av_buffersrc_add_frame_flags(buffersrc_ctx, frame, AV_BUFFERSRC_FLAG_KEEP_REF) < 0) {
                    av_log(NULL, AV_LOG_ERROR, "Error while feeding the filtergraph\n");
                    break;
                }

                /* pull filtered frames from the filtergraph */
                while (1) {
                    ret = av_buffersink_get_frame(buffersink_ctx, filt_frame);
                    if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
                        break;
                    if (ret < 0)
                        goto end;
                    display_frame(filt_frame, buffersink_ctx->inputs[0]->time_base);
                    av_frame_unref(filt_frame);
                }
                av_frame_unref(frame);
            }
        }
        av_packet_unref(&packet);
    }
end:
    avfilter_graph_free(&filter_graph);
    avcodec_close(dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_frame_free(&frame);
    av_frame_free(&filt_frame);

    if (ret < 0 && ret != AVERROR_EOF) {
        fprintf(stderr, "Error occurred: %s\n", av_err2str(ret));
        exit(1);
    }

    exit(0);
}
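A small modernization note: on current libavfilter, reaching into buffersink_ctx->inputs[0] for the time base is discouraged; the public accessor does the same job:

    /* lavfi >= 7: query the sink's time base through the accessor */
    display_frame(filt_frame, av_buffersink_get_time_base(buffersink_ctx));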
Example #16
int main(int argc, char **argv)
{
    OutputStream video_st = { 0 }, audio_st = { 0 };
    const char *filename;
    AVOutputFormat *fmt;
    AVFormatContext *oc;
    int have_video = 0, have_audio = 0;
    int encode_video = 0, encode_audio = 0;

    /* Initialize libavcodec, and register all codecs and formats. */
    av_register_all();

    if (argc != 2) {
        printf("usage: %s output_file\n"
               "API example program to output a media file with libavformat.\n"
               "The output format is automatically guessed according to the file extension.\n"
               "Raw images can also be output by using '%%d' in the filename\n"
               "\n", argv[0]);
        return 1;
    }

    filename = argv[1];

    /* Autodetect the output format from the name. default is MPEG. */
    fmt = av_guess_format(NULL, filename, NULL);
    if (!fmt) {
        printf("Could not deduce output format from file extension: using MPEG.\n");
        fmt = av_guess_format("mpeg", NULL, NULL);
    }
    if (!fmt) {
        fprintf(stderr, "Could not find suitable output format\n");
        return 1;
    }

    /* Allocate the output media context. */
    oc = avformat_alloc_context();
    if (!oc) {
        fprintf(stderr, "Memory error\n");
        return 1;
    }
    oc->oformat = fmt;
    snprintf(oc->filename, sizeof(oc->filename), "%s", filename);

    /* Add the audio and video streams using the default format codecs
     * and initialize the codecs. */
    if (fmt->video_codec != AV_CODEC_ID_NONE) {
        add_video_stream(&video_st, oc, fmt->video_codec);
        have_video = 1;
        encode_video = 1;
    }
    if (fmt->audio_codec != AV_CODEC_ID_NONE) {
        add_audio_stream(&audio_st, oc, fmt->audio_codec);
        have_audio = 1;
        encode_audio = 1;
    }

    /* Now that all the parameters are set, we can open the audio and
     * video codecs and allocate the necessary encode buffers. */
    if (have_video)
        open_video(oc, &video_st);
    if (have_audio)
        open_audio(oc, &audio_st);

    av_dump_format(oc, 0, filename, 1);

    /* open the output file, if needed */
    if (!(fmt->flags & AVFMT_NOFILE)) {
        if (avio_open(&oc->pb, filename, AVIO_FLAG_WRITE) < 0) {
            fprintf(stderr, "Could not open '%s'\n", filename);
            return 1;
        }
    }

    /* Write the stream header, if any. */
    avformat_write_header(oc, NULL);

    while (encode_video || encode_audio) {
        /* select the stream to encode */
        if (encode_video &&
            (!encode_audio || av_compare_ts(video_st.next_pts, video_st.enc->time_base,
                                            audio_st.next_pts, audio_st.enc->time_base) <= 0)) {
            encode_video = !write_video_frame(oc, &video_st);
        } else {
            encode_audio = !process_audio_stream(oc, &audio_st);
        }
    }

    /* Write the trailer, if any. The trailer must be written before you
     * close the CodecContexts open when you wrote the header; otherwise
     * av_write_trailer() may try to use memory that was freed on
     * av_codec_close(). */
    av_write_trailer(oc);

    /* Close each codec. */
    if (have_video)
        close_stream(oc, &video_st);
    if (have_audio)
        close_stream(oc, &audio_st);

    if (!(fmt->flags & AVFMT_NOFILE))
        /* Close the output file. */
        avio_close(oc->pb);

    /* free the stream */
    avformat_free_context(oc);

    return 0;
}
Example #17
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodecParameters       *pCodecParam = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVPacket        packet;
    int             send_packet, receive_frame;
    //float           aspect_ratio;
    AVFrame        *pict;
    /*
    std::unique_ptr<AVFrame, std::function<void(AVFrame*)>> frame_converted{
        av_frame_alloc(),
        [](AVFrame* f){ av_free(f->data[0]); } };
    if (av_frame_copy_props(frame_converted.get(),
        frame_decoded.get()) < 0) {
        throw std::runtime_error("Copying frame properties");
    }
    if (av_image_alloc(
        frame_converted->data, frame_converted->linesize,
        video_decoder_->width(), video_decoder_->height(),
        video_decoder_->pixel_format(), 1) < 0) {
        throw std::runtime_error("Allocating picture");
    }
    */
    AVDictionary    *optionsDict = NULL;
    struct SwsContext *sws_ctx = NULL;

    SDL_Texture*    pTexture = nullptr;
    SDL_Window*     pWindows = nullptr;
    SDL_Renderer*   pRenderer = nullptr;

    SDL_Event       event;

    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get the codec parameters for the video stream.
    // AVStream.codec is deprecated, so use the codecpar struct (AVCodecParameters) instead;
    // avcodec_open2() still needs an AVCodecContext, so pCodecParam is copied into pCodecCtx below.
    pCodecParam = pFormatCtx->streams[videoStream]->codecpar;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecParam->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pCodecParam);

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Make a window to put our video
    pWindows = SDL_CreateWindow(argv[1], SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
                                pCodecParam->width, pCodecParam->height,
                                SDL_WINDOW_BORDERLESS | SDL_WINDOW_RESIZABLE);
    if (!pWindows) {
        fprintf(stderr, "SDL: could not create window - exiting\n");
        exit(1);
    }
    
    // Allocate a place to put our YUV image on that screen
    pRenderer = SDL_CreateRenderer(pWindows, -1, 0);
    if (!pRenderer) {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        exit(1);
    }
    pTexture = SDL_CreateTexture(pRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, pCodecParam->width, pCodecParam->height);
    sws_ctx =
        sws_getContext
        (
        pCodecParam->width,
        pCodecParam->height,
        (AVPixelFormat)pCodecParam->format,
        pCodecParam->width,
        pCodecParam->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
        );
    pict = av_frame_alloc();
    if (pict == nullptr){
        exit(1);
    }
    if (av_image_alloc(pict->data, pict->linesize,
        pCodecParam->width, pCodecParam->height,
        AV_PIX_FMT_YUV420P, 1) < 0){ // allocate in the format sws_scale converts to
        exit(1);
    }


    // Read frames and display them
    i = 0;
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            // avcodec_decode_video2() is deprecated; use avcodec_send_packet() and avcodec_receive_frame() instead.
            send_packet = avcodec_send_packet(pCodecCtx, &packet);
            receive_frame = avcodec_receive_frame(pCodecCtx, pFrame);

            // Did we get a video frame? (both calls return 0 on success)
            if (send_packet == 0 && receive_frame == 0) {
                // Convert the image into the YUV format that SDL uses
                if (av_frame_copy_props(pict, pFrame) < 0) { // copy metadata from the decoded frame
                    exit(1);
                }

                sws_scale
                    (
                    sws_ctx,
                    pFrame->data,
                    pFrame->linesize,
                    0,
                    pCodecParam->height,
                    pict->data,
                    pict->linesize
                    );
                
                SDL_UpdateYUVTexture(pTexture, NULL, pict->data[0], pict->linesize[0], pict->data[1], pict->linesize[1], pict->data[2], pict->linesize[2]);
                SDL_RenderCopy(pRenderer, pTexture, NULL, NULL);
                SDL_RenderPresent(pRenderer);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_packet_unref(&packet);
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            SDL_DestroyRenderer(pRenderer);
            SDL_DestroyTexture(pTexture);
            SDL_DestroyWindow(pWindows);
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }

    // Free the YUV frame
    av_frame_free(&pFrame);
    //free pict
    av_freep(&pict->data[0]);
    av_frame_free(&pict);

    // Free the codec context (it was allocated with avcodec_alloc_context3)
    avcodec_free_context(&pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
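A caveat on the send/receive pairing above: one avcodec_send_packet() does not always yield exactly one frame, so pairing the calls 1:1 can drop frames. A minimal sketch of the usual receive loop, reusing the same pCodecCtx/pFrame names as above:

/* Sketch: drain every frame a packet produces instead of assuming 1:1. */
if (avcodec_send_packet(pCodecCtx, &packet) == 0) {
    int ret;
    while ((ret = avcodec_receive_frame(pCodecCtx, pFrame)) == 0) {
        /* ...scale and display pFrame as above... */
    }
    /* ret is AVERROR(EAGAIN) when the decoder needs more input,
     * AVERROR_EOF after flushing; anything else is a real error. */
}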
Ejemplo n.º 18
0
int _tmain(int argc, _TCHAR* argv[])
{
	int ret = 0, got_frame;

	if (argc != 3) {
        fprintf(stderr, "usage: %s input_file fifo_size(in byte)\n"
                "\n", argv[0]);
		fprintf(stderr, "example: %s \"rtmp://172.16.204.106/live/test01 live=1\" 128000\n", argv[0]);
        exit(1);
    }

	av_register_all();
	avfilter_register_all();

	avformat_network_init();

	//av_log_set_callback(ff_log_callback);

	/* open input file, and allocate format context */
    if (avformat_open_input(&fmt_ctx, argv[1], NULL, NULL) < 0) {
		printf("Could not open source %s\n", argv[1]);
        return 1;
    }

	/* retrieve stream information */
    if (avformat_find_stream_info(fmt_ctx, NULL) < 0) {
        printf("Could not find stream information\n");
        return 1;
    }

	if (open_codec_context(&audio_stream_idx, fmt_ctx, AVMEDIA_TYPE_AUDIO) >= 0) {
        audio_stream = fmt_ctx->streams[audio_stream_idx];
        audio_dec_ctx = audio_stream->codec;

		audio_dst_bufsize = 192000 * 2;
        audio_dst_data = (uint8_t*)av_malloc(audio_dst_bufsize);
		if (audio_dst_data == NULL) {
			printf("Not enough memory for audio conversion\n");
			return 1;
		}

		// these may change during decoding
		audio_channel_layout = audio_dec_ctx->channel_layout;
		audio_channels = audio_dec_ctx->channels;

		swr_ctx = swr_alloc_set_opts(swr_ctx,
			AV_CH_LAYOUT_STEREO,
			AV_SAMPLE_FMT_S16,
			audio_dec_ctx->sample_rate,
			audio_dec_ctx->channel_layout,
			audio_dec_ctx->sample_fmt,
			audio_dec_ctx->sample_rate,
			0, 0);
		if (!swr_ctx) {
			printf("failed to alloc swr_ctx\n");
			return 1;
		}

		if (swr_init(swr_ctx) < 0) {
			printf("swr_init failed\n");
			goto end;
		}

		printf("swr_init done!\n");

		//if (init_filters(audio_stream, audio_stream_idx) < 0)
		//	printf("failed to init_filters!\n");
    }

	/* dump input information to stderr */
    av_dump_format(fmt_ctx, 0, argv[1], 0);

	frame = av_frame_alloc();
    if (!frame) {
        fprintf(stderr, "Could not allocate frame\n");
        ret = AVERROR(ENOMEM);
        goto end;
    }

	int fifo_size = atoi(argv[2]);
	printf("fifo_size %d\n", fifo_size);
	audio_fifo.create(fifo_size);

	// init sdl audio
	SDL_Init(SDL_INIT_AUDIO);

	SDL_AudioSpec wanted_spec, spec;
	memset(&wanted_spec, 0, sizeof(SDL_AudioSpec));
	memset(&spec, 0, sizeof(SDL_AudioSpec));
	wanted_spec.freq		= audio_dec_ctx->sample_rate;
	wanted_spec.format		= AUDIO_S16SYS;
	wanted_spec.channels	= 2;
	wanted_spec.silence		= 0;
	wanted_spec.samples		= SDL_AUDIO_SAMPLES;
	wanted_spec.callback	= audio_callback;
	wanted_spec.userdata	= &audio_fifo;

	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		printf("SDL_OpenAudio: %s\n", SDL_GetError());
		return 1;
	}

	printf("SDL_AudioSpec got: chn %d, fmt 0x%x, freq %d\n", spec.channels, spec.format, spec.freq);

	SDL_PauseAudio(0);

    /* initialize packet, set data to NULL, let the demuxer fill it */
    av_init_packet(&pkt);
    pkt.data = NULL;
    pkt.size = 0;

    /* read frames from the file */
    while (av_read_frame(fmt_ctx, &pkt) >= 0) {
        decode_packet(&got_frame, 0);
        av_free_packet(&pkt);
    }

end:
    if (audio_dec_ctx)
        avcodec_close(audio_dec_ctx);
    avformat_close_input(&fmt_ctx);
    av_free(frame);
    av_free(audio_dst_data);
	if (swr_ctx)
		swr_free(&swr_ctx);
	return 0;
}
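The example above leaves audio_callback and the FIFO type behind audio_fifo undefined. As a hedged sketch (fifo_t and fifo_read are hypothetical placeholders for whatever audio_fifo.create()/audio_fifo actually provide; only the SDL callback signature is real API), the callback just drains the FIFO into SDL's buffer and pads with silence on underrun:

#include <string.h>

/* Sketch: SDL calls this from its audio thread whenever the device
 * needs `len` more bytes of already-converted S16 audio. */
static void audio_callback(void *userdata, Uint8 *stream, int len)
{
	fifo_t *fifo = (fifo_t *)userdata;      /* hypothetical FIFO type */
	int got = fifo_read(fifo, stream, len); /* hypothetical: returns bytes copied */
	if (got < len)
		memset(stream + got, 0, len - got); /* pad underruns with silence */
}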
Ejemplo n.º 19
0
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             frameFinished;
    float           aspect_ratio;
    struct SwsContext *img_convert_ctx = NULL;

    SDL_Overlay     *bmp;
    SDL_Surface     *screen;
    SDL_Rect        rect;
    SDL_Event       event;

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=av_frame_alloc();

    // Make a screen to put our video
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);

    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
            pCodecCtx->height,
            SDL_YV12_OVERLAY,
            screen);


    // Read frames and display them
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                    &packet);

            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into YUV format that SDL uses.
                // sws_getCachedContext() reuses the context across frames
                // instead of leaking a new one on every iteration.
                img_convert_ctx = sws_getCachedContext(img_convert_ctx,
                        pCodecCtx->width,
                        pCodecCtx->height,
                        pCodecCtx->pix_fmt,
                        pCodecCtx->width,
                        pCodecCtx->height,
                        PIX_FMT_YUV420P,
                        SWS_BICUBIC,NULL,
                        NULL,NULL);
                sws_scale(img_convert_ctx, pFrame->data,
                        pFrame->linesize,
                        0,
                        pFrame->height,
                        pict.data,
                        pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch(event.type) {
            case SDL_QUIT:
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
        }

    }

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
Ejemplo n.º 20
0
void AUD_initOnce()
{
#ifdef WITH_FFMPEG
	av_register_all();
#endif
}
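Worth noting for this and the other examples: av_register_all() is a no-op and deprecated from FFmpeg 4.0 (libavformat 58) onward. A minimal sketch of version-guarding the call so the file compiles cleanly against both old and new trees (the exact deprecation point is in the lavf 58 series):

#include <libavformat/version.h>

void AUD_initOnce()
{
#if defined(WITH_FFMPEG) && LIBAVFORMAT_VERSION_MAJOR < 58
	av_register_all(); /* needed only on pre-4.0 FFmpeg */
#endif
}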
Ejemplo n.º 21
0
int main (int argc, char *argv[])
{
	av_register_all(); // register all formats and codecs
	AVFormatContext *pFormatCtx = NULL; // format context; filled in by avformat_open_input()
	// Open media (video/audio) file.
	// av_open_input_file() was replaced by avformat_open_input(), which drops the old buf_size
	// argument; the matching close call is now avformat_close_input() (av_close_input_file is deprecated).
	if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
		return -1; // couldn't open file
	// Retrieve stream information.
	// avformat_find_stream_info() replaces av_find_stream_info(); pass NULL for the AVDictionary **options.
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
		return -1; // couldn't find stream information
	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, argv[1], 0); // av_dump_format() replaces dump_format()

	// Find the first audio stream
	int i;
	AVCodecContext *aCodecCtx;
	int audioStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO && audioStream < 0) { // AVMEDIA_TYPE_AUDIO replaces CODEC_TYPE_AUDIO
			audioStream = i;
			break;
		}
	if (audioStream == -1)
		return -1; // didn't find an audio stream
	// Get a pointer to the codec context for the audio stream
	aCodecCtx = pFormatCtx->streams[audioStream]->codec;
//	printf ("%d\n",audioStream); //for test

	SDL_AudioSpec wanted_spec, spec;
	wanted_spec.freq = aCodecCtx->sample_rate;
	wanted_spec.format = AUDIO_S16SYS;
	wanted_spec.channels = aCodecCtx->channels;
	wanted_spec.silence = 0;
	wanted_spec.samples = 1024;
	wanted_spec.callback = audio_callback; // defined elsewhere; drains audioq
	wanted_spec.userdata = aCodecCtx;
	if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
		fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
		return -1;
	}
	AVCodec *aCodec;
	aCodec=avcodec_find_decoder(aCodecCtx->codec_id);
	if(!aCodec){
		fprintf(stderr,"Unsupported codec!\n");
		return -1;
	}
	avcodec_open2(aCodecCtx, aCodec, NULL); // avcodec_open2() replaces avcodec_open()
	
	packet_queue_init(&audioq);
	SDL_PauseAudio(0);

	AVPacket packet;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		// Only the audio stream matters here, so queue audio packets and drop the rest.
		if (packet.stream_index == audioStream) {
			packet_queue_put(&audioq, &packet);
		} else {
			av_free_packet(&packet);
		}
	}
	}


/*	SDL_Event event;
	static VideoState *global_video_state;
	static int decode_interrupt_cb(void *ctx){
		return global_video_state && global_video_state->abort_request;
	}
//	url_set_interrupt_cb(decode_interrupt_cb);//undefined in the current libavformat/avio.h version. Use above instead.
	SDL_PollEvent(&event);
	switch(event.type){
		case SDL_QUIT:
		quit = 1;
	}*/
}
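packet_queue_init(), packet_queue_put() and the audioq global are assumed to be defined elsewhere in this example. A minimal sketch of that structure, in the spirit of the classic tutorial code this snippet follows (names and fields are illustrative), pairs an AVPacketList with an SDL mutex/condition so the demuxer thread and the audio callback can hand packets across safely:

typedef struct PacketQueue {
	AVPacketList *first_pkt, *last_pkt;
	int nb_packets;
	int size;
	SDL_mutex *mutex;
	SDL_cond *cond;
} PacketQueue;

PacketQueue audioq;

void packet_queue_init(PacketQueue *q) {
	memset(q, 0, sizeof(PacketQueue));
	q->mutex = SDL_CreateMutex();
	q->cond = SDL_CreateCond();
}

int packet_queue_put(PacketQueue *q, AVPacket *pkt) {
	AVPacketList *pkt1;
	if (av_dup_packet(pkt) < 0) /* make sure the packet owns its data */
		return -1;
	pkt1 = av_malloc(sizeof(AVPacketList));
	if (!pkt1)
		return -1;
	pkt1->pkt = *pkt;
	pkt1->next = NULL;
	SDL_LockMutex(q->mutex);
	if (!q->last_pkt)
		q->first_pkt = pkt1;
	else
		q->last_pkt->next = pkt1;
	q->last_pkt = pkt1;
	q->nb_packets++;
	q->size += pkt1->pkt.size;
	SDL_CondSignal(q->cond); /* wake the consumer waiting in the audio callback */
	SDL_UnlockMutex(q->mutex);
	return 0;
}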
Ejemplo n.º 22
0
int main(int argc, char **argv) {

  if(argc<2) {
    fprintf(stderr,"Missing arguments\n"); 
    fprintf(stderr,"Usage: %s [-g termsize] filename\n",argv[0]); 
    return -1;
  }
  char *filename = argv[1];
  char *screensize = 0;
  // screen resolution
  int tw = TERM_W;
  int th = TERM_H;

  /* parse arguments*/
  if(argc>3) {
    int i;
    if(strcmp(argv[1],"-g")==0) {
      screensize = argv[2];
      filename = argv[3];
      sscanf(screensize,"%dx%d",&tw,&th); 
    }
  }
  

  printf("before init w = %d, h = %d\n",tw,th);
  init_screen(tw,th);
  
  /* init all codecs */
  av_register_all();

  AVFormatContext *pFmtCtx;
  struct SwsContext *pSwsCtx;

  pFmtCtx = avformat_alloc_context();
  if(avformat_open_input(&pFmtCtx, filename, NULL, NULL) != 0) {
    fprintf(stderr,"Failed to open file: %s\n",argv[1]); 
    return -1;
  }

  if(avformat_find_stream_info(pFmtCtx,NULL) < 0) { 
    fprintf(stderr,"No stream found!\n"); 
    return -1;
  }

  av_dump_format(pFmtCtx, 0, filename,0);

  int i;
  int videoStream;
  AVCodecContext *pCodecCtx;

  videoStream = -1;
  for(i=0; i<pFmtCtx->nb_streams; i++) {
    if(pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      videoStream = i;
      break;
    }
  }

  if(videoStream == -1) {
    fprintf(stderr,"No stream found!\n"); 
    return -1;
  }

  pCodecCtx = pFmtCtx->streams[videoStream]->codec;

  /* find suitable codec */
 
  AVCodec * pCodec;
  
  pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
  if(!pCodec) {
    fprintf(stderr,"No suitable decoder found!\n"); 
    return -1;
  }

  if(avcodec_open2(pCodecCtx,pCodec,NULL)<0) {
    fprintf(stderr,"Could not open codec!\n"); 
    return -1;

  }

  AVFrame *pFrame;
  AVFrame *pPict;

  /* allocate data structs */
  pFrame = avcodec_alloc_frame();
  pPict  = avcodec_alloc_frame();

  uint8_t *buffer;
  int szPict;
  int sw,sh;

  sw = pCodecCtx->width;
  sh = pCodecCtx->height;

  // allocate buffer of picture size
  szPict = avpicture_get_size(PIX_FMT_RGB24, sw,sh);

  buffer = (uint8_t *)av_malloc(szPict*sizeof(uint8_t));

  /* associate frame with out buffer */
  avpicture_fill( (AVPicture *)pPict,buffer,PIX_FMT_RGB24, sw, sh);

  int frameFinished;
  AVPacket packet;

  /* init scale context to scale to terminal resolution */
  pSwsCtx = sws_getContext(sw,sh,pCodecCtx->pix_fmt,tw,th,PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

  i=0;

  /* read as long we have packets in the stream */
  while(av_read_frame(pFmtCtx,&packet)>=0) {
    
     /* we only need packets of our video stream*/
    if(packet.stream_index == videoStream) {
      
      /* decode video frame */
      avcodec_decode_video2(pCodecCtx,pFrame,&frameFinished,
                            &packet);

      if(frameFinished) {
        /* scale, display and sleep for ~30ms*/
        sws_scale(pSwsCtx,pFrame->data, pFrame->linesize,0,sh,pPict->data, pPict->linesize);
        ascii_art(pPict);
        usleep(30000);
    
      }
    }
    /* free current packet struct */
    av_free_packet(&packet);
  }

  /* tidy up.. */
  av_free(buffer);
  av_free(pPict);
  av_free(pFrame);
  avcodec_close(pCodecCtx);
  avformat_free_context(pFmtCtx);

  return 0;
}
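ascii_art() is not shown above. A minimal sketch of what it could look like (all names here are hypothetical; it assumes the RGB24 frame produced by pSwsCtx at the tw×th terminal size) maps each pixel's approximate luminance onto a character ramp:

/* Sketch: print an RGB24 frame as ASCII, darkest to brightest. */
static void ascii_art_sketch(AVFrame *p, int w, int h)
{
  static const char ramp[] = " .:-=+*#%@";
  int x, y;
  for (y = 0; y < h; y++) {
    uint8_t *row = p->data[0] + y * p->linesize[0];
    for (x = 0; x < w; x++) {
      uint8_t *px = row + 3 * x;
      /* integer Rec.601-style luma approximation */
      int luma = (66 * px[0] + 129 * px[1] + 25 * px[2] + 128) >> 8;
      putchar(ramp[luma * (int)(sizeof(ramp) - 2) / 255]);
    }
    putchar('\n');
  }
}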
Ejemplo n.º 23
0
int main(int argc, char *argv[]) {
  SDL_Event       event;
  VideoState      *is;
  is = av_mallocz(sizeof(VideoState));
  if(argc < 2) {
    fprintf(stderr, "Usage: test <file>\n");
    exit(1);
  }
  // Register all formats and codecs
  av_register_all();

  if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
    fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
    exit(1);
  }

  // Make a screen to put our video
#ifndef __DARWIN__
  screen = SDL_SetVideoMode(640, 480, 0, 0);
#else
  screen = SDL_SetVideoMode(640, 480, 24, 0);
#endif
  if(!screen) {
    fprintf(stderr, "SDL: could not set video mode - exiting\n");
    exit(1);
  }

  av_strlcpy(is->filename, argv[1], 1024);


  // Initialize the lock that guards the video picture buffer (pictq).
  // Once the event loop starts driving the video functions, they pull pre-decoded
  // frames out of pictq while the video decoder pushes frames in, and we don't
  // know which will happen first.
  is->pictq_mutex = SDL_CreateMutex();
  is->pictq_cond = SDL_CreateCond();

  // schedule_refresh() is defined elsewhere (see the sketch after this example): it tells the system to push an FF_REFRESH_EVENT after the given number of milliseconds.
  schedule_refresh(is, 40);

  is->av_sync_type = DEFAULT_AV_SYNC_TYPE;
  // Spawn a new thread that shares the parent's memory; it runs decode_thread() with the VideoState struct as its argument.
  is->parse_tid = SDL_CreateThread(decode_thread, is);
  if(!is->parse_tid) {
    av_free(is);
    return -1;
  }

  // Event loop
  for(;;) {

    SDL_WaitEvent(&event);
    switch(event.type) {
    case FF_QUIT_EVENT:
    case SDL_QUIT:
      is->quit = 1;
      /*
       * If the video has finished playing, then both the picture and
       * audio queues are waiting for more data.  Make them stop
       * waiting and terminate normally.
       */
      SDL_CondSignal(is->audioq.cond);
      SDL_CondSignal(is->videoq.cond);
      SDL_Quit();
      exit(0);
      break;
    case FF_ALLOC_EVENT:
      alloc_picture(event.user.data1);
      break;
    case FF_REFRESH_EVENT:
      video_refresh_timer(event.user.data1);
      break;
    default:
      break;
    }
  }
  return 0;
}
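schedule_refresh(), referenced above, is typically a thin wrapper around SDL_AddTimer that pushes FF_REFRESH_EVENT back to this event loop. A sketch under that assumption (this matches the pattern the tutorial this example follows):

/* Sketch: fire FF_REFRESH_EVENT for `opaque` after `delay` ms. */
static Uint32 sdl_refresh_timer_cb(Uint32 interval, void *opaque)
{
  SDL_Event event;
  event.type = FF_REFRESH_EVENT;
  event.user.data1 = opaque;
  SDL_PushEvent(&event);
  return 0; /* 0 = one-shot timer, do not repeat */
}

static void schedule_refresh(VideoState *is, int delay)
{
  SDL_AddTimer(delay, sdl_refresh_timer_cb, is);
}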
Ejemplo n.º 24
0
int main(int argc, char* argv[]) {

	AVCodec *ptr_codec;
	AVCodecContext *ptr_codec_context = NULL;
	int frame_size;
	int out_size, outbuf_size;
	FILE *f;
	uint8_t *samples;
	uint8_t *outbuf;

	/*	register codecs and formats*/
	av_register_all();

	printf("Audio encoding\n");

	/* find the aac encoder */
//	ptr_codec = avcodec_find_encoder(CODEC_ID_AAC);
	ptr_codec = avcodec_find_encoder(AV_CODEC_ID_SPEEX); //AV_CODEC_ID_SPEEX
	if (!ptr_codec) {
		fprintf(stderr, "codec not found\n");
		exit(1);
	}

	ptr_codec_context = avcodec_alloc_context3(ptr_codec);

	/* put sample parameters */
	//libspeexenc.c
//	ptr_codec_context->bit_rate = 56000; //the average bitrate
	ptr_codec_context->sample_rate = 8000; // Speex has 3 modes, each of which uses a specific sample rate
	ptr_codec_context->channels = 1; //number of audio channels
	ptr_codec_context->sample_fmt = AV_SAMPLE_FMT_S16; //sample format
//	ptr_codec_context->global_quality = 2;

	AVDictionary *opts = NULL;
	av_dict_set(&opts, "cbr_quality", "8", 0);


	/* open it */
	if (avcodec_open2(ptr_codec_context, ptr_codec ,&opts) < 0) {
		fprintf(stderr, "could not open codec\n");
		exit(1);
	}

	/* the codec gives us the frame size, in samples */
	frame_size = ptr_codec_context->frame_size;
	printf("c->frame_size = %d ...\n", ptr_codec_context->frame_size);
	samples = malloc(frame_size * 2 * ptr_codec_context->channels);
	printf("samples = %d \n", frame_size * 2 * ptr_codec_context->channels);
	// The hard-coded 2 is the number of bytes per sample:
	// av_get_bytes_per_sample(ptr_codec_context->sample_fmt) returns 2 for AV_SAMPLE_FMT_S16.
	// (This mirrors how ffmpeg.c sizes its sample buffers.)
	printf("ptr_codec_context->channels = %d ...\n",
			ptr_codec_context->channels);
	printf("samples = %d ...\n", frame_size * 2 * ptr_codec_context->channels);

	outbuf_size = 10000;
	outbuf = malloc(outbuf_size);

	f = fopen(argv[1], "wb");
	if (!f) {
		fprintf(stderr, "could not open %s\n", argv[1]);
		exit(1);
	}

	FILE *p_source = fopen(argv[2], "r+");

	printf("before while ....\n");
	while (!feof(p_source)) {
		fread(samples, 1, frame_size * 2 * ptr_codec_context->channels,
				p_source);

		//		/* encode the samples */
		//out packet
		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = NULL;
		pkt.size = 0;

		//in frame
		AVFrame *frame = avcodec_alloc_frame();
		if (frame == NULL) {
			printf("frame malloc failed ...\n");
			exit(1);
		}
		avcodec_get_frame_defaults(frame);
		frame->nb_samples = (frame_size * 2 * ptr_codec_context->channels) / 2;   //number of audio samples (per channel) described by this frame

	/*说明一点frame->nb_samples 这个一定要设置正确哈。。*/
		int ret;
		if ((ret = avcodec_fill_audio_frame(frame, ptr_codec_context->channels/*2*/, AV_SAMPLE_FMT_S16,
				samples, (frame_size * 2 * ptr_codec_context->channels), 1))
				< 0) {
			av_log(NULL, AV_LOG_FATAL, "Audio encoding failed...\n");
			exit(1);
		}

		int got_packet = 0;
		if (avcodec_encode_audio2(ptr_codec_context, &pkt, frame, &got_packet)
				< 0) {
			av_log(NULL, AV_LOG_FATAL, "Audio encoding failed\n");
			exit(1);
		}

		if (got_packet) {
			printf("write data ..pkt.size = %d.\n" ,pkt.size);
			fwrite(pkt.data, 1, pkt.size, f);
		}
		av_free_packet(&pkt);
		av_free(frame);
	}

	fclose(f);
	free(outbuf);
	free(samples);

	avcodec_close(ptr_codec_context);
	av_free(ptr_codec_context);

	/*free sth*/

	return 0;
}
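Rather than hard-coding the 2 discussed in the comments above, the per-frame buffer size can be derived from the sample format, which keeps the arithmetic honest if the format ever changes. A small sketch using the same names as the example:

/* Sketch: compute the input buffer size for one frame from the codec config. */
int bytes_per_sample = av_get_bytes_per_sample(ptr_codec_context->sample_fmt); /* 2 for S16 */
int frame_bytes = ptr_codec_context->frame_size * bytes_per_sample * ptr_codec_context->channels;
samples = malloc(frame_bytes);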
Ejemplo n.º 25
0
void FFmpegVideo::restart()
{
	char chVidName1[200];
	int iProcessOrder1 = this->iProcessOrder;
	strcpy(chVidName1, this->chVidName);

	// Tear down the old state
	// Free the RGB image
	av_free(buffer);
	av_free(pFrameBGR);
	av_free(pFrameRGB);

	// Free the YUV frame
	av_free(pFrameOri);

	// Close the codec
	avcodec_close(pCodecCtx);

	av_close_input_file(pFormatCtx);

	if(imageFrame)
		delete imageFrame;

	// Start over
	this->iProcessOrder = iProcessOrder1;
	strcpy(chVidName, chVidName1);
	this->fRate = 0;
	iTotalFrameNum = 0;
	iNowFrameNum = 0;
	frameFinished = 0;
	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(av_open_input_file(&pFormatCtx, chVidName, NULL, 0, NULL)!=0)
	{
		bIfSuccess = false;
		return; // Couldn't open file
	}

	// Retrieve stream information
	if(av_find_stream_info(pFormatCtx)<0)
	{
		bIfSuccess = false;
		return; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, chVidName, 0);

	this->iTotalFrameNum = pFormatCtx->streams[0]->duration;
	this->fFrmRat = pFormatCtx->streams[0]->r_frame_rate.num/(float)(pFormatCtx->streams[0]->r_frame_rate.den);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		bIfSuccess = false;
		return; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		bIfSuccess = false;
		fprintf(stderr, "Unsupported codec!\n");
		return; // Codec not found
	}
	// Open codec
	while (avcodec_open(pCodecCtx, pCodec) < 0) /* this call can keep returning -1, so retry */ {
		Sleep(this->iProcessOrder);
	}

	// Allocate video frame
	pFrameOri=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameBGR=avcodec_alloc_frame();
	if(pFrameBGR==NULL)
	{
		bIfSuccess = false;
		return;
	}
	pFrameRGB=avcodec_alloc_frame();
	if(pFrameRGB==NULL)
	{
		bIfSuccess = false;
		return;
	}

	// Determine required buffer size and allocate buffer
	numBytes=avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width,pCodecCtx->height);
	imageFrame->height = pCodecCtx->height;
	imageFrame->width = pCodecCtx->width;
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	imageFrame->imageData = new uint8_t[numBytes*sizeof(uint8_t)];
	

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameBGR, buffer, PIX_FMT_BGR24,
		pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		pCodecCtx->width, pCodecCtx->height);
	
	// Note: the pixel format chosen here determines the output image format
	if(this->bIfUseHD == false)
		ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
		pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
		PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);

	this->getOneFrame();

	bIfSuccess = true;

}
Ejemplo n.º 26
0
JNIEXPORT jint JNICALL Java_com_leixiaohua1020_sffmpegandroiddecoder_MainActivity_decode(
		JNIEnv *env, jobject obj, jstring input_jstr, jstring output_jstr) {
	AVFormatContext *pFormatCtx;
	int i, videoindex;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;
	AVFrame *pFrame, *pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;
	FILE *fp_yuv;
	int frame_cnt;
	clock_t time_start, time_finish;
	double time_duration = 0.0;

	char input_str[500] = { 0 };
	char output_str[500] = { 0 };
	char info[1000] = { 0 };
	sprintf(input_str, "%s", (*env)->GetStringUTFChars(env, input_jstr, NULL));
	sprintf(output_str, "%s",
			(*env)->GetStringUTFChars(env, output_jstr, NULL));

	//FFmpeg av_log() callback
	av_log_set_callback(custom_log);

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if (avformat_open_input(&pFormatCtx, input_str, NULL, NULL) != 0) {
		LOGE("Couldn't open input stream.\n");
		return -1;
	}

	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		LOGE("Couldn't find stream information.\n");
		return -1;
	}

	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++){
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}
	}

	if (videoindex == -1) {
		LOGE("Couldn't find a video stream.\n");
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

	if (pCodec == NULL) {
		LOGE("Couldn't find Codec.\n");
		return -1;
	}

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		LOGE("Couldn't open codec.\n");
		return -1;
	}

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t *) av_malloc(
			avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width,
					pCodecCtx->height));

	avpicture_fill((AVPicture *) pFrameYUV, out_buffer, PIX_FMT_YUV420P,
			pCodecCtx->width, pCodecCtx->height);

	packet = (AVPacket *) av_malloc(sizeof(AVPacket));

	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
			pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
			PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	sprintf(info, "[Input     ]%s\n", input_str);
	sprintf(info, "%s[Output    ]%s\n", info, output_str);
	sprintf(info, "%s[Format    ]%s\n", info, pFormatCtx->iformat->name);
	sprintf(info, "%s[Codec     ]%s\n", info, pCodecCtx->codec->name);
	sprintf(info, "%s[Resolution]%dx%d\n", info, pCodecCtx->width,
			pCodecCtx->height);

	fp_yuv = fopen(output_str, "wb+");
	if (fp_yuv == NULL) {
		printf("Cannot open output file.\n");
		return -1;
	}

	frame_cnt = 0;
	time_start = clock();

	while (av_read_frame(pFormatCtx, packet) >= 0) {
		if (packet->stream_index == videoindex) {
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,
					packet);

			if (ret < 0) {
				LOGE("Decode Error.\n");
				return -1;
			}

			if (got_picture) {
				sws_scale(img_convert_ctx,
						(const uint8_t* const *) pFrame->data, pFrame->linesize,
						0, pCodecCtx->height, pFrameYUV->data,
						pFrameYUV->linesize);

				y_size = pCodecCtx->width * pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
				//Output info
				char pictype_str[10] = { 0 };

				switch (pFrame->pict_type) {
				case AV_PICTURE_TYPE_I:
					sprintf(pictype_str, "I");
					break;
				case AV_PICTURE_TYPE_P:
					sprintf(pictype_str, "P");
					break;
				case AV_PICTURE_TYPE_B:
					sprintf(pictype_str, "B");
					break;
				default:
					sprintf(pictype_str, "Other");
					break;
				}

				LOGI("Frame Index: %5d. Type:%s", frame_cnt, pictype_str);
				frame_cnt++;
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: Flush Frames remained in Codec
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data,
				pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data,
				pFrameYUV->linesize);
		int y_size = pCodecCtx->width * pCodecCtx->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
		//Output info
		char pictype_str[10] = { 0 };

		switch (pFrame->pict_type) {
		case AV_PICTURE_TYPE_I:
			sprintf(pictype_str, "I");
			break;
		case AV_PICTURE_TYPE_P:
			sprintf(pictype_str, "P");
			break;
		case AV_PICTURE_TYPE_B:
			sprintf(pictype_str, "B");
			break;
		default:
			sprintf(pictype_str, "Other");
			break;
		}

		LOGI("Frame Index: %5d. Type:%s", frame_cnt, pictype_str);
		frame_cnt++;
	}
	time_finish = clock();
	time_duration = 1000.0 * (time_finish - time_start) / CLOCKS_PER_SEC; // clock ticks -> ms

	sprintf(info + strlen(info), "[Time      ]%fms\n", time_duration);
	sprintf(info + strlen(info), "[Count     ]%d\n", frame_cnt);

	sws_freeContext(img_convert_ctx);

	fclose(fp_yuv);

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
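LOGE/LOGI and custom_log above come from the surrounding JNI source file. A plausible sketch of them (the tag string is an assumption; the Android NDK logging calls are real API) routes FFmpeg's log callback into logcat:

#include <android/log.h>

#define TAG  "ffmpeg-jni" /* tag is an assumption */
#define LOGE(...)  __android_log_print(ANDROID_LOG_ERROR, TAG, __VA_ARGS__)
#define LOGI(...)  __android_log_print(ANDROID_LOG_INFO,  TAG, __VA_ARGS__)

/* Matches the callback signature expected by av_log_set_callback(). */
static void custom_log(void *ptr, int level, const char *fmt, va_list vl)
{
	__android_log_vprint(ANDROID_LOG_DEBUG, TAG, fmt, vl);
}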
Ejemplo n.º 27
0
FFmpegVideo::FFmpegVideo(char* chVidName1, int iProcessOrder1, float fRate1)
{
	this->iProcessOrder = iProcessOrder1;
	strcpy(chVidName, chVidName1);
	this->fRate = fRate1;
	iTotalFrameNum = 0;
	iNowFrameNum = 0;
	frameFinished = 0;
	nFps = 0;
	buffer = NULL;
	pFrameBGR = NULL;
	pFrameRGB = NULL;
	pFrameOri = NULL;
	pCodecCtx = NULL;
	imageFrame = new ImageFrame();
	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(av_open_input_file(&pFormatCtx, chVidName, NULL, 0, NULL)!=0)
	{
		bIfSuccess = false;
		return; // Couldn't open file
	}

	// Retrieve stream information
	if(av_find_stream_info(pFormatCtx)<0)
	{
		bIfSuccess = false;
		return; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, chVidName, 0);

	this->iTotalFrameNum = pFormatCtx->streams[0]->nb_frames;
	this->fFrmRat = pFormatCtx->streams[0]->r_frame_rate.num/(float)(pFormatCtx->streams[0]->r_frame_rate.den);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		bIfSuccess = false;
		return; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	printf("%d-%d\n", pCodecCtx->height, pCodecCtx->width);

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		bIfSuccess = false;
		fprintf(stderr, "Unsupported codec!\n");
		return; // Codec not found
	}
	// Open codec
	//while(bIfLockCloseCodec)
	//{
	//	Sleep(10);
	//}
	//bIfLockCloseCodec = true;
	//if(avcodec_open(pCodecCtx, pCodec)<0)
	//	return -1; // Could not open codec
	//bIfLockCloseCodec = false;

	while (avcodec_open(pCodecCtx, pCodec) < 0) /* this call can keep returning -1, so retry */ {
		//fprintf(stderr, "could not open codec\n");
		Sleep(this->iProcessOrder);
		//exit(1);
	}

	// Allocate video frame
	pFrameOri=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameBGR=avcodec_alloc_frame();
	if(pFrameBGR==NULL)
	{
		bIfSuccess = false;
		return;
	}
	pFrameRGB=avcodec_alloc_frame();
	if(pFrameRGB==NULL)
	{
		bIfSuccess = false;
		return;
	}

	// Determine required buffer size and allocate buffer
	imageFrame->size = numBytes=avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	imageFrame->widthStep = WIDTHSTEP(pCodecCtx->width);
	imageFrame->height = pCodecCtx->height;
	imageFrame->width = pCodecCtx->width;
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	imageFrame->imageData = new uint8_t[numBytes*sizeof(uint8_t)];
	memset(imageFrame->imageData, 0, numBytes*sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameBGR, buffer, PIX_FMT_BGR24,
		pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		pCodecCtx->width, pCodecCtx->height);
	// Note: the pixel format chosen here determines the output image format
	ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
		pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
		PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);

	if(this->getOneFrame()==0)
		bIfSuccess = true;
	else
		bIfSuccess = false;
}
Ejemplo n.º 28
0
int main(int argc, char *argv[]) {
	try {
		std::map<std::string, std::string>	aopt;
		const int param = parse_options(argc, argv, aopt);
		// Register all formats and codecs
		av_register_all();
		if (settings::REF_VIDEO == "")
			throw std::runtime_error("Reference video not specified");
		bool		glb_exit = false;
		mt::Semaphore	sem_cons;
		// create data for reference video
		mt::Semaphore	ref_prod;
		VUCHAR		ref_buf;
		int		ref_frame;
		qav::qvideo	ref_video(settings::REF_VIDEO.c_str(), settings::VIDEO_SIZE_W, settings::VIDEO_SIZE_H);
		// get const values
		const qav::scr_size	ref_sz = ref_video.get_size();
		const int		ref_fps_k = ref_video.get_fps_k();
		//
		//ref_video.get_frame(ref_buf);
		//return 0;
		// 
		V_VPDATA	v_data;
		for(int i = param; i < argc; ++i) {
			try {
				shared_ptr<vp_data>	vpd(new vp_data);
				vpd->name = get_filename(argv[i]);
				vpd->video = new qav::qvideo(argv[i], ref_sz.x, ref_sz.y);
				if (vpd->video->get_fps_k() != ref_fps_k) {
					if (settings::IGNORE_FPS) {
						LOG_WARNING << '[' << argv[i] << "] has different FPS (" << vpd->video->get_fps_k()/1000 << ')' << std::endl;
						v_data.push_back(vpd);
					} else LOG_ERROR << '[' << argv[i] << "] skipped different FPS" << std::endl;
				} else v_data.push_back(vpd);
			} catch(std::exception& e) {
				LOG_ERROR << '[' << argv[i] << "] skipped " << e.what() << std::endl;
			}
		}
		if (v_data.empty()) return 0;
		// print some infos
		LOG_INFO << "Skip frames: " << ((settings::SKIP_FRAMES > 0) ? settings::SKIP_FRAMES : 0) << std::endl;
		LOG_INFO << "Max frames: " << ((settings::MAX_FRAMES > 0) ? settings::MAX_FRAMES : 0) << std::endl;
		// create the stats analyzer (like the psnr)
		LOG_INFO << "Analyzer set: " << settings::ANALYZER << std::endl;
		std::auto_ptr<stats::s_base>	s_analyzer(stats::get_analyzer(settings::ANALYZER.c_str(), v_data.size(), ref_sz.x, ref_sz.y, std::cout));
		// set the default values, in case will get overwritten
		s_analyzer->set_parameter("fpa", "25");
		s_analyzer->set_parameter("blocksize", "8");
		// load the passed parameters
		for(std::map<std::string, std::string>::const_iterator it = aopt.begin(); it != aopt.end(); ++it) {
			LOG_INFO << "Analyzer parameter: " << it->first << " = " << it->second << std::endl;
			s_analyzer->set_parameter(it->first.c_str(), it->second.c_str());
		}
		// create all the threads
		video_producer	ref_vpth(ref_frame, ref_prod, sem_cons, ref_buf, ref_video, glb_exit);
		V_VPTH		v_th;
		for(V_VPDATA::iterator it = v_data.begin(); it != v_data.end(); ++it)
			v_th.push_back(new video_producer((*it)->frame, (*it)->prod, sem_cons, (*it)->buf, *((*it)->video), glb_exit));
		// we'll need some tmp buffers
		VUCHAR			t_ref_buf;
		std::vector<VUCHAR>	t_bufs(v_data.size());
		// and now the core algorithm
		// init all the semaphores
		producers_utils::lock(sem_cons, ref_prod, v_data);
		// start the threads
		producers_utils::start(ref_vpth, v_th);
		// print header
		std::cout << "Sample,";
		for(V_VPDATA::const_iterator it = v_data.begin(); it != v_data.end(); ++it)
			std::cout << (*it)->name << ',';
		std::cout << std::endl;
		while(!glb_exit) {
			// wait for the consumer to be signalled 1 + n times
			const static int CONS_SIG_NUM = 1 + v_th.size();
			producers_utils::sync(sem_cons, CONS_SIG_NUM);
			// now check everything is ok
			const int	cur_ref_frame = ref_frame;
			if (-1 == cur_ref_frame) {
				glb_exit = true;
				// allow the producers to run
				producers_utils::unlock(ref_prod, v_data);
				continue;
			}
			// in case we have to skip frames...
			if (settings::SKIP_FRAMES > 0 && settings::SKIP_FRAMES >= cur_ref_frame) {
				// allow the producers to run
				producers_utils::unlock(ref_prod, v_data);
				continue;
			}
			// vector of bool telling if everything is ok
			std::vector<bool>	v_ok;
			for(V_VPDATA::const_iterator it = v_data.begin(); it != v_data.end(); ++it)
				if ((*it)->frame == cur_ref_frame) v_ok.push_back(true);
				else v_ok.push_back(false);
			// then swap the vectors
			t_ref_buf.swap(ref_buf);
			for(int i = 0; i < v_data.size(); ++i)
				t_bufs[i].swap(v_data[i]->buf);
			// allow the producers to run
			producers_utils::unlock(ref_prod, v_data);
			// finally process data
			s_analyzer->process(cur_ref_frame, t_ref_buf, v_ok, t_bufs);
			// check if we have to exit
			if (settings::MAX_FRAMES > 0 && cur_ref_frame >= settings::MAX_FRAMES) {
				glb_exit = true;
				// allow the producers to run
				producers_utils::unlock(ref_prod, v_data);
				break;
			}
		}
		// wait for all threads
		producers_utils::stop(ref_vpth, v_th);
	} catch(std::exception& e) {
		LOG_ERROR << e.what() << std::endl;
	} catch(...) {
		LOG_ERROR << "Unknown exception" << std::endl;
	}
}
Ejemplo n.º 29
0
int main(int argc, char *argv[]) {
        AVFormatContext *pFormatCtx = NULL;
        int i, videoStream;
        AVCodecContext *pCodecCtx;
        AVCodec *pCodec;
        AVFrame *pFrame;
        AVFrame *pFrameRGB;
        struct SwsContext * pSwsCtx;
        AVPacket packet;
        int frameFinished;
        int numBytes;
        uint8_t *buffer;

        if (argc < 2) {
                printf("Please provide a movie file\n");
                return -1;
        }
        // Register all formats and codecs
        av_register_all();

        // Open video file
        if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
                return -1; // Couldn't open file

        // Retrieve stream information
        if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
                return -1; // Couldn't find stream information

        // Dump information about file onto standard error
        av_dump_format(pFormatCtx, 0, argv[1], 0);

        // Find the first video stream
        videoStream = -1;
        for (i = 0; i < pFormatCtx->nb_streams; i++)
                if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
                        videoStream = i;
                        break;
                }
        if (videoStream == -1)
                return -1; // Didn't find a video stream

        // Get a pointer to the codec context for the video stream
        pCodecCtx = pFormatCtx->streams[videoStream]->codec;

        // Find the decoder for the video stream
        pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
        if (pCodec == NULL) {
                fprintf(stderr, "Unsupported codec!\n");
                return -1; // Codec not found
        }
        // Open codec
        if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
                return -1; // Could not open codec

        // Allocate video frame
        pFrame = av_frame_alloc();

        // Allocate an AVFrame structure
        pFrameRGB = av_frame_alloc();
        if (pFrameRGB == NULL)
                return -1;

        // Determine required buffer size and allocate buffer
        numBytes = avpicture_get_size(PIX_FMT_RGB24, pCodecCtx->width,
                        pCodecCtx->height);
        buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

        // Assign appropriate parts of buffer to image planes in pFrameRGB
        // Note that pFrameRGB is an AVFrame, but AVFrame is a superset
        // of AVPicture
        avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB24,
                        pCodecCtx->width, pCodecCtx->height);

        pSwsCtx = sws_getContext(pCodecCtx->width,
                        pCodecCtx->height, pCodecCtx->pix_fmt,
                        pCodecCtx->width, pCodecCtx->height,
                        PIX_FMT_RGB24, SWS_FAST_BILINEAR, NULL, NULL, NULL);

        if (pSwsCtx == NULL) {
                fprintf(stderr, "Cannot initialize the sws context\n");
                return -1;
        }

        // Read frames and save first five frames to disk
        i = 0;
        while (av_read_frame(pFormatCtx, &packet) >= 0) {
                // Is this a packet from the video stream?
                if (packet.stream_index == videoStream) {
                        // Decode video frame
                        avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

                        // Did we get a video frame?
                        if (frameFinished) {

                                // Convert the image from its native format to RGB
                                sws_scale(pSwsCtx,
                                                        (const uint8_t * const *) pFrame->data,
                                                        pFrame->linesize, 0, pCodecCtx->height,
                                                        pFrameRGB->data,
                                                        pFrameRGB->linesize);

                                // Save the frame to disk
                                if (++i <= 5)
                                        SaveFrame(pFrameRGB, pCodecCtx->width, pCodecCtx->height,
                                                        i);
                        }
                }

                // Free the packet that was allocated by av_read_frame
                av_free_packet(&packet);
        }

        // Free the RGB image
        av_free(buffer);
        av_free(pFrameRGB);

        // Free the YUV frame
        av_free(pFrame);

        // Close the codec
        avcodec_close(pCodecCtx);

        // Close the video file
        avformat_close_input(&pFormatCtx);

        return 0;
}
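SaveFrame() is assumed above; in the classic tutorial this example follows, it simply dumps the RGB24 buffer as a binary PPM. A minimal sketch:

/* Sketch: write one RGB24 frame to frameN.ppm. */
void SaveFrame(AVFrame *pFrame, int width, int height, int iFrame)
{
        FILE *pFile;
        char szFilename[32];
        int y;

        sprintf(szFilename, "frame%d.ppm", iFrame);
        pFile = fopen(szFilename, "wb");
        if (pFile == NULL)
                return;

        fprintf(pFile, "P6\n%d %d\n255\n", width, height); /* PPM header */
        for (y = 0; y < height; y++)
                fwrite(pFrame->data[0] + y * pFrame->linesize[0], 1, width * 3, pFile);
        fclose(pFile);
}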
Ejemplo n.º 30
0
mfxStatus FFmpeg_Reader_Init(const char *strFileName, mfxU32 videoType)
{
    MSDK_CHECK_POINTER(strFileName, MFX_ERR_NULL_PTR);

    int res;

    g_videoType = videoType;

    // Initialize libavcodec, and register all codecs and formats
    av_register_all();

    // Open input container
    res = avformat_open_input(&g_pFormatCtx, strFileName, NULL, NULL);
    if(res) {
        printf("FFMPEG: Could not open input container\n");
        return MFX_ERR_UNKNOWN;
    }

    // Retrieve stream information
    res = avformat_find_stream_info(g_pFormatCtx, NULL);
    if(res < 0) {
        printf("FFMPEG: Couldn't find stream information\n");
        return MFX_ERR_UNKNOWN;
    }
    

    // Dump container info to console
    av_dump_format(g_pFormatCtx, 0, strFileName, 0);

    // Find the streams in the container
    g_videoStreamIdx = -1;
    for(unsigned int i=0; i<g_pFormatCtx->nb_streams; i++)
    {
        if(g_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO && g_videoStreamIdx == -1)
        {
            g_videoStreamIdx = i;
 
            // save decoded stream timestamp time base
            g_dec_time_base = g_pFormatCtx->streams[i]->time_base;

            if(videoType == MFX_CODEC_AVC)
            {
                // Retrieve required h264_mp4toannexb filter
                g_pBsfc = av_bitstream_filter_init("h264_mp4toannexb");
                if (!g_pBsfc) {
                    printf("FFMPEG: Could not aquire h264_mp4toannexb filter\n");
                    return MFX_ERR_UNKNOWN;
                }
            }
        }
#ifdef PROCESS_AUDIO
        else if(g_pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_AUDIO)
        {
            g_audioStreamIdx = i;
            g_pAudioStream = g_pFormatCtx->streams[i];
            g_audio_dec_time_base = g_pAudioStream->time_base;
        }
#endif
    }
    if(g_videoStreamIdx == -1)
        return MFX_ERR_UNKNOWN; // Didn't find any video streams in container

    return MFX_ERR_NONE;
}
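To actually use the h264_mp4toannexb filter initialized above, each video packet read from the container is run through the legacy (pre-AVBSFContext) filter API before being handed to the decoder. A sketch of the read loop, reusing g_pBsfc, g_pFormatCtx, and g_videoStreamIdx from the function above:

/* Sketch: convert MP4/MOV H.264 packets to Annex B with the legacy bsf API. */
AVPacket pkt;
while (av_read_frame(g_pFormatCtx, &pkt) >= 0) {
    if (pkt.stream_index == g_videoStreamIdx && g_pBsfc) {
        uint8_t *out_buf = NULL;
        int out_size = 0;
        int ret = av_bitstream_filter_filter(g_pBsfc,
                g_pFormatCtx->streams[g_videoStreamIdx]->codec, NULL,
                &out_buf, &out_size,
                pkt.data, pkt.size,
                pkt.flags & AV_PKT_FLAG_KEY);
        if (ret >= 0) {
            /* out_buf/out_size now hold the Annex B bitstream for the decoder */
            if (ret > 0)       /* >0 means the filter allocated a new buffer */
                av_free(out_buf);
        }
    }
    av_free_packet(&pkt);
}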