Example #1
0
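/* Append one rendered frame to the ffmpeg output (and flush any pending audio); returns non-zero on success. */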
int append_ffmpeg(RenderData *rd, int frame, int *pixels, int rectx, int recty, ReportList *reports) 
{
	AVFrame* avframe;
	int success = 1;

	fprintf(stderr, "Writing frame %i, "
		"render width=%d, render height=%d\n", frame,
		rectx, recty);

// why is this done before writing the video frame and again at end_ffmpeg?
//	write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

	if (video_stream) {
		avframe= generate_video_frame((unsigned char*) pixels, reports);
		success= (avframe && write_video_frame(rd, avframe, reports));

		/* Autosplit: once the current file exceeds FFMPEG_AUTOSPLIT_SIZE, close it and start a new one. */
		if (ffmpeg_autosplit) {
			if (avio_tell(outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
				end_ffmpeg();
				ffmpeg_autosplit_count++;
				success &= start_ffmpeg_impl(rd, rectx, recty, reports);
			}
		}
	}

#ifdef WITH_AUDASPACE
	write_audio_frames((frame - rd->sfra) / (((double)rd->frs_sec) / rd->frs_sec_base));
#endif
	return success;
}
Example #2
0
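/* Append one rendered frame for the given FFMpegContext (and flush any pending audio); returns non-zero on success. */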
int BKE_ffmpeg_append(void *context_v, RenderData *rd, int start_frame, int frame, int *pixels,
                      int rectx, int recty, const char *suffix, ReportList *reports)
{
	FFMpegContext *context = context_v;
	AVFrame *avframe;
	int success = 1;

	PRINT("Writing frame %i, render width=%d, render height=%d\n", frame, rectx, recty);

/* why is this done before writing the video frame and again at end_ffmpeg? */
//	write_audio_frames(frame / (((double)rd->frs_sec) / rd->frs_sec_base));

	if (context->video_stream) {
		avframe = generate_video_frame(context, (unsigned char *) pixels, reports);
		success = (avframe && write_video_frame(context, rd, frame - start_frame, avframe, reports));

		/* Autosplit: once the current file exceeds FFMPEG_AUTOSPLIT_SIZE, close it and start a new one. */
		if (context->ffmpeg_autosplit) {
			if (avio_tell(context->outfile->pb) > FFMPEG_AUTOSPLIT_SIZE) {
				end_ffmpeg_impl(context, true);
				context->ffmpeg_autosplit_count++;
				success &= start_ffmpeg_impl(context, rd, rectx, recty, suffix, reports);
			}
		}
	}

#ifdef WITH_AUDASPACE
	write_audio_frames(context, (frame - start_frame) / (((double)rd->frs_sec) / (double)rd->frs_sec_base));
#endif
	return success;
}
Example #3
0
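/* avencoder entry point: parse command-line options, initialize the audio/video sources, encoders and TS muxer, then loop interleaving encoded frames to the destination address. */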
int main(int argc,char **argv)
{
	
	
	const char *short_options = "i:w:h:b:d:p:s:g:f:l:?";
	struct option long_options[] = {
		{"help",	no_argument,	   NULL, '?'},
		{"index",	required_argument, NULL, 'i'},
		{"width",	required_argument, NULL, 'w'},
		{"height",	required_argument, NULL, 'h'},
		{"bitrate",	required_argument, NULL, 'b'},
		{"sbitrate",required_argument, NULL, 's'},
		{"destaddr",required_argument, NULL, 'd'},
		{"pids",	required_argument, NULL, 'p'},
		{"gopsize",	required_argument, NULL, 'g'},
		{"fps",		required_argument, NULL, 'f'},
		{"logfile",	required_argument, NULL, 'l'},
		{NULL,		0,				   NULL, 0},
	};
	int c = -1;
	bool has_index = false;
	bool has_destaddr = false;
	bool has_ebitrate = false;
	bool has_sbitrate = false;

	/* Parse command-line options. */
	while(1)
	{
		c = getopt_long(argc, argv, short_options, long_options, NULL);
		if(c == -1)
			break;
		switch(c)
		{
			case '?':
				printf("Usage:%s [options] index destaddr ....\n",argv[0]);
				printf("avencoder version %s\n",version_str);
				printf("-? --help 		Display this usage information.\n");
				printf("-i --index 		Set the index.\n");
				printf("-w --width 		Set the width.\n");
				printf("-h --height		Set the height.\n");
				printf("-b --bitrate 		Set the encoder bitrate .\n");
				printf("-s --sbitrate 		Set the smooth bitrate.\n");
				printf("-d --destaddr 		Set the dest addr (ip:port).\n");
				printf("-p --pids 		Set the service_id pmt_pid video_pid audio_pid (pmt_pid:service_id:video_pid:audio_pid).\n");
				printf("-g --gopsize 		Set the video encode gopsize (gopsize_min:gopsize_max).\n");
				printf("-l --logfile 		Set the log file path.\n");
				printf("Example: %s --index 0 --width 1280 --height 720 --bitrate 4000000 --pids 1024:1:64:65 --gopsize 5:100 --destaddr 192.168.60.248:14000.\n",argv[0]);
				printf("Example: %s -i 0 -w 1280 -h 720 -b 4000000 -p 1024:1:64:65 -g 5:100 -d 192.168.60.248:14000.\n",argv[0]);
				return -1;
			case 'i':
				m_index = atoi(optarg);
				has_index = true;
				break;
			case 'w':
				width = atoi(optarg);
				break;
			case 'h':
				height = atoi(optarg);
				break;
			case 'b':
				encode_bitrate = atoi(optarg);
				has_ebitrate = true;
				break;
			case 's':
				smooth_bitrate = atoi(optarg);
				has_sbitrate = true;
				break;
			case 'd':
				if(sscanf(optarg,"%[^:]:%d",destip,&destport) != 2)
				{
					fprintf(stderr ,"avencoder: error destip:destport.%s.\n",optarg);        
					return -2; 
				}
				strncpy(destip_record,destip,sizeof(destip_record)-1);
				destip_record[sizeof(destip_record)-1] = 0;
				destport_record = destport;
				has_destaddr = true;
				break;
			case 'p':
				if(sscanf(optarg,"%d:%d:%d:%d",&pmt_pid,&service_id,&video_pid,&audio_pid) != 4)
				{
					fprintf(stderr ,"avencoder: error pmt_pid:service_id:video_pid:audio_pid.%s.\n",optarg);        
					return -3; 
				}
				break;
			case 'g':
				if(sscanf(optarg,"%d:%d",&gopsize_min,&gopsize_max) != 2)
				{
					gopsize_min = gopsize_max = atoi(optarg);
				}
				break;
			case 'f':
				fps = atoi(optarg);
				break;
			case 'l':
				memset(logfile,0,sizeof(logfile));
				strncpy(logfile,optarg,sizeof(logfile)-1);
				break;

			}	
	}

	if(has_index == false || has_destaddr == false)
	{
		fprintf(stderr ,"avencoder: index and destaddr is must,not empty.\n");
		return -3;
	}
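	/* If only one bitrate was given, derive the other (smooth/peak bitrate defaults to 3x the encoder bitrate). */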
	if(has_ebitrate == true && has_sbitrate == false)
		smooth_bitrate = encode_bitrate*3;
	else if(has_ebitrate == false && has_sbitrate == true)
		encode_bitrate = smooth_bitrate/3;

	if(encode_bitrate < 0)
		encode_bitrate = 4*1024*1024;
	
	//TODO: check the validity of each parameter
	printf("avencoder version %s\n",version_str);
	printf("++++++index=%d|width=%d|height=%d|smooth_bitrate=%d|encode_bitrate=%d|destip=%s|destport=%d++++++\n",m_index,width,height,smooth_bitrate,encode_bitrate,destip,destport);
	printf("++++++pmt_pid=%d|service_id=%d|video_pid=%d|audio_pid=%d|gopsize_min=%d|gopsize_max=%d|logfile=%s++++++\n",pmt_pid,service_id,video_pid,audio_pid,gopsize_min,gopsize_max,logfile);

	InputParams_audiosource audio_source_params;
	InputParams_videosource video_source_params;
	InputParams_videoencoder video_encoder_params;
	InputParams_avmuxer avmuxer_params;
	InputParams_tssmooth tssmooth_params;

	audio_source_params.index = m_index;
	p_audio_source = init_audio_source(&audio_source_params);
	if(p_audio_source == NULL)
	{        
		fprintf(stderr ,"avencoder: init_audio_source fail..\n");        
		return -1;    
	}
	video_source_params.index = m_index;
	video_source_params.width = width;
	video_source_params.height = height;
	
	p_video_source = init_video_source(&video_source_params);
	if(p_video_source == NULL)
	{        
		fprintf(stderr ,"avencoder: init_video_source fail..\n");        
		return -1;    
	}

	p_audio_encoder = init_audio_encoder();
	if(p_audio_encoder == NULL)
	{		 
		fprintf(stderr ,"avencoder: init_audio_encoder fail..\n");		 
		return -1;	  
	}

	
	video_encoder_params.width = width;
	video_encoder_params.height = height;
	video_encoder_params.gopsize_min = gopsize_min;
	video_encoder_params.gopsize_max = gopsize_max;
	video_encoder_params.bitrate = encode_bitrate;
	video_encoder_params.fps = fps;
	p_video_encoder = init_video_encoder(&video_encoder_params);
	if(p_video_encoder == NULL)
	{		 
		fprintf(stderr ,"avencoder: init_video_encoder fail..\n");		 
		return -1;	  
	}
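
	/* TS smoothing is currently compiled out (#if 0); when enabled it routes output through a local smoother and rewrites destip/destport to its listen address. */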
#if 0
	if(smooth_bitrate > 0)
	{
		memset(&tssmooth_params,0,sizeof(tssmooth_params));
		tssmooth_params.index = m_index;
		tssmooth_params.listen_udp_port = 0;
		strcpy(tssmooth_params.dst_udp_ip,destip);
		tssmooth_params.dst_udp_port = destport;
		tssmooth_params.bit_rate = smooth_bitrate;
		tssmooth_params.buffer_max_size = 10*1024*1024;

		p_tssmooth = init_tssmooth(&tssmooth_params);
		if(p_tssmooth == NULL)
		{		 
			fprintf(stderr ,"avencoder: init_smooth fail..\n");		 
			return -1;	  
		}

		strcpy(destip,tssmooth_params.listen_udp_ip);
		destport = tssmooth_params.listen_udp_port;
		
		printf("++++++now dest addr is modify destip=%s|destport=%d++++++\n",destip,destport);
	}
#endif

	/* Configure and start the MPEG-TS muxer (H.264 video, 48 kHz audio, requested PIDs and destination). */
	avmuxer_params.codecID = KY_CODEC_ID_H264;
	avmuxer_params.nWidth = width;
	avmuxer_params.nHeight = height;
	avmuxer_params.nBitRate = encode_bitrate;
	avmuxer_params.nPeakBitRate = smooth_bitrate;
	avmuxer_params.nSamplerate = 48000;
	avmuxer_params.nFramerate = fps;
	snprintf(avmuxer_params.monitorName,sizeof(avmuxer_params.monitorName),"%d.monitor",m_index);
	snprintf(avmuxer_params.appName,sizeof(avmuxer_params.appName),"%s",argv[0]);

	strcpy(avmuxer_params.destip,destip);
	avmuxer_params.destport = destport;
	avmuxer_params.index = m_index;
	avmuxer_params.pmt_pid = pmt_pid;
	avmuxer_params.service_id = service_id;
	avmuxer_params.video_pid = video_pid;
	avmuxer_params.audio_pid = audio_pid;
	p_avmuxer = init_ts_muxer(&avmuxer_params);
	if(p_avmuxer == NULL)
	{		 
		fprintf(stderr ,"avencoder: init_ts_muxer fail..\n");		 
		return -1;	  
	}



	/* Working buffers for raw audio/video source samples. */
	p_audio_source_sample = (char *)malloc(n_audio_source_sample_size);
	p_video_source_sample = (char *)malloc(n_video_source_sample_size);
	if(p_audio_source_sample == NULL || p_video_source_sample == NULL)
	{		 
		fprintf(stderr ,"avencoder: malloc fail..\n");		 
		return -2;	  
	}

	p_audio_encode_sample = (char *)malloc(n_audio_encode_sample_size);
	p_video_encode_sample = (char *)malloc(n_video_encode_sample_size);
	if(p_audio_encode_sample == NULL || p_video_encode_sample == NULL)
	{		 
		fprintf(stderr ,"avencoder: malloc fail..\n");		 
		return -2;	  
	}

	/* Start the statistics thread. */
	pthread_t pthread;
	int ret = pthread_create(&pthread, NULL, statistics_process, NULL);
	if(ret != 0)
	{
		printf("avencoder: pthread_create fail!...\n");
		return -3;
	}


	

	double videopts = 0.0;
	double audiopts = 0.0;

	struct timeval tv;
	/* NOTE: despite the _us suffix these hold milliseconds (sec*1000 + usec/1000). */
	long start_time_us,end_time_us;
	gettimeofday(&tv,NULL);
	start_time_us = end_time_us = tv.tv_sec*1000 + tv.tv_usec/1000;
	
	/* Main loop: emit one video frame, then write audio frames until the audio PTS catches up with the video PTS, and sleep out the rest of the interval. */
	while(1)
	{
		gettimeofday(&tv,NULL);
		start_time_us = tv.tv_sec*1000 + tv.tv_usec/1000;

		write_video_frames();

		video_pts(p_avmuxer,&videopts);
		audio_pts(p_avmuxer,&audiopts);

		//printf("++++++++++videopts = [%lf]   audiopts = [%lf]+++++++++\n",videopts,audiopts);

		while(audiopts < videopts)
		{
			write_audio_frames();

			video_pts(p_avmuxer,&videopts);
			audio_pts(p_avmuxer,&audiopts);
			//printf("----------videopts = [%lf]   audiopts = [%lf]---------\n",videopts,audiopts);
		}

		gettimeofday(&tv,NULL);
		end_time_us = tv.tv_sec*1000 + tv.tv_usec/1000;

		//if(end_time_us - start_time_us >= 40)
		//	printf("1++++++++++start_time_us = [%ld]	 end_time_us = [%ld]+++++diff=%ld++++\n",start_time_us,end_time_us,end_time_us - start_time_us);

		/* Pace the loop: if this iteration finished early, sleep out the remaining delay (the *1000 converts to microseconds for usleep). */
		if(end_time_us - start_time_us < delaytimeus)
			usleep((delaytimeus-(end_time_us - start_time_us))*1000);
		
	}


	/* NOTE: the loop above never breaks, so this cleanup is currently unreachable. */
	uninit_audio_source(p_audio_source);
	uninit_video_source(p_video_source);
	uninit_audio_encoder(p_audio_encoder);
	uninit_video_encoder(p_video_encoder);
	uninit_ts_muxer(p_avmuxer);
	if(smooth_bitrate > 0 && p_tssmooth)
		uninit_tssmooth(p_tssmooth);


	return 0;
}
Example #4
0
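/* Tear down the ffmpeg output: flush delayed frames, write the trailer, close the video codec, and free all buffers and contexts. */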
static void end_ffmpeg_impl(FFMpegContext *context, int is_autosplit)
{
	PRINT("Closing ffmpeg...\n");

#if 0
	if (context->audio_stream) { /* SEE UPPER */
		write_audio_frames(context);
	}
#endif

#ifdef WITH_AUDASPACE
	/* Keep the audio mixdown device open across autosplit boundaries; free it only on a final close. */
	if (is_autosplit == false) {
		if (context->audio_mixdown_device) {
			AUD_Device_free(context->audio_mixdown_device);
			context->audio_mixdown_device = 0;
		}
	}
#endif

	if (context->video_stream && context->video_stream->codec) {
		PRINT("Flushing delayed frames...\n");
		flush_ffmpeg(context);
	}
	
	if (context->outfile) {
		av_write_trailer(context->outfile);
	}
	
	/* Close the video codec */

	if (context->video_stream && context->video_stream->codec) {
		avcodec_close(context->video_stream->codec);
		PRINT("zero video stream %p\n", context->video_stream);
		context->video_stream = 0;
	}

	/* free the temp buffer */
	if (context->current_frame) {
		delete_picture(context->current_frame);
		context->current_frame = 0;
	}
	if (context->outfile && context->outfile->oformat) {
		if (!(context->outfile->oformat->flags & AVFMT_NOFILE)) {
			avio_close(context->outfile->pb);
		}
	}
	if (context->outfile) {
		avformat_free_context(context->outfile);
		context->outfile = 0;
	}
	if (context->audio_input_buffer) {
		av_free(context->audio_input_buffer);
		context->audio_input_buffer = 0;
	}
#ifndef FFMPEG_HAVE_ENCODE_AUDIO2
	if (context->audio_output_buffer) {
		av_free(context->audio_output_buffer);
		context->audio_output_buffer = 0;
	}
#endif

	if (context->audio_deinterleave_buffer) {
		av_free(context->audio_deinterleave_buffer);
		context->audio_deinterleave_buffer = 0;
	}

	if (context->img_convert_ctx) {
		sws_freeContext(context->img_convert_ctx);
		context->img_convert_ctx = 0;
	}
}
Example #5
0
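/* Global-state variant of the ffmpeg teardown: flush delayed frames, write the trailer, close the video codec, and free streams and buffers. */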
static void end_ffmpeg_impl(int is_autosplit)
{
	unsigned int i;
	
	PRINT("Closing ffmpeg...\n");

#if 0
	if (audio_stream) { /* SEE UPPER */
		write_audio_frames();
	}
#endif

#ifdef WITH_AUDASPACE
	if (is_autosplit == FALSE) {
		if (audio_mixdown_device) {
			AUD_closeReadDevice(audio_mixdown_device);
			audio_mixdown_device = 0;
		}
	}
#endif

	if (video_stream && video_stream->codec) {
		PRINT("Flushing delayed frames...\n");
		flush_ffmpeg();
	}
	
	if (outfile) {
		av_write_trailer(outfile);
	}
	
	/* Close the video codec */

	if (video_stream && video_stream->codec) {
		avcodec_close(video_stream->codec);
		PRINT("zero video stream %p\n", video_stream);
		video_stream = 0;
	}

	
	/* Close the output file */
	if (outfile) {
		for (i = 0; i < outfile->nb_streams; i++) {
			if (&outfile->streams[i]) {
				av_freep(&outfile->streams[i]);
			}
		}
	}
	/* free the temp buffer */
	if (current_frame) {
		delete_picture(current_frame);
		current_frame = 0;
	}
	if (outfile && outfile->oformat) {
		if (!(outfile->oformat->flags & AVFMT_NOFILE)) {
			avio_close(outfile->pb);
		}
	}
	if (outfile) {
		av_free(outfile);
		outfile = 0;
	}
	if (video_buffer) {
		MEM_freeN(video_buffer);
		video_buffer = 0;
	}
	if (audio_output_buffer) {
		av_free(audio_output_buffer);
		audio_output_buffer = 0;
	}
	if (audio_input_buffer) {
		av_free(audio_input_buffer);
		audio_input_buffer = 0;
	}

	if (img_convert_ctx) {
		sws_freeContext(img_convert_ctx);
		img_convert_ctx = 0;
	}
}