Example #1
int write_video3(videoFile* outVideo,videoFile* inVideo,AVFrame* frameArr[],int* index,int videoCnt)
{
	AVCodecContext* pCodecCtx=outVideo->pCodecCtx;
	AVCodec* pCodec = avcodec_find_encoder(pCodecCtx->codec_id);  
	AVStream* video_st = outVideo->video_st;  
    if (!pCodec){fprintf(stderr,"Could not find a suitable encoder!\n");return -1;}
    if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){fprintf(stderr,"Failed to open the encoder!\n");return -1;}
	
	
	uint64_t tmpPTS=0;
	static uint64_t PTS=0;
	int len=index[videoCnt],i=0;
	int got_picture=0;
	
	#define COMCNT 25
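	/* For each clip boundary index[j], the COMCNT frames before the boundary
	 * are blended with the COMCNT frames after it via frame_combine(), and
	 * the blended-in frames are then skipped: an overlap transition. */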

	for(i=0;i<=len;i++)
	{
		int j;
		for(j=1;j<videoCnt;j++)
		{
			if(i==index[j]+1)i+=COMCNT;
			if(i<=index[j]&&i>index[j]-COMCNT)
			{
				frameArr[i]=frame_combine(frameArr[i+COMCNT],frameArr[i]);
			}
		}
		//printf("%d\n",i);
		AVFrame* picture=frameArr[i];
		AVPacket inPKT;
		int y_size = pCodecCtx->width * pCodecCtx->height;
		av_new_packet(&inPKT,y_size*3); 
		picture->pts = i; /* simple monotonically increasing pts */
		if (avcodec_encode_video2(pCodecCtx, &inPKT, picture, &got_picture) < 0)
		{
			fprintf(stderr, "encode fail\n");
			av_free_packet(&inPKT);
			continue;
		}
		if (got_picture != 1)
		{
			fprintf(stderr, "no picture got\n");
			av_free_packet(&inPKT);
			continue;
		}
		inPKT.pts += PTS;
		tmpPTS = inPKT.pts;
		if (inPKT.pts != AV_NOPTS_VALUE)
			inPKT.pts = av_rescale_q(inPKT.pts, inVideo->pCodecCtx->time_base, video_st->time_base);
		if (inPKT.dts != AV_NOPTS_VALUE)
			inPKT.dts = av_rescale_q(inPKT.dts, inVideo->pCodecCtx->time_base, video_st->time_base);
		inPKT.stream_index = video_st->index;
		av_write_frame(outVideo->pFormatCtx, &inPKT);
		av_free_packet(&inPKT);
	}
	/********************* Flush delayed frames ****************************/
	for (got_picture = 1; got_picture; )
	{
		AVPacket inPKT;
		int y_size = pCodecCtx->width * pCodecCtx->height;
		av_new_packet(&inPKT, y_size);
		if (avcodec_encode_video2(pCodecCtx, &inPKT, NULL, &got_picture) < 0 || got_picture != 1)
		{
			av_free_packet(&inPKT);
			break;
		}
		inPKT.pts += PTS;
		tmpPTS = inPKT.pts;
		if (inPKT.pts != AV_NOPTS_VALUE)
			inPKT.pts = av_rescale_q(inPKT.pts, inVideo->pCodecCtx->time_base, video_st->time_base);
		if (inPKT.dts != AV_NOPTS_VALUE)
			inPKT.dts = av_rescale_q(inPKT.dts, inVideo->pCodecCtx->time_base, video_st->time_base);
		inPKT.stream_index = video_st->index;
		av_write_frame(outVideo->pFormatCtx, &inPKT);
		av_free_packet(&inPKT);
	}
	PTS=tmpPTS;
	return 0;
}
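Note: avcodec_encode_video2() and its got_picture flag, used throughout these
examples, were deprecated in favour of the avcodec_send_frame() /
avcodec_receive_packet() API (Example #6 below exercises both paths). A minimal
sketch of the equivalent encode-and-drain loop; the helper name and the `enc`,
`ofmt_ctx`, `st` parameters are hypothetical, and error handling is trimmed:

/* Sketch only. Pass frame == NULL to enter drain mode and flush the encoder. */
static int encode_and_write(AVCodecContext *enc, AVFormatContext *ofmt_ctx,
                            AVStream *st, AVFrame *frame)
{
    int ret = avcodec_send_frame(enc, frame);
    if (ret < 0)
        return ret;
    for (;;) {
        AVPacket pkt;
        av_init_packet(&pkt);
        pkt.data = NULL;
        pkt.size = 0;
        ret = avcodec_receive_packet(enc, &pkt);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0; /* needs more input, or fully drained */
        if (ret < 0)
            return ret;
        av_packet_rescale_ts(&pkt, enc->time_base, st->time_base); /* codec -> stream time base */
        pkt.stream_index = st->index;
        av_write_frame(ofmt_ctx, &pkt);
        av_packet_unref(&pkt);
    }
}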
Example #2
/************************* Append the frames of one video to another video file (decode and re-encode) *********/
int write_video2(const videoFile* inVideo,videoFile* outVideo)
{
	/******************初始化*******************************/
	static int videoCnt=0;
	
	AVCodecContext* pCodecCtx=outVideo->pCodecCtx;
	AVCodec* pCodec = avcodec_find_encoder(pCodecCtx->codec_id);  
	AVStream* video_st = outVideo->video_st;  
    if (!pCodec){fprintf(stderr,"Could not find a suitable encoder!\n");return -1;}
    if (avcodec_open2(pCodecCtx, pCodec,NULL) < 0){fprintf(stderr,"Failed to open the encoder!\n");return -1;}
	
	AVPacket pkt;  
	int frameFinished=0;
	static int i=0;
	uint64_t tmpPTS=0;
	static uint64_t PTS=0;
	int j=0;
	int got_picture=0;
	AVFrame* picture = avcodec_alloc_frame();
	if (picture==NULL)
	{
		printf("avcodec alloc frame failed!\n");
		exit (1);
	}
	/*****************开始解码和重编码******************/
	while(av_read_frame(inVideo->pFormatCtx,&pkt)>=0)
	{
		
		if(pkt.stream_index==inVideo->videoStream)
		{
			
			i++;
			int r=avcodec_decode_video2(inVideo->pCodecCtx, picture, &frameFinished, &pkt);
			
			if(frameFinished&&r>=0)
			{
					if(picture->format!=PIX_FMT_YUV420P){ERROR("format error");exit(1);}
					AVPacket inPKT;
					int y_size = pCodecCtx->width * pCodecCtx->height;
					av_new_packet(&inPKT,y_size);

					/****************test****************************************/

					if(videoCnt==0){
					//frameArr[j++]=picture;
					//printf("%d %d\n",picture->width,picture->height);
					frameCpy(&frameArr[j++],picture);
					//printf("%d %d\n",frameArr[j-1]->width,frameArr[j-1]->height);
					}
					if(videoCnt>0){picture=frame_combine(picture,frameArr[j++]);
					}

					/***********************************************************/
					
					picture->pts = i; /* simple monotonically increasing pts */

					if (avcodec_encode_video2(pCodecCtx, &inPKT, picture, &got_picture) < 0)
					{
						fprintf(stderr, "encode fail\n");
						av_free_packet(&inPKT);
						av_free_packet(&pkt);
						return -1;
					}
					if (got_picture != 1)
					{
						fprintf(stderr, "no picture got\n");
						av_free_packet(&inPKT);
						av_free_packet(&pkt);
						continue;
					}
					inPKT.pts += PTS;
					tmpPTS = inPKT.pts;
					if (inPKT.pts != AV_NOPTS_VALUE)
						inPKT.pts = av_rescale_q(inPKT.pts, inVideo->pCodecCtx->time_base, video_st->time_base);
					if (inPKT.dts != AV_NOPTS_VALUE)
						inPKT.dts = av_rescale_q(inPKT.dts, inVideo->pCodecCtx->time_base, video_st->time_base);
					inPKT.stream_index = video_st->index;

					av_write_frame(outVideo->pFormatCtx, &inPKT);
					printf("encoded frame %d\n", i);
					av_free_packet(&inPKT);
			}
			else{fprintf(stderr,"cant get frame %d\n",i);}
		}
		av_free_packet(&pkt);
	}
	/*********************编码延迟帧****************************/
	for(got_picture=1;got_picture;)
	{
		AVPacket inPKT;
		int y_size = pCodecCtx->width * pCodecCtx->height;
		av_new_packet(&inPKT,y_size);
		if(avcodec_encode_video2(pCodecCtx, &inPKT,NULL, &got_picture)>=0)
		{
			inPKT.pts+=PTS;
			tmpPTS=inPKT.pts;
			if(got_picture!=1){break;}
			
			if (got_picture != 0) {
				if (inPKT.pts != AV_NOPTS_VALUE) {
					inPKT.pts = av_rescale_q(inPKT.pts, inVideo->pCodecCtx->time_base,video_st->time_base);
					}
				if (inPKT.dts != AV_NOPTS_VALUE) {
					inPKT.dts = av_rescale_q(inPKT.dts, inVideo->pCodecCtx->time_base,video_st->time_base);
					}
					inPKT.stream_index = video_st->index;
				}
		av_write_frame(outVideo->pFormatCtx, &inPKT);  
		}
		av_free_packet(&inPKT); 
	}
	PTS=tmpPTS;
	/********************* Free resources ****************************/
	
	if (video_st)  
    {  
        avcodec_close(video_st->codec);  
        av_free(picture);  
    }  
	//printf("写入完成\n");
	videoCnt+=1;
	return 0;
}
Example #3
void AVIDump::AddFrame(const u8* data, int width, int height)
{
	avpicture_fill((AVPicture*)s_src_frame, const_cast<u8*>(data), AV_PIX_FMT_BGR24, width, height);

	// Convert image from BGR24 to desired pixel format, and scale to initial
	// width and height
	if ((s_sws_context = sws_getCachedContext(s_sws_context,
	                                          width, height, AV_PIX_FMT_BGR24,
	                                          s_width, s_height, s_stream->codec->pix_fmt,
	                                          SWS_BICUBIC, nullptr, nullptr, nullptr)))
	{
		sws_scale(s_sws_context, s_src_frame->data, s_src_frame->linesize, 0,
		          height, s_scaled_frame->data, s_scaled_frame->linesize);
	}

	s_scaled_frame->format = s_stream->codec->pix_fmt;
	s_scaled_frame->width = s_width;
	s_scaled_frame->height = s_height;

	// Encode and write the image.
	AVPacket pkt;
	PreparePacket(&pkt);
	int got_packet = 0;
	int error = 0;
	u64 delta;
	s64 last_pts;
	if (!s_start_dumping && s_last_frame <= SystemTimers::GetTicksPerSecond())
	{
		delta = CoreTiming::GetTicks();
		last_pts = AV_NOPTS_VALUE;
		s_start_dumping = true;
	}
	else
	{
		delta = CoreTiming::GetTicks() - s_last_frame;
		last_pts = (s_last_pts * s_stream->codec->time_base.den) / SystemTimers::GetTicksPerSecond();
	}
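	// Timestamps are tracked in CPU ticks and converted to the codec time
	// base as: pts = ticks * time_base.den / ticks_per_second.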
	u64 pts_in_ticks = s_last_pts + delta;
	s_scaled_frame->pts = (pts_in_ticks * s_stream->codec->time_base.den) / SystemTimers::GetTicksPerSecond();
	if (s_scaled_frame->pts != last_pts)
	{
		s_last_frame = CoreTiming::GetTicks();
		s_last_pts = pts_in_ticks;
		error = avcodec_encode_video2(s_stream->codec, &pkt, s_scaled_frame, &got_packet);
	}
	while (!error && got_packet)
	{
		// Write the compressed frame in the media file.
		if (pkt.pts != (s64)AV_NOPTS_VALUE)
		{
			pkt.pts = av_rescale_q(pkt.pts,
			                       s_stream->codec->time_base, s_stream->time_base);
		}
		if (pkt.dts != (s64)AV_NOPTS_VALUE)
		{
			pkt.dts = av_rescale_q(pkt.dts,
			                       s_stream->codec->time_base, s_stream->time_base);
		}
		if (s_stream->codec->coded_frame->key_frame)
			pkt.flags |= AV_PKT_FLAG_KEY;
		pkt.stream_index = s_stream->index;
		av_interleaved_write_frame(s_format_context, &pkt);

		// Handle delayed frames.
		PreparePacket(&pkt);
		error = avcodec_encode_video2(s_stream->codec, &pkt, nullptr, &got_packet);
	}
	if (error)
		ERROR_LOG(VIDEO, "Error while encoding video: %d", error);
}
Example #4
int StreamMediaSink::writeJPEG(int FrameNo)
{
	AVCodecContext *pOCodecCtx = NULL;
	AVCodec *pOCodec = NULL;
	uint8_t *Buffer = NULL;
	FILE *JPEGFile = NULL;
	char JPEGFName[256];

	int BufSiz = avpicture_get_size(AV_PIX_FMT_YUVJ444P, m_avCodecContext->width, m_avCodecContext->height);

	Buffer = new uint8_t[BufSiz + FF_INPUT_BUFFER_PADDING_SIZE];
	if (Buffer == NULL)
		return (0);

	pOCodecCtx = avcodec_alloc_context3(pOCodec);
	if (!pOCodecCtx) {
		delete[] Buffer;
		return (0);
	}

	pOCodecCtx->bit_rate = m_avCodecContext->bit_rate;
	pOCodecCtx->width = m_avCodecContext->width;
	pOCodecCtx->height = m_avCodecContext->height;
	pOCodecCtx->pix_fmt = AV_PIX_FMT_YUVJ444P;
	pOCodecCtx->codec_id = AV_CODEC_ID_MJPEG;
	pOCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	pOCodecCtx->time_base.num = m_avCodecContext->time_base.num;
	pOCodecCtx->time_base.den = m_avCodecContext->time_base.den;

	pOCodec = avcodec_find_encoder(pOCodecCtx->codec_id);
	if (!pOCodec) {
		delete[] Buffer;
		return (0);
	}

	/* Quality/qscale settings must be in place before the encoder is opened. */
	pOCodecCtx->mb_lmin = pOCodecCtx->lmin = pOCodecCtx->qmin * FF_QP2LAMBDA;
	pOCodecCtx->mb_lmax = pOCodecCtx->lmax = pOCodecCtx->qmax * FF_QP2LAMBDA;
	pOCodecCtx->flags = CODEC_FLAG_QSCALE;
	pOCodecCtx->global_quality = pOCodecCtx->qmin * FF_QP2LAMBDA;

	if (avcodec_open2(pOCodecCtx, pOCodec, NULL) < 0) {
		delete[] Buffer;
		return (0);
	}

	m_avFrame->pts = 1;
	m_avFrame->quality = pOCodecCtx->global_quality;
	AVPacket packet;
	av_init_packet(&packet);
	packet.size = BufSiz;
	packet.data = Buffer;

	int image = 0;
	avcodec_encode_video2(pOCodecCtx, &packet, m_avFrame, &image);

	if (image) {
		sprintf(JPEGFName, "%06d.jpg", FrameNo);
		JPEGFile = fopen(JPEGFName, "wb");
		if (JPEGFile) {
			fwrite(packet.data, 1, packet.size, JPEGFile);
			fclose(JPEGFile);
		}
	}

	avcodec_close(pOCodecCtx);
	delete[] Buffer;
	return (packet.size);
}
Example #5
int demux(const char *in_filename, const char *out_filename_v,
		const char *out_filename_a) {
	AVOutputFormat *ofmt_a = NULL, *ofmt_v = NULL;
	// Input AVFormatContext and Output AVFormatContext
	AVFormatContext *ifmt_ctx = NULL, *ofmt_ctx_a = NULL, *ofmt_ctx_v = NULL;
	AVPacket pkt, enc_pkt;
	int ret, i;
	int video_index = -1, audio_index = -1;
	int frame_index = 0;

	av_register_all();
	// Input
	if ((ret = avformat_open_input(&ifmt_ctx, in_filename, 0, 0)) < 0) {
		printf("Could not open input file.");
		goto end;
	}
	if ((ret = avformat_find_stream_info(ifmt_ctx, 0)) < 0) {
		printf("Failed to retrieve input stream information");
		goto end;
	}

	// Output
	avformat_alloc_output_context2(&ofmt_ctx_v, NULL, NULL, out_filename_v);
	if (!ofmt_ctx_v) {
		printf("Could not create output context.\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_v = ofmt_ctx_v->oformat;

	avformat_alloc_output_context2(&ofmt_ctx_a, NULL, NULL, out_filename_a);
	if (!ofmt_ctx_a) {
		printf("Could not create output context\n");
		ret = AVERROR_UNKNOWN;
		goto end;
	}
	ofmt_a = ofmt_ctx_a->oformat;

	for (i = 0; i < ifmt_ctx->nb_streams; i++) {
		// Create output AVStream according to input AVStream
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream = ifmt_ctx->streams[i];
		AVStream *out_stream = NULL;

		if (ifmt_ctx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			video_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_v,
					in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_v;
		} else if (ifmt_ctx->streams[i]->codec->codec_type
				== AVMEDIA_TYPE_AUDIO) {
			audio_index = i;
			out_stream = avformat_new_stream(ofmt_ctx_a,
					in_stream->codec->codec);
			ofmt_ctx = ofmt_ctx_a;
		} else {
			continue; /* skip non-A/V streams instead of aborting the loop */
		}

		if (!out_stream) {
			printf("Failed allocating output stream\n");
			ret = AVERROR_UNKNOWN;
			goto end;
		}
		// Copy the settings of AVCodecContext
		if (avcodec_copy_context(out_stream->codec, in_stream->codec) < 0) {
			printf(
					"Failed to copy context from input to output stream codec context\n");
			goto end;
		}
		out_stream->codec->codec_tag = 0;

		if (ofmt_ctx->oformat->flags & AVFMT_GLOBALHEADER)
			out_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}

	// Open output file
	if (!(ofmt_v->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_v->pb, out_filename_v, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_v);
			goto end;
		}
	}

	if (!(ofmt_a->flags & AVFMT_NOFILE)) {
		if (avio_open(&ofmt_ctx_a->pb, out_filename_a, AVIO_FLAG_WRITE) < 0) {
			printf("Could not open output file '%s'", out_filename_a);
			goto end;
		}
	}

	// Write file header
	if (avformat_write_header(ofmt_ctx_v, NULL) < 0) {
		printf("Error occurred when opening video output file\n");
		goto end;
	}
//	if (avformat_write_header(ofmt_ctx_a, NULL) < 0) {
//		printf("Error occurred when opening audio output file\n");
//		goto end;
//	}

#if USE_H264BSF
	AVBitStreamFilterContext* h264bsfc = av_bitstream_filter_init("h264_mp4toannexb");
#endif

	while (1) {
		AVFormatContext *ofmt_ctx;
		AVStream *in_stream, *out_stream;

		AVCodecContext *dec_ctx = NULL, *enc_ctx = NULL;
		AVCodec *dec = NULL, *encoder = NULL;

		AVFrame *frame = NULL;

		int got_frame;

		// Get an AVPacket
		if (av_read_frame(ifmt_ctx, &pkt) < 0)
			break;
		in_stream = ifmt_ctx->streams[pkt.stream_index];

		if (pkt.stream_index == video_index) {
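			/* NOTE: everything below (output stream lookup, opening the
			 * decoder and the encoder) runs once per packet here; in a
			 * real program this setup belongs before the read loop. */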
			ofmt_ctx = ofmt_ctx_v;
			out_stream = ofmt_ctx->streams[0]; /* the stream created during setup above */

			/* find decoder for the stream */
			dec_ctx = in_stream->codec;
			dec = avcodec_find_decoder(dec_ctx->codec_id);
			if (!dec) {
				fprintf(stderr, "Failed to find %s codec\n",
						av_get_media_type_string(AVMEDIA_TYPE_VIDEO));
				return AVERROR(EINVAL);
			}

			/* Open decoder */
			int ret = avcodec_open2(dec_ctx, dec, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR,
						"Failed to open decoder for stream #%u\n", i);
				return ret;
			}

			// decoder is MPEG-4 part 2
			printf("decoder is %s\n", dec->long_name);

			// NOTE
			frame = av_frame_alloc();

			ret = avcodec_decode_video2(dec_ctx, frame, &got_frame, &pkt);
			if (ret < 0) {
				av_frame_free(&frame);
				av_log(NULL, AV_LOG_ERROR, "Decoding failed\n");
				break;
			}

			// printf("frame duration is %d\n", frame->pkt_duration);

			// encode
			encoder = avcodec_find_encoder(AV_CODEC_ID_H264);
			if (!encoder) {
				av_log(NULL, AV_LOG_FATAL, "Necessary encoder not found\n");
				return AVERROR_INVALIDDATA;
			}

			// avcodec_copy_context(enc_ctx, dec_ctx);
			enc_ctx = avcodec_alloc_context3(encoder);
			if (!enc_ctx)
				return AVERROR(ENOMEM);

			enc_ctx->height = dec_ctx->height;
			enc_ctx->width = dec_ctx->width;
			enc_ctx->sample_aspect_ratio = dec_ctx->sample_aspect_ratio;
			enc_ctx->pix_fmt = encoder->pix_fmts[0];
			enc_ctx->time_base = dec_ctx->time_base;
			//enc_ctx->time_base.num = 1;
			//enc_ctx->time_base.den = 25;
			// Required options for H.264; encoding fails without them
			enc_ctx->me_range = 16;
			enc_ctx->max_qdiff = 4;
			enc_ctx->qmin = 10;
			enc_ctx->qmax = 51;
			enc_ctx->qcompress = 0.6;
			enc_ctx->refs = 3;
			enc_ctx->bit_rate = 1500000; /* bit_rate is in bits per second */

			/* x264 private options must be set before the encoder is opened. */
			av_opt_set(enc_ctx->priv_data, "preset", "slow", 0);

			ret = avcodec_open2(enc_ctx, encoder, NULL);
			if (ret < 0) {
				av_log(NULL, AV_LOG_ERROR,
						"Cannot open video encoder for stream #%u\n", i);
				return ret;
			}

			// AVOutputFormat *formatOut = av_guess_format(NULL, out_filename_v, NULL);

			enc_pkt.data = NULL;
			enc_pkt.size = 0;
			av_init_packet(&enc_pkt);
			ret = avcodec_encode_video2(enc_ctx, &enc_pkt, frame, &got_frame);

			printf("demo is %s\n", "hello");

			av_frame_free(&frame);
			avcodec_close(enc_ctx);
			avcodec_close(dec_ctx);

			// printf("Write Video Packet. size:%d\tpts:%lld\n", pkt.size, pkt.pts);
#if USE_H264BSF
			av_bitstream_filter_filter(h264bsfc, in_stream->codec, NULL, &pkt.data, &pkt.size, pkt.data, pkt.size, 0);
#endif
		} else {
			continue;
		}

		// Convert PTS/DTS
		enc_pkt.pts = av_rescale_q_rnd(enc_pkt.pts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.dts = av_rescale_q_rnd(enc_pkt.dts, in_stream->time_base,
				out_stream->time_base,
				(AVRounding) (AV_ROUND_NEAR_INF | AV_ROUND_PASS_MINMAX));
		enc_pkt.duration = av_rescale_q(enc_pkt.duration, in_stream->time_base,
				out_stream->time_base);
		// enc_pkt.pos = -1;
		enc_pkt.stream_index = out_stream->index;

		if (av_interleaved_write_frame(ofmt_ctx, &enc_pkt) < 0) {
			printf("Error muxing packet\n");
			break;
		}
		av_free_packet(&enc_pkt);
		av_free_packet(&pkt);
		frame_index++;
	}

#if USE_H264BSF
	av_bitstream_filter_close(h264bsfc);
#endif

	// Write file trailer
	// av_write_trailer(ofmt_ctx_a); // no audio header was written above, so no trailer either
	av_write_trailer(ofmt_ctx_v);

	end: avformat_close_input(&ifmt_ctx);
	/* close output */
	if (ofmt_ctx_a && !(ofmt_a->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_a->pb);

	if (ofmt_ctx_v && !(ofmt_v->flags & AVFMT_NOFILE))
		avio_close(ofmt_ctx_v->pb);

	avformat_free_context(ofmt_ctx_a);
	avformat_free_context(ofmt_ctx_v);

	if (ret < 0 && ret != AVERROR_EOF) {
		printf("Error occurred.\n");
		return -1;
	}
	return 0;
}
Example #6
bool VideoEncoder::EncodeFrame(AVFrameWrapper* frame) {

	if(frame != NULL) {
#if SSR_USE_AVFRAME_WIDTH_HEIGHT
		assert(frame->GetFrame()->width == GetCodecContext()->width);
		assert(frame->GetFrame()->height == GetCodecContext()->height);
#endif
#if SSR_USE_AVFRAME_FORMAT
		assert(frame->GetFrame()->format == GetCodecContext()->pix_fmt);
#endif
#if SSR_USE_AVFRAME_SAR
		assert(frame->GetFrame()->sample_aspect_ratio.num == GetCodecContext()->sample_aspect_ratio.num);
		assert(frame->GetFrame()->sample_aspect_ratio.den == GetCodecContext()->sample_aspect_ratio.den);
#endif
	}

#if SSR_USE_AVCODEC_SEND_RECEIVE

	// send a frame
	AVFrame *avframe = (frame == NULL)? NULL : frame->Release();
	try {
		if(avcodec_send_frame(GetCodecContext(), avframe) < 0) {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Sending of video frame failed!"));
			throw LibavException();
		}
	} catch(...) {
		av_frame_free(&avframe);
		throw;
	}
	av_frame_free(&avframe);

	// try to receive a packet
	for( ; ; ) {
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());
		int res = avcodec_receive_packet(GetCodecContext(), packet->GetPacket());
		if(res == 0) { // we have a packet, send the packet to the muxer
			GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
			IncrementPacketCounter();
		} else if(res == AVERROR(EAGAIN)) { // we have no packet
			return true;
		} else if(res == AVERROR_EOF) { // this is the end of the stream
			return false;
		} else {
			Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Receiving of video packet failed!"));
			throw LibavException();
		}
	}

#elif SSR_USE_AVCODEC_ENCODE_VIDEO2

	// allocate a packet
	std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper());

	// encode the frame
	int got_packet;
	if(avcodec_encode_video2(GetCodecContext(), packet->GetPacket(), (frame == NULL)? NULL : frame->GetFrame(), &got_packet) < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(got_packet) {

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#else

	// encode the frame
	int bytes_encoded = avcodec_encode_video(GetCodecContext(), m_temp_buffer.data(), m_temp_buffer.size(), (frame == NULL)? NULL : frame->GetFrame());
	if(bytes_encoded < 0) {
		Logger::LogError("[VideoEncoder::EncodeFrame] " + Logger::tr("Error: Encoding of video frame failed!"));
		throw LibavException();
	}

	// do we have a packet?
	if(bytes_encoded > 0) {

		// allocate a packet
		std::unique_ptr<AVPacketWrapper> packet(new AVPacketWrapper(bytes_encoded));

		// copy the data
		memcpy(packet->GetPacket()->data, m_temp_buffer.data(), bytes_encoded);

		// set the timestamp
		// note: pts will be rescaled and stream_index will be set by Muxer
		if(GetCodecContext()->coded_frame != NULL && GetCodecContext()->coded_frame->pts != (int64_t) AV_NOPTS_VALUE)
			packet->GetPacket()->pts = GetCodecContext()->coded_frame->pts;

		// set the keyframe flag
		if(GetCodecContext()->coded_frame->key_frame)
			packet->GetPacket()->flags |= AV_PKT_FLAG_KEY;

		// send the packet to the muxer
		GetMuxer()->AddPacket(GetStream()->index, std::move(packet));
		IncrementPacketCounter();
		return true;

	} else {
		return false;
	}

#endif

}
Example #7
    status_t FFMPEGer::encodeVideo(void *data) {
        int ret;
        OutputStream *ost = &video_st;
        AVCodecContext *c;
        AVFrame *frame;
        int got_packet = 0;
        c = ost->st->codec;

        if (mPixFmt == AV_PIX_FMT_NV21) {
            memcpy(ost->tmp_frame->data[0], data, c->width * c->height);
            memcpy(ost->tmp_frame->data[1], (char *) data + c->width * c->height,
                   c->width * c->height / 2);
        } else if (mPixFmt == AV_PIX_FMT_YUV420P) {
            memcpy(ost->frame->data[0], data, c->width * c->height);
            memcpy(ost->frame->data[1], (char *) data + c->width * c->height,
                   c->width * c->height / 4);
            memcpy(ost->frame->data[2], (char *) data + c->width * c->height * 5 / 4,
                   c->width * c->height / 4);
        }

        if (c->pix_fmt != mPixFmt) {
            /* as we only generate a YUV420P picture, we must convert it
             * to the codec pixel format if needed */
            if (!ost->sws_ctx) {
                ost->sws_ctx = sws_getContext(c->width, c->height,
                                              mPixFmt,
                                              c->width, c->height,
                                              c->pix_fmt,
                                              SCALE_FLAGS, NULL, NULL, NULL);
                if (!ost->sws_ctx) {
                    ALOGE("Could not initialize the conversion context");
                    return UNKNOWN_ERROR;
                }
            }

            sws_scale(ost->sws_ctx,
                      (const uint8_t *const *) ost->tmp_frame->data, ost->tmp_frame->linesize,
                      0, c->height, ost->frame->data, ost->frame->linesize);
        }

        //ost->frame->pts = ost->next_pts++;
        ost->frame->pts = av_rescale_q(ost->next_pts++, (AVRational) {1, 15}, ost->st->time_base);
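        // Note: (AVRational){1, 15} hard-codes a 15 fps source clock for this
        // rescale, which only holds if frames really arrive at 15 fps.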

        frame = ost->frame;

        if (fmt_ctx->oformat->flags & AVFMT_RAWPICTURE) {
            /* a hack to avoid data copy with some raw video muxers */
            AVPacket pkt = {0};
            av_init_packet(&pkt);

            pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index = ost->st->index;
            pkt.data = (uint8_t *) frame;
            pkt.size = sizeof(AVPicture);
            pkt.pts = pkt.dts = frame->pts;
            //av_packet_rescale_ts(&pkt, c->time_base, ost->st->time_base);
            ret = av_interleaved_write_frame(fmt_ctx, &pkt);
        } else {
            AVPacket pkt = {0};
            av_init_packet(&pkt);

            /* encode the image */
            ret = avcodec_encode_video2(c, &pkt, frame, &got_packet);
            if (ret < 0) {
                ALOGE("Error encoding video frame: %s", av_err2str(ret));
                return UNKNOWN_ERROR;
            }

            if (got_packet) {
                ret = write_frame(fmt_ctx, &c->time_base, ost->st, &pkt);
            } else {
                ret = 0;
            }
        }

        if (ret < 0) {
            ALOGE("Error while writing video frame: %s", av_err2str(ret));
            return UNKNOWN_ERROR;
        }

        return OK;
    }
Example #9
static GstFlowReturn
gst_ffmpegvidenc_handle_frame (GstVideoEncoder * encoder,
                               GstVideoCodecFrame * frame)
{
    GstFFMpegVidEnc *ffmpegenc = (GstFFMpegVidEnc *) encoder;
    GstBuffer *outbuf;
    gint ret = 0, c;
    GstVideoInfo *info = &ffmpegenc->input_state->info;
    AVPacket *pkt;
    int have_data = 0;
    BufferInfo *buffer_info;

    if (ffmpegenc->interlaced) {
        ffmpegenc->picture->interlaced_frame = TRUE;
        /* if this is not the case, a filter element should be used to swap fields */
        ffmpegenc->picture->top_field_first =
            GST_BUFFER_FLAG_IS_SET (frame->input_buffer, GST_VIDEO_BUFFER_FLAG_TFF);
    }

    if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME (frame))
        ffmpegenc->picture->pict_type = AV_PICTURE_TYPE_I;

    buffer_info = g_slice_new0 (BufferInfo);
    buffer_info->buffer = gst_buffer_ref (frame->input_buffer);

    if (!gst_video_frame_map (&buffer_info->vframe, info, frame->input_buffer,
                              GST_MAP_READ)) {
        GST_ERROR_OBJECT (encoder, "Failed to map input buffer");
        gst_buffer_unref (buffer_info->buffer);
        g_slice_free (BufferInfo, buffer_info);
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_ERROR;
    }

    /* Fill avpicture */
    ffmpegenc->picture->buf[0] =
        av_buffer_create (NULL, 0, buffer_info_free, buffer_info, 0);
    for (c = 0; c < AV_NUM_DATA_POINTERS; c++) {
        if (c < GST_VIDEO_INFO_N_COMPONENTS (info)) {
            ffmpegenc->picture->data[c] =
                GST_VIDEO_FRAME_PLANE_DATA (&buffer_info->vframe, c);
            ffmpegenc->picture->linesize[c] =
                GST_VIDEO_FRAME_COMP_STRIDE (&buffer_info->vframe, c);
        } else {
            ffmpegenc->picture->data[c] = NULL;
            ffmpegenc->picture->linesize[c] = 0;
        }
    }

    ffmpegenc->picture->format = ffmpegenc->context->pix_fmt;
    ffmpegenc->picture->width = GST_VIDEO_FRAME_WIDTH (&buffer_info->vframe);
    ffmpegenc->picture->height = GST_VIDEO_FRAME_HEIGHT (&buffer_info->vframe);

    ffmpegenc->picture->pts =
        gst_ffmpeg_time_gst_to_ff (frame->pts /
                                   ffmpegenc->context->ticks_per_frame, ffmpegenc->context->time_base);

    have_data = 0;
    pkt = g_slice_new0 (AVPacket);

    ret =
        avcodec_encode_video2 (ffmpegenc->context, pkt, ffmpegenc->picture,
                               &have_data);

    av_frame_unref (ffmpegenc->picture);

    if (ret < 0 || !have_data)
        g_slice_free (AVPacket, pkt);

    if (ret < 0)
        goto encode_fail;

    /* Encoder needs more data */
    if (!have_data) {
        gst_video_codec_frame_unref (frame);
        return GST_FLOW_OK;
    }

    /* save stats info if there is some as well as a stats file */
    if (ffmpegenc->file && ffmpegenc->context->stats_out)
        if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
            GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
                               (("Could not write to file \"%s\"."), ffmpegenc->filename),
                               GST_ERROR_SYSTEM);

    gst_video_codec_frame_unref (frame);

    /* Get oldest frame */
    frame = gst_video_encoder_get_oldest_frame (encoder);

    outbuf =
        gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
                                     pkt->size, 0, pkt->size, pkt, gst_ffmpegvidenc_free_avpacket);
    frame->output_buffer = outbuf;

    if (pkt->flags & AV_PKT_FLAG_KEY)
        GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
    else
        GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);

    return gst_video_encoder_finish_frame (encoder, frame);

    /* ERRORS */
encode_fail:
    {
#ifndef GST_DISABLE_GST_DEBUG
        GstFFMpegVidEncClass *oclass =
            (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
        GST_ERROR_OBJECT (ffmpegenc,
                          "avenc_%s: failed to encode buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
        /* avoid frame (and ts etc) piling up */
        return gst_video_encoder_finish_frame (encoder, frame);
    }
}
Example #10
static GstFlowReturn
gst_ffmpegvidenc_flush_buffers (GstFFMpegVidEnc * ffmpegenc, gboolean send)
{
    GstVideoCodecFrame *frame;
    GstFlowReturn flow_ret = GST_FLOW_OK;
    GstBuffer *outbuf;
    gint ret;
    AVPacket *pkt;
    int have_data = 0;

    GST_DEBUG_OBJECT (ffmpegenc, "flushing buffers with sending %d", send);

    /* no need to empty codec if there is none */
    if (!ffmpegenc->opened)
        goto done;

    while ((frame =
                gst_video_encoder_get_oldest_frame (GST_VIDEO_ENCODER (ffmpegenc)))) {
        pkt = g_slice_new0 (AVPacket);
        have_data = 0;

        ret = avcodec_encode_video2 (ffmpegenc->context, pkt, NULL, &have_data);

        if (ret < 0) {              /* there should be something, notify and give up */
#ifndef GST_DISABLE_GST_DEBUG
            GstFFMpegVidEncClass *oclass =
                (GstFFMpegVidEncClass *) (G_OBJECT_GET_CLASS (ffmpegenc));
            GST_WARNING_OBJECT (ffmpegenc,
                                "avenc_%s: failed to flush buffer", oclass->in_plugin->name);
#endif /* GST_DISABLE_GST_DEBUG */
            g_slice_free (AVPacket, pkt);
            gst_video_codec_frame_unref (frame);
            break;
        }

        /* save stats info if there is some as well as a stats file */
        if (ffmpegenc->file && ffmpegenc->context->stats_out)
            if (fprintf (ffmpegenc->file, "%s", ffmpegenc->context->stats_out) < 0)
                GST_ELEMENT_ERROR (ffmpegenc, RESOURCE, WRITE,
                                   (("Could not write to file \"%s\"."), ffmpegenc->filename),
                                   GST_ERROR_SYSTEM);

        if (send && have_data) {
            outbuf =
                gst_buffer_new_wrapped_full (GST_MEMORY_FLAG_READONLY, pkt->data,
                                             pkt->size, 0, pkt->size, pkt, gst_ffmpegvidenc_free_avpacket);
            frame->output_buffer = outbuf;

            if (pkt->flags & AV_PKT_FLAG_KEY)
                GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT (frame);
            else
                GST_VIDEO_CODEC_FRAME_UNSET_SYNC_POINT (frame);

            flow_ret =
                gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
        } else {
            /* no frame attached, so will be skipped and removed from frame list */
            gst_video_encoder_finish_frame (GST_VIDEO_ENCODER (ffmpegenc), frame);
        }
    }

done:

    return flow_ret;
}
Example #11
int main(int argc, char** argv){
	int quadrant_line, quadrant_column;
	
	char *videoFileName = argv[1];
	char quadFileName[64];

	int i = 0, k, j;

	long unsigned int inc = 0;
	long unsigned int incaudio = 0;

	int videoStreamIndex;
	int audioStreamIndex= -1;
	int frameFinished, gotPacket;

	AVDictionary	*codecOptions = NULL;
	
	UDP_PTSframe_t PTS_frame;

	struct tm *start_time_tm;
	char start_time_str[64];
	long unsigned int start_time;
	time_t start_timer_t;
	
	//Crop env
	int tam_quad;
	int first = 1, marginLeft = 0, marginTop = 0;
	int width , height;

    if(argc < 5){
        usage();    
        return -1;
    }

    signal (SIGTERM, handlerToFinish);
	signal (SIGINT, handlerToFinish);

    tam_quad = sqrt(amount_of_quadrants);
    quadrant_line = atoi(argv[2]);
    quadrant_column = atoi(argv[3]);
    amount_of_quadrants = (quadrant_line * quadrant_column) + 1;

    strcpy (quadFileName, argv[4]);

    //Allocate output stream contexts
    ff_output = malloc (sizeof(ff_output_t) * amount_of_quadrants);

	av_register_all();
	avformat_network_init();

	//Initialize Input
	if (avformat_open_input (&ff_input.formatCtx, videoFileName, NULL, NULL) != 0) {
		printf ("Cold not open input video file at %s\n", videoFileName);
		return -1;
	}

	if (avformat_find_stream_info(ff_input.formatCtx, NULL) < 0) {
		printf ("Cold not get stream info\n");
		return -1;
	}

	av_dump_format(ff_input.formatCtx, 0, videoFileName, 0);

	videoStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_VIDEO, -1, -1, &ff_input.encoder, 0);
	if (videoStreamIndex < 0) {
		printf ("no video streams found\n");
		return -1;
	}

	audioStreamIndex = av_find_best_stream(ff_input.formatCtx, AVMEDIA_TYPE_AUDIO, -1, -1, &ff_input.audioencoder, 0);
    if (audioStreamIndex < 0) {
        printf ("no audio streams found\n");
        return -1;
    }
    printf ("VIDEO ST %d, AUDIO ST %d\n", videoStreamIndex, audioStreamIndex);

    ff_input.audiocodecCtx = ff_input.formatCtx->streams[audioStreamIndex]->codec;
	ff_input.codecCtx = ff_input.formatCtx->streams[videoStreamIndex]->codec;

	if (avcodec_open2 (ff_input.audiocodecCtx, ff_input.audioencoder, NULL) < 0) {
        printf ("Could not open input codec\n");
        return -1;
    }

	if (avcodec_open2 (ff_input.codecCtx, ff_input.encoder, NULL) < 0) {
		printf ("Could not open input codec\n");
		return -1;
	}

	//Get system time and append as metadata
	getSystemTime (&PTS_frame.frameTimeVal); //Must be the same for all output contexts
	start_time = PTS_frame.frameTimeVal.tv_sec;
	start_timer_t = (time_t) start_time;
	start_time_tm = localtime (&start_timer_t);
	strftime(start_time_str, sizeof start_time_str, "%Y-%m-%d %H:%M:%S", start_time_tm);

	if (avformat_alloc_output_context2(&formatCtx, NULL, AV_OUTPUT_FORMAT, quadFileName) < 0) {
			printf ("could not create output context\n");
			return -1;
	}

	//Initialize Video Output Streams
	for (i = 0; i < amount_of_quadrants - 1; i++) {

		ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
		if (ff_output[i].outStream == NULL) {
			printf ("Could not create output stream\n");
			return -1;
		}

		ff_output[i].outStream->id = formatCtx->nb_streams - 1;

		ff_output[i].codecCtx = ff_output[i].outStream->codec;
		ff_output[i].encoder = avcodec_find_encoder_by_name (AV_OUTPUT_CODEC);
		if (ff_output[i].encoder == NULL) {
			printf ("Codec %s not found..\n", AV_OUTPUT_CODEC);
			return -1;
		}

		//Sliced sizes
		width = ff_input.codecCtx->width/quadrant_column;
		height = ff_input.codecCtx->height/quadrant_line;

		ff_output[i].codecCtx->codec_type 	= AVMEDIA_TYPE_VIDEO;
		ff_output[i].codecCtx->height 		= height;
		ff_output[i].codecCtx->width 		= width;
		ff_output[i].codecCtx->pix_fmt		= ff_input.codecCtx->pix_fmt;

		if (strcmp (AV_OUTPUT_CODEC, "libvpx") == 0) {
			//Maintain input aspect ratio for codec and stream info, and b_frames for codec info
			ff_output[i].codecCtx->sample_aspect_ratio = ff_input.codecCtx->sample_aspect_ratio;
			ff_output[i].codecCtx->max_b_frames = ff_input.codecCtx->max_b_frames;
			ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			//Set custo timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = AV_FRAMERATE;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 10000;			
		}

		if (strcmp (AV_OUTPUT_CODEC, "libx264") == 0) {
			// ff_output[i].codecCtx->profile = FF_PROFILE_H264_MAIN;
			// av_dict_set(&codecOptions, "profile","main",0);

			//Set custom BIT RATE and THREADs 
			ff_output[i].codecCtx->bit_rate 	= AV_OUTPUT_BITRATE;
			ff_output[i].codecCtx->thread_count = AV_OUTPUT_THREADS;
			ff_output[i].codecCtx->thread_type  = AV_OUTPUT_THREAD_TYPE;

			ff_output[i].codecCtx->bit_rate_tolerance = 0;
			ff_output[i].codecCtx->rc_max_rate = 0;
			ff_output[i].codecCtx->rc_buffer_size = 0;
			ff_output[i].codecCtx->gop_size = 40;
			ff_output[i].codecCtx->max_b_frames = 3;
			ff_output[i].codecCtx->b_frame_strategy = 1;
			ff_output[i].codecCtx->coder_type = 1;
			ff_output[i].codecCtx->me_cmp = 1;
			ff_output[i].codecCtx->me_range = 16;
			ff_output[i].codecCtx->qmin = 10;
			ff_output[i].codecCtx->qmax = 51;
			ff_output[i].codecCtx->scenechange_threshold = 40;
			ff_output[i].codecCtx->flags |= CODEC_FLAG_LOOP_FILTER;
			ff_output[i].codecCtx->me_method = ME_HEX;
			ff_output[i].codecCtx->me_subpel_quality = 5;
			ff_output[i].codecCtx->i_quant_factor = 0.71;
			ff_output[i].codecCtx->qcompress = 0.6;
			ff_output[i].codecCtx->max_qdiff = 4;

			//Set custo timebase for codec and streams
			ff_output[i].codecCtx->time_base.num = 1;
			ff_output[i].codecCtx->time_base.den = 24;
			ff_output[i].outStream->time_base.num = 1;
			ff_output[i].outStream->time_base.den = 90000;		
		}

		formatCtx->start_time_realtime = start_time;
		av_dict_set (&formatCtx->metadata, "service_name", start_time_str, 0);
		av_dict_set (&formatCtx->metadata, "creation_time", start_time_str, 0);

		//Open codec
		if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
			printf ("Could not open output codec...\n");
			return -1;
		}
	}

	//Initializing Audio Output
	i = amount_of_quadrants-1; //Last stream
	ff_output[i].outStream = avformat_new_stream (formatCtx, NULL);
	if (ff_output[i].outStream == NULL) {
		printf ("Could not create output stream\n");
		return -1;
	}

	ff_output[i].outStream->id = formatCtx->nb_streams - 1;

	ff_output[i].codecCtx = ff_output[i].outStream->codec;
	ff_output[i].encoder = avcodec_find_encoder (ff_input.audiocodecCtx->codec_id);
	if (ff_output[i].encoder == NULL) {
		printf ("Codec %s not found..\n", AUDIO_OUTPUT_CODEC);
		return -1;
	}
  
    ff_output[i].codecCtx->codec_id = ff_input.audiocodecCtx->codec_id;
    ff_output[i].codecCtx->codec_type = AVMEDIA_TYPE_AUDIO;
    ff_output[i].codecCtx->sample_fmt = ff_input.audiocodecCtx->sample_fmt;
    ff_output[i].codecCtx->sample_rate = ff_input.audiocodecCtx->sample_rate;
    ff_output[i].codecCtx->channel_layout = ff_input.audiocodecCtx->channel_layout;
    ff_output[i].codecCtx->channels = av_get_channel_layout_nb_channels(ff_output[amount_of_quadrants-1].codecCtx->channel_layout);
    ff_output[i].codecCtx->bit_rate = ff_input.audiocodecCtx->bit_rate;  
    ff_output[i].codecCtx->sample_aspect_ratio = ff_input.audiocodecCtx->sample_aspect_ratio;
    ff_output[i].codecCtx->max_b_frames = ff_input.audiocodecCtx->max_b_frames;
    ff_output[i].outStream->sample_aspect_ratio = ff_output[i].codecCtx->sample_aspect_ratio;

    ff_output[i].outStream->time_base.num = ff_input.formatCtx->streams[audioStreamIndex]->time_base.num;
	ff_output[i].outStream->time_base.den = ff_input.formatCtx->streams[audioStreamIndex]->time_base.den;

	ff_output[i].codecCtx->time_base.num = ff_input.audiocodecCtx->time_base.num;
	ff_output[i].codecCtx->time_base.den = ff_input.audiocodecCtx->time_base.den;

	printf("sample_rate %d\n", ff_input.audiocodecCtx->sample_rate);

	//Open codec
	if (avcodec_open2(ff_output[i].codecCtx, ff_output[i].encoder, &codecOptions)) {
		printf ("Could not open output codec...\n");
		return -1;
	}

	av_dump_format (formatCtx, 0, quadFileName, 1);

	//Open output context
	if (avio_open (&formatCtx->pb, quadFileName, AVIO_FLAG_WRITE)) {
		printf ("avio_open failed %s\n", quadFileName);
		return -1;
	}
	
	//Write format context header
	if (avformat_write_header (formatCtx, &formatCtx->metadata)) {
		printf ("fail to write outstream header\n");
		return -1;
	}

	printf ("OUTPUT TO %s, at %lu\n", quadFileName, start_time);


	incaudio = 0;
	printf("Generating video streams...\n");
	while(av_read_frame (ff_input.formatCtx, &ff_input.packet) >= 0 && _keepEncoder) {
		if (ff_input.packet.stream_index == audioStreamIndex)
		{
			av_packet_ref  (&ff_output[amount_of_quadrants-1].packet, &ff_input.packet); 
            ff_output[amount_of_quadrants-1].packet.stream_index = amount_of_quadrants-1;
            ff_output[amount_of_quadrants-1].packet.pts = incaudio;

            // printf("%lu\n", ff_output[amount_of_quadrants-1].packet.pts);
            // if(gotPacket){
            	if (av_write_frame(formatCtx, &ff_output[amount_of_quadrants-1].packet) < 0) {
	                printf ("Unable to write to output stream..\n");
	                pthread_exit(NULL);
            	// }
            }            
            incaudio += 2880;
		}

		if (ff_input.packet.stream_index == videoStreamIndex) {

			ff_input.frame = av_frame_alloc();
			avcodec_decode_video2 (ff_input.codecCtx, ff_input.frame, &frameFinished, &ff_input.packet);

			if (frameFinished) {
				//TODO: Slice inputFrame and fill avQuadFrames[quadrant]
				//By now, inputFrame are replicated to all quadrants

				ff_input.frame->pts = av_frame_get_best_effort_timestamp (ff_input.frame);
				
				i = 0;
				for ( k = 0; k < quadrant_line; ++k) {
                    for (j = 0; j < quadrant_column; ++j) {
            			ff_output[i].frame = av_frame_alloc();

            			//make the cut quadrant ff_output[i]!
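            			/* av_picture_crop() only offsets the data pointers into
            			 * ff_input.frame by (marginTop, marginLeft); no pixel
            			 * data is copied. */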
            			av_picture_crop((AVPicture *)ff_output[i].frame, (AVPicture *)ff_input.frame,       
            							ff_input.formatCtx->streams[videoStreamIndex]->codec->pix_fmt, marginTop, marginLeft);
            			
            			ff_output[i].frame->width = width; // updates the new width
						ff_output[i].frame->height = height; // updates the new height
						ff_output[i].frame->format = ff_input.frame->format;

						ff_output[i].frame->pts = inc;

						ff_output[i].packet.data = NULL;
						ff_output[i].packet.size = 0;
						av_init_packet (&ff_output[i].packet);

						avcodec_encode_video2 (ff_output[i].codecCtx, &ff_output[i].packet, ff_output[i].frame, &gotPacket);

						if (gotPacket) {
							ff_output[i].packet.stream_index = i;
							av_packet_rescale_ts (&ff_output[i].packet,
													ff_output[i].codecCtx->time_base,
													ff_output[i].outStream->time_base);

							if (av_write_frame (formatCtx, &ff_output[i].packet) < 0) {
								printf ("Unable to write to output stream..\n");
								pthread_exit(NULL);
							}

						}

						av_frame_free (&ff_output[i].frame);	

						i++;
						marginLeft += width;	

            		}
            		marginLeft = 0;
            		marginTop += height;
            	}
            	marginTop = 0; 
            	i = 0;
            	inc++;
			}
			av_frame_free (&ff_input.frame);
		}
		av_free_packet (&ff_input.packet);
	}

	//Write trailer and close output file
	av_write_trailer (formatCtx);
	avio_close (formatCtx->pb);

	return 0;
}
Example #12
int main(int argc, char* argv[])
{
	AVFormatContext* pFormatCtx;
	AVOutputFormat* fmt;
	AVStream* video_st;
	AVCodecContext* pCodecCtx;
	AVCodec* pCodec;
	AVPacket pkt;
	uint8_t* picture_buf;
	AVFrame* pFrame;
	int picture_size;
	int y_size;
	int framecnt=0;
	//FILE *in_file = fopen("src01_480x272.yuv", "rb");	//Input raw YUV data 
	FILE *in_file = fopen("../ds_480x272.yuv", "rb");   //Input raw YUV data
	int in_w=480,in_h=272;                              //Input data's width and height
	int framenum=100;                                   //Frames to encode
	//const char* out_file = "src01.h264";              //Output Filepath 
	//const char* out_file = "src01.ts";
	//const char* out_file = "src01.hevc";
	const char* out_file = "ds.h264";

	av_register_all();
	//Method1.
	pFormatCtx = avformat_alloc_context();
	//Guess Format
	fmt = av_guess_format(NULL, out_file, NULL);
	pFormatCtx->oformat = fmt;
	
	//Method 2.
	//avformat_alloc_output_context2(&pFormatCtx, NULL, NULL, out_file);
	//fmt = pFormatCtx->oformat;


	//Open output URL
	if (avio_open(&pFormatCtx->pb,out_file, AVIO_FLAG_READ_WRITE) < 0){
		printf("Failed to open output file! \n");
		return -1;
	}

	video_st = avformat_new_stream(pFormatCtx, 0);
	if (video_st==NULL){
		return -1;
	}
	video_st->time_base.num = 1;
	video_st->time_base.den = 25;

	//Parameters that must be set
	pCodecCtx = video_st->codec;
	//pCodecCtx->codec_id =AV_CODEC_ID_HEVC;
	pCodecCtx->codec_id = fmt->video_codec;
	pCodecCtx->codec_type = AVMEDIA_TYPE_VIDEO;
	pCodecCtx->pix_fmt = PIX_FMT_YUV420P;
	pCodecCtx->width = in_w;  
	pCodecCtx->height = in_h;
	pCodecCtx->time_base.num = 1;  
	pCodecCtx->time_base.den = 25;  
	pCodecCtx->bit_rate = 400000;  
	pCodecCtx->gop_size=250;
	//H264
	//pCodecCtx->me_range = 16;
	//pCodecCtx->max_qdiff = 4;
	//pCodecCtx->qcompress = 0.6;
	pCodecCtx->qmin = 10;
	pCodecCtx->qmax = 51;

	//Optional Param
	pCodecCtx->max_b_frames=3;

	// Set Option
	AVDictionary *param = 0;
	//H.264
	if(pCodecCtx->codec_id == AV_CODEC_ID_H264) {
		av_dict_set(&param, "preset", "slow", 0);
		av_dict_set(&param, "tune", "zerolatency", 0);
		//av_dict_set(&param, "profile", "main", 0);
	}
	//H.265
	if(pCodecCtx->codec_id == AV_CODEC_ID_H265){
		av_dict_set(&param, "preset", "ultrafast", 0);
		av_dict_set(&param, "tune", "zero-latency", 0);
	}

	//Show some Information
	av_dump_format(pFormatCtx, 0, out_file, 1);

	pCodec = avcodec_find_encoder(pCodecCtx->codec_id);
	if (!pCodec){
		printf("Can not find encoder! \n");
		return -1;
	}
	if (avcodec_open2(pCodecCtx, pCodec,&param) < 0){
		printf("Failed to open encoder! \n");
		return -1;
	}


	pFrame = av_frame_alloc();
	picture_size = avpicture_get_size(pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);
	picture_buf = (uint8_t *)av_malloc(picture_size);
	avpicture_fill((AVPicture *)pFrame, picture_buf, pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height);

	//Write File Header
	avformat_write_header(pFormatCtx,NULL);

	av_new_packet(&pkt,picture_size);

	y_size = pCodecCtx->width * pCodecCtx->height;

	for (int i=0; i<framenum; i++){
		//Read raw YUV data
		if (fread(picture_buf, 1, y_size*3/2, in_file) <= 0){
			printf("Failed to read raw data! \n");
			return -1;
		}else if(feof(in_file)){
			break;
		}
		pFrame->data[0] = picture_buf;              // Y
		pFrame->data[1] = picture_buf+ y_size;      // U 
		pFrame->data[2] = picture_buf+ y_size*5/4;  // V
		//PTS
		pFrame->pts=i;
		int got_picture=0;
		//Encode
		int ret = avcodec_encode_video2(pCodecCtx, &pkt,pFrame, &got_picture);
		if(ret < 0){
			printf("Failed to encode! \n");
			return -1;
		}
		if (got_picture==1){
			printf("Succeed to encode frame: %5d\tsize:%5d\n",framecnt,pkt.size);
			framecnt++;
			pkt.stream_index = video_st->index;
			ret = av_write_frame(pFormatCtx, &pkt);
			av_free_packet(&pkt);
		}
	}
	//Flush Encoder
	int ret = flush_encoder(pFormatCtx,0);
	if (ret < 0) {
		printf("Flushing encoder failed\n");
		return -1;
	}

	//Write file trailer
	av_write_trailer(pFormatCtx);

	//Clean
	if (video_st){
		avcodec_close(video_st->codec);
		av_free(pFrame);
		av_free(picture_buf);
	}
	avio_close(pFormatCtx->pb);
	avformat_free_context(pFormatCtx);

	fclose(in_file);

	return 0;
}
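flush_encoder() called above is not included in this snippet; a minimal sketch
of what such a helper usually does, following the same delayed-frame drain loop
as Examples #1 and #2 (a reconstruction under that assumption, not necessarily
the author's exact code):

int flush_encoder(AVFormatContext *fmt_ctx, unsigned int stream_index)
{
	int ret = 0, got_frame;
	AVCodecContext *cctx = fmt_ctx->streams[stream_index]->codec;
	if (!(cctx->codec->capabilities & CODEC_CAP_DELAY))
		return 0; /* nothing buffered for codecs without delay */
	while (1) {
		AVPacket enc_pkt;
		enc_pkt.data = NULL;
		enc_pkt.size = 0;
		av_init_packet(&enc_pkt);
		ret = avcodec_encode_video2(cctx, &enc_pkt, NULL, &got_frame);
		if (ret < 0)
			break;
		if (!got_frame) { /* encoder fully drained */
			ret = 0;
			break;
		}
		printf("Flush encoder: got 1 frame, size:%5d\n", enc_pkt.size);
		ret = av_write_frame(fmt_ctx, &enc_pkt);
		if (ret < 0)
			break;
	}
	return ret;
}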
Example #13
int dc_video_encoder_encode(VideoOutputFile *video_output_file, VideoScaledData *video_scaled_data)
{
	VideoDataNode *video_data_node;
	int ret;

	AVCodecContext *video_codec_ctx = video_output_file->codec_ctx;

	//FIXME: deadlock when pressing 'q' with BigBuckBunny_640x360.m4v
	ret = dc_consumer_lock(&video_output_file->consumer, &video_scaled_data->circular_buf);
	if (ret < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Video encoder got an end of buffer!\n"));
		return -2;
	}

	if (video_scaled_data->circular_buf.size > 1)
		dc_consumer_unlock_previous(&video_output_file->consumer, &video_scaled_data->circular_buf);

	video_data_node = (VideoDataNode*)dc_consumer_consume(&video_output_file->consumer, &video_scaled_data->circular_buf);

	/*
	 * Set PTS (method 1)
	 */
	if (!video_output_file->use_source_timing) {
		video_data_node->vframe->pts = video_codec_ctx->frame_number;
	}

	/* Encoding video */
	{
		int got_packet = 0;
		AVPacket pkt;
		av_init_packet(&pkt);
		pkt.data = video_output_file->vbuf;
		pkt.size = video_output_file->vbuf_size;
		pkt.pts = pkt.dts = video_data_node->vframe->pkt_dts = video_data_node->vframe->pkt_pts = video_data_node->vframe->pts;
#ifdef LIBAV_ENCODE_OLD
		video_output_file->encoded_frame_size = avcodec_encode_video(video_codec_ctx, video_output_file->vbuf, video_output_file->vbuf_size, video_data_node->vframe);
		got_packet = video_output_file->encoded_frame_size>=0 ? 1 : 0;
#else
		video_output_file->encoded_frame_size = avcodec_encode_video2(video_codec_ctx, &pkt, video_data_node->vframe, &got_packet);
#endif

		//this is not true with libav !
#ifndef GPAC_USE_LIBAV
		if (video_output_file->encoded_frame_size >= 0)
			video_output_file->encoded_frame_size = pkt.size;
#else
		if (got_packet)
			video_output_file->encoded_frame_size = pkt.size;
#endif
		if (video_output_file->encoded_frame_size >= 0) {
			if (got_packet) {
				video_codec_ctx->coded_frame->pts = video_codec_ctx->coded_frame->pkt_pts = pkt.pts;
				video_codec_ctx->coded_frame->pkt_dts = pkt.dts;
				video_codec_ctx->coded_frame->key_frame = (pkt.flags & AV_PKT_FLAG_KEY) ? 1 : 0;
			}
		}
	}

	dc_consumer_advance(&video_output_file->consumer);

	if (video_scaled_data->circular_buf.size == 1)
		dc_consumer_unlock_previous(&video_output_file->consumer, &video_scaled_data->circular_buf);

	if (video_output_file->encoded_frame_size < 0) {
		GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Error occurred while encoding video frame.\n"));
		return -1;
	}

	GF_LOG(GF_LOG_INFO, GF_LOG_DASH, ("[DashCast] Video %s Frame TS "LLU" encoded at UTC "LLU" ms\n", video_output_file->rep_id, /*video_data_node->source_number, */video_data_node->vframe->pts, gf_net_get_utc() ));

	/* if zero size, it means the image was buffered */
//	if (out_size > 0) {
//		av_init_packet(&pkt);
//		pkt.data = NULL;
//		pkt.size = 0;
//
//		if (video_codec_ctx->coded_frame->pts != AV_NOPTS_VALUE) {
//			pkt.pts = av_rescale_q(video_codec_ctx->coded_frame->pts,
//					video_codec_ctx->time_base, video_stream->time_base);
//		}
//
//
//		if (video_codec_ctx->coded_frame->key_frame)
//			pkt.flags |= AV_PKT_FLAG_KEY;
//
//		pkt.stream_index = video_stream->index;
//		pkt.data = video_output_file->vbuf;
//		pkt.size = out_size;
//
//		// write the compressed frame in the media file
//		if (av_interleaved_write_frame(video_output_file->av_fmt_ctx, &pkt)
//				!= 0) {
//			GF_LOG(GF_LOG_ERROR, GF_LOG_DASH, ("Writing frame is not successful\n"));
//			return -1;
//		}
//
//		av_free_packet(&pkt);
//
//	}

	return video_output_file->encoded_frame_size;
}
Example #14
static void *
vencoder_threadproc(void *arg) {
	// arg is pointer to source pipename
	int iid, outputW, outputH;
	pooldata_t *data = NULL;
	vsource_frame_t *frame = NULL;
	char *pipename = (char*) arg;
	pipeline *pipe = pipeline::lookup(pipename);
	AVCodecContext *encoder = NULL;
	//
	AVFrame *pic_in = NULL;
	unsigned char *pic_in_buf = NULL;
	int pic_in_size;
	unsigned char *nalbuf = NULL, *nalbuf_a = NULL;
	int nalbuf_size = 0, nalign = 0;
	long long basePts = -1LL, newpts = 0LL, pts = -1LL, ptsSync = 0LL;
	pthread_mutex_t condMutex = PTHREAD_MUTEX_INITIALIZER;
	pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
	//
	int video_written = 0;
	//
	if(pipe == NULL) {
		ga_error("video encoder: invalid pipeline specified (%s).\n", pipename);
		goto video_quit;
	}
	//
	rtspconf = rtspconf_global();
	// init variables
	iid = ((vsource_t*) pipe->get_privdata())->channel;
	encoder = vencoder[iid];
	//
	outputW = video_source_out_width(iid);
	outputH = video_source_out_height(iid);
	//
	nalbuf_size = 100000+12 * outputW * outputH;
	if(ga_malloc(nalbuf_size, (void**) &nalbuf, &nalign) < 0) {
		ga_error("video encoder: buffer allocation failed, terminated.\n");
		goto video_quit;
	}
	nalbuf_a = nalbuf + nalign;
	//
	if((pic_in = av_frame_alloc()) == NULL) {
		ga_error("video encoder: picture allocation failed, terminated.\n");
		goto video_quit;
	}
	pic_in_size = avpicture_get_size(PIX_FMT_YUV420P, outputW, outputH);
	if((pic_in_buf = (unsigned char*) av_malloc(pic_in_size)) == NULL) {
		ga_error("video encoder: picture buffer allocation failed, terminated.\n");
		goto video_quit;
	}
	avpicture_fill((AVPicture*) pic_in, pic_in_buf,
			PIX_FMT_YUV420P, outputW, outputH);
	//ga_error("video encoder: linesize = %d|%d|%d\n", pic_in->linesize[0], pic_in->linesize[1], pic_in->linesize[2]);
	// start encoding
	ga_error("video encoding started: tid=%ld %dx%d@%dfps, nalbuf_size=%d, pic_in_size=%d.\n",
		ga_gettid(),
		outputW, outputH, rtspconf->video_fps,
		nalbuf_size, pic_in_size);
	//
	pipe->client_register(ga_gettid(), &cond);
	//
	while(vencoder_started != 0 && encoder_running() > 0) {
		AVPacket pkt;
		int got_packet = 0;
		// wait for notification
		data = pipe->load_data();
		if(data == NULL) {
			int err;
			struct timeval tv;
			struct timespec to;
			gettimeofday(&tv, NULL);
			to.tv_sec = tv.tv_sec+1;
			to.tv_nsec = tv.tv_usec * 1000;
			//
			if((err = pipe->timedwait(&cond, &condMutex, &to)) != 0) {
				ga_error("viedo encoder: image source timed out.\n");
				continue;
			}
			data = pipe->load_data();
			if(data == NULL) {
				ga_error("viedo encoder: unexpected NULL frame received (from '%s', data=%d, buf=%d).\n",
					pipe->name(), pipe->data_count(), pipe->buf_count());
				continue;
			}
		}
		frame = (vsource_frame_t*) data->ptr;
		// handle pts
		if(basePts == -1LL) {
			basePts = frame->imgpts;
			ptsSync = encoder_pts_sync(rtspconf->video_fps);
			newpts = ptsSync;
		} else {
			newpts = ptsSync + frame->imgpts - basePts;
		}
		// XXX: assume always YUV420P
		if(pic_in->linesize[0] == frame->linesize[0]
		&& pic_in->linesize[1] == frame->linesize[1]
		&& pic_in->linesize[2] == frame->linesize[2]) {
			bcopy(frame->imgbuf, pic_in_buf, pic_in_size);
		} else {
			ga_error("video encoder: YUV mode failed - mismatched linesize(s) (src:%d,%d,%d; dst:%d,%d,%d)\n",
				frame->linesize[0], frame->linesize[1], frame->linesize[2],
				pic_in->linesize[0], pic_in->linesize[1], pic_in->linesize[2]);
			pipe->release_data(data);
			goto video_quit;
		}
		pipe->release_data(data);
		// pts must be monotonically increasing
		if(newpts > pts) {
			pts = newpts;
		} else {
			pts++;
		}
		// encode
		pic_in->pts = pts;
		av_init_packet(&pkt);
		pkt.data = nalbuf_a;
		pkt.size = nalbuf_size;
		if(avcodec_encode_video2(encoder, &pkt, pic_in, &got_packet) < 0) {
			ga_error("video encoder: encode failed, terminated.\n");
			goto video_quit;
		}
		if(got_packet) {
			if(pkt.pts == (int64_t) AV_NOPTS_VALUE) {
				pkt.pts = pts;
			}
			pkt.stream_index = 0;
			// send the packet
			if(encoder_send_packet_all("video-encoder",
				iid/*rtspconf->video_id*/, &pkt,
				pkt.pts, NULL) < 0) {
				goto video_quit;
			}
			// free unused side-data
			if(pkt.side_data_elems > 0) {
				int i;
				for (i = 0; i < pkt.side_data_elems; i++)
					av_free(pkt.side_data[i].data);
				av_freep(&pkt.side_data);
				pkt.side_data_elems = 0;
			}
			//
			if(video_written == 0) {
				video_written = 1;
				ga_error("first video frame written (pts=%lld)\n", pts);
			}
		}
	}
	//
video_quit:
	if(pipe) {
		pipe->client_unregister(ga_gettid());
		pipe = NULL;
	}
	//
	if(pic_in_buf)	av_free(pic_in_buf);
	if(pic_in)	av_free(pic_in);
	if(nalbuf)	free(nalbuf);
	//
	ga_error("video encoder: thread terminated (tid=%ld).\n", ga_gettid());
	//
	return NULL;
}
Example #15
bool CFFmpegImage::CreateThumbnailFromSurface(unsigned char* bufferin, unsigned int width,
                                             unsigned int height, unsigned int format,
                                             unsigned int pitch,
                                             const std::string& destFile,
                                             unsigned char* &bufferout,
                                             unsigned int &bufferoutSize)
{
  // It seems XB_FMT_A8R8G8B8 means RGBA and not ARGB
  if (format != XB_FMT_A8R8G8B8)
  {
    CLog::Log(LOGERROR, "Supplied format: %d is not supported.", format);
    return false;
  }

  bool jpg_output = false;
  if (m_strMimeType == "image/jpeg" || m_strMimeType == "image/jpg")
    jpg_output = true;
  else if (m_strMimeType == "image/png")
    jpg_output = false;
  else
  {
    CLog::Log(LOGERROR, "Output Format is not supported: %s is not supported.", destFile.c_str());
    return false;
  }

  ThumbDataManagement tdm;

  tdm.codec = avcodec_find_encoder(jpg_output ? AV_CODEC_ID_MJPEG : AV_CODEC_ID_PNG);
  if (!tdm.codec)
  {
    CLog::Log(LOGERROR, "Your are missing a working encoder for format: %d", jpg_output ? AV_CODEC_ID_MJPEG : AV_CODEC_ID_PNG);
    return false;
  }

  tdm.avOutctx = avcodec_alloc_context3(tdm.codec);
  if (!tdm.avOutctx)
  {
    CLog::Log(LOGERROR, "Could not allocate context for thumbnail: %s", destFile.c_str());
    return false;
  }

  tdm.avOutctx->height = height;
  tdm.avOutctx->width = width;
  tdm.avOutctx->time_base.num = 1;
  tdm.avOutctx->time_base.den = 1;
  tdm.avOutctx->pix_fmt = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;
  tdm.avOutctx->flags = CODEC_FLAG_QSCALE;
  tdm.avOutctx->mb_lmin = tdm.avOutctx->qmin * FF_QP2LAMBDA;
  tdm.avOutctx->mb_lmax = tdm.avOutctx->qmax * FF_QP2LAMBDA;
  tdm.avOutctx->global_quality = tdm.avOutctx->qmin * FF_QP2LAMBDA;

  unsigned int internalBufOutSize = 0;

  int size = av_image_get_buffer_size(tdm.avOutctx->pix_fmt, tdm.avOutctx->width, tdm.avOutctx->height, 16);
  if (size < 0)
  {
    CLog::Log(LOGERROR, "Could not compute picture size for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }
  internalBufOutSize = (unsigned int) size;

  m_outputBuffer = (uint8_t*) av_malloc(internalBufOutSize);

  if (!m_outputBuffer)
  {
    CLog::Log(LOGERROR, "Could not generate allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }


  tdm.intermediateBuffer = (uint8_t*) av_malloc(internalBufOutSize);
  if (!tdm.intermediateBuffer)
  {
    CLog::Log(LOGERROR, "Could not allocate memory for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  if (avcodec_open2(tdm.avOutctx, tdm.codec, NULL) < 0)
  {
    CLog::Log(LOGERROR, "Could not open avcodec context thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  tdm.frame_input = av_frame_alloc();
  if (!tdm.frame_input)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // convert the RGB32 frame to AV_PIX_FMT_YUV420P - we use this later on as AV_PIX_FMT_YUVJ420P
  tdm.frame_temporary = av_frame_alloc();
  if (!tdm.frame_temporary)
  {
    CLog::Log(LOGERROR, "Could not allocate frame for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  if (av_image_fill_arrays(tdm.frame_temporary->data, tdm.frame_temporary->linesize, tdm.intermediateBuffer, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, width, height, 16) < 0)
  {
    CLog::Log(LOGERROR, "Could not fill picture for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  uint8_t* src[] = { bufferin, NULL, NULL, NULL };
  int srcStride[] = { (int) pitch, 0, 0, 0};

  //input size == output size which means only pix_fmt conversion
  tdm.sws = sws_getContext(width, height, AV_PIX_FMT_RGB32, width, height, jpg_output ? AV_PIX_FMT_YUV420P : AV_PIX_FMT_RGBA, 0, 0, 0, 0);
  if (!tdm.sws)
  {
    CLog::Log(LOGERROR, "Could not setup scaling context for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  // Setup jpeg range for sws
  if (jpg_output)
  {
    int* inv_table = nullptr;
    int* table = nullptr;
    int srcRange, dstRange, brightness, contrast, saturation;

    if (sws_getColorspaceDetails(tdm.sws, &inv_table, &srcRange, &table, &dstRange, &brightness, &contrast, &saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to get ColorSpaceDetails for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
    dstRange = 1; // jpeg full range yuv420p output
    srcRange = 0; // full range RGB32 input
    if (sws_setColorspaceDetails(tdm.sws, inv_table, srcRange, table, dstRange, brightness, contrast, saturation) < 0)
    {
      CLog::Log(LOGERROR, "SWS_SCALE failed to set ColorSpace Details for thumbnail: %s", destFile.c_str());
      CleanupLocalOutputBuffer();
      return false;
    }
  }

  if (sws_scale(tdm.sws, src, srcStride, 0, height, tdm.frame_temporary->data, tdm.frame_temporary->linesize) < 0)
  {
    CLog::Log(LOGERROR, "SWS_SCALE failed for thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }
  tdm.frame_input->pts = 1;
  tdm.frame_input->quality = tdm.avOutctx->global_quality;
  tdm.frame_input->data[0] = (uint8_t*) tdm.frame_temporary->data[0];
  tdm.frame_input->data[1] = (uint8_t*) tdm.frame_temporary->data[1];
  tdm.frame_input->data[2] = (uint8_t*) tdm.frame_temporary->data[2];
  tdm.frame_input->height = height;
  tdm.frame_input->width = width;
  tdm.frame_input->linesize[0] = tdm.frame_temporary->linesize[0];
  tdm.frame_input->linesize[1] = tdm.frame_temporary->linesize[1];
  tdm.frame_input->linesize[2] = tdm.frame_temporary->linesize[2];
  // this is deprecated but mjpeg is not yet transitioned
  tdm.frame_input->format = jpg_output ? AV_PIX_FMT_YUVJ420P : AV_PIX_FMT_RGBA;

  int got_package = 0;
  AVPacket avpkt;
  av_init_packet(&avpkt);
  avpkt.data = m_outputBuffer;
  avpkt.size = internalBufOutSize;

  if ((avcodec_encode_video2(tdm.avOutctx, &avpkt, tdm.frame_input, &got_package) < 0) || (got_package == 0))
  {
    CLog::Log(LOGERROR, "Could not encode thumbnail: %s", destFile.c_str());
    CleanupLocalOutputBuffer();
    return false;
  }

  bufferoutSize = avpkt.size;
  bufferout = m_outputBuffer;

  return true;
}
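The thumbnail encoder above drives MJPEG in fixed-quality (qscale) mode through the old CODEC_FLAG_QSCALE name. Here is a sketch of the same setup using the current AV_CODEC_FLAG_QSCALE spelling; the quality value 2 is illustrative and not taken from the example.

/* Sketch: fixed-quality MJPEG encoder context (values illustrative). */
static AVCodecContext *alloc_qscale_mjpeg(int w, int h)
{
    AVCodec *codec = avcodec_find_encoder(AV_CODEC_ID_MJPEG);
    AVCodecContext *enc = codec ? avcodec_alloc_context3(codec) : NULL;
    if (!enc)
        return NULL;
    enc->width = w;
    enc->height = h;
    enc->time_base = (AVRational){1, 1};
    enc->pix_fmt = AV_PIX_FMT_YUVJ420P;
    enc->flags |= AV_CODEC_FLAG_QSCALE;      /* fixed qscale, no rate control */
    enc->global_quality = 2 * FF_QP2LAMBDA;  /* qscale 2 is near-best quality */
    if (avcodec_open2(enc, codec, NULL) < 0) {
        avcodec_free_context(&enc);
        return NULL;
    }
    return enc;
}

Each submitted frame should then carry frame->quality = enc->global_quality, as the example does with tdm.frame_input.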
Example #16
int closeAVDumping(void) {
    /* Encode the remaining frames */
    int got_video = 1;
    int got_audio = 1;
    for (; got_video || got_audio;) {

        /* Initialize AVPacket */
        AVPacket vpkt;
        vpkt.data = NULL;
        vpkt.size = 0;
        av_init_packet(&vpkt);

        int ret = avcodec_encode_video2(video_st->codec, &vpkt, NULL, &got_video);
        if (ret < 0) {
            debuglog(LCF_DUMP | LCF_ERROR, "Error encoding frame");
            return 1;
        }

        if (got_video) {
            vpkt.pts = av_rescale_q_rnd(vpkt.pts, video_st->codec->time_base, video_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            vpkt.dts = av_rescale_q_rnd(vpkt.dts, video_st->codec->time_base, video_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            vpkt.duration = av_rescale_q(vpkt.duration, video_st->codec->time_base, video_st->time_base);
            vpkt.stream_index = video_st->index;
            if (av_interleaved_write_frame(formatContext, &vpkt) < 0) {
                debuglog(LCF_DUMP | LCF_ERROR, "Error writing frame");
                return 1;
            }
            av_free_packet(&vpkt);
        }

        AVPacket apkt;
        apkt.data = NULL;
        apkt.size = 0;
        av_init_packet(&apkt);

        ret = avcodec_encode_audio2(audio_st->codec, &apkt, NULL, &got_audio);
        if (ret < 0) {
            debuglog(LCF_DUMP | LCF_ERROR, "Error encoding audio frame");
            return 1;
        }

        if (got_audio) {
            /* We have an encoder output to write */
            apkt.pts = av_rescale_q_rnd(apkt.pts, audio_st->codec->time_base, audio_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            apkt.dts = av_rescale_q_rnd(apkt.dts, audio_st->codec->time_base, audio_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
            apkt.duration = av_rescale_q(apkt.duration, audio_st->codec->time_base, audio_st->time_base);
            apkt.stream_index = audio_st->index;
            if (av_interleaved_write_frame(formatContext, &apkt) < 0) {
                debuglog(LCF_DUMP | LCF_ERROR, "Error writing frame");
                return 1;
            }
            av_free_packet(&apkt);
        }

    }

    /* Write file trailer */
    av_write_trailer(formatContext);

    /* Free resources */
    avio_close(formatContext->pb);
    avcodec_close(video_st->codec);
    avcodec_close(audio_st->codec);
    avformat_free_context(formatContext);
    sws_freeContext(toYUVctx);
    av_freep(&video_frame->data[0]);
    av_frame_free(&video_frame);
    av_frame_free(&audio_frame);

    start_frame = -1;
    return 0;
}
int main(int argc, char *argv[]) {
	// Decoder local variable declaration
	AVFormatContext *pFormatCtx = NULL;
	int i, videoStream;
	AVCodecContext *pCodecCtx = NULL;
	AVCodec *pCodec;
	AVFrame *pFrame;
	AVPacket packet;
	int frameFinished;

	// Encoder local variable declaration
	const char *filename;
	AVOutputFormat *fmt;
	AVFormatContext *oc;
	AVStream *video_st;
	AVCodec *video_codec;
	int ret, frame_count;
	StreamInfo sInfo;

	// Register all formats, codecs and network
	av_register_all();
	avcodec_register_all();
	avformat_network_init();

	// Open video file
	if (avformat_open_input(&pFormatCtx, "input_file.wmv", NULL, NULL) != 0)
		return -1; // Couldn't open file

	// Retrieve stream information
	if (avformat_find_stream_info(pFormatCtx, NULL) < 0)
		return -1; // Couldn't find stream information

	// Dump information about file onto standard error
	av_dump_format(pFormatCtx, 0, "input_file.wmv", 0);

	// Find the first video stream
	videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1)
		return -1; // Didn't find a video stream

	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1; // Codec not found
	}
	// Open codec (decoder)
	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)
		return -1; // Could not open codec

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	// Setup mux
	filename = "output_file.flv";
	
	// To stream to a media server (e.g. FMS)
	// filename = "rtmp://chineseforall.org/live/beta";
	
	fmt = av_guess_format("flv", filename, NULL);
	if (fmt == NULL) {
		printf("Could not guess format.\n");
		return -1;
	}
	// allocate the output media context
	oc = avformat_alloc_context();
	if (oc == NULL) {
		printf("could not allocate context.\n");
		return -1;
	}

	// Set output format context to the format ffmpeg guessed
	oc->oformat = fmt;

	// Add the video stream using the h.264
	// codec and initialize the codec.
	video_st = NULL;
	// use videoStream rather than the loop counter, which only happens to equal it here
	sInfo.width = pFormatCtx->streams[videoStream]->codec->width;
	sInfo.height = pFormatCtx->streams[videoStream]->codec->height;
	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
	sInfo.frame_rate = 30;
	sInfo.bitrate = 450*1000;
	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

	// Now that all the parameters are set, we can open the audio and
	// video codecs and allocate the necessary encode buffers.
	if (video_st)
		open_video(oc, video_codec, video_st);

	/* open the output file, if needed */
	if (!(fmt->flags & AVFMT_NOFILE)) {
		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
		if (ret < 0) {
			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
			return 1;
		}
	}

	// dump output format
	av_dump_format(oc, 0, filename, 1);

	// Write the stream header, if any.
	ret = avformat_write_header(oc, NULL);
	if (ret < 0) {
		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
		return 1;
	}

	// Read frames, decode, and re-encode
	frame_count = 1;
	while (av_read_frame(pFormatCtx, &packet) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished, &packet);

			// Did we get a video frame?
			if (frameFinished) {

				// Initialize a new frame
				AVFrame* newFrame = avcodec_alloc_frame();

				int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
				uint8_t* picture_buf = av_malloc(size);

				avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// Copy only the frame content without additional fields
				av_picture_copy((AVPicture*) newFrame, (AVPicture*) pFrame, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

				// encode the image
				AVPacket pkt;
				int got_output;
				av_init_packet(&pkt);
				pkt.data = NULL; // packet data will be allocated by the encoder
				pkt.size = 0;

				// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS')
				newFrame->pts = frame_count;

				ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
				if (ret < 0) {
					fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				if (got_output) {
					if (video_st->codec->coded_frame->key_frame)
						pkt.flags |= AV_PKT_FLAG_KEY;
					pkt.stream_index = video_st->index;

					if (pkt.pts != AV_NOPTS_VALUE)
						pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
					if (pkt.dts != AV_NOPTS_VALUE)
						pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

					// Write the compressed frame to the media file.
					ret = av_interleaved_write_frame(oc, &pkt);
				} else {
					ret = 0;
				}
				if (ret != 0) {
					fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
					exit(1);
				}

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;

				// Free the YUV picture frame we copied from the
				// decoder to eliminate the additional fields
				// and other packets/frames used
				av_free(picture_buf);
				av_free_packet(&pkt);
				av_free(newFrame);
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}

	/* Write the trailer, if any. The trailer must be written before you
	 * close the CodecContexts open when you wrote the header; otherwise
	 * av_write_trailer() may try to use memory that was freed on
	 * av_codec_close(). */
	av_write_trailer(oc);

	/* Close the video codec (encoder) */
	if (video_st)
		close_video(oc, video_st);
	// Free the output streams.
	for (i = 0; i < oc->nb_streams; i++) {
		av_freep(&oc->streams[i]->codec);
		av_freep(&oc->streams[i]);
	}
	if (!(fmt->flags & AVFMT_NOFILE))
		/* Close the output file. */
		avio_close(oc->pb);
	/* free the output format context */
	av_free(oc);

	// Free the YUV frame populated by the decoder
	av_free(pFrame);

	// Close the video codec (decoder)
	avcodec_close(pCodecCtx);

	// Close the input video file
	avformat_close_input(&pFormatCtx);

	return 0;
}
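The program above depends on add_stream(), open_video() and close_video() helpers that are not part of this dump. The following is a reconstruction of add_stream(), inferred only from the call site (the StreamInfo fields and the pre-3.x stream->codec usage); the real helper may differ.

static AVStream *add_stream(AVFormatContext *oc, AVCodec **codec,
                            enum AVCodecID codec_id, StreamInfo *si)
{
	*codec = avcodec_find_encoder(codec_id);
	if (!*codec)
		return NULL;
	AVStream *st = avformat_new_stream(oc, *codec);
	if (!st)
		return NULL;
	AVCodecContext *c = st->codec;          /* pre-3.x API, as in the example */
	c->bit_rate  = si->bitrate;
	c->width     = si->width;
	c->height    = si->height;
	c->pix_fmt   = si->pix_fmt;
	c->time_base = (AVRational){1, si->frame_rate};
	c->gop_size  = 12;                      /* arbitrary; not known from the call site */
	if (oc->oformat->flags & AVFMT_GLOBALHEADER)
		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	return st;
}

open_video() presumably just calls avcodec_open2() on st->codec, and close_video() the matching avcodec_close().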
Example #18
MediaRet MediaRecorder::setup_sound_stream(const char *fname, AVOutputFormat *fmt)
{
    oc = avformat_alloc_context();
    if(!oc)
	return MRET_ERR_NOMEM;
    oc->oformat = fmt;
    strncpy(oc->filename, fname, sizeof(oc->filename) - 1);
    oc->filename[sizeof(oc->filename) - 1] = 0;
    if(fmt->audio_codec == CODEC_ID_NONE)
	return MRET_OK;

    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    aud_st = av_new_stream(oc, 1);
#else
    aud_st = avformat_new_stream(oc, NULL);
#endif
    if(!aud_st) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOMEM;
    }

    AVCodec *codec = avcodec_find_encoder(fmt->audio_codec);

    ctx = aud_st->codec;
    ctx->codec_id = fmt->audio_codec;
    ctx->codec_type = AVMEDIA_TYPE_AUDIO;
    // Some encoders don't like int16_t (SAMPLE_FMT_S16)
    ctx->sample_fmt = codec->sample_fmts[0];
    // This was changed in the initial ffmpeg 3.0 update,
    // but shouldn't (as far as I'm aware) cause problems with older versions
    ctx->bit_rate = 128000; // arbitrary; in case we're generating mp3
    ctx->sample_rate = soundGetSampleRate();
    ctx->channels = 2;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    if(fmt->flags & AVFMT_GLOBALHEADER)
	ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(!codec || avcodec_open(ctx, codec)) {
#else
    if(!codec || avcodec_open2(ctx, codec, NULL)) {
#endif
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}

MediaRet MediaRecorder::setup_video_stream(const char *fname, int w, int h, int d)
{
    AVCodecContext *ctx;
#if LIBAVFORMAT_VERSION_INT < AV_VERSION_INT(53,10,0)
    vid_st = av_new_stream(oc, 0);
#else
    vid_st = avformat_new_stream(oc, NULL);
#endif
    if(!vid_st) {
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOMEM;
    }
    ctx = vid_st->codec;
    ctx->codec_id = oc->oformat->video_codec;
    ctx->codec_type = AVMEDIA_TYPE_VIDEO;
    ctx->width = w;
    ctx->height = h;
    ctx->time_base.den = 60;
    ctx->time_base.num = 1;
    // dunno if any of these help; some output just looks plain crappy
    // will have to investigate further
    ctx->bit_rate = 400000;
    ctx->gop_size = 12;
    ctx->max_b_frames = 2;
    switch(d) {
    case 16:
	// FIXME: test & make endian-neutral
	pixfmt = PIX_FMT_RGB565LE;
	break;
    case 24:
	pixfmt = PIX_FMT_RGB24;
	break;
    case 32:
    default: // should never be anything else
	pixfmt = PIX_FMT_RGBA;
	break;
    }
    ctx->pix_fmt = pixfmt;
    pixsize = d >> 3;
    linesize = pixsize * w;
    ctx->max_b_frames = 2;
    if(oc->oformat->flags & AVFMT_GLOBALHEADER)
	ctx->flags |= CODEC_FLAG_GLOBAL_HEADER;

    AVCodec *codec = avcodec_find_encoder(oc->oformat->video_codec);
    // make sure RGB is supported (mostly not)
    if(codec->pix_fmts) {
	const enum PixelFormat *p;
#if LIBAVCODEC_VERSION_MAJOR < 55
	int64_t mask = 0;
#endif
	for(p = codec->pix_fmts; *p != -1; p++) {
	    // may get complaints about 1LL; thus the cast
#if LIBAVCODEC_VERSION_MAJOR < 55
	    mask |= ((int64_t)1) << *p;
#endif
	    if(*p == pixfmt)
		break;
	}
	if(*p == -1) {
	    // if not supported, use a converter to the next best format
	    // this is swscale, the converter used by the output demo
#if LIBAVCODEC_VERSION_MAJOR < 55
	    enum PixelFormat dp = (PixelFormat)avcodec_find_best_pix_fmt(mask, pixfmt, 0, NULL);
#else
#if LIBAVCODEC_VERSION_MICRO >= 100
// FFmpeg
		enum AVPixelFormat dp = avcodec_find_best_pix_fmt_of_list(codec->pix_fmts, pixfmt, 0, NULL);
#else
// Libav
		enum AVPixelFormat dp = avcodec_find_best_pix_fmt2(codec->pix_fmts, pixfmt, 0, NULL);
#endif
#endif
	    if(dp == -1)
		dp = codec->pix_fmts[0];
	    if(!(convpic = avcodec_alloc_frame()) ||
	       avpicture_alloc((AVPicture *)convpic, dp, w, h) < 0) {
		avformat_free_context(oc);
		oc = NULL;
		return MRET_ERR_NOMEM;
	    }
#if LIBSWSCALE_VERSION_INT < AV_VERSION_INT(0, 12, 0)
	    converter = sws_getContext(w, h, pixfmt, w, h, dp, SWS_BICUBIC,
				       NULL, NULL, NULL);
#else
	    converter = sws_alloc_context();
	    // what a convoluted, inefficient way to set options
	    av_opt_set_int(converter, "sws_flags", SWS_BICUBIC, 0);
	    av_opt_set_int(converter, "srcw", w, 0);
	    av_opt_set_int(converter, "srch", h, 0);
	    av_opt_set_int(converter, "dstw", w, 0);
	    av_opt_set_int(converter, "dsth", h, 0);
	    av_opt_set_int(converter, "src_format", pixfmt, 0);
	    av_opt_set_int(converter, "dst_format", dp, 0);
	    sws_init_context(converter, NULL, NULL);
#endif
	    ctx->pix_fmt = dp;
	}
    }
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(53,6,0)
    if(!codec || avcodec_open(ctx, codec)) {
#else
    if(!codec || avcodec_open2(ctx, codec, NULL)) {
#endif
	avformat_free_context(oc);
	oc = NULL;
	return MRET_ERR_NOCODEC;
    }

    return MRET_OK;
}

MediaRet MediaRecorder::finish_setup(const char *fname)
{
    if(audio_buf)
	free(audio_buf);
    if(audio_buf2)
	free(audio_buf2);
    audio_buf2 = NULL;
    in_audio_buf2 = 0;
    if(aud_st) {
	frame_len = aud_st->codec->frame_size * 4;
	sample_len = soundGetSampleRate() * 4 / 60;
	switch(aud_st->codec->codec_id) {
	case CODEC_ID_PCM_S16LE:
	case CODEC_ID_PCM_S16BE:
	case CODEC_ID_PCM_U16LE:
	case CODEC_ID_PCM_U16BE:
	    frame_len = sample_len;
	}
	audio_buf = (uint8_t *)malloc(AUDIO_BUF_LEN);
	if(!audio_buf) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_NOMEM;
	}
	if(frame_len != sample_len && (frame_len > sample_len || sample_len % frame_len)) {
	    audio_buf2 = (uint16_t *)malloc(frame_len);
	    if(!audio_buf2) {
		avformat_free_context(oc);
		oc = NULL;
		return MRET_ERR_NOMEM;
	    }
	}
    } else
	audio_buf = NULL;
    if(video_buf)
	free(video_buf);
    if(vid_st) {
	video_buf = (uint8_t *)malloc(VIDEO_BUF_LEN);
	if(!video_buf) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_NOMEM;
	}
    } else {
	video_buf = NULL;
    }
    if(!(oc->oformat->flags & AVFMT_NOFILE)) {
	if(avio_open(&oc->pb, fname, AVIO_FLAG_WRITE) < 0) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_FERR;
	}
    }
    avformat_write_header(oc, NULL);    
    return MRET_OK;
}

MediaRet MediaRecorder::Record(const char *fname, int width, int height, int depth)
{
    if(oc)
	return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
	fmt = av_guess_format("avi", NULL, NULL);
    if(!fmt || fmt->video_codec == CODEC_ID_NONE)
	return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK &&
       (ret = setup_video_stream(fname, width, height, depth)) == MRET_OK)
	ret = finish_setup(fname);
    return ret;
}

MediaRet MediaRecorder::Record(const char *fname)
{
    if(oc)
	return MRET_ERR_RECORDING;
    aud_st = vid_st = NULL;
    AVOutputFormat *fmt = av_guess_format(NULL, fname, NULL);
    if(!fmt)
	fmt = av_guess_format("wav", NULL, NULL);
    if(!fmt || fmt->audio_codec == CODEC_ID_NONE)
	return MRET_ERR_FMTGUESS;
    MediaRet ret;
    if((ret = setup_sound_stream(fname, fmt)) == MRET_OK)
	ret = finish_setup(fname);
    return ret;
}

void MediaRecorder::Stop()
{
    if(oc) {
	if(in_audio_buf2)
	    AddFrame((uint16_t *)0);
	av_write_trailer(oc);
	avformat_free_context(oc);
	oc = NULL;
    }
    if(audio_buf) {
	free(audio_buf);
	audio_buf = NULL;
    }
    if(video_buf) {
	free(video_buf);
	video_buf = NULL;
    }
    if(audio_buf2) {
	free(audio_buf2);
	audio_buf2 = NULL;
    }
    if(convpic) {
	avpicture_free((AVPicture *)convpic);
	av_free(convpic);
	convpic = NULL;
    }
    if(converter) {
	sws_freeContext(converter);
	converter = NULL;
    }
}

MediaRecorder::~MediaRecorder()
{
    Stop();
}

// Still needs updating for avcodec_encode_video2
MediaRet MediaRecorder::AddFrame(const uint8_t *vid)
{
    if(!oc || !vid_st)
	return MRET_OK;

    AVCodecContext *ctx = vid_st->codec;
    AVPacket pkt;
#if LIBAVCODEC_VERSION_MAJOR > 56
    int ret, got_packet = 0;
#endif

    // strip borders.  inconsistent between depths for some reason
    // but fortunately consistent between gb/gba.
    int tbord, rbord;
    switch(pixsize) {
    case 2:
	//    16-bit: 2 @ right, 1 @ top
	tbord = 1; rbord = 2; break;
    case 3:
	//    24-bit: no border
	tbord = rbord = 0; break;
    case 4:
    default: // guard against an unexpected depth leaving the borders uninitialized
	//    32-bit: 1 @ right, 1 @ top
	tbord = 1; rbord = 1; break;
    }
    avpicture_fill((AVPicture *)pic, (uint8_t *)vid + tbord * (linesize + pixsize * rbord),
		   (PixelFormat)pixfmt, ctx->width + rbord, ctx->height);
    // satisfy stupid sws_scale()'s integrity check
    pic->data[1] = pic->data[2] = pic->data[3] = pic->data[0];
    pic->linesize[1] = pic->linesize[2] = pic->linesize[3] = pic->linesize[0];

    AVFrame *f = pic;

    if(converter) {
	sws_scale(converter, pic->data, pic->linesize, 0, ctx->height,
		  convpic->data, convpic->linesize);
	f = convpic;
    }
    av_init_packet(&pkt);
    pkt.stream_index = vid_st->index;
    if(oc->oformat->flags & AVFMT_RAWPICTURE) {
	// this won't work due to border
	// not sure what formats set this, anyway
	pkt.flags |= AV_PKT_FLAG_KEY;
	pkt.data = f->data[0];
	pkt.size = linesize * ctx->height;
    } else {
#if LIBAVCODEC_VERSION_MAJOR > 56
        pkt.data = video_buf;
        pkt.size = VIDEO_BUF_LEN;
        f->format = ctx->pix_fmt;
        f->width = ctx->width;
        f->height = ctx->height;
        ret = avcodec_encode_video2(ctx, &pkt, f, &got_packet);
        if(!ret && got_packet && ctx->coded_frame) {
            ctx->coded_frame->pts = pkt.pts;
            ctx->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
        }
#else
	pkt.size = avcodec_encode_video(ctx, video_buf, VIDEO_BUF_LEN, f);
#endif
	if(!pkt.size)
	    return MRET_OK;
	if(ctx->coded_frame && ctx->coded_frame->pts != AV_NOPTS_VALUE)
	    pkt.pts = av_rescale_q(ctx->coded_frame->pts, ctx->time_base, vid_st->time_base);
	if(pkt.size > VIDEO_BUF_LEN) {
	    avformat_free_context(oc);
	    oc = NULL;
	    return MRET_ERR_BUFSIZE;
	}
	if(ctx->coded_frame && ctx->coded_frame->key_frame)
	    pkt.flags |= AV_PKT_FLAG_KEY;
	pkt.data = video_buf;
    }
    if(av_interleaved_write_frame(oc, &pkt) < 0) {
	avformat_free_context(oc);
	oc = NULL;
	// yeah, err might not be a file error, but if it isn't, it's a
	// coding error rather than a user-controllable error
	// and better resolved using debugging
	return MRET_ERR_FERR;
    }
    return MRET_OK;
}

#if LIBAVCODEC_VERSION_MAJOR > 56
/* FFmpeg deprecated avcodec_encode_audio.
 * It was removed completely in 3.0.
 * This will at least get audio recording *working*
 */
static inline int MediaRecorderEncodeAudio(AVCodecContext *ctx,
                                           AVPacket *pkt,
                                           uint8_t *buf, int buf_size,
                                           const short *samples)
{
    AVFrame *frame;
    av_init_packet(pkt);
    int ret, samples_size, got_packet = 0;

    pkt->data = buf;
    pkt->size = buf_size;
    if (samples) {
        frame = av_frame_alloc();
        if (ctx->frame_size) {
            frame->nb_samples = ctx->frame_size;
        } else {
            frame->nb_samples = (int64_t)buf_size * 8 /
                            (av_get_bits_per_sample(ctx->codec_id) *
                            ctx->channels);
        }
        frame->format = ctx->sample_fmt;
        frame->channel_layout = ctx->channel_layout;
        samples_size = av_samples_get_buffer_size(NULL, ctx->channels,
                        frame->nb_samples, ctx->sample_fmt, 1);
        avcodec_fill_audio_frame(frame, ctx->channels, ctx->sample_fmt,
                        (const uint8_t *)samples, samples_size, 1);
        //frame->pts = AV_NOPTS_VALUE;
    } else {
        frame = NULL;
    }
    ret = avcodec_encode_audio2(ctx, pkt, frame, &got_packet);
    if (!ret && got_packet && ctx->coded_frame) {
        ctx->coded_frame->pts = pkt->pts;
        ctx->coded_frame->key_frame = !!(pkt->flags & AV_PKT_FLAG_KEY);
    }
    if (frame) {
        if (frame->extended_data != frame->data)
            av_freep(&frame->extended_data);
        av_frame_free(&frame);  /* release the temporary frame */
    }
    return ret;
}
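For fixed-size PCM codecs (ctx->frame_size == 0) the shim above derives nb_samples from the raw buffer size. A quick worked instance of that formula, with illustrative numbers:

/* nb_samples = buf_size * 8 / (bits_per_sample * channels);
 * e.g. a 4096-byte buffer of 16-bit stereo PCM gives
 * 4096 * 8 / (16 * 2) = 1024 samples per channel. */
static int pcm_nb_samples(int buf_size, int bits_per_sample, int channels)
{
    return (int)((int64_t)buf_size * 8 / (bits_per_sample * channels));
}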
Example #19
/*
 * Video encoding example
 */
static void video_encode_example(const char *filename)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    int i, ret, x, y, got_output;
    FILE *f;
    AVFrame *picture;
    AVPacket pkt;
    uint8_t endcode[] = { 0, 0, 1, 0xb7 };

    printf("Video encoding\n");

    /* find the mpeg1 video encoder */
    codec = avcodec_find_encoder(AV_CODEC_ID_MPEG1VIDEO);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        exit(1);
    }

    c = avcodec_alloc_context3(codec);
    picture = av_frame_alloc();

    /* put sample parameters */
    c->bit_rate = 400000;
    /* resolution must be a multiple of two */
    c->width = 352;
    c->height = 288;
    /* frames per second */
    c->time_base= (AVRational){1,25};
    c->gop_size = 10; /* emit one intra frame every ten frames */
    c->max_b_frames=1;
    c->pix_fmt = AV_PIX_FMT_YUV420P;

    /* open it */
    if (avcodec_open2(c, codec, NULL) < 0) {
        fprintf(stderr, "could not open codec\n");
        exit(1);
    }

    f = fopen(filename, "wb");
    if (!f) {
        fprintf(stderr, "could not open %s\n", filename);
        exit(1);
    }

    ret = av_image_alloc(picture->data, picture->linesize, c->width, c->height,
                         c->pix_fmt, 32);
    if (ret < 0) {
        fprintf(stderr, "could not alloc raw picture buffer\n");
        exit(1);
    }
    picture->format = c->pix_fmt;
    picture->width  = c->width;
    picture->height = c->height;

    /* encode 1 second of video */
    for(i=0;i<25;i++) {
        av_init_packet(&pkt);
        pkt.data = NULL;    // packet data will be allocated by the encoder
        pkt.size = 0;

        fflush(stdout);
        /* prepare a dummy image */
        /* Y */
        for(y=0;y<c->height;y++) {
            for(x=0;x<c->width;x++) {
                picture->data[0][y * picture->linesize[0] + x] = x + y + i * 3;
            }
        }

        /* Cb and Cr */
        for(y=0;y<c->height/2;y++) {
            for(x=0;x<c->width/2;x++) {
                picture->data[1][y * picture->linesize[1] + x] = 128 + y + i * 2;
                picture->data[2][y * picture->linesize[2] + x] = 64 + x + i * 5;
            }
        }

        picture->pts = i;

        /* encode the image */
        ret = avcodec_encode_video2(c, &pkt, picture, &got_output);
        if (ret < 0) {
            fprintf(stderr, "error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* get the delayed frames */
    for (got_output = 1; got_output; i++) {
        fflush(stdout);

        ret = avcodec_encode_video2(c, &pkt, NULL, &got_output);
        if (ret < 0) {
            fprintf(stderr, "error encoding frame\n");
            exit(1);
        }

        if (got_output) {
            printf("encoding frame %3d (size=%5d)\n", i, pkt.size);
            fwrite(pkt.data, 1, pkt.size, f);
            av_free_packet(&pkt);
        }
    }

    /* add sequence end code to have a real mpeg file */
    fwrite(endcode, 1, sizeof(endcode), f);
    fclose(f);

    avcodec_close(c);
    av_free(c);
    av_freep(&picture->data[0]);
    av_frame_free(&picture);
    printf("\n");
}
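A hypothetical driver for the example above; the registration call matches the old-API era this code targets, and the filename is arbitrary.

int main(void)
{
    avcodec_register_all();           /* required before encoder lookup on this API */
    video_encode_example("test.mpg"); /* emits a raw MPEG-1 elementary stream */
    return 0;
}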
Example #20
static int WriteFrame(AVFrame* pFrame)
{
    double AudioTime, VideoTime;
    int ret;
    // write interleaved audio frame
    if (g_pAStream)
    {
        VideoTime = (double)g_pVFrame->pts * g_pVStream->time_base.num/g_pVStream->time_base.den;
        do
        {
            AudioTime = (double)g_pAFrame->pts * g_pAStream->time_base.num/g_pAStream->time_base.den;
            ret = WriteAudioFrame();
        }
        while (AudioTime < VideoTime && ret);
        if (ret < 0)
            return ret;
    }

    if (!g_pVStream)
        return 0;

    AVPacket Packet;
    av_init_packet(&Packet);
    Packet.data = NULL;
    Packet.size = 0;

    g_pVFrame->pts++;
#if LIBAVCODEC_VERSION_MAJOR < 58
    if (g_pFormat->flags & AVFMT_RAWPICTURE)
    {
        /* raw video case. The API will change slightly in the near
           future for that. */
        Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.stream_index = g_pVStream->index;
        Packet.data = (uint8_t*)pFrame;
        Packet.size = sizeof(AVPicture);

        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");
        return 0;
    }
    else
#endif
    {
#if LIBAVCODEC_VERSION_MAJOR >= 54
        int got_packet;
        if (avcodec_encode_video2(g_pVideo, &Packet, pFrame, &got_packet) < 0)
            return FatalError("avcodec_encode_video2 failed");
        if (!got_packet)
            return 0;

        av_packet_rescale_ts(&Packet, g_pVideo->time_base, g_pVStream->time_base);
#else
        Packet.size = avcodec_encode_video(g_pVideo, g_OutBuffer, OUTBUFFER_SIZE, pFrame);
        if (Packet.size < 0)
            return FatalError("avcodec_encode_video failed");
        if (Packet.size == 0)
            return 0;

        if( g_pVideo->coded_frame->pts != AV_NOPTS_VALUE)
            Packet.pts = av_rescale_q(g_pVideo->coded_frame->pts, g_pVideo->time_base, g_pVStream->time_base);
        if( g_pVideo->coded_frame->key_frame )
            Packet.flags |= AV_PKT_FLAG_KEY;
        Packet.data = g_OutBuffer;
#endif
        // write the compressed frame in the media file
        Packet.stream_index = g_pVStream->index;
        if (av_interleaved_write_frame(g_pContainer, &Packet) != 0)
            return FatalError("Error while writing video frame");

        return 1;
    }
}
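The LIBAVCODEC_VERSION_MAJOR >= 54 branch leans on av_packet_rescale_ts(), which does essentially what the manual #else branch spells out by hand, applied to pts, dts and duration alike. A re-implementation for illustration only:

static void rescale_ts_sketch(AVPacket *pkt, AVRational src_tb, AVRational dst_tb)
{
    if (pkt->pts != AV_NOPTS_VALUE)
        pkt->pts = av_rescale_q(pkt->pts, src_tb, dst_tb);
    if (pkt->dts != AV_NOPTS_VALUE)
        pkt->dts = av_rescale_q(pkt->dts, src_tb, dst_tb);
    pkt->duration = av_rescale_q(pkt->duration, src_tb, dst_tb);
}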
Example #21
int encodeOneFrame(unsigned long fcounter) {

    /* Check if the encode was inited */
    if (start_frame == -1)
        return 0;

    /*** Video ***/
    debuglog(LCF_DUMP | LCF_FRAME, "Encode a video frame");

    const uint8_t* orig_plane[4] = {0};
    int orig_stride[4] = {0};

    /* Access to the screen pixels */
    captureVideoFrame(orig_plane, orig_stride);

    /* Initialize AVPacket */
    AVPacket vpkt;
    vpkt.data = NULL;
    vpkt.size = 0;
    av_init_packet(&vpkt);

    /* Change pixel format to YUV420p and copy it into the AVframe */
    int rets = sws_scale(toYUVctx, orig_plane, orig_stride, 0, 
                video_frame->height, video_frame->data, video_frame->linesize);
    if (rets != video_frame->height) {
        debuglog(LCF_DUMP | LCF_ERROR, "We could only convert ",rets," rows");
        return 1;
    }

    video_frame->pts = fcounter - start_frame;

    /* Encode the image */
    int got_output;
    int ret = avcodec_encode_video2(video_st->codec, &vpkt, video_frame, &got_output);
    if (ret < 0) {
        debuglog(LCF_DUMP | LCF_ERROR, "Error encoding video frame");
        return 1;
    }

    if (got_output) {
        /* Rescale output packet timestamp values from codec to stream timebase */
        vpkt.pts = av_rescale_q_rnd(vpkt.pts, video_st->codec->time_base, video_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        vpkt.dts = av_rescale_q_rnd(vpkt.dts, video_st->codec->time_base, video_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        vpkt.duration = av_rescale_q(vpkt.duration, video_st->codec->time_base, video_st->time_base);
        vpkt.stream_index = video_st->index;
        if (av_interleaved_write_frame(formatContext, &vpkt) < 0) {
            debuglog(LCF_DUMP | LCF_ERROR, "Error writing frame");
            return 1;
        }
        av_free_packet(&vpkt);
    }

    /*** Audio ***/
    debuglog(LCF_DUMP | LCF_FRAME, "Encode an audio frame");

    /* Initialize AVPacket */
    AVPacket apkt;
    apkt.data = NULL;
    apkt.size = 0;
    av_init_packet(&apkt);

    int frame_size;
    if (audio_st->codec->codec->capabilities & CODEC_CAP_VARIABLE_FRAME_SIZE)
        frame_size = audiocontext.outNbSamples;
    else {
        frame_size = audio_st->codec->frame_size;
        if (frame_size > audiocontext.outNbSamples) {
            debuglog(LCF_DUMP | LCF_FRAME | LCF_ERROR, "Not enough converted samples for a full codec frame");
            frame_size = audiocontext.outNbSamples;
        }
    }

    audio_frame->nb_samples = frame_size;
    audio_frame->pts = av_rescale_q(accum_samples, AVRational{1, audio_st->codec->sample_rate}, audio_st->codec->time_base);
    accum_samples += frame_size;

    avcodec_fill_audio_frame(audio_frame, audio_st->codec->channels, audio_st->codec->sample_fmt,
                                             &audiocontext.outSamples[0], frame_size*audiocontext.outAlignSize, 1);

    ret = avcodec_encode_audio2(audio_st->codec, &apkt, audio_frame, &got_output);
    if (ret < 0) {
        debuglog(LCF_DUMP | LCF_ERROR, "Error encoding audio frame");
        return 1;
    }

    if (got_output) {
        /* We have an encoder output to write */
        apkt.pts = av_rescale_q_rnd(apkt.pts, audio_st->codec->time_base, audio_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        apkt.dts = av_rescale_q_rnd(apkt.dts, audio_st->codec->time_base, audio_st->time_base, static_cast<AVRounding>(AV_ROUND_NEAR_INF|AV_ROUND_PASS_MINMAX));
        apkt.duration = av_rescale_q(apkt.duration, audio_st->codec->time_base, audio_st->time_base);
        apkt.stream_index = audio_st->index;
        if (av_interleaved_write_frame(formatContext, &apkt) < 0) {
            debuglog(LCF_DUMP | LCF_ERROR, "Error writing frame");
            return 1;
        }
        av_free_packet(&apkt);
    }

    return 0;
}
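The audio pts bookkeeping above is a running sample counter rescaled from the 1/sample_rate clock into the codec time base. Pulled out as a helper (hypothetical, not part of the original) to make the units explicit:

static int64_t audio_pts_from_samples(int64_t accum_samples, int sample_rate,
                                      AVRational codec_tb)
{
    /* accum_samples are ticks of a 1/sample_rate clock */
    return av_rescale_q(accum_samples, (AVRational){1, sample_rate}, codec_tb);
}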
Example #22
static void filter(struct vf_priv_s *p, uint8_t *dst[3], uint8_t *src[3], int dst_stride[3], int src_stride[3], int width, int height, uint8_t *qp_store, int qp_stride){
    int x, y, i, j;
    const int count= 1<<p->log2_count;

    for(i=0; i<3; i++){
        int is_chroma= !!i;
        int w= width >>is_chroma;
        int h= height>>is_chroma;
        int stride= p->temp_stride[i];
        int block= BLOCK>>is_chroma;

        if (!src[i] || !dst[i])
            continue; // HACK avoid crash for Y8 colourspace
        for(y=0; y<h; y++){
            int index= block + block*stride + y*stride;
            fast_memcpy(p->src[i] + index, src[i] + y*src_stride[i], w);
            for(x=0; x<block; x++){
                p->src[i][index     - x - 1]= p->src[i][index +     x    ];
                p->src[i][index + w + x    ]= p->src[i][index + w - x - 1];
            }
        }
        for(y=0; y<block; y++){
            fast_memcpy(p->src[i] + (  block-1-y)*stride, p->src[i] + (  y+block  )*stride, stride);
            fast_memcpy(p->src[i] + (h+block  +y)*stride, p->src[i] + (h-y+block-1)*stride, stride);
        }

        p->frame->linesize[i]= stride;
        memset(p->temp[i], 0, (h+2*block)*stride*sizeof(int16_t));
    }

    if(p->qp)
        p->frame->quality= p->qp * FF_QP2LAMBDA;
    else
        p->frame->quality= norm_qscale(qp_store[0], p->mpeg2) * FF_QP2LAMBDA;
//    init per MB qscale stuff FIXME

    for(i=0; i<count; i++){
        const int x1= offset[i+count-1][0];
        const int y1= offset[i+count-1][1];
        int offset;
        AVPacket pkt;
        int ret, got_pkt;
        p->frame->data[0]= p->src[0] + x1 + y1 * p->frame->linesize[0];
        p->frame->data[1]= p->src[1] + x1/2 + y1/2 * p->frame->linesize[1];
        p->frame->data[2]= p->src[2] + x1/2 + y1/2 * p->frame->linesize[2];

        av_init_packet(&pkt);
        pkt.data = p->outbuf;
        pkt.size = p->outbuf_size;
        avcodec_encode_video2(p->avctx_enc[i], &pkt, p->frame, &got_pkt);
        p->frame_dec = p->avctx_enc[i]->coded_frame;

        offset= (BLOCK-x1) + (BLOCK-y1)*p->frame_dec->linesize[0];
        //FIXME optimize
        for(y=0; y<height; y++){
            for(x=0; x<width; x++){
                p->temp[0][ x + y*p->temp_stride[0] ] += p->frame_dec->data[0][ x + y*p->frame_dec->linesize[0] + offset ];
            }
        }
        offset= (BLOCK/2-x1/2) + (BLOCK/2-y1/2)*p->frame_dec->linesize[1];
        for(y=0; y<height/2; y++){
            for(x=0; x<width/2; x++){
                p->temp[1][ x + y*p->temp_stride[1] ] += p->frame_dec->data[1][ x + y*p->frame_dec->linesize[1] + offset ];
                p->temp[2][ x + y*p->temp_stride[2] ] += p->frame_dec->data[2][ x + y*p->frame_dec->linesize[2] + offset ];
            }
        }
    }

    for(j=0; j<3; j++){
        int is_chroma= !!j;
        if (!dst[j])
            continue; // HACK avoid crash for Y8 colourspace
        store_slice_c(dst[j], p->temp[j], dst_stride[j], p->temp_stride[j], width>>is_chroma, height>>is_chroma, 8-p->log2_count);
    }
}
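For orientation: the loop accumulates 1 << log2_count re-encoded and decoded copies of the frame, each at a shifted offset, into 16-bit sums, and store_slice_c() scales them back to 8 bits (hence the 8 - log2_count shift). A scalar sketch of that normalization, ignoring the rounding/dither details of the real store:

/* Average count = 1 << log2_count accumulated 8-bit samples, with rounding. */
static inline uint8_t average_sample(int32_t sum, int log2_count)
{
    int count = 1 << log2_count;
    return (uint8_t)((sum + count / 2) >> log2_count);
}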
Example #23
static void
write_thumb(const AVCodecContext *src, const AVFrame *sframe, 
            int width, int height, const char *cacheid, time_t mtime)
{
  if(thumbcodec == NULL)
    return;

  AVCodecContext *ctx = thumbctx;
  static AVFrame *oframe;

  if(ctx == NULL || ctx->width  != width || ctx->height != height) {
    
    if(ctx != NULL) {
      avcodec_close(ctx);
      av_free(ctx);  /* the context came from avcodec_alloc_context3() */
    }

    ctx = avcodec_alloc_context3(thumbcodec);
    ctx->pix_fmt = AV_PIX_FMT_YUVJ420P;
    ctx->time_base.den = 1;
    ctx->time_base.num = 1;
    ctx->sample_aspect_ratio.num = 1;
    ctx->sample_aspect_ratio.den = 1;
    ctx->width  = width;
    ctx->height = height;

    if(avcodec_open2(ctx, thumbcodec, NULL) < 0) {
      TRACE(TRACE_ERROR, "THUMB", "Unable to open thumb encoder");
      thumbctx = NULL;
      return;
    }
    thumbctx = ctx;

    if(oframe == NULL) {
      oframe = avcodec_alloc_frame();
      memset(oframe, 0, sizeof(AVFrame));
    }
  }

  avpicture_alloc((AVPicture *)oframe, ctx->pix_fmt, width, height);
      
  struct SwsContext *sws;
  sws = sws_getContext(src->width, src->height, src->pix_fmt,
                       width, height, ctx->pix_fmt, SWS_BILINEAR,
                       NULL, NULL, NULL);

  sws_scale(sws, (const uint8_t **)sframe->data, sframe->linesize,
            0, src->height, &oframe->data[0], &oframe->linesize[0]);
  sws_freeContext(sws);

  oframe->pts = AV_NOPTS_VALUE;
  AVPacket out;
  memset(&out, 0, sizeof(AVPacket));
  int got_packet;
  int r = avcodec_encode_video2(ctx, &out, oframe, &got_packet);
  if(r >= 0 && got_packet) {
    buf_t *b = buf_create_and_adopt(out.size, out.data, &av_free);
    blobcache_put(cacheid, "videothumb", b, INT32_MAX, NULL, mtime, 0);
    buf_release(b);
  } else {
    assert(out.data == NULL);
  }
  avpicture_free((AVPicture *)oframe);
}
Example #24
static bool write_lavc(struct image_writer_ctx *ctx, mp_image_t *image, FILE *fp)
{
    bool success = 0;
    AVFrame *pic = NULL;
    AVPacket pkt = {0};
    int got_output = 0;

    av_init_packet(&pkt);

    struct AVCodec *codec = avcodec_find_encoder(ctx->writer->lavc_codec);
    AVCodecContext *avctx = NULL;
    if (!codec)
        goto print_open_fail;
    avctx = avcodec_alloc_context3(codec);
    if (!avctx)
        goto print_open_fail;

    avctx->time_base = AV_TIME_BASE_Q;
    avctx->width = image->w;
    avctx->height = image->h;
    avctx->pix_fmt = imgfmt2pixfmt(image->imgfmt);
    if (avctx->pix_fmt == AV_PIX_FMT_NONE) {
        MP_ERR(ctx, "Image format %s not supported by lavc.\n",
               mp_imgfmt_to_name(image->imgfmt));
        goto error_exit;
    }
    if (ctx->writer->lavc_codec == AV_CODEC_ID_PNG) {
        avctx->compression_level = ctx->opts->png_compression;
        avctx->prediction_method = ctx->opts->png_filter;
    }

    if (avcodec_open2(avctx, codec, NULL) < 0) {
     print_open_fail:
        MP_ERR(ctx, "Could not open libavcodec encoder for saving images\n");
        goto error_exit;
    }

    pic = av_frame_alloc();
    if (!pic)
        goto error_exit;
    for (int n = 0; n < 4; n++) {
        pic->data[n] = image->planes[n];
        pic->linesize[n] = image->stride[n];
    }
    pic->format = avctx->pix_fmt;
    pic->width = avctx->width;
    pic->height = avctx->height;
    if (ctx->opts->tag_csp) {
        pic->color_primaries = mp_csp_prim_to_avcol_pri(image->params.primaries);
        pic->color_trc = mp_csp_trc_to_avcol_trc(image->params.gamma);
    }
    int ret = avcodec_encode_video2(avctx, &pkt, pic, &got_output);
    if (ret < 0)
        goto error_exit;

    fwrite(pkt.data, pkt.size, 1, fp);

    success = !!got_output;
error_exit:
    if (avctx)
        avcodec_close(avctx);
    av_free(avctx);
    av_frame_free(&pic);
    av_packet_unref(&pkt);
    return success;
}
Example #25
void AVIDump::AddFrame(const u8* data, int width, int height)
{
  avpicture_fill((AVPicture*)s_src_frame, const_cast<u8*>(data), s_pix_fmt, width, height);

  // Convert image from {BGR24, RGBA} to desired pixel format, and scale to initial
  // width and height
  if ((s_sws_context =
           sws_getCachedContext(s_sws_context, width, height, s_pix_fmt, s_width, s_height,
                                s_stream->codec->pix_fmt, SWS_BICUBIC, nullptr, nullptr, nullptr)))
  {
    sws_scale(s_sws_context, s_src_frame->data, s_src_frame->linesize, 0, height,
              s_scaled_frame->data, s_scaled_frame->linesize);
  }

  s_scaled_frame->format = s_stream->codec->pix_fmt;
  s_scaled_frame->width = s_width;
  s_scaled_frame->height = s_height;

  // Encode and write the image.
  AVPacket pkt;
  PreparePacket(&pkt);
  int got_packet = 0;
  int error = 0;
  u64 delta;
  s64 last_pts;
  // Check to see if the first frame being dumped is the first frame of output from the emulator.
  // This prevents an issue with starting dumping later in emulation from placing the frames
  // incorrectly.
  if (!s_start_dumping && Movie::g_currentFrame < 1)
  {
    delta = CoreTiming::GetTicks();
    last_pts = AV_NOPTS_VALUE;
    s_start_dumping = true;
  }
  else
  {
    delta = CoreTiming::GetTicks() - s_last_frame;
    last_pts = (s_last_pts * s_stream->codec->time_base.den) / SystemTimers::GetTicksPerSecond();
  }
  u64 pts_in_ticks = s_last_pts + delta;
  s_scaled_frame->pts =
      (pts_in_ticks * s_stream->codec->time_base.den) / SystemTimers::GetTicksPerSecond();
  if (s_scaled_frame->pts != last_pts)
  {
    s_last_frame = CoreTiming::GetTicks();
    s_last_pts = pts_in_ticks;
    error = avcodec_encode_video2(s_stream->codec, &pkt, s_scaled_frame, &got_packet);
  }
  while (!error && got_packet)
  {
    // Write the compressed frame in the media file.
    if (pkt.pts != (s64)AV_NOPTS_VALUE)
    {
      pkt.pts = av_rescale_q(pkt.pts, s_stream->codec->time_base, s_stream->time_base);
    }
    if (pkt.dts != (s64)AV_NOPTS_VALUE)
    {
      pkt.dts = av_rescale_q(pkt.dts, s_stream->codec->time_base, s_stream->time_base);
    }
#if LIBAVCODEC_VERSION_INT < AV_VERSION_INT(56, 60, 100)
    if (s_stream->codec->coded_frame->key_frame)
      pkt.flags |= AV_PKT_FLAG_KEY;
#endif
    pkt.stream_index = s_stream->index;
    av_interleaved_write_frame(s_format_context, &pkt);

    // Handle delayed frames.
    PreparePacket(&pkt);
    error = avcodec_encode_video2(s_stream->codec, &pkt, nullptr, &got_packet);
  }
  if (error)
    ERROR_LOG(VIDEO, "Error while encoding video: %d", error);
}
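The pts computation in AddFrame() converts emulator ticks into time-base units by direct multiplication and division. Extracted as a plain helper (hypothetical) to make the arithmetic visible:

#include <stdint.h>

/* pts = ticks * time_base.den / ticks_per_second, i.e. elapsed seconds
 * expressed in time-base units (time_base.num is 1 in the example). */
static int64_t pts_from_ticks(uint64_t ticks, int tb_den, uint64_t ticks_per_second)
{
    return (int64_t)((ticks * (uint64_t)tb_den) / ticks_per_second);
}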
int AVFormatWriter::WriteVideoFrame(VideoFrame *frame)
{
    //AVCodecContext *c = m_videoStream->codec;

    uint8_t *planes[3];
    int len = frame->size;
    unsigned char *buf = frame->buf;
    int framesEncoded = m_framesWritten + m_bufferedVideoFrameTimes.size();

    planes[0] = buf;
    planes[1] = planes[0] + frame->width * frame->height;
    planes[2] = planes[1] + (frame->width * frame->height) /
        4; // (pictureFormat == PIX_FMT_YUV422P ? 2 : 4);

    m_picture->data[0] = planes[0];
    m_picture->data[1] = planes[1];
    m_picture->data[2] = planes[2];
    m_picture->linesize[0] = frame->width;
    m_picture->linesize[1] = frame->width / 2;
    m_picture->linesize[2] = frame->width / 2;
    m_picture->pts = framesEncoded + 1;
    m_picture->type = FF_BUFFER_TYPE_SHARED;

    if ((framesEncoded % m_keyFrameDist) == 0)
        m_picture->pict_type = AV_PICTURE_TYPE_I;
    else
        m_picture->pict_type = AV_PICTURE_TYPE_NONE;

    int got_pkt = 0;
    int ret = 0;

    m_bufferedVideoFrameTimes.push_back(frame->timecode);
    m_bufferedVideoFrameTypes.push_back(m_picture->pict_type);

    av_init_packet(m_pkt);
    m_pkt->data = (unsigned char *)m_videoOutBuf;
    m_pkt->size = len;

    {
        QMutexLocker locker(avcodeclock);
        ret = avcodec_encode_video2(m_videoStream->codec, m_pkt,
                                      m_picture, &got_pkt); 
    }

    if (ret < 0)
    {
        LOG(VB_RECORD, LOG_ERR, "avcodec_encode_video2() failed");
        return ret;
    }

    if (!got_pkt)
    {
        //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): Frame Buffered: cs: %1, mfw: %2, f->tc: %3, fn: %4, pt: %5").arg(m_pkt->size).arg(m_framesWritten).arg(frame->timecode).arg(frame->frameNumber).arg(m_picture->pict_type));
        return ret;
    }

    long long tc = frame->timecode;

    if (!m_bufferedVideoFrameTimes.isEmpty())
        tc = m_bufferedVideoFrameTimes.takeFirst();
    if (!m_bufferedVideoFrameTypes.isEmpty())
    {
        int pict_type = m_bufferedVideoFrameTypes.takeFirst();
        if (pict_type == AV_PICTURE_TYPE_I)
            m_pkt->flags |= AV_PKT_FLAG_KEY;
    }

    if (m_startingTimecodeOffset == -1)
        m_startingTimecodeOffset = tc - 1;
    tc -= m_startingTimecodeOffset;

    m_pkt->pts = tc * m_videoStream->time_base.den / m_videoStream->time_base.num / 1000;
    m_pkt->dts = AV_NOPTS_VALUE;
    m_pkt->stream_index= m_videoStream->index;

    //LOG(VB_RECORD, LOG_DEBUG, QString("WriteVideoFrame(): cs: %1, mfw: %2, pkt->pts: %3, tc: %4, fn: %5, pic->pts: %6, f->tc: %7, pt: %8").arg(m_pkt->size).arg(m_framesWritten).arg(m_pkt->pts).arg(tc).arg(frame->frameNumber).arg(m_picture->pts).arg(frame->timecode).arg(m_picture->pict_type));
    ret = av_interleaved_write_frame(m_ctx, m_pkt);
    if (ret != 0)
        LOG(VB_RECORD, LOG_ERR, LOC + "WriteVideoFrame(): "
                "av_interleaved_write_frame couldn't write Video");

    frame->timecode = tc + m_startingTimecodeOffset;
    m_framesWritten++;

    return 1;
}
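The manual pts line above maps a millisecond timecode into the stream time base. av_rescale_q() expresses the same conversion while also handling rounding and 64-bit overflow; an equivalent under that assumption:

static int64_t pts_from_ms(int64_t tc_ms, AVRational stream_tb)
{
    /* equivalent (up to rounding) to tc * den / num / 1000 */
    return av_rescale_q(tc_ms, (AVRational){1, 1000}, stream_tb);
}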
int main(int argc, char ** argv)
{
	if(argc < 4) {
		printf("\nScrub, you need to specify a bitrate, number of frames, and server."
				"\nLike this: pixieHD 350 1000 rtmp://domain.com/live/matt\n"
				"\nNOTE, it is: progname bitrate frames server\n\n"
				"The bitrate is understood to be kbits/sec.\n"
				"You should enter frames or else you the program will\n"
				"continue to stream until you forcefully close it.\n"
				"THANK YOU: while(1) { /* stream! */ }\n");
		return 0;
	}
	printf("\nYou have set the following options:\n\n%5cbitrate: %s,"
			"\n%5cframes: %s\n%5cserver: %s\n\n",
			' ',argv[1],' ',argv[2],' ',argv[3]);
	
	/*int p;
	printf("Initializing noob options");
	for(p=0; p<3; ++p) {
		printf("%5c",'.');
		Sleep(1500);
	}
	printf("\n\n");

	char *input;
	printf("You hating on my GFX or wat? Please Answer: ");
	input = getline();

	printf("\n\n");
	printf("Your answer: ");

	size_t input_len = strlen(input);
	for(p=0; p<input_len; ++p) {
		Sleep(300);
		printf("%c",input[p]);
	}
	printf("\nkk here we go...");
	Sleep(1000);*/

	printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
    while (1)
    {
	   if (ButtonPress(VK_ESCAPE)) {
          printf("Quit.\n\n");
          break;
       } else if (ButtonPress(VK_CONTROL)) {
    	   // Decoder local variable declaration
    	   	AVFormatContext *pFormatCtx = NULL;
    	   	int i, videoStream;
    	   	AVCodecContext *pCodecCtx = NULL;
    	   	AVCodec *pCodec;
    	   	AVFrame *pFrame;
    	   	AVPacket packet;
    	   	int frameFinished;

    	   	// Encoder local variable declaration
    	   	const char *filename;
    	   	AVOutputFormat *fmt;
    	   	AVFormatContext *oc;
    	   	AVStream *video_st;
    	   	AVCodec *video_codec;
    	   	int ret; unsigned int frame_count, frame_count2;
    	   	StreamInfo sInfo;

    	   	size_t max_frames = strtol(argv[2], NULL, 0);

    	   	// Register all formats, codecs and network
    	   	av_register_all();
    	   	avcodec_register_all();
    	   	avformat_network_init();

    	   	// Setup mux
    	   	//filename = "output_file.flv";
    	   	//filename = "rtmp://chineseforall.org/live/beta";
    	   	filename = argv[3];
    	   	fmt = av_guess_format("flv", filename, NULL);
    	   	if (fmt == NULL) {
    	   		printf("Could not guess format.\n");
    	   		return -1;
    	   	}
    	   	// allocate the output media context
    	   	oc = avformat_alloc_context();
    	   	if (oc == NULL) {
    	   		printf("could not allocate context.\n");
    	   		return -1;
    	   	}


    	   HDC hScreen = GetDC(GetDesktopWindow());
		   ScreenX = GetDeviceCaps(hScreen, HORZRES);
		   ScreenY = GetDeviceCaps(hScreen, VERTRES);

		   // Temp. hard-code the resolution
		   int new_width = 1024, new_height = 576;
		   double v_ratio = 1.7786458333333333333333333333333;

    	   	// Set output format context to the format ffmpeg guessed
    	   	oc->oformat = fmt;

    	   	// Add the video stream using the h.264
    	   	// codec and initialize the codec.
    	   	video_st = NULL;
    	   	sInfo.width = new_width;
    	   	sInfo.height = new_height;
    	   	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
    	   	sInfo.frame_rate = 10;
    	   	sInfo.bitrate = strtol(argv[1], NULL, 0)*1000;
    	   	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

    	   	// Now that all the parameters are set, we can open the audio and
    	   	// video codecs and allocate the necessary encode buffers.
    	   	if (video_st)
    	   		open_video(oc, video_codec, video_st);

    	   	/* open the output file, if needed */
    	   	if (!(fmt->flags & AVFMT_NOFILE)) {
    	   		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    	   		if (ret < 0) {
    	   			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
    	   			return 1;
    	   		}
    	   	}

    	   	// dump output format
    	   	av_dump_format(oc, 0, filename, 1);

    	   	// Write the stream header, if any.
    	   	ret = avformat_write_header(oc, NULL);
    	   	if (ret < 0) {
    	   		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
    	   		return 1;
    	   	}

    	   	// Read frames, decode, and re-encode
    	   	frame_count = 1;
    	   	frame_count2 = 1;

		   HDC hdcMem = CreateCompatibleDC (hScreen);
		   HBITMAP hBitmap = CreateCompatibleBitmap(hScreen, ScreenX, ScreenY);
		   HGDIOBJ hOld;
		   BITMAPINFOHEADER bmi = {0};
		   bmi.biSize = sizeof(BITMAPINFOHEADER);
		   bmi.biPlanes = 1;
		   bmi.biBitCount = 32;
		   bmi.biWidth = ScreenX;
		   bmi.biHeight = -ScreenY;
		   bmi.biCompression = BI_RGB;
		   bmi.biSizeImage = 0;// 3 * ScreenX * ScreenY;


		   if(ScreenData)
			   free(ScreenData);
		   ScreenData = (BYTE*)malloc(4 * ScreenX * ScreenY);
		   AVPacket pkt;

		   clock_t start_t = GetTickCount();
		   long long wait_time = 0;

		   uint64_t total_size;

    	   while(1) {
			hOld = SelectObject(hdcMem, hBitmap);
			BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hScreen, 0, 0, SRCCOPY);
			SelectObject(hdcMem, hOld);

			GetDIBits(hdcMem, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

			//calculate the bytes needed for the output image
			int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height);

			//create buffer for the output image
			uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

			//create ffmpeg frame structures.  These do not allocate space for image data,
			//just the pointers and other information about the image.
			AVFrame* inpic = avcodec_alloc_frame();
			AVFrame* outpic = avcodec_alloc_frame();

			//this will set the pointers in the frame structures to the right points in
			//the input and output buffers.
			avpicture_fill((AVPicture*)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY);
			avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P, new_width, new_height);

			//create the conversion context
			struct SwsContext *fooContext = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32, new_width, new_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

			//perform the conversion
			sws_scale(fooContext, inpic->data, inpic->linesize, 0, ScreenY, outpic->data, outpic->linesize);
			
			// Initialize a new frame
			AVFrame* newFrame = avcodec_alloc_frame();

			int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
			uint8_t* picture_buf = av_malloc(size);

			avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// Copy only the frame content without additional fields
			av_picture_copy((AVPicture*) newFrame, (AVPicture*) outpic, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// encode the image
			int got_output;
			av_init_packet(&pkt);
			pkt.data = NULL; // packet data will be allocated by the encoder
			pkt.size = 0;

			// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS')
			newFrame->pts = frame_count2;

			ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
			if (ret < 0) {
				fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			if (got_output) {
				if (video_st->codec->coded_frame->key_frame)
					pkt.flags |= AV_PKT_FLAG_KEY;
				pkt.stream_index = video_st->index;

				if (pkt.pts != AV_NOPTS_VALUE)
					pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
				if (pkt.dts != AV_NOPTS_VALUE)
					pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

				// Write the compressed frame to the media file.
				ret = av_interleaved_write_frame(oc, &pkt);

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;
			} else {
				ret = 0;
			}
			if (ret != 0) {
				fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			++frame_count2;

			// Free the packet and the temporary YUV frame/buffer
			// used for encoding this captured frame
			av_free(picture_buf);
			av_free_packet(&pkt);
			av_free(newFrame);

			//free the per-iteration conversion context and buffers
			sws_freeContext(fooContext);
			av_free(outbuffer);
			av_free(inpic);
			av_free(outpic);


          if(frame_count == max_frames) {
			/* Flush the frames still buffered inside the encoder
			 * (B-frames, lookahead); otherwise they are lost. */
			for (got_output = 1; got_output;) {
				av_init_packet(&pkt);
				pkt.data = NULL;
				pkt.size = 0;
				if (avcodec_encode_video2(video_st->codec, &pkt, NULL, &got_output) < 0)
					break;
				if (got_output) {
					pkt.stream_index = video_st->index;
					if (pkt.pts != AV_NOPTS_VALUE)
						pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
					if (pkt.dts != AV_NOPTS_VALUE)
						pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);
					av_interleaved_write_frame(oc, &pkt);
					av_free_packet(&pkt);
				}
			}
			/* Write the trailer, if any. The trailer must be written before you
			 * close the CodecContexts open when you wrote the header; otherwise
			 * av_write_trailer() may try to use memory that was freed on
			 * av_codec_close().
			 */
			av_write_trailer(oc);

			/* Close the video codec (encoder) */
			if (video_st) {
				close_video(oc, video_st);
			}
			// Free the output streams.
			for (i = 0; i < oc->nb_streams; i++) {
				av_freep(&oc->streams[i]->codec);
				av_freep(&oc->streams[i]);
			}
			if (!(fmt->flags & AVFMT_NOFILE)) {
				/* Close the output file. */
				avio_close(oc->pb);
			}
			/* free the output format context */
			av_free(oc);

			ReleaseDC(GetDesktopWindow(),hScreen);
			DeleteDC(hdcMem);

			printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
			break;
          }
       }
       }
    }
    return 0;
}
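
Note: the capture loop above re-creates the SwsContext, the frames, and the buffers on every iteration. Below is a minimal sketch, not part of the example, of hoisting that setup out of the loop with sws_getCachedContext(); capture_rgb32() is a hypothetical stand-in for the BitBlt/GetDIBits block, and the deprecated avcodec_alloc_frame()/avpicture_*() calls are kept only to match the era of the example.

#include <libavcodec/avcodec.h>
#include <libswscale/swscale.h>

static int convert_loop(uint8_t *rgb, int w, int h,
                        int out_w, int out_h, int nframes)
{
	struct SwsContext *sws = NULL;
	AVFrame *in  = avcodec_alloc_frame();   /* av_frame_alloc() on newer FFmpeg */
	AVFrame *out = avcodec_alloc_frame();
	uint8_t *outbuf = av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, out_w, out_h));
	int i, ret = 0;

	if (!in || !out || !outbuf) {
		av_free(outbuf);
		av_free(in);
		av_free(out);
		return -1;
	}
	/* the output frame keeps the same buffer for every iteration */
	avpicture_fill((AVPicture *)out, outbuf, AV_PIX_FMT_YUV420P, out_w, out_h);

	for (i = 0; i < nframes; i++) {
		/* capture_rgb32(rgb); -- refresh the RGB32 source here */
		avpicture_fill((AVPicture *)in, rgb, AV_PIX_FMT_RGB32, w, h);

		/* reuses the existing context when the parameters are unchanged */
		sws = sws_getCachedContext(sws, w, h, AV_PIX_FMT_RGB32,
		                           out_w, out_h, AV_PIX_FMT_YUV420P,
		                           SWS_FAST_BILINEAR, NULL, NULL, NULL);
		if (!sws) { ret = -1; break; }

		sws_scale(sws, (const uint8_t * const *)in->data, in->linesize,
		          0, h, out->data, out->linesize);
		/* hand 'out' to the encoder here */
	}

	sws_freeContext(sws);
	av_free(outbuf);
	av_free(in);
	av_free(out);
	return ret;
}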
Example #28
0
int VideoFFmpegWriter::execute( boost::uint8_t* in_buffer, int in_width, int in_height, PixelFormat in_pixelFormat )
{
	_error = IGNORE_FINISH;

	if( !_avformatOptions )
	{
		// TODO avformat_alloc_context2 can guess format from filename
		// if format name is NULL, find a way to expose the feature
		if (avformat_alloc_output_context2(&_avformatOptions, NULL, _formatName.c_str(), filename().c_str()) < 0)
		{
			TUTTLE_CERR( "ffmpegWriter: output context allocation failed" );
			return false;
		}
		_ofmt = _avformatOptions->oformat;
		TUTTLE_CERR( "ffmpegWriter: " << std::string(_ofmt->name) << " format selected" );
	}

	if( !_stream )
	{
		_codec = avcodec_find_encoder_by_name( _codecName.c_str() );
		if (!_codec)
		{
			TUTTLE_CERR( "ffmpegWriter: codec not found" );
			return false;
		}
		TUTTLE_CERR( "ffmpegWriter: " << std::string(_codec->name) << " codec selected" );

		_stream = avformat_new_stream( _avformatOptions, _codec );
		if( !_stream )
		{
			TUTTLE_CERR( "ffmpegWriter: out of memory." );
			return false;
		}
		avcodec_get_context_defaults3(_stream->codec, _codec);

		if( _videoPresetName.length() !=0 )
		{
			TUTTLE_COUT( "ffmpegWriter: " << _videoPresetName << " preset selected" );
			std::string presetFilename = getFilename( std::string(_codec->name), _videoPresetName );
			
			PresetsOptions opts = getOptionsForPresetFilename( presetFilename );
			PresetsOptions::iterator itOpt;
			for ( itOpt = opts.begin() ; itOpt != opts.end(); itOpt++ )
			{
				int ret = av_opt_set( (void*)_stream->codec, (*itOpt).first.c_str(), (*itOpt).second.c_str(), 0);
				switch( ret )
				{
					case AVERROR_OPTION_NOT_FOUND: TUTTLE_CERR( "ffmpegPreset: unable to find " << (*itOpt).first ); break;
					case AVERROR(EINVAL): TUTTLE_CERR( "ffmpegPreset: invalid value " << (*itOpt).second.c_str() << " for option " << (*itOpt).first ); break;
					case AVERROR(ERANGE): TUTTLE_CERR( "ffmpegPreset: invalid range for parameter " << (*itOpt).first << " : " << (*itOpt).second.c_str() ); break;
				}
			}
		}
		
		_stream->codec->bit_rate           = _bitRate;
		_stream->codec->bit_rate_tolerance = _bitRateTolerance;
		_stream->codec->width              = width();
		_stream->codec->height             = height();
		_stream->codec->time_base          = av_d2q( 1.0 / _fps, 100 );
		_stream->codec->gop_size           = _gopSize;
		_stream->codec->sample_rate        = 48000; ///< samples per second
		_stream->codec->channels           = 0;     ///< number of audio channels
		if( _bFrames )
		{
			_stream->codec->max_b_frames     = _bFrames;
			_stream->codec->b_frame_strategy = 0;
			_stream->codec->b_quant_factor   = 2.0;
		}
		_stream->codec->mb_decision = _mbDecision;

		int pixfmt_allowed = 0, k;
		if ( _codec->pix_fmts )
		{
			for ( k = 0; _codec->pix_fmts[k] != PIX_FMT_NONE; k++ )
			{
				if ( _codec->pix_fmts[k] == _out_pixelFormat )
				{
					pixfmt_allowed = 1;
					break;
				}
			}
		}
		else
		{
			// If a codec does not contain a list of supported pixel
			// formats, just assume that _out_PixelFormat is valid
			pixfmt_allowed = 1;
		}

		if ( !pixfmt_allowed )
		{
			// av_get_pix_fmt_name requires lavu 51.3.0 or higher
			TUTTLE_CERR( "ffmpegWriter: pixel format " << av_get_pix_fmt_name(_out_pixelFormat) << " not available in codec" );
			_out_pixelFormat = _codec->pix_fmts[0];
			TUTTLE_CERR( "ffmpegWriter: auto-selecting " << av_get_pix_fmt_name(_out_pixelFormat) );
		}
		_stream->codec->pix_fmt     = _out_pixelFormat;

		// formats that need global headers (mp4/mov/3gp/flv among them)
		// advertise it via AVFMT_GLOBALHEADER instead of requiring name checks
		if( _avformatOptions->oformat->flags & AVFMT_GLOBALHEADER )
			_stream->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;

		av_dump_format( _avformatOptions, 0, filename().c_str(), 1 );

		if( avcodec_open2( _stream->codec, _codec, NULL ) < 0 )
		{
			TUTTLE_CERR( "ffmpegWriter: unable to open codec." );
			freeFormat();
			return false;
		}

		if( !( _ofmt->flags & AVFMT_NOFILE ) )
		{
			if( avio_open2( &_avformatOptions->pb, filename().c_str(),
                                        AVIO_FLAG_WRITE, NULL, NULL ) < 0 )
			{
				TUTTLE_CERR( "ffmpegWriter: unable to open file." );
				freeFormat();
				return false;
			}
		}

		if( avformat_write_header( _avformatOptions, NULL ) < 0 )
		{
			TUTTLE_CERR( "ffmpegWriter: unable to write header." );
			freeFormat();
			return false;
		}
	}

	_error = CLEANUP;

	AVFrame* in_frame = avcodec_alloc_frame();
	avcodec_get_frame_defaults( in_frame );
	avpicture_fill( (AVPicture*)in_frame, in_buffer, in_pixelFormat, in_width, in_height );

	AVFrame* out_frame = avcodec_alloc_frame();
	avcodec_get_frame_defaults( out_frame );
	int out_picSize            = avpicture_get_size( _out_pixelFormat, width(), height() );
	boost::uint8_t* out_buffer = (boost::uint8_t*) av_malloc( out_picSize );
	avpicture_fill( (AVPicture*) out_frame, out_buffer, _out_pixelFormat, width(), height() );

	_sws_context = sws_getCachedContext( _sws_context, in_width, in_height, in_pixelFormat, width(), height(), _out_pixelFormat, SWS_BICUBIC, NULL, NULL, NULL );

	TUTTLE_COUT( "ffmpegWriter: input format: " << av_get_pix_fmt_name( in_pixelFormat ) );
	TUTTLE_COUT( "ffmpegWriter: output format: " << av_get_pix_fmt_name( _out_pixelFormat ) );

	if( !_sws_context )
	{
		TUTTLE_CERR( "ffmpeg-conversion failed (" << in_pixelFormat << "->" << _out_pixelFormat << ")." );
		return false;
	}
	int error = sws_scale( _sws_context, in_frame->data, in_frame->linesize, 0, height(), out_frame->data, out_frame->linesize );
	if( error < 0 )
	{
		TUTTLE_CERR( "ffmpeg-conversion failed (" << in_pixelFormat << "->" << _out_pixelFormat << ")." );
		return false;
	}

	int ret = 0;
	if( ( _avformatOptions->oformat->flags & AVFMT_RAWPICTURE ) != 0 )
	{
		AVPacket pkt;
		av_init_packet( &pkt );
		pkt.flags       |= AV_PKT_FLAG_KEY;
		pkt.stream_index = _stream->index;
		pkt.data         = (boost::uint8_t*) out_frame;
		pkt.size         = sizeof( AVPicture );
		ret              = av_interleaved_write_frame( _avformatOptions, &pkt );
	}
	else
	{
		AVPacket pkt;
		int hasFrame = 0;
		av_init_packet( &pkt );
		pkt.size = 0;
		pkt.data = NULL;
		pkt.stream_index = _stream->index;

		if( _stream->codec->coded_frame && _stream->codec->coded_frame->pts != static_cast<boost::int64_t>( AV_NOPTS_VALUE ) )
			pkt.pts = av_rescale_q( _stream->codec->coded_frame->pts, _stream->codec->time_base, _stream->time_base );

		if( _stream->codec->coded_frame && _stream->codec->coded_frame->key_frame )
			pkt.flags |= AV_PKT_FLAG_KEY;

		out_frame->pts = pts++;
		ret = avcodec_encode_video2( _stream->codec, &pkt, out_frame,  &hasFrame );
		if ( ret < 0 )
			return false;

		if ( hasFrame )
		{
			ret = av_interleaved_write_frame( _avformatOptions, &pkt );
			if ( ret < 0 )
			{
				TUTTLE_CERR( "ffmpegWriter: error writing packet to file" );
				return false;
			}
		}
	}

	av_free( out_buffer );
	av_free( out_frame );
	av_free( in_frame );
	// in_buffer not free (function parameter)

	if( ret )
	{
		TUTTLE_CERR( "ffmpegWriter: error writing frame to file." );
		return false;
	}

	_error = SUCCESS;
	return true;
}
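
Note: the preset loop in Example #28 applies each option with av_opt_set() and switches on the individual error codes. A minimal sketch of the AVDictionary alternative follows; the option names are hypothetical, and the only assumption is the stock libavutil dictionary API. avcodec_open2() consumes the options it recognizes and leaves the unknown ones in the dictionary, so a single pass after opening reports everything that was rejected.

#include <stdio.h>
#include <libavcodec/avcodec.h>
#include <libavutil/dict.h>

static int open_with_options(AVCodecContext *ctx, AVCodec *codec)
{
	AVDictionary *opts = NULL;
	AVDictionaryEntry *e = NULL;
	int ret;

	/* hypothetical preset key/value pairs */
	av_dict_set(&opts, "preset", "medium", 0);
	av_dict_set(&opts, "crf", "23", 0);

	ret = avcodec_open2(ctx, codec, &opts);

	/* anything still in 'opts' was not recognized by the codec */
	while ((e = av_dict_get(opts, "", e, AV_DICT_IGNORE_SUFFIX)))
		fprintf(stderr, "unrecognized codec option: %s=%s\n", e->key, e->value);

	av_dict_free(&opts);
	return ret;
}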
Example #29
0
static bool vaapi_encode(void *data, struct encoder_frame *frame,
		struct encoder_packet *packet, bool *received_packet)
{
	struct vaapi_encoder *enc     = data;
	AVFrame *             hwframe = NULL;
	AVPacket              av_pkt;
	int                   got_packet;
	int                   ret;

	hwframe = av_frame_alloc();
	if (!hwframe) {
		warn("vaapi_encode: failed to allocate hw frame");
		return false;
	}

	ret = av_hwframe_get_buffer(enc->vaframes_ref, hwframe, 0);
	if (ret < 0) {
		warn("vaapi_encode: failed to get buffer for hw frame: %s",
				av_err2str(ret));
		goto fail;
	}

	copy_data(enc->vframe, frame, enc->height, enc->context->pix_fmt);

	enc->vframe->pts = frame->pts;
	hwframe->pts     = frame->pts;
	hwframe->width   = enc->vframe->width;
	hwframe->height  = enc->vframe->height;

	ret = av_hwframe_transfer_data(hwframe, enc->vframe, 0);
	if (ret < 0) {
		warn("vaapi_encode: failed to upload hw frame: %s",
				av_err2str(ret));
		goto fail;
	}

	ret = av_frame_copy_props(hwframe, enc->vframe);
	if (ret < 0) {
		warn("vaapi_encode: failed to copy props to hw frame: %s",
				av_err2str(ret));
		goto fail;
	}

	av_init_packet(&av_pkt);

#if LIBAVFORMAT_VERSION_INT >= AV_VERSION_INT(57, 40, 101)
	ret = avcodec_send_frame(enc->context, hwframe);
	if (ret == 0)
		ret = avcodec_receive_packet(enc->context, &av_pkt);

	got_packet = (ret == 0);

	if (ret == AVERROR_EOF || ret == AVERROR(EAGAIN))
		ret = 0;
#else
	ret = avcodec_encode_video2(
			enc->context, &av_pkt, hwframe, &got_packet);
#endif
	if (ret < 0) {
		warn("vaapi_encode: Error encoding: %s", av_err2str(ret));
		goto fail;
	}

	if (got_packet && av_pkt.size) {
		if (enc->first_packet) {
			uint8_t *new_packet;
			size_t   size;

			enc->first_packet = false;
			obs_extract_avc_headers(av_pkt.data, av_pkt.size,
					&new_packet, &size, &enc->header,
					&enc->header_size, &enc->sei,
					&enc->sei_size);

			da_copy_array(enc->buffer, new_packet, size);
			bfree(new_packet);
		} else {
			da_copy_array(enc->buffer, av_pkt.data, av_pkt.size);
		}

		packet->pts      = av_pkt.pts;
		packet->dts      = av_pkt.dts;
		packet->data     = enc->buffer.array;
		packet->size     = enc->buffer.num;
		packet->type     = OBS_ENCODER_VIDEO;
		packet->keyframe = obs_avc_keyframe(packet->data, packet->size);
		*received_packet = true;
	} else {
		*received_packet = false;
	}

	av_packet_unref(&av_pkt);
	av_frame_free(&hwframe);
	return true;

fail:
	av_frame_free(&hwframe);
	return false;
}
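
Note: the newer #if branch in Example #29 covers the steady state only, where one avcodec_send_frame() is paired with one avcodec_receive_packet(). A minimal sketch of draining the encoder at end of stream follows; write_pkt is a hypothetical callback standing in for whatever the caller does with finished packets.

#include <libavcodec/avcodec.h>

static int flush_encoder(AVCodecContext *ctx, AVPacket *pkt,
                         int (*write_pkt)(AVPacket *pkt, void *opaque),
                         void *opaque)
{
	/* a NULL frame puts the encoder into draining mode */
	int ret = avcodec_send_frame(ctx, NULL);
	if (ret < 0)
		return ret;

	for (;;) {
		ret = avcodec_receive_packet(ctx, pkt);
		if (ret == AVERROR_EOF)
			return 0;          /* all delayed packets delivered */
		if (ret < 0)
			return ret;
		ret = write_pkt(pkt, opaque);
		av_packet_unref(pkt);
		if (ret < 0)
			return ret;
	}
}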
Example #30
-1
static int
artwork_rescale(AVFormatContext *src_ctx, int s, int out_w, int out_h, int format, struct evbuffer *evbuf)
{
  uint8_t *buf;
  uint8_t *outbuf;

  AVCodecContext *src;

  AVFormatContext *dst_ctx;
  AVCodecContext *dst;
  AVOutputFormat *dst_fmt;
  AVStream *dst_st;

  AVCodec *img_decoder;
  AVCodec *img_encoder;

  int64_t pix_fmt_mask;
  const enum PixelFormat *pix_fmts;

  AVFrame *i_frame;
  AVFrame *o_frame;

  struct SwsContext *swsctx;

  AVPacket pkt;
  int have_frame;

  int outbuf_len;

  int ret;

  src = src_ctx->streams[s]->codec;

  img_decoder = avcodec_find_decoder(src->codec_id);
  if (!img_decoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable decoder found for artwork %s\n", src_ctx->filename);

      return -1;
    }

#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 6)
  ret = avcodec_open2(src, img_decoder, NULL);
#else
  ret = avcodec_open(src, img_decoder);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for decoding: %s\n", strerror(AVUNERROR(ret)));

      return -1;
    }

  /* Set up output */
#if LIBAVFORMAT_VERSION_MAJOR >= 53 || (LIBAVFORMAT_VERSION_MAJOR == 52 && LIBAVFORMAT_VERSION_MINOR >= 45)
  /* FFmpeg 0.6 */
  dst_fmt = av_guess_format("image2", NULL, NULL);
#else
  dst_fmt = guess_format("image2", NULL, NULL);
#endif
  if (!dst_fmt)
    {
      DPRINTF(E_LOG, L_ART, "ffmpeg image2 muxer not available\n");

      ret = -1;
      goto out_close_src;
    }

  dst_fmt->video_codec = CODEC_ID_NONE;

  /* Try to keep same codec if possible */
  if ((src->codec_id == CODEC_ID_PNG) && (format & ART_CAN_PNG))
    dst_fmt->video_codec = CODEC_ID_PNG;
  else if ((src->codec_id == CODEC_ID_MJPEG) && (format & ART_CAN_JPEG))
    dst_fmt->video_codec = CODEC_ID_MJPEG;

  /* If not possible, select new codec */
  if (dst_fmt->video_codec == CODEC_ID_NONE)
    {
      if (format & ART_CAN_PNG)
	dst_fmt->video_codec = CODEC_ID_PNG;
      else if (format & ART_CAN_JPEG)
	dst_fmt->video_codec = CODEC_ID_MJPEG;
    }

  img_encoder = avcodec_find_encoder(dst_fmt->video_codec);
  if (!img_encoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable encoder found for codec ID %d\n", dst_fmt->video_codec);

      ret = -1;
      goto out_close_src;
    }

  dst_ctx = avformat_alloc_context();
  if (!dst_ctx)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for format context\n");

      ret = -1;
      goto out_close_src;
    }

  dst_ctx->oformat = dst_fmt;

#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_fmt->flags &= ~AVFMT_NOFILE;
#else
  ret = snprintf(dst_ctx->filename, sizeof(dst_ctx->filename), "evbuffer:%p", evbuf);
  if ((ret < 0) || (ret >= sizeof(dst_ctx->filename)))
    {
      DPRINTF(E_LOG, L_ART, "Output artwork URL too long\n");

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
  dst_st = avformat_new_stream(dst_ctx, NULL);
#else
  dst_st = av_new_stream(dst_ctx, 0);
#endif
  if (!dst_st)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for new output stream\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  dst = dst_st->codec;

#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 35)
  avcodec_get_context_defaults3(dst, NULL);
#else
  avcodec_get_context_defaults2(dst, AVMEDIA_TYPE_VIDEO);
#endif

  if (dst_fmt->flags & AVFMT_GLOBALHEADER)
    dst->flags |= CODEC_FLAG_GLOBAL_HEADER;

  dst->codec_id = dst_fmt->video_codec;
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
  dst->codec_type = AVMEDIA_TYPE_VIDEO;
#else
  dst->codec_type = CODEC_TYPE_VIDEO;
#endif

  pix_fmt_mask = 0;
  pix_fmts = img_encoder->pix_fmts;
  while (pix_fmts && (*pix_fmts != -1))
    {
      pix_fmt_mask |= ((int64_t)1 << *pix_fmts); /* 64-bit shift: format values can exceed 31 */
      pix_fmts++;
    }

  dst->pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src->pix_fmt, 1, NULL);

  if (dst->pix_fmt < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not determine best pixel format\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  DPRINTF(E_DBG, L_ART, "Selected pixel format: %d\n", dst->pix_fmt);

  dst->time_base.num = 1;
  dst->time_base.den = 25;

  dst->width = out_w;
  dst->height = out_h;

#if LIBAVFORMAT_VERSION_MAJOR <= 52 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR <= 1)
  ret = av_set_parameters(dst_ctx, NULL);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Invalid parameters for artwork output: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

  /* Open encoder */
#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 6)
  ret = avcodec_open2(dst, img_encoder, NULL);
#else
  ret = avcodec_open(dst, img_encoder);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for encoding: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst_ctx;
    }

  i_frame = avcodec_alloc_frame();
  o_frame = avcodec_alloc_frame();

  if (!i_frame || !o_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not allocate input/output frame\n");

      ret = -1;
      goto out_free_frames;
    }

  ret = avpicture_get_size(dst->pix_fmt, src->width, src->height);

  DPRINTF(E_DBG, L_ART, "Artwork buffer size: %d\n", ret);

  buf = (uint8_t *)av_malloc(ret);
  if (!buf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for artwork buffer\n");

      ret = -1;
      goto out_free_frames;
    }

  avpicture_fill((AVPicture *)o_frame, buf, dst->pix_fmt, src->width, src->height);

  swsctx = sws_getContext(src->width, src->height, src->pix_fmt,
			  dst->width, dst->height, dst->pix_fmt,
			  SWS_BICUBIC, NULL, NULL, NULL);
  if (!swsctx)
    {
      DPRINTF(E_LOG, L_ART, "Could not get SWS context\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Get frame */
  have_frame = 0;
  while (av_read_frame(src_ctx, &pkt) == 0)
    {
      if (pkt.stream_index != s)
	{
	  av_free_packet(&pkt);
	  continue;
	}

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 32)
      /* FFmpeg 0.6 */
      avcodec_decode_video2(src, i_frame, &have_frame, &pkt);
#else
      avcodec_decode_video(src, i_frame, &have_frame, pkt.data, pkt.size);
#endif

      break;
    }

  if (!have_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not decode artwork\n");

      av_free_packet(&pkt);
      sws_freeContext(swsctx);

      ret = -1;
      goto out_free_buf;
    }

  /* Scale */
#if LIBSWSCALE_VERSION_MAJOR >= 1 || (LIBSWSCALE_VERSION_MAJOR == 0 && LIBSWSCALE_VERSION_MINOR >= 9)
  /* FFmpeg 0.6, libav 0.6+ */
  sws_scale(swsctx, (const uint8_t * const *)i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#else
  sws_scale(swsctx, i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#endif

  sws_freeContext(swsctx);
  av_free_packet(&pkt);

  /* Open output file */
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_ctx->pb = avio_evbuffer_open(evbuf);
  ret = dst_ctx->pb ? 0 : -1; /* don't reuse the stale ret from avcodec_open2 */
#else
  ret = url_fopen(&dst_ctx->pb, dst_ctx->filename, URL_WRONLY);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open artwork destination buffer\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Encode frame */
  outbuf_len = dst->width * dst->height * 3;
  if (outbuf_len < FF_MIN_BUFFER_SIZE)
    outbuf_len = FF_MIN_BUFFER_SIZE;

  outbuf = (uint8_t *)av_malloc(outbuf_len);
  if (!outbuf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for encoded artwork buffer\n");

#if LIBAVFORMAT_VERSION_MAJOR >= 53
      avio_evbuffer_close(dst_ctx->pb);
#else
      url_fclose(dst_ctx->pb);
#endif

      ret = -1;
      goto out_free_buf;
    }

#if LIBAVCODEC_VERSION_MAJOR >= 54
  av_init_packet(&pkt);
  pkt.data = outbuf;
  pkt.size = outbuf_len;
  ret = avcodec_encode_video2(dst, &pkt, o_frame, &have_frame);
  if (!ret && have_frame && dst->coded_frame) 
    {
      dst->coded_frame->pts       = pkt.pts;
      dst->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
    }
  else if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }
#else
  ret = avcodec_encode_video(dst, outbuf, outbuf_len, o_frame);
  if (ret <= 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  av_init_packet(&pkt);
  pkt.stream_index = 0;
  pkt.data = outbuf;
  pkt.size = ret;
#endif

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_write_header(dst_ctx, NULL);
#else
  ret = av_write_header(dst_ctx);
#endif
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork header: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_interleaved_write_frame(dst_ctx, &pkt);

  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Error writing artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_write_trailer(dst_ctx);
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork trailer: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  switch (dst_fmt->video_codec)
    {
      case CODEC_ID_PNG:
	ret = ART_FMT_PNG;
	break;

      case CODEC_ID_MJPEG:
	ret = ART_FMT_JPEG;
	break;

      default:
	DPRINTF(E_LOG, L_ART, "Unhandled rescale output format\n");
	ret = -1;
	break;
    }

 out_fclose_dst:
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  avio_evbuffer_close(dst_ctx->pb);
#else
  url_fclose(dst_ctx->pb);
#endif
  av_free(outbuf);

 out_free_buf:
  av_free(buf);

 out_free_frames:
  if (i_frame)
    av_free(i_frame);
  if (o_frame)
    av_free(o_frame);
  avcodec_close(dst);

 out_free_dst_ctx:
  avformat_free_context(dst_ctx);

 out_close_src:
  avcodec_close(src);

  return ret;
}
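
Note: the mask-based avcodec_find_best_pix_fmt() used in Example #30 was later dropped from libavcodec. A minimal sketch of the list-based replacement follows, assuming a libavcodec new enough to provide avcodec_find_best_pix_fmt_of_list(); it takes the encoder's pix_fmts array directly, which also avoids the 64-format limit of a bitmask.

#include <libavcodec/avcodec.h>

static enum AVPixelFormat
pick_encoder_pix_fmt(const AVCodec *encoder, enum AVPixelFormat src_fmt)
{
	int loss = 0;

	if (!encoder->pix_fmts)            /* no list published: assume src is fine */
		return src_fmt;

	/* picks the list entry closest to src_fmt; 'loss' reports what is
	 * sacrificed (chroma resolution, depth, ...) by the conversion */
	return avcodec_find_best_pix_fmt_of_list(encoder->pix_fmts, src_fmt,
	                                         0 /* no alpha */, &loss);
}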