Code Example #1
bool update_sws_context(struct ffmpeg_source *s, AVFrame *frame)
{
	if (frame->width != s->sws_width
			|| frame->height != s->sws_height
			|| frame->format != s->sws_format) {
		if (s->sws_ctx != NULL)
			sws_freeContext(s->sws_ctx);

		if (frame->width <= 0 || frame->height <= 0) {
			FF_BLOG(LOG_ERROR, "unable to create a sws "
					"context that has a width(%d) or "
					"height(%d) of zero.", frame->width,
					frame->height);
			goto fail;
		}

		s->sws_ctx = sws_getContext(
			frame->width,
			frame->height,
			frame->format,
			frame->width,
			frame->height,
			AV_PIX_FMT_BGRA,
			SWS_BILINEAR,
			NULL, NULL, NULL);

		if (s->sws_ctx == NULL) {
			FF_BLOG(LOG_ERROR, "unable to create sws "
					"context with src{w:%d,h:%d,f:%d}->"
					"dst{w:%d,h:%d,f:%d}",
					frame->width, frame->height,
					frame->format, frame->width,
					frame->height, AV_PIX_FMT_BGRA);
			goto fail;
		}

		if (s->sws_data != NULL)
			bfree(s->sws_data);
		s->sws_data = bzalloc(frame->width * frame->height * 4);
		if (s->sws_data == NULL) {
			FF_BLOG(LOG_ERROR, "unable to allocate sws "
					"pixel data with size %d",
					frame->width * frame->height * 4);
			goto fail;
		}

		s->sws_linesize = frame->width * 4;
		s->sws_width = frame->width;
		s->sws_height = frame->height;
		s->sws_format = frame->format;
	}

	return true;

fail:
	if (s->sws_ctx != NULL)
		sws_freeContext(s->sws_ctx);
	s->sws_ctx = NULL;

	if (s->sws_data)
		bfree(s->sws_data);
	s->sws_data = NULL;

	s->sws_linesize = 0;
	s->sws_width = 0;
	s->sws_height = 0;
	s->sws_format = 0;

	return false;
}
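A minimal usage sketch for the function above. It relies only on the ffmpeg_source fields shown in the listing (sws_ctx, sws_data, sws_linesize); the helper name frame_to_bgra and its call pattern are hypothetical, not part of the original source:

/* Hypothetical caller: refresh the cached context, then convert one decoded
 * frame into the cached BGRA buffer. */
static bool frame_to_bgra(struct ffmpeg_source *s, AVFrame *frame)
{
	if (!update_sws_context(s, frame))
		return false;

	uint8_t *dst[4] = { s->sws_data, NULL, NULL, NULL };
	int dst_linesize[4] = { s->sws_linesize, 0, 0, 0 };

	sws_scale(s->sws_ctx, (const uint8_t *const *)frame->data,
			frame->linesize, 0, frame->height, dst, dst_linesize);
	return true;
}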
Code Example #2
int JPG_to_Pixel(const unsigned char *jpgBuff, int jpgSize, int pixelFmt, int pixelWidth, int pixelHeight, unsigned char *pixelBuff, int *pixelSize) {	
	AVFormatContext *formatContext;
	AVInputFormat *inputFormat;
	AVIOContext *ioContext;
	AVStream *stream;
	AVCodecContext *codecContext;
	AVCodec *codec;
	AVFrame *frame, *frame2;
	AVPacket packet;
	struct SwsContext *swsContext;
	int streamIndex;
	int gotFrame;
	int codecRet;
	int result = -1;

	av_register_all();
	formatContext = avformat_alloc_context();
	ioContext = avio_alloc_context((unsigned char *)jpgBuff, jpgSize, 0, NULL, NULL, NULL, NULL);
	inputFormat = av_find_input_format("mjpeg");
	av_probe_input_buffer2(ioContext, &inputFormat, NULL, NULL, 0, 0);
	formatContext->pb = ioContext;
	formatContext->iformat = inputFormat;
	avformat_open_input(&formatContext, NULL, NULL, NULL);
	avformat_find_stream_info(formatContext, NULL);

	av_init_packet(&packet);
	for (streamIndex = 0; streamIndex < formatContext->nb_streams; streamIndex++) {
		av_read_frame(formatContext, &packet);
		if (formatContext->streams[streamIndex]->codec->codec_type == AVMEDIA_TYPE_VIDEO && 0 < packet.size) {
			stream = formatContext->streams[streamIndex];
			codecContext = stream->codec;
			codec = avcodec_find_decoder(codecContext->codec_id);
			avcodec_open2(codecContext, codec, NULL);
			frame = avcodec_alloc_frame();
			codecRet = avcodec_decode_video2(codecContext, frame, &gotFrame, &packet);
			if (0 <= codecRet && 1 == gotFrame) {
				/* allocate a separate output picture; cloning the source
				 * frame would share its data buffers, so sws_scale below
				 * would write over its own input */
				frame2 = av_frame_alloc();
				frame2->format = PF(pixelFmt);
				frame2->width = pixelWidth;
				frame2->height = pixelHeight;
				avpicture_alloc((AVPicture *)frame2, (enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight);
				swsContext = sws_getContext(codecContext->width, codecContext->height, codecContext->pix_fmt, pixelWidth, pixelHeight, (AVPixelFormat)frame2->format, SWS_BICUBIC, NULL, NULL, NULL);   
				sws_scale(swsContext, (const uint8_t *const *)frame->data, frame->linesize, 0, codecContext->height, frame2->data, frame2->linesize);  
				sws_freeContext(swsContext);

				*pixelSize = avpicture_layout((const AVPicture *)frame2, (enum AVPixelFormat)frame2->format, pixelWidth, pixelHeight, pixelBuff, *pixelSize);
				result = *pixelSize;

				avpicture_free((AVPicture *)frame2);
				av_frame_free(&frame2);
			}	
			if (1 == codecContext->refcounted_frames) av_frame_unref(frame); 
			avcodec_free_frame(&frame);
			avcodec_close(codecContext);
		}
		av_free_packet(&packet);
		if (-1 != result)
			break;
	}

	avformat_close_input(&formatContext); /* frees formatContext and sets it to NULL */
	av_free(ioContext->buffer);
	av_free(ioContext);
	return result;
}
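A hypothetical caller for JPG_to_Pixel, shown only to make the in/out parameters concrete. The rgb24_fmt value is an assumption, since the PF() mapping macro above is project-specific:

#include <stdlib.h>

/* Hypothetical caller sketch: decode an in-memory JPEG into a freshly
 * allocated RGB24 buffer, or return NULL on failure. */
static unsigned char *decode_jpeg_to_rgb24(const unsigned char *jpg, int jpgSize,
		int w, int h, int rgb24_fmt /* a value PF() maps to RGB24 */)
{
	int size = w * h * 3;	/* RGB24 is 3 bytes per pixel */
	unsigned char *buf = (unsigned char *)malloc(size);
	if (buf == NULL)
		return NULL;
	if (JPG_to_Pixel(jpg, jpgSize, rgb24_fmt, w, h, buf, &size) < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}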
Code Example #3
File: VideoFFmpeg.cpp Project: GeniaPenksik/blender
int VideoFFmpeg::openStream(const char *filename, AVInputFormat *inputFormat, AVDictionary **formatParams)
{
	AVFormatContext *formatCtx = NULL;
	int				i, videoStream;
	AVCodec			*codec;
	AVCodecContext	*codecCtx;

	if (avformat_open_input(&formatCtx, filename, inputFormat, formatParams)!=0)
		return -1;

	if (avformat_find_stream_info(formatCtx, NULL) < 0)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

	/* Find the first video stream */
	videoStream=-1;
	for (i=0; i<formatCtx->nb_streams; i++)
	{
		if (formatCtx->streams[i] &&
			get_codec_from_stream(formatCtx->streams[i]) && 
			(get_codec_from_stream(formatCtx->streams[i])->codec_type==AVMEDIA_TYPE_VIDEO))
		{
			videoStream=i;
			break;
		}
	}

	if (videoStream==-1) 
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

	codecCtx = get_codec_from_stream(formatCtx->streams[videoStream]);

	/* Find the decoder for the video stream */
	codec=avcodec_find_decoder(codecCtx->codec_id);
	if (codec==NULL) 
	{
		avformat_close_input(&formatCtx);
		return -1;
	}
	codecCtx->workaround_bugs = 1;
	if (avcodec_open2(codecCtx, codec, NULL) < 0)
	{
		avformat_close_input(&formatCtx);
		return -1;
	}

#ifdef FFMPEG_OLD_FRAME_RATE
	if (codecCtx->frame_rate>1000 && codecCtx->frame_rate_base==1)
		codecCtx->frame_rate_base=1000;
	m_baseFrameRate = (double)codecCtx->frame_rate / (double)codecCtx->frame_rate_base;
#else
	m_baseFrameRate = av_q2d(av_get_r_frame_rate_compat(formatCtx->streams[videoStream]));
#endif
	if (m_baseFrameRate <= 0.0) 
		m_baseFrameRate = defFrameRate;

	m_codec = codec;
	m_codecCtx = codecCtx;
	m_formatCtx = formatCtx;
	m_videoStream = videoStream;
	m_frame = avcodec_alloc_frame();
	m_frameDeinterlaced = avcodec_alloc_frame();

	// allocate buffer if deinterlacing is required
	avpicture_fill((AVPicture*)m_frameDeinterlaced, 
		(uint8_t*)MEM_callocN(avpicture_get_size(
		m_codecCtx->pix_fmt,
		m_codecCtx->width, m_codecCtx->height), 
		"ffmpeg deinterlace"), 
		m_codecCtx->pix_fmt, m_codecCtx->width, m_codecCtx->height);

	// check if the pixel format supports Alpha
	if (m_codecCtx->pix_fmt == PIX_FMT_RGB32 ||
		m_codecCtx->pix_fmt == PIX_FMT_BGR32 ||
		m_codecCtx->pix_fmt == PIX_FMT_RGB32_1 ||
		m_codecCtx->pix_fmt == PIX_FMT_BGR32_1) 
	{
		// allocate buffer to store final decoded frame
		m_format = RGBA32;
		// allocate sws context
		m_imgConvertCtx = sws_getContext(
			m_codecCtx->width,
			m_codecCtx->height,
			m_codecCtx->pix_fmt,
			m_codecCtx->width,
			m_codecCtx->height,
			PIX_FMT_RGBA,
			SWS_FAST_BILINEAR,
			NULL, NULL, NULL);
	} else
	{
		// allocate buffer to store final decoded frame
		m_format = RGB24;
		// allocate sws context
		m_imgConvertCtx = sws_getContext(
			m_codecCtx->width,
			m_codecCtx->height,
			m_codecCtx->pix_fmt,
			m_codecCtx->width,
			m_codecCtx->height,
			PIX_FMT_RGB24,
			SWS_FAST_BILINEAR,
			NULL, NULL, NULL);
	}
	m_frameRGB = allocFrameRGB();

	if (!m_imgConvertCtx) {
		avcodec_close(m_codecCtx);
		m_codecCtx = NULL;
		avformat_close_input(&m_formatCtx);
		m_formatCtx = NULL;
		av_free(m_frame);
		m_frame = NULL;
		MEM_freeN(m_frameDeinterlaced->data[0]);
		av_free(m_frameDeinterlaced);
		m_frameDeinterlaced = NULL;
		MEM_freeN(m_frameRGB->data[0]);
		av_free(m_frameRGB);
		m_frameRGB = NULL;
		return -1;
	}
	return 0;
}
Code Example #4
File: writeffmpeg.c Project: sonyomega/blender-git
static AVStream *alloc_video_stream(RenderData *rd, int codec_id, AVFormatContext *of,
                                    int rectx, int recty, char *error, int error_size)
{
	AVStream *st;
	AVCodecContext *c;
	AVCodec *codec;
	AVDictionary *opts = NULL;

	error[0] = '\0';

	st = avformat_new_stream(of, NULL);
	if (!st) return NULL;
	st->id = 0;

	/* Set up the codec context */
	
	c = st->codec;
	c->codec_id = codec_id;
	c->codec_type = AVMEDIA_TYPE_VIDEO;

	/* Get some values from the current render settings */
	
	c->width = rectx;
	c->height = recty;

	/* FIXME: Really bad hack (tm) for NTSC support */
	if (ffmpeg_type == FFMPEG_DV && rd->frs_sec != 25) {
		c->time_base.den = 2997;
		c->time_base.num = 100;
	}
	else if ((float) ((int) rd->frs_sec_base) == rd->frs_sec_base) {
		c->time_base.den = rd->frs_sec;
		c->time_base.num = (int) rd->frs_sec_base;
	}
	else {
		c->time_base.den = rd->frs_sec * 100000;
		c->time_base.num = (int) ((double) rd->frs_sec_base * 100000);
	}
	
	c->gop_size = ffmpeg_gop_size;
	c->bit_rate = ffmpeg_video_bitrate * 1000;
	c->rc_max_rate = rd->ffcodecdata.rc_max_rate * 1000;
	c->rc_min_rate = rd->ffcodecdata.rc_min_rate * 1000;
	c->rc_buffer_size = rd->ffcodecdata.rc_buffer_size * 1024;

#if 0
	/* these options are not set in ffmpeg.c and lead to artifacts with MPEG-4
	 * see #33586: Encoding to mpeg4 makes first frame(s) blocky
	 */
	c->rc_initial_buffer_occupancy = rd->ffcodecdata.rc_buffer_size * 3 / 4;
	c->rc_buffer_aggressivity = 1.0;
#endif

	c->me_method = ME_EPZS;
	
	codec = avcodec_find_encoder(c->codec_id);
	if (!codec)
		return NULL;
	
	/* Be sure to use the correct pixel format (e.g. RGB, YUV) */

	if (codec->pix_fmts) {
		c->pix_fmt = codec->pix_fmts[0];
	}
	else {
		/* makes HuffYUV happy ... */
		c->pix_fmt = PIX_FMT_YUV422P;
	}

	if (ffmpeg_type == FFMPEG_XVID) {
		/* arghhhh ... */
		c->pix_fmt = PIX_FMT_YUV420P;
		c->codec_tag = (('D' << 24) + ('I' << 16) + ('V' << 8) + 'X');
	}

	if (codec_id == AV_CODEC_ID_H264) {
		/* correct wrong default ffmpeg param which crash x264 */
		c->qmin = 10;
		c->qmax = 51;
	}
	
	/* Keep lossless encodes in the RGB domain. */
	if (codec_id == AV_CODEC_ID_HUFFYUV) {
		/* HUFFYUV was PIX_FMT_YUV422P before */
		c->pix_fmt = PIX_FMT_RGB32;
	}

	if (codec_id == AV_CODEC_ID_FFV1) {
		c->pix_fmt = PIX_FMT_RGB32;
	}

	if (codec_id == AV_CODEC_ID_QTRLE) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = PIX_FMT_ARGB;
		}
	}

	if (codec_id == AV_CODEC_ID_PNG) {
		if (rd->im_format.planes == R_IMF_PLANES_RGBA) {
			c->pix_fmt = PIX_FMT_RGBA;
		}
	}

	if ((of->oformat->flags & AVFMT_GLOBALHEADER)
#if 0
	    || !strcmp(of->oformat->name, "mp4")
	    || !strcmp(of->oformat->name, "mov")
	    || !strcmp(of->oformat->name, "3gp")
#endif
	    )
	{
		PRINT("Using global header\n");
		c->flags |= CODEC_FLAG_GLOBAL_HEADER;
	}
	
	/* Determine whether we are encoding interlaced material or not */
	if (rd->mode & R_FIELDS) {
		PRINT("Encoding interlaced video\n");
		c->flags |= CODEC_FLAG_INTERLACED_DCT;
		c->flags |= CODEC_FLAG_INTERLACED_ME;
	}

	/* xasp & yasp got float lately... */

	st->sample_aspect_ratio = c->sample_aspect_ratio = av_d2q(((double) rd->xasp / (double) rd->yasp), 255);

	set_ffmpeg_properties(rd, c, "video", &opts);

	if (avcodec_open2(c, codec, &opts) < 0) {
		BLI_strncpy(error, IMB_ffmpeg_last_error(), error_size);
		av_dict_free(&opts);
		return NULL;
	}
	av_dict_free(&opts);

	current_frame = alloc_picture(c->pix_fmt, c->width, c->height);

	img_convert_ctx = sws_getContext(c->width, c->height, PIX_FMT_BGR32, c->width, c->height, c->pix_fmt, SWS_BICUBIC,
	                                 NULL, NULL, NULL);
	return st;
}
Code Example #5
File: CFFmpegJni.cpp Project: hongyunxp/mxplayer
/*
 * Class:     com_example_testffmpeg_CFFmpegJni
 * Method:    IPlay
 * Signature: ()I
 */
jint Java_com_example_testffmpeg_CFFmpegJni_IPlay(JNIEnv *env, jobject thiz)
{
	/// Return value
	int nRet = -1;
	/// Close any input that is still open
	if(NULL != m_pFormatCtx)
	{
		/// avformat_close_input() frees the context and resets the pointer to NULL
		avformat_close_input(&m_pFormatCtx);
	}

	if(NULL == m_pFormatCtx)
	{
		/// Open the input file
		if(0 != (nRet = avformat_open_input(&m_pFormatCtx, m_szURLPath, 0, NULL/*&m_pDictOptions*/)))
		{
			char szTemp[256];
			memset(szTemp, 0x00, sizeof(szTemp));
			av_strerror(nRet, szTemp, 255);
			/// Log the error
			LOGD("Failed to open %s, Error Code = %d, Error = %s",
					m_szURLPath, nRet, szTemp);
			return nRet;
		}
	}

	// m_pFormatCtx->max_analyze_duration = 1000;
	// m_pFormatCtx->probesize = 2048;
	if(0 > avformat_find_stream_info(m_pFormatCtx, NULL))
	{
		LOGD("Couldn't find stream information.");
		return -1;
	}

	int nVideoIndex = -1;
	for(int i = 0; i < m_pFormatCtx->nb_streams; i++)
	{
		if(AVMEDIA_TYPE_VIDEO == m_pFormatCtx->streams[i]->codec->codec_type)
		{
			nVideoIndex = i;
			break;
		}
	}
	if(-1 == nVideoIndex)
	{
		LOGD("Didn't find a video stream.");
		return -1;
	}

	AVCodecContext* pCodecCtx = m_pFormatCtx->streams[nVideoIndex]->codec;
	AVCodec* pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if(NULL == pCodec)
	{
		LOGD("Codec not found.");
		return -1;
	}

	if(pCodec->capabilities & CODEC_CAP_TRUNCATED)
	{
		pCodecCtx->flags |= CODEC_FLAG_TRUNCATED;
	}

	if(0 > avcodec_open2(pCodecCtx, pCodec, NULL))
	{
		LOGD("Could not open codec.");
		return -1;
	}

	/// Decoded frame and conversion target frame
	AVFrame	*pFrame = NULL, *pFrameYUV = NULL;
	pFrame = avcodec_alloc_frame();
	pFrameYUV = avcodec_alloc_frame();
	/// Allocate the conversion buffer (RGB565 output)
	int nConvertSize = avpicture_get_size(PIX_FMT_RGB565, iWidth, iHeight);
	uint8_t* pConvertbuffer = new uint8_t[nConvertSize];
	avpicture_fill((AVPicture *)pFrameYUV, pConvertbuffer, PIX_FMT_RGB565, iWidth, iHeight);

	/// Decoding state
	int nCodecRet, nHasGetPicture;
	/// Packet that will receive the compressed data
	int nPacketSize = pCodecCtx->width * pCodecCtx->height;
	AVPacket* pAVPacket = (AVPacket *)malloc(sizeof(AVPacket));
	av_new_packet(pAVPacket, nPacketSize);

	/// Dump the stream information of the input
	av_dump_format(m_pFormatCtx, 0, m_szURLPath, 0);
	/// Mark playback as running
	m_bIsPlaying = true;

	/// Scaling context
	struct SwsContext* img_convert_ctx = NULL;
	/// Convert the decoder's pixel format to RGB565 (the pFrameYUV name is historical)
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		iWidth, iHeight, PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);
	/// Read frames
	while(0 <= av_read_frame(m_pFormatCtx, pAVPacket) && true == m_bIsPlaying)
	{
		/// Only handle packets from the video stream
		if(nVideoIndex == pAVPacket->stream_index)
		{
			/// Decode the packet
			nCodecRet = avcodec_decode_video2(pCodecCtx, pFrame, &nHasGetPicture, pAVPacket);
			if(0 < nHasGetPicture)
			{
				/// Convert the decoded frame to RGB565
				sws_scale(img_convert_ctx, (const uint8_t* const* )pFrame->data, pFrame->linesize,
						0, pCodecCtx->height, pFrameYUV->data, pFrameYUV->linesize);
				/// Hand the converted buffer to the display callback
				e_DisplayCallBack(env, pConvertbuffer, nConvertSize);
			}
		}
		/// Free the packet data that av_read_frame allocated
		av_free_packet(pAVPacket);
	}
	/// Free the scaling context
	sws_freeContext(img_convert_ctx);

	/// Free the conversion buffer
	delete[] pConvertbuffer;
	pConvertbuffer = NULL;
	/// Free the frame objects
	av_free(pFrame); pFrame = NULL;
	av_free(pFrameYUV); pFrameYUV = NULL;

	/// Close the decoder
	avcodec_close(pCodecCtx); pCodecCtx = NULL;
	/// avformat_close_input() frees the context and resets the pointer to NULL
	avformat_close_input(&m_pFormatCtx);
	return nRet;
}
Code Example #6
File: skinload.c Project: AungWinnHtut/mplayer
/* reads a complete image as is into image buffer */
static image *pngRead(skin_t *skin, const char *fname)
{
    unsigned int i;
    guiImage bmp;
    image *bf;
    char *filename = NULL;
    FILE *fp;

    if(!strcasecmp(fname, "NULL")) return 0;

    /* look for the file first as given, then as <skindir>/<fname>.png */
    if(!(fp = fopen(fname, "rb")))
    {
        filename = calloc(1, strlen(skin->skindir) + strlen(fname) + 6);
        sprintf(filename, "%s/%s.png", skin->skindir, fname);
        if(!(fp = fopen(filename, "rb")))
        {
            mp_msg(MSGT_GPLAYER, MSGL_ERR, "[png] cannot find image %s\n", filename);
            free(filename);
            return 0;
        }
    }
    fclose(fp);

    for (i=0; i < skin->imagecount; i++)
        if(!strcmp(fname, skin->images[i]->name))
        {
            mp_msg(MSGT_GPLAYER, MSGL_DBG2, "[png] skinfile %s already exists\n", fname);
            free(filename);
            return skin->images[i];
        }
    (skin->imagecount)++;
    skin->images = realloc(skin->images, sizeof(image *) * skin->imagecount);
    bf = skin->images[(skin->imagecount) - 1] = calloc(1, sizeof(image));
    bf->name = strdup(fname);
    bpRead(filename ? filename : fname, &bmp);
    free(filename);
    bf->width = bmp.Width; bf->height = bmp.Height;

    bf->size = bf->width * bf->height * skin->desktopbpp / 8;
    if (skin->desktopbpp == 32)
      bf->data = bmp.Image;
    else {
      const uint8_t *src[4] = { bmp.Image, NULL, NULL, NULL};
      int src_stride[4] = { 4 * bmp.Width, 0, 0, 0 };
      uint8_t *dst[4] = { NULL, NULL, NULL, NULL };
      int dst_stride[4];
      enum AVPixelFormat out_pix_fmt = PIX_FMT_NONE;
      struct SwsContext *sws;
      if      (skin->desktopbpp == 16) out_pix_fmt = PIX_FMT_RGB555;
      else if (skin->desktopbpp == 24) out_pix_fmt = PIX_FMT_RGB24;
      av_image_fill_linesizes(dst_stride, out_pix_fmt, bmp.Width);
      sws = sws_getContext(bmp.Width, bmp.Height, PIX_FMT_RGB32,
                           bmp.Width, bmp.Height, out_pix_fmt,
                           SWS_POINT, NULL, NULL, NULL);
      bf->data = malloc(bf->size);
      dst[0] = bf->data;
      sws_scale(sws, src, src_stride, 0, bmp.Height, dst, dst_stride);
      sws_freeContext(sws);
      free(bmp.Image);
    }
    return bf;
}
Code Example #7
File: recorder.cpp Project: amazingyyc/DeepRed
/**
 * Set the camera preview size and the aspect ratio of the user-visible preview
 */
int Recorder::set_preview_size_and_ratio(int width, int height, float ratio)
{
	if(0 >= width || 0 >= height || 0 >= ratio)
		return -1;

	/**
	 * Reset the current scaling state based on the incoming parameters
	 */
	if(NULL != src_frame_buffer)
		av_free(src_frame_buffer);

	if(NULL != dst_frame_buffer)
		av_free(dst_frame_buffer);

	if(NULL != src_frame)
		av_frame_free(&src_frame);

	if(NULL != dst_frame)
		av_frame_free(&dst_frame);

	if (NULL != sws_ctx)
		sws_freeContext(sws_ctx);

	src_width  = -1;
	src_height = -1;
	src_frame_buffer = NULL;

	dst_width = -1;
	dst_height = -1;
	dst_frame_buffer = NULL;

	src_frame = NULL;
	dst_frame = NULL;

	sws_ctx = NULL;

	/**
	 * Recompute the size from the camera preview ratio and the ratio the user sees
	 */
	// note: the image still needs to be rotated; the camera orientation differs from the user preview
	if (1.0 * height / width > ratio)
	{
		src_width  = (int)(1.0 * ratio * width);
		src_height = width;
	}
	else
	{
		src_width = height;
		src_height = (int)(1.0 * height / ratio);
	}

	// normalize to even dimensions
	src_width  -= src_width  % 2;
	src_height -= src_height % 2;

	/**
	 * Only convert the pixel format; no scaling is performed
	 */
	dst_width  = src_width;
	dst_height = src_height;

	/**
	 * Allocate memory for the source frame
	 */
	src_frame = av_frame_alloc();
	if(!src_frame)
		return -1;

	int src_frame_size = avpicture_get_size(SRC_VIDEO_PIX_FMT, src_width, src_height);
	src_frame_buffer = (uint8_t*)av_malloc(src_frame_size);
	avpicture_fill((AVPicture *)src_frame, src_frame_buffer, SRC_VIDEO_PIX_FMT, src_width, src_height);

	src_frame->width  = src_width;
	src_frame->height = src_height;
	src_frame->format = SRC_VIDEO_PIX_FMT;

	/**
	 * Allocate memory for the destination frame
	 */
	dst_frame = av_frame_alloc();
	if(!dst_frame)
		return -1;

	int dst_frame_size = avpicture_get_size(VIDEO_PIX_FMT, dst_width, dst_height);
	dst_frame_buffer = (uint8_t*)av_malloc(dst_frame_size);
	avpicture_fill((AVPicture *)dst_frame, dst_frame_buffer, VIDEO_PIX_FMT, dst_width, dst_height);

	dst_frame->width = dst_width;
	dst_frame->height = dst_height;
	dst_frame->format = VIDEO_PIX_FMT;

	/**
	 * Scaling context
	 */
	// initialize the converter; src and dst sizes are equal here, so only the pixel format changes
	sws_ctx = sws_getContext(src_width, src_height, SRC_VIDEO_PIX_FMT,
			dst_width, dst_height, VIDEO_PIX_FMT,
		SWS_FAST_BILINEAR, NULL, NULL, NULL);

	if(!sws_ctx)
		return -1;

	return 1;
}
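A sketch of the conversion step that would follow a successful set_preview_size_and_ratio(). The method name convert_preview_frame and the assumption that the caller has already cropped/rotated the camera buffer to src_width x src_height are hypothetical, not from the project:

#include <cstring>

// Hypothetical follow-up step: copy one camera buffer into src_frame and
// convert it into dst_frame (same size, different pixel format).
int Recorder::convert_preview_frame(const uint8_t *camera_data, int camera_size)
{
	if (NULL == sws_ctx || NULL == camera_data)
		return -1;

	// assumes camera_size == avpicture_get_size(SRC_VIDEO_PIX_FMT, src_width, src_height)
	memcpy(src_frame_buffer, camera_data, camera_size);

	sws_scale(sws_ctx, src_frame->data, src_frame->linesize,
			0, src_height, dst_frame->data, dst_frame->linesize);
	return 1;
}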
Code Example #8
File: player.c Project: phreax/asciiplayer
int main(int argc, char **argv) {

  if(argc<2) {
    fprintf(stderr,"Missing arguments\n"); 
    fprintf(stderr,"Usage: %s [-g termsize] filename\n",argv[0]); 
    return -1;
  }
  char *filename = argv[1];
  char *screensize = 0;
  // screen resolution
  int tw = TERM_W;
  int th = TERM_H;

  /* parse arguments*/
  if(argc>3) {
    int i;
    if(strcmp(argv[1],"-g")==0) {
      screensize = argv[2];
      filename = argv[3];
      sscanf(screensize,"%dx%d",&tw,&th); 
    }
  }
  

  printf("before init w = %d, h = %d\n",tw,th);
  init_screen(tw,th);
  
  /* init all codecs */
  av_register_all();

  AVFormatContext *pFmtCtx;
  struct SwsContext *pSwsCtx;

  pFmtCtx = avformat_alloc_context();
  if(avformat_open_input(&pFmtCtx, filename, NULL, NULL) != 0) {
    fprintf(stderr,"Failed to open file: %s\n",argv[1]); 
    return -1;
  }

  if(avformat_find_stream_info(pFmtCtx,NULL) < 0) { 
    fprintf(stderr,"No stream found!\n"); 
    return -1;
  }

  av_dump_format(pFmtCtx, 0, filename,0);

  int i;
  int videoStream;
  AVCodecContext *pCodecCtx;

  videoStream = -1;
  for(i=0; i<pFmtCtx->nb_streams; i++) {
    if(pFmtCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
      videoStream = i;
      break;
    }
  }

  if(videoStream == -1) {
    fprintf(stderr,"No stream found!\n"); 
    return -1;
  }

  pCodecCtx = pFmtCtx->streams[videoStream]->codec;

  /* find suitable codec */
 
  AVCodec * pCodec;
  
  pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
  if(!pCodec) {
    fprintf(stderr,"No suitable decoder found!\n"); 
    return -1;
  }

  if(avcodec_open2(pCodecCtx,pCodec,NULL)<0) {
    fprintf(stderr,"Could not open codec!\n"); 
    return -1;

  }

  AVFrame *pFrame;
  AVFrame *pPict;

  /* allocate data structs */
  pFrame = avcodec_alloc_frame();
  pPict  = avcodec_alloc_frame();

  uint8_t *buffer;
  int szPict;
  int sw,sh;

  sw = pCodecCtx->width;
  sh = pCodecCtx->height;

  // allocate buffer of picture size
  szPict = avpicture_get_size(PIX_FMT_RGB24, sw,sh);

  buffer = (uint8_t *)av_malloc(szPict*sizeof(uint8_t));

  /* associate frame with out buffer */
  avpicture_fill( (AVPicture *)pPict,buffer,PIX_FMT_RGB24, sw, sh);

  int frameFinished;
  AVPacket packet;

  /* init scale context to scale to terminal resolution */
  pSwsCtx = sws_getContext(sw,sh,pCodecCtx->pix_fmt,tw,th,PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);

  i=0;

  /* read as long we have packets in the stream */
  while(av_read_frame(pFmtCtx,&packet)>=0) {
    
     /* we only need packets of our video stream*/
    if(packet.stream_index == videoStream) {
      
      /* decode video frame */
      avcodec_decode_video2(pCodecCtx,pFrame,&frameFinished,
                            &packet);

      if(frameFinished) {
        /* scale, display and sleep for ~30ms*/
        sws_scale(pSwsCtx,pFrame->data, pFrame->linesize,0,sh,pPict->data, pPict->linesize);
        ascii_art(pPict);
        usleep(30000);
    
      }
    }
    /* free current packet struct */
    av_free_packet(&packet);
  }

  /* tidy up.. */
  av_free(buffer);
  av_free(pPict);
  av_free(pFrame);
  sws_freeContext(pSwsCtx);
  avcodec_close(pCodecCtx);
  avformat_close_input(&pFmtCtx);

  return 0;
}
Code Example #9
FFmpegVideo::FFmpegVideo(char* chVidName1, int iProcessOrder1, float fRate1)
{
	this->iProcessOrder = iProcessOrder1;
	strcpy(chVidName, chVidName1);
	this->fRate = fRate1;
	iTotalFrameNum = 0;
	iNowFrameNum = 0;
	frameFinished = 0;
	nFps = 0;
	buffer = NULL;
	pFrameBGR = NULL;
	pFrameRGB = NULL;
	pFrameOri = NULL;
	pCodecCtx = NULL;
	imageFrame = new ImageFrame();
	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(av_open_input_file(&pFormatCtx, chVidName, NULL, 0, NULL)!=0)
	{
		bIfSuccess = false;
		return; // Couldn't open file
	}

	// Retrieve stream information
	if(av_find_stream_info(pFormatCtx)<0)
	{
		bIfSuccess = false;
		return; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, chVidName, 0);

	this->iTotalFrameNum = pFormatCtx->streams[0]->nb_frames;
	this->fFrmRat = pFormatCtx->streams[0]->r_frame_rate.num/(float)(pFormatCtx->streams[0]->r_frame_rate.den);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		bIfSuccess = false;
		return; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	printf("%d-%d\n", pCodecCtx->height, pCodecCtx->width);

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		bIfSuccess = false;
		fprintf(stderr, "Unsupported codec!\n");
		return; // Codec not found
	}
	// Open codec
	//while(bIfLockCloseCodec)
	//{
	//	Sleep(10);
	//}
	//bIfLockCloseCodec = true;
	//if(avcodec_open(pCodecCtx, pCodec)<0)
	//	return -1; // Could not open codec
	//bIfLockCloseCodec = false;

	while (avcodec_open(pCodecCtx, pCodec) < 0)/* this call kept returning -1, so retry until it succeeds */ {
		//fprintf(stderr, "could not open codec\n");
		Sleep(this->iProcessOrder);
		//exit(1);
	}

	// Allocate video frame
	pFrameOri=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameBGR=avcodec_alloc_frame();
	if(pFrameBGR==NULL)
	{
		bIfSuccess = false;
		return;
	}
	pFrameRGB=avcodec_alloc_frame();
	if(pFrameRGB==NULL)
	{
		bIfSuccess = false;
		return;
	}

	// Determine required buffer size and allocate buffer
	imageFrame->size = numBytes=avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width, pCodecCtx->height);
	imageFrame->widthStep = WIDTHSTEP(pCodecCtx->width);
	imageFrame->height = pCodecCtx->height;
	imageFrame->width = pCodecCtx->width;
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	imageFrame->imageData = new uint8_t[numBytes*sizeof(uint8_t)];
	memset(imageFrame->imageData, 0, numBytes*sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameBGR, buffer, PIX_FMT_BGR24,
		pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		pCodecCtx->width, pCodecCtx->height);
	// note: the destination format passed to sws_getContext below (PIX_FMT_BGR24) determines the output image format
	ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
		pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
		PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);

	if(this->getOneFrame()==0)
		bIfSuccess = true;
	else
		bIfSuccess = false;
}
Code Example #10
File: VideoThread.cpp Project: TritonSailor/livepro
void VideoThread::readFrame()
{
// 	qDebug() << "VideoThread::readFrame(): start of function";
	if(!m_inited)
	{
// 		qDebug() << "VideoThread::readFrame(): not inited";
		//emit frameReady(m_nextDelay);
		return;
	}
	
	
	
	AVPacket pkt1, *packet = &pkt1;
	double pts;
	
// 	qDebug() << "VideoThread::readFrame(): killed: "<<m_killed;

	int frame_finished = 0;
	while(!frame_finished && !m_killed)
	{
		if(av_read_frame(m_av_format_context, packet) >= 0)
		{
			// Is this a packet from the video stream?
			if(packet->stream_index == m_video_stream)
			{
				//global_video_pkt_pts = packet->pts;
				#ifdef USE_DECODE_VID2
				avcodec_decode_video2(m_video_codec_context, m_av_frame, &frame_finished, packet);
				#else
				avcodec_decode_video(m_video_codec_context, m_av_frame, &frame_finished, packet->data, packet->size);
				#endif

				if(packet->dts == AV_NOPTS_VALUE &&
						  m_av_frame->opaque &&
				  *(uint64_t*)m_av_frame->opaque != AV_NOPTS_VALUE)
				{
					pts = *(uint64_t *)m_av_frame->opaque;
				}
				else if(packet->dts != AV_NOPTS_VALUE)
				{
					pts = packet->dts;
				}
				else
				{
					pts = 0;
				}

				pts *= av_q2d(m_timebase);

				// Did we get a video frame?
				if(frame_finished)
				{
					
					// Convert the image from its native format to RGB, then copy the image data to a QImage
					if(m_sws_context == NULL)
					{
						//mutex.lock();
						m_sws_context = sws_getContext(
							m_video_codec_context->width, m_video_codec_context->height,
							m_video_codec_context->pix_fmt,
							m_video_codec_context->width, m_video_codec_context->height,
							//PIX_FMT_RGB32,SWS_BICUBIC,
							//PIX_FMT_RGB565, SWS_FAST_BILINEAR,
							PIX_FMT_RGB32, SWS_FAST_BILINEAR,
							//PIX_FMT_YUV420P, SWS_FAST_BILINEAR,
							NULL, NULL, NULL); //SWS_PRINT_INFO
						//mutex.unlock();
						//printf("decode(): created m_sws_context\n");
					}
					//printf("decode(): got frame\n");

					sws_scale(m_sws_context,
						  m_av_frame->data,
						  m_av_frame->linesize, 0,
						  m_video_codec_context->height,
						  m_av_rgb_frame->data,
						  m_av_rgb_frame->linesize);

					//m_bufferMutex.lock();
// 					qDebug() << "VideoThread: void*:"<<(void*)m_av_rgb_frame->data[0];
					QImage frame = QImage(m_av_rgb_frame->data[0],
								m_video_codec_context->width,
								m_video_codec_context->height,
								//QImage::Format_RGB16);
								QImage::Format_ARGB32);
					//m_bufferMutex.unlock();
					
					av_free_packet(packet);

					// This block from the synchronize_video(VideoState *is, AVFrame *src_frame, double pts) : double
					// function given at: http://dranger.com/ffmpeg/tutorial05.html
					{
						// update the frame pts
						double frame_delay;

						if(pts != 0)
						{
							/* if we have pts, set video clock to it */
							m_video_clock = pts;
						} else {
							/* if we aren't given a pts, set it to the clock */
							pts = m_video_clock;
						}
						/* update the video clock */
						frame_delay = av_q2d(m_timebase);
						/* if we are repeating a frame, adjust clock accordingly */
						frame_delay += m_av_frame->repeat_pict * (frame_delay * 0.5);
						m_video_clock += frame_delay;
						//qDebug() << "Frame Delay: "<<frame_delay<<", avq2d:"<<av_q2d(m_timebase)<<", repeat:"<<(m_av_frame->repeat_pict * (frame_delay * 0.5))<<", vidclock: "<<m_video_clock;
					}


					//QFFMpegVideoFrame video_frame;
					//video_frame.frame = m_frame;
					//video_frame.pts = pts;
					//video_frame.previous_pts = m_previous_pts;

					//m_current_frame = video_frame;

					//emit newFrame(video_frame);
					//qDebug() << "emit newImage(), frameSize:"<<frame.size();
					//emit newImage(frame);
					
					
					//calculate the pts delay
					double pts_delay = pts - m_previous_pts;
					if((pts_delay < 0.0) || (pts_delay > 1.0))
						pts_delay = m_previous_pts_delay;
			
					m_previous_pts_delay = pts_delay;
					
					/* update delay to sync to audio */
			// 		double ref_clock = get_audio_clock(is);
			// 		double diff = vp->pts - ref_clock;
			
					
			
					//calculate the global delay
					//int global_delay = (m_current_frame.pts * 1000) - m_run_time.elapsed();
					//m_run_time.elapsed() / 1000; //
					//double curTime = (double)(av_gettime() / 1000000.0);
					double curTime = ((double)m_run_time.elapsed() + m_total_runtime) / 1000.0;
					m_frameTimer += pts_delay;
					if(m_frameTimer > curTime+.5)
						m_frameTimer = curTime;
					if(m_frameTimer < curTime-1.)
						m_frameTimer = curTime;
					double actual_delay = m_frameTimer - curTime;
					//qDebug() << "VideoThread::readFrame(): frame timer: "<<m_frameTimer<<", curTime:"<<curTime<<", \t actual_delay:"<<((int)(actual_delay*1000))<<", pts_delay:"<<((int)(pts_delay*1000))<<", m_run_time:"<<m_run_time.elapsed()<<", m_total_runtime:"<<m_total_runtime;
					if(actual_delay < 0.010)
					{
						// This should just skip this frame
			// 			qDebug() << "Skipping this frame, skips:"<<m_skipFrameCount;
			// 			if(status() == Running)
			// 				m_play_timer = startTimer(0);
			// 			return;
						
						actual_delay = 0.0;
						
						m_skipFrameCount ++;
						
						if(m_skipFrameCount > MAX_SKIPS_TILL_RESET)
						{
							m_skipFrameCount = 0;
							m_frameTimer = curTime - pts_delay;
			// 				qDebug() << "Reset frame timer";
						}
					}
					
					int frameDelay = (int)(actual_delay * 1000 + 0.5);
					
					// The count/accum pair is designed to average out sharp
					// jumps in frameDelay to make the resulting output appear
					// smoother
					m_frameSmoothCount ++;
					m_frameSmoothAccum += frameDelay;
					
					//frameDelay = m_frameSmoothAccum / m_frameSmoothCount;
					
					// Reset the averaging every 15 sec (approx)
					if( m_frameSmoothCount % 100 == 0)
					{
						m_frameSmoothCount = 0;
						m_frameSmoothAccum = 0;
						//qDebug() << "VideoThread::readFrame(): frame smoother reset";
					}
					
					// Arbitrary min/max delays - equals about a max fps of 66fps (15 ms) and a min fps of 9fps (111ms)
					if(frameDelay < 15)
						frameDelay = 15;
					if(frameDelay > 111)
						frameDelay = 111;
					//qDebug() << "VideoThread::readFrame(): frameDelay:"<<frameDelay;
					
// 					if(m_frameConsumed || (!m_frameConsumed && ++m_frameLockCount > 10))
// 					{
// 						m_frameLockCount = 0;
// 						m_frameConsumed = false;

						//m_time = QTime::currentTime(); 
						//QTimer::singleShot(frameDelay, this, SLOT(releaseCurrentFrame()));
						//emit frameReady((int)(pts_delay*1000));
						
						//enqueue(VideoFrame(m_frame,frameDelay));
						
						VideoFrame *frame2 = new VideoFrame(frame.convertToFormat(QImage::Format_ARGB32),pts_delay*1000);
						frame2->setCaptureTime(QTime::currentTime());
						enqueue(frame2);
						
// 						VideoFrame vidframe;
// 						vidframe.isRaw = true;
// 						vidframe.bufferType = VideoFrame::BUFFER_BYTEARRAY;
// 						const uchar *bits = frame.bits();
// 						vidframe.byteArray.append((const char*)bits, frame.byteCount());
// 						vidframe.holdTime = pts_delay*1000;
// 						vidframe.setSize(frame.size());
// 						enqueue(vidframe);
						
// 						VideoFrame vid(pts_delay*1000);
// 						QImage rgb32img = frame.convertToFormat(QImage::Format_ARGB32);
// 						vid.isRaw = true;
// 						vid.bufferType = VideoFrame::BUFFER_BYTEARRAY;
// 						vid.pixelFormat = QVideoFrame::Format_ARGB32;
// 						vid.setSize(m_frame_size);
// 						vid.byteArray.resize(rgb32img.byteCount());
// 						const uchar * bits = rgb32img.bits();
// 						vid.byteArray.append((const char*)bits, rgb32img.byteCount());
// 						enqueue(vid);
						
						//enqueue(VideoFrame(frame,pts_delay*1000));
// 						VideoFrame vid(pts_delay*1000);
// 						vid.isRaw = true;
// 						vid.bufferType = VideoFrame::BUFFER_BYTEARRAY;
// 						vid.pixelFormat = QVideoFrame::Format_YUV420P;
// 						//vid.setPointerData(m_av_rgb_frame->data, m_av_rgb_frame->linesize);
// 						vid.setSize(m_frame_size);
// 						int frame0 = m_frame_size.width() * m_frame_size.height();
// 						int frame1 = m_frame_size.width()/2 * m_frame_size.height()/2;
// 						int frame2 = m_frame_size.width()/2 * m_frame_size.height()/2;
// 						int maxSize = frame0 + frame1 + frame2;
// 						qDebug() << "frame0:"<<frame0<<", frame1:"<<frame1+frame0<<", frame2:"<<frame2+frame1+frame0<<m_frame_size;
// 						vid.byteArray.resize(maxSize);
// 						vid.byteArray.append((const char*)m_av_frame->data[0], frame0);
// 						vid.byteArray.append((const char*)m_av_frame->data[1], frame1);
// 						vid.byteArray.append((const char*)m_av_frame->data[2], frame2);
// 						
// 						enqueue(vid);
						
						
						
						//emit frameReady();
					//}
						
					m_nextDelay = frameDelay; //frameDelay * .5;
					//QTimer::singleShot(0, this, SLOT(updateTimer()));
					//updateTimer();
					
					m_previous_pts = pts;
				}

			}
			else if(packet->stream_index == m_audio_stream)
			{
// 				mutex.lock();
				//decode audio packet, store in queue
				av_free_packet(packet);
// 				mutex.unlock();

			}
			else
			{
// 				mutex.lock();
				av_free_packet(packet);
// 				mutex.unlock();

			}
		}
		else
		{
			//emit reachedEnd();
			//qDebug() << "reachedEnd()";
			restart();
			
		}
	}
	//qDebug() << "VideoThread::readFrame(): end of function";
}
Code Example #11
std::auto_ptr<ImgBuf>
VideoConverterFfmpeg::convert(const ImgBuf& src)
{
    std::auto_ptr<ImgBuf> ret;

    const int width = src.width;
    const int height = src.height;

    PixelFormat dst_pixFmt = fourcc_to_ffmpeg(_dst_fmt);
    assert(dst_pixFmt != PIX_FMT_NONE);
    PixelFormat src_pixFmt = PIX_FMT_RGB24;

#ifdef HAVE_SWSCALE_H

    if (!_swsContext.get()) {

        _swsContext.reset(new SwsContextWrapper(sws_getContext(width, height,
                                                src_pixFmt, width, height, dst_pixFmt, SWS_BILINEAR, NULL, NULL,
                                                NULL)));

        if (!_swsContext->getContext()) {

            // This means we will try to assign the
            // context again next time.
            _swsContext.reset();

            return ret;
        }
    }
#endif


    AVPicture srcpicture = {{src.data, 0, 0, 0},
        {static_cast<int>(src.stride[0]), 0, 0, 0}
    };


    int bufsize = avpicture_get_size(dst_pixFmt, width, height);
    if (bufsize == -1) {
        return ret;
    }

    boost::uint8_t* dstbuffer = new boost::uint8_t[bufsize];

    AVPicture dstpicture;
    avpicture_fill(&dstpicture, dstbuffer, dst_pixFmt, width, height);


#ifndef HAVE_SWSCALE_H
    img_convert(&dstpicture, dst_pixFmt, &srcpicture, src_pixFmt, width,
                height);
#else

    int rv = sws_scale(_swsContext->getContext(), srcpicture.data,
                       srcpicture.linesize, 0, height, dstpicture.data,
                       dstpicture.linesize);

    if (rv == -1) {
        return ret;
    }
#endif
    ret.reset(new ImgBuf(_dst_fmt, dstbuffer, bufsize, src.width,
                         src.height));
    std::copy(dstpicture.linesize, dstpicture.linesize+4, ret->stride.begin());

    return ret;
}
Code Example #12
File: qav.cpp Project: FallingSnow/qpsnr
qav::qvideo::qvideo(const char* file, int _out_width, int _out_height) : frnum(0), videoStream(-1), out_width(_out_width),
out_height(_out_height), pFormatCtx(NULL), pCodecCtx(NULL), pCodec(NULL), pFrame(NULL), img_convert_ctx(NULL) {
	const char* pslash = strrchr(file, '/');
	if (pslash)
        	fname = pslash+1;
    	else
        	fname = file;

	if (avformat_open_input(&pFormatCtx, file, NULL, NULL) < 0) {
        	free_resources();
        	throw std::runtime_error("Can't open file");
    	}
    	if (avformat_find_stream_info(pFormatCtx, NULL)<0) {
        	free_resources();
        	throw std::runtime_error("Multimedia type not supported");
    	}
    	LOG_INFO << "File info for (" << file << ")" << std::endl;
    	av_dump_format(pFormatCtx, 0, file, false);

    	// find video stream (first)
    	for (unsigned int i=0; i<pFormatCtx->nb_streams; i++) {
        	if (AVMEDIA_TYPE_VIDEO == pFormatCtx->streams[i]->codec->codec_type) {
            		videoStream=i;
            		break;
        	}
    	}
    	if (-1==videoStream) {
        	free_resources();
        	throw std::runtime_error("Can't find video stream");
    	}
    	// Get a pointer to the codec context for the video stream
    	pCodecCtx=pFormatCtx->streams[videoStream]->codec;
    	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    	if(!pCodec) {
        	free_resources();
        	throw std::runtime_error("Can't find codec for video stream");
    	}
    	if(avcodec_open2(pCodecCtx, pCodec, NULL)<0) {
        	free_resources();
        	throw std::runtime_error("Can't open codec for video stream");
    	}
    	// allocate data to extract frames
    	pFrame = av_frame_alloc();
    	if (!pFrame) {
        	free_resources();
        	throw std::runtime_error("Can't allocate frame for video stream");
    	}
    	// populate the out_width/out_height members
    	if (out_width > 0 && out_height > 0) {
        	LOG_INFO << "Output frame size for (" << file << ") is: " << out_width << 'x' << out_height << std::endl;
    	} else if (-1 == out_width && -1 == out_height) {
        	out_width = pCodecCtx->width;
        	out_height = pCodecCtx->height;
        	LOG_INFO << "Output frame size for (" << file << ") (default) is: " << out_width << 'x' << out_height << std::endl;
    	} else {
        	free_resources();
        	throw std::runtime_error("Invalid output frame size for video stream");
    	}
    	// just report if we're using a different video size
    	if (out_width!=pCodecCtx->width || out_height!=pCodecCtx->height)
        	LOG_WARNING << "Video (" << file <<") will get scaled: " << pCodecCtx->width << 'x' << pCodecCtx->height << " (in), " << out_width << 'x' << out_height << " (out)" << std::endl;

    	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, out_width, out_height, AV_PIX_FMT_RGB24, SWS_BICUBIC, NULL, NULL, NULL);
    	if (!img_convert_ctx) {
        	free_resources();
        	throw std::runtime_error("Can't allocate swscale context");
    	}
}
Code Example #13
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx;
    AVCodec         *pCodec;
    AVFrame         *pFrame;
    AVPacket        packet;
    int             frameFinished;
    float           aspect_ratio;
    struct SwsContext *img_convert_ctx;

    SDL_Overlay     *bmp;
    SDL_Surface     *screen;
    SDL_Rect        rect;
    SDL_Event       event;

    if(argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if(avformat_open_input(&pFormatCtx, argv[1], NULL, NULL)!=0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if(avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Find the first video stream
    videoStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++)
        if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO) {
            videoStream=i;
            break;
        }
    if(videoStream==-1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec context for the video stream
    pCodecCtx=pFormatCtx->streams[videoStream]->codec;

    // Find the decoder for the video stream
    pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
    if(pCodec==NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }

    // Open codec
    if(avcodec_open2(pCodecCtx, pCodec, NULL)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame=av_frame_alloc();

    // Make a screen to put our video
    screen = SDL_SetVideoMode(pCodecCtx->width, pCodecCtx->height, 0, 0);

    if(!screen) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }

    // Allocate a place to put our YUV image on that screen
    bmp = SDL_CreateYUVOverlay(pCodecCtx->width,
            pCodecCtx->height,
            SDL_YV12_OVERLAY,
            screen);


    // Create the scaling context once, outside the decode loop (the original
    // recreated it for every frame, leaking one context per frame)
    img_convert_ctx = sws_getContext(pCodecCtx->width,
            pCodecCtx->height,
            pCodecCtx->pix_fmt,
            pCodecCtx->width,
            pCodecCtx->height,
            PIX_FMT_YUV420P,
            SWS_BICUBIC, NULL,
            NULL, NULL);

    // Read frames and display them
    i=0;
    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Is this a packet from the video stream?
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pCodecCtx, pFrame, &frameFinished,
                    &packet);

            // Did we get a video frame?
            if(frameFinished) {
                SDL_LockYUVOverlay(bmp);

                AVPicture pict;
                pict.data[0] = bmp->pixels[0];
                pict.data[1] = bmp->pixels[2];
                pict.data[2] = bmp->pixels[1];

                pict.linesize[0] = bmp->pitches[0];
                pict.linesize[1] = bmp->pitches[2];
                pict.linesize[2] = bmp->pitches[1];

                // Convert the image into the YUV format that SDL uses
                sws_scale(img_convert_ctx, pFrame->data,
                        pFrame->linesize,
                        0,
                        pFrame->height,
                        pict.data,
                        pict.linesize);

                SDL_UnlockYUVOverlay(bmp);

                rect.x = 0;
                rect.y = 0;
                rect.w = pCodecCtx->width;
                rect.h = pCodecCtx->height;
                SDL_DisplayYUVOverlay(bmp, &rect);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_free_packet(&packet);
        SDL_PollEvent(&event);
        switch(event.type) {
            case SDL_QUIT:
                SDL_Quit();
                exit(0);
                break;
            default:
                break;
        }

    }

    // Free the scaling context
    sws_freeContext(img_convert_ctx);

    // Free the YUV frame
    av_free(pFrame);

    // Close the codec
    avcodec_close(pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
Code Example #14
void FFmpegVideo::restart()
{
	char chVidName1[200];
	int iProcessOrder1 = this->iProcessOrder;
	strcpy(chVidName1, this->chVidName);

	// tear down the old state
	// Free the RGB image
	av_free(buffer);
	av_free(pFrameBGR);
	av_free(pFrameRGB);

	// Free the YUV frame
	av_free(pFrameOri);

	// Close the codec
	avcodec_close(pCodecCtx);

	av_close_input_file(pFormatCtx);

	if(imageFrame)
	{
		delete imageFrame;
		imageFrame = new ImageFrame();	// re-create; its fields are filled in again below
	}

	// start over
	this->iProcessOrder = iProcessOrder1;
	strcpy(chVidName, chVidName1);
	this->fRate = 0;
	iTotalFrameNum = 0;
	iNowFrameNum = 0;
	frameFinished = 0;
	// Register all formats and codecs
	av_register_all();

	// Open video file
	if(av_open_input_file(&pFormatCtx, chVidName, NULL, 0, NULL)!=0)
	{
		bIfSuccess = false;
		return; // Couldn't open file
	}

	// Retrieve stream information
	if(av_find_stream_info(pFormatCtx)<0)
	{
		bIfSuccess = false;
		return; // Couldn't find stream information
	}

	// Dump information about file onto standard error
	dump_format(pFormatCtx, 0, chVidName, 0);

	this->iTotalFrameNum = pFormatCtx->streams[0]->duration;
	this->fFrmRat = pFormatCtx->streams[0]->r_frame_rate.num/(float)(pFormatCtx->streams[0]->r_frame_rate.den);

	// Find the first video stream
	videoStream=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++)
	{
		if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO) {
			videoStream=i;
			break;
		}
	}
	if(videoStream==-1)
	{
		bIfSuccess = false;
		return; // Didn't find a video stream
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx=pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL) {
		bIfSuccess = false;
		fprintf(stderr, "Unsupported codec!\n");
		return; // Codec not found
	}
	// Open codec
	while (avcodec_open(pCodecCtx, pCodec) < 0)/* this call kept returning -1, so retry until it succeeds */ {
		Sleep(this->iProcessOrder);
	}

	// Allocate video frame
	pFrameOri=avcodec_alloc_frame();

	// Allocate an AVFrame structure
	pFrameBGR=avcodec_alloc_frame();
	if(pFrameBGR==NULL)
	{
		bIfSuccess = false;
		return;
	}
	pFrameRGB=avcodec_alloc_frame();
	if(pFrameRGB==NULL)
	{
		bIfSuccess = false;
		return;
	}

	// Determine required buffer size and allocate buffer
	numBytes=avpicture_get_size(PIX_FMT_BGR24, pCodecCtx->width,pCodecCtx->height);
	imageFrame->height = pCodecCtx->height;
	imageFrame->width = pCodecCtx->width;
	buffer=(uint8_t *)av_malloc(numBytes*sizeof(uint8_t));
	imageFrame->imageData = new uint8_t[numBytes*sizeof(uint8_t)];
	

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *)pFrameBGR, buffer, PIX_FMT_BGR24,
		pCodecCtx->width, pCodecCtx->height);
	avpicture_fill((AVPicture *)pFrameRGB, buffer, PIX_FMT_RGB24,
		pCodecCtx->width, pCodecCtx->height);
	
	// note: the destination format (PIX_FMT_BGR24) determines the output image format
	if(this->bIfUseHD == false)
		ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
		pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
		PIX_FMT_BGR24, SWS_BICUBIC, NULL, NULL, NULL);

	this->getOneFrame();

	bIfSuccess = true;

}
Code Example #15
int _tmain(int argc, _TCHAR* argv[])
{
	AVFormatContext *pFormatCtx;
	char *filepath = "Titanic.ts";
	char *file_out = "Titanic.yuv";
	char *file_out1 = "Titanic.h264";
	FILE *fp_out;
	FILE *fp_out1;
	errno_t err,err1;

	err = fopen_s(&fp_out, file_out, "wb+");
	if (err != 0)
	{
		printf("Failed to open output file %s\n", file_out);
		return -1;
	}

	err1 = fopen_s(&fp_out1, file_out1, "wb+");
	if (err1 != 0)
	{
		printf("Failed to open output file %s\n", file_out1);
		return -1;
	}

	av_register_all();    // register all formats and codecs
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();   // allocate the format context
	if (avformat_open_input(&pFormatCtx, filepath, NULL, NULL) < 0) // open the input video file
	{
		printf("Can't open the input stream.\n");
		return -1;
	}
	if (avformat_find_stream_info(pFormatCtx,NULL)<0)     // read the stream information
	{
		printf("Can't find the stream information!\n");
		return -1;
	}

	int i, index_video = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
	{
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO)      // remember the first video stream
		{
			index_video = i;
			break;
		}		
	}
	if (index_video == -1)
	{
		printf("Can't find a video stream;\n");
		return -1;
	}

	AVCodecContext *pCodecCtx = pFormatCtx->streams[index_video]->codec;

	AVCodec *pCodec = avcodec_find_decoder(pCodecCtx->codec_id);     // find the decoder
	if (pCodec == NULL)
	{
		printf("Can't find a decoder!\n");
		return -1;
	}

	//if (pCodecCtx->codec_id == AV_CODEC_ID_H264)
	//{
	//	av_opt_set(pCodecCtx->priv_data, "preset", "slow", 0);
	//	av_opt_set(pCodecCtx->priv_data, "tune", "zerolatency", 0);
	//}


	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0)   // open the decoder
	{
		printf("Can't open the decoder!\n");
		return -1;
	}


	AVFrame *pFrame = av_frame_alloc();  //this only allocates the AVFrame itself, not the data buffers
	AVFrame *pFrameYUV = av_frame_alloc();

	uint8_t *out_buffer = (uint8_t *)av_malloc(avpicture_get_size(PIX_FMT_YUV420P,pCodecCtx->width,pCodecCtx->height));  // allocate the output buffer
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height); // attach the buffer to the frame

	AVPacket *pkt = (AVPacket *)av_malloc(sizeof(AVPacket));
	av_init_packet(pkt);
	
	SwsContext * img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt,
		pCodecCtx->width, pCodecCtx->height, PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);
	
	int frame_cnt = 0;
	int ret;
	int get_frame;
	int y_size = pCodecCtx->width*pCodecCtx->height;
	while (av_read_frame(pFormatCtx,pkt) >=0  )
	{
		if (pkt->stream_index == index_video)
		{
			fwrite(pkt->data,1,pkt->size,fp_out1);
			if (avcodec_decode_video2(pCodecCtx, pFrame, &get_frame, pkt) < 0)
			{
				printf("Decode Error!\n");
				return -1;
			}
			if (get_frame)
			{
				printf("Decoded frame index: %d\n", frame_cnt);
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
					pFrameYUV->data, pFrameYUV->linesize);
				fwrite(pFrameYUV->data[0], 1, y_size, fp_out);
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);
				frame_cnt++;
			}

		}
		av_free_packet(pkt);
	}
	while (1)
	{
		if (avcodec_decode_video2(pCodecCtx, pFrame, &get_frame, pkt) < 0)
		{
			printf("Decode Error!\n");
			break;
		}
		if (get_frame)
		{
			printf("Flush Decoded frame index: %d\n", frame_cnt);
			sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height,
				pFrameYUV->data, pFrameYUV->linesize);
			fwrite(pFrameYUV->data[0], 1, y_size, fp_out);
			fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_out);
			fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_out);
			frame_cnt++;
		}
		else
			break;
	}

	//close
	fclose(fp_out);
	fclose(fp_out1);

	//free
	sws_freeContext(img_convert_ctx);
	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);
	avformat_free_context(pFormatCtx);

	return 0;
}
Code Example #16
JNIEXPORT jint JNICALL Java_com_leixiaohua1020_sffmpegandroiddecoder_MainActivity_decode(
		JNIEnv *env, jobject obj, jstring input_jstr, jstring output_jstr) {
	AVFormatContext *pFormatCtx;
	int i, videoindex;
	AVCodecContext *pCodecCtx;
	AVCodec *pCodec;
	AVFrame *pFrame, *pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;
	FILE *fp_yuv;
	int frame_cnt;
	clock_t time_start, time_finish;
	double time_duration = 0.0;

	char input_str[500] = { 0 };
	char output_str[500] = { 0 };
	char info[1000] = { 0 };
	sprintf(input_str, "%s", (*env)->GetStringUTFChars(env, input_jstr, NULL));
	sprintf(output_str, "%s",
			(*env)->GetStringUTFChars(env, output_jstr, NULL));

	//FFmpeg av_log() callback
	av_log_set_callback(custom_log);

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if (avformat_open_input(&pFormatCtx, input_str, NULL, NULL) != 0) {
		LOGE("Couldn't open input stream.\n");
		return -1;
	}

	if (avformat_find_stream_info(pFormatCtx, NULL) < 0) {
		LOGE("Couldn't find stream information.\n");
		return -1;
	}

	videoindex = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++){
		if (pFormatCtx->streams[i]->codec->codec_type == AVMEDIA_TYPE_VIDEO) {
			videoindex = i;
			break;
		}
	}

	if (videoindex == -1) {
		LOGE("Couldn't find a video stream.\n");
		return -1;
	}

	pCodecCtx = pFormatCtx->streams[videoindex]->codec;
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);

	if (pCodec == NULL) {
		LOGE("Couldn't find Codec.\n");
		return -1;
	}

	if (avcodec_open2(pCodecCtx, pCodec, NULL) < 0) {
		LOGE("Couldn't open codec.\n");
		return -1;
	}

	pFrame = av_frame_alloc();
	pFrameYUV = av_frame_alloc();
	out_buffer = (uint8_t *) av_malloc(
			avpicture_get_size(PIX_FMT_YUV420P, pCodecCtx->width,
					pCodecCtx->height));

	avpicture_fill((AVPicture *) pFrameYUV, out_buffer, PIX_FMT_YUV420P,
			pCodecCtx->width, pCodecCtx->height);

	packet = (AVPacket *) av_malloc(sizeof(AVPacket));

	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height,
			pCodecCtx->pix_fmt, pCodecCtx->width, pCodecCtx->height,
			PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL);

	sprintf(info, "[Input     ]%s\n", input_str);
	sprintf(info, "%s[Output    ]%s\n", info, output_str);
	sprintf(info, "%s[Format    ]%s\n", info, pFormatCtx->iformat->name);
	sprintf(info, "%s[Codec     ]%s\n", info, pCodecCtx->codec->name);
	sprintf(info, "%s[Resolution]%dx%d\n", info, pCodecCtx->width,
			pCodecCtx->height);

	fp_yuv = fopen(output_str, "wb+");
	if (fp_yuv == NULL) {
		printf("Cannot open output file.\n");
		return -1;
	}

	frame_cnt = 0;
	time_start = clock();

	while (av_read_frame(pFormatCtx, packet) >= 0) {
		if (packet->stream_index == videoindex) {
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture,
					packet);

			if (ret < 0) {
				LOGE("Decode Error.\n");
				return -1;
			}

			if (got_picture) {
				sws_scale(img_convert_ctx,
						(const uint8_t* const *) pFrame->data, pFrame->linesize,
						0, pCodecCtx->height, pFrameYUV->data,
						pFrameYUV->linesize);

				y_size = pCodecCtx->width * pCodecCtx->height;
				fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y
				fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
				fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
				//Output info
				char pictype_str[10] = { 0 };

				switch (pFrame->pict_type) {
				case AV_PICTURE_TYPE_I:
					sprintf(pictype_str, "I");
					break;
				case AV_PICTURE_TYPE_P:
					sprintf(pictype_str, "P");
					break;
				case AV_PICTURE_TYPE_B:
					sprintf(pictype_str, "B");
					break;
				default:
					sprintf(pictype_str, "Other");
					break;
				}

				LOGI("Frame Index: %5d. Type:%s", frame_cnt, pictype_str);
				frame_cnt++;
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: Flush Frames remained in Codec
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const *) pFrame->data,
				pFrame->linesize, 0, pCodecCtx->height, pFrameYUV->data,
				pFrameYUV->linesize);
		int y_size = pCodecCtx->width * pCodecCtx->height;
		fwrite(pFrameYUV->data[0], 1, y_size, fp_yuv);    //Y
		fwrite(pFrameYUV->data[1], 1, y_size / 4, fp_yuv);  //U
		fwrite(pFrameYUV->data[2], 1, y_size / 4, fp_yuv);  //V
		//Output info
		char pictype_str[10] = { 0 };

		switch (pFrame->pict_type) {
		case AV_PICTURE_TYPE_I:
			sprintf(pictype_str, "I");
			break;
		case AV_PICTURE_TYPE_P:
			sprintf(pictype_str, "P");
			break;
		case AV_PICTURE_TYPE_B:
			sprintf(pictype_str, "B");
			break;
		default:
			sprintf(pictype_str, "Other");
			break;
		}

		LOGI("Frame Index: %5d. Type:%s", frame_cnt, pictype_str);
		frame_cnt++;
	}
	time_finish = clock();
	time_duration = (double) (time_finish - time_start) / CLOCKS_PER_SEC * 1000.0;

	sprintf(info, "%s[Time      ]%fms\n", info, time_duration);
	sprintf(info, "%s[Count     ]%d\n", info, frame_cnt);

	sws_freeContext(img_convert_ctx);

	fclose(fp_yuv);

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
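The flush loop above drains the decoder through the old avcodec_decode_video2 interface. On FFmpeg 3.1 and newer the same drain step is expressed with the send/receive API that example #28 below uses for normal decoding; a minimal sketch, assuming `dec` is an opened AVCodecContext and `frame` a previously allocated AVFrame:

static void drain_decoder(AVCodecContext *dec, AVFrame *frame)
{
	avcodec_send_packet(dec, NULL);            // a NULL packet enters draining mode
	while (avcodec_receive_frame(dec, frame) == 0) {
		// consume the remaining buffered frames here
	}                                          // the loop ends at AVERROR_EOF
}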
Code example #17
File: tutorial06.c Project: feixiao/ffmpeg-tutorial
int stream_component_open(VideoState *is, int stream_index) {
  AVFormatContext *pFormatCtx = is->pFormatCtx;
  AVCodecContext *codecCtx = NULL;
  AVCodec *codec = NULL;
  AVDictionary *optionsDict = NULL;
  SDL_AudioSpec wanted_spec, spec;

  if(stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
    return -1;
  }
  // Get a pointer to the codec context for the video stream
  codecCtx = pFormatCtx->streams[stream_index]->codec;
  if(codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
    // Set audio settings from codec info
    wanted_spec.freq = codecCtx->sample_rate;
    wanted_spec.format = AUDIO_S16SYS;
    wanted_spec.channels = codecCtx->channels;
    wanted_spec.silence = 0;
    wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
    wanted_spec.callback = audio_callback;
    wanted_spec.userdata = is;

    if(SDL_OpenAudio(&wanted_spec, &spec) < 0) {
      fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
      return -1;
    }
    is->audio_hw_buf_size = spec.size;
  }
  codec = avcodec_find_decoder(codecCtx->codec_id);
  if(!codec || (avcodec_open2(codecCtx, codec, &optionsDict) < 0)) {
    fprintf(stderr, "Unsupported codec!\n");
    return -1;
  }

  switch(codecCtx->codec_type) {
  case AVMEDIA_TYPE_AUDIO:
    is->audioStream = stream_index;
    is->audio_st = pFormatCtx->streams[stream_index];
    is->audio_buf_size = 0;
    is->audio_buf_index = 0;

    /* averaging filter for audio sync */
    is->audio_diff_avg_coef = exp(log(0.01 / AUDIO_DIFF_AVG_NB));
    is->audio_diff_avg_count = 0;
    /* Correct audio only if larger error than this */
    is->audio_diff_threshold = 2.0 * SDL_AUDIO_BUFFER_SIZE / codecCtx->sample_rate;

	is->sws_ctx_audio = swr_alloc();
	if (!is->sws_ctx_audio) {
		fprintf(stderr, "Could not allocate resampler context\n");
		return -1;
	}

    memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
    packet_queue_init(&is->audioq);
    SDL_PauseAudio(0);
    break;
  case AVMEDIA_TYPE_VIDEO:
    is->videoStream = stream_index;
    is->video_st = pFormatCtx->streams[stream_index];

    is->frame_timer = (double)av_gettime() / 1000000.0;
    is->frame_last_delay = 40e-3;
    is->video_current_pts_time = av_gettime();

    packet_queue_init(&is->videoq);
    is->video_tid = SDL_CreateThread(video_thread, is);
    is->sws_ctx =
        sws_getContext
        (
            is->video_st->codec->width,
            is->video_st->codec->height,
            is->video_st->codec->pix_fmt,
            is->video_st->codec->width,
            is->video_st->codec->height,
            AV_PIX_FMT_YUV420P,
            SWS_BILINEAR,
            NULL,
            NULL,
            NULL
        );
    codecCtx->get_buffer2 = our_get_buffer;
    codecCtx->release_buffer = our_release_buffer;
    break;
  default:
    break;
  }

  return 0;
}
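Note that this snippet only allocates the audio resampler with swr_alloc() and leaves it unconfigured; tutorial06.c presumably sets the options elsewhere. A hedged sketch of one way to configure it before use, mirroring the swr_alloc_set_opts() call in the next example (the stereo/S16 output values are assumptions, not taken from this file):

	is->sws_ctx_audio = swr_alloc_set_opts(is->sws_ctx_audio,
			AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, codecCtx->sample_rate,
			av_get_default_channel_layout(codecCtx->channels),
			codecCtx->sample_fmt, codecCtx->sample_rate,
			0, NULL);
	if (swr_init(is->sws_ctx_audio) < 0)
		fprintf(stderr, "Could not initialize the resampling context\n");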
Code example #18
File: tutorial05.cpp Project: shileiz/notes
int stream_component_open(VideoState *is, int stream_index) {

	AVFormatContext *pFormatCtx = is->pFormatCtx;
	AVCodecContext *codecCtx = NULL;
	AVCodec *codec = NULL;
	SDL_AudioSpec wanted_spec, spec;

	if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
		return -1;
	}

	codec = avcodec_find_decoder(pFormatCtx->streams[stream_index]->codec->codec_id);
	if (!codec) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	codecCtx = avcodec_alloc_context3(codec);
	if (avcodec_copy_context(codecCtx, pFormatCtx->streams[stream_index]->codec) != 0) {
		fprintf(stderr, "Couldn't copy codec context");
		return -1; // Error copying codec context
	}


	if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
		// Set audio settings from codec info
		wanted_spec.freq = codecCtx->sample_rate;
		wanted_spec.format = AUDIO_S16SYS;
		wanted_spec.channels = codecCtx->channels;
		wanted_spec.silence = 0;
		wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
		wanted_spec.callback = audio_callback;
		wanted_spec.userdata = is;

		if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
			fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
			return -1;
		}
		is->audio_hw_buf_size = spec.size;
	}
	if (avcodec_open2(codecCtx, codec, NULL) < 0) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	switch (codecCtx->codec_type) {
	case AVMEDIA_TYPE_AUDIO:
		is->audioStream = stream_index;
		is->audio_st = pFormatCtx->streams[stream_index];
		is->audio_ctx = codecCtx;
		is->audio_buf_size = 0;
		is->audio_buf_index = 0;
		memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
		packet_queue_init(&is->audioq);
		// The decoded raw audio must first be converted into the format SDL expects,
		// so configure swr_ctx from the raw audio format and the SDL output format
		is->swr_ctx = swr_alloc_set_opts(NULL,
			AV_CH_LAYOUT_STEREO, AV_SAMPLE_FMT_S16, is->audio_ctx->sample_rate,
			av_get_default_channel_layout(is->audio_ctx->channels), is->audio_ctx->sample_fmt, is->audio_ctx->sample_rate,
			0, NULL);
		// initialize swr_ctx
		swr_init(is->swr_ctx);
		SDL_PauseAudio(0);
		break;
	case AVMEDIA_TYPE_VIDEO:
		is->videoStream = stream_index;
		is->video_st = pFormatCtx->streams[stream_index];
		is->video_ctx = codecCtx;

		is->frame_timer = (double)av_gettime() / 1000000.0;
		is->frame_last_delay = 40e-3;

		packet_queue_init(&is->videoq);
		is->video_tid = SDL_CreateThread(video_thread, is);
		is->sws_ctx = sws_getContext(is->video_ctx->width, is->video_ctx->height,
			is->video_ctx->pix_fmt, is->video_ctx->width,
			is->video_ctx->height, PIX_FMT_YUV420P,
			SWS_BILINEAR, NULL, NULL, NULL
			);
		break;
	default:
		break;
	}
	return 0;
}
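The swr_ctx initialized above is what the audio path would use to convert each decoded frame into the S16 samples SDL was opened with. A minimal sketch of that conversion, assuming `frame` is a decoded AVFrame and `audio_buf` a sufficiently large output buffer (both names are placeholders, not part of tutorial05.cpp):

	uint8_t *out[] = { audio_buf };
	int out_samples = swr_convert(is->swr_ctx,
			out, frame->nb_samples,                      // dst buffer and its capacity in samples
			(const uint8_t **)frame->data, frame->nb_samples);
	int data_size = out_samples * 2                      // stereo output assumed above
			* av_get_bytes_per_sample(AV_SAMPLE_FMT_S16);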
Code example #19
// Initialize resampler
//
// Generic
//     ::new(src_width, src_height, src_format, dst_factor)									- Resize by percentage
//     ::new(src_width, src_height, src_format, dst_format)									- Change color format
//     ::new(src_width, src_height, src_format, dst_width, dst_height)						- Resize to width and height
//     ::new(src_width, src_height, src_format, dst_width, dst_height, filter)				- Resize with interpolation filter
//     ::new(src_width, src_height, src_format, dst_width, dst_height, dst_format, filter)	- Resize with filter and change color format
//
// From Object
//     ::new(source, dst_factor)															- Resize by percentage
//     ::new(source, dst_format)															- Change color format
//     ::new(source, dst_width, dst_height)													- Resize to width and height
//     ::new(source, dst_width, dst_height, filter)											- Resize with interpolation filter
//     ::new(source, dst_width, dst_height, dst_format, filter)								- Resize with filter and change color format
VALUE video_resampler_initialize(int argc, VALUE * argv, VALUE self) {
    VideoResamplerInternal * internal;
    Data_Get_Struct(self, VideoResamplerInternal, internal);

    if (argc && TYPE(argv[0]) == T_FIXNUM) {
        // Called generic form
        if (argc < 4)      rb_raise(rb_eArgError, "Missing argument(s)");
        else if (argc > 7) rb_raise(rb_eArgError, "Too many arguments");

        internal->src_width 	= NUM2INT(argv[0]);
        internal->src_height	= NUM2INT(argv[1]);
        internal->src_format	= symbol_to_av_pixel_format(argv[2]);

        argc -= 3;
        argv += 3;
    }
    else {
        // Called with object
        if (argc < 2)      rb_raise(rb_eArgError, "Missing argument(s)");
        else if (argc > 5) rb_raise(rb_eArgError, "Too many arguments");

        internal->src_width 	= NUM2INT(rb_funcall(argv[0], rb_intern("width"), 0));
        internal->src_height	= NUM2INT(rb_funcall(argv[0], rb_intern("height"), 0));
        internal->src_format	= symbol_to_av_pixel_format(rb_funcall(argv[0], rb_intern("format"), 0));

        argc -= 1;
        argv += 1;
    }

    internal->dst_width		= internal->src_width;
    internal->dst_height	= internal->src_height;
    internal->dst_format	= internal->src_format;
    internal->filter		= SWS_FAST_BILINEAR;

    switch (argc) {
    case 1: {
        if (TYPE(argv[0]) != T_SYMBOL) {
            // Resize by percentage
            internal->dst_width	= (int)(internal->src_width  * NUM2DBL(argv[0]));
            internal->dst_height = (int)(internal->src_height * NUM2DBL(argv[0]));
        }
        else {
            // Change color format
            internal->dst_format = symbol_to_av_pixel_format(argv[0]);
        }
        break;
    }
    case 2: {
        // Resize to width and height
        internal->dst_width = NUM2INT(argv[0]);
        internal->dst_height = NUM2INT(argv[1]);
        break;
    }
    case 3: {
        // Resize to width and height using interpolation filter
        internal->dst_width = NUM2INT(argv[0]);
        internal->dst_height = NUM2INT(argv[1]);
        internal->filter = symbol_to_interpolation_filter(argv[2]);
        break;
    }
    case 4: {
        // Resize to width and height using interpolation filter and change color format
        internal->dst_width = NUM2INT(argv[0]);
        internal->dst_height = NUM2INT(argv[1]);
        internal->dst_format = symbol_to_av_pixel_format(argv[2]);
        internal->filter = symbol_to_interpolation_filter(argv[3]);
        break;
    }
    }

    if (internal->src_format == PIX_FMT_NONE) rb_raise(rb_eArgError, "Unknown input color format");
    if (internal->dst_format == PIX_FMT_NONE) rb_raise(rb_eArgError, "Unknown output color format");
    if (internal->filter == 0) rb_raise(rb_eArgError, "Unknown interpolation method");

    // Create scaler context
    internal->context = sws_getContext(internal->src_width,
                                       internal->src_height,
                                       internal->src_format,
                                       internal->dst_width,
                                       internal->dst_height,
                                       internal->dst_format,
                                       internal->filter,
                                       NULL,
                                       NULL,
                                       NULL);
    if (!internal->context)
        rb_raise(rb_eRuntimeError, "Failed to create rescaling context");

    return self;
}
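Under the hood the wrapper above owns exactly the libswscale lifecycle that the C examples in this list use directly. A standalone sketch of that lifecycle, with an assumed RGB24 source, a 50% downscale, and YUV420P output (all three are illustrative choices, not defaults of this Ruby binding):

	struct SwsContext *ctx = sws_getContext(src_w, src_h, PIX_FMT_RGB24,
			src_w / 2, src_h / 2, PIX_FMT_YUV420P,
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	if (ctx) {
		sws_scale(ctx, (const uint8_t * const *)src_data, src_linesize,
				0, src_h, dst_data, dst_linesize);
		sws_freeContext(ctx);
	}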
Code example #20
int main(int argc, char *argv[]) {

    char *filename = NULL;
    //char *filename_suffix = NULL;
    char *inputsource = NULL;
    char *outputDB = NULL;

    //Find the last / in passed filename.
    if (strrchr(argv[1],'/') == NULL) {
        if (strcmp(argv[1],"-") == 0) {
            inputsource = "/dev/stdin";
            if (argv[2] == NULL) {
                printf("Please input a name for the movie!\n");
                return -1;
            } else {
                if (strrchr(argv[2],'/') == NULL) {
                    filename = argv[2];
                } else {
                    filename = strrchr(argv[2],'/') + 1;
                }

                if (argv[3] != NULL && argc == 4) {
                    outputDB = argv[3];
                } else {
                    outputDB = "/home/gsc/videoaudiofingerprint.db";
                }

            }
        } else {
            filename = argv[1];
            inputsource = argv[1];

            if (argv[2] != NULL && argc == 3) {
                outputDB = argv[2];
            } else {
                outputDB = "/home/gsc/videoaudiofingerprint.db";
            }
        }
    } else {
        filename = strrchr(argv[1],'/') + 1;
        inputsource = argv[1];

        if (argv[3] != NULL && argc == 4) {
            outputDB = argv[3];
        } else {
            outputDB = "/home/gsc/videoaudiofingerprint.db";
        }
    }

    printf("Filename = %s Input source = %s DB output = %s argc = %d\n",filename,inputsource,outputDB, argc);

    /*** DB initialization ***/
    int retval = 0;

    // Create a handle for database connection, create a pointer to sqlite3
    sqlite3 *handle;

    //Full array init of size 5h@60fps (a.k.a large enough)
    //TO FIX: use dynamic array?
    int *fullArray = (int*) calloc ( (1080000-1), sizeof (int));

    // Create the database. If it doesn't exist, it will be created.
    // Pass a pointer to the pointer to sqlite3, in short sqlite3**

    retval = sqlite3_open(outputDB,&handle);
    // If connection failed, handle returns NULL
    if(retval) {
        printf("Database connection failed\n");
        return -1;
    }

    char query1[] = "create table allmovies (allmovieskey INTEGER PRIMARY KEY,name TEXT,fps INTEGER, date INTEGER);";
    // Execute the query for creating the table
    retval = sqlite3_exec(handle,query1,0,0,0);
    char query2[] = "PRAGMA count_changes = OFF";
    retval = sqlite3_exec(handle,query2,0,0,0);
    char query3[] = "PRAGMA synchronous = OFF";
    retval = sqlite3_exec(handle,query3,0,0,0);

    //Hashluma table
    char query_hash[] = "create table hashluma (avg_range int, movies TEXT)";
    retval = sqlite3_exec(handle,query_hash,0,0,0);

    if (!retval) {
        //Populating the hash tables
        printf("Populating hashluma table\n");
        char hashquery[50];
        memset(hashquery, 0, 50);
        int i = 0;
        for(i=0; i <= 254; i++) {
            sprintf(hashquery, "insert into hashluma (avg_range) values (%d)", i);
            retval = sqlite3_exec(handle,hashquery,0,0,0);
        }
    }

    char table_query[150];
    memset(table_query, 0, 150);
    sprintf(table_query,"create table '%s' (s_end FLOAT, luma INTEGER);",filename);

    int repeated = 0;

    retval = sqlite3_exec(handle,table_query,0,0,0);
    if (retval) {
        char error [100];
        memset(error, 0, 100);
        sprintf(error,"Table for movie %s already exists! Skipping fingerprinting ... \n",filename);
        printf("%s",error);
        //Decide which is the best policy, not FP? overwrite? new file?
        repeated = 1;
        sqlite3_close(handle);
        return 0;
    }
    /*** DB init finished ***/

    printf("Analyzing video %s\n",filename);

    av_register_all();

    AVFormatContext *pFormatCtx;

    // Open video file
    if(av_open_input_file(&pFormatCtx, inputsource, NULL, 0, NULL)!=0) {
        printf("Could't open file %s\n", argv[1]);
        return -1; // Couldn't open file
    }

    // Retrieve stream information
    if(av_find_stream_info(pFormatCtx)<0) {
        printf("Could't find stream information\n");
        return -1; // Couldn't find stream information
    }

    // Dump information about file onto standard error
    dump_format(pFormatCtx, 0, filename, 0);

    int i;
    AVCodecContext *pVideoCodecCtx;
    AVCodecContext *pAudioCodecCtx;

    // Find the first video stream
    int videoStream=-1;
    int audioStream=-1;
    for(i=0; i<pFormatCtx->nb_streams; i++) {
        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_VIDEO && videoStream==-1)
            videoStream=i;

        if(pFormatCtx->streams[i]->codec->codec_type==CODEC_TYPE_AUDIO && audioStream==-1)
            audioStream=i;

    }

    if(videoStream==-1 || audioStream==-1)
        return -1; // Didn't find both streams

    // Get a pointer to the codec context for the video stream
    pVideoCodecCtx=pFormatCtx->streams[videoStream]->codec;
    // Similar, for audio stream
    pAudioCodecCtx=pFormatCtx->streams[audioStream]->codec;

    AVCodec *pVideoCodec;
    AVCodec *pAudioCodec;

    // Find the decoder for the streams
    pVideoCodec=avcodec_find_decoder(pVideoCodecCtx->codec_id);
    pAudioCodec=avcodec_find_decoder(pAudioCodecCtx->codec_id);
    if(pVideoCodec==NULL) {
        fprintf(stderr, "Unsupported video codec!\n");
        sqlite3_close(handle);
        return -1; // Codec not found
    }
    if(pAudioCodec==NULL) {
        fprintf(stderr, "Unsupported audio codec!\n");
        sqlite3_close(handle);
        return -1; // Codec not found
    }

    // Open codecs
    if(avcodec_open(pVideoCodecCtx, pVideoCodec)<0) {
        sqlite3_close(handle);
        return -1; // Could not open codec
    }
    if(avcodec_open(pAudioCodecCtx, pAudioCodec)<0) {
        sqlite3_close(handle);
        return -1; // Could not open codec
    }

    AVFrame *pVideoFrame;
    AVFrame *pVideoFrameYUV;
    AVFrame *pAudioFrame;

    int samples = 0;

    // Allocate audio/video frame
    pVideoFrame=avcodec_alloc_frame();
    pVideoFrameYUV=avcodec_alloc_frame();
    pAudioFrame=avcodec_alloc_frame();

    if(pVideoFrameYUV==NULL || pVideoFrame==NULL || pAudioFrame==NULL) {
        sqlite3_close(handle);
        return -1;
    }

    uint8_t *videoBuffer;
    int16_t *audioBuffer;

    int numVideoBytes;
    int numAudioBytes;
    // Determine required buffer size and allocate buffer
    numVideoBytes=avpicture_get_size(PIX_FMT_YUV420P, pVideoCodecCtx->width, pVideoCodecCtx->height);
    videoBuffer=(uint8_t *)av_mallocz(numVideoBytes*sizeof(uint8_t));
    numAudioBytes = AVCODEC_MAX_AUDIO_FRAME_SIZE + FF_INPUT_BUFFER_PADDING_SIZE;
    audioBuffer=(int16_t *)av_mallocz(numAudioBytes);

    // Assign appropriate parts of videoBuffer to image planes in pVideoFrameYUV
    // Note that pVideoFrameYUV is an AVFrame, but AVFrame is a superset of AVPicture
    avpicture_fill((AVPicture *)pVideoFrameYUV, videoBuffer, PIX_FMT_YUV420P, pVideoCodecCtx->width, pVideoCodecCtx->height);

    int frameFinished = 0;
    AVPacket packet;
    av_init_packet(&packet);
    struct SwsContext * sws_context;
    double fps = 0.0;

    struct timeval tv;
    gettimeofday(&tv, NULL);

    char allmovies_query[150];
    memset(allmovies_query, 0, 150);
    fps = (double)pFormatCtx->streams[videoStream]->r_frame_rate.num/(double)pFormatCtx->streams[videoStream]->r_frame_rate.den;
    //if (repeated) {
    //  filename_suffix = (int)tv.tv_sec;
    //  sprintf(filename, "%s_%d", filename, filename_suffix);
    //  sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), filename_suffix);
    //} else {
    sprintf(allmovies_query, "insert into allmovies (name,fps,date) values ('%s',%d,%d);", filename, (int)(fps*100), (int)tv.tv_sec);
    //}
    retval = sqlite3_exec(handle,allmovies_query,0,0,0);

    printf("%d %d\n",pAudioCodecCtx->sample_rate,pAudioCodecCtx->channels);

    i = 0;
    unsigned int offset = 0; // bytes
    //fftw_complex *in;
    int totalSamples = 0;
    //in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
    int counter = 0;
    float audioTime = 0.0;

    while(av_read_frame(pFormatCtx, &packet)>=0) {
        // Decode video
        if(packet.stream_index==videoStream) {
            // Decode video frame
            avcodec_decode_video2(pVideoCodecCtx, pVideoFrame, &frameFinished, &packet);

            // Did we get a video frame?
            if(frameFinished) {
                if (pVideoCodecCtx->pix_fmt != PIX_FMT_YUV420P) {
                    // Convert the image from its native format to YUV (PIX_FMT_YUV420P)
                    //img_convert((AVPicture *)pVideoFrameYUV, PIX_FMT_YUV420P, (AVPicture*)pVideoFrame, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height);
                    sws_context = sws_getContext(pVideoCodecCtx->width, pVideoCodecCtx->height, pVideoCodecCtx->pix_fmt, pVideoCodecCtx->width, pVideoCodecCtx->height, PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

                    sws_scale(sws_context, pVideoFrame->data, pVideoFrame->linesize, 0, pVideoCodecCtx->height, pVideoFrameYUV->data, pVideoFrameYUV->linesize);
                    sws_freeContext(sws_context);

                    retval = AvgFrameImport(pVideoFrameYUV, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                } else {
                    retval = AvgFrameImport(pVideoFrame, pVideoCodecCtx->width, pVideoCodecCtx->height, i++, filename, handle, fps, fullArray);
                }
            }
        }
        // Decode audio
        // http://qtdvd.com/guides/ffmpeg.html#decode
        if (packet.stream_index == audioStream) {

            offset = 0;
            int frameSize;
            int length = 0;
            memset(audioBuffer, 0, numAudioBytes); // sizeof(audioBuffer) would only clear pointer-size bytes
            while (packet.size > 0) {
                //memset(audioBuffer, 0, sizeof(audioBuffer));
                frameSize = numAudioBytes;

                //Copy decoded information into audioBuffer
                //frameSize gets set as the decoded frameSize, in bytes
                length = avcodec_decode_audio3(pAudioCodecCtx, audioBuffer, &frameSize, &packet);
                if (length <= 0) { // Error, see if we can recover.
                    packet.size--;
                    packet.data++;
                }
                else {
                    //Slide pointer to next frame and update size
                    printf("read %d bytes\n", length);
                    packet.size -= length;
                    packet.data += length;

                    //Append this frame at the current byte offset; audioBuffer
                    //is int16_t*, so cast to uint8_t* before adding the byte
                    //offset, and use memmove since the regions can overlap
                    if (offset > 0)
                        memmove((uint8_t*)audioBuffer + offset, audioBuffer, frameSize);
                    //Update offset
                    offset += frameSize;
                }

                //Do something with audioBuffer
                //in = (fftw_complex*) fftw_malloc(sizeof(fftw_complex) * N);
                //if (counter%2)
                //	printf("%f R: %d\n", audioTime, (int16_t)*audioBuffer);
                //else
                //	printf("%f L: %d\n", audioTime, (int16_t)*audioBuffer);
                printf("%f %d\n", audioTime, (int16_t)*audioBuffer);
                fflush(stdout);

            }


            if (offset == 0)
                samples = 0;
            else
                samples = (unsigned int)offset/sizeof(short);

            totalSamples+=samples;

            if (counter%2)
                audioTime+=samples*1.0/pAudioCodecCtx->sample_rate;

            counter++;

        }
    }

    printf("Total time (s) (per audio sample calculation): %f\n",(float)(totalSamples*1.0/pAudioCodecCtx->sample_rate/pAudioCodecCtx->channels));

    //Cut the large fullArray down to the movie's actual size
    int *shortArray = (int*) calloc ( i, sizeof (int));
    memcpy(shortArray, fullArray, i*sizeof(int));
    free(fullArray);

    //Do magic
    makeIndexes(shortArray, handle, filename, threshold, i, fps);

    // Free the packet that was allocated by av_read_frame
    av_free_packet(&packet);

    // Free the YUV image
    av_free(videoBuffer);
    av_free(audioBuffer);
    av_free(pVideoFrameYUV);

    // Free the YUV frame
    av_free(pVideoFrame);
    av_free(pAudioFrame);

    // Close the codec
    avcodec_close(pVideoCodecCtx);
    avcodec_close(pAudioCodecCtx);

    // Close the video file
    av_close_input_file(pFormatCtx);

    // Close DB handler
    sqlite3_close(handle);

    // Free full array
    free(shortArray);

    return 0;
}
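One further note on the video branch above: it calls sws_getContext() and sws_freeContext() once per decoded frame. sws_getCachedContext() keeps the context alive while the parameters stay unchanged; a sketch of the cheaper pattern (sws_context would start out NULL and be freed once after the read loop):

	sws_context = sws_getCachedContext(sws_context,
			pVideoCodecCtx->width, pVideoCodecCtx->height, pVideoCodecCtx->pix_fmt,
			pVideoCodecCtx->width, pVideoCodecCtx->height, PIX_FMT_YUV420P,
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	sws_scale(sws_context, pVideoFrame->data, pVideoFrame->linesize,
			0, pVideoCodecCtx->height, pVideoFrameYUV->data, pVideoFrameYUV->linesize);
	// ... after the loop: sws_freeContext(sws_context);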
Code example #21
int main(int argc, char ** argv)
{
	if(argc < 4) {
		printf("\nScrub, you need to specify a bitrate, number of frames, and server."
				"\nLike this: pixieHD 350 1000 rtmp://domain.com/live/matt\n"
				"\nNOTE, it is: progname bitrate frames server\n\n"
				"The bitrate is understood to be kbits/sec.\n"
				"You should enter frames or else you the program will\n"
				"continue to stream until you forcefully close it.\n"
				"THANK YOU: while(1) { /* stream! */ }\n");
		return 0;
	}
	printf("\nYou have set the following options:\n\n%5cbitrate: %s,"
			"\n%5cframes: %s\n%5cserver: %s\n\n",
			' ',argv[1],' ',argv[2],' ',argv[3]);
	
	/*int p;
	printf("Initializing noob options");
	for(p=0; p<3; ++p) {
		printf("%5c",'.');
		Sleep(1500);
	}
	printf("\n\n");

	char *input;
	printf("You hating on my GFX or wat? Please Answer: ");
	input = getline();

	printf("\n\n");
	printf("Your answer: ");

	size_t input_len = strlen(input);
	for(p=0; p<input_len; ++p) {
		Sleep(300);
		printf("%c",input[p]);
	}
	printf("\nkk here we go...");
	Sleep(1000);*/

	printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
    while (1)
    {
	   if (ButtonPress(VK_ESCAPE)) {
          printf("Quit.\n\n");
          break;
       } else if (ButtonPress(VK_CONTROL)) {
    	   // Decoder local variable declaration
    	   	AVFormatContext *pFormatCtx = NULL;
    	   	int i, videoStream;
    	   	AVCodecContext *pCodecCtx = NULL;
    	   	AVCodec *pCodec;
    	   	AVFrame *pFrame;
    	   	AVPacket packet;
    	   	int frameFinished;

    	   	// Encoder local variable declaration
    	   	const char *filename;
    	   	AVOutputFormat *fmt;
    	   	AVFormatContext *oc;
    	   	AVStream *video_st;
    	   	AVCodec *video_codec;
    	   	int ret; unsigned int frame_count, frame_count2;
    	   	StreamInfo sInfo;

    	   	size_t max_frames = strtol(argv[2], NULL, 0);

    	   	// Register all formats, codecs and network
    	   	av_register_all();
    	   	avcodec_register_all();
    	   	avformat_network_init();

    	   	// Setup mux
    	   	//filename = "output_file.flv";
    	   	//filename = "rtmp://chineseforall.org/live/beta";
    	   	filename = argv[3];
    	   	fmt = av_guess_format("flv", filename, NULL);
    	   	if (fmt == NULL) {
    	   		printf("Could not guess format.\n");
    	   		return -1;
    	   	}
    	   	// allocate the output media context
    	   	oc = avformat_alloc_context();
    	   	if (oc == NULL) {
    	   		printf("could not allocate context.\n");
    	   		return -1;
    	   	}


    	   HDC hScreen = GetDC(GetDesktopWindow());
		   ScreenX = GetDeviceCaps(hScreen, HORZRES);
		   ScreenY = GetDeviceCaps(hScreen, VERTRES);

		   // Temp. hard-code the resolution
		   int new_width = 1024, new_height = 576;
		   double v_ratio = 1.7786458333333333333333333333333;

    	   	// Set output format context to the format ffmpeg guessed
    	   	oc->oformat = fmt;

    	   	// Add the video stream using the h.264
    	   	// codec and initialize the codec.
    	   	video_st = NULL;
    	   	sInfo.width = new_width;
    	   	sInfo.height = new_height;
    	   	sInfo.pix_fmt = AV_PIX_FMT_YUV420P;
    	   	sInfo.frame_rate = 10;
    	   	sInfo.bitrate = strtol(argv[1], NULL, 0)*1000;
    	   	video_st = add_stream(oc, &video_codec, AV_CODEC_ID_H264, &sInfo);

    	   	// Now that all the parameters are set, we can open the audio and
    	   	// video codecs and allocate the necessary encode buffers.
    	   	if (video_st)
    	   		open_video(oc, video_codec, video_st);

    	   	/* open the output file, if needed */
    	   	if (!(fmt->flags & AVFMT_NOFILE)) {
    	   		ret = avio_open(&oc->pb, filename, AVIO_FLAG_WRITE);
    	   		if (ret < 0) {
    	   			fprintf(stderr, "Could not open '%s': %s\n", filename, av_err2str(ret));
    	   			return 1;
    	   		}
    	   	}

    	   	// dump output format
    	   	av_dump_format(oc, 0, filename, 1);

    	   	// Write the stream header, if any.
    	   	ret = avformat_write_header(oc, NULL);
    	   	if (ret < 0) {
    	   		fprintf(stderr, "Error occurred when opening output file: %s\n", av_err2str(ret));
    	   		return 1;
    	   	}

    	   	// Read frames, decode, and re-encode
    	   	frame_count = 1;
    	   	frame_count2 = 1;

		   HDC hdcMem = CreateCompatibleDC (hScreen);
		   HBITMAP hBitmap = CreateCompatibleBitmap(hScreen, ScreenX, ScreenY);
		   HGDIOBJ hOld;
		   BITMAPINFOHEADER bmi = {0};
		   bmi.biSize = sizeof(BITMAPINFOHEADER);
		   bmi.biPlanes = 1;
		   bmi.biBitCount = 32;
		   bmi.biWidth = ScreenX;
		   bmi.biHeight = -ScreenY;
		   bmi.biCompression = BI_RGB;
		   bmi.biSizeImage = 0;// 3 * ScreenX * ScreenY;


		   if(ScreenData)
			   free(ScreenData);
		   ScreenData = (BYTE*)malloc(4 * ScreenX * ScreenY);
		   AVPacket pkt;

		   clock_t start_t = GetTickCount();
		   long long wait_time = 0;

		   uint64_t total_size;

    	   while(1) {
			hOld = SelectObject(hdcMem, hBitmap);
			BitBlt(hdcMem, 0, 0, ScreenX, ScreenY, hScreen, 0, 0, SRCCOPY);
			SelectObject(hdcMem, hOld);

			GetDIBits(hdcMem, hBitmap, 0, ScreenY, ScreenData, (BITMAPINFO*)&bmi, DIB_RGB_COLORS);

			//calculate the bytes needed for the output image
			int nbytes = avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height);

			//create buffer for the output image
			uint8_t* outbuffer = (uint8_t*)av_malloc(nbytes);

			//create ffmpeg frame structures.  These do not allocate space for image data,
			//just the pointers and other information about the image.
			AVFrame* inpic = avcodec_alloc_frame();
			AVFrame* outpic = avcodec_alloc_frame();

			//this will set the pointers in the frame structures to the right points in
			//the input and output buffers.
			avpicture_fill((AVPicture*)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY);
			avpicture_fill((AVPicture*)outpic, outbuffer, AV_PIX_FMT_YUV420P, new_width, new_height);

			//create the conversion context
			struct SwsContext *fooContext = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32, new_width, new_height, AV_PIX_FMT_YUV420P, SWS_FAST_BILINEAR, NULL, NULL, NULL);

			//perform the conversion, then free the context immediately,
			//since it is re-created on every iteration of this loop
			sws_scale(fooContext, inpic->data, inpic->linesize, 0, ScreenY, outpic->data, outpic->linesize);
			sws_freeContext(fooContext);

			// Initialize a new frame
			AVFrame* newFrame = avcodec_alloc_frame();

			int size = avpicture_get_size(video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);
			uint8_t* picture_buf = av_malloc(size);

			avpicture_fill((AVPicture *) newFrame, picture_buf, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// Copy only the frame content without additional fields
			av_picture_copy((AVPicture*) newFrame, (AVPicture*) outpic, video_st->codec->pix_fmt, video_st->codec->width, video_st->codec->height);

			// encode the image
			int got_output;
			av_init_packet(&pkt);
			pkt.data = NULL; // packet data will be allocated by the encoder
			pkt.size = 0;

			// Set the frame's pts (this prevents the warning notice 'non-strictly-monotonic PTS')
			newFrame->pts = frame_count2;

			ret = avcodec_encode_video2(video_st->codec, &pkt, newFrame, &got_output);
			if (ret < 0) {
				fprintf(stderr, "Error encoding video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			if (got_output) {
				if (video_st->codec->coded_frame->key_frame)
					pkt.flags |= AV_PKT_FLAG_KEY;
				pkt.stream_index = video_st->index;

				if (pkt.pts != AV_NOPTS_VALUE)
					pkt.pts = av_rescale_q(pkt.pts, video_st->codec->time_base, video_st->time_base);
				if (pkt.dts != AV_NOPTS_VALUE)
					pkt.dts = av_rescale_q(pkt.dts, video_st->codec->time_base, video_st->time_base);

				// Write the compressed frame to the media file.
				ret = av_interleaved_write_frame(oc, &pkt);

				fprintf(stderr, "encoded frame #%d\n", frame_count);
				frame_count++;
			} else {
				ret = 0;
			}
			if (ret != 0) {
				fprintf(stderr, "Error while writing video frame: %s\n", av_err2str(ret));
				exit(1);
			}

			++frame_count2;

			// Free the YUV picture frame we copied from the
			// decoder to eliminate the additional fields
			// and other packets/frames used
			av_free(picture_buf);
			av_free_packet(&pkt);
			av_free(newFrame);

			//free memory
			av_free(outbuffer);
			av_free(inpic);
			av_free(outpic);


          if(frame_count == max_frames) {
			/* Write the trailer, if any. The trailer must be written before you
			 * close the CodecContexts open when you wrote the header; otherwise
			 * av_write_trailer() may try to use memory that was freed on
			 * av_codec_close().
			 */
			av_write_trailer(oc);

			/* Close the video codec (encoder) */
			if (video_st) {
				close_video(oc, video_st);
			}
			// Free the output streams.
			for (i = 0; i < oc->nb_streams; i++) {
				av_freep(&oc->streams[i]->codec);
				av_freep(&oc->streams[i]);
			}
			if (!(fmt->flags & AVFMT_NOFILE)) {
				/* Close the output file. */
				avio_close(oc->pb);
			}
			/* free the output format context */
			av_free(oc);

			ReleaseDC(GetDesktopWindow(),hScreen);
			DeleteDC(hdcMem);

			printf("\n\nPress the CONTROL key to begin streaming or ESC key to QUIT.\n\n");
			break;
          }
       }
       }
    }
    return 0;
}
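The capture loop above also re-allocates the frames, the output buffer, and the scaler context on every iteration. Since the screen and output dimensions are fixed for the whole session, these could be hoisted out of the loop; a sketch of the setup that would run once before the inner while(1) (the same calls, just moved):

	struct SwsContext *scaler = sws_getContext(ScreenX, ScreenY, AV_PIX_FMT_RGB32,
			new_width, new_height, AV_PIX_FMT_YUV420P,
			SWS_FAST_BILINEAR, NULL, NULL, NULL);
	AVFrame *inpic  = avcodec_alloc_frame();   // reused for every captured frame
	AVFrame *outpic = avcodec_alloc_frame();
	uint8_t *outbuffer = (uint8_t *)av_malloc(
			avpicture_get_size(AV_PIX_FMT_YUV420P, new_width, new_height));
	avpicture_fill((AVPicture *)outpic, outbuffer, AV_PIX_FMT_YUV420P,
			new_width, new_height);
	// per frame: avpicture_fill((AVPicture *)inpic, ScreenData, AV_PIX_FMT_RGB32, ScreenX, ScreenY);
	//            sws_scale(scaler, inpic->data, inpic->linesize, 0, ScreenY, outpic->data, outpic->linesize);
	// after the loop: sws_freeContext(scaler); av_free(outbuffer); av_free(inpic); av_free(outpic);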
Code example #22
static void FFMpegUtils_print(JNIEnv *env, jobject obj, jint pAVFormatContext) {
	int i;
	AVCodecContext *pCodecCtx;
	AVFrame *pFrame;
	AVCodec *pCodec;
	AVFormatContext *pFormatCtx = (AVFormatContext *) pAVFormatContext;
	struct SwsContext *img_convert_ctx;

	LOGD("playing");

	// Find the first video stream
	int videoStream = -1;
	for (i = 0; i < pFormatCtx->nb_streams; i++)
		if (pFormatCtx->streams[i]->codec->codec_type == CODEC_TYPE_VIDEO) {
			videoStream = i;
			break;
		}
	if (videoStream == -1) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Didn't find a video stream");
		return;
	}

	// Get a pointer to the codec context for the video stream
	pCodecCtx = pFormatCtx->streams[videoStream]->codec;

	// Find the decoder for the video stream
	pCodec = avcodec_find_decoder(pCodecCtx->codec_id);
	if (pCodec == NULL) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Unsupported codec!");
		return; // Codec not found
	}
	// Open codec
	if (avcodec_open(pCodecCtx, pCodec) < 0) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Could not open codec");
		return; // Could not open codec
	}

	// Allocate video frame
	pFrame = avcodec_alloc_frame();

	// Allocate an AVFrame structure
	AVFrame *pFrameRGB = avcodec_alloc_frame();
	if (pFrameRGB == NULL) {
		jniThrowException(env,
						  "java/io/IOException",
						  "Could allocate an AVFrame structure");
		return;
	}

	uint8_t *buffer;
	int numBytes;
	// Determine required buffer size and allocate buffer
	numBytes = avpicture_get_size(PIX_FMT_RGB565, pCodecCtx->width,
			pCodecCtx->height);
	buffer = (uint8_t *) av_malloc(numBytes * sizeof(uint8_t));

	// Assign appropriate parts of buffer to image planes in pFrameRGB
	// Note that pFrameRGB is an AVFrame, but AVFrame is a superset
	// of AVPicture
	avpicture_fill((AVPicture *) pFrameRGB, buffer, PIX_FMT_RGB565,
			pCodecCtx->width, pCodecCtx->height);

	int w = pCodecCtx->width;
	int h = pCodecCtx->height;
	img_convert_ctx = sws_getContext(w, h, pCodecCtx->pix_fmt, w, h,
			PIX_FMT_RGB565, SWS_BICUBIC, NULL, NULL, NULL);

	int frameFinished;
	AVPacket packet;

	i = 0;
	int result = -1;
	while ((result = av_read_frame(pFormatCtx, &packet)) >= 0) {
		// Is this a packet from the video stream?
		if (packet.stream_index == videoStream) {
			// Decode video frame
			avcodec_decode_video(pCodecCtx, pFrame, &frameFinished,
					packet.data, packet.size);

			// Did we get a video frame?
			if (frameFinished) {
				// Convert the image from its native format to RGB
				sws_scale(img_convert_ctx, pFrame->data, pFrame->linesize, 0,
						pCodecCtx->height, pFrameRGB->data, pFrameRGB->linesize);

				//FFMpegUtils_saveFrame(pFrameRGB, pCodecCtx->width,
				//		pCodecCtx->height, i);
				// hand the converted RGB565 frame (not the raw YUV frame) to the callback
				FFMpegUtils_handleOnVideoFrame(env, obj, pFrameRGB, pCodecCtx->width,
						pCodecCtx->height);
				i++;
			}
		}

		// Free the packet that was allocated by av_read_frame
		av_free_packet(&packet);
	}

	// Free the RGB image
	av_free(buffer);
	av_free(pFrameRGB);

	// Free the YUV frame
	av_free(pFrame);

	// Close the codec
	avcodec_close(pCodecCtx);

	// Close the video file
	av_close_input_file(pFormatCtx);

	LOGD("end of playing");
}
Code example #23
File: DVDFileInfo.cpp Project: Admirali78/xbmc
bool CDVDFileInfo::ExtractThumb(const std::string &strPath,
                                CTextureDetails &details,
                                CStreamDetails *pStreamDetails, int pos)
{
  std::string redactPath = CURL::GetRedacted(strPath);
  unsigned int nTime = XbmcThreads::SystemClockMillis();
  CFileItem item(strPath, false);
  CDVDInputStream *pInputStream = CDVDFactoryInputStream::CreateInputStream(NULL, item);
  if (!pInputStream)
  {
    CLog::Log(LOGERROR, "InputStream: Error creating stream for %s", redactPath.c_str());
    return false;
  }

  if (pInputStream->IsStreamType(DVDSTREAM_TYPE_DVD)
   || pInputStream->IsStreamType(DVDSTREAM_TYPE_BLURAY))
  {
    CLog::Log(LOGDEBUG, "%s: disc streams not supported for thumb extraction, file: %s", __FUNCTION__, redactPath.c_str());
    delete pInputStream;
    return false;
  }

  if (pInputStream->IsStreamType(DVDSTREAM_TYPE_PVRMANAGER))
  {
    delete pInputStream;
    return false;
  }

  if (!pInputStream->Open())
  {
    CLog::Log(LOGERROR, "InputStream: Error opening, %s", redactPath.c_str());
    if (pInputStream)
      delete pInputStream;
    return false;
  }

  CDVDDemux *pDemuxer = NULL;

  try
  {
    pDemuxer = CDVDFactoryDemuxer::CreateDemuxer(pInputStream, true);
    if(!pDemuxer)
    {
      delete pInputStream;
      CLog::Log(LOGERROR, "%s - Error creating demuxer", __FUNCTION__);
      return false;
    }
  }
  catch(...)
  {
    CLog::Log(LOGERROR, "%s - Exception thrown when opening demuxer", __FUNCTION__);
    if (pDemuxer)
      delete pDemuxer;
    delete pInputStream;
    return false;
  }

  if (pStreamDetails)
  {
    DemuxerToStreamDetails(pInputStream, pDemuxer, *pStreamDetails, strPath);

    // external subtitles
    std::vector<std::string> filenames;
    std::string video_path;
    if (strPath.empty())
      video_path = pInputStream->GetFileName();
    else
      video_path = strPath;

    CUtil::ScanForExternalSubtitles(video_path, filenames);

    for(unsigned int i=0;i<filenames.size();i++)
    {
      // if vobsub subtitle:
      if (URIUtils::GetExtension(filenames[i]) == ".idx")
      {
        std::string strSubFile;
        if ( CUtil::FindVobSubPair(filenames, filenames[i], strSubFile) )
          AddExternalSubtitleToDetails(video_path, *pStreamDetails, filenames[i], strSubFile);
      }
      else
      {
        if ( !CUtil::IsVobSub(filenames, filenames[i]) )
        {
          AddExternalSubtitleToDetails(video_path, *pStreamDetails, filenames[i]);
        }
      }
    }
  }

  int nVideoStream = -1;
  for (int i = 0; i < pDemuxer->GetNrOfStreams(); i++)
  {
    CDemuxStream* pStream = pDemuxer->GetStream(i);
    if (pStream)
    {
      // ignore if it's a picture attachment (e.g. jpeg artwork)
      if(pStream->type == STREAM_VIDEO && !(pStream->flags & AV_DISPOSITION_ATTACHED_PIC))
        nVideoStream = i;
      else
        pStream->SetDiscard(AVDISCARD_ALL);
    }
  }

  bool bOk = false;
  int packetsTried = 0;

  if (nVideoStream != -1)
  {
    CDVDVideoCodec *pVideoCodec;
    std::unique_ptr<CProcessInfo> pProcessInfo(CProcessInfo::CreateInstance());

    CDVDStreamInfo hint(*pDemuxer->GetStream(nVideoStream), true);
    hint.software = true;

    if (hint.codec == AV_CODEC_ID_MPEG2VIDEO || hint.codec == AV_CODEC_ID_MPEG1VIDEO)
    {
      // libmpeg2 is not thread safe so use ffmpeg for mpeg2/mpeg1 thumb extraction
      CDVDCodecOptions dvdOptions;
      pVideoCodec = CDVDFactoryCodec::OpenCodec(new CDVDVideoCodecFFmpeg(*pProcessInfo), hint, dvdOptions);
    }
    else
    {
      pVideoCodec = CDVDFactoryCodec::CreateVideoCodec(hint, *pProcessInfo);
    }

    if (pVideoCodec)
    {
      int nTotalLen = pDemuxer->GetStreamLength();
      int nSeekTo = (pos==-1?nTotalLen / 3:pos);

      CLog::Log(LOGDEBUG,"%s - seeking to pos %dms (total: %dms) in %s", __FUNCTION__, nSeekTo, nTotalLen, redactPath.c_str());
      if (pDemuxer->SeekTime(nSeekTo, true))
      {
        int iDecoderState = VC_ERROR;
        DVDVideoPicture picture;

        memset(&picture, 0, sizeof(picture));

        // num streams * 160 frames, should get a valid frame, if not abort.
        int abort_index = pDemuxer->GetNrOfStreams() * 160;
        do
        {
          DemuxPacket* pPacket = pDemuxer->Read();
          packetsTried++;

          if (!pPacket)
            break;

          if (pPacket->iStreamId != nVideoStream)
          {
            CDVDDemuxUtils::FreeDemuxPacket(pPacket);
            continue;
          }

          iDecoderState = pVideoCodec->Decode(pPacket->pData, pPacket->iSize, pPacket->dts, pPacket->pts);
          CDVDDemuxUtils::FreeDemuxPacket(pPacket);

          if (iDecoderState & VC_ERROR)
            break;

          if (iDecoderState & VC_PICTURE)
          {
            memset(&picture, 0, sizeof(DVDVideoPicture));
            if (pVideoCodec->GetPicture(&picture))
            {
              if(!(picture.iFlags & DVP_FLAG_DROPPED))
                break;
            }
          }

        } while (abort_index--);

        if (iDecoderState & VC_PICTURE && !(picture.iFlags & DVP_FLAG_DROPPED))
        {
          {
            unsigned int nWidth = g_advancedSettings.m_imageRes;
            double aspect = (double)picture.iDisplayWidth / (double)picture.iDisplayHeight;
            if(hint.forced_aspect && hint.aspect != 0)
              aspect = hint.aspect;
            unsigned int nHeight = (unsigned int)((double)g_advancedSettings.m_imageRes / aspect);

            uint8_t *pOutBuf = new uint8_t[nWidth * nHeight * 4];
            struct SwsContext *context = sws_getContext(picture.iWidth, picture.iHeight,
                  AV_PIX_FMT_YUV420P, nWidth, nHeight, AV_PIX_FMT_BGRA, SWS_FAST_BILINEAR, NULL, NULL, NULL);

            if (context)
            {
              uint8_t *src[] = { picture.data[0], picture.data[1], picture.data[2], 0 };
              int     srcStride[] = { picture.iLineSize[0], picture.iLineSize[1], picture.iLineSize[2], 0 };
              uint8_t *dst[] = { pOutBuf, 0, 0, 0 };
              int     dstStride[] = { (int)nWidth*4, 0, 0, 0 };
              int orientation = DegreeToOrientation(hint.orientation);
              sws_scale(context, src, srcStride, 0, picture.iHeight, dst, dstStride);
              sws_freeContext(context);

              details.width = nWidth;
              details.height = nHeight;
              CPicture::CacheTexture(pOutBuf, nWidth, nHeight, nWidth * 4, orientation, nWidth, nHeight, CTextureCache::GetCachedPath(details.file));
              bOk = true;
            }

            delete [] pOutBuf;
          }
        }
        else
        {
          CLog::Log(LOGDEBUG,"%s - decode failed in %s after %d packets.", __FUNCTION__, redactPath.c_str(), packetsTried);
        }
      }
      delete pVideoCodec;
    }
  }

  if (pDemuxer)
    delete pDemuxer;

  delete pInputStream;

  if(!bOk)
  {
    XFILE::CFile file;
    if(file.OpenForWrite(CTextureCache::GetCachedPath(details.file)))
      file.Close();
  }

  unsigned int nTotalTime = XbmcThreads::SystemClockMillis() - nTime;
  CLog::Log(LOGDEBUG,"%s - measured %u ms to extract thumb from file <%s> in %d packets. ", __FUNCTION__, nTotalTime, redactPath.c_str(), packetsTried);
  return bOk;
}
Code example #24
File: swscale-test.c Project: 119/dropcam_for_iphone
// test by ref -> src -> dst -> out & compare out against ref
// ref & out are YV12
static int doTest(uint8_t *ref[4], int refStride[4], int w, int h,
                  enum PixelFormat srcFormat, enum PixelFormat dstFormat,
                  int srcW, int srcH, int dstW, int dstH, int flags)
{
    uint8_t *src[4] = {0};
    uint8_t *dst[4] = {0};
    uint8_t *out[4] = {0};
    int srcStride[4], dstStride[4];
    int i;
    uint64_t ssdY, ssdU=0, ssdV=0, ssdA=0;
    struct SwsContext *srcContext = NULL, *dstContext = NULL,
                      *outContext = NULL;
    int res;

    res = 0;
    for (i=0; i<4; i++) {
        // avoid stride % bpp != 0
        if (srcFormat==PIX_FMT_RGB24 || srcFormat==PIX_FMT_BGR24)
            srcStride[i]= srcW*3;
        else if (srcFormat==PIX_FMT_RGB48BE || srcFormat==PIX_FMT_RGB48LE)
            srcStride[i]= srcW*6;
        else
            srcStride[i]= srcW*4;

        if (dstFormat==PIX_FMT_RGB24 || dstFormat==PIX_FMT_BGR24)
            dstStride[i]= dstW*3;
        else if (dstFormat==PIX_FMT_RGB48BE || dstFormat==PIX_FMT_RGB48LE)
            dstStride[i]= dstW*6;
        else
            dstStride[i]= dstW*4;

        /* Image buffers passed into libswscale can be allocated any way you
         * prefer, as long as they're aligned enough for the architecture, and
         * they're freed appropriately (such as using av_free for buffers
         * allocated with av_malloc). */
        src[i]= av_mallocz(srcStride[i]*srcH);
        dst[i]= av_mallocz(dstStride[i]*dstH);
        out[i]= av_mallocz(refStride[i]*h);
        if (!src[i] || !dst[i] || !out[i]) {
            perror("Malloc");
            res = -1;

            goto end;
        }
    }

    srcContext= sws_getContext(w, h, PIX_FMT_YUVA420P, srcW, srcH, srcFormat, flags, NULL, NULL, NULL);
    if (!srcContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(PIX_FMT_YUVA420P),
                sws_format_name(srcFormat));
        res = -1;

        goto end;
    }
    dstContext= sws_getContext(srcW, srcH, srcFormat, dstW, dstH, dstFormat, flags, NULL, NULL, NULL);
    if (!dstContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(srcFormat),
                sws_format_name(dstFormat));
        res = -1;

        goto end;
    }
    outContext= sws_getContext(dstW, dstH, dstFormat, w, h, PIX_FMT_YUVA420P, flags, NULL, NULL, NULL);
    if (!outContext) {
        fprintf(stderr, "Failed to get %s ---> %s\n",
                sws_format_name(dstFormat),
                sws_format_name(PIX_FMT_YUVA420P));
        res = -1;

        goto end;
    }
//    printf("test %X %X %X -> %X %X %X\n", (int)ref[0], (int)ref[1], (int)ref[2],
//        (int)src[0], (int)src[1], (int)src[2]);

    sws_scale(srcContext, ref, refStride, 0, h   , src, srcStride);
    sws_scale(dstContext, src, srcStride, 0, srcH, dst, dstStride);
    sws_scale(outContext, dst, dstStride, 0, dstH, out, refStride);

    ssdY= getSSD(ref[0], out[0], refStride[0], refStride[0], w, h);
    if (hasChroma(srcFormat) && hasChroma(dstFormat)) {
        //FIXME check that output is really gray
        ssdU= getSSD(ref[1], out[1], refStride[1], refStride[1], (w+1)>>1, (h+1)>>1);
        ssdV= getSSD(ref[2], out[2], refStride[2], refStride[2], (w+1)>>1, (h+1)>>1);
    }
Code example #25
File: Test7.cpp Project: soffio/FFmpegTutorial
int stream_component_open(VideoState* is, int stream_index) {
	AVFormatContext* pFormatCtx = is->pFormatCtx;
	AVCodecContext* codecCtx;
	AVCodec* codec;

	SDL_AudioSpec wanted_spec, spec;

	if (stream_index < 0 || stream_index >= pFormatCtx->nb_streams) {
		return -1;
	}

	codec = avcodec_find_decoder(
			pFormatCtx->streams[stream_index]->codec->codec_id);
	if (!codec) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}

	codecCtx = avcodec_alloc_context3(codec);
	if (avcodec_copy_context(codecCtx, pFormatCtx->streams[stream_index]->codec)
			!= 0) {
		fprintf(stderr, "Couldn't copying codec context");
		return -1;
	}

	if (codecCtx->codec_type == AVMEDIA_TYPE_AUDIO) {
		wanted_spec.freq = codecCtx->sample_rate;
		wanted_spec.format = AUDIO_S16SYS;
		wanted_spec.channels = codecCtx->channels;
		wanted_spec.silence = 0;
		wanted_spec.samples = SDL_AUDIO_BUFFER_SIZE;
		wanted_spec.callback = audio_callback;
		wanted_spec.userdata = is;

		if (SDL_OpenAudio(&wanted_spec, &spec) < 0) {
			fprintf(stderr, "SDL_OpenAudio: %s\n", SDL_GetError());
			return -1;
		}

		is->au_convert_ctx = swr_alloc();
		uint64_t out_channel_layout = AV_CH_LAYOUT_STEREO;
		AVSampleFormat out_sample_fmt = AV_SAMPLE_FMT_S16;
		int out_sample_rate = 44100;
		int64_t in_channel_layout = av_get_default_channel_layout(
				codecCtx->channels);
		is->au_convert_ctx = swr_alloc_set_opts(is->au_convert_ctx,
				out_channel_layout, out_sample_fmt, out_sample_rate,
				in_channel_layout, codecCtx->sample_fmt, codecCtx->sample_rate,
				0,
				NULL);
		swr_init(is->au_convert_ctx);
	}

	if (avcodec_open2(codecCtx, codec, NULL) < 0) {
		fprintf(stderr, "Unsupported codec!\n");
		return -1;
	}
	switch (codecCtx->codec_type) {
	case AVMEDIA_TYPE_AUDIO:
		is->audioStream = stream_index;
		is->audio_st = pFormatCtx->streams[stream_index];
		is->audio_ctx = codecCtx;
		is->audio_buf_size = 0;
		is->audio_buf_index = 0;
		memset(&is->audio_pkt, 0, sizeof(is->audio_pkt));
		packet_queue_init(&is->audioq);
		SDL_PauseAudio(0);
		break;
	case AVMEDIA_TYPE_VIDEO:
		is->videoStream = stream_index;
		is->video_st = pFormatCtx->streams[stream_index];
		is->video_ctx = codecCtx;
		Hex2Str(codecCtx->extradata, codecCtx->extradata_size);
		printf("AVCodecID:%d\n", codec->id);

		is->frame_timer = (double) av_gettime() / 1000000.0;
		is->frame_last_delay = 40e-3;
		is->video_current_pts_time = av_gettime();

		packet_queue_init(&is->videoq);
		is->video_tid = SDL_CreateThread(video_thread, "video_thread", is);
		is->sws_ctx = sws_getContext(is->video_ctx->width,
				is->video_ctx->height, is->video_ctx->pix_fmt,
				is->video_ctx->width, is->video_ctx->height, AV_PIX_FMT_YUV420P,
				SWS_BILINEAR, NULL, NULL, NULL);
		break;
	default:
		break;
	}
	return 0;
}
Code example #26
int ffmpeg_jpeg_encode(unsigned char *srcBuf,unsigned char* dstBuf,int dstBufSize,PixelFormat srcPixFmt,int srcWidth,int srcHeight,int qvalue)
{
    AVCodec *codec;
    AVCodecContext *c= NULL;
    
    printf("Video encoding\n");

    /* find the MJPEG video encoder */
    codec = avcodec_find_encoder(CODEC_ID_MJPEG);
    if (!codec) {
        fprintf(stderr, "codec not found\n");
        return -1;
    }

    c= avcodec_alloc_context();
	c->qmin = qvalue;
	c->qmax = qvalue;
    /* resolution must be a multiple of two */
    c->width = srcWidth;
    c->height = srcHeight;
    
	c->time_base.den = 25;
	c->time_base.num = 1;
    c->max_b_frames=0;
    c->pix_fmt = PIX_FMT_YUVJ420P;

    /* open it */
    if (avcodec_open(c, codec) < 0) {
        fprintf(stderr, "could not open codec\n");
        return -2;
    }

	//prepare colorspace conversion
	//TODO: factor to util.
	AVPicture *pPicSrc = (AVPicture*)malloc(sizeof(AVPicture));
	int srcBufSize =  avpicture_get_size(srcPixFmt,srcWidth,srcHeight);
	avpicture_fill(pPicSrc,srcBuf,srcPixFmt,srcWidth,srcHeight);

	AVFrame *pPicScaled = (AVFrame*)malloc(sizeof(AVFrame));
	int scaleBufSize =  avpicture_get_size(c->pix_fmt,srcWidth,srcHeight);
	unsigned char *scaleBuf = (unsigned char*)malloc(scaleBufSize);
	avpicture_fill((AVPicture*)pPicScaled,scaleBuf,c->pix_fmt,srcWidth,srcHeight);

    SwsContext *img_convert_ctx = sws_getContext(
		srcWidth, srcHeight, srcPixFmt,
		srcWidth, srcHeight, c->pix_fmt,
		SWS_BICUBIC, NULL, NULL, NULL);

	if (img_convert_ctx == NULL)
	{
		printf("can not create colorspace converter!\n");
		return -3;
	}

	int ret = sws_scale(img_convert_ctx,
		pPicSrc->data, pPicSrc->linesize,
		0, srcHeight,
		pPicScaled->data, pPicScaled->linesize);

	if (ret < 0)
	{
		printf("color space conversion failed!\n");
		return -4;
	}

	//encode
	int out_size = avcodec_encode_video(c, dstBuf, dstBufSize, pPicScaled);
    
	if (out_size < 0)
	{
		printf("encode failed!\n");
		return -5;
	}

    avcodec_close(c);
    av_free(c);

	free(pPicSrc);      // allocated with malloc(), so release with free()
	free(pPicScaled);
	free(scaleBuf);
 
	return out_size;
}
Code example #27
File: VideoDecoder.cpp Project: dhorth/LiveProxy
/** Decoder.
 The supplied buffer should contain an H264 video frame, which DecodeFrame
  passes to the avcodec_decode_video2 method. Once decoded, a libswscale
  conversion turns the frame into RGB24.  The RGB24 buffer is
  then used to create a FrameInfo object which is placed on our video queue.

  \param pBuffer Memory buffer holding an H264 frame
  \param size Size of the buffer
*/
FrameInfo* CVideoDecoder::DecodeFrame(unsigned char *pBuffer, int size) 
{
		FrameInfo	*p_block=NULL;
        uint8_t startCode4[] = {0x00, 0x00, 0x00, 0x01};
        int got_frame = 0;
        AVPacket packet;

		//Initialize optional fields of a packet with default values. 
		av_init_packet(&packet);

		//set the buffer and the size	
        packet.data = pBuffer;
        packet.size = size;

        while (packet.size > sizeof(startCode4)) 
		{
			//Decode the video frame of size avpkt->size from avpkt->data into picture. 
			int len = avcodec_decode_video2(m_codecContext, m_frame, &got_frame, &packet);
			if(len<0)
			{
				TRACE_ERROR("Failed to decode video len=%d",len);
				break;
			}

			//sometimes we don't get the whole frame, so move
			//forward and try again
            if ( !got_frame )
			{
				packet.size -= len;
				packet.data += len;
				continue;
			}

			//allocate a working frame to store our rgb image
            AVFrame * rgb = avcodec_alloc_frame();
			if(rgb==NULL)
			{
				TRACE_ERROR("Failed to allocate new av frame");
				return NULL;
			}

			//Allocate and return an SwsContext. 
			struct SwsContext * scale_ctx = sws_getContext(m_codecContext->width,	
				m_codecContext->height,
				m_codecContext->pix_fmt,
				m_codecContext->width,
				m_codecContext->height,
				PIX_FMT_BGR24,
				SWS_BICUBIC,
				NULL,
				NULL,
				NULL);
            if (scale_ctx == NULL)
			{
				TRACE_ERROR("Failed to get context");
				continue;
			}

			//Calculate the size in bytes that a picture of the given width and height would occupy if stored in the given picture format. 
			int numBytes = avpicture_get_size(PIX_FMT_RGB24,
				m_codecContext->width,
				m_codecContext->height);
						
			try{
				//create one of our FrameInfo objects
				p_block = FrameNew(numBytes);
				if(p_block==NULL){	

					//cleanup the working buffer
					av_free(rgb);
					sws_freeContext(scale_ctx);
					scale_ctx=NULL;
					return NULL;
				}

				//Fill our frame buffer with the rgb image
				avpicture_fill((AVPicture*)rgb, 
					(uint8_t*)p_block->pdata,
					PIX_FMT_RGB24,
					m_codecContext->width,
					m_codecContext->height);
					
				//Scale the image slice in srcSlice and put the resulting scaled slice in the image in dst. 
				sws_scale(scale_ctx, 
					m_frame->data, 
					m_frame->linesize, 
					0, 
					m_codecContext->height, 
					rgb->data, 
					rgb->linesize);
									
				//set the frame header to indicate rgb24
				p_block->frameHead.FrameType = (long)(PIX_FMT_RGB24);
				p_block->frameHead.TimeStamp = 0;
			}
			catch(...)
			{
				TRACE_ERROR("EXCEPTION: in afterGettingFrame1 ");
			}

			//cleanup the working buffer
			av_free(rgb);
			sws_freeContext(scale_ctx);

			//we got our frame, now it's time to move on
			break;
        }
		return p_block;
}
Code example #28
int main(int argc, char *argv[]) {
    AVFormatContext *pFormatCtx = NULL;
    int             i, videoStream;
    AVCodecContext  *pCodecCtx = NULL;
    AVCodecParameters       *pCodecParam = NULL;
    AVCodec         *pCodec = NULL;
    AVFrame         *pFrame = NULL;
    AVPacket        packet;
    int             send_packet, receive_frame;
    //float           aspect_ratio;
    AVFrame        *pict;
    /*
    std::unique_ptr<AVFrame, std::function<void(AVFrame*)>> frame_converted{
        av_frame_alloc(),
        [](AVFrame* f){ av_free(f->data[0]); } };
    if (av_frame_copy_props(frame_converted.get(),
        frame_decoded.get()) < 0) {
        throw std::runtime_error("Copying frame properties");
    }
    if (av_image_alloc(
        frame_converted->data, frame_converted->linesize,
        video_decoder_->width(), video_decoder_->height(),
        video_decoder_->pixel_format(), 1) < 0) {
        throw std::runtime_error("Allocating picture");
    }
    */
    AVDictionary    *optionsDict = NULL;
    struct SwsContext *sws_ctx = NULL;

    SDL_Texture*    pTexture = nullptr;
    SDL_Window*     pWindows = nullptr;
    SDL_Renderer*   pRenderer = nullptr;

    SDL_Event       event;

    if (argc < 2) {
        fprintf(stderr, "Usage: test <file>\n");
        exit(1);
    }
    // Register all formats and codecs
    av_register_all();

    if (SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {
        fprintf(stderr, "Could not initialize SDL - %s\n", SDL_GetError());
        exit(1);
    }

    // Open video file
    if (avformat_open_input(&pFormatCtx, argv[1], NULL, NULL) != 0)
        return -1; // Couldn't open file

    // Retrieve stream information
    if (avformat_find_stream_info(pFormatCtx, NULL)<0)
        return -1; // Couldn't find stream information

    // Dump information about file onto standard error
    av_dump_format(pFormatCtx, 0, argv[1], 0);

    // Find the first video stream
    videoStream = -1;
    for (i = 0; i<pFormatCtx->nb_streams; i++)
        if (pFormatCtx->streams[i]->codecpar->codec_type == AVMEDIA_TYPE_VIDEO) {
            videoStream = i;
            break;
        }
    if (videoStream == -1)
        return -1; // Didn't find a video stream

    // Get a pointer to the codec parameters for the video stream.
    // AVStream.codec is deprecated, so use the codecpar struct (AVCodecParameters) instead;
    // avcodec_open2() still needs an AVCodecContext, so pCodecParam is copied into pCodecCtx below.
    pCodecParam = pFormatCtx->streams[videoStream]->codecpar;
    // Find the decoder for the video stream
    pCodec = avcodec_find_decoder(pCodecParam->codec_id);
    if (pCodec == NULL) {
        fprintf(stderr, "Unsupported codec!\n");
        return -1; // Codec not found
    }
    pCodecCtx = avcodec_alloc_context3(pCodec);
    avcodec_parameters_to_context(pCodecCtx, pCodecParam);

    // Open codec
    if (avcodec_open2(pCodecCtx, pCodec, &optionsDict)<0)
        return -1; // Could not open codec

    // Allocate video frame
    pFrame = av_frame_alloc();

    // Make a window to put our video on (SDL2)
    pWindows = SDL_CreateWindow(argv[1], SDL_WINDOWPOS_CENTERED, SDL_WINDOWPOS_CENTERED,
        pCodecParam->width, pCodecParam->height,
        SDL_WINDOW_BORDERLESS | SDL_WINDOW_RESIZABLE);
    if (!pWindows) {
        fprintf(stderr, "SDL: could not set video mode - exiting\n");
        exit(1);
    }
    
    // Allocate a place to put our YUV image on that screen
    pRenderer = SDL_CreateRenderer(pWindows, -1, 0);
    if (!pRenderer) {
        fprintf(stderr, "SDL: could not create renderer - exiting\n");
        exit(1);
    }
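    // YV12 and IYUV differ only in U/V plane order; SDL_UpdateYUVTexture()
    // takes explicit Y/U/V plane pointers, so a YV12 texture works with the
    // YUV420P planes produced by sws_scale() below.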
    pTexture = SDL_CreateTexture(pRenderer, SDL_PIXELFORMAT_YV12, SDL_TEXTUREACCESS_STREAMING, pCodecParam->width, pCodecParam->height);
    sws_ctx =
        sws_getContext
        (
        pCodecParam->width,
        pCodecParam->height,
        (AVPixelFormat)pCodecParam->format,
        pCodecParam->width,
        pCodecParam->height,
        AV_PIX_FMT_YUV420P,
        SWS_BILINEAR,
        NULL,
        NULL,
        NULL
        );
    pict = av_frame_alloc();
    if (pict == nullptr){
        exit(1);
    }
    // The buffer must match the sws_scale() destination format (YUV420P),
    // not the source format.
    if (av_image_alloc(pict->data, pict->linesize,
        pCodecParam->width, pCodecParam->height,
        AV_PIX_FMT_YUV420P, 1) < 0){
        exit(1);
    }


    // Read frames, convert them to YUV420P, and display them
    while (av_read_frame(pFormatCtx, &packet) >= 0) {
        // Is this a packet from the video stream?
        if (packet.stream_index == videoStream) {
            // Decode video frame
            // avcodec_decode_video2() is deprecated; use avcodec_send_packet() and avcodec_receive_frame().
            send_packet = avcodec_send_packet(pCodecCtx, &packet);
            receive_frame = avcodec_receive_frame(pCodecCtx, pFrame);

            // Did we get a video frame? Both calls return 0 on success.
            if (send_packet == 0 && receive_frame == 0) {
                //SDL_LockYUVOverlay(bmp);
                //SDL_LockTexture(pTexture,NULL,);
                // Convert the image into the YUV format that SDL uses.
                // Copy metadata (pts etc.) from the decoded frame to the converted frame.
                if (av_frame_copy_props(pict, pFrame) < 0) {
                    exit(1);
                }

                sws_scale
                    (
                    sws_ctx,
                    pFrame->data,
                    pFrame->linesize,
                    0,
                    pCodecParam->height,
                    pict->data,
                    pict->linesize
                    );
                
                //SDL_UnlockYUVOverlay(bmp);
                SDL_UpdateYUVTexture(pTexture, NULL, pict->data[0], pict->linesize[0], pict->data[1], pict->linesize[1], pict->data[2], pict->linesize[2]);
                SDL_RenderCopy(pRenderer, pTexture, NULL, NULL);
                SDL_RenderPresent(pRenderer);

            }
        }

        // Free the packet that was allocated by av_read_frame
        av_packet_unref(&packet);
        SDL_PollEvent(&event);
        switch (event.type) {
        case SDL_QUIT:
            SDL_DestroyRenderer(pRenderer);
            SDL_DestroyTexture(pTexture);
            SDL_DestroyWindow(pWindows);
            SDL_Quit();
            exit(0);
            break;
        default:
            break;
        }

    }

    // Free the frames and the scaler
    av_frame_free(&pFrame);
    av_freep(&pict->data[0]);
    av_frame_free(&pict);
    sws_freeContext(sws_ctx);

    // Free the codec context (avcodec_close() alone would leak it)
    avcodec_free_context(&pCodecCtx);

    // Close the video file
    avformat_close_input(&pFormatCtx);

    return 0;
}
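
A single send/receive pair per packet, as in the loop above, silently drops output whenever the decoder buffers the packet (AVERROR(EAGAIN)) or emits more than one frame from it. A minimal sketch of the usual loop shape around these two calls; handle_frame is an illustrative callback name, not part of the example:

#include <libavcodec/avcodec.h>

/* Feed one packet (or NULL to flush) and drain every frame the decoder
 * has ready. Returns 0 on success or a negative AVERROR. */
static int decode_packet(AVCodecContext *ctx, const AVPacket *pkt,
                         AVFrame *frame,
                         void (*handle_frame)(const AVFrame *))
{
    int ret = avcodec_send_packet(ctx, pkt);
    if (ret < 0)
        return ret;

    for (;;) {
        ret = avcodec_receive_frame(ctx, frame);
        if (ret == AVERROR(EAGAIN) || ret == AVERROR_EOF)
            return 0;   /* needs more input, or fully drained */
        if (ret < 0)
            return ret; /* real decoding error */
        handle_frame(frame);
        av_frame_unref(frame);
    }
}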
Code example #29
int main(int argc, char* argv[])
{
	AVFormatContext	*pFormatCtx;
	int				i, videoindex;
	AVCodecContext	*pCodecCtx;
	AVCodec			*pCodec;
	AVFrame	*pFrame,*pFrameYUV;
	uint8_t *out_buffer;
	AVPacket *packet;
	int y_size;
	int ret, got_picture;
	struct SwsContext *img_convert_ctx;

	char filepath[]="bigbuckbunny_480x272.h265";
	//SDL---------------------------
	int screen_w=0,screen_h=0;
	SDL_Window *screen; 
	SDL_Renderer* sdlRenderer;
	SDL_Texture* sdlTexture;
	SDL_Rect sdlRect;

	FILE *fp_yuv;

	av_register_all();
	avformat_network_init();
	pFormatCtx = avformat_alloc_context();

	if(avformat_open_input(&pFormatCtx,filepath,NULL,NULL)!=0){
		printf("Couldn't open input stream.\n");
		return -1;
	}
	if(avformat_find_stream_info(pFormatCtx,NULL)<0){
		printf("Couldn't find stream information.\n");
		return -1;
	}
	videoindex=-1;
	for(i=0; i<pFormatCtx->nb_streams; i++) 
		if(pFormatCtx->streams[i]->codec->codec_type==AVMEDIA_TYPE_VIDEO){
			videoindex=i;
			break;
		}
	if(videoindex==-1){
		printf("Didn't find a video stream.\n");
		return -1;
	}

	pCodecCtx=pFormatCtx->streams[videoindex]->codec;
	pCodec=avcodec_find_decoder(pCodecCtx->codec_id);
	if(pCodec==NULL){
		printf("Codec not found.\n");
		return -1;
	}
	if(avcodec_open2(pCodecCtx, pCodec,NULL)<0){
		printf("Could not open codec.\n");
		return -1;
	}
	
	pFrame=av_frame_alloc();
	pFrameYUV=av_frame_alloc();
	out_buffer=(uint8_t *)av_malloc(avpicture_get_size(AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height));
	avpicture_fill((AVPicture *)pFrameYUV, out_buffer, AV_PIX_FMT_YUV420P, pCodecCtx->width, pCodecCtx->height);
	packet=(AVPacket *)av_malloc(sizeof(AVPacket));
	//Output Info-----------------------------
	printf("--------------- File Information ----------------\n");
	av_dump_format(pFormatCtx,0,filepath,0);
	printf("-------------------------------------------------\n");
	img_convert_ctx = sws_getContext(pCodecCtx->width, pCodecCtx->height, pCodecCtx->pix_fmt, 
		pCodecCtx->width, pCodecCtx->height, AV_PIX_FMT_YUV420P, SWS_BICUBIC, NULL, NULL, NULL); 

#if OUTPUT_YUV420P 
    fp_yuv=fopen("output.yuv","wb+");  
#endif  
	
	if(SDL_Init(SDL_INIT_VIDEO | SDL_INIT_AUDIO | SDL_INIT_TIMER)) {  
		printf( "Could not initialize SDL - %s\n", SDL_GetError()); 
		return -1;
	} 

	screen_w = pCodecCtx->width;
	screen_h = pCodecCtx->height;
	//SDL 2.0 Support for multiple windows
	screen = SDL_CreateWindow("Simplest ffmpeg player's Window", SDL_WINDOWPOS_UNDEFINED, SDL_WINDOWPOS_UNDEFINED,
		screen_w, screen_h,
		SDL_WINDOW_OPENGL);

	if(!screen) {  
		printf("SDL: could not create window - exiting:%s\n",SDL_GetError());  
		return -1;
	}

	sdlRenderer = SDL_CreateRenderer(screen, -1, 0);  
	//IYUV: Y + U + V  (3 planes)
	//YV12: Y + V + U  (3 planes)
	sdlTexture = SDL_CreateTexture(sdlRenderer, SDL_PIXELFORMAT_IYUV, SDL_TEXTUREACCESS_STREAMING,pCodecCtx->width,pCodecCtx->height);  

	sdlRect.x=0;
	sdlRect.y=0;
	sdlRect.w=screen_w;
	sdlRect.h=screen_h;

	//SDL End----------------------
	while(av_read_frame(pFormatCtx, packet)>=0){
		if(packet->stream_index==videoindex){
			ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
			if(ret < 0){
				printf("Decode Error.\n");
				return -1;
			}
			if(got_picture){
				sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
					pFrameYUV->data, pFrameYUV->linesize);
				
#if OUTPUT_YUV420P
				y_size=pCodecCtx->width*pCodecCtx->height;  
				fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
				fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
				fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
				//SDL---------------------------
#if 0
				SDL_UpdateTexture( sdlTexture, NULL, pFrameYUV->data[0], pFrameYUV->linesize[0] );  
#else
				SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
				pFrameYUV->data[0], pFrameYUV->linesize[0],
				pFrameYUV->data[1], pFrameYUV->linesize[1],
				pFrameYUV->data[2], pFrameYUV->linesize[2]);
#endif	
				
				SDL_RenderClear( sdlRenderer );  
				SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
				SDL_RenderPresent( sdlRenderer );  
				//SDL End-----------------------
				//Delay 40ms
				SDL_Delay(40);
			}
		}
		av_free_packet(packet);
	}
	//flush decoder
	//FIX: flush the frames remaining in the codec
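	//av_free_packet() cleared packet->data and packet->size above, so
	//decoding the same packet again acts as an empty flush packet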
	while (1) {
		ret = avcodec_decode_video2(pCodecCtx, pFrame, &got_picture, packet);
		if (ret < 0)
			break;
		if (!got_picture)
			break;
		sws_scale(img_convert_ctx, (const uint8_t* const*)pFrame->data, pFrame->linesize, 0, pCodecCtx->height, 
			pFrameYUV->data, pFrameYUV->linesize);
#if OUTPUT_YUV420P
		int y_size=pCodecCtx->width*pCodecCtx->height;  
		fwrite(pFrameYUV->data[0],1,y_size,fp_yuv);    //Y 
		fwrite(pFrameYUV->data[1],1,y_size/4,fp_yuv);  //U
		fwrite(pFrameYUV->data[2],1,y_size/4,fp_yuv);  //V
#endif
		//SDL---------------------------
		SDL_UpdateYUVTexture(sdlTexture, &sdlRect,
			pFrameYUV->data[0], pFrameYUV->linesize[0],
			pFrameYUV->data[1], pFrameYUV->linesize[1],
			pFrameYUV->data[2], pFrameYUV->linesize[2]);
		SDL_RenderClear( sdlRenderer );  
		SDL_RenderCopy( sdlRenderer, sdlTexture,  NULL, &sdlRect);  
		SDL_RenderPresent( sdlRenderer );  
		//SDL End-----------------------
		//Delay 40ms
		SDL_Delay(40);
	}

	sws_freeContext(img_convert_ctx);

#if OUTPUT_YUV420P 
    fclose(fp_yuv);
#endif 

	SDL_Quit();

	av_frame_free(&pFrameYUV);
	av_frame_free(&pFrame);
	avcodec_close(pCodecCtx);
	avformat_close_input(&pFormatCtx);

	return 0;
}
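
Example #29 allocates its conversion frame with avpicture_get_size() and avpicture_fill(), both deprecated. A minimal sketch of the same allocation with the current AVFrame buffer API; alloc_yuv420p_frame is an illustrative name:

#include <libavutil/frame.h>
#include <libavutil/pixfmt.h>

/* Allocate a YUV420P frame that owns its own pixel buffer.
 * Returns the frame, or NULL on failure. */
static AVFrame *alloc_yuv420p_frame(int width, int height)
{
	AVFrame *frame = av_frame_alloc();
	if (!frame)
		return NULL;

	frame->format = AV_PIX_FMT_YUV420P;
	frame->width  = width;
	frame->height = height;

	/* Replaces av_malloc(avpicture_get_size(...)) + avpicture_fill() */
	if (av_frame_get_buffer(frame, 0) < 0) {
		av_frame_free(&frame);
		return NULL;
	}
	return frame;
}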
Code example #30
File: artwork.c Project: juvenal/forked-daapd
static int
artwork_rescale(AVFormatContext *src_ctx, int s, int out_w, int out_h, int format, struct evbuffer *evbuf)
{
  uint8_t *buf;
  uint8_t *outbuf;

  AVCodecContext *src;

  AVFormatContext *dst_ctx;
  AVCodecContext *dst;
  AVOutputFormat *dst_fmt;
  AVStream *dst_st;

  AVCodec *img_decoder;
  AVCodec *img_encoder;

  int64_t pix_fmt_mask;
  const enum PixelFormat *pix_fmts;

  AVFrame *i_frame;
  AVFrame *o_frame;

  struct SwsContext *swsctx;

  AVPacket pkt;
  int have_frame;

  int outbuf_len;

  int ret;

  src = src_ctx->streams[s]->codec;

  img_decoder = avcodec_find_decoder(src->codec_id);
  if (!img_decoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable decoder found for artwork %s\n", src_ctx->filename);

      return -1;
    }

#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 6)
  ret = avcodec_open2(src, img_decoder, NULL);
#else
  ret = avcodec_open(src, img_decoder);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for decoding: %s\n", strerror(AVUNERROR(ret)));

      return -1;
    }

  /* Set up output */
#if LIBAVFORMAT_VERSION_MAJOR >= 53 || (LIBAVFORMAT_VERSION_MAJOR == 52 && LIBAVFORMAT_VERSION_MINOR >= 45)
  /* FFmpeg 0.6 */
  dst_fmt = av_guess_format("image2", NULL, NULL);
#else
  dst_fmt = guess_format("image2", NULL, NULL);
#endif
  if (!dst_fmt)
    {
      DPRINTF(E_LOG, L_ART, "ffmpeg image2 muxer not available\n");

      ret = -1;
      goto out_close_src;
    }

  dst_fmt->video_codec = CODEC_ID_NONE;

  /* Try to keep same codec if possible */
  if ((src->codec_id == CODEC_ID_PNG) && (format & ART_CAN_PNG))
    dst_fmt->video_codec = CODEC_ID_PNG;
  else if ((src->codec_id == CODEC_ID_MJPEG) && (format & ART_CAN_JPEG))
    dst_fmt->video_codec = CODEC_ID_MJPEG;

  /* If not possible, select new codec */
  if (dst_fmt->video_codec == CODEC_ID_NONE)
    {
      if (format & ART_CAN_PNG)
	dst_fmt->video_codec = CODEC_ID_PNG;
      else if (format & ART_CAN_JPEG)
	dst_fmt->video_codec = CODEC_ID_MJPEG;
    }

  img_encoder = avcodec_find_encoder(dst_fmt->video_codec);
  if (!img_encoder)
    {
      DPRINTF(E_LOG, L_ART, "No suitable encoder found for codec ID %d\n", dst_fmt->video_codec);

      ret = -1;
      goto out_close_src;
    }

  dst_ctx = avformat_alloc_context();
  if (!dst_ctx)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for format context\n");

      ret = -1;
      goto out_close_src;
    }

  dst_ctx->oformat = dst_fmt;

#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_fmt->flags &= ~AVFMT_NOFILE;
#else
  ret = snprintf(dst_ctx->filename, sizeof(dst_ctx->filename), "evbuffer:%p", evbuf);
  if ((ret < 0) || (ret >= sizeof(dst_ctx->filename)))
    {
      DPRINTF(E_LOG, L_ART, "Output artwork URL too long\n");

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 21)
  dst_st = avformat_new_stream(dst_ctx, NULL);
#else
  dst_st = av_new_stream(dst_ctx, 0);
#endif
  if (!dst_st)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for new output stream\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  dst = dst_st->codec;

#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 35)
  avcodec_get_context_defaults3(dst, NULL);
#else
  avcodec_get_context_defaults2(dst, AVMEDIA_TYPE_VIDEO);
#endif

  if (dst_fmt->flags & AVFMT_GLOBALHEADER)
    dst->flags |= CODEC_FLAG_GLOBAL_HEADER;

  dst->codec_id = dst_fmt->video_codec;
#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 64)
  dst->codec_type = AVMEDIA_TYPE_VIDEO;
#else
  dst->codec_type = CODEC_TYPE_VIDEO;
#endif

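  /* Build a bitmask of the pixel formats the encoder supports, as input
   * for avcodec_find_best_pix_fmt() below */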
  pix_fmt_mask = 0;
  pix_fmts = img_encoder->pix_fmts;
  while (pix_fmts && (*pix_fmts != -1))
    {
      pix_fmt_mask |= (1LL << *pix_fmts); /* 64-bit shift: format values can exceed 31 */
      pix_fmts++;
    }

  dst->pix_fmt = avcodec_find_best_pix_fmt(pix_fmt_mask, src->pix_fmt, 1, NULL);

  if (dst->pix_fmt < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not determine best pixel format\n");

      ret = -1;
      goto out_free_dst_ctx;
    }

  DPRINTF(E_DBG, L_ART, "Selected pixel format: %d\n", dst->pix_fmt);

  dst->time_base.num = 1;
  dst->time_base.den = 25;

  dst->width = out_w;
  dst->height = out_h;

#if LIBAVFORMAT_VERSION_MAJOR <= 52 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR <= 1)
  ret = av_set_parameters(dst_ctx, NULL);
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Invalid parameters for artwork output: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst_ctx;
    }
#endif

  /* Open encoder */
#if LIBAVCODEC_VERSION_MAJOR >= 54 || (LIBAVCODEC_VERSION_MAJOR == 53 && LIBAVCODEC_VERSION_MINOR >= 6)
  ret = avcodec_open2(dst, img_encoder, NULL);
#else
  ret = avcodec_open(dst, img_encoder);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open codec for encoding: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_free_dst_ctx;
    }

  i_frame = avcodec_alloc_frame();
  o_frame = avcodec_alloc_frame();

  if (!i_frame || !o_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not allocate input/output frame\n");

      ret = -1;
      goto out_free_frames;
    }

  ret = avpicture_get_size(dst->pix_fmt, src->width, src->height);

  DPRINTF(E_DBG, L_ART, "Artwork buffer size: %d\n", ret);

  buf = (uint8_t *)av_malloc(ret);
  if (!buf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for artwork buffer\n");

      ret = -1;
      goto out_free_frames;
    }

  avpicture_fill((AVPicture *)o_frame, buf, dst->pix_fmt, src->width, src->height);

  swsctx = sws_getContext(src->width, src->height, src->pix_fmt,
			  dst->width, dst->height, dst->pix_fmt,
			  SWS_BICUBIC, NULL, NULL, NULL);
  if (!swsctx)
    {
      DPRINTF(E_LOG, L_ART, "Could not get SWS context\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Read packets until the first frame of the artwork stream is decoded */
  have_frame = 0;
  while (av_read_frame(src_ctx, &pkt) == 0)
    {
      if (pkt.stream_index != s)
	{
	  av_free_packet(&pkt);
	  continue;
	}

#if LIBAVCODEC_VERSION_MAJOR >= 53 || (LIBAVCODEC_VERSION_MAJOR == 52 && LIBAVCODEC_VERSION_MINOR >= 32)
      /* FFmpeg 0.6 */
      avcodec_decode_video2(src, i_frame, &have_frame, &pkt);
#else
      avcodec_decode_video(src, i_frame, &have_frame, pkt.data, pkt.size);
#endif

      break;
    }

  if (!have_frame)
    {
      DPRINTF(E_LOG, L_ART, "Could not decode artwork\n");

      av_free_packet(&pkt);
      sws_freeContext(swsctx);

      ret = -1;
      goto out_free_buf;
    }

  /* Scale */
#if LIBSWSCALE_VERSION_MAJOR >= 1 || (LIBSWSCALE_VERSION_MAJOR == 0 && LIBSWSCALE_VERSION_MINOR >= 9)
  /* FFmpeg 0.6, libav 0.6+ */
  sws_scale(swsctx, (const uint8_t * const *)i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#else
  sws_scale(swsctx, i_frame->data, i_frame->linesize, 0, src->height, o_frame->data, o_frame->linesize);
#endif

  sws_freeContext(swsctx);
  av_free_packet(&pkt);

  /* Open output file */
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  dst_ctx->pb = avio_evbuffer_open(evbuf);
#else
  ret = url_fopen(&dst_ctx->pb, dst_ctx->filename, URL_WRONLY);
#endif
  if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not open artwork destination buffer\n");

      ret = -1;
      goto out_free_buf;
    }

  /* Encode frame */
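  /* Generous upper bound: one packed 24-bit frame, raised to FFmpeg's
   * minimum encode buffer size just below */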
  outbuf_len = dst->width * dst->height * 3;
  if (outbuf_len < FF_MIN_BUFFER_SIZE)
    outbuf_len = FF_MIN_BUFFER_SIZE;

  outbuf = (uint8_t *)av_malloc(outbuf_len);
  if (!outbuf)
    {
      DPRINTF(E_LOG, L_ART, "Out of memory for encoded artwork buffer\n");

#if LIBAVFORMAT_VERSION_MAJOR >= 53
      avio_evbuffer_close(dst_ctx->pb);
#else
      url_fclose(dst_ctx->pb);
#endif

      ret = -1;
      goto out_free_buf;
    }

#if LIBAVCODEC_VERSION_MAJOR >= 54
  av_init_packet(&pkt);
  pkt.data = outbuf;
  pkt.size = outbuf_len;
  ret = avcodec_encode_video2(dst, &pkt, o_frame, &have_frame);
  if (!ret && have_frame && dst->coded_frame) 
    {
      dst->coded_frame->pts       = pkt.pts;
      dst->coded_frame->key_frame = !!(pkt.flags & AV_PKT_FLAG_KEY);
    }
  else if (ret < 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }
#else
  ret = avcodec_encode_video(dst, outbuf, outbuf_len, o_frame);
  if (ret <= 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not encode artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  av_init_packet(&pkt);
  pkt.stream_index = 0;
  pkt.data = outbuf;
  pkt.size = ret;
#endif

#if LIBAVFORMAT_VERSION_MAJOR >= 54 || (LIBAVFORMAT_VERSION_MAJOR == 53 && LIBAVFORMAT_VERSION_MINOR >= 3)
  ret = avformat_write_header(dst_ctx, NULL);
#else
  ret = av_write_header(dst_ctx);
#endif
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork header: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_interleaved_write_frame(dst_ctx, &pkt);

  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Error writing artwork\n");

      ret = -1;
      goto out_fclose_dst;
    }

  ret = av_write_trailer(dst_ctx);
  if (ret != 0)
    {
      DPRINTF(E_LOG, L_ART, "Could not write artwork trailer: %s\n", strerror(AVUNERROR(ret)));

      ret = -1;
      goto out_fclose_dst;
    }

  switch (dst_fmt->video_codec)
    {
      case CODEC_ID_PNG:
	ret = ART_FMT_PNG;
	break;

      case CODEC_ID_MJPEG:
	ret = ART_FMT_JPEG;
	break;

      default:
	DPRINTF(E_LOG, L_ART, "Unhandled rescale output format\n");
	ret = -1;
	break;
    }

 out_fclose_dst:
#if LIBAVFORMAT_VERSION_MAJOR >= 53
  avio_evbuffer_close(dst_ctx->pb);
#else
  url_fclose(dst_ctx->pb);
#endif
  av_free(outbuf);

 out_free_buf:
  av_free(buf);

 out_free_frames:
  if (i_frame)
    av_free(i_frame);
  if (o_frame)
    av_free(o_frame);
  avcodec_close(dst);

 out_free_dst_ctx:
  avformat_free_context(dst_ctx);

 out_close_src:
  avcodec_close(src);

  return ret;
}
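
artwork_rescale() selects its output pixel format with the mask-based avcodec_find_best_pix_fmt(), which later FFmpeg releases removed. A minimal sketch of the same selection using avcodec_find_best_pix_fmt_of_list(), assuming the encoder still publishes its supported formats through AVCodec::pix_fmts; pick_pix_fmt is an illustrative name:

#include <libavcodec/avcodec.h>

/* Pick the encoder pixel format that loses the least information
 * relative to the source format. */
static enum AVPixelFormat pick_pix_fmt(const AVCodec *encoder,
                                       enum AVPixelFormat src_fmt)
{
  if (!encoder->pix_fmts)
    return src_fmt; /* supported formats unknown; fall back to the source */

  return avcodec_find_best_pix_fmt_of_list(encoder->pix_fmts,
                                           src_fmt, 0, NULL);
}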