Example #1
0
// Store the destination video parameters, round the frame size up to the
// encoder's alignment requirements, and create the resampling context.
//
// In/out: nVideoWidth, nVideoHeight - requested size; updated in place to
//         the (possibly expanded) size actually used.
// In:     nVideoBitrate, nVideoFramerate - stored for later encoder setup.
// Returns S_OK on success, E_FAIL if the resample context could not be
// created.
UINT CFlvUtils::SetVideoCodecParams(int &nVideoWidth, int &nVideoHeight, int nVideoBitrate, int nVideoFramerate)
{
   // Note: PutSrcVideoDimensions() has to be called before this

   HRESULT hr = S_OK;

   m_nDestVideoWidth = nVideoWidth;
   m_nDestVideoHeight = nVideoHeight;
   m_nVideoBitrate = nVideoBitrate;
   m_nVideoFramerate = nVideoFramerate;

   // Video width must be a multiple of 4,
   // video height must be a multiple of 2.
   // --> Expand (round up) the video size, if necessary
   if (m_nDestVideoWidth % 4 != 0)
      m_nDestVideoWidth += (4 - (m_nDestVideoWidth % 4));
   if (m_nDestVideoHeight % 2 != 0)
      m_nDestVideoHeight += (2 - (m_nDestVideoHeight % 2));

   // Resample context (src size -> dest size).
   // Guard against allocation failure so later users of the context
   // do not dereference a NULL pointer.
   m_pImgResampleContext = img_resample_init(m_nDestVideoWidth, m_nDestVideoHeight, 
                                             m_nSrcVideoWidth, m_nSrcVideoHeight);
   if (!m_pImgResampleContext)
      hr = E_FAIL;

   // Report the adjusted dimensions back to the caller
   nVideoWidth = m_nDestVideoWidth;
   nVideoHeight = m_nDestVideoHeight;

   return hr;
}
Example #2
0
/* Negotiate in/out geometry for the scaler element.
 *
 * Reads width/height from both caps, rescales the input
 * pixel-aspect-ratio so the display aspect ratio is preserved, probes
 * the input pixel format via a temporary codec context, and sets up the
 * resampling context.
 *
 * Returns TRUE on success, FALSE if a required field is missing, the
 * pixel format cannot be determined, or allocation fails. */
static gboolean
gst_ffmpegscale_set_caps (GstBaseTransform * trans, GstCaps * incaps,
    GstCaps * outcaps)
{
  GstFFMpegScale *scale = GST_FFMPEGSCALE (trans);
  GstStructure *instructure = gst_caps_get_structure (incaps, 0);
  GstStructure *outstructure = gst_caps_get_structure (outcaps, 0);
  gint par_num, par_den;
  AVCodecContext *ctx;

  if (!gst_structure_get_int (instructure, "width", &scale->in_width))
    return FALSE;
  if (!gst_structure_get_int (instructure, "height", &scale->in_height))
    return FALSE;

  if (!gst_structure_get_int (outstructure, "width", &scale->out_width))
    return FALSE;
  if (!gst_structure_get_int (outstructure, "height", &scale->out_height))
    return FALSE;

  if (gst_structure_get_fraction (instructure, "pixel-aspect-ratio",
          &par_num, &par_den)) {
    /* Preserve the display aspect ratio:
     *   out_par = in_par * (in_w / out_w) / (in_h / out_h)
     * Keep everything as one fraction (num = par_n*in_w*out_h,
     * den = par_d*in_h*out_w) instead of dividing each factor
     * separately, which would truncate under integer division. */
    gst_structure_set (outstructure,
        "pixel-aspect-ratio", GST_TYPE_FRACTION,
        par_num * scale->in_width * scale->out_height,
        par_den * scale->in_height * scale->out_width, NULL);
  }

  /* Use a throw-away codec context to let the caps helper fill in the
   * pixel format; PIX_FMT_NB is a sentinel meaning "not set". */
  ctx = avcodec_alloc_context ();
  if (ctx == NULL)
    return FALSE;
  ctx->width = scale->in_width;
  ctx->height = scale->in_height;
  ctx->pix_fmt = PIX_FMT_NB;
  gst_ffmpeg_caps_with_codectype (CODEC_TYPE_VIDEO, incaps, ctx);
  if (ctx->pix_fmt == PIX_FMT_NB) {
    av_free (ctx);
    return FALSE;
  }

  scale->pixfmt = ctx->pix_fmt;

  av_free (ctx);

  scale->res = img_resample_init (scale->out_width, scale->out_height,
      scale->in_width, scale->in_height);

  /* Fail negotiation if the resample context could not be created. */
  return scale->res != NULL;
}
///////////////////////////////
// Convert AVFrame to wxBitmap
//
// Scales the decoded frame to (display_w, display_h) when needed, converts
// it to RGB24 and wraps the pixels in a wxBitmap. 'n' is the frame index,
// used only to time the subtitle overlay.
wxBitmap LAVCVideoProvider::AVFrameToWX(AVFrame *source, int n) {
	// Get sizes
	int w = codecContext->width;
	int h = codecContext->height;
//#ifdef __WINDOWS__
//	PixelFormat format = PIX_FMT_RGBA32;
//#else
	PixelFormat format = PIX_FMT_RGB24;
//#endif
	unsigned int size1 = avpicture_get_size(codecContext->pix_fmt,display_w,display_h);
	unsigned int size2 = avpicture_get_size(format,display_w,display_h);

	// Prepare buffers (grow/shrink only when the required size changes).
	// Note: allocated with new[], so they must be released with delete[].
	if (!buffer1 || buffer1Size != size1) {
		delete[] buffer1;
		buffer1 = new uint8_t[size1];
		buffer1Size = size1;
	}
	if (!buffer2 || buffer2Size != size2) {
		delete[] buffer2;
		buffer2 = new uint8_t[size2];
		buffer2Size = size2;
	}

	// Resize to display size if the coded size differs
	AVFrame *resized;
	bool resize = w != display_w || h != display_h;
	if (resize) {
		// Allocate a frame backed by buffer1
		resized = avcodec_alloc_frame();
		avpicture_fill((AVPicture*) resized, buffer1, codecContext->pix_fmt, display_w, display_h);

		// Resize
		ImgReSampleContext *resampleContext = img_resample_init(display_w,display_h,w,h);
		if (resampleContext) {
			img_resample(resampleContext,(AVPicture*) resized,(AVPicture*) source);
			img_resample_close(resampleContext);
		}

		// Set new w/h
		w = display_w;
		h = display_h;
	}
	else resized = source;

	// Allocate RGB24 frame backed by buffer2
	AVFrame *frameRGB = avcodec_alloc_frame();
	avpicture_fill((AVPicture*) frameRGB, buffer2, format, w, h);

	// Convert to RGB24
	img_convert((AVPicture*) frameRGB, format, (AVPicture*) resized, codecContext->pix_fmt, w, h);

	// Convert to wxBitmap; wxImage takes ownership of the malloc'd data
	wxImage img(w, h, false);
	unsigned char *data = (unsigned char *)malloc(w * h * 3);
	memcpy(data, frameRGB->data[0], w * h * 3);
	img.SetData(data);
	if (overlay)
		overlay->Render(img, VFR_Input.GetTimeAtFrame(n));

	wxBitmap bmp(img);

	av_free(frameRGB);
	if (resized != source)
		av_free(resized);
	return bmp;
}
Example #4
0
/* Convert img_src into img_dst's palette and size.
 *
 * flags:   PIX_FLIP_HORIZONTALLY flips the source vertically (plane
 *          pointers/strides are negated) before conversion.
 * Returns PIX_OK on success, PIX_NOK on unsupported format combinations
 * or allocation failure.
 *
 * Note: an NV12 source is first converted to YUV420P into a temporary
 * buffer (freed before returning on every path), and img_src->palette is
 * updated to PIX_OSI_YUV420P as a side effect (original behavior kept). */
pixerrorcode pix_convert(int flags, piximage * img_dst, piximage * img_src) {

	uint8_t * buf_source = img_src->data;
	int need_avfree = 0;

	//If the format is NV12, transform it to YUV420P first
	if (img_src->palette == PIX_OSI_NV12) {
		buf_source = _nv12_to_yuv420p(img_src->data, img_src->width, img_src->height);
		need_avfree = 1;
		img_src->palette = PIX_OSI_YUV420P;
	}
	////

	int need_resize = 0;

	//Check if the piximage needs to be resized
	if ((img_src->width != img_dst->width) || (img_src->height != img_dst->height)) {
		need_resize = 1;
	}
	////

	int pix_fmt_source = pix_ffmpeg_from_pix_osi(img_src->palette);
	int pix_fmt_target = pix_ffmpeg_from_pix_osi(img_dst->palette);

	AVPicture avp_source, avp_target;
	avpicture_fill(&avp_source,  buf_source, pix_fmt_source, img_src->width, img_src->height);
	avpicture_fill(&avp_target, img_dst->data, pix_fmt_target, img_dst->width, img_dst->height);

	//FIXME Only flip other planes if the destination palette is YUV420
	if ((flags & PIX_FLIP_HORIZONTALLY) && (img_src->palette == PIX_OSI_YUV420P)) {
		//Flip by pointing at the last row and using negative strides
		avp_source.data[0] += avp_source.linesize[0] * (img_src->height - 1);
		avp_source.linesize[0] *= -1;

		if (pix_fmt_source == PIX_FMT_YUV420P) {
			//Chroma planes are half height in YUV420P
			avp_source.data[1] += avp_source.linesize[1] * (img_src->height / 2 - 1);
			avp_source.linesize[1] *= -1;
			avp_source.data[2] += avp_source.linesize[2] * (img_src->height / 2 - 1);
			avp_source.linesize[2] *= -1;
		}
	}

	//Resizing picture if needed. Needs test
	if (need_resize) {

		//resampling only works yuv420P -> yuv420P in current ffmpeg

		if (pix_fmt_source != PIX_FMT_YUV420P) {
			//don't leak the temporary NV12->YUV420P buffer on error
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}

		//TODO optimize this part but will need the preparation of contexts
		ImgReSampleContext * resample_context = img_resample_init(img_dst->width, img_dst->height,
			img_src->width, img_src->height);

		if (!resample_context) {
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}

		AVPicture avp_tmp_target;

		//we need to prepare a tmp buffer for the resized (not yet converted) image
		uint8_t * buf_tmp_target = (uint8_t *)av_malloc(avpicture_get_size(pix_fmt_source, img_dst->width, img_dst->height)  * sizeof(uint8_t));
		if (!buf_tmp_target) {
			img_resample_close(resample_context);
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}
		avpicture_fill(&avp_tmp_target, buf_tmp_target, pix_fmt_source, img_dst->width, img_dst->height);
		//

		//do the resampling
		img_resample(resample_context, &avp_tmp_target, &avp_source);
		img_resample_close(resample_context);
		//

		//do the conversion
		if (img_convert(&avp_target, pix_fmt_target,
			&avp_tmp_target, pix_fmt_source,
			img_dst->width, img_dst->height) == -1) {

			av_free(buf_tmp_target);
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}
		av_free(buf_tmp_target);
		//

	} else {
		if (img_convert(&avp_target, pix_fmt_target,
			&avp_source, pix_fmt_source,
			img_src->width, img_src->height) == -1) {
			if (need_avfree) {
				av_free(buf_source);
			}
			return PIX_NOK;
		}
	}
	////

	if (need_avfree) {
		av_free(buf_source);
	}

	return PIX_OK;
}