/* Example #1 */
static GstFlowReturn
gst_ffmpegscale_transform (GstBaseTransform * trans, GstBuffer * inbuf,
    GstBuffer * outbuf)
{
  GstFFMpegScale *scale = GST_FFMPEGSCALE (trans);
  AVPicture src_pic;
  AVPicture dst_pic;

  /* Propagate the input buffer's timestamps to the output buffer. */
  gst_buffer_copy_metadata (outbuf, inbuf, GST_BUFFER_COPY_TIMESTAMPS);

  /* Wrap the raw buffer data in AVPicture descriptors. Both sides use the
   * negotiated pixel format; only the dimensions differ. */
  gst_ffmpeg_avpicture_fill (&src_pic, GST_BUFFER_DATA (inbuf),
      scale->pixfmt, scale->in_width, scale->in_height);
  gst_ffmpeg_avpicture_fill (&dst_pic, GST_BUFFER_DATA (outbuf),
      scale->pixfmt, scale->out_width, scale->out_height);

  /* Rescale using the pre-initialized resample context. */
  img_resample (scale->res, &dst_pic, &src_pic);

  return GST_FLOW_OK;
}
///////////////////////////////
// Convert AVFrame to wxBitmap
wxBitmap LAVCVideoProvider::AVFrameToWX(AVFrame *source, int n) {
	// Converts a decoded AVFrame to a wxBitmap at display size, rendering
	// the subtitle overlay (if any) for frame n on the way.
	int w = codecContext->width;
	int h = codecContext->height;
	// Target pixel format for wxImage: packed 24-bit RGB.
	PixelFormat format = PIX_FMT_RGB24;
	unsigned int size1 = avpicture_get_size(codecContext->pix_fmt,display_w,display_h);
	unsigned int size2 = avpicture_get_size(format,display_w,display_h);

	// (Re)allocate the scratch buffers when the required size changes.
	// The buffers come from new[], so they must be released with delete[]
	// (the previous plain `delete` was undefined behavior). delete[] on a
	// null pointer is a no-op, so no guard is needed.
	if (!buffer1 || buffer1Size != size1) {
		delete[] buffer1;
		buffer1 = new uint8_t[size1];
		buffer1Size = size1;
	}
	if (!buffer2 || buffer2Size != size2) {
		delete[] buffer2;
		buffer2 = new uint8_t[size2];
		buffer2Size = size2;
	}

	// Resample to display dimensions when they differ from the source.
	AVFrame *resized;
	bool resize = w != display_w || h != display_h;
	if (resize) {
		resized = avcodec_alloc_frame();
		avpicture_fill((AVPicture*) resized, buffer1, codecContext->pix_fmt, display_w, display_h);

		ImgReSampleContext *resampleContext = img_resample_init(display_w,display_h,w,h);
		img_resample(resampleContext,(AVPicture*) resized,(AVPicture*) source);
		img_resample_close(resampleContext);

		// From here on, work with the display dimensions.
		w = display_w;
		h = display_h;
	}
	else resized = source;

	// Convert the (possibly resized) frame to RGB24.
	AVFrame *frameRGB = avcodec_alloc_frame();
	avpicture_fill((AVPicture*) frameRGB, buffer2, format, w, h);
	img_convert((AVPicture*) frameRGB, format, (AVPicture*) resized, codecContext->pix_fmt, w, h);

	// Build the wxImage; SetData takes ownership of the malloc'd block,
	// so `data` must not be freed here.
	wxImage img(w, h, false);
	unsigned char *data = (unsigned char *)malloc(w * h * 3);
	memcpy(data, frameRGB->data[0], w * h * 3);
	img.SetData(data);
	if (overlay)
		overlay->Render(img, VFR_Input.GetTimeAtFrame(n));

	wxBitmap bmp(img);

	av_free(frameRGB);
	if (resized != source)
		av_free(resized);
	return bmp;
}
/* Example #3 */
pixerrorcode pix_convert(int flags, piximage * img_dst, piximage * img_src) {
	// Converts (and, if needed, resizes) img_src into img_dst.
	// Returns PIX_OK on success, PIX_NOK on any failure.

	uint8_t * buf_source = img_src->data;
	int need_avfree = 0;
	pixerrorcode result = PIX_OK;

	// NV12 input is first converted to YUV420P because the rest of the
	// pipeline (and ffmpeg's resampler) cannot handle NV12 directly.
	// NOTE: this also rewrites img_src->palette, a caller-visible side
	// effect preserved from the original behavior.
	if (img_src->palette == PIX_OSI_NV12) {
		buf_source = _nv12_to_yuv420p(img_src->data, img_src->width, img_src->height);
		need_avfree = 1;
		img_src->palette = PIX_OSI_YUV420P;
	}

	// Resizing is needed when the source and destination sizes differ.
	int need_resize = (img_src->width != img_dst->width) || (img_src->height != img_dst->height);

	int pix_fmt_source = pix_ffmpeg_from_pix_osi(img_src->palette);
	int pix_fmt_target = pix_ffmpeg_from_pix_osi(img_dst->palette);

	AVPicture avp_source, avp_target;
	avpicture_fill(&avp_source,  buf_source, pix_fmt_source, img_src->width, img_src->height);
	avpicture_fill(&avp_target, img_dst->data, pix_fmt_target, img_dst->width, img_dst->height);

	// Vertical flip: point each plane at its last row and negate the
	// stride so ffmpeg walks the image bottom-up.
	// FIXME Only flip other planes if the destination palette is YUV420
	if ((flags & PIX_FLIP_HORIZONTALLY) && (img_src->palette == PIX_OSI_YUV420P)) {
		avp_source.data[0] += avp_source.linesize[0] * (img_src->height - 1);
		avp_source.linesize[0] *= -1;

		if (pix_fmt_source == PIX_FMT_YUV420P) {
			// Chroma planes are half-height in YUV420P.
			avp_source.data[1] += avp_source.linesize[1] * (img_src->height / 2 - 1);
			avp_source.linesize[1] *= -1;
			avp_source.data[2] += avp_source.linesize[2] * (img_src->height / 2 - 1);
			avp_source.linesize[2] *= -1;
		}
	}

	if (need_resize) {
		// Resampling only works YUV420P -> YUV420P in current ffmpeg.
		if (pix_fmt_source != PIX_FMT_YUV420P) {
			result = PIX_NOK;
		} else {
			//TODO optimize this part but will need the preparation of contexts
			ImgReSampleContext * resample_context = img_resample_init(img_dst->width, img_dst->height,
				img_src->width, img_src->height);

			if (!resample_context) {
				result = PIX_NOK;
			} else {
				// Resample into a temporary buffer at destination size,
				// then convert that to the destination palette.
				AVPicture avp_tmp_target;
				uint8_t * buf_tmp_target = (uint8_t *)av_malloc(avpicture_get_size(pix_fmt_source, img_dst->width, img_dst->height) * sizeof(uint8_t));

				if (!buf_tmp_target) {
					result = PIX_NOK;
				} else {
					avpicture_fill(&avp_tmp_target, buf_tmp_target, pix_fmt_source, img_dst->width, img_dst->height);

					img_resample(resample_context, &avp_tmp_target, &avp_source);

					if (img_convert(&avp_target, pix_fmt_target,
						&avp_tmp_target, pix_fmt_source,
						img_dst->width, img_dst->height) == -1) {
						result = PIX_NOK;
					}
					av_free(buf_tmp_target);
				}
				img_resample_close(resample_context);
			}
		}
	} else {
		// Same size: direct palette conversion.
		if (img_convert(&avp_target, pix_fmt_target,
			&avp_source, pix_fmt_source,
			img_src->width, img_src->height) == -1) {
			result = PIX_NOK;
		}
	}

	// Release the temporary YUV420P buffer from the NV12 conversion on
	// every path -- the original leaked it on each early PIX_NOK return.
	if (need_avfree) {
		av_free(buf_source);
	}

	return result;
}
/* Example #4 */
UINT CFlvUtils::WriteVideoFrame()
{
   // Converts the prepared RGB24 frame (m_pAVFrameTmpPicture) to the
   // encoder's pixel format, resamples it to the destination size if
   // needed, encodes it, and writes it to the output file.
   // Returns S_OK on success, E_FAIL if av_write_frame reported failure.
   // NOTE(review): declared UINT but returns HRESULT values -- callers
   // presumably compare against S_OK/E_FAIL; confirm.
   HRESULT hr = S_OK;

   AVCodecContext *avCodecContext = &m_pAVStreamVideo->codec;
   AVFrame *pPicturePointer = NULL;

//   img_convert((AVPicture *)m_pAVFramePicture, avCodecContext->pix_fmt, 
//               (AVPicture *)m_pAVFrameTmpPicture, PIX_FMT_RGB24,
//               avCodecContext->width, avCodecContext->height);

   // Is resampling necessary?
   if ((m_nDestVideoWidth != m_nSrcVideoWidth) || (m_nDestVideoHeight != m_nSrcVideoHeight))
   {
      // Convert RGB data to the output format at the SOURCE size first;
      // the resampler below then scales it to the destination size.
      img_convert((AVPicture *)m_pAVFrameTmpPicture2, avCodecContext->pix_fmt, 
                  (AVPicture *)m_pAVFrameTmpPicture, PIX_FMT_RGB24,
                  m_nSrcVideoWidth, m_nSrcVideoHeight);

      // Resample (scale) from source to destination dimensions using the
      // context prepared elsewhere (m_pImgResampleContext).
      img_resample(m_pImgResampleContext, 
                   (AVPicture *)m_pAVFramePicture, 
                   (AVPicture *)m_pAVFrameTmpPicture2);
   }
   else
   {
      // Same size: convert RGB data to the output format directly into
      // the picture that will be encoded.
      img_convert((AVPicture *)m_pAVFramePicture, avCodecContext->pix_fmt, 
                  (AVPicture *)m_pAVFrameTmpPicture, PIX_FMT_RGB24,
                  m_nDestVideoWidth, m_nDestVideoHeight);
   }
   
   if (m_pAVFramePicture)
      pPicturePointer = m_pAVFramePicture;

   int nReturn;
   int nOutSize = 0;

   if (m_pAVFormatContext->oformat->flags & AVFMT_RAWPICTURE)
   {
      // Raw video case: the muxer accepts the AVPicture struct itself as
      // packet payload (old ffmpeg AVFMT_RAWPICTURE convention).
      AVPacket pkt;
      av_init_packet(&pkt);

      pkt.flags |= PKT_FLAG_KEY;
      pkt.stream_index= m_pAVStreamVideo->index;
      pkt.data= (uint8_t *)pPicturePointer;
      pkt.size= sizeof(AVPicture);

      // Write the frame into the media file
      nReturn = av_write_frame(m_pAVFormatContext, &pkt);
   }
   else
   {
      // Encode the image into m_pVideoOutbuf.
      nOutSize = avcodec_encode_video(avCodecContext, m_pVideoOutbuf, m_nVideoOutbufSize, pPicturePointer);

      // nOutSize == 0 means the encoder buffered the frame (no output
      // yet); only write a packet when we actually got data.
      if (nOutSize != 0)
      {
         AVPacket pkt;
         av_init_packet(&pkt);

         // Timestamp and keyframe flag come from the encoder's
         // coded_frame, not from the input picture.
         pkt.pts= avCodecContext->coded_frame->pts;
         if(avCodecContext->coded_frame->key_frame)
            pkt.flags |= PKT_FLAG_KEY;
         pkt.stream_index= m_pAVStreamVideo->index;
         pkt.data= m_pVideoOutbuf;
         pkt.size= nOutSize;

         // Write the compressed frame in the media file
         nReturn = av_write_frame(m_pAVFormatContext, &pkt);
      }
      else
         nReturn = 0;
   }

   // av_write_frame returns 0 on success here; treat success-with-no-write
   // the same way. Any other value maps to E_FAIL.
   if (0 == nReturn)
      hr = E_FAIL;

   return hr;
}