Example #1
0
/* GObject finalize handler for the scale element: releases the ffmpeg
 * resample context (if one was ever created) and chains up to the
 * parent class so base-class resources are freed as well. */
static void
gst_ffmpegscale_finalize (GObject * object)
{
  GstFFMpegScale *ffmpegscale = GST_FFMPEGSCALE (object);

  /* Dispose of the resample context; it is only allocated on demand,
   * so it may legitimately still be NULL here. */
  if (ffmpegscale->res)
    img_resample_close (ffmpegscale->res);

  G_OBJECT_CLASS (parent_class)->finalize (object);
}
Example #2
0
/*
 * Tear down an H.263 encoder instance.
 *
 * Releases the resample context, closes the codec, frees every
 * ffmpeg-allocated scratch buffer and finally frees the wrapper struct.
 * Ownership: ctx must be a ph_h263_encoder_ctx_t produced by the matching
 * init routine; the pointer is invalid after this call.
 */
void h263_encoder_cleanup(void *ctx) {
	ph_h263_encoder_ctx_t *enc = (ph_h263_encoder_ctx_t *) ctx;

	img_resample_close(enc->encoder_ctx.res_ctx);
	avcodec_close(enc->encoder_ctx.context);

	/* ffmpeg-owned allocations all go through av_free(). */
	av_free(enc->encoder_ctx.resized_pic);
	av_free(enc->encoder_ctx.sampled_frame);
	av_free(enc->data_enc);
	av_free(enc->encoder_ctx.context);

	/* The wrapper itself came from plain malloc. */
	free(enc);
}
Example #3
0
// Finalize the FLV file: flush the container trailer, close the output
// stream (unless the muxer manages its own I/O) and release the image
// resample context.
// Returns S_OK; kept as HRESULT plumbing for callers that check it.
// NOTE(review): now tolerates members that were never created, and clears
// m_pImgResampleContext so a second call cannot double-close it.
UINT CFlvUtils::CloseFlvFile()
{
   HRESULT hr = S_OK;

   if (m_pAVFormatContext)
   {
      // Write the trailer, if any
      av_write_trailer(m_pAVFormatContext);

      // AVFMT_NOFILE muxers do not own a byte stream we opened.
      if (m_pAVOutputFormat && !(m_pAVOutputFormat->flags & AVFMT_NOFILE))
      {
        url_fclose(&m_pAVFormatContext->pb);
      }
   }

   // Close resample context and drop the now-dangling pointer.
   if (m_pImgResampleContext)
   {
      img_resample_close(m_pImgResampleContext);
      m_pImgResampleContext = NULL;
   }

   return hr;
}
///////////////////////////////
// Convert AVFrame to wxBitmap
// Convert a decoded AVFrame into a wxBitmap: resample to the display
// dimensions if needed, convert to RGB24, then hand the pixels to wx.
// @param source decoded frame in codecContext->pix_fmt
// @param n      frame number, used only to time the subtitle overlay
// BUGFIX: buffer1/buffer2 are allocated with new[], so they must be
// released with delete[] — plain delete on a new[] pointer is undefined
// behavior.  Also removed the unused local 'resSize'.
wxBitmap LAVCVideoProvider::AVFrameToWX(AVFrame *source, int n) {
	// Get sizes
	int w = codecContext->width;
	int h = codecContext->height;
//#ifdef __WINDOWS__
//	PixelFormat format = PIX_FMT_RGBA32;
//#else
	PixelFormat format = PIX_FMT_RGB24;
//#endif
	unsigned int size1 = avpicture_get_size(codecContext->pix_fmt,display_w,display_h);
	unsigned int size2 = avpicture_get_size(format,display_w,display_h);

	// (Re)allocate scratch buffers whenever the required size changed.
	// delete[] on NULL is a no-op, so no guard is needed.
	if (!buffer1 || buffer1Size != size1) {
		delete[] buffer1;
		buffer1 = new uint8_t[size1];
		buffer1Size = size1;
	}
	if (!buffer2 || buffer2Size != size2) {
		delete[] buffer2;
		buffer2 = new uint8_t[size2];
		buffer2Size = size2;
	}

	// Resample to display dimensions if the coded size differs
	AVFrame *resized;
	bool resize = w != display_w || h != display_h;
	if (resize) {
		// Wrap buffer1 in an AVFrame at the display size
		resized = avcodec_alloc_frame();
		avpicture_fill((AVPicture*) resized, buffer1, codecContext->pix_fmt, display_w, display_h);

		// Resize
		ImgReSampleContext *resampleContext = img_resample_init(display_w,display_h,w,h);
		img_resample(resampleContext,(AVPicture*) resized,(AVPicture*) source);
		img_resample_close(resampleContext);

		// Set new w/h
		w = display_w;
		h = display_h;
	}
	else resized = source;

	// Allocate RGB24 frame backed by buffer2
	AVFrame *frameRGB = avcodec_alloc_frame();
	avpicture_fill((AVPicture*) frameRGB, buffer2, format, w, h);

	// Convert to RGB24
	img_convert((AVPicture*) frameRGB, format, (AVPicture*) resized, codecContext->pix_fmt, w, h);

	// Hand the pixels to wxImage.  SetData takes ownership of a buffer it
	// will release with free(), so malloc (not new[]) is required here.
	// RGB24 frames filled via avpicture_fill are contiguous (linesize == w*3),
	// so a single memcpy of w*h*3 bytes is safe.
	wxImage img(w, h, false);
	unsigned char *data = (unsigned char *)malloc(w * h * 3);
	memcpy(data, frameRGB->data[0], w * h * 3);
	img.SetData(data);
	if (overlay)
		overlay->Render(img, VFR_Input.GetTimeAtFrame(n));

	wxBitmap bmp(img);

	av_free(frameRGB);
	if (resized != source)
		av_free(resized);
	return bmp;
}
Example #5
0
/*
 * Convert img_src into img_dst's palette and dimensions.
 *
 * Steps, each optional:
 *  1. NV12 sources are first transcoded to YUV420P into an av_malloc'ed
 *     buffer (side effect: img_src->palette is rewritten to PIX_OSI_YUV420P —
 *     visible to the caller, kept for backward compatibility).
 *  2. PIX_FLIP_HORIZONTALLY on a YUV420P source flips the planes by walking
 *     them bottom-up with negative linesizes.
 *  3. If the dimensions differ the picture is resampled, which ffmpeg only
 *     supports for YUV420P input.
 *
 * Returns PIX_OK on success, PIX_NOK on failure.
 * BUGFIX: the NV12 conversion buffer is now freed on every path; the
 * original leaked it on each early PIX_NOK return.  Also checks the
 * av_malloc of the intermediate buffer and drops the unused len_target.
 */
pixerrorcode pix_convert(int flags, piximage * img_dst, piximage * img_src) {

	uint8_t * buf_source = img_src->data;
	int need_avfree = 0;
	pixerrorcode result = PIX_OK;

	//If the format is NV12, transform it to a planar layout ffmpeg understands
	if (img_src->palette == PIX_OSI_NV12) {
		buf_source = _nv12_to_yuv420p(img_src->data, img_src->width, img_src->height);
		need_avfree = 1;
		img_src->palette = PIX_OSI_YUV420P;
	}

	//Check if the piximage needs to be resized
	int need_resize = (img_src->width != img_dst->width) || (img_src->height != img_dst->height);

	int pix_fmt_source = pix_ffmpeg_from_pix_osi(img_src->palette);
	int pix_fmt_target = pix_ffmpeg_from_pix_osi(img_dst->palette);

	AVPicture avp_source, avp_target;
	avpicture_fill(&avp_source,  buf_source, pix_fmt_source, img_src->width, img_src->height);
	avpicture_fill(&avp_target, img_dst->data, pix_fmt_target, img_dst->width, img_dst->height);

	//FIXME Only flip other planes if the destination palette is YUV420
	if ((flags & PIX_FLIP_HORIZONTALLY) && (img_src->palette == PIX_OSI_YUV420P)) {
		avp_source.data[0] += avp_source.linesize[0] * (img_src->height - 1);
		avp_source.linesize[0] *= -1;

		if (pix_fmt_source == PIX_FMT_YUV420P) {
			avp_source.data[1] += avp_source.linesize[1] * (img_src->height / 2 - 1);
			avp_source.linesize[1] *= -1;
			avp_source.data[2] += avp_source.linesize[2] * (img_src->height / 2 - 1);
			avp_source.linesize[2] *= -1;
		}
	}

	//Resizing picture if needed. Needs test
	if (need_resize) {

		//resampling only works yuv420P -> yuv420P in current ffmpeg
		if (pix_fmt_source != PIX_FMT_YUV420P) {
			result = PIX_NOK;
			goto out;
		}

		//TODO optimize this part but will need the preparation of contexts
		ImgReSampleContext * resample_context = img_resample_init(img_dst->width, img_dst->height,
			img_src->width, img_src->height);

		if (!resample_context) {
			result = PIX_NOK;
			goto out;
		}

		//temporary buffer holding the resized picture, still in source format
		AVPicture avp_tmp_target;
		uint8_t * buf_tmp_target = (uint8_t *)av_malloc(avpicture_get_size(pix_fmt_source, img_dst->width, img_dst->height) * sizeof(uint8_t));
		if (!buf_tmp_target) {
			img_resample_close(resample_context);
			result = PIX_NOK;
			goto out;
		}
		avpicture_fill(&avp_tmp_target, buf_tmp_target, pix_fmt_source, img_dst->width, img_dst->height);

		//do the resampling
		img_resample(resample_context, &avp_tmp_target, &avp_source);
		img_resample_close(resample_context);

		//convert the resized picture into the target palette
		if (img_convert(&avp_target, pix_fmt_target,
			&avp_tmp_target, pix_fmt_source,
			img_dst->width, img_dst->height) == -1) {
			result = PIX_NOK;
		}
		av_free(buf_tmp_target);

	} else {
		if (img_convert(&avp_target, pix_fmt_target,
			&avp_source, pix_fmt_source,
			img_src->width, img_src->height) == -1) {
			result = PIX_NOK;
		}
	}

out:
	//free the NV12 conversion buffer on success and error paths alike
	if (need_avfree) {
		av_free(buf_source);
	}

	return result;
}