Code example #1
static void
gst_gl_composition_overlay_add_transformation (GstGLCompositionOverlay *
    overlay, GstBuffer * video_buffer)
{
  gint comp_x, comp_y;
  guint comp_width, comp_height;
  GstVideoMeta *meta;
  guint width, height;

  float rel_x, rel_y, rel_w, rel_h;

  meta = gst_buffer_get_video_meta (video_buffer);

  gst_video_overlay_rectangle_get_render_rectangle (overlay->rectangle,
      &comp_x, &comp_y, &comp_width, &comp_height);

  width = meta->width;
  height = meta->height;

  /* calculate relative position */
  rel_x = (float) comp_x / (float) width;
  rel_y = (float) comp_y / (float) height;

  rel_w = (float) comp_width / (float) width;
  rel_h = (float) comp_height / (float) height;

  /* transform from [0,1] to [-1,1], invert y axis */
  rel_x = rel_x * 2.0 - 1.0;
  rel_y = (1.0 - rel_y) * 2.0 - 1.0;
  rel_w = rel_w * 2.0;
  rel_h = rel_h * 2.0;

  /* initialize position array */
  overlay->positions[0] = rel_x + rel_w;
  overlay->positions[1] = rel_y;
  overlay->positions[2] = 0.0;
  overlay->positions[3] = 1.0;
  overlay->positions[4] = rel_x;
  overlay->positions[5] = rel_y;
  overlay->positions[6] = 0.0;
  overlay->positions[7] = 1.0;
  overlay->positions[8] = rel_x;
  overlay->positions[9] = rel_y - rel_h;
  overlay->positions[10] = 0.0;
  overlay->positions[11] = 1.0;
  overlay->positions[12] = rel_x + rel_w;
  overlay->positions[13] = rel_y - rel_h;
  overlay->positions[14] = 0.0;
  overlay->positions[15] = 1.0;

  gst_gl_context_thread_add (overlay->context,
      gst_gl_composition_overlay_free_vertex_buffer, overlay);

  gst_gl_context_thread_add (overlay->context,
      gst_gl_composition_overlay_init_vertex_buffer, overlay);

  GST_DEBUG
      ("overlay position: (%d,%d) size: %dx%d video size: %dx%d",
      comp_x, comp_y, comp_width, comp_height, meta->width, meta->height);
}
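The function above maps the overlay rectangle from pixel coordinates into OpenGL clip space. As a minimal standalone sketch of just that mapping (the pixel_rect_to_clip_space helper and the ClipRect struct are hypothetical; only the arithmetic mirrors the code above):

/* Hypothetical helper: convert a pixel-space rectangle into GL clip-space
 * coordinates in [-1,1] with the y axis inverted, mirroring the math above. */
typedef struct { float x, y, w, h; } ClipRect;

static ClipRect
pixel_rect_to_clip_space (gint px, gint py, guint pw, guint ph,
    guint video_w, guint video_h)
{
  ClipRect r;
  float rel_x = (float) px / (float) video_w;
  float rel_y = (float) py / (float) video_h;

  r.w = 2.0f * (float) pw / (float) video_w;
  r.h = 2.0f * (float) ph / (float) video_h;
  r.x = rel_x * 2.0f - 1.0f;            /* [0,1] -> [-1,1] */
  r.y = (1.0f - rel_y) * 2.0f - 1.0f;   /* invert y axis */
  return r;
}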
Code example #2
static gboolean
plugin_update_sinkpad_info_from_buffer (GstVaapiPluginBase * plugin,
    GstBuffer * buf)
{
  GstVideoInfo *const vip = &plugin->sinkpad_info;
  GstVideoMeta *vmeta;
  guint i;

  vmeta = gst_buffer_get_video_meta (buf);
  if (!vmeta)
    return TRUE;

  if (GST_VIDEO_INFO_FORMAT (vip) != vmeta->format ||
      GST_VIDEO_INFO_WIDTH (vip) != vmeta->width ||
      GST_VIDEO_INFO_HEIGHT (vip) != vmeta->height ||
      GST_VIDEO_INFO_N_PLANES (vip) != vmeta->n_planes)
    return FALSE;

  for (i = 0; i < GST_VIDEO_INFO_N_PLANES (vip); ++i) {
    GST_VIDEO_INFO_PLANE_OFFSET (vip, i) = vmeta->offset[i];
    GST_VIDEO_INFO_PLANE_STRIDE (vip, i) = vmeta->stride[i];
  }
  GST_VIDEO_INFO_SIZE (vip) = gst_buffer_get_size (buf);
  return TRUE;
}
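The function above only reads fields that some upstream element attached earlier. As a minimal sketch of the producer side (the attach_video_meta name, the NV12 format and the 1920x1080 size are illustrative assumptions, not taken from gstreamer-vaapi):

/* Sketch: a producer attaches a GstVideoMeta so that consumers such as the
 * function above can read format, dimensions and plane layout back from the
 * buffer. Requires <gst/video/video.h>. */
static void
attach_video_meta (GstBuffer * buf)
{
  gst_buffer_add_video_meta (buf, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_FORMAT_NV12, 1920, 1080);
}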
Code example #3
static void
gst_gdk_pixbuf_overlay_update_composition (GstGdkPixbufOverlay * overlay)
{
  GstVideoOverlayComposition *comp;
  GstVideoOverlayRectangle *rect;
  GstVideoMeta *overlay_meta;
  gint x, y, width, height;
  gint video_width =
      GST_VIDEO_INFO_WIDTH (&GST_VIDEO_FILTER (overlay)->in_info);
  gint video_height =
      GST_VIDEO_INFO_HEIGHT (&GST_VIDEO_FILTER (overlay)->in_info);

  if (overlay->comp) {
    gst_video_overlay_composition_unref (overlay->comp);
    overlay->comp = NULL;
  }

  if (overlay->alpha == 0.0 || overlay->pixels == NULL)
    return;

  overlay_meta = gst_buffer_get_video_meta (overlay->pixels);

  x = overlay->offset_x < 0 ?
      video_width + overlay->offset_x - overlay_meta->width +
      (overlay->relative_x * overlay_meta->width) :
      overlay->offset_x + (overlay->relative_x * overlay_meta->width);
  y = overlay->offset_y < 0 ?
      video_height + overlay->offset_y - overlay_meta->height +
      (overlay->relative_y * overlay_meta->height) :
      overlay->offset_y + (overlay->relative_y * overlay_meta->height);

  width = overlay->overlay_width;
  if (width == 0)
    width = overlay_meta->width;

  height = overlay->overlay_height;
  if (height == 0)
    height = overlay_meta->height;

  GST_DEBUG_OBJECT (overlay, "overlay image dimensions: %d x %d, alpha=%.2f",
      overlay_meta->width, overlay_meta->height, overlay->alpha);
  GST_DEBUG_OBJECT (overlay, "properties: x,y: %d,%d (%g%%,%g%%) - WxH: %dx%d",
      overlay->offset_x, overlay->offset_y,
      overlay->relative_x * 100.0, overlay->relative_y * 100.0,
      overlay->overlay_width, overlay->overlay_height);
  GST_DEBUG_OBJECT (overlay, "overlay rendered: %d x %d @ %d,%d (onto %d x %d)",
      width, height, x, y, video_width, video_height);

  rect = gst_video_overlay_rectangle_new_raw (overlay->pixels,
      x, y, width, height, GST_VIDEO_OVERLAY_FORMAT_FLAG_NONE);

  if (overlay->alpha != 1.0)
    gst_video_overlay_rectangle_set_global_alpha (rect, overlay->alpha);

  comp = gst_video_overlay_composition_new (rect);
  gst_video_overlay_rectangle_unref (rect);

  overlay->comp = comp;
}
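The composition built here is kept on overlay->comp and consumed later by the element. As a hedged sketch of one common way such a composition ends up on outgoing buffers (the attach_composition helper is illustrative, not the element's actual code path):

/* Sketch: attach an overlay composition to an output buffer as metadata so a
 * downstream renderer can blend it. Uses the GstVideoOverlayComposition API
 * from libgstvideo. */
static void
attach_composition (GstBuffer * outbuf, GstVideoOverlayComposition * comp)
{
  if (comp != NULL)
    gst_buffer_add_video_overlay_composition_meta (outbuf, comp);
}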
Code example #4
gboolean gst_imx_vpu_framebuffer_array_set_framebuffer_in_gstbuffer(GstImxVpuFramebufferArray *framebuffer_array, GstBuffer *buffer, ImxVpuFramebuffer *framebuffer)
{
	GstVideoMeta *video_meta;
	GstImxVpuFramebufferMeta *vpu_meta;
	GstImxPhysMemMeta *phys_mem_meta;
	GstImxPhysMemory *memory;

	video_meta = gst_buffer_get_video_meta(buffer);
	if (video_meta == NULL)
	{
		GST_ERROR("buffer with pointer %p has no video metadata", (gpointer)buffer);
		return FALSE;
	}

	vpu_meta = GST_IMX_VPU_FRAMEBUFFER_META_GET(buffer);
	if (vpu_meta == NULL)
	{
		GST_ERROR("buffer with pointer %p has no VPU metadata", (gpointer)buffer);
		return FALSE;
	}

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(buffer);
	if (phys_mem_meta == NULL)
	{
		GST_ERROR("buffer with pointer %p has no phys mem metadata", (gpointer)buffer);
		return FALSE;
	}

	{
		gsize x_padding = 0, y_padding = 0;

		if (framebuffer_array->framebuffer_sizes.aligned_frame_width > video_meta->width)
			x_padding = framebuffer_array->framebuffer_sizes.aligned_frame_width - video_meta->width;
		if (framebuffer_array->framebuffer_sizes.aligned_frame_height > video_meta->height)
			y_padding = framebuffer_array->framebuffer_sizes.aligned_frame_height - video_meta->height;

		vpu_meta->framebuffer = framebuffer;

		phys_mem_meta->phys_addr = (gst_imx_phys_addr_t)imx_vpu_dma_buffer_get_physical_address(framebuffer->dma_buffer);
		phys_mem_meta->x_padding = x_padding;
		phys_mem_meta->y_padding = y_padding;

		GST_LOG("setting phys mem meta for buffer with pointer %p: phys addr %" GST_IMX_PHYS_ADDR_FORMAT " x/y padding %" G_GSIZE_FORMAT "/%" G_GSIZE_FORMAT, (gpointer)buffer, phys_mem_meta->phys_addr, phys_mem_meta->x_padding, phys_mem_meta->y_padding);
	}

	memory = gst_imx_vpu_framebuffer_array_get_gst_phys_memory(framebuffer);

	/* remove any existing memory blocks */
	gst_buffer_remove_all_memory(buffer);
	/* and append the new memory block
	 * the memory is ref'd to prevent deallocation when it is later removed
	 * (either because this function is called again, or because the buffer
	 * is deallocated); refcount is 1 already at this point, since the memory
	 * is ref'd inside the framebuffer array, and unref'd when the array is
	 * shut down */
	gst_buffer_append_memory(buffer, gst_memory_ref((GstMemory *)memory));

	return TRUE;
}
Code example #5
static gboolean
gst_raw_video_parse_process (GstRawBaseParse * raw_base_parse,
    GstRawBaseParseConfig config, GstBuffer * in_data,
    G_GNUC_UNUSED gsize total_num_in_bytes,
    G_GNUC_UNUSED gsize num_valid_in_bytes, GstBuffer ** processed_data)
{
  GstRawVideoParse *raw_video_parse = GST_RAW_VIDEO_PARSE (raw_base_parse);
  GstRawVideoParseConfig *config_ptr =
      gst_raw_video_parse_get_config_ptr (raw_video_parse, config);
  guint frame_flags = 0;
  GstVideoInfo *video_info = &(config_ptr->info);
  GstVideoMeta *videometa;
  GstBuffer *out_data;

  /* In case of extra padding bytes, get a subbuffer without the padding bytes.
   * Otherwise, just add the video meta. */
  if (GST_VIDEO_INFO_SIZE (video_info) < config_ptr->frame_stride) {
    *processed_data = out_data =
        gst_buffer_copy_region (in_data,
        GST_BUFFER_COPY_FLAGS | GST_BUFFER_COPY_TIMESTAMPS |
        GST_BUFFER_COPY_MEMORY, 0, GST_VIDEO_INFO_SIZE (video_info));
  } else {
    out_data = in_data;
    *processed_data = NULL;
  }

  if (config_ptr->interlaced) {
    GST_BUFFER_FLAG_SET (out_data, GST_VIDEO_BUFFER_FLAG_INTERLACED);
    frame_flags |= GST_VIDEO_FRAME_FLAG_INTERLACED;

    if (config_ptr->top_field_first) {
      GST_BUFFER_FLAG_SET (out_data, GST_VIDEO_BUFFER_FLAG_TFF);
      frame_flags |= GST_VIDEO_FRAME_FLAG_TFF;
    } else
      GST_BUFFER_FLAG_UNSET (out_data, GST_VIDEO_BUFFER_FLAG_TFF);
  }

  /* Remove any existing videometa - it will be replaced by the new videometa
   * from here */
  while ((videometa = gst_buffer_get_video_meta (out_data))) {
    GST_LOG_OBJECT (raw_base_parse, "removing existing videometa from buffer");
    gst_buffer_remove_meta (out_data, (GstMeta *) videometa);
  }

  gst_buffer_add_video_meta_full (out_data,
      frame_flags,
      config_ptr->format,
      config_ptr->width,
      config_ptr->height,
      GST_VIDEO_INFO_N_PLANES (video_info),
      config_ptr->plane_offsets, config_ptr->plane_strides);


  return TRUE;
}
Code example #6
static void
gst_gl_composition_overlay_upload (GstGLCompositionOverlay * overlay,
    GstBuffer * buf)
{
  GstGLMemory *comp_gl_memory = NULL;
  GstBuffer *comp_buffer = NULL;
  GstBuffer *overlay_buffer = NULL;
  GstVideoInfo vinfo;
  GstVideoMeta *vmeta;
  GstVideoFrame *comp_frame;
  GstVideoFrame gl_frame;

  comp_buffer =
      gst_video_overlay_rectangle_get_pixels_unscaled_argb (overlay->rectangle,
      GST_VIDEO_OVERLAY_FORMAT_FLAG_PREMULTIPLIED_ALPHA);

  comp_frame = g_slice_new (GstVideoFrame);

  vmeta = gst_buffer_get_video_meta (comp_buffer);
  gst_video_info_set_format (&vinfo, vmeta->format, vmeta->width,
      vmeta->height);
  vinfo.stride[0] = vmeta->stride[0];

  if (gst_video_frame_map (comp_frame, &vinfo, comp_buffer, GST_MAP_READ)) {
    gst_gl_composition_overlay_add_transformation (overlay, buf);

    comp_gl_memory =
        gst_gl_memory_wrapped (overlay->context, GST_GL_TEXTURE_TARGET_2D,
        &comp_frame->info, 0, NULL, comp_frame->data[0], comp_frame,
        _video_frame_unmap_and_free);

    overlay_buffer = gst_buffer_new ();
    gst_buffer_append_memory (overlay_buffer, (GstMemory *) comp_gl_memory);

    if (!gst_video_frame_map (&gl_frame, &comp_frame->info, overlay_buffer,
            GST_MAP_READ | GST_MAP_GL)) {
      gst_buffer_unref (overlay_buffer);
      _video_frame_unmap_and_free (comp_frame);
      GST_WARNING_OBJECT (overlay, "Cannot upload overlay texture");
      return;
    }

    gst_memory_ref ((GstMemory *) comp_gl_memory);
    overlay->gl_memory = comp_gl_memory;
    overlay->texture_id = comp_gl_memory->tex_id;

    gst_buffer_unref (overlay_buffer);
    gst_video_frame_unmap (&gl_frame);

    GST_DEBUG ("uploaded overlay texture %d", overlay->texture_id);
  } else {
    g_slice_free (GstVideoFrame, comp_frame);
  }
}
Code example #7
File: testegl.c  Project: ryumiel/gst-omx
/***********************************************************
 * Name: init_textures
 *
 * Arguments:
 *       APP_STATE_T *state - holds OGLES model info
 *
 * Description:   Initialise OGL|ES texture surfaces to use image
 *                buffers
 *
 * Returns: void
 *
 ***********************************************************/
static void
init_textures (APP_STATE_T * state, GstBuffer * buffer)
{
    GstCapsFeatures *feature = gst_caps_get_features (state->caps, 0);

    if (gst_caps_features_contains (feature, "memory:EGLImage")) {
        /* nothing special to do */
        g_print ("Prepare texture for EGLImage\n");
        state->can_avoid_upload = FALSE;
        glGenTextures (1, &state->tex);
        glBindTexture (GL_TEXTURE_2D, state->tex);
    } else if (gst_caps_features_contains (feature, "memory:GLMemory")) {
        g_print ("Prepare texture for GLMemory\n");
        state->can_avoid_upload = TRUE;
        state->tex = 0;
    } else if (gst_caps_features_contains (feature,
                                           "meta:GstVideoGLTextureUploadMeta")) {
        GstVideoMeta *meta = NULL;
        g_print ("Prepare texture for GstVideoGLTextureUploadMeta\n");
        meta = gst_buffer_get_video_meta (buffer);
        state->can_avoid_upload = FALSE;
        glGenTextures (1, &state->tex);
        glBindTexture (GL_TEXTURE_2D, state->tex);
        glTexImage2D (GL_TEXTURE_2D, 0, GL_RGBA8, meta->width, meta->height, 0,
                      GL_RGBA, GL_UNSIGNED_BYTE, NULL);
    } else {
        g_assert_not_reached ();
    }

#if 0
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_NEAREST);
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_NEAREST);
#else
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MIN_FILTER, GL_LINEAR);
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_MAG_FILTER, GL_LINEAR);
#endif

    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE);
    glTexParameteri (GL_TEXTURE_2D, GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE);

    assert (glGetError () == GL_NO_ERROR);
}
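init_textures branches on the caps features negotiated on the pad. A small sketch of that kind of caps-feature check in isolation (the caps_use_gl_memory helper is illustrative; testegl.c instead keeps the negotiated caps in state->caps):

/* Sketch: report whether negotiated caps carry the GLMemory caps feature.
 * gst_caps_get_features() returns a borrowed pointer, so nothing is freed. */
static gboolean
caps_use_gl_memory (GstCaps * caps)
{
  GstCapsFeatures *features = gst_caps_get_features (caps, 0);

  return gst_caps_features_contains (features, "memory:GLMemory");
}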
Code example #8
static gboolean
meta_texture_ensure_info_from_buffer (GstVaapiVideoMetaTexture * meta,
    GstBuffer * buffer)
{
  GstVideoMeta *vmeta;
  GstVideoFormat format;

  if (!buffer || !(vmeta = gst_buffer_get_video_meta (buffer))) {
    format = DEFAULT_FORMAT;
    meta->width = 0;
    meta->height = 0;
  } else {
    const GstVideoFormatInfo *const fmt_info =
        gst_video_format_get_info (vmeta->format);
    format = (fmt_info && GST_VIDEO_FORMAT_INFO_IS_RGB (fmt_info)) ?
        vmeta->format : DEFAULT_FORMAT;
    meta->width = vmeta->width;
    meta->height = vmeta->height;
  }
  return meta_texture_ensure_format (meta, format);
}
Code example #9
static GstPadProbeReturn
buffer_cb (GstPad * pad, GstPadProbeInfo * info, gpointer user_data)
{
  GstVideoOverlayRectangle *rect;
  GstVideoOverlayComposition *comp;
  GstVideoFrame frame;
  GstVideoMeta *vmeta;
  GstVideoInfo vinfo;
  GstCaps *caps;
  gint x, y;

  caps = gst_pad_get_current_caps (pad);
  gst_video_info_from_caps (&vinfo, caps);
  gst_caps_unref (caps);

  info->data = gst_buffer_make_writable (info->data);

  vmeta = gst_buffer_get_video_meta (logo_buf);

  calculate_position (&x, &y, vmeta->width, vmeta->height, ++count);

  GST_LOG ("%3d, %3d", x, y);

  rect = gst_video_overlay_rectangle_new_raw (logo_buf, x, y,
      vmeta->width, vmeta->height, GST_VIDEO_OVERLAY_FORMAT_FLAG_NONE);
  comp = gst_video_overlay_composition_new (rect);
  gst_video_overlay_rectangle_unref (rect);

  gst_video_frame_map (&frame, &vinfo, info->data, GST_MAP_READWRITE);

  if (!gst_video_overlay_composition_blend (comp, &frame))
    g_warning ("Error blending overlay at position (%d,%d)", x, y);

  gst_video_frame_unmap (&frame);

  gst_video_overlay_composition_unref (comp);

  return GST_PAD_PROBE_OK;
}
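buffer_cb assumes that logo_buf already carries a GstVideoMeta describing the logo pixels. A hedged sketch of how such a buffer could be prepared before installing the probe (make_logo_buffer, the ARGB format and the size convention are assumptions, not the test's actual setup code):

/* Sketch: wrap raw ARGB pixels (size must be w * h * 4 bytes) in a buffer and
 * describe them with a GstVideoMeta so buffer_cb can query width and height. */
static GstBuffer *
make_logo_buffer (gconstpointer pixels, gsize size, guint w, guint h)
{
  GstBuffer *buf = gst_buffer_new_allocate (NULL, size, NULL);

  gst_buffer_fill (buf, 0, pixels, size);
  gst_buffer_add_video_meta (buf, GST_VIDEO_FRAME_FLAG_NONE,
      GST_VIDEO_FORMAT_ARGB, w, h);
  return buf;
}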
Code example #10
File: blitter.c  Project: FrankBau/gstreamer-imx
static void gst_imx_ipu_blitter_set_task_params(GstImxIpuBlitter *ipu_blitter, GstBuffer *video_frame, struct ipu_task *task, GstVideoInfo const *info, gboolean is_input)
{
	GstVideoMeta *video_meta;
	GstImxPhysMemMeta *phys_mem_meta;
	GstVideoFormat format;
	guint width, height, padded_width, padded_height;

	g_assert(video_frame != NULL);

	video_meta = gst_buffer_get_video_meta(video_frame);
	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(video_frame);

	g_assert((phys_mem_meta != NULL) && (phys_mem_meta->phys_addr != 0));

	format = (video_meta != NULL) ? video_meta->format : GST_VIDEO_INFO_FORMAT(info);
	width = (video_meta != NULL) ? video_meta->width : (guint)(GST_VIDEO_INFO_WIDTH(info));
	height = (video_meta != NULL) ? video_meta->height : (guint)(GST_VIDEO_INFO_HEIGHT(info));

	padded_width = width + phys_mem_meta->x_padding;
	padded_height = height + phys_mem_meta->y_padding;

	if (is_input)
	{
		task->input.width = padded_width;
		task->input.height = padded_height;
		task->input.format = gst_imx_ipu_blitter_get_v4l_format(format);
		task->input.paddr = (dma_addr_t)(phys_mem_meta->phys_addr);
	}
	else
	{
		task->output.width = padded_width;
		task->output.height = padded_height;
		task->output.format = gst_imx_ipu_blitter_get_v4l_format(format);
		task->output.paddr = (dma_addr_t)(phys_mem_meta->phys_addr);
	}
}
Code example #11
static gboolean gst_imx_egl_viv_sink_gles2_renderer_fill_texture(GstImxEglVivSinkGLES2Renderer *renderer, GstBuffer *buffer)
{
	GstVideoMeta *video_meta;
	GstMapInfo map_info;
	guint num_extra_lines, stride[3], offset[3], is_phys_buf;
	GstImxPhysMemMeta *phys_mem_meta;
	GstVideoFormat fmt;
	GLenum gl_format;
	GLuint w, h, total_w, total_h;
	
	phys_mem_meta = NULL;
	fmt = renderer->video_info.finfo->format;

	gl_format = gst_imx_egl_viv_sink_gles2_renderer_get_viv_format(fmt);
	w = renderer->video_info.width;
	h = renderer->video_info.height;

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(buffer);
	is_phys_buf = (phys_mem_meta != NULL) && (phys_mem_meta->phys_addr != 0);

	/* Get the stride and number of extra lines */
	video_meta = gst_buffer_get_video_meta(buffer);
	if (video_meta != NULL)
	{
		for (guint i = 0; i < MIN(video_meta->n_planes, 3); ++i)
		{
			stride[i] = video_meta->stride[i];
			offset[i] = video_meta->offset[i];
		}
	}
	else
	{
		for (guint i = 0; i < MIN(GST_VIDEO_INFO_N_PLANES(&(renderer->video_info)), 3); ++i)
		{
			stride[i] = GST_VIDEO_INFO_PLANE_STRIDE(&(renderer->video_info), i);
			offset[i] = GST_VIDEO_INFO_PLANE_OFFSET(&(renderer->video_info), i);
		}
	}

	num_extra_lines = is_phys_buf ? (phys_mem_meta->padding / stride[0]) : 0;

	/* stride is in bytes, we need pixels */
	total_w = stride[0] / gst_imx_egl_viv_sink_gles2_renderer_bpp(fmt);
	total_h = h + num_extra_lines;

	GST_LOG("w/h: %d/%d total_w/h: %d/%d", w, h, total_w, total_h);

	glUniform2f(renderer->uv_scale_uloc, (float)w / (float)total_w, (float)h / (float)total_h);

	/* Only update texture if the video frame actually changed */
	if ((renderer->viv_planes[0] == NULL) || (renderer->video_info_updated))
	{
		GST_LOG("video frame did change");

		if (is_phys_buf)
		{
			GLvoid *virt_addr;
			GLuint phys_addr;

			phys_addr = (GLuint)(phys_mem_meta->phys_addr);

			GST_LOG("mapping physical address 0x%x of video frame in buffer %p into VIV texture", phys_addr, (gpointer)buffer);

			gst_buffer_map(buffer, &map_info, GST_MAP_READ);
			virt_addr = map_info.data;

			/* Just set to make sure the == NULL check above is false */
			renderer->viv_planes[0] = virt_addr;

			glTexDirectVIVMap(
				GL_TEXTURE_2D,
				total_w, total_h,
				gl_format,
				(GLvoid **)(&virt_addr), &phys_addr
			);

			gst_buffer_unmap(buffer, &map_info);
			GST_LOG("done showing frame in buffer %p with virtual address %p physical address 0x%x", (gpointer)buffer, virt_addr, phys_addr);

			if (!gst_imx_egl_viv_sink_gles2_renderer_check_gl_error("render", "glTexDirectVIVMap"))
				return FALSE;
		}
		else
		{
			glTexDirectVIV(
				GL_TEXTURE_2D,
				total_w, total_h,
				gl_format,
				(GLvoid **) &(renderer->viv_planes)
			);
			if (!gst_imx_egl_viv_sink_gles2_renderer_check_gl_error("render", "glTexDirectVIV"))
				return FALSE;

			GST_LOG("copying pixels into VIV direct texture buffer");

			gst_buffer_map(buffer, &map_info, GST_MAP_READ);
			switch (fmt)
			{
				case GST_VIDEO_FORMAT_I420:
				case GST_VIDEO_FORMAT_YV12:
					memcpy(renderer->viv_planes[0], map_info.data + offset[0], stride[0] * total_h);
					memcpy(renderer->viv_planes[1], map_info.data + offset[1], stride[1] * total_h / 2);
					memcpy(renderer->viv_planes[2], map_info.data + offset[2], stride[2] * total_h / 2);
					break;
				case GST_VIDEO_FORMAT_NV12:
				case GST_VIDEO_FORMAT_NV21:
					memcpy(renderer->viv_planes[0], map_info.data + offset[0], stride[0] * total_h);
					memcpy(renderer->viv_planes[1], map_info.data + offset[1], stride[1] * total_h / 2);
					break;
				default:
					memcpy(renderer->viv_planes[0], map_info.data, stride[0] * total_h);
			}
			gst_buffer_unmap(buffer, &map_info);

		}

		glTexDirectInvalidateVIV(GL_TEXTURE_2D);
		if (!gst_imx_egl_viv_sink_gles2_renderer_check_gl_error("render", "glTexDirectInvalidateVIV"))
			return FALSE;

		renderer->video_info_updated = FALSE;
	}
	else
	{
		GST_LOG("video frame did not change - not doing anything");
	}

	return TRUE;
}
Code example #12
File: video-frame.c  Project: rawoul/gst-plugins-base
/**
 * gst_video_frame_map_id:
 * @frame: pointer to #GstVideoFrame
 * @info: a #GstVideoInfo
 * @buffer: the buffer to map
 * @id: the frame id to map
 * @flags: #GstMapFlags
 *
 * Use @info and @buffer to fill in the values of @frame with the video frame
 * information of frame @id.
 *
 * When @id is -1, the default frame is mapped. When @id != -1, this function
 * will return %FALSE when there is no GstVideoMeta with that id.
 *
 * All video planes of @buffer will be mapped and the pointers will be set in
 * @frame->data.
 *
 * Returns: %TRUE on success.
 */
gboolean
gst_video_frame_map_id (GstVideoFrame * frame, GstVideoInfo * info,
                        GstBuffer * buffer, gint id, GstMapFlags flags)
{
    GstVideoMeta *meta;
    gint i;

    g_return_val_if_fail (frame != NULL, FALSE);
    g_return_val_if_fail (info != NULL, FALSE);
    g_return_val_if_fail (GST_IS_BUFFER (buffer), FALSE);

    if (id == -1)
        meta = gst_buffer_get_video_meta (buffer);
    else
        meta = gst_buffer_get_video_meta_id (buffer, id);

    /* copy the info */
    frame->info = *info;

    if (meta) {
        frame->info.finfo = gst_video_format_get_info (meta->format);
        frame->info.width = meta->width;
        frame->info.height = meta->height;
        frame->id = meta->id;
        frame->flags = meta->flags;

        for (i = 0; i < info->finfo->n_planes; i++)
            if (!gst_video_meta_map (meta, i, &frame->map[i], &frame->data[i],
                                     &frame->info.stride[i], flags))
                goto frame_map_failed;
    } else {
        /* no metadata, we really need to have the metadata when the id is
         * specified. */
        if (id != -1)
            goto no_metadata;

        frame->id = id;
        frame->flags = 0;

        if (!gst_buffer_map (buffer, &frame->map[0], flags))
            goto map_failed;

        /* do some sanity checks */
        if (frame->map[0].size < info->size)
            goto invalid_size;

        /* set up pointers */
        for (i = 0; i < info->finfo->n_planes; i++) {
            frame->data[i] = frame->map[0].data + info->offset[i];
        }
    }
    frame->buffer = gst_buffer_ref (buffer);
    frame->meta = meta;

    /* buffer flags enhance the frame flags */
    if (GST_VIDEO_INFO_IS_INTERLACED (info)) {
        if (GST_BUFFER_FLAG_IS_SET (buffer, GST_VIDEO_BUFFER_FLAG_INTERLACED))
            frame->flags |= GST_VIDEO_FRAME_FLAG_INTERLACED;
        if (GST_BUFFER_FLAG_IS_SET (buffer, GST_VIDEO_BUFFER_FLAG_TFF))
            frame->flags |= GST_VIDEO_FRAME_FLAG_TFF;
        if (GST_BUFFER_FLAG_IS_SET (buffer, GST_VIDEO_BUFFER_FLAG_RFF))
            frame->flags |= GST_VIDEO_FRAME_FLAG_RFF;
        if (GST_BUFFER_FLAG_IS_SET (buffer, GST_VIDEO_BUFFER_FLAG_ONEFIELD))
            frame->flags |= GST_VIDEO_FRAME_FLAG_ONEFIELD;
    }
    return TRUE;

    /* ERRORS */
no_metadata:
    {
        GST_ERROR ("no GstVideoMeta for id %d", id);
        return FALSE;
    }
frame_map_failed:
    {
        GST_ERROR ("failed to map video frame plane %d", i);
        while (--i >= 0)
            gst_video_meta_unmap (meta, i, &frame->map[i]);
        return FALSE;
    }
map_failed:
    {
        GST_ERROR ("failed to map buffer");
        return FALSE;
    }
invalid_size:
    {
        GST_ERROR ("invalid buffer size %" G_GSIZE_FORMAT " < %" G_GSIZE_FORMAT,
                   frame->map[0].size, info->size);
        gst_buffer_unmap (buffer, &frame->map[0]);
        return FALSE;
    }
}
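A minimal usage sketch for gst_video_frame_map_id() as documented above (the read_first_plane_of_view helper and the hard-coded id of 1 are illustrative; a real caller would know which meta id it wants):

/* Sketch: map the plane data of the GstVideoMeta with id 1, read the first
 * plane pointer, then unmap. gst_video_frame_map_id() returns FALSE if the
 * buffer carries no meta with that id. */
static gboolean
read_first_plane_of_view (GstBuffer * buffer, GstCaps * caps)
{
  GstVideoInfo info;
  GstVideoFrame frame;
  guint8 *plane0;

  if (!gst_video_info_from_caps (&info, caps))
    return FALSE;
  if (!gst_video_frame_map_id (&frame, &info, buffer, 1, GST_MAP_READ))
    return FALSE;

  plane0 = GST_VIDEO_FRAME_PLANE_DATA (&frame, 0);
  (void) plane0;                /* ... inspect pixels here ... */

  gst_video_frame_unmap (&frame);
  return TRUE;
}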
Code example #13
File: base_enc.c  Project: chamois94/gstreamer-imx
static GstFlowReturn gst_imx_vpu_base_enc_handle_frame(GstVideoEncoder *encoder, GstVideoCodecFrame *frame)
{
	VpuEncRetCode enc_ret;
	VpuEncEncParam enc_enc_param;
	GstImxPhysMemMeta *phys_mem_meta;
	GstImxVpuBaseEncClass *klass;
	GstImxVpuBaseEnc *vpu_base_enc;
	VpuFrameBuffer input_framebuf;
	GstBuffer *input_buffer;
	gint src_stride;

	vpu_base_enc = GST_IMX_VPU_BASE_ENC(encoder);
	klass = GST_IMX_VPU_BASE_ENC_CLASS(G_OBJECT_GET_CLASS(vpu_base_enc));

	g_assert(klass->set_frame_enc_params != NULL);

	memset(&enc_enc_param, 0, sizeof(enc_enc_param));
	memset(&input_framebuf, 0, sizeof(input_framebuf));

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(frame->input_buffer);

	/* If the incoming frame's buffer is not using physically contiguous memory,
	 * it needs to be copied to the internal input buffer, otherwise the VPU
	 * encoder cannot read the frame */
	if (phys_mem_meta == NULL)
	{
		/* No physical memory metadata found -> buffer is not physically contiguous */

		GstVideoFrame temp_input_video_frame, temp_incoming_video_frame;

		GST_LOG_OBJECT(vpu_base_enc, "input buffer not physically contiguous - frame copy is necessary");

		if (vpu_base_enc->internal_input_buffer == NULL)
		{
			/* The internal input buffer is the temp input frame's DMA memory.
			 * If it does not exist yet, it needs to be created here. The temp input
			 * frame is then mapped. */

			GstFlowReturn flow_ret;

			if (vpu_base_enc->internal_bufferpool == NULL)
			{
				/* Internal bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input buffer */

				GstStructure *config;
				GstCaps *caps;
				GstAllocator *allocator;

				GST_DEBUG_OBJECT(vpu_base_enc, "creating internal bufferpool");

				caps = gst_video_info_to_caps(&(vpu_base_enc->video_info));
				vpu_base_enc->internal_bufferpool = gst_imx_phys_mem_buffer_pool_new(FALSE);
				allocator = gst_imx_vpu_enc_allocator_obtain();

				config = gst_buffer_pool_get_config(vpu_base_enc->internal_bufferpool);
				gst_buffer_pool_config_set_params(config, caps, vpu_base_enc->video_info.size, 2, 0);
				gst_buffer_pool_config_set_allocator(config, allocator, NULL);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_IMX_PHYS_MEM);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
				gst_buffer_pool_set_config(vpu_base_enc->internal_bufferpool, config);

				gst_caps_unref(caps);

				if (vpu_base_enc->internal_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(vpu_base_enc, "failed to create internal bufferpool");
					return GST_FLOW_ERROR;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(vpu_base_enc->internal_bufferpool))
				gst_buffer_pool_set_active(vpu_base_enc->internal_bufferpool, TRUE);

			/* Create the internal input buffer */
			flow_ret = gst_buffer_pool_acquire_buffer(vpu_base_enc->internal_bufferpool, &(vpu_base_enc->internal_input_buffer), NULL);
			if (flow_ret != GST_FLOW_OK)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "error acquiring input frame buffer: %s", gst_pad_mode_get_name(flow_ret));
				return flow_ret;
			}
		}

		/* The internal input buffer exists at this point. Since the incoming frame
		 * is not stored in physical memory, copy its pixels to the internal
		 * input buffer, so the encoder can read them. */

		gst_video_frame_map(&temp_incoming_video_frame, &(vpu_base_enc->video_info), frame->input_buffer, GST_MAP_READ);
		gst_video_frame_map(&temp_input_video_frame, &(vpu_base_enc->video_info), vpu_base_enc->internal_input_buffer, GST_MAP_WRITE);

		gst_video_frame_copy(&temp_input_video_frame, &temp_incoming_video_frame);

		gst_video_frame_unmap(&temp_incoming_video_frame);
		gst_video_frame_unmap(&temp_input_video_frame);

		/* Set the internal input buffer as the encoder's input */
		input_buffer = vpu_base_enc->internal_input_buffer;
		/* And use the internal input buffer's physical memory metadata */
		phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(vpu_base_enc->internal_input_buffer);
	}
	else
	{
		/* Physical memory metadata found -> buffer is physically contiguous
		 * It can be used directly as input for the VPU encoder */
		input_buffer = frame->input_buffer;
	}

	/* Set up physical addresses for the input framebuffer */
	{
		gsize *plane_offsets;
		gint *plane_strides;
		GstVideoMeta *video_meta;
		unsigned char *phys_ptr;

		/* Try to use plane offset and stride information from the video
		 * metadata if present, since these can be more accurate than
		 * the information from the video info */
		video_meta = gst_buffer_get_video_meta(input_buffer);
		if (video_meta != NULL)
		{
			plane_offsets = video_meta->offset;
			plane_strides = video_meta->stride;
		}
		else
		{
			plane_offsets = vpu_base_enc->video_info.offset;
			plane_strides = vpu_base_enc->video_info.stride;
		}

		phys_ptr = (unsigned char*)(phys_mem_meta->phys_addr);

		input_framebuf.pbufY = phys_ptr;
		input_framebuf.pbufCb = phys_ptr + plane_offsets[1];
		input_framebuf.pbufCr = phys_ptr + plane_offsets[2];
		input_framebuf.pbufMvCol = NULL; /* not used by the VPU encoder */
		input_framebuf.nStrideY = plane_strides[0];
		input_framebuf.nStrideC = plane_strides[1];

		/* this is needed for framebuffers registration below */
		src_stride = plane_strides[0];

		GST_TRACE_OBJECT(vpu_base_enc, "width: %d   height: %d   stride 0: %d   stride 1: %d   offset 0: %d   offset 1: %d   offset 2: %d", GST_VIDEO_INFO_WIDTH(&(vpu_base_enc->video_info)), GST_VIDEO_INFO_HEIGHT(&(vpu_base_enc->video_info)), plane_strides[0], plane_strides[1], plane_offsets[0], plane_offsets[1], plane_offsets[2]);
	}

	/* Create framebuffers structure (if not already present) */
	if (vpu_base_enc->framebuffers == NULL)
	{
		GstImxVpuFramebufferParams fbparams;
		gst_imx_vpu_framebuffers_enc_init_info_to_params(&(vpu_base_enc->init_info), &fbparams);
		fbparams.pic_width = vpu_base_enc->open_param.nPicWidth;
		fbparams.pic_height = vpu_base_enc->open_param.nPicHeight;

		vpu_base_enc->framebuffers = gst_imx_vpu_framebuffers_new(&fbparams, gst_imx_vpu_enc_allocator_obtain());
		if (vpu_base_enc->framebuffers == NULL)
		{
			GST_ELEMENT_ERROR(vpu_base_enc, RESOURCE, NO_SPACE_LEFT, ("could not create framebuffers structure"), (NULL));
			return GST_FLOW_ERROR;
		}

		gst_imx_vpu_framebuffers_register_with_encoder(vpu_base_enc->framebuffers, vpu_base_enc->handle, src_stride);
	}

	/* Allocate physical buffer for output data (if not already present) */
	if (vpu_base_enc->output_phys_buffer == NULL)
	{
		vpu_base_enc->output_phys_buffer = (GstImxPhysMemory *)gst_allocator_alloc(gst_imx_vpu_enc_allocator_obtain(), vpu_base_enc->framebuffers->total_size, NULL);

		if (vpu_base_enc->output_phys_buffer == NULL)
		{
			GST_ERROR_OBJECT(vpu_base_enc, "could not allocate physical buffer for output data");
			return GST_FLOW_ERROR;
		}
	}

	/* Set up encoding parameters */
	enc_enc_param.nInVirtOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->mapped_virt_addr); /* TODO */
	enc_enc_param.nInPhyOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->phys_addr);
	enc_enc_param.nInOutputBufLen = vpu_base_enc->output_phys_buffer->mem.size;
	enc_enc_param.nPicWidth = vpu_base_enc->framebuffers->pic_width;
	enc_enc_param.nPicHeight = vpu_base_enc->framebuffers->pic_height;
	enc_enc_param.nFrameRate = vpu_base_enc->open_param.nFrameRate;
	enc_enc_param.pInFrame = &input_framebuf;
	enc_enc_param.nForceIPicture = 0;

	/* Force I-frame if either IS_FORCE_KEYFRAME or IS_FORCE_KEYFRAME_HEADERS is set for the current frame. */
	if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME(frame) || GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME_HEADERS(frame))
	{
		enc_enc_param.nForceIPicture = 1;
		GST_LOG_OBJECT(vpu_base_enc, "got request to make this a keyframe - forcing I frame");
		GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT(frame);
	}

	/* Give the derived class a chance to set up encoding parameters too */
	if (!klass->set_frame_enc_params(vpu_base_enc, &enc_enc_param, &(vpu_base_enc->open_param)))
	{
		GST_ERROR_OBJECT(vpu_base_enc, "derived class could not frame enc params");
		return GST_FLOW_ERROR;
	}

	/* Main encoding block */
	{
		GstBuffer *output_buffer = NULL;
		gsize output_buffer_offset = 0;
		gboolean frame_finished = FALSE;

		frame->output_buffer = NULL;

		/* Run in a loop until the VPU reports the input as used */
		do
		{
			/* Feed input data */
			enc_ret = VPU_EncEncodeFrame(vpu_base_enc->handle, &enc_enc_param);
			if (enc_ret != VPU_ENC_RET_SUCCESS)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "failed to encode frame: %s", gst_imx_vpu_strerror(enc_ret));
				VPU_EncReset(vpu_base_enc->handle);
				return GST_FLOW_ERROR;
			}

			if (frame_finished)
			{
				GST_WARNING_OBJECT(vpu_base_enc, "frame was already finished for the current input, but input not yet marked as used");
				continue;
			}

			if (enc_enc_param.eOutRetCode & (VPU_ENC_OUTPUT_DIS | VPU_ENC_OUTPUT_SEQHEADER))
			{
				/* Create an output buffer on demand */
				if (output_buffer == NULL)
				{
					output_buffer = gst_video_encoder_allocate_output_buffer(
						encoder,
						vpu_base_enc->output_phys_buffer->mem.size
					);
					frame->output_buffer = output_buffer;
				}

				GST_LOG_OBJECT(vpu_base_enc, "processing output data: %u bytes, output buffer offset %u", enc_enc_param.nOutOutputSize, output_buffer_offset);

				if (klass->fill_output_buffer != NULL)
				{
					/* Derived class fills data on its own */

					gsize cur_offset = output_buffer_offset;
					output_buffer_offset += klass->fill_output_buffer(
						vpu_base_enc,
						frame,
						cur_offset,
						vpu_base_enc->output_phys_buffer->mapped_virt_addr,
						enc_enc_param.nOutOutputSize,
						enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_SEQHEADER
					);
				}
				else
				{
					/* Use default data filling (= copy input to output) */

					gst_buffer_fill(
						output_buffer,
						output_buffer_offset,
						vpu_base_enc->output_phys_buffer->mapped_virt_addr,
						enc_enc_param.nOutOutputSize
					);
					output_buffer_offset += enc_enc_param.nOutOutputSize;
				}

				if (enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_DIS)
				{
					g_assert(output_buffer != NULL);

					/* Set the output buffer's size to the actual number of bytes
					 * filled by the derived class */
					gst_buffer_set_size(output_buffer, output_buffer_offset);

					/* Set the frame DTS */
					frame->dts = frame->pts;

					/* And finish the frame, handing the output data over to the base class */
					gst_video_encoder_finish_frame(encoder, frame);

					output_buffer = NULL;
					frame_finished = TRUE;

					if (!(enc_enc_param.eOutRetCode & VPU_ENC_INPUT_USED))
						GST_WARNING_OBJECT(vpu_base_enc, "frame finished, but VPU did not report the input as used");

					break;
				}
			}
		}
		while (!(enc_enc_param.eOutRetCode & VPU_ENC_INPUT_USED)); /* VPU_ENC_INPUT_NOT_USED has value 0x0 - cannot use it for flag checks */

		/* If output_buffer is NULL at this point, it means VPU_ENC_OUTPUT_DIS was never communicated
		 * by the VPU, and the buffer is unfinished. -> Drop it. */
		if (output_buffer != NULL)
		{
			GST_WARNING_OBJECT(vpu_base_enc, "frame unfinished ; dropping");
			gst_buffer_unref(output_buffer);
			frame->output_buffer = NULL; /* necessary to make finish_frame() drop the frame */
			gst_video_encoder_finish_frame(encoder, frame);
		}
	}

	return GST_FLOW_OK;
}
Code example #14
File: blitter.c  Project: sijisunny/gstreamer-imx
gboolean gst_imx_ipu_blitter_set_input_buffer(GstImxIpuBlitter *ipu_blitter, GstBuffer *input_buffer)
{
	GstImxPhysMemMeta *phys_mem_meta;

	g_assert(input_buffer != NULL);

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(input_buffer);

	/* Test if the input buffer uses DMA memory */
	if ((phys_mem_meta != NULL) && (phys_mem_meta->phys_addr != 0))
	{
		/* DMA memory present - the input buffer can be used as an actual input buffer */
		gst_imx_ipu_blitter_set_actual_input_buffer(ipu_blitter, gst_buffer_ref(input_buffer));

		GST_TRACE_OBJECT(ipu_blitter, "input buffer uses DMA memory - setting it as actual input buffer");
	}
	else
	{
		/* No DMA memory present; the input buffer needs to be copied to an internal
		 * temporary input buffer */

		GstBuffer *temp_input_buffer;
		GstFlowReturn flow_ret;

		GST_TRACE_OBJECT(ipu_blitter, "input buffer does not use DMA memory - need to copy it to an internal input DMA buffer");

		{
			/* The internal input buffer is the temp input frame's DMA memory.
			 * If it does not exist yet, it needs to be created here. The temp input
			 * frame is then mapped. */

			if (ipu_blitter->internal_bufferpool == NULL)
			{
				/* Internal bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input buffer */

				GstCaps *caps = gst_video_info_to_caps(&(ipu_blitter->input_video_info));

				ipu_blitter->internal_bufferpool = gst_imx_ipu_blitter_create_bufferpool(
					ipu_blitter,
					caps,
					ipu_blitter->input_video_info.size,
					2, 0,
					NULL,
					NULL
				);

				gst_caps_unref(caps);

				if (ipu_blitter->internal_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(ipu_blitter, "failed to create internal bufferpool");
					return FALSE;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(ipu_blitter->internal_bufferpool))
				gst_buffer_pool_set_active(ipu_blitter->internal_bufferpool, TRUE);
		}

		/* Create the internal input buffer */
		flow_ret = gst_buffer_pool_acquire_buffer(ipu_blitter->internal_bufferpool, &temp_input_buffer, NULL);
		if (flow_ret != GST_FLOW_OK)
		{
			GST_ERROR_OBJECT(ipu_blitter, "error acquiring input frame buffer: %s", gst_pad_mode_get_name(flow_ret));
			return FALSE;
		}

		{
			GstVideoFrame input_frame, temp_input_frame;

			gst_video_frame_map(&input_frame, &(ipu_blitter->input_video_info), input_buffer, GST_MAP_READ);
			gst_video_frame_map(&temp_input_frame, &(ipu_blitter->input_video_info), temp_input_buffer, GST_MAP_WRITE);

			/* Copy the input buffer's pixels to the temp input frame
			 * The gst_video_frame_copy() makes sure stride and plane offset values from both
			 * frames are respected */
			gst_video_frame_copy(&temp_input_frame, &input_frame);

			gst_video_frame_unmap(&temp_input_frame);
			gst_video_frame_unmap(&input_frame);
		}

		/* Finally, set the temp input buffer as the actual input buffer */
		gst_imx_ipu_blitter_set_actual_input_buffer(ipu_blitter, temp_input_buffer);
	}

	/* Configure interlacing */
	ipu_blitter->priv->task.input.deinterlace.enable = 0;
	if (ipu_blitter->deinterlace_mode != GST_IMX_IPU_BLITTER_DEINTERLACE_NONE)
	{
		switch (ipu_blitter->input_video_info.interlace_mode)
		{
			case GST_VIDEO_INTERLACE_MODE_INTERLEAVED:
				GST_TRACE_OBJECT(ipu_blitter, "input stream uses interlacing -> deinterlacing enabled");
				ipu_blitter->priv->task.input.deinterlace.enable = 1;
				break;
			case GST_VIDEO_INTERLACE_MODE_MIXED:
			{
				GstVideoMeta *video_meta;

				GST_TRACE_OBJECT(ipu_blitter, "input stream uses mixed interlacing -> need to check video metadata deinterlacing flag");

				video_meta = gst_buffer_get_video_meta(input_buffer);
				if (video_meta != NULL)
				{
					if (video_meta->flags & GST_VIDEO_FRAME_FLAG_INTERLACED)
					{
						GST_TRACE_OBJECT(ipu_blitter, "frame has video metadata and deinterlacing flag");
						ipu_blitter->priv->task.input.deinterlace.enable = 1;
					}
					else
						GST_TRACE_OBJECT(ipu_blitter, "frame has video metadata but no deinterlacing flag");
				}
				else
					GST_TRACE_OBJECT(ipu_blitter, "frame has no video metadata -> no deinterlacing done");

				break;
			}
			case GST_VIDEO_INTERLACE_MODE_PROGRESSIVE:
			{
				GST_TRACE_OBJECT(ipu_blitter, "input stream is progressive -> no deinterlacing necessary");
				break;
			}
			case GST_VIDEO_INTERLACE_MODE_FIELDS:
				GST_FIXME_OBJECT(ipu_blitter, "2-fields deinterlacing not supported (yet)");
				break;
			default:
				break;
		}
	}

	return TRUE;
}
Code example #15
static gboolean
gst_mpeg2dec_decide_allocation (GstVideoDecoder * decoder, GstQuery * query)
{
  GstMpeg2dec *dec = GST_MPEG2DEC (decoder);
  GstBufferPool *pool;
  guint size, min, max;
  GstStructure *config, *down_config = NULL;
  GstAllocator *allocator;
  GstAllocationParams params;
  gboolean update_allocator;
  gboolean has_videometa = FALSE;
  GstCaps *caps;

  /* Get rid of ancient pool */
  if (dec->downstream_pool) {
    gst_buffer_pool_set_active (dec->downstream_pool, FALSE);
    gst_object_unref (dec->downstream_pool);
    dec->downstream_pool = NULL;
  }

  /* Get negotiated allocation caps */
  gst_query_parse_allocation (query, &caps, NULL);

  /* Set allocation parameters to guarantee 16-byte aligned output buffers */
  if (gst_query_get_n_allocation_params (query) > 0) {
    gst_query_parse_nth_allocation_param (query, 0, &allocator, &params);
    update_allocator = TRUE;
  } else {
    allocator = NULL;
    gst_allocation_params_init (&params);
    update_allocator = FALSE;
  }

  params.align = MAX (params.align, 15);

  if (update_allocator)
    gst_query_set_nth_allocation_param (query, 0, allocator, &params);
  else
    gst_query_add_allocation_param (query, allocator, &params);

  /* Now chain up to the parent class to guarantee that we can
   * get a buffer pool from the query */
  if (!GST_VIDEO_DECODER_CLASS (parent_class)->decide_allocation (decoder,
          query)) {
    if (allocator)
      gst_object_unref (allocator);
    return FALSE;
  }

  gst_query_parse_nth_allocation_pool (query, 0, &pool, &size, &min, &max);

  config = gst_buffer_pool_get_config (pool);
  if (gst_query_find_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL)) {
    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_META);
    has_videometa = TRUE;
  }

  if (dec->need_alignment) {
    /* If downstream does not support video meta, we will have to copy, keep
     * the downstream pool to avoid double copying */
    if (!has_videometa) {
      dec->downstream_pool = pool;
      pool = NULL;
      down_config = config;
      config = NULL;
      min = 2;
      max = 0;
    }

    /* In case downstream support video meta, but the downstream pool does not
     * have alignment support, discard downstream pool and use video pool */
    else if (!gst_buffer_pool_has_option (pool,
            GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT)) {
      gst_object_unref (pool);
      pool = NULL;
      gst_structure_free (config);
      config = NULL;
    }

    if (!pool)
      pool = gst_mpeg2dec_create_generic_pool (allocator, &params, caps, size,
          min, max, &config);

    gst_buffer_pool_config_add_option (config,
        GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
    gst_buffer_pool_config_set_video_alignment (config, &dec->valign);
  }

  if (allocator)
    gst_object_unref (allocator);

  /* If we are copying out, we'll need to setup and activate the other pool */
  if (dec->downstream_pool) {
    if (!gst_buffer_pool_set_config (dec->downstream_pool, down_config)) {
      down_config = gst_buffer_pool_get_config (dec->downstream_pool);
      if (!gst_buffer_pool_config_validate_params (down_config, caps, size, min,
              max)) {
        gst_structure_free (down_config);
        goto config_failed;
      }

      if (!gst_buffer_pool_set_config (dec->downstream_pool, down_config))
        goto config_failed;
    }

    if (!gst_buffer_pool_set_active (dec->downstream_pool, TRUE))
      goto activate_failed;
  }

  /* Now configure the pool, if the pool had made some changes, it will
   * return FALSE. Validate the changes ...*/
  if (!gst_buffer_pool_set_config (pool, config)) {
    config = gst_buffer_pool_get_config (pool);

    /* Check basic params */
    if (!gst_buffer_pool_config_validate_params (config, caps, size, min, max)) {
      gst_structure_free (config);
      goto config_failed;
    }

    /* If needed, check that resulting alignment is still valid */
    if (dec->need_alignment) {
      GstVideoAlignment valign;

      if (!gst_buffer_pool_config_get_video_alignment (config, &valign)) {
        gst_structure_free (config);
        goto config_failed;
      }

      if (valign.padding_left != 0 || valign.padding_top != 0
          || valign.padding_right < dec->valign.padding_right
          || valign.padding_bottom < dec->valign.padding_bottom) {
        gst_structure_free (config);
        goto config_failed;
      }
    }

    if (!gst_buffer_pool_set_config (pool, config))
      goto config_failed;
  }

  /* For external pools, we need to check strides */
  if (!GST_IS_VIDEO_BUFFER_POOL (pool) && has_videometa) {
    GstBuffer *buffer;
    const GstVideoFormatInfo *finfo;
    GstVideoMeta *vmeta;
    gint uv_stride;

    if (!gst_buffer_pool_set_active (pool, TRUE))
      goto activate_failed;

    if (gst_buffer_pool_acquire_buffer (pool, &buffer, NULL) != GST_FLOW_OK) {
      gst_buffer_pool_set_active (pool, FALSE);
      goto acquire_failed;
    }

    vmeta = gst_buffer_get_video_meta (buffer);
    finfo = gst_video_format_get_info (vmeta->format);

    /* Check that strides are compatible. In this case, we can scale the
     * stride directly since all the pixel strides for the formats we support
     * is 1 */
    uv_stride = GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (finfo, 1, vmeta->stride[0]);
    if (uv_stride != vmeta->stride[1] || uv_stride != vmeta->stride[2]) {
      gst_buffer_pool_set_active (pool, FALSE);
      gst_object_unref (pool);

      pool = gst_mpeg2dec_create_generic_pool (allocator, &params, caps, size,
          min, max, &config);

      if (dec->need_alignment) {
        gst_buffer_pool_config_add_option (config,
            GST_BUFFER_POOL_OPTION_VIDEO_ALIGNMENT);
        gst_buffer_pool_config_set_video_alignment (config, &dec->valign);
      }

      /* Generic pool don't fail on _set_config() */
      gst_buffer_pool_set_config (pool, config);
    }

    gst_buffer_unref (buffer);
  }

  gst_query_set_nth_allocation_pool (query, 0, pool, size, min, max);
  gst_object_unref (pool);

  return TRUE;

config_failed:
  gst_object_unref (pool);
  GST_ELEMENT_ERROR (dec, RESOURCE, SETTINGS,
      ("Failed to configure buffer pool"),
      ("Configuration is most likely invalid, please report this issue."));
  return FALSE;

activate_failed:
  gst_object_unref (pool);
  GST_ELEMENT_ERROR (dec, RESOURCE, SETTINGS,
      ("Failed to activate buffer pool"), (NULL));
  return FALSE;

acquire_failed:
  gst_object_unref (pool);
  GST_ELEMENT_ERROR (dec, RESOURCE, SETTINGS,
      ("Failed to acquire a buffer"), (NULL));
  return FALSE;
}
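gst_query_find_allocation_meta() above only finds GST_VIDEO_META_API_TYPE if the downstream element advertised it. For context, a hedged sketch of the peer side (my_sink_propose_allocation is a hypothetical sink vfunc, not part of gstmpeg2dec):

/* Sketch: a sink declaring GstVideoMeta support in its propose_allocation
 * handler, which lets decoders like the one above use arbitrary strides and
 * offsets instead of copying frames into tightly packed buffers. */
static gboolean
my_sink_propose_allocation (GstBaseSink * sink, GstQuery * query)
{
  (void) sink;                  /* not needed for this sketch */
  gst_query_add_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);
  return TRUE;
}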
Code example #16
static void
gst_gl_composition_overlay_upload (GstGLCompositionOverlay * overlay,
    GstBuffer * buf)
{
  GstGLMemory *comp_gl_memory = NULL;
  GstBuffer *comp_buffer = NULL;
  GstBuffer *overlay_buffer = NULL;
  GstVideoInfo vinfo;
  GstVideoMeta *vmeta;
  GstVideoFrame *comp_frame;
  GstVideoFrame gl_frame;

  comp_buffer =
      gst_video_overlay_rectangle_get_pixels_unscaled_argb (overlay->rectangle,
      GST_VIDEO_OVERLAY_FORMAT_FLAG_PREMULTIPLIED_ALPHA);

  comp_frame = g_slice_new (GstVideoFrame);

  vmeta = gst_buffer_get_video_meta (comp_buffer);
  gst_video_info_set_format (&vinfo, vmeta->format, vmeta->width,
      vmeta->height);
  vinfo.stride[0] = vmeta->stride[0];

  if (gst_video_frame_map (comp_frame, &vinfo, comp_buffer, GST_MAP_READ)) {
    GstGLVideoAllocationParams *params;
    GstGLBaseMemoryAllocator *mem_allocator;
    GstAllocator *allocator;

    allocator =
        GST_ALLOCATOR (gst_gl_memory_allocator_get_default (overlay->context));
    mem_allocator = GST_GL_BASE_MEMORY_ALLOCATOR (allocator);

    gst_gl_composition_overlay_add_transformation (overlay, buf);

    params = gst_gl_video_allocation_params_new_wrapped_data (overlay->context,
        NULL, &comp_frame->info, 0, NULL, GST_GL_TEXTURE_TARGET_2D,
        GST_GL_RGBA, comp_frame->data[0], comp_frame,
        _video_frame_unmap_and_free);

    comp_gl_memory =
        (GstGLMemory *) gst_gl_base_memory_alloc (mem_allocator,
        (GstGLAllocationParams *) params);

    gst_gl_allocation_params_free ((GstGLAllocationParams *) params);
    gst_object_unref (allocator);

    overlay_buffer = gst_buffer_new ();
    gst_buffer_append_memory (overlay_buffer, (GstMemory *) comp_gl_memory);

    if (!gst_video_frame_map (&gl_frame, &comp_frame->info, overlay_buffer,
            GST_MAP_READ | GST_MAP_GL)) {
      gst_buffer_unref (overlay_buffer);
      _video_frame_unmap_and_free (comp_frame);
      GST_WARNING_OBJECT (overlay, "Cannot upload overlay texture");
      return;
    }

    gst_memory_ref ((GstMemory *) comp_gl_memory);
    overlay->gl_memory = comp_gl_memory;
    overlay->texture_id = comp_gl_memory->tex_id;

    gst_buffer_unref (overlay_buffer);
    gst_video_frame_unmap (&gl_frame);

    GST_DEBUG ("uploaded overlay texture %d", overlay->texture_id);
  } else {
    g_slice_free (GstVideoFrame, comp_frame);
  }
}
Code example #17
/**
 * gst_vaapi_subpicture_new_from_overlay_rectangle:
 * @display: a #GstVaapiDisplay
 * @rect: a #GstVideoOverlayRectangle
 *
 * Helper function that creates a new #GstVaapiSubpicture from a
 * #GstVideoOverlayRectangle. A new #GstVaapiImage is also created
 * along the way and attached to the resulting subpicture. The
 * subpicture holds a unique reference to the underlying image.
 *
 * Return value: the newly allocated #GstVaapiSubpicture object
 */
GstVaapiSubpicture *
gst_vaapi_subpicture_new_from_overlay_rectangle (GstVaapiDisplay * display,
    GstVideoOverlayRectangle * rect)
{
  GstVaapiSubpicture *subpicture;
  GstVideoFormat format;
  GstVaapiImage *image;
  GstVaapiImageRaw raw_image;
  GstBuffer *buffer;
  guint8 *data;
  gfloat global_alpha;
  guint width, height, stride;
  guint hw_flags, flags;
  GstVideoMeta *vmeta;
  GstMapInfo map_info;

  g_return_val_if_fail (GST_IS_VIDEO_OVERLAY_RECTANGLE (rect), NULL);

  /* XXX: use gst_vaapi_image_format_from_video() */
#if G_BYTE_ORDER == G_LITTLE_ENDIAN
  format = GST_VIDEO_FORMAT_BGRA;
#else
  format = GST_VIDEO_FORMAT_ARGB;
#endif
  if (!gst_vaapi_display_has_subpicture_format (display, format, &hw_flags))
    return NULL;

  flags =
      hw_flags &
      from_GstVideoOverlayFormatFlags (gst_video_overlay_rectangle_get_flags
      (rect));

  buffer = gst_video_overlay_rectangle_get_pixels_unscaled_argb (rect,
      to_GstVideoOverlayFormatFlags (flags));
  if (!buffer)
    return NULL;

  vmeta = gst_buffer_get_video_meta (buffer);
  if (!vmeta)
    return NULL;
  width = vmeta->width;
  height = vmeta->height;

  if (!gst_video_meta_map (vmeta, 0, &map_info, (gpointer *) & data,
          (gint *) & stride, GST_MAP_READ))
    return NULL;

  image = gst_vaapi_image_new (display, format, width, height);
  if (!image)
    return NULL;

  raw_image.format = format;
  raw_image.width = width;
  raw_image.height = height;
  raw_image.num_planes = 1;
  raw_image.pixels[0] = data;
  raw_image.stride[0] = stride;
  if (!gst_vaapi_image_update_from_raw (image, &raw_image, NULL)) {
    GST_WARNING ("could not update VA image with subtitle data");
    gst_vaapi_object_unref (image);
    return NULL;
  }

  subpicture = gst_vaapi_subpicture_new (image, flags);
  gst_vaapi_object_unref (image);
  gst_video_meta_unmap (vmeta, 0, &map_info);
  if (!subpicture)
    return NULL;

  if (flags & GST_VAAPI_SUBPICTURE_FLAG_GLOBAL_ALPHA) {
    global_alpha = gst_video_overlay_rectangle_get_global_alpha (rect);
    if (!gst_vaapi_subpicture_set_global_alpha (subpicture, global_alpha))
      return NULL;
  }
  return subpicture;
}
Code example #18
File: base_enc.c  Project: Radbug/gst-fsl
static GstFlowReturn gst_fsl_vpu_base_enc_handle_frame(GstVideoEncoder *encoder, GstVideoCodecFrame *frame)
{
	VpuEncRetCode enc_ret;
	VpuEncEncParam enc_enc_param;
	GstFslPhysMemMeta *phys_mem_meta;
	GstFslVpuBaseEncClass *klass;
	GstFslVpuBaseEnc *vpu_base_enc;
	VpuFrameBuffer input_framebuf;
	GstBuffer *input_buffer;

	vpu_base_enc = GST_FSL_VPU_BASE_ENC(encoder);
	klass = GST_FSL_VPU_BASE_ENC_CLASS(G_OBJECT_GET_CLASS(vpu_base_enc));

	g_assert(klass->set_frame_enc_params != NULL);

	memset(&enc_enc_param, 0, sizeof(enc_enc_param));
	memset(&input_framebuf, 0, sizeof(input_framebuf));

	phys_mem_meta = GST_FSL_PHYS_MEM_META_GET(frame->input_buffer);

	if (phys_mem_meta == NULL)
	{
		GstVideoFrame temp_input_video_frame, temp_incoming_video_frame;

		if (vpu_base_enc->internal_input_buffer == NULL)
		{
			/* The internal input buffer is the temp input frame's DMA memory.
			 * If it does not exist yet, it needs to be created here. The temp input
			 * frame is then mapped. */

			GstFlowReturn flow_ret;

			if (vpu_base_enc->internal_bufferpool == NULL)
			{
				/* Internal bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input buffer */

				GstStructure *config;
				GstCaps *caps;
				GstAllocator *allocator;

				caps = gst_video_info_to_caps(&(vpu_base_enc->video_info));
				vpu_base_enc->internal_bufferpool = gst_fsl_phys_mem_buffer_pool_new(FALSE);
				allocator = gst_fsl_vpu_enc_allocator_obtain();

				config = gst_buffer_pool_get_config(vpu_base_enc->internal_bufferpool);
				gst_buffer_pool_config_set_params(config, caps, vpu_base_enc->video_info.size, 2, 0);
				gst_buffer_pool_config_set_allocator(config, allocator, NULL);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_FSL_PHYS_MEM);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
				gst_buffer_pool_set_config(vpu_base_enc->internal_bufferpool, config);

				gst_caps_unref(caps);

				if (vpu_base_enc->internal_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(vpu_base_enc, "failed to create internal bufferpool");
					return GST_FLOW_ERROR;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(vpu_base_enc->internal_bufferpool))
				gst_buffer_pool_set_active(vpu_base_enc->internal_bufferpool, TRUE);

			/* Create the internal input buffer */
			flow_ret = gst_buffer_pool_acquire_buffer(vpu_base_enc->internal_bufferpool, &(vpu_base_enc->internal_input_buffer), NULL);
			if (flow_ret != GST_FLOW_OK)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "error acquiring input frame buffer: %s", gst_pad_mode_get_name(flow_ret));
				return FALSE;
			}
		}

		gst_video_frame_map(&temp_incoming_video_frame, &(vpu_base_enc->video_info), frame->input_buffer, GST_MAP_READ);
		gst_video_frame_map(&temp_input_video_frame, &(vpu_base_enc->video_info), vpu_base_enc->internal_input_buffer, GST_MAP_WRITE);

		gst_video_frame_copy(&temp_input_video_frame, &temp_incoming_video_frame);

		gst_video_frame_unmap(&temp_incoming_video_frame);
		gst_video_frame_unmap(&temp_input_video_frame);

		input_buffer = vpu_base_enc->internal_input_buffer;
		phys_mem_meta = GST_FSL_PHYS_MEM_META_GET(vpu_base_enc->internal_input_buffer);
	}
	else
		input_buffer = frame->input_buffer;

	{
		gsize *plane_offsets;
		gint *plane_strides;
		GstVideoMeta *video_meta;
		unsigned char *phys_ptr;

		video_meta = gst_buffer_get_video_meta(input_buffer);
		if (video_meta != NULL)
		{
			plane_offsets = video_meta->offset;
			plane_strides = video_meta->stride;
		}
		else
		{
			plane_offsets = vpu_base_enc->video_info.offset;
			plane_strides = vpu_base_enc->video_info.stride;
		}

		phys_ptr = (unsigned char*)(phys_mem_meta->phys_addr);

		input_framebuf.pbufY = phys_ptr;
		input_framebuf.pbufCb = phys_ptr + plane_offsets[1];
		input_framebuf.pbufCr = phys_ptr + plane_offsets[2];
		input_framebuf.pbufMvCol = NULL;
		input_framebuf.nStrideY = plane_strides[0];
		input_framebuf.nStrideC = plane_strides[1];

		GST_TRACE_OBJECT(vpu_base_enc, "width: %d   height: %d   stride 0: %d   stride 1: %d   offset 0: %d   offset 1: %d   offset 2: %d", GST_VIDEO_INFO_WIDTH(&(vpu_base_enc->video_info)), GST_VIDEO_INFO_HEIGHT(&(vpu_base_enc->video_info)), plane_strides[0], plane_strides[1], plane_offsets[0], plane_offsets[1], plane_offsets[2]);

		if (vpu_base_enc->framebuffers == NULL)
		{
			GstFslVpuFramebufferParams fbparams;
			gst_fsl_vpu_framebuffers_enc_init_info_to_params(&(vpu_base_enc->init_info), &fbparams);
			fbparams.pic_width = vpu_base_enc->open_param.nPicWidth;
			fbparams.pic_height = vpu_base_enc->open_param.nPicHeight;
			vpu_base_enc->framebuffers = gst_fsl_vpu_framebuffers_new(&fbparams, gst_fsl_vpu_enc_allocator_obtain());
			gst_fsl_vpu_framebuffers_register_with_encoder(vpu_base_enc->framebuffers, vpu_base_enc->handle, plane_strides[0]);
		}

		if (vpu_base_enc->output_phys_buffer == NULL)
		{
			vpu_base_enc->output_phys_buffer = (GstFslPhysMemory *)gst_allocator_alloc(gst_fsl_vpu_enc_allocator_obtain(), vpu_base_enc->framebuffers->total_size, NULL);

			if (vpu_base_enc->output_phys_buffer == NULL)
			{
				GST_ERROR_OBJECT(vpu_base_enc, "could not allocate physical buffer for output data");
				return GST_FLOW_ERROR;
			}
		}
	}

	enc_enc_param.nInVirtOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->mapped_virt_addr); /* TODO */
	enc_enc_param.nInPhyOutput = (unsigned int)(vpu_base_enc->output_phys_buffer->phys_addr);
	enc_enc_param.nInOutputBufLen = vpu_base_enc->output_phys_buffer->mem.size;
	enc_enc_param.nPicWidth = vpu_base_enc->framebuffers->pic_width;
	enc_enc_param.nPicHeight = vpu_base_enc->framebuffers->pic_height;
	enc_enc_param.nFrameRate = vpu_base_enc->open_param.nFrameRate;
	enc_enc_param.pInFrame = &input_framebuf;

	if (!klass->set_frame_enc_params(vpu_base_enc, &enc_enc_param, &(vpu_base_enc->open_param)))
	{
		GST_ERROR_OBJECT(vpu_base_enc, "derived class could not frame enc params");
		return GST_FLOW_ERROR;
	}

	enc_ret = VPU_EncEncodeFrame(vpu_base_enc->handle, &enc_enc_param);
	if (enc_ret != VPU_ENC_RET_SUCCESS)
	{
		GST_ERROR_OBJECT(vpu_base_enc, "failed to encode frame: %s", gst_fsl_vpu_strerror(enc_ret));
		VPU_EncReset(vpu_base_enc->handle);
		return GST_FLOW_ERROR;
	}

	GST_LOG_OBJECT(vpu_base_enc, "out ret code: 0x%x  out size: %u", enc_enc_param.eOutRetCode, enc_enc_param.nOutOutputSize);

	if ((enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_DIS) || (enc_enc_param.eOutRetCode & VPU_ENC_OUTPUT_SEQHEADER))
	{
		gst_video_encoder_allocate_output_frame(encoder, frame, enc_enc_param.nOutOutputSize);
		gst_buffer_fill(frame->output_buffer, 0, vpu_base_enc->output_phys_buffer->mapped_virt_addr, enc_enc_param.nOutOutputSize);
		gst_video_encoder_finish_frame(encoder, frame);
	}

	return GST_FLOW_OK;
}
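
All of the encoder examples here share one pattern: prefer the per-plane offsets and strides from GstVideoMeta when the input buffer carries one, and fall back to the negotiated GstVideoInfo otherwise. The following is a minimal, stand-alone sketch of that fallback; the helper name is made up for illustration and does not come from any of the quoted projects.

#include <gst/video/video.h>

/* Sketch: per-plane layout lookup with GstVideoMeta preferred over the
 * negotiated GstVideoInfo (hypothetical helper, not project code). */
static void
example_get_plane_layout (GstBuffer * buffer, const GstVideoInfo * info,
    guint plane, gsize * offset, gint * stride)
{
  GstVideoMeta *meta = gst_buffer_get_video_meta (buffer);

  if (meta != NULL && plane < meta->n_planes) {
    /* layout attached by the producer for this specific buffer */
    *offset = meta->offset[plane];
    *stride = meta->stride[plane];
  } else {
    /* default layout derived from the negotiated caps */
    *offset = GST_VIDEO_INFO_PLANE_OFFSET (info, plane);
    *stride = GST_VIDEO_INFO_PLANE_STRIDE (info, plane);
  }
}
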
Code example #19
static GstFlowReturn gst_imx_vpu_encoder_base_handle_frame(GstVideoEncoder *encoder, GstVideoCodecFrame *input_frame)
{
	GstImxPhysMemMeta *phys_mem_meta;
	GstImxVpuEncoderBaseClass *klass;
	GstImxVpuEncoderBase *vpu_encoder_base;
	GstBuffer *input_buffer;
	ImxVpuEncParams enc_params;

	vpu_encoder_base = GST_IMX_VPU_ENCODER_BASE(encoder);
	klass = GST_IMX_VPU_ENCODER_BASE_CLASS(G_OBJECT_GET_CLASS(vpu_encoder_base));

	if (vpu_encoder_base->drop)
	{
		input_frame->output_buffer = NULL; /* necessary to make finish_frame() drop the frame */
		gst_video_encoder_finish_frame(encoder, input_frame);
		return GST_FLOW_OK;
	}

	/* Get access to the input buffer's physical address */

	phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(input_frame->input_buffer);

	/* If the incoming frame's buffer is not using physically contiguous memory,
	 * it needs to be copied to the internal input buffer, otherwise the VPU
	 * encoder cannot read the frame */
	if (phys_mem_meta == NULL)
	{
		/* No physical memory metadata found -> buffer is not physically contiguous */

		GstVideoFrame temp_input_video_frame, temp_incoming_video_frame;

		GST_LOG_OBJECT(vpu_encoder_base, "input buffer not physically contiguous - frame copy is necessary");

		if (vpu_encoder_base->internal_input_buffer == NULL)
		{
			/* The internal input buffer is the temp input frame's DMA memory.
			 * If it does not exist yet, it needs to be created here. The temp input
			 * frame is then mapped. */

			GstFlowReturn flow_ret;

			if (vpu_encoder_base->internal_input_bufferpool == NULL)
			{
				/* Internal bufferpool does not exist yet - create it now,
				 * so that it can in turn create the internal input buffer */

				GstStructure *config;
				GstCaps *caps;

				GST_DEBUG_OBJECT(vpu_encoder_base, "creating internal bufferpool");

				caps = gst_video_info_to_caps(&(vpu_encoder_base->video_info));
				vpu_encoder_base->internal_input_bufferpool = gst_imx_phys_mem_buffer_pool_new(FALSE);

				gst_object_ref(vpu_encoder_base->phys_mem_allocator);

				config = gst_buffer_pool_get_config(vpu_encoder_base->internal_input_bufferpool);
				gst_buffer_pool_config_set_params(config, caps, vpu_encoder_base->video_info.size, 2, 0);
				gst_buffer_pool_config_set_allocator(config, vpu_encoder_base->phys_mem_allocator, NULL);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_IMX_PHYS_MEM);
				gst_buffer_pool_config_add_option(config, GST_BUFFER_POOL_OPTION_VIDEO_META);
				gst_buffer_pool_set_config(vpu_encoder_base->internal_input_bufferpool, config);

				gst_caps_unref(caps);

				if (vpu_encoder_base->internal_input_bufferpool == NULL)
				{
					GST_ERROR_OBJECT(vpu_encoder_base, "failed to create internal bufferpool");
					return GST_FLOW_ERROR;
				}
			}

			/* Future versions of this code may propose the internal bufferpool upstream;
			 * hence the is_active check */
			if (!gst_buffer_pool_is_active(vpu_encoder_base->internal_input_bufferpool))
				gst_buffer_pool_set_active(vpu_encoder_base->internal_input_bufferpool, TRUE);

			/* Create the internal input buffer */
			flow_ret = gst_buffer_pool_acquire_buffer(vpu_encoder_base->internal_input_bufferpool, &(vpu_encoder_base->internal_input_buffer), NULL);
			if (flow_ret != GST_FLOW_OK)
			{
				GST_ERROR_OBJECT(vpu_encoder_base, "error acquiring input frame buffer: %s", gst_pad_mode_get_name(flow_ret));
				return flow_ret;
			}
		}

		/* The internal input buffer exists at this point. Since the incoming frame
		 * is not stored in physical memory, copy its pixels to the internal
		 * input buffer, so the encoder can read them. */

		gst_video_frame_map(&temp_incoming_video_frame, &(vpu_encoder_base->video_info), input_frame->input_buffer, GST_MAP_READ);
		gst_video_frame_map(&temp_input_video_frame, &(vpu_encoder_base->video_info), vpu_encoder_base->internal_input_buffer, GST_MAP_WRITE);

		gst_video_frame_copy(&temp_input_video_frame, &temp_incoming_video_frame);

		gst_video_frame_unmap(&temp_incoming_video_frame);
		gst_video_frame_unmap(&temp_input_video_frame);

		/* Set the input buffer as the encoder's input */
		input_buffer = vpu_encoder_base->internal_input_buffer;
		/* And use the input buffer's physical memory metadata */
		phys_mem_meta = GST_IMX_PHYS_MEM_META_GET(vpu_encoder_base->internal_input_buffer);
	}
	else
	{
		/* Physical memory metadata found -> buffer is physically contiguous
		 * It can be used directly as input for the VPU encoder */
		input_buffer = input_frame->input_buffer;
	}


	/* Prepare the input buffer's information (strides, plane offsets ..) for encoding */

	{
		GstVideoMeta *video_meta;

		/* Try to use plane offset and stride information from the video
		 * metadata if present, since these can be more accurate than
		 * the information from the video info */
		video_meta = gst_buffer_get_video_meta(input_buffer);
		if (video_meta != NULL)
		{
			vpu_encoder_base->input_framebuffer.y_stride = video_meta->stride[0];
			vpu_encoder_base->input_framebuffer.cbcr_stride = video_meta->stride[1];

			vpu_encoder_base->input_framebuffer.y_offset = video_meta->offset[0];
			vpu_encoder_base->input_framebuffer.cb_offset = video_meta->offset[1];
			vpu_encoder_base->input_framebuffer.cr_offset = video_meta->offset[2];
		}
		else
		{
			vpu_encoder_base->input_framebuffer.y_stride = GST_VIDEO_INFO_PLANE_STRIDE(&(vpu_encoder_base->video_info), 0);
			vpu_encoder_base->input_framebuffer.cbcr_stride = GST_VIDEO_INFO_PLANE_STRIDE(&(vpu_encoder_base->video_info), 1);

			vpu_encoder_base->input_framebuffer.y_offset = GST_VIDEO_INFO_PLANE_OFFSET(&(vpu_encoder_base->video_info), 0);
			vpu_encoder_base->input_framebuffer.cb_offset = GST_VIDEO_INFO_PLANE_OFFSET(&(vpu_encoder_base->video_info), 1);
			vpu_encoder_base->input_framebuffer.cr_offset = GST_VIDEO_INFO_PLANE_OFFSET(&(vpu_encoder_base->video_info), 2);
		}

		vpu_encoder_base->input_framebuffer.mvcol_offset = 0; /* this is not used by the encoder */
		vpu_encoder_base->input_framebuffer.context = (void *)(input_frame->system_frame_number);

		vpu_encoder_base->input_dmabuffer.fd = -1;
		vpu_encoder_base->input_dmabuffer.physical_address = phys_mem_meta->phys_addr;
		vpu_encoder_base->input_dmabuffer.size = gst_buffer_get_size(input_buffer);
	}


	/* Prepare the encoding parameters */

	memset(&enc_params, 0, sizeof(enc_params));
	imx_vpu_enc_set_default_encoding_params(vpu_encoder_base->encoder, &enc_params);
	enc_params.force_I_frame = 0;
	enc_params.acquire_output_buffer = gst_imx_vpu_encoder_base_acquire_output_buffer;
	enc_params.finish_output_buffer = gst_imx_vpu_encoder_base_finish_output_buffer;
	enc_params.output_buffer_context = vpu_encoder_base;

	/* Force I-frame if either IS_FORCE_KEYFRAME or IS_FORCE_KEYFRAME_HEADERS is set for the current frame. */
	if (GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME(input_frame) || GST_VIDEO_CODEC_FRAME_IS_FORCE_KEYFRAME_HEADERS(input_frame))
	{
		enc_params.force_I_frame = 1;
		GST_LOG_OBJECT(vpu_encoder_base, "got request to make this a keyframe - forcing I frame");
		GST_VIDEO_CODEC_FRAME_SET_SYNC_POINT(input_frame);
	}

	/* Give the derived class a chance to set up encoding parameters too */
	if ((klass->set_frame_enc_params != NULL) && !klass->set_frame_enc_params(vpu_encoder_base, &enc_params))
	{
		GST_ERROR_OBJECT(vpu_encoder_base, "derived class could not frame enc params");
		return GST_FLOW_ERROR;
	}


	/* Main encoding block */
	{
		ImxVpuEncReturnCodes enc_ret;
		unsigned int output_code = 0;
		ImxVpuEncodedFrame encoded_data_frame;

		vpu_encoder_base->output_buffer = NULL;

		/* The actual encoding call */
		memset(&encoded_data_frame, 0, sizeof(ImxVpuEncodedFrame));
		enc_ret = imx_vpu_enc_encode(vpu_encoder_base->encoder, &(vpu_encoder_base->input_frame), &encoded_data_frame, &enc_params, &output_code);
		if (enc_ret != IMX_VPU_ENC_RETURN_CODE_OK)
		{
			GST_ERROR_OBJECT(vpu_encoder_base, "failed to encode frame: %s", imx_vpu_enc_error_string(enc_ret));
			if (vpu_encoder_base->output_buffer != NULL)
				gst_buffer_unref(vpu_encoder_base->output_buffer);
			return GST_FLOW_ERROR;
		}

		/* Give the derived class a chance to process the encoded output buffer */
		if ((klass->process_output_buffer != NULL) && !klass->process_output_buffer(vpu_encoder_base, input_frame, &(vpu_encoder_base->output_buffer)))
		{
			GST_ERROR_OBJECT(vpu_encoder_base, "derived class reports failure while processing encoded output");
			if (vpu_encoder_base->output_buffer != NULL)
				gst_buffer_unref(vpu_encoder_base->output_buffer);
			return GST_FLOW_ERROR;
		}

		if (output_code & IMX_VPU_ENC_OUTPUT_CODE_ENCODED_FRAME_AVAILABLE)
		{
			GST_LOG_OBJECT(vpu_encoder_base, "VPU outputs encoded frame");

			/* TODO: make use of the frame context that is retrieved with get_frame(i)
			 * This is not strictly necessary, since the VPU encoder does not
			 * do frame reordering, nor does it produce delays, but it would
			 * be a bit cleaner. */

			input_frame->dts = input_frame->pts;

			/* Take all of the encoded bits; output_buffer contains a complete
			 * encoded frame at this point. */
			input_frame->output_buffer = vpu_encoder_base->output_buffer;

			/* And finish the frame, handing the output data over to the base class */
			gst_video_encoder_finish_frame(encoder, input_frame);
		}
		else
		{
			/* If at this point IMX_VPU_ENC_OUTPUT_CODE_ENCODED_FRAME_AVAILABLE is not set
			 * in the output_code, it means the input was used up before a frame could be
			 * encoded. Therefore, no output frame can be pushed downstream. Note that this
			 * should not happen during normal operation, so a warning is logged. */

			if (vpu_encoder_base->output_buffer != NULL)
				gst_buffer_unref(vpu_encoder_base->output_buffer);

			GST_WARNING_OBJECT(vpu_encoder_base, "frame unfinished; dropping");
			input_frame->output_buffer = NULL; /* necessary to make finish_frame() drop the frame */
			gst_video_encoder_finish_frame(encoder, input_frame);
		}
	}


	return GST_FLOW_OK;
}
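
When an element only needs CPU access to a single plane, gst_video_meta_map() is an alternative to dereferencing meta->offset and meta->stride by hand as the encoder examples above do; it maps the underlying memory and returns the plane pointer and stride directly. A minimal sketch, assuming only that the buffer carries a GstVideoMeta:

#include <gst/video/video.h>

/* Sketch: map plane 0 through the buffer's GstVideoMeta (hypothetical
 * helper, not taken from the encoder code above). */
static gboolean
example_inspect_first_plane (GstBuffer * buffer)
{
  GstVideoMeta *meta = gst_buffer_get_video_meta (buffer);
  GstMapInfo map;
  gpointer data;
  gint stride;

  if (meta == NULL)
    return FALSE;

  if (!gst_video_meta_map (meta, 0, &map, &data, &stride, GST_MAP_READ))
    return FALSE;

  GST_LOG ("plane 0: data %p, stride %d", data, stride);

  gst_video_meta_unmap (meta, 0, &map);
  return TRUE;
}
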
Code example #20
/* Allocate buffer and copy image data into Y444 format */
static GstFlowReturn
theora_handle_image (GstTheoraDec * dec, th_ycbcr_buffer buf,
    GstVideoCodecFrame * frame)
{
  GstVideoDecoder *decoder = GST_VIDEO_DECODER (dec);
  gint width, height, stride;
  GstFlowReturn result;
  gint i, comp;
  guint8 *dest, *src;
  GstVideoFrame vframe;
  gint pic_width, pic_height;
  gint offset_x, offset_y;

  result = gst_video_decoder_allocate_output_frame (decoder, frame);

  if (G_UNLIKELY (result != GST_FLOW_OK)) {
    GST_DEBUG_OBJECT (dec, "could not get buffer, reason: %s",
        gst_flow_get_name (result));
    return result;
  }

  if (!dec->can_crop) {
    /* we need to crop the hard way */
    offset_x = dec->info.pic_x;
    offset_y = dec->info.pic_y;
    pic_width = dec->info.pic_width;
    pic_height = dec->info.pic_height;
    /* Ensure correct offsets in chroma for formats that need it
     * by rounding the offset. libtheora will add proper pixels,
     * so no need to handle them ourselves. */
    if (offset_x & 1 && dec->info.pixel_fmt != TH_PF_444)
      offset_x--;
    if (offset_y & 1 && dec->info.pixel_fmt == TH_PF_420)
      offset_y--;
  } else {
    /* copy the whole frame */
    offset_x = 0;
    offset_y = 0;
    pic_width = dec->info.frame_width;
    pic_height = dec->info.frame_height;

    if (dec->info.pic_width != dec->info.frame_width ||
        dec->info.pic_height != dec->info.frame_height ||
        dec->info.pic_x != 0 || dec->info.pic_y != 0) {
      GstVideoMeta *vmeta;
      GstVideoCropMeta *cmeta;

      vmeta = gst_buffer_get_video_meta (frame->output_buffer);
      /* If the buffer pool didn't add the meta already
       * we add it ourselves here */
      if (!vmeta)
        vmeta = gst_buffer_add_video_meta (frame->output_buffer,
            GST_VIDEO_FRAME_FLAG_NONE,
            dec->output_state->info.finfo->format,
            dec->info.frame_width, dec->info.frame_height);

      /* Just to be sure that the buffer pool doesn't do something
       * completely weird and we would crash later
       */
      g_assert (vmeta->format == dec->output_state->info.finfo->format);
      g_assert (vmeta->width == dec->info.frame_width);
      g_assert (vmeta->height == dec->info.frame_height);

      cmeta = gst_buffer_add_video_crop_meta (frame->output_buffer);

      /* we can do things slightly more efficient when we know that
       * downstream understands clipping */
      cmeta->x = dec->info.pic_x;
      cmeta->y = dec->info.pic_y;
      cmeta->width = dec->info.pic_width;
      cmeta->height = dec->info.pic_height;
    }
  }

  /* if only libtheora would allow us to give it a destination frame */
  GST_CAT_TRACE_OBJECT (GST_CAT_PERFORMANCE, dec,
      "doing unavoidable video frame copy");

  if (G_UNLIKELY (!gst_video_frame_map (&vframe, &dec->output_state->info,
              frame->output_buffer, GST_MAP_WRITE)))
    goto invalid_frame;

  for (comp = 0; comp < 3; comp++) {
    width =
        GST_VIDEO_FORMAT_INFO_SCALE_WIDTH (vframe.info.finfo, comp, pic_width);
    height =
        GST_VIDEO_FORMAT_INFO_SCALE_HEIGHT (vframe.info.finfo, comp,
        pic_height);
    stride = GST_VIDEO_FRAME_COMP_STRIDE (&vframe, comp);
    dest = GST_VIDEO_FRAME_COMP_DATA (&vframe, comp);

    src = buf[comp].data;
    src += ((height == pic_height) ? offset_y : offset_y / 2)
        * buf[comp].stride;
    src += (width == pic_width) ? offset_x : offset_x / 2;

    for (i = 0; i < height; i++) {
      memcpy (dest, src, width);

      dest += stride;
      src += buf[comp].stride;
    }
  }
  gst_video_frame_unmap (&vframe);

  return GST_FLOW_OK;
invalid_frame:
  {
    GST_DEBUG_OBJECT (dec, "could not map video frame");
    return GST_FLOW_ERROR;
  }
}
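
The dec->can_crop flag used above is normally derived from the ALLOCATION query: only when downstream advertises both GstVideoMeta and GstVideoCropMeta can the decoder attach a crop rectangle instead of copying out the visible region. A minimal sketch of that check, not taken from theoradec itself:

#include <gst/video/video.h>

/* Sketch: test whether downstream advertised the metas needed for
 * zero-copy cropping in the ALLOCATION query (hypothetical helper). */
static gboolean
example_downstream_supports_crop (GstQuery * allocation_query)
{
  return gst_query_find_allocation_meta (allocation_query,
      GST_VIDEO_META_API_TYPE, NULL) &&
      gst_query_find_allocation_meta (allocation_query,
      GST_VIDEO_CROP_META_API_TYPE, NULL);
}
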
Code example #21
File: gstomxbufferpool.c  Project: allenk/gst-omx
static GstFlowReturn
gst_omx_buffer_pool_alloc_buffer (GstBufferPool * bpool,
    GstBuffer ** buffer, GstBufferPoolAcquireParams * params)
{
  GstOMXBufferPool *pool = GST_OMX_BUFFER_POOL (bpool);
  GstBuffer *buf;
  GstOMXBuffer *omx_buf;

  g_return_val_if_fail (pool->allocating, GST_FLOW_ERROR);

  omx_buf = g_ptr_array_index (pool->port->buffers, pool->current_buffer_index);
  g_return_val_if_fail (omx_buf != NULL, GST_FLOW_ERROR);

  if (pool->other_pool) {
    guint i, n;

    buf = g_ptr_array_index (pool->buffers, pool->current_buffer_index);
    g_assert (pool->other_pool == buf->pool);
    gst_object_replace ((GstObject **) & buf->pool, NULL);

    n = gst_buffer_n_memory (buf);
    for (i = 0; i < n; i++) {
      GstMemory *mem = gst_buffer_peek_memory (buf, i);

      /* FIXME: We don't allow sharing because we need to know
       * when the memory becomes unused and can only then put
       * it back to the pool. Which is done in the pool's release
       * function
       */
      GST_MINI_OBJECT_FLAG_SET (mem, GST_MEMORY_FLAG_NO_SHARE);
    }

    if (pool->add_videometa) {
      GstVideoMeta *meta;

      meta = gst_buffer_get_video_meta (buf);
      if (!meta) {
        gst_buffer_add_video_meta (buf, GST_VIDEO_FRAME_FLAG_NONE,
            GST_VIDEO_INFO_FORMAT (&pool->video_info),
            GST_VIDEO_INFO_WIDTH (&pool->video_info),
            GST_VIDEO_INFO_HEIGHT (&pool->video_info));
      }
    }

    pool->need_copy = FALSE;
  } else {
    GstMemory *mem;
    const guint nstride = pool->port->port_def.format.video.nStride;
    const guint nslice = pool->port->port_def.format.video.nSliceHeight;
    gsize offset[GST_VIDEO_MAX_PLANES] = { 0, };
    gint stride[GST_VIDEO_MAX_PLANES] = { nstride, 0, };

    mem = gst_omx_memory_allocator_alloc (pool->allocator, 0, omx_buf);
    buf = gst_buffer_new ();
    gst_buffer_append_memory (buf, mem);
    g_ptr_array_add (pool->buffers, buf);

    switch (GST_VIDEO_INFO_FORMAT (&pool->video_info)) {
      case GST_VIDEO_FORMAT_ABGR:
      case GST_VIDEO_FORMAT_ARGB:
      case GST_VIDEO_FORMAT_RGB16:
      case GST_VIDEO_FORMAT_BGR16:
      case GST_VIDEO_FORMAT_YUY2:
      case GST_VIDEO_FORMAT_UYVY:
      case GST_VIDEO_FORMAT_YVYU:
      case GST_VIDEO_FORMAT_GRAY8:
        break;
      case GST_VIDEO_FORMAT_I420:
        stride[1] = nstride / 2;
        offset[1] = offset[0] + stride[0] * nslice;
        stride[2] = nstride / 2;
        offset[2] = offset[1] + (stride[1] * nslice / 2);
        break;
      case GST_VIDEO_FORMAT_NV12:
      case GST_VIDEO_FORMAT_NV16:
        stride[1] = nstride;
        offset[1] = offset[0] + stride[0] * nslice;
        break;
      default:
        g_assert_not_reached ();
        break;
    }

    if (pool->add_videometa) {
      pool->need_copy = FALSE;
    } else {
      GstVideoInfo info;
      gboolean need_copy = FALSE;
      gint i;

      gst_video_info_init (&info);
      gst_video_info_set_format (&info,
          GST_VIDEO_INFO_FORMAT (&pool->video_info),
          GST_VIDEO_INFO_WIDTH (&pool->video_info),
          GST_VIDEO_INFO_HEIGHT (&pool->video_info));

      for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&pool->video_info); i++) {
        if (info.stride[i] != stride[i] || info.offset[i] != offset[i]) {
          need_copy = TRUE;
          break;
        }
      }

      pool->need_copy = need_copy;
    }

    if (pool->need_copy || pool->add_videometa) {
      /* We always add the videometa. It's the job of the user
       * to copy the buffer if pool->need_copy is TRUE
       */
      gst_buffer_add_video_meta_full (buf, GST_VIDEO_FRAME_FLAG_NONE,
          GST_VIDEO_INFO_FORMAT (&pool->video_info),
          GST_VIDEO_INFO_WIDTH (&pool->video_info),
          GST_VIDEO_INFO_HEIGHT (&pool->video_info),
          GST_VIDEO_INFO_N_PLANES (&pool->video_info), offset, stride);
    }
  }

  gst_mini_object_set_qdata (GST_MINI_OBJECT_CAST (buf),
      gst_omx_buffer_data_quark, omx_buf, NULL);

  *buffer = buf;

  pool->current_buffer_index++;

  return GST_FLOW_OK;
}
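
The reason the pool above only needs need_copy when no GstVideoMeta can be attached is that consumers which map the buffer with gst_video_frame_map() automatically pick up the meta's per-plane offsets and strides instead of the plain GstVideoInfo defaults. A small sketch of such a consumer, assuming the negotiated GstVideoInfo is available:

#include <gst/video/video.h>

/* Sketch: gst_video_frame_map() honours an attached GstVideoMeta, so the
 * pointers/strides below reflect the actual buffer layout (hypothetical
 * helper, not part of gst-omx). */
static gboolean
example_read_plane_layout (GstBuffer * buffer, GstVideoInfo * info)
{
  GstVideoFrame frame;
  guint i;

  if (!gst_video_frame_map (&frame, info, buffer, GST_MAP_READ))
    return FALSE;

  for (i = 0; i < GST_VIDEO_FRAME_N_PLANES (&frame); i++)
    GST_LOG ("plane %u: data %p, stride %d", i,
        GST_VIDEO_FRAME_PLANE_DATA (&frame, i),
        GST_VIDEO_FRAME_PLANE_STRIDE (&frame, i));

  gst_video_frame_unmap (&frame);
  return TRUE;
}
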
Code example #22
static gboolean
import_dmabuf_to_msdk_surface (GstMsdkVPP * thiz, GstBuffer * buf,
    MsdkSurface * msdk_surface)
{
  GstMemory *mem = NULL;
  GstVideoInfo vinfo;
  GstVideoMeta *vmeta;
  GstMsdkMemoryID *msdk_mid = NULL;
  mfxFrameSurface1 *mfx_surface = NULL;
  gint fd, i;

  mem = gst_buffer_peek_memory (buf, 0);
  fd = gst_dmabuf_memory_get_fd (mem);
  if (fd < 0)
    return FALSE;

  vinfo = thiz->sinkpad_info;

  /* Update offset/stride/size if there is VideoMeta attached to
   * the buffer */
  vmeta = gst_buffer_get_video_meta (buf);
  if (vmeta) {
    if (GST_VIDEO_INFO_FORMAT (&vinfo) != vmeta->format ||
        GST_VIDEO_INFO_WIDTH (&vinfo) != vmeta->width ||
        GST_VIDEO_INFO_HEIGHT (&vinfo) != vmeta->height ||
        GST_VIDEO_INFO_N_PLANES (&vinfo) != vmeta->n_planes) {
      GST_ERROR_OBJECT (thiz, "VideoMeta attached to buffer is not matching "
          "the negotiated width/height/format");
      return FALSE;
    }
    for (i = 0; i < GST_VIDEO_INFO_N_PLANES (&vinfo); ++i) {
      GST_VIDEO_INFO_PLANE_OFFSET (&vinfo, i) = vmeta->offset[i];
      GST_VIDEO_INFO_PLANE_STRIDE (&vinfo, i) = vmeta->stride[i];
    }
    GST_VIDEO_INFO_SIZE (&vinfo) = gst_buffer_get_size (buf);
  }

  /* Upstream neither accepted the msdk pool nor the msdk buffer size restrictions.
   * Current media-driver and GMMLib will fail due to strict memory size restrictions.
   * Ideally, media-driver should accept what ever memory coming from other drivers
   * in case of dmabuf-import and this is how the intel-vaapi-driver works.
   * For now, in order to avoid any crash we check the buffer size and fallback
   * to copy frame method.
   *
   * See this: https://github.com/intel/media-driver/issues/169
   * */
  if (GST_VIDEO_INFO_SIZE (&vinfo) <
      GST_VIDEO_INFO_SIZE (&thiz->sinkpad_buffer_pool_info))
    return FALSE;

  mfx_surface = msdk_surface->surface;
  msdk_mid = (GstMsdkMemoryID *) mfx_surface->Data.MemId;

  /* release the internal memory storage of associated mfxSurface */
  gst_msdk_replace_mfx_memid (thiz->context, mfx_surface, VA_INVALID_ID);

  /* export dmabuf to vasurface */
  if (!gst_msdk_export_dmabuf_to_vasurface (thiz->context, &vinfo, fd,
          msdk_mid->surface))
    return FALSE;

  return TRUE;
}
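
The dmabuf check at the top of the function above can be factored into a small helper; gst_is_dmabuf_memory() and gst_dmabuf_memory_get_fd() come from the gstreamer-allocators library. A stand-alone sketch (hypothetical helper name):

#include <gst/gst.h>
#include <gst/allocators/allocators.h>

/* Sketch: return the dmabuf fd of the buffer's first memory, or -1 if the
 * buffer is not dmabuf-backed. */
static gint
example_buffer_get_dmabuf_fd (GstBuffer * buffer)
{
  GstMemory *mem = gst_buffer_peek_memory (buffer, 0);

  if (mem == NULL || !gst_is_dmabuf_memory (mem))
    return -1;

  return gst_dmabuf_memory_get_fd (mem);
}
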
Code example #23
static gboolean
gst_kms_sink_import_dmabuf (GstKMSSink * self, GstBuffer * inbuf,
    GstBuffer ** outbuf)
{
  gint prime_fds[GST_VIDEO_MAX_PLANES] = { 0, };
  GstVideoMeta *meta;
  guint i, n_mem, n_planes;
  GstKMSMemory *kmsmem;
  guint mems_idx[GST_VIDEO_MAX_PLANES];
  gsize mems_skip[GST_VIDEO_MAX_PLANES];
  GstMemory *mems[GST_VIDEO_MAX_PLANES];

  if (!self->has_prime_import)
    return FALSE;

  /* This will eliminate most non-dmabuf out there */
  if (!gst_is_dmabuf_memory (gst_buffer_peek_memory (inbuf, 0)))
    return FALSE;

  n_planes = GST_VIDEO_INFO_N_PLANES (&self->vinfo);
  n_mem = gst_buffer_n_memory (inbuf);
  meta = gst_buffer_get_video_meta (inbuf);

  GST_TRACE_OBJECT (self, "Found a dmabuf with %u planes and %u memories",
      n_planes, n_mem);

  /* We cannot have multiple dmabuf per plane */
  if (n_mem > n_planes)
    return FALSE;

  /* Update video info based on video meta */
  if (meta) {
    GST_VIDEO_INFO_WIDTH (&self->vinfo) = meta->width;
    GST_VIDEO_INFO_HEIGHT (&self->vinfo) = meta->height;

    for (i = 0; i < meta->n_planes; i++) {
      GST_VIDEO_INFO_PLANE_OFFSET (&self->vinfo, i) = meta->offset[i];
      GST_VIDEO_INFO_PLANE_STRIDE (&self->vinfo, i) = meta->stride[i];
    }
  }

  /* Find and validate all memories */
  for (i = 0; i < n_planes; i++) {
    guint length;

    if (!gst_buffer_find_memory (inbuf,
            GST_VIDEO_INFO_PLANE_OFFSET (&self->vinfo, i), 1,
            &mems_idx[i], &length, &mems_skip[i]))
      return FALSE;

    mems[i] = gst_buffer_peek_memory (inbuf, mems_idx[i]);

    /* And all memory found must be dmabuf */
    if (!gst_is_dmabuf_memory (mems[i]))
      return FALSE;
  }

  kmsmem = (GstKMSMemory *) get_cached_kmsmem (mems[0]);
  if (kmsmem) {
    GST_LOG_OBJECT (self, "found KMS mem %p in DMABuf mem %p with fb id = %d",
        kmsmem, mems[0], kmsmem->fb_id);
    goto wrap_mem;
  }

  for (i = 0; i < n_planes; i++)
    prime_fds[i] = gst_dmabuf_memory_get_fd (mems[i]);

  GST_LOG_OBJECT (self, "found these prime ids: %d, %d, %d, %d", prime_fds[0],
      prime_fds[1], prime_fds[2], prime_fds[3]);

  kmsmem = gst_kms_allocator_dmabuf_import (self->allocator, prime_fds,
      n_planes, mems_skip, &self->vinfo);
  if (!kmsmem)
    return FALSE;

  GST_LOG_OBJECT (self, "setting KMS mem %p to DMABuf mem %p with fb id = %d",
      kmsmem, mems[0], kmsmem->fb_id);
  set_cached_kmsmem (mems[0], GST_MEMORY_CAST (kmsmem));

wrap_mem:
  *outbuf = gst_buffer_new ();
  if (!*outbuf)
    return FALSE;
  gst_buffer_append_memory (*outbuf, gst_memory_ref (GST_MEMORY_CAST (kmsmem)));
  gst_buffer_add_parent_buffer_meta (*outbuf, inbuf);

  return TRUE;
}
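
Imports like the two examples above only work when upstream actually attaches a GstVideoMeta describing the dmabuf's plane layout; a sink normally requests this by advertising the meta in its propose_allocation vfunc. A minimal sketch, assuming a GstBaseSink subclass (a real implementation would typically also offer a buffer pool here):

#include <gst/base/gstbasesink.h>
#include <gst/video/video.h>

/* Sketch: advertise GstVideoMeta (and crop meta) support in the
 * ALLOCATION query so upstream attaches per-buffer layout information
 * (hypothetical vfunc implementation, not taken from kmssink). */
static gboolean
example_propose_allocation (GstBaseSink * sink, GstQuery * query)
{
  gst_query_add_allocation_meta (query, GST_VIDEO_META_API_TYPE, NULL);
  gst_query_add_allocation_meta (query, GST_VIDEO_CROP_META_API_TYPE, NULL);
  return TRUE;
}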