static GstGLBuffer *
_gl_buffer_copy (GstGLBuffer * src, gssize offset, gssize size)
{
  GstAllocator *allocator = src->mem.mem.allocator;
  GstAllocationParams params = { 0, src->mem.mem.align, 0, 0 };
  GstGLBuffer *dest = NULL;

  dest = _gl_buffer_new (allocator, NULL, src->mem.context,
      src->target, src->usage_hints, &params, src->mem.mem.maxsize);

  /* If the data still needs to be uploaded to GL, copy it in system memory;
   * otherwise try a GPU-side buffer-to-buffer copy and fall back to a sysmem
   * copy on failure */
  if (GST_MEMORY_FLAG_IS_SET (src, GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD)) {
    if (!gst_gl_base_memory_memcpy (GST_GL_BASE_MEMORY_CAST (src),
            GST_GL_BASE_MEMORY_CAST (dest), offset, size)) {
      GST_CAT_WARNING (GST_CAT_GL_BUFFER, "Could not copy GL Buffer");
      gst_memory_unref (GST_MEMORY_CAST (dest));
      dest = NULL;
    }
  } else {
    if (!gst_gl_buffer_copy_buffer_sub_data (src, dest, offset, size)) {
      if (!gst_gl_base_memory_memcpy (GST_GL_BASE_MEMORY_CAST (src),
              GST_GL_BASE_MEMORY_CAST (dest), offset, size)) {
        GST_CAT_WARNING (GST_CAT_GL_BUFFER, "Could not copy GL Buffer");
        gst_memory_unref (GST_MEMORY_CAST (dest));
        dest = NULL;
      }
    }
  }

  return dest;
}
Example #2
static GstMemory *
_gl_mem_copy (GstGLMemoryPBO * src, gssize offset, gssize size)
{
  GstAllocationParams params = { 0, GST_MEMORY_CAST (src)->align, 0, 0 };
  GstGLBaseMemoryAllocator *base_mem_allocator;
  GstAllocator *allocator;
  GstMemory *dest = NULL;

  allocator = GST_MEMORY_CAST (src)->allocator;
  base_mem_allocator = (GstGLBaseMemoryAllocator *) allocator;

  if (src->mem.tex_target == GST_GL_TEXTURE_TARGET_EXTERNAL_OES) {
    GST_CAT_ERROR (GST_CAT_GL_MEMORY, "Cannot copy External OES textures");
    return NULL;
  }

  /* If not doing a full copy, copy to sysmem instead; the 2D representation
   * of the texture would otherwise become wrong */
  if (offset > 0 || size < GST_MEMORY_CAST (src)->size) {
    return base_mem_allocator->fallback_mem_copy (GST_MEMORY_CAST (src), offset,
        size);
  }

  dest = (GstMemory *) _gl_mem_new (allocator, NULL, src->mem.mem.context,
      src->mem.tex_target, &params, &src->mem.info, src->mem.plane,
      &src->mem.valign, NULL, NULL);

  if (GST_MEMORY_FLAG_IS_SET (src, GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD)) {
    if (!gst_gl_base_memory_memcpy ((GstGLBaseMemory *) src,
            (GstGLBaseMemory *) dest, offset, size)) {
      GST_CAT_WARNING (GST_CAT_GL_MEMORY, "Could not copy GL Memory");
      gst_memory_unref (GST_MEMORY_CAST (dest));
      return NULL;
    }
  } else {
    GstMapInfo dinfo;

    if (!gst_memory_map (GST_MEMORY_CAST (dest), &dinfo,
            GST_MAP_WRITE | GST_MAP_GL)) {
      GST_CAT_WARNING (GST_CAT_GL_MEMORY,
          "Failed to map destination for writing");
      gst_memory_unref (GST_MEMORY_CAST (dest));
      return NULL;
    }

    if (!gst_gl_memory_copy_into ((GstGLMemory *) src,
            ((GstGLMemory *) dest)->tex_id, src->mem.tex_target,
            src->mem.tex_type, src->mem.tex_width, GL_MEM_HEIGHT (src))) {
      GST_CAT_WARNING (GST_CAT_GL_MEMORY, "Could not copy GL Memory");
      gst_memory_unmap (GST_MEMORY_CAST (dest), &dinfo);
      gst_memory_unref (GST_MEMORY_CAST (dest));
      return NULL;
    }

    gst_memory_unmap (GST_MEMORY_CAST (dest), &dinfo);
  }

  return dest;
}
Example #3
static void
_upload_pbo_memory (GstGLMemoryPBO * gl_mem, GstMapInfo * info,
    GstGLBuffer * pbo, GstMapInfo * pbo_info)
{
  GstGLContext *context = gl_mem->mem.mem.context;
  const GstGLFuncs *gl;
  guint gl_format, gl_type, gl_target;
  guint pbo_id;
  gsize plane_start;

  if (!GST_MEMORY_FLAG_IS_SET (gl_mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD))
    return;

  g_return_if_fail (CONTEXT_SUPPORTS_PBO_UPLOAD (context));

  gl = context->gl_vtable;
  pbo_id = *(guint *) pbo_info->data;

  gl_type = GL_UNSIGNED_BYTE;
  if (gl_mem->mem.tex_type == GST_VIDEO_GL_TEXTURE_TYPE_RGB16)
    gl_type = GL_UNSIGNED_SHORT_5_6_5;

  gl_format = gst_gl_format_from_gl_texture_type (gl_mem->mem.tex_type);
  gl_target = gst_gl_texture_target_to_gl (gl_mem->mem.tex_target);

  if (USING_OPENGL (context) || USING_GLES3 (context)
      || USING_OPENGL3 (context)) {
    gl->PixelStorei (GL_UNPACK_ROW_LENGTH, gl_mem->mem.unpack_length);
  } else if (USING_GLES2 (context)) {
    gl->PixelStorei (GL_UNPACK_ALIGNMENT, gl_mem->mem.unpack_length);
  }

  GST_CAT_LOG (GST_CAT_GL_MEMORY, "upload for texture id:%u, with pbo %u %ux%u",
      gl_mem->mem.tex_id, pbo_id, gl_mem->mem.tex_width,
      GL_MEM_HEIGHT (gl_mem));

  /* find the start of the plane data including padding */
  plane_start =
      gst_gl_get_plane_start (&gl_mem->mem.info, &gl_mem->mem.valign,
      gl_mem->mem.plane) + GST_MEMORY_CAST (gl_mem)->offset;

  gl->BindBuffer (GL_PIXEL_UNPACK_BUFFER, pbo_id);
  gl->BindTexture (gl_target, gl_mem->mem.tex_id);
  gl->TexSubImage2D (gl_target, 0, 0, 0, gl_mem->mem.tex_width,
      GL_MEM_HEIGHT (gl_mem), gl_format, gl_type, (void *) plane_start);
  gl->BindBuffer (GL_PIXEL_UNPACK_BUFFER, 0);
  gl->BindTexture (gl_target, 0);

  /* Reset to default values */
  if (USING_OPENGL (context) || USING_GLES3 (context)
      || USING_OPENGL3 (context)) {
    gl->PixelStorei (GL_UNPACK_ROW_LENGTH, 0);
  } else if (USING_GLES2 (context)) {
    gl->PixelStorei (GL_UNPACK_ALIGNMENT, 4);
  }
}
/**
 * gst_memory_share:
 * @mem: a #GstMemory
 * @offset: offset to share from
 * @size: size to share, or -1 to share to the end of the memory region
 *
 * Return a shared copy of @size bytes from @mem starting from @offset. No
 * memory copy is performed and the memory region is simply shared. The result
 * is guaranteed to be non-writable. @size can be set to -1 to return a shared
 * copy from @offset to the end of the memory region.
 *
 * Returns: a new #GstMemory.
 */
GstMemory *
gst_memory_share (GstMemory * mem, gssize offset, gssize size)
{
  GstMemory *shared;

  g_return_val_if_fail (mem != NULL, NULL);
  g_return_val_if_fail (!GST_MEMORY_FLAG_IS_SET (mem, GST_MEMORY_FLAG_NO_SHARE),
      NULL);

  shared = mem->allocator->mem_share (mem, offset, size);

  return shared;
}
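
A minimal usage sketch for gst_memory_share() as documented above. This is
illustrative only and not part of the original code: the default sysmem
allocator, the 16-byte size, and the offset of 8 are arbitrary values, and
gst_init() is assumed to have been called already.

#include <gst/gst.h>

static void
share_sub_region_example (void)
{
  GstMemory *mem, *sub;
  GstMapInfo map;

  /* 16 bytes from the default (sysmem) allocator */
  mem = gst_allocator_alloc (NULL, 16, NULL);

  /* share bytes 8..15 of @mem without copying; -1 means "to the end" */
  sub = gst_memory_share (mem, 8, -1);

  if (gst_memory_map (sub, &map, GST_MAP_READ)) {
    /* map.data aliases the parent memory; a shared region is never writable */
    gst_memory_unmap (sub, &map);
  }

  gst_memory_unref (sub);
  gst_memory_unref (mem);
}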
static gpointer
gst_gl_buffer_cpu_access (GstGLBuffer * mem, GstMapInfo * info, gsize size)
{
  const GstGLFuncs *gl = mem->mem.context->gl_vtable;
  gpointer data, ret;

  if (!gst_gl_base_memory_alloc_data (GST_GL_BASE_MEMORY_CAST (mem)))
    return NULL;

  ret = mem->mem.data;

  GST_CAT_LOG (GST_CAT_GL_BUFFER, "mapping id %u size %" G_GSIZE_FORMAT,
      mem->id, size);

  /* The extra data-pointer indirection/memcpy is needed to keep the data
   * coherent across concurrent GL and CPU map()s (a short usage sketch
   * follows this function) */
  if (GST_MEMORY_FLAG_IS_SET (mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD)
      && (info->flags & GST_MAP_GL) == 0 && (info->flags & GST_MAP_READ) != 0) {
    gl->BindBuffer (mem->target, mem->id);

    if (gl->MapBufferRange) {
      /* FIXME: optionally remove this with a flag and return the
       * glMapBufferRange pointer (requires
       * GL_ARB_buffer_storage/GL4/GL_COHERENT_BIT) */
      guint gl_map_flags = GL_MAP_READ_BIT;

      data = gl->MapBufferRange (mem->target, 0, size, gl_map_flags);

      if (data)
        memcpy (mem->mem.data, data, size);

      gl->UnmapBuffer (mem->target);
      ret = mem->mem.data;
    } else if (gl->GetBufferSubData) {
      gl->GetBufferSubData (mem->target, 0, size, mem->mem.data);
      ret = mem->mem.data;
    } else {
      ret = NULL;
    }
    gl->BindBuffer (mem->target, 0);
  }

  return ret;
}
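
The CPU-access path above is reached through gst_memory_map() on a GL buffer
memory. The sketch below is a hedged illustration, not part of the original
code: the helper name and the glmem argument are hypothetical, and glmem is
assumed to be a GstGLBuffer-backed GstMemory obtained elsewhere (for example
from a GL buffer pool).

#include <gst/gst.h>
#include <gst/gl/gl.h>

static void
inspect_gl_buffer_example (GstMemory * glmem)
{
  GstMapInfo map;

  /* CPU view: without GST_MAP_GL, a pending download is resolved via the
   * glMapBufferRange/GetBufferSubData + memcpy path shown above */
  if (gst_memory_map (glmem, &map, GST_MAP_READ)) {
    /* map.data points at the system-memory copy of the buffer contents */
    gst_memory_unmap (glmem, &map);
  }

  /* GL view: with GST_MAP_GL, map.data holds the GL buffer object id, as the
   * PBO code later dereferences with *(guint *) pbo_info->data; GL maps are
   * normally performed from the memory's GL thread */
  if (gst_memory_map (glmem, &map, GST_MAP_READ | GST_MAP_GL)) {
    guint buffer_id = *(guint *) map.data;

    (void) buffer_id;           /* e.g. bind it with glBindBuffer() */
    gst_memory_unmap (glmem, &map);
  }
}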
Example #6
static gpointer
_pbo_download_transfer (GstGLMemoryPBO * gl_mem, GstMapInfo * info, gsize size)
{
  GstMapInfo *pbo_info;

  gl_mem->pbo->target = GL_PIXEL_PACK_BUFFER;
  /* texture -> pbo */
  if (info->flags & GST_MAP_READ
      && GST_MEMORY_FLAG_IS_SET (gl_mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD)) {
    GstMapInfo pbo_map;

    GST_CAT_TRACE (GST_CAT_GL_MEMORY,
        "attempting download of texture %u using pbo %u", gl_mem->mem.tex_id,
        gl_mem->pbo->id);

    if (!gst_memory_map (GST_MEMORY_CAST (gl_mem->pbo), &pbo_map,
            GST_MAP_WRITE | GST_MAP_GL)) {
      GST_CAT_WARNING (GST_CAT_GL_MEMORY, "Failed to map PBO for writing");
      return NULL;
    }

    if (!_read_pixels_to_pbo (gl_mem)) {
      gst_memory_unmap (GST_MEMORY_CAST (gl_mem->pbo), &pbo_map);
      return NULL;
    }

    gst_memory_unmap (GST_MEMORY_CAST (gl_mem->pbo), &pbo_map);
  }

  pbo_info = g_new0 (GstMapInfo, 1);

  /* pbo -> data */
  /* get a cpu accessible mapping from the pbo */
  if (!gst_memory_map (GST_MEMORY_CAST (gl_mem->pbo), pbo_info, info->flags)) {
    GST_CAT_ERROR (GST_CAT_GL_MEMORY, "Failed to map pbo");
    g_free (pbo_info);
    return NULL;
  }
  info->user_data[0] = pbo_info;

  return pbo_info->data;
}
Example #7
/* XXX: add as API? */
static void
gst_gl_base_buffer_upload_cpu_write (GstGLBaseBuffer * mem, GstMapInfo * info,
    gsize size)
{
  const GstGLFuncs *gl = mem->context->gl_vtable;
  gpointer data;

  if (!mem->data)
    /* no data pointer has been written */
    return;

  /* The extra data pointer indirection/memcpy is needed to keep the data
   * coherent across concurrent GL and CPU map()s */
  /* FIXME: uploading potentially half-written data for libav pushing READWRITE
   * mapped buffers */
  if (GST_MEMORY_FLAG_IS_SET (mem, GST_GL_BASE_BUFFER_FLAG_NEED_UPLOAD)
      || (mem->map_flags & GST_MAP_WRITE) != 0) {
    gl->BindBuffer (mem->target, mem->id);

    if (gl->MapBufferRange) {
      /* FIXME: optionally remove this with a flag and return the
       * glMapBufferRange pointer (requires
       * GL_ARB_buffer_storage/GL4/GL_COHERENT_BIT) */
      guint gl_map_flags = GL_MAP_WRITE_BIT;

      data = gl->MapBufferRange (mem->target, 0, size, gl_map_flags);

      if (data)
        memcpy (data, mem->data, size);

      gl->UnmapBuffer (mem->target);
    } else if (gl->BufferSubData) {
      gl->BufferSubData (mem->target, 0, size, mem->data);
    }
    gl->BindBuffer (mem->target, 0);
  }
}
Example #8
static gboolean
_read_pixels_to_pbo (GstGLMemoryPBO * gl_mem)
{
  if (!gl_mem->pbo || !CONTEXT_SUPPORTS_PBO_DOWNLOAD (gl_mem->mem.mem.context)
      || gl_mem->mem.tex_type == GST_VIDEO_GL_TEXTURE_TYPE_LUMINANCE
      || gl_mem->mem.tex_type == GST_VIDEO_GL_TEXTURE_TYPE_LUMINANCE_ALPHA)
    /* unsupported */
    return FALSE;

  if (GST_MEMORY_FLAG_IS_SET (gl_mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD)) {
    /* copy texture data into the pbo and map that */
    gsize plane_start;
    GstMapInfo pbo_info;

    plane_start =
        gst_gl_get_plane_start (&gl_mem->mem.info, &gl_mem->mem.valign,
        gl_mem->mem.plane) + GST_MEMORY_CAST (gl_mem)->offset;

    gl_mem->pbo->target = GL_PIXEL_PACK_BUFFER;
    if (!gst_memory_map (GST_MEMORY_CAST (gl_mem->pbo), &pbo_info,
            GST_MAP_WRITE | GST_MAP_GL)) {
      GST_CAT_ERROR (GST_CAT_GL_MEMORY, "Failed to map pbo for writing");
      return FALSE;
    }

    if (!gst_gl_memory_read_pixels ((GstGLMemory *) gl_mem,
            (gpointer) plane_start)) {
      gst_memory_unmap (GST_MEMORY_CAST (gl_mem->pbo), &pbo_info);
      return FALSE;
    }

    gst_memory_unmap (GST_MEMORY_CAST (gl_mem->pbo), &pbo_info);
  }

  return TRUE;
}
Example #9
static void
_upload_pbo_memory (GstGLMemoryPBO * gl_mem, GstMapInfo * info,
    GstGLBuffer * pbo, GstMapInfo * pbo_info)
{
  GstGLContext *context = gl_mem->mem.mem.context;
  const GstGLFuncs *gl;
  guint pbo_id;

  if (!GST_MEMORY_FLAG_IS_SET (gl_mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD))
    return;

  g_return_if_fail (CONTEXT_SUPPORTS_PBO_UPLOAD (context));

  gl = context->gl_vtable;
  pbo_id = *(guint *) pbo_info->data;

  GST_CAT_LOG (GST_CAT_GL_MEMORY, "upload for texture id:%u, with pbo %u %ux%u",
      gl_mem->mem.tex_id, pbo_id, gl_mem->mem.tex_width,
      GL_MEM_HEIGHT (gl_mem));

  gl->BindBuffer (GL_PIXEL_UNPACK_BUFFER, pbo_id);
  gst_gl_memory_texsubimage (GST_GL_MEMORY_CAST (gl_mem), NULL);
  gl->BindBuffer (GL_PIXEL_UNPACK_BUFFER, 0);
}
static void
test_transfer_allocator (const gchar * allocator_name)
{
  GstAllocator *gl_allocator;
  GstGLBaseMemoryAllocator *base_mem_alloc;
  GstVideoInfo v_info;
  GstMemory *mem, *mem2, *mem3;
  GstMapInfo map_info;
  GstGLVideoAllocationParams *params;

  gl_allocator = gst_allocator_find (allocator_name);
  fail_if (gl_allocator == NULL);
  base_mem_alloc = GST_GL_BASE_MEMORY_ALLOCATOR (gl_allocator);

  gst_video_info_set_format (&v_info, GST_VIDEO_FORMAT_RGBA, 1, 1);

  params = gst_gl_video_allocation_params_new (context, NULL, &v_info, 0,
      NULL, GST_GL_TEXTURE_TARGET_2D, GST_VIDEO_GL_TEXTURE_TYPE_RGBA);

  /* texture creation */
  mem = (GstMemory *) gst_gl_base_memory_alloc (base_mem_alloc,
      (GstGLAllocationParams *) params);
  gst_gl_allocation_params_free ((GstGLAllocationParams *) params);
  fail_if (mem == NULL);
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  /* test wrapping raw data */
  params = gst_gl_video_allocation_params_new_wrapped_data (context, NULL,
      &v_info, 0, NULL, GST_GL_TEXTURE_TARGET_2D,
      GST_VIDEO_GL_TEXTURE_TYPE_RGBA, rgba_pixel, NULL, NULL);
  mem2 =
      (GstMemory *) gst_gl_base_memory_alloc (base_mem_alloc,
      (GstGLAllocationParams *) params);
  gst_gl_allocation_params_free ((GstGLAllocationParams *) params);
  fail_if (mem2 == NULL);

  fail_unless (GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  /* wrapped texture creation */
  params = gst_gl_video_allocation_params_new_wrapped_texture (context, NULL,
      &v_info, 0, NULL, GST_GL_TEXTURE_TARGET_2D,
      GST_VIDEO_GL_TEXTURE_TYPE_RGBA, ((GstGLMemory *) mem)->tex_id, NULL,
      NULL);
  mem3 =
      (GstMemory *) gst_gl_base_memory_alloc (base_mem_alloc,
      (GstGLAllocationParams *) params);
  gst_gl_allocation_params_free ((GstGLAllocationParams *) params);
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem3,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (GST_MEMORY_FLAG_IS_SET (mem3,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  /* check data/flags are correct */
  fail_unless (gst_memory_map (mem2, &map_info, GST_MAP_READ));

  fail_unless (GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  fail_unless (memcmp (map_info.data, rgba_pixel,
          G_N_ELEMENTS (rgba_pixel)) == 0,
      "0x%02x%02x%02x%02x != 0x%02x%02x%02x%02x", map_info.data[0],
      map_info.data[1], map_info.data[2], map_info.data[3],
      (guint8) rgba_pixel[0], (guint8) rgba_pixel[1], (guint8) rgba_pixel[2],
      (guint8) rgba_pixel[3]);

  gst_memory_unmap (mem2, &map_info);

  fail_unless (GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  fail_unless (gst_memory_map (mem2, &map_info, GST_MAP_READ | GST_MAP_GL));

  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  /* test texture copy */
  fail_unless (gst_gl_memory_copy_into ((GstGLMemory *) mem2,
          ((GstGLMemory *) mem)->tex_id, GST_GL_TEXTURE_TARGET_2D,
          GST_VIDEO_GL_TEXTURE_TYPE_RGBA, 1, 1));
  GST_MINI_OBJECT_FLAG_SET (mem, GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD);

  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem2,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (GST_MEMORY_FLAG_IS_SET (mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  gst_memory_unmap (mem2, &map_info);

  /* test download of copied texture */
  fail_unless (gst_memory_map (mem, &map_info, GST_MAP_READ));

  fail_unless (memcmp (map_info.data, rgba_pixel,
          G_N_ELEMENTS (rgba_pixel)) == 0,
      "0x%02x%02x%02x%02x != 0x%02x%02x%02x%02x", (guint8) map_info.data[0],
      (guint8) map_info.data[1], (guint8) map_info.data[2],
      (guint8) map_info.data[3], (guint8) rgba_pixel[0], (guint8) rgba_pixel[1],
      (guint8) rgba_pixel[2], (guint8) rgba_pixel[3]);

  gst_memory_unmap (mem, &map_info);

  /* test download of wrapped copied texture */
  fail_unless (gst_memory_map (mem3, &map_info, GST_MAP_READ));

  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  fail_unless (memcmp (map_info.data, rgba_pixel,
          G_N_ELEMENTS (rgba_pixel)) == 0,
      "0x%02x%02x%02x%02x != 0x%02x%02x%02x%02x", (guint8) map_info.data[0],
      (guint8) map_info.data[1], (guint8) map_info.data[2],
      (guint8) map_info.data[3], (guint8) rgba_pixel[0], (guint8) rgba_pixel[1],
      (guint8) rgba_pixel[2], (guint8) rgba_pixel[3]);

  gst_memory_unmap (mem3, &map_info);

  /* test upload flag */
  fail_unless (gst_memory_map (mem3, &map_info, GST_MAP_WRITE));
  gst_memory_unmap (mem3, &map_info);

  fail_unless (GST_MEMORY_FLAG_IS_SET (mem3,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem3,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  /* test download flag */
  fail_unless (gst_memory_map (mem3, &map_info, GST_MAP_WRITE | GST_MAP_GL));
  gst_memory_unmap (mem3, &map_info);

  fail_unless (!GST_MEMORY_FLAG_IS_SET (mem3,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_UPLOAD));
  fail_unless (GST_MEMORY_FLAG_IS_SET (mem3,
          GST_GL_BASE_MEMORY_TRANSFER_NEED_DOWNLOAD));

  gst_memory_unref (mem);
  gst_memory_unref (mem2);
  gst_memory_unref (mem3);
  gst_object_unref (gl_allocator);
}