Example #1
0
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_device *dev,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->pages = i915_pages_create_for_stolen(dev,
						  stolen->start, stolen->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->has_dma_mapping = true;
	i915_gem_object_pin_pages(obj);
	obj->stolen = stolen;

	obj->base.read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	obj->cache_level = HAS_LLC(dev) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}
Example #2
0
static struct drm_i915_gem_object *
_i915_gem_object_create_stolen(struct drm_i915_private *dev_priv,
			       struct drm_mm_node *stolen)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	obj = i915_gem_object_alloc(dev_priv);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(&dev_priv->drm, &obj->base, stolen->size);
	i915_gem_object_init(obj, &i915_gem_object_stolen_ops);

	obj->stolen = stolen;
	obj->read_domains = I915_GEM_DOMAIN_CPU | I915_GEM_DOMAIN_GTT;
	cache_level = HAS_LLC(dev_priv) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	if (i915_gem_object_pin_pages(obj))
		goto cleanup;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}
Example #3
0
/**
 * i915_gem_object_create_internal: create an object with volatile pages
 * @i915: the i915 device
 * @size: the size in bytes of backing storage to allocate for the object
 *
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				phys_addr_t size)
{
	struct drm_i915_gem_object *obj;
	unsigned int cache_level;

	GEM_BUG_ON(!size);
	GEM_BUG_ON(!IS_ALIGNED(size, PAGE_SIZE));

	if (overflows_type(size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc();
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops);

	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;

	cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	i915_gem_object_set_cache_coherency(obj, cache_level);

	return obj;
}
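As the kernel-doc above says, an internal object's backing store is volatile: it exists only while its pages are pinned, and it is not cleared on allocation. The sketch below is a hypothetical caller, not taken from the driver, showing the expected pin/use/unpin discipline; it assumes the i915_gem_object_pin_pages()/i915_gem_object_unpin_pages()/i915_gem_object_put() helpers of the same kernel vintage.

/*
 * Hypothetical caller (not from the driver): allocate one page of
 * internal memory, pin it while it is in use, then release it.
 * Assumes the i915_gem_object_{pin,unpin}_pages() and
 * i915_gem_object_put() helpers of the same kernel vintage.
 */
static int example_use_internal(struct drm_i915_private *i915)
{
	struct drm_i915_gem_object *obj;
	int err;

	obj = i915_gem_object_create_internal(i915, PAGE_SIZE);
	if (IS_ERR(obj))
		return PTR_ERR(obj);

	/* The backing pages exist, and stay valid, only while pinned. */
	err = i915_gem_object_pin_pages(obj);
	if (err)
		goto out_put;

	/* ... map and fill the object; it is not zeroed on allocation ... */

	i915_gem_object_unpin_pages(obj);
out_put:
	i915_gem_object_put(obj);
	return err;
}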
Example #4
0
struct drm_i915_gem_object *
huge_gem_object(struct drm_i915_private *i915,
		phys_addr_t phys_size,
		dma_addr_t dma_size)
{
	struct drm_i915_gem_object *obj;

	GEM_BUG_ON(!phys_size || phys_size > dma_size);
	GEM_BUG_ON(!IS_ALIGNED(phys_size, PAGE_SIZE));
	GEM_BUG_ON(!IS_ALIGNED(dma_size, I915_GTT_PAGE_SIZE));

	if (overflows_type(dma_size, obj->base.size))
		return ERR_PTR(-E2BIG);

	obj = i915_gem_object_alloc(i915);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, dma_size);
	i915_gem_object_init(obj, &huge_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;
	obj->scratch = phys_size;

	return obj;
}
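Example #5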
static struct drm_i915_gem_object *
_kos_fb_object_create(struct drm_device *dev,
		      struct drm_mm_node *fb_node)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return NULL;

	drm_gem_private_object_init(dev, &obj->base, fb_node->size);
	i915_gem_object_init(obj, &kos_fb_object_ops);

	obj->pages = i915_pages_create_for_fb(dev,
					      fb_node->start, fb_node->size);
	if (obj->pages == NULL)
		goto cleanup;

	obj->has_dma_mapping = true;
	i915_gem_object_pin_pages(obj);
	obj->stolen = fb_node;

	obj->base.read_domains = I915_GEM_DOMAIN_GTT;
	obj->cache_level = I915_CACHE_NONE;
	obj->tiling_mode = I915_TILING_X;

	return obj;

cleanup:
	i915_gem_object_free(obj);
	return NULL;
}
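Example #6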
struct drm_gem_object *i915_gem_prime_import(struct drm_device *dev,
					     struct dma_buf *dma_buf)
{
	struct dma_buf_attachment *attach;
	struct drm_i915_gem_object *obj;
	int ret;

	/* is this one of our own objects? */
	if (dma_buf->ops == &i915_dmabuf_ops) {
		obj = dma_buf_to_obj(dma_buf);
		/* is it from our device? */
		if (obj->base.dev == dev) {
			/*
			 * Importing a dmabuf exported from our own GEM object
			 * increases the refcount on the GEM object itself
			 * instead of the f_count of the dmabuf.
			 */
			drm_gem_object_reference(&obj->base);
			return &obj->base;
		}
	}

	/* need to attach */
	attach = dma_buf_attach(dma_buf, dev->dev);
	if (IS_ERR(attach))
		return ERR_CAST(attach);

	get_dma_buf(dma_buf);

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL) {
		ret = -ENOMEM;
		goto fail_detach;
	}

	ret = drm_gem_private_object_init(dev, &obj->base, dma_buf->size);
	if (ret) {
		i915_gem_object_free(obj);
		goto fail_detach;
	}

	i915_gem_object_init(obj, &i915_gem_object_dmabuf_ops);
	obj->base.import_attach = attach;

	return &obj->base;

fail_detach:
	dma_buf_detach(dma_buf, attach);
	dma_buf_put(dma_buf);

	return ERR_PTR(ret);
}
Example #7
0
/**
 * Creates a new object that wraps some internal memory for private use.
 * This object is not backed by swappable storage, and as such its contents
 * are volatile and only valid whilst pinned. If the object is reaped by the
 * shrinker, its pages and data will be discarded. Equally, it is not a full
 * GEM object and so not valid for access from userspace. This makes it useful
 * for hardware interfaces like ringbuffers (which are pinned from the time
 * the request is written to the time the hardware stops accessing it), but
 * not for contexts (which need to be preserved when not active for later
 * reuse). Note that it is not cleared upon allocation.
 */
struct drm_i915_gem_object *
i915_gem_object_create_internal(struct drm_i915_private *i915,
				unsigned int size)
{
	struct drm_i915_gem_object *obj;

	obj = i915_gem_object_alloc(&i915->drm);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	drm_gem_private_object_init(&i915->drm, &obj->base, size);
	i915_gem_object_init(obj, &i915_gem_object_internal_ops);

	obj->base.write_domain = I915_GEM_DOMAIN_CPU;
	obj->base.read_domains = I915_GEM_DOMAIN_CPU;
	obj->cache_level = HAS_LLC(i915) ? I915_CACHE_LLC : I915_CACHE_NONE;

	return obj;
}
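Example #8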
/**
 * Creates a new GEM object that wraps the framebuffer of a vGT guest
 * plane (decoded via vgt_decode_fb_format()).
 */
int
i915_gem_vgtbuffer_ioctl(struct drm_device *dev, void *data,
			 struct drm_file *file)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_vgtbuffer *args = data;
	struct drm_i915_gem_object *obj;
	struct vgt_primary_plane_format *p;
	struct vgt_cursor_plane_format *c;
	struct vgt_fb_format fb;
	struct vgt_pipe_format *pipe;

	int ret;
	int num_pages = 0;
	u32 vmid;
	u32 handle;
	uint32_t __iomem *gtt_base = dev_priv->gtt.gsm;	/* mappable_base; */
	uint32_t gtt_fbstart;
	uint32_t gtt_pte;
	uint32_t gtt_offset = 0;
	uint64_t range;

	/* Allocate the new object */
	DRM_DEBUG_DRIVER("VGT: gem_vgtbuffer_ioctl\n");

	if (!xen_initial_domain())
		return -EPERM;

	obj = i915_gem_object_alloc(dev);
	if (obj == NULL)
		return -ENOMEM;

	vmid = args->vmid;
	DRM_DEBUG_DRIVER("VGT: calling decode\n");
	if (vgt_decode_fb_format(vmid, &fb)) {
		i915_gem_object_free(obj);
		return -EINVAL;
	}

	pipe = ((args->pipe_id >= I915_MAX_PIPES) ?
		NULL : &fb.pipes[args->pipe_id]);

	/* If plane is not enabled, bail */
	if (!pipe || !pipe->primary.enabled) {
		i915_gem_object_free(obj);
		return -ENOENT;
	}

	DRM_DEBUG_DRIVER("VGT: pipe = %d\n", args->pipe_id);
	if ((args->plane_id) == I915_VGT_PLANE_PRIMARY) {
		DRM_DEBUG_DRIVER("VGT: &pipe = %p\n", &pipe);
		p = &pipe->primary;
		args->enabled = p->enabled;
		args->x_offset = p->x_offset;
		args->y_offset = p->y_offset;
		args->start = p->base;
		args->width = p->width;
		args->height = p->height;
		args->stride = p->stride;
		args->bpp = p->bpp;
		args->hw_format = p->hw_format;
		args->drm_format = p->drm_format;
		args->tiled = p->tiled;
		args->size = (((p->width * p->height * p->bpp) / 8) +
			      (PAGE_SIZE - 1)) >> PAGE_SHIFT;

		range = p->base >> PAGE_SHIFT;
		range += args->size;

		if (range > gtt_total_entries(dev_priv->gtt)) {
			DRM_DEBUG_DRIVER("VGT: Invalid GTT offset or size\n");
			i915_gem_object_free(obj);
			return -EINVAL;
		}

		if (args->flags & I915_VGTBUFFER_QUERY_ONLY) {
			DRM_DEBUG_DRIVER("VGT: query only: primary");
			i915_gem_object_free(obj);
			return 0;
		}

		gtt_offset = p->base;
		num_pages = args->size;

		DRM_DEBUG_DRIVER("VGT GEM: Surface GTT Offset = %x\n", p->base);
		obj->tiling_mode = p->tiled ? I915_TILING_X : 0;
		obj->stride = p->tiled ? args->stride : 0;
	}
Example #9
0
/*
 * Creates a new mm object that wraps some normal memory from the process
 * context - user memory.
 *
 * We impose several restrictions upon the memory being mapped
 * into the GPU.
 * 1. It must be page aligned (both start/end addresses, i.e ptr and size).
 * 2. It must be normal system memory, not a pointer into another map of IO
 *    space (e.g. it must not be a GTT mmapping of another object).
 * 3. We only allow a bo as large as we could in theory map into the GTT,
 *    that is we limit the size to the total size of the GTT.
 * 4. The bo is marked as being snoopable. The backing pages are left
 *    accessible directly by the CPU, but reads and writes by the GPU may
 *    incur the cost of a snoop (unless you have an LLC architecture).
 *
 * Synchronisation between multiple users and the GPU is left to userspace
 * through the normal set-domain-ioctl. The kernel will enforce that the
 * GPU relinquishes the VMA before it is returned back to the system
 * i.e. upon free(), munmap() or process termination. However, the userspace
 * malloc() library may not immediately relinquish the VMA after free() and
 * instead reuse it whilst the GPU is still reading and writing to the VMA.
 * Caveat emptor.
 *
 * Also note, that the object created here is not currently a "first class"
 * object, in that several ioctls are banned. These are the CPU access
 * ioctls: mmap(), pwrite and pread. In practice, you are expected to use
 * direct access via your pointer rather than use those ioctls. Another
 * restriction is that we do not allow userptr surfaces to be pinned to the
 * hardware and so we reject any attempt to create a framebuffer out of a
 * userptr.
 *
 * If you think this is a good interface to use to pass GPU memory between
 * drivers, please use dma-buf instead. In fact, wherever possible use
 * dma-buf instead.
 */
int
i915_gem_userptr_ioctl(struct drm_device *dev,
		       void *data,
		       struct drm_file *file)
{
	struct drm_i915_private *dev_priv = to_i915(dev);
	struct drm_i915_gem_userptr *args = data;
	struct drm_i915_gem_object *obj;
	int ret;
	u32 handle;

	if (!HAS_LLC(dev_priv) && !HAS_SNOOP(dev_priv)) {
		/* We cannot support coherent userptr objects on hw that has
		 * neither an LLC nor snooping.
		 */
		return -ENODEV;
	}

	if (args->flags & ~(I915_USERPTR_READ_ONLY |
			    I915_USERPTR_UNSYNCHRONIZED))
		return -EINVAL;

	if (!args->user_size)
		return -EINVAL;

	if (offset_in_page(args->user_ptr | args->user_size))
		return -EINVAL;

	if (!access_ok((char __user *)(unsigned long)args->user_ptr, args->user_size))
		return -EFAULT;

	if (args->flags & I915_USERPTR_READ_ONLY) {
		struct i915_hw_ppgtt *ppgtt;

		/*
		 * On almost all of the older hw, we cannot tell the GPU that
		 * a page is readonly.
		 */
		ppgtt = dev_priv->kernel_context->ppgtt;
		if (!ppgtt || !ppgtt->vm.has_read_only)
			return -ENODEV;
	}

	obj = i915_gem_object_alloc();
	if (obj == NULL)
		return -ENOMEM;

	drm_gem_private_object_init(dev, &obj->base, args->user_size);
	i915_gem_object_init(obj, &i915_gem_userptr_ops);
	obj->read_domains = I915_GEM_DOMAIN_CPU;
	obj->write_domain = I915_GEM_DOMAIN_CPU;
	i915_gem_object_set_cache_coherency(obj, I915_CACHE_LLC);

	obj->userptr.ptr = args->user_ptr;
	if (args->flags & I915_USERPTR_READ_ONLY)
		i915_gem_object_set_readonly(obj);

	/* And keep a pointer to the current->mm for resolving the user pages
	 * at binding. This means that we need to hook into the mmu_notifier
	 * in order to detect if the mmu is destroyed.
	 */
	ret = i915_gem_userptr_init__mm_struct(obj);
	if (ret == 0)
		ret = i915_gem_userptr_init__mmu_notifier(obj, args->flags);
	if (ret == 0)
		ret = drm_gem_handle_create(file, &obj->base, &handle);

	/* drop reference from allocate - handle holds it now */
	i915_gem_object_put(obj);
	if (ret)
		return ret;

	args->handle = handle;
	return 0;
}
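The comment above describes the userspace-visible contract of the userptr interface. As a hypothetical illustration of that contract, the helper below wraps a page-aligned buffer into a GEM handle; only struct drm_i915_gem_userptr and DRM_IOCTL_I915_GEM_USERPTR come from the uapi header, the helper itself and its calling context are assumptions, not part of the driver.

/*
 * Hypothetical userspace helper: wrap a page-aligned buffer into a GEM
 * handle via the userptr ioctl. Only the uapi definitions from
 * <drm/i915_drm.h> are real; the helper is illustrative.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int create_userptr_handle(int drm_fd, void *ptr, uint64_t size,
				 uint32_t *handle)
{
	struct drm_i915_gem_userptr arg = {
		.user_ptr = (uintptr_t)ptr,	/* must be page aligned */
		.user_size = size,		/* must be page aligned */
		.flags = 0,			/* read/write, synchronized */
	};

	if (ioctl(drm_fd, DRM_IOCTL_I915_GEM_USERPTR, &arg))
		return -errno;

	/* On success the kernel fills in the new GEM handle. */
	*handle = arg.handle;
	return 0;
}

Per the restrictions listed above, both the pointer and the size must be page aligned, and the CPU-access ioctls (pread, pwrite, mmap) are rejected on the resulting handle; the memory is accessed directly through the original pointer instead.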