Esempio n. 1
0
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 *
 * Returns 0 on success and stores the new handle in *handlep;
 * otherwise a negative errno from idr_alloc() or the driver's
 * gem_open_object() hook is returned.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	/* Handles start at 1; 0 is reserved to mean "no handle". */
	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);

	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	if (ret < 0)
		return ret;
	*handlep = ret;

	/*
	 * NOTE(review): the object is already visible in the idr before
	 * this handle reference is taken — presumably concurrent lookups
	 * are excluded elsewhere; confirm against the lookup path.
	 */
	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Roll back the idr entry and the handle reference. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
/*
 * GEM_NEW ioctl: allocate a new buffer object and return a userspace
 * handle for it in req->info.handle.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret = 0;

	/* Lazily hook TTM up to the device address-space mapping. */
	if (unlikely(dev_priv->ttm.bdev.dev_mapping == NULL))
		dev_priv->ttm.bdev.dev_mapping = dev_priv->dev->dev_mapping;

	/* Reject tile flags the VRAM engine does not accept. */
	if (!dev_priv->engine.vram.flags_valid(dev, req->info.tile_flags)) {
		NV_ERROR(dev, "bad page flags: 0x%08x\n", req->info.tile_flags);
		return -EINVAL;
	}

	ret = nouveau_gem_new(dev, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
	if (ret == 0) {
		ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
		if (ret)
			/* Could not fill in the info: drop the new handle. */
			drm_gem_handle_delete(file_priv, req->info.handle);
	}

	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(nvbo->gem);
	return ret;
}
Esempio n. 3
0
/*
 * GEM_NEW ioctl: allocate a buffer object and publish a userspace
 * handle for it in req->info.handle.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
		      struct drm_file *file_priv)
{
	struct nouveau_cli *cli = nouveau_cli(file_priv);
	struct drm_nouveau_gem_new *req = data;
	struct nouveau_bo *nvbo = NULL;
	int ret;

	/* Allocate the buffer object itself. */
	ret = nouveau_gem_new(cli, req->info.size, req->align,
			      req->info.domain, req->info.tile_mode,
			      req->info.tile_flags, &nvbo);
	if (ret)
		return ret;

	/* Publish a handle; if info cannot be filled in, take it back. */
	ret = drm_gem_handle_create(file_priv, &nvbo->gem, &req->info.handle);
	if (ret)
		goto out_unref;

	ret = nouveau_gem_info(file_priv, &nvbo->gem, &req->info);
	if (ret)
		drm_gem_handle_delete(file_priv, req->info.handle);

out_unref:
	/* drop reference from allocate - handle holds it now */
	drm_gem_object_unreference_unlocked(&nvbo->gem);
	return ret;
}
Esempio n. 4
0
/*
 * GEM_NEW ioctl: validate the requested memory type, allocate a
 * buffer object, and return a handle in req->info.handle.
 */
int
nouveau_gem_ioctl_new(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
    struct nouveau_drm *drm = nouveau_drm(dev);
    struct nouveau_fb *pfb = nouveau_fb(drm->device);
    struct drm_nouveau_gem_new *req = data;
    struct nouveau_bo *nvbo = NULL;
    int ret;

    drm->ttm.bdev.dev_mapping = drm->dev->dev_mapping;

    /* Validate the requested memory type before allocating anything. */
    if (!pfb->memtype_valid(pfb, req->info.tile_flags)) {
        NV_ERROR(drm, "bad page flags: 0x%08x\n", req->info.tile_flags);
        return -EINVAL;
    }

    ret = nouveau_gem_new(dev, req->info.size, req->align,
                          req->info.domain, req->info.tile_mode,
                          req->info.tile_flags, &nvbo);
    if (ret)
        return ret;

    /* Publish a handle; if info cannot be filled in, take it back. */
    ret = drm_gem_handle_create(file_priv, nvbo->gem, &req->info.handle);
    if (ret)
        goto out_unref;

    ret = nouveau_gem_info(file_priv, nvbo->gem, &req->info);
    if (ret)
        drm_gem_handle_delete(file_priv, req->info.handle);

out_unref:
    /* drop reference from allocate - handle holds it now */
    drm_gem_object_unreference_unlocked(nvbo->gem);
    return ret;
}
Esempio n. 5
0
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 *
 * On success 0 is returned and *handlep holds the new handle;
 * otherwise the error from drm_gem_name_create() or the driver's
 * gem_open_object() hook is propagated.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                      struct drm_gem_object *obj,
                      u32 *handlep)
{
    struct drm_device *dev = obj->dev;
    int ret;

    /* Allocate a per-file name (handle) for the object. */
    *handlep = 0;
    ret = drm_gem_name_create(&file_priv->object_names, obj, handlep);
    if (ret != 0)
        return ret;

    /* The new handle pins the object with its own reference. */
    drm_gem_object_handle_reference(obj);

    if (dev->driver->gem_open_object) {
        ret = dev->driver->gem_open_object(obj, file_priv);
        if (ret) {
            /* Roll back the handle and its reference on failure. */
            drm_gem_handle_delete(file_priv, *handlep);
            return ret;
        }
    }

    return 0;
}
Esempio n. 6
0
/**
 * drm_gem_handle_create_tail - internal functions to create a handle
 * @file_priv: drm file-private structure to register the handle for
 * @obj: object to register
 * @handlep: pointer to return the created handle to the caller
 *
 * This expects the dev->object_name_lock to be held already and will drop it
 * before returning. Used to avoid races in establishing new handles when
 * importing an object from either an flink name or a dma-buf.
 */
int
drm_gem_handle_create_tail(struct drm_file *file_priv,
			   struct drm_gem_object *obj,
			   u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/* Caller contract: object_name_lock must already be held. */
	WARN_ON(!mutex_is_locked(&dev->object_name_lock));

	/*
	 * Get the user-visible handle using idr.  Preload and perform
	 * allocation under our spinlock.
	 */
	idr_preload(GFP_KERNEL);
	spin_lock(&file_priv->table_lock);

	ret = idr_alloc(&file_priv->object_idr, obj, 1, 0, GFP_NOWAIT);
	/*
	 * Take the reference and bump handle_count before dropping
	 * table_lock, so the object is never visible in the idr
	 * without a handle reference backing it.
	 */
	drm_gem_object_reference(obj);
	obj->handle_count++;
	spin_unlock(&file_priv->table_lock);
	idr_preload_end();
	mutex_unlock(&dev->object_name_lock);
	if (ret < 0) {
		/* idr_alloc() failed: drop the reference taken above. */
		drm_gem_object_handle_unreference_unlocked(obj);
		return ret;
	}
	*handlep = ret;

	/* Grant this file access to the object's vma offset node. */
	ret = drm_vma_node_allow(&obj->vma_node, file_priv->filp);
	if (ret) {
		drm_gem_handle_delete(file_priv, *handlep);
		return ret;
	}

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Tear the handle back down on driver failure. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
Esempio n. 7
0
/*
 * GEM_CLOSE ioctl: release the caller's handle to a GEM object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
    struct drm_file *file_priv)
{
	struct drm_gem_close *args = data;

	/* GEM must be supported by this driver. */
	if (!drm_core_check_feature(dev, DRIVER_GEM))
		return ENODEV;

	return drm_gem_handle_delete(file_priv, args->handle);
}
Esempio n. 8
0
/**
 * Releases the handle to an mm object.
 */
int
drm_gem_close_ioctl(struct drm_device *dev, void *data,
                    struct drm_file *file_priv)
{
    struct drm_gem_close *args = data;

    /* GEM must be supported by this driver. */
    if (!(dev->driver->driver_features & DRIVER_GEM))
        return -ENODEV;

    return drm_gem_handle_delete(file_priv, args->handle);
}
Esempio n. 9
0
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
                      struct drm_gem_object *obj,
                      u32 *handlep)
{
    struct drm_device *dev = obj->dev;
    int ret;

    /*
     * Get the user-visible handle using idr.
     */
again:
    /* ensure there is space available to allocate a handle */
    if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
        return -ENOMEM;

    /* do the allocation under our spinlock */
    spin_lock(&file_priv->table_lock);
    ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
    spin_unlock(&file_priv->table_lock);
    /* -EAGAIN: the preallocated idr layer was consumed; preload again. */
    if (ret == -EAGAIN)
        goto again;

    if (ret != 0)
        return ret;

    /* The new handle pins the object with its own reference. */
    drm_gem_object_handle_reference(obj);

    if (dev->driver->gem_open_object) {
        ret = dev->driver->gem_open_object(obj, file_priv);
        if (ret) {
            /* Roll back the handle and its reference on failure. */
            drm_gem_handle_delete(file_priv, *handlep);
            return ret;
        }
    }

    return 0;
}
Esempio n. 10
0
/**
 * Create a handle for this object. This adds a handle reference
 * to the object, which includes a regular reference count. Callers
 * will likely want to dereference the object afterwards.
 */
int
drm_gem_handle_create(struct drm_file *file_priv,
		       struct drm_gem_object *obj,
		       u32 *handlep)
{
	struct drm_device *dev = obj->dev;
	int ret;

	/*
	 * Get the user-visible handle using idr.
	 */
again:
	/* ensure there is space available to allocate a handle */
	if (idr_pre_get(&file_priv->object_idr, GFP_KERNEL) == 0)
		return -ENOMEM;

	/* do the allocation under our spinlock */
	spin_lock(&file_priv->table_lock);
	ret = idr_get_new_above(&file_priv->object_idr, obj, 1, (int *)handlep);
	spin_unlock(&file_priv->table_lock);
	/* -EAGAIN: the preallocated idr layer was consumed; preload again. */
	if (ret == -EAGAIN)
		goto again;

	if (ret != 0)
		return ret;

	/* The new handle pins the object with its own reference. */
	drm_gem_object_handle_reference(obj);

	if (dev->driver->gem_open_object) {
		ret = dev->driver->gem_open_object(obj, file_priv);
		if (ret) {
			/* Roll back the handle and its reference on failure. */
			drm_gem_handle_delete(file_priv, *handlep);
			return ret;
		}
	}

	return 0;
}
Esempio n. 11
0
/*
 * udl_dumb_destroy - dumb_destroy callback: drop the dumb buffer's handle.
 */
int udl_dumb_destroy(struct drm_file *file, struct drm_device *dev,
		     uint32_t handle)
{
	int ret;

	/* The GEM core drops the handle's object reference for us. */
	ret = drm_gem_handle_delete(file, handle);
	return ret;
}
Esempio n. 12
0
/*
 * drm_gem_cma_dumb_destroy - (struct drm_driver)->dumb_destroy callback function
 */
int drm_gem_cma_dumb_destroy(struct drm_file *file_priv,
		struct drm_device *drm, unsigned int handle)
{
	int ret;

	/* Deleting the handle drops its object reference as well. */
	ret = drm_gem_handle_delete(file_priv, handle);
	return ret;
}
Esempio n. 13
0
/**
 *	psb_gem_dumb_destroy	-	destroy a dumb buffer
 *	@file: client file
 *	@dev: our DRM device
 *	@handle: the object handle
 *
 *	Drop a handle that was created via psb_gem_dumb_create, at least
 *	we hope it was created that way. i915 seems to assume the caller
 *	does the checking but that might be worth review ! FIXME
 */
int psb_gem_dumb_destroy(struct drm_file *file, struct drm_device *dev,
			uint32_t handle)
{
	int ret;

	/* No special work needed, drop the reference and see what falls out */
	ret = drm_gem_handle_delete(file, handle);
	return ret;
}
Esempio n. 14
0
/*
 * radeon_mode_dumb_destroy - dumb_destroy callback: release the handle.
 */
int radeon_mode_dumb_destroy(struct drm_file *file_priv,
			     struct drm_device *dev,
			     uint32_t handle)
{
	int ret;

	/* The GEM core does all the teardown work for the handle. */
	ret = drm_gem_handle_delete(file_priv, handle);
	return ret;
}