Example #1
int
nouveau_gem_new(struct drm_device *dev, struct nouveau_channel *chan,
		int size, int align, uint32_t flags, uint32_t tile_mode,
		uint32_t tile_flags, bool no_vm, bool mappable,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	int ret;

	ret = nouveau_bo_new(dev, chan, size, align, flags, tile_mode,
			     tile_flags, no_vm, mappable, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistant_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
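Each variant on this page follows the same two-step pattern: allocate the driver's buffer object first, then wrap it in a GEM object with drm_gem_object_alloc(), unwinding the buffer allocation if the GEM step fails. A minimal caller sketch for the signature above, with illustrative size and placement values (example_gem_alloc is a hypothetical helper, not part of the driver):

static int
example_gem_alloc(struct drm_device *dev, struct nouveau_channel *chan,
		  struct nouveau_bo **pnvbo)
{
	/* 64 KiB in VRAM, default alignment, no tiling,
	 * VM-mapped (no_vm = false) and CPU-mappable. */
	return nouveau_gem_new(dev, chan, 64 * 1024, 0, TTM_PL_FLAG_VRAM,
			       0, 0, false, true, pnvbo);
}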
Example #2
static int
nouveau_prime_new(struct drm_device *dev,
		  size_t size,
		  struct sg_table *sg,
		  struct nouveau_bo **pnvbo)
{
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	flags = TTM_PL_FLAG_TT;

	ret = nouveau_bo_new(dev, size, 0, flags, 0, 0,
			     sg, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_GART;
	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->gem->driver_private = nvbo;
	return 0;
}
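Here nouveau_bo_new() receives an existing scatter/gather table instead of allocating fresh backing pages; this is the PRIME dma-buf import path. valid_domains is pinned to NOUVEAU_GEM_DOMAIN_GART because imported pages live in system memory and can never be migrated into the device's own VRAM.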
Example #3
struct drm_gem_object *pscnv_gem_new(struct drm_device *dev, uint64_t size, uint32_t flags,
		uint32_t tile_flags, uint32_t cookie, uint32_t *user)
{
	int i;
	struct drm_gem_object *obj;
	struct pscnv_bo *vo;

	vo = pscnv_mem_alloc(dev, size, flags, tile_flags, cookie);
	if (!vo)
		return NULL;

	obj = drm_gem_object_alloc(dev, vo->size);
	if (!obj) {
		pscnv_mem_free(vo);
		return NULL;
	}
	obj->driver_private = vo;
	vo->gem = obj;

	if (user)
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = user[i];
	else
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = 0;

	return obj;
}
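Unlike the nouveau and radeon helpers, pscnv_gem_new() returns the GEM object directly, so a caller can only distinguish failure as NULL. A hedged usage sketch, with hypothetical size, flag, and cookie values:

	struct drm_gem_object *obj;

	/* One page, no placement or tiling flags, cookie 0,
	 * and no per-object user words (user = NULL). */
	obj = pscnv_gem_new(dev, 4096, 0, 0, 0, NULL);
	if (!obj)
		return -ENOMEM;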
Example #4
int radeon_gem_object_create(struct radeon_device *rdev, int size,
				int alignment, int initial_domain,
				bool discardable, bool kernel,
				struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_bo *robj;
	int r;

	*obj = NULL;
	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (!gobj) {
		return -ENOMEM;
	}
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_bo_create(rdev, gobj, size, kernel, initial_domain, &robj);
	if (r) {
		if (r != -ERESTARTSYS)
			DRM_ERROR("Failed to allocate GEM object (%d, %d, %u, %d)\n",
				  size, initial_domain, alignment, r);
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}
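A usage sketch for the radeon variant above, assuming a single-page, userspace-visible buffer in VRAM (the helper name and values are illustrative, not from the driver):

static int
example_radeon_alloc(struct radeon_device *rdev, struct drm_gem_object **gobj)
{
	/* One page in VRAM, neither discardable nor kernel-internal;
	 * an alignment below PAGE_SIZE would be clamped internally. */
	return radeon_gem_object_create(rdev, PAGE_SIZE, PAGE_SIZE,
					RADEON_GEM_DOMAIN_VRAM,
					false, false, gobj);
}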
Example #5
int radeon_gem_object_create(struct radeon_device *rdev, int size,
			     int alignment, int initial_domain,
			     bool discardable, bool kernel,
			     bool interruptible,
			     struct drm_gem_object **obj)
{
	struct drm_gem_object *gobj;
	struct radeon_object *robj;
	int r;

	*obj = NULL;
	gobj = drm_gem_object_alloc(rdev->ddev, size);
	if (!gobj) {
		return -ENOMEM;
	}
	/* At least align on page size */
	if (alignment < PAGE_SIZE) {
		alignment = PAGE_SIZE;
	}
	r = radeon_object_create(rdev, gobj, size, kernel, initial_domain,
				 interruptible, &robj);
	if (r) {
		DRM_ERROR("Failed to allocate GEM object (%d, %d, %u)\n",
			  size, initial_domain, alignment);
		mutex_lock(&rdev->ddev->struct_mutex);
		drm_gem_object_unreference(gobj);
		mutex_unlock(&rdev->ddev->struct_mutex);
		return r;
	}
	gobj->driver_private = robj;
	*obj = gobj;
	return 0;
}
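The interesting difference from the previous example is the cleanup path: this older variant takes struct_mutex by hand around drm_gem_object_unreference(), while the newer one calls drm_gem_object_unreference_unlocked() and also suppresses the error message for -ERESTARTSYS, since an interrupted wait is not a genuine allocation failure.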
Example #6
struct drm_gem_object *pscnv_gem_new(struct drm_device *dev, uint64_t size, uint32_t flags,
		uint32_t tile_flags, uint32_t cookie, uint32_t *user)
{
	int i;
	struct drm_gem_object *obj;
	struct pscnv_bo *vo;

	vo = pscnv_mem_alloc(dev, size, flags, tile_flags, cookie);
	if (!vo)
		return NULL;

	obj = drm_gem_object_alloc(dev, vo->size);
	if (!obj) {
		pscnv_mem_free(vo);
		return NULL;
	}
#ifndef PSCNV_KAPI_DRM_GEM_OBJECT_HANDLE_COUNT
	atomic_inc(&obj->handle_count);
#endif
	obj->driver_private = vo;
	vo->gem = obj;

	if (user)
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = user[i];
	else
		for (i = 0; i < ARRAY_SIZE(vo->user); i++)
			vo->user[i] = 0;

	return obj;
}
Example #7
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct nouveau_drm *drm = nouveau_drm(dev);
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, NULL, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	/* we restrict allowed domains on nv50+ to only the types
	 * that were requested at creation time.  not possible on
	 * earlier chips without busting the ABI.
	 */
	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (nv_device(drm->device)->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
Example #8
int
nouveau_gem_new(struct drm_device *dev, int size, int align, uint32_t domain,
		uint32_t tile_mode, uint32_t tile_flags,
		struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	u32 flags = 0;
	int ret;

	if (domain & NOUVEAU_GEM_DOMAIN_VRAM)
		flags |= TTM_PL_FLAG_VRAM;
	if (domain & NOUVEAU_GEM_DOMAIN_GART)
		flags |= TTM_PL_FLAG_TT;
	if (!flags || domain & NOUVEAU_GEM_DOMAIN_CPU)
		flags |= TTM_PL_FLAG_SYSTEM;

	ret = nouveau_bo_new(dev, size, align, flags, tile_mode,
			     tile_flags, pnvbo);
	if (ret)
		return ret;
	nvbo = *pnvbo;

	nvbo->valid_domains = NOUVEAU_GEM_DOMAIN_VRAM |
			      NOUVEAU_GEM_DOMAIN_GART;
	if (dev_priv->card_type >= NV_50)
		nvbo->valid_domains &= domain;

	nvbo->gem = drm_gem_object_alloc(dev, nvbo->bo.mem.size);
	if (!nvbo->gem) {
		nouveau_bo_ref(NULL, pnvbo);
		return -ENOMEM;
	}

	nvbo->bo.persistent_swap_storage = nvbo->gem->filp;
	nvbo->gem->driver_private = nvbo;
	return 0;
}
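Both versions above accept a NOUVEAU_GEM_DOMAIN_* mask rather than raw TTM placement flags and translate it internally; they differ only in how they reach the chipset generation (nouveau_drm() versus dev->dev_private). A caller sketch against this signature, with illustrative values (example_gart_alloc is a hypothetical helper):

static int
example_gart_alloc(struct drm_device *dev, struct nouveau_bo **pnvbo)
{
	/* One page in GART, default alignment, no tiling. */
	return nouveau_gem_new(dev, 4096, 0, NOUVEAU_GEM_DOMAIN_GART,
			       0, 0, pnvbo);
}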