/* FIXME Copy from upstream TTM "ttm_bo_create", upstream TTM does not export this, so copy it here */
/*
 * Allocate, account and initialize a kernel-private TTM buffer object.
 * On success *p_bo points at the new object; on failure nothing is
 * left allocated (ttm_bo_init() invokes the destroy path itself).
 */
static int ttm_bo_create_private(struct ttm_bo_device *bdev,
				 unsigned long size,
				 enum ttm_bo_type type,
				 struct ttm_placement *placement,
				 uint32_t page_alignment,
				 unsigned long buffer_start,
				 bool interruptible,
				 struct file *persistent_swap_storage,
				 struct ttm_buffer_object **p_bo)
{
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_buffer_object *bo;
	size_t acc_size;
	int ret;

	/* Reserve the accounting for the object before allocating it. */
	acc_size = ttm_bo_size(bdev->glob,
			       (size + PAGE_SIZE - 1) >> PAGE_SHIFT);
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (unlikely(bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  buffer_start, interruptible,
			  persistent_swap_storage, acc_size, NULL);
	if (unlikely(ret != 0))
		return ret;

	*p_bo = bo;
	return 0;
}
/* Example 2 */
/*
 * Create and initialize a TTM buffer object.  The object is handed to
 * ttm_bo_init(), which takes ownership and tears it down on failure,
 * so only the kzalloc() failure needs explicit cleanup here.
 */
int ttm_bo_create(struct ttm_bo_device *bdev,
			unsigned long size,
			enum ttm_bo_type type,
			struct ttm_placement *placement,
			uint32_t page_alignment,
			unsigned long buffer_start,
			bool interruptible,
			struct file *persistent_swap_storage,
			struct ttm_buffer_object **p_bo)
{
	struct ttm_buffer_object *bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	size_t acc_size;
	int ret;

	if (unlikely(bo == NULL))
		return -ENOMEM;

	acc_size = ttm_bo_acc_size(bdev, size,
				   sizeof(struct ttm_buffer_object));
	ret = ttm_bo_init(bdev, bo, size, type, placement, page_alignment,
			  buffer_start, interruptible,
			  persistent_swap_storage, acc_size, NULL, NULL);
	if (unlikely(ret != 0))
		return ret;

	*p_bo = bo;
	return 0;
}
/* Example 3 */
/*
 * Allocate a nouveau buffer object, reserve its per-channel VM range
 * when one is wanted, and hand it to TTM for backing-store setup.
 * On success *pnvbo receives the new object.
 */
int
nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
               int size, int align, uint32_t flags, uint32_t tile_mode,
               uint32_t tile_flags, bool no_vm, bool mappable,
               struct nouveau_bo **pnvbo)
{
    struct drm_nouveau_private *dev_priv = dev->dev_private;
    struct nouveau_bo *nvbo;
    int page_shift = 0;
    int ret = 0;

    nvbo = kzalloc(sizeof(*nvbo), GFP_KERNEL);
    if (nvbo == NULL)
        return -ENOMEM;

    INIT_LIST_HEAD(&nvbo->head);
    INIT_LIST_HEAD(&nvbo->entry);
    nvbo->mappable = mappable;
    nvbo->no_vm = no_vm;
    nvbo->tile_mode = tile_mode;
    nvbo->tile_flags = tile_flags;
    nvbo->bo.bdev = &dev_priv->ttm.bdev;

    /* Let the chip-specific code round size/alignment up as needed. */
    nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
    align >>= PAGE_SHIFT;

    if (dev_priv->chan_vm && !nvbo->no_vm) {
        ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
                             NV_MEM_ACCESS_RW, &nvbo->vma);
        if (ret != 0) {
            kfree(nvbo);
            return ret;
        }
    }

    nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
    nouveau_bo_placement_set(nvbo, flags, 0);

    /* Channel is only needed while ttm_bo_init() runs. */
    nvbo->channel = chan;
    ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
                      ttm_bo_type_device, &nvbo->placement, align, 0,
                      false, NULL, size, nouveau_bo_del_ttm);
    if (ret != 0) {
        /* ttm will call nouveau_bo_del_ttm if it fails.. */
        return ret;
    }
    nvbo->channel = NULL;

    if (nvbo->vma.node && nvbo->bo.mem.mem_type == TTM_PL_VRAM)
        nvbo->bo.offset = nvbo->vma.offset;

    *pnvbo = nvbo;
    return 0;
}
/* Example 4 */
/*
 * Create a radeon buffer object in the requested memory domain.
 * A VRAM placement that cannot be satisfied is retried once with
 * VRAM|GTT before giving up.  On success *bo_ptr receives the object
 * and, when a GEM object was supplied, it is linked into the device's
 * GEM object list.
 */
int radeon_bo_create(struct radeon_device *rdev, struct drm_gem_object *gobj,
			unsigned long size, bool kernel, u32 domain,
			struct radeon_bo **bo_ptr)
{
	struct radeon_bo *bo;
	enum ttm_bo_type type;
	int r;

	/* Lazily wire up the address-space mapping on first use. */
	if (unlikely(rdev->mman.bdev.dev_mapping == NULL))
		rdev->mman.bdev.dev_mapping = rdev->ddev->dev_mapping;

	type = kernel ? ttm_bo_type_kernel : ttm_bo_type_device;

	*bo_ptr = NULL;
	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;
	bo->rdev = rdev;
	bo->gobj = gobj;
	bo->surface_reg = -1;
	INIT_LIST_HEAD(&bo->list);

retry:
	radeon_ttm_placement_from_domain(bo, domain);
	/* Kernel allocations are uninterruptible. */
	mutex_lock(&rdev->vram_mutex);
	r = ttm_bo_init(&rdev->mman.bdev, &bo->tbo, size, type,
			&bo->placement, 0, 0, !kernel, NULL, size,
			&radeon_ttm_bo_destroy);
	mutex_unlock(&rdev->vram_mutex);
	if (unlikely(r != 0)) {
		if (r == -ERESTARTSYS)
			return r;
		/* VRAM exhausted?  Widen the domain and try once more. */
		if (domain == RADEON_GEM_DOMAIN_VRAM) {
			domain |= RADEON_GEM_DOMAIN_GTT;
			goto retry;
		}
		dev_err(rdev->dev,
			"object_init failed for (%lu, 0x%08X)\n",
			size, domain);
		return r;
	}
	*bo_ptr = bo;
	if (gobj) {
		mutex_lock(&bo->rdev->gem.mutex);
		list_add_tail(&bo->list, &rdev->gem.objects);
		mutex_unlock(&bo->rdev->gem.mutex);
	}
	return 0;
}
/* Example 5 */
/*
 * Create a virtio-gpu object: allocate it, initialize the embedded GEM
 * base object, and hand it to TTM.  On success *bo_ptr receives the
 * new object; on failure everything allocated here is torn down.
 */
int virtio_gpu_object_create(struct virtio_gpu_device *vgdev,
			     unsigned long size, bool kernel, bool pinned,
			     struct virtio_gpu_object **bo_ptr)
{
	struct virtio_gpu_object *bo;
	enum ttm_bo_type type;
	size_t acc_size;
	int ret;

	type = kernel ? ttm_bo_type_kernel : ttm_bo_type_device;
	*bo_ptr = NULL;

	/* NOTE: accounting size is computed from the unrounded size. */
	acc_size = ttm_bo_dma_acc_size(&vgdev->mman.bdev, size,
				       sizeof(struct virtio_gpu_object));

	bo = kzalloc(sizeof(*bo), GFP_KERNEL);
	if (bo == NULL)
		return -ENOMEM;

	size = roundup(size, PAGE_SIZE);
	ret = drm_gem_object_init(vgdev->ddev, &bo->gem_base, size);
	if (ret != 0)
		goto err_gem_init;

	bo->dumb = false;
	virtio_gpu_init_ttm_placement(bo, pinned);

	/* NOTE(review): ttm_bo_init() with a destroy callback may free bo
	   itself on failure, making the err_ttm_init path look like a
	   double free — confirm against the TTM version in use. */
	ret = ttm_bo_init(&vgdev->mman.bdev, &bo->tbo, size, type,
			  &bo->placement, 0, !kernel, NULL, acc_size,
			  NULL, NULL, &virtio_gpu_ttm_bo_destroy);
	if (ret != 0)
		goto err_ttm_init;

	*bo_ptr = bo;
	return 0;

err_ttm_init:
	drm_gem_object_release(&bo->gem_base);
err_gem_init:
	kfree(bo);
	return ret;
}
/*
 * ttm_pl_ub_create_ioctl - create a TTM buffer object backed by a
 * user-space buffer ("user buffer").
 *
 * @tfile: per-client object file the new buffer is registered with.
 * @bdev:  TTM buffer-object device.
 * @lock:  read lock taken around object creation.
 * @data:  union ttm_pl_create_ub_arg; request in, reply out.
 *
 * Returns 0 on success or a negative errno.  Before ttm_bo_init() the
 * function cleans up user_bo and the accounting reservation itself; from
 * ttm_bo_init() onwards the object's destroy callback owns that cleanup.
 *
 * Fixes over the previous version: error paths in the page-pinning
 * section no longer leak the read lock, user_bo and the acc_size
 * reservation; a negative get_user_pages() return is no longer
 * misinterpreted as success; pinned pages are released on failure;
 * ttm_bo_init() failure is propagated instead of returning 0.
 */
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
			   struct ttm_bo_device *bdev,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_ub_arg *arg = data;
	struct ttm_pl_create_ub_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
	/* Declared at function scope: consumed by the ttm_bo_init()
	   call for every kernel >= 3.3.0 (the old ">" guard left sg
	   undeclared for exactly 3.3.0). */
	struct sg_table *sg = NULL;
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
	size_t acc_size =
		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
		sizeof(struct ttm_buffer_object));
#endif
	/* The user buffer must start on a page boundary. */
	if (req->user_address & ~PAGE_MASK) {
		printk(KERN_ERR "User pointer buffer need page alignment\n");
		return -EFAULT;
	}

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}
	bo = &user_bo->bo;

	placement.num_placement = 1;
	placement.placement = &flags;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))

/*  For kernel 3.0, use the desired type. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_user

#else
/*  TTM_HACK_WORKAROUND_ttm_bo_type_user -- Hack for porting,
    as ttm_bo_type_user is no longer implemented.
    This will not result in working code.
    FIXME - to be removed. */

#warning warning: ttm_bo_type_user no longer supported

/*  For kernel 3.3+, use the wrong type, which will compile but not work. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_kernel

#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
	{
		/*
		 * The frame buffer lives in user space: pin its pages and
		 * convert them into a scatter/gather table for TTM.
		 */
		int page_nr;
		unsigned long num_pages;
		unsigned long before_flags;
		struct vm_area_struct *vma;
		struct page **pages;

		num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		/* kcalloc() checks num_pages * sizeof() for overflow. */
		pages = kcalloc(num_pages, sizeof(struct page *), GFP_KERNEL);
		if (unlikely(pages == NULL)) {
			printk(KERN_ERR "kzalloc pages failed\n");
			ret = -ENOMEM;
			goto out_no_bo;
		}

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, req->user_address);
		if (unlikely(vma == NULL)) {
			up_read(&current->mm->mmap_sem);
			kfree(pages);
			printk(KERN_ERR "find_vma failed\n");
			ret = -EFAULT;
			goto out_no_bo;
		}
		/* get_user_pages() rejects VM_IO/VM_PFNMAP vmas; drop the
		   flags for the duration of the call and restore after. */
		before_flags = vma->vm_flags;
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			vma->vm_flags = vma->vm_flags & ((~VM_IO) & (~VM_PFNMAP));
		page_nr = get_user_pages(current, current->mm,
					 req->user_address,
					 (int)(num_pages), 1, 0, pages,
					 NULL);
		vma->vm_flags = before_flags;
		up_read(&current->mm->mmap_sem);

		/* A negative return (error) or a short pin is a failure;
		   release whatever page references we did take.  The old
		   unsigned comparison let negative returns pass as OK. */
		if (unlikely(page_nr < 0 ||
			     (unsigned long)page_nr < num_pages)) {
			while (page_nr > 0)
				put_page(pages[--page_nr]);
			kfree(pages);
			printk(KERN_ERR "get_user_pages err.\n");
			ret = -ENOMEM;
			goto out_no_bo;
		}
		sg = drm_prime_pages_to_sg(pages, num_pages);
		if (unlikely(sg == NULL)) {
			while (num_pages > 0)
				put_page(pages[--num_pages]);
			kfree(pages);
			printk(KERN_ERR "drm_prime_pages_to_sg err.\n");
			ret = -ENOMEM;
			goto out_no_bo;
		}
		kfree(pages);
	}
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  TTM_HACK_WORKAROUND_ttm_bo_type_user,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  NULL,
			  &ttm_bo_user_destroy);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#endif

	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 * (user_bo and the acc_size reservation are already gone then.)
	 */
	ttm_read_unlock(lock);
	if (unlikely(ret != 0))
		return ret;	/* was "return 0", hiding the error */

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
	return 0;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
out_no_bo:
	/* Failed before ttm_bo_init(): undo everything ourselves. */
	ttm_read_unlock(lock);
	ttm_mem_global_free(mem_glob, acc_size);
	kfree(user_bo);
	return ret;
#endif
}
/*
 * ttm_pl_create_ioctl - create a kernel-backed TTM placement buffer
 * object on behalf of user space.
 *
 * @tfile: per-client object file the new buffer is registered with.
 * @bdev:  TTM buffer-object device.
 * @lock:  read lock taken around object creation.
 * @data:  union ttm_pl_create_arg; request in, reply out.
 *
 * Returns 0 on success or a negative errno.  Fix over the previous
 * version: a ttm_bo_init() failure is now propagated to the caller
 * (the old "goto out; out: return 0;" reported success to user space
 * while leaving the reply unfilled).
 */
int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
			struct ttm_bo_device *bdev,
			struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_arg *arg = data;
	struct ttm_pl_create_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
	size_t acc_size =
		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
		sizeof(struct ttm_buffer_object));
#endif
	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	bo = &user_bo->bo;
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}

	placement.num_placement = 1;
	placement.placement = &flags;

	/* Default to write-combined/uncached when no caching mode given. */
	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |=  TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, 0, true,
			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, true,
			  NULL, acc_size, NULL, &ttm_bo_user_destroy);
#endif
	ttm_read_unlock(lock);
	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 * user_bo and the acc_size reservation are already released,
	 * so a plain return is safe — but the error must be returned.
	 */
	if (unlikely(ret != 0))
		return ret;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
	return 0;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}