Example #1
static int amdgpu_cgs_gmap_kmem(void *cgs_device, void *kmem,
				uint64_t size,
				uint64_t min_offset, uint64_t max_offset,
				cgs_handle_t *kmem_handle, uint64_t *mcaddr)
{
	CGS_FUNC_ADEV;
	int ret;
	struct amdgpu_bo *bo;
	struct page *kmem_page = vmalloc_to_page(kmem);
	int npages = ALIGN(size, PAGE_SIZE) >> PAGE_SHIFT;

	struct sg_table *sg = drm_prime_pages_to_sg(&kmem_page, npages);
	ret = amdgpu_bo_create(adev, size, PAGE_SIZE, false,
			       AMDGPU_GEM_DOMAIN_GTT, 0, sg, NULL, &bo);
	if (ret)
		return ret;
	ret = amdgpu_bo_reserve(bo, false);
	if (unlikely(ret != 0))
		return ret;

	/* pin buffer into GTT */
	ret = amdgpu_bo_pin_restricted(bo, AMDGPU_GEM_DOMAIN_GTT,
				       min_offset, max_offset, mcaddr);
	amdgpu_bo_unreserve(bo);

	*kmem_handle = (cgs_handle_t)bo;
	return ret;
}
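Every example on this page calls the same helper. For reference, in the kernel versions these snippets come from (before the function gained a struct drm_device * parameter), its prototype is roughly the following sketch; the exact header it lives in is an assumption:

/* Builds an sg_table that references the given array of pages. */
struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);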
Example #2
struct sg_table *radeon_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct radeon_bo *bo = gem_to_radeon_bo(obj);
	int npages = bo->tbo.num_pages;

	return drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
}
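A callback like this is normally wired into the driver's struct drm_driver so that the DRM PRIME export path can fetch the backing pages. A minimal sketch, assuming the field name of this era (the surrounding fields are elided):

static struct drm_driver kms_driver = {
	/* ... other driver fields ... */
	.gem_prime_get_sg_table = radeon_gem_prime_get_sg_table,
};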
Example #3
File: gem.c Project: JaneDu/ath
static int tegra_bo_get_pages(struct drm_device *drm, struct tegra_bo *bo)
{
	struct scatterlist *s;
	unsigned int i;

	bo->pages = drm_gem_get_pages(&bo->gem);
	if (IS_ERR(bo->pages))
		return PTR_ERR(bo->pages);

	bo->num_pages = bo->gem.size >> PAGE_SHIFT;

	bo->sgt = drm_prime_pages_to_sg(bo->pages, bo->num_pages);
	if (IS_ERR(bo->sgt))
		goto put_pages;

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(bo->sgt->sgl, s, bo->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, bo->sgt->sgl, bo->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

put_pages:
	drm_gem_put_pages(&bo->gem, bo->pages, false, false);
	return PTR_ERR(bo->sgt);
}
Example #4
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);
	int npages = obj->size >> PAGE_SHIFT;

	if (WARN_ON(!msm_obj->pages))  /* should have already pinned! */
		return NULL;

	return drm_prime_pages_to_sg(msm_obj->pages, npages);
}
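The pinning the WARN_ON refers to happens in the driver's PRIME pin hook, which runs before the buffer is mapped. A minimal sketch modeled on msm's pin callback (not shown on this page, so treat the exact body as an assumption):

int msm_gem_prime_pin(struct drm_gem_object *obj)
{
	/* Imported buffers already have backing storage; only pin native BOs. */
	if (!obj->import_attach)
		msm_gem_get_pages(obj);
	return 0;
}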
Example #5
static struct sg_table *nouveau_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					  enum dma_data_direction dir)
{
	struct nouveau_bo *nvbo = attachment->dmabuf->priv;
	struct drm_device *dev = nvbo->gem->dev;
	int npages = nvbo->bo.num_pages;
	struct sg_table *sg;
	int nents;

	mutex_lock(&dev->struct_mutex);
	sg = drm_prime_pages_to_sg(nvbo->bo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	mutex_unlock(&dev->struct_mutex);
	return sg;
}
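The table built here is torn down by the attachment's unmap callback. A minimal sketch of the counterpart, modeled on nouveau's unmap path (assumed, since it is not shown on this page):

static void nouveau_gem_unmap_dma_buf(struct dma_buf_attachment *attachment,
				      struct sg_table *sg,
				      enum dma_data_direction dir)
{
	/* Undo dma_map_sg(), then release the scatter/gather table itself. */
	dma_unmap_sg(attachment->dev, sg->sgl, sg->nents, dir);
	sg_free_table(sg);
	kfree(sg);
}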
Example #6
static struct sg_table *radeon_gem_map_dma_buf(struct dma_buf_attachment *attachment,
					       enum dma_data_direction dir)
{
	struct radeon_bo *bo = attachment->dmabuf->priv;
	struct drm_device *dev = bo->rdev->ddev;
	int npages = bo->tbo.num_pages;
	struct sg_table *sg;
	int nents;

	rw_enter_write(&dev->struct_rwlock);
	sg = drm_prime_pages_to_sg(bo->tbo.ttm->pages, npages);
	nents = dma_map_sg(attachment->dev, sg->sgl, sg->nents, dir);
	rw_exit_write(&dev->struct_rwlock);
	return sg;
}
Example #7
static int rockchip_gem_get_pages(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	int ret, i;
	struct scatterlist *s;

	rk_obj->pages = drm_gem_get_pages(&rk_obj->base);
	if (IS_ERR(rk_obj->pages))
		return PTR_ERR(rk_obj->pages);

	rk_obj->num_pages = rk_obj->base.size >> PAGE_SHIFT;

	rk_obj->sgt = drm_prime_pages_to_sg(rk_obj->pages, rk_obj->num_pages);
	if (IS_ERR(rk_obj->sgt)) {
		ret = PTR_ERR(rk_obj->sgt);
		goto err_put_pages;
	}

	/*
	 * Fake up the SG table so that dma_sync_sg_for_device() can be used
	 * to flush the pages associated with it.
	 *
	 * TODO: Replace this by drm_clflush_sg() once it can be implemented
	 * without relying on symbols that are not exported.
	 */
	for_each_sg(rk_obj->sgt->sgl, s, rk_obj->sgt->nents, i)
		sg_dma_address(s) = sg_phys(s);

	dma_sync_sg_for_device(drm->dev, rk_obj->sgt->sgl, rk_obj->sgt->nents,
			       DMA_TO_DEVICE);

	return 0;

err_put_pages:
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, false, false);
	return ret;
}
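A matching teardown would release the sg_table before dropping the page references. A minimal sketch (the helper name is hypothetical and assumes the pages were obtained exactly as in rockchip_gem_get_pages() above):

static void rockchip_gem_put_pages(struct rockchip_gem_object *rk_obj)
{
	/* Free the table first, then return the pages to the GEM backing store,
	 * marking them dirty and accessed since the device may have written them. */
	sg_free_table(rk_obj->sgt);
	kfree(rk_obj->sgt);
	drm_gem_put_pages(&rk_obj->base, rk_obj->pages, true, true);
}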
Example #8
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
			   struct ttm_bo_device *bdev,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_ub_arg *arg = data;
	struct ttm_pl_create_ub_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))
	size_t acc_size =
		ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
		sizeof(struct ttm_buffer_object));
#endif
	if (req->user_address & ~PAGE_MASK) {
		printk(KERN_ERR "User pointer buffer need page alignment\n");
		return -EFAULT;
	}

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}
	bo = &user_bo->bo;

	placement.num_placement = 1;
	placement.placement = &flags;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0))

/*  For kernels before 3.3, ttm_bo_type_user still exists, so use it directly. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_user

#else
/*  TTM_HACK_WORKAROUND_ttm_bo_type_user -- Hack for porting,
    as ttm_bo_type_user is no longer implemented.
    This will not result in working code.
    FIXME - to be removed. */

#warning warning: ttm_bo_type_user no longer supported

/*  For kernel 3.3+, use the wrong type, which will compile but not work. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_kernel

#endif

#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
		/* Handle a frame buffer allocated in user space: convert the
		 * user-space virtual address into a list of pages. */
		unsigned int page_nr = 0;
		struct vm_area_struct *vma = NULL;
		struct sg_table *sg = NULL;
		unsigned long num_pages = 0;
		struct page **pages = 0;

		num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
		if (unlikely(pages == NULL)) {
			printk(KERN_ERR "kzalloc pages failed\n");
			return -ENOMEM;
		}

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, req->user_address);
		if (unlikely(vma == NULL)) {
			up_read(&current->mm->mmap_sem);
			kfree(pages);
			printk(KERN_ERR "find_vma failed\n");
			return -EFAULT;
		}
		unsigned long before_flags = vma->vm_flags;
		if (vma->vm_flags & (VM_IO | VM_PFNMAP))
			vma->vm_flags = vma->vm_flags & ((~VM_IO) & (~VM_PFNMAP));
		page_nr = get_user_pages(current, current->mm,
					 req->user_address,
					 (int)(num_pages), 1, 0, pages,
					 NULL);
		vma->vm_flags = before_flags;
		up_read(&current->mm->mmap_sem);

		/* can be written by caller, not forced */
		if (unlikely(page_nr < num_pages)) {
			kfree(pages);
			pages = 0;
			printk(KERN_ERR "get_user_pages err.\n");
			return -ENOMEM;
		}
		sg = drm_prime_pages_to_sg(pages, num_pages);
		if (unlikely(sg == NULL)) {
			kfree(pages);
			printk(KERN_ERR "drm_prime_pages_to_sg err.\n");
			return -ENOMEM;
		}
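		/* drm_prime_pages_to_sg() copied the page references into the
		 * scatterlist, so the temporary pointer array can be freed. */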
		kfree(pages);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  TTM_HACK_WORKAROUND_ttm_bo_type_user,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  NULL,
			  &ttm_bo_user_destroy);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  req->user_address,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev,
			  bo,
			  req->size,
			  ttm_bo_type_sg,
			  &placement,
			  req->page_alignment,
			  true,
			  NULL,
			  acc_size,
			  sg,
			  &ttm_ub_bo_user_destroy);
#endif

	/*
	 * Note that the ttm_buffer_object_init function
	 * would've called the destroy function on failure!!
	 */
	ttm_read_unlock(lock);
	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
out:
	return ret;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
Example #9
static int lowlevel_buffer_allocate(struct drm_device *dev,
		unsigned int flags, struct rockchip_drm_gem_buf *buf)
{
	int ret = 0;
	enum dma_attr attr;
	unsigned int nr_pages;

	DRM_DEBUG_KMS("%s\n", __FILE__);

	if (buf->dma_addr) {
		DRM_DEBUG_KMS("already allocated.\n");
		return 0;
	}

	init_dma_attrs(&buf->dma_attrs);

	/*
	 * If ROCKCHIP_BO_NONCONTIG is not set, request a fully physically
	 * contiguous memory region; otherwise the allocation only needs to be
	 * as contiguous as possible.
	 */
	if (!(flags & ROCKCHIP_BO_NONCONTIG))
		dma_set_attr(DMA_ATTR_FORCE_CONTIGUOUS, &buf->dma_attrs);

	/*
	 * If ROCKCHIP_BO_WC is set, or the buffer is not marked
	 * ROCKCHIP_BO_CACHABLE, use a write-combined mapping; otherwise use a
	 * cacheable mapping.
	 */
	if (flags & ROCKCHIP_BO_WC || !(flags & ROCKCHIP_BO_CACHABLE))
		attr = DMA_ATTR_WRITE_COMBINE;
	else
		attr = DMA_ATTR_NON_CONSISTENT;

	dma_set_attr(attr, &buf->dma_attrs);
	dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &buf->dma_attrs);

	nr_pages = buf->size >> PAGE_SHIFT;

	if (!is_drm_iommu_supported(dev)) {
		dma_addr_t start_addr;
		unsigned int i = 0;

		buf->pages = kzalloc(sizeof(struct page *) * nr_pages,
					GFP_KERNEL);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate pages.\n");
			return -ENOMEM;
		}

		buf->kvaddr = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->kvaddr) {
			DRM_ERROR("failed to allocate buffer.\n");
			kfree(buf->pages);
			return -ENOMEM;
		}

		start_addr = buf->dma_addr;
		while (i < nr_pages) {
			buf->pages[i] = phys_to_page(start_addr);
			start_addr += PAGE_SIZE;
			i++;
		}
	} else {

		buf->pages = dma_alloc_attrs(dev->dev, buf->size,
					&buf->dma_addr, GFP_KERNEL,
					&buf->dma_attrs);
		if (!buf->pages) {
			DRM_ERROR("failed to allocate buffer.\n");
			return -ENOMEM;
		}
	}

	buf->sgt = drm_prime_pages_to_sg(buf->pages, nr_pages);
	if (!buf->sgt) {
		DRM_ERROR("failed to get sg table.\n");
		ret = -ENOMEM;
		goto err_free_attrs;
	}

	DRM_DEBUG_KMS("dma_addr(0x%lx), size(0x%lx)\n",
			(unsigned long)buf->dma_addr,
			buf->size);

	return ret;

err_free_attrs:
	/*
	 * In the non-IOMMU case the DMA allocation is buf->kvaddr and
	 * buf->pages was allocated separately with kzalloc(); free each
	 * accordingly.
	 */
	if (!is_drm_iommu_supported(dev)) {
		dma_free_attrs(dev->dev, buf->size, buf->kvaddr,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
		kfree(buf->pages);
	} else {
		dma_free_attrs(dev->dev, buf->size, buf->pages,
				(dma_addr_t)buf->dma_addr, &buf->dma_attrs);
	}
	buf->dma_addr = (dma_addr_t)NULL;

	return ret;
}
Example #10
static struct sg_table *udl_map_dma_buf(struct dma_buf_attachment *attach,
					enum dma_data_direction dir)
{
	struct udl_drm_dmabuf_attachment *udl_attach = attach->priv;
	struct udl_gem_object *obj = to_udl_bo(attach->dmabuf->priv);
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *rd, *wr;
	struct sg_table *sgt = NULL;
	unsigned int i;
	int page_count;
	int nents, ret;

	DRM_DEBUG_PRIME("[DEV:%s] size:%zd dir=%d\n", dev_name(attach->dev),
			attach->dmabuf->size, dir);

	/* just return current sgt if already requested. */
	if (udl_attach->dir == dir && udl_attach->is_mapped)
		return &udl_attach->sgt;

	if (!obj->pages) {
		ret = udl_gem_get_pages(obj);
		if (ret) {
			DRM_ERROR("failed to map pages.\n");
			return ERR_PTR(ret);
		}
	}

	page_count = obj->base.size / PAGE_SIZE;
	obj->sg = drm_prime_pages_to_sg(obj->pages, page_count);
	if (IS_ERR(obj->sg)) {
		DRM_ERROR("failed to allocate sgt.\n");
		return ERR_CAST(obj->sg);
	}

	sgt = &udl_attach->sgt;

	ret = sg_alloc_table(sgt, obj->sg->orig_nents, GFP_KERNEL);
	if (ret) {
		DRM_ERROR("failed to alloc sgt.\n");
		return ERR_PTR(-ENOMEM);
	}

	mutex_lock(&dev->struct_mutex);

	rd = obj->sg->sgl;
	wr = sgt->sgl;
	for (i = 0; i < sgt->orig_nents; ++i) {
		sg_set_page(wr, sg_page(rd), rd->length, rd->offset);
		rd = sg_next(rd);
		wr = sg_next(wr);
	}

	if (dir != DMA_NONE) {
		nents = dma_map_sg(attach->dev, sgt->sgl, sgt->orig_nents, dir);
		if (!nents) {
			DRM_ERROR("failed to map sgl with iommu.\n");
			sg_free_table(sgt);
			sgt = ERR_PTR(-EIO);
			goto err_unlock;
		}
	}

	udl_attach->is_mapped = true;
	udl_attach->dir = dir;
	attach->priv = udl_attach;

err_unlock:
	mutex_unlock(&dev->struct_mutex);
	return sgt;
}
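Callbacks like this one are grouped into a struct dma_buf_ops when the GEM object is exported. A minimal sketch, assuming companion helper names along the lines of udl's other callbacks (the exact names and any unlisted fields are assumptions):

static struct dma_buf_ops udl_dmabuf_ops = {
	.attach		= udl_attach_dma_buf,
	.detach		= udl_detach_dma_buf,
	.map_dma_buf	= udl_map_dma_buf,
	.unmap_dma_buf	= udl_unmap_dma_buf,
	.release	= drm_gem_dmabuf_release,
};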