Example #1
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("failed to allocate %d page pointers\n", npages);
		/* drop the GEM object allocated above instead of leaking it */
		drm_gem_object_unreference_unlocked(&obj->base);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}
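A matching teardown is not shown in this example; a minimal sketch of what it would look like, assuming the usual pairing of drm_malloc_ab() with drm_free_large() and drm_prime_gem_destroy() for the imported sg_table (the helper name udl_prime_destroy is illustrative, not from the source):

/* Illustrative teardown mirroring udl_prime_create() above: release the
 * page array and hand the imported sg_table back to PRIME. */
static void udl_prime_destroy(struct udl_gem_object *obj)
{
	if (obj->pages) {
		drm_free_large(obj->pages);	/* pairs with drm_malloc_ab() */
		obj->pages = NULL;
	}
	if (obj->base.import_attach)
		drm_prime_gem_destroy(&obj->base, obj->sg);
}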
Example #2
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,3,0))
#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			/* i--, not --i: --i would leave page 0 mapped */
			while (i--) {
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
	}
	return 0;
}
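For reference, the unpopulate path from the mainline driver of roughly the same era reverses the mapping loop above (AGP and SWIOTLB branches omitted; treat this as a sketch rather than the exact tree this example came from):

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	/* imported (slave) buffers mapped nothing in populate */
	if (slave)
		return;

	for (i = 0; i < ttm->num_pages; i++) {
		if (gtt->ttm.dma_address[i])
			pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
				       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
	}
	ttm_pool_unpopulate(ttm);
}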
Example #3
/* This is used for an sg_table derived from a user pointer */
static void ttm_tt_free_user_pages(struct ttm_buffer_object *bo)
{
	struct page *page;
	struct page **pages = NULL;
	int i, ret;
	pages = kcalloc(bo->num_pages, sizeof(struct page *), GFP_KERNEL);
	if (unlikely(pages == NULL)) {
		printk(KERN_ERR "TTM bo free: page array allocation failed\n");
		return;
	}

	ret = drm_prime_sg_to_page_addr_arrays(bo->sg, pages,
					       NULL, bo->num_pages);
	if (ret) {
		printk(KERN_ERR "TTM bo free: sg to page array conversion failed\n");
		kfree(pages);
		return;
	}

	for (i = 0; i < bo->num_pages; ++i) {
		page = pages[i];
		if (page == NULL)
			continue;

		put_page(page);
	}
	kfree(pages);
}
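The put_page() loop only balances a pin taken when the sg_table was built from the user pointer. A sketch of that pinning side, assuming get_user_pages_fast() plus sg_alloc_table_from_pages(); the function name and parameters here are illustrative, not from the source:

/* Illustrative only: pin user pages and build the sg_table that
 * ttm_tt_free_user_pages() later releases. */
static int ttm_tt_pin_user_pages(unsigned long uaddr, int npages,
				 struct sg_table *sgt)
{
	struct page **pages;
	int ret, pinned = 0;

	pages = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
	if (!pages)
		return -ENOMEM;

	pinned = get_user_pages_fast(uaddr, npages, 1, pages);
	if (pinned != npages) {
		ret = pinned < 0 ? pinned : -EFAULT;
		goto out;
	}

	ret = sg_alloc_table_from_pages(sgt, pages, npages, 0,
					(size_t)npages << PAGE_SHIFT,
					GFP_KERNEL);
out:
	if (ret)
		while (pinned > 0)	/* unpin on any failure */
			put_page(pages[--pinned]);
	kfree(pages);	/* the sg_table holds its own page references */
	return ret;
}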
Example #4
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj = NULL;	/* must be NULL for the fail path */
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}
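For context, mainline reaches this import helper through the gem_prime_import_sg_table hook; the wrapper is a one-liner along these lines (based on msm_gem_prime.c of the same era, so treat it as a sketch):

struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
		struct dma_buf_attachment *attach, struct sg_table *sg)
{
	return msm_gem_import(dev, attach->dmabuf, sg);
}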
Example #5
static struct drm_gem_object *vgem_prime_import_sg_table(struct drm_device *dev,
			struct dma_buf_attachment *attach, struct sg_table *sg)
{
	struct drm_vgem_gem_object *obj;
	int npages;

	obj = __vgem_gem_create(dev, attach->dmabuf->size);
	if (IS_ERR(obj))
		return ERR_CAST(obj);

	npages = PAGE_ALIGN(attach->dmabuf->size) / PAGE_SIZE;

	obj->table = sg;
	obj->pages = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL);
	if (!obj->pages) {
		__vgem_gem_destroy(obj);
		return ERR_PTR(-ENOMEM);
	}

	obj->pages_pin_count++; /* perma-pinned */
	drm_prime_sg_to_page_addr_arrays(obj->table, obj->pages, NULL,
					npages);
	return &obj->base;
}
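Since the import path perma-pins the pages, the release side only needs to drop the pin and the array; a sketch under the assumption that kvmalloc_array() is paired with kvfree() (the helper name is illustrative, not from the source):

/* Illustrative release side: drop the import-time perma-pin and the
 * kvmalloc_array() allocation; the sg_table belongs to the exporter. */
static void vgem_prime_release_pages(struct drm_vgem_gem_object *obj)
{
	obj->pages_pin_count--;
	kvfree(obj->pages);
	obj->pages = NULL;
}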
Example #6
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r, seg;
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);

	if (ttm->state != tt_unpopulated)
		return 0;

	if (slave && ttm->sg) {
#ifdef notyet
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
#endif
		ttm->state = tt_unbound;
		return 0;
	}

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->segs[i].ds_addr = VM_PAGE_TO_PHYS(ttm->pages[i]);
		gtt->segs[i].ds_len = PAGE_SIZE;
	}

	if (bus_dmamap_load_raw(rdev->dmat, gtt->map, gtt->segs,
				ttm->num_pages,
				ttm->num_pages * PAGE_SIZE, 0)) {
		ttm_pool_unpopulate(ttm);
		return -EFAULT;
	}

	for (seg = 0, i = 0; seg < gtt->map->dm_nsegs; seg++) {
		bus_addr_t addr = gtt->map->dm_segs[seg].ds_addr;
		bus_size_t len = gtt->map->dm_segs[seg].ds_len;

		while (len > 0) {
			gtt->ttm.dma_address[i++] = addr;
			addr += PAGE_SIZE;
			len -= PAGE_SIZE;
		}
	}

	return 0;
}
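Here the per-page pci_map_page() loop of the Linux version is replaced by a single bus_dmamap_load_raw(); the matching unpopulate would presumably unload that map before returning the pages, roughly as follows (a sketch, not from the source):

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev = radeon_get_rdev(ttm->bdev);
	struct radeon_ttm_tt *gtt = (void *)ttm;

	/* undo the single bus_dma load from the populate path */
	bus_dmamap_unload(rdev->dmat, gtt->map);
	ttm_pool_unpopulate(ttm);
}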
Example #7
static int radeon_ttm_tt_populate(struct ttm_tt *ttm)
{
	struct radeon_device *rdev;
	struct radeon_ttm_tt *gtt = (void *)ttm;
	unsigned i;
	int r;
#ifdef DUMBBELL_WIP
	bool slave = !!(ttm->page_flags & TTM_PAGE_FLAG_SG);
#endif /* DUMBBELL_WIP */

	if (ttm->state != tt_unpopulated)
		return 0;

#ifdef DUMBBELL_WIP
	/*
	 * Maybe unneeded on FreeBSD.
	 *   -- dumbbell@
	 */
	if (slave && ttm->sg) {
		drm_prime_sg_to_page_addr_arrays(ttm->sg, ttm->pages,
						 gtt->ttm.dma_address, ttm->num_pages);
		ttm->state = tt_unbound;
		return 0;
	}
#endif /* DUMBBELL_WIP */

	rdev = radeon_get_rdev(ttm->bdev);
#if __OS_HAS_AGP
#ifdef DUMBBELL_WIP
	if (rdev->flags & RADEON_IS_AGP) {
		return ttm_agp_tt_populate(ttm);
	}
#endif /* DUMBBELL_WIP */
#endif

#ifdef CONFIG_SWIOTLB
	if (swiotlb_nr_tbl()) {
		return ttm_dma_populate(&gtt->ttm, rdev->dev);
	}
#endif

	r = ttm_pool_populate(ttm);
	if (r) {
		return r;
	}

	for (i = 0; i < ttm->num_pages; i++) {
		gtt->ttm.dma_address[i] = VM_PAGE_TO_PHYS(ttm->pages[i]);
#ifdef DUMBBELL_WIP
		gtt->ttm.dma_address[i] = pci_map_page(rdev->pdev, ttm->pages[i],
						       0, PAGE_SIZE,
						       PCI_DMA_BIDIRECTIONAL);
		if (pci_dma_mapping_error(rdev->pdev, gtt->ttm.dma_address[i])) {
			while (i--) {	/* i--, not --i, so page 0 is unmapped too */
				pci_unmap_page(rdev->pdev, gtt->ttm.dma_address[i],
					       PAGE_SIZE, PCI_DMA_BIDIRECTIONAL);
				gtt->ttm.dma_address[i] = 0;
			}
			ttm_pool_unpopulate(ttm);
			return -EFAULT;
		}
#endif /* DUMBBELL_WIP */
	}
	return 0;
}
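With the DUMBBELL_WIP blocks compiled out, this port records physical addresses straight from VM_PAGE_TO_PHYS() and maps nothing, so a matching unpopulate would reduce to returning the pages to the pool (sketch, assumed):

static void radeon_ttm_tt_unpopulate(struct ttm_tt *ttm)
{
	/* VM_PAGE_TO_PHYS() only read addresses, so there is no per-page
	 * unmap step to undo here */
	ttm_pool_unpopulate(ttm);
}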