Example #1
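udl_gem_get_pages() from the udl driver: allocates the page-pointer array with drm_malloc_ab(), then fills it page by page from the GEM object's shmem mapping; on failure it releases every page obtained so far, frees the array, and propagates the error.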
static int udl_gem_get_pages(struct udl_gem_object *obj, gfp_t gfpmask)
{
	int page_count, i;
	struct page *page;
	struct inode *inode;
	struct address_space *mapping;

	if (obj->pages)
		return 0;

	page_count = obj->base.size / PAGE_SIZE;
	BUG_ON(obj->pages != NULL);
	obj->pages = drm_malloc_ab(page_count, sizeof(struct page *));
	if (obj->pages == NULL)
		return -ENOMEM;

	inode = obj->base.filp->f_path.dentry->d_inode;
	mapping = inode->i_mapping;
	gfpmask |= mapping_gfp_mask(mapping);

	for (i = 0; i < page_count; i++) {
		page = shmem_read_mapping_page_gfp(mapping, i, gfpmask);
		if (IS_ERR(page))
			goto err_pages;
		obj->pages[i] = page;
	}

	return 0;
err_pages:
	while (i--)
		page_cache_release(obj->pages[i]);
	drm_free_large(obj->pages);
	obj->pages = NULL;
	return PTR_ERR(page);
}
Example #2
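udl_prime_create(): wraps an imported sg_table in a new udl GEM object and expands it into a page array via drm_prime_sg_to_page_addr_arrays(). Note that this version leaks obj if the page-array allocation fails.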
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		return -ENOMEM;
	}

	drm_prime_sg_to_page_addr_arrays(sg, obj->pages, NULL, npages);

	*obj_p = obj;
	return 0;
}
Example #3
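A variant of udl_prime_create() that walks the scatterlist itself with for_each_sg() instead of calling the drm_prime helper.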
static int udl_prime_create(struct drm_device *dev,
			    size_t size,
			    struct sg_table *sg,
			    struct udl_gem_object **obj_p)
{
	struct udl_gem_object *obj;
	int npages;
	int i;
	struct scatterlist *iter;

	npages = size / PAGE_SIZE;

	*obj_p = NULL;
	obj = udl_gem_alloc_object(dev, npages * PAGE_SIZE);
	if (!obj)
		return -ENOMEM;

	obj->sg = sg;
	obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (obj->pages == NULL) {
		DRM_ERROR("obj pages is NULL %d\n", npages);
		return -ENOMEM;
	}

	for_each_sg(sg->sgl, iter, npages, i)
		obj->pages[i] = sg_page(iter);

	*obj_p = obj;
	return 0;
}
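Example #4
i915_gem_dmabuf_vmap(): builds a temporary page array from the object's scatterlist purely so the pages can be handed to vmap(); the array is freed again as soon as the mapping exists.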
static void *i915_gem_dmabuf_vmap(struct dma_buf *dma_buf)
{
	struct drm_i915_gem_object *obj = dma_buf->priv;
	struct drm_device *dev = obj->base.dev;
	struct scatterlist *sg;
	struct page **pages;
	int ret, i;

	ret = i915_mutex_lock_interruptible(dev);
	if (ret)
		return ERR_PTR(ret);

	if (obj->dma_buf_vmapping) {
		obj->vmapping_count++;
		goto out_unlock;
	}

	ret = i915_gem_object_get_pages(obj);
	if (ret)
		goto error;

	ret = -ENOMEM;

	pages = drm_malloc_ab(obj->pages->nents, sizeof(struct page *));
	if (pages == NULL)
		goto error;

	for_each_sg(obj->pages->sgl, sg, obj->pages->nents, i)
		pages[i] = sg_page(sg);

	obj->dma_buf_vmapping = vmap(pages, obj->pages->nents, 0, PAGE_KERNEL);
	drm_free_large(pages);

	if (!obj->dma_buf_vmapping)
		goto error;

	obj->vmapping_count = 1;
	i915_gem_object_pin_pages(obj);
out_unlock:
	mutex_unlock(&dev->struct_mutex);
	return obj->dma_buf_vmapping;

error:
	mutex_unlock(&dev->struct_mutex);
	return ERR_PTR(ret);
}
Example #5
File: msm_gem.c  Project: 513855417/linux
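msm_gem_import(): imports a dma-buf into an MSM GEM object, allocating the page array with drm_malloc_ab() and filling it through drm_prime_sg_to_page_addr_arrays().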
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
		struct dma_buf *dmabuf, struct sg_table *sgt)
{
	struct msm_gem_object *msm_obj;
	struct drm_gem_object *obj;
	uint32_t size;
	int ret, npages;

	/* if we don't have IOMMU, don't bother pretending we can import: */
	if (!iommu_present(&platform_bus_type)) {
		dev_err(dev->dev, "cannot import without IOMMU\n");
		return ERR_PTR(-EINVAL);
	}

	size = PAGE_ALIGN(dmabuf->size);

	ret = msm_gem_new_impl(dev, size, MSM_BO_WC, dmabuf->resv, &obj);
	if (ret)
		goto fail;

	drm_gem_private_object_init(dev, obj, size);

	npages = size / PAGE_SIZE;

	msm_obj = to_msm_bo(obj);
	msm_obj->sgt = sgt;
	msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
	if (!msm_obj->pages) {
		ret = -ENOMEM;
		goto fail;
	}

	ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
	if (ret)
		goto fail;

	return obj;

fail:
	if (obj)
		drm_gem_object_unreference_unlocked(obj);

	return ERR_PTR(ret);
}
Example #6
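radeon_vm_get_bos(): allocates one radeon_bo_list entry for the page directory plus one per used page table, and links each entry onto the caller's validation list.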
/**
 * radeon_vm_get_bos - add the vm BOs to a validation list
 *
 * @rdev: radeon_device pointer
 * @vm: vm providing the BOs
 * @head: head of validation list
 *
 * Add the page directory to the list of BOs to
 * validate for command submission (cayman+).
 */
struct radeon_bo_list *radeon_vm_get_bos(struct radeon_device *rdev,
					  struct radeon_vm *vm,
					  struct list_head *head)
{
	struct radeon_bo_list *list;
	unsigned i, idx;

	list = drm_malloc_ab(vm->max_pde_used + 2,
			     sizeof(struct radeon_bo_list));
	if (!list)
		return NULL;

	/* add the vm page table to the list */
	list[0].robj = vm->page_directory;
	list[0].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
	list[0].tv.bo = &vm->page_directory->tbo;
	list[0].tv.shared = true;
	list[0].tiling_flags = 0;
	list_add(&list[0].tv.head, head);

	for (i = 0, idx = 1; i <= vm->max_pde_used; i++) {
		if (!vm->page_tables[i].bo)
			continue;

		list[idx].robj = vm->page_tables[i].bo;
		list[idx].prefered_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].allowed_domains = RADEON_GEM_DOMAIN_VRAM;
		list[idx].tv.bo = &list[idx].robj->tbo;
		list[idx].tv.shared = true;
		list[idx].tiling_flags = 0;
		list_add(&list[idx++].tv.head, head);
	}

	return list;
}
Example #7
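amdgpu_bo_list_set(): resolves user handles into BO references, packing regular BOs at the front of the array and userptr BOs at the back; the error path drops every reference taken so far.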
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     struct amdgpu_bo_list *list,
				     struct drm_amdgpu_bo_list_entry *info,
				     unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);

		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = true;

		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	drm_free_large(array);
	return r;
}
Example #8
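amdgpu_bo_list_ioctl(): copies the user's BO-info array into a drm_malloc_ab() buffer, tolerating a bo_info_size that differs from the kernel's structure size, then dispatches the create/destroy/update operation.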
int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
				struct drm_file *filp)
{
	const uint32_t info_size = sizeof(struct drm_amdgpu_bo_list_entry);

	struct amdgpu_device *adev = dev->dev_private;
	struct amdgpu_fpriv *fpriv = filp->driver_priv;
	union drm_amdgpu_bo_list *args = data;
	uint32_t handle = args->in.list_handle;
	const void __user *uptr = (const void*)(long)args->in.bo_info_ptr;

	struct drm_amdgpu_bo_list_entry *info;
	struct amdgpu_bo_list *list;

	int r;

	info = drm_malloc_ab(args->in.bo_number,
			     sizeof(struct drm_amdgpu_bo_list_entry));
	if (!info)
		return -ENOMEM;

	/* copy the handle array from userspace to a kernel buffer */
	r = -EFAULT;
	if (likely(info_size == args->in.bo_info_size)) {
		unsigned long bytes = args->in.bo_number *
			args->in.bo_info_size;

		if (copy_from_user(info, uptr, bytes))
			goto error_free;

	} else {
		unsigned long bytes = min(args->in.bo_info_size, info_size);
		unsigned i;

		memset(info, 0, args->in.bo_number * info_size);
		for (i = 0; i < args->in.bo_number; ++i) {
			if (copy_from_user(&info[i], uptr, bytes))
				goto error_free;
			
			uptr += args->in.bo_info_size;
		}
	}

	switch (args->in.operation) {
	case AMDGPU_BO_LIST_OP_CREATE:
		r = amdgpu_bo_list_create(fpriv, &list, &handle);
		if (r) 
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
					      args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;
		
	case AMDGPU_BO_LIST_OP_DESTROY:
		amdgpu_bo_list_destroy(fpriv, handle);
		handle = 0;
		break;

	case AMDGPU_BO_LIST_OP_UPDATE:
		r = -ENOENT;
		list = amdgpu_bo_list_get(fpriv, handle);
		if (!list)
			goto error_free;

		r = amdgpu_bo_list_set(adev, filp, list, info,
					      args->in.bo_number);
		amdgpu_bo_list_put(list);
		if (r)
			goto error_free;

		break;

	default:
		r = -EINVAL;
		goto error_free;
	}

	memset(args, 0, sizeof(*args));
	args->out.list_handle = handle;
	drm_free_large(info);

	return 0;

error_free:
	drm_free_large(info);
	return r;
}
Example #9
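radeon_cs_parser_init(): buffers each command-stream chunk from userspace in a drm_malloc_ab() allocation; the early error returns rely on the caller to tear down the partially initialized parser.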
/* XXX: note that this is called from the legacy UMS CS ioctl as well */
int radeon_cs_parser_init(struct radeon_cs_parser *p, void *data)
{
	struct drm_radeon_cs *cs = data;
	uint64_t *chunk_array_ptr;
	unsigned size, i;
	u32 ring = RADEON_CS_RING_GFX;
	s32 priority = 0;

	if (!cs->num_chunks) {
		return 0;
	}
	/* get chunks */
	INIT_LIST_HEAD(&p->validated);
	p->idx = 0;
	p->ib.sa_bo = NULL;
	p->ib.semaphore = NULL;
	p->const_ib.sa_bo = NULL;
	p->const_ib.semaphore = NULL;
	p->chunk_ib_idx = -1;
	p->chunk_relocs_idx = -1;
	p->chunk_flags_idx = -1;
	p->chunk_const_ib_idx = -1;
	p->chunks_array = kcalloc(cs->num_chunks, sizeof(uint64_t), GFP_KERNEL);
	if (p->chunks_array == NULL) {
		return -ENOMEM;
	}
	chunk_array_ptr = (uint64_t *)(unsigned long)(cs->chunks);
	if (copy_from_user(p->chunks_array, chunk_array_ptr,
			       sizeof(uint64_t)*cs->num_chunks)) {
		return -EFAULT;
	}
	p->cs_flags = 0;
	p->nchunks = cs->num_chunks;
	p->chunks = kcalloc(p->nchunks, sizeof(struct radeon_cs_chunk), GFP_KERNEL);
	if (p->chunks == NULL) {
		return -ENOMEM;
	}
	for (i = 0; i < p->nchunks; i++) {
		struct drm_radeon_cs_chunk __user **chunk_ptr = NULL;
		struct drm_radeon_cs_chunk user_chunk;
		uint32_t __user *cdata;

		chunk_ptr = (void __user*)(unsigned long)p->chunks_array[i];
		if (copy_from_user(&user_chunk, chunk_ptr,
				       sizeof(struct drm_radeon_cs_chunk))) {
			return -EFAULT;
		}
		p->chunks[i].length_dw = user_chunk.length_dw;
		p->chunks[i].chunk_id = user_chunk.chunk_id;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_RELOCS) {
			p->chunk_relocs_idx = i;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			p->chunk_ib_idx = i;
			/* zero length IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB) {
			p->chunk_const_ib_idx = i;
			/* zero length CONST IB isn't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->chunk_flags_idx = i;
			/* zero length flags aren't useful */
			if (p->chunks[i].length_dw == 0)
				return -EINVAL;
		}

		size = p->chunks[i].length_dw;
		cdata = (void __user *)(unsigned long)user_chunk.chunk_data;
		p->chunks[i].user_ptr = cdata;
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_CONST_IB)
			continue;

		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_IB) {
			if (!p->rdev || !(p->rdev->flags & RADEON_IS_AGP))
				continue;
		}

		p->chunks[i].kdata = drm_malloc_ab(size, sizeof(uint32_t));
		size *= sizeof(uint32_t);
		if (p->chunks[i].kdata == NULL) {
			return -ENOMEM;
		}
		if (copy_from_user(p->chunks[i].kdata, cdata, size)) {
			return -EFAULT;
		}
		if (p->chunks[i].chunk_id == RADEON_CHUNK_ID_FLAGS) {
			p->cs_flags = p->chunks[i].kdata[0];
			if (p->chunks[i].length_dw > 1)
				ring = p->chunks[i].kdata[1];
			if (p->chunks[i].length_dw > 2)
				priority = (s32)p->chunks[i].kdata[2];
		}
	}

	/* these are KMS only */
	if (p->rdev) {
		if ((p->cs_flags & RADEON_CS_USE_VM) &&
		    !p->rdev->vm_manager.enabled) {
			DRM_ERROR("VM not active on asic!\n");
			return -EINVAL;
		}

		if (radeon_cs_get_ring(p, ring, priority))
			return -EINVAL;

		/* we only support VM on some SI+ rings */
		if ((p->cs_flags & RADEON_CS_USE_VM) == 0) {
			if (p->rdev->asic->ring[p->ring]->cs_parse == NULL) {
				DRM_ERROR("Ring %d requires VM!\n", p->ring);
				return -EINVAL;
			}
		} else {
			if (p->rdev->asic->ring[p->ring]->ib_parse == NULL) {
				DRM_ERROR("VM not supported on ring %d!\n",
					  p->ring);
				return -EINVAL;
			}
		}
	}

	return 0;
}
Example #10
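An earlier revision of amdgpu_bo_list_set() (compare example #7): it only records a has_userptr flag rather than partitioning the array, and its error path frees the array without unreferencing the BOs already looked up.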
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
				     struct drm_file *filp,
				     struct amdgpu_bo_list *list,
				     struct drm_amdgpu_bo_list_entry *info,
				     unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;

	bool has_userptr = false;
	unsigned i;

	array = drm_malloc_ab(num_entries, sizeof(struct amdgpu_bo_list_entry));
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry = &array[i];
		struct drm_gem_object *gobj;

		gobj = drm_gem_object_lookup(adev->ddev, filp, info[i].bo_handle);
		if (!gobj)
			goto error_free;

		entry->robj = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_unreference_unlocked(gobj);
		entry->priority = info[i].bo_priority;
		entry->prefered_domains = entry->robj->initial_domain;
		entry->allowed_domains = entry->prefered_domains;
		if (entry->allowed_domains == AMDGPU_GEM_DOMAIN_VRAM)
			entry->allowed_domains |= AMDGPU_GEM_DOMAIN_GTT;
		if (amdgpu_ttm_tt_has_userptr(entry->robj->tbo.ttm)) {
			has_userptr = true;
			entry->prefered_domains = AMDGPU_GEM_DOMAIN_GTT;
			entry->allowed_domains = AMDGPU_GEM_DOMAIN_GTT;
		}
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->prefered_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	drm_free_large(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->has_userptr = has_userptr;
	list->array = array;
	list->num_entries = num_entries;

	return 0;

error_free:
	drm_free_large(array);
	return -ENOENT;
}
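All ten examples follow the same contract: drm_malloc_ab(nmemb, size) is an overflow-checked array allocator that uses kmalloc() for small arrays and falls back to vmalloc space for large ones, and every buffer it returns must be released with drm_free_large(). Below is a minimal sketch of that contract, assuming the behaviour of the old drm_mem_util.h helper (later kernels removed it in favour of kvmalloc_array() and kvfree()); example_malloc_ab is a hypothetical name, not the kernel symbol.

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

/*
 * Sketch of the drm_malloc_ab() contract, not the verbatim kernel
 * helper: reject nmemb * size overflow, kmalloc() small arrays,
 * and fall back to vmalloc() for anything larger than a page.
 */
static inline void *example_malloc_ab(size_t nmemb, size_t size)
{
	/* An overflowing nmemb * size must fail, not wrap. */
	if (size != 0 && nmemb > SIZE_MAX / size)
		return NULL;

	if (nmemb * size <= PAGE_SIZE)
		return kmalloc(nmemb * size, GFP_KERNEL);

	return vmalloc(nmemb * size);
}

Note that, unlike kcalloc(), the returned memory is not zeroed; that is why examples #7 and #10 memset() the array immediately after allocating it.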