int amdgpu_gem_prime_mmap(struct drm_gem_object *obj,
			  struct vm_area_struct *vma)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(obj);
	struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev);
	unsigned asize = amdgpu_bo_size(bo);
	int ret;

	if (!vma->vm_file)
		return -ENODEV;

	if (adev == NULL)
		return -ENODEV;

	/* Check for valid size. */
	if (asize < vma->vm_end - vma->vm_start)
		return -EINVAL;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    (bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS)) {
		return -EPERM;
	}
	vma->vm_pgoff += amdgpu_bo_mmap_offset(bo) >> PAGE_SHIFT;

	/* prime mmap does not need to check access, so allow here */
	ret = drm_vma_node_allow(&obj->vma_node, vma->vm_file->private_data);
	if (ret)
		return ret;

	ret = ttm_bo_mmap(vma->vm_file, vma, &adev->mman.bdev);
	drm_vma_node_revoke(&obj->vma_node, vma->vm_file->private_data);

	return ret;
}
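/*
 * Illustrative userspace-side sketch, not part of the kernel source: a CPU
 * mapping of an exported dma-buf fd. In kernels of this vintage the mmap()
 * below reaches amdgpu_gem_prime_mmap() via drm_gem_dmabuf_mmap(), so the
 * size and userptr/NO_CPU_ACCESS checks above apply to this path as well.
 * The helper name map_dmabuf() is hypothetical.
 */
#include <stddef.h>
#include <sys/mman.h>

static void *map_dmabuf(int dmabuf_fd, size_t size)
{
	void *ptr = mmap(NULL, size, PROT_READ | PROT_WRITE,
			 MAP_SHARED, dmabuf_fd, 0);

	return ptr == MAP_FAILED ? NULL : ptr;
}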
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm))
		return ERR_PTR(-EPERM);

	return drm_gem_prime_export(dev, gobj, flags);
}
struct dma_buf *amdgpu_gem_prime_export(struct drm_device *dev,
					struct drm_gem_object *gobj,
					int flags)
{
	struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj);
	struct dma_buf *buf;

	if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) ||
	    bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID)
		return ERR_PTR(-EPERM);

	buf = drm_gem_prime_export(dev, gobj, flags);
	if (!IS_ERR(buf))
		buf->file->f_mapping = dev->anon_inode->i_mapping;

	return buf;
}
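/*
 * Illustrative sketch, trimmed and not taken from this file: how the PRIME
 * hooks above are typically wired into the driver's drm_driver table for
 * kernels of this era. Exact members and feature flags vary between
 * versions; the variable name kms_driver_sketch is hypothetical.
 */
static struct drm_driver kms_driver_sketch = {
	.driver_features = DRIVER_GEM | DRIVER_RENDER | DRIVER_PRIME,
	.prime_handle_to_fd = drm_gem_prime_handle_to_fd,
	.prime_fd_to_handle = drm_gem_prime_fd_to_handle,
	.gem_prime_export = amdgpu_gem_prime_export,
	.gem_prime_import = drm_gem_prime_import,
	.gem_prime_mmap = amdgpu_gem_prime_mmap,
};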
static int amdgpu_bo_list_set(struct amdgpu_device *adev,
			      struct drm_file *filp,
			      struct amdgpu_bo_list *list,
			      struct drm_amdgpu_bo_list_entry *info,
			      unsigned num_entries)
{
	struct amdgpu_bo_list_entry *array;
	struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
	struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
	struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
	unsigned last_entry = 0, first_userptr = num_entries;
	unsigned i;
	int r;
	unsigned long total_size = 0;

	array = kvmalloc_array(num_entries,
			       sizeof(struct amdgpu_bo_list_entry),
			       GFP_KERNEL);
	if (!array)
		return -ENOMEM;
	memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));

	for (i = 0; i < num_entries; ++i) {
		struct amdgpu_bo_list_entry *entry;
		struct drm_gem_object *gobj;
		struct amdgpu_bo *bo;
		struct mm_struct *usermm;

		gobj = drm_gem_object_lookup(filp, info[i].bo_handle);
		if (!gobj) {
			r = -ENOENT;
			goto error_free;
		}

		bo = amdgpu_bo_ref(gem_to_amdgpu_bo(gobj));
		drm_gem_object_put_unlocked(gobj);

		/*
		 * Userptr BOs may only be used by the process that created
		 * them; they are packed at the tail of the array, regular
		 * BOs at the head.
		 */
		usermm = amdgpu_ttm_tt_get_usermm(bo->tbo.ttm);
		if (usermm) {
			if (usermm != current->mm) {
				amdgpu_bo_unref(&bo);
				r = -EPERM;
				goto error_free;
			}
			entry = &array[--first_userptr];
		} else {
			entry = &array[last_entry++];
		}

		entry->robj = bo;
		entry->priority = min(info[i].bo_priority,
				      AMDGPU_BO_LIST_MAX_PRIORITY);
		entry->tv.bo = &entry->robj->tbo;
		entry->tv.shared = !entry->robj->prime_shared_count;

		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
			gds_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
			gws_obj = entry->robj;
		if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
			oa_obj = entry->robj;

		total_size += amdgpu_bo_size(entry->robj);
		trace_amdgpu_bo_list_set(list, entry->robj);
	}

	/* Drop the references held by the previous contents of the list. */
	for (i = 0; i < list->num_entries; ++i)
		amdgpu_bo_unref(&list->array[i].robj);

	kvfree(list->array);

	list->gds_obj = gds_obj;
	list->gws_obj = gws_obj;
	list->oa_obj = oa_obj;
	list->first_userptr = first_userptr;
	list->array = array;
	list->num_entries = num_entries;

	trace_amdgpu_cs_bo_status(list->num_entries, total_size);
	return 0;

error_free:
	while (i--)
		amdgpu_bo_unref(&array[i].robj);
	kvfree(array);
	return r;
}
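/*
 * Illustrative sketch, an assumption rather than code from this file: the
 * array built by amdgpu_bo_list_set() keeps regular BOs in
 * [0, first_userptr) and userptr BOs in [first_userptr, num_entries), so a
 * caller can walk just the userptr entries as shown below. The function
 * name example_walk_userptr_entries() is hypothetical.
 */
static void example_walk_userptr_entries(struct amdgpu_bo_list *list)
{
	unsigned i;

	for (i = list->first_userptr; i < list->num_entries; ++i) {
		struct amdgpu_bo *bo = list->array[i].robj;

		/* each userptr BO here was validated against current->mm */
		(void)bo;
	}
}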