/**
 * vmw_bo_to_validate_list - add a bo to a validate list
 *
 * @sw_context: The software context used for this command submission batch.
 * @bo: The buffer object to add.
 * @fence_flags: Fence flags to be or'ed with any other fence flags for
 * this buffer on this submission batch.
 * @p_val_node: If non-NULL, will be updated with the validate node number
 * on return.
 *
 * Returns -EINVAL if the limit of number of buffer objects per command
 * submission is reached.
 */
static int vmw_bo_to_validate_list(struct vmw_sw_context *sw_context,
				   struct ttm_buffer_object *bo,
				   uint32_t fence_flags,
				   uint32_t *p_val_node)
{
	uint32_t val_node;
	struct ttm_validate_buffer *val_buf;

	val_node = vmw_dmabuf_validate_node(bo, sw_context->cur_val_buf);

	if (unlikely(val_node >= VMWGFX_MAX_VALIDATIONS)) {
		DRM_ERROR("Max number of DMA buffers per submission "
			  "exceeded.\n");
		return -EINVAL;
	}

	val_buf = &sw_context->val_bufs[val_node];
	if (unlikely(val_node == sw_context->cur_val_buf)) {
		/*
		 * First time this bo is seen in the batch: take a
		 * reference and append a new node to the validate list.
		 */
		val_buf->new_sync_obj_arg = NULL;
		val_buf->bo = ttm_bo_reference(bo);
		list_add_tail(&val_buf->head, &sw_context->validate_nodes);
		++sw_context->cur_val_buf;
	}

	val_buf->new_sync_obj_arg = (void *)
		((unsigned long) val_buf->new_sync_obj_arg | fence_flags);
	sw_context->fence_flags |= fence_flags;

	if (p_val_node)
		*p_val_node = val_node;

	return 0;
}
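/*
 * Usage sketch (assumption, not taken from the function above): a command
 * parser would typically call vmw_bo_to_validate_list() once per buffer
 * referenced by the command stream. example_track_buffer() and the choice
 * of DRM_VMW_FENCE_FLAG_EXEC are illustrative stand-ins for the driver's
 * own relocation path.
 */
static int example_track_buffer(struct vmw_sw_context *sw_context,
				struct ttm_buffer_object *bo)
{
	uint32_t val_node;
	int ret;

	/* Record the bo; fencing with an EXEC flag is illustrative. */
	ret = vmw_bo_to_validate_list(sw_context, bo,
				      DRM_VMW_FENCE_FLAG_EXEC, &val_node);
	if (unlikely(ret != 0))
		return ret;

	/* val_node could now be stored to patch relocations later. */
	return 0;
}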
int ttm_fbdev_mmap(struct vm_area_struct *vma, struct ttm_buffer_object *bo)
{
	if (vma->vm_pgoff != 0)
		return -EACCES;

	vma->vm_ops = &ttm_bo_vm_ops;
	vma->vm_private_data = ttm_bo_reference(bo);
	vma->vm_flags |= VM_IO | VM_MIXEDMAP | VM_DONTEXPAND;
	return 0;
}
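/*
 * Usage sketch (assumption): a driver's fbdev mmap hook can simply forward
 * to ttm_fbdev_mmap() with its private framebuffer bo. struct
 * example_fb_par and its bo member are hypothetical names used for
 * illustration only.
 */
static int example_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
{
	struct example_fb_par *par = info->par;

	/* Delegate VMA setup to TTM; it takes its own bo reference. */
	return ttm_fbdev_mmap(vma, par->bo);
}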
struct ttm_buffer_object *ttm_buffer_object_lookup(struct ttm_object_file *tfile,
						   uint32_t handle)
{
	struct ttm_bo_user_object *user_bo;
	struct ttm_base_object *base;

	user_bo = ttm_bo_user_lookup(tfile, handle);
	if (unlikely(user_bo == NULL))
		return NULL;

	/*
	 * The lookup holds a base-object reference. Convert it into a bo
	 * reference for the caller: take a bo reference, then drop the
	 * base-object one.
	 */
	(void)ttm_bo_reference(&user_bo->bo);
	base = &user_bo->base;
	ttm_base_object_unref(&base);
	return &user_bo->bo;
}
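/*
 * Usage sketch (assumption): a typical ioctl path resolves a user handle,
 * operates on the bo, and drops the lookup reference with ttm_bo_unref().
 * example_op_on_handle() is a hypothetical caller.
 */
static int example_op_on_handle(struct ttm_object_file *tfile, uint32_t handle)
{
	struct ttm_buffer_object *bo;

	bo = ttm_buffer_object_lookup(tfile, handle);
	if (unlikely(bo == NULL))
		return -EINVAL;

	/* ... operate on bo while holding the reference ... */

	ttm_bo_unref(&bo);	/* release the reference the lookup took */
	return 0;
}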
int psb_validate_kernel_buffer(struct psb_context *context,
			       struct ttm_buffer_object *bo,
			       uint32_t fence_class,
			       uint64_t set_flags, uint64_t clr_flags)
{
	struct psb_validate_buffer *item;
	uint32_t cur_fence_type;
	int ret;

	if (unlikely(context->used_buffers >= PSB_NUM_VALIDATE_BUFFERS)) {
		DRM_ERROR("Out of free validation buffer entries for "
			  "kernel buffer validation.\n");
		return -ENOMEM;
	}

	item = &context->buffers[context->used_buffers];
	item->user_val_arg = NULL;
	item->base.reserved = 0;

	ret = ttm_bo_reserve(bo, 1, 0, 1, context->val_seq);
	if (unlikely(ret != 0))
		return ret;

	ret = psb_placement_fence_type(bo, set_flags, clr_flags, fence_class,
				       &cur_fence_type);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		return ret;
	}

	item->base.bo = ttm_bo_reference(bo);
	item->base.new_sync_obj_arg = (void *) (unsigned long) cur_fence_type;
	item->base.reserved = 1;

	/* Internal locking ??? FIXMEAC */
	list_add_tail(&item->base.head, &context->kern_validate_list);
	context->used_buffers++;

	/*
	 * ret = ttm_bo_validate(bo, 1, 0, 0);
	 * if (unlikely(ret != 0))
	 *	goto out_unlock;
	 */

	item->offset = bo->offset;
	item->flags = bo->mem.placement;
	context->fence_types |= cur_fence_type;

	return ret;
}
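/*
 * Usage sketch (assumption): pinning a driver-owned command buffer into a
 * submission context before building the command stream. The fence class
 * of 0 and the TT placement flag are illustrative values, and
 * example_pin_cmd_buffer() is a hypothetical caller.
 */
static int example_pin_cmd_buffer(struct psb_context *context,
				  struct ttm_buffer_object *cmd_bo)
{
	/* Request a TT placement; clear no placement flags. */
	return psb_validate_kernel_buffer(context, cmd_bo, 0 /* fence class */,
					  TTM_PL_FLAG_TT, 0);
}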
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
			   struct ttm_bo_device *bdev,
			   struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_ub_arg *arg = data;
	struct ttm_pl_create_ub_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	size_t acc_size = ttm_pl_size(bdev,
				      (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
					  sizeof(struct ttm_buffer_object));
#endif

	if (req->user_address & ~PAGE_MASK) {
		printk(KERN_ERR "User pointer buffer needs page alignment\n");
		return -EFAULT;
	}

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}

	bo = &user_bo->bo;
	placement.num_placement = 1;
	placement.placement = &flags;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	/* For kernels before 3.3, use the desired type. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_user
#else
	/*
	 * TTM_HACK_WORKAROUND_ttm_bo_type_user -- Hack for porting, as
	 * ttm_bo_type_user is no longer implemented. This will not result
	 * in working code. FIXME - to be removed.
	 */
#warning warning: ttm_bo_type_user no longer supported
	/* For kernels 3.3+, use the wrong type, which compiles but does not work. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_kernel
#endif

#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
	/*
	 * Handle a frame buffer allocated in user space: convert the user
	 * space virtual address into a page list. Note the test must be
	 * >= 3.3.0 (not > 3.3.0) so that sg is declared for every kernel
	 * that takes the ttm_bo_type_sg init path below.
	 */
	int page_nr = 0;
	struct vm_area_struct *vma = NULL;
	struct sg_table *sg = NULL;
	unsigned long num_pages = 0;
	struct page **pages = NULL;
	unsigned long before_flags;

	num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
	if (unlikely(pages == NULL)) {
		printk(KERN_ERR "kzalloc pages failed\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, req->user_address);
	if (unlikely(vma == NULL)) {
		up_read(&current->mm->mmap_sem);
		kfree(pages);
		printk(KERN_ERR "find_vma failed\n");
		ret = -EFAULT;
		goto out_unlock;
	}

	/* Temporarily drop VM_IO/VM_PFNMAP so get_user_pages() accepts the VMA. */
	before_flags = vma->vm_flags;
	if (vma->vm_flags & (VM_IO | VM_PFNMAP))
		vma->vm_flags &= ~(VM_IO | VM_PFNMAP);

	/* Pages can be written by the caller, but are not forced. */
	page_nr = get_user_pages(current, current->mm,
				 req->user_address,
				 (int)num_pages, 1, 0, pages, NULL);
	vma->vm_flags = before_flags;
	up_read(&current->mm->mmap_sem);

	if (unlikely(page_nr < 0 || (unsigned long)page_nr < num_pages)) {
		int i;

		/* Release any pages that were pinned before bailing out. */
		for (i = 0; i < page_nr; ++i)
			put_page(pages[i]);
		kfree(pages);
		printk(KERN_ERR "get_user_pages err.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}

	sg = drm_prime_pages_to_sg(pages, num_pages);
	if (unlikely(sg == NULL)) {
		unsigned long i;

		for (i = 0; i < num_pages; ++i)
			put_page(pages[i]);
		kfree(pages);
		printk(KERN_ERR "drm_prime_pages_to_sg err.\n");
		ret = -ENOMEM;
		goto out_unlock;
	}
	kfree(pages);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	ret = ttm_bo_init(bdev, bo, req->size,
			  TTM_HACK_WORKAROUND_ttm_bo_type_user,
			  &placement, req->page_alignment,
			  req->user_address, true, NULL,
			  acc_size, NULL, &ttm_bo_user_destroy);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_sg, &placement,
			  req->page_alignment, req->user_address,
			  true, NULL, acc_size, sg,
			  &ttm_ub_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_sg, &placement,
			  req->page_alignment, true, NULL,
			  acc_size, sg, &ttm_ub_bo_user_destroy);
#endif

	/*
	 * Note that ttm_bo_init() would've called the destroy function
	 * on failure!
	 */
	ttm_read_unlock(lock);
	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
out:
	/* ret is 0 on success; propagate the ttm_bo_init() error otherwise. */
	return ret;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(3, 3, 0))
out_unlock:
	/* Failed before ttm_bo_init(): undo lock, accounting and allocation. */
	ttm_read_unlock(lock);
	ttm_mem_global_free(mem_glob, acc_size);
	kfree(user_bo);
	return ret;
#endif
}
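/*
 * Usage sketch (assumption): drivers expose this ioctl by wrapping it in a
 * handler that supplies the per-file ttm_object_file and the device's
 * bdev/lock. All names except ttm_pl_ub_create_ioctl() are hypothetical.
 */
static int example_pl_ub_create(struct drm_device *dev, void *data,
				struct drm_file *file_priv)
{
	struct example_file_priv *priv = file_priv->driver_priv;
	struct example_dev_priv *dev_priv = dev->dev_private;

	return ttm_pl_ub_create_ioctl(priv->tfile, &dev_priv->bdev,
				      &dev_priv->ttm_lock, data);
}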
int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
			struct ttm_bo_device *bdev,
			struct ttm_lock *lock, void *data)
{
	union ttm_pl_create_arg *arg = data;
	struct ttm_pl_create_req *req = &arg->req;
	struct ttm_pl_rep *rep = &arg->rep;
	struct ttm_buffer_object *bo;
	struct ttm_buffer_object *tmp;
	struct ttm_bo_user_object *user_bo;
	uint32_t flags;
	int ret = 0;
	struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
	struct ttm_placement placement = default_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
	size_t acc_size = ttm_pl_size(bdev,
				      (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
	size_t acc_size = ttm_bo_acc_size(bdev, req->size,
					  sizeof(struct ttm_buffer_object));
#endif

	ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
	if (unlikely(ret != 0))
		return ret;

	flags = req->placement;
	user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
	if (unlikely(user_bo == NULL)) {
		ttm_mem_global_free(mem_glob, acc_size);
		return -ENOMEM;
	}

	bo = &user_bo->bo;
	ret = ttm_read_lock(lock, true);
	if (unlikely(ret != 0)) {
		ttm_mem_global_free(mem_glob, acc_size);
		kfree(user_bo);
		return ret;
	}

	placement.num_placement = 1;
	placement.placement = &flags;

	/* Default to write-combined/uncached if no caching was requested. */
	if ((flags & TTM_PL_MASK_CACHING) == 0)
		flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, 0, true, NULL,
			  acc_size, NULL, &ttm_bo_user_destroy);
#else
	ret = ttm_bo_init(bdev, bo, req->size,
			  ttm_bo_type_device, &placement,
			  req->page_alignment, true, NULL,
			  acc_size, NULL, &ttm_bo_user_destroy);
#endif
	ttm_read_unlock(lock);

	/*
	 * Note that ttm_bo_init() would've called the destroy function
	 * on failure!
	 */
	if (unlikely(ret != 0))
		goto out;

	tmp = ttm_bo_reference(bo);
	ret = ttm_base_object_init(tfile, &user_bo->base,
				   flags & TTM_PL_FLAG_SHARED,
				   ttm_buffer_type,
				   &ttm_bo_user_release,
				   &ttm_bo_user_ref_release);
	if (unlikely(ret != 0))
		goto out_err;

	ret = ttm_bo_reserve(bo, true, false, false, 0);
	if (unlikely(ret != 0))
		goto out_err;
	ttm_pl_fill_rep(bo, rep);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);
out:
	/* ret is 0 on success; propagate the ttm_bo_init() error otherwise. */
	return ret;
out_err:
	ttm_bo_unref(&tmp);
	ttm_bo_unref(&bo);
	return ret;
}
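/*
 * Usage sketch (assumption): the non-user-pointer variant is wired up the
 * same way as the wrapper sketched after ttm_pl_ub_create_ioctl(), then
 * registered in the driver's ioctl table. DRM_IOCTL_DEF_DRV requires a
 * matching DRM_IOCTL_PSB_PL_CREATE definition; the PSB_PL_CREATE name and
 * example_pl_create() handler are hypothetical.
 */
static const struct drm_ioctl_desc example_ioctls[] = {
	DRM_IOCTL_DEF_DRV(PSB_PL_CREATE, example_pl_create, DRM_AUTH),
};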
ssize_t ttm_bo_io(struct ttm_bo_device *bdev, struct file *filp,
		  const char __user *wbuf, char __user *rbuf,
		  size_t count, loff_t *f_pos, bool write)
{
	struct ttm_buffer_object *bo;
	struct ttm_bo_driver *driver;
	struct ttm_bo_kmap_obj map;
	unsigned long dev_offset = (*f_pos >> PAGE_SHIFT);
	unsigned long kmap_offset;
	unsigned long kmap_end;
	unsigned long kmap_num;
	size_t io_size;
	unsigned int page_offset;
	char *virtual;
	int ret;
	bool no_wait = false;
	bool dummy;

	read_lock(&bdev->vm_lock);
	bo = ttm_bo_vm_lookup_rb(bdev, dev_offset, 1);
	if (likely(bo != NULL))
		ttm_bo_reference(bo);
	read_unlock(&bdev->vm_lock);

	if (unlikely(bo == NULL))
		return -EFAULT;

	driver = bo->bdev->driver;
	if (unlikely(!driver->verify_access)) {
		ret = -EPERM;
		goto out_unref;
	}

	ret = driver->verify_access(bo, filp);
	if (unlikely(ret != 0))
		goto out_unref;

	kmap_offset = dev_offset - bo->vm_node->start;
	if (unlikely(kmap_offset >= bo->num_pages)) {
		ret = -EFBIG;
		goto out_unref;
	}

	page_offset = *f_pos & ~PAGE_MASK;
	io_size = bo->num_pages - kmap_offset;
	io_size = (io_size << PAGE_SHIFT) - page_offset;
	if (count < io_size)
		io_size = count;

	kmap_end = (*f_pos + count - 1) >> PAGE_SHIFT;
	kmap_num = kmap_end - kmap_offset + 1;

	ret = ttm_bo_reserve(bo, true, no_wait, false, 0);
	switch (ret) {
	case 0:
		break;
	case -EBUSY:
		ret = -EAGAIN;
		goto out_unref;
	default:
		goto out_unref;
	}

	ret = ttm_bo_kmap(bo, kmap_offset, kmap_num, &map);
	if (unlikely(ret != 0)) {
		ttm_bo_unreserve(bo);
		goto out_unref;
	}

	virtual = ttm_kmap_obj_virtual(&map, &dummy);
	virtual += page_offset;

	if (write)
		ret = copy_from_user(virtual, wbuf, io_size);
	else
		ret = copy_to_user(rbuf, virtual, io_size);

	ttm_bo_kunmap(&map);
	ttm_bo_unreserve(bo);
	ttm_bo_unref(&bo);

	if (unlikely(ret != 0))
		return -EFBIG;

	*f_pos += io_size;

	return io_size;
out_unref:
	ttm_bo_unref(&bo);
	return ret;
}
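/*
 * Usage sketch (assumption): a driver can back its file read op with
 * ttm_bo_io() by passing write == false so data is copied from the bo into
 * the user buffer. The example_dev_priv lookup names are hypothetical.
 */
static ssize_t example_drm_read(struct file *filp, char __user *buf,
				size_t count, loff_t *f_pos)
{
	struct drm_file *file_priv = filp->private_data;
	struct example_dev_priv *dev_priv =
		file_priv->minor->dev->dev_private;

	/* wbuf is unused on the read path, so pass NULL. */
	return ttm_bo_io(&dev_priv->bdev, filp, NULL, buf, count, f_pos,
			 false);
}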