/*
 * ttm_bo_create - allocate and initialize a single buffer object.
 *
 * Accounts @acc_size against the device's global memory accounting,
 * allocates a zeroed ttm_buffer_object and hands it to ttm_bo_init().
 * On success *p_bo points at the new object; on failure the error code
 * is returned and nothing is left allocated (ttm_bo_init's destroy
 * path reclaims the object on init failure — see the note repeated at
 * the other ttm_bo_init call sites in this file).
 */
int ttm_bo_create(struct ttm_bo_device *bdev, unsigned long size,
		  enum ttm_bo_type type, struct ttm_placement *placement,
		  uint32_t page_alignment, unsigned long buffer_start,
		  bool interruptible, struct file *persistent_swap_storage,
		  struct ttm_buffer_object **p_bo)
{
	struct ttm_mem_global *glob = bdev->glob->mem_glob;
	struct ttm_buffer_object *new_bo;
	size_t acc_size = ttm_bo_acc_size(bdev, size,
					  sizeof(struct ttm_buffer_object));
	int ret = ttm_mem_global_alloc(glob, acc_size, false, false);

	if (unlikely(ret != 0))
		return ret;

	new_bo = kzalloc(sizeof(*new_bo), GFP_KERNEL);
	if (unlikely(new_bo == NULL)) {
		/* Give back the accounted size; nothing else was set up. */
		ttm_mem_global_free(glob, acc_size);
		return -ENOMEM;
	}

	ret = ttm_bo_init(bdev, new_bo, size, type, placement, page_alignment,
			  buffer_start, interruptible, persistent_swap_storage,
			  acc_size, NULL);
	if (likely(ret == 0))
		*p_bo = new_bo;

	return ret;
}
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile, struct ttm_bo_device *bdev, struct ttm_lock *lock, void *data) { union ttm_pl_create_ub_arg *arg = data; struct ttm_pl_create_ub_req *req = &arg->req; struct ttm_pl_rep *rep = &arg->rep; struct ttm_buffer_object *bo; struct ttm_buffer_object *tmp; struct ttm_bo_user_object *user_bo; uint32_t flags; int ret = 0; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; struct ttm_placement placement = default_placement; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)) size_t acc_size = ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT); #else size_t acc_size = ttm_bo_acc_size(bdev, req->size, sizeof(struct ttm_buffer_object)); #endif if (req->user_address & ~PAGE_MASK) { printk(KERN_ERR "User pointer buffer need page alignment\n"); return -EFAULT; } ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); if (unlikely(ret != 0)) return ret; flags = req->placement; user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); if (unlikely(user_bo == NULL)) { ttm_mem_global_free(mem_glob, acc_size); return -ENOMEM; } ret = ttm_read_lock(lock, true); if (unlikely(ret != 0)) { ttm_mem_global_free(mem_glob, acc_size); kfree(user_bo); return ret; } bo = &user_bo->bo; placement.num_placement = 1; placement.placement = &flags; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)) /* For kernel 3.0, use the desired type. */ #define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_user #else /* TTM_HACK_WORKAROUND_ttm_bo_type_user -- Hack for porting, as ttm_bo_type_user is no longer implemented. This will not result in working code. FIXME - to be removed. */ #warning warning: ttm_bo_type_user no longer supported /* For kernel 3.3+, use the wrong type, which will compile but not work. 
*/ #define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_kernel #endif #if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0)) /* Handle frame buffer allocated in user space, Convert user space virtual address into pages list */ unsigned int page_nr = 0; struct vm_area_struct *vma = NULL; struct sg_table *sg = NULL; unsigned long num_pages = 0; struct page **pages = 0; num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT; pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL); if (unlikely(pages == NULL)) { printk(KERN_ERR "kzalloc pages failed\n"); return -ENOMEM; } down_read(¤t->mm->mmap_sem); vma = find_vma(current->mm, req->user_address); if (unlikely(vma == NULL)) { up_read(¤t->mm->mmap_sem); kfree(pages); printk(KERN_ERR "find_vma failed\n"); return -EFAULT; } unsigned long before_flags = vma->vm_flags; if (vma->vm_flags & (VM_IO | VM_PFNMAP)) vma->vm_flags = vma->vm_flags & ((~VM_IO) & (~VM_PFNMAP)); page_nr = get_user_pages(current, current->mm, req->user_address, (int)(num_pages), 1, 0, pages, NULL); vma->vm_flags = before_flags; up_read(¤t->mm->mmap_sem); /* can be written by caller, not forced */ if (unlikely(page_nr < num_pages)) { kfree(pages); pages = 0; printk(KERN_ERR "get_user_pages err.\n"); return -ENOMEM; } sg = drm_prime_pages_to_sg(pages, num_pages); if (unlikely(sg == NULL)) { kfree(pages); printk(KERN_ERR "drm_prime_pages_to_sg err.\n"); return -ENOMEM; } kfree(pages); #endif #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0)) ret = ttm_bo_init(bdev, bo, req->size, TTM_HACK_WORKAROUND_ttm_bo_type_user, &placement, req->page_alignment, req->user_address, true, NULL, acc_size, NULL, &ttm_bo_user_destroy); #elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_sg, &placement, req->page_alignment, req->user_address, true, NULL, acc_size, sg, &ttm_ub_bo_user_destroy); #else ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_sg, &placement, req->page_alignment, true, NULL, acc_size, sg, 
&ttm_ub_bo_user_destroy); #endif /* * Note that the ttm_buffer_object_init function * would've called the destroy function on failure!! */ ttm_read_unlock(lock); if (unlikely(ret != 0)) goto out; tmp = ttm_bo_reference(bo); ret = ttm_base_object_init(tfile, &user_bo->base, flags & TTM_PL_FLAG_SHARED, ttm_buffer_type, &ttm_bo_user_release, &ttm_bo_user_ref_release); if (unlikely(ret != 0)) goto out_err; ret = ttm_bo_reserve(bo, true, false, false, 0); if (unlikely(ret != 0)) goto out_err; ttm_pl_fill_rep(bo, rep); ttm_bo_unreserve(bo); ttm_bo_unref(&bo); out: return 0; out_err: ttm_bo_unref(&tmp); ttm_bo_unref(&bo); return ret; }
int ttm_pl_create_ioctl(struct ttm_object_file *tfile, struct ttm_bo_device *bdev, struct ttm_lock *lock, void *data) { union ttm_pl_create_arg *arg = data; struct ttm_pl_create_req *req = &arg->req; struct ttm_pl_rep *rep = &arg->rep; struct ttm_buffer_object *bo; struct ttm_buffer_object *tmp; struct ttm_bo_user_object *user_bo; uint32_t flags; int ret = 0; struct ttm_mem_global *mem_glob = bdev->glob->mem_glob; struct ttm_placement placement = default_placement; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3,3,0)) size_t acc_size = ttm_pl_size(bdev, (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT); #else size_t acc_size = ttm_bo_acc_size(bdev, req->size, sizeof(struct ttm_buffer_object)); #endif ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false); if (unlikely(ret != 0)) return ret; flags = req->placement; user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL); if (unlikely(user_bo == NULL)) { ttm_mem_global_free(mem_glob, acc_size); return -ENOMEM; } bo = &user_bo->bo; ret = ttm_read_lock(lock, true); if (unlikely(ret != 0)) { ttm_mem_global_free(mem_glob, acc_size); kfree(user_bo); return ret; } placement.num_placement = 1; placement.placement = &flags; if ((flags & TTM_PL_MASK_CACHING) == 0) flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED; #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_device, &placement, req->page_alignment, 0, true, NULL, acc_size, NULL, &ttm_bo_user_destroy); #else ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_device, &placement, req->page_alignment, true, NULL, acc_size, NULL, &ttm_bo_user_destroy); #endif ttm_read_unlock(lock); /* * Note that the ttm_buffer_object_init function * would've called the destroy function on failure!! 
*/ if (unlikely(ret != 0)) goto out; tmp = ttm_bo_reference(bo); ret = ttm_base_object_init(tfile, &user_bo->base, flags & TTM_PL_FLAG_SHARED, ttm_buffer_type, &ttm_bo_user_release, &ttm_bo_user_ref_release); if (unlikely(ret != 0)) goto out_err; ret = ttm_bo_reserve(bo, true, false, false, 0); if (unlikely(ret != 0)) goto out_err; ttm_pl_fill_rep(bo, rep); ttm_bo_unreserve(bo); ttm_bo_unref(&bo); out: return 0; out_err: ttm_bo_unref(&tmp); ttm_bo_unref(&bo); return ret; }