/**
 * Pin or unpin a buffer in vram.
 *
 * @dev_priv:  Driver private.
 * @buf:  DMA buffer to pin or unpin.
 * @pin:  Pin buffer in vram if true.
 * @interruptible:  Use interruptible wait.
 *
 * Takes the current master's ttm lock in read mode.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
static int vmw_dmabuf_pin_in_vram(struct vmw_private *dev_priv,
                                  struct vmw_dma_buffer *buf,
                                  bool pin, bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        struct ttm_placement *overlay_placement = &vmw_vram_placement;
        int ret;

        ret = ttm_read_lock(&dev_priv->active_master->lock, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, false, 0);
        if (unlikely(ret != 0))
                goto err;

        if (pin)
                overlay_placement = &vmw_vram_ne_placement;

        ret = ttm_bo_validate(bo, overlay_placement, interruptible,
                              false, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->active_master->lock);
        return ret;
}
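/*
 * A hedged usage sketch for vmw_dmabuf_pin_in_vram(): the two callers
 * below (overlay_setup/overlay_teardown) are hypothetical and only
 * illustrate propagating -ERESTARTSYS so the ioctl path can be
 * restarted after a signal.
 */
static int overlay_setup(struct vmw_private *dev_priv,
                         struct vmw_dma_buffer *buf)
{
        int ret;

        /* Pin the buffer into VRAM; may sleep and return -ERESTARTSYS. */
        ret = vmw_dmabuf_pin_in_vram(dev_priv, buf, true, true);
        if (unlikely(ret != 0))
                return ret;

        /* ... point the device at the now-pinned buffer ... */

        return 0;
}

static void overlay_teardown(struct vmw_private *dev_priv,
                             struct vmw_dma_buffer *buf)
{
        /*
         * Unpin non-interruptibly: a teardown path usually cannot
         * retry, so it must not fail on a pending signal.
         */
        (void) vmw_dmabuf_pin_in_vram(dev_priv, buf, false, false);
}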
static struct vmw_master *vmw_master_check(struct drm_device *dev,
                                           struct drm_file *file_priv,
                                           unsigned int flags)
{
        int ret;
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster;

        if (file_priv->minor->type != DRM_MINOR_LEGACY ||
            !(flags & DRM_AUTH))
                return NULL;

        ret = mutex_lock_interruptible(&dev->master_mutex);
        if (unlikely(ret != 0))
                return ERR_PTR(-ERESTARTSYS);

        if (file_priv->is_master) {
                mutex_unlock(&dev->master_mutex);
                return NULL;
        }

        /*
         * Check if we were previously master, but now dropped. In that
         * case, allow at least render node functionality.
         */
        if (vmw_fp->locked_master) {
                mutex_unlock(&dev->master_mutex);

                if (flags & DRM_RENDER_ALLOW)
                        return NULL;

                DRM_ERROR("Dropped master trying to access ioctl that "
                          "requires authentication.\n");
                return ERR_PTR(-EACCES);
        }
        mutex_unlock(&dev->master_mutex);

        /*
         * Taking the drm_global_mutex after the TTM lock might deadlock
         */
        if (!(flags & DRM_UNLOCKED)) {
                DRM_ERROR("Refusing locked ioctl access.\n");
                return ERR_PTR(-EDEADLK);
        }

        /*
         * Take the TTM lock. Possibly sleep waiting for the authenticating
         * master to become master again, or for a SIGTERM if the
         * authenticating master exits.
         */
        vmaster = vmw_master(file_priv->master);
        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                vmaster = ERR_PTR(ret);

        return vmaster;
}
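/*
 * Hedged sketch of how a dispatcher might consume vmw_master_check():
 * NULL means "no lock taken", an ERR_PTR aborts the ioctl, and any
 * other return means the master's ttm lock is held in read mode and
 * must be released after the handler runs. The dispatcher below is
 * illustrative only, not the driver's actual entry point.
 */
static long example_ioctl_dispatch(struct drm_device *dev,
                                   struct drm_file *file_priv,
                                   unsigned int flags,
                                   long (*handler)(struct drm_device *,
                                                   struct drm_file *))
{
        struct vmw_master *vmaster;
        long ret;

        vmaster = vmw_master_check(dev, file_priv, flags);
        if (IS_ERR(vmaster))
                return PTR_ERR(vmaster);

        ret = handler(dev, file_priv);

        /* Only drop the lock if vmw_master_check() actually took it. */
        if (vmaster)
                ttm_read_unlock(&vmaster->lock);

        return ret;
}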
/**
 * vmw_dmabuf_unpin - Unpin the given buffer; does not move the buffer.
 *
 * This function takes the reservation_sem in read mode.
 *
 * @dev_priv: Driver private.
 * @buf: DMA buffer to unpin.
 * @interruptible: Use interruptible wait.
 *
 * Returns
 * -ERESTARTSYS if interrupted by a signal.
 */
int vmw_dmabuf_unpin(struct vmw_private *dev_priv,
                     struct vmw_dma_buffer *buf,
                     bool interruptible)
{
        struct ttm_buffer_object *bo = &buf->base;
        int ret;

        ret = ttm_read_lock(&dev_priv->reservation_sem, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto err;

        vmw_bo_pin_reserved(buf, false);

        ttm_bo_unreserve(bo);

err:
        ttm_read_unlock(&dev_priv->reservation_sem);
        return ret;
}
/**
 * vmw_svga_enable - Enable SVGA mode, FIFO and use of VRAM.
 *
 * @dev_priv: Pointer to device private struct.
 */
void vmw_svga_enable(struct vmw_private *dev_priv)
{
        /* Non-interruptible wait: the read lock cannot fail here. */
        (void) ttm_read_lock(&dev_priv->reservation_sem, false);
        __vmw_svga_enable(dev_priv);
        ttm_read_unlock(&dev_priv->reservation_sem);
}
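/*
 * Hedged sketch of a caller: a power-management resume path might
 * re-enable SVGA unconditionally. The function name below is
 * hypothetical; it only shows that vmw_svga_enable() is safe to call
 * from contexts that cannot handle -ERESTARTSYS, because it waits
 * non-interruptibly.
 */
static void example_resume(struct vmw_private *dev_priv)
{
        vmw_svga_enable(dev_priv);
        /* ... restore FIFO consumers and display state ... */
}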
int ttm_pl_setstatus_ioctl(struct ttm_object_file *tfile,
                           struct ttm_lock *lock, void *data)
{
        union ttm_pl_setstatus_arg *arg = data;
        struct ttm_pl_setstatus_req *req = &arg->req;
        struct ttm_pl_rep *rep = &arg->rep;
        struct ttm_buffer_object *bo;
        struct ttm_bo_device *bdev;
        struct ttm_placement placement = default_placement;
        uint32_t flags[2];
        int ret;

        bo = ttm_buffer_object_lookup(tfile, req->handle);
        if (unlikely(bo == NULL)) {
                printk(KERN_ERR
                       "Could not find buffer object for setstatus.\n");
                return -EINVAL;
        }

        bdev = bo->bdev;

        ret = ttm_read_lock(lock, true);
        if (unlikely(ret != 0))
                goto out_err0;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0))
                goto out_err1;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
        ret = ttm_bo_wait_cpu(bo, false);
        if (unlikely(ret != 0))
                goto out_err2;
#endif

        flags[0] = req->set_placement;
        flags[1] = req->clr_placement;
        placement.num_placement = 2;
        placement.placement = flags;

        /* No bo->lock needed here: the reservation is already held. */
        ret = psb_ttm_bo_check_placement(bo, &placement);
        if (unlikely(ret != 0))
                goto out_err2;

        placement.num_placement = 1;
        flags[0] = (req->set_placement | bo->mem.placement) &
                   ~req->clr_placement;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
        ret = ttm_bo_validate(bo, &placement, true, false, false);
#else
        ret = ttm_bo_validate(bo, &placement, true, false);
#endif
        if (unlikely(ret != 0))
                goto out_err2;

        ttm_pl_fill_rep(bo, rep);

out_err2:
        ttm_bo_unreserve(bo);
out_err1:
        ttm_read_unlock(lock);
out_err0:
        ttm_bo_unref(&bo);
        return ret;
}
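/*
 * All of the ioctl paths in this collection follow the same ladder:
 * ttm_read_lock() -> ttm_bo_reserve() -> operate/validate ->
 * ttm_bo_unreserve() -> ttm_read_unlock(), unwinding in reverse on
 * error. A minimal, generic sketch of that ladder follows; the names
 * are placeholders, and the four-argument ttm_bo_reserve() form is
 * assumed (its signature varies across kernel versions).
 */
static int example_locked_bo_op(struct ttm_lock *lock,
                                struct ttm_buffer_object *bo,
                                bool interruptible)
{
        int ret;

        ret = ttm_read_lock(lock, interruptible);
        if (unlikely(ret != 0))
                return ret;

        ret = ttm_bo_reserve(bo, interruptible, false, NULL);
        if (unlikely(ret != 0))
                goto out_unlock;

        /* ... operate on the reserved buffer object here ... */

        ttm_bo_unreserve(bo);
out_unlock:
        ttm_read_unlock(lock);
        return ret;
}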
int ttm_pl_ub_create_ioctl(struct ttm_object_file *tfile,
                           struct ttm_bo_device *bdev,
                           struct ttm_lock *lock, void *data)
{
        union ttm_pl_create_ub_arg *arg = data;
        struct ttm_pl_create_ub_req *req = &arg->req;
        struct ttm_pl_rep *rep = &arg->rep;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *tmp;
        struct ttm_bo_user_object *user_bo;
        uint32_t flags;
        int ret = 0;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        struct ttm_placement placement = default_placement;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
        size_t acc_size = ttm_pl_size(bdev,
                                      (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
        size_t acc_size = ttm_bo_acc_size(bdev, req->size,
                                          sizeof(struct ttm_buffer_object));
#endif

        if (req->user_address & ~PAGE_MASK) {
                printk(KERN_ERR "User pointer buffer needs page alignment\n");
                return -EFAULT;
        }

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        flags = req->placement;
        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }

        ret = ttm_read_lock(lock, true);
        if (unlikely(ret != 0)) {
                ttm_mem_global_free(mem_glob, acc_size);
                kfree(user_bo);
                return ret;
        }
        bo = &user_bo->bo;

        placement.num_placement = 1;
        placement.placement = &flags;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
/* For kernel 3.0, use the desired type. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_user
#else
/* TTM_HACK_WORKAROUND_ttm_bo_type_user -- Hack for porting,
   as ttm_bo_type_user is no longer implemented.
   This will not result in working code.
   FIXME - to be removed. */
#warning warning: ttm_bo_type_user no longer supported
/* For kernel 3.3+, use the wrong type, which will compile but not work. */
#define TTM_HACK_WORKAROUND_ttm_bo_type_user ttm_bo_type_kernel
#endif

#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
        /*
         * Handle a frame buffer allocated in user space: convert the
         * user-space virtual address into a page list.
         */
        unsigned int page_nr = 0;
        struct vm_area_struct *vma = NULL;
        struct sg_table *sg = NULL;
        unsigned long num_pages = 0;
        struct page **pages = NULL;
        unsigned long before_flags;

        num_pages = (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT;
        pages = kzalloc(num_pages * sizeof(struct page *), GFP_KERNEL);
        if (unlikely(pages == NULL)) {
                printk(KERN_ERR "kzalloc pages failed\n");
                ret = -ENOMEM;
                goto out_unlock;
        }

        down_read(&current->mm->mmap_sem);
        vma = find_vma(current->mm, req->user_address);
        if (unlikely(vma == NULL)) {
                up_read(&current->mm->mmap_sem);
                kfree(pages);
                printk(KERN_ERR "find_vma failed\n");
                ret = -EFAULT;
                goto out_unlock;
        }

        before_flags = vma->vm_flags;
        if (vma->vm_flags & (VM_IO | VM_PFNMAP))
                vma->vm_flags = vma->vm_flags & ((~VM_IO) & (~VM_PFNMAP));

        /* Request write access for the caller, not forced. */
        page_nr = get_user_pages(current, current->mm, req->user_address,
                                 (int)num_pages, 1, 0, pages, NULL);
        vma->vm_flags = before_flags;
        up_read(&current->mm->mmap_sem);

        if (unlikely(page_nr < num_pages)) {
                kfree(pages);
                pages = NULL;
                printk(KERN_ERR "get_user_pages err.\n");
                ret = -ENOMEM;
                goto out_unlock;
        }

        sg = drm_prime_pages_to_sg(pages, num_pages);
        if (unlikely(sg == NULL)) {
                kfree(pages);
                printk(KERN_ERR "drm_prime_pages_to_sg err.\n");
                ret = -ENOMEM;
                goto out_unlock;
        }
        kfree(pages);
#endif

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
        ret = ttm_bo_init(bdev, bo, req->size,
                          TTM_HACK_WORKAROUND_ttm_bo_type_user,
                          &placement, req->page_alignment,
                          req->user_address, true, NULL,
                          acc_size, NULL, &ttm_bo_user_destroy);
#elif (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
        ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_sg,
                          &placement, req->page_alignment,
                          req->user_address, true, NULL,
                          acc_size, sg, &ttm_ub_bo_user_destroy);
#else
        ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_sg,
                          &placement, req->page_alignment,
                          true, NULL, acc_size, sg,
                          &ttm_ub_bo_user_destroy);
#endif

        /*
         * Note that the ttm_buffer_object_init function
         * would've called the destroy function on failure!!
         */
        ttm_read_unlock(lock);
        if (unlikely(ret != 0))
                goto out;

        tmp = ttm_bo_reference(bo);
        ret = ttm_base_object_init(tfile, &user_bo->base,
                                   flags & TTM_PL_FLAG_SHARED,
                                   ttm_buffer_type,
                                   &ttm_bo_user_release,
                                   &ttm_bo_user_ref_release);
        if (unlikely(ret != 0))
                goto out_err;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0))
                goto out_err;
        ttm_pl_fill_rep(bo, rep);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);
out:
        return ret;
out_err:
        ttm_bo_unref(&tmp);
        ttm_bo_unref(&bo);
        return ret;

#if (LINUX_VERSION_CODE > KERNEL_VERSION(3, 3, 0))
        /* Unwind the lock, the memory accounting and the allocation. */
out_unlock:
        ttm_read_unlock(lock);
        ttm_mem_global_free(mem_glob, acc_size);
        kfree(user_bo);
        return ret;
#endif
}
int ttm_pl_create_ioctl(struct ttm_object_file *tfile,
                        struct ttm_bo_device *bdev,
                        struct ttm_lock *lock, void *data)
{
        union ttm_pl_create_arg *arg = data;
        struct ttm_pl_create_req *req = &arg->req;
        struct ttm_pl_rep *rep = &arg->rep;
        struct ttm_buffer_object *bo;
        struct ttm_buffer_object *tmp;
        struct ttm_bo_user_object *user_bo;
        uint32_t flags;
        int ret = 0;
        struct ttm_mem_global *mem_glob = bdev->glob->mem_glob;
        struct ttm_placement placement = default_placement;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 3, 0))
        size_t acc_size = ttm_pl_size(bdev,
                                      (req->size + PAGE_SIZE - 1) >> PAGE_SHIFT);
#else
        size_t acc_size = ttm_bo_acc_size(bdev, req->size,
                                          sizeof(struct ttm_buffer_object));
#endif

        ret = ttm_mem_global_alloc(mem_glob, acc_size, false, false);
        if (unlikely(ret != 0))
                return ret;

        flags = req->placement;
        user_bo = kzalloc(sizeof(*user_bo), GFP_KERNEL);
        if (unlikely(user_bo == NULL)) {
                ttm_mem_global_free(mem_glob, acc_size);
                return -ENOMEM;
        }
        bo = &user_bo->bo;

        ret = ttm_read_lock(lock, true);
        if (unlikely(ret != 0)) {
                ttm_mem_global_free(mem_glob, acc_size);
                kfree(user_bo);
                return ret;
        }

        placement.num_placement = 1;
        placement.placement = &flags;

        if ((flags & TTM_PL_MASK_CACHING) == 0)
                flags |= TTM_PL_FLAG_WC | TTM_PL_FLAG_UNCACHED;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
        ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_device,
                          &placement, req->page_alignment, 0, true,
                          NULL, acc_size, NULL, &ttm_bo_user_destroy);
#else
        ret = ttm_bo_init(bdev, bo, req->size, ttm_bo_type_device,
                          &placement, req->page_alignment, true,
                          NULL, acc_size, NULL, &ttm_bo_user_destroy);
#endif
        ttm_read_unlock(lock);

        /*
         * Note that the ttm_buffer_object_init function
         * would've called the destroy function on failure!!
         */
        if (unlikely(ret != 0))
                goto out;

        tmp = ttm_bo_reference(bo);
        ret = ttm_base_object_init(tfile, &user_bo->base,
                                   flags & TTM_PL_FLAG_SHARED,
                                   ttm_buffer_type,
                                   &ttm_bo_user_release,
                                   &ttm_bo_user_ref_release);
        if (unlikely(ret != 0))
                goto out_err;

        ret = ttm_bo_reserve(bo, true, false, false, 0);
        if (unlikely(ret != 0))
                goto out_err;
        ttm_pl_fill_rep(bo, rep);
        ttm_bo_unreserve(bo);
        ttm_bo_unref(&bo);
out:
        return ret;
out_err:
        ttm_bo_unref(&tmp);
        ttm_bo_unref(&bo);
        return ret;
}
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_readback_arg *arg =
                (struct drm_vmw_present_readback_arg *)data;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)
                (unsigned long)arg->fence_rep;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_framebuffer *fb;
        struct vmw_framebuffer *vfb;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                DRM_ERROR("Argument clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        drm_modeset_lock_all(dev);

        fb = drm_framebuffer_lookup(dev, arg->fb_id);
        if (!fb) {
                DRM_ERROR("Invalid framebuffer id.\n");
                ret = -ENOENT;
                goto out_no_fb;
        }

        vfb = vmw_framebuffer_to_vfb(fb);
        if (!vfb->dmabuf) {
                DRM_ERROR("Framebuffer not dmabuf backed.\n");
                ret = -EINVAL;
                goto out_no_ttm_lock;
        }

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_kms_readback(dev_priv, file_priv,
                               vfb, user_fence_rep,
                               clips, num_clips);

        ttm_read_unlock(&vmaster->lock);

out_no_ttm_lock:
        drm_framebuffer_unreference(fb);
out_no_fb:
        drm_modeset_unlock_all(dev);
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}
int vmw_present_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_arg *arg =
                (struct drm_vmw_present_arg *)data;
        struct vmw_surface *surface;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_framebuffer *fb;
        struct vmw_framebuffer *vfb;
        struct vmw_resource *res;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                DRM_ERROR("Variable clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        drm_modeset_lock_all(dev);

        fb = drm_framebuffer_lookup(dev, arg->fb_id);
        if (!fb) {
                DRM_ERROR("Invalid framebuffer id.\n");
                ret = -ENOENT;
                goto out_no_fb;
        }
        vfb = vmw_framebuffer_to_vfb(fb);

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_user_resource_lookup_handle(dev_priv, tfile, arg->sid,
                                              user_surface_converter,
                                              &res);
        if (ret)
                goto out_no_surface;

        surface = vmw_res_to_srf(res);
        ret = vmw_kms_present(dev_priv, file_priv,
                              vfb, surface, arg->sid,
                              arg->dest_x, arg->dest_y,
                              clips, num_clips);

        /* vmw_user_surface_lookup takes one ref so does new_fb */
        vmw_surface_unreference(&surface);

out_no_surface:
        ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
        drm_framebuffer_unreference(fb);
out_no_fb:
        drm_modeset_unlock_all(dev);
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}
/**
 * vmw_simple_resource_create_ioctl - Helper to set up an ioctl function to
 * create a struct vmw_simple_resource.
 *
 * @dev: Pointer to a struct drm device.
 * @data: Ioctl argument.
 * @file_priv: Pointer to a struct drm_file identifying the caller.
 * @func: Pointer to a struct vmw_simple_resource_func identifying the
 * simple resource type.
 *
 * Returns:
 *   0 if success,
 *   Negative error value on error.
 */
int vmw_simple_resource_create_ioctl(struct drm_device *dev, void *data,
                                     struct drm_file *file_priv,
                                     const struct vmw_simple_resource_func *func)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_user_simple_resource *usimple;
        struct vmw_resource *res;
        struct vmw_resource *tmp;
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct ttm_operation_ctx ctx = {
                .interruptible = true,
                .no_wait_gpu = false
        };
        size_t alloc_size;
        size_t account_size;
        int ret;

        alloc_size = offsetof(struct vmw_user_simple_resource, simple) +
                     func->size;
        account_size = ttm_round_pot(alloc_size) + VMW_IDA_ACC_SIZE +
                       TTM_OBJ_EXTRA_SIZE;

        ret = ttm_read_lock(&dev_priv->reservation_sem, true);
        if (ret)
                return ret;

        ret = ttm_mem_global_alloc(vmw_mem_glob(dev_priv), account_size,
                                   &ctx);
        ttm_read_unlock(&dev_priv->reservation_sem);
        if (ret) {
                if (ret != -ERESTARTSYS)
                        DRM_ERROR("Out of graphics memory for %s"
                                  " creation.\n", func->res_func.type_name);
                goto out_ret;
        }

        usimple = kzalloc(alloc_size, GFP_KERNEL);
        if (!usimple) {
                ttm_mem_global_free(vmw_mem_glob(dev_priv),
                                    account_size);
                ret = -ENOMEM;
                goto out_ret;
        }

        usimple->simple.func = func;
        usimple->account_size = account_size;
        res = &usimple->simple.res;
        usimple->base.shareable = false;
        usimple->base.tfile = NULL;

        /*
         * From here on, the destructor takes over resource freeing.
         */
        ret = vmw_simple_resource_init(dev_priv, &usimple->simple,
                                       data, vmw_simple_resource_free);
        if (ret)
                goto out_ret;

        tmp = vmw_resource_reference(res);
        ret = ttm_base_object_init(tfile, &usimple->base, false,
                                   func->ttm_res_type,
                                   &vmw_simple_resource_base_release, NULL);

        if (ret) {
                vmw_resource_unreference(&tmp);
                goto out_err;
        }

        func->set_arg_handle(data, usimple->base.handle);
out_err:
        vmw_resource_unreference(&res);
out_ret:
        return ret;
}

/**
 * vmw_simple_resource_lookup - Look up a simple resource from its user-space
 * handle.
 *
 * @tfile: struct ttm_object_file identifying the caller.
 * @handle: The user-space handle.
 * @func: The struct vmw_simple_resource_func identifying the simple resource
 * type.
 *
 * Returns: Refcounted pointer to the embedded struct vmw_resource if
 * successful. Error pointer otherwise.
 */
struct vmw_resource *
vmw_simple_resource_lookup(struct ttm_object_file *tfile,
                           uint32_t handle,
                           const struct vmw_simple_resource_func *func)
{
        struct vmw_user_simple_resource *usimple;
        struct ttm_base_object *base;
        struct vmw_resource *res;

        base = ttm_base_object_lookup(tfile, handle);
        if (!base) {
                DRM_ERROR("Invalid %s handle 0x%08lx.\n",
                          func->res_func.type_name,
                          (unsigned long) handle);
                return ERR_PTR(-ESRCH);
        }

        if (ttm_base_object_type(base) != func->ttm_res_type) {
                ttm_base_object_unref(&base);
                DRM_ERROR("Invalid type of %s handle 0x%08lx.\n",
                          func->res_func.type_name,
                          (unsigned long) handle);
                return ERR_PTR(-EINVAL);
        }

        usimple = container_of(base, typeof(*usimple), base);
        res = vmw_resource_reference(&usimple->simple.res);
        ttm_base_object_unref(&base);

        return res;
}
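/*
 * Hedged usage sketch for vmw_simple_resource_lookup(): look up a
 * simple resource by handle and drop the reference when done. The
 * wrapper name below is hypothetical; only the lookup/unreference
 * pairing is the point.
 */
static int example_use_simple_resource(struct ttm_object_file *tfile,
                                       uint32_t handle,
                                       const struct vmw_simple_resource_func *func)
{
        struct vmw_resource *res;

        res = vmw_simple_resource_lookup(tfile, handle, func);
        if (IS_ERR(res))
                return PTR_ERR(res);

        /* ... use the refcounted resource ... */

        vmw_resource_unreference(&res);
        return 0;
}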
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_readback_arg *arg =
                (struct drm_vmw_present_readback_arg *)data;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)
                (unsigned long)arg->fence_rep;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_mode_object *obj;
        struct vmw_framebuffer *vfb;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                DRM_ERROR("Argument clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_no_mode_mutex;
        }

        obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
                DRM_ERROR("Invalid framebuffer id.\n");
                ret = -EINVAL;
                goto out_no_fb;
        }

        vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));
        if (!vfb->dmabuf) {
                DRM_ERROR("Framebuffer not dmabuf backed.\n");
                ret = -EINVAL;
                goto out_no_fb;
        }

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_kms_readback(dev_priv, file_priv,
                               vfb, user_fence_rep,
                               clips, num_clips);

        ttm_read_unlock(&vmaster->lock);

out_no_ttm_lock:
out_no_fb:
        mutex_unlock(&dev->mode_config.mutex);
out_no_mode_mutex:
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}
int vmw_present_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_arg *arg =
                (struct drm_vmw_present_arg *)data;
        struct vmw_surface *surface;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_mode_object *obj;
        struct vmw_framebuffer *vfb;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect *)(unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                DRM_ERROR("Variable clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        ret = mutex_lock_interruptible(&dev->mode_config.mutex);
        if (unlikely(ret != 0)) {
                ret = -ERESTARTSYS;
                goto out_no_mode_mutex;
        }

        obj = drm_mode_object_find(dev, arg->fb_id, DRM_MODE_OBJECT_FB);
        if (!obj) {
                DRM_ERROR("Invalid framebuffer id.\n");
                ret = -EINVAL;
                goto out_no_fb;
        }
        vfb = vmw_framebuffer_to_vfb(obj_to_fb(obj));

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_user_surface_lookup_handle(dev_priv, tfile, arg->sid,
                                             &surface);
        if (ret)
                goto out_no_surface;

        ret = vmw_kms_present(dev_priv, file_priv,
                              vfb, surface, arg->sid,
                              arg->dest_x, arg->dest_y,
                              clips, num_clips);

        /* vmw_user_surface_lookup takes one ref so does new_fb */
        vmw_surface_unreference(&surface);

out_no_surface:
        ttm_read_unlock(&vmaster->lock);
out_no_ttm_lock:
out_no_fb:
        mutex_unlock(&dev->mode_config.mutex);
out_no_mode_mutex:
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}