/**
 * vmw_stdu_crtc_page_flip - Binds a buffer to a screen target
 *
 * @crtc: CRTC to attach FB to
 * @new_fb: FB to attach
 * @event: Event to be posted. This event should've been allocated
 *         using k[mz]alloc, and should've been completely initialized.
 * @flags: Input flags.
 *
 * If the STDU uses the same display and content buffers, i.e. a true flip,
 * this function will replace the existing display buffer with the new content
 * buffer.
 *
 * If the STDU uses different display and content buffers, i.e. a blit, then
 * only the content buffer will be updated.
 *
 * RETURNS:
 * 0 on success, error code on failure
 */
static int vmw_stdu_crtc_page_flip(struct drm_crtc *crtc,
                                   struct drm_framebuffer *new_fb,
                                   struct drm_pending_vblank_event *event,
                                   uint32_t flags)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        struct vmw_screen_target_display_unit *stdu = vmw_crtc_to_stdu(crtc);
        struct drm_vmw_rect vclips;
        struct vmw_framebuffer *vfb = vmw_framebuffer_to_vfb(new_fb);
        int ret;

        if (!stdu->defined || !vmw_kms_crtc_flippable(dev_priv, crtc))
                return -EINVAL;

        ret = vmw_stdu_bind_fb(dev_priv, crtc, &crtc->mode, new_fb);
        if (ret)
                return ret;

        if (stdu->base.is_implicit)
                vmw_kms_update_implicit_fb(dev_priv, crtc);

        vclips.x = crtc->x;
        vclips.y = crtc->y;
        vclips.w = crtc->mode.hdisplay;
        vclips.h = crtc->mode.vdisplay;

        if (vfb->dmabuf)
                ret = vmw_kms_stdu_dma(dev_priv, NULL, vfb, NULL, NULL,
                                       &vclips, 1, 1, true, false);
        else
                ret = vmw_kms_stdu_surface_dirty(dev_priv, vfb, NULL, &vclips,
                                                 NULL, 0, 0, 1, 1, NULL);
        if (ret)
                return ret;

        if (event) {
                struct vmw_fence_obj *fence = NULL;
                struct drm_file *file_priv = event->base.file_priv;

                vmw_execbuf_fence_commands(NULL, dev_priv, &fence, NULL);
                if (!fence)
                        return -ENOMEM;

                ret = vmw_event_fence_action_queue(file_priv, fence,
                                                   &event->base,
                                                   &event->event.tv_sec,
                                                   &event->event.tv_usec,
                                                   true);
                vmw_fence_obj_unreference(&fence);
        } else {
                vmw_fifo_flush(dev_priv, false);
        }

        return 0;
}
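/*
 * Hedged usage sketch (userspace, not driver code): the fence-backed event
 * queued above surfaces through the regular DRM event machinery. Assuming a
 * libdrm client that already owns a CRTC ("crtc_id") and framebuffer
 * ("fb_id") -- both hypothetical names -- a flip-and-wait loop could look
 * roughly like this:
 */
#include <xf86drm.h>
#include <xf86drmMode.h>
#include <sys/select.h>

static void page_flip_handler(int fd, unsigned int sequence,
                              unsigned int tv_sec, unsigned int tv_usec,
                              void *user_data)
{
        /* tv_sec/tv_usec come from the fence-backed event filled in above. */
        *(int *)user_data = 1;
}

static int flip_and_wait(int fd, uint32_t crtc_id, uint32_t fb_id)
{
        drmEventContext evctx = {
                .version = DRM_EVENT_CONTEXT_VERSION,
                .page_flip_handler = page_flip_handler,
        };
        int done = 0;
        fd_set fds;

        if (drmModePageFlip(fd, crtc_id, fb_id,
                            DRM_MODE_PAGE_FLIP_EVENT, &done))
                return -1;

        while (!done) {
                FD_ZERO(&fds);
                FD_SET(fd, &fds);
                if (select(fd + 1, &fds, NULL, NULL, NULL) < 0)
                        return -1;
                drmHandleEvent(fd, &evctx);  /* dispatches to the handler */
        }
        return 0;
}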
static int vmw_master_set(struct drm_device *dev,
                          struct drm_file *file_priv,
                          bool from_open)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *active = dev_priv->active_master;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret = 0;

        if (active) {
                BUG_ON(active != &dev_priv->fbdev_master);
                ret = ttm_vt_lock(&active->lock, false, vmw_fp->tfile);
                if (unlikely(ret != 0))
                        return ret;

                ttm_lock_set_kill(&active->lock, true, SIGTERM);
                dev_priv->active_master = NULL;
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);
        if (!from_open) {
                ttm_vt_unlock(&vmaster->lock);
                BUG_ON(vmw_fp->locked_master != file_priv->master);
                drm_master_put(&vmw_fp->locked_master);
        }

        dev_priv->active_master = vmaster;
        drm_sysfs_hotplug_event(dev);

        return 0;
}
static void vmw_master_drop(struct drm_device *dev,
                            struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        int ret;

        /*
         * Make sure the master doesn't disappear while we have
         * it locked.
         */
        vmw_fp->locked_master = drm_master_get(file_priv->master);
        ret = ttm_vt_lock(&vmaster->lock, false, vmw_fp->tfile);
        vmw_kms_legacy_hotspot_clear(dev_priv);
        if (unlikely(ret != 0)) {
                DRM_ERROR("Unable to lock TTM at VT switch.\n");
                drm_master_put(&vmw_fp->locked_master);
        }

        ttm_lock_set_kill(&vmaster->lock, false, SIGTERM);

        if (!dev_priv->enable_fb)
                vmw_svga_disable(dev_priv);

        dev_priv->active_master = &dev_priv->fbdev_master;
        ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
        ttm_vt_unlock(&dev_priv->fbdev_master.lock);

        if (dev_priv->enable_fb)
                vmw_fb_on(dev_priv);
}
static int vmw_pm_freeze(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        dev_priv->suspended = true;
        if (dev_priv->enable_fb)
                vmw_fifo_resource_dec(dev_priv);

        if (atomic_read(&dev_priv->num_fifo_resources) != 0) {
                DRM_ERROR("Can't hibernate while 3D resources are active.\n");
                if (dev_priv->enable_fb)
                        vmw_fifo_resource_inc(dev_priv);
                WARN_ON(vmw_request_device_late(dev_priv));
                dev_priv->suspended = false;
                return -EBUSY;
        }

        if (dev_priv->enable_fb)
                __vmw_svga_disable(dev_priv);

        vmw_release_device_late(dev_priv);

        return 0;
}
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status, masked_status;

        spin_lock(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        masked_status = status & dev_priv->irq_mask;
        spin_unlock(&dev_priv->irq_lock);

        if (likely(status))
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);

        if (!masked_status)
                return IRQ_NONE;

        if (masked_status & (SVGA_IRQFLAG_ANY_FENCE |
                             SVGA_IRQFLAG_FENCE_GOAL)) {
                vmw_fences_update(dev_priv->fman);
                wake_up_all(&dev_priv->fence_queue);
        }

        if (masked_status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        return IRQ_HANDLED;
}
static void vmw_preclose(struct drm_device *dev,
                         struct drm_file *file_priv)
{
        struct vmw_fpriv *vmw_fp = vmw_fpriv(file_priv);
        struct vmw_private *dev_priv = vmw_priv(dev);

        vmw_event_fence_fpriv_gone(dev_priv->fman, &vmw_fp->fence_events);
}
/**
 * vmw_sou_crtc_mode_set_nofb - Create new screen
 *
 * @crtc: CRTC associated with the new screen
 *
 * This function creates/destroys a screen. This function cannot fail, so if
 * somehow we run into a failure, just do the best we can to get out.
 */
static void vmw_sou_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        struct vmw_framebuffer *vfb;
        struct drm_framebuffer *fb;
        struct drm_plane_state *ps;
        struct vmw_plane_state *vps;
        int ret;

        sou = vmw_crtc_to_sou(crtc);
        dev_priv = vmw_priv(crtc->dev);
        ps = crtc->primary->state;
        fb = ps->fb;
        vps = vmw_plane_state_to_vps(ps);

        vfb = (fb) ? vmw_framebuffer_to_vfb(fb) : NULL;

        if (sou->defined) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                if (ret) {
                        DRM_ERROR("Failed to destroy Screen Object\n");
                        return;
                }
        }

        if (vfb) {
                struct drm_connector_state *conn_state;
                struct vmw_connector_state *vmw_conn_state;
                int x, y;

                sou->buffer = vps->bo;
                sou->buffer_size = vps->bo_size;

                if (sou->base.is_implicit) {
                        x = crtc->x;
                        y = crtc->y;
                } else {
                        conn_state = sou->base.connector.state;
                        vmw_conn_state = vmw_connector_state_to_vcs(conn_state);

                        x = vmw_conn_state->gui_x;
                        y = vmw_conn_state->gui_y;
                }

                ret = vmw_sou_fifo_create(dev_priv, sou, x, y, &crtc->mode);
                if (ret)
                        DRM_ERROR("Failed to define Screen Object %dx%d\n",
                                  crtc->x, crtc->y);

                vmw_kms_add_active(dev_priv, &sou->base, vfb);
        } else {
                sou->buffer = NULL;
                sou->buffer_size = 0;

                vmw_kms_del_active(dev_priv, &sou->base);
        }
}
/**
 * vmw_fops_poll - wrapper around the drm_poll function
 *
 * @filp: See the linux fops poll documentation.
 * @wait: See the linux fops poll documentation.
 *
 * Wrapper around the drm_poll function that makes sure the device is
 * processing the fifo if drm_poll decides to wait.
 */
unsigned int vmw_fops_poll(struct file *filp, struct poll_table_struct *wait)
{
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        return drm_poll(filp, wait);
}
/**
 * vmw_fops_read - wrapper around the drm_read function
 *
 * @filp: See the linux fops read documentation.
 * @buffer: See the linux fops read documentation.
 * @count: See the linux fops read documentation.
 * @offset: See the linux fops read documentation.
 *
 * Wrapper around the drm_read function that makes sure the device is
 * processing the fifo if drm_read decides to wait.
 */
ssize_t vmw_fops_read(struct file *filp, char __user *buffer,
                      size_t count, loff_t *offset)
{
        struct drm_file *file_priv = filp->private_data;
        struct vmw_private *dev_priv = vmw_priv(file_priv->minor->dev);

        vmw_fifo_ping_host(dev_priv, SVGA_SYNC_GENERIC);
        return drm_read(filp, buffer, count, offset);
}
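/*
 * Hedged wiring sketch: the two wrappers above slot into the driver's
 * file_operations in place of plain drm_read()/drm_poll().
 * vmw_unlocked_ioctl is assumed to exist elsewhere in the driver; the
 * remaining entries are standard DRM plumbing.
 */
static const struct file_operations vmwgfx_driver_fops = {
        .owner = THIS_MODULE,
        .open = drm_open,
        .release = drm_release,
        .unlocked_ioctl = vmw_unlocked_ioctl,   /* assumed, not shown here */
        .mmap = vmw_mmap,
        .poll = vmw_fops_poll,
        .read = vmw_fops_read,
#if defined(CONFIG_COMPAT)
        .compat_ioctl = drm_compat_ioctl,
#endif
        .llseek = noop_llseek,
};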
void vmw_irq_preinstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        spin_lock_init(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
/**
 * vmw_sou_primary_plane_prepare_fb - allocate backing buffer
 *
 * @plane: display plane
 * @new_state: info on the new plane state, including the FB
 *
 * The SOU backing buffer is our equivalent of the display plane.
 *
 * Returns 0 on success
 */
static int
vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane,
                                 struct drm_plane_state *new_state)
{
        struct drm_framebuffer *new_fb = new_state->fb;
        struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc;
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state);
        struct vmw_private *dev_priv;
        size_t size;
        int ret;

        if (!new_fb) {
                vmw_dmabuf_unreference(&vps->dmabuf);
                vps->dmabuf_size = 0;

                return 0;
        }

        size = new_state->crtc_w * new_state->crtc_h * 4;

        if (vps->dmabuf) {
                if (vps->dmabuf_size == size)
                        return 0;

                vmw_dmabuf_unreference(&vps->dmabuf);
                vps->dmabuf_size = 0;
        }

        vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL);
        if (!vps->dmabuf)
                return -ENOMEM;

        dev_priv = vmw_priv(crtc->dev);
        vmw_svga_enable(dev_priv);

        /*
         * After the backing store has been allocated we might not be able
         * to resume the overlays; this is preferred over failing the
         * allocation.
         */
        vmw_overlay_pause_all(dev_priv);
        ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size,
                              &vmw_vram_ne_placement,
                              false, &vmw_dmabuf_bo_free);
        vmw_overlay_resume_all(dev_priv);

        if (ret != 0)
                vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */
        else
                vps->dmabuf_size = size;

        return ret;
}
static int vmw_pci_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);

        if (dev_priv->refuse_hibernation)
                return -EBUSY;

        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, PCI_D3hot);

        return 0;
}
static int vmw_driver_unload(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        enum vmw_res_type i;

        unregister_pm_notifier(&dev_priv->pm_nb);

        if (dev_priv->ctx.res_ht_initialized)
                drm_ht_remove(&dev_priv->ctx.res_ht);
        vfree(dev_priv->ctx.cmd_bounce);
        if (dev_priv->enable_fb) {
                vmw_fb_off(dev_priv);
                vmw_fb_close(dev_priv);
                vmw_fifo_resource_dec(dev_priv);
                vmw_svga_disable(dev_priv);
        }

        vmw_kms_close(dev_priv);
        vmw_overlay_close(dev_priv);

        if (dev_priv->has_gmr)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
        (void) ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);

        vmw_release_device_early(dev_priv);
        if (dev_priv->has_mob)
                (void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
        (void) ttm_bo_device_release(&dev_priv->bdev);
        vmw_release_device_late(dev_priv);
        vmw_fence_manager_takedown(dev_priv->fman);
        if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
                drm_irq_uninstall(dev_priv->dev);
        if (dev_priv->stealth)
                pci_release_region(dev->pdev, 2);
        else
                pci_release_regions(dev->pdev);

        ttm_object_device_release(&dev_priv->tdev);
        iounmap(dev_priv->mmio_virt);
        arch_phys_wc_del(dev_priv->mmio_mtrr);
        if (dev_priv->ctx.staged_bindings)
                vmw_binding_state_free(dev_priv->ctx.staged_bindings);
        vmw_ttm_global_release(dev_priv);

        for (i = vmw_res_context; i < vmw_res_max; ++i)
                idr_destroy(&dev_priv->res_idr[i]);

        kfree(dev_priv);

        return 0;
}
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_getparam_arg *param =
                (struct drm_vmw_getparam_arg *)data;

        switch (param->param) {
        case DRM_VMW_PARAM_NUM_STREAMS:
                param->value = vmw_overlay_num_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_NUM_FREE_STREAMS:
                param->value = vmw_overlay_num_free_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_3D:
                param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
                break;
        case DRM_VMW_PARAM_HW_CAPS:
                param->value = dev_priv->capabilities;
                break;
        case DRM_VMW_PARAM_FIFO_CAPS:
                param->value = dev_priv->fifo.capabilities;
                break;
        case DRM_VMW_PARAM_MAX_FB_SIZE:
                param->value = dev_priv->vram_size;
                break;
        case DRM_VMW_PARAM_FIFO_HW_VERSION:
        {
                __le32 __iomem *fifo_mem = dev_priv->mmio_virt;
                const struct vmw_fifo_state *fifo = &dev_priv->fifo;

                param->value =
                        ioread32(fifo_mem +
                                 ((fifo->capabilities &
                                   SVGA_FIFO_CAP_3D_HWVERSION_REVISED) ?
                                  SVGA_FIFO_3D_HWVERSION_REVISED :
                                  SVGA_FIFO_3D_HWVERSION));
                break;
        }
        case DRM_VMW_PARAM_MAX_SURF_MEMORY:
                param->value = dev_priv->memory_size;
                break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
                return -EINVAL;
        }

        return 0;
}
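/*
 * Hedged usage sketch (userspace, not driver code): querying a parameter
 * through libdrm. Assumes the vmwgfx UAPI header is available as
 * <drm/vmwgfx_drm.h> and that DRM_VMW_GET_PARAM is the command index wired
 * to the ioctl above -- both taken from the UAPI, not from this section.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int vmw_query_3d(int fd, uint64_t *has_3d)
{
        struct drm_vmw_getparam_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.param = DRM_VMW_PARAM_3D;   /* handled by the switch above */

        if (drmCommandWriteRead(fd, DRM_VMW_GET_PARAM, &arg, sizeof(arg)))
                return -1;

        *has_3d = arg.value;            /* 1 if the FIFO supports 3D */
        return 0;
}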
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vmw_private *dev_priv;

        if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
                DRM_ERROR("Illegal attempt to mmap old fifo space.\n");
                return -EINVAL;
        }

        file_priv = filp->private_data;
        dev_priv = vmw_priv(file_priv->minor->dev);
        return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
/**
 * vmw_sou_primary_plane_cleanup_fb - Frees sou backing buffer
 *
 * @plane: display plane
 * @old_state: Contains the FB to clean up
 *
 * Unpins the display surface
 */
static void
vmw_sou_primary_plane_cleanup_fb(struct drm_plane *plane,
                                 struct drm_plane_state *old_state)
{
        struct vmw_plane_state *vps = vmw_plane_state_to_vps(old_state);
        struct drm_crtc *crtc = plane->state->crtc ?
                plane->state->crtc : old_state->crtc;

        if (vps->bo)
                vmw_bo_unpin(vmw_priv(crtc->dev), vps->bo, false);
        vmw_bo_unreference(&vps->bo);
        vps->bo_size = 0;

        vmw_du_plane_cleanup_fb(plane, old_state);
}
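/*
 * Hedged wiring sketch: prepare_fb/cleanup_fb above are atomic plane helper
 * hooks. The atomic_check/atomic_update callbacks named below are assumed
 * to exist elsewhere in the driver and appear only to place the two
 * functions in context.
 */
static const struct drm_plane_helper_funcs vmw_sou_primary_plane_helper_funcs = {
        .prepare_fb = vmw_sou_primary_plane_prepare_fb,
        .cleanup_fb = vmw_sou_primary_plane_cleanup_fb,
        .atomic_check = vmw_du_primary_plane_atomic_check,    /* assumed */
        .atomic_update = vmw_sou_primary_plane_atomic_update, /* assumed */
};

/* Registered once per primary plane during display unit init, e.g.: */
/*      drm_plane_helper_add(primary, &vmw_sou_primary_plane_helper_funcs); */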
int vmw_mmap(struct file *filp, struct vm_area_struct *vma)
{
        struct drm_file *file_priv;
        struct vmw_private *dev_priv;

        if (unlikely(vma->vm_pgoff < VMWGFX_FILE_PAGE_OFFSET)) {
                if (vmw_fifo_mmap(filp, vma) == 0)
                        return 0;
                return drm_mmap(filp, vma);
        }

        file_priv = filp->private_data;
        dev_priv = vmw_priv(file_priv->minor->dev);
        return ttm_bo_mmap(filp, vma, &dev_priv->bdev);
}
void vmw_irq_uninstall(struct drm_device *dev)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        if (!(dev_priv->capabilities & SVGA_CAP_IRQMASK))
                return;

        mutex_lock(&dev_priv->hw_mutex);
        vmw_write(dev_priv, SVGA_REG_IRQMASK, 0);
        mutex_unlock(&dev_priv->hw_mutex);

        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
}
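/*
 * Hedged wiring sketch: in the legacy DRM IRQ model (matching the
 * DRM_IRQ_ARGS signature used above) these hooks hang off struct
 * drm_driver, and drm_irq_install() invokes them around request_irq().
 * vmw_irq_postinstall is assumed to exist elsewhere in the driver; the
 * field layout follows older kernels that still had the .irq_* callbacks.
 */
static struct drm_driver vmw_drm_driver = {
        .driver_features = DRIVER_HAVE_IRQ | DRIVER_IRQ_SHARED,
        .irq_preinstall = vmw_irq_preinstall,
        .irq_postinstall = vmw_irq_postinstall, /* assumed, not shown here */
        .irq_uninstall = vmw_irq_uninstall,
        .irq_handler = vmw_irq_handler,
};

/* During load, once the device is mapped (exact signature varies by
 * kernel version): */
/*      ret = drm_irq_install(dev, dev->pdev->irq); */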
int vmw_fence_wait_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vmw_fence_wait_arg *arg =
                (struct drm_vmw_fence_wait_arg *)data;
        unsigned long timeout;

        if (!arg->cookie_valid) {
                arg->cookie_valid = 1;
                arg->kernel_cookie = jiffies + VMW_FENCE_WAIT_TIMEOUT;
        }

        timeout = jiffies;
        if (time_after_eq(timeout, (unsigned long)arg->kernel_cookie))
                return -EBUSY;

        timeout = (unsigned long)arg->kernel_cookie - timeout;
        return vmw_wait_fence(vmw_priv(dev), true, arg->sequence,
                              true, timeout);
}
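/*
 * Worked example of the cookie arithmetic above, with illustrative numbers
 * (assuming, say, HZ = 1000 and VMW_FENCE_WAIT_TIMEOUT = 3 * HZ): the first
 * call stores kernel_cookie = jiffies + 3000 and waits with a 3000-jiffy
 * budget. If a signal interrupts the wait and userspace restarts the ioctl
 * 1200 jiffies later with the same (now valid) cookie, the second pass
 * computes timeout = kernel_cookie - jiffies = 1800, so the total wall-clock
 * budget stays at roughly three seconds across restarts instead of resetting
 * on every retry. Once jiffies passes kernel_cookie, the ioctl gives up
 * with -EBUSY.
 */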
int vmw_get_cap_3d_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct drm_vmw_get_3d_cap_arg *arg =
                (struct drm_vmw_get_3d_cap_arg *)data;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t size;
        __le32 __iomem *fifo_mem;
        void __user *buffer = (void __user *)((unsigned long)(arg->buffer));
        void *bounce;
        int ret;

        if (unlikely(arg->pad64 != 0)) {
                DRM_ERROR("Illegal GET_3D_CAP argument.\n");
                return -EINVAL;
        }

        size = (SVGA_FIFO_3D_CAPS_LAST - SVGA_FIFO_3D_CAPS + 1) << 2;

        if (arg->max_size < size)
                size = arg->max_size;

        bounce = vmalloc(size);
        if (unlikely(bounce == NULL)) {
                DRM_ERROR("Failed to allocate bounce buffer for 3D caps.\n");
                return -ENOMEM;
        }

        fifo_mem = dev_priv->mmio_virt;
        memcpy_fromio(bounce, &fifo_mem[SVGA_FIFO_3D_CAPS], size);

        ret = copy_to_user(buffer, bounce, size);
        if (ret)
                ret = -EFAULT;
        vfree(bounce);

        if (unlikely(ret != 0))
                DRM_ERROR("Failed to report 3D caps info.\n");

        return ret;
}
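/*
 * Hedged usage sketch (userspace): fetching the 3D capability block. The
 * caller supplies a buffer pointer and its size; the kernel clamps the copy
 * to the FIFO caps region as shown above. DRM_VMW_GET_3D_CAP as the command
 * index and the write-only ioctl direction are assumptions based on the
 * UAPI, not shown in this section.
 */
#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static void *vmw_fetch_3d_caps(int fd, size_t buf_size)
{
        struct drm_vmw_get_3d_cap_arg arg;
        void *buf = calloc(1, buf_size);

        if (!buf)
                return NULL;

        memset(&arg, 0, sizeof(arg));
        arg.buffer = (uint64_t)(unsigned long)buf;
        arg.max_size = (uint32_t)buf_size; /* kernel copies at most this */

        if (drmCommandWrite(fd, DRM_VMW_GET_3D_CAP, &arg, sizeof(arg))) {
                free(buf);
                return NULL;
        }
        return buf;
}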
/**
 * vmw_sou_crtc_helper_disable - Turns off CRTC
 *
 * @crtc: CRTC to be turned off
 */
static void vmw_sou_crtc_helper_disable(struct drm_crtc *crtc)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        int ret;

        if (!crtc) {
                DRM_ERROR("CRTC is NULL\n");
                return;
        }

        sou = vmw_crtc_to_sou(crtc);
        dev_priv = vmw_priv(crtc->dev);

        if (sou->defined) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                if (ret)
                        DRM_ERROR("Failed to destroy Screen Object\n");
        }
}
int vmw_overlay_ioctl(struct drm_device *dev, void *data,
                      struct drm_file *file_priv)
{
        struct ttm_object_file *tfile = vmw_fpriv(file_priv)->tfile;
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_overlay *overlay = dev_priv->overlay_priv;
        struct drm_vmw_control_stream_arg *arg =
                (struct drm_vmw_control_stream_arg *)data;
        struct vmw_dma_buffer *buf;
        struct vmw_resource *res;
        int ret;

        if (!vmw_overlay_available(dev_priv))
                return -ENOSYS;

        ret = vmw_user_stream_lookup(dev_priv, tfile, &arg->stream_id, &res);
        if (ret)
                return ret;

        mutex_lock(&overlay->mutex);

        if (!arg->enabled) {
                ret = vmw_overlay_stop(dev_priv, arg->stream_id, false, true);
                goto out_unlock;
        }

        ret = vmw_user_dmabuf_lookup(tfile, arg->handle, &buf, NULL);
        if (ret)
                goto out_unlock;

        ret = vmw_overlay_update_stream(dev_priv, buf, arg, true);

        vmw_dmabuf_unreference(&buf);

out_unlock:
        mutex_unlock(&overlay->mutex);
        vmw_resource_unreference(&res);

        return ret;
}
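/*
 * Hedged usage sketch (userspace): disabling an overlay stream. Only the
 * fields actually referenced by the ioctl above are set; the full
 * drm_vmw_control_stream_arg layout and the DRM_VMW_CONTROL_STREAM command
 * index are assumptions from the UAPI header.
 */
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <drm/vmwgfx_drm.h>

static int vmw_overlay_off(int fd, uint32_t stream_id)
{
        struct drm_vmw_control_stream_arg arg;

        memset(&arg, 0, sizeof(arg));
        arg.stream_id = stream_id;
        arg.enabled = 0;        /* takes the vmw_overlay_stop() path above */

        return drmCommandWrite(fd, DRM_VMW_CONTROL_STREAM,
                               &arg, sizeof(arg));
}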
int vmw_fifo_debug_ioctl(struct drm_device *dev, void *data,
                         struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fifo_state *fifo_state = &dev_priv->fifo;
        struct drm_vmw_fifo_debug_arg *arg =
                (struct drm_vmw_fifo_debug_arg *)data;
        __le32 __user *buffer = (__le32 __user *)
                (unsigned long)arg->debug_buffer;

        if (unlikely(fifo_state->last_buffer == NULL))
                return -EINVAL;

        if (arg->debug_buffer_size < fifo_state->last_data_size) {
                arg->used_size = arg->debug_buffer_size;
                arg->did_not_fit = 1;
        } else {
                arg->used_size = fifo_state->last_data_size;
                arg->did_not_fit = 0;
        }

        /*
         * Note: copy_to_user() returns the number of bytes it could not
         * copy rather than a negative errno, so a partial copy propagates
         * to the caller as a positive value here.
         */
        return copy_to_user(buffer, fifo_state->last_buffer, arg->used_size);
}
irqreturn_t vmw_irq_handler(DRM_IRQ_ARGS)
{
        struct drm_device *dev = (struct drm_device *)arg;
        struct vmw_private *dev_priv = vmw_priv(dev);
        uint32_t status;

        spin_lock(&dev_priv->irq_lock);
        status = inl(dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
        spin_unlock(&dev_priv->irq_lock);

        if (status & SVGA_IRQFLAG_ANY_FENCE)
                wake_up_all(&dev_priv->fence_queue);
        if (status & SVGA_IRQFLAG_FIFO_PROGRESS)
                wake_up_all(&dev_priv->fifo_queue);

        if (likely(status)) {
                outl(status, dev_priv->io_start + VMWGFX_IRQSTATUS_PORT);
                return IRQ_HANDLED;
        }

        return IRQ_NONE;
}
static int vmw_driver_open(struct drm_device *dev, struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct vmw_fpriv *vmw_fp;
        int ret = -ENOMEM;

        vmw_fp = kzalloc(sizeof(*vmw_fp), GFP_KERNEL);
        if (unlikely(vmw_fp == NULL))
                return ret;

        vmw_fp->tfile = ttm_object_file_init(dev_priv->tdev, 10);
        if (unlikely(vmw_fp->tfile == NULL))
                goto out_no_tfile;

        file_priv->driver_priv = vmw_fp;

        return 0;

out_no_tfile:
        kfree(vmw_fp);
        return ret;
}
static int vmw_sou_crtc_page_flip(struct drm_crtc *crtc,
                                  struct drm_framebuffer *new_fb,
                                  struct drm_pending_vblank_event *event,
                                  uint32_t flags,
                                  struct drm_modeset_acquire_ctx *ctx)
{
        struct vmw_private *dev_priv = vmw_priv(crtc->dev);
        int ret;

        if (!vmw_kms_crtc_flippable(dev_priv, crtc))
                return -EINVAL;

        ret = drm_atomic_helper_page_flip(crtc, new_fb, event, flags, ctx);
        if (ret) {
                DRM_ERROR("Page flip error %d.\n", ret);
                return ret;
        }

        if (vmw_crtc_to_du(crtc)->is_implicit)
                vmw_kms_update_implicit_fb(dev_priv, crtc);

        return ret;
}
int vmw_getparam_ioctl(struct drm_device *dev, void *data,
                       struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_getparam_arg *param =
                (struct drm_vmw_getparam_arg *)data;

        switch (param->param) {
        case DRM_VMW_PARAM_NUM_STREAMS:
                param->value = vmw_overlay_num_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_NUM_FREE_STREAMS:
                param->value = vmw_overlay_num_free_overlays(dev_priv);
                break;
        case DRM_VMW_PARAM_3D:
                param->value = vmw_fifo_have_3d(dev_priv) ? 1 : 0;
                break;
        case DRM_VMW_PARAM_FIFO_OFFSET:
                param->value = dev_priv->mmio_start;
                break;
        case DRM_VMW_PARAM_HW_CAPS:
                param->value = dev_priv->capabilities;
                break;
        case DRM_VMW_PARAM_FIFO_CAPS:
                param->value = dev_priv->fifo.capabilities;
                break;
        case DRM_VMW_PARAM_MAX_FB_SIZE:
                param->value = dev_priv->vram_size;
                break;
        default:
                DRM_ERROR("Illegal vmwgfx get param request: %d\n",
                          param->param);
                return -EINVAL;
        }

        return 0;
}
static int vmw_pm_restore(struct device *kdev)
{
        struct pci_dev *pdev = to_pci_dev(kdev);
        struct drm_device *dev = pci_get_drvdata(pdev);
        struct vmw_private *dev_priv = vmw_priv(dev);
        int ret;

        vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
        (void) vmw_read(dev_priv, SVGA_REG_ID);

        if (dev_priv->enable_fb)
                vmw_fifo_resource_inc(dev_priv);

        ret = vmw_request_device(dev_priv);
        if (ret)
                return ret;

        if (dev_priv->enable_fb)
                __vmw_svga_enable(dev_priv);

        dev_priv->suspended = false;

        return 0;
}
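/*
 * Hedged wiring sketch: vmw_pm_freeze() and vmw_pm_restore() above plug
 * into the PCI driver's dev_pm_ops for hibernation. The .thaw callback
 * (vmw_pm_thaw) is assumed to exist elsewhere in the driver and is listed
 * only to show the usual trio; the driver name and the remaining pci_driver
 * fields (probe, id_table, ...) are elided from this sketch.
 */
static const struct dev_pm_ops vmw_pm_ops = {
        .freeze = vmw_pm_freeze,
        .thaw = vmw_pm_thaw,            /* assumed, not shown here */
        .restore = vmw_pm_restore,
};

static struct pci_driver vmw_pci_driver = {
        .name = "vmwgfx",               /* assumed driver name */
        .driver = {
                .pm = &vmw_pm_ops,
        },
};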
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
        struct vmw_private *dev_priv;
        struct vmw_screen_object_unit *sou;
        struct drm_connector *connector;
        struct drm_display_mode *mode;
        struct drm_encoder *encoder;
        struct vmw_framebuffer *vfb;
        struct drm_framebuffer *fb;
        struct drm_crtc *crtc;
        int ret = 0;

        if (!set)
                return -EINVAL;

        if (!set->crtc)
                return -EINVAL;

        /* get the sou */
        crtc = set->crtc;
        sou = vmw_crtc_to_sou(crtc);
        vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
        dev_priv = vmw_priv(crtc->dev);

        if (set->num_connectors > 1) {
                DRM_ERROR("too many connectors\n");
                return -EINVAL;
        }

        if (set->num_connectors == 1 &&
            set->connectors[0] != &sou->base.connector) {
                DRM_ERROR("connector doesn't match %p %p\n",
                          set->connectors[0], &sou->base.connector);
                return -EINVAL;
        }

        /* sou only supports one fb active at a time */
        if (sou->base.is_implicit &&
            dev_priv->sou_priv->implicit_fb && vfb &&
            !(dev_priv->sou_priv->num_implicit == 1 &&
              sou->active_implicit) &&
            dev_priv->sou_priv->implicit_fb != vfb) {
                DRM_ERROR("Multiple framebuffers not supported\n");
                return -EINVAL;
        }

        /* since they always map one to one these are safe */
        connector = &sou->base.connector;
        encoder = &sou->base.encoder;

        /* should we turn the crtc off? */
        if (set->num_connectors == 0 || !set->mode || !set->fb) {
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                /* the hardware has hung; don't do anything more */
                if (unlikely(ret != 0))
                        return ret;

                connector->encoder = NULL;
                encoder->crtc = NULL;
                crtc->fb = NULL;
                crtc->x = 0;
                crtc->y = 0;
                crtc->enabled = false;

                vmw_sou_del_active(dev_priv, sou);

                vmw_sou_backing_free(dev_priv, sou);

                return 0;
        }

        /* we now know we want to set a mode */
        mode = set->mode;
        fb = set->fb;

        if (set->x + mode->hdisplay > fb->width ||
            set->y + mode->vdisplay > fb->height) {
                DRM_ERROR("set outside of framebuffer\n");
                return -EINVAL;
        }

        vmw_fb_off(dev_priv);

        if (mode->hdisplay != crtc->mode.hdisplay ||
            mode->vdisplay != crtc->mode.vdisplay) {
                /* no need to check if depth is different, because backing
                 * store depth is forced to 4 by the device.
                 */
                ret = vmw_sou_fifo_destroy(dev_priv, sou);
                /* the hardware has hung; don't do anything more */
                if (unlikely(ret != 0))
                        return ret;

                vmw_sou_backing_free(dev_priv, sou);
        }

        if (!sou->buffer) {
                /* forced to depth 4 by the device */
                size_t size = mode->hdisplay * mode->vdisplay * 4;
                ret = vmw_sou_backing_alloc(dev_priv, sou, size);
                if (unlikely(ret != 0))
                        return ret;
        }

        ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
        if (unlikely(ret != 0)) {
                /*
                 * We are in a bit of a situation here: the hardware has
                 * hung and we may or may not have a buffer hanging off
                 * the screen object. The best thing to do is nothing if
                 * we were already defined; if not, just turn the crtc off.
                 * Not what userspace wants, but it has to cope.
                 */
                if (sou->defined)
                        return ret;

                connector->encoder = NULL;
                encoder->crtc = NULL;
                crtc->fb = NULL;
                crtc->x = 0;
                crtc->y = 0;
                crtc->enabled = false;

                return ret;
        }

        vmw_sou_add_active(dev_priv, sou, vfb);

        connector->encoder = encoder;
        encoder->crtc = crtc;
        crtc->mode = *mode;
        crtc->fb = fb;
        crtc->x = set->x;
        crtc->y = set->y;
        crtc->enabled = true;

        return 0;
}
int vmw_present_readback_ioctl(struct drm_device *dev, void *data,
                               struct drm_file *file_priv)
{
        struct vmw_private *dev_priv = vmw_priv(dev);
        struct drm_vmw_present_readback_arg *arg =
                (struct drm_vmw_present_readback_arg *)data;
        struct drm_vmw_fence_rep __user *user_fence_rep =
                (struct drm_vmw_fence_rep __user *)
                (unsigned long)arg->fence_rep;
        struct vmw_master *vmaster = vmw_master(file_priv->master);
        struct drm_vmw_rect __user *clips_ptr;
        struct drm_vmw_rect *clips = NULL;
        struct drm_framebuffer *fb;
        struct vmw_framebuffer *vfb;
        uint32_t num_clips;
        int ret;

        num_clips = arg->num_clips;
        clips_ptr = (struct drm_vmw_rect __user *)
                (unsigned long)arg->clips_ptr;

        if (unlikely(num_clips == 0))
                return 0;

        if (clips_ptr == NULL) {
                DRM_ERROR("Argument clips_ptr must be specified.\n");
                ret = -EINVAL;
                goto out_clips;
        }

        clips = kcalloc(num_clips, sizeof(*clips), GFP_KERNEL);
        if (clips == NULL) {
                DRM_ERROR("Failed to allocate clip rect list.\n");
                ret = -ENOMEM;
                goto out_clips;
        }

        ret = copy_from_user(clips, clips_ptr, num_clips * sizeof(*clips));
        if (ret) {
                DRM_ERROR("Failed to copy clip rects from userspace.\n");
                ret = -EFAULT;
                goto out_no_copy;
        }

        drm_modeset_lock_all(dev);

        fb = drm_framebuffer_lookup(dev, arg->fb_id);
        if (!fb) {
                DRM_ERROR("Invalid framebuffer id.\n");
                ret = -ENOENT;
                goto out_no_fb;
        }

        vfb = vmw_framebuffer_to_vfb(fb);
        if (!vfb->dmabuf) {
                DRM_ERROR("Framebuffer not dmabuf backed.\n");
                ret = -EINVAL;
                goto out_no_ttm_lock;
        }

        ret = ttm_read_lock(&vmaster->lock, true);
        if (unlikely(ret != 0))
                goto out_no_ttm_lock;

        ret = vmw_kms_readback(dev_priv, file_priv,
                               vfb, user_fence_rep,
                               clips, num_clips);

        ttm_read_unlock(&vmaster->lock);

out_no_ttm_lock:
        drm_framebuffer_unreference(fb);
out_no_fb:
        drm_modeset_unlock_all(dev);
out_no_copy:
        kfree(clips);
out_clips:
        return ret;
}