/** * vmw_sou_primary_plane_prepare_fb - allocate backing buffer * * @plane: display plane * @new_state: info on the new plane state, including the FB * * The SOU backing buffer is our equivalent of the display plane. * * Returns 0 on success */ static int vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct drm_framebuffer *new_fb = new_state->fb; struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc; struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct vmw_private *dev_priv; size_t size; int ret; if (!new_fb) { vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; return 0; } size = new_state->crtc_w * new_state->crtc_h * 4; if (vps->dmabuf) { if (vps->dmabuf_size == size) return 0; vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; } vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL); if (!vps->dmabuf) return -ENOMEM; dev_priv = vmw_priv(crtc->dev); vmw_svga_enable(dev_priv); /* After we have alloced the backing store might not be able to * resume the overlays, this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size, &vmw_vram_ne_placement, false, &vmw_dmabuf_bo_free); vmw_overlay_resume_all(dev_priv); if (ret != 0) vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ else vps->dmabuf_size = size; return ret; }
/**
 * vmw_driver_load - one-time driver/device initialization
 *
 * @dev: the DRM device being initialized
 * @chipset: chipset id passed through from the PCI probe
 *
 * Allocates and populates the driver-private structure, negotiates the
 * SVGA device id, reads device capability/size registers, and brings up
 * the TTM, IRQ, fence, KMS and overlay subsystems in order.  On any
 * failure the goto ladder at the bottom unwinds exactly the resources
 * acquired so far, in reverse order of acquisition.
 *
 * Returns 0 on success or a negative errno on failure.
 */
static int vmw_driver_load(struct drm_device *dev, unsigned long chipset)
{
	struct vmw_private *dev_priv;
	int ret;
	uint32_t svga_id;
	enum vmw_res_type i;
	bool refuse_dma = false;
	char host_log[100] = {0};

	dev_priv = kzalloc(sizeof(*dev_priv), GFP_KERNEL);
	if (unlikely(dev_priv == NULL)) {
		DRM_ERROR("Failed allocating a device private struct.\n");
		return -ENOMEM;
	}

	pci_set_master(dev->pdev);

	dev_priv->dev = dev;
	dev_priv->vmw_chipset = chipset;
	/* Deliberately not zero so the first fence wait does not match. */
	dev_priv->last_read_seqno = (uint32_t) -100;
	mutex_init(&dev_priv->cmdbuf_mutex);
	mutex_init(&dev_priv->release_mutex);
	mutex_init(&dev_priv->binding_mutex);
	mutex_init(&dev_priv->global_kms_state_mutex);
	rwlock_init(&dev_priv->resource_lock);
	ttm_lock_init(&dev_priv->reservation_sem);
	spin_lock_init(&dev_priv->hw_lock);
	spin_lock_init(&dev_priv->waiter_lock);
	spin_lock_init(&dev_priv->cap_lock);
	spin_lock_init(&dev_priv->svga_lock);

	/* One idr and LRU list per resource type. */
	for (i = vmw_res_context; i < vmw_res_max; ++i) {
		idr_init(&dev_priv->res_idr[i]);
		INIT_LIST_HEAD(&dev_priv->res_lru[i]);
	}

	mutex_init(&dev_priv->init_mutex);
	init_waitqueue_head(&dev_priv->fence_queue);
	init_waitqueue_head(&dev_priv->fifo_queue);
	dev_priv->fence_queue_waiters = 0;
	dev_priv->fifo_queue_waiters = 0;

	dev_priv->used_memory_size = 0;

	/* PCI BARs: 0 = I/O ports, 1 = VRAM, 2 = FIFO/MMIO. */
	dev_priv->io_start = pci_resource_start(dev->pdev, 0);
	dev_priv->vram_start = pci_resource_start(dev->pdev, 1);
	dev_priv->mmio_start = pci_resource_start(dev->pdev, 2);

	dev_priv->assume_16bpp = !!vmw_assume_16bpp;

	dev_priv->enable_fb = enable_fbdev;

	/* Negotiate the SVGA device version; only SVGA_ID_2 is supported. */
	vmw_write(dev_priv, SVGA_REG_ID, SVGA_ID_2);
	svga_id = vmw_read(dev_priv, SVGA_REG_ID);
	if (svga_id != SVGA_ID_2) {
		ret = -ENOSYS;
		DRM_ERROR("Unsupported SVGA ID 0x%x\n", svga_id);
		goto out_err0;
	}

	dev_priv->capabilities = vmw_read(dev_priv, SVGA_REG_CAPABILITIES);
	ret = vmw_dma_select_mode(dev_priv);
	if (unlikely(ret != 0)) {
		/* Not fatal: continue with GMR-less operation. */
		DRM_INFO("Restricting capabilities due to IOMMU setup.\n");
		refuse_dma = true;
	}

	dev_priv->vram_size = vmw_read(dev_priv, SVGA_REG_VRAM_SIZE);
	dev_priv->mmio_size = vmw_read(dev_priv, SVGA_REG_MEM_SIZE);
	dev_priv->fb_max_width = vmw_read(dev_priv, SVGA_REG_MAX_WIDTH);
	dev_priv->fb_max_height = vmw_read(dev_priv, SVGA_REG_MAX_HEIGHT);

	vmw_get_initial_size(dev_priv);

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		dev_priv->max_gmr_ids =
			vmw_read(dev_priv, SVGA_REG_GMR_MAX_IDS);
		dev_priv->max_gmr_pages =
			vmw_read(dev_priv, SVGA_REG_GMRS_MAX_PAGES);
		dev_priv->memory_size =
			vmw_read(dev_priv, SVGA_REG_MEMORY_SIZE);
		dev_priv->memory_size -= dev_priv->vram_size;
	} else {
		/*
		 * An arbitrary limit of 512MiB on surface
		 * memory. But all HWV8 hardware supports GMR2.
		 */
		dev_priv->memory_size = 512*1024*1024;
	}
	dev_priv->max_mob_pages = 0;
	dev_priv->max_mob_size = 0;
	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		uint64_t mem_size =
			vmw_read(dev_priv,
				 SVGA_REG_SUGGESTED_GBOBJECT_MEM_SIZE_KB);

		/*
		 * Workaround for low memory 2D VMs to compensate for the
		 * allocation taken by fbdev
		 */
		if (!(dev_priv->capabilities & SVGA_CAP_3D))
			mem_size *= 2;

		dev_priv->max_mob_pages = mem_size * 1024 / PAGE_SIZE;
		dev_priv->prim_bb_mem =
			vmw_read(dev_priv,
				 SVGA_REG_MAX_PRIMARY_BOUNDING_BOX_MEM);
		dev_priv->max_mob_size =
			vmw_read(dev_priv, SVGA_REG_MOB_MAX_SIZE);
		dev_priv->stdu_max_width =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_WIDTH);
		dev_priv->stdu_max_height =
			vmw_read(dev_priv, SVGA_REG_SCREENTARGET_MAX_HEIGHT);

		/*
		 * Device caps are queried by writing the cap index to
		 * SVGA_REG_DEV_CAP and reading the value back.
		 */
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_WIDTH);
		dev_priv->texture_max_width =
			vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP,
			  SVGA3D_DEVCAP_MAX_TEXTURE_HEIGHT);
		dev_priv->texture_max_height =
			vmw_read(dev_priv, SVGA_REG_DEV_CAP);
	} else {
		/* Non-GB hardware: fall back to fixed limits. */
		dev_priv->texture_max_width = 8192;
		dev_priv->texture_max_height = 8192;
		dev_priv->prim_bb_mem = dev_priv->vram_size;
	}

	vmw_print_capabilities(dev_priv->capabilities);

	ret = vmw_dma_masks(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	if (dev_priv->capabilities & SVGA_CAP_GMR2) {
		DRM_INFO("Max GMR ids is %u\n",
			 (unsigned)dev_priv->max_gmr_ids);
		DRM_INFO("Max number of GMR pages is %u\n",
			 (unsigned)dev_priv->max_gmr_pages);
		DRM_INFO("Max dedicated hypervisor surface memory is %u kiB\n",
			 (unsigned)dev_priv->memory_size / 1024);
	}
	DRM_INFO("Maximum display memory size is %u kiB\n",
		 dev_priv->prim_bb_mem / 1024);
	DRM_INFO("VRAM at 0x%08x size is %u kiB\n",
		 dev_priv->vram_start, dev_priv->vram_size / 1024);
	DRM_INFO("MMIO at 0x%08x size is %u kiB\n",
		 dev_priv->mmio_start, dev_priv->mmio_size / 1024);

	ret = vmw_ttm_global_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_err0;

	/* fbdev acts as the initial master until userspace takes over. */
	vmw_master_init(&dev_priv->fbdev_master);
	ttm_lock_set_kill(&dev_priv->fbdev_master.lock, false, SIGTERM);
	dev_priv->active_master = &dev_priv->fbdev_master;

	dev_priv->mmio_virt = memremap(dev_priv->mmio_start,
				       dev_priv->mmio_size, MEMREMAP_WB);

	if (unlikely(dev_priv->mmio_virt == NULL)) {
		ret = -ENOMEM;
		DRM_ERROR("Failed mapping MMIO.\n");
		goto out_err3;
	}

	/* Need mmio memory to check for fifo pitchlock cap. */
	if (!(dev_priv->capabilities & SVGA_CAP_DISPLAY_TOPOLOGY) &&
	    !(dev_priv->capabilities & SVGA_CAP_PITCHLOCK) &&
	    !vmw_fifo_have_pitchlock(dev_priv)) {
		ret = -ENOSYS;
		DRM_ERROR("Hardware has no pitchlock\n");
		goto out_err4;
	}

	dev_priv->tdev = ttm_object_device_init
		(dev_priv->mem_global_ref.object, 12, &vmw_prime_dmabuf_ops);

	if (unlikely(dev_priv->tdev == NULL)) {
		DRM_ERROR("Unable to initialize TTM object management.\n");
		ret = -ENOMEM;
		goto out_err4;
	}

	dev->dev_private = dev_priv;

	/*
	 * If the full region request fails (e.g. another fb driver such as
	 * vesafb holds the VRAM BAR), fall back to "stealth" mode and
	 * claim only the MMIO BAR.
	 */
	ret = pci_request_regions(dev->pdev, "vmwgfx probe");
	dev_priv->stealth = (ret != 0);
	if (dev_priv->stealth) {
		/*
		 * Request at least the mmio PCI resource.
		 */
		DRM_INFO("It appears like vesafb is loaded. "
			 "Ignore above error if any.\n");
		ret = pci_request_region(dev->pdev, 2, "vmwgfx stealth probe");
		if (unlikely(ret != 0)) {
			DRM_ERROR("Failed reserving the SVGA MMIO resource.\n");
			goto out_no_device;
		}
	}

	if (dev_priv->capabilities & SVGA_CAP_IRQMASK) {
		ret = drm_irq_install(dev, dev->pdev->irq);
		if (ret != 0) {
			DRM_ERROR("Failed installing irq: %d\n", ret);
			goto out_no_irq;
		}
	}

	dev_priv->fman = vmw_fence_manager_init(dev_priv);
	if (unlikely(dev_priv->fman == NULL)) {
		ret = -ENOMEM;
		goto out_no_fman;
	}

	ret = ttm_bo_device_init(&dev_priv->bdev,
				 dev_priv->bo_global_ref.ref.object,
				 &vmw_bo_driver,
				 dev->anon_inode->i_mapping,
				 VMWGFX_FILE_PAGE_OFFSET,
				 false);
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing TTM buffer object driver.\n");
		goto out_no_bdev;
	}

	/*
	 * Enable VRAM, but initially don't use it until SVGA is enabled and
	 * unhidden.
	 */
	ret = ttm_bo_init_mm(&dev_priv->bdev, TTM_PL_VRAM,
			     (dev_priv->vram_size >> PAGE_SHIFT));
	if (unlikely(ret != 0)) {
		DRM_ERROR("Failed initializing memory manager for VRAM.\n");
		goto out_no_vram;
	}
	dev_priv->bdev.man[TTM_PL_VRAM].use_type = false;

	/* GMR support is optional; the driver degrades gracefully. */
	dev_priv->has_gmr = true;
	if (((dev_priv->capabilities & (SVGA_CAP_GMR | SVGA_CAP_GMR2)) == 0) ||
	    refuse_dma ||
	    ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_GMR, VMW_PL_GMR) != 0) {
		DRM_INFO("No GMR memory available. "
			 "Graphics memory resources are very limited.\n");
		dev_priv->has_gmr = false;
	}

	if (dev_priv->capabilities & SVGA_CAP_GBOBJECTS) {
		dev_priv->has_mob = true;
		if (ttm_bo_init_mm(&dev_priv->bdev, VMW_PL_MOB,
				   VMW_PL_MOB) != 0) {
			DRM_INFO("No MOB memory available. "
				 "3D will be disabled.\n");
			dev_priv->has_mob = false;
		}
	}

	/* DX support requires MOBs; query the cap under the cap lock. */
	if (dev_priv->has_mob) {
		spin_lock(&dev_priv->cap_lock);
		vmw_write(dev_priv, SVGA_REG_DEV_CAP, SVGA3D_DEVCAP_DX);
		dev_priv->has_dx = !!vmw_read(dev_priv, SVGA_REG_DEV_CAP);
		spin_unlock(&dev_priv->cap_lock);
	}

	ret = vmw_kms_init(dev_priv);
	if (unlikely(ret != 0))
		goto out_no_kms;
	vmw_overlay_init(dev_priv);

	ret = vmw_request_device(dev_priv);
	if (ret)
		goto out_no_fifo;

	DRM_INFO("DX: %s\n", dev_priv->has_dx ? "yes." : "no.");

	/* Report driver build and version to the host for diagnostics. */
	snprintf(host_log, sizeof(host_log), "vmwgfx: %s-%s",
		 VMWGFX_REPO, VMWGFX_GIT_VERSION);
	vmw_host_log(host_log);

	memset(host_log, 0, sizeof(host_log));
	snprintf(host_log, sizeof(host_log), "vmwgfx: Module Version: %d.%d.%d",
		 VMWGFX_DRIVER_MAJOR, VMWGFX_DRIVER_MINOR,
		 VMWGFX_DRIVER_PATCHLEVEL);
	vmw_host_log(host_log);

	if (dev_priv->enable_fb) {
		vmw_fifo_resource_inc(dev_priv);
		vmw_svga_enable(dev_priv);
		vmw_fb_init(dev_priv);
	}

	dev_priv->pm_nb.notifier_call = vmwgfx_pm_notifier;
	register_pm_notifier(&dev_priv->pm_nb);

	return 0;

	/* Error unwind: reverse order of the acquisitions above. */
out_no_fifo:
	vmw_overlay_close(dev_priv);
	vmw_kms_close(dev_priv);
out_no_kms:
	if (dev_priv->has_mob)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_MOB);
	if (dev_priv->has_gmr)
		(void) ttm_bo_clean_mm(&dev_priv->bdev, VMW_PL_GMR);
	(void)ttm_bo_clean_mm(&dev_priv->bdev, TTM_PL_VRAM);
out_no_vram:
	(void)ttm_bo_device_release(&dev_priv->bdev);
out_no_bdev:
	vmw_fence_manager_takedown(dev_priv->fman);
out_no_fman:
	if (dev_priv->capabilities & SVGA_CAP_IRQMASK)
		drm_irq_uninstall(dev_priv->dev);
out_no_irq:
	if (dev_priv->stealth)
		pci_release_region(dev->pdev, 2);
	else
		pci_release_regions(dev->pdev);
out_no_device:
	ttm_object_device_release(&dev_priv->tdev);
out_err4:
	memunmap(dev_priv->mmio_virt);
out_err3:
	vmw_ttm_global_release(dev_priv);
out_err0:
	for (i = vmw_res_context; i < vmw_res_max; ++i)
		idr_destroy(&dev_priv->res_idr[i]);

	if (dev_priv->ctx.staged_bindings)
		vmw_binding_state_free(dev_priv->ctx.staged_bindings);
	kfree(dev_priv);
	return ret;
}
/**
 * vmw_sou_crtc_set_config - legacy mode set for a screen object unit
 *
 * @set: mode set parameters from DRM core
 *
 * Validates the request, then either turns the CRTC off (no connectors,
 * mode, or fb requested) or (re)creates the screen object and its backing
 * buffer for the new mode and activates it.
 *
 * Returns 0 on success or a negative errno on failure.  A non-zero return
 * from the fifo destroy/create helpers indicates the hardware has hung,
 * in which case as little further state as possible is touched.
 */
static int vmw_sou_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_screen_object_unit *sou;
	struct drm_connector *connector;
	struct drm_display_mode *mode;
	struct drm_encoder *encoder;
	struct vmw_framebuffer *vfb;
	struct drm_framebuffer *fb;
	struct drm_crtc *crtc;
	int ret = 0;

	if (!set)
		return -EINVAL;

	if (!set->crtc)
		return -EINVAL;

	/* get the sou */
	crtc = set->crtc;
	sou = vmw_crtc_to_sou(crtc);
	vfb = set->fb ? vmw_framebuffer_to_vfb(set->fb) : NULL;
	dev_priv = vmw_priv(crtc->dev);

	/* Each SOU drives exactly one connector. */
	if (set->num_connectors > 1) {
		DRM_ERROR("Too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &sou->base.connector) {
		DRM_ERROR("Connector doesn't match %p %p\n",
			  set->connectors[0], &sou->base.connector);
		return -EINVAL;
	}

	/* Only one active implicit frame-buffer at a time. */
	if (sou->base.is_implicit &&
	    dev_priv->implicit_fb && vfb &&
	    !(dev_priv->num_implicit == 1 &&
	      sou->base.active_implicit) &&
	    dev_priv->implicit_fb != vfb) {
		DRM_ERROR("Multiple implicit framebuffers not supported.\n");
		return -EINVAL;
	}

	/* since they always map one to one these are safe */
	connector = &sou->base.connector;
	encoder = &sou->base.encoder;

	/* Turning the CRTC off: tear down the screen object and backing. */
	if (set->num_connectors == 0 || !set->mode || !set->fb) {
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* The hardware has hung; don't do anything more. */
		if (unlikely(ret != 0))
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->primary->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;
		crtc->enabled = false;

		vmw_kms_del_active(dev_priv, &sou->base);

		vmw_sou_backing_free(dev_priv, sou);

		return 0;
	}

	/* We now know we want to set a mode. */
	mode = set->mode;
	fb = set->fb;

	if (set->x + mode->hdisplay > fb->width ||
	    set->y + mode->vdisplay > fb->height) {
		DRM_ERROR("set outside of framebuffer\n");
		return -EINVAL;
	}

	vmw_svga_enable(dev_priv);

	if (mode->hdisplay != crtc->mode.hdisplay ||
	    mode->vdisplay != crtc->mode.vdisplay) {
		/*
		 * No need to check if depth is different, because backing
		 * store depth is forced to 4 by the device.
		 */
		ret = vmw_sou_fifo_destroy(dev_priv, sou);
		/* The hardware has hung; don't do anything more. */
		if (unlikely(ret != 0))
			return ret;

		vmw_sou_backing_free(dev_priv, sou);
	}

	if (!sou->buffer) {
		/* Forced to depth 4 by the device. */
		size_t size = mode->hdisplay * mode->vdisplay * 4;
		ret = vmw_sou_backing_alloc(dev_priv, sou, size);
		if (unlikely(ret != 0))
			return ret;
	}

	ret = vmw_sou_fifo_create(dev_priv, sou, set->x, set->y, mode);
	if (unlikely(ret != 0)) {
		/*
		 * We are in a bit of a situation here: the hardware has
		 * hung and we may or may not have a buffer hanging off
		 * the screen object.  The best thing to do is nothing at
		 * all if we were already defined; if not, just mark the
		 * crtc as off.  Not what userspace wants, but there is no
		 * better recovery available here.
		 */
		if (sou->defined)
			return ret;

		connector->encoder = NULL;
		encoder->crtc = NULL;
		crtc->primary->fb = NULL;
		crtc->x = 0;
		crtc->y = 0;
		crtc->enabled = false;

		return ret;
	}

	vmw_kms_add_active(dev_priv, &sou->base, vfb);

	connector->encoder = encoder;
	encoder->crtc = crtc;
	crtc->mode = *mode;
	crtc->primary->fb = fb;
	crtc->x = set->x;
	crtc->y = set->y;
	crtc->enabled = true;

	return 0;
}
/**
 * vmw_stdu_crtc_set_config - Sets a mode
 *
 * @set:  mode parameters
 *
 * This function is the device-specific portion of the DRM CRTC mode set.
 * For the SVGA device, we do this by defining a Screen Target, binding a
 * GB Surface to that target, and finally update the screen target.
 *
 * Any currently defined screen target is always torn down first; a new one
 * is set up afterwards unless the request turns the CRTC off.
 *
 * RETURNS:
 * 0 on success, error code otherwise
 */
static int vmw_stdu_crtc_set_config(struct drm_mode_set *set)
{
	struct vmw_private *dev_priv;
	struct vmw_framebuffer *vfb;
	struct vmw_screen_target_display_unit *stdu;
	struct drm_display_mode *mode;
	struct drm_framebuffer *new_fb;
	struct drm_crtc *crtc;
	struct drm_encoder *encoder;
	struct drm_connector *connector;
	bool turning_off;
	int ret;

	if (!set || !set->crtc)
		return -EINVAL;

	crtc = set->crtc;
	stdu = vmw_crtc_to_stdu(crtc);
	mode = set->mode;
	new_fb = set->fb;
	dev_priv = vmw_priv(crtc->dev);
	/* An empty connector list, or missing mode/fb, means "turn off". */
	turning_off = set->num_connectors == 0 || !mode || !new_fb;
	vfb = (new_fb) ? vmw_framebuffer_to_vfb(new_fb) : NULL;

	/* Each STDU drives exactly one connector. */
	if (set->num_connectors > 1) {
		DRM_ERROR("Too many connectors\n");
		return -EINVAL;
	}

	if (set->num_connectors == 1 &&
	    set->connectors[0] != &stdu->base.connector) {
		DRM_ERROR("Connectors don't match %p %p\n",
			set->connectors[0], &stdu->base.connector);
		return -EINVAL;
	}

	if (!turning_off && (set->x + mode->hdisplay > new_fb->width ||
			     set->y + mode->vdisplay > new_fb->height)) {
		DRM_ERROR("Set outside of framebuffer\n");
		return -EINVAL;
	}

	/* Only one active implicit frame-buffer at a time. */
	mutex_lock(&dev_priv->global_kms_state_mutex);
	if (!turning_off && stdu->base.is_implicit && dev_priv->implicit_fb &&
	    !(dev_priv->num_implicit == 1 && stdu->base.active_implicit)
	    && dev_priv->implicit_fb != vfb) {
		mutex_unlock(&dev_priv->global_kms_state_mutex);
		DRM_ERROR("Multiple implicit framebuffers not supported.\n");
		return -EINVAL;
	}
	mutex_unlock(&dev_priv->global_kms_state_mutex);

	/* Since they always map one to one these are safe */
	connector = &stdu->base.connector;
	encoder = &stdu->base.encoder;

	/* Tear down the currently defined screen target, if any. */
	if (stdu->defined) {
		/* Unbind the surface; the display goes blank. */
		ret = vmw_stdu_bind_st(dev_priv, stdu, NULL);
		if (ret)
			return ret;

		vmw_stdu_unpin_display(stdu);
		/* Best-effort flush of the now-blank target. */
		(void) vmw_stdu_update_st(dev_priv, stdu);
		vmw_kms_del_active(dev_priv, &stdu->base);

		ret = vmw_stdu_destroy_st(dev_priv, stdu);
		if (ret)
			return ret;

		crtc->primary->fb = NULL;
		crtc->enabled = false;
		encoder->crtc = NULL;
		connector->encoder = NULL;
		stdu->content_fb_type = SAME_AS_DISPLAY;
		crtc->x = set->x;
		crtc->y = set->y;
	}

	if (turning_off)
		return 0;

	/*
	 * Steps to displaying a surface, assume surface is already
	 * bound:
	 *   1. define a screen target
	 *   2. bind a fb to the screen target
	 *   3. update that screen target (this is done later by
	 *      vmw_kms_stdu_do_surface_dirty_or_present)
	 */
	/*
	 * Note on error handling: We can't really restore the crtc to
	 * it's original state on error, but we at least update the
	 * current state to what's submitted to hardware to enable
	 * future recovery.
	 */
	vmw_svga_enable(dev_priv);
	ret = vmw_stdu_define_st(dev_priv, stdu, mode, set->x, set->y);
	if (ret)
		return ret;

	crtc->x = set->x;
	crtc->y = set->y;
	crtc->mode = *mode;

	ret = vmw_stdu_bind_fb(dev_priv, crtc, mode, new_fb);
	if (ret)
		return ret;

	vmw_kms_add_active(dev_priv, &stdu->base, vfb);
	crtc->enabled = true;
	connector->encoder = encoder;
	encoder->crtc = crtc;

	return 0;
}
/** * vmw_sou_primary_plane_prepare_fb - allocate backing buffer * * @plane: display plane * @new_state: info on the new plane state, including the FB * * The SOU backing buffer is our equivalent of the display plane. * * Returns 0 on success */ static int vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct drm_framebuffer *new_fb = new_state->fb; struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc; struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct vmw_private *dev_priv; size_t size; int ret; if (!new_fb) { vmw_bo_unreference(&vps->bo); vps->bo_size = 0; return 0; } size = new_state->crtc_w * new_state->crtc_h * 4; dev_priv = vmw_priv(crtc->dev); if (vps->bo) { if (vps->bo_size == size) { /* * Note that this might temporarily up the pin-count * to 2, until cleanup_fb() is called. */ return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); } vmw_bo_unreference(&vps->bo); vps->bo_size = 0; } vps->bo = kzalloc(sizeof(*vps->bo), GFP_KERNEL); if (!vps->bo) return -ENOMEM; vmw_svga_enable(dev_priv); /* After we have alloced the backing store might not be able to * resume the overlays, this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); ret = vmw_bo_init(dev_priv, vps->bo, size, &vmw_vram_ne_placement, false, &vmw_bo_bo_free); vmw_overlay_resume_all(dev_priv); if (ret) { vps->bo = NULL; /* vmw_bo_init frees on error */ return ret; } vps->bo_size = size; /* * TTM already thinks the buffer is pinned, but make sure the * pin_count is upped. */ return vmw_bo_pin_in_vram(dev_priv, vps->bo, true); }
/** * vmw_stdu_crtc_set_config - Sets a mode * * @set: mode parameters * * This function is the device-specific portion of the DRM CRTC mode set. * For the SVGA device, we do this by defining a Screen Target, binding a * GB Surface to that target, and finally update the screen target. * * RETURNS: * 0 on success, error code otherwise */ static int vmw_stdu_crtc_set_config(struct drm_mode_set *set) { struct vmw_private *dev_priv; struct vmw_screen_target_display_unit *stdu; struct vmw_framebuffer *vfb; struct vmw_framebuffer_surface *new_vfbs; struct drm_display_mode *mode; struct drm_framebuffer *new_fb; struct drm_crtc *crtc; struct drm_encoder *encoder; struct drm_connector *connector; int ret; if (!set || !set->crtc) return -EINVAL; crtc = set->crtc; crtc->x = set->x; crtc->y = set->y; stdu = vmw_crtc_to_stdu(crtc); mode = set->mode; new_fb = set->fb; dev_priv = vmw_priv(crtc->dev); if (set->num_connectors > 1) { DRM_ERROR("Too many connectors\n"); return -EINVAL; } if (set->num_connectors == 1 && set->connectors[0] != &stdu->base.connector) { DRM_ERROR("Connectors don't match %p %p\n", set->connectors[0], &stdu->base.connector); return -EINVAL; } /* Since they always map one to one these are safe */ connector = &stdu->base.connector; encoder = &stdu->base.encoder; /* * After this point the CRTC will be considered off unless a new fb * is bound */ if (stdu->defined) { /* Unbind current surface by binding an invalid one */ ret = vmw_stdu_bind_st(dev_priv, stdu, NULL); if (unlikely(ret != 0)) return ret; /* Update Screen Target, display will now be blank */ if (crtc->primary->fb) { vmw_stdu_update_st(dev_priv, stdu); if (unlikely(ret != 0)) return ret; } crtc->primary->fb = NULL; crtc->enabled = false; encoder->crtc = NULL; connector->encoder = NULL; vmw_stdu_unpin_display(stdu); stdu->content_fb = NULL; stdu->content_fb_type = SAME_AS_DISPLAY; ret = vmw_stdu_destroy_st(dev_priv, stdu); /* The hardware is hung, give up */ if (unlikely(ret != 0)) return ret; } /* 
Any of these conditions means the caller wants CRTC off */ if (set->num_connectors == 0 || !mode || !new_fb) return 0; if (set->x + mode->hdisplay > new_fb->width || set->y + mode->vdisplay > new_fb->height) { DRM_ERROR("Set outside of framebuffer\n"); return -EINVAL; } stdu->content_fb = new_fb; vfb = vmw_framebuffer_to_vfb(stdu->content_fb); if (vfb->dmabuf) stdu->content_fb_type = SEPARATE_DMA; /* * If the requested mode is different than the width and height * of the FB or if the content buffer is a DMA buf, then allocate * a display FB that matches the dimension of the mode */ if (mode->hdisplay != new_fb->width || mode->vdisplay != new_fb->height || stdu->content_fb_type != SAME_AS_DISPLAY) { struct vmw_surface content_srf; struct drm_vmw_size display_base_size = {0}; struct vmw_surface *display_srf; display_base_size.width = mode->hdisplay; display_base_size.height = mode->vdisplay; display_base_size.depth = 1; /* * If content buffer is a DMA buf, then we have to construct * surface info */ if (stdu->content_fb_type == SEPARATE_DMA) { switch (new_fb->bits_per_pixel) { case 32: content_srf.format = SVGA3D_X8R8G8B8; break; case 16: content_srf.format = SVGA3D_R5G6B5; break; case 8: content_srf.format = SVGA3D_P8; break; default: DRM_ERROR("Invalid format\n"); ret = -EINVAL; goto err_unref_content; } content_srf.flags = 0; content_srf.mip_levels[0] = 1; content_srf.multisample_count = 0; } else { stdu->content_fb_type = SEPARATE_SURFACE; new_vfbs = vmw_framebuffer_to_vfbs(new_fb); content_srf = *new_vfbs->surface; } ret = vmw_surface_gb_priv_define(crtc->dev, 0, /* because kernel visible only */ content_srf.flags, content_srf.format, true, /* a scanout buffer */ content_srf.mip_levels[0], content_srf.multisample_count, 0, display_base_size, &display_srf); if (unlikely(ret != 0)) { DRM_ERROR("Cannot allocate a display FB.\n"); goto err_unref_content; } stdu->display_srf = display_srf; } else { new_vfbs = vmw_framebuffer_to_vfbs(new_fb); stdu->display_srf = 
new_vfbs->surface; } ret = vmw_stdu_pin_display(stdu); if (unlikely(ret != 0)) { stdu->display_srf = NULL; goto err_unref_content; } vmw_svga_enable(dev_priv); /* * Steps to displaying a surface, assume surface is already * bound: * 1. define a screen target * 2. bind a fb to the screen target * 3. update that screen target (this is done later by * vmw_kms_stdu_do_surface_dirty_or_present) */ ret = vmw_stdu_define_st(dev_priv, stdu); if (unlikely(ret != 0)) goto err_unpin_display_and_content; ret = vmw_stdu_bind_st(dev_priv, stdu, &stdu->display_srf->res); if (unlikely(ret != 0)) goto err_unpin_destroy_st; connector->encoder = encoder; encoder->crtc = crtc; crtc->mode = *mode; crtc->primary->fb = new_fb; crtc->enabled = true; return ret; err_unpin_destroy_st: vmw_stdu_destroy_st(dev_priv, stdu); err_unpin_display_and_content: vmw_stdu_unpin_display(stdu); err_unref_content: stdu->content_fb = NULL; return ret; }