/** * Allocate the backing store for the buffer. */ static int vmw_sou_backing_alloc(struct vmw_private *dev_priv, struct vmw_screen_object_unit *sou, unsigned long size) { int ret; if (sou->buffer_size == size) return 0; if (sou->buffer) vmw_sou_backing_free(dev_priv, sou); sou->buffer = kzalloc(sizeof(*sou->buffer), GFP_KERNEL); if (unlikely(sou->buffer == NULL)) return -ENOMEM; /* After we have alloced the backing store might not be able to * resume the overlays, this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); ret = vmw_dmabuf_init(dev_priv, sou->buffer, size, &vmw_vram_ne_placement, false, &vmw_dmabuf_bo_free); vmw_overlay_resume_all(dev_priv); if (unlikely(ret != 0)) sou->buffer = NULL; /* vmw_dmabuf_init frees on error */ else sou->buffer_size = size; return ret; }
/** * vmw_sou_primary_plane_prepare_fb - allocate backing buffer * * @plane: display plane * @new_state: info on the new plane state, including the FB * * The SOU backing buffer is our equivalent of the display plane. * * Returns 0 on success */ static int vmw_sou_primary_plane_prepare_fb(struct drm_plane *plane, struct drm_plane_state *new_state) { struct drm_framebuffer *new_fb = new_state->fb; struct drm_crtc *crtc = plane->state->crtc ?: new_state->crtc; struct vmw_plane_state *vps = vmw_plane_state_to_vps(new_state); struct vmw_private *dev_priv; size_t size; int ret; if (!new_fb) { vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; return 0; } size = new_state->crtc_w * new_state->crtc_h * 4; if (vps->dmabuf) { if (vps->dmabuf_size == size) return 0; vmw_dmabuf_unreference(&vps->dmabuf); vps->dmabuf_size = 0; } vps->dmabuf = kzalloc(sizeof(*vps->dmabuf), GFP_KERNEL); if (!vps->dmabuf) return -ENOMEM; dev_priv = vmw_priv(crtc->dev); vmw_svga_enable(dev_priv); /* After we have alloced the backing store might not be able to * resume the overlays, this is preferred to failing to alloc. */ vmw_overlay_pause_all(dev_priv); ret = vmw_dmabuf_init(dev_priv, vps->dmabuf, size, &vmw_vram_ne_placement, false, &vmw_dmabuf_bo_free); vmw_overlay_resume_all(dev_priv); if (ret != 0) vps->dmabuf = NULL; /* vmw_dmabuf_init frees on error */ else vps->dmabuf_size = size; return ret; }
/**
 * vmw_dummy_query_bo_create - create a bo to hold a dummy query result
 *
 * @dev_priv: A device private structure.
 *
 * This function creates a small buffer object that holds the query
 * result for dummy queries emitted as query barriers.
 * The function will then map the first page and initialize a pending
 * occlusion query result structure, Finally it will unmap the buffer.
 * No interruptible waits are done within this function.
 *
 * Returns an error if bo creation or initialization fails.
 */
static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv)
{
	int ret;
	struct vmw_dma_buffer *vbo;
	struct ttm_bo_kmap_obj map;
	volatile SVGA3dQueryResult *result;
	bool dummy;

	/*
	 * Create the vbo as pinned, so that a tryreserve will
	 * immediately succeed. This is because we're the only
	 * user of the bo currently.
	 */
	vbo = kzalloc(sizeof(*vbo), GFP_KERNEL);
	if (!vbo)
		return -ENOMEM;

	ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE,
			      &vmw_sys_ne_placement, false,
			      &vmw_dmabuf_bo_free);
	/* vmw_dmabuf_init frees vbo on error, so no cleanup needed here. */
	if (unlikely(ret != 0))
		return ret;

	/*
	 * Non-interruptible, no-wait reserve: we are the sole user of
	 * this bo, so failure here would indicate a driver bug.
	 */
	ret = ttm_bo_reserve(&vbo->base, false, true, NULL);
	BUG_ON(ret != 0);
	vmw_bo_pin_reserved(vbo, true);

	/* Map the first (and only) page to seed the query result. */
	ret = ttm_bo_kmap(&vbo->base, 0, 1, &map);
	if (likely(ret == 0)) {
		result = ttm_kmap_obj_virtual(&map, &dummy);
		result->totalSize = sizeof(*result);
		/* Mark the result as pending so query barriers behave. */
		result->state = SVGA3D_QUERYSTATE_PENDING;
		result->result32 = 0xff;
		ttm_bo_kunmap(&map);
	}
	/* Unpin and unreserve regardless of whether the kmap succeeded. */
	vmw_bo_pin_reserved(vbo, false);
	ttm_bo_unreserve(&vbo->base);

	if (unlikely(ret != 0)) {
		DRM_ERROR("Dummy query buffer map failed.\n");
		/* Drop our reference; this frees the bo. */
		vmw_dmabuf_unreference(&vbo);
	} else
		/* Hand ownership of the bo to the device private. */
		dev_priv->dummy_query_bo = vbo;

	return ret;
}