/*
 * Create a GEM object and a userspace handle for it.
 *
 * On success *qobj points at the new bo and *handle holds the userspace
 * handle, which owns the only reference.  Returns 0 or a negative errno.
 *
 * Fixes: the original returned -ENOMEM regardless of the actual error from
 * qxl_gem_object_create(), and leaked the allocation reference when
 * drm_gem_handle_create() failed.
 */
int qxl_gem_object_create_with_handle(struct qxl_device *qdev,
				      struct drm_file *file_priv,
				      u32 domain,
				      size_t size,
				      struct qxl_surface *surf,
				      struct qxl_bo **qobj,
				      uint32_t *handle)
{
	struct drm_gem_object *gobj;
	int r;

	BUG_ON(!qobj);
	BUG_ON(!handle);

	r = qxl_gem_object_create(qdev, size, 0,
				  domain,
				  false, false, surf,
				  &gobj);
	if (r)
		return r;	/* propagate the real error, not -ENOMEM */
	r = drm_gem_handle_create(file_priv, gobj, handle);
	if (r) {
		/* no handle was created: drop the allocation reference */
		drm_gem_object_unreference_unlocked(gobj);
		return r;
	}
	/* drop reference from allocate - handle holds it now */
	*qobj = gem_to_qxl_bo(gobj);
	drm_gem_object_unreference_unlocked(gobj);
	return 0;
}
static int qxl_framebuffer_surface_dirty(struct drm_framebuffer *fb, struct drm_file *file_priv, unsigned flags, unsigned color, struct drm_clip_rect *clips, unsigned num_clips) { /* TODO: vmwgfx where this was cribbed from had locking. Why? */ struct qxl_framebuffer *qxl_fb = to_qxl_framebuffer(fb); struct qxl_device *qdev = qxl_fb->base.dev->dev_private; struct drm_clip_rect norect; struct qxl_bo *qobj; int inc = 1; qobj = gem_to_qxl_bo(qxl_fb->obj); /* if we aren't primary surface ignore this */ if (!qobj->is_primary) return 0; if (!num_clips) { num_clips = 1; clips = &norect; norect.x1 = norect.y1 = 0; norect.x2 = fb->width; norect.y2 = fb->height; } else if (flags & DRM_MODE_FB_DIRTY_ANNOTATE_COPY) { num_clips /= 2; inc = 2; /* skip source rects */ } qxl_draw_dirty_fb(qdev, qxl_fb, qobj, flags, color, clips, num_clips, inc); return 0; }
static int qxlfb_create_pinned_object(struct qxl_fbdev *qfbdev, const struct drm_mode_fb_cmd2 *mode_cmd, struct drm_gem_object **gobj_p) { struct qxl_device *qdev = qfbdev->qdev; struct drm_gem_object *gobj = NULL; struct qxl_bo *qbo = NULL; int ret; int aligned_size, size; int height = mode_cmd->height; int bpp; int depth; drm_fb_get_bpp_depth(mode_cmd->pixel_format, &bpp, &depth); size = mode_cmd->pitches[0] * height; aligned_size = ALIGN(size, PAGE_SIZE); /* TODO: unallocate and reallocate surface0 for real. Hack to just * have a large enough surface0 for 1024x768 Xorg 32bpp mode */ ret = qxl_gem_object_create(qdev, aligned_size, 0, QXL_GEM_DOMAIN_SURFACE, false, /* is discardable */ false, /* is kernel (false means device) */ NULL, &gobj); if (ret) { pr_err("failed to allocate framebuffer (%d)\n", aligned_size); return -ENOMEM; } qbo = gem_to_qxl_bo(gobj); qbo->surf.width = mode_cmd->width; qbo->surf.height = mode_cmd->height; qbo->surf.stride = mode_cmd->pitches[0]; qbo->surf.format = SPICE_SURFACE_FMT_32_xRGB; ret = qxl_bo_reserve(qbo, false); if (unlikely(ret != 0)) goto out_unref; ret = qxl_bo_pin(qbo, QXL_GEM_DOMAIN_SURFACE, NULL); if (ret) { qxl_bo_unreserve(qbo); goto out_unref; } ret = qxl_bo_kmap(qbo, NULL); qxl_bo_unreserve(qbo); /* unreserve, will be mmaped */ if (ret) goto out_unref; *gobj_p = gobj; return 0; out_unref: qxlfb_destroy_pinned_object(gobj); *gobj_p = NULL; return ret; }
/*
 * PRIME vmap hook: kernel-map the backing bo and return the mapping, or an
 * ERR_PTR-encoded errno on failure.
 */
void *qxl_gem_prime_vmap(struct drm_gem_object *obj)
{
	struct qxl_bo *bo = gem_to_qxl_bo(obj);
	void *ptr;
	int ret;

	ret = qxl_bo_kmap(bo, &ptr);
	return ret < 0 ? ERR_PTR(ret) : ptr;
}
/*
 * GEM free hook: evict any device surface backing the bo, then drop the
 * final TTM reference, which destroys the buffer object.
 */
void qxl_gem_object_free(struct drm_gem_object *gobj)
{
	struct qxl_bo *qobj = gem_to_qxl_bo(gobj);
	struct qxl_device *qdev = gobj->dev->dev_private;
	struct ttm_buffer_object *tbo = &qobj->tbo;

	qxl_surface_evict(qdev, qobj, false);
	ttm_bo_unref(&tbo);
}
/*
 * Undo qxlfb_create_pinned_object(): unmap and unpin the bo (if it can be
 * reserved) and drop the GEM reference.
 */
static void qxlfb_destroy_pinned_object(struct drm_gem_object *gobj)
{
	struct qxl_bo *qbo = gem_to_qxl_bo(gobj);

	if (likely(qxl_bo_reserve(qbo, false) == 0)) {
		qxl_bo_kunmap(qbo);
		qxl_bo_unpin(qbo);
		qxl_bo_unreserve(qbo);
	}
	drm_gem_object_unreference_unlocked(gobj);
}
static void qxl_crtc_disable(struct drm_crtc *crtc) { struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; if (crtc->fb) { struct qxl_framebuffer *qfb = to_qxl_framebuffer(crtc->fb); struct qxl_bo *bo = gem_to_qxl_bo(qfb->obj); int ret; ret = qxl_bo_reserve(bo, false); qxl_bo_unpin(bo); qxl_bo_unreserve(bo); crtc->fb = NULL; } qxl_monitors_config_set(qdev, qcrtc->index, 0, 0, 0, 0, 0); qxl_send_monitors_config(qdev); }
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
					 struct drm_file *file_priv,
					 uint64_t handle,
					 struct qxl_release *release)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
	if (!gobj)
		return NULL;

	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_release_list_add(release, qobj);
	if (ret) {
		/* Fixes: the original leaked the lookup reference here;
		 * the bo is not on the release list, so drop our ref. */
		drm_gem_object_unreference_unlocked(gobj);
		return NULL;
	}
	return qobj;
}
/* return holding the reference to this object */
static int qxlhw_handle_to_bo(struct drm_file *file_priv, uint64_t handle,
			      struct qxl_release *release,
			      struct qxl_bo **qbo_p)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(file_priv, handle);
	if (gobj == NULL)
		return -EINVAL;

	qobj = gem_to_qxl_bo(gobj);

	/* the release list holds its own reference once added, so the
	 * lookup reference can be dropped either way */
	ret = qxl_release_list_add(release, qobj);
	drm_gem_object_unreference_unlocked(gobj);
	if (ret)
		return ret;

	*qbo_p = qobj;
	return 0;
}
/* return holding the reference to this object */
static struct qxl_bo *qxlhw_handle_to_bo(struct qxl_device *qdev,
					 struct drm_file *file_priv,
					 uint64_t handle,
					 struct qxl_reloc_list *reloc_list)
{
	struct drm_gem_object *gobj;
	struct qxl_bo *qobj;
	int ret;

	gobj = drm_gem_object_lookup(qdev->ddev, file_priv, handle);
	if (!gobj) {
		DRM_ERROR("bad bo handle %lld\n", handle);
		return NULL;
	}
	qobj = gem_to_qxl_bo(gobj);

	ret = qxl_bo_list_add(reloc_list, qobj);
	if (ret) {
		/* Fixes: the original leaked the lookup reference here;
		 * the bo never made it onto the reloc list, so drop it. */
		drm_gem_object_unreference_unlocked(gobj);
		return NULL;
	}
	return qobj;
}
int qxl_create_monitors_object(struct qxl_device *qdev) { int ret; struct drm_gem_object *gobj; int max_allowed = qxl_num_crtc; int monitors_config_size = sizeof(struct qxl_monitors_config) + max_allowed * sizeof(struct qxl_head); ret = qxl_gem_object_create(qdev, monitors_config_size, 0, QXL_GEM_DOMAIN_VRAM, false, false, NULL, &gobj); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); return -ENOMEM; } qdev->monitors_config_bo = gem_to_qxl_bo(gobj); ret = qxl_bo_reserve(qdev->monitors_config_bo, false); if (ret) return ret; ret = qxl_bo_pin(qdev->monitors_config_bo, QXL_GEM_DOMAIN_VRAM, NULL); if (ret) { qxl_bo_unreserve(qdev->monitors_config_bo); return ret; } qxl_bo_unreserve(qdev->monitors_config_bo); qxl_bo_kmap(qdev->monitors_config_bo, NULL); qdev->monitors_config = qdev->monitors_config_bo->kptr; qdev->ram_header->monitors_config = qxl_bo_physical_address(qdev, qdev->monitors_config_bo, 0); memset(qdev->monitors_config, 0, monitors_config_size); qdev->monitors_config->max_allowed = max_allowed; return 0; }
/*
 * Set the hardware cursor shape from a userspace bo.
 *
 * The user bo is pinned in the CPU domain and kernel-mapped so its pixels
 * can be copied into a freshly allocated 64x64 ARGB cursor bo, which is then
 * handed to the device via a QXL_CURSOR_SET command on the cursor ring.
 * A handle of 0 hides the cursor instead.
 *
 * NOTE(review): the incoming width/height parameters are ignored; the cursor
 * header is hard-coded to 64x64 (matching `size = 64*64*4`) — presumably the
 * device cursor is fixed-size, but confirm against the spice protocol.
 *
 * The cleanup ordering below (backoff -> free bo -> free release -> kunmap ->
 * unpin -> unref) mirrors the acquisition order exactly; do not reorder.
 */
static int qxl_crtc_cursor_set2(struct drm_crtc *crtc,
				struct drm_file *file_priv,
				uint32_t handle,
				uint32_t width,
				uint32_t height, int32_t hot_x, int32_t hot_y)
{
	struct drm_device *dev = crtc->dev;
	struct qxl_device *qdev = dev->dev_private;
	struct qxl_crtc *qcrtc = to_qxl_crtc(crtc);
	struct drm_gem_object *obj;
	struct qxl_cursor *cursor;
	struct qxl_cursor_cmd *cmd;
	struct qxl_bo *cursor_bo, *user_bo;
	struct qxl_release *release;
	void *user_ptr;

	int size = 64*64*4;	/* fixed 64x64 @ 4 bytes/pixel */
	int ret = 0;
	if (!handle)
		return qxl_hide_cursor(qdev);

	obj = drm_gem_object_lookup(crtc->dev, file_priv, handle);
	if (!obj) {
		DRM_ERROR("cannot find cursor object\n");
		return -ENOENT;
	}

	user_bo = gem_to_qxl_bo(obj);

	/* pin the user bo so its pages stay put while we copy from it */
	ret = qxl_bo_reserve(user_bo, false);
	if (ret)
		goto out_unref;

	ret = qxl_bo_pin(user_bo, QXL_GEM_DOMAIN_CPU, NULL);
	qxl_bo_unreserve(user_bo);
	if (ret)
		goto out_unref;

	ret = qxl_bo_kmap(user_bo, &user_ptr);
	if (ret)
		goto out_unpin;

	ret = qxl_alloc_release_reserved(qdev, sizeof(*cmd),
					 QXL_RELEASE_CURSOR_CMD,
					 &release, NULL);
	if (ret)
		goto out_kunmap;

	/* device-visible bo holding the cursor header + pixel data */
	ret = qxl_alloc_bo_reserved(qdev, release,
				    sizeof(struct qxl_cursor) + size,
				    &cursor_bo);
	if (ret)
		goto out_free_release;

	ret = qxl_release_reserve_list(release, false);
	if (ret)
		goto out_free_bo;

	ret = qxl_bo_kmap(cursor_bo, (void **)&cursor);
	if (ret)
		goto out_backoff;

	cursor->header.unique = 0;
	cursor->header.type = SPICE_CURSOR_TYPE_ALPHA;
	cursor->header.width = 64;
	cursor->header.height = 64;
	cursor->header.hot_spot_x = hot_x;
	cursor->header.hot_spot_y = hot_y;
	cursor->data_size = size;
	/* single chunk: no chaining */
	cursor->chunk.next_chunk = 0;
	cursor->chunk.prev_chunk = 0;
	cursor->chunk.data_size = size;

	memcpy(cursor->chunk.data, user_ptr, size);

	qxl_bo_kunmap(cursor_bo);

	qxl_bo_kunmap(user_bo);

	cmd = (struct qxl_cursor_cmd *)qxl_release_map(qdev, release);
	cmd->type = QXL_CURSOR_SET;
	cmd->u.set.position.x = qcrtc->cur_x;
	cmd->u.set.position.y = qcrtc->cur_y;

	cmd->u.set.shape = qxl_bo_physical_address(qdev, cursor_bo, 0);

	cmd->u.set.visible = 1;
	qxl_release_unmap(qdev, release, &cmd->release_info);

	qxl_push_cursor_ring_release(qdev, release, QXL_CMD_CURSOR, false);
	qxl_release_fence_buffer_objects(release);

	/* finish with the userspace bo */
	ret = qxl_bo_reserve(user_bo, false);
	if (!ret) {
		qxl_bo_unpin(user_bo);
		qxl_bo_unreserve(user_bo);
	}
	drm_gem_object_unreference_unlocked(obj);

	/* drop our local ref; the release now keeps cursor_bo alive */
	qxl_bo_unref(&cursor_bo);

	return ret;

out_backoff:
	qxl_release_backoff_reserve_list(release);
out_free_bo:
	qxl_bo_unref(&cursor_bo);
out_free_release:
	qxl_release_free(qdev, release);
out_kunmap:
	qxl_bo_kunmap(user_bo);
out_unpin:
	qxl_bo_unpin(user_bo);
out_unref:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}
/* Return true when @qobj is the bo backing the fbdev framebuffer. */
bool qxl_fbdev_qobj_is_fb(struct qxl_device *qdev, struct qxl_bo *qobj)
{
	return qobj == gem_to_qxl_bo(qdev->mode_info.qfbdev->qfb.obj);
}
/*
 * fb_helper hook: build the fbdev emulation framebuffer.  Allocates a pinned
 * scanout bo, a vmalloc'd shadow buffer that deferred-io copies from, an
 * fb_info, and wires them into the qxl framebuffer embedded in @qfbdev.
 *
 * NOTE(review): the error path looks suspect — when `fb && ret`, gobj is
 * unreferenced twice (once inside the if, once after), and kfree(fb) is
 * called on `&qfbdev->qfb.base`, which is embedded in qfbdev rather than
 * separately allocated.  This mirrors code cribbed from other drivers where
 * fb *was* heap-allocated; verify before reuse.
 */
static int qxlfb_create(struct qxl_fbdev *qfbdev,
			struct drm_fb_helper_surface_size *sizes)
{
	struct qxl_device *qdev = qfbdev->qdev;
	struct fb_info *info;
	struct drm_framebuffer *fb = NULL;
	struct drm_mode_fb_cmd2 mode_cmd;
	struct drm_gem_object *gobj = NULL;
	struct qxl_bo *qbo = NULL;
	int ret;
	int size;
	int bpp = sizes->surface_bpp;
	int depth = sizes->surface_depth;
	void *shadow;

	mode_cmd.width = sizes->surface_width;
	mode_cmd.height = sizes->surface_height;

	/* pitch rounded to 64-byte alignment; (bpp + 1) / 8 gives the usual
	 * bytes-per-pixel for 8/16/24/32 bpp modes */
	mode_cmd.pitches[0] = ALIGN(mode_cmd.width * ((bpp + 1) / 8), 64);
	mode_cmd.pixel_format = drm_mode_legacy_fb_format(bpp, depth);

	ret = qxlfb_create_pinned_object(qfbdev, &mode_cmd, &gobj);
	if (ret < 0)
		return ret;

	qbo = gem_to_qxl_bo(gobj);
	QXL_INFO(qdev, "%s: %dx%d %d\n", __func__, mode_cmd.width,
		 mode_cmd.height, mode_cmd.pitches[0]);

	/* CPU-side shadow buffer that userspace actually draws into */
	shadow = vmalloc(mode_cmd.pitches[0] * mode_cmd.height);
	/* TODO: what's the usual response to memory allocation errors? */
	BUG_ON(!shadow);
	QXL_INFO(qdev,
	"surface0 at gpu offset %lld, mmap_offset %lld (virt %p, shadow %p)\n",
		 qxl_bo_gpu_offset(qbo),
		 qxl_bo_mmap_offset(qbo),
		 qbo->kptr,
		 shadow);
	size = mode_cmd.pitches[0] * mode_cmd.height;

	info = drm_fb_helper_alloc_fbi(&qfbdev->helper);
	if (IS_ERR(info)) {
		ret = PTR_ERR(info);
		goto out_unref;
	}

	info->par = qfbdev;

	qxl_framebuffer_init(qdev->ddev, &qfbdev->qfb, &mode_cmd, gobj,
			     &qxlfb_fb_funcs);

	fb = &qfbdev->qfb.base;

	/* setup helper with fb data */
	qfbdev->helper.fb = fb;

	qfbdev->shadow = shadow;
	strcpy(info->fix.id, "qxldrmfb");

	drm_fb_helper_fill_fix(info, fb->pitches[0], fb->depth);

	info->flags = FBINFO_DEFAULT | FBINFO_HWACCEL_COPYAREA |
		      FBINFO_HWACCEL_FILLRECT;
	info->fbops = &qxlfb_ops;

	/*
	 * TODO: using gobj->size in various places in this function. Not sure
	 * what the difference between the different sizes is.
	 */
	info->fix.smem_start = qdev->vram_base; /* TODO - correct? */
	info->fix.smem_len = gobj->size;
	info->screen_base = qfbdev->shadow;
	info->screen_size = gobj->size;

	drm_fb_helper_fill_var(info, &qfbdev->helper, sizes->fb_width,
			       sizes->fb_height);

	/* setup aperture base/size for vesafb takeover */
	info->apertures->ranges[0].base = qdev->ddev->mode_config.fb_base;
	info->apertures->ranges[0].size = qdev->vram_size;

	info->fix.mmio_start = 0;
	info->fix.mmio_len = 0;

	if (info->screen_base == NULL) {
		ret = -ENOSPC;
		goto out_destroy_fbi;
	}

	info->fbdefio = &qxl_defio;
	fb_deferred_io_init(info);

	qdev->fbdev_info = info;
	qdev->fbdev_qfb = &qfbdev->qfb;
	DRM_INFO("fb mappable at 0x%lX, size %lu\n", info->fix.smem_start,
		 (unsigned long)info->screen_size);
	DRM_INFO("fb: depth %d, pitch %d, width %d, height %d\n", fb->depth,
		 fb->pitches[0], fb->width, fb->height);
	return 0;

out_destroy_fbi:
	drm_fb_helper_release_fbi(&qfbdev->helper);
out_unref:
	if (qbo) {
		/* undo the pin/kmap done by qxlfb_create_pinned_object() */
		ret = qxl_bo_reserve(qbo, false);
		if (likely(ret == 0)) {
			qxl_bo_kunmap(qbo);
			qxl_bo_unpin(qbo);
			qxl_bo_unreserve(qbo);
		}
	}
	/* NOTE(review): double unreference + kfree of embedded fb below —
	 * see function header comment; confirm against the framebuffer
	 * refcounting rules of this kernel version. */
	if (fb && ret) {
		drm_gem_object_unreference_unlocked(gobj);
		drm_framebuffer_cleanup(fb);
		kfree(fb);
	}
	drm_gem_object_unreference_unlocked(gobj);
	return ret;
}
/* PRIME vunmap hook: drop the kernel mapping created by qxl_gem_prime_vmap(). */
void qxl_gem_prime_vunmap(struct drm_gem_object *obj, void *vaddr)
{
	qxl_bo_kunmap(gem_to_qxl_bo(obj));
}
static int qxl_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode, struct drm_display_mode *adjusted_mode, int x, int y, struct drm_framebuffer *old_fb) { struct drm_device *dev = crtc->dev; struct qxl_device *qdev = dev->dev_private; struct qxl_mode *m = (void *)mode->private; struct qxl_framebuffer *qfb; struct qxl_bo *bo, *old_bo = NULL; struct qxl_crtc *qcrtc = to_qxl_crtc(crtc); uint32_t width, height, base_offset; bool recreate_primary = false; int ret; int surf_id; if (!crtc->fb) { DRM_DEBUG_KMS("No FB bound\n"); return 0; } if (old_fb) { qfb = to_qxl_framebuffer(old_fb); old_bo = gem_to_qxl_bo(qfb->obj); } qfb = to_qxl_framebuffer(crtc->fb); bo = gem_to_qxl_bo(qfb->obj); if (!m) /* and do we care? */ DRM_DEBUG("%dx%d: not a native mode\n", x, y); else DRM_DEBUG("%dx%d: qxl id %d\n", mode->hdisplay, mode->vdisplay, m->id); DRM_DEBUG("+%d+%d (%d,%d) => (%d,%d)\n", x, y, mode->hdisplay, mode->vdisplay, adjusted_mode->hdisplay, adjusted_mode->vdisplay); if (qcrtc->index == 0) recreate_primary = true; width = mode->hdisplay; height = mode->vdisplay; base_offset = 0; ret = qxl_bo_reserve(bo, false); if (ret != 0) return ret; ret = qxl_bo_pin(bo, bo->type, NULL); if (ret != 0) { qxl_bo_unreserve(bo); return -EINVAL; } qxl_bo_unreserve(bo); if (recreate_primary) { qxl_io_destroy_primary(qdev); qxl_io_log(qdev, "recreate primary: %dx%d (was %dx%d,%d,%d)\n", width, height, bo->surf.width, bo->surf.height, bo->surf.stride, bo->surf.format); qxl_io_create_primary(qdev, base_offset, bo); bo->is_primary = true; surf_id = 0; } else { surf_id = bo->surface_id; } if (old_bo && old_bo != bo) { old_bo->is_primary = false; ret = qxl_bo_reserve(old_bo, false); qxl_bo_unpin(old_bo); qxl_bo_unreserve(old_bo); } qxl_monitors_config_set(qdev, qcrtc->index, x, y, mode->hdisplay, mode->vdisplay, surf_id); return 0; }
/* PRIME unpin hook: release the pin taken by qxl_gem_prime_pin(). */
void qxl_gem_prime_unpin(struct drm_gem_object *obj)
{
	qxl_bo_unpin(gem_to_qxl_bo(obj));
}
static int qxl_update_area_ioctl(struct drm_device *dev, void *data, struct drm_file *file) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_update_area *update_area = data; struct qxl_rect area = {.left = update_area->left, .top = update_area->top, .right = update_area->right, .bottom = update_area->bottom}; int ret; struct drm_gem_object *gobj = NULL; struct qxl_bo *qobj = NULL; if (update_area->left >= update_area->right || update_area->top >= update_area->bottom) return -EINVAL; gobj = drm_gem_object_lookup(file, update_area->handle); if (gobj == NULL) return -ENOENT; qobj = gem_to_qxl_bo(gobj); ret = qxl_bo_reserve(qobj, false); if (ret) goto out; if (!qobj->pin_count) { qxl_ttm_placement_from_domain(qobj, qobj->type, false); ret = ttm_bo_validate(&qobj->tbo, &qobj->placement, true, false); if (unlikely(ret)) goto out; } ret = qxl_bo_check_id(qdev, qobj); if (ret) goto out2; if (!qobj->surface_id) DRM_ERROR("got update area for surface with no id %d\n", update_area->handle); ret = qxl_io_update_area(qdev, qobj, &area); out2: qxl_bo_unreserve(qobj); out: drm_gem_object_unreference_unlocked(gobj); return ret; } static int qxl_getparam_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_getparam *param = data; switch (param->param) { case QXL_PARAM_NUM_SURFACES: param->value = qdev->rom->n_surfaces; break; case QXL_PARAM_MAX_RELOCS: param->value = QXL_MAX_RES; break; default: return -EINVAL; } return 0; } static int qxl_clientcap_ioctl(struct drm_device *dev, void *data, struct drm_file *file_priv) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_clientcap *param = data; int byte, idx; byte = param->index / 8; idx = param->index % 8; if (qdev->pdev->revision < 4) return -ENOSYS; if (byte >= 58) return -ENOSYS; if (qdev->rom->client_capabilities[byte] & (1 << idx)) return 0; return -ENOSYS; } static int qxl_alloc_surf_ioctl(struct drm_device *dev, void *data, 
struct drm_file *file) { struct qxl_device *qdev = dev->dev_private; struct drm_qxl_alloc_surf *param = data; struct qxl_bo *qobj; int handle; int ret; int size, actual_stride; struct qxl_surface surf; /* work out size allocate bo with handle */ actual_stride = param->stride < 0 ? -param->stride : param->stride; size = actual_stride * param->height + actual_stride; surf.format = param->format; surf.width = param->width; surf.height = param->height; surf.stride = param->stride; surf.data = 0; ret = qxl_gem_object_create_with_handle(qdev, file, QXL_GEM_DOMAIN_SURFACE, size, &surf, &qobj, &handle); if (ret) { DRM_ERROR("%s: failed to create gem ret=%d\n", __func__, ret); return -ENOMEM; } else param->handle = handle; return ret; } const struct drm_ioctl_desc qxl_ioctls[] = { DRM_IOCTL_DEF_DRV(QXL_ALLOC, qxl_alloc_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_MAP, qxl_map_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_EXECBUFFER, qxl_execbuffer_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_UPDATE_AREA, qxl_update_area_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_GETPARAM, qxl_getparam_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_CLIENTCAP, qxl_clientcap_ioctl, DRM_AUTH), DRM_IOCTL_DEF_DRV(QXL_ALLOC_SURF, qxl_alloc_surf_ioctl, DRM_AUTH), }; int qxl_max_ioctls = ARRAY_SIZE(qxl_ioctls);
/* PRIME pin hook: pin the backing bo; returns 0 or a negative errno. */
int qxl_gem_prime_pin(struct drm_gem_object *obj)
{
	return qxl_bo_pin(gem_to_qxl_bo(obj));
}