/* Write a 32-bit word into a kmapped bo at @index, using the iomem
 * accessor when the mapping is I/O memory. */
void nouveau_bo_wr32(struct nouveau_bo *nvbo, unsigned index, u32 val)
{
	bool is_iomem;
	u32 *p = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	p += index;

	if (is_iomem)
		iowrite32_native(val, (void __force __iomem *)p);
	else
		*p = val;
}
/* Read a 32-bit word from a kmapped bo at @index, using the iomem
 * accessor when the mapping is I/O memory. */
u32 nouveau_bo_rd32(struct nouveau_bo *nvbo, unsigned index)
{
	bool is_iomem;
	u32 *p = ttm_kmap_obj_virtual(&nvbo->kmap, &is_iomem);

	p += index;

	if (is_iomem)
		return ioread32_native((void __force __iomem *)p);

	return *p;
}
/* Copy @nr_dwords 32-bit words from @data into the channel's push buffer
 * at the current dma position, then advance dma.cur. Uses memcpy_toio()
 * when the push buffer mapping is I/O memory. */
void OUT_RINGp(struct nouveau_channel *chan, const void *data,
	       unsigned nr_dwords)
{
	bool is_iomem;
	u32 *dst = ttm_kmap_obj_virtual(&chan->push.buffer->kmap, &is_iomem);

	dst += chan->dma.cur;

	if (is_iomem)
		memcpy_toio((void __force __iomem *)dst, data, nr_dwords * 4);
	else
		memcpy(dst, data, nr_dwords * 4);

	chan->dma.cur += nr_dwords;
}
/** * vmw_dummy_query_bo_create - create a bo to hold a dummy query result * * @dev_priv: A device private structure. * * This function creates a small buffer object that holds the query * result for dummy queries emitted as query barriers. * The function will then map the first page and initialize a pending * occlusion query result structure, Finally it will unmap the buffer. * No interruptible waits are done within this function. * * Returns an error if bo creation or initialization fails. */ static int vmw_dummy_query_bo_create(struct vmw_private *dev_priv) { int ret; struct vmw_dma_buffer *vbo; struct ttm_bo_kmap_obj map; volatile SVGA3dQueryResult *result; bool dummy; /* * Create the vbo as pinned, so that a tryreserve will * immediately succeed. This is because we're the only * user of the bo currently. */ vbo = kzalloc(sizeof(*vbo), GFP_KERNEL); if (!vbo) return -ENOMEM; ret = vmw_dmabuf_init(dev_priv, vbo, PAGE_SIZE, &vmw_sys_ne_placement, false, &vmw_dmabuf_bo_free); if (unlikely(ret != 0)) return ret; ret = ttm_bo_reserve(&vbo->base, false, true, NULL); BUG_ON(ret != 0); vmw_bo_pin_reserved(vbo, true); ret = ttm_bo_kmap(&vbo->base, 0, 1, &map); if (likely(ret == 0)) { result = ttm_kmap_obj_virtual(&map, &dummy); result->totalSize = sizeof(*result); result->state = SVGA3D_QUERYSTATE_PENDING; result->result32 = 0xff; ttm_bo_kunmap(&map); } vmw_bo_pin_reserved(vbo, false); ttm_bo_unreserve(&vbo->base); if (unlikely(ret != 0)) { DRM_ERROR("Dummy query buffer map failed.\n"); vmw_dmabuf_unreference(&vbo); } else dev_priv->dummy_query_bo = vbo; return ret; }
/* Kernel-map the whole object and cache the virtual address in bo->vmap.
 * If a mapping already exists it is reused. @ptr, when non-NULL, receives
 * the mapped address. Returns 0 on success or the ttm_bo_kmap() error. */
int virtio_gpu_object_kmap(struct virtio_gpu_object *bo, void **ptr)
{
	bool is_iomem;
	int r;

	/* Fast path: already mapped, just hand back the cached pointer. */
	if (bo->vmap)
		goto done;

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->vmap = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
done:
	if (ptr)
		*ptr = bo->vmap;
	return 0;
}
/* Kernel-map the whole bo and cache the address in bo->kptr, reusing an
 * existing mapping when present. @ptr, when non-NULL, receives the mapped
 * address. Also rechecks tiling state after a fresh mapping. */
int radeon_bo_kmap(struct radeon_bo *bo, void **ptr)
{
	bool is_iomem;
	int r;

	/* Already mapped: return the cached kernel pointer. */
	if (bo->kptr) {
		if (ptr)
			*ptr = bo->kptr;
		return 0;
	}

	r = ttm_bo_kmap(&bo->tbo, 0, bo->tbo.num_pages, &bo->kmap);
	if (r)
		return r;

	bo->kptr = ttm_kmap_obj_virtual(&bo->kmap, &is_iomem);
	if (ptr)
		*ptr = bo->kptr;

	radeon_bo_check_tiling(bo, 0, 0);
	return 0;
}
static int vbox_cursor_set2(struct drm_crtc *crtc, struct drm_file *file_priv, uint32_t handle, uint32_t width, uint32_t height, int32_t hot_x, int32_t hot_y) { struct vbox_private *vbox = crtc->dev->dev_private; struct vbox_crtc *vbox_crtc = to_vbox_crtc(crtc); struct drm_gem_object *obj; struct vbox_bo *bo; int ret, rc; struct ttm_bo_kmap_obj uobj_map; u8 *src; u8 *dst = NULL; size_t cbData, cbMask; bool src_isiomem; if (!handle) { /* Hide cursor. */ VBoxHGSMIUpdatePointerShape(&vbox->Ctx, 0, 0, 0, 0, 0, NULL, 0); return 0; } if ( width > VBOX_MAX_CURSOR_WIDTH || height > VBOX_MAX_CURSOR_HEIGHT || width == 0 || hot_x > width || height == 0 || hot_y > height) return -EINVAL; obj = drm_gem_object_lookup(crtc->dev, file_priv, handle); if (obj) { bo = gem_to_vbox_bo(obj); ret = vbox_bo_reserve(bo, false); if (!ret) { /* The mask must be calculated based on the alpha channel, one bit * per ARGB word, and must be 32-bit padded. */ cbMask = ((width + 7) / 8 * height + 3) & ~3; cbData = width * height * 4 + cbMask; dst = kmalloc(cbData, GFP_KERNEL); if (dst) { ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &uobj_map); if (!ret) { src = ttm_kmap_obj_virtual(&uobj_map, &src_isiomem); if (!src_isiomem) { uint32_t fFlags = VBOX_MOUSE_POINTER_VISIBLE | VBOX_MOUSE_POINTER_SHAPE | VBOX_MOUSE_POINTER_ALPHA; copy_cursor_image(src, dst, width, height, cbMask); rc = VBoxHGSMIUpdatePointerShape(&vbox->Ctx, fFlags, hot_x, hot_y, width, height, dst, cbData); ret = RTErrConvertToErrno(rc); } else DRM_ERROR("src cursor bo should be in main memory\n"); ttm_bo_kunmap(&uobj_map); } kfree(dst); } vbox_bo_unreserve(bo); } drm_gem_object_unreference_unlocked(obj); } else { DRM_ERROR("Cannot find cursor object %x for crtc\n", handle); ret = -ENOENT; } return ret; }
int vsp_init(struct drm_device *dev) { struct drm_psb_private *dev_priv = dev->dev_private; struct ttm_bo_device *bdev = &dev_priv->bdev; struct vsp_private *vsp_priv; bool is_iomem; int ret; unsigned int context_size; VSP_DEBUG("init vsp private data structure\n"); vsp_priv = kmalloc(sizeof(struct vsp_private), GFP_KERNEL); if (vsp_priv == NULL) return -1; memset(vsp_priv, 0, sizeof(*vsp_priv)); /* get device --> drm_device --> drm_psb_private --> vsp_priv * for psb_vsp_pmstate_show: vsp_pmpolicy * if not pci_set_drvdata, can't get drm_device from device */ /* pci_set_drvdata(dev->pdev, dev); */ if (device_create_file(&dev->pdev->dev, &dev_attr_vsp_pmstate)) DRM_ERROR("TOPAZ: could not create sysfs file\n"); vsp_priv->sysfs_pmstate = sysfs_get_dirent( dev->pdev->dev.kobj.sd, NULL, "vsp_pmstate"); vsp_priv->vsp_cmd_num = 0; vsp_priv->fw_loaded = VSP_FW_NONE; vsp_priv->current_sequence = 0; vsp_priv->vsp_state = VSP_STATE_DOWN; vsp_priv->dev = dev; vsp_priv->coded_buf = NULL; vsp_priv->context_num = 0; atomic_set(&dev_priv->vsp_mmu_invaldc, 0); dev_priv->vsp_private = vsp_priv; vsp_priv->cmd_queue_sz = VSP_CMD_QUEUE_SIZE * sizeof(struct vss_command_t); #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) ret = ttm_buffer_object_create(bdev, vsp_priv->cmd_queue_sz, ttm_bo_type_kernel, DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, 0, 0, 0, NULL, &vsp_priv->cmd_queue_bo); #else ret = ttm_buffer_object_create(bdev, vsp_priv->cmd_queue_sz, ttm_bo_type_kernel, DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, 0, 0, NULL, &vsp_priv->cmd_queue_bo); #endif if (ret != 0) { DRM_ERROR("VSP: failed to allocate VSP cmd queue\n"); goto out_clean; } vsp_priv->ack_queue_sz = VSP_ACK_QUEUE_SIZE * sizeof(struct vss_response_t); #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) ret = ttm_buffer_object_create(bdev, vsp_priv->ack_queue_sz, ttm_bo_type_kernel, DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, 0, 0, 0, NULL, &vsp_priv->ack_queue_bo); #else ret = ttm_buffer_object_create(bdev, 
vsp_priv->ack_queue_sz, ttm_bo_type_kernel, DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, 0, 0, NULL, &vsp_priv->ack_queue_bo); #endif if (ret != 0) { DRM_ERROR("VSP: failed to allocate VSP cmd ack queue\n"); goto out_clean; } /* Create setting buffer */ #if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)) ret = ttm_buffer_object_create(bdev, sizeof(struct vsp_settings_t), ttm_bo_type_kernel, DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, 0, 0, 0, NULL, &vsp_priv->setting_bo); #else ret = ttm_buffer_object_create(bdev, sizeof(struct vsp_settings_t), ttm_bo_type_kernel, DRM_PSB_FLAG_MEM_MMU | TTM_PL_FLAG_NO_EVICT, 0, 0, NULL, &vsp_priv->setting_bo); #endif if (ret != 0) { DRM_ERROR("VSP: failed to allocate VSP setting buffer\n"); goto out_clean; } /* map cmd queue */ ret = ttm_bo_kmap(vsp_priv->cmd_queue_bo, 0, vsp_priv->cmd_queue_bo->num_pages, &vsp_priv->cmd_kmap); if (ret) { DRM_ERROR("drm_bo_kmap failed: %d\n", ret); ttm_bo_unref(&vsp_priv->cmd_queue_bo); ttm_bo_kunmap(&vsp_priv->cmd_kmap); goto out_clean; } vsp_priv->cmd_queue = ttm_kmap_obj_virtual(&vsp_priv->cmd_kmap, &is_iomem); /* map ack queue */ ret = ttm_bo_kmap(vsp_priv->ack_queue_bo, 0, vsp_priv->ack_queue_bo->num_pages, &vsp_priv->ack_kmap); if (ret) { DRM_ERROR("drm_bo_kmap failed: %d\n", ret); ttm_bo_unref(&vsp_priv->ack_queue_bo); ttm_bo_kunmap(&vsp_priv->ack_kmap); goto out_clean; } vsp_priv->ack_queue = ttm_kmap_obj_virtual(&vsp_priv->ack_kmap, &is_iomem); /* map vsp setting */ ret = ttm_bo_kmap(vsp_priv->setting_bo, 0, vsp_priv->setting_bo->num_pages, &vsp_priv->setting_kmap); if (ret) { DRM_ERROR("drm_bo_kmap setting_bo failed: %d\n", ret); ttm_bo_unref(&vsp_priv->setting_bo); ttm_bo_kunmap(&vsp_priv->setting_kmap); goto out_clean; } vsp_priv->setting = ttm_kmap_obj_virtual(&vsp_priv->setting_kmap, &is_iomem); vsp_priv->vp8_filp[0] = NULL; vsp_priv->vp8_filp[1] = NULL; vsp_priv->context_vp8_num = 0; vsp_priv->vp8_cmd_num = 0; spin_lock_init(&vsp_priv->lock); mutex_init(&vsp_priv->vsp_mutex); 
INIT_DELAYED_WORK(&vsp_priv->vsp_suspend_wq, &psb_powerdown_vsp); INIT_DELAYED_WORK(&vsp_priv->vsp_irq_wq, &vsp_irq_task); return 0; out_clean: vsp_deinit(dev); return -1; }