/*
 * virtio_gpu_bo_move - TTM move callback (pre-ttm_operation_ctx signature).
 *
 * virtio-gpu buffers live in system memory only, so a "move" never copies
 * pages; it just updates TTM's placement bookkeeping via
 * virtio_gpu_move_null().
 *
 * Fix: wait for outstanding GPU work on @bo before retargeting its
 * placement, mirroring the ttm_operation_ctx variant of this callback —
 * previously the null move was performed with no wait at all.
 *
 * NOTE(review): this looks like a stale duplicate of the ctx-based
 * virtio_gpu_bo_move() defined later in this file; confirm only one
 * definition is kept.
 *
 * Returns 0 on success or the error from ttm_bo_wait().
 */
static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			      bool interruptible, bool no_wait_gpu,
			      struct ttm_mem_reg *new_mem)
{
	int ret;

	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	virtio_gpu_move_null(bo, new_mem);
	return 0;
}
/*
 * virtio_gpu_bo_move - TTM move callback.
 *
 * All virtio-gpu placements are system memory, so nothing is copied:
 * after waiting for any pending GPU work on @bo (per @ctx's
 * interruptible/no_wait_gpu policy), the placement bookkeeping is simply
 * switched over to @new_mem by virtio_gpu_move_null().
 *
 * Returns 0 on success, or the error reported by ttm_bo_wait().
 */
static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict,
			      struct ttm_operation_ctx *ctx,
			      struct ttm_mem_reg *new_mem)
{
	int ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu);

	if (ret != 0)
		return ret;

	virtio_gpu_move_null(bo, new_mem);
	return 0;
}
static void virtio_gpu_evict_flags(struct ttm_buffer_object *bo, struct ttm_placement *placement) { static const struct ttm_place placements = { .fpfn = 0, .lpfn = 0, .flags = TTM_PL_MASK_CACHING | TTM_PL_FLAG_SYSTEM, }; placement->placement = &placements; placement->busy_placement = &placements; placement->num_placement = 1; placement->num_busy_placement = 1; } static int virtio_gpu_verify_access(struct ttm_buffer_object *bo, struct file *filp) { return 0; } static int virtio_gpu_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; mem->bus.addr = NULL; mem->bus.offset = 0; mem->bus.size = mem->num_pages << PAGE_SHIFT; mem->bus.base = 0; mem->bus.is_iomem = false; if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) return -EINVAL; switch (mem->mem_type) { case TTM_PL_SYSTEM: case TTM_PL_TT: /* system memory */ return 0; default: return -EINVAL; } return 0; } static void virtio_gpu_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { } /* * TTM backend functions. 
*/ struct virtio_gpu_ttm_tt { struct ttm_dma_tt ttm; struct virtio_gpu_device *vgdev; u64 offset; }; static int virtio_gpu_ttm_backend_bind(struct ttm_tt *ttm, struct ttm_mem_reg *bo_mem) { struct virtio_gpu_ttm_tt *gtt = (void *)ttm; gtt->offset = (unsigned long)(bo_mem->start << PAGE_SHIFT); if (!ttm->num_pages) WARN(1, "nothing to bind %lu pages for mreg %p back %p!\n", ttm->num_pages, bo_mem, ttm); /* Not implemented */ return 0; } static int virtio_gpu_ttm_backend_unbind(struct ttm_tt *ttm) { /* Not implemented */ return 0; } static void virtio_gpu_ttm_backend_destroy(struct ttm_tt *ttm) { struct virtio_gpu_ttm_tt *gtt = (void *)ttm; ttm_dma_tt_fini(>t->ttm); kfree(gtt); } static struct ttm_backend_func virtio_gpu_backend_func = { .bind = &virtio_gpu_ttm_backend_bind, .unbind = &virtio_gpu_ttm_backend_unbind, .destroy = &virtio_gpu_ttm_backend_destroy, }; static struct ttm_tt *virtio_gpu_ttm_tt_create(struct ttm_buffer_object *bo, uint32_t page_flags) { struct virtio_gpu_device *vgdev; struct virtio_gpu_ttm_tt *gtt; vgdev = virtio_gpu_get_vgdev(bo->bdev); gtt = kzalloc(sizeof(struct virtio_gpu_ttm_tt), GFP_KERNEL); if (gtt == NULL) return NULL; gtt->ttm.ttm.func = &virtio_gpu_backend_func; gtt->vgdev = vgdev; if (ttm_dma_tt_init(>t->ttm, bo, page_flags)) { kfree(gtt); return NULL; } return >t->ttm.ttm; } static void virtio_gpu_move_null(struct ttm_buffer_object *bo, struct ttm_mem_reg *new_mem) { struct ttm_mem_reg *old_mem = &bo->mem; BUG_ON(old_mem->mm_node != NULL); *old_mem = *new_mem; new_mem->mm_node = NULL; } static int virtio_gpu_bo_move(struct ttm_buffer_object *bo, bool evict, struct ttm_operation_ctx *ctx, struct ttm_mem_reg *new_mem) { int ret; ret = ttm_bo_wait(bo, ctx->interruptible, ctx->no_wait_gpu); if (ret) return ret; virtio_gpu_move_null(bo, new_mem); return 0; }