static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_reserve, bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	/* No backing pages yet: just update the placement bookkeeping. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		radeon_move_null(bo, new_mem);
		return 0;
	}
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		/* bind is enough */
		radeon_move_null(bo, new_mem);
		return 0;
	}
	/* No usable copy ring or no ASIC copy hook: fall back to memcpy. */
	if (!rdev->ring[radeon_copy_ring_index(rdev)].ready ||
	    rdev->asic->copy.copy == NULL) {
		goto memcpy;
	}

	/* VRAM<->system moves bounce through GTT; everything else blits. */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM) {
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, new_mem);
	} else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		   new_mem->mem_type == TTM_PL_VRAM) {
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, new_mem);
	} else {
		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu,
				     new_mem, old_mem);
	}

	if (r) {
memcpy:
		/* GPU copy failed or is unavailable: move on the CPU. */
		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
				       no_wait_gpu, new_mem);
	}
	return r;
}
static int qxl_bo_move(struct ttm_buffer_object *bo, bool evict,
		       bool interruptible, bool no_wait_gpu,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	int ret;

	/* Wait for any pending GPU work on the buffer before moving it. */
	ret = ttm_bo_wait(bo, interruptible, no_wait_gpu);
	if (ret)
		return ret;

	/* No backing pages yet: just update the placement bookkeeping. */
	if (old_mem->mem_type == TTM_PL_SYSTEM && bo->ttm == NULL) {
		qxl_move_null(bo, new_mem);
		return 0;
	}
	/* qxl has no DMA engine, so every real move is a CPU copy. */
	return ttm_bo_move_memcpy(bo, interruptible, no_wait_gpu, new_mem);
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	/*
	 * Tear down CPU mappings when the move crosses a PCI aperture
	 * or changes the caching attributes.
	 */
	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		/*
		 * Moving out of system memory with a freshly bound ttm
		 * needs no copy: just take over the new placement.
		 */
		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	/*
	 * Dispatch: ttm-to-ttm moves first, then the driver's accelerated
	 * move hook, then the generic CPU memcpy fallback.
	 */
	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve,
				      no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
					 no_wait_gpu, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	/* A ttm is useless for a FIXED memory type: destroy it on failure. */
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
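/*
 * A minimal sketch (assumed, not verbatim from either driver) of how the
 * handlers above are reached: each driver publishes its callbacks in a
 * struct ttm_bo_driver, and ttm_bo_handle_move_mem() dispatches through
 * bdev->driver->move, ->move_notify and ->invalidate_caches.  The .move
 * signature here matches the radeon-era interface shown above; the qxl
 * tree uses the later interface without the no_wait_reserve argument.
 */
static struct ttm_bo_driver radeon_bo_driver = {
	/* ... ttm_tt_create, init_mem_type, evict_flags, etc. elided ... */
	.invalidate_caches = &radeon_invalidate_caches,
	.move = &radeon_bo_move,
};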