/*
 * Flip destination ttm into GATT,
 * then blit and subsequently move out again.
 */
static int psb_move_flip(struct ttm_buffer_object *bo,
			 bool evict, bool interruptible,
			 bool no_wait, struct ttm_mem_reg *new_mem)
{
	/*struct ttm_bo_device *bdev = bo->bdev;*/
	struct ttm_mem_reg tmp_mem;
	int ret;
	struct ttm_placement placement;
	uint32_t flags = TTM_PL_FLAG_TT;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &flags;
	placement.num_busy_placement = 0; /* FIXME */
	placement.busy_placement = NULL;
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			       interruptible, false, no_wait);
#else
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			       interruptible, no_wait);
#endif
	if (ret)
		return ret;

	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;

	ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem);
#else
	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
#endif
out_cleanup:
	if (tmp_mem.mm_node) {
		/*spin_lock(&bdev->lru_lock);*/ /* lru_lock is removed from upstream TTM */
		drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		/*spin_unlock(&bdev->lru_lock);*/
	}
	return ret;
}
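/*
 * Note on the FIXME above: psb leaves the busy placement empty, so
 * ttm_bo_mem_space() has no fallback list to try under memory pressure.
 * The nouveau and radeon versions below simply reuse the normal
 * placement for the busy case; a plausible fix along the same lines
 * (a sketch, not code from the psb tree) would be:
 *
 *	placement.num_busy_placement = 1;
 *	placement.busy_placement = &flags;
 */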
static int nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict,
				 bool intr, bool no_wait_reserve,
				 bool no_wait_gpu, struct ttm_mem_reg *new_mem)
{
	u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	struct ttm_placement placement;
	struct ttm_mem_reg tmp_mem;
	int ret;

	placement.fpfn = placement.lpfn = 0;
	placement.num_placement = placement.num_busy_placement = 1;
	placement.placement = placement.busy_placement = &placement_memtype;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			       intr, no_wait_reserve, no_wait_gpu);
	if (ret)
		return ret;

	ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
	if (ret)
		goto out;

	ret = nouveau_bo_move_m2mf(bo, true, intr,
				   no_wait_reserve, no_wait_gpu, new_mem);
out:
	ttm_bo_mem_put(bo, &tmp_mem);
	return ret;
}
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu,
			     &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
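/*
 * The function below is an earlier revision of the same radeon helper,
 * from before ttm_bo_mem_space() grew a struct ttm_placement argument:
 * it passes the proposed placement flags directly, and it releases the
 * temporary node by hand under the global lru_lock instead of calling
 * ttm_bo_mem_put() as the newer revision above does.
 */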
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait, struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}
static int ttm_bo_handle_move_mem(struct ttm_buffer_object *bo,
				  struct ttm_mem_reg *mem,
				  bool evict, bool interruptible,
				  bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	bool old_is_pci = ttm_mem_reg_is_pci(bdev, &bo->mem);
	bool new_is_pci = ttm_mem_reg_is_pci(bdev, mem);
	struct ttm_mem_type_manager *old_man = &bdev->man[bo->mem.mem_type];
	struct ttm_mem_type_manager *new_man = &bdev->man[mem->mem_type];
	int ret = 0;

	if (old_is_pci || new_is_pci ||
	    ((mem->placement & bo->mem.placement & TTM_PL_MASK_CACHING) == 0)) {
		ret = ttm_mem_io_lock(old_man, true);
		if (unlikely(ret != 0))
			goto out_err;
		ttm_bo_unmap_virtual_locked(bo);
		ttm_mem_io_unlock(old_man);
	}

	/*
	 * Create and bind a ttm if required.
	 */

	if (!(new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && (bo->ttm == NULL)) {
		ret = ttm_bo_add_ttm(bo, false);
		if (ret)
			goto out_err;

		ret = ttm_tt_set_placement_caching(bo->ttm, mem->placement);
		if (ret)
			goto out_err;

		if (mem->mem_type != TTM_PL_SYSTEM) {
			ret = ttm_tt_bind(bo->ttm, mem);
			if (ret)
				goto out_err;
		}

		if (bo->mem.mem_type == TTM_PL_SYSTEM) {
			if (bdev->driver->move_notify)
				bdev->driver->move_notify(bo, mem);
			bo->mem = *mem;
			mem->mm_node = NULL;
			goto moved;
		}
	}

	if (bdev->driver->move_notify)
		bdev->driver->move_notify(bo, mem);

	if (!(old_man->flags & TTM_MEMTYPE_FLAG_FIXED) &&
	    !(new_man->flags & TTM_MEMTYPE_FLAG_FIXED))
		ret = ttm_bo_move_ttm(bo, evict, no_wait_reserve, no_wait_gpu, mem);
	else if (bdev->driver->move)
		ret = bdev->driver->move(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, mem);
	else
		ret = ttm_bo_move_memcpy(bo, evict, no_wait_reserve, no_wait_gpu, mem);

	if (ret)
		goto out_err;

moved:
	if (bo->evicted) {
		ret = bdev->driver->invalidate_caches(bdev, bo->mem.placement);
		if (ret)
			printk(KERN_ERR TTM_PFX "Can not flush read caches\n");
		bo->evicted = false;
	}

	if (bo->mem.mm_node) {
		bo->offset = (bo->mem.start << PAGE_SHIFT) +
		    bdev->man[bo->mem.mem_type].gpu_offset;
		bo->cur_placement = bo->mem.placement;
	} else
		bo->offset = 0;

	return 0;

out_err:
	new_man = &bdev->man[bo->mem.mem_type];
	if ((new_man->flags & TTM_MEMTYPE_FLAG_FIXED) && bo->ttm) {
		ttm_tt_unbind(bo->ttm);
		ttm_tt_destroy(bo->ttm);
		bo->ttm = NULL;
	}

	return ret;
}
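/*
 * For context: ttm_bo_handle_move_mem() above reaches the flip helpers
 * through the bdev->driver->move hook. Below is a minimal sketch of such
 * a hook, modeled on radeon's dispatcher of the same era and using the
 * no_wait_reserve-style signatures seen in the radeon_move_vram_ram
 * version above. radeon_move_null() is an external helper assumed here;
 * it just transfers the mem reg when no copy is needed.
 */
static int radeon_bo_move(struct ttm_buffer_object *bo,
			  bool evict, bool interruptible,
			  bool no_wait_reserve, bool no_wait_gpu,
			  struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;
	int r;

	/* Moves between SYSTEM and TT need no copy: (un)binding the ttm
	 * is enough, so just hand the new placement over. */
	if ((old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM) ||
	    (old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT)) {
		radeon_move_null(bo, new_mem);
		return 0;
	}

	/* Pick a flip helper based on the direction of the move. */
	if (old_mem->mem_type == TTM_PL_VRAM &&
	    new_mem->mem_type == TTM_PL_SYSTEM)
		r = radeon_move_vram_ram(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, new_mem);
	else if (old_mem->mem_type == TTM_PL_SYSTEM &&
		 new_mem->mem_type == TTM_PL_VRAM)
		r = radeon_move_ram_vram(bo, evict, interruptible,
					 no_wait_reserve, no_wait_gpu, new_mem);
	else
		r = radeon_move_blit(bo, evict, no_wait_reserve, no_wait_gpu,
				     new_mem, old_mem);

	/* If the accelerated path failed, fall back to a CPU copy. */
	if (r)
		r = ttm_bo_move_memcpy(bo, evict, no_wait_reserve,
				       no_wait_gpu, new_mem);
	return r;
}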