/*
 * Flip destination ttm into GATT,
 * then blit and subsequently move out again.
 */
static int psb_move_flip(struct ttm_buffer_object *bo,
			 bool evict, bool interruptible, bool no_wait,
			 struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg tmp_mem;
	int ret;
	struct ttm_placement placement;
	uint32_t flags = TTM_PL_FLAG_TT;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;

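	/* Ask for a single placement in the TT domain, i.e. the GATT aperture. */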
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &flags;
	placement.num_busy_placement = 1;	/* fall back to the same TT placement when memory is contended */
	placement.busy_placement = &flags;

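	/* ttm_bo_mem_space() dropped its no_wait_reserve argument in kernel 3.8. */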
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, false, no_wait);
#else
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
#endif
	if (ret)
		return ret;
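	/* Bind the backing pages into the GATT so the blit engine can reach them. */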
	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;
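	/* Blit the buffer contents into the temporary GATT placement. */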
	ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

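	/* Move back out of the GATT into the final placement; ttm_bo_move_ttm() likewise lost no_wait_reserve in 3.8. */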
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem);
#else
	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
#endif
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = bo->glob;

		/* the lru_lock moved from the device to the global in upstream TTM */
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}
Example #2
static int
nouveau_bo_move_flips(struct ttm_buffer_object *bo, bool evict, bool intr,
                      bool no_wait_reserve, bool no_wait_gpu,
                      struct ttm_mem_reg *new_mem)
{
    u32 placement_memtype = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
    struct ttm_placement placement;
    struct ttm_mem_reg tmp_mem;
    int ret;

    placement.fpfn = placement.lpfn = 0;
    placement.num_placement = placement.num_busy_placement = 1;
    placement.placement = placement.busy_placement = &placement_memtype;

    tmp_mem = *new_mem;
    tmp_mem.mm_node = NULL;
    ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, intr, no_wait_reserve, no_wait_gpu);
    if (ret)
        return ret;

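    /* First bind into the GATT temporary, then let the M2MF copy engine write the final placement. */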
    ret = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem);
    if (ret)
        goto out;

    ret = nouveau_bo_move_m2mf(bo, true, intr, no_wait_reserve, no_wait_gpu, new_mem);
out:
    ttm_bo_mem_put(bo, &tmp_mem);
    return ret;
}
Example #3
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

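	/* Wait for any outstanding GPU access to the buffer before evicting it. */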
	spin_lock(&bdev->fence_lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bdev->fence_lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved_vm = false;
	evict_mem.bus.io_reserved_count = 0;

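	/* Let the driver pick the eviction domains for this buffer via evict_flags(). */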
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		ttm_bo_mem_put(bo, &evict_mem);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}
Example #4
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

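	/* Match the pages' caching attributes to the chosen TT placement before binding. */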
	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
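	/* The blit landed in the GART-bound pages; ttm_bo_move_ttm() just unbinds them into the final system placement. */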
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}
Example #5
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
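	/* Describe the proposed region; ttm_bo_mem_space() fills in mm_node and placement. */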
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved = false;
	mem.mm_node = NULL;	/* defensive: the error path below tests mm_node */
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}
Example #6
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	struct ttm_placement placement;
	u32 placements;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}
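	/* Bind into the GART first so the blit engine can read the pages, then blit into VRAM. */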
	r = ttm_bo_move_ttm(bo, true, no_wait_gpu, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_gpu, new_mem, old_mem);
out_cleanup:
	ttm_bo_mem_put(bo, &tmp_mem);
	return r;
}
Example #7
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
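	/* Older TTM API: the placement is a bare flags word rather than a struct ttm_placement. */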
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return r;
}