int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
		       int size, uint32_t start, uint32_t end,
		       uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct drm_mm_node *mem;
	uint32_t offset;
	int target, ret;

	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
					  start, end, 0);
	if (mem)
		mem = drm_mm_get_block_range(mem, size, 0, start, end);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

	if (dev_priv->card_type < NV_50) {
		if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
			target = NV_MEM_TARGET_VRAM;
		else
			target = NV_MEM_TARGET_GART;
		offset  = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
	} else {
		target = NV_MEM_TARGET_VM;
		offset = chan->notifier_bo->vma.offset;
	}
	offset += mem->start;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_MEM_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
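	/* The gpuobj now owns the heap node: nouveau_notifier_gpuobj_dtor()
	 * (shown below) hands it back with drm_mm_put_block() when the
	 * object is destroyed.
	 */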
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_ramht_insert(chan, handle, nobj);
	nouveau_gpuobj_ref(NULL, &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}
struct drm_mm_node *drm_mm_get_block(struct drm_mm_node * parent,
				unsigned long size, unsigned alignment)
{
	struct drm_mm_node *align_splitoff = NULL;
	struct drm_mm_node *child;
	unsigned tmp = 0;

	if (alignment)
		tmp = parent->start % alignment;

	if (tmp) {
		align_splitoff = drm_mm_split_at_start(parent, alignment - tmp);
		if (!align_splitoff)
			return NULL;
	}

	if (parent->size == size) {
		list_del_init(&parent->fl_entry);
		parent->free = 0;
		return parent;
	} else {
		child = drm_mm_split_at_start(parent, size);
	}

	if (align_splitoff)
		drm_mm_put_block(align_splitoff);

	return child;
}
static void
nouveau_notifier_gpuobj_dtor(struct drm_device *dev,
			     struct nouveau_gpuobj *gpuobj)
{
	NV_DEBUG(dev, "\n");

	/* ->priv holds the drm_mm node carved out of chan->notifier_heap in
	 * nouveau_notifier_alloc(); return it to the heap.
	 */
	if (gpuobj->priv)
		drm_mm_put_block(gpuobj->priv);
}
Example #4
static void
nv20_fb_free_tag(struct drm_device *dev, struct drm_mm_node *mem)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;

	spin_lock(&dev_priv->tile.lock);
	drm_mm_put_block(mem);
	spin_unlock(&dev_priv->tile.lock);
}
Example #5
void ttm_bo_free_old_node(struct ttm_buffer_object *bo)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if (old_mem->mm_node) {
		spin_lock(&bo->glob->lru_lock);
		drm_mm_put_block(old_mem->mm_node);
		spin_unlock(&bo->glob->lru_lock);
	}
	old_mem->mm_node = NULL;
}
Example #6
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list;
	struct drm_local_map *map;
	int ret = 0;

	/* Set the object up for mmap'ing */
	list = &obj->map_list;
	list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
	if (!list->map)
		return -ENOMEM;

	map = list->map;
	map->type = _DRM_GEM;
	map->size = obj->size;
	map->handle = obj;

	/* Get a DRM GEM mmap offset allocated... */
	list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
						    obj->size / PAGE_SIZE, 0, 0);

	if (!list->file_offset_node) {
		DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
		ret = -ENOSPC;
		goto out_free_list;
	}

	list->file_offset_node = drm_mm_get_block(list->file_offset_node,
						  obj->size / PAGE_SIZE, 0);
	if (!list->file_offset_node) {
		ret = -ENOMEM;
		goto out_free_list;
	}

	list->hash.key = list->file_offset_node->start;
	ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
	if (ret) {
		DRM_ERROR("failed to add to map hash\n");
		goto out_free_mm;
	}

	return 0;

out_free_mm:
	drm_mm_put_block(list->file_offset_node);
out_free_list:
	kfree(list->map);
	list->map = NULL;

	return ret;
}
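
The helper above only reserves the offset; a driver still has to hand it to userspace. The sketch below is a minimal, hypothetical illustration of that step: foo_gem_mmap_offset_ioctl() and its argument struct are assumed names, while drm_gem_object_lookup(), drm_gem_object_unreference_unlocked() and the map_list.hash.key encoding are the DRM core interfaces of this era.

/* Hypothetical ioctl payload (assumption, not a real driver ABI). */
struct foo_gem_mmap_offset {
	__u32 handle;
	__u32 pad;
	__u64 offset;
};

/*
 * Hypothetical driver ioctl: create the fake offset on demand and return it
 * to userspace, which passes it straight to mmap(2) on the DRM fd.
 */
static int foo_gem_mmap_offset_ioctl(struct drm_device *dev, void *data,
				     struct drm_file *file_priv)
{
	struct foo_gem_mmap_offset *args = data;
	struct drm_gem_object *obj;
	int ret = 0;

	obj = drm_gem_object_lookup(dev, file_priv, args->handle);
	if (!obj)
		return -ENOENT;

	if (!obj->map_list.map) {
		ret = drm_gem_create_mmap_offset(obj);
		if (ret)
			goto unref;
	}

	/* hash.key is in units of pages; userspace expects a byte offset. */
	args->offset = (u64)obj->map_list.hash.key << PAGE_SHIFT;

unref:
	drm_gem_object_unreference_unlocked(obj);
	return ret;
}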
Example #7
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_put_block(mem->mm_node);
		spin_unlock(&rman->lock);
		mem->mm_node = NULL;
	}
}
Example #8
/**
 * drm_gem_free_mmap_offset - release a fake mmap offset for an object
 * @obj: obj in question
 *
 * This routine frees fake offsets allocated by drm_gem_create_mmap_offset().
 */
void
drm_gem_free_mmap_offset(struct drm_gem_object *obj)
{
	struct drm_device *dev = obj->dev;
	struct drm_gem_mm *mm = dev->mm_private;
	struct drm_map_list *list = &obj->map_list;

	drm_ht_remove_item(&mm->offset_hash, &list->hash);
	drm_mm_put_block(list->file_offset_node);
	kfree(list->map);
	list->map = NULL;
}
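
The matching teardown normally lives in the driver's free path. The following is a hedged sketch, not code from any particular driver: foo_gem_free_object() is an assumed ->gem_free_object hook that drops the fake offset, if one was created, before releasing the object. drm_gem_object_release_wrap() further below shows the same steps open-coded.

/*
 * Hypothetical ->gem_free_object hook (assumed name): release the fake mmap
 * offset, if any, before tearing the object down.
 */
static void foo_gem_free_object(struct drm_gem_object *obj)
{
	if (obj->map_list.map)
		drm_gem_free_mmap_offset(obj);

	drm_gem_object_release(obj);
	kfree(obj);	/* assumes the driver kzalloc'ed the object itself */
}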
Example #9
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		lockmgr(&rman->lock, LK_EXCLUSIVE);
		drm_mm_put_block(mem->mm_node);
		lockmgr(&rman->lock, LK_RELEASE);
		mem->mm_node = NULL;
	}
}
Example #10
static int radeon_move_vram_ram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible,
				bool no_wait_reserve, bool no_wait_gpu,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	u32 placements;
	struct ttm_placement placement;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &placements;
	placement.num_busy_placement = 1;
	placement.busy_placement = &placements;
	placements = TTM_PL_MASK_CACHING | TTM_PL_FLAG_TT;
	r = ttm_bo_mem_space(bo, &placement, &tmp_mem,
			     interruptible, no_wait_reserve, no_wait_gpu);
	if (unlikely(r)) {
		return r;
	}

	r = ttm_tt_set_placement_caching(bo->ttm, tmp_mem.placement);
	if (unlikely(r)) {
		goto out_cleanup;
	}

	r = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait_reserve, no_wait_gpu, &tmp_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait_reserve, no_wait_gpu, new_mem);
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}
Example #11
void drm_gem_object_release_wrap(struct drm_gem_object *obj)
{
	/* Remove the list map if one is present */
	if (obj->map_list.map) {
		struct drm_gem_mm *mm = obj->dev->mm_private;
		struct drm_map_list *list = &obj->map_list;
		drm_ht_remove_item(&mm->offset_hash, &list->hash);
		drm_mm_put_block(list->file_offset_node);
		kfree(list->map);
		list->map = NULL;
	}
	drm_gem_object_release(obj);
}
/*
 * Flip destination ttm into GATT,
 * then blit and subsequently move out again.
 */
static int psb_move_flip(struct ttm_buffer_object *bo,
			 bool evict, bool interruptible, bool no_wait,
			 struct ttm_mem_reg *new_mem)
{
	/*struct ttm_bo_device *bdev = bo->bdev;*/
	struct ttm_mem_reg tmp_mem;
	int ret;
	struct ttm_placement placement;
	uint32_t flags = TTM_PL_FLAG_TT;

	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 1;
	placement.placement = &flags;
	placement.num_busy_placement = 0; /* FIXME */
	placement.busy_placement = NULL;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, false, no_wait);
#else
	ret = ttm_bo_mem_space(bo, &placement, &tmp_mem, interruptible, no_wait);
#endif
	if (ret)
		return ret;
	ret = ttm_tt_bind(bo->ttm, &tmp_mem);
	if (ret)
		goto out_cleanup;
	ret = psb_move_blit(bo, true, no_wait, &tmp_mem);
	if (ret)
		goto out_cleanup;

#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
	ret = ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem);
#else
	ret = ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
#endif
out_cleanup:
	if (tmp_mem.mm_node) {
		/*spin_lock(&bdev->lru_lock);*/ /* lru_lock is removed from upstream TTM */
		drm_mm_put_block(tmp_mem.mm_node);
		tmp_mem.mm_node = NULL;
		/*spin_unlock(&bdev->lru_lock);*/
	}
	return ret;
}
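
psb_move_flip() is normally reached from the driver's ttm_bo_driver ->move callback rather than called directly. The dispatcher below is an illustrative sketch assumed to live in the same file, not the actual psb callback: psb_bo_move() and its placement checks are assumptions, and the ttm_bo_move_ttm() call reuses the same kernel-version guard as the snippet above.

/*
 * Illustrative dispatcher (assumption, not the real psb ->move hook): plain
 * system<->GATT moves only need the TTM pages (un)bound, everything else is
 * bounced through the temporary GATT placement via psb_move_flip().
 */
static int psb_bo_move(struct ttm_buffer_object *bo,
		       bool evict, bool interruptible, bool no_wait,
		       struct ttm_mem_reg *new_mem)
{
	struct ttm_mem_reg *old_mem = &bo->mem;

	if ((old_mem->mem_type == TTM_PL_SYSTEM &&
	     new_mem->mem_type == TTM_PL_TT) ||
	    (old_mem->mem_type == TTM_PL_TT &&
	     new_mem->mem_type == TTM_PL_SYSTEM)) {
#if (LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0))
		return ttm_bo_move_ttm(bo, evict, false, no_wait, new_mem);
#else
		return ttm_bo_move_ttm(bo, evict, no_wait, new_mem);
#endif
	}

	return psb_move_flip(bo, evict, interruptible, no_wait, new_mem);
}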
Example #13
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_bo_cleanup_refs(bo, false);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}
Example #14
static void ttm_bo_release(struct kref *kref)
{
	struct ttm_buffer_object *bo =
	    container_of(kref, struct ttm_buffer_object, kref);
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_mem_type_manager *man = &bdev->man[bo->mem.mem_type];

	if (likely(bo->vm_node != NULL)) {
		rb_erase(&bo->vm_rb, &bdev->addr_space_rb);
		drm_mm_put_block(bo->vm_node);
		bo->vm_node = NULL;
	}
	write_unlock(&bdev->vm_lock);
	ttm_mem_io_lock(man, false);
	ttm_mem_io_free_vm(bo);
	ttm_mem_io_unlock(man);
	ttm_bo_cleanup_refs_or_queue(bo);
	kref_put(&bo->list_kref, ttm_bo_release_list);
	write_lock(&bdev->vm_lock);
}
Example #15
int ttm_bo_move_buffer(struct ttm_buffer_object *bo,
			struct ttm_placement *placement,
			bool interruptible, bool no_wait_reserve,
			bool no_wait_gpu)
{
	struct ttm_bo_global *glob = bo->glob;
	int ret = 0;
	struct ttm_mem_reg mem;

	BUG_ON(!atomic_read(&bo->reserved));

	/*
	 * FIXME: It's possible to pipeline buffer moves.
	 * Have the driver move function wait for idle when necessary,
	 * instead of doing it here.
	 */
	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);
	if (ret)
		return ret;
	mem.num_pages = bo->num_pages;
	mem.size = mem.num_pages << PAGE_SHIFT;
	mem.page_alignment = bo->mem.page_alignment;
	mem.bus.io_reserved = false;
	/*
	 * Determine where to move the buffer.
	 */
	ret = ttm_bo_mem_space(bo, placement, &mem, interruptible, no_wait_reserve, no_wait_gpu);
	if (ret)
		goto out_unlock;
	ret = ttm_bo_handle_move_mem(bo, &mem, false, interruptible, no_wait_reserve, no_wait_gpu);
out_unlock:
	if (ret && mem.mm_node) {
		spin_lock(&glob->lru_lock);
		drm_mm_put_block(mem.mm_node);
		spin_unlock(&glob->lru_lock);
	}
	return ret;
}
Example #16
static int radeon_move_ram_vram(struct ttm_buffer_object *bo,
				bool evict, bool interruptible, bool no_wait,
				struct ttm_mem_reg *new_mem)
{
	struct radeon_device *rdev;
	struct ttm_mem_reg *old_mem = &bo->mem;
	struct ttm_mem_reg tmp_mem;
	uint32_t proposed_flags;
	int r;

	rdev = radeon_get_rdev(bo->bdev);
	tmp_mem = *new_mem;
	tmp_mem.mm_node = NULL;
	proposed_flags = TTM_PL_FLAG_TT | TTM_PL_MASK_CACHING;
	r = ttm_bo_mem_space(bo, proposed_flags, &tmp_mem,
			     interruptible, no_wait);
	if (unlikely(r)) {
		return r;
	}
	r = ttm_bo_move_ttm(bo, true, no_wait, &tmp_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
	r = radeon_move_blit(bo, true, no_wait, new_mem, old_mem);
	if (unlikely(r)) {
		goto out_cleanup;
	}
out_cleanup:
	if (tmp_mem.mm_node) {
		struct ttm_bo_global *glob = rdev->mman.bdev.glob;

		spin_lock(&glob->lru_lock);
		drm_mm_put_block(tmp_mem.mm_node);
		spin_unlock(&glob->lru_lock);
		return r;
	}
	return r;
}
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
		       int size, uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct drm_mm_node *mem;
	uint32_t offset;
	int target, ret;

	mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
	if (mem)
		mem = drm_mm_get_block(mem, size, 0);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
		target = NV_DMA_TARGET_VIDMEM;
	} else
	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
		    dev_priv->card_type < NV_50) {
			ret = nouveau_sgdma_get_page(dev, offset, &offset);
			if (ret)
				return ret;
			target = NV_DMA_TARGET_PCI;
		} else {
			target = NV_DMA_TARGET_AGP;
			if (dev_priv->card_type >= NV_50)
				offset += dev_priv->vm_gart_base;
		}
	} else {
		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
			 chan->notifier_bo->bo.mem.mem_type);
		return -EINVAL;
	}
	offset += mem->start;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_DMA_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &nobj);
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}
Example #18
static int ttm_bo_cleanup_refs(struct ttm_buffer_object *bo, bool remove_all)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_bo_driver *driver = bdev->driver;
	int ret;

	spin_lock(&bo->lock);
	(void) ttm_bo_wait(bo, false, false, !remove_all);

	if (!bo->sync_obj) {
		int put_count;

		spin_unlock(&bo->lock);

		spin_lock(&glob->lru_lock);
		put_count = ttm_bo_del_from_lru(bo);

		ret = ttm_bo_reserve_locked(bo, false, false, false, 0);
		BUG_ON(ret);
		if (bo->ttm)
			ttm_tt_unbind(bo->ttm);

		if (!list_empty(&bo->ddestroy)) {
			list_del_init(&bo->ddestroy);
			++put_count;
		}
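		/* Hand the managed VRAM/GTT range back to its drm_mm while
		 * the global LRU lock is still held.
		 */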
		if (bo->mem.mm_node) {
			drm_mm_put_block(bo->mem.mm_node);
			bo->mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);

		atomic_set(&bo->reserved, 0);

		while (put_count--)
			kref_put(&bo->list_kref, ttm_bo_ref_bug);

		return 0;
	}

	spin_lock(&glob->lru_lock);
	if (list_empty(&bo->ddestroy)) {
		void *sync_obj = bo->sync_obj;
		void *sync_obj_arg = bo->sync_obj_arg;

		kref_get(&bo->list_kref);
		list_add_tail(&bo->ddestroy, &bdev->ddestroy);
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);

		if (sync_obj)
			driver->sync_obj_flush(sync_obj, sync_obj_arg);
		schedule_delayed_work(&bdev->wq,
				      ((HZ / 100) < 1) ? 1 : HZ / 100);
		ret = 0;

	} else {
		spin_unlock(&glob->lru_lock);
		spin_unlock(&bo->lock);
		ret = -EBUSY;
	}

	return ret;
}
Example #19
static int ttm_bo_evict(struct ttm_buffer_object *bo, bool interruptible,
			bool no_wait_reserve, bool no_wait_gpu)
{
	struct ttm_bo_device *bdev = bo->bdev;
	struct ttm_bo_global *glob = bo->glob;
	struct ttm_mem_reg evict_mem;
	struct ttm_placement placement;
	int ret = 0;

	spin_lock(&bo->lock);
	ret = ttm_bo_wait(bo, false, interruptible, no_wait_gpu);
	spin_unlock(&bo->lock);

	if (unlikely(ret != 0)) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to expire sync object before "
			       "buffer eviction.\n");
		}
		goto out;
	}

	BUG_ON(!atomic_read(&bo->reserved));

	evict_mem = bo->mem;
	evict_mem.mm_node = NULL;
	evict_mem.bus.io_reserved = false;

	placement.fpfn = 0;
	placement.lpfn = 0;
	placement.num_placement = 0;
	placement.num_busy_placement = 0;
	bdev->driver->evict_flags(bo, &placement);
	ret = ttm_bo_mem_space(bo, &placement, &evict_mem, interruptible,
				no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS) {
			printk(KERN_ERR TTM_PFX
			       "Failed to find memory space for "
			       "buffer 0x%p eviction.\n", bo);
			ttm_bo_mem_space_debug(bo, &placement);
		}
		goto out;
	}

	ret = ttm_bo_handle_move_mem(bo, &evict_mem, true, interruptible,
				     no_wait_reserve, no_wait_gpu);
	if (ret) {
		if (ret != -ERESTARTSYS)
			printk(KERN_ERR TTM_PFX "Buffer eviction failed\n");
		spin_lock(&glob->lru_lock);
		if (evict_mem.mm_node) {
			drm_mm_put_block(evict_mem.mm_node);
			evict_mem.mm_node = NULL;
		}
		spin_unlock(&glob->lru_lock);
		goto out;
	}
	bo->evicted = true;
out:
	return ret;
}