Example #1
/**
 * drm_gem_create_mmap_offset - create a fake mmap offset for an object
 * @obj: obj in question
 *
 * GEM memory mapping works by handing back to userspace a fake mmap offset
 * it can use in a subsequent mmap(2) call.  The DRM core code then looks
 * up the object based on the offset and sets up the various memory mapping
 * structures.
 *
 * This routine allocates and attaches a fake offset for @obj.
 */
int
drm_gem_create_mmap_offset(struct drm_gem_object *obj)
{
    struct drm_device *dev = obj->dev;
    struct drm_gem_mm *mm = dev->mm_private;
    struct drm_map_list *list;
    struct drm_local_map *map;
    int ret = 0;

    /* Set the object up for mmap'ing */
    list = &obj->map_list;
    list->map = kzalloc(sizeof(struct drm_map_list), GFP_KERNEL);
    if (!list->map)
        return -ENOMEM;

    map = list->map;
    map->type = _DRM_GEM;
    map->size = obj->size;
    map->handle = obj;

    /* Get a DRM GEM mmap offset allocated... */
    list->file_offset_node = drm_mm_search_free(&mm->offset_manager,
                             obj->size / PAGE_SIZE, 0, 0);

    if (!list->file_offset_node) {
        DRM_ERROR("failed to allocate offset for bo %d\n", obj->name);
        ret = -ENOSPC;
        goto out_free_list;
    }

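    /* drm_mm_search_free() only locates a large-enough hole; nothing is
     * claimed until drm_mm_get_block() carves the allocation out of it. */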
    list->file_offset_node = drm_mm_get_block(list->file_offset_node,
                             obj->size / PAGE_SIZE, 0);
    if (!list->file_offset_node) {
        ret = -ENOMEM;
        goto out_free_list;
    }

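    /* Publish the offset in the hash table so the DRM core can translate the
     * fake offset back into this object when userspace calls mmap(2). */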
    list->hash.key = list->file_offset_node->start;
    ret = drm_ht_insert_item(&mm->offset_hash, &list->hash);
    if (ret) {
        DRM_ERROR("failed to add to map hash\n");
        goto out_free_mm;
    }

    return 0;

out_free_mm:
    drm_mm_put_block(list->file_offset_node);
out_free_list:
    kfree(list->map);
    list->map = NULL;

    return ret;
}
Example #2
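/*
 * Allocate a range from the framebuffer engine's tag heap.  drm_mm_pre_get()
 * preallocates node memory up front so that the search/get_block_atomic pair
 * can run under the tile spinlock without sleeping.
 */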
static struct drm_mm_node *
nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct drm_mm_node *mem;
	int ret;

	ret = drm_mm_pre_get(&pfb->tag_heap);
	if (ret)
		return NULL;

	spin_lock(&dev_priv->tile.lock);
	mem = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
	if (mem)
		mem = drm_mm_get_block_atomic(mem, size, 0);
	spin_unlock(&dev_priv->tile.lock);

	return mem;
}
Example #3
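/*
 * Reserve a node in the device's mmap address space for a buffer object; the
 * node's start, shifted by PAGE_SHIFT, becomes the offset userspace passes
 * to mmap(2).
 */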
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo)
{
	struct ttm_bo_device *bdev = bo->bdev;
	int ret;

retry_pre_get:
	ret = drm_mm_pre_get(&bdev->addr_space_mm);
	if (unlikely(ret != 0))
		return ret;

	write_lock(&bdev->vm_lock);
	bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm,
					 bo->mem.num_pages, 0, 0);

	if (unlikely(bo->vm_node == NULL)) {
		ret = -ENOMEM;
		goto out_unlock;
	}

	bo->vm_node = drm_mm_get_block_atomic(bo->vm_node,
					      bo->mem.num_pages, 0);

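	/*
	 * drm_mm_get_block_atomic() could not get a spare node to split the
	 * hole (the one preallocated above was consumed, e.g. by a concurrent
	 * allocation), so drop the lock, replenish and retry.
	 */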
	if (unlikely(bo->vm_node == NULL)) {
		write_unlock(&bdev->vm_lock);
		goto retry_pre_get;
	}

	ttm_bo_vm_insert_rb(bo);
	write_unlock(&bdev->vm_lock);
	bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT;

	return 0;
out_unlock:
	write_unlock(&bdev->vm_lock);
	return ret;
}
Example #4
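/*
 * Allocation hook for the DRM simple memory manager: a best-match search
 * (last argument to drm_mm_search_free()) followed by drm_mm_get_block().
 * The returned drm_mm_node doubles as the opaque handle later passed to
 * drm_sman_mm_free().
 */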
static void *drm_sman_mm_allocate(void *private, unsigned long size,
				  unsigned alignment)
{
	struct drm_mm *mm = (struct drm_mm *) private;
	struct drm_mm_node *tmp;

	tmp = drm_mm_search_free(mm, size, alignment, 1);
	if (!tmp) {
		return NULL;
	}
	tmp = drm_mm_get_block(tmp, size, alignment);
	return tmp;
}

static void drm_sman_mm_free(void *private, void *ref)
{
	struct drm_mm_node *node = (struct drm_mm_node *) ref;

	drm_mm_put_block(node);
}

static void drm_sman_mm_destroy(void *private)
{
	struct drm_mm *mm = (struct drm_mm *) private;

	drm_mm_takedown(mm);
	kfree(mm);
}
Example #5
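/*
 * Carve a notifier slot out of the channel's notifier heap and expose it to
 * the GPU through an NV_CLASS_DMA_IN_MEMORY ctxdma object.
 */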
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
		       int size, uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct drm_mm_node *mem;
	uint32_t offset;
	int target, ret;

	mem = drm_mm_search_free(&chan->notifier_heap, size, 0, 0);
	if (mem)
		mem = drm_mm_get_block(mem, size, 0);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

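	/* Translate the notifier buffer's placement into a DMA target and an
	 * absolute offset the new ctxdma should point at. */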
	offset = chan->notifier_bo->bo.mem.mm_node->start << PAGE_SHIFT;
	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM) {
		target = NV_DMA_TARGET_VIDMEM;
	} else
	if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_TT) {
		if (dev_priv->gart_info.type == NOUVEAU_GART_SGDMA &&
		    dev_priv->card_type < NV_50) {
			ret = nouveau_sgdma_get_page(dev, offset, &offset);
			if (ret)
				return ret;
			target = NV_DMA_TARGET_PCI;
		} else {
			target = NV_DMA_TARGET_AGP;
			if (dev_priv->card_type >= NV_50)
				offset += dev_priv->vm_gart_base;
		}
	} else {
		NV_ERROR(dev, "Bad DMA target, mem_type %d!\n",
			 chan->notifier_bo->bo.mem.mem_type);
		return -EINVAL;
	}
	offset += mem->start;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_DMA_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_gpuobj_ref_add(dev, chan, handle, nobj, NULL);
	if (ret) {
		nouveau_gpuobj_del(dev, &nobj);
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error referencing notifier ctxdma: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}