Example #1
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	ret = drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size,
				 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj)
		return obj;

	drm_mm_remove_node(stolen);
	kfree(stolen);
	return NULL;
}
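Every example on this page follows the same lifecycle: a drm_mm_node is handed to an allocator (via drm_mm_insert_node(), drm_mm_insert_node_generic(), drm_mm_insert_node_in_range() or drm_mm_reserve_node()) and later unlinked with drm_mm_remove_node(); when the node was allocated separately on the heap, it is kfree()d only after removal. Below is a minimal sketch of that pairing, with hypothetical example_* helpers, written against the older five-argument drm_mm_insert_node() API that Example #1 uses:

#include <linux/slab.h>
#include <drm/drm_mm.h>

/* Hypothetical helpers showing the allocate/release pairing. */
static struct drm_mm_node *example_alloc_range(struct drm_mm *mm, u32 size)
{
	struct drm_mm_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return NULL;

	ret = drm_mm_insert_node(mm, node, size, 4096,
				 DRM_MM_SEARCH_DEFAULT);
	if (ret) {
		kfree(node);	/* never inserted, so no drm_mm_remove_node() */
		return NULL;
	}

	return node;
}

static void example_free_range(struct drm_mm_node *node)
{
	drm_mm_remove_node(node);	/* unlink from the allocator first ... */
	kfree(node);			/* ... only then free the node itself */
}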
Example #2
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_device *adev = amdgpu_ttm_adev(man->bdev);
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm_node *nodes = mem->mm_node;
	uint64_t usage = 0, vis_usage = 0;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		usage += nodes->size << PAGE_SHIFT;
		vis_usage += amdgpu_vram_mgr_vis_size(adev, nodes);
		++nodes;
	}
	spin_unlock(&mgr->lock);

	atomic64_sub(usage, &mgr->usage);
	atomic64_sub(vis_usage, &mgr->vis_usage);

	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
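Note that the per-node bookkeeping above reads nodes->size and nodes->start after drm_mm_remove_node(); that is fine because removal only unlinks the node from the allocator, it does not clear the node's start or size fields.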
Example #3
void i915_gem_stolen_remove_node(struct drm_i915_private *dev_priv,
				 struct drm_mm_node *node)
{
	mutex_lock(&dev_priv->mm.stolen_lock);
	drm_mm_remove_node(node);
	mutex_unlock(&dev_priv->mm.stolen_lock);
}
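drm_mm does no locking of its own, so every call site has to serialize drm_mm_remove_node() against concurrent insertions with a driver lock: the stolen_lock mutex here, spinlocks in the TTM, amdgpu, msm and panfrost examples, and lockmgr in the BSD-ported ones further down.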
Example #4
static int i915_setup_compression(struct drm_device *dev, int size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb);
	int ret;

	compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL);
	if (!compressed_fb)
		goto err_llb;

	/* Try to over-allocate to reduce reallocations and fragmentation */
	ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
				 size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT);
	if (ret)
		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb,
					 size >>= 1, 4096,
					 DRM_MM_SEARCH_DEFAULT);
	if (ret)
		goto err_llb;

	if (HAS_PCH_SPLIT(dev))
		I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start);
	else if (IS_GM45(dev)) {
		I915_WRITE(DPFC_CB_BASE, compressed_fb->start);
	} else {
		compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL);
		if (!compressed_llb)
			goto err_fb;

		ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb,
					 4096, 4096, DRM_MM_SEARCH_DEFAULT);
		if (ret)
			goto err_fb;

		dev_priv->fbc.compressed_llb = compressed_llb;

		I915_WRITE(FBC_CFB_BASE,
			   dev_priv->mm.stolen_base + compressed_fb->start);
		I915_WRITE(FBC_LL_BASE,
			   dev_priv->mm.stolen_base + compressed_llb->start);
	}

	dev_priv->fbc.compressed_fb = compressed_fb;
	dev_priv->fbc.size = size;

	DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n",
		      size);

	return 0;

err_fb:
	kfree(compressed_llb);
	drm_mm_remove_node(compressed_fb);
err_llb:
	kfree(compressed_fb);
	pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size);
	return -ENOSPC;
}
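Two details are worth noting here: the first drm_mm_insert_node() call deliberately asks for twice the requested size (size <<= 1) to reduce reallocations and fragmentation, falling back to the original size (size >>= 1) only if that fails; and the err_llb path is safe even when the initial kzalloc() failed, because kfree(NULL) is a no-op.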
Example #5
void
i915_gem_object_release_stolen(struct drm_i915_gem_object *obj)
{
	if (obj->stolen) {
		drm_mm_remove_node(obj->stolen);
		kfree(obj->stolen);
		obj->stolen = NULL;
	}
}
Example #6
void i915_gem_stolen_cleanup_compression(struct drm_device *dev)
{
	struct drm_i915_private *dev_priv = dev->dev_private;

	if (dev_priv->fbc.size == 0)
		return;

	if (dev_priv->fbc.compressed_fb) {
		drm_mm_remove_node(dev_priv->fbc.compressed_fb);
		kfree(dev_priv->fbc.compressed_fb);
	}

	if (dev_priv->fbc.compressed_llb) {
		drm_mm_remove_node(dev_priv->fbc.compressed_llb);
		kfree(dev_priv->fbc.compressed_llb);
	}

	dev_priv->fbc.size = 0;
}
Example #7
File: gem.c Project: JaneDu/ath
static int tegra_bo_iommu_unmap(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	if (!bo->mm)
		return 0;

	iommu_unmap(tegra->domain, bo->paddr, bo->size);
	drm_mm_remove_node(bo->mm);
	kfree(bo->mm);

	return 0;
}
Example #8
/* Called by DRM core on the last userspace/kernel unreference of the
 * BO.
 */
static void panfrost_gem_free_object(struct drm_gem_object *obj)
{
	struct panfrost_gem_object *bo = to_panfrost_bo(obj);
	struct panfrost_device *pfdev = obj->dev->dev_private;

	panfrost_mmu_unmap(bo);

	spin_lock(&pfdev->mm_lock);
	drm_mm_remove_node(&bo->node);
	spin_unlock(&pfdev->mm_lock);

	drm_gem_shmem_free_object(obj);
}
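Examples #8, #12, #13 and #15 embed the drm_mm_node directly in the buffer object, so teardown only needs drm_mm_remove_node(), while Examples #1, #5, #7, #10 and #11 allocate the node separately and must kfree() it afterwards. A short sketch of the two ownership patterns, with hypothetical struct names:

#include <linux/slab.h>
#include <drm/drm_mm.h>

struct embedded_bo {
	struct drm_mm_node node;	/* node lives inside the object */
};

struct heap_bo {
	struct drm_mm_node *node;	/* node allocated with kzalloc() */
};

static void embedded_bo_release(struct embedded_bo *bo)
{
	if (drm_mm_node_allocated(&bo->node))	/* still held by an allocator? */
		drm_mm_remove_node(&bo->node);
}

static void heap_bo_release(struct heap_bo *bo)
{
	if (bo->node) {
		drm_mm_remove_node(bo->node);
		kfree(bo->node);
		bo->node = NULL;
	}
}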
Example #9
/**
 * drm_vma_offset_remove() - Remove offset node from manager
 * @mgr: Manager object
 * @node: Node to be removed
 *
 * Remove a node from the offset manager. If the node wasn't added before, this
 * does nothing. After this call returns, the offset and size will be 0 until a
 * new offset is allocated via drm_vma_offset_add() again. Helper functions like
 * drm_vma_node_start() and drm_vma_node_offset_addr() will return 0 if no
 * offset is allocated.
 */
void drm_vma_offset_remove(struct drm_vma_offset_manager *mgr,
			   struct drm_vma_offset_node *node)
{
	lockmgr(&mgr->vm_lock, LK_EXCLUSIVE);

	if (drm_mm_node_allocated(&node->vm_node)) {
		rb_erase(&node->vm_rb, &mgr->vm_addr_space_rb);
		drm_mm_remove_node(&node->vm_node);
		memset(&node->vm_node, 0, sizeof(node->vm_node));
	}

	lockmgr(&mgr->vm_lock, LK_RELEASE);
}
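drm_vma_offset_remove() is the teardown half of the mmap-offset helpers the comment above refers to. A hedged sketch of the usual pairing with drm_vma_offset_add(), using a hypothetical my_gem_object:

#include <drm/drm_vma_manager.h>

struct my_gem_object {
	struct drm_vma_offset_node vma_node;	/* hypothetical object */
	size_t size;
	u64 mmap_offset;
};

static int my_gem_add_mmap_offset(struct drm_vma_offset_manager *mgr,
				  struct my_gem_object *obj)
{
	int ret;

	ret = drm_vma_offset_add(mgr, &obj->vma_node, obj->size >> PAGE_SHIFT);
	if (ret)
		return ret;

	/* Userspace mmap()s the buffer at this fake offset. */
	obj->mmap_offset = drm_vma_node_offset_addr(&obj->vma_node);
	return 0;
}

static void my_gem_remove_mmap_offset(struct drm_vma_offset_manager *mgr,
				      struct my_gem_object *obj)
{
	/* Safe even if the offset was never added (see the comment above). */
	drm_vma_offset_remove(mgr, &obj->vma_node);
}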
Example #10
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		spin_lock(&rman->lock);
		drm_mm_remove_node(mem->mm_node);
		spin_unlock(&rman->lock);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}
Example #11
static void ttm_bo_man_put_node(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv;

	if (mem->mm_node) {
		lockmgr(&rman->lock, LK_EXCLUSIVE);
		drm_mm_remove_node(mem->mm_node);
		lockmgr(&rman->lock, LK_RELEASE);

		kfree(mem->mm_node);
		mem->mm_node = NULL;
	}
}
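Examples #10 and #11 are the same TTM range-manager helper; the first is the Linux version guarded by a spinlock, while the second (like Example #9) appears to come from a DragonFly BSD port, where lockmgr(..., LK_EXCLUSIVE) / lockmgr(..., LK_RELEASE) plays the same role.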
Example #12
static int rockchip_gem_iommu_unmap(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;

	iommu_unmap(private->domain, rk_obj->dma_addr, rk_obj->size);

	mutex_lock(&private->mm_lock);

	drm_mm_remove_node(&rk_obj->mm);

	mutex_unlock(&private->mm_lock);

	return 0;
}
Example #13
/* Close an iova.  Warn if it is still in use */
void msm_gem_close_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma)
{
	if (WARN_ON(vma->inuse > 0 || vma->mapped))
		return;

	spin_lock(&aspace->lock);
	if (vma->iova)
		drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}
Example #14
/**
 * intel_vgt_deballoon - deballoon reserved graphics address trunks
 * @dev_priv: i915 device private data
 *
 * This function is called to deallocate the ballooned-out graphic memory, when
 * driver is unloaded or when ballooning fails.
 */
void intel_vgt_deballoon(struct drm_i915_private *dev_priv)
{
	int i;

	if (!intel_vgpu_active(dev_priv))
		return;

	DRM_DEBUG("VGT deballoon.\n");

	for (i = 0; i < 4; i++) {
		if (bl_info.space[i].allocated)
			drm_mm_remove_node(&bl_info.space[i]);
	}

	memset(&bl_info, 0, sizeof(bl_info));
}
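The open-coded check of bl_info.space[i].allocated serves the same purpose as the drm_mm_node_allocated() helper in Example #9: only nodes that are actually held by an allocator may be passed to drm_mm_remove_node().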
Example #15
void
msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
		struct msm_gem_vma *vma, struct sg_table *sgt)
{
	if (!aspace || !vma->iova)
		return;

	if (aspace->mmu) {
		unsigned size = vma->node.size << PAGE_SHIFT;
		aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, size);
	}

	spin_lock(&aspace->lock);
	drm_mm_remove_node(&vma->node);
	spin_unlock(&aspace->lock);

	vma->iova = 0;

	msm_gem_address_space_put(aspace);
}
Example #16
/**
 * amdgpu_vram_mgr_del - free ranges
 *
 * @man: TTM memory type manager
 * @mem: TTM memory object
 *
 * Free the allocated VRAM again.
 */
static void amdgpu_vram_mgr_del(struct ttm_mem_type_manager *man,
				struct ttm_mem_reg *mem)
{
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm_node *nodes = mem->mm_node;
	unsigned pages = mem->num_pages;

	if (!mem->mm_node)
		return;

	spin_lock(&mgr->lock);
	while (pages) {
		pages -= nodes->size;
		drm_mm_remove_node(nodes);
		++nodes;
	}
	spin_unlock(&mgr->lock);

	kfree(mem->mm_node);
	mem->mm_node = NULL;
}
Example #17
static int rockchip_gem_iommu_map(struct rockchip_gem_object *rk_obj)
{
	struct drm_device *drm = rk_obj->base.dev;
	struct rockchip_drm_private *private = drm->dev_private;
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t ret;

	mutex_lock(&private->mm_lock);
	ret = drm_mm_insert_node_generic(&private->mm, &rk_obj->mm,
					 rk_obj->base.size, PAGE_SIZE,
					 0, 0);
	mutex_unlock(&private->mm_lock);

	if (ret < 0) {
		DRM_ERROR("out of I/O virtual memory: %zd\n", ret);
		return ret;
	}

	rk_obj->dma_addr = rk_obj->mm.start;

	ret = iommu_map_sg(private->domain, rk_obj->dma_addr, rk_obj->sgt->sgl,
			   rk_obj->sgt->nents, prot);
	if (ret < rk_obj->base.size) {
		DRM_ERROR("failed to map buffer: size=%zd request_size=%zd\n",
			  ret, rk_obj->base.size);
		ret = -ENOMEM;
		goto err_remove_node;
	}

	rk_obj->size = ret;

	return 0;

err_remove_node:
	mutex_lock(&private->mm_lock);
	drm_mm_remove_node(&rk_obj->mm);
	mutex_unlock(&private->mm_lock);

	return ret;
}
Example #18
File: gem.c Project: JaneDu/ath
static int tegra_bo_iommu_map(struct tegra_drm *tegra, struct tegra_bo *bo)
{
	int prot = IOMMU_READ | IOMMU_WRITE;
	ssize_t err;

	if (bo->mm)
		return -EBUSY;

	bo->mm = kzalloc(sizeof(*bo->mm), GFP_KERNEL);
	if (!bo->mm)
		return -ENOMEM;

	err = drm_mm_insert_node_generic(&tegra->mm, bo->mm, bo->gem.size,
					 PAGE_SIZE, 0, 0, 0);
	if (err < 0) {
		dev_err(tegra->drm->dev, "out of I/O virtual memory: %zd\n",
			err);
		goto free;
	}

	bo->paddr = bo->mm->start;

	err = iommu_map_sg(tegra->domain, bo->paddr, bo->sgt->sgl,
			   bo->sgt->nents, prot);
	if (err < 0) {
		dev_err(tegra->drm->dev, "failed to map buffer: %zd\n", err);
		goto remove;
	}

	bo->size = err;

	return 0;

remove:
	drm_mm_remove_node(bo->mm);
free:
	kfree(bo->mm);
	return err;
}
Example #19
static void put_pages(struct drm_gem_object *obj)
{
	struct msm_gem_object *msm_obj = to_msm_bo(obj);

	if (msm_obj->pages) {
		/* For non-cached buffers, ensure the new pages are clean
		 * because display controller, GPU, etc. are not coherent:
		 */
		if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
			dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
					msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
		sg_free_table(msm_obj->sgt);
		kfree(msm_obj->sgt);

		if (use_pages(obj))
			drm_gem_put_pages(obj, msm_obj->pages, true, false);
		else {
			drm_mm_remove_node(msm_obj->vram_node);
			drm_free_large(msm_obj->pages);
		}

		msm_obj->pages = NULL;
	}
}
Example #20
/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.
 */
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	unsigned i;
	int r;

	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
	    amdgpu_vram_page_split == -1) {
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
				     mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	for (i = 0; i < num_nodes; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;
		unsigned long start;

		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i],
						pages, alignment, 0,
						place->fpfn, lpfn,
						mode);
		if (unlikely(r))
			goto error;

		/* Calculate a virtual BO start address to easily check if
		 * everything is CPU accessible.
		 */
		start = nodes[i].start + nodes[i].size;
		if (start > mem->num_pages)
			start -= mem->num_pages;
		else
			start = 0;
		mem->start = max(mem->start, start);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	mem->mm_node = nodes;

	return 0;

error:
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);

	kfree(nodes);
	return r == -ENOSPC ? 0 : r;
}
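Two points in the error path are easy to miss: while (i--) walks back over only the nodes that were successfully inserted, so a partially built allocation is unwound cleanly, and -ENOSPC is converted into a 0 return with mem->mm_node left NULL, which is how a TTM memory manager reports "space is full, try eviction" rather than a hard failure.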
Example #21
struct drm_i915_gem_object *
i915_gem_object_create_stolen_for_preallocated(struct drm_device *dev,
					       u32 stolen_offset,
					       u32 gtt_offset,
					       u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct i915_address_space *ggtt = &dev_priv->gtt.base;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;
	struct i915_vma *vma;
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating preallocated stolen object: stolen_offset=%x, gtt_offset=%x, size=%x\n",
			stolen_offset, gtt_offset, size);

	/* KISS and expect everything to be page-aligned */
	BUG_ON(stolen_offset & 4095);
	BUG_ON(size & 4095);

	if (WARN_ON(size == 0))
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (!stolen)
		return NULL;

	stolen->start = stolen_offset;
	stolen->size = size;
	ret = drm_mm_reserve_node(&dev_priv->mm.stolen, stolen);
	if (ret) {
		DRM_DEBUG_KMS("failed to allocate stolen space\n");
		kfree(stolen);
		return NULL;
	}

	obj = _i915_gem_object_create_stolen(dev, stolen);
	if (obj == NULL) {
		DRM_DEBUG_KMS("failed to allocate stolen object\n");
		drm_mm_remove_node(stolen);
		kfree(stolen);
		return NULL;
	}

	/* Some objects just need physical mem from stolen space */
	if (gtt_offset == I915_GTT_OFFSET_NONE)
		return obj;

	vma = i915_gem_vma_create(obj, ggtt);
	if (IS_ERR(vma)) {
		ret = PTR_ERR(vma);
		goto err_out;
	}

	/* To simplify the initialisation sequence between KMS and GTT,
	 * we allow construction of the stolen object prior to
	 * setting up the GTT space. The actual reservation will occur
	 * later.
	 */
	vma->node.start = gtt_offset;
	vma->node.size = size;
	if (drm_mm_initialized(&ggtt->mm)) {
		ret = drm_mm_reserve_node(&ggtt->mm, &vma->node);
		if (ret) {
			DRM_DEBUG_KMS("failed to allocate stolen GTT space\n");
			goto err_vma;
		}
	}

	obj->has_global_gtt_mapping = 1;

	list_add_tail(&obj->global_list, &dev_priv->mm.bound_list);
	list_add_tail(&vma->mm_list, &ggtt->inactive_list);

	return obj;

err_vma:
	i915_gem_vma_destroy(vma);
err_out:
	drm_mm_remove_node(stolen);
	kfree(stolen);
	drm_gem_object_unreference(&obj->base);
	return NULL;
}
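Unlike Example #1, this variant places the node at a caller-chosen offset with drm_mm_reserve_node() (start and size are filled in beforehand) instead of letting the allocator pick a hole with drm_mm_insert_node(); either way, the reservation is released with the same drm_mm_remove_node() + kfree() pair on every error path.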