static int i915_setup_compression(struct drm_device *dev, int size) { struct drm_i915_private *dev_priv = dev->dev_private; struct drm_mm_node *compressed_fb, *uninitialized_var(compressed_llb); int ret; compressed_fb = kzalloc(sizeof(*compressed_fb), GFP_KERNEL); if (!compressed_fb) goto err_llb; /* Try to over-allocate to reduce reallocations and fragmentation */ ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, size <<= 1, 4096, DRM_MM_SEARCH_DEFAULT); if (ret) ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_fb, size >>= 1, 4096, DRM_MM_SEARCH_DEFAULT); if (ret) goto err_llb; if (HAS_PCH_SPLIT(dev)) I915_WRITE(ILK_DPFC_CB_BASE, compressed_fb->start); else if (IS_GM45(dev)) { I915_WRITE(DPFC_CB_BASE, compressed_fb->start); } else { compressed_llb = kzalloc(sizeof(*compressed_llb), GFP_KERNEL); if (!compressed_llb) goto err_fb; ret = drm_mm_insert_node(&dev_priv->mm.stolen, compressed_llb, 4096, 4096, DRM_MM_SEARCH_DEFAULT); if (ret) goto err_fb; dev_priv->fbc.compressed_llb = compressed_llb; I915_WRITE(FBC_CFB_BASE, dev_priv->mm.stolen_base + compressed_fb->start); I915_WRITE(FBC_LL_BASE, dev_priv->mm.stolen_base + compressed_llb->start); } dev_priv->fbc.compressed_fb = compressed_fb; dev_priv->fbc.size = size; DRM_DEBUG_KMS("reserved %d bytes of contiguous stolen space for FBC\n", size); return 0; err_fb: kfree(compressed_llb); drm_mm_remove_node(compressed_fb); err_llb: kfree(compressed_fb); pr_info_once("drm: not enough stolen space for compressed buffer (need %d more bytes), disabling. Hint: you may be able to increase stolen memory size in the BIOS to avoid this.\n", size); return -ENOSPC; }
/*
 * i915_gem_object_create_stolen - back a new GEM object with stolen memory.
 *
 * @dev:  drm device
 * @size: object size in bytes (must be non-zero)
 *
 * Reserves a 4KiB-aligned range in the stolen-memory allocator and wraps
 * it in a GEM object.  Returns the object on success, or NULL if the
 * stolen allocator is uninitialized, @size is zero, or any allocation
 * step fails (the reserved range is released on the failure paths).
 */
struct drm_i915_gem_object *
i915_gem_object_create_stolen(struct drm_device *dev, u32 size)
{
	struct drm_i915_private *dev_priv = dev->dev_private;
	struct drm_i915_gem_object *obj;
	struct drm_mm_node *stolen;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return NULL;

	DRM_DEBUG_KMS("creating stolen object: size=%x\n", size);
	if (size == 0)
		return NULL;

	stolen = kzalloc(sizeof(*stolen), GFP_KERNEL);
	if (stolen == NULL)
		return NULL;

	if (drm_mm_insert_node(&dev_priv->mm.stolen, stolen, size, 4096,
			       DRM_MM_SEARCH_DEFAULT) == 0) {
		obj = _i915_gem_object_create_stolen(dev, stolen);
		if (obj)
			return obj;
		/* Object creation failed: give the range back. */
		drm_mm_remove_node(stolen);
	}

	kfree(stolen);
	return NULL;
}
/*
 * msm_gem_map_vma - allocate an iova range for @vma in @aspace and, if the
 * address space has an MMU, map the backing pages described by @sgt into it
 * with read/write permission.
 *
 * @aspace: address space to allocate from
 * @vma:    vma whose drm_mm node / iova are filled in
 * @sgt:    scatter-gather table of the backing pages
 * @npages: size of the mapping in pages
 *
 * Returns 0 on success (including the already-allocated case below) or a
 * negative errno from the allocator or the MMU map callback.
 */
int msm_gem_map_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, struct sg_table *sgt, int npages)
{
	int ret;

	spin_lock(&aspace->lock);
	/* Already allocated: warn (caller bug) but treat as success. */
	if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
		spin_unlock(&aspace->lock);
		return 0;
	}

	/* The allocator is protected by aspace->lock; drop it before mapping. */
	ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages);
	spin_unlock(&aspace->lock);

	if (ret)
		return ret;

	/* drm_mm allocates in page units; convert the node start to an iova. */
	vma->iova = vma->node.start << PAGE_SHIFT;

	if (aspace->mmu) {
		unsigned size = npages << PAGE_SHIFT;
		ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt, size, IOMMU_READ | IOMMU_WRITE);
	}
	/*
	 * NOTE(review): if ->map() fails, the drm_mm node stays allocated and
	 * the kref below is still taken; presumably the caller is expected to
	 * unmap/close the vma on error — verify against callers.
	 */

	/* Get a reference to the aspace to keep it around */
	kref_get(&aspace->kref);

	return ret;
}
/* Initialize a new vma and allocate an iova for it */ int msm_gem_init_vma(struct msm_gem_address_space *aspace, struct msm_gem_vma *vma, int npages) { int ret; if (WARN_ON(vma->iova)) return -EBUSY; spin_lock(&aspace->lock); ret = drm_mm_insert_node(&aspace->mm, &vma->node, npages); spin_unlock(&aspace->lock); if (ret) return ret; vma->iova = vma->node.start << PAGE_SHIFT; vma->mapped = false; kref_get(&aspace->kref); return 0; }
/** * drm_vma_offset_add() - Add offset node to manager * @mgr: Manager object * @node: Node to be added * @pages: Allocation size visible to user-space (in number of pages) * * Add a node to the offset-manager. If the node was already added, this does * nothing and return 0. @pages is the size of the object given in number of * pages. * After this call succeeds, you can access the offset of the node until it * is removed again. * * If this call fails, it is safe to retry the operation or call * drm_vma_offset_remove(), anyway. However, no cleanup is required in that * case. * * @pages is not required to be the same size as the underlying memory object * that you want to map. It only limits the size that user-space can map into * their address space. * * RETURNS: * 0 on success, negative error code on failure. */ int drm_vma_offset_add(struct drm_vma_offset_manager *mgr, struct drm_vma_offset_node *node, unsigned long pages) { int ret; lockmgr(&mgr->vm_lock, LK_EXCLUSIVE); if (drm_mm_node_allocated(&node->vm_node)) { ret = 0; goto out_unlock; } ret = drm_mm_insert_node(&mgr->vm_addr_space_mm, &node->vm_node, pages, 0, DRM_MM_SEARCH_DEFAULT); if (ret) goto out_unlock; _drm_vma_offset_add_rb(mgr, node); out_unlock: lockmgr(&mgr->vm_lock, LK_RELEASE); return ret; }