static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; struct drm_mm_node *node = NULL; unsigned long lpfn; int ret; lpfn = placement->lpfn; if (!lpfn) lpfn = man->size; node = kzalloc(sizeof(*node), GFP_KERNEL); if (!node) return -ENOMEM; spin_lock(&rman->lock); ret = drm_mm_insert_node_in_range(mm, node, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn, DRM_MM_SEARCH_BEST); spin_unlock(&rman->lock); if (unlikely(ret)) { kfree(node); } else { mem->mm_node = node; mem->start = node->start; } return 0; }
/*
 * Reserve a block of stolen memory inside the [start, end) window.
 *
 * Returns 0 on success, -ENODEV when stolen memory was never initialized,
 * or the error code from drm_mm_insert_node_in_range().
 */
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment,
					 u64 start, u64 end)
{
	struct drm_mm *stolen = &dev_priv->mm.stolen;
	int err;

	if (!drm_mm_initialized(stolen))
		return -ENODEV;

	/* Serialize against concurrent carve-outs of stolen memory. */
	mutex_lock(&dev_priv->mm.stolen_lock);
	err = drm_mm_insert_node_in_range(stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return err;
}
/*
 * Reserve a block of stolen memory inside the [start, end) window,
 * steering clear of the first page on gen8+ hardware.
 *
 * Returns 0 on success, -ENODEV when stolen memory was never initialized,
 * or the error code from drm_mm_insert_node_in_range().
 */
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment,
					 u64 start, u64 end)
{
	struct drm_mm *stolen = &dev_priv->mm.stolen;
	int err;

	if (!drm_mm_initialized(stolen))
		return -ENODEV;

	/* WaSkipStolenMemoryFirstPage:bdw+ */
	if (INTEL_GEN(dev_priv) >= 8 && start < 4096)
		start = 4096;

	/* Serialize against concurrent carve-outs of stolen memory. */
	mutex_lock(&dev_priv->mm.stolen_lock);
	err = drm_mm_insert_node_in_range(stolen, node,
					  size, alignment, 0,
					  start, end, DRM_MM_INSERT_BEST);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return err;
}
/*
 * Reserve a block of stolen memory inside the [start, end) window.
 *
 * Returns 0 on success, -ENODEV when stolen memory was never initialized,
 * or the error code from drm_mm_insert_node_in_range().
 */
int i915_gem_stolen_insert_node_in_range(struct drm_i915_private *dev_priv,
					 struct drm_mm_node *node, u64 size,
					 unsigned alignment,
					 u64 start, u64 end)
{
	int ret;

	if (!drm_mm_initialized(&dev_priv->mm.stolen))
		return -ENODEV;

	/* See the comment at the drm_mm_init() call for more about this check.
	 *
	 * WaSkipStolenMemoryFirstPage:bdw+ — the first page of stolen memory
	 * must not be handed out on gen8 and later, not gen8 only; the old
	 * "== 8" test (self-described as incomplete) left gen9+ exposed.
	 */
	if (INTEL_INFO(dev_priv)->gen >= 8 && start < 4096)
		start = 4096;

	mutex_lock(&dev_priv->mm.stolen_lock);
	ret = drm_mm_insert_node_in_range(&dev_priv->mm.stolen, node, size,
					  alignment, start, end,
					  DRM_MM_SEARCH_DEFAULT);
	mutex_unlock(&dev_priv->mm.stolen_lock);

	return ret;
}
/**
 * amdgpu_vram_mgr_new - allocate new ranges
 *
 * @man: TTM memory type manager
 * @tbo: TTM BO we need this range for
 * @place: placement flags and restrictions
 * @mem: the resulting mem object
 *
 * Allocate VRAM for the given BO.  Unless a contiguous allocation is
 * requested, the range is split into up to @num_nodes drm_mm nodes of
 * at most @pages_per_node pages each.  Returns 0 on success (or when
 * the allocator ran out of space, leaving mem->mm_node == NULL so the
 * caller can try another placement), negative error code otherwise.
 */
static int amdgpu_vram_mgr_new(struct ttm_mem_type_manager *man,
			       struct ttm_buffer_object *tbo,
			       const struct ttm_place *place,
			       struct ttm_mem_reg *mem)
{
	struct amdgpu_vram_mgr *mgr = man->priv;
	struct drm_mm *mm = &mgr->mm;
	struct drm_mm_node *nodes;
	enum drm_mm_insert_mode mode;
	unsigned long lpfn, num_nodes, pages_per_node, pages_left;
	unsigned i;
	int r;

	/* An lpfn of 0 means "no upper limit": clamp to the whole range. */
	lpfn = place->lpfn;
	if (!lpfn)
		lpfn = man->size;

	if (place->flags & TTM_PL_FLAG_CONTIGUOUS ||
	    amdgpu_vram_page_split == -1) {
		/* One node covering everything; ~0ul disables splitting. */
		pages_per_node = ~0ul;
		num_nodes = 1;
	} else {
		/* Chunk size must honor the BO's page alignment. */
		pages_per_node = max((uint32_t)amdgpu_vram_page_split,
				     mem->page_alignment);
		num_nodes = DIV_ROUND_UP(mem->num_pages, pages_per_node);
	}

	nodes = kcalloc(num_nodes, sizeof(*nodes), GFP_KERNEL);
	if (!nodes)
		return -ENOMEM;

	mode = DRM_MM_INSERT_BEST;
	if (place->flags & TTM_PL_FLAG_TOPDOWN)
		mode = DRM_MM_INSERT_HIGH;

	mem->start = 0;
	pages_left = mem->num_pages;

	spin_lock(&mgr->lock);
	for (i = 0; i < num_nodes; ++i) {
		unsigned long pages = min(pages_left, pages_per_node);
		uint32_t alignment = mem->page_alignment;
		unsigned long start;

		/* Full-sized chunks are aligned to the chunk size itself. */
		if (pages == pages_per_node)
			alignment = pages_per_node;

		r = drm_mm_insert_node_in_range(mm, &nodes[i], pages, alignment,
						0, place->fpfn, lpfn, mode);
		if (unlikely(r))
			goto error;

		/* Calculate a virtual BO start address to easily check if
		 * everything is CPU accessible.
		 */
		start = nodes[i].start + nodes[i].size;
		if (start > mem->num_pages)
			start -= mem->num_pages;
		else
			start = 0;
		/* mem->start ends up as the highest such virtual start. */
		mem->start = max(mem->start, start);
		pages_left -= pages;
	}
	spin_unlock(&mgr->lock);

	mem->mm_node = nodes;

	return 0;

error:
	/* Unwind: free every node allocated before the failing one. */
	while (i--)
		drm_mm_remove_node(&nodes[i]);
	spin_unlock(&mgr->lock);
	kfree(nodes);

	/* -ENOSPC means "try another placement", not a hard failure. */
	return r == -ENOSPC ? 0 : r;
}