static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, struct ttm_placement *placement, struct ttm_mem_reg *mem, struct drm_mm_node **node) { struct ttm_bo_global *glob = bo->glob; unsigned long lpfn; int ret; lpfn = placement->lpfn; if (!lpfn) lpfn = man->size; *node = NULL; do { ret = drm_mm_pre_get(&man->manager); if (unlikely(ret)) return ret; spin_lock(&glob->lru_lock); *node = drm_mm_search_free_in_range(&man->manager, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn, 1); if (unlikely(*node == NULL)) { spin_unlock(&glob->lru_lock); return 0; } *node = drm_mm_get_block_atomic_range(*node, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn); spin_unlock(&glob->lru_lock); } while (*node == NULL); return 0; }
/*
 * Reserve @size bytes from the PFB compression tag heap.
 *
 * Returns the allocated drm_mm node, or NULL when either the drm_mm
 * bookkeeping pre-allocation fails or no block could be grabbed under
 * the tile lock.  No retry on a lost race: the caller just gets NULL.
 */
static struct drm_mm_node *
nv20_fb_alloc_tag(struct drm_device *dev, uint32_t size)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_fb_engine *pfb = &dev_priv->engine.fb;
	struct drm_mm_node *tag;

	if (drm_mm_pre_get(&pfb->tag_heap))
		return NULL;

	spin_lock(&dev_priv->tile.lock);
	tag = drm_mm_search_free(&pfb->tag_heap, size, 0, 0);
	if (tag)
		tag = drm_mm_get_block_atomic(tag, size, 0);
	spin_unlock(&dev_priv->tile.lock);

	return tag;
}
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; struct drm_mm_node *node = NULL; unsigned long lpfn; int ret; lpfn = placement->lpfn; if (!lpfn) lpfn = man->size; do { ret = drm_mm_pre_get(mm); if (unlikely(ret)) return ret; spin_lock(&rman->lock); node = drm_mm_search_free_in_range(mm, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn, DRM_MM_SEARCH_BEST); if (unlikely(node == NULL)) { spin_unlock(&rman->lock); return 0; } node = drm_mm_get_block_atomic_range(node, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn); spin_unlock(&rman->lock); } while (node == NULL); mem->mm_node = node; mem->start = node->start; return 0; }
static int ttm_bo_setup_vm(struct ttm_buffer_object *bo) { struct ttm_bo_device *bdev = bo->bdev; int ret; retry_pre_get: ret = drm_mm_pre_get(&bdev->addr_space_mm); if (unlikely(ret != 0)) return ret; write_lock(&bdev->vm_lock); bo->vm_node = drm_mm_search_free(&bdev->addr_space_mm, bo->mem.num_pages, 0, 0); if (unlikely(bo->vm_node == NULL)) { ret = -ENOMEM; goto out_unlock; } bo->vm_node = drm_mm_get_block_atomic(bo->vm_node, bo->mem.num_pages, 0); if (unlikely(bo->vm_node == NULL)) { write_unlock(&bdev->vm_lock); goto retry_pre_get; } ttm_bo_vm_insert_rb(bo); write_unlock(&bdev->vm_lock); bo->addr_space_offset = ((uint64_t) bo->vm_node->start) << PAGE_SHIFT; return 0; out_unlock: write_unlock(&bdev->vm_lock); return ret; }