static int ttm_bo_man_get_node(struct ttm_buffer_object *bo, struct ttm_mem_type_manager *man, struct ttm_placement *placement, struct ttm_mem_reg *mem, struct drm_mm_node **node) { struct ttm_bo_global *glob = bo->glob; unsigned long lpfn; int ret; lpfn = placement->lpfn; if (!lpfn) lpfn = man->size; *node = NULL; do { ret = drm_mm_pre_get(&man->manager); if (unlikely(ret)) return ret; spin_lock(&glob->lru_lock); *node = drm_mm_search_free_in_range(&man->manager, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn, 1); if (unlikely(*node == NULL)) { spin_unlock(&glob->lru_lock); return 0; } *node = drm_mm_get_block_atomic_range(*node, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn); spin_unlock(&glob->lru_lock); } while (*node == NULL); return 0; }
/*
 * Allocate @size bytes from the channel's notifier heap inside the
 * byte range [@start, @end], wrap the resulting GPU address in a
 * DMA-in-memory context object, and hash that object into the
 * channel's RAMHT under @handle.
 *
 * On success, the heap-relative byte offset of the block is written to
 * *b_offset and 0 is returned.  Returns -ENOMEM when the heap has no
 * suitable hole, or a negative errno from ctxdma creation / RAMHT
 * insertion.
 *
 * Ownership: on success the drm_mm_node is stored in nobj->priv, so it
 * is presumably released by nouveau_notifier_gpuobj_dtor when the
 * gpuobj is finally destroyed (dtor not visible here — TODO confirm).
 */
int nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
			   int size, uint32_t start, uint32_t end,
			   uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct drm_mm_node *mem;
	uint32_t offset;
	int target, ret;

	/* Find a free hole in the per-channel heap, then claim it. */
	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
					  start, end, 0);
	if (mem)
		mem = drm_mm_get_block_range(mem, size, 0, start, end);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

	/*
	 * Compute the GPU-visible base address of the notifier BO.
	 * Pre-NV50 cards address it directly in VRAM or GART; NV50+
	 * goes through the per-channel virtual address space.
	 */
	if (dev_priv->card_type < NV_50) {
		if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
			target = NV_MEM_TARGET_VRAM;
		else
			target = NV_MEM_TARGET_GART;
		offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
	} else {
		target = NV_MEM_TARGET_VM;
		offset = chan->notifier_bo->vma.offset;
	}
	/* Offset of this block within the notifier BO. */
	offset += mem->start;

	/* Create the ctxdma object covering exactly this block. */
	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_MEM_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
	/* Hand the heap node to the gpuobj; its dtor will free it. */
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_ramht_insert(chan, handle, nobj);
	/* Drop our local reference; RAMHT holds its own on success. */
	nouveau_gpuobj_ref(NULL, &nobj);
	if (ret) {
		/*
		 * NOTE(review): if the ref drop above destroys nobj, its
		 * dtor (set to nouveau_notifier_gpuobj_dtor with priv ==
		 * mem) may already have released mem, which would make
		 * this explicit put a double free — confirm against the
		 * dtor implementation and nouveau_gpuobj_ref semantics.
		 */
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}
static int ttm_bo_man_get_node(struct ttm_mem_type_manager *man, struct ttm_buffer_object *bo, struct ttm_placement *placement, struct ttm_mem_reg *mem) { struct ttm_range_manager *rman = (struct ttm_range_manager *) man->priv; struct drm_mm *mm = &rman->mm; struct drm_mm_node *node = NULL; unsigned long lpfn; int ret; lpfn = placement->lpfn; if (!lpfn) lpfn = man->size; do { ret = drm_mm_pre_get(mm); if (unlikely(ret)) return ret; spin_lock(&rman->lock); node = drm_mm_search_free_in_range(mm, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn, DRM_MM_SEARCH_BEST); if (unlikely(node == NULL)) { spin_unlock(&rman->lock); return 0; } node = drm_mm_get_block_atomic_range(node, mem->num_pages, mem->page_alignment, placement->fpfn, lpfn); spin_unlock(&rman->lock); } while (node == NULL); mem->mm_node = node; mem->start = node->start; return 0; }