/* Tear down the VM mapping backing this TTM; always reports success. */
static int
nv04_sgdma_unbind(struct ttm_tt *ttm)
{
	struct nouveau_sgdma_be *nvbe = (struct nouveau_sgdma_be *)ttm;

	nouveau_vm_unmap(&nvbe->node->vma[0]);
	return 0;
}
/*
 * Release a gpuobj's VM mapping.  A VMA with no backing node was never
 * mapped, in which case there is nothing to do.
 */
void
nouveau_gpuobj_unmap(struct nouveau_vma *vma)
{
	if (!vma->node)
		return;

	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
}
/*
 * Deferred VMA destructor: unmap, release the address-space allocation,
 * and free the VMA itself.  @data is the nouveau_vma scheduled for
 * destruction (see nouveau_gem_object_unmap's fence-work path).
 */
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;

	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}
static void
nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	struct nvc0_bar_priv *priv = (void *)bar;
	/* Index of the BAR the VMA belongs to: 0 when it lives in
	 * bar[0]'s address space, 1 otherwise. */
	int i = (vma->vm != priv->bar[0].vm);

	nouveau_vm_unmap(vma);
	/* Flush the matching BAR's page directory before releasing. */
	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
	nouveau_vm_put(vma);
}
void nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj) { struct nv50_gpuobj_node *node = gpuobj->node; if (node->vram->bar_vma.node) { nouveau_vm_unmap(&node->vram->bar_vma); nouveau_vm_put(&node->vram->bar_vma); } }
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_vram *vram = mem->mm_node;

	/* Only VRAM placements carry a BAR1 mapping, and only when a
	 * BAR1 address space exists at all. */
	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (vram->bar_vma.node) {
		nouveau_vm_unmap(&vram->bar_vma);
		nouveau_vm_put(&vram->bar_vma);
	}
}
/* Final TTM destructor for a nouveau BO: release tiling, VMA and memory. */
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	/* The GEM wrapper should have been torn down before the BO. */
	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);

	if (nvbo->vma.node) {
		nouveau_vm_unmap(&nvbo->vma);
		nouveau_vm_put(&nvbo->vma);
	}

	kfree(nvbo);
}
void nv50_instmem_put(struct nouveau_gpuobj *gpuobj) { struct drm_device *dev = gpuobj->dev; struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_vram_engine *vram = &dev_priv->engine.vram; struct nv50_gpuobj_node *node; node = gpuobj->node; gpuobj->node = NULL; if (node->chan_vma.node) { nouveau_vm_unmap(&node->chan_vma); nouveau_vm_put(&node->chan_vma); } vram->put(dev, &node->vram); kfree(node); }
/*
 * Unlink @vma from @nvbo and release it.
 *
 * When the BO is placed somewhere other than TTM_PL_SYSTEM, the mapping
 * may still be in use by work already submitted to the GPU, so teardown
 * is deferred: the BO's current sync fence is captured and
 * nouveau_gem_object_delete() runs once it signals.  When no fence is
 * pending (or the BO is unmapped), the VMA is destroyed immediately.
 */
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		/* Snapshot the BO's sync object under the fence lock;
		 * nouveau_fence_ref() may return NULL if none is set. */
		spin_lock(&nvbo->bo.bdev->fence_lock);
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		/* Deferred path: delete runs after the fence signals;
		 * ownership of @vma passes to the fence work. */
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		/* Immediate path: only unmap if it was ever mapped. */
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	/* Drop the reference taken above (no-op when fence is NULL). */
	nouveau_fence_unref(&fence);
}
/* Remove @vma's BAR mapping and return its address-space allocation. */
static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
}