/* Tear down a gpuobj's virtual mapping, if one was ever allocated. */
void
nouveau_gpuobj_unmap(struct nouveau_vma *vma)
{
	if (vma->node) {
		nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
	}
}
/* Deferred VMA teardown, run from fence work once the GPU is done with it. */
static void
nouveau_gem_object_delete(void *data)
{
	struct nouveau_vma *vma = data;

	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
	kfree(vma);
}
static void
nvc0_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	struct nvc0_bar_priv *priv = (void *)bar;
	/* index of whichever BAR VM this vma lives in */
	int i = !(vma->vm == priv->bar[0].vm);

	nouveau_vm_unmap(vma);
	/* flush the matching BAR's VM TLB before releasing the range */
	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[i].pgd->addr, 5);
	nouveau_vm_put(vma);
}
/* Drop the object's BAR aperture mapping, if it was mapped. */
void
nv50_instmem_unmap(struct nouveau_gpuobj *gpuobj)
{
	struct nv50_gpuobj_node *node = gpuobj->node;

	if (node->vram->bar_vma.node) {
		nouveau_vm_unmap(&node->vram->bar_vma);
		nouveau_vm_put(&node->vram->bar_vma);
	}
}
/* Final TTM destructor for a nouveau_bo. */
static void
nouveau_bo_del_ttm(struct ttm_buffer_object *bo)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bo->bdev);
	struct drm_device *dev = dev_priv->dev;
	struct nouveau_bo *nvbo = nouveau_bo(bo);

	if (unlikely(nvbo->gem))
		DRM_ERROR("bo %p still attached to GEM object\n", bo);

	nv10_mem_put_tile_region(dev, nvbo->tile, NULL);
	nouveau_vm_put(&nvbo->vma);
	kfree(nvbo);
}
/* Undo the BAR1 window set up by nouveau_ttm_io_mem_reserve(). */
static void
nouveau_ttm_io_mem_free(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct nouveau_vram *vram = mem->mm_node;

	if (!dev_priv->bar1_vm || mem->mem_type != TTM_PL_VRAM)
		return;

	if (!vram->bar_vma.node)
		return;

	nouveau_vm_unmap(&vram->bar_vma);
	nouveau_vm_put(&vram->bar_vma);
}
void
nv50_instmem_put(struct nouveau_gpuobj *gpuobj)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct nv50_gpuobj_node *node;

	node = gpuobj->node;
	gpuobj->node = NULL;

	/* drop the channel mapping, if any, then return the backing VRAM */
	if (node->chan_vma.node) {
		nouveau_vm_unmap(&node->chan_vma);
		nouveau_vm_put(&node->chan_vma);
	}
	vram->put(dev, &node->vram);
	kfree(node);
}
static void
nouveau_gem_object_unmap(struct nouveau_bo *nvbo, struct nouveau_vma *vma)
{
	const bool mapped = nvbo->bo.mem.mem_type != TTM_PL_SYSTEM;
	struct nouveau_fence *fence = NULL;

	list_del(&vma->head);

	if (mapped) {
		spin_lock(&nvbo->bo.bdev->fence_lock);
		fence = nouveau_fence_ref(nvbo->bo.sync_obj);
		spin_unlock(&nvbo->bo.bdev->fence_lock);
	}

	if (fence) {
		/* GPU may still be using the mapping; defer the teardown
		 * to fence completion (nouveau_gem_object_delete()). */
		nouveau_fence_work(fence, nouveau_gem_object_delete, vma);
	} else {
		if (mapped)
			nouveau_vm_unmap(vma);
		nouveau_vm_put(vma);
		kfree(vma);
	}
	nouveau_fence_unref(&fence);
}
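/*
 * Illustrative only: a hypothetical caller of nouveau_gem_object_unmap(),
 * sketching how a per-client VMA might be looked up and released once its
 * last reference drops. nouveau_bo_vma_find() and the vma->refcount field
 * follow the 3.x-era driver but are assumptions here, not taken from this
 * section; a real caller would also hold the bo reservation.
 */
static void
example_gem_close_vma(struct nouveau_bo *nvbo, struct nouveau_vm *vm)
{
	struct nouveau_vma *vma;

	vma = nouveau_bo_vma_find(nvbo, vm);	/* assumed lookup helper */
	if (vma && --vma->refcount == 0)
		nouveau_gem_object_unmap(nvbo, vma);
}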
static int
nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem)
{
	struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type];
	struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev);
	struct drm_device *dev = dev_priv->dev;
	int ret;

	mem->bus.addr = NULL;
	mem->bus.offset = 0;
	mem->bus.size = mem->num_pages << PAGE_SHIFT;
	mem->bus.base = 0;
	mem->bus.is_iomem = false;

	if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE))
		return -EINVAL;

	switch (mem->mem_type) {
	case TTM_PL_SYSTEM:
		/* System memory */
		return 0;
	case TTM_PL_TT:
#if __OS_HAS_AGP
		if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = dev_priv->gart_info.aper_base;
			mem->bus.is_iomem = true;
		}
#endif
		break;
	case TTM_PL_VRAM:
	{
		struct nouveau_vram *vram = mem->mm_node;
		u8 page_shift;

		/* Without a BAR1 VM, VRAM is linearly visible through BAR1. */
		if (!dev_priv->bar1_vm) {
			mem->bus.offset = mem->start << PAGE_SHIFT;
			mem->bus.base = pci_resource_start(dev->pdev, 1);
			mem->bus.is_iomem = true;
			break;
		}

		if (dev_priv->card_type == NV_C0)
			page_shift = vram->page_shift;
		else
			page_shift = 12;

		/* Carve out a BAR1 window and map the VRAM into it; torn
		 * down again in nouveau_ttm_io_mem_free(). */
		ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size,
				     page_shift, NV_MEM_ACCESS_RW,
				     &vram->bar_vma);
		if (ret)
			return ret;

		nouveau_vm_map(&vram->bar_vma, vram);

		mem->bus.offset = vram->bar_vma.offset;
		if (dev_priv->card_type == NV_50) /*XXX*/
			mem->bus.offset -= 0x0020000000ULL;
		mem->bus.base = pci_resource_start(dev->pdev, 1);
		mem->bus.is_iomem = true;
	}
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
static void
nv50_bar_unmap(struct nouveau_bar *bar, struct nouveau_vma *vma)
{
	nouveau_vm_unmap(vma);
	nouveau_vm_put(vma);
}
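/*
 * A minimal sketch, not part of the driver: the unmap-then-put pairing
 * recurs in nearly every teardown path above, so it could be factored
 * into a helper along these lines, assuming (as nouveau_gpuobj_unmap()
 * and nv50_instmem_unmap() already do) that a non-NULL vma->node marks
 * a live allocation. nouveau_vma_teardown() is a hypothetical name.
 */
static inline void
nouveau_vma_teardown(struct nouveau_vma *vma)
{
	if (!vma->node)
		return;
	nouveau_vm_unmap(vma);	/* drop the page-table entries */
	nouveau_vm_put(vma);	/* release the address-space range */
}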