/*
 * nouveau_bo_new - allocate and initialise a nouveau buffer object.
 *
 * @dev: DRM device the BO belongs to.
 * @chan: channel hint passed to TTM for accelerated moves during init
 *        (cleared again once ttm_bo_init() returns).
 * @size: requested size in bytes (adjusted by nouveau_bo_fixup_align()).
 * @align: requested alignment in bytes (adjusted, then converted to pages).
 * @flags: TTM placement flags for the initial placement.
 * @tile_mode/@tile_flags: hardware tiling description stored on the BO.
 * @no_vm: skip allocating a channel-VM virtual address range.
 * @mappable: recorded on the BO; semantics depend on callers — not
 *            interpreted here.
 * @pnvbo: out parameter, receives the new BO on success.
 *
 * Returns 0 on success or a negative errno. On the ttm_bo_init() failure
 * path, ownership of nvbo has already passed to TTM (see comment below),
 * so it must NOT be freed here.
 */
int nouveau_bo_new(struct drm_device *dev, struct nouveau_channel *chan,
		   int size, int align, uint32_t flags, uint32_t tile_mode,
		   uint32_t tile_flags, bool no_vm, bool mappable,
		   struct nouveau_bo **pnvbo)
{
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_bo *nvbo;
	int ret = 0, page_shift = 0;

	nvbo = kzalloc(sizeof(struct nouveau_bo), GFP_KERNEL);
	if (!nvbo)
		return -ENOMEM;
	INIT_LIST_HEAD(&nvbo->head);
	INIT_LIST_HEAD(&nvbo->entry);
	nvbo->mappable = mappable;
	nvbo->no_vm = no_vm;
	nvbo->tile_mode = tile_mode;
	nvbo->tile_flags = tile_flags;
	nvbo->bo.bdev = &dev_priv->ttm.bdev;

	/* Adjust size/align for tiling constraints and derive the VM page
	 * shift; align comes back in bytes and TTM wants pages. */
	nouveau_bo_fixup_align(nvbo, &align, &size, &page_shift);
	align >>= PAGE_SHIFT;

	/* Reserve a channel-VM address range up front; unmapped until the
	 * BO is actually placed. On failure nothing else holds nvbo yet,
	 * so a plain kfree() is the correct unwind. */
	if (!nvbo->no_vm && dev_priv->chan_vm) {
		ret = nouveau_vm_get(dev_priv->chan_vm, size, page_shift,
				     NV_MEM_ACCESS_RW, &nvbo->vma);
		if (ret) {
			kfree(nvbo);
			return ret;
		}
	}

	nvbo->bo.mem.num_pages = size >> PAGE_SHIFT;
	nouveau_bo_placement_set(nvbo, flags, 0);

	/* Expose the channel only for the duration of ttm_bo_init() —
	 * presumably consumed by the move/placement path. TODO confirm. */
	nvbo->channel = chan;
	ret = ttm_bo_init(&dev_priv->ttm.bdev, &nvbo->bo, size,
			  ttm_bo_type_device, &nvbo->placement, align, 0,
			  false, NULL, size, nouveau_bo_del_ttm);
	if (ret) {
		/* ttm will call nouveau_bo_del_ttm if it fails.. */
		return ret;
	}
	nvbo->channel = NULL;

	/* For VRAM placements, report the VM virtual address as the BO
	 * offset instead of the raw memory offset. */
	if (nvbo->vma.node) {
		if (nvbo->bo.mem.mem_type == TTM_PL_VRAM)
			nvbo->bo.offset = nvbo->vma.offset;
	}

	*pnvbo = nvbo;
	return 0;
}
/* Reserve a BAR1 virtual range for @mem and map it, giving userspace a
 * window onto the memory. @mem->size is in 4KiB units, hence the << 12. */
static int
nv50_bar_umap(struct nouveau_bar *bar, struct nouveau_mem *mem, u32 flags,
	      struct nouveau_vma *vma)
{
	struct nv50_bar_priv *priv = (void *)bar;
	int rc = nouveau_vm_get(priv->bar1_vm, mem->size << 12, 12,
				flags, vma);

	if (rc == 0)
		nouveau_vm_map(vma, mem);
	return rc;
}
/* Reserve a virtual range in BAR[0]'s VM for @mem, map it, and flush the
 * BAR VM engine so the new PTEs take effect. @mem->size is in 4KiB units. */
static int
nvc0_bar_kmap(struct nouveau_bar *bar, struct nouveau_mem *mem, u32 flags,
	      struct nouveau_vma *vma)
{
	struct nvc0_bar_priv *priv = (void *)bar;
	int rc = nouveau_vm_get(priv->bar[0].vm, mem->size << 12, 12,
				flags, vma);

	if (rc)
		return rc;

	nouveau_vm_map(vma, mem);
	/* engine 5: BAR — make the fresh mapping visible */
	nvc0_vm_flush_engine(nv_subdev(bar), priv->bar[0].pgd->addr, 5);
	return 0;
}
/* Map a gpuobj into the given VM with the requested access rights.
 * The backing nouveau_mem pointer lives immediately after the instobj
 * header, hence the (iobj + 1) arithmetic. */
int
nouveau_gpuobj_map_vm(struct nouveau_gpuobj *gpuobj, struct nouveau_vm *vm,
		      u32 access, struct nouveau_vma *vma)
{
	struct nouveau_instobj *iobj =
		(void *)nv_pclass(nv_object(gpuobj), NV_MEMOBJ_CLASS);
	struct nouveau_mem **backing = (void *)(iobj + 1);
	int rc;

	rc = nouveau_vm_get(vm, gpuobj->size, 12, access, vma);
	if (rc == 0)
		nouveau_vm_map(vma, *backing);
	return rc;
}
int nv50_instmem_map(struct nouveau_gpuobj *gpuobj) { struct drm_nouveau_private *dev_priv = gpuobj->dev->dev_private; struct nv50_gpuobj_node *node = gpuobj->node; int ret; ret = nouveau_vm_get(dev_priv->bar3_vm, gpuobj->size, 12, NV_MEM_ACCESS_RW, &node->vram->bar_vma); if (ret) return ret; nouveau_vm_map(&node->vram->bar_vma, node->vram); gpuobj->pinst = node->vram->bar_vma.offset; return 0; }
/* Allocate VRAM backing for a gpuobj and, when NVOBJ_FLAG_VM is set, also
 * reserve and map a channel-VM virtual range for it. On success fills in
 * gpuobj->{vinst,linst,size,node}; on failure everything is unwound. */
int
nv50_instmem_get(struct nouveau_gpuobj *gpuobj, struct nouveau_channel *chan,
		 u32 size, u32 align)
{
	struct drm_device *dev = gpuobj->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_vram_engine *vram = &dev_priv->engine.vram;
	struct nv50_gpuobj_node *node;
	int ret;

	node = kzalloc(sizeof(*node), GFP_KERNEL);
	if (!node)
		return -ENOMEM;
	node->align = align;

	/* round size up to 4KiB and enforce at least page alignment */
	size  = (size + 4095) & ~4095;
	align = max(align, (u32)4096);

	ret = vram->get(dev, size, align, 0, 0, &node->vram);
	if (ret)
		goto err_free;

	gpuobj->vinst = node->vram->offset;

	if (gpuobj->flags & NVOBJ_FLAG_VM) {
		u32 access = NV_MEM_ACCESS_RW;

		if (!(gpuobj->flags & NVOBJ_FLAG_VM_USER))
			access |= NV_MEM_ACCESS_SYS;

		ret = nouveau_vm_get(chan->vm, size, 12, access,
				     &node->chan_vma);
		if (ret)
			goto err_put;

		nouveau_vm_map(&node->chan_vma, node->vram);
		gpuobj->linst = node->chan_vma.offset;
	}

	gpuobj->size = size;
	gpuobj->node = node;
	return 0;

err_put:
	vram->put(dev, &node->vram);
err_free:
	kfree(node);
	return ret;
}
static int nouveau_ttm_io_mem_reserve(struct ttm_bo_device *bdev, struct ttm_mem_reg *mem) { struct ttm_mem_type_manager *man = &bdev->man[mem->mem_type]; struct drm_nouveau_private *dev_priv = nouveau_bdev(bdev); struct drm_device *dev = dev_priv->dev; int ret; mem->bus.addr = NULL; mem->bus.offset = 0; mem->bus.size = mem->num_pages << PAGE_SHIFT; mem->bus.base = 0; mem->bus.is_iomem = false; if (!(man->flags & TTM_MEMTYPE_FLAG_MAPPABLE)) return -EINVAL; switch (mem->mem_type) { case TTM_PL_SYSTEM: /* System memory */ return 0; case TTM_PL_TT: #if __OS_HAS_AGP if (dev_priv->gart_info.type == NOUVEAU_GART_AGP) { mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = dev_priv->gart_info.aper_base; mem->bus.is_iomem = true; } #endif break; case TTM_PL_VRAM: { struct nouveau_vram *vram = mem->mm_node; u8 page_shift; if (!dev_priv->bar1_vm) { mem->bus.offset = mem->start << PAGE_SHIFT; mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; break; } if (dev_priv->card_type == NV_C0) page_shift = vram->page_shift; else page_shift = 12; ret = nouveau_vm_get(dev_priv->bar1_vm, mem->bus.size, page_shift, NV_MEM_ACCESS_RW, &vram->bar_vma); if (ret) return ret; nouveau_vm_map(&vram->bar_vma, vram); if (ret) { nouveau_vm_put(&vram->bar_vma); return ret; } mem->bus.offset = vram->bar_vma.offset; if (dev_priv->card_type == NV_50) /*XXX*/ mem->bus.offset -= 0x0020000000ULL; mem->bus.base = pci_resource_start(dev->pdev, 1); mem->bus.is_iomem = true; } break; default: return -EINVAL; } return 0; }