void
nvc0_instmem_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nvc0_instmem_priv *priv = dev_priv->engine.instmem.priv;
        struct nouveau_vm *vm = NULL;

        nvc0_instmem_suspend(dev);

        nv_wr32(dev, 0x1704, 0x00000000);
        nv_wr32(dev, 0x1714, 0x00000000);

        nouveau_vm_ref(NULL, &dev_priv->chan_vm, priv->chan_pgd);
        nouveau_gpuobj_ref(NULL, &priv->chan_pgd);

        nvc0_channel_del(&priv->bar1);
        nouveau_vm_ref(NULL, &dev_priv->bar1_vm, priv->bar1_pgd);
        nouveau_gpuobj_ref(NULL, &priv->bar1_pgd);

        nvc0_channel_del(&priv->bar3);
        nouveau_vm_ref(dev_priv->bar3_vm, &vm, NULL);
        nouveau_vm_ref(NULL, &vm, priv->bar3_pgd);
        nouveau_gpuobj_ref(NULL, &priv->bar3_pgd);
        nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
        nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

        dev_priv->engine.instmem.priv = NULL;
        kfree(priv);
}
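/*
 * Every teardown path in this section leans on the same reference-swap
 * idiom: nouveau_gpuobj_ref(NULL, &ptr) (and likewise nouveau_vm_ref())
 * drops the reference currently held through `ptr` and clears the pointer,
 * while a non-NULL first argument takes a new reference before the old one
 * is released. A minimal standalone sketch of that idiom follows; the
 * `gpuobj`/`gpuobj_ref` names and the plain counter are hypothetical
 * simplifications, not the driver's actual kref-based implementation.
 */
#include <stdlib.h>

struct gpuobj {
        int refcount;                   /* simplified stand-in for a kref */
};

static void
gpuobj_ref(struct gpuobj *obj, struct gpuobj **ptr)
{
        if (obj)
                obj->refcount++;        /* take the new reference first */
        if (*ptr && --(*ptr)->refcount == 0)
                free(*ptr);             /* last reference gone: destroy */
        *ptr = obj;                     /* NULL here means "just drop" */
}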
static int
nv10_fifo_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
               struct nouveau_oclass *oclass, void *data, u32 size,
               struct nouveau_object **pobject)
{
        struct nv04_instmem_priv *imem = nv04_instmem(parent);
        struct nv04_fifo_priv *priv;
        int ret;

        ret = nouveau_fifo_create(parent, engine, oclass, 0, 31, &priv);
        *pobject = nv_object(priv);
        if (ret)
                return ret;

        nouveau_ramht_ref(imem->ramht, &priv->ramht);
        nouveau_gpuobj_ref(imem->ramro, &priv->ramro);
        nouveau_gpuobj_ref(imem->ramfc, &priv->ramfc);

        nv_subdev(priv)->unit = 0x00000100;
        nv_subdev(priv)->intr = nv04_fifo_intr;
        nv_engine(priv)->cclass = &nv10_fifo_cclass;
        nv_engine(priv)->sclass = nv10_fifo_sclass;
        priv->base.pause = nv04_fifo_pause;
        priv->base.start = nv04_fifo_start;
        priv->ramfc_desc = nv10_ramfc;
        return 0;
}
void
nv04_instmem_dtor(struct nouveau_object *object)
{
        struct nv04_instmem_priv *priv = (void *)object;

        nouveau_gpuobj_ref(NULL, &priv->ramfc);
        nouveau_gpuobj_ref(NULL, &priv->ramro);
        nouveau_ramht_ref(NULL, &priv->ramht);
        nouveau_gpuobj_ref(NULL, &priv->vbios);
        nouveau_mm_fini(&priv->heap);
        if (priv->iomem)
                iounmap(priv->iomem);
        nouveau_instmem_destroy(&priv->base);
}
static int
nouveau_channel_pushbuf_ctxdma_init(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_bo *pb = chan->pushbuf_bo;
        struct nouveau_gpuobj *pushbuf = NULL;
        int ret = 0; /* NVC0+ creates no ctxdma; don't return an uninitialised ret */

        if (dev_priv->card_type >= NV_50) {
                if (dev_priv->card_type < NV_C0) {
                        ret = nouveau_gpuobj_dma_new(chan,
                                                     NV_CLASS_DMA_IN_MEMORY, 0,
                                                     (1ULL << 40),
                                                     NV_MEM_ACCESS_RO,
                                                     NV_MEM_TARGET_VM,
                                                     &pushbuf);
                }
                chan->pushbuf_base = pb->bo.offset;
        } else
        if (pb->bo.mem.mem_type == TTM_PL_TT) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->gart_info.aper_size,
                                             NV_MEM_ACCESS_RO,
                                             NV_MEM_TARGET_GART, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else
        if (dev_priv->card_type != NV_04) {
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, 0,
                                             dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RO,
                                             NV_MEM_TARGET_VRAM, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        } else {
                /* NV04 cmdbuf hack, from original ddx.. not sure of its
                 * exact reason for existing :)  PCI access to cmdbuf in
                 * VRAM.
                 */
                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY,
                                             pci_resource_start(dev->pdev, 1),
                                             dev_priv->fb_available_size,
                                             NV_MEM_ACCESS_RO,
                                             NV_MEM_TARGET_PCI, &pushbuf);
                chan->pushbuf_base = pb->bo.mem.start << PAGE_SHIFT;
        }

        nouveau_gpuobj_ref(pushbuf, &chan->pushbuf);
        nouveau_gpuobj_ref(NULL, &pushbuf);
        return ret;
}
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
        struct drm_nouveau_private *dev_priv = chan->dev->dev_private;
        struct nouveau_gpuobj *ctx = chan->engctx[engine];
        struct drm_device *dev = chan->dev;
        unsigned long flags;
        u32 inst, i;

        if (!chan->ramin)
                return;

        inst  = chan->ramin->vinst >> 12;
        inst |= 0x80000000;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        nv_mask(dev, 0x00b32c, 0x00000001, 0x00000000);
        if (nv_rd32(dev, 0x00b318) == inst)
                nv_mask(dev, 0x00b318, 0x80000000, 0x00000000);
        nv_mask(dev, 0x00b32c, 0x00000001, 0x00000001);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        for (i = 0x00; i <= 0x14; i += 4)
                nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);
        nouveau_gpuobj_ref(NULL, &ctx);
        chan->engctx[engine] = NULL;
}
static int
nv84_fence_context_new(struct nouveau_channel *chan, int engine)
{
        struct nv84_fence_priv *priv = nv_engine(chan->dev, engine);
        struct nv84_fence_chan *fctx;
        struct nouveau_gpuobj *obj;
        int ret;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        nouveau_fence_context_new(&fctx->base);

        ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
                                     priv->mem->vinst, priv->mem->size,
                                     NV_MEM_ACCESS_RW,
                                     NV_MEM_TARGET_VRAM, &obj);
        if (ret == 0) {
                ret = nouveau_ramht_insert(chan, NvSema, obj);
                nouveau_gpuobj_ref(NULL, &obj);
                nv_wo32(priv->mem, chan->id * 16, 0x00000000);
        }

        if (ret)
                nv84_fence_context_del(chan, engine);
        return ret;
}
void
nv50_graph_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        int i, hdr = (dev_priv->chipset == 0x50) ? 0x200 : 0x20;
        unsigned long flags;

        NV_DEBUG(dev, "ch%d\n", chan->id);

        if (!chan->ramin)
                return;

        spin_lock_irqsave(&dev_priv->context_switch_lock, flags);
        pfifo->reassign(dev, false);
        pgraph->fifo_access(dev, false);

        if (pgraph->channel(dev) == chan)
                pgraph->unload_context(dev);

        for (i = hdr; i < hdr + 24; i += 4)
                nv_wo32(chan->ramin, i, 0);
        dev_priv->engine.instmem.flush(dev);

        pgraph->fifo_access(dev, true);
        pfifo->reassign(dev, true);
        spin_unlock_irqrestore(&dev_priv->context_switch_lock, flags);

        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
        atomic_dec(&chan->vm->pgraph_refs);
}
static int
nv10_fence_context_new(struct nouveau_channel *chan, int engine)
{
        struct nv10_fence_priv *priv = nv_engine(chan->dev, engine);
        struct nv10_fence_chan *fctx;
        struct nouveau_gpuobj *obj;
        int ret = 0;

        fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL);
        if (!fctx)
                return -ENOMEM;

        nouveau_fence_context_new(&fctx->base);

        if (priv->bo) {
                struct ttm_mem_reg *mem = &priv->bo->bo.mem;

                ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY,
                                             mem->start * PAGE_SIZE, mem->size,
                                             NV_MEM_ACCESS_RW,
                                             NV_MEM_TARGET_VRAM, &obj);
                if (!ret) {
                        ret = nouveau_ramht_insert(chan, NvSema, obj);
                        nouveau_gpuobj_ref(NULL, &obj);
                }
        }

        if (ret)
                nv10_fence_context_del(chan, engine);
        return ret;
}
void
nv20_graph_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

        nouveau_gpuobj_ref(NULL, &pgraph->ctx_table);
}
static void
nv50_channel_del(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan;

        chan = *pchan;
        *pchan = NULL;
        if (!chan)
                return;

        nouveau_gpuobj_ref(NULL, &chan->ramfc);
        nouveau_vm_ref(NULL, &chan->vm, chan->vm_pd);
        nouveau_gpuobj_ref(NULL, &chan->vm_pd);
        if (drm_mm_initialized(&chan->ramin_heap))
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
        kfree(chan);
}
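/*
 * nv50_channel_del() above (and nvc0_channel_del() further down) steal the
 * caller's pointer and clear it before doing any work, which makes the
 * destructor idempotent and keeps a stale pointer from being freed twice.
 * A hedged sketch of the same shape, with a hypothetical `channel` type:
 */
#include <stdlib.h>

struct channel {
        void *resources;
};

static void
channel_del(struct channel **pchan)
{
        struct channel *chan = *pchan;  /* take ownership of the pointer */

        *pchan = NULL;                  /* caller's copy is cleared at once */
        if (!chan)
                return;                 /* a second call becomes a no-op */

        free(chan->resources);          /* tear down owned resources */
        free(chan);
}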
static void
nvc0_bar_dtor(struct nouveau_object *object)
{
        struct nvc0_bar_priv *priv = (void *)object;

        nouveau_vm_ref(NULL, &priv->bar[1].vm, priv->bar[1].pgd);
        nouveau_gpuobj_ref(NULL, &priv->bar[1].pgd);
        nouveau_gpuobj_ref(NULL, &priv->bar[1].mem);

        if (priv->bar[0].vm) {
                nouveau_gpuobj_ref(NULL, &priv->bar[0].vm->pgt[0].obj[0]);
                nouveau_vm_ref(NULL, &priv->bar[0].vm, priv->bar[0].pgd);
        }
        nouveau_gpuobj_ref(NULL, &priv->bar[0].pgd);
        nouveau_gpuobj_ref(NULL, &priv->bar[0].mem);

        nouveau_bar_destroy(&priv->base);
}
static void
nouveau_accel_fini(struct nouveau_drm *drm)
{
        nouveau_gpuobj_ref(NULL, &drm->notify);
        nouveau_channel_del(&drm->channel);
        nouveau_channel_del(&drm->cechan);
        if (drm->fence)
                nouveau_fence(drm)->dtor(drm);
}
void
nv20_graph_destroy_context(struct nouveau_channel *chan)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;

        nouveau_gpuobj_ref(NULL, &chan->ramin_grctx);
        nv_wo32(pgraph->ctx_table, chan->id * 4, 0);
}
static void
nv84_fence_destroy(struct drm_device *dev, int engine)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv84_fence_priv *priv = nv_engine(dev, engine);

        nouveau_gpuobj_ref(NULL, &priv->mem);
        dev_priv->eng[engine] = NULL;
        kfree(priv);
}
int
nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
                       int size, uint32_t start, uint32_t end,
                       uint32_t *b_offset)
{
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *nobj = NULL;
        struct drm_mm_node *mem;
        uint32_t offset;
        int target, ret;

        mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
                                          start, end, 0);
        if (mem)
                mem = drm_mm_get_block_range(mem, size, 0, start, end);
        if (!mem) {
                NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
                return -ENOMEM;
        }

        if (dev_priv->card_type < NV_50) {
                if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
                        target = NV_MEM_TARGET_VRAM;
                else
                        target = NV_MEM_TARGET_GART;
                offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
        } else {
                target = NV_MEM_TARGET_VM;
                offset = chan->notifier_bo->vma.offset;
        }
        offset += mem->start;

        ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
                                     mem->size, NV_MEM_ACCESS_RW, target,
                                     &nobj);
        if (ret) {
                drm_mm_put_block(mem);
                NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
                return ret;
        }
        nobj->dtor = nouveau_notifier_gpuobj_dtor;
        nobj->priv = mem;

        ret = nouveau_ramht_insert(chan, handle, nobj);
        nouveau_gpuobj_ref(NULL, &nobj);
        if (ret) {
                drm_mm_put_block(mem);
                NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
                return ret;
        }

        *b_offset = mem->start;
        return 0;
}
static int
nouveau_card_init_channel(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_gpuobj *gpuobj = NULL;
        int ret;

        ret = nouveau_channel_alloc(dev, &dev_priv->channel,
                                    (struct drm_file *)-2, NvDmaFB, NvDmaTT);
        if (ret)
                return ret;

        ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY,
                                     0, dev_priv->vram_size,
                                     NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM,
                                     &gpuobj);
        if (ret)
                goto out_err;

        ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj);
        nouveau_gpuobj_ref(NULL, &gpuobj);
        if (ret)
                goto out_err;

        ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0,
                                          dev_priv->gart_info.aper_size,
                                          NV_DMA_ACCESS_RW, &gpuobj, NULL);
        if (ret)
                goto out_err;

        ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj);
        nouveau_gpuobj_ref(NULL, &gpuobj);
        if (ret)
                goto out_err;

        return 0;

out_err:
        nouveau_channel_free(dev_priv->channel);
        dev_priv->channel = NULL;
        return ret;
}
static void
nv50_mpeg_context_del(struct nouveau_channel *chan, int engine)
{
        struct nouveau_gpuobj *ctx = chan->engctx[engine];
        struct drm_device *dev = chan->dev;
        int i;

        for (i = 0x00; i <= 0x14; i += 4)
                nv_wo32(chan->ramin, CTX_PTR(dev, i), 0x00000000);

        nouveau_gpuobj_ref(NULL, &ctx);
        chan->engctx[engine] = NULL;
}
void
nv50_instmem_takedown(struct drm_device *dev)
{
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nv50_instmem_priv *priv = dev_priv->engine.instmem.priv;
        struct nouveau_channel *chan = dev_priv->channels.ptr[0];
        int i;

        NV_DEBUG(dev, "\n");

        if (!priv)
                return;

        dev_priv->ramin_available = false;

        nouveau_vm_ref(NULL, &dev_priv->chan_vm, NULL);

        for (i = 0x1700; i <= 0x1710; i += 4)
                nv_wr32(dev, i, priv->save1700[(i - 0x1700) / 4]);

        nouveau_gpuobj_ref(NULL, &priv->bar3_dmaobj);
        nouveau_gpuobj_ref(NULL, &priv->bar1_dmaobj);

        nouveau_vm_ref(NULL, &dev_priv->bar1_vm, chan->vm_pd);
        dev_priv->channels.ptr[127] = 0;
        nv50_channel_del(&dev_priv->channels.ptr[0]);

        nouveau_gpuobj_ref(NULL, &dev_priv->bar3_vm->pgt[0].obj[0]);
        nouveau_vm_ref(NULL, &dev_priv->bar3_vm, NULL);

        if (drm_mm_initialized(&dev_priv->ramin_heap))
                drm_mm_takedown(&dev_priv->ramin_heap);

        dev_priv->engine.instmem.priv = NULL;
        kfree(priv);
}
static void
nvc0_channel_del(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan;

        chan = *pchan;
        *pchan = NULL;
        if (!chan)
                return;

        nouveau_vm_ref(NULL, &chan->vm, NULL);
        if (chan->ramin_heap.free_stack.next)
                drm_mm_takedown(&chan->ramin_heap);
        nouveau_gpuobj_ref(NULL, &chan->ramin);
        kfree(chan);
}
void
nouveau_fifo_channel_destroy(struct nouveau_fifo_chan *chan)
{
        struct nouveau_fifo *priv = (void *)nv_object(chan)->engine;
        unsigned long flags;

#ifdef __NetBSD__
        if (chan->mapped) {
                bus_space_unmap(chan->bst, chan->bsh, chan->size);
                chan->mapped = false;
        }
#else
        iounmap(chan->user);
#endif

        spin_lock_irqsave(&priv->lock, flags);
        priv->channel[chan->chid] = NULL;
        spin_unlock_irqrestore(&priv->lock, flags);

        nouveau_gpuobj_ref(NULL, &chan->pushgpu);
        nouveau_object_ref(NULL, (struct nouveau_object **)&chan->pushdma);
        nouveau_namedb_destroy(&chan->base);
}
void
nouveau_channel_put_unlocked(struct nouveau_channel **pchan)
{
        struct nouveau_channel *chan = *pchan;
        struct drm_device *dev = chan->dev;
        struct drm_nouveau_private *dev_priv = dev->dev_private;
        struct nouveau_fifo_engine *pfifo = &dev_priv->engine.fifo;
        struct nouveau_pgraph_engine *pgraph = &dev_priv->engine.graph;
        struct nouveau_crypt_engine *pcrypt = &dev_priv->engine.crypt;
        unsigned long flags;

        /* decrement the refcount, and we're done if there are still refs */
        if (likely(!atomic_dec_and_test(&chan->users))) {
                nouveau_channel_ref(NULL, pchan);
                return;
        }

        /* no one wants the channel anymore */
        NV_DEBUG(dev, "freeing channel %d\n", chan->id);
        nouveau_debugfs_channel_fini(chan);

        /* give it a chance to idle */
        nouveau_channel_idle(chan);

        /* ensure all outstanding fences are signaled.  they should be if the
         * above attempts at idling were OK, but if we failed this'll tell TTM
         * we're done with the buffers.
         */
        nouveau_fence_channel_fini(chan);

        /* boot it off the hardware */
        pfifo->reassign(dev, false);

        /* We want to give pgraph a chance to idle and get rid of all
         * potential errors. We need to do this without the context
         * switch lock held, otherwise the irq handler is unable to
         * process them.
         */
        if (pgraph->channel(dev) == chan)
                nouveau_wait_for_idle(dev);

        /* destroy the engine specific contexts */
        pfifo->destroy_context(chan);
        pgraph->destroy_context(chan);
        if (pcrypt->destroy_context)
                pcrypt->destroy_context(chan);

        pfifo->reassign(dev, true);

        /* aside from its resources, the channel should now be dead,
         * remove it from the channel list
         */
        spin_lock_irqsave(&dev_priv->channels.lock, flags);
        nouveau_channel_ref(NULL, &dev_priv->channels.ptr[chan->id]);
        spin_unlock_irqrestore(&dev_priv->channels.lock, flags);

        /* destroy any resources the channel owned */
        nouveau_gpuobj_ref(NULL, &chan->pushbuf);
        if (chan->pushbuf_bo) {
                nouveau_bo_unmap(chan->pushbuf_bo);
                nouveau_bo_unpin(chan->pushbuf_bo);
                nouveau_bo_ref(NULL, &chan->pushbuf_bo);
        }
        nouveau_gpuobj_channel_takedown(chan);
        nouveau_notifier_takedown_channel(chan);

        nouveau_channel_ref(NULL, pchan);
}
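/*
 * The atomic_dec_and_test() gate at the top of nouveau_channel_put_unlocked()
 * is the classic "last put tears down" refcounting pattern: every holder
 * calls put, and only the caller that drops the final reference runs the
 * destruction path. A standalone C11 sketch of that gate, using stdatomic
 * rather than the kernel's atomic_t API (the names here are hypothetical):
 */
#include <stdatomic.h>
#include <stdbool.h>

struct object {
        atomic_int users;
};

/* Returns true if the caller dropped the last reference and must free. */
static bool
object_put(struct object *obj)
{
        /* atomic_fetch_sub returns the value before the decrement, so a
         * return of 1 means this caller held the final reference. */
        return atomic_fetch_sub(&obj->users, 1) == 1;
}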