static int nv84_fence_context_new(struct nouveau_channel *chan, int engine) { struct nv84_fence_priv *priv = nv_engine(chan->dev, engine); struct nv84_fence_chan *fctx; struct nouveau_gpuobj *obj; int ret; fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); if (!fctx) return -ENOMEM; nouveau_fence_context_new(&fctx->base); ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, priv->mem->vinst, priv->mem->size, NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, &obj); if (ret == 0) { ret = nouveau_ramht_insert(chan, NvSema, obj); nouveau_gpuobj_ref(NULL, &obj); nv_wo32(priv->mem, chan->id * 16, 0x00000000); } if (ret) nv84_fence_context_del(chan, engine); return ret; }
static int nv10_fence_context_new(struct nouveau_channel *chan, int engine) { struct nv10_fence_priv *priv = nv_engine(chan->dev, engine); struct nv10_fence_chan *fctx; struct nouveau_gpuobj *obj; int ret = 0; fctx = chan->engctx[engine] = kzalloc(sizeof(*fctx), GFP_KERNEL); if (!fctx) return -ENOMEM; nouveau_fence_context_new(&fctx->base); if (priv->bo) { struct ttm_mem_reg *mem = &priv->bo->bo.mem; ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_FROM_MEMORY, mem->start * PAGE_SIZE, mem->size, NV_MEM_ACCESS_RW, NV_MEM_TARGET_VRAM, &obj); if (!ret) { ret = nouveau_ramht_insert(chan, NvSema, obj); nouveau_gpuobj_ref(NULL, &obj); } } if (ret) nv10_fence_context_del(chan, engine); return ret; }
/* Attach a DMA object to an nvd0 display DMA channel: pack the channel id
 * and the object's instance offset into a RAMHT data word and insert it
 * under the given handle in the display's hash table.
 * Word layout (from the packing below): bit 0 = valid, bits 9.. = offset,
 * bits 27.. = channel id.
 */
static int
nvd0_disp_dmac_object_attach(struct nouveau_object *parent,
			     struct nouveau_object *object, u32 name)
{
	struct nv50_disp_base *base = (void *)parent->parent;
	struct nv50_disp_chan *chan = (void *)parent;
	u32 addr = nv_gpuobj(object)->node->offset;
	u32 data = 0x00000001 | (addr << 9) | (chan->chid << 27);

	return nouveau_ramht_insert(base->ramht, chan->chid, name, data);
}
/*
 * nouveau_notifier_alloc - carve a notifier block out of a channel's
 * notifier heap and expose it to the GPU via a ctxdma inserted in the
 * channel's RAMHT under @handle.
 *
 * @chan:     channel owning the notifier heap/bo
 * @handle:   RAMHT handle the new DMA object is inserted under
 * @size:     size in bytes of the notifier block to allocate
 * @start:    lower bound (offset) of the heap range to search
 * @end:      upper bound of the heap range to search
 * @b_offset: out — offset of the allocated block within the notifier bo
 *
 * Returns 0 on success, -ENOMEM if the heap range is exhausted, or the
 * error from ctxdma creation / RAMHT insertion.  On success, ownership of
 * the drm_mm node passes to the gpuobj (freed by its dtor); on failure the
 * node is released here.
 */
int nouveau_notifier_alloc(struct nouveau_channel *chan, uint32_t handle,
			   int size, uint32_t start, uint32_t end,
			   uint32_t *b_offset)
{
	struct drm_device *dev = chan->dev;
	struct drm_nouveau_private *dev_priv = dev->dev_private;
	struct nouveau_gpuobj *nobj = NULL;
	struct drm_mm_node *mem;
	uint32_t offset;
	int target, ret;

	/* Two-phase drm_mm allocation: find a free hole in [start, end),
	 * then actually claim a block of @size from it. */
	mem = drm_mm_search_free_in_range(&chan->notifier_heap, size, 0,
					  start, end, 0);
	if (mem)
		mem = drm_mm_get_block_range(mem, size, 0, start, end);
	if (!mem) {
		NV_ERROR(dev, "Channel %d notifier block full\n", chan->id);
		return -ENOMEM;
	}

	/* Pick the DMA target and base offset for the ctxdma: pre-NV50
	 * addresses the bo's backing store directly (VRAM or GART);
	 * NV50+ goes through the channel VM mapping of the notifier bo. */
	if (dev_priv->card_type < NV_50) {
		if (chan->notifier_bo->bo.mem.mem_type == TTM_PL_VRAM)
			target = NV_MEM_TARGET_VRAM;
		else
			target = NV_MEM_TARGET_GART;
		offset = chan->notifier_bo->bo.mem.start << PAGE_SHIFT;
	} else {
		target = NV_MEM_TARGET_VM;
		offset = chan->notifier_bo->vma.offset;
	}
	/* Advance to this block's position within the notifier bo. */
	offset += mem->start;

	ret = nouveau_gpuobj_dma_new(chan, NV_CLASS_DMA_IN_MEMORY, offset,
				     mem->size, NV_MEM_ACCESS_RW, target,
				     &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error creating notifier ctxdma: %d\n", ret);
		return ret;
	}
	/* Hand the drm_mm node to the gpuobj: its dtor releases it, so no
	 * explicit drm_mm_put_block is needed on success paths below. */
	nobj->dtor = nouveau_notifier_gpuobj_dtor;
	nobj->priv = mem;

	ret = nouveau_ramht_insert(chan, handle, nobj);
	/* Drop our local reference; RAMHT keeps the object alive.  If the
	 * insert failed, this also destroys the gpuobj (running its dtor),
	 * NOTE(review): dtor ownership of @mem suggests the put_block below
	 * covers the pre-dtor window — confirm against the dtor semantics. */
	nouveau_gpuobj_ref(NULL, &nobj);
	if (ret) {
		drm_mm_put_block(mem);
		NV_ERROR(dev, "Error adding notifier to ramht: %d\n", ret);
		return ret;
	}

	*b_offset = mem->start;
	return 0;
}
static int nouveau_card_init_channel(struct drm_device *dev) { struct drm_nouveau_private *dev_priv = dev->dev_private; struct nouveau_gpuobj *gpuobj = NULL; int ret; ret = nouveau_channel_alloc(dev, &dev_priv->channel, (struct drm_file *)-2, NvDmaFB, NvDmaTT); if (ret) return ret; ret = nouveau_gpuobj_dma_new(dev_priv->channel, NV_CLASS_DMA_IN_MEMORY, 0, dev_priv->vram_size, NV_DMA_ACCESS_RW, NV_DMA_TARGET_VIDMEM, &gpuobj); if (ret) goto out_err; ret = nouveau_ramht_insert(dev_priv->channel, NvDmaVRAM, gpuobj); nouveau_gpuobj_ref(NULL, &gpuobj); if (ret) goto out_err; ret = nouveau_gpuobj_gart_dma_new(dev_priv->channel, 0, dev_priv->gart_info.aper_size, NV_DMA_ACCESS_RW, &gpuobj, NULL); if (ret) goto out_err; ret = nouveau_ramht_insert(dev_priv->channel, NvDmaGART, gpuobj); nouveau_gpuobj_ref(NULL, &gpuobj); if (ret) goto out_err; return 0; out_err: nouveau_channel_free(dev_priv->channel); dev_priv->channel = NULL; return ret; }