int nouveau_bar_create_(struct nouveau_object *parent, struct nouveau_object *engine, struct nouveau_oclass *oclass, int length, void **pobject) { struct nouveau_device *device = nv_device(parent); struct nouveau_bar *bar; int ret; ret = nouveau_subdev_create_(parent, engine, oclass, 0, "BARCTL", "bar", length, pobject); bar = *pobject; if (ret) return ret; if (nv_device_resource_len(device, 3) != 0) { bar->iomem = ioremap(nv_device_resource_start(device, 3), nv_device_resource_len(device, 3)); if (!bar->iomem) nv_warn(bar, "PRAMIN ioremap failed\n"); } return 0; }
/* nv40_instmem_ctor -- constructor for the NV40-family instance-memory
 * (PRAMIN) subdev.
 *
 * Maps the PRAMIN BAR for CPU access, computes how much of the end of
 * VRAM must be reserved for per-channel graphics contexts and fixed
 * structures, and carves out the fixed objects (VBIOS shadow, RAMHT,
 * RAMRO, RAMFC) at their hardware-expected offsets, in order.
 *
 * Returns 0 on success or a negative errno; on failure *pobject is still
 * set so the caller can destroy the partially-constructed object (which
 * presumably also releases the BAR mapping — handled by the dtor, not
 * here).
 */
static int
nv40_instmem_ctor(struct nouveau_object *parent, struct nouveau_object *engine,
		  struct nouveau_oclass *oclass, void *data, u32 size,
		  struct nouveau_object **pobject)
{
	struct nouveau_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	ret = nouveau_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar: PRAMIN lives in BAR2 when that resource exists,
	 * otherwise in BAR3 */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

#ifdef __NetBSD__
	priv->iomemt = nv_device_resource_tag(device, bar);
	priv->iomemsz = nv_device_resource_len(device, bar);
	ret = bus_space_map(priv->iomemt, nv_device_resource_start(device, bar),
	    priv->iomemsz, 0, &priv->iomemh);
	if (ret) {
		/* zero size marks the region as unmapped for the dtor */
		priv->iomemsz = 0;
		nv_error(priv, "unable to map PRAMIN BAR: %d\n", ret);
		return -EFAULT;
	}
#else
	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}
#endif

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/graph/nv40.c */
	/* vs = number of enabled vertex-shader units, read from 0x001540 */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if (device->chipset == 0x40)
		priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset < 0x43)
		priv->base.reserved = 0x4f00 * vs;
	else if (nv44_graph_class(priv))
		priv->base.reserved = 0x4980 * vs;
	else
		priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

	priv->base.reserved = round_up(priv->base.reserved, 4096);

	/* heap allocator covering the whole reserved region */
	ret = nouveau_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* The following allocations come out of priv->heap in order, so
	 * their sizes determine the fixed offsets noted in the comments. */

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
				 &priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nouveau_ramht_new(nv_object(priv), NULL, 0x08000, 0,
				&priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 * (the 0x08000 allocation covers RAMRO plus its padding)
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
				 &priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nouveau_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
				 NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}
/* nouveau_channel_prep -- allocate a channel structure and its DMA push
 * buffer, then create the ctxdma object that covers the whole memory
 * space the push buffer lives in.
 *
 * On success *pchan points at the new channel; on any failure the
 * partially-built channel is destroyed via nouveau_channel_del() and a
 * negative errno is returned.
 */
static int
nouveau_channel_prep(struct nouveau_drm *drm, struct nvif_device *device,
		     u32 handle, u32 size, struct nouveau_channel **pchan)
{
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
	struct nv_dma_v0 args = {};
	struct nouveau_channel *chan;
	u32 target;
	int ret;

	chan = *pchan = kzalloc(sizeof(*chan), GFP_KERNEL);
	if (!chan)
		return -ENOMEM;

	nvif_device_ref(device, &chan->device);
	chan->drm = drm;

	/* allocate memory for dma push buffer; placement is system memory
	 * (TT) unless the vram_pushbuf module option overrides it */
	target = TTM_PL_FLAG_TT;
	if (nouveau_vram_pushbuf)
		target = TTM_PL_FLAG_VRAM;

	ret = nouveau_bo_new(drm->dev, size, 0, target, 0, 0, NULL,
			     &chan->push.buffer);
	if (ret == 0) {
		ret = nouveau_bo_pin(chan->push.buffer, target);
		if (ret == 0)
			ret = nouveau_bo_map(chan->push.buffer);
	}

	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	/* create dma object covering the *entire* memory space that the
	 * pushbuf lives in, this is because the GEM code requires that
	 * we be able to call out to other (indirect) push buffers
	 */
	chan->push.vma.offset = chan->push.buffer->bo.offset;

	if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
		/* Tesla and later: map the pushbuf into the client's VM and
		 * target the ctxdma at the whole VM address space. */
		ret = nouveau_bo_vma_add(chan->push.buffer, cli->vm,
					 &chan->push.vma);
		if (ret) {
			nouveau_channel_del(pchan);
			return ret;
		}

		args.target = NV_DMA_V0_TARGET_VM;
		args.access = NV_DMA_V0_ACCESS_VM;
		args.start = 0;
		args.limit = cli->vm->vmm->limit - 1;
	} else
	if (chan->push.buffer->bo.mem.mem_type == TTM_PL_VRAM) {
		if (device->info.family == NV_DEVICE_INFO_V0_TNT) {
			/* nv04 vram pushbuf hack, retarget to its location in
			 * the framebuffer bar rather than direct vram access..
			 * nfi why this exists, it came from the -nv ddx.
			 */
			args.target = NV_DMA_V0_TARGET_PCI;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = nv_device_resource_start(nvkm_device(device), 1);
			args.limit = args.start + device->info.ram_user - 1;
		} else {
			/* pre-Tesla vram pushbuf: ctxdma over all user vram */
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}
	} else {
		/* pushbuf in system memory: go through AGP when enabled,
		 * otherwise through the VM's GART mapping */
		if (chan->drm->agp.stat == ENABLED) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}
	}

	/* NVDRM_PUSH | (handle & 0xffff) presumably keeps per-channel
	 * ctxdma handles unique — derived from the caller's handle */
	ret = nvif_object_init(nvif_object(device), NULL, NVDRM_PUSH |
			       (handle & 0xffff), NV_DMA_FROM_MEMORY,
			       &args, sizeof(args), &chan->push.ctxdma);
	if (ret) {
		nouveau_channel_del(pchan);
		return ret;
	}

	return 0;
}
/* nouveau_fifo_channel_create_ -- base constructor for FIFO channel
 * objects: validates the push-buffer DMA object, claims a free channel
 * id, and maps the channel's control-register window.
 *
 * On NetBSD, bar == 0 means the registers live inside the already-mapped
 * engine BAR0 region, so a bus_space subregion is taken instead of a new
 * mapping.  Returns 0 on success or a negative errno; resources acquired
 * before a failure (channel slot, pushdma ref) are presumably released
 * by the destructor, as on the other early-return paths.
 *
 * Fix: the out-of-range check previously returned positive EIO, breaking
 * the negative-errno convention used by every other error path in this
 * function (-ENOENT, -EINVAL, -ENOSPC, -EFAULT, negated bus_space_map).
 */
int
nouveau_fifo_channel_create_(struct nouveau_object *parent,
			     struct nouveau_object *engine,
			     struct nouveau_oclass *oclass,
			     int bar, u32 addr, u32 size, u32 pushbuf,
			     u64 engmask, int len, void **ptr)
{
	struct nouveau_device *device = nv_device(engine);
	struct nouveau_fifo *priv = (void *)engine;
	struct nouveau_fifo_chan *chan;
	struct nouveau_dmaeng *dmaeng;
	unsigned long flags;
	int ret;

	/* create base object class */
	ret = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
				     engmask, len, ptr);
	chan = *ptr;
	if (ret)
		return ret;

	/* validate dma object representing push buffer */
	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
	if (!chan->pushdma)
		return -ENOENT;

	dmaeng = (void *)chan->pushdma->base.engine;
	switch (chan->pushdma->base.oclass->handle) {
	case NV_DMA_FROM_MEMORY_CLASS:
	case NV_DMA_IN_MEMORY_CLASS:
		break;
	default:
		return -EINVAL;
	}

	ret = dmaeng->bind(dmaeng, parent, chan->pushdma, &chan->pushgpu);
	if (ret)
		return ret;

	/* find a free fifo channel; the winning slot is published in
	 * priv->channel[] under the lock */
	spin_lock_irqsave(&priv->lock, flags);
	for (chan->chid = priv->min; chan->chid < priv->max; chan->chid++) {
		if (!priv->channel[chan->chid]) {
			priv->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&priv->lock, flags);

	if (chan->chid == priv->max) {
		nv_error(priv, "no free channels\n");
		return -ENOSPC;
	}

	/* map fifo control registers */
#ifdef __NetBSD__
	if (bar == 0) {
		/*
		 * We already map BAR 0 in the engine device base, so
		 * grab a subregion of that.
		 */
		bus_space_tag_t mmiot = nv_subdev(device)->mmiot;
		bus_space_handle_t mmioh = nv_subdev(device)->mmioh;
		bus_size_t mmiosz = nv_subdev(device)->mmiosz;

		/* Check whether it lies inside the region.  The three
		 * separate comparisons avoid unsigned overflow in
		 * addr + chid*size + size. */
		if (mmiosz < addr ||
		    mmiosz - addr < chan->chid*size ||
		    mmiosz - addr - chan->chid*size < size) {
			ret = -EIO;	/* was positive EIO */
			nv_error(priv, "fifo channel out of range:"
			    " addr 0x%"PRIxMAX
			    " chid 0x%"PRIxMAX" size 0x%"PRIxMAX
			    " mmiosz 0x%"PRIxMAX"\n",
			    (uintmax_t)addr, (uintmax_t)chan->chid,
			    (uintmax_t)size, (uintmax_t)mmiosz);
			return ret;
		}

		/* Grab a subregion.  */
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_subregion(mmiot, mmioh,
		    (addr + chan->chid*size), size, &chan->bsh);
		if (ret) {
			nv_error(priv, "bus_space_subregion failed: %d\n",
			    ret);
			return ret;
		}

		/* Success!  No need to unmap a subregion.  */
		chan->mapped = false;
		chan->bst = mmiot;
	} else {
		chan->bst = nv_device_resource_tag(device, bar);
		/* XXX errno NetBSD->Linux */
		ret = -bus_space_map(chan->bst,
		    (nv_device_resource_start(device, bar) +
			addr + (chan->chid * size)),
		    size, 0, &chan->bsh);
		if (ret) {
			nv_error(priv, "failed to map fifo channel:"
			    " bar %d addr %"PRIxMAX" + %"PRIxMAX
			    " + (%"PRIxMAX" * %"PRIxMAX") = %"PRIxMAX
			    " size %"PRIxMAX": %d\n",
			    bar,
			    (uintmax_t)nv_device_resource_start(device, bar),
			    (uintmax_t)addr, (uintmax_t)chan->chid,
			    (uintmax_t)size,
			    (uintmax_t)(nv_device_resource_start(device, bar) +
				addr + (chan->chid * size)),
			    (uintmax_t)size, ret);
			return ret;
		}
		chan->mapped = true;
	}
#else
	chan->user = ioremap(nv_device_resource_start(device, bar) + addr +
			     (chan->chid * size), size);
	if (!chan->user)
		return -EFAULT;
#endif

	nouveau_event_trigger(priv->cevent, 0);

	chan->size = size;
	return 0;
}
/* nouveau_fifo_channel_create_ -- base constructor for FIFO channel
 * objects.  Validates the DMA object backing the push buffer, claims the
 * first free channel id under the fifo spinlock, and maps this channel's
 * slice of the control registers (offset addr + chid*size within the
 * given BAR).  Returns 0 on success or a negative errno.
 */
int
nouveau_fifo_channel_create_(struct nouveau_object *parent,
			     struct nouveau_object *engine,
			     struct nouveau_oclass *oclass,
			     int bar, u32 addr, u32 size, u32 pushbuf,
			     u64 engmask, int len, void **ptr)
{
	struct nouveau_device *dev = nv_device(engine);
	struct nouveau_fifo *fifo = (void *)engine;
	struct nouveau_fifo_chan *chan;
	struct nouveau_dmaeng *dma;
	unsigned long irqflags;
	int err;

	/* Build the base namedb object; *ptr is assigned even on failure
	 * so the caller can tear down the partial object. */
	err = nouveau_namedb_create_(parent, engine, oclass, 0, NULL,
				     engmask, len, ptr);
	chan = *ptr;
	if (err)
		return err;

	/* Resolve the handle for the push buffer and make sure it names a
	 * DMA object class we can use. */
	chan->pushdma = (void *)nouveau_handle_ref(parent, pushbuf);
	if (chan->pushdma == NULL)
		return -ENOENT;

	if (chan->pushdma->base.oclass->handle != NV_DMA_FROM_MEMORY_CLASS &&
	    chan->pushdma->base.oclass->handle != NV_DMA_IN_MEMORY_CLASS)
		return -EINVAL;

	dma = (void *)chan->pushdma->base.engine;
	err = dma->bind(dma, parent, chan->pushdma, &chan->pushgpu);
	if (err)
		return err;

	/* Claim the first unused channel id; publication into
	 * fifo->channel[] happens under the lock. */
	spin_lock_irqsave(&fifo->lock, irqflags);
	for (chan->chid = fifo->min; chan->chid < fifo->max; chan->chid++) {
		if (fifo->channel[chan->chid] == NULL) {
			fifo->channel[chan->chid] = nv_object(chan);
			break;
		}
	}
	spin_unlock_irqrestore(&fifo->lock, irqflags);

	if (chan->chid == fifo->max) {
		nv_error(fifo, "no free channels\n");
		return -ENOSPC;
	}

	/* Map this channel's window of the fifo control registers. */
#ifdef __NetBSD__
	chan->bst = nv_device_resource_tag(dev, bar);
	/* XXX errno NetBSD->Linux */
	err = -bus_space_map(chan->bst,
	    nv_device_resource_start(dev, bar) + addr + (chan->chid * size),
	    size, 0, &chan->bsh);
	if (err)
		return err;
	chan->mapped = true;
#else
	chan->user = ioremap(nv_device_resource_start(dev, bar) + addr +
			     (chan->chid * size), size);
	if (chan->user == NULL)
		return -EFAULT;
#endif

	nouveau_event_trigger(fifo->cevent, 0);

	chan->size = size;
	return 0;
}