static int
nv35_gr_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
	     struct nvkm_oclass *oclass, void *data, u32 size,
	     struct nvkm_object **pobject)
{
	struct nv20_gr_priv *priv;
	int ret;

	ret = nvkm_gr_create(parent, engine, oclass, true, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 32 * 4, 16,
			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ctxtab);
	if (ret)
		return ret;

	nv_subdev(priv)->unit = 0x00001000;
	nv_subdev(priv)->intr = nv20_gr_intr;
	nv_engine(priv)->cclass = &nv35_gr_cclass;
	nv_engine(priv)->sclass = nv35_gr_sclass;
	nv_engine(priv)->tile_prog = nv20_gr_tile_prog;
	return 0;
}
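/*
 * Aside: the context table allocated above is 32 entries of 4 bytes
 * (16-byte aligned, zeroed).  A minimal sketch of filling one slot,
 * assuming the nv20-family convention of one entry per channel holding
 * the context's instance address in 16-byte units -- the helper below
 * is illustrative, not driver API:
 */
static void
ctxtab_set_sketch(struct nv20_gr_priv *priv, int chid, u64 ctx_addr)
{
	/* one 32-bit slot per channel; addresses stored >> 4 (assumed) */
	nv_wo32(priv->ctxtab, chid * 4, ctx_addr >> 4);
}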
static int
gf100_dmaobj_bind(struct nvkm_dmaobj *dmaobj, struct nvkm_object *parent,
		  struct nvkm_gpuobj **pgpuobj)
{
	struct gf100_dmaobj_priv *priv = (void *)dmaobj;
	int ret;

	if (!nv_iclass(parent, NV_ENGCTX_CLASS)) {
		switch (nv_mclass(parent->parent)) {
		case GT214_DISP_CORE_CHANNEL_DMA:
		case GT214_DISP_BASE_CHANNEL_DMA:
		case GT214_DISP_OVERLAY_CHANNEL_DMA:
			break;
		default:
			return -EINVAL;
		}
	} else
		return 0;

	ret = nvkm_gpuobj_new(parent, parent, 24, 32, 0, pgpuobj);
	if (ret == 0) {
		nv_wo32(*pgpuobj, 0x00, priv->flags0 | nv_mclass(dmaobj));
		nv_wo32(*pgpuobj, 0x04, lower_32_bits(priv->base.limit));
		nv_wo32(*pgpuobj, 0x08, lower_32_bits(priv->base.start));
		nv_wo32(*pgpuobj, 0x0c, upper_32_bits(priv->base.limit) << 24 |
					upper_32_bits(priv->base.start));
		nv_wo32(*pgpuobj, 0x10, 0x00000000);
		nv_wo32(*pgpuobj, 0x14, priv->flags5);
	}

	return ret;
}
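/*
 * For reference, the six 32-bit words written above form the 24-byte
 * GF100 DMA object.  Layout as implied by the writes (field names are
 * descriptive guesses, not from hardware documentation):
 *
 *   0x00: flags0 | object class
 *   0x04: limit[31:0]
 *   0x08: start[31:0]
 *   0x0c: limit[39:32] << 24 | start[39:32]   (40-bit addresses)
 *   0x10: zero
 *   0x14: flags5
 */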
static int
gf100_bar_ctor_vm(struct gf100_bar_priv *priv, struct gf100_bar_priv_vm *bar_vm,
		  int bar_nr)
{
	struct nvkm_device *device = nv_device(&priv->base);
	struct nvkm_vm *vm;
	resource_size_t bar_len;
	int ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x1000, 0, 0,
			      &bar_vm->mem);
	if (ret)
		return ret;

	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x8000, 0, 0,
			      &bar_vm->pgd);
	if (ret)
		return ret;

	bar_len = nv_device_resource_len(device, bar_nr);

	ret = nvkm_vm_new(device, 0, bar_len, 0, &vm);
	if (ret)
		return ret;

	atomic_inc(&vm->engref[NVDEV_SUBDEV_BAR]);

	/*
	 * Bootstrap page table lookup.
	 */
	if (bar_nr == 3) {
		ret = nvkm_gpuobj_new(nv_object(priv), NULL,
				      (bar_len >> 12) * 8, 0x1000,
				      NVOBJ_FLAG_ZERO_ALLOC,
				      &vm->pgt[0].obj[0]);
		vm->pgt[0].refcount[0] = 1;
		if (ret)
			return ret;
	}

	ret = nvkm_vm_ref(vm, &bar_vm->vm, bar_vm->pgd);
	nvkm_vm_ref(NULL, &vm, NULL);
	if (ret)
		return ret;

	nv_wo32(bar_vm->mem, 0x0200, lower_32_bits(bar_vm->pgd->addr));
	nv_wo32(bar_vm->mem, 0x0204, upper_32_bits(bar_vm->pgd->addr));
	nv_wo32(bar_vm->mem, 0x0208, lower_32_bits(bar_len - 1));
	nv_wo32(bar_vm->mem, 0x020c, upper_32_bits(bar_len - 1));
	return 0;
}
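/*
 * The (bar_len >> 12) * 8 sizing above allocates one 8-byte page-table
 * entry per 4 KiB page of the BAR.  A quick worked example, using an
 * assumed 128 MiB BAR3 (illustrative arithmetic only):
 */
static void
pgt_size_example(void)
{
	resource_size_t bar_len = 128 << 20;	/* assumed 128 MiB BAR */
	size_t pgt_size = (bar_len >> 12) * 8;	/* 32768 PTEs * 8 B = 256 KiB */
	(void)pgt_size;
}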
static int
nv40_gr_chan_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		  int align, struct nvkm_gpuobj **pgpuobj)
{
	struct nv40_gr_chan *chan = nv40_gr_chan(object);
	struct nv40_gr *gr = chan->gr;
	int ret = nvkm_gpuobj_new(gr->base.engine.subdev.device, gr->size,
				  align, true, parent, pgpuobj);
	if (ret == 0) {
		chan->inst = (*pgpuobj)->addr;
		nvkm_kmap(*pgpuobj);
		nv40_grctx_fill(gr->base.engine.subdev.device, *pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00000, chan->inst >> 4);
		nvkm_done(*pgpuobj);
	}
	return ret;
}
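/*
 * The write at offset 0x00000 stores the context's own instance address
 * shifted right by 4, i.e. in 16-byte units.  Illustrative conversion
 * helpers (assumptions for clarity, not driver API):
 */
static inline u32 inst_to_hw(u64 addr) { return (u32)(addr >> 4); }
static inline u64 hw_to_inst(u32 hw)   { return (u64)hw << 4; }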
static int
nv40_gr_object_bind(struct nvkm_object *object, struct nvkm_gpuobj *parent,
		    int align, struct nvkm_gpuobj **pgpuobj)
{
	int ret = nvkm_gpuobj_new(object->engine->subdev.device, 20, align,
				  false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, object->oclass);
		nvkm_wo32(*pgpuobj, 0x04, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x08, 0x00000000);
#ifdef __BIG_ENDIAN
		nvkm_mo32(*pgpuobj, 0x08, 0x01000000, 0x01000000);
#endif
		nvkm_wo32(*pgpuobj, 0x0c, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
		nvkm_done(*pgpuobj);
	}
	return ret;
}
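/*
 * On big-endian hosts the #ifdef above sets bit 24 of word 0x08 via a
 * masked read-modify-write.  nvkm_mo32(obj, offset, mask, data) behaves
 * roughly like the sketch below -- a simplification shown for clarity,
 * not the kernel's implementation; it assumes the object is mapped
 * (i.e. between nvkm_kmap() and nvkm_done()):
 */
static u32
mo32_sketch(struct nvkm_gpuobj *obj, u32 offset, u32 mask, u32 data)
{
	u32 temp = nvkm_ro32(obj, offset);
	nvkm_wo32(obj, offset, (temp & ~mask) | data);
	return temp;	/* previous value */
}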
static int
gf100_dmaobj_bind(struct nvkm_dmaobj *base, struct nvkm_gpuobj *parent,
		  int align, struct nvkm_gpuobj **pgpuobj)
{
	struct gf100_dmaobj *dmaobj = gf100_dmaobj(base);
	struct nvkm_device *device = dmaobj->base.dma->engine.subdev.device;
	int ret;

	ret = nvkm_gpuobj_new(device, 24, align, false, parent, pgpuobj);
	if (ret == 0) {
		nvkm_kmap(*pgpuobj);
		nvkm_wo32(*pgpuobj, 0x00, dmaobj->flags0);
		nvkm_wo32(*pgpuobj, 0x04, lower_32_bits(dmaobj->base.limit));
		nvkm_wo32(*pgpuobj, 0x08, lower_32_bits(dmaobj->base.start));
		nvkm_wo32(*pgpuobj, 0x0c, upper_32_bits(dmaobj->base.limit) << 24 |
					  upper_32_bits(dmaobj->base.start));
		nvkm_wo32(*pgpuobj, 0x10, 0x00000000);
		nvkm_wo32(*pgpuobj, 0x14, dmaobj->flags5);
		nvkm_done(*pgpuobj);
	}
	return ret;
}
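/*
 * Word 0x0c packs the top bytes of the 40-bit limit and start
 * addresses.  Illustrative decode helpers, derived from the
 * `upper_32_bits(limit) << 24 | upper_32_bits(start)` packing above
 * (not driver API): bits 31:24 hold limit[39:32], bits 7:0 hold
 * start[39:32].
 */
static inline u8 dma_limit_hi(u32 w) { return w >> 24; }
static inline u8 dma_start_hi(u32 w) { return w & 0xff; }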
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->device;
	struct nvif_sclass *sclass;
	u32 arg0, arg1;
	int ret, i, n;

	if (nouveau_noaccel)
		return;

	/* initialise synchronisation routines */
	/*XXX: this is crap, but the fence/channel stuff is a little
	 *     backwards in some places.  this will be fixed.
	 */
	ret = n = nvif_object_sclass_get(&device->object, &sclass);
	if (ret < 0)
		return;

	for (ret = -ENOSYS, i = 0; i < n; i++) {
		switch (sclass[i].oclass) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	nvif_object_sclass_put(&sclass);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 |
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
					  0, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
					  NvDmaFB, NvDmaTT, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
				  &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
	if (ret == 0) {
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}

		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
				       false, NVSW_NTFY_UEVENT, NULL, 0, 0,
				       &drm->flip);
		if (ret == 0)
			ret = nvif_notify_get(&drm->flip);
		if (ret) {
			nouveau_accel_fini(drm);
			return;
		}
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
				      NULL, &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nvif_object_init(&drm->channel->user, NvNotify0,
				       NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),
				       &drm->ntfy);
		if (ret) {
			nouveau_accel_fini(drm);
			return;
		}
	}

	nouveau_bo_move_init(drm);
}
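/*
 * Aside: nv_dma_v0 limits are inclusive, which is why the 32-byte
 * notifier above sets .limit = drm->notify->addr + 31, spanning
 * [addr, addr + 31].  A general form of that calculation (illustrative
 * helper, not from the driver):
 */
static inline u64 dma_limit_for(u64 start, u64 size)
{
	return start + size - 1;	/* e.g. 32-byte object: start + 31 */
}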
static int
nv40_instmem_ctor(struct nvkm_object *parent, struct nvkm_object *engine,
		  struct nvkm_oclass *oclass, void *data, u32 size,
		  struct nvkm_object **pobject)
{
	struct nvkm_device *device = nv_device(parent);
	struct nv04_instmem_priv *priv;
	int ret, bar, vs;

	ret = nvkm_instmem_create(parent, engine, oclass, &priv);
	*pobject = nv_object(priv);
	if (ret)
		return ret;

	/* map bar */
	if (nv_device_resource_len(device, 2))
		bar = 2;
	else
		bar = 3;

	priv->iomem = ioremap(nv_device_resource_start(device, bar),
			      nv_device_resource_len(device, bar));
	if (!priv->iomem) {
		nv_error(priv, "unable to map PRAMIN BAR\n");
		return -EFAULT;
	}

	/* PRAMIN aperture maps over the end of vram, reserve enough space
	 * to fit graphics contexts for every channel, the magics come
	 * from engine/gr/nv40.c
	 */
	vs = hweight8((nv_rd32(priv, 0x001540) & 0x0000ff00) >> 8);
	if      (device->chipset == 0x40) priv->base.reserved = 0x6aa0 * vs;
	else if (device->chipset  < 0x43) priv->base.reserved = 0x4f00 * vs;
	else if (nv44_gr_class(priv))     priv->base.reserved = 0x4980 * vs;
	else				  priv->base.reserved = 0x4a40 * vs;
	priv->base.reserved += 16 * 1024;
	priv->base.reserved *= 32;		/* per-channel */
	priv->base.reserved += 512 * 1024;	/* pci(e)gart table */
	priv->base.reserved += 512 * 1024;	/* object storage */

	priv->base.reserved = round_up(priv->base.reserved, 4096);

	ret = nvkm_mm_init(&priv->heap, 0, priv->base.reserved, 1);
	if (ret)
		return ret;

	/* 0x00000-0x10000: reserve for probable vbios image */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x10000, 0, 0,
			      &priv->vbios);
	if (ret)
		return ret;

	/* 0x10000-0x18000: reserve for RAMHT */
	ret = nvkm_ramht_new(nv_object(priv), NULL, 0x08000, 0, &priv->ramht);
	if (ret)
		return ret;

	/* 0x18000-0x18200: reserve for RAMRO
	 * 0x18200-0x20000: padding
	 */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x08000, 0, 0,
			      &priv->ramro);
	if (ret)
		return ret;

	/* 0x20000-0x21000: reserve for RAMFC
	 * 0x21000-0x40000: padding and some unknown crap
	 */
	ret = nvkm_gpuobj_new(nv_object(priv), NULL, 0x20000, 0,
			      NVOBJ_FLAG_ZERO_ALLOC, &priv->ramfc);
	if (ret)
		return ret;

	return 0;
}
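/*
 * Worked example of the reservation arithmetic above, assuming
 * chipset 0x40 and vs == 2 (illustrative values only):
 *
 *   per-channel:  0x6aa0 * 2 + 16 KiB       = 0x11540
 *   32 channels:  0x11540 * 32              = 0x22a800
 *   + 512 KiB pci(e)gart table              = 0x2aa800
 *   + 512 KiB object storage                = 0x32a800
 *   round_up(0x32a800, 4096)                = 0x32b000  (~3.2 MiB)
 */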