static int
nouveau_channel_init(struct nouveau_channel *chan, u32 vram, u32 gart)
{
	struct nvif_device *device = chan->device;
	struct nouveau_cli *cli = (void *)nvif_client(&device->base);
	struct nouveau_vmmgr *vmm = nvkm_vmmgr(device);
	struct nouveau_software_chan *swch;
	struct nv_dma_v0 args = {};
	int ret, i;

	nvif_object_map(chan->object);

	/* allocate dma objects to cover all allowed vram, and gart */
	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->vmm->limit - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VRAM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = device->info.ram_user - 1;
		}

		ret = nvif_object_init(chan->object, NULL, vram,
				       NV_DMA_IN_MEMORY, &args,
				       sizeof(args), &chan->vram);
		if (ret)
			return ret;

		if (device->info.family >= NV_DEVICE_INFO_V0_TESLA) {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_VM;
			args.start = 0;
			args.limit = cli->vm->vmm->limit - 1;
		} else
		if (chan->drm->agp.stat == ENABLED) {
			args.target = NV_DMA_V0_TARGET_AGP;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = chan->drm->agp.base;
			args.limit = chan->drm->agp.base +
				     chan->drm->agp.size - 1;
		} else {
			args.target = NV_DMA_V0_TARGET_VM;
			args.access = NV_DMA_V0_ACCESS_RDWR;
			args.start = 0;
			args.limit = vmm->limit - 1;
		}

		ret = nvif_object_init(chan->object, NULL, gart,
				       NV_DMA_IN_MEMORY, &args,
				       sizeof(args), &chan->gart);
		if (ret)
			return ret;
	}

	/* initialise dma tracking parameters */
	switch (chan->object->oclass & 0x00ff) {
	case 0x006b:
	case 0x006e:
		/* pre-nv50 dma channels: plain put/get pushbuf */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->dma.max = (0x10000 / 4) - 2;
		break;
	default:
		/* nv50+ gpfifo channels submit via an indirect buffer (IB) ring */
		chan->user_put = 0x40;
		chan->user_get = 0x44;
		chan->user_get_hi = 0x60;
		chan->dma.ib_base =  0x10000 / 4;
		chan->dma.ib_max  = (0x02000 / 8) - 1;
		chan->dma.ib_put  = 0;
		chan->dma.ib_free = chan->dma.ib_max - chan->dma.ib_put;
		chan->dma.max = chan->dma.ib_base;
		break;
	}

	chan->dma.put = 0;
	chan->dma.cur = chan->dma.put;
	chan->dma.free = chan->dma.max - chan->dma.cur;

	/* pad the start of the pushbuf with no-ops */
	ret = RING_SPACE(chan, NOUVEAU_DMA_SKIPS);
	if (ret)
		return ret;

	for (i = 0; i < NOUVEAU_DMA_SKIPS; i++)
		OUT_RING(chan, 0x00000000);

	/* allocate software object class (used for fences on <= nv05) */
	if (device->info.family < NV_DEVICE_INFO_V0_CELSIUS) {
		ret = nvif_object_init(chan->object, NULL, 0x006e, 0x006e,
				       NULL, 0, &chan->nvsw);
		if (ret)
			return ret;

		swch = (void *)nvkm_object(&chan->nvsw)->parent;
		swch->flip = nouveau_flip_complete;
		swch->flip_data = chan;

		ret = RING_SPACE(chan, 2);
		if (ret)
			return ret;

		BEGIN_NV04(chan, NvSubSw, 0x0000, 1);
		OUT_RING  (chan, chan->nvsw.handle);
		FIRE_RING (chan);
	}

	/* initialise synchronisation */
	return nouveau_fence(chan->drm)->context_new(chan);
}
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->device;
	u32 arg0, arg1;
	u32 sclass[16];
	int ret, i;

	if (nouveau_noaccel)
		return;

	/* initialise synchronisation routines */
	/*XXX: this is crap, but the fence/channel stuff is a little
	 *     backwards in some places.  this will be fixed.
	 */
	ret = nvif_object_sclass(&device->base, sclass, ARRAY_SIZE(sclass));
	if (ret < 0)
		return;

	/* pick a fence implementation based on the channel classes exposed */
	for (ret = -ENOSYS, i = 0; ret && i < ARRAY_SIZE(sclass); i++) {
		switch (sclass[i]) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	/* kepler and up get a dedicated copy-engine channel for buffer moves */
	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN + 1,
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0|
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
					  0, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->device,
					  NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
				  &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nvif_object_init(drm->channel->object, NULL, NVDRM_NVSW,
			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
	if (ret == 0) {
		struct nouveau_software_chan *swch;
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}
		swch = (void *)nvkm_object(&drm->nvsw)->parent;
		swch->flip = nouveau_flip_complete;
		swch->flip_data = drm->channel;
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nouveau_gpuobj_new(nvkm_object(&drm->device), NULL, 32,
					 0, 0, &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nvif_object_init(drm->channel->object, NULL, NvNotify0,
				       NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),