static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_object *object;
	u32 arg0, arg1;
	int ret;

	if (nouveau_noaccel)
		return;

	/* initialise synchronisation routines */
	if      (device->card_type < NV_10) ret = nv04_fence_create(drm);
	else if (device->card_type < NV_50) ret = nv10_fence_create(drm);
	else if (device->chipset   <  0x84) ret = nv50_fence_create(drm);
	else if (device->card_type < NV_C0) ret = nv84_fence_create(drm);
	else                                ret = nvc0_fence_create(drm);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type >= NV_E0) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1,
					  NVE0_CHANNEL_IND_ENGINE_CE0 |
					  NVE0_CHANNEL_IND_ENGINE_CE1, 0,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
		arg1 = 0;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
				  arg0, arg1, &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type < NV_C0) {
		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
					 &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nouveau_object_new(nv_object(drm), drm->channel->handle,
					 NvNotify0, 0x003d,
					 &(struct nv_dma_class) {
						.flags = NV_DMA_TARGET_VRAM |
							 NV_DMA_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
					 }, sizeof(struct nv_dma_class),
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nvif_device *device = &drm->device;
	struct nvif_sclass *sclass;
	u32 arg0, arg1;
	int ret, i, n;

	if (nouveau_noaccel)
		return;

	/* initialise synchronisation routines */
	/*XXX: this is crap, but the fence/channel stuff is a little
	 *     backwards in some places.  this will be fixed.
	 */
	ret = n = nvif_object_sclass_get(&device->object, &sclass);
	if (ret < 0)
		return;

	for (ret = -ENOSYS, i = 0; i < n; i++) {
		switch (sclass[i].oclass) {
		case NV03_CHANNEL_DMA:
			ret = nv04_fence_create(drm);
			break;
		case NV10_CHANNEL_DMA:
			ret = nv10_fence_create(drm);
			break;
		case NV17_CHANNEL_DMA:
		case NV40_CHANNEL_DMA:
			ret = nv17_fence_create(drm);
			break;
		case NV50_CHANNEL_GPFIFO:
			ret = nv50_fence_create(drm);
			break;
		case G82_CHANNEL_GPFIFO:
			ret = nv84_fence_create(drm);
			break;
		case FERMI_CHANNEL_GPFIFO:
		case KEPLER_CHANNEL_GPFIFO_A:
		case MAXWELL_CHANNEL_GPFIFO_A:
			ret = nvc0_fence_create(drm);
			break;
		default:
			break;
		}
	}

	nvif_object_sclass_put(&sclass);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family >= NV_DEVICE_INFO_V0_KEPLER) {
		ret = nouveau_channel_new(drm, &drm->device,
					  NVDRM_CHAN + 1,
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE0 |
					  KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_CE1,
					  0, &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = KEPLER_CHANNEL_GPFIFO_A_V0_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->info.chipset >= 0xa3 &&
	    device->info.chipset != 0xaa &&
	    device->info.chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->device,
					  NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->device, NVDRM_CHAN, arg0, arg1,
				  &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nvif_object_init(&drm->channel->user, NVDRM_NVSW,
			       nouveau_abi16_swclass(drm), NULL, 0, &drm->nvsw);
	if (ret == 0) {
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->info.family < NV_DEVICE_INFO_V0_KEPLER) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}

		ret = nvif_notify_init(&drm->nvsw, nouveau_flip_complete,
				       false, NVSW_NTFY_UEVENT, NULL, 0, 0,
				       &drm->flip);
		if (ret == 0)
			ret = nvif_notify_get(&drm->flip);
		if (ret) {
			nouveau_accel_fini(drm);
			return;
		}
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->info.family < NV_DEVICE_INFO_V0_FERMI) {
		ret = nvkm_gpuobj_new(nvxx_device(&drm->device), 32, 0, false,
				      NULL, &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nvif_object_init(&drm->channel->user, NvNotify0,
				       NV_DMA_IN_MEMORY,
				       &(struct nv_dma_v0) {
						.target = NV_DMA_V0_TARGET_VRAM,
						.access = NV_DMA_V0_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
				       }, sizeof(struct nv_dma_v0),
static void
nouveau_accel_init(struct nouveau_drm *drm)
{
	struct nouveau_device *device = nv_device(drm->device);
	struct nouveau_object *object;
	u32 arg0, arg1;
	int ret;

	if (nouveau_noaccel || !nouveau_fifo(device) /*XXX*/)
		return;

	/* initialise synchronisation routines */
	if      (device->card_type < NV_10)
		ret = nv04_fence_create(drm);
	else if (device->card_type < NV_11 ||
		 device->chipset   <  0x17)
		ret = nv10_fence_create(drm);
	else if (device->card_type < NV_50)
		ret = nv17_fence_create(drm);
	else if (device->chipset   <  0x84)
		ret = nv50_fence_create(drm);
	else if (device->card_type < NV_C0)
		ret = nv84_fence_create(drm);
	else
		ret = nvc0_fence_create(drm);
	if (ret) {
		NV_ERROR(drm, "failed to initialise sync subsystem, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type >= NV_E0) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1,
					  NVE0_CHANNEL_IND_ENGINE_CE0 |
					  NVE0_CHANNEL_IND_ENGINE_CE1, 0,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NVE0_CHANNEL_IND_ENGINE_GR;
		arg1 = 1;
	} else
	if (device->chipset >= 0xa3 &&
	    device->chipset != 0xaa &&
	    device->chipset != 0xac) {
		ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE,
					  NVDRM_CHAN + 1, NvDmaFB, NvDmaTT,
					  &drm->cechan);
		if (ret)
			NV_ERROR(drm, "failed to create ce channel, %d\n", ret);

		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	} else {
		arg0 = NvDmaFB;
		arg1 = NvDmaTT;
	}

	ret = nouveau_channel_new(drm, &drm->client, NVDRM_DEVICE, NVDRM_CHAN,
				  arg0, arg1, &drm->channel);
	if (ret) {
		NV_ERROR(drm, "failed to create kernel channel, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	ret = nouveau_object_new(nv_object(drm), NVDRM_CHAN, NVDRM_NVSW,
				 nouveau_abi16_swclass(drm), NULL, 0, &object);
	if (ret == 0) {
		struct nouveau_software_chan *swch = (void *)object->parent;
		ret = RING_SPACE(drm->channel, 2);
		if (ret == 0) {
			if (device->card_type < NV_C0) {
				BEGIN_NV04(drm->channel, NvSubSw, 0, 1);
				OUT_RING  (drm->channel, NVDRM_NVSW);
			} else
			if (device->card_type < NV_E0) {
				BEGIN_NVC0(drm->channel, FermiSw, 0, 1);
				OUT_RING  (drm->channel, 0x001f0000);
			}
		}

		swch->flip = nouveau_flip_complete;
		swch->flip_data = drm->channel;
	}

	if (ret) {
		NV_ERROR(drm, "failed to allocate software object, %d\n", ret);
		nouveau_accel_fini(drm);
		return;
	}

	if (device->card_type < NV_C0) {
		ret = nouveau_gpuobj_new(drm->device, NULL, 32, 0, 0,
					 &drm->notify);
		if (ret) {
			NV_ERROR(drm, "failed to allocate notifier, %d\n", ret);
			nouveau_accel_fini(drm);
			return;
		}

		ret = nouveau_object_new(nv_object(drm), drm->channel->handle,
					 NvNotify0, 0x003d,
					 &(struct nv_dma_class) {
						.flags = NV_DMA_TARGET_VRAM |
							 NV_DMA_ACCESS_RDWR,
						.start = drm->notify->addr,
						.limit = drm->notify->addr + 31
					 }, sizeof(struct nv_dma_class),