void
nvkm_oproxy_ctor(const struct nvkm_oproxy_func *func,
		 const struct nvkm_oclass *oclass, struct nvkm_oproxy *oproxy)
{
	nvkm_object_ctor(&nvkm_oproxy_func, oclass, &oproxy->base);
	oproxy->func = func;
}
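/*
 * Hedged usage sketch: a wrapper object embeds struct nvkm_oproxy,
 * supplies its own nvkm_oproxy_func table, and lets nvkm_oproxy_ctor()
 * initialise the base nvkm_object.  The "example_oproxy" names below
 * are hypothetical, not taken from the driver.
 */
static const struct nvkm_oproxy_func
example_oproxy = {
};

static int
example_oproxy_new(const struct nvkm_oclass *oclass,
		   struct nvkm_oproxy **poproxy)
{
	struct nvkm_oproxy *oproxy;

	if (!(oproxy = kzalloc(sizeof(*oproxy), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_oproxy_ctor(&example_oproxy, oclass, oproxy);
	*poproxy = oproxy;
	return 0;
}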
int
nv50_disp_chan_new_(const struct nv50_disp_chan_func *func,
		    const struct nv50_disp_chan_mthd *mthd,
		    struct nv50_disp *disp, int ctrl, int user, int head,
		    const struct nvkm_oclass *oclass,
		    struct nvkm_object **pobject)
{
	struct nv50_disp_chan *chan;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &chan->object;

	nvkm_object_ctor(&nv50_disp_chan, oclass, &chan->object);
	chan->func = func;
	chan->mthd = mthd;
	chan->disp = disp;
	chan->chid.ctrl = ctrl;
	chan->chid.user = user;
	chan->head = head;

	if (disp->chan[chan->chid.user]) {
		/* slot already taken: poison chid.user so the destructor
		 * won't release somebody else's channel */
		chan->chid.user = -1;
		return -EBUSY;
	}
	disp->chan[chan->chid.user] = chan;
	return 0;
}
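/*
 * Hedged sketch of the matching destructor logic implied by the
 * chid.user = -1 sentinel above: only clear the slot the constructor
 * actually claimed.  Not copied from the driver; the nv50_disp_chan()
 * cast helper is assumed to exist as it does elsewhere in nvkm.
 */
static void *
example_disp_chan_dtor(struct nvkm_object *object)
{
	struct nv50_disp_chan *chan = nv50_disp_chan(object);
	struct nv50_disp *disp = chan->disp;

	if (chan->chid.user >= 0)
		disp->chan[chan->chid.user] = NULL;
	return chan;
}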
static int
nv04_disp_root_new(struct nvkm_disp *disp, const struct nvkm_oclass *oclass,
		   void *data, u32 size, struct nvkm_object **pobject)
{
	struct nv04_disp_root *root;

	if (!(root = kzalloc(sizeof(*root), GFP_KERNEL)))
		return -ENOMEM;
	root->disp = disp;
	*pobject = &root->object;

	nvkm_object_ctor(&nv04_disp_root, oclass, &root->object);
	return 0;
}
int
nvkm_nvsw_new_(const struct nvkm_nvsw_func *func, struct nvkm_sw_chan *chan,
	       const struct nvkm_oclass *oclass, void *data, u32 size,
	       struct nvkm_object **pobject)
{
	struct nvkm_nvsw *nvsw;

	if (!(nvsw = kzalloc(sizeof(*nvsw), GFP_KERNEL)))
		return -ENOMEM;
	*pobject = &nvsw->object;

	nvkm_object_ctor(&nvkm_nvsw_, oclass, &nvsw->object);
	nvsw->func = func;
	nvsw->chan = chan;
	return 0;
}
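/*
 * Hedged usage sketch: per-class constructors are thin wrappers that
 * pass their own nvkm_nvsw_func table to nvkm_nvsw_new_().  The
 * "example_nvsw" names are hypothetical.
 */
static const struct nvkm_nvsw_func
example_nvsw = {
};

static int
example_nvsw_new(struct nvkm_sw_chan *chan, const struct nvkm_oclass *oclass,
		 void *data, u32 size, struct nvkm_object **pobject)
{
	return nvkm_nvsw_new_(&example_nvsw, chan, oclass, data, size, pobject);
}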
int
nvkm_udevice_new(const struct nvkm_oclass *oclass, void *data, u32 size,
		 struct nvkm_object **pobject)
{
	union {
		struct nv_device_v0 v0;
	} *args = data;
	struct nvkm_client *client = oclass->client;
	struct nvkm_object *parent = &client->object;
	const struct nvkm_object_func *func;
	struct nvkm_udevice *udev;
	int ret = -ENOSYS;

	nvif_ioctl(parent, "create device size %d\n", size);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, false))) {
		nvif_ioctl(parent, "create device v%d device %016llx\n",
			   args->v0.version, args->v0.device);
	} else
		return ret;

	/* give privileged clients register access */
	if (client->super)
		func = &nvkm_udevice_super;
	else
		func = &nvkm_udevice;

	if (!(udev = kzalloc(sizeof(*udev), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(func, oclass, &udev->object);
	*pobject = &udev->object;

	/* find the device that matches what the client requested */
	if (args->v0.device != ~0)
		udev->device = nvkm_device_find(args->v0.device);
	else
		udev->device = nvkm_device_find(client->device);
	if (!udev->device)
		return -ENODEV;

	return 0;
}
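/*
 * Hedged client-side sketch: per the ~0 check above, a client that
 * simply wants the device it opened the client against can pass ~0 as
 * the device identifier.  Field names follow struct nv_device_v0 as
 * used above; values are illustrative.
 */
static const struct nv_device_v0 example_device_args = {
	.version = 0,
	.device = ~0ULL,	/* "don't care": resolved via client->device */
};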
static int
nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv20_gr *gr = nv20_gr(base);
	struct nv20_gr_chan *chan;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

	ret = nvkm_memory_new(gr->base.engine.subdev.device,
			      NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
			      &chan->inst);
	if (ret)
		return ret;

	/* fill this channel's PGRAPH context image with default state */
	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
	nvkm_wo32(chan->inst, 0x0410, 0x00000101);
	nvkm_wo32(chan->inst, 0x0424, 0x00000111);
	nvkm_wo32(chan->inst, 0x0428, 0x00000060);
	nvkm_wo32(chan->inst, 0x0444, 0x00000080);
	nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
	nvkm_wo32(chan->inst, 0x044c, 0x00000001);
	nvkm_wo32(chan->inst, 0x0460, 0x44400000);
	nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
	for (i = 0x04e0; i < 0x04e8; i += 4)
		nvkm_wo32(chan->inst, i, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
	for (i = 0x0508; i < 0x0548; i += 4)
		nvkm_wo32(chan->inst, i, 0x07ff0000);
	nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
	nvkm_wo32(chan->inst, 0x058c, 0x00000080);
	nvkm_wo32(chan->inst, 0x0590, 0x30201000);
	nvkm_wo32(chan->inst, 0x0594, 0x70605040);
	nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
	nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
	nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
	for (i = 0x0600; i < 0x0640; i += 4)
		nvkm_wo32(chan->inst, i, 0x00010588);
	for (i = 0x0640; i < 0x0680; i += 4)
		nvkm_wo32(chan->inst, i, 0x00030303);
	for (i = 0x06c0; i < 0x0700; i += 4)
		nvkm_wo32(chan->inst, i, 0x0008aae4);
	for (i = 0x0700; i < 0x0740; i += 4)
		nvkm_wo32(chan->inst, i, 0x01012000);
	for (i = 0x0740; i < 0x0780; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080008);
	nvkm_wo32(chan->inst, 0x085c, 0x00040000);
	nvkm_wo32(chan->inst, 0x0860, 0x00010000);
	for (i = 0x0864; i < 0x0874; i += 4)
		nvkm_wo32(chan->inst, i, 0x00040004);
	for (i = 0x1f18; i <= 0x3088; i += 16) {
		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
	}
	for (i = 0x30b8; i < 0x30c8; i += 4)
		nvkm_wo32(chan->inst, i, 0x0000ffff);
	nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
	nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3848, 0x40000000);
	nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
	nvkm_wo32(chan->inst, 0x3858, 0x40000000);
	nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
	nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
	nvkm_done(chan->inst);
	return 0;
}
int
nvkm_dmaobj_ctor(const struct nvkm_dmaobj_func *func, struct nvkm_dma *dma,
		 const struct nvkm_oclass *oclass, void **pdata, u32 *psize,
		 struct nvkm_dmaobj *dmaobj)
{
	union {
		struct nv_dma_v0 v0;
	} *args = *pdata;
	struct nvkm_device *device = dma->engine.subdev.device;
	struct nvkm_client *client = oclass->client;
	struct nvkm_object *parent = oclass->parent;
	struct nvkm_instmem *instmem = device->imem;
	struct nvkm_fb *fb = device->fb;
	void *data = *pdata;
	u32 size = *psize;
	int ret = -ENOSYS;

	nvkm_object_ctor(&nvkm_dmaobj_func, oclass, &dmaobj->object);
	dmaobj->func = func;
	dmaobj->dma = dma;
	RB_CLEAR_NODE(&dmaobj->rb);

	nvif_ioctl(parent, "create dma size %d\n", *psize);
	if (!(ret = nvif_unpack(ret, &data, &size, args->v0, 0, 0, true))) {
		nvif_ioctl(parent, "create dma vers %d target %d access %d "
				   "start %016llx limit %016llx\n",
			   args->v0.version, args->v0.target, args->v0.access,
			   args->v0.start, args->v0.limit);
		dmaobj->target = args->v0.target;
		dmaobj->access = args->v0.access;
		dmaobj->start = args->v0.start;
		dmaobj->limit = args->v0.limit;
	} else
		return ret;

	*pdata = data;
	*psize = size;

	if (dmaobj->start > dmaobj->limit)
		return -EINVAL;

	switch (dmaobj->target) {
	case NV_DMA_V0_TARGET_VM:
		dmaobj->target = NV_MEM_TARGET_VM;
		break;
	case NV_DMA_V0_TARGET_VRAM:
		if (!client->super) {
			if (dmaobj->limit >= fb->ram->size - instmem->reserved)
				return -EACCES;
			if (device->card_type >= NV_50)
				return -EACCES;
		}
		dmaobj->target = NV_MEM_TARGET_VRAM;
		break;
	case NV_DMA_V0_TARGET_PCI:
		if (!client->super)
			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI;
		break;
	case NV_DMA_V0_TARGET_PCI_US:
	case NV_DMA_V0_TARGET_AGP:
		if (!client->super)
			return -EACCES;
		dmaobj->target = NV_MEM_TARGET_PCI_NOSNOOP;
		break;
	default:
		return -EINVAL;
	}

	switch (dmaobj->access) {
	case NV_DMA_V0_ACCESS_VM:
		dmaobj->access = NV_MEM_ACCESS_VM;
		break;
	case NV_DMA_V0_ACCESS_RD:
		dmaobj->access = NV_MEM_ACCESS_RO;
		break;
	case NV_DMA_V0_ACCESS_WR:
		dmaobj->access = NV_MEM_ACCESS_WO;
		break;
	case NV_DMA_V0_ACCESS_RDWR:
		dmaobj->access = NV_MEM_ACCESS_RW;
		break;
	default:
		return -EINVAL;
	}

	return ret;
}
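/*
 * Hedged caller-side sketch: a request for a read-only VRAM DMA
 * object, shaped the way the unpack and the two switches above expect
 * it.  Field names follow struct nv_dma_v0 as used above; the
 * start/limit values are illustrative.
 */
static const struct nv_dma_v0 example_dma_args = {
	.version = 0,
	.target = NV_DMA_V0_TARGET_VRAM,
	.access = NV_DMA_V0_ACCESS_RD,
	.start = 0x0000000000000000ULL,
	.limit = 0x0000000000ffffffULL,
};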
static int
nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv20_gr *gr = nv20_gr(base);
	struct nv20_gr_chan *chan;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

	ret = nvkm_memory_new(gr->base.engine.subdev.device,
			      NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
			      &chan->inst);
	if (ret)
		return ret;

	/* fill this channel's PGRAPH context image with default state */
	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
	nvkm_wo32(chan->inst, 0x040c, 0x01000101);
	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
	nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
	for (i = 0x04d4; i < 0x04dc; i += 4)
		nvkm_wo32(chan->inst, i, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
	for (i = 0x04fc; i < 0x053c; i += 4)
		nvkm_wo32(chan->inst, i, 0x07ff0000);
	nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
	nvkm_wo32(chan->inst, 0x057c, 0x00000080);
	nvkm_wo32(chan->inst, 0x0580, 0x30201000);
	nvkm_wo32(chan->inst, 0x0584, 0x70605040);
	nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
	nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
	nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
	for (i = 0x05f0; i < 0x0630; i += 4)
		nvkm_wo32(chan->inst, i, 0x00010588);
	for (i = 0x0630; i < 0x0670; i += 4)
		nvkm_wo32(chan->inst, i, 0x00030303);
	for (i = 0x06b0; i < 0x06f0; i += 4)
		nvkm_wo32(chan->inst, i, 0x0008aae4);
	for (i = 0x06f0; i < 0x0730; i += 4)
		nvkm_wo32(chan->inst, i, 0x01012000);
	for (i = 0x0730; i < 0x0770; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080008);
	nvkm_wo32(chan->inst, 0x0850, 0x00040000);
	nvkm_wo32(chan->inst, 0x0854, 0x00010000);
	for (i = 0x0858; i < 0x0868; i += 4)
		nvkm_wo32(chan->inst, i, 0x00040004);
	for (i = 0x15ac; i <= 0x271c; i += 16) {
		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
	}
	for (i = 0x274c; i < 0x275c; i += 4)
		nvkm_wo32(chan->inst, i, 0x0000ffff);
	nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
	nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
	nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
	nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
	nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
	nvkm_done(chan->inst);
	return 0;
}