int
nv50_fifo_oneinit(struct nvkm_fifo *base)
{
	struct nv50_fifo *fifo = nv50_fifo(base);
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	int ret;

	/* Allocate both 128-entry runlist buffers in instance memory. */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
			      false, &fifo->runlist[0]);
	if (ret)
		return ret;

	return nvkm_memory_new(device, NVKM_MEM_TARGET_INST, 128 * 4, 0x1000,
			       false, &fifo->runlist[1]);
}
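/*
 * Editor's note (not part of the driver source): the two runlists above
 * form a double buffer.  Each holds up to 128 four-byte entries (one per
 * channel), and the runlist update path alternates between them so a new
 * list can be built while the hardware may still be reading the old one.
 */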
static int
nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id)
{
	struct nvkm_subdev *subdev = &fault->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_fault_buffer *buffer;
	int ret;

	if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL)))
		return -ENOMEM;
	buffer->fault = fault;
	buffer->id = id;
	fault->func->buffer.info(buffer);
	fault->buffer[id] = buffer;

	nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries);

	/* Allocate page-aligned, zeroed backing store sized from the
	 * per-chip entry count and record size.
	 */
	ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries *
			      fault->func->buffer.entry_size, 0x1000, true,
			      &buffer->mem);
	if (ret)
		return ret;

	/* Pin fault buffer in BAR2. */
	buffer->addr = nvkm_memory_bar2(buffer->mem);
	if (buffer->addr == ~0ULL)
		return -EFAULT;

	return 0;
}
static int
nvkm_gpuobj_ctor(struct nvkm_device *device, u32 size, int align, bool zero,
		 struct nvkm_gpuobj *parent, struct nvkm_gpuobj *gpuobj)
{
	u32 offset;
	int ret;

	if (parent) {
		/* Suballocate from the parent's heap: a non-negative
		 * alignment allocates from the head, a negative one
		 * from the tail.
		 */
		if (align >= 0) {
			ret = nvkm_mm_head(&parent->heap, 0, 1, size, size,
					   max(align, 1), &gpuobj->node);
		} else {
			ret = nvkm_mm_tail(&parent->heap, 0, 1, size, size,
					   -align, &gpuobj->node);
		}
		if (ret)
			return ret;

		gpuobj->parent = parent;
		gpuobj->func = &nvkm_gpuobj_func;
		gpuobj->addr = parent->addr + gpuobj->node->offset;
		gpuobj->size = gpuobj->node->length;

		if (zero) {
			/* Zero-fill the new range a word at a time. */
			nvkm_kmap(gpuobj);
			for (offset = 0; offset < gpuobj->size; offset += 4)
				nvkm_wo32(gpuobj, offset, 0x00000000);
			nvkm_done(gpuobj);
		}
	} else {
		/* No parent: back the object with fresh instance memory. */
		ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, size,
				      abs(align), zero, &gpuobj->memory);
		if (ret)
			return ret;

		gpuobj->func = &nvkm_gpuobj_heap;
		gpuobj->addr = nvkm_memory_addr(gpuobj->memory);
		gpuobj->size = nvkm_memory_size(gpuobj->memory);
	}

	/* Give the object its own heap so it can parent suballocations. */
	return nvkm_mm_init(&gpuobj->heap, 0, 0, gpuobj->size, 1);
}
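/*
 * Editor's sketch (not part of the driver source): callers reach the
 * constructor above through nvkm_gpuobj_new(), and the sign of "align"
 * selects the placement policy.  For example:
 *
 *	struct nvkm_gpuobj *obj;
 *	int ret;
 *
 *	ret = nvkm_gpuobj_new(device, 0x1000,  16, true, parent, &obj);
 *	// head of parent's heap, 16-byte aligned
 *	ret = nvkm_gpuobj_new(device, 0x1000, -16, true, parent, &obj);
 *	// tail of parent's heap, 16-byte aligned
 *
 * With parent == NULL the object instead gets its own instance-memory
 * backing, aligned to abs(align).
 */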
static int
nv30_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv20_gr *gr = nv20_gr(base);
	struct nv20_gr_chan *chan;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv30_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

	ret = nvkm_memory_new(gr->base.engine.subdev.device,
			      NVKM_MEM_TARGET_INST, 0x5f48, 16, true,
			      &chan->inst);
	if (ret)
		return ret;

	/* Stamp the default context image for this channel. */
	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
	nvkm_wo32(chan->inst, 0x0410, 0x00000101);
	nvkm_wo32(chan->inst, 0x0424, 0x00000111);
	nvkm_wo32(chan->inst, 0x0428, 0x00000060);
	nvkm_wo32(chan->inst, 0x0444, 0x00000080);
	nvkm_wo32(chan->inst, 0x0448, 0xffff0000);
	nvkm_wo32(chan->inst, 0x044c, 0x00000001);
	nvkm_wo32(chan->inst, 0x0460, 0x44400000);
	nvkm_wo32(chan->inst, 0x048c, 0xffff0000);
	for (i = 0x04e0; i < 0x04e8; i += 4)
		nvkm_wo32(chan->inst, i, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x04ec, 0x00011100);
	for (i = 0x0508; i < 0x0548; i += 4)
		nvkm_wo32(chan->inst, i, 0x07ff0000);
	nvkm_wo32(chan->inst, 0x0550, 0x4b7fffff);
	nvkm_wo32(chan->inst, 0x058c, 0x00000080);
	nvkm_wo32(chan->inst, 0x0590, 0x30201000);
	nvkm_wo32(chan->inst, 0x0594, 0x70605040);
	nvkm_wo32(chan->inst, 0x0598, 0xb8a89888);
	nvkm_wo32(chan->inst, 0x059c, 0xf8e8d8c8);
	nvkm_wo32(chan->inst, 0x05b0, 0xb0000000);
	for (i = 0x0600; i < 0x0640; i += 4)
		nvkm_wo32(chan->inst, i, 0x00010588);
	for (i = 0x0640; i < 0x0680; i += 4)
		nvkm_wo32(chan->inst, i, 0x00030303);
	for (i = 0x06c0; i < 0x0700; i += 4)
		nvkm_wo32(chan->inst, i, 0x0008aae4);
	for (i = 0x0700; i < 0x0740; i += 4)
		nvkm_wo32(chan->inst, i, 0x01012000);
	for (i = 0x0740; i < 0x0780; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080008);
	nvkm_wo32(chan->inst, 0x085c, 0x00040000);
	nvkm_wo32(chan->inst, 0x0860, 0x00010000);
	for (i = 0x0864; i < 0x0874; i += 4)
		nvkm_wo32(chan->inst, i, 0x00040004);
	for (i = 0x1f18; i <= 0x3088; i += 16) {
		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
	}
	for (i = 0x30b8; i < 0x30c8; i += 4)
		nvkm_wo32(chan->inst, i, 0x0000ffff);
	nvkm_wo32(chan->inst, 0x344c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3808, 0x3f800000);
	nvkm_wo32(chan->inst, 0x381c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3848, 0x40000000);
	nvkm_wo32(chan->inst, 0x384c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3850, 0x3f000000);
	nvkm_wo32(chan->inst, 0x3858, 0x40000000);
	nvkm_wo32(chan->inst, 0x385c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x3864, 0xbf800000);
	nvkm_wo32(chan->inst, 0x386c, 0xbf800000);
	nvkm_done(chan->inst);
	return 0;
}
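/*
 * Editor's note (not part of the driver source): nv34 follows the same
 * pattern as nv30 above; the function below differs mainly in the size of
 * the context image (0x46dc rather than 0x5f48 bytes) and in the offsets
 * at which the default values are written.
 */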
static int
nv34_gr_chan_new(struct nvkm_gr *base, struct nvkm_fifo_chan *fifoch,
		 const struct nvkm_oclass *oclass, struct nvkm_object **pobject)
{
	struct nv20_gr *gr = nv20_gr(base);
	struct nv20_gr_chan *chan;
	int ret, i;

	if (!(chan = kzalloc(sizeof(*chan), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_object_ctor(&nv34_gr_chan, oclass, &chan->object);
	chan->gr = gr;
	chan->chid = fifoch->chid;
	*pobject = &chan->object;

	ret = nvkm_memory_new(gr->base.engine.subdev.device,
			      NVKM_MEM_TARGET_INST, 0x46dc, 16, true,
			      &chan->inst);
	if (ret)
		return ret;

	/* Stamp the default context image for this channel. */
	nvkm_kmap(chan->inst);
	nvkm_wo32(chan->inst, 0x0028, 0x00000001 | (chan->chid << 24));
	nvkm_wo32(chan->inst, 0x040c, 0x01000101);
	nvkm_wo32(chan->inst, 0x0420, 0x00000111);
	nvkm_wo32(chan->inst, 0x0424, 0x00000060);
	nvkm_wo32(chan->inst, 0x0440, 0x00000080);
	nvkm_wo32(chan->inst, 0x0444, 0xffff0000);
	nvkm_wo32(chan->inst, 0x0448, 0x00000001);
	nvkm_wo32(chan->inst, 0x045c, 0x44400000);
	nvkm_wo32(chan->inst, 0x0480, 0xffff0000);
	for (i = 0x04d4; i < 0x04dc; i += 4)
		nvkm_wo32(chan->inst, i, 0x0fff0000);
	nvkm_wo32(chan->inst, 0x04e0, 0x00011100);
	for (i = 0x04fc; i < 0x053c; i += 4)
		nvkm_wo32(chan->inst, i, 0x07ff0000);
	nvkm_wo32(chan->inst, 0x0544, 0x4b7fffff);
	nvkm_wo32(chan->inst, 0x057c, 0x00000080);
	nvkm_wo32(chan->inst, 0x0580, 0x30201000);
	nvkm_wo32(chan->inst, 0x0584, 0x70605040);
	nvkm_wo32(chan->inst, 0x0588, 0xb8a89888);
	nvkm_wo32(chan->inst, 0x058c, 0xf8e8d8c8);
	nvkm_wo32(chan->inst, 0x05a0, 0xb0000000);
	for (i = 0x05f0; i < 0x0630; i += 4)
		nvkm_wo32(chan->inst, i, 0x00010588);
	for (i = 0x0630; i < 0x0670; i += 4)
		nvkm_wo32(chan->inst, i, 0x00030303);
	for (i = 0x06b0; i < 0x06f0; i += 4)
		nvkm_wo32(chan->inst, i, 0x0008aae4);
	for (i = 0x06f0; i < 0x0730; i += 4)
		nvkm_wo32(chan->inst, i, 0x01012000);
	for (i = 0x0730; i < 0x0770; i += 4)
		nvkm_wo32(chan->inst, i, 0x00080008);
	nvkm_wo32(chan->inst, 0x0850, 0x00040000);
	nvkm_wo32(chan->inst, 0x0854, 0x00010000);
	for (i = 0x0858; i < 0x0868; i += 4)
		nvkm_wo32(chan->inst, i, 0x00040004);
	for (i = 0x15ac; i <= 0x271c; i += 16) {
		nvkm_wo32(chan->inst, i + 0, 0x10700ff9);
		nvkm_wo32(chan->inst, i + 4, 0x0436086c);
		nvkm_wo32(chan->inst, i + 8, 0x000c001b);
	}
	for (i = 0x274c; i < 0x275c; i += 4)
		nvkm_wo32(chan->inst, i, 0x0000ffff);
	nvkm_wo32(chan->inst, 0x2ae0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2e9c, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2eb0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2edc, 0x40000000);
	nvkm_wo32(chan->inst, 0x2ee0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2ee4, 0x3f000000);
	nvkm_wo32(chan->inst, 0x2eec, 0x40000000);
	nvkm_wo32(chan->inst, 0x2ef0, 0x3f800000);
	nvkm_wo32(chan->inst, 0x2ef8, 0xbf800000);
	nvkm_wo32(chan->inst, 0x2f00, 0xbf800000);
	nvkm_done(chan->inst);
	return 0;
}