/* Arm the NV04 PTIMER alarm: program the compare value, then unmask the
 * alarm interrupt.
 */
void
nv04_timer_alarm_init(struct nvkm_timer *tmr, u32 time)
{
	struct nvkm_device *dev = tmr->subdev.device;

	/* Compare value first so the interrupt can't fire on stale state. */
	nvkm_wr32(dev, NV04_PTIMER_ALARM_0, time);
	nvkm_wr32(dev, NV04_PTIMER_INTR_EN_0, 0x00000001);
}
/* GF100 MSPPP falcon init: program the two 0x0860xx setup registers
 * (values taken unchanged from the original implementation).
 */
static void
gf100_msppp_init(struct nvkm_falcon *msppp)
{
	struct nvkm_device *dev = msppp->engine.subdev.device;

	nvkm_wr32(dev, 0x086010, 0x0000fff2);
	nvkm_wr32(dev, 0x08601c, 0x0000fff2);
}
/* NV04 PBUS interrupt handler.
 *
 * Services interrupt bits that are both pending (0x001100) and enabled
 * (0x001140), acknowledging each recognised source, and masks off any
 * leftover unknown bits so they cannot storm.
 */
static void
nv04_bus_intr(struct nvkm_bus *bus)
{
	struct nvkm_subdev *subdev = &bus->subdev;
	struct nvkm_device *device = subdev->device;
	/* pending & enabled */
	u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140);

	if (stat & 0x00000001) {
		nvkm_error(subdev, "BUS ERROR\n");
		stat &= ~0x00000001;
		nvkm_wr32(device, 0x001100, 0x00000001); /* ack */
	}

	if (stat & 0x00000110) {
		/* forward to the GPIO subdev if one exists */
		struct nvkm_gpio *gpio = device->gpio;
		if (gpio)
			nvkm_subdev_intr(&gpio->subdev);
		stat &= ~0x00000110;
		nvkm_wr32(device, 0x001100, 0x00000110); /* ack both bits */
	}

	if (stat) {
		/* unknown source: report and disable it */
		nvkm_error(subdev, "intr %08x\n", stat);
		nvkm_mask(device, 0x001140, stat, 0x00000000);
	}
}
/* Begin a MEMX script-recording session with the PMU.
 *
 * Queries the PMU for the MEMX data segment (base and size returned in
 * reply[0]/reply[1]), allocates the tracking structure, acquires access
 * to the PMU data segment, and points the data window at the segment.
 *
 * Returns 0 on success, or a negative errno (from nvkm_pmu_send, or
 * -ENOMEM on allocation failure — note *pmemx is left NULL in that case).
 */
int
nvkm_memx_init(struct nvkm_pmu *pmu, struct nvkm_memx **pmemx)
{
	struct nvkm_device *device = pmu->subdev.device;
	struct nvkm_memx *memx;
	u32 reply[2];
	int ret;

	/* ask the PMU where the MEMX data segment lives */
	ret = nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_INFO,
			    MEMX_INFO_DATA, 0);
	if (ret)
		return ret;

	memx = *pmemx = kzalloc(sizeof(*memx), GFP_KERNEL);
	if (!memx)
		return -ENOMEM;
	memx->pmu = pmu;
	memx->base = reply[0];
	memx->size = reply[1];

	/* acquire data segment access */
	do {
		nvkm_wr32(device, 0x10a580, 0x00000003);
	} while (nvkm_rd32(device, 0x10a580) != 0x00000003);
	/* point the PMU data window at the MEMX segment base */
	nvkm_wr32(device, 0x10a1c0, 0x01000000 | memx->base);
	return 0;
}
/* Re-enable interrupt delivery by writing 1 to both interrupt-enable
 * registers (0x000140 and 0x000144).
 */
void
gf100_mc_intr_rearm(struct nvkm_mc *mc)
{
	struct nvkm_device *dev = mc->subdev.device;

	nvkm_wr32(dev, 0x000140, 0x00000001);
	nvkm_wr32(dev, 0x000144, 0x00000001);
}
/* Program the SM hardware-warning (HWW) ESR report masks for GM20B.
 * Values are carried over unchanged from the original implementation.
 */
static void
gm20b_gr_set_hww_esr_report_mask(struct gf100_gr *gr)
{
	struct nvkm_device *dev = gr->base.engine.subdev.device;

	nvkm_wr32(dev, 0x419e44, 0xdffffe);
	nvkm_wr32(dev, 0x419e4c, 0x5);
}
/* Program GR register pairs from the VBIOS P0260 tables.
 *
 * For each entry E of the P0260E table (up to the number of known ctrl/data
 * pairs in regs[]), write the E-entry's data to the ctrl register, then
 * stream every P0260X entry into the matching data register.
 *
 * NOTE(review): the exact GR units behind these 0x419xxx registers are not
 * documented here.
 */
void
gm107_gr_init_bios(struct gf100_gr *gr)
{
	static const struct {
		u32 ctrl;
		u32 data;
	} regs[] = {
		{ 0x419ed8, 0x419ee0 },
		{ 0x419ad0, 0x419ad4 },
		{ 0x419ae0, 0x419ae4 },
		{ 0x419af0, 0x419af4 },
		{ 0x419af8, 0x419afc },
	};
	struct nvkm_device *device = gr->base.engine.subdev.device;
	struct nvkm_bios *bios = device->bios;
	struct nvbios_P0260E infoE;
	struct nvbios_P0260X infoX;
	int E = -1, X;
	u8 ver, hdr;

	while (nvbios_P0260Ep(bios, ++E, &ver, &hdr, &infoE)) {
		/* comma operator: X is reset to -1 for each outer entry
		 * before the inner X-table walk */
		if (X = -1, E < ARRAY_SIZE(regs)) {
			nvkm_wr32(device, regs[E].ctrl, infoE.data);
			while (nvbios_P0260Xp(bios, ++X, &ver, &hdr, &infoX))
				nvkm_wr32(device, regs[E].data, infoX.data);
		}
	}
}
/* Enable engine-level clockgating on GK104.
 *
 * First pass programs ENG_MANT/ENG_FILTER for every engine that is present,
 * then the FECS/HUBMMU idle filters are written, and a second pass flips
 * ENG_CLK from RUN to AUTO, which actually enables gating.
 */
void
gk104_clkgate_enable(struct nvkm_therm *base)
{
	struct gk104_therm *therm = gk104_therm(base);
	struct nvkm_device *dev = therm->base.subdev.device;
	const struct gk104_clkgate_engine_info *info = therm->clkgate_order;
	int idx;

	/* Pass 1: ENG_MANT, ENG_FILTER for each present engine. */
	for (idx = 0; info[idx].engine != NVKM_SUBDEV_NR; idx++) {
		if (nvkm_device_subdev(dev, info[idx].engine))
			nvkm_mask(dev, 0x20200 + info[idx].offset,
				  0xff00, 0x4500);
	}

	/* magic: idle filters */
	nvkm_wr32(dev, 0x020288, therm->idle_filter->fecs);
	nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);

	/* Pass 2: ENG_CLK = RUN -> AUTO. */
	for (idx = 0; info[idx].engine != NVKM_SUBDEV_NR; idx++) {
		if (nvkm_device_subdev(dev, info[idx].engine))
			nvkm_mask(dev, 0x20200 + info[idx].offset,
				  0x00ff, 0x0045);
	}
}
/* GK104 copy-engine interrupt handler.
 *
 * Reads the per-CE interrupt status masked by the enable register, services
 * and acknowledges each known source (BLOCKPIPE, NONBLOCKPIPE, LAUNCHERR),
 * and finally reports + acks any remaining unknown bits.
 */
void
gk104_ce_intr(struct nvkm_engine *ce)
{
	/* each copy-engine instance has a 0x1000-byte register stride */
	const u32 base = (ce->subdev.index - NVKM_ENGINE_CE0) * 0x1000;
	struct nvkm_subdev *subdev = &ce->subdev;
	struct nvkm_device *device = subdev->device;
	u32 mask = nvkm_rd32(device, 0x104904 + base);
	u32 intr = nvkm_rd32(device, 0x104908 + base) & mask;

	if (intr & 0x00000001) {
		nvkm_warn(subdev, "BLOCKPIPE\n");
		nvkm_wr32(device, 0x104908 + base, 0x00000001); /* ack */
		intr &= ~0x00000001;
	}

	if (intr & 0x00000002) {
		nvkm_warn(subdev, "NONBLOCKPIPE\n");
		nvkm_wr32(device, 0x104908 + base, 0x00000002); /* ack */
		intr &= ~0x00000002;
	}

	if (intr & 0x00000004) {
		gk104_ce_intr_launcherr(ce, base);
		nvkm_wr32(device, 0x104908 + base, 0x00000004); /* ack */
		intr &= ~0x00000004;
	}

	if (intr) {
		/* anything left is unexpected — report and ack it anyway */
		nvkm_warn(subdev, "intr %08x\n", intr);
		nvkm_wr32(device, 0x104908 + base, intr);
	}
}
/* NV31 PBUS init: acknowledge any pending interrupts (0x001100), then
 * enable the interrupt sources the driver handles (0x001140).
 */
static void
nv31_bus_init(struct nvkm_bus *bus)
{
	struct nvkm_device *dev = bus->subdev.device;

	nvkm_wr32(dev, 0x001100, 0xffffffff);
	nvkm_wr32(dev, 0x001140, 0x00070008);
}
/* NV44 PFB init: program the two setup registers with the values the
 * original implementation used (register semantics not documented here).
 */
void
nv44_fb_init(struct nvkm_fb *fb)
{
	struct nvkm_device *dev = fb->subdev.device;

	nvkm_wr32(dev, 0x100850, 0x80000000);
	nvkm_wr32(dev, 0x100800, 0x00000001);
}
/* Build and program the TPC distribution tables at 0x405b60/0x405ba0.
 *
 * dist[] packs one (gpc << 4 | tpc) byte per global TPC index, four bytes
 * to a 32-bit word; gpcs[] packs the global TPC index into the matching
 * GPC's word, one byte per local TPC.
 */
void
gm200_grctx_generate_405b60(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	const u32 dist_nr = DIV_ROUND_UP(gr->tpc_total, 4);
	u32 dist[TPC_MAX / 4] = {};
	u32 gpcs[GPC_MAX] = {};
	u8 tpcnr[GPC_MAX];
	int tpc, gpc, i;

	/* tpcnr[] counts down the un-assigned TPCs per GPC */
	memcpy(tpcnr, gr->tpc_nr, sizeof(gr->tpc_nr));
	/* won't result in the same distribution as the binary driver where
	 * some of the gpcs have more tpcs than others, but this shall do
	 * for the moment. the code for earlier gpus has this issue too.
	 */
	for (gpc = -1, i = 0; i < gr->tpc_total; i++) {
		/* round-robin to the next GPC that still has TPCs left */
		do {
			gpc = (gpc + 1) % gr->gpc_nr;
		} while(!tpcnr[gpc]);
		/* local TPC index within this GPC (0-based) */
		tpc = gr->tpc_nr[gpc] - tpcnr[gpc]--;
		dist[i / 4] |= ((gpc << 4) | tpc) << ((i % 4) * 8);
		gpcs[gpc] |= i << (tpc * 8);
	}

	for (i = 0; i < dist_nr; i++)
		nvkm_wr32(device, 0x405b60 + (i * 4), dist[i]);
	for (i = 0; i < gr->gpc_nr; i++)
		nvkm_wr32(device, 0x405ba0 + (i * 4), gpcs[i]);
}
/* GF100 MSPDEC falcon init: program the two 0x0850xx setup registers
 * (values taken unchanged from the original implementation).
 */
void
gf100_mspdec_init(struct nvkm_falcon *mspdec)
{
	struct nvkm_device *dev = mspdec->engine.subdev.device;

	nvkm_wr32(dev, 0x085010, 0x0000fff2);
	nvkm_wr32(dev, 0x08501c, 0x0000fff2);
}
/* GF100 MSVLD falcon init: program the two 0x0840xx setup registers
 * (values taken unchanged from the original implementation).
 */
void
gf100_msvld_init(struct nvkm_falcon *msvld)
{
	struct nvkm_device *dev = msvld->engine.subdev.device;

	nvkm_wr32(dev, 0x084010, 0x0000fff2);
	nvkm_wr32(dev, 0x08401c, 0x0000fff2);
}
/* Report and acknowledge a display-channel error interrupt for @chid.
 *
 * Reads the per-channel error report registers (12-byte stride), logs them,
 * optionally dumps the channel's method state for known methods, then acks
 * the interrupt and resets the error-report register.
 */
static void
gp104_disp_intr_error(struct nv50_disp *disp, int chid)
{
	struct nvkm_subdev *subdev = &disp->base.engine.subdev;
	struct nvkm_device *device = subdev->device;
	u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12));
	u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12));
	u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12));

	/* first %04x is the method offset (mthd with flag bits masked off) */
	nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n",
		   chid, (mthd & 0x0000ffc), data, mthd, unkn);

	if (chid < ARRAY_SIZE(disp->chan)) {
		switch (mthd & 0xffc) {
		case 0x0080:
			/* dump the channel's method state at error level */
			nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR);
			break;
		default:
			break;
		}
	}

	/* ack the interrupt, then reset the error-report register */
	nvkm_wr32(device, 0x61009c, (1 << chid));
	nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000);
}
/* Clear the compression backing store over [start, limit]: program the
 * range registers, then set the kick bit in 0x17e26c.
 */
void
gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
	struct nvkm_device *dev = ltc->subdev.device;

	nvkm_wr32(dev, 0x17e270, start);
	nvkm_wr32(dev, 0x17e274, limit);
	nvkm_mask(dev, 0x17e26c, 0x00000000, 0x00000004);
}
/* Record the SM id @sm for (gpc, tpc) in the three mapping registers the
 * hardware keeps for it (two per-TPC, one per-GPC).
 */
static void
gv100_grctx_generate_sm_id(struct gf100_gr *gr, int gpc, int tpc, int sm)
{
	struct nvkm_device *dev = gr->base.engine.subdev.device;

	nvkm_wr32(dev, TPC_UNIT(gpc, tpc, 0x608), sm);
	nvkm_wr32(dev, GPC_UNIT(gpc, 0x0c10 + tpc * 4), sm);
	nvkm_wr32(dev, TPC_UNIT(gpc, tpc, 0x088), sm);
}
void nv44_fb_tile_prog(struct nvkm_fb *fb, int i, struct nvkm_fb_tile *tile) { struct nvkm_device *device = fb->subdev.device; nvkm_wr32(device, 0x100604 + (i * 0x10), tile->limit); nvkm_wr32(device, 0x100608 + (i * 0x10), tile->pitch); nvkm_wr32(device, 0x100600 + (i * 0x10), tile->addr); nvkm_rd32(device, 0x100600 + (i * 0x10)); }
static void gm107_ltc_init(struct nvkm_ltc *ltc) { struct nvkm_device *device = ltc->subdev.device; u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001); nvkm_wr32(device, 0x17e27c, ltc->ltc_nr); nvkm_wr32(device, 0x17e278, ltc->tag_base); nvkm_mask(device, 0x17e264, 0x00000002, lpg128 ? 0x00000002 : 0x00000000); }
/* Load color ZBC table entry @i: select the index via 0x17e338, then write
 * the four components into the consecutive registers at 0x17e33c..0x17e348.
 */
void
gm107_ltc_zbc_clear_color(struct nvkm_ltc *ltc, int i, const u32 color[4])
{
	struct nvkm_device *dev = ltc->subdev.device;
	int c;

	nvkm_mask(dev, 0x17e338, 0x0000000f, i);
	for (c = 0; c < 4; c++)
		nvkm_wr32(dev, 0x17e33c + c * 4, color[c]);
}
/* Load depth ZBC table entry @zbc into hardware.
 *
 * The clear value (ds) is only programmed for entries whose format is
 * non-zero; the format, table index and trigger are always written, so a
 * zero-format entry effectively invalidates the slot.
 */
static void
gf100_gr_zbc_clear_depth(struct gf100_gr *gr, int zbc)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;

	if (gr->zbc_depth[zbc].format)
		nvkm_wr32(device, 0x405818, gr->zbc_depth[zbc].ds);
	nvkm_wr32(device, 0x40581c, gr->zbc_depth[zbc].format);
	nvkm_wr32(device, 0x405820, zbc);
	nvkm_wr32(device, 0x405824, 0x00000005); /* TRIGGER | WRITE | DEPTH */
}
static void gk104_ltc_init(struct nvkm_ltc *ltc) { struct nvkm_device *device = ltc->subdev.device; u32 lpg128 = !(nvkm_rd32(device, 0x100c80) & 0x00000001); nvkm_wr32(device, 0x17e8d8, ltc->ltc_nr); nvkm_wr32(device, 0x17e000, ltc->ltc_nr); nvkm_wr32(device, 0x17e8d4, ltc->tag_base); nvkm_mask(device, 0x17e8c0, 0x00000002, lpg128 ? 0x00000002 : 0x00000000); }
/* GF100 IBUS init.  The values are carried over unchanged from the
 * original implementation; register semantics are not documented here.
 * Always returns 0.
 */
static int
gf100_ibus_init(struct nvkm_subdev *ibus)
{
	struct nvkm_device *dev = ibus->device;

	nvkm_mask(dev, 0x122310, 0x0003ffff, 0x00000800);
	nvkm_wr32(dev, 0x12232c, 0x00100064);
	nvkm_wr32(dev, 0x122330, 0x00100064);
	nvkm_wr32(dev, 0x122334, 0x00100064);
	nvkm_mask(dev, 0x122348, 0x0003ffff, 0x00000100);
	return 0;
}
/* Issue a "magic" command word via 0x00c800 and, once the unit reports
 * ready (bit 30 set, polled for up to 2ms), push @size zero words into
 * the 0x00c804 data port, then return the command register to idle.
 *
 * NOTE(review): the original text of this function was truncated — the
 * closing brace (and the trailing 0x00c800 reset write present in the
 * upstream variant of this helper) were missing; restored here.  Confirm
 * against the original source.
 */
static void
magic_(struct nvkm_device *device, u32 ctrl, int size)
{
	nvkm_wr32(device, 0x00c800, 0x00000000);
	nvkm_wr32(device, 0x00c808, 0x00000000);
	nvkm_wr32(device, 0x00c800, ctrl);
	nvkm_msec(device, 2000,
		if (nvkm_rd32(device, 0x00c800) & 0x40000000) {
			while (size--)
				nvkm_wr32(device, 0x00c804, 0x00000000);
			break;
		}
	);
	nvkm_wr32(device, 0x00c800, 0x00000000);
}
/* Mirror PFB MMU/VRAM configuration into the GPC-broadcast MMU registers
 * for GM20B, with a secure-boot bypass attempt first.
 */
static void
gm20b_gr_init_gpc_mmu(struct gf100_gr *gr)
{
	struct nvkm_device *device = gr->base.engine.subdev.device;
	u32 val;

	/* Bypass MMU check for non-secure boot */
	if (!device->secboot) {
		nvkm_wr32(device, 0x100ce4, 0xffffffff);
		/* if the write didn't stick, the bypass failed */
		if (nvkm_rd32(device, 0x100ce4) != 0xffffffff)
			nvdev_warn(device,
				   "cannot bypass secure boot - expect failure soon!\n");
	}

	/* copy PFB 0x100c80 into 0x418880, keeping only the masked bits
	 * — NOTE(review): the meaning of mask 0xf000087f is not documented
	 * here */
	val = nvkm_rd32(device, 0x100c80);
	val &= 0xf000087f;
	nvkm_wr32(device, 0x418880, val);
	nvkm_wr32(device, 0x418890, 0);
	nvkm_wr32(device, 0x418894, 0);

	/* mirror PFB 0x100cc4/0x100cc8/0x100ccc and 0x100800 */
	nvkm_wr32(device, 0x4188b0, nvkm_rd32(device, 0x100cc4));
	nvkm_wr32(device, 0x4188b4, nvkm_rd32(device, 0x100cc8));
	nvkm_wr32(device, 0x4188b8, nvkm_rd32(device, 0x100ccc));
	nvkm_wr32(device, 0x4188ac, nvkm_rd32(device, 0x100800));
}
/* Load the 64-bit PTIMER counter with @time, writing the high word before
 * the low word (order preserved from the original).
 */
void
nv04_timer_time(struct nvkm_timer *tmr, u64 time)
{
	struct nvkm_subdev *subdev = &tmr->subdev;
	struct nvkm_device *dev = subdev->device;
	u32 hi = upper_32_bits(time);
	u32 lo = lower_32_bits(time);

	nvkm_debug(subdev, "time low : %08x\n", lo);
	nvkm_debug(subdev, "time high : %08x\n", hi);

	nvkm_wr32(dev, NV04_PTIMER_TIME_1, hi);
	nvkm_wr32(dev, NV04_PTIMER_TIME_0, lo);
}
/* Flush the buffered MEMX method, if any, out to the PMU data port:
 * a header word (size << 16 | mthd) followed by the data words, then the
 * buffer state is reset.
 */
static void
memx_out(struct nvkm_memx *memx)
{
	struct nvkm_device *dev = memx->pmu->subdev.device;
	int n;

	if (!memx->c.mthd)
		return;

	nvkm_wr32(dev, 0x10a1c4, (memx->c.size << 16) | memx->c.mthd);
	for (n = 0; n < memx->c.size; n++)
		nvkm_wr32(dev, 0x10a1c4, memx->c.data[n]);
	memx->c.mthd = 0;
	memx->c.size = 0;
}
/* Read the speedo fuse (offset 0x3a8), toggling register 0x122634 around
 * the read as the original code did (NOTE(review): exact semantics of
 * 0x122634 are not documented here).  Returns the fuse value, or -EINVAL
 * when the device has no fuse subdev.
 */
static int
gk104_volt_speedo_read(struct nvkm_volt *volt)
{
	struct nvkm_device *dev = volt->subdev.device;
	struct nvkm_fuse *fuse = dev->fuse;
	int val;

	if (!fuse)
		return -EINVAL;

	nvkm_wr32(dev, 0x122634, 0x0);
	val = nvkm_fuse_read(fuse, 0x3a8);
	nvkm_wr32(dev, 0x122634, 0x41);
	return val;
}
/* Load depth ZBC table entry @i: select the index via 0x17e338, then
 * program the depth clear value.
 */
void
gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
	struct nvkm_device *dev = ltc->subdev.device;

	nvkm_mask(dev, 0x17e338, 0x0000000f, i);
	nvkm_wr32(dev, 0x17e34c, depth);
}
static int nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct nv04_fifo_chan *chan = nv04_fifo_chan(base); struct nv04_fifo *fifo = chan->fifo; struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_instmem *imem = device->imem; unsigned long flags; u32 inst, reg, ctx; int chid; if (!nv40_fifo_dma_engine(engine, ®, &ctx)) return 0; inst = chan->engn[engine->subdev.index]->addr >> 4; spin_lock_irqsave(&fifo->base.lock, flags); nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1); if (chid == chan->base.chid) nvkm_wr32(device, reg, inst); nvkm_kmap(imem->ramfc); nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst); nvkm_done(imem->ramfc); nvkm_mask(device, 0x002500, 0x00000001, 0x00000001); spin_unlock_irqrestore(&fifo->base.lock, flags); return 0; }