/* Power up the GPC PLL, wait for it to lock, and switch GPC2CLK to VCO mode. */
static int gk20a_pllg_enable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;
	u32 val;

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, GPCPLL_CFG_ENABLE);
	nvkm_rd32(device, GPCPLL_CFG);	/* read back to flush the write */

	/* enable lock detection */
	val = nvkm_rd32(device, GPCPLL_CFG);
	if (val & GPCPLL_CFG_LOCK_DET_OFF) {
		val &= ~GPCPLL_CFG_LOCK_DET_OFF;
		nvkm_wr32(device, GPCPLL_CFG, val);
	}

	/* wait for lock (up to 300us) */
	if (nvkm_wait_usec(device, 300, GPCPLL_CFG, GPCPLL_CFG_LOCK,
			   GPCPLL_CFG_LOCK) < 0)
		return -ETIMEDOUT;

	/* switch to VCO mode */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT),
		  BIT(SEL_VCO_GPC2CLK_OUT_SHIFT));

	return 0;
}
/*
 * Shut down (or suspend) a subdev: run its fini hook, then pulse the
 * engine's PMC reset bit so the unit comes back in a clean state.
 * On suspend, a failing fini hook aborts with its error code.
 */
int
nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend)
{
	struct nvkm_device *device = subdev->device;
	const char *action = suspend ? "suspend" : "fini";
	u32 pmc_enable = subdev->pmc_enable;
	s64 start;

	nvkm_trace(subdev, "%s running...\n", action);
	start = ktime_to_us(ktime_get());

	if (subdev->func->fini) {
		int ret = subdev->func->fini(subdev, suspend);
		if (ret) {
			nvkm_error(subdev, "%s failed, %d\n", action, ret);
			if (suspend)
				return ret;
		}
	}

	if (pmc_enable) {
		/* pulse reset: disable, re-enable, then flush with a read */
		nvkm_mask(device, 0x000200, pmc_enable, 0x00000000);
		nvkm_mask(device, 0x000200, pmc_enable, pmc_enable);
		nvkm_rd32(device, 0x000200);
	}

	nvkm_trace(subdev, "%s completed in %lldus\n", action,
		   ktime_to_us(ktime_get()) - start);
	return 0;
}
/* Update the same bits in both PMC interrupt-enable registers. */
void
gf100_mc_intr_mask(struct nvkm_mc *mc, u32 mask, u32 stat)
{
	struct nvkm_device *dev = mc->subdev.device;

	nvkm_mask(dev, 0x000640, mask, stat);
	nvkm_mask(dev, 0x000644, mask, stat);
}
/*
 * gf119_gpio_reset() - drive GPIO lines back to their VBIOS defaults.
 * @gpio: GPIO subdev
 * @match: DCB GPIO function to reset, or DCB_GPIO_UNUSED for all entries
 *
 * Walks the DCB GPIO table; for every used entry that matches @match,
 * sets the line to its table-specified default state and programs the
 * per-line routing registers from the entry's data.
 */
void gf119_gpio_reset(struct nvkm_gpio *gpio, u8 match)
{
	struct nvkm_device *device = gpio->subdev.device;
	struct nvkm_bios *bios = device->bios;
	u8 ver, len;
	u16 entry;
	int ent = -1;

	while ((entry = dcb_gpio_entry(bios, 0, ++ent, &ver, &len))) {
		u32 data = nvbios_rd32(bios, entry);
		u8 line = (data & 0x0000003f);		/* GPIO line number */
		u8 defs = !!(data & 0x00000080);	/* default output state */
		u8 func = (data & 0x0000ff00) >> 8;	/* DCB function id */
		u8 unk0 = (data & 0x00ff0000) >> 16;	/* routed to 0xd610+ */
		u8 unk1 = (data & 0x1f000000) >> 24;	/* 1-based 0xd740 index */

		/* skip unused entries, and non-matching ones when filtering */
		if (func == DCB_GPIO_UNUSED ||
		    (match != DCB_GPIO_UNUSED && match != func))
			continue;

		nvkm_gpio_set(gpio, 0, func, line, defs);

		nvkm_mask(device, 0x00d610 + (line * 4), 0xff, unk0);
		/* unk1 is biased by one: zero means "no 0xd740 slot" */
		if (unk1--)
			nvkm_mask(device, 0x00d740 + (unk1 * 4), 0xff, line);
	}
}
static int nv40_fifo_dma_engine_init(struct nvkm_fifo_chan *base, struct nvkm_engine *engine) { struct nv04_fifo_chan *chan = nv04_fifo_chan(base); struct nv04_fifo *fifo = chan->fifo; struct nvkm_device *device = fifo->base.engine.subdev.device; struct nvkm_instmem *imem = device->imem; unsigned long flags; u32 inst, reg, ctx; int chid; if (!nv40_fifo_dma_engine(engine, ®, &ctx)) return 0; inst = chan->engn[engine->subdev.index]->addr >> 4; spin_lock_irqsave(&fifo->base.lock, flags); nvkm_mask(device, 0x002500, 0x00000001, 0x00000000); chid = nvkm_rd32(device, 0x003204) & (fifo->base.nr - 1); if (chid == chan->base.chid) nvkm_wr32(device, reg, inst); nvkm_kmap(imem->ramfc); nvkm_wo32(imem->ramfc, chan->ramfc + ctx, inst); nvkm_done(imem->ramfc); nvkm_mask(device, 0x002500, 0x00000001, 0x00000001); spin_unlock_irqrestore(&fifo->base.lock, flags); return 0; }
int gk20a_clk_setup_slide(struct gk20a_clk *clk) { struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; u32 step_a, step_b; switch (clk->parent_rate) { case 12000000: case 12800000: case 13000000: step_a = 0x2b; step_b = 0x0b; break; case 19200000: step_a = 0x12; step_b = 0x08; break; case 38400000: step_a = 0x04; step_b = 0x05; break; default: nvkm_error(subdev, "invalid parent clock rate %u KHz", clk->parent_rate / KHZ); return -EINVAL; } nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT, step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT); nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT, step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT); return 0; }
/* Bring the clock subdev out of IDDQ and program the lowest pstate. */
static int gk20a_clk_init(struct nvkm_clk *base)
{
	struct gk20a_clk *clk = gk20a_clk(base);
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	int ret;

	/* get out from IDDQ */
	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0);
	nvkm_rd32(device, GPCPLL_CFG);	/* flush the write */
	udelay(5);			/* let the PLL circuitry power up */

	nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK,
		  GPC2CLK_OUT_INIT_VAL);

	ret = gk20a_clk_setup_slide(clk);
	if (ret)
		return ret;

	/* Start with lowest frequency */
	base->func->calc(base, &base->func->pstates[0].base);
	ret = base->func->prog(&clk->base);
	if (ret) {
		nvkm_error(subdev, "cannot initialize clock\n");
		return ret;
	}

	return 0;
}
/*
 * Enable engine-level clockgating: program mantissa/filter values for
 * each present engine, set the idle filters, then flip every engine's
 * ENG_CLK mode from RUN to AUTO.
 */
void
gk104_clkgate_enable(struct nvkm_therm *base)
{
	struct gk104_therm *therm = gk104_therm(base);
	struct nvkm_device *dev = therm->base.subdev.device;
	const struct gk104_clkgate_engine_info *info = therm->clkgate_order;
	int i;

	/* Program ENG_MANT, ENG_FILTER for engines that exist. */
	for (i = 0; info[i].engine != NVKM_SUBDEV_NR; i++) {
		if (!nvkm_device_subdev(dev, info[i].engine))
			continue;
		nvkm_mask(dev, 0x20200 + info[i].offset, 0xff00, 0x4500);
	}

	/* idle filters (magic values) */
	nvkm_wr32(dev, 0x020288, therm->idle_filter->fecs);
	nvkm_wr32(dev, 0x02028c, therm->idle_filter->hubmmu);

	/* Enable clockgating (ENG_CLK = RUN->AUTO). */
	for (i = 0; info[i].engine != NVKM_SUBDEV_NR; i++) {
		if (!nvkm_device_subdev(dev, info[i].engine))
			continue;
		nvkm_mask(dev, 0x20200 + info[i].offset, 0x00ff, 0x0045);
	}
}
/* Mirror the active-FBP count into the ZROP and CROP config registers. */
void
gk104_gr_init_rop_active_fbps(struct gf100_gr *gr)
{
	struct nvkm_device *dev = gr->base.engine.subdev.device;
	const u32 active = nvkm_rd32(dev, 0x120074);

	nvkm_mask(dev, 0x408850, 0x0000000f, active); /* zrop */
	nvkm_mask(dev, 0x408958, 0x0000000f, active); /* crop */
}
/* Mirror the active-FBP count into the ZROP and CROP config registers. */
static void
gm200_grctx_generate_rop_active_fbps(struct gf100_gr *gr)
{
	struct nvkm_device *dev = gr->base.engine.subdev.device;
	const u32 active = nvkm_rd32(dev, 0x12006c);

	nvkm_mask(dev, 0x408850, 0x0000000f, active); /* zrop */
	nvkm_mask(dev, 0x408958, 0x0000000f, active); /* crop */
}
/* Set a GPIO line's direction/output bits, then trigger an update. */
int
gf119_gpio_drive(struct nvkm_gpio *gpio, int line, int dir, int out)
{
	struct nvkm_device *dev = gpio->subdev.device;
	u32 bits = ((dir ^ 1) << 13) | (out << 12);

	nvkm_mask(dev, 0x00d610 + (line * 4), 0x00003000, bits);
	nvkm_mask(dev, 0x00d604, 0x00000001, 0x00000001); /* update? */
	return 0;
}
/*
 * Initialise the GF117 IBUS.  NOTE(review): register values look like
 * timeout/credit tuning, but their exact meaning is undocumented here.
 */
static int
gf117_ibus_init(struct nvkm_subdev *ibus)
{
	struct nvkm_device *dev = ibus->device;

	nvkm_mask(dev, 0x122310, 0x0003ffff, 0x00000800);
	nvkm_mask(dev, 0x122348, 0x0003ffff, 0x00000100);
	nvkm_mask(dev, 0x1223b0, 0x0003ffff, 0x00000fff);
	return 0;
}
/* Set assorted (undocumented) PGRAPH context-generation enable bits. */
static void
gv100_grctx_generate_unkn(struct gf100_gr *gr)
{
	struct nvkm_device *dev = gr->base.engine.subdev.device;

	nvkm_mask(dev, 0x41980c, 0x00000010, 0x00000010);
	nvkm_mask(dev, 0x41be08, 0x00000004, 0x00000004);
	nvkm_mask(dev, 0x4064c0, 0x80000000, 0x80000000);
	nvkm_mask(dev, 0x405800, 0x08000000, 0x08000000);
	nvkm_mask(dev, 0x419c00, 0x00000008, 0x00000008);
}
/* Disable the GPC PLL; must switch the output to bypass first. */
static void gk20a_pllg_disable(struct gk20a_clk *clk)
{
	struct nvkm_device *device = clk->base.subdev.device;

	/* put PLL in bypass before disabling it */
	nvkm_mask(device, SEL_VCO, BIT(SEL_VCO_GPC2CLK_OUT_SHIFT), 0);

	nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_ENABLE, 0);
	nvkm_rd32(device, GPCPLL_CFG);	/* read back to flush the write */
}
/*
 * Initialise the GF100 IBUS.  NOTE(review): values appear to be
 * timeout/credit tuning; exact semantics are undocumented here.
 */
static int
gf100_ibus_init(struct nvkm_subdev *ibus)
{
	struct nvkm_device *dev = ibus->device;

	nvkm_mask(dev, 0x122310, 0x0003ffff, 0x00000800);
	nvkm_wr32(dev, 0x12232c, 0x00100064);
	nvkm_wr32(dev, 0x122330, 0x00100064);
	nvkm_wr32(dev, 0x122334, 0x00100064);
	nvkm_mask(dev, 0x122348, 0x0003ffff, 0x00000100);
	return 0;
}
/* LMA_ENABLE method handler: idle PGRAPH, then set the enable bits. */
static void
nv17_gr_mthd_lma_enable(struct nv10_gr_chan *chan, u32 mthd, u32 data)
{
	struct nvkm_device *dev = chan->object.engine->subdev.device;
	struct nvkm_gr *gr = &chan->gr->base;

	/* drain PGRAPH before touching debug state */
	nv04_gr_idle(gr);

	nvkm_mask(dev, NV10_PGRAPH_DEBUG_4, 0x00000100, 0x00000100);
	nvkm_mask(dev, 0x4006b0, 0x08000000, 0x08000000);
}
/* Toggle bit 4 in three (undocumented) PGRAPH registers, flushing each. */
static void
gv100_grctx_unkn88c(struct gf100_gr *gr, bool on)
{
	struct nvkm_device *dev = gr->base.engine.subdev.device;
	const u32 m = 0x00000010;
	const u32 v = on ? m : 0x00000000;

	nvkm_mask(dev, 0x40988c, m, v);
	nvkm_rd32(dev, 0x40988c);	/* flush */
	nvkm_mask(dev, 0x41a88c, m, v);
	nvkm_rd32(dev, 0x41a88c);	/* flush */
	nvkm_mask(dev, 0x408a14, m, v);
	nvkm_rd32(dev, 0x408a14);	/* flush */
}
/*
 * Pre-init for NV04-era devinit: make i2c reachable, take VGA ownership,
 * and decide whether a VBIOS POST is required by probing CRTC timing
 * registers (an all-zero htotal means the adaptor was never initialised).
 */
void nv04_devinit_preinit(struct nvkm_devinit *base)
{
	struct nv04_devinit *init = nv04_devinit(base);
	struct nvkm_subdev *subdev = &init->base.subdev;
	struct nvkm_device *device = subdev->device;

	/* make i2c busses accessible */
	nvkm_mask(device, 0x000200, 0x00000001, 0x00000001);

	/* unslave crtcs */
	if (init->owner < 0)
		init->owner = nvkm_rdvgaowner(device);	/* save to restore later */
	nvkm_wrvgaowner(device, 0);

	if (!init->base.post) {
		/* reassemble htotal from its scattered CRTC register bits */
		u32 htotal = nvkm_rdvgac(device, 0, 0x06);
		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8;
		htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4;
		htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10;
		htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11;
		if (!htotal) {
			nvkm_debug(subdev, "adaptor not initialised\n");
			init->base.post = true;
		}
	}
}
/* Select ZBC table slot 'i', then store the depth clear value into it. */
void
gm107_ltc_zbc_clear_depth(struct nvkm_ltc *ltc, int i, const u32 depth)
{
	struct nvkm_device *dev = ltc->subdev.device;

	nvkm_mask(dev, 0x17e338, 0x0000000f, i);
	nvkm_wr32(dev, 0x17e34c, depth);
}
/*
 * Acquire the AUX channel: wait for any in-flight transaction to drain,
 * request ownership via the 0x00300000 field, and wait for the hardware
 * to acknowledge in the 0x03000000 field.  Returns -EBUSY on timeout.
 */
static int
gm204_i2c_aux_init(struct gm204_i2c_aux *aux)
{
	struct nvkm_device *device = aux->base.pad->i2c->subdev.device;
	const u32 unksel = 1; /* nfi which to use, or if it matters.. */
	const u32 ureq = unksel ? 0x00100000 : 0x00200000;
	const u32 urep = unksel ? 0x01000000 : 0x02000000;
	u32 ctrl, timeout;

	/* wait up to 1ms for any previous transaction to be done... */
	timeout = 1000;
	do {
		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
		udelay(1);
		if (!timeout--) {
			AUX_ERR(&aux->base, "begin idle timeout %08x", ctrl);
			return -EBUSY;
		}
	} while (ctrl & 0x03010000);	/* busy/ownership bits */

	/* set some magic, and wait up to 1ms for it to appear */
	nvkm_mask(device, 0x00d954 + (aux->ch * 0x50), 0x00300000, ureq);
	timeout = 1000;
	do {
		ctrl = nvkm_rd32(device, 0x00d954 + (aux->ch * 0x50));
		udelay(1);
		if (!timeout--) {
			AUX_ERR(&aux->base, "magic wait %08x", ctrl);
			/* release what we requested before bailing */
			gm204_i2c_aux_fini(aux);
			return -EBUSY;
		}
	} while ((ctrl & 0x03000000) != urep);

	return 0;
}
static void nv04_bus_intr(struct nvkm_bus *bus) { struct nvkm_subdev *subdev = &bus->subdev; struct nvkm_device *device = subdev->device; u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140); if (stat & 0x00000001) { nvkm_error(subdev, "BUS ERROR\n"); stat &= ~0x00000001; nvkm_wr32(device, 0x001100, 0x00000001); } if (stat & 0x00000110) { struct nvkm_gpio *gpio = device->gpio; if (gpio) nvkm_subdev_intr(&gpio->subdev); stat &= ~0x00000110; nvkm_wr32(device, 0x001100, 0x00000110); } if (stat) { nvkm_error(subdev, "intr %08x\n", stat); nvkm_mask(device, 0x001140, stat, 0x00000000); } }
/* Clear the comptag (CBC) range [start, limit]. */
void
gm107_ltc_cbc_clear(struct nvkm_ltc *ltc, u32 start, u32 limit)
{
	struct nvkm_device *device = ltc->subdev.device;

	nvkm_wr32(device, 0x17e270, start);
	nvkm_wr32(device, 0x17e274, limit);
	/* mask of zero clears nothing: this only ORs in the trigger bit */
	nvkm_mask(device, 0x17e26c, 0x00000000, 0x00000004);
}
/* Enable/disable the display channel's interrupt in 0x610028. */
void
nv50_disp_chan_intr(struct nv50_disp_chan *chan, bool en)
{
	struct nvkm_device *device = chan->disp->base.engine.subdev.device;
	/* NOTE(review): the shift is evaluated in 32-bit int before being
	 * widened to u64, so this relies on chid.user < 15 — confirm. */
	const u64 mask = 0x00010001 << chan->chid.user;
	const u64 data = en ? 0x00010000 : 0x00000000;

	nvkm_mask(device, 0x610028, mask, data);
}
static void nv50_disp_chan_uevent_init(struct nvkm_event *event, int types, int index) { struct nv50_disp *disp = container_of(event, typeof(*disp), uevent); struct nvkm_device *device = disp->base.engine.subdev.device; nvkm_wr32(device, 0x610020, 0x00000001 << index); nvkm_mask(device, 0x610028, 0x00000001 << index, 0x00000001 << index); }
/* Program the SOR clock divider select. */
void
nv50_sor_clock(struct nvkm_ior *sor)
{
	struct nvkm_device *device = sor->disp->engine.subdev.device;
	const int div = sor->asy.link == 3;	/* 1 when both sublinks in use */
	const u32 soff = nv50_ior_base(sor);

	/* same divider value in both nibbles covered by the mask */
	nvkm_mask(device, 0x614300 + soff, 0x00000707, (div << 8) | div);
}
/*
 * Tear down a DMA channel: if it is currently resident on the hardware,
 * save its live PFIFO register state back into its RAMFC image and load
 * a null context in its place, then re-enable the cache.  The RAMFC
 * layout is described by the fifo's ramfc table (regp/regs = register
 * and bit offset, ctxp/ctxs = RAMFC word and bit offset, bits = width).
 */
void
nv04_fifo_dma_fini(struct nvkm_fifo_chan *base)
{
	struct nv04_fifo_chan *chan = nv04_fifo_chan(base);
	struct nv04_fifo *fifo = chan->fifo;
	struct nvkm_device *device = fifo->base.engine.subdev.device;
	struct nvkm_memory *fctx = device->imem->ramfc;
	const struct nv04_fifo_ramfc *c;
	unsigned long flags;
	u32 mask = fifo->base.nr - 1;
	u32 data = chan->ramfc;
	u32 chid;

	/* prevent fifo context switches */
	spin_lock_irqsave(&fifo->base.lock, flags);
	nvkm_wr32(device, NV03_PFIFO_CACHES, 0);

	/* if this channel is active, replace it with a null context */
	chid = nvkm_rd32(device, NV03_PFIFO_CACHE1_PUSH1) & mask;
	if (chid == chan->base.chid) {
		/* stop pusher/puller before touching state */
		nvkm_mask(device, NV04_PFIFO_CACHE1_DMA_PUSH, 0x00000001, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 0);
		nvkm_mask(device, NV04_PFIFO_CACHE1_PULL0, 0x00000001, 0);

		/* copy each live register field into its RAMFC slot */
		c = fifo->ramfc;
		nvkm_kmap(fctx);
		do {
			u32 rm = ((1ULL << c->bits) - 1) << c->regs;
			u32 cm = ((1ULL << c->bits) - 1) << c->ctxs;
			u32 rv = (nvkm_rd32(device, c->regp) & rm) >> c->regs;
			u32 cv = (nvkm_ro32(fctx, c->ctxp + data) & ~cm);
			nvkm_wo32(fctx, c->ctxp + data, cv | (rv << c->ctxs));
		} while ((++c)->bits);
		nvkm_done(fctx);

		/* zero the live registers (null context) */
		c = fifo->ramfc;
		do {
			nvkm_wr32(device, c->regp, 0x00000000);
		} while ((++c)->bits);

		nvkm_wr32(device, NV03_PFIFO_CACHE1_GET, 0);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUT, 0);
		/* 'mask' (nr - 1) doubles as the null channel id here */
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH1, mask);
		nvkm_wr32(device, NV03_PFIFO_CACHE1_PUSH0, 1);
		nvkm_wr32(device, NV04_PFIFO_CACHE1_PULL0, 1);
	}
}
/* Enable/disable the WIMM channel interrupt for this head. */
static void
gv100_disp_wimm_intr(struct nv50_disp_chan *chan, bool en)
{
	struct nvkm_device *dev = chan->disp->base.engine.subdev.device;
	const u32 bit = 0x00000001 << chan->head;

	nvkm_mask(dev, 0x611da8, bit, en ? bit : 0);
}
/*
 * Program a PLL of the given type to the requested frequency (kHz),
 * using coefficients from the VBIOS PLL table.
 *
 * Fix: nv04_pll_calc() returns the achieved clock on success and 0 on
 * failure, so the failure branch used to 'return ret' — i.e. return 0,
 * reporting success to the caller without programming anything.  Now
 * returns -EINVAL so callers actually see the failure.
 */
int
nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
	struct nvkm_subdev *subdev = &init->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_bios *bios = device->bios;
	struct nvbios_pll info;
	int N1, M1, N2, M2, P;
	int ret;

	ret = nvbios_pll_parse(bios, type, &info);
	if (ret) {
		nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
		return ret;
	}

	ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
	if (!ret) {
		nvkm_error(subdev, "failed pll calculation\n");
		return -EINVAL;
	}

	switch (info.type) {
	case PLL_VPLL0:
	case PLL_VPLL1:
		nvkm_wr32(device, info.reg + 0, 0x10000611);
		nvkm_mask(device, info.reg + 4, 0x00ff00ff, (M1 << 16) | N1);
		nvkm_mask(device, info.reg + 8, 0x7fff00ff, (P << 28) |
							    (M2 << 16) | N2);
		break;
	case PLL_MEMORY:
		nvkm_mask(device, info.reg + 0, 0x01ff0000,
			  (P << 22) | (info.bias_p << 19) | (P << 16));
		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
		break;
	default:
		nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
		nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
		break;
	}

	return 0;
}
/*
 * Dynamically ramp the PLL to a new NDIV value without losing lock.
 * The sequence (slowdown -> write new NDIV -> dynamic ramp -> wait for
 * DONE -> exit slowdown) is order- and timing-critical.
 */
static int gk20a_pllg_slide(struct gk20a_clk *clk, u32 n)
{
	struct nvkm_subdev *subdev = &clk->base.subdev;
	struct nvkm_device *device = subdev->device;
	struct gk20a_pll pll;
	int ret = 0;

	/* get old coefficients */
	gk20a_pllg_read_mnp(clk, &pll);
	/* do nothing if NDIV is the same */
	if (n == pll.n)
		return 0;

	/* pll slowdown mode */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT));

	/* new ndiv ready for ramp */
	pll.n = n;
	udelay(1);
	gk20a_pllg_write_mnp(clk, &pll);

	/* dynamic ramp to new ndiv */
	udelay(1);
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT),
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT));

	/* wait for ramping to complete */
	if (nvkm_wait_usec(device, 500, GPC_BCAST_NDIV_SLOWDOWN_DEBUG,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK,
		GPC_BCAST_NDIV_SLOWDOWN_DEBUG_PLL_DYNRAMP_DONE_SYNCED_MASK) < 0)
		ret = -ETIMEDOUT;

	/* exit slowdown mode (also done on timeout, to restore state) */
	nvkm_mask(device, GPCPLL_NDIV_SLOWDOWN,
		  BIT(GPCPLL_NDIV_SLOWDOWN_SLOWDOWN_USING_PLL_SHIFT) |
		  BIT(GPCPLL_NDIV_SLOWDOWN_EN_DYNRAMP_SHIFT), 0);
	nvkm_rd32(device, GPCPLL_NDIV_SLOWDOWN);	/* flush */

	return ret;
}
/*
 * Handle the SOR_HDMI_PWR display method: enable/disable HDMI on a head
 * and program the AVI and generic InfoFrames.
 */
int
gk104_hdmi_ctrl(NV50_DISP_MTHD_V1)
{
	struct nvkm_device *device = disp->base.engine.subdev.device;
	const u32 hoff = (head * 0x800);	/* per-head SOR offset */
	const u32 hdmi = (head * 0x400);	/* per-head HDMI offset */
	union {
		struct nv50_disp_sor_hdmi_pwr_v0 v0;
	} *args = data;
	u32 ctrl;
	/* NOTE(review): 'ret' is assigned inside the nvif_unpack() macro;
	 * the bare 'return ret' below relies on that — confirm against
	 * the macro definition. */
	int ret;

	nvif_ioctl(object, "disp sor hdmi ctrl size %d\n", size);
	if (nvif_unpack(args->v0, 0, 0, false)) {
		nvif_ioctl(object, "disp sor hdmi ctrl vers %d state %d "
				   "max_ac_packet %d rekey %d\n",
			   args->v0.version, args->v0.state,
			   args->v0.max_ac_packet, args->v0.rekey);
		if (args->v0.max_ac_packet > 0x1f || args->v0.rekey > 0x7f)
			return -EINVAL;
		ctrl  = 0x40000000 * !!args->v0.state;	/* enable bit */
		ctrl |= args->v0.max_ac_packet << 16;
		ctrl |= args->v0.rekey;
	} else
		return ret;

	if (!(ctrl & 0x40000000)) {
		/* disabling: turn off HDMI and both InfoFrames */
		nvkm_mask(device, 0x616798 + hoff, 0x40000000, 0x00000000);
		nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
		nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
		return 0;
	}

	/* AVI InfoFrame (disable while updating, then re-enable) */
	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000000);
	nvkm_wr32(device, 0x690008 + hdmi, 0x000d0282);
	nvkm_wr32(device, 0x69000c + hdmi, 0x0000006f);
	nvkm_wr32(device, 0x690010 + hdmi, 0x00000000);
	nvkm_wr32(device, 0x690014 + hdmi, 0x00000000);
	nvkm_wr32(device, 0x690018 + hdmi, 0x00000000);
	nvkm_mask(device, 0x690000 + hdmi, 0x00000001, 0x00000001);

	/* ??? InfoFrame? */
	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000000);
	nvkm_wr32(device, 0x6900cc + hdmi, 0x00000010);
	nvkm_mask(device, 0x6900c0 + hdmi, 0x00000001, 0x00000001);

	/* ??? */
	nvkm_wr32(device, 0x690080 + hdmi, 0x82000000);

	/* HDMI_CTRL */
	nvkm_mask(device, 0x616798 + hoff, 0x401f007f, ctrl);
	return 0;
}