/**
 * gm20b_secboot_tegra_read_wpr() - read the WPR registers on Tegra
 *
 * On dGPU, we can manage the WPR region ourselves, but on Tegra the WPR region
 * is reserved from system memory by the bootloader and irreversibly locked.
 * This function reads the address and size of the pre-configured WPR region.
 */
int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base)
{
        struct nvkm_secboot *sb = &gsb->base;
        void __iomem *mc;
        u32 cfg;

        mc = ioremap(mc_base, 0xd00);
        if (!mc) {
                nvkm_error(&sb->subdev, "Cannot map Tegra MC registers\n");
                /* ioremap() returns NULL on failure, not an ERR_PTR */
                return -ENOMEM;
        }
        sb->wpr_addr = ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_0) |
                       ((u64)ioread32_native(mc + MC_SECURITY_CARVEOUT2_BOM_HI_0) << 32);
        sb->wpr_size = ioread32_native(mc + MC_SECURITY_CARVEOUT2_SIZE_128K) << 17;
        cfg = ioread32_native(mc + MC_SECURITY_CARVEOUT2_CFG0);
        iounmap(mc);

        /* Check that WPR settings are valid */
        if (sb->wpr_size == 0) {
                nvkm_error(&sb->subdev, "WPR region is empty\n");
                return -EINVAL;
        }

        if (!(cfg & TEGRA_MC_SECURITY_CARVEOUT_CFG_LOCKED)) {
                nvkm_error(&sb->subdev, "WPR region not locked\n");
                return -EINVAL;
        }

        return 0;
}
int nvkm_hwsq_fini(struct nvkm_hwsq **phwsq, bool exec) { struct nvkm_hwsq *hwsq = *phwsq; int ret = 0, i; if (hwsq) { struct nvkm_subdev *subdev = hwsq->subdev; struct nvkm_bus *bus = subdev->device->bus; hwsq->c.size = (hwsq->c.size + 4) / 4; if (hwsq->c.size <= bus->func->hwsq_size) { if (exec) ret = bus->func->hwsq_exec(bus, (u32 *)hwsq->c.data, hwsq->c.size); if (ret) nvkm_error(subdev, "hwsq exec failed: %d\n", ret); } else { nvkm_error(subdev, "hwsq ucode too large\n"); ret = -ENOSPC; } for (i = 0; ret && i < hwsq->c.size; i++) nvkm_error(subdev, "\t%08x\n", ((u32 *)hwsq->c.data)[i]); *phwsq = NULL; kfree(hwsq); } return ret; }
static int gm20b_secboot_prepare_blobs(struct gm200_secboot *gsb) { struct nvkm_subdev *subdev = &gsb->base.subdev; int acr_size; int ret; ret = gm20x_secboot_prepare_blobs(gsb); if (ret) return ret; acr_size = gsb->acr_load_blob->size; /* * On Tegra the WPR region is set by the bootloader. It is illegal for * the HS blob to be larger than this region. */ if (acr_size > gsb->wpr_size) { nvkm_error(subdev, "WPR region too small for FW blob!\n"); nvkm_error(subdev, "required: %dB\n", acr_size); nvkm_error(subdev, "WPR size: %dB\n", gsb->wpr_size); return -ENOSPC; } return 0; }
static void nv04_bus_intr(struct nvkm_bus *bus) { struct nvkm_subdev *subdev = &bus->subdev; struct nvkm_device *device = subdev->device; u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140); if (stat & 0x00000001) { nvkm_error(subdev, "BUS ERROR\n"); stat &= ~0x00000001; nvkm_wr32(device, 0x001100, 0x00000001); } if (stat & 0x00000110) { struct nvkm_gpio *gpio = device->gpio; if (gpio) nvkm_subdev_intr(&gpio->subdev); stat &= ~0x00000110; nvkm_wr32(device, 0x001100, 0x00000110); } if (stat) { nvkm_error(subdev, "intr %08x\n", stat); nvkm_mask(device, 0x001140, stat, 0x00000000); } }
int nvkm_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
{
        struct nvkm_subdev *subdev = &pci->subdev;
        enum nvkm_pcie_speed cur_speed, max_speed;
        struct pci_bus *pbus;
        int ret;

        if (!pci || !pci_is_pcie(pci->pdev))
                return 0;
        pbus = pci->pdev->bus;

        if (!pci->func->pcie.set_link)
                return -ENOSYS;

        nvkm_trace(subdev, "requested %s\n", nvkm_pcie_speeds[speed]);

        if (pci->func->pcie.version(pci) < 2) {
                nvkm_error(subdev, "setting link failed due to low version\n");
                return -ENODEV;
        }

        cur_speed = pci->func->pcie.cur_speed(pci);
        max_speed = min(nvkm_pcie_speed(pbus->max_bus_speed),
                        pci->func->pcie.max_speed(pci));

        nvkm_trace(subdev, "current speed: %s\n", nvkm_pcie_speeds[cur_speed]);

        if (speed > max_speed) {
                nvkm_debug(subdev, "%s not supported by bus or card, dropping "
                           "requested speed to %s\n", nvkm_pcie_speeds[speed],
                           nvkm_pcie_speeds[max_speed]);
                speed = max_speed;
        }

        pci->pcie.speed = speed;
        pci->pcie.width = width;

        if (speed == cur_speed) {
                nvkm_debug(subdev, "requested matches current speed\n");
                return speed;
        }

        nvkm_debug(subdev, "set link to %s x%i\n", nvkm_pcie_speeds[speed],
                   width);
        ret = pci->func->pcie.set_link(pci, speed, width);
        if (ret < 0)
                nvkm_error(subdev, "setting link failed: %i\n", ret);

        return ret;
}
int nvkm_pcie_init(struct nvkm_pci *pci) { struct nvkm_subdev *subdev = &pci->subdev; int ret; /* raise pcie version first */ ret = nvkm_pcie_get_version(pci); if (ret > 0) { int max_version = nvkm_pcie_get_max_version(pci); if (max_version > 0 && max_version > ret) ret = nvkm_pcie_set_version(pci, max_version); if (ret < max_version) nvkm_error(subdev, "couldn't raise version: %i\n", ret); } if (pci->func->pcie.init) pci->func->pcie.init(pci); if (pci->pcie.speed != -1) nvkm_pcie_set_link(pci, pci->pcie.speed, pci->pcie.width); return 0; }
int nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_device *device = subdev->device; const char *action = suspend ? "suspend" : "fini"; s64 time; nvkm_trace(subdev, "%s running...\n", action); time = ktime_to_us(ktime_get()); if (subdev->func->fini) { int ret = subdev->func->fini(subdev, suspend); if (ret) { nvkm_error(subdev, "%s failed, %d\n", action, ret); if (suspend) return ret; } } nvkm_mc_reset(device, subdev->index); time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "%s completed in %lldus\n", action, time); return 0; }
static int gk20a_clk_init(struct nvkm_clk *base) { struct gk20a_clk *clk = gk20a_clk(base); struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; int ret; /* get out from IDDQ */ nvkm_mask(device, GPCPLL_CFG, GPCPLL_CFG_IDDQ, 0); nvkm_rd32(device, GPCPLL_CFG); udelay(5); nvkm_mask(device, GPC2CLK_OUT, GPC2CLK_OUT_INIT_MASK, GPC2CLK_OUT_INIT_VAL); ret = gk20a_clk_setup_slide(clk); if (ret) return ret; /* Start with lowest frequency */ base->func->calc(base, &base->func->pstates[0].base); ret = base->func->prog(&clk->base); if (ret) { nvkm_error(subdev, "cannot initialize clock\n"); return ret; } return 0; }
static int nv40_ram_calc(struct nvkm_ram *base, u32 freq) { struct nv40_ram *ram = nv40_ram(base); struct nvkm_subdev *subdev = &ram->base.fb->subdev; struct nvkm_bios *bios = subdev->device->bios; struct nvbios_pll pll; int N1, M1, N2, M2; int log2P, ret; ret = nvbios_pll_parse(bios, 0x04, &pll); if (ret) { nvkm_error(subdev, "mclk pll data not found\n"); return ret; } ret = nv04_pll_calc(subdev, &pll, freq, &N1, &M1, &N2, &M2, &log2P); if (ret < 0) return ret; ram->ctrl = 0x80000000 | (log2P << 16); ram->ctrl |= min(pll.bias_p + log2P, (int)pll.max_p) << 20; if (N2 == M2) { ram->ctrl |= 0x00000100; ram->coef = (N1 << 8) | M1; } else { ram->ctrl |= 0x40000000; ram->coef = (N2 << 24) | (M2 << 16) | (N1 << 8) | M1; } return 0; }
int nvkm_subdev_fini(struct nvkm_subdev *subdev, bool suspend) { struct nvkm_device *device = subdev->device; const char *action = suspend ? "suspend" : "fini"; u32 pmc_enable = subdev->pmc_enable; s64 time; nvkm_trace(subdev, "%s running...\n", action); time = ktime_to_us(ktime_get()); if (subdev->func->fini) { int ret = subdev->func->fini(subdev, suspend); if (ret) { nvkm_error(subdev, "%s failed, %d\n", action, ret); if (suspend) return ret; } } if (pmc_enable) { nvkm_mask(device, 0x000200, pmc_enable, 0x00000000); nvkm_mask(device, 0x000200, pmc_enable, pmc_enable); nvkm_rd32(device, 0x000200); } time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "%s completed in %lldus\n", action, time); return 0; }
int gk20a_clk_setup_slide(struct gk20a_clk *clk)
{
        struct nvkm_subdev *subdev = &clk->base.subdev;
        struct nvkm_device *device = subdev->device;
        u32 step_a, step_b;

        switch (clk->parent_rate) {
        case 12000000:
        case 12800000:
        case 13000000:
                step_a = 0x2b;
                step_b = 0x0b;
                break;
        case 19200000:
                step_a = 0x12;
                step_b = 0x08;
                break;
        case 38400000:
                step_a = 0x04;
                step_b = 0x05;
                break;
        default:
                nvkm_error(subdev, "invalid parent clock rate %u KHz\n",
                           clk->parent_rate / KHZ);
                return -EINVAL;
        }

        nvkm_mask(device, GPCPLL_CFG2, 0xff << GPCPLL_CFG2_PLL_STEPA_SHIFT,
                  step_a << GPCPLL_CFG2_PLL_STEPA_SHIFT);
        nvkm_mask(device, GPCPLL_CFG3, 0xff << GPCPLL_CFG3_PLL_STEPB_SHIFT,
                  step_b << GPCPLL_CFG3_PLL_STEPB_SHIFT);

        return 0;
}
static void gp104_disp_intr_error(struct nv50_disp *disp, int chid) { struct nvkm_subdev *subdev = &disp->base.engine.subdev; struct nvkm_device *device = subdev->device; u32 mthd = nvkm_rd32(device, 0x6111f0 + (chid * 12)); u32 data = nvkm_rd32(device, 0x6111f4 + (chid * 12)); u32 unkn = nvkm_rd32(device, 0x6111f8 + (chid * 12)); nvkm_error(subdev, "chid %d mthd %04x data %08x %08x %08x\n", chid, (mthd & 0x0000ffc), data, mthd, unkn); if (chid < ARRAY_SIZE(disp->chan)) { switch (mthd & 0xffc) { case 0x0080: nv50_disp_chan_mthd(disp->chan[chid], NV_DBG_ERROR); break; default: break; } } nvkm_wr32(device, 0x61009c, (1 << chid)); nvkm_wr32(device, 0x6111f0 + (chid * 12), 0x90000000); }
int nvkm_therm_fan_ctor(struct nvkm_therm *therm) { struct nvkm_subdev *subdev = &therm->subdev; struct nvkm_device *device = subdev->device; struct nvkm_gpio *gpio = device->gpio; struct nvkm_bios *bios = device->bios; struct dcb_gpio_func func; int ret; /* attempt to locate a drivable fan, and determine control method */ ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func); if (ret == 0) { /* FIXME: is this really the place to perform such checks ? */ if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) { nvkm_debug(subdev, "GPIO_FAN is in input mode\n"); ret = -EINVAL; } else { ret = nvkm_fanpwm_create(therm, &func); if (ret != 0) ret = nvkm_fantog_create(therm, &func); } } /* no controllable fan found, create a dummy fan module */ if (ret != 0) { ret = nvkm_fannil_create(therm); if (ret) return ret; } nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type); /* read the current speed, it is useful when resuming */ therm->fan->percent = nvkm_therm_fan_get(therm); /* attempt to detect a tachometer connection */ ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff, &therm->fan->tach); if (ret) therm->fan->tach.func = DCB_GPIO_UNUSED; /* initialise fan bump/slow update handling */ therm->fan->parent = therm; nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm); spin_lock_init(&therm->fan->lock); /* other random init... */ nvkm_therm_fan_set_defaults(therm); nvbios_perf_fan_parse(bios, &therm->fan->perf); if (!nvbios_fan_parse(bios, &therm->fan->bios)) { nvkm_debug(subdev, "parsing the fan table failed\n"); if (nvbios_therm_fan_parse(bios, &therm->fan->bios)) nvkm_error(subdev, "parsing both fan tables failed\n"); } nvkm_therm_fan_safety_checks(therm); return 0; }
int nv50_devinit_pll_set(struct nvkm_devinit *init, u32 type, u32 freq)
{
        struct nvkm_subdev *subdev = &init->subdev;
        struct nvkm_device *device = subdev->device;
        struct nvkm_bios *bios = device->bios;
        struct nvbios_pll info;
        int N1, M1, N2, M2, P;
        int ret;

        ret = nvbios_pll_parse(bios, type, &info);
        if (ret) {
                nvkm_error(subdev, "failed to retrieve pll data, %d\n", ret);
                return ret;
        }

        ret = nv04_pll_calc(subdev, &info, freq, &N1, &M1, &N2, &M2, &P);
        if (!ret) {
                nvkm_error(subdev, "failed pll calculation\n");
                /* a zero result means no usable coefficients were found, so
                 * return an error rather than propagating 0 as success */
                return -EINVAL;
        }

        switch (info.type) {
        case PLL_VPLL0:
        case PLL_VPLL1:
                nvkm_wr32(device, info.reg + 0, 0x10000611);
                nvkm_mask(device, info.reg + 4, 0x00ff00ff,
                          (M1 << 16) | N1);
                nvkm_mask(device, info.reg + 8, 0x7fff00ff,
                          (P << 28) | (M2 << 16) | N2);
                break;
        case PLL_MEMORY:
                nvkm_mask(device, info.reg + 0, 0x01ff0000,
                          (P << 22) | (info.bias_p << 19) | (P << 16));
                nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
                break;
        default:
                nvkm_mask(device, info.reg + 0, 0x00070000, (P << 16));
                nvkm_wr32(device, info.reg + 4, (N1 << 8) | M1);
                break;
        }

        return 0;
}
void nvkm_falcon_bind_context(struct nvkm_falcon *falcon, struct nvkm_gpuobj *inst) { if (!falcon->func->bind_context) { nvkm_error(falcon->user, "Context binding not supported on this falcon!\n"); return; } falcon->func->bind_context(falcon, inst); }
static void gm107_ltc_intr_lts(struct nvkm_ltc *ltc, int c, int s)
{
        struct nvkm_subdev *subdev = &ltc->subdev;
        struct nvkm_device *device = subdev->device;
        u32 base = 0x140400 + (c * 0x2000) + (s * 0x200);
        u32 stat = nvkm_rd32(device, base + 0x00c);

        if (stat) {
                nvkm_error(subdev, "LTC%d_LTS%d: %08x\n", c, s, stat);
                nvkm_wr32(device, base + 0x00c, stat);
        }
}
static void nv31_bus_intr(struct nvkm_bus *bus) { struct nvkm_subdev *subdev = &bus->subdev; struct nvkm_device *device = subdev->device; u32 stat = nvkm_rd32(device, 0x001100) & nvkm_rd32(device, 0x001140); u32 gpio = nvkm_rd32(device, 0x001104) & nvkm_rd32(device, 0x001144); if (gpio) { struct nvkm_gpio *gpio = device->gpio; if (gpio) nvkm_subdev_intr(&gpio->subdev); } if (stat & 0x00000008) { /* NV41- */ u32 addr = nvkm_rd32(device, 0x009084); u32 data = nvkm_rd32(device, 0x009088); nvkm_error(subdev, "MMIO %s of %08x FAULT at %06x\n", (addr & 0x00000002) ? "write" : "read", data, (addr & 0x00fffffc)); stat &= ~0x00000008; nvkm_wr32(device, 0x001100, 0x00000008); } if (stat & 0x00070000) { struct nvkm_therm *therm = device->therm; if (therm) nvkm_subdev_intr(&therm->subdev); stat &= ~0x00070000; nvkm_wr32(device, 0x001100, 0x00070000); } if (stat) { nvkm_error(subdev, "intr %08x\n", stat); nvkm_mask(device, 0x001140, stat, 0x00000000); } }
static int nvkm_fb_oneinit(struct nvkm_subdev *subdev) { struct nvkm_fb *fb = nvkm_fb(subdev); if (fb->func->ram_new) { int ret = fb->func->ram_new(fb, &fb->ram); if (ret) { nvkm_error(subdev, "vram setup failed, %d\n", ret); return ret; } } return 0; }
int nvkm_subdev_init(struct nvkm_subdev *subdev) { s64 time; int ret; nvkm_trace(subdev, "init running...\n"); time = ktime_to_us(ktime_get()); if (subdev->func->oneinit && !subdev->oneinit) { s64 time; nvkm_trace(subdev, "one-time init running...\n"); time = ktime_to_us(ktime_get()); ret = subdev->func->oneinit(subdev); if (ret) { nvkm_error(subdev, "one-time init failed, %d\n", ret); return ret; } subdev->oneinit = true; time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "one-time init completed in %lldus\n", time); } if (subdev->func->init) { ret = subdev->func->init(subdev); if (ret) { nvkm_error(subdev, "init failed, %d\n", ret); return ret; } } time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "init completed in %lldus\n", time); return 0; }
int nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user) { mutex_lock(&falcon->mutex); if (falcon->user) { nvkm_error(user, "%s falcon already acquired by %s!\n", falcon->name, nvkm_subdev_name[falcon->user->index]); mutex_unlock(&falcon->mutex); return -EBUSY; } nvkm_debug(user, "acquired %s falcon\n", falcon->name); falcon->user = user; mutex_unlock(&falcon->mutex); return 0; }
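/*
 * For context, a minimal sketch of the matching release path, assuming
 * ownership is tracked solely through falcon->user under falcon->mutex as in
 * nvkm_falcon_get() above; the actual nvkm_falcon_put() implementation may
 * differ.
 */
void nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
        mutex_lock(&falcon->mutex);
        /* only the current owner may release the falcon */
        if (falcon->user == user) {
                nvkm_debug(user, "released %s falcon\n", falcon->name);
                falcon->user = NULL;
        }
        mutex_unlock(&falcon->mutex);
}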
void nv04_timer_intr(struct nvkm_timer *tmr) { struct nvkm_subdev *subdev = &tmr->subdev; struct nvkm_device *device = subdev->device; u32 stat = nvkm_rd32(device, NV04_PTIMER_INTR_0); if (stat & 0x00000001) { nvkm_timer_alarm_trigger(tmr); nvkm_wr32(device, NV04_PTIMER_INTR_0, 0x00000001); stat &= ~0x00000001; } if (stat) { nvkm_error(subdev, "intr %08x\n", stat); nvkm_wr32(device, NV04_PTIMER_INTR_0, stat); } }
int gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
        const struct nvkm_volt_func *volt_func = &gk104_volt_gpio;
        struct dcb_gpio_func gpio;
        struct nvbios_volt bios;
        struct gk104_volt *volt;
        u8 ver, hdr, cnt, len;
        const char *mode;

        if (!nvbios_volt_parse(device->bios, &ver, &hdr, &cnt, &len, &bios))
                return 0;

        if (!nvkm_gpio_find(device->gpio, 0, DCB_GPIO_VID_PWM, 0xff, &gpio) &&
            bios.type == NVBIOS_VOLT_PWM) {
                volt_func = &gk104_volt_pwm;
        }

        if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
                return -ENOMEM;
        nvkm_volt_ctor(volt_func, device, index, &volt->base);
        *pvolt = &volt->base;
        volt->bios = bios;

        /* now that we have a subdev, we can show an error if the voltage table
         * says we should be using PWM mode but we did not find the right GPIO
         * for it.
         */
        if (bios.type == NVBIOS_VOLT_PWM && volt_func != &gk104_volt_pwm) {
                nvkm_error(&volt->base.subdev,
                           "Type mismatch between the voltage table type and "
                           "the GPIO table. Falling back to GPIO mode.\n");
        }

        if (volt_func == &gk104_volt_gpio) {
                nvkm_voltgpio_init(&volt->base);
                mode = "GPIO";
        } else
                mode = "PWM";

        nvkm_debug(&volt->base.subdev, "Using %s mode\n", mode);

        return 0;
}
int nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense) { struct nvkm_subdev *subdev = &bios->subdev; u8 ver, hdr, cnt, len, i; u16 table, entry; table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len); if (!table || !cnt) return -EINVAL; if (ver != 0x10 && ver != 0x20) { nvkm_error(subdev, "ICCSENSE version 0x%02x unknown\n", ver); return -EINVAL; } iccsense->nr_entry = cnt; iccsense->rail = kmalloc(sizeof(struct pwr_rail_t) * cnt, GFP_KERNEL); if (!iccsense->rail) return -ENOMEM; for (i = 0; i < cnt; ++i) { struct pwr_rail_t *rail = &iccsense->rail[i]; entry = table + hdr + i * len; switch(ver) { case 0x10: rail->mode = nvbios_rd08(bios, entry + 0x1); rail->extdev_id = nvbios_rd08(bios, entry + 0x2); rail->resistor_mohm = nvbios_rd08(bios, entry + 0x3); rail->rail = nvbios_rd08(bios, entry + 0x4); break; case 0x20: rail->mode = nvbios_rd08(bios, entry); rail->extdev_id = nvbios_rd08(bios, entry + 0x1); rail->resistor_mohm = nvbios_rd08(bios, entry + 0x5); rail->rail = nvbios_rd08(bios, entry + 0x6); break; }; } return 0; }
int gk20a_clk_read(struct nvkm_clk *base, enum nv_clk_src src) { struct gk20a_clk *clk = gk20a_clk(base); struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; struct gk20a_pll pll; switch (src) { case nv_clk_src_crystal: return device->crystal; case nv_clk_src_gpc: gk20a_pllg_read_mnp(clk, &pll); return gk20a_pllg_calc_rate(clk, &pll) / GK20A_CLK_GPC_MDIV; default: nvkm_error(subdev, "invalid clock source %d\n", src); return -EINVAL; } }
int nvkm_subdev_preinit(struct nvkm_subdev *subdev) { s64 time; nvkm_trace(subdev, "preinit running...\n"); time = ktime_to_us(ktime_get()); if (subdev->func->preinit) { int ret = subdev->func->preinit(subdev); if (ret) { nvkm_error(subdev, "preinit failed, %d\n", ret); return ret; } } time = ktime_to_us(ktime_get()) - time; nvkm_trace(subdev, "preinit completed in %lldus\n", time); return 0; }
static int gm20b_clk_init(struct nvkm_clk *base) { struct gk20a_clk *clk = gk20a_clk(base); struct nvkm_subdev *subdev = &clk->base.subdev; struct nvkm_device *device = subdev->device; int ret; /* Set the global bypass control to VCO */ nvkm_mask(device, BYPASSCTRL_SYS, MASK(BYPASSCTRL_SYS_GPCPLL_WIDTH) << BYPASSCTRL_SYS_GPCPLL_SHIFT, 0); /* Start with lowest frequency */ base->func->calc(base, &base->func->pstates[0].base); ret = base->func->prog(&clk->base); if (ret) { nvkm_error(subdev, "cannot initialize clock\n"); return ret; } return 0; }
int nvbios_pll_parse(struct nvkm_bios *bios, u32 type, struct nvbios_pll *info) { struct nvkm_subdev *subdev = &bios->subdev; struct nvkm_device *device = subdev->device; u8 ver, len; u32 reg = type; u32 data; if (type > PLL_MAX) { reg = type; data = pll_map_reg(bios, reg, &type, &ver, &len); } else { data = pll_map_type(bios, type, ®, &ver, &len); } if (ver && !data) return -ENOENT; memset(info, 0, sizeof(*info)); info->type = type; info->reg = reg; switch (ver) { case 0x00: break; case 0x10: case 0x11: info->vco1.min_freq = nvbios_rd32(bios, data + 0); info->vco1.max_freq = nvbios_rd32(bios, data + 4); info->vco2.min_freq = nvbios_rd32(bios, data + 8); info->vco2.max_freq = nvbios_rd32(bios, data + 12); info->vco1.min_inputfreq = nvbios_rd32(bios, data + 16); info->vco2.min_inputfreq = nvbios_rd32(bios, data + 20); info->vco1.max_inputfreq = INT_MAX; info->vco2.max_inputfreq = INT_MAX; info->max_p = 0x7; info->max_p_usable = 0x6; /* these values taken from nv30/31/36 */ switch (bios->version.chip) { case 0x36: info->vco1.min_n = 0x5; break; default: info->vco1.min_n = 0x1; break; } info->vco1.max_n = 0xff; info->vco1.min_m = 0x1; info->vco1.max_m = 0xd; /* * On nv30, 31, 36 (i.e. all cards with two stage PLLs with this * table version (apart from nv35)), N2 is compared to * maxN2 (0x46) and 10 * maxM2 (0x4), so set maxN2 to 0x28 and * save a comparison */ info->vco2.min_n = 0x4; switch (bios->version.chip) { case 0x30: case 0x35: info->vco2.max_n = 0x1f; break; default: info->vco2.max_n = 0x28; break; } info->vco2.min_m = 0x1; info->vco2.max_m = 0x4; break; case 0x20: case 0x21: info->vco1.min_freq = nvbios_rd16(bios, data + 4) * 1000; info->vco1.max_freq = nvbios_rd16(bios, data + 6) * 1000; info->vco2.min_freq = nvbios_rd16(bios, data + 8) * 1000; info->vco2.max_freq = nvbios_rd16(bios, data + 10) * 1000; info->vco1.min_inputfreq = nvbios_rd16(bios, data + 12) * 1000; info->vco2.min_inputfreq = nvbios_rd16(bios, data + 14) * 1000; info->vco1.max_inputfreq = nvbios_rd16(bios, data + 16) * 1000; info->vco2.max_inputfreq = nvbios_rd16(bios, data + 18) * 1000; info->vco1.min_n = nvbios_rd08(bios, data + 20); info->vco1.max_n = nvbios_rd08(bios, data + 21); info->vco1.min_m = nvbios_rd08(bios, data + 22); info->vco1.max_m = nvbios_rd08(bios, data + 23); info->vco2.min_n = nvbios_rd08(bios, data + 24); info->vco2.max_n = nvbios_rd08(bios, data + 25); info->vco2.min_m = nvbios_rd08(bios, data + 26); info->vco2.max_m = nvbios_rd08(bios, data + 27); info->max_p = nvbios_rd08(bios, data + 29); info->max_p_usable = info->max_p; if (bios->version.chip < 0x60) info->max_p_usable = 0x6; info->bias_p = nvbios_rd08(bios, data + 30); if (len > 0x22) info->refclk = nvbios_rd32(bios, data + 31); break; case 0x30: data = nvbios_rd16(bios, data + 1); info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000; info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000; info->vco2.min_freq = nvbios_rd16(bios, data + 4) * 1000; info->vco2.max_freq = nvbios_rd16(bios, data + 6) * 1000; info->vco1.min_inputfreq = nvbios_rd16(bios, data + 8) * 1000; info->vco2.min_inputfreq = nvbios_rd16(bios, data + 10) * 1000; info->vco1.max_inputfreq = nvbios_rd16(bios, data + 12) * 1000; info->vco2.max_inputfreq = nvbios_rd16(bios, data + 14) * 1000; info->vco1.min_n = nvbios_rd08(bios, data + 16); info->vco1.max_n = nvbios_rd08(bios, data + 17); info->vco1.min_m = nvbios_rd08(bios, data + 18); info->vco1.max_m = nvbios_rd08(bios, data + 19); info->vco2.min_n = nvbios_rd08(bios, data + 20); info->vco2.max_n = nvbios_rd08(bios, 
data + 21); info->vco2.min_m = nvbios_rd08(bios, data + 22); info->vco2.max_m = nvbios_rd08(bios, data + 23); info->max_p_usable = info->max_p = nvbios_rd08(bios, data + 25); info->bias_p = nvbios_rd08(bios, data + 27); info->refclk = nvbios_rd32(bios, data + 28); break; case 0x40: info->refclk = nvbios_rd16(bios, data + 9) * 1000; data = nvbios_rd16(bios, data + 1); info->vco1.min_freq = nvbios_rd16(bios, data + 0) * 1000; info->vco1.max_freq = nvbios_rd16(bios, data + 2) * 1000; info->vco1.min_inputfreq = nvbios_rd16(bios, data + 4) * 1000; info->vco1.max_inputfreq = nvbios_rd16(bios, data + 6) * 1000; info->vco1.min_m = nvbios_rd08(bios, data + 8); info->vco1.max_m = nvbios_rd08(bios, data + 9); info->vco1.min_n = nvbios_rd08(bios, data + 10); info->vco1.max_n = nvbios_rd08(bios, data + 11); info->min_p = nvbios_rd08(bios, data + 12); info->max_p = nvbios_rd08(bios, data + 13); break; default: nvkm_error(subdev, "unknown pll limits version 0x%02x\n", ver); return -EINVAL; } if (!info->refclk) { info->refclk = device->crystal; if (bios->version.chip == 0x51) { u32 sel_clk = nvkm_rd32(device, 0x680524); if ((info->reg == 0x680508 && sel_clk & 0x20) || (info->reg == 0x680520 && sel_clk & 0x80)) { if (nvkm_rdvgac(device, 0, 0x27) < 0xa3) info->refclk = 200000; else info->refclk = 25000; } } } /* * By now any valid limit table ought to have set a max frequency for * vco1, so if it's zero it's either a pre limit table bios, or one * with an empty limit table (seen on nv18) */ if (!info->vco1.max_freq) { info->vco1.max_freq = nvbios_rd32(bios, bios->bmp_offset + 67); info->vco1.min_freq = nvbios_rd32(bios, bios->bmp_offset + 71); if (bmp_version(bios) < 0x0506) { info->vco1.max_freq = 256000; info->vco1.min_freq = 128000; } info->vco1.min_inputfreq = 0; info->vco1.max_inputfreq = INT_MAX; info->vco1.min_n = 0x1; info->vco1.max_n = 0xff; info->vco1.min_m = 0x1; if (device->crystal == 13500) { /* nv05 does this, nv11 doesn't, nv10 unknown */ if (bios->version.chip < 0x11) info->vco1.min_m = 0x7; info->vco1.max_m = 0xd; } else { if (bios->version.chip < 0x11) info->vco1.min_m = 0x8; info->vco1.max_m = 0xe; } if (bios->version.chip < 0x17 || bios->version.chip == 0x1a || bios->version.chip == 0x20) info->max_p = 4; else info->max_p = 5; info->max_p_usable = info->max_p; } return 0; }
int nvbios_iccsense_parse(struct nvkm_bios *bios, struct nvbios_iccsense *iccsense) { struct nvkm_subdev *subdev = &bios->subdev; u8 ver, hdr, cnt, len, i; u32 table, entry; table = nvbios_iccsense_table(bios, &ver, &hdr, &cnt, &len); if (!table || !cnt) return -EINVAL; if (ver != 0x10 && ver != 0x20) { nvkm_error(subdev, "ICCSENSE version 0x%02x unknown\n", ver); return -EINVAL; } iccsense->nr_entry = cnt; iccsense->rail = kmalloc(sizeof(struct pwr_rail_t) * cnt, GFP_KERNEL); if (!iccsense->rail) return -ENOMEM; for (i = 0; i < cnt; ++i) { struct nvbios_extdev_func extdev; struct pwr_rail_t *rail = &iccsense->rail[i]; u8 res_start = 0; int r; entry = table + hdr + i * len; switch(ver) { case 0x10: rail->mode = nvbios_rd08(bios, entry + 0x1); rail->extdev_id = nvbios_rd08(bios, entry + 0x2); res_start = 0x3; break; case 0x20: rail->mode = nvbios_rd08(bios, entry); rail->extdev_id = nvbios_rd08(bios, entry + 0x1); res_start = 0x5; break; }; if (nvbios_extdev_parse(bios, rail->extdev_id, &extdev)) continue; switch (extdev.type) { case NVBIOS_EXTDEV_INA209: case NVBIOS_EXTDEV_INA219: rail->resistor_count = 1; break; case NVBIOS_EXTDEV_INA3221: rail->resistor_count = 3; break; default: rail->resistor_count = 0; break; }; for (r = 0; r < rail->resistor_count; ++r) { rail->resistors[r].mohm = nvbios_rd08(bios, entry + res_start + r * 2); rail->resistors[r].enabled = !(nvbios_rd08(bios, entry + res_start + r * 2 + 1) & 0x40); } rail->config = nvbios_rd16(bios, entry + res_start + rail->resistor_count * 2); } return 0; }
int gm20b_secboot_tegra_read_wpr(struct gm200_secboot *gsb, u32 mc_base) { nvkm_error(&gsb->base.subdev, "Tegra support not compiled in\n"); return -EINVAL; }
static void nv04_devinit_meminit(struct nvkm_devinit *init) { struct nvkm_subdev *subdev = &init->subdev; struct nvkm_device *device = subdev->device; u32 patt = 0xdeadbeef; struct io_mapping *fb; int i; /* Map the framebuffer aperture */ fb = fbmem_init(device); if (!fb) { nvkm_error(subdev, "failed to map fb\n"); return; } /* Sequencer and refresh off */ nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) | 0x20); nvkm_mask(device, NV04_PFB_DEBUG_0, 0, NV04_PFB_DEBUG_0_REFRESH_OFF); nvkm_mask(device, NV04_PFB_BOOT_0, ~0, NV04_PFB_BOOT_0_RAM_AMOUNT_16MB | NV04_PFB_BOOT_0_RAM_WIDTH_128 | NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_16MBIT); for (i = 0; i < 4; i++) fbmem_poke(fb, 4 * i, patt); fbmem_poke(fb, 0x400000, patt + 1); if (fbmem_peek(fb, 0) == patt + 1) { nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE, NV04_PFB_BOOT_0_RAM_TYPE_SDRAM_16MBIT); nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); for (i = 0; i < 4; i++) fbmem_poke(fb, 4 * i, patt); if ((fbmem_peek(fb, 0xc) & 0xffff) != (patt & 0xffff)) nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_WIDTH_128 | NV04_PFB_BOOT_0_RAM_AMOUNT, NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); } else if ((fbmem_peek(fb, 0xc) & 0xffff0000) != (patt & 0xffff0000)) { nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_WIDTH_128 | NV04_PFB_BOOT_0_RAM_AMOUNT, NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); } else if (fbmem_peek(fb, 0) != patt) { if (fbmem_readback(fb, 0x800000, patt)) nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); else nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, NV04_PFB_BOOT_0_RAM_AMOUNT_4MB); nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_TYPE, NV04_PFB_BOOT_0_RAM_TYPE_SGRAM_8MBIT); } else if (!fbmem_readback(fb, 0x800000, patt)) { nvkm_mask(device, NV04_PFB_BOOT_0, NV04_PFB_BOOT_0_RAM_AMOUNT, NV04_PFB_BOOT_0_RAM_AMOUNT_8MB); } /* Refresh on, sequencer on */ nvkm_mask(device, NV04_PFB_DEBUG_0, NV04_PFB_DEBUG_0_REFRESH_OFF, 0); nvkm_wrvgas(device, 0, 1, nvkm_rdvgas(device, 0, 1) & ~0x20); fbmem_fini(fb); }