int gk20a_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt) { struct nvkm_device_tegra *tdev = device->func->tegra(device); struct gk20a_volt *volt; int i, uv; if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL))) return -ENOMEM; nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base); *pvolt = &volt->base; uv = regulator_get_voltage(tdev->vdd); nvkm_info(&volt->base.subdev, "The default voltage is %duV\n", uv); volt->vdd = tdev->vdd; volt->base.vid_nr = ARRAY_SIZE(gk20a_cvb_coef); nvkm_debug(&volt->base.subdev, "%s - vid_nr = %d\n", __func__, volt->base.vid_nr); for (i = 0; i < volt->base.vid_nr; i++) { volt->base.vid[i].vid = i; volt->base.vid[i].uv = gk20a_volt_calc_voltage(&gk20a_cvb_coef[i], tdev->gpu_speedo); nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i, volt->base.vid[i].vid, volt->base.vid[i].uv); } return 0; }
int _gk20a_volt_ctor(struct nvkm_device *device, int index, const struct cvb_coef *coefs, int nb_coefs, struct gk20a_volt *volt) { struct nvkm_device_tegra *tdev = device->func->tegra(device); int i, uv; nvkm_volt_ctor(&gk20a_volt, device, index, &volt->base); uv = regulator_get_voltage(tdev->vdd); nvkm_debug(&volt->base.subdev, "the default voltage is %duV\n", uv); volt->vdd = tdev->vdd; volt->base.vid_nr = nb_coefs; for (i = 0; i < volt->base.vid_nr; i++) { volt->base.vid[i].vid = i; volt->base.vid[i].uv = gk20a_volt_calc_voltage(&coefs[i], tdev->gpu_speedo); nvkm_debug(&volt->base.subdev, "%2d: vid=%d, uv=%d\n", i, volt->base.vid[i].vid, volt->base.vid[i].uv); } return 0; }
/* Construct fan-control state for a therm subdev: locate a drivable
 * fan (PWM first, then toggle, then a dummy fallback), read the
 * current speed, probe for a tachometer and parse the BIOS fan tables.
 */
int
nvkm_therm_fan_ctor(struct nvkm_therm *therm)
{
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_device *device = subdev->device;
	struct nvkm_gpio *gpio = device->gpio;
	struct nvkm_bios *bios = device->bios;
	struct dcb_gpio_func func;
	int ret;

	/* attempt to locate a drivable fan, and determine control method */
	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN, 0xff, &func);
	if (ret == 0) {
		/* FIXME: is this really the place to perform such checks ? */
		if (func.line != 16 && func.log[0] & DCB_GPIO_LOG_DIR_IN) {
			nvkm_debug(subdev, "GPIO_FAN is in input mode\n");
			ret = -EINVAL;
		} else {
			/* prefer PWM control; fall back to GPIO toggling */
			ret = nvkm_fanpwm_create(therm, &func);
			if (ret != 0)
				ret = nvkm_fantog_create(therm, &func);
		}
	}

	/* no controllable fan found, create a dummy fan module */
	if (ret != 0) {
		ret = nvkm_fannil_create(therm);
		if (ret)
			return ret;
	}

	nvkm_debug(subdev, "FAN control: %s\n", therm->fan->type);

	/* read the current speed, it is useful when resuming */
	therm->fan->percent = nvkm_therm_fan_get(therm);

	/* attempt to detect a tachometer connection */
	ret = nvkm_gpio_find(gpio, 0, DCB_GPIO_FAN_SENSE, 0xff,
			     &therm->fan->tach);
	if (ret)
		therm->fan->tach.func = DCB_GPIO_UNUSED;

	/* initialise fan bump/slow update handling */
	therm->fan->parent = therm;
	nvkm_alarm_init(&therm->fan->alarm, nvkm_fan_alarm);
	spin_lock_init(&therm->fan->lock);

	/* other random init... */
	nvkm_therm_fan_set_defaults(therm);
	nvbios_perf_fan_parse(bios, &therm->fan->perf);
	/* try the dedicated fan table first, then the thermal fan table */
	if (!nvbios_fan_parse(bios, &therm->fan->bios)) {
		nvkm_debug(subdev, "parsing the fan table failed\n");
		if (nvbios_therm_fan_parse(bios, &therm->fan->bios))
			nvkm_error(subdev, "parsing both fan tables failed\n");
	}
	nvkm_therm_fan_safety_checks(therm);
	return 0;
}
/* Request a PCIe link speed/width change, clamping the requested speed
 * to what both the bus and the card support.
 *
 * Returns the (possibly clamped) speed when no register write was
 * needed, the set_link result otherwise, or a negative error code.
 *
 * Fixes: the "dropping requested speed" debug string was missing the
 * space between "dropping" and "requested" and its trailing newline;
 * also, member addresses are now only taken after the NULL check.
 */
int
nvkm_pcie_set_link(struct nvkm_pci *pci, enum nvkm_pcie_speed speed, u8 width)
{
	struct nvkm_subdev *subdev;
	enum nvkm_pcie_speed cur_speed, max_speed;
	struct pci_bus *pbus;
	int ret;

	/* validate pci before dereferencing or taking member addresses */
	if (!pci || !pci_is_pcie(pci->pdev))
		return 0;
	subdev = &pci->subdev;
	pbus = pci->pdev->bus;

	if (!pci->func->pcie.set_link)
		return -ENOSYS;

	nvkm_trace(subdev, "requested %s\n", nvkm_pcie_speeds[speed]);

	/* speed changes require at least PCIe 2.0 support */
	if (pci->func->pcie.version(pci) < 2) {
		nvkm_error(subdev, "setting link failed due to low version\n");
		return -ENODEV;
	}

	cur_speed = pci->func->pcie.cur_speed(pci);
	max_speed = min(nvkm_pcie_speed(pbus->max_bus_speed),
			pci->func->pcie.max_speed(pci));

	nvkm_trace(subdev, "current speed: %s\n", nvkm_pcie_speeds[cur_speed]);

	if (speed > max_speed) {
		nvkm_debug(subdev, "%s not supported by bus or card, "
			   "dropping requested speed to %s\n",
			   nvkm_pcie_speeds[speed],
			   nvkm_pcie_speeds[max_speed]);
		speed = max_speed;
	}

	pci->pcie.speed = speed;
	pci->pcie.width = width;

	if (speed == cur_speed) {
		nvkm_debug(subdev, "requested matches current speed\n");
		return speed;
	}

	nvkm_debug(subdev, "set link to %s x%i\n",
		   nvkm_pcie_speeds[speed], width);
	ret = pci->func->pcie.set_link(pci, speed, width);
	if (ret < 0)
		nvkm_error(subdev, "setting link failed: %i\n", ret);

	return ret;
}
/* Program the 64-bit PTIMER time value. */
void
nv04_timer_time(struct nvkm_timer *tmr, u64 time)
{
	struct nvkm_subdev *subdev = &tmr->subdev;
	struct nvkm_device *device = subdev->device;
	u32 msw = upper_32_bits(time);
	u32 lsw = lower_32_bits(time);

	nvkm_debug(subdev, "time low : %08x\n", lsw);
	nvkm_debug(subdev, "time high : %08x\n", msw);

	/* write order preserved from original: high word, then low */
	nvkm_wr32(device, NV04_PTIMER_TIME_1, msw);
	nvkm_wr32(device, NV04_PTIMER_TIME_0, lsw);
}
/* Append a delay opcode to the MEMX script. */
void
nvkm_memx_nsec(struct nvkm_memx *memx, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, "    DELAY = %d ns\n", nsec);
	memx_cmd(memx, MEMX_DELAY, 1, (u32[]){ nsec });
	memx_out(memx); /* fuc can't handle multiple */
}
static int nvkm_fault_oneinit_buffer(struct nvkm_fault *fault, int id) { struct nvkm_subdev *subdev = &fault->subdev; struct nvkm_device *device = subdev->device; struct nvkm_fault_buffer *buffer; int ret; if (!(buffer = kzalloc(sizeof(*buffer), GFP_KERNEL))) return -ENOMEM; buffer->fault = fault; buffer->id = id; fault->func->buffer.info(buffer); fault->buffer[id] = buffer; nvkm_debug(subdev, "buffer %d: %d entries\n", id, buffer->entries); ret = nvkm_memory_new(device, NVKM_MEM_TARGET_INST, buffer->entries * fault->func->buffer.entry_size, 0x1000, true, &buffer->mem); if (ret) return ret; /* Pin fault buffer in BAR2. */ buffer->addr = nvkm_memory_bar2(buffer->mem); if (buffer->addr == ~0ULL) return -EFAULT; return 0; }
int nvkm_voltgpio_init(struct nvkm_volt *volt) { struct nvkm_subdev *subdev = &volt->subdev; struct nvkm_gpio *gpio = subdev->device->gpio; struct dcb_gpio_func func; int i; /* check we have gpio function info for each vid bit. on some * boards (ie. nvs295) the vid mask has more bits than there * are valid gpio functions... from traces, nvidia appear to * just touch the existing ones, so let's mask off the invalid * bits and continue with life */ for (i = 0; i < ARRAY_SIZE(tags); i++) { if (volt->vid_mask & (1 << i)) { int ret = nvkm_gpio_find(gpio, 0, tags[i], 0xff, &func); if (ret) { if (ret != -ENOENT) return ret; nvkm_debug(subdev, "VID bit %d has no GPIO\n", i); volt->vid_mask &= ~(1 << i); } } } return 0; }
int gk20a_clk_ctor(struct nvkm_device *device, int index, const struct nvkm_clk_func *func, const struct gk20a_clk_pllg_params *params, struct gk20a_clk *clk) { struct nvkm_device_tegra *tdev = device->func->tegra(device); int ret; int i; /* Finish initializing the pstates */ for (i = 0; i < func->nr_pstates; i++) { INIT_LIST_HEAD(&func->pstates[i].list); func->pstates[i].pstate = i + 1; } clk->params = params; clk->parent_rate = clk_get_rate(tdev->clk); ret = nvkm_clk_ctor(func, device, index, true, &clk->base); if (ret) return ret; nvkm_debug(&clk->base.subdev, "parent clock rate: %d Khz\n", clk->parent_rate / KHZ); return 0; }
/* Finish a MEMX script: flush the command cache, release the PMU data
 * segment, and optionally ask the PMU's MEMX process to execute it.
 *
 * Fixes: reply[] was printed unconditionally, so when exec was false
 * the debug line dumped uninitialised stack data; the print now only
 * happens after nvkm_pmu_send() has actually filled the reply.
 */
int
nvkm_memx_fini(struct nvkm_memx **pmemx, bool exec)
{
	struct nvkm_memx *memx = *pmemx;
	struct nvkm_pmu *pmu = memx->pmu;
	struct nvkm_subdev *subdev = &pmu->subdev;
	struct nvkm_device *device = subdev->device;
	u32 finish, reply[2];

	/* flush the cache... */
	memx_out(memx);

	/* release data segment access */
	finish = nvkm_rd32(device, 0x10a1c0) & 0x00ffffff;
	nvkm_wr32(device, 0x10a580, 0x00000000);

	/* call MEMX process to execute the script, and wait for reply */
	if (exec) {
		nvkm_pmu_send(pmu, reply, PROC_MEMX, MEMX_MSG_EXEC,
			      memx->base, finish);
		nvkm_debug(subdev, "Exec took %uns, PMU_IN %08x\n",
			   reply[0], reply[1]);
	}

	kfree(memx);
	return 0;
}
int nvkm_therm_fan_mode(struct nvkm_therm *therm, int mode) { struct nvkm_subdev *subdev = &therm->subdev; struct nvkm_device *device = subdev->device; static const char *name[] = { "disabled", "manual", "automatic" }; /* The default PPWR ucode on fermi interferes with fan management */ if ((mode >= ARRAY_SIZE(name)) || (mode != NVKM_THERM_CTRL_NONE && device->card_type >= NV_C0 && !device->pmu)) return -EINVAL; /* do not allow automatic fan management if the thermal sensor is * not available */ if (mode == NVKM_THERM_CTRL_AUTO && therm->func->temp_get(therm) < 0) return -EINVAL; if (therm->mode == mode) return 0; nvkm_debug(subdev, "fan management: %s\n", name[mode]); nvkm_therm_update(therm, mode); return 0; }
void nv50_devinit_preinit(struct nvkm_devinit *base) { struct nv50_devinit *init = nv50_devinit(base); struct nvkm_subdev *subdev = &init->base.subdev; struct nvkm_device *device = subdev->device; /* our heuristics can't detect whether the board has had its * devinit scripts executed or not if the display engine is * missing, assume it's a secondary gpu which requires post */ if (!init->base.post) { u64 disable = nvkm_devinit_disable(&init->base); if (disable & (1ULL << NVKM_ENGINE_DISP)) init->base.post = true; } /* magic to detect whether or not x86 vbios code has executed * the devinit scripts to initialise the board */ if (!init->base.post) { if (!nvkm_rdvgac(device, 0, 0x00) && !nvkm_rdvgac(device, 0, 0x1a)) { nvkm_debug(subdev, "adaptor not initialised\n"); init->base.post = true; } } }
void nv04_devinit_preinit(struct nvkm_devinit *base) { struct nv04_devinit *init = nv04_devinit(base); struct nvkm_subdev *subdev = &init->base.subdev; struct nvkm_device *device = subdev->device; /* make i2c busses accessible */ nvkm_mask(device, 0x000200, 0x00000001, 0x00000001); /* unslave crtcs */ if (init->owner < 0) init->owner = nvkm_rdvgaowner(device); nvkm_wrvgaowner(device, 0); if (!init->base.post) { u32 htotal = nvkm_rdvgac(device, 0, 0x06); htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x01) << 8; htotal |= (nvkm_rdvgac(device, 0, 0x07) & 0x20) << 4; htotal |= (nvkm_rdvgac(device, 0, 0x25) & 0x01) << 10; htotal |= (nvkm_rdvgac(device, 0, 0x41) & 0x01) << 11; if (!htotal) { nvkm_debug(subdev, "adaptor not initialised\n"); init->base.post = true; } } }
/* One-time PCIe init: just report the card's maximum link speed. */
int
nvkm_pcie_oneinit(struct nvkm_pci *pci)
{
	if (!pci->func->pcie.max_speed)
		return 0;

	nvkm_debug(&pci->subdev, "pcie max speed: %s\n",
		   nvkm_pcie_speeds[pci->func->pcie.max_speed(pci)]);
	return 0;
}
static void nv04_timer_init(struct nvkm_timer *tmr) { struct nvkm_subdev *subdev = &tmr->subdev; struct nvkm_device *device = subdev->device; u32 f = 0; /*XXX: nvclk */ u32 n, d; /* aim for 31.25MHz, which gives us nanosecond timestamps */ d = 1000000 / 32; n = f; if (!f) { n = nvkm_rd32(device, NV04_PTIMER_NUMERATOR); d = nvkm_rd32(device, NV04_PTIMER_DENOMINATOR); if (!n || !d) { n = 1; d = 1; } nvkm_warn(subdev, "unknown input clock freq\n"); } /* reduce ratio to acceptable values */ while (((n % 5) == 0) && ((d % 5) == 0)) { n /= 5; d /= 5; } while (((n % 2) == 0) && ((d % 2) == 0)) { n /= 2; d /= 2; } while (n > 0xffff || d > 0xffff) { n >>= 1; d >>= 1; } nvkm_debug(subdev, "input frequency : %dHz\n", f); nvkm_debug(subdev, "numerator : %08x\n", n); nvkm_debug(subdev, "denominator : %08x\n", d); nvkm_debug(subdev, "timer frequency : %dHz\n", f * d / n); nvkm_wr32(device, NV04_PTIMER_NUMERATOR, n); nvkm_wr32(device, NV04_PTIMER_DENOMINATOR, d); }
/* Append a masked-poll wait opcode to the MEMX script.
 *
 * Fixes: the debug line labelled the timeout "us" although the
 * parameter is nanoseconds (cf. nvkm_memx_nsec, which prints "ns"
 * for the same unit).
 */
void
nvkm_memx_wait(struct nvkm_memx *memx,
	       u32 addr, u32 mask, u32 data, u32 nsec)
{
	nvkm_debug(&memx->pmu->subdev, "R[%06x] & %08x == %08x, %d ns\n",
		   addr, mask, data, nsec);
	memx_cmd(memx, MEMX_WAIT, 4, (u32[]){ addr, mask, data, nsec });
	memx_out(memx); /* fuc can't handle multiple */
}
/* Apply the voltage associated with the given VID via the regulator. */
int
gk20a_volt_vid_set(struct nvkm_volt *base, u8 vid)
{
	struct gk20a_volt *volt = gk20a_volt(base);
	struct nvkm_subdev *subdev = &volt->base.subdev;
	int uv = volt->base.vid[vid].uv;

	nvkm_debug(subdev, "set voltage as %duv\n", uv);
	return regulator_set_voltage(volt->vdd, uv, 1200000);
}
/* Release a falcon acquired by `user`; a no-op for any other owner. */
void
nvkm_falcon_put(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	mutex_lock(&falcon->mutex);
	if (falcon->user != user) {
		mutex_unlock(&falcon->mutex);
		return;
	}

	/* log before clearing ownership so the message is attributed
	 * to the releasing subdev */
	nvkm_debug(falcon->user, "released %s falcon\n", falcon->name);
	falcon->user = NULL;
	mutex_unlock(&falcon->mutex);
}
/* Report and acknowledge an ibus interrupt for GPC unit i. */
static void
gf100_ibus_intr_gpc(struct nvkm_subdev *ibus, int i)
{
	struct nvkm_device *device = ibus->device;
	const u32 base = 0x128120 + (i * 0x0400);
	u32 addr = nvkm_rd32(device, base + 0x000);
	u32 data = nvkm_rd32(device, base + 0x004);
	u32 stat = nvkm_rd32(device, base + 0x008);

	nvkm_debug(ibus, "GPC%d: %06x %08x (%08x)\n", i, addr, data, stat);
	nvkm_mask(device, base + 0x008, 0x00000200, 0x00000000);
}
/* Apply a fan-control mode (or re-evaluate the current one when
 * mode < 0): work out the target duty under therm->lock, then issue
 * the actual fan update after dropping the lock.
 */
static void
nvkm_therm_update(struct nvkm_therm *therm, int mode)
{
	struct nvkm_subdev *subdev = &therm->subdev;
	struct nvkm_timer *tmr = subdev->device->timer;
	unsigned long flags;
	bool immd = true;
	bool poll = true;
	int duty = -1;	/* -1 means "no duty change requested" */

	spin_lock_irqsave(&therm->lock, flags);
	if (mode < 0)
		mode = therm->mode;	/* negative mode == refresh current */
	therm->mode = mode;

	switch (mode) {
	case NVKM_THERM_CTRL_MANUAL:
		/* manual: keep whatever duty is currently programmed */
		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
		duty = nvkm_therm_fan_get(therm);
		if (duty < 0)
			duty = 100;
		poll = false;
		break;
	case NVKM_THERM_CTRL_AUTO:
		/* automatic: derive duty from the BIOS fan model */
		switch(therm->fan->bios.fan_mode) {
		case NVBIOS_THERM_FAN_TRIP:
			duty = nvkm_therm_update_trip(therm);
			break;
		case NVBIOS_THERM_FAN_LINEAR:
			duty = nvkm_therm_update_linear(therm);
			break;
		case NVBIOS_THERM_FAN_OTHER:
			if (therm->cstate)
				duty = therm->cstate;
			poll = false;
			break;
		}
		immd = false;
		break;
	case NVKM_THERM_CTRL_NONE:
	default:
		nvkm_timer_alarm_cancel(tmr, &therm->alarm);
		poll = false;
	}

	/* re-arm the 1s polling alarm if periodic updates are still needed */
	if (list_empty(&therm->alarm.head) && poll)
		nvkm_timer_alarm(tmr, 1000000000ULL, &therm->alarm);
	spin_unlock_irqrestore(&therm->lock, flags);

	if (duty >= 0) {
		nvkm_debug(subdev, "FAN target request: %d%%\n", duty);
		nvkm_therm_fan_set(therm, immd, duty);
	}
}
/* Queue a 32-bit register write into the hwsq script, emitting a new
 * data opcode only when the value differs from the last one cached.
 * NOTE(review): this function appears truncated in this chunk -- the
 * remainder of the body and its closing braces are not visible here.
 */
void
nvkm_hwsq_wr32(struct nvkm_hwsq *hwsq, u32 addr, u32 data)
{
	nvkm_debug(hwsq->subdev, "R[%06x] = %08x\n", addr, data);
	if (hwsq->data != data) {
		if ((data & 0xffff0000) != (hwsq->data & 0xffff0000)) {
			hwsq_cmd(hwsq, 5, (u8[]) { 0xe2, data, data >> 8,
						   data >> 16, data >> 24 });
int nvkm_therm_cstate(struct nvkm_therm *therm, int fan, int dir) { struct nvkm_subdev *subdev = &therm->subdev; if (!dir || (dir < 0 && fan < therm->cstate) || (dir > 0 && fan > therm->cstate)) { nvkm_debug(subdev, "default fan speed -> %d%%\n", fan); therm->cstate = fan; nvkm_therm_update(therm, -1); } return 0; }
/* Common PCI subdev constructor: sets defaults, hooks up AGP where
 * applicable, and decides whether to enable MSI based on chipset
 * quirks, endianness and the "NvMSI" config option.
 */
int
nvkm_pci_new_(const struct nvkm_pci_func *func, struct nvkm_device *device,
	      int index, struct nvkm_pci **ppci)
{
	struct nvkm_pci *pci;

	if (!(pci = *ppci = kzalloc(sizeof(**ppci), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_subdev_ctor(&nvkm_pci_func, device, index, &pci->subdev);
	pci->func = func;
	pci->pdev = device->func->pci(device)->pdev;
	pci->irq = -1;
	pci->pcie.speed = -1;
	pci->pcie.width = -1;

	if (device->type == NVKM_DEVICE_AGP)
		nvkm_agp_ctor(pci);

	/* default MSI policy: on, except for known-problematic devices */
	switch (pci->pdev->device & 0x0ff0) {
	case 0x00f0:
	case 0x02e0:
		/* BR02? NFI how these would be handled yet exactly */
		break;
	default:
		switch (device->chipset) {
		case 0xaa:
			/* reported broken, nv also disable it */
			break;
		default:
			pci->msi = true;
			break;
		}
	}

#ifdef __BIG_ENDIAN
	pci->msi = false;
#endif

	/* user config option overrides the defaults chosen above */
	pci->msi = nvkm_boolopt(device->cfgopt, "NvMSI", pci->msi);
	if (pci->msi && func->msi_rearm) {
		/* MSI is only usable when the impl can rearm it */
		pci->msi = pci_enable_msi(pci->pdev) == 0;
		if (pci->msi)
			nvkm_debug(&pci->subdev, "MSI enabled\n");
	} else {
		pci->msi = false;
	}

	return 0;
}
/* Acquire exclusive use of a falcon for `user`.
 * Returns 0 on success, -EBUSY when another subdev already owns it.
 */
int
nvkm_falcon_get(struct nvkm_falcon *falcon, const struct nvkm_subdev *user)
{
	int ret = 0;

	mutex_lock(&falcon->mutex);
	if (falcon->user) {
		nvkm_error(user, "%s falcon already acquired by %s!\n",
			   falcon->name,
			   nvkm_subdev_name[falcon->user->index]);
		ret = -EBUSY;
	} else {
		nvkm_debug(user, "acquired %s falcon\n", falcon->name);
		falcon->user = user;
	}
	mutex_unlock(&falcon->mutex);

	return ret;
}
/* Common engine constructor.  Honours a per-engine config override
 * (keyed by subdev name); returns -ENODEV when the engine is disabled.
 */
int
nvkm_engine_ctor(const struct nvkm_engine_func *func,
		 struct nvkm_device *device, int index, u32 pmc_enable,
		 bool enable, struct nvkm_engine *engine)
{
	nvkm_subdev_ctor(&nvkm_engine_func, device, index, pmc_enable,
			 &engine->subdev);
	engine->func = func;

	if (!nvkm_boolopt(device->cfgopt, nvkm_subdev_name[index], enable)) {
		nvkm_debug(&engine->subdev, "disabled\n");
		return -ENODEV;
	}

	spin_lock_init(&engine->lock);
	return 0;
}
/* GK20A ibus interrupt handler: reset the ring on any error status,
 * then acknowledge and poll for the status bits to clear.
 * NOTE(review): the closing brace of this function is missing from
 * this chunk -- the definition appears truncated.
 */
static void
gk20a_ibus_intr(struct nvkm_subdev *ibus)
{
	struct nvkm_device *device = ibus->device;
	u32 status0 = nvkm_rd32(device, 0x120058);

	if (status0 & 0x7) {
		nvkm_debug(ibus, "resetting ibus ring\n");
		gk20a_ibus_init_ibus_ring(ibus);
	}

	/* Acknowledge interrupt */
	nvkm_mask(device, 0x12004c, 0x2, 0x2);

	/* poll until the pending bits clear (bounded by nvkm_msec) */
	nvkm_msec(device, 2000,
		if (!(nvkm_rd32(device, 0x12004c) & 0x0000003f))
			break;
	);
int acr_ls_pmu_post_run(const struct nvkm_acr *acr, const struct nvkm_secboot *sb) { struct nvkm_device *device = sb->subdev.device; struct nvkm_pmu *pmu = device->pmu; u32 addr_args = pmu->falcon->data.limit - NVKM_MSGQUEUE_CMDLINE_SIZE; int ret; ret = acr_ls_msgqueue_post_run(pmu->queue, pmu->falcon, addr_args); if (ret) return ret; nvkm_debug(&sb->subdev, "%s started\n", nvkm_secboot_falcon_name[acr->boot_falcon]); return 0; }
/* Create the GK104 voltage subdev, choosing PWM control when both the
 * BIOS voltage table and the GPIO table agree on it, GPIO otherwise.
 */
int
gk104_volt_new(struct nvkm_device *device, int index, struct nvkm_volt **pvolt)
{
	const struct nvkm_volt_func *volt_func = &gk104_volt_gpio;
	struct dcb_gpio_func gpio;
	struct nvbios_volt bios;
	struct gk104_volt *volt;
	u8 ver, hdr, cnt, len;
	const char *mode;

	/* no voltage table at all: nothing to construct (not an error) */
	if (!nvbios_volt_parse(device->bios, &ver, &hdr, &cnt, &len, &bios))
		return 0;

	if (!nvkm_gpio_find(device->gpio, 0, DCB_GPIO_VID_PWM, 0xff, &gpio) &&
	    bios.type == NVBIOS_VOLT_PWM) {
		volt_func = &gk104_volt_pwm;
	}

	if (!(volt = kzalloc(sizeof(*volt), GFP_KERNEL)))
		return -ENOMEM;
	nvkm_volt_ctor(volt_func, device, index, &volt->base);
	*pvolt = &volt->base;
	volt->bios = bios;

	/* now that we have a subdev, we can show an error if we found through
	 * the voltage table that we were supposed to use the PWM mode but we
	 * did not find the right GPIO for it.
	 */
	if (bios.type == NVBIOS_VOLT_PWM && volt_func != &gk104_volt_pwm) {
		nvkm_error(&volt->base.subdev,
			   "Type mismatch between the voltage table type and "
			   "the GPIO table. Fallback to GPIO mode.\n");
	}

	if (volt_func == &gk104_volt_gpio) {
		nvkm_voltgpio_init(&volt->base);
		mode = "GPIO";
	} else
		mode = "PWM";

	nvkm_debug(&volt->base.subdev, "Using %s mode\n", mode);

	return 0;
}
/* Locate the BIT 'x' (MXM) table in the VBIOS.
 * Returns its offset, or 0x0000 when absent or of an unknown version.
 */
u16
mxm_table(struct nvkm_bios *bios, u8 *ver, u8 *hdr)
{
	struct nvkm_subdev *subdev = &bios->subdev;
	struct bit_entry x;

	if (bit_entry(bios, 'x', &x)) {
		nvkm_debug(subdev, "BIT 'x' table not present\n");
		return 0x0000;
	}

	*ver = x.version;
	*hdr = x.length;

	/* only version 1 with at least 3 header bytes is understood */
	if (*ver != 1 || *hdr < 3) {
		nvkm_warn(subdev, "BIT 'x' table %d/%d unknown\n",
			  *ver, *hdr);
		return 0x0000;
	}

	return x.offset;
}
/* Set the voltage for VID `id` when the requested change direction
 * matches `condition` (0 forces the change regardless of direction).
 */
int
gk20a_volt_set_id(struct nvkm_volt *base, u8 id, int condition)
{
	struct gk20a_volt *volt = gk20a_volt(base);
	struct nvkm_subdev *subdev = &volt->base.subdev;
	int prev_uv = regulator_get_voltage(volt->vdd);
	int target_uv = volt->base.vid[id].uv;
	bool lowering = condition < 0 && target_uv < prev_uv;
	bool raising = condition > 0 && target_uv > prev_uv;

	nvkm_debug(subdev, "prev=%d, target=%d, condition=%d\n",
		   prev_uv, target_uv, condition);

	if (!condition || lowering || raising)
		return gk20a_volt_vid_set(&volt->base,
					  volt->base.vid[id].vid);

	return 0;
}