/*
 * Switch the Intel SpeedStep (ICH) control bit to the setting matching the
 * requested frequency.
 *
 * Returns 0 on success, EINVAL if the frequency matches neither of the two
 * known settings, or ENXIO if the hardware did not latch the new state.
 */
static int
ichss_set(device_t dev, const struct cf_setting *set)
{
	struct ichss_softc *sc;
	uint8_t bmval, new_val, old_val, req_val;
	uint64_t rate;
	register_t regs;

	/*
	 * Look up appropriate bit value based on frequency.  SpeedStep has
	 * exactly two states, selected by a single control bit.
	 * NOTE(review): sc->sets[req_val] below assumes ICHSS_CTRL_BIT == 1
	 * so that req_val doubles as an index into sets[] — confirm.
	 */
	sc = device_get_softc(dev);
	if (CPUFREQ_CMP(set->freq, sc->sets[0].freq))
		req_val = 0;
	else if (CPUFREQ_CMP(set->freq, sc->sets[1].freq))
		req_val = ICHSS_CTRL_BIT;
	else
		return (EINVAL);
	DPRINT("ichss: requested setting %d\n", req_val);

	/* Disable interrupts and get the other register contents. */
	regs = intr_disable();
	old_val = ICH_GET_REG(sc->ctrl_reg) & ~ICHSS_CTRL_BIT;

	/*
	 * Disable bus master arbitration, write the new value to the control
	 * register, and then re-enable bus master arbitration.  The order of
	 * these register accesses is required by the hardware; do not reorder.
	 */
	bmval = ICH_GET_REG(sc->bm_reg) | ICHSS_BM_DISABLE;
	ICH_SET_REG(sc->bm_reg, bmval);
	ICH_SET_REG(sc->ctrl_reg, old_val | req_val);
	ICH_SET_REG(sc->bm_reg, bmval & ~ICHSS_BM_DISABLE);

	/* Get the new value and re-enable interrupts. */
	new_val = ICH_GET_REG(sc->ctrl_reg);
	intr_restore(regs);

	/* Check if the desired state was indeed selected. */
	if (req_val != (new_val & ICHSS_CTRL_BIT)) {
		device_printf(sc->dev, "transition to %d failed\n", req_val);
		return (ENXIO);
	}

	/*
	 * Re-initialize our cycle counter if we don't know this new state:
	 * estimate the resulting clock rate and cache it (in MHz) for
	 * subsequent lookups.
	 */
	if (sc->sets[req_val].freq == CPUFREQ_VAL_UNKNOWN) {
		cpu_est_clockrate(0, &rate);
		sc->sets[req_val].freq = rate / 1000000;
		DPRINT("ichss: set calibrated new rate of %d\n",
		    sc->sets[req_val].freq);
	}
	return (0);
}
/*
 * Transition the CPU to the PowerNow state whose frequency matches the
 * requested setting.
 *
 * Returns 0 on success, EINVAL for a NULL or unmatched setting, ENXIO if a
 * prior transition left the chip in a stuck state, or ENODEV for an unknown
 * PowerNow generation.
 */
static int
pn_set(device_t dev, const struct cf_setting *cf)
{
	struct pn_softc *sc;
	int fid, vid;
	int i;
	int rv;

	if (cf == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* Refuse further transitions once the hardware is wedged. */
	if (sc->errata & PENDING_STUCK)
		return (ENXIO);

	/* Table frequencies are in kHz; cf->freq is in MHz. */
	for (i = 0; i < sc->powernow_max_states; ++i)
		if (CPUFREQ_CMP(sc->powernow_states[i].freq / 1000, cf->freq))
			break;
	/*
	 * BUGFIX: the original fell through with i == powernow_max_states
	 * when no state matched, reading powernow_states[] out of bounds.
	 */
	if (i == sc->powernow_max_states)
		return (EINVAL);

	fid = sc->powernow_states[i].fid;
	vid = sc->powernow_states[i].vid;

	rv = ENODEV;
	switch (sc->pn_type) {
	case PN7_TYPE:
		rv = pn7_setfidvid(sc, fid, vid);
		break;
	case PN8_TYPE:
		rv = pn8_setfidvid(sc, fid, vid);
		break;
	}
	return (rv);
}
/*
 * Write the ACPI performance-control register to select the Px state whose
 * frequency matches the requested setting, then poll the status register
 * until the transition is confirmed.
 *
 * Returns 0 on success, EINVAL for a NULL or unmatched setting, or ENXIO if
 * states are read-only or the transition never completed.
 */
static int
acpi_px_set(device_t dev, const struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	int i, status, sts_val, tries;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* If we can't set new states, return immediately. */
	if (sc->info_only)
		return (ENXIO);

	/*
	 * Look up appropriate state, based on frequency.  Scanning starts at
	 * px_max_avail — presumably the highest state currently permitted
	 * (e.g. limited by the platform); states above it are skipped.
	 * TODO(review): confirm px_max_avail semantics against the driver's
	 * attach/notify code.
	 */
	for (i = sc->px_max_avail; i < sc->px_count; i++) {
		if (CPUFREQ_CMP(set->freq, sc->px_states[i].core_freq))
			break;
	}
	if (i == sc->px_count)
		return (EINVAL);

	/* Write the appropriate value to the register. */
	PX_SET_REG(sc->perf_ctrl, sc->px_states[i].ctrl_val);

	/*
	 * Try for up to 10 ms to verify the desired state was selected.
	 * This is longer than the standard says (1 ms) but in some modes,
	 * systems may take longer to respond.  (1000 polls * DELAY(10) us.)
	 */
	sts_val = sc->px_states[i].sts_val;
	for (tries = 0; tries < 1000; tries++) {
		status = PX_GET_REG(sc->perf_status);

		/*
		 * If we match the status or the desired status is 8 bits
		 * and matches the relevant bits, assume we succeeded.  It
		 * appears some systems (IBM R32) expect byte-wide access
		 * even though the standard says the register is 32-bit.
		 */
		if (status == sts_val ||
		    ((sts_val & ~0xff) == 0 && (status & 0xff) == sts_val))
			break;
		DELAY(10);
	}
	/* tries == 1000 means the poll loop timed out without a match. */
	if (tries == 1000) {
		device_printf(dev, "Px transition to %d failed\n",
		    sc->px_states[i].core_freq);
		return (ENXIO);
	}
	/* Cache the selected state for fast acpi_px_get() lookups. */
	sc->px_curr_state = i;
	return (0);
}
/*
 * Report the current Px state as a cf_setting.  Uses the cached state when
 * one is known; otherwise estimates the clock rate and matches it against
 * the Px table, caching the result.
 *
 * Returns 0 on success (set->freq may be CPUFREQ_VAL_UNKNOWN if nothing
 * matched), EINVAL for a NULL setting, or ENXIO when states are
 * informational-only or the per-CPU data is unavailable.
 */
static int
acpi_px_get(device_t dev, struct cf_setting *set)
{
	struct acpi_perf_softc *sc;
	struct pcpu *pcpu;
	uint64_t hz;
	int idx;

	if (set == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);

	/* Nothing to report when the driver is informational-only. */
	if (sc->info_only)
		return (ENXIO);

	/* Fast path: a previously selected state is cached. */
	if (sc->px_curr_state != CPUFREQ_VAL_UNKNOWN) {
		acpi_px_to_set(dev, &sc->px_states[sc->px_curr_state], set);
		return (0);
	}

	/* Slow path: estimate the clock rate (in MHz) for this CPU. */
	pcpu = cpu_get_pcpu(dev);
	if (pcpu == NULL)
		return (ENXIO);
	cpu_est_clockrate(pcpu->pc_cpuid, &hz);
	hz /= 1000000;

	/* Scan the Px table for a state matching the estimate. */
	idx = 0;
	while (idx < sc->px_count &&
	    !CPUFREQ_CMP(sc->px_states[idx].core_freq, hz))
		idx++;

	if (idx < sc->px_count) {
		sc->px_curr_state = idx;
		acpi_px_to_set(dev, &sc->px_states[idx], set);
	} else {
		/* No match: leave the state unknown and say so. */
		sc->px_curr_state = CPUFREQ_VAL_UNKNOWN;
		set->freq = CPUFREQ_VAL_UNKNOWN;
	}
	return (0);
}
/*
 * Switch to the hwpstate P-state whose frequency matches the requested
 * setting.
 *
 * Returns EINVAL for a NULL or unmatched setting; otherwise returns the
 * result of hwpstate_goto_pstate() for the matching P-state id.
 */
static int
hwpstate_set(device_t dev, const struct cf_setting *cf)
{
	struct hwpstate_softc *sc;
	struct hwpstate_setting *settings;
	int idx;

	if (cf == NULL)
		return (EINVAL);
	sc = device_get_softc(dev);
	settings = sc->hwpstate_settings;

	/* Find the table entry whose frequency matches the request. */
	for (idx = 0; idx < sc->cfnum; idx++) {
		if (CPUFREQ_CMP(cf->freq, settings[idx].freq))
			return (hwpstate_goto_pstate(dev,
			    settings[idx].pstate_id));
	}

	/* No entry matched the requested frequency. */
	return (EINVAL);
}