int acpicpu_md_tstate_set(struct acpicpu_tstate *ts) { uint64_t val; uint8_t i; val = ts->ts_control; val = val & __BITS(0, 4); wrmsr(MSR_THERM_CONTROL, val); if (ts->ts_status == 0) { DELAY(ts->ts_latency); return 0; } for (i = val = 0; i < ACPICPU_T_STATE_RETRY; i++) { val = rdmsr(MSR_THERM_CONTROL); if (val == ts->ts_status) return 0; DELAY(ts->ts_latency); } return EAGAIN; }
/*
 * Set the baseband-processor attenuation for the given MAC.  The
 * 4-bit attenuation field lives in different registers, at different
 * bit positions, depending on the PHY version.
 */
void
bwi_phy_set_bbp_atten(struct bwi_mac *mac, uint16_t bbp_atten)
{
	struct bwi_phy *phy = &mac->mac_phy;
	uint16_t mask = __BITS(3, 0);

	if (phy->phy_version == 0) {
		/* Version 0 PHYs keep the field in a CSR register. */
		CSR_FILT_SETBITS_2(mac->mac_sc, BWI_BBP_ATTEN, ~mask,
		    __SHIFTIN(bbp_atten, mask));
		return;
	}

	/*
	 * Later PHYs place the field inside a PHY register; the shift
	 * differs between version 1 and version >1 parts.
	 */
	mask <<= (phy->phy_version > 1) ? 2 : 3;
	PHY_FILT_SETBITS(mac, BWI_PHYR_BBP_ATTEN, ~mask,
	    __SHIFTIN(bbp_atten, mask));
}
#define PADGRP(_n, _p, _dt, _dd, _du, _slwf) \ { \ .pg_reg = PADGRP_ ## _n ## _REG,\ .pg_preemp = (_p), \ .pg_hsm = __BIT(2), \ .pg_schmt = __BIT(3), \ .pg_drv_type = (_dt), \ .pg_drvdn = (_dd), \ .pg_drvup = (_du), \ .pg_slwr = __BITS(29,28), \ .pg_slwf = (_slwf) \ } static const struct tegra_mpio_padgrp tegra_mpio_padgrp[] = { PADGRP(GMACFG, __BIT(0), __BITS(7,6), __BITS(18,14), __BITS(24,20), __BITS(31,30)), PADGRP(SDIO1CFG, 0, 0, __BITS(18,12), __BITS(26,20), __BITS(31,30)), PADGRP(SDIO3CFG, 0, 0, __BITS(18,12), __BITS(26,20), __BITS(31,30)), PADGRP(SDIO4CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG0, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG1, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG2, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(AOCFG3, 0, 0, __BITS(16,12), 0, 0), PADGRP(AOCFG4, 0, __BITS(7,6), __BITS(18,12), __BITS(26,20), __BITS(31,30)), PADGRP(CDEV1CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(CDEV2CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(CECCFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP1CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP2CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP3CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)), PADGRP(DAP4CFG, 0, 0, __BITS(16,12), __BITS(24,20), __BITS(31,30)),
/*
 * Bring up the Allwinner SATA PHY.  The register writes below follow
 * an undocumented vendor sequence; the exact ordering and delays
 * matter, so do not reorder.  NOTE(review): bit meanings are not
 * public — the symbolic names below describe observed behavior only.
 */
static void
awin_ahci_phy_init(struct awin_ahci_softc *asc)
{
	bus_space_tag_t bst = asc->asc_sc.sc_ahcit;
	bus_space_handle_t bsh = asc->asc_sc.sc_ahcih;
	u_int timeout;
	uint32_t v;

	/*
	 * This is dark magic.
	 */
	delay(5000);
	bus_space_write_4(bst, bsh, AWIN_AHCI_RWCR_REG, 0);

	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG,
	    __BIT(19), 0);
	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS0R_REG,
	    __BIT(26)|__BIT(24)|__BIT(23)|__BIT(18), __BIT(25));
	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG,
	    __BIT(17)|__BIT(10)|__BIT(9)|__BIT(7),
	    __BIT(16)|__BIT(12)|__BIT(11)|__BIT(8)|__BIT(6));
	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG,
	    __BIT(28)|__BIT(15), 0);
	/* Clear the bit we set first, now that the rest is configured. */
	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS1R_REG,
	    0, __BIT(19));
	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS0R_REG,
	    __BIT(21)|__BIT(20), __BIT(22));
	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS2R_REG,
	    __BIT(9)|__BIT(8)|__BIT(5), __BIT(7)|__BIT(6));
	delay(10);

	/* Kick off PHY power-up. */
	awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS0R_REG,
	    __BIT(19), 0);

	/* Wait (up to ~1ms) for the power state field to reach 2. */
	timeout = 1000;
	do {
		delay(1);
		v = bus_space_read_4(bst, bsh, AWIN_AHCI_PHYCS0R_REG);
	} while (--timeout && __SHIFTOUT(v, __BITS(30,28)) != 2);
	if (!timeout) {
		aprint_error_dev(
		    asc->asc_sc.sc_atac.atac_dev,
		    "SATA PHY power failed (%#x)\n", v);
	} else {
		/*
		 * Start PHY calibration and wait for the hardware to
		 * clear the trigger bit again (completion indication).
		 */
		awin_reg_set_clear(bst, bsh, AWIN_AHCI_PHYCS2R_REG,
		    __BIT(24), 0);
		timeout = 1000;
		do {
			delay(10);
			v = bus_space_read_4(bst, bsh,
			    AWIN_AHCI_PHYCS2R_REG);
		} while (--timeout && (v & __BIT(24)));
		if (!timeout) {
			aprint_error_dev(
			    asc->asc_sc.sc_atac.atac_dev,
			    "SATA PHY calibration failed (%#x)\n", v);
		}
	}
	delay(10);
	bus_space_write_4(bst, bsh, AWIN_AHCI_RWCR_REG, 7);
}
/*
 * Attach the Allwinner gigabit ethernet (GMAC) device: select the
 * chip-specific bus handle and pinset, map registers, establish the
 * interrupt, power the PHY, enable and reset the GMAC clock, program
 * the PHY clock mode, then hand off to the generic dwc_gmac core.
 */
static void
awin_gige_attach(device_t parent, device_t self, void *aux)
{
	struct awin_gige_softc * const sc = device_private(self);
	struct awinio_attach_args * const aio = aux;
	const struct awin_locators * const loc = &aio->aio_loc;
	struct awin_gpio_pinset pinset;
	prop_dictionary_t cfg = device_properties(self);
	uint32_t clkreg;
	const char *phy_type, *pin_name;
	bus_space_handle_t bsh;

	/* Pick the register bus handle and GPIO pinset for this SoC. */
	switch (awin_chip_id()) {
	case AWIN_CHIP_ID_A80:
		bsh = aio->aio_a80_core2_bsh;
		pinset = awin_gige_gpio_pinset_a80;
		break;
	case AWIN_CHIP_ID_A31:
		bsh = aio->aio_core_bsh;
		pinset = awin_gige_gpio_pinset_a31;
		break;
	default:
		bsh = aio->aio_core_bsh;
		pinset = awin_gige_gpio_pinset;
		break;
	}

	sc->sc_core.sc_dev = self;

	/* Board properties may override the default pin function. */
	prop_dictionary_get_uint8(cfg, "pinset-func", &pinset.pinset_func);

	awin_gpio_pinset_acquire(&pinset);

	sc->sc_core.sc_bst = aio->aio_core_bst;
	sc->sc_core.sc_dmat = aio->aio_dmat;
	bus_space_subregion(sc->sc_core.sc_bst, bsh,
	    loc->loc_offset, loc->loc_size, &sc->sc_core.sc_bsh);

	aprint_naive("\n");
	aprint_normal(": Gigabit Ethernet Controller\n");

	awin_gige_pmu_init(self);

	/*
	 * Interrupt handler
	 */
	sc->sc_ih = intr_establish(loc->loc_intr, IPL_NET, IST_LEVEL,
	    awin_gige_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "failed to establish interrupt %d\n",
		    loc->loc_intr);
		return;
	}
	aprint_normal_dev(self, "interrupting on irq %d\n",
	    loc->loc_intr);

	/*
	 * If the board declares a PHY power GPIO, reserve it and drive
	 * it high; a failed reservation is reported but non-fatal.
	 */
	if (prop_dictionary_get_cstring_nocopy(cfg, "phy-power", &pin_name)) {
		if (awin_gpio_pin_reserve(pin_name, &sc->sc_power_pin)) {
			awin_gpio_pindata_write(&sc->sc_power_pin, 1);
		} else {
			aprint_error_dev(self,
			    "failed to reserve GPIO \"%s\"\n", pin_name);
		}
	}

	/*
	 * Enable GMAC clock
	 */
	if (awin_chip_id() == AWIN_CHIP_ID_A80) {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
		    AWIN_A80_CCU_SCLK_BUS_CLK_GATING1_REG,
		    AWIN_A80_CCU_SCLK_BUS_CLK_GATING1_GMAC, 0);
	} else if (awin_chip_id() == AWIN_CHIP_ID_A31) {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
		    AWIN_AHB_GATING0_REG,
		    AWIN_A31_AHB_GATING0_GMAC, 0);
	} else if (awin_chip_id() == AWIN_CHIP_ID_A20) {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
		    AWIN_AHB_GATING1_REG,
		    AWIN_AHB_GATING1_GMAC, 0);
	}

	/*
	 * Soft reset
	 */
	if (awin_chip_id() == AWIN_CHIP_ID_A80) {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
		    AWIN_A80_CCU_SCLK_BUS_SOFT_RST1_REG,
		    AWIN_A80_CCU_SCLK_BUS_SOFT_RST1_GMAC, 0);
	} else if (awin_chip_id() == AWIN_CHIP_ID_A31) {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
		    AWIN_A31_AHB_RESET0_REG,
		    AWIN_A31_AHB_RESET0_GMAC_RST, 0);
	}

	/*
	 * PHY clock setup
	 */
	if (!prop_dictionary_get_cstring_nocopy(cfg, "phy-type", &phy_type))
		phy_type = "rgmii";	/* default when not specified */
	if (strcmp(phy_type, "rgmii") == 0) {
		clkreg = AWIN_GMAC_CLK_PIT | AWIN_GMAC_CLK_TCS_INT_RGMII;
	} else if (strcmp(phy_type, "rgmii-bpi") == 0) {
		clkreg = AWIN_GMAC_CLK_PIT | AWIN_GMAC_CLK_TCS_INT_RGMII;
		/*
		 * These magic bits seem to be necessary for RGMII at gigabit
		 * speeds on Banana Pi.
		 */
		clkreg |= __BITS(11,10);
	} else if (strcmp(phy_type, "gmii") == 0) {
		clkreg = AWIN_GMAC_CLK_TCS_INT_RGMII;
	} else if (strcmp(phy_type, "mii") == 0) {
		clkreg = AWIN_GMAC_CLK_TCS_MII;
	} else {
		panic("unknown phy type '%s'", phy_type);
	}

	/* Write the PHY clock mode into the chip-specific clock register. */
	if (awin_chip_id() == AWIN_CHIP_ID_A80) {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_a80_core2_bsh,
		    AWIN_A80_SYS_CTRL_OFFSET + AWIN_A80_SYS_CTRL_EMAC_CLK_REG,
		    clkreg, AWIN_GMAC_CLK_PIT|AWIN_GMAC_CLK_TCS);
	} else if (awin_chip_id() == AWIN_CHIP_ID_A31) {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
		    AWIN_A31_GMAC_CLK_REG,
		    clkreg, AWIN_GMAC_CLK_PIT|AWIN_GMAC_CLK_TCS);
	} else {
		awin_reg_set_clear(aio->aio_core_bst, aio->aio_ccm_bsh,
		    AWIN_GMAC_CLK_REG,
		    clkreg, AWIN_GMAC_CLK_PIT|AWIN_GMAC_CLK_TCS);
	}

	dwc_gmac_attach(&sc->sc_core, GMAC_MII_CLK_150_250M_DIV102);
}
/*
 * Initialize the machine-dependent part of the ACPI P-state tables:
 * detect the Intel "turbo" P0-state, then fill each P-state with the
 * vendor/family-specific MSR addresses and masks used to request and
 * verify frequency changes.
 *
 * Returns 0 on success, EOPNOTSUPP for an unknown AMD family without
 * XPSS data, or ENODEV for an unsupported CPU vendor.
 */
int
acpicpu_md_pstate_init(struct acpicpu_softc *sc)
{
	struct cpu_info *ci = sc->sc_ci;
	struct acpicpu_pstate *ps, msr;
	uint32_t family, i = 0;

	/* msr is a template merged into each P-state below. */
	(void)memset(&msr, 0, sizeof(struct acpicpu_pstate));

	switch (cpu_vendor) {

	case CPUVENDOR_IDT:
	case CPUVENDOR_INTEL:

		/*
		 * If the so-called Turbo Boost is present,
		 * the P0-state is always the "turbo state".
		 * It is shown as the P1 frequency + 1 MHz.
		 *
		 * For discussion, see:
		 *
		 *	Intel Corporation: Intel Turbo Boost Technology
		 *	in Intel Core(tm) Microarchitectures (Nehalem)
		 *	Based Processors. White Paper, November 2008.
		 */
		if (sc->sc_pstate_count >= 2 &&
		    (sc->sc_flags & ACPICPU_FLAG_P_TURBO) != 0) {

			ps = &sc->sc_pstate[0];

			if (ps->ps_freq == sc->sc_pstate[1].ps_freq + 1)
				ps->ps_flags |= ACPICPU_FLAG_P_TURBO;
		}

		msr.ps_control_addr = MSR_PERF_CTL;
		msr.ps_control_mask = __BITS(0, 15);

		msr.ps_status_addr = MSR_PERF_STATUS;
		msr.ps_status_mask = __BITS(0, 15);
		break;

	case CPUVENDOR_AMD:

		if ((sc->sc_flags & ACPICPU_FLAG_P_FIDVID) != 0)
			msr.ps_flags |= ACPICPU_FLAG_P_FIDVID;

		family = CPUID_TO_FAMILY(ci->ci_signature);

		switch (family) {

		case 0x0f:
			/* Family 0fh: FID/VID control, no mask needed. */
			msr.ps_control_addr = MSR_0FH_CONTROL;
			msr.ps_status_addr = MSR_0FH_STATUS;
			break;

		case 0x10:
		case 0x11:
		case 0x12:
		case 0x14:
		case 0x15:
			/* Families 10h-15h: 3-bit P-state selector. */
			msr.ps_control_addr = MSR_10H_CONTROL;
			msr.ps_control_mask = __BITS(0, 2);

			msr.ps_status_addr = MSR_10H_STATUS;
			msr.ps_status_mask = __BITS(0, 2);
			break;

		default:
			/*
			 * If we have an unknown AMD CPU, rely on XPSS.
			 */
			if ((sc->sc_flags & ACPICPU_FLAG_P_XPSS) == 0)
				return EOPNOTSUPP;
		}

		break;

	default:
		return ENODEV;
	}

	/*
	 * Fill the P-state structures with MSR addresses that are
	 * known to be correct. If we do not know the addresses,
	 * leave the values intact. If a vendor uses XPSS, we do
	 * not necessarily need to do anything to support new CPUs.
	 */
	while (i < sc->sc_pstate_count) {

		ps = &sc->sc_pstate[i];

		if (msr.ps_flags != 0)
			ps->ps_flags |= msr.ps_flags;

		if (msr.ps_status_addr != 0)
			ps->ps_status_addr = msr.ps_status_addr;

		if (msr.ps_status_mask != 0)
			ps->ps_status_mask = msr.ps_status_mask;

		if (msr.ps_control_addr != 0)
			ps->ps_control_addr = msr.ps_control_addr;

		if (msr.ps_control_mask != 0)
			ps->ps_control_mask = msr.ps_control_mask;

		i++;
	}

	return 0;
}
/*
 * Compute the output frequency of DPLL pll_no (1..IMX51_N_DPLLS) from
 * its control, operation and multiplication-factor registers:
 *
 *	freq = 4 * ref * (MFI + MFN/(MFD+1)) / (PDF+1)
 *
 * halved again unless the DPDCK0_2_EN doubler is enabled.  The result
 * is cached in ccm_softc->sc_pll[pll_no-1].pll_freq and returned.
 */
static uint64_t
imx51_get_pll_freq(u_int pll_no)
{
	uint32_t dp_ctrl;
	uint32_t dp_op;
	uint32_t dp_mfd;
	uint32_t dp_mfn;
	uint32_t mfi;
	int32_t mfn;		/* MFN is a signed 27-bit quantity */
	uint32_t mfd;
	uint32_t pdf;
	uint32_t ccr;
	uint64_t freq = 0;
	u_int ref = 0;
	bus_space_tag_t iot = ccm_softc->sc_iot;
	bus_space_handle_t ioh = ccm_softc->sc_pll[pll_no-1].pll_ioh;

	KASSERT(1 <= pll_no && pll_no <= IMX51_N_DPLLS);

	dp_ctrl = bus_space_read_4(iot, ioh, DPLL_DP_CTL);

	/* In high-frequency-support mode the HFS shadow registers apply. */
	if (dp_ctrl & DP_CTL_HFSM) {
		dp_op = bus_space_read_4(iot, ioh, DPLL_DP_HFS_OP);
		dp_mfd = bus_space_read_4(iot, ioh, DPLL_DP_HFS_MFD);
		dp_mfn = bus_space_read_4(iot, ioh, DPLL_DP_HFS_MFN);
	} else {
		dp_op = bus_space_read_4(iot, ioh, DPLL_DP_OP);
		dp_mfd = bus_space_read_4(iot, ioh, DPLL_DP_MFD);
		dp_mfn = bus_space_read_4(iot, ioh, DPLL_DP_MFN);
	}

	pdf = dp_op & DP_OP_PDF;
	/* MFI values below 5 are treated as 5 by the hardware. */
	mfi = max(5, __SHIFTOUT(dp_op, DP_OP_MFI));
	mfd = dp_mfd;
	if (dp_mfn & __BIT(26))
		/* 27bit signed value */
		mfn = (int32_t)(__BITS(31,27) | dp_mfn);
	else
		mfn = dp_mfn;

	/* Determine the reference clock frequency. */
	switch (dp_ctrl & DP_CTL_REF_CLK_SEL) {
	case DP_CTL_REF_CLK_SEL_COSC:
		/* Internal Oscillator */
		ref = IMX51_OSC_FREQ;
		break;
	case DP_CTL_REF_CLK_SEL_FPM:
		/* Frequency pre-multiplier fed from the 32kHz CKIL. */
		ccr = bus_space_read_4(iot, ccm_softc->sc_ioh, CCMC_CCR);
		if (ccr & CCR_FPM_MULT)
			ref = IMX51_CKIL_FREQ * 1024;
		else
			ref = IMX51_CKIL_FREQ * 512;
		break;
	default:
		ref = 0;
	}

	if (dp_ctrl & DP_CTL_REF_CLK_DIV)
		ref /= 2;

#if 0
	if (dp_ctrl & DP_CTL_DPDCK0_2_EN)
		ref *= 2;

	ref /= (pdf + 1);
	freq = ref * mfn;
	freq /= (mfd + 1);
	freq = (ref * mfi) + freq;
#endif
	/*
	 * 64-bit arithmetic to avoid overflow; mfn may be negative,
	 * in which case the fractional term subtracts from the total.
	 */
	ref *= 4;
	freq = (int64_t)ref * mfi + (int64_t)ref * mfn / (mfd + 1);
	freq /= pdf + 1;
	if (!(dp_ctrl & DP_CTL_DPDCK0_2_EN))
		freq /= 2;

#ifdef IMXCCMDEBUG
	printf("dp_ctl: %08x ", dp_ctrl);
	printf("pdf: %3d ", pdf);
	printf("mfi: %3d ", mfi);
	printf("mfd: %3d ", mfd);
	printf("mfn: %3d ", mfn);
	printf("pll: %lld\n", freq);
#endif

	ccm_softc->sc_pll[pll_no-1].pll_freq = freq;

	return freq;
}