void generate_cpu_entries(void)
{
	int len_pr, core;
	int pcontrol_blk = get_pmbase(), plen = 6;
	const struct pattrs *pattrs = pattrs_get();

	for (core = 0; core < pattrs->num_cpus; core++) {
		if (core > 0) {
			pcontrol_blk = 0;
			plen = 0;
		}

		/* Generate processor \_PR.CPUx */
		len_pr = acpigen_write_processor(core, pcontrol_blk, plen);

		/* Generate P-state tables */
		len_pr += generate_P_state_entries(core, pattrs->num_cpus);

		/* Generate C-state tables */
		len_pr += acpigen_write_CST_package(cstate_map,
						    ARRAY_SIZE(cstate_map));

		/* Generate T-state tables */
		len_pr += generate_T_state_entries(core, pattrs->num_cpus);

		len_pr--;
		acpigen_patch_len(len_pr);
	}
}
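/*
 * Note on the length patching above: the legacy acpigen writers return the
 * number of bytes they emitted, and the running total is handed to
 * acpigen_patch_len() to fix up the processor package's PkgLength field.
 * The len_pr-- appears to account for a byte that the PkgLength value must
 * not include (likely the opcode prefix); this is inferred from the same
 * idiom at the end of generate_P_state_entries() below, not verified
 * against acpigen's internals.
 */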
static void lpe_stash_firmware_info(device_t dev)
{
	struct resource *res;
	struct resource *mmio;
	const struct pattrs *pattrs = pattrs_get();

	res = find_resource(dev, FIRMWARE_PCI_REG_BASE);
	if (res == NULL) {
		printk(BIOS_DEBUG, "LPE Firmware memory not found.\n");
		return;
	}

	/* Continue using old way of informing firmware address / size. */
	pci_write_config32(dev, FIRMWARE_PCI_REG_BASE, res->base);
	pci_write_config32(dev, FIRMWARE_PCI_REG_LENGTH, res->size);

	/* C0 and later steppings use an offset in the MMIO space. */
	if (pattrs->stepping >= STEP_C0) {
		mmio = find_resource(dev, PCI_BASE_ADDRESS_0);
		write32((u32 *)(uintptr_t)(mmio->base + FIRMWARE_REG_BASE_C0),
			res->base);
		write32((u32 *)(uintptr_t)(mmio->base + FIRMWARE_REG_LENGTH_C0),
			res->size);
	}
}
static void get_microcode_info(const void **microcode, int *parallel)
{
	const struct pattrs *pattrs = pattrs_get();

	*microcode = pattrs->microcode_patch;
	*parallel = 1;
}
static int smm_load_handlers(void)
{
	/* All range registers are aligned to 4KiB */
	const uint32_t rmask = ~((1 << 12) - 1);
	const struct pattrs *pattrs = pattrs_get();

	/* Initialize global tracking state. */
	relo_attrs.smbase = (uint32_t)smm_region_start();
	relo_attrs.smrr_base = relo_attrs.smbase | MTRR_TYPE_WRBACK;
	relo_attrs.smrr_mask = ~(smm_region_size() - 1) & rmask;
	relo_attrs.smrr_mask |= MTRR_PHYS_MASK_VALID;

	/* Install handlers. */
	if (install_relocation_handler(pattrs->num_cpus) < 0) {
		printk(BIOS_ERR, "Unable to install SMM relocation handler.\n");
		return -1;
	}

	if (install_permanent_handler(pattrs->num_cpus) < 0) {
		printk(BIOS_ERR, "Unable to install SMM permanent handler.\n");
		return -1;
	}

	/* Ensure the SMM handlers hit DRAM before performing first SMI. */
	wbinvd();

	return 0;
}
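/*
 * Worked example of the SMRR encoding above (region address and size are
 * hypothetical): for an 8 MiB SMM region starting at 0x7f800000, with
 * rmask = 0xfffff000, MTRR_TYPE_WRBACK = 6 and MTRR_PHYS_MASK_VALID =
 * (1 << 11):
 *
 *	smrr_base = 0x7f800000 | 6           -> 0x7f800006
 *	smrr_mask = ~(0x800000 - 1) & rmask  -> 0xff800000
 *	smrr_mask |= MTRR_PHYS_MASK_VALID    -> 0xff800800
 */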
static void fill_in_pattrs(void)
{
	struct device *dev;
	msr_t msr;
	struct pattrs *attrs = (struct pattrs *)pattrs_get();

	attrs->cpuid = cpuid_eax(1);

	dev = pcidev_on_root(LPC_DEV, LPC_FUNC);
	attrs->revid = pci_read_config8(dev, REVID);

	/* The revision-to-stepping mapping has two revision IDs per metal
	 * stepping. */
	if (attrs->revid >= RID_D_STEPPING_START) {
		attrs->stepping = (attrs->revid - RID_D_STEPPING_START) / 2;
		attrs->stepping += STEP_D0;
	} else if (attrs->revid >= RID_C_STEPPING_START) {
		attrs->stepping = (attrs->revid - RID_C_STEPPING_START) / 2;
		attrs->stepping += STEP_C0;
	} else if (attrs->revid >= RID_B_STEPPING_START) {
		attrs->stepping = (attrs->revid - RID_B_STEPPING_START) / 2;
		attrs->stepping += STEP_B0;
	} else {
		attrs->stepping = (attrs->revid - RID_A_STEPPING_START) / 2;
		attrs->stepping += STEP_A0;
	}

	attrs->microcode_patch = intel_microcode_find();
	attrs->address_bits = cpuid_eax(0x80000008) & 0xff;
	detect_num_cpus(attrs);

	if (SHOW_PATTRS) {
		printk(BIOS_DEBUG,
		       "CPUID: %08x\nCores: %d\nRevision ID: %02x\nStepping: %s\n",
		       attrs->cpuid, attrs->num_cpus, attrs->revid,
		       (attrs->stepping >= ARRAY_SIZE(stepping_str)) ? "??"
		       : stepping_str[attrs->stepping]);
	}

	fill_in_msr(&attrs->platform_id, IA32_PLATFORM_ID);
	fill_in_msr(&attrs->platform_info, MSR_PLATFORM_INFO);

	/* Set IA core speed ratio and voltages */
	msr = rdmsr(MSR_IACORE_RATIOS);
	attrs->iacore_ratios[IACORE_MIN] = msr.lo & 0x7f;
	attrs->iacore_ratios[IACORE_LFM] = (msr.lo >> 8) & 0x7f;
	attrs->iacore_ratios[IACORE_MAX] = (msr.lo >> 16) & 0x7f;
	msr = rdmsr(MSR_IACORE_TURBO_RATIOS);
	attrs->iacore_ratios[IACORE_TURBO] = (msr.lo & 0xff); /* 1 core max */

	msr = rdmsr(MSR_IACORE_VIDS);
	attrs->iacore_vids[IACORE_MIN] = msr.lo & 0x7f;
	attrs->iacore_vids[IACORE_LFM] = (msr.lo >> 8) & 0x7f;
	attrs->iacore_vids[IACORE_MAX] = (msr.lo >> 16) & 0x7f;
	msr = rdmsr(MSR_IACORE_TURBO_VIDS);
	attrs->iacore_vids[IACORE_TURBO] = (msr.lo & 0xff); /* 1 core max */

	/* Set bus clock speed */
	attrs->bclk_khz = bus_freq_khz();
}
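/*
 * Illustration of the revid-to-stepping math above: each metal stepping
 * spans two consecutive revision IDs, so dividing the offset from the
 * series' first revision ID by two selects the stepping within the series.
 * Assuming a hypothetical RID_C_STEPPING_START of 0x0e:
 *
 *	revid 0x0e or 0x0f -> offset 0 or 1 -> /2 = 0 -> STEP_C0
 *	revid 0x10 or 0x11 -> offset 2 or 3 -> /2 = 1 -> STEP_C0 + 1
 */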
static void pre_smm_relocation(void *unused)
{
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr_value;

	/* Need to make sure that all cores have microcode loaded. */
	msr_value = rdmsr(MSR_IA32_BIOS_SIGN_ID);
	if (msr_value.hi == 0)
		intel_microcode_load_unlocked(pattrs->microcode_patch);
}
static void per_cpu_smm_trigger(void)
{
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr_value;

	/* Need to make sure that all cores have microcode loaded. */
	msr_value = rdmsr(MSR_IA32_BIOS_SIGN_ID);
	if (msr_value.hi == 0)
		intel_microcode_load_unlocked(pattrs->microcode_patch);

	/* Relocate SMM space. */
	smm_initiate_relocation();

	/* Load microcode after SMM relocation. */
	intel_microcode_load_unlocked(pattrs->microcode_patch);
}
static void xhci_init(device_t dev)
{
	struct soc_intel_baytrail_config *config = dev->chip_info;
	struct reg_script xhci_hc_init[] = {
		/* Initialize clock gating */
		REG_SCRIPT_NEXT(xhci_clock_gating_script),

		/* Finalize XHCC1 and XHCC2 */
		REG_PCI_RMW32(0x44, ~0x00000000, 0x83c00000),
		REG_PCI_RMW32(0x40, ~0x00800000, 0x80000000),

		/* Set USB2 Port Routing Mask */
		REG_PCI_WRITE32(XHCI_USB2PRM, BYTM_USB2_PORT_MAP),

		/* Set USB3 Port Routing Mask */
		REG_PCI_WRITE32(XHCI_USB3PRM, BYTM_USB3_PORT_MAP),

		/* Disable ports if requested */

		/* Open per-port disable control override */
		REG_IO_RMW16(ACPI_BASE_ADDRESS + UPRWC, ~0, UPRWC_WR_EN),
		REG_PCI_WRITE32(XHCI_USB2PDO, config->usb2_port_disable_mask),
		REG_PCI_WRITE32(XHCI_USB3PDO, config->usb3_port_disable_mask),

		/* Close per-port disable control override */
		REG_IO_RMW16(ACPI_BASE_ADDRESS + UPRWC, ~UPRWC_WR_EN, 0),

		REG_SCRIPT_END
	};

	/* Initialize XHCI controller for boot or resume path */
	if (acpi_is_wakeup_s3())
		reg_script_run_on_dev(dev, xhci_init_resume_script);
	else
		reg_script_run_on_dev(dev, xhci_init_boot_script);

	/* C0 steppings change iCLK/USB PLL VCO settings from 5 to 7 */
	if (pattrs_get()->stepping == STEP_C0) {
		uint32_t reg = iosf_ushphy_read(USHPHY_CDN_PLL_CONTROL);
		reg |= 0x00700000;
		iosf_ushphy_write(USHPHY_CDN_PLL_CONTROL, reg);
	}

	/* Finalize Initialization */
	reg_script_run_on_dev(dev, xhci_hc_init);

	/* Route all ports to XHCI if requested */
	if (config->usb_route_to_xhci)
		xhci_route_all(dev);
}
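/*
 * Note on the C0 workaround above: OR-ing 0x00700000 sets bits [22:20] of
 * USHPHY_CDN_PLL_CONTROL to 0b111, taking the VCO field from its default
 * of 5 (0b101) to 7, as the in-code comment describes. The exact field
 * layout is inferred from the mask, not from a datasheet.
 */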
void soc_init_cpus(device_t dev)
{
	struct bus *cpu_bus = dev->link_list;
	const struct pattrs *pattrs = pattrs_get();
	struct mp_params mp_params;
	void *default_smm_area;
	uint32_t bsmrwac;

	printk(BIOS_SPEW, "%s/%s ( %s )\n", __FILE__, __func__, dev_name(dev));

	/* Set up MTRRs based on physical address size. */
	x86_setup_fixed_mtrrs();
	x86_setup_var_mtrrs(pattrs->address_bits, 2);
	x86_mtrr_check();

	mp_params.num_cpus = pattrs->num_cpus;
	mp_params.parallel_microcode_load = 1;
	mp_params.adjust_apic_id = adjust_apic_id;
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);
	mp_params.microcode_pointer = pattrs->microcode_patch;

	default_smm_area = backup_default_smm_area();

	/*
	 * Configure the BUNIT to allow dirty cache line evictions in non-SMM
	 * mode for the lines that were dirtied while in SMM mode. Otherwise
	 * the writes would be silently dropped.
	 */
	bsmrwac = iosf_bunit_read(BUNIT_SMRWAC) | SAI_IA_UNTRUSTED;
	iosf_bunit_write(BUNIT_SMRWAC, bsmrwac);

	/* Set package MSRs */
	reg_script_run(package_msr_script);

	/* Enable Turbo Mode on BSP and siblings of the BSP's building block. */
	enable_turbo();

	if (mp_init(cpu_bus, &mp_params))
		printk(BIOS_ERR, "MP initialization failure.\n");

	restore_default_smm_area(default_smm_area);
}
static void smm_relocate(void *unused)
{
	const struct pattrs *pattrs = pattrs_get();

	/* Load relocation and permanent handler. */
	if (boot_cpu()) {
		if (smm_load_handlers() < 0) {
			printk(BIOS_ERR, "Error loading SMM handlers.\n");
			return;
		}
		southcluster_smm_clear_state();
	}

	/* Relocate SMM space. */
	smm_initiate_relocation();

	/* Load microcode after SMM relocation. */
	intel_microcode_load_unlocked(pattrs->microcode_patch);
}
void baytrail_init_cpus(device_t dev)
{
	struct bus *cpu_bus = dev->link_list;
	const struct pattrs *pattrs = pattrs_get();
	struct mp_params mp_params;

	x86_mtrr_check();

	/* Enable the local CPU APICs */
	setup_lapic();

	mp_params.num_cpus = pattrs->num_cpus;
	mp_params.parallel_microcode_load = 1;
	mp_params.adjust_apic_id = adjust_apic_id;
	mp_params.flight_plan = &mp_steps[0];
	mp_params.num_records = ARRAY_SIZE(mp_steps);
	mp_params.microcode_pointer = pattrs->microcode_patch;

	if (mp_init(cpu_bus, &mp_params)) {
		printk(BIOS_ERR, "MP initialization failure.\n");
	}
}
static int generate_P_state_entries(int core, int cores_per_package)
{
	int len, len_pss;
	int ratio_min, ratio_max, ratio_turbo, ratio_step, ratio_range_2;
	int coord_type, power_max, power_unit, num_entries;
	int ratio, power, clock, clock_max;
	int vid, vid_turbo, vid_min, vid_max, vid_range_2;
	u32 control_status;
	const struct pattrs *pattrs = pattrs_get();
	msr_t msr;

	/* Inputs from CPU attributes */
	ratio_max = pattrs->iacore_ratios[IACORE_MAX];
	ratio_min = pattrs->iacore_ratios[IACORE_LFM];
	vid_max = pattrs->iacore_vids[IACORE_MAX];
	vid_min = pattrs->iacore_vids[IACORE_LFM];

	/* Hardware coordination of P-states */
	coord_type = HW_ALL;

	/* Max Non-Turbo Frequency */
	clock_max = (ratio_max * pattrs->bclk_khz) / 1000;

	/* Calculate CPU TDP in mW */
	msr = rdmsr(MSR_PKG_POWER_SKU_UNIT);
	power_unit = 1 << (msr.lo & 0xf);
	msr = rdmsr(MSR_PKG_POWER_LIMIT);
	power_max = ((msr.lo & 0x7fff) / power_unit) * 1000;

	/* Write _PCT indicating use of FFixedHW */
	len = acpigen_write_empty_PCT();

	/* Write _PPC with NVS specified limit on supported P-state */
	len += acpigen_write_PPC_NVS();

	/* Write PSD indicating configured coordination type */
	len += acpigen_write_PSD_package(core, 1, coord_type);

	/* Add P-state entries in _PSS table */
	len += acpigen_write_name("_PSS");

	/* Determine ratio points */
	ratio_step = 1;
	num_entries = (ratio_max - ratio_min) / ratio_step;
	while (num_entries > 15) { /* ACPI max is 15 ratios */
		ratio_step <<= 1;
		num_entries >>= 1;
	}

	/* P[T] is Turbo state if enabled */
	if (get_turbo_state() == TURBO_ENABLED) {
		/* _PSS package count including Turbo */
		len_pss = acpigen_write_package(num_entries + 2);

		ratio_turbo = pattrs->iacore_ratios[IACORE_TURBO];
		vid_turbo = pattrs->iacore_vids[IACORE_TURBO];
		control_status = (ratio_turbo << 8) | vid_turbo;

		/* Add entry for Turbo ratio */
		len_pss += acpigen_write_PSS_package(
			clock_max + 1,		/* MHz */
			power_max,		/* mW */
			10,			/* lat1 */
			10,			/* lat2 */
			control_status,		/* control */
			control_status);	/* status */
	} else {
		/* _PSS package count without Turbo */
		len_pss = acpigen_write_package(num_entries + 1);
		ratio_turbo = ratio_max;
		vid_turbo = vid_max;
	}

	/* First regular entry is max non-turbo ratio */
	control_status = (ratio_max << 8) | vid_max;
	len_pss += acpigen_write_PSS_package(
		clock_max,		/* MHz */
		power_max,		/* mW */
		10,			/* lat1 */
		10,			/* lat2 */
		control_status,		/* control */
		control_status);	/* status */

	/* Set up ratio and vid ranges for VID calculation */
	ratio_range_2 = (ratio_turbo - ratio_min) * 2;
	vid_range_2 = (vid_turbo - vid_min) * 2;

	/* Generate the remaining entries */
	for (ratio = ratio_min + ((num_entries - 1) * ratio_step);
	     ratio >= ratio_min; ratio -= ratio_step) {
		/* Calculate VID for this ratio */
		vid = ((ratio - ratio_min) * vid_range_2) /
		      ratio_range_2 + vid_min;

		/* Round up if remainder */
		if (((ratio - ratio_min) * vid_range_2) % ratio_range_2)
			vid++;

		/* Calculate power at this ratio */
		power = calculate_power(power_max, ratio_max, ratio);
		clock = (ratio * pattrs->bclk_khz) / 1000;
		control_status = (ratio << 8) | (vid & 0xff);

		len_pss += acpigen_write_PSS_package(
			clock,			/* MHz */
			power,			/* mW */
			10,			/* lat1 */
			10,			/* lat2 */
			control_status,		/* control */
			control_status);	/* status */
	}

	/* Fix package length */
	len_pss--;
	acpigen_patch_len(len_pss);

	return len + len_pss;
}
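/*
 * Worked example of the VID interpolation in the loop above (numbers
 * hypothetical): with ratio_min = 6, ratio_turbo = 22, vid_min = 24 and
 * vid_turbo = 64, the ranges are ratio_range_2 = (22 - 6) * 2 = 32 and
 * vid_range_2 = (64 - 24) * 2 = 80:
 *
 *	ratio = 14: (14 - 6) * 80 / 32 = 20, remainder 0  -> vid = 20 + 24 = 44
 *	ratio = 13: (13 - 6) * 80 / 32 = 17, remainder 16 -> vid = 17 + 24 + 1 = 42
 *
 * The round-up on a nonzero remainder biases the interpolated VID upward,
 * so the requested voltage is never below the linear fit.
 */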
static int get_cpu_count(void)
{
	const struct pattrs *pattrs = pattrs_get();

	return pattrs->num_cpus;
}