/*
 * Initialize the ACPI CA subsystem if it hasn't been done already.
 *
 * Returns AE_OK on success (or if initialization was already complete),
 * AE_ERROR if ACPI has been disabled via user options, or the failing
 * ACPICA status code otherwise.  Serialized by acpica_module_lock, so
 * concurrent callers are safe and re-initialization is a no-op.
 */
int
acpica_init()
{
	extern void acpica_find_ioapics(void);
	ACPI_STATUS status;

	/*
	 * Make sure user options are processed,
	 * then fail to initialize if ACPI CA has been
	 * disabled
	 */
	acpica_process_user_options();
	if (!acpica_enable)
		return (AE_ERROR);

	mutex_enter(&acpica_module_lock);
	if (acpica_init_state == ACPICA_INITIALIZED) {
		/* A previous caller already completed initialization. */
		mutex_exit(&acpica_module_lock);
		return (AE_OK);
	}
	if (ACPI_FAILURE(status = AcpiLoadTables()))
		goto error;

	if (ACPI_FAILURE(status = acpica_install_handlers()))
		goto error;

	/*
	 * Create ACPI-to-devinfo mapping now so _INI and _STA
	 * methods can access PCI config space when needed
	 */
	scan_d2a_map();

	if (ACPI_FAILURE(status = AcpiEnableSubsystem(acpi_init_level)))
		goto error;

	/* do after AcpiEnableSubsystem() so GPEs are initialized */
	acpica_ec_init();	/* initialize EC if present */

	/* This runs all device _STA and _INI methods. */
	if (ACPI_FAILURE(status = AcpiInitializeObjects(0)))
		goto error;

	acpica_init_state = ACPICA_INITIALIZED;

	/*
	 * [ACPI, sec. 4.4.1.1]
	 * As of ACPICA version 20101217 (December 2010), the _PRW methods
	 * (Power Resources for Wake) are no longer automatically executed
	 * as part of the ACPICA initialization. The OS must do this.
	 */
	(void) AcpiWalkNamespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT,
	    UINT32_MAX, acpica_init_PRW, NULL, NULL, NULL);
	(void) AcpiUpdateAllGpes();

	/*
	 * If we are running on the Xen hypervisor as dom0 we need to
	 * find the ioapics so we can prevent ACPI from trying to
	 * access them.
	 */
	if (get_hwenv() == HW_XEN_PV && is_controldom())
		acpica_find_ioapics();
	acpica_init_kstats();
error:
	/*
	 * NOTE: the success path deliberately falls through to this label;
	 * the init-state check below distinguishes success from failure.
	 */
	if (acpica_init_state != ACPICA_INITIALIZED) {
		cmn_err(CE_NOTE, "!failed to initialize ACPI services");
	}

	/*
	 * Set acpi-status to 13 if acpica has been initialized successfully.
	 * This indicates that acpica is up and running.  This variable name
	 * and value were chosen in order to remain compatible with acpi_intp.
	 */
	e_ddi_prop_update_int(DDI_DEV_T_NONE, ddi_root_node(), "acpi-status",
	    (ACPI_SUCCESS(status)) ? (ACPI_BOOT_INIT | ACPI_BOOT_ENABLE |
	    ACPI_BOOT_BOOTCONF) : 0);

	/* Mark acpica subsystem as fully initialized. */
	if (ACPI_SUCCESS(status) &&
	    acpi_init_level == ACPI_FULL_INITIALIZATION) {
		acpica_set_core_feature(ACPI_FEATURE_FULL_INIT);
	}

	mutex_exit(&acpica_module_lock);
	return (status);
}
/*
 * Synthesize AMD socket type, chip revision, and revision string for the
 * given family/model/stepping by consulting the amd_revmap table.  Any of
 * the output pointers may be NULL to skip that result.  Does nothing for
 * pre-0xf families or for family/model/stepping combinations not covered
 * by the table.
 */
static void
synth_amd_info(uint_t family, uint_t model, uint_t step,
    uint32_t *skt_p, uint32_t *chiprev_p, const char **chiprevstr_p)
{
	const struct amd_rev_mapent *ent = NULL;
	size_t nents = sizeof (amd_revmap) / sizeof (amd_revmap[0]);
	size_t i;

	/* Synthesis is only defined for family 0xf and later. */
	if (family < 0xf)
		return;

	/* Find the revision-map entry covering this family/model/step. */
	for (i = 0; i < nents; i++) {
		const struct amd_rev_mapent *mp = &amd_revmap[i];

		if (family == mp->rm_family &&
		    model >= mp->rm_modello && model <= mp->rm_modelhi &&
		    step >= mp->rm_steplo && step <= mp->rm_stephi) {
			ent = mp;
			break;
		}
	}

	if (ent == NULL)
		return;

	if (chiprev_p != NULL)
		*chiprev_p = ent->rm_chiprev;
	if (chiprevstr_p != NULL)
		*chiprevstr_p = ent->rm_chiprevstr;

	if (skt_p == NULL)
		return;

#ifdef __xpv
	/* PV guest */
	if (!is_controldom()) {
		*skt_p = X86_SOCKET_UNKNOWN;
		return;
	}
#endif
	{
		int hwenv = get_hwenv();

		if ((hwenv == HW_XEN_HVM) || (hwenv == HW_VMWARE)) {
			*skt_p = X86_SOCKET_UNKNOWN;
			return;
		}
	}

	if (family == 0xf) {
		*skt_p = amd_skts[ent->rm_sktidx][model & 0x3];
		return;
	}

	/*
	 * Starting with family 10h, socket type is stored in
	 * CPUID Fn8000_0001_EBX
	 */
	{
		struct cpuid_regs regs;
		int pkg;

		regs.cp_eax = 0x80000001;
		(void) __cpuid_insn(&regs);

		/* PkgType bits */
		pkg = BITX(regs.cp_ebx, 31, 28);
		if (pkg > 7) {
			/* Reserved bits */
			*skt_p = X86_SOCKET_UNKNOWN;
		} else if (family == 0x10 &&
		    amd_skts[ent->rm_sktidx][pkg] == X86_SOCKET_AM) {
			/*
			 * Look at Ddr3Mode bit of DRAM Configuration
			 * High Register to decide whether this is
			 * AM2r2 (aka AM2+) or AM3.
			 */
			uint32_t dcfg = pci_getl_func(0, 24, 2, 0x94);

			*skt_p = BITX(dcfg, 8, 8) ?
			    X86_SOCKET_AM3 : X86_SOCKET_AM2R2;
		} else {
			*skt_p = amd_skts[ent->rm_sktidx][pkg];
		}
	}
}