int
acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
    int intr_enabled)
{

	if (sleep_result == -1)
		return (sleep_result);

	if (!intr_enabled) {
		/* Wakeup MD procedures in interrupt disabled context */
		if (sleep_result == 1) {
			pmap_init_pat();
			initializecpu();
			PCPU_SET(switchtime, 0);
			PCPU_SET(switchticks, ticks);
#ifdef DEV_APIC
			lapic_xapic_mode();
#endif
#ifdef SMP
			if (!CPU_EMPTY(&suspcpus))
				acpi_wakeup_cpus(sc);
#endif
		}

#ifdef SMP
		if (!CPU_EMPTY(&suspcpus))
			restart_cpus(suspcpus);
#endif
		mca_resume();
#ifdef __amd64__
		if (vmm_resume_p != NULL)
			vmm_resume_p();
#endif
		intr_resume(/*suspend_cancelled*/false);

		AcpiSetFirmwareWakingVector(0, 0);
	} else {
		/* Wakeup MD procedures in interrupt enabled context */
		if (sleep_result == 1 && mem_range_softc.mr_op != NULL &&
		    mem_range_softc.mr_op->reinit != NULL)
			mem_range_softc.mr_op->reinit(&mem_range_softc);
	}

	return (sleep_result);
}

int
acpi_wakeup_machdep(struct acpi_softc *sc, int state, int sleep_result,
    int intr_enabled)
{

	if (sleep_result == -1)
		return (sleep_result);

	if (intr_enabled == 0) {
		/* Wakeup MD procedures in interrupt disabled context */
		if (sleep_result == 1) {
			pmap_init_pat();
			load_cr3(susppcbs[0]->pcb_cr3);
			initializecpu();
			PCPU_SET(switchtime, 0);
			PCPU_SET(switchticks, ticks);
#ifdef SMP
			if (!CPU_EMPTY(&suspcpus))
				acpi_wakeup_cpus(sc, &suspcpus);
#endif
		}

#ifdef SMP
		if (!CPU_EMPTY(&suspcpus))
			restart_cpus(suspcpus);
#endif
		mca_resume();
		intr_resume();
	} else {
		/* Wakeup MD procedures in interrupt enabled context */
		AcpiSetFirmwareWakingVector(0);

		if (sleep_result == 1 && mem_range_softc.mr_op != NULL &&
		    mem_range_softc.mr_op->reinit != NULL)
			mem_range_softc.mr_op->reinit(&mem_range_softc);
	}

	return (sleep_result);
}
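
/*
 * A minimal caller-side sketch (an assumption for illustration, not the
 * actual MI ACPI sleep code): the machine-dependent wakeup hook is
 * expected to be invoked twice after the MD sleep routine returns --
 * first while interrupts are still disabled, then again once they have
 * been re-enabled -- so that each branch on intr_enabled above runs
 * exactly once per resume, and sleep_result is simply passed through.
 * The wrapper name below is hypothetical.
 */
static void
acpi_sleep_resume_sketch(struct acpi_softc *sc, int state)
{
	int sleep_result;

	sleep_result = acpi_sleep_machdep(sc, state);
	acpi_wakeup_machdep(sc, state, sleep_result, 0);  /* interrupts still off */
	/* ... MI code re-enables interrupts here ... */
	acpi_wakeup_machdep(sc, state, sleep_result, 1);  /* interrupts back on */
}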

/*
 * AP cpu's call this to sync up protected mode.
 *
 * WARNING! %gs is not set up on entry.  This routine sets up %gs.
 */
void
init_secondary(void)
{
	int	gsel_tss;
	int	x, myid = bootAP;
	u_int64_t msr, cr0;
	struct mdglobaldata *md;
	struct privatespace *ps;

	ps = &CPU_prvspace[myid];

	gdt_segs[GPROC0_SEL].ssd_base =
		(long) &ps->mdglobaldata.gd_common_tss;
	ps->mdglobaldata.mi.gd_prvspace = ps;

	/* We fill the 32-bit segment descriptors */
	for (x = 0; x < NGDT; x++) {
		if (x != GPROC0_SEL && x != (GPROC0_SEL + 1))
			ssdtosd(&gdt_segs[x], &gdt[myid * NGDT + x]);
	}
	/* And now a 64-bit one */
	ssdtosyssd(&gdt_segs[GPROC0_SEL],
	    (struct system_segment_descriptor *)&gdt[myid * NGDT + GPROC0_SEL]);

	r_gdt.rd_limit = NGDT * sizeof(gdt[0]) - 1;
	r_gdt.rd_base = (long) &gdt[myid * NGDT];
	lgdt(&r_gdt);			/* does magic intra-segment return */

	/* lgdt() destroys the GSBASE value, so we load GSBASE after lgdt() */
	wrmsr(MSR_FSBASE, 0);		/* User value */
	wrmsr(MSR_GSBASE, (u_int64_t)ps);
	wrmsr(MSR_KGSBASE, 0);		/* XXX User value while we're in the kernel */

	lidt(&r_idt_arr[mdcpu->mi.gd_cpuid]);

#if 0
	lldt(_default_ldt);
	mdcpu->gd_currentldt = _default_ldt;
#endif

	gsel_tss = GSEL(GPROC0_SEL, SEL_KPL);
	gdt[myid * NGDT + GPROC0_SEL].sd_type = SDT_SYSTSS;

	md = mdcpu;	/* loaded through %gs:0 (mdglobaldata.mi.gd_prvspace) */

	md->gd_common_tss.tss_rsp0 = 0;	/* not used until after switch */
#if 0 /* JG XXX */
	md->gd_common_tss.tss_ioopt = (sizeof md->gd_common_tss) << 16;
#endif
	md->gd_tss_gdt = &gdt[myid * NGDT + GPROC0_SEL];
	md->gd_common_tssd = *md->gd_tss_gdt;

	/* double fault stack */
	md->gd_common_tss.tss_ist1 =
		(long)&md->mi.gd_prvspace->idlestack[
			sizeof(md->mi.gd_prvspace->idlestack)];

	ltr(gsel_tss);

	/*
	 * Set to a known state:
	 * Set by mpboot.s: CR0_PG, CR0_PE
	 * Set by cpu_setregs: CR0_NE, CR0_MP, CR0_TS, CR0_WP, CR0_AM
	 */
	cr0 = rcr0();
	cr0 &= ~(CR0_CD | CR0_NW | CR0_EM);
	load_cr0(cr0);

	/* Set up the fast syscall stuff */
	msr = rdmsr(MSR_EFER) | EFER_SCE;
	wrmsr(MSR_EFER, msr);
	wrmsr(MSR_LSTAR, (u_int64_t)IDTVEC(fast_syscall));
	wrmsr(MSR_CSTAR, (u_int64_t)IDTVEC(fast_syscall32));
	msr = ((u_int64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((u_int64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);
	wrmsr(MSR_STAR, msr);
	wrmsr(MSR_SF_MASK, PSL_NT|PSL_T|PSL_I|PSL_C|PSL_D|PSL_IOPL);

	pmap_set_opt();		/* PSE/4MB pages, etc */
	pmap_init_pat();	/* Page Attribute Table */

	/* set up CPU registers and state */
	cpu_setregs();

	/* set up SSE/NX registers */
	initializecpu(myid);

	/* set up FPU state on the AP */
	npxinit(__INITIAL_FPUCW__);

	/* disable the APIC, just to be SURE */
	lapic->svr &= ~APIC_SVR_ENABLE;
}
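
/*
 * Standalone illustration of the MSR_STAR layout programmed above (a
 * sketch with assumed GDT indices, not the kernel's segments.h): bits
 * 47:32 hold the selector loaded into %cs on SYSCALL (with %ss taken as
 * that value + 8), and bits 63:48 hold the base selector SYSRET uses to
 * rebuild the user %cs/%ss.  Compile and run it by itself to see how
 * the two GSEL() values end up packed into one 64-bit MSR value.
 */
#include <stdio.h>
#include <stdint.h>

#define SEL_KPL		0			/* kernel privilege level */
#define SEL_UPL		3			/* user privilege level */
#define GSEL(s, r)	(((s) << 3) | (r))	/* GDT selector: index*8 | RPL */

#define GCODE_SEL	4			/* assumed kernel code GDT index */
#define GUCODE32_SEL	6			/* assumed 32-bit user code GDT index */

int
main(void)
{
	uint64_t star;

	star = ((uint64_t)GSEL(GCODE_SEL, SEL_KPL) << 32) |
	    ((uint64_t)GSEL(GUCODE32_SEL, SEL_UPL) << 48);

	printf("MSR_STAR          = %#018jx\n", (uintmax_t)star);
	printf("SYSCALL %%cs       = %#06jx\n", (uintmax_t)((star >> 32) & 0xffff));
	printf("SYSRET base (%%cs) = %#06jx\n", (uintmax_t)((star >> 48) & 0xffff));
	return (0);
}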

int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
	ACPI_STATUS		status;
	vm_offset_t		oldphys;
	struct pmap		*pm;
	vm_page_t		page;
	static vm_page_t	opage = NULL;
	int			ret = 0;
	int			pteobj_allocated = 0;
	u_long			ef;
	struct proc		*p;

	if (sc->acpi_wakeaddr == 0) {
		return (0);
	}

	AcpiSetFirmwareWakingVector(sc->acpi_wakephys);

	ef = read_eflags();
	disable_intr();

	/* Create Identity Mapping */
	if ((p = curproc) == NULL)
		p = &proc0;
	pm = vmspace_pmap(p->p_vmspace);
	if (pm->pm_pteobj == NULL) {
		pm->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
		pteobj_allocated = 1;
	}

	oldphys = pmap_extract(pm, sc->acpi_wakephys);
	if (oldphys) {
		opage = PHYS_TO_VM_PAGE(oldphys);
	}
	page = PHYS_TO_VM_PAGE(sc->acpi_wakephys);
	pmap_enter(pm, sc->acpi_wakephys, page,
		   VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, 1);

	ret_addr = 0;
	if (acpi_savecpu()) {
		/* Execute Sleep */
		p_gdt = (struct region_descriptor *)
				(sc->acpi_wakeaddr + physical_gdt);
		p_gdt->rd_limit = r_gdt.rd_limit;
		p_gdt->rd_base = vtophys(r_gdt.rd_base);

		WAKECODE_FIXUP(physical_esp, u_int32_t, vtophys(r_esp));
		WAKECODE_FIXUP(previous_cr0, u_int32_t, r_cr0);
		WAKECODE_FIXUP(previous_cr2, u_int32_t, r_cr2);
		WAKECODE_FIXUP(previous_cr3, u_int32_t, r_cr3);
		WAKECODE_FIXUP(previous_cr4, u_int32_t, r_cr4);

		WAKECODE_FIXUP(previous_tr,  u_int16_t, r_tr);
		WAKECODE_BCOPY(previous_gdt, struct region_descriptor, r_gdt);
		WAKECODE_FIXUP(previous_ldt, u_int16_t, r_ldt);
		WAKECODE_BCOPY(previous_idt, struct region_descriptor, r_idt);

		WAKECODE_FIXUP(where_to_recover, void *, acpi_restorecpu);

		WAKECODE_FIXUP(previous_ds,  u_int16_t, r_ds);
		WAKECODE_FIXUP(previous_es,  u_int16_t, r_es);
		WAKECODE_FIXUP(previous_fs,  u_int16_t, r_fs);
		WAKECODE_FIXUP(previous_gs,  u_int16_t, r_gs);
		WAKECODE_FIXUP(previous_ss,  u_int16_t, r_ss);

		if (acpi_get_verbose(sc)) {
			acpi_printcpu();
		}

		wbinvd();

		if (state == ACPI_STATE_S4 && sc->acpi_s4bios) {
			status = AcpiEnterSleepStateS4Bios();
		} else {
			status = AcpiEnterSleepState(state);
		}

		if (status != AE_OK) {
			device_printf(sc->acpi_dev,
			    "AcpiEnterSleepState failed - %s\n",
			    AcpiFormatException(status));
			ret = -1;
			goto out;
		}

		for (;;)
			;
	} else {
		/* Execute Wakeup */
#if 0
		initializecpu();
#endif
		icu_reinit();

		if (acpi_get_verbose(sc)) {
			acpi_savecpu();
			acpi_printcpu();
		}
	}

out:
	vm_page_lock_queues();
	pmap_remove(pm, sc->acpi_wakephys,
	    sc->acpi_wakephys + PAGE_SIZE);
	vm_page_unlock_queues();
	if (opage) {
		pmap_enter(pm, sc->acpi_wakephys, page,
			   VM_PROT_READ | VM_PROT_WRITE, 0);
	}

	if (pteobj_allocated) {
		vm_object_deallocate(pm->pm_pteobj);
		pm->pm_pteobj = NULL;
	}

	write_eflags(ef);

	return (ret);
}
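
/*
 * A minimal sketch of what the WAKECODE_FIXUP()/WAKECODE_BCOPY() helpers
 * used above might look like (an assumption -- the real definitions live
 * in the platform's acpi_wakeup.c): each one patches a field inside the
 * real-mode wakeup trampoline that was copied to sc->acpi_wakeaddr, at a
 * byte offset exported by the wakecode assembly, so the trampoline can
 * restore the saved registers and jump back into the kernel on resume.
 */
#define WAKECODE_FIXUP(offset, type, val) do {			\
	type *addr;						\
								\
	addr = (type *)(sc->acpi_wakeaddr + (offset));		\
	*addr = (val);						\
} while (0)

#define WAKECODE_BCOPY(offset, type, val) do {			\
	void *addr;						\
								\
	addr = (void *)(sc->acpi_wakeaddr + (offset));		\
	bcopy(&(val), addr, sizeof(type));			\
} while (0)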