/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
    u_long eflags;
    u_char ccr2;

    eflags = read_eflags();
    disable_intr();
    invd();

    ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
    ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
    /* Enables WB cache interface pin and Lock NW bit in CR0. */
    ccr2 |= CCR2_WB | CCR2_LOCK_NW;
    /* Unlock NW bit in CR0. */
    write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
    load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
#endif

    write_cyrix_reg(CCR2, ccr2);
    write_eflags(eflags);
}
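/*
 * For context, a minimal sketch (not part of the original source) of the
 * Cyrix configuration-register accessors these routines rely on. Cyrix
 * CCRs are conventionally reached through index port 0x22 and data port
 * 0x23; the exact helper names and types used here are assumptions.
 */
static u_char
read_cyrix_reg(u_char reg)
{
    outb(0x22, reg);        /* select the configuration register */
    return (inb(0x23));     /* read its current value */
}

static void
write_cyrix_reg(u_char reg, u_char data)
{
    outb(0x22, reg);        /* select the configuration register */
    outb(0x23, data);       /* write the new value */
}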
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
    u_long eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
    need_post_dma_flush = 1;
#endif

    eflags = read_eflags();
    disable_intr();

    load_cr0(rcr0() | CR0_CD | CR0_NW);
    invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
    wrmsr(0x1000, 0x9c92LL);        /* FP operand can be cacheable on Cyrix FPU */
#else
    wrmsr(0x1000, 0x1c92LL);        /* Intel FPU */
#endif
    /* Enables 13MB and 0-640KB cache. */
    wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
    wrmsr(0x1002, 0x04000000LL);    /* Enables triple-clock mode. */
#else
    wrmsr(0x1002, 0x03000000LL);    /* Enables double-clock mode. */
#endif

    /* Enable caching in CR0. */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
    invd();
    write_eflags(eflags);
}
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
    u_long eflags;
    u_int64_t bbl_cr_ctl3;

    eflags = read_eflags();
    disable_intr();

    load_cr0(rcr0() | CR0_CD | CR0_NW);
    wbinvd();

    bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

    /* If the L2 cache is configured, do nothing. */
    if (!(bbl_cr_ctl3 & 1)) {
        bbl_cr_ctl3 = 0x134052bLL;

        /* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
        bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
        bbl_cr_ctl3 |= 5 << 1;
#endif
        wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
    }

    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
    write_eflags(eflags);
#endif /* CPU_PPRO2CELERON */
}
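/*
 * Worked example (an inference from the code above, not from the original
 * source): the latency value appears to occupy a 4-bit field starting at
 * bit 1 of BBL_CR_CTL3, which is why it is shifted left by one and values
 * above 15 are rejected at compile time. ORing in the default latency of
 * 5 is then a no-op:
 *
 *     0x134052bLL | (5 << 1) == 0x134052bLL
 *
 * i.e. the base constant already encodes latency 5 in that field.
 */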
/*
 * Check to see if this CPU supports long mode.
 */
static int
bi_checkcpu(void)
{
    char *cpu_vendor;
    int vendor[3];
    int eflags, regs[4];

    /* Check for presence of "cpuid". */
    eflags = read_eflags();
    write_eflags(eflags ^ PSL_ID);
    if (!((eflags ^ read_eflags()) & PSL_ID))
        return (0);

    /* Fetch the vendor string. */
    do_cpuid(0, regs);
    vendor[0] = regs[1];
    vendor[1] = regs[3];
    vendor[2] = regs[2];
    cpu_vendor = (char *)vendor;

    /* Check for vendors that support AMD features. */
    if (strncmp(cpu_vendor, "GenuineIntel", 12) != 0 &&
        strncmp(cpu_vendor, "AuthenticAMD", 12) != 0)
        return (0);

    /* Has to support AMD features. */
    do_cpuid(0x80000000, regs);
    if (!(regs[0] >= 0x80000001))
        return (0);

    /* Check for long mode. */
    do_cpuid(0x80000001, regs);
    return (regs[3] & AMDID_LM);
}
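/*
 * For reference, a minimal sketch (assuming do_cpuid() fills regs[] as
 * {eax, ebx, ecx, edx}) of why the vendor words above are copied in
 * 1, 3, 2 order: CPUID leaf 0 returns the 12-byte vendor string split
 * across EBX ("Genu"), EDX ("ineI"), ECX ("ntel").
 */
static void
cpuid_vendor_string(char buf[13])
{
    int regs[4];

    do_cpuid(0, regs);
    memcpy(buf + 0, &regs[1], 4);   /* EBX: "Genu" */
    memcpy(buf + 4, &regs[3], 4);   /* EDX: "ineI" */
    memcpy(buf + 8, &regs[2], 4);   /* ECX: "ntel" */
    buf[12] = '\0';
}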
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
    u_long eflags;
    u_char ccr3, ccr4;

    eflags = read_eflags();
    disable_intr();

    load_cr0(rcr0() | CR0_CD | CR0_NW);
    wbinvd();

    /* Initialize CCR0. */
    write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

    /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
    write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
    write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

    /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

    ccr3 = read_cyrix_reg(CCR3);
    write_cyrix_reg(CCR3, CCR3_MAPEN0);

    /* Initialize CCR4. */
    ccr4 = read_cyrix_reg(CCR4);
    ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
    write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
    write_cyrix_reg(CCR4, ccr4 | 7);
#endif

    /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
    write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

    /* Restore CCR3. */
    write_cyrix_reg(CCR3, ccr3);

    /* Unlock NW bit in CR0. */
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */

    /* Lock NW bit in CR0. */
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

    write_eflags(eflags);
}
static int
acpi_timer_test(void)
{
    uint32_t last, this;
    int min, max, max2, n, delta;
    register_t s;

    min = INT32_MAX;
    max = max2 = 0;

    /* Test the timer with interrupts disabled to get accurate results. */
#if defined(__i386__)
    s = read_eflags();
#elif defined(__x86_64__)
    s = read_rflags();
#else
#error "no read_eflags"
#endif
    cpu_disable_intr();

    AcpiGetTimer(&last);
    for (n = 0; n < 2000; n++) {
        AcpiGetTimer(&this);
        delta = acpi_TimerDelta(this, last);
        if (delta > max) {
            max2 = max;
            max = delta;
        } else if (delta > max2) {
            max2 = delta;
        }
        if (delta < min)
            min = delta;
        last = this;
    }

#if defined(__i386__)
    write_eflags(s);
#elif defined(__x86_64__)
    write_rflags(s);
#else
#error "no read_eflags"
#endif

    delta = max2 - min;
    if ((max - min > 8 || delta > 3) && vmm_guest == VMM_GUEST_NONE)
        n = 0;
    else if (min < 0 || max == 0 || max2 == 0)
        n = 0;
    else
        n = 1;

    if (bootverbose) {
        kprintf("ACPI timer looks %s min = %d, max = %d, width = %d\n",
            n ? "GOOD" : "BAD ", min, max, max - min);
    }

    return (n);
}
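/*
 * A minimal sketch (an assumption, not the verified helper) of the
 * wrap-aware subtraction acpi_TimerDelta() has to perform: the ACPI PM
 * timer is a free-running counter that is only 24 bits wide on some
 * hardware, so a smaller "end" value can simply mean the counter wrapped.
 */
static uint32_t
acpi_TimerDelta_sketch(uint32_t end, uint32_t start)
{
    /* On 24-bit timers, end < start means the counter wrapped. */
    if (end < start && !timer_is_32bit)     /* timer_is_32bit: hypothetical flag */
        end |= 0x01000000;                  /* restore the borrowed bit 24 */
    return (end - start);
}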
static ACPI_STATUS
enter_s4_with_bios(void)
{
    ACPI_OBJECT_LIST ArgList;
    ACPI_OBJECT Arg;
    u_long ef;
    UINT32 ret;
    ACPI_STATUS status;

    /* run the _PTS and _GTS methods */
    ACPI_MEMSET(&ArgList, 0, sizeof(ArgList));
    ArgList.Count = 1;
    ArgList.Pointer = &Arg;

    ACPI_MEMSET(&Arg, 0, sizeof(Arg));
    Arg.Type = ACPI_TYPE_INTEGER;
    Arg.Integer.Value = ACPI_STATE_S4;

    AcpiEvaluateObject(NULL, "\\_PTS", &ArgList, NULL);
    AcpiEvaluateObject(NULL, "\\_GTS", &ArgList, NULL);

    /* clear wake status */
    AcpiSetRegister(ACPI_BITREG_WAKE_STATUS, 1, ACPI_MTX_LOCK);

    ef = read_eflags();
    disable_intr();

    AcpiHwDisableNonWakeupGpes();

    /* flush caches */
    ACPI_FLUSH_CPU_CACHE();

    /*
     * write the value to command port and wait until we enter sleep state
     */
    do {
        AcpiOsStall(1000000);
        AcpiOsWritePort(AcpiGbl_FADT->SmiCmd, AcpiGbl_FADT->S4BiosReq, 8);
        status = AcpiGetRegister(ACPI_BITREG_WAKE_STATUS,
                                 &ret, ACPI_MTX_LOCK);
        if (ACPI_FAILURE(status))
            break;
    } while (!ret);

    AcpiHwEnableNonWakeupGpes();
    write_eflags(ef);

    return (AE_OK);
}
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
void
init_i486_on_386(void)
{
    u_long eflags;

    eflags = read_eflags();
    cpu_disable_intr();

    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */

    write_eflags(eflags);
}
void pm_init(void)
{
    descriptor_t *gdt_p = (descriptor_t *) gdtr.base;
    ptr_16_32_t idtr;

    /*
     * Update addresses in GDT and IDT to their virtual counterparts.
     */
    idtr.limit = sizeof(idt);
    idtr.base = (uintptr_t) idt;
    gdtr_load(&gdtr);
    idtr_load(&idtr);

    /*
     * Each CPU has its private GDT and TSS.
     * All CPUs share one IDT.
     */
    if (config.cpu_active == 1) {
        idt_init();
        /*
         * NOTE: bootstrap CPU has statically allocated TSS, because
         * the heap hasn't been initialized so far.
         */
        tss_p = &tss0;
    } else {
        tss_p = (tss_t *) malloc(sizeof(tss_t), FRAME_ATOMIC);
        if (!tss_p)
            panic("Cannot allocate TSS.");
    }

    tss_initialize(tss_p);

    gdt_p[TSS_DES].access = AR_PRESENT | AR_TSS | DPL_KERNEL;
    gdt_p[TSS_DES].special = 1;
    gdt_p[TSS_DES].granularity = 0;

    gdt_setbase(&gdt_p[TSS_DES], (uintptr_t) tss_p);
    gdt_setlimit(&gdt_p[TSS_DES], TSS_BASIC_SIZE - 1);

    /*
     * As of this moment, the current CPU has its own GDT pointing
     * to its own TSS. We just need to load the TR register.
     */
    tr_load(GDT_SELECTOR(TSS_DES));

    /* Disable I/O on nonprivileged levels and clear NT flag. */
    write_eflags(read_eflags() & ~(EFLAGS_IOPL | EFLAGS_NT));

    /* Disable alignment check */
    write_cr0(read_cr0() & ~CR0_AM);
}
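/*
 * A minimal sketch (field names are assumptions modeled on a typical
 * i386 descriptor layout, not necessarily the exact descriptor_t above)
 * of how gdt_setbase()/gdt_setlimit() scatter a linear base and limit
 * across the split fields of a segment descriptor.
 */
static void gdt_setbase_sketch(descriptor_t *d, uintptr_t base)
{
    d->base_0_15 = base & 0xffff;
    d->base_16_23 = (base >> 16) & 0xff;
    d->base_24_31 = (base >> 24) & 0xff;
}

static void gdt_setlimit_sketch(descriptor_t *d, uint32_t limit)
{
    d->limit_0_15 = limit & 0xffff;
    d->limit_16_19 = (limit >> 16) & 0x0f;
}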
static int
acpi_timer_test(void)
{
    uint32_t last, this;
    int min, max, n, delta;
    register_t s;

    min = 10000000;
    max = 0;

    /* Test the timer with interrupts disabled to get accurate results. */
#if defined(__i386__)
    s = read_eflags();
#elif defined(__x86_64__)
    s = read_rflags();
#else
#error "no read_eflags"
#endif
    cpu_disable_intr();

    last = acpi_timer_read();
    for (n = 0; n < 2000; n++) {
        this = acpi_timer_read();
        delta = acpi_TimerDelta(this, last);
        if (delta > max)
            max = delta;
        else if (delta < min)
            min = delta;
        last = this;
    }

#if defined(__i386__)
    write_eflags(s);
#elif defined(__x86_64__)
    write_rflags(s);
#else
#error "no read_eflags"
#endif

    if (max - min > 2)
        n = 0;
    else if (min < 0 || max == 0)
        n = 0;
    else
        n = 1;

    if (bootverbose) {
        kprintf("ACPI timer looks %s min = %d, max = %d, width = %d\n",
            n ? "GOOD" : "BAD ", min, max, max - min);
    }

    return (n);
}
void set_palette(int start, int end, unsigned char *rgb)
{
    int i, eflags;

    eflags = read_eflags();     /* in place of the author's io_load_eflags() */
    io_cli();
    outb(0x03c8, start);        /* in place of the author's io_out8() */
    for (i = start; i <= end; i++) {
        outb(0x03c9, rgb[0] / 4);
        outb(0x03c9, rgb[1] / 4);
        outb(0x03c9, rgb[2] / 4);
        rgb += 3;
    }
    write_eflags(eflags);       /* in place of the author's io_store_eflags(eflags) */
    return;
}
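/*
 * Usage example (hypothetical palette data, not from the original source):
 * port 0x3c8 selects the starting DAC entry, and each group of three
 * writes to 0x3c9 programs one entry's R, G, B components. The VGA DAC
 * takes 6-bit values, hence the division by 4 above.
 */
void init_basic_palette(void)
{
    static unsigned char table_rgb[2 * 3] = {
        0x00, 0x00, 0x00,   /* color 0: black */
        0xff, 0xff, 0xff,   /* color 1: white */
    };

    set_palette(0, 1, table_rgb);
}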
/*
 * There are i486-based upgrade products for i386 machines.
 * In this case, the BIOS doesn't enable the CPU cache.
 */
static void
init_i486_on_386(void)
{
    u_long eflags;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
    need_post_dma_flush = 1;
#endif

    eflags = read_eflags();
    disable_intr();

    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0, NW = 0 */

    write_eflags(eflags);
}
/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
    u_long eflags;
    u_char ccr2;

    eflags = read_eflags();
    cpu_disable_intr();
    invd();

    ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
    ccr2 |= CCR2_SUSP_HLT;
#endif

    write_cyrix_reg(CCR2, ccr2);
    write_eflags(eflags);
}
void
bx_cpu_c::IRET32(BxInstruction_t *i)
{
  Bit32u eip, ecs_raw, eflags;

#if BX_DEBUGGER
  bx_cpu.show_flag |= Flag_iret;
  bx_cpu.show_eip = bx_cpu.eip;
#endif

  invalidate_prefetch_q();

  if (v8086_mode()) {
    // IOPL check in stack_return_from_v86()
    stack_return_from_v86(i);
    goto done;
  }

#if BX_CPU_LEVEL >= 2
  if (bx_cpu.cr0.pe) {
    iret_protected(i);
    goto done;
  }
#endif

  BX_ERROR(("IRET32 called when you're not in vm8086 mode or protected mode."));
  BX_ERROR(("IRET32 may not be implemented right, since it doesn't check anything."));
  BX_PANIC(("Please report that you have found a test case for bx_cpu_c::IRET32."));

  pop_32(&eip);
  pop_32(&ecs_raw);
  pop_32(&eflags);

  load_seg_reg(&bx_cpu.sregs[BX_SEG_REG_CS], (Bit16u) ecs_raw);
  bx_cpu.eip = eip;
  // FIXME: this should do (eflags & 0x257FD5) | (EFLAGS | 0x1A0000)
  write_eflags(eflags, /* change IOPL? */ 1, /* change IF? */ 1, 0, 1);

done:
  BX_INSTR_FAR_BRANCH(BX_INSTR_IS_IRET,
      bx_cpu.sregs[BX_SEG_REG_CS].selector.value, bx_cpu.eip);
  return;
}
/*
 * Cyrix 486SLC/DLC/SR/DR series
 */
static void
init_486dlc(void)
{
    u_long eflags;
    u_char ccr0;

    eflags = read_eflags();
    disable_intr();
    invd();

    ccr0 = read_cyrix_reg(CCR0);
#ifndef CYRIX_CACHE_WORKS
    ccr0 |= CCR0_NC1 | CCR0_BARB;
    write_cyrix_reg(CCR0, ccr0);
    invd();
#else
    ccr0 &= ~CCR0_NC0;
#ifndef CYRIX_CACHE_REALLY_WORKS
    ccr0 |= CCR0_NC1 | CCR0_BARB;
#else
    ccr0 |= CCR0_NC1;
#endif
#ifdef CPU_DIRECT_MAPPED_CACHE
    ccr0 |= CCR0_CO;    /* Direct mapped mode. */
#endif
    write_cyrix_reg(CCR0, ccr0);

    /* Clear non-cacheable region. */
    write_cyrix_reg(NCR1+2, NCR_SIZE_0K);
    write_cyrix_reg(NCR2+2, NCR_SIZE_0K);
    write_cyrix_reg(NCR3+2, NCR_SIZE_0K);
    write_cyrix_reg(NCR4+2, NCR_SIZE_0K);

    write_cyrix_reg(0, 0);  /* dummy write */

    /* Enable caching in CR0. */
    load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
    invd();
#endif /* !CYRIX_CACHE_WORKS */

    write_eflags(eflags);
}
/*
 * Check to see if this CPU supports long mode.
 */
static int
bi_checkcpu(void)
{
#if 0
    char *cpu_vendor;
    int vendor[3];
    int eflags, regs[4];

    /* Check for presence of "cpuid". */
    eflags = read_eflags();
    write_eflags(eflags ^ PSL_ID);
    if (!((eflags ^ read_eflags()) & PSL_ID))
        return (0);

    /* Fetch the vendor string. */
    do_cpuid(0, regs);
    vendor[0] = regs[1];
    vendor[1] = regs[3];
    vendor[2] = regs[2];
    cpu_vendor = (char *)vendor;

    /* Check for vendors that support AMD features. */
    if (strncmp(cpu_vendor, INTEL_VENDOR_ID, 12) != 0 &&
        strncmp(cpu_vendor, AMD_VENDOR_ID, 12) != 0 &&
        strncmp(cpu_vendor, CENTAUR_VENDOR_ID, 12) != 0)
        return (0);

    /* Has to support AMD features. */
    do_cpuid(0x80000000, regs);
    if (!(regs[0] >= 0x80000001))
        return (0);

    /* Check for long mode. */
    do_cpuid(0x80000001, regs);
    return (regs[3] & AMDID_LM);
#else
    return (1);
#endif
}
/* Set the palette; only the first 16 colors are used, the rest are unused. */
void set_palette(int start, int end, unsigned char *rgb)
{
    int i, eflag;

    eflag = read_eflags();      /* save the previous flags value */
    io_cli();                   /* disable interrupts */
    /* Port 0x03c8 selects the starting palette index. */
    outb(0x03c8, start);
    for (i = start; i <= end; i++) {
        outb(0x03c9, *(rgb));   /* outb sends data to the given device port */
        outb(0x03c9, *(rgb + 1));
        outb(0x03c9, *(rgb + 2));
        rgb = rgb + 3;
    }
    write_eflags(eflag);        /* restore the previous flags value */
    return;
}
void
elansc_setperf(int level)
{
    uint32_t eflags;
    uint8_t cpuctl, speed;

    level = (level > 50) ? 100 : 0;

    cpuctl = bus_space_read_1(elansc->sc_memt, elansc->sc_memh,
        MMCR_CPUCTL);
    speed = (level == 100) ? 2 : 1;
    if ((cpuctl & CPUCTL_CPU_CLK_SPD_MASK) == speed)
        return;

    eflags = read_eflags();
    disable_intr();
    bus_space_write_1(elansc->sc_memt, elansc->sc_memh, MMCR_CPUCTL,
        (cpuctl & ~CPUCTL_CPU_CLK_SPD_MASK) | speed);
    enable_intr();
    write_eflags(eflags);

    elansc_update_cpuspeed();
}
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
    ACPI_STATUS status;
    struct pmap *pm;
    int ret;
    uint32_t cr3;
    u_long ef;

    ret = 0;
    if (sc->acpi_wakeaddr == 0)
        return (0);

    AcpiSetFirmwareWakingVector(sc->acpi_wakephys);

    ef = read_eflags();

    /*
     * Temporarily switch to the kernel pmap because it provides an
     * identity mapping (setup at boot) for the low physical memory
     * region containing the wakeup code.
     */
    pm = kernel_pmap;
    cr3 = rcr3();
#ifdef PAE
    load_cr3(vtophys(pm->pm_pdpt));
#else
    load_cr3(vtophys(pm->pm_pdir));
#endif

    ret_addr = 0;
    ACPI_DISABLE_IRQS();
    if (acpi_savecpu()) {
        /* Execute Sleep */

        intr_suspend();

        p_gdt = (struct region_descriptor *)
            (sc->acpi_wakeaddr + physical_gdt);
        p_gdt->rd_limit = saved_gdt.rd_limit;
        p_gdt->rd_base = vtophys(saved_gdt.rd_base);

        WAKECODE_FIXUP(physical_esp, uint32_t, vtophys(r_esp));
        WAKECODE_FIXUP(previous_cr0, uint32_t, r_cr0);
        WAKECODE_FIXUP(previous_cr2, uint32_t, r_cr2);
        WAKECODE_FIXUP(previous_cr3, uint32_t, r_cr3);
        WAKECODE_FIXUP(previous_cr4, uint32_t, r_cr4);

        WAKECODE_FIXUP(resume_beep, uint32_t, acpi_resume_beep);
        WAKECODE_FIXUP(reset_video, uint32_t, acpi_reset_video);

        WAKECODE_FIXUP(previous_tr, uint16_t, r_tr);
        WAKECODE_BCOPY(previous_gdt, struct region_descriptor, saved_gdt);
        WAKECODE_FIXUP(previous_ldt, uint16_t, saved_ldt);
        WAKECODE_BCOPY(previous_idt, struct region_descriptor, saved_idt);

        WAKECODE_FIXUP(where_to_recover, void *, acpi_restorecpu);

        WAKECODE_FIXUP(previous_ds, uint16_t, r_ds);
        WAKECODE_FIXUP(previous_es, uint16_t, r_es);
        WAKECODE_FIXUP(previous_fs, uint16_t, r_fs);
        WAKECODE_FIXUP(previous_gs, uint16_t, r_gs);
        WAKECODE_FIXUP(previous_ss, uint16_t, r_ss);

        if (bootverbose)
            acpi_printcpu();

        /* Call ACPICA to enter the desired sleep state */
        if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
            status = AcpiEnterSleepStateS4bios();
        else
            status = AcpiEnterSleepState(state);

        if (status != AE_OK) {
            device_printf(sc->acpi_dev,
                "AcpiEnterSleepState failed - %s\n",
                AcpiFormatException(status));
            ret = -1;
            goto out;
        }

        for (;;)
            ;
    } else {
        /* Execute Wakeup */

        intr_resume();

        if (bootverbose) {
            acpi_savecpu();
            acpi_printcpu();
        }
    }

out:
    load_cr3(cr3);
    write_eflags(ef);

    /* If we beeped, turn it off after a delay. */
    if (acpi_resume_beep)
        timeout(acpi_stop_beep, NULL, 3 * hz);

    return (ret);
}
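/*
 * A minimal sketch (an assumption about these macros, not the verified
 * definitions) of what WAKECODE_FIXUP()/WAKECODE_BCOPY() do: they patch
 * named locations inside the low-memory wakeup-code image so that the
 * real-mode resume stub can restore the saved processor state.
 */
#define WAKECODE_FIXUP(offset, type, val) do {                  \
    type *addr = (type *)(sc->acpi_wakeaddr + (offset));        \
    *addr = (val);                                              \
} while (0)

#define WAKECODE_BCOPY(offset, type, val) do {                  \
    void *addr = (void *)(sc->acpi_wakeaddr + (offset));        \
    bcopy(&(val), addr, sizeof(type));                          \
} while (0)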
/*
 * Calibrate the local apic count-down timer (which is running at
 * bus-clock speed) vs. the i8254 counter/timer (which is running at
 * a fixed rate).
 *
 * The Intel MP spec says: "An MP operating system may use the IRQ8
 * real-time clock as a reference to determine the actual APIC timer clock
 * speed."
 *
 * We're actually using the IRQ0 timer.  Hmm.
 */
void
lapic_calibrate_timer(struct cpu_info *ci)
{
    unsigned int startapic, endapic;
    u_int64_t dtick, dapic, tmp;
    int i, ef = read_eflags();

    if (mp_verbose)
        printf("%s: calibrating local timer\n", ci->ci_dev.dv_xname);

    /*
     * Configure timer to one-shot, interrupt masked,
     * large positive number.
     */
    i82489_writereg(LAPIC_LVTT, LAPIC_LVTT_M);
    i82489_writereg(LAPIC_DCR_TIMER, LAPIC_DCRT_DIV1);
    i82489_writereg(LAPIC_ICR_TIMER, 0x80000000);

    disable_intr();

    /* wait for current cycle to finish */
    wait_next_cycle();

    startapic = lapic_gettick();

    /* wait the next hz cycles */
    for (i = 0; i < hz; i++)
        wait_next_cycle();

    endapic = lapic_gettick();

    write_eflags(ef);

    dtick = hz * TIMER_DIV(hz);
    dapic = startapic - endapic;

    /*
     * there are TIMER_FREQ ticks per second.
     * in dtick ticks, there are dapic bus clocks.
     */
    tmp = (TIMER_FREQ * dapic) / dtick;

    lapic_per_second = tmp;

    printf("%s: apic clock running at %lldMHz\n",
        ci->ci_dev.dv_xname, tmp / (1000 * 1000));

    if (lapic_per_second != 0) {
        /*
         * reprogram the apic timer to run in periodic mode.
         * XXX need to program timer on other cpu's, too.
         */
        lapic_tval = (lapic_per_second * 2) / hz;
        lapic_tval = (lapic_tval / 2) + (lapic_tval & 0x1);

        i82489_writereg(LAPIC_LVTT, LAPIC_LVTT_TM | LAPIC_LVTT_M |
            LAPIC_TIMER_VECTOR);
        i82489_writereg(LAPIC_DCR_TIMER, LAPIC_DCRT_DIV1);
        i82489_writereg(LAPIC_ICR_TIMER, lapic_tval);

        /*
         * Compute fixed-point ratios between cycles and
         * microseconds to avoid having to do any division
         * in lapic_delay.
         */
        tmp = (1000000 * (u_int64_t)1 << 32) / lapic_per_second;
        lapic_frac_usec_per_cycle = tmp;

        tmp = (lapic_per_second * (u_int64_t)1 << 32) / 1000000;
        lapic_frac_cycle_per_usec = tmp;

        /*
         * Compute delay in cycles for likely short delays in usec.
         */
        for (i = 0; i < 26; i++)
            lapic_delaytab[i] =
                (lapic_frac_cycle_per_usec * i) >> 32;

        /*
         * Now that the timer's calibrated, use the apic timer routines
         * for all our timing needs..
         */
        delay_func = lapic_delay;
        initclock_func = lapic_initclocks;
    }
}
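/*
 * A minimal sketch (assumed helper, not the driver's actual lapic_delay())
 * showing how the 32.32 fixed-point ratio computed above turns a
 * microsecond count into APIC timer ticks without a runtime division.
 */
static inline u_int64_t
usec_to_lapic_ticks(u_int32_t usec)
{
    /* ticks = usec * (cycles per usec), in 32.32 fixed point */
    return (((u_int64_t)usec * lapic_frac_cycle_per_usec) >> 32);
}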
int
acpi_sleep_machdep(struct acpi_softc *sc, int state)
{
    ACPI_STATUS status;
    vm_offset_t oldphys;
    struct pmap *pm;
    vm_page_t page;
    static vm_page_t opage = NULL;
    int ret = 0;
    int pteobj_allocated = 0;
    u_long ef;
    struct proc *p;

    if (sc->acpi_wakeaddr == 0)
        return (0);

    AcpiSetFirmwareWakingVector(sc->acpi_wakephys);

    ef = read_eflags();
    disable_intr();

    /* Create Identity Mapping */
    if ((p = curproc) == NULL)
        p = &proc0;
    pm = vmspace_pmap(p->p_vmspace);
    if (pm->pm_pteobj == NULL) {
        pm->pm_pteobj = vm_object_allocate(OBJT_DEFAULT, PTDPTDI + 1);
        pteobj_allocated = 1;
    }

    oldphys = pmap_extract(pm, sc->acpi_wakephys);
    if (oldphys)
        opage = PHYS_TO_VM_PAGE(oldphys);
    page = PHYS_TO_VM_PAGE(sc->acpi_wakephys);
    pmap_enter(pm, sc->acpi_wakephys, page,
        VM_PROT_READ | VM_PROT_WRITE | VM_PROT_EXECUTE, 1);

    ret_addr = 0;
    if (acpi_savecpu()) {
        /* Execute Sleep */
        p_gdt = (struct region_descriptor *)
            (sc->acpi_wakeaddr + physical_gdt);
        p_gdt->rd_limit = r_gdt.rd_limit;
        p_gdt->rd_base = vtophys(r_gdt.rd_base);

        WAKECODE_FIXUP(physical_esp, u_int32_t, vtophys(r_esp));
        WAKECODE_FIXUP(previous_cr0, u_int32_t, r_cr0);
        WAKECODE_FIXUP(previous_cr2, u_int32_t, r_cr2);
        WAKECODE_FIXUP(previous_cr3, u_int32_t, r_cr3);
        WAKECODE_FIXUP(previous_cr4, u_int32_t, r_cr4);

        WAKECODE_FIXUP(previous_tr, u_int16_t, r_tr);
        WAKECODE_BCOPY(previous_gdt, struct region_descriptor, r_gdt);
        WAKECODE_FIXUP(previous_ldt, u_int16_t, r_ldt);
        WAKECODE_BCOPY(previous_idt, struct region_descriptor, r_idt);

        WAKECODE_FIXUP(where_to_recover, void, acpi_restorecpu);

        WAKECODE_FIXUP(previous_ds, u_int16_t, r_ds);
        WAKECODE_FIXUP(previous_es, u_int16_t, r_es);
        WAKECODE_FIXUP(previous_fs, u_int16_t, r_fs);
        WAKECODE_FIXUP(previous_gs, u_int16_t, r_gs);
        WAKECODE_FIXUP(previous_ss, u_int16_t, r_ss);

        if (acpi_get_verbose(sc))
            acpi_printcpu();

        wbinvd();

        if (state == ACPI_STATE_S4 && sc->acpi_s4bios)
            status = AcpiEnterSleepStateS4Bios();
        else
            status = AcpiEnterSleepState(state);

        if (status != AE_OK) {
            device_printf(sc->acpi_dev,
                "AcpiEnterSleepState failed - %s\n",
                AcpiFormatException(status));
            ret = -1;
            goto out;
        }

        for (;;)
            ;
    } else {
        /* Execute Wakeup */
#if 0
        initializecpu();
#endif
        icu_reinit();

        if (acpi_get_verbose(sc)) {
            acpi_savecpu();
            acpi_printcpu();
        }
    }

out:
    vm_page_lock_queues();
    pmap_remove(pm, sc->acpi_wakephys, sc->acpi_wakephys + PAGE_SIZE);
    vm_page_unlock_queues();
    if (opage) {
        pmap_enter(pm, sc->acpi_wakephys, page,
            VM_PROT_READ | VM_PROT_WRITE, 0);
    }

    if (pteobj_allocated) {
        vm_object_deallocate(pm->pm_pteobj);
        pm->pm_pteobj = NULL;
    }

    write_eflags(ef);

    return (ret);
}
/*
 * Cyrix 5x86
 */
static void
init_5x86(void)
{
    u_long eflags;
    u_char ccr2, ccr3, ccr4, pcr0;

    eflags = read_eflags();
    disable_intr();

    load_cr0(rcr0() | CR0_CD | CR0_NW);
    wbinvd();

    (void)read_cyrix_reg(CCR3);     /* dummy */

    /* Initialize CCR2. */
    ccr2 = read_cyrix_reg(CCR2);
    ccr2 |= CCR2_WB;
#ifdef CPU_SUSP_HLT
    ccr2 |= CCR2_SUSP_HLT;
#else
    ccr2 &= ~CCR2_SUSP_HLT;
#endif
    ccr2 |= CCR2_WT1;
    write_cyrix_reg(CCR2, ccr2);

    /* Initialize CCR4. */
    ccr3 = read_cyrix_reg(CCR3);
    write_cyrix_reg(CCR3, CCR3_MAPEN0);

    ccr4 = read_cyrix_reg(CCR4);
    ccr4 |= CCR4_DTE;
    ccr4 |= CCR4_MEM;
#ifdef CPU_FASTER_5X86_FPU
    ccr4 |= CCR4_FASTFPE;
#else
    ccr4 &= ~CCR4_FASTFPE;
#endif
    ccr4 &= ~CCR4_IOMASK;
    /********************************************************************
     * WARNING: The "BIOS Writers Guide" mentions that I/O recovery time
     * should be 0 for errata fix.
     ********************************************************************/
#ifdef CPU_IORT
    ccr4 |= CPU_IORT & CCR4_IOMASK;
#endif
    write_cyrix_reg(CCR4, ccr4);

    /* Initialize PCR0. */
    /****************************************************************
     * WARNING: RSTK_EN and LOOP_EN could make your system unstable.
     * BTB_EN might make your system unstable.
     ****************************************************************/
    pcr0 = read_cyrix_reg(PCR0);
#ifdef CPU_RSTK_EN
    pcr0 |= PCR0_RSTK;
#else
    pcr0 &= ~PCR0_RSTK;
#endif
#ifdef CPU_BTB_EN
    pcr0 |= PCR0_BTB;
#else
    pcr0 &= ~PCR0_BTB;
#endif
#ifdef CPU_LOOP_EN
    pcr0 |= PCR0_LOOP;
#else
    pcr0 &= ~PCR0_LOOP;
#endif

    /****************************************************************
     * WARNING: if you use a memory mapped I/O device, don't use
     * DISABLE_5X86_LSSER option, which may reorder memory mapped
     * I/O access.
     * IF YOUR MOTHERBOARD HAS PCI BUS, DON'T DISABLE LSSER.
     ****************************************************************/
#ifdef CPU_DISABLE_5X86_LSSER
    pcr0 &= ~PCR0_LSSER;
#else
    pcr0 |= PCR0_LSSER;
#endif
    write_cyrix_reg(PCR0, pcr0);

    /* Restore CCR3. */
    write_cyrix_reg(CCR3, ccr3);

    (void)read_cyrix_reg(0x80);     /* dummy */

    /* Unlock NW bit in CR0. */
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
    load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0, NW = 1 */
    /* Lock NW bit in CR0. */
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

    write_eflags(eflags);
}
/*
 * Cyrix 6x86
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86(void)
{
    u_long eflags;
    u_char ccr3, ccr4;

    eflags = read_eflags();
    disable_intr();

    load_cr0(rcr0() | CR0_CD | CR0_NW);
    wbinvd();

    /* Initialize CCR0. */
    write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

    /* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
    write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
    write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

    /* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

    ccr3 = read_cyrix_reg(CCR3);
    write_cyrix_reg(CCR3, CCR3_MAPEN0);

    /* Initialize CCR4. */
    ccr4 = read_cyrix_reg(CCR4);
    ccr4 |= CCR4_DTE;
    ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
    write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
    write_cyrix_reg(CCR4, ccr4 | 7);
#endif

    /* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
    write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

    /* Restore CCR3. */
    write_cyrix_reg(CCR3, ccr3);

    /* Unlock NW bit in CR0. */
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);

    /*
     * Earlier revisions of the 6x86 CPU could crash the system if
     * the L1 cache is in write-back mode.
     */
    if ((cyrix_did & 0xff00) > 0x1600)
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
    else {
        /* Revision 2.6 and lower. */
#ifdef CYRIX_CACHE_REALLY_WORKS
        load_cr0(rcr0() & ~(CR0_CD | CR0_NW));  /* CD = 0 and NW = 0 */
#else
        load_cr0((rcr0() & ~CR0_CD) | CR0_NW);  /* CD = 0 and NW = 1 */
#endif
    }

    /* Lock NW bit in CR0. */
    write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

    write_eflags(eflags);
}