static int
nexus_setup_intr(device_t dev, device_t child, struct resource *res, int flags,
    driver_filter_t *filt, driver_intr_t *intr, void *arg, void **cookiep)
{
#ifdef INTRNG
	return (intr_setup_irq(child, res, filt, intr, arg, flags, cookiep));
#else
	int irq;
	register_t s;

	s = intr_disable();
	irq = rman_get_start(res);
	if (irq >= NUM_MIPS_IRQS) {
		intr_restore(s);
		return (0);
	}

	cpu_establish_hardintr(device_get_nameunit(child), filt, intr, arg,
	    irq, flags, cookiep);
	intr_restore(s);
	return (0);
#endif
}
/*
 * Write the User/OEM 64-bit segment of the PR.
 * XXX should allow writing individual words/bytes
 */
int
cfi_intel_set_oem_pr(struct cfi_softc *sc, uint64_t id)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int i, error;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	for (i = 7; i >= 4; i--, id >>= 16) {
		intr = intr_disable();
		cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
		cfi_put16(sc, CFI_INTEL_PR(i), id & 0xffff);
		intr_restore(intr);
		error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
		    sc->sc_write_timeout);
		if (error)
			break;
	}
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: OEM PR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
/*
 * Flush all lines from the level 1 caches.
 */
void
cheetah_cache_flush(void)
{
	u_long addr, lsu;
	register_t s;

	s = intr_disable();
	for (addr = 0; addr < PCPU_GET(cache.dc_size);
	    addr += PCPU_GET(cache.dc_linesize))
		/*
		 * Note that US-IV+ additionally require a membar #Sync before
		 * a load or store to ASI_DCACHE_TAG.
		 */
		__asm __volatile(
		    "membar #Sync;"
		    "stxa %%g0, [%0] %1;"
		    "membar #Sync"
		    : : "r" (addr), "n" (ASI_DCACHE_TAG));

	/* The I$ must be disabled when flushing it so ensure it's off. */
	lsu = ldxa(0, ASI_LSU_CTL_REG);
	stxa(0, ASI_LSU_CTL_REG, lsu & ~(LSU_IC));
	flush(KERNBASE);
	for (addr = CHEETAH_ICACHE_TAG_LOWER;
	    addr < PCPU_GET(cache.ic_size) * 2;
	    addr += PCPU_GET(cache.ic_linesize) * 2)
		__asm __volatile(
		    "stxa %%g0, [%0] %1;"
		    "membar #Sync"
		    : : "r" (addr), "n" (ASI_ICACHE_TAG));
	stxa(0, ASI_LSU_CTL_REG, lsu);
	flush(KERNBASE);
	intr_restore(s);
}
/*
 * Common spl raise routine, takes new ipl to set
 * returns the old ipl, will not lower ipl.
 */
int
splr(int newpri)
{
	ulong_t	flag;
	cpu_t	*cpu;
	int	curpri, basepri;

	flag = intr_clear();
	cpu = CPU;	/* ints are disabled, now safe to cache cpu ptr */
	curpri = cpu->cpu_m.mcpu_pri;
	/*
	 * Only do something if new priority is larger
	 */
	if (newpri > curpri) {
		basepri = cpu->cpu_base_spl;
		if (newpri < basepri)
			newpri = basepri;
		cpu->cpu_m.mcpu_pri = newpri;
		(*setspl)(newpri);
		/*
		 * See if new priority level allows pending softint delivery
		 */
		if (IS_FAKE_SOFTINT(flag, newpri))
			fakesoftint();
	}
	intr_restore(flag);
	return (curpri);
}
/*
 * do_splx routine, takes new ipl to set
 * returns the old ipl.
 * We are careful not to set priority lower than CPU->cpu_base_spl:
 * even though it seems we're raising the priority, it could be set
 * higher at any time by an interrupt routine, so we must block interrupts
 * and look at CPU->cpu_base_spl.
 */
int
do_splx(int newpri)
{
	ulong_t	flag;
	cpu_t	*cpu;
	int	curpri, basepri;

	flag = intr_clear();
	cpu = CPU;	/* ints are disabled, now safe to cache cpu ptr */
	curpri = cpu->cpu_m.mcpu_pri;
	basepri = cpu->cpu_base_spl;
	if (newpri < basepri)
		newpri = basepri;
	cpu->cpu_m.mcpu_pri = newpri;
	(*setspl)(newpri);
	/*
	 * If we are going to reenable interrupts see if new priority level
	 * allows pending softint delivery.
	 */
	if (IS_FAKE_SOFTINT(flag, newpri))
		fakesoftint();
	ASSERT(!interrupts_enabled());
	intr_restore(flag);
	return (curpri);
}
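/*
 * A minimal usage sketch (not from the source above; example_critical()
 * and the chosen level are hypothetical): splr() only ever raises the
 * IPL and returns the previous one, so the saved value can be handed
 * straight back to splx() to undo the raise.
 */
static void
example_critical(void)
{
	int s;

	s = splr(ipltospl(LOCK_LEVEL));	/* raise, never lower */
	/* ... touch state shared with a high-level interrupt handler ... */
	splx(s);			/* drop back to the saved ipl */
}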
void
tlb_range_demap(struct pmap *pm, vm_offset_t start, vm_offset_t end)
{
	vm_offset_t va;
	void *cookie;
	u_long flags;
	register_t s;

	PMAP_STATS_INC(tlb_nrange_demap);
	cookie = ipi_tlb_range_demap(pm, start, end);
	s = intr_disable();
	if (CPU_ISSET(PCPU_GET(cpuid), &pm->pm_active)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_range_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		for (va = start; va < end; va += PAGE_SIZE) {
			stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
			stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
			flush(KERNBASE);
		}
	}
	intr_restore(s);
	ipi_wait(cookie);
}
/*
 * Write the Protection Lock Register to lock down the
 * user-settable segment of the Protection Register.
 * NOTE: this operation is not reversible.
 */
int
cfi_intel_set_plr(struct cfi_softc *sc)
{
#ifdef CFI_ARMEDANDDANGEROUS
	register_t intr;
	int error;
#endif

	if (sc->sc_cmdset != CFI_VEND_INTEL_ECS)
		return EOPNOTSUPP;
	KASSERT(sc->sc_width == 2, ("sc_width %d", sc->sc_width));

#ifdef CFI_ARMEDANDDANGEROUS
	/* worthy of console msg */
	device_printf(sc->sc_dev, "set PLR\n");
	intr = intr_disable();
	cfi_write(sc, 0, CFI_INTEL_PP_SETUP);
	cfi_put16(sc, CFI_INTEL_PLR, 0xFFFD);
	intr_restore(intr);
	error = cfi_wait_ready(sc, CFI_BCS_READ_STATUS,
	    sc->sc_write_timeout);
	cfi_write(sc, 0, CFI_BCS_READ_ARRAY);
	return error;
#else
	device_printf(sc->sc_dev, "%s: PLR not set, "
	    "CFI_ARMEDANDDANGEROUS not configured\n", __func__);
	return ENXIO;
#endif
}
void
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	u_long flags;
	void *cookie;
	u_long s;

	critical_enter();
	cookie = ipi_tlb_page_demap(pm, va);
	if (pm->pm_active & PCPU_GET(cpumask)) {
		KASSERT(pm->pm_context[PCPU_GET(cpuid)] != -1,
		    ("tlb_page_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		s = intr_disable();
		stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
		membar(Sync);
		intr_restore(s);
	}
	ipi_wait(cookie);
	critical_exit();
}
/*
 * Initialize the floating point unit.
 */
void
fpuinit(void)
{
	register_t saveintr;
	u_int mxcsr;
	u_short control;

	if (IS_BSP())
		fpuinit_bsp1();

	if (use_xsave) {
		load_cr4(rcr4() | CR4_XSAVE);
		xsetbv(XCR0, xsave_mask);
	}

	/*
	 * XCR0 shall be set up before CPU can report the save area size.
	 */
	if (IS_BSP())
		fpuinit_bsp2();

	/*
	 * It is too early for critical_enter() to work on AP.
	 */
	saveintr = intr_disable();
	stop_emulating();
	fninit();
	control = __INITIAL_FPUCW__;
	fldcw(control);
	mxcsr = __INITIAL_MXCSR__;
	ldmxcsr(mxcsr);
	start_emulating();
	intr_restore(saveintr);
}
void
tlb_context_demap(struct pmap *pm)
{
	void *cookie;
	register_t s;

	/*
	 * It is important that we are not interrupted or preempted while
	 * doing the IPIs.  The interrupted CPU may hold locks, and since
	 * it will wait for the CPU that sent the IPI, this can lead to a
	 * deadlock when an interrupt comes in on that CPU and its handler
	 * tries to grab one of those locks.  This will only happen for
	 * spin locks, but these IPI types are delivered even if normal
	 * interrupts are disabled, so the lock critical section will not
	 * protect the target processor from entering the IPI handler with
	 * the lock held.
	 */
	PMAP_STATS_INC(tlb_ncontext_demap);
	cookie = ipi_tlb_context_demap(pm);
	if (pm->pm_active & PCPU_GET(cpumask)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_context_demap: inactive pmap?"));
		s = intr_disable();
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_PRIMARY | TLB_DEMAP_CONTEXT, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		intr_restore(s);
	}
	ipi_wait(cookie);
}
void
tlb_page_demap(struct pmap *pm, vm_offset_t va)
{
	u_long flags;
	void *cookie;
	register_t s;

	PMAP_STATS_INC(tlb_npage_demap);
	cookie = ipi_tlb_page_demap(pm, va);
	if (pm->pm_active & PCPU_GET(cpumask)) {
		KASSERT(pm->pm_context[curcpu] != -1,
		    ("tlb_page_demap: inactive pmap?"));
		if (pm == kernel_pmap)
			flags = TLB_DEMAP_NUCLEUS | TLB_DEMAP_PAGE;
		else
			flags = TLB_DEMAP_PRIMARY | TLB_DEMAP_PAGE;

		s = intr_disable();
		stxa(TLB_DEMAP_VA(va) | flags, ASI_DMMU_DEMAP, 0);
		stxa(TLB_DEMAP_VA(va) | flags, ASI_IMMU_DEMAP, 0);
		flush(KERNBASE);
		intr_restore(s);
	}
	ipi_wait(cookie);
}
static int
acpi_timer_test(void)
{
	uint32_t last, this;
	int min, max, n, delta;
	register_t s;

	min = 10000000;
	max = 0;

	/* Test the timer with interrupts disabled to get accurate results. */
	s = intr_disable();
	last = acpi_timer_read();
	for (n = 0; n < N; n++) {
		this = acpi_timer_read();
		delta = acpi_TimerDelta(this, last);
		if (delta > max)
			max = delta;
		else if (delta < min)
			min = delta;
		last = this;
	}
	intr_restore(s);

	if (max - min > 2)
		n = 0;
	else if (min < 0 || max == 0)
		n = 0;
	else
		n = 1;
	if (bootverbose)
		printf(" %d/%d", n, max - min);
	return (n);
}
/*
 * IBM Blue Lightning
 */
static void
init_bluelightning(void)
{
	register_t saveintr;

#if defined(PC98) && !defined(CPU_UPGRADE_HW_CACHE)
	need_post_dma_flush = 1;
#endif

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	invd();

#ifdef CPU_BLUELIGHTNING_FPU_OP_CACHE
	wrmsr(0x1000, 0x9c92LL);	/* FP operand can be cacheable on Cyrix FPU */
#else
	wrmsr(0x1000, 0x1c92LL);	/* Intel FPU */
#endif
	/* Enables 13MB and 0-640KB cache. */
	wrmsr(0x1001, (0xd0LL << 32) | 0x3ff);
#ifdef CPU_BLUELIGHTNING_3X
	wrmsr(0x1002, 0x04000000LL);	/* Enables triple-clock mode. */
#else
	wrmsr(0x1002, 0x03000000LL);	/* Enables double-clock mode. */
#endif

	/* Enable caching in CR0. */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	invd();
	intr_restore(saveintr);
}
/*
 * This function will reprogram the timer.
 *
 * When in oneshot mode the argument is the absolute time in future at which to
 * generate the interrupt.
 *
 * When in periodic mode, the argument is the interval at which the
 * interrupts should be generated.  There is no need to support the periodic
 * mode timer change at this time.
 *
 * Note that we must be careful to convert from hrtime to Xen system time (see
 * xpv_timestamp.c).
 */
static void
xen_uppc_timer_reprogram(hrtime_t timer_req)
{
	hrtime_t now, timer_new, time_delta, xen_time;
	ulong_t flags;

	flags = intr_clear();
	/*
	 * We should be called from high PIL context (CBE_HIGH_PIL),
	 * so kpreempt is disabled.
	 */

	now = xpv_gethrtime();
	xen_time = xpv_getsystime();
	if (timer_req <= now) {
		/*
		 * requested to generate an interrupt in the past
		 * generate an interrupt as soon as possible
		 */
		time_delta = XEN_NSEC_PER_TICK;
	} else
		time_delta = timer_req - now;

	timer_new = xen_time + time_delta;
	if (HYPERVISOR_set_timer_op(timer_new) != 0)
		panic("can't set hypervisor timer?");
	intr_restore(flags);
}
/*
 * Initialize BBL_CR_CTL3 (Control register 3: used to configure the
 * L2 cache).
 */
static void
init_mendocino(void)
{
#ifdef CPU_PPRO2CELERON
	register_t saveintr;
	u_int64_t bbl_cr_ctl3;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	bbl_cr_ctl3 = rdmsr(MSR_BBL_CR_CTL3);

	/* If the L2 cache is configured, do nothing. */
	if (!(bbl_cr_ctl3 & 1)) {
		bbl_cr_ctl3 = 0x134052bLL;

		/* Set L2 Cache Latency (Default: 5). */
#ifdef CPU_CELERON_L2_LATENCY
#if CPU_L2_LATENCY > 15
#error invalid CPU_L2_LATENCY.
#endif
		bbl_cr_ctl3 |= CPU_L2_LATENCY << 1;
#else
		bbl_cr_ctl3 |= 5 << 1;
#endif
		wrmsr(MSR_BBL_CR_CTL3, bbl_cr_ctl3);
	}

	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));
	intr_restore(saveintr);
#endif /* CPU_PPRO2CELERON */
}
/*
 * Cyrix 486S/DX series
 */
static void
init_cy486dx(void)
{
	register_t saveintr;
	u_char	ccr2;

	saveintr = intr_disable();
	invd();

	ccr2 = read_cyrix_reg(CCR2);
#ifdef CPU_SUSP_HLT
	ccr2 |= CCR2_SUSP_HLT;
#endif

#ifdef PC98
	/* Enables WB cache interface pin and Lock NW bit in CR0. */
	ccr2 |= CCR2_WB | CCR2_LOCK_NW;
	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, ccr2 & ~CCR2_LOCK_NW);
	load_cr0((rcr0() & ~CR0_CD) | CR0_NW);	/* CD = 0, NW = 1 */
#endif

	write_cyrix_reg(CCR2, ccr2);
	intr_restore(saveintr);
}
/*
 * Send an interprocessor interrupt - sun4u.
 */
void
sparc64_send_ipi_sun4u(int upaid, ipifunc_t func, uint64_t arg1, uint64_t arg2)
{
	int i, ik, shift = 0;
	uint64_t intr_func;

	KASSERT(upaid != curcpu()->ci_cpuid);

	/*
	 * UltraSPARC-IIIi CPUs select the BUSY/NACK pair based on the
	 * lower two bits of the ITID.
	 */
	if (CPU_IS_USIIIi())
		shift = (upaid & 0x3) * 2;

	if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
		panic("recursive IPI?");

	intr_func = (uint64_t)(u_long)func;

	/* Schedule an interrupt. */
	for (i = 0; i < 10000; i++) {
		int s = intr_disable();

		stxa(IDDR_0H, ASI_INTERRUPT_DISPATCH, intr_func);
		stxa(IDDR_1H, ASI_INTERRUPT_DISPATCH, arg1);
		stxa(IDDR_2H, ASI_INTERRUPT_DISPATCH, arg2);
		stxa(IDCR(upaid), ASI_INTERRUPT_DISPATCH, 0);
		membar_Sync();
		/* Workaround for SpitFire erratum #54, from FreeBSD */
		if (CPU_IS_SPITFIRE()) {
			(void)ldxa(P_DCR_0, ASI_INTERRUPT_RECEIVE_DATA);
			membar_Sync();
		}

		for (ik = 0; ik < 1000000; ik++) {
			if (ldxa(0, ASI_IDSR) & (IDSR_BUSY << shift))
				continue;
			else
				break;
		}
		intr_restore(s);

		if (ik == 1000000)
			break;

		if ((ldxa(0, ASI_IDSR) & (IDSR_NACK << shift)) == 0)
			return;
		/*
		 * Wait for a while with interrupts enabled to avoid
		 * deadlocks.  XXX - random value is better.
		 */
		DELAY(1);
	}

	if (panicstr == NULL)
		panic("cpu%d: ipi_send: couldn't send ipi to UPAID %u"
		    " (tried %d times)", cpu_number(), upaid, i);
}
static void
ap_start(phandle_t node, u_int mid, u_int cpu_impl)
{
	volatile struct cpu_start_args *csa;
	struct pcpu *pc;
	register_t s;
	vm_offset_t va;
	u_int cpuid;
	uint32_t clock;

	if (cpuids > mp_maxid)
		return;

	if (OF_getprop(node, "clock-frequency", &clock, sizeof(clock)) <= 0)
		panic("%s: couldn't determine CPU frequency", __func__);
	if (clock != PCPU_GET(clock))
		tick_et_use_stick = 1;

	csa = &cpu_start_args;
	csa->csa_state = 0;
	sun4u_startcpu(node, (void *)mp_tramp, 0);
	s = intr_disable();
	while (csa->csa_state != CPU_TICKSYNC)
		;
	membar(StoreLoad);
	csa->csa_tick = rd(tick);
	if (cpu_impl == CPU_IMPL_SPARC64V ||
	    cpu_impl >= CPU_IMPL_ULTRASPARCIII) {
		while (csa->csa_state != CPU_STICKSYNC)
			;
		membar(StoreLoad);
		csa->csa_stick = rdstick();
	}
	while (csa->csa_state != CPU_INIT)
		;
	csa->csa_tick = csa->csa_stick = 0;
	intr_restore(s);

	cpuid = cpuids++;
	cpuid_to_mid[cpuid] = mid;
	cpu_identify(csa->csa_ver, clock, cpuid);

	va = kmem_malloc(kernel_arena, PCPU_PAGES * PAGE_SIZE,
	    M_WAITOK | M_ZERO);
	pc = (struct pcpu *)(va + (PCPU_PAGES * PAGE_SIZE)) - 1;
	pcpu_init(pc, cpuid, sizeof(*pc));
	dpcpu_init((void *)kmem_malloc(kernel_arena, DPCPU_SIZE,
	    M_WAITOK | M_ZERO), cpuid);
	pc->pc_addr = va;
	pc->pc_clock = clock;
	pc->pc_impl = cpu_impl;
	pc->pc_mid = mid;
	pc->pc_node = node;

	cache_init(pc);

	CPU_SET(cpuid, &all_cpus);
	intr_add_cpu(cpuid);
}
// default printf is the same as printf_i, for now
int
printf(const char *format, ...)
{
	int *varg = (int *)(char *)(&format);
	int level = intr_disable();
	int ret = print(0, varg);

	intr_restore(level);
	return ret;
}
void
lapic_setup(int boot)
{
	struct lapic *la;
	u_int32_t maxlvt;
	register_t saveintr;
	char buf[MAXCOMLEN + 1];

	la = &lapics[lapic_id()];
	KASSERT(la->la_present, ("missing APIC structure"));
	saveintr = intr_disable();
	maxlvt = (lapic->version & APIC_VER_MAXLVT) >> MAXLVTSHIFT;

	/* Initialize the TPR to allow all interrupts. */
	lapic_set_tpr(0);

	/* Setup spurious vector and enable the local APIC. */
	lapic_enable();

	/* Program LINT[01] LVT entries. */
	lapic->lvt_lint0 = lvt_mode(la, LVT_LINT0, lapic->lvt_lint0);
	lapic->lvt_lint1 = lvt_mode(la, LVT_LINT1, lapic->lvt_lint1);

	/* Program the PMC LVT entry if present. */
	if (maxlvt >= LVT_PMC)
		lapic->lvt_pcint = lvt_mode(la, LVT_PMC, lapic->lvt_pcint);

	/* Program timer LVT and setup handler. */
	la->lvt_timer_cache = lapic->lvt_timer =
	    lvt_mode(la, LVT_TIMER, lapic->lvt_timer);
	if (boot) {
		snprintf(buf, sizeof(buf), "cpu%d:timer", PCPU_GET(cpuid));
		intrcnt_add(buf, &la->la_timer_count);
	}

	/* Setup the timer if configured. */
	if (la->la_timer_mode != 0) {
		KASSERT(la->la_timer_period != 0, ("lapic%u: zero divisor",
		    lapic_id()));
		lapic_timer_set_divisor(lapic_timer_divisor);
		if (la->la_timer_mode == 1)
			lapic_timer_periodic(la, la->la_timer_period, 1);
		else
			lapic_timer_oneshot(la, la->la_timer_period, 1);
	}

	/* Program error LVT and clear any existing errors. */
	lapic->lvt_error = lvt_mode(la, LVT_ERROR, lapic->lvt_error);
	lapic->esr = 0;

	/* XXX: Thermal LVT */

	/* Program the CMCI LVT entry if present. */
	if (maxlvt >= LVT_CMCI)
		lapic->lvt_cmci = lvt_mode(la, LVT_CMCI, lapic->lvt_cmci);

	intr_restore(saveintr);
}
/*
 * Cyrix 6x86MX (code-named M2)
 *
 * XXX - What should I do here?  Please let me know.
 */
static void
init_6x86MX(void)
{
	register_t saveintr;
	u_char	ccr3, ccr4;

	saveintr = intr_disable();

	load_cr0(rcr0() | CR0_CD | CR0_NW);
	wbinvd();

	/* Initialize CCR0. */
	write_cyrix_reg(CCR0, read_cyrix_reg(CCR0) | CCR0_NC1);

	/* Initialize CCR1. */
#ifdef CPU_CYRIX_NO_LOCK
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) | CCR1_NO_LOCK);
#else
	write_cyrix_reg(CCR1, read_cyrix_reg(CCR1) & ~CCR1_NO_LOCK);
#endif

	/* Initialize CCR2. */
#ifdef CPU_SUSP_HLT
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_SUSP_HLT);
#else
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_SUSP_HLT);
#endif

	ccr3 = read_cyrix_reg(CCR3);
	write_cyrix_reg(CCR3, CCR3_MAPEN0);

	/* Initialize CCR4. */
	ccr4 = read_cyrix_reg(CCR4);
	ccr4 &= ~CCR4_IOMASK;
#ifdef CPU_IORT
	write_cyrix_reg(CCR4, ccr4 | (CPU_IORT & CCR4_IOMASK));
#else
	write_cyrix_reg(CCR4, ccr4 | 7);
#endif

	/* Initialize CCR5. */
#ifdef CPU_WT_ALLOC
	write_cyrix_reg(CCR5, read_cyrix_reg(CCR5) | CCR5_WT_ALLOC);
#endif

	/* Restore CCR3. */
	write_cyrix_reg(CCR3, ccr3);

	/* Unlock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) & ~CCR2_LOCK_NW);
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	/* Lock NW bit in CR0. */
	write_cyrix_reg(CCR2, read_cyrix_reg(CCR2) | CCR2_LOCK_NW);

	intr_restore(saveintr);
}
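/*
 * The init_* routines above share one recipe: disable interrupts, put
 * the cache into no-fill mode (CR0_CD | CR0_NW) and flush it, program
 * the model-specific registers, then re-enable the cache and restore
 * the saved interrupt state.  A skeleton of that pattern follows;
 * init_example() and program_model_regs() are hypothetical names.
 */
static void
init_example(void)
{
	register_t saveintr;

	saveintr = intr_disable();
	load_cr0(rcr0() | CR0_CD | CR0_NW);	/* cache off, no line fills */
	wbinvd();				/* flush before reconfiguring */
	program_model_regs();			/* hypothetical MSR/config setup */
	load_cr0(rcr0() & ~(CR0_CD | CR0_NW));	/* CD = 0 and NW = 0 */
	intr_restore(saveintr);
}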
static void
elan_poll_pps(struct timecounter *tc)
{
	static int state;
	int i;
	uint16_t u, x, y, z;
	register_t saveintr;

	/*
	 * Grab the HW state as quickly and compactly as we can.  Disable
	 * interrupts to avoid measuring our interrupt service time on
	 * hw with quality clock sources.
	 */
	saveintr = intr_disable();
	x = *pps_ap[0];	/* state, must be first, see below */
	y = *pps_ap[1];	/* timer2 */
	z = *pps_ap[2];	/* timer1 */
	intr_restore(saveintr);

	/*
	 * Order is important here.  We need to check the state of the GPIO
	 * pin first, in order to avoid reading timer 1 right before the
	 * state change.  Technically pps_a may be zero in which case we
	 * harmlessly read the REVID register and the contents of pps_d is
	 * of no concern.
	 */
	i = x & pps_d;

	/* If state did not change or we don't have a GPIO pin, return */
	if (i == state || pps_a == 0)
		return;

	state = i;

	/* If the state is "low", flip the echo GPIO and return. */
	if (!i) {
		if (echo_a)
			mmcrptr[(echo_a ^ 0xc) / 2] = echo_d;
		return;
	}

	/*
	 * Subtract timer1 from timer2 to compensate for time from the
	 * edge until we read the counters.
	 */
	u = y - z;

	pps_capture(&elan_pps);
	elan_pps.capcount = u;
	pps_event(&elan_pps, PPS_CAPTUREASSERT);

	/* Twiddle echo bit */
	if (echo_a)
		mmcrptr[echo_a / 2] = echo_d;
}
static void
native_lapic_xapic_mode(void)
{
	register_t saveintr;

	saveintr = intr_disable();
	if (x2apic_mode)
		native_lapic_enable_x2apic();
	intr_restore(saveintr);
}
/*
 * Clear MPERF/APERF MSR
 */
static void
reset_turbo_info(void)
{
	ulong_t		iflag;

	iflag = intr_clear();
	wrmsr(IA32_MPERF_MSR, 0);
	wrmsr(IA32_APERF_MSR, 0);
	intr_restore(iflag);
}
static int
atkbdc_setup(atkbdc_softc_t *sc, bus_space_tag_t tag, bus_space_handle_t h0,
    bus_space_handle_t h1)
{
#if defined(__amd64__)
	u_int64_t tscval[3], read_delay;
	register_t flags;
#endif

	if (sc->ioh0 == 0) {	/* XXX */
		sc->command_byte = -1;
		sc->command_mask = 0;
		sc->lock = FALSE;
		sc->kbd.head = sc->kbd.tail = 0;
		sc->aux.head = sc->aux.tail = 0;
#if KBDIO_DEBUG >= 2
		sc->kbd.call_count = 0;
		sc->kbd.qcount = sc->kbd.max_qcount = 0;
		sc->aux.call_count = 0;
		sc->aux.qcount = sc->aux.max_qcount = 0;
#endif
	}
	sc->iot = tag;
	sc->ioh0 = h0;
	sc->ioh1 = h1;

#if defined(__amd64__)
	/*
	 * On certain chipsets the AT keyboard controller isn't present and
	 * is emulated by the BIOS using an SMI interrupt.  On those chipsets
	 * reading from the status port may be a thousand times slower than
	 * usual.  Sometimes this emulation is not working properly, resulting
	 * in commands timing out, and since we assume that an inb() operation
	 * takes very little time to complete, we need to adjust the number of
	 * retries to keep the waiting time within the designed limit (100ms).
	 * Measure the time it takes to make a read_status() call and adjust
	 * the number of retries accordingly.
	 */
	flags = intr_disable();
	tscval[0] = rdtsc();
	read_status(sc);
	tscval[1] = rdtsc();
	DELAY(1000);
	tscval[2] = rdtsc();
	intr_restore(flags);
	read_delay = tscval[1] - tscval[0];
	read_delay /= (tscval[2] - tscval[1]) / 1000;
	sc->retry = 100000 / ((KBDD_DELAYTIME * 2) + read_delay);
#else
	sc->retry = 5000;
#endif
	sc->quirks = atkbdc_getquirks();

	return 0;
}
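/*
 * Worked example with hypothetical numbers: if one read_status() costs
 * ~30us of TSC time and KBDD_DELAYTIME is, say, 7us, then each retry
 * takes 2 * 7 + 30 = 44us, so sc->retry = 100000 / 44 ~= 2272 polls,
 * keeping the total wait near the intended 100ms budget.
 */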
/*
 * ENERGY_PERF_BIAS setting,
 * A hint to HW, based on user power-policy
 */
static void
cpupm_iepb_set_policy(uint64_t iepb_policy)
{
	ulong_t		iflag;
	uint64_t	epb_value;

	epb_value = iepb_policy & EPB_MSR_MASK;

	iflag = intr_clear();
	wrmsr(IA32_ENERGY_PERF_BIAS_MSR, epb_value);
	intr_restore(iflag);
}
void
spinlock_exit(void)
{
	struct thread *td;
	register_t sstatus_ie;

	td = curthread;
	critical_exit();
	sstatus_ie = td->td_md.md_saved_sstatus_ie;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(sstatus_ie);
}
void
spinlock_exit(void)
{
	struct thread *td;
	register_t daif;

	td = curthread;
	critical_exit();
	daif = td->td_md.md_saved_daif;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(daif);
}
void
spinlock_exit(void)
{
	struct thread *td;
	register_t intr;

	td = curthread;
	critical_exit();
	intr = td->td_md.md_saved_intr;
	td->td_md.md_spinlock_count--;
	if (td->td_md.md_spinlock_count == 0)
		intr_restore(intr);
}
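/*
 * A sketch (an assumption, not taken from the source above) of the
 * spinlock_enter() that matches the variant directly above, using the
 * same MD fields: the first acquisition disables interrupts, saves the
 * previous state in md_saved_intr, and enters a critical section so the
 * thread cannot be preempted; nested acquisitions only bump the count.
 */
void
spinlock_enter(void)
{
	struct thread *td;
	register_t intr;

	td = curthread;
	if (td->td_md.md_spinlock_count == 0) {
		intr = intr_disable();
		td->td_md.md_spinlock_count = 1;
		td->td_md.md_saved_intr = intr;
		critical_enter();
	} else
		td->td_md.md_spinlock_count++;
}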
/*
 * Get count of MPERF/APERF MSR
 */
static void
get_turbo_info(cpupm_mach_turbo_info_t *turbo_info)
{
	ulong_t		iflag;
	uint64_t	mcnt, acnt;

	iflag = intr_clear();
	mcnt = rdmsr(IA32_MPERF_MSR);
	acnt = rdmsr(IA32_APERF_MSR);
	turbo_info->t_mcnt += mcnt;
	turbo_info->t_acnt += acnt;
	intr_restore(iflag);
}
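/*
 * Hypothetical consumer of the accumulated counts (turbo_ratio_pct()
 * is an illustrative name, not part of the source): the ratio of the
 * APERF delta to the MPERF delta approximates the average operating
 * frequency relative to the base frequency, so 100 means no turbo.
 */
static uint64_t
turbo_ratio_pct(const cpupm_mach_turbo_info_t *turbo_info)
{
	if (turbo_info->t_mcnt == 0)
		return (0);
	return ((turbo_info->t_acnt * 100) / turbo_info->t_mcnt);
}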