static void
fbt_resume(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	u_long psl;
	u_long cr0;

#if 0	/* XXX TBD */
	ASSERT(ctl->nenabled > 0);

	if ((ctl->loadcnt != fbt->fbtp_loadcnt))
		return;
#endif

	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Disable write protection in supervisor mode. */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

	for (; fbt != NULL; fbt = fbt->fbtp_next)
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);
}
/*
 * Set the Transmeta LongRun mode.  Returns 1 on success, 0 if the
 * requested mode is not a known mode.
 */
static u_int
tmx86_set_longrun_mode(u_int mode)
{
	u_long		eflags;
	union msrinfo	msrinfo;

	if (mode >= LONGRUN_MODE_UNKNOWN)
		return 0;

	eflags = x86_read_psl();
	x86_disable_intr();

	/* Write LongRun mode values to Model Specific Register. */
	msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN);
	msrinfo.regs[0] = LONGRUN_MODE_WRITE(msrinfo.regs[0],
	    longrun_modes[mode][0]);
	msrinfo.regs[1] = LONGRUN_MODE_WRITE(msrinfo.regs[1],
	    longrun_modes[mode][1]);
	wrmsr(MSR_TMx86_LONGRUN, msrinfo.msr);

	/* Write LongRun mode flags to Model Specific Register. */
	msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN_FLAGS);
	msrinfo.regs[0] = (msrinfo.regs[0] & ~0x01) | longrun_modes[mode][2];
	wrmsr(MSR_TMx86_LONGRUN_FLAGS, msrinfo.msr);

	x86_write_psl(eflags);
	return 1;
}
/*
 * Determine the current LongRun mode by matching the LongRun MSR
 * contents against the known mode table.
 */
u_int
tmx86_get_longrun_mode(void)
{
	u_long		eflags;
	union msrinfo	msrinfo;
	u_int		low, high, flags, mode;

	eflags = x86_read_psl();
	x86_disable_intr();

	msrinfo.msr = rdmsr(MSR_TMx86_LONGRUN);
	low = LONGRUN_MODE_MASK(msrinfo.regs[0]);
	high = LONGRUN_MODE_MASK(msrinfo.regs[1]);
	flags = rdmsr(MSR_TMx86_LONGRUN_FLAGS) & 0x01;

	for (mode = 0; mode < LONGRUN_MODE_MAX; mode++) {
		if (low == longrun_modes[mode][0] &&
		    high == longrun_modes[mode][1] &&
		    flags == longrun_modes[mode][2]) {
			goto out;
		}
	}
	mode = LONGRUN_MODE_UNKNOWN;
out:
	x86_write_psl(eflags);
	return mode;
}
static int
fbt_enable(void *arg, dtrace_id_t id, void *parg)
{
	fbt_probe_t *fbt = parg;
#if 0
	dtrace_modctl_t *ctl = fbt->fbtp_ctl;
#endif
	u_long psl;
	u_long cr0;

#if 0	/* XXX TBD */
	ctl->nenabled++;

	/*
	 * Now check that our modctl has the expected load count.  If it
	 * doesn't, this module must have been unloaded and reloaded -- and
	 * we're not going to touch it.
	 */
	if (ctl->loadcnt != fbt->fbtp_loadcnt) {
		if (fbt_verbose) {
			printf("fbt is failing for probe %s "
			    "(module %s reloaded)",
			    fbt->fbtp_name, ctl->filename);
		}
		return;
	}
#endif

	/* Disable interrupts. */
	psl = x86_read_psl();
	x86_disable_intr();

	/* Disable write protection in supervisor mode. */
	cr0 = rcr0();
	lcr0(cr0 & ~CR0_WP);

	for (; fbt != NULL; fbt = fbt->fbtp_next) {
		*fbt->fbtp_patchpoint = fbt->fbtp_patchval;
	}

	/* Write back and invalidate cache, flush pipelines. */
	wbinvd();
	x86_flush();
	x86_write_psl(psl);

	/* Re-enable write protection. */
	lcr0(cr0);

	return 0;
}
void
tmx86_get_longrun_status(u_int *frequency, u_int *voltage, u_int *percentage)
{
	u_long eflags;
	u_int descs[4];

	eflags = x86_read_psl();
	x86_disable_intr();

	/* Transmeta-specific CPUID leaf reporting the current operating point. */
	x86_cpuid(0x80860007, descs);
	*frequency = descs[0];
	*voltage = descs[1];
	*percentage = descs[2];

	x86_write_psl(eflags);
}
/*
 * Restore a value to cpl (unmasking interrupts).  If any unmasked
 * interrupts are pending, call Xspllower() to process them.
 */
void
spllower(int nlevel)
{
	struct cpu_info *ci = curcpu();
	uint32_t imask;
	u_long psl;

	if (ci->ci_ilevel <= nlevel)
		return;

	__insn_barrier();

	imask = IUNMASK(ci, nlevel);
	psl = x86_read_psl();
	x86_disable_intr();
	if (ci->ci_ipending & imask) {
		KASSERT(psl == 0);
		Xspllower(nlevel);
		/* Xspllower does enable_intr() */
	} else {
		ci->ci_ilevel = nlevel;
		x86_write_psl(psl);
	}
}