Example No. 1
static int
ucp_start_pmc(int cpu, int ri)
{
	struct pmc *pm;
	uint32_t evsel;
	struct uncore_cpu *cc;

	KASSERT(cpu >= 0 && cpu < pmc_cpu_max(),
	    ("[uncore,%d] illegal CPU value %d", __LINE__, cpu));
	KASSERT(ri >= 0 && ri < uncore_ucp_npmc,
	    ("[uncore,%d] illegal row-index %d", __LINE__, ri));

	cc = uncore_pcpu[cpu];
	pm = cc->pc_uncorepmcs[ri].phw_pmc;

	KASSERT(pm,
	    ("[uncore,%d] starting cpu%d,ri%d with no pmc configured",
		__LINE__, cpu, ri));

	PMCDBG(MDP,STA,1, "ucp-start cpu=%d ri=%d", cpu, ri);

	evsel = pm->pm_md.pm_ucp.pm_ucp_evsel;

	PMCDBG(MDP,STA,2, "ucp-start/2 cpu=%d ri=%d evselmsr=0x%x evsel=0x%x",
	    cpu, ri, UCP_EVSEL0 + ri, evsel);

	wrmsr(UCP_EVSEL0 + ri, evsel);

	do {
		cc->pc_resync = 0;
		cc->pc_globalctrl |= (1ULL << ri);
		wrmsr(UC_GLOBAL_CTRL, cc->pc_globalctrl);
	} while (cc->pc_resync != 0);

	return (0);
}
Example No. 2
static void enable_pm_timer(void)
{
	/* ACPI PM timer emulation */
	msr_t msr;
	/*
	 * The derived frequency is calculated as follows:
	 *    (CTC_FREQ * msr[63:32]) >> 32 = target frequency.
	 * Back solve the multiplier so the 3.579545MHz ACPI timer
	 * frequency is used.
	 */
	msr.hi = (3579545ULL << 32) / CTC_FREQ;
	/* Set PM1 timer IO port and enable*/
	msr.lo = EMULATE_PM_TMR_EN | (ACPI_PMIO_BASE + R_ACPI_PM1_TMR);
	wrmsr(MSR_EMULATE_PM_TMR, msr);
}
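
The comment above back-solves the multiplier so that (CTC_FREQ * msr.hi) >> 32 lands on the 3.579545 MHz ACPI PM timer rate. Below is a standalone sketch of that arithmetic, not part of the firmware; the CTC_FREQ value of 19,200,000 Hz is only an illustrative assumption, the real constant comes from the platform headers.

#include <stdio.h>
#include <stdint.h>

#define CTC_FREQ 19200000ULL	/* illustrative value only */

int main(void)
{
	/* Same back-solve as the msr.hi assignment above. */
	uint64_t mult = (3579545ULL << 32) / CTC_FREQ;
	/* Applying the hardware's (CTC_FREQ * mult) >> 32 recovers ~3.579545 MHz. */
	uint64_t derived_hz = (CTC_FREQ * mult) >> 32;

	printf("mult = 0x%llx, derived = %llu Hz\n",
	    (unsigned long long)mult, (unsigned long long)derived_hz);
	return 0;
}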
Example No. 3
void disable_lapic_nmi_watchdog(void)
{
    if (nmi_active <= 0)
        return;
    switch (boot_cpu_data.x86_vendor) {
    case X86_VENDOR_AMD:
        wrmsr(MSR_K7_EVNTSEL0, 0, 0);
        break;
    case X86_VENDOR_INTEL:
        switch (boot_cpu_data.x86) {
        case 6:
            wrmsr(MSR_P6_EVNTSEL0, 0, 0);
            break;
        case 15:
            wrmsr(MSR_P4_IQ_CCCR0, 0, 0);
            wrmsr(MSR_P4_CRU_ESCR0, 0, 0);
            break;
        }
        break;
    }
    nmi_active = -1;
    /* tell do_nmi() and others that we're not active any more */
    nmi_watchdog = NMI_NONE;
}
Example No. 4
static void configure_mca(void)
{
	msr_t msr;
	const unsigned int mcg_cap_msr = 0x179;
	int i;
	int num_banks;

	msr = rdmsr(mcg_cap_msr);
	num_banks = msr.lo & 0xff;

	/* TODO(adurbin): This should only be done on a cold boot. Also, some
	 * of these banks are core vs package scope. For now every CPU clears
	 * every bank. */
	msr.lo = msr.hi = 0;
	for (i = 0; i < num_banks; i++) {
		wrmsr(MSR_IA32_MC0_STATUS + (i * 4) + 1, msr);
		wrmsr(MSR_IA32_MC0_STATUS + (i * 4) + 2, msr);
		wrmsr(MSR_IA32_MC0_STATUS + (i * 4) + 3, msr);
	}

	msr.lo = msr.hi = 0xffffffff;
	for (i = 0; i < num_banks; i++)
		wrmsr(MSR_IA32_MC0_STATUS + (i * 4), msr);
}
Example No. 5
static void
tprof_amdpmi_start_cpu(void *arg1, void *arg2)
{
	struct cpu_info * const ci = curcpu();
	uint64_t pesr;
	uint64_t event_lo;
	uint64_t event_hi;

	event_hi = event >> 8;
	event_lo = event & 0xff;
	pesr = PESR_USR | PESR_OS | PESR_INT |
	    __SHIFTIN(event_lo, PESR_EVENT_MASK_LO) |
	    __SHIFTIN(event_hi, PESR_EVENT_MASK_HI) |
	    __SHIFTIN(0, PESR_COUNTER_MASK) |
	    __SHIFTIN(unit, PESR_UNIT_MASK);

	wrmsr(PERFCTR(ctrno), counter_reset_val);
	wrmsr(PERFEVTSEL(ctrno), pesr);

	tprof_amdpmi_lapic_saved[cpu_index(ci)] = i82489_readreg(LAPIC_PCINT);
	i82489_writereg(LAPIC_PCINT, LAPIC_DLMODE_NMI);

	wrmsr(PERFEVTSEL(ctrno), pesr | PESR_EN);
}
Example No. 6
static void nmi_restore_registers(struct op_msrs * msrs)
{
    unsigned int const nr_ctrs = model->num_counters;
    unsigned int const nr_ctrls = model->num_controls;
    struct op_msr * counters = msrs->counters;
    struct op_msr * controls = msrs->controls;
    unsigned int i;

    for (i = 0; i < nr_ctrls; ++i) {
        if (controls[i].addr) {
            wrmsr(controls[i].addr,
                  controls[i].saved.low,
                  controls[i].saved.high);
        }
    }

    for (i = 0; i < nr_ctrs; ++i) {
        if (counters[i].addr) {
            wrmsr(counters[i].addr,
                  counters[i].saved.low,
                  counters[i].saved.high);
        }
    }
}
Example No. 7
int sysArchPrctl(int which, void *addr) {
	int error = 0;
	uintptr_t uaddr = (uintptr_t)addr;

	if (uaddr & (0xFFFF8000UL << 32)) {
		return -EINVAL;
	}
	thread_t thread = getCurrentThread();
	switch (which) {
		case PRCTL_FS:
			thread->fsBase = addr;
			wrmsr(0xC0000100, uaddr);
			break;
		case PRCTL_GS:
			thread->gsBase = addr;
			wrmsr(0xC0000102, uaddr);
			break;
		default:
			error = -EINVAL;
			break;
	}

	return error;
}
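
The mask 0xFFFF8000UL << 32 covers bits 63:47, so the check above rejects any base address outside the lower canonical half of the 48-bit virtual address space before it is written to the FS/GS base MSRs. A minimal illustration of the same test follows (standalone sketch, assuming a 64-bit target as in the original kernel):

#include <stdint.h>
#include <stdbool.h>

/* True when bits 63:47 are all clear, i.e. the address is a lower-half
 * canonical user address; this mirrors the rejection test above. */
static bool fsgs_base_is_acceptable(uintptr_t addr)
{
	return (addr & (0xFFFF8000ULL << 32)) == 0;
}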
Example No. 8
u_int64_t
cpu_tsc_freq_ctr(struct cpu_info *ci)
{
	u_int64_t count, last_count, msr;

	if ((ci->ci_flags & CPUF_CONST_TSC) == 0 ||
	    (cpu_perf_eax & CPUIDEAX_VERID) <= 1 ||
	    CPUIDEDX_NUM_FC(cpu_perf_edx) <= 1)
		return (0);

	msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
	if (msr & MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK)) {
		/* some hypervisor is dicking us around */
		return (0);
	}

	msr |= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_1);
	wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);

	msr = rdmsr(MSR_PERF_GLOBAL_CTRL) | MSR_PERF_GLOBAL_CTR1_EN;
	wrmsr(MSR_PERF_GLOBAL_CTRL, msr);

	last_count = rdmsr(MSR_PERF_FIXED_CTR1);
	delay(100000);
	count = rdmsr(MSR_PERF_FIXED_CTR1);

	msr = rdmsr(MSR_PERF_FIXED_CTR_CTRL);
	msr &= MSR_PERF_FIXED_CTR_FC(1, MSR_PERF_FIXED_CTR_FC_MASK);
	wrmsr(MSR_PERF_FIXED_CTR_CTRL, msr);

	msr = rdmsr(MSR_PERF_GLOBAL_CTRL);
	msr &= ~MSR_PERF_GLOBAL_CTR1_EN;
	wrmsr(MSR_PERF_GLOBAL_CTRL, msr);

	return ((count - last_count) * 10);
}
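
The final multiplication works because the fixed counter runs for the 100,000 µs delay, i.e. a tenth of a second, so scaling the delta by 10 yields counts per second. A sketch of that conversion with purely illustrative readings:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative counter readings taken 0.1 s apart. */
	uint64_t last_count = 5000000000ULL;
	uint64_t count      = 5220000000ULL;

	/* A delta of 220 million cycles over 0.1 s corresponds to 2.2 GHz. */
	uint64_t tsc_hz = (count - last_count) * 10;

	printf("estimated TSC frequency: %llu Hz\n", (unsigned long long)tsc_hz);
	return 0;
}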
Example No. 9
static void asmlinkage cpu_smm_do_relocation(void *arg)
{
#ifndef CONFIG_MAX_CPUS
#error CONFIG_MAX_CPUS must be set.
#endif
	msr_t smrr;
	em64t100_smm_state_save_area_t *smm_state;
	const struct smm_module_params *p;
	const struct smm_runtime *runtime;
	int cpu;

	p = arg;
	runtime = p->runtime;
	cpu = p->cpu;

	if (cpu >= CONFIG_MAX_CPUS) {
		printk(BIOS_CRIT,
		       "Invalid CPU number assigned in SMM stub: %d\n", cpu);
		return;
	}

	/* Set up SMRR. */
	smrr.lo = relo_attrs.smrr_base;
	smrr.hi = 0;
	wrmsr(SMRR_PHYS_BASE, smrr);
	smrr.lo = relo_attrs.smrr_mask;
	smrr.hi = 0;
	wrmsr(SMRR_PHYS_MASK, smrr);

	/* The relocated handler runs with all CPUs concurrently. Therefore
	 * stagger the entry points adjusting SMBASE downwards by save state
	 * size * CPU num. */
	smm_state = (void *)(SMM_EM64T100_SAVE_STATE_OFFSET + runtime->smbase);
	smm_state->smbase = relo_attrs.smbase - cpu * runtime->save_state_size;
	printk(BIOS_DEBUG, "New SMBASE 0x%08x\n", smm_state->smbase);
}
Example No. 10
static void __pminit setup_k7_watchdog(void)
{
	int i;
	unsigned int evntsel;

	nmi_perfctr_msr = MSR_K7_PERFCTR0;

	for(i = 0; i < 4; ++i) {
		wrmsr(MSR_K7_EVNTSEL0+i, 0, 0);
		wrmsr(MSR_K7_PERFCTR0+i, 0, 0);
	}

	evntsel = K7_EVNTSEL_INT
		| K7_EVNTSEL_OS
		| K7_EVNTSEL_USR
		| K7_NMI_EVENT;

	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
	Dprintk("setting K7_PERFCTR0 to %08lx\n", -(cpu_khz/nmi_hz*1000));
	wrmsr(MSR_K7_PERFCTR0, -(cpu_khz/nmi_hz*1000), -1);
	apic_write(APIC_LVTPC, APIC_DM_NMI);
	evntsel |= K7_EVNTSEL_ENABLE;
	wrmsr(MSR_K7_EVNTSEL0, evntsel, 0);
}
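
The counter seed is the negative of one NMI period expressed in core clock cycles, so the performance counter counts up and overflows (raising the NMI) roughly nmi_hz times per second. A standalone sketch of that arithmetic, using illustrative values for cpu_khz and nmi_hz:

#include <stdio.h>

int main(void)
{
	/* Illustrative values: a 2.2 GHz CPU and one watchdog NMI per second. */
	unsigned long cpu_khz = 2200000;
	unsigned long nmi_hz = 1;
	unsigned long period = cpu_khz / nmi_hz * 1000;	/* cycles between NMIs */

	/* The low 32 bits written to MSR_K7_PERFCTR0 above. */
	printf("seed = %08x, overflows after %lu cycles\n",
	    (unsigned int)-period, period);
	return 0;
}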
Example No. 11
static void
init_ppro(void)
{
	u_int64_t	apicbase;

	/*
	 * Local APIC should be disabled if it is not going to be used.
	 */
	if (ppro_apic_used != 1) {
		apicbase = rdmsr(MSR_APICBASE);
		apicbase &= ~APICBASE_ENABLED;
		wrmsr(MSR_APICBASE, apicbase);
		ppro_apic_used = 0;
	}
}
Example No. 12
static void configure_c_states(void)
{
	msr_t msr;

	msr = rdmsr(MSR_PMG_CST_CONFIG_CONTROL);
	msr.lo |= (1 << 15); // Lock configuration
	msr.lo |= (1 << 10); // redirect IO-based CState transition requests to MWAIT
	msr.lo &= ~(1 << 9); // Issue a single stop grant cycle upon stpclk
	msr.lo &= ~7; msr.lo |= HIGHEST_CLEVEL; // support at most C3
	// TODO Do we want Deep C4 and  Dynamic L2 shrinking?
	wrmsr(MSR_PMG_CST_CONFIG_CONTROL, msr);

	/* Set Processor MWAIT IO BASE (P_BLK) */
	msr.hi = 0;
	// TODO Do we want PM1_BASE? Needs SMM?
	//msr.lo = ((PMB0_BASE + 4) & 0xffff) | (((PMB1_BASE + 9) & 0xffff) << 16);
	msr.lo = ((PMB0_BASE + 4) & 0xffff);
	wrmsr(MSR_PMG_IO_BASE_ADDR, msr);

	/* set C_LVL controls */
	msr.hi = 0;
	msr.lo = (PMB0_BASE + 4) | ((HIGHEST_CLEVEL - 2) << 16); // -2 because LVL0+1 aren't counted
	wrmsr(MSR_PMG_IO_CAPTURE_ADDR, msr);
}
Example No. 13
void set_resume_cache(void)
{
	msr_t msr;

	/* disable fixed mtrr for now,  it will be enabled by mtrr restore */
	msr = rdmsr(SYSCFG_MSR);
	msr.lo &= ~(SYSCFG_MSR_MtrrFixDramEn | SYSCFG_MSR_MtrrFixDramModEn);
	wrmsr(SYSCFG_MSR, msr);

	/* Enable caching for 0 - coreboot ram using variable mtrr */
	msr.lo = 0 | MTRR_TYPE_WRBACK;
	msr.hi = 0;
	wrmsr(MTRRphysBase_MSR(0), msr);
	msr.lo = ~(CONFIG_RAMTOP - 1) | MTRRphysMaskValid;
	msr.hi = (1 << (CONFIG_CPU_ADDR_BITS - 32)) - 1;
	wrmsr(MTRRphysMask_MSR(0), msr);

	/* Set the default memory type and disable fixed and enable variable MTRRs */
	msr.hi = 0;
	msr.lo = (1 << 11);
	wrmsr(MTRRdefType_MSR, msr);

	enable_cache();
}
Example No. 14
void intel_p4_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 l, h;
	int i;
	
	machine_check_vector = intel_machine_check;
	wmb();

	printk (KERN_INFO "Intel machine check architecture supported.\n");
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<8))	/* Control register present ? */
		wrmsr (MSR_IA32_MCG_CTL, 0xffffffff, 0xffffffff);
	nr_mce_banks = l & 0xff;

	for (i=0; i<nr_mce_banks; i++) {
		wrmsr (MSR_IA32_MC0_CTL+4*i, 0xffffffff, 0xffffffff);
		wrmsr (MSR_IA32_MC0_STATUS+4*i, 0x0, 0x0);
	}

	set_in_cr4 (X86_CR4_MCE);
	printk (KERN_INFO "Intel machine check reporting enabled on CPU#%d.\n",
		smp_processor_id());

	/* Check for P4/Xeon extended MCE MSRs */
	rdmsr (MSR_IA32_MCG_CAP, l, h);
	if (l & (1<<9))	{/* MCG_EXT_P */
		mce_num_extended_msrs = (l >> 16) & 0xff;
		printk (KERN_INFO "CPU%d: Intel P4/Xeon Extended MCE MSRs (%d)"
				" available\n",
			smp_processor_id(), mce_num_extended_msrs);

#ifdef CONFIG_X86_MCE_P4THERMAL
		/* Check for P4/Xeon Thermal monitor */
		intel_init_thermal(c);
#endif
	}
}
Example No. 15
File: nmi.c Project: 1x23/unifi-gpl
void nmi_watchdog_tick (struct pt_regs * regs)
{

	/*
	 * Since current_thread_info()-> is always on the stack, and we
	 * always switch the stack NMI-atomically, it's safe to use
	 * smp_processor_id().
	 */
	int sum, cpu = smp_processor_id();

	sum = per_cpu(irq_stat, cpu).apic_timer_irqs;

	if (last_irq_sums[cpu] == sum) {
		/*
		 * Ayiee, looks like this CPU is stuck ...
		 * wait a few IRQs (5 seconds) before doing the oops ...
		 */
		alert_counter[cpu]++;
		if (alert_counter[cpu] == 5*nmi_hz)
			/*
			 * die_nmi will return ONLY if NOTIFY_STOP happens..
			 */
			die_nmi(regs, "NMI Watchdog detected LOCKUP");

		last_irq_sums[cpu] = sum;
		alert_counter[cpu] = 0;
	}
	if (nmi_perfctr_msr) {
		if (nmi_perfctr_msr == MSR_P4_IQ_COUNTER0) {
			/*
			 * P4 quirks:
			 * - An overflown perfctr will assert its interrupt
			 *   until the OVF flag in its CCCR is cleared.
			 * - LVTPC is masked on interrupt and must be
			 *   unmasked by the LVTPC handler.
			 */
			wrmsr(MSR_P4_IQ_CCCR0, nmi_p4_cccr_val, 0);
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		else if (nmi_perfctr_msr == MSR_P6_PERFCTR0) {
			/* Only P6 based Pentium M need to re-unmask
			 * the apic vector but it doesn't hurt
			 * other P6 variant */
			apic_write(APIC_LVTPC, APIC_DM_NMI);
		}
		write_watchdog_counter(NULL);
	}
}
Example No. 16
static int rthal_nmi_watchdog_tick(struct pt_regs *regs, unsigned reason)
#endif /* Linux >= 2.6.19 */
{
	int cpu = rthal_processor_id();
	rthal_nmi_wd_t *wd = &rthal_nmi_wds[cpu];
	unsigned long long now;

	if (wd->armed) {
		if (rthal_rdtsc() - wd->tick_date < rthal_maxlat_tsc) {
			++wd->early_shots;
			wd->next_linux_check = wd->tick_date + rthal_maxlat_tsc;
		} else {
			printk("NMI early shots: %d\n", wd->early_shots);
			rthal_nmi_emergency(regs);
		}
	}

	now = rthal_rdtsc();

	if ((long long)(now - wd->next_linux_check) >= 0) {

		CALL_LINUX_NMI;

		do {
			wd->next_linux_check += RTHAL_CPU_FREQ;
		} while ((long long)(now - wd->next_linux_check) >= 0);
	}

	if (wd->perfctr_msr == MSR_P4_IQ_COUNTER0) {
		/*
		 * P4 quirks:
		 * - An overflown perfctr will assert its interrupt
		 *   until the OVF flag in its CCCR is cleared.
		 * - LVTPC is masked on interrupt and must be
		 *   unmasked by the LVTPC handler.
		 */
		wrmsr(MSR_P4_IQ_CCCR0, wd->p4_cccr_val, 0);
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	} else if (rthal_nmi_perfctr_msr == MSR_P6_PERFCTR0) {
		/* Only P6 based Pentium M need to re-unmask
		 * the apic vector but it doesn't hurt
		 * other P6 variant */
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	}
	
	wrmsrl(wd->perfctr_msr, now - wd->next_linux_check);
	NMI_RETURN;
}
Example No. 17
/**
 * @brief Software interrupt thread routine to handle channel messages from
 * the hypervisor.
 */
static void
vmbus_msg_swintr(void *dummy)
{
	int 			cpu;
	void*			page_addr;
	hv_vmbus_message*	msg;
	hv_vmbus_message*	copied;

	cpu = PCPU_GET(cpuid);
	page_addr = hv_vmbus_g_context.syn_ic_msg_page[cpu];
	msg = (hv_vmbus_message*) page_addr + HV_VMBUS_MESSAGE_SINT;

	for (;;) {
		if (msg->header.message_type == HV_MESSAGE_TYPE_NONE) {
			break; /* no message */
		} else {
			copied = malloc(sizeof(hv_vmbus_message),
					M_DEVBUF, M_NOWAIT);
			KASSERT(copied != NULL,
				("Error VMBUS: malloc failed to allocate"
					" hv_vmbus_message!"));
			if (copied == NULL)
				continue;
			memcpy(copied, msg, sizeof(hv_vmbus_message));
			hv_queue_work_item(hv_vmbus_g_connection.work_queue,
			hv_vmbus_on_channel_message, copied);
	    }

	    msg->header.message_type = HV_MESSAGE_TYPE_NONE;

	    /*
	     * Make sure the write to message_type (ie set to
	     * HV_MESSAGE_TYPE_NONE) happens before we read the
	     * message_pending and EOMing. Otherwise, the EOMing will
	     * not deliver any more messages
	     * since there is no empty slot
	     */
	    wmb();

	    if (msg->header.message_flags.u.message_pending) {
			/*
			 * This will cause message queue rescan to possibly
			 * deliver another msg from the hypervisor
			 */
			wrmsr(HV_X64_MSR_EOM, 0);
	    }
	}
}
Example No. 18
void __init set_nx(void)
{
	unsigned int v[4], l, h;

	if (cpu_has_pae && (cpuid_eax(0x80000000) > 0x80000001)) {
		cpuid(0x80000001, &v[0], &v[1], &v[2], &v[3]);

		if ((v[3] & (1 << 20)) && !disable_nx) {
			rdmsr(MSR_EFER, l, h);
			l |= EFER_NX;
			wrmsr(MSR_EFER, l, h);
			nx_enabled = 1;
			__supported_pte_mask |= _PAGE_NX;
		}
	}
}
Example No. 19
File: intel.c Project: Chong-Li/xen
void set_cpuid_faulting(bool_t enable)
{
	uint32_t hi, lo;

	if (!cpu_has_cpuid_faulting ||
	    this_cpu(cpuid_faulting_enabled) == enable )
		return;

	rdmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);
	lo &= ~MSR_MISC_FEATURES_CPUID_FAULTING;
	if (enable)
		lo |= MSR_MISC_FEATURES_CPUID_FAULTING;
	wrmsr(MSR_INTEL_MISC_FEATURES_ENABLES, lo, hi);

	this_cpu(cpuid_faulting_enabled) = enable;
}
Example No. 20
/*
 * Try to enable Turbo mode.
 */
void enable_turbo(void)
{
	msr_t msr;

	/* Only possible if turbo is available but hidden */
	if (get_turbo_state() == TURBO_DISABLED) {
		/* Clear Turbo Disable bit in Misc Enables */
		msr = rdmsr(MSR_IA32_MISC_ENABLES);
		msr.hi &= ~H_MISC_DISABLE_TURBO;
		wrmsr(MSR_IA32_MISC_ENABLES, msr);

		/* Update cached turbo state */
		set_global_turbo_state(TURBO_ENABLED);
		printk(BIOS_INFO, "Turbo has been enabled\n");
	}
}
Example No. 21
/* CPUbugIAENG1398
 *
 * ClearQuest #IAENG1398
 * The MC can not be enabled with SDR memory but can for DDR. Enable for
 * DDR here if the setup token is "Default"
 * Add this back to core by default once 2.0 CPUs are not supported.
 */
static void eng1398(void)
{
	msr_t msr;

	msr = rdmsr(MSR_GLCP+0x17);
	if ((msr.lo & 0xff) <= CPU_REV_2_0) {
		msr = rdmsr(GLCP_SYS_RSTPLL);
		if (msr.lo & (1<<RSTPPL_LOWER_SDRMODE_SHIFT))
			return;
	}

	/* no CMOS/NVRAM to check, so enable MC Clock Gating */
	msr = rdmsr(MC_GLD_MSR_PM);
	msr.lo |= 3; /* enable MC clock gating.*/
	wrmsr(MC_GLD_MSR_PM, msr);
}
Example No. 22
static void msr_set_bit(unsigned reg, unsigned bit)
{
	msr_t msr = rdmsr(reg);

	if (bit < 32) {
		if (msr.lo & (1 << bit))
			return;
		msr.lo |= 1 << bit;
	} else {
		if (msr.hi & (1 << (bit - 32)))
			return;
		msr.hi |= 1 << (bit - 32);
	}

	wrmsr(reg, msr);
}
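
Because msr_t carries the 64-bit register as separate lo/hi words, the helper above picks the half to touch from the bit index and skips the wrmsr entirely when the bit is already set. A hypothetical caller might look like the sketch below; the register number and bit position are placeholders, not taken from the surrounding project:

/* Hypothetical usage sketch: bit 34 lands in the .hi word as (1 << 2),
 * and the write is issued only if that bit was previously clear. */
#define EXAMPLE_MSR 0x123	/* placeholder register number */

static void example_caller(void)
{
	msr_set_bit(EXAMPLE_MSR, 34);
}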
Example No. 23
static void set_energy_perf_bias(u8 policy)
{
	msr_t msr;
	int ecx;

	/* Determine if energy efficient policy is supported. */
	ecx = cpuid_ecx(0x6);
	if (!(ecx & (1 << 3)))
		return;

	/* Energy Policy is bits 3:0 */
	msr = rdmsr(IA32_ENERGY_PERF_BIAS);
	msr.lo &= ~0xf;
	msr.lo |= policy & 0xf;
	wrmsr(IA32_ENERGY_PERF_BIAS, msr);
}
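
IA32_ENERGY_PERF_BIAS takes a 4-bit policy in bits 3:0, with 0 biasing fully toward performance and 15 fully toward energy saving; the helper masks the argument accordingly. A usage sketch follows; the value 6 is only an illustrative middle-of-the-road choice, not a value taken from this project:

static void example_policy_setup(void)
{
	/* Illustrative: request a balanced energy/performance policy.
	 * Values outside 0..15 are truncated by the '& 0xf' above. */
	set_energy_perf_bias(6);
}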
Example No. 24
int
glxpcib_activate(struct device *self, int act)
{
#ifndef SMALL_KERNEL
	struct glxpcib_softc *sc = (struct glxpcib_softc *)self;
	uint i;
#endif
	int rv = 0;

	switch (act) {
	case DVACT_SUSPEND:
#ifndef SMALL_KERNEL
		if (sc->sc_wdog) {
			sc->sc_wdog_period = bus_space_read_2(sc->sc_iot,
			    sc->sc_ioh, AMD5536_MFGPT0_CMP2);
			glxpcib_wdogctl_cb(sc, 0);
		}
#endif
		rv = config_activate_children(self, act);
#ifndef SMALL_KERNEL
		for (i = 0; i < nitems(glxpcib_msrlist); i++)
			sc->sc_msrsave[i] = rdmsr(glxpcib_msrlist[i]);
#endif

		break;
	case DVACT_RESUME:
#ifndef SMALL_KERNEL
		if (sc->sc_wdog)
			glxpcib_wdogctl_cb(sc, sc->sc_wdog_period);
		for (i = 0; i < nitems(glxpcib_msrlist); i++)
			wrmsr(glxpcib_msrlist[i], sc->sc_msrsave[i]);
#endif
		rv = config_activate_children(self, act);
		break;
	case DVACT_POWERDOWN:
#ifndef SMALL_KERNEL
		if (sc->sc_wdog)
			wdog_shutdown(self);
#endif
		rv = config_activate_children(self, act);
		break;
	default:
		rv = config_activate_children(self, act);
		break;
	}
	return (rv);
}
Example No. 25
int geode_mfgpt_toggle_event(int timer, int cmp, int event, int enable)
{
    u32 msr, mask, value, dummy;
    int shift = (cmp == MFGPT_CMP1) ? 0 : 8;

    if (timer < 0 || timer >= MFGPT_MAX_TIMERS)
        return -EIO;

    /*
     * The register maps for these are described in sections 6.17.1.x of
     * the AMD Geode CS5536 Companion Device Data Book.
     */
    switch (event) {
    case MFGPT_EVENT_RESET:
        /*
         * XXX: According to the docs, we cannot reset timers above
         * 6; that is, resets for 7 and 8 will be ignored.  Is this
         * a problem?   -dilinger
         */
        msr = MSR_MFGPT_NR;
        mask = 1 << (timer + 24);
        break;

    case MFGPT_EVENT_NMI:
        msr = MSR_MFGPT_NR;
        mask = 1 << (timer + shift);
        break;

    case MFGPT_EVENT_IRQ:
        msr = MSR_MFGPT_IRQ;
        mask = 1 << (timer + shift);
        break;

    default:
        return -EIO;
    }

    rdmsr(msr, value, dummy);

    if (enable)
        value |= mask;
    else
        value &= ~mask;

    wrmsr(msr, value, dummy);
    return 0;
}
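
The NMI and IRQ masks above put comparator-1 enables in the low byte of the MSR and comparator-2 enables in the next byte, one bit per timer. A standalone sketch of the mask arithmetic with illustrative values (timer 2, MFGPT_CMP2):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* Illustrative values: timer 2 with the second comparator (shift 8). */
	int timer = 2;
	int shift = 8;
	uint32_t mask = UINT32_C(1) << (timer + shift);

	printf("mask = 0x%x\n", mask);	/* prints mask = 0x400 */
	return 0;
}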
Example No. 26
/*
 * Initialize CPU control registers
 */
void
initializecpu(void)
{
	uint64_t msr;

	if ((cpu_feature & CPUID_XMM) && (cpu_feature & CPUID_FXSR)) {
		load_cr4(rcr4() | CR4_FXSR | CR4_XMM);
		cpu_fxsr = hw_instruction_sse = 1;
	}
	if ((amd_feature & AMDID_NX) != 0) {
		msr = rdmsr(MSR_EFER) | EFER_NXE;
		wrmsr(MSR_EFER, msr);
		pg_nx = PG_NX;
	}
	if (cpu_vendor_id == CPU_VENDOR_CENTAUR)
		init_via();
}
Example No. 27
static u64 __rmid_read(u32 rmid)
{
	u64 val;

	/*
	 * Ignore the SDM, this thing is _NOTHING_ like a regular perfcnt,
	 * it just says that to increase confusion.
	 */
	wrmsr(MSR_IA32_QM_EVTSEL, QOS_L3_OCCUP_EVENT_ID, rmid);
	rdmsrl(MSR_IA32_QM_CTR, val);

	/*
	 * Aside from the ERROR and UNAVAIL bits, assume this thing returns
	 * the number of cachelines tagged with @rmid.
	 */
	return val;
}
Example No. 28
/* Set up machine check reporting on the Winchip C6 series */
void winchip_mcheck_init(struct cpuinfo_x86 *c)
{
	u32 lo, hi;

	machine_check_vector = winchip_machine_check;
	/* Make sure the vector pointer is visible before we enable MCEs: */
	wmb();

	rdmsr(MSR_IDT_FCR1, lo, hi);
	lo |= (1<<2);	/* Enable EIERRINT (int 18 MCE) */
	lo &= ~(1<<4);	/* Enable MCE */
	wrmsr(MSR_IDT_FCR1, lo, hi);

	cr4_set_bits(X86_CR4_MCE);

	pr_info("Winchip machine check reporting enabled on CPU#0.\n");
}
Example No. 29
static int
k6_mrset(struct mem_range_softc *sc, struct mem_range_desc *desc, int *arg)
{
    u_int64_t reg;
    u_int32_t mtrr;
    int error, d;

    switch (*arg) {
    case MEMRANGE_SET_UPDATE:
        error = k6_mrmake(desc, &mtrr);
        if (error)
            return (error);
        for (d = 0; d < sc->mr_ndesc; d++) {
            if (!sc->mr_desc[d].mr_len) {
                sc->mr_desc[d] = *desc;
                goto out;
            }
            if (sc->mr_desc[d].mr_base == desc->mr_base &&
                    sc->mr_desc[d].mr_len == desc->mr_len)
                return (EEXIST);
        }
        return (ENOSPC);
    case MEMRANGE_SET_REMOVE:
        mtrr = 0;
        for (d = 0; d < sc->mr_ndesc; d++)
            if (sc->mr_desc[d].mr_base == desc->mr_base &&
                    sc->mr_desc[d].mr_len == desc->mr_len) {
                bzero(&sc->mr_desc[d], sizeof(sc->mr_desc[d]));
                goto out;
            }
        return (ENOENT);
    default:
        return (EOPNOTSUPP);
    }
out:
    disable_intr();
    wbinvd();
    reg = rdmsr(UWCCR);
    reg &= ~(0xffffffff << (32 * d));
    reg |= mtrr << (32 * d);
    wrmsr(UWCCR, reg);
    wbinvd();
    enable_intr();

    return (0);
}
Example No. 30
File: intel.c Project: Chong-Li/xen
/*
 * P4 Xeon errata 037 workaround.
 * Hardware prefetcher may cause stale data to be loaded into the cache.
 *
 * Xeon 7400 erratum AAI65 (and further newer Xeons)
 * MONITOR/MWAIT may have excessive false wakeups
 */
static void Intel_errata_workarounds(struct cpuinfo_x86 *c)
{
	unsigned long lo, hi;

	if ((c->x86 == 15) && (c->x86_model == 1) && (c->x86_mask == 1)) {
		rdmsr (MSR_IA32_MISC_ENABLE, lo, hi);
		if ((lo & (1<<9)) == 0) {
			printk (KERN_INFO "CPU: C0 stepping P4 Xeon detected.\n");
			printk (KERN_INFO "CPU: Disabling hardware prefetching (Errata 037)\n");
			lo |= (1<<9);	/* Disable hw prefetching */
			wrmsr (MSR_IA32_MISC_ENABLE, lo, hi);
		}
	}

	if (c->x86 == 6 && cpu_has_clflush &&
	    (c->x86_model == 29 || c->x86_model == 46 || c->x86_model == 47))
		__set_bit(X86_FEATURE_CLFLUSH_MONITOR, c->x86_capability);
}