static void p4_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int i;
	unsigned int low, high;
	unsigned int stag;

	stag = get_stagger();

	rdmsr(MSR_IA32_MISC_ENABLE, low, high);
	if (!MISC_PMC_ENABLED_P(low)) {
		printk(KERN_ERR "oprofile: P4 PMC not available\n");
		return;
	}

	/* clear the cccrs we will use */
	for (i = 0; i < num_counters; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		rdmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
		CCCR_CLEAR(low);
		CCCR_SET_REQUIRED_BITS(low);
		wrmsr(p4_counters[VIRT_CTR(stag, i)].cccr_address, low, high);
	}

	/* clear all escrs (including those outside our concern) */
	for (i = num_counters; i < num_controls; i++) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		wrmsr(msrs->controls[i].addr, 0, 0);
	}

	/* setup all counters */
	for (i = 0; i < num_counters; ++i) {
		if ((counter_config[i].enabled) && (CTRL_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;
			pmc_setup_one_p4_counter(i);
			CTR_WRITE(counter_config[i].count, VIRT_CTR(stag, i));
		} else {
			reset_value[i] = 0;
		}
	}
}
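The stag/VIRT_CTR pair above splits the P4's physical counters between hyper-threading siblings, so each logical CPU programs a disjoint half of the hardware. A minimal sketch of how that indexing can work (the sibling-map accessor varies by kernel version, so treat the get_stagger() body as an assumption):

#define VIRT_CTR(stagger, i)	((i) + ((num_counters) * (stagger)))

static unsigned int get_stagger(void)
{
#ifdef CONFIG_SMP
	int cpu = smp_processor_id();
	/* the second sibling of an HT pair gets stagger = 1 and is
	 * thereby shifted into the upper half of the counter array */
	return cpu != cpumask_first(topology_sibling_cpumask(cpu));
#endif
	return 0;
}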
static void athlon_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(MSR_K7_PERFCTR0 + i);
	}
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(MSR_K7_EVNTSEL0 + i);
	}
}
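The two release_* calls undo per-MSR reservations taken against the NMI watchdog when the counters were claimed. Assuming the usual symmetric layout, the reservation side would look roughly like this sketch (athlon_fill_in_addresses is the conventional name; an address of 0 marks a slot that could not be reserved):

static void athlon_fill_in_addresses(struct op_msrs * const msrs)
{
	int i;

	for (i = 0; i < NUM_COUNTERS; i++) {
		/* record the MSR address only if we actually own it */
		if (reserve_perfctr_nmi(MSR_K7_PERFCTR0 + i))
			msrs->counters[i].addr = MSR_K7_PERFCTR0 + i;
		else
			msrs->counters[i].addr = 0;
	}

	for (i = 0; i < NUM_CONTROLS; i++) {
		if (reserve_evntsel_nmi(MSR_K7_EVNTSEL0 + i))
			msrs->controls[i].addr = MSR_K7_EVNTSEL0 + i;
		else
			msrs->controls[i].addr = 0;
	}
}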
static void athlon_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	/* clear all event-select (control) registers */
	for (i = 0; i < NUM_CONTROLS; ++i) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		CTRL_READ(low, high, msrs, i);
		CTRL_CLEAR_LO(low);
		CTRL_CLEAR_HI(high);
		CTRL_WRITE(low, high, msrs, i);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
			continue;
		CTR_WRITE(1, msrs, i);
	}

	/* enable active counters */
	for (i = 0; i < NUM_COUNTERS; ++i) {
		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;

			CTR_WRITE(counter_config[i].count, msrs, i);

			CTRL_READ(low, high, msrs, i);
			CTRL_CLEAR_LO(low);
			CTRL_CLEAR_HI(high);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT_LOW(low, counter_config[i].event);
			CTRL_SET_EVENT_HIGH(high, counter_config[i].event);
			CTRL_SET_HOST_ONLY(high, 0);
			CTRL_SET_GUEST_ONLY(high, 0);

			CTRL_WRITE(low, high, msrs, i);
		} else {
			reset_value[i] = 0;
		}
	}
}
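The CTRL_SET_* macros pack individual fields of the AMD event-select register. The bit positions below follow the documented K7/K8 PerfEvtSel layout, but the exact macro bodies are an assumption, shown only to make the field packing concrete:

/* PerfEvtSel, low dword: [7:0] event select (low bits), [15:8] unit
 * mask, bit 16 USR, bit 17 OS, bit 20 INT (APIC interrupt on overflow).
 * High dword: [3:0] event select bits [35:32], bit 8 GuestOnly,
 * bit 9 HostOnly.  The EN bit (22) is set later, when profiling starts. */
#define CTRL_SET_ENABLE(val)		((val) |= 1 << 20)
#define CTRL_SET_USR(val, u)		((val) |= (((u) & 1) << 16))
#define CTRL_SET_KERN(val, k)		((val) |= (((k) & 1) << 17))
#define CTRL_SET_UM(val, m)		((val) |= ((m) << 8))
#define CTRL_SET_EVENT_LOW(val, e)	((val) |= ((e) & 0xff))
#define CTRL_SET_EVENT_HIGH(val, e)	((val) |= (((e) >> 8) & 0xf))
#define CTRL_SET_HOST_ONLY(val, h)	((val) |= (((h) & 1) << 9))
#define CTRL_SET_GUEST_ONLY(val, h)	((val) |= (((h) & 1) << 8))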
static void ppro_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(MSR_P6_PERFCTR0 + i);
	}
	for (i = 0; i < num_counters; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(MSR_P6_EVNTSEL0 + i);
	}
	if (reset_value) {
		kfree(reset_value);
		reset_value = NULL;
	}
}
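Every loop in these examples is gated on CTR_IS_RESERVED/CTRL_IS_RESERVED. Assuming reservations are recorded as non-zero MSR addresses (as in the fill_in_addresses sketch earlier), the checks reduce to:

/* a slot is "reserved" iff a real MSR address was stored for it */
#define CTR_IS_RESERVED(msrs, c)	((msrs)->counters[(c)].addr ? 1 : 0)
#define CTRL_IS_RESERVED(msrs, c)	((msrs)->controls[(c)].addr ? 1 : 0)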
static void p4_shutdown(struct op_msrs const * const msrs)
{
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (CTR_IS_RESERVED(msrs, i))
			release_perfctr_nmi(msrs->counters[i].addr);
	}
	/* some of the control registers are specially reserved in
	 * conjunction with the counter registers (hence the starting offset).
	 * This saves a few bits.
	 */
	for (i = num_counters; i < num_controls; ++i) {
		if (CTRL_IS_RESERVED(msrs, i))
			release_evntsel_nmi(msrs->controls[i].addr);
	}
}
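The starting offset exists because each P4 counter MSR is tied to a companion CCCR; reserving the counter covers the pair, so only the stand-alone ESCRs above num_counters need a separate release. A sketch of a binding that makes this pairing explicit (field names are assumptions):

/* hypothetical per-counter binding: the CCCR travels with its counter,
 * which is why controls[0..num_counters-1] need no separate release */
struct p4_counter_binding {
	int virt_counter;		/* index after HT staggering */
	unsigned long counter_address;	/* performance counter MSR */
	unsigned long cccr_address;	/* its companion CCCR MSR */
};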
static void ppro_setup_ctrs(struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	if (!reset_value) {
		reset_value = kmalloc(sizeof(reset_value[0]) * num_counters,
					GFP_ATOMIC);
		if (!reset_value)
			return;
	}

	if (cpu_has_arch_perfmon) {
		union cpuid10_eax eax;
		eax.full = cpuid_eax(0xa);

		/*
		 * For Core2 (family 6, model 15), don't reset the
		 * counter width:
		 */
		if (!(eax.split.version_id == 0 &&
			current_cpu_data.x86 == 6 &&
				current_cpu_data.x86_model == 15)) {

			if (counter_width < eax.split.bit_width)
				counter_width = eax.split.bit_width;
		}
	}

	/* clear all event-select (control) registers */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!CTRL_IS_RESERVED(msrs, i)))
			continue;
		CTRL_READ(low, high, msrs, i);
		CTRL_CLEAR(low);
		CTRL_WRITE(low, high, msrs, i);
	}

	/* avoid a false detection of ctr overflows in NMI handler */
	for (i = 0; i < num_counters; ++i) {
		if (unlikely(!CTR_IS_RESERVED(msrs, i)))
			continue;
		wrmsrl(msrs->counters[i].addr, -1LL);
	}

	/* enable active counters */
	for (i = 0; i < num_counters; ++i) {
		if ((counter_config[i].enabled) && (CTR_IS_RESERVED(msrs, i))) {
			reset_value[i] = counter_config[i].count;

			wrmsrl(msrs->counters[i].addr, -reset_value[i]);

			CTRL_READ(low, high, msrs, i);
			CTRL_CLEAR(low);
			CTRL_SET_ENABLE(low);
			CTRL_SET_USR(low, counter_config[i].user);
			CTRL_SET_KERN(low, counter_config[i].kernel);
			CTRL_SET_UM(low, counter_config[i].unit_mask);
			CTRL_SET_EVENT(low, counter_config[i].event);
			CTRL_WRITE(low, high, msrs, i);
		} else {
			reset_value[i] = 0;
		}
	}
}
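Writing -reset_value[i] arms the counter to overflow after exactly reset_value[i] events; the NMI handler then logs a sample and re-arms it. A sketch of the matching overflow test, assuming the top implemented bit (per counter_width) serves as the overflow indicator:

/* counting up from -reset_value, the top implemented bit stays set
 * until the counter crosses zero, i.e. until it has overflowed */
#define CTR_OVERFLOWED(n)	(!((n) & (1ULL << (counter_width - 1))))

static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;	/* counter not active */
		rdmsrl(msrs->counters[i].addr, val);
		if (CTR_OVERFLOWED(val)) {
			oprofile_add_sample(regs, i);
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
		}
	}
	return 1;
}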