Example #1
/* Set up the event. Test for authorized counter sets and only include
 * counter sets which are authorized at the time of the setup. Including
 * unauthorized counter sets results in a specification exception (and panic).
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events);
	struct perf_event_attr *attr = &event->attr;
	enum cpumf_ctr_set i;
	int err = 0;

	debug_sprintf_event(cf_diag_dbg, 5,
			    "%s event %p cpu %d authorized %#x\n", __func__,
			    event, event->cpu, cpuhw->info.auth_ctl);

	event->hw.config = attr->config;
	event->hw.config_base = 0;
	local64_set(&event->count, 0);

	/* Add all authorized counter sets to config_base */
	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i)
		if (cpuhw->info.auth_ctl & cpumf_ctr_ctl[i])
			event->hw.config_base |= cpumf_ctr_ctl[i];

	/* No authorized counter sets, nothing to count/sample */
	if (!event->hw.config_base) {
		err = -EINVAL;
		goto out;
	}

	/* Set sample_period to indicate sampling */
	event->hw.sample_period = attr->sample_period;
	local64_set(&event->hw.period_left, event->hw.sample_period);
	event->hw.last_period  = event->hw.sample_period;
out:
	debug_sprintf_event(cf_diag_dbg, 5, "%s err %d config_base %#lx\n",
			    __func__, err, event->hw.config_base);
	return err;
}
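Since config_base ends up holding the OR of all counter-set control bits that
were authorized at setup time, later code can recheck membership with a simple
mask test. A minimal sketch of that pattern, reusing the cpumf_ctr_ctl[] table
from the example above (the helper name and loop body are illustrative only):

static void for_each_authorized_set(struct perf_event *event)
{
	enum cpumf_ctr_set i;

	for (i = CPUMF_CTR_SET_BASIC; i < CPUMF_CTR_SET_MAX; ++i) {
		if (!(event->hw.config_base & cpumf_ctr_ctl[i]))
			continue;	/* set was not authorized at setup */
		/* ... read or activate counter set i here ... */
	}
}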
Example #2
static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}
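The matching read side is Example #7 below: it re-reads the counter, swaps the
new value into prev_count with a cmpxchg retry loop (an NMI may race with the
update), and folds the delta into event->count.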
Example #3
static int perf_ibs_init(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	struct perf_ibs *perf_ibs;
	u64 max_cnt, config;
	int ret;

	perf_ibs = get_ibs_pmu(event->attr.type);
	if (perf_ibs) {
		config = event->attr.config;
	} else {
		perf_ibs = &perf_ibs_op;
		ret = perf_ibs_precise_event(event, &config);
		if (ret)
			return ret;
	}

	if (event->pmu != &perf_ibs->pmu)
		return -ENOENT;

	if (config & ~perf_ibs->config_mask)
		return -EINVAL;

	if (hwc->sample_period) {
		if (config & perf_ibs->cnt_mask)
			/* raw max_cnt may not be set */
			return -EINVAL;
		if (!event->attr.sample_freq && hwc->sample_period & 0x0f)
			/*
			 * The lower 4 bits cannot be set in the IBS max
			 * count, but allow it here in case we later adjust
			 * the sample period to set a frequency.
			 */
			return -EINVAL;
		hwc->sample_period &= ~0x0FULL;
		if (!hwc->sample_period)
			hwc->sample_period = 0x10;
	} else {
		max_cnt = config & perf_ibs->cnt_mask;
		config &= ~perf_ibs->cnt_mask;
		event->attr.sample_period = max_cnt << 4;
		hwc->sample_period = event->attr.sample_period;
	}

	if (!hwc->sample_period)
		return -EINVAL;

	/*
	 * If we modify hwc->sample_period, we also need to update
	 * hwc->last_period and hwc->period_left.
	 */
	hwc->last_period = hwc->sample_period;
	local64_set(&hwc->period_left, hwc->sample_period);

	hwc->config_base = perf_ibs->msr;
	hwc->config = config;

	return 0;
}
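The period handling above enforces a 16-event granularity: the hardware
max-count field ignores the low 4 bits, which is also why the raw count is
shifted left by 4 when converted into a sample period. A minimal sketch of
that rounding rule (a hypothetical helper, not part of the driver):

static u64 ibs_round_period(u64 requested)
{
	u64 period = requested & ~0x0FULL;	/* drop the ignored low 4 bits */

	return period ? period : 0x10;		/* enforce the minimum period */
}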
Example #4
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
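A worked example of the final clamping (values chosen for illustration): with
min = 0x10, max = 0x1000 and left = 0x1800, the branch subtracts max and
returns 0x800 as the next hardware period; the remaining 0x1000 then runs as
the final, max-length period, exactly as the comment describes.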
Example #5
static int perf_ibs_set_period(struct perf_ibs *perf_ibs,
			       struct hw_perf_event *hwc, u64 *period)
{
	int overflow;

	/* ignore lower 4 bits in min count: */
	overflow = perf_event_set_period(hwc, 1<<4, perf_ibs->max_period, period);
	local64_set(&hwc->prev_count, 0);

	return overflow;
}
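The 1<<4 minimum count here mirrors the granularity rule from perf_ibs_init
(Example #3): the hardware ignores the low 4 bits of the count, so any shorter
period would degenerate to zero.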
Example #6
static void snb_uncore_imc_event_start(struct perf_event *event, int flags)
{
	struct intel_uncore_box *box = uncore_event_to_box(event);
	u64 count;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	event->hw.state = 0;
	box->n_active++;

	list_add_tail(&event->active_entry, &box->active_list);

	count = snb_uncore_imc_read_counter(box, event);
	local64_set(&event->hw.prev_count, count);

	if (box->n_active == 1)
		uncore_pmu_start_hrtimer(box);
}
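The hrtimer arming on the first active event is the notable design choice
here: these uncore IMC counters are free-running and cannot raise an overflow
interrupt, so the box timer periodically updates all active events before the
hardware counters can wrap.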
Example #7
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f :  -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}
Example #8
static void h_gpci_event_start(struct perf_event *event, int flags)
{
	local64_set(&event->hw.prev_count, h_gpci_get_value(event));
}
Example #9
static void cstate_pmu_event_start(struct perf_event *event, int mode)
{
	local64_set(&event->hw.prev_count, cstate_pmu_read_counter(event));
}
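Examples #8 and #9 show the snapshot-on-start idiom in its minimal form:
record the current hardware value in prev_count so that the first update can
compute a correct delta.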
Example #10
static int param_set_local64(const char *val, const struct kernel_param *kp)
{
	/* clear on any write */
	local64_set((local64_t *)kp->arg, 0);
	return 0;
}
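For this setter to take effect it has to be packaged into a struct
kernel_param_ops and registered with module_param_cb(). A minimal sketch of
that wiring, assuming a hypothetical read side; param_get_local64,
param_ops_local64 and reset_count are names invented for the illustration:

#include <linux/moduleparam.h>
#include <asm/local64.h>

/* Hypothetical getter: report the current counter value. */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return scnprintf(buffer, PAGE_SIZE, "%lld\n",
			 (long long)local64_read((local64_t *)kp->arg));
}

static const struct kernel_param_ops param_ops_local64 = {
	.set = param_set_local64,
	.get = param_get_local64,
};

static local64_t reset_count;	/* cleared by any write to the parameter */
module_param_cb(reset_count, &param_ops_local64, &reset_count, 0644);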