Example 1
static void gator_event_sampling_online_dispatch(int cpu)
{
	struct perf_event *ev;

	if (!event_based_sampling)
		return;

#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
	ev = per_cpu(pevent, cpu) = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu), cpu, 0, ebs_overflow_handler);
#else
	ev = per_cpu(pevent, cpu) = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu), cpu, 0, ebs_overflow_handler, 0);
#endif

	if (IS_ERR(ev)) {
		pr_err("gator: unable to start event-based-sampling\n");
		return;
	}

	if (ev->state != PERF_EVENT_STATE_ACTIVE) {
		pr_err("gator: unable to start event-based-sampling\n");
		perf_event_release_kernel(ev);
		return;
	}

	ev->pmu->read(ev);
	per_cpu(prev_value, cpu) = local64_read(&ev->count);
}
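For context, the per-CPU attribute handed to perf_event_create_kernel_counter() above has to be populated before this dispatch runs. A minimal sketch of such a setup, assuming a hardware cycle counter with a fixed sample period (the names here are illustrative, not gator's actual code):

#include <linux/perf_event.h>
#include <linux/string.h>

/* Illustrative only: gator builds its attributes elsewhere. */
static struct perf_event_attr example_attr;

static void example_init_attr(u64 sample_period)
{
	memset(&example_attr, 0, sizeof(example_attr));
	example_attr.type          = PERF_TYPE_HARDWARE;
	example_attr.config        = PERF_COUNT_HW_CPU_CYCLES;
	example_attr.size          = sizeof(example_attr);
	example_attr.sample_period = sample_period; /* overflow every N events */
	example_attr.pinned        = 1;             /* keep the counter on the PMU */
}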
Example 2
static void ebs_overflow_handler(struct perf_event *event, struct perf_sample_data *data, struct pt_regs *regs)
{
	unsigned int value, delta, cpu = smp_processor_id(), buftype = EVENT_BUF;

	if (event != per_cpu(pevent, cpu))
		return;

	if (buffer_check_space(cpu, buftype, 5 * MAXSIZE_PACK32 + MAXSIZE_PACK64)) {
		value = local64_read(&event->count);
		delta = value - per_cpu(prev_value, cpu);
		per_cpu(prev_value, cpu) = value;

		// Counters header
		gator_buffer_write_packed_int(cpu, buftype, MESSAGE_COUNTERS);     // type
		gator_buffer_write_packed_int64(cpu, buftype, gator_get_time());   // time

		// Output counter
		gator_buffer_write_packed_int(cpu, buftype, 2);                    // length
		gator_buffer_write_packed_int(cpu, buftype, per_cpu(key, cpu));    // key
		gator_buffer_write_packed_int(cpu, buftype, delta);                // delta

		// End Counters, length of zero
		gator_buffer_write_packed_int(cpu, buftype, 0);
	}

	// Output backtrace
	if (buffer_check_space(cpu, buftype, gator_backtrace_depth * 2 * MAXSIZE_PACK32))
		gator_add_sample(cpu, buftype, regs);

	// Check and commit; commit is set to occur once buffer is 3/4 full
	buffer_check(cpu, buftype);
}
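The gator_buffer_write_packed_int() calls above emit variable-length integers; the exact wire format is defined by gator's buffer code, which is not shown here. As an illustration of the general technique, a signed LEB128-style encoder, a common choice for such packed ints, could look like this:

#include <stdint.h>
#include <stddef.h>

/* Illustrative signed LEB128 encoder; gator's actual format may differ.
 * Emits 7 payload bits per byte, MSB set on all but the last byte.
 * Returns the number of bytes written (at most 5 for a 32-bit value). */
static size_t pack_int32(int32_t value, uint8_t *out)
{
	size_t n = 0;
	int more = 1;

	while (more) {
		uint8_t byte = value & 0x7f;

		value >>= 7; /* arithmetic shift keeps the sign */
		/* Done once the remaining bits are all copies of the sign. */
		if ((value == 0 && !(byte & 0x40)) ||
		    (value == -1 && (byte & 0x40)))
			more = 0;
		else
			byte |= 0x80;
		out[n++] = byte;
	}
	return n;
}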
Example 3
static void cstate_pmu_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	u64 prev_raw_count, new_raw_count;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = cstate_pmu_read_counter(event);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	local64_add(new_raw_count - prev_raw_count, &event->count);
}
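The pattern in cstate_pmu_event_update() is the standard lock-free counter update: read the previous value, read the hardware, publish the new previous value with a compare-and-swap, and retry if an NMI got there first. A self-contained userspace analogue of the same retry loop, using C11 atomics in place of local64_cmpxchg() (read_hw_counter() is a stand-in for the real PMU read):

#include <stdatomic.h>
#include <stdint.h>

static _Atomic uint64_t prev_count;
static _Atomic uint64_t event_count;

extern uint64_t read_hw_counter(void); /* stand-in for cstate_pmu_read_counter() */

static void event_update(void)
{
	uint64_t prev, now;

	do {
		prev = atomic_load(&prev_count);
		now  = read_hw_counter();
		/* Publish `now` as the new prev; if someone else (say, an
		 * interrupt handler) already did, the loop retries, so no
		 * delta is ever counted twice. */
	} while (!atomic_compare_exchange_strong(&prev_count, &prev, now));

	atomic_fetch_add(&event_count, now - prev);
}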
Example 4
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		delta <<= 32;
		delta >>= 32; /* sign extend */
	}

	local64_add(delta, &event->count);
}
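The shift pair is the classic way to sign-extend the low 32 bits of a 64-bit value; MSR_SMI_COUNT is a 32-bit counter, so a wrap has to come out as a small positive delta rather than a huge one. A standalone check of the idiom (the left shift is done on an unsigned value and then cast, to stay clear of undefined behavior in portable C):

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/* 32-bit counter wrapped: prev = 0xfffffffe, now = 0x00000001 */
	uint64_t prev = 0xfffffffe, now = 0x00000001;
	uint64_t raw  = now - prev; /* 0xffffffff00000003 */

	/* Keep only the low 32 bits, then sign-extend them, like the
	 * shift pair in msr_event_update(); relies on arithmetic right
	 * shift of signed values, as the kernel does. */
	int64_t delta = (int64_t)(raw << 32) >> 32;

	printf("%lld\n", (long long)delta); /* prints 3 */
	return 0;
}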
Example 5
static int
perf_event_set_period(struct hw_perf_event *hwc, u64 min, u64 max, u64 *hw_period)
{
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int overflow = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	if (unlikely(left < (s64)min)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		overflow = 1;
	}

	/*
	 * If the hw period that triggers the sw overflow is too short
	 * we might hit the irq handler. This biases the results.
	 * Thus we shorten the next-to-last period and set the last
	 * period to the max period.
	 */
	if (left > max) {
		left -= max;
		if (left > max)
			left = max;
		else if (left < min)
			left = min;
	}

	*hw_period = (u64)left;

	return overflow;
}
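To see what the clamping does, it helps to run the same arithmetic on concrete numbers. A standalone harness with the identical control flow, stripped of the local64 wrapper and the last_period bookkeeping (the min/max values and inputs are hypothetical):

#include <stdio.h>
#include <stdint.h>

typedef int64_t s64;
typedef uint64_t u64;

/* Same control flow as perf_event_set_period(), plain integers only. */
static int set_period(s64 *period_left, s64 period, u64 min, u64 max, u64 *hw_period)
{
	s64 left = *period_left;
	int overflow = 0;

	if (left <= -period) {      /* way behind: skip forward */
		left = period;
		overflow = 1;
	}
	if (left < (s64)min) {      /* too close: push out one period */
		left += period;
		overflow = 1;
	}
	if (left > (s64)max) {      /* too far for the hw counter */
		left -= max;
		if (left > (s64)max)
			left = max;
		else if (left < (s64)min)
			left = min;
	}
	*period_left = left;
	*hw_period = (u64)left;
	return overflow;
}

int main(void)
{
	s64 left = -5000, period = 4096;
	u64 hw;
	int ov = set_period(&left, period, 16, (1ULL << 48) - 1, &hw);

	/* left was <= -period, so it snaps to one full period. */
	printf("overflow=%d hw_period=%llu\n", ov, (unsigned long long)hw);
	return 0;
}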
Example 6
static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value: */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT)) {
		delta = sign_extend64(delta, 31);
		local64_add(delta, &event->count);
	} else if (unlikely(event->hw.event_base == MSR_IA32_THERM_STATUS)) {
		/* If valid, extract digital readout, otherwise set to -1: */
		now = now & (1ULL << 31) ? (now >> 16) & 0x3f :  -1;
		local64_set(&event->count, now);
	} else {
		local64_add(delta, &event->count);
	}
}
Example 7
static enum hrtimer_restart hrprof_timer_notify(struct hrtimer *hrtimer) {
	u64 val;
	u64 oruns;
	unsigned long flags;
	hrprof_cpu_t* event = this_cpu_ptr(&hrprof_cpu_event);

	/* XXX: is this call precise? */
	oruns = hrtimer_forward_now(hrtimer, ns_to_ktime(hrprof_resolution));

#ifndef HRPROF_DEMO
	rdpmcl(event->event->hw.idx, val);
#else
	val = local64_read(&event->event->count) + atomic64_read(&event->event->child_count);
#endif

	spin_lock_irqsave(&event->lock, flags);

	do {
		/* Advance queue indexes. If queue is overrun, lose oldest value */
		++event->tail;
		if(event->tail == HRPROF_QUEUE_LEN) {
			event->tail = 0;
		}
		if(event->tail == event->head) {
			++event->head;
			if(event->head == HRPROF_QUEUE_LEN) {
				event->head = 0;
			}
		}

		event->queue[event->tail] = val;
		val = 0;
	} while(--oruns != 0);

	spin_unlock_irqrestore(&event->lock, flags);

	return HRTIMER_RESTART;
}
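The index bookkeeping above drops the oldest sample when the producer laps the consumer. The same head/tail scheme in a minimal userspace form, with the queue shrunk to four slots so the overrun is visible (as in the original, the ring holds QUEUE_LEN - 1 values):

#include <stdio.h>
#include <stdint.h>

#define QUEUE_LEN 4

static uint64_t queue[QUEUE_LEN];
static unsigned head, tail; /* tail = last written slot, head precedes the oldest */

static void push(uint64_t val)
{
	if (++tail == QUEUE_LEN)
		tail = 0;
	if (tail == head) {         /* overrun: lose the oldest value */
		if (++head == QUEUE_LEN)
			head = 0;
	}
	queue[tail] = val;
}

int main(void)
{
	for (uint64_t i = 1; i <= 6; i++)
		push(i * 100);

	/* Six pushes into a capacity of three: only 400, 500, 600 survive. */
	for (unsigned i = head; i != tail; ) {
		if (++i == QUEUE_LEN)
			i = 0;
		printf("%llu\n", (unsigned long long)queue[i]);
	}
	return 0;
}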
Example 8
static void gator_event_sampling_online(void)
{
	int cpu = smp_processor_id(), buftype = EVENT_BUF;

	// read the counter and toss the invalid data, return zero instead
	struct perf_event *ev = per_cpu(pevent, cpu);
	if (ev != NULL && ev->state == PERF_EVENT_STATE_ACTIVE) {
		ev->pmu->read(ev);
		per_cpu(prev_value, cpu) = local64_read(&ev->count);

		// Counters header
		gator_buffer_write_packed_int(cpu, buftype, MESSAGE_COUNTERS);     // type
		gator_buffer_write_packed_int64(cpu, buftype, gator_get_time());   // time

		// Output counter
		gator_buffer_write_packed_int(cpu, buftype, 2);                    // length
		gator_buffer_write_packed_int(cpu, buftype, per_cpu(key, cpu));    // key
		gator_buffer_write_packed_int(cpu, buftype, 0);                    // delta - zero for initialization

		// End Counters, length of zero
		gator_buffer_write_packed_int(cpu, buftype, 0);
	}
}
Example 9
static int
perf_event_try_update(struct perf_event *event, u64 new_raw_count, int width)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - width;
	u64 prev_raw_count;
	u64 delta;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
	prev_raw_count = local64_read(&hwc->prev_count);
	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		return 0;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return 1;
}
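The (x << shift) >> shift dance in perf_event_try_update() discards bits above the hardware counter width before the subtraction, so the delta comes out right even across a wrap. Checking this for a hypothetical 48-bit counter:

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int width = 48, shift = 64 - width;
	/* A 48-bit counter that wrapped past its maximum. */
	uint64_t prev = 0xffffffffff00ULL; /* 2^48 - 256 */
	uint64_t curr = 0x000000000080ULL; /* 128 after the wrap */

	/* Shift both counts so the counter's top bit sits at bit 63,
	 * subtract, then shift back down (logical, since delta is unsigned). */
	uint64_t delta = (curr << shift) - (prev << shift);
	delta >>= shift;

	printf("%llu\n", (unsigned long long)delta); /* prints 384 */
	return 0;
}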
/*
 * The following values show statistics on how perf events are affecting
 * this system.
 */
static int param_get_local64(char *buffer, const struct kernel_param *kp)
{
	return sprintf(buffer, "%lu\n", local64_read((local64_t *)kp->arg));
}
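param_get_local64() is only half of the plumbing; it has to be hooked up through a kernel_param_ops table. A sketch of the wiring, with hypothetical names for the statistic and the ops table:

#include <linux/moduleparam.h>
#include <asm/local64.h>

/* Hypothetical statistic exported read-only through /sys/module/. */
static local64_t example_stat;

static const struct kernel_param_ops param_ops_local64 = {
	.get = param_get_local64, /* no .set: the value is read-only */
};

module_param_cb(example_stat, &param_ops_local64, &example_stat, 0444);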