/* Create a perf event sample with the counter sets as raw data. The sample
 * is then pushed to the event subsystem and the function checks for a
 * possible event overflow. If an event overflow occurs, the PMU is
 * stopped.
 *
 * Return non-zero if an event overflow occurred.
 */
static int cf_diag_push_sample(struct perf_event *event,
			       struct cf_diag_csd *csd)
{
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	int overflow;

	/* Setup perf sample */
	perf_sample_data_init(&data, 0, event->hw.last_period);
	memset(&regs, 0, sizeof(regs));
	memset(&raw, 0, sizeof(raw));

	if (event->attr.sample_type & PERF_SAMPLE_CPU)
		data.cpu_entry.cpu = event->cpu;
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.frag.size = csd->used;
		raw.frag.data = csd->data;
		raw.size = csd->used;
		data.raw = &raw;
	}

	overflow = perf_event_overflow(event, &data, &regs);
	debug_sprintf_event(cf_diag_dbg, 6,
			    "%s event %p cpu %d sample_type %#llx raw %d "
			    "ov %d\n", __func__, event, event->cpu,
			    event->attr.sample_type, raw.size, overflow);
	if (overflow)
		event->pmu->stop(event, 0);

	perf_event_update_userpage(event);
	return overflow;
}
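
For context, a minimal sketch of how such a helper might be driven once the counter-set data has been read out. The caller name and the assumption that csd->data/csd->used were already filled by the read-out path are illustrative, not taken from the actual s390 driver:

/* Hypothetical caller (sketch): push the collected counter sets as one
 * raw sample.  cf_diag_push_sample() already stops the PMU on overflow,
 * so the caller only needs to note that the event was throttled.
 */
static void cf_diag_emit_sample(struct perf_event *event,
				struct cf_diag_csd *csd)
{
	if (cf_diag_push_sample(event, csd))
		debug_sprintf_event(cf_diag_dbg, 6,
				    "%s event %p throttled\n",
				    __func__, event);
}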
/*
 * As System PMUs are affine to CPU0, the fact that interrupts are disabled
 * during interrupt handling is enough to serialise our actions and make this
 * safe. We do not need to grab our pmu_lock here.
 */
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		perf_sample_data_init(&data, 0, hwc->last_period);

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}
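
A hedged sketch of how this handler could be wired up at initialisation time; the function name, the IRQ number parameter, and the use of the hw_events structure as the dev cookie are assumptions for illustration:

/* Hypothetical probe-time hookup (sketch, needs <linux/interrupt.h>):
 * register the handler above for the L2 cache controller's event-counter
 * interrupt.  IRQF_NOBALANCING keeps the interrupt from being migrated
 * away from CPU0, matching the affinity assumption documented above the
 * handler.
 */
static int __init l2x0pmu_request_irq(int irq)
{
	return request_irq(irq, l2x0pmu_handle_irq, IRQF_NOBALANCING,
			   "l2x0-pmu", &l2x0pmu_hw_events);
}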
Example #4
static int perf_ibs_handle_irq(struct perf_ibs *perf_ibs, struct pt_regs *iregs)
{
	struct cpu_perf_ibs *pcpu = this_cpu_ptr(perf_ibs->pcpu);
	struct perf_event *event = pcpu->event;
	struct hw_perf_event *hwc = &event->hw;
	struct perf_sample_data data;
	struct perf_raw_record raw;
	struct pt_regs regs;
	struct perf_ibs_data ibs_data;
	int offset, size, check_rip, offset_max, throttle = 0;
	unsigned int msr;
	u64 *buf, *config, period;

	if (!test_bit(IBS_STARTED, pcpu->state)) {
		/*
		 * Catch spurious interrupts after stopping IBS: After
		 * disabling IBS there could still be incoming NMIs
		 * with samples that even have the valid bit cleared.
		 * Mark all these NMIs as handled.
		 */
		return test_and_clear_bit(IBS_STOPPING, pcpu->state) ? 1 : 0;
	}

	msr = hwc->config_base;
	buf = ibs_data.regs;
	rdmsrl(msr, *buf);
	if (!(*buf++ & perf_ibs->valid_mask))
		return 0;

	config = &ibs_data.regs[0];
	perf_ibs_event_update(perf_ibs, event, config);
	perf_sample_data_init(&data, 0, hwc->last_period);
	if (!perf_ibs_set_period(perf_ibs, hwc, &period))
		goto out;	/* no sw counter overflow */

	ibs_data.caps = ibs_caps;
	size = 1;
	offset = 1;
	check_rip = (perf_ibs == &perf_ibs_op && (ibs_caps & IBS_CAPS_RIPINVALIDCHK));
	if (event->attr.sample_type & PERF_SAMPLE_RAW)
		offset_max = perf_ibs->offset_max;
	else if (check_rip)
		offset_max = 2;
	else
		offset_max = 1;
	do {
		rdmsrl(msr + offset, *buf++);
		size++;
		offset = find_next_bit(perf_ibs->offset_mask,
				       perf_ibs->offset_max,
				       offset + 1);
	} while (offset < offset_max);
	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		/*
		 * Read IbsBrTarget and IbsOpData4 separately
		 * depending on their availability.
		 * Can't add them to offset_max as they are staggered.
		 */
		if (ibs_caps & IBS_CAPS_BRNTRGT) {
			rdmsrl(MSR_AMD64_IBSBRTARGET, *buf++);
			size++;
		}
		if (ibs_caps & IBS_CAPS_OPDATA4) {
			rdmsrl(MSR_AMD64_IBSOPDATA4, *buf++);
			size++;
		}
	}
	ibs_data.size = sizeof(u64) * size;

	regs = *iregs;
	if (check_rip && (ibs_data.regs[2] & IBS_RIP_INVALID)) {
		regs.flags &= ~PERF_EFLAGS_EXACT;
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw.size = sizeof(u32) + ibs_data.size;
		raw.data = ibs_data.data;
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_disable_event(perf_ibs, hwc, *config);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}
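
The PERF_SAMPLE_RAW branch above only attaches the MSR dump when the consumer asked for raw data. Below is a minimal sketch of the userspace attribute setup that would request it; the dynamic PMU type and the sample period are placeholders (the real type is read from /sys/bus/event_source/devices/ibs_op/type):

/* Userspace side (sketch): ask for raw samples so the IBS register dump
 * assembled above lands in the mmap'ed ring buffer.
 */
#include <linux/perf_event.h>
#include <string.h>

static void ibs_op_attr_init(struct perf_event_attr *attr, __u32 ibs_op_type)
{
	memset(attr, 0, sizeof(*attr));
	attr->size          = sizeof(*attr);
	attr->type          = ibs_op_type;	/* dynamic PMU type (placeholder) */
	attr->sample_type   = PERF_SAMPLE_RAW | PERF_SAMPLE_CPU;
	attr->sample_period = 100000;		/* placeholder period */
}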
Example #5
static irqreturn_t armv8pmu_handle_irq(int irq_num, void *dev)
{
	u32 pmovsr;
	struct perf_sample_data data;
	struct arm_pmu *cpu_pmu = (struct arm_pmu *)dev;
	struct pmu_hw_events *cpuc = this_cpu_ptr(cpu_pmu->hw_events);
	struct pt_regs *regs;
	int idx;

	/*
	 * Get and reset the IRQ flags
	 */
	pmovsr = armv8pmu_getreset_flags();

	/*
	 * Did an overflow occur?
	 */
	if (!armv8pmu_has_overflowed(pmovsr))
		return IRQ_NONE;

	/*
	 * Handle the counter(s) overflow(s)
	 */
	regs = get_irq_regs();

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];
		struct hw_perf_event *hwc;

		/* Ignore if we don't have an event. */
		if (!event)
			continue;

		/*
		 * We have a single interrupt for all counters. Check that
		 * each counter has overflowed before we process it.
		 */
		if (!armv8pmu_counter_has_overflowed(pmovsr, idx))
			continue;

		hwc = &event->hw;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, hwc->last_period);
		if (!armpmu_event_set_period(event))
			continue;

		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	/*
	 * Handle the pending perf events.
	 *
	 * Note: this call *must* be run with interrupts disabled. For
	 * platforms that can have the PMU interrupts raised as an NMI, this
	 * will not work.
	 */
	irq_work_run();

	return IRQ_HANDLED;
}
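
All of the handlers above follow the same per-counter sequence. A condensed, generic sketch of that shared pattern is shown here; names such as counter_overflowed() are placeholders, not taken from any one driver:

/* Generic overflow-handling loop (sketch): fold the hardware count into
 * the event, re-arm the counter, then push a sample; if the core throttles
 * the event, disable the counter.  irq_work_run() must be called with
 * interrupts disabled, as noted in the ARMv8 handler above.
 */
static irqreturn_t generic_pmu_handle_irq(struct arm_pmu *cpu_pmu,
					  struct pmu_hw_events *cpuc,
					  struct pt_regs *regs)
{
	struct perf_sample_data data;
	irqreturn_t ret = IRQ_NONE;
	int idx;

	for (idx = 0; idx < cpu_pmu->num_events; ++idx) {
		struct perf_event *event = cpuc->events[idx];

		if (!event || !counter_overflowed(idx))	/* placeholder check */
			continue;

		ret = IRQ_HANDLED;
		armpmu_event_update(event);
		perf_sample_data_init(&data, 0, event->hw.last_period);
		if (!armpmu_event_set_period(event))
			continue;
		if (perf_event_overflow(event, &data, regs))
			cpu_pmu->disable(event);
	}

	irq_work_run();
	return ret;
}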
Example #6
File: ibs.c Project: AK101111/linux
	} else {
		set_linear_ip(&regs, ibs_data.regs[1]);
		regs.flags |= PERF_EFLAGS_EXACT;
	}

	if (event->attr.sample_type & PERF_SAMPLE_RAW) {
		raw = (struct perf_raw_record){
			.frag = {
				.size = sizeof(u32) + ibs_data.size,
				.data = ibs_data.data,
			},
		};
		data.raw = &raw;
	}

	throttle = perf_event_overflow(event, &data, &regs);
out:
	if (throttle)
		perf_ibs_stop(event, 0);
	else
		perf_ibs_enable_event(perf_ibs, hwc, period >> 4);

	perf_event_update_userpage(event);

	return 1;
}

static int
perf_ibs_nmi_handler(unsigned int cmd, struct pt_regs *regs)
{
	u64 stamp = sched_clock();