Example #1

static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
{
	uint64_t counter, counter1, counter2;
	struct pt_regs *regs = get_irq_regs();
	int enabled;

	/* Check whether the irq belongs to me */
	enabled = read_c0_perfctrl() & LOONGSON2_PERFCTRL_ENABLE;
	if (!enabled)
		return IRQ_NONE;
	enabled = reg.cnt1_enabled | reg.cnt2_enabled;
	if (!enabled)
		return IRQ_NONE;

	counter = read_c0_perfcnt();
	counter1 = counter & 0xffffffff;
	counter2 = counter >> 32;

	if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
		if (reg.cnt1_enabled)
			oprofile_add_sample(regs, 0);
		counter1 = reg.reset_counter1;
	}
	if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
		if (reg.cnt2_enabled)
			oprofile_add_sample(regs, 1);
		counter2 = reg.reset_counter2;
	}

	write_c0_perfcnt((counter2 << 32) | counter1);

	return IRQ_HANDLED;
}
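
For context, a handler like this is normally registered on the CPU's
performance-counter interrupt line while profiling is active. A minimal
sketch of that registration, assuming an interrupt number macro such as
LOONGSON2_PERFCNT_IRQ (illustrative; it is not part of the snippet above):

#include <linux/interrupt.h>

static int loongson2_perf_irq_setup(void)
{
	/*
	 * IRQF_SHARED is what makes the "does the irq belong to me" check
	 * in the handler meaningful: another device may share the line,
	 * and the handler must return IRQ_NONE for interrupts it doesn't own.
	 */
	return request_irq(LOONGSON2_PERFCNT_IRQ, loongson2_perfcount_handler,
			   IRQF_SHARED, "Perfcounter", (void *)"Perfcounter");
}
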
Example #2

static irqreturn_t rm9000_perfcount_handler(int irq, void *dev_id)
{
	unsigned int control = read_c0_perfcontrol();
	struct pt_regs *regs = get_irq_regs();
	uint32_t counter1, counter2;
	uint64_t counters;

	/*
	 * RM9000 combines two 32-bit performance counters into a single
	 * 64-bit coprocessor zero register.  To avoid a race updating the
	 * registers we need to stop the counters while we're messing with
	 * them ...
	 */
	write_c0_perfcontrol(0);

	counters = read_c0_perfcount();
	counter1 = counters;
	counter2 = counters >> 32;

	if (control & RM9K_COUNTER1_OVERFLOW) {
		oprofile_add_sample(regs, 0);
		counter1 = reg.reset_counter1;
	}
	if (control & RM9K_COUNTER2_OVERFLOW) {
		oprofile_add_sample(regs, 1);
		counter2 = reg.reset_counter2;
	}

	counters = ((uint64_t)counter2 << 32) | counter1;
	write_c0_perfcount(counters);
	write_c0_perfcontrol(reg.control);

	return IRQ_HANDLED;
}
Example #3

static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;
 
	for (i = 0 ; i < NUM_COUNTERS; ++i) {
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(regs, i);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* Only P6-based Pentium M needs to re-unmask the apic vector, but
	 * it doesn't hurt other P6 variants */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* We can't work out if we really handled an interrupt. We
	 * might have caught a *second* counter just after it overflowed;
	 * when the interrupt for that counter then arrives, we find no
	 * overflowed counter, so we would return 0 and get dazed +
	 * confused. Instead we always assume we found an overflow.
	 * This sucks.
	 */
	return 1;
}
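
The CTR_READ()/CTR_WRITE()/CTR_OVERFLOWED() helpers are not shown on this
page; the sketch below gives plausible definitions inferred from how they
are used here (an assumption, not the original macros). The idea is that a
counter is armed with the two's complement of the reset value and counts
up, so it has "overflowed" once bit 31 of its low word is clear again:

#include <asm/msr.h>

#define CTR_READ(l, h, msrs, c)	rdmsr(msrs->counters[(c)].addr, (l), (h))
#define CTR_WRITE(l, msrs, c)	wrmsr(msrs->counters[(c)].addr, -(u32)(l), -1)
#define CTR_OVERFLOWED(n)	(!((n) & (1U << 31)))
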
Example #4
static int ppro_check_ctrs(unsigned int const cpu, 
			    struct op_msrs const * const msrs,
			    struct pt_regs * const regs)
{
	unsigned int low, high;
	int i;
	unsigned long eip = instruction_pointer(regs);
	int is_kernel = !user_mode(regs);
 
	for (i = 0 ; i < NUM_COUNTERS; ++i) {
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* We can't work out if we really handled an interrupt. We
	 * might have caught a *second* counter just after it overflowed;
	 * when the interrupt for that counter then arrives, we find no
	 * overflowed counter, so we would return 0 and get dazed +
	 * confused. Instead we always assume we found an overflow.
	 * This sucks.
	 */
	return 1;
}
Example #5
static int nmi_timer_callback(struct pt_regs * regs, int cpu)
{
	unsigned long eip = instruction_pointer(regs);
 
	oprofile_add_sample(eip, !user_mode(regs), 0, cpu);
	return 1;
}
Example #6
static void nmi_timer_callback(struct perf_event *event,
			       struct perf_sample_data *data,
			       struct pt_regs *regs)
{
	event->hw.interrupts = 0;       /* don't throttle interrupts */
	oprofile_add_sample(regs, 0);
}
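
Here oprofile rides on the perf subsystem instead of programming the PMU
itself. A sketch of how such a callback is typically attached with
perf_event_create_kernel_counter(); the event attributes and the sample
period are illustrative assumptions, not taken from the snippet:

#include <linux/perf_event.h>

static struct perf_event_attr nmi_timer_attr = {
	.type		= PERF_TYPE_HARDWARE,
	.config		= PERF_COUNT_HW_CPU_CYCLES,
	.size		= sizeof(struct perf_event_attr),
	.sample_period	= 1000000,	/* illustrative: one NMI per 1M cycles */
	.pinned		= 1,
};

static struct perf_event *nmi_timer_start_cpu(int cpu)
{
	/*
	 * The overflow callback runs in NMI context, which is why the
	 * handler above does nothing beyond resetting the throttle
	 * count and logging one sample.
	 */
	return perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
						nmi_timer_callback, NULL);
}
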
Example #7
static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0 ; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (CTR_OVERFLOWED(val)) {
			oprofile_add_sample(regs, i);
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
		}
	}

	/* Only P6-based Pentium M needs to re-unmask the apic vector, but
	 * it doesn't hurt other P6 variants */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* We can't work out if we really handled an interrupt. We
	 * might have caught a *second* counter just after it overflowed;
	 * when the interrupt for that counter then arrives, we find no
	 * overflowed counter, so we would return 0 and get dazed +
	 * confused. Instead we always assume we found an overflow.
	 * This sucks.
	 */
	return 1;
}
Example #8
int pm_overflow_handler(int irq, struct pt_regs *regs)
{
	int is_kernel;
	int i, cpu;
	unsigned int pc, pfctl;
	unsigned int count[2];

	pr_debug("get interrupt in %s\n", __FUNCTION__);
	if (oprofile_running == 0) {
		pr_debug("error: entering interrupt when oprofile is stopped.\n\r");
		return -1;
	}

	is_kernel = get_kernel();
	cpu = smp_processor_id();
	pc = regs->pc;
	pfctl = ctr_read();

	/* read the two event counter regs */
	count_read(count);

	/* if the counter overflows, add sample to oprofile buffer */
	for (i = 0; i < 2; ++i) {
		if (oprofile_running) {
			oprofile_add_sample(regs, i);
		}
	}

	/* reset the perfmon counter */
	ctr_write(curr_pfctl);
	count_write(curr_count);
	return 0;
}
Example #9
static void
ev5_handle_interrupt(unsigned long which, struct pt_regs *regs,
		     struct op_counter_config *ctr)
{
	/* Record the sample.  */
	oprofile_add_sample(regs, which);
}
Example #10
static int timer_notify(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long eip = profile_pc(regs);
 
	oprofile_add_sample(eip, !user_mode(regs), 0, cpu);
	return 0;
}
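
A timer_notify() with this signature matches the old register_timer_hook()
interface from kernel/profile.c; a sketch of the hookup, assuming that is
indeed the vintage of this snippet:

#include <linux/profile.h>

static int __init oprofile_timer_start(void)
{
	/*
	 * Only one timer hook may be installed system-wide; this returns
	 * -EBUSY if somebody else already owns it.
	 */
	return register_timer_hook(timer_notify);
}
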
Example #11
static int timer_notify(struct notifier_block * self, unsigned long val, void * data)
{
	struct pt_regs * regs = (struct pt_regs *)data;
	int cpu = smp_processor_id();
 	unsigned long eip = instruction_pointer(regs);
 
	oprofile_add_sample(eip, !user_mode(regs), 0, cpu);
	return 0;
}
Example #12
static void
ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
		     struct op_counter_config *ctr)
{
	if (!ctr[which].enabled)
		return;

	oprofile_add_sample(regs, which);
}
Example #13
/*
 * CPU counters' IRQ handler (one IRQ per CPU)
 */
static irqreturn_t ak98_timer_interrupt(int irq, void *arg)
{
	struct pt_regs *regs = get_irq_regs();

	oprofile_add_sample(regs, 0);	/* CCNT */

	/* Clear counter flag(s) */
	*(volatile unsigned int *)(AK98_TIMER2_CTRL) |= TIMER_INT_CLR | TIMER_ENABLE;
	return IRQ_HANDLED;
}
Example #14

static irqreturn_t loongson2_perfcount_handler(int irq, void *dev_id)
{
	uint64_t counter, counter1, counter2;
	struct pt_regs *regs = get_irq_regs();
	int enabled;
	unsigned long flags;

	/*
	 * LOONGSON2 defines two 32-bit performance counters.
	 * To avoid a race updating the registers we need to stop the
	 * counters while we're messing with them ...
	 */

	/* Check whether the irq belongs to me */
	enabled = reg.cnt1_enabled | reg.cnt2_enabled;
	if (!enabled)
		return IRQ_NONE;

	counter = read_c0_perfcnt();
	counter1 = counter & 0xffffffff;
	counter2 = counter >> 32;

	spin_lock_irqsave(&sample_lock, flags);

	if (counter1 & LOONGSON2_PERFCNT_OVERFLOW) {
		if (reg.cnt1_enabled)
			oprofile_add_sample(regs, 0);
		counter1 = reg.reset_counter1;
	}
	if (counter2 & LOONGSON2_PERFCNT_OVERFLOW) {
		if (reg.cnt2_enabled)
			oprofile_add_sample(regs, 1);
		counter2 = reg.reset_counter2;
	}

	spin_unlock_irqrestore(&sample_lock, flags);

	write_c0_perfcnt((counter2 << 32) | counter1);

	return IRQ_HANDLED;
}
Example #15
static int p4_check_ctrs(unsigned int const cpu, 
			  struct op_msrs const * const msrs,
			  struct pt_regs * const regs)
{
	unsigned long ctr, low, high, stag, real;
	int i;
	unsigned long eip = instruction_pointer(regs);
	int is_kernel = !user_mode(regs);

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {
		
		if (!counter_config[i].event) 
			continue;

		/* 
		 * there is some eccentricity in the hardware which
		 * requires that we perform 2 extra corrections:
		 *
		 * - check both the CCCR:OVF flag for overflow and the
		 *   counter high bit for un-flagged overflows.
		 *
		 * - write the counter back twice to ensure it gets
		 *   updated properly.
		 * 
		 * the former seems to be related to extra NMIs happening
		 * during the current NMI; the latter is reported as errata
		 * N15 in intel doc 249199-029, pentium 4 specification
		 * update, though their suggested work-around does not
		 * appear to solve the problem.
		 */
		
		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
 		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
 			CTR_WRITE(reset_value[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
 			CTR_WRITE(reset_value[i], real);
			/* P4 quirk: you have to re-unmask the apic vector */
			apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* See op_model_ppro.c */
	return 1;
}
Example #16
static inline void
op_add_pm(unsigned long pc, int kern, unsigned long counter,
          struct op_counter_config *ctr, unsigned long event)
{
    unsigned long fake_counter = 2 + event;
    if (counter == 1)
        fake_counter += PM_NUM_COUNTERS;
    if (ctr[fake_counter].enabled)
        oprofile_add_sample(pc, kern, fake_counter,
                            smp_processor_id());
}
Example #17

static irqreturn_t octeon_perfcount_handler(int irq, void * dev_id,
                                            struct pt_regs *regs)
{
    uint64_t counter;

    counter = __read_64bit_c0_register($25, 1);
    if (counter & (1ull<<63))
    {
        oprofile_add_sample(regs, 0);
        __write_64bit_c0_register($25, 1, octeon_config.reset_value[0]);
    }

    counter = __read_64bit_c0_register($25, 3);
    if (counter & (1ull<<63))
    {
        oprofile_add_sample(regs, 1);
        __write_64bit_c0_register($25, 3, octeon_config.reset_value[1]);
    }

    return IRQ_HANDLED;
}
Example #18
static void
ev4_handle_interrupt(unsigned long which, struct pt_regs *regs,
		     struct op_counter_config *ctr)
{
	/* EV4 can't properly disable counters individually.
	   Discard "disabled" events now.  */
	if (!ctr[which].enabled)
		return;

	/* Record the sample.  */
	oprofile_add_sample(regs, which);
}
Example #19
static enum hrtimer_restart oprofile_hrtimer_notify(struct hrtimer *hrtimer)
{
	oprofile_add_sample(get_irq_regs(), 0);
	hrtimer_forward_now(hrtimer, ns_to_ktime(TICK_NSEC));

#if defined(CONFIG_MP_DEBUG_TOOL_OPROFILE)
#ifdef CONFIG_ADVANCE_OPROFILE
	atomic_inc(&g_aop_pc_sample_count);
#endif/* CONFIG_ADVANCE_OPROFILE */
#endif /* CONFIG_MP_DEBUG_TOOL_OPROFILE */

	return HRTIMER_RESTART;
}
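
A sketch of how this hrtimer callback is typically armed, once per CPU
(for instance via on_each_cpu(); the per-cpu variable and function names
are illustrative). With a period of TICK_NSEC it yields one sample per
timer tick:

#include <linux/hrtimer.h>
#include <linux/percpu.h>

static DEFINE_PER_CPU(struct hrtimer, oprofile_hrtimer);

static void oprofile_hrtimer_start(void *unused)
{
	struct hrtimer *hrtimer = this_cpu_ptr(&oprofile_hrtimer);

	hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	hrtimer->function = oprofile_hrtimer_notify;
	/* PINNED keeps the timer on this CPU, so each CPU samples itself. */
	hrtimer_start(hrtimer, ns_to_ktime(TICK_NSEC),
		      HRTIMER_MODE_REL_PINNED);
}
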
Example #20
/*
 * SCU counters' IRQ handler (one IRQ per counter => 2 IRQs per CPU)
 */
static irqreturn_t scu_em_interrupt(int irq, void *arg)
{
	struct eventmonitor __iomem *emc = SCU_EVENTMONITORS_VA_BASE;
	unsigned int cnt;

	cnt = irq - IRQ_EB11MP_PMU_SCU0;
	oprofile_add_sample(get_irq_regs(), SCU_COUNTER(cnt));
	scu_reset_counter(emc, cnt);

	/* Clear overflow flag for this counter */
	writel(1 << (cnt + 16), &emc->PMCR);

	return IRQ_HANDLED;
}
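
Each SCU counter has its own interrupt line starting at IRQ_EB11MP_PMU_SCU0,
as the irq arithmetic above implies. A sketch of requesting those lines
(the number of counters is an assumption passed in by the caller):

#include <linux/interrupt.h>

static int scu_request_irqs(unsigned int nr_scu_counters)
{
	unsigned int i;
	int ret;

	for (i = 0; i < nr_scu_counters; i++) {
		ret = request_irq(IRQ_EB11MP_PMU_SCU0 + i, scu_em_interrupt,
				  0, "SCU PMU", NULL);
		if (ret)
			return ret;
	}
	return 0;
}
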
Example #21

static int loongson3_perfcount_handler(void)
{
	unsigned long flags;
	uint64_t counter1, counter2;
	uint32_t cause, handled = IRQ_NONE;
	struct pt_regs *regs = get_irq_regs();

	cause = read_c0_cause();
	if (!(cause & CAUSEF_PCI))
		return handled;

	counter1 = read_c0_perfhi1();
	counter2 = read_c0_perfhi2();

	local_irq_save(flags);

	if (counter1 & LOONGSON3_PERFCNT_OVERFLOW) {
		if (reg.ctr1_enable)
			oprofile_add_sample(regs, 0);
		counter1 = reg.reset_counter1;
	}
	if (counter2 & LOONGSON3_PERFCNT_OVERFLOW) {
		if (reg.ctr2_enable)
			oprofile_add_sample(regs, 1);
		counter2 = reg.reset_counter2;
	}

	local_irq_restore(flags);

	write_c0_perfhi1(counter1);
	write_c0_perfhi2(counter2);

	if (!(cause & CAUSEF_TI))
		handled = IRQ_HANDLED;

	return handled;
}
Example #22
static int profile_timer_exceptions_notify(struct notifier_block *self,
					   unsigned long val, void *data)
{
	struct die_args *args = data;
	int ret = NOTIFY_DONE;

	switch (val) {
	case DIE_NMI:
		oprofile_add_sample(args->regs, 0);
		ret = NOTIFY_STOP;
		break;
	default:
		break;
	}
	return ret;
}
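
The DIE_NMI case above only fires once the block is on the die-notifier
chain; a minimal sketch of that registration (the priority value is an
illustrative assumption):

#include <linux/kdebug.h>

static struct notifier_block profile_timer_exceptions_nb = {
	.notifier_call	= profile_timer_exceptions_notify,
	.priority	= 0,
};

static int profile_timer_init(void)
{
	/* From here on, every NMI is offered to the handler above. */
	return register_die_notifier(&profile_timer_exceptions_nb);
}
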
Example #23
/*
 * Overflow callback for oprofile.
 */
static void op_overflow_handler(struct perf_event *event, int unused,
			struct perf_sample_data *data, struct pt_regs *regs)
{
	int id;
	u32 cpu = smp_processor_id();

	for (id = 0; id < perf_num_counters; ++id)
		if (perf_events[cpu][id] == event)
			break;

	if (id != perf_num_counters)
		oprofile_add_sample(regs, id);
	else
		pr_warning("oprofile: ignoring spurious overflow "
				"on cpu %u\n", cpu);
}
Example #24
static void power4_handle_interrupt(struct pt_regs *regs,
				    struct op_counter_config *ctr)
{
	unsigned long pc;
	int is_kernel;
	int val;
	int i;
	unsigned int cpu = smp_processor_id();
	unsigned int mmcr0;

	pc = get_pc(regs);
	is_kernel = get_kernel(pc);

	/* set the PMM bit (see comment below) */
	mtmsrd(mfmsr() | MSR_PMM);

	for (i = 0; i < num_counters; ++i) {
		val = ctr_read(i);
		if (val < 0) {
			if (oprofile_running && ctr[i].enabled) {
				oprofile_add_sample(pc, is_kernel, i, cpu);
				ctr_write(i, reset_value[i]);
			} else {
				ctr_write(i, 0);
			}
		}
	}

	mmcr0 = mfspr(SPRN_MMCR0);

	/* reset the perfmon trigger */
	mmcr0 |= MMCR0_PMXE;

	/*
	 * We must clear the PMAO bit on some (GQ) chips. Just do it
	 * all the time
	 */
	mmcr0 &= ~MMCR0_PMAO;

	/*
	 * now clear the freeze bit, counting will not start until we
	 * rfid from this exception, because only at that point will
	 * the PMM bit be cleared
	 */
	mmcr0 &= ~MMCR0_FC;
	mtspr(SPRN_MMCR0, mmcr0);
}
Example #25
static int
perfmon_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
                struct pt_regs *regs, unsigned long stamp)
{
	int cpu = smp_processor_id();
	unsigned long eip = instruction_pointer(regs);
	int event = arg->pmd_eventid;
 
	arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;

	/* the owner of the oprofile event buffer may have exited
	 * without perfmon being shutdown (e.g. SIGSEGV)
	 */
	if (allow_ints)
		oprofile_add_sample(eip, !user_mode(regs), event, cpu);
	return 0;
}
Example #26
static int athlon_check_ctrs(struct pt_regs * const regs,
			     struct op_msrs const * const msrs)
{
	unsigned int low, high;
	int i;

	for (i = 0 ; i < NUM_COUNTERS; ++i) {
		if (!reset_value[i])
			continue;
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(regs, i);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* See op_model_ppro.c */
	return 1;
}
Example #27

/***********************************************************************
 * sh7109_pwm_interrupt()
 *
 *
 */
static irqreturn_t sh7109_pwm_interrupt(int irq, void *dev_id)
{
        u32 reg = 0;
        struct pt_regs *regs = get_irq_regs();

        /* Give the sample to oprofile. */
        oprofile_add_sample(regs, 0);

        /* Update the compare value. */
        reg = ctrl_inl(pwm->base + PWM1_CMP_VAL_REG);
        reg += results.compare_increment;
        ctrl_outl(reg, pwm->base + PWM1_CMP_VAL_REG);

        /* Ack active irq sources. */
        reg = ctrl_inl(pwm->base + PWM_INT_STA_REG);
        ctrl_outl(reg, pwm->base + PWM_INT_ACK_REG);


        return IRQ_HANDLED;
}
Example #28
static int athlon_check_ctrs(unsigned int const cpu, 
			      struct op_msrs const * const msrs, 
			      struct pt_regs * const regs)
{
	unsigned int low, high;
	int i;
	unsigned long eip = instruction_pointer(regs);
	int is_kernel = !user_mode(regs);

	for (i = 0 ; i < NUM_COUNTERS; ++i) {
		CTR_READ(low, high, msrs, i);
		if (CTR_OVERFLOWED(low)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			CTR_WRITE(reset_value[i], msrs, i);
		}
	}

	/* See op_model_ppro.c */
	return 1;
}
Example #29

static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (val & (1ULL << (counter_width - 1)))
			continue;
		oprofile_add_sample(regs, i);
		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
	}

	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	return 1;
}
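
The wrmsrl(..., -reset_value[i]) reload seen here and in Example #30 is the
standard arming trick on x86: the counter counts up from the two's
complement of the period and raises the PMU interrupt when it crosses zero.
A tiny illustrative helper (the MSR constant is only an example):

#include <asm/msr.h>

static void ppro_arm_counter(u64 events)
{
	/* Loading -events makes the counter interrupt after exactly
	 * 'events' increments. */
	wrmsrl(MSR_P6_PERFCTR0, -events);
}
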
Example #30
static int ppro_check_ctrs(struct pt_regs * const regs,
			   struct op_msrs const * const msrs)
{
	u64 val;
	int i;

	/*
	 * This can happen if perf counters are in use when
	 * we steal the die notifier NMI.
	 */
	if (unlikely(!reset_value))
		goto out;

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (val & (1ULL << (counter_width - 1)))
			continue;
		oprofile_add_sample(regs, i);
		wrmsrl(msrs->counters[i].addr, -reset_value[i]);
	}

out:
	/* Only P6-based Pentium M needs to re-unmask the apic vector, but
	 * it doesn't hurt other P6 variants */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* We can't work out if we really handled an interrupt. We
	 * might have caught a *second* counter just after it overflowed;
	 * when the interrupt for that counter then arrives, we find no
	 * overflowed counter, so we would return 0 and get dazed +
	 * confused. Instead we always assume we found an overflow.
	 * This sucks.
	 */
	return 1;
}