Example #1
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
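Every example in this set funnels the saved register state through profile_pc() instead of reading the instruction pointer directly: if the profiling interrupt lands while the kernel is spinning in lock code, the raw IP would credit the lock implementation rather than its caller. A minimal sketch of the x86 flavor of this idea, paraphrased from arch/x86/kernel/time.c (details vary by kernel version):

/* Hedged sketch, not the authoritative implementation. */
unsigned long profile_pc(struct pt_regs *regs)
{
	unsigned long pc = instruction_pointer(regs);

	/* A hit inside spinlock code would credit the lock itself; with
	 * frame pointers we can walk one frame up to the caller. */
	if (!user_mode(regs) && in_lock_functions(pc)) {
#ifdef CONFIG_FRAME_POINTER
		return *(unsigned long *)(regs->bp + sizeof(long));
#endif
	}
	return pc;
}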
Example #2
static int ppro_check_ctrs(unsigned int const cpu, 
			    struct op_msrs const * const msrs,
			    struct pt_regs * const regs)
{
	u64 val;
	int i;
	unsigned long eip = profile_pc(regs);
	int is_kernel = !user_mode(regs);

	for (i = 0; i < num_counters; ++i) {
		if (!reset_value[i])
			continue;
		rdmsrl(msrs->counters[i].addr, val);
		if (CTR_OVERFLOWED(val)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			wrmsrl(msrs->counters[i].addr, -reset_value[i]);
		}
	}

	/* Only P6-based Pentium M needs to re-unmask the APIC vector, but
	 * it doesn't hurt the other P6 variants. */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* We can't tell whether we really handled an interrupt: we might
	 * have caught a *second* counter just after it overflowed, and
	 * when the interrupt for that counter then arrives we won't find
	 * any counter that has overflowed. Returning 0 there would leave
	 * the NMI path dazed and confused, so we always claim we found
	 * an overflow. This sucks.
	 */
	return 1;
}
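CTR_OVERFLOWED() is not shown in this example. In the driver versions this appears to come from, it is essentially a sign-bit test: each counter is seeded with -reset_value[i] and counts up through zero, so a cleared bit 31 means the counter has wrapped. A hedged sketch (the exact macro and counter width differ across kernel versions):

/* Hedged sketch: the counter starts at -reset_value and counts up; once
 * it wraps past zero, bit 31 clears, which we read as an overflow. */
#define CTR_OVERFLOWED(n)	(!((n) & (1ULL << 31)))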
Example #3
void profile_tick(int type, struct pt_regs *regs)
{
	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && cpu_isset(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
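profile_hit() feeds the classic /proc/profile histogram: the kernel text address is mapped to a bucket by subtracting _stext and shifting right by prof_shift. A hedged sketch of that bucketing, paraphrasing kernel/profile.c (the real code also batches hits in per-CPU pages; profile_hit_sketch is a hypothetical name):

/* Hedged sketch of the bucketing behind profile_hit(). */
static void profile_hit_sketch(int type, void *__pc)
{
	unsigned long pc = (unsigned long)__pc;

	if (prof_on != type || !prof_buffer)
		return;
	/* Clamp to the last bucket so out-of-range PCs cannot overrun. */
	pc = min((pc - (unsigned long)_stext) >> prof_shift, prof_len - 1);
	atomic_inc(&prof_buffer[pc]);
}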
Example #4
static int
xenoprof_handler(struct task_struct *task, void *buf, pfm_ovfl_arg_t *arg,
                 struct pt_regs *regs, unsigned long stamp)
{
    unsigned long ip = profile_pc(regs);
    int event = arg->pmd_eventid;
    struct vcpu *v = current;
    int mode = xenoprofile_get_mode(v, regs);

    // See pfm_do_interrupt_handler() in xen/arch/ia64/linux-xen/perfmon.c;
    // it always passes task as NULL. This is a workaround.
    BUG_ON(task != NULL);

    arg->ovfl_ctrl.bits.reset_ovfl_pmds = 1;
    if (!allow_virq || !allow_ints)
        return 0;

    // Note that xenoprof_log_event() actually expects cpu_user_regs; cast
    // back appropriately when implementing the backtrace on ia64.
    xenoprof_log_event(v, regs, ip, mode, event);
    // send VIRQ_XENOPROF
    if (is_active(v->domain) && !xenoprof_is_xen_mode(v, regs) &&
        !is_idle_vcpu(v))
        send_guest_vcpu_virq(v, VIRQ_XENOPROF);

    return 0;
}
Example #5
void oprofile_add_ibs_op_sample(struct pt_regs *const regs,
				unsigned int * const ibs_op)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	struct oprofile_cpu_buffer *cpu_buf = &cpu_buffer[smp_processor_id()];

#ifdef CONFIG_CA_CSS
	if (!ca_css_depth) {
		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_op, IBS_OP_BEGIN);
		return;
	}

	/* if log_ibs_sample() fails we can't backtrace since we lost the
	 * source of this event */
	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_op, IBS_OP_BEGIN))
		oprofile_ops.ca_css(regs, ca_css_depth, cpu_buf->last_task);
#else
	if (!backtrace_depth) {
		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_op, IBS_OP_BEGIN);
		return;
	}

	/* if log_ibs_sample() fails we can't backtrace since we lost the
	 * source of this event */
	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_op, IBS_OP_BEGIN))
		oprofile_ops.backtrace(regs, backtrace_depth);
#endif
}
Example #6
void profile_tick(int type)
{
	struct pt_regs *regs = get_irq_regs();

	if (!user_mode(regs) && prof_cpu_mask != NULL &&
	    cpumask_test_cpu(smp_processor_id(), prof_cpu_mask))
		profile_hit(type, (void *)profile_pc(regs));
}
Example #7
static int timer_notify(struct pt_regs *regs)
{
	int cpu = smp_processor_id();
	unsigned long eip = profile_pc(regs);
 
	oprofile_add_sample(eip, !user_mode(regs), 0, cpu);
	return 0;
}
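timer_notify() is the timer-tick fallback oprofile uses when no hardware PMU driver is available. A hedged sketch of how it is typically wired up, following my recollection of drivers/oprofile/timer_int.c (register_timer_hook() only exists in older kernels):

/* Hedged sketch of registering the timer-based fallback. */
static int timer_start(void)
{
	return register_timer_hook(timer_notify);
}

static void timer_stop(void)
{
	unregister_timer_hook(timer_notify);
}

void __init oprofile_timer_init(struct oprofile_operations *ops)
{
	ops->start = timer_start;
	ops->stop = timer_stop;
	ops->cpu_type = "timer";	/* reported to userspace */
}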
Example #8
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

#ifdef CONFIG_CA_CSS
	ca_css_add_ext_sample(pc, regs, event, is_kernel);
#else
	oprofile_add_ext_sample(pc, regs, event, is_kernel);
#endif
}
Example #9
static int p4_check_ctrs(unsigned int const cpu, 
			  struct op_msrs const * const msrs,
			  struct pt_regs * const regs)
{
	unsigned long ctr, low, high, stag, real;
	int i;
	unsigned long eip = profile_pc(regs);
	int is_kernel = !user_mode(regs);

	stag = get_stagger();

	for (i = 0; i < num_counters; ++i) {

		if (!reset_value[i])
			continue;

		/*
		 * There is some eccentricity in the hardware which
		 * requires that we perform 2 extra corrections:
		 *
		 * - check both the CCCR:OVF flag for overflow and the
		 *   counter high bit for un-flagged overflows.
		 *
		 * - write the counter back twice to ensure it gets
		 *   updated properly.
		 *
		 * The former seems to be related to extra NMIs happening
		 * during the current NMI; the latter is reported as errata
		 * N15 in Intel doc 249199-029, Pentium 4 specification
		 * update, though their suggested work-around does not
		 * appear to solve the problem.
		 */

		real = VIRT_CTR(stag, i);

		CCCR_READ(low, high, real);
		CTR_READ(ctr, high, real);
		if (CCCR_OVF_P(low) || CTR_OVERFLOW_P(ctr)) {
			oprofile_add_sample(eip, is_kernel, i, cpu);
			CTR_WRITE(reset_value[i], real);
			CCCR_CLEAR_OVF(low);
			CCCR_WRITE(low, high, real);
			CTR_WRITE(reset_value[i], real);
		}
	}

	/* P4 quirk: you have to re-unmask the apic vector */
	apic_write(APIC_LVTPC, apic_read(APIC_LVTPC) & ~APIC_LVT_MASKED);

	/* See op_model_ppro.c */
	return 1;
}
Example #10
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;    /* This value will not be used */
		pc = ESCAPE_CODE; /* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel);
}
Example #11
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	int is_kernel;
	unsigned long pc;

	if (likely(regs)) {
		is_kernel = !user_mode(regs);
		pc = profile_pc(regs);
	} else {
		is_kernel = 0;    /* This value will not be used */
		pc = ESCAPE_CODE; /* as this causes an early return. */
	}

	__oprofile_add_ext_sample(pc, regs, event, is_kernel, NULL);
}
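The NULL-regs branch above is safe because the buffer layer treats ESCAPE_CODE as a sentinel and drops the sample before is_kernel is ever consumed. A hedged sketch of that guard, paraphrasing drivers/oprofile/cpu_buffer.c:

/* Hedged sketch of the guard that makes the NULL-regs path safe. */
static int log_sample(struct oprofile_cpu_buffer *cpu_buf, unsigned long pc,
		      int is_kernel, unsigned long event)
{
	if (pc == ESCAPE_CODE) {
		cpu_buf->sample_invalid_eip++;
		return 0;	/* drop it; is_kernel is never read */
	}
	/* ... normal buffering path continues here ... */
	return 1;
}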
Example #12
void oprofile_add_ibs_sample(struct pt_regs *const regs,
				unsigned int * const ibs_sample, u8 code)
{
	int is_kernel = !user_mode(regs);
	unsigned long pc = profile_pc(regs);

	struct oprofile_cpu_buffer *cpu_buf =
			 &per_cpu(cpu_buffer, smp_processor_id());

	if (!backtrace_depth) {
		log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code);
		return;
	}

	/* if log_ibs_sample() fails we can't backtrace since we lost the
	 * source of this event */
	if (log_ibs_sample(cpu_buf, pc, is_kernel, ibs_sample, code))
		oprofile_ops.backtrace(regs, backtrace_depth);
}
Example #13
void add_cookie(struct pt_regs *regs, int cpu)
{
	unsigned long pc;
	off_t off;

	if (!regs)
		return;

	pc = profile_pc(regs);

	if (user_mode(regs)) {
		struct mm_struct *mm;
		struct vm_area_struct *vma;
		struct path *ppath;

		mm = current->mm;
		for (vma = find_vma(mm, pc); vma; vma = vma->vm_next) {

			if (pc < vma->vm_start || pc >= vma->vm_end)
				continue;

			if (vma->vm_file) {
				ppath = &(vma->vm_file->f_path);
				off = (vma->vm_pgoff << PAGE_SHIFT) + pc - vma->vm_start;
				cookie(current->comm, pc, (char *)(ppath->dentry->d_name.name), off, 0);
			} else {
				/* must be an anonymous map */
				cookie(current->comm, pc, "nofile", pc, 0);
			}
			break;
		}
	} else {
		struct module *mod = __module_address(pc);
		if (mod) {
			off = pc - (unsigned long)mod->module_core;
			cookie(current->comm, pc, mod->name, off, 1);
		} else {
			off = 0;
			cookie(current->comm, pc, "vmlinux", off, 0);
		}
		}
	}
}
Example #14
void oprofile_add_sample(struct pt_regs * const regs, unsigned long event)
{
	struct oprofile_cpu_buffer * cpu_buf = &cpu_buffer[smp_processor_id()];
	unsigned long pc = profile_pc(regs);
	int is_kernel = !user_mode(regs);

	if (!backtrace_depth) {
		log_sample(cpu_buf, pc, is_kernel, event);
		return;
	}

	if (!oprofile_begin_trace(cpu_buf))
		return;

	/* if log_sample() fails we can't backtrace since we lost the source
	 * of this event */
	if (log_sample(cpu_buf, pc, is_kernel, event))
		oprofile_ops.backtrace(regs, backtrace_depth);
	oprofile_end_trace(cpu_buf);
}
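oprofile_begin_trace()/oprofile_end_trace() bracket the synthetic PCs emitted by the backtrace callback so the userspace daemon can tell them apart from ordinary samples. A hedged sketch of the markers, paraphrasing drivers/oprofile/cpu_buffer.c from the same era as this example:

/* Hedged sketch: an escape code marks where backtrace entries begin. */
static int oprofile_begin_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	add_code(cpu_buf, CPU_TRACE_BEGIN);
	cpu_buf->tracing = 1;
	return 1;
}

static void oprofile_end_trace(struct oprofile_cpu_buffer *cpu_buf)
{
	cpu_buf->tracing = 0;
}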
Example #15
void notrace profile_tick(int type, struct pt_regs *regs)
{
	if (type == CPU_PROFILING && timer_hook)
		timer_hook(regs);
	if (!user_mode(regs) && (prof_pid == -1 || prof_pid == current->pid) &&
			cpu_isset(smp_processor_id(), prof_cpu_mask)) {
		if (prof_on == PREEMPT_PROFILING && type == CPU_PROFILING) {
#ifdef CONFIG_PREEMPT
			int count = preempt_count() - HARDIRQ_OFFSET;

			if (!count)
				profile_hit(PREEMPT_PROFILING,
						(void *)preemption_enabled);
			else
#endif
				profile_hit(PREEMPT_PROFILING,
						(void *)preemption_disabled);
		} else
			profile_hit(type, (void *)profile_pc(regs));
	}
}