Example #1
0
/*
 * Start event-based sampling on @cpu by creating a kernel perf counter
 * from the per-cpu attributes prepared earlier (per_cpu(pevent_attr)).
 *
 * No-op unless event_based_sampling is set.  On success the counter's
 * current value is cached in per_cpu(prev_value) so later samples can
 * be reported as deltas.
 */
static void gator_event_sampling_online_dispatch(int cpu)
{
	struct perf_event *ev;

	if (!event_based_sampling)
		return;

	/* The overflow-handler "context" argument was added in Linux 3.1. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 1, 0)
	ev = per_cpu(pevent, cpu) = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu), cpu, 0, ebs_overflow_handler);
#else
	ev = per_cpu(pevent, cpu) = perf_event_create_kernel_counter(per_cpu(pevent_attr, cpu), cpu, 0, ebs_overflow_handler, 0);
#endif

	if (IS_ERR(ev)) {
		/* printk-family messages must be newline terminated */
		pr_err("gator: unable to start event-based-sampling\n");
		return;
	}

	if (ev->state != PERF_EVENT_STATE_ACTIVE) {
		pr_err("gator: unable to start event-based-sampling\n");
		perf_event_release_kernel(ev);
		return;
	}

	/* Prime the saved counter value so the first sample yields a sane delta. */
	ev->pmu->read(ev);
	per_cpu(prev_value, cpu) = local64_read(&ev->count);
}
Example #2
0
File: perf.c  Project: AMCScarface/misc
/** Initialize performance sampling
 * Call this during probe initialization to set up performance event sampling
 * for all online cpus.  Returns non-zero on error.
 *
 * @param stp Handle for the event to be registered.
 */
static long _stp_perf_init (struct stap_perf_probe *stp)
{
	int c;

	/* one event-descriptor slot per cpu */
	stp->events = _stp_alloc_percpu (sizeof(struct perf_event*));
	if (!stp->events)
		return -ENOMEM;

	/* create the counter on every online processor */
	stp_for_each_cpu(c) {
		struct perf_event **slot = per_cpu_ptr (stp->events, c);

		if (cpu_is_offline(c)) {
			*slot = NULL;	/* offline cpus get no event */
			continue;
		}

		*slot = perf_event_create_kernel_counter(&stp->attr,
							 c, -1,
							 stp->callback);
		if (IS_ERR(*slot)) {
			long rc = PTR_ERR(*slot);
			*slot = NULL;	/* so teardown skips this slot */
			_stp_perf_del(stp);
			return rc;
		}
	}
	return 0;
}
Example #3
0
/**
 * register_user_hw_breakpoint - register a hardware breakpoint for user space
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 * @tsk: pointer to 'task_struct' of the process to which the address belongs
 */
struct perf_event *
register_user_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered,
			    struct task_struct *tsk)
{
	struct perf_event *bp;

	/* cpu argument is -1 here; the counter is tied to tsk's pid */
	bp = perf_event_create_kernel_counter(attr, -1, tsk->pid, triggered);

	return bp;
}
Example #4
0
/*
 * Create (but do not enable) the perf counter backing oprofile event
 * @event on @cpu.  Returns 0 if the event is disabled, already created,
 * or successfully created; a negative errno otherwise.
 */
static int op_create_counter(int cpu, int event)
{
	struct perf_event *pe;
	int err;

	/* nothing to do if the event is disabled or already exists */
	if (!counter_config[event].enabled || perf_events[cpu][event])
		return 0;

	pe = perf_event_create_kernel_counter(&counter_config[event].attr,
					      cpu, NULL,
					      op_overflow_handler, NULL);
	if (IS_ERR(pe))
		return PTR_ERR(pe);

	err = 0;
	if (pe->state == PERF_EVENT_STATE_ACTIVE) {
		perf_events[cpu][event] = pe;
	} else {
		/* counter was created but could not be scheduled */
		perf_event_release_kernel(pe);
		pr_warning("oprofile: failed to enable event %d "
				"on CPU %d\n", event, cpu);
		err = -EBUSY;
	}

	return err;
}
/*
 * Older-API variant: create the perf counter for oprofile event @event
 * on @cpu.  Returns 0 when nothing needed doing or on success, a
 * negative errno on failure.
 */
static int op_create_counter(int cpu, int event)
{
	struct perf_event *pevent;

	/* skip disabled events and ones already set up */
	if (!counter_config[event].enabled || (perf_events[cpu][event] != NULL))
		return 0;

	pevent = perf_event_create_kernel_counter(&counter_config[event].attr,
						  cpu, -1,
						  op_overflow_handler);
	if (IS_ERR(pevent))
		return PTR_ERR(pevent);

	if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
		/* created but never became active: release and report */
		perf_event_release_kernel(pevent);
		pr_warning("oprofile: failed to enable event %d "
				"on CPU %d\n", event, cpu);
		return -EBUSY;
	}

	perf_events[cpu][event] = pevent;
	return 0;
}
Example #6
0
/*
 * Set up and enable the hard-lockup watchdog perf counter on @cpu.
 * Returns 0 on success (including when already enabled), or the
 * perf_event creation error.  cpu0's error is cached in cpu0_err so
 * identical failures on later cpus are not reported repeatedly.
 */
static int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparision */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warning("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));
	return PTR_ERR(event);

	/* success path */
out_save:
	/* publish the event before enabling it */
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}
Example #7
0
/* Demonstrates the post-38a81da2205f94 calling convention, where the
 * third argument is a struct task_struct * rather than an int pid. */
void fn (void) {
  struct perf_event_attr *ev_attr = NULL;
  struct task_struct *target = NULL;
  perf_overflow_handler_t on_overflow = NULL;
  int cpu_id = 0;

  /* linux-2.6 commit 38a81da2205f94 */
  (void) perf_event_create_kernel_counter(ev_attr, cpu_id, target, on_overflow);
}
Example #8
0
/*
 * This function sets up the performance counter registers to record.
 *
 * Lazily allocates the file-scope perf_event_attr 'attr' on first call
 * (counting PERF_COUNT_HW_INSTRUCTIONS, kernel/hv/idle excluded), then
 * creates a kernel counter bound to @tsk on all cpus.
 *
 * Returns the created perf_event, or NULL on failure.
 */
struct perf_event *perf_counter_init(struct task_struct *tsk){
        int cpu;
        struct perf_event *pevent;

        if(attr == NULL) {
                attr = kmalloc(sizeof(*attr), GFP_KERNEL);
                /* kmalloc can fail: bail out instead of memset'ing NULL */
                if(attr == NULL)
                        return NULL;
                memset(attr,0,sizeof(*attr));
                attr->type = PERF_TYPE_HARDWARE;
                attr->config = PERF_COUNT_HW_INSTRUCTIONS;
                attr->size = sizeof(*attr);
                attr->sample_period = 0;
                attr->disabled = 0;
                attr->inherit = 0;
                attr->pinned = 1;     //TODO Not sure if we want this set or not yet
                attr->freq = 0;
                attr->exclude_user = 0;
                attr->exclude_kernel = 1;
                attr->exclude_hv = 1;
                attr->exclude_idle = 1;
                attr->inherit_stat = 1;
        }

        cpu = -1;       // count events for this thread on all cpus

        pevent = perf_event_create_kernel_counter(attr, cpu, tsk,
                        (perf_overflow_handler_t) capo_overflow_handler);

        if (IS_ERR(pevent)){
                /* log BEFORE BUG(): BUG() does not return, so any
                 * statement after it is unreachable */
                printk(KERN_CRIT "***** PERFCT: Failed to create kernel counter");
                BUG();
                return NULL;
        }

        if (pevent->state != PERF_EVENT_STATE_ACTIVE) {
                printk(KERN_CRIT "***** PERFCT: Failed to enable kernel counter");
                /* drop the cached attr so a retry re-allocates it */
                kfree(attr);
                attr=NULL;
                perf_event_release_kernel(pevent);
                BUG();
                return NULL;
        }

        return pevent;
}
/*
 * Lazily create the per-cpu NMI timer perf event for @cpu and enable it
 * when counters are running.  Returns 0 on success, negative errno on
 * creation failure.
 */
static int nmi_timer_start_cpu(int cpu)
{
	struct perf_event *ev = per_cpu(nmi_timer_events, cpu);

	if (ev == NULL) {
		/* first use on this cpu: create and cache the event */
		ev = perf_event_create_kernel_counter(&nmi_timer_attr, cpu, NULL,
						      nmi_timer_callback, NULL);
		if (IS_ERR(ev))
			return PTR_ERR(ev);
		per_cpu(nmi_timer_events, cpu) = ev;
	}

	if (ev && ctr_running)
		perf_event_enable(ev);

	return 0;
}
Example #10
0
File: hrprof.c  Project: myaut/hrprof
/*
 * Initialize the per-cpu hrprof state for @cpu: reset the ring buffer,
 * create the perf counter, and set up (but do not start) the hrtimer.
 * Returns 0 on success, negative errno on counter-creation failure.
 */
static int hrprof_init_event(int cpu) {
	hrprof_cpu_t* event = per_cpu_ptr(&hrprof_cpu_event, cpu);

	spin_lock_init(&event->lock);

	/* empty ring buffer */
	event->head = 0;
	event->tail = 0;

	event->event = perf_event_create_kernel_counter(
			&pe_attr, cpu, NULL, overflow_handler, NULL);

	if(IS_ERR(event->event)) {
		long rc = PTR_ERR(event->event);
		/* don't leave an ERR_PTR behind in the persistent per-cpu
		 * struct, where later code could mistake it for a valid
		 * event pointer */
		event->event = NULL;
		return rc;
	}

	hrtimer_init(&event->hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	event->hrtimer.function = hrprof_timer_notify;

	return 0;
}
Example #11
0
/**
 * register_wide_hw_breakpoint - register a wide breakpoint in the kernel
 * @attr: breakpoint attributes
 * @triggered: callback to trigger when we hit the breakpoint
 *
 * @return a set of per_cpu pointers to perf events
 */
struct perf_event * __percpu *
register_wide_hw_breakpoint(struct perf_event_attr *attr,
			    perf_overflow_handler_t triggered)
{
	struct perf_event * __percpu *cpu_events, **pevent, *bp;
	long err;
	int cpu;

	cpu_events = alloc_percpu(typeof(*cpu_events));
	if (!cpu_events)
		return (void __percpu __force *)ERR_PTR(-ENOMEM);

	/* hold the cpu set stable while registering per-cpu events */
	get_online_cpus();
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		bp = perf_event_create_kernel_counter(attr, cpu, -1, triggered);

		/* store even an ERR_PTR: the rollback loop below uses it
		 * to find where registration stopped */
		*pevent = bp;

		if (IS_ERR(bp)) {
			err = PTR_ERR(bp);
			goto fail;
		}
	}
	put_online_cpus();

	return cpu_events;

fail:
	/* unregister every breakpoint created before the failing cpu;
	 * the first ERR_PTR slot marks the stopping point */
	for_each_online_cpu(cpu) {
		pevent = per_cpu_ptr(cpu_events, cpu);
		if (IS_ERR(*pevent))
			break;
		unregister_hw_breakpoint(*pevent);
	}
	put_online_cpus();

	free_percpu(cpu_events);
	return (void __percpu __force *)ERR_PTR(err);
}
Example #12
0
/*
 * Set up and enable the hard-lockup watchdog perf counter on @cpu.
 * Returns 0 on success (or if disabled / already enabled), otherwise
 * the perf_event creation error.  On any failure the hard lockup
 * detector is disabled for all cpus via NMI_WATCHDOG_ENABLED_BIT.
 */
int watchdog_nmi_enable(unsigned int cpu)
{
	struct perf_event_attr *wd_attr;
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	/* nothing to do if the hard lockup detector is disabled */
	if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
		goto out;

	/* is it already setup and enabled? */
	if (event && event->state > PERF_EVENT_STATE_OFF)
		goto out;

	/* it is setup but not enabled */
	if (event != NULL)
		goto out_enable;

	wd_attr = &wd_hw_attr;
	wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);

	/* Try to register using hardware perf events */
	event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);

	/* save cpu0 error for future comparision */
	if (cpu == 0 && IS_ERR(event))
		cpu0_err = PTR_ERR(event);

	if (!IS_ERR(event)) {
		/* only print for cpu0 or different than cpu0 */
		if (cpu == 0 || cpu0_err)
			pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
		goto out_save;
	}

	/*
	 * Disable the hard lockup detector if _any_ CPU fails to set up
	 * set up the hardware perf event. The watchdog() function checks
	 * the NMI_WATCHDOG_ENABLED bit periodically.
	 *
	 * The barriers are for syncing up watchdog_enabled across all the
	 * cpus, as clear_bit() does not use barriers.
	 */
	smp_mb__before_atomic();
	clear_bit(NMI_WATCHDOG_ENABLED_BIT, &watchdog_enabled);
	smp_mb__after_atomic();

	/* skip displaying the same error again */
	if (cpu > 0 && (PTR_ERR(event) == cpu0_err))
		return PTR_ERR(event);

	/* vary the KERN level based on the returned errno */
	if (PTR_ERR(event) == -EOPNOTSUPP)
		pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
	else if (PTR_ERR(event) == -ENOENT)
		pr_warn("disabled (cpu%i): hardware events not enabled\n",
			 cpu);
	else
		pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
			cpu, PTR_ERR(event));

	pr_info("Shutting down hard lockup detector on all cpus\n");

	return PTR_ERR(event);

	/* success path */
out_save:
	/* publish the event before enabling it */
	per_cpu(watchdog_ev, cpu) = event;
out_enable:
	perf_event_enable(per_cpu(watchdog_ev, cpu));
out:
	return 0;
}