/**
 * modify_user_hw_breakpoint - modify a user-space hardware breakpoint
 * @bp: the breakpoint structure to modify
 * @attr: new breakpoint attributes
 */
int modify_user_hw_breakpoint(struct perf_event *bp, struct perf_event_attr *attr)
{
	u64 old_addr = bp->attr.bp_addr;
	u64 old_len = bp->attr.bp_len;
	int old_type = bp->attr.bp_type;
	int err = 0;

	/* Quiesce the breakpoint while its attributes are updated */
	perf_event_disable(bp);

	bp->attr.bp_addr = attr->bp_addr;
	bp->attr.bp_type = attr->bp_type;
	bp->attr.bp_len = attr->bp_len;

	if (attr->disabled)
		goto end;

	err = validate_hw_breakpoint(bp);
	if (err) {
		/* Validation failed: roll back to the previous settings */
		bp->attr.bp_addr = old_addr;
		bp->attr.bp_type = old_type;
		bp->attr.bp_len = old_len;
		if (!bp->attr.disabled)
			perf_event_enable(bp);

		return err;
	}

	perf_event_enable(bp);

end:
	bp->attr.disabled = attr->disabled;

	return 0;
}
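A minimal caller sketch: updating only the address of an existing breakpoint by copying its current attributes first. 'move_breakpoint' is a hypothetical helper name, and 'bp' is assumed to come from an earlier register_user_hw_breakpoint() call.

static int move_breakpoint(struct perf_event *bp, unsigned long new_addr)
{
	struct perf_event_attr attr = bp->attr;	/* start from the current settings */

	attr.bp_addr = new_addr;		/* change only the watch address */
	return modify_user_hw_breakpoint(bp, &attr);
}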
static void nmi_timer_stop_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);

	if (event && ctr_running)
		perf_event_disable(event);
}
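For symmetry, a sketch of the enable-side counterpart, loosely modeled on the oprofile nmi-timer code; nmi_timer_attr and nmi_timer_callback are assumed to be defined elsewhere in the same driver, and the exact perf_event_create_kernel_counter() arguments may differ by kernel version.

static int nmi_timer_start_cpu(int cpu)
{
	struct perf_event *event = per_cpu(nmi_timer_events, cpu);

	if (!event) {
		/* Lazily create the per-cpu counter on first use */
		event = perf_event_create_kernel_counter(&nmi_timer_attr, cpu,
							 NULL, nmi_timer_callback,
							 NULL);
		if (IS_ERR(event))
			return PTR_ERR(event);
		per_cpu(nmi_timer_events, cpu) = event;
	}
	if (ctr_running)
		perf_event_enable(event);

	return 0;
}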
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	int stepped = 1;
	struct arch_hw_breakpoint *info;
	unsigned int instr;
	unsigned long dar = regs->dar;

	
	/* Disable breakpoints during exception handling */
	set_dabr(0);

	rcu_read_lock();

	bp = __get_cpu_var(bp_per_reg);
	if (!bp)
		goto out;
	info = counter_arch_bp(bp);

	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
			(dar - bp->attr.bp_addr < bp->attr.bp_len));

	
	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		bp->ctx->task->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		goto out;
	}

	stepped = 0;
	instr = 0;
	if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
		stepped = emulate_step(regs, instr);

	if (!stepped) {
		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
			"0x%lx will be disabled.", info->address);
		perf_event_disable(bp);
		goto out;
	}
	if (!info->extraneous_interrupt)
		perf_bp_event(bp, regs);

	set_dabr(info->address | info->type | DABR_TRANSLATION);
out:
	rcu_read_unlock();
	return rc;
}
Example #4
static void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
}
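The disable-then-release sequence above recurs throughout these examples. Distilled into a standalone helper (a hypothetical name, not part of any kernel API):

static void release_kernel_counter(struct perf_event **slot)
{
	struct perf_event *event = *slot;

	if (!event)
		return;
	perf_event_disable(event);		/* stop counting */
	*slot = NULL;				/* unpublish the pointer */
	perf_event_release_kernel(event);	/* then free the event */
}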
Example #5
/**
 * kvm_pmu_stop_counter - stop PMU counter
 * @vcpu: The vcpu pointer
 * @pmc: The PMU counter pointer
 *
 * If this counter has been configured to monitor some event, release it here.
 */
static void kvm_pmu_stop_counter(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 counter, reg;

	if (pmc->perf_event) {
		counter = kvm_pmu_get_counter_value(vcpu, pmc->idx);
		reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		       ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
		__vcpu_sys_reg(vcpu, reg) = counter;
		perf_event_disable(pmc->perf_event);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}
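For context, the read side this pairs with: a simplified sketch of how kvm_pmu_get_counter_value combines the saved sysreg snapshot with whatever the live perf event has counted since, paraphrased from the same KVM/arm64 PMU code. 'pmu_counter_value' is a hypothetical name, and the real function also masks the result to the counter's width.

static u64 pmu_counter_value(struct kvm_vcpu *vcpu, struct kvm_pmc *pmc)
{
	u64 enabled, running;
	u64 reg = (pmc->idx == ARMV8_PMU_CYCLE_IDX)
		   ? PMCCNTR_EL0 : PMEVCNTR0_EL0 + pmc->idx;
	u64 counter = __vcpu_sys_reg(vcpu, reg);

	/* Saved snapshot plus whatever the perf event counted since */
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	return counter;
}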
Example #6
static void nmi_timer_shutdown(void)
{
	struct perf_event *event;
	int cpu;

	cpuhp_remove_state(hp_online);
	for_each_possible_cpu(cpu) {
		event = per_cpu(nmi_timer_events, cpu);
		if (!event)
			continue;
		perf_event_disable(event);
		per_cpu(nmi_timer_events, cpu) = NULL;
		perf_event_release_kernel(event);
	}
}
Example #7
/**
 * kvm_pmu_vcpu_destroy - free the perf events of the PMU for this vcpu
 * @vcpu: The vcpu pointer
 */
void kvm_pmu_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	for (i = 0; i < ARMV8_PMU_MAX_COUNTERS; i++) {
		struct kvm_pmc *pmc = &pmu->pmc[i];

		if (pmc->perf_event) {
			perf_event_disable(pmc->perf_event);
			perf_event_release_kernel(pmc->perf_event);
			pmc->perf_event = NULL;
		}
	}
}
Example #8
void watchdog_nmi_disable(unsigned int cpu)
{
	struct perf_event *event = per_cpu(watchdog_ev, cpu);

	if (event) {
		perf_event_disable(event);
		per_cpu(watchdog_ev, cpu) = NULL;

		/* should be in cleanup, but blocks oprofile */
		perf_event_release_kernel(event);
	}
	if (cpu == 0) {
		/* watchdog_nmi_enable() expects this to be zero initially. */
		cpu0_err = 0;
	}
}
static void nmi_timer_shutdown(void)
{
	struct perf_event *event;
	int cpu;

	cpu_notifier_register_begin();
	__unregister_cpu_notifier(&nmi_timer_cpu_nb);
	for_each_possible_cpu(cpu) {
		event = per_cpu(nmi_timer_events, cpu);
		if (!event)
			continue;
		perf_event_disable(event);
		per_cpu(nmi_timer_events, cpu) = NULL;
		perf_event_release_kernel(event);
	}

	cpu_notifier_register_done();
}
/*
 * Handle debug exception notifications.
 */
int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int rc = NOTIFY_STOP;
	struct perf_event *bp;
	struct pt_regs *regs = args->regs;
	int stepped = 1;
	struct arch_hw_breakpoint *info;
	unsigned int instr;
	unsigned long dar = regs->dar;

	/* Disable breakpoints during exception handling */
	set_dabr(0, 0);

	/*
	 * The counter may be concurrently released but that can only
	 * occur from a call_rcu() path. We can then safely fetch
	 * the breakpoint, use its callback, touch its counter
	 * while we are in an rcu_read_lock() path.
	 */
	rcu_read_lock();

	bp = __get_cpu_var(bp_per_reg);
	if (!bp)
		goto out;
	info = counter_arch_bp(bp);

	/*
	 * Return early after invoking user-callback function without restoring
	 * DABR if the breakpoint is from ptrace which always operates in
	 * one-shot mode. The ptrace-ed process will receive the SIGTRAP signal
	 * generated in do_dabr().
	 */
	if (bp->overflow_handler == ptrace_triggered) {
		perf_bp_event(bp, regs);
		rc = NOTIFY_DONE;
		goto out;
	}

	/*
	 * Verify if dar lies within the address range occupied by the symbol
	 * being watched to filter extraneous exceptions.  If it doesn't,
	 * we still need to single-step the instruction, but we don't
	 * generate an event.
	 */
	info->extraneous_interrupt = !((bp->attr.bp_addr <= dar) &&
			(dar - bp->attr.bp_addr < bp->attr.bp_len));

	/* Do not emulate user-space instructions, instead single-step them */
	if (user_mode(regs)) {
		current->thread.last_hit_ubp = bp;
		regs->msr |= MSR_SE;
		goto out;
	}

	stepped = 0;
	instr = 0;
	if (!__get_user_inatomic(instr, (unsigned int *) regs->nip))
		stepped = emulate_step(regs, instr);

	/*
	 * emulate_step() could not execute it. We've failed in reliably
	 * handling the hw-breakpoint. Unregister it and throw a warning
	 * message to let the user know about it.
	 */
	if (!stepped) {
		WARN(1, "Unable to handle hardware breakpoint. Breakpoint at "
			"0x%lx will be disabled.", info->address);
		perf_event_disable(bp);
		goto out;
	}
	/*
	 * As a policy, the callback is invoked in a 'trigger-after-execute'
	 * fashion
	 */
	if (!info->extraneous_interrupt)
		perf_bp_event(bp, regs);

	set_dabr(info->address | info->type | DABR_TRANSLATION, info->dabrx);
out:
	rcu_read_unlock();
	return rc;
}
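For reference, the one-shot ptrace path the handler special-cases above: a sketch of what such a ptrace_triggered callback can look like, modeled on the powerpc ptrace code (the overflow-handler signature varies across kernel versions). It disarms the breakpoint after the first hit by reusing modify_user_hw_breakpoint from the first example.

static void ptrace_triggered(struct perf_event *bp,
			     struct perf_sample_data *data,
			     struct pt_regs *regs)
{
	struct perf_event_attr attr = bp->attr;

	/* One-shot behaviour: disable the breakpoint after the first hit */
	attr.disabled = true;
	modify_user_hw_breakpoint(bp, &attr);
}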
Example #11
void capo_perf_event_disable(struct perf_event *pevent)
{
	if (pevent != NULL)
		perf_event_disable(pevent);
}