/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	switch (info->len) {
	case SH_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case SH_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case SH_BREAKPOINT_LEN_4:
		align = 3;
		break;
	case SH_BREAKPOINT_LEN_8:
		align = 7;
		break;
	default:
		return ret;
	}

	/*
	 * For kernel-addresses, either the address or symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)kallsyms_lookup_name(info->name);

	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
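
/*
 * The validation above maps each breakpoint length to a low-order bit mask
 * and rejects any address that still has those bits set. As a minimal
 * standalone sketch (user-space C, with illustrative stand-in names rather
 * than the kernel's SH_BREAKPOINT_LEN_* constants), the same mapping looks
 * like this:
 */

#include <stdio.h>

/* Illustrative stand-ins for the SH_BREAKPOINT_LEN_* values used above. */
enum bp_len { BP_LEN_1 = 1, BP_LEN_2 = 2, BP_LEN_4 = 4, BP_LEN_8 = 8 };

/* Same mapping as the switch in arch_validate_hwbkpt_settings(): len -> align mask. */
static unsigned int align_mask(enum bp_len len)
{
	switch (len) {
	case BP_LEN_1: return 0;
	case BP_LEN_2: return 1;
	case BP_LEN_4: return 3;
	case BP_LEN_8: return 7;
	}
	return ~0u;	/* unknown length: every address is rejected */
}

int main(void)
{
	unsigned long addrs[] = { 0x1000, 0x1002, 0x1003 };

	for (int i = 0; i < 3; i++) {
		/* A 4-byte breakpoint needs the two low address bits clear. */
		int bad = addrs[i] & align_mask(BP_LEN_4);
		printf("addr 0x%lx, len 4: %s\n", addrs[i],
		       bad ? "rejected (-EINVAL)" : "ok");
	}
	return 0;
}

/*
 * With len 4 the mask is 3, so 0x1000 passes while 0x1002 and 0x1003 are
 * rejected, exactly as the "info->address & align" test above would do.
 */
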
static int __kprobes hw_breakpoint_handler(struct die_args *args)
{
	int cpu, i, rc = NOTIFY_STOP;
	struct perf_event *bp;
	unsigned int cmf, resume_mask;

	/*
	 * Do an early return if none of the channels triggered.
	 */
	cmf = sh_ubc->triggered_mask();
	if (unlikely(!cmf))
		return NOTIFY_DONE;

	/*
	 * By default, resume all of the active channels.
	 */
	resume_mask = sh_ubc->active_mask();

	/*
	 * Disable breakpoints during exception handling.
	 */
	sh_ubc->disable_all();

	cpu = get_cpu();
	for (i = 0; i < sh_ubc->num_events; i++) {
		unsigned long event_mask = (1 << i);

		if (likely(!(cmf & event_mask)))
			continue;

		/*
		 * The counter may be concurrently released, but that can only
		 * occur from a call_rcu() path. We can therefore safely fetch
		 * the breakpoint, use its callback and touch its counter
		 * while we are inside an rcu_read_lock() section.
		 */
		rcu_read_lock();

		bp = per_cpu(bp_per_reg[i], cpu);
		if (bp)
			rc = NOTIFY_DONE;

		/*
		 * Reset the condition match flag to denote completion of
		 * exception handling.
		 */
		sh_ubc->clear_triggered_mask(event_mask);

		/*
		 * bp can be NULL due to concurrent perf counter
		 * removal.
		 */
		if (!bp) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Don't restore the channel if the breakpoint is from
		 * ptrace, as it always operates in one-shot mode.
		 */
		if (bp->overflow_handler == ptrace_triggered)
			resume_mask &= ~(1 << i);

		perf_bp_event(bp, args->regs);

		/* Deliver the signal to userspace */
		if (arch_check_va_in_userspace(bp->attr.bp_addr,
					       bp->attr.bp_len)) {
			siginfo_t info;

			info.si_signo = args->signr;
			info.si_errno = notifier_to_errno(rc);
			info.si_code = TRAP_HWBKPT;

			force_sig_info(args->signr, &info, current);
		}

		rcu_read_unlock();
	}

	if (cmf == 0)
		rc = NOTIFY_DONE;

	sh_ubc->enable_all(resume_mask);

	put_cpu();

	return rc;
}
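
/*
 * The handler above does all of its per-channel bookkeeping with bit masks:
 * cmf holds the channels that actually triggered, and resume_mask starts out
 * as the set of active channels and loses the bit of any channel whose
 * breakpoint came from ptrace (one-shot). A minimal standalone sketch of
 * that bookkeeping, with made-up channel counts and trigger values purely
 * for illustration:
 */

#include <stdio.h>

#define NUM_EVENTS 2	/* assumed channel count; sh_ubc->num_events in the real handler */

int main(void)
{
	unsigned int cmf = 0x1;		/* pretend only channel 0 triggered */
	unsigned int active = 0x3;	/* pretend channels 0 and 1 were armed */
	unsigned int resume_mask = active;
	int is_ptrace_bp[NUM_EVENTS] = { 1, 0 };	/* channel 0: one-shot ptrace bp */

	for (int i = 0; i < NUM_EVENTS; i++) {
		unsigned int event_mask = 1u << i;

		if (!(cmf & event_mask))
			continue;	/* this channel did not fire */

		/* ptrace breakpoints are one-shot, so don't re-arm the channel */
		if (is_ptrace_bp[i])
			resume_mask &= ~event_mask;
	}

	/* Only channel 1 stays armed: resume_mask == 0x2 */
	printf("resume_mask = 0x%x\n", resume_mask);
	return 0;
}
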
Example #3
/*
 * Validate the arch-specific HW Breakpoint register settings
 */
int arch_validate_hwbkpt_settings(struct perf_event *bp,
				  struct task_struct *tsk)
{
	struct arch_hw_breakpoint *info = counter_arch_bp(bp);
	unsigned int align;
	int ret;

	ret = arch_build_bp_info(bp);
	if (ret)
		return ret;

	ret = -EINVAL;

	if (info->type == X86_BREAKPOINT_EXECUTE)
		/*
		 * Ptrace-refactoring code
		 * For now, we'll allow instruction breakpoint only for user-space
		 * addresses
		 */
		if ((!arch_check_va_in_userspace(info->address, info->len)) &&
			info->len != X86_BREAKPOINT_EXECUTE)
			return ret;

	switch (info->len) {
	case X86_BREAKPOINT_LEN_1:
		align = 0;
		break;
	case X86_BREAKPOINT_LEN_2:
		align = 1;
		break;
	case X86_BREAKPOINT_LEN_4:
		align = 3;
		break;
#ifdef CONFIG_X86_64
	case X86_BREAKPOINT_LEN_8:
		align = 7;
		break;
#endif
	default:
		return ret;
	}

	/*
	 * For kernel-addresses, either the address or symbol name can be
	 * specified.
	 */
	if (info->name)
		info->address = (unsigned long)
				kallsyms_lookup_name(info->name);
	/*
	 * Check that the low-order bits of the address are appropriate
	 * for the alignment implied by len.
	 */
	if (info->address & align)
		return -EINVAL;

	/* Check that the virtual address is in the proper range */
	if (tsk) {
		if (!arch_check_va_in_userspace(info->address, info->len))
			return -EFAULT;
	} else {
		if (!arch_check_va_in_kernelspace(info->address, info->len))
			return -EFAULT;
	}

	return 0;
}
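
/*
 * Both the SH and x86 versions enforce the same contract on callers: the
 * breakpoint length must be one of the supported values and the address must
 * be aligned to it (and, for a user task, lie in user space). One way to
 * reach this validation from user space is perf_event_open() with a
 * breakpoint-type attribute; the sketch below (minimal error handling,
 * assuming a kernel with hardware breakpoint support) requests a 4-byte
 * write watchpoint on an aligned variable, so it should satisfy the
 * alignment check above:
 */

#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>
#include <linux/hw_breakpoint.h>

static int watched;	/* 4-byte, naturally aligned target for the watchpoint */

int main(void)
{
	struct perf_event_attr attr;
	int fd;

	memset(&attr, 0, sizeof(attr));
	attr.type = PERF_TYPE_BREAKPOINT;
	attr.size = sizeof(attr);
	attr.bp_type = HW_BREAKPOINT_W;			/* trigger on writes */
	attr.bp_addr = (unsigned long)&watched;		/* must be aligned to bp_len */
	attr.bp_len = HW_BREAKPOINT_LEN_4;

	/* pid = 0: monitor this task; cpu = -1: on any CPU */
	fd = syscall(__NR_perf_event_open, &attr, 0, -1, -1, 0);
	if (fd < 0) {
		perror("perf_event_open");	/* EINVAL would indicate a rejected attr */
		return 1;
	}

	watched = 42;	/* this store is what the breakpoint watches */
	close(fd);
	return 0;
}
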