Example #1
File: msr.c  Project: CCNITSilchar/linux
static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	cfg = array_index_nospec((unsigned long)cfg, PERF_MSR_EVENT_MAX);

	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx		= -1;
	event->hw.event_base	= msr[cfg].msr;
	event->hw.config	= cfg;

	return 0;
}
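
Each of these examples follows the same Spectre-v1 mitigation pattern: bounds-check an attacker-influenced index, then clamp it with array_index_nospec() before the array access, so that a mispredicted bounds check cannot read out of range speculatively. A minimal kernel-style sketch of that pattern (table, TABLE_SIZE, and lookup_sketch are hypothetical names, not taken from the examples):

#include <linux/errno.h>
#include <linux/nospec.h>

#define TABLE_SIZE 16				/* hypothetical bound */
static int table[TABLE_SIZE];			/* hypothetical table */

static int lookup_sketch(unsigned long idx)
{
	if (idx >= TABLE_SIZE)			/* architectural bounds check */
		return -EINVAL;

	/*
	 * Clamp idx to [0, TABLE_SIZE) even on speculative paths where the
	 * branch above was mispredicted; out-of-range values collapse to 0.
	 */
	idx = array_index_nospec(idx, TABLE_SIZE);

	return table[idx];
}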
Example #2
File: common.c  Project: avagin/linux
/*
 * Does a 32-bit syscall.  Called with IRQs on in CONTEXT_KERNEL.  Does
 * all entry and exit work and returns with IRQs off.  This function is
 * extremely hot in workloads that use it, and it's usually called from
 * do_fast_syscall_32, so forcibly inline it to improve performance.
 */
static __always_inline void do_syscall_32_irqs_on(struct pt_regs *regs)
{
	struct thread_info *ti = current_thread_info();
	unsigned int nr = (unsigned int)regs->orig_ax;

#ifdef CONFIG_IA32_EMULATION
	ti->status |= TS_COMPAT;
#endif

	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY) {
		/*
		 * Subtlety here: if ptrace pokes something larger than
		 * 2^32-1 into orig_ax, this truncates it.  This may or
		 * may not be necessary, but it matches the old asm
		 * behavior.
		 */
		nr = syscall_trace_enter(regs);
	}

	if (likely(nr < IA32_NR_syscalls)) {
		nr = array_index_nospec(nr, IA32_NR_syscalls);
#ifdef CONFIG_IA32_EMULATION
		regs->ax = ia32_sys_call_table[nr](regs);
#else
		/*
		 * It's possible that a 32-bit syscall implementation
		 * takes a 64-bit parameter but nonetheless assumes that
		 * the high bits are zero.  Make sure we zero-extend all
		 * of the args.
		 */
		regs->ax = ia32_sys_call_table[nr](
			(unsigned int)regs->bx, (unsigned int)regs->cx,
			(unsigned int)regs->dx, (unsigned int)regs->si,
			(unsigned int)regs->di, (unsigned int)regs->bp);
#endif /* CONFIG_IA32_EMULATION */
	}

	syscall_return_slowpath(regs);
}
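
The !CONFIG_IA32_EMULATION branch above casts every argument to (unsigned int) because, as its comment notes, a 32-bit syscall implementation may take a 64-bit parameter while assuming the high bits are zero. The cast truncates to 32 bits, and the implicit conversion back to the wider parameter type zero-extends. A small standalone C sketch of that effect (hypothetical names, assuming a 64-bit build):

#include <stdint.h>
#include <stdio.h>

/* Hypothetical handler: takes a 64-bit parameter but assumes the
 * high 32 bits are zero. */
static uint64_t handler_sketch(uint64_t arg)
{
	return arg >> 32;	/* 0 for a properly zero-extended 32-bit arg */
}

int main(void)
{
	uint64_t reg = 0xdeadbeefcafebabeULL;	/* stale high bits in a register */

	/* Passing the raw register leaks the stale high bits ... */
	printf("raw:  %llx\n", (unsigned long long)handler_sketch(reg));

	/* ... while the (unsigned int) cast zero-extends, as in
	 * do_syscall_32_irqs_on() above. */
	printf("cast: %llx\n", (unsigned long long)handler_sketch((unsigned int)reg));
	return 0;
}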
Example #3
File: common.c  Project: avagin/linux
__visible void do_syscall_64(unsigned long nr, struct pt_regs *regs)
{
	struct thread_info *ti;

	enter_from_user_mode();
	local_irq_enable();
	ti = current_thread_info();
	if (READ_ONCE(ti->flags) & _TIF_WORK_SYSCALL_ENTRY)
		nr = syscall_trace_enter(regs);

	/*
	 * NB: Native and x32 syscalls are dispatched from the same
	 * table.  The only functional difference is the x32 bit in
	 * regs->orig_ax, which changes the behavior of some syscalls.
	 */
	nr &= __SYSCALL_MASK;
	if (likely(nr < NR_syscalls)) {
		nr = array_index_nospec(nr, NR_syscalls);
		regs->ax = sys_call_table[nr](regs);
	}

	syscall_return_slowpath(regs);
}
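
In both syscall paths the index has already passed an architectural bounds check, so array_index_nospec() only matters on speculative paths: it forces an out-of-range nr to 0 without a branch, so even a mispredicted nr < NR_syscalls comparison cannot index past the table. A rough model of the generic branch-free fallback (the kernel also has architecture-specific versions and an optimizer barrier, and this sketch relies on arithmetic right shift of negative values, as the kernel does):

/* All-ones mask when index < size, all-zeroes otherwise, with no branch. */
static inline unsigned long mask_nospec_sketch(unsigned long index,
					       unsigned long size)
{
	/*
	 * For index < size (and size <= LONG_MAX) the OR below has a clear
	 * sign bit, so ~(...) is negative and the arithmetic shift yields
	 * ~0UL; for index >= size the subtraction wraps, the sign bit is
	 * set, and the result is 0.
	 */
	return ~(long)(index | (size - 1UL - index)) >> (sizeof(long) * 8 - 1);
}

#define array_index_nospec_sketch(index, size)	\
	((index) & mask_nospec_sketch((index), (size)))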
Example #4
long ptp_ioctl(struct posix_clock *pc, unsigned int cmd, unsigned long arg)
{
	struct ptp_clock *ptp = container_of(pc, struct ptp_clock, clock);
	struct ptp_sys_offset_extended *extoff = NULL;
	struct ptp_sys_offset_precise precise_offset;
	struct system_device_crosststamp xtstamp;
	struct ptp_clock_info *ops = ptp->info;
	struct ptp_sys_offset *sysoff = NULL;
	struct ptp_system_timestamp sts;
	struct ptp_clock_request req;
	struct ptp_clock_caps caps;
	struct ptp_clock_time *pct;
	unsigned int i, pin_index;
	struct ptp_pin_desc pd;
	struct timespec64 ts;
	int enable, err = 0;

	switch (cmd) {

	case PTP_CLOCK_GETCAPS:
		memset(&caps, 0, sizeof(caps));
		caps.max_adj = ptp->info->max_adj;
		caps.n_alarm = ptp->info->n_alarm;
		caps.n_ext_ts = ptp->info->n_ext_ts;
		caps.n_per_out = ptp->info->n_per_out;
		caps.pps = ptp->info->pps;
		caps.n_pins = ptp->info->n_pins;
		caps.cross_timestamping = ptp->info->getcrosststamp != NULL;
		if (copy_to_user((void __user *)arg, &caps, sizeof(caps)))
			err = -EFAULT;
		break;

	case PTP_EXTTS_REQUEST:
		if (copy_from_user(&req.extts, (void __user *)arg,
				   sizeof(req.extts))) {
			err = -EFAULT;
			break;
		}
		if (req.extts.index >= ops->n_ext_ts) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_EXTTS;
		enable = req.extts.flags & PTP_ENABLE_FEATURE ? 1 : 0;
		err = ops->enable(ops, &req, enable);
		break;

	case PTP_PEROUT_REQUEST:
		if (copy_from_user(&req.perout, (void __user *)arg,
				   sizeof(req.perout))) {
			err = -EFAULT;
			break;
		}
		if (req.perout.index >= ops->n_per_out) {
			err = -EINVAL;
			break;
		}
		req.type = PTP_CLK_REQ_PEROUT;
		enable = req.perout.period.sec || req.perout.period.nsec;
		err = ops->enable(ops, &req, enable);
		break;

	case PTP_ENABLE_PPS:
		if (!capable(CAP_SYS_TIME))
			return -EPERM;
		req.type = PTP_CLK_REQ_PPS;
		enable = arg ? 1 : 0;
		err = ops->enable(ops, &req, enable);
		break;

	case PTP_SYS_OFFSET_PRECISE:
		if (!ptp->info->getcrosststamp) {
			err = -EOPNOTSUPP;
			break;
		}
		err = ptp->info->getcrosststamp(ptp->info, &xtstamp);
		if (err)
			break;

		memset(&precise_offset, 0, sizeof(precise_offset));
		ts = ktime_to_timespec64(xtstamp.device);
		precise_offset.device.sec = ts.tv_sec;
		precise_offset.device.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_realtime);
		precise_offset.sys_realtime.sec = ts.tv_sec;
		precise_offset.sys_realtime.nsec = ts.tv_nsec;
		ts = ktime_to_timespec64(xtstamp.sys_monoraw);
		precise_offset.sys_monoraw.sec = ts.tv_sec;
		precise_offset.sys_monoraw.nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, &precise_offset,
				 sizeof(precise_offset)))
			err = -EFAULT;
		break;

	case PTP_SYS_OFFSET_EXTENDED:
		if (!ptp->info->gettimex64) {
			err = -EOPNOTSUPP;
			break;
		}
		extoff = memdup_user((void __user *)arg, sizeof(*extoff));
		if (IS_ERR(extoff)) {
			err = PTR_ERR(extoff);
			extoff = NULL;
			break;
		}
		if (extoff->n_samples > PTP_MAX_SAMPLES
		    || extoff->rsv[0] || extoff->rsv[1] || extoff->rsv[2]) {
			err = -EINVAL;
			break;
		}
		for (i = 0; i < extoff->n_samples; i++) {
			err = ptp->info->gettimex64(ptp->info, &ts, &sts);
			if (err)
				goto out;
			extoff->ts[i][0].sec = sts.pre_ts.tv_sec;
			extoff->ts[i][0].nsec = sts.pre_ts.tv_nsec;
			extoff->ts[i][1].sec = ts.tv_sec;
			extoff->ts[i][1].nsec = ts.tv_nsec;
			extoff->ts[i][2].sec = sts.post_ts.tv_sec;
			extoff->ts[i][2].nsec = sts.post_ts.tv_nsec;
		}
		if (copy_to_user((void __user *)arg, extoff, sizeof(*extoff)))
			err = -EFAULT;
		break;

	case PTP_SYS_OFFSET:
		sysoff = memdup_user((void __user *)arg, sizeof(*sysoff));
		if (IS_ERR(sysoff)) {
			err = PTR_ERR(sysoff);
			sysoff = NULL;
			break;
		}
		if (sysoff->n_samples > PTP_MAX_SAMPLES) {
			err = -EINVAL;
			break;
		}
		pct = &sysoff->ts[0];
		for (i = 0; i < sysoff->n_samples; i++) {
			ktime_get_real_ts64(&ts);
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
			if (ops->gettimex64)
				err = ops->gettimex64(ops, &ts, NULL);
			else
				err = ops->gettime64(ops, &ts);
			if (err)
				goto out;
			pct->sec = ts.tv_sec;
			pct->nsec = ts.tv_nsec;
			pct++;
		}
		ktime_get_real_ts64(&ts);
		pct->sec = ts.tv_sec;
		pct->nsec = ts.tv_nsec;
		if (copy_to_user((void __user *)arg, sysoff, sizeof(*sysoff)))
			err = -EFAULT;
		break;

	case PTP_PIN_GETFUNC:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		pd = ops->pin_config[pin_index];
		mutex_unlock(&ptp->pincfg_mux);
		if (!err && copy_to_user((void __user *)arg, &pd, sizeof(pd)))
			err = -EFAULT;
		break;

	case PTP_PIN_SETFUNC:
		if (copy_from_user(&pd, (void __user *)arg, sizeof(pd))) {
			err = -EFAULT;
			break;
		}
		pin_index = pd.index;
		if (pin_index >= ops->n_pins) {
			err = -EINVAL;
			break;
		}
		pin_index = array_index_nospec(pin_index, ops->n_pins);
		if (mutex_lock_interruptible(&ptp->pincfg_mux))
			return -ERESTARTSYS;
		err = ptp_set_pinfunc(ptp, pin_index, pd.func, pd.chan);
		mutex_unlock(&ptp->pincfg_mux);
		break;

	default:
		err = -ENOTTY;
		break;
	}

out:
	kfree(extoff);
	kfree(sysoff);
	return err;
}
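
Besides the array_index_nospec() clamp on pin_index, the PTP_SYS_OFFSET and PTP_SYS_OFFSET_EXTENDED cases illustrate the memdup_user() idiom: copy the whole user struct into a kernel allocation, check the result with IS_ERR(), validate the user-supplied count, and free the copy once on the shared out: path. A minimal kernel-style sketch of that idiom (struct foo_req, FOO_MAX_SAMPLES, and foo_ioctl_sketch are hypothetical names; kernel context assumed for memdup_user(), IS_ERR()/PTR_ERR(), kfree(), and __user):

#define FOO_MAX_SAMPLES 25			/* hypothetical bound */

struct foo_req {				/* hypothetical layout */
	u32 n_samples;
	u64 samples[FOO_MAX_SAMPLES];
};

static long foo_ioctl_sketch(void __user *arg)
{
	struct foo_req *req;
	long err = 0;

	req = memdup_user(arg, sizeof(*req));	/* kmalloc + copy_from_user */
	if (IS_ERR(req))
		return PTR_ERR(req);		/* nothing to free on this path */

	if (req->n_samples > FOO_MAX_SAMPLES) {	/* validate user-supplied count */
		err = -EINVAL;
		goto out;
	}

	/* ... use req->samples[0 .. n_samples - 1] ... */
out:
	kfree(req);				/* single exit frees the copy */
	return err;
}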