Example #1
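Overflow handling from what appears to be the Solaris/illumos kernel CPU performance counter (kcpc) subsystem: atomic_and_uint() clears each counter's KCPC_PIC_OVERFLOWED bit and, on the no-overflow path, the context's KCPC_CTX_FREEZE bit.
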
/*
 * Called from trap() when processing the ast posted by the high-level
 * interrupt handler.
 */
int
kcpc_overflow_ast()
{
	kcpc_ctx_t	*ctx = curthread->t_cpc_ctx;
	int		i;
	int		found = 0;
	uint64_t	curtick = KCPC_GET_TICK();

	ASSERT(ctx != NULL);	/* Beware of interrupt skid. */

	/*
	 * An overflow happened: sample the context to ensure that
	 * the overflow is propagated into the upper bits of the
	 * virtualized 64-bit counter(s).
	 */
	kpreempt_disable();
	ctx->kc_hrtime = gethrtime_waitfree();
	pcbe_ops->pcbe_sample(ctx);
	kpreempt_enable();

	/* Fold the elapsed ticks into the virtualized tick counter. */
	ctx->kc_vtick += curtick - ctx->kc_rawtick;

	/*
	 * The interrupt handler has marked any pics with KCPC_PIC_OVERFLOWED
	 * if that pic generated an overflow and if the request it was counting
	 * on behalf of had CPC_OVERFLOW_REQUEST specified. We go through all
	 * pics in the context and clear the KCPC_PIC_OVERFLOWED flags. If we
	 * found any overflowed pics, keep the context frozen and return true
	 * (thus causing a signal to be sent).
	 */
	for (i = 0; i < cpc_ncounters; i++) {
		if (ctx->kc_pics[i].kp_flags & KCPC_PIC_OVERFLOWED) {
			atomic_and_uint(&ctx->kc_pics[i].kp_flags,
			    ~KCPC_PIC_OVERFLOWED);
			found = 1;
		}
	}
	if (found)
		return (1);

	/*
	 * Otherwise, re-enable the counters and continue life as before.
	 */
	kpreempt_disable();
	atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
	pcbe_ops->pcbe_program(ctx);
	kpreempt_enable();
	return (0);
}
Example #2
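From the same kcpc code: restarting a counter set. The presets are restored, then KCPC_CTX_FREEZE is cleared with atomic_and_uint() right before the backend reprograms the hardware.
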
int
kcpc_restart(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	int		i;

	ASSERT(ctx != NULL);
	ASSERT(ctx->kc_thread == curthread);
	ASSERT(ctx->kc_cpuid == -1);

	kpreempt_disable();

	/*
	 * If the user is doing this on a running set, make sure the counters
	 * are stopped first.
	 */
	if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
		pcbe_ops->pcbe_allstop();

	for (i = 0; i < set->ks_nreqs; i++) {
		/* Restore each counter's virtualized value to its preset. */
		*(set->ks_req[i].kr_data) = set->ks_req[i].kr_preset;
		pcbe_ops->pcbe_configure(0, NULL, set->ks_req[i].kr_preset,
		    0, 0, NULL, &set->ks_req[i].kr_config, NULL);
	}

	/*
	 * Ask the backend to program the hardware.
	 */
	ctx->kc_rawtick = KCPC_GET_TICK();
	atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
	pcbe_ops->pcbe_program(ctx);
	kpreempt_enable();

	return (0);
}
Example #3
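An flock() system call implementation, apparently from NetBSD. The FHASLOCK bit in the file's flag word is set with atomic_or_uint() when a lock is requested and cleared with atomic_and_uint() on LOCK_UN.
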
/* ARGSUSED */
int
sys_flock(struct lwp *l, const struct sys_flock_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)	fd;
		syscallarg(int)	how;
	} */
	int fd, how, error;
	file_t *fp;
	vnode_t	*vp;
	struct flock lf;

	fd = SCARG(uap, fd);
	how = SCARG(uap, how);
	error = 0;

	if ((fp = fd_getfile(fd)) == NULL) {
		return EBADF;
	}
	if (fp->f_type != DTYPE_VNODE) {
		fd_putfile(fd);
		return EOPNOTSUPP;
	}

	vp = fp->f_vnode;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;		/* zero start and length: lock the whole file */

	switch (how & ~LOCK_NB) {
	case LOCK_UN:
		lf.l_type = F_UNLCK;
		atomic_and_uint(&fp->f_flag, ~FHASLOCK);
		error = VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		fd_putfile(fd);
		return error;
	case LOCK_EX:
		lf.l_type = F_WRLCK;
		break;
	case LOCK_SH:
		lf.l_type = F_RDLCK;
		break;
	default:
		fd_putfile(fd);
		return EINVAL;
	}

	/* Note the flock-style lock in the file flags before acquiring it. */
	atomic_or_uint(&fp->f_flag, FHASLOCK);
	if (how & LOCK_NB) {
		error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, F_FLOCK);
	} else {
		error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, F_FLOCK|F_WAIT);
	}
	fd_putfile(fd);
	return error;
}
Example #4
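A round-robin CPU picker, apparently from the NetBSD rump kernel scheduler. Here atomic_and_uint() with a zero mask is used to reset the shared counter before it wraps.
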
static inline struct rumpcpu *
getnextcpu(void)
{
	unsigned newcpu;

	/* Atomically advance the shared counter; _nv returns the new value. */
	newcpu = atomic_inc_uint_nv(&nextcpu);
	/* Reset the counter well before it can wrap and skew the modulo. */
	if (__predict_false(newcpu > UINT_MAX/2))
		atomic_and_uint(&nextcpu, 0);
	newcpu = newcpu % ncpu;

	return &rcpu_storage[newcpu];
}
Example #5
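A setsockopt() implementation, apparently from NetBSD. After the option is applied, the atomic_or_uint()/atomic_and_uint() pair keeps the file-level FNOSIGPIPE flag in sync with the socket's SO_NOSIGPIPE option.
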
int
sys_setsockopt(struct lwp *l, const struct sys_setsockopt_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			s;
		syscallarg(int)			level;
		syscallarg(int)			name;
		syscallarg(const void *)	val;
		syscallarg(unsigned int)	valsize;
	} */
	struct sockopt	sopt;
	struct socket	*so;
	file_t		*fp;
	int		error;
	unsigned int	len;

	len = SCARG(uap, valsize);
	if (len > 0 && SCARG(uap, val) == NULL)
		return EINVAL;

	if (len > MCLBYTES)
		return EINVAL;

	if ((error = fd_getsock1(SCARG(uap, s), &so, &fp)) != 0)
		return (error);

	sockopt_init(&sopt, SCARG(uap, level), SCARG(uap, name), len);

	if (len > 0) {
		error = copyin(SCARG(uap, val), sopt.sopt_data, len);
		if (error)
			goto out;
	}

	error = sosetopt(so, &sopt);
	/* Keep the file-level FNOSIGPIPE flag in sync with SO_NOSIGPIPE. */
	if (so->so_options & SO_NOSIGPIPE)
		atomic_or_uint(&fp->f_flag, FNOSIGPIPE);
	else
		atomic_and_uint(&fp->f_flag, ~FNOSIGPIPE);

 out:
	sockopt_destroy(&sopt);
	fd_putfile(SCARG(uap, s));
	return error;
}
Example #6
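An fcntl() implementation, apparently from NetBSD. F_SETNOSIGPIPE uses the same atomic pair to flip FNOSIGPIPE; note that the F_SETFL path, by its own admission, is not guaranteed to be atomic.
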
/*
 * The file control system call.
 */
int
sys_fcntl(struct lwp *l, const struct sys_fcntl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)		fd;
		syscallarg(int)		cmd;
		syscallarg(void *)	arg;
	} */
	int fd, i, tmp, error, cmd, newmin;
	filedesc_t *fdp;
	file_t *fp;
	struct flock fl;
	bool cloexec = false;

	fd = SCARG(uap, fd);
	cmd = SCARG(uap, cmd);
	fdp = l->l_fd;
	error = 0;

	switch (cmd) {
	case F_CLOSEM:
		if (fd < 0)
			return EBADF;
		while ((i = fdp->fd_lastfile) >= fd) {
			if (fd_getfile(i) == NULL) {
				/* Another thread has updated. */
				continue;
			}
			fd_close(i);
		}
		return 0;

	case F_MAXFD:
		*retval = fdp->fd_lastfile;
		return 0;

	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin(SCARG(uap, arg), &fl, sizeof(fl));
		if (error)
			return error;
		error = do_fcntl_lock(fd, cmd, &fl);
		if (cmd == F_GETLK && error == 0)
			error = copyout(&fl, SCARG(uap, arg), sizeof(fl));
		return error;

	default:
		/* Handled below */
		break;
	}

	if ((fp = fd_getfile(fd)) == NULL)
		return (EBADF);

	if ((cmd & F_FSCTL)) {
		error = fcntl_forfs(fd, fp, cmd, SCARG(uap, arg));
		fd_putfile(fd);
		return error;
	}

	switch (cmd) {
	case F_DUPFD_CLOEXEC:
		cloexec = true;
		/*FALLTHROUGH*/
	case F_DUPFD:
		newmin = (long)SCARG(uap, arg);
		if ((u_int)newmin >=
		    l->l_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
		    (u_int)newmin >= maxfiles) {
			fd_putfile(fd);
			return EINVAL;
		}
		error = fd_dup(fp, newmin, &i, cloexec);
		*retval = i;
		break;

	case F_GETFD:
		*retval = fdp->fd_dt->dt_ff[fd]->ff_exclose;
		break;

	case F_SETFD:
		fd_set_exclose(l, fd,
		    ((long)SCARG(uap, arg) & FD_CLOEXEC) != 0);
		break;

	case F_GETNOSIGPIPE:
		*retval = (fp->f_flag & FNOSIGPIPE) != 0;
		break;

	case F_SETNOSIGPIPE:
		if (SCARG(uap, arg))
			atomic_or_uint(&fp->f_flag, FNOSIGPIPE);
		else
			atomic_and_uint(&fp->f_flag, ~FNOSIGPIPE);
		*retval = 0;
		break;

	case F_GETFL:
		*retval = OFLAGS(fp->f_flag);
		break;

	case F_SETFL:
		/* XXX not guaranteed to be atomic. */
		tmp = FFLAGS((long)SCARG(uap, arg)) & FCNTLFLAGS;
		error = (*fp->f_ops->fo_fcntl)(fp, F_SETFL, &tmp);
		if (error)
			break;
		i = tmp ^ fp->f_flag;	/* flag bits that are changing */
		if (i & FNONBLOCK) {
			int flgs = tmp & FNONBLOCK;
			error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, &flgs);
			if (error) {
				(*fp->f_ops->fo_fcntl)(fp, F_SETFL,
				    &fp->f_flag);
				break;
			}
		}
		if (i & FASYNC) {
			int flgs = tmp & FASYNC;
			error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, &flgs);
			if (error) {
				if (i & FNONBLOCK) {
					tmp = fp->f_flag & FNONBLOCK;
					(void)(*fp->f_ops->fo_ioctl)(fp,
						FIONBIO, &tmp);
				}
				(*fp->f_ops->fo_fcntl)(fp, F_SETFL,
				    &fp->f_flag);
				break;
			}
		}
		fp->f_flag = (fp->f_flag & ~FCNTLFLAGS) | tmp;
		break;

	case F_GETOWN:
		error = (*fp->f_ops->fo_ioctl)(fp, FIOGETOWN, &tmp);
		*retval = tmp;
		break;

	case F_SETOWN:
		tmp = (int)(uintptr_t) SCARG(uap, arg);
		error = (*fp->f_ops->fo_ioctl)(fp, FIOSETOWN, &tmp);
		break;

	default:
		error = EINVAL;
	}

	fd_putfile(fd);
	return (error);
}
Example #7
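Back in the kcpc code: CPC_ENABLE and CPC_DISABLE atomically clear or set KCPC_CTX_FREEZE under kpreempt_disable(), and the usr/sys event switch atomically marks the context invalid before rebinding.
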
/*
 * Caller must hold kcpc_cpuctx_lock.
 */
int
kcpc_enable(kthread_t *t, int cmd, int enable)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx;
	kcpc_set_t	*set = t->t_cpc_set;
	kcpc_set_t	*newset;
	int		i;
	int		flag;
	int		err;

	ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a
		 * CPU-bound set.
		 */
		ASSERT(t->t_cpc_set != NULL);
		ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);
		return (EINVAL);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID)
		return (EAGAIN);

	if (cmd == CPC_ENABLE) {
		if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
			return (EINVAL);
		kpreempt_disable();
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
		kcpc_restore(ctx);
		kpreempt_enable();
	} else if (cmd == CPC_DISABLE) {
		if (ctx->kc_flags & KCPC_CTX_FREEZE)
			return (EINVAL);
		kpreempt_disable();
		kcpc_save(ctx);
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
		kpreempt_enable();
	} else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
		/*
		 * Strategy for usr/sys: stop counters and update set's presets
		 * with current counter values, unbind, update requests with
		 * new config, then re-bind.
		 */
		flag = (cmd == CPC_USR_EVENTS) ?
		    CPC_COUNT_USER : CPC_COUNT_SYSTEM;

		kpreempt_disable();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
		pcbe_ops->pcbe_allstop();
		kpreempt_enable();
		for (i = 0; i < set->ks_nreqs; i++) {
			set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
			if (enable)
				set->ks_req[i].kr_flags |= flag;
			else
				set->ks_req[i].kr_flags &= ~flag;
		}
		newset = kcpc_dup_set(set);
		if (kcpc_unbind(set) != 0)
			return (EINVAL);
		t->t_cpc_set = newset;
		if (kcpc_bind_thread(newset, t, &err) != 0) {
			t->t_cpc_set = NULL;
			kcpc_free_set(newset);
			return (EINVAL);
		}
	} else
		return (EINVAL);

	return (0);
}
Example #8
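Binding a counter set to a thread in the kcpc code. The context starts life frozen; once the hardware is programmed (or immediately, for a stopped victim LWP), atomic_and_uint() clears KCPC_CTX_FREEZE.
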
int
kcpc_bind_thread(kcpc_set_t *set, kthread_t *t, int *subcode)
{
	kcpc_ctx_t	*ctx;
	int		error;

	/*
	 * Only one set is allowed per context, so ensure there is no
	 * existing context.
	 */

	if (t->t_cpc_ctx != NULL)
		return (EEXIST);

	ctx = kcpc_ctx_alloc();

	/*
	 * The context must begin life frozen until it has been properly
	 * programmed onto the hardware. This prevents the context ops from
	 * worrying about it until we're ready.
	 */
	ctx->kc_flags |= KCPC_CTX_FREEZE;
	ctx->kc_hrtime = gethrtime();

	if (kcpc_assign_reqs(set, ctx) != 0) {
		kcpc_ctx_free(ctx);
		*subcode = CPC_RESOURCE_UNAVAIL;
		return (EINVAL);
	}

	ctx->kc_cpuid = -1;
	if (set->ks_flags & CPC_BIND_LWP_INHERIT)
		ctx->kc_flags |= KCPC_CTX_LWPINHERIT;
	ctx->kc_thread = t;
	t->t_cpc_ctx = ctx;
	/*
	 * Permit threads to look at their own hardware counters from userland.
	 */
	ctx->kc_flags |= KCPC_CTX_NONPRIV;

	/*
	 * Create the data store for this set.
	 */
	set->ks_data = kmem_alloc(set->ks_nreqs * sizeof (uint64_t), KM_SLEEP);

	if ((error = kcpc_configure_reqs(ctx, set, subcode)) != 0) {
		kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
		kcpc_ctx_free(ctx);
		t->t_cpc_ctx = NULL;
		return (error);
	}

	set->ks_ctx = ctx;
	ctx->kc_set = set;

	/*
	 * Add a device context to the subject thread.
	 */
	installctx(t, ctx, kcpc_save, kcpc_restore, NULL,
	    kcpc_lwp_create, NULL, kcpc_free);

	/*
	 * Ask the backend to program the hardware.
	 */
	if (t == curthread) {
		kpreempt_disable();
		ctx->kc_rawtick = KCPC_GET_TICK();
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
		pcbe_ops->pcbe_program(ctx);
		kpreempt_enable();
	} else
		/*
		 * Since we are the agent LWP, we know the victim LWP is stopped
		 * until we're done here; no need to worry about preemption or
		 * migration here. We still use an atomic op to clear the flag
		 * to ensure the flags are always self-consistent; they can
		 * still be accessed from, for instance, another CPU doing a
		 * kcpc_invalidate_all().
		 */
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);

	return (0);
}
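
What all eight examples share: a flag word that may be touched concurrently (by another thread, another CPU, or interrupt context) is updated with atomic_or_uint() to set bits and atomic_and_uint() with a complemented mask to clear them, so a racing read-modify-write can never drop another bit's update. Below is a minimal sketch of the idiom, assuming a NetBSD/illumos-style <sys/atomic.h>; the demo_* names and the flag value are hypothetical, only the two primitives come from the examples.

#include <sys/atomic.h>

#define	DEMO_FROZEN	0x01u		/* hypothetical flag bit */

static volatile unsigned int demo_flags;

static void
demo_freeze(void)
{
	/* Atomically set one bit; other bits in the word are untouched. */
	atomic_or_uint(&demo_flags, DEMO_FROZEN);
}

static void
demo_thaw(void)
{
	/* Atomically clear one bit by AND-ing with its complement. */
	atomic_and_uint(&demo_flags, ~DEMO_FROZEN);
}

Each call is a single atomic read-modify-write, but a sequence of calls is not atomic as a whole, which is why the examples above still bracket multi-step work with kpreempt_disable()/kpreempt_enable() or keep the context frozen while reprogramming it.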