Example #1
File: cpu_subr.c  Project: ryo/netbsd-src
void
cpu_need_resched(struct cpu_info *ci, int flags)
{
	struct lwp * const l = ci->ci_data.cpu_onproc;
#ifdef MULTIPROCESSOR
	struct cpu_info * const cur_ci = curcpu();
#endif

	KASSERT(kpreempt_disabled());

	ci->ci_want_resched |= flags;

	if (__predict_false((l->l_pflag & LP_INTR) != 0)) {
		/*
		 * No point doing anything, it will switch soon.
		 * Also here to prevent an assertion failure in
		 * kpreempt() due to preemption being set on a
		 * soft interrupt LWP.
		 */
		return;
	}

	if (__predict_false(l == ci->ci_data.cpu_idlelwp)) {
#ifdef MULTIPROCESSOR
		/*
		 * If the other CPU is idling, it must be waiting for an
		 * interrupt.  So give it one.
		 */
		if (__predict_false(ci != cur_ci))
			cpu_send_ipi(ci, IPI_NOP);
#endif
		return;
	}

#ifdef MULTIPROCESSOR
	atomic_or_uint(&ci->ci_want_resched, flags);
#else
	ci->ci_want_resched |= flags;
#endif

	if (flags & RESCHED_KPREEMPT) {
#ifdef __HAVE_PREEMPTION
		atomic_or_uint(&l->l_dopreempt, DOPREEMPT_ACTIVE);
		if (ci == cur_ci) {
			softint_trigger(SOFTINT_KPREEMPT);
		} else {
			cpu_send_ipi(ci, IPI_KPREEMPT);
		}
#endif
		return;
	}
	l->l_md.md_astpending = 1;		/* force call to ast() */
#ifdef MULTIPROCESSOR
	if (ci != cur_ci && (flags & RESCHED_IMMED)) {
		cpu_send_ipi(ci, IPI_AST);
	}
#endif
}
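
The example above sets ci_want_resched with a plain OR on a uniprocessor build but with atomic_or_uint() under MULTIPROCESSOR, because a non-atomic read-modify-write on a word shared between CPUs can silently drop flag bits. The following standalone C11 sketch is not from the NetBSD tree; the RESCHED_* values are stand-ins, used only to show the same atomic-OR idiom in portable form.

/* flag_or_demo.c: portable illustration of the atomic flag OR used above. */
#include <stdatomic.h>
#include <stdio.h>

#define RESCHED_IMMED		0x01u	/* hypothetical stand-ins for the */
#define RESCHED_KPREEMPT	0x02u	/* kernel's RESCHED_* flag bits   */

static _Atomic unsigned int want_resched;

static void
need_resched(unsigned int flags)
{
	/* One indivisible read-modify-write, like atomic_or_uint(). */
	atomic_fetch_or(&want_resched, flags);
}

int
main(void)
{
	need_resched(RESCHED_IMMED);
	need_resched(RESCHED_KPREEMPT);
	printf("want_resched = 0x%x\n", atomic_load(&want_resched));	/* 0x3 */
	return 0;
}
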
Example #2
/*
 * Called when switching away from current thread.
 */
static void
kcpc_save(kcpc_ctx_t *ctx)
{
	if (ctx->kc_flags & KCPC_CTX_INVALID) {
		if (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED)
			return;
		/*
		 * This context has been invalidated but the counters have not
		 * been stopped. Stop them here and mark the context stopped.
		 */
		pcbe_ops->pcbe_allstop();
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);
		return;
	}

	pcbe_ops->pcbe_allstop();
	if (ctx->kc_flags & KCPC_CTX_FREEZE)
		return;

	/*
	 * Need to sample for all reqs into each req's current mpic.
	 */
	ctx->kc_hrtime = gethrtime();
	ctx->kc_vtick += KCPC_GET_TICK() - ctx->kc_rawtick;
	pcbe_ops->pcbe_sample(ctx);
}
Example #3
/*ARGSUSED*/
static void
kcpc_remotestop_func(uint64_t arg1, uint64_t arg2)
{
	ASSERT(CPU->cpu_cpc_ctx != NULL);
	pcbe_ops->pcbe_allstop();
	atomic_or_uint(&CPU->cpu_cpc_ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);
}
Example #4
static int
kcpc_remotestop_func(void)
{
	ASSERT(CPU->cpu_cpc_ctx != NULL);
	pcbe_ops->pcbe_allstop();
	atomic_or_uint(&CPU->cpu_cpc_ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);

	return (0);
}
Example #5
/*
 * Interface for PCBEs to signal that an existing configuration has suddenly
 * become invalid.
 */
void
kcpc_invalidate_config(void *token)
{
	kcpc_ctx_t *ctx = token;

	ASSERT(ctx != NULL);

	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);
}
Example #6
/* ARGSUSED */
int
sys_flock(struct lwp *l, const struct sys_flock_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)	fd;
		syscallarg(int)	how;
	} */
	int fd, how, error;
	file_t *fp;
	vnode_t	*vp;
	struct flock lf;

	fd = SCARG(uap, fd);
	how = SCARG(uap, how);
	error = 0;

	if ((fp = fd_getfile(fd)) == NULL) {
		return EBADF;
	}
	if (fp->f_type != DTYPE_VNODE) {
		fd_putfile(fd);
		return EOPNOTSUPP;
	}

	vp = fp->f_vnode;
	lf.l_whence = SEEK_SET;
	lf.l_start = 0;
	lf.l_len = 0;

	switch (how & ~LOCK_NB) {
	case LOCK_UN:
		lf.l_type = F_UNLCK;
		atomic_and_uint(&fp->f_flag, ~FHASLOCK);
		error = VOP_ADVLOCK(vp, fp, F_UNLCK, &lf, F_FLOCK);
		fd_putfile(fd);
		return error;
	case LOCK_EX:
		lf.l_type = F_WRLCK;
		break;
	case LOCK_SH:
		lf.l_type = F_RDLCK;
		break;
	default:
		fd_putfile(fd);
		return EINVAL;
	}

	atomic_or_uint(&fp->f_flag, FHASLOCK);
	if (how & LOCK_NB) {
		error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, F_FLOCK);
	} else {
		error = VOP_ADVLOCK(vp, fp, F_SETLK, &lf, F_FLOCK|F_WAIT);
	}
	fd_putfile(fd);
	return error;
}
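
sys_flock() above shows the complementary pair of operations on f_flag: atomic_or_uint() sets FHASLOCK before the lock request, and atomic_and_uint() with the inverted mask clears it on LOCK_UN (sys_setsockopt and sys_fcntl further below use the same idiom for FNOSIGPIPE). A minimal C11 sketch of that set/clear idiom follows; the FHASLOCK value here is a placeholder, not the kernel's definition.

/* set_clear_demo.c: setting and clearing one flag bit atomically. */
#include <stdatomic.h>
#include <stdio.h>

#define FHASLOCK	0x4000u	/* placeholder bit, not the real kernel value */

static _Atomic unsigned int f_flag;

int
main(void)
{
	/* like atomic_or_uint(&fp->f_flag, FHASLOCK) */
	atomic_fetch_or(&f_flag, FHASLOCK);
	printf("after set:   0x%x\n", atomic_load(&f_flag));

	/* like atomic_and_uint(&fp->f_flag, ~FHASLOCK) */
	atomic_fetch_and(&f_flag, ~FHASLOCK);
	printf("after clear: 0x%x\n", atomic_load(&f_flag));
	return 0;
}
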
Example #7
/*ARGSUSED*/
static void
kcpc_lwp_create(kthread_t *t, kthread_t *ct)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx, *cctx;
	int		i;

	if (ctx == NULL || (ctx->kc_flags & KCPC_CTX_LWPINHERIT) == 0)
		return;

	rw_enter(&kcpc_cpuctx_lock, RW_READER);
	if (ctx->kc_flags & KCPC_CTX_INVALID) {
		rw_exit(&kcpc_cpuctx_lock);
		return;
	}
	cctx = kcpc_ctx_alloc();
	kcpc_ctx_clone(ctx, cctx);
	rw_exit(&kcpc_cpuctx_lock);

	/*
	 * Copy the parent context's kc_flags field, but don't overwrite
	 * the child's in case it was modified during kcpc_ctx_clone.
	 */
	cctx->kc_flags |= ctx->kc_flags;
	cctx->kc_thread = ct;
	cctx->kc_cpuid = -1;
	ct->t_cpc_set = cctx->kc_set;
	ct->t_cpc_ctx = cctx;

	if (cctx->kc_flags & KCPC_CTX_SIGOVF) {
		kcpc_set_t *ks = cctx->kc_set;
		/*
		 * Our contract with the user requires us to immediately send an
		 * overflow signal to all children if we have the LWPINHERIT
		 * and SIGOVF flags set. In addition, all counters should be
		 * set to UINT64_MAX, and their pic's overflow flag turned on
		 * so that our trap() processing knows to send a signal.
		 */
		atomic_or_uint(&cctx->kc_flags, KCPC_CTX_FREEZE);
		for (i = 0; i < ks->ks_nreqs; i++) {
			kcpc_request_t *kr = &ks->ks_req[i];

			if (kr->kr_flags & CPC_OVF_NOTIFY_EMT) {
				*(kr->kr_data) = UINT64_MAX;
				kr->kr_picp->kp_flags |= KCPC_PIC_OVERFLOWED;
			}
		}
		ttolwp(ct)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		aston(ct);
	}

	installctx(ct, cctx, kcpc_save, kcpc_restore,
	    NULL, kcpc_lwp_create, NULL, kcpc_free);
}
Example #8
/*
 * Called from lwp_exit() and thread_exit()
 */
void
kcpc_passivate(void)
{
	kcpc_ctx_t *ctx = curthread->t_cpc_ctx;
	kcpc_set_t *set = curthread->t_cpc_set;

	if (set == NULL)
		return;

	/*
	 * We're cleaning up after this thread; ensure there are no dangling
	 * CPC pointers left behind. The context and set will be freed by
	 * freectx() in the case of an LWP-bound set, and by kcpc_unbind() in
	 * the case of a CPU-bound set.
	 */
	curthread->t_cpc_ctx = NULL;

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a CPU-bound
		 * set. The hardware will be stopped via kcpc_unbind() when the
		 * process exits and closes its file descriptors with
		 * kcpc_close(). Our only job here is to clean up this thread's
		 * state; the set will be freed with the unbind().
		 */
		(void) kcpc_unbind(set);
		/*
		 * Unbinding a set belonging to the current thread should clear
		 * its set pointer.
		 */
		ASSERT(curthread->t_cpc_set == NULL);
		return;
	}

	curthread->t_cpc_set = NULL;

	/*
	 * This thread/LWP is exiting but context switches will continue to
	 * happen for a bit as the exit proceeds.  Kernel preemption must be
	 * disabled here to prevent a race between checking or setting the
	 * INVALID_STOPPED flag here and kcpc_restore() setting the flag during
	 * a context switch.
	 */

	kpreempt_disable();
	if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) {
		pcbe_ops->pcbe_allstop();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
	}
	kpreempt_enable();
}
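
The block comment in kcpc_passivate() explains why the test of KCPC_CTX_INVALID_STOPPED and the subsequent flag update must be shielded from the context-switch path with kpreempt_disable()/kpreempt_enable(). As an illustration only (this is not how the kernel code is structured), a userspace analogue can collapse the check and the update into a single atomic step by using the previous value that atomic_fetch_or() returns; the CTX_* bits and stop_hw() below are hypothetical stand-ins.

/* stop_once_demo.c: "if not already stopped, stop and mark stopped" as one atomic step. */
#include <stdatomic.h>
#include <stdio.h>

#define CTX_INVALID		0x01u	/* hypothetical flag bits */
#define CTX_INVALID_STOPPED	0x02u

static _Atomic unsigned int kc_flags;

static void
stop_hw(void)
{
	printf("counters stopped\n");	/* stands in for stopping the hardware */
}

static void
passivate(void)
{
	/* fetch_or returns the old flags, so the test and the set cannot race */
	unsigned int old = atomic_fetch_or(&kc_flags,
	    CTX_INVALID | CTX_INVALID_STOPPED);

	if ((old & CTX_INVALID_STOPPED) == 0)
		stop_hw();
}

int
main(void)
{
	passivate();	/* first caller stops the counters */
	passivate();	/* already marked stopped: nothing to do */
	return 0;
}
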
Example #9
/*
 * Grab every existing context and mark it as invalid.
 */
void
kcpc_invalidate_all(void)
{
	kcpc_ctx_t *ctx;
	long hash;

	for (hash = 0; hash < CPC_HASH_BUCKETS; hash++) {
		mutex_enter(&kcpc_ctx_llock[hash]);
		for (ctx = kcpc_ctx_list[hash]; ctx; ctx = ctx->kc_next)
			atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);
		mutex_exit(&kcpc_ctx_llock[hash]);
	}
}
Example #10
int
sys_setsockopt(struct lwp *l, const struct sys_setsockopt_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int)			s;
		syscallarg(int)			level;
		syscallarg(int)			name;
		syscallarg(const void *)	val;
		syscallarg(unsigned int)	valsize;
	} */
	struct sockopt	sopt;
	struct socket	*so;
	file_t		*fp;
	int		error;
	unsigned int	len;

	len = SCARG(uap, valsize);
	if (len > 0 && SCARG(uap, val) == NULL)
		return EINVAL;

	if (len > MCLBYTES)
		return EINVAL;

	if ((error = fd_getsock1(SCARG(uap, s), &so, &fp)) != 0)
		return (error);

	sockopt_init(&sopt, SCARG(uap, level), SCARG(uap, name), len);

	if (len > 0) {
		error = copyin(SCARG(uap, val), sopt.sopt_data, len);
		if (error)
			goto out;
	}

	error = sosetopt(so, &sopt);
	if (so->so_options & SO_NOSIGPIPE)
		atomic_or_uint(&fp->f_flag, FNOSIGPIPE);
	else
		atomic_and_uint(&fp->f_flag, ~FNOSIGPIPE);

 out:
	sockopt_destroy(&sopt);
	fd_putfile(SCARG(uap, s));
	return error;
}
Example #11
/*
 * Stop the counters on the CPU this context is bound to.
 */
static void
kcpc_stop_hw(kcpc_ctx_t *ctx)
{
	cpu_t *cp;

	ASSERT((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED))
	    == KCPC_CTX_INVALID);

	kpreempt_disable();

	cp = cpu_get(ctx->kc_cpuid);
	ASSERT(cp != NULL);

	if (cp == CPU) {
		pcbe_ops->pcbe_allstop();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID_STOPPED);
	} else
		kcpc_remote_stop(cp);
	kpreempt_enable();
}
Example #12
static void
kcpc_restore(kcpc_ctx_t *ctx)
{
	if ((ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED)) ==
	    KCPC_CTX_INVALID)
		/*
		 * The context is invalidated but has not been marked stopped.
		 * We mark it as such here because we will not start the
		 * counters during this context switch.
		 */
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID_STOPPED);


	if (ctx->kc_flags & (KCPC_CTX_INVALID | KCPC_CTX_FREEZE))
		return;

	/*
	 * While programming the hardware, the counters should be stopped. We
	 * don't do an explicit pcbe_allstop() here because they should have
	 * been stopped already by the last consumer.
	 */
	ctx->kc_rawtick = KCPC_GET_TICK();
	pcbe_ops->pcbe_program(ctx);
}
Example #13
int
kcpc_unbind(kcpc_set_t *set)
{
	kcpc_ctx_t	*ctx = set->ks_ctx;
	kthread_t	*t;

	if (ctx == NULL)
		return (EINVAL);

	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);

	if (ctx->kc_cpuid == -1) {
		t = ctx->kc_thread;
		/*
		 * The context is thread-bound and therefore has a device
		 * context.  It will be freed via removectx() calling
		 * freectx() calling kcpc_free().
		 */
		if (t == curthread &&
		    (ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0) {
			kpreempt_disable();
			pcbe_ops->pcbe_allstop();
			atomic_or_uint(&ctx->kc_flags,
			    KCPC_CTX_INVALID_STOPPED);
			kpreempt_enable();
		}
#ifdef DEBUG
		if (removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free) == 0)
			panic("kcpc_unbind: context %p not preset on thread %p",
			    ctx, t);
#else
		(void) removectx(t, ctx, kcpc_save, kcpc_restore, NULL,
		    kcpc_lwp_create, NULL, kcpc_free);
#endif /* DEBUG */
		t->t_cpc_set = NULL;
		t->t_cpc_ctx = NULL;
	} else {
		/*
		 * If we are unbinding a CPU-bound set from a remote CPU, the
		 * native CPU's idle thread could be in the midst of programming
		 * this context onto the CPU. We grab the context's lock here to
		 * ensure that the idle thread is done with it. When we release
		 * the lock, the CPU no longer has a context and the idle thread
		 * will move on.
		 *
		 * cpu_lock must be held to prevent the CPU from being DR'd out
		 * while we disassociate the context from the cpu_t.
		 */
		cpu_t *cp;
		mutex_enter(&cpu_lock);
		cp = cpu_get(ctx->kc_cpuid);
		if (cp != NULL) {
			/*
			 * The CPU may have been DR'd out of the system.
			 */
			mutex_enter(&cp->cpu_cpc_ctxlock);
			if ((ctx->kc_flags & KCPC_CTX_INVALID_STOPPED) == 0)
				kcpc_stop_hw(ctx);
			ASSERT(ctx->kc_flags & KCPC_CTX_INVALID_STOPPED);
			cp->cpu_cpc_ctx = NULL;
			mutex_exit(&cp->cpu_cpc_ctxlock);
		}
		mutex_exit(&cpu_lock);
		if (ctx->kc_thread == curthread) {
			kcpc_free(ctx, 0);
			curthread->t_cpc_set = NULL;
		}
	}

	return (0);
}
Example #14
/*
 * The file control system call.
 */
int
sys_fcntl(struct lwp *l, const struct sys_fcntl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int)		fd;
		syscallarg(int)		cmd;
		syscallarg(void *)	arg;
	} */
	int fd, i, tmp, error, cmd, newmin;
	filedesc_t *fdp;
	file_t *fp;
	struct flock fl;
	bool cloexec = false;

	fd = SCARG(uap, fd);
	cmd = SCARG(uap, cmd);
	fdp = l->l_fd;
	error = 0;

	switch (cmd) {
	case F_CLOSEM:
		if (fd < 0)
			return EBADF;
		while ((i = fdp->fd_lastfile) >= fd) {
			if (fd_getfile(i) == NULL) {
				/* Another thread has updated. */
				continue;
			}
			fd_close(i);
		}
		return 0;

	case F_MAXFD:
		*retval = fdp->fd_lastfile;
		return 0;

	case F_SETLKW:
	case F_SETLK:
	case F_GETLK:
		error = copyin(SCARG(uap, arg), &fl, sizeof(fl));
		if (error)
			return error;
		error = do_fcntl_lock(fd, cmd, &fl);
		if (cmd == F_GETLK && error == 0)
			error = copyout(&fl, SCARG(uap, arg), sizeof(fl));
		return error;

	default:
		/* Handled below */
		break;
	}

	if ((fp = fd_getfile(fd)) == NULL)
		return (EBADF);

	if ((cmd & F_FSCTL)) {
		error = fcntl_forfs(fd, fp, cmd, SCARG(uap, arg));
		fd_putfile(fd);
		return error;
	}

	switch (cmd) {
	case F_DUPFD_CLOEXEC:
		cloexec = true;
		/*FALLTHROUGH*/
	case F_DUPFD:
		newmin = (long)SCARG(uap, arg);
		if ((u_int)newmin >=
		    l->l_proc->p_rlimit[RLIMIT_NOFILE].rlim_cur ||
		    (u_int)newmin >= maxfiles) {
			fd_putfile(fd);
			return EINVAL;
		}
		error = fd_dup(fp, newmin, &i, cloexec);
		*retval = i;
		break;

	case F_GETFD:
		*retval = fdp->fd_dt->dt_ff[fd]->ff_exclose;
		break;

	case F_SETFD:
		fd_set_exclose(l, fd,
		    ((long)SCARG(uap, arg) & FD_CLOEXEC) != 0);
		break;

	case F_GETNOSIGPIPE:
		*retval = (fp->f_flag & FNOSIGPIPE) != 0;
		break;

	case F_SETNOSIGPIPE:
		if (SCARG(uap, arg))
			atomic_or_uint(&fp->f_flag, FNOSIGPIPE);
		else
			atomic_and_uint(&fp->f_flag, ~FNOSIGPIPE);
		*retval = 0;
		break;

	case F_GETFL:
		*retval = OFLAGS(fp->f_flag);
		break;

	case F_SETFL:
		/* XXX not guaranteed to be atomic. */
		tmp = FFLAGS((long)SCARG(uap, arg)) & FCNTLFLAGS;
		error = (*fp->f_ops->fo_fcntl)(fp, F_SETFL, &tmp);
		if (error)
			break;
		i = tmp ^ fp->f_flag;
		if (i & FNONBLOCK) {
			int flgs = tmp & FNONBLOCK;
			error = (*fp->f_ops->fo_ioctl)(fp, FIONBIO, &flgs);
			if (error) {
				(*fp->f_ops->fo_fcntl)(fp, F_SETFL,
				    &fp->f_flag);
				break;
			}
		}
		if (i & FASYNC) {
			int flgs = tmp & FASYNC;
			error = (*fp->f_ops->fo_ioctl)(fp, FIOASYNC, &flgs);
			if (error) {
				if (i & FNONBLOCK) {
					tmp = fp->f_flag & FNONBLOCK;
					(void)(*fp->f_ops->fo_ioctl)(fp,
						FIONBIO, &tmp);
				}
				(*fp->f_ops->fo_fcntl)(fp, F_SETFL,
				    &fp->f_flag);
				break;
			}
		}
		fp->f_flag = (fp->f_flag & ~FCNTLFLAGS) | tmp;
		break;

	case F_GETOWN:
		error = (*fp->f_ops->fo_ioctl)(fp, FIOGETOWN, &tmp);
		*retval = tmp;
		break;

	case F_SETOWN:
		tmp = (int)(uintptr_t) SCARG(uap, arg);
		error = (*fp->f_ops->fo_ioctl)(fp, FIOSETOWN, &tmp);
		break;

	default:
		error = EINVAL;
	}

	fd_putfile(fd);
	return (error);
}
Example #15
/*ARGSUSED*/
static void
kcpc_free(kcpc_ctx_t *ctx, int isexec)
{
	int		i;
	kcpc_set_t	*set = ctx->kc_set;

	ASSERT(set != NULL);

	atomic_or_uint(&ctx->kc_flags, KCPC_CTX_INVALID);

	if (isexec) {
		/*
		 * This thread is execing, and after the exec it should not have
		 * any performance counter context. Stop the counters properly
		 * here so the system isn't surprised by an overflow interrupt
		 * later.
		 */
		if (ctx->kc_cpuid != -1) {
			cpu_t *cp;
			/*
			 * CPU-bound context; stop the appropriate CPU's ctrs.
			 * Hold cpu_lock while examining the CPU to ensure it
			 * doesn't go away.
			 */
			mutex_enter(&cpu_lock);
			cp = cpu_get(ctx->kc_cpuid);
			/*
			 * The CPU could have been DR'd out, so only stop the
			 * CPU and clear its context pointer if the CPU still
			 * exists.
			 */
			if (cp != NULL) {
				mutex_enter(&cp->cpu_cpc_ctxlock);
				kcpc_stop_hw(ctx);
				cp->cpu_cpc_ctx = NULL;
				mutex_exit(&cp->cpu_cpc_ctxlock);
			}
			mutex_exit(&cpu_lock);
			ASSERT(curthread->t_cpc_ctx == NULL);
		} else {
			/*
			 * Thread-bound context; stop _this_ CPU's counters.
			 */
			kpreempt_disable();
			pcbe_ops->pcbe_allstop();
			atomic_or_uint(&ctx->kc_flags,
			    KCPC_CTX_INVALID_STOPPED);
			kpreempt_enable();
			curthread->t_cpc_ctx = NULL;
		}

		/*
		 * Since we are being called from an exec and we know that
		 * exec is not permitted via the agent thread, we should clean
		 * up this thread's CPC state completely, and not leave dangling
		 * CPC pointers behind.
		 */
		ASSERT(ctx->kc_thread == curthread);
		curthread->t_cpc_set = NULL;
	}

	/*
	 * Walk through each request in this context's set and free the PCBE's
	 * configuration if it exists.
	 */
	for (i = 0; i < set->ks_nreqs; i++) {
		if (set->ks_req[i].kr_config != NULL)
			pcbe_ops->pcbe_free(set->ks_req[i].kr_config);
	}

	kmem_free(set->ks_data, set->ks_nreqs * sizeof (uint64_t));
	kcpc_ctx_free(ctx);
	kcpc_free_set(set);
}
Example #16
/*ARGSUSED*/
kcpc_ctx_t *
kcpc_overflow_intr(caddr_t arg, uint64_t bitmap)
{
	kcpc_ctx_t	*ctx;
	kthread_t	*t = curthread;
	int		i;

	/*
	 * On both x86 and UltraSPARC, we may deliver the high-level
	 * interrupt in kernel mode, just after we've started to run an
	 * interrupt thread.  (That's because the hardware helpfully
	 * delivers the overflow interrupt some random number of cycles
	 * after the instruction that caused the overflow by which time
	 * we're in some part of the kernel, not necessarily running on
	 * the right thread).
	 *
	 * Check for this case here -- find the pinned thread
	 * that was running when the interrupt went off.
	 */
	if (t->t_flag & T_INTR_THREAD) {
		klwp_t *lwp;

		atomic_add_32(&kcpc_intrctx_count, 1);

		/*
		 * Note that t_lwp is always set to point at the underlying
		 * thread, thus this will work in the presence of nested
		 * interrupts.
		 */
		ctx = NULL;
		if ((lwp = t->t_lwp) != NULL) {
			t = lwptot(lwp);
			ctx = t->t_cpc_ctx;
		}
	} else
		ctx = t->t_cpc_ctx;

	if (ctx == NULL) {
		/*
		 * This can easily happen if we're using the counters in
		 * "shared" mode, for example, and an overflow interrupt
		 * occurs while we are running cpustat.  In that case, the
		 * bound thread that has the context that belongs to this
		 * CPU is almost certainly sleeping (if it was running on
		 * the CPU we'd have found it above), and the actual
		 * interrupted thread has no knowledge of performance counters!
		 */
		ctx = curthread->t_cpu->cpu_cpc_ctx;
		if (ctx != NULL) {
			/*
			 * Return the bound context for this CPU to
			 * the interrupt handler so that it can synchronously
			 * sample the hardware counters and restart them.
			 */
			return (ctx);
		}

		/*
		 * As long as the overflow interrupt really is delivered early
		 * enough after trapping into the kernel to avoid switching
		 * threads, we must always be able to find the cpc context,
		 * or something went terribly wrong, i.e. we ended up
		 * running a passivated interrupt thread or a kernel
		 * thread, or we interrupted idle, all of which are Very Bad.
		 */
		if (kcpc_nullctx_panic)
			panic("null cpc context, thread %p", (void *)t);
		atomic_add_32(&kcpc_nullctx_count, 1);
	} else if ((ctx->kc_flags & KCPC_CTX_INVALID) == 0) {
		/*
		 * Schedule an ast to sample the counters, which will
		 * propagate any overflow into the virtualized performance
		 * counter(s), and may deliver a signal.
		 */
		ttolwp(t)->lwp_pcb.pcb_flags |= CPC_OVERFLOW;
		/*
		 * If a counter has overflowed which was counting on behalf of
		 * a request which specified CPC_OVF_NOTIFY_EMT, send the
		 * process a signal.
		 */
		for (i = 0; i < cpc_ncounters; i++) {
			if (ctx->kc_pics[i].kp_req != NULL &&
			    bitmap & (1 << i) &&
			    ctx->kc_pics[i].kp_req->kr_flags &
			    CPC_OVF_NOTIFY_EMT) {
				/*
				 * A signal has been requested for this PIC, so
				 * freeze the context. The interrupt handler
				 * has already stopped the counter hardware.
				 */
				atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
				atomic_or_uint(&ctx->kc_pics[i].kp_flags,
				    KCPC_PIC_OVERFLOWED);
			}
		}
		aston(t);
	}
	return (NULL);
}
예제 #17
0
/*
 * Caller must hold kcpc_cpuctx_lock.
 */
int
kcpc_enable(kthread_t *t, int cmd, int enable)
{
	kcpc_ctx_t	*ctx = t->t_cpc_ctx;
	kcpc_set_t	*set = t->t_cpc_set;
	kcpc_set_t	*newset;
	int		i;
	int		flag;
	int		err;

	ASSERT(RW_READ_HELD(&kcpc_cpuctx_lock));

	if (ctx == NULL) {
		/*
		 * This thread has a set but no context; it must be a
		 * CPU-bound set.
		 */
		ASSERT(t->t_cpc_set != NULL);
		ASSERT(t->t_cpc_set->ks_ctx->kc_cpuid != -1);
		return (EINVAL);
	} else if (ctx->kc_flags & KCPC_CTX_INVALID)
		return (EAGAIN);

	if (cmd == CPC_ENABLE) {
		if ((ctx->kc_flags & KCPC_CTX_FREEZE) == 0)
			return (EINVAL);
		kpreempt_disable();
		atomic_and_uint(&ctx->kc_flags, ~KCPC_CTX_FREEZE);
		kcpc_restore(ctx);
		kpreempt_enable();
	} else if (cmd == CPC_DISABLE) {
		if (ctx->kc_flags & KCPC_CTX_FREEZE)
			return (EINVAL);
		kpreempt_disable();
		kcpc_save(ctx);
		atomic_or_uint(&ctx->kc_flags, KCPC_CTX_FREEZE);
		kpreempt_enable();
	} else if (cmd == CPC_USR_EVENTS || cmd == CPC_SYS_EVENTS) {
		/*
		 * Strategy for usr/sys: stop counters and update set's presets
		 * with current counter values, unbind, update requests with
		 * new config, then re-bind.
		 */
		flag = (cmd == CPC_USR_EVENTS) ?
		    CPC_COUNT_USER: CPC_COUNT_SYSTEM;

		kpreempt_disable();
		atomic_or_uint(&ctx->kc_flags,
		    KCPC_CTX_INVALID | KCPC_CTX_INVALID_STOPPED);
		pcbe_ops->pcbe_allstop();
		kpreempt_enable();
		for (i = 0; i < set->ks_nreqs; i++) {
			set->ks_req[i].kr_preset = *(set->ks_req[i].kr_data);
			if (enable)
				set->ks_req[i].kr_flags |= flag;
			else
				set->ks_req[i].kr_flags &= ~flag;
		}
		newset = kcpc_dup_set(set);
		if (kcpc_unbind(set) != 0)
			return (EINVAL);
		t->t_cpc_set = newset;
		if (kcpc_bind_thread(newset, t, &err) != 0) {
			t->t_cpc_set = NULL;
			kcpc_free_set(newset);
			return (EINVAL);
		}
	} else
		return (EINVAL);

	return (0);
}