Example 1
/*
 * Get the time accounting information for the calling LWP.
 */
int
lwp_info(timestruc_t *tvp)
{
	timestruc_t tv[2];
	hrtime_t hrutime, hrstime;
	klwp_t *lwp = ttolwp(curthread);

	hrutime = lwp->lwp_mstate.ms_acct[LMS_USER];
	hrstime = lwp->lwp_mstate.ms_acct[LMS_SYSTEM] +
	    lwp->lwp_mstate.ms_acct[LMS_TRAP];
	scalehrtime(&hrutime);
	scalehrtime(&hrstime);

	hrt2ts(hrutime, &tv[0]);
	hrt2ts(hrstime, &tv[1]);

	if (get_udatamodel() == DATAMODEL_NATIVE) {
		if (copyout(tv, tvp, sizeof (tv)))
			return (set_errno(EFAULT));
	} else {
		timestruc32_t tv32[2];

		if (TIMESPEC_OVERFLOW(&tv[0]) ||
		    TIMESPEC_OVERFLOW(&tv[1]))
			return (set_errno(EOVERFLOW));	/* unlikely */

		TIMESPEC_TO_TIMESPEC32(&tv32[0], &tv[0]);
		TIMESPEC_TO_TIMESPEC32(&tv32[1], &tv[1]);

		if (copyout(tv32, tvp, sizeof (tv32)))
			return (set_errno(EFAULT));
	}
	return (0);
}
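
The conversion pattern above (read an unscaled ms_acct counter, scale it with scalehrtime(), then convert it to a timestruc_t with hrt2ts()) can be isolated into a small helper. The sketch below is illustrative only; lwp_user_time is a hypothetical name and the caller is assumed to pass a valid klwp_t pointer.

/*
 * Illustrative helper (hypothetical, not part of the original source):
 * convert one unscaled microstate counter into a timestruc_t, using the
 * same scalehrtime()/hrt2ts() pattern as lwp_info() above.
 */
static void
lwp_user_time(klwp_t *lwp, timestruc_t *ts)
{
	hrtime_t hrutime = lwp->lwp_mstate.ms_acct[LMS_USER];

	scalehrtime(&hrutime);		/* unscaled ticks -> nanoseconds */
	hrt2ts(hrutime, ts);		/* nanoseconds -> tv_sec/tv_nsec */
}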
Example 2
/*
 * Return the amount of onproc and runnable time this thread has experienced.
 *
 * Because the fields we read are not protected by locks when updated
 * by the thread itself, this is an inherently racy interface.  In
 * particular, the ASSERT(THREAD_LOCK_HELD(t)) doesn't guarantee as much
 * as it might appear to.
 *
 * The implication for users of this interface is that onproc and runnable
 * are *NOT* monotonically increasing; they may temporarily be larger than
 * they should be.
 */
void
mstate_systhread_times(kthread_t *t, hrtime_t *onproc, hrtime_t *runnable)
{
	struct mstate	*const	ms = &ttolwp(t)->lwp_mstate;

	int		mstate;
	hrtime_t	now;
	hrtime_t	state_start;
	hrtime_t	waitrq;
	hrtime_t	aggr_onp;
	hrtime_t	aggr_run;

	ASSERT(THREAD_LOCK_HELD(t));
	ASSERT(t->t_procp->p_flag & SSYS);
	ASSERT(ttolwp(t) != NULL);

	/* shouldn't be any non-SYSTEM on-CPU time */
	ASSERT(ms->ms_acct[LMS_USER] == 0);
	ASSERT(ms->ms_acct[LMS_TRAP] == 0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	state_start = ms->ms_state_start;

	aggr_onp = ms->ms_acct[LMS_SYSTEM];
	aggr_run = ms->ms_acct[LMS_WAIT_CPU];

	now = gethrtime_unscaled();

	/* if waitrq == 0, then there is no time to account to TS_RUN */
	if (waitrq == 0)
		waitrq = now;

	/* If there is system time to accumulate, do so */
	if (mstate == LMS_SYSTEM && state_start < waitrq)
		aggr_onp += waitrq - state_start;

	if (waitrq < now)
		aggr_run += now - waitrq;

	scalehrtime(&aggr_onp);
	scalehrtime(&aggr_run);

	*onproc = aggr_onp;
	*runnable = aggr_run;
}
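
Because onproc and runnable are not monotonically increasing, a consumer that exports cumulative figures typically clamps against the last values it reported. A minimal caller sketch follows; systhread_times_monotonic, last_onproc, and last_runnable are hypothetical names, and t is assumed to be a system thread with an LWP, per the ASSERTs above.

static void
systhread_times_monotonic(kthread_t *t, hrtime_t *last_onproc,
    hrtime_t *last_runnable)
{
	hrtime_t onproc, runnable;

	thread_lock(t);		/* mstate_systhread_times() asserts this */
	mstate_systhread_times(t, &onproc, &runnable);
	thread_unlock(t);

	/* Only move the exported values forward. */
	if (onproc > *last_onproc)
		*last_onproc = onproc;
	if (runnable > *last_runnable)
		*last_runnable = runnable;
}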
Example 3
/*
 * Return an aggregation of microstate times in scaled nanoseconds (high-res
 * time).  This keeps in mind that p_acct is already scaled, and ms_acct is
 * not.
 */
hrtime_t
mstate_aggr_state(proc_t *p, int a_state)
{
	struct mstate *ms;
	kthread_t *t;
	klwp_t *lwp;
	hrtime_t aggr_time;
	hrtime_t scaledtime;

	ASSERT(MUTEX_HELD(&p->p_lock));
	ASSERT((unsigned)a_state < NMSTATES);

	aggr_time = p->p_acct[a_state];
	if (a_state == LMS_SYSTEM)
		aggr_time += p->p_acct[LMS_TRAP];

	t = p->p_tlist;
	if (t == NULL)
		return (aggr_time);

	do {
		if (t->t_proc_flag & TP_LWPEXIT)
			continue;

		lwp = ttolwp(t);
		ms = &lwp->lwp_mstate;
		scaledtime = ms->ms_acct[a_state];
		scalehrtime(&scaledtime);
		aggr_time += scaledtime;
		if (a_state == LMS_SYSTEM) {
			scaledtime = ms->ms_acct[LMS_TRAP];
			scalehrtime(&scaledtime);
			aggr_time += scaledtime;
		}
	} while ((t = t->t_forw) != p->p_tlist);

	return (aggr_time);
}
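
A typical caller sums the user and system aggregates while holding p_lock, since mstate_aggr_state() asserts that lock. The helper below is a sketch with a hypothetical name; it returns scaled nanoseconds, as the function above does.

static hrtime_t
proc_usr_sys_time(proc_t *p)
{
	hrtime_t total;

	mutex_enter(&p->p_lock);	/* mstate_aggr_state() asserts p_lock */
	total = mstate_aggr_state(p, LMS_USER) +
	    mstate_aggr_state(p, LMS_SYSTEM);
	mutex_exit(&p->p_lock);

	return (total);			/* scaled nanoseconds */
}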
Example 4
/*
 * Return an aggregation of user and system CPU time consumed by
 * the specified thread in scaled nanoseconds.
 */
hrtime_t
mstate_thread_onproc_time(kthread_t *t)
{
	hrtime_t aggr_time;
	hrtime_t now;
	hrtime_t waitrq;
	hrtime_t state_start;
	struct mstate *ms;
	klwp_t *lwp;
	int	mstate;

	ASSERT(THREAD_LOCK_HELD(t));

	if ((lwp = ttolwp(t)) == NULL)
		return (0);

	mstate = t->t_mstate;
	waitrq = t->t_waitrq;
	ms = &lwp->lwp_mstate;
	state_start = ms->ms_state_start;

	aggr_time = ms->ms_acct[LMS_USER] +
	    ms->ms_acct[LMS_SYSTEM] + ms->ms_acct[LMS_TRAP];

	now = gethrtime_unscaled();

	/*
	 * NOTE: gethrtime_unscaled on X86 taken on different CPUs is
	 * inconsistent, so it is possible that now < state_start.
	 */
	if (mstate == LMS_USER || mstate == LMS_SYSTEM || mstate == LMS_TRAP) {
		/* if waitrq is zero, count all of the time. */
		if (waitrq == 0) {
			waitrq = now;
		}

		if (waitrq > state_start) {
			aggr_time += waitrq - state_start;
		}
	}

	scalehrtime(&aggr_time);
	return (aggr_time);
}
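
The thread-list walk used in Example 3 can be combined with mstate_thread_onproc_time() to total on-CPU time for a whole process. The sketch below assumes the caller already holds p_lock to keep p_tlist stable; proc_onproc_time is a hypothetical name.

static hrtime_t
proc_onproc_time(proc_t *p)
{
	kthread_t *t;
	hrtime_t total = 0;

	ASSERT(MUTEX_HELD(&p->p_lock));	/* keeps the thread list stable */

	if ((t = p->p_tlist) == NULL)
		return (0);
	do {
		thread_lock(t);	/* required by mstate_thread_onproc_time() */
		total += mstate_thread_onproc_time(t);
		thread_unlock(t);
	} while ((t = t->t_forw) != p->p_tlist);

	return (total);			/* scaled nanoseconds */
}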
Example 5
static int
fipe_kstat_update(kstat_t *ksp, int rw)
{
	struct fipe_kstat_s *sp;
	hrtime_t hrt;

	if (rw == KSTAT_WRITE) {
		return (EACCES);
	}

	sp = ksp->ks_data;
	sp->fipe_enabled.value.i32 = fipe_gbl_ctrl.pm_enabled ? 1 : 0;
	sp->fipe_policy.value.i32 = fipe_pm_policy;

	hrt = fipe_gbl_ctrl.time_in_pm;
	scalehrtime(&hrt);
	sp->fipe_pm_time.value.ui64 = (uint64_t)hrt;

#ifdef	FIPE_KSTAT_DETAIL
	sp->ioat_ready.value.i32 = fipe_ioat_ctrl.ioat_ready ? 1 : 0;
#endif	/* FIPE_KSTAT_DETAIL */

	return (0);
}
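
From userland, the values published by fipe_kstat_update() can be read through libkstat. The sketch below uses the module/instance/name triple ("fipe", 0, "fipe-pm") passed to kstat_create() in the next example; the statistic name string "fipe_pm_time" is an assumption, since the fipe_kstat initializer is not shown here.

#include <kstat.h>
#include <stdio.h>

int
print_fipe_pm_time(void)
{
	kstat_ctl_t *kc;
	kstat_t *ksp;
	kstat_named_t *kn;

	if ((kc = kstat_open()) == NULL)
		return (-1);
	ksp = kstat_lookup(kc, "fipe", 0, "fipe-pm");
	if (ksp == NULL || kstat_read(kc, ksp, NULL) == -1) {
		(void) kstat_close(kc);
		return (-1);
	}
	/* "fipe_pm_time" is assumed; match it to the kstat_named array. */
	kn = kstat_data_lookup(ksp, "fipe_pm_time");
	if (kn != NULL)
		(void) printf("time in pm: %llu ns\n",
		    (u_longlong_t)kn->value.ui64);
	(void) kstat_close(kc);
	return (0);
}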
Example 6
/*
 * Initialize memory power management subsystem.
 * Note: This function should only be called from ATTACH.
 * Note: caller must ensure exclusive access to all fipe_xxx interfaces.
 */
int
fipe_init(dev_info_t *dip)
{
	size_t nsize;
	hrtime_t hrt;

	/* Initialize global control structure. */
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));
	mutex_init(&fipe_gbl_ctrl.lock, NULL, MUTEX_DRIVER, NULL);

	/* Query power management policy from device property. */
	fipe_pm_policy = ddi_prop_get_int(DDI_DEV_T_ANY, dip, 0,
	    FIPE_PROP_PM_POLICY, fipe_pm_policy);
	if (fipe_pm_policy < 0 || fipe_pm_policy >= FIPE_PM_POLICY_MAX) {
		cmn_err(CE_CONT,
		    "?fipe: invalid power management policy %d.\n",
		    fipe_pm_policy);
		fipe_pm_policy = FIPE_PM_POLICY_BALANCE;
	}
	fipe_profile_curr = &fipe_profiles[fipe_pm_policy];

	/*
	 * Compute unscaled hrtime value corresponding to FIPE_STAT_INTERVAL.
	 * (1 << 36) should be big enough here.
	 */
	hrt = 1ULL << 36;
	scalehrtime(&hrt);
	fipe_idle_ctrl.tick_interval = FIPE_STAT_INTERVAL * (1ULL << 36) / hrt;
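	/*
	 * After scalehrtime(), hrt holds the number of nanoseconds that
	 * (1 << 36) unscaled ticks represent, so nanoseconds-per-tick is
	 * hrt / 2^36.  Dividing FIPE_STAT_INTERVAL (taken here as a
	 * nanosecond duration) by that yields
	 * FIPE_STAT_INTERVAL * 2^36 / hrt unscaled ticks, which is the
	 * value stored in tick_interval.
	 */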

	if (fipe_mc_init(dip) != 0) {
		cmn_err(CE_WARN, "!fipe: failed to initialize mc state.");
		goto out_mc_error;
	}
	if (fipe_ioat_init() != 0) {
		cmn_err(CE_NOTE, "!fipe: failed to initialize ioat state.");
		goto out_ioat_error;
	}

	/* Allocate per-CPU structure. */
	nsize = max_ncpus * sizeof (fipe_cpu_state_t);
	nsize += CPU_CACHE_COHERENCE_SIZE;
	fipe_gbl_ctrl.state_buf = kmem_zalloc(nsize, KM_SLEEP);
	fipe_gbl_ctrl.state_size = nsize;
	fipe_cpu_states = (fipe_cpu_state_t *)P2ROUNDUP(
	    (intptr_t)fipe_gbl_ctrl.state_buf, CPU_CACHE_COHERENCE_SIZE);

#ifdef	FIPE_KSTAT_SUPPORT
	fipe_gbl_ctrl.fipe_kstat = kstat_create("fipe", 0, "fipe-pm", "misc",
	    KSTAT_TYPE_NAMED, sizeof (fipe_kstat) / sizeof (kstat_named_t),
	    KSTAT_FLAG_VIRTUAL);
	if (fipe_gbl_ctrl.fipe_kstat == NULL) {
		cmn_err(CE_CONT, "?fipe: failed to create kstat object.\n");
	} else {
		fipe_gbl_ctrl.fipe_kstat->ks_lock = &fipe_gbl_ctrl.lock;
		fipe_gbl_ctrl.fipe_kstat->ks_data = &fipe_kstat;
		fipe_gbl_ctrl.fipe_kstat->ks_update = fipe_kstat_update;
		kstat_install(fipe_gbl_ctrl.fipe_kstat);
	}
#endif	/* FIPE_KSTAT_SUPPORT */

	return (0);

out_ioat_error:
	fipe_mc_fini();
out_mc_error:
	mutex_destroy(&fipe_gbl_ctrl.lock);
	bzero(&fipe_gbl_ctrl, sizeof (fipe_gbl_ctrl));

	return (-1);
}