Example #1
File: kern_clock.c  Project: ryo/netbsd-src
/*
 * Independent profiling "tick" in case we're using a separate
 * clock or profiling event source.  Currently, that's just
 * performance counters--hence the wrapper.
 */
void
proftick(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct lwp *l;
	struct proc *p;

	l = curcpu()->ci_data.cpu_onproc;
	p = (l ? l->l_proc : NULL);
	if (CLKF_USERMODE(frame)) {
		mutex_spin_enter(&p->p_stmutex);
		if (p->p_stflag & PST_PROFIL)
			addupc_intr(l, CLKF_PC(frame));
		mutex_spin_exit(&p->p_stmutex);
	} else {
#ifdef GPROF
		g = &_gmonparam;
		if (g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p != NULL && (p->p_stflag & PST_PROFIL) != 0)
			addupc_intr(l, LWP_PC(l));
#endif
	}
}
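
Note: the GPROF branch above folds the sampled program counter into the gmon
histogram, one bucket per HISTFRACTION * sizeof(*kcount) bytes of kernel text.
The user-space sketch below mirrors that index calculation; the demo_gmonparam
structure, the demo_hist_sample() helper and the HISTFRACTION value of 2 are
illustrative assumptions, not the kernel's actual gmon configuration.

#include <stdint.h>
#include <stdio.h>

#define DEMO_HISTFRACTION 2		/* assumed sampling fraction */

struct demo_gmonparam {
	uintptr_t lowpc;		/* start of profiled text */
	uintptr_t textsize;		/* size of profiled text */
	uint16_t  kcount[1024];		/* histogram buckets */
};

/* Mirror of the kcount[] update done in proftick()/statclock(). */
static void
demo_hist_sample(struct demo_gmonparam *g, uintptr_t pc)
{
	uintptr_t i = pc - g->lowpc;

	if (i < g->textsize) {
		/* one bucket covers DEMO_HISTFRACTION * sizeof(*kcount) bytes */
		i /= DEMO_HISTFRACTION * sizeof(*g->kcount);
		g->kcount[i]++;
	}
}

int
main(void)
{
	struct demo_gmonparam g = { .lowpc = 0x1000, .textsize = 4096 };

	demo_hist_sample(&g, 0x1044);	/* hypothetical sampled PC */
	printf("bucket 17 count: %u\n", (unsigned)g.kcount[17]);
	return 0;
}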
Example #2
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();

	p = curproc;
	if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
		struct process *pr = p->p_p;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_ALRMPEND);
			need_proftick(p);
		}
		if (timerisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_PROF], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_PROFPEND);
			need_proftick(p);
		}
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;

	tc_ticktock();
	ticks++;

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (timeout_hardclock_update())
		softintr_schedule(softclock_si);
}
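
Note: the ITIMER_VIRTUAL/ITIMER_PROF handling above decrements a per-process
interval timer by tick microseconds on every hardclock and marks a signal
pending once the countdown reaches zero. The stand-alone sketch below shows
that decrement-and-rearm pattern in simplified form; demo_itimer and
demo_itimerdecr() are hypothetical names, and the real itimerdecr() also
handles partial-tick carry that is omitted here.

#include <stdbool.h>
#include <stdio.h>

/* Simplified countdown timer, counted in microseconds. */
struct demo_itimer {
	long it_value;		/* time until next expiry (us); 0 = disarmed */
	long it_interval;	/* reload value for periodic timers (us) */
};

/*
 * Subtract one hardclock tick's worth of microseconds.  Returns true when
 * the timer expired on this tick, mirroring the itimerdecr() == 0 test.
 */
static bool
demo_itimerdecr(struct demo_itimer *it, long usec)
{
	if (it->it_value == 0)
		return false;			/* not armed */
	if (it->it_value > usec) {
		it->it_value -= usec;
		return false;			/* still counting down */
	}
	/* expired: rearm from it_interval (0 leaves it disarmed) */
	it->it_value = it->it_interval;
	return true;
}

int
main(void)
{
	struct demo_itimer t = { .it_value = 30000, .it_interval = 30000 };
	long tick_us = 10000;			/* hypothetical 100 Hz hardclock */

	for (int n = 1; n <= 6; n++)
		if (demo_itimerdecr(&t, tick_us))
			printf("expired on tick %d\n", n);	/* ticks 3 and 6 */
	return 0;
}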
Example #3
File: kern_clock.c  Project: ryo/netbsd-src
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct cpu_info *ci;

	ci = curcpu();
	l = ci->ci_data.cpu_onproc;

	timer_tick(l, CLKF_USERMODE(frame));

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	/*
	 * If no separate schedclock is provided, call it here
	 * at about 16 Hz.
	 */
	if (schedhz == 0) {
		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
			schedclock(l);
			ci->ci_schedstate.spc_schedticks = hardscheddiv;
		}
	}
	if ((--ci->ci_schedstate.spc_ticks) <= 0)
		sched_tick(ci);

	if (CPU_IS_PRIMARY(ci)) {
		hardclock_ticks++;
		tc_ticktock();
	}

	/*
	 * Update real-time timeout queue.
	 */
	callout_hardclock();

#ifdef KDTRACE_HOOKS
	cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)];
	if (func) {
		(*func)((struct clockframe *)frame);
	}
#endif
}
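
Note: when no machine-dependent schedclock source is configured (schedhz == 0),
the countdown above invokes schedclock() roughly 16 times per second by
reloading spc_schedticks from hardscheddiv, which the kernel derives from hz
elsewhere (approximately hz / 16). A minimal simulation of that divider, using
assumed rates:

#include <stdio.h>

int
main(void)
{
	int hz = 100;			/* assumed hardclock rate */
	int hardscheddiv = hz / 16;	/* reload value, roughly 16 Hz */
	int schedticks = hardscheddiv;
	int calls = 0;

	/* simulate one second of hardclock interrupts */
	for (int t = 0; t < hz; t++) {
		if (--schedticks <= 0) {
			calls++;	/* where schedclock(l) would run */
			schedticks = hardscheddiv;
		}
	}
	printf("schedclock calls in 1s: %d\n", calls);	/* about 16 */
	return 0;
}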
Example #4
/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	u_long i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p = curproc;
	struct process *pr;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}

	if (CLKF_USERMODE(frame)) {
		pr = p->p_p;
		if (pr->ps_flags & PS_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (pr->ps_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = ci->ci_gmon;
		if (g != NULL && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(PROC_PC)
		if (p != NULL && p->p_p->ps_flags & PS_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL && p != spc->spc_idleproc) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		p->p_cpticks++;
		/*
		 * If no schedclock is provided, call it here at ~12-25 Hz;
		 * ~16 Hz is best
		 */
		if (schedhz == 0) {
			if ((++curcpu()->ci_schedstate.spc_schedticks & 3) ==
			    0)
				schedclock(p);
		}
	}
}
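
Note: the spc_pscnt/psdiv bookkeeping in statclock() lets one handler run at
the faster profhz while any process is being profiled, yet still charge
CPU-time statistics at the slower stathz: only every psdiv-th invocation gets
past the early return. A stand-alone sketch of that divider, with assumed
clock rates (OpenBSD derives the actual ratio from profhz and stathz at boot):

#include <stdio.h>

int
main(void)
{
	int stathz = 128, profhz = 1024;	/* assumed rates */
	int psdiv = profhz / stathz;		/* 8: clock runs 8x faster while profiling */
	int pscnt = psdiv;
	int stat_updates = 0;

	/* simulate one second of statclock() interrupts at profhz */
	for (int t = 0; t < profhz; t++) {
		/* a profiling sample would be taken on every interrupt here */
		if (--pscnt > 0)
			continue;		/* the early return in statclock() */
		stat_updates++;			/* cp_time[] / p_uticks accounting */
		pscnt = psdiv;
	}
	printf("statistics updates in 1s: %d (target stathz = %d)\n",
	    stat_updates, stathz);
	return 0;
}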
Example #5
File: kern_clock.c  Project: ryo/netbsd-src
/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;
	struct lwp *l;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = ci->ci_data.cpu_onproc;
	if ((l->l_flag & LW_IDLE) != 0) {
		/*
		 * don't account idle lwps as swapper.
		 */
		p = NULL;
	} else {
		p = l->l_proc;
		mutex_spin_enter(&p->p_stmutex);
	}

	if (CLKF_USERMODE(frame)) {
		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
			addupc_intr(l, CLKF_PC(frame));
		if (--spc->spc_pscnt > 0) {
			mutex_spin_exit(&p->p_stmutex);
			return;
		}

		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		if (p != NULL && profsrc == PROFSRC_CLOCK &&
		    (p->p_stflag & PST_PROFIL)) {
			addupc_intr(l, LWP_PC(l));
		}
#endif
		if (--spc->spc_pscnt > 0) {
			if (p != NULL)
				mutex_spin_exit(&p->p_stmutex);
			return;
		}
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame) || (curlwp->l_pflag & LP_INTR) != 0) {
			if (p != NULL) {
				p->p_iticks++;
			}
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else {
			spc->spc_cp_time[CP_IDLE]++;
		}
	}
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		atomic_inc_uint(&l->l_cpticks);
		mutex_spin_exit(&p->p_stmutex);
	}
}
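
Note: both statclock() variants charge each statistics tick to exactly one of
the cp_time[] buckets (user, nice, system, interrupt, idle), which is the raw
data later reported by tools such as top(1) and vmstat(8). The reduced sketch
below shows that classification; charge_tick() and its boolean flags are
illustrative stand-ins for the CLKF_USERMODE/CLKF_INTR tests and the nice and
idle checks in the code above.

#include <stdbool.h>
#include <stdio.h>

enum { CP_USER, CP_NICE, CP_SYS, CP_INTR, CP_IDLE, CPUSTATES };

/* Charge one statistics tick to a single bucket, as statclock() does. */
static void
charge_tick(long cp_time[CPUSTATES], bool usermode, bool niced,
    bool in_interrupt, bool idle)
{
	if (usermode)
		cp_time[niced ? CP_NICE : CP_USER]++;
	else if (in_interrupt)
		cp_time[CP_INTR]++;
	else if (!idle)
		cp_time[CP_SYS]++;
	else
		cp_time[CP_IDLE]++;
}

int
main(void)
{
	long cp_time[CPUSTATES] = { 0 };

	charge_tick(cp_time, true, false, false, false);	/* user tick */
	charge_tick(cp_time, false, false, true, false);	/* interrupt tick */
	charge_tick(cp_time, false, false, false, true);	/* idle tick */
	printf("user=%ld intr=%ld idle=%ld\n",
	    cp_time[CP_USER], cp_time[CP_INTR], cp_time[CP_IDLE]);
	return 0;
}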