Example #1
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct proc *p;
	struct cpu_info *ci = curcpu();

	p = curproc;
	if (p && ((p->p_flag & (P_SYSTEM | P_WEXIT)) == 0)) {
		struct process *pr = p->p_p;

		/*
		 * Run current process's virtual and profile time, as needed.
		 */
		if (CLKF_USERMODE(frame) &&
		    timerisset(&pr->ps_timer[ITIMER_VIRTUAL].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_VIRTUAL], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_ALRMPEND);
			need_proftick(p);
		}
		if (timerisset(&pr->ps_timer[ITIMER_PROF].it_value) &&
		    itimerdecr(&pr->ps_timer[ITIMER_PROF], tick) == 0) {
			atomic_setbits_int(&p->p_flag, P_PROFPEND);
			need_proftick(p);
		}
	}

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);

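	/* Force a reschedule when this CPU's round-robin quantum has expired. */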
	if (--ci->ci_schedstate.spc_rrticks <= 0)
		roundrobin(ci);

	/*
	 * If we are not the primary CPU, we're not allowed to do
	 * any more work.
	 */
	if (CPU_IS_PRIMARY(ci) == 0)
		return;

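	/* Advance the timecounter and the global tick count. */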
	tc_ticktock();
	ticks++;

	/*
	 * Update real-time timeout queue.
	 * Process callouts at a very low cpu priority, so we don't keep the
	 * relatively high clock interrupt priority any longer than necessary.
	 */
	if (timeout_hardclock_update())
		softintr_schedule(softclock_si);
}
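
A minimal, self-contained sketch of the decrement-and-rearm arithmetic behind the itimerdecr() calls above: each hardclock() tick subtracts one tick's worth of microseconds from the armed timer, and the tick on which it reaches zero is the one that sets P_ALRMPEND or P_PROFPEND. The names itimer_sketch and itimer_tick_sketch, the assumed hz of 100, and the 20 ms timer are illustrative assumptions, not the kernel's implementation (which also carries the expiry overshoot into the reloaded value).

#include <stdio.h>
#include <sys/time.h>

struct itimer_sketch {
	struct timeval it_interval;	/* reload value; zero means one-shot */
	struct timeval it_value;	/* time remaining until expiry */
};

/*
 * Subtract one tick's worth of microseconds from the timer.  Returns 0 on
 * the tick where the timer expires (re-arming it from it_interval),
 * 1 otherwise.
 */
static int
itimer_tick_sketch(struct itimer_sketch *it, long usec)
{
	long left = it->it_value.tv_sec * 1000000L + it->it_value.tv_usec;

	left -= usec;
	if (left > 0) {
		it->it_value.tv_sec = left / 1000000L;
		it->it_value.tv_usec = left % 1000000L;
		return (1);
	}
	it->it_value = it->it_interval;	/* re-arm, or disarm if zero */
	return (0);
}

int
main(void)
{
	/* A repeating 20 ms timer ticked at an assumed hz of 100. */
	struct itimer_sketch it = {
		.it_interval = { 0, 20000 },
		.it_value    = { 0, 20000 },
	};
	int i;

	for (i = 1; i <= 4; i++)
		printf("tick %d: expired=%d\n", i,
		    itimer_tick_sketch(&it, 1000000L / 100) == 0);
	return (0);
}
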
Example #2
/*
 * The real-time timer, interrupting hz times per second.
 */
void
hardclock(struct clockframe *frame)
{
	struct lwp *l;
	struct cpu_info *ci;

	ci = curcpu();
	l = ci->ci_data.cpu_onproc;

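	/* Charge the running LWP's virtual and profiling interval timers. */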
	timer_tick(l, CLKF_USERMODE(frame));

	/*
	 * If no separate statistics clock is available, run it from here.
	 */
	if (stathz == 0)
		statclock(frame);
	/*
	 * If no separate schedclock is provided, call it here
	 * at about 16 Hz.
	 */
	if (schedhz == 0) {
		if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
			schedclock(l);
			ci->ci_schedstate.spc_schedticks = hardscheddiv;
		}
	}
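	/* Let the scheduler handle time-slice expiry when this CPU's countdown runs out. */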
	if ((--ci->ci_schedstate.spc_ticks) <= 0)
		sched_tick(ci);

	if (CPU_IS_PRIMARY(ci)) {
		hardclock_ticks++;
		tc_ticktock();
	}

	/*
	 * Update real-time timeout queue.
	 */
	callout_hardclock();

#ifdef KDTRACE_HOOKS
	cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)];
	if (func) {
		(*func)((struct clockframe *)frame);
	}
#endif
}
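
The "about 16 Hz" above comes from a simple countdown divider: spc_schedticks is decremented on every hardclock() tick and reloaded from hardscheddiv whenever it reaches zero, so schedclock() runs roughly hz / hardscheddiv times per second. The short program below only demonstrates that arithmetic; the hz of 100 and the hz / 16 divider are assumptions for illustration (the kernel computes hardscheddiv at boot).

#include <stdio.h>

int
main(void)
{
	int hz = 100;			/* assumed hardclock() rate */
	int hardscheddiv = hz / 16;	/* assumed divider, here 6 */
	int schedticks = hardscheddiv;
	int t, calls = 0;

	/* Simulate one second of hardclock() ticks. */
	for (t = 0; t < hz; t++) {
		if (--schedticks <= 0) {
			calls++;	/* schedclock(l) would run here */
			schedticks = hardscheddiv;
		}
	}
	printf("hz=%d hardscheddiv=%d -> %d schedclock() calls per second\n",
	    hz, hardscheddiv, calls);
	return (0);
}
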
Example #3
/*
 * The real-time timer, interrupting hz times per second.
 */
void
uinet_hardclock(void)
{

	atomic_add_int((volatile int *)&ticks, 1);

	/* hardclock_cpu(usermode);
	 *
	 * There is no need for the process accounting done in
	 * hardclock_cpu().  We only need the callout_tick() call, which is
	 * reproduced below.
	 */

	callout_tick();
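	/* Advance the timecounter by one tick. */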
	tc_ticktock(1);

	/* cpu_tick_calibration();
	 *
	 * There is no need for cpu_tick_calibration(), as we are operating
	 * as a fixed-rate ticker.
	 */

	/*
	 * If no separate statistics clock is available, run it from here.
	 *
	 * XXX: this only works for UP
	 */
	/* if (stathz == 0) {
	 *	profclock(usermode, pc);
	 *	statclock(usermode);
	 * }
	 *
	 * No need for profclock or statclock support.
	 */

#ifndef UINET  /* No DEVICE_POLLING or SW_WATCHDOG support under UINET */
#ifdef DEVICE_POLLING
	hardclock_device_poll();	/* this is very short and quick */
#endif /* DEVICE_POLLING */
#ifdef SW_WATCHDOG
	if (watchdog_enabled > 0 && --watchdog_ticks <= 0)
		watchdog_fire();
#endif /* SW_WATCHDOG */
#endif /* UINET */
}
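
uinet_hardclock() expects to be driven at a fixed rate from userland rather than from a hardware clock interrupt. The sketch below shows one way such a fixed-rate tick loop can be built with an absolute-deadline sleep so the period does not accumulate drift; the SKETCH_HZ value, the tick_fn placeholder, and the loop itself are assumptions for illustration, not libuinet's actual driver.

#include <time.h>

#define SKETCH_HZ	100			/* assumed tick rate */

static void
tick_fn(void)
{
	/* A real driver would call uinet_hardclock() here. */
}

int
main(void)
{
	struct timespec next;
	long period_ns = 1000000000L / SKETCH_HZ;
	int i;

	clock_gettime(CLOCK_MONOTONIC, &next);
	for (i = 0; i < 5 * SKETCH_HZ; i++) {	/* run for about 5 seconds */
		/* Advance the absolute deadline by one tick period. */
		next.tv_nsec += period_ns;
		if (next.tv_nsec >= 1000000000L) {
			next.tv_nsec -= 1000000000L;
			next.tv_sec++;
		}
		/* Sleeping to an absolute deadline avoids cumulative drift. */
		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
		tick_fn();
	}
	return (0);
}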