static int slugled_tmr(void *arg) { struct clockframe *cf = arg; uint32_t reg, bit; int is; if (CLKF_INTR(cf) || sched_curcpu_runnable_p() || (curlwp != NULL && curlwp != curcpu()->ci_data.cpu_idlelwp)) bit = LEDBITS_STATUS; else bit = 0; is = disable_interrupts(I32_bit); reg = GPIO_CONF_READ_4(ixp425_softc, IXP425_GPIO_GPOUTR); reg &= ~LEDBITS_STATUS; GPIO_CONF_WRITE_4(ixp425_softc, IXP425_GPIO_GPOUTR, reg | bit); restore_interrupts(is); return (1); }
/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 *
 * Called from the statistics-clock interrupt with the interrupted
 * context described by `frame`.  Every spc_pscnt-th tick it charges
 * the tick to one of the CP_* time buckets on this CPU and bumps the
 * per-process tick counters used by the scheduler.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	u_long i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p = curproc;
	struct process *pr;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.  psdiv == 1 means no process is being
	 * profiled, so the slower stathz rate suffices; otherwise run
	 * at profhz for profiling resolution.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}

	if (CLKF_USERMODE(frame)) {
		pr = p->p_p;
		/* Profiling sample: record the user-mode PC. */
		if (pr->ps_flags & PS_PROFIL)
			addupc_intr(p, CLKF_PC(frame));
		/* Only every psdiv-th tick counts toward statistics. */
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (pr->ps_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 * Bucket the interrupted kernel PC into the gprof histogram
		 * if it falls within the profiled text range.
		 */
		g = ci->ci_gmon;
		if (g != NULL && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#if defined(PROC_PC)
		/*
		 * Machine provides the saved user PC even for kernel-mode
		 * ticks; charge the profiling sample to the user program.
		 */
		if (p != NULL && p->p_p->ps_flags & PS_PROFIL)
			addupc_intr(p, PROC_PC(p));
#endif
		if (--spc->spc_pscnt > 0)
			return;
		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame)) {
			if (p != NULL)
				p->p_iticks++;
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL && p != spc->spc_idleproc) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else
			spc->spc_cp_time[CP_IDLE]++;
	}
	/* Reload the divider for the next accounting period. */
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		p->p_cpticks++;
		/*
		 * If no schedclock is provided, call it here at ~~12-25 Hz;
		 * ~~16 Hz is best
		 */
		if (schedhz == 0) {
			if ((++curcpu()->ci_schedstate.spc_schedticks & 3) == 0)
				schedclock(p);
		}
	}
}
/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 *
 * lwp-based variant: the tick is attributed to the lwp currently on
 * this CPU.  Idle lwps are not charged to any process (p == NULL).
 * When a process is charged, its per-process statistics are updated
 * under p->p_stmutex, which is released on every exit path below.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
	struct gmonparam *g;
	intptr_t i;
#endif
	struct cpu_info *ci = curcpu();
	struct schedstate_percpu *spc = &ci->ci_schedstate;
	struct proc *p;
	struct lwp *l;

	/*
	 * Notice changes in divisor frequency, and adjust clock
	 * frequency accordingly.  psdiv == 1 selects the normal stathz
	 * rate; any other divisor means profiling is active at profhz.
	 */
	if (spc->spc_psdiv != psdiv) {
		spc->spc_psdiv = psdiv;
		spc->spc_pscnt = psdiv;
		if (psdiv == 1) {
			setstatclockrate(stathz);
		} else {
			setstatclockrate(profhz);
		}
	}
	l = ci->ci_data.cpu_onproc;
	if ((l->l_flag & LW_IDLE) != 0) {
		/*
		 * don't account idle lwps as swapper.
		 */
		p = NULL;
	} else {
		p = l->l_proc;
		/* Held until the tick's statistics are committed. */
		mutex_spin_enter(&p->p_stmutex);
	}

	if (CLKF_USERMODE(frame)) {
		/*
		 * NOTE(review): this branch dereferences p without a NULL
		 * check — it assumes an idle lwp can never take a tick in
		 * user mode; confirm against the port's CLKF_USERMODE.
		 */
		if ((p->p_stflag & PST_PROFIL) && profsrc == PROFSRC_CLOCK)
			addupc_intr(l, CLKF_PC(frame));
		/* Only every psdiv-th tick counts toward statistics. */
		if (--spc->spc_pscnt > 0) {
			mutex_spin_exit(&p->p_stmutex);
			return;
		}

		/*
		 * Came from user mode; CPU was in user state.
		 * If this process is being profiled record the tick.
		 */
		p->p_uticks++;
		if (p->p_nice > NZERO)
			spc->spc_cp_time[CP_NICE]++;
		else
			spc->spc_cp_time[CP_USER]++;
	} else {
#ifdef GPROF
		/*
		 * Kernel statistics are just like addupc_intr, only easier.
		 * Bucket the interrupted kernel PC into the gprof histogram
		 * if it falls within the profiled text range.
		 */
		g = &_gmonparam;
		if (profsrc == PROFSRC_CLOCK && g->state == GMON_PROF_ON) {
			i = CLKF_PC(frame) - g->lowpc;
			if (i < g->textsize) {
				i /= HISTFRACTION * sizeof(*g->kcount);
				g->kcount[i]++;
			}
		}
#endif
#ifdef LWP_PC
		/*
		 * Machine provides the lwp's saved user PC even for
		 * kernel-mode ticks; charge the profiling sample there.
		 */
		if (p != NULL && profsrc == PROFSRC_CLOCK &&
		    (p->p_stflag & PST_PROFIL)) {
			addupc_intr(l, LWP_PC(l));
		}
#endif
		if (--spc->spc_pscnt > 0) {
			/* p is NULL for idle lwps; no mutex was taken. */
			if (p != NULL)
				mutex_spin_exit(&p->p_stmutex);
			return;
		}

		/*
		 * Came from kernel mode, so we were:
		 * - handling an interrupt,
		 * - doing syscall or trap work on behalf of the current
		 *   user process, or
		 * - spinning in the idle loop.
		 * Whichever it is, charge the time as appropriate.
		 * Note that we charge interrupts to the current process,
		 * regardless of whether they are ``for'' that process,
		 * so that we know how much of its real time was spent
		 * in ``non-process'' (i.e., interrupt) work.
		 */
		if (CLKF_INTR(frame) || (curlwp->l_pflag & LP_INTR) != 0) {
			if (p != NULL) {
				p->p_iticks++;
			}
			spc->spc_cp_time[CP_INTR]++;
		} else if (p != NULL) {
			p->p_sticks++;
			spc->spc_cp_time[CP_SYS]++;
		} else {
			spc->spc_cp_time[CP_IDLE]++;
		}
	}
	/* Reload the divider for the next accounting period. */
	spc->spc_pscnt = psdiv;

	if (p != NULL) {
		/* Atomic: l_cpticks is also read/cleared by the scheduler. */
		atomic_inc_uint(&l->l_cpticks);
		mutex_spin_exit(&p->p_stmutex);
	}
}