/*
 * The real-time timer, interrupting hz times per second.
 *
 * Called from the clock interrupt path with the machine-dependent
 * clock frame.  Drives the statistics clock and scheduler clock when
 * no separate hardware source exists for them, advances the global
 * tick count / timecounter on the primary CPU only, and runs the
 * callout (timeout) machinery.
 */
void
hardclock(struct clockframe *frame)
{
    struct lwp *l;
    struct cpu_info *ci;

    ci = curcpu();
    /* LWP currently running on this CPU; charged for this tick. */
    l = ci->ci_data.cpu_onproc;

    timer_tick(l, CLKF_USERMODE(frame));

    /*
     * If no separate statistics clock is available, run it from here.
     */
    if (stathz == 0)
        statclock(frame);
    /*
     * If no separate schedclock is provided, call it here
     * at about 16 Hz.
     */
    if (schedhz == 0) {
        /*
         * NOTE(review): the cast suggests spc_schedticks is an
         * unsigned type; the (int) forces a signed <= 0 test after
         * the decrement — confirm against the schedstate declaration.
         */
        if ((int)(--ci->ci_schedstate.spc_schedticks) <= 0) {
            schedclock(l);
            /* Reload the divider for the next ~16 Hz period. */
            ci->ci_schedstate.spc_schedticks = hardscheddiv;
        }
    }
    if ((--ci->ci_schedstate.spc_ticks) <= 0)
        sched_tick(ci);

    /*
     * Only the primary CPU advances global time state, so the
     * tick count and timecounter are bumped exactly once per hz.
     */
    if (CPU_IS_PRIMARY(ci)) {
        hardclock_ticks++;
        tc_ticktock();
    }

    /*
     * Update real-time timeout queue.
     */
    callout_hardclock();

#ifdef KDTRACE_HOOKS
    /* DTrace cyclic subsystem hook for this CPU, if one is armed. */
    cyclic_clock_func_t func = cyclic_clock_func[cpu_index(ci)];
    if (func) {
        (*func)((struct clockframe *)frame);
    }
#endif
}
/*
 * interrupt --
 *	Alpha machine-dependent interrupt dispatcher.
 *
 *	a0 encodes the interrupt class (ALPHA_INTR_*), a1 and a2 carry
 *	class-specific arguments (for device interrupts a1 is the SCB
 *	vector offset), and framep is the saved trap frame.
 *
 *	Interrupt depth is tracked with atomic adds on ci_intrdepth so
 *	that nested/other-CPU observers see a consistent count.  The
 *	clock case deliberately does NOT bump the depth: system time is
 *	sampled from the clock interrupt, and counting it as interrupt
 *	time would misattribute all CPU time.
 */
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
    struct cpu_info *ci = curcpu();
    extern int schedhz;

    switch (a0) {
    case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
        atomic_add_ulong(&ci->ci_intrdepth, 1);

        alpha_ipi_process(ci, framep);

        /*
         * Handle inter-console messages if we're the primary
         * CPU.
         */
        if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
            hwrpb->rpb_txrdy != 0)
            cpu_iccb_receive();

        atomic_sub_ulong(&ci->ci_intrdepth, 1);
#else
        printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
        break;

    case ALPHA_INTR_CLOCK:	/* clock interrupt */
        atomic_add_int(&uvmexp.intrs, 1);
        /* Only the primary CPU accounts the clock event counter. */
        if (CPU_IS_PRIMARY(ci))
            clk_count.ec_count++;
        if (platform.clockintr) {
            /*
             * Call hardclock().  This will also call
             * statclock().  On the primary CPU, it
             * will also deal with time-of-day stuff.
             */
            (*platform.clockintr)((struct clockframe *)framep);

            /*
             * If it's time to call the scheduler clock,
             * do so.
             */
            if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 &&
                schedhz != 0)
                schedclock(ci->ci_curproc);
        }
        break;

    case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
        atomic_add_ulong(&ci->ci_intrdepth, 1);
        /* a0 is reused here to hold the MCES register value. */
        a0 = alpha_pal_rdmces();
        if (platform.mcheck_handler)
            (*platform.mcheck_handler)(a0, framep, a1, a2);
        else
            machine_check(a0, framep, a1, a2);
        atomic_sub_ulong(&ci->ci_intrdepth, 1);
        break;

    case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
        {
        struct scbvec *scb;

        KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

        atomic_add_ulong(&ci->ci_intrdepth, 1);
        atomic_add_int(&uvmexp.intrs, 1);
        /* Dispatch via the SCB vector table for this vector. */
        scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
        (*scb->scb_func)(scb->scb_arg, a1);
        atomic_sub_ulong(&ci->ci_intrdepth, 1);
        break;
        }

    case ALPHA_INTR_PERF:	/* performance counter interrupt */
        printf("WARNING: received performance counter interrupt!\n");
        break;

    case ALPHA_INTR_PASSIVE:
        /* Passive release: intentionally ignored (debug print disabled). */
#if 0
        printf("WARNING: received passive release interrupt vec "
            "0x%lx\n", a1);
#endif
        break;

    default:
        printf("unexpected interrupt: type 0x%lx vec 0x%lx "
            "a2 0x%lx"
#if defined(MULTIPROCESSOR)
            " cpu %lu"
#endif
            "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
            , ci->ci_cpuid
#endif
            );
        panic("interrupt");
        /* NOTREACHED */
    }
}
/*
 * Statistics clock.  Grab profile sample, and if divider reaches 0,
 * do process and kernel statistics.
 *
 * Runs at stathz normally, or at profhz while any process is being
 * profiled; the spc_pscnt divider scales the faster profiling rate
 * back down so statistics are still accumulated at the stathz rate.
 */
void
statclock(struct clockframe *frame)
{
#ifdef GPROF
    struct gmonparam *g;
    u_long i;
#endif
    struct cpu_info *ci = curcpu();
    struct schedstate_percpu *spc = &ci->ci_schedstate;
    struct proc *p = curproc;
    struct process *pr;

    /*
     * Notice changes in divisor frequency, and adjust clock
     * frequency accordingly.
     */
    if (spc->spc_psdiv != psdiv) {
        spc->spc_psdiv = psdiv;
        spc->spc_pscnt = psdiv;
        /* psdiv == 1 means no profiling: run at the plain stathz rate. */
        if (psdiv == 1) {
            setstatclockrate(stathz);
        } else {
            setstatclockrate(profhz);
        }
    }

    if (CLKF_USERMODE(frame)) {
        pr = p->p_p;
        /* If this process is being profiled, record the user PC sample. */
        if (pr->ps_flags & PS_PROFIL)
            addupc_intr(p, CLKF_PC(frame));
        /* Only every psdiv-th tick contributes to the statistics. */
        if (--spc->spc_pscnt > 0)
            return;
        /*
         * Came from user mode; CPU was in user state.
         * Charge the tick as user (or nice) time.
         */
        p->p_uticks++;
        if (pr->ps_nice > NZERO)
            spc->spc_cp_time[CP_NICE]++;
        else
            spc->spc_cp_time[CP_USER]++;
    } else {
#ifdef GPROF
        /*
         * Kernel statistics are just like addupc_intr, only easier.
         */
        g = ci->ci_gmon;
        if (g != NULL && g->state == GMON_PROF_ON) {
            i = CLKF_PC(frame) - g->lowpc;
            if (i < g->textsize) {
                i /= HISTFRACTION * sizeof(*g->kcount);
                g->kcount[i]++;
            }
        }
#endif
#if defined(PROC_PC)
        /* Sample the process's saved PC when profiling in kernel mode. */
        if (p != NULL && p->p_p->ps_flags & PS_PROFIL)
            addupc_intr(p, PROC_PC(p));
#endif
        if (--spc->spc_pscnt > 0)
            return;
        /*
         * Came from kernel mode, so we were:
         * - handling an interrupt,
         * - doing syscall or trap work on behalf of the current
         *   user process, or
         * - spinning in the idle loop.
         * Whichever it is, charge the time as appropriate.
         * Note that we charge interrupts to the current process,
         * regardless of whether they are ``for'' that process,
         * so that we know how much of its real time was spent
         * in ``non-process'' (i.e., interrupt) work.
         */
        if (CLKF_INTR(frame)) {
            if (p != NULL)
                p->p_iticks++;
            spc->spc_cp_time[CP_INTR]++;
        } else if (p != NULL && p != spc->spc_idleproc) {
            p->p_sticks++;
            spc->spc_cp_time[CP_SYS]++;
        } else
            spc->spc_cp_time[CP_IDLE]++;
    }
    /* Reload the divider for the next statistics period. */
    spc->spc_pscnt = psdiv;

    if (p != NULL) {
        p->p_cpticks++;
        /*
         * If no schedclock is provided, call it here at ~12-25 Hz;
         * ~16 Hz is best.  (Every 4th statclock tick.)
         */
        if (schedhz == 0) {
            if ((++curcpu()->ci_schedstate.spc_schedticks & 3) == 0)
                schedclock(p);
        }
    }
}
/*
 * interrupt --
 *	Alpha machine-dependent interrupt dispatcher (older variant:
 *	IPIs are drained from a per-CPU pending bitmask, and clock /
 *	device interrupts are handled on the primary CPU only — see the
 *	XXX-marked workarounds below).
 *
 *	a0 encodes the interrupt class (ALPHA_INTR_*), a1 and a2 carry
 *	class-specific arguments (for device interrupts a1 is the SCB
 *	vector offset), and framep is the saved trap frame.
 */
void
interrupt(unsigned long a0, unsigned long a1, unsigned long a2,
    struct trapframe *framep)
{
    struct proc *p;
    struct cpu_info *ci = curcpu();
    extern int schedhz;

    switch (a0) {
    case ALPHA_INTR_XPROC:	/* interprocessor interrupt */
#if defined(MULTIPROCESSOR)
        {
        u_long pending_ipis, bit;

#if 0
        printf("CPU %lu got IPI\n", cpu_id);
#endif

#ifdef DIAGNOSTIC
        if (ci->ci_dev == NULL) {
            /* XXX panic? */
            printf("WARNING: no device for ID %lu\n",
                ci->ci_cpuid);
            return;
        }
#endif

        /* Atomically fetch and clear the pending-IPI bitmask. */
        pending_ipis = atomic_loadlatch_ulong(&ci->ci_ipis, 0);
        for (bit = 0; bit < ALPHA_NIPIS; bit++)
            if (pending_ipis & (1UL << bit))
                (*ipifuncs[bit])();

        /*
         * Handle inter-console messages if we're the primary
         * CPU.
         */
        if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id &&
            hwrpb->rpb_txrdy != 0)
            cpu_iccb_receive();
        }
#else
        printf("WARNING: received interprocessor interrupt!\n");
#endif /* MULTIPROCESSOR */
        break;

    case ALPHA_INTR_CLOCK:	/* clock interrupt */
#if defined(MULTIPROCESSOR)
        /* XXX XXX XXX  Secondary CPUs drop clock interrupts. */
        if (CPU_IS_PRIMARY(ci) == 0)
            return;
#endif
        uvmexp.intrs++;
        clk_count.ec_count++;
        if (platform.clockintr) {
            /*
             * Call hardclock().  This will also call
             * statclock().  On the primary CPU, it
             * will also deal with time-of-day stuff.
             */
            (*platform.clockintr)((struct clockframe *)framep);

            /*
             * If it's time to call the scheduler clock,
             * do so.
             */
            if ((++schedclk2 & 0x3f) == 0 &&
                (p = ci->ci_curproc) != NULL && schedhz != 0)
                schedclock(p);
        }
        break;

    case ALPHA_INTR_ERROR:	/* Machine Check or Correctable Error */
        /* a0 is reused here to hold the MCES register value. */
        a0 = alpha_pal_rdmces();
        if (platform.mcheck_handler)
            (*platform.mcheck_handler)(a0, framep, a1, a2);
        else
            machine_check(a0, framep, a1, a2);
        break;

    case ALPHA_INTR_DEVICE:	/* I/O device interrupt */
        {
        struct scbvec *scb;

        KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE);

#if defined(MULTIPROCESSOR)
        /* XXX XXX XXX  Secondary CPUs drop device interrupts. */
        if (CPU_IS_PRIMARY(ci) == 0)
            return;
#endif
        uvmexp.intrs++;
        /* Dispatch via the SCB vector table for this vector. */
        scb = &scb_iovectab[SCB_VECTOIDX(a1 - SCB_IOVECBASE)];
        (*scb->scb_func)(scb->scb_arg, a1);
        break;
        }

    case ALPHA_INTR_PERF:	/* performance counter interrupt */
        printf("WARNING: received performance counter interrupt!\n");
        break;

    case ALPHA_INTR_PASSIVE:
        /* Passive release: intentionally ignored (debug print disabled). */
#if 0
        printf("WARNING: received passive release interrupt vec "
            "0x%lx\n", a1);
#endif
        break;

    default:
        printf("unexpected interrupt: type 0x%lx vec 0x%lx "
            "a2 0x%lx"
#if defined(MULTIPROCESSOR)
            " cpu %lu"
#endif
            "\n", a0, a1, a2
#if defined(MULTIPROCESSOR)
            , ci->ci_cpuid
#endif
            );
        panic("interrupt");
        /* NOTREACHED */
    }
}
void interrupt(unsigned long a0, unsigned long a1, unsigned long a2, struct trapframe *framep) { struct cpu_info *ci = curcpu(); struct cpu_softc *sc = ci->ci_softc; switch (a0) { case ALPHA_INTR_XPROC: /* interprocessor interrupt */ #if defined(MULTIPROCESSOR) atomic_inc_ulong(&ci->ci_intrdepth); alpha_ipi_process(ci, framep); /* * Handle inter-console messages if we're the primary * CPU. */ if (ci->ci_cpuid == hwrpb->rpb_primary_cpu_id && hwrpb->rpb_txrdy != 0) cpu_iccb_receive(); atomic_dec_ulong(&ci->ci_intrdepth); #else printf("WARNING: received interprocessor interrupt!\n"); #endif /* MULTIPROCESSOR */ break; case ALPHA_INTR_CLOCK: /* clock interrupt */ /* * We don't increment the interrupt depth for the * clock interrupt, since it is *sampled* from * the clock interrupt, so if we did, all system * time would be counted as interrupt time. */ sc->sc_evcnt_clock.ev_count++; ci->ci_data.cpu_nintr++; if (platform.clockintr) { /* * Call hardclock(). This will also call * statclock(). On the primary CPU, it * will also deal with time-of-day stuff. */ (*platform.clockintr)((struct clockframe *)framep); /* * If it's time to call the scheduler clock, * do so. 
*/ if ((++ci->ci_schedstate.spc_schedticks & 0x3f) == 0 && schedhz != 0) schedclock(ci->ci_curlwp); } break; case ALPHA_INTR_ERROR: /* Machine Check or Correctable Error */ atomic_inc_ulong(&ci->ci_intrdepth); a0 = alpha_pal_rdmces(); if (platform.mcheck_handler != NULL && (void *)framep->tf_regs[FRAME_PC] != XentArith) (*platform.mcheck_handler)(a0, framep, a1, a2); else machine_check(a0, framep, a1, a2); atomic_dec_ulong(&ci->ci_intrdepth); break; case ALPHA_INTR_DEVICE: /* I/O device interrupt */ { struct scbvec *scb; int idx = SCB_VECTOIDX(a1 - SCB_IOVECBASE); bool mpsafe = scb_mpsafe[idx]; KDASSERT(a1 >= SCB_IOVECBASE && a1 < SCB_SIZE); atomic_inc_ulong(&sc->sc_evcnt_device.ev_count); atomic_inc_ulong(&ci->ci_intrdepth); if (!mpsafe) { KERNEL_LOCK(1, NULL); } ci->ci_data.cpu_nintr++; scb = &scb_iovectab[idx]; (*scb->scb_func)(scb->scb_arg, a1); if (!mpsafe) KERNEL_UNLOCK_ONE(NULL); atomic_dec_ulong(&ci->ci_intrdepth); break; } case ALPHA_INTR_PERF: /* performance counter interrupt */ printf("WARNING: received performance counter interrupt!\n"); break; case ALPHA_INTR_PASSIVE: #if 0 printf("WARNING: received passive release interrupt vec " "0x%lx\n", a1); #endif break; default: printf("unexpected interrupt: type 0x%lx vec 0x%lx " "a2 0x%lx" #if defined(MULTIPROCESSOR) " cpu %lu" #endif "\n", a0, a1, a2 #if defined(MULTIPROCESSOR) , ci->ci_cpuid #endif ); panic("interrupt"); /* NOTREACHED */ } }