/*
 * Level 14 (stat clock) interrupts from processor counter.
 */
int
statintr_4m(void *cap)
{
	struct clockframe *frame = cap;
	u_long newint;

	kpreempt_disable();

	/* read the limit register to clear the interrupt */
	*((volatile int *)&counterreg4m->t_limit);

	statclock(frame);

	/*
	 * Compute new randomized interval.
	 */
	newint = new_interval();

	/*
	 * Use the `non-resetting' limit register, so we don't
	 * lose the counter ticks that happened since this
	 * interrupt was raised.
	 */
	counterreg4m->t_limit_nr = tmr_ustolim4m(newint);

	/*
	 * The factor 8 is only valid for stathz==100.
	 * See also clock.c
	 */
	if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 &&
	    schedhz != 0) {
		if (CLKF_LOPRI(frame, IPL_SCHED)) {
			/* No need to schedule a soft interrupt */
			spllowerschedclock();
			schedintr(cap);
		} else {
			/*
			 * We're interrupting a thread that may have the
			 * scheduler lock; run schedintr() on this CPU later.
			 */
			raise_ipi(&cpuinfo, IPL_SCHED); /* sched_cookie->pil */
		}
	}
	kpreempt_enable();

	return (1);
}
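/*
 * For reference, a minimal sketch of how the randomized interval used
 * above can be produced.  It follows the common BSD statclock
 * randomization scheme: pick an interval uniformly distributed around
 * the nominal stathz period so that activity cannot synchronize with
 * the profiling clock.  The `statmin' and `statvar' variables (minimum
 * interval and power-of-two variance range, in the same microsecond
 * units the tmr_ustolim*() conversions expect) are assumed to be set up
 * by the clock attachment code; this is an illustration, not
 * necessarily the exact definition used by this file.
 */
#if 0
u_long
new_interval(void)
{
	u_long newint, r, var;

	/*
	 * Intervals are uniformly distributed on
	 * [statint - statvar / 2, statint + statvar / 2], and therefore
	 * have mean statint, giving a stathz frequency clock.
	 */
	var = statvar;
	do {
		r = random() & (var - 1);	/* var is a power of two */
	} while (r == 0);
	newint = statmin + r;

	return (newint);
}
#endif	/* 0 -- illustrative sketch only */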
/*
 * Level 14 (stat clock) interrupts from processor counter.
 */
int
statintr_4(void *cap)
{
	struct clockframe *frame = cap;
	u_long newint;

	/* read the limit register to clear the interrupt */
	*((volatile int *)&timerreg4->t_c14.t_limit);

	statclock(frame);

	/*
	 * Compute new randomized interval.
	 */
	newint = new_interval();

	/*
	 * The sun4/4c timer has no `non-resetting' register;
	 * use the current counter value to compensate the new
	 * limit value for the number of counter ticks elapsed.
	 */
	newint -= tmr_cnttous(timerreg4->t_c14.t_counter);
	timerreg4->t_c14.t_limit = tmr_ustolim(newint);

	/*
	 * The factor 8 is only valid for stathz==100.
	 * See also clock.c
	 */
	if (curlwp &&
	    (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
		if (CLKF_LOPRI(frame, IPL_SCHED)) {
			/* No need to schedule a soft interrupt */
			spllowerschedclock();
			schedintr(cap);
		} else {
			/*
			 * We're interrupting a thread that may have the
			 * scheduler lock; run schedintr() later.
			 */
			sparc_softintr_schedule(sched_cookie);
		}
	}

	return (1);
}
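/*
 * Illustration of the compensation above: since the sun4/4c has no
 * non-resetting limit register, reprogramming the limit restarts the
 * counter, and the ticks accumulated since the interrupt was raised
 * would otherwise stretch the next interval.  E.g. if the randomized
 * interval is 10000 us and the counter already shows 120 us, the limit
 * is programmed to 9880 us so the next statclock interrupt still
 * arrives roughly 10000 us after the previous one.  The helper below is
 * hypothetical and merely restates that arithmetic with a guard for the
 * degenerate case.
 */
#if 0
static inline u_long
compensated_limit_us(u_long newint_us, u_long elapsed_us)
{
	/* Never program a zero (or wrapped-around) limit. */
	return (newint_us > elapsed_us ? newint_us - elapsed_us : 1);
}
#endif	/* 0 -- illustrative sketch only */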