/*! * Initialize an interval with parameters passed. * * Function maybe be used with _INFLECT and _BISECT intervals and with * _POINT intervals if the start parameter is set to the point. May also * be used to on initial IT_GBBP_CLIMB but not once climb_step is non-zero * * @param plane plane that the interval belongs to (defunct) * @param n_p number of parameters of the interval points * @param type interval tyle * @param start pointer to the start parameter array * @param end pointer to the end parameter array * * @return pointer to a newly allocated interval with values initialised as * per the arguments passed * */ struct pmm_interval* init_interval(int plane, int n_p, enum pmm_interval_type type, int *start, int *end) { struct pmm_interval *i; i = new_interval(); i->plane = plane; i->n_p = n_p; i->type = type; switch(i->type) { case IT_GBBP_CLIMB : case IT_GBBP_BISECT : case IT_GBBP_INFLECT : i->start = init_param_array_copy(start, n_p); i->end = init_param_array_copy(end, n_p); break; case IT_POINT : i->start = init_param_array_copy(start, n_p); break; default : ERRPRINTF("Interval type not supported; %s\n", interval_type_to_string(i->type)); free_interval(&i); return NULL; } return i; }
/*
 * Level 14 (stat clock) interrupts from processor counter.
 *
 * sun4m variant: the timer has a `non-resetting' limit register, so the
 * new interval can be programmed without losing ticks that elapsed since
 * the interrupt was raised.
 *
 * Always returns 1 (interrupt claimed/handled).
 */
int
statintr_4m(void *cap)
{
	struct clockframe *frame = cap;
	u_long newint;

	kpreempt_disable();

	/* read the limit register to clear the interrupt */
	*((volatile int *)&counterreg4m->t_limit);

	statclock(frame);

	/*
	 * Compute new randomized interval.
	 */
	newint = new_interval();

	/*
	 * Use the `non-resetting' limit register, so we don't
	 * lose the counter ticks that happened since this
	 * interrupt was raised.
	 */
	counterreg4m->t_limit_nr = tmr_ustolim4m(newint);

	/*
	 * The factor 8 is only valid for stathz==100.
	 * See also clock.c
	 */
	if ((++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0 &&
	    schedhz != 0) {
		if (CLKF_LOPRI(frame, IPL_SCHED)) {
			/* No need to schedule a soft interrupt */
			spllowerschedclock();
			schedintr(cap);
		} else {
			/*
			 * We're interrupting a thread that may have the
			 * scheduler lock; run schedintr() on this CPU later.
			 */
			raise_ipi(&cpuinfo, IPL_SCHED);	/* sched_cookie->pil */
		}
	}
	kpreempt_enable();

	return (1);
}
/*
 * Level 14 (stat clock) interrupts from processor counter.
 *
 * sun4/4c variant: unlike the sun4m timer, this hardware has no
 * `non-resetting' limit register, so the new limit is compensated by
 * the counter ticks already elapsed before it is written.
 *
 * Always returns 1 (interrupt claimed/handled).
 */
int
statintr_4(void *cap)
{
	struct clockframe *frame = cap;
	u_long newint;

	/* read the limit register to clear the interrupt */
	*((volatile int *)&timerreg4->t_c14.t_limit);

	statclock(frame);

	/*
	 * Compute new randomized interval.
	 */
	newint = new_interval();

	/*
	 * The sun4/4c timer has no `non-resetting' register;
	 * use the current counter value to compensate the new
	 * limit value for the number of counter ticks elapsed.
	 */
	newint -= tmr_cnttous(timerreg4->t_c14.t_counter);
	timerreg4->t_c14.t_limit = tmr_ustolim(newint);

	/*
	 * The factor 8 is only valid for stathz==100.
	 * See also clock.c
	 */
	if (curlwp && (++cpuinfo.ci_schedstate.spc_schedticks & 7) == 0) {
		if (CLKF_LOPRI(frame, IPL_SCHED)) {
			/* No need to schedule a soft interrupt */
			spllowerschedclock();
			schedintr(cap);
		} else {
			/*
			 * We're interrupting a thread that may have the
			 * scheduler lock; run schedintr() later.
			 */
			sparc_softintr_schedule(sched_cookie);
		}
	}

	return (1);
}