/*
 * Start the real-time and statistics clocks. Leave stathz 0 since there
 * are no other timers available.
 */
void
cp0_startclock(struct cpu_info *ci)
{
	int s;

#ifdef MULTIPROCESSOR
	if (!CPU_IS_PRIMARY(ci)) {
		s = splhigh();
		nanouptime(&ci->ci_schedstate.spc_runtime);
		splx(s);

		/* try to avoid getting clock interrupts early */
		cp0_set_compare(cp0_get_count() - 1);

		cp0_calibrate(ci);
	}
#endif

	/* Start the clock. */
	s = splclock();
	ci->ci_cpu_counter_interval =
	    (ci->ci_hw.clock / CP0_CYCLE_DIVIDER) / hz;
	ci->ci_cpu_counter_last = cp0_get_count() + ci->ci_cpu_counter_interval;
	cp0_set_compare(ci->ci_cpu_counter_last);
	ci->ci_clock_started++;
	splx(s);
}
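For a sense of scale, the interval is simply the counter frequency divided by hz. A minimal user-space sketch of that arithmetic, assuming a hypothetical 500 MHz input clock, a cycle divider of 2, and hz of 100 (none of these values come from a particular machine):

/*
 * Illustrative sketch only: clock rate, divider and hz below are
 * assumed values, not taken from any real configuration.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t clock = 500000000;	/* hypothetical 500 MHz CPU clock */
	uint32_t divider = 2;		/* counter ticks at half that rate */
	uint32_t hz = 100;		/* clock interrupt rate */
	uint32_t interval = (clock / divider) / hz;

	/* prints 2500000: counter ticks between clock interrupts */
	printf("counter interval: %u\n", interval);
	return 0;
}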
/*
 * Interrupt handler for targets using the internal count register
 * as interval clock. Normally the system is run with the clock
 * interrupt always enabled. Masking is done here, and if the clock
 * cannot be run the tick is just counted and handled later when
 * the clock is unmasked again.
 */
intrmask_t
clock_int5(intrmask_t mask, struct trap_frame *tf)
{
	u_int32_t clkdiff;

	/*
	 * If the clock is started, count the tick; otherwise just arm
	 * for a new interval.
	 */
	if (clock_started && cpu_counter_interval != 0) {
		clkdiff = cp0_get_count() - cpu_counter_last;
		while (clkdiff >= cpu_counter_interval) {
			cpu_counter_last += cpu_counter_interval;
			clkdiff = cp0_get_count() - cpu_counter_last;
			pendingticks++;
		}
		cpu_counter_last += cpu_counter_interval;
		pendingticks++;
	} else {
		cpu_counter_last = cpu_counter_interval + cp0_get_count();
	}

	cp0_set_compare(cpu_counter_last);

	if ((tf->cpl & SPL_CLOCKMASK) == 0) {
		while (pendingticks) {
			hardclock(tf);
			pendingticks--;
		}
	}

	return CR_INT_5;	/* Clock is always on 5 */
}
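The unsigned subtraction cp0_get_count() - cpu_counter_last is what makes the catch-up loop safe across 32-bit counter wrap-around: the difference is taken modulo 2^32, so it stays meaningful even when the count register overflows between interrupts. A minimal user-space sketch of the same idea, with made-up values standing in for the count register:

/*
 * Sketch of the wrap-safe catch-up loop; the simulated counter value
 * and interval are invented for illustration.
 */
#include <stdio.h>
#include <stdint.h>

int
main(void)
{
	uint32_t interval = 2500000;
	uint32_t last = 0xfffff000;	/* last programmed deadline */
	uint32_t count = 0x004b3000;	/* counter has since wrapped past 0 */
	uint32_t pendingticks = 0;
	uint32_t clkdiff;

	/* Same structure as the handler: count every elapsed interval. */
	clkdiff = count - last;		/* modulo-2^32 difference */
	while (clkdiff >= interval) {
		last += interval;
		clkdiff = count - last;
		pendingticks++;
	}
	last += interval;
	pendingticks++;

	printf("pending ticks: %u, next deadline: 0x%08x\n",
	    pendingticks, last);
	return 0;
}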
/*
 * Clock interrupt code for machines using the on-chip CPU counter
 * register. This register counts at half the pipeline frequency,
 * so the frequency must be known and the options register wired to
 * allow its use.
 *
 * The code is enabled by setting 'cpu_counter_interval'.
 */
void
clock_int5_init(struct clock_softc *sc)
{
	int s;

	s = splclock();
	cpu_counter_interval = sys_config.cpu[0].clock / (hz * 2);
	cpu_counter_last = cp0_get_count() + cpu_counter_interval * 4;
	cp0_set_compare(cpu_counter_last);
	splx(s);
}
void
clockattach(struct device *parent, struct device *self, void *aux)
{
	printf(": int 5\n");

	/*
	 * We need to register the interrupt now, for idle_mask to
	 * be computed correctly.
	 */
	set_intr(INTPRI_CLOCK, CR_INT_5, cp0_int5);
	evcount_attach(&cp0_clock_count, "clock", &cp0_clock_irq);

	/* try to avoid getting clock interrupts early */
	cp0_set_compare(cp0_get_count() - 1);

	md_startclock = cp0_startclock;
}
/*
 * Dummy count register interrupt handler used on some targets.
 * Just resets the compare register and acknowledges the interrupt.
 */
intrmask_t
clock_int5_dummy(intrmask_t mask, struct trap_frame *tf)
{
	cp0_set_compare(0);	/* Shut up counter interrupts for a while */

	return CR_INT_5;	/* Clock is always on 5 */
}
/*
 * Interrupt handler for targets using the internal count register
 * as interval clock. Normally the system is run with the clock
 * interrupt always enabled. Masking is done here, and if the clock
 * cannot be run the tick is just counted and handled later when
 * the clock is logically unmasked again.
 */
uint32_t
cp0_int5(uint32_t mask, struct trapframe *tf)
{
	u_int32_t clkdiff;
	struct cpu_info *ci = curcpu();

	/*
	 * If we got an interrupt before we got ready to process it,
	 * retrigger it as far away as possible. cpu_initclocks() will
	 * take care of retriggering it correctly.
	 */
	if (ci->ci_clock_started == 0) {
		cp0_set_compare(cp0_get_count() - 1);

		return CR_INT_5;
	}

	/*
	 * Count how many ticks have passed since the last clock interrupt...
	 */
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	while (clkdiff >= ci->ci_cpu_counter_interval) {
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
		ci->ci_pendingticks++;
	}
	ci->ci_pendingticks++;
	ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;

	/*
	 * Set up the next tick, and check if it has just been hit; in
	 * that case count it and schedule one tick ahead.
	 */
	cp0_set_compare(ci->ci_cpu_counter_last);
	clkdiff = cp0_get_count() - ci->ci_cpu_counter_last;
	if ((int)clkdiff >= 0) {
		ci->ci_cpu_counter_last += ci->ci_cpu_counter_interval;
		ci->ci_pendingticks++;
		cp0_set_compare(ci->ci_cpu_counter_last);
	}

	/*
	 * Process clock interrupts unless they are currently masked.
	 */
	if (tf->ipl < IPL_CLOCK) {
#ifdef MULTIPROCESSOR
		register_t sr;

		sr = getsr();
		ENABLEIPI();
#endif
		while (ci->ci_pendingticks) {
			cp0_clock_count.ec_count++;
			hardclock(tf);
			ci->ci_pendingticks--;
		}
#ifdef MULTIPROCESSOR
		setsr(sr);
#endif
	}

	return CR_INT_5;	/* Clock is always on 5 */
}
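The re-check after writing the compare register guards against the race where the freshly programmed deadline has already been passed by the time it is written; without it, the interrupt would be lost until the 32-bit counter wrapped all the way around. A minimal sketch of that pattern, where counter_sim, read_counter() and set_compare() are hypothetical stand-ins for the count and compare registers and do not model real CP0 behaviour:

/*
 * Sketch of the "program deadline, then check it was not already missed"
 * pattern; the simulated counter and helper functions are invented for
 * illustration.
 */
#include <stdio.h>
#include <stdint.h>

static uint32_t counter_sim = 1000;	/* pretend count register */
static uint32_t compare_sim;		/* pretend compare register */

static uint32_t
read_counter(void)
{
	/* Each read advances the simulated counter a little. */
	counter_sim += 400;
	return counter_sim;
}

static void
set_compare(uint32_t v)
{
	compare_sim = v;
}

int
main(void)
{
	uint32_t interval = 300;	/* unrealistically small, on purpose */
	uint32_t last = read_counter() + interval;
	uint32_t pendingticks = 1;

	set_compare(last);
	/* If the deadline already fell behind the counter, push it out again. */
	if ((int32_t)(read_counter() - last) >= 0) {
		last += interval;
		pendingticks++;
		set_compare(last);
	}

	/* prints "pending ticks: 2, compare: 2000" with these values */
	printf("pending ticks: %u, compare: %u\n", pendingticks, compare_sim);
	return 0;
}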