boolean_t
timer_call_cancel(
	timer_call_t		call)
{
	boolean_t		result = TRUE;
	spl_t			s;

	s = splclock();
	simple_lock(&timer_call_lock);

	if (call->state == DELAYED) {
		queue_t		queue = &PROCESSOR_DATA(current_processor(),
							timer_call_queue);

		if (queue_first(queue) == qe(call)) {
			_delayed_call_dequeue(call);

			if (!queue_empty(queue))
				_set_delayed_call_timer((timer_call_t)queue_first(queue));
		}
		else
			_delayed_call_dequeue(call);
	}
	else
		result = FALSE;

	simple_unlock(&timer_call_lock);
	splx(s);

	return (result);
}
boolean_t
timer_call_enter1(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline)
{
	boolean_t		result = TRUE;
	queue_t			queue;
	spl_t			s;

	s = splclock();
	simple_lock(&timer_call_lock);

	if (call->state == DELAYED)
		_delayed_call_dequeue(call);
	else
		result = FALSE;

	call->param1	= param1;
	call->deadline	= deadline;

	queue = &PROCESSOR_DATA(current_processor(), timer_call_queue);

	_delayed_call_enqueue(queue, call);

	if (queue_first(queue) == qe(call))
		_set_delayed_call_timer(call);

	simple_unlock(&timer_call_lock);
	splx(s);

	return (result);
}
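/*
 * Illustrative caller sketch (not part of the file above): arm a
 * timer_call one second out and later cancel it.  It assumes the usual
 * timer_call_setup() and clock_interval_to_deadline() helpers from this
 * era of the kernel; "my_func", "my_call" and "my_arm_and_cancel" are
 * hypothetical names used only for the example.
 */
static timer_call_data_t	my_call;

static void
my_func(timer_call_param_t p0, timer_call_param_t p1)
{
	/* runs from the clock interrupt path once the deadline passes */
}

static void
my_arm_and_cancel(void)
{
	uint64_t	deadline;

	timer_call_setup(&my_call, my_func, NULL);	/* bind callback and param0 */
	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	(void) timer_call_enter1(&my_call, NULL, deadline);

	if (timer_call_cancel(&my_call))
		;	/* the call was still pending and has been removed */
}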
/*
 * Start the real-time and statistics clocks.  Leave stathz 0 since there
 * are no other timers available.
 */
void
cp0_startclock(struct cpu_info *ci)
{
	int s;

#ifdef MULTIPROCESSOR
	if (!CPU_IS_PRIMARY(ci)) {
		s = splhigh();
		nanouptime(&ci->ci_schedstate.spc_runtime);
		splx(s);

		/* try to avoid getting clock interrupts early */
		cp0_set_compare(cp0_get_count() - 1);

		cp0_calibrate(ci);
	}
#endif

	/* Start the clock. */
	s = splclock();
	ci->ci_cpu_counter_interval =
	    (ci->ci_hw.clock / CP0_CYCLE_DIVIDER) / hz;
	ci->ci_cpu_counter_last = cp0_get_count() + ci->ci_cpu_counter_interval;
	cp0_set_compare(ci->ci_cpu_counter_last);
	ci->ci_clock_started++;
	splx(s);
}
int
clock_intr(void *arg)
{
	volatile struct timer_reg *timer;
	int whilecount = 0;

	if (!INTR_OCCURRED(NEXT_I_TIMER)) {
		return (0);
	}

	do {
		static int in_hardclock = 0;
		int s;

		timer = (volatile struct timer_reg *)IIOV(NEXT_P_TIMER);
		timer->csr |= TIMER_REG_UPDATE;

		if (!in_hardclock) {
			in_hardclock = 1;
			s = splclock();
			hardclock(arg);
			splx(s);
			in_hardclock = 0;
		}
		if (whilecount++ > 10)
			panic("whilecount");
	} while (INTR_OCCURRED(NEXT_I_TIMER));

	return (1);
}
/*
 * does not implement security features of kern_time.c:settime()
 */
void
afs_osi_SetTime(osi_timeval_t * atv)
{
#ifdef AFS_FBSD50_ENV
    printf("afs attempted to set clock; use \"afsd -nosettime\"\n");
#else
    struct timespec ts;
    struct timeval tv, delta;
    int s;

    AFS_GUNLOCK();
    s = splclock();
    microtime(&tv);
    delta = *atv;
    timevalsub(&delta, &tv);
    ts.tv_sec = atv->tv_sec;
    ts.tv_nsec = atv->tv_usec * 1000;
    set_timecounter(&ts);
    (void)splsoftclock();
    lease_updatetime(delta.tv_sec);
    splx(s);
    resettodr();
    AFS_GLOCK();
#endif
}
/*
 * Return the best possible estimate of the time in the timeval
 * to which tvp points.  We guarantee that the time will be greater
 * than the value obtained by a previous call.
 */
void
microtime(struct timeval *tvp)
{
	static struct timeval lasttime;
	u_int32_t clkdiff;
	int s = splclock();

	*tvp = time;
	clkdiff = (cp0_get_count() - cpu_counter_last) * 1000;
	tvp->tv_usec += clkdiff / ticktime;
	while (tvp->tv_usec >= 1000000) {
		tvp->tv_sec++;
		tvp->tv_usec -= 1000000;
	}

	if (tvp->tv_sec == lasttime.tv_sec &&
	    tvp->tv_usec <= lasttime.tv_usec) {
		tvp->tv_usec++;
		if (tvp->tv_usec >= 1000000) {
			tvp->tv_sec++;
			tvp->tv_usec -= 1000000;
		}
	}
	lasttime = *tvp;
	splx(s);
}
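/*
 * Illustrative caller sketch (not from the file above): time a code
 * section with two microtime() calls.  It relies only on the standard
 * timersub() macro; "measure_something" is a hypothetical helper.
 */
static void
measure_something(void)
{
	struct timeval before, after, elapsed;

	microtime(&before);
	/* ... work being measured ... */
	microtime(&after);

	timersub(&after, &before, &elapsed);
	printf("took %lld.%06ld s\n",
	    (long long)elapsed.tv_sec, (long)elapsed.tv_usec);
}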
/*
 * Maskable IPIs.
 *
 * These IPIs are received as non maskable, but are not processed in
 * the NMI handler; instead, they are processed from the soft interrupt
 * handler.
 *
 * XXX This is grossly suboptimal.
 */
void
m197_soft_ipi()
{
	struct cpu_info *ci = curcpu();
	struct trapframe faketf;
	int s;

	__mp_lock(&kernel_lock);
	s = splclock();

	if (ci->ci_h_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_h_sxip;
		faketf.tf_epsr = ci->ci_h_epsr;
		ci->ci_h_sxip = 0;
		hardclock((struct clockframe *)&faketf);
	}

	if (ci->ci_s_sxip != 0) {
		faketf.tf_cpu = ci;
		faketf.tf_sxip = ci->ci_s_sxip;
		faketf.tf_epsr = ci->ci_s_epsr;
		ci->ci_s_sxip = 0;
		statclock((struct clockframe *)&faketf);
	}

	splx(s);
	__mp_unlock(&kernel_lock);
}
/* This function is used by clock_settime and settimeofday */
static int
settime1(struct proc *p, const struct timespec *ts, bool check_kauth)
{
	struct timespec delta, now;
	int s;

	/* WHAT DO WE DO ABOUT PENDING REAL-TIME TIMEOUTS??? */
	s = splclock();
	nanotime(&now);
	timespecsub(ts, &now, &delta);

	if (check_kauth && kauth_authorize_system(kauth_cred_get(),
	    KAUTH_SYSTEM_TIME, KAUTH_REQ_SYSTEM_TIME_SYSTEM, __UNCONST(ts),
	    &delta, KAUTH_ARG(check_kauth ? false : true)) != 0) {
		splx(s);
		return (EPERM);
	}

#ifdef notyet
	if ((delta.tv_sec < 86400) && securelevel > 0) { /* XXX elad - notyet */
		splx(s);
		return (EPERM);
	}
#endif

	tc_setclock(ts);

	timespecadd(&boottime, &delta, &boottime);

	resettodr();
	splx(s);

	return (0);
}
void
parintr(void *arg)
{
	int s, mask;

	mask = (int)arg;
	s = splclock();
	intio_set_sicilian_intr(intio_get_sicilian_intr() &
	    ~SICILIAN_INTR_PAR);

#ifdef DEBUG
	if (pardebug & PDB_INTERRUPT)
		printf("parintr %d(%s)\n", mask, mask ? "FLG" : "tout");
#endif

	/* if invoked from timeout handler, mask will be 0,
	 * if from interrupt, it will contain the cia-icr mask,
	 * which is != 0
	 */
	if (mask) {
		if (partimeout_pending)
			callout_stop(&intr_callout);
		if (parsend_pending)
			parsend_pending = 0;
	}

	/* either way, there won't be a timeout pending any longer */
	partimeout_pending = 0;
	wakeup(parintr);
	splx(s);
}
/*
 * Determine mass storage and memory configuration for a machine.
 * We get the PROM's root device and make sure we understand it, then
 * attach it as `mainbus0'.  We also set up to handle the PROM `sync'
 * command.
 */
void
cpu_configure(void)
{
	if (CPU_ISSUN4V)
		mdesc_init();

	bool userconf = (boothowto & RB_USERCONF) != 0;

	/* fetch boot device settings */
	get_bootpath_from_prom();
	if (((boothowto & RB_USERCONF) != 0) && !userconf)
		/*
		 * Old bootloaders do not pass boothowto, and MI code
		 * has already handled userconfig before we get here
		 * and finally fetch the right options.  So if we missed
		 * it, just do it here.
		 */
		userconf_prompt();

	/* block clock interrupts and anything below */
	splclock();
	/* Enable device interrupts */
	setpstate(getpstate() | PSTATE_IE);

	if (config_rootfound("mainbus", NULL) == NULL)
		panic("mainbus not configured");

	/* Enable device interrupts */
	setpstate(getpstate() | PSTATE_IE);

	(void)spl0();
}
/*
 * wdt_set_timeout
 *
 * Load the watchdog timer with the specified number of seconds.
 * Clamp seconds to be in the interval [2; 1800].
 */
int
wdt_set_timeout(void *self, int seconds)
{
	struct wdt_softc *wdt = (struct wdt_softc *)self;
	u_int16_t v;
	int s;

	s = splclock();

	wdt_timer_disable(wdt);

	if (seconds == 0) {
		splx(s);
		return (0);
	} else if (seconds < 2)
		seconds = 2;
	else if (seconds > 1800)
		seconds = 1800;

	/* 8254 has been programmed with a 2ms period */
	v = (u_int16_t)seconds * 50;

	/* load the new timeout count */
	wdt_8254_count(wdt, WDT_8254_TC_HI, v);

	/* enable the timer */
	bus_space_write_1(wdt->sc_iot, wdt->sc_ioh, WDT_ENABLE_TIMER, 0);

	splx(s);

	return (seconds);
}
int
mcclock_getsecs(device_t dev, int *secp)
{
	int timeout = 100000000;
	int sec;
	int s;

	s = splclock();
	for (;;) {
		if (!(MCCLOCK_READ(dev, MC_REGA) & MC_REGA_UIP)) {
			sec = MCCLOCK_READ(dev, MC_SEC);
			break;
		}
		if (--timeout == 0)
			goto fail;
	}
	splx(s);

	*secp = sec;
	return 0;

fail:
	splx(s);
	return ETIMEDOUT;
}
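/*
 * Illustrative caller sketch (hypothetical, not part of the driver):
 * wait for the seconds register to roll over so that a following write
 * to the TODR lands just after an update cycle, using only
 * mcclock_getsecs() as defined above.
 */
static int
mcclock_wait_rollover(device_t dev)
{
	int first, now;

	if (mcclock_getsecs(dev, &first) != 0)
		return (ETIMEDOUT);
	do {
		if (mcclock_getsecs(dev, &now) != 0)
			return (ETIMEDOUT);
	} while (now == first);		/* spin until MC_SEC changes */
	return (0);
}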
void
amptimer_setstatclockrate(int newhz)
{
	struct amptimer_softc	*sc = amptimer_cd.cd_devs[0];
	int			 minint, statint;
	int			 s;

	s = splclock();

	statint = sc->sc_ticks_per_second / newhz;
	/* calculate largest 2^n which is smaller than just over half statint */
	sc->sc_statvar = 0x40000000;	/* really big power of two */
	minint = statint / 2 + 100;
	while (sc->sc_statvar > minint)
		sc->sc_statvar >>= 1;

	sc->sc_statmin = statint - (sc->sc_statvar >> 1);

	splx(s);

	/*
	 * XXX this allows the next stat timer to occur then it switches
	 * to the new frequency.  Rather than switching instantly.
	 */
}
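/*
 * Illustrative sketch (hypothetical helper, not part of the driver) of
 * how sc_statmin and sc_statvar are typically consumed: the statclock
 * period is dithered by a random amount below sc_statvar so that
 * statistics sampling does not synchronize with periodic kernel
 * activity.  This follows the usual BSD clock-interrupt pattern.
 */
static u_int32_t
amptimer_next_statclock_delta(struct amptimer_softc *sc)
{
	u_int32_t r;

	do {
		r = random() & (sc->sc_statvar - 1);
	} while (r == 0);		/* never schedule a zero-length interval */
	return sc->sc_statmin + r;	/* ticks until the next statclock */
}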
/*
 * Reset the TODR based on the time value.
 */
int
mcclock_set(todr_chip_handle_t tch, struct timeval *tvp)
{
	struct mcclock_softc *sc = tch->cookie;
	struct clock_ymdhms dt;
	uint32_t yearsecs;
	mc_todregs regs;
	int s;

	/*
	 * calculate seconds relative to this year
	 */
	clock_secs_to_ymdhms(tvp->tv_sec, &dt);	/* get the year */
	dt.dt_mon = 1;
	dt.dt_day = 1;
	dt.dt_hour = 0;
	dt.dt_min = 0;
	dt.dt_sec = 0;
	yearsecs = tvp->tv_sec - clock_ymdhms_to_secs(&dt);

#define first72 ((72 - 70) * SECYR)
	clock_secs_to_ymdhms(first72 + yearsecs, &dt);

#ifdef DEBUG
	if (dt.dt_year != 1972)
		printf("resettodr: botch (%d, %" PRId64 ")\n",
		    yearsecs, time_second);
#endif

	s = splclock();
	MC146818_GETTOD(sc, &regs);
	splx(s);

	regs[MC_SEC] = dt.dt_sec;
	regs[MC_MIN] = dt.dt_min;
	regs[MC_HOUR] = dt.dt_hour;
	regs[MC_DOW] = dt.dt_wday;
	regs[MC_DOM] = dt.dt_day;
	regs[MC_MONTH] = dt.dt_mon;
	regs[MC_YEAR] = dt.dt_year - 1900;	/* rt clock wants 2 digits */

	s = splclock();
	MC146818_PUTTOD(sc, &regs);
	splx(s);

	return 0;
}
/*
 * clock_set_calendar_microtime:
 *
 * Sets the current calendar value by
 * recalculating the epoch and offset
 * from the system clock.
 *
 * Also adjusts the boottime to keep the
 * value consistent, writes the new
 * calendar value to the platform clock,
 * and sends calendar change notifications.
 */
void
clock_set_calendar_microtime(
	clock_sec_t		secs,
	clock_usec_t		microsecs)
{
	clock_sec_t		sys;
	clock_usec_t		microsys;
	clock_sec_t		newsecs;
	spl_t			s;

	newsecs = (microsecs < 500*USEC_PER_SEC)? secs: secs + 1;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	/*
	 * Calculate the new calendar epoch based on
	 * the new value and the system clock.
	 */
	clock_get_system_microtime(&sys, &microsys);
	TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

	/*
	 * Adjust the boottime based on the delta.
	 */
	clock_boottime += secs - clock_calend.epoch;

	/*
	 * Set the new calendar epoch.
	 */
	clock_calend.epoch = secs;

	nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

	/*
	 * Cancel any adjustment in progress.
	 */
	calend_adjtotal = clock_calend.adjdelta = 0;

	clock_unlock();

	/*
	 * Set the new value for the platform clock.
	 */
	PESetGMTTimeOfDay(newsecs);

	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
/*
 * rtc_nanotime_init_commpage:
 *
 * Call back from the commpage initialization to
 * cause the commpage data to be filled in once the
 * commpages have been created.
 */
void
rtc_nanotime_init_commpage(void)
{
	spl_t			s = splclock();

	rtc_nanotime_set_commpage(&pal_rtc_nanotime_info);
	splx(s);
}
int
parsendch(u_char ch)
{
	int error = 0;
	int s;

	/* if either offline, busy or out of paper, wait for that
	   condition to clear */
	s = splclock();
	while (!error
	    && (parsend_pending
		|| ((ciab.pra ^ CIAB_PRA_SEL)
		    & (CIAB_PRA_SEL | CIAB_PRA_BUSY | CIAB_PRA_POUT)))) {
		extern int hz;
#ifdef DEBUG
		if (pardebug & PDB_INTERRUPT)
			printf("parsendch, port = $%x\n",
			    ((ciab.pra ^ CIAB_PRA_SEL)
			    & (CIAB_PRA_SEL | CIAB_PRA_BUSY | CIAB_PRA_POUT)));
#endif
		/* this is essentially a flipflop to have us wait for the
		   first character being transmitted when trying to transmit
		   the second, etc. */
		parsend_pending = 0;
		/* it's quite important that a parallel putc can be
		   interrupted, given the possibility to lock a printer in an
		   offline condition.. */
		error = tsleep(parintr, PCATCH | (PZERO - 1), "parsendch", hz);
		if (error == EWOULDBLOCK)
			error = 0;
		if (error > 0) {
#ifdef DEBUG
			if (pardebug & PDB_INTERRUPT)
				printf("parsendch interrupted, error = %d\n",
				    error);
#endif
		}
	}

	if (!error) {
#ifdef DEBUG
		if (pardebug & PDB_INTERRUPT)
			printf("#%d", ch);
#endif
		ciaa.prb = ch;
		parsend_pending = 1;
	}

	splx(s);

	return error;
}
/*
 * Clock interrupt code for machines using the on cpu chip
 * counter register.  This register counts at half the pipeline
 * frequency so the frequency must be known and the options
 * register wired to allow its use.
 *
 * The code is enabled by setting 'cpu_counter_interval'.
 */
void
clock_int5_init(struct clock_softc *sc)
{
	int s;

	s = splclock();
	cpu_counter_interval = sys_config.cpu[0].clock / (hz * 2);
	cpu_counter_last = cp0_get_count() + cpu_counter_interval * 4;
	cp0_set_compare(cpu_counter_last);
	splx(s);
}
static void
cmos_fetch(void)
{
	int i, s;
	uint8_t *p;

	p = cmos_buf;
	s = splclock();
	for (i = 0; i < CMOS_SIZE; i++)
		*p++ = mc146818_read(NULL, i);
	splx(s);
}
void
timer_queue_shutdown(
	mpqueue_head_t		*queue)
{
	timer_call_t		call;
	mpqueue_head_t		*new_queue;
	spl_t			s;

	DBG("timer_queue_shutdown(%p)\n", queue);

	s = splclock();

	/* Note comma operator in while expression re-locking each iteration */
	while (timer_queue_lock_spin(queue), !queue_empty(&queue->head)) {
		call = TIMER_CALL(queue_first(&queue->head));
		if (!simple_lock_try(&call->lock)) {
			/*
			 * case (2b) lock order inversion, dequeue and skip
			 * Don't change the call_entry queue back-pointer
			 * but set the async_dequeue field.
			 */
			timer_queue_shutdown_lock_skips++;
			timer_call_entry_dequeue_async(call);
#if TIMER_ASSERT
			TIMER_KDEBUG_TRACE(KDEBUG_TRACE,
				DECR_TIMER_ASYNC_DEQ | DBG_FUNC_NONE,
				call,
				call->async_dequeue,
				CE(call)->queue,
				0x2b, 0);
#endif
			timer_queue_unlock(queue);
			continue;
		}

		/* remove entry from old queue */
		timer_call_entry_dequeue(call);
		timer_queue_unlock(queue);

		/* and queue it on new */
		new_queue = timer_queue_assign(CE(call)->deadline);
		timer_queue_lock_spin(new_queue);
		timer_call_entry_enqueue_deadline(
			call, new_queue, CE(call)->deadline);
		timer_queue_unlock(new_queue);

		simple_unlock(&call->lock);
	}

	timer_queue_unlock(queue);
	splx(s);
}
/*
 * Reset the TODR based on the time value.
 */
void
mcclock_set(device_t dev, struct clocktime *ct)
{
	mc_todregs regs;
	int s;

	s = splclock();
	MC146818_GETTOD(dev, &regs);
	splx(s);

	regs[MC_SEC] = ct->sec;
	regs[MC_MIN] = ct->min;
	regs[MC_HOUR] = ct->hour;
	regs[MC_DOW] = ct->dow;
	regs[MC_DOM] = ct->day;
	regs[MC_MONTH] = ct->mon;
	regs[MC_YEAR] = ct->year;

	s = splclock();
	MC146818_PUTTOD(dev, &regs);
	splx(s);
}
void
zsclock_attach(struct device *parent, struct device *self, void *aux)
{
	struct zsc_softc *zsc = (void *)parent;
	struct zsclock_softc *sc = (void *)self;
	struct zsc_attach_args *args = aux;
	struct zs_chanstate *cs;
	int channel;
	int reset, s, tconst;

	channel = args->channel;

	cs = &zsc->zsc_cs[channel];
	cs->cs_private = zsc;
	cs->cs_ops = &zsops_clock;
	sc->zsc_cs = cs;

	printf("\n");

	hz = 100;
	tconst = ((PCLK / 2) / hz) - 2;

	s = splclock();

	reset = (channel == 0) ? ZSWR9_A_RESET : ZSWR9_B_RESET;
	zs_write_reg(cs, 9, reset);

	cs->cs_preg[1] = 0;
	cs->cs_preg[3] = ZSWR3_RX_8 | ZSWR3_RX_ENABLE;
	cs->cs_preg[4] = ZSWR4_CLK_X1 | ZSWR4_ONESB | ZSWR4_PARENB;
	cs->cs_preg[5] = ZSWR5_TX_8 | ZSWR5_TX_ENABLE;
	cs->cs_preg[9] = ZSWR9_MASTER_IE;
	cs->cs_preg[10] = 0;
	cs->cs_preg[11] = ZSWR11_RXCLK_RTXC | ZSWR11_TXCLK_RTXC |
	    ZSWR11_TRXC_OUT_ENA | ZSWR11_TRXC_BAUD;
	cs->cs_preg[12] = tconst;
	cs->cs_preg[13] = tconst >> 8;
	cs->cs_preg[14] = ZSWR14_BAUD_FROM_PCLK | ZSWR14_BAUD_ENA;
	cs->cs_preg[15] = ZSWR15_ZERO_COUNT_IE;

	zs_loadchannelregs(cs);

	splx(s);

	/* enable interrupts */
	cs->cs_preg[1] |= ZSWR1_SIE;
	zs_write_reg(cs, 1, cs->cs_preg[1]);

	zsclock_attached = 1;
}
/*
 * clock_initialize_calendar:
 *
 * Set the calendar and related clocks
 * from the platform clock at boot or
 * wake event.
 *
 * Also sends host notifications.
 */
void
clock_initialize_calendar(void)
{
	clock_sec_t		sys, secs = PEGetGMTTimeOfDay();
	clock_usec_t		microsys, microsecs = 0;
	spl_t			s;

	s = splclock();
	clock_lock();

	commpage_disable_timestamp();

	if ((long)secs >= (long)clock_boottime) {
		/*
		 * Initialize the boot time based on the platform clock.
		 */
		if (clock_boottime == 0)
			clock_boottime = secs;

		/*
		 * Calculate the new calendar epoch based on
		 * the platform clock and the system clock.
		 */
		clock_get_system_microtime(&sys, &microsys);
		TIME_SUB(secs, sys, microsecs, microsys, USEC_PER_SEC);

		/*
		 * Set the new calendar epoch.
		 */
		clock_calend.epoch = secs;

		nanoseconds_to_absolutetime((uint64_t)microsecs * NSEC_PER_USEC, &clock_calend.offset);

		/*
		 * Cancel any adjustment in progress.
		 */
		calend_adjtotal = clock_calend.adjdelta = 0;
	}

	clock_unlock();
	splx(s);

	/*
	 * Send host notifications.
	 */
	host_notify_calendar_change();

#if CONFIG_DTRACE
	clock_track_calend_nowait();
#endif
}
int
pwdog_set_timeout(void *self, int seconds)
{
	struct pwdog_softc *pwdog = (struct pwdog_softc *)self;
	int s;

	s = splclock();
	if (seconds)
		bus_space_write_1(pwdog->iot, pwdog->ioh, PWDOG_ACTIVATE, 0);
	else
		bus_space_write_1(pwdog->iot, pwdog->ioh, PWDOG_DISABLE, 0);
	splx(s);
	return seconds;
}
/*
 * Reset the TODR based on the time value.
 */
void
mcclock_set(struct device *dev, struct clocktime *ct)
{
	struct mcclock_softc *sc = (struct mcclock_softc *)dev;
	mc_todregs regs;
	int s;

	s = splclock();
	MC146818_GETTOD(sc, &regs);
	splx(s);

	regs[MC_SEC] = ct->sec;
	regs[MC_MIN] = ct->min;
	regs[MC_HOUR] = ct->hour;
	regs[MC_DOW] = ct->dow;
	regs[MC_DOM] = ct->day;
	regs[MC_MONTH] = ct->mon;
	regs[MC_YEAR] = ct->year + ALGOR_YEAR_OFFSET;

	s = splclock();
	MC146818_PUTTOD(sc, &regs);
	splx(s);
}
/*
 * Emit tone of frequency thz for given number of centisecs
 */
static void
tone(unsigned int thz, unsigned int centisecs)
{
	int sps, timo;

	if (thz <= 0)
		return;
#ifdef DEBUG
	(void) printf("tone: thz=%d centisecs=%d\n", thz, centisecs);
#endif /* DEBUG */

	/* set timer to generate clicks at given frequency in Hertz */
	sps = splclock();

	if (timer_spkr_acquire()) {
		/* enter list of waiting procs ??? */
		splx(sps);
		return;
	}
	splx(sps);
	disable_intr();
	timer_spkr_setfreq(thz);
	enable_intr();

	/*
	 * Set timeout to endtone function, then give up the timeslice.
	 * This is so other processes can execute while the tone is being
	 * emitted.
	 */
	timo = centisecs * hz / 100;
	if (timo > 0)
		tsleep(&endtone, SPKRPRI | PCATCH, "spkrtn", timo);
	sps = splclock();
	timer_spkr_release();
	splx(sps);
}
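/*
 * Illustrative sketch (hypothetical helper, not part of the driver):
 * since tone() takes a frequency in Hz and a duration in centiseconds,
 * a half-second 440 Hz beep is simply tone(440, 50).
 */
static void
beep_a440(void)
{
	tone(440, 50);	/* 440 Hz for 50 centiseconds = 0.5 s */
}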
static boolean_t
timer_call_enter_internal(
	timer_call_t		call,
	timer_call_param_t	param1,
	uint64_t		deadline,
	uint32_t		flags)
{
	mpqueue_head_t		*queue;
	mpqueue_head_t		*old_queue;
	spl_t			s;
	uint64_t		slop = 0;

	s = splclock();

	call->soft_deadline = deadline;
	call->flags = flags;

	if ((flags & TIMER_CALL_CRITICAL) == 0 &&
	    mach_timer_coalescing_enabled) {
		slop = timer_call_slop(deadline);
		deadline += slop;
	}

#if defined(__i386__) || defined(__x86_64__)
	uint64_t ctime = mach_absolute_time();
	if (__improbable(deadline < ctime)) {
		uint64_t delta = (ctime - deadline);

		past_deadline_timers++;
		past_deadline_deltas += delta;
		if (delta > past_deadline_longest)
			past_deadline_longest = delta;	/* record the longest lateness seen */
		if (delta < past_deadline_shortest)
			past_deadline_shortest = delta;

		deadline = ctime + past_deadline_timer_adjustment;
		call->soft_deadline = deadline;
	}
#endif
	queue = timer_queue_assign(deadline);

	old_queue = timer_call_enqueue_deadline_unlocked(call, queue, deadline);

	CE(call)->param1 = param1;

	splx(s);

	return (old_queue != NULL);
}
void
timer_call_initialize(void)
{
	spl_t			s;

	simple_lock_init(&timer_call_lock, 0);

	s = splclock();
	simple_lock(&timer_call_lock);

	clock_set_timer_func((clock_timer_func_t)timer_call_interrupt);

	simple_unlock(&timer_call_lock);
	splx(s);
}
/*
 * Adjust the Universal (Posix) time gradually.
 */
kern_return_t
host_adjust_time(
	host_t		host,
	time_value_t	newadj,
	time_value_t	*oldadj)	/* OUT */
{
	time_value_t	oadj;
	integer_t	ndelta;
	spl_t		s;

	if (host == HOST_NULL)
		return (KERN_INVALID_HOST);

	ndelta = (newadj.seconds * 1000000) + newadj.microseconds;

#if	NCPUS > 1
	thread_bind(current_thread(), master_processor);
	mp_disable_preemption();
	if (current_processor() != master_processor) {
		mp_enable_preemption();
		thread_block((void (*)(void)) 0);
	} else {
		mp_enable_preemption();
	}
#endif	/* NCPUS > 1 */

	s = splclock();

	oadj.seconds = timedelta / 1000000;
	oadj.microseconds = timedelta % 1000000;

	if (timedelta == 0) {
		if (ndelta > bigadj)
			tickdelta = 10 * tickadj;
		else
			tickdelta = tickadj;
	}
	if (ndelta % tickdelta)
		ndelta = ndelta / tickdelta * tickdelta;

	timedelta = ndelta;

	splx(s);

#if	NCPUS > 1
	thread_bind(current_thread(), PROCESSOR_NULL);
#endif	/* NCPUS > 1 */

	*oldadj = oadj;

	return (KERN_SUCCESS);
}
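/*
 * Worked example of the rounding above (illustrative numbers only, not
 * taken from any particular configuration): suppose tickadj is 5 us
 * and the caller requests newadj = 0.001003 s, so ndelta starts at
 * 1003 us.  With timedelta == 0 and ndelta <= bigadj, tickdelta
 * becomes tickadj = 5; 1003 is not a multiple of 5, so it is rounded
 * down to 1000, and the clock is then slewed by 5 us on each tick for
 * 200 ticks until timedelta reaches zero.
 */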
/*
 * Set the clock deadline.
 */
void
etimer_set_deadline(uint64_t deadline)
{
	rtclock_timer_t		*mytimer;
	spl_t			s;
	struct per_proc_info	*pp;

	s = splclock();				/* no interruptions */
	pp = getPerProc();

	mytimer = &pp->rtclock_timer;		/* Point to the timer itself */
	mytimer->deadline = deadline;		/* Set the new expiration time */

	etimer_resync_deadlines();

	splx(s);
}