void
thread_read_times(
    thread_t        thread,
    time_value_t    *user_time,
    time_value_t    *system_time)
{
    clock_sec_t     secs;
    clock_usec_t    usecs;
    uint64_t        tval_user, tval_system;

    tval_user = timer_grab(&thread->user_timer);
    tval_system = timer_grab(&thread->system_timer);

    if (thread->precise_user_kernel_time) {
        absolutetime_to_microtime(tval_user, &secs, &usecs);
        user_time->seconds = (typeof(user_time->seconds))secs;
        user_time->microseconds = usecs;

        absolutetime_to_microtime(tval_system, &secs, &usecs);
        system_time->seconds = (typeof(system_time->seconds))secs;
        system_time->microseconds = usecs;
    } else {
        /* system_timer may represent either sys or user */
        tval_user += tval_system;
        absolutetime_to_microtime(tval_user, &secs, &usecs);
        user_time->seconds = (typeof(user_time->seconds))secs;
        user_time->microseconds = usecs;

        system_time->seconds = 0;
        system_time->microseconds = 0;
    }
}
void
thread_read_times(
    thread_t        thread,
    time_value_t    *user_time,
    time_value_t    *system_time)
{
    absolutetime_to_microtime(timer_grab(&thread->user_timer),
            (unsigned *)&user_time->seconds, (unsigned *)&user_time->microseconds);

    absolutetime_to_microtime(timer_grab(&thread->system_timer),
            (unsigned *)&system_time->seconds, (unsigned *)&system_time->microseconds);
}
void
thread_read_times(
    thread_t        thread,
    time_value_t    *user_time,
    time_value_t    *system_time)
{
    clock_sec_t     secs;
    clock_usec_t    usecs;

    absolutetime_to_microtime(timer_grab(&thread->user_timer), &secs, &usecs);
    user_time->seconds = (typeof(user_time->seconds))secs;
    user_time->microseconds = usecs;

    absolutetime_to_microtime(timer_grab(&thread->system_timer), &secs, &usecs);
    system_time->seconds = (typeof(system_time->seconds))secs;
    system_time->microseconds = usecs;
}
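/*
 * Hypothetical caller sketch (not from the sources): shows how the user and
 * system components returned by thread_read_times() might be folded into a
 * single microsecond total.  The helper name total_cpu_usecs() is illustrative
 * only; it assumes a valid thread reference is already held by the caller.
 */
static uint64_t
total_cpu_usecs(thread_t thread)
{
    time_value_t    user_time, system_time;

    thread_read_times(thread, &user_time, &system_time);

    /*
     * Sum both components; note that some variants above report all time in
     * user_time and zero system_time when precise accounting is unavailable.
     */
    return ((uint64_t)user_time.seconds + (uint64_t)system_time.seconds) * USEC_PER_SEC +
        (uint64_t)user_time.microseconds + (uint64_t)system_time.microseconds;
}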
/*
 *	clock_get_calendar_nanotime_nowait
 *
 *	Description:	Non-blocking version of clock_get_calendar_nanotime()
 *
 *	Notes:		This function operates by separately tracking calendar time
 *			updates using a two element structure to copy the calendar
 *			state, which may be asynchronously modified.  It utilizes
 *			barrier instructions in the tracking process and in the local
 *			stable snapshot process in order to ensure that a consistent
 *			snapshot is used to perform the calculation.
 */
void
clock_get_calendar_nanotime_nowait(
    clock_sec_t     *secs,
    clock_nsec_t    *nanosecs)
{
    int i = 0;
    uint64_t        now;
    struct unlocked_clock_calend stable;

    for (;;) {
        stable = flipflop[i];       /* take snapshot */

        /*
         * Use barrier instructions to ensure atomicity.  We AND
         * off the "in progress" bit to get the current generation
         * count.
         */
        (void)hw_atomic_and(&stable.gen, ~(uint32_t)1);

        /*
         * If an update _is_ in progress, the generation count will be
         * off by one, if it _was_ in progress, it will be off by two,
         * and if we caught it at a good time, it will be equal (and
         * our snapshot is therefore stable).
         */
        if (flipflop[i].gen == stable.gen)
            break;

        /* Switch to the other element of the flipflop, and try again. */
        i ^= 1;
    }

    now = mach_absolute_time();

    if (stable.calend.adjdelta < 0) {
        uint32_t    t32;

        if (now > stable.calend.adjstart) {
            t32 = (uint32_t)(now - stable.calend.adjstart);

            if (t32 > stable.calend.adjoffset)
                now -= stable.calend.adjoffset;
            else
                now = stable.calend.adjstart;
        }
    }

    now += stable.calend.offset;

    absolutetime_to_microtime(now, secs, nanosecs);
    *nanosecs *= NSEC_PER_USEC;

    *secs += (clock_sec_t)stable.calend.epoch;
}
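/*
 * Hedged sketch of the update (writer) side implied by the notes above: each
 * calendar change is copied into both elements of the flipflop array, with the
 * low bit of gen serving as the "in progress" flag that the reader strips off.
 * The function name calend_track_nowait() is illustrative, and hw_atomic_or()/
 * hw_atomic_add() are assumed to provide the same barrier semantics as the
 * hw_atomic_and() used by the reader; this is a sketch, not the actual sources.
 */
static void
calend_track_nowait(void)
{
    int i;

    for (i = 0; i < 2; i++) {
        struct clock_calend tmp = clock_calend;

        /* Set the low bit: flags an update in progress to async readers. */
        (void)hw_atomic_or(&flipflop[i].gen, 1);

        flipflop[i].calend = tmp;

        /*
         * Increment gen to clear the low bit and publish the new state; a
         * reader whose before/after generation counts match knows its
         * snapshot was taken outside any update window.
         */
        (void)hw_atomic_add(&flipflop[i].gen, 1);
    }
}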
/*
 *	clock_get_calendar_microtime:
 *
 *	Returns the current calendar value,
 *	microseconds as the fraction.
 */
void
clock_get_calendar_microtime(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs)
{
    uint64_t        now;
    spl_t           s;

    s = splclock();
    clock_lock();

    now = mach_absolute_time();

    if (clock_calend.adjdelta < 0) {
        uint32_t    t32;

        /*
         * Since offset is decremented during a negative adjustment,
         * ensure that time increases monotonically without going
         * temporarily backwards.
         * If the delta has not yet passed, now is set to the start
         * of the current adjustment period; otherwise, we're between
         * the expiry of the delta and the next call to calend_adjust(),
         * and we offset accordingly.
         */
        if (now > clock_calend.adjstart) {
            t32 = (uint32_t)(now - clock_calend.adjstart);

            if (t32 > clock_calend.adjoffset)
                now -= clock_calend.adjoffset;
            else
                now = clock_calend.adjstart;
        }
    }

    now += clock_calend.offset;

    absolutetime_to_microtime(now, secs, microsecs);

    *secs += (clock_sec_t)clock_calend.epoch;

    clock_unlock();
    splx(s);
}
/*
 *	clock_gettimeofday:
 *
 *	Kernel interface for commpage implementation of
 *	gettimeofday() syscall.
 *
 *	Returns the current calendar value, and updates the
 *	commpage info as appropriate.  Because most calls to
 *	gettimeofday() are handled in user mode by the commpage,
 *	this routine should be used infrequently.
 */
void
clock_gettimeofday(
    clock_sec_t     *secs,
    clock_usec_t    *microsecs)
{
    uint64_t        now;
    spl_t           s;

    s = splclock();
    clock_lock();

    now = mach_absolute_time();

    if (clock_calend.adjdelta >= 0) {
        clock_gettimeofday_set_commpage(now, clock_calend.epoch, clock_calend.offset, secs, microsecs);
    }
    else {
        uint32_t    t32;

        if (now > clock_calend.adjstart) {
            t32 = (uint32_t)(now - clock_calend.adjstart);

            if (t32 > clock_calend.adjoffset)
                now -= clock_calend.adjoffset;
            else
                now = clock_calend.adjstart;
        }

        now += clock_calend.offset;

        absolutetime_to_microtime(now, secs, microsecs);

        *secs += (clock_sec_t)clock_calend.epoch;
    }

    clock_unlock();
    splx(s);
}
/*
 *	clock_get_calendar_nanotime:
 *
 *	Returns the current calendar value,
 *	nanoseconds as the fraction.
 *
 *	Since we do not have an interface to
 *	set the calendar with resolution greater
 *	than a microsecond, we honor that here.
 */
void
clock_get_calendar_nanotime(
    clock_sec_t     *secs,
    clock_nsec_t    *nanosecs)
{
    uint64_t        now;
    spl_t           s;

    s = splclock();
    clock_lock();

    now = mach_absolute_time();

    if (clock_calend.adjdelta < 0) {
        uint32_t    t32;

        if (now > clock_calend.adjstart) {
            t32 = (uint32_t)(now - clock_calend.adjstart);

            if (t32 > clock_calend.adjoffset)
                now -= clock_calend.adjoffset;
            else
                now = clock_calend.adjstart;
        }
    }

    now += clock_calend.offset;

    absolutetime_to_microtime(now, secs, nanosecs);
    *nanosecs *= NSEC_PER_USEC;

    *secs += (clock_sec_t)clock_calend.epoch;

    clock_unlock();
    splx(s);
}
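/*
 * Hypothetical caller sketch (not from the sources): packs the calendar value
 * returned by clock_get_calendar_nanotime() into a mach_timespec_t.  The
 * helper name calendar_timespec() is illustrative only.
 */
static mach_timespec_t
calendar_timespec(void)
{
    clock_sec_t     secs;
    clock_nsec_t    nanosecs;
    mach_timespec_t ts;

    clock_get_calendar_nanotime(&secs, &nanosecs);

    /* Note the microsecond granularity documented above: nanosecs is a
       whole multiple of NSEC_PER_USEC. */
    ts.tv_sec = (unsigned int)secs;
    ts.tv_nsec = (clock_res_t)nanosecs;

    return ts;
}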