// Worker-thread main loop: every `period` mach-absolute-time ticks, signal the
// semaphore of each registered wait whose expiration time has passed, then
// sleep until the next cycle. Runs until the thread is interrupted.
void HighPrecisionClock::runLoop() {
    // Request real-time (time-constraint) scheduling so wakeups are punctual;
    // on failure we log an error but keep running at normal priority.
    if (!(MachThreadSelf("MWorks High Precision Clock").setRealtime(period, computation, period))) {
        merror(M_SCHEDULER_MESSAGE_DOMAIN, "HighPrecisionClock failed to achieve real time scheduling");
    }
    uint64_t startTime = mach_absolute_time();
    while (true) {
        {
            lock_guard lock(waitsMutex);
            // `waits` appears to be a priority queue ordered by expiration time,
            // so we can stop at the first entry that has not yet expired.
            while (!waits.empty() && waits.top().getExpirationTime() <= startTime) {
                logMachError("semaphore_signal", semaphore_signal(waits.top().getSemaphore()));
                waits.pop();
            }
        }
        // Give another thread a chance to terminate this one
        boost::this_thread::interruption_point();
        // Sleep until the next work cycle
        startTime += period;
        if (mach_absolute_time() < startTime) {
            logMachError("mach_wait_until", mach_wait_until(startTime));
        }
    }
}
// Block the calling thread until `expirationTime` (mach absolute time units).
// Each thread lazily creates its own semaphore, cached in thread-specific
// storage; the semaphore is registered with the clock's wait queue and is
// signalled by runLoop once the expiration time passes.
void HighPrecisionClock::wait(uint64_t expirationTime) {
    semaphore_t *sem = threadSpecificSemaphore.get();
    if (!sem) {
        // First wait on this thread: create and cache a semaphore for it.
        semaphore_t newSem;
        // The middle operand uses the comma operator: it stores the new
        // semaphore and yields false, so evaluation falls through to the
        // final re-fetch test.
        if (logMachError("semaphore_create", semaphore_create(mach_task_self(), &newSem, SYNC_POLICY_FIFO, 0)) ||
            (threadSpecificSemaphore.reset(new semaphore_t(newSem)), false) ||
            // NOTE: This final test looks unnecessary, but omitting it leads to server crashes when using
            // Boost 1.60.0
            !(sem = threadSpecificSemaphore.get()))
        {
            // If we can't create the semaphore, do a low-precision wait with mach_wait_until, and hope
            // that semaphore_create will work next time
            if (0 == expirationTime) {
                expirationTime = mach_absolute_time() + period;
            }
            logMachError("mach_wait_until", mach_wait_until(expirationTime));
            return;
        }
    }
    {
        lock_guard lock(waitsMutex);
        waits.push(WaitInfo(expirationTime, *sem));
    }
    // Block until runLoop signals our semaphore at (or after) expirationTime.
    logMachError("semaphore_wait", semaphore_wait(*sem));
}
/* Core sleep routine (float-based), used in MPlayer G2: sleep for
 * time_frame seconds via mach_wait_until and return the overshoot
 * (actual wakeup minus requested deadline) in seconds. */
float sleep_accurate(float time_frame)
{
    const uint64_t target = mach_absolute_time() + time_frame / timebase_ratio;
    mach_wait_until(target);
    return (mach_absolute_time() - target) * timebase_ratio;
}
// Sleep for `time` nanoseconds.
//
// Fixes vs. the previous version:
//  * The old `if ((time - (MWTime)1000000000))` was an inequality test, not a
//    ">= 1 second" test; for time == 1000000000 it took the else branch and
//    set tv_nsec = 1000000000, which is out of range for nanosleep (EINVAL).
//    lldiv() handles every value correctly, so the special case is removed.
//  * `resultstring` now points to const (string literals are immutable).
//  * The log format used %d for `long` arguments (undefined behavior on
//    LP64); it now uses %ld.
void StandardClock::sleepNS(MWTime time){
#ifdef USE_MACH_MOJO
    // Convert nanoseconds to mach absolute-time ticks and sleep until then.
    uint64_t now = mach_absolute_time();
    mach_wait_until(now + (uint64_t)time * tTBI.denom / tTBI.numer);
#else
    // Split the nanosecond count into whole seconds plus remainder.
    lldiv_t parts = lldiv(time, (MWTime)1000000000);
    long seconds = (long)parts.quot;
    long nano = (long)parts.rem;

    struct timespec time_to_sleep;
    time_to_sleep.tv_sec = seconds;
    time_to_sleep.tv_nsec = nano;

    int result = nanosleep(&time_to_sleep, NULL);
    if (result < 0) {
        // Map errno to a human-readable description for the log message.
        const char *resultstring = "Unknown error";
        switch (errno) {
            case EINTR:
                resultstring = "Interrupted by signal";
                break;
            case EINVAL:
                resultstring = "Invalid time value";
                break;
            case ENOSYS:
                resultstring = "Not supported";
                break;
            case EFAULT:
                resultstring = "Invalid address";
                break;
        }
        mprintf("Clock sleep error %d: %s (%ld seconds, %ld nano)",
                errno, resultstring, seconds, nano);
    }
#endif
}
/* PsychWaitIntervalSeconds() -- Block the calling thread for delaySecs
 * seconds, using the Mach absolute-time clock for high precision.
 * Non-positive delays return immediately.
 *
 * Change: removed the unused local `waitResult` (the mach_wait_until
 * return code is consumed directly by the retry loop). */
void PsychWaitIntervalSeconds(double delaySecs)
{
    long double waitPeriodTicks;
    uint64_t startTimeAbsTics, deadlineAbsTics;

    if (delaySecs <= 0)
        return;

    startTimeAbsTics = mach_absolute_time();

    // Lazily initialize the kernel timebase conversion factor.
    if (!isKernelTimebaseFrequencyHzInitialized)
        PsychGetKernelTimebaseFrequencyHz();

    // Convert the delay in seconds to mach absolute-time ticks.
    waitPeriodTicks = kernelTimebaseFrequencyHz * delaySecs;
    deadlineAbsTics = startTimeAbsTics + (uint64_t) waitPeriodTicks;

    // mach_wait_until() can return early with a non-zero code;
    // retry until the deadline has truly been reached.
    while (mach_wait_until(deadlineAbsTics));
}
/* Minimal clock_nanosleep() shim for macOS: only CLOCK_MONOTONIC with
 * TIMER_ABSTIME is supported; anything else returns -1. Sleeps until the
 * absolute time given in *tp; `remain` is ignored (absolute sleeps have no
 * remainder).
 *
 * Fix: the deadline conversion previously divided by (info.numer/info.denom),
 * an INTEGER division that truncates the timebase ratio — wrong on any
 * machine where the ratio is not a whole number (e.g. 125/3 on Apple
 * Silicon) — and routed tv_sec through a double (1e9), losing precision for
 * large deadlines. ticks = ns * denom / numer, computed in 64-bit integers. */
int clock_nanosleep(clockid_t clk_id, int flags, struct timespec *tp, struct timespec *remain)
{
    if (clk_id != CLOCK_MONOTONIC || flags != TIMER_ABSTIME)
        return -1;

    static mach_timebase_info_data_t info = {0, 0};
    if (info.denom == 0)
        mach_timebase_info(&info);

    /* Absolute deadline in nanoseconds, then converted to mach ticks.
     * NOTE(review): ns * denom can overflow uint64 for deadlines decades
     * out with unusual timebases; acceptable for realistic inputs. */
    uint64_t ns = (uint64_t)tp->tv_sec * 1000000000ull + (uint64_t)tp->tv_nsec;
    uint64_t clk = ns * info.denom / info.numer;

    mach_wait_until(clk);
    return 0;
}
void PsychWaitUntilSeconds(double whenSecs) { kern_return_t waitResult; uint64_t deadlineAbsTics; // Compute deadline for wakeup in mach absolute time units: deadlineAbsTics= (uint64_t) (kernelTimebaseFrequencyHz * ((long double) whenSecs)); if (!(deadlineAbsTics > 0 && whenSecs > 0)) return; // Call mach_wait_unit in an endless loop, because it can fail with retcode>0. // In that case we just restart... while(mach_wait_until(deadlineAbsTics)); }
/* PortTime timer thread. Raises its own scheduling priority (non-timeshare,
 * elevated precedence), then invokes the user callback once per
 * `parameters->resolution` milliseconds until pt_callback_proc_id no longer
 * matches this thread's id. Frees `parameters` on exit. */
static void *Pt_CallbackProc(void *p)
{
    pt_callback_parameters *parameters = (pt_callback_parameters *) p;
    int mytime = 1;  /* cycle counter: next target time = mytime * resolution ms */
    kern_return_t error;
    thread_extended_policy_data_t extendedPolicy;
    thread_precedence_policy_data_t precedencePolicy;

    /* Opt out of timesharing so the scheduler treats this as a
     * fixed-priority thread. */
    extendedPolicy.timeshare = 0;
    error = thread_policy_set(mach_thread_self(), THREAD_EXTENDED_POLICY,
                              (thread_policy_t)&extendedPolicy,
                              THREAD_EXTENDED_POLICY_COUNT);
    if (error != KERN_SUCCESS) {
        mach_error("Couldn't set thread timeshare policy", error);
    }

    /* Raise the thread's precedence (importance) within its priority band. */
    precedencePolicy.importance = THREAD_IMPORTANCE;
    error = thread_policy_set(mach_thread_self(), THREAD_PRECEDENCE_POLICY,
                              (thread_policy_t)&precedencePolicy,
                              THREAD_PRECEDENCE_POLICY_COUNT);
    if (error != KERN_SUCCESS) {
        mach_error("Couldn't set thread precedence policy", error);
    }

    /* to kill a process, just increment the pt_callback_proc_id */
    /* printf("pt_callback_proc_id %d, id %d\n", pt_callback_proc_id, parameters->id); */
    while (pt_callback_proc_id == parameters->id) {
        /* wait for a multiple of resolution ms */
        UInt64 wait_time;
        /* Scheduling against absolute multiples of the resolution (rather
         * than "now + resolution") avoids cumulative drift. */
        int delay = mytime++ * parameters->resolution - Pt_Time();
        PtTimestamp timestamp;
        if (delay < 0) delay = 0;
        wait_time = AudioConvertNanosToHostTime((UInt64)delay * NSEC_PER_MSEC);
        wait_time += AudioGetCurrentHostTime();
        error = mach_wait_until(wait_time);
        timestamp = Pt_Time();
        (*(parameters->callback))(timestamp, parameters->userData);
    }
    free(parameters);
    return NULL;
}
/* Timer thread entry point: fires handler(context) once per `interval`
 * (ctx->interval is in 100 ns units; converted to mach ticks via the
 * platform factor) until *quit_flag becomes 1. The context struct is
 * freed immediately after its fields are copied out. */
void* eal_timer_thread(void * data)
{
    lm_timer_ctx_t *ctx = (lm_timer_ctx_t*)data;
    uint64_t factor = 1;
    uint64_t time_to_wait = 0;   /* relative ticks until the next firing */
    uint64_t now = 0;            /* absolute deadline of the current firing */
    uint64_t interval = ctx->interval;
    void (*handler) (void*) = ctx->handler;
    volatile int64_t *quit_flag = ctx->quit_flag;
    void* context = ctx->context;

    /* clean data */
    eal_timer_free_context(ctx);

    /* init factor of nano second */
    eal_time_factor(&factor);

    /* from 100x nano-seconds to nano-seconds */
    time_to_wait = interval * 100llu * factor;
    interval = time_to_wait;

    /* for-loop to work */
    for(;;) {
        if(*quit_flag == 1)
            break;
        now = mach_absolute_time() + time_to_wait;
        mach_wait_until(now);
        /* call user function */
        handler(context);
        /* Next wait is the remainder of the interval after the handler ran.
         * If the handler overran the deadline, the unsigned subtraction
         * wraps to a huge value; the check below converts that case into an
         * immediate re-fire (wait of 0). */
        time_to_wait = now + interval - mach_absolute_time();
        if(time_to_wait > interval)
            time_to_wait = 0;
    }
    return 0;
}
/* Emulation clock thread: for every elapsed slice of gTimeResMilli
 * milliseconds of real time, calls ClockHandler(gMem) gTimeResMilli times,
 * then sleeps until the next slice boundary. Cancellable via pthread_cancel
 * (checked at pthread_testcancel below). */
static void* TimerTask(void* unused)
{
    // Initialization
    mach_timebase_info_data_t tbi;
    mach_timebase_info(&tbi);
    /* invRatio = denom/numer, so: nanoseconds = ticks / invRatio and
     * ticks = nanoseconds * invRatio. */
    double invRatio = ((double)tbi.denom) / ((double)tbi.numer);
    double timeResNanos = gTimeResMilli * 1000000; // In nanosecond
    double nextDateNanos, curDateNanos = mach_absolute_time() / invRatio;
    while (1) {
        pthread_testcancel();
        nextDateNanos = mach_absolute_time () / invRatio;
        /* Catch up: run the handler for every slice that has elapsed since
         * the last wakeup (may be several if we were descheduled). */
        while (curDateNanos < nextDateNanos) {
            long call = gTimeResMilli;
            while (call--) ClockHandler(gMem);
            curDateNanos += timeResNanos;
        }
        /* Sleep until the next slice boundary (convert ns back to ticks). */
        mach_wait_until(curDateNanos * invRatio);
    }
    return 0;
}
/* Sleep until kernelTargetTime (mach absolute-time ticks), waking up
 * kernelEarlyWakeup ticks early and busy-spinning the remainder for
 * precision. Returns the wakeup jitter in ticks (>= 0), or 0 when the
 * target time has already passed. */
int64_t sleepUntil(uint64_t kernelTargetTime, uint64_t kernelEarlyWakeup)
{
    if (mach_absolute_time() > kernelTargetTime)
        return 0;

    /* Coarse phase: hand everything but the early-wakeup margin to the
     * scheduler. */
    // printf("Sleeping for %d\n", kernelTargetTime - now);
    mach_wait_until(kernelTargetTime - kernelEarlyWakeup);

    /* Fine phase: count some sheep (spin) until the deadline passes. */
    unsigned sheep = 0;
    int64_t jitter;
    do {
        jitter = mach_absolute_time() - kernelTargetTime;
        ++sheep;
    } while (jitter < 0);

    return jitter;
}
int main(int argc, char **argv) { uint64_t iterations, i; double *jitter_arr, *fraction_arr; double *wakeup_second_jitter_arr; uint64_t target_time; uint64_t sleep_length_abs; uint64_t min_sleep_ns = 0; uint64_t max_sleep_ns = DEFAULT_MAX_SLEEP_NS; uint64_t wake_time; unsigned random_seed; boolean_t need_seed = TRUE; char ch; int res; kern_return_t kret; my_policy_type_t pol; boolean_t wakeup_second_thread = FALSE; semaphore_t wakeup_semaphore, return_semaphore; double avg, stddev, max, min; double avg_fract, stddev_fract, max_fract, min_fract; uint64_t too_much; struct second_thread_args secargs; pthread_t secthread; mach_timebase_info(&g_mti); /* Seed random */ opterr = 0; while ((ch = getopt(argc, argv, "m:n:hs:w")) != -1 && ch != '?') { switch (ch) { case 's': /* Specified seed for random)() */ random_seed = (unsigned)atoi(optarg); srandom(random_seed); need_seed = FALSE; break; case 'm': /* How long per timer? */ max_sleep_ns = strtoull(optarg, NULL, 10); break; case 'n': /* How long per timer? */ min_sleep_ns = strtoull(optarg, NULL, 10); break; case 'w': /* After each timed wait, wakeup another thread */ wakeup_second_thread = TRUE; break; case 'h': print_usage(); exit(0); break; default: fprintf(stderr, "Got unexpected result from getopt().\n"); exit(1); break; } } argc -= optind; argv += optind; if (argc != 3) { print_usage(); exit(1); } if (min_sleep_ns >= max_sleep_ns) { print_usage(); exit(1); } if (need_seed) { srandom(time(NULL)); } /* What scheduling policy? */ pol = parse_thread_policy(argv[0]); /* How many timers? 
*/ iterations = strtoull(argv[1], NULL, 10); /* How much jitter is so extreme that we should cut a trace point */ too_much = strtoull(argv[2], NULL, 10); /* Array for data */ jitter_arr = (double*)malloc(sizeof(*jitter_arr) * iterations); if (jitter_arr == NULL) { printf("Couldn't allocate array to store results.\n"); exit(1); } fraction_arr = (double*)malloc(sizeof(*fraction_arr) * iterations); if (fraction_arr == NULL) { printf("Couldn't allocate array to store results.\n"); exit(1); } if (wakeup_second_thread) { /* Array for data */ wakeup_second_jitter_arr = (double*)malloc(sizeof(*jitter_arr) * iterations); if (wakeup_second_jitter_arr == NULL) { printf("Couldn't allocate array to store results.\n"); exit(1); } kret = semaphore_create(mach_task_self(), &wakeup_semaphore, SYNC_POLICY_FIFO, 0); if (kret != KERN_SUCCESS) { printf("Couldn't allocate semaphore %d\n", kret); exit(1); } kret = semaphore_create(mach_task_self(), &return_semaphore, SYNC_POLICY_FIFO, 0); if (kret != KERN_SUCCESS) { printf("Couldn't allocate semaphore %d\n", kret); exit(1); } secargs.wakeup_semaphore = wakeup_semaphore; secargs.return_semaphore = return_semaphore; secargs.iterations = iterations; secargs.pol = pol; secargs.wakeup_second_jitter_arr = wakeup_second_jitter_arr; secargs.woke_on_same_cpu = 0; secargs.too_much = too_much; secargs.last_poke_time = 0ULL; secargs.cpuno = 0; res = pthread_create(§hread, NULL, second_thread, &secargs); if (res) { err(1, "pthread_create"); } sleep(1); /* Time for other thread to start up */ } /* Set scheduling policy */ res = thread_setup(pol); if (res != 0) { printf("Couldn't set thread policy.\n"); exit(1); } /* * Repeatedly pick a random timer length and * try to sleep exactly that long */ for (i = 0; i < iterations; i++) { sleep_length_abs = (uint64_t) (get_random_sleep_length_abs_ns(min_sleep_ns, max_sleep_ns) * (((double)g_mti.denom) / ((double)g_mti.numer))); target_time = mach_absolute_time() + sleep_length_abs; /* Sleep */ kret = 
mach_wait_until(target_time); wake_time = mach_absolute_time(); jitter_arr[i] = (double)(wake_time - target_time); fraction_arr[i] = jitter_arr[i] / ((double)sleep_length_abs); /* Too much: cut a tracepoint for a debugger */ if (jitter_arr[i] >= too_much) { kdebug_trace(0xeeeee0 | DBG_FUNC_NONE, 0, 0, 0, 0); } if (wakeup_second_thread) { secargs.last_poke_time = mach_absolute_time(); secargs.cpuno = cpu_number(); OSMemoryBarrier(); kret = semaphore_signal(wakeup_semaphore); if (kret != KERN_SUCCESS) { errx(1, "semaphore_signal"); } kret = semaphore_wait(return_semaphore); if (kret != KERN_SUCCESS) { errx(1, "semaphore_wait"); } } } /* * Compute statistics and output results. */ compute_stats(jitter_arr, iterations, &avg, &max, &min, &stddev); compute_stats(fraction_arr, iterations, &avg_fract, &max_fract, &min_fract, &stddev_fract); putchar('\n'); print_stats_us("jitter", avg, max, min, stddev); print_stats_fract("%", avg_fract, max_fract, min_fract, stddev_fract); if (wakeup_second_thread) { res = pthread_join(secthread, NULL); if (res) { err(1, "pthread_join"); } compute_stats(wakeup_second_jitter_arr, iterations, &avg, &max, &min, &stddev); putchar('\n'); print_stats_us("second jitter", avg, max, min, stddev); putchar('\n'); printf("%llu/%llu (%.1f%%) wakeups on same CPU\n", secargs.woke_on_same_cpu, iterations, 100.0*((double)secargs.woke_on_same_cpu)/iterations); } return 0; }
int nanosleep(const struct timespec *requested_time, struct timespec *remaining_time) { kern_return_t ret; uint64_t end, units; static struct mach_timebase_info info = {0, 0}; static int unity; if ((requested_time == NULL) || (requested_time->tv_sec < 0) || (requested_time->tv_nsec > NSEC_PER_SEC)) { errno = EINVAL; return -1; } if (info.denom == 0) { ret = mach_timebase_info(&info); if (ret != KERN_SUCCESS) { fprintf(stderr, "mach_timebase_info() failed: %s\n", mach_error_string(ret)); errno = EAGAIN; return -1; } /* If numer == denom == 1 (as in intel), no conversion needed */ unity = (info.numer == info.denom); } if(unity) units = (uint64_t)requested_time->tv_sec * NSEC_PER_SEC; else if(!muldiv128((uint64_t)info.denom * NSEC_PER_SEC, (uint64_t)requested_time->tv_sec, (uint64_t)info.numer, &units)) { errno = EINVAL; return -1; } end = mach_absolute_time() + units + (uint64_t)info.denom * requested_time->tv_nsec / info.numer; ret = mach_wait_until(end); if (ret != KERN_SUCCESS) { if (ret == KERN_ABORTED) { errno = EINTR; if (remaining_time != NULL) { uint64_t now = mach_absolute_time(); if (now >= end) { remaining_time->tv_sec = 0; remaining_time->tv_nsec = 0; } else { if(unity) units = (end - now); else muldiv128((uint64_t)info.numer, (end - now), (uint64_t)info.denom, &units); // this can't overflow remaining_time->tv_sec = units / NSEC_PER_SEC; remaining_time->tv_nsec = units % NSEC_PER_SEC; } } } else { errno = EINVAL; } return -1; } return 0; }
/* Sleep for `us` microseconds on the Mach absolute-time clock. */
void mp_sleep_us(int64_t us)
{
    double seconds = us / 1e6;
    uint64_t wakeup = mach_absolute_time() + seconds / timebase_ratio;
    mach_wait_until(wakeup);
}
long do_mach_syscall(void *cpu_env, int num, uint32_t arg1, uint32_t arg2, uint32_t arg3, uint32_t arg4, uint32_t arg5, uint32_t arg6, uint32_t arg7, uint32_t arg8) { extern uint32_t mach_reply_port(); long ret = 0; arg1 = tswap32(arg1); arg2 = tswap32(arg2); arg3 = tswap32(arg3); arg4 = tswap32(arg4); arg5 = tswap32(arg5); arg6 = tswap32(arg6); arg7 = tswap32(arg7); arg8 = tswap32(arg8); DPRINTF("mach syscall %d : " , num); switch(num) { /* see xnu/osfmk/mach/syscall_sw.h */ case -26: DPRINTF("mach_reply_port()\n"); ret = mach_reply_port(); break; case -27: DPRINTF("mach_thread_self()\n"); ret = mach_thread_self(); break; case -28: DPRINTF("mach_task_self()\n"); ret = mach_task_self(); break; case -29: DPRINTF("mach_host_self()\n"); ret = mach_host_self(); break; case -31: DPRINTF("mach_msg_trap(0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", arg1, arg2, arg3, arg4, arg5, arg6, arg7); ret = target_mach_msg_trap((mach_msg_header_t *)arg1, arg2, arg3, arg4, arg5, arg6, arg7); break; /* may need more translation if target arch is different from host */ #if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__)) case -33: DPRINTF("semaphore_signal_trap(0x%x)\n", arg1); ret = semaphore_signal_trap(arg1); break; case -34: DPRINTF("semaphore_signal_all_trap(0x%x)\n", arg1); ret = semaphore_signal_all_trap(arg1); break; case -35: DPRINTF("semaphore_signal_thread_trap(0x%x)\n", arg1, arg2); ret = semaphore_signal_thread_trap(arg1,arg2); break; #endif case -36: DPRINTF("semaphore_wait_trap(0x%x)\n", arg1); extern int semaphore_wait_trap(int); // XXX: is there any header for that? 
ret = semaphore_wait_trap(arg1); break; /* may need more translation if target arch is different from host */ #if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__)) case -37: DPRINTF("semaphore_wait_signal_trap(0x%x, 0x%x)\n", arg1, arg2); ret = semaphore_wait_signal_trap(arg1,arg2); break; #endif case -43: DPRINTF("map_fd(0x%x, 0x%x, 0x%x, 0x%x, 0x%x)\n", arg1, arg2, arg3, arg4, arg5); ret = map_fd(arg1, arg2, (void*)arg3, arg4, arg5); tswap32s((uint32_t*)arg3); break; /* may need more translation if target arch is different from host */ #if (defined(TARGET_I386) && defined(__i386__)) || (defined(TARGET_PPC) && defined(__ppc__)) case -61: DPRINTF("syscall_thread_switch(0x%x, 0x%x, 0x%x)\n", arg1, arg2, arg3); ret = syscall_thread_switch(arg1, arg2, arg3); // just a hint to the scheduler; can drop? break; #endif case -89: DPRINTF("mach_timebase_info(0x%x)\n", arg1); struct mach_timebase_info info; ret = mach_timebase_info(&info); if(!is_error(ret)) { struct mach_timebase_info *outInfo = (void*)arg1; outInfo->numer = tswap32(info.numer); outInfo->denom = tswap32(info.denom); } break; case -90: DPRINTF("mach_wait_until()\n"); extern int mach_wait_until(uint64_t); // XXX: is there any header for that? ret = mach_wait_until(((uint64_t)arg2<<32) | (uint64_t)arg1); break; case -91: DPRINTF("mk_timer_create()\n"); extern int mk_timer_create(); // XXX: is there any header for that? ret = mk_timer_create(); break; case -92: DPRINTF("mk_timer_destroy()\n"); extern int mk_timer_destroy(int); // XXX: is there any header for that? ret = mk_timer_destroy(arg1); break; case -93: DPRINTF("mk_timer_create()\n"); extern int mk_timer_arm(int, uint64_t); // XXX: is there any header for that? ret = mk_timer_arm(arg1, ((uint64_t)arg3<<32) | (uint64_t)arg2); break; case -94: DPRINTF("mk_timer_cancel()\n"); extern int mk_timer_cancel(int, uint64_t *); // XXX: is there any header for that? 
ret = mk_timer_cancel(arg1, (uint64_t *)arg2); if((!is_error(ret)) && arg2) tswap64s((uint64_t *)arg2); break; default: gemu_log("qemu: Unsupported mach syscall: %d(0x%x)\n", num, num); gdb_handlesig (cpu_env, SIGTRAP); exit(0); break; } return ret; }