/** Waits until jobs arrive in the dispatch queue and processes them. */
static void * workerLoop(struct workerStartData *startInfo) {
    /* Initialized the (thread local) random seed */
    UA_random_seed((uintptr_t)startInfo);
    /* Register this thread with liburcu so it may read RCU-protected data. */
    rcu_register_thread();
    /* Heap-allocated iteration counter, published through
     * startInfo->workerCounter so the server can observe worker progress.
     * NOTE(review): UA_malloc result is unchecked — a failed allocation
     * crashes in uatomic_set below; confirm that is acceptable. */
    UA_UInt32 *c = UA_malloc(sizeof(UA_UInt32));
    uatomic_set(c, 0);
    *startInfo->workerCounter = c;
    UA_Server *server = startInfo->server;
    /* startInfo was allocated by the thread creator; ownership passed to us. */
    UA_free(startInfo);
    pthread_mutex_t mutex; // required for the condition variable
    pthread_mutex_init(&mutex,0);
    pthread_mutex_lock(&mutex);
    /* NOTE(review): every worker waits on the shared dispatchQueue_condition
     * using its own private mutex; POSIX leaves concurrent waits on one
     * condition variable with different mutexes undefined — verify intent. */
    struct timespec to;
    while(*server->running) {
        /* Try to take the next batch of jobs off the wait-free queue. */
        struct DispatchJobsList *wln = (struct DispatchJobsList*)
            cds_wfcq_dequeue_blocking(&server->dispatchQueue_head, &server->dispatchQueue_tail);
        if(wln) {
            processJobs(server, wln->jobs, wln->jobsSize);
            /* The job list and its array were allocated by the producer;
             * the worker frees them after processing. */
            UA_free(wln->jobs);
            UA_free(wln);
        } else {
            /* sleep until a work arrives (and wakes up all worker threads) */
#if defined(__APPLE__) || defined(__MACH__) // OS X does not have clock_gettime, use clock_get_time
            clock_serv_t cclock;
            mach_timespec_t mts;
            host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
            clock_get_time(cclock, &mts);
            mach_port_deallocate(mach_task_self(), cclock);
            to.tv_sec = mts.tv_sec;
            to.tv_nsec = mts.tv_nsec;
#else
            clock_gettime(CLOCK_REALTIME, &to);
#endif
            /* Wake at least every two seconds to re-check server->running. */
            to.tv_sec += 2;
            pthread_cond_timedwait(&server->dispatchQueue_condition, &mutex, &to);
        }
        uatomic_inc(c); // increase the workerCounter;
    }
    pthread_mutex_unlock(&mutex);
    pthread_mutex_destroy(&mutex);
    rcu_barrier(); // wait for all scheduled call_rcu work to complete
    rcu_unregister_thread();
    /* we need to return _something_ for pthreads */
    return NULL;
}
void current_utc_time(struct timespec *ts) { #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts->tv_sec = mts.tv_sec; ts->tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_REALTIME, ts); #endif }
/* Emulated clock_gettime() for old OS X.
 * CLOCK_REALTIME / CLOCK_MONOTONIC are served by the Mach calendar/system
 * clock services; the CPU-time clock ids are approximated by timing a tiny
 * syscall/yield with mach_absolute_time.
 * Returns 0 on success, or -1 with errno = EINVAL on failure. */
int clock_gettime(clockid_t clk_id, struct timespec *tp) {
    kern_return_t ret;
    clock_serv_t clk;
    clock_id_t clk_serv_id;
    mach_timespec_t tm;
    uint64_t start, end, delta, nano;
    int retval = -1;
    switch (clk_id) {
    case CLOCK_REALTIME:
    case CLOCK_MONOTONIC:
        clk_serv_id = clk_id == CLOCK_REALTIME ? CALENDAR_CLOCK : SYSTEM_CLOCK;
        if (KERN_SUCCESS == (ret = host_get_clock_service(mach_host_self(), clk_serv_id, &clk))) {
            if (KERN_SUCCESS == (ret = clock_get_time(clk, &tm))) {
                tp->tv_sec = tm.tv_sec;
                tp->tv_nsec = tm.tv_nsec;
                retval = 0;
            }
        }
        if (KERN_SUCCESS != ret) {
            errno = EINVAL;
            retval = -1;
        }
        break;
    case CLOCK_PROCESS_CPUTIME_ID:
    case CLOCK_THREAD_CPUTIME_ID:
        start = mach_absolute_time();
        if (clk_id == CLOCK_PROCESS_CPUTIME_ID) {
            getpid();
        } else {
            sched_yield();
        }
        end = mach_absolute_time();
        delta = end - start;
        if (0 == clock_gettime_inf.denom) {
            /* Lazily initialize the timebase conversion factors. */
            mach_timebase_info(&clock_gettime_inf);
        }
        nano = delta * clock_gettime_inf.numer / clock_gettime_inf.denom;
        /* BUG FIX: the tv_sec/tv_nsec split previously went through double
         * arithmetic (nano * 1e-9 and nano - tv_sec * 1e9), which is subject
         * to floating-point rounding; exact integer div/mod is correct. */
        tp->tv_sec = nano / 1000000000;
        tp->tv_nsec = nano % 1000000000;
        retval = 0;
        break;
    default:
        errno = EINVAL;
        retval = -1;
    }
    return retval;
}
time_t get_monotonic_timestamp() { struct timespec ts; #if defined(__MACH__) && ! defined(CLOCK_REALTIME) // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts.tv_sec = mts.tv_sec; ts.tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_REALTIME, &ts); #endif return ts.tv_sec; }
/// \return .tv_sec .tv_nsec inline struct timespec monotonic_timespec() { struct timespec ts; #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts.tv_sec = mts.tv_sec; ts.tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_MONOTONIC, &ts); #endif return ts; }
/* Minimal clock_gettime() shim for old OS X, backed by the Mach system clock.
 * NOTE(review): clk_id is ignored — SYSTEM_CLOCK is always queried — and the
 * raw kern_return_t is returned rather than the POSIX 0 / -1+errno contract;
 * confirm callers only compare the result against zero. */
int clock_gettime(int clk_id, struct timespec *ts) {
    clock_serv_t cclock;
    mach_timespec_t mts;
    kern_return_t retval = KERN_SUCCESS;
    host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock);
    retval = clock_get_time(cclock, &mts);
    /* Release the clock service send right to avoid a port-right leak. */
    mach_port_deallocate(mach_task_self(), cclock);
    ts->tv_sec = mts.tv_sec;
    ts->tv_nsec = mts.tv_nsec;
    return retval;
}
u64 raw__nanoseconds_since_1970() { struct timespec ts; #ifdef F2__APPLE // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts.tv_sec = mts.tv_sec; ts.tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_REALTIME, &ts); #endif return (((u64)ts.tv_sec) * nanoseconds_per_second) + ((u64)ts.tv_nsec); }
/* Returns a timestamp scaled by __FREQUENCY plus the nanosecond remainder.
 * NOTE(review): the Mach branch queries CALENDAR_CLOCK (wall time) while the
 * #else branch uses CLOCK_MONOTONIC — the two branches read different clocks;
 * confirm which one callers actually expect. */
uint64_t cputime() {
#ifdef __MACH__
    // OS X does not have clock_gettime, use clock_get_time
    clock_serv_t cclock;
    mach_timespec_t t;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &t);
    mach_port_deallocate(mach_task_self(), cclock);
    // assumes __FREQUENCY is the per-second scale matching tv_nsec units — TODO confirm
    return t.tv_sec * __FREQUENCY + t.tv_nsec;
#else
    timespec t;
    clock_gettime(CLOCK_MONOTONIC, &t);
    return t.tv_sec * __FREQUENCY + t.tv_nsec;
#endif
}
/* clock_gettime() stand-in for old OS X. Only the (project-specific) magic
 * clock id 42 is accepted; any other id fails with EINVAL. */
int clock_gettime(int i, struct timespec *ts) {
    if (i != 42) {
        errno = EINVAL;
        return -1;
    }
    clock_serv_t calendar_clock;
    mach_timespec_t now;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &calendar_clock);
    clock_get_time(calendar_clock, &now);
    mach_port_deallocate(mach_task_self(), calendar_clock);
    ts->tv_sec = now.tv_sec;
    ts->tv_nsec = now.tv_nsec;
    return 0;
}
/* One-time initialization of the platform timing backend; caches the Mach
 * calendar clock port (Apple) or the QPC frequency (Windows) in globals.
 * NOTE(review): the assert duplicates the if-guard below — with NDEBUG a
 * second call is silently ignored instead of trapping; `ret` is also unused
 * in NDEBUG builds. */
void wTimeInit() {
    assert(!gTimeInitDone);
    if (!gTimeInitDone) {
#if defined(__APPLE__)
        /* Cache the calendar clock service port in gClockServ. */
        int ret = host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &gClockServ);
        assert(ret == 0);
#elif defined(__WIN32__)
        /* Cache the performance-counter tick frequency in gFrequency. */
        int ret = QueryPerformanceFrequency(&gFrequency);
        assert(ret != 0);
#endif
        gTimeInitDone = true;
    }
}
//------------------------------------------------------------------------------ // Clock::GetTime //------------------------------------------------------------------------------ double Clock::GetTime() { #if defined(WIN32) QueryPerformanceFrequency(&_frequency); QueryPerformanceCounter(&_counter); return (double(_counter.QuadPart) / double(_frequency.QuadPart)); #elif __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); return mts.tv_sec + 1e-9 * mts.tv_nsec; #endif }
static inline void current_utc_time(struct timespec *ts) { #if defined(__APPLE__) && defined(__MACH__) clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts->tv_sec = mts.tv_sec; ts->tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_REALTIME, ts); #endif }
int clock_gettime(clock_t clock_id, struct timespec *timestamp) { clock_serv_t clock_server; mach_timespec_t mach_timestamp; host_get_clock_service(mach_host_self(), clock_id, &clock_server); clock_get_time(clock_server, &mach_timestamp); timestamp->tv_sec = mach_timestamp.tv_sec; timestamp->tv_nsec = mach_timestamp.tv_nsec; mach_port_deallocate(mach_task_self(), clock_server); return 0; }
double current_time() { struct timespec ts; #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts.tv_sec = mts.tv_sec; ts.tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_REALTIME, &ts); #endif return (double)ts.tv_sec + (double)ts.tv_nsec / 1000000000.0; }
/* Fills *time with the current wall-clock time (sec/nanosec fields). */
void corto_timeGet(corto_time* time) {
#ifdef __MACH__
    /* OS X has no clock_gettime; query the Mach calendar clock instead. */
    clock_serv_t host_clock;
    mach_timespec_t now;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &host_clock);
    clock_get_time(host_clock, &now);
    mach_port_deallocate(mach_task_self(), host_clock);
    time->sec = now.tv_sec;
    time->nanosec = now.tv_nsec;
#else
    struct timespec now;
    clock_gettime(CLOCK_REALTIME, &now);
    time->sec = now.tv_sec;
    time->nanosec = now.tv_nsec;
#endif
}
// the portable function for clock_gettime ;-) void current_utc_time(struct timespec *ts) { // mac implementation of clock_get_time by: jbenet (https://gist.github.com/1087739) #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts->tv_sec = mts.tv_sec; ts->tv_nsec = mts.tv_nsec; #elif _WIN32 clock_gettime(CLOCK_REALTIME, (struct timeval*)ts); #else /* LINUX */ clock_gettime(CLOCK_REALTIME, ts); #endif }
/* One-time setup: caches the Mach SYSTEM_CLOCK service port (used for
 * nanosleep-style waits) and a FIFO semaphore in file-scope globals
 * clock_port / clock_sem. Aborts on failure — no recovery is possible this
 * early in process setup. */
void _init_clock_port(void) {
    kern_return_t kr;
    mach_port_t host = mach_host_self();
    /* Get the clock service port for nanosleep */
    kr = host_get_clock_service(host, SYSTEM_CLOCK, &clock_port);
    if (kr != KERN_SUCCESS) {
        abort();
    }
    /* NOTE(review): mach_task_self_ (trailing underscore) is the cached task
     * port global — confirm it is already initialized when this runs. */
    kr = semaphore_create(mach_task_self_, &clock_sem, SYNC_POLICY_FIFO, 0);
    if (kr != KERN_SUCCESS) {
        abort();
    }
    /* Drop the extra host port right acquired above. */
    mach_port_deallocate(mach_task_self(), host);
}
/* Returns the current time in milliseconds as an aTimestamp.
 * NOTE(review): despite the "monotonic" name, this reads CALENDAR_CLOCK
 * (wall time), which can jump backwards — confirm callers tolerate that. */
avdecc_lib_os::aTimestamp timer::clk_monotonic(void) {
    clock_serv_t host_clock;
    mach_timespec_t now;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &host_clock);
    clock_get_time(host_clock, &now);
    mach_port_deallocate(mach_task_self(), host_clock);
    struct timespec tp;
    tp.tv_sec = now.tv_sec;
    tp.tv_nsec = now.tv_nsec;
    avdecc_lib_os::aTimestamp time;
    time = (avdecc_lib_os::aTimestamp)(tp.tv_sec * 1000)
         + (avdecc_lib_os::aTimestamp)(tp.tv_nsec / 1000000);
    return time;
}
// Fill the stop point void check() { #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); tsstop.tv_sec = mts.tv_sec; tsstop.tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_REALTIME, &tsstop); #endif cstop = clock(); }
/* Initializes an AJ_Time with the current time (seconds + milliseconds).
 * BUG FIX: the Mach clock calls were made unconditionally, breaking every
 * non-Apple build (the portable clock_gettime path had been commented out).
 * The Mach path is now gated on __MACH__, with CLOCK_MONOTONIC for everyone
 * else, matching the previously commented-out intent. */
void AJ_InitTimer(AJ_Time* timer) {
    struct timespec now;
#ifdef __MACH__
    /* Old OS X lacks clock_gettime; read the Mach calendar clock. */
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    now.tv_sec = mts.tv_sec;
    now.tv_nsec = mts.tv_nsec;
#else
    clock_gettime(CLOCK_MONOTONIC, &now);
#endif
    timer->seconds = now.tv_sec;
    timer->milliseconds = now.tv_nsec / 1000000;
}
unsigned long getMilliSec() { struct timespec ts; #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts.tv_sec = mts.tv_sec; ts.tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_REALTIME, &ts); #endif return 1000 * ts.tv_sec + ts.tv_nsec/1000000; }
/** * Stops the high resolution timer */ void stop_timer(struct hrtimer_t *timer) { #ifdef LINUX clock_gettime(CLOCK_REALTIME, &timer->stop); #elif __APPLE__ // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); timer->stop.tv_sec = mts.tv_sec; timer->stop.tv_nsec = mts.tv_nsec; #else QueryPerformanceCounter(&timer->stop); #endif }
// Возвращает в милисекундах разницу во времени между двумя временными отсчётами void CTimePrivate::captureTime() { #ifdef __MACH__ // OS X does not have clock_gettime, use clock_get_time clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); m_tspec.tv_sec = mts.tv_sec; m_tspec.tv_nsec = mts.tv_nsec; m_bIsValid = true; #else m_bIsValid = clock_gettime(CLOCK_MONOTONIC, &m_tspec) == 0; #endif assert(m_bIsValid == bool("Error calling clock_gettime")); }
int clock_gettime_internal(struct timespec * ts, int type) { clock_serv_t cclock; mach_timespec_t mts; kern_return_t retval; // Get the time host_get_clock_service(mach_host_self(), type, &cclock); retval = clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts->tv_sec = mts.tv_sec; ts->tv_nsec = mts.tv_nsec; return (retval == KERN_SUCCESS) ? GETTIME_SUCCESS : GETTIME_FAIL; }
static unsigned long long current_time_ns() { #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); unsigned long long s = 1000000000ULL * (unsigned long long)mts.tv_sec; return (unsigned long long)mts.tv_nsec + s; #else struct timespec t ={0,0}; clock_gettime(CLOCK_MONOTONIC, &t); unsigned long long s = 1000000000ULL * (unsigned long long)t.tv_sec; return (((unsigned long long)t.tv_nsec)) + s; #endif }
/* Ruby extension method: returns the current monotonic time as a Float
 * (fractional seconds).
 * BUG FIX: the non-Mach branch previously queried CLOCK_REALTIME, which can
 * jump (NTP, settimeofday) — inconsistent with both the function name and
 * the Mach branch's SYSTEM_CLOCK. It now uses CLOCK_MONOTONIC. */
VALUE gettime_monotonic(VALUE self) {
    struct timespec ts;
#ifdef __MACH__
    // OS X does not have clock_gettime, use clock_get_time
    clock_serv_t cclock;
    mach_timespec_t mts;
    host_get_clock_service(mach_host_self(), SYSTEM_CLOCK, &cclock);
    clock_get_time(cclock, &mts);
    mach_port_deallocate(mach_task_self(), cclock);
    ts.tv_sec = mts.tv_sec;
    ts.tv_nsec = mts.tv_nsec;
#else
    clock_gettime(CLOCK_MONOTONIC, &ts);
#endif
    return rb_float_new((double)ts.tv_sec + (double)ts.tv_nsec * 1e-9);
}
void clock_now(struct timespec *ts) { #if defined(__MACH__) /* this is not quite monotonic time, but hopefully it's good enough */ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts->tv_sec = mts.tv_sec; ts->tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_MONOTONIC, ts); #endif }
/** * portable implementation for clock_gettime(CLOCK_REALTIME, ts) */ int ser_clock_gettime(struct timespec *ts) { #ifdef __OS_darwin clock_serv_t cclock; mach_timespec_t mts; /* OS X does not have clock_gettime, use clock_get_time */ host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); ts->tv_sec = mts.tv_sec; ts->tv_nsec = mts.tv_nsec; return 0; #else return clock_gettime(CLOCK_REALTIME, ts); #endif }
void simple_tic /* returns current time in seconds and nanoseconds */ ( double tic [2] /* tic [0]: seconds, tic [1]: nanoseconds */ ) { #if defined ( _OPENMP ) /* OpenMP is available; use the OpenMP timer function */ tic [0] = omp_get_wtime ( ) ; tic [1] = 0 ; #elif defined ( __linux__ ) /* Linux has a very low resolution clock() function, so use the high resolution clock_gettime instead. May require -lrt */ struct timespec t ; clock_gettime (CLOCK_MONOTONIC, &t) ; tic [0] = (double) t.tv_sec ; tic [1] = (double) t.tv_nsec ; #elif defined ( __MACH__ ) clock_serv_t cclock ; mach_timespec_t t ; host_get_clock_service (mach_host_self ( ), SYSTEM_CLOCK, &cclock) ; clock_get_time (cclock, &t) ; mach_port_deallocate (mach_task_self ( ), cclock) ; tic [0] = (double) t.tv_sec; tic [1] = (double) t.tv_nsec; #else /* The ANSI C11 clock() function is used instead. This gives the processor time, not the wallclock time, and it might have low resolution. It returns the time since some unspecified fixed time in the past, as a clock_t integer. The clock ticks per second are given by CLOCKS_PER_SEC. In Mac OSX this is a very high resolution clock, and clock ( ) is faster than clock_get_time (...) ; */ clock_t t = clock ( ) ; tic [0] = ((double) t) / ((double) CLOCKS_PER_SEC) ; tic [1] = 0 ; #endif }
static uint64_t get_timestamp_mili() { struct timespec stamp; #ifdef __MACH__ clock_serv_t cclock; mach_timespec_t mts; host_get_clock_service(mach_host_self(), CALENDAR_CLOCK, &cclock); clock_get_time(cclock, &mts); mach_port_deallocate(mach_task_self(), cclock); stamp.tv_sec = mts.tv_sec; stamp.tv_nsec = mts.tv_nsec; #else clock_gettime(CLOCK_MONOTONIC, &stamp); #endif return ((stamp.tv_sec * 1000000000) + stamp.tv_nsec)/1000000; }