// Returns a monotonic millisecond counter (time since boot on the platforms
// below).  Returns 0 on platforms with no supported clock source.
uint64_t getTimeMilliseconds(void)
{
    uint64_t milliseconds = 0;
#if (defined(__MACH__) && defined(__APPLE__))
    struct mach_timebase_info convfact;
    mach_timebase_info(&convfact); // get ticks->nanoseconds conversion factor
    // get time in nanoseconds since computer was booted
    // the measurement is different per core
    uint64_t tick = mach_absolute_time();
    milliseconds = (tick * convfact.numer) / (convfact.denom * 1000000);
#elif defined(_WIN64)
    // _WIN32 is defined on 64-bit Windows too, so _WIN64 must be tested
    // FIRST; the original tested _WIN32 first, making this branch unreachable.
    milliseconds = GetTickCount64();
#elif defined(_WIN32)
    // NOTE(review): GetTickCount() wraps after ~49.7 days of uptime.
    milliseconds = GetTickCount();
#elif defined(__unix__)
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    // widen before multiplying so a 32-bit time_t cannot overflow
    // (tv_sec * 1000 overflows a 32-bit long after ~24.8 days of uptime)
    milliseconds = (uint64_t)now.tv_sec * 1000 + (uint64_t)now.tv_nsec / 1000000;
#endif
    return milliseconds;
}
// Returns milliseconds elapsed since _msInitialize() captured the start
// reference (_msStart).  When `checked` is true, _msAddToMsStart is added so
// values remain comparable across internal resets of the reference point.
// Returns 0 if the monotonic clock cannot be read.
TimeMs getms(bool checked) {
	_msInitialize(); // lazily captures _msStart / _msFreq on first use
#ifdef Q_OS_WIN
	LARGE_INTEGER li;
	QueryPerformanceCounter(&li);
	// _msFreq presumably scales QPC ticks to milliseconds — TODO confirm
	// against _msInitialize().
	return ((li.QuadPart - _msStart) * _msFreq) + (checked ? _msAddToMsStart : 0LL);
#elif defined Q_OS_MAC
	auto msCount = static_cast<TimeMs>(mach_absolute_time());
	// _msFreq here presumably scales mach tick units to milliseconds.
	return ((msCount - _msStart) * _msFreq) + (checked ? _msAddToMsStart : 0LL);
#else
	timespec ts;
	auto res = clock_gettime(CLOCK_MONOTONIC, &ts);
	if (res != 0) {
		LOG(("Bad clock_gettime result: %1").arg(res));
		return 0;
	}
	// seconds + nanoseconds folded into a single millisecond count
	auto msCount = 1000LL * static_cast<TimeMs>(ts.tv_sec) + (static_cast<TimeMs>(ts.tv_nsec) / 1000000LL);
	return (msCount - _msStart) + (checked ? _msAddToMsStart : 0LL);
#endif
}
// Returns a monotonic timestamp in microseconds.
Uint64 ClockImpl::GetMicroSeconds()
{
#ifdef SFML_SYSTEM_MACOS
    // Mac OS X specific implementation (it doesn't support clock_gettime):
    // convert mach ticks to nanoseconds via the cached timebase.
    static mach_timebase_info_data_t frequency = {0, 0};
    if (frequency.denom == 0)
        mach_timebase_info(&frequency);
    Uint64 nanoseconds = mach_absolute_time() * frequency.numer / frequency.denom;
    return nanoseconds / 1000;
#else
    // POSIX implementation
    timespec time;
    clock_gettime(CLOCK_MONOTONIC, &time);
    // Widen tv_sec before multiplying: time_t may be 32 bits, in which case
    // tv_sec * 1000000 overflows after ~35 minutes of uptime.
    return static_cast<Uint64>(time.tv_sec) * 1000000 + time.tv_nsec / 1000;
#endif
}
// Millisecond counter relative to _msStart (captured by _msInitialize).
// `checked` adds _msAddToMsStart, keeping values comparable across reference
// resets.  Returns 0 if the monotonic clock cannot be read.
uint64 getms(bool checked) {
	_msInitialize(); // lazily captures _msStart / _msFreq on first use
#ifdef Q_OS_WIN
	LARGE_INTEGER li;
	QueryPerformanceCounter(&li);
	// _msFreq presumably scales QPC ticks to milliseconds — TODO confirm
	// against _msInitialize().
	return (uint64)((li.QuadPart - _msStart) * _msFreq) + (checked ? _msAddToMsStart : 0);
#elif defined Q_OS_MAC
	uint64 msCount = mach_absolute_time();
	// _msFreq here presumably scales mach tick units to milliseconds.
	return (uint64)((msCount - _msStart) * _msFreq) + (checked ? _msAddToMsStart : 0);
#else
	timespec ts;
	int res = clock_gettime(CLOCK_MONOTONIC, &ts);
	if (res != 0) {
		LOG(("Bad clock_gettime result: %1").arg(res));
		return 0;
	}
	// seconds + nanoseconds folded into a single millisecond count
	uint64 msCount = 1000 * uint64(ts.tv_sec) + (uint64(ts.tv_nsec) / 1000000);
	return (uint64)(msCount - _msStart) + (checked ? _msAddToMsStart : 0);
#endif
}
// Stops the counter: accumulates the time elapsed since Start() recorded
// _start into _clocks, then clears the start mark.
void CPerfCounter::Stop(void)
{
    i64 n;
#ifdef _WIN32
    QueryPerformanceCounter((LARGE_INTEGER *)&n);
#elif __APPLE__
    n = mach_absolute_time();
#else
    // NOTE(review): CLOCK_REALTIME can jump (NTP, manual clock set).  Kept
    // as-is because Start() presumably samples the same clock — confirm
    // before switching to CLOCK_MONOTONIC.
    struct timespec s;
    clock_gettime( CLOCK_REALTIME, &s );
    // Integer constant instead of the original `1e9`: the double literal
    // forced the whole expression into floating point, which cannot represent
    // epoch-scale nanosecond values exactly (2^53 ns ~ 104 days), silently
    // losing sub-microsecond precision.
    n = (i64)s.tv_sec * 1000000000LL + (i64)s.tv_nsec;
#endif
    n -= _start;
    _start = 0;
    _clocks += n;
}
/* Returns true iff the key identified by (usage_page, usage) is currently
 * down according to the HID event system; false when it is up or the query
 * fails. */
static bool button_pressed(void *event_system, uint32_t usage_page, uint32_t usage)
{
    /* This magic comes straight from Substrate... I don't really understand
     * what it's doing. In particular, where is the equivalent kernel
     * implementation on OS X? Does it not exist? But I guess Substrate is
     * emulating backboardd. */
    /* Build a throwaway keyboard event used purely as a query template. */
    void *dummy = IOHIDEventCreateKeyboardEvent(NULL, mach_absolute_time(),
                                                usage_page, usage, 0, 0);
    if (!dummy) {
        ib_log("couldn't create dummy HID event");
        return false;
    }
    /* Ask the event system for its current matching keyboard event. */
    void *event = IOHIDEventSystemCopyEvent(event_system, kIOHIDEventTypeKeyboard,
                                            dummy, 0);
    if (!event)
        return false;
    /* A nonzero keyboard-down field means the key is pressed.
     * NOTE(review): `dummy` and `event` come from Create/Copy APIs and are
     * never released — per CF ownership conventions this looks like a leak on
     * every call; confirm whether these private IOHID objects need CFRelease. */
    CFIndex ival = IOHIDEventGetIntegerValue(event, kIOHIDEventFieldKeyboardDown);
    return ival;
}
// Returns the current monotonic time as an sf::Time with microsecond
// resolution.
Time ClockImpl::getCurrentTime()
{
#if defined(SFML_SYSTEM_MACOS) || defined(SFML_SYSTEM_IOS)
    // Mac OS X / iOS path: clock_gettime is unavailable, so convert raw
    // mach_absolute_time() ticks to nanoseconds using the cached timebase.
    static mach_timebase_info_data_t timebase = {0, 0};
    if (timebase.denom == 0)
        mach_timebase_info(&timebase);
    const Uint64 ticks = mach_absolute_time();
    const Uint64 nanos = ticks * timebase.numer / timebase.denom;
    return sf::microseconds(nanos / 1000);
#else
    // POSIX path: monotonic clock with nanosecond resolution.
    timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    const Uint64 seconds = static_cast<Uint64>(now.tv_sec);
    return sf::microseconds(seconds * 1000000 + now.tv_nsec / 1000);
#endif
}
/* Monotonic timestamp in nanoseconds (mach tick count scaled by the cached
 * timebase).  Returns 0 if the timebase cannot be obtained. */
uint64_t amqp_get_monotonic_timestamp(void) {
  static mach_timebase_info_data_t s_timebase = {0, 0};

  uint64_t ticks = mach_absolute_time();

  /* Lazily fetch the tick->nanosecond conversion factor on first use. */
  if (0 == s_timebase.denom) {
    mach_timebase_info(&s_timebase);
    if (s_timebase.denom == 0) {
      return 0;
    }
  }

  return ticks * (uint64_t)s_timebase.numer / (uint64_t)s_timebase.denom;
}
// Returns a monotonic timestamp in nanoseconds.  On Windows the value is
// built from the 32-bit timeGetTime() millisecond counter plus software
// wraparound detection, so its real resolution is only milliseconds there.
uint64 TimeNanos() {
  int64 ticks = 0;
#if defined(OSX) || defined(IOS)
  static mach_timebase_info_data_t timebase;
  if (timebase.denom == 0) {
    // Get the timebase if this is the first time we run.
    // Recommended by Apple's QA1398.
    VERIFY(KERN_SUCCESS == mach_timebase_info(&timebase));
  }
  // Use timebase to convert absolute time tick units into nanoseconds.
  ticks = mach_absolute_time() * timebase.numer / timebase.denom;
#elif defined(POSIX)
  struct timespec ts;
  // TODO: Do we need to handle the case when CLOCK_MONOTONIC
  // is not supported?
  clock_gettime(CLOCK_MONOTONIC, &ts);
  ticks = kNumNanosecsPerSec * static_cast<int64>(ts.tv_sec) +
      static_cast<int64>(ts.tv_nsec);
#elif defined(WIN32)
  static volatile LONG last_timegettime = 0;
  static volatile int64 num_wrap_timegettime = 0;
  volatile LONG* last_timegettime_ptr = &last_timegettime;
  DWORD now = timeGetTime();
  // Atomically update the last gotten time
  DWORD old = InterlockedExchange(last_timegettime_ptr, now);
  if (now < old) {
    // If now is earlier than old, there may have been a race between
    // threads.
    // 0x0fffffff ~3.1 days, the code will not take that long to execute
    // so it must have been a wrap around.
    if (old > 0xf0000000 && now < 0x0fffffff) {
      num_wrap_timegettime++;
    }
  }
  // Each detected wrap contributes 2^32 milliseconds to the running total.
  // NOTE(review): num_wrap_timegettime is incremented without atomicity, so
  // two threads observing the same wrap could double-count — confirm callers
  // tolerate this.
  ticks = now + (num_wrap_timegettime << 32);
  // TODO: Calculate with nanosecond precision. Otherwise, we're just
  // wasting a multiply and divide when doing Time() on Windows.
  ticks = ticks * kNumNanosecsPerMillisec;
#endif
  return ticks;
}
/* Returns a high-resolution time value in seconds as an OCaml float.
 * Windows: wall-clock seconds since the Unix epoch (QPC-based when
 * available).  macOS / other: seconds on a monotonic scale, only meaningful
 * as a difference between two calls. */
CAMLprim value sys_time() {
#ifdef _WIN32
#define EPOCH_DIFF	(134774*24*60*60.0)
	static LARGE_INTEGER freq;
	static int freq_init = -1;
	LARGE_INTEGER counter;
	if( freq_init == -1 )
		freq_init = QueryPerformanceFrequency(&freq);
	if( !freq_init || !QueryPerformanceCounter(&counter) ) {
		/* Fallback: derive seconds from the system wall clock. */
		SYSTEMTIME t;
		FILETIME ft;
		ULARGE_INTEGER ui;
		GetSystemTime(&t);
		if( !SystemTimeToFileTime(&t,&ft) )
			failwith("sys_cpu_time");
		ui.LowPart = ft.dwLowDateTime;
		ui.HighPart = ft.dwHighDateTime;
		/* FILETIME is in 100ns units since 1601; shift to the Unix epoch. */
		return caml_copy_double( ((double)ui.QuadPart) / 10000000.0 - EPOCH_DIFF );
	}
	return caml_copy_double( ((double)counter.QuadPart) / ((double)freq.QuadPart) );
#elif __APPLE__
	uint64_t time;
	uint64_t elapsedNano;
	static mach_timebase_info_data_t sTimebaseInfo;
	time = mach_absolute_time();
	if ( sTimebaseInfo.denom == 0 ) {
		(void) mach_timebase_info(&sTimebaseInfo);
	}
	elapsedNano = time * sTimebaseInfo.numer / sTimebaseInfo.denom;
	/* Return the CONVERTED nanosecond value.  The original returned the raw
	 * tick count (`time`) and left elapsedNano unused, so the result was in
	 * undefined units wherever the timebase is not 1/1. */
	return caml_copy_double(elapsedNano / 1000000000.0);
#else
	struct timespec t;
	clock_gettime(CLOCK_MONOTONIC_RAW, &t);
	return caml_copy_double(TimeSpecToSeconds(t));
#endif
}
/// "Universal" wallclock time (works at least for Mac, MTA, and most Linux) inline double walltime(void) { #if defined(__MTA__) return((double)mta_get_clock(0) / mta_clock_freq()); #elif defined(__MACH__) static mach_timebase_info_data_t info; mach_timebase_info(&info); uint64_t now = mach_absolute_time(); now *= info.numer; now /= info.denom; return 1.0e-9 * (double)now; #else struct timespec tp; #if defined(CLOCK_PROCESS_CPUTIME_ID) #define CLKID CLOCK_PROCESS_CPUTIME_ID #elif defined(CLOCK_REALTIME_ID) #define CLKID CLOCK_REALTIME_ID #endif clock_gettime(CLOCK_MONOTONIC, &tp); return (double)tp.tv_sec + (double)tp.tv_nsec / BILLION; #endif }
/* Appends one entry to the SMAP state-change ring buffer.  The slot index is
 * claimed lock-free: loop re-reading smaplog_head until the compare-and-swap
 * advances it past the index we observed, which reserves that slot for this
 * thread. */
static void smaplog_add_entry(boolean_t enabling)
{
	uint32_t index = 0;
	thread_t thread = current_thread();

	do {
		index = smaplog_head;
	} while (!OSCompareAndSwap(index, (index + 1) % SMAPLOG_BUFFER_SIZE, &smaplog_head));

	assert(index < SMAPLOG_BUFFER_SIZE);
	assert(smaplog_head < SMAPLOG_BUFFER_SIZE);
	assert(thread);

	/* Fill the reserved slot with a snapshot of the current CPU state. */
	smaplog_cbuf[index].timestamp = mach_absolute_time();
	smaplog_cbuf[index].thread = thread;
	smaplog_cbuf[index].cpuid = cpu_number();
	smaplog_cbuf[index].cr4 = get_cr4();
	smaplog_cbuf[index].smap_state = enabling;
	smaplog_cbuf[index].copyio_active = (thread->machine.specFlags & CopyIOActive) ? 1 : 0;
}
/// Resets the initial reference time. void btClock::reset() { #ifdef BT_USE_WINDOWS_TIMERS QueryPerformanceCounter(&m_data->mStartTime); m_data->mStartTick = GetTickCount64(); #else #ifdef __CELLOS_LV2__ typedef uint64_t ClockSize; ClockSize newTime; //__asm __volatile__( "mftb %0" : "=r" (newTime) : : "memory"); SYS_TIMEBASE_GET( newTime ); m_data->mStartTime = newTime; #else #ifdef __APPLE__ m_data->mStartTimeNano = mach_absolute_time(); #endif gettimeofday(&m_data->mStartTime, 0); #endif #endif }
int64_t now(void) { #if defined __APPLE__ static mach_timebase_info_data_t dill_mtid = {0}; if (dill_slow(!dill_mtid.denom)) mach_timebase_info(&dill_mtid); uint64_t ticks = mach_absolute_time(); return (int64_t)(ticks * dill_mtid.numer / dill_mtid.denom / 1000000); #elif defined CLOCK_MONOTONIC struct timespec ts; int rc = clock_gettime(CLOCK_MONOTONIC, &ts); dill_assert (rc == 0); return ((int64_t)ts.tv_sec) * 1000 + (((int64_t)ts.tv_nsec) / 1000000); #else /* This is slow and error-prone (time can jump backwards!) but it's just a last resort option. */ struct timeval tv; int rc = gettimeofday(&tv, NULL); assert(rc == 0); return ((int64_t)tv.tv_sec) * 1000 + (((int64_t)tv.tv_usec) / 1000); #endif }
/* Fills `timer` with the current mach absolute time, split into whole
 * seconds and remaining nanoseconds.  Always returns 0. */
int mwGetHighResTime_RealTime(MWHighResTime* timer)
{
    uint64_t ns;
    static mach_timebase_info_data_t timeInfo;

    /* Fetch the tick->nanosecond conversion factor on first call only. */
    if (timeInfo.denom == 0)
    {
        (void) mach_timebase_info(&timeInfo);
    }

    /* Read the tick counter and convert to nanoseconds. */
    ns = mach_absolute_time();
    ns = (ns * timeInfo.numer) / timeInfo.denom;

    timer->sec = ns / nsPerSec;
    timer->nSec = ns - nsPerSec * timer->sec;

    return 0;
}
// Returns a millisecond tick counter on a monotonic scale.  The value is
// truncated to 32 bits, so it wraps; only differences are meaningful.
unsigned int GetTick()
{
#if defined(__linux)
    struct timespec ts;
    unsigned int theTick = 0U;
    clock_gettime(CLOCK_MONOTONIC, &ts);
    theTick = ts.tv_nsec / 1000000;
    theTick += ts.tv_sec * 1000;
    return theTick;
#elif defined(__APPLE__)
    // Cache the tick->nanosecond conversion factor: it is constant for the
    // process lifetime, and the original re-queried it on every call.
    static mach_timebase_info_data_t info = {0, 0};
    if (info.denom == 0)
        mach_timebase_info(&info);
    uint64_t value = mach_absolute_time();
    value *= info.numer;   // ticks -> nanoseconds
    value /= info.denom;
    value /= 1000000;      // nanoseconds -> milliseconds
    return (unsigned int)value;
#else
    return (unsigned int)GetTickCount();
#endif
}
/*
 * Returns the given thread's accumulated virtual time: system timer plus
 * user timer.  Returns 0 for THREAD_NULL.  In non-STAT_TIME builds the
 * in-progress slice since the current processor's active timer last rolled
 * over is added, since the timers have not absorbed it yet.
 */
int64_t dtrace_calc_thread_recent_vtime(thread_t thread)
{
#if STAT_TIME
	if (thread != THREAD_NULL) {
		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer));
	} else
		return 0;
#else
	if (thread != THREAD_NULL) {
		processor_t processor = current_processor();
		uint64_t abstime = mach_absolute_time();
		timer_t timer;

		timer = PROCESSOR_DATA(processor, thread_timer);

		/* (now - timer->tstamp) is the partial interval not yet charged to
		 * either timer. */
		return timer_grab(&(thread->system_timer)) + timer_grab(&(thread->user_timer)) +
				(abstime - timer->tstamp); /* XXX need interrupts off to prevent missed time? */
	} else
		return 0;
#endif
}
// Milliseconds since an arbitrary reference (process start on Windows,
// boot-relative monotonic elsewhere), truncated to 32 bits — wraps every
// ~49.7 days; only differences are meaningful.
uint32 platform_get_ticks()
{
#ifdef _WIN32
	LARGE_INTEGER pfc;
	QueryPerformanceCounter(&pfc);

	// Delta against the counter captured at startup (_entryTimestamp),
	// scaled by _frequency — presumably QPC counts per millisecond; TODO
	// confirm against the initialisation code.
	LARGE_INTEGER runningDelta;
	runningDelta.QuadPart = pfc.QuadPart - _entryTimestamp.QuadPart;

	return (uint32)(runningDelta.QuadPart / _frequency);
#elif defined(__APPLE__) && (__ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 101200)
	// Pre-10.12 macOS has no clock_gettime: convert mach ticks to
	// nanoseconds via the cached _mach_base_info, then to milliseconds.
	return (uint32)(((mach_absolute_time() * _mach_base_info.numer) / _mach_base_info.denom) / 1000000);
#else
	struct timespec ts;
	if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
		log_fatal("clock_gettime failed");
		exit(-1);
	}
	return (uint32)(ts.tv_sec * 1000 + ts.tv_nsec / 1000000);
#endif
}
/****************************************************************************
**
*F  SyNanosecondsSinceEpoch()
**
**  Returns a 64-bit integer counting nanoseconds from an unspecified
**  starting point, so the value is only meaningful as a difference between
**  two consecutive calls (wallclock measurement).  Accuracy is system
**  dependent; for systems implementing clock_getres we could get the
**  promised accuracy.
**
**  gettimeofday is obsolete in POSIX but still widely implemented, so it is
**  used as a last resort; values obtained through it are NOT guaranteed to
**  be monotonic.
**
**  Returns -1 to represent failure.
*/
Int8 SyNanosecondsSinceEpoch(void)
{
  Int8 res;

#if defined(SYS_IS_DARWIN)
  /* Fetch the mach timebase once; it never changes for the process. */
  static mach_timebase_info_data_t timeinfo;
  if ( timeinfo.denom == 0 ) {
    (void) mach_timebase_info(&timeinfo);
  }
  res = mach_absolute_time();
  res = res * timeinfo.numer / timeinfo.denom;
#elif defined(HAVE_CLOCK_GETTIME) && defined(CLOCK_MONOTONIC)
  struct timespec ts;
  if (clock_gettime(CLOCK_MONOTONIC, &ts) != 0) {
    res = -1;
  } else {
    res = (Int8)ts.tv_sec * 1000000000L + ts.tv_nsec;
  }
#elif defined(HAVE_GETTIMEOFDAY)
  struct timeval tv;
  if (gettimeofday(&tv, NULL) != 0) {
    res = -1;
  } else {
    /* microseconds first, then scale to nanoseconds */
    res = ((Int8)tv.tv_sec * 1000000L + tv.tv_usec) * 1000;
  }
#else
  res = -1;
#endif

  return res;
}
// taken from uv_hrtime(), see http://docs.libuv.org/en/v1.x/misc.html uint64_t CTimeMeter::GetCurrUSec() { #if BEYONDENGINE_PLATFORM == PLATFORM_WIN32 LARGE_INTEGER liCounter; QueryPerformanceCounter(&liCounter); LARGE_INTEGER liFreq; QueryPerformanceFrequency(&liFreq); return (uint64_t)((double)liCounter.QuadPart * 1000000 / liFreq.QuadPart); #elif BEYONDENGINE_PLATFORM == PLATFORM_IOS // see https://github.com/libuv/libuv/blob/v1.x/src/unix/darwin.c static mach_timebase_info_data_t info; if ((ACCESS_ONCE(uint32_t, info.numer) == 0 || ACCESS_ONCE(uint32_t, info.denom) == 0)) { int r = mach_timebase_info(&info); BEATS_ASSERT(r == KERN_SUCCESS, "mach_timebase_info() return %d", r); } return (mach_absolute_time() * info.numer / info.denom) / 1000; #else // see https://github.com/libuv/libuv/blob/v1.x/src/unix/linux-core.c // // Try CLOCK_BOOTTIME first, fall back to CLOCK_MONOTONIC if not available (pre-2.6.39 kernels). // CLOCK_MONOTONIC doesn't increase when the system is suspended. static volatile int no_clock_boottime; struct timespec now = {}; int r = 0; if (no_clock_boottime) { RETRY: r = clock_gettime(CLOCK_MONOTONIC, &now); } else if ((r = clock_gettime(CLOCK_BOOTTIME, &now)) && errno == EINVAL) { no_clock_boottime = 1; goto RETRY; } BEATS_ASSERT(r == 0, "clock_gettime() %d failed with %d", no_clock_boottime, r); return ((uint64_t)now.tv_sec * (uint64_t)1e9 + now.tv_nsec) / 1000; #endif }
void etimer_resync_deadlines(void) { uint64_t deadline; uint64_t pmdeadline; rtclock_timer_t *mytimer; spl_t s = splclock(); cpu_data_t *pp; uint32_t decr; pp = current_cpu_datap(); deadline = EndOfAllTime; /* * If we have a clock timer set, pick that. */ mytimer = &pp->rt_timer; if (!mytimer->has_expired && 0 < mytimer->deadline && mytimer->deadline < EndOfAllTime) deadline = mytimer->deadline; /* * Go and set the "pop" event. */ if (deadline > 0 && deadline <= pp->rtcPop) { int decr; uint64_t now; now = mach_absolute_time(); decr = setPop(deadline); if (deadline < now) { pp->rtcPop = now + decr; } else { pp->rtcPop = deadline; } } splx(s); }
// ---------------------------------------------------------- void ofxAudioUnitFilePlayer::play(uint64_t startTime) // ---------------------------------------------------------- { if(!(_region.mTimeStamp.mFlags & kAudioTimeStampHostTimeValid)) { cout << "ofxAudioUnitFilePlayer has no file to play" << endl; return; } OFXAU_RETURN(AudioUnitSetProperty(*_unit, kAudioUnitProperty_ScheduledFileIDs, kAudioUnitScope_Global, 0, _fileID, sizeof(_fileID)), "setting file player's file ID"); OFXAU_RETURN(AudioUnitSetProperty(*_unit, kAudioUnitProperty_ScheduledFileRegion, kAudioUnitScope_Global, 0, &_region, sizeof(_region)), "setting file player region"); if(startTime == 0) { startTime = mach_absolute_time(); } AudioTimeStamp startTimeStamp = {0}; FillOutAudioTimeStampWithHostTime(startTimeStamp, startTime); OFXAU_RETURN(AudioUnitSetProperty(*_unit, kAudioUnitProperty_ScheduleStartTimeStamp, kAudioUnitScope_Global, 0, &startTimeStamp, sizeof(startTimeStamp)), "setting file player start time"); }
// Get current time in microseconds... UInt64 GetNanoseconds(void) { #if defined(OVR_CAPTURE_HAS_MACH_ABSOLUTE_TIME) // OSX/iOS doesn't have clock_gettime()... but it does have gettimeofday(), or even better mach_absolute_time() // which is about 50% faster than gettimeofday() and higher precision! // Only 24.5ns per GetNanoseconds() call! But we can do better... // It seems that modern Darwin already returns nanoseconds, so numer==denom // when we test that assumption it brings us down to 16ns per GetNanoseconds() call!!! // Timed on MacBookPro running OSX. static mach_timebase_info_data_t info = {0}; if(!info.denom) mach_timebase_info(&info); const UInt64 t = mach_absolute_time(); if(info.numer==info.denom) return t; return (t * info.numer) / info.denom; #elif defined(OVR_CAPTURE_HAS_CLOCK_GETTIME) // 23ns per call on i7 Desktop running Ubuntu 64 // >800ns per call on Galaxy Note 4 running Android 4.3!!! struct timespec tp; clock_gettime(CLOCK_MONOTONIC, &tp); return ((UInt64)tp.tv_sec)*1000000000 + (UInt64)tp.tv_nsec; #elif defined(OVR_CAPTURE_HAS_GETTIMEOFDAY) // Just here for reference... this timer is only microsecond level of precision, and >2x slower than the mach timer... // And on non-mach platforms clock_gettime() is the preferred method... // 34ns per call on MacBookPro running OSX... // 23ns per call on i7 Desktop running Ubuntu 64 // >800ns per call on Galaxy Note 4 running Android 4.3!!! struct timeval tv; gettimeofday(&tv, 0); const UInt64 us = ((UInt64)tv.tv_sec)*1000000 + (UInt64)tv.tv_usec; return us*1000; #else #error Unknown Platform! #endif }
// Returns a millisecond tick count (monotonic-ish; source varies per
// platform — CPU time via clock() on the generic fallback).
long int GetTicks(void)
{
#ifdef WIN32
	// don't use GetTickCount anymore because it's not accurate enough (~16ms resolution)
	// don't use QueryPerformanceCounter anymore because it isn't guaranteed to be strictly increasing on some systems and thus requires "smoothing" code
	// use timeGetTime instead, which typically has a high resolution (5ms or more) but we request a lower resolution on startup
	return timeGetTime( );
#elif __APPLE__
	long int current = mach_absolute_time( );
	static mach_timebase_info_data_t info = { 0, 0 };
	// get timebase info
	if( info.denom == 0 )
		mach_timebase_info( &info );
	// Multiply BEFORE dividing: the original computed (numer / denom) first
	// in integer arithmetic, which truncates — to 0 whenever numer < denom
	// (e.g. ARM timebases like 1/24) — producing a wrong or zero tick count.
	long int elapsednano = current * info.numer / info.denom;
	// convert ns to ms
	return elapsednano / 1e6;
#else
	return (double)clock();
#endif
}
/* return a monotonic time counter, in Win32 ticks (100-nanosecond units) */
static ULONGLONG monotonic_counter(void)
{
    struct timeval now;
#ifdef HAVE_CLOCK_GETTIME
    struct timespec ts;
#ifdef CLOCK_MONOTONIC_RAW
    /* the RAW variant is immune to NTP slewing; prefer it when available */
    if (!clock_gettime( CLOCK_MONOTONIC_RAW, &ts ))
        return ts.tv_sec * (ULONGLONG)TICKSPERSEC + ts.tv_nsec / 100;
#endif
    if (!clock_gettime( CLOCK_MONOTONIC, &ts ))
        return ts.tv_sec * (ULONGLONG)TICKSPERSEC + ts.tv_nsec / 100;
#elif defined(__APPLE__)
    static mach_timebase_info_data_t timebase;

    /* fetch the tick->nanosecond timebase once, then scale to 100ns units */
    if (!timebase.denom) mach_timebase_info( &timebase );
    return mach_absolute_time() * timebase.numer / timebase.denom / 100;
#endif
    /* last resort: wall clock relative to server start — NOT monotonic */
    gettimeofday( &now, 0 );
    return now.tv_sec * (ULONGLONG)TICKSPERSEC + now.tv_usec * 10 + TICKS_1601_TO_1970 - server_start_time;
}
// Returns the current monotonic time in 32.32 fixed point: whole seconds in
// the upper 32 bits, fractional seconds scaled into the lower 32 bits.
// Returns 0 on platforms where no clock source is compiled in (the original
// returned an indeterminate, uninitialized value in that case).
uint64_t get_absolute_time_in_fp() {
  uint64_t time_now_fp = 0;
#ifdef COMPILE_FOR_LINUX_AND_FREEBSD_AND_CYGWIN
  struct timespec tn;
  // can't use CLOCK_MONOTONIC_RAW as it's not implemented in OpenWrt
  clock_gettime(CLOCK_MONOTONIC, &tn);
  time_now_fp =
      ((uint64_t)tn.tv_sec << 32) + ((uint64_t)tn.tv_nsec << 32) / 1000000000; // types okay
#endif
#ifdef COMPILE_FOR_OSX
  uint64_t time_now_mach;
  uint64_t elapsedNano;
  static mach_timebase_info_data_t sTimebaseInfo = {0, 0};

  time_now_mach = mach_absolute_time();

  // If this is the first time we've run, get the timebase.
  // We can use denom == 0 to indicate that sTimebaseInfo is
  // uninitialised because it makes no sense to have a zero
  // denominator in a fraction.
  if (sTimebaseInfo.denom == 0) {
    debug(1, "Mac initialise timebase info.");
    (void)mach_timebase_info(&sTimebaseInfo);
  }

  // Do the maths. We hope that the multiplication doesn't
  // overflow; the price you pay for working in fixed point.
  // this gives us nanoseconds
  uint64_t time_now_ns = time_now_mach * sTimebaseInfo.numer / sTimebaseInfo.denom;

  // take the units and shift them to the upper half of the fp, and take the nanoseconds, shift them
  // to the upper half and then divide the result to 1000000000
  time_now_fp = ((time_now_ns / 1000000000) << 32) + (((time_now_ns % 1000000000) << 32) / 1000000000);
#endif
  return time_now_fp;
}
// Stops timer `id`, removes it from the timer table, logs and returns the
// elapsed time in milliseconds.
int RDebug::stopTimer(int id, const QString& msg)
{
#ifdef Q_OS_MAC
    Nanoseconds elapsedNano;
    uint64_t end = mach_absolute_time();
    // timerMac[id] presumably holds the mach_absolute_time() captured by the
    // matching startTimer — TODO confirm against the start path.
    uint64_t elapsed = end - timerMac[id];
    // Legacy Carbon conversion: reinterpret the tick delta as an
    // AbsoluteTime and let AbsoluteToNanoseconds apply the timebase.
    elapsedNano = AbsoluteToNanoseconds( *(AbsoluteTime *) &elapsed );
    // ns -> ms
    int t = (unsigned int)((* (uint64_t *) &elapsedNano) / 1000000);
    timerMac.remove(id);
#else
    int t = timer[id].elapsed();
    timer.remove(id);
#endif
/*    va_list varg;
    va_start(varg, format);
    fprintf(stream, "TIMER: %d ms ", t);
    printV(format, varg);
    va_end(varg);
*/
    qDebug() << "TIMER: " << t << "ms - " << msg;
    return t;
}
// Returns a monotonic millisecond tick count, truncated to 32 bits (wraps;
// only differences are meaningful).
quint32 GetTicks( )
{
#ifdef WIN32
	return timeGetTime( );
#elif __APPLE__
	uint64_t current = mach_absolute_time( );
	static mach_timebase_info_data_t info = { 0, 0 };
	// get timebase info
	if( info.denom == 0 )
		mach_timebase_info( &info );
	// Multiply BEFORE dividing: the original evaluated (numer / denom) first
	// with integer division, which truncates — to 0 whenever numer < denom
	// (e.g. ARM timebases) — yielding a wrong or zero tick count.
	uint64_t elapsednano = current * info.numer / info.denom;
	// convert ns to ms
	return elapsednano / 1e6;
#else
	uint32_t ticks;
	struct timespec t;
	// use the named constant rather than the magic clock id `1`: the raw
	// value happens to be CLOCK_MONOTONIC on Linux but is not portable
	// (e.g. it is a different clock on FreeBSD).
	clock_gettime( CLOCK_MONOTONIC, &t );
	ticks = t.tv_sec * 1000;
	ticks += t.tv_nsec / 1000000;
	return ticks;
#endif
}
/* Re-points the crash reporter at new report/state file paths and a new
 * crash ID, re-initialises the persistent crash state, and stamps the app
 * launch time. */
void kscrash_reinstall(const char* const crashReportFilePath,
                       const char* const recrashReportFilePath,
                       const char* const stateFilePath,
                       const char* const crashID)
{
    KSLOG_TRACE("reportFilePath = %s", crashReportFilePath);
    KSLOG_TRACE("secondaryReportFilePath = %s", recrashReportFilePath);
    KSLOG_TRACE("stateFilePath = %s", stateFilePath);
    KSLOG_TRACE("crashID = %s", crashID);

    /* ksstring_replace presumably frees the old string and stores a copy of
     * the new one — confirm against its definition. */
    ksstring_replace((const char**)&g_stateFilePath, stateFilePath);
    ksstring_replace((const char**)&g_crashReportFilePath, crashReportFilePath);
    ksstring_replace((const char**)&g_recrashReportFilePath, recrashReportFilePath);

    KSCrash_Context* context = crashContext();
    ksstring_replace(&context->config.crashID, crashID);

    if(!kscrashstate_init(g_stateFilePath, &context->state))
    {
        KSLOG_ERROR("Failed to initialize persistent crash state");
    }
    /* Mach-absolute-time launch reference stored in the crash state. */
    context->state.appLaunchTime = mach_absolute_time();
}
// Returns a raw high-resolution timestamp: mach ticks on Darwin, QPC counts
// on Windows, and nanoseconds (CLOCK_MONOTONIC) on unix.  Units differ per
// platform, so callers must scale with the matching frequency/timebase.
// Returns 0 on platforms with no supported clock source.
uint64_t getTimeHighRes(void)
{
    uint64_t timestamp = 0;
#if (defined(__MACH__) && defined(__APPLE__))
    // Raw tick count since boot (measurement differs per core).  The original
    // also queried the ticks->nanoseconds timebase here but never used it;
    // that dead call has been removed — callers get raw ticks, consistent
    // with the raw QPC counts returned on Windows.
    timestamp = mach_absolute_time();
#elif defined(_WIN32)
    LARGE_INTEGER hpc;
    QueryPerformanceCounter(&hpc);
    timestamp = (uint64_t)hpc.QuadPart;
#elif defined(__unix__)
    struct timespec now;
    clock_gettime(CLOCK_MONOTONIC, &now);
    // widen before multiplying so a 32-bit time_t cannot overflow
    timestamp = (uint64_t)now.tv_sec * 1000000000 + (uint64_t)now.tv_nsec;
#endif
    return timestamp;
}