int main(int argc, char **argv)
{
	pid_t pid[32], p;
	int i;
	uint64_t start, end, usec;

	printf("Create directories...\n");
	start = read_tsc();
	create_dir(PATH_PREFIX);
	for (i = 0; i < nr_threads; i++) {
		p = fork();
		if (p == 0) {
			worker((void *) (long) i);
			exit(0);
		}
		pid[i] = p;
	}
	for (i = 0; i < nr_threads; i++)
		waitpid(pid[i], NULL, 0);
	end = read_tsc();
	usec = (end - start) * 1000000 / get_cpu_freq();
	printf("usec: %lu\n", usec);
	printf("Cleanup directories...\n");
	/* system("rm -rf /tmp/_dirs"); */
	return 0;
}
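/* Note on the conversion above: (end - start) * 1000000 can overflow
 * 64 bits once the measured interval exceeds 2^64 / 10^6 ticks (about
 * 100 minutes at 3 GHz).  A sketch of an overflow-resistant variant,
 * illustrative and not from the original source: split into whole
 * seconds and a sub-second remainder before scaling. */
#include <stdint.h>

static uint64_t ticks_to_usec(uint64_t ticks, uint64_t freq)
{
	uint64_t sec = ticks / freq;	/* whole seconds */
	uint64_t rem = ticks % freq;	/* leftover ticks, < freq */

	/* rem * 1000000 cannot overflow for any realistic freq */
	return sec * 1000000 + (rem * 1000000) / freq;
}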
void vcore_entry(void)
{
	uint32_t vcoreid = vcore_id();

	if (vcoreid) {
		mcs_barrier_wait(&b, vcoreid);
		udelay(5000000);
		if (vcoreid == 1)
			printf("Proc %d's vcores are yielding\n", getpid());
		sys_yield(0);
	} else {
		/* trip the barrier here, all future times are in the loop */
		mcs_barrier_wait(&b, vcoreid);
		while (1) {
			udelay(15000000);
			printf("Proc %d requesting its cores again\n", getpid());
			begin = read_tsc();
			sys_resource_req(RES_CORES, max_vcores(), 1, REQ_SOFT);
			mcs_barrier_wait(&b, vcoreid);
			end = read_tsc();
			printf("Took %llu usec (%llu nsec) to get my yielded cores back.\n",
			       udiff(begin, end), ndiff(begin, end));
			printf("[T]:010:%llu:%llu\n",
			       udiff(begin, end), ndiff(begin, end));
		}
	}
	printf("We're screwed!\n");
	exit(-1);
}
void *work(void *thread_arg)
{
	while (!flag)
		;
	thread_params *thread_param = (thread_params *)thread_arg;
	int tid = thread_param->thread_id;
	int arrival_lambda = thread_param->arrival_lambda;
	double service_time = 0.00001;
	uint64_t start_time = read_tsc();
	int s;	/* return value of pthread_setaffinity_np */

	s = pthread_setaffinity_np(threads[tid], sizeof(cpu_set_t), &cpuset[tid]);
	if (s != 0)
		fprintf(stderr, "set affinity error: %s\n", strerror(s));
	/* thread 0 holds off for two service times before competing */
	if (tid == 0) {
		while (read_tsc() - start_time < 2 * service_time * CPU_FREQ)
			;
	}
	uint64_t getlock_time = read_tsc();
	pthread_spin_lock(thread_param->spinlock_ptr);
	/* hold the lock for one service time */
	while (read_tsc() - getlock_time < service_time * CPU_FREQ)
		;
	pthread_spin_unlock(thread_param->spinlock_ptr);
	uint64_t end_cs_time = read_tsc();
	num_access_each_thread[tid]++;
	time_in_cs[tid] += (end_cs_time - getlock_time) / (double)CPU_FREQ;
	return NULL;
}
void memory_stress_rand(perf_counter_t *pc, long *working_area,
			long working_size)
{
	register unsigned long i;
	register long *ptr;
	long *ptr_start;
	unsigned long *shufflearray;
	const unsigned long niter = 2 << 10;
	struct timespec stime, ftime;
	register uintptr_t t0, t1;

	{	/* initialize shuffled pointer loop */
		const unsigned long ncacheline = working_size / CACHELINE_SIZE;
		const unsigned long step = CACHELINE_SIZE / sizeof(long);
		unsigned long offset, tmp;

		if ((shufflearray = (unsigned long *)calloc(ncacheline,
							    sizeof(long))) == NULL) {
			perror("calloc()");
			exit(EXIT_FAILURE);
		}
		for (i = 0; i < ncacheline; i++)
			shufflearray[i] = i;
		/* randomly shuffle the cacheline indices */
		for (i = 0; i < ncacheline; i++) {
			offset = drand48() * ncacheline;
			tmp = shufflearray[i];
			shufflearray[i] = shufflearray[offset];
			shufflearray[offset] = tmp;
		}
		/* link the cachelines into one circular pointer chain */
		ptr_start = working_area + (shufflearray[0] * step);
		for (i = 1, ptr = ptr_start; i < ncacheline;
		     i++, ptr = (long *)*ptr)
			*ptr = (long)(working_area + (shufflearray[i] * step));
		*ptr = (long)ptr_start;
		free(shufflearray);

		/* check loop: chasing the chain must return to the start */
		for (i = 1, ptr = (long *)*ptr_start; i < ncacheline;
		     i++, ptr = (long *)*ptr)
			;
		if (ptr != ptr_start) {
			fprintf(stderr, "initialization failed : broken loop\n");
			exit(EXIT_FAILURE);
		}
	}

	CLOCK_GETTIME(&stime);
	CLOCK_GETTIME(&ftime);
	while (TIMEINTERVAL_SEC(stime, ftime) < option.timeout) {
		t0 = read_tsc();
		ptr = ptr_start;
		for (i = 0; i < niter; i++) {
#include "membench-inner-rand.c"
		}
		t1 = read_tsc();
		pc->clk += t1 - t0;
		pc->ops += niter * MEM_INNER_LOOP_RANDOM_NUM_OPS;
		CLOCK_GETTIME(&ftime);
	}
	pc->wallclocktime = TIMEINTERVAL_SEC(stime, ftime);
}
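/* A minimal, self-contained sketch (not from the benchmark above) of the
 * pointer-chasing idea memory_stress_rand() sets up: every element stores
 * the address of the next, so each load depends on the previous one and
 * the hardware prefetcher cannot run ahead.  All names here are
 * illustrative. */
#include <stdio.h>

int main(void)
{
	enum { N = 4 };
	long storage[N];

	/* build a ring: storage[i] holds the address of storage[i + 1] */
	for (int i = 0; i < N; i++)
		storage[i] = (long)&storage[(i + 1) % N];
	/* chase it: one dependent load per step, 2 * N steps return home */
	long *ptr = &storage[0];
	for (int i = 0; i < 2 * N; i++)
		ptr = (long *)*ptr;
	printf("back at start: %s\n", ptr == &storage[0] ? "yes" : "no");
	return 0;
}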
int main(int argc, char **argv)
{
	uint32_t vcoreid = vcore_id();
	int retval = 0;

	mcs_barrier_init(&b, max_vcores());

	/* begin: stuff userspace needs to do before switching to multi-mode */
	vcore_lib_init();
#if 0
	/* tell the kernel where and how we want to receive notifications */
	struct notif_method *nm;
	for (int i = 0; i < MAX_NR_NOTIF; i++) {
		nm = &__procdata.notif_methods[i];
		nm->flags |= NOTIF_WANTED | NOTIF_MSG | NOTIF_IPI;
		nm->vcoreid = i % 2; // vcore0 or 1, keepin' it fresh.
	}
#endif
	/* Need to save this somewhere that you can find it again when
	 * restarting core0 */
	core0_tls = get_tls_desc(0);
	/* Need to save our floating point state somewhere (like in the
	 * user_thread_tcb so it can be restarted too) */
	/* end: stuff userspace needs to do before switching to multi-mode */

	/* get into multi mode */
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");

	printf("Proc %d requesting another vcore\n", getpid());
	begin = read_tsc();
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");
	while (!core1_up)
		cpu_relax();
	end = read_tsc();
	printf("Took %llu usec (%llu nsec) to receive 1 core (cold).\n",
	       udiff(begin, end), ndiff(begin, end));
	printf("[T]:002:%llu:%llu:1:C.\n", udiff(begin, end), ndiff(begin, end));
	core1_up = FALSE;

	udelay(2000000);
	printf("Proc %d requesting the vcore again\n", getpid());
	begin = read_tsc();
	retval = vcore_request(1);
	if (retval)
		printf("F****d!\n");
	while (!core1_up)
		cpu_relax();
	end = read_tsc();
	printf("Took %llu usec (%llu nsec) to receive 1 core (warm).\n",
	       udiff(begin, end), ndiff(begin, end));
	printf("[T]:002:%llu:%llu:1:W.\n", udiff(begin, end), ndiff(begin, end));
	return 0;
}
int time_base(double *av, double *sig)
{
	int i, tol, lcnt, sam_cnt;
	double cy, av1, sig1;

	tol = 10;
	lcnt = sam_cnt = 0;
	while (!sam_cnt) {
		av1 = sig1 = 0.0;
		/* pass 1: rough mean and deviation of a back-to-back tsc read */
		for (i = 0; i < SAMPLE1; ++i) {
			cy = (double)read_tsc();
			cy = (double)read_tsc() - cy;
			av1 += cy;
			sig1 += cy * cy;
		}
		av1 /= SAMPLE1;
		sig1 = sqrt((sig1 - av1 * av1 * SAMPLE1) / SAMPLE1);
		sig1 = (sig1 < 0.05 * av1 ? 0.05 * av1 : sig1);

		/* pass 2: average only the samples within one deviation */
		*av = *sig = 0.0;
		for (i = 0; i < SAMPLE2; ++i) {
			cy = (double)read_tsc();
			cy = (double)read_tsc() - cy;
			if (cy > av1 - sig1 && cy < av1 + sig1) {
				*av += cy;
				*sig += cy * cy;
				sam_cnt++;
			}
		}
		if (10 * sam_cnt > 9 * SAMPLE2) {
			/* at least 90% of the samples were usable */
			*av /= sam_cnt;
			*sig = sqrt((*sig - *av * *av * sam_cnt) / sam_cnt);
			if (*sig > (tol / 100.0) * *av)
				sam_cnt = 0;
		} else {
			/* too noisy: after 10 tries, loosen the tolerance */
			if (lcnt++ == 10) {
				lcnt = 0;
				tol += 5;
				if (tol > 30)
					return 0;
			}
			sam_cnt = 0;
		}
	}
	return 1;
}
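/* Hypothetical caller sketch for time_base() above (the function name
 * print_tsc_overhead is invented): on success, av/sig are the mean and
 * spread, in cycles, of a back-to-back read_tsc() pair, i.e. the
 * measurement overhead a benchmark would subtract from its timings. */
#include <stdio.h>

static void print_tsc_overhead(void)
{
	double av, sig;

	if (time_base(&av, &sig))
		printf("tsc read overhead: %.1f +/- %.1f cycles\n", av, sig);
	else
		printf("no stable tsc baseline (machine too noisy?)\n");
}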
/* Not super accurate, due to overheads of reading tsc and looping */
void ndelay(uint64_t nsec)
{
	uint64_t start, end, now;

	start = read_tsc();
	end = start + (get_tsc_freq() * nsec) / 1000000000;
	do {
		cpu_relax();
		now = read_tsc();
		/* the second clause handles the case where 'end' wrapped
		 * around past zero while 'now' has not wrapped yet */
	} while (now < end || (now > start && end < start));
}
void tsc_udelay(uint64_t us)
{
	uint64_t delay = (cpu_freq * (us > 0 ? us : 1)) / 1000000ull;
	uint64_t stop = read_tsc() + delay;

	/* the tsc counter wraps every ~200 years on a 3 GHz processor,
	 * so we do not check for wraparound */
	while (read_tsc() < stop)
		cpu_relax();
}
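/* Quick, standalone check of the "200 years" claim in the comment above:
 * a 64-bit counter incrementing at 3 GHz wraps after 2^64 / 3e9 seconds.
 * Illustrative only. */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	double secs = (double)UINT64_MAX / 3e9;

	printf("tsc wraps after about %.0f years at 3 GHz\n",
	       secs / (365.25 * 24 * 3600));	/* ~195 years */
	return 0;
}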
int main(void)
{
	int i;
	uint64_t begin, end;

	begin = read_tsc();
	for (i = 0; i < 1000000; i++)
		read_tsc();
	end = read_tsc();
	printf("timestamp counter difference %llu\n",
	       (unsigned long long)(end - begin));
	printf("Each read_tsc takes: %f ticks\n",
	       (end - begin) / (double)1000000);
	return 0;
}
INIT_CODE void tsc_init(void)
{
	unsigned int ms_delay = 10;
	uint64_t tsc_start, tsc_diff, diff_max = 0;

	/* take the largest of five 10 ms samples against the PIT */
	for (int i = 0; i < 5; ++i) {
		tsc_start = read_tsc();
		pit_udelay(ms_delay * 1000);
		tsc_diff = read_tsc() - tsc_start;
		diff_max = tsc_diff > diff_max ? tsc_diff : diff_max;
	}
	/* scale ticks-per-ms_delay up to ticks-per-second */
	cpu_freq = diff_max * (1000u / ms_delay);
	printk(LOG "detected %u MHz processor\n",
	       (unsigned int)(cpu_freq / 1000000));
}
int main(int argc, char **argv)
{
	int i = 0;
	int iter = 0;
	char input;
	tsc_uint64_t buffer2;

again:
	printf("\nPlease enter the number of loop iterations: ");
	scanf("%d", &iter);
	printf("#iters = %d\n", iter);
	g_sim_time = 0;
	g_sim_time_offset = 0;
	i = 1;
	while (i <= iter) {
		read_tsc(&buffer2);
		printf("%-6s %4d \t\t %-8s %12lld \t\t %-8s %7lld \n",
		       "ITER #", i,
		       "Hardware time = ", buffer2.bits64,
		       "tw_sim_time = ", g_sim_time);
		i++;
	}
	printf("\nAgain? (y/n) ");
	scanf(" %c", &input);
	if (input == 'y')
		goto again;
	return 0;
}
/*
 * read binary time info.  all numbers are little endian.
 * ticks and nsec are synchronized.
 */
static int readbintime(char *buf, int n)
{
	int i;
	int64_t nsec, ticks;
	uint8_t *b = (uint8_t *) buf;

	i = 0;
	if (fasthz == 0LL)
		fasthz = system_timing.tsc_freq;
#if 0
	fastticks((uint64_t *) &fasthz);
	nsec = todget(&ticks);
#endif
	ticks = read_tsc();
	nsec = tsc2nsec(ticks);
	if (n >= 3 * sizeof(uint64_t)) {
		int64_t2le(b + 2 * sizeof(uint64_t), fasthz);
		i += sizeof(uint64_t);
	}
	if (n >= 2 * sizeof(uint64_t)) {
		int64_t2le(b + sizeof(uint64_t), ticks);
		i += sizeof(uint64_t);
	}
	if (n >= 8) {
		int64_t2le(b, nsec);
		i += sizeof(int64_t);
	}
	return i;
}
UINT32 ali_rand(UINT32 MaxVal)
{
	UINT32 dwTick;

	if (MaxVal == 0)
		return 0;
	dwTick = read_tsc();
	return (dwTick % MaxVal);
}
void udelay(uint64_t usec)
{
	if (system_timing.tsc_freq != 0) {
		uint64_t start, end, now;

		start = read_tsc();
		end = start + (system_timing.tsc_freq * usec) / 1000000;
		do {
			cpu_relax();
			now = read_tsc();
		} while (now < end || (now > start && end < start));
	} else
		panic("udelay() was called before timer_init(), moron!");
}
int timerfd_settime(int fd, int flags, const struct itimerspec *new_value,
		    struct itimerspec *old_value)
{
	int timerfd, periodfd;
	int ret;
	uint64_t period;
	struct timespec now_timespec = {0};
	struct timespec rel_timespec;

	timerfd = get_sibling_fd(fd, "timer");
	if (timerfd < 0)
		return -1;
	periodfd = get_sibling_fd(fd, "period");
	if (periodfd < 0) {
		close(timerfd);
		return -1;
	}
	if (old_value) {
		if (__timerfd_gettime(timerfd, periodfd, old_value)) {
			ret = -1;
			goto out;
		}
	}
	if (!new_value->it_value.tv_sec && !new_value->it_value.tv_nsec) {
		ret = set_timer(timerfd, 0);
		goto out;
	}
	period = timespec2tsc(&new_value->it_interval);
	ret = set_period(periodfd, period);
	if (ret < 0)
		goto out;
	/* So the caller is asking for timespecs in wall-clock time (depending
	 * on the clock, actually, (TODO)), and the kernel expects TSC ticks
	 * from boot.  If !ABSTIME, then it's just relative to now.  If it is
	 * ABSTIME, then they are asking in terms of real-world time, which
	 * means ABS - NOW to get the rel time, then convert to tsc ticks. */
	if (flags & TFD_TIMER_ABSTIME) {
		ret = clock_gettime(CLOCK_MONOTONIC, &now_timespec);
		if (ret < 0)
			goto out;
		subtract_timespecs(&rel_timespec, &new_value->it_value,
				   &now_timespec);
	} else {
		rel_timespec = new_value->it_value;
	}
	ret = set_timer(timerfd, timespec2tsc(&rel_timespec) + read_tsc());
	/* fall-through */
out:
	close(timerfd);
	close(periodfd);
	return ret;
}
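/* Hypothetical caller sketch for timerfd_settime() above (arm_oneshot_1s
 * is invented): arm 'fd' as a one-shot firing 1 second from now.  The
 * time is relative because TFD_TIMER_ABSTIME is not passed, and the
 * zeroed it_interval means no rearming (set_period() gets 0). */
static void arm_oneshot_1s(int fd)
{
	struct itimerspec its = {
		.it_value    = { .tv_sec = 1, .tv_nsec = 0 },
		.it_interval = { .tv_sec = 0, .tv_nsec = 0 },
	};

	if (timerfd_settime(fd, 0, &its, NULL) < 0)
		perror("timerfd_settime");
}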
void util_timer_end(util_timingdata_t *timingdata)
{
	unsigned long h, l, d = 0;
	int bin;

	read_tsc(&h, &l);
	if (!timingdata->starttimes[HIGHCOUNT]) {
		panic(__FILE__, "timer stopped but not started", NO_NUM);
		return;
	}
	if (timingdata->starttimes[HIGHCOUNT] == h) {
		d = (l - timingdata->starttimes[LOWCOUNT]);
	} else if (timingdata->starttimes[HIGHCOUNT] == h - 1 &&
		   timingdata->starttimes[LOWCOUNT] > l) {
		/* the low word wrapped exactly once */
		d = ((ULONG_MAX - timingdata->starttimes[LOWCOUNT]) + l);
	} else {
		timingdata->misses++;
		return;
	}
	timingdata->starttimes[HIGHCOUNT] = 0;
	/* if d falls outside the current histogram range, widen the range
	 * and restart the histogram */
	if (!timingdata->lock_timings_range[START] ||
	    d < timingdata->lock_timings_range[START] ||
	    d > timingdata->lock_timings_range[END]) {
		int t;

		if (!timingdata->lock_timings_range[START] ||
		    d < timingdata->lock_timings_range[START])
			timingdata->lock_timings_range[START] = d;
		if (!timingdata->lock_timings_range[END] ||
		    d > timingdata->lock_timings_range[END])
			timingdata->lock_timings_range[END] = d;
		for (t = 0; t < TIMING_POINTS; t++)
			timingdata->lock_timings[t] = 0;
		timingdata->binsize =
			(timingdata->lock_timings_range[END] -
			 timingdata->lock_timings_range[START]) / (TIMING_POINTS + 1);
		if (timingdata->binsize < 1)
			timingdata->binsize = 1;
		timingdata->resets++;
	}
	bin = (d - timingdata->lock_timings_range[START]) / timingdata->binsize;
	if (bin < 0 || bin >= TIMING_POINTS) {
		/* not serious, but can't happen, so shouldn't */
		panic(__FILE__, "bin out of range", bin);
	} else {
		timingdata->lock_timings[bin]++;
		timingdata->measurements++;
	}
	return;
}
void reset_cpu_state_ticks(int coreid)
{
	struct per_cpu_info *pcpui = &per_cpu_info[coreid];
	uint64_t now_ticks;

	if (coreid >= num_cores)
		return;
	/* need to update last_tick_cnt, so the current value doesn't get added
	 * in next time we update */
	now_ticks = read_tsc();
	for (int i = 0; i < NR_CPU_STATES; i++)
		pcpui->state_ticks[i] = 0;
	pcpui->last_tick_cnt = now_ticks;
}
/* It's actually okay to set the state to the existing state.  Originally, it
 * was a bug in the state tracking, but it is possible, at least on x86, to
 * have a halted core (state IDLE) get woken up by an IRQ that does not
 * trigger the IRQ handling state.  For example, there is the I_POKE_CORE
 * ipi.  smp_idle will just sleep again, and reset the state from IDLE to
 * IDLE. */
void __set_cpu_state(struct per_cpu_info *pcpui, int state)
{
	uint64_t now_ticks;

	assert(!irq_is_enabled());
	/* TODO: could put in an option to enable/disable state tracking. */
	now_ticks = read_tsc();
	pcpui->state_ticks[pcpui->cpu_state] += now_ticks - pcpui->last_tick_cnt;
	/* TODO: if the state was user, we could account for the vcore's time,
	 * similar to the total_ticks in struct vcore.  The difference is that
	 * the total_ticks tracks the vcore's virtual time, while this tracks
	 * user time.  Something like vcore->user_ticks. */
	pcpui->cpu_state = state;
	pcpui->last_tick_cnt = now_ticks;
}
static irqreturn_t timer_proc(int irq, void *dev_id, struct pt_regs *regs)
{
	struct rt_tmbench_context *ctx = (struct rt_tmbench_context *)dev_id;
	volatile unsigned long long tsc;

	read_tsc(tsc);
	eval_inner_loop(ctx, (long)(tsc - ctx->date));
	tb_timer_stop();
	read_tsc(ctx->start_time);
	tb_timer_start((long)(ctx->date - ctx->start_time));
	if (++ctx->curr.test_loops < ctx->samples_per_sec)
		return IRQ_HANDLED;
	ctx->curr.test_loops = 0;
	eval_outer_loop(ctx);
	return IRQ_HANDLED;
}
// timer_init calibrates both the tsc timer and the lapic timer using the PIT
void timer_init(void)
{
	uint64_t tscval[2];
	long timercount[2];

	pit_set_timer(0xffff, TIMER_RATEGEN);
	// assume the tsc exists
	tscval[0] = read_tsc();
	udelay_pit(1000000);
	tscval[1] = read_tsc();
	system_timing.tsc_freq = SINIT(tscval[1] - tscval[0]);
	cprintf("TSC Frequency: %llu\n", system_timing.tsc_freq);

	__lapic_set_timer(0xffffffff, LAPIC_TIMER_DEFAULT_VECTOR, FALSE,
			  LAPIC_TIMER_DEFAULT_DIVISOR);
	// Mask the LAPIC Timer, so we never receive this interrupt (minor race)
	mask_lapic_lvt(LAPIC_LVT_TIMER);
	timercount[0] = read_mmreg32(LAPIC_TIMER_CURRENT);
	udelay_pit(1000000);
	timercount[1] = read_mmreg32(LAPIC_TIMER_CURRENT);
	system_timing.bus_freq = SINIT((timercount[0] - timercount[1]) * 128);
	cprintf("Bus Frequency: %llu\n", system_timing.bus_freq);
}
// forces the cpu to relax for usec microseconds.  declared in
// kern/include/timing.h
void udelay(uint64_t usec)
{
#if !defined(__BOCHS__)
	if (system_timing.tsc_freq != 0) {
		uint64_t start, end, now;

		start = read_tsc();
		end = start + (system_timing.tsc_freq * usec) / 1000000;
		//cprintf("start %llu, end %llu\n", start, end);
		if (end == 0)
			cprintf("This is terribly wrong \n");
		do {
			cpu_relax();
			now = read_tsc();
			//cprintf("now %llu\n", now);
		} while (now < end || (now > start && end < start));
		return;
	} else
#endif
	{
		udelay_pit(usec);
	}
}
int CCommonFnc::Sample_GenerateSampleUniqueID(__int64 *id)
{
	__int64 a, b;

	// seed the PRNG from the timestamp counter
	srand((DWORD)read_tsc());
	//srand(time(NULL));
	a = 0;
	// assemble a 64-bit value from eight random bytes
	for (int i = 0; i < 8; i++) {
		b = rand() % 256;
		a = (a << 8) | b;
	}
	// keep the result non-negative
	if (a < 0)
		*id = ~a;
	else
		*id = a;
	return STAT_OK;
}
void util_timer_start(util_timingdata_t *timingdata, char *name)
{
	size_t i;

	if (timingdata->names[0] == '\0') {
		for (i = 0; i < sizeof(timingdata->names) && *name; i++)
			timingdata->names[i] = *name++;
		timingdata->names[sizeof(timingdata->names) - 1] = '\0';
	}
	if (timingdata->starttimes[HIGHCOUNT]) {
		panic("restart timer?");
		return;
	}
	read_tsc((u32_t *) &timingdata->starttimes[HIGHCOUNT],
		 (u32_t *) &timingdata->starttimes[LOWCOUNT]);
}
void netlog(struct Fs *f, int mask, char *fmt, ...)
{
	char buf[256], *t, *fp;
	int i, n;
	va_list arg;
	struct timespec ts_now = {0};

	if (!(f->alog->logmask & mask))
		return;
	if (f->alog->opens == 0)
		return;
	/* Same style as trace_printk */
	if (likely(__proc_global_info.tsc_freq))
		ts_now = tsc2timespec(read_tsc());
	n = snprintf(buf, sizeof(buf), "[%lu.%09lu]: ",
		     ts_now.tv_sec, ts_now.tv_nsec);
	va_start(arg, fmt);
	n += vsnprintf(buf + n, sizeof(buf) - n, fmt, arg);
	va_end(arg);

	spin_lock(&f->alog->lock);
	/* evict the oldest bytes if the new entry won't fit */
	i = f->alog->len + n - Nlog;
	if (i > 0) {
		f->alog->len -= i;
		f->alog->rptr += i;
		if (f->alog->rptr >= f->alog->end)
			f->alog->rptr = f->alog->buf + (f->alog->rptr - f->alog->end);
	}
	t = f->alog->rptr + f->alog->len;
	fp = buf;
	f->alog->len += n;
	/* copy into the circular buffer, wrapping at the end */
	while (n-- > 0) {
		if (t >= f->alog->end)
			t = f->alog->buf + (t - f->alog->end);
		*t++ = *fp++;
	}
	spin_unlock(&f->alog->lock);
	rendez_wakeup(&f->alog->r);
}
void util_timer_start(util_timingdata_t *timingdata, char *name)
{
	int i;

	if (timingdata->names[0] == '\0') {
		for (i = 0; i < sizeof(timingdata->names) && *name; i++)
			timingdata->names[i] = *name++;
		timingdata->names[sizeof(timingdata->names) - 1] = '\0';
	}
	if (timingdata->starttimes[HIGHCOUNT]) {
		panic(__FILE__, "restart timer?", NO_NUM);
		return;
	}
	read_tsc(&timingdata->starttimes[HIGHCOUNT],
		 &timingdata->starttimes[LOWCOUNT]);
}
/* Arch-independent per-cpu initialization.  This will call the arch dependent
 * init first. */
void smp_percpu_init(void)
{
	uint32_t coreid = core_id();
	struct per_cpu_info *pcpui = &per_cpu_info[coreid];
	void *trace_buf;
	struct kthread *kthread;

	/* Don't initialize __ctx_depth here, since it is already 1 (at least
	 * on x86), since this runs in irq context. */
	/* Do this first */
	__arch_pcpu_init(coreid);
	/* init our kthread (tracks our currently running context) */
	kthread = __kthread_zalloc();
	kthread->stacktop = get_stack_top();	/* assumes we're on the 1st page */
	pcpui->cur_kthread = kthread;
	/* Treat the startup threads as ktasks.  This will last until smp_idle
	 * when they clear it, either in anticipation of being a user-backing
	 * kthread or to handle an RKM. */
	kthread->flags = KTH_KTASK_FLAGS;
	per_cpu_info[coreid].spare = 0;
	/* Init relevant lists */
	spinlock_init_irqsave(&per_cpu_info[coreid].immed_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].immed_amsgs);
	spinlock_init_irqsave(&per_cpu_info[coreid].routine_amsg_lock);
	STAILQ_INIT(&per_cpu_info[coreid].routine_amsgs);
	/* Initialize the per-core timer chain */
	init_timer_chain(&per_cpu_info[coreid].tchain, set_pcpu_alarm_interrupt);
#ifdef CONFIG_KTHREAD_POISON
	*kstack_bottom_addr(kthread->stacktop) = 0xdeadbeef;
#endif /* CONFIG_KTHREAD_POISON */
	/* Init generic tracing ring */
	trace_buf = kpage_alloc_addr();
	assert(trace_buf);
	trace_ring_init(&pcpui->traces, trace_buf, PGSIZE,
			sizeof(struct pcpu_trace_event));
	for (int i = 0; i < NR_CPU_STATES; i++)
		pcpui->state_ticks[i] = 0;
	pcpui->last_tick_cnt = read_tsc();
	/* Core 0 is in the KERNEL state, called from smp_boot.  The other
	 * cores are too, at least on x86, where we were called from asm
	 * (woken by POKE). */
	pcpui->cpu_state = CPU_STATE_KERNEL;
	/* Enable full lock debugging, after all pcpui work is done */
	pcpui->__lock_checking_enabled = 1;
}
/*
 * The time between reseeds must be at least reseedInterval
 * seconds ('now' is measured in whole seconds via tsc2sec).
 */
static int enough_time_passed(FState *st)
{
	int ok;
	int32_t now;
	int32_t last = st->lastReseedTime;

	now = tsc2sec(read_tsc());

	/* check how much time has passed */
	ok = 0;
	if (now - last >= reseedInterval)
		ok = 1;

	/* reseed will happen, update lastReseedTime */
	if (ok)
		st->lastReseedTime = now;

	return ok;
}
/*
 * like the old #c/time but with added info.  Return
 *
 *	secs	nanosecs	fastticks	fasthz
 */
static int readtime(uint32_t off, char *buf, int n)
{
	int64_t nsec, ticks;
	long sec;
	char str[7 * NUMSIZE];

	if (fasthz == 0LL)
		fasthz = system_timing.tsc_freq;
#if 0
	fastticks((uint64_t *) &fasthz);
	nsec = todget(&ticks);
#endif
	ticks = read_tsc();
	nsec = tsc2nsec(ticks);
	sec = nsec / 1000000000ULL;
	snprintf(str, sizeof(str), "%*lud %*llud %*llud %*llud ",
		 NUMSIZE - 1, sec,
		 VLNUMSIZE - 1, nsec,
		 VLNUMSIZE - 1, ticks,
		 VLNUMSIZE - 1, fasthz);
	return consreadstr(off, buf, n, str);
}
static int __timerfd_gettime(int timerfd, int periodfd,
			     struct itimerspec *curr_value)
{
	char buf[20];
	uint64_t timer_tsc, now_tsc, period_tsc;

	if (read(periodfd, buf, sizeof(buf)) <= 0)
		return -1;
	period_tsc = strtoul(buf, 0, 0);
	tsc2timespec(period_tsc, &curr_value->it_interval);
	if (read(timerfd, buf, sizeof(buf)) <= 0)
		return -1;
	timer_tsc = strtoul(buf, 0, 0);
	/* If 0 (disabled), we'll return 0 for 'it_value'.  o/w we need to
	 * return the relative time. */
	if (timer_tsc) {
		now_tsc = read_tsc();
		if (timer_tsc > now_tsc) {
			timer_tsc -= now_tsc;
		} else {
			/* it's possible that timer_tsc is in the past, and
			 * that we lost the race.  The alarm fired since we
			 * looked at it, and it might be disabled.  It might
			 * have fired multiple times too. */
			if (!period_tsc) {
				/* if there was no period and the alarm fired,
				 * then it should be disabled.  This is racy,
				 * if there are other people setting the
				 * timer. */
				timer_tsc = 0;
			} else {
				while (timer_tsc < now_tsc)
					timer_tsc += period_tsc;
				/* convert back to a relative time */
				timer_tsc -= now_tsc;
			}
		}
	}
	tsc2timespec(timer_tsc, &curr_value->it_value);
	return 0;
}
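/* The period catch-up loop in __timerfd_gettime() above is linear in the
 * number of missed periods.  A constant-time equivalent (a sketch, not
 * from the original source): step forward by ceil((now - timer) / period)
 * whole periods, which lands at or after now_tsc exactly as the loop
 * does. */
#include <stdint.h>

static uint64_t catch_up_periods(uint64_t timer_tsc, uint64_t now_tsc,
				 uint64_t period_tsc)
{
	if (timer_tsc >= now_tsc)
		return timer_tsc;	/* nothing missed */
	uint64_t missed = (now_tsc - timer_tsc + period_tsc - 1) / period_tsc;

	return timer_tsc + missed * period_tsc;
}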
uint64_t epoch_tsc(void)
{
	return read_tsc() + sec2tsc(boot_sec);
}
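/* Hypothetical usage of epoch_tsc(), composing helpers that appear in the
 * snippets above (tsc2sec, boot_sec): the current Unix time in whole
 * seconds, derived entirely from the TSC. */
static long epoch_sec_now(void)
{
	return tsc2sec(epoch_tsc());
}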