/* test the "responsiveness" of ics-110bl acquire enable/disable bit: * * i.e. will the ADC acquire frames "instantaneously" once the enable bit is set ? * will the ADC stop acquiring "instantaneously" once the enable bit is cleared ??? * * ANSWER: yes... to both... * */ void StartAdcAcquireTest(void) { double trueSampleRate = 0.0; extern double tscTicksPerSecond; uint64_t now, then, start; int i; syslog(LOG_INFO, "StartAdcAcquireTest() beginning...\n"); now=then=start=0; InitializeVmeCrate(); trueSampleRate = InitializeAdcModules(10.0/*kHz*/, 32/*channelsPerFrame*/); syslog(LOG_INFO, "trueSampleRate=%.9g\n", trueSampleRate); /* need to start test on the edge of an RTEMS tick... */ rtems_task_wake_after(1); /*we're only going to play with one ADC...*/ ICS110BStartAcquisition(AdcModules[0]); rdtscll(start); /* after 1 tick (==1 ms), we "should" have 10 frames in the adc's FIFO */ rtems_task_wake_after(1); ICS110BStopAcquisition(AdcModules[0]); rdtscll(now); syslog(LOG_INFO, "adcAcquireTest: slept for %.9f [s]\n", ((double)(now-start))/tscTicksPerSecond); for(i=0; !ICS110BIsEmpty(AdcModules[0]); i++) { uint32_t data = readD32(AdcModules[0], ICS110B_FIFO_OFFSET); } syslog(LOG_INFO, "adcAcquireTest: read %d frames, %d channels\n", i/32, i); ShutdownVmeCrates(); }
void test_perf(void)
{
    lwt_t chld1, chld2;
    int i;
    unsigned long long start, end;

    /* Performance tests */
    rdtscll(start);
    for (i = 0; i < ITER; i++) {
        lwt_chan_t c = lwt_chan(0);
        chld1 = lwt_create(fn_null, NULL, 0, c);
        lwt_join(chld1);
    }
    rdtscll(end);
    printf("[PERF] %lld <- fork/join\n", (end - start) / ITER);
    IS_RESET();

    lwt_chan_t c1 = lwt_chan(0);
    chld1 = lwt_create(fn_bounce, (void*)1, 0, c1);
    lwt_chan_t c2 = lwt_chan(0);
    chld2 = lwt_create(fn_bounce, NULL, 0, c2);
    lwt_join(chld1);
    lwt_join(chld2);
    IS_RESET();
}
void *meas_thd(void *d)
{
    unsigned long long s, e;
    unsigned long iter = 0;

    while (mcsync.r != -1) {
        mcsync.r = 0;
        while (mcsync.r == 0) ;
        if (mcsync.r == -1) break;

        rdtscll(s);
        /* let the other thread go first to avoid races */
        while (bounce == MEAS_ITER) ;
        while (bounce > 0) {
            unsigned long t;

            t = bounce - 1;
            bounce = t; /* write to the cache line */
            if (bounce == 0) break;
            while (bounce == t) ;
        }
        bounce = 0;
        rdtscll(e);
        /* assert(s < e); */
        mcsync.cost = (e - s) / MEAS_ITER;
    }
    printf("running consumer\n");
    fflush(stdout);
    /* rb_test_c(); */
    system("PCM/Intel/./main");
    return NULL;
}
int v3_handle_svm_halt(struct guest_info *info)
{
    if (info->cpl != 0) {
        v3_raise_exception(info, GPF_EXCEPTION);
    } else {
        /* What we should do is start waiting on an OS event that will
         * result in the injection of an interrupt.
         * What we will hackishly do instead is resume on any event.
         * Plus, this is totally GeekOS-specific.
         */
        ullong_t yield_start = 0;
        ullong_t yield_stop = 0;
        uint32_t gap = 0;

        PrintDebug("GeekOS Yield\n");

        rdtscll(yield_start);
        V3_Yield();
        rdtscll(yield_stop);

        /* v3_update_time(info, yield_stop - yield_start); */
        gap = yield_stop - yield_start;
        v3_raise_irq(info, 0);

        PrintDebug("GeekOS Yield Done (%d cycles)\n", gap);

        info->rip += 1;
    }
    return 0;
}
int test_speed_create_join()
{
    int i = 0;
    unsigned long long start, end;
    lwt_t tid1, tid2, tid3;

    tid1 = lwt_create(fn, NULL, 0, 0);
    lwt_join(tid1);
    IS_RESET();

    rdtscll(start);
    for (i = 0; i < ITER; i++) {
        tid1 = lwt_create(fn, NULL, 0, 0);
        lwt_join(tid1);
    }
    rdtscll(end);
    IS_RESET();
    printf("performance of fork/join: --> %lld\n", (end - start) / ITER);

    for (i = 0; i < ITER; i++) {
        tid1 = lwt_create(fn, NULL, 0, 0);
        tid2 = lwt_create(fn, NULL, 0, 0);
        tid3 = lwt_create(fn, NULL, 0, 0);
        lwt_join(tid3);
        lwt_join(tid1);
        lwt_join(tid2);
    }
    IS_RESET();

    return 0;
}
void producer(void)
{
    int i;
    unsigned long long start, end, sum = 0, avg, dev = 0;

    for (i = 0; i < RB_ITER; i++) {
        rdtscll(start);
        __p();
        rdtscll(end);
        meas[i] = end - start;
        sum += meas[i];
    }
    avg = sum / RB_ITER;

    for (i = 0; i < RB_ITER; i++) {
        unsigned long long diff = (meas[i] > avg) ? meas[i] - avg : avg - meas[i];
        dev += (diff * diff);
    }
    dev /= RB_ITER;

    printf("round trip deviation^2 = %llu\n", dev);
    printf("RPC pipe: Producer: %llu\n", avg);
}
/*
 * fh_cpu_rdspeed
 *
 * Compute the CPU speed (in MHz) by looking at the elapsed time in microseconds
 * that it takes to get through 500M CPU cycles.
 */
uint32_t fh_cpu_rdspeed()
{
    uint64_t tsc_start = 0;
    uint64_t tsc_stop = 0;
    uint64_t tsc_total = 0;
    uint64_t ts2, ts1;
    uint32_t cpu_speed;

    fh_time_get(&ts1);
    rdtscll(tsc_start);
    while (1) {
        barrier();
        rdtscll(tsc_stop);
        if (likely(tsc_stop > tsc_start)) {
            tsc_total += tsc_stop - tsc_start;
        } else {
            /* the counter wrapped: account for the part on each side of 2^64 */
            tsc_total += (uint64_t)~0 - tsc_start + tsc_stop;
        }
        if (tsc_total > (uint64_t)500000000) {
            break;
        }
        tsc_start = tsc_stop;
    }
    fh_time_get(&ts2);

    cpu_speed = (uint32_t)(tsc_total / (ts2 - ts1));
    return cpu_speed;
}
void timer_monotonic_get(struct mono_time *mt)
{
    uint64_t current_tick;
    uint64_t ticks_elapsed;

    if (!mono_counter.initialized) {
        init_timer();
        mono_counter.last_value = rdtscll();
        mono_counter.initialized = 1;
    }

    current_tick = rdtscll();
    ticks_elapsed = current_tick - mono_counter.last_value;

    /* Update current time and tick values only if a full tick occurred. */
    if (ticks_elapsed >= clocks_per_usec) {
        uint64_t usecs_elapsed;

        usecs_elapsed = ticks_elapsed / clocks_per_usec;
        mono_time_add_usecs(&mono_counter.time, (long)usecs_elapsed);
        mono_counter.last_value = current_tick;
    }

    /* Save result. */
    *mt = mono_counter.time;
}
void call_cs(void)
{
    static int first = 0;
    static int high, low;
    u64_t start = 0, end = 0;

    if (first == 1) {
        low = cos_get_thd_id();
        sched_wakeup(cos_spd_id(), high);
    }
    if (first == 0) {
        first = 1;
        high = cos_get_thd_id();
        sched_block(cos_spd_id(), 0);
        rdtscll(start);
        sched_block(cos_spd_id(), low);
    }
    if (cos_get_thd_id() == low) {
        sched_wakeup(cos_spd_id(), high);
    }
    if (cos_get_thd_id() == high) {
        rdtscll(end);
        /* each round trip is two context switches, hence the halving */
        printc("context switch cost: %llu cycs\n", (end - start) >> 1);
        first = 0;
    }
}
static void test_aes_perf(void)
{
#if 0 /* this did not seem to work with the new compiler?! */
#ifdef __i386__
#define rdtscll(val) \
    __asm__ __volatile__("rdtsc" : "=A" (val))
    const int num_iters = 10;
    int i;
    unsigned int start, end;
    u8 key[16], pt[16], ct[16];
    void *ctx;

    printf("keySetupEnc:");
    for (i = 0; i < num_iters; i++) {
        rdtscll(start);
        ctx = aes_encrypt_init(key, 16);
        rdtscll(end);
        aes_encrypt_deinit(ctx);
        printf(" %d", end - start);
    }
    printf("\n");

    printf("Encrypt:");
    ctx = aes_encrypt_init(key, 16);
    for (i = 0; i < num_iters; i++) {
        rdtscll(start);
        aes_encrypt(ctx, pt, ct);
        rdtscll(end);
        printf(" %d", end - start);
    }
    aes_encrypt_deinit(ctx);
    printf("\n");
#endif /* __i386__ */
#endif
}
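/* The "=A" constraint in the disabled macro above names the edx:eax register
 * pair only on 32-bit x86, which is the likely reason it "did not seem to
 * work" with a newer (or 64-bit) compiler.  A minimal sketch of a
 * compiler-portable alternative, assuming GCC/Clang with <x86intrin.h>
 * (an editorial illustration, not part of the original test): */
#include <x86intrin.h>

static inline unsigned long long rdtsc_portable(void)
{
    return __rdtsc(); /* expands to rdtsc on both i386 and x86-64 */
}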
void test_perf_channels(int chsz)
{
    lwt_chan_t from, to;
    lwt_t t;
    int i;
    unsigned long long start, end;

    /* assert(LWT_RUNNING == lwt_current()->state); */
    from = lwt_chan(chsz);
    assert(from);
    t = lwt_create_chan(fn_chan, from, 0);
    to = lwt_rcv_chan(from);
    assert(to->snd_cnt);

    rdtscll(start);
    for (i = 0; i < ITER; i++) {
        assert(1 == (int)lwt_rcv(from));
        lwt_snd(to, (void*)2);
    }
    lwt_chan_deref(to);
    rdtscll(end);
    printf("[PERF] %5lld <- snd+rcv (buffer size %d)\n",
           (end - start) / (ITER * 2), chsz);

    lwt_join(t);
}
static unsigned long measure_loop_costs(unsigned long spin)
{
    u64_t start, end, min, max;
    unsigned long temp = 0;

    /* NOTE: this estimate short-circuits the function; the measurement
     * loop below is currently dead code. */
    temp = spin * 15 / 2;
    loop_cost = temp;
    return temp;

    rdtscll(start);
    do {
        int i;

        min = MAXULONG;
        max = 0;
        for (i = 0; i < 10; i++) {
            temp = get_loop_cost(spin);
            if (temp < min) min = temp;
            if (temp > max) max = temp;
        }
    } while ((max - min) > (min / 128));
    loop_cost = temp;
    rdtscll(end);
    assert(end > start);
    printc("spin:%lu, loop cost measurement: %lu\n", spin, temp);

    return temp;
}
void timer_monotonic_get(struct mono_time *mt)
{
    uint64_t current_tick;
    uint64_t ticks_elapsed;
    unsigned long ticks_per_usec;
    struct monotonic_counter *mono_counter;

    mono_counter = get_monotonic_context();
    if (!mono_counter->initialized) {
        init_timer();
        mono_counter->last_value = rdtscll();
        mono_counter->initialized = 1;
    }

    current_tick = rdtscll();
    ticks_elapsed = current_tick - mono_counter->last_value;
    ticks_per_usec = get_clocks_per_usec();

    /* Update current time and tick values only if a full tick occurred. */
    if (ticks_elapsed >= ticks_per_usec) {
        uint64_t usecs_elapsed;

        usecs_elapsed = ticks_elapsed / ticks_per_usec;
        mono_time_add_usecs(&mono_counter->time, (long)usecs_elapsed);
        mono_counter->last_value = current_tick;
    }

    /* Save result. */
    *mt = mono_counter->time;
}
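/* A minimal usage sketch for the monotonic timer above.  It assumes a
 * mono_time_diff_microseconds() helper in the style of coreboot's timer API
 * and a hypothetical do_some_work() workload -- neither appears in the
 * original code: */
static long elapsed_us_sketch(void)
{
    struct mono_time t1, t2;

    timer_monotonic_get(&t1);
    do_some_work(); /* hypothetical workload being timed */
    timer_monotonic_get(&t2);
    return mono_time_diff_microseconds(&t1, &t2);
}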
void consumer(void)
{
    int i;
    unsigned long long start, end, c_sum = 0, avg, dev = 0;

    rdtscll(start);
    for (i = 0; i < RB_ITER; i++) {
        c_meas[i] = __c();
        c_sum += c_meas[i];
    }
    rdtscll(end);
    avg = c_sum / RB_ITER;

    for (i = 0; i < RB_ITER; i++) {
        unsigned long long diff = (c_meas[i] > avg) ? c_meas[i] - avg : avg - c_meas[i];
        dev += (diff * diff);
        /* printf("%llu, diff %llu\n", c_meas[i], diff); */
    }
    dev /= RB_ITER;

    printf("one way trip deviation^2 = %llu\n", dev);
    printf("one way: %llu\n", c_sum / RB_ITER);
    /* printf("RPC pipe: Consumer: %lld\n", avg); */
}
void call(void)
{
    static int first = 0;
    static int low = 0, high = 0;
    u64_t start = 0, end = 0;
    int j = 0;

    if (first == 0) {
        high = cos_get_thd_id();
        low = high + 1;
        first = 1;
    }

    while (j++ < 10) {
        if (cos_get_thd_id() == high) {
            /* printc("p3\n"); */
            sched_block(cos_spd_id(), 0);
            /* printc("p4\n"); */
            rdtscll(start);
        }
        /* printc(" thd %d is calling lower \n", cos_get_thd_id()); */
        call_lower(low, high);
        if (cos_get_thd_id() == high) {
            rdtscll(end);
            printc("cost of cached stkPIP %llu cycs\n", end - start);
        }
    }
    return;
}
void perf_test()
{
    int i, j;
    unsigned long long start, end;
    struct foo *fp[AMT];
    struct kmem_cache *foo_cache =
        kmem_cache_create("foo", sizeof(struct foo), 0, NULL, NULL);

    /* warm-up loop */
    for (i = 0; i < AMT; i++) {
        fp[i] = kmem_cache_alloc(foo_cache, KM_SLEEP);
        fp[i]->i[7] = 'a';
    }
    for (i = 0; i < AMT; i++)
        kmem_cache_free(foo_cache, fp[i]);

    rdtscll(start);
    for (j = 0; j < LOOP; j++) {
        for (i = 0; i < AMT; i++) {
            fp[i] = kmem_cache_alloc(foo_cache, KM_SLEEP);
            fp[i]->i[6] = 'a';
        }
        for (i = 0; i < AMT; i++)
            kmem_cache_free(foo_cache, fp[i]);
    }
    rdtscll(end);
    kmem_cache_destroy(foo_cache);
    printf("[PERF] kmem_slab %lld \n", (end - start) / LOOP / AMT);

    rdtscll(start);
    for (j = 0; j < LOOP; j++) {
        for (i = 0; i < AMT; i++) {
            fp[i] = malloc(sizeof(struct foo));
            fp[i]->i[i % 7] = i;
        }
        for (i = 0; i < AMT; i++)
            free(fp[i]);
    }
    rdtscll(end);
    printf("[PERF] malloc %lld \n", (end - start) / LOOP / AMT);
}
CSTUB_FN(unsigned long, lock_component_alloc) (struct usr_inv_cap *uc, spdid_t spdid)
{
    long fault;
    unsigned long ret;
    struct rec_data_lk *rd = NULL;
    unsigned long ser_lkid, cli_lkid;

    if (first == 0) {
        cos_map_init_static(&uniq_lkids);
        first = 1;
    }

#ifdef BENCHMARK_MEAS_CREATION_TIME
    rdtscll(meas_start);
#endif

redo:
#ifdef BENCHMARK_MEAS_ALLOC
    rdtscll(meas_end);
    if (test_flag) {
        test_flag = 0;
        printc("recovery a lock cost: %llu\n", meas_end - meas_start);
    }
#endif

    CSTUB_INVOKE(ret, fault, uc, 1, spdid);
    if (unlikely(fault)) {
#ifdef BENCHMARK_MEAS_ALLOC
        test_flag = 1;
        rdtscll(meas_start);
#endif
        CSTUB_FAULT_UPDATE();
        goto redo;
    }
    assert(ret > 0);

    cli_lkid = rdlk_alloc();
    assert(cli_lkid >= 1);
    rd = rdlk_lookup(cli_lkid);
    assert(rd);
    rd_cons(rd, cos_spd_id(), cli_lkid, ret, LOCK_ALLOC);
    ret = cli_lkid;

#ifdef BENCHMARK_MEAS_CREATION_TIME
    rdtscll(meas_end);
    printc("creating a lock costs %llu\n", meas_end - meas_start);
#endif
    return ret;
}
int main(int argc, char **argv)
{
    su3_matrix a, b, c, d;
    int i, j, iter = 1;
    struct sched_param param = { .sched_priority = 20 };
    volatile unsigned long long timeA, timeMILC, timeSSE;
    unsigned int seed = 1;
    int randomfd;

    if ((randomfd = open("/dev/urandom", O_RDONLY)) < 0)
        perror("Attempt to open /dev/urandom");
    if (read(randomfd, &seed, sizeof(seed)) < (ssize_t)sizeof(seed))
        perror("Attempt to read /dev/urandom");
    close(randomfd);
    srand(seed);

    if (sched_setscheduler(0, SCHED_FIFO, &param) < 0)
        perror("Attempt to put in real time queue");

    if (argc > 1)
        sscanf(argv[1], "%d", &iter);

    for (i = 0; i < 3; i++) {
        for (j = 0; j < 3; j++) {
            a.e[i][j].real = (Real)(rand() - RAND_MAX/2) / (Real)(RAND_MAX/2) * 2.0;
            a.e[i][j].imag = (Real)(rand() - RAND_MAX/2) / (Real)(RAND_MAX/2) * 2.0;
            b.e[i][j].real = (Real)(rand() - RAND_MAX/2) / (Real)(RAND_MAX/2) * 2.0;
            b.e[i][j].imag = (Real)(rand() - RAND_MAX/2) / (Real)(RAND_MAX/2) * 2.0;
        }
    }

    rdtscll(timeA);
    for (i = 0; i < iter; i++) {
        mult_su3_na(&a, &b, &c);
    }
    rdtscll(timeMILC);
    timeMILC -= timeA;

    rdtscll(timeA);
    for (i = 0; i < iter; i++) {
        _inline_sse_mult_su3_na(&a, &b, &d);
    }
    rdtscll(timeSSE);
    timeSSE -= timeA;

    for (i = 0; i < 3; i++) {
        printf("%4.1f%+4.1fi %4.1f%+4.1fi %4.1f%+4.1fi | "
               "%4.1f%+4.1fi %4.1f%+4.1fi %4.1f%+4.1fi | "
               "%4.1f%+4.1fi %4.1f%+4.1fi %4.1f%+4.1fi | "
               "%4.1f%+4.1fi %4.1f%+4.1fi %4.1f%+4.1fi\n",
               a.e[i][0].real, a.e[i][0].imag, a.e[i][1].real, a.e[i][1].imag,
               a.e[i][2].real, a.e[i][2].imag,
               b.e[i][0].real, b.e[i][0].imag, b.e[i][1].real, b.e[i][1].imag,
               b.e[i][2].real, b.e[i][2].imag,
               c.e[i][0].real, c.e[i][0].imag, c.e[i][1].real, c.e[i][1].imag,
               c.e[i][2].real, c.e[i][2].imag,
               d.e[i][0].real, d.e[i][0].imag, d.e[i][1].real, d.e[i][1].imag,
               d.e[i][2].real, d.e[i][2].imag);
    }

    printf("Time per iteration:\n  MILC: %llu\n  SSE:  %llu\n",
           timeMILC / (unsigned long long)iter,
           timeSSE / (unsigned long long)iter);
    exit(0);
}
CSTUB_FN(int, lock_component_take) (struct usr_inv_cap *uc, spdid_t spdid,
                                    unsigned long lock_id, unsigned short int thd)
{
    long fault = 0;
    int ret;
    struct rec_data_lk *rd = NULL;

redo:
    rd = rd_update(lock_id, LOCK_TAKE);
    assert(rd);

#ifdef BENCHMARK_MEAS_TAKE
    rdtscll(meas_end);
    /* printc("now take again (thd %d, end %llu)!!!!\n", cos_get_thd_id(), meas_end); */
    if (test_flag) {
        test_flag = 0;
        printc("recovery a lock cost: %llu\n", meas_end - meas_start);
    }
#endif

    CSTUB_INVOKE(ret, fault, uc, 3, spdid, rd->s_lkid, thd);
    if (unlikely(fault)) {
        /* printc("cli:thd %d see a fault in lock_component_take!\n", cos_get_thd_id()); */
        lock_component_take_ubenchmark_flag = 1;
        rdtscll(ubenchmark_start);
#ifdef BENCHMARK_MEAS_TAKE
        test_flag = 1;
        rdtscll(meas_start);
        /* printc("a fault (thd %d start %llu)!!!!\n", cos_get_thd_id(), meas_start); */
#endif
        CSTUB_FAULT_UPDATE();
        goto redo;
        /* rd = rd_update(lock_id, LOCK_TAKE); */
        /* ret = 0; */
        /* rdtscll(ubenchmark_end); */
        /* if (lock_component_take_ubenchmark_flag) { */
        /*     lock_component_take_ubenchmark_flag = 0; */
        /*     printc("lock_component_take(C3): recover per object end-end cost: %llu\n", */
        /*            ubenchmark_end - ubenchmark_start); */
        /* } */
    }

    if (ret == -EINVAL) {
        /* printc("cli:thd %d lock_component_take return EINVAL\n", cos_get_thd_id()); */
        rd_recover_state(rd);
        goto redo;
    }
    /* printc("cli:thd %d lock_component_take return %d\n", cos_get_thd_id(), ret); */
    return ret;
}
static unsigned long get_loop_cost(unsigned long loop_num)
{
    u64_t start, end;

    rdtscll(start);
    do_loop(loop_num);
    rdtscll(end);
    return end - start;
}
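/* Back-to-back rdtsc reads like the ones above can be reordered by an
 * out-of-order core relative to the work being timed.  A minimal sketch of a
 * fenced variant, assuming GCC/Clang inline asm on x86 (an editorial
 * illustration, not part of the original code): */
static inline unsigned long long rdtsc_serialized(void)
{
    unsigned int lo, hi;

    /* lfence keeps rdtsc from executing before prior instructions complete
     * on Intel parts; AMD may additionally want mfence. */
    __asm__ __volatile__("lfence; rdtsc" : "=a"(lo), "=d"(hi));
    return ((unsigned long long)hi << 32) | lo;
}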
static inline void tracking_start(struct cbuf_tracking *t, cbuf_debug_t index)
{
#if defined(DEBUG)
    rdtscll(tsc_start[index]);
#endif
    if (t && index == CBUF_COLLECT) rdtscll(t->gc_start);
    return;
}
static void __init calibrate_APIC_clock(void)
{
    unsigned apic, apic_start;
    unsigned long tsc, tsc_start;
    int result;

    local_irq_disable();

    /*
     * Put whatever arbitrary (but long enough) timeout
     * value into the APIC clock, we just want to get the
     * counter running for calibration.
     *
     * No interrupt enable !
     */
    __setup_APIC_LVTT(250000000, 0, 0);

    apic_start = apic_read(APIC_TMCCT);
#ifdef CONFIG_X86_PM_TIMER
    if (apic_calibrate_pmtmr && pmtmr_ioport) {
        pmtimer_wait(5000); /* 5ms wait */
        apic = apic_read(APIC_TMCCT);
        result = (apic_start - apic) * 1000L / 5;
    } else
#endif
    {
        rdtscll(tsc_start);
        do {
            apic = apic_read(APIC_TMCCT);
            rdtscll(tsc);
        } while ((tsc - tsc_start) < TICK_COUNT &&
                 (apic_start - apic) < TICK_COUNT);

        result = (apic_start - apic) * 1000L * tsc_khz / (tsc - tsc_start);
    }

    local_irq_enable();

    printk(KERN_DEBUG "APIC timer calibration result %d\n", result);

    printk(KERN_INFO "Detected %d.%03d MHz APIC timer.\n",
           result / 1000 / 1000, result / 1000 % 1000);

    /* Calculate the scaled math multiplication factor */
    lapic_clockevent.mult = div_sc(result, NSEC_PER_SEC, 32);
    lapic_clockevent.max_delta_ns =
        clockevent_delta2ns(0x7FFFFF, &lapic_clockevent);
    lapic_clockevent.min_delta_ns =
        clockevent_delta2ns(0xF, &lapic_clockevent);

    calibration_result = result / HZ;
}
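/* For reference, a minimal sketch of the scaled-math factor that div_sc()
 * computes above, assuming the usual clockevents definition
 * mult = (ticks << shift) / nsec (an illustration, not the kernel's
 * implementation): */
static inline unsigned long div_sc_sketch(unsigned long ticks,
                                          unsigned long nsec, int shift)
{
    unsigned long long tmp = (unsigned long long)ticks << shift;

    return (unsigned long)(tmp / nsec);
}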
unsigned long test_ipi_send_time(int cpu)
{
    unsigned long tsc_init, tsc_final;

    rdtscll(tsc_init);
    apic->send_IPI_mask(cpumask_of(cpu), POPCORN_IPI_LATENCY_VECTOR);
    rdtscll(tsc_final);

    return tsc_final - tsc_init;
}
unsigned long get_loop_cost(unsigned long loop_num)
{
    u64_t start, end;
    unsigned long int i;

    kkk = 0;
    rdtscll(start);
    for (i = 0; i < loop_num; i++) kkk++; /* Make sure that -O3 is on to get the best result */
    rdtscll(end);

    return (end - start) / loop_num; /* avg is 6 per loop */
}
void run(const char *desc)
{
    unsigned int i;
    long long then, now;

    /* total flush test: */
    verb = gverb_new(48000, 300.0f, 50.0f, 7.0f, 0.5f, 15.0f, 0.5f, 0.5f, 0.5f);

    rdtscll(then);
    for (i = 0; i < SIZE; i++) {
        /* printf("%f\n", in[i]); */
        gverb_do(verb, in[i], out[0] + i, out[1] + i);
    }
    rdtscll(now);

    printf("%s took %lld cycles/sample\n", desc, (now - then) / (long long)SIZE);
    gverb_flush(verb);
}
/* calculate the tsc period */
unsigned long long tsc_period_ps(void)
{
    struct timeval tv_start;
    struct timeval tv_end;
    unsigned long long tsc_start, tsc_end;

    rdtscll(tsc_start);
    gettimeofday(&tv_start, NULL);
    sleep(1);
    rdtscll(tsc_end);
    gettimeofday(&tv_end, NULL);

    return (1000 * tv_minus(&tv_start, &tv_end)) / tsc_minus(tsc_start, tsc_end);
}
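/* A hedged companion sketch: deriving the TSC frequency in Hz directly from
 * the same one-second measurement, without the tv_minus()/tsc_minus()
 * helpers (an editorial illustration; needs <sys/time.h> and <unistd.h>): */
unsigned long long tsc_hz_sketch(void)
{
    struct timeval tv_start, tv_end;
    unsigned long long tsc_start, tsc_end;
    long long usecs;

    rdtscll(tsc_start);
    gettimeofday(&tv_start, NULL);
    sleep(1);
    rdtscll(tsc_end);
    gettimeofday(&tv_end, NULL);

    usecs = (long long)(tv_end.tv_sec - tv_start.tv_sec) * 1000000LL
          + (tv_end.tv_usec - tv_start.tv_usec);
    return (tsc_end - tsc_start) * 1000000ULL / (unsigned long long)usecs;
}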
static inline duration_t read_tsc_nsec(void)
{
    hwtime_t tsc_time, s;

    rdtscll(tsc_time);
    s = tsc_time / cpu_hz; /* whole seconds */
    return s * NSECS_PER_SEC +
           ((tsc_time - s * cpu_hz) * NSECS_PER_SEC) / cpu_hz;
}
static int interrupt_wait(void)
{
    int ret;
    unsigned long long t;

    assert(wildcard_brand_id > 0);
    /* printc("sleeping...\n"); */
    if (-1 == (ret = cos_brand_wait(wildcard_brand_id))) BUG();
    rdtscll(t);
    /* printc("up\n"); */
    if (t_0 != old_t_0) {
        old_t_0 = t_0;
        /* printc("t_0 %llu, t %llu, cost %llu\n", t_0, t, t - t_0); */
        meas[idx++] = t - t_0;
        if (idx == ITER) {
            meas_proc();
            idx = 0;
        }
    } else {
        printc("jitter...\n");
    }
#ifdef UPCALL_TIMING
    last_upcall_cyc = (u32_t)ret;
#endif
    return 0;
}
void cos_init(void *arg)
{
    static volatile int first = 1, second = 1;

    if (first) {
        union sched_param sp;

        first = 0;
        sp.c.type = SCHEDP_PRIO;
        sp.c.value = 10;
        if (sched_create_thd(cos_spd_id(), sp.v, 0, 0) == 0) BUG();
        return;
    } else if (second) {
        /* high prio thd */
        union sched_param sp;

        second = 0;
        init();
        sp.c.type = SCHEDP_PRIO;
        sp.c.value = 20;
        if (sched_create_thd(cos_spd_id(), sp.v, 0, 0) == 0) BUG();
        event_wait();
    } else {
        /* low prio thd: keep writing the TSC */
        while (1) rdtscll(t_0);
    }
}
void do_timer_tsc_timekeeping(struct pt_regs *regs)
{
    cycles_t tsc, tsc_not_accounted, tsc_accounted, tsc_wd;

    rdtscll(tsc);

    tsc_accounted = last_tsc_accounted;
    if (unlikely(tsc < tsc_accounted))
        return;

    tsc_not_accounted = tsc - tsc_accounted;
    if (tsc_not_accounted > cycles_accounted_limit) {
        /* Be extra safe and limit the loop below. */
        tsc_accounted = tsc_not_accounted - cycles_accounted_limit;
        tsc_not_accounted = cycles_accounted_limit;
    }

    tsc_wd = 0;
    while (tsc_not_accounted >= cycles_per_tick) {
        do_timer_jiffy(regs);
        if (tsc_wd > cycles_per_tick) {
            touch_all_softlockup_watchdogs();
            tsc_wd = 0;
        }
        tsc_not_accounted -= cycles_per_tick;
        tsc_accounted += cycles_per_tick;
        tsc_wd += cycles_per_tick;
    }
    last_tsc_accounted = tsc_accounted;
}