int benchmarkCpu(const command_data_t &cmd_data) {
    // Use volatile so that the loop is not optimized away by the compiler.
    volatile int cpu_foo;

    uint64_t time_ns;
    int iters = cmd_data.args[1];
    bool print_each_iter = cmd_data.print_each_iter;
    bool print_average = cmd_data.print_average;
    double avg, running_avg = 0.0, square_avg = 0.0;
    double max = 0.0, min = 0.0;
    for (int i = 0; iters == -1 || i < iters; i++) {
        time_ns = nanoTime();
        for (cpu_foo = 0; cpu_foo < 100000000; cpu_foo++);
        time_ns = nanoTime() - time_ns;

        avg = (double)time_ns / NS_PER_SEC;
        if (print_average) {
            COMPUTE_RUNNING(avg, running_avg, square_avg, i);
            COMPUTE_MIN_MAX(avg, min, max);
        }

        if (print_each_iter) {
            printf("cpu took %.06f seconds\n", avg);
        }
    }

    if (print_average) {
        printf(" cpu average %.06f seconds std dev %f min %0.6f seconds max %0.6f seconds\n",
               running_avg, GET_STD_DEV(running_avg, square_avg), min, max);
    }

    return 0;
}
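// The benchmark functions in this file lean on a monotonic nanosecond clock
// and a few running-statistics macros (NS_PER_SEC, nanoTime, COMPUTE_RUNNING,
// COMPUTE_MIN_MAX, GET_STD_DEV, COMPUTE_AVERAGE_KB) that are defined elsewhere
// and not shown in this excerpt. The sketch below is only an assumption about
// their shape, reconstructed from the call sites; the real bodies may differ.
#if 0  // illustrative sketch, assumed definitions
#include <math.h>
#include <stdint.h>
#include <time.h>

#define NS_PER_SEC 1000000000ULL

// Monotonic timestamp in nanoseconds (assumed to wrap clock_gettime).
static uint64_t nanoTime() {
    struct timespec t;
    clock_gettime(CLOCK_MONOTONIC, &t);
    return (uint64_t)t.tv_sec * NS_PER_SEC + t.tv_nsec;
}

// Running mean and mean-of-squares after cur_idx+1 samples.
#define COMPUTE_RUNNING(avg, running_avg, square_avg, cur_idx)                        \
    do {                                                                              \
        running_avg = ((running_avg) * (cur_idx) + (avg)) / ((cur_idx) + 1);          \
        square_avg = ((square_avg) * (cur_idx) + (avg) * (avg)) / ((cur_idx) + 1);    \
    } while (0)

// Standard deviation recovered from the running mean and mean-of-squares.
#define GET_STD_DEV(running_avg, square_avg) \
    sqrt((square_avg) - (running_avg) * (running_avg))

// Track min/max, treating 0.0 as "not yet set" for the minimum.
#define COMPUTE_MIN_MAX(avg, min, max)                      \
    do {                                                    \
        if ((avg) < (min) || (min) == 0.0) min = (avg);     \
        if ((avg) > (max)) max = (avg);                     \
    } while (0)

// Throughput in KB/s for `bytes` bytes processed in `time_ns` nanoseconds.
#define COMPUTE_AVERAGE_KB(avg_kb, bytes, time_ns) \
    avg_kb = ((bytes) / 1024.0) / ((time_ns) / (double)NS_PER_SEC)
#endif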
int benchmarkSleep(const command_data_t &cmd_data) {
    uint64_t time_ns;
    int delay = cmd_data.args[0];
    int iters = cmd_data.args[1];
    bool print_each_iter = cmd_data.print_each_iter;
    bool print_average = cmd_data.print_average;
    double avg, running_avg = 0.0, square_avg = 0.0;
    double max = 0.0, min = 0.0;
    for (int i = 0; iters == -1 || i < iters; i++) {
        time_ns = nanoTime();
        sleep(delay);
        time_ns = nanoTime() - time_ns;

        avg = (double)time_ns / NS_PER_SEC;
        if (print_average) {
            COMPUTE_RUNNING(avg, running_avg, square_avg, i);
            COMPUTE_MIN_MAX(avg, min, max);
        }

        if (print_each_iter) {
            printf("sleep(%d) took %.06f seconds\n", delay, avg);
        }
    }

    if (print_average) {
        printf(" sleep(%d) average %.06f seconds std dev %f min %.06f seconds max %0.6f seconds\n",
               delay, running_avg, GET_STD_DEV(running_avg, square_avg), min, max);
    }

    return 0;
}
int benchmarkStrcmp(const command_data_t &cmd_data) {
    int size = cmd_data.args[0];
    int iters = cmd_data.args[1];

    // Allocate a large chunk of memory to hold both strings.
    uint8_t *memory = (uint8_t*)malloc(2*size + 2048);
    if (!memory)
        return -1;

    char *string1 = reinterpret_cast<char*>(
        getAlignedMemory(memory, cmd_data.src_align, cmd_data.src_or_mask));
    char *string2 = reinterpret_cast<char*>(
        getAlignedMemory((uint8_t*)string1+size, cmd_data.dst_align, cmd_data.dst_or_mask));

    // Fill both strings with identical printable characters so that strcmp
    // has to walk the full length before returning 0.
    for (int i = 0; i < size - 1; i++) {
        string1[i] = (char)(32 + (i % 96));
        string2[i] = string1[i];
    }
    string1[size-1] = '\0';
    string2[size-1] = '\0';

    uint64_t time_ns;
    double avg_kb, running_avg_kb = 0.0, square_avg_kb = 0.0;
    double max_kb = 0.0, min_kb = 0.0;
    int j;
    bool print_average = cmd_data.print_average;
    bool print_each_iter = cmd_data.print_each_iter;
    int copies = cmd_data.data_size / size;
    int retval = 0;
    for (int i = 0; iters == -1 || i < iters; i++) {
        time_ns = nanoTime();
        for (j = 0; j < copies; j++) {
            retval = strcmp(string1, string2);
            if (retval != 0) {
                printf("strcmp failed, return value %d\n", retval);
            }
        }
        time_ns = nanoTime() - time_ns;

        // Compute in kb to avoid any overflows.
        COMPUTE_AVERAGE_KB(avg_kb, copies * size, time_ns);

        if (print_average) {
            COMPUTE_RUNNING(avg_kb, running_avg_kb, square_avg_kb, i);
            COMPUTE_MIN_MAX(avg_kb, min_kb, max_kb);
        }

        if (print_each_iter) {
            printf("strcmp %dx%d bytes took %.06f seconds (%f MB/s)\n",
                   copies, size, (double)time_ns / NS_PER_SEC, avg_kb / 1024.0);
        }
    }

    if (print_average) {
        printf(" strcmp %dx%d bytes average %.2f MB/s std dev %.4f min %.2f MB/s max %.2f MB/s\n",
               copies, size, running_avg_kb/1024.0,
               GET_STD_DEV(running_avg_kb, square_avg_kb) / 1024.0,
               min_kb / 1024.0, max_kb / 1024.0);
    }

    free(memory);
    return 0;
}
int benchmarkMemcpy(const command_data_t &cmd_data) {
    int size = cmd_data.args[0];
    int iters = cmd_data.args[1];

    uint8_t *src = allocateAlignedMemory(size, cmd_data.src_align, cmd_data.src_or_mask);
    if (!src)
        return -1;
    uint8_t *dst = allocateAlignedMemory(size, cmd_data.dst_align, cmd_data.dst_or_mask);
    if (!dst)
        return -1;

    // Initialize the source and destination to known values.
    // If not initialized, the benchmark results are skewed.
    memset(src, 0xff, size);
    memset(dst, 0, size);

    uint64_t time_ns;
    double avg_kb, running_avg_kb = 0.0, square_avg_kb = 0.0;
    double max_kb = 0.0, min_kb = 0.0;
    int j;
    bool print_average = cmd_data.print_average;
    bool print_each_iter = cmd_data.print_each_iter;
    int copies = cmd_data.data_size / size;
    for (int i = 0; iters == -1 || i < iters; i++) {
        time_ns = nanoTime();
        for (j = 0; j < copies; j++)
            memcpy(dst, src, size);
        time_ns = nanoTime() - time_ns;

        // Compute in kb to avoid any overflows.
        COMPUTE_AVERAGE_KB(avg_kb, copies * size, time_ns);

        if (print_average) {
            COMPUTE_RUNNING(avg_kb, running_avg_kb, square_avg_kb, i);
            COMPUTE_MIN_MAX(avg_kb, min_kb, max_kb);
        }

        if (print_each_iter) {
            printf("memcpy %dx%d bytes took %.06f seconds (%f MB/s)\n",
                   copies, size, (double)time_ns / NS_PER_SEC, avg_kb / 1024.0);
        }
    }

    if (print_average) {
        printf(" memcpy %dx%d bytes average %.2f MB/s std dev %.4f min %.2f MB/s max %.2f MB/s\n",
               copies, size, running_avg_kb/1024.0,
               GET_STD_DEV(running_avg_kb, square_avg_kb) / 1024.0,
               min_kb / 1024.0, max_kb / 1024.0);
    }

    return 0;
}
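// benchmarkStrcmp() and benchmarkMemcpy() rely on getAlignedMemory() and
// allocateAlignedMemory() to honor the src/dst alignment and or-mask options.
// Those helpers are defined elsewhere in the file; the sketch below is only a
// guess at their shape based on how they are called here (pointer in, aligned
// pointer out, with or_mask OR-ed into the low address bits).
#if 0  // illustrative sketch, assumed implementation
static uint8_t *getAlignedMemory(uint8_t *memory, int alignment, int or_mask) {
    uintptr_t ptr = reinterpret_cast<uintptr_t>(memory);
    if (alignment > 0) {
        // Round up to the requested alignment, then force the chosen low bits.
        ptr = (ptr + alignment - 1) & ~(uintptr_t)(alignment - 1);
        ptr |= or_mask;
    }
    return reinterpret_cast<uint8_t*>(ptr);
}

static uint8_t *allocateAlignedMemory(size_t size, int alignment, int or_mask) {
    // Over-allocate so the aligned pointer still has `size` usable bytes.
    uint8_t *memory = (uint8_t*)malloc(size + 2 * alignment);
    if (!memory)
        return NULL;
    return getAlignedMemory(memory, alignment, or_mask);
}
#endif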
int benchmarkMemread(const command_data_t &cmd_data) {
    int size = cmd_data.args[0];
    int iters = cmd_data.args[1];

    int *src = reinterpret_cast<int*>(malloc(size));
    if (!src)
        return -1;

    // Use volatile so the compiler does not optimize away the reads.
    volatile int foo;

    uint64_t time_ns;
    int j, k;
    double avg_kb, running_avg_kb = 0.0, square_avg_kb = 0.0;
    double max_kb = 0.0, min_kb = 0.0;
    bool print_average = cmd_data.print_average;
    bool print_each_iter = cmd_data.print_each_iter;
    int c = cmd_data.data_size / size;
    for (int i = 0; iters == -1 || i < iters; i++) {
        time_ns = nanoTime();
        // Read the buffer as 4-byte ints, c passes over size bytes each.
        for (j = 0; j < c; j++)
            for (k = 0; k < size/4; k++)
                foo = src[k];
        time_ns = nanoTime() - time_ns;

        // Compute in kb to avoid any overflows.
        COMPUTE_AVERAGE_KB(avg_kb, c * size, time_ns);

        if (print_average) {
            COMPUTE_RUNNING(avg_kb, running_avg_kb, square_avg_kb, i);
            COMPUTE_MIN_MAX(avg_kb, min_kb, max_kb);
        }

        if (print_each_iter) {
            printf("read %dx%d bytes took %.06f seconds (%f MB/s)\n",
                   c, size, (double)time_ns / NS_PER_SEC, avg_kb / 1024.0);
        }
    }

    if (print_average) {
        printf(" read %dx%d bytes average %.2f MB/s std dev %.4f min %.2f MB/s max %.2f MB/s\n",
               c, size, running_avg_kb/1024.0,
               GET_STD_DEV(running_avg_kb, square_avg_kb) / 1024.0,
               min_kb / 1024.0, max_kb / 1024.0);
    }

    free(src);
    return 0;
}
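// Every benchmark takes a command_data_t describing the parsed command line.
// The struct is defined elsewhere in the file; the fields below are exactly
// the ones the benchmarks above read (args[0]/args[1] carry size or delay and
// the iteration count, where -1 iterations means "run forever"), but the real
// definition may contain additional members.
#if 0  // illustrative sketch of the assumed layout
typedef struct {
    int args[2];           // per-benchmark arguments: size or delay, then iteration count
    int data_size;         // total bytes to touch per timed iteration
    int src_align;         // requested source alignment
    int src_or_mask;       // bits to OR into the aligned source address
    int dst_align;         // requested destination alignment
    int dst_or_mask;       // bits to OR into the aligned destination address
    bool print_each_iter;  // print a line per iteration
    bool print_average;    // print average/std dev/min/max at the end
} command_data_t;
#endif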
void AndroidTimer::Update() {
    // Get the delta between the last frame and the current moment.
    TimeUnit now = nanoTime();

    // Convert nanoseconds to seconds.
    const float _MULTIPLIER_ = 0.000000001f;
    float deltaFrameTime = (now - m_lastFrameTime) * _MULTIPLIER_;
    m_lastFrameTime = now;

    m_delta = deltaFrameTime * m_deltaMultiplier;
}
void MemoryInstrumenter::allocImpl(mem_t size, char type) {
    if (enabled_) {
        std::vector<mem_t> buffer(labels_.size());
        assert(output_ != NULL);
        assert((unsigned char) type < buffer.size() - 1);
        std::fill(buffer.begin(), buffer.end(), 0);
        buffer[0] = nanoTime();
#ifdef __GNUC__
        if (type == FULL_MATRIX) {
            buffer[FULL_MATRIX] = __sync_add_and_fetch(&fullMatrixMem_, size);
        } else
#endif
        if (type > 0)
            buffer[type] = size;

#ifdef __GLIBC__
        mallinfo_counter++;
        if (mallinfo_counter >= mallinfo_sampling) {
            global_mallinfo = mallinfo();
            mallinfo_counter = 0;
        }
        int k = 3;
        buffer[k++] = global_mallinfo.arena;
        //buffer[k++] = global_mallinfo.ordblks;
        //buffer[k++] = global_mallinfo.smblks;
        //buffer[k++] = global_mallinfo.hblks;
        buffer[k++] = global_mallinfo.hblkhd;
        //buffer[k++] = global_mallinfo.usmblks;
        //buffer[k++] = global_mallinfo.fsmblks;
        buffer[k++] = global_mallinfo.uordblks;
        //buffer[k++] = global_mallinfo.fordblks;
        buffer[k++] = global_mallinfo.keepcost;
#endif

        for (unsigned int i = 0; i < hooks_.size(); i++) {
            if (hooks_[i]) {
                assert((unsigned char)type != i);
                buffer[i] = hooks_[i](hookParams_[i]);
            }
        }

        assert(buffer[0] > 0);
        write_counter++;
        if (write_counter >= write_sampling) {
            fwrite(buffer.data(), sizeof(size_t), buffer.size(), output_);
            fflush(output_);
            write_counter = 0;
        }
    }
}
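// allocImpl() also samples user-registered hooks: hooks_[i] is invoked with
// its stored parameter and the returned value lands in column i of the row
// written to output_. The exact hook type lives in the class declaration,
// which is not part of this excerpt; from the call site it plausibly looks
// like the assumed sketch below.
#if 0  // illustrative sketch, assumed types
typedef mem_t (*instrument_hook_t)(void *param);
// std::vector<instrument_hook_t> hooks_;       // one optional hook per column
// std::vector<void*>             hookParams_;  // opaque parameter passed back to each hook
#endif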
void Timer::OnResume() {
    this->m_lastFrameTime = nanoTime();
}

bool Timer::Start() {
    this->m_lastFrameTime = nanoTime();
    return true;
}
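// Typical per-frame use of the timers above: Update() is called once per
// frame and the scaled delta is then consumed by the simulation. GetDelta()
// and world are hypothetical names used only to illustrate the flow; the
// real accessor for m_delta is not shown in this excerpt.
#if 0  // illustrative usage sketch
void gameLoopIteration(AndroidTimer &timer) {
    timer.Update();               // refresh m_delta from the monotonic clock
    float dt = timer.GetDelta();  // hypothetical accessor for m_delta
    world.Step(dt);               // hypothetical: advance the simulation by dt seconds
}
#endif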