/* Re-initialize a perf context from @params: capture the wall-clock start,
 * derive the end-time / iteration budgets (0 means "unlimited"), and zero all
 * running and previous-report counters plus the latency timing queue. */
static void ucx_perf_test_reset(ucx_perf_context_t *perf, ucx_perf_params_t *params)
{
    unsigned slot;

    perf->params     = *params;
    perf->start_time = ucs_get_time();
    perf->prev_time  = perf->start_time;

    /* A zero time/iteration limit means the test runs until stopped. */
    if (perf->params.max_time == 0.0) {
        perf->end_time = UINT64_MAX;
    } else {
        perf->end_time = ucs_time_from_sec(perf->params.max_time) + perf->start_time;
    }
    if (perf->params.max_iter == 0) {
        perf->max_iter = UINT64_MAX;
    } else {
        perf->max_iter = perf->params.max_iter;
    }

    perf->report_interval = ucs_time_from_sec(perf->params.report_interval);

    perf->current.time  = 0;
    perf->current.msgs  = 0;
    perf->current.bytes = 0;
    perf->current.iters = 0;

    perf->prev.time  = perf->start_time;
    perf->prev.msgs  = 0;
    perf->prev.bytes = 0;
    perf->prev.iters = 0;

    perf->timing_queue_head = 0;
    perf->offset            = 0;
    for (slot = 0; slot < TIMING_QUEUE_SIZE; ++slot) {
        perf->timing_queue[slot] = 0;
    }
}
/*
 * Dispatch one async event handler.
 *
 * If the handler has no async context, the callback is invoked directly.
 * Otherwise we try to block the context; if blocking succeeds the callback
 * runs under the block and the context is unblocked afterwards.  If the
 * context cannot be blocked (someone else holds it), the event is recorded
 * as "missed": the handler's missed flag is raised atomically and, on the
 * 0->1 transition only, the handler id is pushed to the context's missed
 * queue so it will be re-dispatched later.
 *
 * @param handler  Handler to dispatch (callback, id, arg, mode, context).
 * @return UCS_OK if the callback was invoked, UCS_ERR_NO_PROGRESS if the
 *         event was deferred to the missed queue.
 */
static ucs_status_t ucs_async_handler_dispatch(ucs_async_handler_t *handler)
{
    ucs_async_context_t *async;
    ucs_async_mode_t mode;
    ucs_status_t status;

    mode  = handler->mode;
    async = handler->async;
    /* Record wakeup time before attempting dispatch, so the "missed" trace
     * below can report when the context was last woken. */
    if (async != NULL) {
        async->last_wakeup = ucs_get_time();
    }
    if (async == NULL) {
        /* No context to serialize against - invoke the callback directly. */
        ucs_trace_async("calling async handler " UCS_ASYNC_HANDLER_FMT,
                        UCS_ASYNC_HANDLER_ARG(handler));
        handler->cb(handler->id, handler->arg);
    } else if (ucs_async_method_call(mode, context_try_block, async)) {
        /* Context successfully blocked - safe to run the callback now. */
        ucs_trace_async("calling async handler " UCS_ASYNC_HANDLER_FMT,
                        UCS_ASYNC_HANDLER_ARG(handler));
        handler->cb(handler->id, handler->arg);
        ucs_async_method_call(mode, context_unblock, async);
    } else /* async != NULL */ {
        /* Could not block the context - defer the event.  The atomic
         * compare-and-swap ensures the id is pushed to the missed queue at
         * most once while the missed flag is set. */
        ucs_trace_async("missed " UCS_ASYNC_HANDLER_FMT ", last_wakeup %lu",
                        UCS_ASYNC_HANDLER_ARG(handler), async->last_wakeup);
        if (ucs_atomic_cswap32(&handler->missed, 0, 1) == 0) {
            status = ucs_mpmc_queue_push(&async->missed, handler->id);
            if (status != UCS_OK) {
                /* Losing an event would deadlock its waiters - abort. */
                ucs_fatal("Failed to push event %d to miss queue: %s",
                          handler->id, ucs_status_string(status));
            }
        }
        return UCS_ERR_NO_PROGRESS;
    }
    return UCS_OK;
}
/* Produce a 64-bit identifier by mixing the seed with process-, host- and
 * time-dependent values, each scaled by a distinct prime so the sources
 * land in different parts of the value space. Wraparound is intentional. */
uint64_t ucs_generate_uuid(uint64_t seed)
{
    struct timeval tv;
    uint64_t uuid;

    gettimeofday(&tv, NULL);

    uuid  = seed;
    uuid += ucs_get_prime(0) * ucs_get_tid();
    uuid += ucs_get_prime(1) * ucs_get_time();
    uuid += ucs_get_prime(2) * ucs_get_mac_address();
    uuid += ucs_get_prime(3) * tv.tv_sec;
    uuid += ucs_get_prime(4) * tv.tv_usec;
    uuid += __sumup_host_name(5);
    return uuid;
}
/* Estimate memcpy bandwidth (bytes/sec) for a given buffer size by copying
 * between two anonymous mappings for roughly half a second. Returns 0.0 if
 * either mapping fails. A warm-up pass touches both buffers before timing. */
static double measure_memcpy_bandwidth(size_t size)
{
    ucs_time_t t_start, t_end;
    double bandwidth = 0.0;
    void *src_buf, *dst_buf;
    int count;

    src_buf = mmap(NULL, size, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (src_buf == MAP_FAILED) {
        goto out;
    }

    dst_buf = mmap(NULL, size, PROT_READ|PROT_WRITE,
                   MAP_PRIVATE|MAP_ANONYMOUS, -1, 0);
    if (dst_buf == MAP_FAILED) {
        goto out_unmap_src;
    }

    /* Fault pages in and warm caches before starting the clock. */
    memset(dst_buf, 0, size);
    memset(src_buf, 0, size);
    memcpy(dst_buf, src_buf, size);

    count   = 0;
    t_start = ucs_get_time();
    /* do-while guarantees at least one timed iteration even for huge sizes. */
    do {
        memcpy(dst_buf, src_buf, size);
        t_end = ucs_get_time();
        ++count;
    } while (t_end < t_start + ucs_time_from_sec(0.5));

    bandwidth = size * count / ucs_time_to_sec(t_end - t_start);

    munmap(dst_buf, size);
out_unmap_src:
    munmap(src_buf, size);
out:
    return bandwidth;
}
/* Snapshot current process resource usage: timestamp, peak RSS in bytes
 * (-1 if getrusage fails), and the number of open file descriptors. */
static void get_resource_usage(resource_usage_t *usage)
{
    struct rusage ru;

    usage->time = ucs_get_time();

    /* ru_maxrss is reported in kilobytes; convert to bytes. */
    if (getrusage(RUSAGE_SELF, &ru) == 0) {
        usage->memory = ru.ru_maxrss * 1024;
    } else {
        usage->memory = -1;
    }

    usage->num_fds = get_num_fds();
}
/*
 * Initialize a timer wheel.
 *
 * The requested resolution is rounded up to a power of two so slot lookup
 * can use a shift (res_order) instead of a division. The wheel has a fixed
 * 1024 slots, each an empty list of timers.
 *
 * @param twheel      Timer wheel to initialize.
 * @param resolution  Desired tick resolution (in ucs_time_t units).
 * @return UCS_OK on success, UCS_ERR_NO_MEMORY if slot allocation fails.
 */
ucs_status_t ucs_twheel_init(ucs_twheel_t *twheel, ucs_time_t resolution)
{
    unsigned i;

    twheel->res       = ucs_roundup_pow2(resolution);
    twheel->res_order = (unsigned) ucs_log2(twheel->res);
    twheel->num_slots = 1024;
    twheel->current   = 0;
    twheel->now       = ucs_get_time();
    twheel->wheel     = malloc(sizeof(*twheel->wheel) * twheel->num_slots);
    /* Bug fix: the allocation was previously unchecked, so a failed malloc
     * led to a NULL dereference in the init loop below. */
    if (twheel->wheel == NULL) {
        return UCS_ERR_NO_MEMORY;
    }

    for (i = 0; i < twheel->num_slots; i++) {
        ucs_list_head_init(&twheel->wheel[i]);
    }

    ucs_debug("high res timer created log=%d resolution=%lf usec wanted: %lf usec",
              twheel->res_order, ucs_time_to_usec(twheel->res),
              ucs_time_to_usec(resolution));
    return UCS_OK;
}
/* Restart the perf context clock: stamp the current time as both the test
 * start and the baseline for the previous-report interval. */
void ucx_perf_test_start_clock(ucx_perf_context_t *perf)
{
    ucs_time_t now = ucs_get_time();

    perf->start_time = now;
    perf->prev_time  = now;
    perf->prev.time  = now;
}