/* Checks percentile lookups on a histogram configured for values in the
 * tens-of-millions range with 5 significant figures of precision. */
static char* test_create_with_large_values()
{
    struct hdr_histogram* histogram = NULL;
    int rc = hdr_init(20000000, 100000000, 5, &histogram);
    mu_assert("Didn't create", rc == 0);

    hdr_record_value(histogram, 100000000);
    hdr_record_value(histogram, 20000000);
    hdr_record_value(histogram, 30000000);

    /* With 3 samples, 50% lands on the lowest, 83.33% on the middle, and
     * anything above 83.33% resolves to the highest recorded value. */
    mu_assert(
        "50.0% Percentile",
        hdr_values_are_equivalent(histogram, 20000000, hdr_value_at_percentile(histogram, 50.0)));
    mu_assert(
        "83.33% Percentile",
        hdr_values_are_equivalent(histogram, 30000000, hdr_value_at_percentile(histogram, 83.33)));
    mu_assert(
        "83.34% Percentile",
        hdr_values_are_equivalent(histogram, 100000000, hdr_value_at_percentile(histogram, 83.34)));
    mu_assert(
        "99.0% Percentile",
        hdr_values_are_equivalent(histogram, 100000000, hdr_value_at_percentile(histogram, 99.0)));

    return 0;
}
/* Verifies that a histogram recorded at 512x scale agrees with the base
 * corrected histogram on mean, count, 99th percentile and max. */
static char* test_scaling_equivalence()
{
    load_histograms();

    mu_assert(
        "Averages should be equivalent",
        compare_values(
            hdr_mean(cor_histogram) * 512,
            hdr_mean(scaled_cor_histogram),
            0.000001));

    mu_assert(
        "Total count should be equivalent",
        compare_int64(
            cor_histogram->total_count,
            scaled_cor_histogram->total_count));

    /* Compare the 99th percentiles after snapping both to the lowest
     * equivalent value, so bucket granularity differences don't matter. */
    int64_t expected_p99 = hdr_value_at_percentile(cor_histogram, 99.0) * 512;
    int64_t scaled_p99 = hdr_value_at_percentile(scaled_cor_histogram, 99.0);
    mu_assert(
        "99%'iles should be equivalent",
        compare_int64(
            hdr_lowest_equivalent_value(cor_histogram, expected_p99),
            hdr_lowest_equivalent_value(scaled_cor_histogram, scaled_p99)));

    mu_assert(
        "Max should be equivalent",
        compare_int64(hdr_max(cor_histogram) * 512, hdr_max(scaled_cor_histogram)));

    return 0;
}
/* NIF: hh_percentile(Histogram, Percentile) -> float.
 * Returns the histogram value at the given percentile, rounded to the
 * context's significant figures; 0.0 for an empty histogram. */
ERL_NIF_TERM _hh_percentile(ErlNifEnv* env, int argc, const ERL_NIF_TERM argv[])
{
    hh_ctx_t* ctx = NULL;
    double percentile;

    ErlNifResourceType* ctx_type = get_hh_ctx_type(env);

    /* Expect exactly two arguments: a histogram resource and a float. */
    if (argc != 2
        || ctx_type == NULL
        || !enif_get_resource(env, argv[0], ctx_type, (void **)&ctx)
        || !enif_get_double(env, argv[1], &percentile))
    {
        return enif_make_badarg(env);
    }

    if (ctx == NULL)
    {
        return make_error(env, "bad_hdr_histogram_nif_impl");
    }

    /* An empty histogram has no meaningful percentile; report 0.0. */
    if (ctx->data->total_count == 0)
    {
        return enif_make_double(env, 0.);
    }

    double rounded = round_to_significant_figures(
        hdr_value_at_percentile(ctx->data, percentile),
        ctx->significant_figures);
    return enif_make_double(env, rounded);
}
// Time: seconds.millis // Interval: number // IntervalPercentiles: ( 50% 90% Max ) // TotalPercentiles: ( 50% 90% 99% 99.9% 99.99% max ) void print_histogram_line (FILE *out, struct hdr_histogram *interval, struct hdr_histogram *cumulative) { struct timeval now; unsigned int millis_delta, seconds_delta, millis_remainder; gettimeofday(&now,NULL); millis_delta = ((1000000 * (now.tv_sec - start.tv_sec)) + now.tv_usec - start.tv_usec)/1000; seconds_delta = millis_delta/1000; millis_remainder = millis_delta-(seconds_delta*1000); fprintf(out,"%d.%3.3d: I:%ld ", seconds_delta,millis_remainder,interval->total_count); fprintf(out,"( %ld %ld %ld ) ", hdr_value_at_percentile(interval,50.0), hdr_value_at_percentile(interval,90.0), hdr_max(interval)); fprintf(out,"T:%ld ",cumulative->total_count); fprintf(out,"( %ld %ld %ld %ld %ld %ld)\n", hdr_value_at_percentile(cumulative,50.0), hdr_value_at_percentile(cumulative,90.0), hdr_value_at_percentile(cumulative,99.0), hdr_value_at_percentile(cumulative,99.9), hdr_value_at_percentile(cumulative,99.99), hdr_max(cumulative)); return; }
/* Returns the latency value at percentile p (0..100].  Uses the HDR
 * histogram when one is attached; otherwise falls back to rank selection
 * on the raw sample array of size stats->limit. */
uint64_t stats_percentile(stats *stats, long double p)
{
    if (stats->histogram == NULL) {
        /* Nearest-rank selection on the pre-sorted sample array. */
        uint64_t rank = round((p / 100.0) * stats->limit + 0.5);
        return stats->data[rank - 1];
    }

    int64_t v = hdr_value_at_percentile(stats->histogram, (double)p);
    /* Clamp negative (error) results to zero. */
    return v < 0 ? 0 : (uint64_t)v;
}
void report() { unsigned int current_sec = start > 0 ? time(NULL) - start + 1 : 0; int first_zero = -1; unsigned int valid_cnt = 0; unsigned int valid_pps[MAX_SECONDS]; fprintf(pps_output, "# seconds \t k packets\n"); if (current_sec > MAX_SECONDS) current_sec = MAX_SECONDS; for (int i = 0; i < current_sec; ++i) { // Omit trailing zeros if (pps[i] == 0) { if (first_zero < 0) first_zero = i; continue; } if (first_zero >= 0) { for (int j = first_zero; j < i; ++j) { fprintf(pps_output, "%d \t %d\n", j, 0); valid_pps[valid_cnt++] = 0; } first_zero = -1; } fprintf(pps_output, "%d \t %d\n", i, pps[i]); valid_pps[valid_cnt++] = pps[i]; } qsort(valid_pps, valid_cnt, sizeof(unsigned int), intcmp); hdr_percentiles_print(hist, lat_output, 5, 1.0, CLASSIC); printf("TOTAL_PACKETS=%"PRIu64"\n", received_packets); if (received_packets > 0) { printf("MEDIAN_PPS=%u\n", valid_pps[valid_cnt / 2]); printf("MAX_PPS=%u\n", valid_pps[valid_cnt - 1]); printf("LAT_50=%"PRIu64"\n", hdr_value_at_percentile(hist, 50.0)); printf("LAT_75=%"PRIu64"\n", hdr_value_at_percentile(hist, 75.0)); printf("LAT_90=%"PRIu64"\n", hdr_value_at_percentile(hist, 90.0)); printf("LAT_99=%"PRIu64"\n", hdr_value_at_percentile(hist, 99.0)); printf("LAT_99_9=%"PRIu64"\n", hdr_value_at_percentile(hist, 99.9)); } close(sock_raw); }
static char* test_reset() { load_histograms(); // before mu_assert("Value at 99% == 0.0", hdr_value_at_percentile(raw_histogram, 99.0) != 0); mu_assert("Value at 99% == 0.0", hdr_value_at_percentile(cor_histogram, 99.0) != 0); hdr_reset(raw_histogram); hdr_reset(cor_histogram); //after mu_assert("Total raw count != 0", raw_histogram->total_count == 0); mu_assert("Total corrected count != 0", cor_histogram->total_count == 0); mu_assert("Value at 99% not 0.0", hdr_value_at_percentile(raw_histogram, 99.0) == 0); mu_assert("Value at 99% not 0.0", hdr_value_at_percentile(cor_histogram, 99.0) == 0); return 0; }
/* Latency pass/fail thresholds, in the same units the histogram records
 * (NOTE(review): units not visible here — presumably nanoseconds or
 * microseconds; confirm against the recording site). */
enum {
    MAX_LATENCY_THRESHOLD   = 10000000, /* hard cap on worst observed latency */
    P9999_LATENCY_THRESHOLD = 8000000   /* cap on the 99.99th percentile */
};

/* Decide the overall test verdict for one disk: any I/O error fails
 * outright, then the max and 99.99th-percentile latencies are checked
 * against their thresholds. */
static enum conclusion conclusion_calc(disk_t *disk)
{
    if (disk->num_errors > 0)
        return CONCLUSION_FAILED_IO_ERRORS;

    if (hdr_max(disk->histogram) > MAX_LATENCY_THRESHOLD)
        return CONCLUSION_FAILED_MAX_LATENCY;

    if (hdr_value_at_percentile(disk->histogram, 99.99) > P9999_LATENCY_THRESHOLD)
        return CONCLUSION_FAILED_LATENCY_PERCENTILE;

    VERBOSE("Disk has passed the test");
    return CONCLUSION_PASSED;
}
/* Formats the 95th-percentile latency (histogram units divided by 10, one
 * decimal place) as "<prefix><value> " into buf.  Writes "<prefix>? " when
 * the histogram is empty, and nothing (returning 0) when hist is NULL.
 * Returns the snprintf result. */
static int format_latency(char *buf, size_t size, const char *prefix,
                          struct hdr_histogram *hist)
{
    if (hist == NULL)
        return 0;

    if (hist->total_count == 0)
        return snprintf(buf, size, "%s? ", prefix);

    return snprintf(buf, size, "%s%.1f ", prefix,
                    hdr_value_at_percentile(hist, 95.0) / 10.0);
}
static char* decode_v0_log() { const char* v1_log = "jHiccup-2.0.1.logV0.hlog"; FILE* f = fopen(v1_log, "r"); mu_assert("Can not open v1 log file", f != NULL); struct hdr_histogram* accum; hdr_init(1, INT64_C(3600000000000), 3, &accum); struct hdr_histogram* h = NULL; struct hdr_log_reader reader; hdr_timespec timestamp; hdr_timespec interval; hdr_log_reader_init(&reader); int rc = hdr_log_read_header(&reader, f); mu_assert("Failed to read header", rc == 0); int histogram_count = 0; int64_t total_count = 0; while ((rc = hdr_log_read(&reader, f, &h, ×tamp, &interval)) != EOF) { mu_assert("Failed to read histogram", rc == 0); histogram_count++; total_count += h->total_count; int64_t dropped = hdr_add(accum, h); mu_assert("Dropped events", compare_int64(dropped, 0)); free(h); h = NULL; } mu_assert("Wrong number of histograms", compare_int(histogram_count, 81)); mu_assert("Wrong total count", compare_int64(total_count, 61256)); mu_assert("99.9 percentile wrong", compare_int64(1510998015, hdr_value_at_percentile(accum, 99.9))); mu_assert("max value wrong", compare_int64(1569718271, hdr_max(accum))); mu_assert("Seconds wrong", compare_int64(1438869961, reader.start_timestamp.tv_sec)); mu_assert("Nanoseconds wrong", compare_int64(225000000, reader.start_timestamp.tv_nsec)); return 0; }
static char* decode_v3_log() { const char* v3_log = "jHiccup-2.0.7S.logV3.hlog"; FILE* f = fopen(v3_log, "r"); mu_assert("Can not open v3 log file", f != NULL); struct hdr_histogram* accum; hdr_init(1, INT64_C(3600000000000), 3, &accum); struct hdr_histogram* h = NULL; struct hdr_log_reader reader; hdr_timespec timestamp; hdr_timespec interval; hdr_log_reader_init(&reader); int rc = hdr_log_read_header(&reader, f); mu_assert("Failed to read header", validate_return_code(rc)); int histogram_count = 0; int64_t total_count = 0; while ((rc = hdr_log_read(&reader, f, &h, ×tamp, &interval)) != EOF) { mu_assert("Failed to read histogram", validate_return_code(rc)); histogram_count++; total_count += h->total_count; int64_t dropped = hdr_add(accum, h); mu_assert("Dropped events", compare_int64(dropped, 0)); free(h); h = NULL; } mu_assert("Wrong number of histograms", compare_int(histogram_count, 62)); mu_assert("Wrong total count", compare_int64(total_count, 48761)); mu_assert("99.9 percentile wrong", compare_int64(1745879039, hdr_value_at_percentile(accum, 99.9))); mu_assert("max value wrong", compare_int64(1796210687, hdr_max(accum))); mu_assert("Seconds wrong", compare_int64(1441812279, reader.start_timestamp.tv_sec)); mu_assert("Nanoseconds wrong", compare_int64(474000000, reader.start_timestamp.tv_nsec)); return 0; }
/* Populates the caller's Snapshot with summary statistics and common
 * percentiles under the instance mutex.  Each per-thread histogram's add()
 * is invoked with the shared histogram first (NOTE(review): presumably this
 * merges per-thread data into the shared histogram — confirm add()'s
 * argument direction). */
void get_snapshot(Snapshot* snapshot) const
{
    ScopedMutex lock(&mutex_);
    hdr_histogram* hist = histogram_;

    for (size_t idx = 0; idx < thread_state_->max_threads(); ++idx) {
        histograms_[idx].add(hist);
    }

    snapshot->min = hdr_min(hist);
    snapshot->max = hdr_max(hist);
    snapshot->mean = static_cast<int64_t>(hdr_mean(hist));
    snapshot->stddev = static_cast<int64_t>(hdr_stddev(hist));
    snapshot->median = hdr_value_at_percentile(hist, 50.0);
    snapshot->percentile_75th = hdr_value_at_percentile(hist, 75.0);
    snapshot->percentile_95th = hdr_value_at_percentile(hist, 95.0);
    snapshot->percentile_98th = hdr_value_at_percentile(hist, 98.0);
    snapshot->percentile_99th = hdr_value_at_percentile(hist, 99.0);
    snapshot->percentile_999th = hdr_value_at_percentile(hist, 99.9);
}
/* Pushes one batch of connection, traffic and latency metrics to statsd.
 * A NULL sf reports the static all-zero feedback (flushes gauges to zero);
 * a NULL statsd makes the call a no-op. */
void report_to_statsd(Statsd *statsd, statsd_feedback *sf) {
    static statsd_feedback empty_feedback;
    if(!statsd) return;
    if(!sf) sf = &empty_feedback;
    statsd_resetBatch(statsd);
/* Add one metric to the current batch; if the batch is full, flush it and
 * retry the add once.  Any other failure aborts via assert. */
#define SBATCH(t, str, value)                                    \
    do {                                                         \
        int ret = statsd_addToBatch(statsd, t, str, value, 1);   \
        if(ret == STATSD_BATCH_FULL) {                           \
            statsd_sendBatch(statsd);                            \
            ret = statsd_addToBatch(statsd, t, str, value, 1);   \
        }                                                        \
        assert(ret == STATSD_SUCCESS);                           \
    } while(0)
    SBATCH(STATSD_COUNT, "connections.opened", sf->opened);
    SBATCH(STATSD_GAUGE, "connections.total", sf->conns_in + sf->conns_out);
    SBATCH(STATSD_GAUGE, "connections.total.in", sf->conns_in);
    SBATCH(STATSD_GAUGE, "connections.total.out", sf->conns_out);
    SBATCH(STATSD_GAUGE, "traffic.bitrate", sf->bps_in + sf->bps_out);
    SBATCH(STATSD_GAUGE, "traffic.bitrate.in", sf->bps_in);
    SBATCH(STATSD_GAUGE, "traffic.bitrate.out", sf->bps_out);
    SBATCH(STATSD_COUNT, "traffic.data",
           sf->traffic_delta.bytes_rcvd + sf->traffic_delta.bytes_sent);
    SBATCH(STATSD_COUNT, "traffic.data.rcvd", sf->traffic_delta.bytes_rcvd);
    SBATCH(STATSD_COUNT, "traffic.data.sent", sf->traffic_delta.bytes_sent);
    SBATCH(STATSD_COUNT, "traffic.data.reads", sf->traffic_delta.num_reads);
    SBATCH(STATSD_COUNT, "traffic.data.writes", sf->traffic_delta.num_writes);
    /* Latency gauges are sent when a marker histogram exists, or zeroed out
     * when reporting the empty feedback. */
    if((sf->latency && sf->latency->marker_histogram) || sf == &empty_feedback) {
        struct {
            unsigned p50;
            unsigned p95;
            unsigned p99;
            unsigned p99_5;
            unsigned mean;
            unsigned max;
        } lat;
        if(sf->latency && sf->latency->marker_histogram) {
            struct hdr_histogram *hist = sf->latency->marker_histogram;
            /* Histogram values are divided by 10 before reporting —
             * NOTE(review): presumably recorded in tenths of the reported
             * unit; confirm against the recording site. */
            lat.p50 = hdr_value_at_percentile(hist, 50.0) / 10.0;
            lat.p95 = hdr_value_at_percentile(hist, 95.0) / 10.0;
            lat.p99 = hdr_value_at_percentile(hist, 99.0) / 10.0;
            lat.p99_5 = hdr_value_at_percentile(hist, 99.5) / 10.0;
            lat.mean = hdr_mean(hist) / 10.0;
            lat.max = hdr_max(hist) / 10.0;
            /* Sanity bounds on the reported magnitudes. */
            assert(lat.p95 < 1000000);
            assert(lat.mean < 1000000);
            assert(lat.max < 1000000);
        } else {
            memset(&lat, 0, sizeof(lat));
        }
        SBATCH(STATSD_GAUGE, "latency.mean", lat.mean);
        SBATCH(STATSD_GAUGE, "latency.50", lat.p50);
        SBATCH(STATSD_GAUGE, "latency.95", lat.p95);
        SBATCH(STATSD_GAUGE, "latency.99", lat.p99);
        SBATCH(STATSD_GAUGE, "latency.99.5", lat.p99_5);
        SBATCH(STATSD_GAUGE, "latency.max", lat.max);
    }
    statsd_sendBatch(statsd);
}
static char* test_percentiles() { load_histograms(); mu_assert("Value at 30% not 1000.0", compare_percentile(hdr_value_at_percentile(raw_histogram, 30.0), 1000.0, 0.001)); mu_assert("Value at 99% not 1000.0", compare_percentile(hdr_value_at_percentile(raw_histogram, 99.0), 1000.0, 0.001)); mu_assert("Value at 99.99% not 1000.0", compare_percentile(hdr_value_at_percentile(raw_histogram, 99.99), 1000.0, 0.001)); mu_assert("Value at 99.999% not 100000000.0", compare_percentile(hdr_value_at_percentile(raw_histogram, 99.999), 100000000.0, 0.001)); mu_assert("Value at 100% not 100000000.0", compare_percentile(hdr_value_at_percentile(raw_histogram, 100.0), 100000000.0, 0.001)); mu_assert("Value at 30% not 1000.0", compare_percentile(hdr_value_at_percentile(cor_histogram, 30.0), 1000.0, 0.001)); mu_assert("Value at 50% not 1000.0", compare_percentile(hdr_value_at_percentile(cor_histogram, 50.0), 1000.0, 0.001)); mu_assert("Value at 75% not 50000000.0", compare_percentile(hdr_value_at_percentile(cor_histogram, 75.0), 50000000.0, 0.001)); mu_assert("Value at 90% not 80000000.0", compare_percentile(hdr_value_at_percentile(cor_histogram, 90.0), 80000000.0, 0.001)); mu_assert("Value at 99% not 98000000.0", compare_percentile(hdr_value_at_percentile(cor_histogram, 99.0), 98000000.0, 0.001)); mu_assert("Value at 99.999% not 100000000.0", compare_percentile(hdr_value_at_percentile(cor_histogram, 99.999), 100000000.0, 0.001)); mu_assert("Value at 100% not 100000000.0", compare_percentile(hdr_value_at_percentile(cor_histogram, 100.0), 100000000.0, 0.001)); return 0; }
/* Searches for the highest message-send rate that keeps the 95th-percentile
 * latency near rm->latency_target.  State machine: INITIAL -> RAMP_UP
 * (rate quadruples while latency stays low) -> BINARY_SEARCH (bisect
 * between the last good and first bad rate).  Returns MRR_ONGOING while
 * searching, or a SUCCEEDED/FAILED verdict when the search terminates. */
static enum {
    MRR_ONGOING,
    MRR_RATE_SEARCH_SUCCEEDED,
    MRR_RATE_SEARCH_FAILED
} modulate_request_rate(struct engine *eng, double now, struct rate_modulator *rm,
                        struct latency_snapshot *latency) {
    if(rm->mode == RM_UNMODULATED || !latency->marker_histogram)
        return MRR_ONGOING;
    /*
     * Do not measure and modulate latency estimates more frequently than
     * necessary.
     */
    const double short_time = 1.0; /* Every second */
    const double long_time = 10.0;
    if(rm->state == RMS_STATE_INITIAL) {
        /* First call: seed the suggested rate from the configured send rate,
         * or an arbitrary 100 msg/s default, and arm the timers. */
        const struct engine_params *params = engine_params(eng);
        if(params->channel_send_rate.value_base == RS_MESSAGES_PER_SECOND)
            rm->suggested_rate_value = params->channel_send_rate.value;
        else
            rm->suggested_rate_value = 100;
        rm->binary_search_steps = 15;
        rm->last_update_short = now;
        rm->last_update_long = now;
        rm->state = RMS_RATE_RAMP_UP;
    }
    if(!every(short_time, now, &rm->last_update_short)) return MRR_ONGOING;
    /* p95 latency; histogram units divided by 10*1000 — NOTE(review):
     * presumably tenths of microseconds converted to milliseconds; confirm
     * against the recording site. */
    double lat = hdr_value_at_percentile(latency->marker_histogram, 95.0) / 10.0 / 1000.0;
    if(every(long_time, now, &rm->last_update_long)) {
        /*
         * Every long time (to make moving averages stabilize a bit)
         * we do assessment and ramp up further or adjust lower and
         * upper bounds if we're doing a binary search for the best rate.
         */
        rm->prev_latency = lat;
        rm->prev_max_latency_exceeded = 0;
        if(rm->state == RMS_RATE_RAMP_UP) {
            if(lat < 1.5 * rm->latency_target) {
                if(lat < 0.95 * rm->latency_target) {
                    /* Comfortably below target: this rate is a safe lower
                     * bound; no known upper bound yet. */
                    rm->rate_min_bound = rm->suggested_rate_value;
                    rm->rate_max_bound = INFINITY;
                }
                rm->suggested_rate_value *= 4;
            } else {
                /* Latency blew past target: stop ramping, start bisecting
                 * with the current rate as the upper bound. */
                rm->state = RMS_RATE_BINARY_SEARCH;
                rm->rate_max_bound = rm->suggested_rate_value;
            }
        }
        if(rm->state == RMS_RATE_BINARY_SEARCH) {
            if(rm->binary_search_steps-- <= 0) return MRR_RATE_SEARCH_FAILED;
            if(lat < 0.98 * rm->latency_target) {
                rm->rate_min_bound = rm->suggested_rate_value;
            } else if(lat > 1.01 * rm->latency_target) {
                rm->rate_max_bound = rm->suggested_rate_value;
            } else {
                /* Within the [0.98, 1.01] target band: done. */
                return MRR_RATE_SEARCH_SUCCEEDED;
            }
            /* If bounds are within 1% of each other, means we've failed
             * (the expression actually compares half the bound gap against
             * 0.1% of the lower bound). */
            if(rm->rate_max_bound > 0
               && 0.001 > ((rm->rate_max_bound - rm->rate_min_bound) / 2)
                              / rm->rate_min_bound) {
                if(lat <= rm->latency_target) return MRR_RATE_SEARCH_SUCCEEDED;
                return MRR_RATE_SEARCH_FAILED;
            }
            /* Bisect the remaining range. */
            rm->suggested_rate_value =
                (rm->rate_max_bound + rm->rate_min_bound) / 2.0;
        }
        engine_update_message_send_rate(eng, rm->suggested_rate_value);
        fprintf(stderr, "Attempting --message-rate %g (in range %g..%g)%s\n",
                rm->suggested_rate_value, rm->rate_min_bound,
                rm->rate_max_bound, tcpkali_clear_eol());
    } else if(rm->state == RMS_RATE_RAMP_UP) {
        /* Short-interval checks during ramp-up: count consecutive seconds
         * where latency exceeds 2x target AND keeps rising. */
        if(lat > 2 * rm->latency_target) {
            if(lat > rm->prev_latency) {
                rm->prev_max_latency_exceeded++;
            } else {
                /* If we're fluctuating, disable fast exit from RAMP_UP */
                rm->prev_max_latency_exceeded = -1000;
            }
        }
        rm->prev_latency = lat;
        /*
         * If for the last few seconds we've been consistently increasing
         * latency, quickly exit the ramp-up state.
         */
        if(rm->prev_max_latency_exceeded > 3) {
            rm->state = RMS_RATE_BINARY_SEARCH;
            rm->rate_max_bound = rm->suggested_rate_value;
        }
    }
    return MRR_ONGOING;
}