void lat_target_init(struct thread_data *td)
{
	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}
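/*
 * The fields set up above bracket an adaptive search over queue depth:
 * latency_qd is the depth currently being tried, while latency_qd_low
 * and latency_qd_high (1 and the configured iodepth, respectively) are
 * the bounds the depth is later bisected between as measurement windows
 * meet or miss the latency target.
 */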
/*
 * Check if we can bump the queue depth
 */
void lat_target_check(struct thread_data *td)
{
	uint64_t usec_window;
	uint64_t ios;
	double success_ios;

	usec_window = utime_since_now(&td->latency_ts);
	if (usec_window < td->o.latency_window)
		return;

	ios = ddir_rw_sum(td->io_blocks) - td->latency_ios;
	success_ios = (double) (ios - td->latency_failed) / (double) ios;
	success_ios *= 100.0;

	dprint(FD_RATE, "Success rate: %.2f%% (target %.2f%%)\n",
			success_ios, td->o.latency_percentile.u.f);

	if (success_ios >= td->o.latency_percentile.u.f)
		lat_target_success(td);
	else
		__lat_target_failed(td);
}
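/*
 * lat_target_success() and __lat_target_failed() are not part of this
 * excerpt. The sketches below show the bisection they would perform on
 * the bounds set up in lat_target_init(); the bodies are illustrative
 * assumptions, not fio's verbatim implementation.
 */
static void lat_new_cycle(struct thread_data *td);

static void __lat_target_failed_sketch(struct thread_data *td)
{
	/*
	 * Depth td->latency_qd missed the target: make it the new upper
	 * bound and bisect back towards the last depth that worked.
	 */
	if (td->latency_qd == 1)
		return;

	td->latency_qd_high = td->latency_qd;
	td->latency_qd = (td->latency_qd + td->latency_qd_low) / 2;

	lat_new_cycle(td);
}

static void lat_target_success_sketch(struct thread_data *td)
{
	/*
	 * Depth td->latency_qd met the target: make it the new lower
	 * bound and bisect up towards the upper bound, capped at the
	 * configured iodepth.
	 */
	td->latency_qd_low = td->latency_qd;
	td->latency_qd = (td->latency_qd + td->latency_qd_high) / 2;

	if (td->latency_qd > td->o.iodepth)
		td->latency_qd = td->o.iodepth;

	lat_new_cycle(td);
}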
/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i, unified_rw_rep;
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (output_format != FIO_OUTPUT_NORMAL && f_out == stdout)
			return 0;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return 0;
		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return 0;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	unified_rw_rep = 0;
	for_each_td(td, i) {
		unified_rw_rep += td->o.unified_rw_rep;
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		je->unit_base = td->o.unit_base;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING ||
		    td->runstate == TD_FSYNCING ||
		    td->runstate == TD_PRE_READING ||
		    td->runstate == TD_FINISHING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate[0] += td->o.rate[DDIR_READ];
				je->t_iops[0] += td->o.rate_iops[DDIR_READ];
				je->m_rate[0] += td->o.ratemin[DDIR_READ];
				je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate[1] += td->o.rate[DDIR_WRITE];
				je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
				je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
				je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate[2] += td->o.rate[DDIR_TRIM];
				je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
				je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
				je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
			}
			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP) {
			je->nr_running++;
			je->nr_setting_up++;
		} else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_SETTING_UP) {
			int ddir;

			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
				if (unified_rw_rep) {
					io_bytes[0] += td->io_bytes[ddir];
					io_iops[0] += td->io_blocks[ddir];
				} else {
					io_bytes[ddir] += td->io_bytes[ddir];
					io_iops[ddir] += td->io_blocks[ddir];
				}
			}
		}
	}

	if (exitall_on_terminate)
		je->eta_sec = INT_MAX;
	else
		je->eta_sec = 0;

	for_each_td(td, i) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(unified_rw_rep, rate_time, io_bytes, rate_io_bytes,
				je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
		add_agg_sample(je->rate[DDIR_TRIM], DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack, the target is to print it every 1000 msecs
	 */
	if (!force && disp_time < 900)
		return 0;

	calc_rate(unified_rw_rep, disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(unified_rw_rep, disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return 0;

	je->nr_threads = thread_number;
	memcpy(je->run_str, run_str, thread_number * sizeof(char));
	return 1;
}
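/*
 * Usage sketch for calc_thread_status(): a once-a-second poller that
 * hands the snapshot to the display side. The consumer name
 * display_thread_status() and the tail allocation for run_str are
 * assumptions for illustration, not part of this excerpt.
 */
static void poll_status_once(void)
{
	struct jobs_eta *je;

	/* run_str is copied to the tail of the structure on success */
	je = calloc(1, sizeof(*je) + thread_number * sizeof(char));
	if (!je)
		return;

	if (calc_thread_status(je, 0))
		display_thread_status(je);	/* assumed consumer */

	free(je);
}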
/*
 * Best effort calculation of the estimated pending runtime of a job.
 */
static int thread_eta(struct thread_data *td)
{
	unsigned long long bytes_total, bytes_done;
	unsigned long eta_sec = 0;
	unsigned long elapsed;
	uint64_t timeout;

	elapsed = (mtime_since_now(&td->epoch) + 999) / 1000;
	timeout = td->o.timeout / 1000000UL;

	bytes_total = td->total_io_size;

	if (td->o.fill_device && td->o.size == -1ULL) {
		if (!td->fill_device_size || td->fill_device_size == -1ULL)
			return 0;

		bytes_total = td->fill_device_size;
	}

	if (td->o.zone_size && td->o.zone_skip && bytes_total) {
		unsigned int nr_zones;
		uint64_t zone_bytes;

		zone_bytes = bytes_total + td->o.zone_size + td->o.zone_skip;
		nr_zones = (zone_bytes - 1) / (td->o.zone_size + td->o.zone_skip);
		bytes_total -= nr_zones * td->o.zone_skip;
	}

	/*
	 * if writing and verifying afterwards, bytes_total will be twice the
	 * size. In a mixed workload, verify phase will be the size of the
	 * first stage writes.
	 */
	if (td->o.do_verify && td->o.verify && td_write(td)) {
		if (td_rw(td)) {
			unsigned int perc = 50;

			if (td->o.rwmix[DDIR_WRITE])
				perc = td->o.rwmix[DDIR_WRITE];

			bytes_total += (bytes_total * perc) / 100;
		} else
			bytes_total <<= 1;
	}

	if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING) {
		double perc, perc_t;

		bytes_done = ddir_rw_sum(td->io_bytes);

		if (bytes_total) {
			perc = (double) bytes_done / (double) bytes_total;
			if (perc > 1.0)
				perc = 1.0;
		} else
			perc = 0.0;

		if (td->o.time_based) {
			if (timeout) {
				perc_t = (double) elapsed / (double) timeout;
				if (perc_t < perc)
					perc = perc_t;
			} else {
				/*
				 * Will never hit, we can't have time_based
				 * without a timeout set.
				 */
				perc = 0.0;
			}
		}

		eta_sec = (unsigned long) (elapsed * (1.0 / perc)) - elapsed;

		if (td->o.timeout &&
		    eta_sec > (timeout + done_secs - elapsed))
			eta_sec = timeout + done_secs - elapsed;
	} else if (td->runstate == TD_NOT_CREATED || td->runstate == TD_CREATED
			|| td->runstate == TD_INITIALIZED
			|| td->runstate == TD_SETTING_UP
			|| td->runstate == TD_RAMP
			|| td->runstate == TD_PRE_READING) {
		int t_eta = 0, r_eta = 0;
		unsigned long long rate_bytes;

		/*
		 * We can only guess - assume it'll run the full timeout
		 * if given, otherwise assume it'll run at the specified rate.
		 */
		if (td->o.timeout) {
			uint64_t timeout = td->o.timeout;
			uint64_t start_delay = td->o.start_delay;
			uint64_t ramp_time = td->o.ramp_time;

			t_eta = timeout + start_delay + ramp_time;
			t_eta /= 1000000ULL;

			if (in_ramp_time(td)) {
				unsigned long ramp_left;

				ramp_left = mtime_since_now(&td->epoch);
				ramp_left = (ramp_left + 999) / 1000;
				if (ramp_left <= t_eta)
					t_eta -= ramp_left;
			}
		}

		rate_bytes = ddir_rw_sum(td->o.rate);
		if (rate_bytes) {
			r_eta = (bytes_total / 1024) / rate_bytes;
			r_eta += (td->o.start_delay / 1000000ULL);
		}

		if (r_eta && t_eta)
			eta_sec = min(r_eta, t_eta);
		else if (r_eta)
			eta_sec = r_eta;
		else if (t_eta)
			eta_sec = t_eta;
		else
			eta_sec = 0;
	} else {
		/*
		 * thread is already done or waiting for fsync
		 */
		eta_sec = 0;
	}

	return eta_sec;
}
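/*
 * Worked example of the running-job estimate above: with bytes_done of
 * 256MB against bytes_total of 1GB, perc is 0.25; if elapsed is 30
 * seconds, eta_sec = 30 * (1 / 0.25) - 30 = 90 seconds remaining.
 */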
static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}