Example #1
File: log.c Project: jeefke/fio
int read_iolog_get(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo;
	unsigned long elapsed;
	
	while (!flist_empty(&td->io_log_list)) {
		int ret;

		ipo = flist_entry(td->io_log_list.next, struct io_piece, list);
		flist_del(&ipo->list);
		remove_trim_entry(td, ipo);

		/* negative return: stop replaying; positive: entry handled, fetch the next one */
		ret = ipo_special(td, ipo);
		if (ret < 0) {
			free(ipo);
			break;
		} else if (ret > 0) {
			free(ipo);
			continue;
		}

		io_u->ddir = ipo->ddir;
		if (ipo->ddir != DDIR_WAIT) {
			io_u->offset = ipo->offset;
			io_u->buflen = ipo->len;
			io_u->file = td->files[ipo->fileno];
			get_file(io_u->file);
			dprint(FD_IO, "iolog: get %llu/%lu/%s\n", io_u->offset,
						io_u->buflen, io_u->file->file_name);
			if (ipo->delay)
				iolog_delay(td, ipo->delay);
		} else {
			/* DDIR_WAIT entry: sleep off whatever remains of the logged delay */
			elapsed = mtime_since_genesis();
			if (ipo->delay > elapsed)
				usec_sleep(td, (ipo->delay - elapsed) * 1000);
				
		}

		free(ipo);
		
		if (io_u->ddir != DDIR_WAIT)
			return 0;
	}

	td->done = 1;
	return 1;
}
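
The return convention above is worth spelling out: 0 means io_u has been populated from the next logged entry and should be issued, 1 means the log is exhausted and td->done has been set. A minimal, hypothetical caller sketch follows; get_free_io_u(), put_free_io_u() and issue_io() are placeholders for illustration, not fio's actual call sites.

static void replay_iolog(struct thread_data *td)
{
	while (!td->done) {
		struct io_u *io_u = get_free_io_u(td);	/* placeholder helper */

		if (read_iolog_get(td, io_u)) {
			/* log exhausted; td->done was set inside */
			put_free_io_u(td, io_u);	/* placeholder helper */
			break;
		}

		issue_io(td, io_u);			/* placeholder helper */
	}
}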
Example #2
/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
void print_thread_status(void)
{
	unsigned long elapsed = (mtime_since_genesis() + 999) / 1000;
	int i, nr_ramp, nr_running, nr_pending, t_rate, m_rate;
	int t_iops, m_iops, files_open;
	struct thread_data *td;
	char eta_str[128];
	double perc = 0.0;
	unsigned long long io_bytes[2], io_iops[2];
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs, eta_sec;
	struct timeval now;

	static unsigned long long rate_io_bytes[2];
	static unsigned long long disp_io_bytes[2];
	static unsigned long long disp_io_iops[2];
	static struct timeval rate_prev_time, disp_prev_time;
	static unsigned int rate[2], iops[2];
	static int linelen_last;
	static int eta_good;
	int i2p = 0;

	if (temp_stall_ts || terse_output || eta_print == FIO_ETA_NEVER)
		return;

	if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
		return;

	if (!rate_io_bytes[0] && !rate_io_bytes[1])
		fill_start_time(&rate_prev_time);
	if (!disp_io_bytes[0] && !disp_io_bytes[1])
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	io_bytes[0] = io_bytes[1] = 0;
	io_iops[0] = io_iops[1] = 0;
	nr_pending = nr_running = t_rate = m_rate = t_iops = m_iops = 0;
	nr_ramp = 0;
	bw_avg_time = ULONG_MAX;
	files_open = 0;
	for_each_td(td, i) {
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING) {
			nr_running++;
			t_rate += td->o.rate[0] + td->o.rate[1];
			m_rate += td->o.ratemin[0] + td->o.ratemin[1];
			t_iops += td->o.rate_iops[0] + td->o.rate_iops[1];
			m_iops += td->o.rate_iops_min[0] + td->o.rate_iops_min[1];
			files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			nr_running++;
			nr_ramp++;
		} else if (td->runstate < TD_RUNNING)
			nr_pending++;

		if (elapsed >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_RAMP) {
			io_bytes[0] += td->io_bytes[0];
			io_bytes[1] += td->io_bytes[1];
			io_iops[0] += td->io_blocks[0];
			io_iops[1] += td->io_blocks[1];
		}
	}

	if (exitall_on_terminate)
		eta_sec = INT_MAX;
	else
		eta_sec = 0;

	for_each_td(td, i) {
		if (!i2p && is_power_of_2(td->o.kb_base))
			i2p = 1;
		if (exitall_on_terminate) {
			if (eta_secs[i] < eta_sec)
				eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > eta_sec)
				eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	if (eta_sec != INT_MAX && elapsed) {
		perc = (double) elapsed / (double) (elapsed + eta_sec);
		eta_to_str(eta_str, eta_sec);
	}

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(rate_time, io_bytes, rate_io_bytes, rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(rate[DDIR_WRITE], DDIR_WRITE, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);
	if (disp_time < 1000)
		return;

	calc_rate(disp_time, io_bytes, disp_io_bytes, rate);
	calc_iops(disp_time, io_iops, disp_io_iops, iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!nr_running && !nr_pending)
		return;

	printf("Jobs: %d (f=%d)", nr_running, files_open);
	if (m_rate || t_rate) {
		char *tr, *mr;

		mr = num2str(m_rate, 4, 0, i2p);
		tr = num2str(t_rate, 4, 0, i2p);
		printf(", CR=%s/%s KB/s", tr, mr);
		free(tr);
		free(mr);
	} else if (m_iops || t_iops)
		printf(", CR=%d/%d IOPS", t_iops, m_iops);
	if (eta_sec != INT_MAX && nr_running) {
		char perc_str[32];
		char *iops_str[2];
		char *rate_str[2];
		int l;

		if ((!eta_sec && !eta_good) || nr_ramp == nr_running)
			strcpy(perc_str, "-.-% done");
		else {
			eta_good = 1;
			perc *= 100.0;
			sprintf(perc_str, "%3.1f%% done", perc);
		}

		rate_str[0] = num2str(rate[0], 5, 10, i2p);
		rate_str[1] = num2str(rate[1], 5, 10, i2p);

		iops_str[0] = num2str(iops[0], 4, 1, 0);
		iops_str[1] = num2str(iops[1], 4, 1, 0);

		l = printf(": [%s] [%s] [%s/%s /s] [%s/%s iops] [eta %s]",
				 run_str, perc_str, rate_str[0], rate_str[1], 
				 iops_str[0], iops_str[1], eta_str);
		if (l >= 0 && l < linelen_last)
			printf("%*s", linelen_last - l, "");
		linelen_last = l;

		free(rate_str[0]);
		free(rate_str[1]);
		free(iops_str[0]);
		free(iops_str[1]);
	}
	printf("\r");
	fflush(stdout);
}
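
This example and the two below rely on calc_rate() and calc_iops(), whose bodies are not shown. A rough sketch of the assumed semantics is given here: take the delta since the previous sample, scale it over the elapsed interval in milliseconds, and remember the current totals for the next call. This is an illustrative guess, not fio's actual implementation.

static void calc_rate_sketch(unsigned long mtime,
			     unsigned long long *io_bytes,
			     unsigned long long *prev_io_bytes,
			     unsigned int *rate)
{
	int i;

	for (i = 0; i < 2; i++) {
		unsigned long long diff;

		/* bytes transferred since the previous sample */
		diff = io_bytes[i] - prev_io_bytes[i];

		/* scale to KB/s over the millisecond interval */
		rate[i] = mtime ? ((1000ULL * diff) / mtime) / 1024 : 0;

		prev_io_bytes[i] = io_bytes[i];
	}
}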
Example #3
/*
 * Calculate the status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
    struct thread_data *td;
    int i, unified_rw_rep;
    unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
    unsigned long long io_bytes[DDIR_RWDIR_CNT];
    unsigned long long io_iops[DDIR_RWDIR_CNT];
    struct timeval now;

    static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
    static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
    static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
    static struct timeval rate_prev_time, disp_prev_time;

    if (!force) {
        if (output_format != FIO_OUTPUT_NORMAL &&
                f_out == stdout)
            return 0;
        if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
            return 0;

        if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
            return 0;
    }

    if (!ddir_rw_sum(rate_io_bytes))
        fill_start_time(&rate_prev_time);
    if (!ddir_rw_sum(disp_io_bytes))
        fill_start_time(&disp_prev_time);

    eta_secs = malloc(thread_number * sizeof(unsigned long));
    memset(eta_secs, 0, thread_number * sizeof(unsigned long));

    je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

    io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
    io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
    bw_avg_time = ULONG_MAX;
    unified_rw_rep = 0;
    for_each_td(td, i) {
        unified_rw_rep += td->o.unified_rw_rep;
        if (is_power_of_2(td->o.kb_base))
            je->is_pow2 = 1;
        je->unit_base = td->o.unit_base;
        if (td->o.bw_avg_time < bw_avg_time)
            bw_avg_time = td->o.bw_avg_time;
        if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
                || td->runstate == TD_FSYNCING
                || td->runstate == TD_PRE_READING
                || td->runstate == TD_FINISHING) {
            je->nr_running++;
            if (td_read(td)) {
                je->t_rate[0] += td->o.rate[DDIR_READ];
                je->t_iops[0] += td->o.rate_iops[DDIR_READ];
                je->m_rate[0] += td->o.ratemin[DDIR_READ];
                je->m_iops[0] += td->o.rate_iops_min[DDIR_READ];
            }
            if (td_write(td)) {
                je->t_rate[1] += td->o.rate[DDIR_WRITE];
                je->t_iops[1] += td->o.rate_iops[DDIR_WRITE];
                je->m_rate[1] += td->o.ratemin[DDIR_WRITE];
                je->m_iops[1] += td->o.rate_iops_min[DDIR_WRITE];
            }
            if (td_trim(td)) {
                je->t_rate[2] += td->o.rate[DDIR_TRIM];
                je->t_iops[2] += td->o.rate_iops[DDIR_TRIM];
                je->m_rate[2] += td->o.ratemin[DDIR_TRIM];
                je->m_iops[2] += td->o.rate_iops_min[DDIR_TRIM];
            }

            je->files_open += td->nr_open_files;
        } else if (td->runstate == TD_RAMP) {
            je->nr_running++;
            je->nr_ramp++;
        } else if (td->runstate == TD_SETTING_UP) {
            je->nr_running++;
            je->nr_setting_up++;
        } else if (td->runstate < TD_RUNNING)
            je->nr_pending++;

        if (je->elapsed_sec >= 3)
            eta_secs[i] = thread_eta(td);
        else
            eta_secs[i] = INT_MAX;

        check_str_update(td);

        if (td->runstate > TD_SETTING_UP) {
            int ddir;

            for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
                if (unified_rw_rep) {
                    io_bytes[0] += td->io_bytes[ddir];
                    io_iops[0] += td->io_blocks[ddir];
                } else {
                    io_bytes[ddir] += td->io_bytes[ddir];
                    io_iops[ddir] += td->io_blocks[ddir];
                }
            }
        }
    }
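
Examples #3 and #4 gate the one-time initialisation of the static counters on ddir_rw_sum(). Assuming that helper simply sums the read, write and trim slots, a sketch (renamed here to mark it as an assumption) could look like this:

static inline unsigned long long ddir_rw_sum_sketch(unsigned long long arr[])
{
	return arr[DDIR_READ] + arr[DDIR_WRITE] + arr[DDIR_TRIM];
}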
Example #4
/*
 * Calculate the status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i;
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (output_format != FIO_OUTPUT_NORMAL)
			return 0;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return 0;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return 0;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	for_each_td(td, i) {
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate += td->o.rate[DDIR_READ];
				je->t_iops += td->o.rate_iops[DDIR_READ];
				je->m_rate += td->o.ratemin[DDIR_READ];
				je->m_iops += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate += td->o.rate[DDIR_WRITE];
				je->t_iops += td->o.rate_iops[DDIR_WRITE];
				je->m_rate += td->o.ratemin[DDIR_WRITE];
				je->m_iops += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate += td->o.rate[DDIR_TRIM];
				je->t_iops += td->o.rate_iops[DDIR_TRIM];
				je->m_rate += td->o.ratemin[DDIR_TRIM];
				je->m_iops += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP)
			je->nr_running++;
		else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_RAMP) {
			int ddir;
			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
				io_bytes[ddir] += td->io_bytes[ddir];
				io_iops[ddir] += td->io_blocks[ddir];
			}
		}
	}

	if (exitall_on_terminate)
		je->eta_sec = INT_MAX;
	else
		je->eta_sec = 0;

	for_each_td(td, i) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(rate_time, io_bytes, rate_io_bytes, je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
		add_agg_sample(je->rate[DDIR_TRIM], DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack, the target is to print it every 1000 msecs
	 */
	if (!force && disp_time < 900)
		return 0;

	calc_rate(disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return 0;

	je->nr_threads = thread_number;
	memcpy(je->run_str, run_str, thread_number * sizeof(char));

	return 1;
}
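
To show how the return value is meant to be consumed, a hypothetical polling caller is sketched below: only a return of 1 indicates a fresh jobs_eta snapshot worth rendering. The buffer sizing for the trailing run_str and the display_eta() routine are assumptions, not fio's actual code.

static void eta_poll_once(void)
{
	struct jobs_eta *je;

	/*
	 * Leave room for the per-thread run_str copied at the end of
	 * calc_thread_status(); the exact sizing is an assumption.
	 */
	je = calloc(1, sizeof(*je) + thread_number + 1);
	if (!je)
		return;

	if (calc_thread_status(je, 0))
		display_eta(je);	/* placeholder display routine */

	free(je);
}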