Example #1
static uint64_t alloc_reply(uint64_t tag, uint16_t opcode)
{
	struct fio_net_cmd_reply *reply;

	reply = calloc(1, sizeof(*reply));
	INIT_FLIST_HEAD(&reply->list);
	fio_gettime(&reply->tv, NULL);
	reply->saved_tag = tag;
	reply->opcode = opcode;

	return (uintptr_t) reply;
}
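
Almost every example that follows uses the same idiom: fio_gettime() captures a timestamp (a struct timeval here; newer fio uses struct timespec, see Example #12), and helpers such as utime_since_now() and mtime_since() turn two timestamps into elapsed microseconds or milliseconds. Below is a minimal standalone sketch of that idiom, with the fio helpers approximated by plain gettimeofday() arithmetic; usec_between() is an illustrative stand-in, not fio code.

/* Minimal sketch of the timing idiom used throughout these examples;
 * fio_gettime()/utime_since_now() are approximated with gettimeofday(). */
#include <stdint.h>
#include <stdio.h>
#include <sys/time.h>
#include <unistd.h>

static uint64_t usec_between(const struct timeval *s, const struct timeval *e)
{
	return (uint64_t)(e->tv_sec - s->tv_sec) * 1000000 +
	       (e->tv_usec - s->tv_usec);
}

int main(void)
{
	struct timeval start, end;

	gettimeofday(&start, NULL);	/* fio: fio_gettime(&start, NULL) */
	usleep(1000);			/* the work being timed */
	gettimeofday(&end, NULL);

	/* fio: utime_since_now(&start) */
	printf("elapsed: %llu usec\n",
	       (unsigned long long) usec_between(&start, &end));
	return 0;
}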
Example #2
File: io_u.c Project: hanhua/fio
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	return ddir;
}
Example #3
void lat_target_init(struct thread_data *td)
{
	if (td->o.latency_target) {
		dprint(FD_RATE, "Latency target=%llu\n", td->o.latency_target);
		fio_gettime(&td->latency_ts, NULL);
		td->latency_qd = 1;
		td->latency_qd_high = td->o.iodepth;
		td->latency_qd_low = 1;
		td->latency_ios = ddir_rw_sum(td->io_blocks);
	} else
		td->latency_qd = td->o.iodepth;
}
Example #4
static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	int ddir;

	if (!td->o.disable_clat || !td->o.disable_bw)
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
		icd->bytes_done[ddir] = 0;
}
Example #5
static void init_icd(struct thread_data *td, struct io_completion_data *icd,
		     int nr)
{
	int ddir;

	if (!gtod_reduce(td))
		fio_gettime(&icd->time, NULL);

	icd->nr = nr;

	icd->error = 0;
	for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
		icd->bytes_done[ddir] = 0;
}
Example #6
int ramp_time_over(struct thread_data *td)
{
	struct timeval tv;

	if (!td->o.ramp_time || td->ramp_time_over)
		return 1;

	fio_gettime(&tv, NULL);
	if (mtime_since(&td->epoch, &tv) >= td->o.ramp_time * 1000) {
		td->ramp_time_over = 1;
		reset_all_stats(td);
		td_set_runstate(td, TD_RAMP);
		return 1;
	}

	return 0;
}
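
Here ramp_time is evidently configured in seconds while mtime_since() reports milliseconds, hence the multiplication by 1000. A standalone sketch of the same millisecond deadline check, with the fio helpers approximated by gettimeofday() arithmetic (msec_between() and ramp_over() are illustrative names, not fio code):

#include <sys/time.h>

static long msec_between(const struct timeval *s, const struct timeval *e)
{
	return (e->tv_sec - s->tv_sec) * 1000L +
	       (e->tv_usec - s->tv_usec) / 1000L;
}

static int ramp_over(const struct timeval *epoch, unsigned long ramp_secs)
{
	struct timeval now;

	gettimeofday(&now, NULL);	/* fio: fio_gettime(&now, NULL) */
	/* the ramp option is in seconds, the comparison is done in msecs */
	return msec_between(epoch, &now) >= (long)(ramp_secs * 1000);
}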
Example #7
static int init_submit_worker(struct submit_worker *sw)
{
	struct thread_data *parent = sw->wq->td;
	struct thread_data *td = &sw->td;
	int fio_unused ret;

	memcpy(&td->o, &parent->o, sizeof(td->o));
	memcpy(&td->ts, &parent->ts, sizeof(td->ts));
	td->o.uid = td->o.gid = -1U;
	dup_files(td, parent);
	td->eo = parent->eo;
	fio_options_mem_dupe(td);

	if (ioengine_load(td))
		goto err;

	if (td->o.odirect)
		td->io_ops->flags |= FIO_RAWIO;

	td->pid = gettid();

	INIT_FLIST_HEAD(&td->io_log_list);
	INIT_FLIST_HEAD(&td->io_hist_list);
	INIT_FLIST_HEAD(&td->verify_list);
	INIT_FLIST_HEAD(&td->trim_list);
	INIT_FLIST_HEAD(&td->next_rand_list);
	td->io_hist_tree = RB_ROOT;

	td->o.iodepth = 1;
	if (td_io_init(td))
		goto err_io_init;

	fio_gettime(&td->epoch, NULL);
	fio_getrusage(&td->ru_start);
	clear_io_state(td);

	td_set_runstate(td, TD_RUNNING);
	td->flags |= TD_F_CHILD;
	td->parent = parent;
	return 0;

err_io_init:
	close_ioengine(td);
err:
	return 1;
}
Example #8
static void fio_guasi_queued(struct thread_data *td, struct io_u **io_us, int nr)
{
	int i;
	struct io_u *io_u;
	struct timeval now;

	if (!fio_fill_issue_time(td))
		return;

	io_u_mark_submit(td, nr);
	fio_gettime(&now, NULL);
	for (i = 0; i < nr; i++) {
		io_u = io_us[i];
		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}
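
Examples #8, #12 and #18 all stamp a batch of just-queued I/Os with one timestamp: a single fio_gettime() call is amortized over nr units, and the work is skipped entirely when fio_fill_issue_time() reports that issue times are not needed. A condensed standalone sketch of the pattern (struct my_io_u and mark_queued() are illustrative stand-ins, not fio types):

#include <string.h>
#include <sys/time.h>

struct my_io_u {
	struct timeval issue_time;
	/* ... payload ... */
};

/* Stamp a whole batch with one clock read; much cheaper than reading
 * the clock once per I/O unit. */
static void mark_queued(struct my_io_u **ios, int nr, int fill_issue_time)
{
	struct timeval now;
	int i;

	if (!fill_issue_time)		/* fio: fio_fill_issue_time(td) */
		return;

	gettimeofday(&now, NULL);	/* fio: fio_gettime(&now, NULL) */
	for (i = 0; i < nr; i++)
		memcpy(&ios[i]->issue_time, &now, sizeof(now));
}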
Example #9
void fio_idle_prof_stop(void)
{
	int i;
	uint64_t runt;
	struct timeval tp;
	struct timespec ts;
	struct idle_prof_thread *ipt;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		return;

	ipc.status = IDLE_PROF_STATUS_PROF_STOP;

	/* wait for all threads to exit from profiling */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->start_lock);
		while ((ipt->state != TD_EXITED) &&
		       (ipt->state != TD_NOT_CREATED)) {
			fio_gettime(&tp, NULL);
			ts.tv_sec = tp.tv_sec + 1;
			ts.tv_nsec = tp.tv_usec * 1000;
			/* timed wait in case a signal is not received */
			pthread_cond_timedwait(&ipt->cond, &ipt->start_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->start_lock);

		/* calculate idleness */
		if (ipc.cali_mean != 0.0) {
			runt = utime_since(&ipt->tps, &ipt->tpe);
			if (runt)
				ipt->idleness = ipt->loops * ipc.cali_mean / runt;
			else
				ipt->idleness = 0.0;
		} else
			ipt->idleness = 0.0;
	}

	/*
	 * Memory allocations are freed via an explicit fio_idle_prof_cleanup()
	 * call, after the profiling stats have been collected by apps.
	 */
}
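
The timed wait above (and the identical loop in Example #22) converts the timeval from fio_gettime() into the absolute timespec deadline, one second ahead, that pthread_cond_timedwait() expects. A standalone version of that conversion; the sketch uses gettimeofday() so the deadline sits on CLOCK_REALTIME, the default clock for a condition variable:

#include <pthread.h>
#include <sys/time.h>
#include <time.h>

/* Build an absolute deadline 'sec' seconds from now for
 * pthread_cond_timedwait(), mirroring the conversion above. */
static void deadline_from_now(struct timespec *ts, unsigned int sec)
{
	struct timeval tp;

	gettimeofday(&tp, NULL);		/* fio: fio_gettime(&tp, NULL) */
	ts->tv_sec = tp.tv_sec + sec;
	ts->tv_nsec = tp.tv_usec * 1000;	/* usec -> nsec */
}

Since the waiter re-checks ipt->state on every wakeup, a timeout is harmless: the loop simply polls again, which is what the "timed wait in case a signal is not received" comment refers to.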
Example #10
File: test.c Project: joahsun/fio
static uint64_t t_crc32(void)
{
	struct timeval s;
	uint64_t ret;
	void *buf;
	int i;

	buf = malloc(CHUNK);
	randomize_buf(buf, CHUNK, 0x8989);

	fio_gettime(&s, NULL);
	for (i = 0; i < NR_CHUNKS; i++)
		fio_crc32(buf, CHUNK);

	ret = utime_since_now(&s);
	free(buf);
	return ret;
}
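
Examples #10, #13, #14 and #15 are the same micro-benchmark harness with different hash functions plugged in: fill a buffer once, hash it NR_CHUNKS times, report elapsed microseconds. A generalized standalone sketch; the CHUNK/NR_CHUNKS values and the stubbed hash_chunk() are illustrative, and fio's randomize_buf() is replaced with memset():

#include <stdint.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#define CHUNK		(128 * 1024)	/* illustrative, not fio's value */
#define NR_CHUNKS	1024		/* illustrative, not fio's value */

static void hash_chunk(const void *buf, size_t len)
{
	(void) buf; (void) len;		/* stand-in for fio_crc32() etc. */
}

static uint64_t time_hash(void)
{
	struct timeval s, e;
	void *buf = malloc(CHUNK);
	int i;

	memset(buf, 0x89, CHUNK);	/* fio: randomize_buf(buf, CHUNK, 0x8989) */

	gettimeofday(&s, NULL);		/* fio: fio_gettime(&s, NULL) */
	for (i = 0; i < NR_CHUNKS; i++)
		hash_chunk(buf, CHUNK);
	gettimeofday(&e, NULL);		/* fio: utime_since_now(&s) */

	free(buf);
	return (uint64_t)(e.tv_sec - s.tv_sec) * 1000000 +
	       (e.tv_usec - s.tv_usec);
}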
Example #11
void reset_all_stats(struct thread_data *td)
{
	struct timeval tv;
	int i;

	reset_io_counters(td);

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		td->io_bytes[i] = 0;
		td->io_blocks[i] = 0;
		td->io_issues[i] = 0;
		td->ts.total_io_u[i] = 0;
		td->ts.runtime[i] = 0;
		td->rwmix_issues = 0;
	}

	fio_gettime(&tv, NULL);
	memcpy(&td->epoch, &tv, sizeof(tv));
	memcpy(&td->start, &tv, sizeof(tv));
}
Example #12
File: io_uring.c Project: arh/fio
static void fio_ioring_queued(struct thread_data *td, int start, int nr)
{
	struct ioring_data *ld = td->io_ops_data;
	struct timespec now;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	while (nr--) {
		struct io_sq_ring *ring = &ld->sq_ring;
		int index = ring->array[start & ld->sq_ring_mask];
		struct io_u *io_u = ld->io_u_index[index];

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);

		start++;
	}
}
Example #13
File: test.c Project: joahsun/fio
static uint64_t t_sha512(void)
{
	uint8_t sha[128];
	struct fio_sha512_ctx ctx = { .buf = sha };
	struct timeval s;
	uint64_t ret;
	void *buf;
	int i;

	fio_sha512_init(&ctx);

	buf = malloc(CHUNK);
	randomize_buf(buf, CHUNK, 0x8989);

	fio_gettime(&s, NULL);
	for (i = 0; i < NR_CHUNKS; i++)
		fio_sha512_update(&ctx, buf, CHUNK);

	ret = utime_since_now(&s);
	free(buf);
	return ret;
}
Example #14
File: test.c Project: joahsun/fio
static uint64_t t_xxhash(void)
{
	void *state;
	struct timeval s;
	uint64_t ret;
	void *buf;
	int i;

	state = XXH32_init(0x8989);

	buf = malloc(CHUNK);
	randomize_buf(buf, CHUNK, 0x8989);

	fio_gettime(&s, NULL);
	for (i = 0; i < NR_CHUNKS; i++)
		XXH32_update(state, buf, CHUNK);

	XXH32_digest(state);
	ret = utime_since_now(&s);
	free(buf);
	return ret;
}
Example #15
File: test.c Project: joahsun/fio
static uint64_t t_md5(void)
{
	uint32_t digest[4];
	struct fio_md5_ctx ctx = { .hash = digest };
	struct timeval s;
	uint64_t ret;
	void *buf;
	int i;

	fio_md5_init(&ctx);

	buf = malloc(CHUNK);
	randomize_buf(buf, CHUNK, 0x8989);

	fio_gettime(&s, NULL);
	for (i = 0; i < NR_CHUNKS; i++)
		fio_md5_update(&ctx, buf, CHUNK);

	ret = utime_since_now(&s);
	free(buf);
	return ret;
}
Example #16
uint64_t usec_sleep(struct thread_data *td, unsigned long usec)
{
	struct timespec req;
	struct timeval tv;
	uint64_t t = 0;

	do {
		unsigned long ts = usec;

		if (usec < ns_granularity) {
			t += usec_spin(usec);
			break;
		}

		ts = usec - ns_granularity;

		if (ts >= 1000000) {
			req.tv_sec = ts / 1000000;
			ts -= 1000000 * req.tv_sec;
		} else
			req.tv_sec = 0;

		req.tv_nsec = ts * 1000;
		fio_gettime(&tv, NULL);

		if (nanosleep(&req, NULL) < 0)
			break;

		ts = utime_since_now(&tv);
		t += ts;
		if (ts >= usec)
			break;

		usec -= ts;
	} while (!td->terminate);

	return t;
}
Example #17
/* the fio_init marker makes this a constructor, run before main() */
static void fio_init time_init(void)
{
	int i;

	/*
	 * Check the granularity of the nanosleep function
	 */
	for (i = 0; i < 10; i++) {
		struct timeval tv;
		struct timespec ts;
		unsigned long elapsed;

		fio_gettime(&tv, NULL);
		ts.tv_sec = 0;
		ts.tv_nsec = 1000;

		nanosleep(&ts, NULL);
		elapsed = utime_since_now(&tv);

		if (elapsed > ns_granularity)
			ns_granularity = elapsed;
	}
}
Example #18
static void fio_rdmaio_queued(struct thread_data *td, struct io_u **io_us,
			      unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct timeval now;
	unsigned int i;

	if (!fio_fill_issue_time(td))
		return;

	fio_gettime(&now, NULL);

	for (i = 0; i < nr; i++) {
		struct io_u *io_u = io_us[i];

		/* queued -> flight */
		rd->io_us_flight[rd->io_u_flight_nr] = io_u;
		rd->io_u_flight_nr++;

		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}
Example #19
void rate_throttle(struct thread_data *td, unsigned long time_spent,
		   unsigned int bytes)
{
	unsigned long usec_cycle;
	unsigned int bs;

	if (!td->o.rate && !td->o.rate_iops)
		return;

	if (td_rw(td))
		bs = td->o.rw_min_bs;
	else if (td_read(td))
		bs = td->o.min_bs[DDIR_READ];
	else
		bs = td->o.min_bs[DDIR_WRITE];

	usec_cycle = td->rate_usec_cycle * (bytes / bs);

	if (time_spent < usec_cycle) {
		unsigned long s = usec_cycle - time_spent;

		td->rate_pending_usleep += s;

		if (td->rate_pending_usleep >= 100000) {
			struct timeval t;

			fio_gettime(&t, NULL);
			usec_sleep(td, td->rate_pending_usleep);
			td->rate_pending_usleep -= utime_since_now(&t);
		}
	} else {
		long overtime = time_spent - usec_cycle;

		td->rate_pending_usleep -= overtime;
	}
}
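
The budget arithmetic is easiest to follow with numbers. If td->rate_usec_cycle works out to 1000 usec per minimum-size block, an 8 KB transfer at a 4 KB minimum block size has a 2000 usec budget; an I/O that completed in 1500 usec therefore leaves a 500 usec sleep debt, and the debt only becomes an actual sleep once it reaches 100 msec, avoiding many tiny, inaccurate sleeps. A worked standalone sketch of that bookkeeping (all values are illustrative):

#include <stdio.h>

int main(void)
{
	long rate_usec_cycle = 1000;	/* budget per min-size block, usec */
	unsigned int bs = 4096;		/* minimum block size */
	unsigned int bytes = 8192;	/* this I/O moved two blocks */
	unsigned long time_spent = 1500;/* the I/O took 1.5 msec */
	long pending = 0;		/* td->rate_pending_usleep */

	unsigned long usec_cycle = rate_usec_cycle * (bytes / bs); /* 2000 */

	if (time_spent < usec_cycle)
		pending += usec_cycle - time_spent;	/* owe 500 usec */
	else
		pending -= time_spent - usec_cycle;	/* ran over budget */

	/* rate_throttle() only sleeps once the debt reaches 100 msec */
	printf("pending sleep: %ld usec (sleeps at >= 100000)\n", pending);
	return 0;
}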
Example #20
/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
int calc_thread_status(struct jobs_eta *je, int force)
{
	struct thread_data *td;
	int i;
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs;
	unsigned long long io_bytes[DDIR_RWDIR_CNT];
	unsigned long long io_iops[DDIR_RWDIR_CNT];
	struct timeval now;

	static unsigned long long rate_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_bytes[DDIR_RWDIR_CNT];
	static unsigned long long disp_io_iops[DDIR_RWDIR_CNT];
	static struct timeval rate_prev_time, disp_prev_time;

	if (!force) {
		if (output_format != FIO_OUTPUT_NORMAL)
			return 0;
		if (temp_stall_ts || eta_print == FIO_ETA_NEVER)
			return 0;

		if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
			return 0;
	}

	if (!ddir_rw_sum(rate_io_bytes))
		fill_start_time(&rate_prev_time);
	if (!ddir_rw_sum(disp_io_bytes))
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	je->elapsed_sec = (mtime_since_genesis() + 999) / 1000;

	io_bytes[DDIR_READ] = io_bytes[DDIR_WRITE] = io_bytes[DDIR_TRIM] = 0;
	io_iops[DDIR_READ] = io_iops[DDIR_WRITE] = io_iops[DDIR_TRIM] = 0;
	bw_avg_time = ULONG_MAX;
	for_each_td(td, i) {
		if (is_power_of_2(td->o.kb_base))
			je->is_pow2 = 1;
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING) {
			je->nr_running++;
			if (td_read(td)) {
				je->t_rate += td->o.rate[DDIR_READ];
				je->t_iops += td->o.rate_iops[DDIR_READ];
				je->m_rate += td->o.ratemin[DDIR_READ];
				je->m_iops += td->o.rate_iops_min[DDIR_READ];
			}
			if (td_write(td)) {
				je->t_rate += td->o.rate[DDIR_WRITE];
				je->t_iops += td->o.rate_iops[DDIR_WRITE];
				je->m_rate += td->o.ratemin[DDIR_WRITE];
				je->m_iops += td->o.rate_iops_min[DDIR_WRITE];
			}
			if (td_trim(td)) {
				je->t_rate += td->o.rate[DDIR_TRIM];
				je->t_iops += td->o.rate_iops[DDIR_TRIM];
				je->m_rate += td->o.ratemin[DDIR_TRIM];
				je->m_iops += td->o.rate_iops_min[DDIR_TRIM];
			}

			je->files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			je->nr_running++;
			je->nr_ramp++;
		} else if (td->runstate == TD_SETTING_UP)
			je->nr_running++;
		else if (td->runstate < TD_RUNNING)
			je->nr_pending++;

		if (je->elapsed_sec >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_RAMP) {
			int ddir;
			for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++) {
				io_bytes[ddir] += td->io_bytes[ddir];
				io_iops[ddir] += td->io_blocks[ddir];
			}
		}
	}

	if (exitall_on_terminate)
		je->eta_sec = INT_MAX;
	else
		je->eta_sec = 0;

	for_each_td(td, i) {
		if (exitall_on_terminate) {
			if (eta_secs[i] < je->eta_sec)
				je->eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > je->eta_sec)
				je->eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(rate_time, io_bytes, rate_io_bytes, je->rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(je->rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(je->rate[DDIR_WRITE], DDIR_WRITE, 0);
		add_agg_sample(je->rate[DDIR_TRIM], DDIR_TRIM, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);

	/*
	 * Allow a little slack, the target is to print it every 1000 msecs
	 */
	if (!force && disp_time < 900)
		return 0;

	calc_rate(disp_time, io_bytes, disp_io_bytes, je->rate);
	calc_iops(disp_time, io_iops, disp_io_iops, je->iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!force && !je->nr_running && !je->nr_pending)
		return 0;

	je->nr_threads = thread_number;
	memcpy(je->run_str, run_str, thread_number * sizeof(char));

	return 1;
}
Example #21
static void *idle_prof_thread_fn(void *data)
{
	int retval;
	unsigned long j, k;
	struct idle_prof_thread *ipt = data;

	/* wait until all threads are spawned */
	pthread_mutex_lock(&ipt->init_lock);

	/* exit if any other thread failed to start */
	if (ipc.status == IDLE_PROF_STATUS_ABORT) {
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	retval = set_cpu_affinity(ipt);
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		return NULL;
	}

	ipt->cali_time = calibrate_unit(ipt->data);

	/* delay setting the IDLE class until now, for calibration accuracy */
#if defined(CONFIG_SCHED_IDLE)
	if ((retval = fio_set_sched_idle()))
		log_err("fio: fio_set_sched_idle failed\n");
#else
	retval = -1;
	log_err("fio: fio_set_sched_idle not supported\n");
#endif
	if (retval == -1) {
		ipt->state = TD_EXITED;
		pthread_mutex_unlock(&ipt->init_lock);
		goto do_exit;
	}

	ipt->state = TD_INITIALIZED;

	/* signal the main thread that calibration is done */
	pthread_cond_signal(&ipt->cond);
	pthread_mutex_unlock(&ipt->init_lock);

	/* wait for the other threads to finish calibration */
	pthread_mutex_lock(&ipt->start_lock);

	/* exit if other threads failed to initialize */
	if (ipc.status == IDLE_PROF_STATUS_ABORT) {
		pthread_mutex_unlock(&ipt->start_lock);
		goto do_exit;
	}

	/* exit if we are doing calibration only */
	if (ipc.status == IDLE_PROF_STATUS_CALI_STOP) {
		pthread_mutex_unlock(&ipt->start_lock);
		goto do_exit;
	}

	fio_gettime(&ipt->tps, NULL);
	ipt->state = TD_RUNNING;

	j = 0;
	while (1) {
		for (k = 0; k < page_size; k++) {
			ipt->data[(k + j) % page_size] = k % 256;
			if (ipc.status == IDLE_PROF_STATUS_PROF_STOP) {
				fio_gettime(&ipt->tpe, NULL);
				goto idle_prof_done;
			}
		}
		j++;
	}

idle_prof_done:

	ipt->loops = j + (double) k / page_size;
	ipt->state = TD_EXITED;
	pthread_mutex_unlock(&ipt->start_lock);

do_exit:
	free_cpu_affinity(ipt);
	return NULL;
}
Example #22
void fio_idle_prof_init(void)
{
	int i, ret;
	struct timeval tp;
	struct timespec ts;
	pthread_attr_t tattr;
	struct idle_prof_thread *ipt;

	ipc.nr_cpus = cpus_online();
	ipc.status = IDLE_PROF_STATUS_OK;

	if (ipc.opt == IDLE_PROF_OPT_NONE)
		return;

	if ((ret = pthread_attr_init(&tattr))) {
		log_err("fio: pthread_attr_init %s\n", strerror(ret));
		return;
	}
	if ((ret = pthread_attr_setscope(&tattr, PTHREAD_SCOPE_SYSTEM))) {
		log_err("fio: pthread_attr_setscope %s\n", strerror(ret));
		return;
	}

	ipc.ipts = malloc(ipc.nr_cpus * sizeof(struct idle_prof_thread));
	if (!ipc.ipts) {
		log_err("fio: malloc failed\n");
		return;
	}

	ipc.buf = malloc(ipc.nr_cpus * page_size);
	if (!ipc.buf) {
		log_err("fio: malloc failed\n");
		free(ipc.ipts);
		return;
	}

	/*
	 * profiling aborts on any single thread failure since the
	 * result won't be accurate if any cpu is not used.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];

		ipt->cpu = i;	
		ipt->state = TD_NOT_CREATED;
		ipt->data = (unsigned char *)(ipc.buf + page_size * i);

		if ((ret = pthread_mutex_init(&ipt->init_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_mutex_init(&ipt->start_lock, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_mutex_init %s\n", strerror(ret));
			break;
		}

		if ((ret = pthread_cond_init(&ipt->cond, NULL))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_cond_init %s\n", strerror(ret));
			break;
		}

		/* make sure all threads are spawned before they start */
		pthread_mutex_lock(&ipt->init_lock);

		/* make sure all threads finish init before profiling starts */
		pthread_mutex_lock(&ipt->start_lock);

		if ((ret = pthread_create(&ipt->thread, &tattr, idle_prof_thread_fn, ipt))) {
			ipc.status = IDLE_PROF_STATUS_ABORT;
			log_err("fio: pthread_create %s\n", strerror(ret));
			break;
		} else
			ipt->state = TD_CREATED;

		if ((ret = pthread_detach(ipt->thread))) {
			/* log error and let the thread spin */
			log_err("fio: pthread_detatch %s\n", strerror(ret));
		}
	}

	/*
	 * let good threads continue so that they can exit
	 * if errors on other threads occurred previously.
	 */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_unlock(&ipt->init_lock);
	}
	
	if (ipc.status == IDLE_PROF_STATUS_ABORT)
		return;
	
	/* wait for calibration to finish */
	for (i = 0; i < ipc.nr_cpus; i++) {
		ipt = &ipc.ipts[i];
		pthread_mutex_lock(&ipt->init_lock);
		while ((ipt->state != TD_EXITED) &&
		       (ipt->state != TD_INITIALIZED)) {
			fio_gettime(&tp, NULL);
			ts.tv_sec = tp.tv_sec + 1;
			ts.tv_nsec = tp.tv_usec * 1000;
			pthread_cond_timedwait(&ipt->cond, &ipt->init_lock, &ts);
		}
		pthread_mutex_unlock(&ipt->init_lock);
	
		/*
		 * A thread that failed to initialize will cause the other
		 * threads to be aborted later, after fio_idle_prof_start.
		 */
		if (ipt->state == TD_EXITED)
			ipc.status = IDLE_PROF_STATUS_ABORT;
	}

	if (ipc.status != IDLE_PROF_STATUS_ABORT)
		calibration_stats();
	else
		ipc.cali_mean = ipc.cali_stddev = 0.0;

	if (ipc.opt == IDLE_PROF_OPT_CALI)
		ipc.status = IDLE_PROF_STATUS_CALI_STOP;
}
Example #23
void set_genesis_time(void)
{
	fio_gettime(&genesis, NULL);
}
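
genesis is the global reference point the ETA code measures against: mtime_since_genesis() in Examples #20 and #28 returns elapsed milliseconds, and the expression (mtime_since_genesis() + 999) / 1000 seen there rounds that up to whole seconds. A standalone sketch of the pairing, with the fio helpers approximated by gettimeofday() (the names are illustrative):

#include <sys/time.h>

static struct timeval genesis;

static void set_genesis(void)		/* fio: set_genesis_time() */
{
	gettimeofday(&genesis, NULL);
}

/* fio: mtime_since_genesis() */
static unsigned long msec_since_genesis(void)
{
	struct timeval now;

	gettimeofday(&now, NULL);
	return (now.tv_sec - genesis.tv_sec) * 1000UL +
	       (now.tv_usec - genesis.tv_usec) / 1000;
}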
Example #24
int main(int argc, char *argv[])
{
	int r;
	struct timeval start, end;
	struct fio_lfsr *fl;
	int verify = 0;
	unsigned int spin = 0;
	uint64_t seed = 0;
	uint64_t numbers;
	uint64_t v_size;
	uint64_t i;
	void *v = NULL, *v_start;
	double total, mean;

	/* Read arguments */
	switch (argc) {
		case 5: if (strncmp(argv[4], "verify", 7) == 0)
					verify = 1;
				/* fall through */
		case 4: spin = atoi(argv[3]);
				/* fall through */
		case 3: seed = atol(argv[2]);
				/* fall through */
		case 2: numbers = strtol(argv[1], NULL, 16);
				break;
		default: usage();
				 return 1;
	}

	/* Initialize LFSR */
	fl = malloc(sizeof(struct fio_lfsr));
	if (!fl) {
		perror("malloc");
		return 1;
	}

	r = lfsr_init(fl, numbers, seed, spin);
	if (r) {
		printf("Initialization failed.\n");
		return r;
	}

	/* Print specs */
	printf("LFSR specs\n");
	printf("==========================\n");
	printf("Size is         %u\n", 64 - __builtin_clzl(fl->cached_bit));
	printf("Max val is      %lu\n", (unsigned long) fl->max_val);
	printf("XOR-mask is     0x%lX\n", (unsigned long) fl->xormask);
	printf("Seed is         %lu\n", (unsigned long) fl->last_val);
	printf("Spin is         %u\n", fl->spin);
	printf("Cycle length is %lu\n", (unsigned long) fl->cycle_length);

	/* Create verification table */
	if (verify) {
		v_size = numbers * sizeof(uint8_t);
		v = malloc(v_size);
		memset(v, 0, v_size);
		printf("\nVerification table is %lf KBs\n", (double)(v_size) / 1024);
	}
	v_start = v;

	/*
	 * Iterate over a tight loop until we have produced all the requested
	 * numbers. Verifying the results should introduce some small yet not
	 * negligible overhead.
	 */
	fprintf(stderr, "\nTest initiated... ");
	fio_gettime(&start, NULL);
	while (!lfsr_next(fl, &i)) {
		if (verify)
			*(uint8_t *)(v + i) += 1;
	}
	fio_gettime(&end, NULL);
	fprintf(stderr, "finished.\n");


	/* Check if all expected numbers within range have been calculated */
	r = 0;
	if (verify) {
		fprintf(stderr, "Verifying results... ");
		for (i = 0; i < numbers; i++) {
			if (*(uint8_t *)(v + i) != 1) {
				fprintf(stderr, "failed (%lu = %d).\n",
						(unsigned long) i,
						*(uint8_t *)(v + i));
				r = 1;
				break;
			}
		}
		if (!r)
			fprintf(stderr, "OK!\n");
	}

	/* Calculate elapsed time and mean time per number */
	total = utime_since(&start, &end);
	mean = total / fl->num_vals;

	printf("\nTime results ");
	if (verify)
		printf("(slower due to verification)");
	printf("\n==============================\n");
	printf("Elapsed: %lf s\n", total / pow(10,6));
	printf("Mean:    %lf us\n", mean);

	free(v_start);
	free(fl);
	return r;
}
Example #25
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td)) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
	}

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	return ddir;
}
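
Note that Examples #2 and #25 are two revisions of the same rate_ddir(): the flush of in-flight requests spelled out inline here, with its explanatory comment, appears to be what the io_u_quiesce() call in Example #2 factors out.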
Example #26
static void lat_new_cycle(struct thread_data *td)
{
	fio_gettime(&td->latency_ts, NULL);
	td->latency_ios = ddir_rw_sum(td->io_blocks);
	td->latency_failed = 0;
}
Example #27
/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;
	long ret = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->flags & TD_F_READ_IOLOG) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		ret = -EBUSY;
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	if (!f) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start = io_u->offset;
		f->last_pos = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->flags & TD_F_REFILL_BUFFERS) {
				io_u_fill_buffer(td, io_u,
					io_u->xfer_buflen, io_u->xfer_buflen);
			} else if (td->flags & TD_F_SCRAMBLE_BUFFERS)
				do_scramble = 1;
			if (td->flags & TD_F_VER_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_slat)
			fio_gettime(&io_u->start_time, NULL);
		if (do_scramble)
			small_content_scramble(io_u);
		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return ERR_PTR(ret);
}
Example #28
File: eta.c Project: martin21/fio
/*
 * Print status of the jobs we know about. This includes rate estimates,
 * ETA, thread state, etc.
 */
void print_thread_status(void)
{
	unsigned long elapsed = (mtime_since_genesis() + 999) / 1000;
	int i, nr_ramp, nr_running, nr_pending, t_rate, m_rate;
	int t_iops, m_iops, files_open;
	struct thread_data *td;
	char eta_str[128];
	double perc = 0.0;
	unsigned long long io_bytes[2], io_iops[2];
	unsigned long rate_time, disp_time, bw_avg_time, *eta_secs, eta_sec;
	struct timeval now;

	static unsigned long long rate_io_bytes[2];
	static unsigned long long disp_io_bytes[2];
	static unsigned long long disp_io_iops[2];
	static struct timeval rate_prev_time, disp_prev_time;
	static unsigned int rate[2], iops[2];
	static int linelen_last;
	static int eta_good;
	int i2p = 0;

	if (temp_stall_ts || terse_output || eta_print == FIO_ETA_NEVER)
		return;

	if (!isatty(STDOUT_FILENO) && (eta_print != FIO_ETA_ALWAYS))
		return;

	if (!rate_io_bytes[0] && !rate_io_bytes[1])
		fill_start_time(&rate_prev_time);
	if (!disp_io_bytes[0] && !disp_io_bytes[1])
		fill_start_time(&disp_prev_time);

	eta_secs = malloc(thread_number * sizeof(unsigned long));
	memset(eta_secs, 0, thread_number * sizeof(unsigned long));

	io_bytes[0] = io_bytes[1] = 0;
	io_iops[0] = io_iops[1] = 0;
	nr_pending = nr_running = t_rate = m_rate = t_iops = m_iops = 0;
	nr_ramp = 0;
	bw_avg_time = ULONG_MAX;
	files_open = 0;
	for_each_td(td, i) {
		if (td->o.bw_avg_time < bw_avg_time)
			bw_avg_time = td->o.bw_avg_time;
		if (td->runstate == TD_RUNNING || td->runstate == TD_VERIFYING
		    || td->runstate == TD_FSYNCING
		    || td->runstate == TD_PRE_READING) {
			nr_running++;
			t_rate += td->o.rate[0] + td->o.rate[1];
			m_rate += td->o.ratemin[0] + td->o.ratemin[1];
			t_iops += td->o.rate_iops[0] + td->o.rate_iops[1];
			m_iops += td->o.rate_iops_min[0] + td->o.rate_iops_min[1];
			files_open += td->nr_open_files;
		} else if (td->runstate == TD_RAMP) {
			nr_running++;
			nr_ramp++;
		} else if (td->runstate < TD_RUNNING)
			nr_pending++;

		if (elapsed >= 3)
			eta_secs[i] = thread_eta(td);
		else
			eta_secs[i] = INT_MAX;

		check_str_update(td);

		if (td->runstate > TD_RAMP) {
			io_bytes[0] += td->io_bytes[0];
			io_bytes[1] += td->io_bytes[1];
			io_iops[0] += td->io_blocks[0];
			io_iops[1] += td->io_blocks[1];
		}
	}

	if (exitall_on_terminate)
		eta_sec = INT_MAX;
	else
		eta_sec = 0;

	for_each_td(td, i) {
		if (!i2p && is_power_of_2(td->o.kb_base))
			i2p = 1;
		if (exitall_on_terminate) {
			if (eta_secs[i] < eta_sec)
				eta_sec = eta_secs[i];
		} else {
			if (eta_secs[i] > eta_sec)
				eta_sec = eta_secs[i];
		}
	}

	free(eta_secs);

	if (eta_sec != INT_MAX && elapsed) {
		perc = (double) elapsed / (double) (elapsed + eta_sec);
		eta_to_str(eta_str, eta_sec);
	}

	fio_gettime(&now, NULL);
	rate_time = mtime_since(&rate_prev_time, &now);

	if (write_bw_log && rate_time > bw_avg_time && !in_ramp_time(td)) {
		calc_rate(rate_time, io_bytes, rate_io_bytes, rate);
		memcpy(&rate_prev_time, &now, sizeof(now));
		add_agg_sample(rate[DDIR_READ], DDIR_READ, 0);
		add_agg_sample(rate[DDIR_WRITE], DDIR_WRITE, 0);
	}

	disp_time = mtime_since(&disp_prev_time, &now);
	if (disp_time < 1000)
		return;

	calc_rate(disp_time, io_bytes, disp_io_bytes, rate);
	calc_iops(disp_time, io_iops, disp_io_iops, iops);

	memcpy(&disp_prev_time, &now, sizeof(now));

	if (!nr_running && !nr_pending)
		return;

	printf("Jobs: %d (f=%d)", nr_running, files_open);
	if (m_rate || t_rate) {
		char *tr, *mr;

		mr = num2str(m_rate, 4, 0, i2p);
		tr = num2str(t_rate, 4, 0, i2p);
		printf(", CR=%s/%s KB/s", tr, mr);
		free(tr);
		free(mr);
	} else if (m_iops || t_iops)
		printf(", CR=%d/%d IOPS", t_iops, m_iops);
	if (eta_sec != INT_MAX && nr_running) {
		char perc_str[32];
		char *iops_str[2];
		char *rate_str[2];
		int l;

		if ((!eta_sec && !eta_good) || nr_ramp == nr_running)
			strcpy(perc_str, "-.-% done");
		else {
			eta_good = 1;
			perc *= 100.0;
			sprintf(perc_str, "%3.1f%% done", perc);
		}

		rate_str[0] = num2str(rate[0], 5, 10, i2p);
		rate_str[1] = num2str(rate[1], 5, 10, i2p);

		iops_str[0] = num2str(iops[0], 4, 1, 0);
		iops_str[1] = num2str(iops[1], 4, 1, 0);

		l = printf(": [%s] [%s] [%s/%s /s] [%s/%s iops] [eta %s]",
				 run_str, perc_str, rate_str[0], rate_str[1], 
				 iops_str[0], iops_str[1], eta_str);
		if (l >= 0 && l < linelen_last)
			printf("%*s", linelen_last - l, "");
		linelen_last = l;

		free(rate_str[0]);
		free(rate_str[1]);
		free(iops_str[0]);
		free(iops_str[1]);
	}
	printf("\r");
	fflush(stdout);
}