Example #1
static int pre_read_file(struct thread_data *td, struct fio_file *f)
{
	int ret = 0, r, did_open = 0, old_runstate;
	unsigned long long left;
	unsigned int bs;
	char *b;

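	/* pipe-like I/O engines can't seek, so there is nothing to pre-read */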
	if (td->io_ops->flags & FIO_PIPEIO)
		return 0;

	if (!fio_file_open(f)) {
		if (td->io_ops->open_file(td, f)) {
			log_err("fio: cannot pre-read, failed to open file\n");
			return 1;
		}
		did_open = 1;
	}

	old_runstate = td_bump_runstate(td, TD_PRE_READING);

	bs = td->o.max_bs[DDIR_READ];
	b = malloc(bs);
	memset(b, 0, bs);

	if (lseek(f->fd, f->file_offset, SEEK_SET) < 0) {
		td_verror(td, errno, "lseek");
		log_err("fio: failed to lseek pre-read file\n");
		ret = 1;
		goto error;
	}

	left = f->io_size;

	while (left && !td->terminate) {
		if (bs > left)
			bs = left;

		r = read(f->fd, b, bs);

		if (r == (int) bs) {
			left -= bs;
			continue;
		} else {
			td_verror(td, EIO, "pre_read");
			break;
		}
	}

error:
	td_restore_runstate(td, old_runstate);

	if (did_open)
		td->io_ops->close_file(td, f);

	free(b);
	return ret;
}
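
The function above boils down to open, seek, then a bounded sequential read loop that shrinks the final request. What follows is a minimal standalone sketch of that pattern using only POSIX calls; PREAD_BS, pre_read() and the offset/size chosen in main() are illustrative, not fio's values.

#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

#define PREAD_BS	(128 * 1024)	/* illustrative read size */

static int pre_read(const char *path, off_t offset, unsigned long long left)
{
	char *buf;
	int fd, ret = 0;

	fd = open(path, O_RDONLY);
	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (lseek(fd, offset, SEEK_SET) < 0) {
		perror("lseek");
		close(fd);
		return 1;
	}

	buf = malloc(PREAD_BS);
	if (!buf) {
		close(fd);
		return 1;
	}

	while (left) {
		size_t bs = left < PREAD_BS ? (size_t) left : PREAD_BS;
		ssize_t r = read(fd, buf, bs);

		/* a short read or an error ends the pass */
		if (r != (ssize_t) bs) {
			ret = 1;
			break;
		}
		left -= bs;
	}

	free(buf);
	close(fd);
	return ret;
}

int main(int argc, char *argv[])
{
	if (argc < 2) {
		fprintf(stderr, "usage: %s <file>\n", argv[0]);
		return 1;
	}
	return pre_read(argv[1], 0, 1024 * 1024);
}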
Example #2
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	int this_depth, depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	ios[0] = ios[1] = ios[2] = 0;
	rw_bs[0] = rw_bs[1] = rw_bs[2] = 0;
	skipped_writes = 0;
	this_depth = depth = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
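			/*
			 * crude queue-depth probe: each QUEUE event grows the
			 * in-flight count, and a COMPLETE snapshots the peak
			 * and resets it
			 */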
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				this_depth++;
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE) {
				depth = max(depth, this_depth);
				this_depth = 0;
			}

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	if (!depth || depth > 1024)
		depth = 1024;

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	/*
	 * we don't know if this option was set or not. it defaults to 1,
	 * so we'll just guess that we should override it if it's still 1
	 */
	if (td->o.iodepth == 1)
		td->o.iodepth = depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
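
Each iteration of the loop above pulls one fixed-size blk_io_trace record through the fifo, validates its magic and version, then discards the variable-length PDU that follows. Here is a standalone sketch of that validate-then-skip shape; struct rec_hdr is a toy format, not the real blk_io_trace layout from <linux/blktrace_api.h>, though the magic/version split mirrors blktrace's.

#include <stdint.h>
#include <stdio.h>

#define REC_MAGIC	0x65617400u	/* high 24 bits identify the format */
#define REC_VERSION	0x07u		/* low byte carries the version */

struct rec_hdr {
	uint32_t magic;		/* REC_MAGIC with REC_VERSION in the low byte */
	uint32_t pdu_len;	/* bytes of payload following the header */
};

static int load_records(FILE *fp)
{
	struct rec_hdr h;

	while (fread(&h, sizeof(h), 1, fp) == 1) {
		if ((h.magic & 0xffffff00) != REC_MAGIC) {
			fprintf(stderr, "bad magic: %x\n", (unsigned) h.magic);
			return 1;
		}
		if ((h.magic & 0xff) != REC_VERSION) {
			fprintf(stderr, "bad version: %u\n",
						(unsigned) (h.magic & 0xff));
			return 1;
		}
		/* a real loader would parse the payload; here it is skipped */
		if (h.pdu_len && fseek(fp, (long) h.pdu_len, SEEK_CUR) < 0)
			return 1;
	}
	return 0;
}

int main(int argc, char *argv[])
{
	FILE *fp;
	int ret;

	if (argc < 2)
		return 1;
	fp = fopen(argv[1], "rb");
	if (!fp)
		return 1;
	ret = load_records(fp);
	fclose(fp);
	return ret;
}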
Example #3
File: blktrace.c Project: asias/fio
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	unsigned long long ttime, delay;
	struct blk_io_trace t;
	unsigned long ios[2], skipped_writes;
	unsigned int cpu;
	unsigned int rw_bs[2];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	cpu = 0;
	ttime = 0;
	ios[0] = ios[1] = 0;
	rw_bs[0] = rw_bs[1] = 0;
	skipped_writes = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
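			/*
			 * per-CPU trace clocks aren't comparable, so only
			 * time deltas between events stamped on the same CPU
			 * are replayed as delays
			 */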
			if (!ttime) {
				ttime = t.time;
				cpu = t.cpu;
			}

			delay = 0;
			if (cpu == t.cpu)
				delay = t.time - ttime;
			if ((t.action & BLK_TC_ACT(BLK_TC_WRITE)) && read_only)
				skipped_writes++;
			else {
				/*
				 * zero the delay if no_stall is enabled,
				 * for fast replay
				 */
				if (td->o.no_stall)
					delay = 0;

				handle_trace(td, &t, delay, ios, rw_bs);
			}

			ttime = t.time;
			cpu = t.cpu;
		} else {
			delay = 0;
			handle_trace(td, &t, delay, ios, rw_bs);
		}
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
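
What sets this version apart from Example #2 is the pacing state (ttime, cpu): an event is delayed by its distance from the previous event only when both were stamped by the same CPU, since the per-CPU trace clocks are not comparable across CPUs. Below is the same rule pulled out as a small pure function in a standalone sketch; the names are illustrative, not fio's.

#include <stdint.h>
#include <stdio.h>

struct pace_state {
	uint64_t last_time;	/* timestamp of the previous event */
	uint32_t last_cpu;	/* CPU that stamped it */
	int seen;		/* 0 until the first event arrives */
};

static uint64_t replay_delay(struct pace_state *ps, uint64_t time,
			     uint32_t cpu)
{
	uint64_t delay = 0;

	/* clocks are only comparable within one CPU's stream */
	if (ps->seen && ps->last_cpu == cpu)
		delay = time - ps->last_time;

	ps->last_time = time;
	ps->last_cpu = cpu;
	ps->seen = 1;
	return delay;
}

int main(void)
{
	struct pace_state ps = { 0, 0, 0 };

	replay_delay(&ps, 1000, 0);	/* first event: no delay */
	/* same CPU: prints 500; then the CPU changes: prints 0 */
	printf("%llu\n", (unsigned long long) replay_delay(&ps, 1500, 0));
	printf("%llu\n", (unsigned long long) replay_delay(&ps, 1700, 1));
	return 0;
}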
Example #4
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	int this_depth[DDIR_RWDIR_CNT], depth[DDIR_RWDIR_CNT], max_depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		ios[i] = 0;
		rw_bs[i] = 0;
		this_depth[i] = 0;
		depth[i] = 0;
	}

	skipped_writes = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
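			/*
			 * per-direction depth probe: QUEUE raises the
			 * in-flight count, merges lower it (a merged request
			 * never completes on its own), and COMPLETE
			 * snapshots the peak
			 */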
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way. But only do so if the minimum block size
	 * is a multiple of 4k, otherwise we don't know if it's safe to do so.
	 */
	if (!fio_option_is_set(&td->o, odirect) && !(td_min_bs(td) & 4095))
		td->o.odirect = 1;

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
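
This version probes queue depth per data direction, but the depth_inc(), depth_dec() and depth_end() helpers are not part of the listing. The standalone sketch below is consistent with how they are called; struct trace_ev, ev_ddir() and the bit constants are simplified stand-ins for the real blk_io_trace decoding, not fio's actual implementation.

#include <stdint.h>
#include <stdio.h>

/* just enough of a trace record for the depth probe */
struct trace_ev {
	uint32_t action;	/* category bits live in the high half */
};

enum { D_READ = 0, D_WRITE, D_TRIM, D_CNT };

#define TC_WRITE	(1u << 17)	/* cf. BLK_TC_ACT(BLK_TC_WRITE) */
#define TC_DISCARD	(1u << 29)	/* cf. BLK_TC_ACT(BLK_TC_DISCARD) */

static int ev_ddir(const struct trace_ev *t)
{
	if (t->action & TC_DISCARD)
		return D_TRIM;
	if (t->action & TC_WRITE)
		return D_WRITE;
	return D_READ;
}

/* QUEUE: one more request in flight for this direction */
static void depth_inc(const struct trace_ev *t, int *this_depth)
{
	this_depth[ev_ddir(t)]++;
}

/* a merge folds a queued request into another; it never completes alone */
static void depth_dec(const struct trace_ev *t, int *this_depth)
{
	this_depth[ev_ddir(t)]--;
}

/* COMPLETE: snapshot the peak in-flight count, then start over */
static void depth_end(const struct trace_ev *t, int *this_depth, int *depth)
{
	int ddir = ev_ddir(t);

	if (this_depth[ddir] > depth[ddir])
		depth[ddir] = this_depth[ddir];
	this_depth[ddir] = 0;
}

int main(void)
{
	struct trace_ev w = { TC_WRITE };
	int this_depth[D_CNT] = { 0 }, depth[D_CNT] = { 0 };

	depth_inc(&w, this_depth);		/* queue */
	depth_inc(&w, this_depth);		/* queue */
	depth_dec(&w, this_depth);		/* back-merge */
	depth_end(&w, this_depth, depth);	/* complete */
	printf("peak write depth: %d\n", depth[D_WRITE]);	/* prints 1 */
	return 0;
}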