Example #1
/*
 * Just discard the pdu by seeking past it.
 */
static int discard_pdu(struct thread_data *td, struct fifo *fifo, int fd,
		       struct blk_io_trace *t)
{
	if (t->pdu_len == 0)
		return 0;

	dprint(FD_BLKTRACE, "discard pdu len %u\n", t->pdu_len);
	return trace_fifo_get(td, fifo, fd, NULL, t->pdu_len);
}
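A minimal standalone sketch (not fio code) of the idea behind discard_pdu(): an uninteresting variable-length payload is skipped rather than parsed. Here the skip is done with lseek() directly on the fd, whereas fio gets the same effect through its fifo by fetching pdu_len bytes into a NULL buffer. The name skip_payload is hypothetical.

/*
 * Skip a variable-length payload by seeking past it. Illustrative only;
 * assumes the payload sits at the current file offset.
 */
#include <sys/types.h>
#include <unistd.h>

static int skip_payload(int fd, off_t pdu_len)
{
	if (pdu_len == 0)
		return 0;	/* nothing to skip */

	if (lseek(fd, pdu_len, SEEK_CUR) == (off_t) -1)
		return -1;	/* propagate the seek error */

	return (int) pdu_len;	/* mimic a "bytes discarded" return value */
}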
Example #2
File: blktrace.c Project: asias/fio
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	unsigned long long ttime, delay;
	struct blk_io_trace t;
	unsigned long ios[2], skipped_writes;
	unsigned int cpu;
	unsigned int rw_bs[2];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	cpu = 0;
	ttime = 0;
	ios[0] = ios[1] = 0;
	rw_bs[0] = rw_bs[1] = 0;
	skipped_writes = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if (!ttime) {
				ttime = t.time;
				cpu = t.cpu;
			}

			delay = 0;
			if (cpu == t.cpu)
				delay = t.time - ttime;
			if ((t.action & BLK_TC_ACT(BLK_TC_WRITE)) && read_only)
				skipped_writes++;
			else {
				/*
				 * set delay to zero if no_stall enabled for
				 * fast replay
				 */
				if (td->o.no_stall)
					delay = 0;

				handle_trace(td, &t, delay, ios, rw_bs);
			}

			ttime = t.time;
			cpu = t.cpu;
		} else {
			delay = 0;
			handle_trace(td, &t, delay, ios, rw_bs);
		}
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
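The distinctive part of this version is the replay timing: only an event traced on the same CPU as the previous one gets a stall equal to the timestamp difference, and no_stall collapses every stall to zero. A standalone sketch (not fio code; all names are hypothetical) of that rule:

#include <stdint.h>

/*
 * Track the previous event and derive the replay delay for the next one.
 * A CPU switch resets the delay to zero, since per-CPU timestamps are not
 * directly comparable.
 */
struct replay_clock {
	uint64_t last_time;	/* timestamp of the previous event */
	uint32_t last_cpu;	/* CPU the previous event was traced on */
	int primed;		/* has a first event been seen? */
};

static uint64_t replay_delay(struct replay_clock *rc, uint64_t time,
			     uint32_t cpu, int no_stall)
{
	uint64_t delay = 0;

	if (rc->primed && cpu == rc->last_cpu && !no_stall)
		delay = time - rc->last_time;

	rc->last_time = time;
	rc->last_cpu = cpu;
	rc->primed = 1;
	return delay;
}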
Example #3
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	int this_depth, depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	ios[0] = ios[1] = 0;
	rw_bs[0] = rw_bs[1] = 0;
	skipped_writes = 0;
	this_depth = depth = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				this_depth++;
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE) {
				depth = max(depth, this_depth);
				this_depth = 0;
			}

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	if (!depth || depth > 1024)
		depth = 1024;

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way.
	 */
	td->o.odirect = 1;

	/*
	 * we don't know if this option was set or not. it defaults to 1,
	 * so we'll just guess that we should override it if it's still 1
	 */
	if (td->o.iodepth == 1)
		td->o.iodepth = depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
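This version adds a queue-depth probe: each QUEUE event grows the in-flight count, a COMPLETE event records the high-water mark and starts a new burst, and the caller clamps the result to 1024 because stacked devices may never emit COMPLETE. A standalone sketch (not fio code; names are hypothetical) of that bookkeeping:

/*
 * Estimate achieved queue depth from a stream of QUEUE/COMPLETE events.
 */
struct depth_probe {
	int this_depth;		/* requests queued since the last complete */
	int depth;		/* largest burst seen so far */
};

static void depth_probe_queue(struct depth_probe *p)
{
	p->this_depth++;
}

static void depth_probe_complete(struct depth_probe *p)
{
	if (p->this_depth > p->depth)
		p->depth = p->this_depth;
	p->this_depth = 0;
}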
Example #4
/*
 * Load a blktrace file by reading all the blk_io_trace entries, and storing
 * them as io_pieces like the fio text version would do.
 */
int load_blktrace(struct thread_data *td, const char *filename, int need_swap)
{
	struct blk_io_trace t;
	unsigned long ios[DDIR_RWDIR_CNT], skipped_writes;
	unsigned int rw_bs[DDIR_RWDIR_CNT];
	struct fifo *fifo;
	int fd, i, old_state;
	struct fio_file *f;
	int this_depth[DDIR_RWDIR_CNT], depth[DDIR_RWDIR_CNT], max_depth;

	fd = open(filename, O_RDONLY);
	if (fd < 0) {
		td_verror(td, errno, "open blktrace file");
		return 1;
	}

	fifo = fifo_alloc(TRACE_FIFO_SIZE);

	old_state = td_bump_runstate(td, TD_SETTING_UP);

	td->o.size = 0;

	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		ios[i] = 0;
		rw_bs[i] = 0;
		this_depth[i] = 0;
		depth[i] = 0;
	}

	skipped_writes = 0;
	do {
		int ret = trace_fifo_get(td, fifo, fd, &t, sizeof(t));

		if (ret < 0)
			goto err;
		else if (!ret)
			break;
		else if (ret < (int) sizeof(t)) {
			log_err("fio: short fifo get\n");
			break;
		}

		if (need_swap)
			byteswap_trace(&t);

		if ((t.magic & 0xffffff00) != BLK_IO_TRACE_MAGIC) {
			log_err("fio: bad magic in blktrace data: %x\n",
								t.magic);
			goto err;
		}
		if ((t.magic & 0xff) != BLK_IO_TRACE_VERSION) {
			log_err("fio: bad blktrace version %d\n",
								t.magic & 0xff);
			goto err;
		}
		ret = discard_pdu(td, fifo, fd, &t);
		if (ret < 0) {
			td_verror(td, ret, "blktrace lseek");
			goto err;
		} else if (t.pdu_len != ret) {
			log_err("fio: discarded %d of %d\n", ret, t.pdu_len);
			goto err;
		}
		if ((t.action & BLK_TC_ACT(BLK_TC_NOTIFY)) == 0) {
			if ((t.action & 0xffff) == __BLK_TA_QUEUE)
				depth_inc(&t, this_depth);
			else if (((t.action & 0xffff) == __BLK_TA_BACKMERGE) ||
				((t.action & 0xffff) == __BLK_TA_FRONTMERGE))
				depth_dec(&t, this_depth);
			else if ((t.action & 0xffff) == __BLK_TA_COMPLETE)
				depth_end(&t, this_depth, depth);

			if (t_is_write(&t) && read_only) {
				skipped_writes++;
				continue;
			}
		}

		handle_trace(td, &t, ios, rw_bs);
	} while (1);

	for (i = 0; i < td->files_index; i++) {
		f = td->files[i];
		trace_add_open_close_event(td, f->fileno, FIO_LOG_CLOSE_FILE);
	}

	fifo_free(fifo);
	close(fd);

	td_restore_runstate(td, old_state);

	if (!td->files_index) {
		log_err("fio: did not find replay device(s)\n");
		return 1;
	}

	/*
	 * For stacked devices, we don't always get a COMPLETE event so
	 * the depth grows to insane values. Limit it to something sane(r).
	 */
	max_depth = 0;
	for (i = 0; i < DDIR_RWDIR_CNT; i++) {
		if (depth[i] > 1024)
			depth[i] = 1024;
		else if (!depth[i] && ios[i])
			depth[i] = 1;
		max_depth = max(depth[i], max_depth);
	}

	if (skipped_writes)
		log_err("fio: %s skips replay of %lu writes due to read-only\n",
						td->o.name, skipped_writes);

	if (!ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		log_err("fio: found no ios in blktrace data\n");
		return 1;
	} else if (ios[DDIR_READ] && !ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_READ;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
	} else if (!ios[DDIR_READ] && ios[DDIR_WRITE]) {
		td->o.td_ddir = TD_DDIR_WRITE;
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
	} else {
		td->o.td_ddir = TD_DDIR_RW;
		td->o.max_bs[DDIR_READ] = rw_bs[DDIR_READ];
		td->o.max_bs[DDIR_WRITE] = rw_bs[DDIR_WRITE];
		td->o.max_bs[DDIR_TRIM] = rw_bs[DDIR_TRIM];
	}

	/*
	 * We need to do direct/raw ios to the device, to avoid getting
	 * read-ahead in our way. But only do so if the minimum block size
	 * is a multiple of 4k, otherwise we don't know if it's safe to do so.
	 */
	if (!fio_option_is_set(&td->o, odirect) && !(td_min_bs(td) & 4095))
		td->o.odirect = 1;

	/*
	 * If depth wasn't manually set, use probed depth
	 */
	if (!fio_option_is_set(&td->o, iodepth))
		td->o.iodepth = td->o.iodepth_low = max_depth;

	return 0;
err:
	close(fd);
	fifo_free(fifo);
	return 1;
}
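The final version refines the probe to one counter per data direction, driven through depth_inc(), depth_dec() and depth_end(): merges shrink the in-flight count because a merged request no longer completes on its own, and COMPLETE records the per-direction high-water mark. A standalone sketch (not fio code; XDIR_* and the function names are hypothetical) of that logic:

/*
 * Per-direction queue-depth probe fed by QUEUE, MERGE and COMPLETE events.
 */
enum { XDIR_READ = 0, XDIR_WRITE = 1, XDIR_TRIM = 2, XDIR_CNT = 3 };

struct dir_depth_probe {
	int this_depth[XDIR_CNT];	/* queued since last complete */
	int depth[XDIR_CNT];		/* largest burst seen */
};

static void dir_depth_queue(struct dir_depth_probe *p, int dir)
{
	p->this_depth[dir]++;
}

static void dir_depth_merge(struct dir_depth_probe *p, int dir)
{
	p->this_depth[dir]--;	/* merged request won't complete separately */
}

static void dir_depth_complete(struct dir_depth_probe *p, int dir)
{
	if (p->this_depth[dir] > p->depth[dir])
		p->depth[dir] = p->this_depth[dir];
	p->this_depth[dir] = 0;
}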