Example #1
File: sync.c Project: apexearth/fio
static int fio_syncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = read(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		ret = write(f->fd, io_u->xfer_buf, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
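
Example #1 hands the raw read()/write() return value to fio_io_end(), which is not shown in this excerpt. As a rough, self-contained sketch (not fio's actual helper) of what that completion step has to do, mirroring the inline handling visible in Example #5: a negative return becomes an errno-based error, a short transfer becomes a residual, and the request still completes synchronously.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

/* Hypothetical helper, for illustration only: translate a read()/write()
 * return value into "bytes left over" and "error", the way a synchronous
 * queue path reports completion. */
static void finish_io(ssize_t ret, size_t want, size_t *resid, int *error)
{
	*resid = 0;
	*error = 0;

	if (ret < 0)
		*error = errno;			/* syscall failed outright */
	else if ((size_t) ret != want)
		*resid = want - (size_t) ret;	/* short transfer, not an error */
}

int main(void)
{
	char buf[4096];
	size_t resid;
	int error;
	ssize_t ret = read(STDIN_FILENO, buf, sizeof(buf));

	finish_io(ret, sizeof(buf), &resid, &error);
	if (error)
		fprintf(stderr, "read: %s\n", strerror(error));
	else
		printf("transferred %zd bytes, residual %zu\n", ret, resid);
	return 0;
}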
Example #2
File: mmap.c Project: arh/fio
static enum fio_q_status fio_mmapio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
	else if (ddir_sync(io_u->ddir)) {
		if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
	} else if (io_u->ddir == DDIR_TRIM) {
		int ret = do_io_u_trim(td, io_u);

		if (!ret)
			td_verror(td, io_u->error, "trim");
	}

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	if (td->o.odirect && ddir_rw(io_u->ddir)) {
		if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
		if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "madvise");
		}
	}

	return FIO_Q_COMPLETED;
}
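
A minimal, self-contained sketch of the same mmap-backed pattern outside fio (file name and sizes are made up for the demo): map a file, copy through the mapping in place of read()/write(), then msync() and posix_madvise(POSIX_MADV_DONTNEED) to approximate the "drop the pages from the cache" step above.

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const char msg[] = "hello via mmap";
	const size_t len = 4096;
	int fd = open("mmap-demo.bin", O_RDWR | O_CREAT, 0644);

	if (fd < 0 || ftruncate(fd, len) < 0)
		return 1;

	void *map = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map == MAP_FAILED)
		return 1;

	/* the "write" direction: user buffer -> mapping */
	memcpy(map, msg, sizeof(msg));

	/* flush the dirty pages, then hint that they may be dropped */
	if (msync(map, len, MS_SYNC) < 0)
		perror("msync");
	int rc = posix_madvise(map, len, POSIX_MADV_DONTNEED);
	if (rc)
		fprintf(stderr, "posix_madvise: %s\n", strerror(rc));

	munmap(map, len);
	close(fd);
	return 0;
}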
Example #3
File: io_uring.c Project: arh/fio
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	read_barrier();
	if (next_tail == *ring->head)
		return FIO_Q_BUSY;

	/* ensure sqe stores are ordered with tail update */
	write_barrier();
	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	*ring->tail = next_tail;
	write_barrier();

	ld->queued++;
	return FIO_Q_QUEUED;
}
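
The ordering in Example #3 (fill the slot, then publish the new tail) is the standard single-producer ring publish. Below is a self-contained sketch of the same idea using C11 atomics in place of fio's read_barrier()/write_barrier() macros; the acquire/release pairing is an equivalent expression of those barriers, not fio's actual code.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

#define RING_SIZE 64U			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1U)

struct ring {
	_Atomic unsigned head;		/* advanced by the consumer */
	_Atomic unsigned tail;		/* advanced by the producer */
	uint32_t array[RING_SIZE];
};

/* Producer side: returns false when the ring is full (the FIO_Q_BUSY case). */
static bool ring_push(struct ring *r, uint32_t index)
{
	unsigned tail = atomic_load_explicit(&r->tail, memory_order_relaxed);

	/* acquire pairs with the consumer's release store of head */
	unsigned head = atomic_load_explicit(&r->head, memory_order_acquire);
	if (tail - head == RING_SIZE)
		return false;

	/* store the entry first ... */
	r->array[tail & RING_MASK] = index;
	/* ... then publish it: release orders the array store before the tail update */
	atomic_store_explicit(&r->tail, tail + 1, memory_order_release);
	return true;
}

int main(void)
{
	static struct ring r;	/* zero-initialized: empty ring */
	return ring_push(&r, 42) ? 0 : 1;
}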
Example #4
File: sync.c Project: apexearth/fio
static int fio_pvsyncio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct syncio_data *sd = td->io_ops->data;
	struct iovec *iov = &sd->iovecs[0];
	struct fio_file *f = io_u->file;
	int ret;

	fio_ro_check(td, io_u);

	iov->iov_base = io_u->xfer_buf;
	iov->iov_len = io_u->xfer_buflen;

	if (io_u->ddir == DDIR_READ)
		ret = preadv(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_WRITE)
		ret = pwritev(f->fd, iov, 1, io_u->offset);
	else if (io_u->ddir == DDIR_TRIM) {
		do_io_u_trim(td, io_u);
		return FIO_Q_COMPLETED;
	} else
		ret = do_io_u_sync(td, io_u);

	return fio_io_end(td, io_u, ret);
}
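
Example #4 wraps a single buffer in an iovec and issues positional vectored I/O. A minimal, self-contained sketch of preadv()/pwritev() against an ordinary file descriptor (the file name and offset are arbitrary for the demo):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char out[] = "positional vectored write";
	char in[sizeof(out)];
	struct iovec iov;
	int fd = open("pv-demo.bin", O_RDWR | O_CREAT | O_TRUNC, 0644);

	if (fd < 0)
		return 1;
	memset(in, 0, sizeof(in));

	/* write the buffer at offset 4096 without moving the file position */
	iov.iov_base = out;
	iov.iov_len = sizeof(out);
	if (pwritev(fd, &iov, 1, 4096) < 0)
		perror("pwritev");

	/* read it back from the same offset */
	iov.iov_base = in;
	iov.iov_len = sizeof(in);
	if (preadv(fd, &iov, 1, 4096) < 0)
		perror("preadv");

	printf("read back: %s\n", in);
	close(fd);
	return 0;
}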
Example #5
File: splice.c Project: Andiry/fio
/*
 * For splice writing, we can vmsplice our data buffer directly into a
 * pipe and then splice that to a file.
 */
static int fio_splice_write(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	struct iovec iov = {
		.iov_base = io_u->xfer_buf,
		.iov_len = io_u->xfer_buflen,
	};
	struct pollfd pfd = { .fd = sd->pipe[1], .events = POLLOUT, };
	struct fio_file *f = io_u->file;
	off_t off = io_u->offset;
	int ret, ret2;

	while (iov.iov_len) {
		if (poll(&pfd, 1, -1) < 0)
			return errno;

		ret = vmsplice(sd->pipe[1], &iov, 1, SPLICE_F_NONBLOCK);
		if (ret < 0)
			return -errno;

		iov.iov_len -= ret;
		iov.iov_base += ret;

		while (ret) {
			ret2 = splice(sd->pipe[0], NULL, f->fd, &off, ret, 0);
			if (ret2 < 0)
				return -errno;

			ret -= ret2;
		}
	}

	return io_u->xfer_buflen;
}
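
A self-contained sketch of the vmsplice-then-splice write path described in the comment above fio_splice_write(), stripped of fio's bookkeeping (the file name, the zero flags, and the single full-buffer pass are simplifications for the demo):

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <sys/uio.h>
#include <unistd.h>

int main(void)
{
	char buf[] = "spliced through a pipe";
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };
	int pipefd[2];
	off_t off = 0;
	int fd = open("splice-demo.bin", O_WRONLY | O_CREAT | O_TRUNC, 0644);

	if (fd < 0 || pipe(pipefd) < 0)
		return 1;

	/* stage the user buffer in the pipe ... */
	ssize_t staged = vmsplice(pipefd[1], &iov, 1, 0);
	if (staged < 0) {
		perror("vmsplice");
		return 1;
	}

	/* ... then move it from the pipe into the file at the given offset */
	while (staged > 0) {
		ssize_t moved = splice(pipefd[0], NULL, fd, &off, staged, 0);
		if (moved < 0) {
			perror("splice");
			return 1;
		}
		staged -= moved;
	}

	close(pipefd[0]);
	close(pipefd[1]);
	close(fd);
	return 0;
}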

static int fio_spliceio_queue(struct thread_data *td, struct io_u *io_u)
{
	struct spliceio_data *sd = td->io_ops->data;
	int ret = 0;

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ) {
		if (sd->vmsplice_to_user) {
			ret = fio_splice_read(td, io_u);
			/*
			 * This kernel doesn't support vmsplice to user
			 * space. Reset the vmsplice_to_user flag, so that
			 * we retry below and don't hit this path again.
			 */
			if (ret == -EBADF)
				sd->vmsplice_to_user = 0;
		}
		if (!sd->vmsplice_to_user)
			ret = fio_splice_read_old(td, io_u);
	} else if (io_u->ddir == DDIR_WRITE)
		ret = fio_splice_write(td, io_u);
	else if (io_u->ddir == DDIR_TRIM)
		ret = do_io_u_trim(td, io_u);
	else
		ret = do_io_u_sync(td, io_u);

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		td_verror(td, io_u->error, "xfer");
		if (io_u->error == EINVAL)
			log_err("fio: looks like splice doesn't work on this"
					" file system\n");
	}

	return FIO_Q_COMPLETED;
}

static void fio_spliceio_cleanup(struct thread_data *td)
{
	struct spliceio_data *sd = td->io_ops->data;

	if (sd) {
		close(sd->pipe[0]);
		close(sd->pipe[1]);
		free(sd);
	}
}

static int fio_spliceio_init(struct thread_data *td)
{
	struct spliceio_data *sd = malloc(sizeof(*sd));

	if (pipe(sd->pipe) < 0) {
		td_verror(td, errno, "pipe");
		free(sd);
		return 1;
	}

	/*
	 * Assume this works; we'll reset it if it doesn't.
	 */
	sd->vmsplice_to_user = 1;

	/*
	 * Works with "real" vmsplice to user, e.g. mapping pages directly.
	 * Reset if we fail.
	 */
	sd->vmsplice_to_user_map = 1;

	/*
	 * And if vmsplice_to_user works, we definitely need aligned
	 * buffers. Just set ->mem_align to force that.
	 */
	if (td_read(td))
		td->o.mem_align = 1;

	td->io_ops->data = sd;
	return 0;
}

static struct ioengine_ops ioengine = {
	.name		= "splice",
	.version	= FIO_IOOPS_VERSION,
	.init		= fio_spliceio_init,
	.queue		= fio_spliceio_queue,
	.cleanup	= fio_spliceio_cleanup,
	.open_file	= generic_open_file,
	.close_file	= generic_close_file,
	.get_file_size	= generic_get_file_size,
	.flags		= FIO_SYNCIO | FIO_PIPEIO,
};

static void fio_init fio_spliceio_register(void)
{
	register_ioengine(&ioengine);
}

static void fio_exit fio_spliceio_unregister(void)
{
	unregister_ioengine(&ioengine);
}
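
The fio_init/fio_exit hooks above let each engine register itself without any central list. Assuming (as this sketch does) that those macros expand to GCC constructor/destructor attributes, the pattern looks like this in isolation:

#include <stdio.h>

/* Stand-ins for fio_init/fio_exit; the attribute-based expansion is an
 * assumption of this sketch, not quoted from fio's headers. */
#define demo_init __attribute__((constructor))
#define demo_exit __attribute__((destructor))

static void demo_init register_me(void)
{
	/* register_ioengine(&ioengine) would go here */
	printf("registered before main()\n");
}

static void demo_exit unregister_me(void)
{
	/* unregister_ioengine(&ioengine) would go here */
	printf("unregistered after main()\n");
}

int main(void)
{
	printf("main body\n");
	return 0;
}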