static int fio_ioring_commit(struct thread_data *td)
{
	struct ioring_data *ld = td->io_ops_data;
	struct ioring_options *o = td->eo;
	int ret;

	if (!ld->queued)
		return 0;

	/*
	 * Kernel side does submission. Just need to check if the ring is
	 * flagged as needing a kick; if so, call io_uring_enter(). This
	 * only happens if we've been idle too long.
	 */
	if (o->sqpoll_thread) {
		struct io_sq_ring *ring = &ld->sq_ring;

		read_barrier();
		if (*ring->flags & IORING_SQ_NEED_WAKEUP)
			io_uring_enter(ld, ld->queued, 0,
					IORING_ENTER_SQ_WAKEUP);
		ld->queued = 0;
		return 0;
	}

	do {
		unsigned start = *ld->sq_ring.head;
		long nr = ld->queued;

		ret = io_uring_enter(ld, nr, 0, IORING_ENTER_GETEVENTS);
		if (ret > 0) {
			fio_ioring_queued(td, start, ret);
			io_u_mark_submit(td, ret);

			ld->queued -= ret;
			ret = 0;
		} else if (!ret) {
			io_u_mark_submit(td, ret);
			continue;
		} else {
			if (errno == EAGAIN) {
				ret = fio_ioring_cqring_reap(td, 0, ld->queued);
				if (ret)
					continue;
				/* Shouldn't happen */
				usleep(1);
				continue;
			}
			td_verror(td, errno, "io_uring_enter submit");
			break;
		}
	} while (ld->queued);

	return ret;
}
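The io_uring_enter() called above is not the raw syscall but a thin engine-local wrapper around it. A minimal sketch of what that wrapper can look like, assuming the ring file descriptor is kept in ld->ring_fd and that the syscall is issued directly via syscall(2):

#include <unistd.h>
#include <sys/syscall.h>

/*
 * Sketch of the engine-local wrapper: to_submit tells the kernel how
 * many sqes to consume, min_complete how many completions to wait for.
 * The trailing NULL/0 pair is the (unused) sigset argument.
 */
static int io_uring_enter(struct ioring_data *ld, unsigned int to_submit,
			  unsigned int min_complete, unsigned int flags)
{
	return syscall(__NR_io_uring_enter, ld->ring_fd, to_submit,
			min_complete, flags, NULL, 0);
}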
static int fio_rdmaio_commit(struct thread_data *td)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct io_u **io_us;
	int ret;

	if (!rd->io_us_queued)
		return 0;

	io_us = rd->io_us_queued;
	do {
		/* RDMA_WRITE or RDMA_READ */
		if (rd->is_client)
			ret = fio_rdmaio_send(td, io_us, rd->io_u_queued_nr);
		else if (!rd->is_client)
			ret = fio_rdmaio_recv(td, io_us, rd->io_u_queued_nr);
		else
			ret = 0;	/* must be a SYNC */

		if (ret > 0) {
			fio_rdmaio_queued(td, io_us, ret);
			io_u_mark_submit(td, ret);
			rd->io_u_queued_nr -= ret;
			io_us += ret;
			ret = 0;
		} else
			break;
	} while (rd->io_u_queued_nr);

	return ret;
}
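On the client side, fio_rdmaio_send() ultimately comes down to posting work requests on the connection's queue pair with the verbs API. A hedged, illustrative sketch of posting a single RDMA_WRITE work request; the helper name and the way qp/sge/remote_addr/rkey are obtained are assumptions, only the <infiniband/verbs.h> types and calls are standard:

#include <infiniband/verbs.h>

/*
 * Illustrative only: post one signaled RDMA_WRITE on a queue pair.
 * qp, sge and the remote address/rkey would come from connection
 * setup; none of these names are fio's own.
 */
static int post_one_write(struct ibv_qp *qp, struct ibv_sge *sge,
			  uint64_t remote_addr, uint32_t rkey, uint64_t wr_id)
{
	struct ibv_send_wr wr = {
		.wr_id = wr_id,
		.sg_list = sge,
		.num_sge = 1,
		.opcode = IBV_WR_RDMA_WRITE,
		.send_flags = IBV_SEND_SIGNALED,
		.wr.rdma.remote_addr = remote_addr,
		.wr.rdma.rkey = rkey,
	};
	struct ibv_send_wr *bad_wr;

	/* returns 0 on success, an errno-style value on failure */
	return ibv_post_send(qp, &wr, &bad_wr);
}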
static int fio_vsyncio_commit(struct thread_data *td)
{
	struct syncio_data *sd = td->io_ops->data;
	struct fio_file *f;
	ssize_t ret;

	if (!sd->queued)
		return 0;

	io_u_mark_submit(td, sd->queued);
	f = sd->last_file;

	if (lseek(f->fd, sd->io_us[0]->offset, SEEK_SET) == -1) {
		int err = -errno;

		td_verror(td, errno, "lseek");
		return err;
	}

	if (sd->last_ddir == DDIR_READ)
		ret = readv(f->fd, sd->iovecs, sd->queued);
	else
		ret = writev(f->fd, sd->iovecs, sd->queued);

	dprint(FD_IO, "vsyncio_commit: %d\n", (int) ret);
	sd->events = sd->queued;
	sd->queued = 0;
	return fio_vsyncio_end(td, ret);
}
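The single readv()/writev() call is what makes this engine "vectored sync": one syscall covers every queued io_u starting at one seek offset, and the return value is the byte total across all segments (which fio_vsyncio_end() then reconciles against what was queued). A self-contained illustration of the gather-write semantics; the file path and buffers here are made up:

#include <stdio.h>
#include <string.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	char a[] = "first segment ", b[] = "second segment\n";
	struct iovec iov[2] = {
		{ .iov_base = a, .iov_len = strlen(a) },
		{ .iov_base = b, .iov_len = strlen(b) },
	};
	int fd = open("/tmp/vsync-demo", O_CREAT | O_WRONLY | O_TRUNC, 0644);

	if (fd < 0)
		return 1;

	/* one syscall, two buffers; ret is the total, and may be short */
	ssize_t ret = writev(fd, iov, 2);

	printf("writev wrote %zd bytes\n", ret);
	close(fd);
	return 0;
}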
static int fio_null_commit(struct thread_data *td)
{
	struct null_data *nd = (struct null_data *) td->io_ops_data;

	if (!nd->events) {
#ifndef FIO_EXTERNAL_ENGINE
		io_u_mark_submit(td, nd->queued);
#endif
		nd->events = nd->queued;
		nd->queued = 0;
	}

	return 0;
}
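The queued-to-events handoff above only makes sense next to the engine's reap side, which simply hands everything back without touching any hardware. A sketch of what the matching getevents hook can look like, shaped after the null engine but hedged on details:

/*
 * Sketch of the reap side: report everything fio_null_commit() moved
 * from queued to events as completed. The real engine pairs this with
 * an .event hook that returns the io_u for a given completion index.
 */
static int fio_null_getevents(struct thread_data *td, unsigned int min_events,
			      unsigned int max, const struct timespec *t)
{
	struct null_data *nd = (struct null_data *) td->io_ops_data;
	int ret = 0;

	if (min_events) {
		ret = nd->events;
		nd->events = 0;
	}

	return ret;
}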
static void fio_guasi_queued(struct thread_data *td, struct io_u **io_us, int nr)
{
	int i;
	struct io_u *io_u;
	struct timeval now;

	if (!fio_fill_issue_time(td))
		return;

	io_u_mark_submit(td, nr);
	fio_gettime(&now, NULL);
	for (i = 0; i < nr; i++) {
		io_u = io_us[i];
		memcpy(&io_u->issue_time, &now, sizeof(now));
		io_u_queued(td, io_u);
	}
}
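Stamping issue_time at submission is what later lets the completion path turn two timestamps into a latency sample. A minimal sketch of the microsecond delta between two struct timeval stamps; the helper name is hypothetical:

#include <sys/time.h>

/* Hypothetical helper: microseconds elapsed from 'issue' to 'now'. */
static unsigned long usec_since(const struct timeval *issue,
				const struct timeval *now)
{
	long sec = now->tv_sec - issue->tv_sec;
	long usec = now->tv_usec - issue->tv_usec;

	return sec * 1000000UL + usec;
}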
static enum fio_q_status fio_ioring_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct ioring_data *ld = td->io_ops_data;
	struct io_sq_ring *ring = &ld->sq_ring;
	unsigned tail, next_tail;

	fio_ro_check(td, io_u);

	if (ld->queued == ld->iodepth)
		return FIO_Q_BUSY;

	if (io_u->ddir == DDIR_TRIM) {
		if (ld->queued)
			return FIO_Q_BUSY;

		do_io_u_trim(td, io_u);
		io_u_mark_submit(td, 1);
		io_u_mark_complete(td, 1);
		return FIO_Q_COMPLETED;
	}

	tail = *ring->tail;
	next_tail = tail + 1;
	read_barrier();
	if (next_tail == *ring->head)
		return FIO_Q_BUSY;

	/* ensure sqe stores are ordered with tail update */
	write_barrier();
	ring->array[tail & ld->sq_ring_mask] = io_u->index;
	*ring->tail = next_tail;
	write_barrier();

	ld->queued++;
	return FIO_Q_QUEUED;
}
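The head/tail/array accesses above go straight through pointers into the SQ ring memory shared with the kernel, which is why the barriers matter: the array[] slot store must be visible before the tail store that publishes it, and the head load must not be reordered ahead of the tail read. A sketch of the pointer bundle the engine keeps, assumed to be filled in from the mmap()ed SQ ring offsets at setup time:

/*
 * Sketch: every member points into the mmap()ed SQ ring, so loads and
 * stores through them are observed by the kernel. write_barrier()
 * orders the array[] store before the tail store; read_barrier()
 * orders the head load in the full-ring check.
 */
struct io_sq_ring {
	unsigned *head;		/* consumer index, advanced by the kernel */
	unsigned *tail;		/* producer index, advanced by fio */
	unsigned *ring_mask;	/* entries - 1, for cheap index wrapping */
	unsigned *ring_entries;
	unsigned *flags;	/* e.g. IORING_SQ_NEED_WAKEUP with SQPOLL */
	unsigned *array;	/* maps ring slots to sqe indices */
};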