static int io_workqueue_fn(struct submit_worker *sw,
			   struct workqueue_work *work)
{
	struct io_u *io_u = container_of(work, struct io_u, work);
	const enum fio_ddir ddir = io_u->ddir;
	struct thread_data *td = sw->priv;
	int ret, error;

	if (td->o.serialize_overlap)
		check_overlap(io_u);

	dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid());

	io_u_set(td, io_u, IO_U_F_NO_FILE_PUT);

	td->cur_depth++;

	do {
		ret = td_io_queue(td, io_u);
		if (ret != FIO_Q_BUSY)
			break;

		/*
		 * Queue is full: reap at least one completion to make
		 * room, then clear the in-flight flag and resubmit.
		 */
		ret = io_u_queued_complete(td, 1);
		if (ret > 0)
			td->cur_depth -= ret;
		else if (ret < 0)
			break;

		io_u_clear(td, io_u, IO_U_F_FLIGHT);
	} while (1);

	dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid());

	error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL);

	if (ret == FIO_Q_COMPLETED)
		td->cur_depth--;
	else if (ret == FIO_Q_QUEUED) {
		unsigned int min_evts;

		/*
		 * At an iodepth of 1 we must reap the event before
		 * returning; otherwise a non-blocking poll suffices.
		 */
		if (td->o.iodepth == 1)
			min_evts = 1;
		else
			min_evts = 0;

		ret = io_u_queued_complete(td, min_evts);
		if (ret > 0)
			td->cur_depth -= ret;
	}

	if (error || td->error)
		pthread_cond_signal(&td->parent->free_cond);

	return 0;
}
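/*
 * Illustrative sketch, not fio code: the FIO_Q_BUSY retry loop above in
 * miniature. queue_io() and reap_one() are hypothetical stand-ins for
 * td_io_queue() and io_u_queued_complete(); the shape to note is that a
 * busy queue is handled by reaping at least one completion to free a
 * slot before resubmitting the same unit. Compiles standalone (C99).
 */
#include <stdio.h>

enum q_ret { Q_COMPLETED, Q_QUEUED, Q_BUSY };

#define QDEPTH	4

static int in_flight;

/* Hypothetical submit: refuses new work while the queue is full. */
static enum q_ret queue_io(int id)
{
	if (in_flight == QDEPTH)
		return Q_BUSY;
	in_flight++;
	printf("queued io %d (in flight: %d)\n", id, in_flight);
	return Q_QUEUED;
}

/* Hypothetical reap: retires one outstanding unit, if any. */
static int reap_one(void)
{
	if (!in_flight)
		return 0;
	in_flight--;
	return 1;
}

int main(void)
{
	for (int id = 0; id < 8; id++) {
		enum q_ret ret;

		/* Same structure as the do/while in io_workqueue_fn. */
		do {
			ret = queue_io(id);
			if (ret != Q_BUSY)
				break;
			reap_one();
		} while (1);
	}

	/* Drain what is left, as a completion path would. */
	while (reap_one())
		;
	return 0;
}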
void io_u_quiesce(struct thread_data *td)
{
	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	while (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, 1, NULL);
	}
}
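/*
 * Illustrative sketch, not fio code: the quiesce invariant in
 * miniature. drain_one() is a hypothetical stand-in for
 * io_u_queued_complete(td, 1, NULL); the in-flight counter is owned by
 * the completion path, so the loop simply re-tests it after every
 * blocking reap. Compiles standalone.
 */
#include <stdio.h>

static int io_in_flight = 3;	/* pretend three async I/Os are pending */

/* Hypothetical blocking reap of exactly one completion. */
static void drain_one(void)
{
	io_in_flight--;
}

static void quiesce(void)
{
	while (io_in_flight) {
		drain_one();
		printf("reaped one, %d still in flight\n", io_in_flight);
	}
	/* Now safe to stall without inflating completion latencies. */
}

int main(void)
{
	quiesce();
	return 0;
}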
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td)) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
		    td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
	}

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);

	/*
	 * Deduct the time actually slept, not the time requested, and
	 * charge it to the other direction too if that is rate limited.
	 */
	usec = utime_since_now(&t);
	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	return ddir;
}
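/*
 * Illustrative sketch, not fio code: the pending-sleep arithmetic from
 * rate_ddir() in isolation, assuming a mixed read/write job where both
 * directions are rate limited. pick_dir() mirrors the decision above:
 * no debt means no switch, a small debt (< 100ms) in the other
 * direction means switch without sleeping, and when both owe sleep the
 * smaller debt is slept and charged to both. Compiles standalone.
 */
#include <stdio.h>

enum { DIR_READ, DIR_WRITE };

static long pending[2];		/* usec of sleep owed per direction */

static int pick_dir(int dir, long *sleep_usec)
{
	int odir = dir ^ 1;

	*sleep_usec = 0;
	if (pending[dir] <= 0)
		return dir;
	if (pending[odir] < 100000)
		return odir;
	if (pending[odir] < pending[dir])
		dir = odir;
	*sleep_usec = pending[dir];
	return dir;
}

int main(void)
{
	long usec;
	int dir;

	pending[DIR_READ] = 250000;
	pending[DIR_WRITE] = 150000;

	dir = pick_dir(DIR_READ, &usec);

	/*
	 * Both directions owe sleep, so the smaller debt (write, 150ms)
	 * is slept; the real code deducts the time actually slept, here
	 * modeled as exactly the requested amount.
	 */
	pending[DIR_READ] -= usec;
	pending[DIR_WRITE] -= usec;
	printf("slept %ldus on dir %d; read owes %ld, write owes %ld\n",
	       usec, dir, pending[DIR_READ], pending[DIR_WRITE]);
	return 0;
}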