static int io_workqueue_fn(struct submit_worker *sw, struct workqueue_work *work) { struct io_u *io_u = container_of(work, struct io_u, work); const enum fio_ddir ddir = io_u->ddir; struct thread_data *td = sw->priv; int ret, error; if (td->o.serialize_overlap) check_overlap(io_u); dprint(FD_RATE, "io_u %p queued by %u\n", io_u, gettid()); io_u_set(td, io_u, IO_U_F_NO_FILE_PUT); td->cur_depth++; do { ret = td_io_queue(td, io_u); if (ret != FIO_Q_BUSY) break; ret = io_u_queued_complete(td, 1); if (ret > 0) td->cur_depth -= ret; else if (ret < 0) break; io_u_clear(td, io_u, IO_U_F_FLIGHT); } while (1); dprint(FD_RATE, "io_u %p ret %d by %u\n", io_u, ret, gettid()); error = io_queue_event(td, io_u, &ret, ddir, NULL, 0, NULL); if (ret == FIO_Q_COMPLETED) td->cur_depth--; else if (ret == FIO_Q_QUEUED) { unsigned int min_evts; if (td->o.iodepth == 1) min_evts = 1; else min_evts = 0; ret = io_u_queued_complete(td, min_evts); if (ret > 0) td->cur_depth -= ret; } if (error || td->error) pthread_cond_signal(&td->parent->free_cond); return 0; }
/*
 * Push IO verification to a separate thread: hand the io_u over to the
 * verify thread's list and wake it. On success the caller's pointer is
 * cleared (*io_u_ptr = NULL) — ownership of the io_u transfers to the
 * verify thread. Always returns 0.
 */
int verify_io_u_async(struct thread_data *td, struct io_u **io_u_ptr)
{
	struct io_u *io_u = *io_u_ptr;

	pthread_mutex_lock(&td->io_u_lock);

	if (io_u->file)
		put_file_log(td, io_u->file);

	if (io_u->flags & IO_U_F_IN_CUR_DEPTH) {
		td->cur_depth--;
		/*
		 * Fixed: io_u_clear() takes td as the first argument (same
		 * 3-arg form io_workqueue_fn uses); the old 2-arg call was
		 * inconsistent with the rest of the file.
		 */
		io_u_clear(td, io_u, IO_U_F_IN_CUR_DEPTH);
	}

	flist_add_tail(&io_u->verify_list, &td->verify_list);
	*io_u_ptr = NULL;

	/*
	 * Signal while still holding io_u_lock so the verify thread cannot
	 * miss the wakeup between our unlock and signal.
	 */
	pthread_cond_signal(&td->verify_cond);
	pthread_mutex_unlock(&td->io_u_lock);
	return 0;
}