/*
 * Complete a single io_u for the sync engines.
 */
int io_u_sync_complete(struct thread_data *td, struct io_u *io_u,
		       uint64_t *bytes)
{
	struct io_completion_data icd;

	init_icd(td, &icd, 1);
	io_completed(td, io_u, &icd);

	if (!(io_u->flags & IO_U_F_FREE_DEF))
		put_io_u(td, io_u);

	if (icd.error) {
		td_verror(td, icd.error, "io_u_sync_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}
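/*
 * Hypothetical caller sketch (not from the source): a sync engine
 * completes the io_u inline at queue time, so it is reaped right away
 * with io_u_sync_complete(). Assumes fio's three data directions
 * (read/write/trim) behind DDIR_RWDIR_CNT; the helper name
 * queue_and_reap_sync is an assumption.
 */
static int queue_and_reap_sync(struct thread_data *td, struct io_u *io_u)
{
	uint64_t bytes[DDIR_RWDIR_CNT] = { 0, 0, 0 };

	if (td_io_queue(td, io_u) != FIO_Q_COMPLETED)
		return -1;	/* sync engines complete inline */

	if (io_u_sync_complete(td, io_u, bytes) < 0)
		return -1;	/* td->error already set via td_verror() */

	/* bytes[DDIR_READ] etc. now hold the bytes moved by this io_u */
	return 0;
}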
/*
 * Complete the io_us that the async engine reports as done, pulling
 * each finished event from the engine one at a time.
 */
static void ios_completed(struct thread_data *td,
			  struct io_completion_data *icd)
{
	struct io_u *io_u;
	int i;

	for (i = 0; i < icd->nr; i++) {
		io_u = td->io_ops->event(td, i);

		io_completed(td, io_u, icd);

		if (!(io_u->flags & IO_U_F_FREE_DEF))
			put_io_u(td, io_u);
	}
}
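/*
 * Hedged sketch of the async reap path, loosely modeled on fio's
 * io_u_queued_complete(): wait for at least min_evts events from the
 * engine, then run the batch through ios_completed() above. The exact
 * td_io_getevents() signature for this fio vintage is an assumption.
 */
static int reap_async_events(struct thread_data *td, int min_evts,
			     uint64_t *bytes)
{
	struct io_completion_data icd;
	int n, ddir;

	n = td_io_getevents(td, min_evts, td->o.iodepth, NULL);
	if (n <= 0)
		return n;

	init_icd(td, &icd, n);	/* icd.nr = number of reaped events */
	ios_completed(td, &icd);

	if (icd.error) {
		td_verror(td, icd.error, "reap_async_events");
		return -1;
	}

	if (bytes)
		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];

	return n;
}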
/*
 * Manage next request event
 */
void datadev_request(datadev_t ddp)
{
	io_req_t ior;
	spl_t s;

	s = splsched();
	mutex_lock(&datadev_lock);

	if (ddp != (datadev_t)0) {
		/*
		 * Queue current request
		 */
		queue_enter(&datadev_wait, ddp, datadev_t, dd_chain);
	}

	/*
	 * Try to start next request
	 */
	if (queue_empty(&datadev_wait) || datadev_ior == (io_req_t)0) {
		/*
		 * No request or no pending read
		 */
		mutex_unlock(&datadev_lock);
		splx(s);
		return;
	}

	/*
	 * Extract first waiting request
	 */
	ddp = (datadev_t)queue_first(&datadev_wait);

	/*
	 * Extract pending I/O request
	 */
	ior = datadev_ior;
	datadev_ior = (io_req_t)0;

	/*
	 * Check that the caller's read buffer is large enough
	 */
	if (ior->io_count < ddp->dd_size) {
		/*
		 * Return size error for this request
		 */
		mutex_unlock(&datadev_lock);
		splx(s);
		ior->io_error = D_INVALID_SIZE;
	} else {
		/*
		 * Move waiting request from the waiting queue to the
		 * active one.
		 */
		queue_remove(&datadev_wait, ddp, datadev_t, dd_chain);
		queue_enter(&datadev_curr, ddp, datadev_t, dd_chain);
		mutex_unlock(&datadev_lock);
		splx(s);

		/*
		 * Activate the request
		 */
		bcopy(ddp->dd_name, ior->io_data, ddp->dd_size);
		ddp->dd_dev = ior->io_unit;
		ior->io_residual = ior->io_count - ddp->dd_size;
		ior->io_error = D_SUCCESS;
	}

	io_completed(ior, FALSE);
}
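/*
 * Hypothetical read-side counterpart (a sketch, not from the source):
 * since datadev_ior is a single pointer, at most one read can be
 * pending at a time. A device read handler would park the incoming
 * io_req_t there and kick datadev_request() to pair it with the first
 * waiting datadev. The function name datadev_read is an assumption.
 */
io_return_t datadev_read(dev_t dev, io_req_t ior)
{
	spl_t s;

	s = splsched();
	mutex_lock(&datadev_lock);

	if (datadev_ior != (io_req_t)0) {
		/* Only one outstanding read is supported */
		mutex_unlock(&datadev_lock);
		splx(s);
		return D_INVALID_OPERATION;
	}

	datadev_ior = ior;	/* park the pending read */
	mutex_unlock(&datadev_lock);
	splx(s);

	/*
	 * Kick the matcher with no new request; it completes the
	 * parked read against a waiting datadev, if one exists.
	 */
	datadev_request((datadev_t)0);
	return D_IO_QUEUED;
}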