static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir_trim(ddir))
		return ddir;

	return ddir;
}
static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td)) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
	}

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir_trim(ddir))
		return ddir;

	return ddir;
}
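Pulled out as a toy, the selection policy shared by both versions above is easy to follow end to end: stay on the current direction if it owes no sleep, switch outright if the other direction's debt is under 100ms, otherwise sleep the smaller debt. The sketch below is illustrative only — none of these names exist in fio — and it assumes a mixed read/write job with rate checking on both directions, so the slept time is charged to both debts, mirroring the tail of rate_ddir().

#include <stdio.h>

/*
 * Toy version of the rate_ddir() selection rule. pending[] holds the
 * per-direction sleep debt in microseconds; 0 = read, 1 = write.
 */
static int pick_rate_ddir(long pending[2], int ddir, long *usec)
{
	int odir = ddir ^ 1;

	*usec = 0;
	if (pending[ddir] <= 0)
		return ddir;
	/* Other direction has little pending sleep: just switch */
	if (pending[odir] < 100000)
		return odir;
	/* Both owe sleep: sleep the smaller debt, switching if needed */
	if (pending[ddir] > pending[odir])
		ddir = odir;
	*usec = pending[ddir];
	/* Simplification: assume both directions are rate checked */
	pending[0] -= *usec;
	pending[1] -= *usec;
	return ddir;
}

int main(void)
{
	long pending[2] = { 250000, 150000 };	/* read/write debt in usec */
	long usec;
	int ddir = pick_rate_ddir(pending, 0, &usec);

	printf("sleep %ldus, issue %s next, debts now %ld/%ld\n",
	       usec, ddir ? "write" : "read", pending[0], pending[1]);
	return 0;
}

With a 250ms read debt and a 150ms write debt, this sleeps 150ms, switches to writes, and leaves debts of 100ms/0 — the "sleep the minimum and deduct from both" case from the comment above.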
static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	struct fio_file *f;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);

	/*
	 * Mark IO ok to verify
	 */
	if (io_u->ipo) {
		io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
		write_barrier();
	}

	td_io_u_unlock(td);

	if (ddir_sync(io_u->ddir)) {
		td->last_was_sync = 1;
		f = io_u->file;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = io_u->ddir;

	if (!io_u->error && ddir_rw(io_u->ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
		const enum fio_ddir odx = io_u->ddir ^ 1;
		int ret;

		td->io_blocks[idx]++;
		td->this_io_blocks[idx]++;
		td->io_bytes[idx] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[idx] += bytes;

		if (idx == DDIR_WRITE) {
			f = io_u->file;
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, idx, bytes);

			if (__should_check_rate(td, idx)) {
				td->rate_pending_usleep[idx] =
					(usec_for_io(td, idx) -
					 utime_since_now(&td->start));
			}
			if (idx != DDIR_TRIM && __should_check_rate(td, odx))
				td->rate_pending_usleep[odx] =
					(usec_for_io(td, odx) -
					 utime_since_now(&td->start));
		}

		icd->bytes_done[idx] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u);
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;
		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		io_u->error = 0;
	}
}
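One detail worth seeing in isolation is the first_write/last_write bookkeeping performed for DDIR_WRITE completions: both bounds start at -1ULL ("unset") and grow to cover the lowest offset written and one past the highest byte written. A minimal sketch, with hypothetical names (struct extent, track_write) that are not part of fio:

#include <stdio.h>
#include <stdint.h>

struct extent {
	uint64_t first_write;	/* lowest offset written, -1ULL if unset */
	uint64_t last_write;	/* one past the highest byte, -1ULL if unset */
};

/* Same grow-the-bounds logic as the DDIR_WRITE branch above */
static void track_write(struct extent *e, uint64_t off, unsigned int len)
{
	if (e->first_write == -1ULL || off < e->first_write)
		e->first_write = off;
	if (e->last_write == -1ULL || off + len > e->last_write)
		e->last_write = off + len;
}

int main(void)
{
	struct extent e = { -1ULL, -1ULL };

	track_write(&e, 8192, 4096);
	track_write(&e, 0, 512);
	printf("dirty range: [%llu, %llu)\n",
	       (unsigned long long) e.first_write,
	       (unsigned long long) e.last_write);
	return 0;
}

Note that io_completed() resets both fields to -1ULL on a sync completion, so the tracked range always describes writes since the last sync.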
static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	struct fio_file *f;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
	td_io_u_unlock(td);

	if (ddir_sync(io_u->ddir)) {
		td->last_was_sync = 1;
		f = io_u->file;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = io_u->ddir;

	if (!io_u->error && ddir_rw(io_u->ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
		const enum fio_ddir odx = io_u->ddir ^ 1;
		int ret;

		td->io_blocks[idx]++;
		td->this_io_blocks[idx]++;
		td->io_bytes[idx] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[idx] += bytes;

		if (idx == DDIR_WRITE) {
			f = io_u->file;
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, idx, bytes);

			if (__should_check_rate(td, idx)) {
				td->rate_pending_usleep[idx] =
					(usec_for_io(td, idx) -
					 utime_since_now(&td->start));
			}
			if (__should_check_latency(td, idx)) {
				unsigned long lusec = utime_since(
					&io_u->issue_time, &icd->time);

				/* Linear increase and logarithmic decrease */
				if (lusec > td->o.shed_latency[idx]) {
					if (td->shed_count[idx] < MAX_SHED_COUNT) {
						td->shed_count[idx] +=
							(1 << SHED_FRAC_BITS);
					}
				} else if (td->shed_count[idx]) {
					td->shed_count[idx] -=
						get_used_bits(td->shed_count[idx]);
				}
				if (td->shed_count[idx]) {
					lusec = (lusec * td->shed_count[idx]) >>
							SHED_FRAC_BITS;
					if (lusec > td->rate_pending_usleep[idx])
						td->rate_pending_usleep[idx] = lusec;
				}
			}
			/* Remainder follows the stock io_completed() above */
			if (idx != DDIR_TRIM && __should_check_rate(td, odx))
				td->rate_pending_usleep[odx] =
					(usec_for_io(td, odx) -
					 utime_since_now(&td->start));
		}

		icd->bytes_done[idx] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u);
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;
		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		io_u->error = 0;
	}
}
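The latency-shed branch above keeps shed_count as a fixed-point scale factor: each over-target completion adds 1.0 (in SHED_FRAC_BITS fixed point, capped at MAX_SHED_COUNT), each under-target completion subtracts the significant-bit count, and any nonzero count inflates the pending sleep. The standalone sketch below makes that "linear increase and logarithmic decrease" concrete; the SHED_FRAC_BITS and MAX_SHED_COUNT values are made up, and get_used_bits() is assumed to count the significant bits of its argument, since its definition is not shown in the patch.

#include <stdio.h>

#define SHED_FRAC_BITS	3			/* assumed value */
#define MAX_SHED_COUNT	(8 << SHED_FRAC_BITS)	/* assumed cap: 8.0 */

/* Assumed semantics: number of significant bits, so the decay step
 * scales with log2 of the current count. */
static unsigned long get_used_bits(unsigned long v)
{
	unsigned long bits = 0;

	while (v) {
		bits++;
		v >>= 1;
	}
	return bits;
}

static unsigned long shed_count;

/* Returns the extra sleep (usec) implied by a completion latency */
static unsigned long shed_scale(unsigned long lusec, unsigned long target)
{
	/* Linear increase when over target, logarithmic decrease below */
	if (lusec > target) {
		if (shed_count < MAX_SHED_COUNT)
			shed_count += 1 << SHED_FRAC_BITS;
	} else if (shed_count)
		shed_count -= get_used_bits(shed_count);

	if (shed_count)
		return (lusec * shed_count) >> SHED_FRAC_BITS;
	return 0;
}

int main(void)
{
	/* Three slow completions against a 1000us target, then fast ones */
	unsigned long samples[] = { 1500, 1500, 1500, 400, 400, 400 };
	unsigned int i;

	for (i = 0; i < sizeof(samples) / sizeof(samples[0]); i++) {
		unsigned long extra = shed_scale(samples[i], 1000);

		printf("lat %4luus -> extra sleep %4luus (count %lu)\n",
		       samples[i], extra, shed_count);
	}
	return 0;
}

Running this shows the count ramping by 8 per slow completion and shedding only a few units per fast one, so throttling backs off gradually after a latency spike rather than all at once.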