static int fill_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->io_ops->flags & FIO_NOIO)
		goto out;

	set_rw_ddir(td, io_u);

	/*
	 * fsync() or fdatasync() or trim etc, we are done
	 */
	if (!ddir_rw(io_u->ddir))
		goto out;

	/*
	 * See if it's time to switch to a new zone
	 */
	if (td->zone_bytes >= td->o.zone_size && td->o.zone_skip) {
		td->zone_bytes = 0;
		io_u->file->file_offset += td->o.zone_range + td->o.zone_skip;
		io_u->file->last_pos = io_u->file->file_offset;
		td->io_skip_bytes += td->o.zone_skip;
	}

	/*
	 * No log, let the seq/rand engine retrieve the next buflen and
	 * position.
	 */
	if (get_next_offset(td, io_u)) {
		dprint(FD_IO, "io_u %p, failed getting offset\n", io_u);
		return 1;
	}

	io_u->buflen = get_next_buflen(td, io_u);
	if (!io_u->buflen) {
		dprint(FD_IO, "io_u %p, failed getting buflen\n", io_u);
		return 1;
	}

	if (io_u->offset + io_u->buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "io_u %p, offset too large\n", io_u);
		dprint(FD_IO, "  off=%llu/%lu > %llu\n", io_u->offset,
			io_u->buflen, io_u->file->real_file_size);
		return 1;
	}

	/*
	 * mark entry before potentially trimming io_u
	 */
	if (td_random(td) && file_randommap(td, io_u->file))
		mark_random_map(td, io_u);

	/*
	 * If using a write iolog, store this entry.
	 */
out:
	dprint_io_u(io_u, "fill_io_u");
	td->zone_bytes += io_u->buflen;
	log_io_u(td, io_u);
	return 0;
}

/*
 * Must be serialized by caller.
 */
int workqueue_enqueue(struct workqueue *wq, struct io_u *io_u)
{
	struct submit_worker *sw;

	sw = get_submit_worker(wq);
	if (sw) {
		const enum fio_ddir ddir = acct_ddir(io_u);
		struct thread_data *parent = wq->td;

		if (ddir_rw(ddir)) {
			parent->io_issues[ddir]++;
			parent->io_issue_bytes[ddir] += io_u->xfer_buflen;
			parent->rate_io_issue_bytes[ddir] += io_u->xfer_buflen;
		}

		pthread_mutex_lock(&sw->lock);
		flist_add_tail(&io_u->verify_list, &sw->work_list);
		sw->seq = ++wq->work_seq;
		sw->flags &= ~SW_F_IDLE;
		pthread_mutex_unlock(&sw->lock);

		pthread_cond_signal(&sw->cond);
		return FIO_Q_QUEUED;
	}

	return FIO_Q_BUSY;
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq, unsigned long long *b)
{
	struct fio_file *f = io_u->file;
	int ret;

	assert(ddir_rw(ddir));

	if (rw_seq) {
		if (td_random(td))
			ret = get_next_rand_block(td, f, ddir, b);
		else
			ret = get_next_seq_block(td, f, ddir, b);
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_block(td, f, ddir, b);
			if (ret)
				ret = get_next_rand_block(td, f, ddir, b);
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				*b = (f->last_start - f->file_offset) /
					td->o.min_bs[ddir];
			else
				*b = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	return ret;
}

/*
 * For random io, generate a random new block and see if it's used. Repeat
 * until we find a free one. For sequential io, just return the end of
 * the last io issued.
 */
static int __get_next_offset(struct thread_data *td, struct io_u *io_u,
			     unsigned int *is_random)
{
	struct fio_file *f = io_u->file;
	enum fio_ddir ddir = io_u->ddir;
	int rw_seq_hit = 0;

	assert(ddir_rw(ddir));

	if (td->o.ddir_seq_nr && !--td->ddir_seq_nr) {
		rw_seq_hit = 1;
		td->ddir_seq_nr = td->o.ddir_seq_nr;
	}

	if (get_next_block(td, io_u, ddir, rw_seq_hit, is_random))
		return 1;

	if (io_u->offset >= f->io_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= io_size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->io_size);
		return 1;
	}

	io_u->offset += f->file_offset;
	if (io_u->offset >= f->real_file_size) {
		dprint(FD_IO, "get_next_offset: offset %llu >= size %llu\n",
					(unsigned long long) io_u->offset,
					(unsigned long long) f->real_file_size);
		return 1;
	}

	return 0;
}

static uint64_t last_block(struct thread_data *td, struct fio_file *f,
			   enum fio_ddir ddir)
{
	uint64_t max_blocks;
	uint64_t max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	if (td->o.zone_range)
		max_size = td->o.zone_range;

	if (td->o.min_bs[ddir] > td->o.ba[ddir])
		max_size -= td->o.min_bs[ddir] - td->o.ba[ddir];

	max_blocks = max_size / (uint64_t) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

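/*
 * Illustrative sketch (not fio code): the blockalign arithmetic above for
 * hypothetical values io_size = 1 MiB, ba = 512, min_bs = 4096. Shrinking
 * max_size by (min_bs - ba) keeps the highest selectable block from
 * starting so late that a full min_bs I/O would run past io_size.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t io_size = 1048576, ba = 512, min_bs = 4096;	/* example values */
	uint64_t max_size = io_size;
	uint64_t max_blocks;

	if (min_bs > ba)
		max_size -= min_bs - ba;	/* same adjustment as last_block() */

	max_blocks = max_size / ba;
	printf("max_blocks = %llu, last I/O ends at %llu\n",
	       (unsigned long long) max_blocks,
	       (unsigned long long) ((max_blocks - 1) * ba + min_bs));
	return 0;
}
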
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	const int ddir = io_u->ddir;
	unsigned int uninitialized_var(buflen);
	unsigned int minbs, maxbs;
	unsigned long r, rand_max;

	assert(ddir_rw(ddir));

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	if (td->o.use_os_rand)
		rand_max = OS_RAND_MAX;
	else
		rand_max = FRAND_MAX;

	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->bsrange_state);
		else
			r = __rand(&td->__bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (rand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((rand_max / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}

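/*
 * Illustrative sketch (not fio code): the uniform scaling and power-of-two
 * rounding used above, with an assumed 32-bit random range and a
 * hypothetical 4k..64k block-size span. A mid-range r lands near maxbs/2
 * and is then rounded up to a multiple of minbs.
 */
#include <stdio.h>

#define SKETCH_RAND_MAX 0xffffffffUL	/* stand-in for FRAND_MAX */

static unsigned int sketch_buflen(unsigned long r, unsigned int minbs,
				  unsigned int maxbs)
{
	unsigned int buflen;

	/* same scaling as the !bssplit_nr branch of __get_next_buflen() */
	buflen = 1 + (unsigned int) ((double) maxbs *
			(r / (SKETCH_RAND_MAX + 1.0)));
	if (buflen < minbs)
		buflen = minbs;

	/* same rounding as the tail of the do/while loop */
	buflen = (buflen + minbs - 1) & ~(minbs - 1);
	return buflen;
}

int main(void)
{
	printf("%u\n", sketch_buflen(SKETCH_RAND_MAX / 2, 4096, 65536));
	return 0;
}
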
static int get_next_seq_block(struct thread_data *td, struct fio_file *f,
			      enum fio_ddir ddir, unsigned long long *b)
{
	assert(ddir_rw(ddir));

	if (f->last_pos < f->real_file_size) {
		*b = (f->last_pos - f->file_offset) / td->o.min_bs[ddir];
		return 0;
	}

	return 1;
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td) && td->o.rwmix[odir]) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	io_u_quiesce(td);

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir_trim(ddir))
		return ddir;

	return ddir;
}

static int get_next_block(struct thread_data *td, struct io_u *io_u,
			  enum fio_ddir ddir, int rw_seq)
{
	struct fio_file *f = io_u->file;
	unsigned long long b, offset;
	int ret;

	assert(ddir_rw(ddir));

	b = offset = -1ULL;

	if (rw_seq) {
		if (td_random(td))
			ret = get_next_rand_block(td, f, ddir, &b);
		else
			ret = get_next_seq_offset(td, f, ddir, &offset);
	} else {
		io_u->flags |= IO_U_F_BUSY_OK;

		if (td->o.rw_seq == RW_SEQ_SEQ) {
			ret = get_next_seq_offset(td, f, ddir, &offset);
			if (ret)
				ret = get_next_rand_block(td, f, ddir, &b);
		} else if (td->o.rw_seq == RW_SEQ_IDENT) {
			if (f->last_start != -1ULL)
				offset = f->last_start - f->file_offset;
			else
				offset = 0;
			ret = 0;
		} else {
			log_err("fio: unknown rw_seq=%d\n", td->o.rw_seq);
			ret = 1;
		}
	}

	if (!ret) {
		if (offset != -1ULL)
			io_u->offset = offset;
		else if (b != -1ULL)
			io_u->offset = b * td->o.ba[ddir];
		else {
			log_err("fio: bug in offset generation\n");
			ret = 1;
		}
	}

	return ret;
}

static int fio_syncio_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;

	if (!ddir_rw(io_u->ddir))
		return 0;

	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (lseek(f->fd, io_u->offset, SEEK_SET) == -1) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	const int ddir = io_u->ddir;
	unsigned int uninitialized_var(buflen);
	unsigned int minbs, maxbs;
	long r;

	assert(ddir_rw(ddir));

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		buflen = minbs;
	else {
		r = os_random_long(&td->bsrange_state);
		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (OS_RAND_MAX + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if (r <= ((OS_RAND_MAX / 100L) * perc))
					break;
			}
		}
		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);
	}

	if (io_u->offset + buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen,
						minbs, ddir);
		buflen = minbs;
	}

	return buflen;
}

static int __setup_rate(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int bs = td->o.min_bs[ddir];

	assert(ddir_rw(ddir));

	if (td->o.rate[ddir])
		td->rate_bps[ddir] = td->o.rate[ddir];
	else
		td->rate_bps[ddir] = td->o.rate_iops[ddir] * bs;

	if (!td->rate_bps[ddir]) {
		log_err("rate lower than supported\n");
		return -1;
	}

	td->rate_pending_usleep[ddir] = 0;
	return 0;
}

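/*
 * Illustrative sketch (not fio code): the byte rate that __setup_rate()
 * derives when only an IOPS limit is given. With hypothetical values
 * rate_iops = 1000 and min_bs = 4096, the effective cap works out to
 * roughly 4 MB/s.
 */
#include <stdio.h>

int main(void)
{
	unsigned long long rate_iops = 1000;	/* assumed example value */
	unsigned long long min_bs = 4096;	/* assumed example value */
	unsigned long long rate_bps = rate_iops * min_bs;

	printf("rate_bps = %llu (%.1f MB/s)\n", rate_bps, rate_bps / 1e6);
	return 0;
}
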
void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(__io_u->ddir))
		td->io_issues[__io_u->ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;
	flist_del(&__io_u->list);
	flist_add_tail(&__io_u->list, &td->io_u_requeues);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static int fio_gf_prep(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct gf_data *g = td->io_ops_data;

	dprint(FD_FILE, "fio prep\n");

	if (!ddir_rw(io_u->ddir))
		return 0;

	if (LAST_POS(f) != -1ULL && LAST_POS(f) == io_u->offset)
		return 0;

	if (glfs_lseek(g->fd, io_u->offset, SEEK_SET) < 0) {
		td_verror(td, errno, "lseek");
		return 1;
	}

	return 0;
}

static int fio_gf_queue(struct thread_data *td, struct io_u *io_u)
{
	struct gf_data *g = td->io_ops_data;
	int ret = 0;

	dprint(FD_FILE, "fio queue len %lu\n", io_u->xfer_buflen);
	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		ret = glfs_read(g->fd, io_u->xfer_buf, io_u->xfer_buflen, 0);
	else if (io_u->ddir == DDIR_WRITE)
		ret = glfs_write(g->fd, io_u->xfer_buf, io_u->xfer_buflen, 0);
	else if (io_u->ddir == DDIR_SYNC)
		ret = glfs_fsync(g->fd);
	else if (io_u->ddir == DDIR_DATASYNC)
		ret = glfs_fdatasync(g->fd);
	else {
		log_err("unsupported operation.\n");
		return -EINVAL;
	}
	dprint(FD_FILE, "fio len %lu ret %d\n", io_u->xfer_buflen, ret);
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		log_err("IO failed.\n");
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}

static enum fio_q_status fio_mmapio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	fio_ro_check(td, io_u);

	if (io_u->ddir == DDIR_READ)
		memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
	else if (ddir_sync(io_u->ddir)) {
		if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
	} else if (io_u->ddir == DDIR_TRIM) {
		int ret = do_io_u_trim(td, io_u);

		if (!ret)
			td_verror(td, io_u->error, "trim");
	}

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	if (td->o.odirect && ddir_rw(io_u->ddir)) {
		if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
		if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen,
				  POSIX_MADV_DONTNEED) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "madvise");
		}
	}

	return FIO_Q_COMPLETED;
}

void requeue_io_u(struct thread_data *td, struct io_u **io_u)
{
	struct io_u *__io_u = *io_u;
	enum fio_ddir ddir = acct_ddir(__io_u);

	dprint(FD_IO, "requeue %p\n", __io_u);

	td_io_u_lock(td);

	__io_u->flags |= IO_U_F_FREE;
	if ((__io_u->flags & IO_U_F_FLIGHT) && ddir_rw(ddir))
		td->io_issues[ddir]--;

	__io_u->flags &= ~IO_U_F_FLIGHT;
	if (__io_u->flags & IO_U_F_IN_CUR_DEPTH)
		td->cur_depth--;

	io_u_rpush(&td->io_u_requeues, __io_u);
	td_io_u_unlock(td);
	*io_u = NULL;
}

static unsigned long long last_block(struct thread_data *td,
				     struct fio_file *f,
				     enum fio_ddir ddir)
{
	unsigned long long max_blocks;
	unsigned long long max_size;

	assert(ddir_rw(ddir));

	/*
	 * Hmm, should we make sure that ->io_size <= ->real_file_size?
	 */
	max_size = f->io_size;
	if (max_size > f->real_file_size)
		max_size = f->real_file_size;

	max_blocks = max_size / (unsigned long long) td->o.ba[ddir];
	if (!max_blocks)
		return 0;

	return max_blocks;
}

static int fio_io_end(struct thread_data *td, struct io_u *io_u, int ret)
{
	if (io_u->file && ret >= 0 && ddir_rw(io_u->ddir))
		LAST_POS(io_u->file) = io_u->offset + ret;

	if (ret != (int) io_u->xfer_buflen) {
		if (ret >= 0) {
			io_u->resid = io_u->xfer_buflen - ret;
			io_u->error = 0;
			return FIO_Q_COMPLETED;
		} else
			io_u->error = errno;
	}

	if (io_u->error) {
		io_u_log_error(td, io_u);
		td_verror(td, io_u->error, "xfer");
	}

	return FIO_Q_COMPLETED;
}

static int get_next_seq_offset(struct thread_data *td, struct fio_file *f,
			       enum fio_ddir ddir, uint64_t *offset)
{
	assert(ddir_rw(ddir));

	if (f->last_pos >= f->io_size + get_start_offset(td) &&
	    td->o.time_based)
		f->last_pos = f->last_pos - f->io_size;

	if (f->last_pos < f->real_file_size) {
		uint64_t pos;

		if (f->last_pos == f->file_offset && td->o.ddir_seq_add < 0)
			f->last_pos = f->real_file_size;

		pos = f->last_pos - f->file_offset;
		if (pos)
			pos += td->o.ddir_seq_add;

		*offset = pos;
		return 0;
	}

	return 1;
}

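/*
 * Illustrative sketch (not fio code): how ddir_seq_add strides sequential
 * offsets. Each I/O starts at the end of the previous one plus the
 * configured hole, so with an assumed bs = 4k and ddir_seq_add = 4k the
 * offsets visited are 0, 8k, 16k, 24k, ...
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	uint64_t bs = 4096, seq_add = 4096;	/* assumed example values */
	uint64_t last_pos = 0, pos;
	int i;

	for (i = 0; i < 4; i++) {
		pos = last_pos;
		if (pos)
			pos += seq_add;		/* same rule as above */
		printf("io %d at offset %llu\n", i, (unsigned long long) pos);
		last_pos = pos + bs;		/* end of this I/O */
	}
	return 0;
}
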
static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	struct fio_file *f;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);

	/*
	 * Mark IO ok to verify
	 */
	if (io_u->ipo) {
		io_u->ipo->flags &= ~IP_F_IN_FLIGHT;
		write_barrier();
	}

	td_io_u_unlock(td);

	if (ddir_sync(io_u->ddir)) {
		td->last_was_sync = 1;
		f = io_u->file;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = io_u->ddir;

	if (!io_u->error && ddir_rw(io_u->ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
		const enum fio_ddir odx = io_u->ddir ^ 1;
		int ret;

		td->io_blocks[idx]++;
		td->this_io_blocks[idx]++;
		td->io_bytes[idx] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[idx] += bytes;

		if (idx == DDIR_WRITE) {
			f = io_u->file;
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, idx, bytes);

			if (__should_check_rate(td, idx)) {
				td->rate_pending_usleep[idx] =
					(usec_for_io(td, idx) -
					 utime_since_now(&td->start));
			}
			if (idx != DDIR_TRIM && __should_check_rate(td, odx))
				td->rate_pending_usleep[odx] =
					(usec_for_io(td, odx) -
					 utime_since_now(&td->start));
		}

		icd->bytes_done[idx] += bytes;

		if (io_u->end_io) {
			ret = io_u->end_io(td, io_u);
			if (ret && !icd->error)
				icd->error = ret;
		}
	} else if (io_u->error) {
		icd->error = io_u->error;
		io_u_log_error(td, io_u);
	}
	if (icd->error) {
		enum error_type_bit eb = td_error_type(io_u->ddir, icd->error);

		if (!td_non_fatal_error(td, eb, icd->error))
			return;
		/*
		 * If there is a non_fatal error, then add to the error count
		 * and clear all the errors.
		 */
		update_error_count(td, icd->error);
		td_clear_error(td);
		icd->error = 0;
		io_u->error = 0;
	}
}

/*
 * Return an io_u to be processed. Gets a buflen and offset, sets direction,
 * etc. The returned io_u is fully ready to be prepped and submitted.
 */
struct io_u *get_io_u(struct thread_data *td)
{
	struct fio_file *f;
	struct io_u *io_u;
	int do_scramble = 0;
	long ret = 0;

	io_u = __get_io_u(td);
	if (!io_u) {
		dprint(FD_IO, "__get_io_u failed\n");
		return NULL;
	}

	if (check_get_verify(td, io_u))
		goto out;
	if (check_get_trim(td, io_u))
		goto out;

	/*
	 * from a requeue, io_u already setup
	 */
	if (io_u->file)
		goto out;

	/*
	 * If using an iolog, grab next piece if any available.
	 */
	if (td->flags & TD_F_READ_IOLOG) {
		if (read_iolog_get(td, io_u))
			goto err_put;
	} else if (set_io_u_file(td, io_u)) {
		ret = -EBUSY;
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	f = io_u->file;
	if (!f) {
		dprint(FD_IO, "io_u %p, setting file failed\n", io_u);
		goto err_put;
	}

	assert(fio_file_open(f));

	if (ddir_rw(io_u->ddir)) {
		if (!io_u->buflen && !(td->io_ops->flags & FIO_NOIO)) {
			dprint(FD_IO, "get_io_u: zero buflen on %p\n", io_u);
			goto err_put;
		}

		f->last_start = io_u->offset;
		f->last_pos = io_u->offset + io_u->buflen;

		if (io_u->ddir == DDIR_WRITE) {
			if (td->flags & TD_F_REFILL_BUFFERS) {
				io_u_fill_buffer(td, io_u,
					io_u->xfer_buflen, io_u->xfer_buflen);
			} else if (td->flags & TD_F_SCRAMBLE_BUFFERS)
				do_scramble = 1;
			if (td->flags & TD_F_VER_NONE) {
				populate_verify_io_u(td, io_u);
				do_scramble = 0;
			}
		} else if (io_u->ddir == DDIR_READ) {
			/*
			 * Reset the buf_filled parameters so next time if the
			 * buffer is used for writes it is refilled.
			 */
			io_u->buf_filled_len = 0;
		}
	}

	/*
	 * Set io data pointers.
	 */
	io_u->xfer_buf = io_u->buf;
	io_u->xfer_buflen = io_u->buflen;

out:
	assert(io_u->file);
	if (!td_io_prep(td, io_u)) {
		if (!td->o.disable_slat)
			fio_gettime(&io_u->start_time, NULL);
		if (do_scramble)
			small_content_scramble(io_u);
		return io_u;
	}
err_put:
	dprint(FD_IO, "get_io_u failed\n");
	put_io_u(td, io_u);
	return ERR_PTR(ret);
}

static enum fio_ddir rate_ddir(struct thread_data *td, enum fio_ddir ddir)
{
	enum fio_ddir odir = ddir ^ 1;
	struct timeval t;
	long usec;

	assert(ddir_rw(ddir));

	if (td->rate_pending_usleep[ddir] <= 0)
		return ddir;

	/*
	 * We have too much pending sleep in this direction. See if we
	 * should switch.
	 */
	if (td_rw(td)) {
		/*
		 * Other direction does not have too much pending, switch
		 */
		if (td->rate_pending_usleep[odir] < 100000)
			return odir;

		/*
		 * Both directions have pending sleep. Sleep the minimum time
		 * and deduct from both.
		 */
		if (td->rate_pending_usleep[ddir] <=
			td->rate_pending_usleep[odir]) {
			usec = td->rate_pending_usleep[ddir];
		} else {
			usec = td->rate_pending_usleep[odir];
			ddir = odir;
		}
	} else
		usec = td->rate_pending_usleep[ddir];

	/*
	 * We are going to sleep, ensure that we flush anything pending as
	 * not to skew our latency numbers.
	 *
	 * Changed to only monitor 'in flight' requests here instead of the
	 * td->cur_depth, b/c td->cur_depth does not accurately represent
	 * io's that have been actually submitted to an async engine,
	 * and cur_depth is meaningless for sync engines.
	 */
	if (td->io_u_in_flight) {
		int fio_unused ret;

		ret = io_u_queued_complete(td, td->io_u_in_flight, NULL);
	}

	fio_gettime(&t, NULL);
	usec_sleep(td, usec);
	usec = utime_since_now(&t);

	td->rate_pending_usleep[ddir] -= usec;

	odir = ddir ^ 1;
	if (td_rw(td) && __should_check_rate(td, odir))
		td->rate_pending_usleep[odir] -= usec;

	if (ddir_trim(ddir))
		return ddir;

	return ddir;
}

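/*
 * Illustrative sketch (not fio code): the direction-switching rule used by
 * rate_ddir() when both directions owe sleep time. The side owing less is
 * the one slept for, and the slept time is deducted from both pending
 * counters. The pending values below are assumed example figures.
 */
#include <stdio.h>

int main(void)
{
	long pending[2] = { 250000, 180000 };	/* usec owed: read, write */
	int ddir = 0, odir = 1;
	long usec;

	if (pending[ddir] <= pending[odir])
		usec = pending[ddir];
	else {
		usec = pending[odir];
		ddir = odir;		/* switch to the cheaper direction */
	}

	pending[0] -= usec;
	pending[1] -= usec;
	printf("sleep %ld usec, issue ddir %d, pending now %ld/%ld\n",
	       usec, ddir, pending[0], pending[1]);
	return 0;
}
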
static void io_completed(struct thread_data *td, struct io_u *io_u,
			 struct io_completion_data *icd)
{
	struct fio_file *f;

	dprint_io_u(io_u, "io complete");

	td_io_u_lock(td);
	assert(io_u->flags & IO_U_F_FLIGHT);
	io_u->flags &= ~(IO_U_F_FLIGHT | IO_U_F_BUSY_OK);
	td_io_u_unlock(td);

	if (ddir_sync(io_u->ddir)) {
		td->last_was_sync = 1;
		f = io_u->file;
		if (f) {
			f->first_write = -1ULL;
			f->last_write = -1ULL;
		}
		return;
	}

	td->last_was_sync = 0;
	td->last_ddir = io_u->ddir;

	if (!io_u->error && ddir_rw(io_u->ddir)) {
		unsigned int bytes = io_u->buflen - io_u->resid;
		const enum fio_ddir idx = io_u->ddir;
		const enum fio_ddir odx = io_u->ddir ^ 1;
		int ret;

		td->io_blocks[idx]++;
		td->this_io_blocks[idx]++;
		td->io_bytes[idx] += bytes;

		if (!(io_u->flags & IO_U_F_VER_LIST))
			td->this_io_bytes[idx] += bytes;

		if (idx == DDIR_WRITE) {
			f = io_u->file;
			if (f) {
				if (f->first_write == -1ULL ||
				    io_u->offset < f->first_write)
					f->first_write = io_u->offset;
				if (f->last_write == -1ULL ||
				    ((io_u->offset + bytes) > f->last_write))
					f->last_write = io_u->offset + bytes;
			}
		}

		if (ramp_time_over(td) && (td->runstate == TD_RUNNING ||
					   td->runstate == TD_VERIFYING)) {
			account_io_completion(td, io_u, icd, idx, bytes);

			if (__should_check_rate(td, idx)) {
				td->rate_pending_usleep[idx] =
					(usec_for_io(td, idx) -
					 utime_since_now(&td->start));
			}
			if (__should_check_latency(td, idx)) {
				unsigned long lusec = utime_since(
						&io_u->issue_time, &icd->time);

				/* Linear increase and logarithmic decrease */
				if (lusec > td->o.shed_latency[idx]) {
					if (td->shed_count[idx] < MAX_SHED_COUNT) {
						td->shed_count[idx] +=
							(1 << SHED_FRAC_BITS);
					}
				} else if (td->shed_count[idx]) {
					td->shed_count[idx] -=
						get_used_bits(td->shed_count[idx]);
				}
				if (td->shed_count[idx]) {
					lusec = (lusec * td->shed_count[idx]) >>
							SHED_FRAC_BITS;
					if (lusec > td->rate_pending_usleep[idx]) {
						td->rate_pending_usleep[idx] =
							lusec;
					}
				}
			}