static void randomize_buf(void *buf, unsigned int size, int seed)
{
	struct frand_state state;

	init_rand_seed(&state, seed);
	fill_random_buf(&state, buf, size);
}
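/*
 * Illustrative sketch, not part of the original source: randomize_buf()
 * is deterministic for a given seed, so two buffers filled with the same
 * seed end up identical. The helper name, buffer size, and seed value
 * below are made up for the example; assumes <assert.h> and <string.h>
 * are available.
 */
static void randomize_buf_example(void)
{
	char a[4096], b[4096];

	randomize_buf(a, sizeof(a), 0x5eed);
	randomize_buf(b, sizeof(b), 0x5eed);
	assert(!memcmp(a, b, sizeof(a)));	/* same seed, same bytes */
}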
/*
 * Fill 'p' with the verify pattern. With no pattern configured (case 0),
 * fill random data, optionally from a fixed seed. A single-byte pattern
 * is a plain memset; longer patterns are copied repeatedly until 'len'
 * bytes are written. Buffers that are already filled are left untouched.
 */
void fill_pattern(struct thread_data *td, void *p, unsigned int len,
		  struct io_u *io_u, unsigned long seed, int use_seed)
{
	switch (td->o.verify_pattern_bytes) {
	case 0:
		dprint(FD_VERIFY, "fill random bytes len=%u\n", len);
		if (use_seed)
			__fill_random_buf(p, len, seed);
		else
			io_u->rand_seed = fill_random_buf(&td->buf_state, p, len);
		break;
	case 1:
		if (io_u->buf_filled_len >= len) {
			dprint(FD_VERIFY, "using already filled verify pattern b=0 len=%u\n", len);
			return;
		}
		dprint(FD_VERIFY, "fill verify pattern b=0 len=%u\n", len);
		memset(p, td->o.verify_pattern[0], len);
		io_u->buf_filled_len = len;
		break;
	default: {
		unsigned int i = 0, size = 0;
		unsigned char *b = p;

		if (io_u->buf_filled_len >= len) {
			dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
					td->o.verify_pattern_bytes, len);
			return;
		}

		dprint(FD_VERIFY, "fill verify pattern b=%d len=%u\n",
					td->o.verify_pattern_bytes, len);

		while (i < len) {
			size = td->o.verify_pattern_bytes;
			if (size > (len - i))
				size = len - i;
			memcpy(b + i, td->o.verify_pattern, size);
			i += size;
		}
		io_u->buf_filled_len = len;
		break;
		}
	}
}
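/*
 * Illustrative sketch, not fio code: the default case above reduces to a
 * generic "repeat a pattern across a buffer" loop. The whole pattern is
 * copied as many times as it fits, with a truncated copy at the end. The
 * helper name is made up; assumes <string.h> for memcpy().
 */
static void repeat_pattern(void *dst, unsigned int len, const void *pattern,
			   unsigned int pattern_bytes)
{
	unsigned char *b = dst;
	unsigned int i = 0;

	while (i < len) {
		unsigned int size = pattern_bytes;

		if (size > len - i)
			size = len - i;
		memcpy(b + i, pattern, size);
		i += size;
	}
}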
/*
 * Fill a buffer that will later be verified: random data when no verify
 * pattern is configured, otherwise the configured pattern. Buffers that
 * are already filled are left untouched.
 */
void fill_verify_pattern(struct thread_data *td, void *p, unsigned int len,
			 struct io_u *io_u, unsigned long seed, int use_seed)
{
	if (!td->o.verify_pattern_bytes) {
		dprint(FD_VERIFY, "fill random bytes len=%u\n", len);

		if (use_seed)
			__fill_random_buf(p, len, seed);
		else
			io_u->rand_seed = fill_random_buf(&td->buf_state, p, len);
		return;
	}

	if (io_u->buf_filled_len >= len) {
		dprint(FD_VERIFY, "using already filled verify pattern b=%d len=%u\n",
			td->o.verify_pattern_bytes, len);
		return;
	}

	fill_pattern(td, p, len, td->o.verify_pattern, td->o.verify_pattern_bytes);
	io_u->buf_filled_len = len;
}
/*
 * Called to complete min_events number of io for the async engines.
 */
int io_u_queued_complete(struct thread_data *td, int min_evts,
			 uint64_t *bytes)
{
	struct io_completion_data icd;
	struct timespec *tvp = NULL;
	int ret;
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 0, };

	dprint(FD_IO, "io_u_queued_complete: min=%d\n", min_evts);

	/* if no minimum is required, use a zero timeout so we don't block */
	if (!min_evts)
		tvp = &ts;

	ret = td_io_getevents(td, min_evts, td->o.iodepth_batch_complete, tvp);
	if (ret < 0) {
		td_verror(td, -ret, "td_io_getevents");
		return ret;
	} else if (!ret)
		return ret;

	init_icd(td, &icd, ret);
	ios_completed(td, &icd);
	if (icd.error) {
		td_verror(td, icd.error, "io_u_queued_complete");
		return -1;
	}

	if (bytes) {
		int ddir;

		for (ddir = DDIR_READ; ddir < DDIR_RWDIR_CNT; ddir++)
			bytes[ddir] += icd.bytes_done[ddir];
	}

	return 0;
}

/*
 * Call when io_u is really queued, to update the submission latency.
 */
void io_u_queued(struct thread_data *td, struct io_u *io_u)
{
	if (!td->o.disable_slat) {
		unsigned long slat_time;

		slat_time = utime_since(&io_u->start_time, &io_u->issue_time);
		add_slat_sample(td, io_u->ddir, slat_time, io_u->xfer_buflen);
	}
}

/*
 * Fill a write buffer: use the configured buffer pattern if one is set,
 * random data otherwise (optionally laid out in segments so it compresses
 * to roughly the requested percentage), or all zeroes when zero_buffers
 * is set.
 */
void fill_io_buffer(struct thread_data *td, void *buf, unsigned int min_write,
		    unsigned int max_bs)
{
	if (td->o.buffer_pattern_bytes)
		fill_buffer_pattern(td, buf, max_bs);
	else if (!td->o.zero_buffers) {
		unsigned int perc = td->o.compress_percentage;

		if (perc) {
			unsigned int seg = min_write;

			seg = min(min_write, td->o.compress_chunk);
			if (!seg)
				seg = min_write;

			fill_random_buf_percentage(&td->buf_state, buf, perc,
							seg, max_bs);
		} else
			fill_random_buf(&td->buf_state, buf, max_bs);
	} else
		memset(buf, 0, max_bs);
}

/*
 * "randomly" fill the buffer contents
 */
void io_u_fill_buffer(struct thread_data *td, struct io_u *io_u,
		      unsigned int min_write, unsigned int max_bs)
{
	io_u->buf_filled_len = 0;
	fill_io_buffer(td, io_u->buf, min_write, max_bs);
}
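/*
 * Illustrative sketch, not fio code: how fill_io_buffer() above chooses the
 * randomization segment when a compression percentage is requested. The
 * helper name is made up for the example.
 */
static unsigned int example_compress_seg(unsigned int min_write,
					 unsigned int compress_chunk)
{
	unsigned int seg = min_write;

	/* cap the segment at the configured compress chunk... */
	if (compress_chunk && compress_chunk < seg)
		seg = compress_chunk;
	/* ...but fall back to min_write if that would leave it zero */
	if (!seg)
		seg = min_write;

	return seg;
}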