unsigned long fill_random_buf(void *buf, unsigned int len)
{
	unsigned long r = __rand(&__fio_rand_state);

	if (sizeof(int) != sizeof(long *))
		r *= (unsigned long) __rand(&__fio_rand_state);

	__fill_random_buf(buf, len, r);
	return r;
}
unsigned long fill_random_buf(struct frand_state *fs, void *buf,
			      unsigned int len)
{
	unsigned long r = __rand(fs);

	if (sizeof(int) != sizeof(long *))
		r *= (unsigned long) __rand(fs);

	__fill_random_buf(buf, len, r);
	return r;
}
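/*
 * Usage sketch (illustrative only, not part of the snippets above): seed a
 * local frand_state with init_rand() and fill a buffer with reproducible
 * pseudo-random contents. The returned value is the seed that was used, so
 * the same buffer can be regenerated later via __fill_random_buf().
 */
static void example_fill_buffer(void)
{
	struct frand_state fs;
	char buf[4096];
	unsigned long seed;

	init_rand(&fs);
	seed = fill_random_buf(&fs, buf, sizeof(buf));
	(void) seed;	/* e.g. store alongside the data for later verification */
}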
unsigned long fill_random_buf_percentage(struct frand_state *fs, void *buf,
					 unsigned int percentage,
					 unsigned int segment, unsigned int len,
					 char *pattern, unsigned int pbytes)
{
	unsigned long r = __rand(fs);

	if (sizeof(int) != sizeof(long *))
		r *= (unsigned long) __rand(fs);

	__fill_random_buf_percentage(r, buf, percentage, segment, len,
				     pattern, pbytes);
	return r;
}
unsigned long long zipf_next(struct zipf_state *zs)
{
	double alpha, eta, rand_uni, rand_z;
	unsigned long long n = zs->nranges;
	unsigned long long val;

	alpha = 1.0 / (1.0 - zs->theta);
	eta = (1.0 - pow(2.0 / n, 1.0 - zs->theta)) /
		(1.0 - zs->zeta2 / zs->zetan);

	rand_uni = (double) __rand(&zs->rand) / (double) FRAND32_MAX;
	rand_z = rand_uni * zs->zetan;

	if (rand_z < 1.0)
		val = 1;
	else if (rand_z < (1.0 + pow(0.5, zs->theta)))
		val = 2;
	else
		val = 1 + (unsigned long long)(n *
				pow(eta * rand_uni - eta + 1.0, alpha));

	val--;

	if (!zs->disable_hash)
		val = __hash_u64(val);

	return (val + zs->rand_off) % zs->nranges;
}
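/*
 * Sketch of how zipf_next() might be driven (illustrative; it assumes a
 * zipf_init(zs, nranges, theta, seed) style initializer, which is NOT part
 * of the snippets above). Each call returns a range index in [0, nranges),
 * with a small set of indices receiving most hits according to the theta
 * skew; __hash_u64() scrambles which concrete indices those are.
 */
static void example_zipf_draws(void)
{
	struct zipf_state zs;
	int i;

	zipf_init(&zs, 1000, 1.2, 0x1234);	/* hypothetical initializer */
	for (i = 0; i < 16; i++)
		printf("%llu\n", zipf_next(&zs));
}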
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u)
{
	const int ddir = io_u->ddir;
	unsigned int uninitialized_var(buflen);
	unsigned int minbs, maxbs;
	unsigned long r, rand_max;

	assert(ddir_rw(ddir));

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		return minbs;

	/*
	 * If we can't satisfy the min block size from here, then fail
	 */
	if (!io_u_fits(td, io_u, minbs))
		return 0;

	if (td->o.use_os_rand)
		rand_max = OS_RAND_MAX;
	else
		rand_max = FRAND_MAX;

	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->bsrange_state);
		else
			r = __rand(&td->__bsrange_state);

		if (!td->o.bssplit_nr[ddir]) {
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (rand_max + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if ((r <= ((rand_max / 100L) * perc)) &&
				    io_u_fits(td, io_u, buflen))
					break;
			}
		}

		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);

	} while (!io_u_fits(td, io_u, buflen));

	return buflen;
}
__declspec(naked) void nseel_asm_rand(void)
{
	FUNC1_ENTER

	*__nextBlock = __rand(parm_a);

	FUNC_LEAVE
}
static void shared_rand_init(struct zipf_state *zs, unsigned long nranges,
			     unsigned int seed)
{
	memset(zs, 0, sizeof(*zs));
	zs->nranges = nranges;

	init_rand_seed(&zs->rand, seed, 0);
	zs->rand_off = __rand(&zs->rand);
}
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r, lastb;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) {
		uint64_t rmax;

		if (td->o.use_os_rand) {
			rmax = OS_RAND_MAX;
			r = os_random_long(&td->random_state);
		} else {
			rmax = FRAND_MAX;
			r = __rand(&td->__random_state);
		}

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0));
	} else {
		uint64_t off = 0;

		if (lfsr_next(&f->lfsr, &off, lastb))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
			(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}
uint16_t random_port(void)
{
	uint16_t min = 0x4000;
	uint16_t max = 0x7FFF;
	int ret = __rand();

	/*
	 * Force the result into [0x4000, 0x7FFF]: OR-ing with min sets bit 14,
	 * AND-ing with max clears bit 15 and above.
	 */
	ret = ret | min;
	ret = ret & max;

	return (uint16_t) ret;
}
unsigned long long pareto_next(struct zipf_state *zs)
{
	double rand = (double) __rand(&zs->rand) / (double) FRAND32_MAX;
	unsigned long long n;

	n = (zs->nranges - 1) * pow(rand, zs->pareto_pow);

	if (!zs->disable_hash)
		n = __hash_u64(n);

	return (n + zs->rand_off) % zs->nranges;
}
static void __init_rand(struct frand_state *state, unsigned int seed)
{
	int cranks = 6;

#define LCG(x, seed)  ((x) * 69069 ^ (seed))

	/*
	 * Note: '^' below is bitwise XOR, not exponentiation, so the constant
	 * (2^31) + (2^17) + (2^7) evaluates to 29 + 19 + 5 = 53. This matches
	 * the original seeding behaviour and is deliberately kept as-is.
	 */
	state->s1 = __seed(LCG((2^31) + (2^17) + (2^7), seed), 1);
	state->s2 = __seed(LCG(state->s1, seed), 7);
	state->s3 = __seed(LCG(state->s2, seed), 15);

	while (cranks--)
		__rand(state);
}
static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return 1;

	r = __rand(&td->seq_rand_state[ddir]);
	v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));

	return v <= td->o.perc_rand[ddir];
}
static int gauss_dev(struct gauss_state *gs)
{
	unsigned int r;
	int vr;

	if (!gs->stddev)
		return 0;

	r = __rand(&gs->r);
	vr = gs->stddev * (r / (FRAND32_MAX + 1.0));

	return vr - gs->stddev / 2;
}
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				  enum fio_ddir ddir, uint64_t *b)
{
	uint64_t r;

	if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE ||
	    td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE64) {
		uint64_t frand_max, lastb;

		lastb = last_block(td, f, ddir);
		if (!lastb)
			return 1;

		frand_max = rand_max(&td->random_state);
		r = __rand(&td->random_state);

		dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r);

		*b = lastb * (r / ((uint64_t) frand_max + 1.0));
	} else {
		uint64_t off = 0;

		assert(fio_file_lfsr(f));

		if (lfsr_next(&f->lfsr, &off))
			return 1;

		*b = off;
	}

	/*
	 * if we are not maintaining a random map, we are done.
	 */
	if (!file_randommap(td, f))
		goto ret;

	/*
	 * calculate map offset and check if it's free
	 */
	if (random_map_free(f, *b))
		goto ret;

	dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
			(unsigned long long) *b);

	*b = axmap_next_free(f->io_axmap, *b);
	if (*b == (uint64_t) -1ULL)
		return 1;
ret:
	return 0;
}
unsigned long long get_rand_file_size(struct thread_data *td)
{
	unsigned long long ret, sized;
	uint64_t frand_max;
	unsigned long r;

	frand_max = rand_max(&td->file_size_state);
	r = __rand(&td->file_size_state);

	sized = td->o.file_size_high - td->o.file_size_low;
	ret = (unsigned long long) ((double) sized * (r / (frand_max + 1.0)));
	ret += td->o.file_size_low;
	ret -= (ret % td->o.rw_min_bs);
	return ret;
}
static bool should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	uint64_t frand_max;
	unsigned int v;
	unsigned long r;

	if (td->o.perc_rand[ddir] == 100)
		return true;

	frand_max = rand_max(&td->seq_rand_state[ddir]);
	r = __rand(&td->seq_rand_state[ddir]);

	v = 1 + (int) (100.0 * (r / (frand_max + 1.0)));

	return v <= td->o.perc_rand[ddir];
}
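/*
 * Illustrative helper (not part of the snippets above) isolating the
 * uniform-to-percentage mapping used by should_do_random() and
 * get_rand_ddir(): a raw draw r in [0, frand_max] is scaled into an integer
 * v in [1, 100], which callers compare against a configured percentage.
 */
static unsigned int rand_percentage(struct frand_state *fs)
{
	uint64_t frand_max = rand_max(fs);
	unsigned long r = __rand(fs);

	return 1 + (unsigned int) (100.0 * (r / (frand_max + 1.0)));
}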
/*
 * Get next file to service by choosing one at random
 */
static struct fio_file *get_next_file_rand(struct thread_data *td,
					   enum fio_file_flags goodf,
					   enum fio_file_flags badf)
{
	struct fio_file *f;
	int fno;

	do {
		int opened = 0;
		unsigned long r;

		if (td->o.use_os_rand) {
			r = os_random_long(&td->next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (OS_RAND_MAX + 1.0)));
		} else {
			r = __rand(&td->__next_file_state);
			fno = (unsigned int) ((double) td->o.nr_files
				* (r / (FRAND_MAX + 1.0)));
		}

		f = td->files[fno];
		if (fio_file_done(f))
			continue;

		if (!fio_file_open(f)) {
			int err;

			if (td->nr_open_files >= td->o.open_files)
				return ERR_PTR(-EBUSY);

			err = td_io_open_file(td, f);
			if (err)
				continue;
			opened = 1;
		}

		if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) {
			dprint(FD_FILE, "get_next_file_rand: %p\n", f);
			return f;
		}
		if (opened)
			td_io_close_file(td, f);
	} while (1);
}
static unsigned long long get_rand_start_delay(struct thread_data *td)
{
	unsigned long long delayrange;
	unsigned long r;

	delayrange = td->o.start_delay_high - td->o.start_delay;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->delay_state);
		delayrange = (unsigned long long) ((double) delayrange *
				(r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__delay_state);
		delayrange = (unsigned long long) ((double) delayrange *
				(r / (FRAND_MAX + 1.0)));
	}

	delayrange += td->o.start_delay;
	return delayrange;
}
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	unsigned int v;
	unsigned long r;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->rwmix_state);
		v = 1 + (int) (100.0 * (r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__rwmix_state);
		v = 1 + (int) (100.0 * (r / (FRAND_MAX + 1.0)));
	}

	if (v <= td->o.rwmix[DDIR_READ])
		return DDIR_READ;

	return DDIR_WRITE;
}
static unsigned long long get_rand_file_size(struct thread_data *td)
{
	unsigned long long ret, sized;
	unsigned long r;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->file_size_state);
		sized = td->o.file_size_high - td->o.file_size_low;
		ret = (unsigned long long) ((double) sized *
				(r / (OS_RAND_MAX + 1.0)));
	} else {
		r = __rand(&td->__file_size_state);
		sized = td->o.file_size_high - td->o.file_size_low;
		ret = (unsigned long long) ((double) sized *
				(r / (FRAND_MAX + 1.0)));
	}

	ret += td->o.file_size_low;
	ret -= (ret % td->o.rw_min_bs);
	return ret;
}
int io_u_should_trim(struct thread_data *td, struct io_u *io_u)
{
	unsigned long long val;
	unsigned long r;

	if (!td->o.trim_percentage)
		return 0;

	if (td->o.use_os_rand) {
		r = os_random_long(&td->trim_state);
		val = (OS_RAND_MAX / 100ULL);
	} else {
		r = __rand(&td->__trim_state);
		val = (FRAND_MAX / 100ULL);
	}

	val *= (unsigned long long) td->o.trim_percentage;
	return r <= val;
}
unsigned long long gauss_next(struct gauss_state *gs)
{
	unsigned long long sum = 0;
	int i;

	for (i = 0; i < GAUSS_ITERS; i++)
		sum += __rand(&gs->r) % (gs->nranges + 1);

	sum = (sum + GAUSS_ITERS - 1) / GAUSS_ITERS;

	if (gs->stddev) {
		int dev = gauss_dev(gs);

		while (dev + sum >= gs->nranges)
			dev /= 2;

		sum += dev;
	}

	return __hash_u64(sum) % gs->nranges;
}
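/*
 * Usage sketch (illustrative; it assumes a gauss_init(gs, nranges, dev, seed)
 * style initializer, which is NOT part of the snippets above). gauss_next()
 * averages GAUSS_ITERS uniform draws, which by the central limit theorem
 * clusters the pre-hash sum around nranges / 2; gauss_dev() then adds a
 * bounded offset scaled by the configured stddev, and the final __hash_u64()
 * scrambles which concrete indices receive the clustered hits, so the hot
 * region is not simply the middle of the range.
 */
static void example_gauss_draws(void)
{
	struct gauss_state gs;
	int i;

	gauss_init(&gs, 1000, 10.0, 0x1234);	/* hypothetical initializer */
	for (i = 0; i < 16; i++)
		printf("%llu\n", gauss_next(&gs));
}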
void init_rand(struct frand_state *state)
{
#define LCG(x) ((x) * 69069)	/* super-duper LCG */

	/*
	 * As in __init_rand() above, '^' is bitwise XOR, so
	 * (2^31) + (2^17) + (2^7) evaluates to 53; kept as-is to preserve
	 * the original seeding behaviour.
	 */
	state->s1 = __seed(LCG((2^31) + (2^17) + (2^7)), 1);
	state->s2 = __seed(LCG(state->s1), 7);
	state->s3 = __seed(LCG(state->s2), 15);

	/* crank the generator a few times to mix the initial state */
	__rand(state);
	__rand(state);
	__rand(state);
	__rand(state);
	__rand(state);
	__rand(state);
}
static int fio_rdmaio_send(struct thread_data *td, struct io_u **io_us,
			   unsigned int nr)
{
	struct rdmaio_data *rd = td->io_ops->data;
	struct ibv_send_wr *bad_wr;
#if 0
	enum ibv_wc_opcode comp_opcode;
	comp_opcode = IBV_WC_RDMA_WRITE;
#endif
	int i;
	long index;
	struct rdma_io_u_data *r_io_u_d;

	r_io_u_d = NULL;

	for (i = 0; i < nr; i++) {
		/* RDMA_WRITE or RDMA_READ */
		switch (rd->rdma_protocol) {
		case FIO_RDMA_MEM_WRITE:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = __rand(&rd->rand_state) % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_WRITE;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr = \
				rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_MEM_READ:
			/* compose work request */
			r_io_u_d = io_us[i]->engine_data;
			index = __rand(&rd->rand_state) % rd->rmt_nr;
			r_io_u_d->sq_wr.opcode = IBV_WR_RDMA_READ;
			r_io_u_d->sq_wr.wr.rdma.rkey = rd->rmt_us[index].rkey;
			r_io_u_d->sq_wr.wr.rdma.remote_addr = \
				rd->rmt_us[index].buf;
			r_io_u_d->sq_wr.sg_list->length = io_us[i]->buflen;
			break;
		case FIO_RDMA_CHA_SEND:
			r_io_u_d = io_us[i]->engine_data;
			r_io_u_d->sq_wr.opcode = IBV_WR_SEND;
			r_io_u_d->sq_wr.send_flags = IBV_SEND_SIGNALED;
			break;
		default:
			log_err("fio: unknown rdma protocol - %d\n",
				rd->rdma_protocol);
			break;
		}

		if (ibv_post_send(rd->qp, &r_io_u_d->sq_wr, &bad_wr) != 0) {
			log_err("fio: ibv_post_send fail\n");
			return -1;
		}

		dprint_io_u(io_us[i], "fio_rdmaio_send");
	}

	/* wait for completion
	rdma_poll_wait(td, comp_opcode); */

	return i;
}
static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha512 *vh = hdr_priv(hdr);
	struct fio_sha512_ctx sha512_ctx = {
		.buf = vh->sha512,
	};

	fio_sha512_init(&sha512_ctx);
	fio_sha512_update(&sha512_ctx, p, len);
}

static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha256 *vh = hdr_priv(hdr);
	struct fio_sha256_ctx sha256_ctx = {
		.buf = vh->sha256,
	};

	fio_sha256_init(&sha256_ctx);
	fio_sha256_update(&sha256_ctx, p, len);
}

static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha1 *vh = hdr_priv(hdr);
	struct fio_sha1_ctx sha1_ctx = {
		.H = vh->sha1,
	};

	fio_sha1_init(&sha1_ctx);
	fio_sha1_update(&sha1_ctx, p, len);
}

static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc7 *vh = hdr_priv(hdr);

	vh->crc7 = fio_crc7(p, len);
}

static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc16 *vh = hdr_priv(hdr);

	vh->crc16 = fio_crc16(p, len);
}

static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32(p, len);
}

static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32c(p, len);
}

static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc64 *vh = hdr_priv(hdr);

	vh->crc64 = fio_crc64(p, len);
}

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_md5 *vh = hdr_priv(hdr);
	struct fio_md5_ctx md5_ctx = {
		.hash = (uint32_t *) vh->md5_digest,
	};

	fio_md5_init(&md5_ctx);
	fio_md5_update(&md5_ctx, p, len);
}

static void populate_hdr(struct thread_data *td, struct io_u *io_u,
			 struct verify_header *hdr, unsigned int header_num,
			 unsigned int header_len)
{
	unsigned int data_len;
	void *data, *p;

	p = (void *) hdr;

	hdr->magic = FIO_HDR_MAGIC;
	hdr->verify_type = td->o.verify;
	hdr->len = header_len;
	hdr->rand_seed = io_u->rand_seed;
	hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));

	data_len = header_len - hdr_size(hdr);

	data = p + hdr_size(hdr);
	switch (td->o.verify) {
	case VERIFY_MD5:
		dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_md5(hdr, data, data_len);
		break;
	case VERIFY_CRC64:
		dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc64(hdr, data, data_len);
		break;
	case VERIFY_CRC32C:
	case VERIFY_CRC32C_INTEL:
		dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32c(hdr, data, data_len);
		break;
	case VERIFY_CRC32:
		dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32(hdr, data, data_len);
		break;
	case VERIFY_CRC16:
		dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc16(hdr, data, data_len);
		break;
	case VERIFY_CRC7:
		dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc7(hdr, data, data_len);
		break;
	case VERIFY_SHA256:
		dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha256(hdr, data, data_len);
		break;
	case VERIFY_SHA512:
		dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha512(hdr, data, data_len);
		break;
	case VERIFY_XXHASH:
		dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
						io_u, hdr->len);
		fill_xxhash(hdr, data, data_len);
		break;
	case VERIFY_META:
		dprint(FD_VERIFY, "fill meta io_u %p, len %u\n",
						io_u, hdr->len);
		fill_meta(hdr, td, io_u, header_num);
		break;
	case VERIFY_SHA1:
		dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha1(hdr, data, data_len);
		break;
	case VERIFY_PATTERN:
		/* nothing to do here */
		break;
	default:
		log_err("fio: bad verify type: %d\n", td->o.verify);
		assert(0);
	}

	if (td->o.verify_offset)
		memswp(p, p + td->o.verify_offset, hdr_size(hdr));
}

/*
 * fill body of io_u->buf with random data and add a header with the
 * checksum of choice
 */
void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.verify == VERIFY_NULL)
		return;

	io_u->numberio = td->io_issues[io_u->ddir];

	fill_pattern_headers(td, io_u, 0, 0);
}

int get_next_verify(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = NULL;

	/*
	 * this io_u is from a requeue, we already filled the offsets
	 */
	if (io_u->file)
		return 0;

	if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
		struct rb_node *n = rb_first(&td->io_hist_tree);

		ipo = rb_entry(n, struct io_piece, rb_node);

		/*
		 * Ensure that the associated IO has completed
		 */
		read_barrier();
		if (ipo->flags & IP_F_IN_FLIGHT)
			goto nothing;

		rb_erase(n, &td->io_hist_tree);
		assert(ipo->flags & IP_F_ONRB);
		ipo->flags &= ~IP_F_ONRB;
	} else if (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);

		/*
		 * Ensure that the associated IO has completed
		 */
		read_barrier();
		if (ipo->flags & IP_F_IN_FLIGHT)
			goto nothing;

		flist_del(&ipo->list);
		assert(ipo->flags & IP_F_ONLIST);
		ipo->flags &= ~IP_F_ONLIST;
	}

	if (ipo) {
		td->io_hist_len--;

		io_u->offset = ipo->offset;
		io_u->buflen = ipo->len;
		io_u->numberio = ipo->numberio;
		io_u->file = ipo->file;
		io_u->flags |= IO_U_F_VER_LIST;

		if (ipo->flags & IP_F_TRIMMED)
			io_u->flags |= IO_U_F_TRIMMED;

		if (!fio_file_open(io_u->file)) {
			int r = td_io_open_file(td, io_u->file);

			if (r) {
				dprint(FD_VERIFY, "failed file %s open\n",
						io_u->file->file_name);
				return 1;
			}
		}

		get_file(ipo->file);
		assert(fio_file_open(io_u->file));
		io_u->ddir = DDIR_READ;
		io_u->xfer_buf = io_u->buf;
		io_u->xfer_buflen = io_u->buflen;

		remove_trim_entry(td, ipo);
		free(ipo);
		dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);

		if (!td->o.verify_pattern_bytes) {
			io_u->rand_seed = __rand(&td->verify_state);
			if (sizeof(int) != sizeof(long *))
				io_u->rand_seed *= __rand(&td->verify_state);
		}
		return 0;
	}

nothing:
	dprint(FD_VERIFY, "get_next_verify: empty\n");
	return 1;
}

void fio_verify_init(struct thread_data *td)
{
	if (td->o.verify == VERIFY_CRC32C_INTEL ||
	    td->o.verify == VERIFY_CRC32C) {
		crc32c_intel_probe();
	}
}

static void *verify_async_thread(void *data)
{
	struct thread_data *td = data;
	struct io_u *io_u;
	int ret = 0;

	if (td->o.verify_cpumask_set &&
	    fio_setaffinity(td->pid, td->o.verify_cpumask)) {
		log_err("fio: failed setting verify thread affinity\n");
		goto done;
	}

	do {
		FLIST_HEAD(list);

		read_barrier();
		if (td->verify_thread_exit)
			break;

		pthread_mutex_lock(&td->io_u_lock);

		while (flist_empty(&td->verify_list) &&
		       !td->verify_thread_exit) {
			ret = pthread_cond_wait(&td->verify_cond,
							&td->io_u_lock);
			if (ret) {
				pthread_mutex_unlock(&td->io_u_lock);
				break;
			}
		}

		flist_splice_init(&td->verify_list, &list);
		pthread_mutex_unlock(&td->io_u_lock);

		if (flist_empty(&list))
			continue;

		while (!flist_empty(&list)) {
			io_u = flist_first_entry(&list, struct io_u, verify_list);
			flist_del_init(&io_u->verify_list);

			io_u->flags |= IO_U_F_NO_FILE_PUT;
			ret = verify_io_u(td, &io_u);

			put_io_u(td, io_u);
			if (!ret)
				continue;
			if (td_non_fatal_error(td, ERROR_TYPE_VERIFY_BIT, ret)) {
				update_error_count(td, ret);
				td_clear_error(td);
				ret = 0;
			}
		}
	} while (!ret);

	if (ret) {
		td_verror(td, ret, "async_verify");
		if (td->o.verify_fatal)
			fio_mark_td_terminate(td);
	}

done:
	pthread_mutex_lock(&td->io_u_lock);
	td->nr_verify_threads--;
	pthread_mutex_unlock(&td->io_u_lock);

	pthread_cond_signal(&td->free_cond);
	return NULL;
}

int verify_async_init(struct thread_data *td)
{
	int i, ret;
	pthread_attr_t attr;

	pthread_attr_init(&attr);
	pthread_attr_setstacksize(&attr, PTHREAD_STACK_MIN);

	td->verify_thread_exit = 0;

	td->verify_threads = malloc(sizeof(pthread_t) * td->o.verify_async);
	for (i = 0; i < td->o.verify_async; i++) {
		ret = pthread_create(&td->verify_threads[i], &attr,
					verify_async_thread, td);
		if (ret) {
			log_err("fio: async verify creation failed: %s\n",
					strerror(ret));
			break;
		}
		ret = pthread_detach(td->verify_threads[i]);
		if (ret) {
			log_err("fio: async verify thread detach failed: %s\n",
					strerror(ret));
			break;
		}
		td->nr_verify_threads++;
	}

	pthread_attr_destroy(&attr);

	if (i != td->o.verify_async) {
		log_err("fio: only %d verify threads started, exiting\n", i);
		td->verify_thread_exit = 1;
		write_barrier();
		pthread_cond_broadcast(&td->verify_cond);
		return 1;
	}

	return 0;
}
static void fill_sha512(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha512 *vh = hdr_priv(hdr);
	struct fio_sha512_ctx sha512_ctx = {
		.buf = vh->sha512,
	};

	fio_sha512_init(&sha512_ctx);
	fio_sha512_update(&sha512_ctx, p, len);
}

static void fill_sha256(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha256 *vh = hdr_priv(hdr);
	struct fio_sha256_ctx sha256_ctx = {
		.buf = vh->sha256,
	};

	fio_sha256_init(&sha256_ctx);
	fio_sha256_update(&sha256_ctx, p, len);
	fio_sha256_final(&sha256_ctx);
}

static void fill_sha1(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_sha1 *vh = hdr_priv(hdr);
	struct fio_sha1_ctx sha1_ctx = {
		.H = vh->sha1,
	};

	fio_sha1_init(&sha1_ctx);
	fio_sha1_update(&sha1_ctx, p, len);
	fio_sha1_final(&sha1_ctx);
}

static void fill_crc7(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc7 *vh = hdr_priv(hdr);

	vh->crc7 = fio_crc7(p, len);
}

static void fill_crc16(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc16 *vh = hdr_priv(hdr);

	vh->crc16 = fio_crc16(p, len);
}

static void fill_crc32(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32(p, len);
}

static void fill_crc32c(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc32 *vh = hdr_priv(hdr);

	vh->crc32 = fio_crc32c(p, len);
}

static void fill_crc64(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_crc64 *vh = hdr_priv(hdr);

	vh->crc64 = fio_crc64(p, len);
}

static void fill_md5(struct verify_header *hdr, void *p, unsigned int len)
{
	struct vhdr_md5 *vh = hdr_priv(hdr);
	struct fio_md5_ctx md5_ctx = {
		.hash = (uint32_t *) vh->md5_digest,
	};

	fio_md5_init(&md5_ctx);
	fio_md5_update(&md5_ctx, p, len);
	fio_md5_final(&md5_ctx);
}

static void __fill_hdr(struct verify_header *hdr, int verify_type,
		       uint32_t len, uint64_t rand_seed)
{
	void *p = hdr;

	hdr->magic = FIO_HDR_MAGIC;
	hdr->verify_type = verify_type;
	hdr->len = len;
	hdr->rand_seed = rand_seed;
	hdr->crc32 = fio_crc32c(p, offsetof(struct verify_header, crc32));
}

static void fill_hdr(struct verify_header *hdr, int verify_type, uint32_t len,
		     uint64_t rand_seed)
{
	if (verify_type != VERIFY_PATTERN_NO_HDR)
		__fill_hdr(hdr, verify_type, len, rand_seed);
}

static void populate_hdr(struct thread_data *td, struct io_u *io_u,
			 struct verify_header *hdr, unsigned int header_num,
			 unsigned int header_len)
{
	unsigned int data_len;
	void *data, *p;

	p = (void *) hdr;

	fill_hdr(hdr, td->o.verify, header_len, io_u->rand_seed);

	data_len = header_len - hdr_size(td, hdr);

	data = p + hdr_size(td, hdr);
	switch (td->o.verify) {
	case VERIFY_MD5:
		dprint(FD_VERIFY, "fill md5 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_md5(hdr, data, data_len);
		break;
	case VERIFY_CRC64:
		dprint(FD_VERIFY, "fill crc64 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc64(hdr, data, data_len);
		break;
	case VERIFY_CRC32C:
	case VERIFY_CRC32C_INTEL:
		dprint(FD_VERIFY, "fill crc32c io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32c(hdr, data, data_len);
		break;
	case VERIFY_CRC32:
		dprint(FD_VERIFY, "fill crc32 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc32(hdr, data, data_len);
		break;
	case VERIFY_CRC16:
		dprint(FD_VERIFY, "fill crc16 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc16(hdr, data, data_len);
		break;
	case VERIFY_CRC7:
		dprint(FD_VERIFY, "fill crc7 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_crc7(hdr, data, data_len);
		break;
	case VERIFY_SHA256:
		dprint(FD_VERIFY, "fill sha256 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha256(hdr, data, data_len);
		break;
	case VERIFY_SHA512:
		dprint(FD_VERIFY, "fill sha512 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha512(hdr, data, data_len);
		break;
	case VERIFY_XXHASH:
		dprint(FD_VERIFY, "fill xxhash io_u %p, len %u\n",
						io_u, hdr->len);
		fill_xxhash(hdr, data, data_len);
		break;
	case VERIFY_META:
		dprint(FD_VERIFY, "fill meta io_u %p, len %u\n",
						io_u, hdr->len);
		fill_meta(hdr, td, io_u, header_num);
		break;
	case VERIFY_SHA1:
		dprint(FD_VERIFY, "fill sha1 io_u %p, len %u\n",
						io_u, hdr->len);
		fill_sha1(hdr, data, data_len);
		break;
	case VERIFY_PATTERN:
	case VERIFY_PATTERN_NO_HDR:
		/* nothing to do here */
		break;
	default:
		log_err("fio: bad verify type: %d\n", td->o.verify);
		assert(0);
	}

	if (td->o.verify_offset && hdr_size(td, hdr))
		memswp(p, p + td->o.verify_offset, hdr_size(td, hdr));
}

/*
 * fill body of io_u->buf with random data and add a header with the
 * checksum of choice
 */
void populate_verify_io_u(struct thread_data *td, struct io_u *io_u)
{
	if (td->o.verify == VERIFY_NULL)
		return;

	io_u->numberio = td->io_issues[io_u->ddir];

	fill_pattern_headers(td, io_u, 0, 0);
}

int get_next_verify(struct thread_data *td, struct io_u *io_u)
{
	struct io_piece *ipo = NULL;

	/*
	 * this io_u is from a requeue, we already filled the offsets
	 */
	if (io_u->file)
		return 0;

	if (!RB_EMPTY_ROOT(&td->io_hist_tree)) {
		struct rb_node *n = rb_first(&td->io_hist_tree);

		ipo = rb_entry(n, struct io_piece, rb_node);

		/*
		 * Ensure that the associated IO has completed
		 */
		read_barrier();
		if (ipo->flags & IP_F_IN_FLIGHT)
			goto nothing;

		rb_erase(n, &td->io_hist_tree);
		assert(ipo->flags & IP_F_ONRB);
		ipo->flags &= ~IP_F_ONRB;
	} else if (!flist_empty(&td->io_hist_list)) {
		ipo = flist_first_entry(&td->io_hist_list, struct io_piece, list);

		/*
		 * Ensure that the associated IO has completed
		 */
		read_barrier();
		if (ipo->flags & IP_F_IN_FLIGHT)
			goto nothing;

		flist_del(&ipo->list);
		assert(ipo->flags & IP_F_ONLIST);
		ipo->flags &= ~IP_F_ONLIST;
	}

	if (ipo) {
		td->io_hist_len--;

		io_u->offset = ipo->offset;
		io_u->buflen = ipo->len;
		io_u->numberio = ipo->numberio;
		io_u->file = ipo->file;
		io_u_set(io_u, IO_U_F_VER_LIST);

		if (ipo->flags & IP_F_TRIMMED)
			io_u_set(io_u, IO_U_F_TRIMMED);

		if (!fio_file_open(io_u->file)) {
			int r = td_io_open_file(td, io_u->file);

			if (r) {
				dprint(FD_VERIFY, "failed file %s open\n",
						io_u->file->file_name);
				return 1;
			}
		}

		get_file(ipo->file);
		assert(fio_file_open(io_u->file));
		io_u->ddir = DDIR_READ;
		io_u->xfer_buf = io_u->buf;
		io_u->xfer_buflen = io_u->buflen;

		remove_trim_entry(td, ipo);
		free(ipo);
		dprint(FD_VERIFY, "get_next_verify: ret io_u %p\n", io_u);

		if (!td->o.verify_pattern_bytes) {
			io_u->rand_seed = __rand(&td->verify_state);
			if (sizeof(int) != sizeof(long *))
				io_u->rand_seed *= __rand(&td->verify_state);
		}
		return 0;
	}

nothing:
	dprint(FD_VERIFY, "get_next_verify: empty\n");
	return 1;
}
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, unsigned long long *b)
{
	unsigned long long rmax, r, lastb;
	int loops = 5;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	if (f->failed_rands >= 200)
		goto ffz;

	rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX;
	do {
		if (td->o.use_os_rand)
			r = os_random_long(&td->random_state);
		else
			r = __rand(&td->__random_state);

		*b = (lastb - 1) * (r / ((unsigned long long) rmax + 1.0));

		dprint(FD_RANDOM, "off rand %llu\n", r);

		/*
		 * if we are not maintaining a random map, we are done.
		 */
		if (!file_randommap(td, f))
			goto ret_good;

		/*
		 * calculate map offset and check if it's free
		 */
		if (random_map_free(f, *b))
			goto ret_good;

		dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
									*b);
	} while (--loops);

	if (!f->failed_rands++)
		f->last_free_lookup = 0;

	/*
	 * we get here, if we didn't succeed in looking up a block. generate
	 * a random start offset into the filemap, and find the first free
	 * block from there.
	 */
	loops = 10;
	do {
		f->last_free_lookup = (f->num_maps - 1) *
					(r / ((unsigned long long) rmax + 1.0));
		if (!get_next_free_block(td, f, ddir, b))
			goto ret;

		if (td->o.use_os_rand)
			r = os_random_long(&td->random_state);
		else
			r = __rand(&td->__random_state);
	} while (--loops);

	/*
	 * that didn't work either, try exhaustive search from the start
	 */
	f->last_free_lookup = 0;
ffz:
	if (!get_next_free_block(td, f, ddir, b))
		return 0;
	f->last_free_lookup = 0;
	return get_next_free_block(td, f, ddir, b);
ret_good:
	f->failed_rands = 0;
ret:
	return 0;
}