/*
 * Pick a random block offset for @ddir on @f, storing it in *b.
 * Returns 0 on success, 1 if no usable block could be found.
 *
 * Strategy: up to 5 purely random picks; if all land on busy blocks
 * in the random map, try 10 random starting points for a linear
 * free-block scan; finally fall back to an exhaustive scan from the
 * start of the map.
 */
static int get_next_rand_offset(struct thread_data *td, struct fio_file *f,
				enum fio_ddir ddir, unsigned long long *b)
{
	unsigned long long r, lastb;
	int loops = 5;

	lastb = last_block(td, f, ddir);
	if (!lastb)
		return 1;

	do {
		r = os_random_long(&td->random_state);
		dprint(FD_RANDOM, "off rand %llu\n", r);
		/* scale r from [0, OS_RAND_MAX] into [0, lastb - 1] */
		*b = (lastb - 1) * (r / ((unsigned long long) OS_RAND_MAX + 1.0));

		/*
		 * if we are not maintaining a random map, we are done.
		 */
		if (!file_randommap(td, f))
			return 0;

		/*
		 * calculate map offset and check if it's free
		 */
		if (random_map_free(f, *b))
			return 0;

		dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n",
									*b);
	} while (--loops);

	/*
	 * we get here, if we didn't succeed in looking up a block. generate
	 * a random start offset into the filemap, and find the first free
	 * block from there.
	 *
	 * NOTE: the first pass reuses the last r drawn above; fresh values
	 * are drawn at the bottom of the loop.
	 */
	loops = 10;
	do {
		f->last_free_lookup = (f->num_maps - 1) *
					(r / (OS_RAND_MAX + 1.0));
		if (!get_next_free_block(td, f, ddir, b))
			return 0;

		r = os_random_long(&td->random_state);
	} while (--loops);

	/*
	 * that didn't work either, try exhaustive search from the start
	 */
	f->last_free_lookup = 0;
	return get_next_free_block(td, f, ddir, b);
}
/* * Get next file to service by choosing one at random */ static struct fio_file *get_next_file_rand(struct thread_data *td, enum fio_file_flags goodf, enum fio_file_flags badf) { struct fio_file *f; int fno; do { long r = os_random_long(&td->next_file_state); int opened = 0; fno = (unsigned int) ((double) td->o.nr_files * (r / (OS_RAND_MAX + 1.0))); f = td->files[fno]; if (fio_file_done(f)) continue; if (!fio_file_open(f)) { int err; err = td_io_open_file(td, f); if (err) continue; opened = 1; } if ((!goodf || (f->flags & goodf)) && !(f->flags & badf)) { dprint(FD_FILE, "get_next_file_rand: %p\n", f); return f; } if (opened) td_io_close_file(td, f); } while (1); }
static unsigned int __get_next_buflen(struct thread_data *td, struct io_u *io_u) { const int ddir = io_u->ddir; unsigned int uninitialized_var(buflen); unsigned int minbs, maxbs; unsigned long r, rand_max; assert(ddir_rw(ddir)); minbs = td->o.min_bs[ddir]; maxbs = td->o.max_bs[ddir]; if (minbs == maxbs) return minbs; /* * If we can't satisfy the min block size from here, then fail */ if (!io_u_fits(td, io_u, minbs)) return 0; if (td->o.use_os_rand) rand_max = OS_RAND_MAX; else rand_max = FRAND_MAX; do { if (td->o.use_os_rand) r = os_random_long(&td->bsrange_state); else r = __rand(&td->__bsrange_state); if (!td->o.bssplit_nr[ddir]) { buflen = 1 + (unsigned int) ((double) maxbs * (r / (rand_max + 1.0))); if (buflen < minbs) buflen = minbs; } else { long perc = 0; unsigned int i; for (i = 0; i < td->o.bssplit_nr[ddir]; i++) { struct bssplit *bsp = &td->o.bssplit[ddir][i]; buflen = bsp->bs; perc += bsp->perc; if ((r <= ((rand_max / 100L) * perc)) && io_u_fits(td, io_u, buflen)) break; } } if (!td->o.bs_unaligned && is_power_of_2(minbs)) buflen = (buflen + minbs - 1) & ~(minbs - 1); } while (!io_u_fits(td, io_u, buflen)); return buflen; }
static int __get_next_rand_offset(struct thread_data *td, struct fio_file *f, enum fio_ddir ddir, uint64_t *b) { uint64_t r, lastb; lastb = last_block(td, f, ddir); if (!lastb) return 1; if (td->o.random_generator == FIO_RAND_GEN_TAUSWORTHE) { uint64_t rmax; rmax = td->o.use_os_rand ? OS_RAND_MAX : FRAND_MAX; if (td->o.use_os_rand) { rmax = OS_RAND_MAX; r = os_random_long(&td->random_state); } else { rmax = FRAND_MAX; r = __rand(&td->__random_state); } dprint(FD_RANDOM, "off rand %llu\n", (unsigned long long) r); *b = (lastb - 1) * (r / ((uint64_t) rmax + 1.0)); } else { uint64_t off = 0; if (lfsr_next(&f->lfsr, &off, lastb)) return 1; *b = off; } /* * if we are not maintaining a random map, we are done. */ if (!file_randommap(td, f)) goto ret; /* * calculate map offset and check if it's free */ if (random_map_free(f, *b)) goto ret; dprint(FD_RANDOM, "get_next_rand_offset: offset %llu busy\n", (unsigned long long) *b); *b = axmap_next_free(f->io_axmap, *b); if (*b == (uint64_t) -1ULL) return 1; ret: return 0; }
/*
 * Roll a percentage in [1, 100] and map it onto the configured
 * read/write mix: reads win if the roll falls inside rwmix[DDIR_READ].
 */
static inline enum fio_ddir get_rand_ddir(struct thread_data *td)
{
	long rnd = os_random_long(&td->rwmix_state);
	unsigned int v = 1 + (int) (100.0 * (rnd / (OS_RAND_MAX + 1.0)));

	return v <= td->o.rwmix[DDIR_READ] ? DDIR_READ : DDIR_WRITE;
}
/*
 * Pick a random file size in [file_size_low, file_size_high), rounded
 * down to a multiple of the minimum rw block size.
 */
static unsigned long long get_rand_file_size(struct thread_data *td)
{
	unsigned long long span, ret;
	long rnd;

	rnd = os_random_long(&td->file_size_state);
	span = td->o.file_size_high - td->o.file_size_low;

	ret = td->o.file_size_low +
		(unsigned long long) ((double) span *
					(rnd / (OS_RAND_MAX + 1.0)));

	/* align down to the minimum block size */
	ret -= ret % td->o.rw_min_bs;
	return ret;
}
/*
 * Decide whether this io_u should be turned into a trim, based on the
 * configured trim_percentage. Returns non-zero if it should be trimmed.
 */
int io_u_should_trim(struct thread_data *td, struct io_u *io_u)
{
	unsigned long long threshold;
	long rnd;

	/* trimming disabled entirely */
	if (!td->o.trim_percentage)
		return 0;

	rnd = os_random_long(&td->trim_state);

	/* trim_percentage percent of the RNG range */
	threshold = (OS_RAND_MAX / 100ULL) *
			(unsigned long long) td->o.trim_percentage;

	return rnd <= threshold;
}
/*
 * Pick a buffer length for io_u, between min_bs and max_bs for its
 * data direction. If a bssplit table is configured, the length is a
 * weighted pick from it; otherwise it's uniform in [minbs, maxbs].
 * The result is clamped to minbs if it would run past the end of the
 * file.
 */
static unsigned int __get_next_buflen(struct thread_data *td,
				      struct io_u *io_u)
{
	const int ddir = io_u->ddir;
	unsigned int uninitialized_var(buflen);
	unsigned int minbs, maxbs;
	long r;

	assert(ddir_rw(ddir));

	minbs = td->o.min_bs[ddir];
	maxbs = td->o.max_bs[ddir];

	if (minbs == maxbs)
		/* fixed block size, nothing to pick */
		buflen = minbs;
	else {
		r = os_random_long(&td->bsrange_state);
		if (!td->o.bssplit_nr[ddir]) {
			/* uniform pick in [minbs, maxbs] */
			buflen = 1 + (unsigned int) ((double) maxbs *
					(r / (OS_RAND_MAX + 1.0)));
			if (buflen < minbs)
				buflen = minbs;
		} else {
			/*
			 * weighted pick from the bssplit table; if no
			 * bucket matches, buflen keeps the last entry's bs
			 */
			long perc = 0;
			unsigned int i;

			for (i = 0; i < td->o.bssplit_nr[ddir]; i++) {
				struct bssplit *bsp = &td->o.bssplit[ddir][i];

				buflen = bsp->bs;
				perc += bsp->perc;
				if (r <= ((OS_RAND_MAX / 100L) * perc))
					break;
			}
		}
		/* round up to the min block size, if alignment is wanted */
		if (!td->o.bs_unaligned && is_power_of_2(minbs))
			buflen = (buflen + minbs - 1) & ~(minbs - 1);
	}

	/* don't run past the end of the file; fall back to minbs */
	if (io_u->offset + buflen > io_u->file->real_file_size) {
		dprint(FD_IO, "lower buflen %u -> %u (ddir=%d)\n", buflen,
						minbs, ddir);
		buflen = minbs;
	}

	return buflen;
}
/*
 * For mixed sequential/random workloads: roll a percentage in
 * [1, 100] and return non-zero if this I/O should be random.
 */
static int should_do_random(struct thread_data *td, enum fio_ddir ddir)
{
	unsigned long rnd;
	double frac;
	unsigned int v;

	/* shortcut: fully random needs no dice roll */
	if (td->o.perc_rand[ddir] == 100)
		return 1;

	if (td->o.use_os_rand) {
		rnd = os_random_long(&td->seq_rand_state[ddir]);
		frac = rnd / (OS_RAND_MAX + 1.0);
	} else {
		rnd = __rand(&td->__seq_rand_state[ddir]);
		frac = rnd / (FRAND_MAX + 1.0);
	}

	v = 1 + (int) (100.0 * frac);
	return v <= td->o.perc_rand[ddir];
}
/*
 * Pick a random start delay in [start_delay, start_delay_high],
 * using whichever random generator the job is configured for.
 */
static unsigned long long get_rand_start_delay(struct thread_data *td)
{
	unsigned long long range;
	unsigned long rnd;
	double frac;

	range = td->o.start_delay_high - td->o.start_delay;

	if (td->o.use_os_rand) {
		rnd = os_random_long(&td->delay_state);
		frac = rnd / (OS_RAND_MAX + 1.0);
	} else {
		rnd = __rand(&td->__delay_state);
		frac = rnd / (FRAND_MAX + 1.0);
	}

	return td->o.start_delay +
		(unsigned long long) ((double) range * frac);
}
int do_unit(unsigned long bytes, struct drand48_data *rand_data) { unsigned long i; unsigned long *p; int rep; p = prealloc ? : allocate(bytes); for (rep = 0; rep < reps; rep++) { unsigned long m = bytes / sizeof(*p); if (rep > 0 && !quiet) { printf("."); fflush(stdout); } for (i = 0; i < m; i += step / sizeof(*p)) { volatile long d; unsigned long idx = i; if (opt_randomise) idx = os_random_long(m - 1, rand_data); /* verify last write */ if (rep && !opt_readonly && !opt_randomise && p[idx] != idx) { fprintf(stderr, "Data wrong at offset 0x%lx. " "Expected 0x%08lx, got 0x%08lx\n", idx * sizeof(*p), idx, p[idx]); exit(1); } /* read / write */ if (opt_readonly) d = p[idx]; else p[idx] = idx; if (!(i & 0xffff) && runtime_exceeded()) { rep = reps; break; } } if (msync_mode) msync(p, bytes, msync_mode); } if (do_getchar) { for ( ; ; ) { switch (getchar()) { case 'u': printf("munlocking\n"); munlock(p, bytes); break; case 'l': printf("munlocking\n"); mlock(p, bytes); break; } } } return 0; }