static void handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned int *bs)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));
	int fileno;

	init_ipo(ipo);
	fileno = trace_add_file(td, t->device);

	ios[DDIR_TRIM]++;
	if (t->bytes > bs[DDIR_TRIM])
		bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = t->sector * 512;
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
	       ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));

	init_ipo(ipo);

	/*
	 * the 512 is wrong here, it should be the hardware sector size...
	 */
	ipo->offset = offset * 512;
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
	       ipo->ddir, ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
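/*
 * Illustrative note (not part of fio): why the hardcoded 512 in the two
 * functions above is wrong. On a device with 4096-byte sectors, trace
 * sector 8 means byte offset 8 * 4096 = 32768, but the code computes
 * 8 * 512 = 4096, an 8x error. The reworked variants below fix this by
 * having trace_add_file() report the device's sector size.
 */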
static void handle_trace_discard(struct thread_data *td,
				 struct blk_io_trace *t,
				 unsigned long long ttime,
				 unsigned long *ios, unsigned int *rw_bs)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));
	unsigned int bs;
	int fileno;

	init_ipo(ipo);
	fileno = trace_add_file(td, t->device, &bs);

	ios[DDIR_TRIM]++;
	if (t->bytes > rw_bs[DDIR_TRIM])
		rw_bs[DDIR_TRIM] = t->bytes;

	td->o.size += t->bytes;

	memset(ipo, 0, sizeof(*ipo));
	INIT_FLIST_HEAD(&ipo->list);

	ipo->offset = t->sector * bs;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(&td->o, ipo);
	ipo->len = t->bytes;
	ipo->delay = ttime / 1000;
	ipo->ddir = DDIR_TRIM;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store discard, off=%llu, len=%lu, delay=%lu\n",
	       ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
/*
 * Store blk_io_trace data in an ipo for later retrieval.
 */
static void store_ipo(struct thread_data *td, unsigned long long offset,
		      unsigned int bytes, int rw, unsigned long long ttime,
		      int fileno, unsigned int bs)
{
	struct io_piece *ipo = malloc(sizeof(*ipo));

	init_ipo(ipo);

	ipo->offset = offset * bs;
	if (td->o.replay_scale)
		ipo->offset = ipo->offset / td->o.replay_scale;
	ipo_bytes_align(&td->o, ipo);
	ipo->len = bytes;
	ipo->delay = ttime / 1000;
	if (rw)
		ipo->ddir = DDIR_WRITE;
	else
		ipo->ddir = DDIR_READ;
	ipo->fileno = fileno;

	dprint(FD_BLKTRACE, "store ddir=%d, off=%llu, len=%lu, delay=%lu\n",
	       ipo->ddir, ipo->offset, ipo->len, ipo->delay);
	queue_io_piece(td, ipo);
}
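/*
 * Illustrative sketch (not part of fio): the offset arithmetic shared by
 * handle_trace_discard() and store_ipo() above. Assuming a 512-byte
 * sector size and replay_scale=2, trace sector 2048 maps to byte offset
 * 2048 * 512 / 2 = 524288, which ipo_bytes_align() may then round. The
 * hypothetical helper below just restates that math for clarity.
 */
static unsigned long long example_scaled_offset(unsigned long long sector,
						unsigned int bs,
						unsigned int replay_scale)
{
	unsigned long long offset = sector * bs;	/* sectors -> bytes */

	if (replay_scale)				/* shrink for replay target */
		offset /= replay_scale;
	return offset;
}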
static void trace_add_open_close_event(struct thread_data *td, int fileno,
				       enum file_log_act action)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = action;
	flist_add_tail(&ipo->list, &td->io_log_list);
}
static void trace_add_open_event(struct thread_data *td, int fileno)
{
	struct io_piece *ipo;

	ipo = calloc(1, sizeof(*ipo));
	init_ipo(ipo);

	ipo->ddir = DDIR_INVAL;
	ipo->fileno = fileno;
	ipo->file_action = FIO_LOG_OPEN_FILE;
	flist_add_tail(&ipo->list, &td->io_log_list);
}
/*
 * Read version 2 iolog data. It is enhanced to include per-file logging,
 * syncs, etc.
 */
static int read_iolog2(struct thread_data *td, FILE *f)
{
	unsigned long long offset;
	unsigned int bytes;
	int reads, writes, waits, fileno = 0, file_action = 0; /* stupid gcc */
	char *fname, *act;
	char *str, *p;
	enum fio_ddir rw;

	free_release_files(td);

	/*
	 * Read in the read iolog and store it, reuse the infrastructure
	 * for doing verifications.
	 */
	str = malloc(4096);
	fname = malloc(256+16);
	act = malloc(256+16);

	reads = writes = waits = 0;
	while ((p = fgets(str, 4096, f)) != NULL) {
		struct io_piece *ipo;
		int r;

		r = sscanf(p, "%256s %256s %llu %u", fname, act, &offset,
								&bytes);
		if (r == 4) {
			/*
			 * Check action first
			 */
			if (!strcmp(act, "wait"))
				rw = DDIR_WAIT;
			else if (!strcmp(act, "read"))
				rw = DDIR_READ;
			else if (!strcmp(act, "write"))
				rw = DDIR_WRITE;
			else if (!strcmp(act, "sync"))
				rw = DDIR_SYNC;
			else if (!strcmp(act, "datasync"))
				rw = DDIR_DATASYNC;
			else if (!strcmp(act, "trim"))
				rw = DDIR_TRIM;
			else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
			fileno = get_fileno(td, fname);
		} else if (r == 2) {
			rw = DDIR_INVAL;
			if (!strcmp(act, "add")) {
				fileno = add_file(td, fname, 0, 1);
				file_action = FIO_LOG_ADD_FILE;
				continue;
			} else if (!strcmp(act, "open")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_OPEN_FILE;
			} else if (!strcmp(act, "close")) {
				fileno = get_fileno(td, fname);
				file_action = FIO_LOG_CLOSE_FILE;
			} else {
				log_err("fio: bad iolog file action: %s\n",
									act);
				continue;
			}
		} else {
			log_err("bad iolog2: %s", p);
			continue;
		}

		if (rw == DDIR_READ)
			reads++;
		else if (rw == DDIR_WRITE) {
			/*
			 * Don't add a write for ro mode
			 */
			if (read_only)
				continue;
			writes++;
		} else if (rw == DDIR_WAIT) {
			waits++;
		} else if (rw == DDIR_INVAL) {
		} else if (!ddir_sync(rw)) {
			log_err("bad ddir: %d\n", rw);
			continue;
		}

		/*
		 * Make note of file
		 */
		ipo = malloc(sizeof(*ipo));
		init_ipo(ipo);
		ipo->ddir = rw;
		if (rw == DDIR_WAIT) {
			ipo->delay = offset;
		} else {
			ipo->offset = offset;
			ipo->len = bytes;
			if (rw != DDIR_INVAL && bytes > td->o.max_bs[rw])
				td->o.max_bs[rw] = bytes;
			ipo->fileno = fileno;
			ipo->file_action = file_action;
			td->o.size += bytes;
		}

		queue_io_piece(td, ipo);
	}

	free(str);
	free(act);
	free(fname);

	if (writes && read_only) {
		log_err("fio: <%s> skips replay of %d writes due to"
			" read-only\n", td->o.name, writes);
		writes = 0;
	}

	if (!reads && !writes && !waits)
		return 1;
	else if (reads && !writes)
		td->o.td_ddir = TD_DDIR_READ;
	else if (!reads && writes)
		td->o.td_ddir = TD_DDIR_WRITE;
	else
		td->o.td_ddir = TD_DDIR_RW;

	return 0;
}
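/*
 * Illustrative sketch (not part of fio): input that the read_iolog2()
 * parser above accepts, based on its sscanf() and strcmp() checks. Lines
 * with two matched tokens manage files; lines with four carry
 * <file> <action> <offset> <length>, where a "wait" line reuses the
 * offset field as a delay. Any version header is assumed to be consumed
 * by the caller, and "/dev/sdb" is just an example name:
 *
 *	/dev/sdb add
 *	/dev/sdb open
 *	/dev/sdb write 0 4096
 *	/dev/sdb read 4096 4096
 *	/dev/sdb wait 100 0
 *	/dev/sdb sync 0 0
 *	/dev/sdb close
 */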
/*
 * log a successful write, so we can unwind the log for verify
 */
void log_io_piece(struct thread_data *td, struct io_u *io_u)
{
	struct rb_node **p, *parent;
	struct io_piece *ipo, *__ipo;

	ipo = malloc(sizeof(struct io_piece));
	init_ipo(ipo);

	ipo->file = io_u->file;
	ipo->offset = io_u->offset;
	ipo->len = io_u->buflen;
	ipo->numberio = io_u->numberio;
	ipo->flags = IP_F_IN_FLIGHT;

	io_u->ipo = ipo;

	if (io_u_should_trim(td, io_u)) {
		flist_add_tail(&ipo->trim_list, &td->trim_list);
		td->trim_entries++;
	}

	/*
	 * We don't need to sort the entries, if:
	 *
	 *	Sequential writes, or
	 *	Random writes that lay out the file as it goes along
	 *
	 * For both these cases, just reading back data in the order we
	 * wrote it out is the fastest.
	 *
	 * One exception is if we don't have a random map AND we are doing
	 * verifies, in that case we need to check for duplicate blocks and
	 * drop the old one, which we rely on the rb insert/lookup for
	 * handling.
	 */
	if (((!td->o.verifysort) || !td_random(td) || !td->o.overwrite) &&
	    (file_randommap(td, ipo->file) || td->o.verify == VERIFY_NONE)) {
		INIT_FLIST_HEAD(&ipo->list);
		flist_add_tail(&ipo->list, &td->io_hist_list);
		ipo->flags |= IP_F_ONLIST;
		td->io_hist_len++;
		return;
	}

	RB_CLEAR_NODE(&ipo->rb_node);

	/*
	 * Sort the entry into the verification list
	 */
restart:
	p = &td->io_hist_tree.rb_node;
	parent = NULL;
	while (*p) {
		parent = *p;

		__ipo = rb_entry(parent, struct io_piece, rb_node);
		if (ipo->file < __ipo->file)
			p = &(*p)->rb_left;
		else if (ipo->file > __ipo->file)
			p = &(*p)->rb_right;
		else if (ipo->offset < __ipo->offset)
			p = &(*p)->rb_left;
		else if (ipo->offset > __ipo->offset)
			p = &(*p)->rb_right;
		else {
			dprint(FD_IO, "iolog: overlap %llu/%lu, %llu/%lu\n",
				__ipo->offset, __ipo->len,
				ipo->offset, ipo->len);
			td->io_hist_len--;
			rb_erase(parent, &td->io_hist_tree);
			remove_trim_entry(td, __ipo);
			free(__ipo);
			goto restart;
		}
	}

	rb_link_node(&ipo->rb_node, parent, p);
	rb_insert_color(&ipo->rb_node, &td->io_hist_tree);
	ipo->flags |= IP_F_ONRB;
	td->io_hist_len++;
}
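/*
 * Illustrative sketch (not part of fio): a lookup that mirrors the
 * (file pointer, then offset) ordering used by the rb-tree insert in
 * log_io_piece() above. Any walk of td->io_hist_tree has to compare keys
 * in the same order, or entries become unreachable. Hypothetical helper
 * for clarity only.
 */
static struct io_piece *example_hist_lookup(struct thread_data *td,
					    struct fio_file *f,
					    unsigned long long offset)
{
	struct rb_node *n = td->io_hist_tree.rb_node;

	while (n) {
		struct io_piece *ipo = rb_entry(n, struct io_piece, rb_node);

		if (f < ipo->file)			/* same key order as insert */
			n = n->rb_left;
		else if (f > ipo->file)
			n = n->rb_right;
		else if (offset < ipo->offset)
			n = n->rb_left;
		else if (offset > ipo->offset)
			n = n->rb_right;
		else
			return ipo;
	}

	return NULL;
}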