/* * Attempt to mmap the entire file */ static int fio_libpmem_prep_full(struct thread_data *td, struct io_u *io_u) { struct fio_file *f = io_u->file; struct fio_libpmem_data *fdd = FILE_ENG_DATA(f); int ret; dprint(FD_IO, "DEBUG fio_libpmem_prep_full\n" ); if (fio_file_partial_mmap(f)) return EINVAL; dprint(FD_IO," f->io_size %ld : io_u->offset %lld \n", f->io_size, io_u->offset); if (io_u->offset != (size_t) io_u->offset || f->io_size != (size_t) f->io_size) { fio_file_set_partial_mmap(f); return EINVAL; } fdd->libpmem_sz = f->io_size; fdd->libpmem_off = 0; ret = fio_libpmem_file(td, f, fdd->libpmem_sz, fdd->libpmem_off); if (ret) fio_file_set_partial_mmap(f); return ret; }
static int fio_mmapio_prep(struct thread_data *td, struct io_u *io_u) { struct fio_file *f = io_u->file; struct fio_mmap_data *fmd = FILE_ENG_DATA(f); int ret; /* * It fits within existing mapping, use it */ if (io_u->offset >= fmd->mmap_off && io_u->offset + io_u->buflen <= fmd->mmap_off + fmd->mmap_sz) goto done; /* * unmap any existing mapping */ if (fmd->mmap_ptr) { if (munmap(fmd->mmap_ptr, fmd->mmap_sz) < 0) return errno; fmd->mmap_ptr = NULL; } if (fio_mmapio_prep_full(td, io_u)) { td_clear_error(td); ret = fio_mmapio_prep_limited(td, io_u); if (ret) return ret; } done: io_u->mmap_data = fmd->mmap_ptr + io_u->offset - fmd->mmap_off - f->file_offset; return 0; }
static int fio_mtd_close_file(struct thread_data *td, struct fio_file *f)
{
	/* Detach and release the per-file MTD state, then do a normal close. */
	struct fio_mtd_data *mtd_data = FILE_ENG_DATA(f);

	FILE_SET_ENG_DATA(f, NULL);
	free(mtd_data);

	return generic_close_file(td, f);
}
static int fio_mmapio_close_file(struct thread_data *td, struct fio_file *f)
{
	/* Detach and release the per-file mmap state. */
	struct fio_mmap_data *mmap_data = FILE_ENG_DATA(f);

	FILE_SET_ENG_DATA(f, NULL);
	free(mmap_data);
	/* Reset the partial-mmap flag so a reopen starts fresh. */
	fio_file_clear_partial_mmap(f);

	return generic_close_file(td, f);
}
static int fio_libpmem_close_file(struct thread_data *td, struct fio_file *f)
{
	struct fio_libpmem_data *pmem_data = FILE_ENG_DATA(f);

	dprint(FD_IO,"DEBUG fio_libpmem_close_file\n");
	dprint(FD_IO,"td->o.odirect %d \n",td->o.odirect);

	/*
	 * Without O_DIRECT the writes used pmem_memcpy_nodrain(); make sure
	 * everything is flushed to persistence before closing.
	 */
	if (td->o.odirect == 0) {
		dprint(FD_IO,"pmem_drain\n");
		pmem_drain();
	}

	FILE_SET_ENG_DATA(f, NULL);
	free(pmem_data);
	fio_file_clear_partial_mmap(f);

	return generic_close_file(td, f);
}
/* * Just mmap an appropriate portion, we cannot mmap the full extent */ static int fio_mmapio_prep_limited(struct thread_data *td, struct io_u *io_u) { struct fio_file *f = io_u->file; struct fio_mmap_data *fmd = FILE_ENG_DATA(f); if (io_u->buflen > mmap_map_size) { log_err("fio: bs too big for mmap engine\n"); return EIO; } fmd->mmap_sz = mmap_map_size; if (fmd->mmap_sz > f->io_size) fmd->mmap_sz = f->io_size; fmd->mmap_off = io_u->offset; return fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off); }
static int fio_libpmem_prep(struct thread_data *td, struct io_u *io_u) { struct fio_file *f = io_u->file; struct fio_libpmem_data *fdd = FILE_ENG_DATA(f); int ret; dprint(FD_IO, "DEBUG fio_libpmem_prep\n" ); /* * It fits within existing mapping, use it */ dprint(FD_IO," io_u->offset %llu : fdd->libpmem_off %llu : " "io_u->buflen %llu : fdd->libpmem_sz %llu\n", io_u->offset, (unsigned long long) fdd->libpmem_off, io_u->buflen, (unsigned long long) fdd->libpmem_sz); if (io_u->offset >= fdd->libpmem_off && (io_u->offset + io_u->buflen <= fdd->libpmem_off + fdd->libpmem_sz)) goto done; /* * unmap any existing mapping */ if (fdd->libpmem_ptr) { dprint(FD_IO,"munmap \n"); if (munmap(fdd->libpmem_ptr, fdd->libpmem_sz) < 0) return errno; fdd->libpmem_ptr = NULL; } if (fio_libpmem_prep_full(td, io_u)) { td_clear_error(td); ret = fio_libpmem_prep_limited(td, io_u); if (ret) return ret; } done: io_u->mmap_data = fdd->libpmem_ptr + io_u->offset - fdd->libpmem_off - f->file_offset; return 0; }
static int fio_mmap_file(struct thread_data *td, struct fio_file *f, size_t length, off_t off) { struct fio_mmap_data *fmd = FILE_ENG_DATA(f); int flags = 0; if (td_rw(td) && !td->o.verify_only) flags = PROT_READ | PROT_WRITE; else if (td_write(td) && !td->o.verify_only) { flags = PROT_WRITE; if (td->o.verify != VERIFY_NONE) flags |= PROT_READ; } else flags = PROT_READ; fmd->mmap_ptr = mmap(NULL, length, flags, MAP_SHARED, f->fd, off); if (fmd->mmap_ptr == MAP_FAILED) { fmd->mmap_ptr = NULL; td_verror(td, errno, "mmap"); goto err; } if (!fio_madvise_file(td, f, length)) goto err; if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_DONTNEED) < 0) { td_verror(td, errno, "madvise"); goto err; } #ifdef FIO_MADV_FREE if (f->filetype == FIO_TYPE_BLOCK) (void) posix_madvise(fmd->mmap_ptr, fmd->mmap_sz, FIO_MADV_FREE); #endif err: if (td->error && fmd->mmap_ptr) munmap(fmd->mmap_ptr, length); return td->error; }
/* * XXX Just mmap an appropriate portion, we cannot mmap the full extent */ static int fio_libpmem_prep_limited(struct thread_data *td, struct io_u *io_u) { struct fio_file *f = io_u->file; struct fio_libpmem_data *fdd = FILE_ENG_DATA(f); dprint(FD_IO, "DEBUG fio_libpmem_prep_limited\n" ); if (io_u->buflen > f->real_file_size) { log_err("libpmem: bs too big for libpmem engine\n"); return EIO; } fdd->libpmem_sz = min(MMAP_TOTAL_SZ, f->real_file_size); if (fdd->libpmem_sz > f->io_size) fdd->libpmem_sz = f->io_size; fdd->libpmem_off = io_u->offset; return fio_libpmem_file(td, f, fdd->libpmem_sz, fdd->libpmem_off); }
static enum fio_q_status fio_mmapio_queue(struct thread_data *td,
					  struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mmap_data *fmd = FILE_ENG_DATA(f);

	fio_ro_check(td, io_u);

	/*
	 * Reads/writes are plain memcpy against the mapping prepared by
	 * fio_mmapio_prep(); io_u->mmap_data already points at the right
	 * address for this io_u's offset.
	 */
	if (io_u->ddir == DDIR_READ)
		memcpy(io_u->xfer_buf, io_u->mmap_data, io_u->xfer_buflen);
	else if (io_u->ddir == DDIR_WRITE)
		memcpy(io_u->mmap_data, io_u->xfer_buf, io_u->xfer_buflen);
	else if (ddir_sync(io_u->ddir)) {
		/* Sync flushes the entire current mapping, not just this io_u. */
		if (msync(fmd->mmap_ptr, fmd->mmap_sz, MS_SYNC)) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
	} else if (io_u->ddir == DDIR_TRIM) {
		int ret = do_io_u_trim(td, io_u);

		/*
		 * NOTE(review): error is flagged when do_io_u_trim() returns
		 * zero — presumably it returns the number of bytes trimmed on
		 * success and 0 on failure; confirm against its definition.
		 */
		if (!ret)
			td_verror(td, io_u->error, "trim");
	}

	/*
	 * not really direct, but should drop the pages from the cache
	 */
	if (td->o.odirect && ddir_rw(io_u->ddir)) {
		if (msync(io_u->mmap_data, io_u->xfer_buflen, MS_SYNC) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "msync");
		}
		/*
		 * NOTE(review): posix_madvise() returns an error number and
		 * does not set errno, so this "< 0" test likely never fires —
		 * same pattern as fio_mmap_file(); confirm and fix together.
		 */
		if (posix_madvise(io_u->mmap_data, io_u->xfer_buflen, POSIX_MADV_DONTNEED) < 0) {
			io_u->error = errno;
			td_verror(td, io_u->error, "madvise");
		}
	}

	/* Memory-backed I/O completes synchronously. */
	return FIO_Q_COMPLETED;
}
static bool fio_madvise_file(struct thread_data *td, struct fio_file *f, size_t length) { struct fio_mmap_data *fmd = FILE_ENG_DATA(f); if (!td->o.fadvise_hint) return true; if (!td_random(td)) { if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_SEQUENTIAL) < 0) { td_verror(td, errno, "madvise"); return false; } } else { if (posix_madvise(fmd->mmap_ptr, length, POSIX_MADV_RANDOM) < 0) { td_verror(td, errno, "madvise"); return false; } } return true; }
/* * Attempt to mmap the entire file */ static int fio_mmapio_prep_full(struct thread_data *td, struct io_u *io_u) { struct fio_file *f = io_u->file; struct fio_mmap_data *fmd = FILE_ENG_DATA(f); int ret; if (fio_file_partial_mmap(f)) return EINVAL; if (io_u->offset != (size_t) io_u->offset || f->io_size != (size_t) f->io_size) { fio_file_set_partial_mmap(f); return EINVAL; } fmd->mmap_sz = f->io_size; fmd->mmap_off = 0; ret = fio_mmap_file(td, f, fmd->mmap_sz, fmd->mmap_off); if (ret) fio_file_set_partial_mmap(f); return ret; }
/* * This is the mmap execution function */ static int fio_libpmem_file(struct thread_data *td, struct fio_file *f, size_t length, off_t off) { struct fio_libpmem_data *fdd = FILE_ENG_DATA(f); int flags = 0; void *addr = NULL; dprint(FD_IO, "DEBUG fio_libpmem_file\n"); if (td_rw(td)) flags = PROT_READ | PROT_WRITE; else if (td_write(td)) { flags = PROT_WRITE; if (td->o.verify != VERIFY_NONE) flags |= PROT_READ; } else flags = PROT_READ; dprint(FD_IO, "f->file_name = %s td->o.verify = %d \n", f->file_name, td->o.verify); dprint(FD_IO, "length = %ld flags = %d f->fd = %d off = %ld \n", length, flags, f->fd,off); addr = util_map_hint(length, 0); fdd->libpmem_ptr = mmap(addr, length, flags, MAP_SHARED, f->fd, off); if (fdd->libpmem_ptr == MAP_FAILED) { fdd->libpmem_ptr = NULL; td_verror(td, errno, "mmap"); } if (td->error && fdd->libpmem_ptr) munmap(fdd->libpmem_ptr, length); return td->error; }
static int fio_mtd_queue(struct thread_data *td, struct io_u *io_u)
{
	struct fio_file *f = io_u->file;
	struct fio_mtd_data *fmd = FILE_ENG_DATA(f);
	int local_offs = 0;	/* progress within io_u->buf, in bytes */
	int ret;

	fio_ro_check(td, io_u);

	/*
	 * Errors tend to pertain to particular erase blocks, so divide up
	 * I/O to erase block size.
	 * If an error is encountered, log it and keep going onto the next
	 * block because the error probably just pertains to that block.
	 * TODO(dehrenberg): Divide up reads and writes into page-sized
	 * operations to get more fine-grained information about errors.
	 */
	/*
	 * NOTE(review): local_offs is int while io_u->buflen is unsigned —
	 * the comparison promotes to unsigned; fine for sane buflen but
	 * confirm buflen can never exceed INT_MAX here.
	 */
	while (local_offs < io_u->buflen) {
		/* Erase block index and offset within it for this chunk. */
		int eb = (io_u->offset + local_offs) / fmd->info.eb_size;
		int eb_offs = (io_u->offset + local_offs) % fmd->info.eb_size;
		/* The length is the smaller of the length remaining in the
		 * buffer and the distance to the end of the erase block */
		int len = min((int)io_u->buflen - local_offs,
			      (int)fmd->info.eb_size - eb_offs);
		char *buf = ((char *)io_u->buf) + local_offs;

		/* Optionally skip blocks already marked bad in the MTD. */
		if (td->o.skip_bad) {
			ret = fio_mtd_is_bad(td, fmd, io_u, eb);
			if (ret == -1)
				break;
			else if (ret == 1)
				goto next;
		}
		if (io_u->ddir == DDIR_READ) {
			ret = mtd_read(&fmd->info, f->fd, eb, eb_offs,
				       buf, len);
			if (ret != 0) {
				io_u->error = errno;
				td_verror(td, errno, "mtd_read");
				/* Stop only if marking the block bad failed. */
				if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
					break;
			}
		} else if (io_u->ddir == DDIR_WRITE) {
			ret = mtd_write(desc, &fmd->info, f->fd, eb, eb_offs,
					buf, len, NULL, 0, 0);
			if (ret != 0) {
				io_u->error = errno;
				td_verror(td, errno, "mtd_write");
				if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
					break;
			}
		} else if (io_u->ddir == DDIR_TRIM) {
			/*
			 * NOTE(review): on a misaligned trim, io_u->error is
			 * set but mtd_erase() is still attempted on the whole
			 * block — confirm this fall-through is intentional.
			 */
			if (eb_offs != 0 || len != fmd->info.eb_size) {
				io_u->error = EINVAL;
				td_verror(td, EINVAL,
					  "trim on MTD must be erase block-aligned");
			}
			ret = mtd_erase(desc, &fmd->info, f->fd, eb);
			if (ret != 0) {
				io_u->error = errno;
				td_verror(td, errno, "mtd_erase");
				if (fio_mtd_maybe_mark_bad(td, fmd, io_u, eb))
					break;
			}
		} else {
			io_u->error = ENOTSUP;
			td_verror(td, io_u->error,
				  "operation not supported on mtd");
		}

next:
		local_offs += len;
	}

	/* Per-block errors were recorded; the io_u itself always completes. */
	return FIO_Q_COMPLETED;
}