/* * stress_fiemap_writer() * write data in random places and punch holes * in data in random places to try and maximize * extents in the file */ int stress_fiemap_writer( const char *name, const int fd, uint64_t *counters, const uint64_t max_ops) { uint8_t buf[1]; uint64_t len = (off_t)opt_fiemap_size - sizeof(buf); uint64_t counter; int rc = EXIT_FAILURE; #if defined(FALLOC_FL_PUNCH_HOLE) && \ defined(FALLOC_FL_KEEP_SIZE) bool punch_hole = true; #endif stress_strnrnd((char *)buf, sizeof(buf)); do { uint64_t offset; size_t i; counter = 0; offset = (mwc64() % len) & ~0x1fff; if (lseek(fd, (off_t)offset, SEEK_SET) < 0) break; if (write(fd, buf, sizeof(buf)) < 0) { if ((errno != EAGAIN) && (errno != EINTR)) { pr_fail_err(name, "write"); goto tidy; } } #if defined(FALLOC_FL_PUNCH_HOLE) && \ defined(FALLOC_FL_KEEP_SIZE) if (!punch_hole) continue; offset = mwc64() % len; if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, offset, 8192) < 0) { if (errno == EOPNOTSUPP) punch_hole = false; } #endif for (i = 0; i < MAX_FIEMAP_PROCS; i++) counter += counters[i]; } while (opt_do_run && (!max_ops || counter < max_ops)); rc = EXIT_SUCCESS; tidy: (void)close(fd); return rc; }
/* * stress_matrix() * stress CPU by doing floating point math ops */ int stress_matrix( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { stress_matrix_func func = opt_matrix_stressor->func; size_t n; const matrix_type_t v = 1 / (matrix_type_t)((uint32_t)~0); (void)instance; (void)name; if (!set_matrix_size) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_matrix_size = MAX_MATRIX_SIZE; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_matrix_size = MIN_MATRIX_SIZE; } n = opt_matrix_size; { register size_t i; matrix_type_t a[n][n], b[n][n], r[n][n]; /* * Initialise matrices */ for (i = 0; i < n; i++) { register size_t j; for (j = 0; j < n; j++) { a[i][j] = (matrix_type_t)mwc64() * v; b[i][j] = (matrix_type_t)mwc64() * v; r[i][j] = 0.0; } } /* * Normal use case, 100% load, simple spinning on CPU */ do { (void)func(n, a, b, r); (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); } return EXIT_SUCCESS; }
/* * stress_bsearch() * stress bsearch */ int stress_bsearch( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { int32_t *data, *ptr, prev = 0; size_t n, n8, i; (void)instance; if (!set_bsearch_size) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_bsearch_size = MAX_BSEARCH_SIZE; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_bsearch_size = MIN_BSEARCH_SIZE; } n = (size_t)opt_bsearch_size; n8 = (n + 7) & ~7; /* allocate in multiples of 8 */ if ((data = malloc(sizeof(int32_t) * n8)) == NULL) { pr_failed_dbg(name, "malloc"); return EXIT_FAILURE; } /* Populate with ascending data */ prev = 0; for (i = 0; i < n;) { uint64_t v = mwc64(); SETDATA(data, i, v, prev); SETDATA(data, i, v, prev); SETDATA(data, i, v, prev); SETDATA(data, i, v, prev); SETDATA(data, i, v, prev); SETDATA(data, i, v, prev); SETDATA(data, i, v, prev); SETDATA(data, i, v, prev); } do { for (ptr = data, i = 0; i < n; i++, ptr++) { int32_t *result; result = bsearch(ptr, data, n, sizeof(*ptr), cmp); if (opt_flags & OPT_FLAGS_VERIFY) { if (result == NULL) pr_fail(stderr, "%s: element %zu could not be found\n", name, i); else if (*result != *ptr) pr_fail(stderr, "%s: element %zu found %" PRIu32 ", expecting %" PRIu32 "\n", name, i, *result, *ptr); } } (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); free(data); return EXIT_SUCCESS; }
/* * stress_msync() * stress msync */ int stress_msync( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { uint8_t *buf = NULL; const size_t page_size = stress_get_pagesize(); const size_t min_size = 2 * page_size; size_t sz = min_size; ssize_t ret, rc = EXIT_SUCCESS; const pid_t pid = getpid(); int fd = -1; char filename[PATH_MAX]; ret = sigsetjmp(jmp_env, 1); if (ret) { pr_fail_err(name, "sigsetjmp"); return EXIT_FAILURE; } if (stress_sighandler(name, SIGBUS, stress_sigbus_handler, NULL) < 0) return EXIT_FAILURE; if (!set_msync_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_msync_bytes = MAX_MSYNC_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_msync_bytes = MIN_MSYNC_BYTES; } sz = opt_msync_bytes & ~(page_size - 1); if (sz < min_size) sz = min_size; /* Make sure this is killable by OOM killer */ set_oom_adjustment(name, true); rc = stress_temp_dir_mk(name, pid, instance); if (rc < 0) return exit_status(-rc); (void)stress_temp_filename(filename, sizeof(filename), name, pid, instance, mwc32()); (void)umask(0077); if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) { rc = exit_status(errno); pr_fail_err(name, "open"); (void)unlink(filename); (void)stress_temp_dir_rm(name, pid, instance); return rc; } (void)unlink(filename); if (ftruncate(fd, sz) < 0) { pr_err(stderr, "%s: ftruncate failed, errno=%d (%s)\n", name, errno, strerror(errno)); (void)close(fd); (void)stress_temp_dir_rm(name, pid, instance); return EXIT_FAILURE; } buf = (uint8_t *)mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (buf == MAP_FAILED) { pr_err(stderr, "%s: failed to mmap memory, errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; goto err; } do { off_t offset; uint8_t val, data[page_size]; ret = sigsetjmp(jmp_env, 1); if (ret) { /* Try again */ continue; } /* * Change data in memory, msync to disk */ offset = (mwc64() % (sz - page_size)) & ~(page_size - 1); val = mwc8(); memset(buf + offset, val, 
page_size); ret = msync(buf + offset, page_size, MS_SYNC); if (ret < 0) { pr_fail(stderr, "%s: msync MS_SYNC on " "offset %jd failed, errno=%d (%s)", name, (intmax_t)offset, errno, strerror(errno)); goto do_invalidate; } ret = lseek(fd, offset, SEEK_SET); if (ret == (off_t)-1) { pr_err(stderr, "%s: cannot seet to offset %jd, " "errno=%d (%s)\n", name, (intmax_t)offset, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; break; } ret = read(fd, data, sizeof(data)); if (ret < (ssize_t)sizeof(data)) { pr_fail(stderr, "%s: read failed, errno=%d (%s)\n", name, errno, strerror(errno)); goto do_invalidate; } if (stress_page_check(data, val, sizeof(data)) < 0) { pr_fail(stderr, "%s: msync'd data in file different " "to data in memory\n", name); } do_invalidate: /* * Now change data on disc, msync invalidate */ offset = (mwc64() % (sz - page_size)) & ~(page_size - 1); val = mwc8(); memset(buf + offset, val, page_size); ret = lseek(fd, offset, SEEK_SET); if (ret == (off_t)-1) { pr_err(stderr, "%s: cannot seet to offset %jd, errno=%d (%s)\n", name, (intmax_t)offset, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; break; } ret = read(fd, data, sizeof(data)); if (ret < (ssize_t)sizeof(data)) { pr_fail(stderr, "%s: read failed, errno=%d (%s)\n", name, errno, strerror(errno)); goto do_next; } ret = msync(buf + offset, page_size, MS_INVALIDATE); if (ret < 0) { pr_fail(stderr, "%s: msync MS_INVALIDATE on " "offset %jd failed, errno=%d (%s)", name, (intmax_t)offset, errno, strerror(errno)); goto do_next; } if (stress_page_check(buf + offset, val, sizeof(data)) < 0) { pr_fail(stderr, "%s: msync'd data in memory " "different to data in file\n", name); } do_next: (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); (void)munmap((void *)buf, sz); err: (void)close(fd); (void)stress_temp_dir_rm(name, pid, instance); if (sigbus_count) pr_inf(stdout, "%s: caught %" PRIu64 " SIGBUS signals\n", name, sigbus_count); return rc; }
/* * stress_sync_file * stress the sync_file_range system call */ static int stress_sync_file(const args_t *args) { int fd, ret; off_t sync_file_bytes = DEFAULT_SYNC_FILE_BYTES; char filename[PATH_MAX]; if (!get_setting("sync_file-bytes", &sync_file_bytes)) { if (g_opt_flags & OPT_FLAGS_MAXIMIZE) sync_file_bytes = MAX_SYNC_FILE_BYTES; if (g_opt_flags & OPT_FLAGS_MINIMIZE) sync_file_bytes = MIN_SYNC_FILE_BYTES; } sync_file_bytes /= args->num_instances; if (sync_file_bytes < (off_t)MIN_SYNC_FILE_BYTES) sync_file_bytes = (off_t)MIN_SYNC_FILE_BYTES; ret = stress_temp_dir_mk_args(args); if (ret < 0) return exit_status(-ret); (void)stress_temp_filename_args(args, filename, sizeof(filename), mwc32()); if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) { ret = exit_status(errno); pr_fail_err("open"); (void)stress_temp_dir_rm_args(args); return ret; } (void)unlink(filename); do { shim_off64_t i, offset; const size_t mode_index = mwc32() % SIZEOF_ARRAY(sync_modes); const int mode = sync_modes[mode_index]; if (stress_sync_allocate(args, fd, sync_file_bytes) < 0) break; for (offset = 0; g_keep_stressing_flag && (offset < (shim_off64_t)sync_file_bytes); ) { shim_off64_t sz = (mwc32() & 0x1fc00) + KB; ret = shim_sync_file_range(fd, offset, sz, mode); if (ret < 0) { if (errno == ENOSYS) { pr_inf("%s: skipping stressor, sync_file_range is not implemented\n", args->name); goto err; } pr_fail_err("sync_file_range (forward)"); break; } offset += sz; } if (!g_keep_stressing_flag) break; if (stress_sync_allocate(args, fd, sync_file_bytes) < 0) break; for (offset = 0; g_keep_stressing_flag && (offset < (shim_off64_t)sync_file_bytes); ) { shim_off64_t sz = (mwc32() & 0x1fc00) + KB; ret = shim_sync_file_range(fd, sync_file_bytes - offset, sz, mode); if (ret < 0) { if (errno == ENOSYS) { pr_inf("%s: skipping stressor, sync_file_range is not implemented\n", args->name); goto err; } pr_fail_err("sync_file_range (reverse)"); break; } offset += sz; } if 
(!g_keep_stressing_flag) break; if (stress_sync_allocate(args, fd, sync_file_bytes) < 0) break; for (i = 0; i < g_keep_stressing_flag && ((shim_off64_t)(sync_file_bytes / (128 * KB))); i++) { offset = (mwc64() % sync_file_bytes) & ~((128 * KB) - 1); ret = shim_sync_file_range(fd, offset, 128 * KB, mode); if (ret < 0) { if (errno == ENOSYS) { pr_inf("%s: skipping stressor, sync_file_range is not implemented\n", args->name); goto err; } pr_fail_err("sync_file_range (random)"); break; } } inc_counter(args); } while (keep_stressing()); err: (void)close(fd); (void)stress_temp_dir_rm_args(args); return EXIT_SUCCESS; }
/* * stress_copy_file * stress reading chunks of file using copy_file_range() */ int stress_copy_file( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { int fd_in, fd_out, rc = EXIT_FAILURE; char filename[PATH_MAX], tmp[PATH_MAX]; pid_t pid = getpid(); if (!set_copy_file_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_copy_file_bytes = MAX_HDD_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_copy_file_bytes = MIN_HDD_BYTES; } if (opt_copy_file_bytes < DEFAULT_COPY_FILE_SIZE) opt_copy_file_bytes = DEFAULT_COPY_FILE_SIZE * 2; if (stress_temp_dir_mk(name, pid, instance) < 0) goto tidy_dir; (void)stress_temp_filename(filename, sizeof(filename), name, pid, instance, mwc32()); snprintf(tmp, sizeof(tmp), "%s-orig", filename); if ((fd_in = open(tmp, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) { rc = exit_status(errno); pr_fail_err(name, "open"); goto tidy_dir; } (void)unlink(tmp); if (ftruncate(fd_in, opt_copy_file_bytes) < 0) { rc = exit_status(errno); pr_fail_err(name, "ftruncate"); goto tidy_in; } if (fsync(fd_in) < 0) { pr_fail_err(name, "fsync"); goto tidy_in; } snprintf(tmp, sizeof(tmp), "%s-copy", filename); if ((fd_out = open(tmp, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR)) < 0) { rc = exit_status(errno); pr_fail_err(name, "open"); goto tidy_in; } (void)unlink(tmp); do { ssize_t ret; loff_t off_in, off_out; off_in = mwc64() % (opt_copy_file_bytes - DEFAULT_COPY_FILE_SIZE); off_out = mwc64() % (opt_copy_file_bytes - DEFAULT_COPY_FILE_SIZE); ret = sys_copy_file_range(fd_in, &off_in, fd_out, &off_out, DEFAULT_COPY_FILE_SIZE, 0); if (ret < 0) { if ((errno == EAGAIN) || (errno == EINTR)) continue; pr_fail_err(name, "copy_file_range"); goto tidy_out; } (void)fsync(fd_out); (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); rc = EXIT_SUCCESS; tidy_out: (void)close(fd_out); tidy_in: (void)close(fd_in); tidy_dir: (void)stress_temp_dir_rm(name, pid, instance); return rc; }
/*
 *  stress_fallocate
 *	stress I/O via fallocate and ftruncate
 */
static int stress_fallocate(const args_t *args)
{
	int fd, ret;
	char filename[PATH_MAX];
	uint64_t ftrunc_errs = 0;	/* count of failed ftruncate() calls */
	off_t fallocate_bytes = DEFAULT_FALLOCATE_BYTES;

	/* Use the user setting if given, else clamp when maximizing/minimizing */
	if (!get_setting("fallocate-bytes", &fallocate_bytes)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			fallocate_bytes = MAX_FALLOCATE_BYTES;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			fallocate_bytes = MIN_FALLOCATE_BYTES;
	}

	/* Share the byte budget across all instances, with a minimum floor */
	fallocate_bytes /= args->num_instances;
	if (fallocate_bytes < (off_t)MIN_FALLOCATE_BYTES)
		fallocate_bytes = (off_t)MIN_FALLOCATE_BYTES;
	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args, filename, sizeof(filename), mwc32());
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		ret = exit_status(errno);
		pr_fail_err("open");
		(void)stress_temp_dir_rm_args(args);
		return ret;
	}
	/* Unlink now; the open fd keeps the file alive until close */
	(void)unlink(filename);

	do {
#if defined(HAVE_POSIX_FALLOCATE)
		ret = posix_fallocate(fd, (off_t)0, fallocate_bytes);
#else
		ret = shim_fallocate(fd, 0, (off_t)0, fallocate_bytes);
#endif
		if (!g_keep_stressing_flag)
			break;
		(void)shim_fsync(fd);

		/* If allocation succeeded, optionally verify the file size */
		if ((ret == 0) && (g_opt_flags & OPT_FLAGS_VERIFY)) {
			struct stat buf;

			if (fstat(fd, &buf) < 0)
				pr_fail("%s: fstat on file failed", args->name);
			else if (buf.st_size != fallocate_bytes)
				pr_fail("%s: file size %jd does not "
					"match size the expected file "
					"size of %jd\n",
					args->name, (intmax_t)buf.st_size,
					(intmax_t)fallocate_bytes);
		}

		/* Truncate back to zero and optionally verify it took effect */
		if (ftruncate(fd, 0) < 0)
			ftrunc_errs++;
		if (!g_keep_stressing_flag)
			break;
		(void)shim_fsync(fd);

		if (g_opt_flags & OPT_FLAGS_VERIFY) {
			struct stat buf;

			if (fstat(fd, &buf) < 0)
				pr_fail("%s: fstat on file failed", args->name);
			else if (buf.st_size != (off_t)0)
				pr_fail("%s: file size %jd does not "
					"match size the expected file size "
					"of 0\n",
					args->name, (intmax_t)buf.st_size);
		}

		/* Grow then shrink via ftruncate to exercise (de)allocation */
		if (ftruncate(fd, fallocate_bytes) < 0)
			ftrunc_errs++;
		(void)shim_fsync(fd);
		if (ftruncate(fd, 0) < 0)
			ftrunc_errs++;
		(void)shim_fsync(fd);

		if (SIZEOF_ARRAY(modes) > 1) {
			/*
			 *  non-portable Linux fallocate()
			 */
			int i;

			(void)shim_fallocate(fd, 0, (off_t)0, fallocate_bytes);
			if (!g_keep_stressing_flag)
				break;
			(void)shim_fsync(fd);

			/* 64 fallocates with random modes at random 4K-aligned offsets */
			for (i = 0; i < 64; i++) {
				off_t offset = (mwc64() % fallocate_bytes) & ~0xfff;
				int j = (mwc32() >> 8) % SIZEOF_ARRAY(modes);

				(void)shim_fallocate(fd, modes[j], offset, 64 * KB);
				if (!g_keep_stressing_flag)
					break;
				(void)shim_fsync(fd);
			}
			if (ftruncate(fd, 0) < 0)
				ftrunc_errs++;
			(void)shim_fsync(fd);
		}
		inc_counter(args);
	} while (keep_stressing());
/*
 *  stress_mmap()
 *	stress mmap
 */
int stress_mmap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	size_t sz, pages4k;
#if !defined(__gnu_hurd__)
	/* msync mode used on file-backed mappings */
	const int ms_flags = (opt_flags & OPT_FLAGS_MMAP_ASYNC) ?
		MS_ASYNC : MS_SYNC;
#endif
	const pid_t pid = getpid();
	int fd = -1, flags = MAP_PRIVATE | MAP_ANONYMOUS;
	char filename[PATH_MAX];

	(void)instance;
#ifdef MAP_POPULATE
	flags |= MAP_POPULATE;	/* pre-fault pages where supported */
#endif

	if (!set_mmap_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_mmap_bytes = MAX_MMAP_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_mmap_bytes = MIN_MMAP_BYTES;
	}
	/* Round mapping size down to whole pages */
	sz = opt_mmap_bytes & ~(page_size - 1);
	pages4k = sz / page_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	/* For file-backed mode, create a temp file of exactly sz bytes */
	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		ssize_t ret;
		char ch = '\0';

		if (stress_temp_dir_mk(name, pid, instance) < 0)
			return EXIT_FAILURE;

		(void)stress_temp_filename(filename, sizeof(filename),
			name, pid, instance, mwc32());

		(void)umask(0077);
		if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
			pr_failed_err(name, "open");
			(void)unlink(filename);
			(void)stress_temp_dir_rm(name, pid, instance);
			return EXIT_FAILURE;
		}
		(void)unlink(filename);
		/* Seek to the last byte and write it to size the file */
		if (lseek(fd, sz - sizeof(ch), SEEK_SET) < 0) {
			pr_failed_err(name, "lseek");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);
			return EXIT_FAILURE;
		}
redo:
		ret = write(fd, &ch, sizeof(ch));
		if (ret != sizeof(ch)) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo;
			pr_failed_err(name, "write");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);
			return EXIT_FAILURE;
		}
		/* Switch from anonymous/private to shared file mapping */
		flags &= ~(MAP_ANONYMOUS | MAP_PRIVATE);
		flags |= MAP_SHARED;
	}

	do {
		uint8_t mapped[pages4k];	/* per-page mapped state */
		uint8_t *mappings[pages4k];	/* per-page base addresses */
		size_t n;

		if (!opt_do_run)
			break;
		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, flags, fd, 0);
		if (buf == MAP_FAILED) {
			/* Force MAP_POPULATE off, just in case */
#ifdef MAP_POPULATE
			flags &= ~MAP_POPULATE;
#endif
			continue;	/* Try again */
		}
		if (opt_flags & OPT_FLAGS_MMAP_FILE) {
			memset(buf, 0xff, sz);
#if !defined(__gnu_hurd__)
			(void)msync(buf, sz, ms_flags);
#endif
		}
		(void)madvise_random(buf, sz);
		/* NOTE(review): passes opt_mmap_bytes, which can exceed the
		   page-rounded sz actually mapped — confirm this is intended */
		(void)mincore_touch_pages(buf, opt_mmap_bytes);
		stress_mmap_mprotect(name, buf, sz);
		memset(mapped, PAGE_MAPPED, sizeof(mapped));
		for (n = 0; n < pages4k; n++)
			mappings[n] = buf + (n * page_size);

		/* Ensure we can write to the mapped pages */
		stress_mmap_set(buf, sz);
		if (opt_flags & OPT_FLAGS_VERIFY) {
			if (stress_mmap_check(buf, sz) < 0)
				pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
					"not contain expected data\n", name, sz);
		}

		/*
		 *  Step #1, unmap all pages in random order
		 */
		(void)mincore_touch_pages(buf, opt_mmap_bytes);
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;

			/* Scan forward from a random page for one still mapped */
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;

				if (mapped[page] == PAGE_MAPPED) {
					mapped[page] = 0;
					(void)madvise_random(mappings[page], page_size);
					stress_mmap_mprotect(name, mappings[page], page_size);
					(void)munmap(mappings[page], page_size);
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
		(void)munmap(buf, sz);
#ifdef MAP_FIXED
		/*
		 *  Step #2, map them back in random order
		 */
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;

			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;

				if (!mapped[page]) {
					off_t offset = (opt_flags & OPT_FLAGS_MMAP_FILE) ?
						page * page_size : 0;
					/*
					 * Attempt to map them back into the original address, this
					 * may fail (it's not the most portable operation), so keep
					 * track of failed mappings too
					 */
					mappings[page] = mmap(mappings[page], page_size,
						PROT_READ | PROT_WRITE,
						MAP_FIXED | flags, fd, offset);
					if (mappings[page] == MAP_FAILED) {
						mapped[page] = PAGE_MAPPED_FAIL;
						mappings[page] = NULL;
					} else {
						(void)mincore_touch_pages(mappings[page], page_size);
						(void)madvise_random(mappings[page], page_size);
						stress_mmap_mprotect(name, mappings[page], page_size);
						mapped[page] = PAGE_MAPPED;
						/* Ensure we can write to the mapped page */
						stress_mmap_set(mappings[page], page_size);
						if (stress_mmap_check(mappings[page], page_size) < 0)
							pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
								"not contain expected data\n", name, page_size);
						if (opt_flags & OPT_FLAGS_MMAP_FILE) {
							memset(mappings[page], n, page_size);
#if !defined(__gnu_hurd__)
							(void)msync(mappings[page], page_size, ms_flags);
#endif
						}
					}
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
#endif
cleanup:
		/*
		 *  Step #3, unmap them all
		 */
		for (n = 0; n < pages4k; n++) {
			if (mapped[n] & PAGE_MAPPED) {
				(void)madvise_random(mappings[n], page_size);
				stress_mmap_mprotect(name, mappings[n], page_size);
				(void)munmap(mappings[n], page_size);
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);
	}
	return EXIT_SUCCESS;
}
/* * stress_hdd * stress I/O via writes */ int stress_hdd( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { uint8_t *buf = NULL; uint64_t i, min_size, remainder; const pid_t pid = getpid(); int ret, rc = EXIT_FAILURE; char filename[PATH_MAX]; int flags = O_CREAT | O_RDWR | O_TRUNC | opt_hdd_oflags; int fadvise_flags = opt_hdd_flags & HDD_OPT_FADV_MASK; if (!set_hdd_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_hdd_bytes = MAX_HDD_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_hdd_bytes = MIN_HDD_BYTES; } if (!set_hdd_write_size) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_hdd_write_size = MAX_HDD_WRITE_SIZE; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_hdd_write_size = MIN_HDD_WRITE_SIZE; } if (opt_hdd_flags & HDD_OPT_O_DIRECT) { min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ? HDD_IO_VEC_MAX * BUF_ALIGNMENT : MIN_HDD_WRITE_SIZE; } else { min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ? HDD_IO_VEC_MAX * MIN_HDD_WRITE_SIZE : MIN_HDD_WRITE_SIZE; } /* Ensure I/O size is not too small */ if (opt_hdd_write_size < min_size) { opt_hdd_write_size = min_size; pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes\n", name, opt_hdd_write_size); } /* Ensure we get same sized iovec I/O sizes */ remainder = opt_hdd_write_size % HDD_IO_VEC_MAX; if ((opt_hdd_flags & HDD_OPT_IOVEC) && (remainder != 0)) { opt_hdd_write_size += HDD_IO_VEC_MAX - remainder; pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes in iovec mode\n", name, opt_hdd_write_size); } /* Ensure complete file size is not less than the I/O size */ if (opt_hdd_bytes < opt_hdd_write_size) { opt_hdd_bytes = opt_hdd_write_size; pr_inf(stderr, "%s: increasing file size to write size of %" PRIu64 " bytes\n", name, opt_hdd_bytes); } if (stress_temp_dir_mk(name, pid, instance) < 0) return EXIT_FAILURE; /* Must have some write option */ if ((opt_hdd_flags & HDD_OPT_WR_MASK) == 0) opt_hdd_flags |= HDD_OPT_WR_SEQ; /* Must have some read option */ if ((opt_hdd_flags & 
HDD_OPT_RD_MASK) == 0) opt_hdd_flags |= HDD_OPT_RD_SEQ; ret = posix_memalign((void **)&buf, BUF_ALIGNMENT, (size_t)opt_hdd_write_size); if (ret || !buf) { pr_err(stderr, "%s: cannot allocate buffer\n", name); (void)stress_temp_dir_rm(name, pid, instance); return EXIT_FAILURE; } for (i = 0; i < opt_hdd_write_size; i++) buf[i] = mwc8(); (void)stress_temp_filename(filename, sizeof(filename), name, pid, instance, mwc32()); do { int fd; (void)umask(0077); if ((fd = open(filename, flags, S_IRUSR | S_IWUSR)) < 0) { pr_failed_err(name, "open"); goto finish; } if (ftruncate(fd, (off_t)0) < 0) { pr_failed_err(name, "ftruncate"); (void)close(fd); goto finish; } (void)unlink(filename); if (stress_hdd_advise(name, fd, fadvise_flags) < 0) { (void)close(fd); goto finish; } /* Random Write */ if (opt_hdd_flags & HDD_OPT_WR_RND) { for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) { size_t j; off_t offset = (i == 0) ? opt_hdd_bytes : (mwc64() % opt_hdd_bytes) & ~511; ssize_t ret; if (lseek(fd, offset, SEEK_SET) < 0) { pr_failed_err(name, "lseek"); (void)close(fd); goto finish; } rnd_wr_retry: if (!opt_do_run || (max_ops && *counter >= max_ops)) break; for (j = 0; j < opt_hdd_write_size; j++) buf[j] = (offset + j) & 0xff; ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size); if (ret <= 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto rnd_wr_retry; if (errno) { pr_failed_err(name, "write"); (void)close(fd); goto finish; } continue; } (*counter)++; } } /* Sequential Write */ if (opt_hdd_flags & HDD_OPT_WR_SEQ) { for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) { ssize_t ret; size_t j; seq_wr_retry: if (!opt_do_run || (max_ops && *counter >= max_ops)) break; for (j = 0; j < opt_hdd_write_size; j += 512) buf[j] = (i + j) & 0xff; ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size); if (ret <= 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto seq_wr_retry; if (errno) { pr_failed_err(name, "write"); (void)close(fd); goto finish; } continue; } (*counter)++; 
} } /* Sequential Read */ if (opt_hdd_flags & HDD_OPT_RD_SEQ) { uint64_t misreads = 0; uint64_t baddata = 0; if (lseek(fd, 0, SEEK_SET) < 0) { pr_failed_err(name, "lseek"); (void)close(fd); goto finish; } for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) { ssize_t ret; seq_rd_retry: if (!opt_do_run || (max_ops && *counter >= max_ops)) break; ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size); if (ret <= 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto seq_rd_retry; if (errno) { pr_failed_err(name, "read"); (void)close(fd); goto finish; } continue; } if (ret != (ssize_t)opt_hdd_write_size) misreads++; if (opt_flags & OPT_FLAGS_VERIFY) { size_t j; for (j = 0; j < opt_hdd_write_size; j += 512) { uint8_t v = (i + j) & 0xff; if (opt_hdd_flags & HDD_OPT_WR_SEQ) { /* Write seq has written to all of the file, so it should always be OK */ if (buf[0] != v) baddata++; } else { /* Write rnd has written to some of the file, so data either zero or OK */ if (buf[0] != 0 && buf[0] != v) baddata++; } } } (*counter)++; } if (misreads) pr_dbg(stderr, "%s: %" PRIu64 " incomplete sequential reads\n", name, misreads); if (baddata) pr_fail(stderr, "%s: incorrect data found %" PRIu64 " times\n", name, baddata); } /* Random Read */ if (opt_hdd_flags & HDD_OPT_RD_RND) { uint64_t misreads = 0; uint64_t baddata = 0; for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) { ssize_t ret; off_t offset = (mwc64() % (opt_hdd_bytes - opt_hdd_write_size)) & ~511; if (lseek(fd, offset, SEEK_SET) < 0) { pr_failed_err(name, "lseek"); (void)close(fd); goto finish; } rnd_rd_retry: if (!opt_do_run || (max_ops && *counter >= max_ops)) break; ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size); if (ret <= 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto rnd_rd_retry; if (errno) { pr_failed_err(name, "read"); (void)close(fd); goto finish; } continue; } if (ret != (ssize_t)opt_hdd_write_size) misreads++; if (opt_flags & OPT_FLAGS_VERIFY) { size_t j; for (j = 0; j < 
opt_hdd_write_size; j += 512) { uint8_t v = (i + j) & 0xff; if (opt_hdd_flags & HDD_OPT_WR_SEQ) { /* Write seq has written to all of the file, so it should always be OK */ if (buf[0] != v) baddata++; } else { /* Write rnd has written to some of the file, so data either zero or OK */ if (buf[0] != 0 && buf[0] != v) baddata++; } } } (*counter)++; } if (misreads) pr_dbg(stderr, "%s: %" PRIu64 " incomplete random reads\n", name, misreads); } (void)close(fd); } while (opt_do_run && (!max_ops || *counter < max_ops)); rc = EXIT_SUCCESS; finish: free(buf); (void)stress_temp_dir_rm(name, pid, instance); return rc; }