/*
 *  stress_bsearch()
 *	stress bsearch: repeatedly binary-search every element of a
 *	sorted array and (optionally) verify each lookup.
 *	Returns EXIT_SUCCESS, or EXIT_FAILURE on allocation failure.
 */
int stress_bsearch(
	uint64_t *const counter,	/* bogo-op counter, bumped per full sweep */
	const uint32_t instance,	/* stressor instance number (unused) */
	const uint64_t max_ops,		/* stop after this many ops; 0 = unbounded */
	const char *name)		/* stressor name for log messages */
{
	int32_t *data, *ptr, prev = 0;
	size_t n, n8, i;

	(void)instance;

	/* No explicit size set: apply min/max overrides from global flags */
	if (!set_bsearch_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_bsearch_size = MAX_BSEARCH_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_bsearch_size = MIN_BSEARCH_SIZE;
	}
	n = (size_t)opt_bsearch_size;
	n8 = (n + 7) & ~7; /* allocate in multiples of 8 */

	if ((data = malloc(sizeof(int32_t) * n8)) == NULL) {
		pr_failed_dbg(name, "malloc");
		return EXIT_FAILURE;
	}

	/*
	 * Populate with ascending data; SETDATA presumably stores a
	 * monotonically increasing value derived from v into data[i]
	 * and advances i and prev — TODO confirm against the macro
	 * definition. Eight calls per iteration match the n8 rounding.
	 */
	prev = 0;
	for (i = 0; i < n;) {
		uint64_t v = mwc64();

		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
	}

	do {
		/* Search for every element; each must be found at its value */
		for (ptr = data, i = 0; i < n; i++, ptr++) {
			int32_t *result;

			result = bsearch(ptr, data, n, sizeof(*ptr), cmp);
			if (opt_flags & OPT_FLAGS_VERIFY) {
				if (result == NULL)
					pr_fail(stderr, "%s: element %zu could not be found\n", name, i);
				else if (*result != *ptr)
					pr_fail(stderr, "%s: element %zu found %" PRIu32 ", expecting %" PRIu32 "\n", name, i, *result, *ptr);
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	free(data);
	return EXIT_SUCCESS;
}
/*
 *  stress_lsearch()
 *	stress lsearch: insert pseudo-random values with lsearch() then
 *	look each one up again with lfind(), verifying when requested.
 *	Returns EXIT_SUCCESS, or EXIT_NO_RESOURCE if allocation fails.
 */
static int stress_lsearch(const args_t *args)
{
	int32_t *data, *root;
	size_t i, max;
	uint64_t lsearch_size = DEFAULT_LSEARCH_SIZE;

	/* No user-specified size: honour min/max override flags */
	if (!get_setting("lsearch-size", &lsearch_size)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			lsearch_size = MAX_LSEARCH_SIZE;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			lsearch_size = MIN_LSEARCH_SIZE;
	}
	max = (size_t)lsearch_size;

	/* data = values to insert, root = lsearch table of same capacity */
	if ((data = calloc(max, sizeof(*data))) == NULL) {
		pr_fail_dbg("malloc");
		return EXIT_NO_RESOURCE;
	}
	if ((root = calloc(max, sizeof(*data))) == NULL) {
		free(data);
		pr_fail_dbg("malloc");
		return EXIT_NO_RESOURCE;
	}

	do {
		size_t n = 0;	/* element count, maintained by lsearch */

		/* Step #1, populate with data */
		for (i = 0; g_keep_stressing_flag && i < max; i++) {
			void *ptr;

			/* xor with i keeps values mostly unique */
			data[i] = ((mwc32() & 0xfff) << 20) ^ i;
			ptr = lsearch(&data[i], root, &n, sizeof(*data), cmp);
			(void)ptr;
		}
		/* Step #2, find */
		for (i = 0; g_keep_stressing_flag && i < n; i++) {
			int32_t *result;

			result = lfind(&data[i], root, &n, sizeof(*data), cmp);
			if (g_opt_flags & OPT_FLAGS_VERIFY) {
				if (result == NULL)
					pr_fail("%s: element %zu could not be found\n", args->name, i);
				else if (*result != data[i])
					pr_fail("%s: element %zu found %" PRIu32 ", expecting %" PRIu32 "\n",
						args->name, i, *result, data[i]);
			}
		}
		inc_counter(args);
	} while (keep_stressing());

	free(root);
	free(data);
	return EXIT_SUCCESS;
}
/* * stress on sched_kill() * stress system by rapid kills */ int stress_kill( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { struct sigaction new_action; const pid_t pid = getpid(); (void)instance; (void)name; memset(&new_action, 0, sizeof new_action); new_action.sa_handler = SIG_IGN; sigemptyset(&new_action.sa_mask); new_action.sa_flags = 0; if (sigaction(SIGUSR1, &new_action, NULL) < 0) { pr_failed_err(name, "sigusr1"); return EXIT_FAILURE; } do { int ret; ret = kill(pid, SIGUSR1); if ((ret < 0) && (opt_flags & OPT_FLAGS_VERIFY)) pr_fail(stderr, "%s: kill failed: errno=%d (%s)\n", name, errno, strerror(errno)); /* Zero signal can be used to see if process exists */ ret = kill(pid, 0); if ((ret < 0) && (opt_flags & OPT_FLAGS_VERIFY)) pr_fail(stderr, "%s: kill failed: errno=%d (%s)\n", name, errno, strerror(errno)); /* * Zero signal can be used to see if process exists, * -1 pid means signal sent to every process caller has * permission to send to */ ret = kill(-1, 0); if ((ret < 0) && (opt_flags & OPT_FLAGS_VERIFY)) pr_fail(stderr, "%s: kill failed: errno=%d (%s)\n", name, errno, strerror(errno)); (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); return EXIT_SUCCESS; }
static void check_flag( const args_t *args, const char *ioctl_name, const int fd, const int flag, const int ret, const bool set) { #if defined(F_GETFL) if (ret == 0) { int flags; flags = fcntl(fd, F_GETFL, 0); /* * The fcntl failed, so checking is not a valid * thing to sanity check with. */ if (errno != 0) return; if ((set && !(flags & flag)) || (!set && (flags & flag))) pr_fail("%s: ioctl %s failed, unexpected flags when checked with F_GETFL\n", args->name, ioctl_name); } #else (void)args; (void)ioctl_name; (void)fd; (void)flag; (void)ret; #endif }
/*
 *  stress_mmap_mprotect()
 *	cycle through page protection settings on a region of mmap'd
 *	memory (PROT_NONE -> READ -> WRITE -> EXEC -> READ|WRITE),
 *	flagging any mprotect failure. No-op unless the
 *	OPT_FLAGS_MMAP_MPROTECT option is enabled.
 */
static void stress_mmap_mprotect(const char *name, void *addr, const size_t len)
{
	if (opt_flags & OPT_FLAGS_MMAP_MPROTECT) {
		/* Cycle through protection settings */
		if (mprotect(addr, len, PROT_NONE) < 0)
			pr_fail(stderr, "%s: mprotect set to PROT_NONE failed\n", name);
		if (mprotect(addr, len, PROT_READ) < 0)
			pr_fail(stderr, "%s: mprotect set to PROT_READ failed\n", name);
		if (mprotect(addr, len, PROT_WRITE) < 0)
			pr_fail(stderr, "%s: mprotect set to PROT_WRITE failed\n", name);
		if (mprotect(addr, len, PROT_EXEC) < 0)
			pr_fail(stderr, "%s: mprotect set to PROT_EXEC failed\n", name);
		/* Finish with the region readable and writable again */
		if (mprotect(addr, len, PROT_READ | PROT_WRITE) < 0)
			pr_fail(stderr, "%s: mprotect set to PROT_READ | PROT_WRITE failed\n", name);
	}
}
/*
 *  do_quotactl()
 *	issue a single quotactl command and account the outcome:
 *	*tested is always incremented; on failure *enosys or *failed
 *	is incremented depending on errno. The failure warning for a
 *	given command (identified by its flag bit) is only logged once
 *	across all calls, via the static failed_mask.
 */
static void do_quotactl(
	const char *name,	/* stressor name for log messages */
	const int flag,		/* unique bit identifying this command */
	const char *cmdname,	/* human-readable command name */
	int *tested,		/* count of commands attempted */
	int *failed,		/* count of non-ENOSYS failures */
	int *enosys,		/* count of ENOSYS failures */
	int cmd,		/* quotactl command */
	const char *special,	/* block device path */
	int id,			/* user/group id */
	caddr_t addr)		/* command-specific data */
{
	static int failed_mask = 0;	/* bits of commands already warned about */
	int ret = quotactl(cmd, special, id, addr);

	(*tested)++;
	if (ret < 0) {
		if ((failed_mask & flag) == 0) {
			/* Just issue the warning once, reduce log spamming */
			failed_mask |= flag;
			pr_fail(stderr, "%s: quotactl command %s failed: errno=%d (%s)\n",
				name, cmdname, errno, strerror(errno));
		}
		if (errno == ENOSYS)
			(*enosys)++;
		else
			(*failed)++;
	}
}
/*
 *  strchk()
 *	when verification is enabled, report a string operation
 *	whose result (ok) was not as expected.
 */
static inline void strchk(
	const char *name,
	const int ok,
	const char *msg)
{
	/* Nothing to do unless verification is on and the check failed */
	if (!(opt_flags & OPT_FLAGS_VERIFY))
		return;
	if (ok)
		return;

	pr_fail(stderr, "%s: %s did not return expected result\n",
		name, msg);
}
/*
 *  stress_affinity()
 *	stress the system by changing CPU affinity periodically,
 *	either walking CPUs sequentially or hopping randomly when
 *	OPT_FLAGS_AFFINITY_RAND is set. Verifies (optionally) that
 *	the affinity mask actually took effect.
 */
int stress_affinity(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* stop after this many ops; 0 = unbounded */
	const char *name)		/* stressor name for log messages */
{
	unsigned long int cpu = 0;
	const unsigned long int cpus = (unsigned long int)
		stress_get_processors_configured();
	cpu_set_t mask;

	(void)instance;
	(void)name;

	do {
		/* Pick next CPU: random (top bits of mwc32) or round-robin */
		cpu = (opt_flags & OPT_FLAGS_AFFINITY_RAND) ?
			(mwc32() >> 4) : cpu + 1;
		cpu %= cpus;
		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
			pr_fail(stderr, "%s: failed to move to CPU %lu\n",
				name, cpu);
#if defined(_POSIX_PRIORITY_SCHEDULING)
			/* Give the scheduler a chance before retrying */
			sched_yield();
#endif
		} else {
			/* Now get and check */
			CPU_ZERO(&mask);
			CPU_SET(cpu, &mask);
			sched_getaffinity(0, sizeof(mask), &mask);
			if ((opt_flags & OPT_FLAGS_VERIFY) &&
			    (!CPU_ISSET(cpu, &mask)))
				pr_fail(stderr, "%s: failed to move to CPU %lu\n",
					name, cpu);
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
/*
 *  stress_capgetset_pid()
 *	exercise capget() (and optionally capset()) on the given pid.
 *	exists = true means the pid is expected to be alive, so an
 *	ESRCH error is also reported as a failure.
 *	Increments *counter and returns the last cap* return value.
 */
static int stress_capgetset_pid(
	const char *name,	/* stressor name for log messages */
	const pid_t pid,	/* target process */
	const bool do_set,	/* also attempt capset()? */
	uint64_t *counter,	/* bogo-op counter */
	const bool exists)	/* is the pid expected to exist? */
{
	int ret;
	struct __user_cap_header_struct uch;
	struct __user_cap_data_struct ucd[_LINUX_CAPABILITY_U32S_3];

	memset(&uch, 0, sizeof uch);
	memset(ucd, 0, sizeof ucd);

	uch.version = _LINUX_CAPABILITY_VERSION_3;
	uch.pid = pid;

	ret = capget(&uch, ucd);
	if (ret < 0) {
		/* ESRCH is only a failure if the pid should exist */
		if (((errno == ESRCH) && exists) ||
		    (errno != ESRCH)) {
			pr_fail(stderr, "%s: capget on pid %d failed: errno=%d (%s)\n",
				name, pid, errno, strerror(errno));
		}
	}

	if (do_set) {
		ret = capset(&uch, ucd);
		if (ret < 0) {
			if (((errno == ESRCH) && exists) ||
			    (errno != ESRCH)) {
				/* BUGFIX: message previously said "capget" */
				pr_fail(stderr, "%s: capset on pid %d failed: errno=%d (%s)\n",
					name, pid, errno, strerror(errno));
			}
		}
	}

	(*counter)++;
	return ret;
}
/*
 *  stress_sigrestore()
 *	restore a previously saved signal handler.
 *	Returns 0 on success, -1 on failure (with a failure logged).
 */
int stress_sigrestore(
	const char *name,
	const int signum,
	struct sigaction *orig_action)
{
	const int rc = sigaction(signum, orig_action, NULL);

	if (rc >= 0)
		return 0;

	pr_fail(stderr, "%s: sigaction %s restore: errno=%d (%s)\n",
		name, stress_strsignal(signum), errno, strerror(errno));
	return -1;
}
/*
 *  stress_fork_fn()
 *	stress by forking and exiting, using the supplied fork
 *	function fork_fn (fork or vfork). Each round spawns up to
 *	fork_max children (each exits immediately), reaps them, and
 *	then reports any fork failures when verifying.
 *	NOTE(review): fork_max is assumed to be <= MAX_FORKS — the
 *	pids[] array is not bounds-checked here; confirm at call site.
 */
int stress_fork_fn(
	uint64_t *const counter,	/* bogo-op counter, bumped per reaped child */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* stop after this many ops; 0 = unbounded */
	const char *name,		/* stressor name for log messages */
	pid_t (*fork_fn)(void),		/* fork or vfork */
	const uint64_t fork_max)	/* children per round */
{
	(void)instance;

	pid_t pids[MAX_FORKS];

	do {
		unsigned int i;

		memset(pids, 0, sizeof(pids));

		/* Spawn a batch of children that exit immediately */
		for (i = 0; i < fork_max; i++) {
			pids[i] = fork_fn();

			if (pids[i] == 0) {
				setpgid(0, pgrp);
				stress_parent_died_alarm();

				/* Child, immediately exit */
				_exit(0);
			}
			if (pids[i] > -1)
				setpgid(pids[i], pgrp);
			if (!opt_do_run)
				break;
		}
		/* Reap every successfully forked child */
		for (i = 0; i < fork_max; i++) {
			if (pids[i] > 0) {
				int status;
				/* Parent, wait for child */
				(void)waitpid(pids[i], &status, 0);
				(*counter)++;
			}
		}
		/* Report any fork failures when verification is on */
		for (i = 0; i < fork_max; i++) {
			if ((pids[i] < 0) && (opt_flags & OPT_FLAGS_VERIFY)) {
				pr_fail(stderr, "%s: fork failed\n", name);
			}
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
/*
 *  stress_filename_test()
 *	create a file, and check if it fails.
 *	should_pass = true  - create must pass
 *	should_pass = false - expect it to fail (name too long)
 */
void stress_filename_test(
	const char *name,
	const char *filename,
	const size_t sz_max,
	const bool should_pass)
{
	const int fd = creat(filename, S_IRUSR | S_IWUSR);

	if (fd >= 0) {
		/* Created OK; just clean up after ourselves */
		(void)close(fd);
		(void)unlink(filename);
		return;
	}

	/* ENAMETOOLONG is the one failure we expect for over-long names */
	if (!should_pass && (errno == ENAMETOOLONG))
		return;

	pr_fail(stderr, "%s: open failed on file of length "
		"%zu bytes, errno=%d (%s)\n",
		name, sz_max, errno, strerror(errno));
}
/*
 *  stress_sighandler()
 *	set signal handler in generic way, saving the previous
 *	handler into orig_action (if non-NULL).
 *	Returns 0 on success, -1 on failure.
 */
int stress_sighandler(
	const char *name,
	const int signum,
	void (*handler)(int),
	struct sigaction *orig_action)
{
	struct sigaction new_action;

	memset(&new_action, 0, sizeof new_action);
	sigemptyset(&new_action.sa_mask);
	new_action.sa_handler = handler;
	new_action.sa_flags = 0;

	if (sigaction(signum, &new_action, orig_action) >= 0)
		return 0;

	pr_fail(stderr, "%s: sigaction %s: errno=%d (%s)\n",
		name, stress_strsignal(signum), errno, strerror(errno));
	return -1;
}
/* * check_order() * check page order */ static void check_order( const args_t *args, const size_t stride, const mapdata_t *data, const size_t *order, const char *ordering) { size_t i; bool failed; for (failed = false, i = 0; i < N_PAGES; i++) { if (data[i * stride] != order[i]) { failed = true; break; } } if (failed) pr_fail("%s: remap %s order pages failed\n", args->name, ordering); }
/*
 *  stress_bind_mount()
 *	stress bind mounting: clone a child into fresh user/mount/pid
 *	namespaces (sharing VM) to run stress_bind_mount_child, then
 *	idle until the op budget is reached and kill/reap the child.
 */
int stress_bind_mount(
	uint64_t *const counter,	/* bogo-op counter (updated by child) */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* stop after this many ops; 0 = unbounded */
	const char *name)		/* stressor name for log messages */
{
	int pid = 0, status;
	context_t context;
	/*
	 * Stack direction probe: offset points near the appropriate end
	 * of the stack buffer (64 bytes of headroom) for clone().
	 */
	const ssize_t stack_offset =
		stress_get_stack_direction(&pid) *
		(CLONE_STACK_SIZE - 64);
	char stack[CLONE_STACK_SIZE];
	char *stack_top = stack + stack_offset;

	(void)instance;

	context.name = name;
	context.max_ops = max_ops;
	context.counter = counter;

	pid = clone(stress_bind_mount_child,
		stack_top, CLONE_NEWUSER | CLONE_NEWNS | CLONE_NEWPID | CLONE_VM,
		&context, 0);
	if (pid < 0) {
		int rc = exit_status(errno);

		pr_fail(stderr, "%s: clone failed: errno=%d (%s)\n",
			name, errno, strerror(errno));
		return rc;
	}
	do {
		/* Twiddle thumbs */
		sleep(1);
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)kill(pid, SIGKILL);
	(void)waitpid(pid, &status, 0);

	return EXIT_SUCCESS;
}
/*
 *  stress_bind_mount_child()
 *	aggressively perform recursive bind mounts of / onto /;
 *	this can force out of memory situations. Runs inside the
 *	namespaces set up by stress_bind_mount().
 */
int stress_bind_mount_child(void *arg)
{
	context_t *context = (context_t *)arg;

	setpgid(0, pgrp);
	stress_parent_died_alarm();

	do {
		if (mount("/", "/", "", MS_BIND | MS_REC, 0) < 0) {
			pr_fail(stderr, "%s: mount failed: errno=%d (%s)\n",
				context->name, errno, strerror(errno));
			break;
		}
		/*
		 * The following fails with -EBUSY, but try it anyhow
		 * just to make the kernel work harder
		 */
		(void)umount("/");
	} while (opt_do_run &&
		 (!context->max_ops || *(context->counter) < context->max_ops));

	return 0;
}
/* * try_remap() * try and remap old size to new size */ static int try_remap( const char *name, uint8_t **buf, const size_t old_sz, const size_t new_sz) { uint8_t *newbuf; int retry; #if defined(MREMAP_MAYMOVE) int flags = MREMAP_MAYMOVE; #else int flags = 0; #endif for (retry = 0; retry < 100; retry++) { if (!opt_do_run) return 0; newbuf = mremap(*buf, old_sz, new_sz, flags); if (newbuf != MAP_FAILED) { *buf = newbuf; return 0; } switch (errno) { case ENOMEM: case EAGAIN: continue; case EFAULT: case EINVAL: default: break; } } pr_fail(stderr, "%s: mremap failed, errno = %d (%s)\n", name, errno, strerror(errno)); return -1; }
/*
 *  stress_sys_read()
 *	read a /sys (or /proc style) file with multiple randomly
 *	sized non-blocking reads, up to a bounded total, then sanity
 *	check that a file we could open is also access(R_OK)-able.
 */
static inline void stress_sys_read(const char *name, const char *path)
{
	int fd;
	ssize_t i = 0;
	char buffer[SYS_BUF_SZ];

	/* Unreadable/absent files are silently skipped */
	if ((fd = open(path, O_RDONLY | O_NONBLOCK)) < 0)
		return;

	/*
	 *  Multiple randomly sized reads
	 */
	while (i < (4096 * SYS_BUF_SZ)) {
		/* Random read size in [1, sizeof(buffer)] */
		ssize_t ret, sz = 1 + (mwc32() % sizeof(buffer));
redo:
		if (!opt_do_run)
			break;
		ret = read(fd, buffer, sz);
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo;	/* transient, retry same read */
			break;
		}
		if (ret < sz)
			break;	/* short read: EOF or no more data */
		i += sz;
	}
	(void)close(fd);

	/* file should be R_OK if we've just opened it */
	if ((access(path, R_OK) < 0) &&
	    (opt_flags & OPT_FLAGS_VERIFY)) {
		pr_fail(stderr, "%s: R_OK access failed on %s which "
			"could be opened, errno=%d (%s)\n",
			name, path, errno, strerror(errno));
	}
}
/*
 *  stress_vm_child()
 *	child side of the process_vm_readv/writev stressor.
 *	Protocol per round over the two pipes:
 *	  1. send parent the address of our mmap'd buffer (pipe_wr)
 *	  2. wait for parent's "memory changed" message (pipe_rd)
 *	  3. optionally verify every page holds the value the parent
 *	     wrote, then zero it for the next round.
 *	A message with addr == 0 (either direction) means "terminating".
 */
int stress_vm_child(void *arg)
{
	context_t *ctxt = (context_t *)arg;
	uint8_t *buf;
	int ret = EXIT_SUCCESS;
	addr_msg_t msg_rd, msg_wr;

	setpgid(0, pgrp);
	stress_parent_died_alarm();

	/* Close unwanted ends */
	(void)close(ctxt->pipe_wr[0]);	/* we only write pipe_wr */
	(void)close(ctxt->pipe_rd[1]);	/* we only read pipe_rd */

	buf = mmap(NULL, ctxt->sz, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (buf == MAP_FAILED) {
		ret = exit_status(errno);
		pr_fail_dbg(ctxt->name, "mmap");
		goto cleanup;
	}

	while (opt_do_run) {
		uint8_t *ptr, *end = buf + ctxt->sz;
		int ret;	/* shadows outer ret; local syscall result */

		memset(&msg_wr, 0, sizeof(msg_wr));
		msg_wr.addr = buf;
		msg_wr.val = 0;

		/* Send address of buffer to parent */
redo_wr1:
		ret = write(ctxt->pipe_wr[1], &msg_wr, sizeof(msg_wr));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo_wr1;
			if (errno != EBADF)
				pr_fail_dbg(ctxt->name, "write");
			break;
		}
redo_rd1:
		/* Wait for parent to populate data */
		ret = read(ctxt->pipe_rd[0], &msg_rd, sizeof(msg_rd));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo_rd1;
			pr_fail_dbg(ctxt->name, "read");
			break;
		}
		if (ret == 0)	/* parent closed the pipe */
			break;
		if (ret != sizeof(msg_rd)) {
			pr_fail_dbg(ctxt->name, "read");
			break;
		}

		if (opt_flags & OPT_FLAGS_VERIFY) {
			/* Check memory altered by parent is sane */
			for (ptr = buf; ptr < end; ptr += ctxt->page_size) {
				if (*ptr != msg_rd.val) {
					pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
						ctxt->name, ptr, *ptr, msg_rd.val);
					goto cleanup;
				}
				*ptr = 0;	/* reset for next round */
			}
		}
	}
cleanup:
	/* Tell parent we're done */
	msg_wr.addr = 0;
	msg_wr.val = 0;
	if (write(ctxt->pipe_wr[1], &msg_wr, sizeof(msg_wr)) <= 0) {
		if (errno != EBADF)
			pr_dbg(stderr, "%s: failed to write termination message "
				"over pipe: errno=%d (%s)\n",
				ctxt->name, errno, strerror(errno));
	}
	(void)close(ctxt->pipe_wr[1]);
	(void)close(ctxt->pipe_rd[0]);
	(void)munmap(buf, ctxt->sz);
	return ret;
}
/*
 *  stress_vm_parent()
 *	parent side of the process_vm_readv/writev stressor.
 *	Protocol per round (mirror of stress_vm_child):
 *	  1. receive the child's buffer address (pipe_wr)
 *	  2. process_vm_readv the child's buffer, verify it was
 *	     zeroed, then fill the local copy with a counter value
 *	  3. process_vm_writev it back and notify the child (pipe_rd)
 *	addr == NULL in either direction means "terminating".
 *	Always returns EXIT_SUCCESS (errors end the loop early).
 */
int stress_vm_parent(context_t *ctxt)
{
	/* Parent */
	int status;
	uint8_t val = 0;	/* per-round fill value, wraps at 256 */
	uint8_t *localbuf;
	addr_msg_t msg_rd, msg_wr;

	setpgid(ctxt->pid, pgrp);

	localbuf = mmap(NULL, ctxt->sz, PROT_READ | PROT_WRITE,
		MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
	if (localbuf == MAP_FAILED) {
		(void)close(ctxt->pipe_wr[0]);
		(void)close(ctxt->pipe_wr[1]);
		(void)close(ctxt->pipe_rd[0]);
		(void)close(ctxt->pipe_rd[1]);
		pr_fail_dbg(ctxt->name, "mmap");
		return EXIT_FAILURE;
	}

	/* Close unwanted ends */
	(void)close(ctxt->pipe_wr[1]);	/* we only read pipe_wr */
	(void)close(ctxt->pipe_rd[0]);	/* we only write pipe_rd */

	do {
		struct iovec local[1], remote[1];
		uint8_t *ptr, *end = localbuf + ctxt->sz;
		int ret;

		/* Wait for address of child's buffer */
redo_rd2:
		if (!opt_do_run)
			break;
		ret = read(ctxt->pipe_wr[0], &msg_rd, sizeof(msg_rd));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo_rd2;
			pr_fail_dbg(ctxt->name, "read");
			break;
		}
		if (ret == 0)	/* child closed the pipe */
			break;
		if (ret != sizeof(msg_rd)) {
			pr_fail_dbg(ctxt->name, "read");
			break;
		}
		/* Child telling us it's terminating? */
		if (!msg_rd.addr)
			break;

		/* Perform read from child's memory */
		local[0].iov_base = localbuf;
		local[0].iov_len = ctxt->sz;
		remote[0].iov_base = msg_rd.addr;
		remote[0].iov_len = ctxt->sz;
		if (process_vm_readv(ctxt->pid, local, 1, remote, 1, 0) < 0) {
			pr_fail_dbg(ctxt->name, "process_vm_readv");
			break;
		}

		if (opt_flags & OPT_FLAGS_VERIFY) {
			/* Check data is sane: child should have zeroed pages */
			for (ptr = localbuf; ptr < end; ptr += ctxt->page_size) {
				if (*ptr) {
					pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
						ctxt->name, ptr, *ptr, msg_rd.val);
					goto fail;
				}
				*ptr = 0;
			}
			/* Set memory to the value the child will check */
			for (ptr = localbuf; ptr < end; ptr += ctxt->page_size)
				*ptr = val;
		}

		/* Write to child's memory */
		msg_wr = msg_rd;
		local[0].iov_base = localbuf;
		local[0].iov_len = ctxt->sz;
		remote[0].iov_base = msg_rd.addr;
		remote[0].iov_len = ctxt->sz;
		if (process_vm_writev(ctxt->pid, local, 1, remote, 1, 0) < 0) {
			pr_fail_dbg(ctxt->name, "process_vm_writev");
			break;
		}
		msg_wr.val = val;
		val++;

redo_wr2:
		if (!opt_do_run)
			break;
		/* Inform child that memory has been changed */
		ret = write(ctxt->pipe_rd[1], &msg_wr, sizeof(msg_wr));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo_wr2;
			if (errno != EBADF)
				pr_fail_dbg(ctxt->name, "write");
			break;
		}
		(*ctxt->counter)++;
	} while (opt_do_run &&
		 (!ctxt->max_ops || *ctxt->counter < ctxt->max_ops));
fail:
	/* Tell child we're done */
	msg_wr.addr = NULL;
	msg_wr.val = 0;
	if (write(ctxt->pipe_wr[0], &msg_wr, sizeof(msg_wr)) < 0) {
		if (errno != EBADF)
			pr_dbg(stderr, "%s: failed to write "
				"termination message "
				"over pipe: errno=%d (%s)\n",
				ctxt->name, errno, strerror(errno));
	}
	(void)close(ctxt->pipe_wr[0]);
	(void)close(ctxt->pipe_rd[1]);
	(void)kill(ctxt->pid, SIGKILL);
	(void)waitpid(ctxt->pid, &status, 0);
	(void)munmap(localbuf, ctxt->sz);
	return EXIT_SUCCESS;
}
/*
 *  stress_msg()
 *	stress by System V message queues: parent sends messages
 *	containing an incrementing 64-bit counter, forked child
 *	receives and (optionally) verifies the sequence. Parent
 *	finally sends MSG_STOP, kills/reaps the child and removes
 *	the queue.
 */
int stress_msg(
	uint64_t *const counter,	/* bogo-op counter, bumped per send */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* stop after this many ops; 0 = unbounded */
	const char *name)		/* stressor name for log messages */
{
	pid_t pid;
	int msgq_id;

	(void)instance;

	msgq_id = msgget(IPC_PRIVATE, S_IRUSR | S_IWUSR | IPC_CREAT | IPC_EXCL);
	if (msgq_id < 0) {
		pr_failed_dbg(name, "msgget");
		return EXIT_FAILURE;
	}
	pr_dbg(stderr, "System V message queue created, id: %d\n", msgq_id);

again:
	pid = fork();
	if (pid < 0) {
		/* Retry fork on transient EAGAIN while still running */
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		/* Child: receive until MSG_STOP or error, then exit */
		setpgid(0, pgrp);

		while (opt_do_run) {
			msg_t msg;
			uint64_t i;

			for (i = 0; ; i++) {
				uint64_t v;
				if (msgrcv(msgq_id, &msg, sizeof(msg.msg), 0, 0) < 0) {
					pr_failed_dbg(name, "msgrcv");
					break;
				}
				if (!strcmp(msg.msg, MSG_STOP))
					break;
				if (opt_flags & OPT_FLAGS_VERIFY) {
					/* Payload is the sender's loop index */
					memcpy(&v, msg.msg, sizeof(v));
					if (v != i)
						pr_fail(stderr, "%s: msgrcv: expected msg containing 0x%" PRIx64 " but received 0x%" PRIx64 " instead\n", name, i, v);
				}
			}
			/* Note: exits on the first pass; while() never re-loops */
			exit(EXIT_SUCCESS);
		}
	} else {
		msg_t msg;
		uint64_t i = 0;
		int status;

		/* Parent */
		setpgid(pid, pgrp);

		do {
			/* Send the current loop index as the payload */
			memcpy(msg.msg, &i, sizeof(i));
			msg.mtype = 1;
			if (msgsnd(msgq_id, &msg, sizeof(i), 0) < 0) {
				if (errno != EINTR)
					pr_failed_dbg(name, "msgsnd");
				break;
			}
			i++;
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		/* Ask child to stop, then reap it and remove the queue */
		strncpy(msg.msg, MSG_STOP, sizeof(msg.msg));
		if (msgsnd(msgq_id, &msg, sizeof(msg.msg), 0) < 0)
			pr_failed_dbg(name, "termination msgsnd");
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);

		if (msgctl(msgq_id, IPC_RMID, NULL) < 0)
			pr_failed_dbg(name, "msgctl");
		else
			pr_dbg(stderr, "System V message queue deleted, id: %d\n", msgq_id);
	}
	return EXIT_SUCCESS;
}
/* * stress_chmod * stress chmod */ int stress_chmod( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { const pid_t ppid = getppid(); int i, fd = -1, rc = EXIT_FAILURE, retries = 0; mode_t all_mask = 0; char filename[PATH_MAX], dirname[PATH_MAX]; /* * Allow for multiple workers to chmod the *same* file */ stress_temp_dir(dirname, sizeof(dirname), name, ppid, 0); if (mkdir(dirname, S_IRWXU) < 0) { if (errno != EEXIST) { pr_failed_err(name, "mkdir"); return EXIT_FAILURE; } } (void)stress_temp_filename(filename, sizeof(filename), name, ppid, 0, 0); do { errno = 0; /* * Try and open the file, it may be impossible momentarily * because other chmod stressors have already created it and * changed the permission bits. If so, wait a while and retry. */ if ((fd = creat(filename, S_IRUSR | S_IWUSR)) < 0) { if (errno == EPERM || errno == EACCES) { (void)usleep(100000); continue; } pr_failed_err(name, "open"); goto tidy; } break; } while (opt_do_run && ++retries < 100); if (retries >= 100) { pr_err(stderr, "%s: chmod: file %s took %d retries to create (instance %" PRIu32 ")\n", name, filename, retries, instance); goto tidy; } for (i = 0; modes[i]; i++) all_mask |= modes[i]; do { mode_t mask = 0; for (i = 0; modes[i]; i++) { mask |= modes[i]; if (do_fchmod(fd, i, mask, all_mask) < 0) { pr_fail(stderr, "%s: fchmod: errno=%d (%s)\n", name, errno, strerror(errno)); } if (do_chmod(filename, i, mask, all_mask) < 0) { if (errno == ENOENT || errno == ENOTDIR) { /* * File was removed during test by * another worker */ rc = EXIT_SUCCESS; goto tidy; } pr_fail(stderr, "%s: chmod: errno=%d (%s)\n", name, errno, strerror(errno)); } } (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); rc = EXIT_SUCCESS; tidy: (void)fchmod(fd, 0666); if (fd >= 0) (void)close(fd); (void)unlink(filename); (void)rmdir(dirname); return rc; }
/*
 *  stress_access()
 *	stress the access(2)/faccessat(2) family: for each entry in
 *	the modes[] table, set the file's mode with fchmod and check
 *	that access with the matching mode succeeds; then invert the
 *	mode bits and check that access now fails. Root is exempted
 *	from the negative check except where S_IXUSR makes it apply.
 */
static int stress_access(const args_t *args)
{
	int fd = -1, ret, rc = EXIT_FAILURE;
	char filename[PATH_MAX];
	const mode_t all_mask = 0700;	/* owner rwx bits under test */
	size_t i;
	/* root bypasses most permission checks — see dont_ignore below */
	const bool is_root = (geteuid() == 0);

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());

	(void)umask(0700);
	if ((fd = creat(filename, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err("creat");
		goto tidy;
	}

	do {
		for (i = 0; i < SIZEOF_ARRAY(modes); i++) {
			/* Positive check: set mode, access must succeed */
			ret = fchmod(fd, modes[i].chmod_mode);
			if (CHMOD_ERR(ret)) {
				pr_err("%s: fchmod %3.3o failed: %d (%s)\n",
					args->name, (unsigned int)modes[i].chmod_mode,
					errno, strerror(errno));
				goto tidy;
			}
			ret = access(filename, modes[i].access_mode);
			if (ret < 0) {
				pr_fail("%s: access %3.3o on chmod mode %3.3o failed: %d (%s)\n",
					args->name,
					modes[i].access_mode, (unsigned int)modes[i].chmod_mode,
					errno, strerror(errno));
			}
#if defined(HAVE_FACCESSAT)
			ret = faccessat(AT_FDCWD, filename, modes[i].access_mode, 0);
			if (ret < 0) {
				pr_fail("%s: faccessat %3.3o on chmod mode %3.3o failed: %d (%s)\n",
					args->name,
					modes[i].access_mode, (unsigned int)modes[i].chmod_mode,
					errno, strerror(errno));
			}
#endif
			if (modes[i].access_mode != 0) {
				/* Negative check: inverted mode, access should fail */
				const mode_t chmod_mode = modes[i].chmod_mode ^ all_mask;
				const bool s_ixusr = chmod_mode & S_IXUSR;
				/* root passes R/W checks regardless; only flag
				   unexpected success when that exemption can't apply */
				const bool dont_ignore = !(is_root && s_ixusr);

				ret = fchmod(fd, chmod_mode);
				if (CHMOD_ERR(ret)) {
					pr_err("%s: fchmod %3.3o failed: %d (%s)\n",
						args->name, (unsigned int)chmod_mode,
						errno, strerror(errno));
					goto tidy;
				}
				ret = access(filename, modes[i].access_mode);
				if ((ret == 0) && dont_ignore) {
					pr_fail("%s: access %3.3o on chmod mode %3.3o was ok (not expected): %d (%s)\n",
						args->name,
						modes[i].access_mode, (unsigned int)chmod_mode,
						errno, strerror(errno));
				}
#if defined(HAVE_FACCESSAT)
				ret = faccessat(AT_FDCWD, filename, modes[i].access_mode,
					AT_SYMLINK_NOFOLLOW);
				if ((ret == 0) && dont_ignore) {
					pr_fail("%s: faccessat %3.3o on chmod mode %3.3o was ok (not expected): %d (%s)\n",
						args->name,
						modes[i].access_mode, (unsigned int)chmod_mode,
						errno, strerror(errno));
				}
#endif
			}
		}
		inc_counter(args);
	} while (keep_stressing());

	rc = EXIT_SUCCESS;
tidy:
	if (fd >= 0) {
		(void)fchmod(fd, 0666);
		(void)close(fd);
	}
	(void)unlink(filename);
	(void)stress_temp_dir_rm_args(args);

	return rc;
}
/*
 *  stress_loop()
 *	stress loopback devices: repeatedly grab a free loop device,
 *	attach a (deleted) temp backing file, exercise status/resize
 *	ioctls, detach, and remove the device again. EBUSY on detach
 *	and removal is retried with short sleeps.
 */
static int stress_loop(const args_t *args)
{
	int ret, backing_fd, rc = EXIT_FAILURE;
	char backing_file[PATH_MAX];
	size_t backing_size = 2 * MB;

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		backing_file, sizeof(backing_file), mwc32());

	if ((backing_fd = open(backing_file, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR)) < 0) {
		pr_fail_err("open");
		goto tidy;
	}
	if (ftruncate(backing_fd, backing_size) < 0) {
		pr_fail_err("ftruncate");
		(void)close(backing_fd);
		goto tidy;
	}
	/* Keep the fd; the name can go — the kernel holds the inode */
	(void)unlink(backing_file);

	do {
		int ctrl_dev, loop_dev;
		int i;
		long dev_num;
		char dev_name[PATH_MAX];
		struct loop_info info;

		/*
		 *  Open loop control device
		 */
		ctrl_dev = open("/dev/loop-control", O_RDWR);
		if (ctrl_dev < 0) {
			pr_fail("%s: cannot open /dev/loop-control: %d (%s)\n",
				args->name, errno, strerror(errno));
			break;
		}
		/*
		 *  Attempt to get a free loop device
		 */
		dev_num = ioctl(ctrl_dev, LOOP_CTL_GET_FREE);
		if (dev_num < 0)
			goto next;
		/*
		 *  Open new loop device
		 */
		(void)snprintf(dev_name, sizeof(dev_name), "/dev/loop%ld", dev_num);
		loop_dev = open(dev_name, O_RDWR);
		if (loop_dev < 0)
			goto destroy_loop;
		/*
		 *  Associate loop device with backing storage
		 */
		ret = ioctl(loop_dev, LOOP_SET_FD, backing_fd);
		if (ret < 0)
			goto close_loop;
#if defined(LOOP_GET_STATUS)
		/*
		 *  Fetch loop device status information
		 */
		ret = ioctl(loop_dev, LOOP_GET_STATUS, &info);
		if (ret < 0)
			goto clr_loop;
		/*
		 *  Try to set some flags
		 */
		info.lo_flags |= (LO_FLAGS_AUTOCLEAR | LO_FLAGS_READ_ONLY);
#if defined(LOOP_SET_STATUS)
		ret = ioctl(loop_dev, LOOP_SET_STATUS, &info);
		(void)ret;
#endif
#endif
#if defined(LOOP_SET_CAPACITY)
		/*
		 *  Resize command (even though we have not changed size)
		 */
		ret = ftruncate(backing_fd, backing_size * 2);
		(void)ret;
		ret = ioctl(loop_dev, LOOP_SET_CAPACITY);
		(void)ret;
#endif
#if defined(LOOP_GET_STATUS)
clr_loop:
#endif
		/*
		 *  Disassociate backing store from loop device;
		 *  may need retries while the device is busy.
		 */
		for (i = 0; i < 1000; i++) {
			ret = ioctl(loop_dev, LOOP_CLR_FD, backing_fd);
			if (ret < 0) {
				if (errno == EBUSY) {
					(void)shim_usleep(10);
				} else {
					pr_fail("%s: failed to disassociate %s from backing store, "
						"errno=%d (%s)\n",
						args->name, dev_name, errno, strerror(errno));
					goto close_loop;
				}
			} else {
				break;
			}
		}
close_loop:
		(void)close(loop_dev);
		/*
		 *  Remove the loop device, may need several retries
		 *  if we get EBUSY
		 */
destroy_loop:
		for (i = 0; i < 1000; i++) {
			ret = ioctl(ctrl_dev, LOOP_CTL_REMOVE, dev_num);
			if ((ret < 0) && (errno == EBUSY)) {
				(void)shim_usleep(10);
			} else {
				break;
			}
		}
next:
		(void)close(ctrl_dev);
#if defined(LOOP_SET_CAPACITY)
		/* Restore the backing file to its original size */
		ret = ftruncate(backing_fd, backing_size);
		(void)ret;
#endif
		inc_counter(args);
	} while (keep_stressing());
	rc = EXIT_SUCCESS;
	(void)close(backing_fd);
tidy:
	(void)stress_temp_dir_rm_args(args);

	return rc;
}
/*
 *  stress_vm_rw()
 *	stress process_vm_readv/process_vm_writev: fork a child that
 *	mmaps a buffer and exchanges its address over pipes; the
 *	parent then reads/writes the child's memory directly and both
 *	sides (optionally) verify the page contents each round.
 *	A pipe message with addr == 0/NULL means "terminating".
 */
int stress_vm_rw(
	uint64_t *const counter,	/* bogo-op counter, bumped per parent round */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* stop after this many ops; 0 = unbounded */
	const char *name)		/* stressor name for log messages */
{
	pid_t pid;
	int pipe_wr[2], pipe_rd[2];	/* wr: child->parent, rd: parent->child */
	const size_t page_size = stress_get_pagesize();
	size_t sz;

	(void)instance;

	/* No explicit size set: apply min/max overrides from global flags */
	if (!set_vm_rw_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_vm_rw_bytes = MAX_VM_RW_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_vm_rw_bytes = MIN_VM_RW_BYTES;
	}
	sz = opt_vm_rw_bytes & ~(page_size - 1);	/* round down to page size */

	if (pipe(pipe_wr) < 0) {
		pr_failed_dbg(name, "pipe");
		return EXIT_FAILURE;
	}
	if (pipe(pipe_rd) < 0) {
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		pr_failed_dbg(name, "pipe");
		return EXIT_FAILURE;
	}

	pid = fork();
	if (pid < 0) {
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		/* Child */
		uint8_t *buf;
		int ret = EXIT_SUCCESS;
		addr_msg_t msg_rd, msg_wr;

		/* Close unwanted ends */
		(void)close(pipe_wr[0]);	/* child only writes pipe_wr */
		(void)close(pipe_rd[1]);	/* child only reads pipe_rd */

		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (buf == MAP_FAILED) {
			pr_failed_dbg(name, "mmap");
			ret = EXIT_FAILURE;
			goto cleanup;
		}

		for (;;) {
			uint8_t *ptr, *end = buf + sz;
			int ret;	/* shadows outer ret; local syscall result */

			memset(&msg_wr, 0, sizeof(msg_wr));
			msg_wr.addr = buf;
			msg_wr.val = 0;

			/* Send address of buffer to parent */
redo_wr1:
			ret = write(pipe_wr[1], &msg_wr, sizeof(msg_wr));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_wr1;
				if (errno != EBADF)
					pr_failed_dbg(name, "write");
				break;
			}
redo_rd1:
			/* Wait for parent to populate data */
			ret = read(pipe_rd[0], &msg_rd, sizeof(msg_rd));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_rd1;
				pr_failed_dbg(name, "read");
				break;
			}
			if (ret == 0)	/* parent closed the pipe */
				break;
			if (ret != sizeof(msg_rd)) {
				pr_failed_dbg(name, "read");
				break;
			}

			if (opt_flags & OPT_FLAGS_VERIFY) {
				/* Check memory altered by parent is sane */
				for (ptr = buf; ptr < end; ptr += page_size) {
					if (*ptr != msg_rd.val) {
						pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
							name, ptr, *ptr, msg_rd.val);
						goto cleanup;
					}
					*ptr = 0;	/* reset for next round */
				}
			}
		}
cleanup:
		/* Tell parent we're done */
		msg_wr.addr = 0;
		msg_wr.val = 0;
		if (write(pipe_wr[1], &msg_wr, sizeof(msg_wr)) <= 0) {
			if (errno != EBADF)
				pr_dbg(stderr, "%s: failed to write termination message "
					"over pipe: errno=%d (%s)\n",
					name, errno, strerror(errno));
		}
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		(void)munmap(buf, sz);
		exit(ret);
	} else {
		/* Parent */
		int status;
		uint8_t val = 0;	/* per-round fill value, wraps at 256 */
		uint8_t *localbuf;
		addr_msg_t msg_rd, msg_wr;

		localbuf = mmap(NULL, sz, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (localbuf == MAP_FAILED) {
			(void)close(pipe_wr[0]);
			(void)close(pipe_wr[1]);
			(void)close(pipe_rd[0]);
			(void)close(pipe_rd[1]);
			pr_failed_dbg(name, "mmap");
			exit(EXIT_FAILURE);
		}

		/* Close unwanted ends */
		(void)close(pipe_wr[1]);	/* parent only reads pipe_wr */
		(void)close(pipe_rd[0]);	/* parent only writes pipe_rd */

		do {
			struct iovec local[1], remote[1];
			uint8_t *ptr, *end = localbuf + sz;
			int ret;

			/* Wait for address of child's buffer */
redo_rd2:
			if (!opt_do_run)
				break;
			ret = read(pipe_wr[0], &msg_rd, sizeof(msg_rd));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_rd2;
				pr_failed_dbg(name, "read");
				break;
			}
			if (ret == 0)	/* child closed the pipe */
				break;
			if (ret != sizeof(msg_rd)) {
				pr_failed_dbg(name, "read");
				break;
			}
			/* Child telling us it's terminating? */
			if (!msg_rd.addr)
				break;

			/* Perform read from child's memory */
			local[0].iov_base = localbuf;
			local[0].iov_len = sz;
			remote[0].iov_base = msg_rd.addr;
			remote[0].iov_len = sz;
			if (process_vm_readv(pid, local, 1, remote, 1, 0) < 0) {
				pr_failed_dbg(name, "process_vm_readv");
				break;
			}

			if (opt_flags & OPT_FLAGS_VERIFY) {
				/* Check data is sane: child should have zeroed pages */
				for (ptr = localbuf; ptr < end; ptr += page_size) {
					if (*ptr) {
						pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
							name, ptr, *ptr, msg_rd.val);
						goto fail;
					}
					*ptr = 0;
				}
				/* Set memory to the value the child will check */
				for (ptr = localbuf; ptr < end; ptr += page_size)
					*ptr = val;
			}

			/* Write to child's memory */
			msg_wr = msg_rd;
			local[0].iov_base = localbuf;
			local[0].iov_len = sz;
			remote[0].iov_base = msg_rd.addr;
			remote[0].iov_len = sz;
			if (process_vm_writev(pid, local, 1, remote, 1, 0) < 0) {
				pr_failed_dbg(name, "process_vm_writev");
				break;
			}
			msg_wr.val = val;
			val++;

redo_wr2:
			if (!opt_do_run)
				break;
			/* Inform child that memory has been changed */
			ret = write(pipe_rd[1], &msg_wr, sizeof(msg_wr));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_wr2;
				if (errno != EBADF)
					pr_failed_dbg(name, "write");
				break;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
fail:
		/* Tell child we're done */
		msg_wr.addr = NULL;
		msg_wr.val = 0;
		if (write(pipe_wr[0], &msg_wr, sizeof(msg_wr)) < 0) {
			if (errno != EBADF)
				pr_dbg(stderr, "%s: failed to write termination message "
					"over pipe: errno=%d (%s)\n",
					name, errno, strerror(errno));
		}
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
		(void)munmap(localbuf, sz);
	}
	return EXIT_SUCCESS;
}
/* * stress_fallocate * stress I/O via fallocate and ftruncate */ static int stress_fallocate(const args_t *args) { int fd, ret; char filename[PATH_MAX]; uint64_t ftrunc_errs = 0; off_t fallocate_bytes = DEFAULT_FALLOCATE_BYTES; if (!get_setting("fallocate-bytes", &fallocate_bytes)) { if (g_opt_flags & OPT_FLAGS_MAXIMIZE) fallocate_bytes = MAX_FALLOCATE_BYTES; if (g_opt_flags & OPT_FLAGS_MINIMIZE) fallocate_bytes = MIN_FALLOCATE_BYTES; } fallocate_bytes /= args->num_instances; if (fallocate_bytes < (off_t)MIN_FALLOCATE_BYTES) fallocate_bytes = (off_t)MIN_FALLOCATE_BYTES; ret = stress_temp_dir_mk_args(args); if (ret < 0) return exit_status(-ret); (void)stress_temp_filename_args(args, filename, sizeof(filename), mwc32()); if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) { ret = exit_status(errno); pr_fail_err("open"); (void)stress_temp_dir_rm_args(args); return ret; } (void)unlink(filename); do { #if defined(HAVE_POSIX_FALLOCATE) ret = posix_fallocate(fd, (off_t)0, fallocate_bytes); #else ret = shim_fallocate(fd, 0, (off_t)0, fallocate_bytes); #endif if (!g_keep_stressing_flag) break; (void)shim_fsync(fd); if ((ret == 0) && (g_opt_flags & OPT_FLAGS_VERIFY)) { struct stat buf; if (fstat(fd, &buf) < 0) pr_fail("%s: fstat on file failed", args->name); else if (buf.st_size != fallocate_bytes) pr_fail("%s: file size %jd does not " "match size the expected file " "size of %jd\n", args->name, (intmax_t)buf.st_size, (intmax_t)fallocate_bytes); } if (ftruncate(fd, 0) < 0) ftrunc_errs++; if (!g_keep_stressing_flag) break; (void)shim_fsync(fd); if (g_opt_flags & OPT_FLAGS_VERIFY) { struct stat buf; if (fstat(fd, &buf) < 0) pr_fail("%s: fstat on file failed", args->name); else if (buf.st_size != (off_t)0) pr_fail("%s: file size %jd does not " "match size the expected file size " "of 0\n", args->name, (intmax_t)buf.st_size); } if (ftruncate(fd, fallocate_bytes) < 0) ftrunc_errs++; (void)shim_fsync(fd); if (ftruncate(fd, 0) < 0) ftrunc_errs++; 
(void)shim_fsync(fd); if (SIZEOF_ARRAY(modes) > 1) { /* * non-portable Linux fallocate() */ int i; (void)shim_fallocate(fd, 0, (off_t)0, fallocate_bytes); if (!g_keep_stressing_flag) break; (void)shim_fsync(fd); for (i = 0; i < 64; i++) { off_t offset = (mwc64() % fallocate_bytes) & ~0xfff; int j = (mwc32() >> 8) % SIZEOF_ARRAY(modes); (void)shim_fallocate(fd, modes[j], offset, 64 * KB); if (!g_keep_stressing_flag) break; (void)shim_fsync(fd); } if (ftruncate(fd, 0) < 0) ftrunc_errs++; (void)shim_fsync(fd); } inc_counter(args); } while (keep_stressing());
/*
 *  stress_ptrace()
 *	stress ptracing
 */
int stress_ptrace(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid;

	(void)instance;

	pid = fork();
	if (pid < 0) {
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		setpgid(0, pgrp);

		/*
		 * Child to be traced, we abort if we detect
		 * we are already being traced by someone else
		 * as this makes life way too complex
		 */
		if (ptrace(PTRACE_TRACEME) != 0) {
			pr_fail(stderr, "%s: ptrace child being traced "
				"already, aborting\n", name);
			_exit(0);
		}
		/* Wait for parent to start tracing me */
		kill(getpid(), SIGSTOP);

		/*
		 *  A simple mix of system calls
		 */
		while (opt_do_run) {
			(void)getppid();
			(void)getgid();
			(void)getegid();
			(void)getuid();
			(void)geteuid();
			(void)getpgrp();
			(void)time(NULL);
		}
		_exit(0);
	} else {
		/* Parent to do the tracing */
		int status;

		setpgid(pid, pgrp);

		if (waitpid(pid, &status, 0) < 0) {
			pr_failed_dbg(name, "waitpid");
			/*
			 * BUG FIX: don't leak the stopped, traced child on
			 * the error path - terminate and reap it first
			 */
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
			return EXIT_FAILURE;
		}
		if (ptrace(PTRACE_SETOPTIONS, pid,
			0, PTRACE_O_TRACESYSGOOD) < 0) {
			pr_failed_dbg(name, "ptrace");
			/* BUG FIX: as above, reap the child before bailing out */
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
			return EXIT_FAILURE;
		}

		do {
			/*
			 * We do two of the following per syscall,
			 * one at the start, and one at the end to catch
			 * the return.  In this stressor we don't really
			 * care which is which, we just care about counting
			 * them
			 */
			if (stress_syscall_wait(name, pid))
				break;
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		/* Terminate child */
		(void)kill(pid, SIGKILL);
		if (waitpid(pid, &status, 0) < 0)
			pr_failed_dbg(name, "waitpid");
	}
	return EXIT_SUCCESS;
}
/* * stress_mergesort() * stress mergesort */ static int stress_mergesort(const args_t *args) { uint64_t mergesort_size = DEFAULT_MERGESORT_SIZE; int32_t *data, *ptr; size_t n, i; struct sigaction old_action; int ret; if (!get_setting("mergesort-size", &mergesort_size)) { if (g_opt_flags & OPT_FLAGS_MAXIMIZE) mergesort_size = MAX_MERGESORT_SIZE; if (g_opt_flags & OPT_FLAGS_MINIMIZE) mergesort_size = MIN_MERGESORT_SIZE; } n = (size_t)mergesort_size; if ((data = calloc(n, sizeof(*data))) == NULL) { pr_fail_dbg("malloc"); return EXIT_NO_RESOURCE; } if (stress_sighandler(args->name, SIGALRM, stress_mergesort_handler, &old_action) < 0) { free(data); return EXIT_FAILURE; } ret = sigsetjmp(jmp_env, 1); if (ret) { /* * We return here if SIGALRM jmp'd back */ (void)stress_sigrestore(args->name, SIGALRM, &old_action); goto tidy; } /* This is expensive, do it once */ for (ptr = data, i = 0; i < n; i++) *ptr++ = mwc32(); do { /* Sort "random" data */ if (mergesort(data, n, sizeof(*data), stress_mergesort_cmp_1) < 0) { pr_fail("%s: mergesort of random data failed: %d (%s)\n", args->name, errno, strerror(errno)); } else { if (g_opt_flags & OPT_FLAGS_VERIFY) { for (ptr = data, i = 0; i < n - 1; i++, ptr++) { if (*ptr > *(ptr+1)) { pr_fail("%s: sort error " "detected, incorrect ordering " "found\n", args->name); break; } } } } if (!g_keep_stressing_flag) break; /* Reverse sort */ if (mergesort(data, n, sizeof(*data), stress_mergesort_cmp_2) < 0) { pr_fail("%s: reversed mergesort of random data failed: %d (%s)\n", args->name, errno, strerror(errno)); } else { if (g_opt_flags & OPT_FLAGS_VERIFY) { for (ptr = data, i = 0; i < n - 1; i++, ptr++) { if (*ptr < *(ptr+1)) { pr_fail("%s: reverse sort " "error detected, incorrect " "ordering found\n", args->name); break; } } } } if (!g_keep_stressing_flag) break; /* And re-order by random compare to remix the data */ if (mergesort(data, n, sizeof(*data), stress_mergesort_cmp_3) < 0) { pr_fail("%s: mergesort failed: %d (%s)\n", args->name, 
errno, strerror(errno)); } /* Reverse sort this again */ if (mergesort(data, n, sizeof(*data), stress_mergesort_cmp_2) < 0) { pr_fail("%s: reversed mergesort of random data failed: %d (%s)\n", args->name, errno, strerror(errno)); } if (g_opt_flags & OPT_FLAGS_VERIFY) { for (ptr = data, i = 0; i < n - 1; i++, ptr++) { if (*ptr < *(ptr+1)) { pr_fail("%s: reverse sort " "error detected, incorrect " "ordering found\n", args->name); break; } } } if (!g_keep_stressing_flag) break; inc_counter(args); } while (keep_stressing()); do_jmp = false; (void)stress_sigrestore(args->name, SIGALRM, &old_action); tidy: free(data); return EXIT_SUCCESS; }
/* * stress_msync() * stress msync */ int stress_msync( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { uint8_t *buf = NULL; const size_t page_size = stress_get_pagesize(); const size_t min_size = 2 * page_size; size_t sz = min_size; ssize_t ret, rc = EXIT_SUCCESS; const pid_t pid = getpid(); int fd = -1; char filename[PATH_MAX]; ret = sigsetjmp(jmp_env, 1); if (ret) { pr_fail_err(name, "sigsetjmp"); return EXIT_FAILURE; } if (stress_sighandler(name, SIGBUS, stress_sigbus_handler, NULL) < 0) return EXIT_FAILURE; if (!set_msync_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_msync_bytes = MAX_MSYNC_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_msync_bytes = MIN_MSYNC_BYTES; } sz = opt_msync_bytes & ~(page_size - 1); if (sz < min_size) sz = min_size; /* Make sure this is killable by OOM killer */ set_oom_adjustment(name, true); rc = stress_temp_dir_mk(name, pid, instance); if (rc < 0) return exit_status(-rc); (void)stress_temp_filename(filename, sizeof(filename), name, pid, instance, mwc32()); (void)umask(0077); if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) { rc = exit_status(errno); pr_fail_err(name, "open"); (void)unlink(filename); (void)stress_temp_dir_rm(name, pid, instance); return rc; } (void)unlink(filename); if (ftruncate(fd, sz) < 0) { pr_err(stderr, "%s: ftruncate failed, errno=%d (%s)\n", name, errno, strerror(errno)); (void)close(fd); (void)stress_temp_dir_rm(name, pid, instance); return EXIT_FAILURE; } buf = (uint8_t *)mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0); if (buf == MAP_FAILED) { pr_err(stderr, "%s: failed to mmap memory, errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; goto err; } do { off_t offset; uint8_t val, data[page_size]; ret = sigsetjmp(jmp_env, 1); if (ret) { /* Try again */ continue; } /* * Change data in memory, msync to disk */ offset = (mwc64() % (sz - page_size)) & ~(page_size - 1); val = mwc8(); memset(buf + offset, val, 
page_size); ret = msync(buf + offset, page_size, MS_SYNC); if (ret < 0) { pr_fail(stderr, "%s: msync MS_SYNC on " "offset %jd failed, errno=%d (%s)", name, (intmax_t)offset, errno, strerror(errno)); goto do_invalidate; } ret = lseek(fd, offset, SEEK_SET); if (ret == (off_t)-1) { pr_err(stderr, "%s: cannot seet to offset %jd, " "errno=%d (%s)\n", name, (intmax_t)offset, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; break; } ret = read(fd, data, sizeof(data)); if (ret < (ssize_t)sizeof(data)) { pr_fail(stderr, "%s: read failed, errno=%d (%s)\n", name, errno, strerror(errno)); goto do_invalidate; } if (stress_page_check(data, val, sizeof(data)) < 0) { pr_fail(stderr, "%s: msync'd data in file different " "to data in memory\n", name); } do_invalidate: /* * Now change data on disc, msync invalidate */ offset = (mwc64() % (sz - page_size)) & ~(page_size - 1); val = mwc8(); memset(buf + offset, val, page_size); ret = lseek(fd, offset, SEEK_SET); if (ret == (off_t)-1) { pr_err(stderr, "%s: cannot seet to offset %jd, errno=%d (%s)\n", name, (intmax_t)offset, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; break; } ret = read(fd, data, sizeof(data)); if (ret < (ssize_t)sizeof(data)) { pr_fail(stderr, "%s: read failed, errno=%d (%s)\n", name, errno, strerror(errno)); goto do_next; } ret = msync(buf + offset, page_size, MS_INVALIDATE); if (ret < 0) { pr_fail(stderr, "%s: msync MS_INVALIDATE on " "offset %jd failed, errno=%d (%s)", name, (intmax_t)offset, errno, strerror(errno)); goto do_next; } if (stress_page_check(buf + offset, val, sizeof(data)) < 0) { pr_fail(stderr, "%s: msync'd data in memory " "different to data in file\n", name); } do_next: (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); (void)munmap((void *)buf, sz); err: (void)close(fd); (void)stress_temp_dir_rm(name, pid, instance); if (sigbus_count) pr_inf(stdout, "%s: caught %" PRIu64 " SIGBUS signals\n", name, sigbus_count); return rc; }
/*
 *  stress_hsearch()
 *	stress hsearch
 */
int stress_hsearch(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	size_t i, max;
	int ret = EXIT_FAILURE;
	char **keys;

	(void)instance;

	if (!set_hsearch_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_hsearch_size = MAX_HSEARCH_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_hsearch_size = MIN_HSEARCH_SIZE;
	}
	max = (size_t)opt_hsearch_size;

	/* Make hash table with 25% slack */
	if (!hcreate(max + (max / 4))) {
		pr_failed_err(name, "hcreate");
		return EXIT_FAILURE;
	}

	if ((keys = calloc(max, sizeof(char *))) == NULL) {
		pr_err(stderr, "%s: cannot allocate keys\n", name);
		goto free_hash;
	}

	/* Populate hash, make it 100% full for worst performance */
	for (i = 0; i < max; i++) {
		char buffer[32];
		ENTRY e;

		snprintf(buffer, sizeof(buffer), "%zu", i);
		keys[i] = strdup(buffer);
		if (!keys[i]) {
			pr_err(stderr, "%s: cannot allocate key\n", name);
			goto free_all;
		}

		e.key = keys[i];
		/* stash the index as the payload so FIND can be verified */
		e.data = (void *)i;

		if (hsearch(e, ENTER) == NULL) {
			pr_err(stderr, "%s: cannot allocate new hash item\n", name);
			goto free_all;
		}
	}

	do {
		for (i = 0; opt_do_run && i < max; i++) {
			ENTRY e, *ep;

			e.key = keys[i];
			e.data = NULL;	/* Keep Coverity quiet */
			ep = hsearch(e, FIND);
			if (opt_flags & OPT_FLAGS_VERIFY) {
				if (ep == NULL) {
					pr_fail(stderr, "%s: cannot find key %s\n", name, keys[i]);
				} else {
					if (i != (size_t)ep->data) {
						/*
						 * BUG FIX: report the value actually
						 * returned by the hash as well as the
						 * expected one; the old message printed
						 * only the expected index, matching the
						 * style of the other search verifiers
						 */
						pr_fail(stderr, "%s: hash returned incorrect data %zu, expecting %zu\n",
							name, (size_t)ep->data, i);
					}
				}
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	ret = EXIT_SUCCESS;
free_all:
	/* free(NULL) is a no-op, so unfilled slots are safe */
	for (i = 0; i < max; i++)
		free(keys[i]);
	free(keys);
free_hash:
	hdestroy();

	return ret;
}