/*
 *  stress_bsearch()
 *	stress bsearch
 */
int stress_bsearch(
	uint64_t *const counter,	/* bogo-op counter, shared with parent */
	const uint32_t instance,	/* stressor instance number (unused) */
	const uint64_t max_ops,		/* stop after this many ops, 0 = no limit */
	const char *name)		/* stressor name for log messages */
{
	int32_t *data, *ptr, prev = 0;
	size_t n, n8, i;

	(void)instance;

	/* No explicit size set: honour the maximize/minimize flags */
	if (!set_bsearch_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_bsearch_size = MAX_BSEARCH_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_bsearch_size = MIN_BSEARCH_SIZE;
	}
	n = (size_t)opt_bsearch_size;
	n8 = (n + 7) & ~7; /* allocate in multiples of 8 */

	if ((data = malloc(sizeof(int32_t) * n8)) == NULL) {
		pr_failed_dbg(name, "malloc");
		return EXIT_FAILURE;
	}

	/* Populate with ascending data */
	prev = 0;
	for (i = 0; i < n;) {
		/*
		 * 8-way unrolled fill; SETDATA presumably consumes bits of v,
		 * advances i and keeps the sequence monotonic via prev —
		 * confirm against the macro definition.
		 */
		uint64_t v = mwc64();

		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
		SETDATA(data, i, v, prev);
	}

	do {
		/* Binary-search every element; optionally verify each hit */
		for (ptr = data, i = 0; i < n; i++, ptr++) {
			int32_t *result;

			result = bsearch(ptr, data, n, sizeof(*ptr), cmp);
			if (opt_flags & OPT_FLAGS_VERIFY) {
				if (result == NULL)
					pr_fail(stderr, "%s: element %zu could not be found\n", name, i);
				else if (*result != *ptr)
					pr_fail(stderr, "%s: element %zu found %" PRIu32 ", expecting %" PRIu32 "\n", name, i, *result, *ptr);
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	free(data);
	return EXIT_SUCCESS;
}
/*
 *  stress_syscall_wait()
 *	main syscall ptrace loop: resume the tracee until it stops at a
 *	syscall entry/exit. Returns false on a syscall stop, true when
 *	tracing should end (error, tracee exit, or shutdown requested).
 */
static inline bool stress_syscall_wait(
	const char *name,
	const pid_t pid)
{
	for (;;) {
		int wstatus;

		if (!opt_do_run)
			break;
		if (ptrace(PTRACE_SYSCALL, pid, 0, 0) < 0) {
			pr_failed_dbg(name, "ptrace");
			break;
		}
		if (waitpid(pid, &wstatus, 0) < 0) {
			pr_failed_dbg(name, "waitpid");
			break;
		}
		/* bit 0x80 marks syscall stops when PTRACE_O_TRACESYSGOOD is set */
		if (WIFSTOPPED(wstatus) && (WSTOPSIG(wstatus) & 0x80))
			return false;
		if (WIFEXITED(wstatus))
			break;
	}
	return true;
}
/*
 *  stress_switch
 *	stress by heavy context switching
 */
int stress_switch(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* op limit, 0 = unlimited */
	const char *name)		/* stressor name for messages */
{
	pid_t pid;
	int pipefds[2];

	(void)instance;

	if (pipe(pipefds) < 0) {
		pr_failed_dbg(name, "pipe");
		return EXIT_FAILURE;
	}

again:
	pid = fork();
	if (pid < 0) {
		/* EAGAIN: transient resource shortage, retry while still running */
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		(void)close(pipefds[0]);
		(void)close(pipefds[1]);
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		/* Child: read bytes until the stop byte or EOF arrives */
		(void)close(pipefds[1]);

		for (;;) {
			char ch;

			for (;;) {
				ssize_t ret;

				ret = read(pipefds[0], &ch, sizeof(ch));
				if (ret < 0) {
					if ((errno == EAGAIN) || (errno == EINTR))
						continue;
					pr_failed_dbg(name, "read");
					break;
				}
				if (ret == 0)
					break;	/* writer closed: EOF */
				if (ch == SWITCH_STOP)
					break;	/* parent requested shutdown */
			}
			/* outer loop runs once: close up and exit */
			(void)close(pipefds[0]);
			exit(EXIT_SUCCESS);
		}
	} else {
		char ch = '_';
		int status;

		/* Parent: each 1-byte write forces a context switch to the child */
		(void)close(pipefds[0]);

		do {
			ssize_t ret;

			ret = write(pipefds[1], &ch, sizeof(ch));
			if (ret <= 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					continue;
				if (errno) {
					pr_failed_dbg(name, "write");
					break;
				}
				/* NOTE(review): ret==0 with errno==0 — errno may be stale here */
				continue;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		/* Ask the child to stop, then make sure it is gone */
		ch = SWITCH_STOP;
		if (write(pipefds[1], &ch, sizeof(ch)) <= 0)
			pr_failed_dbg(name, "termination write");
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
	}
	return EXIT_SUCCESS;
}
/*
 *  stress_msg
 *	stress by message queues
 */
int stress_msg(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* op limit, 0 = unlimited */
	const char *name)		/* stressor name for messages */
{
	pid_t pid;
	int msgq_id;

	(void)instance;

	/* Private SysV message queue, removed again on exit */
	msgq_id = msgget(IPC_PRIVATE, S_IRUSR | S_IWUSR | IPC_CREAT | IPC_EXCL);
	if (msgq_id < 0) {
		pr_failed_dbg(name, "msgget");
		return EXIT_FAILURE;
	}
	pr_dbg(stderr, "System V message queue created, id: %d\n", msgq_id);

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		setpgid(0, pgrp);

		/* Child: receive messages until the MSG_STOP sentinel arrives */
		while (opt_do_run) {
			msg_t msg;
			uint64_t i;

			for (i = 0; ; i++) {
				uint64_t v;
				if (msgrcv(msgq_id, &msg, sizeof(msg.msg), 0, 0) < 0) {
					pr_failed_dbg(name, "msgrcv");
					break;
				}
				if (!strcmp(msg.msg, MSG_STOP))
					break;
				if (opt_flags & OPT_FLAGS_VERIFY) {
					/* Payload is the parent's running sequence number */
					memcpy(&v, msg.msg, sizeof(v));
					if (v != i)
						pr_fail(stderr, "%s: msgrcv: expected msg containing 0x%" PRIx64 " but received 0x%" PRIx64 " instead\n", name, i, v);
				}
			}
			exit(EXIT_SUCCESS);
		}
	} else {
		msg_t msg;
		uint64_t i = 0;
		int status;

		/* Parent: send an incrementing sequence number per message */
		setpgid(pid, pgrp);

		do {
			memcpy(msg.msg, &i, sizeof(i));
			msg.mtype = 1;
			if (msgsnd(msgq_id, &msg, sizeof(i), 0) < 0) {
				if (errno != EINTR)
					pr_failed_dbg(name, "msgsnd");
				break;
			}
			i++;
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		/* Send stop sentinel, then reap and remove the queue */
		strncpy(msg.msg, MSG_STOP, sizeof(msg.msg));
		if (msgsnd(msgq_id, &msg, sizeof(msg.msg), 0) < 0)
			pr_failed_dbg(name, "termination msgsnd");
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);

		if (msgctl(msgq_id, IPC_RMID, NULL) < 0)
			pr_failed_dbg(name, "msgctl");
		else
			pr_dbg(stderr, "System V message queue deleted, id: %d\n", msgq_id);
	}
	return EXIT_SUCCESS;
}
/* * stress_vm_rw * stress vm_read_v/vm_write_v */ int stress_vm_rw( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { pid_t pid; int pipe_wr[2], pipe_rd[2]; const size_t page_size = stress_get_pagesize(); size_t sz; (void)instance; if (!set_vm_rw_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_vm_rw_bytes = MAX_VM_RW_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_vm_rw_bytes = MIN_VM_RW_BYTES; } sz = opt_vm_rw_bytes & ~(page_size - 1); if (pipe(pipe_wr) < 0) { pr_failed_dbg(name, "pipe"); return EXIT_FAILURE; } if (pipe(pipe_rd) < 0) { (void)close(pipe_wr[0]); (void)close(pipe_wr[1]); pr_failed_dbg(name, "pipe"); return EXIT_FAILURE; } pid = fork(); if (pid < 0) { (void)close(pipe_wr[0]); (void)close(pipe_wr[1]); (void)close(pipe_rd[0]); (void)close(pipe_rd[1]); pr_failed_dbg(name, "fork"); return EXIT_FAILURE; } else if (pid == 0) { /* Child */ uint8_t *buf; int ret = EXIT_SUCCESS; addr_msg_t msg_rd, msg_wr; /* Close unwanted ends */ (void)close(pipe_wr[0]); (void)close(pipe_rd[1]); buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (buf == MAP_FAILED) { pr_failed_dbg(name, "mmap"); ret = EXIT_FAILURE; goto cleanup; } for (;;) { uint8_t *ptr, *end = buf + sz; int ret; memset(&msg_wr, 0, sizeof(msg_wr)); msg_wr.addr = buf; msg_wr.val = 0; /* Send address of buffer to parent */ redo_wr1: ret = write(pipe_wr[1], &msg_wr, sizeof(msg_wr)); if (ret < 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto redo_wr1; if (errno != EBADF) pr_failed_dbg(name, "write"); break; } redo_rd1: /* Wait for parent to populate data */ ret = read(pipe_rd[0], &msg_rd, sizeof(msg_rd)); if (ret < 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto redo_rd1; pr_failed_dbg(name, "read"); break; } if (ret == 0) break; if (ret != sizeof(msg_rd)) { pr_failed_dbg(name, "read"); break; } if (opt_flags & OPT_FLAGS_VERIFY) { /* Check memory altered by parent is sane */ for (ptr = buf; ptr < end; ptr += page_size) { if 
(*ptr != msg_rd.val) { pr_fail(stderr, "%s: memory at %p: %d vs %d\n", name, ptr, *ptr, msg_rd.val); goto cleanup; } *ptr = 0; } } } cleanup: /* Tell parent we're done */ msg_wr.addr = 0; msg_wr.val = 0; if (write(pipe_wr[1], &msg_wr, sizeof(msg_wr)) <= 0) { if (errno != EBADF) pr_dbg(stderr, "%s: failed to write termination message " "over pipe: errno=%d (%s)\n", name, errno, strerror(errno)); } (void)close(pipe_wr[0]); (void)close(pipe_wr[1]); (void)close(pipe_rd[0]); (void)close(pipe_rd[1]); (void)munmap(buf, sz); exit(ret); } else { /* Parent */ int status; uint8_t val = 0; uint8_t *localbuf; addr_msg_t msg_rd, msg_wr; localbuf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (localbuf == MAP_FAILED) { (void)close(pipe_wr[0]); (void)close(pipe_wr[1]); (void)close(pipe_rd[0]); (void)close(pipe_rd[1]); pr_failed_dbg(name, "mmap"); exit(EXIT_FAILURE); } /* Close unwanted ends */ (void)close(pipe_wr[1]); (void)close(pipe_rd[0]); do { struct iovec local[1], remote[1]; uint8_t *ptr, *end = localbuf + sz; int ret; /* Wait for address of child's buffer */ redo_rd2: if (!opt_do_run) break; ret = read(pipe_wr[0], &msg_rd, sizeof(msg_rd)); if (ret < 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto redo_rd2; pr_failed_dbg(name, "read"); break; } if (ret == 0) break; if (ret != sizeof(msg_rd)) { pr_failed_dbg(name, "read"); break; } /* Child telling us it's terminating? 
*/ if (!msg_rd.addr) break; /* Perform read from child's memory */ local[0].iov_base = localbuf; local[0].iov_len = sz; remote[0].iov_base = msg_rd.addr; remote[0].iov_len = sz; if (process_vm_readv(pid, local, 1, remote, 1, 0) < 0) { pr_failed_dbg(name, "process_vm_readv"); break; } if (opt_flags & OPT_FLAGS_VERIFY) { /* Check data is sane */ for (ptr = localbuf; ptr < end; ptr += page_size) { if (*ptr) { pr_fail(stderr, "%s: memory at %p: %d vs %d\n", name, ptr, *ptr, msg_rd.val); goto fail; } *ptr = 0; } /* Set memory */ for (ptr = localbuf; ptr < end; ptr += page_size) *ptr = val; } /* Write to child's memory */ msg_wr = msg_rd; local[0].iov_base = localbuf; local[0].iov_len = sz; remote[0].iov_base = msg_rd.addr; remote[0].iov_len = sz; if (process_vm_writev(pid, local, 1, remote, 1, 0) < 0) { pr_failed_dbg(name, "process_vm_writev"); break; } msg_wr.val = val; val++; redo_wr2: if (!opt_do_run) break; /* Inform child that memory has been changed */ ret = write(pipe_rd[1], &msg_wr, sizeof(msg_wr)); if (ret < 0) { if ((errno == EAGAIN) || (errno == EINTR)) goto redo_wr2; if (errno != EBADF) pr_failed_dbg(name, "write"); break; } (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); fail: /* Tell child we're done */ msg_wr.addr = NULL; msg_wr.val = 0; if (write(pipe_wr[0], &msg_wr, sizeof(msg_wr)) < 0) { if (errno != EBADF) pr_dbg(stderr, "%s: failed to write termination message " "over pipe: errno=%d (%s)\n", name, errno, strerror(errno)); } (void)close(pipe_wr[0]); (void)close(pipe_wr[1]); (void)close(pipe_rd[0]); (void)close(pipe_rd[1]); (void)kill(pid, SIGKILL); (void)waitpid(pid, &status, 0); (void)munmap(localbuf, sz); } return EXIT_SUCCESS; }
/* * stress_udp * stress by heavy udp ops */ int stress_udp( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { pid_t pid, ppid = getppid(); int rc = EXIT_SUCCESS; pr_dbg(stderr, "%s: process [%d] using udp port %d\n", name, getpid(), opt_udp_port + instance); again: pid = fork(); if (pid < 0) { if (opt_do_run && (errno == EAGAIN)) goto again; pr_failed_dbg(name, "fork"); return EXIT_FAILURE; } else if (pid == 0) { /* Child, client */ struct sockaddr *addr; do { char buf[UDP_BUF]; socklen_t len; int fd; int j = 0; if ((fd = socket(opt_udp_domain, SOCK_DGRAM, 0)) < 0) { pr_failed_dbg(name, "socket"); /* failed, kick parent to finish */ (void)kill(getppid(), SIGALRM); exit(EXIT_FAILURE); } stress_set_sockaddr(name, instance, ppid, opt_udp_domain, opt_udp_port, &addr, &len); do { size_t i; for (i = 16; i < sizeof(buf); i += 16, j++) { memset(buf, 'A' + (j % 26), sizeof(buf)); ssize_t ret = sendto(fd, buf, i, 0, addr, len); if (ret < 0) { if (errno != EINTR) pr_failed_dbg(name, "sendto"); break; } } } while (opt_do_run && (!max_ops || *counter < max_ops)); (void)close(fd); } while (opt_do_run && (!max_ops || *counter < max_ops)); #ifdef AF_UNIX if (opt_udp_domain == AF_UNIX) { struct sockaddr_un *addr_un = (struct sockaddr_un *)addr; (void)unlink(addr_un->sun_path); } #endif /* Inform parent we're all done */ (void)kill(getppid(), SIGALRM); exit(EXIT_SUCCESS); } else { /* Parent, server */ char buf[UDP_BUF]; int fd, status; int so_reuseaddr = 1; socklen_t addr_len = 0; struct sigaction new_action; struct sockaddr *addr; new_action.sa_handler = handle_udp_sigalrm; sigemptyset(&new_action.sa_mask); new_action.sa_flags = 0; if (sigaction(SIGALRM, &new_action, NULL) < 0) { pr_failed_err(name, "sigaction"); rc = EXIT_FAILURE; goto die; } if ((fd = socket(opt_udp_domain, SOCK_DGRAM, 0)) < 0) { pr_failed_dbg(name, "socket"); rc = EXIT_FAILURE; goto die; } stress_set_sockaddr(name, instance, ppid, opt_udp_domain, opt_udp_port, &addr, 
&addr_len); if (bind(fd, addr, addr_len) < 0) { pr_failed_dbg(name, "bind"); rc = EXIT_FAILURE; goto die_close; } if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &so_reuseaddr, sizeof(so_reuseaddr)) < 0) { pr_failed_dbg(name, "setsockopt"); rc = EXIT_FAILURE; goto die_close; } do { socklen_t len = addr_len; ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, addr, &len); if (n == 0) break; if (n < 0) { if (errno != EINTR) pr_failed_dbg(name, "recvfrom"); break; } (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); die_close: (void)close(fd); die: #ifdef AF_UNIX if (opt_udp_domain == AF_UNIX) { struct sockaddr_un *addr_un = (struct sockaddr_un *)addr; (void)unlink(addr_un->sun_path); } #endif if (pid) { (void)kill(pid, SIGKILL); (void)waitpid(pid, &status, 0); } } return rc; }
/*
 *  stress_ptrace()
 *	stress ptracing
 */
int stress_ptrace(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* op limit, 0 = unlimited */
	const char *name)		/* stressor name for messages */
{
	pid_t pid;

	(void)instance;

	pid = fork();
	if (pid < 0) {
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		setpgid(0, pgrp);

		/*
		 * Child to be traced, we abort if we detect
		 * we are already being traced by someone else
		 * as this makes life way too complex
		 */
		if (ptrace(PTRACE_TRACEME) != 0) {
			pr_fail(stderr, "%s: ptrace child being traced "
				"already, aborting\n", name);
			_exit(0);
		}
		/* Wait for parent to start tracing me */
		kill(getpid(), SIGSTOP);

		/*
		 * A simple mix of system calls
		 */
		while (opt_do_run) {
			(void)getppid();
			(void)getgid();
			(void)getegid();
			(void)getuid();
			(void)geteuid();
			(void)getpgrp();
			(void)time(NULL);
		}
		_exit(0);
	} else {
		/* Parent to do the tracing */
		int status;

		setpgid(pid, pgrp);

		/* First wait: child stops itself with SIGSTOP above */
		if (waitpid(pid, &status, 0) < 0) {
			pr_failed_dbg(name, "waitpid");
			return EXIT_FAILURE;
		}
		/* TRACESYSGOOD: syscall stops are flagged with bit 0x80 */
		if (ptrace(PTRACE_SETOPTIONS, pid, 0, PTRACE_O_TRACESYSGOOD) < 0) {
			pr_failed_dbg(name, "ptrace");
			return EXIT_FAILURE;
		}

		do {
			/*
			 * We do two of the following per syscall,
			 * one at the start, and one at the end to catch
			 * the return. In this stressor we don't really
			 * care which is which, we just care about counting
			 * them
			 */
			if (stress_syscall_wait(name, pid))
				break;
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		/* Terminate child */
		(void)kill(pid, SIGKILL);
		if (waitpid(pid, &status, 0) < 0)
			pr_failed_dbg(name, "waitpid");
	}
	return EXIT_SUCCESS;
}
/*
 *  stress_kcmp
 *	stress sys_kcmp
 */
int stress_kcmp(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* op limit, 0 = unlimited */
	const char *name)		/* stressor name for messages */
{
	pid_t pid1;
	int fd1;
	int ret = EXIT_SUCCESS;

	(void)instance;

	if ((fd1 = open("/dev/null", O_WRONLY)) < 0) {
		pr_failed_err(name, "open");
		return EXIT_FAILURE;
	}

again:
	pid1 = fork();
	if (pid1 < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_failed_dbg(name, "fork");
		(void)close(fd1);
		return EXIT_FAILURE;
	} else if (pid1 == 0) {
		setpgid(0, pgrp);

		/* Child: just exists as a kcmp() comparison target */
		while (opt_do_run)
			pause();

		/* will never get here */
		(void)close(fd1);
		exit(EXIT_SUCCESS);
	} else {
		/* Parent: compare kernel resources between self and child */
		int fd2, status, pid2;

		setpgid(pid1, pgrp);
		pid2 = getpid();	/* NB: pid2 is the parent's own pid */
		if ((fd2 = open("/dev/null", O_WRONLY)) < 0) {
			pr_failed_err(name, "open");
			ret = EXIT_FAILURE;
			goto reap;
		}

		do {
			/*
			 * Exercise each KCMP_* resource type across all
			 * pid pairings; KCMP presumably wraps the kcmp(2)
			 * syscall and logs on failure — confirm against
			 * the macro definition.
			 */
			KCMP(pid1, pid2, KCMP_FILE, fd1, fd2);
			KCMP(pid1, pid1, KCMP_FILE, fd1, fd1);
			KCMP(pid2, pid2, KCMP_FILE, fd1, fd1);
			KCMP(pid2, pid2, KCMP_FILE, fd2, fd2);

			KCMP(pid1, pid2, KCMP_FILES, 0, 0);
			KCMP(pid1, pid1, KCMP_FILES, 0, 0);
			KCMP(pid2, pid2, KCMP_FILES, 0, 0);

			KCMP(pid1, pid2, KCMP_FS, 0, 0);
			KCMP(pid1, pid1, KCMP_FS, 0, 0);
			KCMP(pid2, pid2, KCMP_FS, 0, 0);

			KCMP(pid1, pid2, KCMP_IO, 0, 0);
			KCMP(pid1, pid1, KCMP_IO, 0, 0);
			KCMP(pid2, pid2, KCMP_IO, 0, 0);

			KCMP(pid1, pid2, KCMP_SIGHAND, 0, 0);
			KCMP(pid1, pid1, KCMP_SIGHAND, 0, 0);
			KCMP(pid2, pid2, KCMP_SIGHAND, 0, 0);

			KCMP(pid1, pid2, KCMP_SYSVSEM, 0, 0);
			KCMP(pid1, pid1, KCMP_SYSVSEM, 0, 0);
			KCMP(pid2, pid2, KCMP_SYSVSEM, 0, 0);

			KCMP(pid1, pid2, KCMP_VM, 0, 0);
			KCMP(pid1, pid1, KCMP_VM, 0, 0);
			KCMP(pid2, pid2, KCMP_VM, 0, 0);

			/* Same simple checks */
			if (opt_flags & OPT_FLAGS_VERIFY) {
				/* Comparing a pid's resources with itself must yield 0 */
				KCMP_VERIFY(pid1, pid1, KCMP_FILE, fd1, fd1, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_FILES, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_FS, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_IO, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_SIGHAND, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_SYSVSEM, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_VM, 0, 0, 0);
				KCMP_VERIFY(pid1, pid2, KCMP_SYSVSEM, 0, 0, 0);
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
reap:
		if (fd2 >= 0)
			(void)close(fd2);
		(void)kill(pid1, SIGKILL);
		(void)waitpid(pid1, &status, 0);
		(void)close(fd1);
	}
	return ret;
}
/* * stress_splice * stress copying of /dev/zero to /dev/null */ int stress_vm_splice( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { int fd, fds[2]; uint8_t *buf; const size_t page_size = stress_get_pagesize(); size_t sz; (void)instance; if (!set_vm_splice_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_vm_splice_bytes = MAX_VM_SPLICE_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_vm_splice_bytes = MIN_VM_SPLICE_BYTES; } sz = opt_vm_splice_bytes & ~(page_size - 1); buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0); if (buf == MAP_FAILED) { pr_failed_dbg(name, "mmap"); return(EXIT_FAILURE); } if (pipe(fds) < 0) { (void)munmap(buf, sz); pr_failed_err(name, "pipe"); return EXIT_FAILURE; } if ((fd = open("/dev/null", O_WRONLY)) < 0) { (void)munmap(buf, sz); (void)close(fds[0]); (void)close(fds[1]); pr_failed_err(name, "open"); return EXIT_FAILURE; } do { int ret; ssize_t bytes; struct iovec iov; iov.iov_base = buf; iov.iov_len = sz; bytes = vmsplice(fds[1], &iov, 1, 0); if (bytes < 0) break; ret = splice(fds[0], NULL, fd, NULL, opt_vm_splice_bytes, SPLICE_F_MOVE); if (ret < 0) break; (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); (void)munmap(buf, sz); (void)close(fd); (void)close(fds[0]); (void)close(fds[1]); return EXIT_SUCCESS; }
/* * stress_sigsuspend * stress sigsuspend */ int stress_sigsuspend( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { pid_t pid[MAX_SIGSUSPEND_PIDS]; size_t n, i; sigset_t mask; int status; uint64_t *counters, c; volatile uint64_t *v_counters; const size_t counters_size = (sizeof(uint64_t) * MAX_SIGSUSPEND_PIDS) << CACHE_STRIDE_SHIFT; (void)instance; v_counters = counters = mmap(NULL, counters_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, -1, 0); if (counters == MAP_FAILED) { pr_failed_dbg(name, "mmap"); return EXIT_FAILURE; } memset(counters, 0, counters_size); sigfillset(&mask); sigdelset(&mask, SIGUSR1); for (n = 0; n < MAX_SIGSUSPEND_PIDS; n++) { again: pid[n] = fork(); if (pid[n] < 0) { if (opt_do_run && (errno == EAGAIN)) goto again; pr_failed_dbg(name, "fork"); goto reap; } else if (pid[n] == 0) { setpgid(0, pgrp); while (opt_do_run) { sigsuspend(&mask); v_counters[n << CACHE_STRIDE_SHIFT]++; } _exit(0); } setpgid(pid[n], pgrp); } /* Parent */ do { c = 0; for (i = 0; i < n; i++) { c += v_counters[i << CACHE_STRIDE_SHIFT]; kill(pid[i], SIGUSR1); } } while (opt_do_run && (!max_ops || c < max_ops)); *counter = c; reap: for (i = 0; i < n; i++) { /* terminate child */ (void)kill(pid[i], SIGKILL); (void)waitpid(pid[i], &status, 0); } (void)munmap(counters, counters_size); return EXIT_SUCCESS; }
/*
 *  stress_eventfd
 *	stress eventfd read/writes: parent and child ping-pong a 64-bit
 *	value over a pair of eventfds.
 *
 *	Fix vs original: the result of the second eventfd() call was
 *	checked with 'if (fd1 < 0)' — a failure of fd2 went undetected
 *	and a later read/write on -1 would fail confusingly. It now
 *	checks fd2.
 */
int stress_eventfd(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* op limit, 0 = unlimited */
	const char *name)		/* stressor name for messages */
{
	pid_t pid;
	int fd1, fd2;

	(void)instance;

	fd1 = eventfd(0, 0);	/* parent -> child wakeups */
	if (fd1 < 0) {
		pr_failed_dbg(name, "eventfd");
		return EXIT_FAILURE;
	}
	fd2 = eventfd(0, 0);	/* child -> parent wakeups */
	if (fd2 < 0) {
		pr_failed_dbg(name, "eventfd");
		(void)close(fd1);
		return EXIT_FAILURE;
	}

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_failed_dbg(name, "fork");
		(void)close(fd1);
		(void)close(fd2);
		return EXIT_FAILURE;
	} else if (pid == 0) {
		setpgid(0, pgrp);

		/* Child: wait on fd1, answer on fd2 */
		while (opt_do_run) {
			uint64_t val;
			ssize_t ret;

			for (;;) {
				if (!opt_do_run)
					goto exit_child;
				ret = read(fd1, &val, sizeof(val));
				if (ret < 0) {
					if ((errno == EAGAIN) || (errno == EINTR))
						continue;
					pr_failed_dbg(name, "child read");
					goto exit_child;
				}
				if (ret < (ssize_t)sizeof(val)) {
					pr_failed_dbg(name, "child short read");
					goto exit_child;
				}
				break;
			}
			val = 1;
			for (;;) {
				if (!opt_do_run)
					goto exit_child;
				ret = write(fd2, &val, sizeof(val));
				if (ret < 0) {
					if ((errno == EAGAIN) || (errno == EINTR))
						continue;
					pr_failed_dbg(name, "child write");
					goto exit_child;
				}
				if (ret < (ssize_t)sizeof(val)) {
					pr_failed_dbg(name, "child short write");
					goto exit_child;
				}
				break;
			}
		}
exit_child:
		(void)close(fd1);
		(void)close(fd2);
		exit(EXIT_SUCCESS);
	} else {
		int status;

		/* Parent: wake the child on fd1, wait for its reply on fd2 */
		do {
			uint64_t val = 1;
			ssize_t ret;

			for (;;) {
				if (!opt_do_run)
					goto exit_parent;
				ret = write(fd1, &val, sizeof(val));
				if (ret < 0) {
					if ((errno == EAGAIN) || (errno == EINTR))
						continue;
					pr_failed_dbg(name, "parent write");
					goto exit_parent;
				}
				if (ret < (ssize_t)sizeof(val)) {
					pr_failed_dbg(name, "parent short write");
					goto exit_parent;
				}
				break;
			}
			for (;;) {
				if (!opt_do_run)
					goto exit_parent;
				ret = read(fd2, &val, sizeof(val));
				if (ret < 0) {
					if ((errno == EAGAIN) || (errno == EINTR))
						continue;
					pr_failed_dbg(name, "parent read");
					goto exit_parent;
				}
				if (ret < (ssize_t)sizeof(val)) {
					pr_failed_dbg(name, "parent short read");
					goto exit_parent;
				}
				break;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
exit_parent:
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
		(void)close(fd1);
		(void)close(fd2);
	}
	return EXIT_SUCCESS;
}
/*
 *  stress_sigq
 *	stress by heavy sigqueue message sending
 */
int stress_sigq(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance, logged by child */
	const uint64_t max_ops,		/* op limit, 0 = unlimited */
	const char *name)		/* stressor name for messages */
{
	pid_t pid;
	struct sigaction new_action;

	new_action.sa_handler = stress_sigqhandler;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = 0;
	if (sigaction(SIGUSR1, &new_action, NULL) < 0) {
		pr_failed_err(name, "sigaction");
		return EXIT_FAILURE;
	}

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		sigset_t mask;

		/* Child: synchronously receive queued SIGUSR1s */
		sigemptyset(&mask);
		sigaddset(&mask, SIGUSR1);

		for (;;) {
			siginfo_t info;
			sigwaitinfo(&mask, &info);
			/* sival_int != 0 is the termination notice */
			if (info.si_value.sival_int)
				break;
		}
		pr_dbg(stderr, "%s: child got termination notice\n", name);
		pr_dbg(stderr, "%s: exited on pid [%d] (instance %" PRIu32 ")\n",
			name, getpid(), instance);
		_exit(0);
	} else {
		/* Parent: queue signals with a zero payload as fast as possible */
		union sigval s;
		int status;

		do {
			memset(&s, 0, sizeof(s));
			s.sival_int = 0;
			sigqueue(pid, SIGUSR1, s);
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		pr_dbg(stderr, "%s: parent sent termination notice\n", name);
		memset(&s, 0, sizeof(s));
		s.sival_int = 1;	/* non-zero payload tells the child to stop */
		sigqueue(pid, SIGUSR1, s);
		usleep(250);
		/* And ensure child is really dead */
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
	}
	return EXIT_SUCCESS;
}
/*
 *  stress_tsearch()
 *	stress tsearch
 */
int stress_tsearch(
	uint64_t *const counter,	/* bogo-op counter */
	const uint32_t instance,	/* stressor instance (unused) */
	const uint64_t max_ops,		/* op limit, 0 = unlimited */
	const char *name)		/* stressor name for messages */
{
	int32_t *data;
	size_t i, n;

	(void)instance;

	if (!set_tsearch_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_tsearch_size = MAX_TSEARCH_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_tsearch_size = MIN_TSEARCH_SIZE;
	}
	n = (size_t)opt_tsearch_size;

	if ((data = malloc(sizeof(int32_t) * n)) == NULL) {
		pr_failed_dbg(name, "malloc");
		return EXIT_FAILURE;
	}

	do {
		void *root = NULL;

		/* Step #1, populate tree */
		for (i = 0; i < n; i++) {
			/* random high bits XOR index: near-unique keys */
			data[i] = ((mwc32() & 0xfff) << 20) ^ i;
			if (tsearch(&data[i], &root, cmp) == NULL) {
				size_t j;

				/* Allocation failed: unwind the nodes inserted so far */
				pr_err(stderr, "%s: cannot allocate new tree node\n", name);
				for (j = 0; j < i; j++)
					tdelete(&data[j], &root, cmp);
				goto abort;
			}
		}
		/* Step #2, find */
		for (i = 0; opt_do_run && i < n; i++) {
			void **result;

			result = tfind(&data[i], &root, cmp);
			if (opt_flags & OPT_FLAGS_VERIFY) {
				if (result == NULL)
					pr_fail(stderr, "%s: element %zu could not be found\n", name, i);
				else {
					/* tfind returns a pointer to the stored key pointer */
					int32_t *val;

					val = *result;
					if (*val != data[i])
						pr_fail(stderr, "%s: element %zu found %" PRIu32 ", expecting %" PRIu32 "\n", name, i, *val, data[i]);
				}
			}
		}
		/* Step #3, delete */
		for (i = 0; i < n; i++) {
			void **result;

			result = tdelete(&data[i], &root, cmp);
			if ((opt_flags & OPT_FLAGS_VERIFY) && (result == NULL))
				pr_fail(stderr, "%s: element %zu could not be found\n", name, i);
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

abort:
	free(data);
	return EXIT_SUCCESS;
}
/* * stress_fifo * stress by heavy fifo I/O */ int stress_fifo( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { pid_t pids[MAX_FIFO_READERS]; int fd; char fifoname[PATH_MAX]; uint64_t i, val = 0ULL; int ret = EXIT_FAILURE; const pid_t pid = getpid(); if (!set_fifo_readers) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_fifo_readers = MAX_FIFO_READERS; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_fifo_readers = MIN_FIFO_READERS; } if (stress_temp_dir_mk(name, pid, instance) < 0) return EXIT_FAILURE; (void)stress_temp_filename(fifoname, sizeof(fifoname), name, pid, instance, mwc32()); (void)umask(0077); if (mkfifo(fifoname, S_IRUSR | S_IWUSR) < 0) { pr_err(stderr, "%s: mkfifo failed: errno=%d (%s)\n", name, errno, strerror(errno)); goto tidy; } memset(pids, 0, sizeof(pids)); for (i = 0; i < opt_fifo_readers; i++) { pids[i] = fifo_spawn(stress_fifo_reader, name, fifoname); if (pids[i] < 0) goto reap; if (!opt_do_run) goto reap; } fd = open(fifoname, O_WRONLY); if (fd < 0) { pr_err(stderr, "%s: fifo write open failed: errno=%d (%s)\n", name, errno, strerror(errno)); goto reap; } do { ssize_t ret; ret = write(fd, &val, sizeof(val)); if (ret <= 0) { if ((errno == EAGAIN) || (errno == EINTR)) continue; if (errno) { pr_failed_dbg(name, "write"); break; } continue; } val++; (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); (void)close(fd); ret = EXIT_SUCCESS; reap: for (i = 0; i < opt_fifo_readers; i++) { if (pids[i] > 0) { int status; (void)kill(pids[i], SIGKILL); (void)waitpid(pids[i], &status, 0); } } tidy: (void)unlink(fifoname); (void)stress_temp_dir_rm(name, pid, instance); return ret; }
/* * stress_shm_posix() * stress SYSTEM V shared memory */ int stress_shm_posix( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { const size_t page_size = stress_get_pagesize(); size_t orig_sz, sz; int pipefds[2]; int rc = EXIT_SUCCESS; ssize_t i; pid_t pid; (void)instance; if (!set_shm_posix_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_shm_posix_bytes = MAX_SHM_POSIX_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_shm_posix_bytes = MIN_SHM_POSIX_BYTES; } if (!set_shm_posix_objects) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_shm_posix_objects = MAX_SHM_POSIX_OBJECTS; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_shm_posix_objects = MIN_SHM_POSIX_OBJECTS; } orig_sz = sz = opt_shm_posix_bytes & ~(page_size - 1); while (opt_do_run) { if (pipe(pipefds) < 0) { pr_failed_dbg(name, "pipe"); return EXIT_FAILURE; } fork_again: pid = fork(); if (pid < 0) { /* Can't fork, retry? */ if (errno == EAGAIN) goto fork_again; pr_err(stderr, "%s: fork failed: errno=%d: (%s)\n", name, errno, strerror(errno)); (void)close(pipefds[0]); (void)close(pipefds[1]); /* Nope, give up! */ return EXIT_FAILURE; } else if (pid > 0) { /* Parent */ int status; char shm_names[MAX_SHM_POSIX_OBJECTS][SHM_NAME_LEN]; ssize_t n; setpgid(pid, pgrp); (void)close(pipefds[1]); memset(shm_names, 0, sizeof(shm_names)); while (opt_do_run) { shm_msg_t msg; char *shm_name; /* * Blocking read on child shm ID info * pipe. We break out if pipe breaks * on child death, or child tells us * off its demise. 
*/ n = read(pipefds[0], &msg, sizeof(msg)); if (n <= 0) { if ((errno == EAGAIN) || (errno == EINTR)) continue; if (errno) { pr_failed_dbg(name, "read"); break; } pr_failed_dbg(name, "zero byte read"); break; } if ((msg.index < 0) || (msg.index >= MAX_SHM_POSIX_OBJECTS)) break; shm_name = shm_names[msg.index]; shm_name[SHM_NAME_LEN - 1] = '\0'; strncpy(shm_name, msg.shm_name, SHM_NAME_LEN); } (void)kill(pid, SIGKILL); (void)waitpid(pid, &status, 0); (void)close(pipefds[1]); /* * The child may have been killed by the OOM killer or * some other way, so it may have left the shared * memory segment around. At this point the child * has died, so we should be able to remove the * shared memory segment. */ for (i = 0; i < (ssize_t)opt_shm_posix_objects; i++) { char *shm_name = shm_names[i]; if (*shm_name) (void)shm_unlink(shm_name); } } else if (pid == 0) { /* Child, stress memory */ (void)close(pipefds[0]); rc = stress_shm_posix_child(pipefds[1], counter, max_ops, name, sz); (void)close(pipefds[1]); _exit(rc); } } if (orig_sz != sz) pr_dbg(stderr, "%s: reduced shared memory size from " "%zu to %zu bytes\n", name, orig_sz, sz); return rc; }