/*
 *  stress_sendfile
 *	stress reading of a temp file and writing to /dev/null via sendfile
 */
int stress_sendfile(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	char filename[PATH_MAX];
	int fdin, fdout, ret = EXIT_SUCCESS;
	size_t sz;
	const pid_t pid = getpid();

	if (!set_sendfile_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_sendfile_size = MAX_SENDFILE_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_sendfile_size = MIN_SENDFILE_SIZE;
	}
	sz = (size_t)opt_sendfile_size;

	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;

	(void)umask(0077);
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	if ((fdin = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_fail_err(name, "open");
		ret = EXIT_FAILURE;
		goto dir_out;
	}
	(void)posix_fallocate(fdin, (off_t)0, (off_t)sz);
	if ((fdout = open("/dev/null", O_WRONLY)) < 0) {
		pr_fail_err(name, "open");
		ret = EXIT_FAILURE;
		goto close_in;
	}

	do {
		off_t offset = 0;

		if (sendfile(fdout, fdin, &offset, sz) < 0) {
			pr_fail_err(name, "sendfile");
			ret = EXIT_FAILURE;
			goto close_out;
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

close_out:
	(void)close(fdout);
close_in:
	(void)close(fdin);
	(void)unlink(filename);
dir_out:
	(void)stress_temp_dir_rm(name, pid, instance);

	return ret;
}
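/*
 * Illustrative sketch (not part of the original source): a minimal
 * standalone use of sendfile(2) copying an existing file to /dev/null,
 * the same in-kernel copy pattern the stressor above exercises.
 * Assumes Linux; error handling is pared down for brevity and the
 * demo_sendfile name is hypothetical.
 */
#include <fcntl.h>
#include <sys/sendfile.h>
#include <sys/stat.h>
#include <unistd.h>

static int demo_sendfile(const char *path)
{
	struct stat st;
	off_t offset = 0;
	int fdin, fdout, rc = -1;

	if ((fdin = open(path, O_RDONLY)) < 0)
		return -1;
	if ((fdout = open("/dev/null", O_WRONLY)) < 0)
		goto close_in;
	if (fstat(fdin, &st) < 0)
		goto close_out;
	/* sendfile may copy fewer bytes than requested, so loop */
	while (offset < st.st_size) {
		ssize_t n = sendfile(fdout, fdin, &offset,
			(size_t)(st.st_size - offset));
		if (n < 0)
			goto close_out;
	}
	rc = 0;
close_out:
	(void)close(fdout);
close_in:
	(void)close(fdin);
	return rc;
}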
/*
 *  stress_timer
 *	stress timers
 */
static int stress_timer(const args_t *args)
{
	struct sigevent sev;
	struct itimerspec timer;
	sigset_t mask;
	uint64_t timer_freq = DEFAULT_TIMER_FREQ;

	(void)sigemptyset(&mask);
	(void)sigaddset(&mask, SIGINT);
	(void)sigprocmask(SIG_SETMASK, &mask, NULL);

	max_ops = args->max_ops;
	start = time_now();

	if (!get_setting("timer-freq", &timer_freq)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			timer_freq = MAX_TIMER_FREQ;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			timer_freq = MIN_TIMER_FREQ;
	}
	rate_ns = timer_freq ? 1000000000.0 / timer_freq : 1000000000.0;

	if (stress_sighandler(args->name, SIGRTMIN, stress_timer_handler, NULL) < 0)
		return EXIT_FAILURE;

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_ptr = &timerid;
	if (timer_create(CLOCK_REALTIME, &sev, &timerid) < 0) {
		pr_fail_err("timer_create");
		return EXIT_FAILURE;
	}
	stress_timer_set(&timer);
	if (timer_settime(timerid, 0, &timer, NULL) < 0) {
		pr_fail_err("timer_settime");
		return EXIT_FAILURE;
	}

	do {
		struct timespec req;

		req.tv_sec = 0;
		req.tv_nsec = 10000000;
		(void)nanosleep(&req, NULL);
		set_counter(args, timer_counter);
	} while (keep_stressing());

	if (timer_delete(timerid) < 0) {
		pr_fail_err("timer_delete");
		return EXIT_FAILURE;
	}
	pr_dbg("%s: %" PRIu64 " timer overruns (instance %" PRIu32 ")\n",
		args->name, overruns, args->instance);

	return EXIT_SUCCESS;
}
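/*
 * Illustrative sketch (not part of the original source): the bare
 * POSIX timer setup the stressor above drives, as a standalone program
 * that fires SIGRTMIN at 10 Hz and counts ten ticks. Link with -lrt on
 * older glibc; names such as demo_ticks are hypothetical.
 */
#include <signal.h>
#include <stdio.h>
#include <time.h>
#include <unistd.h>

static volatile sig_atomic_t demo_ticks;

static void demo_handler(int sig)
{
	(void)sig;
	demo_ticks++;
}

int main(void)
{
	timer_t tid;
	struct sigevent sev = { 0 };
	struct itimerspec its = { 0 };
	struct sigaction sa = { 0 };

	sa.sa_handler = demo_handler;
	(void)sigaction(SIGRTMIN, &sa, NULL);

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	if (timer_create(CLOCK_REALTIME, &sev, &tid) < 0)
		return 1;

	/* 100 ms until first expiry, then a 100 ms period */
	its.it_value.tv_nsec = 100000000;
	its.it_interval.tv_nsec = 100000000;
	if (timer_settime(tid, 0, &its, NULL) < 0)
		return 1;

	while (demo_ticks < 10)
		(void)pause();	/* returns on each signal delivery */

	(void)timer_delete(tid);
	printf("ticks: %d\n", (int)demo_ticks);
	return 0;
}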
/*
 *  stress_splice
 *	stress copying of /dev/zero to /dev/null
 */
static int stress_splice(const args_t *args)
{
	int fd_in, fd_out, fds[2];
	size_t splice_bytes = DEFAULT_SPLICE_BYTES;

	if (!get_setting("splice-bytes", &splice_bytes)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			splice_bytes = MAX_SPLICE_BYTES;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			splice_bytes = MIN_SPLICE_BYTES;
	}
	splice_bytes /= args->num_instances;
	if (splice_bytes < MIN_SPLICE_BYTES)
		splice_bytes = MIN_SPLICE_BYTES;

	if (pipe(fds) < 0) {
		pr_fail_err("pipe");
		return EXIT_FAILURE;
	}
	if ((fd_in = open("/dev/zero", O_RDONLY)) < 0) {
		(void)close(fds[0]);
		(void)close(fds[1]);
		pr_fail_err("open");
		return EXIT_FAILURE;
	}
	if ((fd_out = open("/dev/null", O_WRONLY)) < 0) {
		(void)close(fd_in);
		(void)close(fds[0]);
		(void)close(fds[1]);
		pr_fail_err("open");
		return EXIT_FAILURE;
	}

	do {
		ssize_t ret;

		ret = splice(fd_in, NULL, fds[1], NULL,
			splice_bytes, SPLICE_F_MOVE);
		if (ret < 0)
			break;
		ret = splice(fds[0], NULL, fd_out, NULL,
			splice_bytes, SPLICE_F_MOVE);
		if (ret < 0)
			break;

		inc_counter(args);
	} while (keep_stressing());

	(void)close(fd_out);
	(void)close(fd_in);
	(void)close(fds[0]);
	(void)close(fds[1]);

	return EXIT_SUCCESS;
}
/*
 *  stress_fiemap_ioctl()
 *	exercise the FIEMAP ioctl
 */
void stress_fiemap_ioctl(
	const char *name,
	int fd,
	uint64_t *const counter,
	const uint64_t max_ops)
{
	do {
		struct fiemap *fiemap, *tmp;
		size_t extents_size;

		fiemap = (struct fiemap *)calloc(1, sizeof(struct fiemap));
		if (!fiemap) {
			pr_err(stderr, "Out of memory allocating fiemap\n");
			break;
		}
		fiemap->fm_length = ~0;

		/* Find out how many extents there are */
		if (ioctl(fd, FS_IOC_FIEMAP, fiemap) < 0) {
			pr_fail_err(name, "FS_IOC_FIEMAP ioctl()");
			free(fiemap);
			break;
		}

		/* Read in the extents */
		extents_size = sizeof(struct fiemap_extent) *
			(fiemap->fm_mapped_extents);

		/* Resize fiemap to allow us to read in the extents */
		tmp = (struct fiemap *)realloc(fiemap,
			sizeof(struct fiemap) + extents_size);
		if (!tmp) {
			pr_err(stderr, "Out of memory reallocating fiemap\n");
			free(fiemap);
			break;
		}
		fiemap = tmp;

		memset(fiemap->fm_extents, 0, extents_size);
		fiemap->fm_extent_count = fiemap->fm_mapped_extents;
		fiemap->fm_mapped_extents = 0;

		if (ioctl(fd, FS_IOC_FIEMAP, fiemap) < 0) {
			pr_fail_err(name, "FS_IOC_FIEMAP ioctl()");
			free(fiemap);
			break;
		}
		free(fiemap);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
}
/*
 *  stress on sync()
 *	stress system by IO sync calls
 */
int stress_io(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
#if defined(__linux__)
	int fd;
#endif

	(void)instance;
#if !(defined(__linux__) && NEED_GLIBC(2,14,0))
	(void)name;
#endif

#if defined(__linux__)
	fd = openat(AT_FDCWD, ".", O_RDONLY | O_NONBLOCK | O_DIRECTORY);
#endif

	do {
		sync();
#if defined(__linux__) && NEED_GLIBC(2,14,0)
		if ((fd != -1) && (syncfs(fd) < 0))
			pr_fail_err(name, "syncfs");
#endif
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

#if defined(__linux__)
	if (fd != -1)
		(void)close(fd);
#endif

	return EXIT_SUCCESS;
}
/*
 *  stress_userfaultfd_child()
 *	generate page faults for parent to handle
 */
static int stress_userfaultfd_child(void *arg)
{
	context_t *c = (context_t *)arg;
	const args_t *args = c->args;

	(void)setpgid(0, g_pgrp);
	stress_parent_died_alarm();
	if (stress_sighandler(args->name, SIGALRM,
			stress_child_alarm_handler, NULL) < 0)
		return EXIT_NO_RESOURCE;

	do {
		uint8_t *ptr, *end = c->data + c->sz;

		/* hint we don't need these pages */
		if (shim_madvise(c->data, c->sz, MADV_DONTNEED) < 0) {
			pr_fail_err("userfaultfd madvise failed");
			(void)kill(c->parent, SIGALRM);
			return -1;
		}
		/* and trigger some page faults */
		for (ptr = c->data; ptr < end; ptr += c->page_size)
			*ptr = 0xff;
	} while (keep_stressing());

	return 0;
}
/*
 *  handle_page_fault()
 *	handle a write page fault caused by child
 */
static inline int handle_page_fault(
	const args_t *args,
	const int fd,
	uint8_t *addr,
	void *zero_page,
	uint8_t *data_start,
	uint8_t *data_end,
	const size_t page_size)
{
	if ((addr < data_start) || (addr >= data_end)) {
		pr_fail_err("userfaultfd page fault address out of range");
		return -1;
	}

	if (mwc32() & 1) {
		struct uffdio_copy copy;

		copy.copy = 0;
		copy.mode = 0;
		copy.dst = (unsigned long)addr;
		copy.src = (unsigned long)zero_page;
		copy.len = page_size;

		if (ioctl(fd, UFFDIO_COPY, &copy) < 0) {
			pr_fail_err("userfaultfd page fault copy ioctl failed");
			return -1;
		}
	} else {
		struct uffdio_zeropage zeropage;

		zeropage.range.start = (unsigned long)addr;
		zeropage.range.len = page_size;
		zeropage.mode = 0;

		if (ioctl(fd, UFFDIO_ZEROPAGE, &zeropage) < 0) {
			pr_fail_err("userfaultfd page fault zeropage ioctl failed");
			return -1;
		}
	}
	return 0;
}
/*
 *  stress_fiemap_writer()
 *	write data in random places and punch holes
 *	in data in random places to try and maximize
 *	extents in the file
 */
int stress_fiemap_writer(
	const char *name,
	const int fd,
	uint64_t *counters,
	const uint64_t max_ops)
{
	uint8_t buf[1];
	uint64_t len = (off_t)opt_fiemap_size - sizeof(buf);
	uint64_t counter;
	int rc = EXIT_FAILURE;
#if defined(FALLOC_FL_PUNCH_HOLE) && \
    defined(FALLOC_FL_KEEP_SIZE)
	bool punch_hole = true;
#endif

	stress_strnrnd((char *)buf, sizeof(buf));

	do {
		uint64_t offset;
		size_t i;

		/*
		 * Aggregate the per-process counters first so the loop
		 * condition below sees fresh totals even when the
		 * punch-hole path takes the continue
		 */
		counter = 0;
		for (i = 0; i < MAX_FIEMAP_PROCS; i++)
			counter += counters[i];

		offset = (mwc64() % len) & ~0x1fff;
		if (lseek(fd, (off_t)offset, SEEK_SET) < 0)
			break;
		if (write(fd, buf, sizeof(buf)) < 0) {
			if ((errno != EAGAIN) && (errno != EINTR)) {
				pr_fail_err(name, "write");
				goto tidy;
			}
		}
#if defined(FALLOC_FL_PUNCH_HOLE) && \
    defined(FALLOC_FL_KEEP_SIZE)
		if (!punch_hole)
			continue;

		offset = mwc64() % len;
		if (fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
				offset, 8192) < 0) {
			if (errno == EOPNOTSUPP)
				punch_hole = false;
		}
#endif
	} while (opt_do_run && (!max_ops || counter < max_ops));
	rc = EXIT_SUCCESS;
tidy:
	(void)close(fd);

	return rc;
}
/*
 *  epoll_notification()
 *	handle accept notification on sfd, add
 *	fd's to epoll event list
 */
static int epoll_notification(
	const char *name,
	const int efd,
	const int sfd)
{
	for (;;) {
		struct sockaddr saddr;
		socklen_t slen = sizeof(saddr);
		int fd;

		if ((fd = accept(sfd, &saddr, &slen)) < 0) {
			if ((errno == EAGAIN) || (errno == EWOULDBLOCK)) {
				/* all incoming connections handled so finish */
				return 0;
			}
			if ((errno == EMFILE) || (errno == ENFILE)) {
				/* out of file descriptors! */
				return 0;
			}
			pr_fail_err(name, "accept");
			return -1;
		}
		/*
		 *  Add non-blocking fd to epoll event list
		 */
		if (epoll_set_fd_nonblock(fd) < 0) {
			pr_fail_err(name, "setting socket to non-blocking");
			(void)close(fd);
			return -1;
		}
		if (epoll_ctl_add(efd, fd) < 0) {
			pr_fail_err(name, "epoll ctl add");
			(void)close(fd);
			return -1;
		}
	}
	return 0;
}
/*
 *  stress_null
 *	stress writing to /dev/null
 */
int stress_null(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd;
	char buffer[4096];

	(void)instance;

	if ((fd = open("/dev/null", O_WRONLY)) < 0) {
		pr_fail_err(name, "open");
		return EXIT_FAILURE;
	}

	memset(buffer, 0xff, sizeof(buffer));

	do {
		ssize_t ret;

		ret = write(fd, buffer, sizeof(buffer));
		if (ret <= 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			if (errno) {
				pr_fail_err(name, "write");
				(void)close(fd);
				return EXIT_FAILURE;
			}
			continue;
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)close(fd);

	return EXIT_SUCCESS;
}
/*
 *  stress_temp_dir_rm()
 *	remove a temporary directory
 */
int stress_temp_dir_rm(
	const char *name,
	const pid_t pid,
	const uint32_t instance)
{
	int ret;
	char tmp[PATH_MAX + 1];

	stress_temp_dir(tmp, sizeof(tmp), name, pid, instance);
	ret = rmdir(tmp);
	if (ret < 0) {
		ret = -errno;
		pr_fail_err(name, "rmdir");
	}

	return ret;
}
/*
 *  stress on sync()
 *	stress system by IO sync calls
 */
static int stress_io(const args_t *args)
{
#if defined(HAVE_SYNCFS)
	int i, fd, n_mnts;
	char *mnts[MAX_MNTS];
	int fds[MAX_MNTS];

	n_mnts = mount_get(mnts, MAX_MNTS);
	for (i = 0; i < n_mnts; i++)
		fds[i] = openat(AT_FDCWD, mnts[i],
			O_RDONLY | O_NONBLOCK | O_DIRECTORY);

	fd = openat(AT_FDCWD, ".", O_RDONLY | O_NONBLOCK | O_DIRECTORY);
#endif
	do {
		(void)sync();
#if defined(HAVE_SYNCFS)
		if ((fd != -1) && (syncfs(fd) < 0))
			pr_fail_err("syncfs");

		/* try to sync on all the mount points */
		for (i = 0; i < n_mnts; i++)
			if (fds[i] != -1)
				(void)syncfs(fds[i]);
#endif
		inc_counter(args);
	} while (keep_stressing());

#if defined(HAVE_SYNCFS)
	if (fd != -1)
		(void)close(fd);
	for (i = 0; i < n_mnts; i++)
		if (fds[i] != -1)
			(void)close(fds[i]);
	mount_free(mnts, n_mnts);
#endif

	return EXIT_SUCCESS;
}
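/*
 * Illustrative sketch (not part of the original source): minimal
 * standalone syncfs(2) usage, flushing only the filesystem containing
 * the current directory rather than every mounted filesystem as
 * sync(2) does. Assumes Linux with glibc >= 2.14.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open(".", O_RDONLY | O_DIRECTORY);

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (syncfs(fd) < 0)	/* flush just this filesystem */
		perror("syncfs");
	(void)close(fd);
	return 0;
}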
/*
 *  remap_order()
 *	remap based on given order
 */
static int remap_order(
	const args_t *args,
	const size_t stride,
	mapdata_t *data,
	const size_t *order,
	const size_t page_size)
{
	size_t i;

	for (i = 0; i < N_PAGES; i++) {
		int ret;

		ret = remap_file_pages(data + (i * stride), page_size,
			0, order[i], 0);
		if (ret < 0) {
			pr_fail_err("remap_file_pages");
			return -1;
		}
	}
	return 0;
}
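/*
 * Illustrative sketch (not part of the original source): swapping two
 * pages of a MAP_SHARED anonymous mapping with remap_file_pages(2),
 * the primitive remap_order() drives above. Note the call has been
 * deprecated since Linux 3.16 and is emulated by newer kernels.
 */
#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t page = (size_t)sysconf(_SC_PAGESIZE);
	char *mem = mmap(NULL, page * 2, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (mem == MAP_FAILED)
		return 1;
	mem[0] = 'A';		/* marker in page 0 */
	mem[page] = 'B';	/* marker in page 1 */

	/* view page 1 at offset 0 and page 0 at offset 1 */
	if (remap_file_pages(mem, page, 0, 1, 0) < 0 ||
	    remap_file_pages(mem + page, page, 0, 0, 0) < 0) {
		perror("remap_file_pages");
		return 1;
	}
	printf("%c %c\n", mem[0], mem[page]);	/* now prints "B A" */
	(void)munmap(mem, page * 2);
	return 0;
}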
/*
 *  stress_getrandom
 *	stress reading random values using getrandom()
 */
int stress_getrandom(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	(void)instance;

	do {
		char buffer[8192];
		ssize_t ret;

		ret = sys_getrandom(buffer, sizeof(buffer), 0);
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			pr_fail_err(name, "getrandom");
			return EXIT_FAILURE;
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
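/*
 * Illustrative sketch (not part of the original source): a plausible
 * form of the sys_getrandom() shim used above, invoking the raw
 * syscall so it also works on pre-2.25 glibc that lacks a getrandom()
 * wrapper. Assumes Linux with SYS_getrandom defined; the
 * demo_getrandom name is hypothetical.
 */
#include <sys/syscall.h>
#include <unistd.h>

static ssize_t demo_getrandom(void *buf, size_t buflen, unsigned int flags)
{
	/* returns the number of bytes filled, or -1 with errno set */
	return syscall(SYS_getrandom, buf, buflen, flags);
}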
/*
 *  epoll_server()
 *	wait on connections and read data
 */
static void epoll_server(
	const int child,
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name,
	const pid_t ppid)
{
	int efd = -1, sfd = -1, rc = EXIT_SUCCESS;
	int so_reuseaddr = 1;
	int port = opt_epoll_port + child + (max_servers * instance);
	struct sigaction new_action;
	struct epoll_event *events = NULL;
	struct sockaddr *addr = NULL;
	socklen_t addr_len = 0;

	new_action.sa_handler = handle_socket_sigalrm;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = 0;
	if (sigaction(SIGALRM, &new_action, NULL) < 0) {
		pr_fail_err(name, "sigaction");
		rc = EXIT_FAILURE;
		goto die;
	}
	if ((sfd = socket(opt_epoll_domain, SOCK_STREAM, 0)) < 0) {
		pr_fail_err(name, "socket");
		rc = EXIT_FAILURE;
		goto die;
	}
	if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR,
			&so_reuseaddr, sizeof(so_reuseaddr)) < 0) {
		pr_fail_err(name, "setsockopt");
		rc = EXIT_FAILURE;
		goto die_close;
	}
	stress_set_sockaddr(name, instance, ppid,
		opt_epoll_domain, port, &addr, &addr_len);
	if (bind(sfd, addr, addr_len) < 0) {
		pr_fail_err(name, "bind");
		rc = EXIT_FAILURE;
		goto die_close;
	}
	if (epoll_set_fd_nonblock(sfd) < 0) {
		pr_fail_err(name, "setting socket to non-blocking");
		rc = EXIT_FAILURE;
		goto die_close;
	}
	if (listen(sfd, SOMAXCONN) < 0) {
		pr_fail_err(name, "listen");
		rc = EXIT_FAILURE;
		goto die_close;
	}
	if ((efd = epoll_create1(0)) < 0) {
		pr_fail_err(name, "epoll_create1");
		rc = EXIT_FAILURE;
		goto die_close;
	}
	if (epoll_ctl_add(efd, sfd) < 0) {
		pr_fail_err(name, "epoll ctl add");
		rc = EXIT_FAILURE;
		goto die_close;
	}
	if ((events = calloc(MAX_EPOLL_EVENTS,
			sizeof(struct epoll_event))) == NULL) {
		pr_fail_err(name, "calloc");
		rc = EXIT_FAILURE;
		goto die_close;
	}

	do {
		int n, i;

		memset(events, 0, MAX_EPOLL_EVENTS * sizeof(struct epoll_event));
		errno = 0;

		/*
		 * Wait for 100ms for an event, allowing us
		 * to break out if opt_do_run has been changed
		 */
		n = epoll_wait(efd, events, MAX_EPOLL_EVENTS, 100);
		if (n < 0) {
			if (errno != EINTR) {
				pr_fail_err(name, "epoll_wait");
				rc = EXIT_FAILURE;
				goto die_close;
			}
			break;
		}

		for (i = 0; i < n; i++) {
			if ((events[i].events & EPOLLERR) ||
			    (events[i].events & EPOLLHUP) ||
			    (!(events[i].events & EPOLLIN))) {
				/*
				 *  Error has occurred or fd is not
				 *  for reading anymore.. so reap fd
				 */
				(void)close(events[i].data.fd);
			} else if (sfd == events[i].data.fd) {
				/*
				 *  The listening socket has notification(s)
				 *  pending, so handle incoming connections
				 */
				if (epoll_notification(name, efd, sfd) < 0)
					break;
			} else {
				/*
				 *  The fd has data available, so read it
				 */
				epoll_recv_data(events[i].data.fd);
			}
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

die_close:
	if (efd != -1)
		(void)close(efd);
	if (sfd != -1)
		(void)close(sfd);
die:
#ifdef AF_UNIX
	if (addr && (opt_epoll_domain == AF_UNIX)) {
		struct sockaddr_un *addr_un = (struct sockaddr_un *)addr;

		(void)unlink(addr_un->sun_path);
	}
#endif
	free(events);

	exit(rc);
}
/*
 *  stress_access
 *	stress access family of system calls
 */
static int stress_access(const args_t *args)
{
	int fd = -1, ret, rc = EXIT_FAILURE;
	char filename[PATH_MAX];
	const mode_t all_mask = 0700;
	size_t i;
	const bool is_root = (geteuid() == 0);

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());
	(void)umask(0077);
	if ((fd = creat(filename, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err("creat");
		goto tidy;
	}

	do {
		for (i = 0; i < SIZEOF_ARRAY(modes); i++) {
			ret = fchmod(fd, modes[i].chmod_mode);
			if (CHMOD_ERR(ret)) {
				pr_err("%s: fchmod %3.3o failed: %d (%s)\n",
					args->name,
					(unsigned int)modes[i].chmod_mode,
					errno, strerror(errno));
				goto tidy;
			}
			ret = access(filename, modes[i].access_mode);
			if (ret < 0) {
				pr_fail("%s: access %3.3o on chmod mode %3.3o failed: %d (%s)\n",
					args->name,
					modes[i].access_mode,
					(unsigned int)modes[i].chmod_mode,
					errno, strerror(errno));
			}
#if defined(HAVE_FACCESSAT)
			ret = faccessat(AT_FDCWD, filename,
				modes[i].access_mode, 0);
			if (ret < 0) {
				pr_fail("%s: faccessat %3.3o on chmod mode %3.3o failed: %d (%s)\n",
					args->name,
					modes[i].access_mode,
					(unsigned int)modes[i].chmod_mode,
					errno, strerror(errno));
			}
#endif
			if (modes[i].access_mode != 0) {
				const mode_t chmod_mode = modes[i].chmod_mode ^ all_mask;
				const bool s_ixusr = chmod_mode & S_IXUSR;
				const bool dont_ignore = !(is_root && s_ixusr);

				ret = fchmod(fd, chmod_mode);
				if (CHMOD_ERR(ret)) {
					pr_err("%s: fchmod %3.3o failed: %d (%s)\n",
						args->name,
						(unsigned int)chmod_mode,
						errno, strerror(errno));
					goto tidy;
				}
				ret = access(filename, modes[i].access_mode);
				if ((ret == 0) && dont_ignore) {
					pr_fail("%s: access %3.3o on chmod mode %3.3o was ok (not expected): %d (%s)\n",
						args->name,
						modes[i].access_mode,
						(unsigned int)chmod_mode,
						errno, strerror(errno));
				}
#if defined(HAVE_FACCESSAT)
				ret = faccessat(AT_FDCWD, filename,
					modes[i].access_mode,
					AT_SYMLINK_NOFOLLOW);
				if ((ret == 0) && dont_ignore) {
					pr_fail("%s: faccessat %3.3o on chmod mode %3.3o was ok (not expected): %d (%s)\n",
						args->name,
						modes[i].access_mode,
						(unsigned int)chmod_mode,
						errno, strerror(errno));
				}
#endif
			}
		}
		inc_counter(args);
	} while (keep_stressing());

	rc = EXIT_SUCCESS;
tidy:
	if (fd >= 0) {
		(void)fchmod(fd, 0666);
		(void)close(fd);
	}
	(void)unlink(filename);
	(void)stress_temp_dir_rm_args(args);

	return rc;
}
/*
 *  stress_loop()
 *	stress loopback device
 */
static int stress_loop(const args_t *args)
{
	int ret, backing_fd, rc = EXIT_FAILURE;
	char backing_file[PATH_MAX];
	size_t backing_size = 2 * MB;

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		backing_file, sizeof(backing_file), mwc32());

	if ((backing_fd = open(backing_file, O_RDWR | O_CREAT,
			S_IRUSR | S_IWUSR)) < 0) {
		pr_fail_err("open");
		goto tidy;
	}
	if (ftruncate(backing_fd, backing_size) < 0) {
		pr_fail_err("ftruncate");
		(void)close(backing_fd);
		goto tidy;
	}
	(void)unlink(backing_file);

	do {
		int ctrl_dev, loop_dev;
		int i;
		long dev_num;
		char dev_name[PATH_MAX];
		struct loop_info info;

		/*
		 *  Open loop control device
		 */
		ctrl_dev = open("/dev/loop-control", O_RDWR);
		if (ctrl_dev < 0) {
			pr_fail("%s: cannot open /dev/loop-control: %d (%s)\n",
				args->name, errno, strerror(errno));
			break;
		}

		/*
		 *  Attempt to get a free loop device
		 */
		dev_num = ioctl(ctrl_dev, LOOP_CTL_GET_FREE);
		if (dev_num < 0)
			goto next;

		/*
		 *  Open new loop device
		 */
		(void)snprintf(dev_name, sizeof(dev_name),
			"/dev/loop%ld", dev_num);
		loop_dev = open(dev_name, O_RDWR);
		if (loop_dev < 0)
			goto destroy_loop;

		/*
		 *  Associate loop device with backing storage
		 */
		ret = ioctl(loop_dev, LOOP_SET_FD, backing_fd);
		if (ret < 0)
			goto close_loop;

#if defined(LOOP_GET_STATUS)
		/*
		 *  Fetch loop device status information
		 */
		ret = ioctl(loop_dev, LOOP_GET_STATUS, &info);
		if (ret < 0)
			goto clr_loop;

		/*
		 *  Try to set some flags
		 */
		info.lo_flags |= (LO_FLAGS_AUTOCLEAR | LO_FLAGS_READ_ONLY);
#if defined(LOOP_SET_STATUS)
		ret = ioctl(loop_dev, LOOP_SET_STATUS, &info);
		(void)ret;
#endif
#endif

#if defined(LOOP_SET_CAPACITY)
		/*
		 *  Resize command (even though we have not changed size)
		 */
		ret = ftruncate(backing_fd, backing_size * 2);
		(void)ret;
		ret = ioctl(loop_dev, LOOP_SET_CAPACITY);
		(void)ret;
#endif

#if defined(LOOP_GET_STATUS)
clr_loop:
#endif
		/*
		 *  Disassociate backing store from loop device
		 */
		for (i = 0; i < 1000; i++) {
			ret = ioctl(loop_dev, LOOP_CLR_FD, backing_fd);
			if (ret < 0) {
				if (errno == EBUSY) {
					(void)shim_usleep(10);
				} else {
					pr_fail("%s: failed to disassociate %s "
						"from backing store, "
						"errno=%d (%s)\n",
						args->name, dev_name,
						errno, strerror(errno));
					goto close_loop;
				}
			} else {
				break;
			}
		}
close_loop:
		(void)close(loop_dev);

		/*
		 *  Remove the loop device, may need several retries
		 *  if we get EBUSY
		 */
destroy_loop:
		for (i = 0; i < 1000; i++) {
			ret = ioctl(ctrl_dev, LOOP_CTL_REMOVE, dev_num);
			if ((ret < 0) && (errno == EBUSY)) {
				(void)shim_usleep(10);
			} else {
				break;
			}
		}
next:
		(void)close(ctrl_dev);
#if defined(LOOP_SET_CAPACITY)
		ret = ftruncate(backing_fd, backing_size);
		(void)ret;
#endif
		inc_counter(args);
	} while (keep_stressing());

	rc = EXIT_SUCCESS;
	(void)close(backing_fd);
tidy:
	(void)stress_temp_dir_rm_args(args);

	return rc;
}
/*
 *  stress_sync_file
 *	stress the sync_file_range system call
 */
static int stress_sync_file(const args_t *args)
{
	int fd, ret;
	off_t sync_file_bytes = DEFAULT_SYNC_FILE_BYTES;
	char filename[PATH_MAX];

	if (!get_setting("sync_file-bytes", &sync_file_bytes)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			sync_file_bytes = MAX_SYNC_FILE_BYTES;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			sync_file_bytes = MIN_SYNC_FILE_BYTES;
	}
	sync_file_bytes /= args->num_instances;
	if (sync_file_bytes < (off_t)MIN_SYNC_FILE_BYTES)
		sync_file_bytes = (off_t)MIN_SYNC_FILE_BYTES;

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		ret = exit_status(errno);
		pr_fail_err("open");
		(void)stress_temp_dir_rm_args(args);
		return ret;
	}
	(void)unlink(filename);

	do {
		shim_off64_t i, offset;
		const size_t mode_index = mwc32() % SIZEOF_ARRAY(sync_modes);
		const int mode = sync_modes[mode_index];

		if (stress_sync_allocate(args, fd, sync_file_bytes) < 0)
			break;
		for (offset = 0; g_keep_stressing_flag &&
		     (offset < (shim_off64_t)sync_file_bytes); ) {
			shim_off64_t sz = (mwc32() & 0x1fc00) + KB;

			ret = shim_sync_file_range(fd, offset, sz, mode);
			if (ret < 0) {
				if (errno == ENOSYS) {
					pr_inf("%s: skipping stressor, sync_file_range "
						"is not implemented\n", args->name);
					goto err;
				}
				pr_fail_err("sync_file_range (forward)");
				break;
			}
			offset += sz;
		}
		if (!g_keep_stressing_flag)
			break;

		if (stress_sync_allocate(args, fd, sync_file_bytes) < 0)
			break;
		for (offset = 0; g_keep_stressing_flag &&
		     (offset < (shim_off64_t)sync_file_bytes); ) {
			shim_off64_t sz = (mwc32() & 0x1fc00) + KB;

			ret = shim_sync_file_range(fd, sync_file_bytes - offset,
				sz, mode);
			if (ret < 0) {
				if (errno == ENOSYS) {
					pr_inf("%s: skipping stressor, sync_file_range "
						"is not implemented\n", args->name);
					goto err;
				}
				pr_fail_err("sync_file_range (reverse)");
				break;
			}
			offset += sz;
		}
		if (!g_keep_stressing_flag)
			break;

		if (stress_sync_allocate(args, fd, sync_file_bytes) < 0)
			break;
		for (i = 0; g_keep_stressing_flag &&
		     (i < (shim_off64_t)(sync_file_bytes / (128 * KB))); i++) {
			offset = (mwc64() % sync_file_bytes) & ~((128 * KB) - 1);

			ret = shim_sync_file_range(fd, offset, 128 * KB, mode);
			if (ret < 0) {
				if (errno == ENOSYS) {
					pr_inf("%s: skipping stressor, sync_file_range "
						"is not implemented\n", args->name);
					goto err;
				}
				pr_fail_err("sync_file_range (random)");
				break;
			}
		}
		inc_counter(args);
	} while (keep_stressing());

err:
	(void)close(fd);
	(void)stress_temp_dir_rm_args(args);

	return EXIT_SUCCESS;
}
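/*
 * Illustrative sketch (not part of the original source): the basic
 * sync_file_range(2) write-out pattern the stressor drives, flushing
 * one dirty region of an open file. Linux-specific; on other systems
 * or very old kernels the call fails with ENOSYS. The
 * demo_flush_range name is hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>
#include <unistd.h>

static int demo_flush_range(int fd, off64_t offset, off64_t nbytes)
{
	/* wait for any write-out already in flight, start write-out,
	   then wait for it to complete: a synchronous data flush */
	return sync_file_range(fd, offset, nbytes,
		SYNC_FILE_RANGE_WAIT_BEFORE |
		SYNC_FILE_RANGE_WRITE |
		SYNC_FILE_RANGE_WAIT_AFTER);
}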
/*
 *  stress_filename()
 *	stress filename sizes etc
 */
int stress_filename(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const pid_t pid = getpid();
	int rc = EXIT_FAILURE;
	size_t sz_left, sz_max;
	char dirname[PATH_MAX];
	char filename[PATH_MAX];
	char *ptr;
	struct statvfs buf;
	size_t i, chars_allowed = 0, sz;

	stress_temp_dir(dirname, sizeof(dirname), name, pid, instance);
	if (mkdir(dirname, S_IRWXU) < 0) {
		if (errno != EEXIST) {
			pr_fail_err(name, "mkdir");
			return EXIT_FAILURE;
		}
	}

	if (statvfs(dirname, &buf) < 0) {
		pr_fail_err(name, "statvfs");
		goto tidy_dir;
	}
	if (instance == 0)
		pr_dbg(stderr, "%s: maximum file name length: %lu bytes\n",
			name, (long unsigned)buf.f_namemax);

	strncpy(filename, dirname, sizeof(filename) - 1);
	ptr = filename + strlen(dirname);
	*(ptr++) = '/';
	*(ptr) = '\0';
	sz_left = sizeof(filename) - (ptr - filename);
	sz_max = (size_t)buf.f_namemax;

	if (sz_left >= PATH_MAX) {
		pr_fail(stderr, "%s: max file name larger than PATH_MAX\n", name);
		goto tidy_dir;
	}

	switch (filename_opt) {
	case STRESS_FILENAME_POSIX:
		strcpy(allowed, posix_allowed);
		chars_allowed = strlen(allowed);
		break;
	case STRESS_FILENAME_EXT:
		stress_filename_ext(&chars_allowed);
		break;
	case STRESS_FILENAME_PROBE:
	default:
		stress_filename_probe(name, filename, ptr, &chars_allowed);
		break;
	}

	if (instance == 0)
		pr_dbg(stdout, "%s: filesystem allows %zu unique "
			"characters in a filename\n",
			name, chars_allowed);

	if (chars_allowed == 0) {
		pr_fail(stderr, "%s: cannot determine allowed characters "
			"in a filename\n", name);
		goto tidy_dir;
	}

	i = 0;
	sz = 1;
	do {
		char ch = allowed[i];
		size_t rnd_sz = 1 + (mwc32() % sz_max);

		i++;
		if (i >= chars_allowed)
			i = 0;

		/* Should succeed */
		stress_filename_generate(ptr, 1, ch);
		stress_filename_test(name, filename, 1, true);
		stress_filename_generate_random(ptr, 1, chars_allowed);
		stress_filename_test(name, filename, 1, true);

		/* Should succeed */
		stress_filename_generate(ptr, sz_max, ch);
		stress_filename_test(name, filename, sz_max, true);
		stress_filename_generate_random(ptr, sz_max, chars_allowed);
		stress_filename_test(name, filename, sz_max, true);

		/* Should succeed */
		stress_filename_generate(ptr, sz_max - 1, ch);
		stress_filename_test(name, filename, sz_max - 1, true);
		stress_filename_generate_random(ptr, sz_max - 1, chars_allowed);
		stress_filename_test(name, filename, sz_max - 1, true);

		/* Should fail */
		stress_filename_generate(ptr, sz_max + 1, ch);
		stress_filename_test(name, filename, sz_max + 1, false);
		stress_filename_generate_random(ptr, sz_max + 1, chars_allowed);
		stress_filename_test(name, filename, sz_max + 1, false);

		/* Should succeed */
		stress_filename_generate(ptr, sz, ch);
		stress_filename_test(name, filename, sz, true);
		stress_filename_generate_random(ptr, sz, chars_allowed);
		stress_filename_test(name, filename, sz, true);

		/* Should succeed */
		stress_filename_generate(ptr, rnd_sz, ch);
		stress_filename_test(name, filename, rnd_sz, true);
		stress_filename_generate_random(ptr, rnd_sz, chars_allowed);
		stress_filename_test(name, filename, rnd_sz, true);

		sz++;
		if (sz > sz_max)
			sz = 1;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
	rc = EXIT_SUCCESS;

tidy_dir:
	(void)rmdir(dirname);

	return rc;
}
/*
 *  stress_kcmp
 *	stress sys_kcmp
 */
static int stress_kcmp(const args_t *args)
{
	pid_t pid1;
	int fd1;
#if defined(HAVE_SYS_EPOLL_H) && NEED_GLIBC(2,3,2)
	int efd, sfd;
	int so_reuseaddr = 1;
	struct epoll_event ev;
	struct sockaddr *addr = NULL;
	socklen_t addr_len = 0;
#endif
	int ret = EXIT_SUCCESS;

	static const char *capfail =
		"need CAP_SYS_PTRACE capability to run kcmp stressor, "
		"aborting stress test\n";

	if ((fd1 = open("/dev/null", O_WRONLY)) < 0) {
		pr_fail_err("open");
		return EXIT_FAILURE;
	}

#if defined(HAVE_SYS_EPOLL_H) && NEED_GLIBC(2,3,2)
	efd = -1;
	if ((sfd = socket(AF_INET, SOCK_STREAM, 0)) < 0) {
		sfd = -1;
		goto again;
	}
	if (setsockopt(sfd, SOL_SOCKET, SO_REUSEADDR,
			&so_reuseaddr, sizeof(so_reuseaddr)) < 0) {
		(void)close(sfd);
		sfd = -1;
		goto again;
	}
	stress_set_sockaddr(args->name, args->instance, args->ppid,
		AF_INET, 23000, &addr, &addr_len, NET_ADDR_ANY);
	if (bind(sfd, addr, addr_len) < 0) {
		(void)close(sfd);
		sfd = -1;
		goto again;
	}
	if (listen(sfd, SOMAXCONN) < 0) {
		(void)close(sfd);
		sfd = -1;
		goto again;
	}

	efd = epoll_create1(0);
	if (efd < 0) {
		(void)close(sfd);
		sfd = -1;
		efd = -1;
		goto again;
	}

	(void)memset(&ev, 0, sizeof(ev));
	ev.data.fd = efd;
	ev.events = EPOLLIN | EPOLLET;
	if (epoll_ctl(efd, EPOLL_CTL_ADD, sfd, &ev) < 0) {
		(void)close(sfd);
		(void)close(efd);
		sfd = -1;
		efd = -1;
	}
#endif

again:
	pid1 = fork();
	if (pid1 < 0) {
		if (g_keep_stressing_flag &&
		    ((errno == EAGAIN) || (errno == ENOMEM)))
			goto again;
		pr_fail_dbg("fork");
		(void)close(fd1);
#if defined(HAVE_SYS_EPOLL_H) && NEED_GLIBC(2,3,2)
		if (sfd != -1)
			(void)close(sfd);
#endif
		return EXIT_FAILURE;
	} else if (pid1 == 0) {
		(void)setpgid(0, g_pgrp);
		stress_parent_died_alarm();

		/* Child */
		while (g_keep_stressing_flag)
			(void)pause();

		/* will never get here */
		(void)close(fd1);
#if defined(HAVE_SYS_EPOLL_H) && NEED_GLIBC(2,3,2)
		if (efd != -1)
			(void)close(efd);
		if (sfd != -1)
			(void)close(sfd);
#endif
		_exit(EXIT_SUCCESS);
	} else {
		/* Parent */
		int fd2, status, pid2;

		(void)setpgid(pid1, g_pgrp);
		pid2 = getpid();
		if ((fd2 = open("/dev/null", O_WRONLY)) < 0) {
			pr_fail_err("open");
			ret = EXIT_FAILURE;
			goto reap;
		}

		do {
			KCMP(pid1, pid2, SHIM_KCMP_FILE, fd1, fd2);
			KCMP(pid1, pid1, SHIM_KCMP_FILE, fd1, fd1);
			KCMP(pid2, pid2, SHIM_KCMP_FILE, fd1, fd1);
			KCMP(pid2, pid2, SHIM_KCMP_FILE, fd2, fd2);

			KCMP(pid1, pid2, SHIM_KCMP_FILES, 0, 0);
			KCMP(pid1, pid1, SHIM_KCMP_FILES, 0, 0);
			KCMP(pid2, pid2, SHIM_KCMP_FILES, 0, 0);

			KCMP(pid1, pid2, SHIM_KCMP_FS, 0, 0);
			KCMP(pid1, pid1, SHIM_KCMP_FS, 0, 0);
			KCMP(pid2, pid2, SHIM_KCMP_FS, 0, 0);

			KCMP(pid1, pid2, SHIM_KCMP_IO, 0, 0);
			KCMP(pid1, pid1, SHIM_KCMP_IO, 0, 0);
			KCMP(pid2, pid2, SHIM_KCMP_IO, 0, 0);

			KCMP(pid1, pid2, SHIM_KCMP_SIGHAND, 0, 0);
			KCMP(pid1, pid1, SHIM_KCMP_SIGHAND, 0, 0);
			KCMP(pid2, pid2, SHIM_KCMP_SIGHAND, 0, 0);

			KCMP(pid1, pid2, SHIM_KCMP_SYSVSEM, 0, 0);
			KCMP(pid1, pid1, SHIM_KCMP_SYSVSEM, 0, 0);
			KCMP(pid2, pid2, SHIM_KCMP_SYSVSEM, 0, 0);

			KCMP(pid1, pid2, SHIM_KCMP_VM, 0, 0);
			KCMP(pid1, pid1, SHIM_KCMP_VM, 0, 0);
			KCMP(pid2, pid2, SHIM_KCMP_VM, 0, 0);

#if defined(HAVE_SYS_EPOLL_H) && NEED_GLIBC(2,3,2)
			if (efd != -1) {
				struct kcmp_epoll_slot slot;

				slot.efd = efd;
				slot.tfd = sfd;
				slot.toff = 0;
				KCMP(pid1, pid2, SHIM_KCMP_EPOLL_TFD, efd,
					(unsigned long)&slot);
				KCMP(pid2, pid1, SHIM_KCMP_EPOLL_TFD, efd,
					(unsigned long)&slot);
				KCMP(pid2, pid2, SHIM_KCMP_EPOLL_TFD, efd,
					(unsigned long)&slot);
			}
#endif

			/* Simple sanity checks on identical resources */
			if (g_opt_flags & OPT_FLAGS_VERIFY) {
				KCMP_VERIFY(pid1, pid1, SHIM_KCMP_FILE, fd1, fd1, 0);
				KCMP_VERIFY(pid1, pid1, SHIM_KCMP_FILES, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, SHIM_KCMP_FS, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, SHIM_KCMP_IO, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, SHIM_KCMP_SIGHAND, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, SHIM_KCMP_SYSVSEM, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, SHIM_KCMP_VM, 0, 0, 0);
				KCMP_VERIFY(pid1, pid2, SHIM_KCMP_SYSVSEM, 0, 0, 0);
#if defined(HAVE_SYS_EPOLL_H) && NEED_GLIBC(2,3,2)
				if (efd != -1) {
					struct kcmp_epoll_slot slot;

					slot.efd = efd;
					slot.tfd = sfd;
					slot.toff = 0;
					KCMP(pid1, pid2, SHIM_KCMP_EPOLL_TFD, efd,
						(unsigned long)&slot);
				}
#endif
			}
			inc_counter(args);
		} while (keep_stressing());

reap:
		if (fd2 >= 0)
			(void)close(fd2);
		(void)kill(pid1, SIGKILL);
		(void)shim_waitpid(pid1, &status, 0);
		(void)close(fd1);
	}
#if defined(HAVE_SYS_EPOLL_H) && NEED_GLIBC(2,3,2)
	if (efd != -1)
		(void)close(efd);
	if (sfd != -1)
		(void)close(sfd);
#endif
	return ret;
}
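/*
 * Illustrative sketch (not part of the original source): kcmp(2) has
 * no glibc wrapper, so a shim like the SHIM_KCMP_* calls above boils
 * down to a raw syscall. This checks whether two fds in one process
 * refer to the same open file description. Assumes Linux >= 3.5; the
 * demo_kcmp_same_file name is hypothetical.
 */
#define _GNU_SOURCE
#include <linux/kcmp.h>
#include <sys/syscall.h>
#include <unistd.h>

static int demo_kcmp_same_file(pid_t pid, int fd1, int fd2)
{
	/* 0: identical, 1/2/3: ordering, -1: error with errno set */
	return (int)syscall(SYS_kcmp, pid, pid, KCMP_FILE,
		(unsigned long)fd1, (unsigned long)fd2);
}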
/*
 *  stress_fiemap
 *	stress fiemap IOCTL
 */
int stress_fiemap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pids[MAX_FIEMAP_PROCS], mypid;
	int ret, fd, rc = EXIT_FAILURE, status;
	char filename[PATH_MAX];
	size_t i;
	const size_t counters_sz = sizeof(uint64_t) * MAX_FIEMAP_PROCS;
	uint64_t *counters;
	uint64_t ops_per_proc = max_ops / MAX_FIEMAP_PROCS;
	uint64_t ops_remaining = max_ops % MAX_FIEMAP_PROCS;

	if (!set_fiemap_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_fiemap_size = MAX_SEEK_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_fiemap_size = MIN_SEEK_SIZE;
	}

	/* We need some shared memory for counter accounting */
	counters = mmap(NULL, counters_sz, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (counters == MAP_FAILED) {
		pr_err(stderr, "%s: mmap failed: errno=%d (%s)\n",
			name, errno, strerror(errno));
		return EXIT_NO_RESOURCE;
	}
	memset(counters, 0, counters_sz);

	mypid = getpid();
	ret = stress_temp_dir_mk(name, mypid, instance);
	if (ret < 0) {
		rc = exit_status(-ret);
		goto clean;
	}

	(void)stress_temp_filename(filename, sizeof(filename),
		name, mypid, instance, mwc32());
	(void)umask(0077);

	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto clean;
	}
	(void)unlink(filename);

	for (i = 0; i < MAX_FIEMAP_PROCS; i++) {
		uint64_t ops = ops_per_proc +
			((i == 0) ? ops_remaining : 0);

		pids[i] = stress_fiemap_spawn(name, fd, &counters[i], ops);
		if (pids[i] < 0)
			goto fail;
	}
	rc = stress_fiemap_writer(name, fd, counters, max_ops);

	/* And reap stressors */
	for (i = 0; i < MAX_FIEMAP_PROCS; i++) {
		(void)kill(pids[i], SIGKILL);
		(void)waitpid(pids[i], &status, 0);
		(*counter) += counters[i];
	}
fail:
	(void)close(fd);
clean:
	(void)munmap(counters, counters_sz);
	(void)stress_temp_dir_rm(name, mypid, instance);

	return rc;
}
/*
 *  stress_userfaultfd_oomable()
 *	stress userfaultfd system call, this
 *	is an OOM-able child process that the
 *	parent can restart
 */
static int stress_userfaultfd_oomable(
	const args_t *args,
	const size_t userfaultfd_bytes)
{
	const size_t page_size = args->page_size;
	size_t sz;
	uint8_t *data;
	void *zero_page = NULL;
	int fd = -1, fdinfo = -1, status, rc = EXIT_SUCCESS, count = 0;
	const unsigned int uffdio_copy = 1 << _UFFDIO_COPY;
	const unsigned int uffdio_zeropage = 1 << _UFFDIO_ZEROPAGE;
	pid_t pid;
	struct uffdio_api api;
	struct uffdio_register reg;
	context_t c;
	bool do_poll = true;
	char filename[PATH_MAX];

	/* Child clone stack */
	static uint8_t stack[STACK_SIZE];
	const ssize_t stack_offset =
		stress_get_stack_direction() * (STACK_SIZE - 64);
	uint8_t *stack_top = stack + stack_offset;

	sz = userfaultfd_bytes & ~(page_size - 1);

	if (posix_memalign(&zero_page, page_size, page_size)) {
		pr_err("%s: zero page allocation failed\n", args->name);
		return EXIT_NO_RESOURCE;
	}

	data = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (data == MAP_FAILED) {
		rc = EXIT_NO_RESOURCE;
		pr_err("%s: mmap failed\n", args->name);
		goto free_zeropage;
	}

	/* Get userfault fd */
	if ((fd = shim_userfaultfd(0)) < 0) {
		if (errno == ENOSYS) {
			pr_inf("%s: stressor will be skipped, "
				"userfaultfd not supported\n", args->name);
			rc = EXIT_NOT_IMPLEMENTED;
			goto unmap_data;
		}
		rc = exit_status(errno);
		pr_err("%s: userfaultfd failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		goto unmap_data;
	}

	(void)snprintf(filename, sizeof(filename), "/proc/%d/fdinfo/%d",
		getpid(), fd);
	fdinfo = open(filename, O_RDONLY);

	if (stress_set_nonblock(fd) < 0)
		do_poll = false;

	/* API sanity check */
	(void)memset(&api, 0, sizeof(api));
	api.api = UFFD_API;
	api.features = 0;
	if (ioctl(fd, UFFDIO_API, &api) < 0) {
		pr_err("%s: ioctl UFFDIO_API failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		rc = EXIT_FAILURE;
		goto unmap_data;
	}
	if (api.api != UFFD_API) {
		pr_err("%s: ioctl UFFDIO_API API check failed\n",
			args->name);
		rc = EXIT_FAILURE;
		goto unmap_data;
	}

	/* Register fault handling mode */
	(void)memset(&reg, 0, sizeof(reg));
	reg.range.start = (unsigned long)data;
	reg.range.len = sz;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(fd, UFFDIO_REGISTER, &reg) < 0) {
		pr_err("%s: ioctl UFFDIO_REGISTER failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		rc = EXIT_FAILURE;
		goto unmap_data;
	}

	/* OK, so do we have copy supported? */
	if ((reg.ioctls & uffdio_copy) != uffdio_copy) {
		pr_err("%s: ioctl UFFDIO_REGISTER did not support _UFFDIO_COPY\n",
			args->name);
		rc = EXIT_FAILURE;
		goto unmap_data;
	}
	/* OK, so do we have zeropage supported? */
	if ((reg.ioctls & uffdio_zeropage) != uffdio_zeropage) {
		pr_err("%s: ioctl UFFDIO_REGISTER did not support _UFFDIO_ZEROPAGE\n",
			args->name);
		rc = EXIT_FAILURE;
		goto unmap_data;
	}

	/* Set up context for child */
	c.args = args;
	c.data = data;
	c.sz = sz;
	c.page_size = page_size;
	c.parent = getpid();

	/*
	 *  We need to clone the child and share the same VM address space
	 *  as parent so we can perform the page fault handling
	 */
	pid = clone(stress_userfaultfd_child, align_stack(stack_top),
		SIGCHLD | CLONE_FILES | CLONE_FS | CLONE_SIGHAND | CLONE_VM, &c);
	if (pid < 0) {
		pr_err("%s: clone failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		goto unreg;
	}

	/* Parent */
	do {
		struct uffd_msg msg;
		ssize_t ret;

		/* check we should break out before we block on the read */
		if (!g_keep_stressing_flag)
			break;

		/*
		 *  polled wait exercises userfaultfd_poll
		 *  in the kernel, but only works if fd is NONBLOCKing
		 */
		if (do_poll) {
			struct pollfd fds[1];

			(void)memset(fds, 0, sizeof fds);
			fds[0].fd = fd;
			fds[0].events = POLLIN;

			/* wait for 1 second max */
			ret = poll(fds, 1, 1000);
			if (ret == 0)
				continue;	/* timed out, redo the poll */
			if (ret < 0) {
				if (errno == EINTR)
					continue;
				if (errno != ENOMEM) {
					pr_fail_err("poll userfaultfd");
					if (!g_keep_stressing_flag)
						break;
				}
				/*
				 *  poll ran out of free space for internal
				 *  fd tables, so give up and block on the
				 *  read anyway
				 */
				goto do_read;
			}
			/* No data, re-poll */
			if (!(fds[0].revents & POLLIN))
				continue;

			if (LIKELY(fdinfo > -1) &&
			    UNLIKELY(count++ >= COUNT_MAX)) {
				ret = lseek(fdinfo, 0, SEEK_SET);
				if (ret == 0) {
					char buffer[4096];

					ret = read(fdinfo, buffer, sizeof(buffer));
					(void)ret;
				}
				count = 0;
			}
		}

do_read:
		if ((ret = read(fd, &msg, sizeof(msg))) < 0) {
			if (errno == EINTR)
				continue;
			pr_fail_err("read userfaultfd");
			if (!g_keep_stressing_flag)
				break;
			continue;
		}
		/* We only expect a page fault event */
		if (msg.event != UFFD_EVENT_PAGEFAULT) {
			pr_fail_err("userfaultfd msg not pagefault event");
			continue;
		}
		/* We only expect a write fault */
		if (!(msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)) {
			pr_fail_err("userfaultfd msg not write page fault event");
			continue;
		}
		/* Go handle the page fault */
		if (handle_page_fault(args, fd,
		    (uint8_t *)(ptrdiff_t)msg.arg.pagefault.address,
		    zero_page, data, data + sz, page_size) < 0)
			break;
		inc_counter(args);
	} while (keep_stressing());

	/* Run it over, zap child */
	(void)kill(pid, SIGKILL);
	if (shim_waitpid(pid, &status, 0) < 0) {
		pr_dbg("%s: waitpid failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
	}

unreg:
	if (ioctl(fd, UFFDIO_UNREGISTER, &reg) < 0) {
		pr_err("%s: ioctl UFFDIO_UNREGISTER failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		rc = EXIT_FAILURE;
		goto unmap_data;
	}
unmap_data:
	(void)munmap(data, sz);
free_zeropage:
	free(zero_page);
	if (fdinfo > -1)
		(void)close(fdinfo);
	if (fd > -1)
		(void)close(fd);

	return rc;
}
/*
 *  stress_timer
 *	stress timers
 */
int stress_timer(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	struct sigaction new_action;
	struct sigevent sev;
	struct itimerspec timer;
	sigset_t mask;

	sigemptyset(&mask);
	sigaddset(&mask, SIGINT);
	sigprocmask(SIG_SETMASK, &mask, NULL);

	start = time_now();

	if (!set_timer_freq) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_timer_freq = MAX_TIMER_FREQ;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_timer_freq = MIN_TIMER_FREQ;
	}
	rate_ns = opt_timer_freq ?
		1000000000.0 / opt_timer_freq : 1000000000.0;

	new_action.sa_flags = 0;
	new_action.sa_handler = stress_timer_handler;
	sigemptyset(&new_action.sa_mask);
	if (sigaction(SIGRTMIN, &new_action, NULL) < 0) {
		pr_fail_err(name, "sigaction");
		return EXIT_FAILURE;
	}

	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_ptr = &timerid;
	if (timer_create(CLOCK_REALTIME, &sev, &timerid) < 0) {
		pr_fail_err(name, "timer_create");
		return EXIT_FAILURE;
	}
	stress_timer_set(&timer);
	if (timer_settime(timerid, 0, &timer, NULL) < 0) {
		pr_fail_err(name, "timer_settime");
		return EXIT_FAILURE;
	}

	do {
		struct timespec req;

		req.tv_sec = 0;
		req.tv_nsec = 10000000;
		(void)nanosleep(&req, NULL);
		*counter = timer_counter;
	} while (opt_do_run && (!max_ops || timer_counter < max_ops));

	if (timer_delete(timerid) < 0) {
		pr_fail_err(name, "timer_delete");
		return EXIT_FAILURE;
	}
	pr_dbg(stderr, "%s: %" PRIu64 " timer overruns (instance %" PRIu32 ")\n",
		name, overruns, instance);

	return EXIT_SUCCESS;
}
/*
 *  stress_mlock()
 *	stress mlock with pages being locked/unlocked
 */
int stress_mlock(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const size_t page_size = stress_get_pagesize();
	pid_t pid;
	size_t max = sysconf(_SC_MAPPED_FILES);

	max = max > MLOCK_MAX ? MLOCK_MAX : max;

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_err(stderr, "%s: fork failed: errno=%d: (%s)\n",
			name, errno, strerror(errno));
	} else if (pid > 0) {
		int status, ret;

		setpgid(pid, pgrp);
		stress_parent_died_alarm();

		/* Parent, wait for child */
		ret = waitpid(pid, &status, 0);
		if (ret < 0) {
			if (errno != EINTR)
				pr_dbg(stderr, "%s: waitpid(): errno=%d (%s)\n",
					name, errno, strerror(errno));
			(void)kill(pid, SIGTERM);
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
		} else if (WIFSIGNALED(status)) {
			pr_dbg(stderr, "%s: child died: %s (instance %d)\n",
				name, stress_strsignal(WTERMSIG(status)),
				instance);
			/* If we got killed by OOM killer, re-start */
			if (WTERMSIG(status) == SIGKILL) {
				pr_dbg(stderr, "%s: assuming killed by OOM "
					"killer, restarting again "
					"(instance %d)\n", name, instance);
				goto again;
			}
		}
	} else if (pid == 0) {
		uint8_t *mappings[max];
		size_t i, n;

		setpgid(0, pgrp);

		/* Make sure this is killable by OOM killer */
		set_oom_adjustment(name, true);

		do {
			for (n = 0; opt_do_run && (n < max); n++) {
				int ret;

				if (!opt_do_run || (max_ops && *counter >= max_ops))
					break;

				mappings[n] = mmap(NULL, page_size * 3,
					PROT_READ | PROT_WRITE,
					MAP_SHARED | MAP_ANONYMOUS, -1, 0);
				if (mappings[n] == MAP_FAILED)
					break;
				ret = mlock_shim(mappings[n] + page_size, page_size);
				if (ret < 0) {
					if (errno == EAGAIN)
						continue;
					if (errno == ENOMEM)
						break;
					pr_fail_err(name, "mlock");
					break;
				} else {
					/*
					 * Mappings are always page aligned so
					 * we can use the bottom bit to
					 * indicate if the page has been
					 * mlocked or not
					 */
					mappings[n] = (uint8_t *)
						((ptrdiff_t)mappings[n] | 1);
					(*counter)++;
				}
			}

			for (i = 0; i < n; i++) {
				ptrdiff_t addr = (ptrdiff_t)mappings[i];
				ptrdiff_t mlocked = addr & 1;

				addr ^= mlocked;
				if (mlocked)
					(void)munlock((uint8_t *)addr + page_size,
						page_size);
				munmap((void *)addr, page_size * 3);
			}
#if !defined(__gnu_hurd__)
			(void)mlockall(MCL_CURRENT);
			(void)mlockall(MCL_FUTURE);
#if defined(MCL_ONFAULT)
			(void)mlockall(MCL_ONFAULT);
#endif
#endif
			for (n = 0; opt_do_run && (n < max); n++) {
				if (!opt_do_run || (max_ops && *counter >= max_ops))
					break;

				mappings[n] = mmap(NULL, page_size,
					PROT_READ | PROT_WRITE,
					MAP_SHARED | MAP_ANONYMOUS, -1, 0);
				if (mappings[n] == MAP_FAILED)
					break;
			}
#if !defined(__gnu_hurd__)
			(void)munlockall();
#endif
			for (i = 0; i < n; i++)
				munmap(mappings[i], page_size);
		} while (opt_do_run && (!max_ops || *counter < max_ops));
	}

	return EXIT_SUCCESS;
}
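/*
 * Illustrative sketch (not part of the original source): the core
 * mlock/munlock pairing the stressor performs per mapping, pinning a
 * single page so it cannot be swapped out. Needs CAP_IPC_LOCK or an
 * adequate RLIMIT_MEMLOCK to succeed.
 */
#include <stdio.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t page = (size_t)sysconf(_SC_PAGESIZE);
	void *buf = mmap(NULL, page, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	if (mlock(buf, page) < 0) {	/* pin page into RAM */
		perror("mlock");
		return 1;
	}
	(void)munlock(buf, page);	/* and release it */
	(void)munmap(buf, page);
	return 0;
}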
/*
 *  stress_fault()
 *	stress min and max page faulting
 */
static int stress_fault(const args_t *args)
{
#if !defined(__HAIKU__)
	struct rusage usage;
#endif
	char filename[PATH_MAX];
	int ret;
	NOCLOBBER int i;

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());

	i = 0;

	if (stress_sighandler(args->name, SIGSEGV, stress_segvhandler, NULL) < 0)
		return EXIT_FAILURE;
	if (stress_sighandler(args->name, SIGBUS, stress_segvhandler, NULL) < 0)
		return EXIT_FAILURE;

	do {
		char *ptr;
		int fd;

		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			do_jmp = false;
			pr_err("%s: unexpected segmentation fault\n",
				args->name);
			break;
		}

		fd = open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (fd < 0) {
			if ((errno == ENOSPC) || (errno == ENOMEM))
				continue;	/* Try again */
			pr_fail_err("open");
			break;
		}
#if defined(HAVE_POSIX_FALLOCATE)
		if (posix_fallocate(fd, 0, 1) < 0) {
			if (errno == ENOSPC) {
				(void)close(fd);
				continue;	/* Try again */
			}
			(void)close(fd);
			pr_fail_err("posix_fallocate");
			break;
		}
#else
		{
			char buffer[1];

redo:
			if (g_keep_stressing_flag &&
			    (write(fd, buffer, sizeof(buffer)) < 0)) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo;
				if (errno == ENOSPC) {
					(void)close(fd);
					continue;
				}
				(void)close(fd);
				pr_fail_err("write");
				break;
			}
		}
#endif
		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			if (!keep_stressing())
				do_jmp = false;
			if (fd != -1)
				(void)close(fd);
			goto next;
		}

		/*
		 * Removing file here causes major fault when we touch
		 * ptr later
		 */
		if (i & 1)
			(void)unlink(filename);

		ptr = mmap(NULL, 1, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
		(void)close(fd);
		fd = -1;
		(void)fd;

		if (ptr == MAP_FAILED) {
			if ((errno == EAGAIN) ||
			    (errno == ENOMEM) ||
			    (errno == ENFILE))
				goto next;
			pr_err("%s: mmap failed: errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			break;
		}
		*ptr = 0;	/* Cause the page fault */

		if (munmap(ptr, 1) < 0) {
			pr_err("%s: munmap failed: errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			break;
		}

next:
		/* Remove file on the non-major fault case */
		if (!(i & 1))
			(void)unlink(filename);

		i++;
		inc_counter(args);
	} while (keep_stressing());

	/* Clean up, most times this is redundant */
	(void)unlink(filename);
	(void)stress_temp_dir_rm_args(args);

#if !defined(__HAIKU__)
	if (!getrusage(RUSAGE_SELF, &usage)) {
		pr_dbg("%s: page faults: minor: %lu, major: %lu\n",
			args->name, usage.ru_minflt, usage.ru_majflt);
	}
#endif
	return EXIT_SUCCESS;
}
/*
 *  stress_copy_file
 *	stress reading chunks of file using copy_file_range()
 */
int stress_copy_file(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd_in, fd_out, rc = EXIT_FAILURE;
	char filename[PATH_MAX], tmp[PATH_MAX];
	pid_t pid = getpid();

	if (!set_copy_file_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_copy_file_bytes = MAX_HDD_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_copy_file_bytes = MIN_HDD_BYTES;
	}
	if (opt_copy_file_bytes < DEFAULT_COPY_FILE_SIZE)
		opt_copy_file_bytes = DEFAULT_COPY_FILE_SIZE * 2;

	/* no temp directory was made on failure, so don't remove one */
	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());
	(void)snprintf(tmp, sizeof(tmp), "%s-orig", filename);
	if ((fd_in = open(tmp, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto tidy_dir;
	}
	(void)unlink(tmp);
	if (ftruncate(fd_in, opt_copy_file_bytes) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "ftruncate");
		goto tidy_in;
	}
	if (fsync(fd_in) < 0) {
		pr_fail_err(name, "fsync");
		goto tidy_in;
	}

	(void)snprintf(tmp, sizeof(tmp), "%s-copy", filename);
	if ((fd_out = open(tmp, O_CREAT | O_WRONLY, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto tidy_in;
	}
	(void)unlink(tmp);

	do {
		ssize_t ret;
		loff_t off_in, off_out;

		off_in = mwc64() % (opt_copy_file_bytes - DEFAULT_COPY_FILE_SIZE);
		off_out = mwc64() % (opt_copy_file_bytes - DEFAULT_COPY_FILE_SIZE);

		ret = sys_copy_file_range(fd_in, &off_in, fd_out,
			&off_out, DEFAULT_COPY_FILE_SIZE, 0);
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_fail_err(name, "copy_file_range");
			goto tidy_out;
		}
		(void)fsync(fd_out);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
	rc = EXIT_SUCCESS;

tidy_out:
	(void)close(fd_out);
tidy_in:
	(void)close(fd_in);
tidy_dir:
	(void)stress_temp_dir_rm(name, pid, instance);

	return rc;
}
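/*
 * Illustrative sketch (not part of the original source): a plausible
 * shape for the sys_copy_file_range() shim above; glibc only gained a
 * copy_file_range() wrapper in 2.27, so older systems go via the raw
 * syscall. Assumes Linux >= 4.5 with SYS_copy_file_range defined; the
 * demo_copy_file_range name is hypothetical.
 */
#define _GNU_SOURCE
#include <sys/syscall.h>
#include <sys/types.h>
#include <unistd.h>

static ssize_t demo_copy_file_range(
	int fd_in, loff_t *off_in,
	int fd_out, loff_t *off_out,
	size_t len, unsigned int flags)
{
	/* copies up to len bytes in-kernel, no userspace bounce buffer */
	return syscall(SYS_copy_file_range, fd_in, off_in,
		fd_out, off_out, len, flags);
}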
/*
 *  stress_msync()
 *	stress msync
 */
int stress_msync(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	const size_t min_size = 2 * page_size;
	size_t sz = min_size;
	ssize_t ret, rc = EXIT_SUCCESS;

	const pid_t pid = getpid();
	int fd = -1;
	char filename[PATH_MAX];

	ret = sigsetjmp(jmp_env, 1);
	if (ret) {
		pr_fail_err(name, "sigsetjmp");
		return EXIT_FAILURE;
	}
	if (stress_sighandler(name, SIGBUS, stress_sigbus_handler, NULL) < 0)
		return EXIT_FAILURE;

	if (!set_msync_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_msync_bytes = MAX_MSYNC_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_msync_bytes = MIN_MSYNC_BYTES;
	}
	sz = opt_msync_bytes & ~(page_size - 1);
	if (sz < min_size)
		sz = min_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	rc = stress_temp_dir_mk(name, pid, instance);
	if (rc < 0)
		return exit_status(-rc);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		(void)unlink(filename);
		(void)stress_temp_dir_rm(name, pid, instance);

		return rc;
	}
	(void)unlink(filename);

	if (ftruncate(fd, sz) < 0) {
		pr_err(stderr, "%s: ftruncate failed, errno=%d (%s)\n",
			name, errno, strerror(errno));
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);

		return EXIT_FAILURE;
	}

	buf = (uint8_t *)mmap(NULL, sz,
		PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		pr_err(stderr, "%s: failed to mmap memory, errno=%d (%s)\n",
			name, errno, strerror(errno));
		rc = EXIT_NO_RESOURCE;
		goto err;
	}

	do {
		off_t offset;
		uint8_t val, data[page_size];

		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			/* Try again */
			continue;
		}
		/*
		 *  Change data in memory, msync to disk
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);
		ret = msync(buf + offset, page_size, MS_SYNC);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_SYNC on "
				"offset %jd failed, errno=%d (%s)\n",
				name, (intmax_t)offset,
				errno, strerror(errno));
			goto do_invalidate;
		}
		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seek to offset %jd, "
				"errno=%d (%s)\n",
				name, (intmax_t)offset,
				errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_invalidate;
		}
		if (stress_page_check(data, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in file different "
				"to data in memory\n", name);
		}

do_invalidate:
		/*
		 *  Now change data on disc, msync invalidate
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);

		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seek to offset %jd, "
				"errno=%d (%s)\n",
				name, (intmax_t)offset,
				errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_next;
		}

		ret = msync(buf + offset, page_size, MS_INVALIDATE);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_INVALIDATE on "
				"offset %jd failed, errno=%d (%s)\n",
				name, (intmax_t)offset,
				errno, strerror(errno));
			goto do_next;
		}
		if (stress_page_check(buf + offset, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in memory "
				"different to data in file\n", name);
		}
do_next:
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)munmap((void *)buf, sz);
err:
	(void)close(fd);
	(void)stress_temp_dir_rm(name, pid, instance);

	if (sigbus_count)
		pr_inf(stdout, "%s: caught %" PRIu64 " SIGBUS signals\n",
			name, sigbus_count);

	return rc;
}
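/*
 * Illustrative sketch (not part of the original source): the two
 * msync(2) directions exercised above on a file-backed mapping.
 * MS_SYNC pushes dirty pages in memory out to the file; MS_INVALIDATE
 * drops cached pages so the mapping re-reads what is on disk. The
 * demo_msync name is hypothetical.
 */
#include <sys/mman.h>

static int demo_msync(void *page_addr, size_t page_size, int to_disk)
{
	return msync(page_addr, page_size,
		to_disk ? MS_SYNC : MS_INVALIDATE);
}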
/*
 *  stress_zero
 *	stress reading of /dev/zero
 */
static int stress_zero(const args_t *args)
{
	int fd;
	const size_t page_size = args->page_size;
#if defined(__minix__)
	const int flags = O_RDONLY;
#else
	const int flags = O_RDWR;
#endif
	char wr_buffer[page_size];

	if ((fd = open("/dev/zero", flags)) < 0) {
		pr_fail_err("open /dev/zero");
		return EXIT_FAILURE;
	}

	(void)memset(wr_buffer, 0, sizeof wr_buffer);

	do {
		char rd_buffer[page_size];
		ssize_t ret;
#if defined(__linux__)
		int32_t *ptr;
#endif

		ret = read(fd, rd_buffer, sizeof(rd_buffer));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_fail_err("read");
			(void)close(fd);
			return EXIT_FAILURE;
		}
#if !defined(__minix__)
		/* One can also write to /dev/zero w/o failure */
		ret = write(fd, wr_buffer, sizeof(wr_buffer));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_fail_err("write");
			(void)close(fd);
			return EXIT_FAILURE;
		}
#endif
#if defined(__linux__)
		/*
		 *  check if we can mmap /dev/zero
		 */
		ptr = mmap(NULL, page_size, PROT_READ,
			MAP_PRIVATE | MAP_ANONYMOUS,
			fd, page_size * mwc16());
		if (ptr == MAP_FAILED) {
			if (errno == ENOMEM)
				continue;
			pr_fail_err("mmap /dev/zero");
			(void)close(fd);
			return EXIT_FAILURE;
		}
		/* Quick sanity check if first 32 bits are zero */
		if (*ptr != 0) {
			pr_fail_err("mmap'd /dev/zero not null");
			(void)munmap(ptr, page_size);
			(void)close(fd);
			return EXIT_FAILURE;
		}
		(void)munmap(ptr, page_size);
#endif
		inc_counter(args);
	} while (keep_stressing());

	(void)close(fd);

	return EXIT_SUCCESS;
}
/*
 *  stress_tlb_shootdown()
 *	stress out TLB shootdowns
 */
static int stress_tlb_shootdown(const args_t *args)
{
	const size_t page_size = args->page_size;
	const size_t mmap_size = page_size * MMAP_PAGES;
	pid_t pids[MAX_TLB_PROCS];
	cpu_set_t proc_mask_initial;

	if (sched_getaffinity(0, sizeof(proc_mask_initial),
			&proc_mask_initial) < 0) {
		pr_fail_err("could not get CPU affinity");
		return EXIT_FAILURE;
	}

	do {
		uint8_t *mem, *ptr;
		int retry = 128;
		cpu_set_t proc_mask;
		int32_t tlb_procs, i;
		const int32_t max_cpus = stress_get_processors_configured();

		CPU_ZERO(&proc_mask);
		CPU_OR(&proc_mask, &proc_mask_initial, &proc_mask);

		tlb_procs = max_cpus;
		if (tlb_procs > MAX_TLB_PROCS)
			tlb_procs = MAX_TLB_PROCS;
		if (tlb_procs < MIN_TLB_PROCS)
			tlb_procs = MIN_TLB_PROCS;

		for (;;) {
			mem = mmap(NULL, mmap_size, PROT_WRITE | PROT_READ,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0);
			if ((void *)mem == MAP_FAILED) {
				if ((errno == EAGAIN) ||
				    (errno == ENOMEM) ||
				    (errno == ENFILE)) {
					if (--retry < 0)
						return EXIT_NO_RESOURCE;
				} else {
					pr_fail_err("mmap");
				}
			} else {
				break;
			}
		}
		(void)memset(mem, 0, mmap_size);

		for (i = 0; i < tlb_procs; i++)
			pids[i] = -1;

		for (i = 0; i < tlb_procs; i++) {
			int32_t j, cpu = -1;

			for (j = 0; j < max_cpus; j++) {
				if (CPU_ISSET(j, &proc_mask)) {
					cpu = j;
					CPU_CLR(j, &proc_mask);
					break;
				}
			}
			if (cpu == -1)
				break;

			pids[i] = fork();
			if (pids[i] < 0)
				break;
			if (pids[i] == 0) {
				cpu_set_t mask;
				char buffer[page_size];

				(void)setpgid(0, g_pgrp);
				stress_parent_died_alarm();

				/* Make sure this is killable by OOM killer */
				set_oom_adjustment(args->name, true);

				CPU_ZERO(&mask);
				CPU_SET(cpu % max_cpus, &mask);
				(void)sched_setaffinity(args->pid,
					sizeof(mask), &mask);

				for (ptr = mem; ptr < mem + mmap_size;
				     ptr += page_size) {
					/* Force tlb shoot down on page */
					(void)mprotect(ptr, page_size, PROT_READ);
					(void)memcpy(buffer, ptr, page_size);
					(void)munmap(ptr, page_size);
				}
				_exit(0);
			}
		}

		for (i = 0; i < tlb_procs; i++) {
			if (pids[i] != -1) {
				int status, ret;

				ret = shim_waitpid(pids[i], &status, 0);
				if ((ret < 0) && (errno == EINTR)) {
					int j;

					/*
					 * We got interrupted, so assume
					 * it was the alarm (timedout) or
					 * SIGINT so force terminate
					 */
					for (j = i; j < tlb_procs; j++) {
						if (pids[j] != -1)
							(void)kill(pids[j], SIGKILL);
					}

					/* re-wait on the failed wait */
					(void)shim_waitpid(pids[i], &status, 0);

					/* and continue waitpid on the pids */
				}
			}
		}
		(void)munmap(mem, mmap_size);
		(void)sched_setaffinity(0, sizeof(proc_mask_initial),
			&proc_mask_initial);
		inc_counter(args);
	} while (keep_stressing());

	return EXIT_SUCCESS;
}
/*
 *  stress_fallocate
 *	stress I/O via fallocate and ftruncate
 */
static int stress_fallocate(const args_t *args)
{
	int fd, ret;
	char filename[PATH_MAX];
	uint64_t ftrunc_errs = 0;
	off_t fallocate_bytes = DEFAULT_FALLOCATE_BYTES;

	if (!get_setting("fallocate-bytes", &fallocate_bytes)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			fallocate_bytes = MAX_FALLOCATE_BYTES;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			fallocate_bytes = MIN_FALLOCATE_BYTES;
	}

	fallocate_bytes /= args->num_instances;
	if (fallocate_bytes < (off_t)MIN_FALLOCATE_BYTES)
		fallocate_bytes = (off_t)MIN_FALLOCATE_BYTES;

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		ret = exit_status(errno);
		pr_fail_err("open");
		(void)stress_temp_dir_rm_args(args);
		return ret;
	}
	(void)unlink(filename);

	do {
#if defined(HAVE_POSIX_FALLOCATE)
		ret = posix_fallocate(fd, (off_t)0, fallocate_bytes);
#else
		ret = shim_fallocate(fd, 0, (off_t)0, fallocate_bytes);
#endif
		if (!g_keep_stressing_flag)
			break;
		(void)shim_fsync(fd);
		if ((ret == 0) && (g_opt_flags & OPT_FLAGS_VERIFY)) {
			struct stat buf;

			if (fstat(fd, &buf) < 0)
				pr_fail("%s: fstat on file failed\n", args->name);
			else if (buf.st_size != fallocate_bytes)
				pr_fail("%s: file size %jd does not "
					"match the expected file "
					"size of %jd\n",
					args->name, (intmax_t)buf.st_size,
					(intmax_t)fallocate_bytes);
		}

		if (ftruncate(fd, 0) < 0)
			ftrunc_errs++;
		if (!g_keep_stressing_flag)
			break;
		(void)shim_fsync(fd);

		if (g_opt_flags & OPT_FLAGS_VERIFY) {
			struct stat buf;

			if (fstat(fd, &buf) < 0)
				pr_fail("%s: fstat on file failed\n", args->name);
			else if (buf.st_size != (off_t)0)
				pr_fail("%s: file size %jd does not "
					"match the expected file size "
					"of 0\n",
					args->name, (intmax_t)buf.st_size);
		}

		if (ftruncate(fd, fallocate_bytes) < 0)
			ftrunc_errs++;
		(void)shim_fsync(fd);
		if (ftruncate(fd, 0) < 0)
			ftrunc_errs++;
		(void)shim_fsync(fd);

		if (SIZEOF_ARRAY(modes) > 1) {
			/*
			 *  non-portable Linux fallocate()
			 */
			int i;

			(void)shim_fallocate(fd, 0, (off_t)0, fallocate_bytes);
			if (!g_keep_stressing_flag)
				break;
			(void)shim_fsync(fd);

			for (i = 0; i < 64; i++) {
				off_t offset = (mwc64() % fallocate_bytes) & ~0xfff;
				int j = (mwc32() >> 8) % SIZEOF_ARRAY(modes);

				(void)shim_fallocate(fd, modes[j], offset, 64 * KB);
				if (!g_keep_stressing_flag)
					break;
				(void)shim_fsync(fd);
			}
			if (ftruncate(fd, 0) < 0)
				ftrunc_errs++;
			(void)shim_fsync(fd);
		}
		inc_counter(args);
	} while (keep_stressing());

	if (ftrunc_errs)
		pr_dbg("%s: %" PRIu64 " ftruncate errors occurred\n",
			args->name, ftrunc_errs);
	(void)close(fd);
	(void)stress_temp_dir_rm_args(args);

	return EXIT_SUCCESS;
}
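/*
 * Illustrative sketch (not part of the original source): punching a
 * hole in a file with the non-portable Linux fallocate(2) mode used
 * above, deallocating a range while keeping the apparent file size.
 * Requires filesystem support (e.g. ext4, xfs); fails with EOPNOTSUPP
 * otherwise. The demo_punch_hole name is hypothetical.
 */
#define _GNU_SOURCE
#include <fcntl.h>

static int demo_punch_hole(int fd, off_t offset, off_t len)
{
	/* deallocate [offset, offset + len) but keep apparent size */
	return fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE,
		offset, len);
}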