/*
 *  stress_memthrash_mfence()
 *	thrash memory with single byte writes, each followed by a
 *	full memory fence to force the store to be globally visible
 */
static void HOT OPTIMIZE3 stress_memthrash_mfence(
	const args_t *args,
	size_t mem_size)
{
	const uint32_t n = mwc16();	/* random number of iterations */
	uint32_t count;

	(void)args;

	for (count = 0; (count < n) && !thread_terminate; count++) {
		/* pick a random byte in the shared buffer */
		volatile uint8_t *addr = mem + (mwc32() % mem_size);

		*addr = (uint8_t)count;
		mfence();		/* serialize the store */
	}
}
/*
 *  stress_memthrash_flush()
 *	thrash memory with single byte writes, each followed by a
 *	cache line flush of the written address
 */
static void HOT OPTIMIZE3 stress_memthrash_flush(
	const args_t *args,
	size_t mem_size)
{
	const uint32_t n = mwc16();	/* random number of iterations */
	uint32_t count;

	(void)args;

	for (count = 0; (count < n) && !thread_terminate; count++) {
		/* pick a random byte in the shared buffer */
		uint8_t *const addr = mem + (mwc32() % mem_size);
		/* volatile alias forces the store to actually occur */
		volatile uint8_t *const vaddr = addr;

		*vaddr = (uint8_t)count;
		clflush(addr);		/* evict the line from cache */
	}
}
static inline HOT OPTIMIZE3 void stress_memthrash_random_chunk(const size_t chunk_size, size_t mem_size) { uint32_t i; const uint32_t max = mwc16(); size_t chunks = mem_size / chunk_size; if (chunks < 1) chunks = 1; for (i = 0; !thread_terminate && (i < max); i++) { const size_t chunk = mwc32() % chunks; const size_t offset = chunk * chunk_size; #if defined(__GNUC__) (void)__builtin_memset((void *)mem + offset, mwc8(), chunk_size); #else (void)memset((void *)mem + offset, mwc8(), chunk_size); #endif } }
/*
 *  stress_zero
 *	stress reading of /dev/zero
 */
static int stress_zero(const args_t *args)
{
	int fd;
	const size_t page_size = args->page_size;
#if defined(__minix__)
	/* minix does not support writes to /dev/zero */
	const int flags = O_RDONLY;
#else
	const int flags = O_RDWR;
#endif
	char wr_buffer[page_size];

	if ((fd = open("/dev/zero", flags)) < 0) {
		pr_fail_err("open /dev/zero");
		return EXIT_FAILURE;
	}
	(void)memset(wr_buffer, 0, sizeof wr_buffer);

	do {
		char rd_buffer[page_size];
		ssize_t ret;
#if defined(__linux__)
		int32_t *ptr;
#endif

		ret = read(fd, rd_buffer, sizeof(rd_buffer));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_fail_err("read");
			(void)close(fd);
			return EXIT_FAILURE;
		}
#if !defined(__minix__)
		/* One can also write to /dev/zero w/o failure */
		ret = write(fd, wr_buffer, sizeof(wr_buffer));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_fail_err("write");
			(void)close(fd);
			return EXIT_FAILURE;
		}
#endif
#if defined(__linux__)
		/*
		 *  check if we can mmap /dev/zero; the mapping must be
		 *  file-backed (no MAP_ANONYMOUS: with that flag set the
		 *  kernel ignores fd and the check never touched /dev/zero)
		 */
		ptr = mmap(NULL, page_size, PROT_READ, MAP_PRIVATE,
			fd, page_size * mwc16());
		if (ptr == MAP_FAILED) {
			if (errno == ENOMEM)
				continue;
			pr_fail_err("mmap /dev/zero");
			(void)close(fd);
			return EXIT_FAILURE;
		}
		/* Quick sanity check if first 32 bits are zero */
		if (*ptr != 0) {
			pr_fail_err("mmap'd /dev/zero not null");
			(void)munmap(ptr, page_size);
			(void)close(fd);
			return EXIT_FAILURE;
		}
		(void)munmap(ptr, page_size);
#endif
		inc_counter(args);
	} while (keep_stressing());

	(void)close(fd);
	return EXIT_SUCCESS;
}
/* * stress_aio_linux * stress asynchronous I/O using the linux specific aio ABI */ int stress_aio_linux( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { int fd, rc = EXIT_FAILURE; char filename[PATH_MAX]; const pid_t pid = getpid(); aio_context_t ctx = 0; if (!set_aio_linux_requests) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_aio_linux_requests = MAX_AIO_REQUESTS; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_aio_linux_requests = MIN_AIO_REQUESTS; } if (sys_io_setup(opt_aio_linux_requests, &ctx) < 0) { pr_failed_err(name, "io_setup"); return EXIT_FAILURE; } if (stress_temp_dir_mk(name, pid, instance) < 0) { return EXIT_FAILURE; } (void)stress_temp_filename(filename, sizeof(filename), name, pid, instance, mwc32()); (void)umask(0077); if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) { pr_failed_err(name, "open"); goto finish; } (void)unlink(filename); do { struct iocb cb[opt_aio_linux_requests]; struct iocb *cbs[opt_aio_linux_requests]; struct io_event events[opt_aio_linux_requests]; uint8_t buffers[opt_aio_linux_requests][BUFFER_SZ]; int ret, i; long n; for (i = 0; i < opt_aio_linux_requests; i++) aio_linux_fill_buffer(i, buffers[i], BUFFER_SZ); memset(cb, 0, sizeof(cb)); for (i = 0; i < opt_aio_linux_requests; i++) { cb[i].aio_fildes = fd; cb[i].aio_lio_opcode = IOCB_CMD_PWRITE; cb[i].aio_buf = (long)buffers[i]; cb[i].aio_offset = mwc16() * BUFFER_SZ; cb[i].aio_nbytes = BUFFER_SZ; cbs[i] = &cb[i]; } ret = sys_io_submit(ctx, opt_aio_linux_requests, cbs); if (ret < 0) { if (errno == EAGAIN) continue; pr_failed_err(name, "io_submit"); break; } n = opt_aio_linux_requests; do { struct timespec timeout, *timeout_ptr; if (clock_gettime(CLOCK_REALTIME, &timeout) < 0) { timeout_ptr = NULL; } else { timeout.tv_nsec += 1000000; if (timeout.tv_nsec > 1000000000) { timeout.tv_nsec -= 1000000000; timeout.tv_sec++; } timeout_ptr = &timeout; } ret = sys_io_getevents(ctx, 1, n, events, timeout_ptr); if (ret < 0) { if 
((errno == EINTR) && (opt_do_run)) continue; pr_failed_err(name, "io_getevents"); break; } else { n -= ret; } } while ((n > 0) && opt_do_run); (*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); rc = EXIT_SUCCESS; (void)close(fd); finish: (void)sys_io_destroy(ctx); (void)stress_temp_dir_rm(name, pid, instance); return rc; }
/* * stress_aiol * stress asynchronous I/O using the linux specific aio ABI */ static int stress_aiol(const args_t *args) { int fd, ret, rc = EXIT_FAILURE; char filename[PATH_MAX]; char buf[64]; io_context_t ctx = 0; uint64_t aio_linux_requests = DEFAULT_AIO_LINUX_REQUESTS; uint8_t *buffer; uint64_t aio_max_nr = DEFAULT_AIO_MAX_NR; if (!get_setting("aiol-requests", &aio_linux_requests)) { if (g_opt_flags & OPT_FLAGS_MAXIMIZE) aio_linux_requests = MAX_AIO_REQUESTS; if (g_opt_flags & OPT_FLAGS_MINIMIZE) aio_linux_requests = MIN_AIO_REQUESTS; } if ((aio_linux_requests < MIN_AIO_REQUESTS) || (aio_linux_requests > MAX_AIO_REQUESTS)) { pr_err("%s: iol_requests out of range", args->name); return EXIT_FAILURE; } ret = system_read("/proc/sys/fs/aio-max-nr", buf, sizeof(buf)); if (ret > 0) { if (sscanf(buf, "%" SCNu64, &aio_max_nr) != 1) { /* Guess max */ aio_max_nr = DEFAULT_AIO_MAX_NR; } } else { /* Guess max */ aio_max_nr = DEFAULT_AIO_MAX_NR; } aio_max_nr /= (args->num_instances == 0) ? 1 : args->num_instances; if (aio_max_nr < 1) aio_max_nr = 1; if (aio_linux_requests > aio_max_nr) { aio_linux_requests = aio_max_nr; if (args->instance == 0) pr_inf("%s: Limiting AIO requests to " "%" PRIu64 " per stressor (avoids running out of resources)\n", args->name, aio_linux_requests); } ret = posix_memalign((void **)&buffer, 4096, aio_linux_requests * BUFFER_SZ); if (ret) { pr_inf("%s: Out of memory allocating buffers, errno=%d (%s)", args->name, errno, strerror(errno)); return EXIT_NO_RESOURCE; } ret = io_setup(aio_linux_requests, &ctx); if (ret < 0) { /* * The libaio interface returns -errno in the * return value, so set errno accordingly */ errno = -ret; if ((errno == EAGAIN) || (errno == EACCES)) { pr_err("%s: io_setup failed, ran out of " "available events, consider increasing " "/proc/sys/fs/aio-max-nr, errno=%d (%s)\n", args->name, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; goto free_buffer; } else if (errno == ENOMEM) { pr_err("%s: io_setup failed, ran out of " 
"memory, errno=%d (%s)\n", args->name, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; goto free_buffer; } else if (errno == ENOSYS) { pr_err("%s: io_setup failed, no io_setup " "system call with this kernel, " "errno=%d (%s)\n", args->name, errno, strerror(errno)); rc = EXIT_NO_RESOURCE; goto free_buffer; } else { pr_fail_err("io_setup"); rc = EXIT_FAILURE; goto free_buffer; } } ret = stress_temp_dir_mk_args(args); if (ret < 0) { rc = exit_status(-ret); goto free_buffer; } (void)stress_temp_filename_args(args, filename, sizeof(filename), mwc32()); if ((fd = open(filename, O_CREAT | O_RDWR | O_DIRECT, S_IRUSR | S_IWUSR)) < 0) { rc = exit_status(errno); pr_fail_err("open"); goto finish; } (void)unlink(filename); do { struct iocb cb[aio_linux_requests]; struct iocb *cbs[aio_linux_requests]; struct io_event events[aio_linux_requests]; uint8_t *buffers[aio_linux_requests]; uint8_t *bufptr = buffer; uint64_t i; long n; for (i = 0; i < aio_linux_requests; i++, bufptr += BUFFER_SZ) { buffers[i] = bufptr; aio_linux_fill_buffer(i, buffers[i], BUFFER_SZ); } (void)memset(cb, 0, sizeof(cb)); for (i = 0; i < aio_linux_requests; i++) { cb[i].aio_fildes = fd; cb[i].aio_lio_opcode = IO_CMD_PWRITE; cb[i].u.c.buf = buffers[i]; cb[i].u.c.offset = mwc16() * BUFFER_SZ; cb[i].u.c.nbytes = BUFFER_SZ; cbs[i] = &cb[i]; } ret = io_submit(ctx, (long)aio_linux_requests, cbs); if (ret < 0) { errno = -ret; if (errno == EAGAIN) continue; pr_fail_err("io_submit"); break; } n = aio_linux_requests; do { struct timespec timeout, *timeout_ptr; if (clock_gettime(CLOCK_REALTIME, &timeout) < 0) { timeout_ptr = NULL; } else { timeout.tv_nsec += 1000000; if (timeout.tv_nsec > 1000000000) { timeout.tv_nsec -= 1000000000; timeout.tv_sec++; } timeout_ptr = &timeout; } ret = io_getevents(ctx, 1, n, events, timeout_ptr); if (ret < 0) { errno = -ret; if (errno == EINTR) { if (g_keep_stressing_flag) continue; else break; } pr_fail_err("io_getevents"); break; } else { n -= ret; } } while ((n > 0) && 
g_keep_stressing_flag); inc_counter(args); } while (keep_stressing()); rc = EXIT_SUCCESS; (void)close(fd); finish: (void)io_destroy(ctx); (void)stress_temp_dir_rm_args(args); free_buffer: free(buffer); return rc; }
/* * stress_shm_sysv_child() * stress out the shm allocations. This can be killed by * the out of memory killer, so we need to keep the parent * informed of the allocated shared memory ids so these can * be reaped cleanly if this process gets prematurely killed. */ static int stress_shm_sysv_child( const int fd, uint64_t *const counter, const uint64_t max_ops, const char *name, const size_t max_sz, const size_t page_size) { struct sigaction new_action; void *addrs[MAX_SHM_SYSV_SEGMENTS]; key_t keys[MAX_SHM_SYSV_SEGMENTS]; int shm_ids[MAX_SHM_SYSV_SEGMENTS]; shm_msg_t msg; size_t i; int rc = EXIT_SUCCESS; bool ok = true; int mask = ~0; int instances; new_action.sa_handler = handle_shm_sysv_sigalrm; sigemptyset(&new_action.sa_mask); new_action.sa_flags = 0; if (sigaction(SIGALRM, &new_action, NULL) < 0) { pr_fail_err(name, "sigaction"); return EXIT_FAILURE; } memset(addrs, 0, sizeof(addrs)); memset(keys, 0, sizeof(keys)); for (i = 0; i < MAX_SHM_SYSV_SEGMENTS; i++) shm_ids[i] = -1; /* Make sure this is killable by OOM killer */ set_oom_adjustment(name, true); if ((instances = stressor_instances(STRESS_SHM_SYSV)) < 1) instances = (int)stress_get_processors_configured(); /* Should never happen, but be safe */ if (instances < 1) instances = 1; do { size_t sz = max_sz; for (i = 0; i < opt_shm_sysv_segments; i++) { int shm_id, count = 0; void *addr; key_t key; size_t shmall, freemem, totalmem; /* Try hard not to overcommit at this current time */ stress_get_memlimits(&shmall, &freemem, &totalmem); shmall /= instances; freemem /= instances; if ((shmall > page_size) && sz > shmall) sz = shmall; if ((freemem > page_size) && sz > freemem) sz = freemem; if (!opt_do_run) goto reap; for (count = 0; count < KEY_GET_RETRIES; count++) { bool unique = true; const int rnd = mwc32() % SIZEOF_ARRAY(shm_flags); const int rnd_flag = shm_flags[rnd] & mask; if (sz < page_size) goto reap; /* Get a unique key */ do { size_t j; if (!opt_do_run) goto reap; /* Get a unique random key */ key = 
(key_t)mwc16(); for (j = 0; j < i - 1; j++) { if (key == keys[j]) { unique = false; break; } } if (!opt_do_run) goto reap; } while (!unique); shm_id = shmget(key, sz, IPC_CREAT | IPC_EXCL | S_IRUSR | S_IWUSR | rnd_flag); if (shm_id >= 0) break; if (errno == EINTR) goto reap; if (errno == EPERM) { /* ignore using the flag again */ mask &= ~rnd_flag; } if ((errno == EINVAL) || (errno == ENOMEM)) { /* * On some systems we may need * to reduce the size */ sz = sz / 2; } } if (shm_id < 0) { ok = false; pr_fail(stderr, "%s: shmget failed: errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; goto reap; } /* Inform parent of the new shm ID */ msg.index = i; msg.shm_id = shm_id; if (write(fd, &msg, sizeof(msg)) < 0) { pr_err(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; goto reap; } addr = shmat(shm_id, NULL, 0); if (addr == (char *) -1) { ok = false; pr_fail(stderr, "%s: shmat failed: errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; goto reap; } addrs[i] = addr; shm_ids[i] = shm_id; keys[i] = key; if (!opt_do_run) goto reap; (void)mincore_touch_pages(addr, sz); if (!opt_do_run) goto reap; (void)madvise_random(addr, sz); if (!opt_do_run) goto reap; if (stress_shm_sysv_check(addr, sz, page_size) < 0) { ok = false; pr_fail(stderr, "%s: memory check failed\n", name); rc = EXIT_FAILURE; goto reap; } (*counter)++; } reap: for (i = 0; i < opt_shm_sysv_segments; i++) { if (addrs[i]) { if (shmdt(addrs[i]) < 0) { pr_fail(stderr, "%s: shmdt failed: errno=%d (%s)\n", name, errno, strerror(errno)); } } if (shm_ids[i] >= 0) { if (shmctl(shm_ids[i], IPC_RMID, NULL) < 0) { if (errno != EIDRM) pr_fail(stderr, "%s: shmctl failed: errno=%d (%s)\n", name, errno, strerror(errno)); } } /* Inform parent shm ID is now free */ msg.index = i; msg.shm_id = -1; if (write(fd, &msg, sizeof(msg)) < 0) { pr_dbg(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); ok = false; } addrs[i] = NULL; 
shm_ids[i] = -1; keys[i] = 0; } } while (ok && opt_do_run && (!max_ops || *counter < max_ops)); /* Inform parent of end of run */ msg.index = -1; msg.shm_id = -1; if (write(fd, &msg, sizeof(msg)) < 0) { pr_err(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; } return rc; }