/*
 *  stress_mmap()
 *	stress mmap by mapping a region (anonymous, or file-backed with
 *	--mmap-file), unmapping its pages in random order, remapping them
 *	back (MAP_FIXED) in random order, then tearing everything down.
 *	Returns EXIT_SUCCESS / EXIT_FAILURE per stressor convention.
 */
int stress_mmap(
	uint64_t *const counter,	/* bogo-op counter, shared with parent */
	const uint32_t instance,	/* stressor instance number */
	const uint64_t max_ops,		/* 0 = run until opt_do_run cleared */
	const char *name)		/* stressor name for messages */
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	size_t sz, pages4k;
#if !defined(__gnu_hurd__)
	/* msync mode selectable via --mmap-async */
	const int ms_flags = (opt_flags & OPT_FLAGS_MMAP_ASYNC) ?
		MS_ASYNC : MS_SYNC;
#endif
	const pid_t pid = getpid();
	int fd = -1, flags = MAP_PRIVATE | MAP_ANONYMOUS;
	char filename[PATH_MAX];

	(void)instance;
#ifdef MAP_POPULATE
	flags |= MAP_POPULATE;
#endif

	if (!set_mmap_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_mmap_bytes = MAX_MMAP_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_mmap_bytes = MIN_MMAP_BYTES;
	}
	/* Round the region size down to a whole number of pages.
	 * NOTE(review): assumes opt_mmap_bytes >= page_size, else sz == 0
	 * and the lseek below would use a huge offset — presumably the
	 * option parser enforces a sane minimum; confirm. */
	sz = opt_mmap_bytes & ~(page_size - 1);
	pages4k = sz / page_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		ssize_t ret;
		char ch = '\0';

		if (stress_temp_dir_mk(name, pid, instance) < 0)
			return EXIT_FAILURE;

		(void)stress_temp_filename(filename, sizeof(filename),
			name, pid, instance, mwc32());

		(void)umask(0077);
		if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
			pr_failed_err(name, "open");
			(void)unlink(filename);
			(void)stress_temp_dir_rm(name, pid, instance);
			return EXIT_FAILURE;
		}
		/* Unlink immediately; the open fd keeps the file alive */
		(void)unlink(filename);
		/* Extend the file to sz bytes by writing the final byte */
		if (lseek(fd, sz - sizeof(ch), SEEK_SET) < 0) {
			pr_failed_err(name, "lseek");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);
			return EXIT_FAILURE;
		}
redo:
		ret = write(fd, &ch, sizeof(ch));
		if (ret != sizeof(ch)) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo;	/* transient error, retry */
			pr_failed_err(name, "write");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);
			return EXIT_FAILURE;
		}
		/* File-backed mappings must be shared, not anonymous/private */
		flags &= ~(MAP_ANONYMOUS | MAP_PRIVATE);
		flags |= MAP_SHARED;
	}

	do {
		/* Per-page state: PAGE_MAPPED / 0 / PAGE_MAPPED_FAIL,
		 * and the address of each page within buf */
		uint8_t mapped[pages4k];
		uint8_t *mappings[pages4k];
		size_t n;

		if (!opt_do_run)
			break;
		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, flags, fd, 0);
		if (buf == MAP_FAILED) {
			/* Force MAP_POPULATE off, just in case */
#ifdef MAP_POPULATE
			flags &= ~MAP_POPULATE;
#endif
			continue;	/* Try again */
		}
		if (opt_flags & OPT_FLAGS_MMAP_FILE) {
			memset(buf, 0xff, sz);
#if !defined(__gnu_hurd__)
			(void)msync(buf, sz, ms_flags);
#endif
		}
		(void)madvise_random(buf, sz);
		(void)mincore_touch_pages(buf, opt_mmap_bytes);
		stress_mmap_mprotect(name, buf, sz);
		memset(mapped, PAGE_MAPPED, sizeof(mapped));
		for (n = 0; n < pages4k; n++)
			mappings[n] = buf + (n * page_size);

		/* Ensure we can write to the mapped pages */
		stress_mmap_set(buf, sz);
		if (opt_flags & OPT_FLAGS_VERIFY) {
			if (stress_mmap_check(buf, sz) < 0)
				pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
					"not contain expected data\n", name, sz);
		}

		/*
		 *  Step #1, unmap all pages in random order
		 */
		(void)mincore_touch_pages(buf, opt_mmap_bytes);
		for (n = pages4k; n; ) {
			/* Start probing at a random page, scan forward for
			 * the next still-mapped page and unmap it */
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (mapped[page] == PAGE_MAPPED) {
					mapped[page] = 0;
					(void)madvise_random(mappings[page], page_size);
					stress_mmap_mprotect(name, mappings[page], page_size);
					(void)munmap(mappings[page], page_size);
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
		/* All pages unmapped above; this is a belt-and-braces unmap
		 * of the whole range (munmap on unmapped space is not an error) */
		(void)munmap(buf, sz);
#ifdef MAP_FIXED
		/*
		 *  Step #2, map them back in random order
		 */
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (!mapped[page]) {
					/* File-backed pages map to their original
					 * file offset; anonymous pages use 0 */
					off_t offset = (opt_flags & OPT_FLAGS_MMAP_FILE) ?
						page * page_size : 0;
					/*
					 * Attempt to map them back into the original address, this
					 * may fail (it's not the most portable operation), so keep
					 * track of failed mappings too
					 */
					mappings[page] = mmap(mappings[page], page_size,
						PROT_READ | PROT_WRITE, MAP_FIXED | flags, fd, offset);
					if (mappings[page] == MAP_FAILED) {
						mapped[page] = PAGE_MAPPED_FAIL;
						mappings[page] = NULL;
					} else {
						(void)mincore_touch_pages(mappings[page], page_size);
						(void)madvise_random(mappings[page], page_size);
						stress_mmap_mprotect(name, mappings[page], page_size);
						mapped[page] = PAGE_MAPPED;
						/* Ensure we can write to the mapped page */
						stress_mmap_set(mappings[page], page_size);
						if (stress_mmap_check(mappings[page], page_size) < 0)
							pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
								"not contain expected data\n", name, page_size);
						if (opt_flags & OPT_FLAGS_MMAP_FILE) {
							memset(mappings[page], n, page_size);
#if !defined(__gnu_hurd__)
							(void)msync(mappings[page], page_size, ms_flags);
#endif
						}
					}
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
#endif
cleanup:
		/*
		 *  Step #3, unmap them all
		 */
		for (n = 0; n < pages4k; n++) {
			if (mapped[n] & PAGE_MAPPED) {
				(void)madvise_random(mappings[n], page_size);
				stress_mmap_mprotect(name, mappings[n], page_size);
				(void)munmap(mappings[n], page_size);
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);
	}
	return EXIT_SUCCESS;
}
static int stress_mremap_child( const args_t *args, const size_t sz, size_t new_sz, const size_t page_size, const size_t mremap_bytes, int *flags) { do { uint8_t *buf = NULL; size_t old_sz; if (!g_keep_stressing_flag) break; buf = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, *flags, -1, 0); if (buf == MAP_FAILED) { /* Force MAP_POPULATE off, just in case */ #if defined(MAP_POPULATE) *flags &= ~MAP_POPULATE; #endif continue; /* Try again */ } (void)madvise_random(buf, new_sz); (void)mincore_touch_pages(buf, mremap_bytes); /* Ensure we can write to the mapped pages */ if (g_opt_flags & OPT_FLAGS_VERIFY) { mmap_set(buf, new_sz, page_size); if (mmap_check(buf, sz, page_size) < 0) { pr_fail("%s: mmap'd region of %zu " "bytes does not contain expected data\n", args->name, sz); (void)munmap(buf, new_sz); return EXIT_FAILURE; } } old_sz = new_sz; new_sz >>= 1; while (new_sz > page_size) { if (try_remap(args, &buf, old_sz, new_sz) < 0) { (void)munmap(buf, old_sz); return EXIT_FAILURE; } (void)madvise_random(buf, new_sz); if (g_opt_flags & OPT_FLAGS_VERIFY) { if (mmap_check(buf, new_sz, page_size) < 0) { pr_fail("%s: mremap'd region " "of %zu bytes does " "not contain expected data\n", args->name, sz); (void)munmap(buf, new_sz); return EXIT_FAILURE; } } old_sz = new_sz; new_sz >>= 1; } new_sz <<= 1; while (new_sz < mremap_bytes) { if (try_remap(args, &buf, old_sz, new_sz) < 0) { (void)munmap(buf, old_sz); return EXIT_FAILURE; } (void)madvise_random(buf, new_sz); old_sz = new_sz; new_sz <<= 1; } (void)munmap(buf, old_sz); inc_counter(args); } while (keep_stressing()); return EXIT_SUCCESS; }
/* * stress_shm_posix_child() * stress out the shm allocations. This can be killed by * the out of memory killer, so we need to keep the parent * informed of the allocated shared memory ids so these can * be reaped cleanly if this process gets prematurely killed. */ static int stress_shm_posix_child( const int fd, uint64_t *const counter, const uint64_t max_ops, const char *name, size_t sz) { void *addrs[MAX_SHM_POSIX_OBJECTS]; char shm_names[MAX_SHM_POSIX_OBJECTS][SHM_NAME_LEN]; shm_msg_t msg; int i; int rc = EXIT_SUCCESS; bool ok = true; pid_t pid = getpid(); uint64_t id = 0; memset(addrs, 0, sizeof(addrs)); memset(shm_names, 0, sizeof(shm_names)); /* Make sure this is killable by OOM killer */ set_oom_adjustment(name, true); do { for (i = 0; i < (ssize_t)opt_shm_posix_objects; i++) { int shm_fd; void *addr; char *shm_name = shm_names[i]; shm_name[0] = '\0'; if (!opt_do_run) goto reap; snprintf(shm_name, SHM_NAME_LEN, "/stress-ng-%u-%" PRIx64 "-%" PRIx32, pid, id, mwc32()); shm_fd = shm_open(shm_name, O_CREAT | O_RDWR | O_TRUNC, S_IRUSR | S_IWUSR); if (shm_fd < 0) { ok = false; pr_fail(stderr, "%s: shm_open failed: errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; goto reap; } /* Inform parent of the new shm name */ msg.index = i; shm_name[SHM_NAME_LEN - 1] = '\0'; strncpy(msg.shm_name, shm_name, SHM_NAME_LEN); if (write(fd, &msg, sizeof(msg)) < 0) { pr_err(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; (void)close(shm_fd); goto reap; } addr = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_ANONYMOUS, shm_fd, 0); if (addr == MAP_FAILED) { ok = false; pr_fail(stderr, "%s: mmap failed: errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; (void)close(shm_fd); goto reap; } addrs[i] = addr; (void)close(shm_fd); if (!opt_do_run) goto reap; (void)mincore_touch_pages(addr, sz); if (!opt_do_run) goto reap; (void)madvise_random(addr, sz); if (!opt_do_run) goto reap; if 
(stress_shm_posix_check(addr, sz) < 0) { ok = false; pr_fail(stderr, "%s: memory check failed\n", name); rc = EXIT_FAILURE; goto reap; } id++; (*counter)++; } reap: for (i = 0; i < (ssize_t)opt_shm_posix_objects; i++) { char *shm_name = shm_names[i]; if (addrs[i]) (void)munmap(addrs[i], sz); if (*shm_name) { if (shm_unlink(shm_name) < 0) { pr_fail(stderr, "%s: shm_unlink " "failed: errno=%d (%s)\n", name, errno, strerror(errno)); } } /* Inform parent shm ID is now free */ msg.index = i; msg.shm_name[SHM_NAME_LEN - 1] = '\0'; strncpy(msg.shm_name, shm_name, SHM_NAME_LEN); if (write(fd, &msg, sizeof(msg)) < 0) { pr_dbg(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); ok = false; } addrs[i] = NULL; *shm_name = '\0'; } } while (ok && opt_do_run && (!max_ops || *counter < max_ops)); /* Inform parent of end of run */ msg.index = -1; strncpy(msg.shm_name, "", SHM_NAME_LEN); if (write(fd, &msg, sizeof(msg)) < 0) { pr_err(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; } return rc; }
/* * stress_shm_sysv_child() * stress out the shm allocations. This can be killed by * the out of memory killer, so we need to keep the parent * informed of the allocated shared memory ids so these can * be reaped cleanly if this process gets prematurely killed. */ static int stress_shm_sysv_child( const int fd, uint64_t *const counter, const uint64_t max_ops, const char *name, const size_t max_sz, const size_t page_size) { struct sigaction new_action; void *addrs[MAX_SHM_SYSV_SEGMENTS]; key_t keys[MAX_SHM_SYSV_SEGMENTS]; int shm_ids[MAX_SHM_SYSV_SEGMENTS]; shm_msg_t msg; size_t i; int rc = EXIT_SUCCESS; bool ok = true; int mask = ~0; int instances; new_action.sa_handler = handle_shm_sysv_sigalrm; sigemptyset(&new_action.sa_mask); new_action.sa_flags = 0; if (sigaction(SIGALRM, &new_action, NULL) < 0) { pr_fail_err(name, "sigaction"); return EXIT_FAILURE; } memset(addrs, 0, sizeof(addrs)); memset(keys, 0, sizeof(keys)); for (i = 0; i < MAX_SHM_SYSV_SEGMENTS; i++) shm_ids[i] = -1; /* Make sure this is killable by OOM killer */ set_oom_adjustment(name, true); if ((instances = stressor_instances(STRESS_SHM_SYSV)) < 1) instances = (int)stress_get_processors_configured(); /* Should never happen, but be safe */ if (instances < 1) instances = 1; do { size_t sz = max_sz; for (i = 0; i < opt_shm_sysv_segments; i++) { int shm_id, count = 0; void *addr; key_t key; size_t shmall, freemem, totalmem; /* Try hard not to overcommit at this current time */ stress_get_memlimits(&shmall, &freemem, &totalmem); shmall /= instances; freemem /= instances; if ((shmall > page_size) && sz > shmall) sz = shmall; if ((freemem > page_size) && sz > freemem) sz = freemem; if (!opt_do_run) goto reap; for (count = 0; count < KEY_GET_RETRIES; count++) { bool unique = true; const int rnd = mwc32() % SIZEOF_ARRAY(shm_flags); const int rnd_flag = shm_flags[rnd] & mask; if (sz < page_size) goto reap; /* Get a unique key */ do { size_t j; if (!opt_do_run) goto reap; /* Get a unique random key */ key = 
(key_t)mwc16(); for (j = 0; j < i - 1; j++) { if (key == keys[j]) { unique = false; break; } } if (!opt_do_run) goto reap; } while (!unique); shm_id = shmget(key, sz, IPC_CREAT | IPC_EXCL | S_IRUSR | S_IWUSR | rnd_flag); if (shm_id >= 0) break; if (errno == EINTR) goto reap; if (errno == EPERM) { /* ignore using the flag again */ mask &= ~rnd_flag; } if ((errno == EINVAL) || (errno == ENOMEM)) { /* * On some systems we may need * to reduce the size */ sz = sz / 2; } } if (shm_id < 0) { ok = false; pr_fail(stderr, "%s: shmget failed: errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; goto reap; } /* Inform parent of the new shm ID */ msg.index = i; msg.shm_id = shm_id; if (write(fd, &msg, sizeof(msg)) < 0) { pr_err(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; goto reap; } addr = shmat(shm_id, NULL, 0); if (addr == (char *) -1) { ok = false; pr_fail(stderr, "%s: shmat failed: errno=%d (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; goto reap; } addrs[i] = addr; shm_ids[i] = shm_id; keys[i] = key; if (!opt_do_run) goto reap; (void)mincore_touch_pages(addr, sz); if (!opt_do_run) goto reap; (void)madvise_random(addr, sz); if (!opt_do_run) goto reap; if (stress_shm_sysv_check(addr, sz, page_size) < 0) { ok = false; pr_fail(stderr, "%s: memory check failed\n", name); rc = EXIT_FAILURE; goto reap; } (*counter)++; } reap: for (i = 0; i < opt_shm_sysv_segments; i++) { if (addrs[i]) { if (shmdt(addrs[i]) < 0) { pr_fail(stderr, "%s: shmdt failed: errno=%d (%s)\n", name, errno, strerror(errno)); } } if (shm_ids[i] >= 0) { if (shmctl(shm_ids[i], IPC_RMID, NULL) < 0) { if (errno != EIDRM) pr_fail(stderr, "%s: shmctl failed: errno=%d (%s)\n", name, errno, strerror(errno)); } } /* Inform parent shm ID is now free */ msg.index = i; msg.shm_id = -1; if (write(fd, &msg, sizeof(msg)) < 0) { pr_dbg(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); ok = false; } addrs[i] = NULL; 
shm_ids[i] = -1; keys[i] = 0; } } while (ok && opt_do_run && (!max_ops || *counter < max_ops)); /* Inform parent of end of run */ msg.index = -1; msg.shm_id = -1; if (write(fd, &msg, sizeof(msg)) < 0) { pr_err(stderr, "%s: write failed: errno=%d: (%s)\n", name, errno, strerror(errno)); rc = EXIT_FAILURE; } return rc; }
/* * stress_mremap() * stress mmap */ int stress_mremap( uint64_t *const counter, const uint32_t instance, const uint64_t max_ops, const char *name) { uint8_t *buf = NULL; const size_t page_size = stress_get_pagesize(); size_t sz, new_sz, old_sz; int flags = MAP_PRIVATE | MAP_ANONYMOUS; (void)instance; #ifdef MAP_POPULATE flags |= MAP_POPULATE; #endif if (!set_mremap_bytes) { if (opt_flags & OPT_FLAGS_MAXIMIZE) opt_mremap_bytes = MAX_MREMAP_BYTES; if (opt_flags & OPT_FLAGS_MINIMIZE) opt_mremap_bytes = MIN_MREMAP_BYTES; } new_sz = sz = opt_mremap_bytes & ~(page_size - 1); /* Make sure this is killable by OOM killer */ set_oom_adjustment(name, true); do { if (!opt_do_run) break; buf = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, flags, -1, 0); if (buf == MAP_FAILED) { /* Force MAP_POPULATE off, just in case */ #ifdef MAP_POPULATE flags &= ~MAP_POPULATE; #endif continue; /* Try again */ } (void)madvise_random(buf, new_sz); (void)mincore_touch_pages(buf, opt_mremap_bytes); /* Ensure we can write to the mapped pages */ if (opt_flags & OPT_FLAGS_VERIFY) { stress_mremap_set(buf, new_sz, page_size); if (stress_mremap_check(buf, sz, page_size) < 0) { pr_fail(stderr, "%s: mmap'd region of %zu " "bytes does not contain expected data\n", name, sz); munmap(buf, new_sz); return EXIT_FAILURE; } } old_sz = new_sz; new_sz >>= 1; while (new_sz > page_size) { if (try_remap(name, &buf, old_sz, new_sz) < 0) { munmap(buf, old_sz); return EXIT_FAILURE; } (void)madvise_random(buf, new_sz); if (opt_flags & OPT_FLAGS_VERIFY) { if (stress_mremap_check(buf, new_sz, page_size) < 0) { pr_fail(stderr, "%s: mremap'd region " "of %zu bytes does " "not contain expected data\n", name, sz); munmap(buf, new_sz); return EXIT_FAILURE; } } old_sz = new_sz; new_sz >>= 1; } new_sz <<= 1; while (new_sz < opt_mremap_bytes) { if (try_remap(name, &buf, old_sz, new_sz) < 0) { munmap(buf, old_sz); return EXIT_FAILURE; } (void)madvise_random(buf, new_sz); old_sz = new_sz; new_sz <<= 1; } (void)munmap(buf, old_sz); 
(*counter)++; } while (opt_do_run && (!max_ops || *counter < max_ops)); return EXIT_SUCCESS; }