Example #1
/*
 *  pr_yaml_runinfo()
 *	log info about the system we are running stress-ng on
 */
void pr_yaml_runinfo(FILE *yaml)
{
#if defined(__linux__)
	struct utsname uts;
	struct sysinfo info;
#endif
	time_t t;
	struct tm *tm = NULL;
	char hostname[128];
	char *user = getlogin();

	pr_yaml(yaml, "system-info:\n");
	if (time(&t) != ((time_t)-1))
		tm = localtime(&t);

	pr_yaml(yaml, "      stress-ng-version: " VERSION "\n");
	pr_yaml(yaml, "      run-by: %s\n", user ? user : "******");
	if (tm) {
		pr_yaml(yaml, "      date-yyyy-mm-dd: %4.4d:%2.2d:%2.2d\n",
			tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday);
		pr_yaml(yaml, "      time-hh-mm-ss: %2.2d:%2.2d:%2.2d\n",
			tm->tm_hour, tm->tm_min, tm->tm_sec);
		pr_yaml(yaml, "      epoch-secs: %ld\n", (long)t);
	}
	if (!gethostname(hostname, sizeof(hostname)))
		pr_yaml(yaml, "      hostname: %s\n", hostname);
#if defined(__linux__)
	if (uname(&uts) == 0) {
		pr_yaml(yaml, "      sysname: %s\n", uts.sysname);
		pr_yaml(yaml, "      nodename: %s\n", uts.nodename);
		pr_yaml(yaml, "      release: %s\n", uts.release);
		pr_yaml(yaml, "      version: %s\n", uts.version);
		pr_yaml(yaml, "      machine: %s\n", uts.machine);
	}
	if (sysinfo(&info) == 0) {
		pr_yaml(yaml, "      uptime: %ld\n", info.uptime);
		pr_yaml(yaml, "      totalram: %lu\n", info.totalram);
		pr_yaml(yaml, "      freeram: %lu\n", info.freeram);
		pr_yaml(yaml, "      sharedram: %lu\n", info.sharedram);
		pr_yaml(yaml, "      bufferram: %lu\n", info.bufferram);
		pr_yaml(yaml, "      totalswap: %lu\n", info.totalswap);
		pr_yaml(yaml, "      freeswap: %lu\n", info.freeswap);
	}
#endif
	pr_yaml(yaml, "      pagesize: %zd\n", stress_get_pagesize());
	pr_yaml(yaml, "      cpus: %ld\n", stress_get_processors_configured());
	pr_yaml(yaml, "      cpus-online: %ld\n", stress_get_processors_online());
	pr_yaml(yaml, "      ticks-per-second: %ld\n", stress_get_ticks_per_second());
	pr_yaml(yaml, "\n");
}
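The only Linux-specific parts above are uname(2) and sysinfo(2). Below is a minimal standalone sketch of those two calls (an added illustration, not stress-ng code). One caveat worth knowing: sysinfo's memory fields are in multiples of mem_unit, which the raw %lu prints above do not scale.

#include <stdio.h>
#include <sys/utsname.h>
#include <sys/sysinfo.h>

int main(void)
{
	struct utsname uts;
	struct sysinfo info;

	if (uname(&uts) == 0)		/* kernel identity fields */
		printf("%s %s on %s\n", uts.sysname, uts.release, uts.machine);
	if (sysinfo(&info) == 0)	/* uptime and memory counters */
		printf("uptime: %ld secs, free ram: %lu bytes\n",
			info.uptime, info.freeram * (unsigned long)info.mem_unit);
	return 0;
}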
Example #2
/*
 *  stress_vm_rw
 *	stress vm_read_v/vm_write_v
 */
int stress_vm_rw(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	context_t ctxt;
	uint8_t stack[STACK_SIZE];
	const ssize_t stack_offset =
		stress_get_stack_direction(&ctxt) * (STACK_SIZE - 64);
	uint8_t *stack_top = stack + stack_offset;

	(void)instance;

	if (!set_vm_rw_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_vm_rw_bytes = MAX_VM_RW_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_vm_rw_bytes = MIN_VM_RW_BYTES;
	}
	ctxt.name = name;
	ctxt.page_size = stress_get_pagesize();
	ctxt.sz = opt_vm_rw_bytes & ~(ctxt.page_size - 1);
	ctxt.counter = counter;
	ctxt.max_ops = max_ops;

	if (pipe(ctxt.pipe_wr) < 0) {
		pr_fail_dbg(name, "pipe");
		return EXIT_FAILURE;
	}
	if (pipe(ctxt.pipe_rd) < 0) {
		(void)close(ctxt.pipe_wr[0]);
		(void)close(ctxt.pipe_wr[1]);
		pr_fail_dbg(name, "pipe");
		return EXIT_FAILURE;
	}

	ctxt.pid = clone(stress_vm_child, align_stack(stack_top),
		SIGCHLD | CLONE_VM, &ctxt);
	if (ctxt.pid < 0) {
		(void)close(ctxt.pipe_wr[0]);
		(void)close(ctxt.pipe_wr[1]);
		(void)close(ctxt.pipe_rd[0]);
		(void)close(ctxt.pipe_rd[1]);
		pr_fail_dbg(name, "clone");
		return EXIT_FAILURE;
	}
	return stress_vm_parent(&ctxt);
}
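For context, here is a minimal standalone clone(2) sketch (an added illustration, not stress-ng code). CLONE_VM makes the child share the parent's address space, and the child runs on a caller-supplied stack; the sketch simply assumes a downward-growing stack, which is the detail stress_get_stack_direction() and align_stack() abstract away above.

#define _GNU_SOURCE
#include <sched.h>
#include <signal.h>
#include <stdio.h>
#include <sys/types.h>
#include <sys/wait.h>

static int child_fn(void *arg)
{
	/* CLONE_VM: this reads the parent's variable directly */
	printf("child sees parent's value: %d\n", *(int *)arg);
	return 0;
}

int main(void)
{
	static char stack[64 * 1024] __attribute__((aligned(16)));
	int shared = 42, status;
	/* pass the stack top: stacks grow downwards on x86/ARM */
	pid_t pid = clone(child_fn, stack + sizeof(stack),
			SIGCHLD | CLONE_VM, &shared);

	if (pid < 0)
		return 1;
	(void)waitpid(pid, &status, 0);
	return 0;
}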
Example #3
/*
 *  mincore_touch_pages()
 *	touch pages, forcing any non-resident pages into memory
 */
int mincore_touch_pages(void *buf, const size_t buf_len)
{
#if !defined(__gnu_hurd__)
#if defined(__FreeBSD__) || defined(__OpenBSD__) || defined(__NetBSD__)
	char *vec;
#else
	unsigned char *vec;
#endif
	char *buffer;
	size_t vec_len, i;
	const size_t page_size = stress_get_pagesize();

	if (!(opt_flags & OPT_FLAGS_MMAP_MINCORE))
		return 0;

	vec_len = buf_len / page_size;
	if (vec_len < 1)
		return -1;

	vec = calloc(vec_len, 1);
	if (!vec)
		return -1;

	if (mincore(buf, buf_len, vec) < 0) {
		free(vec);
		return -1;
	}

	/* If page is not resident in memory, touch it */
	for (buffer = buf, i = 0; i < vec_len; i++, buffer += page_size)
		if (!(vec[i] & 1))
			(*buffer)++;

	/* And return it back */
	for (buffer = buf, i = 0; i < vec_len; i++, buffer += page_size)
		if (!(vec[i] & 1))
			(*buffer)--;

	free(vec);
#else
	(void)buf;
	(void)buf_len;
#endif
	return 0;
}
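A minimal sketch of mincore(2) itself (an added illustration, not stress-ng code): the kernel fills in one vector byte per page, and bit 0 of each byte reports residency, which is exactly what the loops above test.

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	const size_t len = 4 * page_size;
	unsigned char vec[4];
	size_t i;
	char *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	buf[0] = 1;	/* fault in just the first page */
	if (mincore(buf, len, vec) == 0)
		for (i = 0; i < 4; i++)
			printf("page %zu: %s\n", i,
				(vec[i] & 1) ? "resident" : "not resident");
	(void)munmap(buf, len);
	return 0;
}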
Example #4
/*
 *  stress_vm_rw
 *	stress vm_read_v/vm_write_v
 */
int stress_vm_rw(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid;
	int pipe_wr[2], pipe_rd[2];
	const size_t page_size = stress_get_pagesize();
	size_t sz;

	(void)instance;

	if (!set_vm_rw_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_vm_rw_bytes = MAX_VM_RW_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_vm_rw_bytes = MIN_VM_RW_BYTES;
	}
	sz = opt_vm_rw_bytes & ~(page_size - 1);

	if (pipe(pipe_wr) < 0) {
		pr_failed_dbg(name, "pipe");
		return EXIT_FAILURE;
	}
	if (pipe(pipe_rd) < 0) {
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		pr_failed_dbg(name, "pipe");
		return EXIT_FAILURE;
	}

	pid = fork();
	if (pid < 0) {
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		/* Child */
		uint8_t *buf;
		int ret = EXIT_SUCCESS;
		addr_msg_t msg_rd, msg_wr;

		/* Close unwanted ends */
		(void)close(pipe_wr[0]);
		(void)close(pipe_rd[1]);

		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (buf == MAP_FAILED) {
			pr_failed_dbg(name, "mmap");
			ret = EXIT_FAILURE;
			goto cleanup;
		}

		for (;;) {
			uint8_t *ptr, *end = buf + sz;
			int ret;

			memset(&msg_wr, 0, sizeof(msg_wr));
			msg_wr.addr = buf;
			msg_wr.val = 0;

			/* Send address of buffer to parent */
redo_wr1:
			ret = write(pipe_wr[1], &msg_wr, sizeof(msg_wr));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_wr1;
				if (errno != EBADF)
					pr_failed_dbg(name, "write");
				break;
			}
redo_rd1:
			/* Wait for parent to populate data */
			ret = read(pipe_rd[0], &msg_rd, sizeof(msg_rd));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_rd1;
				pr_failed_dbg(name, "read");
				break;
			}
			if (ret == 0)
				break;
			if (ret != sizeof(msg_rd)) {
				pr_failed_dbg(name, "read");
				break;
			}

			if (opt_flags & OPT_FLAGS_VERIFY) {
				/* Check memory altered by parent is sane */
				for (ptr = buf; ptr < end; ptr += page_size) {
					if (*ptr != msg_rd.val) {
						pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
							name, ptr, *ptr, msg_rd.val);
						goto cleanup;
					}
					*ptr = 0;
				}
			}
		}
cleanup:
		/* Tell parent we're done */
		msg_wr.addr = 0;
		msg_wr.val = 0;
		if (write(pipe_wr[1], &msg_wr, sizeof(msg_wr)) <= 0) {
			if (errno != EBADF)
				pr_dbg(stderr, "%s: failed to write termination message "
					"over pipe: errno=%d (%s)\n",
					name, errno, strerror(errno));
		}

		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		if (buf != MAP_FAILED)
			(void)munmap(buf, sz);
		exit(ret);
	} else {
		/* Parent */
		int status;
		uint8_t val = 0;
		uint8_t *localbuf;
		addr_msg_t msg_rd, msg_wr;

		localbuf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
		if (localbuf == MAP_FAILED) {
			(void)close(pipe_wr[0]);
			(void)close(pipe_wr[1]);
			(void)close(pipe_rd[0]);
			(void)close(pipe_rd[1]);
			pr_failed_dbg(name, "mmap");
			exit(EXIT_FAILURE);
		}

		/* Close unwanted ends */
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);

		do {
			struct iovec local[1], remote[1];
			uint8_t *ptr, *end = localbuf + sz;
			int ret;

			/* Wait for address of child's buffer */
redo_rd2:
			if (!opt_do_run)
				break;
			ret = read(pipe_wr[0], &msg_rd, sizeof(msg_rd));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_rd2;
				pr_failed_dbg(name, "read");
				break;
			}
			if (ret == 0)
				break;
			if (ret != sizeof(msg_rd)) {
				pr_failed_dbg(name, "read");
				break;
			}
			/* Child telling us it's terminating? */
			if (!msg_rd.addr)
				break;

			/* Perform read from child's memory */
			local[0].iov_base = localbuf;
			local[0].iov_len = sz;
			remote[0].iov_base = msg_rd.addr;
			remote[0].iov_len = sz;
			if (process_vm_readv(pid, local, 1, remote, 1, 0) < 0) {
				pr_failed_dbg(name, "process_vm_readv");
				break;
			}

			if (opt_flags & OPT_FLAGS_VERIFY) {
				/* Check data is sane */
				for (ptr = localbuf; ptr < end; ptr += page_size) {
					if (*ptr) {
						pr_fail(stderr, "%s: memory at %p: %d vs %d\n",
							name, ptr, *ptr, msg_rd.val);
						goto fail;
					}
					*ptr = 0;
				}
				/* Set memory */
				for (ptr = localbuf; ptr < end; ptr += page_size)
					*ptr = val;
			}

			/* Write to child's memory */
			msg_wr = msg_rd;
			local[0].iov_base = localbuf;
			local[0].iov_len = sz;
			remote[0].iov_base = msg_rd.addr;
			remote[0].iov_len = sz;
			if (process_vm_writev(pid, local, 1, remote, 1, 0) < 0) {
				pr_failed_dbg(name, "process_vm_writev");
				break;
			}
			msg_wr.val = val;
			val++;

redo_wr2:
			if (!opt_do_run)
				break;
			/* Inform child that memory has been changed */
			ret = write(pipe_rd[1], &msg_wr, sizeof(msg_wr));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo_wr2;
				if (errno != EBADF)
					pr_failed_dbg(name, "write");
				break;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
fail:
		/* Tell child we're done */
		msg_wr.addr = NULL;
		msg_wr.val = 0;
		if (write(pipe_rd[1], &msg_wr, sizeof(msg_wr)) < 0) {
			if (errno != EBADF)
				pr_dbg(stderr, "%s: failed to write termination message "
					"over pipe: errno=%d (%s)\n",
					name, errno, strerror(errno));
		}
		(void)close(pipe_wr[0]);
		(void)close(pipe_wr[1]);
		(void)close(pipe_rd[0]);
		(void)close(pipe_rd[1]);
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
		(void)munmap(localbuf, sz);
	}

	return EXIT_SUCCESS;
}
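The cross-address-space copies above are done with process_vm_readv(2) and process_vm_writev(2) (Linux 3.2+, glibc 2.15+). A minimal sketch of the iovec plumbing (an added illustration, not stress-ng code); it targets the calling process itself, so no ptrace capability is needed:

#define _GNU_SOURCE
#include <stdio.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	char src[32] = "hello from 'remote' memory";
	char dst[32] = { 0 };
	struct iovec local = { .iov_base = dst, .iov_len = sizeof(dst) };
	struct iovec remote = { .iov_base = src, .iov_len = sizeof(src) };

	if (process_vm_readv(getpid(), &local, 1, &remote, 1, 0) < 0) {
		perror("process_vm_readv");
		return 1;
	}
	printf("read back: %s\n", dst);
	return 0;
}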
Example #5
/*
 *  stress_mlock()
 *	stress mlock with pages being locked/unlocked
 */
int stress_mlock(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const size_t page_size = stress_get_pagesize();
	pid_t pid;
	size_t max = sysconf(_SC_MAPPED_FILES);
	max = max > MLOCK_MAX ? MLOCK_MAX : max;

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_err(stderr, "%s: fork failed: errno=%d: (%s)\n",
			name, errno, strerror(errno));
	} else if (pid > 0) {
		int status, ret;

		setpgid(pid, pgrp);
		stress_parent_died_alarm();

		/* Parent, wait for child */
		ret = waitpid(pid, &status, 0);
		if (ret < 0) {
			if (errno != EINTR)
				pr_dbg(stderr, "%s: waitpid(): errno=%d (%s)\n",
					name, errno, strerror(errno));
			(void)kill(pid, SIGTERM);
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
		} else if (WIFSIGNALED(status)) {
			pr_dbg(stderr, "%s: child died: %s (instance %d)\n",
				name, stress_strsignal(WTERMSIG(status)),
				instance);
			/* If we got killed by OOM killer, re-start */
			if (WTERMSIG(status) == SIGKILL) {
				pr_dbg(stderr, "%s: assuming killed by OOM "
					"killer, restarting again "
					"(instance %d)\n", name, instance);
				goto again;
			}
		}
	} else if (pid == 0) {
		uint8_t *mappings[max];
		size_t i, n;

		setpgid(0, pgrp);

		/* Make sure this is killable by OOM killer */
		set_oom_adjustment(name, true);

		do {
			for (n = 0; opt_do_run && (n < max); n++) {
				int ret;
				if (!opt_do_run || (max_ops && *counter >= max_ops))
					break;

				mappings[n] = mmap(NULL, page_size * 3,
					PROT_READ | PROT_WRITE,
					MAP_SHARED | MAP_ANONYMOUS, -1, 0);
				if (mappings[n] == MAP_FAILED)
					break;
				ret = mlock_shim(mappings[n] + page_size, page_size);
				if (ret < 0) {
					if (errno == EAGAIN)
						continue;
					if (errno == ENOMEM)
						break;
					pr_fail_err(name, "mlock");
					break;
				} else {
					/*
					 * Mappings are always page aligned so
					 * we can use the bottom bit to
					 * indicate if the page has been
					 * mlocked or not
				 	 */
					mappings[n] = (uint8_t *)
						((ptrdiff_t)mappings[n] | 1);
					(*counter)++;
				}
			}

			for (i = 0; i < n;  i++) {
				ptrdiff_t addr = (ptrdiff_t)mappings[i];
				ptrdiff_t mlocked = addr & 1;

				addr ^= mlocked;
				if (mlocked)
					(void)munlock((uint8_t *)addr + page_size, page_size);
				munmap((void *)addr, page_size * 3);
			}
#if !defined(__gnu_hurd__)
			(void)mlockall(MCL_CURRENT);
			(void)mlockall(MCL_FUTURE);
#if defined(MCL_ONFAULT)
			(void)mlockall(MCL_ONFAULT);
#endif
#endif
			for (n = 0; opt_do_run && (n < max); n++) {
				if (!opt_do_run || (max_ops && *counter >= max_ops))
					break;

				mappings[n] = mmap(NULL, page_size,
					PROT_READ | PROT_WRITE,
					MAP_SHARED | MAP_ANONYMOUS, -1, 0);
				if (mappings[n] == MAP_FAILED)
					break;
			}
#if !defined(__gnu_hurd__)
			(void)munlockall();
#endif
			for (i = 0; i < n;  i++)
				munmap(mappings[i], page_size);
		} while (opt_do_run && (!max_ops || *counter < max_ops));
	}

	return EXIT_SUCCESS;
}
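A minimal mlock(2)/munlock(2) sketch (an added illustration, not stress-ng code). RLIMIT_MEMLOCK commonly makes mlock fail with EAGAIN or ENOMEM, which is why the stressor above treats those errnos as non-fatal:

#include <stdio.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	void *buf = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	if (mlock(buf, page_size) == 0) {
		/* the page is pinned: it cannot be swapped out */
		(void)munlock(buf, page_size);
	} else {
		perror("mlock");
	}
	(void)munmap(buf, page_size);
	return 0;
}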
Example #6
/*
 *  stress_switch
 *	stress by heavy context switching
 */
int stress_switch(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid;
	int pipefds[2];
	size_t buf_size;

	(void)instance;

#if defined(__linux__) && NEED_GLIBC(2,9,0)
	if (pipe2(pipefds, O_DIRECT) < 0) {
		pr_fail_dbg(name, "pipe2");
		return EXIT_FAILURE;
	}
	buf_size = 1;
#else
	if (pipe(pipefds) < 0) {
		pr_fail_dbg(name, "pipe");
		return EXIT_FAILURE;
	}
	buf_size = stress_get_pagesize();
#endif

#if defined(F_SETPIPE_SZ)
	if (fcntl(pipefds[0], F_SETPIPE_SZ, buf_size) < 0) {
		pr_dbg(stderr, "%s: could not force pipe size to %zu bytes, "
			"errno = %d (%s)\n",
			name, buf_size, errno, strerror(errno));
	}
	if (fcntl(pipefds[1], F_SETPIPE_SZ, buf_size) < 0) {
		pr_dbg(stderr, "%s: could not force pipe size to %zu bytes, "
			"errno = %d (%s)\n",
			name, buf_size, errno, strerror(errno));
	}
#endif

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		(void)close(pipefds[0]);
		(void)close(pipefds[1]);
		pr_fail_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		char buf[buf_size];

		setpgid(0, pgrp);
		stress_parent_died_alarm();

		(void)close(pipefds[1]);

		while (opt_do_run) {
			ssize_t ret;

			ret = read(pipefds[0], buf, sizeof(buf));
			if (ret < 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					continue;
				pr_fail_dbg(name, "read");
				break;
			}
			if (ret == 0)
				break;
			if (*buf == SWITCH_STOP)
				break;
		}
		(void)close(pipefds[0]);
		exit(EXIT_SUCCESS);
	} else {
		char buf[buf_size];
		int status;

		/* Parent */
		setpgid(pid, pgrp);
		(void)close(pipefds[0]);

		memset(buf, '_', buf_size);

		do {
			ssize_t ret;

			ret = write(pipefds[1], buf, sizeof(buf));
			if (ret <= 0) {
				if ((errno == EAGAIN) || (errno == EINTR))
					continue;
				if (errno) {
					pr_fail_dbg(name, "write");
					break;
				}
				continue;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		memset(buf, SWITCH_STOP, sizeof(buf));
		if (write(pipefds[1], buf, sizeof(buf)) <= 0)
			pr_fail_dbg(name, "termination write");
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
	}

	return EXIT_SUCCESS;
}
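A minimal sketch of the packet-mode pipe used above (an added illustration, not stress-ng code): pipe2(2) with O_DIRECT (Linux 3.4+) makes every write a discrete packet, so one-byte writes cannot be coalesced and each one forces a reader/writer context switch:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fds[2];
	char c = 'x', r;

	if (pipe2(fds, O_DIRECT) < 0)
		return 1;
	(void)write(fds[1], &c, 1);	/* one packet ... */
	(void)read(fds[0], &r, 1);	/* ... one read */
	printf("got '%c'\n", r);
	(void)close(fds[0]);
	(void)close(fds[1]);
	return 0;
}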
Example #7
/*
 *  stress_mmap()
 *	stress mmap
 */
int stress_mmap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	size_t sz, pages4k;
#if !defined(__gnu_hurd__)
	const int ms_flags = (opt_flags & OPT_FLAGS_MMAP_ASYNC) ?
		MS_ASYNC : MS_SYNC;
#endif
	const pid_t pid = getpid();
	int fd = -1, flags = MAP_PRIVATE | MAP_ANONYMOUS;
	char filename[PATH_MAX];

	(void)instance;
#ifdef MAP_POPULATE
	flags |= MAP_POPULATE;
#endif

	if (!set_mmap_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_mmap_bytes = MAX_MMAP_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_mmap_bytes = MIN_MMAP_BYTES;
	}
	sz = opt_mmap_bytes & ~(page_size - 1);
	pages4k = sz / page_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		ssize_t ret;
		char ch = '\0';

		if (stress_temp_dir_mk(name, pid, instance) < 0)
			return EXIT_FAILURE;

		(void)stress_temp_filename(filename, sizeof(filename),
			name, pid, instance, mwc32());

		(void)umask(0077);
		if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
			pr_failed_err(name, "open");
			(void)unlink(filename);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
		(void)unlink(filename);
		if (lseek(fd, sz - sizeof(ch), SEEK_SET) < 0) {
			pr_failed_err(name, "lseek");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
redo:
		ret = write(fd, &ch, sizeof(ch));
		if (ret != sizeof(ch)) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo;
			pr_failed_err(name, "write");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
		flags &= ~(MAP_ANONYMOUS | MAP_PRIVATE);
		flags |= MAP_SHARED;
	}

	do {
		uint8_t mapped[pages4k];
		uint8_t *mappings[pages4k];
		size_t n;

		if (!opt_do_run)
			break;
		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, flags, fd, 0);
		if (buf == MAP_FAILED) {
			/* Force MAP_POPULATE off, just in case */
#ifdef MAP_POPULATE
			flags &= ~MAP_POPULATE;
#endif
			continue;	/* Try again */
		}
		if (opt_flags & OPT_FLAGS_MMAP_FILE) {
			memset(buf, 0xff, sz);
#if !defined(__gnu_hurd__)
			(void)msync(buf, sz, ms_flags);
#endif
		}
		(void)madvise_random(buf, sz);
		(void)mincore_touch_pages(buf, sz);
		stress_mmap_mprotect(name, buf, sz);
		memset(mapped, PAGE_MAPPED, sizeof(mapped));
		for (n = 0; n < pages4k; n++)
			mappings[n] = buf + (n * page_size);

		/* Ensure we can write to the mapped pages */
		stress_mmap_set(buf, sz);
		if (opt_flags & OPT_FLAGS_VERIFY) {
			if (stress_mmap_check(buf, sz) < 0)
				pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
					"not contain expected data\n", name, sz);
		}

		/*
		 *  Step #1, unmap all pages in random order
		 */
		(void)mincore_touch_pages(buf, sz);
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (mapped[page] == PAGE_MAPPED) {
					mapped[page] = 0;
					(void)madvise_random(mappings[page], page_size);
					stress_mmap_mprotect(name, mappings[page], page_size);
					(void)munmap(mappings[page], page_size);
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
		(void)munmap(buf, sz);
#ifdef MAP_FIXED
		/*
		 *  Step #2, map them back in random order
		 */
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (!mapped[page]) {
					off_t offset = (opt_flags & OPT_FLAGS_MMAP_FILE) ?
							page * page_size : 0;
					/*
					 * Attempt to map them back into the original address, this
					 * may fail (it's not the most portable operation), so keep
					 * track of failed mappings too
					 */
					mappings[page] = mmap(mappings[page], page_size, PROT_READ | PROT_WRITE, MAP_FIXED | flags, fd, offset);
					if (mappings[page] == MAP_FAILED) {
						mapped[page] = PAGE_MAPPED_FAIL;
						mappings[page] = NULL;
					} else {
						(void)mincore_touch_pages(mappings[page], page_size);
						(void)madvise_random(mappings[page], page_size);
						stress_mmap_mprotect(name, mappings[page], page_size);
						mapped[page] = PAGE_MAPPED;
						/* Ensure we can write to the mapped page */
						stress_mmap_set(mappings[page], page_size);
						if (stress_mmap_check(mappings[page], page_size) < 0)
							pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
								"not contain expected data\n", name, page_size);
						if (opt_flags & OPT_FLAGS_MMAP_FILE) {
							memset(mappings[page], n, page_size);
#if !defined(__gnu_hurd__)
							(void)msync(mappings[page], page_size, ms_flags);
#endif
						}
					}
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
#endif
cleanup:
		/*
		 *  Step #3, unmap them all
		 */
		for (n = 0; n < pages4k; n++) {
			if (mapped[n] & PAGE_MAPPED) {
				(void)madvise_random(mappings[n], page_size);
				stress_mmap_mprotect(name, mappings[n], page_size);
				(void)munmap(mappings[n], page_size);
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);
	}
	return EXIT_SUCCESS;
}
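The file-backed branch above uses a classic idiom: grow the file with lseek() plus a single one-byte write (leaving it sparse), then mmap it MAP_SHARED and msync the dirtied pages. A minimal sketch of just that idiom (an added illustration; the file name demo.dat is hypothetical):

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t sz = 4096;
	char ch = '\0';
	char *buf;
	int fd = open("demo.dat", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	(void)unlink("demo.dat");	/* file persists until fd closes */
	if (lseek(fd, sz - 1, SEEK_SET) < 0)
		return 1;
	if (write(fd, &ch, 1) != 1)	/* file is now sz bytes, sparse */
		return 1;
	buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf != MAP_FAILED) {
		memset(buf, 0xff, sz);		/* dirty the pages */
		(void)msync(buf, sz, MS_SYNC);	/* push them to the file */
		(void)munmap(buf, sz);
	}
	(void)close(fd);
	return 0;
}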
Example #8
/*
 *  stress_vm_splice
 *	stress vmsplice by splicing memory to /dev/null via a pipe
 */
int stress_vm_splice(
    uint64_t *const counter,
    const uint32_t instance,
    const uint64_t max_ops,
    const char *name)
{
    int fd, fds[2];
    uint8_t *buf;
    const size_t page_size = stress_get_pagesize();
    size_t sz;

    (void)instance;

    if (!set_vm_splice_bytes) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_vm_splice_bytes = MAX_VM_SPLICE_BYTES;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_vm_splice_bytes = MIN_VM_SPLICE_BYTES;
    }
    sz = opt_vm_splice_bytes & ~(page_size - 1);

    buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (buf == MAP_FAILED) {
        pr_failed_dbg(name, "mmap");
        return EXIT_FAILURE;
    }

    if (pipe(fds) < 0) {
        (void)munmap(buf, sz);
        pr_failed_err(name, "pipe");
        return EXIT_FAILURE;
    }

    if ((fd = open("/dev/null", O_WRONLY)) < 0) {
        (void)munmap(buf, sz);
        (void)close(fds[0]);
        (void)close(fds[1]);
        pr_failed_err(name, "open");
        return EXIT_FAILURE;
    }

    do {
        int ret;
        ssize_t bytes;
        struct iovec iov;

        iov.iov_base = buf;
        iov.iov_len = sz;

        bytes = vmsplice(fds[1], &iov, 1, 0);
        if (bytes < 0)
            break;
        ret = splice(fds[0], NULL, fd, NULL, opt_vm_splice_bytes, SPLICE_F_MOVE);
        if (ret < 0)
            break;

        (*counter)++;
    } while (opt_do_run && (!max_ops || *counter < max_ops));

    (void)munmap(buf, sz);
    (void)close(fd);
    (void)close(fds[0]);
    (void)close(fds[1]);

    return EXIT_SUCCESS;
}
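A minimal sketch of the splice path above (an added illustration, not stress-ng code): vmsplice(2) gifts user pages into a pipe and splice(2) moves them on to /dev/null, so the data never passes through another user-space buffer:

#define _GNU_SOURCE
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>
#include <sys/uio.h>

int main(void)
{
	static char buf[4096];
	int fds[2], null_fd;
	struct iovec iov = { .iov_base = buf, .iov_len = sizeof(buf) };

	if (pipe(fds) < 0)
		return 1;
	null_fd = open("/dev/null", O_WRONLY);
	if (null_fd < 0)
		return 1;
	if (vmsplice(fds[1], &iov, 1, 0) < 0)	/* user pages -> pipe */
		perror("vmsplice");
	else if (splice(fds[0], NULL, null_fd, NULL,
			sizeof(buf), SPLICE_F_MOVE) < 0)	/* pipe -> /dev/null */
		perror("splice");
	(void)close(null_fd);
	(void)close(fds[0]);
	(void)close(fds[1]);
	return 0;
}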
Example #9
/*
 *  stress_stackmmap
 *	stress a file memory map'd stack
 */
int stress_stackmmap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd, ret;
	volatile int rc = EXIT_FAILURE;		/* could be clobbered */
	const pid_t pid = getpid();
	stack_t ss;
	struct sigaction new_action;
	char filename[PATH_MAX];
	uint8_t stack_sig[SIGSTKSZ] ALIGN64;	/* ensure we have a sig stack */

	page_size = stress_get_pagesize();
	page_mask = ~(page_size - 1);

	/*
	 *  We need to handle SEGV signals when we
	 *  hit the end of the mmap'd stack; however
	 *  an alternative signal handling stack
	 *  is required because we ran out of stack
	 */
	memset(&new_action, 0, sizeof new_action);
	new_action.sa_handler = stress_segvhandler;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = SA_ONSTACK;
	if (sigaction(SIGSEGV, &new_action, NULL) < 0) {
		pr_fail_err(name, "sigaction");
		return EXIT_FAILURE;
	}

	/*
	 *  We need an alternative signal stack
	 *  to handle segfaults on an overrun
	 *  mmap'd stack
	 */
	memset(stack_sig, 0, sizeof(stack_sig));
	ss.ss_sp = (void *)stack_sig;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) < 0) {
		pr_fail_err(name, "sigaltstack");
		return EXIT_FAILURE;
	}

	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	/* Create a file-backed mmapping for the stack */
	fd = open(filename, O_SYNC | O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		pr_fail_err(name, "mmap'd stack file open");
		goto tidy_dir;
	}
	(void)unlink(filename);
	if (ftruncate(fd, MMAPSTACK_SIZE) < 0) {
		pr_fail_err(name, "ftruncate");
		(void)close(fd);
		goto tidy_dir;
	}
	stack_mmap = mmap(NULL, MMAPSTACK_SIZE, PROT_READ | PROT_WRITE,
		MAP_SHARED, fd, 0);
	if (stack_mmap == MAP_FAILED) {
		pr_fail_err(name, "mmap");
		(void)close(fd);
		goto tidy_dir;
	}
	(void)close(fd);

	if (madvise(stack_mmap, MMAPSTACK_SIZE, MADV_RANDOM) < 0) {
		pr_dbg(stderr, "%s: madvise failed: errno=%d (%s)\n",
			name, errno, strerror(errno));
	}

	memset(&c_test, 0, sizeof(c_test));
	if (getcontext(&c_test) < 0) {
		pr_fail_err(name, "getcontext");
		goto tidy_mmap;
	}
	c_test.uc_stack.ss_sp = stack_mmap;
	c_test.uc_stack.ss_size = MMAPSTACK_SIZE;
	c_test.uc_link = &c_main;
	makecontext(&c_test, stress_stackmmap_push_msync, 0);

	/*
	 *  set jmp handler to jmp back into the loop on a full
	 *  stack segfault.  Use swapcontext to jump into a
	 *  new context using the new mmap'd stack
	 */
	do {
		ret = sigsetjmp(jmp_env, 1);
		if (!ret)
			swapcontext(&c_main, &c_test);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;

tidy_mmap:
	munmap(stack_mmap, MMAPSTACK_SIZE);
tidy_dir:
	(void)stress_temp_dir_rm(name, pid, instance);

	return rc;
}
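A minimal sketch of the ucontext machinery above (an added illustration, not stress-ng code): getcontext(3) initialises a context, makecontext(3) points it at a function and a private stack (the stressor uses its file-backed mmap for this), swapcontext(3) jumps in, and uc_link brings control back when the function returns:

#include <stdio.h>
#include <ucontext.h>

static ucontext_t c_main, c_test;
static char stack[64 * 1024];

static void on_other_stack(void)
{
	printf("running on the alternate stack\n");
	/* returning resumes c_main via uc_link */
}

int main(void)
{
	if (getcontext(&c_test) < 0)
		return 1;
	c_test.uc_stack.ss_sp = stack;
	c_test.uc_stack.ss_size = sizeof(stack);
	c_test.uc_link = &c_main;
	makecontext(&c_test, on_other_stack, 0);

	swapcontext(&c_main, &c_test);	/* save main, run c_test */
	printf("back on the original stack\n");
	return 0;
}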
Example #10
/*
 *  pagein_proc()
 *	force pages into memory for a given process
 */
static int pagein_proc(const pid_t pid)
{
	char path[PATH_MAX];
	char buffer[4096];
	int fdmem, ret;
	FILE *fpmap;
	const size_t page_size = stress_get_pagesize();
	size_t pages = 0;

	ret = ptrace(PTRACE_ATTACH, pid, NULL, NULL);
	if (ret < 0)
		return -errno;

	(void)snprintf(path, sizeof(path), "/proc/%d/mem", pid);
	fdmem = open(path, O_RDONLY);
	if (fdmem < 0)
		return -errno;

	(void)snprintf(path, sizeof(path), "/proc/%d/maps", pid);
	fpmap = fopen(path, "r");
	if (!fpmap) {
		const int err = errno;

		(void)close(fdmem);
		(void)ptrace(PTRACE_DETACH, pid, NULL, NULL);
		return -err;
	}

	/*
	 * Look for field 0060b000-0060c000 r--p 0000b000 08:01 1901726
	 */
	while (fgets(buffer, sizeof(buffer), fpmap)) {
		uint64_t begin, end, len;
		uintptr_t off;
		char tmppath[1024];
		char prot[5];

		if (sscanf(buffer, "%" SCNx64 "-%" SCNx64
		           " %4s %*x %*x:%*x %*d %1023s", &begin, &end, prot, tmppath) != 4)
			continue;

		/* ignore non-readable or non-private mappings */
		if (prot[0] != 'r' || prot[3] != 'p')
			continue;
		len = end - begin;

		/* Ignore bad range */
		if ((begin >= end) || (len == 0) || (begin == 0))
			continue;
		/* Skip huge ranges more than 2GB */
		if (len > 0x80000000UL)
			continue;

		for (off = begin; off < end; off += page_size, pages++) {
			unsigned long data;
			off_t pos;
			size_t sz;

			(void)ptrace(PTRACE_PEEKDATA, pid, (void *)off, &data);
			pos = lseek(fdmem, off, SEEK_SET);
			if (pos != (off_t)off)
				continue;
			sz = read(fdmem, &data, sizeof(data));
			(void)sz;
		}
	}

	(void)ptrace(PTRACE_DETACH, pid, NULL, NULL);
	(void)fclose(fpmap);
	(void)close(fdmem);

	return 0;
}
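A minimal sketch of the /proc/<pid>/maps parsing above, run against the current process so no ptrace attach is required (an added illustration, not part of the tool). Each line has the form "0060b000-0060c000 r--p 0000b000 08:01 1901726 /path":

#include <inttypes.h>
#include <stdio.h>

int main(void)
{
	char buffer[4096];
	FILE *fpmap = fopen("/proc/self/maps", "r");

	if (!fpmap)
		return 1;
	while (fgets(buffer, sizeof(buffer), fpmap)) {
		uint64_t begin, end;
		char prot[5];

		if (sscanf(buffer, "%" SCNx64 "-%" SCNx64 " %4s",
			   &begin, &end, prot) != 3)
			continue;
		if (prot[0] == 'r' && prot[3] == 'p')	/* readable + private */
			printf("%" PRIx64 "-%" PRIx64 " (%" PRIu64 " bytes)\n",
				begin, end, end - begin);
	}
	(void)fclose(fpmap);
	return 0;
}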
Example #11
/*
 *  stress_bigheap()
 *	stress heap allocation
 */
int stress_bigheap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	void *ptr = NULL, *last_ptr = NULL;
	uint8_t *last_ptr_end = NULL;
	size_t size = 0;
	const size_t stride = stress_get_pagesize();
	pid_t pid;
	uint32_t restarts = 0, nomems = 0;
	const size_t page_size = stress_get_pagesize();

	if (!set_bigheap_growth) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_bigheap_growth = MAX_BIGHEAP_GROWTH;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_bigheap_growth = MIN_BIGHEAP_GROWTH;
	}
again:
	if (!opt_do_run)
		return EXIT_SUCCESS;
	pid = fork();
	if (pid < 0) {
		if (errno == EAGAIN)
			goto again;
		pr_err(stderr, "%s: fork failed: errno=%d: (%s)\n",
			name, errno, strerror(errno));
	} else if (pid > 0) {
		int status, ret;

		setpgid(pid, pgrp);
		/* Parent, wait for child */
		ret = waitpid(pid, &status, 0);
		if (ret < 0) {
			if (errno != EINTR)
				pr_dbg(stderr, "%s: waitpid(): errno=%d (%s)\n",
					name, errno, strerror(errno));
			(void)kill(pid, SIGTERM);
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
		} else if (WIFSIGNALED(status)) {
			pr_dbg(stderr, "%s: child died: %s (instance %d)\n",
				name, stress_strsignal(WTERMSIG(status)),
				instance);
			/* If we got killed by OOM killer, re-start */
			if (WTERMSIG(status) == SIGKILL) {
				pr_dbg(stderr, "%s: assuming killed by OOM "
					"killer, restarting again "
					"(instance %d)\n",
					name, instance);
				restarts++;
				goto again;
			}
		}
	} else if (pid == 0) {
		setpgid(0, pgrp);
		stress_parent_died_alarm();

		/* Make sure this is killable by OOM killer */
		set_oom_adjustment(name, true);

		do {
			void *old_ptr = ptr;
			size += (size_t)opt_bigheap_growth;

			/*
			 * With many instances running it is wise to
			 * double check before the next realloc as
			 * sometimes process start up is delayed for
			 * some time and we should bail out before
			 * exerting any more memory pressure
			 */
			if (!opt_do_run)
				goto abort;

			ptr = realloc(old_ptr, size);
			if (ptr == NULL) {
				pr_dbg(stderr, "%s: out of memory at %" PRIu64
					" MB (instance %d)\n",
					name, (uint64_t)size >> 20,
					instance);
				free(old_ptr);
				size = 0;
				nomems++;
			} else {
				size_t i, n;
				uint8_t *u8ptr, *tmp;

				if (last_ptr == ptr) {
					tmp = u8ptr = last_ptr_end;
					n = (size_t)opt_bigheap_growth;
				} else {
					tmp = u8ptr = ptr;
					n = size;
				}

				if (page_size > 0) {
					size_t sz = page_size - 1;
					uintptr_t pg_ptr = ((uintptr_t)ptr + sz) & ~sz;
					size_t len = size - (pg_ptr - (uintptr_t)ptr);
					(void)mincore_touch_pages((void *)pg_ptr, len);
				}

				for (i = 0; i < n; i+= stride, u8ptr += stride) {
					if (!opt_do_run)
						goto abort;
					*u8ptr = (uint8_t)i;
				}

				if (opt_flags & OPT_FLAGS_VERIFY) {
					for (i = 0; i < n; i+= stride, tmp += stride) {
						if (!opt_do_run)
							goto abort;
						if (*tmp != (uint8_t)i)
							pr_fail(stderr, "%s: byte at location %p was 0x%" PRIx8
								" instead of 0x%" PRIx8 "\n",
								name, tmp, *tmp, (uint8_t)i);
					}
				}

				last_ptr = ptr;
				last_ptr_end = u8ptr;

			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
abort:
		free(ptr);
	}
	if (restarts + nomems > 0)
		pr_dbg(stderr, "%s: OOM restarts: %" PRIu32
			", out of memory restarts: %" PRIu32 ".\n",
			name, restarts, nomems);

	return EXIT_SUCCESS;
}
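A minimal sketch of the grow-by-realloc core of this stressor (an added illustration; the 64 KB growth step and 16 rounds are arbitrary stand-ins for opt_bigheap_growth and the bounded ops counter). Touching one byte per page matters: with overcommit, realloc can "succeed" without any physical memory being committed.

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

int main(void)
{
	const size_t stride = (size_t)sysconf(_SC_PAGESIZE);
	const size_t growth = 64 * 1024;
	size_t size = 0, i;
	char *ptr = NULL;
	int rounds;

	for (rounds = 0; rounds < 16; rounds++) {
		char *newptr = realloc(ptr, size + growth);

		if (!newptr) {		/* out of memory: give it all back */
			free(ptr);
			return 1;
		}
		ptr = newptr;
		size += growth;
		for (i = 0; i < size; i += stride)
			ptr[i] = (char)i;	/* fault the pages in */
	}
	printf("heap grew to %zu bytes\n", size);
	free(ptr);
	return 0;
}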
Example #12
/*
 *  Create allocations using memfd_create, ftruncate and mmap
 */
static void stress_memfd_allocs(
	const char *name,
	uint64_t *const counter,
	const uint64_t max_ops)
{
	int fds[MAX_MEM_FDS];
	void *maps[MAX_MEM_FDS];
	size_t i;
	const size_t size = stress_get_pagesize() * MEM_PAGES;
	const pid_t pid = getpid();

	do {
		for (i = 0; i < MAX_MEM_FDS; i++) {
			fds[i] = -1;
			maps[i] = MAP_FAILED;
		}

		for (i = 0; i < MAX_MEM_FDS; i++) {
			char filename[PATH_MAX];	/* per-fd name, distinct from the stressor name arg */

			(void)snprintf(filename, sizeof(filename),
				"memfd-%d-%zu", (int)pid, i);
			fds[i] = __memfd_create(filename, 0);
			if (fds[i] < 0) {
				switch (errno) {
				case EMFILE:
				case ENFILE:
					break;
				case ENOMEM:
					goto clean;
				case ENOSYS:
				case EFAULT:
				default:
					pr_err(stderr, "%s: memfd_create failed: errno=%d (%s)\n",
						name, errno, strerror(errno));
					opt_do_run = false;
					goto clean;
				}
			}
		}
	
		for (i = 0; i < MAX_MEM_FDS; i++) {
			if (fds[i] >= 0) {
				ssize_t ret;

				if (!opt_do_run)
					break;

				/* Allocate space */
				ret = ftruncate(fds[i], size);
				if (ret < 0) {
					switch (errno) {
					case EINTR:
						break;
					default:
						pr_fail(stderr, "%s: ftruncate failed, errno=%d (%s)\n",
                                        		name, errno, strerror(errno));
						break;
					}
				}
				/* ..and map it in, using MAP_POPULATE to fault the pages in */
				maps[i] = mmap(NULL, size, PROT_WRITE,
					MAP_FILE | MAP_PRIVATE | MAP_POPULATE, fds[i], 0);
				if (maps[i] != MAP_FAILED)
					(void)mincore_touch_pages(maps[i], size);
			}
		}
clean:
		for (i = 0; i < MAX_MEM_FDS; i++) {
			if (maps[i] != MAP_FAILED)
				(void)munmap(maps[i], size);
			if (fds[i] >= 0)
				(void)close(fds[i]);
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
}
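A minimal sketch of the memfd idiom (an added illustration): it uses the glibc memfd_create(2) wrapper (glibc 2.27+) rather than the stressor's __memfd_create() syscall shim, but the create/ftruncate/mmap lifecycle is the same:

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t size = 4096;
	char *map;
	int fd = memfd_create("demo", 0);

	if (fd < 0)
		return 1;
	if (ftruncate(fd, size) < 0)	/* allocate the anonymous backing */
		return 1;
	map = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (map != MAP_FAILED) {
		strcpy(map, "anonymous file-backed memory");
		printf("%s\n", map);
		(void)munmap(map, size);
	}
	(void)close(fd);
	return 0;
}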
Example #13
/*
 *  stress_shm_sysv()
 *	stress SYSTEM V shared memory
 */
int stress_shm_sysv(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const size_t page_size = stress_get_pagesize();
	size_t orig_sz, sz;
	int pipefds[2];
	int rc = EXIT_SUCCESS;
	ssize_t i;
	pid_t pid;
	bool retry = true;
	uint32_t restarts = 0;

	if (!set_shm_sysv_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_shm_sysv_bytes = MAX_SHM_SYSV_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_shm_sysv_bytes = MIN_SHM_SYSV_BYTES;
	}

	if (!set_shm_sysv_segments) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_shm_sysv_segments = MAX_SHM_SYSV_SEGMENTS;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_shm_sysv_segments = MIN_SHM_SYSV_SEGMENTS;
	}
	orig_sz = sz = opt_shm_sysv_bytes & ~(page_size - 1);

	while (opt_do_run && retry) {
		if (pipe(pipefds) < 0) {
			pr_fail_dbg(name, "pipe");
			return EXIT_FAILURE;
		}
fork_again:
		pid = fork();
		if (pid < 0) {
			/* Can't fork, retry? */
			if (errno == EAGAIN)
				goto fork_again;
			pr_err(stderr, "%s: fork failed: errno=%d: (%s)\n",
				name, errno, strerror(errno));
			(void)close(pipefds[0]);
			(void)close(pipefds[1]);

			/* Nope, give up! */
			return EXIT_FAILURE;
		} else if (pid > 0) {
			/* Parent */
			int status, shm_ids[MAX_SHM_SYSV_SEGMENTS];
			ssize_t n;

			setpgid(pid, pgrp);
			set_oom_adjustment(name, false);
			(void)close(pipefds[1]);

			for (i = 0; i < (ssize_t)opt_shm_sysv_segments; i++)
				shm_ids[i] = -1;

			while (opt_do_run) {
				shm_msg_t 	msg;

				/*
				 *  Blocking read on child shm ID info
				 *  pipe.  We break out if pipe breaks
				 *  on child death, or child tells us
				 *  of its demise.
				 */
				n = read(pipefds[0], &msg, sizeof(msg));
				if (n <= 0) {
					if ((errno == EAGAIN) || (errno == EINTR))
						continue;
					if (errno) {
						pr_fail_dbg(name, "read");
						break;
					}
					pr_fail_dbg(name, "zero byte read");
					break;
				}
				if ((msg.index < 0) ||
				    (msg.index >= MAX_SHM_SYSV_SEGMENTS)) {
					retry = false;
					break;
				}
				shm_ids[msg.index] = msg.shm_id;
			}
			(void)kill(pid, SIGALRM);
			(void)waitpid(pid, &status, 0);
			if (WIFSIGNALED(status)) {
				if ((WTERMSIG(status) == SIGKILL) ||
				    (WTERMSIG(status) == SIGBUS)) {
					pr_dbg(stderr, "%s: assuming killed by OOM killer, "
						"restarting again (instance %d)\n",
						name, instance);
					restarts++;
				}
			}
			(void)close(pipefds[0]);
			/*
			 *  The child may have been killed by the OOM killer or
			 *  some other way, so it may have left the shared
			 *  memory segment around.  At this point the child
			 *  has died, so we should be able to remove the
			 *  shared memory segment.
			 */
			for (i = 0; i < (ssize_t)opt_shm_sysv_segments; i++) {
				if (shm_ids[i] != -1)
					(void)shmctl(shm_ids[i], IPC_RMID, NULL);
			}
		} else if (pid == 0) {
			/* Child, stress memory */
			setpgid(0, pgrp);
			stress_parent_died_alarm();

			/*
			 * Nicing the child may OOM it first as this
			 * doubles the OOM score
			 */
			if (nice(5) < 0)
				pr_dbg(stderr, "%s: nice of child failed, "
					"(instance %d)\n", name, instance);

			(void)close(pipefds[0]);
			rc = stress_shm_sysv_child(pipefds[1], counter,
				max_ops, name, sz, page_size);
			(void)close(pipefds[1]);
			_exit(rc);
		}
	}
	if (orig_sz != sz)
		pr_dbg(stderr, "%s: reduced shared memory size from "
			"%zu to %zu bytes\n", name, orig_sz, sz);
	if (restarts) {
		pr_dbg(stderr, "%s: OOM restarts: %" PRIu32 "\n",
			name, restarts);
	}
	return rc;
}
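A minimal sketch of the System V shm lifecycle the parent and child above drive through a pipe (an added illustration): shmget(2) creates a segment, shmat(2) maps it, and shmctl(IPC_RMID) marks it for removal, which is exactly the safety-net cleanup the parent performs for segments a killed child left behind:

#include <stdio.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>

int main(void)
{
	char *addr;
	int shm_id = shmget(IPC_PRIVATE, 4096, IPC_CREAT | 0600);

	if (shm_id < 0)
		return 1;
	addr = shmat(shm_id, NULL, 0);
	if (addr != (char *)-1) {
		strcpy(addr, "hello sysv shm");
		printf("%s\n", addr);
		(void)shmdt(addr);
	}
	(void)shmctl(shm_id, IPC_RMID, NULL);	/* remove the segment */
	return 0;
}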
Example #14
/*
 *  stress_brk()
 *	stress brk and sbrk
 */
int stress_brk(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid;
	uint32_t restarts = 0, nomems = 0;
	const size_t page_size = stress_get_pagesize();

again:
	if (!opt_do_run)
		return EXIT_SUCCESS;
	pid = fork();
	if (pid < 0) {
		if (errno == EAGAIN)
			goto again;
		pr_err(stderr, "%s: fork failed: errno=%d: (%s)\n",
			name, errno, strerror(errno));
	} else if (pid > 0) {
		int status, ret;

		setpgid(pid, pgrp);
		/* Parent, wait for child */
		ret = waitpid(pid, &status, 0);
		if (ret < 0) {
			if (errno != EINTR)
				pr_dbg(stderr, "%s: waitpid(): errno=%d (%s)\n",
					name, errno, strerror(errno));
			(void)kill(pid, SIGTERM);
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
		} else if (WIFSIGNALED(status)) {
			pr_dbg(stderr, "%s: child died: %s (instance %d)\n",
				name, stress_strsignal(WTERMSIG(status)),
				instance);
			/* If we got killed by OOM killer, re-start */
			if (WTERMSIG(status) == SIGKILL) {
				pr_dbg(stderr, "%s: assuming killed by OOM "
					"killer, restarting again "
					"(instance %d)\n",
					name, instance);
				restarts++;
				goto again;
			}
		}
	} else if (pid == 0) {
		uint8_t *start_ptr;
		bool touch = !(opt_flags & OPT_FLAGS_BRK_NOTOUCH);

		setpgid(0, pgrp);

		/* Make sure this is killable by OOM killer */
		set_oom_adjustment(name, true);

		start_ptr = sbrk(0);
		if (start_ptr == (void *) -1) {
			pr_err(stderr, "%s: sbrk(0) failed: errno=%d (%s)\n",
				name, errno, strerror(errno));
			exit(EXIT_FAILURE);
		}

		do {
			uint8_t *ptr = sbrk((intptr_t)page_size);
			if (ptr == (void *)-1) {
				if (errno == ENOMEM) {
					nomems++;
					if (brk(start_ptr) < 0) {
						pr_err(stderr, "%s: brk(%p) failed: errno=%d (%s)\n",
							name, start_ptr, errno,
							strerror(errno));
						exit(EXIT_FAILURE);
					}
				} else {
					pr_err(stderr, "%s: sbrk(%d) failed: errno=%d (%s)\n",
						name, (int)page_size, errno,
						strerror(errno));
					exit(EXIT_FAILURE);
				}
			} else {
				/* Touch the newly added page, force it to be resident */
				if (touch)
					*(ptr + page_size - 1) = 0;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
	}
	if (restarts + nomems > 0)
		pr_dbg(stderr, "%s: OOM restarts: %" PRIu32
			", out of memory restarts: %" PRIu32 ".\n",
			name, restarts, nomems);

	return EXIT_SUCCESS;
}
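A minimal sketch of the brk/sbrk pattern above (an added illustration): sbrk(0) reads the current break, sbrk(n) grows it and returns the old break (i.e. the start of the new region), and brk(start) shrinks it back. Mixing this with malloc() in real code is unsafe, which is why the stressor confines it to a dedicated child:

#define _DEFAULT_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	const intptr_t page_size = (intptr_t)sysconf(_SC_PAGESIZE);
	void *start = sbrk(0);
	char *ptr = sbrk(page_size);

	if (ptr == (void *)-1)
		return 1;
	ptr[page_size - 1] = 0;	/* touch the last byte of the new page */
	printf("break grew from %p to %p\n", start, sbrk(0));
	(void)brk(start);	/* give the page back */
	return 0;
}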
Example #15
/*
 *  stress_shm_posix()
 *	stress POSIX shared memory
 */
int stress_shm_posix(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const size_t page_size = stress_get_pagesize();
	size_t orig_sz, sz;
	int pipefds[2];
	int rc = EXIT_SUCCESS;
	ssize_t i;
	pid_t pid;

	(void)instance;

	if (!set_shm_posix_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_shm_posix_bytes = MAX_SHM_POSIX_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_shm_posix_bytes = MIN_SHM_POSIX_BYTES;
	}
	if (!set_shm_posix_objects) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_shm_posix_objects = MAX_SHM_POSIX_OBJECTS;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_shm_posix_objects = MIN_SHM_POSIX_OBJECTS;
	}
	orig_sz = sz = opt_shm_posix_bytes & ~(page_size - 1);

	while (opt_do_run) {
		if (pipe(pipefds) < 0) {
			pr_failed_dbg(name, "pipe");
			return EXIT_FAILURE;
		}
fork_again:
		pid = fork();
		if (pid < 0) {
			/* Can't fork, retry? */
			if (errno == EAGAIN)
				goto fork_again;
			pr_err(stderr, "%s: fork failed: errno=%d: (%s)\n",
				name, errno, strerror(errno));
			(void)close(pipefds[0]);
			(void)close(pipefds[1]);

			/* Nope, give up! */
			return EXIT_FAILURE;
		} else if (pid > 0) {
			/* Parent */
			int status;
			char shm_names[MAX_SHM_POSIX_OBJECTS][SHM_NAME_LEN];
			ssize_t n;

			setpgid(pid, pgrp);
			(void)close(pipefds[1]);

			memset(shm_names, 0, sizeof(shm_names));

			while (opt_do_run) {
				shm_msg_t 	msg;
				char *shm_name;

				/*
				 *  Blocking read on child shm ID info
				 *  pipe.  We break out if pipe breaks
				 *  on child death, or child tells us
				 *  of its demise.
				 */
				n = read(pipefds[0], &msg, sizeof(msg));
				if (n <= 0) {
					if ((errno == EAGAIN) || (errno == EINTR))
						continue;
					if (errno) {
						pr_failed_dbg(name, "read");
						break;
					}
					pr_failed_dbg(name, "zero byte read");
					break;
				}
				if ((msg.index < 0) ||
				    (msg.index >= MAX_SHM_POSIX_OBJECTS))
					break;

				shm_name = shm_names[msg.index];
				strncpy(shm_name, msg.shm_name, SHM_NAME_LEN - 1);
				shm_name[SHM_NAME_LEN - 1] = '\0';
			}
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
			(void)close(pipefds[0]);

			/*
			 *  The child may have been killed by the OOM killer or
			 *  some other way, so it may have left the shared
			 *  memory segment around.  At this point the child
			 *  has died, so we should be able to remove the
			 *  shared memory segment.
			 */
			for (i = 0; i < (ssize_t)opt_shm_posix_objects; i++) {
				char *shm_name = shm_names[i];
				if (*shm_name)
					(void)shm_unlink(shm_name);
			}
		} else if (pid == 0) {
			/* Child, stress memory */
			(void)close(pipefds[0]);
			rc = stress_shm_posix_child(pipefds[1], counter,
				max_ops, name, sz);
			(void)close(pipefds[1]);
			_exit(rc);
		}
	}
	if (orig_sz != sz)
		pr_dbg(stderr, "%s: reduced shared memory size from "
			"%zu to %zu bytes\n", name, orig_sz, sz);
	return rc;
}
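A minimal sketch of the POSIX shm lifecycle managed above (an added illustration; link with -lrt on older glibc): shm_open(3) creates a named object, ftruncate() sizes it, mmap() maps it, and shm_unlink(3) removes the name, which is the parent's safety-net cleanup if the child dies without unlinking:

#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const char *name = "/demo-shm";	/* hypothetical object name */
	const size_t sz = 4096;
	int fd = shm_open(name, O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	if (ftruncate(fd, sz) == 0) {
		char *addr = mmap(NULL, sz, PROT_READ | PROT_WRITE,
				MAP_SHARED, fd, 0);

		if (addr != MAP_FAILED) {
			strcpy(addr, "hello posix shm");
			printf("%s\n", addr);
			(void)munmap(addr, sz);
		}
	}
	(void)close(fd);
	(void)shm_unlink(name);	/* remove the named object */
	return 0;
}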
Example #16
/*
 *  stress_tlb_shootdown()
 *	stress out TLB shootdowns
 */
int stress_tlb_shootdown(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const size_t page_size = stress_get_pagesize();
	const size_t mmap_size = page_size * MMAP_PAGES;
	pid_t pids[MAX_TLB_PROCS];

	(void)instance;

	do {
		uint8_t *mem, *ptr;
		int retry = 128;
		cpu_set_t proc_mask;
		int32_t cpus, tlb_procs, i;
		const int32_t max_cpus = stress_get_processors_configured();

		if (sched_getaffinity(0, sizeof(proc_mask), &proc_mask) < 0) {
			pr_fail(stderr, "%s: could not get CPU affinity: "
				"errno=%d, (%s)\n",
				name, errno, strerror(errno));
			return EXIT_FAILURE;
		}
		cpus = CPU_COUNT(&proc_mask);
		/* cap at the size of the pids[] array */
		tlb_procs = STRESS_MINIMUM(cpus, MAX_TLB_PROCS);

		for (;;) {
			mem = mmap(NULL, mmap_size, PROT_WRITE | PROT_READ,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0);
			if ((void *)mem == MAP_FAILED) {
				if ((errno == EAGAIN) || (errno == ENOMEM)) {
					if (--retry < 0)
						return EXIT_NO_RESOURCE;
				} else {
					pr_fail(stderr, "%s: mmap failed: "
						"errno=%d (%s)\n",
						name, errno, strerror(errno));
				}
			} else {
				break;
			}
		}
		memset(mem, 0, mmap_size);

		for (i = 0; i < tlb_procs; i++)
			pids[i] = -1;

		for (i = 0; i < tlb_procs; i++) {
			int32_t j, cpu = -1;

			for (j = 0; j < max_cpus; j++) {
				if (CPU_ISSET(j, &proc_mask)) {
					cpu = j;
					CPU_CLR(j, &proc_mask);
					break;
				}
			}
			if (cpu == -1)
				break;

			pids[i] = fork();
			if (pids[i] < 0)
				break;
			if (pids[i] == 0) {
				cpu_set_t mask;
				char buffer[page_size];

				CPU_ZERO(&mask);
				CPU_SET(cpu % max_cpus, &mask);
				(void)sched_setaffinity(getpid(), sizeof(mask), &mask);

				for (ptr = mem; ptr < mem + mmap_size; ptr += page_size) {
					/* Force tlb shoot down on page */
					(void)mprotect(ptr, page_size, PROT_READ);
					memcpy(buffer, ptr, page_size);
					(void)munmap(ptr, page_size);
				}
				_exit(0);
			}
		}

		for (i = 0; i < tlb_procs; i++) {
			if (pids[i] != -1) {
				int status;

				kill(pids[i], SIGKILL);
				(void)waitpid(pids[i], &status, 0);
			}
		}
		(void)munmap(mem, mmap_size);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
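A minimal sketch of the per-page work each child above performs (an added illustration): changing protections with mprotect(2) and tearing down a mapping with munmap(2) both force the kernel to invalidate TLB entries, and with the mapping shared across CPUs that means inter-processor shootdowns:

#include <stdint.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	char buffer[4096];
	const size_t n = page_size < sizeof(buffer) ? page_size : sizeof(buffer);
	uint8_t *mem = mmap(NULL, page_size, PROT_READ | PROT_WRITE,
			MAP_SHARED | MAP_ANONYMOUS, -1, 0);

	if (mem == MAP_FAILED)
		return 1;
	memset(mem, 0, page_size);
	(void)mprotect(mem, page_size, PROT_READ);	/* protection change: TLB flush */
	memcpy(buffer, mem, n);
	(void)munmap(mem, page_size);			/* unmap: TLB flush */
	return 0;
}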
Example #17
/*
 *  stress_msync()
 *	stress msync
 */
int stress_msync(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	const size_t min_size = 2 * page_size;
	size_t sz = min_size;
	ssize_t ret, rc = EXIT_SUCCESS;

	const pid_t pid = getpid();
	int fd = -1;
	char filename[PATH_MAX];

	ret = sigsetjmp(jmp_env, 1);
	if (ret) {
		pr_fail_err(name, "sigsetjmp");
		return EXIT_FAILURE;
	}
	if (stress_sighandler(name, SIGBUS, stress_sigbus_handler, NULL) < 0)
		return EXIT_FAILURE;

	if (!set_msync_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_msync_bytes = MAX_MSYNC_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_msync_bytes = MIN_MSYNC_BYTES;
	}
	sz = opt_msync_bytes & ~(page_size - 1);
	if (sz < min_size)
		sz = min_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	rc = stress_temp_dir_mk(name, pid, instance);
	if (rc < 0)
		return exit_status(-rc);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		(void)unlink(filename);
		(void)stress_temp_dir_rm(name, pid, instance);

		return rc;
	}
	(void)unlink(filename);

	if (ftruncate(fd, sz) < 0) {
		pr_err(stderr, "%s: ftruncate failed, errno=%d (%s)\n",
			name, errno, strerror(errno));
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);

		return EXIT_FAILURE;
	}

	buf = (uint8_t *)mmap(NULL, sz,
		PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		pr_err(stderr, "%s: failed to mmap memory, errno=%d (%s)\n",
			name, errno, strerror(errno));
		rc = EXIT_NO_RESOURCE;
		goto err;
	}

	do {
		off_t offset;
		uint8_t val, data[page_size];

		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			/* Try again */
			continue;
		}
		/*
		 *  Change data in memory, msync to disk
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);
		ret = msync(buf + offset, page_size, MS_SYNC);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_SYNC on "
				"offset %jd failed, errno=%d (%s)",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_invalidate;
		}
		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seet to offset %jd, "
				"errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_invalidate;
		}
		if (stress_page_check(data, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in file different "
				"to data in memory\n", name);
		}

do_invalidate:
		/*
		 *  Now change data on disc, msync invalidate
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);

		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seet to offset %jd, errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_next;
		}
		ret = msync(buf + offset, page_size, MS_INVALIDATE);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_INVALIDATE on "
				"offset %jd failed, errno=%d (%s)",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_next;
		}
		if (stress_page_check(buf + offset, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in memory "
				"different to data in file\n", name);
		}
do_next:

		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)munmap((void *)buf, sz);
err:
	(void)close(fd);
	(void)stress_temp_dir_rm(name, pid, instance);

	if (sigbus_count)
		pr_inf(stdout, "%s: caught %" PRIu64 " SIGBUS signals\n",
			name, sigbus_count);
	return rc;
}
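A minimal sketch of the two msync(2) directions exercised above (an added illustration; msync-demo.dat is a hypothetical file name): MS_SYNC pushes dirty mapped pages out to the file, while MS_INVALIDATE asks for cached mappings of the file to be invalidated so they reflect what is on disk:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/mman.h>

int main(void)
{
	const size_t sz = 4096;
	char *buf;
	int fd = open("msync-demo.dat", O_CREAT | O_RDWR, 0600);

	if (fd < 0)
		return 1;
	(void)unlink("msync-demo.dat");
	if (ftruncate(fd, sz) < 0)
		return 1;
	buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf != MAP_FAILED) {
		memset(buf, 0xaa, sz);
		(void)msync(buf, sz, MS_SYNC);		/* memory -> file */
		(void)msync(buf, sz, MS_INVALIDATE);	/* resync mapping with file */
		(void)munmap(buf, sz);
	}
	(void)close(fd);
	return 0;
}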
Example #18
/*
 *  stress_mremap()
 *	stress mremap
 */
int stress_mremap(
    uint64_t *const counter,
    const uint32_t instance,
    const uint64_t max_ops,
    const char *name)
{
    uint8_t *buf = NULL;
    const size_t page_size = stress_get_pagesize();
    size_t sz, new_sz, old_sz;
    int flags = MAP_PRIVATE | MAP_ANONYMOUS;

    (void)instance;
#ifdef MAP_POPULATE
    flags |= MAP_POPULATE;
#endif
    if (!set_mremap_bytes) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_mremap_bytes = MAX_MREMAP_BYTES;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_mremap_bytes = MIN_MREMAP_BYTES;
    }
    new_sz = sz = opt_mremap_bytes & ~(page_size - 1);

    /* Make sure this is killable by OOM killer */
    set_oom_adjustment(name, true);

    do {

        if (!opt_do_run)
            break;

        buf = mmap(NULL, new_sz, PROT_READ | PROT_WRITE, flags, -1, 0);
        if (buf == MAP_FAILED) {
            /* Force MAP_POPULATE off, just in case */
#ifdef MAP_POPULATE
            flags &= ~MAP_POPULATE;
#endif
            continue;	/* Try again */
        }
        (void)madvise_random(buf, new_sz);
        (void)mincore_touch_pages(buf, new_sz);

        /* Ensure we can write to the mapped pages */
        if (opt_flags & OPT_FLAGS_VERIFY) {
            stress_mremap_set(buf, new_sz, page_size);
            if (stress_mremap_check(buf, sz, page_size) < 0) {
                pr_fail(stderr, "%s: mmap'd region of %zu "
                        "bytes does not contain expected data\n",
                        name, sz);
                munmap(buf, new_sz);
                return EXIT_FAILURE;
            }
        }

        old_sz = new_sz;
        new_sz >>= 1;
        while (new_sz > page_size) {
            if (try_remap(name, &buf, old_sz, new_sz) < 0) {
                munmap(buf, old_sz);
                return EXIT_FAILURE;
            }
            (void)madvise_random(buf, new_sz);
            if (opt_flags & OPT_FLAGS_VERIFY) {
                if (stress_mremap_check(buf, new_sz, page_size) < 0) {
                    pr_fail(stderr, "%s: mremap'd region "
                            "of %zu bytes does "
                            "not contain expected data\n",
                            name, new_sz);
                    munmap(buf, new_sz);
                    return EXIT_FAILURE;
                }
            }
            old_sz = new_sz;
            new_sz >>= 1;
        }

        new_sz <<= 1;
        while (new_sz < opt_mremap_bytes) {
            if (try_remap(name, &buf, old_sz, new_sz) < 0) {
                munmap(buf, old_sz);
                return EXIT_FAILURE;
            }
            (void)madvise_random(buf, new_sz);
            old_sz = new_sz;
            new_sz <<= 1;
        }
        (void)munmap(buf, old_sz);

        (*counter)++;
    } while (opt_do_run && (!max_ops || *counter < max_ops));

    return EXIT_SUCCESS;
}
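A minimal sketch of the resize step that try_remap() presumably wraps (an added illustration, not stress-ng code): mremap(2) with MREMAP_MAYMOVE resizes an anonymous mapping in place when possible and relocates it otherwise, so the returned pointer must always replace the old one:

#define _GNU_SOURCE
#include <stdio.h>
#include <sys/mman.h>

int main(void)
{
	const size_t old_sz = 2 * 4096, new_sz = 4 * 4096;
	void *newbuf;
	void *buf = mmap(NULL, old_sz, PROT_READ | PROT_WRITE,
			MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return 1;
	newbuf = mremap(buf, old_sz, new_sz, MREMAP_MAYMOVE);
	if (newbuf == MAP_FAILED) {
		(void)munmap(buf, old_sz);
		return 1;
	}
	printf("remapped %p (%zu) -> %p (%zu)\n", buf, old_sz, newbuf, new_sz);
	(void)munmap(newbuf, new_sz);
	return 0;
}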