Example #1
/*
 *  pr_yaml_runinfo()
 *	log info about the system we are running stress-ng on
 */
void pr_yaml_runinfo(FILE *yaml)
{
#if defined(__linux__)
	struct utsname uts;
	struct sysinfo info;
#endif
	time_t t;
	struct tm *tm = NULL;
	char hostname[128];
	char *user = getlogin();

	pr_yaml(yaml, "system-info:\n");
	if (time(&t) != ((time_t)-1))
		tm = localtime(&t);

	pr_yaml(yaml, "      stress-ng-version: " VERSION "\n");
	pr_yaml(yaml, "      run-by: %s\n", user ? user : "******");
	if (tm) {
		pr_yaml(yaml, "      date-yyyy-mm-dd: %4.4d:%2.2d:%2.2d\n",
			tm->tm_year + 1900, tm->tm_mon + 1, tm->tm_mday);
		pr_yaml(yaml, "      time-hh-mm-ss: %2.2d:%2.2d:%2.2d\n",
			tm->tm_hour, tm->tm_min, tm->tm_sec);
		pr_yaml(yaml, "      epoch-secs: %ld\n", (long)t);
	}
	if (!gethostname(hostname, sizeof(hostname)))
		pr_yaml(yaml, "      hostname: %s\n", hostname);
#if defined(__linux__)
	if (uname(&uts) == 0) {
		pr_yaml(yaml, "      sysname: %s\n", uts.sysname);
		pr_yaml(yaml, "      nodename: %s\n", uts.nodename);
		pr_yaml(yaml, "      release: %s\n", uts.release);
		pr_yaml(yaml, "      version: %s\n", uts.version);
		pr_yaml(yaml, "      machine: %s\n", uts.machine);
	}
	if (sysinfo(&info) == 0) {
		pr_yaml(yaml, "      uptime: %ld\n", info.uptime);
		pr_yaml(yaml, "      totalram: %lu\n", info.totalram);
		pr_yaml(yaml, "      freeram: %lu\n", info.freeram);
		pr_yaml(yaml, "      sharedram: %lu\n", info.sharedram);
		pr_yaml(yaml, "      bufferram: %lu\n", info.bufferram);
		pr_yaml(yaml, "      totalswap: %lu\n", info.totalswap);
		pr_yaml(yaml, "      freeswap: %lu\n", info.freeswap);
	}
#endif
	pr_yaml(yaml, "      pagesize: %zd\n", stress_get_pagesize());
	pr_yaml(yaml, "      cpus: %ld\n", stress_get_processors_configured());
	pr_yaml(yaml, "      cpus-online: %ld\n", stress_get_processors_online());
	pr_yaml(yaml, "      ticks-per-second: %ld\n", stress_get_ticks_per_second());
	pr_yaml(yaml, "\n");
}
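pr_yaml() is a stress-ng helper that this excerpt does not define. A minimal sketch of such a helper, assuming it is little more than a varargs wrapper over vfprintf() that tolerates a NULL stream; pr_yaml_sketch() and the main() driver below are hypothetical names, not the stress-ng implementation:

#include <stdarg.h>
#include <stdio.h>

/* hypothetical stand-in for pr_yaml(): forward a printf-style format
 * to the YAML log file, silently doing nothing if no file was given */
static void pr_yaml_sketch(FILE *yaml, const char *fmt, ...)
{
	va_list ap;

	if (!yaml)
		return;
	va_start(ap, fmt);
	(void)vfprintf(yaml, fmt, ap);
	va_end(ap);
}

int main(void)
{
	pr_yaml_sketch(stdout, "system-info:\n");
	pr_yaml_sketch(stdout, "      pagesize: %zu\n", (size_t)4096);
	return 0;
}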
Example #2
/*
 *  stress_affinity()
 *	stress the scheduler by periodically changing CPU affinity
 */
int stress_affinity(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	unsigned long int cpu = 0;
	const unsigned long int cpus =
		(unsigned long int)stress_get_processors_configured();
	cpu_set_t mask;

	(void)instance;

	do {
		cpu = (opt_flags & OPT_FLAGS_AFFINITY_RAND) ?
			(mwc32() >> 4) : cpu + 1;
		cpu %= cpus;
		CPU_ZERO(&mask);
		CPU_SET(cpu, &mask);
		if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
			pr_fail(stderr, "%s: failed to move to CPU %lu\n",
				name, cpu);
#if defined(_POSIX_PRIORITY_SCHEDULING)
			sched_yield();
#endif
		} else {
			/* Now get and check */
			CPU_ZERO(&mask);
			CPU_SET(cpu, &mask);
			sched_getaffinity(0, sizeof(mask), &mask);
			if ((opt_flags & OPT_FLAGS_VERIFY) &&
			    (!CPU_ISSET(cpu, &mask)))
				pr_fail(stderr, "%s: failed to move to CPU %lu\n",
					name, cpu);
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
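The set-then-verify affinity pattern above can be exercised standalone. A minimal sketch, assuming Linux and pinning the calling process to CPU 0:

#define _GNU_SOURCE
#include <sched.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	cpu_set_t mask;
	const unsigned long int cpu = 0;	/* pin to CPU 0 */

	CPU_ZERO(&mask);
	CPU_SET(cpu, &mask);
	if (sched_setaffinity(0, sizeof(mask), &mask) < 0) {
		perror("sched_setaffinity");
		return EXIT_FAILURE;
	}
	/* read the mask back and check the kernel applied it */
	CPU_ZERO(&mask);
	if (sched_getaffinity(0, sizeof(mask), &mask) < 0) {
		perror("sched_getaffinity");
		return EXIT_FAILURE;
	}
	printf("pinned to CPU %lu: %s\n", cpu,
		CPU_ISSET(cpu, &mask) ? "yes" : "no");
	return EXIT_SUCCESS;
}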
Example #3
/*
 *  stress_tlb_shootdown()
 *	stress out TLB shootdowns
 */
static int stress_tlb_shootdown(const args_t *args)
{
	const size_t page_size = args->page_size;
	const size_t mmap_size = page_size * MMAP_PAGES;
	pid_t pids[MAX_TLB_PROCS];
	cpu_set_t proc_mask_initial;

	if (sched_getaffinity(0, sizeof(proc_mask_initial), &proc_mask_initial) < 0) {
		pr_fail_err("could not get CPU affinity");
		return EXIT_FAILURE;
	}

	do {
		uint8_t *mem, *ptr;
		int retry = 128;
		cpu_set_t proc_mask;
		int32_t tlb_procs, i;
		const int32_t max_cpus = stress_get_processors_configured();

		CPU_ZERO(&proc_mask);
		CPU_OR(&proc_mask, &proc_mask_initial, &proc_mask);

		tlb_procs = max_cpus;
		if (tlb_procs > MAX_TLB_PROCS)
			tlb_procs = MAX_TLB_PROCS;
		if (tlb_procs < MIN_TLB_PROCS)
			tlb_procs = MIN_TLB_PROCS;

		for (;;) {
			mem = mmap(NULL, mmap_size, PROT_WRITE | PROT_READ,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0);
			if ((void *)mem == MAP_FAILED) {
				if ((errno == EAGAIN) ||
				    (errno == ENOMEM) ||
				    (errno == ENFILE)) {
					if (--retry < 0)
						return EXIT_NO_RESOURCE;
				} else {
					pr_fail_err("mmap");
					return EXIT_NO_RESOURCE;
				}
			} else {
				break;
			}
		}
		(void)memset(mem, 0, mmap_size);

		for (i = 0; i < tlb_procs; i++)
			pids[i] = -1;

		for (i = 0; i < tlb_procs; i++) {
			int32_t j, cpu = -1;

			for (j = 0; j < max_cpus; j++) {
				if (CPU_ISSET(j, &proc_mask)) {
					cpu = j;
					CPU_CLR(j, &proc_mask);
					break;
				}
			}
			if (cpu == -1)
				break;

			pids[i] = fork();
			if (pids[i] < 0)
				break;
			if (pids[i] == 0) {
				cpu_set_t mask;
				char buffer[page_size];

				(void)setpgid(0, g_pgrp);
				stress_parent_died_alarm();

				/* Make sure this is killable by OOM killer */
				set_oom_adjustment(args->name, true);

				CPU_ZERO(&mask);
				CPU_SET(cpu % max_cpus, &mask);
				(void)sched_setaffinity(0, sizeof(mask), &mask);

				for (ptr = mem; ptr < mem + mmap_size; ptr += page_size) {
					/* Force tlb shoot down on page */
					(void)mprotect(ptr, page_size, PROT_READ);
					(void)memcpy(buffer, ptr, page_size);
					(void)munmap(ptr, page_size);
				}
				_exit(0);
			}
		}

		for (i = 0; i < tlb_procs; i++) {
			if (pids[i] != -1) {
				int status, ret;

				ret = shim_waitpid(pids[i], &status, 0);
				if ((ret < 0) && (errno == EINTR)) {
					int j;

					/*
					 * We got interrupted, so assume
					 * it was the alarm (timed out) or
					 * SIGINT, so force terminate
					 */
					for (j = i; j < tlb_procs; j++) {
						if (pids[j] != -1)
							(void)kill(pids[j], SIGKILL);
					}

					/* re-wait on the failed wait */
					(void)shim_waitpid(pids[i], &status, 0);

					/* and continue waitpid on the pids */
				}
			}
		}
		(void)munmap(mem, mmap_size);
		(void)sched_setaffinity(0, sizeof(proc_mask_initial), &proc_mask_initial);
		inc_counter(args);
	} while (keep_stressing());

	return EXIT_SUCCESS;
}
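The per-page protection flip driving the shootdowns can be isolated from the fork machinery. A minimal single-process sketch, assuming 16 pages is enough to demonstrate the pattern:

#define _GNU_SOURCE
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

int main(void)
{
	const size_t page_size = (size_t)sysconf(_SC_PAGESIZE);
	const size_t n_pages = 16;	/* arbitrary for the sketch */
	const size_t mmap_size = page_size * n_pages;
	uint8_t *mem, *ptr;
	char buffer[page_size];

	mem = mmap(NULL, mmap_size, PROT_WRITE | PROT_READ,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (mem == MAP_FAILED) {
		perror("mmap");
		return EXIT_FAILURE;
	}
	(void)memset(mem, 0, mmap_size);

	for (ptr = mem; ptr < mem + mmap_size; ptr += page_size) {
		/* each protection change and unmap invalidates TLB entries */
		(void)mprotect(ptr, page_size, PROT_READ);
		(void)memcpy(buffer, ptr, page_size);
		(void)munmap(ptr, page_size);
	}
	return EXIT_SUCCESS;
}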
Example #4
/*
 *  stress_memthrash()
 *	stress memory by thrashing it from multiple pthreads
 */
static int stress_memthrash(const args_t *args)
{
	const stress_memthrash_method_info_t *memthrash_method = &memthrash_methods[0];
	const uint32_t total_cpus = stress_get_processors_configured();
	const uint32_t max_threads = stress_memthrash_max(args->num_instances, total_cpus);
	pthread_t pthreads[max_threads];
	int ret[max_threads];
	pthread_args_t pargs;
	memthrash_func_t func;
	pid_t pid;

	(void)get_setting("memthrash-method", &memthrash_method);
	func = memthrash_method->func;

	pr_dbg("%s: using method '%s'\n", args->name, memthrash_method->name);
	if (args->instance == 0) {
		pr_inf("%s: starting %" PRIu32 " thread%s on each of the %"
			PRIu32 " stressors on a %" PRIu32 " CPU system\n",
			args->name, max_threads, plural(max_threads),
			args->num_instances, total_cpus);
		if (max_threads * args->num_instances > total_cpus) {
			pr_inf("%s: this is not an optimal choice of stressors, "
				"try %" PRIu32 " instead\n",
			args->name,
			stress_memthash_optimal(args->num_instances, total_cpus));
		}
	}

	pargs.args = args;
	pargs.data = func;

	(void)memset(pthreads, 0, sizeof(pthreads));
	(void)memset(ret, 0, sizeof(ret));
	(void)sigfillset(&set);

again:
	if (!g_keep_stressing_flag)
		return EXIT_SUCCESS;
	pid = fork();
	if (pid < 0) {
		if ((errno == EAGAIN) || (errno == ENOMEM))
			goto again;
		pr_err("%s: fork failed: errno=%d: (%s)\n",
			args->name, errno, strerror(errno));
	} else if (pid > 0) {
		int status, waitret;

		/* Parent, wait for child */
		(void)setpgid(pid, g_pgrp);
		waitret = shim_waitpid(pid, &status, 0);
		if (waitret < 0) {
			if (errno != EINTR)
				pr_dbg("%s: waitpid(): errno=%d (%s)\n",
					args->name, errno, strerror(errno));
			(void)kill(pid, SIGTERM);
			(void)kill(pid, SIGKILL);
			(void)shim_waitpid(pid, &status, 0);
		} else if (WIFSIGNALED(status)) {
			pr_dbg("%s: child died: %s (instance %d)\n",
				args->name, stress_strsignal(WTERMSIG(status)),
				args->instance);
			/* If we got killed by OOM killer, re-start */
			if (WTERMSIG(status) == SIGKILL) {
				log_system_mem_info();
				pr_dbg("%s: assuming killed by OOM killer, "
					"restarting again (instance %d)\n",
					args->name, args->instance);
				goto again;
			}
		}
	} else if (pid == 0) {
		uint32_t i;

		/* Make sure this is killable by OOM killer */
		set_oom_adjustment(args->name, true);

		int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_POPULATE)
		flags |= MAP_POPULATE;
#endif

mmap_retry:
		mem = mmap(NULL, MEM_SIZE, PROT_READ | PROT_WRITE, flags, -1, 0);
		if (mem == MAP_FAILED) {
#if defined(MAP_POPULATE)
			flags &= ~MAP_POPULATE;	/* Less aggressive, more OOMable */
#endif
			if (!g_keep_stressing_flag) {
				pr_dbg("%s: mmap failed: %d %s\n",
					args->name, errno, strerror(errno));
				return EXIT_NO_RESOURCE;
			}
			(void)shim_usleep(100000);
			if (!g_keep_stressing_flag)
				goto reap_mem;
			goto mmap_retry;
		}

		for (i = 0; i < max_threads; i++) {
			ret[i] = pthread_create(&pthreads[i], NULL,
				stress_memthrash_func, (void *)&pargs);
			if (ret[i]) {
				/* Just give up and go to next thread */
				if (ret[i] == EAGAIN)
					continue;
				/* Something really unexpected */
				pr_fail_errno("pthread create", ret[i]);
				goto reap;
			}
			if (!g_keep_stressing_flag)
				goto reap;
		}
		/* Wait for SIGALRM or SIGINT/SIGHUP etc */
		(void)pause();

reap:
		thread_terminate = true;
		for (i = 0; i < max_threads; i++) {
			if (!ret[i]) {
				ret[i] = pthread_join(pthreads[i], NULL);
				if (ret[i])
					pr_fail_errno("pthread join", ret[i]);
			}
		}
reap_mem:
		(void)munmap(mem, MEM_SIZE);
	}
	return EXIT_SUCCESS;
}
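Note that mem, set and thread_terminate used above are file-scope variables in stress-ng; they are not part of this excerpt. The create/join skeleton the child runs reduces to the following minimal sketch (compile with -pthread; MAX_THREADS and worker() are placeholder names):

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>

#define MAX_THREADS	(4)

static void *worker(void *arg)
{
	(void)arg;
	/* real thrashing work would go here */
	return NULL;
}

int main(void)
{
	pthread_t pthreads[MAX_THREADS];
	int ret[MAX_THREADS];
	int i;

	for (i = 0; i < MAX_THREADS; i++)
		ret[i] = -1;	/* -1: thread not created */

	for (i = 0; i < MAX_THREADS; i++) {
		ret[i] = pthread_create(&pthreads[i], NULL, worker, NULL);
		if (ret[i] == EAGAIN)
			continue;	/* out of resources, skip this one */
		if (ret[i]) {
			fprintf(stderr, "pthread_create: %d\n", ret[i]);
			break;		/* something really unexpected */
		}
	}
	/* join only the threads that were successfully created */
	for (i = 0; i < MAX_THREADS; i++) {
		if (!ret[i])
			(void)pthread_join(pthreads[i], NULL);
	}
	return EXIT_SUCCESS;
}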
Example #5
/*
 *  stress_shm_sysv_child()
 * 	stress out the shm allocations. This can be killed by
 *	the out of memory killer, so we need to keep the parent
 *	informed of the allocated shared memory ids so these can
 *	be reaped cleanly if this process gets prematurely killed.
 */
static int stress_shm_sysv_child(
	const int fd,
	uint64_t *const counter,
	const uint64_t max_ops,
	const char *name,
	const size_t max_sz,
	const size_t page_size)
{
	struct sigaction new_action;
	void *addrs[MAX_SHM_SYSV_SEGMENTS];
	key_t keys[MAX_SHM_SYSV_SEGMENTS];
	int shm_ids[MAX_SHM_SYSV_SEGMENTS];
	shm_msg_t msg;
	size_t i;
	int rc = EXIT_SUCCESS;
	bool ok = true;
	int mask = ~0;
	int instances;

	new_action.sa_handler = handle_shm_sysv_sigalrm;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = 0;
	if (sigaction(SIGALRM, &new_action, NULL) < 0) {
		pr_fail_err(name, "sigaction");
		return EXIT_FAILURE;
	}

	memset(addrs, 0, sizeof(addrs));
	memset(keys, 0, sizeof(keys));
	for (i = 0; i < MAX_SHM_SYSV_SEGMENTS; i++)
		shm_ids[i] = -1;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	if ((instances = stressor_instances(STRESS_SHM_SYSV)) < 1)
		instances = (int)stress_get_processors_configured();
	/* Should never happen, but be safe */
	if (instances < 1)
		instances = 1;

	do {
		size_t sz = max_sz;

		for (i = 0; i < opt_shm_sysv_segments; i++) {
			int shm_id, count = 0;
			void *addr;
			key_t key;
			size_t shmall, freemem, totalmem;

			/* Try hard not to overcommit at this current time */
			stress_get_memlimits(&shmall, &freemem, &totalmem);
			shmall /= instances;
			freemem /= instances;
			if ((shmall > page_size) && sz > shmall)
				sz = shmall;
			if ((freemem > page_size) && sz > freemem)
				sz = freemem;
			if (!opt_do_run)
				goto reap;

			for (count = 0; count < KEY_GET_RETRIES; count++) {
				bool unique = true;
				const int rnd =
					mwc32() % SIZEOF_ARRAY(shm_flags);
				const int rnd_flag = shm_flags[rnd] & mask;

				if (sz < page_size)
					goto reap;

				/* Get a unique key */
				do {
					size_t j;

					if (!opt_do_run)
						goto reap;

					/* Get a unique random key */
					unique = true;
					key = (key_t)mwc16();
					for (j = 0; j < i; j++) {
						if (key == keys[j]) {
							unique = false;
							break;
						}
					}
					if (!opt_do_run)
						goto reap;

				} while (!unique);

				shm_id = shmget(key, sz,
					IPC_CREAT | IPC_EXCL |
					S_IRUSR | S_IWUSR | rnd_flag);
				if (shm_id >= 0)
					break;
				if (errno == EINTR)
					goto reap;
				if (errno == EPERM) {
					/* ignore using the flag again */
					mask &= ~rnd_flag;
				}
				if ((errno == EINVAL) || (errno == ENOMEM)) {
					/*
					 * On some systems we may need
					 * to reduce the size
					 */
					sz = sz / 2;
				}
			}
			if (shm_id < 0) {
				ok = false;
				pr_fail(stderr, "%s: shmget failed: errno=%d (%s)\n",
					name, errno, strerror(errno));
				rc = EXIT_FAILURE;
				goto reap;
			}

			/* Inform parent of the new shm ID */
			msg.index = i;
			msg.shm_id = shm_id;
			if (write(fd, &msg, sizeof(msg)) < 0) {
				pr_err(stderr, "%s: write failed: errno=%d: (%s)\n",
					name, errno, strerror(errno));
				rc = EXIT_FAILURE;
				goto reap;
			}

			addr = shmat(shm_id, NULL, 0);
			if (addr == (void *)-1) {
				ok = false;
				pr_fail(stderr, "%s: shmat failed: errno=%d (%s)\n",
					name, errno, strerror(errno));
				rc = EXIT_FAILURE;
				goto reap;
			}
			addrs[i] = addr;
			shm_ids[i] = shm_id;
			keys[i] = key;

			if (!opt_do_run)
				goto reap;
			(void)mincore_touch_pages(addr, sz);

			if (!opt_do_run)
				goto reap;
			(void)madvise_random(addr, sz);

			if (!opt_do_run)
				goto reap;
			if (stress_shm_sysv_check(addr, sz, page_size) < 0) {
				ok = false;
				pr_fail(stderr, "%s: memory check failed\n", name);
				rc = EXIT_FAILURE;
				goto reap;
			}
			(*counter)++;
		}
reap:
		for (i = 0; i < opt_shm_sysv_segments; i++) {
			if (addrs[i]) {
				if (shmdt(addrs[i]) < 0) {
					pr_fail(stderr, "%s: shmdt failed: errno=%d (%s)\n",
						name, errno, strerror(errno));
				}
			}
			if (shm_ids[i] >= 0) {
				if (shmctl(shm_ids[i], IPC_RMID, NULL) < 0) {
					if (errno != EIDRM)
						pr_fail(stderr, "%s: shmctl failed: errno=%d (%s)\n",
							name, errno, strerror(errno));
				}
			}

			/* Inform parent shm ID is now free */
			msg.index = i;
			msg.shm_id = -1;
			if (write(fd, &msg, sizeof(msg)) < 0) {
				pr_dbg(stderr, "%s: write failed: errno=%d: (%s)\n",
					name, errno, strerror(errno));
				ok = false;
			}
			addrs[i] = NULL;
			shm_ids[i] = -1;
			keys[i] = 0;
		}
	} while (ok && opt_do_run && (!max_ops || *counter < max_ops));

	/* Inform parent of end of run */
	msg.index = -1;
	msg.shm_id = -1;
	if (write(fd, &msg, sizeof(msg)) < 0) {
		pr_err(stderr, "%s: write failed: errno=%d: (%s)\n",
			name, errno, strerror(errno));
		rc = EXIT_FAILURE;
	}

	return rc;
}
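A minimal sketch of the allocate/attach/detach/remove lifecycle the child drives; IPC_PRIVATE stands in for the random-key shmget() above, and the final IPC_RMID matters because System V segments outlive the process otherwise:

#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ipc.h>
#include <sys/shm.h>
#include <sys/stat.h>

int main(void)
{
	const size_t sz = 64 * 1024;	/* arbitrary segment size */
	int shm_id;
	void *addr;

	shm_id = shmget(IPC_PRIVATE, sz, IPC_CREAT | S_IRUSR | S_IWUSR);
	if (shm_id < 0) {
		perror("shmget");
		return EXIT_FAILURE;
	}
	addr = shmat(shm_id, NULL, 0);
	if (addr == (void *)-1) {
		perror("shmat");
		(void)shmctl(shm_id, IPC_RMID, NULL);
		return EXIT_FAILURE;
	}
	(void)memset(addr, 0xaa, sz);	/* touch the whole segment */

	if (shmdt(addr) < 0)
		perror("shmdt");
	/* remove the id so the segment cannot leak past exit */
	if (shmctl(shm_id, IPC_RMID, NULL) < 0)
		perror("shmctl");
	return EXIT_SUCCESS;
}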
Example #6
/*
 *  stress_tlb_shootdown()
 *	stress out TLB shootdowns
 */
int stress_tlb_shootdown(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const size_t page_size = stress_get_pagesize();
	const size_t mmap_size = page_size * MMAP_PAGES;
	pid_t pids[MAX_TLB_PROCS];

	(void)instance;

	do {
		uint8_t *mem, *ptr;
		int retry = 128;
		cpu_set_t proc_mask;
		int32_t cpus, tlb_procs, i;
		const int32_t max_cpus = stress_get_processors_configured();

		if (sched_getaffinity(0, sizeof(proc_mask), &proc_mask) < 0) {
			pr_fail(stderr, "%s: could not get CPU affinity: "
				"errno=%d, (%s)\n",
				name, errno, strerror(errno));
			return EXIT_FAILURE;
		}
		cpus = CPU_COUNT(&proc_mask);
		tlb_procs = STRESS_MINIMUM(cpus, MAX_TLB_PROCS);

		for (;;) {
			mem = mmap(NULL, mmap_size, PROT_WRITE | PROT_READ,
			MAP_SHARED | MAP_ANONYMOUS, -1, 0);
			if ((void *)mem == MAP_FAILED) {
				if ((errno == EAGAIN) || (errno == ENOMEM)) {
					if (--retry < 0)
						return EXIT_NO_RESOURCE;
				} else {
					pr_fail(stderr, "%s: mmap failed: "
						"errno=%d (%s)\n",
						name, errno, strerror(errno));
				}
			} else {
				break;
			}
		}
		memset(mem, 0, mmap_size);

		for (i = 0; i < tlb_procs; i++)
			pids[i] = -1;

		for (i = 0; i < tlb_procs; i++) {
			int32_t j, cpu = -1;

			for (j = 0; j < max_cpus; j++) {
				if (CPU_ISSET(j, &proc_mask)) {
					cpu = j;
					CPU_CLR(j, &proc_mask);
					break;
				}
			}
			if (cpu == -1)
				break;

			pids[i] = fork();
			if (pids[i] < 0)
				break;
			if (pids[i] == 0) {
				cpu_set_t mask;
				char buffer[page_size];

				CPU_ZERO(&mask);
				CPU_SET(cpu % max_cpus, &mask);
				(void)sched_setaffinity(getpid(), sizeof(mask), &mask);

				for (ptr = mem; ptr < mem + mmap_size; ptr += page_size) {
					/* Force tlb shoot down on page */
					(void)mprotect(ptr, page_size, PROT_READ);
					memcpy(buffer, ptr, page_size);
					(void)munmap(ptr, page_size);
				}
				_exit(0);
			}
		}

		for (i = 0; i < tlb_procs; i++) {
			if (pids[i] != -1) {
				int status;

				kill(pids[i], SIGKILL);
				(void)waitpid(pids[i], &status, 0);
			}
		}
		(void)munmap(mem, mmap_size);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
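The kill-then-reap pattern that ends each iteration can be isolated. A minimal sketch, assuming four children that merely wait to be terminated:

#include <signal.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/types.h>
#include <sys/wait.h>
#include <unistd.h>

#define N_PROCS	(4)

int main(void)
{
	pid_t pids[N_PROCS];
	int i;

	for (i = 0; i < N_PROCS; i++)
		pids[i] = -1;

	for (i = 0; i < N_PROCS; i++) {
		pids[i] = fork();
		if (pids[i] < 0)
			break;		/* out of resources, reap what we have */
		if (pids[i] == 0) {
			(void)pause();	/* child: block until killed */
			_exit(0);
		}
	}
	/* forcibly terminate and reap every child that was forked */
	for (i = 0; i < N_PROCS; i++) {
		if (pids[i] > 0) {
			int status;

			(void)kill(pids[i], SIGKILL);
			(void)waitpid(pids[i], &status, 0);
		}
	}
	return EXIT_SUCCESS;
}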
Example #7
/*
 *  stress_mmapfork()
 *	stress mappings + fork VM subsystem
 */
int stress_mmapfork(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pids[MAX_PIDS];
	struct sysinfo info;
	void *ptr;
	int instances;

	(void)instance;

	if ((instances = stressor_instances(STRESS_MMAPFORK)) < 1)
		instances = (int)stress_get_processors_configured();

	do {
		size_t i, n;
		size_t len;

		memset(pids, 0, sizeof(pids));

		for (n = 0; n < MAX_PIDS; n++) {
retry:			if (!opt_do_run)
				goto reap;

			pids[n] = fork();
			if (pids[n] < 0) {
				/* Out of resources for fork, re-do, ugh */
				if (errno == EAGAIN) {
					usleep(10000);
					goto retry;
				}
				break;
			}
			if (pids[n] == 0) {
				/* Child */
				if (sysinfo(&info) < 0) {
					pr_failed_err(name, "sysinfo");
					_exit(0);
				}
				len = ((size_t)info.freeram / (instances * MAX_PIDS)) / 2;
				ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
					MAP_POPULATE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
				if (ptr != MAP_FAILED) {
					madvise(ptr, len, MADV_WILLNEED);
					memset(ptr, 0, len);
					madvise(ptr, len, MADV_DONTNEED);
					munmap(ptr, len);
				}
				_exit(0);
			}
		}
reap:
		for (i = 0; i < n; i++) {
			int status;

			if (waitpid(pids[i], &status, 0) < 0) {
				if (errno != EINTR)
					pr_err(stderr, "%s: waitpid errno=%d (%s)\n",
						name, errno, strerror(errno));
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
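One detail about the sysinfo() sizing in the child: freeram is expressed in units of mem_unit bytes, and the excerpt above does not scale by it (harmless where mem_unit is 1, as on typical x86 Linux). A minimal sketch with the scaling applied; the MAX_PIDS value and single-instance assumption are hypothetical:

#include <stdio.h>
#include <stdlib.h>
#include <sys/sysinfo.h>

#define MAX_PIDS	(32)	/* assumed, not the stress-ng value */

int main(void)
{
	struct sysinfo info;
	const int instances = 1;	/* assume one stressor instance */
	size_t len;

	if (sysinfo(&info) < 0) {
		perror("sysinfo");
		return EXIT_FAILURE;
	}
	/* freeram is in mem_unit byte units */
	len = (((size_t)info.freeram * info.mem_unit) /
		((size_t)instances * MAX_PIDS)) / 2;
	printf("each child would map %zu bytes\n", len);
	return EXIT_SUCCESS;
}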