Example #1
/*
 *  stress_sem()
 *	stress system by POSIX sem ops
 */
static int stress_sem(const args_t *args)
{
	uint64_t semaphore_posix_procs = DEFAULT_SEMAPHORE_PROCS;
	uint64_t i;
	bool created = false;
	pthread_args_t p_args;

	if (!get_setting("sem-procs", &semaphore_posix_procs)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			semaphore_posix_procs = MAX_SEMAPHORE_PROCS;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			semaphore_posix_procs = MIN_SEMAPHORE_PROCS;
	}

	/* create a semaphore */
	if (sem_init(&sem, 0, 1) < 0) {
		pr_err("semaphore init (POSIX) failed: errno=%d: "
			"(%s)\n", errno, strerror(errno));
		return EXIT_FAILURE;
	}

	(void)memset(pthreads, 0, sizeof(pthreads));
	(void)memset(p_ret, 0, sizeof(p_ret));

	for (i = 0; i < semaphore_posix_procs; i++) {
		p_args.args = args;
		p_args.data = NULL;
		p_ret[i] = pthread_create(&pthreads[i], NULL,
			semaphore_posix_thrash, (void *)&p_args);
		if ((p_ret[i]) && (p_ret[i] != EAGAIN)) {
			pr_fail_errno("pthread create", p_ret[i]);
			break;
		}
		created = true;
		if (!g_keep_stressing_flag)
			break;
	}

	if (!created) {
		pr_inf("%s: could not create any pthreads\n", args->name);
		(void)sem_destroy(&sem);
		return EXIT_NO_RESOURCE;
	}

	/* Wait for termination */
	while (keep_stressing())
		(void)shim_usleep(100000);

	for (i = 0; i < semaphore_posix_procs; i++) {
		int ret;

		if (p_ret[i])
			continue;

		ret = pthread_join(pthreads[i], NULL);
		(void)ret;
	}
	(void)sem_destroy(&sem);

	return EXIT_SUCCESS;
}
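
Below is a minimal, self-contained sketch of the same pattern: several pthreads contending on one unnamed POSIX semaphore. It is illustrative only; NUM_THREADS and worker() are invented names, not stress-ng identifiers. Build with -pthread.

#include <errno.h>
#include <pthread.h>
#include <semaphore.h>
#include <stdio.h>
#include <string.h>

#define NUM_THREADS	4

static sem_t sem;

static void *worker(void *arg)
{
	int i;

	(void)arg;
	for (i = 0; i < 100000; i++) {
		(void)sem_wait(&sem);	/* enter critical section */
		(void)sem_post(&sem);	/* leave critical section */
	}
	return NULL;
}

int main(void)
{
	pthread_t threads[NUM_THREADS];
	int i, ret;

	/* initial value 1, so the semaphore acts like a mutex */
	if (sem_init(&sem, 0, 1) < 0) {
		fprintf(stderr, "sem_init: %s\n", strerror(errno));
		return 1;
	}
	for (i = 0; i < NUM_THREADS; i++) {
		ret = pthread_create(&threads[i], NULL, worker, NULL);
		if (ret) {
			fprintf(stderr, "pthread_create: %s\n", strerror(ret));
			break;
		}
	}
	/* join only the threads that were actually created */
	while (--i >= 0)
		(void)pthread_join(threads[i], NULL);
	(void)sem_destroy(&sem);
	return 0;
}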
Example #2
/*
 *  stress_memthrash()
 *	stress memory by thrashing it with multiple pthreads
 */
static int stress_memthrash(const args_t *args)
{
	const stress_memthrash_method_info_t *memthrash_method = &memthrash_methods[0];
	const uint32_t total_cpus = stress_get_processors_configured();
	const uint32_t max_threads = stress_memthrash_max(args->num_instances, total_cpus);
	pthread_t pthreads[max_threads];
	int ret[max_threads];
	pthread_args_t pargs;
	memthrash_func_t func;
	pid_t pid;

	(void)get_setting("memthrash-method", &memthrash_method);
	func = memthrash_method->func;

	pr_dbg("%s: using method '%s'\n", args->name, memthrash_method->name);
	if (args->instance == 0) {
		pr_inf("%s: starting %" PRIu32 " thread%s on each of the %"
			PRIu32 " stressors on a %" PRIu32 " CPU system\n",
			args->name, max_threads, plural(max_threads),
			args->num_instances, total_cpus);
		if (max_threads * args->num_instances > total_cpus) {
			pr_inf("%s: this is not an optimal choice of stressors, "
				"try %" PRIu32 " instead\n",
				args->name,
				stress_memthash_optimal(args->num_instances, total_cpus));
		}
	}

	pargs.args = args;
	pargs.data = func;

	(void)memset(pthreads, 0, sizeof(pthreads));
	(void)memset(ret, 0, sizeof(ret));
	(void)sigfillset(&set);

again:
	if (!g_keep_stressing_flag)
		return EXIT_SUCCESS;
	pid = fork();
	if (pid < 0) {
		if ((errno == EAGAIN) || (errno == ENOMEM))
			goto again;
		pr_err("%s: fork failed: errno=%d: (%s)\n",
			args->name, errno, strerror(errno));
		return EXIT_FAILURE;
	} else if (pid > 0) {
		int status, waitret;

		/* Parent, wait for child */
		(void)setpgid(pid, g_pgrp);
		waitret = shim_waitpid(pid, &status, 0);
		if (waitret < 0) {
			if (errno != EINTR)
				pr_dbg("%s: waitpid(): errno=%d (%s)\n",
					args->name, errno, strerror(errno));
			(void)kill(pid, SIGTERM);
			(void)kill(pid, SIGKILL);
			(void)shim_waitpid(pid, &status, 0);
		} else if (WIFSIGNALED(status)) {
			pr_dbg("%s: child died: %s (instance %d)\n",
				args->name, stress_strsignal(WTERMSIG(status)),
				args->instance);
			/* If we got killed by OOM killer, re-start */
			if (WTERMSIG(status) == SIGKILL) {
				log_system_mem_info();
				pr_dbg("%s: assuming killed by OOM killer, "
					"restarting again (instance %d)\n",
					args->name, args->instance);
				goto again;
			}
		}
	} else if (pid == 0) {
		uint32_t i;

		/* Make sure this is killable by OOM killer */
		set_oom_adjustment(args->name, true);

		int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_POPULATE)
		flags |= MAP_POPULATE;
#endif

mmap_retry:
		mem = mmap(NULL, MEM_SIZE, PROT_READ | PROT_WRITE, flags, -1, 0);
		if (mem == MAP_FAILED) {
#if defined(MAP_POPULATE)
			flags &= ~MAP_POPULATE;	/* Less aggressive, more OOMable */
#endif
			if (!g_keep_stressing_flag) {
				pr_dbg("%s: mmap failed: errno=%d (%s)\n",
					args->name, errno, strerror(errno));
				return EXIT_NO_RESOURCE;
			}
			(void)shim_usleep(100000);
			if (!g_keep_stressing_flag)
				return EXIT_NO_RESOURCE;	/* no mapping to release */
			goto mmap_retry;
		}

		for (i = 0; i < max_threads; i++) {
			ret[i] = pthread_create(&pthreads[i], NULL,
				stress_memthrash_func, (void *)&pargs);
			if (ret[i]) {
				/* Just give up and go to next thread */
				if (ret[i] == EAGAIN)
					continue;
				/* Something really unexpected */
				pr_fail_errno("pthread create", ret[i]);
				goto reap;
			}
			if (!g_keep_stressing_flag)
				goto reap;
		}
		/* Wait for SIGALRM or SIGINT/SIGHUP etc */
		(void)pause();

reap:
		thread_terminate = true;
		for (i = 0; i < max_threads; i++) {
			if (!ret[i]) {
				ret[i] = pthread_join(pthreads[i], NULL);
				if (ret[i])
					pr_fail_errno("pthread join", ret[i]);
			}
		}
		(void)munmap(mem, MEM_SIZE);
	}
	return EXIT_SUCCESS;
}
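
A minimal sketch of the MAP_POPULATE fallback used in the child above: try an eagerly populated mapping first, then retry without MAP_POPULATE so the mapping is less aggressive and pages are faulted in on demand. MEM_SIZE here is an illustrative constant, not the stress-ng one; the code is Linux-specific.

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

#define MEM_SIZE	(16 * 1024 * 1024)

int main(void)
{
	int flags = MAP_PRIVATE | MAP_ANONYMOUS;
	void *mem;

#if defined(MAP_POPULATE)
	flags |= MAP_POPULATE;		/* pre-fault all pages up front */
#endif
	mem = mmap(NULL, MEM_SIZE, PROT_READ | PROT_WRITE, flags, -1, 0);
#if defined(MAP_POPULATE)
	if (mem == MAP_FAILED) {
		/* retry lazily populated; the kernel faults pages on demand */
		flags &= ~MAP_POPULATE;
		mem = mmap(NULL, MEM_SIZE, PROT_READ | PROT_WRITE, flags, -1, 0);
	}
#endif
	if (mem == MAP_FAILED) {
		fprintf(stderr, "mmap: %s\n", strerror(errno));
		return 1;
	}
	memset(mem, 0xaa, MEM_SIZE);	/* touch every page */
	(void)munmap(mem, MEM_SIZE);
	return 0;
}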
Example #3
/*
 *  stress_pthread_func()
 *	pthread that waits on a condition variable until told to terminate
 */
static void *stress_pthread_func(void *ctxt)
{
	uint8_t stack[SIGSTKSZ];
	stack_t ss;
	static void *nowt = NULL;
	int ret;

	(void)ctxt;

	/*
	 *  Block all signals, let controlling thread
	 *  handle these
	 */
	(void)pthread_sigmask(SIG_BLOCK, &set, NULL);

	/*
	 *  According to POSIX.1 a thread should have
	 *  a distinct alternative signal stack.
	 *  However, we block signals in this thread
	 *  so this is probably just totally unnecessary.
	 */
	ss.ss_sp = (void *)stack;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) < 0) {
		pr_fail_err("pthread", "sigaltstack");
		goto die;
	}

	/*
	 *  Bump count of running threads
	 */
	ret = pthread_mutex_lock(&mutex);
	if (ret) {
		pr_fail_errno("pthread", "mutex lock", ret);
		goto die;
	}
	pthread_count++;
	ret = pthread_mutex_unlock(&mutex);
	if (ret) {
		pr_fail_errno("pthread", "mutex unlock", ret);
		goto die;
	}

	/*
	 *  Wait for controlling thread to
	 *  indicate it is time to die
	 */
	ret = pthread_mutex_lock(&mutex);
	if (ret) {
		pr_fail_errno("pthread", "mutex lock", ret);
		goto die;
	}
	while (!thread_terminate) {
		ret = pthread_cond_wait(&cond, &mutex);
		if (ret) {
			pr_fail_errno("pthread",
				"pthread condition wait", ret);
			break;
		}
	}
	ret = pthread_mutex_unlock(&mutex);
	if (ret)
		pr_fail_errno("pthread", "mutex unlock", ret);
die:
	return &nowt;
}
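
The shutdown handshake above can be reduced to the following sketch: a worker bumps a shared counter under a mutex, then sleeps on a condition variable until the controlling thread sets a terminate flag and broadcasts. The names (worker, running, terminate) are illustrative. Build with -pthread.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool terminate = false;
static int running = 0;

static void *worker(void *arg)
{
	(void)arg;

	pthread_mutex_lock(&mutex);
	running++;			/* announce that we are up */
	while (!terminate)		/* loop: spurious wakeups happen */
		pthread_cond_wait(&cond, &mutex);
	pthread_mutex_unlock(&mutex);
	return NULL;
}

int main(void)
{
	pthread_t tid;

	pthread_create(&tid, NULL, worker, NULL);

	pthread_mutex_lock(&mutex);
	terminate = true;		/* change the flag under the lock */
	pthread_cond_broadcast(&cond);	/* wake all waiters */
	pthread_mutex_unlock(&mutex);

	pthread_join(tid, NULL);
	printf("%d worker(s) ran\n", running);
	return 0;
}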
Example #4
/*
 *  stress_aio
 *	stress asynchronous I/O
 */
int stress_aio(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int ret, fd, rc = EXIT_FAILURE;
	io_req_t *io_reqs;
	struct sigaction sa, sa_old;
	int i;
	uint64_t total = 0;
	char filename[PATH_MAX];
	const pid_t pid = getpid();

	/* use calloc rather than alloca: alloca cannot report failure
	 * and a large request could overflow the stack */
	if ((io_reqs = calloc((size_t)opt_aio_requests, sizeof(*io_reqs))) == NULL) {
		pr_err(stderr, "%s: cannot allocate io request structures\n", name);
		return EXIT_NO_RESOURCE;
	}

	ret = stress_temp_dir_mk(name, pid, instance);
	if (ret < 0) {
		free(io_reqs);
		return exit_status(-ret);
	}

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto finish;
	}
	(void)unlink(filename);

	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART | SA_SIGINFO;
	sa.sa_sigaction = aio_signal_handler;
	if (sigaction(SIGUSR1, &sa, &sa_old) < 0) {
		pr_fail_err(name, "sigaction");
		(void)close(fd);
		goto finish;
	}

	/* Kick off requests */
	for (i = 0; i < opt_aio_requests; i++) {
		aio_fill_buffer(i, io_reqs[i].buffer, BUFFER_SZ);
		ret = issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i, aio_write);
		if (ret < 0)
			goto cancel;
		if (ret > 0) {
			rc = EXIT_SUCCESS;
			goto cancel;
		}
	}

	do {
		(void)usleep(250000); /* sleep; completion signals may interrupt us */

		for (i = 0; opt_do_run && (i < opt_aio_requests); i++) {
			if (io_reqs[i].status != EINPROGRESS)
				continue;

			io_reqs[i].status = aio_error(&io_reqs[i].aiocb);
			switch (io_reqs[i].status) {
			case ECANCELED:
			case 0:
				/* Succeeded or cancelled, so redo another */
				(*counter)++;
				if (issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i,
					(mwc32() & 0x8) ? aio_read : aio_write) < 0)
					goto cancel;
				break;
			case EINPROGRESS:
				break;
			default:
				/* Something went wrong */
				pr_fail_errno(name, "aio_error", io_reqs[i].status);
				goto cancel;
			}
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;

cancel:
	/* Stop accounting */
	do_accounting = false;
	/* Cancel pending AIO requests */
	for (i = 0; i < opt_aio_requests; i++) {
		aio_issue_cancel(name, &io_reqs[i]);
		total += io_reqs[i].count;
	}
	(void)close(fd);
finish:
	pr_dbg(stderr, "%s: total of %" PRIu64 " async I/O signals caught (instance %d)\n",
		name, total, instance);
	(void)stress_temp_dir_rm(name, pid, instance);
	free(io_reqs);
	return rc;
}
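
A minimal sketch of the POSIX AIO cycle driving the stressor above: submit one aio_write(), poll aio_error() until the request leaves EINPROGRESS, then collect the result with aio_return(). The file path is illustrative; link with -lrt on older glibc.

#include <aio.h>
#include <errno.h>
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	static char buffer[4096] = "hello, aio\n";
	struct aiocb cb;
	int fd, err;

	fd = open("/tmp/aio-demo", O_CREAT | O_RDWR, 0600);
	if (fd < 0) {
		fprintf(stderr, "open: %s\n", strerror(errno));
		return 1;
	}
	(void)unlink("/tmp/aio-demo");	/* file persists until fd closes */

	memset(&cb, 0, sizeof(cb));
	cb.aio_fildes = fd;
	cb.aio_buf = buffer;
	cb.aio_nbytes = sizeof(buffer);
	cb.aio_offset = 0;

	if (aio_write(&cb) < 0) {
		fprintf(stderr, "aio_write: %s\n", strerror(errno));
		return 1;
	}
	/* poll for completion; a real stressor sleeps or uses signals */
	while ((err = aio_error(&cb)) == EINPROGRESS)
		usleep(10000);
	if (err) {
		fprintf(stderr, "aio_error: %s\n", strerror(err));
		return 1;
	}
	printf("wrote %zd bytes asynchronously\n", aio_return(&cb));
	(void)close(fd);
	return 0;
}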
Example #5
/*
 *  stress_pthread()
 *	stress by creating pthreads
 */
int stress_pthread(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pthread_t pthreads[MAX_PTHREAD];
	bool ok = true;
	uint64_t limited = 0, attempted = 0;

	if (!set_pthread_max) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_pthread_max = MAX_PTHREAD;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_pthread_max = MIN_PTHREAD;
	}

	sigfillset(&set);
	do {
		uint64_t i, j;
		int ret;

		thread_terminate = false;
		pthread_count = 0;

		for (i = 0; (i < opt_pthread_max) && (!max_ops || *counter < max_ops); i++) {
			ret = pthread_create(&pthreads[i], NULL,
				stress_pthread_func, NULL);
			if (ret) {
				/* Out of resources, don't try any more */
				if (ret == EAGAIN) {
					limited++;
					break;
				}
				/* Something really unexpected */
				pr_fail_errno(name, "pthread create", ret);
				ok = false;
				break;
			}
			(*counter)++;
			if (!opt_do_run)
				break;
		}
		attempted++;

		/*
		 *  Wait until they are all started or
		 *  we get bored waiting..
		 */
		for (j = 0; j < 1000; j++) {
			bool all_running = false;

			ret = pthread_mutex_lock(&mutex);
			if (ret) {
				pr_fail_errno(name, "mutex lock", ret);
				ok = false;
				goto reap;
			}
			all_running = (pthread_count == i);
			ret = pthread_mutex_unlock(&mutex);
			if (ret) {
				pr_fail_errno(name, "mutex unlock", ret);
				ok = false;
				goto reap;
			}

			if (all_running)
				break;
		}

		ret = pthread_mutex_lock(&mutex);
		if (ret) {
			pr_fail_errno(name, "mutex lock", ret);
			ok = false;
			goto reap;
		}
		thread_terminate = true;
		ret = pthread_cond_broadcast(&cond);
		if (ret) {
			pr_fail_errno(name,
				"pthread condition broadcast", ret);
			ok = false;
			/* fall through and unlock */
		}
		ret = pthread_mutex_unlock(&mutex);
		if (ret) {
			pr_fail_errno(name, "mutex unlock", ret);
			ok = false;
		}
reap:
		for (j = 0; j < i; j++) {
			ret = pthread_join(pthreads[j], NULL);
			if (ret) {
				pr_fail_errno(name, "pthread join", ret);
				ok = false;
			}
		}
	} while (ok && opt_do_run && (!max_ops || *counter < max_ops));

	if (limited) {
		pr_inf(stdout, "%s: %.2f%% of iterations could not reach "
			"requested %" PRIu64 " threads (instance %"
			PRIu32 ")\n",
			name,
			100.0 * (double)limited / (double)attempted,
			opt_pthread_max, instance);
	}

	(void)pthread_cond_destroy(&cond);
	(void)pthread_mutex_destroy(&mutex);

	return EXIT_SUCCESS;
}
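
Finally, the EAGAIN-limited creation loop above reduces to this sketch: keep creating threads until the system runs out of resources, then join exactly the ones that started. MAX_TRY and worker() are illustrative names. Build with -pthread.

#include <errno.h>
#include <pthread.h>
#include <stdio.h>
#include <string.h>

#define MAX_TRY	1024

static void *worker(void *arg)
{
	(void)arg;
	return NULL;
}

int main(void)
{
	pthread_t threads[MAX_TRY];
	int i, n = 0, ret;

	for (i = 0; i < MAX_TRY; i++) {
		ret = pthread_create(&threads[i], NULL, worker, NULL);
		if (ret == EAGAIN)	/* out of resources: stop, not a failure */
			break;
		if (ret) {
			fprintf(stderr, "pthread_create: %s\n", strerror(ret));
			break;
		}
		n++;
	}
	for (i = 0; i < n; i++)
		(void)pthread_join(threads[i], NULL);
	printf("created %d of %d threads\n", n, MAX_TRY);
	return 0;
}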