Example #1
/*
 *  stress_zero
 *	stress reading of /dev/zero
 */
int stress_zero(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd;

	(void)instance;

	if ((fd = open("/dev/zero", O_RDONLY)) < 0) {
		pr_failed_err(name, "open");
		return EXIT_FAILURE;
	}

	do {
		char buffer[4096];
		ssize_t ret;

		ret = read(fd, buffer, sizeof(buffer));
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_failed_err(name, "read");
			(void)close(fd);
			return EXIT_FAILURE;
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
	(void)close(fd);

	return EXIT_SUCCESS;
}
Example #2
/*
 *  stress_timer
 *	stress timers
 */
int stress_timer(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	struct sigaction new_action;
	struct sigevent sev;
	struct itimerspec timer;

	(void)instance;

	if (!set_timer_freq) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_timer_freq = MAX_TIMER_FREQ;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_timer_freq = MIN_TIMER_FREQ;
	}
	rate_ns = opt_timer_freq ? 1000000000 / opt_timer_freq : 1000000000;

	new_action.sa_flags = 0;
	new_action.sa_handler = stress_timer_handler;
	sigemptyset(&new_action.sa_mask);
	if (sigaction(SIGRTMIN, &new_action, NULL) < 0) {
		pr_failed_err(name, "sigaction");
		return EXIT_FAILURE;
	}

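	/*
	 *  Notify via SIGRTMIN on each expiry; the stress_timer_handler
	 *  installed above presumably bumps the global timer_counter
	 *  that the loop below samples into *counter.
	 */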
	sev.sigev_notify = SIGEV_SIGNAL;
	sev.sigev_signo = SIGRTMIN;
	sev.sigev_value.sival_ptr = &timerid;
	if (timer_create(CLOCK_REALTIME, &sev, &timerid) < 0) {
		pr_failed_err(name, "timer_create");
		return EXIT_FAILURE;
	}

	stress_timer_set(&timer);
	if (timer_settime(timerid, 0, &timer, NULL) < 0) {
		pr_failed_err(name, "timer_settime");
		return EXIT_FAILURE;
	}

	do {
		struct timespec req;

		req.tv_sec = 0;
		req.tv_nsec = 10000000;
		(void)nanosleep(&req, NULL);
		*counter = timer_counter;
	} while (opt_do_run && (!max_ops || timer_counter < max_ops));

	if (timer_delete(timerid) < 0) {
		pr_failed_err(name, "timer_delete");
		return EXIT_FAILURE;
	}
	pr_dbg(stderr, "%s: %" PRIu64 " timer overruns (instance %" PRIu32 ")\n",
		name, overruns, instance);

	return EXIT_SUCCESS;
}
Example #3
/*
 *  stress_sendfile
 *	stress reading of a temp file and writing to /dev/null via sendfile
 */
int stress_sendfile(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	char filename[PATH_MAX];
	int fdin, fdout, ret = EXIT_SUCCESS;
	size_t sz;
	const pid_t pid = getpid();

	if (!set_sendfile_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_sendfile_size = MAX_SENDFILE_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_sendfile_size = MIN_SENDFILE_SIZE;
	}
	sz = (size_t)opt_sendfile_size;

	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;

	(void)umask(0077);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	if ((fdin = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_failed_err(name, "open");
		ret = EXIT_FAILURE;
		goto dir_out;
	}
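	/* Pre-allocate sz bytes so sendfile() always has data to copy */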
	(void)posix_fallocate(fdin, (off_t)0, (off_t)sz);
	if ((fdout = open("/dev/null", O_WRONLY)) < 0) {
		pr_failed_err(name, "open");
		ret = EXIT_FAILURE;
		goto close_in;
	}

	do {
		off_t offset = 0;
		if (sendfile(fdout, fdin, &offset, sz) < 0) {
			pr_failed_err(name, "sendfile");
			ret = EXIT_FAILURE;
			goto close_out;
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

close_out:
	(void)close(fdout);
close_in:
	(void)close(fdin);
	(void)unlink(filename);
dir_out:
	(void)stress_temp_dir_rm(name, pid, instance);

	return ret;
}
Example #4
/*
 *  stress_splice
 *	stress copying of /dev/zero to /dev/null
 */
int stress_splice(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd_in, fd_out, fds[2];

	if (!set_splice_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_splice_bytes = MAX_SPLICE_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_splice_bytes = MIN_SPLICE_BYTES;
	}

	(void)instance;

	if (pipe(fds) < 0) {
		pr_failed_err(name, "pipe");
		return EXIT_FAILURE;
	}

	if ((fd_in = open("/dev/zero", O_RDONLY)) < 0) {
		(void)close(fds[0]);
		(void)close(fds[1]);
		pr_failed_err(name, "open");
		return EXIT_FAILURE;
	}
	if ((fd_out = open("/dev/null", O_WRONLY)) < 0) {
		(void)close(fd_in);
		(void)close(fds[0]);
		(void)close(fds[1]);
		pr_failed_err(name, "open");
		return EXIT_FAILURE;
	}

	do {
		int ret;
		ssize_t bytes;

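		/*
		 *  splice() needs a pipe at one end: stage one moves data
		 *  from /dev/zero into the pipe, stage two drains the pipe
		 *  into /dev/null.
		 */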
		bytes = splice(fd_in, NULL, fds[1], NULL, opt_splice_bytes, SPLICE_F_MOVE);
		if (bytes < 0)
			break;

		ret = splice(fds[0], NULL, fd_out, NULL, opt_splice_bytes, SPLICE_F_MOVE);
		if (ret < 0)
			break;

		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
	(void)close(fd_out);
	(void)close(fd_in);
	(void)close(fds[0]);
	(void)close(fds[1]);

	return EXIT_SUCCESS;
}
Example #5
/*
 *  stress_sys_read_thread
 *	keep exercising a sysfs entry until
 *	controlling thread triggers an exit
 */
static void *stress_sys_read_thread(void *ctxt_ptr)
{
	static void *nowt = NULL;
	uint8_t stack[SIGSTKSZ];
	stack_t ss;
	ctxt_t *ctxt = (ctxt_t *)ctxt_ptr;

	/*
	 *  Block all signals, let controlling thread
	 *  handle these
	 */
	sigprocmask(SIG_BLOCK, &set, NULL);

	/*
	 *  According to POSIX.1 a thread should have
	 *  a distinct alternative signal stack.
	 *  However, we block signals in this thread
	 *  so this is probably just totally unnecessary.
	 */
	ss.ss_sp = (void *)stack;
	ss.ss_size = SIGSTKSZ;
	ss.ss_flags = 0;
	if (sigaltstack(&ss, NULL) < 0) {
		pr_failed_err("pthread", "sigaltstack");
		return &nowt;
	}
	while (keep_running && opt_do_run)
		stress_sys_read(ctxt->name, ctxt->path);

	return &nowt;
}
Example #6
/*
 *  stress_hdd_advise()
 *	set posix_fadvise options
 */
static int stress_hdd_advise(const char *name, const int fd, const int flags)
{
#if (defined(POSIX_FADV_SEQUENTIAL) || defined(POSIX_FADV_RANDOM) || \
    defined(POSIX_FADV_NOREUSE) || defined(POSIX_FADV_WILLNEED) || \
    defined(POSIX_FADV_DONTNEED)) && !defined(__gnu_hurd__)
    int i;

    if (!(flags & HDD_OPT_FADV_MASK))
        return 0;

    for (i = 0; hdd_opts[i].opt; i++) {
        if (hdd_opts[i].flag & flags) {
            if (posix_fadvise(fd, 0, 0, hdd_opts[i].advice) < 0) {
                pr_failed_err(name, "posix_fadvise");
                return -1;
            }
        }
    }
#else
    (void)name;
    (void)fd;
    (void)flags;
#endif
    return 0;
}
Example #7
/*
 *  stress_sigsegv
 *	stress by generating segmentation faults
 */
int stress_sigsegv(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *ptr = NULL;

	(void)instance;

	for (;;) {
		struct sigaction new_action;
		int ret;

		memset(&new_action, 0, sizeof new_action);
		new_action.sa_handler = stress_segvhandler;
		sigemptyset(&new_action.sa_mask);
		new_action.sa_flags = 0;

		if (sigaction(SIGSEGV, &new_action, NULL) < 0) {
			pr_failed_err(name, "sigaction");
			return EXIT_FAILURE;
		}
		if (sigaction(SIGILL, &new_action, NULL) < 0) {
			pr_failed_err(name, "sigaction");
			return EXIT_FAILURE;
		}
		ret = sigsetjmp(jmp_env, 1);
		/*
		 * We return here if we segfault, so
		 * first check if we need to terminate
		 */
		if (!opt_do_run || (max_ops && *counter >= max_ops))
			break;

		if (ret)
			(*counter)++;	/* SIGSEGV/SIGILL occurred */
		else
			*ptr = 0;	/* Trip a SIGSEGV/SIGILL */
	}

	return EXIT_SUCCESS;
}
Example #8
/*
 *  stress_kill
 *	stress system by rapid kills
 */
int stress_kill(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	struct sigaction new_action;
	const pid_t pid = getpid();

	(void)instance;

	memset(&new_action, 0, sizeof new_action);
	new_action.sa_handler = SIG_IGN;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = 0;

	if (sigaction(SIGUSR1, &new_action, NULL) < 0) {
		pr_failed_err(name, "sigaction");
		return EXIT_FAILURE;
	}

	do {
		int ret;

		ret = kill(pid, SIGUSR1);
		if ((ret < 0) && (opt_flags & OPT_FLAGS_VERIFY))
			pr_fail(stderr, "%s: kill failed: errno=%d (%s)\n",
				name, errno, strerror(errno));

		/* Zero signal can be used to see if process exists */
		ret = kill(pid, 0);
		if ((ret < 0) && (opt_flags & OPT_FLAGS_VERIFY))
			pr_fail(stderr, "%s: kill failed: errno=%d (%s)\n",
				name, errno, strerror(errno));

		/*
		 * Zero signal can be used to see if process exists,
		 * -1 pid means signal sent to every process caller has
		 * permission to send to
		 */
		ret = kill(-1, 0);
		if ((ret < 0) && (opt_flags & OPT_FLAGS_VERIFY))
			pr_fail(stderr, "%s: kill failed: errno=%d (%s)\n",
				name, errno, strerror(errno));

		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
Example #9
/*
 *  stress_sigfpe
 *	stress by generating floating point errors
 */
int stress_sigfpe(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	(void)instance;

	for (;;) {
		struct sigaction new_action;
		int ret;

		memset(&new_action, 0, sizeof new_action);
		new_action.sa_handler = stress_fpehandler;
		sigemptyset(&new_action.sa_mask);
		new_action.sa_flags = 0;

		if (sigaction(SIGFPE, &new_action, NULL) < 0) {
			pr_failed_err(name, "sigaction");
			return EXIT_FAILURE;
		}
		ret = sigsetjmp(jmp_env, 1);
		/*
		 * We return here if we get SIGFPE, so
		 * first check if we need to terminate
		 */
		if (!opt_do_run || (max_ops && *counter >= max_ops))
			break;

		if (ret)
			(*counter)++;	/* SIGFPE occurred */
		else
			uint64_put(1 / uint64_zero());	/* force division by zero */
	}

	return EXIT_SUCCESS;
}
Example #10
/*
 *  stress_getrandom
 *	stress reading random values using getrandom()
 */
int stress_getrandom(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	(void)instance;

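	/* __getrandom() is presumably a thin wrapper around the getrandom(2) syscall */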
	do {
		char buffer[8192];
		ssize_t ret;

		ret = __getrandom(buffer, sizeof(buffer), 0);
		if (ret < 0) {
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_failed_err(name, "getrandom");
			return EXIT_FAILURE;
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
Example #11
/*
 *  stress_kcmp
 *	stress sys_kcmp
 */
int stress_kcmp(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid1;
	int fd1;
	int ret = EXIT_SUCCESS;

	(void)instance;

	if ((fd1 = open("/dev/null", O_WRONLY)) < 0) {
		pr_failed_err(name, "open");
		return EXIT_FAILURE;
	}

again:
	pid1 = fork();
	if (pid1 < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;

		pr_failed_dbg(name, "fork");
		(void)close(fd1);
		return EXIT_FAILURE;
	} else if (pid1 == 0) {
		setpgid(0, pgrp);

		/* Child */
		while (opt_do_run)
			pause();

		/* will never get here */
		(void)close(fd1);
		exit(EXIT_SUCCESS);
	} else {
		/* Parent */
		int fd2, status;
		pid_t pid2;

		setpgid(pid1, pgrp);
		pid2 = getpid();
		if ((fd2 = open("/dev/null", O_WRONLY)) < 0) {
			pr_failed_err(name, "open");
			ret = EXIT_FAILURE;
			goto reap;
		}

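		/*
		 *  KCMP()/KCMP_VERIFY() are assumed to wrap the kcmp(2)
		 *  syscall, checking whether the two processes share the
		 *  given kernel resource (files, fs, VM, sighand, etc.)
		 */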
		do {
			KCMP(pid1, pid2, KCMP_FILE, fd1, fd2);
			KCMP(pid1, pid1, KCMP_FILE, fd1, fd1);
			KCMP(pid2, pid2, KCMP_FILE, fd1, fd1);
			KCMP(pid2, pid2, KCMP_FILE, fd2, fd2);

			KCMP(pid1, pid2, KCMP_FILES, 0, 0);
			KCMP(pid1, pid1, KCMP_FILES, 0, 0);
			KCMP(pid2, pid2, KCMP_FILES, 0, 0);

			KCMP(pid1, pid2, KCMP_FS, 0, 0);
			KCMP(pid1, pid1, KCMP_FS, 0, 0);
			KCMP(pid2, pid2, KCMP_FS, 0, 0);

			KCMP(pid1, pid2, KCMP_IO, 0, 0);
			KCMP(pid1, pid1, KCMP_IO, 0, 0);
			KCMP(pid2, pid2, KCMP_IO, 0, 0);

			KCMP(pid1, pid2, KCMP_SIGHAND, 0, 0);
			KCMP(pid1, pid1, KCMP_SIGHAND, 0, 0);
			KCMP(pid2, pid2, KCMP_SIGHAND, 0, 0);

			KCMP(pid1, pid2, KCMP_SYSVSEM, 0, 0);
			KCMP(pid1, pid1, KCMP_SYSVSEM, 0, 0);
			KCMP(pid2, pid2, KCMP_SYSVSEM, 0, 0);

			KCMP(pid1, pid2, KCMP_VM, 0, 0);
			KCMP(pid1, pid1, KCMP_VM, 0, 0);
			KCMP(pid2, pid2, KCMP_VM, 0, 0);

			/* Same simple checks */
			if (opt_flags & OPT_FLAGS_VERIFY) {
				KCMP_VERIFY(pid1, pid1, KCMP_FILE, fd1, fd1, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_FILES, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_FS, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_IO, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_SIGHAND, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_SYSVSEM, 0, 0, 0);
				KCMP_VERIFY(pid1, pid1, KCMP_VM, 0, 0, 0);
				KCMP_VERIFY(pid1, pid2, KCMP_SYSVSEM, 0, 0, 0);
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));
reap:
		if (fd2 >= 0)
			(void)close(fd2);
		(void)kill(pid1, SIGKILL);
		(void)waitpid(pid1, &status, 0);
		(void)close(fd1);
	}
	return ret;
}
Example #12
/*
 *  stress_aio
 *	stress asynchronous I/O
 */
int stress_aio(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd, rc = EXIT_FAILURE;
	io_req_t *io_reqs;
	struct sigaction sa;
	int i;
	uint64_t total = 0;
	char filename[PATH_MAX];
	const pid_t pid = getpid();

	if ((io_reqs = calloc((size_t)opt_aio_requests, sizeof(io_req_t))) == NULL) {
		pr_err(stderr, "%s: cannot allocate io request structures\n", name);
		return EXIT_FAILURE;
	}

	if (stress_temp_dir_mk(name, pid, instance) < 0) {
		free(io_reqs);
		return EXIT_FAILURE;
	}
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_failed_err(name, "open");
		goto finish;
	}
	(void)unlink(filename);
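	/* Unlink now; the open fd keeps the file alive until close */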

	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART | SA_SIGINFO;
	sa.sa_sigaction = aio_signal_handler;
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		pr_failed_err(name, "sigaction");
		(void)close(fd);
		goto finish;
	}

	/* Kick off requests */
	for (i = 0; i < opt_aio_requests; i++) {
		aio_fill_buffer(i, io_reqs[i].buffer, BUFFER_SZ);
		if (issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i, aio_write) < 0)
			goto cancel;
	}

	do {
		usleep(250000); /* wait until a signal occurs */

		for (i = 0; opt_do_run && (i < opt_aio_requests); i++) {
			if (io_reqs[i].status != EINPROGRESS)
				continue;

			io_reqs[i].status = aio_error(&io_reqs[i].aiocb);
			switch (io_reqs[i].status) {
			case ECANCELED:
			case 0:
				/* Succeeded or cancelled, so redo another */
				(*counter)++;
				if (issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i,
					(mwc32() & 0x8) ? aio_read : aio_write) < 0)
					goto cancel;
				break;
			case EINPROGRESS:
				break;
			default:
				/* Something went wrong */
				pr_failed_errno(name, "aio_error", io_reqs[i].status);
				goto cancel;
			}
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;

cancel:
	for (i = 0; i < opt_aio_requests; i++) {
		aio_issue_cancel(name, &io_reqs[i]);
		total += io_reqs[i].count;
	}
	(void)close(fd);
finish:
	pr_dbg(stderr, "%s: total of %" PRIu64 " async I/O signals caught (instance %" PRIu32 ")\n",
		name, total, instance);
	(void)stress_temp_dir_rm(name, pid, instance);
	free(io_reqs);
	return rc;
}
Example #13
/*
 *  stress_mmap()
 *	stress mmap
 */
int stress_mmap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	size_t sz, pages4k;
#if !defined(__gnu_hurd__)
	const int ms_flags = (opt_flags & OPT_FLAGS_MMAP_ASYNC) ?
		MS_ASYNC : MS_SYNC;
#endif
	const pid_t pid = getpid();
	int fd = -1, flags = MAP_PRIVATE | MAP_ANONYMOUS;
	char filename[PATH_MAX];

	(void)instance;
#ifdef MAP_POPULATE
	flags |= MAP_POPULATE;
#endif

	if (!set_mmap_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_mmap_bytes = MAX_MMAP_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_mmap_bytes = MIN_MMAP_BYTES;
	}
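	/* Round requested size down to whole pages (page size assumed power of two) */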
	sz = opt_mmap_bytes & ~(page_size - 1);
	pages4k = sz / page_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		ssize_t ret;
		char ch = '\0';

		if (stress_temp_dir_mk(name, pid, instance) < 0)
			return EXIT_FAILURE;

		(void)stress_temp_filename(filename, sizeof(filename),
			name, pid, instance, mwc32());

		(void)umask(0077);
		if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
			pr_failed_err(name, "open");
			(void)unlink(filename);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
		(void)unlink(filename);
		if (lseek(fd, sz - sizeof(ch), SEEK_SET) < 0) {
			pr_failed_err(name, "lseek");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
redo:
		ret = write(fd, &ch, sizeof(ch));
		if (ret != sizeof(ch)) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo;
			pr_failed_err(name, "write");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
		flags &= ~(MAP_ANONYMOUS | MAP_PRIVATE);
		flags |= MAP_SHARED;
	}

	do {
		uint8_t mapped[pages4k];
		uint8_t *mappings[pages4k];
		size_t n;

		if (!opt_do_run)
			break;
		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, flags, fd, 0);
		if (buf == MAP_FAILED) {
			/* Force MAP_POPULATE off, just in case */
#ifdef MAP_POPULATE
			flags &= ~MAP_POPULATE;
#endif
			continue;	/* Try again */
		}
		if (opt_flags & OPT_FLAGS_MMAP_FILE) {
			memset(buf, 0xff, sz);
#if !defined(__gnu_hurd__)
			(void)msync(buf, sz, ms_flags);
#endif
		}
		(void)madvise_random(buf, sz);
		(void)mincore_touch_pages(buf, sz);
		stress_mmap_mprotect(name, buf, sz);
		memset(mapped, PAGE_MAPPED, sizeof(mapped));
		for (n = 0; n < pages4k; n++)
			mappings[n] = buf + (n * page_size);

		/* Ensure we can write to the mapped pages */
		stress_mmap_set(buf, sz);
		if (opt_flags & OPT_FLAGS_VERIFY) {
			if (stress_mmap_check(buf, sz) < 0)
				pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
					"not contain expected data\n", name, sz);
		}

		/*
		 *  Step #1, unmap all pages in random order
		 */
		(void)mincore_touch_pages(buf, sz);
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (mapped[page] == PAGE_MAPPED) {
					mapped[page] = 0;
					(void)madvise_random(mappings[page], page_size);
					stress_mmap_mprotect(name, mappings[page], page_size);
					(void)munmap(mappings[page], page_size);
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
		(void)munmap(buf, sz);
#ifdef MAP_FIXED
		/*
		 *  Step #2, map them back in random order
		 */
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (!mapped[page]) {
					off_t offset = (opt_flags & OPT_FLAGS_MMAP_FILE) ?
							page * page_size : 0;
					/*
					 * Attempt to map them back into the original address, this
					 * may fail (it's not the most portable operation), so keep
					 * track of failed mappings too
					 */
					mappings[page] = mmap(mappings[page], page_size, PROT_READ | PROT_WRITE, MAP_FIXED | flags, fd, offset);
					if (mappings[page] == MAP_FAILED) {
						mapped[page] = PAGE_MAPPED_FAIL;
						mappings[page] = NULL;
					} else {
						(void)mincore_touch_pages(mappings[page], page_size);
						(void)madvise_random(mappings[page], page_size);
						stress_mmap_mprotect(name, mappings[page], page_size);
						mapped[page] = PAGE_MAPPED;
						/* Ensure we can write to the mapped page */
						stress_mmap_set(mappings[page], page_size);
						if (stress_mmap_check(mappings[page], page_size) < 0)
							pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
								"not contain expected data\n", name, page_size);
						if (opt_flags & OPT_FLAGS_MMAP_FILE) {
							memset(mappings[page], n, page_size);
#if !defined(__gnu_hurd__)
							(void)msync(mappings[page], page_size, ms_flags);
#endif
						}
					}
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
#endif
cleanup:
		/*
		 *  Step #3, unmap them all
		 */
		for (n = 0; n < pages4k; n++) {
			if (mapped[n] & PAGE_MAPPED) {
				(void)madvise_random(mappings[n], page_size);
				stress_mmap_mprotect(name, mappings[n], page_size);
				(void)munmap(mappings[n], page_size);
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);
	}
	return EXIT_SUCCESS;
}
Example #14
/*
 *  stress_vm_splice
 *	stress vmsplicing of user memory to /dev/null via a pipe
 */
int stress_vm_splice(
    uint64_t *const counter,
    const uint32_t instance,
    const uint64_t max_ops,
    const char *name)
{
    int fd, fds[2];
    uint8_t *buf;
    const size_t page_size = stress_get_pagesize();
    size_t sz;

    (void)instance;

    if (!set_vm_splice_bytes) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_vm_splice_bytes = MAX_VM_SPLICE_BYTES;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_vm_splice_bytes = MIN_VM_SPLICE_BYTES;
    }
    sz = opt_vm_splice_bytes & ~(page_size - 1);

    buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, MAP_ANONYMOUS | MAP_PRIVATE, -1, 0);
    if (buf == MAP_FAILED) {
        pr_failed_dbg(name, "mmap");
        return EXIT_FAILURE;
    }

    if (pipe(fds) < 0) {
        (void)munmap(buf, sz);
        pr_failed_err(name, "pipe");
        return EXIT_FAILURE;
    }

    if ((fd = open("/dev/null", O_WRONLY)) < 0) {
        (void)munmap(buf, sz);
        (void)close(fds[0]);
        (void)close(fds[1]);
        pr_failed_err(name, "open");
        return EXIT_FAILURE;
    }

    do {
        int ret;
        ssize_t bytes;
        struct iovec iov;

        iov.iov_base = buf;
        iov.iov_len = sz;

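        /*
         *  vmsplice() moves the user pages into the pipe; splice()
         *  then transfers them from the pipe to /dev/null.
         */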
        bytes = vmsplice(fds[1], &iov, 1, 0);
        if (bytes < 0)
            break;
        ret = splice(fds[0], NULL, fd, NULL, opt_vm_splice_bytes, SPLICE_F_MOVE);
        if (ret < 0)
            break;

        (*counter)++;
    } while (opt_do_run && (!max_ops || *counter < max_ops));

    (void)munmap(buf, sz);
    (void)close(fd);
    (void)close(fds[0]);
    (void)close(fds[1]);

    return EXIT_SUCCESS;
}
Example #15
/*
 *  stress_sigq
 *	stress by heavy sigqueue message sending
 */
int stress_sigq(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid;
	struct sigaction new_action;

	new_action.sa_handler = stress_sigqhandler;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = 0;
	if (sigaction(SIGUSR1, &new_action, NULL) < 0) {
		pr_failed_err(name, "sigaction");
		return EXIT_FAILURE;
	}

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		sigset_t mask;

		sigemptyset(&mask);
		sigaddset(&mask, SIGUSR1);
		/* Block SIGUSR1 so sigwaitinfo() can reliably dequeue it */
		(void)sigprocmask(SIG_BLOCK, &mask, NULL);

		for (;;) {
			siginfo_t info;
			sigwaitinfo(&mask, &info);
			if (info.si_value.sival_int)
				break;
		}
		pr_dbg(stderr, "%s: child got termination notice\n", name);
		pr_dbg(stderr, "%s: exited on pid [%d] (instance %" PRIu32 ")\n",
			name, getpid(), instance);
		_exit(0);
	} else {
		/* Parent */
		union sigval s;
		int status;

		do {
			memset(&s, 0, sizeof(s));
			s.sival_int = 0;
			sigqueue(pid, SIGUSR1, s);
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

		pr_dbg(stderr, "%s: parent sent termination notice\n", name);
		memset(&s, 0, sizeof(s));
		s.sival_int = 1;
		sigqueue(pid, SIGUSR1, s);
		usleep(250);
		/* And ensure child is really dead */
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
	}

	return EXIT_SUCCESS;
}
Example #16
/*
 *  stress_udp
 *	stress by heavy udp ops
 */
int stress_udp(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid, ppid = getppid();
	int rc = EXIT_SUCCESS;

	pr_dbg(stderr, "%s: process [%d] using udp port %d\n",
		name, getpid(), opt_udp_port + instance);

again:
	pid = fork();
	if (pid < 0) {
		if (opt_do_run && (errno == EAGAIN))
			goto again;
		pr_failed_dbg(name, "fork");
		return EXIT_FAILURE;
	} else if (pid == 0) {
		/* Child, client */
		struct sockaddr *addr;

		do {
			char buf[UDP_BUF];
			socklen_t len;
			int fd;
			int j = 0;

			if ((fd = socket(opt_udp_domain, SOCK_DGRAM, 0)) < 0) {
				pr_failed_dbg(name, "socket");
				/* failed, kick parent to finish */
				(void)kill(getppid(), SIGALRM);
				exit(EXIT_FAILURE);
			}
			stress_set_sockaddr(name, instance, ppid,
				opt_udp_domain, opt_udp_port, &addr, &len);

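			/* Send datagrams of increasing size (16, 32, ...) filled with a cycling letter */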
			do {
				size_t i;

				for (i = 16; i < sizeof(buf); i += 16, j++) {
					memset(buf, 'A' + (j % 26), sizeof(buf));
					ssize_t ret = sendto(fd, buf, i, 0, addr, len);
					if (ret < 0) {
						if (errno != EINTR)
							pr_failed_dbg(name, "sendto");
						break;
					}
				}
			} while (opt_do_run && (!max_ops || *counter < max_ops));
			(void)close(fd);
		} while (opt_do_run && (!max_ops || *counter < max_ops));

#ifdef AF_UNIX
		if (opt_udp_domain == AF_UNIX) {
			struct sockaddr_un *addr_un = (struct sockaddr_un *)addr;
			(void)unlink(addr_un->sun_path);
		}
#endif
		/* Inform parent we're all done */
		(void)kill(getppid(), SIGALRM);
		exit(EXIT_SUCCESS);
	} else {
		/* Parent, server */

		char buf[UDP_BUF];
		int fd, status;
		int so_reuseaddr = 1;
		socklen_t addr_len = 0;
		struct sigaction new_action;
		struct sockaddr *addr = NULL;

		new_action.sa_handler = handle_udp_sigalrm;
		sigemptyset(&new_action.sa_mask);
		new_action.sa_flags = 0;
		if (sigaction(SIGALRM, &new_action, NULL) < 0) {
			pr_failed_err(name, "sigaction");
			rc = EXIT_FAILURE;
			goto die;
		}
		if ((fd = socket(opt_udp_domain, SOCK_DGRAM, 0)) < 0) {
			pr_failed_dbg(name, "socket");
			rc = EXIT_FAILURE;
			goto die;
		}
		stress_set_sockaddr(name, instance, ppid,
			opt_udp_domain, opt_udp_port, &addr, &addr_len);
		if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR, &so_reuseaddr, sizeof(so_reuseaddr)) < 0) {
			pr_failed_dbg(name, "setsockopt");
			rc = EXIT_FAILURE;
			goto die_close;
		}
		if (bind(fd, addr, addr_len) < 0) {
			pr_failed_dbg(name, "bind");
			rc = EXIT_FAILURE;
			goto die_close;
		}

		do {
			socklen_t len = addr_len;
			ssize_t n = recvfrom(fd, buf, sizeof(buf), 0, addr, &len);
			if (n == 0)
				break;
			if (n < 0) {
				if (errno != EINTR)
					pr_failed_dbg(name, "recvfrom");
				break;
			}
			(*counter)++;
		} while (opt_do_run && (!max_ops || *counter < max_ops));

die_close:
		(void)close(fd);
die:
#ifdef AF_UNIX
		if ((opt_udp_domain == AF_UNIX) && addr) {
			struct sockaddr_un *addr_un = (struct sockaddr_un *)addr;
			(void)unlink(addr_un->sun_path);
		}
#endif
		if (pid) {
			(void)kill(pid, SIGKILL);
			(void)waitpid(pid, &status, 0);
		}
	}
	return rc;
}
Example #17
/*
 *  stress_sigpending
 *	stress sigpending system call
 */
int stress_sigpending(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	struct sigaction new_action;
	sigset_t sigset;
	const pid_t mypid = getpid();

	(void)instance;

	memset(&new_action, 0, sizeof new_action);
	new_action.sa_handler = stress_usr1_handler;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = 0;

	if (sigaction(SIGUSR1, &new_action, NULL) < 0) {
		pr_failed_err(name, "sigaction");
		return EXIT_FAILURE;
	}
	do {
		sigemptyset(&sigset);
		sigaddset(&sigset, SIGUSR1);
		if (sigprocmask(SIG_SETMASK, &sigset, NULL) < 0) {
			pr_failed_err(name, "sigprocmask");
			return EXIT_FAILURE;
		}

		(void)kill(mypid, SIGUSR1);
		if (sigpending(&sigset) < 0) {
			pr_failed_err(name, "sigpending");
			continue;
		}
		/* We should get a SIGUSR1 here */
		if (!sigismember(&sigset, SIGUSR1)) {
			pr_failed_err(name, "sigismember");
			continue;
		}

		/* Unmask signal, signal is handled */
		sigemptyset(&sigset);
		sigprocmask(SIG_SETMASK, &sigset, NULL);

		/* And it is no longer pending */
		if (sigpending(&sigset) < 0) {
			pr_failed_err(name, "sigpending");
			continue;
		}
		if (sigismember(&sigset, SIGUSR1)) {
			pr_failed_err(name, "sigismember");
			continue;
		}
		/* Success! */
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
Example #18
/*
 *  stress_mmapfork()
 *	stress mappings + fork VM subsystem
 */
int stress_mmapfork(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pids[MAX_PIDS];
	struct sysinfo info;
	void *ptr;
	int instances;

	(void)instance;

	if ((instances = stressor_instances(STRESS_MMAPFORK)) < 1)
		instances = (int)stress_get_processors_configured();

	do {
		size_t i, n;
		size_t len;

		memset(pids, 0, sizeof(pids));

		for (n = 0; n < MAX_PIDS; n++) {
retry:			if (!opt_do_run)
				goto reap;

			pids[n] = fork();
			if (pids[n] < 0) {
				/* Out of resources for fork, re-do, ugh */
				if (errno == EAGAIN) {
					usleep(10000);
					goto retry;
				}
				break;
			}
			if (pids[n] == 0) {
				/* Child */
				if (sysinfo(&info) < 0) {
					pr_failed_err(name, "sysinfo");
					_exit(0);
				}
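				/* Each child maps half of its per-process share of free RAM */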
				len = ((size_t)info.freeram / (instances * MAX_PIDS)) / 2;
				ptr = mmap(NULL, len, PROT_READ | PROT_WRITE,
					MAP_POPULATE | MAP_SHARED | MAP_ANONYMOUS, -1, 0);
				if (ptr != MAP_FAILED) {
					madvise(ptr, len, MADV_WILLNEED);
					memset(ptr, 0, len);
					madvise(ptr, len, MADV_DONTNEED);
					munmap(ptr, len);
				}
				_exit(0);
			}
		}
reap:
		for (i = 0; i < n; i++) {
			int status;

			if (waitpid(pids[i], &status, 0) < 0) {
				if (errno != EINTR)
					pr_err(stderr, "%s: waitpid errno=%d (%s)\n",
						name, errno, strerror(errno));
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
Example #19
/*
 *  stress_key
 *	stress key operations
 */
int stress_key(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t ppid = getppid();

	key_serial_t keys[MAX_KEYS];

	do {
		size_t i, n = 0;
		char description[64];
		char payload[64];

		/* Add as many keys as we are allowed */
		for (n = 0; n < MAX_KEYS; n++) {
			snprintf(description, sizeof(description),
				"stress-ng-key-%u-%" PRIu32
				"-%zu", ppid, instance, n);
			snprintf(payload, sizeof(payload),
				"somedata-%zu", n);

			keys[n] = sys_add_key("user", description,
				payload, strlen(payload),
				KEY_SPEC_PROCESS_KEYRING);
			if (keys[n] < 0) {
				if ((errno != ENOMEM) && (errno != EDQUOT))
					pr_failed_err(name, "add_key");
				break;
			}
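			/* A 1 second timeout lets stale keys expire on their own */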
#if defined(KEYCTL_SET_TIMEOUT)
			if (sys_keyctl(KEYCTL_SET_TIMEOUT, keys[n], 1) < 0)
				pr_failed_err(name, "keyctl KEYCTL_SET_TIMEOUT");
#endif
		}

		/* And manipulate the keys */
		for (i = 0; i < n; i++) {
			snprintf(description, sizeof(description),
				"stress-ng-key-%u-%" PRIu32
				"-%zu", ppid, instance, i);
#if defined(KEYCTL_DESCRIBE)
			if (sys_keyctl(KEYCTL_DESCRIBE, keys[i], description) < 0)
				pr_failed_err(name, "keyctl KEYCTL_DESCRIBE");
			if (!opt_do_run)
				break;
#endif

			snprintf(payload, sizeof(payload),
				"somedata-%zu", i);
#if defined(KEYCTL_UPDATE)
			if (sys_keyctl(KEYCTL_UPDATE, keys[i],
			    payload, strlen(payload)) < 0) {
				if ((errno != ENOMEM) && (errno != EDQUOT))
					pr_failed_err(name, "keyctl KEYCTL_UPDATE");
			}
			if (!opt_do_run)
				break;
#endif

#if defined(KEYCTL_READ)
			memset(payload, 0, sizeof(payload));
			if (sys_keyctl(KEYCTL_READ, keys[i],
			    payload, sizeof(payload)) < 0)
				pr_failed_err(name, "keyctl KEYCTL_READ");
			if (!opt_do_run)
				break;
#endif

#if defined(KEYCTL_CLEAR)
			(void)sys_keyctl(KEYCTL_CLEAR, keys[i]);
#endif
#if defined(KEYCTL_INVALIDATE)
			(void)sys_keyctl(KEYCTL_INVALIDATE, keys[i]);
#endif
			(*counter)++;
		}
		/* If we hit too many errors and bailed out early, clean up */
		while (i < n) {
#if defined(KEYCTL_CLEAR)
			(void)sys_keyctl(KEYCTL_CLEAR, keys[i]);
#endif
#if defined(KEYCTL_INVALIDATE)
			(void)sys_keyctl(KEYCTL_INVALIDATE, keys[i]);
#endif
			i++;
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
Example #20
/*
 *  stress_timerfd
 *	stress timerfd
 */
int stress_timerfd(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	struct itimerspec timer;

	(void)instance;

	if (!set_timerfd_freq) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_timerfd_freq = MAX_TIMERFD_FREQ;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_timerfd_freq = MIN_TIMERFD_FREQ;
	}
	rate_ns = opt_timerfd_freq ? 1000000000 / opt_timerfd_freq : 1000000000;

	timerfd = timerfd_create(CLOCK_REALTIME, 0);
	if (timerfd < 0) {
		pr_failed_err(name, "timerfd_create");
		return EXIT_FAILURE;
	}
	stress_timerfd_set(&timer);
	if (timerfd_settime(timerfd, 0, &timer, NULL) < 0) {
		pr_failed_err(name, "timerfd_settime");
		(void)close(timerfd);
		return EXIT_FAILURE;
	}

	do {
		int ret;
		uint64_t exp;
		struct itimerspec value;
		struct timeval timeout;
		fd_set rdfs;

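		/* Wait up to 0.5 seconds for the timerfd to become readable on expiry */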
		FD_ZERO(&rdfs);
		FD_SET(timerfd, &rdfs);
		timeout.tv_sec = 0;
		timeout.tv_usec = 500000;

		if (!opt_do_run)
			break;
		ret = select(timerfd + 1, &rdfs, NULL, NULL, &timeout);
		if (ret < 0) {
			if (errno == EINTR)
				continue;
			pr_failed_err(name, "select");
			break;
		}
		if (ret < 1)
			continue; /* Timeout */

		ret = read(timerfd, &exp, sizeof exp);
		if (ret < 0) {
			pr_failed_err(name, "timerfd read");
			break;
		}
		if (timerfd_gettime(timerfd, &value) < 0) {
			pr_failed_err(name, "timerfd_gettime");
			break;
		}
		if (opt_flags & OPT_FLAGS_TIMERFD_RAND) {
			stress_timerfd_set(&timer);
			if (timerfd_settime(timerfd, 0, &timer, NULL) < 0) {
				pr_failed_err(name, "timerfd_settime");
				break;
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)close(timerfd);

	return EXIT_SUCCESS;
}
Example #21
/*
 *  stress_hdd
 *	stress I/O via writes
 */
int stress_hdd(
    uint64_t *const counter,
    const uint32_t instance,
    const uint64_t max_ops,
    const char *name)
{
    uint8_t *buf = NULL;
    uint64_t i, min_size, remainder;
    const pid_t pid = getpid();
    int ret, rc = EXIT_FAILURE;
    char filename[PATH_MAX];
    int flags = O_CREAT | O_RDWR | O_TRUNC | opt_hdd_oflags;
    int fadvise_flags = opt_hdd_flags & HDD_OPT_FADV_MASK;

    if (!set_hdd_bytes) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_bytes = MAX_HDD_BYTES;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_bytes = MIN_HDD_BYTES;
    }

    if (!set_hdd_write_size) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_write_size = MAX_HDD_WRITE_SIZE;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_write_size = MIN_HDD_WRITE_SIZE;
    }

    if (opt_hdd_flags & HDD_OPT_O_DIRECT) {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * BUF_ALIGNMENT : MIN_HDD_WRITE_SIZE;
    } else {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * MIN_HDD_WRITE_SIZE : MIN_HDD_WRITE_SIZE;
    }
    /* Ensure I/O size is not too small */
    if (opt_hdd_write_size < min_size) {
        opt_hdd_write_size = min_size;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes\n",
               name, opt_hdd_write_size);
    }

    /* Ensure we get same sized iovec I/O sizes */
    remainder = opt_hdd_write_size % HDD_IO_VEC_MAX;
    if ((opt_hdd_flags & HDD_OPT_IOVEC) && (remainder != 0)) {
        opt_hdd_write_size += HDD_IO_VEC_MAX - remainder;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes in iovec mode\n",
               name, opt_hdd_write_size);
    }

    /* Ensure complete file size is not less than the I/O size */
    if (opt_hdd_bytes < opt_hdd_write_size) {
        opt_hdd_bytes = opt_hdd_write_size;
        pr_inf(stderr, "%s: increasing file size to write size of %" PRIu64 " bytes\n",
               name, opt_hdd_bytes);
    }
    if (stress_temp_dir_mk(name, pid, instance) < 0)
        return EXIT_FAILURE;

    /* Must have some write option */
    if ((opt_hdd_flags & HDD_OPT_WR_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_WR_SEQ;
    /* Must have some read option */
    if ((opt_hdd_flags & HDD_OPT_RD_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_RD_SEQ;

    ret = posix_memalign((void **)&buf, BUF_ALIGNMENT, (size_t)opt_hdd_write_size);
    if (ret || !buf) {
        pr_err(stderr, "%s: cannot allocate buffer\n", name);
        (void)stress_temp_dir_rm(name, pid, instance);
        return EXIT_FAILURE;
    }

    for (i = 0; i < opt_hdd_write_size; i++)
        buf[i] = mwc8();

    (void)stress_temp_filename(filename, sizeof(filename),
                               name, pid, instance, mwc32());
    do {
        int fd;

        (void)umask(0077);
        if ((fd = open(filename, flags, S_IRUSR | S_IWUSR)) < 0) {
            pr_failed_err(name, "open");
            goto finish;
        }
        if (ftruncate(fd, (off_t)0) < 0) {
            pr_failed_err(name, "ftruncate");
            (void)close(fd);
            goto finish;
        }
        (void)unlink(filename);

        if (stress_hdd_advise(name, fd, fadvise_flags) < 0) {
            (void)close(fd);
            goto finish;
        }

        /* Random Write */
        if (opt_hdd_flags & HDD_OPT_WR_RND) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                size_t j;

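                /* Align offset to a 512-byte boundary, as O_DIRECT typically requires sector alignment */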
                off_t offset = (i == 0) ?
                               opt_hdd_bytes :
                               (mwc64() % opt_hdd_bytes) & ~511;
                ssize_t ret;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                for (j = 0; j < opt_hdd_write_size; j++)
                    buf[j] = (offset + j) & 0xff;

                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }
        /* Sequential Write */
        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                size_t j;
seq_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                for (j = 0; j < opt_hdd_write_size; j += 512)
                    buf[j] = (i + j) & 0xff;
                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }

        /* Sequential Read */
        if (opt_hdd_flags & HDD_OPT_RD_SEQ) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            if (lseek(fd, 0, SEEK_SET) < 0) {
                pr_failed_err(name, "lseek");
                (void)close(fd);
                goto finish;
            }
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
seq_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    size_t j;

                    for (j = 0; j < opt_hdd_write_size; j += 512) {
                        uint8_t v = (i + j) & 0xff;
                        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
                            /* Write seq has written to all of the file, so it should always be OK */
                            if (buf[j] != v)
                                baddata++;
                        } else {
                            /* Write rnd has written to some of the file, so data either zero or OK */
                            if (buf[j] != 0 && buf[j] != v)
                                baddata++;
                        }
                    }
                }
                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete sequential reads\n",
                       name, misreads);
            if (baddata)
                pr_fail(stderr, "%s: incorrect data found %" PRIu64 " times\n",
                        name, baddata);
        }
        /* Random Read */
        if (opt_hdd_flags & HDD_OPT_RD_RND) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                off_t offset = (mwc64() % (opt_hdd_bytes - opt_hdd_write_size)) & ~511;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;
                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    size_t j;

                    for (j = 0; j < opt_hdd_write_size; j += 512) {
                        uint8_t v = (offset + j) & 0xff;
                        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
                            /* Write seq has written to all of the file, so it should always be OK */
                            if (buf[j] != v)
                                baddata++;
                        } else {
                            /* Write rnd has written to some of the file, so data either zero or OK */
                            if (buf[j] != 0 && buf[j] != v)
                                baddata++;
                        }
                    }
                }

                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete random reads\n",
                       name, misreads);
            if (baddata)
                pr_fail(stderr, "%s: incorrect data found %" PRIu64 " times\n",
                        name, baddata);
        }
        (void)close(fd);

    } while (opt_do_run && (!max_ops || *counter < max_ops));

    rc = EXIT_SUCCESS;
finish:
    free(buf);
    (void)stress_temp_dir_rm(name, pid, instance);
    return rc;
}
Example #22
/*
 *  stress_hsearch()
 *	stress hsearch
 */
int stress_hsearch(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	size_t i, max;
	int ret = EXIT_FAILURE;
	char **keys;

	(void)instance;

	if (!set_hsearch_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_hsearch_size = MAX_HSEARCH_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_hsearch_size = MIN_HSEARCH_SIZE;
	}

	max = (size_t)opt_hsearch_size;

	/* Make hash table with 25% slack */
	if (!hcreate(max + (max / 4))) {
		pr_failed_err(name, "hcreate");
		return EXIT_FAILURE;
	}

	if ((keys = calloc(max, sizeof(char *))) == NULL) {
		pr_err(stderr, "%s: cannot allocate keys\n", name);
		goto free_hash;
	}

	/* Populate hash, make it 100% full for worst performance */
	for (i = 0; i < max; i++) {
		char buffer[32];
		ENTRY e;

		snprintf(buffer, sizeof(buffer), "%zu", i);
		keys[i] = strdup(buffer);
		if (!keys[i]) {
			pr_err(stderr, "%s: cannot allocate key\n", name);
			goto free_all;
		}

		e.key = keys[i];
		e.data = (void *)i;

		if (hsearch(e, ENTER) == NULL) {
			pr_err(stderr, "%s: cannot allocate new hash item\n", name);
			goto free_all;
		}
	}

	do {
		for (i = 0; opt_do_run && i < max; i++) {
			ENTRY e, *ep;

			e.key = keys[i];
			e.data = NULL;	/* Keep Coverity quiet */
			ep = hsearch(e, FIND);
			if (opt_flags & OPT_FLAGS_VERIFY) {
				if (ep == NULL) {
					pr_fail(stderr, "%s: cannot find key %s\n", name, keys[i]);
				} else {
					if (i != (size_t)ep->data) {
						pr_fail(stderr, "%s: hash returned incorrect data %zd\n", name, i);
					}
				}
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	ret = EXIT_SUCCESS;

free_all:
	for (i = 0; i < max; i++)
		free(keys[i]);
	free(keys);
free_hash:
	hdestroy();

	return ret;
}
Example #23
/*
 *  stress_aio_linux
 *	stress asynchronous I/O using the linux specific aio ABI
 */
int stress_aio_linux(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd, rc = EXIT_FAILURE;
	char filename[PATH_MAX];
	const pid_t pid = getpid();
	aio_context_t ctx = 0;

	if (!set_aio_linux_requests) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_aio_linux_requests = MAX_AIO_REQUESTS;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_aio_linux_requests = MIN_AIO_REQUESTS;
	}
	if (sys_io_setup(opt_aio_linux_requests, &ctx) < 0) {
		pr_failed_err(name, "io_setup");
		return EXIT_FAILURE;
	}
	if (stress_temp_dir_mk(name, pid, instance) < 0) {
		(void)sys_io_destroy(ctx);
		return EXIT_FAILURE;
	}
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_failed_err(name, "open");
		goto finish;
	}
	(void)unlink(filename);

	do {
		struct iocb cb[opt_aio_linux_requests];
		struct iocb *cbs[opt_aio_linux_requests];
		struct io_event events[opt_aio_linux_requests];
		uint8_t buffers[opt_aio_linux_requests][BUFFER_SZ];
		int ret, i;
		long n;

		for (i = 0; i < opt_aio_linux_requests; i++)
			aio_linux_fill_buffer(i, buffers[i], BUFFER_SZ);

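		/* Build one IOCB_CMD_PWRITE per buffer at a random multiple of BUFFER_SZ */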
		memset(cb, 0, sizeof(cb));
		for (i = 0; i < opt_aio_linux_requests; i++) {
			cb[i].aio_fildes = fd;
			cb[i].aio_lio_opcode = IOCB_CMD_PWRITE;
			cb[i].aio_buf = (uint64_t)(uintptr_t)buffers[i];
			cb[i].aio_offset = mwc16() * BUFFER_SZ;
			cb[i].aio_nbytes = BUFFER_SZ;
			cbs[i] = &cb[i];
		}
		ret = sys_io_submit(ctx, opt_aio_linux_requests, cbs);
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			pr_failed_err(name, "io_submit");
			break;
		}

		n = opt_aio_linux_requests;
		do {
			/* io_getevents() takes a relative timeout; wait up to 1ms */
			struct timespec timeout;

			timeout.tv_sec = 0;
			timeout.tv_nsec = 1000000;

			ret = sys_io_getevents(ctx, 1, n, events, &timeout);
			if (ret < 0) {
				if ((errno == EINTR) && (opt_do_run))
					continue;
				pr_failed_err(name, "io_getevents");
				break;
			} else {
				n -= ret;
			}
		} while ((n > 0) && opt_do_run);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;
	(void)close(fd);
finish:
	(void)sys_io_destroy(ctx);
	(void)stress_temp_dir_rm(name, pid, instance);
	return rc;
}
Example #24
/*
 *  stress_chmod
 *	stress chmod
 */
int stress_chmod(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	const pid_t ppid = getppid();
	int i, fd = -1, rc = EXIT_FAILURE, retries = 0;
	mode_t all_mask = 0;
	char filename[PATH_MAX], dirname[PATH_MAX];

	/*
	 *  Allow for multiple workers to chmod the *same* file
	 */
	stress_temp_dir(dirname, sizeof(dirname), name, ppid, 0);
	if (mkdir(dirname, S_IRWXU) < 0) {
		if (errno != EEXIST) {
			pr_failed_err(name, "mkdir");
			return EXIT_FAILURE;
		}
	}
	(void)stress_temp_filename(filename, sizeof(filename),
		name, ppid, 0, 0);

	do {
		errno = 0;
		/*
		 *  Try and open the file, it may be impossible momentarily
		 *  because other chmod stressors have already created it and
		 *  changed the permission bits. If so, wait a while and retry.
		 */
		if ((fd = creat(filename, S_IRUSR | S_IWUSR)) < 0) {
			if (errno == EPERM || errno == EACCES) {
				(void)usleep(100000);
				continue;
			}
			pr_failed_err(name, "open");
			goto tidy;
		}
		break;
	} while (opt_do_run && ++retries < 100);

	if (retries >= 100) {
		pr_err(stderr, "%s: chmod: file %s took %d retries to create (instance %" PRIu32 ")\n",
			name, filename, retries, instance);
		goto tidy;
	}

	for (i = 0; modes[i]; i++)
		all_mask |= modes[i];

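	/* Exercise fchmod()/chmod() over cumulatively OR'd permission bits */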
	do {
		mode_t mask = 0;

		for (i = 0; modes[i]; i++) {
			mask |= modes[i];
			if (do_fchmod(fd, i, mask, all_mask) < 0) {
				pr_fail(stderr, "%s: fchmod: errno=%d (%s)\n",
					name, errno, strerror(errno));
			}
			if (do_chmod(filename, i, mask, all_mask) < 0) {
				if (errno == ENOENT || errno == ENOTDIR) {
					/*
					 * File was removed during test by
					 * another worker
					 */
					rc = EXIT_SUCCESS;
					goto tidy;
				}
				pr_fail(stderr, "%s: chmod: errno=%d (%s)\n",
					name, errno, strerror(errno));
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;
tidy:
	if (fd >= 0) {
		(void)fchmod(fd, 0666);
		(void)close(fd);
	}
	(void)unlink(filename);
	(void)rmdir(dirname);

	return rc;
}
Example #25
/*
 *  stress_xattr
 *	stress the xattr operations
 */
int stress_xattr(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid = getpid();
	int fd, rc = EXIT_FAILURE;
	char filename[PATH_MAX];

	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());
	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_failed_err(name, "open");
		goto out;
	}
	(void)unlink(filename);

	do {
		int i, j;
		int ret;
		char attrname[32];
		char value[32];
		ssize_t sz;
		char *buffer;

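		/* Create as many user.* attributes as the filesystem will allow */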
		for (i = 0; i < 4096; i++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", i);
			snprintf(value, sizeof(value), "orig-value-%i", i);

			ret = fsetxattr(fd, attrname, value, strlen(value), XATTR_CREATE);
			if (ret < 0) {
				if (errno == ENOTSUP) {
					pr_inf(stderr, "%s stressor will be "
						"skipped, filesystem does not "
						"support xattr.\n", name);
					goto out_close;
				}
				if (errno == ENOSPC || errno == EDQUOT)
					break;
				pr_failed_err(name, "fsetxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", j);
			snprintf(value, sizeof(value), "value-%i", j);

			ret = fsetxattr(fd, attrname, value, strlen(value),
				XATTR_REPLACE);
			if (ret < 0) {
				if (errno == ENOSPC || errno == EDQUOT)
					break;
				pr_failed_err(name, "fsetxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			char tmp[sizeof(value)];

			snprintf(attrname, sizeof(attrname), "user.var_%d", j);
			snprintf(value, sizeof(value), "value-%i", j);

			ret = fgetxattr(fd, attrname, tmp, sizeof(tmp));
			if (ret < 0) {
				pr_failed_err(name, "fgetxattr");
				goto out_close;
			}
			if (strncmp(value, tmp, ret)) {
				pr_fail(stderr, "%s: fgetxattr values "
					"different %.*s vs %.*s\n",
					name, ret, value, ret, tmp);
				goto out_close;
			}
		}
		/* Determine how large a buffer we require... */
		sz = flistxattr(fd, NULL, 0);
		if (sz < 0) {
			pr_failed_err(name, "flistxattr");
			goto out_close;
		}
		buffer = malloc(sz);
		if (buffer) {
			/* ...and fetch */
			sz = flistxattr(fd, buffer, sz);
			if (sz < 0) {
				pr_failed_err(name, "flistxattr");
				free(buffer);
				goto out_close;
			}
			free(buffer);
		}
		for (j = 0; j < i; j++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", j);

			ret = fremovexattr(fd, attrname);
			if (ret < 0) {
				pr_failed_err(name, "fremovexattr");
				goto out_close;
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;
out_close:
	(void)close(fd);
out:
	(void)stress_temp_dir_rm(name, pid, instance);
	return rc;
}