Example #1
/*
 * Initialize a testing context given the file descriptors provided by the
 * test setup.
 */
static void
aio_context_init(struct aio_context *ac, int read_fd,
    int write_fd, int buflen, int seconds, void (*cleanup)(void *),
    void *cleanup_arg)
{

	ATF_REQUIRE_MSG(buflen <= BUFFER_MAX,
	    "aio_context_init: buffer too large (%d > %d)",
	    buflen, BUFFER_MAX);
	bzero(ac, sizeof(*ac));
	ac->ac_read_fd = read_fd;
	ac->ac_write_fd = write_fd;
	ac->ac_buflen = buflen;
	srandomdev();
	ac->ac_seed = random();
	aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
	ATF_REQUIRE_MSG(aio_test_buffer(ac->ac_buffer, buflen,
	    ac->ac_seed) != 0, "aio_test_buffer: internal error");
	ac->ac_seconds = seconds;
	ac->ac_cleanup = cleanup;
	ac->ac_cleanup_arg = cleanup_arg;
}
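
The two helpers used above, aio_fill_buffer() and aio_test_buffer(), are not shown. A minimal hypothetical sketch, inferred only from the call sites (fill the buffer with a pattern derived from the seed, then verify that the pattern is still intact; requires <stdlib.h>), might look like the following; the actual implementations may differ:

/*
 * Fill the buffer with a deterministic pseudo-random pattern derived
 * from the seed (hypothetical sketch, not the original helper).
 */
static void
aio_fill_buffer(char *buffer, int len, long seed)
{
	int i;

	srandom(seed);
	for (i = 0; i < len; i++)
		buffer[i] = (char)(random() & 0xff);
}

/*
 * Return non-zero if the buffer still matches the pattern for 'seed'
 * (hypothetical sketch, not the original helper).
 */
static int
aio_test_buffer(char *buffer, int len, long seed)
{
	int i;

	srandom(seed);
	for (i = 0; i < len; i++)
		if (buffer[i] != (char)(random() & 0xff))
			return (0);
	return (1);
}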
Example #2
/*
 * Initialize a testing context given the file descriptors provided by the
 * test setup.
 */
static void
aio_context_init(struct aio_context *ac, const char *test, int read_fd,
    int write_fd, int buflen, int seconds, void (*cleanup)(void *),
    void *cleanup_arg)
{

	if (buflen > BUFFER_MAX)
		errx(-1, "FAIL: %s: aio_context_init: buffer too large",
		    test);
	bzero(ac, sizeof(*ac));
	ac->ac_test = test;
	ac->ac_read_fd = read_fd;
	ac->ac_write_fd = write_fd;
	ac->ac_buflen = buflen;
	srandomdev();
	ac->ac_seed = random();
	aio_fill_buffer(ac->ac_buffer, buflen, ac->ac_seed);
	if (aio_test_buffer(ac->ac_buffer, buflen, ac->ac_seed) == 0)
		errx(-1, "%s: aio_context_init: aio_test_buffer: internal "
		    "error", test);
	ac->ac_seconds = seconds;
	ac->ac_cleanup = cleanup;
	ac->ac_cleanup_arg = cleanup_arg;
}
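
Examples #1 and #2 are two variants of the same aio_context_init() helper: the first reports failures through the ATF assertion macro ATF_REQUIRE_MSG(), while the second aborts with errx() and therefore also keeps the test name in the context (ac_test) so it can be printed in failure messages.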
Example #3
/*
 *  stress_aio
 *	stress asynchronous I/O
 */
int stress_aio(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd, rc = EXIT_FAILURE;
	io_req_t *io_reqs;
	struct sigaction sa;
	int i;
	uint64_t total = 0;
	char filename[PATH_MAX];
	const pid_t pid = getpid();

	if ((io_reqs = calloc((size_t)opt_aio_requests, sizeof(io_req_t))) == NULL) {
		pr_err(stderr, "%s: cannot allocate io request structures\n", name);
		return EXIT_FAILURE;
	}

	if (stress_temp_dir_mk(name, pid, instance) < 0) {
		free(io_reqs);
		return EXIT_FAILURE;
	}
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_failed_err(name, "open");
		goto finish;
	}
	(void)unlink(filename);

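	/*
	 * issue_aio_request() presumably arms each request to raise SIGUSR1
	 * on completion; install the handler that counts those signals
	 * before any requests are started.
	 */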
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART | SA_SIGINFO;
	sa.sa_sigaction = aio_signal_handler;
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		pr_failed_err(name, "sigaction");
	}

	/* Kick off requests */
	for (i = 0; i < opt_aio_requests; i++) {
		aio_fill_buffer(i, io_reqs[i].buffer, BUFFER_SZ);
		if (issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i, aio_write) < 0)
			goto cancel;
	}

	do {
		usleep(250000); /* wait until a signal occurs */

		for (i = 0; opt_do_run && (i < opt_aio_requests); i++) {
			if (io_reqs[i].status != EINPROGRESS)
				continue;

			io_reqs[i].status = aio_error(&io_reqs[i].aiocb);
			switch (io_reqs[i].status) {
			case ECANCELED:
			case 0:
				/* Succeeded or cancelled, so redo another */
				(*counter)++;
				if (issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i,
					(mwc32() & 0x8) ? aio_read : aio_write) < 0)
					goto cancel;
				break;
			case EINPROGRESS:
				break;
			default:
				/* Something went wrong */
				pr_failed_errno(name, "aio_error", io_reqs[i].status);
				goto cancel;
			}
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;

cancel:
	for (i = 0; i < opt_aio_requests; i++) {
		aio_issue_cancel(name, &io_reqs[i]);
		total += io_reqs[i].count;
	}
	(void)close(fd);
finish:
	pr_dbg(stderr, "%s: total of %" PRIu64 " async I/O signals caught (instance %d)\n",
		name, total, instance);
	(void)stress_temp_dir_rm(name, pid, instance);
	free(io_reqs);
	return rc;
}
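
The helper issue_aio_request() is not shown above. Given how it is called (file descriptor, byte offset, request slot, slot index, and either aio_read or aio_write passed as a function pointer) and that completions are reported via SIGUSR1, a hypothetical sketch could look like the code below; the io_req_t field names (aiocb, buffer, status, request) and the exact prototype are assumptions, and it needs <aio.h>, <signal.h>, <string.h> and <errno.h>. Note also that the aio_fill_buffer() called in this example takes (index, buffer, size) and is unrelated to the seed-based helper of examples #1 and #2.

/*
 * Hypothetical sketch of issue_aio_request(): prepare the aiocb for one
 * BUFFER_SZ transfer at the given offset, request SIGUSR1 on completion
 * and submit it with aio_read() or aio_write().
 */
static int issue_aio_request(
	const char *name,
	const int fd,
	const off_t offset,
	io_req_t *io_req,
	const int request,
	int (*aio_func)(struct aiocb *))
{
	(void)memset(&io_req->aiocb, 0, sizeof(io_req->aiocb));

	io_req->request = request;
	io_req->status = EINPROGRESS;
	io_req->aiocb.aio_fildes = fd;
	io_req->aiocb.aio_buf = io_req->buffer;
	io_req->aiocb.aio_nbytes = BUFFER_SZ;
	io_req->aiocb.aio_offset = offset;

	/* Deliver SIGUSR1 when this request completes, tagged with its slot */
	io_req->aiocb.aio_sigevent.sigev_notify = SIGEV_SIGNAL;
	io_req->aiocb.aio_sigevent.sigev_signo = SIGUSR1;
	io_req->aiocb.aio_sigevent.sigev_value.sival_int = request;

	if (aio_func(&io_req->aiocb) < 0) {
		pr_failed_err(name, "aio_read/aio_write");
		return -1;
	}
	return 0;
}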