示例#1
0
/*
 *  stress_sendfile
 *	stress reading of a temp file and writing to /dev/null via sendfile
 */
/*
 *  stress_sendfile
 *	stress reading of a temp file and writing to /dev/null via sendfile
 *	counter:  incremented once per successful sendfile call
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  stop after this many ops (0 = no limit)
 *	name:     stressor name for diagnostics
 *	Returns EXIT_SUCCESS or EXIT_FAILURE.
 */
int stress_sendfile(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	char filename[PATH_MAX];
	int fdin, fdout, ret = EXIT_SUCCESS;
	size_t sz;
	const pid_t pid = getpid();

	if (!set_sendfile_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_sendfile_size = MAX_SENDFILE_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_sendfile_size = MIN_SENDFILE_SIZE;
	}
	sz = (size_t)opt_sendfile_size;

	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;

	(void)umask(0077);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	if ((fdin = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_fail_err(name, "open");
		ret = EXIT_FAILURE;
		goto dir_out;
	}
	/* Best effort pre-size of the input file so sendfile has data to read */
	(void)posix_fallocate(fdin, (off_t)0, (off_t)sz);
	if ((fdout = open("/dev/null", O_WRONLY)) < 0) {
		pr_fail_err(name, "open");
		ret = EXIT_FAILURE;
		goto close_in;
	}

	do {
		off_t offset = 0;
		if (sendfile(fdout, fdin, &offset, sz) < 0) {
			/* Interruption or transient shortage is not a failure, retry */
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_fail_err(name, "sendfile");
			ret = EXIT_FAILURE;
			goto close_out;
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

close_out:
	(void)close(fdout);
close_in:
	(void)close(fdin);
	(void)unlink(filename);
dir_out:
	(void)stress_temp_dir_rm(name, pid, instance);

	return ret;
}
示例#2
0
/*
 *  stress_msync()
 *	stress msync
 */
/*
 *  stress_msync()
 *	stress msync: dirty pages of a file-backed mapping and MS_SYNC
 *	them to disk (verifying the file contents), then modify the file
 *	and use MS_INVALIDATE to pull the new contents back into the
 *	mapping (verifying memory). SIGBUS during the loop just retries.
 *	counter:  incremented once per sync+invalidate cycle
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  stop after this many ops (0 = no limit)
 *	name:     stressor name for diagnostics
 */
int stress_msync(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	const size_t min_size = 2 * page_size;
	size_t sz = min_size;
	ssize_t ret, rc = EXIT_SUCCESS;

	const pid_t pid = getpid();
	int fd = -1;
	char filename[PATH_MAX];

	ret = sigsetjmp(jmp_env, 1);
	if (ret) {
		pr_fail_err(name, "sigsetjmp");
		return EXIT_FAILURE;
	}
	if (stress_sighandler(name, SIGBUS, stress_sigbus_handler, NULL) < 0)
		return EXIT_FAILURE;

	if (!set_msync_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_msync_bytes = MAX_MSYNC_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_msync_bytes = MIN_MSYNC_BYTES;
	}
	/* Round down to whole pages, but keep at least two pages */
	sz = opt_msync_bytes & ~(page_size - 1);
	if (sz < min_size)
		sz = min_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	rc = stress_temp_dir_mk(name, pid, instance);
	if (rc < 0)
		return exit_status(-rc);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		(void)unlink(filename);
		(void)stress_temp_dir_rm(name, pid, instance);

		return rc;
	}
	/* File stays open but nameless; auto-removed when fd is closed */
	(void)unlink(filename);

	if (ftruncate(fd, sz) < 0) {
		pr_err(stderr, "%s: ftruncate failed, errno=%d (%s)\n",
			name, errno, strerror(errno));
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);

		return EXIT_FAILURE;
	}

	buf = (uint8_t *)mmap(NULL, sz,
		PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		pr_err(stderr, "%s: failed to mmap memory, errno=%d (%s)\n",
			name, errno, strerror(errno));
		rc = EXIT_NO_RESOURCE;
		goto err;
	}

	do {
		off_t offset;
		uint8_t val, data[page_size];

		/* Re-arm the jump target; a SIGBUS just retries the loop */
		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			/* Try again */
			continue;
		}
		/*
		 *  Change data in memory, msync to disk
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);
		ret = msync(buf + offset, page_size, MS_SYNC);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_SYNC on "
				"offset %jd failed, errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_invalidate;
		}
		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seek to offset %jd, "
				"errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_invalidate;
		}
		if (stress_page_check(data, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in file different "
				"to data in memory\n", name);
		}

do_invalidate:
		/*
		 *  Now change data on disc, msync invalidate
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);

		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seek to offset %jd, errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_next;
		}
		ret = msync(buf + offset, page_size, MS_INVALIDATE);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_INVALIDATE on "
				"offset %jd failed, errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_next;
		}
		if (stress_page_check(buf + offset, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in memory "
				"different to data in file\n", name);
		}
do_next:

		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)munmap((void *)buf, sz);
err:
	(void)close(fd);
	(void)stress_temp_dir_rm(name, pid, instance);

	if (sigbus_count)
		pr_inf(stdout, "%s: caught %" PRIu64 " SIGBUS signals\n",
			name, sigbus_count);
	return rc;
}
/*
 *  stress_fiemap
 *	stress fiemap IOCTL
 */
/*
 *  stress_fiemap
 *	stress fiemap IOCTL
 *	Spawns MAX_FIEMAP_PROCS child processes that exercise FIEMAP on a
 *	shared temp file while this process runs the file writer. Per-child
 *	op counts are accumulated via a shared anonymous mapping.
 *	counter:  accumulates all child op counts
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  total op budget split across the children (0 = no limit)
 *	name:     stressor name for diagnostics
 */
int stress_fiemap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pids[MAX_FIEMAP_PROCS], mypid;
	int ret, fd, rc = EXIT_FAILURE, status;
	char filename[PATH_MAX];
	size_t i;
	const size_t counters_sz = sizeof(uint64_t) * MAX_FIEMAP_PROCS;
	uint64_t *counters;
	/* Split the op budget evenly; the first child absorbs the remainder */
	uint64_t ops_per_proc = max_ops / MAX_FIEMAP_PROCS;
	uint64_t ops_remaining = max_ops % MAX_FIEMAP_PROCS;

	if (!set_fiemap_size) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_fiemap_size = MAX_SEEK_SIZE;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_fiemap_size = MIN_SEEK_SIZE;
	}

	/* We need some share memory for counter accounting */
	counters = mmap(NULL, counters_sz, PROT_READ | PROT_WRITE,
		MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (counters == MAP_FAILED) {
		pr_err(stderr, "%s: mmap failed: errno=%d (%s)\n",
			name, errno, strerror(errno));
		return EXIT_NO_RESOURCE;
	}
	memset(counters, 0, counters_sz);

	mypid = getpid();
	ret = stress_temp_dir_mk(name, mypid, instance);
	if (ret < 0) {
		rc = exit_status(-ret);
		goto clean;
	}

	(void)stress_temp_filename(filename, sizeof(filename),
		name, mypid, instance, mwc32());
	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto clean;
	}
	(void)unlink(filename);

	for (i = 0; i < MAX_FIEMAP_PROCS; i++) {
		uint64_t ops = ops_per_proc +
			((i == 0) ? ops_remaining : 0);
		pids[i] = stress_fiemap_spawn(name, fd,
				&counters[i], ops);
		if (pids[i] < 0) {
			size_t j;

			/*
			 *  Spawn failed: kill and reap the children already
			 *  started before tearing down the fd and the shared
			 *  counters they are still using
			 */
			for (j = 0; j < i; j++) {
				(void)kill(pids[j], SIGKILL);
				(void)waitpid(pids[j], &status, 0);
				(*counter) += counters[j];
			}
			goto fail;
		}
	}
	rc = stress_fiemap_writer(name, fd, counters, max_ops);

	/* And reap stressors */
	for (i = 0; i < MAX_FIEMAP_PROCS; i++) {
		(void)kill(pids[i], SIGKILL);
		(void)waitpid(pids[i], &status, 0);
		(*counter) += counters[i];
	}
fail:
	(void)close(fd);
clean:
	(void)munmap(counters, counters_sz);
	(void)stress_temp_dir_rm(name, mypid, instance);
	return rc;
}
/*
 *  stress_copy_file
 *	stress reading chunks of file using copy_file_range()
 */
/*
 *  stress_copy_file
 *	stress reading chunks of file using copy_file_range()
 *	counter:  incremented once per successful copy
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  stop after this many ops (0 = no limit)
 *	name:     stressor name for diagnostics
 */
int stress_copy_file(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd_in, fd_out, ret, rc = EXIT_FAILURE;
	char filename[PATH_MAX], tmp[PATH_MAX];
	pid_t pid = getpid();

	if (!set_copy_file_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_copy_file_bytes = MAX_HDD_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_copy_file_bytes = MIN_HDD_BYTES;
	}

	/* Ensure the file is strictly larger than one copy chunk so the
	   random offset calculation below never takes a modulo of zero */
	if (opt_copy_file_bytes < DEFAULT_COPY_FILE_SIZE)
		opt_copy_file_bytes = DEFAULT_COPY_FILE_SIZE * 2;

	/*
	 *  If making the temp dir fails there is nothing to clean up,
	 *  so return immediately rather than removing a dir that was
	 *  never created
	 */
	ret = stress_temp_dir_mk(name, pid, instance);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());
	snprintf(tmp, sizeof(tmp), "%s-orig", filename);
	if ((fd_in = open(tmp, O_CREAT | O_RDWR,  S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto tidy_dir;
	}
	(void)unlink(tmp);
	if (ftruncate(fd_in, opt_copy_file_bytes) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "ftruncate");
		goto tidy_in;
	}
	if (fsync(fd_in) < 0) {
		pr_fail_err(name, "fsync");
		goto tidy_in;
	}

	snprintf(tmp, sizeof(tmp), "%s-copy", filename);
	if ((fd_out = open(tmp, O_CREAT | O_WRONLY,  S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto tidy_in;
	}
	(void)unlink(tmp);

	do {
		ssize_t cret;
		loff_t off_in, off_out;

		/* Random in-range source and destination chunk offsets */
		off_in = mwc64() % (opt_copy_file_bytes - DEFAULT_COPY_FILE_SIZE);
		off_out = mwc64() % (opt_copy_file_bytes - DEFAULT_COPY_FILE_SIZE);

		cret = sys_copy_file_range(fd_in, &off_in, fd_out, &off_out, DEFAULT_COPY_FILE_SIZE, 0);
		if (cret < 0) {
			/* Interrupted or transiently unavailable, retry */
			if ((errno == EAGAIN) || (errno == EINTR))
				continue;
			pr_fail_err(name, "copy_file_range");
			goto tidy_out;
		}
		(void)fsync(fd_out);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
	rc = EXIT_SUCCESS;

tidy_out:
	(void)close(fd_out);
tidy_in:
	(void)close(fd_in);
tidy_dir:
	(void)stress_temp_dir_rm(name, pid, instance);

	return rc;
}
示例#5
0
/*
 *  stress_aio
 *	stress asynchronous I/O
 */
/*
 *  stress_aio
 *	stress asynchronous I/O
 *	Keeps opt_aio_requests POSIX aio requests in flight against an
 *	unlinked temp file, re-issuing each one (randomly as a read or a
 *	write) whenever it completes or is cancelled.
 *	counter:  incremented once per completed or cancelled request
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  stop after this many ops (0 = no limit)
 *	name:     stressor name for diagnostics
 *	Returns EXIT_SUCCESS or EXIT_FAILURE.
 */
int stress_aio(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd, rc = EXIT_FAILURE;
	io_req_t *io_reqs;
	struct sigaction sa;
	int i;
	uint64_t total = 0;
	char filename[PATH_MAX];
	const pid_t pid = getpid();

	/* One request slot per in-flight aio; calloc zeroes the status fields */
	if ((io_reqs = calloc((size_t)opt_aio_requests, sizeof(io_req_t))) == NULL) {
		pr_err(stderr, "%s: cannot allocate io request structures\n", name);
		return EXIT_FAILURE;
	}

	if (stress_temp_dir_mk(name, pid, instance) < 0) {
		free(io_reqs);
		return EXIT_FAILURE;
	}
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_failed_err(name, "open");
		goto finish;
	}
	/* File stays open but nameless; removed automatically on close */
	(void)unlink(filename);

	/*
	 * Install aio_signal_handler on SIGUSR1 — presumably raised on aio
	 * completion by issue_aio_request's sigevent setup (TODO confirm).
	 * Handler installation failure is logged but non-fatal.
	 */
	sigemptyset(&sa.sa_mask);
	sa.sa_flags = SA_RESTART | SA_SIGINFO;
	sa.sa_sigaction = aio_signal_handler;
	if (sigaction(SIGUSR1, &sa, NULL) < 0) {
		pr_failed_err(name, "sigaction");
	}

	/* Kick off requests */
	for (i = 0; i < opt_aio_requests; i++) {
		aio_fill_buffer(i, io_reqs[i].buffer, BUFFER_SZ);
		if (issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i, aio_write) < 0)
			goto cancel;
	}

	do {
		usleep(250000); /* wait until a signal occurs */

		/* Poll each request slot; re-issue any that have completed */
		for (i = 0; opt_do_run && (i < opt_aio_requests); i++) {
			if (io_reqs[i].status != EINPROGRESS)
				continue;

			io_reqs[i].status = aio_error(&io_reqs[i].aiocb);
			switch (io_reqs[i].status) {
			case ECANCELED:
			case 0:
				/* Succeeded or cancelled, so redo another */
				(*counter)++;
				if (issue_aio_request(name, fd, (off_t)i * BUFFER_SZ, &io_reqs[i], i,
					(mwc32() & 0x8) ? aio_read : aio_write) < 0)
					goto cancel;
				break;
			case EINPROGRESS:
				break;
			default:
				/* Something went wrong */
				pr_failed_errno(name, "aio_error", io_reqs[i].status);
				goto cancel;
			}
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;

cancel:
	/* Cancel any outstanding requests and tally per-request signal counts */
	for (i = 0; i < opt_aio_requests; i++) {
		aio_issue_cancel(name, &io_reqs[i]);
		total += io_reqs[i].count;
	}
	(void)close(fd);
finish:
	pr_dbg(stderr, "%s: total of %" PRIu64 " async I/O signals caught (instance %d)\n",
		name, total, instance);
	(void)stress_temp_dir_rm(name, pid, instance);
	free(io_reqs);
	return rc;
}
示例#6
0
/*
 *  stress_mmap()
 *	stress mmap
 */
/*
 *  stress_mmap()
 *	stress mmap
 *	Each iteration maps a region (anonymous, or file backed when
 *	OPT_FLAGS_MMAP_FILE is set), touches and verifies it, unmaps it
 *	page by page in random order, then (where MAP_FIXED exists)
 *	maps the pages back in random order at their original addresses.
 *	counter:  incremented once per complete map/unmap/remap cycle
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  stop after this many cycles (0 = no limit)
 *	name:     stressor name for diagnostics
 */
int stress_mmap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	size_t sz, pages4k;
#if !defined(__gnu_hurd__)
	/* msync mode for the file-backed case, selected by option flag */
	const int ms_flags = (opt_flags & OPT_FLAGS_MMAP_ASYNC) ?
		MS_ASYNC : MS_SYNC;
#endif
	const pid_t pid = getpid();
	int fd = -1, flags = MAP_PRIVATE | MAP_ANONYMOUS;
	char filename[PATH_MAX];

	(void)instance;
#ifdef MAP_POPULATE
	flags |= MAP_POPULATE;	/* pre-fault pages where supported */
#endif

	if (!set_mmap_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_mmap_bytes = MAX_MMAP_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_mmap_bytes = MIN_MMAP_BYTES;
	}
	/* Round down to a whole number of pages */
	sz = opt_mmap_bytes & ~(page_size - 1);
	pages4k = sz / page_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		/*
		 *  File-backed mode: create an unlinked temp file of size sz
		 *  (seek to the end and write one byte) to map against
		 */
		ssize_t ret;
		char ch = '\0';

		if (stress_temp_dir_mk(name, pid, instance) < 0)
			return EXIT_FAILURE;

		(void)stress_temp_filename(filename, sizeof(filename),
			name, pid, instance, mwc32());

		(void)umask(0077);
		if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
			pr_failed_err(name, "open");
			(void)unlink(filename);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
		(void)unlink(filename);
		if (lseek(fd, sz - sizeof(ch), SEEK_SET) < 0) {
			pr_failed_err(name, "lseek");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
redo:
		ret = write(fd, &ch, sizeof(ch));
		if (ret != sizeof(ch)) {
			if ((errno == EAGAIN) || (errno == EINTR))
				goto redo;
			pr_failed_err(name, "write");
			(void)close(fd);
			(void)stress_temp_dir_rm(name, pid, instance);

			return EXIT_FAILURE;
		}
		/* Switch from private anonymous to shared file-backed mapping */
		flags &= ~(MAP_ANONYMOUS | MAP_PRIVATE);
		flags |= MAP_SHARED;
	}

	do {
		/* Per-page state: mapped[] holds PAGE_MAPPED/0/PAGE_MAPPED_FAIL,
		   mappings[] holds each page's address (VLAs of pages4k entries) */
		uint8_t mapped[pages4k];
		uint8_t *mappings[pages4k];
		size_t n;

		if (!opt_do_run)
			break;
		buf = mmap(NULL, sz, PROT_READ | PROT_WRITE, flags, fd, 0);
		if (buf == MAP_FAILED) {
			/* Force MAP_POPULATE off, just in case */
#ifdef MAP_POPULATE
			flags &= ~MAP_POPULATE;
#endif
			continue;	/* Try again */
		}
		if (opt_flags & OPT_FLAGS_MMAP_FILE) {
			memset(buf, 0xff, sz);
#if !defined(__gnu_hurd__)
			(void)msync(buf, sz, ms_flags);
#endif
		}
		(void)madvise_random(buf, sz);
		(void)mincore_touch_pages(buf, opt_mmap_bytes);
		stress_mmap_mprotect(name, buf, sz);
		memset(mapped, PAGE_MAPPED, sizeof(mapped));
		for (n = 0; n < pages4k; n++)
			mappings[n] = buf + (n * page_size);

		/* Ensure we can write to the mapped pages */
		stress_mmap_set(buf, sz);
		if (opt_flags & OPT_FLAGS_VERIFY) {
			if (stress_mmap_check(buf, sz) < 0)
				pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
					"not contain expected data\n", name, sz);
		}

		/*
		 *  Step #1, unmap all pages in random order
		 */
		(void)mincore_touch_pages(buf, opt_mmap_bytes);
		for (n = pages4k; n; ) {
			/* Pick a random start page, then scan forward for one
			   still mapped; unmap exactly one page per pass */
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (mapped[page] == PAGE_MAPPED) {
					mapped[page] = 0;
					(void)madvise_random(mappings[page], page_size);
					stress_mmap_mprotect(name, mappings[page], page_size);
					(void)munmap(mappings[page], page_size);
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
		(void)munmap(buf, sz);
#ifdef MAP_FIXED
		/*
		 *  Step #2, map them back in random order
		 */
		for (n = pages4k; n; ) {
			uint64_t j, i = mwc64() % pages4k;
			for (j = 0; j < n; j++) {
				uint64_t page = (i + j) % pages4k;
				if (!mapped[page]) {
					/* File-backed pages map their own file offset;
					   anonymous pages use offset 0 */
					off_t offset = (opt_flags & OPT_FLAGS_MMAP_FILE) ?
							page * page_size : 0;
					/*
					 * Attempt to map them back into the original address, this
					 * may fail (it's not the most portable operation), so keep
					 * track of failed mappings too
					 */
					mappings[page] = mmap(mappings[page], page_size, PROT_READ | PROT_WRITE, MAP_FIXED | flags, fd, offset);
					if (mappings[page] == MAP_FAILED) {
						mapped[page] = PAGE_MAPPED_FAIL;
						mappings[page] = NULL;
					} else {
						(void)mincore_touch_pages(mappings[page], page_size);
						(void)madvise_random(mappings[page], page_size);
						stress_mmap_mprotect(name, mappings[page], page_size);
						mapped[page] = PAGE_MAPPED;
						/* Ensure we can write to the mapped page */
						stress_mmap_set(mappings[page], page_size);
						if (stress_mmap_check(mappings[page], page_size) < 0)
							pr_fail(stderr, "%s: mmap'd region of %zu bytes does "
								"not contain expected data\n", name, page_size);
						if (opt_flags & OPT_FLAGS_MMAP_FILE) {
							memset(mappings[page], n, page_size);
#if !defined(__gnu_hurd__)
							(void)msync(mappings[page], page_size, ms_flags);
#endif
						}
					}
					n--;
					break;
				}
				if (!opt_do_run)
					goto cleanup;
			}
		}
#endif
cleanup:
		/*
		 *  Step #3, unmap them all
		 */
		for (n = 0; n < pages4k; n++) {
			if (mapped[n] & PAGE_MAPPED) {
				(void)madvise_random(mappings[n], page_size);
				stress_mmap_mprotect(name, mappings[n], page_size);
				(void)munmap(mappings[n], page_size);
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	if (opt_flags & OPT_FLAGS_MMAP_FILE) {
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);
	}
	return EXIT_SUCCESS;
}
示例#7
0
/*
 *  stress_xattr
 *	stress the xattr operations
 */
int stress_xattr(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid = getpid();
	int ret, fd, rc = EXIT_FAILURE;
	char filename[PATH_MAX];

	ret = stress_temp_dir_mk(name, pid, instance);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());
	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto out;
	}
	(void)unlink(filename);

	do {
		int i, j;
		int ret;
		char attrname[32];
		char value[32];
		ssize_t sz;
		char *buffer;

		for (i = 0; i < 4096; i++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", i);
			snprintf(value, sizeof(value), "orig-value-%d", i);

			ret = fsetxattr(fd, attrname, value, strlen(value), XATTR_CREATE);
			if (ret < 0) {
				if (errno == ENOTSUP) {
					pr_inf(stderr, "%s stressor will be "
						"skipped, filesystem does not "
						"support xattr.\n", name);
				}
				if (errno == ENOSPC || errno == EDQUOT)
					break;
				pr_fail_err(name, "fsetxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", j);
			snprintf(value, sizeof(value), "value-%d", j);

			ret = fsetxattr(fd, attrname, value, strlen(value),
				XATTR_REPLACE);
			if (ret < 0) {
				if (errno == ENOSPC || errno == EDQUOT)
					break;
				pr_fail_err(name, "fsetxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			char tmp[sizeof(value)];

			snprintf(attrname, sizeof(attrname), "user.var_%d", j);
			snprintf(value, sizeof(value), "value-%d", j);

			ret = fgetxattr(fd, attrname, tmp, sizeof(tmp));
			if (ret < 0) {
				pr_fail_err(name, "fgetxattr");
				goto out_close;
			}
			if (strncmp(value, tmp, ret)) {
				pr_fail(stderr, "%s: fgetxattr values "
					"different %.*s vs %.*s\n",
					name, ret, value, ret, tmp);
				goto out_close;
			}
		}
		/* Determine how large a buffer we required... */
		sz = flistxattr(fd, NULL, 0);
		if (sz < 0) {
			pr_fail_err(name, "flistxattr");
			goto out_close;
		}
		buffer = malloc(sz);
		if (buffer) {
			/* ...and fetch */
			sz = flistxattr(fd, buffer, sz);
			free(buffer);

			if (sz < 0) {
				pr_fail_err(name, "flistxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", j);
			
			ret = fremovexattr(fd, attrname);
			if (ret < 0) {
				pr_fail_err(name, "fremovexattr");
				goto out_close;
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;
out_close:
	(void)close(fd);
out:
	(void)stress_temp_dir_rm(name, pid, instance);
	return rc;
}
示例#8
0
/*
 *  stress_aio_linux
 *	stress asynchronous I/O using the linux specific aio ABI
 */
/*
 *  stress_aio_linux
 *	stress asynchronous I/O using the linux specific aio ABI
 *	Each iteration submits a batch of opt_aio_linux_requests async
 *	pwrite requests at random offsets and reaps all their completions.
 *	counter:  incremented once per submitted+reaped batch
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  stop after this many batches (0 = no limit)
 *	name:     stressor name for diagnostics
 */
int stress_aio_linux(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd, rc = EXIT_FAILURE;
	char filename[PATH_MAX];
	const pid_t pid = getpid();
	aio_context_t ctx = 0;

	if (!set_aio_linux_requests) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_aio_linux_requests = MAX_AIO_REQUESTS;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_aio_linux_requests = MIN_AIO_REQUESTS;
	}
	if (sys_io_setup(opt_aio_linux_requests, &ctx) < 0) {
		pr_failed_err(name, "io_setup");
		return EXIT_FAILURE;
	}
	if (stress_temp_dir_mk(name, pid, instance) < 0) {
		/* Don't leak the aio context on this early exit */
		(void)sys_io_destroy(ctx);
		return EXIT_FAILURE;
	}
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		pr_failed_err(name, "open");
		goto finish;
	}
	(void)unlink(filename);

	do {
		struct iocb cb[opt_aio_linux_requests];
		struct iocb *cbs[opt_aio_linux_requests];
		struct io_event events[opt_aio_linux_requests];
		uint8_t buffers[opt_aio_linux_requests][BUFFER_SZ];
		int ret, i;
		long n;

		for (i = 0; i < opt_aio_linux_requests; i++)
			aio_linux_fill_buffer(i, buffers[i], BUFFER_SZ);

		/* Build a batch of async pwrite requests at random offsets */
		memset(cb, 0, sizeof(cb));
		for (i = 0; i < opt_aio_linux_requests; i++) {
			cb[i].aio_fildes = fd;
			cb[i].aio_lio_opcode = IOCB_CMD_PWRITE;
			cb[i].aio_buf = (long)buffers[i];
			cb[i].aio_offset = mwc16() * BUFFER_SZ;
			cb[i].aio_nbytes = BUFFER_SZ;
			cbs[i] = &cb[i];
		}
		ret = sys_io_submit(ctx, opt_aio_linux_requests, cbs);
		if (ret < 0) {
			if (errno == EAGAIN)
				continue;
			pr_failed_err(name, "io_submit");
			/* Hard submit error: keep rc = EXIT_FAILURE */
			goto close_fd;
		}

		/* Reap all completions from this batch */
		n = opt_aio_linux_requests;
		do {
			struct timespec timeout, *timeout_ptr;

			if (clock_gettime(CLOCK_REALTIME, &timeout) < 0) {
				timeout_ptr = NULL;
			} else {
				/* Wait up to ~1ms, normalizing the nanoseconds */
				timeout.tv_nsec += 1000000;
				if (timeout.tv_nsec > 1000000000) {
					timeout.tv_nsec -= 1000000000;
					timeout.tv_sec++;
				}
				timeout_ptr = &timeout;
			}

			ret = sys_io_getevents(ctx, 1, n, events, timeout_ptr);
			if (ret < 0) {
				if ((errno == EINTR) && (opt_do_run))
					continue;
				pr_failed_err(name, "io_getevents");
				break;
			} else {
				n -= ret;
			}
		} while ((n > 0) && opt_do_run);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;
close_fd:
	(void)close(fd);
finish:
	(void)sys_io_destroy(ctx);
	(void)stress_temp_dir_rm(name, pid, instance);
	return rc;
}
示例#9
0
/*
 *  stress_fifo
 *	stress by heavy fifo I/O
 */
/*
 *  stress_fifo
 *	stress by heavy fifo I/O
 *	Spawns opt_fifo_readers reader children on a fifo, then writes an
 *	incrementing 64 bit value down it as fast as possible.
 */
int stress_fifo(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pids[MAX_FIFO_READERS];
	int fd;
	char fifoname[PATH_MAX];
	uint64_t i, val = 0ULL;
	int ret = EXIT_FAILURE;
	const pid_t pid = getpid();

	/* Pin the reader count to its limits if min/max mode is requested */
	if (!set_fifo_readers) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_fifo_readers = MAX_FIFO_READERS;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_fifo_readers = MIN_FIFO_READERS;
	}

	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;

	(void)stress_temp_filename(fifoname, sizeof(fifoname),
		name, pid, instance, mwc32());
	(void)umask(0077);

	if (mkfifo(fifoname, S_IRUSR | S_IWUSR) < 0) {
		pr_err(stderr, "%s: mkfifo failed: errno=%d (%s)\n",
			name, errno, strerror(errno));
		goto tidy;
	}

	/* Spawn the readers; bail to reap on any failure or early stop */
	memset(pids, 0, sizeof(pids));
	for (i = 0; i < opt_fifo_readers; i++) {
		pids[i] = fifo_spawn(stress_fifo_reader, name, fifoname);
		if ((pids[i] < 0) || !opt_do_run)
			goto reap;
	}

	fd = open(fifoname, O_WRONLY);
	if (fd < 0) {
		pr_err(stderr, "%s: fifo write open failed: errno=%d (%s)\n",
			name, errno, strerror(errno));
		goto reap;
	}

	/* Writer loop: push an ever-incrementing value down the fifo */
	do {
		ssize_t wret;

		wret = write(fd, &val, sizeof(val));
		if (wret > 0) {
			val++;
			(*counter)++;
			continue;
		}
		if ((errno == EAGAIN) || (errno == EINTR))
			continue;
		if (errno) {
			pr_failed_dbg(name, "write");
			break;
		}
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)close(fd);
	ret = EXIT_SUCCESS;
reap:
	/* Terminate and reap every reader that was started */
	for (i = 0; i < opt_fifo_readers; i++) {
		if (pids[i] > 0) {
			int status;

			(void)kill(pids[i], SIGKILL);
			(void)waitpid(pids[i], &status, 0);
		}
	}
tidy:
	(void)unlink(fifoname);
	(void)stress_temp_dir_rm(name, pid, instance);

	return ret;
}
示例#10
0
/*
 *  stress_hdd
 *	stress I/O via writes
 */
/*
 *  stress_hdd_verify_buf
 *	verify the marker bytes in a buffer just read back from the file.
 *	"offset" is the file offset the buffer was read from; the write
 *	phases lay down a marker every 512 bytes whose value is the low
 *	8 bits of its file offset. Returns the count of bad markers.
 */
static uint64_t stress_hdd_verify_buf(const uint8_t *buf, const uint64_t offset)
{
    uint64_t baddata = 0;
    size_t j;

    for (j = 0; j < opt_hdd_write_size; j += 512) {
        uint8_t v = (offset + j) & 0xff;
        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
            /* Write seq has written to all of the file, so it should always be OK */
            if (buf[j] != v)
                baddata++;
        } else {
            /* Write rnd has written to some of the file, so data either zero or OK */
            if (buf[j] != 0 && buf[j] != v)
                baddata++;
        }
    }
    return baddata;
}

/*
 *  stress_hdd
 *	stress I/O via writes
 *	Runs the selected combination of sequential/random write and read
 *	phases over a temp file, with optional data verification.
 *	counter:  incremented once per read/write chunk
 *	instance: stressor instance number, used for temp file naming
 *	max_ops:  stop after this many ops (0 = no limit)
 *	name:     stressor name for diagnostics
 */
int stress_hdd(
    uint64_t *const counter,
    const uint32_t instance,
    const uint64_t max_ops,
    const char *name)
{
    uint8_t *buf = NULL;
    uint64_t i, min_size, remainder;
    const pid_t pid = getpid();
    int ret, rc = EXIT_FAILURE;
    char filename[PATH_MAX];
    int flags = O_CREAT | O_RDWR | O_TRUNC | opt_hdd_oflags;
    int fadvise_flags = opt_hdd_flags & HDD_OPT_FADV_MASK;

    if (!set_hdd_bytes) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_bytes = MAX_HDD_BYTES;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_bytes = MIN_HDD_BYTES;
    }

    if (!set_hdd_write_size) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_write_size = MAX_HDD_WRITE_SIZE;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_write_size = MIN_HDD_WRITE_SIZE;
    }

    /* O_DIRECT needs alignment-sized I/O, iovec mode needs a vector's worth */
    if (opt_hdd_flags & HDD_OPT_O_DIRECT) {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * BUF_ALIGNMENT : MIN_HDD_WRITE_SIZE;
    } else {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * MIN_HDD_WRITE_SIZE : MIN_HDD_WRITE_SIZE;
    }
    /* Ensure I/O size is not too small */
    if (opt_hdd_write_size < min_size) {
        opt_hdd_write_size = min_size;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes\n",
               name, opt_hdd_write_size);
    }

    /* Ensure we get same sized iovec I/O sizes */
    remainder = opt_hdd_write_size % HDD_IO_VEC_MAX;
    if ((opt_hdd_flags & HDD_OPT_IOVEC) && (remainder != 0)) {
        opt_hdd_write_size += HDD_IO_VEC_MAX - remainder;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes in iovec mode\n",
               name, opt_hdd_write_size);
    }

    /* Ensure complete file size is not less than the I/O size */
    if (opt_hdd_bytes < opt_hdd_write_size) {
        opt_hdd_bytes = opt_hdd_write_size;
        pr_inf(stderr, "%s: increasing file size to write size of %" PRIu64 " bytes\n",
               name, opt_hdd_bytes);
    }


    if (stress_temp_dir_mk(name, pid, instance) < 0)
        return EXIT_FAILURE;

    /* Must have some write option */
    if ((opt_hdd_flags & HDD_OPT_WR_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_WR_SEQ;
    /* Must have some read option */
    if ((opt_hdd_flags & HDD_OPT_RD_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_RD_SEQ;

    /* Aligned buffer, required for O_DIRECT I/O */
    ret = posix_memalign((void **)&buf, BUF_ALIGNMENT, (size_t)opt_hdd_write_size);
    if (ret || !buf) {
        pr_err(stderr, "%s: cannot allocate buffer\n", name);
        (void)stress_temp_dir_rm(name, pid, instance);
        return EXIT_FAILURE;
    }

    for (i = 0; i < opt_hdd_write_size; i++)
        buf[i] = mwc8();

    (void)stress_temp_filename(filename, sizeof(filename),
                               name, pid, instance, mwc32());
    do {
        int fd;

        (void)umask(0077);
        if ((fd = open(filename, flags, S_IRUSR | S_IWUSR)) < 0) {
            pr_failed_err(name, "open");
            goto finish;
        }
        if (ftruncate(fd, (off_t)0) < 0) {
            pr_failed_err(name, "ftruncate");
            (void)close(fd);
            goto finish;
        }
        (void)unlink(filename);

        if (stress_hdd_advise(name, fd, fadvise_flags) < 0) {
            (void)close(fd);
            goto finish;
        }

        /* Random Write */
        if (opt_hdd_flags & HDD_OPT_WR_RND) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                size_t j;

                /* First write goes at the end, extending the file
                   out to its full size; the rest are random, 512
                   byte aligned offsets */
                off_t offset = (i == 0) ?
                               opt_hdd_bytes :
                               (mwc64() % opt_hdd_bytes) & ~511;
                ssize_t ret;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                /* Fill chunk with per-file-offset marker bytes */
                for (j = 0; j < opt_hdd_write_size; j++)
                    buf[j] = (offset + j) & 0xff;

                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }
        /* Sequential Write */
        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                size_t j;
seq_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                /* Lay down a marker every 512 bytes; i is the chunk's file offset */
                for (j = 0; j < opt_hdd_write_size; j += 512)
                    buf[j] = (i + j) & 0xff;
                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }

        /* Sequential Read */
        if (opt_hdd_flags & HDD_OPT_RD_SEQ) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            if (lseek(fd, 0, SEEK_SET) < 0) {
                pr_failed_err(name, "lseek");
                (void)close(fd);
                goto finish;
            }
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
seq_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    /* Buffer was read from file offset i */
                    baddata += stress_hdd_verify_buf(buf, i);
                }
                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete sequential reads\n",
                       name, misreads);
            if (baddata)
                pr_fail(stderr, "%s: incorrect data found %" PRIu64 " times\n",
                        name, baddata);
        }
        /* Random Read */
        if (opt_hdd_flags & HDD_OPT_RD_RND) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                off_t offset = (mwc64() % (opt_hdd_bytes - opt_hdd_write_size)) & ~511;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;
                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    /* Buffer was read from the random file offset, not i */
                    baddata += stress_hdd_verify_buf(buf, (uint64_t)offset);
                }

                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete random reads\n",
                       name, misreads);
            /* NOTE(review): the original omitted a baddata report here,
               keep parity with the sequential read phase */
            if (baddata)
                pr_fail(stderr, "%s: incorrect data found %" PRIu64 " times\n",
                        name, baddata);
        }
        (void)close(fd);

    } while (opt_do_run && (!max_ops || *counter < max_ops));

    rc = EXIT_SUCCESS;
finish:
    free(buf);
    (void)stress_temp_dir_rm(name, pid, instance);
    return rc;
}
示例#11
0
/*
 *  stress_stackmmap
 *	stress a file memory map'd stack: run a context function whose
 *	stack lives in a file-backed shared mapping, and recover from
 *	the SIGSEGV raised when that stack is overrun
 */
int stress_stackmmap(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	int fd, ret;
	volatile int rc = EXIT_FAILURE;		/* volatile: could be clobbered by siglongjmp/swapcontext */
	const pid_t pid = getpid();
        stack_t ss;				/* alternative signal stack descriptor */
	struct sigaction new_action;
	char filename[PATH_MAX];		/* temp file backing the mmap'd stack */
	uint8_t stack_sig[SIGSTKSZ] ALIGN64;	/* ensure we have a sig stack */

	page_size = stress_get_pagesize();
	page_mask = ~(page_size - 1);

	/*
	 *  We need to handle SEGV signals when we
	 *  hit the end of the mmap'd stack; however
	 *  an alternative signal handling stack
	 *  is required because we ran out of stack
	 */
	memset(&new_action, 0, sizeof new_action);
	new_action.sa_handler = stress_segvhandler;
	sigemptyset(&new_action.sa_mask);
	new_action.sa_flags = SA_ONSTACK;	/* deliver SIGSEGV on the sigaltstack set up below */
	if (sigaction(SIGSEGV, &new_action, NULL) < 0) {
		pr_fail_err(name, "sigaction");
		return EXIT_FAILURE;
	}

	/*
	 *  We need an alternative signal stack
	 *  to handle segfaults on an overrun
	 *  mmap'd stack
	 */
        memset(stack_sig, 0, sizeof(stack_sig));
        ss.ss_sp = (void *)stack_sig;
        ss.ss_size = SIGSTKSZ;
        ss.ss_flags = 0;
        if (sigaltstack(&ss, NULL) < 0) {
		pr_fail_err(name, "sigaltstack");
		return EXIT_FAILURE;
	}

	if (stress_temp_dir_mk(name, pid, instance) < 0)
		return EXIT_FAILURE;
	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	/* Create file back'd mmaping for the stack */
	fd = open(filename, O_SYNC | O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		pr_fail_err(name, "mmap'd stack file open");
		goto tidy_dir;
	}
	/* unlink now; the open fd keeps the file alive until close */
	(void)unlink(filename);
	if (ftruncate(fd, MMAPSTACK_SIZE) < 0) {
		pr_fail_err(name, "ftruncate");
		(void)close(fd);
		goto tidy_dir;
	}
	stack_mmap = mmap(NULL, MMAPSTACK_SIZE, PROT_READ | PROT_WRITE,
		MAP_SHARED, fd, 0);
	if (stack_mmap == MAP_FAILED) {
		pr_fail_err(name, "mmap");
		(void)close(fd);
		goto tidy_dir;
	}
	/* mapping remains valid after close; fd no longer needed */
	(void)close(fd);

	if (madvise(stack_mmap, MMAPSTACK_SIZE, MADV_RANDOM) < 0) {
		/* advisory only, non-fatal */
		pr_dbg(stderr, "%s: madvise failed: errno=%d (%s)\n",
			name, errno, strerror(errno));
	}

	/* Set up a context whose stack is the file-backed mapping */
	memset(&c_test, 0, sizeof(c_test));
	if (getcontext(&c_test) < 0) {
		pr_fail_err(name, "getcontext");
		goto tidy_mmap;
	}
	c_test.uc_stack.ss_sp = stack_mmap;
	c_test.uc_stack.ss_size = MMAPSTACK_SIZE;
	c_test.uc_link = &c_main;	/* resume c_main when the context function returns */
	makecontext(&c_test, stress_stackmmap_push_msync, 0);

	/*
	 *  set jmp handler to jmp back into the loop on a full
	 *  stack segfault.  Use swapcontext to jump into a
	 *  new context using the new mmap'd stack
	 */
	do {
		ret = sigsetjmp(jmp_env, 1);	/* ret != 0 => resumed via siglongjmp from the SEGV handler */
		if (!ret)
			swapcontext(&c_main, &c_test);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;

tidy_mmap:
	munmap(stack_mmap, MMAPSTACK_SIZE);
tidy_dir:
	(void)stress_temp_dir_rm(name, pid, instance);

	return rc;
}
示例#12
0
/*
 *  stress_fault()
 *	stress minor and major page faulting: repeatedly map one byte
 *	of a temp file, touch it to fault the page in, and unmap it.
 *	Unlinking the file before the touch (every other iteration)
 *	forces a major fault; otherwise a minor fault is expected.
 */
int stress_fault(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	struct rusage usage;
	char filename[PATH_MAX];
	int ret, i;
	const pid_t pid = getpid();

	ret = stress_temp_dir_mk(name, pid, instance);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());
	(void)umask(0077);

	i = 0;

	if (stress_sighandler(name, SIGSEGV, stress_segvhandler, NULL) < 0) {
		/* Fix: don't leak the temp directory created above on this error path */
		(void)stress_temp_dir_rm(name, pid, instance);
		return EXIT_FAILURE;
	}

	do {
		char *ptr;
		int fd;

		/* A SEGV here is unexpected (nothing has been mapped yet) */
		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			do_jmp = false;
			pr_err(stderr, "%s: unexpected segmentation fault\n", name);
			break;
		}

		fd = open(filename, O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
		if (fd < 0) {
			if ((errno == ENOSPC) || (errno == ENOMEM))
				continue;	/* Try again */
			pr_err(stderr, "%s: open failed: errno=%d (%s)\n",
				name, errno, strerror(errno));
			break;
		}
#if _XOPEN_SOURCE >= 600 || _POSIX_C_SOURCE >= 200112L
		/* Back at least 1 byte so touching the mapping is valid */
		if (posix_fallocate(fd, 0, 1) < 0) {
			if (errno == ENOSPC) {
				(void)close(fd);
				continue;	/* Try again */
			}
			(void)close(fd);
			pr_err(stderr, "%s: posix_fallocate failed: errno=%d (%s)\n",
				name, errno, strerror(errno));
			break;
		}
#else
		{
			/* Fix: zero-initialize, don't write indeterminate bytes */
			char buffer[1] = { 0 };

redo:
			if (opt_do_run && (write(fd, buffer, sizeof(buffer)) < 0)) {
				if ((errno == EAGAIN) || (errno == EINTR))
					goto redo;
				if (errno == ENOSPC) {
					(void)close(fd);
					continue;
				}
				(void)close(fd);
				pr_err(stderr, "%s: write failed: errno=%d (%s)\n",
					name, errno, strerror(errno));
				break;
			}
		}
#endif
		/* From here on, a SEGV from the touch below is recoverable */
		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			if (!opt_do_run || (max_ops && *counter >= max_ops))
				do_jmp = false;
			if (fd != -1)
				(void)close(fd);
			goto next;
		}

		/*
		 * Removing file here causes major fault when we touch
		 * ptr later
		 */
		if (i & 1)
			(void)unlink(filename);

		ptr = mmap(NULL, 1, PROT_READ | PROT_WRITE,
			MAP_SHARED, fd, 0);
		(void)close(fd);
		fd = -1;	/* fd closed; mapping keeps the page alive */

		if (ptr == MAP_FAILED) {
			pr_err(stderr, "%s: mmap failed: errno=%d (%s)\n",
				name, errno, strerror(errno));
			break;

		}
		*ptr = 0;	/* Cause the page fault */

		if (munmap(ptr, 1) < 0) {
			pr_err(stderr, "%s: munmap failed: errno=%d (%s)\n",
				name, errno, strerror(errno));
			break;
		}

next:
		/* Remove file on the non-major fault case */
		if (!(i & 1))
			(void)unlink(filename);

		i++;
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));
	/* Clean up, most times this is redundant */
	(void)unlink(filename);
	(void)stress_temp_dir_rm(name, pid, instance);

	/* Report the fault counts accumulated by this process */
	if (!getrusage(RUSAGE_SELF, &usage)) {
		pr_dbg(stderr, "%s: page faults: minor: %lu, major: %lu\n",
			name, usage.ru_minflt, usage.ru_majflt);
	}

	return EXIT_SUCCESS;
}