Example #1
/*
 *  set_sched()
 *	check the sched settings are valid and, if so, set them
 */
void set_sched(const int32_t sched, const int32_t sched_priority)
{
#if defined(SCHED_FIFO) || defined(SCHED_RR)
	int min, max;
#endif
	int rc;
	struct sched_param param;
	const char *name = get_sched_name(sched);

	memset(&param, 0, sizeof(param));

	switch (sched) {
	case UNDEFINED:	/* No preference, don't set */
		return;
#if defined(SCHED_FIFO) || defined(SCHED_RR)
#if defined(SCHED_FIFO)
	case SCHED_FIFO:
#endif
#if defined(SCHED_RR)
	case SCHED_RR:
#endif
		min = sched_get_priority_min(sched);
		max = sched_get_priority_max(sched);

		param.sched_priority = sched_priority;

		if (param.sched_priority == UNDEFINED) {
			if (opt_flags & OPT_FLAGS_AGGRESSIVE)
				param.sched_priority = max;
			else
				param.sched_priority = (max - min) / 2;
			pr_inf(stderr, "priority not given, defaulting to %d\n",
				param.sched_priority);
		}
		if ((param.sched_priority < min) ||
		    (param.sched_priority > max)) {
			fprintf(stderr, "Scheduler priority level must be "
				"set between %d and %d\n",
				min, max);
			exit(EXIT_FAILURE);
		}
		pr_dbg(stderr, "setting scheduler class '%s', priority %d\n",
			name, param.sched_priority);
		break;
#endif
	default:
		param.sched_priority = 0;
		if (sched_priority != UNDEFINED)
			pr_inf(stderr, "ignoring priority level for "
			"scheduler class '%s'\n", name);
		pr_dbg(stderr, "setting scheduler class '%s'\n", name);
		break;
	}
	rc = sched_setscheduler(getpid(), sched, &param);
	if (rc < 0) {
		fprintf(stderr, "Cannot set scheduler: errno=%d (%s)\n",
			errno, strerror(errno));
		exit(EXIT_FAILURE);
	}
}
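A minimal usage sketch for set_sched(); it assumes UNDEFINED is the stress-ng "no preference" sentinel (e.g. -1), so passing it as the priority takes the defaulting path above:

#if defined(SCHED_RR)
	set_sched(SCHED_RR, 10);	/* SCHED_RR at explicit priority 10 */
	set_sched(SCHED_RR, UNDEFINED);	/* let set_sched() pick a default */
#endif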
Example #2
/*
 *  stress_sem()
 *	stress system by POSIX sem ops
 */
static int stress_sem(const args_t *args)
{
	uint64_t semaphore_posix_procs = DEFAULT_SEMAPHORE_PROCS;
	uint64_t i;
	bool created = false;
	pthread_args_t p_args;

	if (!get_setting("sem-procs", &semaphore_posix_procs)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			semaphore_posix_procs = MAX_SEMAPHORE_PROCS;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			semaphore_posix_procs = MIN_SEMAPHORE_PROCS;
	}

	/* create a semaphore */
	if (sem_init(&sem, 0, 1) < 0) {
		pr_err("semaphore init (POSIX) failed: errno=%d: "
			"(%s)\n", errno, strerror(errno));
		return EXIT_FAILURE;
	}

	(void)memset(pthreads, 0, sizeof(pthreads));
	(void)memset(p_ret, 0, sizeof(p_ret));

	for (i = 0; i < semaphore_posix_procs; i++) {
		p_args.args = args;
		p_args.data = NULL;
		p_ret[i] = pthread_create(&pthreads[i], NULL,
			semaphore_posix_thrash, (void *)&p_args);
		if ((p_ret[i]) && (p_ret[i] != EAGAIN)) {
			pr_fail_errno("pthread create", p_ret[i]);
			break;
		}
		if (!g_keep_stressing_flag)
			break;
		created = true;
	}

	if (!created) {
		pr_inf("%s: could not create any pthreads\n", args->name);
		return EXIT_NO_RESOURCE;
	}

	/* Wait for termination */
	while (keep_stressing())
		(void)shim_usleep(100000);

	for (i = 0; i < semaphore_posix_procs; i++) {
		int ret;

		if (p_ret[i])
			continue;

		ret = pthread_join(pthreads[i], NULL);
		(void)ret;
	}
	(void)sem_destroy(&sem);

	return EXIT_SUCCESS;
}
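The semaphore_posix_thrash() worker spawned above is not shown here; a minimal sketch, assuming each thread simply bounces on the shared semaphore and bumps the bogo counter until the stressor is told to stop:

static void *semaphore_posix_thrash(void *arg)
{
	const pthread_args_t *p_args = arg;
	const args_t *args = p_args->args;

	while (keep_stressing()) {
		if (sem_wait(&sem) < 0)		/* acquire */
			break;
		inc_counter(args);
		(void)sem_post(&sem);		/* release */
	}
	return NULL;
}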
Example #3
/*
 *  thrash_start()
 *	start paging in thrash process
 */
int thrash_start(void)
{
	if (geteuid() != 0) {
		pr_inf("not running as root, ignoring --thrash option\n");
		return -1;
	}
	if (thrash_pid) {
		pr_err("thrash background process already started\n");
		return -1;
	}
	thrash_pid = fork();
	if (thrash_pid < 0) {
		pr_err("thrash background process failed to fork: %d (%s)\n",
			errno, strerror(errno));
		return -1;
	} else if (thrash_pid == 0) {
#if defined(SCHED_RR)
		int ret;

		stress_set_proc_name("stress-ng-thrash");

		ret = stress_set_sched(getpid(), SCHED_RR, 10, true);
		(void)ret;
#endif
		while (g_keep_stressing_flag) {
			pagein_all_procs();
			compact_memory();
			merge_memory();
			(void)sleep(1);
		}
		_exit(0);
	}
	return 0;
}
static int stress_set_memthrash_method(const char *name)
{
	(void)name;

	(void)pr_inf("warning: --memthrash-method not available on this system\n");
	return 0;
}
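thrash_start() records the child in thrash_pid but no teardown is shown; a hypothetical thrash_stop() counterpart could terminate and reap it like this:

int thrash_stop(void)
{
	int status;

	if (!thrash_pid)
		return -1;

	(void)kill(thrash_pid, SIGTERM);
	(void)kill(thrash_pid, SIGKILL);
	(void)waitpid(thrash_pid, &status, 0);
	thrash_pid = 0;

	return 0;
}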
Example #5
/*
 *  stress_sysfs
 *	stress reading all of /sys
 */
int stress_sysfs(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	bool sys_read = true;

	(void)instance;

	if (geteuid() == 0) {
		pr_inf(stderr, "%s: running as root, just traversing /sys "
			"and not reading files.\n", name);
		sys_read = false;
	}

	do {
		stress_sys_dir(name, "/sys", true, 0, sys_read);
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	return EXIT_SUCCESS;
}
Example #6
/*
 *  stress_loop_supported()
 *      check if we are running as root
 */
static int stress_loop_supported(void)
{
	if (geteuid() != 0) {
		pr_inf("loop stressor will be skipped, "
			"need to be running as root for this stressor\n");
		return -1;
	}
	return 0;
}
Example #7
void stress_adjust_pthread_max(uint64_t max)
{
	if (opt_pthread_max > max) {
		opt_pthread_max = max;
		pr_inf(stdout, "re-adjusting maximum threads to "
			"soft limit of %" PRIu64 "\n",
			opt_pthread_max);
	}
}
Example #8
/*
 *  stress_rdrand_supported()
 *	check if rdrand is supported
 */
static int stress_rdrand_supported(void)
{
	uint32_t eax, ebx, ecx, edx;

	/* Intel CPU? */
	if (!cpu_is_x86()) {
		pr_inf("rdrand stressor will be skipped, "
			"not a recognised Intel CPU\n");
		return -1;
	}
	/* ..and supports rdrand? */
	__cpuid(1, eax, ebx, ecx, edx);
	if (!(ecx & 0x40000000)) {
		pr_inf("rdrand stressor will be skipped, CPU "
			"does not support the rdrand instruction\n");
		return -1;
	}
	rdrand_supported = true;
	return 0;
}
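Once rdrand_supported is set, the stressor can pull values from the instruction; a sketch using the _rdrand32_step() intrinsic from <immintrin.h> (compile with -mrdrnd), retrying on the instruction's transient failures:

#include <immintrin.h>

static uint32_t stress_rdrand32(void)
{
	unsigned int v;

	/* rdrand returns 0 on transient entropy underflow, so retry */
	while (!_rdrand32_step(&v))
		;
	return (uint32_t)v;
}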
Example #9
/*
 *  stress_cwd_readwriteable()
 *	check if cwd is read/writeable
 */
void stress_cwd_readwriteable(void)
{
	char path[PATH_MAX];

	if (getcwd(path, sizeof(path)) == NULL) {
		pr_dbg(stderr, "Cannot determine current working directory\n");
		return;
	}
	if (access(path, R_OK | W_OK)) {
		pr_inf(stderr, "Working directory %s is not read/writeable, "
			"some I/O tests may fail\n", path);
		return;
	}
}
Example #10
/*
 *  stress_semaphore_posix_init()
 *	initialize a POSIX semaphore
 */
void stress_semaphore_posix_init(void)
{
	/* create a semaphore */
	if (sem_init(&shared->sem_posix.sem, 1, 1) >= 0) {
		shared->sem_posix.init = true;
		return;
	}

	if (opt_sequential) {
		pr_inf(stderr, "Semaphore init failed: errno=%d: (%s), "
			"skipping semaphore stressor\n",
			errno, strerror(errno));
	} else {
		pr_err(stderr, "Semaphore init failed: errno=%d: (%s)\n",
			errno, strerror(errno));
	}
}
/*
 *  stress_filename_probe()
 *	determine allowed filename chars by probing
 */
int stress_filename_probe(
	const char *name,
	char *filename,
	char *ptr,
	size_t *chars_allowed)
{
	size_t i, j;

	/*
	 *  Determine allowed char set for filenames
	 */
	for (j = 0, i = 0; i < 256; i++) {
		int fd;

		if ((i == 0) || (i == '/'))
			continue;
		*ptr = i;
		*(ptr + 1) = 'X';
		*(ptr + 2) = '\0';

		if ((fd = creat(filename, S_IRUSR | S_IWUSR)) < 0) {
			/* We only expect EINVAL on bad filenames */
			if (errno != EINVAL) {
				pr_err(stderr, "%s: creat() failed when probing "
					"for allowed filename characters, "
					"errno = %d (%s)\n",
					name, errno, strerror(errno));
				pr_inf(stderr, "%s: perhaps retry and use --filename-opts posix\n", name);
				*chars_allowed = 0;
				return -errno;
			}
		} else {
			(void)close(fd);
			(void)unlink(filename);
			allowed[j] = i;
			j++;
		}
	}
	*chars_allowed = j;

	return 0;
}
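Callers of stress_filename_probe() pass filename as a directory prefix with ptr pointing at its terminating NUL, where the probe writes its two-character test names; a usage sketch (the directory path is illustrative only):

	char filename[PATH_MAX];
	char *ptr;
	size_t chars_allowed = 0;

	(void)snprintf(filename, sizeof(filename), "/tmp/stress-probe/");
	ptr = filename + strlen(filename);	/* probe writes names here */
	if (stress_filename_probe("filename", filename, ptr, &chars_allowed) == 0)
		pr_inf(stderr, "%zu allowed filename characters\n", chars_allowed);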
/*
 *  stress_userfaultfd_oomable()
 *	stress userfaultfd system call, this
 *	is an OOM-able child process that the
 *	parent can restart
 */
static int stress_userfaultfd_oomable(
	const args_t *args,
	const size_t userfaultfd_bytes)
{
	const size_t page_size = args->page_size;
	size_t sz;
	uint8_t *data;
	void *zero_page = NULL;
	int fd = -1, fdinfo = -1, status, rc = EXIT_SUCCESS, count = 0;
	const unsigned int uffdio_copy = 1 << _UFFDIO_COPY;
	const unsigned int uffdio_zeropage = 1 << _UFFDIO_ZEROPAGE;
	pid_t pid;
	struct uffdio_api api;
	struct uffdio_register reg;
	context_t c;
	bool do_poll = true;
	char filename[PATH_MAX];

	/* Child clone stack */
	static uint8_t stack[STACK_SIZE];
	const ssize_t stack_offset =
		stress_get_stack_direction() * (STACK_SIZE - 64);
	uint8_t *stack_top = stack + stack_offset;

	sz = userfaultfd_bytes & ~(page_size - 1);

	if (posix_memalign(&zero_page, page_size, page_size)) {
		pr_err("%s: zero page allocation failed\n", args->name);
		return EXIT_NO_RESOURCE;
	}

	data = mmap(NULL, sz, PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (data == MAP_FAILED) {
		rc = EXIT_NO_RESOURCE;
		pr_err("%s: mmap failed\n", args->name);
		goto free_zeropage;
	}

	/* Get userfault fd */
	if ((fd = shim_userfaultfd(0)) < 0) {
		if (errno == ENOSYS) {
			pr_inf("%s: stressor will be skipped, "
				"userfaultfd not supported\n",
				args->name);
			rc = EXIT_NOT_IMPLEMENTED;
			goto unmap_data;
		}
		rc = exit_status(errno);
		pr_err("%s: userfaultfd failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		goto unmap_data;
	}

	(void)snprintf(filename, sizeof(filename), "/proc/%d/fdinfo/%d",
		getpid(), fd);
	fdinfo = open(filename, O_RDONLY);

	if (stress_set_nonblock(fd) < 0)
		do_poll = false;

	/* API sanity check */
	(void)memset(&api, 0, sizeof(api));
	api.api = UFFD_API;
	api.features = 0;
	if (ioctl(fd, UFFDIO_API, &api) < 0) {
		pr_err("%s: ioctl UFFDIO_API failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		rc = EXIT_FAILURE;
		goto unmap_data;
	}
	if (api.api != UFFD_API) {
		pr_err("%s: ioctl UFFDIO_API API check failed\n",
			args->name);
		rc = EXIT_FAILURE;
		goto unmap_data;
	}

	/* Register fault handling mode */
	(void)memset(&reg, 0, sizeof(reg));
	reg.range.start = (unsigned long)data;
	reg.range.len = sz;
	reg.mode = UFFDIO_REGISTER_MODE_MISSING;
	if (ioctl(fd, UFFDIO_REGISTER, &reg) < 0) {
		pr_err("%s: ioctl UFFDIO_REGISTER failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		rc = EXIT_FAILURE;
		goto unmap_data;
	}

	/* check that _UFFDIO_COPY is supported */
	if ((reg.ioctls & uffdio_copy) != uffdio_copy) {
		pr_err("%s: ioctl UFFDIO_REGISTER did not support _UFFDIO_COPY\n",
			args->name);
		rc = EXIT_FAILURE;
		goto unmap_data;
	}
	/* check that _UFFDIO_ZEROPAGE is supported */
	if ((reg.ioctls & uffdio_zeropage) != uffdio_zeropage) {
		pr_err("%s: ioctl UFFDIO_REGISTER did not support _UFFDIO_ZEROPAGE\n",
			args->name);
		rc = EXIT_FAILURE;
		goto unmap_data;
	}

	/* Set up context for child */
	c.args = args;
	c.data = data;
	c.sz = sz;
	c.page_size = page_size;
	c.parent = getpid();

	/*
	 *  We need to clone the child and share the same VM address space
	 *  as parent so we can perform the page fault handling
	 */
	pid = clone(stress_userfaultfd_child, align_stack(stack_top),
		SIGCHLD | CLONE_FILES | CLONE_FS | CLONE_SIGHAND | CLONE_VM, &c);
	if (pid < 0) {
		pr_err("%s: fork failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		goto unreg;
	}

	/* Parent */
	do {
		struct uffd_msg msg;
		ssize_t ret;

		/* check we should break out before we block on the read */
		if (!g_keep_stressing_flag)
			break;

		/*
		 * polled wait exercises userfaultfd_poll
		 * in the kernel, but only works if fd is NONBLOCKing
		 */
		if (do_poll) {
			struct pollfd fds[1];

			(void)memset(fds, 0, sizeof fds);
			fds[0].fd = fd;
			fds[0].events = POLLIN;
			/* wait for 1 second max */

			ret = poll(fds, 1, 1000);
			if (ret == 0)
				continue;	/* timed out, redo the poll */
			if (ret < 0) {
				if (errno == EINTR)
					continue;
				if (errno != ENOMEM) {
					pr_fail_err("poll userfaultfd");
					if (!g_keep_stressing_flag)
						break;
				}
				/*
				 *  poll ran out of free space for internal
				 *  fd tables, so give up and block on the
				 *  read anyway
				 */
				goto do_read;
			}
			/* No data, re-poll */
			if (!(fds[0].revents & POLLIN))
				continue;

			if (LIKELY(fdinfo > -1) &&
			    UNLIKELY(count++ >= COUNT_MAX)) {
				ret = lseek(fdinfo, 0, SEEK_SET);
				if (ret == 0) {
					char buffer[4096];

					ret = read(fdinfo, buffer, sizeof(buffer));
					(void)ret;
				}
				count = 0;
			}
		}

do_read:
		if ((ret = read(fd, &msg, sizeof(msg))) < 0) {
			if (errno == EINTR)
				continue;
			pr_fail_err("read userfaultfd");
			if (!g_keep_stressing_flag)
				break;
			continue;
		}
		/* We only expect a page fault event */
		if (msg.event != UFFD_EVENT_PAGEFAULT) {
			pr_fail_err("userfaultfd msg not pagefault event");
			continue;
		}
		/* We only expect a write fault */
		if (!(msg.arg.pagefault.flags & UFFD_PAGEFAULT_FLAG_WRITE)) {
			pr_fail_err("userfaultfd msg not write page fault event");
			continue;
		}
		/* Go handle the page fault */
		if (handle_page_fault(args, fd, (uint8_t *)(ptrdiff_t)msg.arg.pagefault.address,
				zero_page, data, data + sz, page_size) < 0)
			break;
		inc_counter(args);
	} while (keep_stressing());

	/* Run it over, zap child */
	(void)kill(pid, SIGKILL);
	if (shim_waitpid(pid, &status, 0) < 0) {
		pr_dbg("%s: waitpid failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
	}
unreg:
	if (ioctl(fd, UFFDIO_UNREGISTER, &reg) < 0) {
		pr_err("%s: ioctl UFFDIO_UNREGISTER failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		rc = EXIT_FAILURE;
		goto unmap_data;
	}
unmap_data:
	(void)munmap(data, sz);
free_zeropage:
	free(zero_page);
	if (fdinfo > -1)
		(void)close(fdinfo);
	if (fd > -1)
		(void)close(fd);

	return rc;
}
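The handle_page_fault() helper called above is not shown; a minimal sketch that resolves the faulting page with UFFDIO_COPY from the pre-allocated zero page (the real helper may also exercise UFFDIO_ZEROPAGE):

static int handle_page_fault(
	const args_t *args,
	const int fd,
	uint8_t *addr,
	void *zero_page,
	uint8_t *data_start,
	uint8_t *data_end,
	const size_t page_size)
{
	struct uffdio_copy copy;

	if ((addr < data_start) || (addr >= data_end)) {
		pr_err("%s: fault address out of range\n", args->name);
		return -1;
	}

	(void)memset(&copy, 0, sizeof(copy));
	copy.dst = (unsigned long)((uintptr_t)addr & ~(uintptr_t)(page_size - 1));
	copy.src = (unsigned long)zero_page;
	copy.len = page_size;
	copy.mode = 0;

	if (ioctl(fd, UFFDIO_COPY, &copy) < 0) {
		pr_err("%s: ioctl UFFDIO_COPY failed, errno = %d (%s)\n",
			args->name, errno, strerror(errno));
		return -1;
	}
	return 0;
}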
Example #13
/*
 *  stress_hdd
 *	stress disk I/O via sequential and random reads and writes
 */
int stress_hdd(
    uint64_t *const counter,
    const uint32_t instance,
    const uint64_t max_ops,
    const char *name)
{
    uint8_t *buf = NULL;
    uint64_t i, min_size, remainder;
    const pid_t pid = getpid();
    int ret, rc = EXIT_FAILURE;
    char filename[PATH_MAX];
    int flags = O_CREAT | O_RDWR | O_TRUNC | opt_hdd_oflags;
    int fadvise_flags = opt_hdd_flags & HDD_OPT_FADV_MASK;

    if (!set_hdd_bytes) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_bytes = MAX_HDD_BYTES;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_bytes = MIN_HDD_BYTES;
    }

    if (!set_hdd_write_size) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_write_size = MAX_HDD_WRITE_SIZE;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_write_size = MIN_HDD_WRITE_SIZE;
    }

    if (opt_hdd_flags & HDD_OPT_O_DIRECT) {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * BUF_ALIGNMENT : MIN_HDD_WRITE_SIZE;
    } else {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * MIN_HDD_WRITE_SIZE : MIN_HDD_WRITE_SIZE;
    }
    /* Ensure I/O size is not too small */
    if (opt_hdd_write_size < min_size) {
        opt_hdd_write_size = min_size;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes\n",
               name, opt_hdd_write_size);
    }

    /* Ensure we get same sized iovec I/O sizes */
    remainder = opt_hdd_write_size % HDD_IO_VEC_MAX;
    if ((opt_hdd_flags & HDD_OPT_IOVEC) && (remainder != 0)) {
        opt_hdd_write_size += HDD_IO_VEC_MAX - remainder;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes in iovec mode\n",
               name, opt_hdd_write_size);
    }

    /* Ensure complete file size is not less than the I/O size */
    if (opt_hdd_bytes < opt_hdd_write_size) {
        opt_hdd_bytes = opt_hdd_write_size;
        pr_inf(stderr, "%s: increasing file size to write size of %" PRIu64 " bytes\n",
               name, opt_hdd_bytes);
    }


    if (stress_temp_dir_mk(name, pid, instance) < 0)
        return EXIT_FAILURE;

    /* Must have some write option */
    if ((opt_hdd_flags & HDD_OPT_WR_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_WR_SEQ;
    /* Must have some read option */
    if ((opt_hdd_flags & HDD_OPT_RD_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_RD_SEQ;

    ret = posix_memalign((void **)&buf, BUF_ALIGNMENT, (size_t)opt_hdd_write_size);
    if (ret || !buf) {
        pr_err(stderr, "%s: cannot allocate buffer\n", name);
        (void)stress_temp_dir_rm(name, pid, instance);
        return EXIT_FAILURE;
    }

    for (i = 0; i < opt_hdd_write_size; i++)
        buf[i] = mwc8();

    (void)stress_temp_filename(filename, sizeof(filename),
                               name, pid, instance, mwc32());
    do {
        int fd;

        (void)umask(0077);
        if ((fd = open(filename, flags, S_IRUSR | S_IWUSR)) < 0) {
            pr_failed_err(name, "open");
            goto finish;
        }
        if (ftruncate(fd, (off_t)0) < 0) {
            pr_failed_err(name, "ftruncate");
            (void)close(fd);
            goto finish;
        }
        (void)unlink(filename);

        if (stress_hdd_advise(name, fd, fadvise_flags) < 0) {
            (void)close(fd);
            goto finish;
        }

        /* Random Write */
        if (opt_hdd_flags & HDD_OPT_WR_RND) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                size_t j;

                off_t offset = (i == 0) ?
                               opt_hdd_bytes :
                               (mwc64() % opt_hdd_bytes) & ~511;
                ssize_t ret;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                for (j = 0; j < opt_hdd_write_size; j++)
                    buf[j] = (offset + j) & 0xff;

                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }
        /* Sequential Write */
        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                size_t j;
seq_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                for (j = 0; j < opt_hdd_write_size; j += 512)
                    buf[j] = (i + j) & 0xff;
                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }

        /* Sequential Read */
        if (opt_hdd_flags & HDD_OPT_RD_SEQ) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            if (lseek(fd, 0, SEEK_SET) < 0) {
                pr_failed_err(name, "lseek");
                (void)close(fd);
                goto finish;
            }
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
seq_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    size_t j;

                    for (j = 0; j < opt_hdd_write_size; j += 512) {
                        uint8_t v = (i + j) & 0xff;
                        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
                            /* Write seq has written to all of the file, so it should always be OK */
                            if (buf[j] != v)
                                baddata++;
                        } else {
                            /* Write rnd has written to some of the file, so data either zero or OK */
                            if (buf[j] != 0 && buf[j] != v)
                                baddata++;
                        }
                    }
                }
                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete sequential reads\n",
                       name, misreads);
            if (baddata)
                pr_fail(stderr, "%s: incorrect data found %" PRIu64 " times\n",
                        name, baddata);
        }
        /* Random Read */
        if (opt_hdd_flags & HDD_OPT_RD_RND) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                off_t offset = (mwc64() % (opt_hdd_bytes - opt_hdd_write_size)) & ~511;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;
                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    size_t j;

                    for (j = 0; j < opt_hdd_write_size; j += 512) {
                        uint8_t v = (i + j) & 0xff;
                        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
                            /* Write seq has written to all of the file, so it should always be OK */
                            if (buf[j] != v)
                                baddata++;
                        } else {
                            /* Write rnd has written to some of the file, so data either zero or OK */
                            if (buf[j] != 0 && buf[j] != v)
                                baddata++;
                        }
                    }
                }

                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete random reads\n",
                       name, misreads);
        }
        (void)close(fd);

    } while (opt_do_run && (!max_ops || *counter < max_ops));

    rc = EXIT_SUCCESS;
finish:
    free(buf);
    (void)stress_temp_dir_rm(name, pid, instance);
    return rc;
}
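stress_hdd_write() is referenced above but not shown; a simplified sketch that loops over short writes and lets EAGAIN/EINTR bubble up to the retry labels (the real helper also supports iovec writes when HDD_OPT_IOVEC is set):

static ssize_t stress_hdd_write(const int fd, uint8_t *buf, const size_t count)
{
    size_t written = 0;

    while (written < count) {
        const ssize_t ret = write(fd, buf + written, count - written);

        if (ret <= 0)
            return ret;     /* caller inspects errno and retries */
        written += (size_t)ret;
    }
    return (ssize_t)written;
}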
Example #14
/*
 *  stress_memthrash()
 *	stress memory by thrashing it with multiple pthreads
 */
static int stress_memthrash(const args_t *args)
{
	const stress_memthrash_method_info_t *memthrash_method = &memthrash_methods[0];
	const uint32_t total_cpus = stress_get_processors_configured();
	const uint32_t max_threads = stress_memthrash_max(args->num_instances, total_cpus);
	pthread_t pthreads[max_threads];
	int ret[max_threads];
	pthread_args_t pargs;
	memthrash_func_t func;
	pid_t pid;

	(void)get_setting("memthrash-method", &memthrash_method);
	func = memthrash_method->func;

	pr_dbg("%s: using method '%s'\n", args->name, memthrash_method->name);
	if (args->instance == 0) {
		pr_inf("%s: starting %" PRIu32 " thread%s on each of the %"
			PRIu32 " stressors on a %" PRIu32 " CPU system\n",
			args->name, max_threads, plural(max_threads),
			args->num_instances, total_cpus);
		if (max_threads * args->num_instances > total_cpus) {
			pr_inf("%s: this is not an optimal choice of stressors, "
				"try %" PRIu32 " instead\n",
				args->name,
				stress_memthash_optimal(args->num_instances, total_cpus));
		}
	}

	pargs.args = args;
	pargs.data = func;

	(void)memset(pthreads, 0, sizeof(pthreads));
	(void)memset(ret, 0, sizeof(ret));
	(void)sigfillset(&set);

again:
	if (!g_keep_stressing_flag)
		return EXIT_SUCCESS;
	pid = fork();
	if (pid < 0) {
		if ((errno == EAGAIN) || (errno == ENOMEM))
			goto again;
		pr_err("%s: fork failed: errno=%d: (%s)\n",
			args->name, errno, strerror(errno));
	} else if (pid > 0) {
		int status, waitret;

		/* Parent, wait for child */
		(void)setpgid(pid, g_pgrp);
		waitret = shim_waitpid(pid, &status, 0);
		if (waitret < 0) {
			if (errno != EINTR)
				pr_dbg("%s: waitpid(): errno=%d (%s)\n",
					args->name, errno, strerror(errno));
			(void)kill(pid, SIGTERM);
			(void)kill(pid, SIGKILL);
			(void)shim_waitpid(pid, &status, 0);
		} else if (WIFSIGNALED(status)) {
			pr_dbg("%s: child died: %s (instance %d)\n",
				args->name, stress_strsignal(WTERMSIG(status)),
				args->instance);
			/* If we got killed by OOM killer, re-start */
			if (WTERMSIG(status) == SIGKILL) {
				log_system_mem_info();
				pr_dbg("%s: assuming killed by OOM killer, "
					"restarting again (instance %d)\n",
					args->name, args->instance);
				goto again;
			}
		}
	} else if (pid == 0) {
		uint32_t i;

		/* Make sure this is killable by OOM killer */
		set_oom_adjustment(args->name, true);

		int flags = MAP_PRIVATE | MAP_ANONYMOUS;
#if defined(MAP_POPULATE)
		flags |= MAP_POPULATE;
#endif

mmap_retry:
		mem = mmap(NULL, MEM_SIZE, PROT_READ | PROT_WRITE, flags, -1, 0);
		if (mem == MAP_FAILED) {
#if defined(MAP_POPULATE)
			flags &= ~MAP_POPULATE;	/* Less aggressive, more OOMable */
#endif
			if (!g_keep_stressing_flag) {
				pr_dbg("%s: mmap failed: %d %s\n",
					args->name, errno, strerror(errno));
				return EXIT_NO_RESOURCE;
			}
			(void)shim_usleep(100000);
			if (!g_keep_stressing_flag)
				goto reap_mem;
			goto mmap_retry;
		}

		for (i = 0; i < max_threads; i++) {
			ret[i] = pthread_create(&pthreads[i], NULL,
				stress_memthrash_func, (void *)&pargs);
			if (ret[i]) {
				/* Just give up and go to next thread */
				if (ret[i] == EAGAIN)
					continue;
				/* Something really unexpected */
				pr_fail_errno("pthread create", ret[i]);
				goto reap;
			}
			if (!g_keep_stressing_flag)
				goto reap;
		}
		/* Wait for SIGALRM or SIGINT/SIGHUP etc */
		(void)pause();

reap:
		thread_terminate = true;
		for (i = 0; i < max_threads; i++) {
			if (!ret[i]) {
				ret[i] = pthread_join(pthreads[i], NULL);
				if (ret[i])
					pr_fail_errno("pthread join", ret[i]);
			}
		}
reap_mem:
		(void)munmap(mem, MEM_SIZE);
	}
	return EXIT_SUCCESS;
}
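The stress_memthrash_func() worker is not shown; a minimal sketch, assuming memthrash_func_t takes the stressor args and the mapping size, with each thread applying the selected method until thread_terminate is set:

static void *stress_memthrash_func(void *arg)
{
	const pthread_args_t *parg = arg;
	const memthrash_func_t func = (memthrash_func_t)parg->data;

	while (!thread_terminate && g_keep_stressing_flag)
		func(parg->args, MEM_SIZE);	/* thrash the shared mapping */

	return NULL;
}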
/*
 *  stress_file_ioctl
 *	stress file ioctls
 */
static int stress_file_ioctl(const args_t *args)
{
	char filename[PATH_MAX];
	int ret, fd;
#if defined(FICLONE) || defined(FICLONERANGE)
	int dfd;
#endif
	const off_t file_sz = 8192;
	uint32_t rnd = mwc32();

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args, filename, sizeof(filename), rnd);
	fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		pr_err("%s: cannot create %s\n", args->name, filename);
		return exit_status(errno);
	}
	(void)unlink(filename);

#if defined(FICLONE) || defined(FICLONERANGE)
	(void)stress_temp_filename_args(args, filename, sizeof(filename), rnd + 1);
	dfd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR);
	if (dfd < 0) {
		(void)close(fd);
		pr_err("%s: cannot create %s\n", args->name, filename);
		return exit_status(errno);
	}
	(void)unlink(filename);
#endif

	(void)shim_fallocate(fd, 0, 0, file_sz);
#if defined(FICLONE) || defined(FICLONERANGE)
	(void)shim_fallocate(dfd, 0, 0, file_sz);
#endif
	(void)shim_fsync(fd);

	do {
		int exercised = 0;

#if defined(FIOCLEX)
		{
			ret = ioctl(fd, FIOCLEX);
			(void)ret;

			exercised++;
		}
#endif
#if defined(FIONCLEX)
		{
			ret = ioctl(fd, FIONCLEX);
			(void)ret;

			exercised++;
		}
#endif
#if defined(FIONBIO)
		{
			int opt;

			opt = 1;
			ret = ioctl(fd, FIONBIO, &opt);
#if defined(O_NONBLOCK)
			check_flag(args, "FIONBIO", fd, O_NONBLOCK, ret, true);
#else
			(void)ret;
#endif

			opt = 0;
			ret = ioctl(fd, FIONBIO, &opt);
#if defined(O_NONBLOCK)
			check_flag(args, "FIONBIO", fd, O_NONBLOCK, ret, false);
#else
			(void)ret;
#endif
			exercised++;
		}
#endif

#if defined(FIOASYNC)
		{
			int opt;

			opt = 1;
			ret = ioctl(fd, FIOASYNC, &opt);
#if defined(O_ASYNC)
			check_flag(args, "FIONASYNC", fd, O_ASYNC, ret, true);
#else
			(void)ret;
#endif

			opt = 0;
			ret = ioctl(fd, FIOASYNC, &opt);
#if defined(O_ASYNC)
			check_flag(args, "FIONASYNC", fd, O_ASYNC, ret, false);
#else
			(void)ret;
#endif
			exercised++;
		}
#endif

#if defined(FIOQSIZE)
		{
			shim_loff_t sz;
			struct stat buf;

			ret = fstat(fd, &buf);
			if (ret == 0) {
				ret = ioctl(fd, FIOQSIZE, &sz);
				if ((ret == 0) && (file_sz != buf.st_size))
					pr_fail("%s: ioctl FIOQSIZE failed, size "
						"%jd (filesize) vs %jd (reported)\n",
						args->name,
						(intmax_t)file_sz, (intmax_t)sz);
			}
			exercised++;
		}
#endif

/* Disable this at the moment, it is fragile */
#if 0
#if defined(FIFREEZE) && defined(FITHAW)
		{
			ret = ioctl(fd, FIFREEZE);
			(void)ret;
			ret = ioctl(fd, FITHAW);
			(void)ret;

			exercised++;
		}
#endif
#endif

#if defined(FIGETBSZ)
		{
			int isz;

			ret = ioctl(fd, FIGETBSZ, &isz);
			if ((ret == 0) && (isz < 1))
				pr_fail("%s: ioctl FIGETBSZ returned unusual "
					"block size %d\n", args->name, isz);

			exercised++;
		}
#endif

#if defined(FICLONE)
		{
			ret = ioctl(dfd, FICLONE, fd);
			(void)ret;

			exercised++;
		}
#endif

#if defined(FICLONERANGE)
		{
			struct file_clone_range fcr;

			(void)memset(&fcr, 0, sizeof(fcr));
			fcr.src_fd = fd;
			fcr.src_offset = 0;
			fcr.src_length = file_sz;
			fcr.dest_offset = 0;

			ret = ioctl(dfd, FICLONERANGE, &fcr);
			(void)ret;

			exercised++;
		}
#endif

#if defined(FIDEDUPERANGE)
		{
			const size_t sz = sizeof(struct file_dedupe_range) +
					  sizeof(struct file_dedupe_range_info);
			char buf[sz] ALIGNED(64);

			struct file_dedupe_range *d = (struct file_dedupe_range *)buf;

			d->src_offset = 0;
			d->src_length = file_sz;
			d->dest_count = 1;
			d->reserved1 = 0;
			d->reserved2 = 0;
			d->info[0].dest_fd = dfd;
			d->info[0].dest_offset = 0;
			/* Zero the return values */
			d->info[0].bytes_deduped = 0;
			d->info[0].status = 0;
			d->info[0].reserved = 0;
			ret = ioctl(fd, FIDEDUPERANGE, d);
			(void)ret;

			exercised++;
		}
#endif

#if defined(FIONREAD)
		{
			int isz = 0;

			ret = ioctl(fd, FIONREAD, &isz);
			(void)ret;

			exercised++;
		}
#endif

#if defined(FIONWRITE)
		{
			int isz = 0;

			ret = ioctl(fd, FIONWRITE, &isz);
			(void)ret;

			exercised++;
		}
#endif

#if defined(FS_IOC_RESVSP)
		{
			unsigned long isz = file_sz * 2;

			ret = ioctl(fd, FS_IOC_RESVSP, &isz);
			(void)ret;

			exercised++;
		}
#endif

#if defined(FS_IOC_RESVSP64)
		{
			unsigned long isz = file_sz * 2;

			ret = ioctl(fd, FS_IOC_RESVSP64, &isz);
			(void)ret;

			exercised++;
		}
#endif

#if defined(FIBMAP)
		{
			int block = 0;

			ret = ioctl(fd, FIBMAP, &block);
			(void)ret;

			exercised++;
		}
#endif
		if (!exercised) {
			pr_inf("%s: no available file ioctls to exercise\n",
				args->name);
			ret = EXIT_NOT_IMPLEMENTED;
			goto tidy;
		}

		inc_counter(args);
	} while (keep_stressing());

	ret = EXIT_SUCCESS;

tidy:
#if defined(FICLONE) || defined(FICLONERANGE)
	(void)close(dfd);
#endif
	(void)close(fd);
	(void)stress_temp_dir_rm_args(args);

	return ret;
}
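The check_flag() helper used by the FIONBIO and FIOASYNC tests is not shown; a sketch that verifies via fcntl(F_GETFL) that the ioctl really set or cleared the file status flag:

static void check_flag(
	const args_t *args,
	const char *ioctl_name,
	const int fd,
	const int flag,
	const int ret,
	const bool set)
{
	if (ret == 0) {
		const int flags = fcntl(fd, F_GETFL);

		if (flags < 0)
			return;		/* cannot verify, don't fail */
		if ((set && !(flags & flag)) || (!set && (flags & flag)))
			pr_fail("%s: ioctl %s failed to %s flag 0x%x\n",
				args->name, ioctl_name,
				set ? "set" : "clear", flag);
	}
}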
Example #16
/*
 *  stress_vforkmany()
 *	stress by vfork'ing as many processes as possible.
 *	vfork has interesting semantics: the parent blocks
 *	until the child exits or execs, and child processes
 *	share the same address space. So we need to be
 *	careful not to overwrite shared variables across
 *	all the processes.
 */
static int stress_vforkmany(const args_t *args)
{
	static int status;
	static pid_t chpid;
	static volatile int instance = 0;
	static uint8_t stack_sig[SIGSTKSZ + SIGSTKSZ];
	static volatile bool *terminate;
	static bool *terminate_mmap;

	/* We should use an alternative signal stack */
	(void)memset(stack_sig, 0, sizeof(stack_sig));
	if (stress_sigaltstack(stack_sig, SIGSTKSZ) < 0)
		return EXIT_FAILURE;

	terminate = terminate_mmap =
		(bool *)mmap(NULL, args->page_size,
				PROT_READ | PROT_WRITE,
				MAP_SHARED | MAP_ANONYMOUS, -1, 0);
	if (terminate_mmap == MAP_FAILED) {
		pr_inf("%s: mmap failed: %d (%s)\n",
			args->name, errno, strerror(errno));
		return EXIT_NO_RESOURCE;
	}
	*terminate = false;

fork_again:
	if (!g_keep_stressing_flag)
		goto tidy;
	chpid = fork();
	if (chpid < 0) {
		if (errno == EAGAIN)
			goto fork_again;
		pr_err("%s: fork failed: errno=%d: (%s)\n",
			args->name, errno, strerror(errno));
		munmap((void *)terminate_mmap, args->page_size);
		return EXIT_FAILURE;
	} else if (chpid == 0) {
		static uint8_t *waste;
		static size_t waste_size = WASTE_SIZE;

		(void)setpgid(0, g_pgrp);

		/*
		 *  We want the children to be OOM'd if we
		 *  eat up too much memory
		 */
		set_oom_adjustment(args->name, true);
		stress_parent_died_alarm();

		/*
		 *  Allocate some wasted space so this child
		 *  scores more on the OOMable score than the
		 *  parent waiter so in theory it should be
		 *  OOM'd before the parent.
		 */
		do {
			waste = (uint8_t *)mmap(NULL, waste_size, PROT_READ | PROT_WRITE,
					MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
			if (waste != MAP_FAILED)
				break;

			waste_size >>= 1;
		} while (waste_size > 4096);

		if (waste != MAP_FAILED)
			(void)memset(waste, 0, WASTE_SIZE);
		do {
			/*
			 *  Force pid to be a register, if it's
			 *  stashed on the stack or as a global
			 *  then waitpid will pick up the one
			 *  shared by all the vfork children
			 *  which is problematic on the wait
			 */
			register pid_t pid;
			register bool first = (instance == 0);

vfork_again:
			/*
			 * SIGALRM is not inherited over vfork so
			 * instead poll the run time and break out
			 * of the loop if we've run out of run time
			 */
			if (*terminate) {
				g_keep_stressing_flag = false;
				break;
			}
			inc_counter(args);
			instance++;
			if (first) {
				pid = fork();
			} else {
PRAGMA_PUSH
PRAGMA_WARN_OFF
#if defined(__NR_vfork)
				pid = (pid_t)syscall(__NR_vfork);
#else
				pid = vfork();
#endif
PRAGMA_POP
			}

			if (pid < 0) {
				/* failed, only exit if not the top parent */
				if (!first)
					_exit(0);
			} else if (pid == 0) {
				if (waste != MAP_FAILED) {
					register size_t i;

					for (i = 0; i < WASTE_SIZE; i += 4096)
						waste[i] = 0;
				}

				/* child, parent is blocked, spawn new child */
				if (!args->max_ops || get_counter(args) < args->max_ops)
					goto vfork_again;
				_exit(0);
			}
			/* parent, wait for child, and exit if not top parent */
			(void)shim_waitpid(pid, &status, 0);
			if (!first)
				_exit(0);
		} while (keep_stressing());

		if (waste != MAP_FAILED)
			munmap((void *)waste, WASTE_SIZE);
		_exit(0);
	} else {
Example #17
/*
 *  stress_cache_alloc()
 *	allocate shared cache buffer
 */
int stress_cache_alloc(const char *name)
{
#if defined(__linux__)
	cpus_t *cpu_caches = NULL;
	cpu_cache_t *cache = NULL;
	uint16_t max_cache_level = 0;
#endif

#if !defined(__linux__)
	shared->mem_cache_size = MEM_CACHE_SIZE;
#else
	cpu_caches = get_all_cpu_cache_details();
	if (!cpu_caches) {
		pr_inf(stderr, "%s: using built-in defaults as unable to "
			"determine cache details\n", name);
		shared->mem_cache_size = MEM_CACHE_SIZE;
		goto init_done;
	}

	max_cache_level = get_max_cache_level(cpu_caches);

	if (shared->mem_cache_level > max_cache_level) {
		pr_dbg(stderr, "%s: reducing cache level from L%d (too high) "
			"to L%d\n", name,
			shared->mem_cache_level, max_cache_level);
		shared->mem_cache_level = max_cache_level;
	}

	cache = get_cpu_cache(cpu_caches, shared->mem_cache_level);
	if (!cache) {
		pr_inf(stderr, "%s: using built-in defaults as no suitable "
			"cache found\n", name);
		shared->mem_cache_size = MEM_CACHE_SIZE;
		goto init_done;
	}

	if (shared->mem_cache_ways > 0) {
		uint64_t way_size;

		if (shared->mem_cache_ways > cache->ways) {
			pr_inf(stderr, "%s: cache way value too high - "
				"defaulting to %d (the maximum)\n",
				name, cache->ways);
			shared->mem_cache_ways = cache->ways;
		}

		way_size = cache->size / cache->ways;

		/* only fill the specified number of cache ways */
		shared->mem_cache_size = way_size * shared->mem_cache_ways;
	} else {
		/* fill the entire cache */
		shared->mem_cache_size = cache->size;
	}

	if (!shared->mem_cache_size) {
		pr_inf(stderr, "%s: using built-in defaults as unable to "
			"determine cache size\n", name);
		shared->mem_cache_size = MEM_CACHE_SIZE;
	}
init_done:
	free_cpu_caches(cpu_caches);
#endif
	shared->mem_cache = calloc(shared->mem_cache_size, 1);
	if (!shared->mem_cache) {
		pr_err(stderr, "%s: failed to allocate shared cache buffer\n",
			name);
		return -1;
	}
	pr_inf(stderr, "%s: default cache size: %" PRIu64 "K\n",
		name, shared->mem_cache_size / 1024);

	return 0;
}
Example #18
void perf_stat_dump(
	FILE *yaml,
	const stress_t stressors[],
	const proc_info_t procs[STRESS_MAX],
	const int32_t max_procs,
	const double duration)
{
	int32_t i;
	bool no_perf_stats = true;

	setlocale(LC_ALL, "");

	pr_yaml(yaml, "perfstats:\n");

	for (i = 0; i < STRESS_MAX; i++) {
		int p;
		uint64_t counter_totals[STRESS_PERF_MAX];
		uint64_t total_cpu_cycles = 0;
		uint64_t total_cache_refs = 0;
		uint64_t total_branches = 0;
		int ids[STRESS_PERF_MAX];
		bool got_data = false;
		char *munged;

		memset(counter_totals, 0, sizeof(counter_totals));

		/* Sum totals across all instances of the stressor */
		for (p = 0; p < STRESS_PERF_MAX; p++) {
			int32_t j, n = (i * max_procs);
			stress_perf_t *sp = &shared->stats[n].sp;

			if (!perf_stat_succeeded(sp))
				continue;

			ids[p] = ~0;
			for (j = 0; j < procs[i].started_procs; j++, n++) {
				uint64_t counter;

				if (perf_get_counter_by_index(sp, p,
				    &counter, &ids[p]) < 0)
					break;
				if (counter == STRESS_PERF_INVALID) {
					counter_totals[p] = STRESS_PERF_INVALID;
					break;
				}
				counter_totals[p] += counter;
				got_data |= (counter > 0);
			}
			if (ids[p] == STRESS_PERF_HW_CPU_CYCLES)
				total_cpu_cycles = counter_totals[p];
			if (ids[p] == STRESS_PERF_HW_CACHE_REFERENCES)
				total_cache_refs = counter_totals[p];
			if (ids[p] == STRESS_PERF_HW_BRANCH_INSTRUCTIONS)
				total_branches = counter_totals[p];
		}

		if (!got_data)
			continue;

		munged = munge_underscore((char *)stressors[i].name);
		pr_inf(stdout, "%s:\n", munged);
		pr_yaml(yaml, "    - stressor: %s\n", munged);
		pr_yaml(yaml, "      duration: %f\n", duration);

		for (p = 0; p < STRESS_PERF_MAX; p++) {
			const char *l = perf_get_label_by_index(p);
			uint64_t ct = counter_totals[p];

			if (l && (ct != STRESS_PERF_INVALID)) {
				char extra[32];
				char yaml_label[128];
				*extra = '\0';

				no_perf_stats = false;

				if ((ids[p] == STRESS_PERF_HW_INSTRUCTIONS) &&
				    (total_cpu_cycles > 0))
					snprintf(extra, sizeof(extra),
						" (%.3f instr. per cycle)",
						(double)ct / (double)total_cpu_cycles);
				if ((ids[p] == STRESS_PERF_HW_CACHE_MISSES) &&
				     (total_cache_refs > 0))
					snprintf(extra, sizeof(extra),
						" (%5.2f%%)",
						100.0 * (double)ct / (double)total_cache_refs);
				if ((ids[p] == STRESS_PERF_HW_BRANCH_MISSES) &&
				    (total_branches > 0))
					snprintf(extra, sizeof(extra),
						" (%5.2f%%)",
						100.0 * (double)ct / (double)total_branches);

				pr_inf(stdout, "%'26" PRIu64 " %-23s %s%s\n",
					ct, l, perf_stat_scale(ct, duration),
					extra);

				perf_yaml_label(yaml_label, l, sizeof(yaml_label));
				pr_yaml(yaml, "      %s_total: %" PRIu64
					"\n", yaml_label, ct);
				pr_yaml(yaml, "      %s_per_second: %f\n",
					yaml_label, (double)ct / duration);
			}
		}
		pr_yaml(yaml, "\n");
	}
	if (no_perf_stats)
		pr_inf(stdout, "perf counters are not available "
			"on this device\n");
}
Example #19
/*
 *  stress_stackmmap
 *	stress a file-backed memory-mapped stack
 */
static int stress_stackmmap(const args_t *args)
{
	int fd;
	volatile int rc = EXIT_FAILURE;		/* could be clobbered */
	char filename[PATH_MAX];

	page_size = args->page_size;
	page_mask = ~(page_size - 1);

	/* Create a file-backed mmap'd region for the stack */
	if (stress_temp_dir_mk_args(args) < 0)
		return EXIT_FAILURE;
	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());

	fd = open(filename, O_SYNC | O_RDWR | O_CREAT, S_IRUSR | S_IWUSR);
	if (fd < 0) {
		pr_fail_err("mmap'd stack file open");
		goto tidy_dir;
	}
	(void)unlink(filename);
	if (ftruncate(fd, MMAPSTACK_SIZE) < 0) {
		pr_fail_err("ftruncate");
		(void)close(fd);
		goto tidy_dir;
	}
	stack_mmap = mmap(NULL, MMAPSTACK_SIZE, PROT_READ | PROT_WRITE,
		MAP_SHARED, fd, 0);
	if (stack_mmap == MAP_FAILED) {
		if (errno == ENXIO) {
			pr_inf("%s: skipping stressor, mmap not possible on file %s\n",
				args->name, filename);
			rc = EXIT_NO_RESOURCE;
			(void)close(fd);
			goto tidy_dir;
		}
		pr_fail_err("mmap");
		(void)close(fd);
		goto tidy_dir;
	}
	(void)close(fd);

	if (shim_madvise(stack_mmap, MMAPSTACK_SIZE, MADV_RANDOM) < 0) {
		pr_dbg("%s: madvise failed: errno=%d (%s)\n",
			args->name, errno, strerror(errno));
	}
	(void)memset(stack_mmap, 0, MMAPSTACK_SIZE);
	(void)memset(&c_test, 0, sizeof(c_test));
	if (getcontext(&c_test) < 0) {
		pr_fail_err("getcontext");
		goto tidy_mmap;
	}
	c_test.uc_stack.ss_sp = stack_mmap;
	c_test.uc_stack.ss_size = MMAPSTACK_SIZE;
	c_test.uc_link = &c_main;

	/*
	 *  set jmp handler to jmp back into the loop on a full
	 *  stack segfault.  Use swapcontext to jump into a
	 *  new context using the new mmap'd stack
	 */
	do {
		pid_t pid;
again:
		if (!g_keep_stressing_flag)
			break;
		pid = fork();
		if (pid < 0) {
			if ((errno == EAGAIN) || (errno == ENOMEM))
				goto again;
			pr_err("%s: fork failed: errno=%d (%s)\n",
				args->name, errno, strerror(errno));
		} else if (pid > 0) {
			int status, waitret;

			/* Parent, wait for child */
			(void)setpgid(pid, g_pgrp);
			waitret = shim_waitpid(pid, &status, 0);
			if (waitret < 0) {
				if (errno != EINTR)
					pr_dbg("%s: waitpid(): errno=%d (%s)\n",
						args->name, errno, strerror(errno));
				(void)kill(pid, SIGTERM);
				(void)kill(pid, SIGKILL);
				(void)shim_waitpid(pid, &status, 0);
			}
		} else if (pid == 0) {
			/* Child */

			(void)setpgid(0, g_pgrp);
			stress_parent_died_alarm();

			/* Make sure this is killable by OOM killer */
			set_oom_adjustment(args->name, true);

			(void)makecontext(&c_test, stress_stackmmap_push_start, 0);
			(void)swapcontext(&c_main, &c_test);

			_exit(0);
		}
		inc_counter(args);
	} while (keep_stressing());

	rc = EXIT_SUCCESS;

tidy_mmap:
	(void)munmap(stack_mmap, MMAPSTACK_SIZE);
tidy_dir:
	(void)stress_temp_dir_rm_args(args);
	return rc;
}
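stress_stackmmap_push_start(), the context entry point installed with makecontext() above, is not shown; a sketch under the assumption that it recurses to consume the mmap'd stack until the guarded stack-full segfault path fires:

static void stress_stackmmap_push_start(void)
{
	/* a local buffer forces real stack consumption per frame */
	uint8_t waste[64];

	waste[0] = mwc8();
	(void)waste;

	/* deliberately unbounded: the overflow is caught and handled */
	if (g_keep_stressing_flag)
		stress_stackmmap_push_start();
}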
Example #20
/*
 *  stress_sendfile
 *	stress reading of a temp file and writing to /dev/null via sendfile
 */
static int stress_sendfile(const args_t *args)
{
	char filename[PATH_MAX];
	int fdin, fdout, ret, rc = EXIT_SUCCESS;
	size_t sz;
	int64_t sendfile_size = DEFAULT_SENDFILE_SIZE;

	if (!get_setting("sendfile-size", &sendfile_size)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			sendfile_size = MAX_SENDFILE_SIZE;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			sendfile_size = MIN_SENDFILE_SIZE;
	}
	sz = (size_t)sendfile_size;

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());

	if ((fdin = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err("open");
		goto dir_out;
	}
#if defined(HAVE_POSIX_FALLOCATE)
	ret = posix_fallocate(fdin, (off_t)0, (off_t)sz);
#else
	ret = shim_fallocate(fdin, 0, (off_t)0, (off_t)sz);
#endif
	if (ret < 0) {
		rc = exit_status(errno);
		pr_fail_err("open");
		goto dir_out;
	}
	if ((fdout = open("/dev/null", O_WRONLY)) < 0) {
		pr_fail_err("open");
		rc = EXIT_FAILURE;
		goto close_in;
	}

	do {
		off_t offset = 0;
		if (sendfile(fdout, fdin, &offset, sz) < 0) {
			if (errno == ENOSYS) {
				pr_inf("%s: skipping stressor, sendfile not implemented\n",
					args->name);
				rc = EXIT_NOT_IMPLEMENTED;
				goto close_out;
			}
			if (errno == EINTR)
				continue;
			pr_fail_err("sendfile");
			rc = EXIT_FAILURE;
			goto close_out;
		}
		inc_counter(args);
	} while (keep_stressing());

close_out:
	(void)close(fdout);
close_in:
	(void)close(fdin);
	(void)unlink(filename);
dir_out:
	(void)stress_temp_dir_rm_args(args);

	return rc;
}
Example #21
void perf_stat_dump(FILE *yaml, proc_info_t *procs_head, const double duration)
{
	bool no_perf_stats = true;
	proc_info_t *pi;

#if defined(HAVE_LOCALE_H)
	(void)setlocale(LC_ALL, "");
#endif

	pr_yaml(yaml, "perfstats:\n");

	for (pi = procs_head; pi; pi = pi->next) {
		int p;
		uint64_t counter_totals[STRESS_PERF_MAX];
		uint64_t total_cpu_cycles = 0;
		uint64_t total_cache_refs = 0;
		uint64_t total_branches = 0;
		bool got_data = false;
		char *munged;

		(void)memset(counter_totals, 0, sizeof(counter_totals));

		/* Sum totals across all instances of the stressor */
		for (p = 0; p < STRESS_PERF_MAX && perf_info[p].label; p++) {
			int32_t j;
			stress_perf_t *sp = &pi->stats[0]->sp;

			if (!perf_stat_succeeded(sp))
				continue;

			for (j = 0; j < pi->started_procs; j++) {
				const uint64_t counter = sp->perf_stat[p].counter;

				if (counter == STRESS_PERF_INVALID) {
					counter_totals[p] = STRESS_PERF_INVALID;
					break;
				}
				counter_totals[p] += counter;
				got_data |= (counter > 0);
			}
			if (perf_info[p].type == PERF_TYPE_HARDWARE) {
				unsigned long config = perf_info[p].config;

				if (config == PERF_COUNT_HW_CPU_CYCLES)
					total_cpu_cycles = counter_totals[p];
				else if (config == PERF_COUNT_HW_CACHE_REFERENCES)
					total_cache_refs = counter_totals[p];
				else if (config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS)
					total_branches = counter_totals[p];
			}
		}

		if (!got_data)
			continue;

		munged = stress_munge_underscore(pi->stressor->name);
		pr_inf("%s:\n", munged);
		pr_yaml(yaml, "    - stressor: %s\n", munged);
		pr_yaml(yaml, "      duration: %f\n", duration);

		for (p = 0; p < STRESS_PERF_MAX && perf_info[p].label; p++) {
			const char *l = perf_info[p].label;
			uint64_t ct = counter_totals[p];

			if (l && (ct != STRESS_PERF_INVALID)) {
				char extra[32];
				char yaml_label[128];
				*extra = '\0';

				no_perf_stats = false;

				if (perf_info[p].type == PERF_TYPE_HARDWARE) {
					unsigned long config = perf_info[p].config;
					if ((config == PERF_COUNT_HW_INSTRUCTIONS) &&
					    (total_cpu_cycles > 0))
						(void)snprintf(extra, sizeof(extra),
							" (%.3f instr. per cycle)",
							(double)ct / (double)total_cpu_cycles);
					else if ((config == PERF_COUNT_HW_CACHE_MISSES) &&
					     (total_cache_refs > 0))
						(void)snprintf(extra, sizeof(extra),
							" (%5.2f%%)",
							100.0 * (double)ct / (double)total_cache_refs);
					else if ((config == PERF_COUNT_HW_BRANCH_MISSES) &&
					    (total_branches > 0))
						(void)snprintf(extra, sizeof(extra),
							" (%5.2f%%)",
							100.0 * (double)ct / (double)total_branches);
				}

				pr_inf("%'26" PRIu64 " %-24s %s%s\n",
					ct, l, perf_stat_scale(ct, duration),
					extra);

				perf_yaml_label(yaml_label, l, sizeof(yaml_label));
				pr_yaml(yaml, "      %s_total: %" PRIu64
					"\n", yaml_label, ct);
				pr_yaml(yaml, "      %s_per_second: %f\n",
					yaml_label, (double)ct / duration);
			}
		}
		pr_yaml(yaml, "\n");
	}
	if (no_perf_stats) {
		if (geteuid() != 0) {
			char buffer[64];
			int ret;
			bool paranoid = false;
			int level = 0;
			static char *path = "/proc/sys/kernel/perf_event_paranoid";

			ret = system_read(path, buffer, sizeof(buffer) - 1);
			if (ret > 0) {
				if (sscanf(buffer, "%5d", &level) == 1)
					paranoid = true;
			}
			if (paranoid && (level > 1)) {
				pr_inf("Cannot read perf counters, "
					"do not have CAP_SYS_ADMIN capability "
					"or %s is set too high (%d)\n",
					path, level);
			}
		} else {
			pr_inf("perf counters are not available "
				"on this device\n");
		}
	}
}
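The system_read() helper used above to fetch perf_event_paranoid is not shown; a minimal sketch: read up to buf_len bytes from a (pseudo-)file, returning the byte count or -errno:

static ssize_t system_read(const char *path, char *buf, const size_t buf_len)
{
	int fd;
	ssize_t ret;

	(void)memset(buf, 0, buf_len);
	if ((fd = open(path, O_RDONLY)) < 0)
		return (ssize_t)-errno;
	ret = read(fd, buf, buf_len);
	if (ret < 0)
		ret = (ssize_t)-errno;
	(void)close(fd);

	return ret;
}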
Example #22
static int stress_rdrand_supported(void)
{
	pr_inf("rdrand stressor will be skipped, CPU "
		"does not support the rdrand instruction.\n");
	return -1;
}
Example #23
/*
 *  stress_sync_file
 *	stress the sync_file_range system call
 */
static int stress_sync_file(const args_t *args)
{
	int fd, ret;
	off_t sync_file_bytes = DEFAULT_SYNC_FILE_BYTES;
	char filename[PATH_MAX];

	if (!get_setting("sync_file-bytes", &sync_file_bytes)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			sync_file_bytes = MAX_SYNC_FILE_BYTES;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			sync_file_bytes = MIN_SYNC_FILE_BYTES;
	}
	sync_file_bytes /= args->num_instances;
	if (sync_file_bytes < (off_t)MIN_SYNC_FILE_BYTES)
		sync_file_bytes = (off_t)MIN_SYNC_FILE_BYTES;

	ret = stress_temp_dir_mk_args(args);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		ret = exit_status(errno);
		pr_fail_err("open");
		(void)stress_temp_dir_rm_args(args);
		return ret;
	}
	(void)unlink(filename);

	do {
		shim_off64_t i, offset;
		const size_t mode_index = mwc32() % SIZEOF_ARRAY(sync_modes);
		const int mode = sync_modes[mode_index];

		if (stress_sync_allocate(args, fd, sync_file_bytes) < 0)
			break;
		for (offset = 0; g_keep_stressing_flag &&
		     (offset < (shim_off64_t)sync_file_bytes); ) {
			shim_off64_t sz = (mwc32() & 0x1fc00) + KB;

			ret = shim_sync_file_range(fd, offset, sz, mode);
			if (ret < 0) {
				if (errno == ENOSYS) {
					pr_inf("%s: skipping stressor, sync_file_range is not implemented\n",
						args->name);
					goto err;
				}
				pr_fail_err("sync_file_range (forward)");
				break;
			}
			offset += sz;
		}
		if (!g_keep_stressing_flag)
			break;

		if (stress_sync_allocate(args, fd, sync_file_bytes) < 0)
			break;
		for (offset = 0; g_keep_stressing_flag &&
		     (offset < (shim_off64_t)sync_file_bytes); ) {
			shim_off64_t sz = (mwc32() & 0x1fc00) + KB;

			ret = shim_sync_file_range(fd, sync_file_bytes - offset, sz, mode);
			if (ret < 0) {
				if (errno == ENOSYS) {
					pr_inf("%s: skipping stressor, sync_file_range is not implemented\n",
						args->name);
					goto err;
				}
				pr_fail_err("sync_file_range (reverse)");
				break;
			}
			offset += sz;
		}
		if (!g_keep_stressing_flag)
			break;

		if (stress_sync_allocate(args, fd, sync_file_bytes) < 0)
			break;
		for (i = 0; g_keep_stressing_flag &&
		     (i < (shim_off64_t)(sync_file_bytes / (128 * KB))); i++) {
			offset = (mwc64() % sync_file_bytes) & ~((128 * KB) - 1);
			ret = shim_sync_file_range(fd, offset, 128 * KB, mode);
			if (ret < 0) {
				if (errno == ENOSYS) {
					pr_inf("%s: skipping stressor, sync_file_range is not implemented\n",
						args->name);
					goto err;
				}
				pr_fail_err("sync_file_range (random)");
				break;
			}
		}
		inc_counter(args);
	} while (keep_stressing());

err:
	(void)close(fd);
	(void)stress_temp_dir_rm_args(args);

	return EXIT_SUCCESS;
}
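shim_sync_file_range() above is a thin wrapper; a sketch that invokes the raw syscall where the kernel exports it and reports ENOSYS otherwise (some architectures instead provide the reordered sync_file_range2 variant, not handled here):

static int shim_sync_file_range(
	int fd,
	shim_off64_t offset,
	shim_off64_t nbytes,
	unsigned int flags)
{
#if defined(__NR_sync_file_range)
	return (int)syscall(__NR_sync_file_range, fd, offset, nbytes, flags);
#else
	(void)fd;
	(void)offset;
	(void)nbytes;
	(void)flags;

	errno = ENOSYS;
	return -1;
#endif
}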
Example #24
/*
 *  stress_klog
 *	stress kernel logging interface
 */
int stress_klog(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	char *buffer;
	ssize_t len;

	len = sys_syslog(SYSLOG_ACTION_SIZE_BUFFER, NULL, 0);
	if (len < 0) {
		if (!instance)
			pr_err(stderr, "%s: cannot determine syslog buffer "
				"size: errno=%d (%s)\n",
				name, errno, strerror(errno));
		return EXIT_NO_RESOURCE;
	}
	if (len == 0) {
		if (!instance)
			pr_err(stderr, "%s: zero sized syslog buffer, aborting.\n", name);
		return EXIT_NO_RESOURCE;
	}
	if (len > (ssize_t)(4 * MB)) {
		if (!instance)
			pr_inf(stderr, "%s: truncating syslog buffer to 4MB\n", name);
		len = 4 * MB;
	}
	buffer = malloc((size_t)len);
	if (!buffer) {
		pr_err(stderr, "%s: cannot allocate syslog buffer\n", name);
		return EXIT_NO_RESOURCE;
	}

	do {
		int ret, buflen = (mwc32() % len) + 1;

		ret = sys_syslog(SYSLOG_ACTION_READ_ALL, buffer, buflen);
		if (ret < 0)
			pr_fail(stderr, "%s: syslog ACTION_READ_ALL failed: "
				"errno=%d (%s)\n",
				name, errno, strerror(errno));
		if (ret > buflen)
			pr_fail(stderr, "%s: syslog ACTION_READ_ALL returned more "
				"data than was requested.\n", name);

		/* open, no-op, ignore failure */
		(void)sys_syslog(SYSLOG_ACTION_OPEN, NULL, 0);

		/* close, no-op, ignore failure */
		(void)sys_syslog(SYSLOG_ACTION_CLOSE, NULL, 0);

		/* get unread size, ignore failure */
		(void)sys_syslog(SYSLOG_ACTION_SIZE_UNREAD, NULL, 0);

		/* get size of kernel buffer, ignore return */
		(void)sys_syslog(SYSLOG_ACTION_SIZE_BUFFER, NULL, 0);

		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	free(buffer);
	return EXIT_SUCCESS;
}
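sys_syslog() wraps the syslog(2) system call (not the C library syslog(3)); on glibc a minimal sketch can lean on klogctl(3) from <sys/klog.h>:

#include <sys/klog.h>

static ssize_t sys_syslog(int type, char *bufp, int len)
{
	return (ssize_t)klogctl(type, bufp, len);
}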
Example #25
/*
 *  stress_aiol
 *	stress asynchronous I/O using the linux specific aio ABI
 */
static int stress_aiol(const args_t *args)
{
	int fd, ret, rc = EXIT_FAILURE;
	char filename[PATH_MAX];
	char buf[64];
	io_context_t ctx = 0;
	uint64_t aio_linux_requests = DEFAULT_AIO_LINUX_REQUESTS;
	uint8_t *buffer;
	uint64_t aio_max_nr = DEFAULT_AIO_MAX_NR;

	if (!get_setting("aiol-requests", &aio_linux_requests)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			aio_linux_requests = MAX_AIO_REQUESTS;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			aio_linux_requests = MIN_AIO_REQUESTS;
	}
	if ((aio_linux_requests < MIN_AIO_REQUESTS) ||
	    (aio_linux_requests > MAX_AIO_REQUESTS)) {
		pr_err("%s: iol_requests out of range", args->name);
		return EXIT_FAILURE;
	}

	ret = system_read("/proc/sys/fs/aio-max-nr", buf, sizeof(buf));
	if (ret > 0) {
		if (sscanf(buf, "%" SCNu64, &aio_max_nr) != 1) {
			/* Guess max */
			aio_max_nr = DEFAULT_AIO_MAX_NR;
		}
	} else {
		/* Guess max */
		aio_max_nr = DEFAULT_AIO_MAX_NR;
	}

	aio_max_nr /= (args->num_instances == 0) ? 1 : args->num_instances;

	if (aio_max_nr < 1)
		aio_max_nr = 1;
	if (aio_linux_requests > aio_max_nr) {
		aio_linux_requests = aio_max_nr;
		if (args->instance == 0)
			pr_inf("%s: Limiting AIO requests to "
				"%" PRIu64 " per stressor (avoids running out of resources)\n",
				args->name, aio_linux_requests);
	}

	ret = posix_memalign((void **)&buffer, 4096,
		aio_linux_requests * BUFFER_SZ);
	if (ret) {
		pr_inf("%s: Out of memory allocating buffers, errno=%d (%s)",
			args->name, errno, strerror(errno));
		return EXIT_NO_RESOURCE;
	}
	ret = io_setup(aio_linux_requests, &ctx);
	if (ret < 0) {
		/*
		 *  The libaio interface returns -errno in the
		 *  return value, so set errno accordingly
		 */
		errno = -ret;
		if ((errno == EAGAIN) || (errno == EACCES)) {
			pr_err("%s: io_setup failed, ran out of "
				"available events, consider increasing "
				"/proc/sys/fs/aio-max-nr, errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			goto free_buffer;
		} else if (errno == ENOMEM) {
			pr_err("%s: io_setup failed, ran out of "
				"memory, errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			goto free_buffer;
		} else if (errno == ENOSYS) {
			pr_err("%s: io_setup failed, no io_setup "
				"system call with this kernel, "
				"errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			goto free_buffer;
		} else {
			pr_fail_err("io_setup");
			rc = EXIT_FAILURE;
			goto free_buffer;
		}
	}
	ret = stress_temp_dir_mk_args(args);
	if (ret < 0) {
		rc = exit_status(-ret);
		goto free_buffer;
	}

	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());

	if ((fd = open(filename, O_CREAT | O_RDWR | O_DIRECT, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err("open");
		goto finish;
	}
	(void)unlink(filename);

	do {
		struct iocb cb[aio_linux_requests];
		struct iocb *cbs[aio_linux_requests];
		struct io_event events[aio_linux_requests];
		uint8_t *buffers[aio_linux_requests];
		uint8_t *bufptr = buffer;
		uint64_t i;
		long n;

		for (i = 0; i < aio_linux_requests; i++, bufptr += BUFFER_SZ) {
			buffers[i] = bufptr;
			aio_linux_fill_buffer(i, buffers[i], BUFFER_SZ);
		}

		(void)memset(cb, 0, sizeof(cb));
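		/*
		 *  Build one control block per request: each is an async
		 *  pwrite of BUFFER_SZ bytes at a random offset that is a
		 *  multiple of BUFFER_SZ
		 */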
		for (i = 0; i < aio_linux_requests; i++) {
			cb[i].aio_fildes = fd;
			cb[i].aio_lio_opcode = IO_CMD_PWRITE;
			cb[i].u.c.buf = buffers[i];
			cb[i].u.c.offset = mwc16() * BUFFER_SZ;
			cb[i].u.c.nbytes = BUFFER_SZ;
			cbs[i] = &cb[i];
		}
		ret = io_submit(ctx, (long)aio_linux_requests, cbs);
		if (ret < 0) {
			errno = -ret;
			if (errno == EAGAIN)
				continue;
			pr_fail_err("io_submit");
			break;
		}

		n = aio_linux_requests;
		do {
			struct timespec timeout;

			/*
			 *  io_getevents() takes a relative timeout,
			 *  so wait for at most 1 millisecond per call
			 */
			timeout.tv_sec = 0;
			timeout.tv_nsec = 1000000;

			ret = io_getevents(ctx, 1, n, events, &timeout);
			if (ret < 0) {
				errno = -ret;
				if (errno == EINTR) {
					if (g_keep_stressing_flag)
						continue;
					else
						break;
				}
				pr_fail_err("io_getevents");
				break;
			} else {
				n -= ret;
			}
		} while ((n > 0) && g_keep_stressing_flag);
		inc_counter(args);
	} while (keep_stressing());

	rc = EXIT_SUCCESS;
	(void)close(fd);
finish:
	(void)io_destroy(ctx);
	(void)stress_temp_dir_rm_args(args);

free_buffer:
	free(buffer);
	return rc;
}
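/*
 *  aio_linux_fill_buffer() is used above but not shown in this
 *  excerpt. A plausible minimal sketch follows: it writes a cheap
 *  deterministic pattern so each request's data is distinguishable.
 *  The exact pattern in the real helper may differ.
 */
static inline void aio_linux_fill_buffer(
	const int request,
	uint8_t *const buffer,
	const size_t size)
{
	register size_t i;

	/* the request number perturbs the pattern per I/O request */
	for (i = 0; i < size; i++)
		buffer[i] = (uint8_t)(request + i);
}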
Example #26
0
/*
 *  stress_pthread()
 *	stress by creating pthreads
 */
int stress_pthread(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pthread_t pthreads[MAX_PTHREAD];
	bool ok = true;
	uint64_t limited = 0, attempted = 0;

	if (!set_pthread_max) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_pthread_max = MAX_PTHREAD;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_pthread_max = MIN_PTHREAD;
	}

	sigfillset(&set);
	do {
		uint64_t i, j;
		int ret;

		thread_terminate = false;
		pthread_count = 0;

		for (i = 0; (i < opt_pthread_max) && (!max_ops || *counter < max_ops); i++) {
			ret = pthread_create(&pthreads[i], NULL,
				stress_pthread_func, NULL);
			if (ret) {
				/* Out of resources, don't try any more */
				if (ret == EAGAIN) {
					limited++;
					break;
				}
				/* Something really unexpected */
				pr_fail_errno(name, "pthread create", ret);
				ok = false;
				break;
			}
			(*counter)++;
			if (!opt_do_run)
				break;
		}
		attempted++;

		/*
		 *  Wait until they are all started or
		 *  we get bored waiting..
		 */
		for (j = 0; j < 1000; j++) {
			bool all_running = false;

			ret = pthread_mutex_lock(&mutex);
			if (ret) {
				pr_fail_errno(name, "mutex lock", ret);
				ok = false;
				goto reap;
			}
			all_running = (pthread_count == i);
			ret = pthread_mutex_unlock(&mutex);
			if (ret) {
				pr_fail_errno(name, "mutex unlock", ret);
				ok = false;
				goto reap;
			}

			if (all_running)
				break;
		}

		ret = pthread_mutex_lock(&mutex);
		if (ret) {
			pr_fail_errno(name, "mutex lock", ret);
			ok = false;
			goto reap;
		}
		thread_terminate = true;
		ret = pthread_cond_broadcast(&cond);
		if (ret) {
			pr_fail_errno(name,
				"pthread condition broadcast", ret);
			ok = false;
			/* fall through and unlock */
		}
		ret = pthread_mutex_unlock(&mutex);
		if (ret) {
			pr_fail_errno(name, "mutex unlock", ret);
			ok = false;
		}
reap:
		for (j = 0; j < i; j++) {
			ret = pthread_join(pthreads[j], NULL);
			if (ret) {
				pr_fail_errno(name, "pthread join", ret);
				ok = false;
			}
		}
	} while (ok && opt_do_run && (!max_ops || *counter < max_ops));

	if (limited) {
		pr_inf(stdout, "%s: %.2f%% of iterations could not reach "
			"requested %" PRIu64 " threads (instance %"
			PRIu32 ")\n",
			name,
			100.0 * (double)limited / (double)attempted,
			opt_pthread_max, instance);
	}

	(void)pthread_cond_destroy(&cond);
	(void)pthread_mutex_destroy(&mutex);

	return EXIT_SUCCESS;
}
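/*
 *  stress_pthread_func() is referenced above but not included in this
 *  excerpt. Given the mutex/condition variable handshake in
 *  stress_pthread(), a minimal sketch of the worker would bump
 *  pthread_count under the lock, then block on the condition variable
 *  until thread_terminate is set. Illustrative only; the real worker
 *  may do more.
 */
static void *stress_pthread_func(void *ctxt)
{
	static void *nowt = NULL;

	(void)ctxt;

	if (pthread_mutex_lock(&mutex) == 0) {
		pthread_count++;	/* tell the parent we are running */
		/* wait for the broadcast that sets thread_terminate */
		while (!thread_terminate)
			(void)pthread_cond_wait(&cond, &mutex);
		(void)pthread_mutex_unlock(&mutex);
	}
	return &nowt;
}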
Example #27
0
static int stress_loop_supported(void)
{
        pr_inf("loop stressor will be skipped, loop is not available\n");
        return -1;
}
/*
 *  stress_socket_server()
 *	server writer
 */
static int stress_socket_server(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name,
	const pid_t pid,
	const pid_t ppid)
{
	char buf[SOCKET_BUF];
	int fd, status;
	int so_reuseaddr = 1;
	socklen_t addr_len = 0;
	struct sockaddr *addr = NULL;
	uint64_t msgs = 0;
	int rc = EXIT_SUCCESS;

	setpgid(pid, pgrp);

	if (stress_sighandler(name, SIGALRM, handle_socket_sigalrm, NULL) < 0) {
		rc = EXIT_FAILURE;
		goto die;
	}
	if ((fd = socket(opt_socket_domain, SOCK_STREAM, 0)) < 0) {
		rc = exit_status(errno);
		pr_fail_dbg(name, "socket");
		goto die;
	}
	if (setsockopt(fd, SOL_SOCKET, SO_REUSEADDR,
		&so_reuseaddr, sizeof(so_reuseaddr)) < 0) {
		pr_fail_dbg(name, "setsockopt");
		rc = EXIT_FAILURE;
		goto die_close;
	}

	stress_set_sockaddr(name, instance, ppid,
		opt_socket_domain, opt_socket_port, &addr, &addr_len);
	if (bind(fd, addr, addr_len) < 0) {
		rc = exit_status(errno);
		pr_fail_dbg(name, "bind");
		goto die_close;
	}
	if (listen(fd, 10) < 0) {
		pr_fail_dbg(name, "listen");
		rc = EXIT_FAILURE;
		goto die_close;
	}

	do {
		int sfd = accept(fd, (struct sockaddr *)NULL, NULL);
		if (sfd >= 0) {
			size_t i, j;
			struct sockaddr addr;
			socklen_t len;
			int sndbuf;
			struct msghdr msg;
			struct iovec vec[sizeof(buf)/16];
#if defined(HAVE_SENDMMSG)
			struct mmsghdr msgvec[MSGVEC_SIZE];
			unsigned int msg_len = 0;
#endif
#if defined(SOCKET_NODELAY)
			int one = 1;
#endif

			len = sizeof(addr);
			if (getsockname(fd, &addr, &len) < 0) {
				pr_fail_dbg(name, "getsockname");
				(void)close(sfd);
				break;
			}
			len = sizeof(sndbuf);
			if (getsockopt(fd, SOL_SOCKET, SO_SNDBUF, &sndbuf, &len) < 0) {
				pr_fail_dbg(name, "getsockopt");
				(void)close(sfd);
				break;
			}
#if defined(SOCKET_NODELAY)
			if (opt_flags & OPT_FLAGS_SOCKET_NODELAY) {
				if (setsockopt(sfd, SOL_TCP, TCP_NODELAY, &one, sizeof(one)) < 0) {
					pr_inf(stderr, "%s: setsockopt TCP_NODELAY "
						"failed and disabled, errno=%d (%s)\n",
						name, errno, strerror(errno));
					opt_flags &= ~OPT_FLAGS_SOCKET_NODELAY;
				}
			}
#endif
			memset(buf, 'A' + (*counter % 26), sizeof(buf));
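			/* exercise the chosen send flavour with messages growing in 16 byte steps */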
			switch (opt_socket_opts) {
			case SOCKET_OPT_SEND:
				for (i = 16; i < sizeof(buf); i += 16) {
					ssize_t ret = send(sfd, buf, i, 0);
					if (ret < 0) {
						if (errno != EINTR)
							pr_fail_dbg(name, "send");
						break;
					} else
						msgs++;
				}
				break;
			case SOCKET_OPT_SENDMSG:
				for (j = 0, i = 16; i < sizeof(buf); i += 16, j++) {
					vec[j].iov_base = buf;
					vec[j].iov_len = i;
				}
				memset(&msg, 0, sizeof(msg));
				msg.msg_iov = vec;
				msg.msg_iovlen = j;
				if (sendmsg(sfd, &msg, 0) < 0) {
					if (errno != EINTR)
						pr_fail_dbg(name, "sendmsg");
				} else
					msgs += j;
				break;
#if defined(HAVE_SENDMMSG)
			case SOCKET_OPT_SENDMMSG:
				memset(msgvec, 0, sizeof(msgvec));
				for (j = 0, i = 16; i < sizeof(buf); i += 16, j++) {
					vec[j].iov_base = buf;
					vec[j].iov_len = i;
					msg_len += i;
				}
				for (i = 0; i < MSGVEC_SIZE; i++) {
					msgvec[i].msg_hdr.msg_iov = vec;
					msgvec[i].msg_hdr.msg_iovlen = j;
				}
				if (sendmmsg(sfd, msgvec, MSGVEC_SIZE, 0) < 0) {
					if (errno != EINTR)
						pr_fail_dbg(name, "sendmmsg");
				} else
					msgs += (MSGVEC_SIZE * j);
				break;
#endif
			default:
				/* Should never happen */
				pr_err(stderr, "%s: bad option %d\n", name, opt_socket_opts);
				(void)close(sfd);
				goto die_close;
			}
			if (getpeername(sfd, &addr, &len) < 0) {
				pr_fail_dbg(name, "getpeername");
			}
			(void)close(sfd);
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

die_close:
	(void)close(fd);
die:
#ifdef AF_UNIX
	if ((opt_socket_domain == AF_UNIX) && addr) {
		struct sockaddr_un *addr_un = (struct sockaddr_un *)addr;
		(void)unlink(addr_un->sun_path);
	}
#endif
	if (pid) {
		(void)kill(pid, SIGKILL);
		(void)waitpid(pid, &status, 0);
	}
	pr_dbg(stderr, "%s: %" PRIu64 " messages sent\n", name, msgs);

	return rc;
}
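/*
 *  The client side that consumes these messages is not part of this
 *  excerpt. A minimal sketch, assuming the same stress_set_sockaddr()
 *  helper and option globals as the server above, would connect and
 *  drain the socket until EOF; the name below marks it as a sketch,
 *  not the real function:
 */
static void stress_socket_client_sketch(
	const char *name,
	const uint32_t instance,
	const pid_t ppid)
{
	char buf[SOCKET_BUF];
	struct sockaddr *addr = NULL;
	socklen_t addr_len = 0;
	int fd;

	if ((fd = socket(opt_socket_domain, SOCK_STREAM, 0)) < 0)
		return;
	stress_set_sockaddr(name, instance, ppid,
		opt_socket_domain, opt_socket_port, &addr, &addr_len);
	if (connect(fd, addr, addr_len) == 0) {
		ssize_t n;

		/* read until the server closes its end */
		while ((n = recv(fd, buf, sizeof(buf), 0)) > 0)
			;
	}
	(void)close(fd);
}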
Example #29
0
/*
 *  stress_msync()
 *	stress msync
 */
int stress_msync(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	const size_t min_size = 2 * page_size;
	size_t sz = min_size;
	ssize_t ret, rc = EXIT_SUCCESS;

	const pid_t pid = getpid();
	int fd = -1;
	char filename[PATH_MAX];

	ret = sigsetjmp(jmp_env, 1);
	if (ret) {
		pr_fail_err(name, "sigsetjmp");
		return EXIT_FAILURE;
	}
	if (stress_sighandler(name, SIGBUS, stress_sigbus_handler, NULL) < 0)
		return EXIT_FAILURE;

	if (!set_msync_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_msync_bytes = MAX_MSYNC_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_msync_bytes = MIN_MSYNC_BYTES;
	}
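	/* round the requested size down to a whole number of pages */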
	sz = opt_msync_bytes & ~(page_size - 1);
	if (sz < min_size)
		sz = min_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	rc = stress_temp_dir_mk(name, pid, instance);
	if (rc < 0)
		return exit_status(-rc);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		(void)unlink(filename);
		(void)stress_temp_dir_rm(name, pid, instance);

		return rc;
	}
	(void)unlink(filename);

	if (ftruncate(fd, sz) < 0) {
		pr_err(stderr, "%s: ftruncate failed, errno=%d (%s)\n",
			name, errno, strerror(errno));
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);

		return EXIT_FAILURE;
	}

	buf = (uint8_t *)mmap(NULL, sz,
		PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		pr_err(stderr, "%s: failed to mmap memory, errno=%d (%s)\n",
			name, errno, strerror(errno));
		rc = EXIT_NO_RESOURCE;
		goto err;
	}

	do {
		off_t offset;
		uint8_t val, data[page_size];

		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			/* Try again */
			continue;
		}
		/*
		 *  Change data in memory, msync to disk
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);
		ret = msync(buf + offset, page_size, MS_SYNC);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_SYNC on "
				"offset %jd failed, errno=%d (%s)",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_invalidate;
		}
		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seet to offset %jd, "
				"errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_invalidate;
		}
		if (stress_page_check(data, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in file different "
				"to data in memory\n", name);
		}

do_invalidate:
		/*
		 *  Now change data on disc, msync invalidate
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);

		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seet to offset %jd, errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_next;
		}
		ret = msync(buf + offset, page_size, MS_INVALIDATE);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_INVALIDATE on "
				"offset %jd failed, errno=%d (%s)",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_next;
		}
		if (stress_page_check(buf + offset, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in memory "
				"different to data in file\n", name);
		}
do_next:

		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)munmap((void *)buf, sz);
err:
	(void)close(fd);
	(void)stress_temp_dir_rm(name, pid, instance);

	if (sigbus_count)
		pr_inf(stdout, "%s: caught %" PRIu64 " SIGBUS signals\n",
			name, sigbus_count);
	return rc;
}
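/*
 *  stress_page_check() is used above but not shown in this excerpt.
 *  A minimal sketch that matches its use here simply verifies that
 *  every byte in the region holds the expected fill value:
 */
static int stress_page_check(
	const uint8_t *buf,
	const uint8_t val,
	const size_t sz)
{
	size_t i;

	for (i = 0; i < sz; i++)
		if (buf[i] != val)
			return -1;	/* mismatch: data differs */
	return 0;
}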
Example #30
0
/*
 *  stress_xattr
 *	stress the xattr operations
 */
int stress_xattr(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	pid_t pid = getpid();
	int ret, fd, rc = EXIT_FAILURE;
	char filename[PATH_MAX];

	ret = stress_temp_dir_mk(name, pid, instance);
	if (ret < 0)
		return exit_status(-ret);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());
	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		goto out;
	}
	(void)unlink(filename);

	do {
		int i, j;
		int ret;
		char attrname[32];
		char value[32];
		ssize_t sz;
		char *buffer;

		for (i = 0; i < 4096; i++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", i);
			snprintf(value, sizeof(value), "orig-value-%d", i);

			ret = fsetxattr(fd, attrname, value, strlen(value), XATTR_CREATE);
			if (ret < 0) {
				if (errno == ENOTSUP) {
					pr_inf(stderr, "%s stressor will be "
						"skipped, filesystem does not "
						"support xattr.\n", name);
				}
				if (errno == ENOSPC || errno == EDQUOT)
					break;
				pr_fail_err(name, "fsetxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", j);
			snprintf(value, sizeof(value), "value-%d", j);

			ret = fsetxattr(fd, attrname, value, strlen(value),
				XATTR_REPLACE);
			if (ret < 0) {
				if (errno == ENOSPC || errno == EDQUOT)
					break;
				pr_fail_err(name, "fsetxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			char tmp[sizeof(value)];

			snprintf(attrname, sizeof(attrname), "user.var_%d", j);
			snprintf(value, sizeof(value), "value-%d", j);

			ret = fgetxattr(fd, attrname, tmp, sizeof(tmp));
			if (ret < 0) {
				pr_fail_err(name, "fgetxattr");
				goto out_close;
			}
			if (strncmp(value, tmp, ret)) {
				pr_fail(stderr, "%s: fgetxattr values "
					"different %.*s vs %.*s\n",
					name, ret, value, ret, tmp);
				goto out_close;
			}
		}
		/* Determine how large a buffer we required... */
		sz = flistxattr(fd, NULL, 0);
		if (sz < 0) {
			pr_fail_err(name, "flistxattr");
			goto out_close;
		}
		buffer = malloc(sz);
		if (buffer) {
			/* ...and fetch */
			sz = flistxattr(fd, buffer, sz);
			free(buffer);

			if (sz < 0) {
				pr_fail_err(name, "flistxattr");
				goto out_close;
			}
		}
		for (j = 0; j < i; j++) {
			snprintf(attrname, sizeof(attrname), "user.var_%d", j);

			ret = fremovexattr(fd, attrname);
			if (ret < 0) {
				pr_fail_err(name, "fremovexattr");
				goto out_close;
			}
		}
		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	rc = EXIT_SUCCESS;
out_close:
	(void)close(fd);
out:
	(void)stress_temp_dir_rm(name, pid, instance);
	return rc;
}
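/*
 *  Several of the examples above map an errno to an exit code via
 *  exit_status(), which is not included in this excerpt. A plausible
 *  minimal sketch: treat resource exhaustion as a soft failure and
 *  anything else as a hard one. The real helper and the exact codes
 *  it returns may differ.
 */
static int exit_status(const int err)
{
	switch (err) {
	case ENOMEM:
	case ENOSPC:
		return EXIT_NO_RESOURCE;	/* ran out of memory or space */
	default:
		return EXIT_FAILURE;
	}
}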