Example #1
static void stress_memthrash_memset(const args_t *args, size_t mem_size)
{
	(void)args;

#if defined(__GNUC__)
	(void)__builtin_memset((void *)mem, mwc8(), mem_size);
#else
	(void)memset((void *)mem, mwc8(), mem_size);
#endif
}
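
All of the examples here draw their randomness from stress-ng's mwc8()/mwc16()/mwc32()/mwc64() helpers, which are not shown in the excerpts. As a point of reference only, the following is a minimal self-contained sketch of a classic Marsaglia multiply-with-carry generator with narrowing/widening wrappers; the seed constants and wrapper shapes are assumptions for illustration, not the exact stress-ng implementation.

#include <stdint.h>

/* Sketch of a two-stream multiply-with-carry PRNG (illustrative only,
 * not the exact stress-ng source): two 16-bit MWC streams combined
 * into one 32-bit result. */
static uint32_t mwc_z = 362436069;
static uint32_t mwc_w = 521288629;

static inline uint32_t mwc32(void)
{
	mwc_z = 36969 * (mwc_z & 65535) + (mwc_z >> 16);
	mwc_w = 18000 * (mwc_w & 65535) + (mwc_w >> 16);
	return (mwc_z << 16) + mwc_w;
}

/* Narrowing/widening wrappers in the shape the examples expect */
static inline uint8_t mwc8(void)
{
	return (uint8_t)mwc32();
}

static inline uint16_t mwc16(void)
{
	return (uint16_t)mwc32();
}

static inline uint64_t mwc64(void)
{
	const uint64_t hi = mwc32();
	const uint64_t lo = mwc32();

	return (hi << 32) | lo;
}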
Example #2
/*
 *  stress_mergesort_cmp_3()
 *	mergesort comparison - random sort(!)
 */
static int stress_mergesort_cmp_3(const void *p1, const void *p2)
{
	int r = ((int)mwc8() % 3) - 1;

	(void)p1;
	(void)p2;

	return r;
}
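
Because the comparator ignores its arguments and answers -1, 0 or +1 at random, it deliberately breaks the consistency a sort implementation normally relies on; the resulting order is meaningless, but the sort's comparison and data-movement paths are exercised with unpredictable branch outcomes. The standalone sketch below shows the idea with a plain insertion sort and rand() standing in for mwc8(); it illustrates the pattern only and is not the stress-ng mergesort harness.

#include <stdio.h>
#include <stdlib.h>

/* Inconsistent comparator: ignores p1/p2 and returns -1, 0 or +1 at
 * random; rand() stands in for mwc8() to keep the sketch standalone. */
static int random_cmp(const void *p1, const void *p2)
{
	(void)p1;
	(void)p2;

	return (rand() % 3) - 1;
}

/* Minimal insertion sort taking a qsort-style comparator; with
 * random_cmp it merely shuffles elements, which is the point. */
static void insertion_sort(int *a, const size_t n,
	int (*cmp)(const void *, const void *))
{
	for (size_t i = 1; i < n; i++) {
		const int key = a[i];
		size_t j = i;

		while ((j > 0) && (cmp(&a[j - 1], &key) > 0)) {
			a[j] = a[j - 1];
			j--;
		}
		a[j] = key;
	}
}

int main(void)
{
	int data[16];

	for (size_t i = 0; i < 16; i++)
		data[i] = (int)i;

	insertion_sort(data, 16, random_cmp);

	for (size_t i = 0; i < 16; i++)
		printf("%d ", data[i]);
	putchar('\n');

	return 0;
}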
Example #3
/*
 *  stress_str_fill()
 *	fill a string with random lower case letters and
 *	NUL terminate it
 */
static void stress_str_fill(char *str, const size_t len)
{
	char *end = str + len;

	while (str < end - 1)
		*str++ = (mwc8() % 26) + 'a';

	*str = '\0';
}
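
Note that len is the total buffer size including the terminating NUL: the loop stops at end - 1, so len - 1 random letters are written and the final byte gets '\0'. A hedged usage sketch (the fill logic copied verbatim, rand() substituted for mwc8() so it builds standalone):

#include <stdio.h>
#include <stdlib.h>

/* Same fill pattern as stress_str_fill(), with rand() standing in
 * for mwc8() to keep the sketch self-contained. */
static void str_fill(char *str, const size_t len)
{
	char *end = str + len;

	while (str < end - 1)
		*str++ = (char)((rand() % 26) + 'a');

	*str = '\0';
}

int main(void)
{
	char buf[32];			/* room for 31 letters + NUL */

	str_fill(buf, sizeof(buf));	/* len includes the NUL byte */
	printf("%s\n", buf);

	return 0;
}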
Example #4
/*
 *  stress_wcs_fill()
 *	fill a wide character string with random lower case letters and
 *	NUL terminate it
 */
static void stress_wcs_fill(wchar_t *wcstr, const size_t len)
{
    register size_t i;

    for (i = 0; i < (len-1); i++) {
        *wcstr++ = (mwc8() % 26) + L'a';
    }
    *wcstr = L'\0';
}
Example #5
static inline HOT OPTIMIZE3 void stress_memthrash_random_chunk(const size_t chunk_size, size_t mem_size)
{
	uint32_t i;
	const uint32_t max = mwc16();
	size_t chunks = mem_size / chunk_size;

	if (chunks < 1)
		chunks = 1;

	for (i = 0; !thread_terminate && (i < max); i++) {
		const size_t chunk = mwc32() % chunks;
		const size_t offset = chunk * chunk_size;
#if defined(__GNUC__)
		(void)__builtin_memset((void *)mem + offset, mwc8(), chunk_size);
#else
		(void)memset((void *)mem + offset, mwc8(), chunk_size);
#endif
	}
}
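
Since chunk is drawn from [0, chunks), the byte offset chunk * chunk_size always leaves a full chunk before the end of the region, provided chunk_size does not exceed mem_size (the chunks < 1 clamp only avoids a division by zero). A self-contained sketch of the same chunked random memset over a heap buffer, with the stress-ng globals (mem, thread_terminate) and mwc*() replaced by local stand-ins and a fixed iteration count:

#include <stdint.h>
#include <stdlib.h>
#include <string.h>

/* Chunked random memset over a private buffer; rand() stands in for
 * the mwc*() helpers and a fixed loop count replaces the terminate flag. */
static void random_chunk_memset(uint8_t *buf, const size_t buf_size, size_t chunk_size)
{
	if ((buf_size == 0) || (chunk_size == 0))
		return;
	if (chunk_size > buf_size)	/* keep every memset inside the buffer */
		chunk_size = buf_size;

	const size_t chunks = buf_size / chunk_size;

	for (unsigned int i = 0; i < 1000; i++) {
		const size_t chunk = (size_t)rand() % chunks;
		const size_t offset = chunk * chunk_size;

		(void)memset(buf + offset, rand() & 0xff, chunk_size);
	}
}

int main(void)
{
	const size_t buf_size = 64 * 1024;	/* e.g. 64 KiB region ... */
	const size_t chunk_size = 4 * 1024;	/* ... thrashed in 4 KiB chunks */
	uint8_t *buf = malloc(buf_size);

	if (!buf)
		return 1;
	random_chunk_memset(buf, buf_size, chunk_size);
	free(buf);

	return 0;
}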
Example #6
/*
 *  push values onto file backed mmap'd stack and
 *  force msync on the map'd region if page boundary
 *  has changed
 */
static void stress_stackmmap_push_msync(void)
{
	void *addr = (void *)(((uintptr_t)&addr) & page_mask);
	static void *laddr;

	if (addr != laddr) {
		msync(addr, page_size, mwc8() >= 128 ? MS_SYNC : MS_ASYNC);
		laddr = addr;
	}
	if (opt_do_run)
		stress_stackmmap_push_msync();
}
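
The key trick here is ((uintptr_t)&addr) & page_mask: taking the address of a local variable gives a location in the current stack frame, and masking with page_mask (assumed to be ~(page_size - 1), with page_size a power of two) rounds it down to the start of its page, so msync() is issued only when the recursion has crossed into a new stack page. A standalone demonstration of that rounding:

#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	/* Power-of-two page sizes mean ~(page_size - 1) clears exactly
	 * the in-page offset bits, rounding an address down to its page. */
	const uintptr_t page_size = (uintptr_t)sysconf(_SC_PAGESIZE);
	const uintptr_t page_mask = ~(page_size - 1);

	int local;
	const uintptr_t addr = (uintptr_t)&local;
	const uintptr_t page_start = addr & page_mask;

	printf("page size   : %zu\n", (size_t)page_size);
	printf("local addr  : %p\n", (void *)addr);
	printf("page start  : %p\n", (void *)page_start);
	printf("page offset : %zu\n", (size_t)(addr - page_start));

	return 0;
}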
Example #7
static void stress_memthrash_random(const args_t *args, size_t mem_size)
{
	/* loop until we find a good candidate */
	for (;;) {
		size_t i = mwc8() % SIZEOF_ARRAY(memthrash_methods);
		const memthrash_func_t func = (memthrash_func_t)memthrash_methods[i].func;

		/* Don't run stress_memthrash_random/all to avoid recursion */
		if ((func != stress_memthrash_random) &&
		    (func != stress_memthrash_all)) {
			func(args, mem_size);
			return;
		}
	}
}
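
memthrash_methods is a table of name/function pairs, and the dispatcher keeps drawing random slots until it lands on an entry that is neither itself nor the run-all wrapper, so one extra draw is usually enough. The excerpt does not include the table, so the sketch below reconstructs the pattern with an illustrative table of its own; the struct layout, entry names and stub workers are assumptions, not the stress-ng definitions.

#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>

/* Illustrative dispatch-by-table pattern; not the stress-ng table. */
typedef void (*thrash_func_t)(size_t mem_size);

typedef struct {
	const char	*name;	/* human readable method name */
	thrash_func_t	func;	/* worker implementing the method */
} thrash_method_t;

static void method_memset(size_t mem_size)
{
	printf("memset %zu\n", mem_size);
}

static void method_matrix(size_t mem_size)
{
	printf("matrix %zu\n", mem_size);
}

static void method_random(size_t mem_size);	/* dispatcher, below */

static const thrash_method_t methods[] = {
	{ "matrix",	method_matrix },
	{ "memset",	method_memset },
	{ "random",	method_random },
};

#define SIZEOF_ARRAY(a)	(sizeof(a) / sizeof((a)[0]))

/* Keep drawing random entries until one is found that is not the
 * dispatcher itself, then run it -- the same exclusion loop as above. */
static void method_random(size_t mem_size)
{
	for (;;) {
		const size_t i = (size_t)rand() % SIZEOF_ARRAY(methods);

		if (methods[i].func != method_random) {
			methods[i].func(mem_size);
			return;
		}
	}
}

int main(void)
{
	method_random(4096);

	return 0;
}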
Example #8
/*
 *  push values onto file backed mmap'd stack and
 *  force msync on the map'd region if page boundary
 *  has changed
 */
static void stress_stackmmap_push_msync(void)
{
	void *addr = (void *)(((uintptr_t)&addr) & page_mask);
	static void *laddr;
	char waste[64];

	/* touch both ends of the pad so the compiler cannot optimize it
	 * away; each recursive call then consumes real stack and keeps
	 * the recursion walking down the mmap'd stack region */
	waste[0] = 0;
	waste[sizeof(waste) - 1] = 0;

	if (addr != laddr) {
		(void)shim_msync(addr, page_size,
			(mwc8() & 1) ? MS_ASYNC : MS_SYNC);
		laddr = addr;
	}
	if (g_keep_stressing_flag)
		stress_stackmmap_push_msync();
}
Example #9
static void HOT OPTIMIZE3 stress_memthrash_matrix(const args_t *args, size_t mem_size)
{
	(void)args;
	(void)mem_size;

	size_t i, j;
	volatile uint8_t *vmem = mem;

	for (i = 0; !thread_terminate && (i < MATRIX_SIZE); i+= ((mwc8() & 0xf) + 1)) {
		for (j = 0; j < MATRIX_SIZE; j+= 16) {
			size_t i1 = (i * MATRIX_SIZE) + j;
			size_t i2 = (j * MATRIX_SIZE) + i;
			uint8_t tmp;

			tmp = vmem[i1];
			vmem[i1] = vmem[i2];
			vmem[i2] = tmp;
		}
	}
}
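
To make the index arithmetic concrete: i1 = i * MATRIX_SIZE + j is element (i, j) of a row-major MATRIX_SIZE x MATRIX_SIZE byte matrix and i2 = j * MATRIX_SIZE + i is its transpose (j, i). With a hypothetical MATRIX_SIZE of 1024, the pair i = 3, j = 16 swaps vmem[3088] with vmem[16387], two locations roughly 13 KiB apart; the random row step of 1 to 16 plus the 16-column inner stride keeps successive swaps hopping across pages rather than streaming through the cache.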
Example #10
/*
 *  stress_msync()
 *	stress msync
 */
int stress_msync(
	uint64_t *const counter,
	const uint32_t instance,
	const uint64_t max_ops,
	const char *name)
{
	uint8_t *buf = NULL;
	const size_t page_size = stress_get_pagesize();
	const size_t min_size = 2 * page_size;
	size_t sz = min_size;
	ssize_t ret, rc = EXIT_SUCCESS;

	const pid_t pid = getpid();
	int fd = -1;
	char filename[PATH_MAX];

	ret = sigsetjmp(jmp_env, 1);
	if (ret) {
		pr_fail_err(name, "sigsetjmp");
		return EXIT_FAILURE;
	}
	if (stress_sighandler(name, SIGBUS, stress_sigbus_handler, NULL) < 0)
		return EXIT_FAILURE;

	if (!set_msync_bytes) {
		if (opt_flags & OPT_FLAGS_MAXIMIZE)
			opt_msync_bytes = MAX_MSYNC_BYTES;
		if (opt_flags & OPT_FLAGS_MINIMIZE)
			opt_msync_bytes = MIN_MSYNC_BYTES;
	}
	sz = opt_msync_bytes & ~(page_size - 1);
	if (sz < min_size)
		sz = min_size;

	/* Make sure this is killable by OOM killer */
	set_oom_adjustment(name, true);

	rc = stress_temp_dir_mk(name, pid, instance);
	if (rc < 0)
		return exit_status(-rc);

	(void)stress_temp_filename(filename, sizeof(filename),
		name, pid, instance, mwc32());

	(void)umask(0077);
	if ((fd = open(filename, O_CREAT | O_RDWR, S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err(name, "open");
		(void)unlink(filename);
		(void)stress_temp_dir_rm(name, pid, instance);

		return rc;
	}
	(void)unlink(filename);

	if (ftruncate(fd, sz) < 0) {
		pr_err(stderr, "%s: ftruncate failed, errno=%d (%s)\n",
			name, errno, strerror(errno));
		(void)close(fd);
		(void)stress_temp_dir_rm(name, pid, instance);

		return EXIT_FAILURE;
	}

	buf = (uint8_t *)mmap(NULL, sz,
		PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (buf == MAP_FAILED) {
		pr_err(stderr, "%s: failed to mmap memory, errno=%d (%s)\n",
			name, errno, strerror(errno));
		rc = EXIT_NO_RESOURCE;
		goto err;
	}

	do {
		off_t offset;
		uint8_t val, data[page_size];

		ret = sigsetjmp(jmp_env, 1);
		if (ret) {
			/* Try again */
			continue;
		}
		/*
		 *  Change data in memory, msync to disk
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);
		ret = msync(buf + offset, page_size, MS_SYNC);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_SYNC on "
				"offset %jd failed, errno=%d (%s)",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_invalidate;
		}
		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seet to offset %jd, "
				"errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_invalidate;
		}
		if (stress_page_check(data, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in file different "
				"to data in memory\n", name);
		}

do_invalidate:
		/*
		 *  Now change data on disc, msync invalidate
		 */
		offset = (mwc64() % (sz - page_size)) & ~(page_size - 1);
		val = mwc8();

		memset(buf + offset, val, page_size);

		ret = lseek(fd, offset, SEEK_SET);
		if (ret == (off_t)-1) {
			pr_err(stderr, "%s: cannot seet to offset %jd, errno=%d (%s)\n",
				name, (intmax_t)offset, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			break;
		}
		ret = read(fd, data, sizeof(data));
		if (ret < (ssize_t)sizeof(data)) {
			pr_fail(stderr, "%s: read failed, errno=%d (%s)\n",
				name, errno, strerror(errno));
			goto do_next;
		}
		ret = msync(buf + offset, page_size, MS_INVALIDATE);
		if (ret < 0) {
			pr_fail(stderr, "%s: msync MS_INVALIDATE on "
				"offset %jd failed, errno=%d (%s)",
				name, (intmax_t)offset, errno, strerror(errno));
			goto do_next;
		}
		if (stress_page_check(buf + offset, val, sizeof(data)) < 0) {
			pr_fail(stderr, "%s: msync'd data in memory "
				"different to data in file\n", name);
		}
do_next:

		(*counter)++;
	} while (opt_do_run && (!max_ops || *counter < max_ops));

	(void)munmap((void *)buf, sz);
err:
	(void)close(fd);
	(void)stress_temp_dir_rm(name, pid, instance);

	if (sigbus_count)
		pr_inf(stdout, "%s: caught %" PRIu64 " SIGBUS signals\n",
			name, sigbus_count);
	return rc;
}
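
Both offset computations in the loop use the same idiom: mwc64() % (sz - page_size) picks a byte offset that leaves at least one page of headroom, and & ~(page_size - 1) rounds it down to a page boundary, so the page_size-byte memset()/msync()/read() starting at that offset never runs past the end of the mapping. A small standalone check of that arithmetic (rand() in place of mwc64(), an illustrative 64 KiB mapping):

#include <assert.h>
#include <stdlib.h>

int main(void)
{
	const size_t page_size = 4096;		/* illustrative page size */
	const size_t sz = 16 * page_size;	/* illustrative mapping size */

	for (int i = 0; i < 100000; i++) {
		const size_t offset = ((size_t)rand() % (sz - page_size))
				      & ~(page_size - 1);

		/* offset is page aligned ... */
		assert((offset & (page_size - 1)) == 0);
		/* ... and a whole page starting there fits inside sz */
		assert(offset + page_size <= sz);
	}

	return 0;
}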
Example #11
/*
 *  stress_hdd
 *	stress I/O via writes
 */
int stress_hdd(
    uint64_t *const counter,
    const uint32_t instance,
    const uint64_t max_ops,
    const char *name)
{
    uint8_t *buf = NULL;
    uint64_t i, min_size, remainder;
    const pid_t pid = getpid();
    int ret, rc = EXIT_FAILURE;
    char filename[PATH_MAX];
    int flags = O_CREAT | O_RDWR | O_TRUNC | opt_hdd_oflags;
    int fadvise_flags = opt_hdd_flags & HDD_OPT_FADV_MASK;

    if (!set_hdd_bytes) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_bytes = MAX_HDD_BYTES;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_bytes = MIN_HDD_BYTES;
    }

    if (!set_hdd_write_size) {
        if (opt_flags & OPT_FLAGS_MAXIMIZE)
            opt_hdd_write_size = MAX_HDD_WRITE_SIZE;
        if (opt_flags & OPT_FLAGS_MINIMIZE)
            opt_hdd_write_size = MIN_HDD_WRITE_SIZE;
    }

    if (opt_hdd_flags & HDD_OPT_O_DIRECT) {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * BUF_ALIGNMENT : MIN_HDD_WRITE_SIZE;
    } else {
        min_size = (opt_hdd_flags & HDD_OPT_IOVEC) ?
                   HDD_IO_VEC_MAX * MIN_HDD_WRITE_SIZE : MIN_HDD_WRITE_SIZE;
    }
    /* Ensure I/O size is not too small */
    if (opt_hdd_write_size < min_size) {
        opt_hdd_write_size = min_size;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes\n",
               name, opt_hdd_write_size);
    }

    /* Ensure we get same sized iovec I/O sizes */
    remainder = opt_hdd_write_size % HDD_IO_VEC_MAX;
    if ((opt_hdd_flags & HDD_OPT_IOVEC) && (remainder != 0)) {
        opt_hdd_write_size += HDD_IO_VEC_MAX - remainder;
        pr_inf(stderr, "%s: increasing read/write size to %" PRIu64 " bytes in iovec mode\n",
               name, opt_hdd_write_size);
    }

    /* Ensure complete file size is not less than the I/O size */
    if (opt_hdd_bytes < opt_hdd_write_size) {
        opt_hdd_bytes = opt_hdd_write_size;
        pr_inf(stderr, "%s: increasing file size to write size of %" PRIu64 " bytes\n",
               name, opt_hdd_bytes);
    }


    if (stress_temp_dir_mk(name, pid, instance) < 0)
        return EXIT_FAILURE;

    /* Must have some write option */
    if ((opt_hdd_flags & HDD_OPT_WR_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_WR_SEQ;
    /* Must have some read option */
    if ((opt_hdd_flags & HDD_OPT_RD_MASK) == 0)
        opt_hdd_flags |= HDD_OPT_RD_SEQ;

    ret = posix_memalign((void **)&buf, BUF_ALIGNMENT, (size_t)opt_hdd_write_size);
    if (ret || !buf) {
        pr_err(stderr, "%s: cannot allocate buffer\n", name);
        (void)stress_temp_dir_rm(name, pid, instance);
        return EXIT_FAILURE;
    }

    for (i = 0; i < opt_hdd_write_size; i++)
        buf[i] = mwc8();

    (void)stress_temp_filename(filename, sizeof(filename),
                               name, pid, instance, mwc32());
    do {
        int fd;

        (void)umask(0077);
        if ((fd = open(filename, flags, S_IRUSR | S_IWUSR)) < 0) {
            pr_failed_err(name, "open");
            goto finish;
        }
        if (ftruncate(fd, (off_t)0) < 0) {
            pr_failed_err(name, "ftruncate");
            (void)close(fd);
            goto finish;
        }
        (void)unlink(filename);

        if (stress_hdd_advise(name, fd, fadvise_flags) < 0) {
            (void)close(fd);
            goto finish;
        }

        /* Random Write */
        if (opt_hdd_flags & HDD_OPT_WR_RND) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                size_t j;

                off_t offset = (i == 0) ?
                               opt_hdd_bytes :
                               (mwc64() % opt_hdd_bytes) & ~511;
                ssize_t ret;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                for (j = 0; j < opt_hdd_write_size; j++)
                    buf[j] = (offset + j) & 0xff;

                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }
        /* Sequential Write */
        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                size_t j;
seq_wr_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                for (j = 0; j < opt_hdd_write_size; j += 512)
                    buf[j] = (i + j) & 0xff;
                ret = stress_hdd_write(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_wr_retry;
                    if (errno) {
                        pr_failed_err(name, "write");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                (*counter)++;
            }
        }

        /* Sequential Read */
        if (opt_hdd_flags & HDD_OPT_RD_SEQ) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            if (lseek(fd, 0, SEEK_SET) < 0) {
                pr_failed_err(name, "lseek");
                (void)close(fd);
                goto finish;
            }
            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
seq_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;

                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto seq_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    size_t j;

                    for (j = 0; j < opt_hdd_write_size; j += 512) {
                        uint8_t v = (i + j) & 0xff;
                        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
                            /* Write seq has written to all of the file, so it should always be OK */
                            if (buf[j] != v)
                                baddata++;
                        } else {
                            /* Write rnd has written to some of the file, so data either zero or OK */
                            if (buf[0] != 0 && buf[0] != v)
                                baddata++;
                        }
                    }
                }
                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete sequential reads\n",
                       name, misreads);
            if (baddata)
                pr_fail(stderr, "%s: incorrect data found %" PRIu64 " times\n",
                        name, baddata);
        }
        /* Random Read */
        if (opt_hdd_flags & HDD_OPT_RD_RND) {
            uint64_t misreads = 0;
            uint64_t baddata = 0;

            for (i = 0; i < opt_hdd_bytes; i += opt_hdd_write_size) {
                ssize_t ret;
                off_t offset = (mwc64() % (opt_hdd_bytes - opt_hdd_write_size)) & ~511;

                if (lseek(fd, offset, SEEK_SET) < 0) {
                    pr_failed_err(name, "lseek");
                    (void)close(fd);
                    goto finish;
                }
rnd_rd_retry:
                if (!opt_do_run || (max_ops && *counter >= max_ops))
                    break;
                ret = stress_hdd_read(fd, buf, (size_t)opt_hdd_write_size);
                if (ret <= 0) {
                    if ((errno == EAGAIN) || (errno == EINTR))
                        goto rnd_rd_retry;
                    if (errno) {
                        pr_failed_err(name, "read");
                        (void)close(fd);
                        goto finish;
                    }
                    continue;
                }
                if (ret != (ssize_t)opt_hdd_write_size)
                    misreads++;

                if (opt_flags & OPT_FLAGS_VERIFY) {
                    size_t j;

                    for (j = 0; j < opt_hdd_write_size; j += 512) {
                        uint8_t v = (i + j) & 0xff;
                        if (opt_hdd_flags & HDD_OPT_WR_SEQ) {
                            /* Write seq has written to all of the file, so it should always be OK */
                            if (buf[j] != v)
                                baddata++;
                        } else {
                            /* Write rnd has written to some of the file, so data either zero or OK */
                            if (buf[0] != 0 && buf[0] != v)
                                baddata++;
                        }
                    }
                }

                (*counter)++;
            }
            if (misreads)
                pr_dbg(stderr, "%s: %" PRIu64 " incomplete random reads\n",
                       name, misreads);
        }
        (void)close(fd);

    } while (opt_do_run && (!max_ops || *counter < max_ops));

    rc = EXIT_SUCCESS;
finish:
    free(buf);
    (void)stress_temp_dir_rm(name, pid, instance);
    return rc;
}
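
The random read and write paths mask their offsets with ~511 so every seek lands on a 512-byte (sector) boundary, which matches the alignment O_DIRECT traditionally requires and is also why the I/O buffer itself comes from posix_memalign() with BUF_ALIGNMENT. A standalone check of that offset arithmetic, with rand() in place of mwc64() and illustrative sizes:

#include <assert.h>
#include <stdint.h>
#include <stdlib.h>

int main(void)
{
	const uint64_t hdd_bytes = 1024 * 1024;	/* illustrative file size */
	const uint64_t write_size = 4096;	/* illustrative I/O size */

	for (int i = 0; i < 100000; i++) {
		const uint64_t offset =
			((uint64_t)rand() % (hdd_bytes - write_size)) & ~(uint64_t)511;

		/* offset is sector (512 byte) aligned ... */
		assert((offset & 511) == 0);
		/* ... and a full write_size transfer starting there stays in range */
		assert(offset + write_size <= hdd_bytes);
	}

	return 0;
}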