/*
 *  readn()
 *	read exactly bufsize bytes from fd, coping with short reads;
 *	returns the number of bytes read, or a negative value on error
 */
int readn(int fd, char *buf, int bufsize, ioTunnel *en)
{
	int nleft, nread;

	nleft = bufsize;
	while (nleft > 0) {
#ifdef WIN32
		nread = recv(fd, buf, nleft, 0);
#else
		nread = en == NULL ? system_read(fd, buf, nleft) :
				     en->eRead(fd, buf, nleft);
#endif /* WIN32 */
		if (nread < 0)
			return (nread);
		else if (nread == 0)
			break;		/* EOF: peer closed */
		nleft -= nread;
		buf += nread;
	}
	return (bufsize - nleft);
}
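/*
 *  A hypothetical caller of readn() above, sketched for illustration:
 *  it reads an exact-length record, relying on readn() to loop over
 *  short reads. The "sock" descriptor and the 512-byte record size are
 *  assumptions, not part of the original code.
 */
static int read_record(int sock)
{
	char record[512];
	int got = readn(sock, record, sizeof(record), NULL);	/* NULL: no I/O tunnel */

	if (got < 0)
		return -1;	/* I/O error from read()/recv() */
	if (got < (int)sizeof(record))
		return -1;	/* EOF before a full record arrived */
	return 0;		/* all sizeof(record) bytes received */
}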
/* intercept_read() - GLib-typed pass-through wrapper around system_read() */
ssize_t intercept_read(gint fd, gpointer buf, gint n)
{
	return system_read(fd, buf, n);
}
/*
 *  stress_aiol
 *	stress asynchronous I/O using the Linux-specific aio ABI
 */
static int stress_aiol(const args_t *args)
{
	int fd, ret, rc = EXIT_FAILURE;
	char filename[PATH_MAX];
	char buf[64];
	io_context_t ctx = 0;
	uint64_t aio_linux_requests = DEFAULT_AIO_LINUX_REQUESTS;
	uint8_t *buffer;
	uint64_t aio_max_nr = DEFAULT_AIO_MAX_NR;

	if (!get_setting("aiol-requests", &aio_linux_requests)) {
		if (g_opt_flags & OPT_FLAGS_MAXIMIZE)
			aio_linux_requests = MAX_AIO_REQUESTS;
		if (g_opt_flags & OPT_FLAGS_MINIMIZE)
			aio_linux_requests = MIN_AIO_REQUESTS;
	}
	if ((aio_linux_requests < MIN_AIO_REQUESTS) ||
	    (aio_linux_requests > MAX_AIO_REQUESTS)) {
		pr_err("%s: iol_requests out of range\n", args->name);
		return EXIT_FAILURE;
	}

	/* Cap the per-stressor request count by the system-wide AIO limit */
	ret = system_read("/proc/sys/fs/aio-max-nr", buf, sizeof(buf));
	if (ret > 0) {
		if (sscanf(buf, "%" SCNu64, &aio_max_nr) != 1) {
			/* Guess max */
			aio_max_nr = DEFAULT_AIO_MAX_NR;
		}
	} else {
		/* Guess max */
		aio_max_nr = DEFAULT_AIO_MAX_NR;
	}

	aio_max_nr /= (args->num_instances == 0) ? 1 : args->num_instances;
	if (aio_max_nr < 1)
		aio_max_nr = 1;
	if (aio_linux_requests > aio_max_nr) {
		aio_linux_requests = aio_max_nr;
		if (args->instance == 0)
			pr_inf("%s: Limiting AIO requests to "
				"%" PRIu64 " per stressor (avoids running "
				"out of resources)\n",
				args->name, aio_linux_requests);
	}

	ret = posix_memalign((void **)&buffer, 4096,
		aio_linux_requests * BUFFER_SZ);
	if (ret) {
		pr_inf("%s: Out of memory allocating buffers, errno=%d (%s)\n",
			args->name, errno, strerror(errno));
		return EXIT_NO_RESOURCE;
	}

	ret = io_setup(aio_linux_requests, &ctx);
	if (ret < 0) {
		/*
		 *  The libaio interface returns -errno in the
		 *  return value, so set errno accordingly
		 */
		errno = -ret;
		if ((errno == EAGAIN) || (errno == EACCES)) {
			pr_err("%s: io_setup failed, ran out of "
				"available events, consider increasing "
				"/proc/sys/fs/aio-max-nr, errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			goto free_buffer;
		} else if (errno == ENOMEM) {
			pr_err("%s: io_setup failed, ran out of "
				"memory, errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			goto free_buffer;
		} else if (errno == ENOSYS) {
			pr_err("%s: io_setup failed, no io_setup "
				"system call with this kernel, "
				"errno=%d (%s)\n",
				args->name, errno, strerror(errno));
			rc = EXIT_NO_RESOURCE;
			goto free_buffer;
		} else {
			pr_fail_err("io_setup");
			rc = EXIT_FAILURE;
			goto free_buffer;
		}
	}
	ret = stress_temp_dir_mk_args(args);
	if (ret < 0) {
		rc = exit_status(-ret);
		goto free_buffer;
	}
	(void)stress_temp_filename_args(args,
		filename, sizeof(filename), mwc32());

	if ((fd = open(filename, O_CREAT | O_RDWR | O_DIRECT,
			S_IRUSR | S_IWUSR)) < 0) {
		rc = exit_status(errno);
		pr_fail_err("open");
		goto finish;
	}
	(void)unlink(filename);

	do {
		struct iocb cb[aio_linux_requests];
		struct iocb *cbs[aio_linux_requests];
		struct io_event events[aio_linux_requests];
		uint8_t *buffers[aio_linux_requests];
		uint8_t *bufptr = buffer;
		uint64_t i;
		long n;

		for (i = 0; i < aio_linux_requests; i++, bufptr += BUFFER_SZ) {
			buffers[i] = bufptr;
			aio_linux_fill_buffer(i, buffers[i], BUFFER_SZ);
		}

		/* Queue a batch of random-offset pwrite requests */
		(void)memset(cb, 0, sizeof(cb));
		for (i = 0; i < aio_linux_requests; i++) {
			cb[i].aio_fildes = fd;
			cb[i].aio_lio_opcode = IO_CMD_PWRITE;
			cb[i].u.c.buf = buffers[i];
			cb[i].u.c.offset = mwc16() * BUFFER_SZ;
			cb[i].u.c.nbytes = BUFFER_SZ;
			cbs[i] = &cb[i];
		}
		ret = io_submit(ctx, (long)aio_linux_requests, cbs);
		if (ret < 0) {
			errno = -ret;
			if (errno == EAGAIN)
				continue;
			pr_fail_err("io_submit");
			break;
		}

		/* Reap completions until every submitted request is done */
		n = aio_linux_requests;
		do {
			struct timespec timeout, *timeout_ptr;

			if (clock_gettime(CLOCK_REALTIME, &timeout) < 0) {
				timeout_ptr = NULL;
			} else {
				timeout.tv_nsec += 1000000;
				if (timeout.tv_nsec >= 1000000000) {
					timeout.tv_nsec -= 1000000000;
					timeout.tv_sec++;
				}
				timeout_ptr = &timeout;
			}

			ret = io_getevents(ctx, 1, n, events, timeout_ptr);
			if (ret < 0) {
				errno = -ret;
				if (errno == EINTR) {
					if (g_keep_stressing_flag)
						continue;
					else
						break;
				}
				pr_fail_err("io_getevents");
				break;
			} else {
				n -= ret;
			}
		} while ((n > 0) && g_keep_stressing_flag);
		inc_counter(args);
	} while (keep_stressing());

	rc = EXIT_SUCCESS;
	(void)close(fd);
finish:
	(void)io_destroy(ctx);
	(void)stress_temp_dir_rm_args(args);

free_buffer:
	free(buffer);
	return rc;
}
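/*
 *  A minimal, self-contained sketch of the same io_setup/io_submit/
 *  io_getevents cycle used by stress_aiol() above, stripped of the
 *  stress-ng plumbing. The file name, 4 KiB buffer size and single
 *  in-flight request are illustrative assumptions; link with -laio,
 *  and note that O_DIRECT requires aligned buffers and sizes.
 */
#define _GNU_SOURCE		/* for O_DIRECT */
#include <fcntl.h>
#include <libaio.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	io_context_t ctx = 0;
	struct iocb cb, *cbs[1] = { &cb };
	struct io_event ev;
	void *buf;
	int fd, ret;

	if (posix_memalign(&buf, 4096, 4096))	/* O_DIRECT needs aligned I/O */
		return EXIT_FAILURE;
	memset(buf, 0xaa, 4096);

	fd = open("scratch.dat", O_CREAT | O_RDWR | O_DIRECT, 0600);
	if (fd < 0)
		return EXIT_FAILURE;

	if (io_setup(1, &ctx) < 0)		/* reserve 1 in-flight event */
		return EXIT_FAILURE;

	io_prep_pwrite(&cb, fd, buf, 4096, 0);	/* queue one 4 KiB write */
	ret = io_submit(ctx, 1, cbs);
	if (ret == 1) {
		ret = io_getevents(ctx, 1, 1, &ev, NULL); /* block for completion */
		if (ret == 1)
			printf("write completed, res=%lld\n", (long long)ev.res);
	}

	io_destroy(ctx);
	close(fd);
	free(buf);
	unlink("scratch.dat");
	return EXIT_SUCCESS;
}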
void perf_stat_dump(FILE *yaml, proc_info_t *procs_head, const double duration)
{
	bool no_perf_stats = true;
	proc_info_t *pi;

#if defined(HAVE_LOCALE_H)
	(void)setlocale(LC_ALL, "");
#endif

	pr_yaml(yaml, "perfstats:\n");

	for (pi = procs_head; pi; pi = pi->next) {
		int p;
		uint64_t counter_totals[STRESS_PERF_MAX];
		uint64_t total_cpu_cycles = 0;
		uint64_t total_cache_refs = 0;
		uint64_t total_branches = 0;
		bool got_data = false;
		char *munged;

		(void)memset(counter_totals, 0, sizeof(counter_totals));

		/* Sum totals across all instances of the stressor */
		for (p = 0; p < STRESS_PERF_MAX && perf_info[p].label; p++) {
			int32_t j;

			for (j = 0; j < pi->started_procs; j++) {
				const stress_perf_t *sp = &pi->stats[j]->sp;
				uint64_t counter;

				if (!perf_stat_succeeded(sp))
					continue;

				counter = sp->perf_stat[p].counter;
				if (counter == STRESS_PERF_INVALID) {
					counter_totals[p] = STRESS_PERF_INVALID;
					break;
				}
				counter_totals[p] += counter;
				got_data |= (counter > 0);
			}
			if (perf_info[p].type == PERF_TYPE_HARDWARE) {
				unsigned long config = perf_info[p].config;

				if (config == PERF_COUNT_HW_CPU_CYCLES)
					total_cpu_cycles = counter_totals[p];
				else if (config == PERF_COUNT_HW_CACHE_REFERENCES)
					total_cache_refs = counter_totals[p];
				else if (config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS)
					total_branches = counter_totals[p];
			}
		}
		if (!got_data)
			continue;

		munged = stress_munge_underscore(pi->stressor->name);
		pr_inf("%s:\n", munged);
		pr_yaml(yaml, "    - stressor: %s\n", munged);
		pr_yaml(yaml, "      duration: %f\n", duration);

		for (p = 0; p < STRESS_PERF_MAX && perf_info[p].label; p++) {
			const char *l = perf_info[p].label;
			uint64_t ct = counter_totals[p];

			if (l && (ct != STRESS_PERF_INVALID)) {
				char extra[32];
				char yaml_label[128];
				*extra = '\0';

				no_perf_stats = false;

				/* Derive ratios from related hardware counters */
				if (perf_info[p].type == PERF_TYPE_HARDWARE) {
					unsigned long config = perf_info[p].config;

					if ((config == PERF_COUNT_HW_INSTRUCTIONS) &&
					    (total_cpu_cycles > 0))
						(void)snprintf(extra, sizeof(extra),
							" (%.3f instr. per cycle)",
							(double)ct / (double)total_cpu_cycles);
					else if ((config == PERF_COUNT_HW_CACHE_MISSES) &&
						 (total_cache_refs > 0))
						(void)snprintf(extra, sizeof(extra),
							" (%5.2f%%)",
							100.0 * (double)ct / (double)total_cache_refs);
					else if ((config == PERF_COUNT_HW_BRANCH_MISSES) &&
						 (total_branches > 0))
						(void)snprintf(extra, sizeof(extra),
							" (%5.2f%%)",
							100.0 * (double)ct / (double)total_branches);
				}

				pr_inf("%'26" PRIu64 " %-24s %s%s\n",
					ct, l, perf_stat_scale(ct, duration),
					extra);

				perf_yaml_label(yaml_label, l, sizeof(yaml_label));
				pr_yaml(yaml, "      %s_total: %" PRIu64 "\n",
					yaml_label, ct);
				pr_yaml(yaml, "      %s_per_second: %f\n",
					yaml_label, (double)ct / duration);
			}
		}
		pr_yaml(yaml, "\n");
	}
	if (no_perf_stats) {
		if (geteuid() != 0) {
			char buffer[64];
			int ret;
			bool paranoid = false;
			int level = 0;
			static const char *path = "/proc/sys/kernel/perf_event_paranoid";

			ret = system_read(path, buffer, sizeof(buffer) - 1);
			if (ret > 0) {
				if (sscanf(buffer, "%5d", &level) == 1)
					paranoid = true;
			}
			if (paranoid && (level > 1)) {
				pr_inf("Cannot read perf counters, "
					"do not have CAP_SYS_ADMIN capability "
					"or %s is set too high (%d)\n",
					path, level);
			}
		} else {
			pr_inf("perf counters are not available "
				"on this device\n");
		}
	}
}
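/*
 *  A minimal sketch of the perf_event_paranoid probe performed at the
 *  end of perf_stat_dump() above, using plain stdio rather than
 *  stress-ng's system_read() helper; the function name is a hypothetical
 *  choice for illustration.
 */
#include <stdio.h>

static int perf_paranoid_level(void)
{
	int level = -1;
	FILE *fp = fopen("/proc/sys/kernel/perf_event_paranoid", "r");

	if (fp) {
		if (fscanf(fp, "%d", &level) != 1)
			level = -1;	/* unparseable: treat as unknown */
		fclose(fp);
	}
	return level;	/* > 1 means unprivileged users cannot read CPU events */
}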