/*
 * Test setup: verify the acpi-cpufreq boost knob exists, save the
 * current boost and up_threshold values, switch cpu0 to the
 * "userspace" governor and pin this process to cpu0 so that the
 * frequency changes made by the test affect the CPU it runs on.
 */
static void setup(void)
{
	int fd;

	tst_require_root(NULL);

	/* Probe the boost file; absence means no driver / no boost support. */
	fd = open(boost, O_RDWR);
	if (fd == -1) {
		tst_brkm(TCONF, NULL,
			"acpi-cpufreq not loaded or overclock not supported");
	}
	close(fd);

	tst_sig(FORK, DEF_HANDLER, cleanup);

	/* Save original sysfs values so cleanup() can restore them. */
	SAFE_FILE_SCANF(NULL, boost, "%d", &boost_value);
	SAFE_FILE_SCANF(NULL, up_limit, "%d", &threshold);

	/* change cpu0 scaling governor */
	SAFE_FILE_SCANF(NULL, governor, "%s", governor_name);
	SAFE_FILE_PRINTF(cleanup, governor, "%s", "userspace");

	/* use only cpu0 */
	cpu_set_t set;

	CPU_ZERO(&set);
	CPU_SET(0, &set);
	if (sched_setaffinity(0, sizeof(cpu_set_t), &set) < 0)
		tst_brkm(TBROK | TERRNO, cleanup, "failed to set CPU0");

	/* Lower up_threshold so modest load triggers a frequency raise. */
	SAFE_FILE_PRINTF(cleanup, up_limit, "11");
}
/*
 * Test setup: require root and THP support, then make khugepaged run
 * continuously (both sleep knobs zeroed) and force THP mode "always",
 * saving every original sysfs value first so it can be restored later.
 */
void setup(void)
{
	tst_require_root(NULL);

	if (access(PATH_THP, F_OK) == -1)
		tst_brkm(TCONF, NULL, "THP is not enabled");

	SAFE_FILE_SCANF(NULL, PATH_KHPD "scan_sleep_millisecs",
			"%d", &pre_thp_scan_sleep_millisecs);
	/* set 0 to khugepaged/scan_sleep_millisecs to run khugepaged 100% */
	SAFE_FILE_PRINTF(NULL, PATH_KHPD "scan_sleep_millisecs", "0");

	SAFE_FILE_SCANF(NULL, PATH_KHPD "alloc_sleep_millisecs",
			"%d", &pre_thp_alloc_sleep_millisecs);
	/*
	 * set 0 to khugepaged/alloc_sleep_millisecs to make sure khugepaged
	 * don't stop if there's a hugepage allocation failure.
	 */
	SAFE_FILE_PRINTF(NULL, PATH_KHPD "alloc_sleep_millisecs", "0");

	/* "%[^\n]" grabs the whole line, e.g. "[always] madvise never" */
	SAFE_FILE_SCANF(NULL, PATH_THP "enabled", "%[^\n]", pre_thp_enabled);
	/* open khugepaged as 'always' mode */
	SAFE_FILE_PRINTF(NULL, PATH_THP "enabled", "always");

	tst_sig(FORK, DEF_HANDLER, NULL);

	TEST_PAUSE;
}
/*
 * Test setup: require root and >2GB RAM, raise shmmax to SIZE (saving
 * the original for cleanup), and reserve enough extra huge pages to
 * back a SIZE-byte segment on top of the existing reservation.
 */
void setup(void)
{
	long mem_total, hpage_size;

	tst_require_root();

	mem_total = read_meminfo("MemTotal:"); /* value is in kB */

	/* Save shmmax, raise it to SIZE, then read back the effective value. */
	SAFE_FILE_SCANF(NULL, PATH_SHMMAX, "%ld", &orig_shmmax);
	SAFE_FILE_PRINTF(NULL, PATH_SHMMAX, "%ld", (long)SIZE);
	SAFE_FILE_SCANF(NULL, PATH_SHMMAX, "%ld", &new_shmmax);

	/* MemTotal is in kB, so 2L*1024*1024 kB == 2GB. */
	if (mem_total < 2L*1024*1024)
		tst_brkm(TCONF, NULL, "Needed > 2GB RAM, have: %ld", mem_total);
	if (new_shmmax < SIZE)
		tst_brkm(TCONF, NULL, "shmmax too low, have: %ld", new_shmmax);

	orig_hugepages = get_sys_tune("nr_hugepages");
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	/* Keep the current reservation and add pages for SIZE more bytes. */
	hugepages = (orig_hugepages * hpage_size + SIZE) / hpage_size;
	set_sys_tune("nr_hugepages", hugepages, 1);

	TEST_PAUSE;
}
/*
 * We try raising bdi readahead limit as much as we can. We write
 * and read back "read_ahead_kb" sysfs value, starting with filesize.
 * If that fails, we try again with lower value.
 * readahead_length used in the test is then set to MIN(bdi limit, 2M),
 * to respect kernels prior to commit 600e19afc5f8a6c.
 */
static void setup_readahead_length(void)
{
	struct stat sbuf;
	char tmp[PATH_MAX], *backing_dev;
	int ra_new_limit, ra_limit;

	/* Find out backing device name */
	SAFE_LSTAT(tst_device->dev, &sbuf);
	if (S_ISLNK(sbuf.st_mode))
		SAFE_READLINK(tst_device->dev, tmp, PATH_MAX);
	else
		strcpy(tmp, tst_device->dev);

	backing_dev = basename(tmp);
	sprintf(sys_bdi_ra_path, "/sys/class/block/%s/bdi/read_ahead_kb",
		backing_dev);
	/* No bdi knob for this device: keep the default readahead_length. */
	if (access(sys_bdi_ra_path, F_OK))
		return;

	/* Save the original limit so cleanup can restore it. */
	SAFE_FILE_SCANF(sys_bdi_ra_path, "%d", &orig_bdi_limit);

	/* raise bdi limit as much as kernel allows */
	ra_new_limit = testfile_size / 1024;
	while (ra_new_limit > pagesize / 1024) {
		/* Deliberately non-SAFE: the kernel may reject the value. */
		FILE_PRINTF(sys_bdi_ra_path, "%d", ra_new_limit);
		SAFE_FILE_SCANF(sys_bdi_ra_path, "%d", &ra_limit);

		if (ra_limit == ra_new_limit) {
			/* Cap at 2M for kernels prior to 600e19afc5f8a6c. */
			readahead_length = MIN(ra_new_limit * 1024,
				2 * 1024 * 1024);
			break;
		}

		/* Write did not stick: halve the request and retry. */
		ra_new_limit = ra_new_limit / 2;
	}
}
static void setup(void) { check_hugepage(); hugepagesize = SAFE_READ_MEMINFO("Hugepagesize:") * 1024; init_sys_sz_paths(); if (opt_sysfs) { path = path_sys_sz_huge; pathover = path_sys_sz_over; } else { path = PATH_PROC_HUGE; pathover = PATH_PROC_OVER; } if (opt_alloc) { size = atoi(opt_alloc); length = (size + size * 0.5) * 2; } if (opt_shmid) { SAFE_FILE_SCANF(PATH_SHMMAX, "%llu", &shmmax); if (shmmax < (unsigned long long)(length / 2 * hugepagesize)) { restore_shmmax = 1; SAFE_FILE_PRINTF(PATH_SHMMAX, "%ld", (length / 2 * hugepagesize)); } } SAFE_FILE_SCANF(path, "%ld", &nr_hugepages); tst_res(TINFO, "original nr_hugepages is %ld", nr_hugepages); /* Reset. */ SAFE_FILE_PRINTF(path, "%ld", size); restore_nr_hgpgs = 1; if (access(pathover, F_OK)) { tst_brk(TCONF, "file %s does not exist in the system", pathover); } SAFE_FILE_SCANF(pathover, "%ld", &nr_overcommit_hugepages); tst_res(TINFO, "original nr_overcommit_hugepages is %ld", nr_overcommit_hugepages); /* Reset. */ SAFE_FILE_PRINTF(pathover, "%ld", size); restore_overcomm_hgpgs = 1; SAFE_MKDIR(MOUNT_DIR, 0700); SAFE_MOUNT(NULL, MOUNT_DIR, "hugetlbfs", 0, NULL); mounted = 1; if (opt_shmid) { /* Use /proc/meminfo to generate an IPC key. */ key = ftok(PATH_MEMINFO, strlen(PATH_MEMINFO)); if (key == -1) tst_brk(TBROK | TERRNO, "ftok"); } }
/*
 * Estimate how many PIDs are still available on the system: the
 * difference between pid_max (read from procfs) and the number of
 * PIDs currently in use (counted via 'ps -eT'). Returns -1 when the
 * 'ps' pipeline cannot be run or its output cannot be parsed.
 */
int tst_get_free_pids_(void (*cleanup_fn) (void))
{
	FILE *ps_out;
	int matched, used_pids, max_pids;

	ps_out = popen("ps -eT | wc -l", "r");
	if (ps_out == NULL) {
		tst_resm(TBROK, "Could not run 'ps' to calculate used pids");
		return -1;
	}

	matched = fscanf(ps_out, "%i", &used_pids);
	pclose(ps_out);

	if (matched != 1 || used_pids < 0) {
		tst_resm(TBROK, "Could not read output of 'ps' to calculate used pids");
		return -1;
	}

	SAFE_FILE_SCANF(cleanup_fn, PID_MAX_PATH, "%d", &max_pids);

	/*
	 * max_pids contains the maximum PID + 1,
	 * used_pids contains used PIDs + 1,
	 * so this additional '1' is eliminated by the subtraction
	 */
	return max_pids - used_pids;
}
/*
 * Append to 'cpus' a comma-separated list of the online CPUs that
 * belong to NUMA node 'nd', then strip the trailing comma (assumes at
 * least one CPU is appended — TODO confirm callers guarantee this).
 */
static void gather_node_cpus(char *cpus, long nd)
{
	int total_cpus = 0;
	int cpu;
	long is_online;
	char item[BUFSIZ];
	char cpu_path[BUFSIZ], online_path[BUFSIZ];

	/* Count CPUs present in the system. */
	while (path_exist(PATH_SYS_SYSTEM "/cpu/cpu%d", total_cpus))
		total_cpus++;

	for (cpu = 0; cpu < total_cpus; cpu++) {
		snprintf(cpu_path, BUFSIZ,
			 PATH_SYS_SYSTEM "/node/node%ld/cpu%d", nd, cpu);
		if (!path_exist(cpu_path))
			continue;

		snprintf(online_path, BUFSIZ, "%s/online", cpu_path);
		/*
		 * if there is no online knob, then the cpu cannot
		 * be taken offline
		 */
		if (path_exist(online_path)) {
			SAFE_FILE_SCANF(cleanup, online_path, "%ld",
					&is_online);
			if (is_online == 0)
				continue;
		}

		sprintf(item, "%d,", cpu);
		strcat(cpus, item);
	}

	/* Remove the trailing comma. */
	cpus[strlen(cpus) - 1] = '\0';
}
/*
 * Test setup: create a test file in a tmpdir, watch it with inotify
 * for all events, and read the system-wide max_queued_events limit
 * for use by the test proper.
 */
static void setup(void)
{
	tst_sig(NOFORK, DEF_HANDLER, cleanup);

	TEST_PAUSE;

	tst_tmpdir();

	sprintf(fname, "tfile_%d", getpid());
	fd = SAFE_OPEN(cleanup, fname, O_RDWR | O_CREAT, 0700);
	SAFE_WRITE(cleanup, 1, fd, buf, BUF_SIZE);
	SAFE_CLOSE(cleanup, fd);

	/*
	 * Raw syscall so an ENOSYS kernel can be reported as TCONF.
	 * NOTE(review): O_NONBLOCK is passed where inotify_init1()
	 * documents IN_NONBLOCK; they share a value on Linux — confirm
	 * this is intentional.
	 */
	fd_notify = syscall(__NR_inotify_init1, O_NONBLOCK);
	if (fd_notify < 0) {
		if (errno == ENOSYS) {
			tst_brkm(TCONF, cleanup,
				 "inotify is not configured in this kernel.");
		} else {
			tst_brkm(TBROK | TERRNO, cleanup,
				 "inotify_init failed");
		}
	}

	wd = myinotify_add_watch(fd_notify, fname, IN_ALL_EVENTS);
	if (wd < 0) {
		tst_brkm(TBROK | TERRNO, cleanup,
			 "inotify_add_watch (%d, %s, IN_ALL_EVENTS) failed",
			 fd_notify, fname);
	};

	SAFE_FILE_SCANF(cleanup, "/proc/sys/fs/inotify/max_queued_events",
			"%d", &max_events);
}
/*
 * Run the OOM test for normal, mlocked and (when available) KSM pages.
 *
 * mempolicy selects the NUMA memory policy applied before the runs;
 * lite selects a lighter allocation pattern; retcode is the exit
 * status expected from the OOM victim; allow_sigkill permits death by
 * SIGKILL to count as success.
 */
void testoom(int mempolicy, int lite, int retcode, int allow_sigkill)
{
	int ksm_run_orig;

	set_global_mempolicy(mempolicy);

	tst_res(TINFO, "start normal OOM testing.");
	oom(NORMAL, lite, retcode, allow_sigkill);

	tst_res(TINFO, "start OOM testing for mlocked pages.");
	oom(MLOCK, lite, retcode, allow_sigkill);

	/*
	 * Skip oom(KSM) if lite == 1, since limit_in_bytes may vary from
	 * run to run, which isn't reliable for oom03 cgroup test.
	 */
	if (access(PATH_KSM, F_OK) == -1 || lite == 1) {
		tst_res(TINFO, "KSM is not configed or lite == 1, "
			 "skip OOM test for KSM pags");
	} else {
		tst_res(TINFO, "start OOM testing for KSM pages.");
		/* Force ksmd on for the run, then restore the saved mode. */
		SAFE_FILE_SCANF(PATH_KSM "run", "%d", &ksm_run_orig);
		SAFE_FILE_PRINTF(PATH_KSM "run", "1");
		oom(KSM, lite, retcode, allow_sigkill);
		SAFE_FILE_PRINTF(PATH_KSM "run", "%d", ksm_run_orig);
	}
}
/*
 * Save the original sched autogroup setting so it can be restored;
 * TCONF when the kernel does not expose the knob.
 */
static void setup(void)
{
	if (access(PATH_AUTOGROUP, F_OK) != 0)
		tst_brk(TCONF, "autogroup not supported");

	SAFE_FILE_SCANF(PATH_AUTOGROUP, "%d", &orig_autogroup);
}
/*
 * Test setup: require root and KSM support (kernel >= 2.6.32); where
 * the merge_across_nodes knob exists, enable it for the test after
 * saving the original value for restoration.
 */
void setup(void)
{
	tst_require_root(NULL);

	if (tst_kvercmp(2, 6, 32) < 0)
		tst_brkm(TCONF, NULL, "2.6.32 or greater kernel required");

	if (access(PATH_KSM, F_OK) == -1)
		tst_brkm(TCONF, NULL, "KSM configuration is not enabled");

	/*
	 * kernel commit 90bd6fd introduced a new KSM sysfs knob
	 * /sys/kernel/mm/ksm/merge_across_nodes, setting it to '0'
	 * will prevent KSM pages being merged across numa nodes,
	 * which will cause the case fail, so we need to make sure
	 * it is enabled before testing.
	 */
	if (access(PATH_KSM "merge_across_nodes", F_OK) == 0) {
		SAFE_FILE_SCANF(NULL, PATH_KSM "merge_across_nodes",
				"%d", &merge_across_nodes);
		SAFE_FILE_PRINTF(NULL, PATH_KSM "merge_across_nodes", "1");
	}

	tst_sig(FORK, DEF_HANDLER, NULL);
	TEST_PAUSE;
}
/* Read the system PID limit from /proc/sys/kernel/pid_max. */
static long get_pid_max(void)
{
	long limit;

	SAFE_FILE_SCANF(NULL, "/proc/sys/kernel/pid_max", "%ld", &limit);

	return limit;
}
/*
 * Return a PID no running process can hold — the value read from
 * PID_MAX_PATH (presumably pid_max; kernel PIDs stay below it —
 * verify against the header defining PID_MAX_PATH).
 */
pid_t tst_get_unused_pid_(void (*cleanup_fn) (void))
{
	pid_t unused;

	SAFE_FILE_SCANF(cleanup_fn, PID_MAX_PATH, "%d", &unused);

	return unused;
}
static void setup(void) { int fd; unsigned int i; tst_require_root(NULL); for (i = 0; i < ARRAY_SIZE(cdrv); ++i) { fd = open(cdrv[i].file, O_RDWR); if (fd == -1) continue; id = i; close(fd); break; } if (id == -1) tst_brkm(TCONF, NULL, "overclock not supported"); tst_resm(TINFO, "found '%s' driver, sysfs knob '%s'", cdrv[id].name, cdrv[id].file); tst_sig(FORK, DEF_HANDLER, cleanup); SAFE_FILE_SCANF(NULL, cdrv[i].file, "%d", &boost_value); /* change cpu0 scaling governor */ SAFE_FILE_SCANF(NULL, governor, "%s", governor_name); SAFE_FILE_PRINTF(cleanup, governor, "%s", "performance"); /* use only cpu0 */ cpu_set_t set; CPU_ZERO(&set); CPU_SET(0, &set); if (sched_setaffinity(0, sizeof(cpu_set_t), &set) < 0) tst_brkm(TBROK | TERRNO, cleanup, "failed to set CPU0"); struct sched_param params; params.sched_priority = sched_get_priority_max(SCHED_FIFO); if (sched_setscheduler(getpid(), SCHED_FIFO, ¶ms)) { tst_resm(TWARN | TERRNO, "failed to set FIFO sched with max priority"); } }
/*
 * Return the minor page fault count (minflt) of the current process.
 *
 * In /proc/self/stat, minflt is the 10th field (pid, comm, state,
 * ppid, pgrp, session, tty_nr, tpgid, flags, minflt), so exactly nine
 * fields must be skipped before reading the value. The previous
 * format skipped eleven, which reads majflt (field 12) instead.
 */
static int get_page_fault_num(void)
{
	int pg;

	SAFE_FILE_SCANF("/proc/self/stat",
			"%*s %*s %*s %*s %*s %*s %*s %*s %*s %d",
			&pg);

	return pg;
}
/*
 * Clamp *shm_size down to the system shmmax limit read from procfs.
 *
 * shmmax is a size_t, so the conversions must use %zu; the previous
 * "%ld" mismatched the argument type (undefined behavior, and wrong
 * on platforms where long and size_t differ).
 */
void update_shm_size(size_t * shm_size)
{
	size_t shmmax;

	SAFE_FILE_SCANF(cleanup, PATH_SHMMAX, "%zu", &shmmax);
	if (*shm_size > shmmax) {
		tst_resm(TINFO, "Set shm_size to shmmax: %zu", shmmax);
		*shm_size = shmmax;
	}
}
/* Read the numeric VM tunable 'sys_file' under PATH_SYSVM as a long. */
long get_sys_tune(char *sys_file)
{
	char knob_path[BUFSIZ];
	long value;

	snprintf(knob_path, BUFSIZ, PATH_SYSVM "%s", sys_file);
	SAFE_FILE_SCANF(NULL, knob_path, "%ld", &value);

	return value;
}
/*
 * Log the current value of the KSM knob 'path' and report TFAIL when
 * it differs from the expected 'value'.
 */
static void check(char *path, long int value)
{
	char knob[BUFSIZ];
	long current;

	snprintf(knob, BUFSIZ, PATH_KSM "%s", path);
	SAFE_FILE_SCANF(cleanup, knob, "%ld", &current);
	tst_resm(TINFO, "%s is %ld.", path, current);

	if (current != value)
		tst_resm(TFAIL, "%s is not %ld.", path, value);
}
/*
 * Block until khugepaged has finished scanning, detected by polling
 * three of its sysfs statistics until none change between samples.
 */
static void khugepaged_scan_done(void)
{
	int changing = 1, count = 0, interval;
	long old_pages_collapsed = 0, old_max_ptes_none = 0,
	     old_pages_to_scan = 0;
	long pages_collapsed = 0, max_ptes_none = 0, pages_to_scan = 0;

	/*
	 * as 'khugepaged' run 100% during testing, so 5s is an
	 * enough interval for us to recognize if 'khugepaged'
	 * finish scanning processes' anonymous hugepages or not.
	 */
	interval = 5;

	while (changing) {
		sleep(interval);
		count++;

		SAFE_FILE_SCANF(cleanup, PATH_KHPD "pages_collapsed",
				"%ld", &pages_collapsed);
		SAFE_FILE_SCANF(cleanup, PATH_KHPD "max_ptes_none",
				"%ld", &max_ptes_none);
		SAFE_FILE_SCANF(cleanup, PATH_KHPD "pages_to_scan",
				"%ld", &pages_to_scan);

		/* Any movement means the daemon is still working. */
		if (pages_collapsed != old_pages_collapsed ||
		    max_ptes_none != old_max_ptes_none ||
		    pages_to_scan != old_pages_to_scan) {
			old_pages_collapsed = pages_collapsed;
			old_max_ptes_none = max_ptes_none;
			old_pages_to_scan = pages_to_scan;
		} else {
			changing = 0;
		}
	}

	tst_resm(TINFO, "khugepaged daemon takes %ds to scan all thp pages",
		 count * interval);
}
/*
 * Block until ksmd has finished scanning, detected by polling four of
 * its sysfs counters every 10s until none change between samples.
 */
static void wait_ksmd_done(void)
{
	long pages_shared, pages_sharing, pages_volatile, pages_unshared;
	long old_pages_shared = 0, old_pages_sharing = 0;
	long old_pages_volatile = 0, old_pages_unshared = 0;
	int changing = 1, count = 0;

	while (changing) {
		sleep(10);
		count++;

		SAFE_FILE_SCANF(cleanup, PATH_KSM "pages_shared",
				"%ld", &pages_shared);
		SAFE_FILE_SCANF(cleanup, PATH_KSM "pages_sharing",
				"%ld", &pages_sharing);
		SAFE_FILE_SCANF(cleanup, PATH_KSM "pages_volatile",
				"%ld", &pages_volatile);
		SAFE_FILE_SCANF(cleanup, PATH_KSM "pages_unshared",
				"%ld", &pages_unshared);

		/* Any counter movement means ksmd is still scanning. */
		if (pages_shared != old_pages_shared ||
		    pages_sharing != old_pages_sharing ||
		    pages_volatile != old_pages_volatile ||
		    pages_unshared != old_pages_unshared) {
			old_pages_shared = pages_shared;
			old_pages_sharing = pages_sharing;
			old_pages_volatile = pages_volatile;
			old_pages_unshared = pages_unshared;
		} else {
			changing = 0;
		}
	}

	tst_resm(TINFO, "ksm daemon takes %ds to scan all mergeable pages",
		 count * 10);
}
/*
 * Compare the long value read from 'path' against 'exp_val'.
 * Returns 0 on match, 1 (and a TFAIL report) on mismatch; 'string'
 * is the human-readable name used in the log messages.
 */
static int checksys(char *path, char *string, long exp_val)
{
	long val;

	SAFE_FILE_SCANF(path, "%ld", &val);
	tst_res(TINFO, "%s is %ld.", string, val);

	if (val == exp_val)
		return 0;

	tst_res(TFAIL, "%s is not %ld but %ld.", string, exp_val, val);
	return 1;
}
/*
 * Set the cpufreq scaling_setspeed knob to 'freq' (kHz per cpufreq
 * convention — confirm against the test's setspeed path), skipping
 * the write when the current value already matches.
 */
static void set_speed(int freq)
{
	int cur_freq;

	SAFE_FILE_SCANF(cleanup, setspeed, "%d", &cur_freq);

	if (cur_freq == freq) {
		tst_resm(TINFO, "set speed is %d", cur_freq);
		return;
	}

	tst_resm(TINFO, "change speed from %d to %d...", cur_freq, freq);
	SAFE_FILE_PRINTF(cleanup, setspeed, "%d", freq);
}
/*
 * Verify that the KSM knob 'path' currently holds 'value'; report
 * TPASS on match, TFAIL (with both values) otherwise.
 */
static void check(char *path, long int value)
{
	char knob[BUFSIZ];
	long current;

	snprintf(knob, BUFSIZ, PATH_KSM "%s", path);
	SAFE_FILE_SCANF(knob, "%ld", &current);

	if (current == value)
		tst_res(TPASS, "%s is %ld.", path, current);
	else
		tst_res(TFAIL, "%s is not %ld but %ld.", path, value,
			current);
}
/*
 * Return the readahead limit, in bytes, of the block device backing
 * 'fname' (read from its queue/read_ahead_kb sysfs attribute).
 *
 * ra_kb is unsigned long, so it must be scanned with %lu; the old
 * "%ld" specifier mismatched the argument's signedness.
 */
static long get_device_readahead(const char *fname)
{
	struct stat st;
	unsigned long ra_kb = 0;
	char buf[256];

	if (stat(fname, &st) == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "stat");

	snprintf(buf, sizeof(buf), "/sys/dev/block/%d:%d/queue/read_ahead_kb",
		 major(st.st_dev), minor(st.st_dev));

	SAFE_FILE_SCANF(cleanup, buf, "%lu", &ra_kb);

	return ra_kb * 1024; /* kB -> bytes */
}
static void run_pci_testcases(int bus, int slot) { int i, res; for (i = 0; i < TST_TOTAL; ++i) { /* skip pci disable test-case, it is manual */ if (i == PCI_DISABLE) continue; SAFE_FILE_PRINTF(cleanup, dev_tcase, "%d", i); SAFE_FILE_SCANF(cleanup, dev_result, "%d", &res); tst_resm(res, "PCI bus %02x slot %02x : Test-case '%d'", bus, slot, i); } }
/*
 * Test setup: require root, THP support and a NUMA system, then make
 * khugepaged run continuously (both sleep knobs zeroed) and force THP
 * mode "always", saving each original sysfs value for restoration.
 */
void setup(void)
{
	tst_require_root(NULL);

	if (access(PATH_THP, F_OK) == -1)
		tst_brkm(TCONF, NULL, "THP is not enabled");

	if (!is_numa(NULL))
		tst_brkm(TCONF, NULL, "The case need a NUMA system.");

	SAFE_FILE_SCANF(NULL, PATH_KHPD "scan_sleep_millisecs",
			"%d", &pre_thp_scan_sleep_millisecs);
	/* Make khugepaged scan with no pauses between passes. */
	SAFE_FILE_PRINTF(NULL, PATH_KHPD "scan_sleep_millisecs", "0");

	SAFE_FILE_SCANF(NULL, PATH_KHPD "alloc_sleep_millisecs",
			"%d", &pre_thp_alloc_sleep_millisecs);
	/* Keep khugepaged going even after an allocation failure. */
	SAFE_FILE_PRINTF(NULL, PATH_KHPD "alloc_sleep_millisecs", "0");

	/* "%[^\n]" grabs the whole line, e.g. "[always] madvise never" */
	SAFE_FILE_SCANF(NULL, PATH_THP "enabled", "%[^\n]", pre_thp_enabled);
	SAFE_FILE_PRINTF(NULL, PATH_THP "enabled", "always");

	tst_sig(FORK, DEF_HANDLER, NULL);
	TEST_PAUSE;
}
/*
 * Test setup: require root and kernel >= 2.6.32, create a tmpdir,
 * save the original nr_hugepages and reserve a single huge page.
 */
static void setup(void)
{
	tst_require_root();

	if ((tst_kvercmp(2, 6, 32)) < 0) {
		tst_brkm(TCONF, NULL, "This test can only run on kernels "
			 "that are 2.6.32 or higher");
	}

	tst_tmpdir();

	/* Saved so cleanup can restore the system-wide setting. */
	SAFE_FILE_SCANF(NULL, PATH_NR_HUGEPAGES, "%ld", &orig_hugepages);
	SAFE_FILE_PRINTF(NULL, PATH_NR_HUGEPAGES, "%d", 1);

	TEST_PAUSE;
}
/*
 * Drive each ACPI test-case: ACPI_TRAVERSE runs in user space via
 * tc_acpi_str(); every other case number is written to the kernel
 * module's control file and its result read back and mapped to
 * TPASS/TFAIL.
 */
static void test_run(void)
{
	int tc, status;

	for (tc = 0; tc < TST_TOTAL; ++tc) {
		if (tc == ACPI_TRAVERSE) {
			status = tc_acpi_str();
		} else {
			SAFE_FILE_PRINTF(cleanup, dev_tcase, "%d", tc);
			SAFE_FILE_SCANF(cleanup, dev_result, "%d", &status);
			status = status ? TFAIL : TPASS;
		}
		tst_resm(status, "Test-case '%d'", tc);
	}
}
/*
 * Test setup: require root and KSM support (kernel >= 2.6.32), enable
 * merging across NUMA nodes where the knob exists (saving the
 * original value), and mount the memory cgroup used by the test.
 */
void setup(void)
{
	tst_require_root();

	if (tst_kvercmp(2, 6, 32) < 0)
		tst_brkm(TCONF, NULL, "2.6.32 or greater kernel required");

	if (access(PATH_KSM, F_OK) == -1)
		tst_brkm(TCONF, NULL, "KSM configuration is not enabled");

	/* Older kernels lack this knob; only touch it when present. */
	if (access(PATH_KSM "merge_across_nodes", F_OK) == 0) {
		SAFE_FILE_SCANF(NULL, PATH_KSM "merge_across_nodes",
				"%d", &merge_across_nodes);
		SAFE_FILE_PRINTF(NULL, PATH_KSM "merge_across_nodes", "1");
	}

	mount_mem("memcg", "cgroup", "memory", MEMCG_PATH, MEMCG_PATH_NEW);

	tst_sig(FORK, DEF_HANDLER, NULL);
	TEST_PAUSE;
}
/*
 * Test setup: require KSM support, parse the size/num/unit options,
 * and enable merging across NUMA nodes where the knob exists (the
 * original value is saved for restoration).
 */
static void setup(void)
{
	if (access(PATH_KSM, F_OK) == -1)
		tst_brk(TCONF, "KSM configuration is not enabled");

	parse_ksm_options(opt_sizestr, &size, opt_numstr, &num,
			  opt_unitstr, &unit);

	/*
	 * kernel commit 90bd6fd introduced a new KSM sysfs knob
	 * /sys/kernel/mm/ksm/merge_across_nodes, setting it to '0'
	 * will prevent KSM pages being merged across numa nodes,
	 * which will cause the case fail, so we need to make sure
	 * it is enabled before testing.
	 */
	if (access(PATH_KSM "merge_across_nodes", F_OK) == 0) {
		SAFE_FILE_SCANF(PATH_KSM "merge_across_nodes",
				"%d", &merge_across_nodes);
		SAFE_FILE_PRINTF(PATH_KSM "merge_across_nodes", "1");
	}
}