/*
 * setup() - prepare the overcommit_memory test.
 *
 * Requires root, installs the default signal handler, then verifies the
 * overcommit sysctls exist (TCONF otherwise).  Saves the original
 * overcommit tunables for cleanup(), applies the test's overcommit_ratio,
 * and records MemTotal+SwapTotal (sum_total) and CommitLimit (all in kB)
 * in globals used by the test body.
 */
void setup(void)
{
	long mem_total, swap_total;

	tst_require_root(NULL);
	tst_sig(NOFORK, DEF_HANDLER, cleanup);
	TEST_PAUSE;

	/* Skip when the kernel does not expose the overcommit knobs. */
	if (access(PATH_SYSVM "overcommit_memory", F_OK) == -1 ||
	    access(PATH_SYSVM "overcommit_ratio", F_OK) == -1)
		tst_brkm(TCONF, NULL, "The system "
			 "can't support to test %s", TCID);

	/* Save originals so cleanup() can restore them. */
	old_overcommit_memory = get_sys_tune("overcommit_memory");
	old_overcommit_ratio = get_sys_tune("overcommit_ratio");

	set_sys_tune("overcommit_ratio", overcommit_ratio, 1);

	mem_total = read_meminfo("MemTotal:");
	tst_resm(TINFO, "MemTotal is %ld kB", mem_total);
	swap_total = read_meminfo("SwapTotal:");
	tst_resm(TINFO, "SwapTotal is %ld kB", swap_total);
	sum_total = mem_total + swap_total;

	commit_limit = read_meminfo("CommitLimit:");
	tst_resm(TINFO, "CommitLimit is %ld kB", commit_limit);
}
/*
 * setup() - verify RAM/shmmax prerequisites and grow the hugepage pool.
 *
 * Saves the original shmmax and nr_hugepages (restored by cleanup()),
 * raises shmmax to SIZE, and adds enough huge pages on top of the
 * existing pool to back SIZE bytes.  TCONF when the machine has < 2 GB
 * RAM or shmmax could not be raised to SIZE.
 */
void setup(void)
{
	long mem_total, hpage_size;

	tst_require_root();

	mem_total = read_meminfo("MemTotal:");	/* in kB */
	SAFE_FILE_SCANF(NULL, PATH_SHMMAX, "%ld", &orig_shmmax);
	SAFE_FILE_PRINTF(NULL, PATH_SHMMAX, "%ld", (long)SIZE);
	SAFE_FILE_SCANF(NULL, PATH_SHMMAX, "%ld", &new_shmmax);

	/* 2L*1024*1024 kB == 2 GB */
	if (mem_total < 2L*1024*1024)
		tst_brkm(TCONF, NULL, "Needed > 2GB RAM, have: %ld",
			 mem_total);
	if (new_shmmax < SIZE)
		tst_brkm(TCONF, NULL, "shmmax too low, have: %ld",
			 new_shmmax);

	orig_hugepages = get_sys_tune("nr_hugepages");
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */
	/* current pool plus enough extra pages to cover SIZE bytes */
	hugepages = (orig_hugepages * hpage_size + SIZE) / hpage_size;
	set_sys_tune("nr_hugepages", hugepages, 1);

	TEST_PAUSE;
}
/*
 * main() - hugepage leak regression driver.
 *
 * Each iteration snapshots HugePages_Free, runs shared_hugepage(), and
 * snapshots again; any difference means a huge page leaked and the test
 * breaks with TFAIL.
 */
int main(int ac, char **av)
{
	int lc, i;

	tst_parse_opts(ac, av, NULL, NULL);

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		tst_count = 0;
		for (i = 0; i < TST_TOTAL; i++) {
			/* free-hugepage count before and after the workload */
			huge_free = read_meminfo("HugePages_Free:");
			shared_hugepage();
			huge_free2 = read_meminfo("HugePages_Free:");

			if (huge_free2 != huge_free)
				tst_brkm(TFAIL, cleanup,
					 "Test failed. Hugepage leak inspection.");
			else
				tst_resm(TPASS, "No regression found.");
		}
	}
	cleanup();
	tst_exit();
}
/*
 * setup() - create and attach a hugepage-backed SysV shared memory
 * segment.
 *
 * Saves the original nr_hugepages (restored by cleanup()), reserves the
 * test's huge pages, creates a read/write segment of half the reserved
 * pool, attaches it, and writes an initial value through the pointer.
 */
void setup(void)
{
	long hpage_size;

	tst_require_root(NULL);
	tst_sig(NOFORK, sighandler, cleanup);
	tst_tmpdir();

	/* Save the tunable so cleanup() can restore it. */
	orig_hugepages = get_sys_tune("nr_hugepages");
	set_sys_tune("nr_hugepages", hugepages, 1);
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	shm_size = hpage_size * hugepages / 2;
	update_shm_size(&shm_size);
	shmkey = getipckey();

	/* create a shared memory resource with read and write permissions */
	shm_id_1 = shmget(shmkey, shm_size,
			  SHM_HUGETLB | SHM_RW | IPC_CREAT | IPC_EXCL);
	if (shm_id_1 == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "shmget");

	/* attach the shared memory segment */
	shared = shmat(shm_id_1, 0, 0);
	if (shared == (void *)-1)
		tst_brkm(TBROK | TERRNO, cleanup, "shmat #1");

	/* give a value to the shared memory integer */
	*shared = 4;

	TEST_PAUSE;
}
/**
 * show_mem shouldn't change the behavior of any of its
 * callers, it only prints a message to the user showing the
 * total amount of memory in the system (in megabytes).
 *
 * Note: declared with (void) — an empty parameter list in C is an
 * old-style declaration that provides no prototype checking.
 */
void show_mem(void)
{
	long mem_total;

	mem_total = read_meminfo(MEM_TOTAL);
	/* meminfo reports kB; convert to MB for display */
	printf("Total System Memory: %ld MB\n\n", mem_total / 1024);
}
/*
 * setup() - create a hugepage SHM segment and look up a non-root uid.
 *
 * Reserves huge pages (original count saved for cleanup()), creates a
 * read/write hugepage segment of half the pool, and resolves the uid of
 * ltp_user for the unprivileged part of the test.
 */
void setup(void)
{
	long hpage_size;

	tst_require_root();
	check_hugepage();
	tst_sig(FORK, DEF_HANDLER, cleanup);
	tst_tmpdir();

	/* Save the tunable so cleanup() can restore it. */
	orig_hugepages = get_sys_tune("nr_hugepages");
	set_sys_tune("nr_hugepages", hugepages, 1);
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	shm_size = hpage_size * hugepages / 2;
	update_shm_size(&shm_size);
	shmkey = getipckey(cleanup);
	shm_id_1 = shmget(shmkey, shm_size,
			  SHM_HUGETLB | IPC_CREAT | IPC_EXCL | SHM_RW);
	if (shm_id_1 == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "shmget");

	/* get the userid for a non root user */
	ltp_uid = getuserid(cleanup, ltp_user);

	TEST_PAUSE;
}
static void verify_thp_size(int *children, int nr_children, int nr_thps) { FILE *fp; char path[BUFSIZ], buf[BUFSIZ], line[BUFSIZ]; int i, ret; long expect_thps; /* the amount of per child's transparent hugepages */ long val, actual_thps; long hugepagesize; hugepagesize = read_meminfo("Hugepagesize:"); expect_thps = nr_thps * hugepagesize; for (i = 0; i < nr_children; i++) { actual_thps = 0; snprintf(path, BUFSIZ, "/proc/%d/smaps", children[i]); fp = fopen(path, "r"); while (fgets(line, BUFSIZ, fp) != NULL) { ret = sscanf(line, "%64s %ld", buf, &val); if (ret == 2 && val != 0) { if (strcmp(buf, "AnonHugePages:") == 0) actual_thps += val; } } if (actual_thps != expect_thps) tst_resm(TFAIL, "child[%d] got %ldKB thps - expect %ld" "KB thps", getpid(), actual_thps, expect_thps); fclose(fp); } }
/*
 * setup() - create two hugepage SHM segments: one without read/write
 * permissions (shm_id_1) and one with them (shm_id_2).
 */
void setup(void)
{
	long hpage_size;

	tst_require_root(NULL);
	tst_sig(NOFORK, DEF_HANDLER, cleanup);
	tst_tmpdir();

	/* Save the tunable so cleanup() can restore it. */
	orig_hugepages = get_sys_tune("nr_hugepages");
	set_sys_tune("nr_hugepages", hugepages, 1);
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	shm_size = hpage_size * hugepages / 2;
	update_shm_size(&shm_size);
	shmkey = getipckey();

	/* create a shared memory segment without read or write permissions */
	shm_id_1 = shmget(shmkey, shm_size,
			  SHM_HUGETLB | IPC_CREAT | IPC_EXCL);
	if (shm_id_1 == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "shmget #1");

	/* create a shared memory segment with read and write permissions */
	shm_id_2 = shmget(shmkey + 1, shm_size,
			  SHM_HUGETLB | IPC_CREAT | IPC_EXCL | SHM_RW);
	if (shm_id_2 == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "shmget #2");

	TEST_PAUSE;
}
/*
 * read_stats() - fill @st with one snapshot of system statistics:
 * CPU usage, disk stats for "sda", network device stats and memory info.
 *
 * NOTE(review): the literal arguments (2, "sda", 1) are selectors
 * interpreted by the respective readers — confirm their meaning against
 * those functions' definitions; the disk device name is hard-coded.
 */
void read_stats(struct stats *st)
{
	read_stat_cpu(&st->cpu, 2, &st->uptime, &st->uptime0);
	read_sysfs_disk(&st->disk, "sda");
	read_net_dev(&st->net, 1);
	read_meminfo(&st->mem);
}
/*
 * init_meminfo() - record baseline memory/swap figures for the test.
 *
 * Prefers the kernel's MemAvailable estimate; on kernels without that
 * field, approximates it as MemFree + Cached.  Breaks with TCONF when
 * fewer than 10 MB of memory, or insufficient swap, is available.
 * All values are in kB.
 */
static void init_meminfo(void)
{
	swap_free_init = read_meminfo("SwapFree:");
	/* FILE_LINES_SCANF returns non-zero when MemAvailable is absent. */
	if (FILE_LINES_SCANF(cleanup, "/proc/meminfo", "MemAvailable: %ld",
		&mem_available_init)) {
		mem_available_init = read_meminfo("MemFree:")
			+ read_meminfo("Cached:");
	}
	/* scale the allocation targets by the test's coefficients */
	mem_over = mem_available_init * COE_SLIGHT_OVER;
	mem_over_max = mem_available_init * COE_DELTA;

	/* at least 10MB available physical memory needed */
	if (mem_available_init < 10240)
		tst_brkm(TCONF, cleanup, "Not enough available mem to test.");
	if (swap_free_init < mem_over_max)
		tst_brkm(TCONF, cleanup, "Not enough swap space to test.");
}
/*
 * copy_mem_stats() - allocate a mem_info snapshot and attach it to
 * @stats.
 *
 * Returns 0 on success, -1 on allocation or read failure; on failure
 * @stats is left untouched and nothing is leaked.  On success the caller
 * owns stats->memInfo and must free it.
 */
int copy_mem_stats(perf_stats* stats){
	mem_info* m = (mem_info*)malloc(sizeof(mem_info));

	/* malloc can fail; passing NULL to read_meminfo would be unsafe */
	if (m == NULL) {
		printf("Error allocating memory information buffer.\n");
		return -1;
	}
	if (read_meminfo(m)) {
		printf("Error reading memory information.\n");
		free(m);	/* original leaked the buffer here */
		return -1;
	}
	stats->memInfo = m;
	return 0;
}
/*
 ***************************************************************************
 * Read memory statistics.
 *
 * IN:
 * @a	Activity structure.
 *
 * OUT:
 * @a	Activity structure with statistics.
 ***************************************************************************
 */
__read_funct_t wrap_read_meminfo(struct activity *a)
{
	/* The activity's raw buffer doubles as the stats_memory record. */
	struct stats_memory *st_memory = (struct stats_memory *) a->_buf0;

	/* Read memory stats */
	read_meminfo(st_memory);
	return;
}
/**
 * check_swap shouldn't change the behavior of any of its
 * callers, it only prints a message to the user if something
 * is being done that might fail without swap available. i.e.
 * resizing a huge page pool
 *
 * Note: declared with (void) — an empty parameter list in C is an
 * old-style declaration that provides no prototype checking.
 */
void check_swap(void)
{
	long swap_sz;
	long swap_total;

	swap_total = read_meminfo(SWAP_TOTAL);
	if (swap_total <= 0) {
		WARNING("There is no swap space configured, resizing hugepage pool may fail\n");
		WARNING("Use --add-temp-swap option to temporarily add swap during the resize\n");
		return;
	}

	swap_sz = read_meminfo(SWAP_FREE);
	/* meminfo keeps values in kb, but we use bytes for hpage sizes */
	swap_sz *= 1024;
	if (swap_sz <= gethugepagesize()) {
		WARNING("There is very little swap space free, resizing hugepage pool may fail\n");
		WARNING("Use --add-temp-swap option to temporarily add swap during the resize\n");
	}
}
/*
 * setup() - prepare the transparent huge page (THP) test.
 *
 * Skips with TCONF when PATH_THP is absent (THP not enabled in the
 * kernel), then records the huge page size, a deliberately misaligned
 * size, and the base page size for the test body.
 */
void setup(void)
{
	if (access(PATH_THP, F_OK) == -1)
		tst_brkm(TCONF, NULL, "THP not enabled in kernel?");

	hugepage_size = read_meminfo("Hugepagesize:") * KB; /* kB -> bytes */
	/* one byte short of 4 huge pages: intentionally unaligned */
	unaligned_size = hugepage_size * 4 - 1;
	page_size = SAFE_SYSCONF(NULL, _SC_PAGESIZE);

	tst_sig(FORK, DEF_HANDLER, cleanup);

	TEST_PAUSE;
}
/*
 * main() - verify that mmap() of a huge page at a fixed high address
 * fails with ENOMEM (64-bit only; 32-bit builds TCONF out).
 *
 * NOTE(review): this file mixes old (parse_opts/Tst_count) and newer
 * LTP API spellings — presumably an incompletely converted testcase;
 * confirm against the surrounding file before modernizing.
 */
int main(int ac, char **av)
{
	int lc;
	char *msg;

#if __WORDSIZE == 32
	tst_brkm(TCONF, NULL, "This test is only for 64bit");
#endif

	msg = parse_opts(ac, av, options, &help);
	if (msg)
		tst_brkm(TBROK, NULL, "OPTION PARSING ERROR - %s", msg);

	if (!Hflag) {
		tst_tmpdir();
		Hopt = get_tst_tmpdir();
	}
	if (sflag)
		hugepages = SAFE_STRTOL(NULL, nr_opt, 0, LONG_MAX);

	setup();

	map_sz = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		fildes = open(TEMPFILE, O_RDWR | O_CREAT, 0666);
		if (fildes < 0)
			tst_brkm(TBROK | TERRNO, cleanup, "open %s", TEMPFILE);

		Tst_count = 0;

		/* Attempt to mmap into highmem addr, should get ENOMEM */
		addr = mmap(HIGH_ADDR, map_sz, PROT_READ,
			    MAP_SHARED | MAP_FIXED, fildes, 0);
		if (addr != MAP_FAILED) {
			tst_resm(TFAIL, "mmap into high region "
				 "succeeded unexpectedly");
			goto fail;
		}
		/*
		 * The message below ends in "got"; TERRNO appends the
		 * actual errno text to complete the sentence.
		 */
		if (errno != ENOMEM)
			tst_resm(TFAIL | TERRNO, "mmap into high region "
				 "failed unexpectedly - expect "
				 "errno=ENOMEM, got");
		else
			tst_resm(TPASS | TERRNO, "mmap into high region "
				 "failed as expected");
fail:
		close(fildes);
	}
	cleanup();
	tst_exit();
}
static void update_mem(void) { long mem_free, swap_free; long committed; mem_free = read_meminfo("MemFree:"); swap_free = read_meminfo("SwapFree:"); free_total = mem_free + swap_free; commit_limit = read_meminfo("CommitLimit:"); if (get_sys_tune("overcommit_memory") == 2) { committed = read_meminfo("Committed_AS:"); commit_left = commit_limit - committed; if (commit_left < 0) { tst_resm(TINFO, "CommmitLimit is %ld, Committed_AS" " is %ld", commit_limit, committed); tst_brkm(TBROK, cleanup, "Unexpected error: " "CommitLimit < Committed_AS"); } } }
static int create_subproc_thread(const char *name, const subproc_mode mode) { adb_thread_t t; int ret_fd; pid_t pid = -1; long mem_free = 0; read_meminfo(&mem_free); XLOGW("read_meminfo() mem_free=%ld\n", mem_free); const char *arg0, *arg1; if (name == 0 || *name == 0) { arg0 = "-"; arg1 = 0; } else { arg0 = "-c"; arg1 = name; } switch (mode) { case SUBPROC_PTY: ret_fd = create_subproc_pty(SHELL_COMMAND, arg0, arg1, &pid); break; case SUBPROC_RAW: ret_fd = create_subproc_raw(SHELL_COMMAND, arg0, arg1, &pid); break; default: fprintf(stderr, "invalid subproc_mode %d\n", mode); return -1; } D("create_subproc ret_fd=%d pid=%d\n", ret_fd, pid); XLOGW("create_subproc ret_fd=%d pid=%d\n", ret_fd, pid); if ( sti == 0 ) { sti = malloc(sizeof(stinfo)); if(sti == 0) fatal("cannot allocate stinfo"); sti->func = subproc_waiter_service; sti->cookie = (void*) (uintptr_t) pid; sti->fd = ret_fd; if (adb_thread_create(&t, service_bootstrap_func, sti)) { XLOGW("adb_thread_create() errno=%d\n", errno); free(sti); adb_close(ret_fd); fprintf(stderr, "cannot create service thread\n"); return -1; } } D("service thread started, fd=%d pid=%d\n", ret_fd, pid); return ret_fd; }
/*
 * get_system_stats() - gather one snapshot of system statistics.
 *
 * Fills a system_stat from /proc readers, timestamps it, diffs it
 * against the previous snapshot, stores it in system_stats_old, and
 * returns it.
 *
 * NOTE(review): diff_system_stats() presumably computes deltas against
 * system_stats_old — confirm against its definition.
 */
system_stat get_system_stats(void)
{
	struct timezone tz;
	system_stat system_stats = read_proc_stat();

	system_stats.cpu.cpu_count = sysconf(_SC_NPROCESSORS_ONLN);
	system_stats.uptime = proc_read_int("/proc/uptime");
	system_stats.load_avg = read_load_avg();
	system_stats.mem = read_meminfo();
	system_stats.sysname = sysname;
	system_stats.hostname = get_hostname();
	gettimeofday(&system_stats.time, &tz);
	diff_system_stats(&system_stats);
	/* keep this snapshot as the baseline for the next call */
	return system_stats_old = system_stats;
}
static void check_swapping(void) { int status, i; long swap_free_now, swapped; /* wait child stop */ if (waitpid(pid, &status, WUNTRACED) == -1) tst_brkm(TBROK | TERRNO, cleanup, "waitpid"); if (!WIFSTOPPED(status)) tst_brkm(TBROK, cleanup, "child was not stopped."); /* Still occupying memory, loop for a while */ i = 0; while (i < 10) { swap_free_now = read_meminfo("SwapFree:"); sleep(1); if (abs(swap_free_now - read_meminfo("SwapFree:")) < 512) break; i++; } swap_free_now = read_meminfo("SwapFree:"); swapped = swap_free_init - swap_free_now; if (swapped > mem_over_max) { kill(pid, SIGCONT); tst_brkm(TFAIL, cleanup, "heavy swapping detected: " "%ld MB swapped.", swapped / 1024); } tst_resm(TPASS, "no heavy swapping detected, %ld MB swapped.", swapped / 1024); kill(pid, SIGCONT); /* wait child exit */ if (waitpid(pid, &status, 0) == -1) tst_brkm(TBROK | TERRNO, cleanup, "waitpid"); }
/*
 * setup() - reserve huge pages and compute the segment size and IPC key
 * used by the test body; the segment itself is created later.
 */
void setup(void)
{
	long hpage_size;

	tst_require_root(NULL);
	tst_sig(NOFORK, DEF_HANDLER, cleanup);
	tst_tmpdir();

	/* Save the tunable so cleanup() can restore it. */
	orig_hugepages = get_sys_tune("nr_hugepages");
	set_sys_tune("nr_hugepages", hugepages, 1);
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	shm_size = hpage_size * hugepages / 2;
	update_shm_size(&shm_size);
	shmkey = getipckey();

	TEST_PAUSE;
}
static int create_subproc_thread(const char *name) { adb_thread_t t; int ret_fd; pid_t pid; long mem_free = 0; read_meminfo(&mem_free); D("read_meminfo() mem_free=%d\n", mem_free); XLOGV("read_meminfo() mem_free=%d\n", mem_free); if(name) { ret_fd = create_subprocess(SHELL_COMMAND, "-c", name, &pid); } else { ret_fd = create_subprocess(SHELL_COMMAND, "-", 0, &pid); } D("create_subprocess() ret_fd=%d pid=%d\n", ret_fd, pid); XLOGV("create_subprocess() ret_fd=%d pid=%d\n", ret_fd, pid); if ( sti == 0 ) { sti = malloc(sizeof(stinfo)); if(sti == 0) fatal("cannot allocate stinfo"); sti->func = subproc_waiter_service; sti->cookie = (void*)pid; sti->fd = ret_fd; int nRet = adb_thread_create( &t, service_bootstrap_func, sti); if(nRet) { D("adb_thread_create() nRet=%d errno=%d\n", nRet, errno); XLOGW("adb_thread_create() nRet=%d errno=%d\n", nRet, errno); free(sti); sti = 0; adb_close(ret_fd); printf("cannot create service thread\n"); return -1; } } D("service thread started, fd=%d pid=%d\n",ret_fd, pid); return ret_fd; }
/*
 * setup() - reserve huge pages and compute the segment size and IPC key
 * used by the test body (FORK variant with a custom signal handler).
 */
void setup(void)
{
	long hpage_size;

	tst_require_root();
	check_hugepage();
	tst_sig(FORK, sighandler, cleanup);
	tst_tmpdir();

	/* Save the tunable so cleanup() can restore it. */
	orig_hugepages = get_sys_tune("nr_hugepages");
	set_sys_tune("nr_hugepages", hugepages, 1);
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	shm_size = hpage_size * hugepages / 2;
	update_shm_size(&shm_size);
	shmkey = getipckey(cleanup);

	TEST_PAUSE;
}
/*
 * setup() - exhaust the SHMMNI limit using hugepage SHM segments.
 *
 * Lowers SHMMNI to hugepages/2 (original value saved for cleanup()),
 * then creates one-hugepage segments until shmget() fails; the loop is
 * expected to stop with ENOSPC once the limit is reached.
 */
void setup(void)
{
	long hpage_size;
	char buf[BUFSIZ];

	tst_require_root(NULL);
	tst_sig(NOFORK, DEF_HANDLER, cleanup);
	tst_tmpdir();

	/* Save the tunable so cleanup() can restore it. */
	orig_hugepages = get_sys_tune("nr_hugepages");
	set_sys_tune("nr_hugepages", hugepages, 1);
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */
	shm_size = hpage_size;

	/* Save the original SHMMNI, then lower it for the test. */
	read_file(PATH_SHMMNI, buf);
	orig_shmmni = SAFE_STRTOL(cleanup, buf, 0, LONG_MAX);
	snprintf(buf, BUFSIZ, "%ld", hugepages / 2);
	write_file(PATH_SHMMNI, buf);

	/*
	 * Use a while loop to create the maximum number of memory segments.
	 * If the loop exceeds MAXIDS, then break the test and cleanup.
	 */
	num_shms = 0;
	shm_id_1 = shmget(IPC_PRIVATE, shm_size,
			  SHM_HUGETLB | IPC_CREAT | IPC_EXCL | SHM_RW);
	while (shm_id_1 != -1) {
		shm_id_arr[num_shms++] = shm_id_1;
		if (num_shms == MAXIDS)
			tst_brkm(TBROK, cleanup, "The maximum number of "
				 "shared memory ID's has been reached. "
				 "Please increase the MAXIDS value in "
				 "the test.");
		shm_id_1 = shmget(IPC_PRIVATE, shm_size,
				  SHM_HUGETLB | IPC_CREAT | IPC_EXCL | SHM_RW);
	}
	/* the loop must have terminated because SHMMNI was exhausted */
	if (errno != ENOSPC)
		tst_brkm(TBROK | TERRNO, cleanup, "shmget #setup");

	TEST_PAUSE;
}
/*
 * setup() - create a read/write hugepage SHM segment.
 *
 * Note the post-increment: shmget() uses the current shmkey, then the
 * global key is bumped — presumably for a later shmget() in the test
 * body; confirm against the caller.
 */
void setup(void)
{
	long hpage_size;

	tst_require_root();
	tst_sig(NOFORK, DEF_HANDLER, cleanup);
	tst_tmpdir();

	/* Save the tunable so cleanup() can restore it. */
	orig_hugepages = get_sys_tune("nr_hugepages");
	set_sys_tune("nr_hugepages", hugepages, 1);
	hpage_size = read_meminfo("Hugepagesize:") * 1024; /* kB -> bytes */

	shm_size = hpage_size * hugepages / 2;
	update_shm_size(&shm_size);
	shmkey = getipckey();
	shm_id_1 = shmget(shmkey++, shm_size,
			  SHM_HUGETLB | SHM_RW | IPC_CREAT | IPC_EXCL);
	if (shm_id_1 == -1)
		tst_brkm(TBROK | TERRNO, cleanup, "shmget");

	TEST_PAUSE;
}
/*
 * alloc_transparent_hugepages() - child helper: allocate nr_thps huge
 * pages worth of anonymous memory, fault it in, then SIGSTOP ourselves
 * so the parent can inspect our smaps.
 *
 * @nr_thps:    number of huge pages worth of memory to allocate.
 * @hg_aligned: non-zero selects hugepage-aligned allocation via
 *              posix_memalign(); zero uses a plain anonymous mmap().
 *
 * Returns 0 on success, -1 on any failure.
 */
static int alloc_transparent_hugepages(int nr_thps, int hg_aligned)
{
	unsigned long hugepagesize, size;
	void *addr;
	int ret;

	hugepagesize = read_meminfo("Hugepagesize:") * KB; /* kB -> bytes */
	size = nr_thps * hugepagesize;

	if (hg_aligned) {
		ret = posix_memalign(&addr, hugepagesize, size);
		if (ret != 0) {
			printf("posix_memalign failed\n");
			return -1;
		}
	} else {
		addr = mmap(NULL, size, PROT_READ|PROT_WRITE,
			    MAP_PRIVATE|MAP_ANON, -1, 0);
		if (addr == MAP_FAILED) {
			perror("mmap");
			return -1;
		}
	}

	/* touch every page so the kernel can back the range with THPs */
	memset(addr, 10, size);

	tst_resm(TINFO, "child[%d] stop here", getpid());
	/*
	 * stop here, until the father finish to calculate
	 * all the transparent hugepages.
	 */
	if (raise(SIGSTOP) == -1) {
		/* fixed label: the failing call is raise(), not kill() */
		perror("raise");
		return -1;
	}

	return 0;
}
/*
 * using_system_hpage_size() - report whether the hugetlbfs mount at
 * @mount uses the system default huge page size.
 *
 * Compares the filesystem block size (bytes, converted to kB) against
 * Hugepagesize from /proc/meminfo (kB).  Returns 1 on match, 0
 * otherwise; FAIL()s on a missing mount, statfs64 error, or unreadable
 * meminfo.
 */
int using_system_hpage_size(const char *mount)
{
	struct statfs64 sb;
	long proc_hpage_kb, fs_block_kb;

	if (!mount)
		FAIL("using_system_hpage_size: hugetlbfs is not mounted\n");

	if (statfs64(mount, &sb))
		FAIL("statfs64: %s\n", strerror(errno));

	proc_hpage_kb = read_meminfo("Hugepagesize:");
	if (proc_hpage_kb < 0)
		FAIL("using_system_hpage_size: Failed to read /proc/meminfo\n");

	/* Compare to meminfo in kB */
	fs_block_kb = sb.f_bsize / 1024;
	return (fs_block_kb == proc_hpage_kb) ? 1 : 0;
}
/*
 * run_race() - race two contexts instantiating the same final huge page.
 *
 * For MAP_SHARED the racers are two forked children; for MAP_PRIVATE
 * they are two threads in this process.  Each racer pair synchronizes
 * through two int-sized trigger flags at the start of @syncarea.
 * FAIL()s if either racer is killed or exits non-zero.
 */
static void run_race(void *syncarea, int race_type)
{
	volatile int *trigger1, *trigger2;
	int fd;
	void *p;
	int status1, status2;
	int ret;

	/* two int-sized trigger flags live at the start of the sync area */
	memset(syncarea, 0, sizeof(*trigger1) + sizeof(*trigger2));
	trigger1 = syncarea;
	trigger2 = trigger1 + 1;

	/* Get a new file for the final page */
	fd = hugetlbfs_unlinked_fd();
	if (fd < 0)
		FAIL("hugetlbfs_unlinked_fd()");

	verbose_printf("Mapping final page.. ");
	p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, race_type, fd, 0);
	if (p == MAP_FAILED)
		FAIL("mmap(): %s", strerror(errno));
	verbose_printf("%p\n", p);

	if (race_type == MAP_SHARED) {
		/* shared mapping: race two child processes */
		child1 = fork();
		if (child1 < 0)
			FAIL("fork(): %s", strerror(errno));
		if (child1 == 0)
			proc_racer(p, 0, trigger1, trigger2);

		child2 = fork();
		if (child2 < 0)
			FAIL("fork(): %s", strerror(errno));
		if (child2 == 0)
			proc_racer(p, 1, trigger2, trigger1);

		/* wait() calls */
		ret = waitpid(child1, &status1, 0);
		if (ret < 0)
			FAIL("waitpid() child 1: %s", strerror(errno));
		verbose_printf("Child 1 status: %x\n", status1);

		ret = waitpid(child2, &status2, 0);
		if (ret < 0)
			FAIL("waitpid() child 2: %s", strerror(errno));
		verbose_printf("Child 2 status: %x\n", status2);

		if (WIFSIGNALED(status1))
			FAIL("Child 1 killed by signal %s",
			     strsignal(WTERMSIG(status1)));
		if (WIFSIGNALED(status2))
			FAIL("Child 2 killed by signal %s",
			     strsignal(WTERMSIG(status2)));

		status1 = WEXITSTATUS(status1);
		status2 = WEXITSTATUS(status2);
	} else {
		/* private mapping: race two threads in this process */
		struct racer_info ri1 = {
			.p = p,
			.cpu = 0,
			.mytrigger = trigger1,
			.othertrigger = trigger2,
		};
		struct racer_info ri2 = {
			.p = p,
			.cpu = 1,
			.mytrigger = trigger2,
			.othertrigger = trigger1,
		};
		void *tret1, *tret2;

		ret = pthread_create(&thread1, NULL, thread_racer, &ri1);
		if (ret != 0)
			FAIL("pthread_create() 1: %s\n", strerror(errno));
		ret = pthread_create(&thread2, NULL, thread_racer, &ri2);
		if (ret != 0)
			FAIL("pthread_create() 2: %s\n", strerror(errno));

		ret = pthread_join(thread1, &tret1);
		if (ret != 0)
			FAIL("pthread_join() 1: %s\n", strerror(errno));
		/* racers hand back their own racer_info pointer on success */
		if (tret1 != &ri1)
			FAIL("Thread 1 returned %p not %p, killed?\n",
			     tret1, &ri1);
		ret = pthread_join(thread2, &tret2);
		if (ret != 0)
			FAIL("pthread_join() 2: %s\n", strerror(errno));
		if (tret2 != &ri2)
			FAIL("Thread 2 returned %p not %p, killed?\n",
			     tret2, &ri2);

		status1 = ri1.status;
		status2 = ri2.status;
	}

	if (status1 != 0)
		FAIL("Racer 1 terminated with code %d", status1);
	if (status2 != 0)
		FAIL("Racer 2 terminated with code %d", status2);
}

/*
 * main() - alloc-instantiate-race driver.
 *
 * Pre-instantiates all free huge pages except one, then races two
 * contexts over the last available page.  argv[1] chooses the mode:
 * "shared" (two processes) or "private" (two threads).
 */
int main(int argc, char *argv[])
{
	unsigned long totpages;
	int fd;
	void *p, *q;
	unsigned long i;
	int race_type;

	test_init(argc, argv);

	if (argc != 2)
		CONFIG("Usage: alloc-instantiate-race <private|shared>");

	totpages = read_meminfo("HugePages_Free:");

	if (strcmp(argv[1], "shared") == 0) {
		race_type = MAP_SHARED;
	} else if (strcmp(argv[1], "private") == 0) {
		race_type = MAP_PRIVATE;
	} else {
		CONFIG("Usage: alloc-instantiate-race <private|shared>");
	}

	hpage_size = check_hugepagesize();

	fd = hugetlbfs_unlinked_fd();
	if (fd < 0)
		FAIL("hugetlbfs_unlinked_fd()");

	/* Get a shared normal page for synchronization */
	verbose_printf("Mapping synchronization area..");
	q = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE,
		 MAP_SHARED|MAP_ANONYMOUS, -1, 0);
	if (q == MAP_FAILED)
		FAIL("mmap() sync area: %s", strerror(errno));
	verbose_printf("done\n");

	verbose_printf("Mapping %ld/%ld pages.. ", totpages-1, totpages);
	p = mmap(NULL, (totpages-1)*hpage_size, PROT_READ|PROT_WRITE,
		 MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		FAIL("mmap() 1: %s", strerror(errno));

	/* Allocate all save one of the pages up front */
	verbose_printf("instantiating.. ");
	for (i = 0; i < (totpages - 1); i++)
		memset(p + (i * hpage_size), 0, sizeof(int));
	verbose_printf("done\n");

	run_race(q, race_type);

	PASS();
}
/*
 * main() - map a huge page at a low address after occupying low address
 * space with normal mappings.
 *
 * A 64-bit process expects the huge mapping to succeed; a 32-bit process
 * expects it to fail.  Normal /dev/zero mappings are used to crowd the
 * low address range first.
 */
int main(int ac, char **av)
{
	int lc;
	int Hflag = 0;
	long page_sz, map_sz;
	int sflag = 0;
	option_t options[] = {
		{"H:", &Hflag, &Hopt},
		{"s:", &sflag, &nr_opt},
		{NULL, NULL, NULL}
	};

	tst_parse_opts(ac, av, options, &help);

	check_hugepage();

	if (!Hflag) {
		tst_tmpdir();
		Hopt = tst_get_tmpdir();
	}
	if (sflag)
		hugepages = SAFE_STRTOL(NULL, nr_opt, 0, LONG_MAX);

	page_sz = getpagesize();
	/* two huge pages worth of bytes */
	map_sz = read_meminfo("Hugepagesize:") * 1024 * 2;

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		/* Create a temporary file used for huge mapping */
		fildes = open(TEMPFILE, O_RDWR | O_CREAT, 0666);
		if (fildes < 0)
			tst_brkm(TBROK | TERRNO, cleanup,
				 "opening %s failed", TEMPFILE);

		/* Create a file used for normal mapping */
		nfildes = open("/dev/zero", O_RDONLY, 0666);
		if (nfildes < 0)
			tst_brkm(TBROK | TERRNO, cleanup,
				 "opening /dev/zero failed");

		tst_count = 0;

		/*
		 * Call mmap on /dev/zero 5 times
		 */
		for (i = 0; i < 5; i++) {
			addr = mmap(0, 256 * 1024 * 1024, PROT_READ,
				    MAP_SHARED, nfildes, 0);
			addrlist[i] = addr;
		}

		/* probe upward for an unmapped low region */
		while (range_is_mapped(cleanup, low_addr,
				       low_addr + map_sz) == 1) {
			low_addr = low_addr + 0x10000000;
			if (low_addr < LOW_ADDR)
				tst_brkm(TBROK | TERRNO, cleanup,
					 "no empty region to use");
		}
		/* mmap using normal pages and a low memory address */
		addr = mmap((void *)low_addr, page_sz, PROT_READ,
			    MAP_SHARED | MAP_FIXED, nfildes, 0);
		if (addr == MAP_FAILED)
			tst_brkm(TBROK | TERRNO, cleanup,
				 "mmap failed on nfildes");

		while (range_is_mapped(cleanup, low_addr2,
				       low_addr2 + map_sz) == 1) {
			low_addr2 = low_addr2 + 0x10000000;
			if (low_addr2 < LOW_ADDR2)
				tst_brkm(TBROK | TERRNO, cleanup,
					 "no empty region to use");
		}
		/* Attempt to mmap a huge page into a low memory address */
		addr2 = mmap((void *)low_addr2, map_sz,
			     PROT_READ | PROT_WRITE,
			     MAP_SHARED, fildes, 0);
#if __WORDSIZE == 64 /* 64-bit process */
		if (addr2 == MAP_FAILED) {
			tst_resm(TFAIL | TERRNO, "huge mmap failed unexpectedly"
				 " with %s (64-bit)", TEMPFILE);
			close(fildes);
			continue;
		} else {
			tst_resm(TPASS, "huge mmap succeeded (64-bit)");
		}
#else /* 32-bit process */
		if (addr2 == MAP_FAILED)
			tst_resm(TFAIL | TERRNO, "huge mmap failed unexpectedly"
				 " with %s (32-bit)", TEMPFILE);
		else if (addr2 > 0) {
			tst_resm(TCONF,
				 "huge mmap failed to test the scenario");
			close(fildes);
			continue;
		} else if (addr == 0)
			/*
			 * NOTE(review): this condition tests 'addr' (the
			 * normal mapping), not 'addr2' (the huge mapping)
			 * — confirm whether that is intended.
			 */
			tst_resm(TPASS, "huge mmap succeeded (32-bit)");
#endif

		/* Clean up things in case we are looping */
		for (i = 0; i < 5; i++) {
			/*
			 * NOTE(review): TBROK is passed to tst_resm()
			 * here (not tst_brkm) — confirm intended severity.
			 */
			if (munmap(addrlist[i], 256 * 1024 * 1024) == -1)
				tst_resm(TBROK | TERRNO,
					 "munmap of addrlist[%d] failed", i);
		}

#if __WORDSIZE == 64
		if (munmap(addr2, map_sz) == -1)
			tst_brkm(TFAIL | TERRNO, NULL, "huge munmap failed");
#endif
		if (munmap(addr, page_sz) == -1)
			tst_brkm(TFAIL | TERRNO, NULL, "munmap failed");

		close(fildes);
	}
	cleanup();
	tst_exit();
}
/*
 * max_map_count_test() - verify the vm.max_map_count limit.
 *
 * For increasing values of max_map_count, a child maps 1-byte anonymous
 * pages until mmap() fails, then stops itself; the parent counts the
 * child's map entries and expects exactly max_maps of them.
 */
static void max_map_count_test(void)
{
	int status;
	pid_t pid;
	long max_maps;
	long map_count;
	long max_iters;
	long memfree;

	/*
	 * XXX Due to a possible kernel bug, oom-killer can be easily
	 * triggered when doing small piece mmaps in huge amount even if
	 * enough free memory available. Also it has been observed that
	 * oom-killer often kill wrong victims in this situation, we
	 * decided to do following steps to make sure no oom happen:
	 * 1) use a safe maximum max_map_count value as upper-bound,
	 * we set it 65536 in this case, i.e., we don't test too big
	 * value;
	 * 2) make sure total mapping isn't larger than
	 * CommitLimit - Committed_AS
	 * and set overcommit_memory to 2, this could help mapping
	 * returns ENOMEM instead of triggering oom-killer when
	 * memory is tight. (When there are enough free memory,
	 * step 1) will be used first.
	 * Hope OOM-killer can be more stable one day.
	 */
	memfree = read_meminfo("CommitLimit:") - read_meminfo("Committed_AS:");
	/* 64 used as a bias to make sure no overflow happen */
	/* memfree is kB: divide by the page size, scale back by 1024 */
	max_iters = memfree / sysconf(_SC_PAGESIZE) * 1024 - 64;
	if (max_iters > MAX_MAP_COUNT)
		max_iters = MAX_MAP_COUNT;

	max_maps = MAP_COUNT_DEFAULT;
	while (max_maps <= max_iters) {
		set_sys_tune("max_map_count", max_maps, 1);

		switch (pid = fork()) {
		case -1:
			tst_brkm(TBROK | TERRNO, cleanup, "fork");
		case 0:
			/*
			 * child: map 1-byte pages until mmap fails, then
			 * SIGSTOP so the parent can count the mappings
			 */
			while (mmap(NULL, 1, PROT_READ,
				    MAP_SHARED|MAP_ANONYMOUS, -1, 0)
			       != MAP_FAILED)
				;
			if (raise(SIGSTOP) != 0)
				tst_brkm(TBROK|TERRNO, tst_exit, "raise");
			exit(0);
		default:
			break;
		}
		/* wait child done mmap and stop */
		if (waitpid(pid, &status, WUNTRACED) == -1)
			tst_brkm(TBROK|TERRNO, cleanup, "waitpid");
		if (!WIFSTOPPED(status))
			tst_brkm(TBROK, cleanup, "child did not stopped");

		map_count = count_maps(pid);
		if (map_count == max_maps)
			tst_resm(TPASS, "%ld map entries in total "
				 "as expected.", max_maps);
		else
			tst_resm(TFAIL, "%ld map entries in total, but "
				 "expected %ld entries", map_count, max_maps);

		/* make child continue to exit */
		if (kill(pid, SIGCONT) != 0)
			tst_brkm(TBROK|TERRNO, cleanup, "kill");
		if (waitpid(pid, &status, 0) == -1)
			tst_brkm(TBROK|TERRNO, cleanup, "waitpid");

		/* quadruple the limit for the next round */
		max_maps = max_maps << 2;
	}
}
/*
 * main() - map all free huge pages into one file-backed mapping and
 * verify HugePages_Free decreases after a page is touched.
 *
 * On 32-bit builds the mapping is capped at 128 huge pages.
 */
int main(int ac, char **av)
{
	int lc;
	int Hflag = 0;
	int sflag = 0;
	int huge_pagesize = 0;
	option_t options[] = {
		{"H:", &Hflag, &Hopt},
		{"s:", &sflag, &nr_opt},
		{NULL, NULL, NULL}
	};

	tst_parse_opts(ac, av, options, &help);

	if (!Hflag) {
		tst_tmpdir();
		Hopt = tst_get_tmpdir();
	}
	if (sflag)
		hugepages = SAFE_STRTOL(NULL, nr_opt, 0, LONG_MAX);

	setup();

	for (lc = 0; TEST_LOOPING(lc); lc++) {
		/* Create a temporary file used for huge mapping */
		fildes = open(TEMPFILE, O_RDWR | O_CREAT, 0666);
		if (fildes < 0)
			tst_brkm(TFAIL | TERRNO, cleanup,
				 "open %s failed", TEMPFILE);

		tst_count = 0;

		/* Note the number of free huge pages BEFORE testing */
		freepages = read_meminfo("HugePages_Free:");
		beforetest = freepages;

		/* Note the size of huge page size BEFORE testing */
		huge_pagesize = read_meminfo("Hugepagesize:");
		tst_resm(TINFO, "Size of huge pages is %d KB", huge_pagesize);

#if __WORDSIZE == 32
		/*
		 * NOTE(review): freepages comes from read_meminfo()
		 * (returns long); if the global is declared long, %d is
		 * the wrong specifier here — confirm and use %ld.
		 */
		tst_resm(TINFO, "Total amount of free huge pages is %d",
			 freepages);
		tst_resm(TINFO, "Max number allowed for 1 mmap file in"
			 " 32-bits is 128");
		if (freepages > 128)
			freepages = 128;
#endif
		/* total bytes: pages * page size (kB) * 1024 */
		mapsize = (long long)freepages *huge_pagesize * 1024;
		addr = mmap(NULL, mapsize, PROT_READ | PROT_WRITE,
			    MAP_SHARED, fildes, 0);
		sleep(2);
		if (addr == MAP_FAILED) {
			tst_resm(TFAIL | TERRNO, "mmap() Failed on %s",
				 TEMPFILE);
			close(fildes);
			continue;
		} else {
			tst_resm(TPASS,
				 "Succeeded mapping file using %ld pages",
				 freepages);
			/* force to allocate page and change HugePages_Free */
			*(int *)addr = 0;
		}
		/*
		 * Make sure the number of free huge pages
		 * AFTER testing decreased
		 */
		aftertest = read_meminfo("HugePages_Free:");
		hugepagesmapped = beforetest - aftertest;
		if (hugepagesmapped < 1)
			tst_resm(TWARN, "Number of HUGEPAGES_FREE stayed the"
				 " same. Okay if multiple copies running due"
				 " to test collision.");
		/* Clean up things in case we are looping */
		/* Unmap the mapped memory */
		if (munmap(addr, mapsize) != 0)
			tst_brkm(TFAIL | TERRNO, NULL, "munmap failed");
		close(fildes);
	}
	cleanup();
	tst_exit();
}