static void test_simple_mlock(int flags) { int fd = hugetlbfs_unlinked_fd(); void *p; int ret; long hpage_size = check_hugepagesize(); p = mmap(0, hpage_size, PROT_READ|PROT_WRITE, flags, fd, 0); if (p == MAP_FAILED) FAIL("mmap() failed (flags=%x): %s", flags, strerror(errno)); ret = mlock(p, hpage_size); if (ret) FAIL("mlock() failed (flags=%x): %s", flags, strerror(errno)); ret = munlock(p, hpage_size); if (ret) FAIL("munlock() failed (flags=%x): %s", flags, strerror(errno)); ret = munmap(p, hpage_size); if (ret) FAIL("munmap() failed (flags=%x): %s", flags, strerror(errno)); close(fd); }
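/*
 * All of these tests lean on libhugetlbfs helpers such as
 * hugetlbfs_unlinked_fd().  A minimal sketch of the idea behind that
 * helper, assuming a hugetlbfs mount at /mnt/huge (a placeholder; the
 * real helper discovers the mount point at runtime):
 */
#include <stdlib.h>
#include <unistd.h>

static int sketch_hugetlbfs_unlinked_fd(void)
{
    char path[] = "/mnt/huge/test-XXXXXX"; /* assumed mount point */
    int fd = mkstemp(path);                /* create a hugetlbfs-backed file */

    if (fd < 0)
        return -1;
    unlink(path); /* the fd stays usable; the name disappears immediately */
    return fd;
}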
int main(int argc, char ** argv) { unsigned long size; long hpage_size; int pid, status; int i; int wait_list[MAX_PROCS]; test_init(argc, argv); if (argc < 3) CONFIG("Usage: %s <# procs> <# pages>", argv[0]); numprocs = atoi(argv[1]); nr_hugepages = atoi(argv[2]); if (numprocs > MAX_PROCS) CONFIG("Cannot spawn more than %d processes", MAX_PROCS); check_hugetlb_shm_group(); hpage_size = check_hugepagesize(); size = hpage_size * nr_hugepages; verbose_printf("Requesting %lu bytes\n", size); if ((shmid = shmget(2, size, SHM_HUGETLB|IPC_CREAT|SHM_R|SHM_W )) < 0) FAIL("shmget(): %s", strerror(errno)); verbose_printf("shmid: %d\n", shmid); verbose_printf("Spawning children:\n"); for (i=0; i<numprocs; i++) { if ((pid = fork()) < 0) FAIL("fork(): %s", strerror(errno)); if (pid == 0) do_child(i, size); wait_list[i] = pid; } for (i=0; i<numprocs; i++) { waitpid(wait_list[i], &status, 0); if (WEXITSTATUS(status) != 0) FAIL("Thread %d (pid=%d) failed", i, wait_list[i]); if (WIFSIGNALED(status)) FAIL("Thread %d (pid=%d) received unhandled signal", i, wait_list[i]); } PASS(); }
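/*
 * do_child() is referenced above but not shown.  A plausible sketch,
 * assuming each child attaches the SysV segment (shmid is the global
 * set in main), dirties every page with a per-child tag, and exits;
 * the real test presumably also re-reads and verifies what it wrote:
 */
#include <stdlib.h>
#include <sys/shm.h>

static void do_child(int tag, unsigned long size)
{
    char *addr;
    unsigned long i;

    addr = shmat(shmid, NULL, 0);
    if (addr == (void *)-1)
        exit(1);
    for (i = 0; i < size; i += 4096)
        addr[i] = tag;  /* touch each page, tagged per child */
    shmdt(addr);
    exit(0);
}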
int main(int argc, char *argv[]) { int err; int fd; void *p; test_init(argc, argv); struct sigaction sa = { .sa_sigaction = sig_handler, .sa_flags = SA_SIGINFO, }; err = sigaction(SIGSEGV, &sa, NULL); if (err) FAIL("Can't install SIGSEGV handler: %s", strerror(errno)); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); verbose_printf("instantiating page\n"); p = mmap(NULL, 2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == MAP_FAILED) FAIL("mmap(): %s", strerror(errno)); memset(p, 0, hpage_size); munmap(p, hpage_size); /* Basic protection change tests */ test_mprotect(fd, "R->RW", hpage_size, PROT_READ, hpage_size, PROT_READ|PROT_WRITE); test_mprotect(fd, "RW->R", hpage_size, PROT_READ|PROT_WRITE, hpage_size, PROT_READ); /* Tests which require VMA splitting */ test_mprotect(fd, "R->RW 1/2", 2*hpage_size, PROT_READ, hpage_size, PROT_READ|PROT_WRITE); test_mprotect(fd, "RW->R 1/2", 2*hpage_size, PROT_READ|PROT_WRITE, hpage_size, PROT_READ); /* PROT_NONE tests */ test_mprotect(fd, "NONE->R", hpage_size, PROT_NONE, hpage_size, PROT_READ); test_mprotect(fd, "NONE->RW", hpage_size, PROT_NONE, hpage_size, PROT_READ|PROT_WRITE); PASS(); }
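/*
 * A sketch of what test_mprotect() plausibly does with its
 * (fd, name, map_len, initial_prot, prot_len, new_prot) arguments:
 * map, flip the protection, then verify the change.  The fault
 * verification (reads/writes trapped via the SIGSEGV handler
 * installed in main) is assumed and only indicated by a comment:
 */
static void test_mprotect(int fd, char *testname,
                          unsigned long len1, int prot1,
                          unsigned long len2, int prot2)
{
    void *p;

    verbose_printf("Testing %s\n", testname);
    p = mmap(NULL, len1, prot1, MAP_SHARED, fd, 0);
    if (p == MAP_FAILED)
        FAIL("%s: mmap(): %s", testname, strerror(errno));
    if (mprotect(p, len2, prot2))
        FAIL("%s: mprotect(): %s", testname, strerror(errno));
    /* ...probe that access now faults exactly as prot2 requires... */
    munmap(p, len1);
}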
int main(int argc, char *argv[]) { struct rlimit limit_info; test_init(argc, argv); if (getrlimit(RLIMIT_MEMLOCK, &limit_info)) ERROR("Unable to read locked memory rlimit: %s", strerror(errno)); if (limit_info.rlim_cur < check_hugepagesize()) CONFIG("Locked memory ulimit set below huge page size"); test_simple_mlock(MAP_PRIVATE); test_simple_mlock(MAP_SHARED); test_simple_mlock(MAP_PRIVATE|MAP_LOCKED); test_simple_mlock(MAP_SHARED|MAP_LOCKED); PASS(); }
int main(int argc, char ** argv) { int base_nr; test_init(argc, argv); hpage_size = check_hugepagesize(); saved_nr_hugepages = get_huge_page_counter(hpage_size, HUGEPAGES_TOTAL); verify_dynamic_pool_support(); check_must_be_root(); if ((private_resv = kernel_has_private_reservations()) == -1) FAIL("kernel_has_private_reservations() failed\n"); /* * This test case should require a maximum of 3 huge pages. * Run through the battery of tests multiple times, with an increasing * base pool size. This alters the circumstances under which surplus * pages need to be allocated and increases the corner cases tested. */ for (base_nr = 0; base_nr <= 3; base_nr++) { verbose_printf("Base pool size: %i\n", base_nr); /* Run the tests with a clean slate */ run_test("Clean", base_nr); /* Now with a pre-existing untouched, shared mmap */ map(SL_SETUP, 1, MAP_SHARED); run_test("Untouched, shared", base_nr); unmap(SL_SETUP, 1, MAP_SHARED); /* Now with a pre-existing untouched, private mmap */ map(SL_SETUP, 1, MAP_PRIVATE); run_test("Untouched, private", base_nr); unmap(SL_SETUP, 1, MAP_PRIVATE); /* Now with a pre-existing touched, shared mmap */ map(SL_SETUP, 1, MAP_SHARED); touch(SL_SETUP, 1, MAP_SHARED); run_test("Touched, shared", base_nr); unmap(SL_SETUP, 1, MAP_SHARED); /* Now with a pre-existing touched, private mmap */ map(SL_SETUP, 1, MAP_PRIVATE); touch(SL_SETUP, 1, MAP_PRIVATE); run_test("Touched, private", base_nr); unmap(SL_SETUP, 1, MAP_PRIVATE); } PASS(); }
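/*
 * The map()/touch()/unmap() helpers take a slot (SL_SETUP here), a
 * page count, and mmap flags.  A minimal sketch under the assumption
 * that the slot indexes a small table of live mappings and that
 * hpage_size is the global set in main:
 */
static int map_fd[2];
static void *map_addr[2];
static unsigned long map_len[2];

static void map(int slot, int nr_pages, int flags)
{
    map_fd[slot] = hugetlbfs_unlinked_fd();
    if (map_fd[slot] < 0)
        FAIL("hugetlbfs_unlinked_fd()");
    map_len[slot] = nr_pages * hpage_size;
    map_addr[slot] = mmap(NULL, map_len[slot], PROT_READ|PROT_WRITE,
                          flags, map_fd[slot], 0);
    if (map_addr[slot] == MAP_FAILED)
        FAIL("map(): %s", strerror(errno));
}

static void touch(int slot, int nr_pages, int flags)
{
    memset(map_addr[slot], 0, nr_pages * hpage_size); /* instantiate */
}

static void unmap(int slot, int nr_pages, int flags)
{
    munmap(map_addr[slot], map_len[slot]);
    close(map_fd[slot]);
}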
int main(int argc, char *argv[]) { long hpage_size; int nr_hugepages; int fd1, fd2, err; char *p, *q; struct sigaction sa = { .sa_sigaction = sig_handler, .sa_flags = SA_SIGINFO, }; test_init(argc, argv); hpage_size = check_hugepagesize(); nr_hugepages = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); fd1 = hugetlbfs_unlinked_fd(); if (fd1 < 0) FAIL("hugetlbfs_unlinked_fd()"); fd2 = hugetlbfs_unlinked_fd(); if (fd2 < 0) FAIL("hugetlbfs_unlinked_fd()"); err = sigaction(SIGBUS, &sa, NULL); if (err) FAIL("Can't install SIGBUS handler: %s", strerror(errno)); p = mmap(NULL, hpage_size * nr_hugepages, PROT_READ | PROT_WRITE, MAP_SHARED, fd1, 0); if (p == MAP_FAILED) FAIL("mmap() 1: %s", strerror(errno)); verbose_printf("Reserved all %d hugepages\n", nr_hugepages); q = mmap(NULL, hpage_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_NORESERVE, fd2, 0); if (q == MAP_FAILED) FAIL("mmap() 2: %s", strerror(errno)); verbose_printf("Write to %p to steal reserved page\n", q); test_write(q); FAIL("Write succeeded: stole a reserved huge page"); }
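/*
 * The point of the final write: every free huge page is reserved by
 * the first mapping, so instantiating the MAP_NORESERVE page must be
 * refused with SIGBUS, which the handler converts into a PASS.  A
 * sketch of both halves, assuming this is all the handler does:
 */
static void sig_handler(int signum, siginfo_t *si, void *uc)
{
    if (signum == SIGBUS)
        PASS(); /* the reserved pool was protected, as intended */
    FAIL("Unexpected signal %s", strsignal(signum));
}

static void test_write(char *p)
{
    *p = 's'; /* instantiate the unreserved page; should SIGBUS */
}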
int main(int argc, char *argv[]) { long hpage_size; int fd; void *p; volatile unsigned long *q; int err; cpu_set_t cpu0, cpu1; test_init(argc, argv); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); CPU_ZERO(&cpu0); CPU_SET(0, &cpu0); CPU_ZERO(&cpu1); CPU_SET(1, &cpu1); err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0); if (err != 0) CONFIG("sched_setaffinity(cpu0): %s", strerror(errno)); err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu1); if (err != 0) CONFIG("sched_setaffinity(cpu1): %s", strerror(errno)); p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == MAP_FAILED) FAIL("mmap(): %s", strerror(errno)); err = sched_setaffinity(getpid(), CPU_SETSIZE/8, &cpu0); if (err != 0) CONFIG("sched_setaffinity(cpu0): %s", strerror(errno)); q = (volatile unsigned long *)(p + getpagesize()); *q = 0xdeadbeef; PASS_INCONCLUSIVE(); }
int main(int argc, char *argv[]) { int fd, rc; void *p; test_init(argc, argv); hpage_size = check_hugepagesize(); page_size = getpagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); p = mmap(NULL, 3*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == MAP_FAILED) FAIL("mmap(): %s", strerror(errno)); rc = munmap(p, hpage_size); if (rc != 0) FAIL("munmap() low hpage: %s", strerror(errno)); rc = munmap(p + 2*hpage_size, hpage_size); if (rc != 0) FAIL("munmap() high hpage: %s", strerror(errno)); p = p + hpage_size; verbose_printf("Hugepage mapping at %p\n", p); do_readback(p, hpage_size, "base hugepage"); do_remap(p - page_size); do_remap(p + hpage_size); PASS(); }
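/*
 * do_remap() is specific to this test, but do_readback() recurs
 * throughout the suite.  A sketch of it, assuming the usual
 * write-a-pattern-then-verify shape (RANDOM_CONSTANT stands in for
 * whatever seed the real helper uses):
 */
static void do_readback(void *p, size_t size, const char *stage)
{
    unsigned int *q = p;
    size_t i;

    verbose_printf("do_readback(%p, 0x%lx, \"%s\")\n", p,
                   (unsigned long)size, stage);
    for (i = 0; i < size / sizeof(*q); i++)
        q[i] = RANDOM_CONSTANT ^ i;        /* fill with a pattern */
    for (i = 0; i < size / sizeof(*q); i++)
        if (q[i] != (RANDOM_CONSTANT ^ i)) /* and read it back */
            FAIL("Stage \"%s\": mismatch at offset 0x%lx",
                 stage, (unsigned long)i);
}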
int main(int argc, char *argv[]) { int fd, rc; void *p, *q, *r; test_init(argc, argv); hpage_size = check_hugepagesize(); page_size = getpagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); init_slice_boundary(fd); /* First, hugepages above, normal below */ p = mmap((void *)(slice_boundary + hpage_size), hpage_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0); if (p == MAP_FAILED) FAIL("mmap(huge above): %s", strerror(errno)); do_readback(p, hpage_size, "huge above"); q = mmap((void *)(slice_boundary - page_size), page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0); if (q == MAP_FAILED) FAIL("mmap(normal below): %s", strerror(errno)); do_readback(q, page_size, "normal below"); verbose_printf("Attempting to remap..."); r = mremap(q, page_size, 2*page_size, 0); if (r == MAP_FAILED) { verbose_printf("disallowed\n"); rc = munmap(q, page_size); if (rc != 0) FAIL("munmap(normal below): %s", strerror(errno)); } else { if (r != q) FAIL("mremap() moved without MREMAP_MAYMOVE!?"); verbose_printf("testing..."); do_readback(q, 2*page_size, "normal below expanded"); rc = munmap(q, 2*page_size); if (rc != 0) FAIL("munmap(normal below expanded): %s", strerror(errno)); } rc = munmap(p, hpage_size); if (rc != 0) FAIL("munmap(huge above)"); /* Next, normal pages above, huge below */ p = mmap((void *)(slice_boundary + hpage_size), page_size, PROT_READ|PROT_WRITE, MAP_SHARED | MAP_FIXED | MAP_ANONYMOUS, -1, 0); if (p == MAP_FAILED) FAIL("mmap(normal above): %s", strerror(errno)); do_readback(p, page_size, "normal above"); q = mmap((void *)(slice_boundary - hpage_size), hpage_size, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_FIXED, fd, 0); if (q == MAP_FAILED) FAIL("mmap(huge below): %s", strerror(errno)); do_readback(q, hpage_size, "huge below"); verbose_printf("Attempting to remap..."); r = mremap(q, hpage_size, 2*hpage_size, 0); if (r == MAP_FAILED) { verbose_printf("disallowed\n"); rc = munmap(q, hpage_size); if (rc != 0) FAIL("munmap(huge below): %s", strerror(errno)); } else { if (r != q) FAIL("mremap() moved without MREMAP_MAYMOVE!?"); verbose_printf("testing..."); do_readback(q, 2*hpage_size, "huge below expanded"); rc = munmap(q, 2*hpage_size); if (rc != 0) FAIL("munmap(huge below expanded): %s", strerror(errno)); } rc = munmap(p, page_size); if (rc != 0) FAIL("munmap(normal above)"); PASS(); }
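/*
 * init_slice_boundary() exists because powerpc divides the address
 * space into "slices" that must each use a single page size; the test
 * needs an address where two slices meet, with free space on both
 * sides.  A much-condensed sketch (the real helper probes with
 * unbacked mmaps until it finds a usable boundary; the constants
 * below are assumptions):
 */
static unsigned long slice_boundary;

static void init_slice_boundary(int fd)
{
#ifdef __LP64__
    slice_boundary = 0x100000000UL; /* assumed: the 4GB slice boundary */
#else
    slice_boundary = 0x10000000UL;  /* assumed: a 256MB boundary */
#endif
    verbose_printf("Assuming slice boundary at 0x%lx\n", slice_boundary);
}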
int main(int argc, char **argv) { long hpagesize; int freepages; long size1, size2; void *p1, *p2; int st, pid, rv; test_init(argc, argv); if (!getenv("HUGETLB_MORECORE")) CONFIG("Must have HUGETLB_MORECORE=yes"); hpagesize = check_hugepagesize(); freepages = read_meminfo("HugePages_Free:"); if (freepages < 3) CONFIG("Must have at least 3 free hugepages"); /* * Allocation 1: one hugepage. Due to malloc overhead, morecore * will probably mmap two hugepages. */ size1 = hpagesize; p1 = malloc(size1); if (!p1) FAIL("Couldn't malloc %ld bytes", size1); if (!test_addr_huge(p1)) FAIL("First allocation %p not on hugepages", p1); /* * Allocation 2: all free hugepages to ensure we exhaust the free pool. */ size2 = freepages * hpagesize; p2 = malloc(size2); if (!p2) FAIL("Couldn't malloc %ld bytes", size2); st = test_addr_huge(p2); verbose_printf("Second allocation %p huge? %s\n", p2, st < 0 ? "??" : (st ? "yes" : "no")); /* * Touch the pages in a child process. Kernel sends a SIGKILL if * we run out of hugepages. */ pid = fork(); if (pid < 0) FAIL("fork: %s", strerror(errno)); if (pid == 0) { memset(p1, 0, size1); memset(p2, 0, size2); exit(0); } rv = waitpid(pid, &st, 0); if (rv < 0) FAIL("waitpid: %s\n", strerror(errno)); if (WIFSIGNALED(st)) FAIL("Child killed by signal %d touching malloc'ed memory", WTERMSIG(st)); PASS(); }
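/*
 * test_addr_huge() returns 1 when the address is hugepage-backed,
 * 0 when not, -1 when it cannot tell.  A sketch that scans
 * /proc/self/maps for the region containing the address and applies a
 * name heuristic; the real libhugetlbfs helper is more thorough:
 */
#include <stdio.h>
#include <string.h>

static int test_addr_huge(void *p)
{
    unsigned long addr = (unsigned long)p, start, end;
    char line[512];
    FILE *f = fopen("/proc/self/maps", "r");

    if (!f)
        return -1;
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "%lx-%lx", &start, &end) != 2)
            continue;
        if (addr >= start && addr < end) {
            fclose(f);
            return strstr(line, "huge") ? 1 : 0; /* hugetlbfs path? */
        }
    }
    fclose(f);
    return -1;
}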
int main(int argc, char *argv[]) { int fd; int pipefd[2]; long err; pid_t cpid; void *p; struct sigaction sa = { .sa_sigaction = sigchld_handler, .sa_flags = SA_SIGINFO, }; struct sigaction old_sa; test_init(argc, argv); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); err = sigaction(SIGCHLD, &sa, &old_sa); if (err) FAIL("Can't install SIGCHLD handler: %s", strerror(errno)); err = pipe(pipefd); if (err) FAIL("pipe(): %s", strerror(errno)); cpid = fork(); if (cpid < 0) FAIL("fork(): %s", strerror(errno)); if (cpid == 0) { child(fd, pipefd[1]); exit(0); } /* Parent */ err = read(pipefd[0], &p, sizeof(p)); if (err == -1) FAIL("Reading pipe: %s\n", strerror(errno)); if (err != sizeof(p)) FAIL("Short read over pipe"); verbose_printf("Parent received address %p\n", p); err = ptrace(PTRACE_ATTACH, cpid, NULL, NULL); if (err) FAIL("ptrace(ATTACH): %s", strerror(errno)); while (! ready_to_trace) ; do_poke(cpid, p); do_poke(cpid, p + getpagesize()); err = sigaction(SIGCHLD, &old_sa, NULL); if (err) FAIL("Clearing SIGCHLD handler: %s", strerror(errno)); ptrace(PTRACE_KILL, cpid, NULL, NULL); PASS(); }
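/*
 * do_poke() writes into the traced child through ptrace and is what
 * actually exercises hugepage access via access_process_vm().  A
 * sketch using the standard PEEKDATA/POKEDATA pair (the real helper
 * presumably also peeks the value back to verify the write stuck):
 */
#include <sys/ptrace.h>

static void do_poke(pid_t pid, void *p)
{
    long val;

    verbose_printf("Poking %p...", p);
    errno = 0;
    val = ptrace(PTRACE_PEEKDATA, pid, p, NULL);
    if (val == -1 && errno)
        FAIL("ptrace(PEEKDATA): %s", strerror(errno));
    if (ptrace(PTRACE_POKEDATA, pid, p, (void *)~val) == -1)
        FAIL("ptrace(POKEDATA): %s", strerror(errno));
    verbose_printf("done\n");
}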
int main(int argc, char *argv[]) { long hpage_size; int fd, dfd; void *p; ssize_t ret; test_init(argc, argv); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); dfd = open(TMPFILE, O_CREAT|O_EXCL|O_DIRECT|O_RDWR, 0600); if (dfd < 0) { if (errno == EEXIST) CONFIG("Temp file " TMPFILE " already exists"); else CONFIG("Failed to open direct-IO file: %s", strerror(errno)); } unlink(TMPFILE); p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); if (p == MAP_FAILED) FAIL("mmap hugetlbfs file: %s", strerror(errno)); memcpy(p, P0, 8); /* Direct write from huge page */ ret = write(dfd, p, IOSZ); if (ret != IOSZ) FAIL("Direct-IO write from huge page"); if (lseek(dfd, 0, SEEK_SET) == -1) FAIL("lseek: %s", strerror(errno)); /* Check for accuracy */ ret = read(dfd, buf, IOSZ); if (ret != IOSZ) FAIL("Direct-IO read to normal memory"); if (memcmp(P0, buf, 8)) FAIL("Memory mismatch after Direct-IO write"); if (lseek(dfd, 0, SEEK_SET) == -1) FAIL("lseek: %s", strerror(errno)); /* Direct read to huge page */ memset(p, 0, IOSZ); ret = read(dfd, p, IOSZ); if (ret != IOSZ) FAIL("Direct-IO read to huge page"); /* Check for accuracy */ if (memcmp(p, P0, 8)) FAIL("Memory mismatch after Direct-IO read"); close(dfd); unlink(TMPFILE); PASS(); }
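/*
 * P0, IOSZ, and buf come from elsewhere in this test.  Plausible
 * definitions, assuming an 8-byte pattern and an I/O size that
 * satisfies O_DIRECT's alignment rules (buffer address and length
 * both aligned to the device's logical block size):
 */
#define P0   "ffffffff" /* assumed 8-byte pattern */
#define IOSZ 4096
static char buf[IOSZ] __attribute__((aligned(IOSZ)));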
int main(int argc, char *argv[]) { long hpage_size; int fd; int err; unsigned long free_before, free_after; test_init(argc, argv); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); free_before = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); /* * First preallocate the file with just 1 byte. Allocation sizes * are rounded up, so we should get an entire huge page. */ err = fallocate(fd, 0, 0, 1); if (err) { if (errno == EOPNOTSUPP) IRRELEVANT(); FAIL("fallocate(): %s", strerror(errno)); } free_after = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); if (free_before - free_after != 1) FAIL("fallocate 1 byte did not preallocate entire huge page\n"); /* * Now punch a hole of just 1 byte. On hole punch, sizes are * rounded down, so this operation should not create a hole. */ err = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 1); if (err) FAIL("fallocate(FALLOC_FL_PUNCH_HOLE): %s", strerror(errno)); free_after = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); if (free_after == free_before) FAIL("fallocate hole punch 1 byte freed a huge page\n"); /* * Now punch a hole of 2 * hpage_size - 1 bytes. This size should be * rounded down to a single huge page and the hole created. */ err = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, (2 * hpage_size) - 1); if (err) FAIL("fallocate(FALLOC_FL_PUNCH_HOLE): %s", strerror(errno)); free_after = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); if (free_after != free_before) FAIL("fallocate hole punch 2 * hpage_size - 1 bytes did not free huge page\n"); /* * Perform a preallocate operation with offset 1 and size of * hpage_size. The offset should be rounded down and the * size rounded up to preallocate two huge pages. */ err = fallocate(fd, 0, 1, hpage_size); if (err) FAIL("fallocate(): %s", strerror(errno)); free_after = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); if (free_before - free_after != 2) FAIL("fallocate 1 byte offset, huge page size did not preallocate two huge pages\n"); /* * The hole punch code will only delete 'whole' huge pages that are * in the specified range. The offset is rounded up, and (offset * + size) is rounded down to determine the huge pages to be deleted. * In this case, after rounding the range is (hpage_size, hpage_size), * so no pages should be deleted. */ err = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 1, hpage_size); if (err) FAIL("fallocate(FALLOC_FL_PUNCH_HOLE): %s", strerror(errno)); free_after = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); if (free_before - free_after != 2) FAIL("fallocate hole punch 1 byte offset, huge page size incorrectly deleted a huge page\n"); /* * To delete both huge pages, the range passed to hole punch must * overlap the allocated pages. */ err = fallocate(fd, FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0, 2 * hpage_size); if (err) FAIL("fallocate(FALLOC_FL_PUNCH_HOLE): %s", strerror(errno)); free_after = get_huge_page_counter(hpage_size, HUGEPAGES_FREE); if (free_after != free_before) FAIL("fallocate hole punch did not delete two huge pages\n"); PASS(); }
int main(int argc, char *argv[]) { long hpage_size; int fd; void *p; unsigned long straddle_addr; test_init(argc, argv); hpage_size = check_hugepagesize(); if (sizeof(void *) <= 4) TEST_BUG("64-bit only"); if (hpage_size > FOURGB) CONFIG("Huge page size too large"); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); straddle_addr = FOURGB - hpage_size; /* We first try to get the mapping without MAP_FIXED */ verbose_printf("Mapping without MAP_FIXED at %lx...", straddle_addr); p = mmap((void *)straddle_addr, 2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == (void *)straddle_addr) { /* These tests irrelevant if we didn't get the * straddle address */ verbose_printf("done\n"); if (test_addr_huge(p) != 1) FAIL("Mapped address is not hugepage"); if (test_addr_huge(p + hpage_size) != 1) FAIL("Mapped address is not hugepage"); verbose_printf("Clearing below 4GB..."); memset(p, 0, hpage_size); verbose_printf("done\n"); verbose_printf("Clearing above 4GB..."); memset(p + hpage_size, 0, hpage_size); verbose_printf("done\n"); } else { verbose_printf("got %p instead, never mind\n", p); munmap(p, 2*hpage_size); } verbose_printf("Mapping with MAP_FIXED at %lx...", straddle_addr); p = mmap((void *)straddle_addr, 2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, fd, 0); if (p == MAP_FAILED) FAIL("mmap() FIXED: %s", strerror(errno)); if (p != (void *)straddle_addr) { verbose_printf("got %p instead\n", p); FAIL("Wrong address with MAP_FIXED"); } verbose_printf("done\n"); if (test_addr_huge(p) != 1) FAIL("Mapped address is not hugepage"); if (test_addr_huge(p + hpage_size) != 1) FAIL("Mapped address is not hugepage"); verbose_printf("Clearing below 4GB..."); memset(p, 0, hpage_size); verbose_printf("done\n"); verbose_printf("Clearing above 4GB..."); memset(p + hpage_size, 0, hpage_size); verbose_printf("done\n"); verbose_printf("Tested above 4GB\n"); PASS(); }
int main(int argc, char *argv[]) { int page_size; long hpage_size; off_t buggy_offset; int fd; void *p, *q; volatile int *pi; int err; test_init(argc, argv); page_size = getpagesize(); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); /* First, we make a 2 page sane hugepage mapping. Then we * memset() it to ensure that the ptes are instantiated for * it. Then we attempt to replace the second half of the map * with one at a bogus offset. We leave the first page of * sane mapping in place to ensure that the corresponding * pud/pmd/whatever entries aren't cleaned away. It's those * bad entries which can trigger bad_pud() checks if the * backout path for the bogus mapping is buggy, which it was * in some kernels. */ verbose_printf("Free hugepages: %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_FREE)); verbose_printf("Mapping reference map..."); /* First get a sane two-hugepage arena at file offset 0 */ p = mmap(NULL, 2*hpage_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, 0); if (p == MAP_FAILED) FAIL("mmap(): %s", strerror(errno)); verbose_printf("%p-%p\n", p, p+2*hpage_size-1); verbose_printf("Free hugepages: %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_FREE)); /* Instantiate the pages */ verbose_printf("Instantiating..."); memset(p, 0, 2*hpage_size); pi = p; *pi = RANDOM_CONSTANT; verbose_printf("done.\n"); verbose_printf("Free hugepages: %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_FREE)); /* Toggle the permissions on the first page. This forces TLB * entries (including hash page table on powerpc) to be * flushed, so that the page tables must be accessed for the * test further down. In the buggy case, those page tables * can get thrown away by a pud_clear() */ err = mprotect(p, hpage_size, PROT_READ); if (err) FAIL("mprotect(%p, 0x%lx, PROT_READ): %s", p, hpage_size, strerror(errno)); /* Replace top hpage by hpage mapping at confusing file offset */ buggy_offset = page_size; verbose_printf("Replacing map at %p with map from offset 0x%lx...", p + hpage_size, (unsigned long)buggy_offset); q = mmap(p + hpage_size, hpage_size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, fd, buggy_offset); if (q != MAP_FAILED) FAIL("bogus offset mmap() succeeded at %p", q); if (errno != EINVAL) FAIL("bogus mmap() failed with \"%s\" instead of \"%s\"", strerror(errno), strerror(EINVAL)); verbose_printf("%s\n", strerror(errno)); verbose_printf("Free hugepages: %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_FREE)); if (*pi != RANDOM_CONSTANT) FAIL("Pre-existing mapping clobbered: %x instead of %x", *pi, RANDOM_CONSTANT); verbose_printf("Free hugepages: %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_FREE)); /* The real test is whether we got a bad_pud() or similar * during the run. The check above, combined with the earlier * mprotect() to flush the TLB, is supposed to catch it, but * it's hard to be certain. Once bad_pud() is called * behaviour can be very strange. */ PASS_INCONCLUSIVE(); }
int main(int argc, char **argv) { int freepages; long size1, size2; void *p1, *p2; int st, pid, rv; unsigned long long mapping_size; test_init(argc, argv); if (!getenv("HUGETLB_MORECORE")) CONFIG("Must have HUGETLB_MORECORE=yes"); hpagesize = check_hugepagesize(); /* Must be root because this test modifies the overcommit pool */ check_must_be_root(); oc_pool = read_nr_overcommit(hpagesize); if (oc_pool > 0) set_nr_overcommit_hugepages(hpagesize, 0); freepages = get_huge_page_counter(hpagesize, HUGEPAGES_FREE); if (freepages < 3) CONFIG("Must have at least 3 free hugepages"); /* * Allocation 1: one hugepage. Due to malloc overhead, morecore * will probably mmap two hugepages. */ size1 = hpagesize; p1 = malloc(size1); if (!p1) FAIL("Couldn't malloc %ld bytes", size1); mapping_size = get_mapping_page_size(p1); if (mapping_size != hpagesize) FAIL("First allocation %p not on hugepages", p1); /* * Allocation 2: all free hugepages to ensure we exhaust the free pool. */ size2 = freepages * hpagesize; p2 = malloc(size2); if (!p2) FAIL("Couldn't malloc %ld bytes", size2); mapping_size = get_mapping_page_size(p2); st = (mapping_size == hpagesize); verbose_printf("Second allocation %p huge? %s\n", p2, st ? "yes" : "no"); /* * Touch the pages in a child process. Kernel sends a SIGKILL if * we run out of hugepages. */ pid = fork(); if (pid < 0) FAIL("fork: %s", strerror(errno)); if (pid == 0) { memset(p1, 0, size1); memset(p2, 0, size2); exit(0); } rv = waitpid(pid, &st, 0); if (rv < 0) FAIL("waitpid: %s\n", strerror(errno)); if (WIFSIGNALED(st)) FAIL("Child killed by signal %d touching malloc'ed memory", WTERMSIG(st)); PASS(); }
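/*
 * get_mapping_page_size() is assumed to report the page size actually
 * backing an address.  A sketch reading KernelPageSize from
 * /proc/self/smaps for the region containing it:
 */
#include <stdio.h>

static unsigned long long get_mapping_page_size(void *p)
{
    unsigned long addr = (unsigned long)p, start, end;
    unsigned long long kb = 0;
    int in_region = 0;
    char line[512];
    FILE *f = fopen("/proc/self/smaps", "r");

    if (!f)
        return 0;
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "%lx-%lx", &start, &end) == 2)
            in_region = (addr >= start && addr < end);
        else if (in_region &&
                 sscanf(line, "KernelPageSize: %llu kB", &kb) == 1)
            break;
    }
    fclose(f);
    return kb * 1024; /* bytes; 0 if the lookup failed */
}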
int main(int argc, char *argv[]) { long hpage_size; unsigned long totpages, chunk1, chunk2; int fd; void *p, *q; pid_t child, ret; int status; test_init(argc, argv); totpages = read_meminfo("HugePages_Free:"); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); chunk1 = (totpages / 2) + 1; chunk2 = totpages - chunk1 + 1; verbose_printf("overcommit: %lu hugepages available: " "chunk1=%lu chunk2=%lu\n", totpages, chunk1, chunk2); p = mmap(NULL, chunk1*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == MAP_FAILED) FAIL("mmap() chunk1: %s", strerror(errno)); q = mmap(NULL, chunk2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, chunk1*hpage_size); if (q == MAP_FAILED) { if (errno != ENOMEM) FAIL("mmap() chunk2: %s", strerror(errno)); else PASS(); } verbose_printf("Looks like we've overcommitted, testing...\n"); /* Looks like we're overcommitted, but we need to confirm that * this is bad. We touch it all in a child process because an * overcommit will generally lead to a SIGKILL which we can't * handle, of course. */ child = fork(); if (child < 0) FAIL("fork(): %s", strerror(errno)); if (child == 0) { memset(p, 0, chunk1*hpage_size); memset(q, 0, chunk2*hpage_size); exit(0); } ret = waitpid(child, &status, 0); if (ret < 0) FAIL("waitpid(): %s", strerror(errno)); if (WIFSIGNALED(status)) FAIL("Killed by signal \"%s\" due to overcommit", strsignal(WTERMSIG(status))); PASS(); }
int main(int argc, char *argv[]) { long hpage_size; int fd; void *p; volatile unsigned int *q; int err; int sigbus_count = 0; unsigned long initial_rsvd, rsvd; struct sigaction sa = { .sa_sigaction = sigbus_handler, .sa_flags = SA_SIGINFO, }; test_init(argc, argv); hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); initial_rsvd = get_huge_page_counter(hpage_size, HUGEPAGES_RSVD); verbose_printf("Reserve count before map: %lu\n", initial_rsvd); p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == MAP_FAILED) FAIL("mmap(): %s", strerror(errno)); q = p; verbose_printf("Reserve count after map: %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_RSVD)); *q = 0; verbose_printf("Reserve count after touch: %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_RSVD)); err = ftruncate(fd, 0); if (err) FAIL("ftruncate(): %s", strerror(errno)); rsvd = get_huge_page_counter(hpage_size, HUGEPAGES_RSVD); verbose_printf("Reserve count after truncate: %lu\n", rsvd); if (rsvd != initial_rsvd) FAIL("Reserved count is not restored after truncate: %lu instead of %lu", rsvd, initial_rsvd); err = sigaction(SIGBUS, &sa, NULL); if (err) FAIL("sigaction(): %s", strerror(errno)); if (sigsetjmp(sig_escape, 1) == 0) *q; /* Fault, triggering a SIGBUS */ else sigbus_count++; if (sigbus_count != 1) FAIL("Didn't SIGBUS after truncate"); rsvd = get_huge_page_counter(hpage_size, HUGEPAGES_RSVD); verbose_printf("Reserve count after SIGBUS fault: %lu\n", rsvd); if (rsvd != initial_rsvd) FAIL("Reserved count is altered by SIGBUS fault: %lu instead of %lu", rsvd, initial_rsvd); munmap(p, hpage_size); verbose_printf("Reserve count after munmap(): %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_RSVD)); close(fd); verbose_printf("Reserve count after close(): %lu\n", get_huge_page_counter(hpage_size, HUGEPAGES_RSVD)); PASS(); }
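/*
 * sig_escape and sigbus_handler are assumed to pair up via
 * sigsetjmp/siglongjmp, so the deliberate fault on the truncated page
 * returns control to the test instead of killing it:
 */
#include <setjmp.h>
#include <signal.h>

static sigjmp_buf sig_escape;

static void sigbus_handler(int signum, siginfo_t *si, void *uc)
{
    siglongjmp(sig_escape, 1); /* land in the else arm of sigsetjmp() */
}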
int main(int argc, char *argv[]) { int page_size; long hpage_size; long long buggy_offset, truncate_point; int fd; void *p, *q; volatile unsigned int *pi, *qi; int err; struct sigaction sa_fail = { .sa_sigaction = sigbus_handler_fail, .sa_flags = SA_SIGINFO, }; struct sigaction sa_pass = { .sa_sigaction = sigbus_handler_pass, .sa_flags = SA_SIGINFO, }; test_init(argc, argv); page_size = getpagesize(); hpage_size = check_hugepagesize(); check_free_huge_pages(3); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); truncate_point = FOURGIG; buggy_offset = truncate_point / (hpage_size / page_size); buggy_offset = ALIGN(buggy_offset, hpage_size); verbose_printf("Mapping 3 hpages at offset 0x%llx...", truncate_point); /* First get arena of three hpages size, at file offset 4GB */ q = mmap64(NULL, 3*hpage_size, PROT_READ|PROT_WRITE, MAP_PRIVATE, fd, truncate_point); if (q == MAP_FAILED) FAIL("mmap() offset 4GB: %s", strerror(errno)); verbose_printf("mapped at %p\n", q); qi = q; /* Touch the high page */ *qi = 0; /* This part of the test makes the problem more obvious, but * is not essential. It can't be done on powerpc, where * segment restrictions prohibit us from performing such a * mapping, so skip it there. Similarly, ia64's address space * restrictions prevent this. */ #if !defined(__powerpc__) && !defined(__powerpc64__) && !defined(__ia64__) /* Replace middle hpage by tinypage mapping to trigger * nr_ptes BUG */ verbose_printf("Replacing map at %p-%p...", q + hpage_size, q + hpage_size + hpage_size-1); p = mmap64(q + hpage_size, hpage_size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE|MAP_ANON, -1, 0); if (p != q + hpage_size) FAIL("mmap() before low hpage"); verbose_printf("done\n"); pi = p; /* Touch one page to allocate its page table */ *pi = 0; #endif /* Replace top hpage by hpage mapping at confusing file offset */ verbose_printf("Replacing map at %p with map from offset 0x%llx...", q + 2*hpage_size, buggy_offset); p = mmap64(q + 2*hpage_size, hpage_size, PROT_READ|PROT_WRITE, MAP_FIXED|MAP_PRIVATE, fd, buggy_offset); if (p != q + 2*hpage_size) FAIL("mmap() buggy offset 0x%llx", buggy_offset); verbose_printf("done\n"); pi = p; /* Touch the low page with something non-zero */ *pi = 1; verbose_printf("Truncating at 0x%llx...", truncate_point); err = ftruncate64(fd, truncate_point); if (err) FAIL("ftruncate(): %s", strerror(errno)); verbose_printf("done\n"); err = sigaction(SIGBUS, &sa_fail, NULL); if (err) FAIL("sigaction() fail: %s", strerror(errno)); if (*pi != 1) FAIL("Data 1 has changed to %u", *pi); err = sigaction(SIGBUS, &sa_pass, NULL); if (err) FAIL("sigaction() pass: %s", strerror(errno)); *qi; /* Should have SIGBUSed above */ FAIL("Didn't SIGBUS on truncated page."); }
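/*
 * The two SIGBUS handlers installed above are assumed to be trivial
 * verdicts: a fault while the low page's data should still be mapped
 * is a failure, while a fault on the truncated page is the expected
 * ending:
 */
static void sigbus_handler_fail(int signum, siginfo_t *si, void *uc)
{
    FAIL("SIGBUS at %p while data should still be mapped", si->si_addr);
}

static void sigbus_handler_pass(int signum, siginfo_t *si, void *uc)
{
    PASS(); /* faulting on the truncated page is the success case */
}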
static void run_race(void *syncarea, int race_type) { volatile int *trigger1, *trigger2; int fd; void *p; int status1, status2; int ret; memset(syncarea, 0, sizeof(*trigger1) + sizeof(*trigger2)); trigger1 = syncarea; trigger2 = trigger1 + 1; /* Get a new file for the final page */ fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); verbose_printf("Mapping final page.. "); p = mmap(NULL, hpage_size, PROT_READ|PROT_WRITE, race_type, fd, 0); if (p == MAP_FAILED) FAIL("mmap(): %s", strerror(errno)); verbose_printf("%p\n", p); if (race_type == MAP_SHARED) { child1 = fork(); if (child1 < 0) FAIL("fork(): %s", strerror(errno)); if (child1 == 0) proc_racer(p, 0, trigger1, trigger2); child2 = fork(); if (child2 < 0) FAIL("fork(): %s", strerror(errno)); if (child2 == 0) proc_racer(p, 1, trigger2, trigger1); /* wait() calls */ ret = waitpid(child1, &status1, 0); if (ret < 0) FAIL("waitpid() child 1: %s", strerror(errno)); verbose_printf("Child 1 status: %x\n", status1); ret = waitpid(child2, &status2, 0); if (ret < 0) FAIL("waitpid() child 2: %s", strerror(errno)); verbose_printf("Child 2 status: %x\n", status2); if (WIFSIGNALED(status1)) FAIL("Child 1 killed by signal %s", strsignal(WTERMSIG(status1))); if (WIFSIGNALED(status2)) FAIL("Child 2 killed by signal %s", strsignal(WTERMSIG(status2))); status1 = WEXITSTATUS(status1); status2 = WEXITSTATUS(status2); } else { struct racer_info ri1 = { .p = p, .cpu = 0, .mytrigger = trigger1, .othertrigger = trigger2, }; struct racer_info ri2 = { .p = p, .cpu = 1, .mytrigger = trigger2, .othertrigger = trigger1, }; void *tret1, *tret2; ret = pthread_create(&thread1, NULL, thread_racer, &ri1); if (ret != 0) FAIL("pthread_create() 1: %s\n", strerror(errno)); ret = pthread_create(&thread2, NULL, thread_racer, &ri2); if (ret != 0) FAIL("pthread_create() 2: %s\n", strerror(errno)); ret = pthread_join(thread1, &tret1); if (ret != 0) FAIL("pthread_join() 1: %s\n", strerror(errno)); if (tret1 != &ri1) FAIL("Thread 1 returned %p not %p, killed?\n", tret1, &ri1); ret = pthread_join(thread2, &tret2); if (ret != 0) FAIL("pthread_join() 2: %s\n", strerror(errno)); if (tret2 != &ri2) FAIL("Thread 2 returned %p not %p, killed?\n", tret2, &ri2); status1 = ri1.status; status2 = ri2.status; } if (status1 != 0) FAIL("Racer 1 terminated with code %d", status1); if (status2 != 0) FAIL("Racer 2 terminated with code %d", status2); } int main(int argc, char *argv[]) { unsigned long totpages; int fd; void *p, *q; unsigned long i; int race_type; test_init(argc, argv); if (argc != 2) CONFIG("Usage: alloc-instantiate-race <private|shared>"); totpages = read_meminfo("HugePages_Free:"); if (strcmp(argv[1], "shared") == 0) { race_type = MAP_SHARED; } else if (strcmp(argv[1], "private") == 0) { race_type = MAP_PRIVATE; } else { CONFIG("Usage: alloc-instantiate-race <private|shared>"); } hpage_size = check_hugepagesize(); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); /* Get a shared normal page for synchronization */ verbose_printf("Mapping synchronization area.."); q = mmap(NULL, getpagesize(), PROT_READ|PROT_WRITE, MAP_SHARED|MAP_ANONYMOUS, -1, 0); if (q == MAP_FAILED) FAIL("mmap() sync area: %s", strerror(errno)); verbose_printf("done\n"); verbose_printf("Mapping %lu/%lu pages.. ", totpages-1, totpages); p = mmap(NULL, (totpages-1)*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == MAP_FAILED) FAIL("mmap() 1: %s", strerror(errno)); /* Allocate all save one of the pages up front */ verbose_printf("instantiating.. "); for (i = 0; i < (totpages - 1); i++) memset(p + (i * hpage_size), 0, sizeof(int)); verbose_printf("done\n"); run_race(q, race_type); PASS(); }
int main(int argc, char *argv[]) { long hpage_size; int fd; void *p; unsigned long straddle_addr; test_init(argc, argv); hpage_size = check_hugepagesize(); if (sizeof(void *) <= 4) TEST_BUG("64-bit only"); if (hpage_size > FOURGB) CONFIG("Huge page size too large"); fd = hugetlbfs_unlinked_fd(); if (fd < 0) FAIL("hugetlbfs_unlinked_fd()"); straddle_addr = FOURGB - hpage_size; /* We first try to get the mapping without MAP_FIXED */ verbose_printf("Mapping without MAP_FIXED at %lx...", straddle_addr); p = mmap((void *)straddle_addr, 2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0); if (p == (void *)straddle_addr) { /* These tests irrelevant if we didn't get the * straddle address */ verbose_printf("done\n"); if (test_addr_huge(p) != 1) FAIL("Mapped address is not hugepage"); if (test_addr_huge(p + hpage_size) != 1) FAIL("Mapped address is not hugepage"); verbose_printf("Clearing below 4GB..."); memset(p, 0, hpage_size); verbose_printf("done\n"); verbose_printf("Clearing above 4GB..."); memset(p + hpage_size, 0, hpage_size); verbose_printf("done\n"); } else { verbose_printf("got %p instead, never mind\n", p); munmap(p, 2*hpage_size); } verbose_printf("Mapping with MAP_FIXED at %lx...", straddle_addr); p = mmap((void *)straddle_addr, 2*hpage_size, PROT_READ|PROT_WRITE, MAP_SHARED|MAP_FIXED, fd, 0); if (p == MAP_FAILED) { /* this area crosses last low slice and first high slice */ unsigned long below_start = FOURGB - 256L*1024*1024; unsigned long above_end = 1024L*1024*1024*1024; if (range_is_mapped(below_start, above_end) == 1) { verbose_printf("region (4G-256M)-1T is not free\n"); verbose_printf("mmap() failed: %s\n", strerror(errno)); PASS_INCONCLUSIVE(); } else FAIL("mmap() FIXED failed: %s\n", strerror(errno)); } if (p != (void *)straddle_addr) { verbose_printf("got %p instead\n", p); FAIL("Wrong address with MAP_FIXED"); } verbose_printf("done\n"); if (test_addr_huge(p) != 1) FAIL("Mapped address is not hugepage"); if (test_addr_huge(p + hpage_size) != 1) FAIL("Mapped address is not hugepage"); verbose_printf("Clearing below 4GB..."); memset(p, 0, hpage_size); verbose_printf("done\n"); verbose_printf("Clearing above 4GB..."); memset(p + hpage_size, 0, hpage_size); verbose_printf("done\n"); verbose_printf("Tested above 4GB\n"); PASS(); }
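/*
 * range_is_mapped(), used in the MAP_FAILED branch above, is assumed
 * to answer whether anything in [low, high) already appears in the
 * process address space.  A sketch over /proc/self/maps:
 */
#include <stdio.h>

static int range_is_mapped(unsigned long low, unsigned long high)
{
    unsigned long start, end;
    char line[512];
    FILE *f = fopen("/proc/self/maps", "r");

    if (!f)
        return -1;
    while (fgets(line, sizeof(line), f)) {
        if (sscanf(line, "%lx-%lx", &start, &end) != 2)
            continue;
        if (start < high && end > low) { /* overlaps the range */
            fclose(f);
            return 1;
        }
    }
    fclose(f);
    return 0;
}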