/*
 * Tries to reproduce a sched autogroup use-after-free:
 * a grandchild keeps running on an autogroup whose last
 * reference was dropped when the middle process died.
 */
static void do_test(void)
{
	int i;

	if (!SAFE_FORK()) {
		/* child: enable autogroup and move into a fresh session */
		SAFE_FILE_PRINTF(PATH_AUTOGROUP, "%d", 1);
		SAFE_SETSID();

		/* grandchild parks in pause(); child continues below */
		if (SAFE_FORK())
			pause();

		/* grandchild: kill the child (our parent) */
		SAFE_KILL(getppid(), SIGKILL);
		usleep(1000);

		// The child has gone, the grandchild runs with kref == 1
		SAFE_FILE_PRINTF(PATH_AUTOGROUP, "%d", 0);
		SAFE_SETSID();

		// runs with the freed ag/tg
		for (i = 0; i < LOOPS; i++)
			usleep(10);

		TST_CHECKPOINT_WAKE(0);
		exit(0);
	}

	SAFE_WAIT(NULL); // destroy the child's ag/tg

	/* wait for the grandchild to finish poking the (possibly freed) ag/tg */
	TST_CHECKPOINT_WAIT(0);

	tst_res(TPASS, "Bug not reproduced");
}
static void run(void) { pid_t pid; int status; retry: pid = SAFE_FORK(); if (!pid) { setup_cgroup_paths(getpid()); child(); } setup_cgroup_paths(pid); SAFE_WAIT(&status); cleanup(); /* * Rarely cgroup OOM kills both children not only the one that allocates * memory in loop, hence we retry here if that happens. */ if (WIFSIGNALED(status)) { tst_res(TINFO, "Both children killed, retrying..."); goto retry; } if (WIFEXITED(status) && WEXITSTATUS(status) == TCONF) tst_brk(TCONF, "MADV_FREE is not supported"); if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) tst_brk(TBROK, "Child %s", tst_strstatus(status)); }
static void test(void) { int status; /* unshares the mount ns */ if (unshare(CLONE_NEWNS) == -1) tst_brkm(TBROK | TERRNO, cleanup, "unshare failed"); /* makes sure parent mounts/umounts have no effect on a real system */ SAFE_MOUNT(cleanup, "none", "/", "none", MS_REC|MS_PRIVATE, NULL); /* bind mounts DIRA to itself */ SAFE_MOUNT(cleanup, DIRA, DIRA, "none", MS_BIND, NULL); /* makes mount DIRA shared */ SAFE_MOUNT(cleanup, "none", DIRA, "none", MS_SHARED, NULL); if (do_clone_tests(CLONE_NEWNS, child_func, NULL, NULL, NULL) == -1) tst_brkm(TBROK | TERRNO, cleanup, "clone failed"); /* waits for child to make a slave mount */ TST_CHECKPOINT_PARENT_WAIT(cleanup, &checkpoint1); /* bind mounts DIRB to DIRA making contents of DIRB visible * in DIRA */ SAFE_MOUNT(cleanup, DIRB, DIRA, "none", MS_BIND, NULL); TST_CHECKPOINT_SIGNAL_CHILD(cleanup, &checkpoint2); TST_CHECKPOINT_PARENT_WAIT(cleanup, &checkpoint1); SAFE_UMOUNT(cleanup, DIRA); TST_CHECKPOINT_SIGNAL_CHILD(cleanup, &checkpoint2); TST_CHECKPOINT_PARENT_WAIT(cleanup, &checkpoint1); /* checks that slave mount doesn't propagate to shared mount */ if ((access(DIRA"/A", F_OK) == 0) && (access(DIRA"/B", F_OK) == -1)) tst_resm(TPASS, "propagation from slave mount passed"); else tst_resm(TFAIL, "propagation form slave mount failed"); TST_CHECKPOINT_SIGNAL_CHILD(cleanup, &checkpoint2); SAFE_WAIT(cleanup, &status); if (WIFEXITED(status)) { if (WEXITSTATUS(status) == 0) tst_resm(TPASS, "propagation to slave mount passed"); else tst_resm(TFAIL, "propagation to slave mount failed"); } if (WIFSIGNALED(status)) { tst_resm(TBROK, "child was killed with signal %s", tst_strsig(WTERMSIG(status))); return; } SAFE_UMOUNT(cleanup, DIRA); }
int main(int ac, char **av) { int lc; int sleep_time = 5; int status; int time_sec = 3; pid_t cpid; tst_parse_opts(ac, av, NULL, NULL); setup(); for (lc = 0; TEST_LOOPING(lc); lc++) { tst_count = 0; /* * Call First alarm() with non-zero time parameter * 'time_sec' to send SIGALRM to the process. */ TEST(alarm(time_sec)); /* Now, fork a child process */ cpid = FORK_OR_VFORK(); if (cpid < 0) { tst_resm(TFAIL | TERRNO, "fork() failed"); } sleep(sleep_time); if (cpid == 0) { if (alarms_received == 0) exit(0); else { printf("alarm request not cleared in " "child; alarms received:%d\n", alarms_received); exit(1); } } else { /* Wait for child to complete execution */ SAFE_WAIT(cleanup, &status); if (!WIFEXITED(status) || WEXITSTATUS(status) != 0) tst_brkm(TBROK | TERRNO, cleanup, "child exited abnormally"); } } cleanup(); tst_exit(); }
static void test(void) { int status; /* unshares the mount ns */ if (unshare(CLONE_NEWNS) == -1) tst_brkm(TBROK | TERRNO, cleanup, "unshare failed"); /* makes sure parent mounts/umounts have no effect on a real system */ SAFE_MOUNT(cleanup, "none", "/", "none", MS_REC|MS_PRIVATE, NULL); /* bind mounts DIRA to itself */ SAFE_MOUNT(cleanup, DIRA, DIRA, "none", MS_BIND, NULL); /* makes mount DIRA shared */ SAFE_MOUNT(cleanup, "none", DIRA, "none", MS_SHARED, NULL); if (do_clone_tests(CLONE_NEWNS, child_func, NULL, NULL, NULL) == -1) tst_brkm(TBROK | TERRNO, cleanup, "clone failed"); /* bind mounts DIRB to DIRA making contents of DIRB visible * in DIRA */ SAFE_MOUNT(cleanup, DIRB, DIRA, "none", MS_BIND, NULL); TST_SAFE_CHECKPOINT_WAKE_AND_WAIT(cleanup, 0); SAFE_UMOUNT(cleanup, DIRA); TST_SAFE_CHECKPOINT_WAKE_AND_WAIT(cleanup, 0); if (access(DIRA"/B", F_OK) == 0) tst_resm(TPASS, "shared mount in child passed"); else tst_resm(TFAIL, "shared mount in child failed"); TST_SAFE_CHECKPOINT_WAKE(cleanup, 0); SAFE_WAIT(cleanup, &status); if (WIFEXITED(status)) { if ((WEXITSTATUS(status) == 0)) tst_resm(TPASS, "shared mount in parent passed"); else tst_resm(TFAIL, "shared mount in parent failed"); } if (WIFSIGNALED(status)) { tst_resm(TBROK, "child was killed with signal %s", tst_strsig(WTERMSIG(status))); return; } SAFE_UMOUNT(cleanup, DIRA); }
int main(int ac, char **av) { int lc; int status; tst_parse_opts(ac, av, NULL, NULL); #ifdef UCLINUX maybe_run_child(&do_child, ""); #endif setup(); for (lc = 0; TEST_LOOPING(lc); lc++) { tst_count = 0; if ((cpid = FORK_OR_VFORK()) == -1) tst_brkm(TBROK | TERRNO, NULL, "fork() failed"); if (cpid == 0) { #ifdef UCLINUX if (self_exec(av[0], "") < 0) tst_brkm(TBROK, cleanup, "self_exec failed"); #else do_child(); #endif } TST_PROCESS_STATE_WAIT(cleanup, cpid, 'S'); kill(cpid, SIGKILL); SAFE_WAIT(NULL, &status); if (WIFSIGNALED(status) && WTERMSIG(status) == SIGKILL) { tst_resm(TPASS, "pause() did not return after SIGKILL"); continue; } if (WIFSIGNALED(status)) { tst_resm(TFAIL, "child killed by %s unexpectedly", tst_strsig(WTERMSIG(status))); continue; } tst_resm(TFAIL, "child exited with %i", WEXITSTATUS(status)); } cleanup(); tst_exit(); }
// Accept as much as <size>, from the streaming GET, into caller's <buf>. // We may discover EOF at any time. In that case, we'll return however // much was actually read. The next call // will just short-circuit to return 0, signalling EOF to caller. // // return -1 with errno, for failures. // else return number of chars we get. // ssize_t stream_get(ObjectStream* os, char* buf, size_t size) { static const int get_timeout_sec = 10; /* totally made up out of thin air */ IOBuf* b = &os->iob; // shorthand LOG(LOG_INFO, "entry\n"); if (! (os->flags & OSF_OPEN)) { LOG(LOG_ERR, "%s isn't open\n", os->url); errno = EINVAL; /* ?? */ return -1; } if (! (os->flags & OSF_READING)) { LOG(LOG_ERR, "%s isn't open for reading\n", os->url); errno = EINVAL; /* ?? */ return -1; } if (os->flags & OSF_EOF) { LOG(LOG_INFO, "already at EOF\n"); return 0; // b->write_count; } os->flags &= ~(OSF_EOB); aws_iobuf_reset(b); // doesn't affect <user_data> aws_iobuf_extend_static(b, (char*)buf, size); LOG(LOG_INFO, "got %ld-byte buffer for writefn\n", size); // let writefn move data POST(&os->iob_empty); // wait for writefn to fill our buffer LOG(LOG_INFO, "waiting for writefn\n"); SAFE_WAIT(&os->iob_full, get_timeout_sec, os); // SAFE_WAIT_KILL(&os->iob_full, get_timeout_sec, os); // writefn detected CURL EOF? if (os->flags & OSF_EOF) { LOG(LOG_INFO, "EOF is asserted\n"); } if (os->flags & OSF_EOB) { LOG(LOG_INFO, "EOB is asserted\n"); } os->written += b->write_count; LOG(LOG_INFO, "returning %ld (total=%ld)\n", b->write_count, os->written); return (b->write_count); }
int main(int ac, char **av) { int lc; pid_t pid; char *argv[2] = {TEST_APP, NULL}; char *env[1] = {NULL}; tst_parse_opts(ac, av, NULL, NULL); #ifdef UCLINUX maybe_run_child(&do_child, ""); #endif setup(); for (lc = 0; TEST_LOOPING(lc); lc++) { if ((pid = FORK_OR_VFORK()) == -1) { tst_brkm(TBROK, cleanup, "fork failed"); } else if (pid == 0) { #ifdef UCLINUX if (self_exec(av[0], "") < 0) tst_brkm(TBROK, cleanup, "self_exec failed"); #else do_child(); #endif } TST_SAFE_CHECKPOINT_WAIT(cleanup, 0); TEST(execve(TEST_APP, argv, env)); if (TEST_ERRNO != ETXTBSY) tst_resm(TFAIL | TTERRNO, "execve succeeded, expected failure"); else tst_resm(TPASS | TTERRNO, "execve failed as expected"); TST_SAFE_CHECKPOINT_WAKE(cleanup, 0); SAFE_WAIT(cleanup, NULL); } cleanup(); tst_exit(); }
/*
 * Stress test: repeatedly creates/destroys inotify instances that watch
 * files which a forked child keeps creating and unlinking in parallel,
 * to provoke races in the kernel's inotify teardown path.  Passing
 * means the kernel simply survived TEARDOWNS iterations.
 */
static void verify_inotify(void)
{
	int inotify_fd, fd;
	pid_t pid;
	int i, tests;

	pid = SAFE_FORK();
	if (pid == 0) {
		/* child: churn the watched files until SIGKILLed below */
		while (1) {
			for (i = 0; i < FILES; i++) {
				fd = SAFE_OPEN(names[i], O_CREAT | O_RDWR, 0600);
				SAFE_CLOSE(fd);
			}
			for (i = 0; i < FILES; i++)
				SAFE_UNLINK(names[i]);
		}
	}

	for (tests = 0; tests < TEARDOWNS; tests++) {
		/*
		 * NOTE(review): inotify_init1() flags are normally spelled
		 * IN_NONBLOCK rather than O_NONBLOCK (same value on Linux) —
		 * confirm against the myinotify_init1 wrapper.
		 */
		inotify_fd = myinotify_init1(O_NONBLOCK);
		if (inotify_fd < 0)
			tst_brk(TBROK | TERRNO, "inotify_init failed");

		for (i = 0; i < FILES; i++) {
			/*
			 * Both failure and success are fine since
			 * files are being deleted in parallel - this
			 * is what provokes the race we want to test
			 * for...
			 */
			myinotify_add_watch(inotify_fd, names[i], IN_MODIFY);
		}
		SAFE_CLOSE(inotify_fd);
	}

	/* We survived for given time - test succeeded */
	tst_res(TPASS, "kernel survived inotify beating");

	/* Kill the child creating / deleting files and wait for it */
	SAFE_KILL(pid, SIGKILL);
	SAFE_WAIT(NULL);
}
static void doparent(void) { int fd; /* Wait for child lock */ TST_SAFE_CHECKPOINT_WAIT(cleanup, 0); fd = SAFE_OPEN(cleanup, filename, O_RDWR | O_NONBLOCK); ftruncate_expect_fail(fd, RECLEN, "offset before lock"); ftruncate_expect_fail(fd, recstart + RECLEN/2, "offset in lock"); ftruncate_expect_success(fd, recstart + RECLEN, "offset after lock"); /* wake child and wait for it to exit (to free record lock) */ TST_SAFE_CHECKPOINT_WAKE(cleanup, 0); SAFE_WAIT(NULL, NULL); ftruncate_expect_success(fd, recstart + RECLEN/2, "offset in lock"); ftruncate_expect_success(fd, recstart, "offset before lock"); ftruncate_expect_success(fd, recstart + RECLEN, "offset after lock"); SAFE_CLOSE(NULL, fd); }
int main(int ac, char **av) { pid_t pid1; int lc; int rval; tst_parse_opts(ac, av, NULL, NULL); setup(); for (lc = 0; TEST_LOOPING(lc); ++lc) { tst_count = 0; for (testno = 0; testno < TST_TOTAL; ++testno) { pid1 = fork(); //call to fork() if (pid1 == -1) { tst_brkm(TFAIL | TERRNO, cleanup, "fork failed"); } else if (pid1 == 0) { switch (unshare(CLONE_FILES)) { case 0: printf("unshare with CLONE_FILES call " "succeeded\n"); rval = 0; break; case -1: if (errno == ENOSYS) rval = 1; else { perror("unshare failed"); rval = 2; } } exit(rval); } else { SAFE_WAIT(cleanup, &rval); if (rval != 0 && WIFEXITED(rval)) { switch (WEXITSTATUS(rval)) { case 1: tst_brkm(TCONF, cleanup, "unshare not supported in " "kernel"); break; default: tst_brkm(TFAIL, cleanup, "unshare failed"); } } } pid1 = fork(); if (pid1 == -1) { tst_brkm(TFAIL | TERRNO, cleanup, "fork failed"); } else if (pid1 == 0) { switch (unshare(CLONE_FS)) { case 0: printf("unshare with CLONE_FS call " "succeeded\n"); rval = 0; break; case -1: if (errno == ENOSYS) rval = 1; else { perror("unshare failed"); rval = 2; } } exit(rval); } else { SAFE_WAIT(cleanup, &rval); if (rval != 0 && WIFEXITED(rval)) { switch (WEXITSTATUS(rval)) { case 1: tst_brkm(TCONF, cleanup, "unshare not supported in " "kernel"); break; default: tst_brkm(TFAIL, cleanup, "unshare failed"); } } } pid1 = fork(); if (pid1 == -1) { tst_brkm(TFAIL | TERRNO, cleanup, "fork() failed."); } else if (pid1 == 0) { switch (unshare(CLONE_NEWNS)) { case 0: printf("unshare call with CLONE_NEWNS " "succeeded\n"); rval = 0; break; case -1: if (errno == ENOSYS) rval = 1; else { perror("unshare failed"); rval = 2; } } exit(rval); } else { SAFE_WAIT(cleanup, &rval); if (rval != 0 && WIFEXITED(rval)) { switch (WEXITSTATUS(rval)) { case 1: tst_brkm(TCONF, cleanup, "unshare not supported in " "kernel"); break; default: tst_brkm(TFAIL, cleanup, "unshare failed"); } } } } } cleanup(); tst_exit(); }
static void child(void) { size_t i; char *ptr; unsigned int usage, old_limit, old_memsw_limit; int status, pid, retries = 0; SAFE_MKDIR(cgroup_path, 0777); SAFE_FILE_PRINTF(tasks_path, "%i", getpid()); ptr = SAFE_MMAP(NULL, PAGES * page_size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); for (i = 0; i < PAGES * page_size; i++) ptr[i] = 'a'; if (madvise(ptr, PAGES * page_size, MADV_FREE)) { if (errno == EINVAL) tst_brk(TCONF | TERRNO, "MADV_FREE is not supported"); tst_brk(TBROK | TERRNO, "MADV_FREE failed"); } if (ptr[page_size] != 'a') tst_res(TFAIL, "MADV_FREE pages were freed immediatelly"); else tst_res(TPASS, "MADV_FREE pages were not freed immediatelly"); ptr[TOUCHED_PAGE1 * page_size] = 'b'; ptr[TOUCHED_PAGE2 * page_size] = 'b'; usage = 8 * 1024 * 1024; tst_res(TINFO, "Setting memory limits to %u %u", usage, 2 * usage); SAFE_FILE_SCANF(limit_in_bytes_path, "%u", &old_limit); if (swap_accounting_enabled) SAFE_FILE_SCANF(memsw_limit_in_bytes_path, "%u", &old_memsw_limit); SAFE_FILE_PRINTF(limit_in_bytes_path, "%u", usage); if (swap_accounting_enabled) SAFE_FILE_PRINTF(memsw_limit_in_bytes_path, "%u", 2 * usage); do { sleep_between_faults++; pid = SAFE_FORK(); if (!pid) memory_pressure_child(); tst_res(TINFO, "Memory hungry child %i started, try %i", pid, retries); SAFE_WAIT(&status); } while (retries++ < 10 && count_freed(ptr) == 0); char map[PAGES+1]; unsigned int freed = 0; unsigned int corrupted = 0; for (i = 0; i < PAGES; i++) { char exp_val; if (ptr[i * page_size]) { exp_val = 'a'; map[i] = 'p'; } else { exp_val = 0; map[i] = '_'; freed++; } if (i != TOUCHED_PAGE1 && i != TOUCHED_PAGE2) { if (check_page(ptr + i * page_size, exp_val)) { map[i] = '?'; corrupted++; } } else { if (check_page_baaa(ptr + i * page_size)) { map[i] = '?'; corrupted++; } } } map[PAGES] = '\0'; tst_res(TINFO, "Memory map: %s", map); if (freed) tst_res(TPASS, "Pages MADV_FREE were freed on low memory"); else tst_res(TFAIL, "No MADV_FREE page was freed on low memory"); 
if (corrupted) tst_res(TFAIL, "Found corrupted page"); else tst_res(TPASS, "All pages have expected content"); if (swap_accounting_enabled) SAFE_FILE_PRINTF(memsw_limit_in_bytes_path, "%u", old_memsw_limit); SAFE_FILE_PRINTF(limit_in_bytes_path, "%u", old_limit); SAFE_MUNMAP(ptr, PAGES); exit(0); }
// Hand <buf> over to the streaming_readfunc(), so it can be added into
// the ongoing streaming PUT.  You must call stream_open() first.
//
// NOTE: Doing this a little differently from the test_aws.c (case 12)
//     approach.  We're forcing *synchronous* interaction with the
//     readfunc, because we don't want caller's <buf> to go out of scope
//     until the readfunc is finished with it.
//
// Handshake with the CURL readfunc: install the caller's buffer into
// the IOBuf, post <iob_full> to let the readfunc consume it, then block
// on <iob_empty> (bounded by <put_timeout_sec>) until it has drained.
//
int stream_put(ObjectStream* os, const char* buf, size_t size)
{
	//  static const int put_timeout_sec = 10; /* totally made up out of thin air */
	static const int put_timeout_sec = 20; /* totally made up out of thin air */

	LOG(LOG_INFO, "(%08lx) entry\n", (size_t)os);

	// guard: stream must be open ...
	if (! (os->flags & OSF_OPEN)) {
		LOG(LOG_ERR, "(%08lx) %s isn't open\n", (size_t)os, os->url);
		errno = EINVAL;	/* ?? */
		return -1;
	}
	// ... and open for writing
	if (! (os->flags & OSF_WRITING)) {
		LOG(LOG_ERR, "(%08lx) %s isn't open for writing\n", (size_t)os, os->url);
		errno = EINVAL;	/* ?? */
		return -1;
	}

	IOBuf* b = &os->iob;	// shorthand

#if 0
	// QUESTION: Does it improve performance to copy the caller's buffer,
	//     so we can return immediately?
	//
	// ANSWER: No.  (Kept disabled; see the measured answer above.)

	LOG(LOG_INFO, "(%08lx) waiting for IOBuf\n", (size_t)os);
	// readfunc done with IOBuf?
	SAFE_WAIT(&os->iob_empty, put_timeout_sec, os);
	// SAFE_WAIT_KILL(&os->iob_empty, put_timeout_sec, os);

	// grow the scratch copy-buffer only when <size> exceeds it
	static size_t tmp_size = 0;
	static char* tmp_buf = NULL;
	if (size > tmp_size) {
		if (tmp_size)
			free(tmp_buf);
		tmp_size = size;
		tmp_buf = (char*) malloc(size);
		if (! tmp_buf) {
			errno = ENOMEM;
			return -1;
		}
	}
	memcpy(tmp_buf, buf, size);

	// install buffer into IOBuf
	aws_iobuf_reset(b);	// doesn't affect <user_data>
	aws_iobuf_append_static(b, tmp_buf, size);
	LOG(LOG_INFO, "(%08lx) installed buffer (%ld bytes) for readfn\n", (size_t)os, size);

	// let readfunc move data
	POST(&os->iob_full);

#else
	// install buffer into IOBuf
	aws_iobuf_reset(b);	// doesn't affect <user_data>
	aws_iobuf_append_static(b, (char*)buf, size);
	LOG(LOG_INFO, "(%08lx) installed buffer (%ld bytes) for readfn\n", (size_t)os, size);

	// let readfunc move data
	POST(&os->iob_full);

	LOG(LOG_INFO, "(%08lx) waiting for IOBuf\n", (size_t)os);
	// readfunc done with IOBuf?
	SAFE_WAIT(&os->iob_empty, put_timeout_sec, os);
	// SAFE_WAIT_KILL(&os->iob_empty, put_timeout_sec, os);
#endif

	LOG(LOG_INFO, "(%08lx) buffer done\n", (size_t)os); // readfunc done with IOBuf?
	return size;
}