/*
 * Verify that a rump lwproc child created with rfork inherits the
 * parent's credentials, and that releasing the lwp/proc drops the
 * implicit context back to the root-credentialed bootstrap process.
 */
ATF_TC_BODY(inherit, tc)
{

	rump_init();

	/* Create a process with a clean fd table and become uid 66 in it. */
	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	RL(rump_sys_setuid(66));
	ATF_REQUIRE_EQ(rump_sys_getuid(), 66);

	/* A child forked from here must inherit uid 66. */
	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	ATF_REQUIRE_EQ(rump_sys_getuid(), 66);

	/* release lwp and proc; falls back to the implicit uid 0 context */
	rump_pub_lwproc_releaselwp();
	ATF_REQUIRE_EQ(rump_sys_getuid(), 0);
}
static void flags(const atf_tc_t *tc, const char *mp) { const char *name = "file.test"; int fd, fflags; struct stat st; FSTEST_ENTER(); if ((fd = rump_sys_open(name, O_RDWR|O_CREAT, 0666)) == -1) atf_tc_fail_errno("open"); if (rump_sys_close(fd) == -1) atf_tc_fail_errno("close"); if (rump_sys_stat(name, &st) == -1) atf_tc_fail_errno("stat"); if (FSTYPE_ZFS(tc)) atf_tc_expect_fail("PR kern/47656: Test known to be broken"); if (rump_sys_chflags(name, st.st_flags) == -1) { if (errno == EOPNOTSUPP) atf_tc_skip("file flags not supported by file system"); atf_tc_fail_errno("chflags"); } fflags = st.st_flags | UF_IMMUTABLE; rump_pub_lwproc_rfork(RUMP_RFCFDG); if (rump_sys_setuid(1) == -1) atf_tc_fail_errno("setuid"); fflags |= UF_IMMUTABLE; if (rump_sys_chflags(name, fflags) != -1 || errno != EPERM) atf_tc_fail_errno("chflags"); rump_pub_lwproc_releaselwp(); if (rump_sys_chflags(name, fflags) == -1) atf_tc_fail_errno("chflags"); fflags &= ~UF_IMMUTABLE; if (rump_sys_chflags(name, fflags) == -1) atf_tc_fail_errno("chflags"); if (rump_sys_unlink(name) == -1) atf_tc_fail_errno("unlink"); FSTEST_EXIT(); }
static void fcntl_lock(const atf_tc_t *tc, const char *mp) { int fd, fd2; struct flock l; struct lwp *lwp1, *lwp2; FSTEST_ENTER(); l.l_pid = 0; l.l_start = l.l_len = 1024; l.l_type = F_RDLCK | F_WRLCK; l.l_whence = SEEK_END; lwp1 = rump_pub_lwproc_curlwp(); RL(fd = rump_sys_open(TESTFILE, O_RDWR | O_CREAT, 0755)); RL(rump_sys_ftruncate(fd, 8192)); /* PR kern/43321 */ if (FSTYPE_ZFS(tc)) atf_tc_expect_fail("PR kern/47656: Test known to be broken"); RL(rump_sys_fcntl(fd, F_SETLK, &l)); /* Next, we fork and try to lock the same area */ RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG)); lwp2 = rump_pub_lwproc_curlwp(); RL(fd2 = rump_sys_open(TESTFILE, O_RDWR, 0)); ATF_REQUIRE_ERRNO(EAGAIN, rump_sys_fcntl(fd2, F_SETLK, &l)); /* Switch back and unlock... */ rump_pub_lwproc_switch(lwp1); l.l_type = F_UNLCK; RL(rump_sys_fcntl(fd, F_SETLK, &l)); /* ... and try to lock again */ rump_pub_lwproc_switch(lwp2); l.l_type = F_RDLCK | F_WRLCK; RL(rump_sys_fcntl(fd2, F_SETLK, &l)); RL(rump_sys_close(fd2)); rump_pub_lwproc_releaselwp(); RL(rump_sys_close(fd)); FSTEST_EXIT(); }
/*
 * Verify that releasing the implicit (bootstrap) lwp without ever
 * having forked one is a fatal error: the child process must die
 * with SIGABRT rather than continue running.
 */
ATF_TC_BODY(nolwprelease, tc)
{
	pid_t child;
	int status;

	child = fork();
	if (child == -1)
		atf_tc_fail_errno("fork");

	if (child == 0) {
		/* In the child: this release must abort the process. */
		rump_init();
		rump_pub_lwproc_releaselwp();
		atf_tc_fail("survived");
	} else {
		/* Parent: expect the child to have been killed by SIGABRT. */
		wait(&status);
		ATF_REQUIRE(WIFSIGNALED(status));
		ATF_REQUIRE_EQ(WTERMSIG(status), SIGABRT);
	}
}
/*
 * Test that F_GETLK reports the correct owner pid for each of several
 * non-overlapping byte-range locks held by different rump processes.
 *
 * NOTE(review): the flock initializers below are positional and rely on
 * NetBSD's struct flock field order (l_start, l_len, l_pid, l_type,
 * l_whence) — confirm if porting.
 */
static void
fcntl_getlock_pids(const atf_tc_t *tc, const char *mp)
{
	/* test non-overlaping ranges */
	struct flock expect[4];
	const struct flock lock[4] = {
		{ 0, 2, 0, F_WRLCK, SEEK_SET },
		{ 2, 1, 0, F_WRLCK, SEEK_SET },
		{ 7, 5, 0, F_WRLCK, SEEK_SET },
		{ 4, 3, 0, F_WRLCK, SEEK_SET },
	};

	/* Add extra element to make sure recursion does't stop at array end */
	struct flock result[5];

	/* Add 5th process */
	int fd[5];
	pid_t pid[5];
	struct lwp *lwp[5];

	unsigned int i, j;
	const off_t sz = 8192;
	int omode  = 0755;
	int oflags = O_RDWR | O_CREAT;

	memcpy(expect, lock, sizeof(lock));

	FSTEST_ENTER();

	/*
	 * First, we create 4 processes and let each lock a range of the
	 * file.  Note that the third and fourth processes lock in
	 * "reverse" order, i.e. the greater pid locks a range before
	 * the lesser pid.
	 * Then, we create 5th process which doesn't lock anything.
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));

		lwp[i] = rump_pub_lwproc_curlwp();
		pid[i] = rump_sys_getpid();

		RL(fd[i] = rump_sys_open(TESTFILE, oflags, omode));
		/* only the first open may create; subsequent ones reuse */
		oflags = O_RDWR;
		omode  = 0;

		RL(rump_sys_ftruncate(fd[i], sz));

		if (FSTYPE_ZFS(tc))
			atf_tc_expect_fail("PR kern/47656: Test known to be "
			    "broken");
		if (i < __arraycount(lock)) {
			RL(rump_sys_fcntl(fd[i], F_SETLK, &lock[i]));
			/* remember which pid owns this range */
			expect[i].l_pid = pid[i];
		}
	}

	/* canonical order for comparison against each process's view */
	qsort(expect, __arraycount(expect), sizeof(expect[0]), &flock_compare);

	/*
	 * In the context of each process, recursively find all locks
	 * that would block the current process. Processes 1-4 don't
	 * see their own lock, we insert it to simplify checks.
	 * Process 5 sees all 4 locks.
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		unsigned int nlocks;

		rump_pub_lwproc_switch(lwp[i]);
		memset(result, 0, sizeof(result));
		nlocks = fcntl_getlocks(fd[i], 0, sz,
		    result, result + __arraycount(result));

		if (i < __arraycount(lock)) {
			/* re-insert this process's own lock for comparison */
			ATF_REQUIRE(nlocks < __arraycount(result));
			result[nlocks] = lock[i];
			result[nlocks].l_pid = pid[i];
			nlocks++;
		}

		ATF_CHECK_EQ(nlocks, __arraycount(expect));

		qsort(result, nlocks, sizeof(result[0]), &flock_compare);

		/* every field of every reported lock must match */
		for (j = 0; j < nlocks; j++) {
			ATF_CHECK_EQ(result[j].l_start,  expect[j].l_start );
			ATF_CHECK_EQ(result[j].l_len,    expect[j].l_len   );
			ATF_CHECK_EQ(result[j].l_pid,    expect[j].l_pid   );
			ATF_CHECK_EQ(result[j].l_type,   expect[j].l_type  );
			ATF_CHECK_EQ(result[j].l_whence, expect[j].l_whence);
		}
	}

	/*
	 * Release processes. This also releases the fds and locks
	 * making fs unmount possible
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		rump_pub_lwproc_switch(lwp[i]);
		rump_pub_lwproc_releaselwp();
	}

	FSTEST_EXIT();
}