Example no. 1
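The test bodies in these examples lean on two check macros assumed to come from the test suite's shared helper header. A minimal sketch of what they are taken to expand to, so the excerpts can be read on their own (not the verbatim definitions):

#include <atf-c.h>
#include <errno.h>
#include <string.h>

/* RL: require a libc-style call to succeed, i.e. not return -1 */
#define RL(expr) \
	ATF_REQUIRE_MSG((expr) != -1, "%s: %s", #expr, strerror(errno))

/* RZ: require zero, for rump_pub_*() routines that return an errno value */
#define RZ(expr) \
do { \
	int RZ_rv = (expr); \
	ATF_REQUIRE_MSG(RZ_rv == 0, "%s: %s", #expr, strerror(RZ_rv)); \
} while (0)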
ATF_TC_BODY(makecn, tc)
{
	struct componentname *cn;
	char pathstr[MAXPATHLEN] = TESTFILE;
	struct vnode *vp;
	extern struct vnode *rumpns_rootvnode;

	rump_init();

	/*
	 * Strategy is to create a componentname, edit the passed
	 * string, and then do a lookup with the componentname.
	 */
	RL(rump_sys_mkdir("/" TESTFILE, 0777));

	/* need stable lwp for componentname */
	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));

	/* try it once with the right path */
	cn = rump_pub_makecn(RUMP_NAMEI_LOOKUP, 0, pathstr, strlen(pathstr),
	    rump_pub_cred_create(0, 0, 0, NULL), rump_pub_lwproc_curlwp());
	RZ(RUMP_VOP_LOOKUP(rumpns_rootvnode, &vp, cn));
	rump_pub_freecn(cn, RUMPCN_FREECRED);

	/* and then with modification-in-the-middle */
	cn = rump_pub_makecn(RUMP_NAMEI_LOOKUP, 0, pathstr, strlen(pathstr),
	    rump_pub_cred_create(0, 0, 0, NULL), rump_pub_lwproc_curlwp());
	strcpy(pathstr, "/muuta");
	RZ(RUMP_VOP_LOOKUP(rumpns_rootvnode, &vp, cn));
	rump_pub_freecn(cn, RUMPCN_FREECRED);
}
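Each ATF_TC_BODY in these examples is only the body of a test case. To build and run one, the test program also needs the matching ATF_TC declaration, a head that sets the description, and the test-program entry point. A minimal sketch of that wiring for makecn, with an illustrative description string (not taken from the original source):

#include <atf-c.h>

ATF_TC(makecn);
ATF_TC_HEAD(makecn, tc)
{
	/* description text is illustrative */
	atf_tc_set_md_var(tc, "descr", "create and use a componentname");
}

/* ATF_TC_BODY(makecn, tc) { ... } as shown above */

ATF_TP_ADD_TCS(tp)
{
	ATF_TP_ADD_TC(tp, makecn);
	return atf_no_error();
}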
ATF_TC_BODY(lwps, tc)
{
	struct lwp *l[LOOPS];
	pid_t mypid;
	struct lwp *l_orig;
	int i;

	rump_init();

	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	mypid = rump_sys_getpid();
	RL(rump_sys_setuid(375));

	l_orig = rump_pub_lwproc_curlwp();
	for (i = 0; i < LOOPS; i++) {
		mypid = rump_sys_getpid();
		ATF_REQUIRE(mypid != -1 && mypid != 0);
		RZ(rump_pub_lwproc_newlwp(mypid));
		l[i] = rump_pub_lwproc_curlwp();
		ATF_REQUIRE_EQ(rump_sys_getuid(), 375);
	}

	rump_pub_lwproc_switch(l_orig);
	rump_pub_lwproc_releaselwp();
	for (i = 0; i < LOOPS; i++) {
		rump_pub_lwproc_switch(l[i]);
		ATF_REQUIRE_EQ(rump_sys_getpid(), mypid);
		ATF_REQUIRE_EQ(rump_sys_getuid(), 375);
		rump_pub_lwproc_releaselwp();
		ATF_REQUIRE_EQ(rump_sys_getpid(), 1);
		ATF_REQUIRE_EQ(rump_sys_getuid(), 0);
	}

	ATF_REQUIRE_EQ(rump_pub_lwproc_newlwp(mypid), ESRCH);
}
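The loop above depends on a test-local LOOPS constant that is not part of this excerpt; any small positive value works for illustration. As a hedged distillation of the lwp lifecycle the test exercises (assuming rump_init() has already run and a process context exists):

#define LOOPS 12	/* assumed value for illustration; the original constant is not shown */

/* Attach one extra lwp to the current rump process, then retire it. */
static void
lwp_roundtrip(void)
{

	/* create a new lwp in the current process; it becomes the current lwp */
	RZ(rump_pub_lwproc_newlwp(rump_sys_getpid()));
	/* ... system calls here run in the new lwp's context ... */
	/* release the current lwp; execution falls back to the implicit context */
	rump_pub_lwproc_releaselwp();
}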
ATF_TC_BODY(proccreds, tc)
{
	struct lwp *l1, *l2;

	rump_init();
	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	l1 = rump_pub_lwproc_curlwp();
	RZ(rump_pub_lwproc_newlwp(rump_sys_getpid()));

	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	l2 = rump_pub_lwproc_curlwp();

	RL(rump_sys_setuid(22));
	ATF_REQUIRE_EQ(rump_sys_getuid(), 22);

	rump_pub_lwproc_switch(l1);
	ATF_REQUIRE_EQ(rump_sys_getuid(), 0); /* from parent, proc0 */
	RL(rump_sys_setuid(11));
	ATF_REQUIRE_EQ(rump_sys_getuid(), 11);

	rump_pub_lwproc_switch(l2);
	ATF_REQUIRE_EQ(rump_sys_getuid(), 22);
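	/* a new lwp in the same process must see the same per-process credentials */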
	RZ(rump_pub_lwproc_newlwp(rump_sys_getpid()));
	ATF_REQUIRE_EQ(rump_sys_getuid(), 22);
}
ATF_TC_BODY(rfork, tc)
{
	struct stat sb;
	struct lwp *l, *l2;
	int fd;

	RZ(rump_init());

	ATF_REQUIRE_EQ(rump_pub_lwproc_rfork(RUMP_RFFDG|RUMP_RFCFDG), EINVAL);

	RZ(rump_pub_lwproc_rfork(0));
	l = rump_pub_lwproc_curlwp();

	RL(fd = rump_sys_open("/file", O_RDWR | O_CREAT, 0777));

	/* ok, first check that rfork(RUMP_RFCFDG) does *not* preserve fds */
	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	ATF_REQUIRE_ERRNO(EBADF, rump_sys_write(fd, &fd, sizeof(fd)) == -1);

	/* then check that rfork(0) does */
	rump_pub_lwproc_switch(l);
	RZ(rump_pub_lwproc_rfork(0));
	ATF_REQUIRE_EQ(rump_sys_write(fd, &fd, sizeof(fd)), sizeof(fd));
	RL(rump_sys_fstat(fd, &sb));
	l2 = rump_pub_lwproc_curlwp();

	/*
	 * check that the shared fd table is really shared by
	 * closing fd in parent
	 */
	rump_pub_lwproc_switch(l);
	RL(rump_sys_close(fd));
	rump_pub_lwproc_switch(l2);
	ATF_REQUIRE_ERRNO(EBADF, rump_sys_fstat(fd, &sb) == -1);

	/* redo, this time copying the fd table instead of sharing it */
	rump_pub_lwproc_releaselwp();
	rump_pub_lwproc_switch(l);
	RL(fd = rump_sys_open("/file", O_RDWR, 0777));
	RZ(rump_pub_lwproc_rfork(RUMP_RFFDG));
	ATF_REQUIRE_EQ(rump_sys_write(fd, &fd, sizeof(fd)), sizeof(fd));
	RL(rump_sys_fstat(fd, &sb));
	l2 = rump_pub_lwproc_curlwp();

	/* check that the fd table is copied */
	rump_pub_lwproc_switch(l);
	RL(rump_sys_close(fd));
	rump_pub_lwproc_switch(l2);
	RL(rump_sys_fstat(fd, &sb));
	ATF_REQUIRE_EQ(sb.st_size, sizeof(fd));
}
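For reference, the rfork test above walks through three descriptor-table behaviours: rfork(0) shares the parent's fd table, RUMP_RFFDG copies it, RUMP_RFCFDG starts with an empty one, and combining RUMP_RFFDG with RUMP_RFCFDG is rejected with EINVAL. A hedged convenience wrapper (not part of the original test; names are illustrative) that spells out these policies:

/* fd-table policy for a new rump process; values mirror the flags used above */
enum fdtable_policy {
	FDTABLE_SHARE = 0,		/* rfork(0): share the parent's fd table */
	FDTABLE_COPY = RUMP_RFFDG,	/* copy the parent's fd table */
	FDTABLE_EMPTY = RUMP_RFCFDG,	/* start with an empty fd table */
};

/* Fork a new rump process with the given fd-table policy; returns 0 or an errno. */
static int
fork_with_fd_policy(enum fdtable_policy policy)
{

	/* RUMP_RFFDG|RUMP_RFCFDG is contradictory and yields EINVAL, as tested above */
	return rump_pub_lwproc_rfork(policy);
}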
Example no. 5
static void
fcntl_lock(const atf_tc_t *tc, const char *mp)
{
	int fd, fd2;
	struct flock l;
	struct lwp *lwp1, *lwp2;

	FSTEST_ENTER();
	l.l_pid = 0;
	l.l_start = l.l_len = 1024;
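	/* note: lock types are not bit flags; F_RDLCK|F_WRLCK amounts to F_WRLCK here */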
	l.l_type = F_RDLCK | F_WRLCK;
	l.l_whence = SEEK_END;

	lwp1 = rump_pub_lwproc_curlwp();
	RL(fd = rump_sys_open(TESTFILE, O_RDWR | O_CREAT, 0755));
	RL(rump_sys_ftruncate(fd, 8192));

	/* PR kern/43321 */
	if (FSTYPE_ZFS(tc))
		atf_tc_expect_fail("PR kern/47656: Test known to be broken");
	RL(rump_sys_fcntl(fd, F_SETLK, &l));

	/* Next, we fork and try to lock the same area */
	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	lwp2 = rump_pub_lwproc_curlwp();
	RL(fd2 = rump_sys_open(TESTFILE, O_RDWR, 0));
	ATF_REQUIRE_ERRNO(EAGAIN, rump_sys_fcntl(fd2, F_SETLK, &l));

	/* Switch back and unlock... */
	rump_pub_lwproc_switch(lwp1);
	l.l_type = F_UNLCK;
	RL(rump_sys_fcntl(fd, F_SETLK, &l));

	/* ... and try to lock again */
	rump_pub_lwproc_switch(lwp2);
	l.l_type = F_RDLCK | F_WRLCK;
	RL(rump_sys_fcntl(fd2, F_SETLK, &l));

	RL(rump_sys_close(fd2));
	rump_pub_lwproc_releaselwp();

	RL(rump_sys_close(fd));

	FSTEST_EXIT();
}
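The body above also uses helpers from the file-system test framework: FSTEST_ENTER() and FSTEST_EXIT() move into and out of the mount point passed as mp, and FSTYPE_ZFS() reports whether the file system under test is ZFS. A loosely hedged sketch of the first two, assuming they are simple chdir wrappers (not the framework's verbatim definitions):

/* assumed behaviour, shown only so the examples read standalone */
#define FSTEST_ENTER() \
	if (rump_sys_chdir(mp) == -1) \
		atf_tc_fail_errno("chdir to test mount %s", mp)

#define FSTEST_EXIT() \
	if (rump_sys_chdir("/") == -1) \
		atf_tc_fail_errno("chdir back to /")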
ATF_TC_BODY(makelwp, tc)
{
	struct lwp *l;
	pid_t pid;

	rump_init();
	RZ(rump_pub_lwproc_newlwp(0));
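	/* pid 37 does not exist in the freshly bootstrapped rump kernel, hence ESRCH */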
	ATF_REQUIRE_EQ(rump_pub_lwproc_newlwp(37), ESRCH);
	l = rump_pub_lwproc_curlwp();

	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	ATF_REQUIRE(rump_pub_lwproc_curlwp() != l);
	l = rump_pub_lwproc_curlwp();

	RZ(rump_pub_lwproc_newlwp(rump_sys_getpid()));
	ATF_REQUIRE(rump_pub_lwproc_curlwp() != l);

	pid = rump_sys_getpid();
	ATF_REQUIRE(pid != -1 && pid != 0);
}
ATF_TC_BODY(nullswitch, tc)
{
	struct lwp *l;

	rump_init();
	RZ(rump_pub_lwproc_newlwp(0));
	l = rump_pub_lwproc_curlwp();
	rump_pub_lwproc_switch(NULL);
	/* if the lwp remained LP_RUNNING, the next call would panic */
	rump_pub_lwproc_switch(l);
}
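As a hedged illustration of the pattern nullswitch exercises (assuming, as the comment above implies, that switching to NULL only deschedules the current lwp without releasing it):

/* Detach the current lwp from the calling host thread, run fn, then reattach. */
static void
run_without_lwp(void (*fn)(void))
{
	struct lwp *l = rump_pub_lwproc_curlwp();

	rump_pub_lwproc_switch(NULL);	/* deschedule; the lwp itself stays alive */
	fn();				/* runs without a rump lwp bound to this thread */
	rump_pub_lwproc_switch(l);	/* reattach the saved lwp */
}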
Example no. 8
static int
precall(struct ukfs *ukfs, struct lwp **curlwp)
{

	/* save the previous lwp; ensure we start from a pristine context */
	*curlwp = rump_pub_lwproc_curlwp();
	if (*curlwp)
		rump_pub_lwproc_switch(ukfs->ukfs_lwp);
	rump_pub_lwproc_rfork(RUMP_RFCFDG);

	if (rump_sys_chroot(ukfs->ukfs_mountpath) == -1)
		return errno;
	if (rump_sys_chdir(ukfs->ukfs_cwd) == -1)
		return errno;

	return 0;
}
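This precall() is normally paired with a teardown step that releases the temporary process and restores the caller's previous context. A hedged sketch of such a counterpart (an assumption about the library's shape, not verbatim libukfs code):

static void
postcall(struct lwp *curlwp)
{

	/* release the temporary process created by precall() */
	rump_pub_lwproc_releaselwp();
	/* restore whatever lwp the caller had scheduled before, if any */
	if (curlwp)
		rump_pub_lwproc_switch(curlwp);
}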
Example no. 9
static void
fcntl_getlock_pids(const atf_tc_t *tc, const char *mp)
{
	/* test non-overlapping ranges */
	struct flock expect[4];
	const struct flock lock[4] = {
		{ 0, 2, 0, F_WRLCK, SEEK_SET },
		{ 2, 1, 0, F_WRLCK, SEEK_SET },
		{ 7, 5, 0, F_WRLCK, SEEK_SET },
		{ 4, 3, 0, F_WRLCK, SEEK_SET },
	};

	/* Add an extra element to make sure recursion doesn't stop at the array end */
	struct flock result[5];

	/* Add 5th process */
	int fd[5];
	pid_t pid[5];
	struct lwp *lwp[5];

	unsigned int i, j;
	const off_t sz = 8192;
	int omode  = 0755;
	int oflags = O_RDWR | O_CREAT;

	memcpy(expect, lock, sizeof(lock));

	FSTEST_ENTER();

	/*
	 * First, we create 4 processes and let each lock a range of the
	 * file.  Note that the third and fourth processes lock in
	 * "reverse" order, i.e. the greater pid locks a range before
	 * the lesser pid.
	 * Then, we create a 5th process which doesn't lock anything.
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));

		lwp[i] = rump_pub_lwproc_curlwp();
		pid[i] = rump_sys_getpid();

		RL(fd[i] = rump_sys_open(TESTFILE, oflags, omode));
		oflags = O_RDWR;
		omode  = 0;

		RL(rump_sys_ftruncate(fd[i], sz));

		if (FSTYPE_ZFS(tc))
			atf_tc_expect_fail("PR kern/47656: Test known to be "
			    "broken");
		if (i < __arraycount(lock)) {
			RL(rump_sys_fcntl(fd[i], F_SETLK, &lock[i]));
			expect[i].l_pid = pid[i];
		}
	}

	qsort(expect, __arraycount(expect), sizeof(expect[0]), &flock_compare);

	/*
	 * In the context of each process, recursively find all locks
	 * that would block the current process. Processes 1-4 don't
	 * see their own lock; we insert it to simplify the checks.
	 * Process 5 sees all 4 locks.
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		unsigned int nlocks;

		rump_pub_lwproc_switch(lwp[i]);

		memset(result, 0, sizeof(result));
		nlocks = fcntl_getlocks(fd[i], 0, sz,
		    result, result + __arraycount(result));

		if (i < __arraycount(lock)) {
			ATF_REQUIRE(nlocks < __arraycount(result));
			result[nlocks] = lock[i];
			result[nlocks].l_pid = pid[i];
			nlocks++;
		}

		ATF_CHECK_EQ(nlocks, __arraycount(expect));

		qsort(result, nlocks, sizeof(result[0]), &flock_compare);

		for (j = 0; j < nlocks; j++) {
			ATF_CHECK_EQ(result[j].l_start,  expect[j].l_start );
			ATF_CHECK_EQ(result[j].l_len,    expect[j].l_len   );
			ATF_CHECK_EQ(result[j].l_pid,    expect[j].l_pid   );
			ATF_CHECK_EQ(result[j].l_type,   expect[j].l_type  );
			ATF_CHECK_EQ(result[j].l_whence, expect[j].l_whence);
		}
	}

	/*
	 * Release the processes.  This also releases the fds and locks,
	 * making it possible to unmount the file system.
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		rump_pub_lwproc_switch(lwp[i]);
		rump_pub_lwproc_releaselwp();
	}

	FSTEST_EXIT();
}
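The body above calls two helpers that are not part of this excerpt: flock_compare(), used to sort lock arrays by starting offset, and fcntl_getlocks(), which collects the locks that would block the calling process over a byte range. Hedged sketches consistent with how the body uses them, assuming the RL() macro sketched earlier and <string.h> (assumed behaviour, not the verbatim test-suite code; l_len == 0, i.e. lock-to-EOF, is not handled):

/* Order struct flock entries by their starting offset. */
static int
flock_compare(const void *p, const void *q)
{
	off_t a = ((const struct flock *)p)->l_start;
	off_t b = ((const struct flock *)q)->l_start;

	return a < b ? -1 : (a > b ? 1 : 0);
}

/*
 * Recursively gather, via F_GETLK, every lock that would block the
 * calling process in [start, start+len), storing results in [lock, end).
 * Returns the number of locks found.
 */
static unsigned int
fcntl_getlocks(int fildes, off_t start, off_t len,
    struct flock *lock, const struct flock *end)
{
	unsigned int rv = 0;
	struct flock l;
	off_t lend, qend;

	if (lock == end)
		return rv;

	memset(&l, 0, sizeof(l));
	l.l_start = start;
	l.l_len = len;
	l.l_type = F_WRLCK;	/* a write lock conflicts with any other lock */
	l.l_whence = SEEK_SET;
	RL(rump_sys_fcntl(fildes, F_GETLK, &l));

	if (l.l_type == F_UNLCK)
		return rv;	/* nothing blocks us in this range */

	*lock = l;
	rv = 1;

	lend = l.l_start + l.l_len;
	qend = start + len;

	/* recurse into the unexamined pieces before and after the found lock */
	if (l.l_start > start)
		rv += fcntl_getlocks(fildes, start, l.l_start - start,
		    lock + rv, end);
	if (lend < qend)
		rv += fcntl_getlocks(fildes, lend, qend - lend,
		    lock + rv, end);

	return rv;
}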
ATF_TC_BODY(nolwp, tc)
{

	rump_init();
	ATF_REQUIRE_EQ(rump_pub_lwproc_curlwp(), NULL);
}