/*
 * Create an empty file, extend it to 2 bytes with ftruncate(), then
 * shrink it to 1 byte.  Exercises the file system's truncate path in
 * both the grow and shrink directions on a tiny file.
 */
static void
shrinkfile(const atf_tc_t *tc, const char *mp)
{
	int fd;

	FSTEST_ENTER();
	RL(fd = rump_sys_open("file", O_RDWR|O_CREAT|O_TRUNC, 0666));
	RL(rump_sys_ftruncate(fd, 2));
	RL(rump_sys_ftruncate(fd, 1));
	/* check the close return too, consistent with the other tests */
	RL(rump_sys_close(fd));
	FSTEST_EXIT();
}
/*
 * Test advisory fcntl(F_SETLK) locking across two rump lwps ("processes"):
 * lwp1 takes a write lock on a byte range, a forked lwp2 must fail to
 * lock the same range with EAGAIN, and after lwp1 unlocks, lwp2 must
 * succeed.  Lock ownership is per-lwp, so the lwproc switches below are
 * what make the conflict (and its resolution) observable.
 */
static void
fcntl_lock(const atf_tc_t *tc, const char *mp)
{
	int fd, fd2;
	struct flock l;
	struct lwp *lwp1, *lwp2;

	FSTEST_ENTER();
	/* Lock 1024 bytes starting 1024 bytes past EOF (SEEK_END). */
	l.l_pid = 0;
	l.l_start = l.l_len = 1024;
	/*
	 * NOTE(review): OR-ing F_RDLCK|F_WRLCK is unusual; on NetBSD the
	 * result equals F_WRLCK, so this presumably requests a write lock
	 * — confirm against the upstream test before changing.
	 */
	l.l_type = F_RDLCK | F_WRLCK;
	l.l_whence = SEEK_END;

	lwp1 = rump_pub_lwproc_curlwp();
	RL(fd = rump_sys_open(TESTFILE, O_RDWR | O_CREAT, 0755));
	RL(rump_sys_ftruncate(fd, 8192));

	/* PR kern/43321 */
	if (FSTYPE_ZFS(tc))
		atf_tc_expect_fail("PR kern/47656: Test known to be broken");
	RL(rump_sys_fcntl(fd, F_SETLK, &l));

	/* Next, we fork and try to lock the same area */
	RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));
	lwp2 = rump_pub_lwproc_curlwp();
	RL(fd2 = rump_sys_open(TESTFILE, O_RDWR, 0));
	/* Non-blocking F_SETLK on a conflicting range must fail. */
	ATF_REQUIRE_ERRNO(EAGAIN, rump_sys_fcntl(fd2, F_SETLK, &l));

	/* Switch back and unlock... */
	rump_pub_lwproc_switch(lwp1);
	l.l_type = F_UNLCK;
	RL(rump_sys_fcntl(fd, F_SETLK, &l));

	/* ... and try to lock again */
	rump_pub_lwproc_switch(lwp2);
	l.l_type = F_RDLCK | F_WRLCK;
	RL(rump_sys_fcntl(fd2, F_SETLK, &l));

	/* Release lwp2's fd and the lwp itself, then close lwp1's fd. */
	RL(rump_sys_close(fd2));
	rump_pub_lwproc_releaselwp();

	RL(rump_sys_close(fd));

	FSTEST_EXIT();
}
/*
 * Write 'count' bytes to a fresh file, then reopen it and overwrite the
 * same range, optionally truncating to zero first.  Exercises the
 * in-place-overwrite vs. rewrite-after-truncate paths of the fs.
 */
static void
overwritebody(const atf_tc_t *tc, off_t count, bool dotrunc)
{
	char *buf;
	int fd;

	REQUIRE_LIBC(buf = malloc(count), NULL);
	/* don't write indeterminate heap contents to the file */
	memset(buf, 0, (size_t)count);
	FSTEST_ENTER();
	RL(fd = rump_sys_open("testi", O_CREAT | O_RDWR, 0666));
	ATF_REQUIRE_EQ(rump_sys_write(fd, buf, count), count);
	RL(rump_sys_close(fd));

	RL(fd = rump_sys_open("testi", O_RDWR));
	if (dotrunc)
		RL(rump_sys_ftruncate(fd, 0));
	ATF_REQUIRE_EQ(rump_sys_write(fd, buf, count), count);
	RL(rump_sys_close(fd));
	free(buf);	/* was leaked */
	FSTEST_EXIT();
}
/*
 * Extend a file to 'seekcnt' bytes with ftruncate(), append TESTSTR,
 * and verify both the data (via pread at the old EOF) and the final
 * size.  A nonzero seekcnt opens the file O_APPEND so the write lands
 * at EOF; zero gives a plain write at offset 0.
 */
static void
extendbody(const atf_tc_t *tc, off_t seekcnt)
{
	char buf[TESTSZ+1];
	struct stat sb;
	int fd;

	FSTEST_ENTER();
	/* O_CREAT requires the mode argument; omitting it reads garbage */
	RL(fd = rump_sys_open("testfile",
	    O_CREAT | O_RDWR | (seekcnt ? O_APPEND : 0), 0666));
	RL(rump_sys_ftruncate(fd, seekcnt));
	RL(rump_sys_fstat(fd, &sb));
	ATF_REQUIRE_EQ(sb.st_size, seekcnt);

	ATF_REQUIRE_EQ(rump_sys_write(fd, TESTSTR, TESTSZ), TESTSZ);
	ATF_REQUIRE_EQ(rump_sys_pread(fd, buf, TESTSZ, seekcnt), TESTSZ);
	buf[TESTSZ] = '\0';	/* pread does not NUL-terminate */
	ATF_REQUIRE_STREQ(buf, TESTSTR);

	RL(rump_sys_fstat(fd, &sb));
	ATF_REQUIRE_EQ(sb.st_size, (off_t)TESTSZ + seekcnt);
	RL(rump_sys_close(fd));
	FSTEST_EXIT();
}
/*
 * Test that F_GETLK reports the correct owner pid for multiple
 * non-overlapping locks held by different processes.  Four lwps each
 * lock a distinct range of one file; a fifth lwp holds no lock.  Each
 * lwp then enumerates (via fcntl_getlocks) every lock that would block
 * it and the result set is compared against the expected set.
 */
static void
fcntl_getlock_pids(const atf_tc_t *tc, const char *mp)
{
	/* test non-overlaping ranges */
	struct flock expect[4];
	/*
	 * Positional initializers: presumably NetBSD's struct flock field
	 * order {l_start, l_len, l_pid, l_type, l_whence} — confirm if
	 * porting; designated initializers would be safer.
	 */
	const struct flock lock[4] = {
		{ 0, 2, 0, F_WRLCK, SEEK_SET },
		{ 2, 1, 0, F_WRLCK, SEEK_SET },
		{ 7, 5, 0, F_WRLCK, SEEK_SET },
		{ 4, 3, 0, F_WRLCK, SEEK_SET },
	};

    /* Add extra element to make sure recursion doesn't stop at array end */
	struct flock result[5];

	/* Add 5th process */
	int fd[5];
	pid_t pid[5];
	struct lwp *lwp[5];

	unsigned int i, j;
	const off_t sz = 8192;
	int omode  = 0755;
	int oflags = O_RDWR | O_CREAT;

	memcpy(expect, lock, sizeof(lock));

	FSTEST_ENTER();

	/*
	 * First, we create 4 processes and let each lock a range of the
	 * file.  Note that the third and fourth processes lock in
	 * "reverse" order, i.e. the greater pid locks a range before
	 * the lesser pid.
	 * Then, we create 5th process which doesn't lock anything.
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		RZ(rump_pub_lwproc_rfork(RUMP_RFCFDG));

		lwp[i] = rump_pub_lwproc_curlwp();
		pid[i] = rump_sys_getpid();

		/* Only the first iteration creates the file. */
		RL(fd[i] = rump_sys_open(TESTFILE, oflags, omode));
		oflags = O_RDWR;
		omode  = 0;

		RL(rump_sys_ftruncate(fd[i], sz));

		if (FSTYPE_ZFS(tc))
			atf_tc_expect_fail("PR kern/47656: Test known to be "
			    "broken");
		if (i < __arraycount(lock)) {
			RL(rump_sys_fcntl(fd[i], F_SETLK, &lock[i]));
			/* record which pid ended up owning this range */
			expect[i].l_pid = pid[i];
		}
	}

	/* Sort so comparisons below are order-independent. */
	qsort(expect, __arraycount(expect), sizeof(expect[0]), &flock_compare);

	/*
	 * In the context of each process, recursively find all locks
	 * that would block the current process. Processes 1-4 don't
	 * see their own lock, we insert it to simplify checks.
	 * Process 5 sees all 4 locks.
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		unsigned int nlocks;

		rump_pub_lwproc_switch(lwp[i]);

		memset(result, 0, sizeof(result));
		nlocks = fcntl_getlocks(fd[i], 0, sz,
		    result, result + __arraycount(result));

		if (i < __arraycount(lock)) {
			/* re-insert our own lock, which F_GETLK omits */
			ATF_REQUIRE(nlocks < __arraycount(result));
			result[nlocks] = lock[i];
			result[nlocks].l_pid = pid[i];
			nlocks++;
		}

		ATF_CHECK_EQ(nlocks, __arraycount(expect));

		qsort(result, nlocks, sizeof(result[0]), &flock_compare);

		/* Field-by-field comparison of each reported lock. */
		for (j = 0; j < nlocks; j++) {
			ATF_CHECK_EQ(result[j].l_start,  expect[j].l_start );
			ATF_CHECK_EQ(result[j].l_len,    expect[j].l_len   );
			ATF_CHECK_EQ(result[j].l_pid,    expect[j].l_pid   );
			ATF_CHECK_EQ(result[j].l_type,   expect[j].l_type  );
			ATF_CHECK_EQ(result[j].l_whence, expect[j].l_whence);
		}
	}

	/*
	 * Release processes.  This also releases the fds and locks
	 * making fs unmount possible
	 */
	for (i = 0; i < __arraycount(lwp); i++) {
		rump_pub_lwproc_switch(lwp[i]);
		rump_pub_lwproc_releaselwp();
	}

	FSTEST_EXIT();
}