Example #1
File: file.c Project: ren85/jos2006
// Unmap any file pages that no longer represent valid file pages
// when the size of the file as mapped in our address space decreases.
// Harmlessly does nothing if newsize >= oldsize.
static int
funmap(struct Fd* fd, off_t oldsize, off_t newsize, bool dirty)
{
	size_t i;
	char *va;
	int r, ret;

	va = fd2data(fd);

	// Check vpd to see if anything is mapped.
	if (!(vpd[VPD(va)] & PTE_P))
		return 0;

	ret = 0;
	for (i = ROUNDUP(newsize, PGSIZE); i < oldsize; i += PGSIZE)
		if (vpt[VPN(va + i)] & PTE_P) {
			if (dirty
			    && (vpt[VPN(va + i)] & PTE_D)
			    && (r = fsipc_dirty(fd->fd_file.id, i)) < 0)
				ret = r;
			sys_page_unmap(0, va + i);
		}
	return ret;
}
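For context, funmap is the unmapping half of a resize path: the caller first asks the file server to change the file's size, then fixes the mapping to match. A minimal sketch of such a caller, assuming hypothetical helpers fsipc_set_size (tells the server the new size) and fmap (maps newly valid pages):

static int
file_trunc(struct Fd *fd, off_t newsize)
{
	int r;
	off_t oldsize = fd->fd_file.file.f_size;

	// Resize on the server first, so the mapping is never
	// larger than the file it mirrors.
	if ((r = fsipc_set_size(fd->fd_file.id, newsize)) < 0)
		return r;

	// Shrinking: drop pages past the new end. No dirty
	// writeback (dirty == 0); the server already discarded
	// that data.
	if ((r = funmap(fd, oldsize, newsize, 0)) < 0)
		return r;

	// Growing: map the newly valid pages.
	if (oldsize < newsize && (r = fmap(fd, oldsize, newsize)) < 0)
		return r;
	return 0;
}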
Example #2
// Unmap any file pages that no longer represent valid file pages
// when the size of the file as mapped in our address space decreases.
// Harmlessly does nothing if newsize >= oldsize.
static int
funmap(struct Fd* fd, off_t oldsize, off_t newsize, bool dirty)
{
	size_t i;
	char *va;
	int r, ret;

	// For each page that needs to be unmapped, notify the server if
	// the page is dirty and remove the page.
	
	// Hint: Use vpt to check if a page needs to be unmapped.
	
	// LAB 5: Your code here.
	ret = 0;
	va = fd2data(fd);
	for (i = ROUNDUP(newsize, PGSIZE); i < oldsize; i += PGSIZE)
		if (vpt[VPN(va + i)] & PTE_P) {
			if (dirty && (vpt[VPN(va + i)] & PTE_D)
			    && (r = fsipc_dirty(fd->fd_file.id, i)) < 0)
				ret = r;
			sys_page_unmap(0, va + i);
		}
	return ret;
}
Example #3
static int
pipeclose(struct Fd *fd)
{
	return sys_page_unmap(0, fd2data(fd));
}
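Both pipeclose and the funmap code above lean on the usual JOS fd layout: each struct Fd occupies one page in a fixed table, with a parallel region of per-fd data pages, and fd2data simply translates an fd page into its data page. Roughly (constants illustrative, following the common JOS values):

#define FDTABLE		0xD0000000			// one struct Fd per page
#define FILEDATA	(FDTABLE + MAXFD*PGSIZE)	// one data page per fd

static int
fd2num(struct Fd *fd)
{
	return ((uintptr_t) fd - FDTABLE) / PGSIZE;
}

char*
fd2data(struct Fd *fd)
{
	return (char*) (FILEDATA + fd2num(fd) * PGSIZE);
}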
Example #4
void
umain(void)
{
	int p[2], r, pid, i, max;
	void *va;
	struct Fd *fd;
	volatile struct Env *kid;

	cprintf("testing for dup race...\n");
	if ((r = pipe(p)) < 0)
		panic("pipe: %e", r);
	max = 200;
	if ((r = fork()) < 0)
		panic("fork: %e", r);
	if (r == 0) {
		close(p[1]);
		//
		// Now the ref count for p[0] will toggle between 2 and 3
		// as the parent dups and closes it (there's a close implicit in dup).
		//
		// The ref count for p[1] is 1.
		// Thus the ref count for the underlying pipe structure
		// will toggle between 3 and 4.
		//
		// If a clock interrupt catches close between unmapping
		// the pipe structure and unmapping the fd, we'll have
		// a ref count for p[0] of 3, a ref count for p[1] of 1,
		// and a ref count for the pipe structure of 3, which is
		// a no-no.
		//
		// If a clock interrupt catches dup between mapping the
		// fd and mapping the pipe structure, we'll have the same
		// ref counts, still a no-no.
		//
		for (i = 0; i < max; i++) {
			if (pipeisclosed(p[0])) {
				cprintf("RACE: pipe appears closed\n");
				exit();
			}
			sys_yield();
		}
		// block in ipc_recv so we stop being runnable without exiting
		ipc_recv(0, 0, 0);
	}
	pid = r;
	cprintf("pid is %d\n", pid);
	va = 0;
	kid = &envs[ENVX(pid)];
	cprintf("kid is %d\n", kid-envs);
	dup(p[0], 10);
	while (kid->env_status == ENV_RUNNABLE)
		dup(p[0], 10);

	cprintf("child done with loop\n");
	if (pipeisclosed(p[0]))
		panic("somehow the other end of p[0] got closed!");
	if ((r = fd_lookup(p[0], &fd)) < 0)
		panic("cannot look up p[0]: %e", r);
	va = fd2data(fd);
	if (pageref(va) != 3+1)
		cprintf("\nchild detected race\n");
	else
		cprintf("\nrace didn't happen\n", max);
}
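The race described in the comments is an ordering problem, and the usual cure is to order the mappings so every transient state keeps the invariant pageref(pipe) >= pageref(fd). A hedged sketch of that ordering (the real dup and close do more bookkeeping than shown here):

// dup: map the data (pipe) page before the fd page, so a
// preemption in between leaves the pipe count high, never low.
int
dup_sketch(struct Fd *oldfd, struct Fd *newfd)
{
	int r;
	if ((r = sys_page_map(0, fd2data(oldfd), 0, fd2data(newfd),
			      PTE_P | PTE_U | PTE_W)) < 0)
		return r;
	return sys_page_map(0, oldfd, 0, newfd, PTE_P | PTE_U | PTE_W);
}

// close: unmap the fd page before the data page, mirroring dup.
// (This is exactly the order devpipe_close uses in Example #5.)
int
close_sketch(struct Fd *fd)
{
	(void) sys_page_unmap(0, fd);
	return sys_page_unmap(0, fd2data(fd));
}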
Example #5
static int
devpipe_close(struct Fd *fd)
{
	(void) sys_page_unmap(0, fd);
	return sys_page_unmap(0, fd2data(fd));
}
void
umain(void)
{
    int p[2], r, i;
    struct Fd *fd;
    volatile struct Env *kid;

    cprintf("testing for pipeisclosed race...\n");
    if ((r = pipe(p)) < 0)
        panic("pipe: %e", r);
    if ((r = fork()) < 0)
        panic("fork: %e", r);
    if (r == 0) {
        // child just dups and closes repeatedly,
        // yielding so the parent can see
        // the fd state between the two.
        close(p[1]);
        for (i = 0; i < 200; i++) {
            if (i % 10 == 0)
                cprintf("%d.", i);
            // dup, then close.  yield so that other guy will
            // see us while we're between them.
            dup(p[0], 10);
            sys_yield();
            close(10);
            sys_yield();
        }
        exit();
    }

    // We hold both p[0] and p[1] open, so pipeisclosed should
    // never return false.
    //
    // Now the ref count for p[0] will toggle between 2 and 3
    // as the child dups and closes it.
    // The ref count for p[1] is 1.
    // Thus the ref count for the underlying pipe structure
    // will toggle between 3 and 4.
    //
    // If pipeisclosed checks pageref(p[0]) and gets 3, and
    // then the child closes, and then pipeisclosed checks
    // pageref(pipe structure) and gets 3, then it will return true
    // when it shouldn't.
    //
    // If pipeisclosed checks pageref(pipe structure) and gets 3,
    // and then the child dups, and then pipeisclosed checks
    // pageref(p[0]) and gets 3, then it will return true when
    // it shouldn't.
    //
    // So either way, pipeisclosed is going to give a wrong answer.
    //
    kid = &envs[ENVX(r)];
    while (kid->env_status == ENV_RUNNABLE)
        if (pipeisclosed(p[0]) != 0) {
            cprintf("\nRACE: pipe appears closed\n");
            sys_env_destroy(r);
            exit();
        }
    cprintf("child done with loop\n");
    if (pipeisclosed(p[0]))
        panic("somehow the other end of p[0] got closed!");
    if ((r = fd_lookup(p[0], &fd)) < 0)
        panic("cannot look up p[0]: %e", r);
    (void) fd2data(fd);
    cprintf("race didn't happen\n");
}
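The fix this test motivates is to make the two pageref reads effectively atomic: read a counter of how many times the environment has been scheduled before and after the comparison, and retry if it changed in between. A hedged sketch, assuming the usual JOS thisenv pointer and an env_runs field counting how many times the Env has run:

static int
_pipeisclosed(struct Fd *fd, struct Pipe *p)
{
	int n, nn, ret;

	while (1) {
		n = thisenv->env_runs;	// scheduler count before
		ret = pageref(fd) == pageref(p);
		nn = thisenv->env_runs;	// scheduler count after
		if (n == nn)
			return ret;	// no preemption: consistent answer
		// Preempted mid-check; the comparison may have seen
		// a transient state, so try again.
	}
}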
Example #7
// Handle an environment's block cache request.
// BCREQ_FLUSH and BCREQ_MAP can be satisfied right away.
// BCREQ_MAP_RLOCK, BCREQ_MAP_WLOCK, and BCREQ_UNLOCK manipulate the queue
//   of waiting environments.
// At most 8 IPC requests per block are queued and will be handled in the
//   order they arrive (for fairness).
// The 9th and further concurrent requests are not queued; a -E_AGAIN error
//   asks the sending environment to try again later.
//
static void
handle_breq(envid_t envid, int32_t breq)
{
	struct BlockInfo *bip;
	int r;

	// Extract block number and request type from request.
	blocknum_t blocknum = BCREQ_BLOCKNUM(breq);
	int reqtype = BCREQ_TYPE(breq);
	// Check request type.
	if (reqtype < BCREQ_MAP || reqtype > BCREQ_FLUSH_PIPE) {
		ipc_send(envid, -E_NOT_SUPP, 0, 0);
		return;
	}

	if (reqtype == BCREQ_FLUSH_PIPE) {
		ipc_send(envid, 0, 0, 0);
		return;
	}
	if (reqtype == BCREQ_PIPE_ATTACH) {
		struct Fd *writer;
		// 'p' is assumed to be a file-scope pipe descriptor pair
		// (p[1] is the write end) set up elsewhere in this server.
		if ((r = fd_lookup(p[1], &writer, true)) < 0)
			ipc_send(envid, r, 0, 0);
		else
			ipc_send(envid, 0, fd2data(writer), PTE_P | PTE_W | PTE_U | PTE_SHARE);
		return;
	}

	// Handle simple requests.
	if (reqtype == BCREQ_FLUSH) {
		return;
	} else if (reqtype == BCREQ_MAP) {
		r = get_block_info(blocknum, &bip, 0);
		send_block(envid, blocknum, r >= 0 ? bip->bi_initialized : 0);
		return;
	}

	// More complex requests need the block_info pointer.
	if ((r = get_block_info(blocknum, &bip, 1)) < 0) {
		ipc_send(envid, r, 0, 0);
		return;
	}

	if (reqtype == BCREQ_INITIALIZE) {
		int old_initialized = bip->bi_initialized;
		bip->bi_initialized = 1;
		ipc_send(envid, old_initialized, 0, 0);
		return;
	}

	// Warn about one particularly simple deadlock.
	if (reqtype == BCREQ_MAP_WLOCK && bip->bi_nlocked > 0
	    && BI_REQTYPE(bip, 0) == BCREQ_MAP_WLOCK
	    && BI_ENVID(bip, 0) == envid)
		cprintf("bufcache: DEADLOCK: env [%08x] re-requests write lock on block %d!\n", envid, blocknum);

	if (reqtype == BCREQ_UNLOCK || reqtype == BCREQ_UNLOCK_FLUSH) {
		// Ensure that envid is one of the environments
		// currently locking the block
		int n = 0;
		while (n < bip->bi_nlocked && BI_ENVID(bip, n) != envid)
			++n;
		if (n == bip->bi_nlocked) {
			ipc_send(envid, -E_NOT_LOCKED, 0, 0);
			return;
		}

		BI_ENVID(bip, n) = BI_ENVID(bip, 0);
		BI_REQTYPE(bip, n) = BI_REQTYPE(bip, 0);
		++bip->bi_head;
		--bip->bi_nlocked;
		--bip->bi_count;

		r = (reqtype == BCREQ_UNLOCK ? 0 : flush_block(blocknum));
		ipc_send(envid, r, 0, 0);
		// Continue on to process the request queue: this
		// environment's unlock may let the next queued
		// environment take the lock.

	} else if (bip->bi_count == BI_QSIZE) {
		// The queue is full; ask the environment to try again later
		ipc_send(envid, -E_AGAIN, 0, 0);
		return;

	} else {
		BI_ENVID(bip, bip->bi_count) = envid;
		BI_REQTYPE(bip, bip->bi_count) = reqtype;
		++bip->bi_count;
	}

	// Process the request queue
	while (bip->bi_nlocked < bip->bi_count) {
		// If trying to write lock, must be first attempt
		if (BI_REQTYPE(bip, bip->bi_nlocked) == BCREQ_MAP_WLOCK
		    && bip->bi_nlocked > 0)
			break;
		// If trying to read lock, any existing lock must be read
		if (BI_REQTYPE(bip, bip->bi_nlocked) == BCREQ_MAP_RLOCK
		    && bip->bi_nlocked > 0
		    && BI_REQTYPE(bip, 0) != BCREQ_MAP_RLOCK)
			break;
		// If we get here, we can grant the page to this queue element
		send_block(BI_ENVID(bip, bip->bi_nlocked), blocknum,
			   bip->bi_initialized);
		++bip->bi_nlocked;
	}
}
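On the other side of this protocol, a client environment would drive handle_breq with a small IPC wrapper: encode the request type and block number into one word, send it, and wait for the reply page, retrying when the per-block queue (at most 8 entries) is full. A hedged sketch, assuming a BCREQ_MAKE(type, blocknum) encoder symmetric with the BCREQ_TYPE/BCREQ_BLOCKNUM decoders above and a known bufcache_envid:

static int
bc_request(int reqtype, blocknum_t blocknum, void *dstva)
{
	int r;

	while (1) {
		ipc_send(bufcache_envid, BCREQ_MAKE(reqtype, blocknum), 0, 0);
		// On success the reply maps the block page at dstva
		// (for MAP/LOCK requests); otherwise r is an error code.
		r = ipc_recv(0, dstva, 0);
		if (r != -E_AGAIN)
			return r;
		sys_yield();	// queue was full; try again later
	}
}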