Example #1
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
    uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;
    int r;

    if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
        panic("flush_block of bad va %08x", addr);

    // LAB 5: Your code here.
    addr = (void *)ROUNDDOWN(addr, PGSIZE);

    if (!va_is_mapped(addr) || !va_is_dirty(addr))
        return;

    if ((r = ide_write(BLKSECTS * blockno, addr, BLKSECTS)) < 0) {
        panic("flush_block: ide_write error %e\n", r);
    }

    // Remapping with only the PTE_SYSCALL bits is enough to clear PTE_D,
    // since PTE_D is not part of PTE_SYSCALL.
    if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0) {
        panic("flush_block: sys_page_map error %e\n", r);
    }

    return;
}
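// For reference, the va_is_mapped / va_is_dirty helpers that the hints above
// refer to are small probes of the read-only uvpd/uvpt mappings. A minimal
// sketch along the lines of fs/bc.c (check your own tree for the exact
// definitions; `bool` comes from inc/types.h):

// Is this virtual address mapped?
bool
va_is_mapped(void *va)
{
    return (uvpd[PDX(va)] & PTE_P) && (uvpt[PGNUM(va)] & PTE_P);
}

// Is this virtual address dirty, i.e. has it been written to since the
// PTE was last installed?
bool
va_is_dirty(void *va)
{
    return (uvpt[PGNUM(va)] & PTE_D) != 0;
}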
Example #2
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint32_t err = utf->utf_err;
	int r;

	// Check that the faulting access was (1) a write, and (2) to a
	// copy-on-write page.  If not, panic.
	// Hint:
	//   Use the read-only page table mappings at uvpt
	//   (see <inc/memlayout.h>).

    if (((err & FEC_WR) == 0) || ((uvpd[PDX(addr)] & PTE_P)==0) || 
                    ((uvpt[PGNUM(addr)] & PTE_COW)==0) )
            panic("Page fault in lib/fork.c!\n");
	// LAB 4: Your code here.

	// Allocate a new page, map it at a temporary location (PFTEMP),
	// copy the data from the old page to the new page, then move the new
	// page to the old page's address.
	// Hint:
	//   You should make three system calls.

    if ((r = sys_page_alloc(0, (void*)PFTEMP, PTE_U|PTE_P|PTE_W))  <0)
                panic("alloc page error in lib/fork.c\n");
    addr = ROUNDDOWN(addr, PGSIZE);
    memcpy(PFTEMP, addr, PGSIZE);
    if ((r = sys_page_map(0, PFTEMP, 0, addr, PTE_U|PTE_P|PTE_W)) <0)
                panic("page map error in lib/fork.c\n");
    if ((r = sys_page_unmap(0, PFTEMP)) <0)
                panic("page unmap error in lib/fork.c\n");
// LAB 4: Your code here.

}
Example #3
// Copy the mappings for shared pages into the child address space.
static int
copy_shared_pages(envid_t child)
{
	int pn, perm;
	int retval = 0;

	// Step through each page below UTOP. If the page is PTE_SHARE,
	//  then copy the mapping of that page to the child environment.
	for(pn = 0; pn < PGNUM(UTOP); pn++) {
		// Check to see if the page directory entry and page table
		//  entry for this page exist, and if the page is marked
		//  PTE_SHARE.
		if((uvpd[PDX(pn*PGSIZE)]&PTE_P) == 0 ||
		   (uvpt[pn]&PTE_P) == 0 ||
		   (uvpt[pn]&PTE_SHARE) == 0)
			continue;

		// Grab the permissions for the page
		perm = uvpt[pn]&PTE_SYSCALL;

		// Copy the mapping for the current page into the child
		if((retval = sys_page_map(0, (void *)(pn*PGSIZE), child, (void *)(pn*PGSIZE), perm)) != 0)
			break;
	}

	return retval;
}
Example #4
void
serve(void)
{
	uint32_t req, whom;
	int perm, r;
	void *pg;

	while (1) {
		perm = 0;
		req = ipc_recv((int32_t *) &whom, fsreq, &perm);
		if (debug)
			cprintf("fs req %d from %08x [page %08x: %s]\n",
				req, whom, vpt[PGNUM(fsreq)], fsreq);

		// All requests must contain an argument page
		if (!(perm & PTE_P)) {
			cprintf("Invalid request from %08x: no argument page\n",
				whom);
			continue; // just leave it hanging...
		}

		pg = NULL;
		if (req == FSREQ_OPEN) {
			r = serve_open(whom, (struct Fsreq_open*)fsreq, &pg, &perm);
		} else if (req < NHANDLERS && handlers[req]) {
			r = handlers[req](whom, fsreq);
		} else {
			cprintf("Invalid request code %d from %08x\n", whom, req);
			r = -E_INVAL;
		}
		ipc_send(whom, r, pg, perm);
		sys_page_unmap(0, fsreq);
	}
}
Example #5
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
	// LAB 4: Your code here.
    set_pgfault_handler(pgfault);
    int r, childid;
    childid = sys_exofork();
    if (childid <0)
            panic("exofork error in fork()!\n");
    else if (childid ==0)
    {
            thisenv = &envs[ENVX(sys_getenvid())];
            return 0;
    } else
    {
        uintptr_t addr;
        for (addr = UTEXT; addr<UXSTACKTOP-PGSIZE; addr+=PGSIZE)
        {
                int pn = PGNUM(addr);
                if (((uvpd[PDX(addr)] & PTE_P) >0) &&
                    ((uvpt[pn] & PTE_P) >0) &&
                    ((uvpt[pn] & PTE_U) > 0))
                        duppage(childid, pn);
        }
        extern void _pgfault_upcall();
        if ((r = sys_page_alloc(childid, (void*) (UXSTACKTOP - PGSIZE),  PTE_U|PTE_W|PTE_P)) < 0)
                panic("sys_page_alloc error in fork()!\n");
        if ((r = sys_env_set_pgfault_upcall(childid, _pgfault_upcall)) < 0)
                panic("sys_env_set_pgfault_upcall error in fork()!\n");
        if ((r = sys_env_set_status(childid, ENV_RUNNABLE)) < 0)
                panic("sys_env_set_status error in fork()!\n");
        return childid;
    }
}
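// Several of the fork() examples in this collection call duppage(), which is
// never shown here. A minimal copy-on-write duppage sketch, assuming the
// Lab 4 uvpt/uvpd mappings and a Lab 5-style PTE_SHARE bit (drop that branch
// for plain Lab 4); this is one common shape, not the only correct one:
static int
duppage(envid_t envid, unsigned pn)
{
    int r;
    void *addr = (void *)(pn * PGSIZE);
    pte_t pte = uvpt[pn];

    if (pte & PTE_SHARE) {
        // Shared pages keep their original permissions in both environments.
        if ((r = sys_page_map(0, addr, envid, addr, pte & PTE_SYSCALL)) < 0)
            return r;
    } else if ((pte & PTE_W) || (pte & PTE_COW)) {
        // Map the page copy-on-write in the child first, then remap it
        // copy-on-write in the parent too, so neither side writes the
        // shared physical frame directly.
        if ((r = sys_page_map(0, addr, envid, addr, PTE_P | PTE_U | PTE_COW)) < 0)
            return r;
        if ((r = sys_page_map(0, addr, 0, addr, PTE_P | PTE_U | PTE_COW)) < 0)
            return r;
    } else {
        // Read-only pages can simply be shared.
        if ((r = sys_page_map(0, addr, envid, addr, PTE_P | PTE_U)) < 0)
            return r;
    }
    return 0;
}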
Example #6
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
	int r;
	uint32_t err = utf->utf_err;
	void *addr = (void *) utf->utf_fault_va;

	// Check that the faulting access was (1) a write, and (2) to a
	// copy-on-write page.  If not, panic.
	// Hint:
	//   Use the read-only page table mappings at vpt
	//   (see <inc/memlayout.h>).

	// LAB 4: Your code here.
	if ((err & FEC_WR) == 0)
		panic("pgfault: not a write!");
	if ((vpt[PGNUM(addr)] & PTE_COW) == 0)
		panic("pgfault: not a cow!");

	// Allocate a new page, map it at a temporary location (PFTEMP),
	// copy the data from the old page to the new page, then move the new
	// page to the old page's address.
	// Hint:
	//   You should make three system calls.
	//   No need to explicitly delete the old page's mapping.

	// LAB 4: Your code here.

	//panic("pgfault not implemented");
	if ((r=sys_page_alloc(0, (void*)PFTEMP, PTE_U | PTE_W | PTE_P))<0)
		panic("pgfault: page_alloc failed!");
	memmove((void*)PFTEMP, ROUNDDOWN(addr,PGSIZE),PGSIZE);
	if ((r=sys_page_map(0, (void*)PFTEMP, 0, ROUNDDOWN(addr,PGSIZE), PTE_U | PTE_W | PTE_P))<0)
		panic("pgfault: page_map failed!");
}
Example #7
// Challenge!
int
sfork(void)
{
/*	panic("sfork not implemented");
	return -E_INVAL;*/
	extern void _pgfault_upcall(void);
	set_pgfault_handler(pgfault);
	envid_t id = sys_exofork();
	int r;
	if (id < 0)
		panic("exofork: child");
	if (id == 0)
	{
		thisenv = envs + ENVX(sys_getenvid());
		return 0;
	}
	uint32_t i;
/*	for (i=0;i<UTOP-PGSIZE;i+=PGSIZE)
		if ((vpd[PDX(i)] & PTE_P) && (vpt[PGNUM(i)] & PTE_P))
			if ((r=duppage(id,PGNUM(i)))<0)
				return r;*/
	if ((r=sys_page_alloc(id, (void*)(UXSTACKTOP-PGSIZE), PTE_U | PTE_W | PTE_P))<0)
		return r;
//	cprintf("begin to map!\nUSTACKTOP-PGSIZE: %x\nUTEXT: %x\n",USTACKTOP-PGSIZE,UTEXT);
	for (i=USTACKTOP-PGSIZE;i>=UTEXT;i-=PGSIZE)
	{
		if ((vpd[PDX(i)] & PTE_P) && (vpt[PGNUM(i)] & PTE_P))
		{
			if ((r=sduppage(id,PGNUM(i),1))<0)
				return r;	
		}else
			break;
	}
	for (;i>=UTEXT;i-=PGSIZE)
		if ((vpd[PDX(i)] & PTE_P) && (vpt[PGNUM(i)] & PTE_P))
			if ((r=sduppage(id,PGNUM(i),0))<0)
				return r;

	if ((r=sys_env_set_pgfault_upcall(id,(void*)_pgfault_upcall))<0)
		return r;
	if ((r=sys_env_set_status(id, ENV_RUNNABLE))<0)
		return r;
//	cprintf("sfork succeed!\n");
	return id;
}
Example #8
void
umain(int argc, char **argv) {
	void *addr = (void *)(USTACKTOP - 10*PGSIZE);
	cprintf("starting in umain\n");
	int r = fork();
	if (r < 0) panic("testmigrate: fork: %e\n", r);
	if (r == 0) {
		for (int i = 0; i < 100; i++) {
			sys_yield();
		}
		cprintf("CHILD: going to migrate locally\n");
		cprintf("CHILD: physical pg num: %x\n", PGNUM(vpt[PGNUM(addr)]));
		int *addr_int = (int *)addr;
		addr_int[1] = 55;
		int r = migrate();
		cprintf("[%08x]: Hello from your migrated process!\n", thisenv->env_id);
		cprintf("addr_int[0] = %d, addr_int[1] = %d\n",
				addr_int[0], addr_int[1]);
		return;
	}
	else {
		envid_t envid = (envid_t)r;
		cprintf("XXX LA LA LA: fork good.  new envid %x\n", envid);
		if (sys_page_alloc(0, addr, PTE_U | PTE_P | PTE_W) < 0)
			panic("sys_page_alloc");
		if (sys_page_map(0, addr, envid, addr, PTE_U | PTE_P | PTE_W) < 0)
			panic("sys_page_map");
		cprintf("XXX LA LA LA: mapped a shared page.\n");
		cprintf("XXX LA LA LA: physical pg num: %x\n", PGNUM(vpt[PGNUM(addr)]));
		int *addr_int = (int *)addr;
		addr_int[0] = 60;
		for (int i = 0; i < 6000; i++) {
			sys_yield();
		}
		cprintf("TRYING TO WRITE TO THE PAGE THAT GOT MIGRATED AWAY\n");
		addr_int[2] = 42;
		cprintf("SUCCESSFULLY WROTE TO THE PAGE THAT GOT MIGRATED AWAY\n");
		cprintf("AND THE ANSWER IS: %d\n", addr_int[0]);
		cprintf("AND THE ANSWER IS: %d\n", addr_int[1]);
		cprintf("AND THE ANSWER IS: %d\n", addr_int[2]);
		while (1) sys_yield() ;
	}
}
Example #9
static int
isfree(void *v, size_t n)
{
	uintptr_t va, end_va = (uintptr_t) v + n;

	for (va = (uintptr_t) v; va < end_va; va += PGSIZE)
		if (va >= (uintptr_t) mend
		    || ((uvpd[PDX(va)] & PTE_P) && (uvpt[PGNUM(va)] & PTE_P)))
			return 0;
	return 1;
}
Example #10
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use vpd, vpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
	// LAB 4: Your code here.
	envid_t envidnum;
	uint32_t addr;
	int r;
	extern void _pgfault_upcall(void);
	
	set_pgfault_handler(pgfault);
	
	envidnum = sys_exofork();
	if (envidnum < 0)
		panic("sys_exofork: %e", envidnum);
	// We’re the child
	if (envidnum == 0) {
		thisenv = &envs[ENVX(sys_getenvid())];
		return 0;
	}
	// We’re the parent.
	for (addr =  UTEXT; addr < UXSTACKTOP - PGSIZE; addr += PGSIZE) 
	{
		if(	(vpd[PDX(addr)] & PTE_P) > 0 && (vpt[PGNUM(addr)] & PTE_P) > 0 && (vpt[PGNUM(addr)] & PTE_U) > 0)
			duppage(envidnum,PGNUM(addr));
	}
	if ((r = sys_page_alloc (envidnum, (void *)(UXSTACKTOP - PGSIZE), PTE_U|PTE_W|PTE_P)) < 0)
		panic ("fork: page allocation failed : %e", r);
	//cprintf("%x-----%x\n",&envid,envid);
	if ((r = sys_env_set_pgfault_upcall (envidnum, _pgfault_upcall)) < 0)
		panic ("fork: set pgfault upcall failed : %e", r);
	//cprintf("%x-----%x\n",&envid,envid);
	// Start the child environment running
	if((r = sys_env_set_status(envidnum, ENV_RUNNABLE)) < 0)
		panic("fork: set child env status failed : %e", r);
	//cprintf("%x-----%x\n",&envid,envid);
	//cprintf("fork in %x have set %x ,runnable\n",sys_getenvid(),envidnum);
	//cprintf("fork in %x have set %x ,runnable\n",sys_getenvid(),envidnum);
	//cprintf("%x-----%x\n",&envidnum,envidnum);
	return envidnum;

	//panic("fork not implemented");
}
Example #11
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
	uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;

	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("flush_block of bad va %08x", addr);

	// LAB 5: Your code here.
	if( !va_is_mapped(addr) || !(uvpt[PGNUM(addr)] & PTE_D)) { /* no need to flush */
		return;
	}
	int r;
	addr = ROUNDDOWN(addr, PGSIZE);
	if( (r = ide_write(blockno * BLKSECTS, addr, (PGSIZE/SECTSIZE))) != 0) {
		panic("in flush_block, ide_write: %e", r);
	}

	if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
		panic("in sys_page_map, sys_page_map: %e", r);
}
Example #12
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
    envid_t envid;
    uint8_t *addr;
    uintptr_t va;
    int r;
	// LAB 4: Your code here.
	set_pgfault_handler(pgfault);
    envid = sys_exofork();
    if (envid < 0)
    {
        panic("sys_exofork: %d", envid);
    }
    if (envid == 0)
    {
        thisenv = &envs[ENVX(sys_getenvid())];
        return 0;
    }

    // We are parent
    for(va = UTEXT; va < UTOP - PGSIZE; va += PGSIZE)
    {
        if ((uvpd[PDX(va)] & PTE_P) && (uvpt[PGNUM(va)] & PTE_P) && (uvpt[PGNUM(va)] & PTE_U))
        {
            if ((r = duppage(envid, PGNUM(va))) < 0)
                return r;
        }
    }
    // The user exception stack page
    if ((r = sys_page_alloc(envid, (void*)(UXSTACKTOP-PGSIZE), PTE_P|PTE_U|PTE_W)) < 0)
    {
        panic("fork: sys_page_alloc: %e", r);
    }

    // Done mapping pages
    sys_env_set_pgfault_upcall(envid, thisenv->env_pgfault_upcall);
    sys_env_set_status(envid, ENV_RUNNABLE);
    return envid;
}
Example #13
//
// Initialize page structure and memory free list.
// After this is done, NEVER use boot_alloc again.  ONLY use the page
// allocator functions below to allocate and deallocate physical
// memory via the page_free_list.
//
void
page_init(void)
{
	// LAB 4:
	// Change your code to mark the physical page at MPENTRY_PADDR
	// as in use

	// The example code here marks all physical pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark physical page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
	//     is free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
	//     never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free. Where is the kernel
	//     in physical memory?  Which pages are already in use for
	//     page tables and other data structures?
	//
	// Change the code to reflect this.
	// NB: DO NOT actually touch the physical memory corresponding to
	// free pages!
	size_t i;
	extern char end[];
	uint32_t upp = PADDR(boot_alloc(0));
	cprintf("end: %x npages: %d sizeof struct Page: %x PGSIZE: %x IOPHYSMEM: %x\n",end,npages,sizeof(struct Page),PGSIZE,IOPHYSMEM);
	for (i = 0; i < npages; i++) {
		if (i == 0) continue;
		if ((i >= PGNUM(IOPHYSMEM) && 
			i < PGNUM(upp)) || i==MPENTRY_PADDR/PGSIZE)
			continue;
		pages[i].pp_ref = 0;
		pages[i].pp_link = page_free_list;
		page_free_list = &pages[i];
	}

}
Example #14
static void update_blk_count()
{
        int i;
        void* va = (void*)DISKMAP;
        for (i=0; i<MAXBLK; ++i)
        {
                if (plist[i].valid)
                {
                        if (uvpt[PGNUM(va)] & PTE_A) plist[i].count++;
                }
                va += BLKSIZE;
        }
}
Example #15
static void update_time_stamp()
{
        int i;
        void* va = (void*)DISKMAP;
        for (i=0; i<MAXBLK; ++i)
        {
                if (plist[i].valid)
                {
                        if (uvpt[PGNUM(va)] & PTE_A) plist[i].tstamp = timestamp;
                }
                va += BLKSIZE;
        }
}
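// The two scan helpers above only sample PTE_A; for the counts and timestamps
// to reflect recent use, the accessed bit has to be cleared again after each
// scan. A minimal sketch of that reset pass, reusing the same remapping trick
// the flush_block examples use to clear PTE_D (the name clear_accessed_bits
// is illustrative, and plist[i].valid is assumed to mean the block's page is
// currently mapped):
static void clear_accessed_bits()
{
        int i;
        void* va = (void*)DISKMAP;
        for (i=0; i<MAXBLK; ++i)
        {
                if (plist[i].valid && (uvpt[PGNUM(va)] & PTE_A))
                {
                        // Remapping installs a fresh PTE built from the
                        // PTE_SYSCALL bits only, which drops the hardware-set
                        // PTE_A (and PTE_D) flags.
                        if (sys_page_map(0, va, 0, va,
                                         uvpt[PGNUM(va)] & PTE_SYSCALL) < 0)
                                panic("clear_accessed_bits: sys_page_map failed");
                }
                va += BLKSIZE;
        }
}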
Example #16
// Copy the mappings for shared pages into the child address space.
static int
copy_shared_pages(envid_t child)
{
	// LAB 5: Your code here.
	uintptr_t addr;
	int r;
	int perm;

	for (addr = UTEXT; addr < UTOP; addr += PGSIZE) {
		if (!(uvpd[PDX(addr)] & PTE_P))
			continue;
		if (!(uvpt[PGNUM(addr)] & PTE_P) ||
			!(uvpt[PGNUM(addr)] & PTE_SHARE))
			continue;
		// Copy the parent's mapping into the child with the parent's own
		// permission bits; a shared page must not get a fresh allocation.
		perm = uvpt[PGNUM(addr)] & PTE_SYSCALL;
		if ((r = sys_page_map(0, (void *)addr, child, (void *)addr, perm)) < 0)
			panic("sys_page_map: %e", r);
	}

	return 0;
}
Example #17
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
    // Step 1: install user mode pgfault handler.
    set_pgfault_handler(pgfault);

    // Step 2: create child environment.
    envid_t envid = sys_exofork();
    if (envid < 0) {
        panic("fork: cannot create child env");
    }
    else if (envid == 0) {
        // child environment.
        thisenv = &envs[ENVX(sys_getenvid())];
        return 0;
    }

    // Step 3: duplicate pages.
    int ipd;
    for (ipd = 0; ipd < PDX(UTOP); ipd++) {
        // No page table yet.
        if (!(uvpd[ipd] & PTE_P))
            continue;

        int ipt;
        for (ipt = 0; ipt < NPTENTRIES; ipt++) {
            unsigned pn = (ipd << 10) | ipt;
            // Only duplicate present mappings; the exception stack gets its
            // own fresh page below.
            if ((uvpt[pn] & PTE_P) && pn != PGNUM(UXSTACKTOP - PGSIZE)) {
                duppage(envid, pn);
            }
        }
    }

    // allocate a new page for child to hold the exception stack.
    if (sys_page_alloc(envid, (void *)(UXSTACKTOP - PGSIZE), PTE_W | PTE_U | PTE_P)) {
        panic("fork: no phys mem for xstk");
    }

    // Step 4: set user page fault entry for child.
    if (sys_env_set_pgfault_upcall(envid, thisenv->env_pgfault_upcall)) {
        panic("fork: cannot set pgfault upcall");
    }

    // Step 5: set child status to ENV_RUNNABLE.
    if (sys_env_set_status(envid, ENV_RUNNABLE)) {
        panic("fork: cannot set env status");
    }

    return envid;

}
Example #18
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
	// LAB 4: Your code here.
  set_pgfault_handler(pgfault);

    envid_t envid;
    uint32_t addr;
    envid = sys_exofork();
    if (envid == 0) 
    {
        thisenv = &envs[ENVX(sys_getenvid())];
        return 0;
    }
    
    if (envid < 0)
        panic("sys_exofork: %e", envid);

    for (addr = 0; addr < USTACKTOP; addr += PGSIZE)
    {
        if ((uvpd[PDX(addr)] & PTE_P) && (uvpt[PGNUM(addr)] & PTE_P) && (uvpt[PGNUM(addr)] & PTE_U))
            duppage(envid, PGNUM(addr));
    }

    if (sys_page_alloc(envid, (void *)(UXSTACKTOP-PGSIZE), PTE_U|PTE_W|PTE_P) < 0)
        panic("in fork: sys_page_alloc wrong!");
 
    extern void _pgfault_upcall();
 
    sys_env_set_pgfault_upcall(envid, _pgfault_upcall);

    if (sys_env_set_status(envid, ENV_RUNNABLE) < 0)
        panic("sys_env_set_status");

    return envid;
	//panic("fork not implemented");
}
Example #19
// Write the contents of DISKMAP block 'blocknum' to disk.
// Returns 0 on success, < 0 on failure.  Error codes include -E_INVAL
//   (blocknum out of range), -E_FAULT (block not in memory), -E_IO.
//
static int
flush_block(blocknum_t blocknum)
{
	uintptr_t va = DISKMAP + blocknum * BLKSIZE;
	BlockInfo *bip;
	int r;

	if (blocknum >= (blocknum_t) (DISKSIZE / BLKSIZE))
		return -E_INVAL;
	if (!(vpd[PDX(va)] & PTE_P) || !(vpt[PGNUM(va)] & PTE_P))
		return -E_FAULT;

	return ide_write(blocknum * BLKSECTS, (void *) va, BLKSECTS);
}
Example #20
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
	// LAB 4: Your code here.
	int r;
	envid_t child_envid;

	set_pgfault_handler(pgfault);

	child_envid = sys_exofork();
	if (child_envid < 0)
		panic("sys_exofork: %e\n", child_envid);
	if (child_envid == 0) { // child
		// Fix thisenv like dumbfork does and return 0
		thisenv = &envs[ENVX(sys_getenvid())];
		return 0;
	}

	// We're in the parent

	// Iterate over all pages until UTOP. Map all pages that are present
	// and let duppage worry about the permissions.
	// Note that we don't remap anything above UTOP because the kernel took
	// care of that for us in env_setup_vm().
	uint32_t page_num;
	pte_t *pte;
	for (page_num = 0; page_num < PGNUM(UTOP - PGSIZE); page_num++) {
		uint32_t pdx = page_num / NPTENTRIES;	// same as PDX(page_num * PGSIZE)
		if ((uvpd[pdx] & PTE_P) == PTE_P &&
			((uvpt[page_num] & PTE_P) == PTE_P)) {
				duppage(child_envid, page_num);
		}
	}

	// Allocate exception stack space for child. The child can't do this themselves
	// because the mechanism by which it would is to run the pgfault handler, which
	// needs to run on the exception stack (catch 22).
	if ((r = sys_page_alloc(child_envid, (void *) (UXSTACKTOP - PGSIZE), PTE_P | PTE_U | PTE_W)) < 0)
		panic("sys_page_alloc: %e\n", r);

	// Set page fault handler for the child
	if ((r = sys_env_set_pgfault_upcall(child_envid, _pgfault_upcall)) < 0)
		panic("sys_env_set_pgfault_upcall: %e\n", r);

	// Mark child environment as runnable
	if ((r = sys_env_set_status(child_envid, ENV_RUNNABLE)) < 0)
		panic("sys_env_set_status: %e\n", r);

	return child_envid;
}
Example #21
//
// User-level fork with copy-on-write.
// Set up our page fault handler appropriately.
// Create a child.
// Copy our address space and page fault handler setup to the child.
// Then mark the child as runnable and return.
//
// Returns: child's envid to the parent, 0 to the child, < 0 on error.
// It is also OK to panic on error.
//
// Hint:
//   Use uvpd, uvpt, and duppage.
//   Remember to fix "thisenv" in the child process.
//   Neither user exception stack should ever be marked copy-on-write,
//   so you must allocate a new page for the child's user exception stack.
//
envid_t
fork(void)
{
	// LAB 4: Your code here.
	set_pgfault_handler(pgfault);
	envid_t childid = sys_exofork();
	if(childid < 0)
		panic("Fork Failed\n");
	if(childid == 0) {
		thisenv = &envs[ENVX(sys_getenvid())];
		return 0;
	}

	int i, j;
	for(i = 0; i < PDX(UTOP); i++) {
		if (!(uvpd[i] & PTE_P)) continue;
		for(j = 0; (j < 1024) && (i*NPDENTRIES + j < PGNUM(UXSTACKTOP - PGSIZE)); j++ ) {
			if(!(uvpt[i*NPDENTRIES + j] & PTE_P)) continue;
			if(duppage(childid, i*NPDENTRIES + j) < 0)
				panic("dup page failed");
		}
	}
	if ((sys_page_alloc(childid, (void *)(UXSTACKTOP - PGSIZE), PTE_U | PTE_P | PTE_W)) < 0) {
          panic("Allocation of page for Exception stack cups!\n");
        }

        if ((sys_env_set_pgfault_upcall(childid, thisenv->env_pgfault_upcall)) < 0) {
          panic("Unable to set child process' upcall");
        }

	int r;

        if ((r = sys_env_set_status(childid, ENV_RUNNABLE)) < 0) {
                cprintf("sys_env_set_status: error %e\n", r);
                return -1;
        }


	return childid;
	panic("fork not implemented");
}
Example #22
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
    void *addr = (void *) utf->utf_fault_va;
    uint32_t err = utf->utf_err;
    int r;

    // Check that the faulting access was (1) a write, and (2) to a
    // copy-on-write page.  If not, panic.
    // Hint:
    //   Use the read-only page table mappings at uvpt
    //   (see <inc/memlayout.h>).

    // LAB 4: Your code here.
    if(!(
                ((err & FEC_WR) == FEC_WR) && (uvpd[PDX(addr)] & PTE_P) &&
                (uvpt[PGNUM(addr)] & PTE_P) && (uvpt[PGNUM(addr)] & PTE_COW)
            )
      )
        panic("err isn't caused by write or cow\n");
    // Allocate a new page, map it at a temporary location (PFTEMP),
    // copy the data from the old page to the new page, then move the new
    // page to the old page's address.
    // Hint:
    //   You should make three system calls.

    // LAB 4: Your code here.
    void *radd = ROUNDDOWN(addr, PGSIZE);
    if(sys_page_alloc(0, PFTEMP, PTE_U | PTE_W | PTE_P) < 0)
        panic("sys_page_alloc fails\n");
    memmove(PFTEMP, radd, PGSIZE);
    if(sys_page_map(0, PFTEMP, 0, radd, PTE_U | PTE_W | PTE_P) < 0)
        panic("sys_page_map fails\n");
    sys_page_unmap(0, PFTEMP);

    //panic("pgfault not implemented");
}
Example #23
// Flush the contents of the block containing VA out to disk if
// necessary, then clear the PTE_D bit using sys_page_map.
// If the block is not in the block cache or is not dirty, does
// nothing.
// Hint: Use va_is_mapped, va_is_dirty, and ide_write.
// Hint: Use the PTE_SYSCALL constant when calling sys_page_map.
// Hint: Don't forget to round addr down.
void
flush_block(void *addr)
{
	uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;

	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("flush_block of bad va %08x", addr);

	// LAB 5: Your code here.
    addr = ROUNDDOWN(addr, PGSIZE);
    if (!va_is_mapped(addr) || !va_is_dirty(addr))
      return;
    int r;
    if ((r = ide_write(blockno * BLKSECTS, addr, BLKSECTS)) < 0)
      panic("flush_block: ide_write: %e", r);
    // Remap with just the PTE_SYSCALL bits to clear PTE_D.
    if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
      panic("flush_block: sys_page_map: %e", r);
}
Example #24
// Finds the smallest i from 0 to MAXFD-1 that doesn't have
// its fd page mapped.
// Sets *fd_store to the corresponding fd page virtual address.
//
// fd_alloc does NOT actually allocate an fd page.
// It is up to the caller to allocate the page somehow.
// This means that if someone calls fd_alloc twice in a row
// without allocating the first page we return, we'll return the same
// page the second time.
//
// Hint: Use INDEX2FD.
//
// Returns 0 on success, < 0 on error.  Errors are:
//	-E_MAX_OPEN: no more file descriptors
// On error, *fd_store is set to 0.
int
fd_alloc(struct Fd **fd_store)
{
	int i;
	struct Fd *fd;

	for (i = 0; i < MAXFD; i++) {
		fd = INDEX2FD(i);
		if ((uvpd[PDX(fd)] & PTE_P) == 0 || (uvpt[PGNUM(fd)] & PTE_P) == 0) {
			*fd_store = fd;
			return 0;
		}
	}
	*fd_store = 0;
	return -E_MAX_OPEN;
}
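// A minimal caller sketch for fd_alloc: reserve a slot, then immediately back
// it with a page, since until the page is mapped a second fd_alloc() would
// hand out the same struct Fd again. The wrapper name alloc_fd_page is
// illustrative; Example #26's pipe() below follows the same pattern, and the
// PTE_SHARE bit is what later lets the shared-page copying in spawn/fork
// propagate the fd mapping:
static int
alloc_fd_page(struct Fd **fd_store)
{
	int r;

	if ((r = fd_alloc(fd_store)) < 0)
		return r;	// -E_MAX_OPEN: all MAXFD slots are in use
	if ((r = sys_page_alloc(0, *fd_store, PTE_P | PTE_U | PTE_W | PTE_SHARE)) < 0)
		return r;	// slot stays unbacked, so it remains reusable
	return 0;
}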
Example #25
//
// Custom page fault handler - if faulting page is copy-on-write,
// map in our own private writable copy.
//
static void
pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint32_t err = utf->utf_err;
	int r;
    //cprintf("envid: %x, eip: %x, fault_va: %x\n", thisenv->env_id, utf->utf_eip, addr);

	// Check that the faulting access was (1) a write, and (2) to a
	// copy-on-write page.  If not, panic.
	// Hint:
	//   Use the read-only page table mappings at uvpt
	//   (see <inc/memlayout.h>).

	// LAB 4: Your code here.
    if (!(err & FEC_WR))
    {
        panic("pgfault: error is not a write. it is %e with falting addr 0x%x\n", err, addr);
    }

    if (!(uvpd[PDX(addr)] & PTE_P) || !(uvpt[PGNUM(addr)] & PTE_COW))
    {
        panic("pgfault: page not COW\n");
    }

	// Allocate a new page, map it at a temporary location (PFTEMP),
	// copy the data from the old page to the new page, then move the new
	// page to the old page's address.
	// Hint:
	//   You should make three system calls.

	// LAB 4: Your code here.
    if ((r = sys_page_alloc(0, PFTEMP, PTE_U|PTE_W|PTE_P)) < 0)
    {
        panic("pgfault: sys_page_alloc fail %e", r);
    }
    memmove(PFTEMP, ROUNDDOWN(addr, PGSIZE), PGSIZE);
    if ((r = sys_page_map(0, PFTEMP, 0, ROUNDDOWN(addr, PGSIZE), PTE_P|PTE_U|PTE_W)) < 0)
    {
        panic("pgfault: sys_page_map: %e", r);
    }

    if ((r = sys_page_unmap(0, PFTEMP)) < 0)
    {
        panic("pgfault: sys_page_unmap: %e", r);
    }
}
Example #26
int
pipe(int pfd[2])
{
	int r;
	struct Fd *fd0, *fd1;
	void *va;

	// allocate the file descriptor table entries
	if ((r = fd_alloc(&fd0)) < 0
	    || (r = sys_page_alloc(0, fd0, PTE_P|PTE_W|PTE_U|PTE_SHARE)) < 0)
		goto err;

	if ((r = fd_alloc(&fd1)) < 0
	    || (r = sys_page_alloc(0, fd1, PTE_P|PTE_W|PTE_U|PTE_SHARE)) < 0)
		goto err1;

	// allocate the pipe structure as first data page in both
	va = fd2data(fd0);
	if ((r = sys_page_alloc(0, va, PTE_P|PTE_W|PTE_U|PTE_SHARE)) < 0)
		goto err2;
	if ((r = sys_page_map(0, va, 0, fd2data(fd1), PTE_P|PTE_W|PTE_U|PTE_SHARE)) < 0)
		goto err3;

	// set up fd structures
	fd0->fd_dev_id = devpipe.dev_id;
	fd0->fd_omode = O_RDONLY;

	fd1->fd_dev_id = devpipe.dev_id;
	fd1->fd_omode = O_WRONLY;

	if (debug)
		cprintf("[%08x] pipecreate %08x\n", thisenv->env_id, uvpt[PGNUM(va)]);

	pfd[0] = fd2num(fd0);
	pfd[1] = fd2num(fd1);
	return 0;

    err3:
	sys_page_unmap(0, va);
    err2:
	sys_page_unmap(0, fd1);
    err1:
	sys_page_unmap(0, fd0);
    err:
	return r;
}
Example #27
// Fault any disk block that is read in to memory by
// loading it from disk.
// Hint: Use ide_read and BLKSECTS.
static void
bc_pgfault(struct UTrapframe *utf)
{
	void *addr = (void *) utf->utf_fault_va;
	uint64_t blockno = ((uint64_t)addr - DISKMAP) / BLKSIZE;
	int r;

	// Check that the fault was within the block cache region
	if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
		panic("page fault in FS: eip %08x, va %08x, err %04x",
			  utf->utf_rip, addr, utf->utf_err);

	// Sanity check the block number.
	if (super && blockno >= super->s_nblocks)
		panic("reading non-existent block %08x\n", blockno);

	// Allocate a page in the disk map region, read the contents
	// of the block from the disk into that page.
	// Hint: first round addr to page boundary.
	//
	// LAB 5: your code here:
	addr = ROUNDDOWN(addr, PGSIZE);
	if(0 != sys_page_alloc(0, (void*)addr, PTE_SYSCALL)){
		panic("Page Allocation Failed during handling page fault in FS");
	}
#ifdef VMM_GUEST
	if(0 != host_read((uint32_t) (blockno * BLKSECTS), (void*)addr, BLKSECTS))
	{
		panic("ide read failed in Page Fault Handling");		
	}
#else
	if(0 != ide_read((uint32_t) (blockno * BLKSECTS), (void*)addr, BLKSECTS))
	{
		panic("ide read failed in Page Fault Handling");		
	}
#endif	
	if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
		panic("in bc_pgfault, sys_page_map: %e", r);

	// Check that the block we read was allocated. (exercise for
	// the reader: why do we do this *after* reading the block
	// in?)
	if (bitmap && block_is_free(blockno))
		panic("reading free block %08x\n", blockno);
}
Example #28
//
// Initialize page structure and memory free list.
// After this is done, NEVER use boot_alloc again.  ONLY use the page
// allocator functions below to allocate and deallocate physical
// memory via the page_free_list.
//
void
page_init(void)
{
	// LAB 4:
	// Change your code to mark the physical page at MPENTRY_PADDR
	// as in use

	// The example code here marks all physical pages as free.
	// However this is not truly the case.  What memory is free?
	//  1) Mark physical page 0 as in use.
	//     This way we preserve the real-mode IDT and BIOS structures
	//     in case we ever need them.  (Currently we don't, but...)
	//  2) The rest of base memory, [PGSIZE, npages_basemem * PGSIZE)
	//     is free.
	//  3) Then comes the IO hole [IOPHYSMEM, EXTPHYSMEM), which must
	//     never be allocated.
	//  4) Then extended memory [EXTPHYSMEM, ...).
	//     Some of it is in use, some is free. Where is the kernel
	//     in physical memory?  Which pages are already in use for
	//     page tables and other data structures?
	//
	// Change the code to reflect this.
	// NB: DO NOT actually touch the physical memory corresponding to
	// free pages!
	/*size_t i;
	for (i = 0; i < npages; i++) {
		pages[i].pp_ref = 0;
		pages[i].pp_link = page_free_list;
		page_free_list = &pages[i];
	}*/
	uint32_t page_mp_entry = MPENTRY_PADDR / PGSIZE;
	uint32_t i;
	page_free_list = NULL;
	for (i = 1; i < npages_basemem; i++) {
		// Skip (but do not stop at) the page reserved for the AP
		// bootstrap code at MPENTRY_PADDR.
		if (i == page_mp_entry)
			continue;
		pages[i].pp_ref = 0;
		pages[i].pp_link = page_free_list;
		page_free_list = &pages[i];
	}
	for (i = PGNUM(PADDR(boot_alloc(0))); i < npages; i++) {
		pages[i].pp_ref = 0;
		pages[i].pp_link = page_free_list;
		page_free_list = &pages[i];
	}
	chunk_list = NULL;
}
Example #29
// Fault any disk block that is read in to memory by
// loading it from disk.
static void
bc_pgfault(struct UTrapframe *utf)
{
    void *addr = (void *) utf->utf_fault_va;
    uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE;
    int r;

    // Check that the fault was within the block cache region
    if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE))
        panic("page fault in FS: eip %08x, va %08x, err %04x",
              utf->utf_eip, addr, utf->utf_err);

    // Sanity check the block number.
    if (super && blockno >= super->s_nblocks)
        panic("reading non-existent block %08x\n", blockno);

    // Allocate a page in the disk map region, read the contents
    // of the block from the disk into that page.
    // Hint: first round addr to page boundary. fs/ide.c has code to read
    // the disk.
    //
    // LAB 5: your code here:

    addr = (void *)ROUNDDOWN(addr, PGSIZE);

    if ((r = sys_page_alloc(0, addr, PTE_P | PTE_U | PTE_W)) < 0) {
        panic("bc_pgfault: sys_page_alloc error %e\n", r);
    }

    if ((r = ide_read(BLKSECTS * blockno, addr, BLKSECTS)) < 0) {
        panic("bc_pgfault: ide_read error %e\n", r);
    }

    // Clear the dirty bit for the disk block page since we just read the
    // block from disk
    if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL)) < 0)
        panic("in bc_pgfault, sys_page_map: %e", r);

    // Check that the block we read was allocated. (exercise for
    // the reader: why do we do this *after* reading the block
    // in?)
    if (bitmap && block_is_free(blockno))
        panic("reading free block %08x\n", blockno);

}
Example #30
// Check that fdnum is in range and mapped.
// If it is, set *fd_store to the fd page virtual address.
//
// Returns 0 on success (the page is in range and mapped), < 0 on error.
// Errors are:
//	-E_INVAL: fdnum was either not in range or not mapped.
int
fd_lookup(int fdnum, struct Fd **fd_store)
{
	struct Fd *fd;

	if (fdnum < 0 || fdnum >= MAXFD) {
		if (debug)
			cprintf("[%08x] bad fd %d\n", thisenv->env_id, fdnum);
		return -E_INVAL;
	}
	fd = INDEX2FD(fdnum);
	if (!(uvpd[PDX(fd)] & PTE_P) || !(uvpt[PGNUM(fd)] & PTE_P)) {
		if (debug)
			cprintf("[%08x] closed fd %d\n", thisenv->env_id, fdnum);
		return -E_INVAL;
	}
	*fd_store = fd;
	return 0;
}
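// fd_lookup is typically consumed by the generic file wrappers in lib/fd.c.
// A sketch patterned after read() (dev_lookup, struct Dev and its dev_read
// hook are assumed to exist as in JOS; this is illustrative, not the exact
// library source):
ssize_t
read(int fdnum, void *buf, size_t n)
{
	int r;
	struct Dev *dev;
	struct Fd *fd;

	// Translate the small integer fd into its struct Fd page, then find
	// the device that backs it.
	if ((r = fd_lookup(fdnum, &fd)) < 0
	    || (r = dev_lookup(fd->fd_dev_id, &dev)) < 0)
		return r;
	if ((fd->fd_omode & O_ACCMODE) == O_WRONLY) {
		cprintf("[%08x] read %d -- bad mode\n", thisenv->env_id, fdnum);
		return -E_INVAL;
	}
	if (!dev->dev_read)
		return -E_NOT_SUPP;
	return (*dev->dev_read)(fd, buf, n);
}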