// Make file descriptor 'newfdnum' a duplicate of file descriptor 'oldfdnum'. // For instance, writing onto either file descriptor will affect the // file and the file offset of the other. // Closes any previously open file descriptor at 'newfdnum'. // This is implemented using virtual memory tricks (of course!). int dup(int oldfdnum, int newfdnum) { int r; char *ova, *nva; pte_t pte; struct Fd *oldfd, *newfd; if ((r = fd_lookup(oldfdnum, &oldfd)) < 0) return r; close(newfdnum); newfd = INDEX2FD(newfdnum); ova = fd2data(oldfd); nva = fd2data(newfd); if ((vpd[VPD(ova)] & PTE_P) && (vpt[VPN(ova)] & PTE_P)) if ((r = sys_page_map(0, ova, 0, nva, vpt[VPN(ova)] & PTE_SYSCALL)) < 0) goto err; if ((r = sys_page_map(0, oldfd, 0, newfd, vpt[VPN(oldfd)] & PTE_SYSCALL)) < 0) goto err; return newfdnum; err: sys_page_unmap(0, newfd); sys_page_unmap(0, nva); return r; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; // LAB 4: Your code here. //------------ Lab4 ---------------------------------------------------------------------------------------- if (pn * PGSIZE == UXSTACKTOP - PGSIZE) return 0; uintptr_t addr = (uintptr_t)(pn * PGSIZE); if ( (uvpt[pn] & PTE_W) || (uvpt[pn] & PTE_COW) ) { r = sys_page_map(0, (void *)addr, envid, (void *)addr, PTE_P | PTE_U | PTE_COW); if (r < 0) panic("sys_page_map failed in duppage %e\n", r); r = sys_page_map(0, (void *)addr, 0, (void *)addr, PTE_P | PTE_U | PTE_COW); if (r < 0) panic("sys_page_map failed in duppage %e\n", r); } else { r = sys_page_map(0, (void *)addr, envid, (void *)addr, PTE_P | PTE_U); if (r < 0) panic("sys_page_map failed in duppage %e\n", r); } return 0; //------------ Lab4 ---------------------------------------------------------------------------------------- //panic("duppage not implemented"); return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; uint32_t perm = PTE_P | PTE_COW; envid_t this_envid = thisenv->env_id; // LAB 4: Your code here. if (uvpt[pn] & PTE_SHARE) { if ((r = sys_page_map(this_envid, (void *) (pn*PGSIZE), envid, (void *) (pn*PGSIZE), uvpt[pn] & PTE_SYSCALL)) < 0) panic("sys_page_map: %e\n", r); } else if (uvpt[pn] & PTE_COW || uvpt[pn] & PTE_W) { if (uvpt[pn] & PTE_U) perm |= PTE_U; // Map page COW, U and P in child if ((r = sys_page_map(this_envid, (void *) (pn*PGSIZE), envid, (void *) (pn*PGSIZE), perm)) < 0) panic("sys_page_map: %e\n", r); // Map page COW, U and P in parent if ((r = sys_page_map(this_envid, (void *) (pn*PGSIZE), this_envid, (void *) (pn*PGSIZE), perm)) < 0) panic("sys_page_map: %e\n", r); } else { // map pages that are present but not writable or COW with their original permissions if ((r = sys_page_map(this_envid, (void *) (pn*PGSIZE), envid, (void *) (pn*PGSIZE), uvpt[pn] & PTE_SYSCALL)) < 0) panic("sys_page_map: %e\n", r); } return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; // LAB 4: Your code here. pte_t pte = uvpt[pn]; void *va = (void *)(pn << PGSHIFT); // If the page is writable or copy-on-write, // the mapping must be copy-on-write , // otherwise the new environment could change this page. if ((pte & PTE_W) || (pte & PTE_COW)) { if (sys_page_map(0, va, envid, va, PTE_COW|PTE_U|PTE_P)) { panic("duppage: map cow error"); } // Change permission of the page in this environment to copy-on-write. // Otherwise the new environment would see the change in this environment. if (sys_page_map(0, va, 0, va, PTE_COW|PTE_U| PTE_P)) { panic("duppage: change perm error"); } } return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { if (pn * PGSIZE == UXSTACKTOP - PGSIZE) return 0; int r; // LAB 4: Your code here. if (uvpt[pn] & PTE_SHARE) { r = sys_page_map (0, (void*) (pn * PGSIZE), envid, (void*) (pn * PGSIZE), uvpt[pn] & PTE_SYSCALL); if (r < 0) panic("duppage sys_page_map error : %e\n", r); } else if ((uvpt[pn] & PTE_W) || (uvpt[pn] & PTE_COW)) { r = sys_page_map(0, (void*) (pn * PGSIZE), envid, (void*) (pn * PGSIZE), PTE_U | PTE_P | PTE_COW); if (r < 0) panic("map failed"); r = sys_page_map(0, (void*) (pn * PGSIZE), 0, (void*) (pn * PGSIZE), PTE_U | PTE_P | PTE_COW); if (r < 0) panic("map failed"); } else { r = sys_page_map(0, (void*) (pn * PGSIZE), envid, (void*) (pn * PGSIZE), PTE_U | PTE_P); if (r < 0) panic("map failed"); } // panic("duppage not implemented"); return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why might we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; // LAB 4: Your code here. // seanyliu // LAB 7: add in a new if check if (vpt[pn] & PTE_SHARE) { if ((r = sys_page_map(sys_getenvid(), (void *)(pn * PGSIZE), envid, (void *)(pn * PGSIZE), vpt[pn] & PTE_USER)) < 0) { return r; } } else if (vpt[pn] & (PTE_W | PTE_COW)) { // If the page is writable or copy-on-write, the new mapping must be created copy-on-write if ((r = sys_page_map(sys_getenvid(), (void *)(pn*PGSIZE), envid, (void *)(pn*PGSIZE), PTE_U | PTE_COW | PTE_P)) < 0) { panic("duppage: sys_page_map %d", r); } // and then our mapping must be marked copy-on-write as well //vpt[pn] = vpt[pn] | PTE_COW; if ((r = sys_page_map(sys_getenvid(), (void *)(pn*PGSIZE), sys_getenvid(), (void *)(pn*PGSIZE), PTE_U | PTE_COW | PTE_P)) < 0) { panic("duppage: sys_page_map %d", r); } } else { if ((r = sys_page_map(sys_getenvid(), (void *)(pn*PGSIZE), envid, (void *)(pn*PGSIZE), PTE_U | PTE_P)) < 0) { panic("duppage: sys_page_map %d", r); } } //panic("duppage not implemented"); return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; unsigned va; // LAB 4: Your code here. pte_t pte = vpt[pn]; int perm = pte & 0xfff; int perm_share = pte & PTE_USER; va = pn * PGSIZE; if (perm_share & PTE_SHARE) return sys_page_map(0, (void *)(uint64_t)va, envid, (void *)(uint64_t)va, perm_share); if (pte & (PTE_W|PTE_COW)) { //child if ((r = sys_page_map(0, (void *)(uint64_t)va, envid, (void *)(uint64_t)va, PTE_P|PTE_U|PTE_COW)) < 0) panic("sys_page_map failed: %e\n", r); // parent return sys_page_map(0, (void *)(uint64_t)va, 0, (void *)(uint64_t)va, PTE_P|PTE_U|PTE_COW); } return sys_page_map(0, (void *)(uint64_t)va, envid, (void *)(uint64_t)va, PTE_P|PTE_U|perm); //panic("duppage not implemented"); //return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { // LAB 4: Your code here. int r; pte_t pte = vpt[pn]; void * addr = (void *)((uint64_t)pn * PGSIZE); int perm = pte & PTE_USER; if(perm&PTE_SHARE) { if((r = sys_page_map(0, addr, envid, addr, perm)) < 0) //map shared pages panic("Couldn't map shared page %e",r); return 0; } else if((perm&PTE_W) || (perm&PTE_COW)) { //if write or COW pages perm &= ~PTE_W; perm |= PTE_COW; if((r = sys_page_map(0, addr, envid, addr, perm)) < 0) // map once in child panic("Couldn't map page COW child %e",r); if((r = sys_page_map(0, addr, 0, addr, perm)) < 0) //map once in yourself panic("Couldn't map page COW parent %e",r); return 0; } else { //cprintf("perm: %x\n",perm); if((r = sys_page_map(0, addr, envid, addr, PTE_P|PTE_U)) < 0) //map readonly pages panic("Couldn't map readonly page %e",r); return 0; } }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; // LAB 4: Your code here. pde_t *pde; pte_t *pte; void *addr=(void*)(pn*PGSIZE); pde =(pde_t*) &vpd[VPD(addr)]; if(*pde&PTE_P) { pte=(pte_t*)&vpt[VPN(addr)]; } else panic("page table for pn page is not exist"); if((*pte&PTE_W)||(*pte&PTE_COW)) { if((r=sys_page_map(0,addr,envid,addr,PTE_COW|PTE_U))<0) return r; if((r=sys_page_map(0,addr,0,addr,PTE_COW|PTE_U))<0)//映射的时候注意env的id return r; } else{ if((r=sys_page_map(0,addr,envid,addr,PTE_U|PTE_P))<0) return r; } //panic("duppage not implemented"); return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int perm = uvpt[pn]&PTE_SYSCALL; // If the old permissions were write and the page isn't being shared, // the new permissions should include COW and not include PTE_W. if((uvpt[pn]&(PTE_W|PTE_COW)) != 0 && (uvpt[pn]&PTE_SHARE) == 0) { perm |= PTE_COW; perm &= ~PTE_W; } // Now map the page to the new environment if(sys_page_map(0, (void *)(pn*PGSIZE), envid, (void *)(pn*PGSIZE), perm) != 0) panic("duppage: unable to map page 0x%x to child", pn*PGSIZE); // The old mapping must be converted to COW as well. This must be done // after the child mapping because of mapping the user stack. A fault // happens immediately, switching that mapping to a writable page. That // mapping is then copied over to the child incorrectly. if(perm&PTE_COW) { if(sys_page_map(0, (void *)(pn*PGSIZE), 0, (void *)(pn*PGSIZE), perm) != 0) panic("duppage: unable to set permissions for own page"); } return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why mark ours copy-on-write again // if it was already copy-on-write?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; void *addr; pte_t pte; envid_t our_envid; // LAB 4: Your code here. our_envid = sys_getenvid(); addr = (void *) (pn*PGSIZE); pte = (pte_t)vpt[VPN(addr)]; // + LAB 6: if ( pte & PTE_SHARE ){ if ( (r = sys_page_map(our_envid, addr, envid, addr, pte & PTE_USER)) < 0) panic("duppage() - failed to map shared page!"); return 0; } // - LAB 6 if ( (pte & PTE_W) || (pte & PTE_COW) ){ if ( (r = sys_page_map(our_envid, addr, envid, addr, PTE_COW | PTE_U | PTE_P)) < 0) panic("duppage() - failed to map writable or COW page!"); if ( (r = sys_page_map(our_envid, addr, our_envid, addr, PTE_COW | PTE_U | PTE_P)) < 0) panic("duppage() - failed to remap!"); }else{ if ( (r = sys_page_map(our_envid, addr, envid, addr, PTE_U | PTE_P)) < 0) panic("duppage() - failed to map read-only page!"); } return 0; }
// // Map our virtual page pn (address pn*PGSIZE) into the target envid // at the same virtual address. If the page is writable or copy-on-write, // the new mapping must be created copy-on-write, and then our mapping must be // marked copy-on-write as well. (Exercise: Why do we need to mark ours // copy-on-write again if it was already copy-on-write at the beginning of // this function?) // // Returns: 0 on success, < 0 on error. // It is also OK to panic on error. // static int duppage(envid_t envid, unsigned pn) { int r; // LAB 4: Your code here. if (debug) cprintf("\n duppage: 1\n"); pte_t pte = vpt[pn]; int perm = pte & PTE_USER; void *va = (void*) (pn*PGSIZE); if (debug) cprintf("\n duppage: 2\n"); if ((perm & PTE_P) != PTE_P) panic ("user panic: lib\fork.c: duppage(): page to be duplicated is not PTE_P\n"); if ((perm & PTE_U) != PTE_U) panic ("user panic: lib\fork.c: duppage(): page to be duplicated is not PTE_U\n"); if (debug) cprintf("\n duppage: 3\n"); // LAB 7: Include PTE_SHARE convention if ( !(perm & PTE_SHARE) && (((perm & PTE_W) == PTE_W) || ((perm & PTE_COW) == PTE_COW))) { if (debug) cprintf("\n duppage: 4\n"); // perm = PTE_P | PTE_U | PTE_COW; // buggy permissions, removed in LAB 7 perm &= ~PTE_W; // remove write from perm perm |= PTE_COW; // add copy-on-write if (debug) cprintf("\n duppage: 10\n"); if ((r = sys_page_map(0, va, envid, va, perm)) < 0) return r; if (debug) cprintf("\n duppage: 11\n"); if ((r = sys_page_map(0, va, 0, va, perm)) < 0) return r; if (debug) cprintf("\n duppage: 5\n"); } // LAB 7: Include PTE_SHARE convention else { if (debug) cprintf("\n duppage: 6\n"); if ((r = sys_page_map(0, va, envid, va, perm)) < 0) return r; if (debug) cprintf("\n duppage: 7\n"); } // cprintf("duppage() tried to copy a page which is neither PTE_W nor PTE_COW\n"); // panic("duppage not implemented"); return 0; }
// Map our virtual page pn (address pn*PGSIZE) into the target envid at the
// same virtual address, following the fork() COW convention:
//   - the exception stack is never duplicated (caller allocates a fresh one);
//   - PTE_SHARED pages keep their permissions in both environments;
//   - writable or COW pages are mapped COW in the child AND remapped COW in
//     the parent (child first, so an intervening parent write still faults);
//   - remaining present pages are shared read-only.
// Returns 0 on success; panics on mapping failure.
static int
duppage(envid_t envid, unsigned pn)
{
	int r;
	uint32_t perm = 0;
	void *va = (void *)(pn * PGSIZE);

	// Make sure that this page is not the exception stack
	if (pn * PGSIZE >= (UXSTACKTOP - PGSIZE) && pn * PGSIZE <= UXSTACKTOP) {
		return 0;
	}

	// Nothing to do for pages that are not present.
	if ((vpt[pn] & PTE_P) != PTE_P)
		return 0;

	perm = vpt[pn] & 0xFFF;		// Get last 12 bits (permission field)

	if ((vpt[pn] & PTE_SHARED) == PTE_SHARED) {
		// Shared page: copy the mapping verbatim into the child.
		r = sys_page_map(0, va, envid, va, perm | PTE_P);
		if (r < 0)
			panic("Unable to map a page %d\n", pn);
	} else if (perm & (PTE_W | PTE_COW)) {
		// BUG FIX: writable/COW pages were previously skipped entirely,
		// so the child never received the parent's data pages.
		// Map COW into the child first...
		r = sys_page_map(0, va, envid, va, PTE_P | PTE_U | PTE_COW);
		if (r < 0)
			panic("Unable to map a page %d\n", pn);
		// ...then downgrade the parent's own mapping to COW.
		r = sys_page_map(0, va, 0, va, PTE_P | PTE_U | PTE_COW);
		if (r < 0)
			panic("Unable to map a page %d\n", pn);
	} else {
		// BUG FIX: read-only pages were also skipped; share them
		// directly (safe since neither side can write).
		r = sys_page_map(0, va, envid, va, PTE_P | PTE_U);
		if (r < 0)
			panic("Unable to map a page %d\n", pn);
	}

	// LAB 4: Your code here.
	//panic("duppage not implemented");
	return 0;
}
static int map_segment(envid_t child, uintptr_t va, size_t memsz, int fd, size_t filesz, off_t fileoffset, int perm) { int i, r; void *blk; //cprintf("map_segment %x+%x\n", va, memsz); if ((i = PGOFF(va))) { va -= i; memsz += i; filesz += i; fileoffset -= i; } for (i = 0; i < memsz; i += PGSIZE) { if (i >= filesz) { // allocate a blank page if ((r = sys_page_alloc(0, UTEMP, perm)) < 0) { return r; } memset(UTEMP, 0, PGSIZE); sys_page_map(0, UTEMP, child, (void *)(va+i), perm); return r; } else { // from file if (perm & PTE_W) { // must make a copy so it can be writable if ((r = sys_page_alloc(0, UTEMP, PTE_P|PTE_U|PTE_W)) < 0) return r; if ((r = seek(fd, fileoffset + i)) < 0) return r; if ((r = read(fd, UTEMP, MIN(PGSIZE, filesz-i))) < 0) return r; memset(UTEMP+MIN(PGSIZE, filesz-i), 0, PGSIZE-MIN(PGSIZE, filesz-i)); if ((r = sys_page_map(0, UTEMP, child, (void*) (va + i), perm)) < 0) panic("spawn: sys_page_map data: %e", r); sys_page_unmap(0, UTEMP); } else { // can map buffer cache read only if ((r = read_map(fd, fileoffset + i, &blk)) < 0) return r; if ((r = sys_page_map(0, blk, child, (void*) (va + i), perm)) < 0) panic("spawn: sys_page_map text: %e", r); } } } return 0; }
// // Custom page fault handler - if faulting page is copy-on-write, // map in our own private writable copy. // static void pgfault(struct UTrapframe *utf) { void *addr = (void *) utf->utf_fault_va; uint32_t err = utf->utf_err; int r; // Check that the faulting access was (1) a write, and (2) to a // copy-on-write page. If not, panic. // Hint: // Use the read-only page table mappings at uvpt // (see <inc/memlayout.h>). // LAB 4: Your code here. if ((err & FEC_WR) == 0 || (vpd[VPD(addr)] & PTE_P) == 0 || (vpt[VPN(addr)] & PTE_COW) == 0) panic ("pgfault: not a write or attempting to access a non-COW page"); // Allocate a new page, map it at a temporary location (PFTEMP), // copy the data from the old page to the new page, then move the new // page to the old page's address. // Hint: // You should make three system calls. // No need to explicitly delete the old page's mapping. // LAB 4: Your code here. if ((r = sys_page_alloc (0, (void *)PFTEMP, PTE_U|PTE_P|PTE_W)) < 0) panic ("pgfault: page allocation failed : %e", r); addr = ROUNDDOWN (addr, PGSIZE); memmove (PFTEMP, addr, PGSIZE); if ((r = sys_page_map (0, PFTEMP, 0, addr, PTE_U|PTE_P|PTE_W)) < 0) panic ("pgfault: page mapping failed : %e", r); //panic("pgfault not implemented"); }
// Copy the mappings for shared pages into the child address space. static int copy_shared_pages(envid_t child) { // LAB 7: Your code here. int r; int pdeno, pteno; uint32_t pn = 0; for (pdeno = 0; pdeno < VPD(UTOP); pdeno++) { if (vpd[pdeno] == 0) { // skip empty PDEs pn += NPTENTRIES; continue; } for (pteno = 0; pteno < NPTENTRIES; pteno++,pn++) { if (vpt[pn] == 0) // skip empty PTEs continue; int perm = vpt[pn] & PTE_USER; if (perm & PTE_SHARE) { void *addr = (void *)(pn << PGSHIFT); r = sys_page_map(0, addr, child, addr, perm); if (r) return r; } } } return 0; }
// Make sure a particular disk block is loaded into memory. // Returns 0 on success, or a negative error code on error. // // If blk != 0, set *blk to the address of the block in memory. // // Hint: Use diskaddr, map_block, and ide_read. static int read_block(uint32_t blockno, char **blk) { int r; char *addr; if (super && blockno >= super->s_nblocks) panic("reading non-existent block %08x\n", blockno); if (bitmap && block_is_free(blockno)) panic("reading free block %08x\n", blockno); // LAB 5: Your code here. r = map_block(blockno); if (r) return r; addr = diskaddr(blockno); r = ide_read(blockno * BLKSECTS, addr, BLKSECTS); if (r) return r; if (blk) *blk = addr; return sys_page_map(0, addr, 0, addr, vpt[VPN(addr)] & PTE_USER); }
// Flush the contents of the block containing VA out to disk if // necessary, then clear the PTE_D bit using sys_page_map. // If the block is not in the block cache or is not dirty, does // nothing. // Hint: Use va_is_mapped, va_is_dirty, and ide_write. // Hint: Use the PTE_USER constant when calling sys_page_map. // Hint: Don't forget to round addr down. void flush_block(void *addr) { uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE; uint32_t secno, r; if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE)) panic("flush_block of bad va %08x", addr); // LAB 5: Your code here. addr = ROUNDDOWN(addr,PGSIZE); secno = blockno*BLKSECTS; if(va_is_mapped(addr) && va_is_dirty(addr)) { r = ide_write(secno, addr, BLKSECTS); if(r) panic("IDE write failed: %e", r); //cprintf("\nClearing PTE_D : 0x%08x\n", vpt[VPN(addr)] & (~PTE_D)); r = sys_page_map(0,addr,0,addr, ((vpt[VPN(addr)] | PTE_USER) & (~PTE_D))); // & PTE_USER if(r) cprintf("\nPage Mapping failed %e\n", r); } //panic("flush_block not implemented"); }
// Copy the mappings for shared pages into the child address space. static int copy_shared_pages(envid_t child) { // LAB 7: Your code here. uint64_t vaddr; int r; for(vaddr=0; vaddr<UTOP; vaddr += PGSIZE) { // Copying from lib/fork.c bruh. if( // Gotta use the virtual types apparently. (vpml4e[VPML4E(vaddr)] & PTE_P) // Mota mota level is present. && ((vpde[VPDPE(vaddr)] & PTE_U) && (vpde[VPDPE(vaddr)] & PTE_P)) // Slighlt less mota level is also present. && ((vpd[VPD(vaddr)] & PTE_U) && (vpd[VPD(vaddr)] & PTE_P)) && ((vpt[VPN(vaddr)] & PTE_U) && (vpt[VPN(vaddr)] & PTE_P)) ) if(vpt[VPN(vaddr)]&PTE_SHARE) { r = sys_page_map(0, (void*)vaddr, child, (void*)vaddr, vpt[VPN(vaddr)] & PTE_SYSCALL); if(r<0) { cprintf("WARN: Your environments are now throughly done for man.\n"); return -1; } } } return 0; }
// // User-level fork with copy-on-write. // Set up our page fault handler appropriately. // Create a child. // Copy our address space and page fault handler setup to the child. // Then mark the child as runnable and return. // // Returns: child's envid to the parent, 0 to the child, < 0 on error. // It is also OK to panic on error. // // Hint: // Use vpd, vpt, and duppage. // Remember to fix "thisenv" in the child process. // Neither user exception stack should ever be marked copy-on-write, // so you must allocate a new page for the child's user exception stack. // envid_t fork(void) { // LAB 4: Your code here. envid_t envid; uint64_t addr; uint32_t err; extern unsigned char end[]; int r; set_pgfault_handler(pgfault); envid = sys_exofork(); if (envid < 0) panic("sys_exofork: %e", envid); if (envid == 0) { // We're the child. // The copied value of the global variable 'thisenv' // is no longer valid (it refers to the parent!). // Fix it and return 0. thisenv = &envs[ENVX(sys_getenvid())]; return 0; } //Allocate exception stack for the child if ((err = sys_page_alloc(envid, (void *) (UXSTACKTOP - PGSIZE), PTE_P|PTE_U|PTE_W)) < 0) panic("Error in sys_page_alloc: %e", err); // We're the parent. // Map our entire address space into the child. for (addr = UTEXT; addr < USTACKTOP-PGSIZE; addr += PGSIZE) { if((vpml4e[VPML4E(addr)] & PTE_P) && (vpde[VPDPE(addr)] & PTE_P) && (vpd[VPD(addr)] & PTE_P) && (vpt[VPN(addr)] & PTE_P)) { duppage(envid, VPN(addr)); } } //Allocate a new stack for the child and copy the contents of parent on to it. 
addr = USTACKTOP-PGSIZE; if ((r = sys_page_alloc(0, (void *)PFTEMP, PTE_P|PTE_U|PTE_W)) < 0) panic("sys_page_alloc failed: %e\n", r); memcpy(PFTEMP, (void *) ROUNDDOWN(addr, PGSIZE), PGSIZE); void *vaTemp = (void *) ROUNDDOWN(addr, PGSIZE); if ((r = sys_page_map(0, (void *)PFTEMP, envid, vaTemp, PTE_P|PTE_U|PTE_W)) < 0) panic("sys_page_map failed: %e\n", r); if ((r = sys_page_unmap(0, (void *)PFTEMP)) < 0) panic("sys_page_unmap failed: %e\n", r); //Set child's page fault handler if ((err = sys_env_set_pgfault_upcall(envid, _pgfault_upcall) < 0)) panic("Error in sys_env_set_pgfault_upcall: %e",err); //Set the child ready to run if ((err = sys_env_set_status(envid, ENV_RUNNABLE)) < 0) panic("sys_env_set_status: %e", err); return envid; panic("fork not implemented"); }
// Copy the mappings for shared pages into the child address space. static int copy_shared_pages(envid_t child) { int pn, perm; int retval = 0; // Step through each page below UTOP. If the page is PTE_SHARE, // then copy the mapping of that page to the child environment. for(pn = 0; pn < PGNUM(UTOP); pn++) { // Check to see if the page directory entry and page table // entry for this page exist, and if the page is marked // PTE_SHARE. if((uvpd[PDX(pn*PGSIZE)]&PTE_P) == 0 || (uvpt[pn]&PTE_P) == 0 || (uvpt[pn]&PTE_SHARE) == 0) continue; // Grab the permissions for the page perm = uvpt[pn]&PTE_SYSCALL; // Copy the current page number over if((retval = sys_page_map(0, (void *)(pn*PGSIZE), child, (void *)(pn*PGSIZE), perm)) != 0) break; } return 0; }
// Flush the contents of the block containing VA out to disk if // necessary, then clear the PTE_D bit using sys_page_map. // If the block is not in the block cache or is not dirty, does // nothing. // Hint: Use va_is_mapped, va_is_dirty, and ide_write. // Hint: Use the PTE_SYSCALL constant when calling sys_page_map. // Hint: Don't forget to round addr down. void flush_block(void *addr) { uint32_t blockno = ((uint32_t)addr - DISKMAP) / BLKSIZE; int r; if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE)) panic("flush_block of bad va %08x", addr); // LAB 5: Your code here. addr = (void *)ROUNDDOWN(addr, PGSIZE); if (!va_is_mapped(addr) || !va_is_dirty(addr)) return; if ((r = ide_write(BLKSECTS * blockno, addr, BLKSECTS)) < 0) { panic("flush_block: ide_write error %e\n", r); } if ((r = sys_page_map(0, addr, 0, addr, uvpt[PGNUM(addr)] & PTE_SYSCALL & ~PTE_D))) { panic("flush_block: sys_page_map error %e\n", r); } return; }
// Copy the current contents of the block out to disk. // Then clear the PTE_D bit using sys_page_map. // Hint: Use ide_write. // Hint: Use the PTE_USER constant when calling sys_page_map. void write_block(uint32_t blockno) { char *addr; if (!block_is_mapped(blockno)) panic("write unmapped block %08x", blockno); // Write the disk block and clear PTE_D. // LAB 5: Your code here. // We will use the VM hardware to keep track of whether a // disk block has been modified since it was last read from // or written to disk. To see whether a block needs writing, // we can just look to see if the PTE_D "dirty" bit is set // in the vpt entry. addr = diskaddr(blockno); if(!va_is_dirty(addr)) return; int error; int secno = blockno*BLKSECTS; error = ide_write(secno, addr, BLKSECTS); if(error<0) panic("write block error on writing"); int env_id = sys_getenvid(); error = sys_page_map(env_id, addr, env_id, addr, ((PTE_U|PTE_P|PTE_W) & ~PTE_D)); if(error<0) panic("write block error on clearing PTE_D"); // panic("write_block not implemented"); }
// Dispatches to the correct kernel function, passing the arguments. int64_t syscall(uint64_t syscallno, uint64_t a1, uint64_t a2, uint64_t a3, uint64_t a4, uint64_t a5) { // Call the function corresponding to the 'syscallno' parameter. // Return any appropriate return value. // LAB 3: Your code here. // panic("syscall not implemented"); switch (syscallno) { case SYS_cputs: sys_cputs((const char *)a1, (size_t)a2); return 0; case SYS_cgetc: return sys_cgetc(); case SYS_getenvid: return sys_getenvid(); case SYS_env_destroy: return sys_env_destroy(a1); case SYS_yield: sys_yield(); return 0; case SYS_page_alloc: return sys_page_alloc((envid_t)a1, (void *)a2,(int)a3); case SYS_page_map: return sys_page_map((envid_t)a1, (void *)a2, (envid_t)a3, (void *)a4, (int)a5); case SYS_page_unmap: return sys_page_unmap((envid_t)a1, (void *)a2); case SYS_exofork: return sys_exofork(); case SYS_env_set_status: return sys_env_set_status((envid_t)a1, a2); case SYS_env_set_pgfault_upcall: return sys_env_set_pgfault_upcall((envid_t)a1,(void *)a2); case SYS_ipc_try_send: return sys_ipc_try_send((envid_t)a1, (uint32_t)a2, (void *)a3, a4); case SYS_ipc_recv: return sys_ipc_recv((void *)a1); case SYS_env_set_trapframe: return sys_env_set_trapframe((envid_t)a1, (struct Trapframe *)a2); case SYS_time_msec: return sys_time_msec(); case SYS_packet_transmit: return sys_packet_transmit((char*)a1,(size_t)a2); case SYS_packet_receive: return sys_packet_receive((char *)a1); //lab 7 code from here case SYS_insmod: return sys_insmod((char *)a1, (char *)a2,(char *)a3); case SYS_rmmod: return sys_rmmod((char *)a1); case SYS_lsmod: return sys_lsmod(); case SYS_depmod: return sys_depmod((char *)a1); //lab7 code ends here default: return -E_NO_SYS; } }
// Try to send 'value' to the target env 'envid'. // If srcva < UTOP, then also send page currently mapped at 'srcva', // so that receiver gets a duplicate mapping of the same page. // // The send fails with a return value of -E_IPC_NOT_RECV if the // target is not blocked, waiting for an IPC. // // The send also can fail for the other reasons listed below. // // Otherwise, the send succeeds, and the target's ipc fields are // updated as follows: // env_ipc_recving is set to 0 to block future sends; // env_ipc_from is set to the sending envid; // env_ipc_value is set to the 'value' parameter; // env_ipc_perm is set to 'perm' if a page was transferred, 0 otherwise. // The target environment is marked runnable again, returning 0 // from the paused sys_ipc_recv system call. (Hint: does the // sys_ipc_recv function ever actually return?) // // If the sender wants to send a page but the receiver isn't asking for one, // then no page mapping is transferred, but no error occurs. // The ipc only happens when no errors occur. // // Returns 0 on success, < 0 on error. // Errors are: // -E_BAD_ENV if environment envid doesn't currently exist. // (No need to check permissions.) // -E_IPC_NOT_RECV if envid is not currently blocked in sys_ipc_recv, // or another environment managed to send first. // -E_INVAL if srcva < UTOP but srcva is not page-aligned. // -E_INVAL if srcva < UTOP and perm is inappropriate // (see sys_page_alloc). // -E_INVAL if srcva < UTOP but srcva is not mapped in the caller's // address space. // -E_INVAL if (perm & PTE_W), but srcva is read-only in the // current environment's address space. // -E_NO_MEM if there's not enough memory to map srcva in envid's // address space. static int sys_ipc_try_send(envid_t envid, uint32_t value, void *srcva, unsigned perm) { // LAB 4: Your code here. 
// My code : alaud //cprintf("Trying to send to %x, for %x, nice %x\n", envid,curenv->env_id, curenv->env_nice); int status = 0; struct Env *target_env; if((status = envid2env(envid, &target_env, 0)) < 0) { return status; } if(!target_env -> env_ipc_recving) { return -E_IPC_NOT_RECV; } //cprintf("Still Trying to send to %x, for %x, nice %x\n", envid,curenv->env_id, curenv->env_nice); target_env -> env_ipc_perm = 0; if((uint32_t)srcva < UTOP && (uint32_t)target_env -> env_ipc_dstva < UTOP) { if((status = sys_page_map(curenv -> env_id, srcva, envid, target_env -> env_ipc_dstva, perm)) < 0) { return status; } target_env -> env_ipc_perm = perm; } //cprintf("Still Still Trying to send to %x, for %x, nice %x\n", envid,curenv->env_id, curenv->env_nice); target_env -> env_ipc_value = value; target_env -> env_ipc_recving = 0; target_env -> env_ipc_from = curenv -> env_id; target_env -> env_status = ENV_RUNNABLE; return 0; //panic("sys_ipc_try_send not implemented"); }
// Try to send 'value' to the target env 'envid'. // If va != 0, then also send page currently mapped at 'va', // so that receiver gets a duplicate mapping of the same page. // // The send fails with a return value of -E_IPC_NOT_RECV if the // target has not requested IPC with sys_ipc_recv. // // Otherwise, the send succeeds, and the target's ipc fields are // updated as follows: // env_ipc_recving is set to 0 to block future sends; // env_ipc_from is set to the sending envid; // env_ipc_value is set to the 'value' parameter; // env_ipc_perm is set to 'perm' if a page was transferred, 0 otherwise. // The target environment is marked runnable again, returning 0 // from the paused ipc_recv system call. // // If the sender sends a page but the receiver isn't asking for one, // then no page mapping is transferred, but no error occurs. // The ipc doesn't happen unless no errors occur. // // Returns 0 on success where no page mapping occurs, // 1 on success where a page mapping occurs, and < 0 on error. // Errors are: // -E_BAD_ENV if environment envid doesn't currently exist. // (No need to check permissions.) // -E_IPC_NOT_RECV if envid is not currently blocked in sys_ipc_recv, // or another environment managed to send first. // -E_INVAL if srcva < UTOP but srcva is not page-aligned. // -E_INVAL if srcva < UTOP and perm is inappropriate // (see sys_page_alloc). // -E_INVAL if srcva < UTOP but srcva is not mapped in the caller's // address space. // -E_NO_MEM if there's not enough memory to map srcva in envid's // address space. static int sys_ipc_try_send(envid_t envid, uint32_t value, void *srcva, unsigned perm) { // LAB 4: Your code here. 
int r = 0; struct Env *dstEnv = NULL; if ((r=envid2env(envid, &dstEnv, 0)) < 0) { return r; } if (!dstEnv->env_ipc_recving) { return -E_IPC_NOT_RECV; } dstEnv->env_ipc_recving = 0; dstEnv->env_ipc_from = curenv->env_id; dstEnv->env_ipc_value = value; dstEnv->env_status = ENV_RUNNABLE; if ((uint32_t)srcva < UTOP && (uint32_t)(dstEnv->env_ipc_dstva) < UTOP) { if ((r = sys_page_map(0, srcva, envid, dstEnv->env_ipc_dstva, perm)) < 0) { return r; } dstEnv->env_ipc_perm = perm; } else { dstEnv->env_ipc_perm = 0; } return 0; //panic("sys_ipc_try_send not implemented"); }
// Copy the mappings for shared pages into the child address space. static int copy_shared_pages(envid_t child) { // LAB 7: Your code here. int r; uint32_t i, j, pn; // Copy shared address space to child for (i = PDX(UTEXT); i < PDX(UXSTACKTOP); i++) { if (vpd[i] & PTE_P) { // If page table present for (j = 0; j < NPTENTRIES; j++) { pn = PGNUM(PGADDR(i, j, 0)); if (pn == PGNUM(UXSTACKTOP - PGSIZE)) { break; // Don't map when reach uxstack } if ((vpt[pn] & PTE_P) && (vpt[pn] & PTE_SHARE)) { if ((r = sys_page_map(0, (void *) (pn * PGSIZE), child, (void *) (pn * PGSIZE), vpt[pn] & PTE_SYSCALL)) < 0) { return r; } } } } } return 0; }
// // Custom page fault handler - if faulting page is copy-on-write, // map in our own private writable copy. // static void pgfault(struct UTrapframe *utf) { void *addr = (void *) utf->utf_fault_va; uint32_t err = utf->utf_err; int r; // Check that the faulting access was (1) a write, and (2) to a // copy-on-write page. If not, panic. // Hint: // Use the read-only page table mappings at uvpt // (see <inc/memlayout.h>). if (((err & FEC_WR) == 0) || ((uvpd[PDX(addr)] & PTE_P)==0) || ((uvpt[PGNUM(addr)] & PTE_COW)==0) ) panic("Page fault in lib/fork.c!\n"); // LAB 4: Your code here. // Allocate a new page, map it at a temporary location (PFTEMP), // copy the data from the old page to the new page, then move the new // page to the old page's address. // Hint: // You should make three system calls. if ((r = sys_page_alloc(0, (void*)PFTEMP, PTE_U|PTE_P|PTE_W)) <0) panic("alloc page error in lib/fork.c\n"); addr = ROUNDDOWN(addr, PGSIZE); memcpy(PFTEMP, addr, PGSIZE); if ((r = sys_page_map(0, PFTEMP, 0, addr, PTE_U|PTE_P|PTE_W)) <0) panic("page map error in lib/fork.c\n"); if ((r = sys_page_unmap(0, PFTEMP)) <0) panic("page unmap error in lib/fork.c\n"); // LAB 4: Your code here. }
// Flush the contents of the block containing VA out to disk if // necessary, then clear the PTE_D bit using sys_page_map. // If the block is not in the block cache or is not dirty, does // nothing. // Hint: Use va_is_mapped, va_is_dirty, and ide_write. // Hint: Use the PTE_SYSCALL constant when calling sys_page_map. // Hint: Don't forget to round addr down. void flush_block(void *addr) { uint64_t blockno = ((uint64_t)addr - DISKMAP) / BLKSIZE; int r; if (addr < (void*)DISKMAP || addr >= (void*)(DISKMAP + DISKSIZE)) panic("flush_block of bad va %08x", addr); // LAB 5: Your code here. //panic("flush_block not implemented"); if(va_is_mapped(addr) == false || va_is_dirty(addr) == false) { return; } addr = ROUNDDOWN(addr, PGSIZE); #ifdef VMM_GUEST if(0 != host_write((uint32_t) (blockno * BLKSECTS), (void*)addr, BLKSECTS)) { panic("ide read failed in Page Fault Handling"); } #else if(0 != ide_write((uint32_t) (blockno * BLKSECTS), (void*)addr, BLKSECTS)) { panic("ide write failed in Flush Block"); } #endif if ((r = sys_page_map(0, addr, 0, addr, PTE_SYSCALL)) < 0) { panic("in flush_block, sys_page_map: %e", r); } }
// Copy the mappings for shared pages into the child address space. static int copy_shared_pages(envid_t child) { // LAB 5: Your code here. int i,j,ret; uintptr_t addr; envid_t curr_envid = sys_getenvid(); for(i=0;i<PDX(UTOP);i++) { if(uvpd[i] & PTE_P && i != PDX(UVPT)) { addr = i << PDXSHIFT; for(j=0;j<NPTENTRIES;j++) { addr = (i<<PDXSHIFT)+(j<<PGSHIFT); if((uvpt[addr>>PGSHIFT] & PTE_P) && (uvpt[addr>>PGSHIFT] & PTE_SHARE)) { ret = sys_page_map(curr_envid, (void *)addr, child,(void *)addr,PTE_AVAIL|PTE_P|PTE_U|PTE_W); if(ret) panic("sys_page_map: %e", ret); //cprintf("addr %x is shared\n",addr); } } } }