// // Check that an environment is allowed to access the range of memory // [va, va+len) with permissions 'perm | PTE_P'. // Normally 'perm' will contain PTE_U at least, but this is not required. // 'va' and 'len' need not be page-aligned; you must test every page that // contains any of that range. You will test either 'len/PGSIZE', // 'len/PGSIZE + 1', or 'len/PGSIZE + 2' pages. // // A user program can access a virtual address if (1) the address is below // ULIM, and (2) the page table gives it permission. These are exactly // the tests you should implement here. // // If there is an error, set the 'user_mem_check_addr' variable to the first // erroneous virtual address. // // Returns 0 if the user program can access this range of addresses, // and -E_FAULT otherwise. // int user_mem_check(struct Env *env, const void *va, size_t len, int perm) { // LAB 3: Your code here. pte_t* ppte = NULL; void* addr = 0; void* end = (void*)ROUNDUP((uint64_t)va + len, PGSIZE); if(((uint64_t)va + len) >= ULIM){ user_mem_check_addr = (uint64_t)va; return -E_FAULT; } for(addr = (void*)va; addr < end; addr += PGSIZE){ ppte = pml4e_walk(env->env_pml4e, addr, 0); if((ppte == NULL) || (((*ppte) & (perm | PTE_P)) != (perm | PTE_P))){ cprintf("*ppte = %0x, (perm|PTE_P) = %0x\n", *ppte, perm|PTE_P); user_mem_check_addr = (addr == va)? (uint64_t)va: ROUNDDOWN((uint64_t)addr, PGSIZE); return -E_FAULT; } } return 0; }
// Find the final ept entry for a given guest physical address, // creating any missing intermediate extended page tables if create is non-zero. // // If epte_out is non-NULL, store the found epte_t* at this address. // // Return 0 on success. // // Error values: // -E_INVAL if eptrt is NULL // -E_NO_ENT if create == 0 and the intermediate page table entries are missing. // -E_NO_MEM if allocation of intermediate page table entries fails. // // Hint: Set the permissions of intermediate ept entries to __EPTE_FULL. // The hardware ANDs the permissions at each level, so removing a permission // bit at the last level entry is sufficient (and the bookkeeping is much simpler). static int ept_lookup_gpa(epte_t* eptrt, void *gpa, int create, epte_t **epte_out) { epte_t * pgTblIndexPtr = NULL; struct PageInfo * page = NULL; /* Your code here */ if(eptrt == NULL){ cprintf("ept_lookup_gpa : 1\n"); return -E_INVAL; } pgTblIndexPtr = pml4e_walk(eptrt, gpa, create); if(pgTblIndexPtr == NULL){ cprintf("ept_lookup_gpa : 2\n"); if(create == 0) return -E_NO_ENT; else return -E_NO_MEM; } page = pa2page(*pgTblIndexPtr); if(epte_out) { *epte_out = pgTblIndexPtr; } return 0; //panic("ept_lookup_gpa not implemented\n"); //return 0; }
// // Map the physical page 'pp' at virtual address 'va'. // The permissions (the low 12 bits) of the page table entry // should be set to 'perm|PTE_P'. // // Requirements // - If there is already a page mapped at 'va', it should be page_remove()d. // - If necessary, on demand, a page table should be allocated and inserted // into 'pml4e through pdpe through pgdir'. // - pp->pp_ref should be incremented if the insertion succeeds. // - The TLB must be invalidated if a page was formerly present at 'va'. // // Corner-case hint: Make sure to consider what happens when the same // pp is re-inserted at the same virtual address in the same pgdir. // However, try not to distinguish this case in your code, as this // frequently leads to subtle bugs; there's an elegant way to handle // everything in one code path. // // RETURNS: // 0 on success // -E_NO_MEM, if page table couldn't be allocated // // Hint: The TA solution is implemented using pml4e_walk, page_remove, // and page2pa. // int page_insert(pml4e_t *pml4e, struct Page *pp, void *va, int perm) { // Fill this function in //cprintf("called page_insert, va = %0x, page pa = %0x\n", va, page2pa(pp)); //pdpe_t * pdpe = (pdpe_t *)(KADDR((PTE_ADDR(pml4e[PML4(va)])))); pte_t *pte = pml4e_walk(pml4e, va, 1); //cprintf("pml4e_walk(), result = %0x, value there = %0x\n", pte, *pte); if (pte == 0) { return -E_NO_MEM; } else if(PTE_ADDR(*pte) != page2pa(pp)){ if((*pte) != 0){ page_remove(pml4e, va); } *pte = page2pa(pp); pp->pp_ref++; } else { *pte &= ~0x00000FFF; } // set the permission *pte |= (perm | PTE_P); return 0; }
// // Map [va, va+size) of virtual address space to physical [pa, pa+size) // in the page table rooted at pml4e. Size is a multiple of PGSIZE. // Use permission bits perm|PTE_P for the entries. // // This function is only intended to set up the ``static'' mappings // above UTOP. As such, it should *not* change the pp_ref field on the // mapped pages. // // Hint: the TA solution uses pml4e_walk void boot_map_segment(pml4e_t *pml4e, uintptr_t la, size_t size, physaddr_t pa, int perm) { // Fill this function in pte_t* ppte = NULL; uintptr_t virtual_addr = 0; physaddr_t physical_addr = 0; for(virtual_addr = la, physical_addr = pa; virtual_addr - la < size; virtual_addr += PGSIZE, physical_addr += PGSIZE){ ppte = pml4e_walk(pml4e, (void*)virtual_addr, 1); *ppte = physical_addr | perm | PTE_P; } }
// // Return the page mapped at virtual address 'va'. // If pte_store is not zero, then we store in it the address // of the pte for this page. This is used by page_remove and // can be used to verify page permissions for syscall arguments, // but should not be used by most callers. // // Return NULL if there is no page mapped at va. // // Hint: the TA solution uses pml4e_walk and pa2page. // struct Page * page_lookup(pml4e_t *pml4e, void *va, pte_t **pte_store) { // Fill this function in pte_t* pte = pml4e_walk(pml4e, va, 0); struct Page* pp = NULL; if(pte != 0){ pp = pa2page(*pte); if(pte_store != 0){ *pte_store = pte; } } return pp; }
//
// Map the host physical page 'pp' at guest physical address 'gpa' in the
// extended page table rooted at eptrt, with EPT permissions 'perm'
// (read access and IPAT are always added).
//
// Mirrors page_insert: an existing different mapping at 'gpa' is removed
// first, and pp->pp_ref is incremented on a new insertion.  Re-inserting
// the same page at the same gpa only refreshes the permissions.
//
// RETURNS:
//   0 on success
//   -E_INVAL if eptrt or pp is NULL
//   -E_NO_MEM if an intermediate EPT table couldn't be allocated
//
int ept_page_insert(epte_t* eptrt, struct PageInfo* pp, void* gpa, int perm) {
	/* Your code here */
	epte_t *pgTblIndexPtr;
	bool present;

	if (eptrt == NULL || pp == NULL)
		return -E_INVAL;

	// Find (or create) the last-level EPT entry for 'gpa'.
	pgTblIndexPtr = pml4e_walk(eptrt, gpa, 1);
	if (!pgTblIndexPtr)
		return -E_NO_MEM;

	// Every guest mapping must at least be readable; __EPTE_READ doubles
	// as the "present" bit for EPT entries.
	perm = perm | __EPTE_READ | __EPTE_IPAT;

	present = *pgTblIndexPtr & __EPTE_READ;
	if (present && pa2page(PTE_ADDR(*pgTblIndexPtr)) == pp) {
		// Same page re-inserted at the same gpa: refresh permissions
		// only; the ref count is already correct.
		*pgTblIndexPtr = (page2pa(pp) | perm);
		return 0;
	}
	// A different page is mapped here — evict it (drops its ref count).
	else if (present) {
		page_remove(eptrt, gpa);
	}

	// Install the new mapping and take a reference on pp.
	*pgTblIndexPtr = (page2pa(pp) | perm);
	pp->pp_ref += 1;
	return 0;
}
// check page_insert, page_remove, &c
//
// Self-test of the page-table primitives.  It temporarily empties the free
// list so every allocation below is fully accounted for, then exercises
// page_insert / page_lookup / page_remove / pml4e_walk corner cases, and
// finally mmio_map_region.  Order matters throughout: each assert depends
// on the ref-count state left by the calls before it.
static void page_check(void)
{
	struct Page *pp0, *pp1, *pp2, *pp3, *pp4, *pp5;
	struct Page * fl;
	pte_t *ptep, *ptep1;
	pdpe_t *pdpe;
	pde_t *pde;
	void *va;
	int i;
	uintptr_t mm1, mm2;
	pp0 = pp1 = pp2 = pp3 = pp4 = pp5 = 0;
	assert(pp0 = page_alloc(0));
	assert(pp1 = page_alloc(0));
	assert(pp2 = page_alloc(0));
	assert(pp3 = page_alloc(0));
	assert(pp4 = page_alloc(0));
	assert(pp5 = page_alloc(0));
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(pp3 && pp3 != pp2 && pp3 != pp1 && pp3 != pp0);
	assert(pp4 && pp4 != pp3 && pp4 != pp2 && pp4 != pp1 && pp4 != pp0);
	assert(pp5 && pp5 != pp4 && pp5 != pp3 && pp5 != pp2 && pp5 != pp1 && pp5 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = NULL;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(boot_pml4e, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	// (still fails: 4-level paging needs three intermediate tables)
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) < 0);
	page_free(pp2);
	page_free(pp3);
	// with three pages free the pml4e/pdpe/pde chain can be built
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) == 0);
	assert((PTE_ADDR(boot_pml4e[0]) == page2pa(pp0) || PTE_ADDR(boot_pml4e[0]) == page2pa(pp2) || PTE_ADDR(boot_pml4e[0]) == page2pa(pp3)));
	assert(check_va2pa(boot_pml4e, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);
	assert(pp2->pp_ref == 1);

	// should be able to map pp3 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp3 at PGSIZE because it's already there
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);

	// pp3 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	pdpe = KADDR(PTE_ADDR(boot_pml4e[PML4(PGSIZE)]));
	pde = KADDR(PTE_ADDR(pdpe[PDPE(PGSIZE)]));
	ptep = KADDR(PTE_ADDR(pde[PDX(PGSIZE)]));
	assert(pml4e_walk(boot_pml4e, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, PTE_U) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);
	assert(*pml4e_walk(boot_pml4e, (void*) PGSIZE, 0) & PTE_U);
	assert(boot_pml4e[0] & PTE_U);

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(boot_pml4e, pp0, (void*) PTSIZE, 0) < 0);

	// insert pp1 at PGSIZE (replacing pp3)
	assert(page_insert(boot_pml4e, pp1, (void*) PGSIZE, 0) == 0);
	assert(!(*pml4e_walk(boot_pml4e, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE
	assert(check_va2pa(boot_pml4e, 0) == page2pa(pp1));
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp3->pp_ref == 1);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(boot_pml4e, 0x0);
	assert(check_va2pa(boot_pml4e, 0x0) == ~0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp3->pp_ref == 1);

	// Test re-inserting pp1 at PGSIZE.
	// Thanks to Varun Agrawal for suggesting this test case.
	assert(page_insert(boot_pml4e, pp1, (void*) PGSIZE, 0) == 0);
	assert(pp1->pp_ref);
	assert(pp1->pp_link == NULL);

	// unmapping pp1 at PGSIZE should free it
	page_remove(boot_pml4e, (void*) PGSIZE);
	assert(check_va2pa(boot_pml4e, 0x0) == ~0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp3->pp_ref == 1);

#if 0
	// should be able to page_insert to change a page
	// and see the new data immediately.
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(boot_pgdir, pp1, 0x0, 0);
	assert(pp1->pp_ref == 1);
	assert(*(int*)0 == 0x01010101);
	page_insert(boot_pgdir, pp2, 0x0, 0);
	assert(*(int*)0 == 0x02020202);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	page_remove(boot_pgdir, 0x0);
	assert(pp2->pp_ref == 0);
#endif

	// forcibly take pp3 back
	assert(PTE_ADDR(boot_pml4e[0]) == page2pa(pp3));
	boot_pml4e[0] = 0;
	assert(pp3->pp_ref == 1);
	page_decref(pp3);

	// check pointer arithmetic in pml4e_walk
	page_decref(pp0);
	page_decref(pp2);
	va = (void*)(PGSIZE * 100);
	ptep = pml4e_walk(boot_pml4e, va, 1);
	pdpe = KADDR(PTE_ADDR(boot_pml4e[PML4(va)]));
	pde = KADDR(PTE_ADDR(pdpe[PDPE(va)]));
	ptep1 = KADDR(PTE_ADDR(pde[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));

	// check that new page tables get cleared
	page_decref(pp4);
	// poison pp4 so a stale (uncleared) table is caught below
	memset(page2kva(pp4), 0xFF, PGSIZE);
	pml4e_walk(boot_pml4e, 0x0, 1);
	pdpe = KADDR(PTE_ADDR(boot_pml4e[0]));
	pde = KADDR(PTE_ADDR(pdpe[0]));
	ptep = KADDR(PTE_ADDR(pde[0]));
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	boot_pml4e[0] = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_decref(pp0);
	page_decref(pp1);
	page_decref(pp2);

	// test mmio_map_region
	mm1 = (uintptr_t) mmio_map_region(0, 4097);
	mm2 = (uintptr_t) mmio_map_region(0, 4096);
	// check that they're in the right region
	assert(mm1 >= MMIOBASE && mm1 + 8096 < MMIOLIM);
	assert(mm2 >= MMIOBASE && mm2 + 8096 < MMIOLIM);
	// check that they're page-aligned
	assert(mm1 % PGSIZE == 0 && mm2 % PGSIZE == 0);
	// check that they don't overlap
	assert(mm1 + 8096 <= mm2);
	// check page mappings
	assert(check_va2pa(boot_pml4e, mm1) == 0);
	assert(check_va2pa(boot_pml4e, mm1+PGSIZE) == PGSIZE);
	assert(check_va2pa(boot_pml4e, mm2) == 0);
	assert(check_va2pa(boot_pml4e, mm2+PGSIZE) == ~0);
	// check permissions
	assert(*pml4e_walk(boot_pml4e, (void*) mm1, 0) & (PTE_W|PTE_PWT|PTE_PCD));
	assert(!(*pml4e_walk(boot_pml4e, (void*) mm1, 0) & PTE_U));
	// clear the mappings
	*pml4e_walk(boot_pml4e, (void*) mm1, 0) = 0;
	*pml4e_walk(boot_pml4e, (void*) mm1 + PGSIZE, 0) = 0;
	*pml4e_walk(boot_pml4e, (void*) mm2, 0) = 0;

	cprintf("check_page() succeeded!\n");
}