//
// Frees env e and all memory it uses.
//
void
env_free(struct Env *e)
{
	pte_t *pt;
	uint32_t pdeno, pteno;
	physaddr_t pa;

	// If freeing the current environment, switch to boot_pgdir
	// before freeing the page directory, just in case the page
	// gets reused.
	if (e == curenv)
		lcr3(boot_cr3);

	// Note the environment's demise.
	// cprintf("[%08x] free env %08x\n", curenv ? curenv->env_id : 0, e->env_id);

	// Flush all mapped pages in the user portion of the address space
	static_assert(UTOP % PTSIZE == 0);
	for (pdeno = 0; pdeno < PDX(UTOP); pdeno++) {

		// only look at mapped page tables
		if (!(e->env_pgdir[pdeno] & PTE_P))
			continue;

		// find the pa and va of the page table
		pa = PTE_ADDR(e->env_pgdir[pdeno]);
		pt = (pte_t *) KADDR(pa);

		// unmap all PTEs in this page table
		for (pteno = 0; pteno <= PTX(~0); pteno++) {
			if (pt[pteno] & PTE_P)
				page_remove(e->env_pgdir, PGADDR(pdeno, pteno, 0));
		}

		// free the page table itself
		e->env_pgdir[pdeno] = 0;
		page_decref(pa2page(pa));
	}

	// free the page directory
	pa = e->env_cr3;
	e->env_pgdir = 0;
	e->env_cr3 = 0;
	page_decref(pa2page(pa));

	// return the environment to the free list
	e->env_status = ENV_FREE;
	LIST_INSERT_HEAD(&env_free_list, e, env_link);
}
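// For context: in JOS, env_free() is normally reached through env_destroy().
// A minimal sketch of that caller, as it looks in the single-environment
// lab 3 setup (later labs reschedule instead of spinning in the monitor):
void
env_destroy(struct Env *e)
{
	env_free(e);

	cprintf("Destroyed the only environment - nothing more to do!\n");
	while (1)
		monitor(NULL);
}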
// check_pgfault - check correctness of pgfault handler
static void check_pgfault(void)
{
#ifdef UCONFIG_CHECK_PGFAULT
	kprintf("starting check_pgfault()\n");

	size_t nr_used_pages_store = nr_used_pages();
	size_t slab_allocated_store = slab_allocated();

	check_mm_struct = mm_create();
	assert(check_mm_struct != NULL);

	struct mm_struct *mm = check_mm_struct;
	pgd_t *pgdir = mm->pgdir = init_pgdir_get();
	assert(pgdir[PGX(TEST_PAGE)] == 0);

	struct vma_struct *vma = vma_create(TEST_PAGE, TEST_PAGE + PTSIZE, VM_WRITE);
	assert(vma != NULL);

	insert_vma_struct(mm, vma);

	uintptr_t addr = TEST_PAGE + 0x100;
	assert(find_vma(mm, addr) == vma);

	int i, sum = 0;
	for (i = 0; i < 100; i++) {
		*(char *)(addr + i) = i;
		sum += i;
	}
	for (i = 0; i < 100; i++) {
		sum -= *(char *)(addr + i);
	}
	assert(sum == 0);

	page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
#if PMXSHIFT != PUXSHIFT
	free_page(pa2page(PMD_ADDR(*get_pmd(pgdir, addr, 0))));
#endif
#if PUXSHIFT != PGXSHIFT
	free_page(pa2page(PUD_ADDR(*get_pud(pgdir, addr, 0))));
#endif
	free_page(pa2page(PGD_ADDR(*get_pgd(pgdir, addr, 0))));
	pgdir[PGX(TEST_PAGE)] = 0;

	mm->pgdir = NULL;
	mm_destroy(mm);
	check_mm_struct = NULL;

	assert(nr_used_pages_store == nr_used_pages());
	assert(slab_allocated_store == slab_allocated());

	kprintf("check_pgfault() succeeded!\n");
#endif
}
/**
 * Check page table
 */
void check_pgdir(void)
{
	assert(npage <= KMEMSIZE / PGSIZE);
	assert(boot_pgdir != NULL && (uint32_t) PGOFF(boot_pgdir) == 0);
	assert(get_page(boot_pgdir, TEST_PAGE, NULL) == NULL);

	struct Page *p1, *p2;
	p1 = alloc_page();
	assert(page_insert(boot_pgdir, p1, TEST_PAGE, 0) == 0);

	pte_t *ptep, perm;
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE, 0)) != NULL);
	assert(pa2page(*ptep) == p1);
	assert(page_ref(p1) == 1);

	ptep = &((pte_t *) KADDR(PTE_ADDR(boot_pgdir[PDX(TEST_PAGE)])))[1];
	assert(get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0) == ptep);

	p2 = alloc_page();
	ptep_unmap(&perm);
	ptep_set_u_read(&perm);
	ptep_set_u_write(&perm);
	assert(page_insert(boot_pgdir, p2, TEST_PAGE + PGSIZE, perm) == 0);
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0)) != NULL);
	assert(ptep_u_read(ptep));
	assert(ptep_u_write(ptep));
	assert(ptep_u_read(&(boot_pgdir[PDX(TEST_PAGE)])));
	assert(page_ref(p2) == 1);

	assert(page_insert(boot_pgdir, p1, TEST_PAGE + PGSIZE, 0) == 0);
	assert(page_ref(p1) == 2);
	assert(page_ref(p2) == 0);
	assert((ptep = get_pte(boot_pgdir, TEST_PAGE + PGSIZE, 0)) != NULL);
	assert(pa2page(*ptep) == p1);
	assert(!ptep_u_read(ptep));

	page_remove(boot_pgdir, TEST_PAGE);
	assert(page_ref(p1) == 1);
	assert(page_ref(p2) == 0);

	page_remove(boot_pgdir, TEST_PAGE + PGSIZE);
	assert(page_ref(p1) == 0);
	assert(page_ref(p2) == 0);

	assert(page_ref(pa2page(boot_pgdir[PDX(TEST_PAGE)])) == 1);
	free_page(pa2page(boot_pgdir[PDX(TEST_PAGE)]));
	boot_pgdir[PDX(TEST_PAGE)] = 0;
	exit_range(boot_pgdir, TEST_PAGE, TEST_PAGE + PGSIZE);

	kprintf("check_pgdir() succeeded.\n");
}
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	pte_t *ptep = pgdir_walk(pgdir, va, true);
	if (!ptep)
		return -E_NO_MEM;

	// Only compare against pp when a page is actually present; a
	// non-present entry must not be mistaken for a mapping of pages[0].
	if (!(*ptep & PTE_P) || pa2page(*ptep) != pp) {
		// A different page (or nothing) is mapped here: remove it
		// and take a reference for the new mapping.
		page_remove(pgdir, va);
		pp->pp_ref++;
	} else {
		// Same page re-inserted: keep the existing reference, but
		// the permissions may change, so flush the stale TLB entry.
		tlb_invalidate(pgdir, va);
	}
	*ptep = page2pa(pp) | perm | PTE_P;

	// Loosen the page-directory entry's permissions as well, so the
	// PDE never blocks access that the PTE grants.
	pgdir[PDX(va)] |= perm;
	return 0;
}
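// The "elegant way" the hint above alludes to: bump pp_ref *before*
// page_remove(), so re-inserting the same page at the same va can never
// drop its refcount to zero mid-operation. A minimal sketch of that
// single code path (page_remove() already invalidates the TLB):
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	pte_t *ptep = pgdir_walk(pgdir, va, 1);
	if (!ptep)
		return -E_NO_MEM;
	pp->pp_ref++;			// take the reference first
	if (*ptep & PTE_P)
		page_remove(pgdir, va);	// decrefs the old page, flushes TLB
	*ptep = page2pa(pp) | perm | PTE_P;
	return 0;
}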
/**
 * Examine the e820 memory map, build the page descriptor table, and hand
 * the remaining free memory to the allocator.
 */
static void page_init(void)
{
	int i;
	int freemem_size = 0;

	/* Construct page descriptor table.
	 *   mem     => memory not reserved or occupied by kernel code
	 *   freemem => memory available after page descriptor table is built
	 */

	/* all pages from 0x100000 to the top should have an entry in the
	 * page descriptor table */
	for (i = 0; i < e820map.nr_map; i++) {
		mem_size += (uint32_t)(e820map.map[i].size);
		if (e820map.map[i].type == E820_ARM)
			freemem_size += e820map.map[i].size;
	}

	pages = (struct Page *)(uint32_t)(e820map.map[e820map.nr_map - 1].addr);
	npage = mem_size / PGSIZE;
	for (i = 0; i < npage; i++) {
		SetPageReserved(pages + i);
	}

	uintptr_t freemem = PADDR(ROUNDUP((uintptr_t)pages +
					  sizeof(struct Page) * npage, PGSIZE));
	uint32_t freemem_npage = freemem_size / PGSIZE -
				 npage * sizeof(struct Page) / PGSIZE;
	init_memmap(pa2page(freemem), freemem_npage);
}
struct Page *
page_lookup(Pde *pgdir, u_long va, Pte **ppte)
{
	struct Page *ppage;
	Pte *pte;

	pgdir_walk(pgdir, va, 0, &pte);

	if (pte == 0) {
		return 0;
	}
	if ((*pte & PTE_V) == 0) {
		return 0;	// the page is not in memory
	}

	ppage = pa2page(*pte);
	if (ppte) {
		*ppte = pte;
	}
	return ppage;
}
//
// Map the physical page 'pp' at virtual address 'va'.
// The permissions (the low 12 bits) of the page table entry
// should be set to 'perm|PTE_P'.
//
// Requirements
//   - If there is already a page mapped at 'va', it should be page_remove()d.
//   - If necessary, on demand, a page table should be allocated and inserted
//     into 'pgdir'.
//   - pp->pp_ref should be incremented if the insertion succeeds.
//   - The TLB must be invalidated if a page was formerly present at 'va'.
//
// Corner-case hint: Make sure to consider what happens when the same
// pp is re-inserted at the same virtual address in the same pgdir.
// However, try not to distinguish this case in your code, as this
// frequently leads to subtle bugs; there's an elegant way to handle
// everything in one code path.
//
// RETURNS:
//   0 on success
//   -E_NO_MEM, if page table couldn't be allocated
//
// Hint: The TA solution is implemented using pgdir_walk, page_remove,
// and page2pa.
//
int
page_insert(pde_t *pgdir, struct PageInfo *pp, void *va, int perm)
{
	pte_t *entry = pgdir_walk(pgdir, va, 1);
	if (!entry)
		return -E_NO_MEM;

	if (*entry & PTE_P) {
		if (pa2page(PTE_ADDR(*entry)) != pp) {
			// A different page is mapped here: remove it (this
			// also invalidates the TLB entry).
			page_remove(pgdir, va);
		} else {
			// Same page re-inserted, possibly with new
			// permissions: update the entry in place and flush
			// the stale TLB entry; the refcount is unchanged.
			*entry = page2pa(pp) | perm | PTE_P;
			tlb_invalidate(pgdir, va);
			return 0;
		}
	}
	*entry = page2pa(pp) | perm | PTE_P;
	pp->pp_ref++;
	return 0;
}
void check_boot_pgdir(void)
{
	pte_t *ptep;
	int i;

	for (i = 0; i < npage; i += PGSIZE) {
		assert((ptep = get_pte(boot_pgdir, (uintptr_t) KADDR(i), 0)) != NULL);
		assert(PTE_ADDR(*ptep) == i);
	}

	assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

	assert(boot_pgdir[0] == 0);

	struct Page *p;
	p = alloc_page();
	assert(page_insert(boot_pgdir, p, 0x100, PTE_W) == 0);
	assert(page_ref(p) == 1);
	assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W) == 0);
	assert(page_ref(p) == 2);

	const char *str = "ucore: Hello world!!";
	strcpy((void *)0x100, str);
	assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0);

	*(char *)(page2kva(p) + 0x100) = '\0';
	assert(strlen((const char *)0x100) == 0);

	free_page(p);
	free_page(pa2page(PDE_ADDR(boot_pgdir[0])));
	boot_pgdir[0] = 0;

	kprintf("check_boot_pgdir() succeeded!\n");
}
/*
 * Our version knocked off from kern/src/mm.c version + uncaching logic from
 * vmap_pmem_nocache(). This routine is expected to be invoked as part of the
 * mmap() handler.
 */
int map_upage_at_addr(struct proc *p, physaddr_t paddr, uintptr_t addr,
		      int pteprot, int dolock)
{
	struct page *pp;
	int rv = -1;

	/* __vmr_free_pgs() assumes a mapped pte is backed by "struct page" */
	if (paddr > max_paddr) {
		printk("[akaros]: map_upage_at_addr(): paddr=0x%llx "
		       "max_paddr=0x%llx\n", paddr, max_paddr);
		return -1;
	}

	pp = pa2page(paddr);

	/* __vmr_free_pgs() refcnt's pagemap pages differently */
	if (atomic_read(&pp->pg_flags) & PG_PAGEMAP) {
		printk("[akaros]: map_upage_at_addr(): mapPA=0x%llx\n", paddr);
		return -1;
	}

	spin_lock(&p->pte_lock);

	/*
	 * Free any existing page backing uva, drop in this page, and
	 * acquire a refcnt on the page on behalf of the user. Note though
	 * that we do not expect an existing page, since we are invoked in
	 * the mmap path (page_insert() does not handle PG_PAGEMAP refcnts).
	 */
	rv = page_insert(p->env_pgdir, pp, (void *)addr, pteprot);
	spin_unlock(&p->pte_lock);
	return rv;
}
// Find the final ept entry for a given guest physical address,
// creating any missing intermediate extended page tables if create is
// non-zero.
//
// If epte_out is non-NULL, store the found epte_t* at this address.
//
// Return 0 on success.
//
// Error values:
//    -E_INVAL if eptrt is NULL
//    -E_NO_ENT if create == 0 and the intermediate page table entries
//              are missing.
//    -E_NO_MEM if allocation of intermediate page table entries fails.
//
// Hint: Set the permissions of intermediate ept entries to __EPTE_FULL.
// The hardware ANDs the permissions at each level, so removing a permission
// bit at the last level entry is sufficient (and the bookkeeping is much
// simpler).
static int ept_lookup_gpa(epte_t *eptrt, void *gpa, int create,
			  epte_t **epte_out)
{
	if (eptrt == NULL)
		return -E_INVAL;

	epte_t *pte = pml4e_walk(eptrt, gpa, create);
	if (pte == NULL) {
		// pml4e_walk() fails either because an intermediate table
		// is missing (create == 0) or because allocating one failed.
		return (create == 0) ? -E_NO_ENT : -E_NO_MEM;
	}

	if (epte_out)
		*epte_out = pte;
	return 0;
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte_entry = pgdir_walk(pgdir, va, 0);
	if (pte_entry == NULL)
		return NULL;		// no page table covers this va
	if (!(*pte_entry & PTE_P))
		return NULL;		// entry exists but no page is present
	if (pte_store != NULL)
		*pte_store = pte_entry;
	return pa2page(PTE_ADDR(*pte_entry));
}
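// The pte_store out-parameter exists mostly for page_remove(). A minimal
// sketch of that caller, in the standard JOS shape:
void
page_remove(pde_t *pgdir, void *va)
{
	pte_t *ptep;
	struct PageInfo *pp = page_lookup(pgdir, va, &ptep);
	if (pp == NULL)
		return;			// nothing mapped at va
	page_decref(pp);		// frees the page when the refcount hits 0
	*ptep = 0;			// clear the mapping
	tlb_invalidate(pgdir, va);	// flush the stale translation
}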
int page_insert(Pde *pgdir, struct Page *pp, u_long va, u_int perm)
{
	u_int PERM = perm | PTE_V;
	Pte *pgtable_entry;

	pgdir_walk(pgdir, va, 0, &pgtable_entry);
	if (pgtable_entry != 0 && (*pgtable_entry & PTE_V) != 0) {
		if (pa2page(*pgtable_entry) != pp) {
			// A different page is mapped at va: remove it first.
			page_remove(pgdir, va);
		} else {
			// Same page re-inserted: just refresh the
			// permissions and flush the stale TLB entry.
			tlb_invalidate(pgdir, va);
			*pgtable_entry = page2pa(pp) | PERM;
			return 0;
		}
	}

	tlb_invalidate(pgdir, va);
	if (pgdir_walk(pgdir, va, 1, &pgtable_entry) != 0) {
		return -E_NO_MEM;	// page table couldn't be allocated
	}
	*pgtable_entry = page2pa(pp) | PERM;
	pp->pp_ref++;
	return 0;
}
// check_pgfault - check correctness of pgfault handler
static void
check_pgfault(void) {
	size_t nr_free_pages_store = nr_free_pages();
	size_t slab_allocated_store = slab_allocated();

	check_mm_struct = mm_create();
	assert(check_mm_struct != NULL);

	struct mm_struct *mm = check_mm_struct;
	pgd_t *pgdir = mm->pgdir = boot_pgdir;
	assert(pgdir[0] == 0);

	struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE);
	assert(vma != NULL);

	insert_vma_struct(mm, vma);

	uintptr_t addr = 0x100;
	assert(find_vma(mm, addr) == vma);

	int i, sum = 0;
	for (i = 0; i < 100; i ++) {
		*(char *)(addr + i) = i;
		sum += i;
	}
	for (i = 0; i < 100; i ++) {
		sum -= *(char *)(addr + i);
	}
	assert(sum == 0);

	page_remove(pgdir, ROUNDDOWN(addr, PGSIZE));
	free_page(pa2page(PMD_ADDR(*get_pmd(pgdir, addr, 0))));
	free_page(pa2page(PUD_ADDR(*get_pud(pgdir, addr, 0))));
	free_page(pa2page(PGD_ADDR(*get_pgd(pgdir, addr, 0))));

	pgdir[0] = 0;
	mm->pgdir = NULL;
	mm_destroy(mm);
	check_mm_struct = NULL;

	assert(nr_free_pages_store == nr_free_pages());
	assert(slab_allocated_store == slab_allocated());

	cprintf("check_pgfault() succeeded!\n");
}
/**
 * @brief Return the page mapped at virtual address 'va' in
 * page directory 'pgdir'.
 *
 * If pte_store is not NULL, then we store in it the address
 * of the pte for this page. This is used by page_remove
 * but should not be used by other callers.
 *
 * For jumbos, right now this returns the first Page* in the 4MB range.
 *
 * @param[in] pgdir      the page directory from which we should do the lookup
 * @param[in] va         the virtual address of the page we are looking up
 * @param[out] pte_store the address of the page table entry for the returned page
 *
 * @return PAGE the page mapped at virtual address 'va'
 * @return NULL No mapping exists at virtual address 'va', or it's paged out
 */
page_t *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);
	if (!pte || !PAGE_PRESENT(*pte))
		return 0;
	if (pte_store)
		*pte_store = pte;
	return pa2page(PTE_ADDR(*pte));
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);
	if (pte_store != NULL)
		*pte_store = pte;
	if (pte == NULL || !(*pte & PTE_P))
		return NULL;
	return pa2page(PTE_ADDR(*pte));
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);	// do not create
	if (!pte || !(*pte & PTE_P))
		return NULL;			// no page mapped at va
	if (pte_store)
		*pte_store = pte;		// found: hand the pte back
	return pa2page(PTE_ADDR(*pte));
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);
	if (!pte || !(*pte & PTE_P))
		return NULL;		// page not found
	if (pte_store)
		*pte_store = pte;	// found: store the pte address
	return pa2page(PTE_ADDR(*pte));
}
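// The "verify page permissions for syscall arguments" use mentioned in the
// header, as a sketch. user_page_writable() is a hypothetical helper for
// illustration, not part of the lab's API:
static bool
user_page_writable(pde_t *pgdir, void *va)
{
	pte_t *pte;

	if (page_lookup(pgdir, va, &pte) == NULL)
		return false;		// nothing mapped at va
	// Require both user access and write permission on the PTE.
	return (*pte & (PTE_U | PTE_W)) == (PTE_U | PTE_W);
}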
/* page_init - initialize the physical memory management */
static void page_init(void)
{
	struct e820map *memmap = (struct e820map *)(0x8000 + KERNBASE);
	uint64_t maxpa = 0;

	kprintf("e820map:\n");
	int i;
	for (i = 0; i < memmap->nr_map; i++) {
		uint64_t begin = memmap->map[i].addr,
			 end = begin + memmap->map[i].size;
		kprintf(" memory: %08llx, [%08llx, %08llx], type = %d.\n",
			memmap->map[i].size, begin, end - 1,
			memmap->map[i].type);
		if (memmap->map[i].type == E820_ARM) {
			if (maxpa < end && begin < KMEMSIZE) {
				maxpa = end;
			}
		}
	}
	if (maxpa > KMEMSIZE) {
		maxpa = KMEMSIZE;
	}

	extern char end[];

	npage = maxpa / PGSIZE;
	pages = (struct Page *)ROUNDUP((void *)end, PGSIZE);

	for (i = 0; i < npage; i++) {
		SetPageReserved(pages + i);
	}

	uintptr_t freemem = PADDR((uintptr_t) pages + sizeof(struct Page) * npage);

	for (i = 0; i < memmap->nr_map; i++) {
		uint64_t begin = memmap->map[i].addr,
			 end = begin + memmap->map[i].size;
		if (memmap->map[i].type == E820_ARM) {
			if (begin < freemem) {
				begin = freemem;
			}
			if (end > KMEMSIZE) {
				end = KMEMSIZE;
			}
			if (begin < end) {
				begin = ROUNDUP(begin, PGSIZE);
				end = ROUNDDOWN(end, PGSIZE);
				if (begin < end) {
					init_memmap(pa2page(begin),
						    (end - begin) / PGSIZE);
				}
			}
		}
	}
}
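/* For reference, init_memmap() in ucore is just a thin dispatch into the
 * active pmm_manager; roughly: */
static void
init_memmap(struct Page *base, size_t n)
{
	pmm_manager->init_memmap(base, n);
}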
static void page_init(void)
{
	int i;

	/* The e820-based probing used by the x86 port is not applicable
	 * here; this port sizes memory statically from RAM_SIZE. */

	extern char end[];

	npage = RAM_SIZE / PGSIZE;
	pages = (struct Page *)ROUNDUP((void *)end, PGSIZE);

	for (i = 0; i < npage; i ++) {
		SetPageReserved(pages + i);
	}

	uintptr_t freemem = PADDR((uintptr_t)pages + sizeof(struct Page) * npage);
	uint32_t free_begin = ROUNDUP(freemem, PGSIZE), free_end = RAM_SIZE;

	init_memmap(pa2page(free_begin), (free_end - free_begin) / PGSIZE);
	kprintf("free memory: [0x%x, 0x%x)\n", free_begin, free_end);
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct Page *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);
	if (pte_store)
		*pte_store = pte;
	if (pte != NULL && (*pte & PTE_P))
		return pa2page(PTE_ADDR(*pte));
	return NULL;
}
static int sys_map_kernel_page(void *kpage, void *va)
{
	struct Page *p = pa2page(PADDR(kpage));
	if (p == NULL)
		return -E_INVAL;
	return page_insert(curenv->env_pgdir, p, va, PTE_U | PTE_W);
}
int get_user_page(struct proc *p, unsigned long uvastart, int write,
		  int force, struct page **plist)
{
	pte_t pte;
	int ret = -1;
	struct page *pp;

	spin_lock(&p->pte_lock);

	pte = pgdir_walk(p->env_pgdir, (void *)uvastart, TRUE);
	if (!pte_walk_okay(pte))
		goto err1;

	if (!pte_is_present(pte)) {
		unsigned long prot = PTE_P | PTE_U | PTE_A | PTE_W | PTE_D;
#if 0
		printk("[akaros]: get_user_page() uva=0x%llx pte absent\n",
		       uvastart);
#endif
		/*
		 * TODO: ok to allocate with pte_lock? "prot" needs to be
		 * based on VMR writability, refer to pgprot_noncached().
		 */
		if (upage_alloc(p, &pp, 0))
			goto err1;
		pte_write(pte, page2pa(pp), prot);
	} else {
		pp = pa2page(pte_get_paddr(pte));

		/* __vmr_free_pgs() refcnt's pagemap pages differently */
		if (atomic_read(&pp->pg_flags) & PG_PAGEMAP) {
			printk("[akaros]: get_user_page(): uva=0x%llx\n",
			       uvastart);
			goto err1;
		}
	}

	if (write && !pte_has_perm_urw(pte)) {
		/* TODO: How is Linux using the "force" parameter */
		printk("[akaros]: get_user_page() uva=0x%llx pte ro\n",
		       uvastart);
		goto err1;
	}

	/* TODO (GUP): change the interface such that devices provide the
	 * memory and the user mmaps it, instead of trying to pin arbitrary
	 * user memory. */
	warn_once("Extremely unsafe, unpinned memory mapped! If your process dies, you might scribble on RAM!");

	plist[0] = pp;
	ret = 1;
err1:
	spin_unlock(&p->pte_lock);
	return ret;
}
struct Page *
get_page(pgd_t *pgdir, uintptr_t la, pte_t **ptep_store)
{
	pte_t *ptep = get_pte(pgdir, la, 0);
	if (ptep_store != NULL) {
		*ptep_store = ptep;
	}
	if (ptep != NULL && (*ptep & PTE_P)) {
		return pa2page(*ptep);
	}
	return NULL;
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pt_entry = pgdir_walk(pgdir, va, 0);
	if (pt_entry == NULL || !(*pt_entry & PTE_P))
		return NULL;
	if (pte_store != NULL)		// the out-parameter is optional
		*pte_store = pt_entry;
	return pa2page(PTE_ADDR(*pt_entry));
}
static void free_ept_level(epte_t *eptrt, int level)
{
	epte_t *dir = eptrt;
	int i;

	for (i = 0; i < NPTENTRIES; ++i) {
		if (level != 0) {
			if (epte_present(dir[i])) {
				physaddr_t pa = epte_addr(dir[i]);
				free_ept_level((epte_t *) KADDR(pa), level - 1);
				// Free the intermediate table itself.
				page_decref(pa2page(pa));
			}
		} else {
			// Last level: free the guest physical page.
			if (epte_present(dir[i])) {
				physaddr_t pa = epte_addr(dir[i]);
				page_decref(pa2page(pa));
			}
		}
	}
}
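// For context, the expected top-level caller is a guest-teardown routine
// that starts the recursion at the EPT root. A hedged sketch; the name
// free_guest_mem and the 4-level assumption are illustrative, not taken
// from the code above:
void free_guest_mem(epte_t *eptrt)
{
	// EPT mirrors the 4-level x86-64 layout; the root table is level 3.
	free_ept_level(eptrt, 3);
}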
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *tmp = pgdir_walk(pgdir, va, 0);
	if (tmp != NULL && (*tmp & PTE_P)) {
		if (pte_store != NULL)
			*pte_store = tmp;
		return pa2page(PTE_ADDR(*tmp));
	}
	return NULL;
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct Page *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte_p = pgdir_walk(pgdir, va, 0);
	// Check the present bit explicitly; a non-zero but non-present
	// entry (e.g. a swapped-out marker) is not a mapped page.
	if (pte_p && (*pte_p & PTE_P)) {
		if (pte_store) {
			*pte_store = pte_p;
		}
		return pa2page(PTE_ADDR(*pte_p));
	}
	return NULL;
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct PageInfo *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pg_t = pgdir_walk(pgdir, va, 0);
	if (pte_store != 0) {
		*pte_store = pg_t;
	}
	if (!pg_t || !(*pg_t & PTE_P))
		return NULL;
	return pa2page(PTE_ADDR(*pg_t));
}
//
// Return the page mapped at virtual address 'va'.
// If pte_store is not zero, then we store in it the address
// of the pte for this page. This is used by page_remove and
// can be used to verify page permissions for syscall arguments,
// but should not be used by most callers.
//
// Return NULL if there is no page mapped at va.
//
// Hint: the TA solution uses pgdir_walk and pa2page.
//
struct Page *
page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);
	if (pte == NULL)
		return NULL;
	if (pte_store != 0)
		*pte_store = pte;
	if (*pte & PTE_P)
		return pa2page(PTE_ADDR(*pte));
	return NULL;
}
struct page *page_lookup(pml4e_t *pml4, uintptr_t va, pte_t **pte_p)
{
	pte_t *pte = mmap_lookup(pml4, va, 0);
	if (pte_p != NULL)
		*pte_p = pte;
	if (pte == NULL || (*pte & PTE_P) == 0)
		return NULL;	// nothing mapped at va
	return pa2page(PTE_ADDR(*pte));
}