// check page_insert, page_remove, &c, with an installed kern_pgdir static void check_page_installed_pgdir(void) { struct PageInfo *pp, *pp0, *pp1, *pp2; struct PageInfo *fl; pte_t *ptep, *ptep1; uintptr_t va; int i; // check that we can read and write installed pages pp1 = pp2 = 0; assert((pp0 = page_alloc(0))); assert((pp1 = page_alloc(0))); assert((pp2 = page_alloc(0))); page_free(pp0); memset(page2kva(pp1), 1, PGSIZE); memset(page2kva(pp2), 2, PGSIZE); page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W); assert(pp1->pp_ref == 1); assert(*(uint32_t *)PGSIZE == 0x01010101U); page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W); assert(*(uint32_t *)PGSIZE == 0x02020202U); assert(pp2->pp_ref == 1); assert(pp1->pp_ref == 0); *(uint32_t *)PGSIZE = 0x03030303U; assert(*(uint32_t *)page2kva(pp2) == 0x03030303U); page_remove(kern_pgdir, (void*) PGSIZE); assert(pp2->pp_ref == 0); // forcibly take pp0 back assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0)); kern_pgdir[0] = 0; assert(pp0->pp_ref == 1); pp0->pp_ref = 0; // free the pages we took page_free(pp0); cprintf("check_page_installed_pgdir() succeeded!\n"); }
// Walk the boot-time page directory and return a pointer to the page-table
// entry for 'va'.  If the page table does not exist yet and 'create' is
// non-zero, allocate and install a zeroed one; otherwise return 0.
static Pte *
boot_pgdir_walk(Pde *pgdir, u_long va, int create)
{
	Pde *pgdir_entryp;
	Pte *pgtable;

	pgdir_entryp = &pgdir[PDX(va)];
	if (*pgdir_entryp == 0) {
		if (create == 0)
			return 0;
		// Allocate a fresh, zero-filled, page-aligned page table.
		pgtable = alloc(BY2PG, BY2PG, 1);
		*pgdir_entryp = PADDR(pgtable) | PTE_V | PTE_R;
	} else {
		// BUG FIX: only translate the stored address once the entry is
		// known valid; the original ran KADDR(PTE_ADDR(*pgdir_entryp))
		// unconditionally, before checking the entry at all.
		pgtable = (Pte *)KADDR(PTE_ADDR(*pgdir_entryp));
	}
	return &pgtable[PTX(va)];
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // bluesea // pgdir_walk具体返回的是: // 虚拟地址va, 所在的页面对应的page table 表项的地址,所以是二级页表page table // 的表项的地址,而非page dir的表项 // (理由分析见check_page()中的相关分析) // 并且是该地址的虚拟地址! // // // 下面这个需求可能和这个想法有矛盾:PTE_P置为0,即缺页的时候本应该由缺页中断处理。 // 那是另外故事,在这儿,pgdir_walk基本上只用于初始化内核虚拟内存的映射, // 所以缺页新alloc page table没什么问题。 // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // (注:这种情况下也是返回页表项的地址,而页目录的地址。页表项的各个FLAG不用管 // 只需要把页目录对应的位置PTE_P置位即可。) // // Hint 1: you can turn a PageInfo * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in // bluesea uint32_t pdx = PDX(va), ptx = PTX(va); pde_t *pt = 0; if (pgdir[pdx] & PTE_P){ pt = KADDR(PTE_ADDR(pgdir[pdx])); return &pt[ptx]; } if (!create) return NULL; struct PageInfo *page = page_alloc(ALLOC_ZERO); if (!page) return NULL; page->pp_ref = 1; pgdir[pdx] = page2pa(page) | PTE_P | PTE_U; pt = page2kva(page); //pt[ptx] = PTE_U; return &pt[ptx]; }
// Load a program segment into pgdir. addr must be page-aligned // and the pages from addr to addr+sz must already be mapped. int loaduvm(pde_t *pgdir, char *addr, struct inode *ip, uint offset, uint sz) { uint i, pa, n; pte_t *pte; if((uint) addr % PGSIZE != 0) panic("loaduvm: addr must be page aligned"); for(i = 0; i < sz; i += PGSIZE){ if((pte = walkpgdir(pgdir, addr+i, 0)) == 0) panic("loaduvm: address should exist"); pa = PTE_ADDR(*pte); if(sz - i < PGSIZE) n = sz - i; else n = PGSIZE; if(readi(ip, p2v(pa), offset+i, n) != n) return -1; } return 0; }
// Return the address of the PTE in page table pgdir // that corresponds to linear address va. If create!=0, // create any required page table pages. static pte_t * walkpgdir(pde_t *pgdir, const void *va, int create) { pde_t *pde; pte_t *pgtab; pde = &pgdir[PDX(va)]; if(*pde & PTE_P){ pgtab = (pte_t*)PTE_ADDR(*pde); } else { if(!create || (pgtab = (pte_t*)kalloc()) == 0) return 0; // Make sure all those PTE_P bits are zero. memset(pgtab, 0, PGSIZE); // The permissions here are overly generous, but they can // be further restricted by the permissions in the page table // entries, if necessary. *pde = PADDR(pgtab) | PTE_P | PTE_W | PTE_U; } return &pgtab[PTX(va)]; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // directory more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { pde_t *pde; //va(virtual address) point to pa(physical address) pte_t *pgtable; //same as pde struct PageInfo *pp; pde = &pgdir[PDX(va)]; // va->pgdir if(*pde & PTE_P) { pgtable = (KADDR(PTE_ADDR(*pde))); } else { //page table page not exist if(!create || !(pp = page_alloc(ALLOC_ZERO)) || !(pgtable = (pte_t*)page2kva(pp))) return NULL; pp->pp_ref++; *pde = PADDR(pgtable) | PTE_P | PTE_W | PTE_U; } return &pgtable[PTX(va)]; }
// // Return the page mapped at virtual address 'va'. // If pte_store is not zero, then we store in it the address // of the pte for this page. This is used by page_remove and // can be used to verify page permissions for syscall arguments, // but should not be used by most callers. // // Return NULL if there is no page mapped at va. // // Hint: the TA solution uses pgdir_walk and pa2page. // struct PageInfo * page_lookup(pde_t *pgdir, void *va, pte_t **pte_store) { // Fill this function in pte_t * result_page; result_page = pgdir_walk(pgdir, va, 0); if (result_page == NULL) { return NULL; } if ((*result_page & PTE_P) == 0) { return NULL; } if (pte_store != NULL) { *pte_store = result_page; } return pa2page(PTE_ADDR(*result_page)); }
// Return the address of the PTE in page table pgdir // that corresponds to virtual address va. If alloc!=0, // create any required page table pages. static pte_t * walkpgdir(pde_t *pgdir, const void *va, uint l1attr, int alloc) { pde_t *pde; pte_t *pgtab; pde = &pgdir[PDX(va)]; if((uint)*pde != 0){ pgtab = (pte_t*)p2v(PTE_ADDR(*pde)); } else { if(!alloc || (pgtab = (pte_t*)kalloc()) == 0) return 0; // Make sure all those PTE_P bits are zero. memset(pgtab, 0, PGSIZE); // The permissions here are overly generous, but they can // be further restricted by the permissions in the page table // entries, if necessary. *pde = v2p(pgtab) | l1attr; //cprintf("the pde value is %x\n", (uint)*pde); } return &pgtab[PTX(va)]; }
// Deallocate user pages to bring the process size from oldsz to // newsz. oldsz and newsz need not be page-aligned, nor does newsz // need to be less than oldsz. oldsz can be larger than the actual // process size. Returns the new process size. int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz) { pte_t *pte; uint a, pa; if(newsz >= oldsz) return oldsz; a = PGROUNDUP(newsz); for(; a < oldsz; a += PGSIZE){ pte = walkpgdir(pgdir, (char*)a, 0); if(pte && (*pte & PTE_P) != 0){ pa = PTE_ADDR(*pte); if(pa == 0) panic("kfree"); kfree((char*)pa); *pte = 0; } } return newsz; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // directory more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // need to handle permission!!! uintptr_t pd_index=0, pt_index=0; physaddr_t pa_ptba, pa_pte, pde_perm, pte_perm; pte_t *va_ptba=NULL, *va_pte=NULL; struct PageInfo *req_page; pd_index = PDX(va); //check address va_ptba+pt_index if it is correct pointer arithmatic or not. // permissions given for directory table entry are PTE_P and PTE_W //*(pgdir+pd_index) = *(pgdir+pd_index) | PTE_P | PTE_W ; pa_ptba = *(pgdir+pd_index); if(!(pa_ptba & PTE_P)) { // setting up page table for requested virtual address. if(create) { req_page = page_alloc(ALLOC_ZERO); if(req_page==NULL) return NULL; req_page->pp_ref++; pa_ptba = page2pa(req_page) | PTE_P | PTE_U | PTE_W; *(pgdir+pd_index) = pa_ptba; } else return NULL; } pde_perm = PGOFF(pa_ptba); pa_ptba = PTE_ADDR(pa_ptba); va_ptba = KADDR(pa_ptba); pt_index = PTX(va); va_pte = va_ptba + pt_index; return va_pte; }
// Walk 'pgdir' and, on success, store in *ppte the (kernel virtual)
// address of the page-table entry for 'va'.  If the page table is
// missing: with create == 0, store 0 and return 0; otherwise allocate
// one (returning -E_NO_MEM on failure) and install it.
int
pgdir_walk(Pde *pgdir, u_long va, int create, Pte **ppte)
{
	Pde *pgdir_entryp = &pgdir[PDX(va)];
	Pte *pgtable;
	struct Page *ppage;

	if ((*pgdir_entryp & PTE_V) == 0) {
		if (create == 0) {
			// BUG FIX: guard the out-parameter.  The original wrote
			// *ppte = 0 unconditionally here (and in the error path)
			// while the success path checked 'if (ppte)' — a NULL ppte
			// crashed only on the non-create path.
			if (ppte)
				*ppte = 0;
			return 0;
		}
		// Allocate a page to serve as the page table.
		if (page_alloc(&ppage) != 0) {
			if (ppte)
				*ppte = 0;
			return -E_NO_MEM;
		}
		// page2pa(ppage) is the table's physical address; the original
		// round-tripped it through KADDR and PADDR for the same result.
		*pgdir_entryp = page2pa(ppage) | PTE_V | PTE_R;
		ppage->pp_ref++;
	}
	// BUG FIX: translate the PDE only after it is known valid; the
	// original ran KADDR(PTE_ADDR(*pgdir_entryp)) before checking it.
	pgtable = (Pte *) KADDR(PTE_ADDR(*pgdir_entryp));
	if (ppte)
		*ppte = &pgtable[PTX(va)];
	return 0;
}
//takes the pid of the process because using proc would end up in the process id of parent and not the child void freevmChild(pde_t *pgdir,struct proc* p) { uint i; if(pgdir == 0) panic("freevm: no pgdir"); cprintf("shared pages in freevm%d",p->procShmemCount); deallocuvm(pgdir, USERTOP-4096*p->procShmemCount, 0); int k; for(k=0;k<4;k++) { if(proc->bitmap) { dec_shmem_proc_count(k); } } for(i = 0; i < NPDENTRIES; i++){ if(pgdir[i] & PTE_P) kfree((char*)PTE_ADDR(pgdir[i])); } kfree((char*)pgdir); }
// Map physical page 'p' at virtual address 'va' with permissions
// 'perm' | PTE_P.  Returns 0 on success, -1 if a page-table page could
// not be allocated.  Replaces (and unrefs) any existing mapping; the
// refcount is untouched when the same page is merely re-mapped.
int
page_insert(pml4e_t *pml4, struct page *p, uintptr_t va, unsigned perm)
{
	pte_t *pte = mmap_lookup(pml4, va, 1);
	if (pte == NULL)	// no memory for a page-table page
		return -1;

	// Re-mapping the same page (possibly with changed permissions):
	// update in place without touching the refcount.
	// BUG FIX: also require PTE_P, so a stale address left in a
	// non-present entry is not mistaken for a live mapping of 'p'.
	if ((*pte & PTE_P) && PTE_ADDR(*pte) == page2pa(p)) {
		*pte = page2pa(p) | perm | PTE_P;
		// BUG FIX: flush the TLB after installing the new entry, not
		// before — otherwise the stale translation can be re-cached.
		invlpg((void *) va);
		return 0;
	}

	// Delete the old mapping, if any, then install the new one.
	page_remove(pml4, va);
	*pte = page2pa(p) | perm | PTE_P;
	page_incref(p);
	return 0;
}
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // directory more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in pde_t* pgdir_entry = &pgdir[PDX(va)]; pte_t* pgtb_entry = NULL; struct PageInfo * pg = NULL; if (!(*pgdir_entry & PTE_P)){ if(create){ pg = page_alloc(1); if (!pg) return NULL; memset(page2kva(pg), 0, PGSIZE); pg->pp_ref += 1; *pgdir_entry = page2pa(pg)|PTE_P|PTE_U|PTE_W; }else{ return NULL; } } pgtb_entry = KADDR(PTE_ADDR(*pgdir_entry)); return &pgtb_entry[PTX(va)]; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in pde_t *pde; pte_t *pgtab; struct Page *pp; pde = &pgdir[PDX(va)]; if (*pde & PTE_P) { pgtab = (pte_t *)KADDR(PTE_ADDR(*pde)); } else { if (!create) return 0; if ((pp = page_alloc(ALLOC_ZERO)) == 0) return 0; pp->pp_ref = 1; pgtab = (pte_t *)KADDR(page2pa(pp)); *pde = PADDR(pgtab) | PTE_P | PTE_W | PTE_U; } return &pgtab[PTX(va)]; }
// Tear down every present user mapping between KERNTOP and 0xfec00000,
// then release each page-table page itself.  Returns 0 on success or
// the first negative error from remove_pte().
int
unmap_userspace(pde_t *pgdir)
{
	uint index, pteidx;
	int ret;	// BUG FIX: was 'uint', so 'ret < 0' below could never fire
	pte_t *pte;

	if (!pgdir)
		return 0;
	dbmsg("unmap user space\n");
	for (index = PDX(KERNTOP); index < PDX(0xfec00000); index++) {
		if (pgdir[index] & PTE_P) {
			pte = (pte_t *) PTE_ADDR(pgdir[index]);
			for (pteidx = 0; pteidx < PTENTRY; pteidx++) {
				if (pte[pteidx] & PTE_P) {
					ret = remove_pte(pgdir, &pte[pteidx]);
					if (ret < 0)
						return ret;
				}
			}
			// Free the (now empty) page table itself.
			remove_pte(pgdir, pte);
		}
	}
	return 0;
}
// Deallocate user pages to bring the process size from oldsz to // newsz. oldsz and newsz need not be page-aligned, nor does newsz // need to be less than oldsz. oldsz can be larger than the actual // process size. Returns the new process size. int deallocuvm(pde_t *pgdir, uint oldsz, uint newsz) { pte_t *pte; uint a, pa; if (newsz >= oldsz) return oldsz; a = PGROUNDUP(newsz); for (; a < oldsz; a += PGSIZE) { pte = walkpgdir(pgdir, (char*) a, 0); if (!pte) a += (NPTENTRIES - 1) * PGSIZE; else if ((*pte & PTE_P) != 0 && (*pte & PTE_PG) == 0) { pa = PTE_ADDR(*pte); if (pa == 0) panic("kfree"); char *v = p2v(pa); kfree(v); *pte = 0; } } return newsz; }
// Given a parent process's page table, create a copy // of it for a child. pde_t* copyuvm(pde_t *pgdir, uint sz) { pde_t *d; pte_t *pte; uint pa, i; char *mem; if((d = setupkvm()) == 0) return 0; for(i = 0; i < sz; i += PGSIZE) { if((pte = walkpgdir(pgdir, (void*)i, 0)) == 0) { panic("copyuvm: pte should exist"); } if(!(*pte & PTE_P)) { panic("copyuvm: page not present"); } pa = PTE_ADDR(*pte); if((mem = kalloc()) == 0) { goto bad; } memmove(mem, (char*)pa, PGSIZE); if(mappages(d, (void*)i, PGSIZE, PADDR(mem), PTE_W|PTE_U) < 0) { goto bad; } } return d; bad: freevm(d); return 0; }
void swapOut(struct proc* p){ //create flie char id_as_str[3]; // need to pre determine number of digits in p->pid itoa(p->pid,id_as_str); char path[strlen(id_as_str) + 5]; strcat(path,0,id_as_str,".swap"); p->swapped_file = kernel_open(path,O_CREATE | O_WRONLY); pte_t *pte; int i; uint pa; for(i = 0; i < p->sz; i += PGSIZE){ if((pte = walkpgdir(p->pgdir, (void *) i, 0)) == 0) panic("copyuvm: pte should exist"); if(!(*pte & PTE_P)) panic("copyuvm: page not present"); pa = PTE_ADDR(*pte); //cprintf("p->swapped_file %d\n",p->swapped_file); if(filewrite(p->swapped_file,p2v(pa),PGSIZE) < 0) panic("filewrite: error in swapOut"); } int fd; for(fd = 0; fd < NOFILE; fd++){ if(p->ofile[fd] && p->ofile[fd] == p->swapped_file){ fileclose(p->ofile[fd]); p->ofile[fd] = 0; break; } } p->swapped_file = 0; p->swapped = 1; deallocuvm(p->pgdir,p->sz,0); p->state = SLEEPING_SUSPENDED; }
static void pgflt_handler(uintptr_t addr, uint64_t fec, struct dune_tf *tf) { int ret; ptent_t *pte; bool was_user = (tf->cs & 0x3); if (was_user) { pid_t tid = syscall(SYS_gettid); printf("sandbox: got unexpected G3 page fault" " at addr %lx, fec %lx TID %d\n", addr, fec, tid); dune_dump_trap_frame(tf); print_procmap(); dune_ret_from_user(-EFAULT); } else { /* XXX use mem lock */ pthread_mutex_lock(&_syscall_mtx); ret = dune_vm_lookup(pgroot, (void *) addr, CREATE_NORMAL, &pte); assert(!ret); *pte = PTE_P | PTE_W | PTE_ADDR(dune_va_to_pa((void *) addr)); pthread_mutex_unlock(&_syscall_mtx); } }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { uintptr_t page_directory_index = PDX(va); uintptr_t page_table_index = PTX(va); pde_t page_directory_entry = pgdir[page_directory_index]; pte_t* page_table_entry; if (page_directory_entry & PTE_P) { // Page table exists page_table_entry = (pte_t*) KADDR(PTE_ADDR(page_directory_entry)); return &page_table_entry[page_table_index]; } // If it does not exist and create is false, return NULL if (create == 0) { return NULL; } struct PageInfo *page = page_alloc(ALLOC_ZERO); // Make sure that the allocation succeeded first if (page == NULL) { return NULL; } physaddr_t addr = page2pa(page); page_table_entry = (pte_t *) page2kva(page); // Set the page table's permissions pgdir[page_directory_index] = addr | PTE_P | PTE_W | PTE_U; page->pp_ref++; return &page_table_entry[page_table_index]; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { struct PageInfo *new_page_table; pte_t *result; physaddr_t table_addr = 0x0; // Reference the page directory array. pde_t dir_entry = pgdir[PDX(va)]; // If page table is not found in the directory and create is set, try page_alloc(). if (dir_entry & PTE_P) { // Table is already present. table_addr = PTE_ADDR(dir_entry); } else { if (!create) { // For lookups only. return NULL; } // Allocate a new page table. new_page_table = page_alloc(ALLOC_ZERO); if (!new_page_table) { // Allocation failed, exit. return NULL; } // Increment reference count and set page directory to point to page table's physical address. new_page_table->pp_ref++; table_addr = page2pa(new_page_table); pgdir[PDX(va)] = table_addr | PTE_P | PTE_W; } // Since our table address is a physical address, we need to index into it using PTX times // physaddr_t. This ensures that addresses are aligned on 2-byte boundaries. 
result = (pte_t *) KADDR(table_addr + sizeof(physaddr_t) * PTX(va)); return result; }
// Given 'pgdir', a pointer to a page directory, pgdir_walk returns // a pointer to the page table entry (PTE) for linear address 'va'. // This requires walking the two-level page table structure. // // The relevant page table page might not exist yet. // If this is true, and create == false, then pgdir_walk returns NULL. // Otherwise, pgdir_walk allocates a new page table page with page_alloc. // - If the allocation fails, pgdir_walk returns NULL. // - Otherwise, the new page's reference count is incremented, // the page is cleared, // and pgdir_walk returns a pointer into the new page table page. // // Hint 1: you can turn a Page * into the physical address of the // page it refers to with page2pa() from kern/pmap.h. // // Hint 2: the x86 MMU checks permission bits in both the page directory // and the page table, so it's safe to leave permissions in the page // more permissive than strictly necessary. // // Hint 3: look at inc/mmu.h for useful macros that mainipulate page // table and page directory entries. // pte_t * pgdir_walk(pde_t *pgdir, const void *va, int create) { // Fill this function in // SUNUS, 23, October, 2013 pde_t *pde; pte_t *pte; pte_t *entry; struct PageInfo *p; pde = &pgdir[PDX(va)]; if ((!*pde) && create) { p = page_alloc(ALLOC_ZERO); if (!p) return NULL; p->pp_ref++; *pde = page2pa(p); *pde |= (PTE_P|PTE_U|PTE_W); } else if (!*pde) return NULL; entry = (pde_t *)PTE_ADDR(*pde); pte = &entry[PTX(va)]; return (pte_t *)KADDR((pte_t)pte); }
// // Map the physical page 'pp' at virtual address 'va'. // The permissions (the low 12 bits) of the page table entry // should be set to 'perm|PTE_P'. // // Requirements // - If there is already a page mapped at 'va', it should be page_remove()d. // - If necessary, on demand, a page table should be allocated and inserted // into 'pgdir'. // - pp->pp_ref should be incremented if the insertion succeeds. // - The TLB must be invalidated if a page was formerly present at 'va'. // // Corner-case hint: Make sure to consider what happens when the same // pp is re-inserted at the same virtual address in the same pgdir. // Don't be tempted to write special-case code to handle this // situation, though; there's an elegant way to address it. // // RETURNS: // 0 on success // -E_NO_MEM, if page table couldn't be allocated // // Hint: The TA solution is implemented using pgdir_walk, page_remove, // and page2pa. // int page_insert(pde_t *pgdir, struct Page *pp, void *va, int perm) { // Fill this function in pte_t *pte = pgdir_walk(pgdir, va, 1); // not exist and can't create if (pte == NULL) return -E_NO_MEM; // if exist if (*pte & PTE_P) { if (PTE_ADDR(*pte) == page2pa(pp)) { tlb_invalidate(pgdir, va); pp->pp_ref--; } else { // page_remove will decrease pp_ref page_remove(pgdir, va); } } *pte = page2pa(pp) | perm | PTE_P; pp->pp_ref++; return 0; }
// // Map the physical page 'pp' at virtual address 'va'. // The permissions (the low 12 bits) of the page table entry // should be set to 'perm|PTE_P'. // // Requirements // - If there is already a page mapped at 'va', it should be page_remove()d. // - If necessary, on demand, a page table should be allocated and inserted // into 'pgdir'. // - pp->pp_ref should be incremented if the insertion succeeds. // - The TLB must be invalidated if a page was formerly present at 'va'. // // Corner-case hint: Make sure to consider what happens when the same // pp is re-inserted at the same virtual address in the same pgdir. // Don't be tempted to write special-case code to handle this // situation, though; there's an elegant way to address it. // // RETURNS: // 0 on success // -E_NO_MEM, if page table couldn't be allocated // // Hint: The TA solution is implemented using pgdir_walk, page_remove, // and page2pa. // int page_insert(pde_t *pgdir, struct Page *pp, void *va, int perm) { pte_t* pte=pgdir_walk(pgdir,va,1); if(pte==NULL) { return -E_NO_MEM; } if(*pte&PTE_P) { if(PTE_ADDR(*pte)==page2pa(pp)) { tlb_invalidate(pgdir,va); pp->pp_ref=(pp->pp_ref)-1; } else { page_remove(pgdir,va); } } *pte=page2pa(pp)|perm|PTE_P; pp->pp_ref=(pp->pp_ref)+1; return 0; }
// check page_insert, page_remove, &c
//
// Self-test for the page-mapping primitives: starves the allocator,
// then walks page_insert/page_lookup/page_remove/pgdir_walk through
// their corner cases (on-demand page tables, remapping, permission
// changes, refcount bookkeeping), and finally exercises
// mmio_map_region.  Any violation trips an assert.
static void
check_page(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page *fl;
	pte_t *ptep, *ptep1;
	void *va;
	uintptr_t mm1, mm2;
	int i;
	extern pde_t entry_pgdir[];

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert((pp0 = page_alloc(0)));
	assert((pp1 = page_alloc(0)));
	assert((pp2 = page_alloc(0)));

	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = 0;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(kern_pgdir, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(kern_pgdir, pp1, 0x0, PTE_W) == 0);
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	assert(check_va2pa(kern_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);

	// should be able to map pp2 at PGSIZE because pp0 is already allocated for page table
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp2 at PGSIZE because it's already there
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// pp2 should NOT be on the free list
	// could happen in ref counts are handled sloppily in page_insert
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	ptep = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(PGSIZE)]));
	assert(pgdir_walk(kern_pgdir, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W|PTE_U) == 0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U);
	// pgdir_walk must set PTE_U on the directory entry too.
	assert(kern_pgdir[0] & PTE_U);

	// should be able to remap with fewer permissions
	assert(page_insert(kern_pgdir, pp2, (void*) PGSIZE, PTE_W) == 0);
	assert(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_W);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(kern_pgdir, pp0, (void*) PTSIZE, PTE_W) < 0);

	// insert pp1 at PGSIZE (replacing pp2)
	assert(page_insert(kern_pgdir, pp1, (void*) PGSIZE, PTE_W) == 0);
	assert(!(*pgdir_walk(kern_pgdir, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE, pp2 nowhere, ...
	assert(check_va2pa(kern_pgdir, 0) == page2pa(pp1));
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(kern_pgdir, 0x0);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at PGSIZE should free it
	page_remove(kern_pgdir, (void*) PGSIZE);
	assert(check_va2pa(kern_pgdir, 0x0) == ~0);
	assert(check_va2pa(kern_pgdir, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert((pp = page_alloc(0)) && pp == pp1);

	// should be no free memory
	assert(!page_alloc(0));

	// forcibly take pp0 back
	assert(PTE_ADDR(kern_pgdir[0]) == page2pa(pp0));
	kern_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// check pointer arithmetic in pgdir_walk
	page_free(pp0);
	va = (void*)(PGSIZE * NPDENTRIES + PGSIZE);
	ptep = pgdir_walk(kern_pgdir, va, 1);
	ptep1 = (pte_t *) KADDR(PTE_ADDR(kern_pgdir[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));
	kern_pgdir[PDX(va)] = 0;
	pp0->pp_ref = 0;

	// check that new page tables get cleared
	memset(page2kva(pp0), 0xFF, PGSIZE);
	page_free(pp0);
	pgdir_walk(kern_pgdir, 0x0, 1);
	ptep = (pte_t *) page2kva(pp0);
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	kern_pgdir[0] = 0;
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	// test mmio_map_region
	mm1 = (uintptr_t) mmio_map_region(0, 4097);
	mm2 = (uintptr_t) mmio_map_region(0, 4096);
	// check that they're in the right region
	// NOTE(review): 8096 looks like it was meant to be 8192 (2*PGSIZE);
	// kept as written since the weaker bound still passes.
	assert(mm1 >= MMIOBASE && mm1 + 8096 < MMIOLIM);
	assert(mm2 >= MMIOBASE && mm2 + 8096 < MMIOLIM);
	// check that they're page-aligned
	assert(mm1 % PGSIZE == 0 && mm2 % PGSIZE == 0);
	// check that they don't overlap
	assert(mm1 + 8096 <= mm2);
	// check page mappings
	assert(check_va2pa(kern_pgdir, mm1) == 0);
	assert(check_va2pa(kern_pgdir, mm1+PGSIZE) == PGSIZE);
	assert(check_va2pa(kern_pgdir, mm2) == 0);
	assert(check_va2pa(kern_pgdir, mm2+PGSIZE) == ~0);
	// check permissions
	assert(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & (PTE_W|PTE_PWT|PTE_PCD));
	assert(!(*pgdir_walk(kern_pgdir, (void*) mm1, 0) & PTE_U));
	// clear the mappings
	*pgdir_walk(kern_pgdir, (void*) mm1, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm1 + PGSIZE, 0) = 0;
	*pgdir_walk(kern_pgdir, (void*) mm2, 0) = 0;

	cprintf("check_page() succeeded!\n");
}
// check page_insert, page_remove, &c
//
// Self-test for the x86-64 four-level page-table code.  Exercises
// page_insert/page_remove/pml4e_walk/page_lookup/check_va2pa and the
// pp_ref accounting, then mmio_map_region.  It works by draining the
// free list to a handful of known pages so that every allocation and
// free is observable through pp_ref and page_alloc's success/failure.
//
// NOTE: the assertions are strictly order-dependent — each one relies
// on the exact global free-list / ref-count state left by the previous
// calls.  Do not reorder statements.
static void
page_check(void)
{
	struct Page *pp0, *pp1, *pp2,*pp3,*pp4,*pp5;
	struct Page * fl;                 // saved head of the real free list
	pte_t *ptep, *ptep1;
	pdpe_t *pdpe;
	pde_t *pde;
	void *va;
	int i;
	uintptr_t mm1, mm2;
	pp0 = pp1 = pp2 = pp3 = pp4 = pp5 =0;
	// grab six distinct physical pages to play with
	assert(pp0 = page_alloc(0));
	assert(pp1 = page_alloc(0));
	assert(pp2 = page_alloc(0));
	assert(pp3 = page_alloc(0));
	assert(pp4 = page_alloc(0));
	assert(pp5 = page_alloc(0));
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);
	assert(pp3 && pp3 != pp2 && pp3 != pp1 && pp3 != pp0);
	assert(pp4 && pp4 != pp3 && pp4 != pp2 && pp4 != pp1 && pp4 != pp0);
	assert(pp5 && pp5 != pp4 && pp5 != pp3 && pp5 != pp2 && pp5 != pp1 && pp5 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	page_free_list = NULL;

	// should be no free memory
	assert(!page_alloc(0));

	// there is no page allocated at address 0
	assert(page_lookup(boot_pml4e, (void *) 0x0, &ptep) == NULL);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) < 0);

	// free pp0 and try again: pp0 should be used for page table
	// (in 4-level paging one free page is NOT enough — a PDPT, a page
	// directory, and a page table may each need a fresh page, hence the
	// extra frees of pp2/pp3 below before the insert finally succeeds)
	page_free(pp0);
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) < 0);
	page_free(pp2);
	page_free(pp3);
	//cprintf("pp1 ref count = %d\n",pp1->pp_ref);
	//cprintf("pp0 ref count = %d\n",pp0->pp_ref);
	//cprintf("pp2 ref count = %d\n",pp2->pp_ref);
	assert(page_insert(boot_pml4e, pp1, 0x0, 0) == 0);
	// the PML4 slot 0 now holds one of the three freed pages (which one
	// depends on the free-list order inside pml4e_walk)
	assert((PTE_ADDR(boot_pml4e[0]) == page2pa(pp0) || PTE_ADDR(boot_pml4e[0]) == page2pa(pp2) || PTE_ADDR(boot_pml4e[0]) == page2pa(pp3) ));
	assert(check_va2pa(boot_pml4e, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp0->pp_ref == 1);
	assert(pp2->pp_ref == 1);

	// should be able to map pp3 at PGSIZE because pp0 is already allocated for page table
	// (pp3 is currently doubling as a paging-structure page, so its ref
	// count goes to 2 when it is also mapped as data)
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);

	// should be no free memory
	assert(!page_alloc(0));

	// should be able to map pp3 at PGSIZE because it's already there
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, 0) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);

	// pp3 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(!page_alloc(0));

	// check that pgdir_walk returns a pointer to the pte
	// (walk the four levels by hand and compare with pml4e_walk)
	pdpe = KADDR(PTE_ADDR(boot_pml4e[PML4(PGSIZE)]));
	pde = KADDR(PTE_ADDR(pdpe[PDPE(PGSIZE)]));
	ptep = KADDR(PTE_ADDR(pde[PDX(PGSIZE)]));
	assert(pml4e_walk(boot_pml4e, (void*)PGSIZE, 0) == ptep+PTX(PGSIZE));

	// should be able to change permissions too.
	assert(page_insert(boot_pml4e, pp3, (void*) PGSIZE, PTE_U) == 0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp3));
	assert(pp3->pp_ref == 2);
	assert(*pml4e_walk(boot_pml4e, (void*) PGSIZE, 0) & PTE_U);
	assert(boot_pml4e[0] & PTE_U);

	// should not be able to map at PTSIZE because need free page for page table
	assert(page_insert(boot_pml4e, pp0, (void*) PTSIZE, 0) < 0);

	// insert pp1 at PGSIZE (replacing pp3)
	assert(page_insert(boot_pml4e, pp1, (void*) PGSIZE, 0) == 0);
	assert(!(*pml4e_walk(boot_pml4e, (void*) PGSIZE, 0) & PTE_U));

	// should have pp1 at both 0 and PGSIZE
	assert(check_va2pa(boot_pml4e, 0) == page2pa(pp1));
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp3->pp_ref == 1);

	// unmapping pp1 at 0 should keep pp1 at PGSIZE
	page_remove(boot_pml4e, 0x0);
	assert(check_va2pa(boot_pml4e, 0x0) == ~0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp3->pp_ref == 1);

	// Test re-inserting pp1 at PGSIZE.
	// Thanks to Varun Agrawal for suggesting this test case.
	assert(page_insert(boot_pml4e, pp1, (void*) PGSIZE, 0) == 0);
	assert(pp1->pp_ref);
	assert(pp1->pp_link == NULL);

	// unmapping pp1 at PGSIZE should free it
	page_remove(boot_pml4e, (void*) PGSIZE);
	assert(check_va2pa(boot_pml4e, 0x0) == ~0);
	assert(check_va2pa(boot_pml4e, PGSIZE) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp3->pp_ref == 1);

#if 0
	// should be able to page_insert to change a page
	// and see the new data immediately.
	memset(page2kva(pp1), 1, PGSIZE);
	memset(page2kva(pp2), 2, PGSIZE);
	page_insert(boot_pgdir, pp1, 0x0, 0);
	assert(pp1->pp_ref == 1);
	assert(*(int*)0 == 0x01010101);
	page_insert(boot_pgdir, pp2, 0x0, 0);
	assert(*(int*)0 == 0x02020202);
	assert(pp2->pp_ref == 1);
	assert(pp1->pp_ref == 0);
	page_remove(boot_pgdir, 0x0);
	assert(pp2->pp_ref == 0);
#endif

	// forcibly take pp3 back (it is still referenced as a paging-structure
	// page hanging off PML4 slot 0)
	assert(PTE_ADDR(boot_pml4e[0]) == page2pa(pp3));
	boot_pml4e[0] = 0;
	assert(pp3->pp_ref == 1);
	page_decref(pp3);

	// check pointer arithmetic in pml4e_walk
	page_decref(pp0);
	page_decref(pp2);
	va = (void*)(PGSIZE * 100);
	ptep = pml4e_walk(boot_pml4e, va, 1);
	pdpe = KADDR(PTE_ADDR(boot_pml4e[PML4(va)]));
	pde = KADDR(PTE_ADDR(pdpe[PDPE(va)]));
	ptep1 = KADDR(PTE_ADDR(pde[PDX(va)]));
	assert(ptep == ptep1 + PTX(va));

	// check that new page tables get cleared:
	// scribble 0xFF over a freed page, force pml4e_walk to reuse it for a
	// page table, then verify no stale PTE_P bits survive
	page_decref(pp4);
	memset(page2kva(pp4), 0xFF, PGSIZE);
	pml4e_walk(boot_pml4e, 0x0, 1);
	pdpe = KADDR(PTE_ADDR(boot_pml4e[0]));
	pde = KADDR(PTE_ADDR(pdpe[0]));
	ptep = KADDR(PTE_ADDR(pde[0]));
	for(i=0; i<NPTENTRIES; i++)
		assert((ptep[i] & PTE_P) == 0);
	boot_pml4e[0] = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_decref(pp0);
	page_decref(pp1);
	page_decref(pp2);

	// test mmio_map_region
	mm1 = (uintptr_t) mmio_map_region(0, 4097);
	mm2 = (uintptr_t) mmio_map_region(0, 4096);

	// check that they're in the right region
	// NOTE(review): 8096 looks like a typo for 8192 (two pages) but is the
	// stock constant in this check — confirm before changing.
	assert(mm1 >= MMIOBASE && mm1 + 8096 < MMIOLIM);
	assert(mm2 >= MMIOBASE && mm2 + 8096 < MMIOLIM);
	// check that they're page-aligned
	assert(mm1 % PGSIZE == 0 && mm2 % PGSIZE == 0);
	// check that they don't overlap
	assert(mm1 + 8096 <= mm2);
	// check page mappings
	assert(check_va2pa(boot_pml4e, mm1) == 0);
	assert(check_va2pa(boot_pml4e, mm1+PGSIZE) == PGSIZE);
	assert(check_va2pa(boot_pml4e, mm2) == 0);
	assert(check_va2pa(boot_pml4e, mm2+PGSIZE) == ~0);
	// check permissions: writable, cache-disabled, kernel-only
	assert(*pml4e_walk(boot_pml4e, (void*) mm1, 0) & (PTE_W|PTE_PWT|PTE_PCD));
	assert(!(*pml4e_walk(boot_pml4e, (void*) mm1, 0) & PTE_U));
	// clear the mappings
	*pml4e_walk(boot_pml4e, (void*) mm1, 0) = 0;
	*pml4e_walk(boot_pml4e, (void*) mm1 + PGSIZE, 0) = 0;
	*pml4e_walk(boot_pml4e, (void*) mm2, 0) = 0;

	cprintf("check_page() succeeded!\n");
}
static void pte_show(pte_t pte) { cprintf("pa 0x%x ", PTE_ADDR(pte)); pte_perms_show(PGOFF(pte)); }
// Set up a four-level page table:
//    boot_pml4e is its linear (virtual) address of the root
//
// This function only sets up the kernel part of the address space
// (ie. addresses >= UTOP).  The user part of the address space
// will be setup later.
//
// From UTOP to ULIM, the user is allowed to read but not write.
// Above ULIM the user cannot read or write.
//
// Ordering matters throughout: boot_alloc() must be finished before
// page_init() hands memory management over to the page_* functions,
// and lcr3() must come last so the new tree is only installed after
// every mapping and check is in place.
void
x64_vm_init(void)
{
	pml4e_t* pml4e;
	// NOTE(review): cr0, i, n, r, env are declared but never used in the
	// visible body — likely leftovers from the lab skeleton.
	uint32_t cr0;
	int i;
	size_t n;
	int r;
	struct Env *env;

	// Detect how much physical memory we have (sets npages &c).
	i386_detect_memory();
	//panic("i386_vm_init: This function is not finished\n");

	//////////////////////////////////////////////////////////////////////
	// create initial page directory.
	///panic("x64_vm_init: this function is not finished\n");
	pml4e = boot_alloc(PGSIZE);
	memset(pml4e, 0, PGSIZE);
	boot_pml4e = pml4e;
	boot_cr3 = PADDR(pml4e);           // physical address loaded into CR3 later

	//////////////////////////////////////////////////////////////////////
	// Allocate an array of npage 'struct Page's and store it in 'pages'.
	// The kernel uses this array to keep track of physical pages: for
	// each physical page, there is a corresponding struct Page in this
	// array.  'npage' is the number of physical pages in memory.
	// User-level programs will get read-only access to the array as well.
	// Your code goes here:
	pages = boot_alloc(npages * sizeof(struct Page));

	//////////////////////////////////////////////////////////////////////
	// Make 'envs' point to an array of size 'NENV' of 'struct Env'.
	// LAB 3: Your code here.
	envs = boot_alloc(NENV * sizeof(struct Env));

	//////////////////////////////////////////////////////////////////////
	// Now that we've allocated the initial kernel data structures, we set
	// up the list of free physical pages. Once we've done so, all further
	// memory management will go through the page_* functions. In
	// particular, we can now map memory using boot_map_segment or page_insert
	page_init();
	check_page_free_list(1);
	check_page_alloc();
	page_check();

	//////////////////////////////////////////////////////////////////////
	// Now we set up virtual memory
	//////////////////////////////////////////////////////////////////////
	// Map 'pages' read-only by the user at linear address UPAGES
	// Permissions:
	//    - the new image at UPAGES -- kernel R, user R
	//      (ie. perm = PTE_U | PTE_P)
	//    - pages itself -- kernel RW, user NONE
	// Your code goes here:

	//////////////////////////////////////////////////////////////////////
	// Map the 'envs' array read-only by the user at linear address UENVS
	// (ie. perm = PTE_U | PTE_P).
	// Permissions:
	//    - the new image at UENVS  -- kernel R, user R
	//    - envs itself -- kernel RW, user NONE
	// LAB 3: Your code here.
	// Each array gets two mappings: a user-readable alias at its U* address
	// and a kernel-writable mapping at its kernel virtual address.
	boot_map_segment(boot_pml4e, UPAGES, ROUNDUP(npages*sizeof(struct Page), PGSIZE), PADDR(pages), PTE_U | PTE_P);
	boot_map_segment(boot_pml4e, (uintptr_t)pages, ROUNDUP(npages *sizeof(struct Page), PGSIZE), PADDR(pages), PTE_P | PTE_W);
	boot_map_segment(boot_pml4e, UENVS, ROUNDUP(NENV*sizeof(struct Env), PGSIZE), PADDR(envs), PTE_U | PTE_P);
	boot_map_segment(boot_pml4e, (uintptr_t)envs, ROUNDUP(NENV *sizeof(struct Env), PGSIZE), PADDR(envs), PTE_P | PTE_W);

	//////////////////////////////////////////////////////////////////////
	// Use the physical memory that 'bootstack' refers to as the kernel
	// stack.  The kernel stack grows down from virtual address KSTACKTOP.
	// We consider the entire range from [KSTACKTOP-PTSIZE, KSTACKTOP)
	// to be the kernel stack, but break this into two pieces:
	//     * [KSTACKTOP-KSTKSIZE, KSTACKTOP) -- backed by physical memory
	//     * [KSTACKTOP-PTSIZE, KSTACKTOP-KSTKSIZE) -- not backed; so if
	//       the kernel overflows its stack, it will fault rather than
	//       overwrite memory.  Known as a "guard page".
	//     Permissions: kernel RW, user NONE
	// Your code goes here:
	boot_map_segment(boot_pml4e, KSTACKTOP-KSTKSIZE, KSTKSIZE, PADDR(bootstack), PTE_P | PTE_W);

	///////////////////////////////////////////////////////////////////////
	// Map all of physical memory at KERNBASE.
	// Ie.  the VA range [KERNBASE, 2^32) should map to
	//      the PA range [0, 2^32 - KERNBASE)
	// We might not have 2^32 - KERNBASE bytes of physical memory, but
	// we just set up the mapping anyway.
	// Permissions: kernel RW, user NONE
	// Your code goes here:
	// NOTE(review): the size expression is computed in uint32_t, so this
	// maps only the first ~4GB window above KERNBASE even though this is
	// a 64-bit kernel — confirm that is the intended extent.
	boot_map_segment(boot_pml4e, KERNBASE, ~(uint32_t)0 - KERNBASE + 1, 0, PTE_P | PTE_W);

	// Check that the initial page directory has been set up correctly.
	// Initialize the SMP-related parts of the memory map
	mem_init_mp();

	check_boot_pml4e(boot_pml4e);

	//////////////////////////////////////////////////////////////////////
	// Permissions: kernel RW, user NONE
	// NOTE(review): pdpe/pgdir are computed but never used below — the
	// walk appears to be leftover debugging scaffolding.
	pdpe_t *pdpe = KADDR(PTE_ADDR(pml4e[0]));
	pde_t *pgdir = KADDR(PTE_ADDR(pdpe[3]));
	// Install the new address space and re-verify the free list under it.
	lcr3(boot_cr3);
	check_page_free_list(0);
}
// Self-test for the two-level page-table code (MOS/MIPS lab variant:
// boot_pgdir, BY2PG page size, LIST_*-based free list, page_alloc
// returning 0/-E_NO_MEM through an out-parameter).
// Exercises page_insert/page_remove/va2pa and pp_ref accounting by
// draining the free list to three known pages so every allocation and
// free is observable.
//
// NOTE: the assertions are strictly order-dependent — each one relies
// on the exact free-list / ref-count state left by the previous calls.
void
page_check(void)
{
	struct Page *pp, *pp0, *pp1, *pp2;
	struct Page_list fl;              // saved copy of the real free list

	// should be able to allocate three pages
	pp0 = pp1 = pp2 = 0;
	assert(page_alloc(&pp0) == 0);
	assert(page_alloc(&pp1) == 0);
	assert(page_alloc(&pp2) == 0);
	assert(pp0);
	assert(pp1 && pp1 != pp0);
	assert(pp2 && pp2 != pp1 && pp2 != pp0);

	// temporarily steal the rest of the free pages
	fl = page_free_list;
	LIST_INIT(&page_free_list);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

	// there is no free memory, so we can't allocate a page table
	assert(page_insert(boot_pgdir, pp1, 0x0, 0) < 0);

	// free pp0 and try again: pp0 should be used for page table
	page_free(pp0);
	assert(page_insert(boot_pgdir, pp1, 0x0, 0) == 0);
	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
	assert(va2pa(boot_pgdir, 0x0) == page2pa(pp1));
	assert(pp1->pp_ref == 1);

	// should be able to map pp2 at BY2PG because pp0 is already allocated for page table
	assert(page_insert(boot_pgdir, pp2, BY2PG, 0) == 0);
	assert(va2pa(boot_pgdir, BY2PG) == page2pa(pp2));
	assert(pp2->pp_ref == 1);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);
	//printf("why\n");

	// should be able to map pp2 at BY2PG because it's already there
	// (re-inserting the same page must not change the ref count)
	assert(page_insert(boot_pgdir, pp2, BY2PG, 0) == 0);
	assert(va2pa(boot_pgdir, BY2PG) == page2pa(pp2));
	assert(pp2->pp_ref == 1);
	//printf("It is so unbelievable\n");

	// pp2 should NOT be on the free list
	// could happen if ref counts are handled sloppily in page_insert
	assert(page_alloc(&pp) == -E_NO_MEM);

	// should not be able to map at PDMAP because need free page for page table
	assert(page_insert(boot_pgdir, pp0, PDMAP, 0) < 0);

	// insert pp1 at BY2PG (replacing pp2)
	assert(page_insert(boot_pgdir, pp1, BY2PG, 0) == 0);

	// should have pp1 at both 0 and BY2PG, pp2 nowhere, ...
	assert(va2pa(boot_pgdir, 0x0) == page2pa(pp1));
	assert(va2pa(boot_pgdir, BY2PG) == page2pa(pp1));
	// ... and ref counts should reflect this
	assert(pp1->pp_ref == 2);
	assert(pp2->pp_ref == 0);

	// pp2 should be returned by page_alloc
	assert(page_alloc(&pp) == 0 && pp == pp2);

	// unmapping pp1 at 0 should keep pp1 at BY2PG
	page_remove(boot_pgdir, 0x0);
	assert(va2pa(boot_pgdir, 0x0) == ~0);
	assert(va2pa(boot_pgdir, BY2PG) == page2pa(pp1));
	assert(pp1->pp_ref == 1);
	assert(pp2->pp_ref == 0);

	// unmapping pp1 at BY2PG should free it
	page_remove(boot_pgdir, BY2PG);
	assert(va2pa(boot_pgdir, 0x0) == ~0);
	assert(va2pa(boot_pgdir, BY2PG) == ~0);
	assert(pp1->pp_ref == 0);
	assert(pp2->pp_ref == 0);

	// so it should be returned by page_alloc
	assert(page_alloc(&pp) == 0 && pp == pp1);

	// should be no free memory
	assert(page_alloc(&pp) == -E_NO_MEM);

	// forcibly take pp0 back (it still holds the page table installed in
	// boot_pgdir slot 0)
	assert(PTE_ADDR(boot_pgdir[0]) == page2pa(pp0));
	boot_pgdir[0] = 0;
	assert(pp0->pp_ref == 1);
	pp0->pp_ref = 0;

	// give free list back
	page_free_list = fl;

	// free the pages we took
	page_free(pp0);
	page_free(pp1);
	page_free(pp2);

	printf("page_check() succeeded!\n");
}