// check_pgdir - self test for the page-directory primitives:
// get_page / get_pte / page_insert / page_remove and their refcounting.
static void check_pgdir(void) {
    assert(npage <= KMEMSIZE / PGSIZE);
    // boot page directory exists and is page-aligned
    assert(boot_pgdir != NULL && (uint32_t)PGOFF(boot_pgdir) == 0);
    assert(get_page(boot_pgdir, 0x0, NULL) == NULL);
    struct Page *p1, *p2;
    p1 = alloc_page();
    // map p1 at la 0 and check pte lookup / refcount bookkeeping
    assert(page_insert(boot_pgdir, p1, 0x0, 0) == 0);
    pte_t *ptep;
    assert((ptep = get_pte(boot_pgdir, 0x0, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert(page_ref(p1) == 1);
    // the pte for la PGSIZE is the second slot of the same page table
    ptep = &((pte_t *)KADDR(PDE_ADDR(boot_pgdir[0])))[1];
    assert(get_pte(boot_pgdir, PGSIZE, 0) == ptep);
    p2 = alloc_page();
    // map p2 at PGSIZE with user/supervisor read-write permission
    assert(page_insert(boot_pgdir, p2, PGSIZE, PTE_TYPE_URW_SRW) == 0);
    assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
    assert(*ptep & PTE_TYPE_URW_SRW);
    // pde 0 must be a valid table descriptor
    assert(((boot_pgdir[0] & PTE_TYPE)==PTE_TYPE_TABLE)&&(boot_pgdir[0]&PTE_V));
    assert(page_ref(p2) == 1);
    // remap p1 over p2 at PGSIZE: p2 loses its reference, p1 gains one
    assert(page_insert(boot_pgdir, p1, PGSIZE, 0) == 0);
    assert(page_ref(p1) == 2);
    assert(page_ref(p2) == 0);
    assert((ptep = get_pte(boot_pgdir, PGSIZE, 0)) != NULL);
    assert(pte2page(*ptep) == p1);
    assert((*ptep & PTE_TYPE_URW_SRW) == 0);
    // unmap both addresses; refcounts drop back to zero
    page_remove(boot_pgdir, 0x0);
    assert(page_ref(p1) == 1);
    assert(page_ref(p2) == 0);
    page_remove(boot_pgdir, PGSIZE);
    assert(page_ref(p1) == 0);
    assert(page_ref(p2) == 0);
    // the page-table page itself is still referenced once, by the pde
    assert(page_ref(pde2page(boot_pgdir[0])) == 1);
    free_page(pde2page(boot_pgdir[0]));
    boot_pgdir[0] = 0;
    cprintf("check_pgdir() succeeded\n");
}
/**
 * page_remove_pte - free the Page struct mapped at linear address la
 * and clear (invalidate) the pte that maps la.
 * @param pgdir page directory (used for the TLB shoot-down)
 * @param la    linear address whose mapping is removed
 * @param ptep  pointer to the page table entry for la
 * note: the PT is changed, so the TLB entry must be invalidated
 */
void page_remove_pte(pgd_t *pgdir, uintptr_t la, pte_t *ptep) {
    if (ptep_present(ptep)) {
        struct Page *page = pte2page(*ptep);
        if (!PageSwap(page)) {
            // page is not swap-managed: drop this mapping's reference and
            // free the frame when it was the last one
            if (page_ref_dec(page) == 0) {
                //Don't free dma pages
                if (!PageIO(page))
                    free_page(page);
            }
        } else {
            // swap-managed page: keep the frame, but propagate the
            // hardware dirty bit before dropping the reference
            if (ptep_dirty(ptep)) {
                SetPageDirty(page);
            }
            page_ref_dec(page);
        }
        ptep_unmap(ptep);
        mp_tlb_invalidate(pgdir, la);
    } else if (! ptep_invalid(ptep)) {
        // non-present but non-invalid pte holds a swap entry
#ifndef CONFIG_NO_SWAP
        swap_remove_entry(*ptep);
#endif
        ptep_unmap(ptep);
    }
}
//page_remove - free an Page which is related linear address la and has an validated pte void page_remove(pde_t *pgdir, uintptr_t la) { //cprintf("!=%08x\n",(char*)((char*)la+101) ); //cprintf("?=%d\n",*(char *)(0x50000105)); //cprintf("remove addr=%08x\n\n",la); pte_t *ptep = get_pte(pgdir, la, 0); //cprintf("remove *ptep=%08x\n",*ptep); struct Page* page=pte2page(*ptep); //cprintf("pagerefremoveshi %d\n",page->ref); //cprintf("remove page addr=%08x\n",page); //cprintf("ptep=%08x\n",ptep); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); asm volatile("nop"); if (ptep != NULL) { //cprintf("pgrmv=%08x\n",ptep); page_remove_pte(pgdir, la, ptep); } }
/* shmem_remove_entry_pte - release whatever reference *ptep holds and
 * clear the entry.
 *  - present entry: drop one page reference, freeing the frame at zero
 *    (with swap enabled, pages on a swap list instead keep the frame and
 *    have the hardware dirty bit propagated first)
 *  - non-present but non-invalid entry: a swap entry; with swap enabled
 *    its swap reference is dropped, otherwise this state must not occur */
static inline void shmem_remove_entry_pte(pte_t * ptep) {
    assert(ptep != NULL);
    if (ptep_present(ptep)) {
        struct Page *page = pte2page(*ptep);
#ifdef UCONFIG_SWAP
        if (!PageSwap(page)) {
            if (page_ref_dec(page) == 0) {
                free_page(page);
            }
        } else {
            // swap-managed page: remember the dirty bit, keep the frame
            if (ptep_dirty(ptep)) {
                SetPageDirty(page);
            }
            page_ref_dec(page);
        }
#else
        if (page_ref_dec(page) == 0) {
            free_page(page);
        }
#endif /* UCONFIG_SWAP */
        ptep_unmap(ptep);
    } else if (!ptep_invalid(ptep)) {
#ifdef UCONFIG_SWAP
        // the entry is a swap entry: drop its swap reference
        swap_remove_entry(*ptep);
        ptep_unmap(ptep);
#else
        // without swap support a non-present, non-invalid pte is a bug
        assert(0);
#endif
    }
}
// swap_out_vma - try unmap pte & move pages into swap active list. static int swap_out_vma(struct mm_struct *mm, struct vma_struct *vma, uintptr_t addr, size_t require) { if (require == 0 || !(addr >= vma->vm_start && addr < vma->vm_end)) { return 0; } uintptr_t end; size_t free_count = 0; addr = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(vma->vm_end, PGSIZE); while (addr < end && require != 0) { pte_t *ptep = get_pte(mm->pgdir, addr, 0); if (ptep == NULL) { if (get_pud(mm->pgdir, addr, 0) == NULL) { addr = ROUNDDOWN(addr + PUSIZE, PUSIZE); } else if (get_pmd(mm->pgdir, addr, 0) == NULL) { addr = ROUNDDOWN(addr + PMSIZE, PMSIZE); } else { addr = ROUNDDOWN(addr + PTSIZE, PTSIZE); } continue ; } if (ptep_present(ptep)) { struct Page *page = pte2page(*ptep); assert(!PageReserved(page)); if (ptep_accessed(ptep)) { ptep_unset_accessed(ptep); mp_tlb_invalidate(mm->pgdir, addr); goto try_next_entry; } if (!PageSwap(page)) { if (!swap_page_add(page, 0)) { goto try_next_entry; } swap_active_list_add(page); } else if (ptep_dirty(ptep)) { SetPageDirty(page); } swap_entry_t entry = page->index; swap_duplicate(entry); page_ref_dec(page); ptep_copy(ptep, &entry); mp_tlb_invalidate(mm->pgdir, addr); mm->swap_address = addr + PGSIZE; free_count ++, require --; if ((vma->vm_flags & VM_SHARE) && page_ref(page) == 1) { uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off; pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 0); assert(sh_ptep != NULL && ! ptep_invalid(sh_ptep)); if (ptep_present(sh_ptep)) { shmem_insert_entry(vma->shmem, shmem_addr, entry); } } } try_next_entry: addr += PGSIZE; } return free_count; }
/* copy_range - copy content of memory (start, end) of one process A to another process B
 * @to:    the addr of process B's Page Directory
 * @from:  the addr of process A's Page Directory
 * @start: start linear address of the range (page aligned)
 * @end:   end linear address of the range (page aligned)
 * @share: flags to indicate to dup OR share. We just use dup method, so it
 *         is currently unused.
 * @return 0 on success, -E_NO_MEM when a page table cannot be allocated
 *
 * CALL GRAPH: copy_mm-->dup_mmap-->copy_range
 */
int copy_range(pde_t *to, pde_t *from, uintptr_t start, uintptr_t end,
               bool share) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    //assert(USER_ACCESS(start, end));
    // copy content by page unit.
    do {
        //call get_pte to find process A's pte according to the addr start
        pte_t *ptep = get_pte(from, start, 0), *nptep;
        if (ptep == NULL) {
            // no page table here: skip to the next page-table-sized region
            start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            continue ;
        }
        //call get_pte to find process B's pte according to the addr start.
        //If pte is NULL, just alloc a PT
        if (*ptep & PTE_V) {
            if ((nptep = get_pte(to, start, 1)) == NULL) {
                return -E_NO_MEM;
            }
            // preserve the permission bits of the source mapping
            uint32_t perm = (*ptep & PTE_TYPE_URWX_SRWX);
            //get page from ptep
            struct Page *page = pte2page(*ptep);
            // alloc a page for process B
            struct Page *npage=alloc_page();
            assert(page!=NULL);
            assert(npage!=NULL);
            int ret=0;
            // duplicate the page contents via the kernel mapping, then map
            // the copy at the same linear address in process B with the
            // same permissions
            void * kva_src = page2kva(page);
            void * kva_dst = page2kva(npage);
            memcpy(kva_dst, kva_src, PGSIZE);
            ret = page_insert(to, npage, start, perm);
            assert(ret == 0);
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
    return 0;
}
/* page_remove_pte - drop the mapping held in *ptep; the page's reference
 * count is decremented (freeing the frame at zero) and the TLB entry for
 * la is flushed because the page table changed. */
static inline void
page_remove_pte(pgd_t *pgdir, uintptr_t la, pte_t *ptep) {
    if (!(*ptep & PTE_P)) {
        return;   // nothing mapped here
    }
    struct Page *mapped = pte2page(*ptep);
    if (page_ref_dec(mapped) == 0) {
        free_page(mapped);
    }
    *ptep = 0;
    tlb_invalidate(pgdir, la);
}
//get_page - get related Page struct for linear address la using PDT pgdir struct Page * get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store) { pte_t *ptep = get_pte(pgdir, la, 0); if (ptep_store != NULL) { *ptep_store = ptep; } if (ptep != NULL && ((*ptep) & PTE_V)) { return pte2page(*ptep); } return NULL; }
//page_insert - build the map of phy addr of an Page with the linear addr la // paramemters: // pgdir: the kernel virtual base address of PDT // page: the Page which need to map // la: the linear address need to map // perm: the permission of this Page which is setted in related pte // return value: always 0 //note: PT is changed, so the TLB need to be invalidate int page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) { pte_t *ptep = get_pte(pgdir, la, 1); //cprintf("insert ptep=%08x\n",ptep); if (ptep == NULL) { return -E_NO_MEM; } page_ref_inc(page); //cprintf("pageref2 %d\n",page->ref); //cprintf("addr=%08x\n\n",la); if (*ptep & PTE_V) { //cprintf("pageref3 %d\n",page->ref); struct Page *p = pte2page(*ptep); // cprintf("guagua\n"); if (p == page) { page_ref_dec(page); } else { page_remove_pte(pgdir, la, ptep); } } // pte_t* pte = get_pte(mm->pgdir,0x50000000,0); // struct Page* page=pte2page(*pte); //cprintf("insert page addr = %08x\n",page); //cprintf("insert page physical =%08x\n",page2pa(page)); //cprintf(" before *ptep=%08x\n",*ptep); //*ptep = page2pa(page); //cprintf("ptep start=%08x\n",*ptep); //cprintf("perm=%08x\n",perm); *ptep = page2pa(page)| PTE_V | perm | PTE_R; //cprintf("ptep last=%08x\n",*ptep); // cprintf("\ninsert page1 addr=%08x\n",page); // cprintf("insert *ptep=%08x\n",*ptep); // pte_t* pte = get_pte(pgdir,la,0); //cprintf("pgf *pte=%08x\n",*pte); // struct Page* page2=pte2page(*pte); // cprintf("page2 addr = %08x\n",page2); // struct Page* pg=page; // cprintf("%08x\n",pg); // cprintf("%08x\n",page2pa(pg)); //cprintf("%08x\n",pa2page(page2pa(pg))); //tlb_invalidate(pgdir, la); return 0; }
/* shmem_insert_entry - store the pte value `entry` at offset addr inside
 * shmem. A previous entry in that slot is released first, and the reference
 * the new entry represents (page ref or swap count) is duplicated.
 * Returns 0 on success, -E_NO_MEM if the slot cannot be allocated. */
int
shmem_insert_entry(struct shmem_struct *shmem, uintptr_t addr, pte_t entry) {
    pte_t *slot = shmem_get_entry(shmem, addr, 1);
    if (slot == NULL) {
        return -E_NO_MEM;
    }
    // evict whatever the slot currently holds
    if (!ptep_invalid(slot)) {
        shmem_remove_entry_pte(slot);
    }
    // account for the reference carried by the new entry
    if (ptep_present(&entry)) {
        page_ref_inc(pte2page(entry));
    }
    else if (!ptep_invalid(&entry)) {
        swap_duplicate(entry);
    }
    ptep_copy(slot, &entry);
    return 0;
}
/* shmem_insert_entry - store the raw pte value `entry` at offset addr in
 * shmem, releasing any previous entry and duplicating the reference the
 * new one represents (page ref for present entries, swap count otherwise).
 * Returns 0 on success, -E_NO_MEM if the slot cannot be allocated. */
int
shmem_insert_entry(struct shmem_struct *shmem, uintptr_t addr, pte_t entry) {
    pte_t *slot = shmem_get_entry(shmem, addr, 1);
    if (slot == NULL) {
        return -E_NO_MEM;
    }
    // evict whatever the slot currently holds
    if (*slot != 0) {
        shmem_remove_entry_pte(slot);
    }
    // account for the reference carried by the new entry
    if (entry & PTE_P) {
        page_ref_inc(pte2page(entry));
    }
    else if (entry != 0) {
        swap_duplicate(entry);
    }
    *slot = entry;
    return 0;
}
/* page_insert - map `page` at linear address la in pgdir with permission
 * perm. An existing different mapping at la is removed first; remapping the
 * same page only refreshes the permission bits. The TLB entry for la is
 * flushed. Returns 0 on success, -E_NO_MEM when the page table cannot be
 * allocated. */
int
page_insert(pgd_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm) {
    pte_t *slot = get_pte(pgdir, la, 1);
    if (slot == NULL) {
        return -E_NO_MEM;
    }
    page_ref_inc(page);
    if (*slot & PTE_P) {
        struct Page *old = pte2page(*slot);
        if (old == page) {
            page_ref_dec(page);   // same page: keep only the old reference
        }
        else {
            page_remove_pte(pgdir, la, slot);
        }
    }
    *slot = page2pa(page) | PTE_P | perm;
    tlb_invalidate(pgdir, la);
    return 0;
}
/**
 * page_insert - build the map of phy addr of an Page with the linear addr @la
 * @param pgdir page directory
 * @param page  the page descriptor of the page to be inserted
 * @param la    logical address of the page
 * @param perm  permission of the page
 * @return 0 on success and error code when failed
 */
int
page_insert(pgd_t *pgdir, struct Page *page, uintptr_t la, pte_perm_t perm) {
    pte_t *slot = get_pte(pgdir, la, 1);
    if (slot == NULL) {
        return -E_NO_MEM;
    }
    page_ref_inc(page);
    if (*slot != 0) {
        if (ptep_present(slot) && pte2page(*slot) == page) {
            // remapping the same page: undo the extra reference taken above
            page_ref_dec(page);
        }
        else {
            // a different mapping (or swap entry) occupies la: remove it
            page_remove_pte(pgdir, la, slot);
        }
    }
    ptep_map(slot, page2pa(page));
    ptep_set_perm(slot, perm);
    mp_tlb_update(pgdir, la);
    return 0;
}
//page_remove_pte - free an Page sturct which is related linear address la // - and clean(invalidate) pte which is related linear address la //note: PT is changed, so the TLB need to be invalidate static inline void page_remove_pte(pde_t *pgdir, uintptr_t la, pte_t *ptep) { /* LAB2 EXERCISE 3: YOUR CODE * * Please check if ptep is valid, and tlb must be manually updated if mapping is updated * * Maybe you want help comment, BELOW comments can help you finish the code * * Some Useful MACROs and DEFINEs, you can use them in below implementation. * MACROs or Functions: * struct Page *page pte2page(*ptep): get the according page from the value of a ptep * free_page : free a page * page_ref_dec(page) : decrease page->ref. NOTICE: ff page->ref == 0 , then this page should be free. * tlb_invalidate(pde_t *pgdir, uintptr_t la) : Invalidate a TLB entry, but only if the page tables being * edited are the ones currently in use by the processor. * DEFINEs: * PTE_V 0x001 // page table/directory entry flags bit : Present */ #if 0 if (0) { //(1) check if page directory is present struct Page *page = NULL; //(2) find corresponding page to pte //(3) decrease page reference //(4) and free this page when page reference reachs 0 //(5) clear second page table entry //(6) flush tlb } #endif if (*ptep & PTE_V) { struct Page *page = pte2page(*ptep); //cprintf("pgrmv=%08x\n",ptep); int a=page_ref_dec(page); //cprintf("a=%d\n",a); if (a == 0) { free_page(page); } *ptep = 0; //tlb_invalidate(pgdir, la); } }
/* shmem_remove_entry_pte - release the reference held by *ptep and clear
 * the entry. Present entries drop one page reference (freeing the frame at
 * zero, unless the page is swap-managed, in which case the dirty bit is
 * propagated instead); non-zero non-present entries are swap entries and
 * drop one swap reference. */
static inline void
shmem_remove_entry_pte(pte_t *ptep) {
    assert(ptep != NULL);
    pte_t entry = *ptep;
    if (entry & PTE_P) {
        struct Page *page = pte2page(entry);
        if (PageSwap(page)) {
            // swap-managed: keep the frame, remember the dirty bit
            if (entry & PTE_D) {
                SetPageDirty(page);
            }
            page_ref_dec(page);
        }
        else if (page_ref_dec(page) == 0) {
            free_page(page);
        }
        *ptep = 0;
    }
    else if (entry != 0) {
        // non-present, non-zero: a swap entry
        swap_remove_entry(entry);
        *ptep = 0;
    }
}
/* shmem_remove_entry_pte - release the reference held by *ptep and clear
 * the entry. A present entry drops one page reference (the frame is freed
 * at zero unless the page is on a swap list, where only the dirty bit is
 * propagated); a non-present, non-invalid entry is a swap entry whose swap
 * reference is dropped. */
static inline void
shmem_remove_entry_pte(pte_t *ptep) {
    assert(ptep != NULL);
    if (ptep_present(ptep)) {
        struct Page *page = pte2page(*ptep);
        if (PageSwap(page)) {
            // swap-managed page: keep the frame, remember the dirty bit
            if (ptep_dirty(ptep)) {
                SetPageDirty(page);
            }
            page_ref_dec(page);
        }
        else if (page_ref_dec(page) == 0) {
            free_page(page);
        }
        ptep_unmap(ptep);
    }
    else if (!ptep_invalid(ptep)) {
        // non-present but non-invalid: a swap entry
        swap_remove_entry(*ptep);
        ptep_unmap(ptep);
    }
}
// check_swap - check the correctness of swap & page replacement algorithm.
// The asserted values depend on the exact order of every call below; the
// test restores all global state (free pages, slab, mem_map) before it ends.
static void check_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();
    size_t offset;
    // mark every swap slot except 0/1 as in-use so allocation is predictable
    for (offset = 2; offset < max_swap_offset; offset ++) {
        mem_map[offset] = 1;
    }
    struct mm_struct *mm = mm_create();
    assert(mm != NULL);
    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);
    check_mm_struct = mm;
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);
    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE | VM_READ);
    assert(vma != NULL);
    insert_vma_struct(mm, vma);
    struct Page *rp0 = alloc_page(), *rp1 = alloc_page();
    assert(rp0 != NULL && rp1 != NULL);
    uint32_t perm = PTE_U | PTE_W;
    int ret = page_insert(pgdir, rp1, 0, perm);
    assert(ret == 0 && page_ref(rp1) == 1);
    page_ref_inc(rp1);
    // rp0 replaces rp1 at la 0; rp1 keeps the extra manual reference
    ret = page_insert(pgdir, rp0, 0, perm);
    assert(ret == 0 && page_ref(rp1) == 1 && page_ref(rp0) == 1);
    // check try_alloc_swap_entry
    swap_entry_t entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    mem_map[1] = 1;
    assert(try_alloc_swap_entry() == 0);
    // set rp1, Swap, Active, add to hash_list, active_list
    swap_page_add(rp1, entry);
    swap_active_list_add(rp1);
    assert(PageSwap(rp1));
    mem_map[1] = 0;
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1));
    // check swap_remove_entry
    assert(swap_hash_find(entry) == NULL);
    mem_map[1] = 2;
    swap_remove_entry(entry);
    assert(mem_map[1] == 1);
    swap_page_add(rp1, entry);
    swap_inactive_list_add(rp1);
    swap_remove_entry(entry);
    assert(PageSwap(rp1));
    assert(rp1->index == entry && mem_map[1] == 0);
    // check page_launder, move page from inactive_list to active_list
    assert(page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && nr_inactive_pages == 1);
    assert(list_next(&(inactive_list.swap_list)) == &(rp1->swap_link));
    page_launder();
    assert(nr_active_pages == 1 && nr_inactive_pages == 0);
    assert(PageSwap(rp1) && PageActive(rp1));
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1) && nr_active_pages == 0);
    assert(list_empty(&(active_list.swap_list)));
    // set rp1 inactive again
    assert(page_ref(rp1) == 1);
    swap_page_add(rp1, 0);
    assert(PageSwap(rp1) && swap_offset(rp1->index) == 1);
    swap_inactive_list_add(rp1);
    mem_map[1] = 1;
    assert(nr_inactive_pages == 1);
    page_ref_dec(rp1);
    size_t count = nr_free_pages();
    swap_remove_entry(entry);
    assert(nr_inactive_pages == 0 && nr_free_pages() == count + 1);
    // check swap_out_mm
    pte_t *ptep0 = get_pte(pgdir, 0, 0), *ptep1;
    assert(ptep0 != NULL && pte2page(*ptep0) == rp0);
    ret = swap_out_mm(mm, 0);
    assert(ret == 0);
    // first pass only clears the accessed bit; second pass swaps out
    ret = swap_out_mm(mm, 10);
    assert(ret == 1 && mm->swap_address == PGSIZE);
    ret = swap_out_mm(mm, 10);
    assert(ret == 0 && *ptep0 == entry && mem_map[1] == 1);
    assert(PageDirty(rp0) && PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));
    // check refill_inactive_scan()
    refill_inactive_scan();
    assert(!PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_inactive_pages == 1 && list_next(&(inactive_list.swap_list)) == &(rp0->swap_link));
    page_ref_inc(rp0);
    page_launder();
    assert(PageActive(rp0) && page_ref(rp0) == 1);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));
    page_ref_dec(rp0);
    refill_inactive_scan();
    assert(!PageActive(rp0));
    // save data in rp0
    int i;
    for (i = 0; i < PGSIZE; i ++) {
        ((char *)page2kva(rp0))[i] = (char)i;
    }
    page_launder();
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    assert(mem_map[1] == 1);
    rp1 = alloc_page();
    assert(rp1 != NULL);
    // the laundered page must be readable back from the swap file
    ret = swapfs_read(entry, rp1);
    assert(ret == 0);
    for (i = 0; i < PGSIZE; i ++) {
        assert(((char *)page2kva(rp1))[i] == (char)i);
    }
    // page fault now
    *(char *)0 = 0xEF;
    rp0 = pte2page(*ptep0);
    assert(page_ref(rp0) == 1);
    assert(PageSwap(rp0) && PageActive(rp0));
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0) && nr_active_pages == 0 && nr_inactive_pages == 0);
    // clear accessed flag
    assert(rp0 == pte2page(*ptep0));
    assert(!PageSwap(rp0));
    ret = swap_out_mm(mm, 10);
    assert(ret == 0);
    assert(!PageSwap(rp0) && (*ptep0 & PTE_P));
    // change page table
    ret = swap_out_mm(mm, 10);
    assert(ret == 1);
    assert(*ptep0 == entry && page_ref(rp0) == 0 && mem_map[1] == 1);
    count = nr_free_pages();
    refill_inactive_scan();
    page_launder();
    assert(count + 1 == nr_free_pages());
    ret = swapfs_read(entry, rp1);
    assert(ret == 0 && *(char *)(page2kva(rp1)) == (char)0xEF);
    free_page(rp1);
    // duplictate *ptep0
    ptep1 = get_pte(pgdir, PGSIZE, 0);
    assert(ptep1 != NULL && *ptep1 == 0);
    swap_duplicate(*ptep0);
    *ptep1 = *ptep0;
    // page fault again: both addresses now fault in the same shared page
    *(char *)0 = 0xFF;
    *(char *)(PGSIZE + 1) = 0x88;
    assert(pte2page(*ptep0) == pte2page(*ptep1));
    rp0 = pte2page(*ptep0);
    assert(*(char *)1 == (char)0x88 && *(char *)PGSIZE == (char)0xFF);
    assert(page_ref(rp0) == 2 && rp0->index == entry && mem_map[1] == 0);
    assert(PageSwap(rp0) && PageActive(rp0));
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0));
    assert(list_empty(&(active_list.swap_list)));
    assert(list_empty(&(inactive_list.swap_list)));
    // check swap_out_mm
    *(char *)0 = *(char *)PGSIZE = 0xEE;
    mm->swap_address = PGSIZE * 2;
    ret = swap_out_mm(mm, 2);
    assert(ret == 0);
    assert((*ptep0 & PTE_P) && !(*ptep0 & PTE_A));
    assert((*ptep1 & PTE_P) && !(*ptep1 & PTE_A));
    ret = swap_out_mm(mm, 2);
    assert(ret == 2);
    assert(mem_map[1] == 2 && page_ref(rp0) == 0);
    refill_inactive_scan();
    page_launder();
    assert(mem_map[1] == 2 && swap_hash_find(entry) == NULL);
    // check copy entry
    swap_remove_entry(entry);
    *ptep1 = 0;
    assert(mem_map[1] == 1);
    swap_entry_t store;
    ret = swap_copy_entry(entry, &store);
    assert(ret == -E_NO_MEM);
    mem_map[2] = SWAP_UNUSED;
    ret = swap_copy_entry(entry, &store);
    assert(ret == 0 && swap_offset(store) == 2 && mem_map[2] == 0);
    mem_map[2] = 1;
    *ptep1 = store;
    // the copied entry must fault in the same data independently
    assert(*(char *)PGSIZE == (char)0xEE && *(char *)(PGSIZE + 1)== (char)0x88);
    *(char *)PGSIZE = 1, *(char *)(PGSIZE + 1) = 2;
    assert(*(char *)0 == (char)0xEE && *(char *)1 == (char)0x88);
    ret = swap_in_page(entry, &rp0);
    assert(ret == 0);
    ret = swap_in_page(store, &rp1);
    assert(ret == 0);
    assert(rp1 != rp0);
    // free memory
    swap_list_del(rp0), swap_list_del(rp1);
    swap_page_del(rp0), swap_page_del(rp1);
    assert(page_ref(rp0) == 1 && page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && list_empty(&(active_list.swap_list)));
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        assert(list_empty(hash_list + i));
    }
    page_remove(pgdir, 0);
    page_remove(pgdir, PGSIZE);
    free_page(pa2page(pgdir[0]));
    pgdir[0] = 0;
    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;
    assert(nr_active_pages == 0 && nr_inactive_pages == 0);
    // restore the swap map so later tests see a clean state
    for (offset = 0; offset < max_swap_offset; offset ++) {
        mem_map[offset] = SWAP_UNUSED;
    }
    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());
    cprintf("check_swap() succeeded.\n");
}
/* ucore use copy-on-write when forking a new process,
 * thus copy_range only copy pdt/pte and set their permission to
 * READONLY, a write will be handled in pgfault
 * @to:    target page directory (the child)
 * @from:  source page directory (the parent)
 * @start, @end: page-aligned linear address range to copy
 * @share: when set, share writable mappings instead of stripping the
 *         write bit for COW
 * returns 0 on success, -E_NO_MEM when a page table cannot be allocated
 */
int copy_range(pgd_t *to, pgd_t *from, uintptr_t start, uintptr_t end,
               bool share) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    assert(USER_ACCESS(start, end));
    do {
        pte_t *ptep = get_pte(from, start, 0), *nptep;
        if (ptep == NULL) {
            // no page table at some level: skip the whole missing region
            if (get_pud(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PUSIZE, PUSIZE);
            } else if (get_pmd(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PMSIZE, PMSIZE);
            } else {
                start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            }
            continue ;
        }
        if (*ptep != 0) {
            if ((nptep = get_pte(to, start, 1)) == NULL) {
                return -E_NO_MEM;
            }
            int ret;
            assert(*ptep != 0 && *nptep == 0);
#ifdef ARCH_ARM
//TODO add code to handle swap
            if (ptep_present(ptep)){
                //no body should be able to write this page
                //before a W-pgfault
                pte_perm_t perm = PTE_P;
                if(ptep_u_read(ptep))
                    perm |= PTE_U;
                if(!share){
                    //Original page should be set to readonly!
                    //because Copy-on-write may happen
                    //after the current proccess modifies its page
                    ptep_set_perm(ptep, perm);
                }else{
                    if(ptep_u_write(ptep)){
                        perm |= PTE_W;
                    }
                }
                struct Page *page = pte2page(*ptep);
                ret = page_insert(to, page, start, perm);
            }
#else /* ARCH_ARM */
            if (ptep_present(ptep)) {
                // share the frame: strip the write bit in BOTH address
                // spaces (unless sharing is requested) so the first write
                // raises a COW page fault
                pte_perm_t perm = ptep_get_perm(ptep, PTE_USER);
                struct Page *page = pte2page(*ptep);
                if (!share && ptep_s_write(ptep)) {
                    ptep_unset_s_write(&perm);
                    pte_perm_t perm_with_swap_stat = ptep_get_perm(ptep, PTE_SWAP);
                    ptep_set_perm(&perm_with_swap_stat, perm);
                    page_insert(from, page, start, perm_with_swap_stat);
                }
                ret = page_insert(to, page, start, perm);
                assert(ret == 0);
            }
#endif /* ARCH_ARM */
            else {
                // non-present pte: a swap entry — duplicate its reference
#ifdef CONFIG_NO_SWAP
                assert(0);
#endif
                swap_entry_t entry;
                ptep_copy(&entry, ptep);
                swap_duplicate(entry);
                ptep_copy(nptep, &entry);
            }
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
#ifdef ARCH_ARM
    /* we have modified the PTE of the original
     * process, so invalidate TLB */
    tlb_invalidate_all();
#endif
    return 0;
}
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err (x86 format):
 *               bit 0 (P)   - 0: not-present page, 1: protection violation
 *               bit 1 (W/R) - 0: read access,      1: write access
 *               bit 2 (U/S) - 0: kernel mode,      1: user mode
 * @addr       : the linear address which caused the fault (CR2 contents)
 * @return 0 on success, -E_INVAL for a bad address / access type,
 *         -E_NO_MEM when memory cannot be allocated
 *
 * CALL GRAPH: trap --> trap_dispatch --> pgfault_handler --> do_pgfault
 *
 * BUGFIX: the COW test was written `error_code & 3 == 3`, which the C
 * precedence rules parse as `error_code & (3 == 3)` == `error_code & 1`,
 * so it matched ANY fault with the P bit set. It now tests the intended
 * "write to a present page" condition. The kernel-virtual addresses are
 * also kept as `void *` (not `uintptr_t`) for memcpy, and the allocation
 * of the COW copy is NULL-checked.
 */
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    // try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    // is addr inside the range of one of mm's vmas?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and can not find it in vma\n", addr);
        goto failed;
    }
    // check the access type encoded in error_code against the vma flags
    switch (error_code & 3) {
    default:
            /* error code flag : default is 3 ( W/R=1, P=1): write, present */
    case 2: /* error code flag : (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1: /* error code flag : (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0: /* error code flag : (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    // build the pte permission bits from the vma flags
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    // find the pte for addr, allocating a page table if it is missing
    pte_t *ptep = get_pte(mm->pgdir, addr, 1);
    if (ptep == NULL) {
        goto failed;
    }
    if (*ptep == 0) {
        // neither present nor swapped out: map a brand-new page
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            goto failed;
        }
    }
    else if ((error_code & 3) == 3) {
        // write fault on a present page: Copy-on-Write — allocate a
        // private copy, map it at addr, and duplicate the contents
        struct Page *opage = pte2page(*ptep);
        struct Page *npage = pgdir_alloc_page(mm->pgdir, addr, perm);
        if (npage == NULL) {
            goto failed;
        }
        // NOTE(review): assumes the old page keeps at least one other
        // reference across the remapping (COW pages are shared) so its
        // contents are still valid here — confirm against page_insert /
        // page_remove_pte.
        memcpy(page2kva(npage), page2kva(opage), PGSIZE);
    }
    else {
        // the pte holds a swap entry: bring the page back from disk,
        // remap it, and register it with the page-replacement manager
        struct Page *page = NULL;
        if (!swap_init_ok) {
            cprintf("no swap_init_ok but ptep is %x, failed\n",*ptep);
            goto failed;
        }
        swap_in(mm, addr, &page);
        page_insert(mm->pgdir, page, addr, perm);
        page->pra_vaddr = addr;
        swap_map_swappable(mm, addr, page, 0);
    }
    ret = 0;
failed:
    return ret;
}
// check_swap - self test of the swap & page replacement machinery:
// swap-entry allocation, the hash/active/inactive lists, page_launder,
// refill_inactive_scan, swap_out_mm, swap-in plus copy-on-write, and
// swap_copy_entry.  Uses TEST_PAGE in the initial page directory as a
// scratch mapping and restores every global counter before returning.
static void check_swap(void) {
    // Snapshot allocator counters so leaks are detected at the end.
    size_t nr_used_pages_store = nr_used_pages();
    size_t slab_allocated_store = slab_allocated();
    size_t offset;
    // Occupy every swap slot from offset 2 upward, so the allocator
    // below can only ever hand out offset 1.
    for (offset = 2; offset < max_swap_offset; offset ++) {
        mem_map[offset] = 1;
    }

    // Build a minimal mm with one R/W vma covering [TEST_PAGE, TEST_PAGE+PTSIZE).
    struct mm_struct *mm = mm_create();
    assert(mm != NULL);
    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);
    check_mm_struct = mm;   // publish the test mm via the global hook
    pgd_t *pgdir = mm->pgdir = init_pgdir_get();
    assert(pgdir[PGX(TEST_PAGE)] == 0);
    struct vma_struct *vma = vma_create(TEST_PAGE, TEST_PAGE + PTSIZE, VM_WRITE | VM_READ);
    assert(vma != NULL);
    insert_vma_struct(mm, vma);

    // Map rp1 then remap rp0 at TEST_PAGE; the remap drops rp1's mapping
    // reference (the explicit page_ref_inc keeps rp1 alive at ref 1).
    struct Page *rp0 = alloc_page(), *rp1 = alloc_page();
    assert(rp0 != NULL && rp1 != NULL);
    pte_perm_t perm;
    ptep_unmap(&perm);
    ptep_set_u_write(&perm);
    int ret = page_insert(pgdir, rp1, TEST_PAGE, perm);
    assert(ret == 0 && page_ref(rp1) == 1);
    page_ref_inc(rp1);
    ret = page_insert(pgdir, rp0, TEST_PAGE, perm);
    assert(ret == 0 && page_ref(rp1) == 1 && page_ref(rp0) == 1);

    // check try_alloc_swap_entry: only offset 1 is free; once it is
    // marked busy the allocator must fail (returns 0).
    swap_entry_t entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    mem_map[1] = 1;
    assert(try_alloc_swap_entry() == 0);

    // Attach rp1 to the entry (hash list + active list); freeing the slot
    // lets try_alloc_swap_entry reclaim it, stripping rp1's Swap state.
    swap_page_add(rp1, entry);
    swap_active_list_add(rp1);
    assert(PageSwap(rp1));
    mem_map[1] = 0;
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1));

    // check swap_remove_entry: decrements the slot count; the linkage is
    // only torn down once the count reaches zero.
    assert(swap_hash_find(entry) == NULL);
    mem_map[1] = 2;
    swap_remove_entry(entry);
    assert(mem_map[1] == 1);
    swap_page_add(rp1, entry);
    swap_inactive_list_add(rp1);
    swap_remove_entry(entry);
    assert(PageSwap(rp1));
    assert(rp1->index == entry && mem_map[1] == 0);

    // check page_launder: moves a still-referenced page from the
    // inactive list back to the active list.
    assert(page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && nr_inactive_pages == 1);
    assert(list_next(&(inactive_list.swap_list)) == &(rp1->swap_link));
    page_launder();
    assert(nr_active_pages == 1 && nr_inactive_pages == 0);
    assert(PageSwap(rp1) && PageActive(rp1));
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1) && nr_active_pages == 0);
    assert(list_empty(&(active_list.swap_list)));

    // set rp1 inactive again, then drop its last ref: removing the entry
    // must now free the page itself (used-page count goes down by one).
    assert(page_ref(rp1) == 1);
    swap_page_add(rp1, 0);
    assert(PageSwap(rp1) && swap_offset(rp1->index) == 1);
    swap_inactive_list_add(rp1);
    mem_map[1] = 1;
    assert(nr_inactive_pages == 1);
    page_ref_dec(rp1);
    size_t count = nr_used_pages();
    swap_remove_entry(entry);
    assert(nr_inactive_pages == 0 && nr_used_pages() == count - 1);

    // check swap_out_mm: a request of 0 pages is a no-op; the following
    // passes walk the vma and eventually evict rp0 into swap slot 1.
    pte_t *ptep0 = get_pte(pgdir, TEST_PAGE, 0), *ptep1;
    assert(ptep0 != NULL && pte2page(*ptep0) == rp0);
    ret = swap_out_mm(mm, 0);
    assert(ret == 0);
    ret = swap_out_mm(mm, 10);
    assert(ret == 1 && mm->swap_address == TEST_PAGE + PGSIZE);
    ret = swap_out_mm(mm, 10);
    assert(ret == 0 && *ptep0 == entry && mem_map[1] == 1);
    assert(PageDirty(rp0) && PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    // check refill_inactive_scan(): unreferenced active pages move to the
    // inactive list; page_launder promotes them back once referenced.
    refill_inactive_scan();
    assert(!PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_inactive_pages == 1 && list_next(&(inactive_list.swap_list)) == &(rp0->swap_link));
    page_ref_inc(rp0);
    page_launder();
    assert(PageActive(rp0) && page_ref(rp0) == 1);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));
    page_ref_dec(rp0);
    refill_inactive_scan();
    assert(!PageActive(rp0));

    // save data in rp0, launder it out, and verify the swap file holds
    // exactly those bytes.
    int i;
    for (i = 0; i < PGSIZE; i ++) {
        ((char *)page2kva(rp0))[i] = (char)i;
    }
    page_launder();
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    assert(mem_map[1] == 1);
    rp1 = alloc_page();
    assert(rp1 != NULL);
    ret = swapfs_read(entry, rp1);
    assert(ret == 0);
    for (i = 0; i < PGSIZE; i ++) {
        assert(((char *)page2kva(rp1))[i] == (char)i);
    }

    // page fault now: the write faults rp0 back in from swap.
    *(char *)(TEST_PAGE) = 0xEF;
    rp0 = pte2page(*ptep0);
    assert(page_ref(rp0) == 1);
    assert(PageSwap(rp0) && PageActive(rp0));
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0) && nr_active_pages == 0 && nr_inactive_pages == 0);

    // clear accessed flag: the first swap_out_mm pass only ages the
    // mapping and leaves the PTE present...
    assert(rp0 == pte2page(*ptep0));
    assert(!PageSwap(rp0));
    ret = swap_out_mm(mm, 10);
    assert(ret == 0);
    assert(!PageSwap(rp0) && ptep_present(ptep0));

    // ...the second pass actually changes the page table entry.
    ret = swap_out_mm(mm, 10);
    assert(ret == 1);
    assert(*ptep0 == entry && page_ref(rp0) == 0 && mem_map[1] == 1);
    count = nr_used_pages();
    refill_inactive_scan();
    page_launder();
    assert(count - 1 == nr_used_pages());
    ret = swapfs_read(entry, rp1);
    assert(ret == 0 && *(char *)(page2kva(rp1)) == (char)0xEF);
    free_page(rp1);

    // duplicate *ptep0 into the next slot to create a shared swap entry.
    ptep1 = get_pte(pgdir, TEST_PAGE + PGSIZE, 0);
    assert(ptep1 != NULL && ptep_invalid(ptep1));
    swap_duplicate(*ptep0);
    ptep_copy(ptep1, ptep0);
    mp_tlb_invalidate(pgdir, TEST_PAGE + PGSIZE);

    // page fault again: the writes must go through copy-on-write, so the
    // two vaddrs end up on distinct physical pages with distinct data.
    *(char *)(TEST_PAGE + 1) = 0x88;
    *(char *)(TEST_PAGE + PGSIZE) = 0x8F;
    *(char *)(TEST_PAGE + PGSIZE + 1) = 0xFF;
    assert(pte2page(*ptep0) != pte2page(*ptep1));
    assert(*(char *)(TEST_PAGE) == (char)0xEF);
    assert(*(char *)(TEST_PAGE + 1) == (char)0x88);
    assert(*(char *)(TEST_PAGE + PGSIZE) == (char)0x8F);
    assert(*(char *)(TEST_PAGE + PGSIZE + 1) == (char)0xFF);
    rp0 = pte2page(*ptep0);
    rp1 = pte2page(*ptep1);
    assert(!PageSwap(rp0) && PageSwap(rp1) && PageActive(rp1));
    entry = try_alloc_swap_entry();
    assert(!PageSwap(rp0) && !PageSwap(rp1));
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(list_empty(&(active_list.swap_list)));
    assert(list_empty(&(inactive_list.swap_list)));

    // Re-map rp0 at TEST_PAGE+PGSIZE with the accessed bit preset.
    ptep_set_accessed(&perm);
    page_insert(pgdir, rp0, TEST_PAGE + PGSIZE, perm);

    // check swap_out_mm over both mappings: first pass clears the
    // accessed bits, second pass evicts both pages.
    *(char *)(TEST_PAGE) = *(char *)(TEST_PAGE + PGSIZE) = 0xEE;
    mm->swap_address = TEST_PAGE + PGSIZE * 2;
    ret = swap_out_mm(mm, 2);
    assert(ret == 0);
    assert(ptep_present(ptep0) && !ptep_accessed(ptep0));
    assert(ptep_present(ptep1) && !ptep_accessed(ptep1));
    ret = swap_out_mm(mm, 2);
    assert(ret == 2);
    assert(mem_map[1] == 2 && page_ref(rp0) == 0);
    refill_inactive_scan();
    page_launder();
    assert(mem_map[1] == 2 && swap_hash_find(entry) == NULL);

    // check copy entry: swap_copy_entry needs a free slot (-E_NO_MEM
    // otherwise), then clones slot 1 into slot 2; the two copies must
    // diverge independently afterwards.
    swap_remove_entry(entry);
    ptep_unmap(ptep1);
    assert(mem_map[1] == 1);
    swap_entry_t store;
    ret = swap_copy_entry(entry, &store);
    assert(ret == -E_NO_MEM);
    mem_map[2] = SWAP_UNUSED;
    ret = swap_copy_entry(entry, &store);
    assert(ret == 0 && swap_offset(store) == 2 && mem_map[2] == 0);
    mem_map[2] = 1;
    ptep_copy(ptep1, &store);
    assert(*(char *)(TEST_PAGE + PGSIZE) == (char)0xEE && *(char *)(TEST_PAGE + PGSIZE + 1) == (char)0x88);
    *(char *)(TEST_PAGE + PGSIZE) = 1, *(char *)(TEST_PAGE + PGSIZE + 1) = 2;
    assert(*(char *)TEST_PAGE == (char)0xEE && *(char *)(TEST_PAGE + 1) == (char)0x88);
    ret = swap_in_page(entry, &rp0);
    assert(ret == 0);
    ret = swap_in_page(store, &rp1);
    assert(ret == 0);
    assert(rp1 != rp0);

    // free memory: detach everything, tear down the mappings and the
    // page-table pages, and verify all lists/counters are back to the
    // starting state.
    swap_list_del(rp0), swap_list_del(rp1);
    swap_page_del(rp0), swap_page_del(rp1);
    assert(page_ref(rp0) == 1 && page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && list_empty(&(active_list.swap_list)));
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        assert(list_empty(hash_list + i));
    }
    page_remove(pgdir, TEST_PAGE);
    page_remove(pgdir, (TEST_PAGE + PGSIZE));
    // Free intermediate paging levels only where the architecture
    // actually has distinct levels.
#if PMXSHIFT != PUXSHIFT
    free_page(pa2page(PMD_ADDR(*get_pmd(pgdir, TEST_PAGE, 0))));
#endif
#if PUXSHIFT != PGXSHIFT
    free_page(pa2page(PUD_ADDR(*get_pud(pgdir, TEST_PAGE, 0))));
#endif
    free_page(pa2page(PGD_ADDR(*get_pgd(pgdir, TEST_PAGE, 0))));
    pgdir[PGX(TEST_PAGE)] = 0;
    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;
    assert(nr_active_pages == 0 && nr_inactive_pages == 0);
    for (offset = 0; offset < max_swap_offset; offset ++) {
        mem_map[offset] = SWAP_UNUSED;
    }
    assert(nr_used_pages_store == nr_used_pages());
    assert(slab_allocated_store == slab_allocated());
    kprintf("check_swap() succeeded.\n");
}
// do_pgfault - interrupt handler to process the page fault execption int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) { if (mm == NULL) { assert(current != NULL); panic("page fault in kernel thread: pid = %d, %d %08x.\n", current->pid, error_code, addr); } lock_mm(mm); int ret = -E_INVAL; struct vma_struct *vma = find_vma(mm, addr); if (vma == NULL || vma->vm_start > addr) { goto failed; } if (vma->vm_flags & VM_STACK) { if (addr < vma->vm_start + PGSIZE) { goto failed; } } switch (error_code & 3) { default: /* default is 3: write, present */ case 2: /* write, not present */ if (!(vma->vm_flags & VM_WRITE)) { goto failed; } break; case 1: /* read, present */ goto failed; case 0: /* read, not present */ if (!(vma->vm_flags & (VM_READ | VM_EXEC))) { goto failed; } } uint32_t perm = PTE_U; if (vma->vm_flags & VM_WRITE) { perm |= PTE_W; } addr = ROUNDDOWN(addr, PGSIZE); ret = -E_NO_MEM; pte_t *ptep; if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) { goto failed; } if (*ptep == 0) { if (!(vma->vm_flags & VM_SHARE)) { if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) { goto failed; } } else { lock_shmem(vma->shmem); uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off; pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 1); if (sh_ptep == NULL || *sh_ptep == 0) { unlock_shmem(vma->shmem); goto failed; } unlock_shmem(vma->shmem); if (*sh_ptep & PTE_P) { page_insert(mm->pgdir, pa2page(*sh_ptep), addr, perm); } else { swap_duplicate(*ptep); *ptep = *sh_ptep; } } } else { struct Page *page, *newpage = NULL; bool cow = ((vma->vm_flags & (VM_SHARE | VM_WRITE)) == VM_WRITE), may_copy = 1; assert(!(*ptep & PTE_P) || ((error_code & 2) && !(*ptep & PTE_W) && cow)); if (cow) { newpage = alloc_page(); } if (*ptep & PTE_P) { page = pte2page(*ptep); } else { if ((ret = swap_in_page(*ptep, &page)) != 0) { if (newpage != NULL) { free_page(newpage); } goto failed; } if (!(error_code & 2) && cow) { perm &= ~PTE_W; may_copy = 0; } } if (cow && 
may_copy) { if (page_ref(page) + swap_page_count(page) > 1) { if (newpage == NULL) { goto failed; } memcpy(page2kva(newpage), page2kva(page), PGSIZE); page = newpage, newpage = NULL; } } page_insert(mm->pgdir, page, addr, perm); if (newpage != NULL) { free_page(newpage); } } ret = 0; failed: unlock_mm(mm); return ret; }
// check_swap - in-kernel self test for the page replacement algorithm:
// builds a private mm over boot_pgdir, hands CHECK_VALID_PHY_PAGE_NUM
// physical pages to a temporarily-emptied free list, drives the checker
// hooks (check_content_set / check_content_access), then restores the
// original memory environment.
static void check_swap(void) {
    // backup mem env: count the free blocks/pages so the free list can
    // be verified as unchanged at the end.
    int ret, count = 0, total = 0, i;
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        assert(PageProperty(p));
        count++, total += p->property;
    }
    assert(total == nr_free_pages());
    cprintf("BEGIN check_swap: count %d, total %d\n", count, total);

    // now we set the phy pages env: one mm with a single R/W vma.
    struct mm_struct *mm = mm_create();
    assert(mm != NULL);
    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);
    check_mm_struct = mm;   // publish the test mm via the global hook
    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);
    struct vma_struct *vma = vma_create(BEING_CHECK_VALID_VADDR, CHECK_VALID_VADDR, VM_WRITE | VM_READ);
    assert(vma != NULL);
    insert_vma_struct(mm, vma);

    // setup the temp Page Table for vaddr 0~4MB (this allocates the
    // page-table page).
    cprintf("setup Page Table for vaddr 0X1000, so alloc a page\n");
    pte_t *temp_ptep = NULL;
    temp_ptep = get_pte(mm->pgdir, BEING_CHECK_VALID_VADDR, 1);
    assert(temp_ptep != NULL);
    cprintf("setup Page Table vaddr 0~4MB OVER!\n");

    // Grab the physical pages the checker will juggle.
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_rp[i] = alloc_page();
        assert(check_rp[i] != NULL);
        assert(!PageProperty(check_rp[i]));
    }

    // Swap in an empty free list containing only the check pages, so the
    // allocator is forced into page replacement once they are used up.
    list_entry_t free_list_store = free_list;
    list_init(&free_list);
    assert(list_empty(&free_list));
    //assert(alloc_page() == NULL);
    unsigned int nr_free_store = nr_free;
    nr_free = 0;
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        free_pages(check_rp[i], 1);
    }
    assert(nr_free == CHECK_VALID_PHY_PAGE_NUM);

    cprintf("set up init env for check_swap begin!\n");
    // setup initial vir_page<->phy_page environment for the page
    // replacement algorithm.
    pgfault_num = 0;
    check_content_set();
    assert(nr_free == 0);
    for (i = 0; i < MAX_SEQ_NO; i++)
        swap_out_seq_no[i] = swap_in_seq_no[i] = -1;
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_ptep[i] = 0;
        check_ptep[i] = get_pte(pgdir, (i + 1) * 0x1000, 0);
        //cprintf("i %d, check_ptep addr %x, value %x\n", i, check_ptep[i], *check_ptep[i]);
        assert(check_ptep[i] != NULL);
        assert(pte2page(*check_ptep[i]) == check_rp[i]);
        assert((*check_ptep[i] & PTE_P));
    }
    cprintf("set up init env for check_swap over!\n");

    // now access the virt pages to test the page replacement algorithm.
    ret = check_content_access();
    assert(ret == 0);

    // restore kernel mem env
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        free_pages(check_rp[i], 1);
    }
    //free_page(pte2page(*temp_ptep));
    free_page(pde2page(pgdir[0]));   // release the temp page-table page
    pgdir[0] = 0;
    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;
    nr_free = nr_free_store;
    free_list = free_list_store;

    // Re-walk the restored free list; count/total should cancel back to
    // their original values if nothing leaked.
    le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        count--, total -= p->property;
    }
    cprintf("count is %d, total is %d\n", count, total);
    //assert(count == 0);
    cprintf("check_swap() succeeded!\n");
}
/**
 * do_pgfault - page fault exception handler.
 *
 * @param mm         address space the fault occurred in; NULL means the
 *                   fault happened in a kernel thread, which is reported
 *                   and answered with -E_KILLED
 * @param error_code fault error code; bit 0 = present, bit 1 = write
 *                   (see the switch below)
 * @param addr       faulting linear address
 * @return 0 on success; -E_KILLED for kernel-thread faults; -E_INVAL for
 *         illegal accesses; -E_NO_MEM when out of memory/swap
 *
 * Covers demand paging for private anonymous, file-backed
 * (UCONFIG_BIONIC_LIBC) and shared mappings, swap-in (UCONFIG_SWAP),
 * and copy-on-write.
 */
int do_pgfault(struct mm_struct *mm, machine_word_t error_code, uintptr_t addr)
{
    if (mm == NULL) {
        assert(current != NULL);
        /* Chen Yuheng: report it and give the caller a chance to deal
         * with it instead of panicking. */
        kprintf("page fault in kernel thread: pid = %d, name = %s, %d %08x.\n",
                current->pid, current->name, error_code, addr);
        return -E_KILLED;
    }
    // Take mm's lock unless this thread already owns it (nested fault).
    bool need_unlock = 1;
    if (!try_lock_mm(mm)) {
        if (current != NULL && mm->locked_by == current->pid) {
            need_unlock = 0;
        } else {
            lock_mm(mm);
        }
    }

    int ret = -E_INVAL;
    // The fault is only legal if some vma covers addr.
    struct vma_struct *vma = find_vma(mm, addr);
    if (vma == NULL || vma->vm_start > addr) {
        goto failed;
    }
    if (vma->vm_flags & VM_STACK) {
        // Keep the lowest page of a stack vma as an unmapped guard page.
        if (addr < vma->vm_start + PGSIZE) {
            goto failed;
        }
    }
    //kprintf("@ %x %08x\n", vma->vm_flags, vma->vm_start);
    //assert((vma->vm_flags & VM_IO)==0);
    if (vma->vm_flags & VM_IO) {
        // Device mappings are never demand-paged.
        ret = -E_INVAL;
        goto failed;
    }
    // Validate the access type against the vma permissions.
    switch (error_code & 3) {
    default: /* default is 3: write, present */
    case 2:  /* write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            goto failed;
        }
        break;
    case 1:  /* read, present */
        goto failed;
    case 0:  /* read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            goto failed;
        }
    }

    pte_perm_t perm, nperm;
#ifdef ARCH_ARM
    /* ARM9 software emulated PTE_xxx */
    perm = PTE_P | PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
#else
    ptep_unmap(&perm);
    ptep_set_u_read(&perm);
    if (vma->vm_flags & VM_WRITE) {
        ptep_set_u_write(&perm);
    }
#endif
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;
    pte_t *ptep;
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        goto failed;
    }
    if (ptep_invalid(ptep)) {
#ifdef UCONFIG_BIONIC_LIBC
        if (vma->mfile.file != NULL) {
            // File-backed mapping: read the page from the file and map
            // it without supervisor-write so writes fault again (COW).
            struct file *file = vma->mfile.file;
            off_t old_pos = file->pos, new_pos =
                vma->mfile.offset + addr - vma->vm_start;
#ifdef SHARE_MAPPED_FILE
            struct mapped_addr *maddr = find_maddr(file, new_pos, NULL);
            if (maddr == NULL) {
#endif // SHARE_MAPPED_FILE
                struct Page *page;
                if ((page = alloc_page()) == NULL) {
                    assert(false);
                    goto failed;
                }
                nperm = perm;
#ifdef ARCH_ARM
                /* ARM9 software emulated PTE_xxx */
                nperm &= ~PTE_W;
#else
                ptep_unset_s_write(&nperm);
#endif
                page_insert_pte(mm->pgdir, page, ptep, addr, nperm);
                // Read the backing file content, restoring file->pos.
                if ((ret = filestruct_setpos(file, new_pos)) != 0) {
                    assert(false);
                    goto failed;
                }
                filestruct_read(file, page2kva(page), PGSIZE);
                if ((ret = filestruct_setpos(file, old_pos)) != 0) {
                    assert(false);
                    goto failed;
                }
#ifdef SHARE_MAPPED_FILE
                // Record file-offset -> page so later faults can share it.
                if ((maddr = (struct mapped_addr *)
                     kmalloc(sizeof(struct mapped_addr))) != NULL) {
                    maddr->page = page;
                    maddr->offset = new_pos;
                    page->maddr = maddr;
                    list_add(&(file->node->mapped_addr_list), &(maddr->list));
                } else {
                    assert(false);
                }
            } else {
                // Another mapping already loaded this file page: reuse it.
                nperm = perm;
#ifdef ARCH_ARM
                /* ARM9 software emulated PTE_xxx */
                nperm &= ~PTE_W;
#else
                ptep_unset_s_write(&nperm);
#endif
                page_insert_pte(mm->pgdir, maddr->page, ptep, addr, nperm);
            }
#endif //SHARE_MAPPED_FILE
        } else
#endif //UCONFIG_BIONIC_LIBC
        if (!(vma->vm_flags & VM_SHARE)) {
            // Private anonymous mapping: plain demand allocation.
            if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
                goto failed;
            }
#ifdef UCONFIG_BIONIC_LIBC
            if (vma->vm_flags & VM_ANONYMOUS) {
                memset((void *)addr, 0, PGSIZE);
            }
#endif //UCONFIG_BIONIC_LIBC
        } else {    //shared mem
            lock_shmem(vma->shmem);
            uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off;
            pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 1);
            if (sh_ptep == NULL || ptep_invalid(sh_ptep)) {
                unlock_shmem(vma->shmem);
                goto failed;
            }
            unlock_shmem(vma->shmem);
            if (ptep_present(sh_ptep)) {
                page_insert(mm->pgdir, pa2page(*sh_ptep), addr, perm);
            } else {
#ifdef UCONFIG_SWAP
                // The shared entry is swapped out: raise the reference
                // count of *its* swap entry before copying it into ptep.
                // BUGFIX: was swap_duplicate(*ptep), but ptep is known
                // invalid (empty) in this branch - the shared entry
                // (*sh_ptep) is the one that must be duplicated.
                swap_duplicate(*sh_ptep);
                ptep_copy(ptep, sh_ptep);
#else
                panic("NO SWAP\n");
#endif
            }
        }
    } else {        //a present page, handle copy-on-write (cow)
        struct Page *page, *newpage = NULL;
        bool cow = ((vma->vm_flags & (VM_SHARE | VM_WRITE)) == VM_WRITE),
            may_copy = 1;
#if 1
        // Sanity check: we only get here for a swapped-out entry or a
        // legitimate COW write fault on a non-user-writable PTE.
        if (!(!ptep_present(ptep)
              || ((error_code & 2) && !ptep_u_write(ptep) && cow))) {
            //assert(PADDR(mm->pgdir) == rcr3());
            kprintf("%p %p %d %d %x\n", *ptep, addr, error_code, cow,
                    vma->vm_flags);
            assert(0);
        }
#endif
        if (cow) {
            // Pre-allocate the potential private copy up front.
            newpage = alloc_page();
        }
        if (ptep_present(ptep)) {
            page = pte2page(*ptep);
        } else {
#ifdef UCONFIG_SWAP
            if ((ret = swap_in_page(*ptep, &page)) != 0) {
                if (newpage != NULL) {
                    free_page(newpage);
                }
                goto failed;
            }
#else
            assert(0);
#endif
            if (!(error_code & 2) && cow) {
                // Read fault on a COW page: map it read-only and defer
                // the copy to the eventual write fault.
#ifdef ARCH_ARM
//#warning ARM9 software emulated PTE_xxx
                perm &= ~PTE_W;
#else
                ptep_unset_s_write(&perm);
#endif
                may_copy = 0;
            }
        }
        if (cow && may_copy) {
#ifdef UCONFIG_SWAP
            if (page_ref(page) + swap_page_count(page) > 1) {
#else
            if (page_ref(page) > 1) {
#endif
                // Frame still shared: materialize the private copy.
                if (newpage == NULL) {
                    goto failed;
                }
                memcpy(page2kva(newpage), page2kva(page), PGSIZE);
                //kprintf("COW!\n");
                page = newpage, newpage = NULL;
            }
        }
#ifdef UCONFIG_BIONIC_LIBC
        else if (vma->mfile.file != NULL) {
            // Exclusive write fault on a file-backed page: detach it
            // from the shared file-page cache.
#ifdef UCONFIG_SWAP
            // BUGFIX: was page_reg(page) - an undefined identifier; the
            // reference-count accessor used everywhere else is page_ref.
            assert(page_ref(page) + swap_page_count(page) == 1);
#else
            assert(page_ref(page) == 1);
#endif
#ifdef SHARE_MAPPED_FILE
            off_t offset = vma->mfile.offset + addr - vma->vm_start;
            struct mapped_addr *maddr =
                find_maddr(vma->mfile.file, offset, page);
            if (maddr != NULL) {
                list_del(&(maddr->list));
                kfree(maddr);
                page->maddr = NULL;
                assert(find_maddr(vma->mfile.file, offset, page) == NULL);
            } else {
            }
#endif //SHARE_MAPPED_FILE
        }
#endif //UCONFIG_BIONIC_LIBC
        else {
        }
        page_insert(mm->pgdir, page, addr, perm);
        if (newpage != NULL) {
            free_page(newpage);  // the copy turned out to be unnecessary
        }
    }
    ret = 0;

failed:
    if (need_unlock) {
        unlock_mm(mm);
    }
    return ret;
}