int vm_do_unmap(addr_t virt, unsigned locked)
{
	/* This gives the virtual address of the table needed, and sets
	 * the correct place as zero */
#if CONFIG_SWAP
	if(current_task && num_swapdev && current_task->num_swapped)
		swap_in_page((task_t *)current_task, virt & PAGE_MASK);
#endif
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_acquire(&pd_cur_data->lock);
	addr_t p = page_tables[(virt&PAGE_MASK)/0x1000];
	page_tables[(virt&PAGE_MASK)/0x1000] = 0;
	asm("invlpg (%0)"::"r" (virt));
#if CONFIG_SMP
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA) {
		if(IS_KERN_MEM(virt))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
		else if((IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
#endif
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_release(&pd_cur_data->lock);
	if(p && !(p & PAGE_COW))
		pm_free_page(p & PAGE_MASK);
	return 0;
}
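/* A minimal sketch of why a flat page_tables[] array indexed by virtual
 * page number works, assuming the classic x86 self-mapped (recursive)
 * page directory: if one PDE points back at the directory itself, every
 * PTE becomes reachable through a single contiguous virtual window.
 * PT_WINDOW and pte_for are illustrative, not part of this kernel. */
#include <stdint.h>

#define PT_WINDOW 0xFFC00000u	/* hypothetical: PDE 1023 self-mapped, 4 MB window */

static inline uint32_t *pte_for(uint32_t virt)
{
	/* one 32-bit PTE per 4 KB page, laid out consecutively in the window */
	return (uint32_t *)PT_WINDOW + (virt >> 12);
}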
// swap_copy_entry - copy the contents of a swapped-out page frame to a new page
//                 - set the new page's PG_swap flag and add it to the swap active list
int
swap_copy_entry(swap_entry_t entry, swap_entry_t *store) {
    if (store == NULL) {
        return -E_INVAL;
    }

    int ret = -E_NO_MEM;
    struct Page *page, *newpage;
    swap_duplicate(entry);

    if ((newpage = alloc_page()) == NULL) {
        goto failed;
    }
    if ((ret = swap_in_page(entry, &page)) != 0) {
        goto failed_free_page;
    }

    ret = -E_NO_MEM;
    if (!swap_page_add(newpage, 0)) {
        goto failed_free_page;
    }
    swap_active_list_add(newpage);
    memcpy(page2kva(newpage), page2kva(page), PGSIZE);
    *store = newpage->index;
    ret = 0;
out:
    swap_remove_entry(entry);
    return ret;

failed_free_page:
    free_page(newpage);
failed:
    goto out;
}
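/* A hedged usage sketch, assuming the helpers above: give a second PTE its
 * own private copy of a swapped-out page. copy_swapped_pte is an
 * illustrative caller, not a function from this source. */
static int
copy_swapped_pte(pte_t *from, pte_t *to) {
    swap_entry_t store;
    int ret = swap_copy_entry(*from, &store);   // data lands in a fresh swap slot
    if (ret == 0) {
        *to = store;                            // 'to' now names its own entry
    }
    return ret;
}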
int
do_pgfault(struct mm_struct *mm, uint64_t error_code, uintptr_t addr) {
    int ret = -E_INVAL;
    struct vma_struct *vma = find_vma(mm, addr);
    if (vma == NULL || vma->vm_start > addr) {
        goto failed;
    }

    switch (error_code & 3) {
    default:        /* default is 3: write, present */
    case 2:         /* write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            goto failed;
        }
        break;
    case 1:         /* read, present */
        goto failed;
    case 0:         /* read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            goto failed;
        }
    }

    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep;
    // try to find the pte; if the pte's page table (PT) doesn't exist yet,
    // create one (note the third parameter '1')
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        goto failed;
    }
    if (*ptep == 0) {
        // no physical page exists yet: allocate one and map it at the logical addr
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            goto failed;
        }
    }
    else {
        // this pte is a swap entry: load the data from disk into a page
        // and call page_insert to map it at the logical addr
        struct Page *page;
        if ((ret = swap_in_page(*ptep, &page)) != 0) {
            goto failed;
        }
        page_insert(mm->pgdir, page, addr, perm);
    }
    ret = 0;
failed:
    return ret;
}
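/* For reference, the x86 page-fault error-code bits that the switch on
 * (error_code & 3) above decodes; the macro names are illustrative only. */
#define PF_P 0x1    /* bit 0 set: page was present (protection fault) */
#define PF_W 0x2    /* bit 1 set: faulting access was a write */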
int vm_do_unmap(addr_t virt, unsigned locked)
{
	/* This gives the virtual address of the table needed, and sets
	 * the correct place as zero */
#if CONFIG_SWAP
	if(current_task && num_swapdev && current_task->num_swapped)
		swap_in_page((task_t *)current_task, virt & PAGE_MASK);
#endif
	addr_t vpage = (virt&PAGE_MASK)/0x1000;
	unsigned vp4 = PML4_IDX(vpage);
	unsigned vpdpt = PDPT_IDX(vpage);
	unsigned vdir = PAGE_DIR_IDX(vpage);
	unsigned vtbl = PAGE_TABLE_IDX(vpage);
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_acquire(&pd_cur_data->lock);
	page_dir_t *pd;
	page_table_t *pt;
	pdpt_t *pdpt;
	pml4_t *pml4;
	pml4 = (pml4_t *)((kernel_task && current_task) ? current_task->pd : kernel_dir);
	if(!pml4[vp4])
		pml4[vp4] = pm_alloc_page() | PAGE_PRESENT | PAGE_WRITE;
	pdpt = (addr_t *)((pml4[vp4]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pdpt[vpdpt])
		pdpt[vpdpt] = pm_alloc_page() | PAGE_PRESENT | PAGE_WRITE;
	pd = (addr_t *)((pdpt[vpdpt]&PAGE_MASK) + PHYS_PAGE_MAP);
	if(!pd[vdir])
		pd[vdir] = pm_alloc_page() | PAGE_PRESENT | PAGE_WRITE;
	pt = (addr_t *)((pd[vdir]&PAGE_MASK) + PHYS_PAGE_MAP);
	addr_t p = pt[vtbl];
	pt[vtbl] = 0;
	asm("invlpg (%0)"::"r" (virt));
#if CONFIG_SMP
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA) {
		if(IS_KERN_MEM(virt))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
		else if((IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
#endif
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_release(&pd_cur_data->lock);
	if(p && !(p & PAGE_COW))
		pm_free_page(p & PAGE_MASK);
	return 0;
}
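/* A sketch of the assumed semantics of the index macros used above: x86-64
 * consumes 9 bits of virtual address per paging level, so with
 * vpage = virt >> 12 each index is a shift and a 9-bit mask. These
 * definitions are illustrative; the kernel's own macros may differ. */
#define PAGE_TABLE_IDX(v) ((v) & 0x1FF)           /* virt bits 12..20 */
#define PAGE_DIR_IDX(v)   (((v) >> 9) & 0x1FF)    /* virt bits 21..29 */
#define PDPT_IDX(v)       (((v) >> 18) & 0x1FF)   /* virt bits 30..38 */
#define PML4_IDX(v)       (((v) >> 27) & 0x1FF)   /* virt bits 39..47 */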
int vm_do_unmap_only(addr_t virt, unsigned locked)
{
	/* Like vm_do_unmap, but only clears the mapping; the physical
	 * page is not freed here. */
#if CONFIG_SWAP
	if(current_task && num_swapdev && current_task->num_swapped)
		swap_in_page((task_t *)current_task, virt & PAGE_MASK);
#endif
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_acquire(&pd_cur_data->lock);
	page_tables[(virt&PAGE_MASK)/0x1000] = 0;
	asm("invlpg (%0)"::"r" (virt));
#if CONFIG_SMP
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA) {
		if(IS_KERN_MEM(virt))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
		else if((IS_THREAD_SHARED_MEM(virt) && pd_cur_data->count > 1))
			send_ipi(LAPIC_ICR_SHORT_OTHERS, 0,
					LAPIC_ICR_LEVELASSERT | LAPIC_ICR_TM_LEVEL | IPI_TLB);
	}
#endif
	if(kernel_task && (virt&PAGE_MASK) != PDIR_DATA && !locked)
		mutex_release(&pd_cur_data->lock);
	return 0;
}
// check_swap - check the correctness of the swap & page replacement algorithm
static void
check_swap(void) {
    size_t nr_used_pages_store = nr_used_pages();
    size_t slab_allocated_store = slab_allocated();

    size_t offset;
    for (offset = 2; offset < max_swap_offset; offset ++) {
        mem_map[offset] = 1;
    }

    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    check_mm_struct = mm;

    pgd_t *pgdir = mm->pgdir = init_pgdir_get();
    assert(pgdir[PGX(TEST_PAGE)] == 0);

    struct vma_struct *vma = vma_create(TEST_PAGE, TEST_PAGE + PTSIZE, VM_WRITE | VM_READ);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    struct Page *rp0 = alloc_page(), *rp1 = alloc_page();
    assert(rp0 != NULL && rp1 != NULL);

    pte_perm_t perm;
    ptep_unmap(&perm);
    ptep_set_u_write(&perm);
    int ret = page_insert(pgdir, rp1, TEST_PAGE, perm);
    assert(ret == 0 && page_ref(rp1) == 1);

    page_ref_inc(rp1);
    ret = page_insert(pgdir, rp0, TEST_PAGE, perm);
    assert(ret == 0 && page_ref(rp1) == 1 && page_ref(rp0) == 1);

    // check try_alloc_swap_entry
    swap_entry_t entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    mem_map[1] = 1;
    assert(try_alloc_swap_entry() == 0);

    // set rp1, Swap, Active, add to hash_list, active_list
    swap_page_add(rp1, entry);
    swap_active_list_add(rp1);
    assert(PageSwap(rp1));

    mem_map[1] = 0;
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1));

    // check swap_remove_entry
    assert(swap_hash_find(entry) == NULL);
    mem_map[1] = 2;
    swap_remove_entry(entry);
    assert(mem_map[1] == 1);

    swap_page_add(rp1, entry);
    swap_inactive_list_add(rp1);
    swap_remove_entry(entry);
    assert(PageSwap(rp1));
    assert(rp1->index == entry && mem_map[1] == 0);

    // check page_launder, move page from inactive_list to active_list
    assert(page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && nr_inactive_pages == 1);
    assert(list_next(&(inactive_list.swap_list)) == &(rp1->swap_link));

    page_launder();
    assert(nr_active_pages == 1 && nr_inactive_pages == 0);
    assert(PageSwap(rp1) && PageActive(rp1));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1) && nr_active_pages == 0);
    assert(list_empty(&(active_list.swap_list)));

    // set rp1 inactive again
    assert(page_ref(rp1) == 1);
    swap_page_add(rp1, 0);
    assert(PageSwap(rp1) && swap_offset(rp1->index) == 1);
    swap_inactive_list_add(rp1);
    mem_map[1] = 1;
    assert(nr_inactive_pages == 1);
    page_ref_dec(rp1);

    size_t count = nr_used_pages();
    swap_remove_entry(entry);
    assert(nr_inactive_pages == 0 && nr_used_pages() == count - 1);

    // check swap_out_mm
    pte_t *ptep0 = get_pte(pgdir, TEST_PAGE, 0), *ptep1;
    assert(ptep0 != NULL && pte2page(*ptep0) == rp0);

    ret = swap_out_mm(mm, 0);
    assert(ret == 0);

    ret = swap_out_mm(mm, 10);
    assert(ret == 1 && mm->swap_address == TEST_PAGE + PGSIZE);

    ret = swap_out_mm(mm, 10);
    assert(ret == 0 && *ptep0 == entry && mem_map[1] == 1);
    assert(PageDirty(rp0) && PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    // check refill_inactive_scan()
    refill_inactive_scan();
    assert(!PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_inactive_pages == 1 && list_next(&(inactive_list.swap_list)) == &(rp0->swap_link));

    page_ref_inc(rp0);
    page_launder();
    assert(PageActive(rp0) && page_ref(rp0) == 1);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    page_ref_dec(rp0);
    refill_inactive_scan();
    assert(!PageActive(rp0));

    // save data in rp0
    int i;
    for (i = 0; i < PGSIZE; i ++) {
        ((char *)page2kva(rp0))[i] = (char)i;
    }

    page_launder();
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    assert(mem_map[1] == 1);

    rp1 = alloc_page();
    assert(rp1 != NULL);
    ret = swapfs_read(entry, rp1);
    assert(ret == 0);

    for (i = 0; i < PGSIZE; i ++) {
        assert(((char *)page2kva(rp1))[i] == (char)i);
    }

    // page fault now
    *(char *)(TEST_PAGE) = 0xEF;

    rp0 = pte2page(*ptep0);
    assert(page_ref(rp0) == 1);
    assert(PageSwap(rp0) && PageActive(rp0));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0) && nr_active_pages == 0 && nr_inactive_pages == 0);

    // clear accessed flag
    assert(rp0 == pte2page(*ptep0));
    assert(!PageSwap(rp0));

    ret = swap_out_mm(mm, 10);
    assert(ret == 0);
    assert(!PageSwap(rp0) && ptep_present(ptep0));

    // change page table
    ret = swap_out_mm(mm, 10);
    assert(ret == 1);
    assert(*ptep0 == entry && page_ref(rp0) == 0 && mem_map[1] == 1);

    count = nr_used_pages();
    refill_inactive_scan();
    page_launder();
    assert(count - 1 == nr_used_pages());

    ret = swapfs_read(entry, rp1);
    assert(ret == 0 && *(char *)(page2kva(rp1)) == (char)0xEF);
    free_page(rp1);

    // duplicate *ptep0
    ptep1 = get_pte(pgdir, TEST_PAGE + PGSIZE, 0);
    assert(ptep1 != NULL && ptep_invalid(ptep1));
    swap_duplicate(*ptep0);
    ptep_copy(ptep1, ptep0);
    mp_tlb_invalidate(pgdir, TEST_PAGE + PGSIZE);

    // page fault again
    // update for copy-on-write
    *(char *)(TEST_PAGE + 1) = 0x88;
    *(char *)(TEST_PAGE + PGSIZE) = 0x8F;
    *(char *)(TEST_PAGE + PGSIZE + 1) = 0xFF;
    assert(pte2page(*ptep0) != pte2page(*ptep1));
    assert(*(char *)(TEST_PAGE) == (char)0xEF);
    assert(*(char *)(TEST_PAGE + 1) == (char)0x88);
    assert(*(char *)(TEST_PAGE + PGSIZE) == (char)0x8F);
    assert(*(char *)(TEST_PAGE + PGSIZE + 1) == (char)0xFF);

    rp0 = pte2page(*ptep0);
    rp1 = pte2page(*ptep1);
    assert(!PageSwap(rp0) && PageSwap(rp1) && PageActive(rp1));

    entry = try_alloc_swap_entry();
    assert(!PageSwap(rp0) && !PageSwap(rp1));
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(list_empty(&(active_list.swap_list)));
    assert(list_empty(&(inactive_list.swap_list)));

    ptep_set_accessed(&perm);
    page_insert(pgdir, rp0, TEST_PAGE + PGSIZE, perm);

    // check swap_out_mm
    *(char *)(TEST_PAGE) = *(char *)(TEST_PAGE + PGSIZE) = 0xEE;
    mm->swap_address = TEST_PAGE + PGSIZE * 2;
    ret = swap_out_mm(mm, 2);
    assert(ret == 0);
    assert(ptep_present(ptep0) && !ptep_accessed(ptep0));
    assert(ptep_present(ptep1) && !ptep_accessed(ptep1));

    ret = swap_out_mm(mm, 2);
    assert(ret == 2);
    assert(mem_map[1] == 2 && page_ref(rp0) == 0);

    refill_inactive_scan();
    page_launder();
    assert(mem_map[1] == 2 && swap_hash_find(entry) == NULL);

    // check copy entry
    swap_remove_entry(entry);
    ptep_unmap(ptep1);
    assert(mem_map[1] == 1);

    swap_entry_t store;
    ret = swap_copy_entry(entry, &store);
    assert(ret == -E_NO_MEM);
    mem_map[2] = SWAP_UNUSED;

    ret = swap_copy_entry(entry, &store);
    assert(ret == 0 && swap_offset(store) == 2 && mem_map[2] == 0);
    mem_map[2] = 1;
    ptep_copy(ptep1, &store);

    assert(*(char *)(TEST_PAGE + PGSIZE) == (char)0xEE && *(char *)(TEST_PAGE + PGSIZE + 1) == (char)0x88);

    *(char *)(TEST_PAGE + PGSIZE) = 1, *(char *)(TEST_PAGE + PGSIZE + 1) = 2;
    assert(*(char *)TEST_PAGE == (char)0xEE && *(char *)(TEST_PAGE + 1) == (char)0x88);

    ret = swap_in_page(entry, &rp0);
    assert(ret == 0);
    ret = swap_in_page(store, &rp1);
    assert(ret == 0);
    assert(rp1 != rp0);

    // free memory
    swap_list_del(rp0), swap_list_del(rp1);
    swap_page_del(rp0), swap_page_del(rp1);
    assert(page_ref(rp0) == 1 && page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && list_empty(&(active_list.swap_list)));
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));

    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        assert(list_empty(hash_list + i));
    }

    page_remove(pgdir, TEST_PAGE);
    page_remove(pgdir, (TEST_PAGE + PGSIZE));

#if PMXSHIFT != PUXSHIFT
    free_page(pa2page(PMD_ADDR(*get_pmd(pgdir, TEST_PAGE, 0))));
#endif
#if PUXSHIFT != PGXSHIFT
    free_page(pa2page(PUD_ADDR(*get_pud(pgdir, TEST_PAGE, 0))));
#endif
    free_page(pa2page(PGD_ADDR(*get_pgd(pgdir, TEST_PAGE, 0))));
    pgdir[PGX(TEST_PAGE)] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_active_pages == 0 && nr_inactive_pages == 0);
    for (offset = 0; offset < max_swap_offset; offset ++) {
        mem_map[offset] = SWAP_UNUSED;
    }

    assert(nr_used_pages_store == nr_used_pages());
    assert(slab_allocated_store == slab_allocated());

    kprintf("check_swap() succeeded.\n");
}
// check_swap - check the correctness of the swap & page replacement algorithm
static void
check_swap(void) {
    size_t nr_free_pages_store = nr_free_pages();
    size_t slab_allocated_store = slab_allocated();

    size_t offset;
    for (offset = 2; offset < max_swap_offset; offset ++) {
        mem_map[offset] = 1;
    }

    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);

    check_mm_struct = mm;

    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(0, PTSIZE, VM_WRITE | VM_READ);
    assert(vma != NULL);

    insert_vma_struct(mm, vma);

    struct Page *rp0 = alloc_page(), *rp1 = alloc_page();
    assert(rp0 != NULL && rp1 != NULL);

    uint32_t perm = PTE_U | PTE_W;
    int ret = page_insert(pgdir, rp1, 0, perm);
    assert(ret == 0 && page_ref(rp1) == 1);

    page_ref_inc(rp1);
    ret = page_insert(pgdir, rp0, 0, perm);
    assert(ret == 0 && page_ref(rp1) == 1 && page_ref(rp0) == 1);

    // check try_alloc_swap_entry
    swap_entry_t entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    mem_map[1] = 1;
    assert(try_alloc_swap_entry() == 0);

    // set rp1, Swap, Active, add to hash_list, active_list
    swap_page_add(rp1, entry);
    swap_active_list_add(rp1);
    assert(PageSwap(rp1));

    mem_map[1] = 0;
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1));

    // check swap_remove_entry
    assert(swap_hash_find(entry) == NULL);
    mem_map[1] = 2;
    swap_remove_entry(entry);
    assert(mem_map[1] == 1);

    swap_page_add(rp1, entry);
    swap_inactive_list_add(rp1);
    swap_remove_entry(entry);
    assert(PageSwap(rp1));
    assert(rp1->index == entry && mem_map[1] == 0);

    // check page_launder, move page from inactive_list to active_list
    assert(page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && nr_inactive_pages == 1);
    assert(list_next(&(inactive_list.swap_list)) == &(rp1->swap_link));

    page_launder();
    assert(nr_active_pages == 1 && nr_inactive_pages == 0);
    assert(PageSwap(rp1) && PageActive(rp1));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1) && nr_active_pages == 0);
    assert(list_empty(&(active_list.swap_list)));

    // set rp1 inactive again
    assert(page_ref(rp1) == 1);
    swap_page_add(rp1, 0);
    assert(PageSwap(rp1) && swap_offset(rp1->index) == 1);
    swap_inactive_list_add(rp1);
    mem_map[1] = 1;
    assert(nr_inactive_pages == 1);
    page_ref_dec(rp1);

    size_t count = nr_free_pages();
    swap_remove_entry(entry);
    assert(nr_inactive_pages == 0 && nr_free_pages() == count + 1);

    // check swap_out_mm
    pte_t *ptep0 = get_pte(pgdir, 0, 0), *ptep1;
    assert(ptep0 != NULL && pte2page(*ptep0) == rp0);

    ret = swap_out_mm(mm, 0);
    assert(ret == 0);

    ret = swap_out_mm(mm, 10);
    assert(ret == 1 && mm->swap_address == PGSIZE);

    ret = swap_out_mm(mm, 10);
    assert(ret == 0 && *ptep0 == entry && mem_map[1] == 1);
    assert(PageDirty(rp0) && PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    // check refill_inactive_scan()
    refill_inactive_scan();
    assert(!PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_inactive_pages == 1 && list_next(&(inactive_list.swap_list)) == &(rp0->swap_link));

    page_ref_inc(rp0);
    page_launder();
    assert(PageActive(rp0) && page_ref(rp0) == 1);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    page_ref_dec(rp0);
    refill_inactive_scan();
    assert(!PageActive(rp0));

    // save data in rp0
    int i;
    for (i = 0; i < PGSIZE; i ++) {
        ((char *)page2kva(rp0))[i] = (char)i;
    }

    page_launder();
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    assert(mem_map[1] == 1);

    rp1 = alloc_page();
    assert(rp1 != NULL);
    ret = swapfs_read(entry, rp1);
    assert(ret == 0);

    for (i = 0; i < PGSIZE; i ++) {
        assert(((char *)page2kva(rp1))[i] == (char)i);
    }

    // page fault now
    *(char *)0 = 0xEF;

    rp0 = pte2page(*ptep0);
    assert(page_ref(rp0) == 1);
    assert(PageSwap(rp0) && PageActive(rp0));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0) && nr_active_pages == 0 && nr_inactive_pages == 0);

    // clear accessed flag
    assert(rp0 == pte2page(*ptep0));
    assert(!PageSwap(rp0));

    ret = swap_out_mm(mm, 10);
    assert(ret == 0);
    assert(!PageSwap(rp0) && (*ptep0 & PTE_P));

    // change page table
    ret = swap_out_mm(mm, 10);
    assert(ret == 1);
    assert(*ptep0 == entry && page_ref(rp0) == 0 && mem_map[1] == 1);

    count = nr_free_pages();
    refill_inactive_scan();
    page_launder();
    assert(count + 1 == nr_free_pages());

    ret = swapfs_read(entry, rp1);
    assert(ret == 0 && *(char *)(page2kva(rp1)) == (char)0xEF);
    free_page(rp1);

    // duplicate *ptep0
    ptep1 = get_pte(pgdir, PGSIZE, 0);
    assert(ptep1 != NULL && *ptep1 == 0);
    swap_duplicate(*ptep0);
    *ptep1 = *ptep0;

    // page fault again
    *(char *)0 = 0xFF;
    *(char *)(PGSIZE + 1) = 0x88;

    assert(pte2page(*ptep0) == pte2page(*ptep1));
    rp0 = pte2page(*ptep0);
    assert(*(char *)1 == (char)0x88 && *(char *)PGSIZE == (char)0xFF);
    assert(page_ref(rp0) == 2 && rp0->index == entry && mem_map[1] == 0);
    assert(PageSwap(rp0) && PageActive(rp0));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0));
    assert(list_empty(&(active_list.swap_list)));
    assert(list_empty(&(inactive_list.swap_list)));

    // check swap_out_mm
    *(char *)0 = *(char *)PGSIZE = 0xEE;
    mm->swap_address = PGSIZE * 2;
    ret = swap_out_mm(mm, 2);
    assert(ret == 0);
    assert((*ptep0 & PTE_P) && !(*ptep0 & PTE_A));
    assert((*ptep1 & PTE_P) && !(*ptep1 & PTE_A));

    ret = swap_out_mm(mm, 2);
    assert(ret == 2);
    assert(mem_map[1] == 2 && page_ref(rp0) == 0);

    refill_inactive_scan();
    page_launder();
    assert(mem_map[1] == 2 && swap_hash_find(entry) == NULL);

    // check copy entry
    swap_remove_entry(entry);
    *ptep1 = 0;
    assert(mem_map[1] == 1);

    swap_entry_t store;
    ret = swap_copy_entry(entry, &store);
    assert(ret == -E_NO_MEM);
    mem_map[2] = SWAP_UNUSED;

    ret = swap_copy_entry(entry, &store);
    assert(ret == 0 && swap_offset(store) == 2 && mem_map[2] == 0);
    mem_map[2] = 1;
    *ptep1 = store;

    assert(*(char *)PGSIZE == (char)0xEE && *(char *)(PGSIZE + 1) == (char)0x88);

    *(char *)PGSIZE = 1, *(char *)(PGSIZE + 1) = 2;
    assert(*(char *)0 == (char)0xEE && *(char *)1 == (char)0x88);

    ret = swap_in_page(entry, &rp0);
    assert(ret == 0);
    ret = swap_in_page(store, &rp1);
    assert(ret == 0);
    assert(rp1 != rp0);

    // free memory
    swap_list_del(rp0), swap_list_del(rp1);
    swap_page_del(rp0), swap_page_del(rp1);
    assert(page_ref(rp0) == 1 && page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && list_empty(&(active_list.swap_list)));
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));

    for (i = 0; i < HASH_LIST_SIZE; i ++) {
        assert(list_empty(hash_list + i));
    }

    page_remove(pgdir, 0);
    page_remove(pgdir, PGSIZE);

    free_page(pa2page(pgdir[0]));
    pgdir[0] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_active_pages == 0 && nr_inactive_pages == 0);
    for (offset = 0; offset < max_swap_offset; offset ++) {
        mem_map[offset] = SWAP_UNUSED;
    }

    assert(nr_free_pages_store == nr_free_pages());
    assert(slab_allocated_store == slab_allocated());

    cprintf("check_swap() succeeded.\n");
}
// do_pgfault - interrupt handler to process the page fault exception
int
do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr) {
    if (mm == NULL) {
        assert(current != NULL);
        panic("page fault in kernel thread: pid = %d, %d %08x.\n",
              current->pid, error_code, addr);
    }
    lock_mm(mm);

    int ret = -E_INVAL;
    struct vma_struct *vma = find_vma(mm, addr);
    if (vma == NULL || vma->vm_start > addr) {
        goto failed;
    }
    if (vma->vm_flags & VM_STACK) {
        if (addr < vma->vm_start + PGSIZE) {
            goto failed;
        }
    }

    switch (error_code & 3) {
    default:        /* default is 3: write, present */
    case 2:         /* write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            goto failed;
        }
        break;
    case 1:         /* read, present */
        goto failed;
    case 0:         /* read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            goto failed;
        }
    }

    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep;
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        goto failed;
    }
    if (*ptep == 0) {
        if (!(vma->vm_flags & VM_SHARE)) {
            if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
                goto failed;
            }
        }
        else {
            lock_shmem(vma->shmem);
            uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off;
            pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 1);
            if (sh_ptep == NULL || *sh_ptep == 0) {
                unlock_shmem(vma->shmem);
                goto failed;
            }
            unlock_shmem(vma->shmem);
            if (*sh_ptep & PTE_P) {
                page_insert(mm->pgdir, pa2page(*sh_ptep), addr, perm);
            }
            else {
                swap_duplicate(*ptep);
                *ptep = *sh_ptep;
            }
        }
    }
    else {
        struct Page *page, *newpage = NULL;
        bool cow = ((vma->vm_flags & (VM_SHARE | VM_WRITE)) == VM_WRITE), may_copy = 1;

        assert(!(*ptep & PTE_P) || ((error_code & 2) && !(*ptep & PTE_W) && cow));
        if (cow) {
            newpage = alloc_page();
        }
        if (*ptep & PTE_P) {
            page = pte2page(*ptep);
        }
        else {
            if ((ret = swap_in_page(*ptep, &page)) != 0) {
                if (newpage != NULL) {
                    free_page(newpage);
                }
                goto failed;
            }
            if (!(error_code & 2) && cow) {
                perm &= ~PTE_W;
                may_copy = 0;
            }
        }
        if (cow && may_copy) {
            if (page_ref(page) + swap_page_count(page) > 1) {
                if (newpage == NULL) {
                    goto failed;
                }
                memcpy(page2kva(newpage), page2kva(page), PGSIZE);
                page = newpage, newpage = NULL;
            }
        }
        page_insert(mm->pgdir, page, addr, perm);
        if (newpage != NULL) {
            free_page(newpage);
        }
    }
    ret = 0;

failed:
    unlock_mm(mm);
    return ret;
}
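/* The copy-on-write predicate used above, restated for clarity: a fault is
 * handled as COW only for a private, writable mapping, i.e. VM_WRITE set
 * and VM_SHARE clear. is_cow is an illustrative helper, not part of the
 * source. */
static inline bool
is_cow(uint32_t vm_flags) {
    return (vm_flags & (VM_SHARE | VM_WRITE)) == VM_WRITE;
}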
int
do_pgfault(struct mm_struct *mm, machine_word_t error_code, uintptr_t addr) {
    if (mm == NULL) {
        assert(current != NULL);
        /* Chen Yuheng
         * give the handler a chance to deal with it */
        kprintf("page fault in kernel thread: pid = %d, name = %s, %d %08x.\n",
                current->pid, current->name, error_code, addr);
        return -E_KILLED;
    }

    bool need_unlock = 1;
    if (!try_lock_mm(mm)) {
        if (current != NULL && mm->locked_by == current->pid) {
            need_unlock = 0;
        }
        else {
            lock_mm(mm);
        }
    }

    int ret = -E_INVAL;
    struct vma_struct *vma = find_vma(mm, addr);
    if (vma == NULL || vma->vm_start > addr) {
        goto failed;
    }
    if (vma->vm_flags & VM_STACK) {
        if (addr < vma->vm_start + PGSIZE) {
            goto failed;
        }
    }
    //kprintf("@ %x %08x\n", vma->vm_flags, vma->vm_start);
    //assert((vma->vm_flags & VM_IO)==0);
    if (vma->vm_flags & VM_IO) {
        ret = -E_INVAL;
        goto failed;
    }

    switch (error_code & 3) {
    default:        /* default is 3: write, present */
    case 2:         /* write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            goto failed;
        }
        break;
    case 1:         /* read, present */
        goto failed;
    case 0:         /* read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            goto failed;
        }
    }

    pte_perm_t perm, nperm;
#ifdef ARCH_ARM
    /* ARM9 software emulated PTE_xxx */
    perm = PTE_P | PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
#else
    ptep_unmap(&perm);
    ptep_set_u_read(&perm);
    if (vma->vm_flags & VM_WRITE) {
        ptep_set_u_write(&perm);
    }
#endif
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep;
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        goto failed;
    }

    if (ptep_invalid(ptep)) {
#ifdef UCONFIG_BIONIC_LIBC
        if (vma->mfile.file != NULL) {
            struct file *file = vma->mfile.file;
            off_t old_pos = file->pos, new_pos = vma->mfile.offset + addr - vma->vm_start;
#ifdef SHARE_MAPPED_FILE
            struct mapped_addr *maddr = find_maddr(file, new_pos, NULL);
            if (maddr == NULL) {
#endif // SHARE_MAPPED_FILE
                struct Page *page;
                if ((page = alloc_page()) == NULL) {
                    assert(false);
                    goto failed;
                }
                nperm = perm;
#ifdef ARCH_ARM
                /* ARM9 software emulated PTE_xxx */
                nperm &= ~PTE_W;
#else
                ptep_unset_s_write(&nperm);
#endif
                page_insert_pte(mm->pgdir, page, ptep, addr, nperm);

                if ((ret = filestruct_setpos(file, new_pos)) != 0) {
                    assert(false);
                    goto failed;
                }
                filestruct_read(file, page2kva(page), PGSIZE);
                if ((ret = filestruct_setpos(file, old_pos)) != 0) {
                    assert(false);
                    goto failed;
                }
#ifdef SHARE_MAPPED_FILE
                if ((maddr = (struct mapped_addr *)kmalloc(sizeof(struct mapped_addr))) != NULL) {
                    maddr->page = page;
                    maddr->offset = new_pos;
                    page->maddr = maddr;
                    list_add(&(file->node->mapped_addr_list), &(maddr->list));
                }
                else {
                    assert(false);
                }
            }
            else {
                nperm = perm;
#ifdef ARCH_ARM
                /* ARM9 software emulated PTE_xxx */
                nperm &= ~PTE_W;
#else
                ptep_unset_s_write(&nperm);
#endif
                page_insert_pte(mm->pgdir, maddr->page, ptep, addr, nperm);
            }
#endif //SHARE_MAPPED_FILE
        }
        else
#endif //UCONFIG_BIONIC_LIBC
        if (!(vma->vm_flags & VM_SHARE)) {
            if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
                goto failed;
            }
#ifdef UCONFIG_BIONIC_LIBC
            if (vma->vm_flags & VM_ANONYMOUS) {
                memset((void *)addr, 0, PGSIZE);
            }
#endif //UCONFIG_BIONIC_LIBC
        }
        else {
            // shared mem
            lock_shmem(vma->shmem);
            uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off;
            pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 1);
            if (sh_ptep == NULL || ptep_invalid(sh_ptep)) {
                unlock_shmem(vma->shmem);
                goto failed;
            }
            unlock_shmem(vma->shmem);
            if (ptep_present(sh_ptep)) {
                page_insert(mm->pgdir, pa2page(*sh_ptep), addr, perm);
            }
            else {
#ifdef UCONFIG_SWAP
                swap_duplicate(*ptep);
                ptep_copy(ptep, sh_ptep);
#else
                panic("NO SWAP\n");
#endif
            }
        }
    }
    else {
        // a present page: handle copy-on-write (COW)
        struct Page *page, *newpage = NULL;
        bool cow = ((vma->vm_flags & (VM_SHARE | VM_WRITE)) == VM_WRITE), may_copy = 1;

#if 1
        if (!(!ptep_present(ptep)
              || ((error_code & 2) && !ptep_u_write(ptep) && cow))) {
            //assert(PADDR(mm->pgdir) == rcr3());
            kprintf("%p %p %d %d %x\n", *ptep, addr, error_code, cow, vma->vm_flags);
            assert(0);
        }
#endif

        if (cow) {
            newpage = alloc_page();
        }
        if (ptep_present(ptep)) {
            page = pte2page(*ptep);
        }
        else {
#ifdef UCONFIG_SWAP
            if ((ret = swap_in_page(*ptep, &page)) != 0) {
                if (newpage != NULL) {
                    free_page(newpage);
                }
                goto failed;
            }
#else
            assert(0);
#endif
            if (!(error_code & 2) && cow) {
#ifdef ARCH_ARM
                //#warning ARM9 software emulated PTE_xxx
                perm &= ~PTE_W;
#else
                ptep_unset_s_write(&perm);
#endif
                may_copy = 0;
            }
        }

        if (cow && may_copy) {
#ifdef UCONFIG_SWAP
            if (page_ref(page) + swap_page_count(page) > 1) {
#else
            if (page_ref(page) > 1) {
#endif
                if (newpage == NULL) {
                    goto failed;
                }
                memcpy(page2kva(newpage), page2kva(page), PGSIZE);
                //kprintf("COW!\n");
                page = newpage, newpage = NULL;
            }
        }
#ifdef UCONFIG_BIONIC_LIBC
        else if (vma->mfile.file != NULL) {
#ifdef UCONFIG_SWAP
            assert(page_ref(page) + swap_page_count(page) == 1);
#else
            assert(page_ref(page) == 1);
#endif
#ifdef SHARE_MAPPED_FILE
            off_t offset = vma->mfile.offset + addr - vma->vm_start;
            struct mapped_addr *maddr = find_maddr(vma->mfile.file, offset, page);
            if (maddr != NULL) {
                list_del(&(maddr->list));
                kfree(maddr);
                page->maddr = NULL;
                assert(find_maddr(vma->mfile.file, offset, page) == NULL);
            }
            else {
            }
#endif //SHARE_MAPPED_FILE
        }
#endif //UCONFIG_BIONIC_LIBC
        else {
        }

        page_insert(mm->pgdir, page, addr, perm);
        if (newpage != NULL) {
            free_page(newpage);
        }
    }
    ret = 0;

failed:
    if (need_unlock) {
        unlock_mm(mm);
    }
    return ret;
}
void
umain(void) {
    swap_in_page();
}