pte_t *huge_pte_offset(struct mm_struct *mm, unsigned long addr)
{
    pgd_t *pgd;
    pud_t *pud;
    pmd_t *pmd;
#ifdef CONFIG_HUGETLB_SUPER_PAGES
    pte_t *pte;
#endif

    /* Get the top-level page table entry. */
    pgd = (pgd_t *)get_pte((pte_t *)mm->pgd, pgd_index(addr), 0);

    /* We don't have four levels. */
    pud = pud_offset(pgd, addr);
#ifndef __PAGETABLE_PUD_FOLDED
# error support fourth page table level
#endif
    if (!pud_present(*pud))
        return NULL;

    /* Check for an L0 huge PTE, if we have three levels. */
#ifndef __PAGETABLE_PMD_FOLDED
    if (pud_huge(*pud))
        return (pte_t *)pud;

    pmd = (pmd_t *)get_pte((pte_t *)pud_page_vaddr(*pud),
                           pmd_index(addr), 1);
    if (!pmd_present(*pmd))
        return NULL;
#else
    pmd = pmd_offset(pud, addr);
#endif

    /* Check for an L1 huge PTE. */
    if (pmd_huge(*pmd))
        return (pte_t *)pmd;

#ifdef CONFIG_HUGETLB_SUPER_PAGES
    /* Check for an L2 huge PTE. */
    pte = get_pte((pte_t *)pmd_page_vaddr(*pmd), pte_index(addr), 2);
    if (!pte_present(*pte))
        return NULL;
    if (pte_super(*pte))
        return pte;
#endif

    return NULL;
}
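/*
 * A minimal caller sketch (not from the source): check whether some huge
 * mapping covers 'addr'. 'has_huge_mapping' is a hypothetical helper; only
 * huge_pte_offset and pte_present are taken from the code above.
 */
static int has_huge_mapping(struct mm_struct *mm, unsigned long addr)
{
    pte_t *ptep = huge_pte_offset(mm, addr);

    /* NULL means no huge PTE exists at any level for this address. */
    return ptep != NULL && pte_present(*ptep);
}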
// swap_out_vma - try to unmap ptes in the vma and move their pages onto the swap active list.
static int
swap_out_vma(struct mm_struct *mm, struct vma_struct *vma, uintptr_t addr, size_t require)
{
    if (require == 0 || !(addr >= vma->vm_start && addr < vma->vm_end)) {
        return 0;
    }
    uintptr_t end;
    size_t free_count = 0;
    addr = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(vma->vm_end, PGSIZE);
    while (addr < end && require != 0) {
        pte_t *ptep = get_pte(mm->pgdir, addr, 0);
        if (ptep == NULL) {
            /* No page table at this level: skip the whole covered range. */
            if (get_pud(mm->pgdir, addr, 0) == NULL) {
                addr = ROUNDDOWN(addr + PUSIZE, PUSIZE);
            }
            else if (get_pmd(mm->pgdir, addr, 0) == NULL) {
                addr = ROUNDDOWN(addr + PMSIZE, PMSIZE);
            }
            else {
                addr = ROUNDDOWN(addr + PTSIZE, PTSIZE);
            }
            continue;
        }
        if (ptep_present(ptep)) {
            struct Page *page = pte2page(*ptep);
            assert(!PageReserved(page));
            if (ptep_accessed(ptep)) {
                /* Recently used: clear the accessed bit and give it another pass. */
                ptep_unset_accessed(ptep);
                mp_tlb_invalidate(mm->pgdir, addr);
                goto try_next_entry;
            }
            if (!PageSwap(page)) {
                if (!swap_page_add(page, 0)) {
                    goto try_next_entry;
                }
                swap_active_list_add(page);
            }
            else if (ptep_dirty(ptep)) {
                SetPageDirty(page);
            }
            swap_entry_t entry = page->index;
            swap_duplicate(entry);
            page_ref_dec(page);
            ptep_copy(ptep, &entry);
            mp_tlb_invalidate(mm->pgdir, addr);
            mm->swap_address = addr + PGSIZE;
            free_count ++, require --;
            if ((vma->vm_flags & VM_SHARE) && page_ref(page) == 1) {
                uintptr_t shmem_addr = addr - vma->vm_start + vma->shmem_off;
                pte_t *sh_ptep = shmem_get_entry(vma->shmem, shmem_addr, 0);
                assert(sh_ptep != NULL && !ptep_invalid(sh_ptep));
                if (ptep_present(sh_ptep)) {
                    shmem_insert_entry(vma->shmem, shmem_addr, entry);
                }
            }
        }
    try_next_entry:
        addr += PGSIZE;
    }
    return free_count;
}
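/*
 * Hedged sketch (not from the source): how a swap_out_mm-style driver might
 * walk the vma list and feed swap_out_vma. The traversal semantics of
 * find_vma (returning the vma covering addr, or the nearest one above it,
 * as the page-fault code elsewhere assumes) are an assumption; only
 * swap_out_vma and mm->swap_address are taken from the function above.
 */
static int swap_out_mm_sketch(struct mm_struct *mm, size_t require)
{
    size_t freed = 0;
    uintptr_t addr = mm->swap_address;              /* resume where the last scan stopped */
    struct vma_struct *vma = find_vma(mm, addr);

    while (vma != NULL && freed < require) {
        if (addr < vma->vm_start)
            addr = vma->vm_start;                   /* clamp into the vma before scanning */
        freed += swap_out_vma(mm, vma, addr, require - freed);
        addr = vma->vm_end;
        vma = find_vma(mm, addr);                   /* assumption: next vma at or above addr */
    }
    return freed;
}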
void unprotect_page(void *vaddr, unsigned long prot)
{
    pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
    pteval_t n_pte = *p_pte & ~prot;

    set_pte(table_root, n_pte, vaddr);
}
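/*
 * Sketch of the symmetric operation (not in the source): set protection bits
 * instead of clearing them, reusing get_pte/set_pte exactly as unprotect_page
 * does. 'protect_page' is a hypothetical name.
 */
void protect_page(void *vaddr, unsigned long prot)
{
    pteval_t *p_pte = get_pte(table_root, (uintptr_t)vaddr);
    pteval_t n_pte = *p_pte | prot;   /* OR the bits in rather than masking them out */

    set_pte(table_root, n_pte, vaddr);
}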
//page_remove - free the Page mapped at linear address la if it has a valid pte
void page_remove(pde_t *pgdir, uintptr_t la)
{
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep != NULL) {
        page_remove_pte(pgdir, la, ptep);
    }
}
void page_remove(pgd_t *pgdir, uintptr_t la)
{
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep != NULL) {
        page_remove_pte(pgdir, la, ptep);
    }
}
void check_boot_pgdir(void)
{
    pte_t *ptep;
    int i;

    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

    assert(boot_pgdir[0] == 0);

    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, 0x100, PTE_W) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, 0x100 + PGSIZE, PTE_W) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)0x100, str);
    assert(strcmp((void *)0x100, (void *)(0x100 + PGSIZE)) == 0);

    *(char *)(page2kva(p) + 0x100) = '\0';
    assert(strlen((const char *)0x100) == 0);

    free_page(p);
    free_page(pa2page(PDE_ADDR(boot_pgdir[0])));
    boot_pgdir[0] = 0;

    kprintf("check_boot_pgdir() succeeded!\n");
}
/**
 * Map a range of a child's virtual memory into the main process.
 * This is only used in sys_getcwd, because 'buf' will be used as an io buffer
 * and it is impossible to use 'copy_to_user' everywhere we write to the io_buf.
 * @param proc the PCB whose container process is the source
 * @param addr the beginning address of the area to be mapped
 * @param len the size of the area
 * @param is_write whether it is required that the area is writable
 * @return 0 on success, or -1 otherwise
 */
int
host_map_user(struct proc_struct *proc, uintptr_t addr, size_t len, int is_write)
{
    if (proc->mm == NULL)
        return 0;
    uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
    pde_t *pgdir = proc->mm->pgdir;
    uintptr_t i;
    int ret;

    /* Prepare the common arguments of the syscall. */
    struct mmap_arg_struct args = {
        .len = PGSIZE,
        .prot = PROT_READ | PROT_WRITE | PROT_EXEC,
        .flags = MAP_SHARED | MAP_FIXED,
        .fd = ginfo->mem_fd,
    };

    for (i = start; i < end; i += PGSIZE) {
        /* Touch the page (per-page address 'i', not the original 'addr'). */
        if (is_write)
            ret = host_assign(proc, i, 0);
        else
            ret = host_getvalue(proc, i, NULL);
        if (ret < 0)
            return -1;

        /* Find the page in the tmp file and map it to the proper address
         * in the main process. */
        pte_t *ptep = get_pte(pgdir, i, 0);
        assert(ptep != NULL && (*ptep & PTE_P));
        args.addr = i;
        args.offset = PTE_ADDR(*ptep);
        syscall1(__NR_mmap, (long)&args);
    }
    return 0;
}
// Map the physical page at virtual address 'va'.
// The permissions of the page table entry should
// be set to 'perm | PTE_P'.
//
// RETURNS:
//   0 on success
//   -E_NOT_AT_PGBOUND, if pa or va is not at a page boundary
//   -E_NO_MEM, if the page table couldn't be allocated
//   -E_MAP_EXIST, if there is already a page mapped at 'va'
int insert_page(pde_t *pgdir, paddr_t pa, vaddr_t va, uint perm, uint kmap)
{
    if ((pa & 0xfff) || (va & 0xfff))
        return -E_NOT_AT_PGBOUND;

    acquire(&phy_mem_lock);

    pte_t *pte = get_pte(pgdir, va, 1);
    if (pte == NULL) {
        release(&phy_mem_lock);
        return -E_NO_MEM;
    }
    if (*pte & PTE_P) {
        release(&phy_mem_lock);
        return -E_MAP_EXIST;
    }

    *pte = PTE_ADDR(pa) | PTE_P | perm;
    if (!kmap) {
        IncPageCount(page_frame(pa));
    }

    release(&phy_mem_lock);
    return 0;
}
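/*
 * Hedged usage sketch (not from the source): map one frame and handle each
 * documented error code. 'alloc_frame' is a hypothetical frame allocator;
 * insert_page and the error constants come from above, and cprintf is
 * assumed available as in the rest of this code.
 */
static int map_one_frame(pde_t *pgdir, vaddr_t va, uint perm)
{
    paddr_t pa = alloc_frame();     /* hypothetical: returns a page-aligned frame */
    int r = insert_page(pgdir, pa, va, perm, 0);

    if (r == -E_MAP_EXIST)
        cprintf("va %x already mapped\n", va);
    else if (r == -E_NO_MEM)
        cprintf("no memory for page table\n");
    return r;
}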
/* use software-emulated x86-style pgfault handling */
static void handle_tlbmiss(struct trapframe *tf, int write)
{
    int in_kernel = trap_in_kernel(tf);
    assert(current_pgdir != NULL);
    uint32_t badaddr = tf->tf_vaddr;
    int ret = 0;
    pte_t *pte = get_pte(current_pgdir, tf->tf_vaddr, 0);
    if (pte == NULL || ptep_invalid(pte)) {
        /* PTE miss: a real page fault.
         * The TLB is not refilled in do_pgfault, so a vmm page fault
         * triggers two exceptions; permission is checked on the TLB miss. */
        ret = pgfault_handler(tf, badaddr, get_error_code(write, pte));
    }
    else {
        /* TLB miss only: reload it (refill two slots) after checking permission. */
        if (in_kernel) {
            tlb_refill(badaddr, pte);
            return;
        }
        else {
            if (!ptep_u_read(pte)) {
                ret = -1;
                goto exit;
            }
            if (write && !ptep_u_write(pte)) {
                ret = -2;
                goto exit;
            }
            tlb_refill(badaddr, pte);
            return;
        }
    }
exit:
    if (ret) {
        print_trapframe(tf);
        if (in_kernel) {
            panic("unhandled pgfault");
        }
        else {
            do_exit(-E_KILLED);
        }
    }
}
// Remove the mapping at va.
// If the page is not mapped anywhere else, free the page.
// RETURNS:
//   0 on success
//   -E_NOT_AT_PGBOUND if va is not at a page boundary
//   -E_ALREADY_FREE if va is already free
int remove_page(pde_t *pgdir, vaddr_t va)
{
    pte_t *pte;

    if (va & 0xfff)
        return -E_NOT_AT_PGBOUND;
    pte = get_pte(pgdir, va, 0);
    return remove_pte(pgdir, pte);
}
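/*
 * Sketch (not from the source): replace a mapping by combining remove_page
 * and insert_page from above. 'remap_page' is a hypothetical helper; a real
 * kernel would also need to hold phy_mem_lock across both steps to avoid a
 * window in which va has no mapping.
 */
static int remap_page(pde_t *pgdir, paddr_t new_pa, vaddr_t va, uint perm)
{
    int r = remove_page(pgdir, va);
    if (r != 0 && r != -E_ALREADY_FREE)
        return r;                       /* e.g. misaligned va */
    return insert_page(pgdir, new_pa, va, perm, 0);
}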
int do_pgfault(struct mm_struct *mm, uint64_t error_code, uintptr_t addr)
{
    int ret = -E_INVAL;
    struct vma_struct *vma = find_vma(mm, addr);
    if (vma == NULL || vma->vm_start > addr) {
        goto failed;
    }

    switch (error_code & 3) {
    default:    /* default is 3: write, present */
    case 2:     /* write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            goto failed;
        }
        break;
    case 1:     /* read, present */
        goto failed;
    case 0:     /* read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            goto failed;
        }
    }

    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep;
    // try to find a pte; if the pte's PT (Page Table) doesn't exist yet, create it
    // (note the 3rd parameter '1')
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        goto failed;
    }
    if (*ptep == 0) {
        // no physical page is mapped yet: allocate one and map it at this address
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            goto failed;
        }
    }
    else {
        // the pte holds a swap entry: load the data from disk into a fresh page
        // and call page_insert to map its physical address at the faulting address
        struct Page *page;
        if ((ret = swap_in_page(*ptep, &page)) != 0) {
            goto failed;
        }
        page_insert(mm->pgdir, page, addr, perm);
    }
    ret = 0;

failed:
    return ret;
}
static pteval_t *set_pte(pgd_t *pgtable, pteval_t val, void *vaddr)
{
    pteval_t *p_pte = get_pte(pgtable, (uintptr_t)vaddr);

    /* first flush the old entry (if we're replacing anything) */
    if (!(*p_pte & PAGE_ENTRY_I))
        ipte((uintptr_t)vaddr, p_pte);

    *p_pte = val;
    return p_pte;
}
//get_page - get the related Page struct for linear address la, using PDT pgdir
struct Page *get_page(pde_t *pgdir, uintptr_t la, pte_t **ptep_store)
{
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep_store != NULL) {
        *ptep_store = ptep;
    }
    if (ptep != NULL && ((*ptep) & PTE_V)) {
        return pte2page(*ptep);
    }
    return NULL;
}
/**
 * get the related Page struct for linear address la, using PDT pgdir
 * @param pgdir page directory
 * @param la linear address
 * @param ptep_store the table entry is stored here if not NULL
 * @return @la's corresponding page descriptor
 */
struct Page *get_page(pgd_t *pgdir, uintptr_t la, pte_t **ptep_store)
{
    pte_t *ptep = get_pte(pgdir, la, 0);
    if (ptep_store != NULL) {
        *ptep_store = ptep;
    }
    if (ptep != NULL && ptep_present(ptep)) {
        return pa2page(*ptep);
    }
    return NULL;
}
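/*
 * Hedged usage sketch (not from the source): query a mapping and read both
 * the page descriptor and the raw pte. 'dump_mapping' is a hypothetical
 * helper; only get_page and page_ref are taken from this code, and kprintf
 * is used as it is elsewhere in this codebase.
 */
static void dump_mapping(pgd_t *pgdir, uintptr_t la)
{
    pte_t *ptep = NULL;
    struct Page *page = get_page(pgdir, la, &ptep);

    if (page == NULL) {
        /* either no page table covers la, or the entry is not present */
        kprintf("la 0x%08lx: unmapped\n", (unsigned long)la);
        return;
    }
    kprintf("la 0x%08lx: ref %d\n", (unsigned long)la, page_ref(page));
}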
static void check_boot_pgdir(void)
{
    pte_t *ptep;
    int i;

    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    assert(PDE_ADDR(boot_pgdir[PDX(VPT)]) == PADDR(boot_pgdir));

    assert(boot_pgdir[256] == 0);

    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, 0x40000100, PTE_TYPE_SRW) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, 0x40000100 + PGSIZE, PTE_TYPE_SRW) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)0x40000100, str);
    assert(strcmp((void *)0x40000100, (void *)(0x40000100 + PGSIZE)) == 0);
    cprintf("%s\n\n", (char *)0x40000100);

    *(char *)(page2kva(p) + 0x100) = '\0';
    assert(strlen((const char *)0x40000100) == 0);

    free_page(p);
    free_page(pde2page(boot_pgdir[256]));
    boot_pgdir[256] = 0;

    cprintf("check_boot_pgdir() succeeded!\n");
}
/**
 * boot_map_segment - map the linear range [la, la + size) to the physical
 * range [pa, pa + size) in pgdir, with permission perm
 * @param la linear address of the memory to map (after the x86 segment map)
 * @param size memory size
 * @param pa physical address of the memory
 * @param perm permission of the memory
 */
void boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, uintptr_t pa, uint32_t perm)
{
    assert(PGOFF(la) == PGOFF(pa));
    size_t n = ROUNDUP(size + PGOFF(la), PGSIZE) / PGSIZE;
    la = ROUNDDOWN(la, PGSIZE);
    pa = ROUNDDOWN(pa, PGSIZE);
    for (; n > 0; n--, la += PGSIZE, pa += PGSIZE) {
        pte_t *ptep = get_pte(pgdir, la, 1);
        assert(ptep != NULL);
        ptep_map(ptep, pa);
        ptep_set_perm(ptep, perm);
    }
}
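/*
 * Hedged usage sketch (not from the source): how boot-time code might map
 * the kernel's physical memory window with this helper. KERNBASE, KMEMSIZE
 * and PTE_W follow ucore's usual names, but the exact constants and
 * permission bits are assumptions for this port.
 */
static void boot_map_kernel_sketch(pde_t *pgdir)
{
    /* offset map: la = pa + KERNBASE, writable for the kernel */
    boot_map_segment(pgdir, KERNBASE, KMEMSIZE, 0, PTE_W);
}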
int swap_out(struct mm_struct *mm, int n, int in_tick)
{
    int i;
    for (i = 0; i != n; ++i) {
        uintptr_t v;
        struct Page *page;
        int r = sm->swap_out_victim(mm, &page, in_tick);
        if (r != 0) {
            cprintf("i %d, swap_out: call swap_out_victim failed\n", i);
            break;
        }
        v = page->pra_vaddr;
        pte_t *ptep = get_pte(mm->pgdir, v, 0);
        assert((*ptep & PTE_P) != 0);
        // Lab3_X: 2013011509
        // Only write the page back to disk if it is dirty.
        if (page->need_write_back) {
            if (swapfs_write((page->pra_vaddr / PGSIZE + 1) << 8, page) != 0) {
                cprintf("SWAP: failed to save\n");
                sm->map_swappable(mm, v, page, 0);
                continue;
            }
            else {
                cprintf("swap_out: i %d, store page in vaddr 0x%x to disk swap entry %d\n",
                        i, v, page->pra_vaddr / PGSIZE + 1);
                *ptep = (page->pra_vaddr / PGSIZE + 1) << 8;
                free_page(page);
                page->has_backup = 1;
            }
        }
        else {
            // Clean page with an existing backup: just drop the mapping.
            *ptep = (page->pra_vaddr / PGSIZE + 1) << 8;
            free_page(page);
        }
        tlb_invalidate(mm->pgdir, v);
    }
    return i;
}
//page_insert - build a mapping from the physical address of a Page to the linear address la
// parameters:
//  pgdir: the kernel virtual base address of the PDT
//  page: the Page which needs to be mapped
//  la: the linear address to map
//  perm: the permission of this Page, which is set in the related pte
// return value: 0 on success, -E_NO_MEM if the page table cannot be allocated
//note: the PT is changed, so the TLB needs to be invalidated
int page_insert(pde_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm)
{
    pte_t *ptep = get_pte(pgdir, la, 1);
    if (ptep == NULL) {
        return -E_NO_MEM;
    }
    page_ref_inc(page);
    if (*ptep & PTE_V) {
        struct Page *p = pte2page(*ptep);
        if (p == page) {
            page_ref_dec(page);
        }
        else {
            page_remove_pte(pgdir, la, ptep);
        }
    }
    *ptep = page2pa(page) | PTE_V | perm | PTE_R;
    tlb_invalidate(pgdir, la);
    return 0;
}
//boot_map_segment - map the linear range [la, la + size) to physical [pa, pa + size)
// parameters:
//  la: linear address of the memory to map (after the x86 segment map)
//  size: memory size
//  pa: physical address of the memory
//  perm: permission of the memory
void boot_map_segment(pde_t *pgdir, uintptr_t la, size_t size, uintptr_t pa, uint32_t perm)
{
    assert(PGOFF(la) == PGOFF(pa));
    size_t n = ROUNDUP(size + PGOFF(la), PGSIZE) / PGSIZE;
    la = ROUNDDOWN(la, PGSIZE);
    pa = ROUNDDOWN(pa, PGSIZE);
    for (; n > 0; n--, la += PGSIZE, pa += PGSIZE) {
        pte_t *ptep = get_pte(pgdir, la, 1);
        assert(ptep != NULL);
        *ptep = pa | PTE_V | perm;
    }
}
void unmap_range(pde_t *pgdir, uintptr_t start, uintptr_t end)
{
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);

    do {
        pte_t *ptep = get_pte(pgdir, start, 0);
        if (ptep == NULL) {
            /* no page table here: skip to the next PT-covered region */
            start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            continue;
        }
        if (*ptep != 0) {
            page_remove_pte(pgdir, start, ptep);
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
}
int swap_in(struct mm_struct *mm, uintptr_t addr, struct Page **ptr_result)
{
    struct Page *result = alloc_page();
    assert(result != NULL);

    pte_t *ptep = get_pte(mm->pgdir, addr, 0);

    int r = swapfs_read(*ptep, result);
    assert(r == 0);     // reading the swap slot back must not fail here

    cprintf("swap_in: load disk swap entry %d with swap_page in vaddr 0x%x\n",
            (*ptep) >> 8, addr);
    *ptr_result = result;
    return 0;
}
static void save_prom_mappings(void)
{
    paddr_t pa;
    vaddr_t segva, pgva;
    int pte, sme, i;

    segva = (vaddr_t)SUN3_MONSTART;
    while (segva < (vaddr_t)SUN3_MONEND) {
        sme = get_segmap(segva);
        if (sme == SEGINV) {
            segva += NBSG;
            continue;            /* next segment */
        }
        /*
         * We have a valid segmap entry, so examine the
         * PTEs for all the pages in this segment.
         */
        pgva = segva;            /* starting page */
        segva += NBSG;           /* ending page (next seg) */
        while (pgva < segva) {
            pte = get_pte(pgva);
            if ((pte & (PG_VALID | PG_TYPE)) ==
                (PG_VALID | PGT_OBIO)) {
                /* Have a valid OBIO mapping. */
                pa = PG_PA(pte);
                /* Is it one we want to record? */
                if ((pa < SAVE_LAST) &&
                    ((pa & SAVE_MASK) == 0)) {
                    i = pa >> SAVE_SHIFT;
                    if (prom_mappings[i] == 0) {
                        prom_mappings[i] = pgva;
                    }
                }
                /* Make sure it has the right permissions. */
                if ((pte & PGBITS) != PGBITS) {
                    pte |= PGBITS;
                    set_pte(pgva, pte);
                }
            }
            pgva += PAGE_SIZE;   /* next page */
        }
    }
}
int page_insert(pgd_t *pgdir, struct Page *page, uintptr_t la, uint32_t perm)
{
    pte_t *ptep = get_pte(pgdir, la, 1);
    if (ptep == NULL) {
        return -E_NO_MEM;
    }
    page_ref_inc(page);
    if (*ptep & PTE_P) {
        struct Page *p = pte2page(*ptep);
        if (p == page) {
            page_ref_dec(page);
        }
        else {
            page_remove_pte(pgdir, la, ptep);
        }
    }
    *ptep = page2pa(page) | PTE_P | perm;
    tlb_invalidate(pgdir, la);
    return 0;
}
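/*
 * Hedged sketch (not from the source) of the pgdir_alloc_page pattern that
 * the page-fault code in this collection relies on: allocate a frame, then
 * use page_insert to map it, giving the frame back if the insert fails. The
 * body is an assumption; only alloc_page, page_insert and free_page are
 * taken from this code.
 */
struct Page *pgdir_alloc_page_sketch(pgd_t *pgdir, uintptr_t la, uint32_t perm)
{
    struct Page *page = alloc_page();
    if (page != NULL) {
        if (page_insert(pgdir, page, la, perm) != 0) {
            free_page(page);    /* insert failed: release the frame */
            return NULL;
        }
    }
    return page;
}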
/**
 * Check that the boot page directory is set up correctly.
 * NOTE: we don't have an mm_struct at present, and since a write to a clean
 * page also raises SIGSEGV, we cannot handle that case yet, so just mark
 * every page inserted here as accessed and dirty.
 */
void check_boot_pgdir(void)
{
    pte_t *ptep;
    int i;

    for (i = 0; i < npage; i += PGSIZE) {
        assert((ptep = get_pte(boot_pgdir, (uintptr_t)KADDR(i), 0)) != NULL);
        assert(PTE_ADDR(*ptep) == i);
    }

    assert(boot_pgdir[PDX(TEST_PAGE)] == 0);

    struct Page *p;
    p = alloc_page();
    assert(page_insert(boot_pgdir, p, TEST_PAGE, PTE_W | PTE_D | PTE_A) == 0);
    assert(page_ref(p) == 1);
    assert(page_insert(boot_pgdir, p, TEST_PAGE + PGSIZE, PTE_W | PTE_D | PTE_A) == 0);
    assert(page_ref(p) == 2);

    const char *str = "ucore: Hello world!!";
    strcpy((void *)TEST_PAGE, str);
    assert(strcmp((void *)TEST_PAGE, (void *)(TEST_PAGE + PGSIZE)) == 0);

    *(char *)(page2kva(p)) = '\0';
    assert(strlen((const char *)TEST_PAGE) == 0);

    /*
     * On the um architecture, clearing a page table entry doesn't make the
     * linear address invalid, so remove the mappings by hand.
     */
    tlb_invalidate(boot_pgdir, TEST_PAGE);
    tlb_invalidate(boot_pgdir, TEST_PAGE + PGSIZE);

    free_page(p);
    free_page(pa2page(PDE_ADDR(boot_pgdir[PDX(TEST_PAGE)])));
    boot_pgdir[PDX(TEST_PAGE)] = 0;

    kprintf("check_boot_pgdir() succeeded.\n");
}
/**
 * page_insert - build a mapping from the physical address of a Page to the linear addr @la
 * @param pgdir page directory
 * @param page the page descriptor of the page to be inserted
 * @param la logical address of the page
 * @param perm permission of the page
 * @return 0 on success, or an error code on failure
 */
int page_insert(pgd_t *pgdir, struct Page *page, uintptr_t la, pte_perm_t perm)
{
    pte_t *ptep = get_pte(pgdir, la, 1);
    if (ptep == NULL) {
        return -E_NO_MEM;
    }
    page_ref_inc(page);
    if (*ptep != 0) {
        if (ptep_present(ptep) && pte2page(*ptep) == page) {
            page_ref_dec(page);
            goto out;
        }
        page_remove_pte(pgdir, la, ptep);
    }
out:
    ptep_map(ptep, page2pa(page));
    ptep_set_perm(ptep, perm);
    mp_tlb_update(pgdir, la);
    return 0;
}
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by the x86 hardware
 * @addr       : the address which caused the memory access exception (the contents of the CR2 register)
 *
 * CALL GRAPH: trap --> trap_dispatch --> pgfault_handler --> do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 * (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *     32-bit linear address that generated the exception. The do_pgfault function can
 *     use this address to locate the corresponding page directory and page-table
 *     entries.
 * (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *     that for other exceptions. The error code tells the exception handler three things:
 *     -- The P flag (bit 0) indicates whether the exception was due to a not-present page (0)
 *        or to either an access rights violation or the use of a reserved bit (1).
 *     -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *        was a read (0) or a write (1).
 *     -- The U/S flag (bit 2) indicates whether the processor was executing in user mode (1)
 *        or supervisor mode (0) at the time of the exception.
 */
int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr)
{
    int ret = -E_INVAL;
    //try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    //is addr in the range of one of mm's vmas?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and can not find it in vma\n", addr);
        goto failed;
    }
    //check the error_code
    switch (error_code & 3) {
    default:        /* error code flag: default is 3 (W/R=1, P=1): write, present */
    case 2:         /* error code flag: (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1:         /* error code flag: (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0:         /* error code flag: (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write to an existing addr) OR
     *    (write to a non-existing addr && addr is writable) OR
     *    (read from a non-existing addr && addr is readable)
     * THEN
     *    continue processing
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep = NULL;
    /*LAB3 EXERCISE 1: YOUR CODE
     * The comments below may help you finish the code.
     *
     * Some useful MACROs and DEFINEs; you can use them in the implementation below.
     * MACROs or Functions:
     *   get_pte          : get a pte and return the kernel virtual address of this pte for la;
     *                      if the PT containing this pte doesn't exist, alloc a page for the PT
     *                      (note the 3rd parameter '1')
     *   pgdir_alloc_page : call alloc_page & page_insert to allocate a page-sized memory & set up
     *                      an addr map pa <---> la with linear address la and the PDT pgdir
     * DEFINES:
     *   VM_WRITE : if vma->vm_flags & VM_WRITE == 1/0, then the vma is writable/non-writable
     *   PTE_W 0x002 // page table/directory entry flags bit : Writeable
     *   PTE_U 0x004 // page table/directory entry flags bit : User can access
     * VARIABLES:
     *   mm->pgdir : the PDT of these vma
     */
    /*LAB3 EXERCISE 1: YOUR CODE*/
    //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, create a PT
    ptep = get_pte(mm->pgdir, addr, 1);
    if (ptep == NULL) {
        cprintf("get_pte failed in do_pgfault\n");
        goto failed;
    }
    //(2) if no physical page exists yet, alloc a page & map the phy addr to the logical addr
    if (*ptep == 0) {
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page failed in do_pgfault\n");
            goto failed;
        }
    }
    else {
        /*LAB3 EXERCISE 2: YOUR CODE
         * Now we know this pte is a swap entry: load the data from disk into a page with a phy addr,
         * map the phy addr to the logical addr, and trigger the swap manager to record the access
         * situation of this page.
         *
         * Some useful MACROs and DEFINEs; you can use them in the implementation below.
         * MACROs or Functions:
         *   swap_in(mm, addr, &page) : alloc a memory page, then according to the swap entry in the
         *                              PTE for addr, find the disk page and read its content into
         *                              this memory page
         *   page_insert        : build the map of the phy addr of a Page to the linear addr la
         *   swap_map_swappable : mark the page swappable
         */
        if (swap_init_ok) {
            struct Page *page = NULL;
            //(1) according to the mm AND addr, load the content of the right disk page
            //    into the memory that 'page' manages
            int r = swap_in(mm, addr, &page);
            if (r != 0) {
                cprintf("swap_in failed in do_pgfault\n");
                goto failed;
            }
            //(2) according to the mm, addr AND page, set up the map of phy addr <---> logical addr
            page_insert(mm->pgdir, page, addr, perm);
            //(3) make the page swappable
            swap_map_swappable(mm, addr, page, 1);
            page->pra_vaddr = addr;
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
            goto failed;
        }
    }
    ret = 0;
failed:
    return ret;
}
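/*
 * Hedged sketch (not from the source) of the pgfault_handler step named in
 * the CALL GRAPH above: read the faulting address from CR2 and hand it to
 * do_pgfault together with the hardware error code. rcr2() and
 * check_mm_struct follow ucore's usual names but are assumptions here.
 */
static int pgfault_handler_sketch(struct trapframe *tf)
{
    extern struct mm_struct *check_mm_struct;
    uintptr_t addr = rcr2();    /* CR2 holds the faulting linear address */

    /* tf->tf_err carries the P / W/R / U/S bits described in the doc comment above */
    return do_pgfault(check_mm_struct, tf->tf_err, addr);
}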
static void check_swap(void)
{
    //back up the mem env
    int ret, count = 0, total = 0, i;
    list_entry_t *le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        assert(PageProperty(p));
        count++, total += p->property;
    }
    assert(total == nr_free_pages());
    cprintf("BEGIN check_swap: count %d, total %d\n", count, total);

    //now we set up the phy pages env
    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);
    check_mm_struct = mm;

    pde_t *pgdir = mm->pgdir = boot_pgdir;
    assert(pgdir[0] == 0);

    struct vma_struct *vma = vma_create(BEING_CHECK_VALID_VADDR, CHECK_VALID_VADDR, VM_WRITE | VM_READ);
    assert(vma != NULL);
    insert_vma_struct(mm, vma);

    //set up the temp Page Table for vaddr 0~4MB
    cprintf("setup Page Table for vaddr 0X1000, so alloc a page\n");
    pte_t *temp_ptep = NULL;
    temp_ptep = get_pte(mm->pgdir, BEING_CHECK_VALID_VADDR, 1);
    assert(temp_ptep != NULL);
    cprintf("setup Page Table vaddr 0~4MB OVER!\n");

    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_rp[i] = alloc_page();
        assert(check_rp[i] != NULL);
        assert(!PageProperty(check_rp[i]));
    }
    list_entry_t free_list_store = free_list;
    list_init(&free_list);
    assert(list_empty(&free_list));

    unsigned int nr_free_store = nr_free;
    nr_free = 0;
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        free_pages(check_rp[i], 1);
    }
    assert(nr_free == CHECK_VALID_PHY_PAGE_NUM);

    cprintf("set up init env for check_swap begin!\n");
    //set up the initial vir_page <-> phy_page environment for the page replacement algorithm
    pgfault_num = 0;

    check_content_set();
    assert(nr_free == 0);
    for (i = 0; i < MAX_SEQ_NO; i++)
        swap_out_seq_no[i] = swap_in_seq_no[i] = -1;

    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        check_ptep[i] = 0;
        check_ptep[i] = get_pte(pgdir, (i + 1) * 0x1000, 0);
        assert(check_ptep[i] != NULL);
        assert(pte2page(*check_ptep[i]) == check_rp[i]);
        assert((*check_ptep[i] & PTE_P));
    }
    cprintf("set up init env for check_swap over!\n");

    //now access the virt pages to test the page replacement algorithm
    ret = check_content_access();
    assert(ret == 0);

    //restore the kernel mem env
    for (i = 0; i < CHECK_VALID_PHY_PAGE_NUM; i++) {
        free_pages(check_rp[i], 1);
    }

    free_page(pde2page(pgdir[0]));
    pgdir[0] = 0;
    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    nr_free = nr_free_store;
    free_list = free_list_store;

    le = &free_list;
    while ((le = list_next(le)) != &free_list) {
        struct Page *p = le2page(le, page_link);
        count--, total -= p->property;
    }
    cprintf("count is %d, total is %d\n", count, total);

    cprintf("check_swap() succeeded!\n");
}
/**
 * Remap the specified address to a new page with new permission.
 * @param pgdir page directory
 * @param la linear address
 */
void tlb_update(pde_t *pgdir, uintptr_t la)
{
    la = ROUNDDOWN(la, PGSIZE);
    pte_t *pte = get_pte(pgdir, la, 0);
    if (pte == 0 || (*pte & PTE_P) == 0)
        panic("invalid tlb flushing\n");
    uint32_t pa = PDE_ADDR(*pte);

    /* A tricky method to keep the page table right under most circumstances:
     * a page not yet accessed is mapped with no permissions at all, and a
     * page that is read-only or not yet written is mapped without write
     * permission. Please consult the internal documentation for details. */
    int r = 1, w = 1, x = 1;
    if (Get_PTE_A(pte) == 0)
        r = x = w = 0;
    else if (Get_PTE_W(pte) == 0 || Get_PTE_D(pte) == 0)
        w = 0;

    /* Make sure that the page is invalid before mapping.
     * It would actually be better to use 'mprotect' here. */
    tlb_invalidate(pgdir, la);

    struct proc_struct *proc = find_proc_by_pgdir(pgdir);
    if (current != NULL && proc != NULL) {
        /* Map the page into the container process found, using the stub code. */
        if (host_mmap(proc, (void *)la, PGSIZE,
                      (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | (x ? PROT_EXEC : 0),
                      MAP_SHARED | MAP_FIXED, ginfo->mem_fd, pa) == MAP_FAILED)
            panic("map in child failed.\n");
    }
    else {
        /* Map the page into the host process. */
        struct mmap_arg_struct args = {
            .addr = la,
            .len = PGSIZE,
            .prot = (r ? PROT_READ : 0) | (w ? PROT_WRITE : 0) | (x ? PROT_EXEC : 0),
            .flags = MAP_SHARED | MAP_FIXED,
            .fd = ginfo->mem_fd,
            .offset = pa,
        };
        syscall1(__NR_mmap, (long)&args);
    }
}

/**
 * Unmap the page specified by @la in the container process corresponding to @pgdir.
 * @param pgdir page directory
 * @param la the logical address of the page to be flushed
 */
void tlb_invalidate(pde_t *pgdir, uintptr_t la)
{
    struct proc_struct *proc = find_proc_by_pgdir(pgdir);
    if (current != NULL && proc != NULL) {
        if (host_munmap(proc, (void *)la, PGSIZE) < 0)
            panic("unmap in child failed\n");
    }
    else {
        syscall2(__NR_munmap, la, PGSIZE);
    }
}

/**
 * Invalidate [USERBASE, USERTOP).
 * Used by tests or do_execve if a 'clean' space is needed (though not necessary).
 */
void tlb_invalidate_user(void)
{
    syscall2(__NR_munmap, USERBASE, USERTOP - USERBASE);
}
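/*
 * For reference, a sketch (not from the source) of the permission derivation
 * used by tlb_update above, as a standalone helper. The intent: an unaccessed
 * page must fault on any access so the A bit can be set, and a clean or
 * read-only page must fault on writes so the D bit can be set.
 */
static int prot_for_pte_sketch(pte_t *pte)
{
    if (Get_PTE_A(pte) == 0)
        return 0;                               /* not accessed yet: force a fault */
    if (Get_PTE_W(pte) == 0 || Get_PTE_D(pte) == 0)
        return PROT_READ | PROT_EXEC;           /* readable, but writes must fault */
    return PROT_READ | PROT_WRITE | PROT_EXEC;  /* fully usable */
}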
/* do_pgfault - interrupt handler to process the page fault exception
 * @mm         : the control struct for a set of vma using the same PDT
 * @error_code : the error code recorded in trapframe->tf_err, which is set by the x86 hardware
 * @addr       : the address which caused the memory access exception (the contents of the CR2 register)
 *
 * CALL GRAPH: trap --> trap_dispatch --> pgfault_handler --> do_pgfault
 * The processor provides ucore's do_pgfault function with two items of information to aid in diagnosing
 * the exception and recovering from it.
 * (1) The contents of the CR2 register. The processor loads the CR2 register with the
 *     32-bit linear address that generated the exception. The do_pgfault function can
 *     use this address to locate the corresponding page directory and page-table
 *     entries.
 * (2) An error code on the kernel stack. The error code for a page fault has a format different from
 *     that for other exceptions. The error code tells the exception handler three things:
 *     -- The P flag (bit 0) indicates whether the exception was due to a not-present page (0)
 *        or to either an access rights violation or the use of a reserved bit (1).
 *     -- The W/R flag (bit 1) indicates whether the memory access that caused the exception
 *        was a read (0) or a write (1).
 *     -- The U/S flag (bit 2) indicates whether the processor was executing in user mode (1)
 *        or supervisor mode (0) at the time of the exception.
 */
int do_pgfault(struct mm_struct *mm, uint32_t error_code, uintptr_t addr)
{
    int ret = -E_INVAL;
    //try to find a vma which includes addr
    struct vma_struct *vma = find_vma(mm, addr);

    pgfault_num++;
    //is addr in the range of one of mm's vmas?
    if (vma == NULL || vma->vm_start > addr) {
        cprintf("not valid addr %x, and can not find it in vma\n", addr);
        goto failed;
    }
    //check the error_code
    switch (error_code & 3) {
    default:        /* error code flag: default is 3 (W/R=1, P=1): write, present */
    case 2:         /* error code flag: (W/R=1, P=0): write, not present */
        if (!(vma->vm_flags & VM_WRITE)) {
            cprintf("do_pgfault failed: error code flag = write AND not present, but the addr's vma cannot write\n");
            goto failed;
        }
        break;
    case 1:         /* error code flag: (W/R=0, P=1): read, present */
        cprintf("do_pgfault failed: error code flag = read AND present\n");
        goto failed;
    case 0:         /* error code flag: (W/R=0, P=0): read, not present */
        if (!(vma->vm_flags & (VM_READ | VM_EXEC))) {
            cprintf("do_pgfault failed: error code flag = read AND not present, but the addr's vma cannot read or exec\n");
            goto failed;
        }
    }
    /* IF (write to an existing addr) OR
     *    (write to a non-existing addr && addr is writable) OR
     *    (read from a non-existing addr && addr is readable)
     * THEN
     *    continue processing
     */
    uint32_t perm = PTE_U;
    if (vma->vm_flags & VM_WRITE) {
        perm |= PTE_W;
    }
    addr = ROUNDDOWN(addr, PGSIZE);

    ret = -E_NO_MEM;

    pte_t *ptep = NULL;
    /*LAB3 EXERCISE 1: 2012012139
     * The comments below may help you finish the code.
     *
     * Some useful MACROs and DEFINEs; you can use them in the implementation below.
     * MACROs or Functions:
     *   get_pte          : get a pte and return the kernel virtual address of this pte for la;
     *                      if the PT containing this pte doesn't exist, alloc a page for the PT
     *                      (note the 3rd parameter '1')
     *   pgdir_alloc_page : call alloc_page & page_insert to allocate a page-sized memory & set up
     *                      an addr map pa <---> la with linear address la and the PDT pgdir
     * DEFINES:
     *   VM_WRITE : if vma->vm_flags & VM_WRITE == 1/0, then the vma is writable/non-writable
     *   PTE_W 0x002 // page table/directory entry flags bit : Writeable
     *   PTE_U 0x004 // page table/directory entry flags bit : User can access
     * VARIABLES:
     *   mm->pgdir : the PDT of these vma
     */
#if 0
    /*LAB3 EXERCISE 1: 2012012139*/
    ptep = ???   //(1) try to find a pte; if the pte's PT (Page Table) doesn't exist, create a PT
    if (*ptep == 0) {
        //(2) if no physical page exists yet, alloc a page & map the phy addr to the logical addr
    }
    else {
        /*LAB3 EXERCISE 2: 2012012139
         * Now we know this pte is a swap entry: load the data from disk into a page with a phy addr,
         * map the phy addr to the logical addr, and trigger the swap manager to record the access
         * situation of this page.
         *
         * Some useful MACROs and DEFINEs; you can use them in the implementation below.
         * MACROs or Functions:
         *   swap_in(mm, addr, &page) : alloc a memory page, then according to the swap entry in the
         *                              PTE for addr, find the disk page and read its content into
         *                              this memory page
         *   page_insert        : build the map of the phy addr of a Page to the linear addr la
         *   swap_map_swappable : mark the page swappable
         */
        /*
         * LAB5 CHALLENGE (the implementation of Copy on Write)
         * There are 2 situations when the code comes here:
         * 1) *ptep & PTE_P == 1: a process tried to write a read-only page.
         *    If the vma that includes this addr is writable, then we can make the page
         *    writable by rewriting *ptep. This method can be used to implement
         *    Copy on Write (COW), a fast way to fork a process.
         * 2) *ptep & PTE_P == 0 but *ptep != 0: this pte is a swap entry.
         *    We should add LAB3's results here.
         */
        if (swap_init_ok) {
            struct Page *page = NULL;
            //(1) according to the mm AND addr, load the content of the right disk page
            //    into the memory that 'page' manages
            //(2) according to the mm, addr AND page, set up the map of phy addr <---> logical addr
            //(3) make the page swappable
            //(4) [NOTICE]: you may need to update your lab3 implementation for LAB5 to run normally
        }
        else {
            cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
            goto failed;
        }
    }
#endif
    // try to find a pte; if the pte's PT (Page Table) doesn't exist, create a PT
    // (note the 3rd parameter '1')
    if ((ptep = get_pte(mm->pgdir, addr, 1)) == NULL) {
        cprintf("get_pte in do_pgfault failed\n");
        goto failed;
    }
    if (*ptep == 0) {
        // if no physical page exists yet, alloc a page & map the phy addr to the logical addr
        if (pgdir_alloc_page(mm->pgdir, addr, perm) == NULL) {
            cprintf("pgdir_alloc_page in do_pgfault failed\n");
            goto failed;
        }
    }
    else {
        struct Page *page = NULL;
        cprintf("do pgfault: ptep %x, pte %x\n", ptep, *ptep);
        if (*ptep & PTE_P) {
            // If a process writes to an existing read-only page (PTE_P means present),
            // control reaches here. This is where the delayed memory-space copy for a
            // forked child process (copy on write, COW) would be implemented.
            // It is not implemented yet; for now this is a fatal error.
            panic("error write a non-writable pte");
            //page = pte2page(*ptep);
        }
        else {
            // this pte is a swap entry: load the data from disk into a page with a phy addr,
            // then call page_insert to map the phy addr to the logical addr
            if (swap_init_ok) {
                if ((ret = swap_in(mm, addr, &page)) != 0) {
                    cprintf("swap_in in do_pgfault failed\n");
                    goto failed;
                }
            }
            else {
                cprintf("no swap_init_ok but ptep is %x, failed\n", *ptep);
                goto failed;
            }
        }
        page_insert(mm->pgdir, page, addr, perm);
        swap_map_swappable(mm, addr, page, 1);
        page->pra_vaddr = addr;
    }
    ret = 0;
failed:
    return ret;
}
// check_swap - check the correctness of the swap & page replacement algorithm
static void check_swap(void)
{
    size_t nr_used_pages_store = nr_used_pages();
    size_t slab_allocated_store = slab_allocated();

    size_t offset;
    for (offset = 2; offset < max_swap_offset; offset++) {
        mem_map[offset] = 1;
    }

    struct mm_struct *mm = mm_create();
    assert(mm != NULL);

    extern struct mm_struct *check_mm_struct;
    assert(check_mm_struct == NULL);
    check_mm_struct = mm;

    pgd_t *pgdir = mm->pgdir = init_pgdir_get();
    assert(pgdir[PGX(TEST_PAGE)] == 0);

    struct vma_struct *vma = vma_create(TEST_PAGE, TEST_PAGE + PTSIZE, VM_WRITE | VM_READ);
    assert(vma != NULL);
    insert_vma_struct(mm, vma);

    struct Page *rp0 = alloc_page(), *rp1 = alloc_page();
    assert(rp0 != NULL && rp1 != NULL);

    pte_perm_t perm;
    ptep_unmap(&perm);
    ptep_set_u_write(&perm);
    int ret = page_insert(pgdir, rp1, TEST_PAGE, perm);
    assert(ret == 0 && page_ref(rp1) == 1);

    page_ref_inc(rp1);
    ret = page_insert(pgdir, rp0, TEST_PAGE, perm);
    assert(ret == 0 && page_ref(rp1) == 1 && page_ref(rp0) == 1);

    // check try_alloc_swap_entry
    swap_entry_t entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    mem_map[1] = 1;
    assert(try_alloc_swap_entry() == 0);

    // set rp1 as Swap and Active, add it to hash_list and active_list
    swap_page_add(rp1, entry);
    swap_active_list_add(rp1);
    assert(PageSwap(rp1));

    mem_map[1] = 0;
    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1));

    // check swap_remove_entry
    assert(swap_hash_find(entry) == NULL);
    mem_map[1] = 2;
    swap_remove_entry(entry);
    assert(mem_map[1] == 1);

    swap_page_add(rp1, entry);
    swap_inactive_list_add(rp1);
    swap_remove_entry(entry);
    assert(PageSwap(rp1));
    assert(rp1->index == entry && mem_map[1] == 0);

    // check page_launder: move the page from inactive_list to active_list
    assert(page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && nr_inactive_pages == 1);
    assert(list_next(&(inactive_list.swap_list)) == &(rp1->swap_link));

    page_launder();
    assert(nr_active_pages == 1 && nr_inactive_pages == 0);
    assert(PageSwap(rp1) && PageActive(rp1));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1);
    assert(!PageSwap(rp1) && nr_active_pages == 0);
    assert(list_empty(&(active_list.swap_list)));

    // set rp1 inactive again
    assert(page_ref(rp1) == 1);
    swap_page_add(rp1, 0);
    assert(PageSwap(rp1) && swap_offset(rp1->index) == 1);
    swap_inactive_list_add(rp1);
    mem_map[1] = 1;
    assert(nr_inactive_pages == 1);
    page_ref_dec(rp1);

    size_t count = nr_used_pages();
    swap_remove_entry(entry);
    assert(nr_inactive_pages == 0 && nr_used_pages() == count - 1);

    // check swap_out_mm
    pte_t *ptep0 = get_pte(pgdir, TEST_PAGE, 0), *ptep1;
    assert(ptep0 != NULL && pte2page(*ptep0) == rp0);

    ret = swap_out_mm(mm, 0);
    assert(ret == 0);

    ret = swap_out_mm(mm, 10);
    assert(ret == 1 && mm->swap_address == TEST_PAGE + PGSIZE);

    ret = swap_out_mm(mm, 10);
    assert(ret == 0 && *ptep0 == entry && mem_map[1] == 1);
    assert(PageDirty(rp0) && PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    // check refill_inactive_scan()
    refill_inactive_scan();
    assert(!PageActive(rp0) && page_ref(rp0) == 0);
    assert(nr_inactive_pages == 1 && list_next(&(inactive_list.swap_list)) == &(rp0->swap_link));

    page_ref_inc(rp0);
    page_launder();
    assert(PageActive(rp0) && page_ref(rp0) == 1);
    assert(nr_active_pages == 1 && list_next(&(active_list.swap_list)) == &(rp0->swap_link));

    page_ref_dec(rp0);
    refill_inactive_scan();
    assert(!PageActive(rp0));

    // save data in rp0
    int i;
    for (i = 0; i < PGSIZE; i++) {
        ((char *)page2kva(rp0))[i] = (char)i;
    }
    page_launder();
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));
    assert(mem_map[1] == 1);

    rp1 = alloc_page();
    assert(rp1 != NULL);
    ret = swapfs_read(entry, rp1);
    assert(ret == 0);

    for (i = 0; i < PGSIZE; i++) {
        assert(((char *)page2kva(rp1))[i] == (char)i);
    }

    // page fault now
    *(char *)(TEST_PAGE) = 0xEF;

    rp0 = pte2page(*ptep0);
    assert(page_ref(rp0) == 1);
    assert(PageSwap(rp0) && PageActive(rp0));

    entry = try_alloc_swap_entry();
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(!PageSwap(rp0) && nr_active_pages == 0 && nr_inactive_pages == 0);

    // clear the accessed flag
    assert(rp0 == pte2page(*ptep0));
    assert(!PageSwap(rp0));

    ret = swap_out_mm(mm, 10);
    assert(ret == 0);
    assert(!PageSwap(rp0) && ptep_present(ptep0));

    // change the page table
    ret = swap_out_mm(mm, 10);
    assert(ret == 1);
    assert(*ptep0 == entry && page_ref(rp0) == 0 && mem_map[1] == 1);

    count = nr_used_pages();
    refill_inactive_scan();
    page_launder();
    assert(count - 1 == nr_used_pages());

    ret = swapfs_read(entry, rp1);
    assert(ret == 0 && *(char *)(page2kva(rp1)) == (char)0xEF);
    free_page(rp1);

    // duplicate *ptep0
    ptep1 = get_pte(pgdir, TEST_PAGE + PGSIZE, 0);
    assert(ptep1 != NULL && ptep_invalid(ptep1));
    swap_duplicate(*ptep0);
    ptep_copy(ptep1, ptep0);
    mp_tlb_invalidate(pgdir, TEST_PAGE + PGSIZE);

    // page fault again, and update for copy on write
    *(char *)(TEST_PAGE + 1) = 0x88;
    *(char *)(TEST_PAGE + PGSIZE) = 0x8F;
    *(char *)(TEST_PAGE + PGSIZE + 1) = 0xFF;
    assert(pte2page(*ptep0) != pte2page(*ptep1));
    assert(*(char *)(TEST_PAGE) == (char)0xEF);
    assert(*(char *)(TEST_PAGE + 1) == (char)0x88);
    assert(*(char *)(TEST_PAGE + PGSIZE) == (char)0x8F);
    assert(*(char *)(TEST_PAGE + PGSIZE + 1) == (char)0xFF);

    rp0 = pte2page(*ptep0);
    rp1 = pte2page(*ptep1);
    assert(!PageSwap(rp0) && PageSwap(rp1) && PageActive(rp1));

    entry = try_alloc_swap_entry();
    assert(!PageSwap(rp0) && !PageSwap(rp1));
    assert(swap_offset(entry) == 1 && mem_map[1] == SWAP_UNUSED);
    assert(list_empty(&(active_list.swap_list)));
    assert(list_empty(&(inactive_list.swap_list)));

    ptep_set_accessed(&perm);
    page_insert(pgdir, rp0, TEST_PAGE + PGSIZE, perm);

    // check swap_out_mm
    *(char *)(TEST_PAGE) = *(char *)(TEST_PAGE + PGSIZE) = 0xEE;
    mm->swap_address = TEST_PAGE + PGSIZE * 2;
    ret = swap_out_mm(mm, 2);
    assert(ret == 0);
    assert(ptep_present(ptep0) && !ptep_accessed(ptep0));
    assert(ptep_present(ptep1) && !ptep_accessed(ptep1));

    ret = swap_out_mm(mm, 2);
    assert(ret == 2);
    assert(mem_map[1] == 2 && page_ref(rp0) == 0);

    refill_inactive_scan();
    page_launder();
    assert(mem_map[1] == 2 && swap_hash_find(entry) == NULL);

    // check copy entry
    swap_remove_entry(entry);
    ptep_unmap(ptep1);
    assert(mem_map[1] == 1);

    swap_entry_t store;
    ret = swap_copy_entry(entry, &store);
    assert(ret == -E_NO_MEM);
    mem_map[2] = SWAP_UNUSED;

    ret = swap_copy_entry(entry, &store);
    assert(ret == 0 && swap_offset(store) == 2 && mem_map[2] == 0);
    mem_map[2] = 1;
    ptep_copy(ptep1, &store);

    assert(*(char *)(TEST_PAGE + PGSIZE) == (char)0xEE &&
           *(char *)(TEST_PAGE + PGSIZE + 1) == (char)0x88);

    *(char *)(TEST_PAGE + PGSIZE) = 1, *(char *)(TEST_PAGE + PGSIZE + 1) = 2;
    assert(*(char *)TEST_PAGE == (char)0xEE && *(char *)(TEST_PAGE + 1) == (char)0x88);

    ret = swap_in_page(entry, &rp0);
    assert(ret == 0);
    ret = swap_in_page(store, &rp1);
    assert(ret == 0);
    assert(rp1 != rp0);

    // free memory
    swap_list_del(rp0), swap_list_del(rp1);
    swap_page_del(rp0), swap_page_del(rp1);
    assert(page_ref(rp0) == 1 && page_ref(rp1) == 1);
    assert(nr_active_pages == 0 && list_empty(&(active_list.swap_list)));
    assert(nr_inactive_pages == 0 && list_empty(&(inactive_list.swap_list)));

    for (i = 0; i < HASH_LIST_SIZE; i++) {
        assert(list_empty(hash_list + i));
    }

    page_remove(pgdir, TEST_PAGE);
    page_remove(pgdir, (TEST_PAGE + PGSIZE));

#if PMXSHIFT != PUXSHIFT
    free_page(pa2page(PMD_ADDR(*get_pmd(pgdir, TEST_PAGE, 0))));
#endif
#if PUXSHIFT != PGXSHIFT
    free_page(pa2page(PUD_ADDR(*get_pud(pgdir, TEST_PAGE, 0))));
#endif
    free_page(pa2page(PGD_ADDR(*get_pgd(pgdir, TEST_PAGE, 0))));
    pgdir[PGX(TEST_PAGE)] = 0;

    mm->pgdir = NULL;
    mm_destroy(mm);
    check_mm_struct = NULL;

    assert(nr_active_pages == 0 && nr_inactive_pages == 0);
    for (offset = 0; offset < max_swap_offset; offset++) {
        mem_map[offset] = SWAP_UNUSED;
    }

    assert(nr_used_pages_store == nr_used_pages());
    assert(slab_allocated_store == slab_allocated());

    kprintf("check_swap() succeeded.\n");
}