// mm_map - map the range [addr, addr + len) into mm with the given vm_flags.
//          The range is widened to page boundaries; it must lie in user space
//          and must not overlap an existing vma.
// @vma_store: if non-NULL, receives the newly created vma.
// Returns 0 on success, -E_INVAL on a bad/overlapping range, -E_NO_MEM on OOM.
int
mm_map(struct mm_struct *mm, uintptr_t addr, size_t len, uint32_t vm_flags,
       struct vma_struct **vma_store) {
    uintptr_t start = ROUNDDOWN(addr, PGSIZE);
    uintptr_t end = ROUNDUP(addr + len, PGSIZE);
    if (!USER_ACCESS(start, end)) {
        return -E_INVAL;
    }
    assert(mm != NULL);

    // find_vma returns the first vma with vm_end > start; if it also begins
    // below 'end', the requested range collides with it.
    struct vma_struct *vma = find_vma(mm, start);
    if (vma != NULL && vma->vm_start < end) {
        return -E_INVAL;
    }

    vma = vma_create(start, end, vm_flags);
    if (vma == NULL) {
        return -E_NO_MEM;
    }
    insert_vma_struct(mm, vma);

    if (vma_store != NULL) {
        *vma_store = vma;
    }
    return 0;
}
// user_mem_check - verify that [addr, addr + len) may be accessed (read, or
//                  written when 'write' is set) by the current context.
// With a user mm: every byte must be covered by a vma carrying the required
// permission, and writes may not touch the guard page at the bottom of a
// stack vma. With no mm (kernel thread): only a kernel-range check applies.
// Returns 1 when the access is permitted, 0 otherwise.
bool
user_mem_check(struct mm_struct *mm, uintptr_t addr, size_t len, bool write) {
    if (mm == NULL) {
        // kernel thread: no user address space to consult
        return KERN_ACCESS(addr, addr + len);
    }
    if (!USER_ACCESS(addr, addr + len)) {
        return 0;
    }

    uintptr_t cur = addr, end = addr + len;
    uint32_t need = write ? VM_WRITE : VM_READ;
    while (cur < end) {
        struct vma_struct *vma = find_vma(mm, cur);
        if (vma == NULL || cur < vma->vm_start) {
            return 0;               // hole in the mapping
        }
        if (!(vma->vm_flags & need)) {
            return 0;               // permission missing
        }
        // writes must stay above the first (guard) page of a stack vma
        if (write && (vma->vm_flags & VM_STACK)
                && cur < vma->vm_start + PGSIZE) {
            return 0;
        }
        cur = vma->vm_end;          // whole vma validated; jump past it
    }
    return 1;
}
// mm_brk - (re)establish a VM_READ|VM_WRITE mapping over [addr, addr + len),
//          as used for heap adjustment. Any previous mapping in the rounded
//          range is removed first; if the immediately preceding vma ends at
//          'start' with exactly the same flags it is extended in place,
//          otherwise a fresh vma is created.
// Returns 0 on success, -E_INVAL on a bad range, an mm_unmap error, or
// -E_NO_MEM when vma allocation fails.
int
mm_brk(struct mm_struct *mm, uintptr_t addr, size_t len) {
    uintptr_t start = ROUNDDOWN(addr, PGSIZE);
    uintptr_t end = ROUNDUP(addr + len, PGSIZE);
    if (!USER_ACCESS(start, end)) {
        return -E_INVAL;
    }

    int ret = mm_unmap(mm, start, end - start);
    if (ret != 0) {
        return ret;
    }

    uint32_t vm_flags = VM_READ | VM_WRITE;
    // try to merge with the vma just below 'start'
    struct vma_struct *prev = find_vma(mm, start - 1);
    if (prev != NULL && prev->vm_end == start && prev->vm_flags == vm_flags) {
        prev->vm_end = end;
        return 0;
    }

    prev = vma_create(start, end, vm_flags);
    if (prev == NULL) {
        return -E_NO_MEM;
    }
    insert_vma_struct(mm, prev);
    return 0;
}
static void check_mm_swap(void) { size_t nr_free_pages_store = nr_free_pages(); size_t slab_allocated_store = slab_allocated(); int ret, i, j; for (i = 0; i < max_swap_offset; i ++) { assert(mem_map[i] == SWAP_UNUSED); } extern struct mm_struct *check_mm_struct; assert(check_mm_struct == NULL); // step1: check mm_map struct mm_struct *mm0 = mm_create(), *mm1; assert(mm0 != NULL && list_empty(&(mm0->mmap_list))); uintptr_t addr0, addr1; addr0 = 0; do { ret = mm_map(mm0, addr0, PTSIZE, 0, NULL); assert(ret == (USER_ACCESS(addr0, addr0 + PTSIZE)) ? 0 : -E_INVAL); addr0 += PTSIZE; } while (addr0 != 0); addr0 = 0; for (i = 0; i < 1024; i ++, addr0 += PTSIZE) { ret = mm_map(mm0, addr0, PGSIZE, 0, NULL); assert(ret == -E_INVAL); } mm_destroy(mm0); mm0 = mm_create(); assert(mm0 != NULL && list_empty(&(mm0->mmap_list))); addr0 = 0, i = 0; do { ret = mm_map(mm0, addr0, PTSIZE - PGSIZE, 0, NULL); assert(ret == (USER_ACCESS(addr0, addr0 + PTSIZE)) ? 0 : -E_INVAL); if (ret == 0) { i ++; } addr0 += PTSIZE; } while (addr0 != 0); addr0 = 0, j = 0; do { addr0 += PTSIZE - PGSIZE; ret = mm_map(mm0, addr0, PGSIZE, 0, NULL); assert(ret == (USER_ACCESS(addr0, addr0 + PGSIZE)) ? 
0 : -E_INVAL); if (ret == 0) { j ++; } addr0 += PGSIZE; } while (addr0 != 0); assert(j + 1 >= i); mm_destroy(mm0); assert(nr_free_pages_store == nr_free_pages()); assert(slab_allocated_store == slab_allocated()); cprintf("check_mm_swap: step1, mm_map ok.\n"); // step2: check page fault mm0 = mm_create(); assert(mm0 != NULL && list_empty(&(mm0->mmap_list))); // setup page table struct Page *page = alloc_page(); assert(page != NULL); pde_t *pgdir = page2kva(page); memcpy(pgdir, boot_pgdir, PGSIZE); pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W; // prepare for page fault mm0->pgdir = pgdir; check_mm_struct = mm0; lcr3(PADDR(mm0->pgdir)); uint32_t vm_flags = VM_WRITE | VM_READ; struct vma_struct *vma; addr0 = 0; do { if ((ret = mm_map(mm0, addr0, PTSIZE, vm_flags, &vma)) == 0) { break; } addr0 += PTSIZE; } while (addr0 != 0); assert(ret == 0 && addr0 != 0 && mm0->map_count == 1); assert(vma->vm_start == addr0 && vma->vm_end == addr0 + PTSIZE); // check pte entry pte_t *ptep; for (addr1 = addr0; addr1 < addr0 + PTSIZE; addr1 += PGSIZE) { ptep = get_pte(pgdir, addr1, 0); assert(ptep == NULL); } memset((void *)addr0, 0xEF, PGSIZE * 2); ptep = get_pte(pgdir, addr0, 0); assert(ptep != NULL && (*ptep & PTE_P)); ptep = get_pte(pgdir, addr0 + PGSIZE, 0); assert(ptep != NULL && (*ptep & PTE_P)); ret = mm_unmap(mm0, - PTSIZE, PTSIZE); assert(ret == -E_INVAL); ret = mm_unmap(mm0, addr0 + PTSIZE, PGSIZE); assert(ret == 0); addr1 = addr0 + PTSIZE / 2; ret = mm_unmap(mm0, addr1, PGSIZE); assert(ret == 0 && mm0->map_count == 2); ret = mm_unmap(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4); assert(ret == 0 && mm0->map_count == 3); ret = mm_map(mm0, addr1, PGSIZE * 6, 0, NULL); assert(ret == -E_INVAL); ret = mm_map(mm0, addr1, PGSIZE, 0, NULL); assert(ret == 0 && mm0->map_count == 4); ret = mm_map(mm0, addr1 + 2 * PGSIZE, PGSIZE * 4, 0, NULL); assert(ret == 0 && mm0->map_count == 5); ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PTSIZE / 2 - PGSIZE); assert(ret == 0 && mm0->map_count == 1); addr1 = 
addr0 + PGSIZE; for (i = 0; i < PGSIZE; i ++) { assert(*(char *)(addr1 + i) == (char)0xEF); } ret = mm_unmap(mm0, addr1 + PGSIZE / 2, PGSIZE / 4); assert(ret == 0 && mm0->map_count == 2); ptep = get_pte(pgdir, addr0, 0); assert(ptep != NULL && (*ptep & PTE_P)); ptep = get_pte(pgdir, addr0 + PGSIZE, 0); assert(ptep != NULL && *ptep == 0); ret = mm_map(mm0, addr1, PGSIZE, vm_flags, NULL); memset((void *)addr1, 0x88, PGSIZE); assert(*(char *)addr1 == (char)0x88 && mm0->map_count == 3); for (i = 1; i < 16; i += 2) { ret = mm_unmap(mm0, addr0 + PGSIZE * i, PGSIZE); assert(ret == 0); if (i < 8) { ret = mm_map(mm0, addr0 + PGSIZE * i, PGSIZE, 0, NULL); assert(ret == 0); } } assert(mm0->map_count == 13); ret = mm_unmap(mm0, addr0 + PGSIZE * 2, PTSIZE - PGSIZE * 2); assert(ret == 0 && mm0->map_count == 2); ret = mm_unmap(mm0, addr0, PGSIZE * 2); assert(ret == 0 && mm0->map_count == 0); cprintf("check_mm_swap: step2, mm_unmap ok.\n"); // step3: check exit_mmap ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL); assert(ret == 0); for (i = 0, addr1 = addr0; i < 4; i ++, addr1 += PGSIZE) { *(char *)addr1 = (char)0xFF; } exit_mmap(mm0); for (i = 0; i < PDX(KERNBASE); i ++) { assert(pgdir[i] == 0); } cprintf("check_mm_swap: step3, exit_mmap ok.\n"); // step4: check dup_mmap for (i = 0; i < max_swap_offset; i ++) { assert(mem_map[i] == SWAP_UNUSED); } ret = mm_map(mm0, addr0, PTSIZE, vm_flags, NULL); assert(ret != 0); addr1 = addr0; for (i = 0; i < 4; i ++, addr1 += PGSIZE) { *(char *)addr1 = (char)(i * i); } ret = 0; ret += swap_out_mm(mm0, 10); ret += swap_out_mm(mm0, 10); assert(ret == 4); for (; i < 8; i ++, addr1 += PGSIZE) { *(char *)addr1 = (char)(i * i); } // setup mm1 mm1 = mm_create(); assert(mm1 != NULL); page = alloc_page(); assert(page != NULL); pgdir = page2kva(page); memcpy(pgdir, boot_pgdir, PGSIZE); pgdir[PDX(VPT)] = PADDR(pgdir) | PTE_P | PTE_W; mm1->pgdir = pgdir; ret = dup_mmap(mm1, mm0); assert(ret == 0); // switch to mm1 check_mm_struct = mm1; 
lcr3(PADDR(mm1->pgdir)); addr1 = addr0; for (i = 0; i < 8; i ++, addr1 += PGSIZE) { assert(*(char *)addr1 == (char)(i * i)); *(char *)addr1 = (char)0x88; } // switch to mm0 check_mm_struct = mm0; lcr3(PADDR(mm0->pgdir)); addr1 = addr0; for (i = 0; i < 8; i ++, addr1 += PGSIZE) { assert(*(char *)addr1 == (char)(i * i)); } // switch to boot_cr3 check_mm_struct = NULL; lcr3(boot_cr3); // free memory exit_mmap(mm0); exit_mmap(mm1); free_page(kva2page(mm0->pgdir)); mm_destroy(mm0); free_page(kva2page(mm1->pgdir)); mm_destroy(mm1); cprintf("check_mm_swap: step4, dup_mmap ok.\n"); refill_inactive_scan(); page_launder(); for (i = 0; i < max_swap_offset; i ++) { assert(mem_map[i] == SWAP_UNUSED); } assert(nr_free_pages_store == nr_free_pages()); assert(slab_allocated_store == slab_allocated()); cprintf("check_mm_swap() succeeded.\n"); }
// mm_unmap - remove the mapping for [addr, addr + len) from mm, rounding the
//            range outward to page boundaries. vmas partially covered by the
//            range are trimmed (or split, when the range is strictly inside
//            one vma); fully covered vmas are destroyed. Page-table entries
//            in the affected span are released via unmap_range.
// Returns 0 on success (including when nothing was mapped there),
// -E_INVAL on a non-user range, -E_NO_MEM when a split needs a new vma.
int
mm_unmap(struct mm_struct *mm, uintptr_t addr, size_t len) {
    uintptr_t start = ROUNDDOWN(addr, PGSIZE), end = ROUNDUP(addr + len, PGSIZE);
    if (!USER_ACCESS(start, end)) {
        return -E_INVAL;
    }
    assert(mm != NULL);

    struct vma_struct *vma;
    // find_vma returns the first vma ending above 'start'; if it begins at or
    // after 'end', the range is entirely unmapped and there is nothing to do.
    if ((vma = find_vma(mm, start)) == NULL || end <= vma->vm_start) {
        return 0;
    }

    // Special case: the range lies strictly inside a single vma.
    // Split it: 'nvma' keeps the head [vm_start, start), the existing vma is
    // resized to the tail [end, vm_end), and the middle is unmapped.
    if (vma->vm_start < start && end < vma->vm_end) {
        struct vma_struct *nvma;
        if ((nvma = vma_create(vma->vm_start, start, vma->vm_flags)) == NULL) {
            return -E_NO_MEM;
        }
        vma_resize(vma, end, vma->vm_end);
        insert_vma_struct(mm, nvma);
        unmap_range(mm->pgdir, start, end);
        return 0;
    }

    // General case: detach every vma overlapping [start, end) onto free_list.
    list_entry_t free_list, *le;
    list_init(&free_list);
    while (vma->vm_start < end) {
        le = list_next(&(vma->list_link));
        remove_vma_struct(mm, vma);
        list_add(&free_list, &(vma->list_link));
        if (le == &(mm->mmap_list)) {
            break;
        }
        vma = le2vma(le, list_link);
    }

    // Process the detached vmas: trim the partially covered head/tail vmas
    // back into mm, destroy fully covered ones, and unmap the covered span.
    le = list_next(&free_list);
    while (le != &free_list) {
        vma = le2vma(le, list_link);
        le = list_next(le);
        uintptr_t un_start, un_end;
        if (vma->vm_start < start) {
            // head vma straddles 'start': keep [vm_start, start), drop the rest
            un_start = start, un_end = vma->vm_end;
            vma_resize(vma, vma->vm_start, un_start);
            insert_vma_struct(mm, vma);
        }
        else {
            un_start = vma->vm_start, un_end = vma->vm_end;
            if (end < un_end) {
                // tail vma straddles 'end': keep [end, vm_end), drop the rest
                un_end = end;
                vma_resize(vma, un_end, vma->vm_end);
                insert_vma_struct(mm, vma);
            }
            else {
                // vma entirely inside the range: free it
                vma_destroy(vma);
            }
        }
        unmap_range(mm->pgdir, un_start, un_end);
    }
    return 0;
}
/* ucore uses copy-on-write when forking a new process,
 * thus copy_range only copies pdt/pte entries and sets their permission to
 * READONLY; a write will be handled in pgfault */
// copy_range - duplicate the mappings of [start, end) from page directory
//              'from' into 'to'. When 'share' is false, present writable
//              pages are downgraded (copy-on-write); when true, the child
//              gets the same write permission as the parent (ARM path).
//              Swapped-out entries are shared by duplicating the swap slot.
// Returns 0 on success, -E_NO_MEM when a destination PTE cannot be allocated.
int
copy_range(pgd_t *to, pgd_t *from, uintptr_t start, uintptr_t end, bool share) {
    assert(start % PGSIZE == 0 && end % PGSIZE == 0);
    assert(USER_ACCESS(start, end));

    do {
        pte_t *ptep = get_pte(from, start, 0), *nptep;
        if (ptep == NULL) {
            // no PTE at this address: skip ahead by the size of whichever
            // paging level is missing (pud, pmd, or page table)
            if (get_pud(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PUSIZE, PUSIZE);
            }
            else if (get_pmd(from, start, 0) == NULL) {
                start = ROUNDDOWN(start + PMSIZE, PMSIZE);
            }
            else {
                start = ROUNDDOWN(start + PTSIZE, PTSIZE);
            }
            continue ;
        }
        if (*ptep != 0) {
            if ((nptep = get_pte(to, start, 1)) == NULL) {
                return -E_NO_MEM;
            }
            int ret;
            //kprintf("%08x %08x %08x\n", nptep, *nptep, start);
            assert(*ptep != 0 && *nptep == 0);
#ifdef ARCH_ARM
//TODO add code to handle swap
            if (ptep_present(ptep)){
                // nobody should be able to write this page
                // before a W-pgfault
                pte_perm_t perm = PTE_P;
                if(ptep_u_read(ptep))
                    perm |= PTE_U;
                if(!share){
                    // Original page should be set to readonly,
                    // because copy-on-write may happen
                    // after the current process modifies its page
                    ptep_set_perm(ptep, perm);
                }else{
                    if(ptep_u_write(ptep)){
                        perm |= PTE_W;
                    }
                }
                struct Page *page = pte2page(*ptep);
                ret = page_insert(to, page, start, perm);
            }
#else /* ARCH_ARM */
            if (ptep_present(ptep)) {
                pte_perm_t perm = ptep_get_perm(ptep, PTE_USER);
                struct Page *page = pte2page(*ptep);
                if (!share && ptep_s_write(ptep)) {
                    // copy-on-write: strip write permission from the child's
                    // perm and re-insert the parent's mapping read-only too
                    // (preserving the parent's swap-status bits)
                    ptep_unset_s_write(&perm);
                    pte_perm_t perm_with_swap_stat = ptep_get_perm(ptep, PTE_SWAP);
                    ptep_set_perm(&perm_with_swap_stat, perm);
                    page_insert(from, page, start, perm_with_swap_stat);
                }
                ret = page_insert(to, page, start, perm);
                assert(ret == 0);
            }
#endif /* ARCH_ARM */
            else {
#ifdef CONFIG_NO_SWAP
                assert(0);
#endif
                // non-present but non-zero PTE holds a swap entry:
                // bump the slot's refcount and copy the entry to the child
                swap_entry_t entry;
                ptep_copy(&entry, ptep);
                swap_duplicate(entry);
                ptep_copy(nptep, &entry);
            }
        }
        start += PGSIZE;
    } while (start != 0 && start < end);
#ifdef ARCH_ARM
    /* we have modified the PTE of the original
     * process, so invalidate TLB */
    tlb_invalidate_all();
#endif
    return 0;
}
// exit_range - tear down the page-table structures backing the page-aligned
//              user range [start, end) in 'pgdir'. Validates alignment and
//              user-space bounds, then delegates to exit_range_pgd.
void
exit_range(pgd_t *pgdir, uintptr_t start, uintptr_t end) {
    // both boundaries must be page-aligned ...
    assert(!(start % PGSIZE) && !(end % PGSIZE));
    // ... and the whole span must lie in user space
    assert(USER_ACCESS(start, end));
    exit_range_pgd(pgdir, start, end);
}