int dune_vm_map_phys(ptent_t *root, void *va, size_t len, void *pa, int perm)
{
    int ret;
    struct map_phys_data data;
    int create;

    // if (!(perm & PERM_R) && (perm & ~(PERM_R)))
    //     return -EINVAL;

    data.perm = get_pte_perm(perm);
    data.va_base = (unsigned long) va;
    data.pa_base = (unsigned long) pa;

    if (perm & PERM_BIG)
        create = CREATE_BIG;
    else if (perm & PERM_BIG_1GB)
        create = CREATE_BIG_1GB;
    else
        create = CREATE_NORMAL;

    ret = __dune_vm_page_walk(root, va, va + len - 1,
                              &__dune_vm_map_phys_helper, (void *) &data,
                              3, create);
    if (ret)
        return ret;

    return 0;
}
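/*
 * Example usage (illustrative sketch, not part of this file): identity-map
 * one 2MB large page read-write. The address is an arbitrary placeholder;
 * PERM_* constants and the root table come from the caller's context.
 *
 *    static int example_identity_map(ptent_t *root)
 *    {
 *        void *addr = (void *) 0x40000000UL;    // hypothetical address
 *        return dune_vm_map_phys(root, addr, 0x200000, addr,
 *                                PERM_R | PERM_W | PERM_BIG);
 *    }
 */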
/**
 * __vm_is_mapped - determines if there are any mappings in the region
 * @addr: the start address
 * @len: the length of the region
 *
 * Returns true if a mapping exists, otherwise false.
 */
bool __vm_is_mapped(void *addr, size_t len)
{
    char *pos = (char *) addr;
    return __dune_vm_page_walk(pgroot, pos, pos + len - 1,
                               &__vm_is_mapped_helper, (void *) NULL,
                               3, CREATE_NONE) > 0;
}
void dune_vm_unmap(ptent_t *root, void *va, size_t len)
{
    /* FIXME: Doesn't free as much memory as it could */
    __dune_vm_page_walk(root, va, va + len - 1,
                        &__dune_vm_free_helper, NULL,
                        3, CREATE_NONE);
    dune_flush_tlb();
}
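/*
 * Example (illustrative sketch): tear down a region previously created with
 * dune_vm_map_phys() or dune_vm_map_pages(); the range should match what was
 * mapped, e.g. for the 2MB identity mapping sketched above:
 *
 *    dune_vm_unmap(root, (void *) 0x40000000UL, 0x200000);
 */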
/**
 * Free the page table and decrement the reference count on any pages.
 */
void dune_vm_free(ptent_t *root)
{
    // XXX: Should only need one page walk
    // XXX: Hacky - Until I fix ref counting
    /*__dune_vm_page_walk(root, VA_START, VA_END,
                          &__dune_vm_free_helper, NULL,
                          3, CREATE_NONE);*/
    __dune_vm_page_walk(root, VA_START, VA_END,
                        &__dune_vm_free_helper, NULL,
                        2, CREATE_NONE);
    __dune_vm_page_walk(root, VA_START, VA_END,
                        &__dune_vm_free_helper, NULL,
                        1, CREATE_NONE);
    put_page(root);
}
int __vm_map_phys(physaddr_t pa, virtaddr_t va, int nr, int size, int perm)
{
    int ret;
    struct vm_arg args;
    size_t len = nr * size;
    int create;

    if (!(perm & VM_PERM_R))
        return -EINVAL;

    args.perm = PTE_P;
    switch (size) {
    case PGSIZE_4KB:
        create = CREATE_NORMAL;
        break;
    case PGSIZE_2MB:
        create = CREATE_BIG;
        args.perm |= PTE_PS;
        break;
    case PGSIZE_1GB:
        create = CREATE_BIG_1GB;
        args.perm |= PTE_PS;
        break;
    default:
        return -EINVAL;
    }

    if (perm & VM_PERM_W)
        args.perm |= PTE_W;
    if (!(perm & VM_PERM_X))
        args.perm |= PTE_NX;
    if (perm & VM_PERM_U)
        args.perm |= PTE_U;

    args.pa = pa;
    args.va = va;

    ret = __dune_vm_page_walk(pgroot, (void *) va, (void *) (va + len - 1),
                              &__vm_map_phys_helper, (void *) &args,
                              3, create);
    /* cleanup partial mappings */
    if (ret)
        __vm_unmap((void *) va, nr, size);

    return ret;
}
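/*
 * Example (illustrative sketch): map four 2MB frames at a hypothetical
 * physical/virtual address pair. Note that the page size selects both the
 * walk depth (CREATE_BIG) and the PTE_PS bit in the entries.
 *
 *    int ret = __vm_map_phys(0x100000000UL, 0x7f0000000000UL, 4,
 *                            PGSIZE_2MB, VM_PERM_R | VM_PERM_W);
 *    if (ret)
 *        return ret;
 */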
int dune_vm_map_pages(ptent_t *root, void *va, size_t len, int perm)
{
    int ret;
    ptent_t pte_perm;

    /* reject write/execute/other access without read access */
    if (!(perm & PERM_R) && (perm & ~(PERM_R)))
        return -EINVAL;

    pte_perm = get_pte_perm(perm);

    ret = __dune_vm_page_walk(root, va, va + len - 1,
                              &__dune_vm_map_pages_helper,
                              (void *) pte_perm, 3, CREATE_NORMAL);

    return ret;
}
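/*
 * Example (illustrative sketch): back a fresh 16KB region with newly
 * allocated 4KB pages. The address is a placeholder; as noted above, any
 * permission request without PERM_R is rejected.
 *
 *    int ret = dune_vm_map_pages(pgroot, (void *) 0x50000000UL,
 *                                4 * PGSIZE, PERM_R | PERM_W);
 */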
/**
 * Clone a page root.
 */
ptent_t *dune_vm_clone(ptent_t *root)
{
    int ret;
    ptent_t *newRoot;

    newRoot = alloc_page();
    if (!newRoot)
        return NULL;
    memset(newRoot, 0, PGSIZE);

    ret = __dune_vm_page_walk(root, VA_START, VA_END,
                              &__dune_vm_clone_helper, newRoot,
                              3, CREATE_NONE);
    if (ret < 0) {
        dune_vm_free(newRoot);
        return NULL;
    }

    return newRoot;
}
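/*
 * Example (illustrative sketch): snapshot the current root, activate it,
 * and release it later. load_root() stands in for whatever mechanism the
 * caller uses to switch page tables; it is not defined in this file.
 *
 *    ptent_t *copy = dune_vm_clone(pgroot);
 *    if (!copy)
 *        return -ENOMEM;
 *    load_root(copy);    // hypothetical activation step
 *    ...
 *    dune_vm_free(copy);
 */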
int dune_vm_mprotect(ptent_t *root, void *va, size_t len, int perm)
{
    int ret;
    ptent_t pte_perm;

    if (!(perm & PERM_R)) {
        if (perm & PERM_W)
            return -EINVAL;
        perm = PERM_NONE;
    }

    pte_perm = get_pte_perm(perm);

    ret = __dune_vm_page_walk(root, va, va + len - 1,
                              &__dune_vm_mprotect_helper,
                              (void *) pte_perm, 3, CREATE_NONE);
    if (ret)
        return ret;

    dune_flush_tlb();

    return 0;
}
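/*
 * Example (illustrative sketch): revoke write access from a region after
 * initialization, leaving it readable and executable (a W^X-style policy).
 * text_start and text_len are placeholders for the caller's region.
 *
 *    int ret = dune_vm_mprotect(pgroot, text_start, text_len,
 *                               PERM_R | PERM_X);
 */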
static int __dune_vm_page_walk(ptent_t *dir, void *start_va, void *end_va,
                               page_walk_cb cb, const void *arg, int level,
                               int create)
{
    // XXX: Using PA == VA
    int i, ret;
    /* level == 3: indices into the PML4; level == 2: the PDPT;
     * level == 1: the PD; level == 0: the PT. */
    int start_idx = PDX(level, start_va);
    int end_idx = PDX(level, end_va);
    /* round start_va down to the base of the region this table covers */
    void *base_va = (void *) ((unsigned long) start_va &
                              ~(PDADDR(level + 1, 1) - 1));

    assert(level >= 0 && level <= NPTLVLS);
    assert(end_idx < NPTENTRIES);

    for (i = start_idx; i <= end_idx; i++) {
        void *n_start_va, *n_end_va;
        void *cur_va = base_va + PDADDR(level, i);
        ptent_t *pte = &dir[i];

        if (level == 0) {
            if (create == CREATE_NORMAL || *pte) {
                ret = cb(arg, pte, cur_va);
                if (ret)
                    return ret;
            }
            continue;
        }

        if (level == 1) {
            if (create == CREATE_BIG || pte_big(*pte)) {
                ret = cb(arg, pte, cur_va);
                if (ret)
                    return ret;
                continue;
            }
        }

        if (level == 2) {
            if (create == CREATE_BIG_1GB || pte_big(*pte)) {
                ret = cb(arg, pte, cur_va);
                if (ret)
                    return ret;
                continue;
            }
        }

        if (!pte_present(*pte)) {
            ptent_t *new_pte;

            if (!create)
                continue;

            new_pte = alloc_page();
            if (!new_pte)
                return -ENOMEM;
            memset(new_pte, 0, PGSIZE);
            /* install the newly allocated next-level table */
            *pte = PTE_ADDR(new_pte) | PTE_DEF_FLAGS;
        }

        n_start_va = (i == start_idx) ? start_va : cur_va;
        n_end_va = (i == end_idx) ? end_va :
                   cur_va + PDADDR(level, 1) - 1;

        ret = __dune_vm_page_walk((ptent_t *) PTE_ADDR(dir[i]),
                                  n_start_va, n_end_va, cb, arg,
                                  level - 1, create);
        if (ret)
            return ret;
    }

    return 0;
}
int dune_vm_page_walk(ptent_t *root, void *start_va, void *end_va,
                      page_walk_cb cb, const void *arg)
{
    return __dune_vm_page_walk(root, start_va, end_va,
                               cb, arg, 3, CREATE_NONE);
}
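/*
 * Example (illustrative sketch): count existing leaf mappings over a range
 * with a custom callback. The callback matches page_walk_cb as used
 * throughout this file, cb(arg, pte, va), and returns nonzero to abort the
 * walk. With CREATE_NONE the walk only visits entries that already exist.
 *
 *    static int count_mapped_cb(const void *arg, ptent_t *pte, void *va)
 *    {
 *        (*(size_t *) arg)++;
 *        return 0;
 *    }
 *
 *    size_t n = 0;
 *    dune_vm_page_walk(pgroot, (void *) 0, (void *) 0x7fffffffffffUL,
 *                      &count_mapped_cb, &n);
 */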