/**
 * @brief Return the page mapped at virtual address 'va' in
 * page directory 'pgdir'.
 *
 * If pte_store is not NULL, then we store in it the address
 * of the pte for this page.  This is used by page_remove
 * but should not be used by other callers.
 *
 * For jumbos, right now this returns the first Page* in the 4MB range.
 *
 * @param[in]  pgdir     the page directory from which we should do the lookup
 * @param[in]  va        the virtual address of the page we are looking up
 * @param[out] pte_store the address of the page table entry for the returned
 *                       page
 *
 * @return PAGE the page mapped at virtual address 'va'
 * @return NULL no mapping exists at virtual address 'va', or it's paged out
 */
page_t *page_lookup(pde_t *pgdir, void *va, pte_t **pte_store)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);

	/* No PTE at all, or the mapping is not present (e.g. paged out). */
	if (!pte || !PAGE_PRESENT(*pte))
		return NULL;
	/* Hand the PTE location back to the caller on request (page_remove). */
	if (pte_store)
		*pte_store = pte;
	return pa2page(PTE_ADDR(*pte));
}
/**
 * @brief Unmaps the physical page at virtual address 'va' in page directory
 * 'pgdir'.
 *
 * If there is no physical page at that address, this function silently
 * does nothing.
 *
 * Details:
 *   - The ref count on the physical page is decremented when the page is
 *     removed.
 *   - The physical page is freed if the refcount reaches 0.
 *   - The pg table entry corresponding to 'va' is set to 0 (if such a PTE
 *     exists).
 *   - The TLB is invalidated if an entry is removed from the pg dir/pg table.
 *
 * This may be wonky wrt Jumbo pages and decref.
 *
 * @param pgdir the page directory from which the page should be removed
 * @param va    the virtual address at which the page we are trying to
 *              remove is mapped
 *
 * TODO: consider deprecating this, or at least changing how it works with
 * TLBs.  Might want to have the caller need to manage the TLB.  Also note it
 * is used in env_user_mem_free, minus the walk.
 */
void page_remove(pde_t *pgdir, void *va)
{
	pte_t *pte = pgdir_walk(pgdir, va, 0);

	/* Nothing mapped here: silently succeed. */
	if (!pte || PAGE_UNMAPPED(*pte))
		return;

	if (PAGE_PRESENT(*pte)) {
		/* TODO: (TLB) need to do a shootdown, inval sucks.  And might
		 * want to manage the TLB / free pages differently (like by the
		 * caller).  Careful about the proc/memory lock here. */
		page_t *page = ppn2page(PTE2PPN(*pte));

		/* Clear the PTE before invalidating so no stale translation
		 * survives, then drop our reference on the physical page. */
		*pte = 0;
		tlb_invalidate(pgdir, va);
		page_decref(page);
	} else if (PAGE_PAGED_OUT(*pte)) {
		/* TODO: (SWAP) need to free this from the swap */
		panic("Swapping not supported!");
		*pte = 0;
	}
}
/* Convert a kernel guest virtual address to physical address. * Assumes that the guest VA is in the high negative address space. * TODO: Takes the vm_thread argument so that we can walk the page tables * instead of just coercing the pointer. Therefore, this is not in vmm.h * since it may get complex. */ int gvatogpa(struct guest_thread *vm_thread, uint64_t va, uint64_t *pa) { assert(vm_thread != NULL); struct vm_trapframe *vm_tf = gth_to_vmtf(vm_thread); uint64_t *ptptr = (uint64_t *)vm_tf->tf_cr3; uint64_t entry; for (int shift = PML4_SHIFT; shift >= PML1_SHIFT; shift -= BITS_PER_PML) { entry = ptptr[PMLx(va, shift)]; if (!PAGE_PRESENT(entry)) return -1; if ((entry & PTE_PS) != 0) { uint64_t bitmask = ((1 << shift) - 1); *pa = (((uint64_t)va & bitmask) | (entry & ~bitmask)); return 0; } ptptr = (uint64_t *)PG_ADDR(entry); } *pa = ((uint64_t)va & 0xfff) | (uint64_t)ptptr; return 0; }