/*
 * Tear down a VM area: drop the reference on every page still mapped in
 * the area's [low_address, high_address) range, destroy the area, and
 * finally release the area's backing cache reference.
 *
 * The address-space lock is held in write mode across both the unmap
 * walk and destroy_vm_area(). The cache reference is dropped only after
 * the lock is released — NOTE(review): presumably so dec_cache_ref's
 * teardown work runs outside the address-space lock; confirm against
 * the cache code's locking rules.
 */
void destroy_area(struct vm_address_space *space, struct vm_area *area)
{
    struct vm_cache *cache;
    unsigned int va;
    unsigned int ptentry;

    rwlock_lock_write(&space->mut);

    /* Snapshot the cache pointer while the area is still alive. */
    cache = area->cache;

    // Unmap all pages in this area
    for (va = area->low_address; va < area->high_address; va += PAGE_SIZE)
    {
        ptentry = query_translation_map(space->translation_map, va);
        /* Only pages actually present in the map hold a reference. */
        if ((ptentry & PAGE_PRESENT) != 0)
        {
            VM_DEBUG("destroy_area: decrementing page ref for va %08x pa %08x\n",
                     va, PAGE_ALIGN(ptentry));
            dec_page_ref(pa_to_page(ptentry));
        }
    }

    destroy_vm_area(area);
    rwlock_unlock_write(&space->mut);

    if (cache)
        dec_cache_ref(cache);
}
/*
 * Look up the struct page backing the user mapping at @base in the
 * task's page table. Returns NULL when no pte entry exists for @base.
 */
struct page *pgt_get_task_page(struct task_page_table *table, unsigned long base)
{
    unsigned long pte_va;
    unsigned long pa;

    /* Virtual address of the pte entry covering @base. */
    pte_va = pgt_get_pte_addr(table->pde_base, base);
    if (!pte_va)
        return NULL;

    /*
     * Translate the pte entry to the physical address it maps; user
     * pages must be resolved with pa_to_page, never va_to_page.
     */
    pa = mmu_pte_entry_to_pa(pte_va);
    return pa_to_page(pa);
}
/*
 * Free a translation map: unlink it from the global map list, drop the
 * reference on every user-space page table it owns, release the page
 * directory page itself, and return the map structure to its slab.
 */
void destroy_translation_map(struct vm_translation_map *map)
{
    int i;
    unsigned int *pgdir;
    int old_flags;

    /* Unlink from the global map list under the kernel space lock. */
    old_flags = acquire_spinlock_int(&kernel_space_lock);
    list_remove_node(map);
    release_spinlock_int(&kernel_space_lock, old_flags);

    // Free user space page tables
    // NOTE(review): 768 appears to be the number of user-space page
    // directory entries (768 of 1024 PDEs => 3GB user / 1GB kernel
    // split) — confirm against the kernel/user split defined elsewhere.
    pgdir = (unsigned int*) PA_TO_VA(map->page_dir);
    for (i = 0; i < 768; i++)
    {
        /* Each present PDE holds one reference on its page-table page. */
        if (pgdir[i] & PAGE_PRESENT)
            dec_page_ref(pa_to_page(PAGE_ALIGN(pgdir[i])));
    }

    /* Release the page directory page, then the map structure itself. */
    dec_page_ref(pa_to_page(map->page_dir));
    slab_free(&translation_map_slab, map);
}
/*
 * Return the struct page holding the page table that covers @mbase,
 * or NULL when the page directory has no entry for that address.
 */
static inline struct page *
pgt_get_pte_page(unsigned long base, unsigned long mbase)
{
    unsigned long pde_entry = pgt_get_pde_entry_addr(base, mbase);
    unsigned long table_pa = mmu_pde_entry_to_pa(pde_entry);

    /* A zero physical address means no page table is installed. */
    return table_pa ? pa_to_page(table_pa) : NULL;
}
/*
 * Return the virtual address of the pte entry mapping @map_address,
 * or 0 when no page table is present for that address.
 */
static inline unsigned long
pgt_get_pte_addr(unsigned long base, unsigned long map_address)
{
    unsigned long pde_entry;
    unsigned long table_pa;
    unsigned long table_va;

    pde_entry = pgt_get_pde_entry_addr(base, map_address);
    table_pa = mmu_pde_entry_to_pa(pde_entry);
    if (!table_pa)
        return 0;

    /* Physical page of the table -> its kernel virtual address. */
    table_va = page_to_va(pa_to_page(table_pa));
    return pgt_get_pte_entry_addr(table_va, map_address);
}
/*
 * Unmap the page mapped at @base in the task's page table and return
 * its struct page so the caller can release it. Returns NULL when no
 * page table covers @base, or when @base is not currently mapped.
 */
struct page *
pgt_unmap_mmap_page(struct task_page_table *table, unsigned long base)
{
    unsigned long pte;
    unsigned long pa;
    struct page *page;
    struct page *pte_page;

    pte_page = pgt_get_pte_page(table->pde_base, base);
    if (!pte_page)
        return NULL;

    pte = pgt_get_pte_entry_addr(page_to_va(pte_page), base);

    /*
     * Fix: the pte entry may already be clear. Translating a
     * non-present entry yields pa 0, and pa_to_page(0) would hand the
     * caller a bogus page. Bail out instead, matching the !pa guards
     * used by the sibling pgt_get_* helpers.
     */
    pa = mmu_pte_entry_to_pa(pte);
    if (!pa)
        return NULL;

    page = pa_to_page(pa);
    mmu_clear_pte_entry(pte);
    return page;
}
/*
 * Return the virtual address of the pte entry for @map_address,
 * installing a fresh page table when the page directory entry is
 * empty. Returns 0 if the new page table cannot be allocated.
 */
static unsigned long
pgt_get_mapped_pte_addr(struct task_page_table *table, unsigned long map_address)
{
    unsigned long pde_entry;
    unsigned long table_pa;
    struct page *table_page;

    pde_entry = pgt_get_pde_entry_addr(table->pde_base, map_address);
    table_pa = mmu_pde_entry_to_pa(pde_entry);

    if (table_pa) {
        table_page = pa_to_page(table_pa);
    } else {
        /* No page table covers this region yet — allocate one. */
        table_page = pgt_map_new_pde_entry(&table->task_list,
                                           table->pde_base, map_address);
        if (!table_page)
            return 0;
    }

    return pgt_get_pte_entry_addr(page_to_va(table_page), map_address);
}