/*
 * Release the MMU page-table mappings covering the ISP virtual
 * address range [start_isp_virt, end_isp_virt).
 *
 * Both endpoints are rounded down to page boundaries; the number of
 * pages between the aligned addresses is then handed to mmu_unmap().
 */
static void free_mmu_map(struct isp_mmu *mmu, unsigned int start_isp_virt,
			 unsigned int end_isp_virt)
{
	unsigned int aligned_start = start_isp_virt & ISP_PAGE_MASK;
	unsigned int aligned_end = end_isp_virt & ISP_PAGE_MASK;
	unsigned int page_count = (aligned_end - aligned_start) >> ISP_PAGE_OFFSET;

	mmu_unmap(mmu, aligned_start, page_count);
}
/* * vm_object_setsize: change the size of a vm_object. */ int vm_object_setsize(struct addrspace *as, struct vm_object *vmo, unsigned npages) { int result; unsigned i; struct lpage *lp; KASSERT(vmo != NULL); KASSERT(vmo->vmo_lpages != NULL); if (npages < lpage_array_num(vmo->vmo_lpages)) { for (i=npages; i<lpage_array_num(vmo->vmo_lpages); i++) { lp = lpage_array_get(vmo->vmo_lpages, i); if (lp != NULL) { KASSERT(as != NULL); /* remove any tlb entry for this mapping */ mmu_unmap(as, vmo->vmo_base+PAGE_SIZE*i); lpage_destroy(lp); } else { swap_unreserve(1); } } result = lpage_array_setsize(vmo->vmo_lpages, npages); /* shrinking an array shouldn't fail */ KASSERT(result==0); } else if (npages > lpage_array_num(vmo->vmo_lpages)) { int oldsize = lpage_array_num(vmo->vmo_lpages); unsigned newpages = npages - oldsize; result = swap_reserve(newpages); if (result) { return result; } result = lpage_array_setsize(vmo->vmo_lpages, npages); if (result) { swap_unreserve(newpages); return result; } for (i=oldsize; i<npages; i++) { lpage_array_set(vmo->vmo_lpages, i, NULL); } } return 0; }
/*
 * Public wrapper: unmap pgnr pages starting at ISP virtual address
 * isp_virt by delegating directly to mmu_unmap().
 */
void isp_mmu_unmap(struct isp_mmu *mmu, unsigned int isp_virt,
		   unsigned int pgnr)
{
	mmu_unmap(mmu, isp_virt, pgnr);
}