void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
        kinfo_t *cbi)
{
        static int mapped_pde = -1;
        static u32_t *pt = NULL;
        int pde, pte;

        assert(kernel_may_alloc);

        if(phys == PG_ALLOCATEME) {
                /* Caller wants pg_map to allocate the physical pages. */
                assert(!(vaddr % ARM_PAGE_SIZE));
        } else {
                assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
                vaddr = pg_rounddown(vaddr);
                phys = pg_rounddown(phys);
        }
        assert(vaddr < kern_vir_start);

        while(vaddr < vaddr_end) {
                phys_bytes source = phys;
                assert(!(vaddr % ARM_PAGE_SIZE));
                if(phys == PG_ALLOCATEME) {
                        source = pg_alloc_page(cbi);
                } else {
                        assert(!(phys % ARM_PAGE_SIZE));
                }
                assert(!(source % ARM_PAGE_SIZE));
                pde = ARM_VM_PDE(vaddr);
                pte = ARM_VM_PTE(vaddr);

                /* Page directory entry we have not touched yet: allocate and
                 * install a fresh second-level page table for it.
                 */
                if(mapped_pde < pde) {
                        phys_bytes ph;
                        pt = alloc_pagetable(&ph);
                        pagedir[pde] = (ph & ARM_VM_PDE_MASK)
                                | ARM_VM_PAGEDIR
                                | ARM_VM_PDE_DOMAIN;
                        mapped_pde = pde;
                }
                assert(pt);

                /* Map the page write-through and user-accessible. */
                pt[pte] = (source & ARM_VM_PTE_MASK)
                        | ARM_VM_PAGETABLE
                        | ARM_VM_PTE_WT
                        | ARM_VM_PTE_USER;
                vaddr += ARM_PAGE_SIZE;
                if(phys != PG_ALLOCATEME)
                        phys += ARM_PAGE_SIZE;
        }
}
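/*
 * Illustrative aside (not part of the kernel source): pg_map splits each
 * virtual address into a page-directory index (pde) and a page-table index
 * (pte) via the ARM_VM_PDE/ARM_VM_PTE macros, whose definitions are not
 * shown here.  The standalone sketch below assumes the ARMv7
 * short-descriptor layout (a 4096-entry first level where each entry covers
 * 1 MiB, and 256-entry second-level tables of 4 KiB pages); the DEMO_*
 * names are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PAGE_SIZE  4096u                           /* assumed ARM_PAGE_SIZE */
#define DEMO_VM_PDE(v)  ((uint32_t)(v) >> 20)           /* 1 MiB per first-level entry */
#define DEMO_VM_PTE(v)  (((uint32_t)(v) >> 12) & 0xffu) /* 256 small pages per table */

int main(void)
{
        uint32_t vaddr = 0x80123000u;   /* arbitrary example address */

        /* This is the split pg_map performs for every page it maps. */
        printf("vaddr 0x%08x -> pde %u, pte %u, offset 0x%03x\n",
                (unsigned) vaddr, (unsigned) DEMO_VM_PDE(vaddr),
                (unsigned) DEMO_VM_PTE(vaddr),
                (unsigned) (vaddr & (DEMO_PAGE_SIZE - 1)));
        return 0;
}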
/*===========================================================================*
 *                              vm_addrok                                    *
 *===========================================================================*/
int vm_addrok(void *vir, int writeflag)
{
        pt_t *pt = &vmprocess->vm_pt;
        int pde, pte;
        vir_bytes v = (vir_bytes) vir;

#if defined(__i386__)
        pde = I386_VM_PDE(v);
        pte = I386_VM_PTE(v);
#elif defined(__arm__)
        pde = ARM_VM_PDE(v);
        pte = ARM_VM_PTE(v);
#endif

        if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) {
                printf("addr not ok: missing pde %d\n", pde);
                return 0;
        }

#if defined(__i386__)
        if(writeflag &&
                !(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) {
                printf("addr not ok: pde %d present but pde unwritable\n",
                        pde);
                return 0;
        }
#endif

        if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) {
                printf("addr not ok: missing pde %d / pte %d\n", pde, pte);
                return 0;
        }

#if defined(__i386__)
        if(writeflag &&
                !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) {
                printf("addr not ok: pde %d / pte %d present but unwritable\n",
#elif defined(__arm__)
        if(!writeflag &&
                !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) {
                printf("addr not ok: pde %d / pte %d present but writable\n",
#endif
                        pde, pte);
                return 0;
        }

        return 1;
}
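/*
 * Illustrative aside (standalone, not VM code): the two permission checks in
 * vm_addrok test differently named bits because the architectures encode
 * write permission with opposite polarity: an i386 PTE carries a read/write
 * bit that must be set for writes to be allowed (ARCH_VM_PTE_RW), while an
 * ARM short-descriptor PTE carries a read-only bit that, when set, forbids
 * writes (ARCH_VM_PTE_RO).  The sketch below contrasts the two conventions;
 * the DEMO_* bit positions are invented for the example.
 */
#include <stdio.h>
#include <stdint.h>

#define DEMO_PTE_PRESENT 0x01u
#define DEMO_PTE_RW      0x02u  /* i386-style: set means writable */
#define DEMO_PTE_RO      0x02u  /* ARM-style: set means read-only */

static int demo_writable_i386(uint32_t pte)
{
        return (pte & DEMO_PTE_PRESENT) && (pte & DEMO_PTE_RW);
}

static int demo_writable_arm(uint32_t pte)
{
        return (pte & DEMO_PTE_PRESENT) && !(pte & DEMO_PTE_RO);
}

int main(void)
{
        uint32_t pte = DEMO_PTE_PRESENT | 0x02u;        /* bit 1 set */

        printf("same bit pattern: i386 reads it as %s, ARM as %s\n",
                demo_writable_i386(pte) ? "writable" : "not writable",
                demo_writable_arm(pte) ? "writable" : "read-only");
        return 0;
}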
/*===========================================================================*
 *                              findhole                                     *
 *===========================================================================*/
static u32_t findhole(int pages)
{
/* Find a space in the virtual address space of VM. */
        u32_t curv;
        int pde = 0, try_restart;
        static u32_t lastv = 0;
        pt_t *pt = &vmprocess->vm_pt;
        vir_bytes vmin, vmax;
#if defined(__arm__)
        u32_t holev;
#endif

        vmin = (vir_bytes) (&_end);     /* marks end of VM BSS */
        vmin += 1024*1024*1024; /* reserve 1GB virtual address space for VM heap */
        vmin &= ARCH_VM_ADDR_MASK;
        vmax = VM_STACKTOP;

        /* Input sanity check. */
        assert(vmin + VM_PAGE_SIZE >= vmin);
        assert(vmax >= vmin + VM_PAGE_SIZE);
        assert((vmin % VM_PAGE_SIZE) == 0);
        assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
        assert(pages > 0);
#endif

#if SANITYCHECKS
        curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
        curv *= VM_PAGE_SIZE;
        curv += vmin;
#else
        curv = lastv;
        if(curv < vmin || curv >= vmax)
                curv = vmin;
#endif
        try_restart = 1;

        /* Start looking for a free page starting at vmin. */
        while(curv < vmax) {
                int pte;
#if defined(__arm__)
                int i, nohole;
#endif

                assert(curv >= vmin);
                assert(curv < vmax);

#if defined(__i386__)
                pde = I386_VM_PDE(curv);
                pte = I386_VM_PTE(curv);
#elif defined(__arm__)
                holev = curv;   /* the candidate hole */
                nohole = 0;
                for (i = 0; i < pages && !nohole; ++i) {
                        if(curv >= vmax) {
                                break;
                        }
#endif

#if defined(__i386__)
                if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
                   !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
#elif defined(__arm__)
                        pde = ARM_VM_PDE(curv);
                        pte = ARM_VM_PTE(curv);

                        /* if page present, no hole */
                        if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
                           (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
                                nohole = 1;

                        /* if not contiguous, no hole */
                        if (curv != holev + i * VM_PAGE_SIZE)
                                nohole = 1;

                        curv+=VM_PAGE_SIZE;
                }

                /* there's a large enough hole */
                if (!nohole && i == pages) {
#endif
                        lastv = curv;
#if defined(__i386__)
                        return curv;
#elif defined(__arm__)
                        return holev;
#endif
                }

#if defined(__i386__)
                curv+=VM_PAGE_SIZE;
#elif defined(__arm__)
                /* Reset curv */
#endif
                if(curv >= vmax && try_restart) {
                        curv = vmin;
                        try_restart = 0;
                }
        }

        printf("VM: out of virtual address space in vm\n");

        return NO_MEM;
}

/*===========================================================================*
 *                              vm_freepages                                 *
 *===========================================================================*/
void vm_freepages(vir_bytes vir, int pages)
{
        assert(!(vir % VM_PAGE_SIZE));

        if(is_staticaddr(vir)) {
                printf("VM: not freeing static page\n");
                return;
        }

        if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
                MAP_NONE, pages*VM_PAGE_SIZE, 0,
                WMF_OVERWRITE | WMF_FREE) != OK)
                panic("vm_freepages: pt_writemap failed");
        vm_self_pages--;

#if SANITYCHECKS
        /* If SANITYCHECKS are on, flush tlb so accessing freed pages is
         * always trapped, also if not in tlb.
         */
        if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
                panic("VMCTL_FLUSHTLB failed");
        }
#endif
}
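/*
 * Illustrative aside (standalone, not VM code): findhole above is a first-fit
 * scan over the VM process's own page tables, looking for `pages` contiguous
 * unmapped slots and wrapping around to the start of the range once
 * (try_restart).  The model below replays the same idea against a flat
 * "page present" array instead of real page tables; demo_findhole and its
 * parameters are invented for the example.
 */
#include <stdio.h>

#define DEMO_SLOTS 16

static int demo_findhole(const int present[DEMO_SLOTS], int start, int pages)
{
        int pass;

        for (pass = 0; pass < 2; pass++) {      /* second pass restarts at 0 */
                int base = pass ? 0 : start;
                int i;

                for (i = base; i + pages <= DEMO_SLOTS; i++) {
                        int j, hole = 1;

                        for (j = 0; j < pages; j++) {
                                if (present[i + j]) {   /* mapped: no hole here */
                                        hole = 0;
                                        break;
                                }
                        }
                        if (hole)
                                return i;       /* first fit */
                }
        }
        return -1;                              /* no hole large enough */
}

int main(void)
{
        int present[DEMO_SLOTS] =
                { 1, 1, 0, 1, 0, 0, 0, 1, 1, 0, 0, 0, 0, 1, 1, 1 };

        printf("3-slot hole starts at slot %d\n", demo_findhole(present, 5, 3));
        printf("5-slot hole starts at slot %d\n", demo_findhole(present, 5, 5));
        return 0;
}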