/* Map the physical range starting at 'phys' to the virtual range
 * [vaddr, vaddr_end) in the boot page directory, one page at a time.
 * If 'phys' is the magic value PG_ALLOCATEME, a fresh physical page is
 * allocated (via pg_alloc_page(cbi)) for every virtual page instead of
 * using a caller-supplied physical address.
 *
 * NOTE(review): 'mapped_pde' and 'pt' are static, so the routine only
 * allocates a new page table when it sees a pde *higher* than any seen
 * before; this assumes callers map monotonically increasing virtual
 * addresses -- confirm boot-time callers honour this.
 */
void pg_map(phys_bytes phys, vir_bytes vaddr, vir_bytes vaddr_end,
	kinfo_t *cbi)
{
	/* State carried over from the previous call: 'pt' is the page
	 * table currently backing page directory slot 'mapped_pde'. */
	static int mapped_pde = -1;
	static u32_t *pt = NULL;
	int pde, pte;

	assert(kernel_may_alloc);

	if(phys == PG_ALLOCATEME) {
		/* Fresh pages will be allocated; only vaddr must be aligned. */
		assert(!(vaddr % ARM_PAGE_SIZE));
	} else {
		/* Physical and virtual offsets within the page must agree;
		 * round both down to a page boundary. */
		assert((vaddr % ARM_PAGE_SIZE) == (phys % ARM_PAGE_SIZE));
		vaddr = pg_rounddown(vaddr);
		phys = pg_rounddown(phys);
	}
	assert(vaddr < kern_vir_start);

	while(vaddr < vaddr_end) {
		phys_bytes source = phys;
		assert(!(vaddr % ARM_PAGE_SIZE));
		if(phys == PG_ALLOCATEME) {
			/* Allocate the backing page for this mapping. */
			source = pg_alloc_page(cbi);
		} else {
			assert(!(phys % ARM_PAGE_SIZE));
		}
		assert(!(source % ARM_PAGE_SIZE));
		pde = ARM_VM_PDE(vaddr);
		pte = ARM_VM_PTE(vaddr);
		/* Crossed into a directory slot not yet filled: allocate a
		 * page table for it and install it in the page directory. */
		if(mapped_pde < pde) {
			phys_bytes ph;
			pt = alloc_pagetable(&ph);
			pagedir[pde] = (ph & ARM_VM_PDE_MASK) |
				ARM_VM_PAGEDIR | ARM_VM_PDE_DOMAIN;
			mapped_pde = pde;
		}
		assert(pt);
		/* Install the page table entry (write-through, user). */
		pt[pte] = (source & ARM_VM_PTE_MASK) | ARM_VM_PAGETABLE |
			ARM_VM_PTE_WT | ARM_VM_PTE_USER;
		vaddr += ARM_PAGE_SIZE;
		/* Advance the source only when the caller supplied it. */
		if(phys != PG_ALLOCATEME)
			phys += ARM_PAGE_SIZE;
	}
}
/*===========================================================================* * vm_addrok * *===========================================================================*/ int vm_addrok(void *vir, int writeflag) { pt_t *pt = &vmprocess->vm_pt; int pde, pte; vir_bytes v = (vir_bytes) vir; #if defined(__i386__) pde = I386_VM_PDE(v); pte = I386_VM_PTE(v); #elif defined(__arm__) pde = ARM_VM_PDE(v); pte = ARM_VM_PTE(v); #endif if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT)) { printf("addr not ok: missing pde %d\n", pde); return 0; } #if defined(__i386__) if(writeflag && !(pt->pt_dir[pde] & ARCH_VM_PTE_RW)) { printf("addr not ok: pde %d present but pde unwritable\n", pde); return 0; } #endif if(!(pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT)) { printf("addr not ok: missing pde %d / pte %d\n", pde, pte); return 0; } #if defined(__i386__) if(writeflag && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RW)) { printf("addr not ok: pde %d / pte %d present but unwritable\n", #elif defined(__arm__) if(!writeflag && !(pt->pt_pt[pde][pte] & ARCH_VM_PTE_RO)) { printf("addr not ok: pde %d / pte %d present but writable\n", #endif pde, pte); return 0; }
/* This function sets up a mapping from within the kernel's address
 * space to any other area of memory, either straight physical
 * memory (pr == NULL) or a process view of memory, in 1MB windows.
 * I.e., it maps in 1MB chunks of virtual (or physical) address space
 * to 1MB chunks of kernel virtual address space.
 *
 * It recognizes pr already being in memory as a special case (no
 * mapping required).
 *
 * The target (i.e. in-kernel) mapping area is one of the freepdes[]
 * VM has earlier already told the kernel about that is available. It is
 * identified as the 'pde' parameter. This value can be chosen freely
 * by the caller, as long as it is in range (i.e. 0 or higher and corresonds
 * to a known freepde slot). It is up to the caller to keep track of which
 * freepde's are in use, and to determine which ones are free to use.
 *
 * The logical number supplied by the caller is translated into an actual
 * pde number to be used, and a pointer to it (linear address) is returned
 * for actual use by phys_copy or memset.
 */
static phys_bytes createpde(
	const struct proc *pr,	/* Requested process, NULL for physical. */
	const phys_bytes linaddr,/* Address after segment translation. */
	phys_bytes *bytes,	/* Size of chunk, function may truncate it. */
	int free_pde_idx,	/* index of the free slot to use */
	int *changed		/* If mapping is made, this is set to 1. */
	)
{
	u32_t pdeval;
	phys_bytes offset;
	int pde;

	/* Translate the caller's logical slot index into a real pde. */
	assert(free_pde_idx >= 0 && free_pde_idx < nfreepdes);
	pde = freepdes[free_pde_idx];
	/* ARM first-level translation table has 4096 entries. */
	assert(pde >= 0 && pde < 4096);

	if(pr && ((pr == get_cpulocal_var(ptproc)) || iskernelp(pr))) {
		/* Process memory is requested, and
		 * it's a process that is already in current page table, or
		 * the kernel, which is always there.
		 * Therefore linaddr is valid directly, with the requested
		 * size.
		 */
		return linaddr;
	}

	if(pr) {
		/* Requested address is in a process that is not currently
		 * accessible directly. Grab the PDE entry of that process'
		 * page table that corresponds to the requested address.
		 */
		assert(pr->p_seg.p_ttbr_v);
		pdeval = pr->p_seg.p_ttbr_v[ARM_VM_PDE(linaddr)];
	} else {
		/* Requested address is physical. Make up the PDE entry. */
		pdeval = (linaddr & ARM_VM_SECTION_MASK)
			| ARM_VM_SECTION
			| ARM_VM_SECTION_DOMAIN
			| ARM_VM_SECTION_DEVICE
			| ARM_VM_SECTION_USER;
	}

	/* Write the pde value that we need into a pde that the kernel
	 * can access, into the currently loaded page table so it becomes
	 * visible.
	 */
	assert(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v);
	if(get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] != pdeval) {
		/* Only flag a change (=> caller may flush TLB) when the
		 * slot actually had to be rewritten. */
		get_cpulocal_var(ptproc)->p_seg.p_ttbr_v[pde] = pdeval;
		*changed = 1;
	}

	/* Memory is now available, but only the 1MB window of virtual
	 * address space that we have mapped; calculate how much of
	 * the requested range is visible and return that in *bytes,
	 * if that is less than the requested range.
	 */
	offset = linaddr & ARM_VM_OFFSET_MASK_1MB; /* Offset in 1MB window. */
	*bytes = MIN(*bytes, ARM_SECTION_SIZE - offset);

	/* Return the linear address of the start of the new mapping. */
	return ARM_SECTION_SIZE*pde + offset;
}
/*===========================================================================*
 *				findhole		     		     *
 *===========================================================================*/
/* Find 'pages' consecutive unmapped pages in VM's own virtual address
 * space, between the end of VM's 1GB-reserved heap region and
 * VM_STACKTOP. Returns the start address of the hole, or NO_MEM if the
 * address space is exhausted. On i386 only a single page is searched
 * for (the 'pages' argument is used by the arm code path only).
 */
static u32_t findhole(int pages)
{
/* Find a space in the virtual address space of VM. */
	u32_t curv;
	int pde = 0, try_restart;
	/* Remember where the previous search ended, to resume there. */
	static u32_t lastv = 0;
	pt_t *pt = &vmprocess->vm_pt;
	vir_bytes vmin, vmax;
#if defined(__arm__)
	u32_t holev;	/* start of the candidate hole */
#endif
	vmin = (vir_bytes) (&_end); /* marks end of VM BSS */
	vmin += 1024*1024*1024; /* reserve 1GB virtual address space for VM heap */
	vmin &= ARCH_VM_ADDR_MASK;
	vmax = VM_STACKTOP;

	/* Input sanity check. */
	assert(vmin + VM_PAGE_SIZE >= vmin);
	assert(vmax >= vmin + VM_PAGE_SIZE);
	assert((vmin % VM_PAGE_SIZE) == 0);
	assert((vmax % VM_PAGE_SIZE) == 0);
#if defined(__arm__)
	assert(pages > 0);
#endif

#if SANITYCHECKS
	/* Randomize the starting point to help flush out bugs. */
	curv = ((u32_t) random()) % ((vmax - vmin)/VM_PAGE_SIZE);
	curv *= VM_PAGE_SIZE;
	curv += vmin;
#else
	/* Resume from where the last search left off, if still in range. */
	curv = lastv;
	if(curv < vmin || curv >= vmax)
		curv = vmin;
#endif
	try_restart = 1;

	/* Start looking for a free page starting at vmin. */
	while(curv < vmax) {
		int pte;
#if defined(__arm__)
		int i, nohole;
#endif
		assert(curv >= vmin);
		assert(curv < vmax);

#if defined(__i386__)
		pde = I386_VM_PDE(curv);
		pte = I386_VM_PTE(curv);
#elif defined(__arm__)
		holev = curv; /* the candidate hole */
		nohole = 0;
		/* Walk 'pages' consecutive pages; any mapped page or
		 * discontinuity disqualifies the candidate. */
		for (i = 0; i < pages && !nohole; ++i) {
		    if(curv >= vmax) { break; }
#endif

#if defined(__i386__)
		if(!(pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) ||
		   !(pt->pt_pt[pde][pte] & ARCH_VM_PAGE_PRESENT)) {
#elif defined(__arm__)
		    pde = ARM_VM_PDE(curv);
		    pte = ARM_VM_PTE(curv);

		    /* if page present, no hole */
		    if((pt->pt_dir[pde] & ARCH_VM_PDE_PRESENT) &&
		       (pt->pt_pt[pde][pte] & ARCH_VM_PTE_PRESENT))
			nohole = 1;

		    /* if not contiguous, no hole */
		    if (curv != holev + i * VM_PAGE_SIZE)
			nohole = 1;

		    curv+=VM_PAGE_SIZE;
		}

		/* there's a large enough hole */
		if (!nohole && i == pages) {
#endif
			lastv = curv;
#if defined(__i386__)
			return curv;
#elif defined(__arm__)
			return holev;
#endif
		}

#if defined(__i386__)
		curv+=VM_PAGE_SIZE;
#elif defined(__arm__)
		/* Reset curv */
#endif
		/* Wrapped past vmax: restart once from the bottom, in case
		 * a resumed search skipped holes below 'lastv'. */
		if(curv >= vmax && try_restart) {
			curv = vmin;
			try_restart = 0;
		}
	}

	printf("VM: out of virtual address space in vm\n");

	return NO_MEM;
}

/*===========================================================================*
 *				vm_freepages		     		     *
 *===========================================================================*/
/* Unmap and free 'pages' consecutive pages of VM's own address space
 * starting at page-aligned address 'vir'. Static (boot-image) pages
 * are never freed. Panics if the page table update fails.
 */
void vm_freepages(vir_bytes vir, int pages)
{
	assert(!(vir % VM_PAGE_SIZE));

	if(is_staticaddr(vir)) {
		printf("VM: not freeing static page\n");
		return;
	}

	/* Overwrite the mappings with MAP_NONE and free the frames. */
	if(pt_writemap(vmprocess, &vmprocess->vm_pt, vir,
		MAP_NONE, pages*VM_PAGE_SIZE, 0,
		WMF_OVERWRITE | WMF_FREE) != OK)
		panic("vm_freepages: pt_writemap failed");

	/* Keep VM's own-page accounting in sync. */
	vm_self_pages--;
#if SANITYCHECKS
	/* If SANITYCHECKS are on, flush tlb so accessing freed pages is
	 * always trapped, also if not in tlb.
	 */
	if((sys_vmctl(SELF, VMCTL_FLUSHTLB, 0)) != OK) {
		panic("VMCTL_FLUSHTLB failed");
	}
#endif
}