uintmem
mmuphysaddr(uintptr_t va)
{
	int l;
	PTE *pte;
	uintmem mask, pa;

	/*
	 * Given a VA, find the PA.
	 * This is probably not the right interface,
	 * but will do as an experiment. Usual
	 * question, should va be void* or uintptr?
	 */
	l = mmuwalk(UINT2PTR(machp()->MMU.pml4->va), va, 0, &pte, nil);
	DBG("physaddr: va %#p l %d\n", va, l);
	if(l < 0)
		return ~0;

	mask = PGLSZ(l)-1;
	pa = (*pte & ~mask) + (va & mask);
	DBG("physaddr: l %d va %#p pa %#llux\n", l, va, pa);

	return pa;
}
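/*
 * Usage sketch (hypothetical helper, not part of this file):
 * translate a kernel virtual address with mmuphysaddr() and
 * treat its ~0 return, produced above when mmuwalk() finds no
 * valid entry, as a hard error.
 */
static uintmem
kvatopa(uintptr_t va)
{
	uintmem pa;

	if((pa = mmuphysaddr(va)) == ~0)
		panic("kvatopa: va %#p not mapped", va);
	return pa;
}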
/*
 * Add kernel mappings for pa -> va for a section of size bytes.
 * Called only after the va range is known to be unoccupied.
 */
static int
pdmap(uintptr_t pa, int attr, uintptr_t va, usize size)
{
	uintptr_t pae;
	PTE *pd, *pde, *pt, *pte;
	int pdx, pgsz;
	Page *pg;

	pd = (PTE*)(PDMAP+PDX(PDMAP)*4096);

	for(pae = pa + size; pa < pae; pa += pgsz) {
		pdx = PDX(va);
		pde = &pd[pdx];

		/*
		 * Check if it can be mapped using a big page,
		 * i.e. is big enough and starts on a suitable boundary.
		 * Assume processor can do it.
		 */
		if(ALIGNED(pa, PGLSZ(1)) && ALIGNED(va, PGLSZ(1)) && (pae-pa) >= PGLSZ(1)) {
			assert(*pde == 0);
			*pde = pa|attr|PtePS|PteP;
			pgsz = PGLSZ(1);
		}
		else {
			if(*pde == 0) {
				pg = mmuptpalloc();
				assert(pg != nil && pg->pa != 0);
				*pde = pg->pa|PteRW|PteP;
				memset((PTE*)(PDMAP+pdx*4096), 0, 4096);
			}
			assert(*pde != 0);

			pt = (PTE*)(PDMAP+pdx*4096);
			pte = &pt[PTX(va)];
			assert(!(*pte & PteP));
			*pte = pa|attr|PteP;
			pgsz = PGLSZ(0);
		}
		va += pgsz;
	}

	return 0;
}
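/*
 * The big-page test above, restated as a stand-alone predicate
 * (illustrative sketch only; pdmap() does this inline): a 2MiB
 * entry is usable only when pa and va share 2MiB alignment and
 * at least PGLSZ(1) bytes of the request remain.
 */
static int
canusebigpage(uintptr_t pa, uintptr_t va, uintptr_t remain)
{
	return ALIGNED(pa, PGLSZ(1)) && ALIGNED(va, PGLSZ(1)) && remain >= PGLSZ(1);
}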
/*
 * Look for free space in the vmap.
 */
static uintptr_t
vmapalloc(usize size)
{
	int i, n, o;
	PTE *pd, *pt;
	int pdsz, ptsz;

	pd = (PTE*)(PDMAP+PDX(PDMAP)*4096);
	pd += PDX(VMAP);
	pdsz = VMAPSZ/PGLSZ(1);

	/*
	 * Look directly in the PD entries if the size is
	 * larger than the range mapped by a single entry.
	 */
	if(size >= PGLSZ(1)) {
		n = HOWMANY(size, PGLSZ(1));
		if((o = findhole(pd, pdsz, n)) != -1)
			return VMAP + o*PGLSZ(1);
		return 0;
	}

	/*
	 * Size is smaller than that mapped by a single PD entry.
	 * Look for an already mapped PT page that has room.
	 */
	n = HOWMANY(size, PGLSZ(0));
	ptsz = PGLSZ(0)/sizeof(PTE);
	for(i = 0; i < pdsz; i++) {
		if(!(pd[i] & PteP) || (pd[i] & PtePS))
			continue;

		pt = (PTE*)(PDMAP+(PDX(VMAP)+i)*4096);
		if((o = findhole(pt, ptsz, n)) != -1)
			return VMAP + i*PGLSZ(1) + o*PGLSZ(0);
	}

	/*
	 * Nothing suitable, start using a new PD entry.
	 */
	if((o = findhole(pd, pdsz, 1)) != -1)
		return VMAP + o*PGLSZ(1);

	return 0;
}
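/*
 * Sketch of the findhole() contract vmapalloc() relies on
 * (assumption: the real definition lives elsewhere in the MMU
 * code): scan the first len entries of a table for n consecutive
 * empty slots and return the index of the first, or -1 if none.
 */
static int
findhole(PTE *a, int len, int n)
{
	int have, i;

	have = 0;
	for(i = 0; i < len; i++) {
		if(a[i] == 0)
			have++;
		else
			have = 0;
		if(have >= n)
			return i+1 - have;
	}
	return -1;
}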
/*
 * Map the physical range [pa, pa+size) at va in the given pml4.
 * Page attributes travel in the low bits of pa; 2MiB entries are
 * used once va reaches a suitable boundary and enough bytes remain.
 */
void
pmap(uintptr *pml4, uintptr pa, uintptr va, vlong size)
{
	uintptr *pte, *ptee, flags;
	int z, l;

	if(size <= 0 || va < VMAP)
		panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
	flags = pa;
	pa = PPN(pa);
	flags -= pa;
	if(va >= KZERO)
		flags |= PTEGLOBAL;
	while(size > 0){
		if(size >= PGLSZ(1) && (va % PGLSZ(1)) == 0)
			flags |= PTESIZE;
		l = (flags & PTESIZE) != 0;
		z = PGLSZ(l);
		pte = mmuwalk(pml4, va, l, 1);
		if(pte == 0){
			pte = mmuwalk(pml4, va, ++l, 0);
			if(pte && (*pte & PTESIZE)){
				flags |= PTESIZE;
				z = va & (PGLSZ(l)-1);
				va -= z;
				pa -= z;
				size += z;
				continue;
			}
			panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
		}
		ptee = pte + ptecount(va, l);
		while(size > 0 && pte < ptee){
			*pte++ = pa | flags;
			pa += z;
			va += z;
			size -= z;
		}
	}
}
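/*
 * Usage sketch (hypothetical helper; the address and flag mix are
 * illustrative, not from this file): map 8MiB of a device frame
 * buffer uncached into the VMAP window.  pmap() splits the
 * attribute bits back out of the low bits of pa with PPN().
 */
static void
mapframebuffer(uintptr *pml4)
{
	pmap(pml4, 0xfd000000ull | PTEUNCACHED | PTEWRITE | PTEVALID,
		VMAP + 2*MiB, 8*MiB);
}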
void
asmmeminit(void)
{
	Proc *up = externup();
	int i, l;
	Asm* assem;
	PTE *pte, *pml4;
	uintptr va;
	uintmem hi, lo, mem, nextmem, pa;
#ifdef ConfCrap
	int cx;
#endif /* ConfCrap */

	assert(!((sys->vmunmapped|sys->vmend) & machp()->pgszmask[1]));

	if((pa = mmuphysaddr(sys->vmunused)) == ~0)
		panic("asmmeminit 1");
	pa += sys->vmunmapped - sys->vmunused;
	mem = asmalloc(pa, sys->vmend - sys->vmunmapped, 1, 0);
	if(mem != pa)
		panic("asmmeminit 2");
	DBG("pa %#llux mem %#llux\n", pa, mem);

	/* assume already 2MiB aligned */
	assert(ALIGNED(sys->vmunmapped, 2*MiB));
	pml4 = UINT2PTR(machp()->pml4->va);
	while(sys->vmunmapped < sys->vmend) {
		l = mmuwalk(pml4, sys->vmunmapped, 1, &pte, asmwalkalloc);
		DBG("%#p l %d\n", sys->vmunmapped, l);
		*pte = pa|PtePS|PteRW|PteP;
		sys->vmunmapped += 2*MiB;
		pa += 2*MiB;
	}

#ifdef ConfCrap
	cx = 0;
#endif /* ConfCrap */
	for(assem = asmlist; assem != nil; assem = assem->next) {
		if(assem->type != AsmMEMORY)
			continue;
		va = KSEG2+assem->addr;
		print("asm: addr %#P end %#P type %d size %P\n",
			assem->addr, assem->addr+assem->size,
			assem->type, assem->size);

		lo = assem->addr;
		hi = assem->addr+assem->size;
		/* Convert a range into pages */
		for(mem = lo; mem < hi; mem = nextmem) {
			nextmem = (mem + PGLSZ(0)) & ~machp()->pgszmask[0];

			/* Try large pages first */
			for(i = machp()->npgsz - 1; i >= 0; i--) {
				if((mem & machp()->pgszmask[i]) != 0)
					continue;
				if(mem + PGLSZ(i) > hi)
					continue;
				/* This page fits entirely within the range; mark it usable */
				if((l = mmuwalk(pml4, va, i, &pte, asmwalkalloc)) < 0)
					panic("asmmeminit 3");

				*pte = mem|PteRW|PteP;
				if(l > 0)
					*pte |= PtePS;

				nextmem = mem + PGLSZ(i);
				va += PGLSZ(i);
				npg[i]++;

				break;
			}
		}

#ifdef ConfCrap
		/*
		 * Fill in conf crap.
		 */
		if(cx >= nelem(conf.mem))
			continue;
		lo = ROUNDUP(assem->addr, PGSZ);
		//if(lo >= 600ull*MiB)
		//	continue;
		conf.mem[cx].base = lo;
		hi = ROUNDDN(hi, PGSZ);
		//if(hi > 600ull*MiB)
		//	hi = 600*MiB;
		conf.mem[cx].npage = (hi - lo)/PGSZ;
		conf.npage += conf.mem[cx].npage;
		print("cm %d: addr %#llux npage %lud\n",
			cx, conf.mem[cx].base, conf.mem[cx].npage);
		cx++;
#endif /* ConfCrap */
	}
	print("%d %d %d\n", npg[0], npg[1], npg[2]);

#ifdef ConfCrap
	/*
	 * Fill in more conf crap.
	 * This is why I hate Plan 9.
	 */
	conf.upages = conf.npage;
	i = (sys->vmend - sys->vmstart)/PGSZ;	/* close enough */
	conf.ialloc = (i/2)*PGSZ;
	print("npage %llud upage %lud kpage %d\n",
		conf.npage, conf.upages, i);
#endif /* ConfCrap */
}
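/*
 * The page-size choice made in the inner loop above, restated as
 * a stand-alone helper (hypothetical sketch, not used elsewhere):
 * pick the largest supported page size whose alignment mask mem
 * satisfies and that still fits below hi, or -1 if not even a
 * base page fits.
 */
static int
bestpgsz(uintmem mem, uintmem hi)
{
	int i;

	for(i = machp()->npgsz - 1; i >= 0; i--) {
		if((mem & machp()->pgszmask[i]) == 0 && mem + PGLSZ(i) <= hi)
			return i;
	}
	return -1;
}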
/*
 * Number of level-sized PTEs from va up to the next level+1
 * boundary, i.e. how many entries pmap() may fill in the current
 * table before it must walk again.
 */
static int
ptecount(uintptr va, int level)
{
	return (1<<PTSHIFT) - (va & (PGLSZ(level+1)-1)) / PGLSZ(level);
}
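/*
 * Worked example (assuming the usual x86-64 geometry, PTSHIFT 9,
 * so 512 entries per table): at level 0, a va lying 5 base pages
 * past a 2MiB boundary gives (va & (PGLSZ(1)-1))/PGLSZ(0) == 5,
 * so ptecount() returns 512 - 5 = 507, the number of 4KiB PTEs
 * remaining before the next PD entry.
 */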