void asminit(void) { sys->pmstart = ROUNDUP(PADDR(end), PGSZ); sys->pmend = sys->pmstart; asmalloc(0, sys->pmstart, AsmNONE, 0); }
/*
 * Insert a typed region into the asm map.  The region must first be
 * carved out of the free (AsmNONE) space; it is then re-entered with
 * its proper type.  If retyping fails, nothing further is done; if the
 * final bookkeeping free fails we have already recorded the region.
 */
static void
asminsert(uintmem addr, uintmem size, int type)
{
	if(type == AsmNONE)
		return;			/* nothing to record */
	if(asmalloc(addr, size, AsmNONE, 0) == 0)
		return;			/* range not free: overlap */
	if(asmfree(addr, size, type) == 0)
		return;			/* retype failed */
	asmfree(addr, size, 0);
}
/*
 * Account for a boot module loaded at [start, end): if it lies above
 * the current free-memory floor, reserve the gap up to the rounded
 * module end so the allocator skips over it.
 * Modules below sys->pmstart are already inside the reserved kernel
 * image and need no action.
 */
void
asmmodinit(uint32_t start, uint32_t end, char* s)
{
	DBG("asmmodinit: %#ux -> %#ux: <%s> %#ux\n",
		start, end, s, ROUNDUP(end, 4096));

	if(start < sys->pmstart)
		return;
	end = ROUNDUP(end, 4096);
	if(end <= sys->pmstart)
		return;
	asmalloc(sys->pmstart, end - sys->pmstart, AsmNONE, 0);
	sys->pmstart = end;
}
Proc* setupseg(int core) { Mach *m = machp(); Segment *s; uintptr_t ka; Proc *p; static Pgrp *kpgrp; Segment *tseg; int sno; // XXX: we're going to need this for locality domains. USED(core); p = newproc(); p->psstate = 0; p->procmode = 0640; p->kp = 1; p->noswap = 1; p->scallnr = m->externup->scallnr; memmove(p->arg, m->externup->arg, sizeof(m->externup->arg)); p->nerrlab = 0; p->slash = m->externup->slash; p->dot = m->externup->dot; if(p->dot) incref(p->dot); memmove(p->note, m->externup->note, sizeof(p->note)); p->nnote = m->externup->nnote; p->notified = 0; p->lastnote = m->externup->lastnote; p->notify = m->externup->notify; p->ureg = 0; p->dbgreg = 0; kstrdup(&p->user, eve); if(kpgrp == 0) kpgrp = newpgrp(); p->pgrp = kpgrp; incref(kpgrp); memset(p->time, 0, sizeof(p->time)); p->time[TReal] = sys->ticks; procpriority(p, PriKproc, 0); // XXX: kluge 4 pages of address space for this. // how will it expand up? gives us <50 kprocs as is. /* * we create the color and core at allocation time, not execution. This * is probably not the best idea but it's a start. */ sno = 0; // XXX: now that we are asmalloc we are no long proc. /* Stack */ ka = (uintptr_t)KADDR(asmalloc(0, BIGPGSZ, AsmMEMORY, 1)); tseg = newseg(SG_STACK|SG_READ|SG_WRITE, ka, 1); tseg = p->seg[sno++]; ka = (uintptr_t)KADDR(asmalloc(0, BIGPGSZ, AsmMEMORY, 1)); s = newseg(SG_TEXT|SG_READ|SG_EXEC, ka, 1); p->seg[sno++] = s; // s->color = acpicorecolor(core); /* Data. Shared. */ // XXX; Now that the address space is all funky how are we going to handle shared data segments? ka = (uintptr_t)KADDR(asmalloc(0, BIGPGSZ, AsmMEMORY, 2)); s = newseg(SG_DATA|SG_READ|SG_WRITE, ka, 1); p->seg[sno++] = s; s->color = tseg->color; /* BSS. Uses asm from data map. */ p->seg[sno++] = newseg(SG_BSS|SG_READ|SG_WRITE, ka+BIGPGSZ, 1); p->seg[sno++]->color= tseg->color; nixprepage(-1); return p; }
/*
 * Per-CPU MMU bring-up.  CPU0 adopts the boot PML4 and establishes the
 * kernel virtual-memory allocator limits; secondary CPUs copy CPU0's
 * PML4 onto the top of their own stack (a kludge until each mach has a
 * private page table) and load it.  Both paths enable no-execute (NXE)
 * support in EFER.
 */
void
mmuinit(void)
{
	uint8_t *p;
	Page *page;
	uint64_t o, pa, r, sz;

	archmmu();
	DBG("mach%d: %#p pml4 %#p npgsz %d\n", machp()->machno, machp(), machp()->MMU.pml4, sys->npgsz);

	if(machp()->machno != 0){
		/* NIX: KLUDGE: Has to go when each mach is using
		 * its own page table */
		/* Place a private copy of mach0's PML4 at the top of this
		 * CPU's stack and switch CR3 to it. */
		p = UINT2PTR(machp()->stack);
		p += MACHSTKSZ;
		memmove(p, UINT2PTR(mach0pml4.va), PTSZ);
		machp()->MMU.pml4 = &machp()->MMU.pml4kludge;
		machp()->MMU.pml4->va = PTR2UINT(p);
		machp()->MMU.pml4->pa = PADDR(p);
		machp()->MMU.pml4->daddr = mach0pml4.daddr;	/* # of user mappings in pml4 */

		/* Enable the NX page-protection bit. */
		r = rdmsr(Efer);
		r |= Nxe;
		wrmsr(Efer, r);
		cr3put(machp()->MMU.pml4->pa);
		DBG("m %#p pml4 %#p\n", machp(), machp()->MMU.pml4);
		return;
	}

	/* CPU0: adopt the page table the bootstrap code loaded. */
	page = &mach0pml4;
	page->pa = cr3get();
	page->va = PTR2UINT(KADDR(page->pa));
	machp()->MMU.pml4 = page;

	r = rdmsr(Efer);
	r |= Nxe;
	wrmsr(Efer, r);

	/*
	 * Set up the various kernel memory allocator limits:
	 * pmstart/pmend bound the unused physical memory;
	 * vmstart/vmend bound the total possible virtual memory
	 * used by the kernel;
	 * vmunused is the highest virtual address currently mapped
	 * and used by the kernel;
	 * vmunmapped is the highest virtual address currently
	 * mapped by the kernel.
	 * Vmunused can be bumped up to vmunmapped before more
	 * physical memory needs to be allocated and mapped.
	 *
	 * This is set up here so meminit can map appropriately.
	 */
	/* Round pmstart up to a 4MiB boundary, reserving the gap. */
	o = sys->pmstart;
	sz = ROUNDUP(o, 4*MiB) - o;
	pa = asmalloc(0, sz, 1, 0);
	if(pa != o)
		panic("mmuinit: pa %#llux memstart %#llux\n", pa, o);
	sys->pmstart += sz;

	sys->vmstart = KSEG0;
	sys->vmunused = sys->vmstart + ROUNDUP(o, 4*KiB);
	sys->vmunmapped = sys->vmstart + o + sz;
	sys->vmend = sys->vmstart + TMFM;

	print("mmuinit: vmstart %#p vmunused %#p vmunmapped %#p vmend %#p\n",
		sys->vmstart, sys->vmunused, sys->vmunmapped, sys->vmend);

	/*
	 * Set up the map for PD entry access by inserting
	 * the relevant PDP entry into the PD. It's equivalent
	 * to PADDR(sys->pd)|PteRW|PteP.
	 */
	sys->pd[PDX(PDMAP)] = sys->pdp[PDPX(PDMAP)] & ~(PteD|PteA);
	print("sys->pd %#p %#p\n", sys->pd[PDX(PDMAP)], sys->pdp[PDPX(PDMAP)]);
	assert((pdeget(PDMAP) & ~(PteD|PteA)) == (PADDR(sys->pd)|PteRW|PteP));

	dumpmmuwalk(KZERO);

	/* Warm the walk cache for the kernel image's highest address. */
	mmuphysaddr(PTR2UINT(end));
}
void asmmeminit(void) { Proc *up = externup(); int i, l; Asm* assem; PTE *pte, *pml4; uintptr va; uintmem hi, lo, mem, nextmem, pa; #ifdef ConfCrap int cx; #endif /* ConfCrap */ assert(!((sys->vmunmapped|sys->vmend) & machp()->pgszmask[1])); if((pa = mmuphysaddr(sys->vmunused)) == ~0) panic("asmmeminit 1"); pa += sys->vmunmapped - sys->vmunused; mem = asmalloc(pa, sys->vmend - sys->vmunmapped, 1, 0); if(mem != pa) panic("asmmeminit 2"); DBG("pa %#llux mem %#llux\n", pa, mem); /* assume already 2MiB aligned*/ assert(ALIGNED(sys->vmunmapped, 2*MiB)); pml4 = UINT2PTR(machp()->pml4->va); while(sys->vmunmapped < sys->vmend) { l = mmuwalk(pml4, sys->vmunmapped, 1, &pte, asmwalkalloc); DBG("%#p l %d\n", sys->vmunmapped, l); *pte = pa|PtePS|PteRW|PteP; sys->vmunmapped += 2*MiB; pa += 2*MiB; } #ifdef ConfCrap cx = 0; #endif /* ConfCrap */ for(assem = asmlist; assem != nil; assem = assem->next) { if(assem->type != AsmMEMORY) continue; va = KSEG2+assem->addr; print("asm: addr %#P end %#P type %d size %P\n", assem->addr, assem->addr+assem->size, assem->type, assem->size); lo = assem->addr; hi = assem->addr+assem->size; /* Convert a range into pages */ for(mem = lo; mem < hi; mem = nextmem) { nextmem = (mem + PGLSZ(0)) & ~machp()->pgszmask[0]; /* Try large pages first */ for(i = m->npgsz - 1; i >= 0; i--) { if((mem & machp()->pgszmask[i]) != 0) continue; if(mem + PGLSZ(i) > hi) continue; /* This page fits entirely within the range. */ /* Mark it a usable */ if((l = mmuwalk(pml4, va, i, &pte, asmwalkalloc)) < 0) panic("asmmeminit 3"); *pte = mem|PteRW|PteP; if(l > 0) *pte |= PtePS; nextmem = mem + PGLSZ(i); va += PGLSZ(i); npg[i]++; break; } } #ifdef ConfCrap /* * Fill in conf crap. 
*/ if(cx >= nelem(conf.mem)) continue; lo = ROUNDUP(assem->addr, PGSZ); //if(lo >= 600ull*MiB) // continue; conf.mem[cx].base = lo; hi = ROUNDDN(hi, PGSZ); //if(hi > 600ull*MiB) // hi = 600*MiB; conf.mem[cx].npage = (hi - lo)/PGSZ; conf.npage += conf.mem[cx].npage; print("cm %d: addr %#llux npage %lud\n", cx, conf.mem[cx].base, conf.mem[cx].npage); cx++; #endif /* ConfCrap */ } print("%d %d %d\n", npg[0], npg[1], npg[2]); #ifdef ConfCrap /* * Fill in more conf crap. * This is why I hate Plan 9. */ conf.upages = conf.npage; i = (sys->vmend - sys->vmstart)/PGSZ; /* close enough */ conf.ialloc = (i/2)*PGSZ; print("npage %llud upage %lud kpage %d\n", conf.npage, conf.upages, i); #endif /* ConfCrap */ }