/*
 * meminitpool - carve per-block slab and magazine header tables out of
 * the physical memory region [base, base + nb) and initialise the
 * memory pools.
 *
 * physpool: pool descriptor that receives the slab-header table
 *           (nblk, blktab).
 * base:     start address of the managed region.
 * nb:       size of the region in bytes.
 *
 * Returns the first address past the reserved header tables; that
 * address becomes the allocation base of both memvirtpool and
 * memphyspool.
 *
 * NOTE(review): the slab table goes through the physpool argument but
 * the magazine table is stored in the global memvirtpool -- confirm
 * this asymmetry is intentional (callers presumably pass
 * &memphyspool here).
 */
unsigned long
meminitpool(struct mempool *physpool, uintptr_t base, size_t nb)
{
    uintptr_t adr = base;
//  unsigned long sz = (nb & (MEMMIN - 1)) ? rounddownpow2(nb, MEMMIN) : nb;
    size_t    sz = nb;
    intptr_t  ofs = base & (MEMMIN - 1);    /* misalignment of base within a MEMMIN unit */
    size_t    nblk;
    size_t    hdrsz;

    /* round the start up to the next MEMMIN boundary and shrink the
       usable size by the amount skipped */
    if (ofs) {
        adr += MEMMIN - ofs;
        sz -= adr - base;
    }
    nblk = sz >> MEMMINLOG2;                /* # of MEMMIN-sized blocks managed */
    /* configure slab headers */
    hdrsz = nblk * sizeof(struct memslab);
    hdrsz = rounduppow2(hdrsz, PAGESIZE);   /* header table occupies whole pages */
#if (__KERNEL__)
    kprintf("MEM: reserved %lu bytes for %lu slab headers\n", hdrsz, nblk);
#endif
    /* map the slab header table (identity-mapped, present + writable)
       before the pool code touches it */
    vmmapseg((uint32_t *)&_pagetab, adr, adr, adr + hdrsz,
             PAGEPRES | PAGEWRITE);
    physpool->nblk = nblk;
    physpool->blktab = (void *)adr;
    adr += hdrsz;
//  kbzero((void *)adr, hdrsz);
    /* configure magazine headers */
    hdrsz = nblk * sizeof(struct memmag);
    hdrsz = rounduppow2(hdrsz, PAGESIZE);
#if (__KERNEL__)
    kprintf("MEM: reserved %lu bytes for %lu magazine headers\n", hdrsz, nblk);
#endif
    memvirtpool.nblk = nblk;
    memvirtpool.blktab = (void *)adr;
    vmmapseg((uint32_t *)&_pagetab, adr, adr, adr + hdrsz,
             PAGEPRES | PAGEWRITE);
//  kbzero((void *)adr, hdrsz);
    adr += hdrsz;
    /* both pools hand out memory starting just past the header tables */
    memvirtpool.base = adr;
    memphyspool.base = adr;
#if (__KERNEL__ && (MEMDIAG))
    /* NOTE(review): memdiag is passed the pool struct by value here;
       verify its signature -- other code calls memdiag with a pointer */
    memdiag(memvirtpool);
#endif

    return adr;
}
/*
 * called on a cpu other than 0 from cpureset in l.s,
 * from _vrst in lexception.s.
 * mmu and l1 (and system-wide l2) caches and coherency (smpon) are on,
 * but interrupts are disabled.
 * our mmu is using an exact copy of cpu0's l1 page table
 * as it was after userinit ran.
 *
 * Brings this secondary cpu fully on line and enters the scheduler;
 * never returns (panics if schedinit ever comes back).
 */
void
cpustart(void)
{
	int ms;
	ulong *evp;
	Power *pwr;

	up = nil;
	/* our machs bit already set means we started once before:
	 * this is a spurious reset, not a first boot */
	if (active.machs & (1<<m->machno)) {
		serialputc('?');
		serialputc('r');
		panic("cpu%d: resetting after start", m->machno);
	}
	assert(m->machno != 0);

	errata();
	cortexa9cachecfg();
	memdiag(&testmem);

	machinit();			/* bumps nmach, adds bit to machs */
	machoff(m->machno);		/* not ready to go yet */

	/* clock signals and scu are system-wide and already on */
	clockshutdown();		/* kill any watch-dog timer */

	trapinit();
	clockinit();			/* sets loop delay */
	timersinit();
	cpuidprint();

	/*
	 * notify cpu0 that we're up so it can proceed to l1diag:
	 * write our cpu number into the exception-vector word and
	 * force it out to memory.
	 */
	evp = (ulong *)soc.exceptvec;	/* magic */
	*evp = m->machno;
	coherence();

	l1diag();		/* contend with other cpus to verify sanity */

	/*
	 * pwr->noiopwr == 0
	 * pwr->detect == 0x1ff (default, all disabled)
	 */
	pwr = (Power *)soc.power;
	assert(pwr->gatests == MASK(7));	/* everything has power */

	/*
	 * 8169 has to initialise before we get past this, thus cpu0
	 * has to schedule processes first.
	 */
	if (Debug)
		iprint("cpu%d: waiting for 8169\n", m->machno);
	/* poll up to 5 s; invalidate our cached copy each pass so we
	 * observe cpu0's store to l1ptstable.word */
	for (ms = 0; !l1ptstable.word && ms < 5000; ms += 10) {
		delay(10);
		cachedinvse(&l1ptstable.word, sizeof l1ptstable.word);
	}
	if (!l1ptstable.word)
		iprint("cpu%d: 8169 unreasonably slow; proceeding\n", m->machno);
	/* now safe to copy cpu0's l1 pt in mmuinit */

	mmuinit();		/* update our l1 pt from cpu0's */
	fpon();
	machon(m->machno);	/* now ready to go and be scheduled */

	if (Debug)
		iprint("cpu%d: scheding\n", m->machno);
	schedinit();
	panic("cpu%d: schedinit returned", m->machno);
}