Example #1: asmwalkalloc
/*
 * Page-table page allocator passed to mmuwalk (see Example #5): carve
 * a PTSZ-sized, PTSZ-aligned page out of the mapped-but-unused kernel
 * VM window between sys->vmunused and sys->vmunmapped, and return its
 * physical address (~0 if the window is not mapped).
 */
static PTE
asmwalkalloc(usize size)
{
    uintmem pa;

    assert(size == PTSZ && sys->vmunused+size <= sys->vmunmapped);

    if(!ALIGNED(sys->vmunused, PTSZ)) {
        DBG("asmwalkalloc: %ulld wasted\n",
            ROUNDUP(sys->vmunused, PTSZ) - sys->vmunused);
        sys->vmunused = ROUNDUP(sys->vmunused, PTSZ);
    }
    if((pa = mmuphysaddr(sys->vmunused)) != ~0)
        sys->vmunused += size;

    return pa;
}
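
All of these examples rely on the same contract: mmuphysaddr(va) yields the physical address currently mapped at kernel virtual address va, and ~0 when there is no mapping. A tiny standalone model of that caller-side pattern (the identity map below is a toy assumption; the real function walks the page tables):

#include <stdio.h>

typedef unsigned long long uintmem;

/* Toy stand-in for the kernel's mmuphysaddr(): identity-map the first
 * MiB, return ~0 elsewhere, mimicking the mapped-or-~0 contract. */
static uintmem
mmuphysaddr(uintmem va)
{
    if(va < 1ULL<<20)
        return va;
    return ~(uintmem)0;
}

int
main(void)
{
    uintmem pa;

    /* the pattern used by asmwalkalloc, mmuinit and asmmeminit */
    if((pa = mmuphysaddr(0x9f000)) != ~(uintmem)0)
        printf("mapped: pa %#llx\n", pa);
    if(mmuphysaddr(2ULL<<20) == ~(uintmem)0)
        printf("unmapped: caller bails out or panics\n");
    return 0;
}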
Example #2: physallocinit
void
physallocinit(void)
{
	uintmem top, avail, base, size, lim, pa, lo, hi;
	RMapel *e;

	if(DBGFLG)
		rmapprint(&rmapram);
	avail = rmapsize(&rmapram);
	DBG("avail: %#P\n", avail);
	top = 0;
	for(e = rmapram.map; e != nil; e = e->next)
		top = e->addr + e->size;
	if(top > 4ull*GiB){
		/* separate buddy pools for RAM below and above 4GiB */
		physgig = bpoolcreate(MidK, MaxK, 4ull*GiB, top, alloc0);
		phys = bpoolcreate(MinK, MaxK, 0, 4ull*GiB, alloc0);
	}else
		phys = bpoolcreate(MinK, MaxK, 0, top, alloc0);
	pa = mmuphysaddr(sys->vmstart) + sys->pmunassigned;
	if(DBGFLG)
		rmapprint(&rmapram);
	DBG("pa lim: %#llux top %#llux\n", pa, top);
	while(rmapfirst(&rmapram, pa, &base, &size)){
		if(base >= 4ull*GiB)
			break;
		lim = base+size;
		if(lim > 4ull*GiB)
			lim = 4ull*GiB;
		lo = ROUNDUP(base, (1<<MinK));
		hi = ROUNDDN(lim, (1<<MinK));
		if(lo != hi){
			DBG("lo=%#llux hi=%#llux\n", lo, hi);
			pa = rmapalloc(&rmapram, lo, hi-lo, 0);
			if(pa == 0)
				break;
			physinitfree(lo, hi);
			sys->pmpaged += hi - lo;
		}
		/* step past this region; otherwise a remnant smaller
		 * than a minimum block would be found again and spin */
		pa = lim;
	}
	if(DBGFLG)
		physdump();
}
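
physallocinit() trims each free region to (1<<MinK)-byte boundaries before seeding the buddy pools, so fragments smaller than the minimum block size are left out. The rounding in isolation (the macro bodies and the MinK value of 12 are assumptions, matching the usual Plan 9 definitions):

#include <stdio.h>

/* Plan 9-style rounding macros, written out for illustration. */
#define ROUNDUP(x, a)	(((x)+(a)-1)&~((unsigned long long)(a)-1))
#define ROUNDDN(x, a)	((x)&~((unsigned long long)(a)-1))

int
main(void)
{
	int MinK = 12;	/* assumed minimum block: 4KiB (1<<12) */
	unsigned long long base = 0x9f001, lim = 0x100000;
	unsigned long long lo = ROUNDUP(base, 1ULL<<MinK);
	unsigned long long hi = ROUNDDN(lim, 1ULL<<MinK);

	/* prints lo=0xa0000 hi=0x100000: the aligned, usable slice */
	printf("lo=%#llx hi=%#llx\n", lo, hi);
	return 0;
}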
Example #3: mmuinit
void
mmuinit(void)
{
    uint8_t *p;
    Page *page;
    uint64_t o, pa, r, sz;

    archmmu();
    DBG("mach%d: %#p pml4 %#p npgsz %d\n", machp()->machno, machp(), machp()->MMU.pml4, sys->npgsz);

    if(machp()->machno != 0) {
        /* NIX: KLUDGE: Has to go when each mach is using
         * its own page table
         */
        p = UINT2PTR(machp()->stack);
        p += MACHSTKSZ;

        memmove(p, UINT2PTR(mach0pml4.va), PTSZ);
        machp()->MMU.pml4 = &machp()->MMU.pml4kludge;
        machp()->MMU.pml4->va = PTR2UINT(p);
        machp()->MMU.pml4->pa = PADDR(p);
        machp()->MMU.pml4->daddr = mach0pml4.daddr;	/* # of user mappings in pml4 */

        r = rdmsr(Efer);
        r |= Nxe;
        wrmsr(Efer, r);
        cr3put(machp()->MMU.pml4->pa);
        DBG("m %#p pml4 %#p\n", machp(), machp()->MMU.pml4);
        return;
    }

    page = &mach0pml4;
    page->pa = cr3get();
    page->va = PTR2UINT(KADDR(page->pa));

    machp()->MMU.pml4 = page;

    r = rdmsr(Efer);
    r |= Nxe;
    wrmsr(Efer, r);

    /*
     * Set up the various kernel memory allocator limits:
     * pmstart/pmend bound the unused physical memory;
     * vmstart/vmend bound the total possible virtual memory
     * used by the kernel;
     * vmunused is the highest virtual address currently mapped
     * and used by the kernel;
     * vmunmapped is the highest virtual address currently
     * mapped by the kernel.
     * Vmunused can be bumped up to vmunmapped before more
     * physical memory needs to be allocated and mapped.
     *
     * This is set up here so meminit can map appropriately.
     */
    o = sys->pmstart;
    sz = ROUNDUP(o, 4*MiB) - o;
    pa = asmalloc(0, sz, 1, 0);
    if(pa != o)
        panic("mmuinit: pa %#llux memstart %#llux\n", pa, o);
    sys->pmstart += sz;

    sys->vmstart = KSEG0;
    sys->vmunused = sys->vmstart + ROUNDUP(o, 4*KiB);
    sys->vmunmapped = sys->vmstart + o + sz;
    sys->vmend = sys->vmstart + TMFM;

    print("mmuinit: vmstart %#p vmunused %#p vmunmapped %#p vmend %#p\n",
          sys->vmstart, sys->vmunused, sys->vmunmapped, sys->vmend);

    /*
     * Set up the map for PD entry access by inserting
     * the relevant PDP entry into the PD. It's equivalent
     * to PADDR(sys->pd)|PteRW|PteP.
     *
     */
    sys->pd[PDX(PDMAP)] = sys->pdp[PDPX(PDMAP)] & ~(PteD|PteA);
    print("sys->pd %#p %#p\n", sys->pd[PDX(PDMAP)], sys->pdp[PDPX(PDMAP)]);
    assert((pdeget(PDMAP) & ~(PteD|PteA)) == (PADDR(sys->pd)|PteRW|PteP));

    dumpmmuwalk(KZERO);

    mmuphysaddr(PTR2UINT(end));
}
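
The limits established above obey a strict ordering, which is what makes the bump allocation in Example #1 safe: vmunused can chase vmunmapped, and vmunmapped can chase vmend. A sketch restating that ordering as a runnable check (the struct and its values are invented for illustration):

#include <assert.h>

/* Stand-in for the kernel's sys limits; the values are invented,
 * only the ordering matters (see the comment in mmuinit above). */
static struct {
    unsigned long long vmstart, vmunused, vmunmapped, vmend;
} sys = { 0, 0x400000, 0x800000, 0x40000000 };

int
main(void)
{
    assert(sys.vmstart <= sys.vmunused);    /* low end of kernel VM */
    assert(sys.vmunused <= sys.vmunmapped); /* asmwalkalloc's window */
    assert(sys.vmunmapped <= sys.vmend);    /* mapping can grow to here */
    return 0;
}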
Example #4: sipi
void
sipi(void)
{
	Apic *apic;
	Mach *mach;
	int apicno, i;
	u32int *sipiptr;
	uintmem sipipa;
	u8int *alloc, *p;
	extern void squidboy(int);

	/*
	 * Move the startup code into place;
	 * it must be properly aligned.
	 */
	sipipa = mmuphysaddr(SIPIHANDLER);
	if((sipipa & (4*KiB - 1)) || sipipa > (1*MiB - 2*4*KiB))
		return;
	sipiptr = UINT2PTR(SIPIHANDLER);
	memmove(sipiptr, sipihandler, sizeof(sipihandler));
	DBG("sipiptr %#p sipipa %#llux\n", sipiptr, sipipa);

	/*
	 * Notes:
	 * The Universal Startup Algorithm described in the MP Spec. 1.4.
	 * The data needed per-processor is the sum of the stack, page
	 * table pages, vsvm page and the Mach page. The layout is similar
	 * to that described in data.h for the bootstrap processor, but
	 * with any unused space elided.
	 */
	for(apicno = 0; apicno < Napic; apicno++){
		apic = &xlapic[apicno];
		if(!apic->useable || apic->addr || apic->machno == 0)
			continue;

		/*
		 * NOTE: for now, share the page tables with the
		 * bootstrap processor, until the lsipi code is worked out,
		 * so only the Mach and stack portions are used below.
		 */
		alloc = mallocalign(MACHSTKSZ+4*PTSZ+4*KiB+MACHSZ, 4096, 0, 0);
		if(alloc == nil)
			continue;
		memset(alloc, 0, MACHSTKSZ+4*PTSZ+4*KiB+MACHSZ);
		p = alloc+MACHSTKSZ;

		sipiptr[-1] = mmuphysaddr(PTR2UINT(p));
		DBG("p %#p sipiptr[-1] %#ux\n", p, sipiptr[-1]);

		p += 4*PTSZ+4*KiB;

		/*
		 * Committed. If the AP startup fails, can't safely
		 * release the resources, who knows what mischief
		 * the AP is up to. Perhaps should try to put it
		 * back into the INIT state?
		 */
		mach = (Mach*)p;
		mach->machno = apic->machno;		/* NOT one-to-one... */
		mach->splpc = PTR2UINT(squidboy);
		mach->apicno = apicno;
		mach->stack = PTR2UINT(alloc);
		mach->vsvm = alloc+MACHSTKSZ+4*PTSZ;
//OH OH		mach->pml4 = (PTE*)(alloc+MACHSTKSZ);

		/*
		 * Warm-reset vector (part of the MP Spec 1.4 universal
		 * startup algorithm): when the CMOS shutdown status byte
		 * (register 0x0f, written below) is 0x0a, the BIOS jumps
		 * through the real-mode far pointer at physical 0x467
		 * (40:67); park the SIPI handler address there.
		 */
		p = KADDR(0x467);
		*p++ = sipipa;		/* offset, low byte */
		*p++ = sipipa>>8;	/* offset, high byte */
		*p++ = 0;		/* segment 0 */
		*p = 0;

		nvramwrite(0x0f, 0x0a);	/* shutdown code: warm reset via 40:67 */
		apicsipi(apicno, sipipa);

		for(i = 0; i < 1000; i++){
			if(mach->splpc == 0)
				break;
			millidelay(5);
		}
		nvramwrite(0x0f, 0x00);

		DBG("mach %#p (%#p) apicid %d machno %2d %dMHz\n",
			mach, sys->machptr[mach->machno],
			apicno, mach->machno, mach->cpumhz);
	}
}
Example #5: asmmeminit
void
asmmeminit(void)
{
    Proc *up = externup();
    int i, l;
    Asm* assem;
    PTE *pte, *pml4;
    uintptr va;
    uintmem hi, lo, mem, nextmem, pa;
#ifdef ConfCrap
    int cx;
#endif /* ConfCrap */

    assert(!((sys->vmunmapped|sys->vmend) & machp()->pgszmask[1]));

    if((pa = mmuphysaddr(sys->vmunused)) == ~0)
        panic("asmmeminit 1");
    pa += sys->vmunmapped - sys->vmunused;
    mem = asmalloc(pa, sys->vmend - sys->vmunmapped, 1, 0);
    if(mem != pa)
        panic("asmmeminit 2");
    DBG("pa %#llux mem %#llux\n", pa, mem);

    /* assume already 2MiB aligned */
    assert(ALIGNED(sys->vmunmapped, 2*MiB));
    pml4 = UINT2PTR(machp()->pml4->va);
    while(sys->vmunmapped < sys->vmend) {
        l = mmuwalk(pml4, sys->vmunmapped, 1, &pte, asmwalkalloc);
        DBG("%#p l %d\n", sys->vmunmapped, l);
        *pte = pa|PtePS|PteRW|PteP;
        sys->vmunmapped += 2*MiB;
        pa += 2*MiB;
    }

#ifdef ConfCrap
    cx = 0;
#endif /* ConfCrap */
    for(assem = asmlist; assem != nil; assem = assem->next) {
        if(assem->type != AsmMEMORY)
            continue;
        va = KSEG2+assem->addr;
        print("asm: addr %#P end %#P type %d size %P\n",
              assem->addr, assem->addr+assem->size,
              assem->type, assem->size);

        lo = assem->addr;
        hi = assem->addr+assem->size;
        /* Convert a range into pages */
        for(mem = lo; mem < hi; mem = nextmem) {
            nextmem = (mem + PGLSZ(0)) & ~machp()->pgszmask[0];

            /* Try large pages first */
            for(i = machp()->npgsz - 1; i >= 0; i--) {
                if((mem & machp()->pgszmask[i]) != 0)
                    continue;
                if(mem + PGLSZ(i) > hi)
                    continue;
                /* This page fits entirely within the range. */
                /* Mark it as usable */
                if((l = mmuwalk(pml4, va, i, &pte, asmwalkalloc)) < 0)
                    panic("asmmeminit 3");

                *pte = mem|PteRW|PteP;
                if(l > 0)
                    *pte |= PtePS;

                nextmem = mem + PGLSZ(i);
                va += PGLSZ(i);
                npg[i]++;

                break;
            }
        }

#ifdef ConfCrap
        /*
         * Fill in conf crap.
         */
        if(cx >= nelem(conf.mem))
            continue;
        lo = ROUNDUP(assem->addr, PGSZ);
//if(lo >= 600ull*MiB)
//    continue;
        conf.mem[cx].base = lo;
        hi = ROUNDDN(hi, PGSZ);
//if(hi > 600ull*MiB)
//  hi = 600*MiB;
        conf.mem[cx].npage = (hi - lo)/PGSZ;
        conf.npage += conf.mem[cx].npage;
        print("cm %d: addr %#llux npage %lud\n",
              cx, conf.mem[cx].base, conf.mem[cx].npage);
        cx++;
#endif /* ConfCrap */
    }
    print("%d %d %d\n", npg[0], npg[1], npg[2]);

#ifdef ConfCrap
    /*
     * Fill in more conf crap.
     * This is why I hate Plan 9.
     */
    conf.upages = conf.npage;
    i = (sys->vmend - sys->vmstart)/PGSZ;		/* close enough */
    conf.ialloc = (i/2)*PGSZ;
    print("npage %llud upage %lud kpage %d\n",
          conf.npage, conf.upages, i);

#endif /* ConfCrap */
}
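
The inner loop of asmmeminit() is a generic largest-page-first fitter: at each address it picks the biggest page size that is both aligned there and ends at or below the range's limit. The same selection logic in isolation (a standalone sketch; the x86-64 sizes of 4KiB/2MiB/1GiB are assumed here, whereas the kernel's real list is whatever archmmu() discovered):

#include <stdio.h>

/* Assumed page sizes: 4KiB, 2MiB, 1GiB. */
static const unsigned long long pgsz[] = {
    1ULL<<12, 1ULL<<21, 1ULL<<30,
};

/* Largest page that is aligned at mem and ends at or below hi;
 * mirrors the selection asmmeminit's inner loop makes. */
static int
pickpgsz(unsigned long long mem, unsigned long long hi)
{
    int i;

    for(i = 2; i >= 0; i--){
        if(mem & (pgsz[i]-1))       /* misaligned for this size */
            continue;
        if(mem + pgsz[i] > hi)      /* would overrun the range */
            continue;
        return i;
    }
    return -1;                      /* not even a 4KiB page fits */
}

int
main(void)
{
    printf("%d\n", pickpgsz(2ULL<<20, 5ULL<<20)); /* 1: a 2MiB page fits */
    printf("%d\n", pickpgsz(4ULL<<20, 5ULL<<20)); /* 0: only 4KiB fits */
    return 0;
}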