Example #1
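// Walk p's page directory and print every present, user-accessible, accessed
// mapping, recording its VPN, PPN and permission bits along the way.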
void printmem(struct proc *p)
{
	int i;
	int j;
	int t=0;
	pde_t* pgdir = p->pgdir;
	pte_t* pt;
	int vpn[1024];
	int ppn[1024];
	char read_only[1024];
	char shared[1024];

	cprintf("Page Tables:\n  memory location of page directory = %x\n",v2p(pgdir));
	for(i=0;i<NPDENTRIES;i++)
	{
		if((pgdir[i] & PTE_P) && (pgdir[i] & PTE_U) && (pgdir[i] & PTE_A))
		{
			pt = (pte_t*)p2v(PTE_ADDR(pgdir[i]));
			cprintf("  pdir PTE %d, %d\n",i,PPN(pgdir[i]));
			cprintf("    memory location of page table = %x\n", v2p(pt));
			for(j=0;j<NPTENTRIES;j++)
			{
				if((pt[j] & PTE_P) && (pt[j] & PTE_U) && (pt[j] & PTE_A))
				{
					cprintf("    ptbl PTE %d, %d, %x\n", j, PPN(pt[j]), (PTE_ADDR(pt[j])));
					read_only[t]= pt[j] & PTE_W;
					shared[t] = (pt[j] & PTE_S)>>8;
					vpn[t]=i<<10|j;
					ppn[t]=PPN(pt[j]);
					t++;
				}
			}
		}
	}
}
Example #2
int
mmuwalk(PTE* pml4, uintptr_t va, int level, PTE** ret,
        uint64_t (*alloc)(usize))
{
    int l;
    uintmem pa;
    PTE *pte;

    Mpl pl;

    pl = splhi();
    if(DBGFLG > 1)
        DBG("mmuwalk%d: va %#p level %d\n", machp()->machno, va, level);
    pte = &pml4[PTLX(va, 3)];
    for(l = 3; l >= 0; l--) {
        if(l == level)
            break;
        if(!(*pte & PteP)) {
            if(alloc == nil)
                break;
            pa = alloc(PTSZ);
            if(pa == ~0)
                return -1;
            memset(UINT2PTR(KADDR(pa)), 0, PTSZ);
            *pte = pa|PteRW|PteP;
        }
        else if(*pte & PtePS)
            break;
        pte = UINT2PTR(KADDR(PPN(*pte)));
        pte += PTLX(va, l-1);
    }
    *ret = pte;
    splx(pl);
    return l;
}
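A minimal usage sketch (an assumption, not part of the source above): installing a single 4 KiB leaf mapping with this walker. pgalloc() is a hypothetical page-granular allocator; the flag names are the ones used in the example.

static int
mapone(PTE* pml4, uintptr_t va, uintmem pa, uint64_t (*pgalloc)(usize))
{
    PTE *pte;

    /* walk (and, via pgalloc, build) the tables down to level 0 */
    if(mmuwalk(pml4, va, 0, &pte, pgalloc) != 0)
        return -1;      /* allocation failed, or stopped early on a large page */
    *pte = pa|PteRW|PteP;
    return 0;
}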
Example #3
/* Copy the page tables and set the flags of both PTEs.
 * Note: for copy-on-write, both the parent's and the child's mappings are
 * marked write-protected, and the reference count of each shared physical
 * page is incremented.
 */
int pt_copy(struct pde *npgd, struct pde *opgd){
    struct pde *opde, *npde;
    struct pte *opte, *npte, *old_pt, *new_pt;
    struct page *pg;
    uint pdn, pn;

    for(pdn=PDX(KMEM_END); pdn<1024; pdn++) {
        opde = &opgd[pdn];
        npde = &npgd[pdn];
        npde->pd_flag = opde->pd_flag;
        if (opde->pd_flag & PTE_P) {
            old_pt = (struct pte*)(opde->pd_off * PAGE);
            new_pt = (struct pte*)kmalloc(PAGE);
            npde->pd_off = PPN(new_pt);
            for(pn=0; pn<1024; pn++){
                opte = &old_pt[pn];
                npte = &new_pt[pn];
                npte->pt_off  = opte->pt_off;
                npte->pt_flag = opte->pt_flag;
                if (opte->pt_flag & PTE_P) {
                    // turn off each pte's PTE_W
                    npte->pt_flag &= ~PTE_W;
                    opte->pt_flag &= ~PTE_W;
                    // increase the ref count
                    pg = pgfind(opte->pt_off);
                    pg->pg_count++;
                }
            }
        }
    }
    return 0;
}
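The comment above covers only the fork-time half of the scheme; a hedged sketch (not from this project) of the matching write-fault path that undoes the protection might look as follows. pte_of(), pg_copy() and flush_tlb_one() are hypothetical helpers.

int cow_fault(struct pde *pgd, uint va){
    struct pte *pte;
    struct page *pg;

    pte = pte_of(pgd, va);              // hypothetical: walk pgd to the pte for va
    if (pte == NULL || !(pte->pt_flag & PTE_P))
        return -1;                      // not a copy-on-write fault
    pg = pgfind(pte->pt_off);
    if (pg->pg_count > 1) {
        // still shared: give this process a private copy of the frame
        pte->pt_off = pg_copy(pg);      // hypothetical: allocate a frame, copy pg, return its page number
        pg->pg_count--;
    }
    pte->pt_flag |= PTE_W;              // sole owner now, re-enable writes
    flush_tlb_one(va);                  // hypothetical: drop the stale TLB entry
    return 0;
}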
Example #4
void
free(void *v)
{
    uint8_t *c;
    uint32_t *ref;

    if (v == 0)
        return;
    assert(mbegin <= (uint8_t*) v && (uint8_t*) v < mend);

    c = ROUNDDOWN(v, PGSIZE);

    while (vpt[PPN(c)] & PTE_CONTINUED) {
        sys_page_unmap(0, c);
        c += PGSIZE;
        assert(mbegin <= c && c < mend);
    }

    /*
     * c is just a piece of this page, so dec the ref count
     * and maybe free the page.
     */
    ref = (uint32_t*) (c + PGSIZE - 4);
    if (--(*ref) == 0)
        sys_page_unmap(0, c);
}
Example #5
void
mmuswitch(Proc* proc)
{
    PTE *pte;
    Page *page;
    Mpl pl;

    pl = splhi();
    if(proc->newtlb) {
        /*
         * NIX: We cannot clear our page tables if they are going to
         * be used in the AC
         */
        if(proc->ac == nil)
            mmuptpfree(proc, 1);
        proc->newtlb = 0;
    }

    if(machp()->MMU.pml4->daddr) {
        memset(UINT2PTR(machp()->MMU.pml4->va), 0, machp()->MMU.pml4->daddr*sizeof(PTE));
        machp()->MMU.pml4->daddr = 0;
    }

    pte = UINT2PTR(machp()->MMU.pml4->va);
    for(page = proc->MMU.mmuptp[3]; page != nil; page = page->next) {
        pte[page->daddr] = PPN(page->pa)|PteU|PteRW|PteP;
        if(page->daddr >= machp()->MMU.pml4->daddr)
            machp()->MMU.pml4->daddr = page->daddr+1;
        page->prev = machp()->MMU.pml4;
    }

    tssrsp0(machp(), STACKALIGN(PTR2UINT(proc->kstack+KSTACK)));
    cr3put(machp()->MMU.pml4->pa);
    splx(pl);
}
Example #6
void
fs_test(void)
{
	struct File *f;
	int r;
	char *blk;
	uint32_t *bits;

	// back up bitmap
	if ((r = sys_page_alloc(0, (void*) PGSIZE, PTE_P|PTE_U|PTE_W)) < 0)
		panic("sys_page_alloc: %e", r);
	bits = (uint32_t*) PGSIZE;
	memmove(bits, bitmap, PGSIZE);
	// allocate block
	if ((r = alloc_block()) < 0)
		panic("alloc_block: %e", r);
	// check that block was free
	assert(bits[r/32] & (1 << (r%32)));
	// and is not free any more
	assert(!(bitmap[r/32] & (1 << (r%32))));
	cprintf("alloc_block is good\n");

	if ((r = file_open("/not-found", &f)) < 0 && r != -E_NOT_FOUND)
		panic("file_open /not-found: %e", r);
	else if (r == 0)
		panic("file_open /not-found succeeded!");
	if ((r = file_open("/newmotd", &f)) < 0)
		panic("file_open /newmotd: %e", r);
	cprintf("file_open is good\n");

	if ((r = file_get_block(f, 0, &blk)) < 0)
		panic("file_get_block: %e", r);
	if (strcmp(blk, msg) != 0)
		panic("file_get_block returned wrong data");
	cprintf("file_get_block is good\n");

	*(volatile char*)blk = *(volatile char*)blk;
	assert((vpt[PPN(blk)] & PTE_D));
	file_flush(f);
	assert(!(vpt[PPN(blk)] & PTE_D));
	cprintf("file_flush is good\n");

	if ((r = file_set_size(f, 0)) < 0)
		panic("file_set_size: %e", r);
	assert(f->f_direct[0] == 0);
	assert(!(vpt[PPN(f)] & PTE_D));
	cprintf("file_truncate is good\n");

	if ((r = file_set_size(f, strlen(msg))) < 0)
		panic("file_set_size 2: %e", r);
	assert(!(vpt[PPN(f)] & PTE_D));
	if ((r = file_get_block(f, 0, &blk)) < 0)
		panic("file_get_block 2: %e", r);
	strcpy(blk, msg);
	assert((vpt[PPN(blk)] & PTE_D));
	file_flush(f);
	assert(!(vpt[PPN(blk)] & PTE_D));
	assert(!(vpt[PPN(f)] & PTE_D));
	cprintf("file rewrite is good\n");
}
Example #7
/*
 * Double-check the user MMU.
 * Error checking only.
 */
void
checkmmu(uintptr va, uintptr pa)
{
	uintptr *pte;

	pte = mmuwalk(m->pml4, va, 0, 0);
	if(pte != 0 && (*pte & PTEVALID) != 0 && PPN(*pte) != pa)
		print("%ld %s: va=%#p pa=%#p pte=%#p\n",
			up->pid, up->text, va, pa, *pte);
}
Example #8
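// Return the reference count of the physical page backing va, or 0 if va
// is not mapped.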
int
pageref(void *v)
{
	pte_t pte;

	if (!(vpd[PDX(v)] & PTE_P))
		return 0;
	pte = vpt[VPN(v)];
	if (!(pte & PTE_P))
		return 0;
	return atomic_read(&pages[PPN(pte)].pp_ref);
}
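A hedged usage sketch (an assumption, not taken from the lab sources): a user-level helper built on pageref() that gives the caller a private copy of a shared page, using the JOS system calls and the PFTEMP scratch address from memlayout.h.

static int
unshare_page(void *va)
{
	int r;
	void *pg = ROUNDDOWN(va, PGSIZE);

	if (pageref(pg) <= 1)
		return 0;				// already private
	if ((r = sys_page_alloc(0, (void*) PFTEMP, PTE_P|PTE_U|PTE_W)) < 0)
		return r;
	memmove((void*) PFTEMP, pg, PGSIZE);		// copy the shared contents
	if ((r = sys_page_map(0, (void*) PFTEMP, 0, pg, PTE_P|PTE_U|PTE_W)) < 0)
		return r;
	return sys_page_unmap(0, (void*) PFTEMP);	// drop the scratch mapping
}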
Example #9
int
pageref(void *v)
{
	u_int pte;

	if (!((* vpd)[PDX(v)]&PTE_V)) {
		return 0;
	}

	pte = (* vpt)[VPN(v)];

	if (!(pte & PTE_V)) {
		return 0;
	}

	return pages[PPN(pte)].pp_ref;
}
Example #10
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	if(proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	for(page = proc->mmul2; page != nil; page = page->next){
		x = page->daddr;
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo < x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();

	//print("mmuswitch l1lo %d l1hi %d %d\n",
	//	m->mmul1lo, m->mmul1hi, proc->kp);
}
Example #11
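/*
 * Map the physical range [pa, pa+size) at virtual address va in the page
 * table rooted at pml4; the low bits of pa carry the PTE flags, and 2MB
 * pages are used where size and alignment allow.
 */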
void
pmap(uintptr *pml4, uintptr pa, uintptr va, vlong size)
{
	uintptr *pte, *ptee, flags;
	int z, l;

	if(size <= 0 || va < VMAP)
		panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
	flags = pa;
	pa = PPN(pa);
	flags -= pa;
	if(va >= KZERO)
		flags |= PTEGLOBAL;
	while(size > 0){
		if(size >= PGLSZ(1) && (va % PGLSZ(1)) == 0)
			flags |= PTESIZE;
		l = (flags & PTESIZE) != 0;
		z = PGLSZ(l);
		pte = mmuwalk(pml4, va, l, 1);
		if(pte == 0){
			pte = mmuwalk(pml4, va, ++l, 0);
			if(pte && (*pte & PTESIZE)){
				flags |= PTESIZE;
				z = va & (PGLSZ(l)-1);
				va -= z;
				pa -= z;
				size += z;
				continue;
			}
			panic("pmap: pa=%#p va=%#p size=%lld", pa, va, size);
		}
		ptee = pte + ptecount(va, l);
		while(size > 0 && pte < ptee){
			*pte++ = pa | flags;
			pa += z;
			va += z;
			size -= z;
		}
	}
}
Example #12
void
serve(void)
{
    uint32_t req, whom;
    int perm, r;
    void *pg;

    while (1) {
        perm = 0;
        req = ipc_recv((int32_t *) &whom, fsreq, &perm);
        if (debug)
            cprintf("fs req %d from %08x [page %08x: %s]\n",
                    req, whom, vpt[PPN(fsreq)], fsreq);

        // All requests must contain an argument page
        if (!(perm & PTE_P)) {
            cprintf("Invalid request from %08x: no argument page\n",
                    whom);
            continue; // just leave it hanging...
        }

        pg = NULL;
        if (req == FSREQ_OPEN) {
            r = serve_open(whom, (struct Fsreq_open*)fsreq, &pg, &perm);
        } else if (req < NHANDLERS && handlers[req]) {
            r = handlers[req](whom, fsreq);
        } else {
            cprintf("Invalid request code %d from %08x\n", whom, req);
            r = -E_INVAL;
        }

        ipc_send(whom, r, pg, perm);
        if(debug)
            cprintf("FS: Sent response %d to %x\n", r, whom);
        sys_page_unmap(0, fsreq);
    }
}
Example #13
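/*
 * Return a pointer to the page-table entry for va at the requested level,
 * walking down from the top-level table and creating missing intermediate
 * tables when create is set; returns 0 on a large-page entry, or when a
 * table is missing and create is 0.
 */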
uintptr*
mmuwalk(uintptr* table, uintptr va, int level, int create)
{
	uintptr pte;
	int i, x;

	x = PTLX(va, 3);
	for(i = 2; i >= level; i--){
		pte = table[x];
		if(pte & PTEVALID){
			if(pte & PTESIZE)
				return 0;
			table = KADDR(PPN(pte));
		} else {
			if(!create)
				return 0;
			table = mmucreate(table, va, i, x);
		}
		x = PTLX(va, i);
	}
	return &table[x];
}
Example #14
void
dumpptepg(int lvl, uintptr_t pa)
{
    PTE *pte;
    int tab, i;

    tab = 4 - lvl;
    pte = UINT2PTR(KADDR(pa));
    for(i = 0; i < PTSZ/sizeof(PTE); i++)
        if(pte[i] & PteP) {
            tabs(tab);
            print("l%d %#p[%#05x]: %#ullx\n", lvl, pa, i, pte[i]);

            /* skip kernel mappings */
            if((pte[i]&PteU) == 0) {
                tabs(tab+1);
                print("...kern...\n");
                continue;
            }
            if(lvl > 2)
                dumpptepg(lvl-1, PPN(pte[i]));
        }
}
Example #15
ulong*
mmuwalk(ulong* pdb, ulong va, int level, int create)
{
	ulong pa, *table;

	/*
	 * Walk the page-table pointed to by pdb and return a pointer
	 * to the entry for virtual address va at the requested level.
	 * If the entry is invalid and create isn't requested then bail
	 * out early. Otherwise, for the 2nd level walk, allocate a new
	 * page-table page and register it in the 1st level.
	 */
	table = &pdb[PDX(va)];
	if(!(*table & PTEVALID) && create == 0)
		return 0;

	switch(level){

	default:
		return 0;

	case 1:
		return table;

	case 2:
		if(*table & PTESIZE)
			panic("mmuwalk2: va 0x%ux entry 0x%ux\n", va, *table);
		if(!(*table & PTEVALID)){
			pa = PADDR(ialloc(BY2PG, BY2PG));
			*table = pa|PTEWRITE|PTEVALID;
		}
		table = KADDR(PPN(*table));

		return &table[PTX(va)];
	}
}
Example #16
ulong
mmukmap(ulong pa, ulong va, int size)
{
	ulong pae, *table, *pdb, pgsz, *pte, x;
	int pse, sync;
	extern int cpuidax, cpuiddx;

	pdb = KADDR(getcr3());
	if((cpuiddx & 0x08) && (getcr4() & 0x10))
		pse = 1;
	else
		pse = 0;
	sync = 0;

	pa = PPN(pa);
	if(va == 0)
		va = (ulong)KADDR(pa);
	else
		va = PPN(va);

	pae = pa + size;
	lock(&mmukmaplock);
	while(pa < pae){
		table = &pdb[PDX(va)];
		/*
		 * Possibly already mapped.
		 */
		if(*table & PTEVALID){
			if(*table & PTESIZE){
				/*
				 * Big page. Does it fit within?
				 * If it does, adjust pgsz so the correct end can be
				 * returned and get out.
				 * If not, adjust pgsz up to the next 4MiB boundary
				 * and continue.
				 */
				x = PPN(*table);
				if(x != pa)
					panic("mmukmap1: pa 0x%ux  entry 0x%ux\n",
						pa, *table);
				x += 4*MiB;
				if(pae <= x){
					pa = pae;
					break;
				}
				pgsz = x - pa;
				pa += pgsz;
				va += pgsz;

				continue;
			}
			else{
				/*
				 * Little page. Walk to the entry.
				 * If the entry is valid, set pgsz and continue.
				 * If not, make it so, set pgsz, sync and continue.
				 */
				pte = mmuwalk(pdb, va, 2, 0);
				if(pte && *pte & PTEVALID){
					x = PPN(*pte);
					if(x != pa)
						panic("mmukmap2: pa 0x%ux entry 0x%ux\n",
							pa, *pte);
					pgsz = BY2PG;
					pa += pgsz;
					va += pgsz;
					sync++;

					continue;
				}
			}
		}

		/*
		 * Not mapped. Check if it can be mapped using a big page -
		 * starts on a 4MiB boundary, size >= 4MiB and processor can do it.
		 * If not a big page, walk the walk, talk the talk.
		 * Sync is set.
		 */
		if(pse && (pa % (4*MiB)) == 0 && (pae >= pa+4*MiB)){
			*table = pa|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = 4*MiB;
		}
		else{
			pte = mmuwalk(pdb, va, 2, 1);
			*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
			pgsz = BY2PG;
		}
		pa += pgsz;
		va += pgsz;
		sync++;
	}
	unlock(&mmukmaplock);

	/*
	 * If something was added
	 * then need to sync up.
	 */
	if(sync)
		putcr3(PADDR(pdb));

	return pa;
}
Example #17
static void
mpstartap(Apic* apic)
{
	ulong *apbootp, *pdb, *pte;
	Mach *mach, *mach0;
	int i, machno;
	uchar *p;

	mach0 = MACHP(0);

	/*
	 * Initialise the AP page-tables and Mach structure. The page-tables
	 * are the same as for the bootstrap processor with the exception of
	 * the PTE for the Mach structure.
	 * Xspanalloc will panic if an allocation can't be made.
	 */
	p = xspanalloc(4*BY2PG, BY2PG, 0);
	pdb = (ulong*)p;
	memmove(pdb, mach0->pdb, BY2PG);
	p += BY2PG;

	if((pte = mmuwalk(pdb, MACHADDR, 1, 0)) == nil)
		return;
	memmove(p, KADDR(PPN(*pte)), BY2PG);
	*pte = PADDR(p)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	mach = (Mach*)p;
	if((pte = mmuwalk(pdb, MACHADDR, 2, 0)) == nil)
		return;
	*pte = PADDR(mach)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	machno = apic->machno;
	MACHP(machno) = mach;
	mach->machno = machno;
	mach->pdb = pdb;
	mach->gdt = (Segdesc*)p;	/* filled by mmuinit */

	/*
	 * Tell the AP where its kernel vector and pdb are.
	 * The offsets are known in the AP bootstrap code.
	 */
	apbootp = (ulong*)(APBOOTSTRAP+0x08);
	*apbootp++ = (ulong)squidboy;	/* assembler jumps here eventually */
	*apbootp++ = PADDR(pdb);
	*apbootp = (ulong)apic;

	/*
	 * Universal Startup Algorithm.
	 */
	p = KADDR(0x467);		/* warm-reset vector */
	*p++ = PADDR(APBOOTSTRAP);
	*p++ = PADDR(APBOOTSTRAP)>>8;
	i = (PADDR(APBOOTSTRAP) & ~0xFFFF)/16;
	/* code assumes i==0 */
	if(i != 0)
		print("mp: bad APBOOTSTRAP\n");
	*p++ = i;
	*p = i>>8;

	coherence();

	nvramwrite(0x0F, 0x0A);	/* shutdown code: warm reset upon init ipi */
	lapicstartap(apic, PADDR(APBOOTSTRAP));
	for(i = 0; i < 1000; i++){
		if(apic->online)
			break;
		delay(10);
	}
	nvramwrite(0x0F, 0x00);
}
Example #18
// Is this virtual address mapped?
bool
va_is_mapped(void *va)
{
	return (vpml4e[VPML4E(va)] & PTE_P) && (vpde[VPDPE(va)] & PTE_P) && (vpd[VPD(va)] & PTE_P) && (vpt[PPN(va)] & PTE_P);
}
Example #19
// Is this virtual address dirty?
bool
va_is_dirty(void *va)
{
	return (vpt[PPN(va)] & PTE_D) != 0;
}
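A small hedged sketch (an assumption, not from the lab code) combining the two helpers above: write a page back only when it is both mapped and dirty. writeback() stands in for whatever flush routine the caller uses.

static void
flush_if_dirty(void *va, void (*writeback)(void *))
{
	if (va_is_mapped(va) && va_is_dirty(va))
		writeback(va);		// hypothetical callback that writes the page out
}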
Example #20
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/*  write back dirty entries - we need this because the pio() in
	 *  fault.c is writing via a different virt addr and won't clean
	 *  its changes out of the dcache.  Page coloring doesn't work
	 *  on this mmu because the virtual cache is set associative
	 *  rather than direct mapped.
	 */
	cachedwbinv();
	if(page->txtflush){
		cacheiinv();
		page->txtflush = 0;
	}
	checkmmu(va, PPN(pa));
}
Example #21
			ref = lkp->ref;
		if(ref == 1 && lkp->image){
			/* save a copy of the original for the image cache */
			duppage(lkp);
			ref = lkp->ref;
		}
		unlock(lkp);
		if(ref > 1){
			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
		mmuphys = PPN((*pg)->pa) | PTEWRITE | PTEVALID;
		(*pg)->modref = PG_MOD|PG_REF;
		break;

	case SG_PHYSICAL:
		if(*pg == 0) {
			fn = s->pseg->pgalloc;
			if(fn)
				*pg = (*fn)(s, addr);
			else {
				new = smalloc(sizeof(Page));
				new->va = addr;
				new->pa = s->pseg->pa+(addr-s->base);
				new->ref = 1;
				*pg = new;
			}
Example #22
/*
 * pg->pgszi indicates the page size in machp()->pgsz[] used for the mapping.
 * For the user, it can be either 2*MiB or 1*GiB pages.
 * For 2*MiB pages, we use three levels, not four.
 * For 1*GiB pages, we use two levels.
 */
void
mmuput(uintptr_t va, Page *pg, uint attr)
{
    Proc *up = externup();
    int lvl, user, x, pgsz;
    PTE *pte;
    Page *page, *prev;
    Mpl pl;
    uintmem pa, ppn;
    char buf[80];

    ppn = 0;
    pa = pg->pa;
    if(pa == 0)
        panic("mmuput: zero pa");

    if(DBGFLG) {
        snprint(buf, sizeof buf, "cpu%d: up %#p mmuput %#p %#P %#ux\n",
                machp()->machno, up, va, pa, attr);
        print("%s", buf);
    }
    assert(pg->pgszi >= 0);
    pgsz = sys->pgsz[pg->pgszi];
    if(pa & (pgsz-1))
        panic("mmuput: pa offset non zero: %#ullx\n", pa);
    pa |= pteflags(attr);

    pl = splhi();
    if(DBGFLG)
        mmuptpcheck(up);
    user = (va < KZERO);
    x = PTLX(va, 3);

    pte = UINT2PTR(machp()->MMU.pml4->va);
    pte += x;
    prev = machp()->MMU.pml4;

    for(lvl = 3; lvl >= 0; lvl--) {
        if(user) {
            if(pgsz == 2*MiB && lvl == 1)	 /* use 2M */
                break;
            if(pgsz == 1ull*GiB && lvl == 2)	/* use 1G */
                break;
        }
        for(page = up->MMU.mmuptp[lvl]; page != nil; page = page->next)
            if(page->prev == prev && page->daddr == x) {
                if(*pte == 0) {
                    print("mmu: jmk and nemo had fun\n");
                    *pte = PPN(page->pa)|PteU|PteRW|PteP;
                }
                break;
            }

        if(page == nil) {
            if(up->MMU.mmuptp[0] == nil)
                page = mmuptpalloc();
            else {
                page = up->MMU.mmuptp[0];
                up->MMU.mmuptp[0] = page->next;
            }
            page->daddr = x;
            page->next = up->MMU.mmuptp[lvl];
            up->MMU.mmuptp[lvl] = page;
            page->prev = prev;
            *pte = PPN(page->pa)|PteU|PteRW|PteP;
            if(lvl == 3 && x >= machp()->MMU.pml4->daddr)
                machp()->MMU.pml4->daddr = x+1;
        }
        x = PTLX(va, lvl-1);

        ppn = PPN(*pte);
        if(ppn == 0)
            panic("mmuput: ppn=0 l%d pte %#p = %#P\n", lvl, pte, *pte);

        pte = UINT2PTR(KADDR(ppn));
        pte += x;
        prev = page;
    }

    if(DBGFLG)
        checkpte(ppn, pte);
    *pte = pa|PteU;

    if(user)
        switch(pgsz) {
        case 2*MiB:
        case 1*GiB:
            *pte |= PtePS;
            break;
        default:
            panic("mmuput: user pages must be 2M or 1G");
        }
    splx(pl);

    if(DBGFLG) {
        snprint(buf, sizeof buf, "cpu%d: up %#p new pte %#p = %#llux\n",
                machp()->machno, up, pte, pte?*pte:~0);
        print("%s", buf);
    }

    invlpg(va);			/* only if old entry valid? */
}
Example #23
static void
ramscan(ulong maxmem)
{
	ulong *k0, kzero, map, maxkpa, maxpa, pa, *pte, *table, *va, vbase, x;
	int nvalid[NMemType];

	/*
	 * The bootstrap code has created a prototype page
	 * table which maps the first MemMin of physical memory to KZERO.
	 * The page directory is at m->pdb and the first page of
	 * free memory is after the per-processor MMU information.
	 */
	pa = MemMin;

	/*
	 * Check if the extended memory size can be obtained from the CMOS.
	 * If it's 0 then it's either not known or >= 64MB. Always check
	 * at least 24MB in case there's a memory gap (up to 8MB) below 16MB;
	 * in this case the memory from the gap is remapped to the top of
	 * memory.
	 * The value in CMOS is supposed to be the number of KB above 1MB.
	 */
	if(maxmem == 0){
		x = (nvramread(0x18)<<8)|nvramread(0x17);
		if(x == 0 || x >= (63*KB))
			maxpa = MemMax;
		else
			maxpa = MB+x*KB;
		if(maxpa < 24*MB)
			maxpa = 24*MB;
	}else
		maxpa = maxmem;
	maxkpa = (u32int)-KZERO;	/* 2^32 - KZERO */

	/*
	 * March up memory from MemMin to maxpa 1MB at a time,
	 * mapping the first page and checking the page can
	 * be written and read correctly. The page tables are created here
	 * on the fly, allocating from low memory as necessary.
	 */
	k0 = (ulong*)KADDR(0);
	kzero = *k0;
	map = 0;
	x = 0x12345678;
	memset(nvalid, 0, sizeof(nvalid));
	
	/*
	 * Can't map memory to KADDR(pa) when we're walking because
	 * we can only use KADDR for relatively low addresses.
	 * Instead, map each 4MB we scan to the virtual address range
	 * MemMin->MemMin+4MB while we are scanning.
	 */
	vbase = MemMin;
	while(pa < maxpa){
		/*
		 * Map the page. Use mapalloc(&rmapram, ...) to make
		 * the page table if necessary, it will be returned to the
		 * pool later if it isn't needed.  Map in a fixed range (the second 4M)
		 * because high physical addresses cannot be passed to KADDR.
		 */
		va = (void*)(vbase + pa%(4*MB));
		table = &m->pdb[PDX(va)];
		if(pa%(4*MB) == 0){
			if(map == 0 && (map = mapalloc(&rmapram, 0, BY2PG, BY2PG)) == 0)
				break;
			memset(KADDR(map), 0, BY2PG);
			*table = map|PTEWRITE|PTEVALID;
			memset(nvalid, 0, sizeof(nvalid));
		}
		table = KADDR(PPN(*table));
		pte = &table[PTX(va)];

		*pte = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
		mmuflushtlb(PADDR(m->pdb));
		/*
		 * Write a pattern to the page and write a different
		 * pattern to a possible mirror at KZERO. If the data
		 * reads back correctly the chunk is some type of RAM (possibly
		 * a linearly-mapped VGA framebuffer, for instance...) and
		 * can be cleared and added to the memory pool. If not, the
		 * chunk is marked uncached and added to the UMB pool if <16MB
		 * or is marked invalid and added to the UPA pool.
		 */
		*va = x;
		*k0 = ~x;
		if(*va == x){
			nvalid[MemRAM] += MB/BY2PG;
			mapfree(&rmapram, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
			mmuflushtlb(PADDR(m->pdb));
			/* memset(va, 0, MB); so damn slow to memset all of memory */
		}
		else if(pa < 16*MB){
			nvalid[MemUMB] += MB/BY2PG;
			mapfree(&rmapumb, pa, MB);

			do{
				*pte++ = pa|PTEWRITE|PTEUNCACHED|PTEVALID;
				pa += BY2PG;
			}while(pa % MB);
		}
		else{
			nvalid[MemUPA] += MB/BY2PG;
			mapfree(&rmapupa, pa, MB);

			*pte = 0;
			pa += MB;
		}
		/*
		 * Done with this 4MB chunk, review the options:
		 * 1) not physical memory and >=16MB - invalidate the PDB entry;
		 * 2) physical memory - use the 4MB page extension if possible;
		 * 3) not physical memory and <16MB - use the 4MB page extension
		 *    if possible;
		 * 4) mixed or no 4MB page extension - commit the already
		 *    initialised space for the page table.
		 */
		if(pa%(4*MB) == 0 && pa >= 32*MB && nvalid[MemUPA] == (4*MB)/BY2PG){
			/*
			 * If we encounter a 4MB chunk of missing memory
			 * at a sufficiently high offset, call it the end of
			 * memory.  Otherwise we run the risk of thinking
			 * that video memory is real RAM.
			 */
			break;
		}
		if(pa <= maxkpa && pa%(4*MB) == 0){
			table = &m->pdb[PDX(KADDR(pa - 4*MB))];
			if(nvalid[MemUPA] == (4*MB)/BY2PG)
				*table = 0;
			else if(nvalid[MemRAM] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEVALID;
			else if(nvalid[MemUMB] == (4*MB)/BY2PG && (m->cpuiddx & 0x08))
				*table = (pa - 4*MB)|PTESIZE|PTEWRITE|PTEUNCACHED|PTEVALID;
			else{
				*table = map|PTEWRITE|PTEVALID;
				map = 0;
			}
		}
		mmuflushtlb(PADDR(m->pdb));
		x += 0x3141526;
	}
	/*
	 * If we didn't reach the end of the 4MB chunk, that part won't
	 * be mapped.  Commit the already initialised space for the page table.
	 */
	if(pa % (4*MB) && pa <= maxkpa){
		m->pdb[PDX(KADDR(pa))] = map|PTEWRITE|PTEVALID;
		map = 0;
	}
	if(map)
		mapfree(&rmapram, map, BY2PG);

	m->pdb[PDX(vbase)] = 0;
	mmuflushtlb(PADDR(m->pdb));

	mapfree(&rmapupa, pa, (u32int)-pa);
	*k0 = kzero;
}
Example #24
File: mp.c Project: 8l/inferno
static void
mpstartap(Apic* apic)
{
	ulong *apbootp, *pdb, *pte;
	Mach *mach, *mach0;
	int i, machno;
	uchar *p;

	mach0 = MACHP(0);

	/*
	 * Initialise the AP page-tables and Mach structure. The page-tables
	 * are the same as for the bootstrap processor with the exception of
	 * the PTE for the Mach structure.
	 * Xspanalloc will panic if an allocation can't be made.
	 */
	p = xspanalloc(4*BY2PG, BY2PG, 0);
	pdb = (ulong*)p;
	memmove(pdb, mach0->pdb, BY2PG);
	p += BY2PG;

	if((pte = mmuwalk(pdb, MACHADDR, 1, 0)) == nil)
		return;
	memmove(p, KADDR(PPN(*pte)), BY2PG);
	*pte = PADDR(p)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	mach = (Mach*)p;
	if((pte = mmuwalk(pdb, MACHADDR, 2, 0)) == nil)
		return;
	*pte = PADDR(mach)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	machno = apic->machno;
	MACHP(machno) = mach;
	mach->machno = machno;
	mach->pdb = pdb;
	mach->gdt = (Segdesc*)p;	/* filled by mmuinit */

	/*
	 * Tell the AP where its kernel vector and pdb are.
	 * The offsets are known in the AP bootstrap code.
	 */
	apbootp = (ulong*)(APBOOTSTRAP+0x08);
	*apbootp++ = (ulong)squidboy;
	*apbootp++ = PADDR(pdb);
	*apbootp = (ulong)apic;

	/*
	 * Universal Startup Algorithm.
	 */
	p = KADDR(0x467);
	*p++ = PADDR(APBOOTSTRAP);
	*p++ = PADDR(APBOOTSTRAP)>>8;
	i = (PADDR(APBOOTSTRAP) & ~0xFFFF)/16;
	*p++ = i;
	*p = i>>8;

	nvramwrite(0x0F, 0x0A);
	lapicstartap(apic, PADDR(APBOOTSTRAP));
	for(i = 0; i < 1000; i++){
		lock(&mprdthilock);
		if(mprdthi & ((1<<apic->apicno)<<24)){
			unlock(&mprdthilock);
			break;
		}
		unlock(&mprdthilock);
		delay(10);
	}
	nvramwrite(0x0F, 0x00);
}
Example #25
int
fixfault(Segment *s, uintptr addr, int read, int doputmmu)
{
	int type;
	int ref;
	Pte **p, *etp;
	uintptr mmuphys=0, soff;
	Page **pg, *lkp, *new;
	Page *(*fn)(Segment*, uintptr);

	addr &= ~(BY2PG-1);
	soff = addr-s->base;
	p = &s->map[soff/PTEMAPMEM];
	if(*p == 0)
		*p = ptealloc();

	etp = *p;
	pg = &etp->pages[(soff&(PTEMAPMEM-1))/BY2PG];
	type = s->type&SG_TYPE;

	if(pg < etp->first)
		etp->first = pg;
	if(pg > etp->last)
		etp->last = pg;

	switch(type) {
	default:
		panic("fault");
		break;

	case SG_TEXT: 			/* Demand load */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		mmuphys = PPN((*pg)->pa) | PTERONLY|PTEVALID;
		(*pg)->modref = PG_REF;
		break;

	case SG_BSS:
	case SG_SHARED:			/* Zero fill on demand */
	case SG_STACK:
		if(*pg == 0) {
			new = newpage(1, &s, addr);
			if(s == 0)
				return -1;

			*pg = new;
		}
		goto common;

	case SG_DATA:
	common:			/* Demand load/pagein/copy on write */
		if(pagedout(*pg))
			pio(s, addr, soff, pg);

		/*
		 *  It's only possible to copy on write if
		 *  we're the only user of the segment.
		 */
		if(read && conf.copymode == 0 && s->ref == 1) {
			mmuphys = PPN((*pg)->pa)|PTERONLY|PTEVALID;
			(*pg)->modref |= PG_REF;
			break;
		}

		lkp = *pg;
		lock(lkp);

		if(lkp->image == &swapimage)
			ref = lkp->ref + swapcount(lkp->daddr);
		else
			ref = lkp->ref;
		if(ref == 1 && lkp->image){
			/* save a copy of the original for the image cache */
			duppage(lkp);
			ref = lkp->ref;
		}
		unlock(lkp);
		if(ref > 1){
			new = newpage(0, &s, addr);
			if(s == 0)
				return -1;
			*pg = new;
			copypage(lkp, *pg);
			putpage(lkp);
		}
Example #26
static void
map(ulong base, ulong len, int type)
{
	ulong e, n;
	ulong *table, flags, maxkpa;
	
	/*
	 * Split any call crossing MemMin to make below simpler.
	 */
	if(base < MemMin && len > MemMin-base){
		n = MemMin - base;
		map(base, n, type);
		map(MemMin, len-n, type);
	}
	
	/*
	 * Let lowraminit and umbscan hash out the low MemMin.
	 */
	if(base < MemMin)
		return;

	/*
	 * Any non-memory below 16*MB is used as upper mem blocks.
	 */
	if(type == MemUPA && base < 16*MB && base+len > 16*MB){
		map(base, 16*MB-base, MemUMB);
		map(16*MB, len-(16*MB-base), MemUPA);
		return;
	}
	
	/*
	 * Memory below CPU0END is reserved for the kernel
	 * and already mapped.
	 */
	if(base < PADDR(CPU0END)){
		n = PADDR(CPU0END) - base;
		if(len <= n)
			return;
		map(PADDR(CPU0END), len-n, type);
		return;
	}
	
	/*
	 * Memory between KTZERO and end is the kernel itself
	 * and is already mapped.
	 */
	if(base < PADDR(KTZERO) && base+len > PADDR(KTZERO)){
		map(base, PADDR(KTZERO)-base, type);
		return;
	}
	if(PADDR(KTZERO) < base && base < PADDR(PGROUND((ulong)end))){
		n = PADDR(PGROUND((ulong)end));
		if(len <= n)
			return;
		map(PADDR(PGROUND((ulong)end)), len-n, type);
		return;
	}
	
	/*
	 * Now we have a simple case.
	 */
	// print("map %.8lux %.8lux %d\n", base, base+len, type);
	switch(type){
	case MemRAM:
		mapfree(&rmapram, base, len);
		flags = PTEWRITE|PTEVALID;
		break;
	case MemUMB:
		mapfree(&rmapumb, base, len);
		flags = PTEWRITE|PTEUNCACHED|PTEVALID;
		break;
	case MemUPA:
		mapfree(&rmapupa, base, len);
		flags = 0;
		break;
	default:
	case MemReserved:
		flags = 0;
		break;
	}
	
	/*
	 * bottom MemMin is already mapped - just twiddle flags.
	 * (not currently used - see above)
	 */
	if(base < MemMin){
		table = KADDR(PPN(m->pdb[PDX(base)]));
		e = base+len;
		base = PPN(base);
		for(; base<e; base+=BY2PG)
			table[PTX(base)] |= flags;
		return;
	}
	
	/*
	 * Only map from KZERO to 2^32.
	 */
	if(flags){
		maxkpa = -KZERO;
		if(base >= maxkpa)
			return;
		if(len > maxkpa-base)
			len = maxkpa - base;
		pdbmap(m->pdb, base|flags, base+KZERO, len);
	}
}