Example #1
void
putmmu(ulong va, ulong pa, Page *)
{
	ulong *l1a, *l1b, *l2;
	int l1o, l2o;
	
	l1o = va / MiB;			/* L1 index: which 1MB section */
	l2o = (va % MiB) / BY2PG;	/* L2 index: which 4KB page within it */
	l1a = KADDR(L1PT);		/* the mmu's L1 table */
	l1b = up->l1;			/* per-process copy, kept in sync */
	if(l1a[l1o] == 0){
		/* no coarse table for this section yet */
		if((pa & PTEVALID) == 0)
			return;
		l2 = xspanalloc(L2SIZ, L2SIZ, 0);
		l1a[l1o] = l1b[l1o] = PADDR(l2) | Coarse;
	} else
		l2 = KADDR(ROUNDDN(l1a[l1o], L2SIZ));
	l2 += l2o;
	if((pa & PTEVALID) == 0){
		/* invalidate the mapping */
		*l2 = 0;
		flushtlb();
		return;
	}
	*l2 = ROUNDDN(pa, BY2PG) | Small;
	if((pa & PTEWRITE) == 0)
		*l2 |= L2AP(Uro);	/* user read-only */
	else
		*l2 |= L2AP(Urw);	/* user read/write */
	if((pa & PTEUNCACHED) == 0)
		*l2 |= Buffered | Cached;
	flushtlb();
}
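
The split above assumes the classic ARM two-level layout: a 4096-entry L1 table of 1MB sections, each section optionally backed by a 256-entry coarse L2 table of 4KB small pages. A minimal sketch of the geometry putmmu() relies on; the values match that layout, but these definitions are illustrative, not copied from the port's mem.h:

/* illustrative only: the geometry behind l1o/l2o (assumed constants) */
enum {
	MiB	= 1024*1024,	/* span of one L1 section entry */
	BY2PG	= 4096,		/* small-page size */
	L2SIZ	= 256*4,	/* one coarse table: 256 4-byte entries, 1KB */
};

/* sketch: recover the two table indices from a virtual address */
static void
vasplit(ulong va, int *l1o, int *l2o)
{
	*l1o = va / MiB;		/* bits 31-20 select the section */
	*l2o = (va % MiB) / BY2PG;	/* bits 19-12 select the page */
}
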
Example #2
void
mmuinit(void)
{
	ulong *l1, l2, *pl2;
	int i, n;
	extern ulong *uart;

	l1 = KADDR(L1PT);
	l2 = IOPT;
	n = NIOPAGES / 256;	/* each coarse table maps 256 pages */
	memset(KADDR(l2), 0, n * L2SIZ);
	/* point the I/O window's L1 entries at the coarse tables at IOPT */
	for(i = 0; i < n; i++){
		l1[(IZERO / MiB) + i] = l2 | Coarse;
		l2 += L2SIZ;
	}
	/* remap the uart and peripheral registers through the I/O window */
	uart = vmap((ulong) uart, BY2PG);
	periph = vmap(0x48240000, 2 * BY2PG);
	/* clear all L1 entries below the I/O window */
	memset(l1, 0, sizeof(ulong) * (IZERO / MiB));
	/* private coarse table for the last MB of virtual space */
	l1[4095] = PRIVL2 | Coarse;
	pl2 = KADDR(PRIVL2);
	for(i = 0; i < 240; i++)
		pl2[i] = (0x8FF00000 + i * BY2PG) | L2AP(Krw) | Small | Cached | Buffered;
	pl2[240] = PHYSVECTORS | L2AP(Krw) | Small | Cached | Buffered;	/* exception vectors */
	pl2[241] = FIRSTMACH | L2AP(Krw) | Small | Cached | Buffered;	/* first Mach */
	flushtlb();
	m = (Mach *) MACHADDR;
}
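
For context, a hedged usage sketch of the window this sets up: once the L1 entries at IZERO point at the coarse tables at IOPT, drivers reach device registers through vmap (Example #5). The device, its physical address, and every name below are hypothetical:

/* hypothetical driver init: map an 8KB register bank into the I/O window */
static ulong *regs;

static void
hypodevinit(void)
{
	regs = vmap(0x49020000, 2*BY2PG);	/* placeholder physical address */
	if(regs == nil)
		panic("hypodevinit: vmap");
	regs[0] |= 1;				/* e.g. set an enable bit */
}
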
Example #3
void
mmuinit(void)
{
	PTE *l1, *l2;
	uintptr pa, va;

	l1 = (PTE*)PADDR(L1);
	l2 = (PTE*)PADDR(L2);

	/* map all of ram at KZERO */
	va = KZERO;
	for(pa = PHYSDRAM; pa < PHYSDRAM+DRAMSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section|Cached|Buffered;
		va += MiB;
	}

	/* identity map first MB of ram so mmu can be enabled */
	l1[L1X(PHYSDRAM)] = PHYSDRAM|Dom0|L1AP(Krw)|Section|Cached|Buffered;

	/* map i/o registers */
	va = VIRTIO;
	for(pa = PHYSIO; pa < PHYSIO+IOSIZE; pa += MiB){
		l1[L1X(va)] = pa|Dom0|L1AP(Krw)|Section;
		va += MiB;
	}

	/* double map exception vectors at top of virtual memory */
	va = HVECTORS;
	l1[L1X(va)] = (uintptr)l2|Dom0|Coarse;
	l2[L2X(va)] = PHYSDRAM|L2AP(Krw)|Small;
}
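
L1X and L2X pick the table indices out of a virtual address. The ARM ports conventionally define them with a bit-field extractor roughly as follows (a sketch; the authoritative definitions live in the port's headers):

#define FEXT(d, o, w)	(((d)>>(o)) & ((1<<(w))-1))
#define L1X(va)		FEXT((va), 20, 12)	/* bits 31-20: 4096 sections */
#define L2X(va)		FEXT((va), 12, 8)	/* bits 19-12: 256 small pages */
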
Example #4
void
mmuinit(void)
{
	uintptr pa;
	PTE *l1, *l2;

	pa = ttbget();
	l1 = KADDR(pa);

	/* redundant with l.s; only covers first MB of 17MB */
	l1[L1X(VIRTIO)] = PHYSIO|Dom0|L1AP(Krw)|Section;

	idmap(l1, PHYSETHER);		/* igep 9221 ethernet regs */
	idmap(l1, PHYSL4PROT);
	idmap(l1, PHYSL3);
	idmap(l1, PHYSSMS);
	idmap(l1, PHYSDRC);
	idmap(l1, PHYSGPMC);

	/* map high vectors to start of dram, but only 4K, not 1MB */
	pa -= MACHSIZE+2*1024;
	l2 = KADDR(pa);
	memset(l2, 0, 1024);
	/* vectors step on u-boot, but so do page tables */
	l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
	l1[L1X(HVECTORS)] = pa|Dom0|Coarse;	/* vectors -> ttb-machsize-2k */
	coherence();

	cacheuwbinv();
	l2cacheuwbinv();
	mmuinvalidate();

	m->mmul1 = l1;
//	mmudump(l1);			/* DEBUG */
}
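
idmap() itself is not shown; presumably it installs one identity-mapped section for the MB containing pa, uncached since these are device registers. A sketch under that assumption, not the port's actual code:

/* assumed shape of idmap: identity-map the 1MB section containing pa */
static void
idmap(PTE *l1, uintptr pa)
{
	pa &= ~(MiB-1);					/* section-align the base */
	l1[L1X(pa)] = pa|Dom0|L1AP(Krw)|Section;	/* no Cached|Buffered */
}
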
Example #5
void *
vmap(ulong phys, ulong length)
{
	ulong virt, off, *l2;

	off = phys % BY2PG;	/* byte offset within the first page */
	/* number of whole pages covering [phys, phys+length) */
	length = (ROUNDUP(phys + length, BY2PG) - ROUNDDN(phys, BY2PG)) / BY2PG;
	if(length == 0)
		return nil;
	phys = ROUNDDN(phys, BY2PG);
	virt = getiopages(length);	/* allocate page slots in the I/O window */
	l2 = KADDR(IOPT);
	l2 += virt;
	while(length--){
		/* kernel read/write; no Cached|Buffered: device memory */
		*l2++ = phys | L2AP(Krw) | Small | PTEIO;
		phys += BY2PG;
	}
	flushtlb();
	return (void *) (IZERO + BY2PG * virt + off);
}
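
The length recomputation is easy to misread: it counts whole pages, including the head and tail fragments when phys is not page-aligned. A worked instance, assuming 4KB pages:

/*
 * e.g. phys = 0x48240FF0, length = 0x20:
 *	off = 0xFF0
 *	pages = (ROUNDUP(0x48241010, 4K) - ROUNDDN(0x48240FF0, 4K)) / 4K
 *	      = (0x48242000 - 0x48240000) / 0x1000 = 2
 * two pages are mapped; the caller's pointer is offset 0xFF0 into
 * the first, so the unaligned region is covered end to end.
 */
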
Example #6
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/*  write back dirty entries - we need this because the pio() in
	 *  fault.c is writing via a different virt addr and won't clean
	 *  its changes out of the dcache.  Page coloring doesn't work
	 *  on this mmu because the virtual cache is set associative
	 *  rather than direct mapped.
	 */
	cachedwbinv();
	if(page->txtflush){
		cacheiinv();
		page->txtflush = 0;
	}
	checkmmu(va, PPN(pa));
}
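
putmmu() only ever takes pages off up->mmul2cache, so something else must refill it. A hedged sketch of the recycling that presumably happens at context switch or process teardown (the shape of it, not the port's actual mmuswitch/mmurelease):

/* assumed counterpart: move a process's l2 pages back to its reuse cache */
static void
l2recycle(Proc *p)
{
	Page *pg, *next;

	for(pg = p->mmul2; pg != nil; pg = next){
		next = pg->next;
		pg->next = p->mmul2cache;
		p->mmul2cache = pg;
	}
	p->mmul2 = nil;
}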