Example #1
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	mmul2empty(proc, 0);
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %lud", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache != nil)
		pagechaindone();
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}
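Note on the helpers: mmul2empty(proc, 0) unhooks the process's second-level page-table pages and chains them onto proc->mmul2cache, which the loop above then returns to the free pool. A sketch of what it plausibly does, pieced together from how putmmu and mmuswitch use these fields (not necessarily this port's exact code):

static void
mmul2empty(Proc *proc, int clear)
{
	PTE *l1;
	Page **l2, *page;

	l1 = m->mmul1;
	l2 = &proc->mmul2;
	for(page = *l2; page != nil; page = page->next){
		if(clear)
			memset(UINT2PTR(page->va), 0, BY2PG);
		l1[page->daddr] = Fault;	/* unhook from the l1 map */
		l2 = &page->next;
	}
	/* prepend the whole list to the per-process free cache */
	*l2 = proc->mmul2cache;
	proc->mmul2cache = proc->mmul2;
	proc->mmul2 = nil;
}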
Example #2
/*
 * transmit strategy: fill the output ring as far as possible,
 * perhaps leaving a few spare; kick off the output and take
 * an interrupt only when the transmit queue is empty.
 */
static void
transmit(Ether *ether)
{
	int i, kick, len;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Tx *t;

	ethercheck(ether);
	ilock(ctlr);
	txreplenish(ether);			/* reap old packets */

	/* queue new packets; use at most half the tx descs to avoid livelock */
	kick = 0;
	for (i = Ntx/2 - 2; i > 0; i--) {
		t = &ctlr->tx[ctlr->txhead];	/* *t is uncached */
		assert(((uintptr)t & (Descralign - 1)) == 0);
		if(t->cs & TCSdmaown) {		/* descriptor busy? */
			ctlr->txringfull++;
			break;
		}

		b = qget(ether->oq);		/* outgoing packet? */
		if (b == nil)
			break;
		len = BLEN(b);
		if(len < ether->minmtu || len > ether->maxmtu) {
			freeb(b);
			continue;
		}
		ctlr->txb[ctlr->txhead] = b;

		/* make sure the whole packet is in memory */
		cachedwbse(b->rp, len);
		l2cacheuwbse(b->rp, len);

		/* set up the transmit descriptor */
		t->buf = PADDR(b->rp);
		t->countchk = len << 16;
		coherence();

		/* and fire */
		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
			TCSenableintr;
		coherence();

		kick++;
		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
	}
	if (kick) {
		txkick(ctlr);

		reg->irqmask  |= Itxendq(Qno);
		reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
	}
	iunlock(ctlr);
}
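The cache and barrier calls in the loop are the load-bearing part: cachedwbse and l2cacheuwbse force the packet bytes out to dram before the controller's dma engine reads them, and the two coherence() calls keep the descriptor fill, the ownership flip, and the eventual kick in that order. Distilled into a hypothetical helper (dmahandoff is not in the driver; the other names are as above):

static void
dmahandoff(Tx *t, Block *b, int len)
{
	cachedwbse(b->rp, len);		/* l1: push packet bytes toward dram */
	l2cacheuwbse(b->rp, len);	/* l2: and all the way out */
	t->buf = PADDR(b->rp);		/* fill descriptor fields first */
	t->countchk = len << 16;
	coherence();			/* fields visible before ownership flips */
	t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown | TCSenableintr;
	coherence();			/* ownership visible before any kick */
}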
Example #3
static void
sdioiosetup(int write, void *buf, int bsize, int bcount)
{
	int len;
	uintptr pa;

	pa = PADDR(buf);
	if(write && !PIOwrite){
		WR(DmaLSB, pa);
		WR(DmaMSB, pa>>16);
		len = bsize * bcount;
		cachedwbse(buf, len);
		l2cacheuwbse(buf, len);
	}else if(!write && !PIOread){
		WR(DmaLSB, pa);
		WR(DmaMSB, pa>>16);
		len = bsize * bcount;
		/* device will write: discard cpu copies (wb+inv, see note below) */
		cachedwbinvse(buf, len);
		l2cacheuwbinvse(buf, len);
	}
}
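Two rules run through this function. First, direction picks the cache operation: clean (write back) a buffer the device will read; write back and invalidate one the device will write, so the cpu refetches the dma'd data from memory. Second, the cache ops take virtual addresses while the controller is handed physical ones. A sketch (names as above; the wb+inv variants on the read side are assumed by analogy with the write side):

	if(write){
		cachedwbse(buf, len);		/* device reads: push data out */
		l2cacheuwbse(buf, len);
	}else{
		cachedwbinvse(buf, len);	/* device writes: toss cpu copies */
		l2cacheuwbinvse(buf, len);
	}
	WR(DmaLSB, PADDR(buf));			/* hardware gets the physical address */
	WR(DmaMSB, PADDR(buf)>>16);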
Example #4
void
mmuinit1(void)
{
	PTE *l1;

	l1 = (PTE*)L1;
	m->mmul1 = l1;

	/*
	 * undo identity map of first MB of ram
	 */
	l1[L1X(PHYSDRAM)] = 0;
	cachedwbse(&l1[L1X(PHYSDRAM)], sizeof(PTE));
	mmuinvalidate();
}
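The three steps here — store the entry, clean it to memory, invalidate the TLB — are the canonical sequence for any page-table change on these cores, since the hardware walker reads tables from memory, not through the d-cache. As a sketch (ptepublish is a hypothetical helper, not in the port):

static void
ptepublish(PTE *pte, PTE val)
{
	*pte = val;			/* 1: update the entry */
	cachedwbse(pte, sizeof *pte);	/* 2: walker doesn't snoop the d-cache */
	mmuinvalidate();		/* 3: drop stale tlb copies */
}

L1X itself just selects the 1MiB-section slot; in the ARM short-descriptor format it amounts to (va >> 20) & 0xfff.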
Example #5
void
mmuswitch(Proc* proc)
{
	int x;
	PTE *l1;
	Page *page;

	/* do kprocs get here and if so, do they need to? */
	if(m->mmupid == proc->pid && !proc->newtlb)
		return;
	m->mmupid = proc->pid;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	if(proc->newtlb){
		mmul2empty(proc, 1);
		proc->newtlb = 0;
	}

	mmul1empty();

	/* move in new map */
	l1 = m->mmul1;
	for(page = proc->mmul2; page != nil; page = page->next){
		x = page->daddr;
		l1[x] = PPN(page->pa)|Dom0|Coarse;
		/* know here that L1lo < x < L1hi */
		if(x+1 - m->mmul1lo < m->mmul1hi - x)
			m->mmul1lo = x+1;
		else
			m->mmul1hi = x;
	}

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&l1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();

	//print("mmuswitch l1lo %d l1hi %d %d\n",
	//	m->mmul1lo, m->mmul1hi, proc->kp);
}
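m->mmul1lo and m->mmul1hi bracket the still-empty middle of the user range of the L1 table: installing an entry at x shrinks the window from whichever edge x is nearer, so user entries only accumulate in the two edge bands. That lets mmul1empty scrub just those bands instead of all of [L1lo, L1hi). A sketch under that assumption (not the port's exact mmul1empty):

static void
mmul1empty(void)
{
	PTE *l1 = m->mmul1;

	if(m->mmul1lo > L1lo)
		memset(&l1[L1lo], 0, (m->mmul1lo - L1lo)*sizeof(PTE));
	if(m->mmul1hi < L1hi)
		memset(&l1[m->mmul1hi], 0, (L1hi - m->mmul1hi)*sizeof(PTE));
	m->mmul1lo = L1lo;		/* window is the whole user range again */
	m->mmul1hi = L1hi;
	/* caller writes the table back and invalidates the tlb */
}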
Example #6
uintptr
mmukmap(uintptr va, uintptr pa, uint size)
{
	int o;
	uint n;
	PTE *pte, *pte0;

	assert((va & (MiB-1)) == 0);
	o = pa & (MiB-1);
	pa -= o;
	size += o;
	pte = pte0 = &m->mmul1[L1X(va)];
	for(n = 0; n < size; n += MiB)
		if(*pte++ != Fault)
			return 0;
	pte = pte0;
	for(n = 0; n < size; n += MiB){
		*pte++ = (pa+n)|Dom0|L1AP(Krw)|Section;
		mmuinvalidateaddr(va+n);
	}
	cachedwbse(pte0, (pte - pte0)*sizeof(PTE));	/* bytes, not PTE count */
	return va + o;
}
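Because mmukmap strips the sub-MiB offset off pa before mapping and adds it back to the return value, callers can map device registers that aren't MiB-aligned and still get back a pointer to the right byte. A hedged usage sketch (VIRTIO and the device address are illustrative, not taken from the port):

	void *regs;

	/* map 2MiB of device space at a MiB-aligned va */
	regs = (void*)mmukmap(VIRTIO, 0xf1000c00, 2*MiB);
	if(regs == nil)			/* 0 means an l1 slot was already in use */
		panic("mmukmap failed");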
Example #7
void
putmmu(uintptr va, uintptr pa, Page* page)
{
	int x;
	Page *pg;
	PTE *l1, *pte;

	x = L1X(va);
	l1 = &m->mmul1[x];
	if(*l1 == Fault){
		/* wasteful - l2 pages only have 256 entries - fix */
		if(up->mmul2cache == nil){
			/* auxpg since we don't need much? memset if so */
			pg = newpage(1, 0, 0);
			pg->va = VA(kmap(pg));
		}
		else{
			pg = up->mmul2cache;
			up->mmul2cache = pg->next;
			memset(UINT2PTR(pg->va), 0, BY2PG);
		}
		pg->daddr = x;
		pg->next = up->mmul2;
		up->mmul2 = pg;

		/* force l2 page to memory */
		cachedwbse((void *)pg->va, BY2PG);

		*l1 = PPN(pg->pa)|Dom0|Coarse;
		cachedwbse(l1, sizeof *l1);

		if(x >= m->mmul1lo && x < m->mmul1hi){
			if(x+1 - m->mmul1lo < m->mmul1hi - x)
				m->mmul1lo = x+1;
			else
				m->mmul1hi = x;
		}
	}
	pte = UINT2PTR(KADDR(PPN(*l1)));

	/* protection bits are
	 *	PTERONLY|PTEVALID;
	 *	PTEWRITE|PTEVALID;
	 *	PTEWRITE|PTEUNCACHED|PTEVALID;
	 */
	x = Small;
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;
	if(pa & PTEWRITE)
		x |= L2AP(Urw);
	else
		x |= L2AP(Uro);
	pte[L2X(va)] = PPN(pa)|x;
	cachedwbse(&pte[L2X(va)], sizeof pte[0]);

	/* clear out the current entry */
	mmuinvalidateaddr(PPN(va));

	/*  write back dirty entries - we need this because the pio() in
	 *  fault.c is writing via a different virt addr and won't clean
	 *  its changes out of the dcache.  Page coloring doesn't work
	 *  on this mmu because the virtual cache is set associative
	 *  rather than direct mapped.
	 */
	cachedwbinv();
	if(page->txtflush){
		cacheiinv();
		page->txtflush = 0;
	}
	checkmmu(va, PPN(pa));
}
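The protection translation in the middle is this MMU's whole user-page policy: uncached mappings lose Cached|Buffered, and PTEWRITE selects user read/write over read-only. Distilled into a hypothetical helper (ptebits is not in the port; the constants are as above):

static PTE
ptebits(uintptr pa)
{
	PTE x;

	x = Small;			/* 4KB second-level page */
	if(!(pa & PTEUNCACHED))
		x |= Cached|Buffered;	/* normal, cacheable memory */
	x |= (pa & PTEWRITE)? L2AP(Urw): L2AP(Uro);
	return PPN(pa)|x;
}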
Example #8
void
dmastart(int chan, int dev, int dir, void *src, void *dst, int len)
{
	Ctlr *ctlr;
	Cb *cb;
	int ti;

	ctlr = &dma[chan];
	if(ctlr->regs == nil){
		ctlr->regs = (u32int*)(DMAREGS + chan*Regsize);
		ctlr->cb = xspanalloc(sizeof(Cb), Cbalign, 0);
		assert(ctlr->cb != nil);
		dmaregs[Enable] |= 1<<chan;
		ctlr->regs[Cs] = Reset;
		while(ctlr->regs[Cs] & Reset)
			;
		intrenable(IRQDMA(chan), dmainterrupt, ctlr, 0, "dma");
	}
	cb = ctlr->cb;
	ti = 0;
	switch(dir){
	case DmaD2M:
		cachedwbinvse(dst, len);
		ti = Srcdreq | Destinc;
		cb->sourcead = DMAIO(src);
		cb->destad = DMAADDR(dst);
		break;
	case DmaM2D:
		cachedwbse(src, len);
		ti = Destdreq | Srcinc;
		cb->sourcead = DMAADDR(src);
		cb->destad = DMAIO(dst);
		break;
	case DmaM2M:
		cachedwbse(src, len);
		cachedwbinvse(dst, len);
		ti = Srcinc | Destinc;
		cb->sourcead = DMAADDR(src);
		cb->destad = DMAADDR(dst);
		break;
	}
	cb->ti = ti | dev<<Permapshift | Inten;
	cb->txfrlen = len;
	cb->stride = 0;
	cb->nextconbk = 0;
	cachedwbse(cb, sizeof(Cb));
	ctlr->regs[Cs] = 0;
	microdelay(1);
	ctlr->regs[Conblkad] = DMAADDR(cb);
	DBG print("dma start: %ux %ux %ux %ux %ux %ux\n",
		cb->ti, cb->sourcead, cb->destad, cb->txfrlen,
		cb->stride, cb->nextconbk);
	DBG print("intstatus %ux\n", dmaregs[Intstatus]);
	dmaregs[Intstatus] = 0;
	ctlr->regs[Cs] = Int;
	microdelay(1);
	coherence();
	DBG dumpdregs("before Active", ctlr->regs);
	ctlr->regs[Cs] = Active;
	DBG dumpdregs("after Active", ctlr->regs);
}
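The one hard ordering requirement is that the control block reach memory before Conblkad is loaded, which is what the cachedwbse(cb, sizeof(Cb)) just before the register writes guarantees; the source and destination buffers get cleaned or invalidated per direction at the top of the switch. A hedged usage sketch (the channel and device names, EMMCREGS, and dmawait are assumptions about the surrounding driver, not shown above):

	static uchar buf[512];

	/* pull 512 bytes from a device fifo into buf (device to memory) */
	dmastart(DmaChanEmmc, DmaDevEmmc, DmaD2M, (void*)EMMCREGS, buf, sizeof buf);
	dmawait(DmaChanEmmc);	/* sleep until dmainterrupt signals completion */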