static void
rxreplenish(Ctlr *ctlr)
{
	Rx *r;
	Block *b;

	while(ctlr->rxb[ctlr->rxtail] == nil) {
		b = rxallocb();
		if(b == nil) {
			iprint("#l%d: rxreplenish out of buffers\n",
				ctlr->ether->ctlrno);
			break;
		}

		ctlr->rxb[ctlr->rxtail] = b;

		/* set up uncached receive descriptor */
		r = &ctlr->rx[ctlr->rxtail];
		assert(((uintptr)r & (Descralign - 1)) == 0);
		r->countsize = ROUNDUP(Rxblklen, 8);
		r->buf = PADDR(b->rp);
		coherence();

		/* and fire */
		r->cs = RCSdmaown | RCSenableintr;
		coherence();

		ctlr->rxtail = NEXT(ctlr->rxtail, Nrx);
	}
}

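The ring-index arithmetic in rxreplenish (and in transmit and llfifointr below)
relies on a NEXT helper.  A minimal sketch, assuming the usual Plan 9 idiom;
the real definition lives in the driver's source:

/* hypothetical sketch of the ring-index helper assumed above */
#define NEXT(x, len)	(((x)+1) % (len))
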
static void
shutdown(Ether *ether)
{
	int i;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;

	ilock(ctlr);
	quiesce(reg);
	reg->euc |= Portreset;
	coherence();
	iunlock(ctlr);
	delay(100);

	ilock(ctlr);
	reg->euc &= ~Portreset;
	coherence();
	delay(20);

	reg->psc0 = 0;			/* no PSC0porton */
	reg->psc1 |= PSC1portreset;
	coherence();
	delay(50);
	reg->psc1 &= ~PSC1portreset;
	coherence();

	for (i = 0; i < nelem(reg->tcqdp); i++)
		reg->tcqdp[i] = 0;
	for (i = 0; i < nelem(reg->crdp); i++)
		reg->crdp[i].r = 0;
	coherence();

	iunlock(ctlr);
}

void
iunlock(Lock *l)
{
	ulong sr;

#ifdef LOCKCYCLES
	l->lockcycles += lcycles();
	cumilockcycles += l->lockcycles;
	if(l->lockcycles > maxilockcycles){
		maxilockcycles = l->lockcycles;
		maxilockpc = l->pc;
	}
	if(l->lockcycles > 2400)
		ilockpcs[n++ & 0xff] = l->pc;
#endif
	if(l->key == 0)
		print("iunlock: not locked: pc %#p\n", getcallerpc(&l));
	if(!l->isilock)
		print("iunlock of lock: pc %#p, held by %#lux\n",
			getcallerpc(&l), l->pc);
	if(islo())
		print("iunlock while lo: pc %#p, held by %#lux\n",
			getcallerpc(&l), l->pc);

	sr = l->sr;
	l->m = nil;
	coherence();
	l->key = 0;
	coherence();
	m->ilockdepth--;
	if(up)
		up->lastilock = nil;
	splx(sr);
}

static void
interrupt(Ureg*, void *)
{
	u32int *gp, *field;
	char pin;
	int set;

	gp = (u32int*)GPIOREGS;
	coherence();
	eventvalue = 0;
	for(pin = 0; pin < PIN_TABLE_SIZE; pin++) {
		set = (gp[Evds0 + pin/32] & (1 << (pin % 32))) != 0;
		if(set) {
			field = &gp[Evds0 + pin/32];
			SET_BIT(field, pin, 1);	/* writing 1 clears the event */
			SET_BIT(&eventvalue, pin, 1);
		}
	}
	coherence();
	wakeup(&rend);
}

/* Acquire semaphore (subtract 1). */
static int
semacquire(Segment *s, long *addr, int block)
{
	int acquired;
	Sema phore;

	if(canacquire(addr))
		return 1;
	if(!block)
		return 0;

	acquired = 0;
	semqueue(s, addr, &phore);
	for(;;){
		phore.waiting = 1;
		coherence();
		if(canacquire(addr)){
			acquired = 1;
			break;
		}
		if(waserror())
			break;
		sleep(&phore, semawoke, &phore);
		poperror();
	}
	semdequeue(s, &phore);
	coherence();	/* not strictly necessary due to lock in semdequeue */
	if(!phore.waiting)
		semwakeup(s, addr, 1);
	if(!acquired)
		nexterror();
	return 1;
}

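semacquire leans on canacquire to do the actual decrement.  A hedged sketch of
what that helper presumably looks like, assuming an atomic cmpswap(addr, old,
new) compare-and-swap primitive as in the Plan 9 port code:

/*
 * hedged sketch: try to decrement *addr without blocking.
 * assumes a cmpswap(addr, old, new) compare-and-swap primitive.
 */
static int
canacquire(long *addr)
{
	long value;

	while((value = *addr) > 0)
		if(cmpswap(addr, value, value-1))
			return 1;
	return 0;
}
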
void
unlock(Lock *l)
{
#ifdef LOCKCYCLES
	l->lockcycles += lcycles();
	cumlockcycles += l->lockcycles;
	if(l->lockcycles > maxlockcycles){
		maxlockcycles = l->lockcycles;
		maxlockpc = l->pc;
	}
#endif
	if(l->key == 0)
		print("unlock: not locked: pc %#p\n", getcallerpc(&l));
	if(l->isilock)
		print("unlock of ilock: pc %#p, held by %#lux\n",
			getcallerpc(&l), l->pc);
	if(l->p != up)
		print("unlock: up changed: pc %#p, acquired at pc %#lux, lock p %#p, unlock up %#p\n",
			getcallerpc(&l), l->pc, l->p, up);
	l->m = nil;
	coherence();
	l->key = 0;
	coherence();

	if(up && deccnt(&up->nlocks) == 0 && up->delaysched && islo()){
		/*
		 * Call sched if the need arose while locks were held
		 * But, don't do it from interrupt routines, hence the islo() test
		 */
		sched();
	}
}

static void
shutdown(Hci *hp)
{
	int i;
	Ctlr *ctlr;
	Eopio *opio;

	ctlr = hp->aux;
	ilock(ctlr);
	opio = ctlr->opio;
	opio->cmd |= Chcreset;		/* controller reset */
	coherence();
	for(i = 0; i < 100; i++){
		if((opio->cmd & Chcreset) == 0)
			break;
		delay(1);
	}
	if(i >= 100)
		print("ehci %#p controller reset timed out\n", ctlr->capio);
	delay(100);
	ehcirun(ctlr, 0);
	opio->frbase = 0;
	coherence();
	iunlock(ctlr);
}

static void
dmaintr(Ureg *, void *a)
{
	int i = (int)a;			/* dma request & chan # */
	Dchan *cp;
	Regs *regs = (Regs *)PHYSSDMA;

	assert(i >= 0 && i < Nirq);
	*xfer[i].done = 1;
	assert(xfer[i].rend != nil);
	wakeup(xfer[i].rend);

	cp = regs->chan + i;
	if(!(cp->csr & Blocki))
		iprint("dmaintr: req %d: Blocki not set; csr %#lux\n",
			i, cp->csr);
	/* status bits are write-1-to-clear: writing them back clears them */
	cp->csr |= cp->csr;		/* extinguish intr source */
	coherence();
	regs->irqsts[i] = regs->irqsts[i]; /* extinguish intr source */
	coherence();
	regs->irqen[i] &= ~(1 << i);
	coherence();

	xfer[i].rend = nil;
	coherence();
}

static void
interrupt(Ureg*, void *arg)
{
	Uart *uart;
	u32int *ap;

	uart = arg;
	ap = (u32int*)uart->regs;

	coherence();
	if(0 && (ap[Irq] & UartIrq) == 0)
		return;
	if(ap[MuLsr] & TxRdy)
		uartkick(uart);
	if(ap[MuLsr] & RxRdy){
		//if(uart->console){
		//	if(uart->opens == 1)
		//		uart->putc = kbdcr2nl;
		//	else
		//		uart->putc = nil;
		//}
		do{
			uartrecv(uart, ap[MuIo] & 0xFF);
		}while(ap[MuLsr] & RxRdy);
	}
	coherence();
}

/*
 * transmit strategy: fill the output ring as far as possible,
 * perhaps leaving a few spare; kick off the output and take
 * an interrupt only when the transmit queue is empty.
 */
static void
transmit(Ether *ether)
{
	int i, kick, len;
	Block *b;
	Ctlr *ctlr = ether->ctlr;
	Gbereg *reg = ctlr->reg;
	Tx *t;

	ethercheck(ether);
	ilock(ctlr);
	txreplenish(ether);			/* reap old packets */

	/* queue new packets; use at most half the tx descs to avoid livelock */
	kick = 0;
	for (i = Ntx/2 - 2; i > 0; i--) {
		t = &ctlr->tx[ctlr->txhead];	/* *t is uncached */
		assert(((uintptr)t & (Descralign - 1)) == 0);
		if(t->cs & TCSdmaown) {		/* descriptor busy? */
			ctlr->txringfull++;
			break;
		}

		b = qget(ether->oq);		/* outgoing packet? */
		if (b == nil)
			break;
		len = BLEN(b);
		if(len < ether->minmtu || len > ether->maxmtu) {
			freeb(b);
			continue;
		}
		ctlr->txb[ctlr->txhead] = b;

		/* make sure the whole packet is in memory */
		cachedwbse(b->rp, len);
		l2cacheuwbse(b->rp, len);

		/* set up the transmit descriptor */
		t->buf = PADDR(b->rp);
		t->countchk = len << 16;
		coherence();

		/* and fire */
		t->cs = TCSpadding | TCSfirst | TCSlast | TCSdmaown |
			TCSenableintr;
		coherence();

		kick++;
		ctlr->txhead = NEXT(ctlr->txhead, Ntx);
	}
	if (kick) {
		txkick(ctlr);

		reg->irqmask  |= Itxendq(Qno);
		reg->irqemask |= IEtxerrq(Qno) | IEtxunderrun;
	}
	iunlock(ctlr);
}

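txkick is what actually (re)starts the DMA engine once descriptors are posted.
A hedged sketch under the assumption that the transmit-queue command register
and its Txqon bit follow the usual Marvell GbE convention; the register and
bit names here are hypothetical stand-ins, not taken from the driver:

/* hypothetical sketch: nudge the GbE tx DMA engine for queue Qno */
static void
txkick(Ctlr *ctlr)
{
	ctlr->reg->tqc = Txqon(Qno);	/* hypothetical register/bit names */
	coherence();
}
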
void
wdogreset(void)
{
	*Rstwdogtimer = Basetickfreq / 100;
	*Rstwdogctl = Wdogreset;	/* wake the dog */
	coherence();
	*Rstwdogtimer = Basetickfreq / 10000;
	coherence();
}

static void
ehcireset(Ctlr *ctlr)
{
	Eopio *opio;
	int i;

	ilock(ctlr);
	dprint("ehci %#p reset\n", ctlr->capio);
	opio = ctlr->opio;

	/*
	 * Turn off legacy mode.  Some controllers won't
	 * interrupt us as expected otherwise.
	 */
	ehcirun(ctlr, 0);

	/*
	 * Clear the high 32 bits of address signals if it's 64-bit capable.
	 * This is probably not needed but it does not hurt and others do it.
	 */
	if((ctlr->capio->capparms & C64) != 0){
		dprint("ehci: 64 bits\n");
		opio->seg = 0;
	}

	if(ehcidebugcapio != ctlr->capio){
		opio->cmd |= Chcreset;	/* controller reset */
		coherence();
		for(i = 0; i < 100; i++){
			if((opio->cmd & Chcreset) == 0)
				break;
			delay(1);
		}
		if(i == 100)
			print("ehci %#p controller reset timed out\n",
				ctlr->capio);
	}

	/* requesting more interrupts per µframe may miss interrupts */
	opio->cmd &= ~Citcmask;
	opio->cmd |= 1 << Citcshift;	/* max of 1 intr. per 125 µs */
	coherence();
	switch(opio->cmd & Cflsmask){
	case Cfls1024:
		ctlr->nframes = 1024;
		break;
	case Cfls512:
		ctlr->nframes = 512;
		break;
	case Cfls256:
		ctlr->nframes = 256;
		break;
	default:
		panic("ehci: unknown fls %ld", opio->cmd & Cflsmask);
	}
	coherence();
	dprint("ehci: %d frames\n", ctlr->nframes);
	iunlock(ctlr);
}

static void
clockreset(Timerregs *tn)
{
	if (probeaddr((uintptr)&tn->ticpcfg) < 0)
		panic("no clock at %#p", tn);
	tn->ticpcfg = Softreset | Noidle;
	coherence();
	resetwait(tn);
	tn->tier = tn->tclr = 0;
	coherence();
}

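clockreset (and wdogoff below) wait for the peripheral to come out of soft
reset via resetwait.  A hedged sketch of the shape of that helper; the status
register and bit names (tistat, Resetdone) are made-up stand-ins for the real
omap names, and a real version bounds the spin so a dead timer cannot hang
the kernel:

/*
 * hypothetical sketch: spin, with a bound, until the timer reports
 * that its soft reset has completed.  tistat and Resetdone are
 * hypothetical names for the real omap register and bit.
 */
static void
resetwait(Timerregs *tn)
{
	long bound;

	for (bound = 1000*1000; !(tn->tistat & Resetdone) && bound > 0; bound--)
		;
	if (bound <= 0)
		iprint("timer at %#p: reset didn't complete\n", tn);
}
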
void
scuon(void)
{
	Scu *scu = (Scu *)soc.scu;

	if (scu->ctl & Scuenable)
		return;
	scu->inval = MASK(16);
	coherence();
	scu->ctl = Scuparity | Scuenable | Specfill;
	coherence();
}

/* called by trap to handle irq interrupts. */
static void
irq(Ureg* ureg)
{
	Vctl *v;

	for(v = vctl; v; v = v->next)
		if(*v->reg & v->mask) {
			coherence();
			v->f(ureg, v->a);
			coherence();
		}
}

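irq (and fiq below) walk a chain of Vctl records.  A sketch of the fields
those handlers assume, reconstructed from their use here rather than from the
real declaration:

/* hedged sketch of Vctl, inferred from its use in irq() and fiq() */
typedef struct Vctl Vctl;
struct Vctl {
	Vctl	*next;			/* chain of handlers */
	ulong	*reg;			/* interrupt status register */
	ulong	mask;			/* bit(s) identifying this source */
	void	(*f)(Ureg*, void*);	/* handler */
	void	*a;			/* handler argument */
};
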
int
llfifointr(ulong bit)
{
	ulong len, sts;
	Ether *ether;
	RingBuf *rb;
	static uchar zaddrs[Eaddrlen * 2];

	sts = frp->isr;
	if (sts == 0)
		return 0;		/* not for me */
	ether = llether;

	/* it's important to drain all packets in the rx fifo */
	while ((frp->rdfo & Wordcnt) != 0) {
		assert((frp->rdfo & ~Wordcnt) == 0);
		len = frp->rlf & Bytecnt;	/* read rlf from fifo */
		assert((len & ~Bytecnt) == 0);
		assert(len > 0 && len <= ETHERMAXTU);
		rb = &ether->rb[ether->ri];
		if (rb->owner == Interface) {
			/* from rx fifo into ring buffer */
			fifocpy(rb->pkt, &frp->rdfd, len, Dinc);
			if (memcmp(rb->pkt, zaddrs, sizeof zaddrs) == 0) {
				iprint("ether header with all-zero mac "
					"addresses\n");
				continue;
			}
			rb->len = len;
			rb->owner = Host;
			coherence();
			ether->ri = NEXT(ether->ri, ether->nrb);
			coherence();
		} else {
			discardinpkt(len);
			/* not too informative during booting */
			iprint("llfifo: no buffer for input pkt\n");
		}
	}
	if (sts & Tc)
		ether->tbusy = 0;
	ether->transmit(ether);

	frp->isr = sts;			/* extinguish intr source */
	coherence();
	intrack(bit);

	sts &= ~(Tc | Rc);
	if (sts)
		iprint("llfifo isr %#lux\n", sts);
	return 1;
}

static void
wdogoff(Timerregs *tn)
{
	resetwait(tn);
	wdogwrss(tn, 0xaaaa);		/* magic off sequence */
	wdogwrss(tn, 0x5555);
	tn->tldr = 1;
	coherence();
	tn->tcrr = 1;			/* paranoia */
	coherence();
}

void
llfifoinit(Ether *ether)
{
	llether = ether;
	frp->ier = 0;
	frp->isr = frp->isr;		/* extinguish intr source */
	coherence();
	intrenable(Intllfifo, llfifointr);
	coherence();
	frp->ier = Rc | Tc;
	coherence();
}

/*
 * called direct from intr.s to handle fiq interrupt.
 */
void
fiq(Ureg *ureg)
{
	Vctl *v;

	v = vfiq;
	if(v == nil)
		panic("unexpected item in bagging area");

	m->intr++;
	ureg->pc -= 4;
	coherence();
	v->f(ureg, v->a);
	coherence();
}

static void
clockson(void)
{
	Clkrst *clk = (Clkrst *)soc.clkrst;

	/* enable all by clearing resets */
	clk->rstdevl = clk->rstdevh = clk->rstdevu = 0;
	coherence();
	clk->clkoutl = clk->clkouth = clk->clkoutu = ~0; /* enable all clocks */
	coherence();
	clk->rstsrc = Wdcpurst | Wdcoprst | Wdsysrst | Wdena;
	coherence();
}

/* addresses are physical */
int
dmastart(void *to, int tmode, void *from, int fmode, uint len,
	Rendez *rend, int *done)
{
	int irq, chan;
	uint ruplen;
	Dchan *cp;
	Regs *regs = (Regs *)PHYSSDMA;
	static Lock alloclck;

	/* allocate free irq (and chan) */
	ilock(&alloclck);
	for (irq = 0; irq < Nirq && xfer[irq].rend != nil; irq++)
		;
	if (irq >= Nirq)
		panic("dmastart: no available irqs; too many concurrent dmas");
	chan = irq;
	xfer[irq].rend = rend;		/* for wakeup at intr time */
	xfer[irq].done = done;
	*done = 0;
	iunlock(&alloclck);

	ruplen = ROUNDUP(len, sizeof(ulong));
	assert(to != from);

	cp = regs->chan + chan;
	cp->ccr &= ~Enable;		/* paranoia */
	cp->cicr = 0;
	regs->irqen[irq] &= ~(1 << chan);
	coherence();

	cp->csdp = 2;			/* 2 = log2(sizeof(ulong)) */
	cp->cssa = (uintptr)from;
	cp->cdsa = (uintptr)to;
	cp->ccr = tmode << 14 | fmode << 12;
	cp->csei = cp->csfi = cp->cdei = cp->cdfi = 1;
	cp->cen = ruplen / sizeof(ulong); /* ulongs / frame */
	cp->cfn = 1;			/* 1 frame / xfer */

	cp->cicr = Blocki;		/* intr at end of block */
	regs->irqen[irq] |= 1 << chan;
	coherence();
	cp->ccr |= Enable;		/* fire! */
	coherence();
	return irq;
}

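A caller pairs dmastart with sleep on the same Rendez/done pair that dmaintr
(above) fires.  A minimal usage sketch under those assumptions; dmadone is a
hypothetical predicate written for this example, and Postincr is a
hypothetical name for the post-increment addressing mode:

/* hypothetical sleep predicate: transfer-complete flag set by dmaintr */
static int
dmadone(void *a)
{
	return *(int *)a;
}

/* hedged usage sketch: one synchronous dma copy of len bytes */
static void
dmacopy(void *to, void *from, uint len)
{
	int done;
	static Rendez rend;

	dmastart(to, Postincr, from, Postincr, len, &rend, &done);
	sleep(&rend, dmadone, &done);	/* dmaintr sets done, wakes us */
}
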
/*
 * Copy the new kernel to its correct location in physical memory,
 * flush caches, ignore TLBs (we're in KSEG0 space), and jump to
 * the start of the kernel.
 */
void
main(ulong aentry, ulong acode, ulong asize, ulong argc)
{
	void *kernel;
	static ulong entry, code, size;

	putc('B'); putc('o'); putc('o'); putc('t');

	/* copy args to heap before moving stack to before a.out header */
	entry = aentry;
	code = acode;
	size = asize;
	_argc = argc;			/* argc passed to kernel */
	_env = (ulong)&((char**)CONFARGV)[argc];
	setsp(entry - 0x20 - 4);

	memmove((void *)entry, (void *)code, size);
	cleancache();
	coherence();

	/*
	 * jump to kernel entry point.
	 */
	putc(' ');
	kernel = (void*)entry;
	go(kernel);			/* off we go - never to return */

	putc('?');
	putc('!');
	for(;;)
		;
}

static int
miird(Mii *mii, int pa, int ra)
{
	ulong smi_reg, timeout;
	Gbereg *reg;

	reg = ((Ctlr*)mii->ctlr)->reg;

	/* check params */
	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
		return -1;
	smibusywait(reg, PhysmiBusy);

	/* fill the phy address and register offset and read opcode */
	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff | PhysmiopRd;
	coherence();

	/* wait until the read value is ready */
	timeout = PhysmiTimeout;
	do {
		smi_reg = reg->smi;
		if (timeout-- == 0) {
			MIIDBG("SMI read-valid timeout\n");
			return -1;
		}
	} while (!(smi_reg & PhysmiReadok));

	/* crude fixed delay to let the data settle in the SMI register */
	for (timeout = 0; timeout < PhysmiTimeout; timeout++)
		;
	return reg->smi & Physmidatamask;
}

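The matching write path mirrors miird's parameter checks but needs no
read-valid wait.  A hedged sketch along the lines of the Marvell SMI write
sequence; PhysmiopWr is assumed to exist alongside PhysmiopRd:

/* hedged sketch of the companion SMI write, mirroring miird's checks */
static int
miiwr(Mii *mii, int pa, int ra, int wrdata)
{
	Gbereg *reg;

	reg = ((Ctlr*)mii->ctlr)->reg;

	/* check params */
	if ((pa<<Physmiaddroff) & ~Physmiaddrmask ||
	    (ra<<SmiRegaddroff) & ~SmiRegaddrmask)
		return -1;

	smibusywait(reg, PhysmiBusy);
	reg->smi = pa << Physmiaddroff | ra << SmiRegaddroff |
		PhysmiopWr | (wrdata & Physmidatamask);
	coherence();
	return 0;
}
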
static void
prphwrite(Ctlr *ctlr, uint off, u32int data)
{
	csr32w(ctlr, PrphWaddr, ((sizeof(u32int)-1)<<24) | off);
	coherence();
	csr32w(ctlr, PrphWdata, data);
}

static u32int
prphread(Ctlr *ctlr, uint off)
{
	csr32w(ctlr, PrphRaddr, ((sizeof(u32int)-1)<<24) | off);
	coherence();
	return csr32r(ctlr, PrphRdata);
}

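Together these two give read-modify-write access to registers behind the
indirect periphery window.  A trivial composition sketch (prphsetbits is a
hypothetical helper written for this example):

/* hedged helper sketch: read-modify-write through the periphery window */
static void
prphsetbits(Ctlr *ctlr, uint off, u32int bits)
{
	prphwrite(ctlr, off, prphread(ctlr, off) | bits);
}
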
static void
squidboy(Apic* apic)
{
//	iprint("Hello Squidboy\n");
	machinit();
	fpsavealloc();
	mmuinit();
	cpuidentify();
	cpuidprint();
	checkmtrr();

	apic->online = 1;
	coherence();

	lapicinit(apic);
	lapiconline();
	syncclock();
	timersinit();
	fpoff();

	lock(&active);
	active.machs |= 1<<m->machno;
	unlock(&active);

	while(!active.thunderbirdsarego)
		microdelay(100);

	schedinit();
}

/*
 * this is all a bit magic.  the soc.exceptvec register is effectively
 * undocumented.  we had to look at linux and experiment, alas.  this is
 * the sort of thing that should be standardised as part of the cortex
 * mpcore spec.  even intel document their equivalent procedure.
 */
int
startcpu(uint cpu)
{
	int i, r;
	ulong oldvec, rstaddr;
	ulong *evp = (ulong *)soc.exceptvec;	/* magic */

	r = 0;
	if (getncpus() < 2 || cpu == m->machno ||
	    cpu >= MAXMACH || cpu >= navailcpus)
		return -1;

	oldvec = *evp;
	l1cache->wb();			/* start next cpu w same view of ram */
	*evp = rstaddr = PADDR(_vrst);	/* will start cpu executing at _vrst */
	coherence();
	l1cache->wb();

	unfreeze(cpu);

	for (i = 2000; i > 0 && *evp == rstaddr; i--)
		delay(1);
	if (i <= 0 || *evp != cpu) {
		iprint("cpu%d: didn't start!\n", cpu);
		stopcpu(cpu);		/* make sure it's stopped */
		r = -1;
	}
	*evp = oldvec;
	return r;
}

void
unlock(Lock *l)
{
	Proc *up = externup();
	uint64_t x;

	if(LOCKCYCLES){
		cycles(&x);
		l->lockcycles = x - l->lockcycles;
		if(l->lockcycles > maxlockcycles){
			maxlockcycles = l->lockcycles;
			maxlockpc = l->_pc;
		}
	}

	if(l->key == 0)
		print("unlock: not locked: pc %#p\n", getcallerpc());
	if(l->isilock)
		print("unlock of ilock: pc %#p, held by %#p\n",
			getcallerpc(), l->_pc);
	if(l->p != up)
		print("unlock: up changed: pc %#p, acquired at pc %#p, lock p %#p, unlock up %#p\n",
			getcallerpc(), l->_pc, l->p, up);
	l->m = nil;
	l->key = 0;
	coherence();

	if(up && adec(&up->nlocks) == 0 && up->delaysched && islo()){
		/*
		 * Call sched if the need arose while locks were held
		 * But, don't do it from interrupt routines, hence the islo() test
		 */
		sched();
	}
}

void
mmuinit(void)
{
	uintptr pa;
	PTE *l1, *l2;

	pa = ttbget();
	l1 = KADDR(pa);

	/* redundant with l.s; only covers first MB of 17MB */
	l1[L1X(VIRTIO)] = PHYSIO|Dom0|L1AP(Krw)|Section;

	idmap(l1, PHYSETHER);		/* igep 9221 ethernet regs */
	idmap(l1, PHYSL4PROT);
	idmap(l1, PHYSL3);
	idmap(l1, PHYSSMS);
	idmap(l1, PHYSDRC);
	idmap(l1, PHYSGPMC);

	/* map high vectors to start of dram, but only 4K, not 1MB */
	pa -= MACHSIZE + 2*1024;
	l2 = KADDR(pa);
	memset(l2, 0, 1024);
	/* vectors step on u-boot, but so do page tables */
	l2[L2X(HVECTORS)] = PHYSDRAM|L2AP(Krw)|Small;
	l1[L1X(HVECTORS)] = pa|Dom0|Coarse;	/* vectors -> ttb-machsize-2k */
	coherence();

	cacheuwbinv();
	l2cacheuwbinv();
	mmuinvalidate();

	m->mmul1 = l1;
//	mmudump(l1);			/* DEBUG */
}

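idmap identity-maps the 1MB section containing a device's registers.  A
minimal sketch, consistent with the section descriptor built for VIRTIO above
(the real definition may differ in attribute bits):

/* hedged sketch: identity-map the 1MB section containing pa */
static void
idmap(PTE *l1, uintptr pa)
{
	l1[L1X(pa)] = pa | Dom0 | L1AP(Krw) | Section;
}
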
void
reboot(void *entry, void *code, ulong size)
{
	void (*f)(ulong, ulong, ulong);
	ulong *pdb;

	writeconf();

	/*
	 * the boot processor is cpu0.  execute this function on it
	 * so that the new kernel has the same cpu0.  this only matters
	 * because the hardware has a notion of which processor was the
	 * boot processor and we look at it at start up.
	 */
	if (m->machno != 0) {
		procwired(up, 0);
		sched();
	}

	shutdown(0);

	/*
	 * should be the only processor running now
	 */
	if (m->machno != 0)
		print("on cpu%d (not 0)!\n", m->machno);
	if (active.machs)
		print("still have active ap processors!\n");

	print("shutting down...\n");
	delay(200);

	splhi();

	/* turn off buffered serial console */
	serialoq = nil;

	/* shut down devices */
	chandevshutdown();
	arch->introff();

	/*
	 * Modify the machine page table to directly map the low 4MB of memory.
	 * This allows the reboot code to turn off the page mapping.
	 */
	pdb = m->pdb;
	pdb[PDX(0)] = pdb[PDX(KZERO)];
	mmuflushtlb(PADDR(pdb));

	/* set up the reboot trampoline function */
	f = (void*)REBOOTADDR;
	memmove(f, rebootcode, sizeof(rebootcode));

	print("rebooting...\n");

	/* off we go - never to return */
	coherence();
	(*f)(PADDR(entry), PADDR(code), size);
}