/*
 * Set a 1-1 map of virtual to physical memory, except:
 * doubly-map page0 at the alternative interrupt vector address,
 * doubly-map physical memory at KZERO+256*MB as uncached but buffered, and
 * disable access to 0 (nil pointers).
 */
void
mmuinit(void)
{
	int i;
	ulong *ttb, *ptable, va;

	ttb = (ulong*)KTTB;	/* level-1 translation table base */

	/* invalidate the sections covering the first 256MB so nil pointers fault */
	for(i=0; i<MmuL1x(0x10000000); i++)
		ttb[i] = 0;

	/* 1-1 map the remaining space as 1MB sections (0xC10: descriptor AP/domain bits -- TODO confirm against ARM ARM) */
	for(; i < 0x1000; i++)
		ttb[i] = (i<<20) | 0xC10 | MmuL1section;

	for(va = KZERO; va < KZERO+64*MB; va += MB)
		ttb[MmuL1x(va)] |= MmuWB | MmuIDC;	/* DRAM is cacheable */

	/* doubly-map the 64MB of DRAM, uncached, at UCDRAMZERO */
	for(i = 0; i < 64*MB; i += MB)
		ttb[MmuL1x(UCDRAMZERO+i)] = (PHYSMEM0+i) | 0xC10 | MmuL1section;

	/* TO DO: make the text read only */

	for(va = KZERO; va < KZERO+64*MB; va += MB)
		ttb[MmuL1x(va|MINICACHED)] = va | 0xC10 | MmuIDC | MmuL1section;	/* cached but unbuffered (thus minicache) for frame buffer */

	ttb[MmuL1x(DCFADDR)] |= MmuIDC | MmuWB;	/* cached and buffered for cache writeback */
	ttb[MmuL1x(MCFADDR)] |= MmuIDC;	/* cached and unbuffered for minicache writeback */

	/* remap flash */
	for(i=0; i<32*MB; i+=MB)
		ttb[MmuL1x(FLASHMEM+i)] = (PHYSFLASH0+i) | 0xC10 | MmuL1section;	/* we'll make flash uncached for now */

	/*
	 * build page table for alternative vector page, mapping trap vectors in *page0
	 */
	ptable = xspanalloc(SectionPages*sizeof(*ptable), PtAlign, 0);
	ptable[MmuL2x(AIVECADDR)] = PADDR(page0) | MmuL2AP(MmuAPsrw) | MmuWB | MmuIDC | MmuL2small;
	ttb[MmuL1x(AIVECADDR)] = PADDR(ptable) | MmuL1page;

	/* install the table, set the domain access control, and turn everything on */
	mmuputttb(KTTB);
	mmuputdac(1);	/* client */
	mmuenable(CpCaltivec | CpCIcache | CpCsystem | (1<<6) | CpCd32 | CpCi32 | CpCwb | CpCDcache | CpCmmu);
}
/*
 * putmmu: install a level-2 page table entry mapping va -> pa,
 * in both the kernel's level-1 table and the current process's,
 * allocating a coarse level-2 table on first use of a section.
 * A pa without PTEVALID invalidates any existing mapping.
 */
void
putmmu(ulong va, ulong pa, Page *)
{
	ulong *kernl1, *procl1, *pte;
	int secx, pgx;

	secx = va / MiB;		/* level-1 (section) index */
	pgx = (va % MiB) / BY2PG;	/* level-2 (small page) index */
	kernl1 = KADDR(L1PT);
	procl1 = up->l1;
	if(kernl1[secx] != 0)
		pte = KADDR(ROUNDDN(kernl1[secx], L2SIZ));
	else{
		/* no level-2 table yet for this section */
		if((pa & PTEVALID) == 0)
			return;		/* nothing to invalidate */
		pte = xspanalloc(L2SIZ, L2SIZ, 0);
		kernl1[secx] = procl1[secx] = PADDR(pte) | Coarse;
	}
	pte += pgx;
	if((pa & PTEVALID) == 0){
		/* remove the mapping */
		*pte = 0;
		flushtlb();
		return;
	}
	*pte = ROUNDDN(pa, BY2PG) | Small;
	*pte |= (pa & PTEWRITE) ? L2AP(Urw) : L2AP(Uro);
	if((pa & PTEUNCACHED) == 0)
		*pte |= Buffered | Cached;
	flushtlb();
}
int floppyinit(void) { FDrive *dp; FType *t; ulong maxtsize; int mask; dmainit(DMAchan); floppysetup0(&fl); /* * init dependent parameters */ maxtsize = 0; for(t = floppytype; t < &floppytype[nelem(floppytype)]; t++) { t->cap = t->bytes * t->heads * t->sectors * t->tracks; t->bcode = b2c[t->bytes/128]; t->tsize = t->bytes * t->sectors; if(maxtsize < t->tsize) maxtsize = t->tsize; } fl.selected = fl.d; floppydetach = _floppydetach; floppydetach(); /* * init drives */ mask = 0; for(dp = fl.d; dp < &fl.d[fl.ndrive]; dp++) { dp->dev = dp - fl.d; if(dp->dt == Tnone) continue; mask |= 1<<dp->dev; floppysetdef(dp); dp->cyl = -1; /* because we don't know */ dp->cache = (uchar*)xspanalloc(maxtsize, BY2PG, 64*1024); dp->ccyl = -1; dp->vers = 0; dp->maxtries = 5; } /* * first operation will recalibrate */ fl.confused = 1; floppysetup1(&fl); /* to turn the motor off when inactive */ alarm(5*1000, floppyalarm, 0); return mask; }
void screeninit(void) { int i; /* map the lcd regs into the kernel's virtual space */ lcd = (struct sa1110regs*)mapspecial(LCDREGS, sizeof(struct sa1110regs));; framebuf = xspanalloc(sizeof *framebuf, 0x100, 0); vscreen = xalloc(sizeof(ushort)*Wid*Ht); lcdpower(1); lcdinit(); gscreen = &xgscreen; xgdata.ref = 1; i = 0; if (landscape) { gscreen->r = Rect(0, 0, Ht, Wid); gscreen->clipr = gscreen->r; gscreen->width = Ht/2; xgdata.bdata = (uchar *)framebuf->pixel; while (i < Wid*Ht*1/3) framebuf->pixel[i++] = 0xf800; /* red */ while (i < Wid*Ht*2/3) framebuf->pixel[i++] = 0xffff; /* white */ while (i < Wid*Ht*3/3) framebuf->pixel[i++] = 0x001f; /* blue */ } else { gscreen->r = Rect(0, 0, Wid, Ht); gscreen->clipr = gscreen->r; gscreen->width = Wid/2; xgdata.bdata = (uchar *)vscreen; while (i < Wid*Ht*1/3) vscreen[i++] = 0xf800; /* red */ while (i < Wid*Ht*2/3) vscreen[i++] = 0xffff; /* white */ while (i < Wid*Ht*3/3) vscreen[i++] = 0x001f; /* blue */ flushmemscreen(gscreen->r); } memimageinit(); memdefont = getmemdefont(); out.pos.x = MINX; out.pos.y = 0; out.bwid = memdefont->info[' '].width; blanktime = 3; /* minutes */ screenwin(); // screenputs = bitsyscreenputs; screenputs = nil; }
/*
 * screeninit: allocate a 24-bit (BGR24) frame buffer, attach it to
 * the global screen image, and initialise the memimage library.
 */
void
screeninit(void)
{
	uchar *pixels;

	/* 3 bytes per pixel; 64-byte aligned */
	pixels = xspanalloc(Dx(xgscreen.r) * Dy(xgscreen.r) * 3, 64, 0);
	print("%p\n", PADDR(pixels));

	memsetchan(&xgscreen, BGR24);
	conf.monitor = 1;

	/* hook the buffer up as the screen image's backing store */
	xgdata.bdata = pixels;
	xgdata.ref = 1;
	gscreen = &xgscreen;
	gscreen->width = wordsperline(gscreen->r, gscreen->depth);

	memimageinit();
}
/*
 * mmuinit: finish building the kernel page tables under Xen.
 * Only memory up to xentop is mapped at entry; map the remainder of
 * conf.mem[0], then switch onto the Mach stack via taskswitch.
 */
void
mmuinit(void)
{
	ulong *pte, npgs, pa;

	if(paemode){
		int i;

		/* replace the PDPT with a fresh 32-byte-aligned copy */
		xenpdpt = (uvlong*)m->pdb;
		m->pdb = xspanalloc(32, 32, 0);
		/* clear "reserved" bits in initial page directory pointers -- Xen bug? */
		for(i = 0; i < 4; i++)
			((uvlong*)m->pdb)[i] = xenpdpt[i] & ~0x1E6LL;
	}

	/*
	 * So far only memory up to xentop is mapped, map the rest.
	 * We cant use large pages because our contiguous PA space
	 * is not necessarily contiguous in MA.
	 */
	npgs = conf.mem[0].npage;
	for(pa=conf.mem[0].base; npgs; npgs--, pa+=BY2PG) {
		/* final argument 1 presumably asks mmuwalk to create missing
		 * intermediate tables -- TODO confirm against mmuwalk */
		pte = mmuwalk(m->pdb, (ulong)KADDR(pa), 2, 1);
		if(!pte)
			panic("mmuinit");
		/* xenupdate presumably routes the PTE write through the
		 * hypervisor; verify against its definition */
		xenupdate(pte, pa|PTEVALID|PTEWRITE);
	}

	memglobal();

#ifdef we_may_eventually_want_this
	/* make kernel text unwritable */
	for(x = KTZERO; x < (ulong)etext; x += BY2PG){
		p = mmuwalk(m->pdb, x, 2, 0);
		if(p == nil)
			panic("mmuinit");
		*p &= ~PTEWRITE;
	}
#endif

	taskswitch(0, (ulong)m + BY2PG);
}
/*
 * igbeinit: bring an Intel 8254x controller to operational state:
 * program the station address, clear the multicast filter, set up the
 * receive and transmit descriptor rings, enable checksum offload and
 * the transmitter, then wait for the PHY link and apply flow control.
 * Always returns 0, even when the MII or link is not yet up.
 */
static int
igbeinit(Ether* edev)
{
	int csr, i, r, ctrl, timeo;
	MiiPhy *phy;
	Ctlr *ctlr;

	ctlr = edev->ctlr;

	/*
	 * Set up the receive addresses.
	 * There are 16 addresses. The first should be the MAC address.
	 * The others are cleared and not marked valid (MS bit of Rah).
	 */
	csr = (edev->ea[3]<<24)|(edev->ea[2]<<16)|(edev->ea[1]<<8)|edev->ea[0];
	csr32w(ctlr, Ral, csr);
	csr = 0x80000000|(edev->ea[5]<<8)|edev->ea[4];	/* 0x80000000: address-valid bit */
	csr32w(ctlr, Rah, csr);
	for(i = 1; i < 16; i++){
		csr32w(ctlr, Ral+i*8, 0);
		csr32w(ctlr, Rah+i*8, 0);
	}

	/*
	 * Clear the Multicast Table Array.
	 * It's a 4096 bit vector accessed as 128 32-bit registers.
	 */
	for(i = 0; i < 128; i++)
		csr32w(ctlr, Mta+i*4, 0);

	/*
	 * Receive initialisation.
	 * Mostly defaults from the datasheet, will
	 * need some tuning for performance:
	 *	Rctl	descriptor minimum threshold size
	 *		discard pause frames
	 *		strip CRC
	 *	Rdtr	interrupt delay
	 *	Rxdctl	all the thresholds
	 */
	csr32w(ctlr, Rctl, 0);	/* receiver stays disabled until later */

	/*
	 * Allocate the descriptor ring and load its
	 * address and length into the NIC.
	 */
	ctlr->rdba = xspanalloc(Nrdesc*sizeof(Rdesc), 128 /* was 16 */, 0);
	csr32w(ctlr, Rdbal, PCIWADDR(ctlr->rdba));
	csr32w(ctlr, Rdbah, 0);
	csr32w(ctlr, Rdlen, Nrdesc*sizeof(Rdesc));

	/*
	 * Initialise the ring head and tail pointers and
	 * populate the ring with Blocks.
	 * The datasheet says the tail pointer is set to beyond the last
	 * descriptor hardware can process, which implies the initial
	 * condition is Rdh == Rdt. However, experience shows Rdt must
	 * always be 'behind' Rdh; the replenish routine ensures this.
	 */
	ctlr->rdh = 0;
	csr32w(ctlr, Rdh, ctlr->rdh);
	ctlr->rdt = 0;
	csr32w(ctlr, Rdt, ctlr->rdt);
	ctlr->rb = malloc(sizeof(Block*)*Nrdesc);
	igbereplenish(ctlr);

	/*
	 * Set up Rctl but don't enable receiver (yet).
	 */
	csr32w(ctlr, Rdtr, 0);
	switch(ctlr->id){
	case i82540em:
	case i82540eplp:
	case i82541gi:
	case i82541pi:
	case i82546gb:
	case i82546eb:
	case i82547gi:
		/* Radv: absolute receive delay timer, present on these parts -- per datasheet */
		csr32w(ctlr, Radv, 64);
		break;
	}
	csr32w(ctlr, Rxdctl, (8<<WthreshSHIFT)|(8<<HthreshSHIFT)|4);

	/*
	 * Enable checksum offload.
	 */
	csr32w(ctlr, Rxcsum, Tuofl|Ipofl|(ETHERHDRSIZE<<PcssSHIFT));
	csr32w(ctlr, Rctl, Dpf|Bsize2048|Bam|RdtmsHALF);

	/*
	 * VirtualBox does not like Rxt0,
	 * it continually interrupts.
	 */
	ctlr->im |= /*Rxt0|*/Rxo|Rxdmt0|Rxseq;

	/*
	 * Transmit initialisation.
	 * Mostly defaults from the datasheet, will
	 * need some tuning for performance. The normal mode will
	 * be full-duplex and things to tune for half-duplex are
	 *	Tctl	re-transmit on late collision
	 *	Tipg	all IPG times
	 *	Tbt	burst timer
	 *	Ait	adaptive IFS throttle
	 * and in general
	 *	Txdmac	packet prefetching
	 *	Ett	transmit early threshold
	 *	Tidv	interrupt delay value
	 *	Txdctl	all the thresholds
	 */
	csr32w(ctlr, Tctl, (0x0F<<CtSHIFT)|Psp|(66<<ColdSHIFT));	/* Fd */
	switch(ctlr->id){
	default:
		r = 6;
		break;
	case i82543gc:
	case i82544ei:
	case i82547ei:
	case i82540em:
	case i82540eplp:
	case i82541gi:
	case i82541pi:
	case i82546gb:
	case i82546eb:
	case i82547gi:
		r = 8;
		break;
	}
	csr32w(ctlr, Tipg, (6<<20)|(8<<10)|r);
	csr32w(ctlr, Ait, 0);
	csr32w(ctlr, Txdmac, 0);
	csr32w(ctlr, Tidv, 128);

	/*
	 * Allocate the descriptor ring and load its
	 * address and length into the NIC.
	 */
	ctlr->tdba = xspanalloc(Ntdesc*sizeof(Tdesc), 128 /* was 16 */, 0);
	csr32w(ctlr, Tdbal, PCIWADDR(ctlr->tdba));
	csr32w(ctlr, Tdbah, 0);
	csr32w(ctlr, Tdlen, Ntdesc*sizeof(Tdesc));

	/*
	 * Initialise the ring head and tail pointers.
	 */
	ctlr->tdh = 0;
	csr32w(ctlr, Tdh, ctlr->tdh);
	ctlr->tdt = 0;
	csr32w(ctlr, Tdt, ctlr->tdt);
	ctlr->tb = malloc(sizeof(Block*)*Ntdesc);
//	ctlr->im |= Txqe|Txdw;

	r = (4<<WthreshSHIFT)|(4<<HthreshSHIFT)|(8<<PthreshSHIFT);
	switch(ctlr->id){
	default:
		break;
	case i82540em:
	case i82540eplp:
	case i82547gi:
	case i82541pi:
	case i82546gb:
	case i82546eb:
	case i82541gi:
		/* these parts keep hardware's other Txdctl bits and set Gran */
		r = csr32r(ctlr, Txdctl);
		r &= ~WthreshMASK;
		r |= Gran|(4<<WthreshSHIFT);
		csr32w(ctlr, Tadv, 64);
		break;
	}
	csr32w(ctlr, Txdctl, r);

	/* enable the transmitter */
	r = csr32r(ctlr, Tctl);
	r |= Ten;
	csr32w(ctlr, Tctl, r);

	igbeim(ctlr, ctlr->im);

	if(ctlr->mii == nil || ctlr->mii->curphy == nil) {
		print("igbe: no mii (yet)\n");
		return 0;
	}
	/* wait for the link to come up (up to 35 seconds) */
	for(timeo = 0; timeo < 3500; timeo++){
		if(miistatus(ctlr->mii) == 0)
			break;
		delay(10);
	}
	print("igbe: phy: ");
	phy = ctlr->mii->curphy;
	if (phy->fd)
		print("full duplex");
	else
		print("half duplex");
	print(", %d Mb/s\n", phy->speed);

	/*
	 * Flow control.
	 */
	ctrl = csr32r(ctlr, Ctrl);
	if(phy->rfc)
		ctrl |= Rfce;
	if(phy->tfc)
		ctrl |= Tfce;
	csr32w(ctlr, Ctrl, ctrl);

	return 0;
}
/*
 * mpstartap: boot an application processor.
 * Clones the BP's page tables (giving the AP private MACHADDR and
 * Mach pages), plants the start vector in the bootstrap area, and
 * kicks the AP with the warm-reset + startup-IPI sequence, then
 * waits up to 10 seconds for it to mark itself online.
 */
static void
mpstartap(Apic* apic)
{
	ulong *apbootp, *pdb, *pte;
	Mach *mach, *mach0;
	int i, machno;
	uchar *p;

	mach0 = MACHP(0);

	/*
	 * Initialise the AP page-tables and Mach structure. The page-tables
	 * are the same as for the bootstrap processor with the exception of
	 * the PTE for the Mach structure.
	 * Xspanalloc will panic if an allocation can't be made.
	 */
	p = xspanalloc(4*BY2PG, BY2PG, 0);	/* pdb, MACHADDR table, Mach, gdt */
	pdb = (ulong*)p;
	memmove(pdb, mach0->pdb, BY2PG);
	p += BY2PG;

	/* private copy of the page table that maps MACHADDR */
	if((pte = mmuwalk(pdb, MACHADDR, 1, 0)) == nil)
		return;
	memmove(p, KADDR(PPN(*pte)), BY2PG);
	*pte = PADDR(p)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	/* point the AP's MACHADDR page at its own Mach structure */
	mach = (Mach*)p;
	if((pte = mmuwalk(pdb, MACHADDR, 2, 0)) == nil)
		return;
	*pte = PADDR(mach)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	machno = apic->machno;
	MACHP(machno) = mach;
	mach->machno = machno;
	mach->pdb = pdb;
	mach->gdt = (Segdesc*)p;	/* filled by mmuinit */

	/*
	 * Tell the AP where its kernel vector and pdb are.
	 * The offsets are known in the AP bootstrap code.
	 */
	apbootp = (ulong*)(APBOOTSTRAP+0x08);
	*apbootp++ = (ulong)squidboy;	/* assembler jumps here eventually */
	*apbootp++ = PADDR(pdb);
	*apbootp = (ulong)apic;

	/*
	 * Universal Startup Algorithm.
	 */
	p = KADDR(0x467);	/* warm-reset vector */
	*p++ = PADDR(APBOOTSTRAP);
	*p++ = PADDR(APBOOTSTRAP)>>8;
	i = (PADDR(APBOOTSTRAP) & ~0xFFFF)/16;
	/* code assumes i==0 */
	if(i != 0)
		print("mp: bad APBOOTSTRAP\n");
	*p++ = i;
	*p = i>>8;
	coherence();	/* make the vector writes visible before the IPI */

	nvramwrite(0x0F, 0x0A);	/* shutdown code: warm reset upon init ipi */
	lapicstartap(apic, PADDR(APBOOTSTRAP));
	/* wait up to 10 seconds for the AP to check in */
	for(i = 0; i < 1000; i++){
		if(apic->online)
			break;
		delay(10);
	}
	nvramwrite(0x0F, 0x00);	/* restore normal shutdown code */
}
/*
 * mpstartap: boot an application processor.
 * Clones the BP's page tables (giving the AP private MACHADDR and
 * Mach pages), plants the start vector in the bootstrap area, and
 * kicks the AP with the warm-reset + startup-IPI sequence, then
 * waits up to 10 seconds for its bit to appear in mprdthi.
 */
static void
mpstartap(Apic* apic)
{
	ulong *apbootp, *pdb, *pte;
	Mach *mach, *mach0;
	int i, machno;
	uchar *p;

	mach0 = MACHP(0);

	/* 4 pages: page directory, MACHADDR page table, Mach, gdt */
	p = xspanalloc(4*BY2PG, BY2PG, 0);
	pdb = (ulong*)p;
	memmove(pdb, mach0->pdb, BY2PG);	/* start from the BP's page directory */
	p += BY2PG;

	/* private copy of the page table that maps MACHADDR */
	if((pte = mmuwalk(pdb, MACHADDR, 1, 0)) == nil)
		return;
	memmove(p, KADDR(PPN(*pte)), BY2PG);
	*pte = PADDR(p)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	/* point the AP's MACHADDR page at its own Mach structure */
	mach = (Mach*)p;
	if((pte = mmuwalk(pdb, MACHADDR, 2, 0)) == nil)
		return;
	*pte = PADDR(mach)|PTEWRITE|PTEVALID;
	if(mach0->havepge)
		*pte |= PTEGLOBAL;
	p += BY2PG;

	machno = apic->machno;
	MACHP(machno) = mach;
	mach->machno = machno;
	mach->pdb = pdb;
	mach->gdt = (Segdesc*)p;	/* filled by mmuinit */

	/* tell the AP its kernel vector, pdb and apic; offsets are known
	 * to the bootstrap code */
	apbootp = (ulong*)(APBOOTSTRAP+0x08);
	*apbootp++ = (ulong)squidboy;
	*apbootp++ = PADDR(pdb);
	*apbootp = (ulong)apic;

	/*
	 * Universal Startup Algorithm: plant APBOOTSTRAP's real-mode
	 * address in the BIOS warm-reset vector at 40:67.
	 * NOTE(review): unlike other versions of this routine there is no
	 * coherence() barrier before the startup IPI -- presumably safe
	 * given x86 store ordering, but verify.
	 */
	p = KADDR(0x467);
	*p++ = PADDR(APBOOTSTRAP);
	*p++ = PADDR(APBOOTSTRAP)>>8;
	i = (PADDR(APBOOTSTRAP) & ~0xFFFF)/16;	/* real-mode segment */
	*p++ = i;
	*p = i>>8;

	nvramwrite(0x0F, 0x0A);	/* CMOS shutdown code: warm reset on init IPI */
	lapicstartap(apic, PADDR(APBOOTSTRAP));
	/* wait up to 10 seconds for the AP's bit to appear in mprdthi */
	for(i = 0; i < 1000; i++){
		lock(&mprdthilock);
		if(mprdthi & ((1<<apic->apicno)<<24)){
			unlock(&mprdthilock);
			break;
		}
		unlock(&mprdthilock);
		delay(10);
	}
	nvramwrite(0x0F, 0x00);	/* restore normal shutdown code */
}
/*
 * dmastart: start a transfer on DMA channel chan.
 * dev selects the peripheral DREQ mapping; dir is one of
 * DmaD2M, DmaM2D or DmaM2M. Lazily initialises the channel
 * (register map, control block, interrupt) on first use, then
 * builds the control block in memory and activates the engine.
 */
void
dmastart(int chan, int dev, int dir, void *src, void *dst, int len)
{
	Ctlr *ctlr;
	Cb *cb;
	int ti;

	ctlr = &dma[chan];
	if(ctlr->regs == nil){
		/* first use of this channel */
		ctlr->regs = (u32int*)(DMAREGS + chan*Regsize);
		ctlr->cb = xspanalloc(sizeof(Cb), Cbalign, 0);
		assert(ctlr->cb != nil);
		dmaregs[Enable] |= 1<<chan;
		ctlr->regs[Cs] = Reset;
		while(ctlr->regs[Cs] & Reset)
			;
		intrenable(IRQDMA(chan), dmainterrupt, ctlr, 0, "dma");
	}
	cb = ctlr->cb;
	ti = 0;
	switch(dir){
	case DmaD2M:
		/* device -> memory: invalidate destination cache lines */
		cachedwbinvse(dst, len);
		ti = Srcdreq | Destinc;
		cb->sourcead = DMAIO(src);
		cb->destad = DMAADDR(dst);
		break;
	case DmaM2D:
		/* memory -> device: write back source cache lines */
		cachedwbse(src, len);
		ti = Destdreq | Srcinc;
		cb->sourcead = DMAADDR(src);
		cb->destad = DMAIO(dst);
		break;
	case DmaM2M:
		/* memory -> memory */
		cachedwbse(src, len);
		cachedwbinvse(dst, len);
		ti = Srcinc | Destinc;
		cb->sourcead = DMAADDR(src);
		cb->destad = DMAADDR(dst);
		break;
	}
	cb->ti = ti | dev<<Permapshift | Inten;
	cb->txfrlen = len;
	cb->stride = 0;
	cb->nextconbk = 0;
	/* the engine fetches the control block from memory: write it back */
	cachedwbse(cb, sizeof(Cb));

	ctlr->regs[Cs] = 0;
	microdelay(1);
	ctlr->regs[Conblkad] = DMAADDR(cb);
	DBG print("dma start: %ux %ux %ux %ux %ux %ux\n",
		cb->ti, cb->sourcead, cb->destad, cb->txfrlen, cb->stride, cb->nextconbk);
	DBG print("intstatus %ux\n", dmaregs[Intstatus]);
	dmaregs[Intstatus] = 0;
	ctlr->regs[Cs] = Int;	/* clear any stale interrupt status */
	microdelay(1);
	coherence();
	DBG dumpdregs("before Active", ctlr->regs);
	ctlr->regs[Cs] = Active;	/* go */
	DBG dumpdregs("after Active", ctlr->regs);
}
/*
 * floppyreset: one-time floppy subsystem initialisation.
 * Derives format-dependent parameters, sets up DMA, allocates drive
 * state and per-drive track caches, stops the motors, and leaves the
 * subsystem marked confused so the first operation recalibrates.
 */
static void
floppyreset(void)
{
	FDrive *dp;
	FType *t;
	ulong maxtsize;

	floppysetup0(&fl);
	if(fl.ndrive == 0)
		return;		/* no drives configured */

	/*
	 * init dependent parameters
	 */
	maxtsize = 0;
	for(t = floppytype; t < &floppytype[nelem(floppytype)]; t++){
		t->cap = t->bytes * t->heads * t->sectors * t->tracks;
		t->bcode = b2c[t->bytes/128];
		t->tsize = t->bytes * t->sectors;
		if(maxtsize < t->tsize)
			maxtsize = t->tsize;
	}

	/*
	 * Should check if this fails. Can do so
	 * if there is no space <= 16MB for the DMA
	 * bounce buffer.
	 */
	dmainit(DMAchan, maxtsize);

	/*
	 * allocate the drive storage
	 */
	fl.d = xalloc(fl.ndrive*sizeof(FDrive));
	fl.selected = fl.d;

	/*
	 * stop the motors
	 */
	fl.motor = 0;
	delay(10);
	outb(Pdor, fl.motor | Fintena | Fena);
	delay(10);

	/*
	 * init drives
	 */
	for(dp = fl.d; dp < &fl.d[fl.ndrive]; dp++){
		dp->dev = dp - fl.d;
		dp->dt = T1440kb;	/* default type; presumably refined later -- verify */
		floppysetdef(dp);
		dp->cyl = -1;	/* because we don't know */
		dp->cache = (uchar*)xspanalloc(maxtsize, BY2PG, 64*1024);
		dp->ccyl = -1;	/* track cache holds no cylinder yet */
		dp->vers = 0;
	}

	/*
	 * first operation will recalibrate
	 */
	fl.confused = 1;

	floppysetup1(&fl);
}
/*
 * set up the lance
 */
void
lancesetup(Lance *lp)
{
	KMap *k;
	DMAdev *dma;
	ulong pa, va;
	int i;

	/* map the register pair uncached: rdp at offset 0, rap at offset 2 */
	k = kmappa(ETHER, PTEIO|PTENOCACHE);
	lp->rdp = (void*)(VA(k)+0);
	lp->rap = (void*)(VA(k)+2);

	/* ethernet address comes from the machine's id prom */
	for(i=0; i<6; i++)
		lp->ea[i] = idprom.ea[i];

	/* 2^7 = 128 receive and 128 transmit ring buffers */
	lp->lognrrb = 7;
	lp->logntrb = 7;
	lp->nrrb = 1<<lp->lognrrb;
	lp->ntrb = 1<<lp->logntrb;
	lp->sep = 1;
	lp->busctl = BSWP | ACON | BCON;

	/*
	 * Allocate area for lance init block and descriptor rings
	 */
	pa = PADDR(xspanalloc(BY2PG, BY2PG, 0));

	/* map at LANCESEGM */
	va = kmapdma(pa, BY2PG);
	lp->lanceram = (ushort*)va;
	lp->lm = (Lancemem*)va;

	/*
	 * Allocate space in host memory for the io buffers.
	 */
	i = (lp->nrrb+lp->ntrb)*sizeof(Lancepkt);
	i = (i+(BY2PG-1))/BY2PG;	/* round up to whole pages */
	pa = PADDR(xspanalloc(i*BY2PG, BY2PG, 0));
	va = kmapdma(pa, i*BY2PG);

	/* receive buffers first, then transmit buffers */
	lp->lrp = (Lancepkt*)va;
	lp->rp = (Lancepkt*)va;
	lp->ltp = lp->lrp+lp->nrrb;
	lp->tp = lp->rp+lp->nrrb;

	/* map and set up the DMA engine in front of the lance */
	k = kmappa(DMA, PTEIO|PTENOCACHE);
	dma = (DMAdev*)VA(k);
	dma->base = 0xff;

	/*
	 * for now, let's assume the ROM has left the results of its
	 * auto-sensing
	 */
#ifdef notdef
	if(dma->ecsr & E_TP_select)
		print("Twisted pair ethernet\n");
	else
		print("AUI ethernet\n");
#endif
	microdelay(1);
	dma->ecsr |= E_Int_en|E_Invalidate|E_Dsbl_wr_inval|E_Dsbl_rd_drn;
	microdelay(1);
}