long vbe2init(struct mboothdr *hdr) { struct vbemode *mode = (struct vbemode *)hdr->vbemodeinfo; long bpp = (mode) ? mode->npixbit : 0; long retval; retval = (hdr->flags & GRUBVBE); if (retval) { kprintf("framebuffer @ %x\n", mode->fbadr); vbe2screen.fbuf = (void *)mode->fbadr; vbe2screen.w = mode->xres; vbe2screen.h = mode->yres; vbe2screen.nbpp = bpp; vbe2screen.fmt = ((bpp == 24) ? GFXRGB888 : ((bpp == 16) ? GFXRGB565 : GFXRGB555)); vmmapseg((uint32_t *)&_pagetab, (uint32_t)vbe2screen.fbuf, (uint32_t)vbe2screen.fbuf, (uint32_t)vbe2screen.fbuf + ((bpp == 24) ? mode->xres * mode->yres * 3 : mode->xres * mode->yres * 2), PAGEPRES | PAGEWRITE); } return retval; }
/*
 * Initialise the physical memory pool over the region [base, base + nb).
 *
 * The region start is aligned up to a MEMMIN boundary, then split into
 * MEMMIN-byte blocks.  Two per-block header tables (slab headers, then
 * magazine headers) are carved off the front of the region and
 * identity-mapped; the remainder becomes the pool's allocatable base.
 *
 * NOTE(review): besides filling in *physpool, this also writes the
 * globals memvirtpool and memphyspool -- the parameter and the globals
 * appear to alias the same bookkeeping; confirm against callers.
 *
 * Returns the first address past the header tables (the new pool base).
 */
unsigned long
meminitpool(struct mempool *physpool, uintptr_t base, size_t nb)
{
    uintptr_t adr = base;
//    unsigned long sz = (nb & (MEMMIN - 1)) ? rounddownpow2(nb, MEMMIN) : nb;
    size_t    sz = nb;
    intptr_t  ofs = base & (MEMMIN - 1);
    size_t    nblk;
    size_t    hdrsz;

    /* align the region start up to a MEMMIN boundary, shrinking sz */
    if (ofs) {
        adr += MEMMIN - ofs;
        sz -= adr - base;
    }
    nblk = sz >> MEMMINLOG2;
    /* configure slab headers */
    hdrsz = nblk * sizeof(struct memslab);
    hdrsz = rounduppow2(hdrsz, PAGESIZE);
#if (__KERNEL__)
    kprintf("MEM: reserved %lu bytes for %lu slab headers\n", hdrsz, nblk);
#endif
    /* identity-map the slab-header table */
    vmmapseg((uint32_t *)&_pagetab, adr, adr, adr + hdrsz,
             PAGEPRES | PAGEWRITE);
    physpool->nblk = nblk;
    physpool->blktab = (void *)adr;
    adr += hdrsz;
//    kbzero((void *)adr, hdrsz);
    /* configure magazine headers */
    hdrsz = nblk * sizeof(struct memmag);
    hdrsz = rounduppow2(hdrsz, PAGESIZE);
#if (__KERNEL__)
    kprintf("MEM: reserved %lu bytes for %lu magazine headers\n", hdrsz, nblk);
#endif
    memvirtpool.nblk = nblk;
    memvirtpool.blktab = (void *)adr;
    /* identity-map the magazine-header table */
    vmmapseg((uint32_t *)&_pagetab, adr, adr, adr + hdrsz,
             PAGEPRES | PAGEWRITE);
//    kbzero((void *)adr, hdrsz);
    adr += hdrsz;
    /* both pools start allocating right after the header tables */
    memvirtpool.base = adr;
    memphyspool.base = adr;
#if (__KERNEL__ && (MEMDIAG))
    memdiag(memvirtpool);
#endif

    return adr;
}
void vbeinitscr(void) { struct vbemode *mode = (void *)VBEMODEADR; long npix; vbescreen.mode = mode; vbescreen.nbpp = mode->npixbit; npix = mode->xres * mode->yres; if (vbescreen.nbpp == 32) { vbescreen.pixsize = 4; vbescreen.fbufsize = npix << 2; } else if (vbescreen.nbpp == 24) { vbescreen.pixsize = 3; vbescreen.fbufsize = npix + (npix << 1); } else if (vbescreen.nbpp == 15 || vbescreen.nbpp == 16) { vbescreen.pixsize = 2; vbescreen.fbufsize = npix << 1; } else if (vbescreen.nbpp == 8) { vbescreen.pixsize = 2; vbescreen.fbufsize = npix; } vbescreen.fbuf = (void *)mode->fbadr; vbescreen.w = mode->xres; vbescreen.h = mode->yres; // TODO: set vbescreen->fmt /* identity-map VBE framebuffer */ vmmapseg(&_pagetab, (uint32_t)0xa000, (uint32_t)0xa000, (uint32_t)0xa000 + sizeof(struct vbeinfo), PAGEPRES | PAGEWRITE | PAGENOCACHE | PAGEWIRED); vmmapseg(&_pagetab, (uint32_t)vbescreen.fbuf, (uint32_t)vbescreen.fbuf, (uint32_t)vbescreen.fbuf + vbescreen.fbufsize, PAGEPRES | PAGEWRITE | PAGENOCACHE | PAGEWIRED); #if (VBEMTRR) mtrrsetwrcomb((uint32_t)vbescreen.fbuf, vbescreen.fbufsize); #endif vbeclrscr(GFX_BLACK); return; }
void vbeinitscr(void) { struct vbemode *mode = (void *)VBEMODEADR; vbescreen.fbuf = (void *)mode->fbadr; vbescreen.w = mode->xres; vbescreen.h = mode->yres; vbescreen.nbpp = mode->npixbit; // TODO: set vbescreen->fmt vbescreen.mode = mode; /* identity-map VBE framebuffer */ vmmapseg((uint32_t *)&_pagetab, (uint32_t)vbescreen.fbuf, (uint32_t)vbescreen.fbuf, (uint32_t)vbescreen.fbuf + ((vbescreen.nbpp == 24) ? vbescreen.mode->xres * vbescreen.mode->yres * 3 : vbescreen.mode->xres * vbescreen.mode->yres * 2), PAGEPRES | PAGEWRITE | PAGENOCACHE | PAGEWIRED); return; }
void kinitlong(unsigned long pmemsz) { #if (NEWTMR) uint32_t tmrcnt = 0; #endif /* initialise interrupt management */ #if (VBE) trapinitprot(); #endif /* initialise virtual memory */ vminitlong((uint64_t *)kernpagemapl4tab); #if 0 /* FIXME: map possible device memory */ vmmapseg((uint32_t *)&_pagetab, DEVMEMBASE, DEVMEMBASE, 0xffffffffU, PAGEPRES | PAGEWRITE | PAGENOCACHE); #endif // schedinit(); /* zero kernel BSS segment */ kbzero(&_bssvirt, (uint32_t)&_ebssvirt - (uint32_t)&_bssvirt); /* set kernel I/O permission bitmap to all 1-bits */ kmemset(&kerniomap, 0xff, sizeof(kerniomap)); /* INITIALIZE CONSOLES AND SCREEN */ #if (VBE) vbeinitscr(); #endif #if (VBE) && (NEWFONT) consinit(768 / vbefontw, 1024 / vbefonth); #elif (VBE) consinit(768 >> 3, 1024 >> 3); #endif /* TODO: use memory map from GRUB? */ // vminitphys((uintptr_t)&_epagetab, pmemsz); vminitphys((uintptr_t)&_epagetab, pmemsz); meminit(pmemsz); tssinit(0); #if (VBE) && (NEWFONT) // consinit(768 / vbefontw, 1024 / vbefonth); #elif (VBE) consinit(768 >> 3, 1024 >> 3); #endif #if (SMBIOS) smbiosinit(); #endif #if (PS2DRV) ps2init(); #endif #if (VBE) && (PLASMA) plasmaloop(); #endif #if (VBE) vbeprintinfo(); #endif logoprint(); // vminitphys((uintptr_t)&_ebss, pmemsz - (unsigned long)&_ebss); /* HID devices */ #if (PCI) /* initialise PCI bus driver */ pciinit(); #endif #if (ATA) /* initialise ATA driver */ atainit(); #endif #if (SB16) /* initialise Soundblaster 16 driver */ sb16init(); #endif #if (ACPI) /* initialise ACPI subsystem */ acpiinit(); #endif /* initialise block I/O buffer cache */ if (!bufinit()) { kprintf("failed to allocate buffer cache\n"); while (1) { ; } } /* allocate unused device regions (in 3.5G..4G) */ // pageaddzone(DEVMEMBASE, &vmshmq, 0xffffffffU - DEVMEMBASE + 1); #if (SMP) || (APIC) //#if (SMP) /* multiprocessor initialisation */ // mpinit(); //#endif if (mpncpu == 1) { kprintf("found %ld processor\n", mpncpu); } else { kprintf("found %ld processors\n", mpncpu); } #if (HPET) /* 
initialise high precision event timers */ hpetinit(); #endif #if (NEWTMR) tmrcnt = apicinitcpu(0); #else apicinitcpu(0); #endif #if (IOAPIC) ioapicinit(0); #endif #endif /* SMP || APIC */ #if (SMP) if (mpmultiproc) { mpstart(); } #endif /* CPU interface */ taskinit(); // tssinit(0); // machinit(); /* execution environment */ procinit(PROCKERN); // k_curtask = &k_curproc->task; // sysinit(); kprintf("DMA buffers (%ul x %ul kilobytes) @ 0x%p\n", DMANCHAN, DMACHANBUFSIZE >> 10, DMABUFBASE); kprintf("VM page tables @ 0x%p\n", (unsigned long)&_pagetab); // kprintf("%ld kilobytes physical memory\n", pmemsz >> 10); kprintf("%ld kilobytes kernel memory\n", (uint32_t)&_ebss >> 10); kprintf("%ld kilobytes allocated physical memory (%ld wired, %ld total)\n", (vmpagestat.nwired + vmpagestat.nmapped + vmpagestat.nbuf) << (PAGESIZELOG2 - 10), vmpagestat.nwired << (PAGESIZELOG2 - 10), vmpagestat.nphys << (PAGESIZELOG2 - 10)); k_curcpu = &cputab[0]; cpuinit(k_curcpu); schedinit(); #if (APIC) apicstarttmr(tmrcnt); #else pitinit(); #endif schedloop(); /* NOTREACHED */ }
/*
 * A new file included.
 * If ifiles == NULL, this is the first file and already opened (stdin).
 * Return 0 on success, -1 if file to be included is not found.
 */
int
pushfile(const usch *file, const usch *fn, int idx, void *incs)
{
    struct includ ibuf;    /* include-stack frame lives on this stack */
    struct includ *ic;
    int otrulvl;

    ic = &ibuf;
    ic->next = ifiles;     /* push onto the include stack */

    if (file != NULL) {
        /* open the named include file; -1 means "not found" to caller */
        if ((ic->infil = open((const char *)file, O_RDONLY)) < 0)
            return -1;
        ic->orgfn = ic->fname = file;
        /* error() does not return here */
        if (++inclevel > MAX_INCLEVEL)
            error("limit for nested includes exceeded");
    } else {
        /* first file: already opened as stdin (fd 0) */
        ic->infil = 0;
        ic->orgfn = ic->fname = (const usch *)"<stdin>";
    }
#if LIBVMF
    /* flush and unlock the parent's buffer segment, then map and lock
       a segment for this nesting level */
    if (ifiles) {
        vmmodify(ifiles->vseg);
        vmunlock(ifiles->vseg);
    }
    ic->vseg = vmmapseg(&ibspc, inclevel);
    vmlock(ic->vseg);
#endif
    ifiles = ic;

    ic->ib = getobuf(BINBUF);
    ic->lineno = 1;
    ic->escln = 0;
    /* NOTE(review): ic->curptr is read here but is never assigned in
       this function -- presumably getobuf() or the struct layout
       guarantees it; verify */
    ic->maxread = ic->curptr;
    ic->idx = idx;
    ic->incs = incs;
    ic->fn = fn;
    prtline(1);

    otrulvl = trulvl;

    /* tokenise/process the whole file */
    fastscan();

    /* conditional nesting must balance within a single file */
    if (otrulvl != trulvl || flslvl)
        error("unterminated conditional");

    /* pop the include stack */
    ifiles = ic->next;
    inclevel--;
#if LIBVMF
    vmmodify(ic->vseg);
    vmunlock(ic->vseg);
    ic->ib->ro = 1; /* XXX no free */

    /* re-map and re-lock the parent's buffer segment */
    if (ifiles) {
        ifiles->vseg = vmmapseg(&ibspc, inclevel);
        vmlock(ifiles->vseg);
        ifiles->ib->buf = (usch *)ifiles->vseg->s_cinfo;
    }
#endif
    /* NOTE(review): in the stdin case infil is 0, so this closes
       stdin -- confirm that is acceptable at this point */
    close(ic->infil);
    bufree(ic->ib);
    return 0;
}
/* allocate and initialise buffer cache; called at boot time */ long ioinitbuf(void) { uint8_t *u8ptr; void *ptr = NULL; struct bufblk *blk; struct bufblk *prev; long n; long sz; unsigned long lim; #if (BUFDYNALLOC) sz = BUFNBLK * sizeof(struct bufblk); ptr = memalloc(sz, PAGEWIRED); if (!ptr) { kprintf("failed to allocate buffer cache headers\n"); return 0; } bufhdrtab = ptr; #endif /* allocate block I/O buffer cache */ sz = BUFNBYTE; ptr = memalloc(sz, PAGEWIRED); if (!ptr) { do { sz >>= 1; ptr = memalloc(sz, PAGEWIRED); } while ((sz) >= BUFMINBYTES && !ptr); } if (!ptr) { kprintf("failed to allocate buffer cache\n"); return 0; } #if (__KERNEL__) kprintf("BUF: reserved %lu bytes for buffer cache\n", sz); #endif u8ptr = ptr; lim = (unsigned long)(u8ptr + sz); vmpagestat.nbuf = sz >> BUFMINSIZELOG2; vmpagestat.buf = ptr; vmpagestat.bufend = u8ptr + sz; vmmapseg((uint32_t *)&_pagetab, (uint32_t)ptr, (uint32_t)ptr, (uint32_t)lim, PAGEBUF | PAGEPRES | PAGEWRITE | PAGEWIRED); vmpagestat.nphys += (sz >> PAGESIZELOG2); vmpagestat.nvirt += (sz >> PAGESIZELOG2); vmpagestat.nwire += (sz >> PAGESIZELOG2); vmpagestat.nbuf += (sz >> PAGESIZELOG2); kprintf("BUF: mapped buffer cache to %lx..%lx\n", (unsigned long)ptr, (unsigned long)(lim - 1)); if (ptr) { /* zero buffer cache */ kbzero(ptr, sz); /* initialise buffer headers */ n = sz >> BUFMINSIZELOG2; blk = &bufhdrtab[0]; blk->flg = BUFMINSIZELOG2; blk->data = u8ptr; // deqappend(blk, &buffreelist.head); u8ptr += BUFMINSIZE; prev = blk; blk++; while (--n) { prev->next = blk; blk->flg = BUFMINSIZELOG2; blk->data = u8ptr; // deqappend(blk, &buffreelist.head); u8ptr += BUFMINSIZE; blk++; prev = blk; } buffreelist.head = ptr; bufzone = ptr; bufnbyte = sz; } #if 0 if (ptr) { /* allocate and zero buffer cache */ kbzero(ptr, sz); /* initialise buffer headers */ n = sz >> BUFMINSIZELOG2; blk = &bufhdrtab[n - 1]; u8ptr += sz; while (n--) { u8ptr -= BUFMINSIZE; blk->data = u8ptr; deqpush(blk, &buffreelist.head); blk--; } bufzone = ptr; 
bufnbyte = sz; } #endif return 1; }