/* * - allocate the max of 32 * 4K buffers for both input and output * - receive interrupts on completion of every 16 buffers */ long ac97initbuf(void) { long l = AC97NBUF; struct ac97bufdesc *inbuf = ac97drv.inbuftab; struct ac97bufdesc *outbuf = ac97drv.outbuftab; uint8_t *inptr; uint8_t *outptr; /* allocate input buffer */ inptr = kwalloc(AC97NBUF * AC97BUFSIZE); if (!inptr) { kprintf("AC97: failed to allocate audio input buffer\n"); return 0; } /* allocate output buffer */ outptr = kwalloc(AC97NBUF * AC97BUFSIZE); if (!outptr) { kprintf("AC97: failed to allocate audio output buffer\n"); kfree(inptr); return 0; } /* clear buffers */ kbzero(inptr, AC97NBUF * AC97BUFSIZE); kbzero(outptr, AC97NBUF * AC97BUFSIZE); /* set up buffer descriptors */ while (l--) { inbuf->adr = (uint32_t)inptr; inbuf->info = AC97BUFSIZE - 1; outbuf->adr = (uint32_t)outptr; outbuf->info = AC97BUFSIZE - 1; inbuf++; outbuf++; inptr += AC97BUFSIZE; outptr += AC97BUFSIZE; } return 1; }
void vbeinit(void) { struct realregs *regs = (void *)(KERNREALSTK - sizeof(struct realregs)); struct vbeinfo *info = (void *)VBEINFOADR; kbzero(regs, sizeof(struct realregs)); kbzero(info, sizeof(struct vbeinfo)); regs->ax = VBEGETINFO; regs->di = VBEINFOADR; info->sig[0] = 'V'; info->sig[1] = 'B'; info->sig[2] = 'E'; info->sig[3] = '2'; vbeint10(regs); if (regs->ax != VBESUPPORTED) { return; } kbzero(regs, sizeof(struct realregs)); regs->ax = VBEGETMODEINFO; regs->cx = 0x118; regs->di = VBEMODEADR; vbeint10(regs); if (regs->ax != VBESUPPORTED) { return; } kbzero(regs, sizeof(struct realregs)); regs->ax = VBESETMODE; regs->bx = 0x118 | VBELINFBBIT; vbeint10(regs); if (regs->ax != VBESUPPORTED) { return; } return; }
void pageinitdev(unsigned long id, unsigned long npage) { struct swapdev *dev = &_swapdevtab[id]; unsigned long nbmap = npage * sizeof(swapoff_t); unsigned long nbhdr = npage * sizeof(struct physpage); struct physpage *page; struct physpage *pq = dev->freeq; dev->npage = npage; dev->pagemap = kmalloc(nbmap); kbzero(dev->pagemap, nbmap); page = kmalloc(nbhdr); dev->pagetab = page; while (npage--) { queuepush(page, pq); page++; } return; }
void kinitlong(unsigned long pmemsz) { #if (NEWTMR) uint32_t tmrcnt = 0; #endif /* initialise interrupt management */ #if (VBE) trapinitprot(); #endif /* initialise virtual memory */ vminitlong((uint64_t *)kernpagemapl4tab); #if 0 /* FIXME: map possible device memory */ vmmapseg((uint32_t *)&_pagetab, DEVMEMBASE, DEVMEMBASE, 0xffffffffU, PAGEPRES | PAGEWRITE | PAGENOCACHE); #endif // schedinit(); /* zero kernel BSS segment */ kbzero(&_bssvirt, (uint32_t)&_ebssvirt - (uint32_t)&_bssvirt); /* set kernel I/O permission bitmap to all 1-bits */ kmemset(&kerniomap, 0xff, sizeof(kerniomap)); /* INITIALIZE CONSOLES AND SCREEN */ #if (VBE) vbeinitscr(); #endif #if (VBE) && (NEWFONT) consinit(768 / vbefontw, 1024 / vbefonth); #elif (VBE) consinit(768 >> 3, 1024 >> 3); #endif /* TODO: use memory map from GRUB? */ // vminitphys((uintptr_t)&_epagetab, pmemsz); vminitphys((uintptr_t)&_epagetab, pmemsz); meminit(pmemsz); tssinit(0); #if (VBE) && (NEWFONT) // consinit(768 / vbefontw, 1024 / vbefonth); #elif (VBE) consinit(768 >> 3, 1024 >> 3); #endif #if (SMBIOS) smbiosinit(); #endif #if (PS2DRV) ps2init(); #endif #if (VBE) && (PLASMA) plasmaloop(); #endif #if (VBE) vbeprintinfo(); #endif logoprint(); // vminitphys((uintptr_t)&_ebss, pmemsz - (unsigned long)&_ebss); /* HID devices */ #if (PCI) /* initialise PCI bus driver */ pciinit(); #endif #if (ATA) /* initialise ATA driver */ atainit(); #endif #if (SB16) /* initialise Soundblaster 16 driver */ sb16init(); #endif #if (ACPI) /* initialise ACPI subsystem */ acpiinit(); #endif /* initialise block I/O buffer cache */ if (!bufinit()) { kprintf("failed to allocate buffer cache\n"); while (1) { ; } } /* allocate unused device regions (in 3.5G..4G) */ // pageaddzone(DEVMEMBASE, &vmshmq, 0xffffffffU - DEVMEMBASE + 1); #if (SMP) || (APIC) //#if (SMP) /* multiprocessor initialisation */ // mpinit(); //#endif if (mpncpu == 1) { kprintf("found %ld processor\n", mpncpu); } else { kprintf("found %ld processors\n", mpncpu); } #if (HPET) /* 
initialise high precision event timers */ hpetinit(); #endif #if (NEWTMR) tmrcnt = apicinitcpu(0); #else apicinitcpu(0); #endif #if (IOAPIC) ioapicinit(0); #endif #endif /* SMP || APIC */ #if (SMP) if (mpmultiproc) { mpstart(); } #endif /* CPU interface */ taskinit(); // tssinit(0); // machinit(); /* execution environment */ procinit(PROCKERN); // k_curtask = &k_curproc->task; // sysinit(); kprintf("DMA buffers (%ul x %ul kilobytes) @ 0x%p\n", DMANCHAN, DMACHANBUFSIZE >> 10, DMABUFBASE); kprintf("VM page tables @ 0x%p\n", (unsigned long)&_pagetab); // kprintf("%ld kilobytes physical memory\n", pmemsz >> 10); kprintf("%ld kilobytes kernel memory\n", (uint32_t)&_ebss >> 10); kprintf("%ld kilobytes allocated physical memory (%ld wired, %ld total)\n", (vmpagestat.nwired + vmpagestat.nmapped + vmpagestat.nbuf) << (PAGESIZELOG2 - 10), vmpagestat.nwired << (PAGESIZELOG2 - 10), vmpagestat.nphys << (PAGESIZELOG2 - 10)); k_curcpu = &cputab[0]; cpuinit(k_curcpu); schedinit(); #if (APIC) apicstarttmr(tmrcnt); #else pitinit(); #endif schedloop(); /* NOTREACHED */ }
long procinit(long id, long sched) { volatile struct cpu *cpu; struct proc *proc; struct task *task; long prio; long val; struct taskstk *stk; void *ptr; uint8_t *u8ptr; if (id < TASKNPREDEF) { // cpu = &cputab[0]; cpu = k_curcpu; proc = &proctab[id]; task = &tasktab[id]; prio = SCHEDSYSPRIOMIN; task->sched = SCHEDSYSTEM; task->prio = prio; proc->pagedir = (pde_t *)kernpagedir; proc->pagetab = (pte_t *)&_pagetab; task->state = TASKREADY; if (cpu->info.flg & CPUHASFXSR) { task->flg |= CPUHASFXSR; } k_curcpu = cpu; k_curunit = 0; k_curtask = task; k_curpid = id; return id; } else { cpu = &cputab[0]; id = taskgetid(); proc = &proctab[id]; task = &tasktab[id]; task->state = TASKNEW; proc->pid = id; proc->nice = 0; proc->task = task; k_curtask = task; task->proc = proc; val = 0; if (cpu->flg & CPUHASFXSR) { val = CPUHASFXSR; task->flg |= val; } val = 0; task->flg = val; task->score = val; task->slice = val; task->runtime = val; task->slptime = val; task->ntick = val; val = cpu->ntick; task->lastrun = val; task->firstrun = val; task->lasttick = val; if (sched == SCHEDNOCLASS) { prio = SCHEDUSERPRIOMIN; task->sched = SCHEDNORMAL; task->prio = prio; } else { prio = schedclassminprio(sched); task->sched = sched; task->prio = prio; } if (task->state == TASKNEW) { /* initialise page directory */ ptr = kwalloc(NPDE * sizeof(pde_t)); if (ptr) { kbzero(ptr, NPDE * sizeof(pde_t)); proc->pagedir = ptr; } else { kfree(proc); return -1; } #if (VMFLATPHYSTAB) /* initialise page tables */ ptr = kwalloc(PAGETABSIZE); if (ptr) { kbzero(ptr, PAGETABSIZE); proc->pagetab = ptr; } else { kfree(proc->pagedir); kfree(proc); return -1; } #endif /* initialise descriptor table */ ptr = kmalloc(NPROCFD * sizeof(struct desc)); if (ptr) { kbzero(ptr, NPROCFD * sizeof(struct desc)); proc->desctab = ptr; proc->ndesctab = NPROCFD; } else { if (id >= TASKNPREDEF) { kfree(proc->pagetab); kfree(proc->pagedir); kfree(proc); } } } } return id; }
/*
 * Allocate at least nb bytes from the kernel slab/magazine allocator.
 *
 * nb  - requested size in bytes (rounded up to MEMMIN, then to a
 *       power-of-two bucket via memcalcbkt()).
 * flg - allocation flags (e.g. MEMZERO to zero-fill small blocks);
 *       also passed through to slaballoc()/vminitvirt().
 *
 * Large requests (bucket >= MEMSLABMINLOG2) get a whole slab; smaller
 * ones are served from a per-bucket magazine that caches the blocks of
 * a carved-up slab. Panics on out-of-memory and (under MEMPARANOIA)
 * on detected double allocation. Returns the allocated pointer.
 */
void *
memalloc(size_t nb, long flg)
{
    struct mempool  *physpool = &memphyspool;
    struct mempool  *virtpool = &memvirtpool;
    /* NOTE(review): cast says maghdr ** but the variable is memmag ** —
     * presumably the same layout; confirm */
    struct memmag  **magtab = (struct maghdr **)virtpool->tab;
    void            *ptr = NULL;
    size_t           sz = max(MEMMIN, nb);
    size_t           bsz;       /* assigned below but never read */
    unsigned long    slab = 0;  /* nonzero if we allocated a fresh slab */
    unsigned long    bkt = memcalcbkt(sz);
#if defined(MEMPARANOIA)
    unsigned long   *bmap;
#endif
    struct memmag   *mag;
    uint8_t         *u8ptr;
    unsigned long    ndx;
    unsigned long    n;
    struct membkt   *hdr = &virtpool->tab[bkt];

    /* bucket lock is held for the whole operation, including the panics */
    mtxlk(&hdr->lk);
    if (bkt >= MEMSLABMINLOG2) {
        /* large request: one slab per allocation, single-entry magazine */
        ptr = slaballoc(physpool, sz, flg);
        if (ptr) {
#if (!MEMTEST)
            vminitvirt(&_pagetab, ptr, sz, flg);
#endif
            slab++;
            mag = memgetmag(ptr, virtpool);
            mag->base = (uintptr_t)ptr;
            mag->n = 1;
            mag->ndx = 1;
            mag->bkt = bkt;
            mag->prev = NULL;
            mag->next = NULL;
        }
    } else {
        mag = magtab[bkt];
        if (mag) {
            /* a partial magazine exists: pop a cached block */
            ptr = mempop(mag);
            if (memmagempty(mag)) {
                /* magazine exhausted: unlink it from the bucket list */
                if (mag->next) {
                    mag->next->prev = NULL;
                }
                magtab[bkt] = mag->next;
            }
        } else {
            /* no magazine: grab a slab and carve it into 2^bkt blocks */
            ptr = slaballoc(physpool, sz, flg);
            if (ptr) {
#if (!MEMTEST)
                vminitvirt(&_pagetab, ptr, sz, flg);
#endif
                u8ptr = ptr;
                slab++;
                bsz = (uintptr_t)1 << bkt;
                /* NOTE(review): block count 1 << (MEMSLABMINLOG2 - bkt)
                 * assumes slab size is 1 << MEMSLABMINLOG2 — confirm */
                n = (uintptr_t)1 << (MEMSLABMINLOG2 - bkt);
                mag = memgetmag(ptr, virtpool);
                mag->base = (uintptr_t)ptr;
                mag->n = n;
                mag->ndx = 1;   /* block #0 is handed out right now */
                mag->bkt = bkt;
                /* record the remaining blocks for later mempop()s */
                for (ndx = 1 ; ndx < n ; ndx++) {
                    u8ptr += sz;
                    mag->ptab[ndx] = u8ptr;
                }
                mag->prev = NULL;
                mag->next = NULL;
                if (n > 1) {
                    /* NOTE(review): old list head's prev is not updated
                     * here — verify against the unlink logic above */
                    mag->next = magtab[bkt];
                    magtab[bkt] = mag;
                }
            }
        }
    }
    if (ptr) {
#if defined(MEMPARANOIA)
        /* double-allocation detection via a per-magazine bitmap */
#if ((MEMSLABMINLOG2 - MEMMINLOG2) < (LONGSIZELOG2 + 3))
        bmap = &mag->bmap;
#else
        bmap = mag->bmap;
#endif
        ndx = ((uintptr_t)ptr - mag->base) >> bkt;
        if (bitset(bmap, ndx)) {
            kprintf("duplicate allocation %p (%ld/%ld)\n",
                    ptr, ndx, mag->n);
            panic(k_curproc->pid, TRAPNONE, -EINVAL);
        }
        setbit(bmap, ndx);
#endif /* defined(MEMPARANOIA) */
        /* fresh slabs are presumed clean; only zero recycled blocks */
        if (!slab && (flg & MEMZERO)) {
            kbzero(ptr, 1UL << bkt);
        }
    }
    if (!ptr) {
        panic(k_curproc->pid, TRAPNONE, -ENOMEM);
    }
    mtxunlk(&hdr->lk);

    return ptr;
}
/* add block to buffer cache */ void bufaddblk(struct bufblk *blk) { int64_t key = bufkey(blk->num); long dkey = blk->dev & BUFDEVMASK; long bkey1 = (key >> BUFL1SHIFT) & BUFL1MASK; long bkey2 = (key >> BUFL2SHIFT) & BUFL2MASK; long bkey3 = (key >> BUFL3SHIFT) & BUFL3MASK; long fail = 0; long ndx; long nref; struct bufblk *tab1; struct bufblk *tab2; struct bufblk *ptr = NULL; struct bufblk *btab; struct bufblk *bptr; void *stk[3]; mtxlk(&buflktab[dkey]); /* device table */ tab1 = buftab[dkey]; if (!tab1) { /* allocate */ tab1 = kmalloc(BUFNL1ITEM * sizeof(struct bufblk)); kbzero(tab1, BUFNL1ITEM * sizeof(struct bufblk)); buftab[dkey] = tab1; } /* block table level #1 */ if (tab1) { ptr = tab1; stk[0] = ptr; tab2 = ((struct bufblk **)tab1)[bkey1]; if (!tab2) { /* allocate */ tab2 = kmalloc(BUFNL2ITEM * sizeof(struct bufblk)); kbzero(tab2, BUFNL2ITEM * sizeof(struct bufblk)); ((struct bufblk **)tab1)[bkey1] = tab2; } if (tab2) { ptr->nref++; /* block table level #2 */ ptr = tab2; stk[1] = ptr; tab1 = ((struct bufblk **)tab2)[bkey2]; if (!tab1) { tab1 = kmalloc(BUFNL3ITEM * sizeof(struct bufblk)); kbzero(tab1, BUFNL3ITEM * sizeof(struct bufblk)); ((struct bufblk **)tab2)[bkey2] = tab1; } if (tab1) { ptr->nref++; ptr = tab1; stk[2] = ptr; /* block table level #3 */ btab = ((struct bufblk **)tab1)[bkey3]; if (btab) { ptr->nref++; /* add to beginning of chain */ bptr = btab; if (bptr) { bptr->tabprev = blk; } blk->tabnext = bptr; *((struct bufblk **)btab) = bptr; } } else { fail++; } } else { fail++; } } else { fail++; } if (fail) { ndx = 3; while (ndx--) { ptr = stk[ndx]; if (ptr) { nref = ptr->nref; nref--; ptr->nref = nref; if (!nref) { kfree(ptr); } } } } mtxunlk(&buflktab[dkey]); if (!fail) { queueappend(blk, &buflruqueue.head); } return; }
long procinit(long id) { long taskid = ((id < TASKNPREDEF && (id >= 0)) ? id : taskgetid()); struct proc *proc = &proctab[taskid]; struct task *task = &tasktab[taskid]; struct taskstk *stk; void *ptr; uint8_t *u8ptr; if (taskid < TASKNPREDEF) { /* bootstrap */ if (k_curcpu->info->flags & CPUHASFXSR) { task->m_tcb.fxsave = 1; } else { task->m_tcb.fxsave = 0; } proc->task = task; task->proc = proc; task->state = TASKREADY; task->nice = 0; task->sched = SCHEDNORMAL; task->prio = id; k_curproc = proc; k_curtask = task; } if (proc) { if (taskid >= TASKNPREDEF) { /* initialise page directory */ ptr = kmalloc(NPDE * sizeof(pde_t)); if (ptr) { kbzero(ptr, NPDE * sizeof(pde_t)); proc->vmpagemap.dir = ptr; } else { kfree(proc); return -1; } ptr = kmalloc(KERNSTKSIZE); if (ptr) { u8ptr = ptr; stk = &task->kstk; u8ptr += KERNSTKSIZE; kbzero(ptr, KERNSTKSIZE); stk->top = u8ptr; stk->sp = u8ptr; stk->base = ptr; stk->size = KERNSTKSIZE; } ptr = kmalloc(TASKSTKSIZE); if (ptr) { u8ptr = ptr; stk = &task->ustk; u8ptr += KERNSTKSIZE; kbzero(ptr, TASKSTKSIZE); stk->top = u8ptr; stk->sp = u8ptr; stk->base = ptr; stk->size = TASKSTKSIZE; } else { kfree(proc->vmpagemap.dir); kfree(task->kstk.base); kfree(proc); return -1; } /* initialise descriptor table */ ptr = kmalloc(TASKNDESC * sizeof(struct desc)); if (ptr) { kbzero(ptr, TASKNDESC * sizeof(struct desc)); proc->desctab = ptr; proc->ndesctab = TASKNDESC; } else { kfree(proc->vmpagemap.dir); kfree(task->ustk.base); kfree(task->kstk.base); kfree(proc); return -1; } #if 0 /* initialise VM structures */ ptr = kmalloc(NPAGEMAX * sizeof(struct userpage)); if (ptr) { kbzero(ptr, NPAGEMAX * sizeof(struct userpage)); proc->pagetab = ptr; proc->npagetab = NPAGEMAX; } else { kfree(proc->vmpagemap.dir); kfree(task->ustk.base); kfree(task->kstk.base); kfree(proc->desctab); kfree(proc); return -1; } #endif task->state = TASKREADY; } } return 0; }
/* allocate and initialise buffer cache; called at boot time */ long ioinitbuf(void) { uint8_t *u8ptr; void *ptr = NULL; struct bufblk *blk; struct bufblk *prev; long n; long sz; unsigned long lim; #if (BUFDYNALLOC) sz = BUFNBLK * sizeof(struct bufblk); ptr = memalloc(sz, PAGEWIRED); if (!ptr) { kprintf("failed to allocate buffer cache headers\n"); return 0; } bufhdrtab = ptr; #endif /* allocate block I/O buffer cache */ sz = BUFNBYTE; ptr = memalloc(sz, PAGEWIRED); if (!ptr) { do { sz >>= 1; ptr = memalloc(sz, PAGEWIRED); } while ((sz) >= BUFMINBYTES && !ptr); } if (!ptr) { kprintf("failed to allocate buffer cache\n"); return 0; } #if (__KERNEL__) kprintf("BUF: reserved %lu bytes for buffer cache\n", sz); #endif u8ptr = ptr; lim = (unsigned long)(u8ptr + sz); vmpagestat.nbuf = sz >> BUFMINSIZELOG2; vmpagestat.buf = ptr; vmpagestat.bufend = u8ptr + sz; vmmapseg((uint32_t *)&_pagetab, (uint32_t)ptr, (uint32_t)ptr, (uint32_t)lim, PAGEBUF | PAGEPRES | PAGEWRITE | PAGEWIRED); vmpagestat.nphys += (sz >> PAGESIZELOG2); vmpagestat.nvirt += (sz >> PAGESIZELOG2); vmpagestat.nwire += (sz >> PAGESIZELOG2); vmpagestat.nbuf += (sz >> PAGESIZELOG2); kprintf("BUF: mapped buffer cache to %lx..%lx\n", (unsigned long)ptr, (unsigned long)(lim - 1)); if (ptr) { /* zero buffer cache */ kbzero(ptr, sz); /* initialise buffer headers */ n = sz >> BUFMINSIZELOG2; blk = &bufhdrtab[0]; blk->flg = BUFMINSIZELOG2; blk->data = u8ptr; // deqappend(blk, &buffreelist.head); u8ptr += BUFMINSIZE; prev = blk; blk++; while (--n) { prev->next = blk; blk->flg = BUFMINSIZELOG2; blk->data = u8ptr; // deqappend(blk, &buffreelist.head); u8ptr += BUFMINSIZE; blk++; prev = blk; } buffreelist.head = ptr; bufzone = ptr; bufnbyte = sz; } #if 0 if (ptr) { /* allocate and zero buffer cache */ kbzero(ptr, sz); /* initialise buffer headers */ n = sz >> BUFMINSIZELOG2; blk = &bufhdrtab[n - 1]; u8ptr += sz; while (n--) { u8ptr -= BUFMINSIZE; blk->data = u8ptr; deqpush(blk, &buffreelist.head); blk--; } bufzone = ptr; 
bufnbyte = sz; } #endif return 1; }