/*
 * Unlock a zero-mutex.
 *
 * Non-recursive mutexes are released directly via mtxunlk() on the value
 * word.  Recursive mutexes track the owner thread ID in mtx->val and a
 * recursion depth in mtx->rec; only the owning thread's unlock has any
 * effect — a non-owner call falls through silently.
 *
 * NOTE(review): the recursive path appears to pair mtx->cnt (waiter count,
 * adjusted with m_fetchadd) with the separate mtx->lk wait-lock, releasing
 * lk only when the outermost recursion level drops and waiters remain
 * (res > 1).  Confirm against the matching lock routine — the cnt/lk
 * protocol is not visible from this function alone.
 */
void
zerounlkmtx(zeromtx *mtx)
{
    volatile long res;  /* previous waiter count from m_fetchadd */
    long thr;

    if (!(mtx->atr.flg & ZEROMTX_RECURSIVE)) {
        /* non-recursive mutex */
        mtxunlk(&mtx->val);
    } else {
        /* recursive mutex */
        thr = thrid();
        if (mtx->val == thr) {
            /* only the owner may unwind the recursion */
            mtx->rec--;
            if (!mtx->rec) {
                /* outermost unlock: mark free */
                mtx->val = ZEROMTX_FREE;
            }
            res = m_fetchadd(&mtx->cnt, -1);
            if (res > 1) {
                if (!mtx->rec) {
                    /* waiters remain and recursion fully unwound */
                    mtxunlk(&mtx->lk);
                }
            }
        }
    }

    return;
}
static void sysconfinit(long *tab) { struct m_cpuinfo cpuinfo; long *ptr = tab - MINSYSCONF; mtxlk(&sysconflk); if (!(sysconfbits & SYSCONF_CPUPROBE)) { /* probe persistent values */ cpuprobe(&cpuinfo); ptr[_SC_L2_NWAY] = cpuinfo.l2.nway; ptr[_SC_L2_SIZE] = cpuinfo.l2.size; ptr[_SC_L1_DATA_NWAY] = cpuinfo.l1d.nway; ptr[_SC_L1_INST_NWAY] = cpuinfo.l1i.nway; ptr[_SC_L1_DATA_SIZE] = cpuinfo.l1d.size; ptr[_SC_L1_INST_SIZE] = cpuinfo.l1i.size; ptr[_SC_CACHELINE_SIZE] = cpuinfo.l2.clsz; sysconfbits |= SYSCONF_CPUPROBE; } if (sysconfbits & SYSCONF_INIT) { mtxunlk(&sysconflk); return; } // sysconfupd(); sysconfbits |= SYSCONF_INIT; mtxunlk(&sysconflk); return; }
/* * TODO: evict pages from LRU if none free / low water */ struct physpage * pageallocphys(void) { struct physpage *page = NULL; struct physpage **queue; long found = 0; long qid; long q; mtxlk(&vmphyslk); page = queuepop(&vmphysqueue); mtxunlk(&vmphyslk); if (!page) { do { for (q = 0 ; q < LONGSIZE * CHAR_BIT ; q++) { mtxlk(&vmlrutab[q].lk); queue = &vmlrutab[q].list; page = queuegetlast(queue); if (page) { found++; page->nflt++; qid = pagecalcqid(page); if (qid != q) { mtxlk(&vmlrutab[q].lk); } queue = &vmlrutab[qid].list; queuepush(page, queue); if (qid != q) { mtxunlk(&vmlrutab[qid].lk); } break; } mtxunlk(&vmlrutab[q].lk); } if (found) { break; } } while (!found); } return page; }
/* look buffer up from buffer cache; dev is buffer-device ID, not system */
/*
 * Find the cached buffer block for (dev, num); if rel is non-zero,
 * also unlink it from its hash chain.  Returns NULL when not cached.
 *
 * FIX: the BUFNEWHASH path returned while still holding chain->lk;
 * the chain lock is now released on every exit path.
 */
struct bufblk *
buffindblk(long dev, off_t num, long rel)
{
#if (BUFNEWHASH)
    int64_t          val = bufmkhashkey(dev, num);
    int64_t          key = hashq128(&val, sizeof(int64_t), BUFNHASHBIT);
    struct bufblk   *blk;
    struct bufchain *chain = &bufhash[key];
#else
    int64_t          key = hashq128(&num, sizeof(int64_t), BUFNHASHBIT);
    long             dkey = dev & BUFDEVMASK;
    struct bufblk   *blk = NULL;
#endif

#if (BUFNEWHASH)
    mtxlk(&chain->lk);
    blk = chain->list;
    while ((blk) && blk->num != num) {
        blk = blk->tabnext;
    }
    if ((blk) && (rel)) {
        /* remove block from buffer hash chain */
        if (blk->tabprev) {
            blk->tabprev->tabnext = blk->tabnext;
        } else {
            chain->list = blk->tabnext;
        }
        if (blk->tabnext) {
            blk->tabnext->tabprev = blk->tabprev;
        }
    }
    /* FIX: release the chain lock before returning (was leaked) */
    mtxunlk(&chain->lk);

    return blk;
#else
    mtxlk(&buflktab[dkey]);
    blk = bufhash[dkey][key];
    while ((blk) && blk->num != num) {
        blk = blk->tabnext;
    }
    if ((blk) && (rel)) {
        /* remove block from buffer hash chain */
        if (blk->tabprev) {
            blk->tabprev->tabnext = blk->tabnext;
        } else {
            bufhash[dkey][key] = blk->tabnext;
        }
        if (blk->tabnext) {
            blk->tabnext->tabprev = blk->tabprev;
        }
    }
    mtxunlk(&buflktab[dkey]);

    return blk;
#endif
}
/*
 * Drop one reference to the physical page backing adr.
 * When the reference count reaches zero the TLB entry is flushed
 * and the page is returned to the free queue.  Both the global
 * physical-page lock and the per-page lock are held for the update.
 */
void
pagefreephys(void *adr)
{
    struct physpage *pg;
    unsigned long    pgnum;

    mtxlk(&vmphyslk);
    pgnum = vmpagenum(adr);
    pg = &vmphystab[pgnum];
    mtxlk(&pg->lk);
    pg->nref--;
    if (!pg->nref) {
        /* last reference gone; recycle the page */
        vmflushtlb(adr);
        queuepush(pg, &vmphysqueue);
    }
    mtxunlk(&pg->lk);
    mtxunlk(&vmphyslk);

    return;
}
/*
 * Initialise the physical-page zone starting at base, clamped so it
 * does not extend past the kernel-virtual region reserved for the
 * per-CPU kernel stacks.  Returns the number of bytes (as reported
 * by pageinitphyszone) actually added to the free queue.
 */
unsigned long
pageinitphys(uintptr_t base, unsigned long nb)
{
    unsigned long ret;
    unsigned long lim = min(nb, KERNVIRTBASE - NCPU * KERNSTKSIZE);

    mtxlk(&vmphyslk);
    ret = pageinitphyszone(base, &vmphysqueue, lim);
    mtxunlk(&vmphyslk);

    return ret;
}
/*
 * Return task ID slot id to the free-ID queue.
 *
 * NOTE(review): queueappend() is passed &queue, the address of the LOCAL
 * pointer to taskidqueue — if queueappend() links through a head pointer
 * (as the &buffreelist.head / &vmphysqueue call sites elsewhere suggest),
 * this updates only the local copy and the global queue head never
 * changes.  taskinitids() and taskgetid() share the same pattern, so
 * either the queue macros treat the head as a node or all three are
 * broken the same way — verify against the queueappend() definition.
 */
void
taskfreeid(long id)
{
    struct taskid *queue = &taskidqueue;
    struct taskid *taskid = &taskidtab[id];

    mtxlk(&queue->lk);
    queueappend(taskid, &queue);
    mtxunlk(&queue->lk);

    return;
}
/* allocate buffer entry */
/*
 * Pop a block from the free list; when the free list is exhausted,
 * fall back to evicting (and writing back) the LRU block.
 */
struct bufblk *
bufalloc(void)
{
    struct bufblk *buf;

    mtxlk(&buffreelist.lk);
    buf = queuepop(&buffreelist.head);
    mtxunlk(&buffreelist.lk);
    if (buf) {

        return buf;
    }

    /* free list empty; reclaim from the LRU queue */
    return bufevict();
}
void bufaddblk(struct bufblk *blk) { #if (BUFNEWHASH) int64_t val = bufmkhashkey(blk->dev, blk->num); int64_t key = hashq128(&val, sizeof(int64_t), BUFNHASHBIT); struct bufblk *buf; struct bufchain *chain = &bufhash[key]; #else int64_t key = hashq128(&blk->num, sizeof(int64_t), BUFNHASHBIT); long dkey = blk->dev & BUFDEVMASK; struct bufblk *buf; #endif #if (BUFNEWHASH) mtxlk(&chain->lk); buf = chain->list; buf->tabprev = NULL; if (buf) { buf->tabprev = blk; } blk->tabnext = buf; chain->list = buf; mtxunlk(&chain->lk); #else mtxlk(&buflktab[dkey]); buf = bufhash[dkey][key]; if (buf) { buf->tabprev = blk; } bufhash[dkey][key] = blk; mtxunlk(&buflktab[dkey]); #endif return; }
/*
 * Allocate a task ID from the free-ID queue; returns -1 when the
 * queue is empty.
 *
 * FIX: the original unlocked &taskid->lk — a per-entry lock it never
 * acquired — instead of the queue lock taken above; with an empty
 * queue (taskid == NULL) it also computed the lock address through a
 * NULL pointer.  The queue lock is now released.
 *
 * NOTE(review): queuepop() is passed &queue, the address of the LOCAL
 * pointer — see the matching note on taskfreeid(); verify the queue
 * macro's contract before changing.
 */
long
taskgetid(void)
{
    struct taskid *queue = &taskidqueue;
    struct taskid *taskid;
    long           retval = -1;

    mtxlk(&queue->lk);
    taskid = queuepop(&queue);
    if (taskid) {
        retval = taskid->id;
    }
    /* FIX: release the lock that was taken (was &taskid->lk) */
    mtxunlk(&queue->lk);

    return retval;
}
/*
 * Populate the free-task-ID queue with all non-predefined IDs
 * [TASKNPREDEF, NTASK).
 *
 * NOTE(review): queueappend() receives &queue, the address of the LOCAL
 * pointer to taskidqueue — same pattern as taskfreeid()/taskgetid().
 * If the queue macros link through the supplied head pointer, only the
 * local copy is updated; verify against the queueappend() definition.
 */
void
taskinitids(void)
{
    struct taskid *queue = &taskidqueue;
    struct taskid *taskid;
    long id;

    mtxlk(&queue->lk);
    for (id = TASKNPREDEF ; id < NTASK ; id++) {
        taskid = &taskidtab[id];
        taskid->id = id;
        queueappend(taskid, &queue);
    }
    mtxunlk(&queue->lk);

    return;
}
/*
 * Release the cached buffer block for (dev, num): detach it from the
 * buffer hash (rel = 1) and push it onto the free list.  A miss is a
 * no-op.  The write-back-on-flush step is currently compiled out.
 */
void
bufrel(long dev, int64_t num, long flush)
{
    struct bufblk *buf = buffindblk(dev, num, 1);

    if (!buf) {

        return;
    }
#if 0
    if (flush) {
        bufwrite(buf);
    }
#endif
    mtxlk(&buffreelist.lk);
    queuepush(buf, &buffreelist.head);
    mtxunlk(&buffreelist.lk);

    return;
}
/* evict buffer; write back to disk */
/*
 * Reclaim the least-recently-used buffer block.  Dirty blocks are
 * written back before the block is cleared for reuse.  When the LRU
 * queue is empty this currently busy-loops (see TODO).
 */
struct bufblk *
bufevict(void)
{
    struct bufblk *buf;

    for ( ; ; ) {
        mtxlk(&buflruqueue.lk);
        buf = queuepop(&buflruqueue.head);
        mtxunlk(&buflruqueue.lk);
        if (buf) {
            if (buf->flg & BUFDIRTY) {
                /* write modified contents back before reuse */
                bufwrite(buf);
            }
            bufclr(buf);

            return buf;
        }
        /* TODO: wait for queuepop(&buflruqueue.head) */
    }
}
/*
 * Allocate nb bytes from the kernel memory pools.
 *
 * Requests are rounded up to MEMMIN and mapped to a power-of-two
 * bucket.  Buckets >= MEMSLABMINLOG2 go straight to the slab
 * allocator; smaller buckets are carved from a slab into a magazine
 * whose free slots are kept in mag->ptab and chained per-bucket in
 * magtab.  With MEMPARANOIA, a per-magazine bitmap catches duplicate
 * allocations.  Panics on allocation failure.
 *
 * FIX: when carving a fresh slab into magazine entries, the original
 * advanced the cursor by sz (the caller's rounded request) instead of
 * bsz (the bucket size, 1 << bkt).  bsz was computed but never used,
 * and the MEMPARANOIA index ((ptr - base) >> bkt) is only valid when
 * entries are spaced exactly 1 << bkt apart; with sz < bsz the carved
 * blocks would overlap.  The cursor now advances by bsz.
 */
void *
memalloc(size_t nb, long flg)
{
    struct mempool  *physpool = &memphyspool;
    struct mempool  *virtpool = &memvirtpool;
    struct memmag  **magtab = (struct maghdr **)virtpool->tab;
    void            *ptr = NULL;
    size_t           sz = max(MEMMIN, nb);
    size_t           bsz;                   /* bucket block size: 1 << bkt */
    unsigned long    slab = 0;
    unsigned long    bkt = memcalcbkt(sz);
#if defined(MEMPARANOIA)
    unsigned long   *bmap;
#endif
    struct memmag   *mag;
    uint8_t         *u8ptr;
    unsigned long    ndx;
    unsigned long    n;
    struct membkt   *hdr = &virtpool->tab[bkt];

    mtxlk(&hdr->lk);
    if (bkt >= MEMSLABMINLOG2) {
        /* large request: one slab per allocation */
        ptr = slaballoc(physpool, sz, flg);
        if (ptr) {
#if (!MEMTEST)
            vminitvirt(&_pagetab, ptr, sz, flg);
#endif
            slab++;
            mag = memgetmag(ptr, virtpool);
            mag->base = (uintptr_t)ptr;
            mag->n = 1;
            mag->ndx = 1;
            mag->bkt = bkt;
            mag->prev = NULL;
            mag->next = NULL;
        }
    } else {
        mag = magtab[bkt];
        if (mag) {
            /* partial magazine available: take a cached block */
            ptr = mempop(mag);
            if (memmagempty(mag)) {
                /* magazine drained; unlink from the bucket list */
                if (mag->next) {
                    mag->next->prev = NULL;
                }
                magtab[bkt] = mag->next;
            }
        } else {
            /* no magazine: carve a new slab into bucket-size blocks */
            ptr = slaballoc(physpool, sz, flg);
            if (ptr) {
#if (!MEMTEST)
                vminitvirt(&_pagetab, ptr, sz, flg);
#endif
                u8ptr = ptr;
                slab++;
                bsz = (uintptr_t)1 << bkt;
                n = (uintptr_t)1 << (MEMSLABMINLOG2 - bkt);
                mag = memgetmag(ptr, virtpool);
                mag->base = (uintptr_t)ptr;
                mag->n = n;
                mag->ndx = 1;
                mag->bkt = bkt;
                for (ndx = 1 ; ndx < n ; ndx++) {
                    /* FIX: stride by the bucket size, not the request size */
                    u8ptr += bsz;
                    mag->ptab[ndx] = u8ptr;
                }
                mag->prev = NULL;
                mag->next = NULL;
                if (n > 1) {
                    /* leftover blocks: expose the magazine to the bucket */
                    mag->next = magtab[bkt];
                    magtab[bkt] = mag;
                }
            }
        }
    }
    if (ptr) {
#if defined(MEMPARANOIA)
#if ((MEMSLABMINLOG2 - MEMMINLOG2) < (LONGSIZELOG2 + 3))
        bmap = &mag->bmap;
#else
        bmap = mag->bmap;
#endif
        ndx = ((uintptr_t)ptr - mag->base) >> bkt;
        if (bitset(bmap, ndx)) {
            kprintf("duplicate allocation %p (%ld/%ld)\n",
                    ptr, ndx, mag->n);

            panic(k_curproc->pid, TRAPNONE, -EINVAL);
        }
        setbit(bmap, ndx);
#endif /* defined(MEMPARANOIA) */
        if (!slab && (flg & MEMZERO)) {
            /* fresh slabs come zeroed; only recycled blocks need it */
            kbzero(ptr, 1UL << bkt);
        }
    }
    if (!ptr) {
        panic(k_curproc->pid, TRAPNONE, -ENOMEM);
    }
    mtxunlk(&hdr->lk);

    return ptr;
}
/* TODO: deal with unmapping/freeing physical memory */
/*
 * Free a block previously returned by memalloc()/kmalloc().
 *
 * The block is pushed back onto its magazine.  A fully-free magazine
 * has its slab returned to the physical pool (after unlinking from
 * the bucket list when it held more than one block); a magazine that
 * just transitioned from "drained" to "one block free" is re-linked
 * at the head of the bucket list so memalloc() can find it again.
 * With MEMPARANOIA, the per-magazine bitmap catches invalid frees.
 *
 * NOTE(review): hdr->list is sampled into `list` before the bucket
 * lock is taken — confirm no concurrent memalloc() can change it in
 * that window.  Also, clrbit(bmap, ndx) runs after slabfree() on the
 * fully-free path; bmap points into the magazine header, which is
 * presumably pool metadata that outlives the slab — verify.
 */
void
kfree(void *ptr)
{
    struct mempool *physpool = &memphyspool;
    struct mempool *virtpool = &memvirtpool;
    struct memmag  *mag = memgetmag(ptr, virtpool);
    unsigned long   bkt = (mag) ? mag->bkt : 0;
#if defined(MEMPARANOIA)
    unsigned long   ndx;
    unsigned long  *bmap;
#endif
    struct membkt  *hdr = &virtpool->tab[bkt];
    struct memmag  *list = hdr->list;

    if (!ptr || !mag) {

        return;
    }
    mtxlk(&hdr->lk);
#if defined(MEMPARANOIA)
    /* block index within the magazine: entries are spaced 1 << bkt apart */
    ndx = ((uintptr_t)ptr - mag->base) >> bkt;
#if ((MEMSLABMINLOG2 - MEMMINLOG2) < (LONGSIZELOG2 + 3))
    bmap = &mag->bmap;
#else
    bmap = mag->bmap;
#endif
    if (!bitset(bmap, ndx)) {
        kprintf("invalid free: %p (%ld/%ld)\n", ptr, ndx, mag->n);

        panic(k_curproc->pid, TRAPNONE, -EINVAL);
    }
#endif
    mempush(mag, ptr);
    if (memmagfull(mag)) {
        /* every block free: release the whole slab */
        if (gtpow2(mag->n, 1)) {
            /* multi-block magazine may be on the bucket list; unlink */
            if ((mag->prev) && (mag->next)) {
                mag->prev->next = mag->next;
                mag->next->prev = mag->prev;
            } else if (mag->prev) {
                mag->prev->next = NULL;
            } else if (mag->next) {
                mag->next->prev = NULL;
                hdr->list = mag->next;
            } else {
                hdr->list = NULL;
            }
        }
        slabfree(physpool, ptr);
        mag->base = 0;
    } else if (mag->ndx == mag->n - 1) {
        /* was drained, now has one free block: put back on bucket list */
        mag->prev = NULL;
        if (list) {
            list->prev = mag;
        }
        mag->next = list;
        hdr->list = mag;
    }
#if defined(MEMPARANOIA)
    clrbit(bmap, ndx);
#endif
    mtxunlk(&hdr->lk);

    return;
}
/* look buffer up from buffer cache */
/*
 * Find block (dev, num) in the 3-level buffer table; with rel set,
 * unlink it from its chain and drop one reference on each table level
 * traversed (stk[]), freeing any level whose refcount reaches zero.
 *
 * NOTE(review): when a subtable is kfree()d here, the pointer to it in
 * its PARENT table is not cleared, leaving a dangling slot that a later
 * bufaddblk()/buffindblk() would follow — verify against the intended
 * table protocol before relying on the release path.
 * NOTE(review): tables are allocated as struct bufblk storage but
 * indexed as (struct bufblk **) arrays while nref is read through the
 * struct view — the two layouts appear to overlap; confirm.
 */
struct bufblk *
buffindblk(dev_t dev, off_t num, long rel)
{
    int64_t        key = bufkey(num);
    struct bufblk *blk = NULL;
    long           dkey = dev & BUFDEVMASK;
    long           bkey1 = (key >> BUFL1SHIFT) & BUFL1MASK;
    long           bkey2 = (key >> BUFL2SHIFT) & BUFL2MASK;
    long           bkey3 = (key >> BUFL3SHIFT) & BUFL3MASK;
    long           ndx;
    long           nref;
    struct bufblk *ptr;
    struct bufblk *tab1;
    struct bufblk *tab2;
    struct bufblk *prev;
    struct bufblk *next;
    struct bufblk *stk[3];   /* tables visited, for refcount release */

    mtxlk(&buflktab[dkey]);
    /* device table */
    tab1 = buftab[dkey];
    if (tab1) {
        /* block table level #1 */
        stk[0] = tab1;
        tab2 = ((struct bufblk **)tab1)[bkey1];
        if (tab2) {
            /* block table level #2 */
            stk[1] = tab2;
            tab1 = ((struct bufblk **)tab2)[bkey2];
            if (tab1) {
                /* block table level #3 */
                stk[2] = tab1;
                blk = ((struct bufblk **)tab1)[bkey3];
                while (blk) {
                    /* scan chain */
                    if (blk->dev == dev && blk->num == num) {
                        if (rel) {
                            /* remove from chain */
                            prev = blk->tabprev;
                            next = blk->tabnext;
                            if (prev) {
                                prev->tabnext = blk->tabnext;
                            } else {
                                ((struct bufblk **)tab1)[bkey3] = next;
                            }
                            if (next) {
                                next->tabprev = prev;
                            }
                            /* deallocate empty subtables */
                            ndx = 3;
                            while (ndx--) {
                                ptr = stk[ndx];
                                nref = ptr->nref;
                                nref--;
                                ptr->nref = nref;
                                if (!nref) {
                                    kfree(ptr);
                                }
                            }
                        }

                        break;
                    }
                    blk = blk->tabnext;
                }
            }
        }
    }
    mtxunlk(&buflktab[dkey]);

    return blk;
}
/* add block to buffer cache */ void bufaddblk(struct bufblk *blk) { int64_t key = bufkey(blk->num); long dkey = blk->dev & BUFDEVMASK; long bkey1 = (key >> BUFL1SHIFT) & BUFL1MASK; long bkey2 = (key >> BUFL2SHIFT) & BUFL2MASK; long bkey3 = (key >> BUFL3SHIFT) & BUFL3MASK; long fail = 0; long ndx; long nref; struct bufblk *tab1; struct bufblk *tab2; struct bufblk *ptr = NULL; struct bufblk *btab; struct bufblk *bptr; void *stk[3]; mtxlk(&buflktab[dkey]); /* device table */ tab1 = buftab[dkey]; if (!tab1) { /* allocate */ tab1 = kmalloc(BUFNL1ITEM * sizeof(struct bufblk)); kbzero(tab1, BUFNL1ITEM * sizeof(struct bufblk)); buftab[dkey] = tab1; } /* block table level #1 */ if (tab1) { ptr = tab1; stk[0] = ptr; tab2 = ((struct bufblk **)tab1)[bkey1]; if (!tab2) { /* allocate */ tab2 = kmalloc(BUFNL2ITEM * sizeof(struct bufblk)); kbzero(tab2, BUFNL2ITEM * sizeof(struct bufblk)); ((struct bufblk **)tab1)[bkey1] = tab2; } if (tab2) { ptr->nref++; /* block table level #2 */ ptr = tab2; stk[1] = ptr; tab1 = ((struct bufblk **)tab2)[bkey2]; if (!tab1) { tab1 = kmalloc(BUFNL3ITEM * sizeof(struct bufblk)); kbzero(tab1, BUFNL3ITEM * sizeof(struct bufblk)); ((struct bufblk **)tab2)[bkey2] = tab1; } if (tab1) { ptr->nref++; ptr = tab1; stk[2] = ptr; /* block table level #3 */ btab = ((struct bufblk **)tab1)[bkey3]; if (btab) { ptr->nref++; /* add to beginning of chain */ bptr = btab; if (bptr) { bptr->tabprev = blk; } blk->tabnext = bptr; *((struct bufblk **)btab) = bptr; } } else { fail++; } } else { fail++; } } else { fail++; } if (fail) { ndx = 3; while (ndx--) { ptr = stk[ndx]; if (ptr) { nref = ptr->nref; nref--; ptr->nref = nref; if (!nref) { kfree(ptr); } } } } mtxunlk(&buflktab[dkey]); if (!fail) { queueappend(blk, &buflruqueue.head); } return; }