/*
 * reserve the physical frames covering [base, nb) for zone; only frames
 * whose page-table entry is clear (unmapped) are pushed onto the zone's
 * free queue. returns the zone size in bytes.
 *
 * BUGFIX: the original walked pte upward from the low end while page/adr
 * walked downward from the top, and advanced page/adr only when a pte was
 * clear, so the header pushed for a free pte did not correspond to that
 * pte's frame; all three cursors now move downward in lockstep.
 */
unsigned long
pageaddphyszone(uintptr_t base, struct physpage **zone, unsigned long nb)
{
    uintptr_t        adr = rounduppow2(base, PAGESIZE);
    struct physpage *page = &vmphystab[pagenum(adr)];
    uint32_t        *pte = (uint32_t *)&_pagetab + vmpagenum(adr);
    unsigned long    n = rounduppow2(nb - adr, PAGESIZE) >> PAGESIZELOG2;
    unsigned long    size = n * PAGESIZE;

    /* position every cursor one past the last frame and walk down */
    adr += n << PAGESIZELOG2;
    page += n;
    pte += n;
    vmpagestat.nphys = n;
    kprintf("reserving %ld (%lx) maps @ %p (%lx)\n",
            n, n, vmphystab, pagenum(base));
    while (n--) {
        pte--;
        page--;
        if (!*pte) {
            /* frame is unmapped -> available for this zone */
            /* NOTE(review): adr is stored before the decrement, so each
               header records frame base + PAGESIZE; this matches
               pageinitphyszone() but looks off by one page -- confirm
               against queue consumers */
            page->adr = adr;
            page->nflt = 0;
            queuepush(page, zone);
        }
        adr -= PAGESIZE;
    }

    return size;
}
/*
 * initialise the physical frames covering [base, nb) and push every
 * frame's header onto the zone's free queue. returns the zone size in
 * bytes.
 */
unsigned long
pageinitphyszone(uintptr_t base, struct physpage **zone, unsigned long nb)
{
    struct physpage *page = &vmphystab[pagenum(base)];
    uintptr_t        adr = rounduppow2(base, PAGESIZE);
    unsigned long    n = rounduppow2(nb - adr, PAGESIZE) >> PAGESIZELOG2;
    unsigned long    size = n * PAGESIZE;

    adr += n << PAGESIZELOG2;
    page += n;
    vmpagestat.nphys = n;
    /* BUGFIX: original format string had 5 conversions for 7 arguments
       (undefined behavior); print count, table, first pfn and the
       address range explicitly */
    kprintf("initializing %ld (%lx) pages @ %p (%lu): %lx..%lx\n",
            n, n, vmphystab, pagenum(base), adr - size, adr);
    while (n--) {
        page--;
        /* NOTE(review): adr is stored before the decrement, so each
           header records frame base + PAGESIZE; matches
           pageaddphyszone() but looks off by one page -- confirm
           against queue consumers */
        page->adr = adr;
        page->nflt = 0;
        queuepush(page, zone);
        adr -= PAGESIZE;
    }

    return size;
}
/* return the swap slot backing adr to its device's free queue and clear
   the slot's map entry */
void
swapfree(uintptr_t adr)
{
    unsigned long   blkid = swapblkid(adr);
    struct swapdev *swpdev = &_swapdevtab[vmdevid(adr)];

    queuepush(&swpdev->pagetab[blkid], &swpdev->freeq);
    swpdev->pagemap[blkid] = 0;
}
/* initialise buffer cache; called at boot time */ long bufinit(void) { long retval = 0; uint8_t *u8ptr; void *ptr = NULL; struct bufblk *blk; long n; long sz; long end; sz = BUFNBYTE; do { ptr = memalloc(sz, PAGEWIRED); sz >>= 1; } while ((sz) && !ptr); if (!ptr) { kprintf("failed to allocate buffer cache\n"); return 0; } #if (__KERNEL__) kprintf("BUF: reserved %lu bytes for buffer cache\n", sz); #endif u8ptr = ptr; vmpagestat.nbuf = sz >> PAGESIZELOG2; vmpagestat.buf = ptr; vmpagestat.bufend = u8ptr + sz; if (ptr) { /* allocate and zero buffer cache */ // kbzero(ptr, sz); /* initialise buffer headers */ n = sz >> BUFMINSIZELOG2; blk = &bufhdrtab[n - 1]; u8ptr += sz; while (n--) { u8ptr -= BUFMINSIZE; blk->data = u8ptr; queuepush(blk, &buffreelist.head); blk--; } bufzone = ptr; bufnbyte = sz; retval = 1; } return retval; }
/* release the buffer block for (dev, num) back onto the free list;
   the flush path is currently compiled out */
void
bufrel(long dev, int64_t num, long flush)
{
    struct bufblk *buf = buffindblk(dev, num, 1);

    if (!buf) {

        return;
    }
#if 0
    if (flush) {
        bufwrite(buf);
    }
#endif
    mtxlk(&buffreelist.lk);
    queuepush(buf, &buffreelist.head);
    mtxunlk(&buffreelist.lk);
}
/* * TODO: evict pages from LRU if none free / low water */ struct physpage * pageallocphys(void) { struct physpage *page = NULL; struct physpage **queue; long found = 0; long qid; long q; mtxlk(&vmphyslk); page = queuepop(&vmphysqueue); mtxunlk(&vmphyslk); if (!page) { do { for (q = 0 ; q < LONGSIZE * CHAR_BIT ; q++) { mtxlk(&vmlrutab[q].lk); queue = &vmlrutab[q].list; page = queuegetlast(queue); if (page) { found++; page->nflt++; qid = pagecalcqid(page); if (qid != q) { mtxlk(&vmlrutab[q].lk); } queue = &vmlrutab[qid].list; queuepush(page, queue); if (qid != q) { mtxunlk(&vmlrutab[qid].lk); } break; } mtxunlk(&vmlrutab[q].lk); } if (found) { break; } } while (!found); } return page; }
/* drop a reference to the physical page backing adr; when the reference
   count reaches zero, flush the TLB entry and return the page to the
   global free queue */
void
pagefreephys(void *adr)
{
    struct physpage *page;

    mtxlk(&vmphyslk);
    page = &vmphystab[vmpagenum(adr)];
    mtxlk(&page->lk);
    page->nref--;
    if (!page->nref) {
        vmflushtlb(adr);
        queuepush(page, &vmphysqueue);
    }
    mtxunlk(&page->lk);
    mtxunlk(&vmphyslk);
}
void pageinitdev(unsigned long id, unsigned long npage) { struct swapdev *dev = &_swapdevtab[id]; unsigned long nbmap = npage * sizeof(swapoff_t); unsigned long nbhdr = npage * sizeof(struct physpage); struct physpage *page; struct physpage *pq = dev->freeq; dev->npage = npage; dev->pagemap = kmalloc(nbmap); kbzero(dev->pagemap, nbmap); page = kmalloc(nbhdr); dev->pagetab = page; while (npage--) { queuepush(page, pq); page++; } return; }