/* * Preallocate some pages: * some 2M ones will be used by the first process. * some 1G ones will be allocated for each domain so processes may use them. */ void pageinit(void) { int si, i, color; Page *pg; pga.userinit = 1; DBG("pageinit: npgsz = %d\n", sys->npgsz); /* * Don't pre-allocate 4K pages, we are not using them anymore. */ for(si = 1; si < sys->npgsz; si++){ for(i = 0; i < Nstartpgs; i++){ if(si < 2) color = -1; else color = i; pg = pgalloc(sys->pgsz[si], color); if(pg == nil){ DBG("pageinit: pgalloc failed. breaking.\n"); break; /* don't consume more memory */ } DBG("pageinit: alloced pa %#P sz %#ux color %d\n", pg->pa, sys->pgsz[si], pg->color); lock(&pga.l); pg->ref = 0; pagechainhead(pg); unlock(&pga.l); } } pga.userinit = 0; }
/*
 * Drop a reference to a page; when the last reference goes away,
 * return the page to the free list.  Pages living on swap are
 * handed to the swap code instead.
 */
void
putpage(Page *p)
{
	if(onswap(p)) {
		putswap(p);
		return;
	}
	/* lock order: palloc before the page itself */
	lock(&palloc);
	lock(p);
	if(p->ref == 0)
		panic("putpage");
	if(--p->ref > 0) {
		/* still referenced elsewhere; nothing more to do */
		unlock(p);
		unlock(&palloc);
		return;
	}
	/*
	 * Image-backed pages (other than swap) go to the tail so their
	 * cached contents survive as long as possible; anonymous pages
	 * go to the head for immediate reuse.
	 */
	if(p->image && p->image != &swapimage)
		pagechaintail(p);
	else
		pagechainhead(p);
	/* wake anyone sleeping on palloc waiting for a free page */
	if(palloc.r.p != 0)
		wakeup(&palloc.r);
	unlock(p);
	unlock(&palloc);
}
/*
 * Release all MMU state held by an exiting process: return its cached
 * L2 page-table pages to the free pool, clear the L1 user entries,
 * and flush caches/TLB so no stale translations survive.
 */
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/* write back dirty and invalidate l1 caches */
	cacheuwbinv();

	mmul2empty(proc, 0);
	/* each cached L2 page must hold exactly one reference */
	for(page = proc->mmul2cache; page != nil; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %lud", page->ref);
		pagechainhead(page);
	}
	if(proc->mmul2cache != nil)
		pagechaindone();
	proc->mmul2cache = nil;

	mmul1empty();

	/* make sure map is in memory */
	/* could be smarter about how much? */
	cachedwbse(&m->mmul1[L1X(UZERO)], (L1hi - L1lo)*sizeof(PTE));

	/* lose any possible stale tlb entries */
	mmuinvalidate();
}
/*
 * Try to allocate one physical page of the given size index and color
 * and chain it onto the free list.  Returns 0 on success, -1 if no
 * memory of that size/color is available.
 */
static int
tryalloc(int pgszi, int color)
{
	Page *p;

	p = pgalloc(sys->pgsz[pgszi], color);
	if(p == nil)
		return -1;
	lock(&pga);
	pagechainhead(p);
	unlock(&pga);
	return 0;
}
void
mmurelease(Proc* proc)
{
	Page *page, *next;

	/*
	 * Release any pages allocated for a page directory base or page-tables
	 * for this process:
	 *	switch to the prototype pdb for this processor (m->pdb);
	 *	call mmuptefree() to place all pages used for page-tables (proc->mmuused)
	 *	onto the process' free list (proc->mmufree). This has the side-effect of
	 *	cleaning any user entries in the pdb (proc->mmupdb);
	 *	if there's a pdb put it in the cache of pre-initialised pdb's
	 *	for this processor (m->pdbpool) or on the process' free list;
	 *	finally, place any pages freed back into the free pool (palloc).
	 * This routine is only called from sched() with palloc locked.
	 */
	taskswitch(0, (ulong)m + BY2PG);
	mmuptefree(proc);

	if((page = proc->mmupdb) != 0){
		proc->mmupdb = 0;
		while(page){
			next = page->next;
			/* its not a page table anymore, mark it rw */
			xenptunpin(page->va);
			/* keep a small per-processor cache of pdb pages; overflow goes to the free list */
			if(paemode || m->pdbcnt > 10){
				page->next = proc->mmufree;
				proc->mmufree = page;
			}
			else{
				page->next = m->pdbpool;
				m->pdbpool = page;
				m->pdbcnt++;
			}
			page = next;
		}
	}

	/* each freed page-table page must hold exactly one reference */
	for(page = proc->mmufree; page; page = next){
		next = page->next;
		if(--page->ref)
			panic("mmurelease: page->ref %ld", page->ref);
		pagechainhead(page);
	}
	/* wake anyone waiting on palloc for free pages */
	if(proc->mmufree && palloc.r.p)
		wakeup(&palloc.r);
	proc->mmufree = 0;
}
/*
 * Drop a reference to a page; when the last reference goes away,
 * either rechain the page on the free list or, if the free list for
 * its size is already plentiful, release it back to physical memory.
 */
void
putpage(Page *p)
{
	Pgsza *pa;
	int rlse;

	/* lock order: pga before the page itself */
	lock(&pga.l);
	lock(&p->l);
	if(p->ref == 0)
		panic("putpage");
	if(--p->ref > 0) {
		/* still referenced elsewhere; nothing more to do */
		unlock(&p->l);
		unlock(&pga.l);
		return;
	}
	rlse = 0;
	if(p->image != nil)
		/* keep cached image pages at the tail, so they live longest */
		pagechaintail(p);
	else{
		/*
		 * Free pages if we have plenty in the free list.
		 */
		pa = &pga.pgsza[p->pgszi];
		if(pa->freecount > Nfreepgs)
			rlse = 1;
		else
			pagechainhead(p);
	}
	/* wake anyone sleeping in newpage waiting for free memory */
	if(pga.rend.l.p != nil)
		wakeup(&pga.rend);
	unlock(&p->l);
	/* NOTE: pgfree is deliberately called with pga.l still held */
	if(rlse)
		pgfree(p);
	unlock(&pga.l);
}
/* * can be called with up == nil during boot. */ Page* newpage(int clear, Segment **s, uintptr_t va, usize size, int color) { Page *p; KMap *k; uint8_t ct; Pgsza *pa; int i, dontalloc, si; // static int once; si = getpgszi(size); //iprint("(remove this print and diea)newpage, size %x, si %d\n", size, si); pa = &pga.pgsza[si]; lock(&pga.l); /* * Beware, new page may enter a loop even if this loop does not * loop more than once, if the segment is lost and fault calls us * again. Either way, we accept any color if we failed a couple of times. */ for(i = 0;; i++){ if(i > 3) color = NOCOLOR; /* * 1. try to reuse a free one. */ p = findpg(pa->head, color); if(p != nil) break; /* * 2. try to allocate a new one from physical memory */ p = pgalloc(size, color); if(p != nil){ pagechainhead(p); break; } /* * 3. out of memory, try with the pager. * but release the segment (if any) while in the pager. */ unlock(&pga.l); dontalloc = 0; if(s && *s) { qunlock(&((*s)->lk)); *s = 0; dontalloc = 1; } /* * Try to get any page of the desired color * or any color for NOCOLOR. */ kickpager(si, color); /* * If called from fault and we lost the segment from * underneath don't waste time allocating and freeing * a page. Fault will call newpage again when it has * reacquired the segment locks */ if(dontalloc) return 0; lock(&pga.l); } assert(p != nil); ct = PG_NEWCOL; pageunchain(p); lock(&p->l); if(p->ref != 0) panic("newpage pa %#ullx", p->pa); uncachepage(p); p->ref++; p->va = va; p->modref = 0; for(i = 0; i < nelem(p->cachectl); i++) p->cachectl[i] = ct; unlock(&p->l); unlock(&pga.l); if(clear) { k = kmap(p); if (VA(k) == 0xfffffe007d800000ULL) trip++; // if (trip) die("trip before memset"); // This will frequently die if we use 3K-1 (3071 -- 0xbff) // it will not if we use 3070. // The fault is a null pointer deref. //memset((void*)VA(k), 0, machp()->pgsz[p->pgszi]); // thinking about it, using memset is stupid. 
// Don't get upset about this loop; // we make it readable, compilers optimize it. int i; uint64_t *v = (void *)VA(k); if (1) for(i = 0; i < sys->pgsz[p->pgszi]/sizeof(*v); i++) v[i] = 0; //if (trip) die("trip"); kunmap(k); } DBG("newpage: va %#p pa %#ullx pgsz %#ux color %d\n", p->va, p->pa, sys->pgsz[p->pgszi], p->color); return p; }