Example #1
/*
 * Get an auxiliary page.
 * Fail if fewer than Nminfree pages of that size are free.
 * Used only by the cache.
 * Callers must specify the page size.
 */
Page*
auxpage(usize size)
{
	Page *p;
	Pgsza *pa;
	int si;

	si = getpgszi(size);
	lock(&pga.l);
	pa = &pga.pgsza[si];
	p = pa->head;
	if(pa->freecount < Nminfree){
		unlock(&pga.l);
		return nil;
	}
	pageunchain(p);
	lock(&p->l);
	if(p->ref != 0)
		panic("auxpage");
	p->ref++;
	uncachepage(p);
	unlock(&p->l);
	unlock(&pga.l);

	return p;
}
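
The allocation above only succeeds while at least Nminfree pages of the requested size remain on the free list; otherwise it returns nil so the cache cannot drain the allocator. Below is a minimal, self-contained user-space sketch of that watermark check, with invented names (Sizeclass, takepage, NMINFREE) and no locking; it is an illustration, not the kernel API.

/*
 * Illustrative sketch: pop the head of a per-size free list
 * only while at least NMINFREE entries remain free.
 */
#include <stdio.h>
#include <stdlib.h>

enum { NMINFREE = 4 };

typedef struct Node Node;
struct Node {
	Node *next;
	size_t size;
};

typedef struct {
	Node *head;
	int freecount;
} Sizeclass;

/* Pop the head only while at least NMINFREE entries remain free. */
static Node*
takepage(Sizeclass *sc)
{
	Node *n;

	if(sc->freecount < NMINFREE || sc->head == NULL)
		return NULL;
	n = sc->head;
	sc->head = n->next;
	sc->freecount--;
	return n;
}

int
main(void)
{
	Sizeclass sc = { NULL, 0 };
	Node *n;
	int i;

	for(i = 0; i < 6; i++){		/* seed the free list */
		n = malloc(sizeof *n);
		n->size = 4096;
		n->next = sc.head;
		sc.head = n;
		sc.freecount++;
	}
	while((n = takepage(&sc)) != NULL)
		free(n);
	printf("left on the list: %d (watermark %d)\n", sc.freecount, NMINFREE);
	return 0;
}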
Example #2
/*
 * Called from imagereclaim to try to release Images.
 * The argument names the preferred image to release pages from.
 * All images are tried, from LRU to MRU.
 */
uint64_t
pagereclaim(Image *i)
{
	Page *p;
	uint64_t ticks;

	lock(&pga.l);
	ticks = fastticks(nil);

	/*
	 * All the pages with images backing them are at the
	 * end of the list (see putpage) so start there and work
	 * backward.
	 */
	for(p = pga.pgsza[0].tail; p && p->image == i; p = p->prev){
		if(p->ref == 0 && canlock(&p->l)){
			if(p->ref == 0) {
				uncachepage(p);
			}
			unlock(&p->l);
		}
	}
	ticks = fastticks(nil) - ticks;
	unlock(&pga.l);

	return ticks;
}
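
The scan relies on putpage keeping image-backed pages at the tail of the free list, so a backward walk from the tail visits only candidates and can stop at the first page belonging to another image. Here is a hedged, self-contained sketch of that tail-backward scan over a doubly linked list; the Ent type and its fields are invented stand-ins, and the locking and uncachepage work of the real code are reduced to a counter.

/*
 * Illustrative sketch: image-backed entries sit at the tail of a
 * doubly linked list, so walking backward from the tail visits only
 * them and stops at the first entry owned by another image.
 */
#include <stdio.h>

typedef struct Ent Ent;
struct Ent {
	Ent *prev;
	Ent *next;
	int image;	/* stand-in for p->image */
	int ref;	/* stand-in for p->ref */
};

/* Count tail entries that belong to image and are unreferenced. */
static int
scantail(Ent *tail, int image)
{
	Ent *e;
	int n;

	n = 0;
	for(e = tail; e != NULL && e->image == image; e = e->prev)
		if(e->ref == 0)
			n++;	/* the kernel would lock e and uncache it here */
	return n;
}

int
main(void)
{
	Ent e[4] = {
		{ NULL, &e[1], 1, 0 },
		{ &e[0], &e[2], 2, 0 },
		{ &e[1], &e[3], 2, 1 },
		{ &e[2], NULL, 2, 0 },
	};

	printf("reclaimable for image 2: %d\n", scantail(&e[3], 2));
	return 0;
}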
Example #3
static void
pagepte(int type, Page **pg)
{
	ulong daddr;
	Page *outp;

	outp = *pg;
	switch(type) {
	case SG_TEXT:				/* Revert to demand load */
		putpage(outp);
		*pg = 0;
		break;

	case SG_DATA:
	case SG_BSS:
	case SG_STACK:
	case SG_SHARED:
		/*
		 *  get a new swap address and clear any pages
		 *  referring to it from the cache
		 */
		daddr = newswap();
		if(daddr == ~0)
			break;
		cachedel(&swapimage, daddr);

		lock(&outp->lk);

		/* forget anything that it used to cache */
		uncachepage(outp);

		/*
		 *  increment the reference count so the page sticks
		 *  around while it is being written out
		 */
		outp->ref++;

		/*
		 *  enter it into the cache so that a fault happening
		 *  during the write will grab the page from the cache
		 *  rather than one partially written to the disk
		 */
		outp->daddr = daddr;
		cachepage(outp, &swapimage);
		*pg = (Page*)(daddr|PG_ONSWAP);
		unlock(&outp->lk);

		/* Add page to IO transaction list */
		iolist[ioptr++] = outp;
		break;
	}
}
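
The interesting move above is that the page-table slot is reused to hold either a Page pointer or a swap address tagged with PG_ONSWAP. The sketch below shows the same tag-bit idea in portable C with invented names (ONSWAP, encodeswap); unlike the kernel, which ORs daddr with the flag directly, this version shifts the address so the tag bit is always free.

/*
 * Illustrative sketch: a pointer-sized slot holds either a real
 * pointer (tag bit clear, guaranteed by alignment) or a swap
 * address with a low tag bit set.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

enum { ONSWAP = 1 };	/* low bit marks "this slot holds a swap address" */

typedef struct { int dummy; } Pg;

static uintptr_t
encodeswap(uint32_t daddr)
{
	return ((uintptr_t)daddr << 1) | ONSWAP;	/* shift keeps the tag bit free */
}

static int
onswap(uintptr_t slot)
{
	return (slot & ONSWAP) != 0;
}

static uint32_t
swapaddr(uintptr_t slot)
{
	assert(onswap(slot));
	return (uint32_t)(slot >> 1);
}

int
main(void)
{
	Pg page;
	uintptr_t slot;

	slot = (uintptr_t)&page;		/* resident: plain pointer, tag bit clear */
	printf("resident slot onswap: %d\n", onswap(slot));

	slot = encodeswap(0x1234);		/* swapped out: tagged swap address */
	printf("swapped slot onswap: %d addr %#x\n", onswap(slot), (unsigned)swapaddr(slot));
	return 0;
}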
Example #4
static void
imagereclaim(void)
{
	int n;
	Page *p;
	uvlong ticks;

	irstats.calls++;
	/* if somebody is already cleaning the page cache, don't pile in */
	if(!canqlock(&imagealloc.ireclaim))
		return;

	lock(&palloc);
	ticks = fastticks(nil);
	n = 0;
	/*
	 * All the pages with images backing them are at the
	 * end of the list (see putpage) so start there and work
	 * backward.
	 */
	for(p = palloc.tail; p && p->image && (n<1000 || !imagealloc.free); p = p->prev) {
		if(p->ref == 0 && canlock(p)) {
			if(p->ref == 0 && p->image && !p->image->notext) {
				n++;
				uncachepage(p);
			}
			unlock(p);
		}
	}
	ticks = fastticks(nil) - ticks;
	unlock(&palloc);
	irstats.loops++;
	irstats.ticks += ticks;
	if(ticks > irstats.maxt)
		irstats.maxt = ticks;
	//print("T%llud+", ticks);
	qunlock(&imagealloc.ireclaim);
}
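
canqlock gives imagereclaim a single-reclaimer guard: if someone else already holds ireclaim, the caller returns instead of blocking. Here is a small sketch of the same guard using a C11 atomic flag; it is an analogy, not the kernel's qlock, and the second call in main only pretends another CPU is reclaiming.

/*
 * Illustrative sketch: allow only one reclaimer at a time by
 * try-acquiring a flag and returning immediately if it is taken.
 */
#include <stdatomic.h>
#include <stdio.h>

static atomic_flag reclaiming = ATOMIC_FLAG_INIT;

static void
reclaim(void)
{
	if(atomic_flag_test_and_set(&reclaiming)){
		printf("someone else is already cleaning; skip\n");
		return;
	}
	printf("scanning the page cache...\n");	/* the walk would go here */
	atomic_flag_clear(&reclaiming);
}

int
main(void)
{
	reclaim();				/* runs the scan */
	atomic_flag_test_and_set(&reclaiming);	/* pretend another CPU holds the guard */
	reclaim();				/* skips */
	atomic_flag_clear(&reclaiming);
	return 0;
}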
Example #5
Page*
auxpage(void)
{
	Page *p;

	lock(&palloc);
	p = palloc.head;
	if(palloc.freecount < swapalloc.highwater) {
		unlock(&palloc);
		return 0;
	}
	pageunchain(p);

	lock(p);
	if(p->ref != 0)
		panic("auxpage");
	p->ref++;
	uncachepage(p);
	unlock(p);
	unlock(&palloc);

	return p;
}
Example #6
int
duppage(Page *p)				/* Always call with p locked */
{
	Proc *up = externup();
	Pgsza *pa;
	Page *np;
	int color;
	int retries;

	retries = 0;
retry:

	if(retries++ > dupretries){
		print("duppage %d, up %#p\n", retries, up);
		dupretries += 100;
		if(dupretries > 100000)
			panic("duppage\n");
		uncachepage(p);
		return 1;
	}


	/* don't dup pages with no image */
	if(p->ref == 0 || p->image == nil || p->image->notext)
		return 0;

	/*
	 *  normal lock ordering is to call
	 *  lock(&pga.l) before lock(&p->l).
	 *  To avoid deadlock, we have to drop
	 *  our locks and try again.
	 */
	if(!canlock(&pga.l)){
		unlock(&p->l);
		if(up)
			sched();
		lock(&p->l);
		goto retry;
	}

	pa = &pga.pgsza[p->pgszi];
	/* No freelist cache when memory is very low */
	if(pa->freecount < Nminfree){
		unlock(&pga.l);
		uncachepage(p);
		return 1;
	}

	color = p->color;
	for(np = pa->head; np; np = np->next)
		if(np->color == color)
			break;

	/* No page of the correct color */
	if(np == 0){
		unlock(&pga.l);
		uncachepage(p);
		return 1;
	}

	pageunchain(np);
	pagechaintail(np);
	/*
	 * XXX - here's a bug? - np is on the freelist but it's not really free.
	 * when we unlock palloc someone else can come in, decide to
	 * use np, and then try to lock it.  they succeed after we've
	 * run copypage and cachepage and unlock(np).  then what?
	 * they call pageunchain before locking(np), so it's removed
	 * from the freelist, but still in the cache because of
	 * cachepage below.  if someone else looks in the cache
	 * before they remove it, the page will have a nonzero ref
	 * once they finally lock(np).
	 *
	 * What I know is that not doing the pagechaintail, but
	 * doing it at the end, to prevent the race, leads to a
	 * deadlock, even following the pga, pg lock ordering. -nemo
	 */
	lock(&np->l);
	unlock(&pga.l);

	/* Cache the new version */
	uncachepage(np);
	np->va = p->va;
	np->daddr = p->daddr;
	copypage(p, np);
	cachepage(np, p->image);
	unlock(&np->l);
	uncachepage(p);

	return 0;
}
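
duppage is entered holding the page lock, but the canonical order is pga before the page, so it trylocks pga and, on failure, drops the page lock, yields, and starts over (revalidating the page on each retry). The POSIX sketch below shows just that back-off-and-retry shape with two invented mutexes; it omits the revalidation the kernel performs after re-taking the page lock.

/*
 * Illustrative sketch: acquire a second lock whose canonical order
 * comes first by try-locking it and backing off on failure.
 */
#include <pthread.h>
#include <sched.h>
#include <stdio.h>

static pthread_mutex_t listlock = PTHREAD_MUTEX_INITIALIZER;
static pthread_mutex_t pagelock = PTHREAD_MUTEX_INITIALIZER;

/* Called with pagelock held; returns with both locks held. */
static void
lockboth(void)
{
	while(pthread_mutex_trylock(&listlock) != 0){
		pthread_mutex_unlock(&pagelock);	/* back off to avoid deadlock */
		sched_yield();
		pthread_mutex_lock(&pagelock);		/* re-take and try again */
	}
}

int
main(void)
{
	pthread_mutex_lock(&pagelock);
	lockboth();
	printf("holding both locks in the canonical order\n");
	pthread_mutex_unlock(&listlock);
	pthread_mutex_unlock(&pagelock);
	return 0;
}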
Example #7
/*
 * can be called with up == nil during boot.
 */
Page*
newpage(int clear, Segment **s, uintptr_t va, usize size, int color)
{
	Page *p;
	KMap *k;
	uint8_t ct;
	Pgsza *pa;
	int i, dontalloc, si;

	si = getpgszi(size);
	pa = &pga.pgsza[si];

	lock(&pga.l);
	/*
	 * Beware: newpage can effectively loop even when this loop runs
	 * only once, because fault will call us again if the segment is
	 * lost. Either way, accept any color after a few failed rounds.
	 */
	for(i = 0;; i++){
		if(i > 3)
			color = NOCOLOR;

		/*
		 * 1. try to reuse a free one.
		 */
		p = findpg(pa->head, color);
		if(p != nil)
			break;

		/*
		 * 2. try to allocate a new one from physical memory
		 */
		p = pgalloc(size, color);
		if(p != nil){
			pagechainhead(p);
			break;
		}

		/*
		 * 3. out of memory, try with the pager.
		 * but release the segment (if any) while in the pager.
		 */
		unlock(&pga.l);

		dontalloc = 0;
		if(s && *s) {
			qunlock(&((*s)->lk));
			*s = 0;
			dontalloc = 1;
		}

		/*
		 * Try to get any page of the desired color
		 * or any color for NOCOLOR.
		 */
		kickpager(si, color);

		/*
		 * If called from fault and we lost the segment from
		 * underneath don't waste time allocating and freeing
		 * a page. Fault will call newpage again when it has
		 * reacquired the segment locks
		 */
		if(dontalloc)
			return 0;

		lock(&pga.l);
	}

	assert(p != nil);
	ct = PG_NEWCOL;

	pageunchain(p);

	lock(&p->l);
	if(p->ref != 0)
		panic("newpage pa %#ullx", p->pa);

	uncachepage(p);
	p->ref++;
	p->va = va;
	p->modref = 0;
	for(i = 0; i < nelem(p->cachectl); i++)
		p->cachectl[i] = ct;
	unlock(&p->l);
	unlock(&pga.l);

	if(clear) {
		k = kmap(p);
		/*
		 * Zero the page with an explicit loop rather than memset;
		 * the loop reads clearly and compilers optimize it well.
		 */
		uint64_t *v = (void *)VA(k);
		for(i = 0; i < sys->pgsz[p->pgszi]/sizeof(*v); i++)
			v[i] = 0;
		kunmap(k);
	}
	DBG("newpage: va %#p pa %#ullx pgsz %#ux color %d\n",
		p->va, p->pa, sys->pgsz[p->pgszi], p->color);

	return p;
}
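
newpage prefers a free page whose cache color matches, and after a few failed rounds it stops insisting and accepts any color (NOCOLOR). The sketch below captures only that policy with invented helpers (pickpg, newpg); the real function also tries pgalloc and the pager between rounds, which is elided here.

/*
 * Illustrative sketch: prefer a color-matched free page, fall back
 * to any color after a few failed rounds.
 */
#include <stdio.h>

enum { NOCOLOR = -1 };

typedef struct Pg Pg;
struct Pg {
	Pg *next;
	int color;
};

/* Return the first page matching color, or any page when color == NOCOLOR. */
static Pg*
pickpg(Pg *head, int color)
{
	Pg *p;

	for(p = head; p != NULL; p = p->next)
		if(color == NOCOLOR || p->color == color)
			return p;
	return NULL;
}

static Pg*
newpg(Pg *head, int color)
{
	Pg *p;
	int try;

	for(try = 0; ; try++){
		if(try > 3)
			color = NOCOLOR;	/* stop insisting after a few rounds */
		p = pickpg(head, color);
		if(p != NULL)
			return p;
		if(color == NOCOLOR)
			return NULL;		/* truly out of pages */
	}
}

int
main(void)
{
	Pg b = { NULL, 2 };
	Pg a = { &b, 1 };

	printf("want color 1: got color %d\n", newpg(&a, 1)->color);
	printf("want color 3: got color %d\n", newpg(&a, 3)->color);
	return 0;
}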
Example #8
int
duppage(Page *p)				/* Always call with p locked */
{
	Page *np;
	int color;
	int retries;

	retries = 0;
retry:

	if(retries++ > dupretries){
		print("duppage %d, up %p\n", retries, up);
		dupretries += 100;
		if(dupretries > 100000)
			panic("duppage\n");
		uncachepage(p);
		return 1;
	}
		

	/* don't dup pages with no image */
	if(p->ref == 0 || p->image == nil || p->image->notext)
		return 0;

	/*
	 *  normal lock ordering is to call
	 *  lock(&palloc) before lock(p).
	 *  To avoid deadlock, we have to drop
	 *  our locks and try again.
	 */
	if(!canlock(&palloc)){
		unlock(p);
		if(up)
			sched();
		lock(p);
		goto retry;
	}

	/* No freelist cache when memory is very low */
	if(palloc.freecount < swapalloc.highwater) {
		unlock(&palloc);
		uncachepage(p);
		return 1;
	}

	color = getpgcolor(p->va);
	for(np = palloc.head; np; np = np->next)
		if(np->color == color)
			break;

	/* No page of the correct color */
	if(np == 0) {
		unlock(&palloc);
		uncachepage(p);
		return 1;
	}

	pageunchain(np);
	pagechaintail(np);
	/*
	 * XXX - here's a bug? - np is on the freelist but it's not really free.
	 * when we unlock palloc someone else can come in, decide to
	 * use np, and then try to lock it.  they succeed after we've
	 * run copypage and cachepage and unlock(np).  then what?
	 * they call pageunchain before locking(np), so it's removed
	 * from the freelist, but still in the cache because of
	 * cachepage below.  if someone else looks in the cache
	 * before they remove it, the page will have a nonzero ref
	 * once they finally lock(np).
	 */
	lock(np);
	unlock(&palloc);

	/* Cache the new version */
	uncachepage(np);
	np->va = p->va;
	np->daddr = p->daddr;
	copypage(p, np);
	cachepage(np, p->image);
	unlock(np);
	uncachepage(p);

	return 0;
}
Example #9
Page*
newpage(int clear, Segment **s, ulong va)
{
	Page *p;
	KMap *k;
	uchar ct;
	int i, hw, dontalloc, color;

	lock(&palloc);
	color = getpgcolor(va);
	hw = swapalloc.highwater;
	for(;;) {
		if(palloc.freecount > hw)
			break;
		if(up->kp && palloc.freecount > 0)
			break;

		unlock(&palloc);
		dontalloc = 0;
		if(s && *s) {
			qunlock(&((*s)->lk));
			*s = 0;
			dontalloc = 1;
		}
		qlock(&palloc.pwait);	/* Hold memory requesters here */

		while(waserror())	/* Ignore interrupts */
			;

		kickpager();
		tsleep(&palloc.r, ispages, 0, 1000);

		poperror();

		qunlock(&palloc.pwait);

		/*
		 * If called from fault and we lost the segment from
		 * underneath don't waste time allocating and freeing
		 * a page. Fault will call newpage again when it has
		 * reacquired the segment locks
		 */
		if(dontalloc)
			return 0;

		lock(&palloc);
	}

	/* First try for our colour */
	for(p = palloc.head; p; p = p->next)
		if(p->color == color)
			break;

	ct = PG_NOFLUSH;
	if(p == 0) {
		p = palloc.head;
		p->color = color;
		ct = PG_NEWCOL;
	}

	pageunchain(p);

	lock(p);
	if(p->ref != 0)
		panic("newpage: p->ref %d != 0", p->ref);

	uncachepage(p);
	p->ref++;
	p->va = va;
	p->modref = 0;
	for(i = 0; i < MAXMACH; i++)
		p->cachectl[i] = ct;
	unlock(p);
	unlock(&palloc);

	if(clear) {
		k = kmap(p);
		memset((void*)VA(k), 0, BY2PG);
		kunmap(k);
	}

	return p;
}
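
In this older version, requesters that find the free count at or below swapalloc.highwater queue on pwait, kick the pager, and tsleep until pages appear. The sketch below reproduces that wait-for-the-pager shape with a POSIX condition variable and invented names; the one-second timeout and error handling of the original are left out.

/*
 * Illustrative sketch: requesters sleep while the free count is at or
 * below the high-water mark; the "pager" frees pages and wakes them.
 */
#include <pthread.h>
#include <stdio.h>

enum { HIGHWATER = 2 };

static pthread_mutex_t lk = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t haspages = PTHREAD_COND_INITIALIZER;
static int freecount;

static void*
pager(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lk);
	freecount = HIGHWATER + 3;		/* pretend we reclaimed some pages */
	pthread_cond_broadcast(&haspages);
	pthread_mutex_unlock(&lk);
	return NULL;
}

int
main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, pager, NULL);
	pthread_mutex_lock(&lk);
	while(freecount <= HIGHWATER)		/* ispages() analogue */
		pthread_cond_wait(&haspages, &lk);
	freecount--;				/* take our page */
	pthread_mutex_unlock(&lk);
	pthread_join(t, NULL);
	printf("got a page, %d left free\n", freecount);
	return 0;
}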