Example #1
/*
 * Initialize buffers and hash links for buffers.
 */
void
bufinit(void)
{
	struct buf *bp;
	struct bqueues *dp;
	int i;
	int base, residual;

	pool_init(&bufpool, sizeof(struct buf), 0, 0, 0, "bufpl", NULL);
	for (dp = bufqueues; dp < &bufqueues[BQUEUES]; dp++)
		TAILQ_INIT(dp);
	bufhashtbl = hashinit(nbuf, M_CACHE, M_WAITOK, &bufhash);
	base = bufpages / nbuf;
	residual = bufpages % nbuf;
	for (i = 0; i < nbuf; i++) {
		bp = &buf[i];
		bzero((char *)bp, sizeof *bp);
		bp->b_dev = NODEV;
		bp->b_vnbufs.le_next = NOLIST;
		bp->b_data = buffers + i * MAXBSIZE;
		LIST_INIT(&bp->b_dep);
		if (i < residual)
			bp->b_bufsize = (base + 1) * PAGE_SIZE;
		else
			bp->b_bufsize = base * PAGE_SIZE;
		bp->b_flags = B_INVAL;
		if (bp->b_bufsize) {
			dp = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		} else {
			dp = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		}
		binsheadfree(bp, dp);
		binshash(bp, &invalhash);
	}

	hidirtypages = bufpages / 4;
	lodirtypages = hidirtypages / 2;

	/*
	 * Reserve 5% of bufpages for the syncer's needs,
	 * but not more than 25% and, if possible, not
	 * less than 2 * MAXBSIZE.  The locleanpages value
	 * must not be too small, but there is probably no
	 * reason to set it higher than 1-2 MB.
	 */
	locleanpages = bufpages / 20;
	if (locleanpages < btoc(2 * MAXBSIZE))
		locleanpages = btoc(2 * MAXBSIZE);
	if (locleanpages > bufpages / 4)
		locleanpages = bufpages / 4;
	if (locleanpages > btoc(2 * 1024 * 1024))
		locleanpages = btoc(2 * 1024 * 1024);

#ifdef DEBUG
	mincleanpages = locleanpages;
#endif
}
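As a side note on the arithmetic above: bufpages is split so that every one of the nbuf buffers gets base pages and the first residual buffers get one extra, and locleanpages is then raised to at least 2 * MAXBSIZE and capped at 25% of bufpages and at 2 MB. The userland sketch below walks through both computations with made-up numbers (bufpages = 1000, nbuf = 300); the constants and the simplified btoc() are assumptions for illustration, not the kernel's definitions.

#include <stdio.h>

/* Stand-ins for the kernel tunables; the values here are assumptions. */
#define PAGE_SIZE	4096
#define MAXBSIZE	65536
#define btoc(x)		(((x) + PAGE_SIZE - 1) / PAGE_SIZE)	/* bytes to pages, simplified */

int
main(void)
{
	int bufpages = 1000, nbuf = 300;
	int base = bufpages / nbuf;		/* 3 pages for every buffer ...       */
	int residual = bufpages % nbuf;		/* ... plus 1 extra for the first 100 */
	int i, total = 0, locleanpages;

	for (i = 0; i < nbuf; i++)
		total += (i < residual) ? base + 1 : base;
	printf("pages handed out: %d of %d\n", total, bufpages);	/* 1000 of 1000 */

	/* 5% of bufpages, raised to 2 * MAXBSIZE, capped at 25% and at 2 MB (in pages). */
	locleanpages = bufpages / 20;
	if (locleanpages < btoc(2 * MAXBSIZE))
		locleanpages = btoc(2 * MAXBSIZE);
	if (locleanpages > bufpages / 4)
		locleanpages = bufpages / 4;
	if (locleanpages > btoc(2 * 1024 * 1024))
		locleanpages = btoc(2 * 1024 * 1024);
	printf("locleanpages = %d pages\n", locleanpages);		/* 50 */
	return 0;
}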
Example #2
File: vfs__bio.c  Project: dank101/386BSD
/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
void
brelse(register struct buf *bp)
{
	int x;

	/* anyone need a "free" block? */
	x = splbio();
	if ((bfreelist + BQ_AGE)->b_flags & B_WANTED) {
		(bfreelist + BQ_AGE)->b_flags &= ~B_WANTED;
		wakeup(bfreelist);
	}
	/* anyone need this very block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}

	if (bp->b_flags & (B_INVAL|B_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI|B_CACHE);
		if (bp->b_vp)
			brelvp(bp);
	}

	/* enqueue */
	/* just an empty buffer head ... */
	/*if(bp->b_flags & B_HEAD)
		binsheadfree(bp, bfreelist + BQ_EMPTY)*/
	/* buffers with junk contents */
	/*else*/ if (bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE))
		binsheadfree(bp, bfreelist + BQ_AGE)
	/* buffers with stale but valid contents */
	else if (bp->b_flags & B_AGE)
		binstailfree(bp, bfreelist + BQ_AGE)
	/* buffers with valid and quite potentially reusable contents */
	else
		binstailfree(bp, bfreelist + BQ_LRU)

	/* unlock */
	bp->b_flags &= ~B_BUSY;
	splx(x);
}
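For context, the pattern a caller typically follows with this interface is to read a block through the buffer cache and hand it back with brelse(). The sketch below assumes the 4.3BSD-style bread() prototype (vnode, logical block, size, credential, &bp); treat the exact signature and the NOCRED credential as assumptions rather than what this particular tree declares.

int
read_one_block(struct vnode *vp, daddr_t lbn, int size)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, size, NOCRED, &bp);
	if (error) {
		brelse(bp);		/* the error path still returns the buffer */
		return (error);
	}

	/* ... examine bp->b_data here ... */

	brelse(bp);			/* back onto a free list; wakes any waiters */
	return (0);
}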
Example #3
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		}
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreepages += btoc(bp->b_bufsize);
			if (!ISSET(bp->b_flags, B_DELWRI)) {
				numcleanpages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_CLEAN];
			} else {
				numdirtypages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_DIRTY];
			}
		}
		if (ISSET(bp->b_flags, B_AGE))
			binsheadfree(bp, bufq);
		else
			binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE | B_DEFERRED));

	/* Wake up syncer and cleaner processes waiting for buffers */
	if (nobuffers) {
		wakeup(&nobuffers);
		nobuffers = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanpages > locleanpages)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	splx(s);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
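To summarize the branching above: empty or invalid buffers go to the head of BQ_EMPTY or BQ_CLEAN, locked buffers go to BQ_LOCKED, and everything else lands on BQ_CLEAN or BQ_DIRTY depending on B_DELWRI, at the head if B_AGE is set and otherwise at the tail. The helper below is a paraphrase for illustration only; pick_queue() does not exist in the kernel, and the page accounting done by brelse() is omitted.

static struct bqueues *
pick_queue(struct buf *bp, int *at_head)
{
	if (bp->b_bufsize <= 0 || ISSET(bp->b_flags, B_INVAL)) {
		*at_head = 1;			/* empty/invalid: reuse these first */
		return bp->b_bufsize <= 0 ?
		    &bufqueues[BQ_EMPTY] : &bufqueues[BQ_CLEAN];
	}
	*at_head = ISSET(bp->b_flags, B_AGE) ? 1 : 0;
	if (ISSET(bp->b_flags, B_LOCKED))
		return &bufqueues[BQ_LOCKED];	/* locked in core */
	return ISSET(bp->b_flags, B_DELWRI) ?
	    &bufqueues[BQ_DIRTY] : &bufqueues[BQ_CLEAN];
}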