/*
 * Get rid of a swap buffer structure which has been used in physical I/O.
 * Mostly taken from /sys/vm/swap_pager.c, except that it now uses
 * wakeup() rather than the VM-internal thread_wakeup(), and that the caller
 * must mask disk interrupts, rather than putphysbuf() itself.
 */
void
putphysbuf(struct buf *bp)
{
	splassert(IPL_BIO);

	/* XXXCDC: is this necessary? */
	if (bp->b_vp)
		brelvp(bp);

#ifdef DIAGNOSTIC
	if (bp->b_flags & B_WANTED)
		panic("putphysbuf: private buf B_WANTED");
#endif
	pool_put(&bufpool, bp);
}
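
/*
 * For symmetry, a minimal sketch of the allocation side a caller might
 * pair with putphysbuf(). The name getphysbuf() and the PR_ZERO usage
 * are assumptions for illustration; pool_get(9) on the same bufpool is
 * the conventional counterpart of the pool_put() above, and the caller
 * is likewise expected to have blocked disk interrupts.
 */
struct buf *
getphysbuf(void)
{
	struct buf *bp;

	splassert(IPL_BIO);

	/* sleeps until a buf header is available; returned zeroed */
	bp = pool_get(&bufpool, PR_WAITOK | PR_ZERO);
	bp->b_flags = B_BUSY;
	return (bp);
}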
Example #2
/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 */
void
brelse(register struct buf *bp)
{
	int x;

	/* anyone need a "free" block? */
	x = splbio();
	if ((bfreelist + BQ_AGE)->b_flags & B_WANTED) {
		(bfreelist + BQ_AGE)->b_flags &= ~B_WANTED;
		wakeup(bfreelist);
	}
	/* anyone need this very block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}

	if (bp->b_flags & (B_INVAL|B_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI|B_CACHE);
		if (bp->b_vp)
			brelvp(bp);
	}

	/* enqueue */
	/* just an empty buffer head ... */
	/*if (bp->b_flags & B_HEAD)
		binsheadfree(bp, bfreelist + BQ_EMPTY);*/
	/* buffers with junk contents */
	/*else*/ if (bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE))
		binsheadfree(bp, bfreelist + BQ_AGE);
	/* buffers with stale but valid contents */
	else if (bp->b_flags & B_AGE)
		binstailfree(bp, bfreelist + BQ_AGE);
	/* buffers with valid and quite potentially reusable contents */
	else
		binstailfree(bp, bfreelist + BQ_LRU);

	/* unlock */
	bp->b_flags &= ~B_BUSY;
	splx(x);
}
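
/*
 * A sketch, under stated assumptions, of how binsheadfree() and
 * binstailfree() are typically defined in code of this vintage: the
 * free-list heads are dummy buf structures linked circularly through
 * av_forw/av_back, so each insertion is a handful of pointer
 * assignments. The macro bodies here are illustrative, not verbatim
 * from this source.
 */
#define	binsheadfree(bp, dp) \
	((dp)->av_forw->av_back = (bp), (bp)->av_forw = (dp)->av_forw, \
	 (dp)->av_forw = (bp), (bp)->av_back = (dp))
#define	binstailfree(bp, dp) \
	((dp)->av_back->av_forw = (bp), (bp)->av_back = (dp)->av_back, \
	 (dp)->av_back = (bp), (bp)->av_forw = (dp))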
Example #3
/*
 * Adjust the size of the buffer cache to newbufpages pages, shrinking
 * it immediately if we are now over the target.
 */
void
bufadjust(int newbufpages)
{
	struct buf *bp;
	int s;

	if (newbufpages < buflowpages)
		newbufpages = buflowpages;

	s = splbio();
	bufpages = newbufpages;

	/*
	 * We may use everything up to, but not into, the reserve.
	 */
	targetpages = bufpages - RESERVE_PAGES;

	/*
	 * Shrinking the cache happens here only if someone has manually
	 * adjusted bufcachepercent - or the pagedaemon has told us
	 * to give back memory *now* - so we give it all back.
	 */
	while ((bp = bufcache_getcleanbuf()) &&
	    (bcstats.numbufpages > targetpages)) {
		bufcache_take(bp);
		if (bp->b_vp) {
			RB_REMOVE(buf_rb_bufs,
			    &bp->b_vp->v_bufs_tree, bp);
			brelvp(bp);
		}
		buf_put(bp);
	}

	/*
	 * Wake up the cleaner if we have lots of dirty pages,
	 * or if we are getting low on buffer cache kva.
	 */
	if ((UNCLEAN_PAGES >= hidirtypages) ||
	    bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
		wakeup(&bd_req);

	splx(s);
}
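
/*
 * A hypothetical sketch of the path that usually leads here: an
 * administrator raises or lowers the cache percentage and the new page
 * budget is fed to bufadjust(). The helper name and the use of physmem
 * are assumptions for illustration; overflow and clamping are elided.
 */
void
bufcachepercent_change(int newpct)
{
	/* translate a percentage of physical memory into a page budget */
	bufhighpages = physmem * newpct / 100;
	bufadjust(bufhighpages);
}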
Example #4
/*
 * Find a buffer which is available for use.
 *
 * We must notify getblk() if we slept during the buffer allocation.
 * When that happens, we allocate a buffer anyway (unless tsleep() is
 * interrupted or times out) and return a nonzero value.
 */
int
getnewbuf(int slpflag, int slptimeo, struct buf **bpp)
{
	struct buf *bp;
	int s, ret, error;

	*bpp = NULL;
	ret = 0;

start:
	s = splbio();
	/*
	 * Wake up the cleaner if we have too many dirty pages.
	 */
	if (numdirtypages >= hidirtypages)
		wakeup(&bd_req);

	if ((numcleanpages <= locleanpages) &&
	    curproc != syncerproc && curproc != cleanerproc) {
		needbuffer++;
		error = tsleep(&needbuffer, slpflag|(PRIBIO+1), "getnewbuf",
				slptimeo);
		splx(s);
		if (error)
			return (1);
		ret = 1;
		goto start;
	}
	if ((bp = TAILQ_FIRST(&bufqueues[BQ_CLEAN])) == NULL) {
		/* wait for a free buffer of any kind */
		nobuffers = 1;
		error = tsleep(&nobuffers, slpflag|(PRIBIO-3),
				"getnewbuf", slptimeo);
		splx(s);
		if (error)
			return (1);
		ret = 1;
		goto start;
	}

	bremfree(bp);

	/* Buffer is no longer on free lists. */
	SET(bp->b_flags, B_BUSY);

#ifdef DIAGNOSTIC
	if (ISSET(bp->b_flags, B_DELWRI))
		panic("Dirty buffer on BQ_CLEAN");
#endif

	/* disassociate us from our vnode, if we had one... */
	if (bp->b_vp)
		brelvp(bp);

	splx(s);

#ifdef DIAGNOSTIC
	/* CLEAN buffers must have no dependencies */ 
	if (LIST_FIRST(&bp->b_dep) != NULL)
		panic("BQ_CLEAN has buffer with dependencies");
#endif

	/* clear out various other fields */
	bp->b_flags = B_BUSY;
	bp->b_dev = NODEV;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_resid = 0;
	bp->b_bcount = 0;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	bp->b_validoff = bp->b_validend = 0;

	bremhash(bp);
	*bpp = bp;
	return (ret);
}
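
/*
 * A sketch (assumed, not from this source) of how a getblk()-style
 * caller consumes the return value: zero means we did not sleep and bp
 * can be used directly; nonzero means we slept, so the block may have
 * appeared in the cache in the meantime and the hash must be rechecked
 * before bp is bound to (vp, blkno). B_BUSY handling on the incore hit
 * is elided here.
 */
struct buf *
getblk_sketch(struct vnode *vp, daddr_t blkno)
{
	struct buf *bp;

	for (;;) {
		if ((bp = incore(vp, blkno)) != NULL)
			return (bp);		/* already cached */
		if (getnewbuf(0, 0, &bp) == 0)
			return (bp);		/* no sleep: lookup still valid */
		if (bp == NULL)
			return (NULL);		/* tsleep interrupted or timed out */
		brelse(bp);			/* we slept: discard and recheck */
	}
}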
Example #5
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		}
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreepages += btoc(bp->b_bufsize);
			if (!ISSET(bp->b_flags, B_DELWRI)) {
				numcleanpages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_CLEAN];
			} else {
				numdirtypages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_DIRTY];
			}
		}
		if (ISSET(bp->b_flags, B_AGE))
			binsheadfree(bp, bufq);
		else
			binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE | B_DEFERRED));

	/* Wake up syncer and cleaner processes waiting for buffers */
	if (nobuffers) {
		wakeup(&nobuffers);
		nobuffers = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanpages > locleanpages)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	splx(s);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
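
/*
 * For reference, in kernels of this generation the free-queue helpers
 * above are thin wrappers over <sys/queue.h>. The definitions below are
 * the conventional ones, given as an assumption rather than a verbatim
 * quote of this source; BQUEUES is the assumed count of queues.
 */
TAILQ_HEAD(bqueues, buf) bufqueues[BQUEUES];

#define	binsheadfree(bp, dp)	TAILQ_INSERT_HEAD(dp, bp, b_freelist)
#define	binstailfree(bp, dp)	TAILQ_INSERT_TAIL(dp, bp, b_freelist)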
Example #6
/*
 * Find a buffer which is available for use.
 * If there is free memory for buffer space and an empty header on the
 * empty list, use that.  Otherwise, take something from a free list,
 * preferring the AGE list to the LRU list.
 */
static struct buf *
getnewbuf(int sz)
{
	struct buf *bp;
	int x;

	x = splbio();
start:
	/* can we constitute a new buffer? */
	if (freebufspace > sz &&
	    bfreelist[BQ_EMPTY].av_forw != &bfreelist[BQ_EMPTY]) {
		caddr_t addr;

/*#define notyet*/
#ifndef notyet
		if ((addr = malloc(sz, M_TEMP, M_WAITOK)) == NULL)
			goto tryfree;
#else /* notyet */
		/* get new memory buffer */
		if (round_page(sz) == sz)
			addr = (caddr_t)kmem_alloc_wired_wait(buffer_map, sz);
		else
			addr = (caddr_t)malloc(sz, M_TEMP, M_WAITOK);
		/*if ((addr = malloc(sz, M_TEMP, M_NOWAIT)) == 0) goto tryfree;*/
		bzero(addr, sz);
#endif /* notyet */
		freebufspace -= sz;
		allocbufspace += sz;

		bp = bfreelist[BQ_EMPTY].av_forw;
		bp->b_flags = B_BUSY | B_INVAL;
		bremfree(bp);
		bp->b_un.b_addr = addr;
		bp->b_bufsize = sz;	/* 20 Aug 92*/
		goto fillin;
	}

tryfree:
	if (bfreelist[BQ_AGE].av_forw != &bfreelist[BQ_AGE]) {
		bp = bfreelist[BQ_AGE].av_forw;
		bremfree(bp);
	} else if (bfreelist[BQ_LRU].av_forw != &bfreelist[BQ_LRU]) {
		bp = bfreelist[BQ_LRU].av_forw;
		bremfree(bp);
	} else {
		/* wait for a free buffer of any kind */
		(bfreelist + BQ_AGE)->b_flags |= B_WANTED;
		sleep(bfreelist, PRIBIO);
		splx(x);
		return (0);
	}

	/* if we are a delayed write, convert to an async write! */
	if (bp->b_flags & B_DELWRI) {
		bp->b_flags |= B_BUSY;
		bawrite(bp);
		goto start;
	}

	if (bp->b_vp)
		brelvp(bp);

	/* we are not free, nor do we contain interesting data */
	if (bp->b_rcred != NOCRED)
		crfree(bp->b_rcred);		/* 25 Apr 92 */
	if (bp->b_wcred != NOCRED)
		crfree(bp->b_wcred);
	bp->b_flags = B_BUSY;
fillin:
	bremhash(bp);
	splx(x);
	bp->b_dev = NODEV;
	bp->b_vp = NULL;
	bp->b_blkno = bp->b_lblkno = 0;
	bp->b_iodone = 0;
	bp->b_error = 0;
	bp->b_wcred = bp->b_rcred = NOCRED;
	if (bp->b_bufsize != sz)
		allocbuf(bp, sz);
	bp->b_bcount = bp->b_bufsize = sz;
	bp->b_dirtyoff = bp->b_dirtyend = 0;
	return (bp);
}
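
/*
 * Unlike the later getnewbuf() variants, this one returns NULL after
 * blocking on the free lists instead of retrying internally, so the
 * caller is expected to loop. A minimal, assumed usage pattern:
 */
struct buf *
getnewbuf_wait(int sz)
{
	struct buf *bp;

	/*
	 * brelse() wakes us when any buffer is freed; the free lists may
	 * have changed while we slept, so getnewbuf() re-examines them.
	 */
	while ((bp = getnewbuf(sz)) == NULL)
		continue;
	return (bp);
}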
Example #7
/*
 * Allocate a buffer.
 */
struct buf *
buf_get(struct vnode *vp, daddr_t blkno, size_t size)
{
	struct buf *bp;
	int poolwait = size == 0 ? PR_NOWAIT : PR_WAITOK;
	int npages;
	int s;

	s = splbio();
	if (size) {
		/*
		 * Wake up the cleaner if we have lots of dirty pages,
		 * or if we are getting low on buffer cache kva.
		 */
		if (UNCLEAN_PAGES >= hidirtypages ||
		    bcstats.kvaslots_avail <= 2 * RESERVE_SLOTS)
			wakeup(&bd_req);

		npages = atop(round_page(size));

		/*
		 * if our cache has been previously shrunk,
		 * allow it to grow again with use up to
		 * bufhighpages (cachepercent)
		 */
		if (bufpages < bufhighpages)
			bufadjust(bufhighpages);

		/*
		 * If we would go over the page target with our new
		 * allocation, free enough buffers first so that we
		 * stay at the target after allocating.
		 */
		while ((bcstats.numbufpages + npages > targetpages) &&
		    (bp = bufcache_getcleanbuf())) {
			bufcache_take(bp);
			if (bp->b_vp) {
				RB_REMOVE(buf_rb_bufs,
				    &bp->b_vp->v_bufs_tree, bp);
				brelvp(bp);
			}
			buf_put(bp);
		}

		/*
		 * If we get here, we tried to free the world above and
		 * still couldn't get down to the target; wake the
		 * cleaner and wait for it to push some buffers out.
		 */
		if ((bcstats.numbufpages + npages > targetpages ||
		    bcstats.kvaslots_avail <= RESERVE_SLOTS) &&
		    curproc != syncerproc && curproc != cleanerproc) {
			wakeup(&bd_req);
			needbuffer++;
			tsleep(&needbuffer, PRIBIO, "needbuffer", 0);
			splx(s);
			return (NULL);
		}
		if (bcstats.numbufpages + npages > bufpages) {
			/* cleaner or syncer */
			nobuffers = 1;
			tsleep(&nobuffers, PRIBIO, "nobuffers", 0);
			splx(s);
			return (NULL);
		}
	}

	bp = pool_get(&bufpool, poolwait|PR_ZERO);

	if (bp == NULL) {
		splx(s);
		return (NULL);
	}

	bp->b_freelist.tqe_next = NOLIST;
	bp->b_dev = NODEV;
	LIST_INIT(&bp->b_dep);
	bp->b_bcount = size;

	buf_acquire_nomap(bp);

	if (vp != NULL) {
		/*
		 * We insert the buffer into the hash with B_BUSY set
		 * while we allocate pages for it. This way any getblk
		 * that happens while we allocate pages will wait for
		 * this buffer instead of starting its own buf_get.
		 *
		 * But first, we check if someone beat us to it.
		 */
		if (incore(vp, blkno)) {
			pool_put(&bufpool, bp);
			splx(s);
			return (NULL);
		}

		bp->b_blkno = bp->b_lblkno = blkno;
		bgetvp(vp, bp);
		if (RB_INSERT(buf_rb_bufs, &vp->v_bufs_tree, bp))
			panic("buf_get: dup lblk vp %p bp %p", vp, bp);
	} else {
		bp->b_vnbufs.le_next = NOLIST;
		SET(bp->b_flags, B_INVAL);
		bp->b_vp = NULL;
	}

	LIST_INSERT_HEAD(&bufhead, bp, b_list);
	bcstats.numbufs++;

	if (size) {
		buf_alloc_pages(bp, round_page(size));
		buf_map(bp);
	}

	splx(s);

	return (bp);
}
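
/*
 * A sketch (assumed, and heavily simplified from the usual getblk()
 * shape) of how buf_get()'s NULL returns are consumed: NULL always
 * means "we slept or lost a race", and the remedy is simply to retry
 * the cache lookup from the top.
 */
struct buf *
getblk_retry_sketch(struct vnode *vp, daddr_t blkno, size_t size)
{
	struct buf *bp;

	for (;;) {
		if ((bp = incore(vp, blkno)) != NULL)
			return (bp);	/* real getblk also waits on B_BUSY here */
		if ((bp = buf_get(vp, blkno, size)) != NULL)
			return (bp);	/* allocated, hashed, and B_BUSY */
		/* NULL: slept in buf_get() or lost the incore race; retry */
	}
}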
Example #8
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	int s;

	s = splbio();

	if (bp->b_data != NULL)
		KASSERT(bp->b_bufsize > 0);

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if (ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If the buffer is invalid, free it now rather than leaving
		 * it in a queue and wasting memory.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			RB_REMOVE(buf_rb_bufs, &bp->b_vp->v_bufs_tree,
			    bp);
			brelvp(bp);
		}
		bp->b_vp = NULL;

		/*
		 * Wake up any processes waiting for _this_ buffer to
		 * become free. They are not allowed to grab it
		 * since it will be freed. But the only sleeper is
		 * getblk and it will restart the operation after
		 * sleep.
		 */
		if (ISSET(bp->b_flags, B_WANTED)) {
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
		buf_put(bp);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		bufcache_release(bp);

		/* Unlock the buffer. */
		CLR(bp->b_flags, (B_AGE | B_ASYNC | B_NOCACHE | B_DEFERRED));
		buf_release(bp);

		/*
		 * Wake up any processes waiting for _this_ buffer to
		 * become free.
		 */
		if (ISSET(bp->b_flags, B_WANTED)) {
			CLR(bp->b_flags, B_WANTED);
			wakeup(bp);
		}
	}

	/* Wake up syncer and cleaner processes waiting for buffers. */
	if (nobuffers) {
		nobuffers = 0;
		wakeup(&nobuffers);
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && bcstats.numbufpages < targetpages &&
	    bcstats.kvaslots_avail > RESERVE_SLOTS) {
		needbuffer = 0;
		wakeup(&needbuffer);
	}

	splx(s);
}
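
/*
 * The flag helpers used throughout these functions are conventionally
 * defined as below (in OpenBSD they live in <sys/param.h>); shown here
 * for completeness, as an assumption rather than a quote of this tree.
 */
#define	SET(t, f)	((t) |= (f))
#define	CLR(t, f)	((t) &= ~(f))
#define	ISSET(t, f)	((t) & (f))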