/*
 * Initialize buffer headers and related structures.
 *
 * Called once at boot.  Builds three structures used by the buffer cache:
 *   1. the buffer hash table (bufhash[BUFHSZ]) -- each bucket is made an
 *      empty circular list pointing at itself;
 *   2. the free-list headers (bfreelist[BQUEUES]) -- each queue head is
 *      made an empty circular list on both the av_* and b_* links;
 *   3. the buffer headers themselves (buf[0..nbuf-1]) -- each is marked as
 *      an empty, invalid header and placed on the BQ_EMPTY queue and hash.
 *
 * Assumes bufhash, bfreelist, buf and nbuf are already set up by machine-
 * dependent startup code -- TODO confirm against the caller.
 */
void bufinit()
{
	struct bufhd *bh;
	struct buf *bp;

	/* first, make a null hash table */
	for (bh = bufhash; bh < bufhash + BUFHSZ; bh++) {
		bh->b_flags = 0;
		/* circular list: an empty bucket points at itself.
		 * NOTE(review): the cast treats a bufhd as a buf -- presumably
		 * the two structs share their link-field layout; verify. */
		bh->b_forw = (struct buf *)bh;
		bh->b_back = (struct buf *)bh;
	}

	/* next, make a null set of free lists */
	for (bp = bfreelist; bp < bfreelist + BQUEUES; bp++) {
		bp->b_flags = 0;
		/* both the free-list (av_*) and hash (b_*) links start
		 * self-referential, i.e. every queue begins empty. */
		bp->av_forw = bp;
		bp->av_back = bp;
		bp->b_forw = bp;
		bp->b_back = bp;
	}

	/* finally, initialize each buffer header and stick on empty q */
	for (bp = buf; bp < buf + nbuf; bp++) {
		bp->b_flags = B_HEAD | B_INVAL;	/* we're just an empty header */
		bp->b_dev = NODEV;		/* not associated with any device yet */
		bp->b_vp = 0;			/* no vnode attached */
		/* queue on BQ_EMPTY; also hashed onto the same header --
		 * presumably so every buffer is always findable. */
		binstailfree(bp, bfreelist + BQ_EMPTY);
		binshash(bp, bfreelist + BQ_EMPTY);
	}
}
/*
 * Release a buffer.
 * Even if the buffer is dirty, no I/O is started.
 *
 * Wakes any sleepers waiting for a free buffer or for this specific
 * buffer, invalidates the buffer on error, requeues it on the
 * appropriate free list (age / LRU), and clears B_BUSY.
 *
 * Runs at splbio() for the entire body.
 */
void brelse(register struct buf *bp)
{
	int x;

	/* anyone need a "free" block? */
	x = splbio();
	if ((bfreelist + BQ_AGE)->b_flags & B_WANTED) {
		(bfreelist + BQ_AGE)->b_flags &= ~B_WANTED;
		/* NOTE(review): the B_WANTED flag is tested on the BQ_AGE
		 * header but the wakeup channel is bfreelist (== BQ_LOCKED?).
		 * Presumably sleepers block on &bfreelist[0]; confirm the
		 * tsleep/sleep address used by getnewbuf-style callers. */
		wakeup(bfreelist);
	}

	/* anyone need this very block? */
	if (bp->b_flags & B_WANTED) {
		bp->b_flags &= ~B_WANTED;
		wakeup(bp);
	}

	/* On error, poison the buffer: mark invalid, forget any pending
	 * delayed write, and dissociate it from its vnode. */
	if (bp->b_flags & (B_INVAL|B_ERROR)) {
		bp->b_flags |= B_INVAL;
		bp->b_flags &= ~(B_DELWRI|B_CACHE);
		if (bp->b_vp)
			brelvp(bp);
	}

	/* enqueue */
	/* just an empty buffer head ... */
	/*if(bp->b_flags & B_HEAD) binsheadfree(bp, bfreelist + BQ_EMPTY)*/
	/* buffers with junk contents */
	/*else*/
	/* NOTE(review): the binsheadfree/binstailfree invocations below carry
	 * no trailing semicolon; this only compiles if the macros expand to a
	 * brace block ({ ... }) rather than a do { } while (0) form -- verify
	 * the macro definitions before reformatting or adding semicolons. */
	if (bp->b_flags & (B_ERROR|B_INVAL|B_NOCACHE))
		binsheadfree(bp, bfreelist + BQ_AGE)
	/* buffers with stale but valid contents */
	else if (bp->b_flags & B_AGE)
		binstailfree(bp, bfreelist + BQ_AGE)
	/* buffers with valid and quite potentially reuseable contents */
	else
		binstailfree(bp, bfreelist + BQ_LRU)

	/* unlock */
	bp->b_flags &= ~B_BUSY;
	splx(x);
}
/*
 * Buffer cleaning daemon.
 *
 * Kernel thread that flushes delayed-write buffers from BQ_DIRTY.
 * Sleeps on &bd_req until the dirty-page count reaches hidirtypages,
 * then writes buffers (via bawrite) until the count drops below
 * lodirtypages or one second of wall-clock time has elapsed.
 *
 * spl discipline: the dirty queue is manipulated at splbio(); the
 * write itself is issued with interrupts enabled.
 * NOTE(review): when the while loop exits -- queue empty, low-water
 * reached, or timeout -- the final splbio() taken at the loop top/bottom
 * is never matched by an splx() before tsleep(); presumably tsleep
 * saves/restores spl, but confirm, otherwise the priority level leaks.
 */
void buf_daemon(struct proc *p)
{
	int s;
	struct buf *bp;
	struct timeval starttime, timediff;

	cleanerproc = curproc;	/* record our identity for wakeup/signalling */

	for (;;) {
		/* Nothing urgent to do: sleep until someone hits high water. */
		if (numdirtypages < hidirtypages) {
			tsleep(&bd_req, PRIBIO - 7, "cleaner", 0);
		}

		starttime = time;	/* for the 1-second processing cap below */
		s = splbio();
		while ((bp = TAILQ_FIRST(&bufqueues[BQ_DIRTY]))) {
			/* Claim the buffer, then drop spl for the slow work. */
			bremfree(bp);
			SET(bp->b_flags, B_BUSY);
			splx(s);

			/* Invalidated while queued: just release it. */
			if (ISSET(bp->b_flags, B_INVAL)) {
				brelse(bp);
				s = splbio();
				continue;
			}
#ifdef DIAGNOSTIC
			if (!ISSET(bp->b_flags, B_DELWRI))
				panic("Clean buffer on BQ_DIRTY");
#endif
			/* Buffer has soft-dependency ordering constraints that
			 * are not yet satisfied: defer it (re-queue at tail,
			 * restore the page accounting bremfree took away) and
			 * move on.  B_DEFERRED keeps us from re-counting deps
			 * for it on the next pass. */
			if (LIST_FIRST(&bp->b_dep) != NULL &&
			    !ISSET(bp->b_flags, B_DEFERRED) &&
			    buf_countdeps(bp, 0, 1)) {
				SET(bp->b_flags, B_DEFERRED);
				s = splbio();
				numfreepages += btoc(bp->b_bufsize);
				numdirtypages += btoc(bp->b_bufsize);
				binstailfree(bp, &bufqueues[BQ_DIRTY]);
				CLR(bp->b_flags, B_BUSY);
				continue;
			}

			/* Start the asynchronous write; bawrite releases bp. */
			bawrite(bp);

			/* Reached low water: stop cleaning for now. */
			if (numdirtypages < lodirtypages)
				break;
			/* Never allow processing to run for more than 1 sec */
			timersub(&time, &starttime, &timediff);
			if (timediff.tv_sec)
				break;
			s = splbio();
		}
	}
}
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 *
 * Chooses the correct free queue for the buffer (EMPTY / CLEAN / DIRTY /
 * LOCKED), maintains the page-accounting counters, clears the transient
 * flags, and wakes any processes waiting for buffers in general or for
 * this buffer in particular.  No I/O is started here.
 */
void brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		/* Release any soft-dependency bookkeeping first. */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		/* A discarded buffer can no longer owe a delayed write. */
		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);	/* move off the vnode's dirty list */
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		}
		/* Head insertion: invalid contents should be reused first. */
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data. Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreepages += btoc(bp->b_bufsize);
			if (!ISSET(bp->b_flags, B_DELWRI)) {
				numcleanpages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_CLEAN];
			} else {
				numdirtypages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_DIRTY];
			}
		}
		/* B_AGE means "probably won't be needed again soon":
		 * insert at the head so it is recycled earlier. */
		if (ISSET(bp->b_flags, B_AGE))
			binsheadfree(bp, bufq);
		else
			binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE | B_DEFERRED));

	/* Wake up syncer and cleaner processes waiting for buffers */
	if (nobuffers) {
		wakeup(&nobuffers);
		nobuffers = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanpages > locleanpages)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	splx(s);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	/* NOTE(review): this test/clear of B_WANTED runs after splx(s), i.e.
	 * outside the splbio-protected region; presumably safe on this
	 * uniprocessor spl model, but verify against the version in the
	 * tree -- the check is usually done before lowering spl. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}