/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 *
 * Described in Leffler, et al. (pp. 208-213).
 */
void
bdwrite(struct buf *bp)
{
	int s;

	/*
	 * If the block hasn't been seen before:
	 *	(1) Mark it as having been seen,
	 *	(2) Charge for the write,
	 *	(3) Make sure it's on its vnode's correct block list,
	 *	(4) If the buffer is rewritten, move it to the end of the dirty list.
	 */
	if (!ISSET(bp->b_flags, B_DELWRI)) {
		SET(bp->b_flags, B_DELWRI);
		s = splbio();
		reassignbuf(bp);
		splx(s);
		curproc->p_stats->p_ru.ru_oublock++;	/* XXX */
	}

	/* If this is a tape block, write the block now. */
	if (major(bp->b_dev) < nblkdev &&
	    bdevsw[major(bp->b_dev)].d_type == D_TAPE) {
		bawrite(bp);
		return;
	}

	/* Otherwise, the "write" is done, so mark and release the buffer. */
	CLR(bp->b_flags, B_NEEDCOMMIT);
	SET(bp->b_flags, B_DONE);
	brelse(bp);
}
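/*
 * Caller-side sketch: how a file system write path typically chooses
 * between bwrite(), bawrite(), and bdwrite(), in the style of the
 * classic ffs_write() logic.  The function and parameter names here
 * ("write_path_sketch", "ioflag", "xfersize", "blksize") are
 * illustrative assumptions, not part of the code above.
 */
void
write_path_sketch(struct buf *bp, int ioflag, int xfersize, int blksize)
{
	if (ioflag & IO_SYNC)
		(void)bwrite(bp);	/* must reach stable storage before return */
	else if (xfersize == blksize)
		bawrite(bp);		/* full block: start the I/O, don't wait */
	else
		bdwrite(bp);		/* partial block: more writes expected soon */
}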
/*
 * Synchronous write.
 * Release buffer on completion.
 */
int
bwrite(register struct buf *bp)
{
	int rv;

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return (0);
	} else {
		int wasdelayed;

		if (!(bp->b_flags & B_BUSY))
			panic("bwrite: not busy");

		wasdelayed = bp->b_flags & B_DELWRI;
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_ASYNC | B_DELWRI);
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);

		bp->b_flags |= B_DIRTY;
		bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
		rv = biowait(bp);
		brelse(bp);
		return (rv);
	}
}
/*
 * Must be called at splbio().
 */
void
buf_undirty(struct buf *bp)
{
	splassert(IPL_BIO);

	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);
		reassignbuf(bp);
	}
}
/*
 * Must be called at splbio().
 */
void
buf_undirty(struct buf *bp)
{
	splassert(IPL_BIO);

#ifdef DIAGNOSTIC
	if (!ISSET(bp->b_flags, B_BUSY))
		panic("Trying to undirty buffer on freelist!");
#endif

	if (ISSET(bp->b_flags, B_DELWRI)) {
		CLR(bp->b_flags, B_DELWRI);
		reassignbuf(bp);
	}
}
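/*
 * Usage sketch for buf_undirty(): the caller owns the buffer (B_BUSY)
 * and must raise the interrupt priority level itself, since the
 * routine only asserts IPL_BIO.  "discard_dirty_sketch" is a
 * hypothetical caller, not a routine from the code above.
 */
void
discard_dirty_sketch(struct buf *bp)
{
	int s;

	s = splbio();		/* block disk interrupts */
	buf_undirty(bp);	/* clear B_DELWRI, requeue on the vnode */
	splx(s);
	/* typically followed by SET(bp->b_flags, B_INVAL) and brelse(bp) */
}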
/*
 * Delayed write.
 *
 * The buffer is marked dirty, but is not queued for I/O.
 * This routine should be used when the buffer is expected
 * to be modified again soon, typically a small write that
 * partially fills a buffer.
 *
 * NB: magnetic tapes cannot be delayed; they must be
 * written in the order that the writes are requested.
 */
void
bdwrite(register struct buf *bp)
{
	if (!(bp->b_flags & B_BUSY))
		panic("bdwrite: not busy");

	if (bp->b_flags & B_INVAL) {
		brelse(bp);
		return;		/* don't touch a buffer we've released */
	}
	if (bp->b_flags & B_TAPE) {
		bwrite(bp);
		return;
	}
	bp->b_flags &= ~(B_READ | B_DONE);
	bp->b_flags |= B_DIRTY | B_DELWRI;
	reassignbuf(bp, bp->b_vp);
	brelse(bp);
}
/*
 * Asynchronous write.
 * Start I/O on a buffer, but do not wait for it to complete.
 * The buffer is released when the I/O completes.
 */
void
bawrite(register struct buf *bp)
{
	if (!(bp->b_flags & B_BUSY))
		panic("bawrite: not busy");

	if (bp->b_flags & B_INVAL)
		brelse(bp);
	else {
		int wasdelayed;

		wasdelayed = bp->b_flags & B_DELWRI;
		bp->b_flags &= ~(B_READ | B_DONE | B_ERROR | B_DELWRI);
		if (wasdelayed)
			reassignbuf(bp, bp->b_vp);

		bp->b_flags |= B_DIRTY | B_ASYNC;
		bp->b_vp->v_numoutput++;
		VOP_STRATEGY(bp);
	}
}
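/*
 * The v_numoutput++ above is balanced by vwakeup() when biodone() runs
 * at interrupt time.  A hypothetical fsync-style caller can wait for
 * all outstanding asynchronous writes on a vnode to drain using the
 * traditional VBWAIT protocol ("drain_writes_sketch" is illustrative):
 */
void
drain_writes_sketch(struct vnode *vp)
{
	int s;

	s = splbio();
	while (vp->v_numoutput > 0) {
		vp->v_flag |= VBWAIT;	/* ask biodone()/vwakeup() for a wakeup */
		tsleep(&vp->v_numoutput, PRIBIO + 1, "drainio", 0);
	}
	splx(s);
}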
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		}
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreepages += btoc(bp->b_bufsize);
			if (!ISSET(bp->b_flags, B_DELWRI)) {
				numcleanpages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_CLEAN];
			} else {
				numdirtypages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_DIRTY];
			}
		}
		if (ISSET(bp->b_flags, B_AGE))
			binsheadfree(bp, bufq);
		else
			binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE | B_DEFERRED));

	/* Wake up syncer and cleaner processes waiting for buffers. */
	if (nobuffers) {
		wakeup(&nobuffers);
		nobuffers = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanpages > locleanpages)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	splx(s);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
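/*
 * Usage sketch for brelse(): every buffer obtained from bread() must
 * eventually be released back to the free lists.  A hypothetical
 * read-and-release caller, assuming the traditional 4.4BSD-style
 * bread(vp, blkno, size, cred, &bp) interface:
 */
int
read_block_sketch(struct vnode *vp, daddr_t lbn, int size)
{
	struct buf *bp;
	int error;

	error = bread(vp, lbn, size, NOCRED, &bp);
	if (error) {
		brelse(bp);	/* bread() hands back the buffer even on error */
		return (error);
	}
	/* ... examine or modify bp->b_data here ... */
	brelse(bp);		/* requeued by the logic above */
	return (0);
}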
/*
 * Block write.  Described in Bach (p. 56).
 */
int
bwrite(struct buf *bp)
{
	int rv, async, wasdelayed, s;
	struct vnode *vp;
	struct mount *mp;

	/*
	 * Remember buffer type, to switch on it later.  If the write was
	 * synchronous, but the file system was mounted with MNT_ASYNC,
	 * convert it to a delayed write.
	 * XXX note that this relies on delayed tape writes being converted
	 * to async, not sync writes (which is safe, but ugly).
	 */
	async = ISSET(bp->b_flags, B_ASYNC);
	if (!async && bp->b_vp && bp->b_vp->v_mount &&
	    ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) {
		bdwrite(bp);
		return (0);
	}

	/*
	 * Collect statistics on synchronous and asynchronous writes.
	 * Writes to block devices are charged to their associated
	 * filesystem (if any).
	 */
	if ((vp = bp->b_vp) != NULL) {
		if (vp->v_type == VBLK)
			mp = vp->v_specmountpoint;
		else
			mp = vp->v_mount;
		if (mp != NULL) {
			if (async)
				mp->mnt_stat.f_asyncwrites++;
			else
				mp->mnt_stat.f_syncwrites++;
		}
	}

	wasdelayed = ISSET(bp->b_flags, B_DELWRI);
	CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI));

	s = splbio();

	/*
	 * If the write was delayed, the buffer was already charged for
	 * and must be put back on the correct vnode queue; otherwise,
	 * pay for the I/O operation now.  We have to do this before
	 * starting the I/O, or the vnode may not be properly notified
	 * that its I/O has completed.
	 */
	if (wasdelayed) {
		reassignbuf(bp);
	} else
		curproc->p_stats->p_ru.ru_oublock++;

	/* Initiate disk write.  Make sure the appropriate party is charged. */
	bp->b_vp->v_numoutput++;
	splx(s);
	SET(bp->b_flags, B_WRITEINPROG);
	VOP_STRATEGY(bp);

	if (async)
		return (0);

	/*
	 * If I/O was synchronous, wait for it to complete.
	 */
	rv = biowait(bp);

	/* Release the buffer. */
	brelse(bp);

	return (rv);
}
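/*
 * Behavior sketch for the MNT_ASYNC conversion above, from the point
 * of view of a hypothetical caller ("sync_write_sketch" is
 * illustrative): on a file system mounted "async", a nominally
 * synchronous bwrite() becomes a delayed write, so a zero return no
 * longer guarantees the data is on stable storage.
 */
int
sync_write_sketch(struct buf *bp)
{
	CLR(bp->b_flags, B_ASYNC);	/* request the synchronous path */
	/*
	 * With MNT_ASYNC set on bp's mount point, this returns 0 after
	 * bdwrite() without starting any I/O; otherwise it sleeps in
	 * biowait() and returns the real I/O status.
	 */
	return (bwrite(bp));
}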