static int ffs_rawread_sync(struct vnode *vp) { int error; /* * Check for dirty mmap, pending writes and dirty buffers */ lwkt_gettoken(&vp->v_token); if (bio_track_active(&vp->v_track_write) || !RB_EMPTY(&vp->v_rbdirty_tree) || (vp->v_flag & VOBJDIRTY) != 0) { /* Attempt to msync mmap() regions to clean dirty mmap */ if ((vp->v_flag & VOBJDIRTY) != 0) { struct vm_object *obj; if ((obj = vp->v_object) != NULL) vm_object_page_clean(obj, 0, 0, OBJPC_SYNC); } /* Wait for pending writes to complete */ error = bio_track_wait(&vp->v_track_write, 0, 0); if (error != 0) { goto done; } /* Flush dirty buffers */ if (!RB_EMPTY(&vp->v_rbdirty_tree)) { if ((error = VOP_FSYNC(vp, MNT_WAIT, 0)) != 0) { goto done; } if (bio_track_active(&vp->v_track_write) || !RB_EMPTY(&vp->v_rbdirty_tree)) panic("ffs_rawread_sync: dirty bufs"); } } else { error = 0; } done: lwkt_reltoken(&vp->v_token); return error; }
/*
 * Truncate the vnode's buffer cache to the new file length.
 *
 * Destroys all buffers at or beyond the block containing the new EOF,
 * resizes the backing VM object, zero-fills the EOF-straddling portion
 * of the last buffer (unless the caller marks it trivial), fsyncs any
 * remaining metadata, and re-scans until no stray buffers remain.
 *
 * Parameters:
 *	vp	- vnode being truncated; its v_token is held for the
 *		  duration of the buffer scans.
 *	length	- new file length in bytes.
 *	blksize	- filesystem block size for the block straddling EOF.
 *	boff	- byte offset of EOF within its block, or a negative
 *		  value to have it computed as (length % blksize).
 *	trivial	- non-zero if the VFS handles the EOF-straddling block
 *		  itself, in which case no zero-fill is performed here.
 *
 * Returns 0 on success or the error from bread() of the EOF block.
 * Errors from the final safety bio_track_wait() are intentionally
 * ignored (best-effort drain of unrelated in-progress I/O).
 */
int
nvtruncbuf(struct vnode *vp, off_t length, int blksize, int boff, int trivial)
{
	struct truncbuf_info info;
	off_t truncboffset;
	const char *filename;
	struct buf *bp;
	int count;
	int error;

	/*
	 * Round up to the *next* block, then destroy the buffers in question.
	 * Since we are only removing some of the buffers we must rely on the
	 * scan count to determine whether a loop is necessary.
	 *
	 * Destroy any pages beyond the last buffer.
	 */
	if (boff < 0)
		boff = (int)(length % blksize);
	if (boff)
		info.truncloffset = length + (blksize - boff);
	else
		info.truncloffset = length;
	info.vp = vp;
	lwkt_gettoken(&vp->v_token);
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
	} while(count);

	nvnode_pager_setsize(vp, length, blksize, boff);

	/*
	 * Zero-fill the area beyond the file EOF that still fits within
	 * the last buffer.  We must mark the buffer as dirty even though
	 * the modified area is beyond EOF to avoid races where the kernel
	 * might flush the buffer before the filesystem is able to reallocate
	 * the block.
	 *
	 * The VFS is responsible for dealing with the actual truncation.
	 *
	 * Only do this if trivial is zero, otherwise it is up to the
	 * VFS to handle the block straddling the EOF.
	 */
	if (boff && trivial == 0) {
		truncboffset = length - boff;
		error = bread(vp, truncboffset, blksize, &bp);
		if (error == 0) {
			bzero(bp->b_data + boff, blksize - boff);
			/*
			 * Clamp any delayed-write dirty range so it does
			 * not extend past the new EOF within this block.
			 */
			if (bp->b_flags & B_DELWRI) {
				if (bp->b_dirtyoff > boff)
					bp->b_dirtyoff = boff;
				if (bp->b_dirtyend > boff)
					bp->b_dirtyend = boff;
			}
			/*
			 * Invalidate the cached device translation; the
			 * VFS may reallocate the underlying block.
			 */
			bp->b_bio2.bio_offset = NOOFFSET;
			bdwrite(bp);
		}
	} else {
		error = 0;
	}

	/*
	 * For safety, fsync any remaining metadata if the file is not being
	 * truncated to 0.  Since the metadata does not represent the entire
	 * dirty list we have to rely on the hit count to ensure that we get
	 * all of it.
	 *
	 * This is typically applicable only to UFS.  NFS and HAMMER do
	 * not store indirect blocks in the per-vnode buffer cache.
	 */
	if (length > 0) {
		do {
			count = RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
					nvtruncbuf_bp_metasync_cmp,
					nvtruncbuf_bp_metasync, &info);
		} while (count);
	}

	/*
	 * It is possible to have in-progress I/O from buffers that were
	 * not part of the truncation.  This should not happen if we
	 * are truncating to 0-length.
	 */
	bio_track_wait(&vp->v_track_write, 0, 0);

	/*
	 * Debugging only: snapshot a name for the vnode so the re-clean
	 * warning below can identify the file.
	 */
	spin_lock(&vp->v_spin);
	filename = TAILQ_FIRST(&vp->v_namecache) ?
		   TAILQ_FIRST(&vp->v_namecache)->nc_name : "?";
	spin_unlock(&vp->v_spin);

	/*
	 * Make sure no buffers were instantiated while we were trying
	 * to clean out the remaining VM pages.  This could occur due
	 * to busy dirty VM pages being flushed out to disk.
	 */
	do {
		info.clean = 1;
		count = RB_SCAN(buf_rb_tree, &vp->v_rbclean_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
		info.clean = 0;
		count += RB_SCAN(buf_rb_tree, &vp->v_rbdirty_tree,
				nvtruncbuf_bp_trunc_cmp,
				nvtruncbuf_bp_trunc, &info);
		if (count) {
			/*
			 * Fixed diagnostic: the warning previously named
			 * vtruncbuf(), the older routine this function was
			 * derived from, misdirecting anyone tracing it.
			 */
			kprintf("Warning: nvtruncbuf(): Had to re-clean %d "
			       "left over buffers in %s\n",
			       count, filename);
		}
	} while(count);

	lwkt_reltoken(&vp->v_token);
	return (error);
}