/*
 * Actually mark the segment clean.
 * Must be called with the segment lock held.
 *
 * Returns 0 on success, EBUSY if the segment cannot be cleaned right now
 * (it is the current write segment, has live bytes, or is active), or
 * EALREADY if it is already clean.
 */
int lfs_do_segclean(struct lfs *fs, unsigned long segnum)
{
	extern int lfs_dostats;
	struct buf *bp;
	CLEANERINFO *cip;
	SEGUSE *sup;

	/* Never clean the segment we are currently writing into. */
	if (lfs_dtosn(fs, lfs_sb_getcurseg(fs)) == segnum) {
		return (EBUSY);
	}

	/* Fetch the SEGUSE entry; bp must be released on every exit path. */
	LFS_SEGENTRY(sup, fs, segnum, bp);
	if (sup->su_nbytes) {
		/* Segment still holds live data; refuse to mark it clean. */
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " %d live bytes\n", segnum, sup->su_nbytes));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (sup->su_flags & SEGUSE_ACTIVE) {
		/* Segment is part of the active write set. */
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is active\n", segnum));
		brelse(bp, 0);
		return (EBUSY);
	}
	if (!(sup->su_flags & SEGUSE_DIRTY)) {
		/* Nothing to do; it is already clean. */
		DLOG((DLOG_CLEAN, "lfs_segclean: not cleaning segment %lu:"
		      " segment is already clean\n", segnum));
		brelse(bp, 0);
		return (EALREADY);
	}

	/*
	 * Credit the whole segment back to lfs_avail, then deduct space
	 * that is not actually reusable: an embedded superblock copy, and
	 * (for v2+ filesystems) the disk label area overlapping segment 0.
	 */
	lfs_sb_addavail(fs, lfs_segtod(fs, 1));
	if (sup->su_flags & SEGUSE_SUPERBLOCK)
		lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_SBPAD));
	if (lfs_sb_getversion(fs) > 1 && segnum == 0 &&
	    lfs_sb_gets0addr(fs) < lfs_btofsb(fs, LFS_LABELPAD))
		lfs_sb_subavail(fs, lfs_btofsb(fs, LFS_LABELPAD) - lfs_sb_gets0addr(fs));

	/*
	 * Reclaim the blocks that held segment summaries and inode blocks:
	 * add them to bfree and remove them from the dirty-metadata count,
	 * clamping dmeta at zero.  Both counters are protected by lfs_lock.
	 */
	mutex_enter(&lfs_lock);
	lfs_sb_addbfree(fs, sup->su_nsums * lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
	    lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
	lfs_sb_subdmeta(fs, sup->su_nsums * lfs_btofsb(fs, lfs_sb_getsumsize(fs)) +
	    lfs_btofsb(fs, sup->su_ninos * lfs_sb_getibsize(fs)));
	if (lfs_sb_getdmeta(fs) < 0)
		lfs_sb_setdmeta(fs, 0);
	mutex_exit(&lfs_lock);

	/* Flip the segment to clean and write the SEGUSE entry back. */
	sup->su_flags &= ~SEGUSE_DIRTY;
	LFS_WRITESEGENTRY(sup, fs, segnum, bp);

	/*
	 * Update the cleaner info block: move one segment from the dirty
	 * to the clean count and mirror the superblock's free/avail
	 * figures (avail less the cleaner/filesystem reservations).
	 */
	LFS_CLEANERINFO(cip, fs, bp);
	lfs_ci_shiftdirtytoclean(fs, cip, 1);
	lfs_sb_setnclean(fs, lfs_ci_getclean(fs, cip));
	mutex_enter(&lfs_lock);
	lfs_ci_setbfree(fs, cip, lfs_sb_getbfree(fs));
	lfs_ci_setavail(fs, cip, lfs_sb_getavail(fs) - fs->lfs_ravail -
	    fs->lfs_favail);
	/* Wake anyone sleeping in lfs_availwait(); space just freed up. */
	wakeup(&fs->lfs_availsleep);
	mutex_exit(&lfs_lock);
	(void) LFS_BWRITE_LOG(bp);

	if (lfs_dostats)
		++lfs_stats.segs_reclaimed;

	return (0);
}
/*
 * Extend the last (fragment-sized) block of a file from osize to nsize
 * bytes, adjusting LFS space accounting and quota.  If bpp is non-NULL
 * the block is read in, grown with allocbuf(), and the new tail zeroed;
 * the caller receives the busy buffer in *bpp.  If bpp is NULL only the
 * accounting is performed (UBC will dirty and write the pages).
 *
 * VOP_BWRITE 1 time.
 *
 * Returns 0 on success, ENOSPC if the filesystem lacks room, or an
 * error from bread()/lfs_chkdq().  May sleep in lfs_availwait() and
 * retry from the top.  Must be called without the segment lock held.
 */
int lfs_fragextend(struct vnode *vp, int osize, int nsize, daddr_t lbn, struct buf **bpp, kauth_cred_t cred)
{
	struct inode *ip;
	struct lfs *fs;
	long frags;
	int error;
	extern long locked_queue_bytes;
	size_t obufsize;

	ip = VTOI(vp);
	fs = ip->i_lfs;
	/* Number of fragments being added by the extension. */
	frags = (long)lfs_numfrags(fs, nsize - osize);
	error = 0;

	ASSERT_NO_SEGLOCK(fs);

	/*
	 * Get the seglock so we don't enlarge blocks while a segment
	 * is being written.  If we're called with bpp==NULL, though,
	 * we are only pretending to change a buffer, so we don't have to
	 * lock.
	 */
    top:
	if (bpp) {
		rw_enter(&fs->lfs_fraglock, RW_READER);
		LFS_DEBUG_COUNTLOCKED("frag");
	}

	if (!ISSPACE(fs, frags, cred)) {
		error = ENOSPC;
		goto out;
	}

	/*
	 * If we are not asked to actually return the block, all we need
	 * to do is allocate space for it.  UBC will handle dirtying the
	 * appropriate things and making sure it all goes to disk.
	 * Don't bother to read in that case.
	 */
	if (bpp && (error = bread(vp, lbn, osize, 0, bpp))) {
		goto out;
	}
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
	if ((error = lfs_chkdq(ip, frags, cred, 0))) {
		if (bpp)
			brelse(*bpp, 0);
		goto out;
	}
#endif
	/*
	 * Adjust accounting for lfs_avail.  If there's not enough room,
	 * we will have to wait for the cleaner, which we can't do while
	 * holding a block busy or while holding the seglock.  In that case,
	 * release both and start over after waiting.
	 */
	if (bpp && ((*bpp)->b_oflags & BO_DELWRI)) {
		if (!lfs_fits(fs, frags)) {
			/* bpp is known non-NULL here (outer condition). */
			brelse(*bpp, 0);
#if defined(LFS_QUOTA) || defined(LFS_QUOTA2)
			/* Undo the quota charge before retrying. */
			lfs_chkdq(ip, -frags, cred, 0);
#endif
			rw_exit(&fs->lfs_fraglock);
			lfs_availwait(fs, frags);
			goto top;
		}
		lfs_sb_subavail(fs, frags);
	}

	/* Charge the new fragments against the free-block count. */
	mutex_enter(&lfs_lock);
	lfs_sb_subbfree(fs, frags);
	mutex_exit(&lfs_lock);
	ip->i_lfs_effnblks += frags;
	ip->i_flag |= IN_CHANGE | IN_UPDATE;

	if (bpp) {
		obufsize = (*bpp)->b_bufsize;
		allocbuf(*bpp, nsize, 1);

		/* Adjust locked-list accounting */
		if (((*bpp)->b_flags & B_LOCKED) != 0 &&
		    (*bpp)->b_iodone == NULL) {
			mutex_enter(&lfs_lock);
			locked_queue_bytes += (*bpp)->b_bufsize - obufsize;
			mutex_exit(&lfs_lock);
		}

		/* Zero the newly exposed tail of the grown buffer. */
		memset((char *)((*bpp)->b_data) + osize, 0,
		    (u_int)(nsize - osize));
	}

    out:
	if (bpp) {
		rw_exit(&fs->lfs_fraglock);
	}
	return (error);
}