Example #1
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  If we write now, then clear both IN_MODIFIED and
 * IN_LAZYMOD to reflect the presumably successful write, and if waitfor is
 * set, then wait for the write to complete.
 */
int
ffs_update(struct vnode *vp, int waitfor)
{
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int error;

	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYMOD | IN_MODIFIED);
	fs = ip->i_fs;
	if (fs->fs_ronly)
		return (0);

	/*
	 * The vnode type is usually set to VBAD if an unrecoverable I/O
 * error has occurred (such as when reading the inode).  Clear the
	 * modified bits but do not write anything out in this case.
	 */
	if (vp->v_type == VBAD)
		return (0);
	/*
	 * Ensure that uid and gid are correct. This is a temporary
	 * fix until fsck has been changed to do the update.
	 */
	if (fs->fs_inodefmt < FS_44INODEFMT) {		/* XXX */
		ip->i_din.di_ouid = ip->i_uid;		/* XXX */
		ip->i_din.di_ogid = ip->i_gid;		/* XXX */
	}						/* XXX */
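	/*
	 * Read in the filesystem block that contains this inode so the
	 * in-core copy can be merged into it below.
	 */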
	error = bread(ip->i_devvp, 
		      fsbtodoff(fs, ino_to_fsba(fs, ip->i_number)),
		      (int)fs->fs_bsize, &bp);
	if (error) {
		brelse(bp);
		return (error);
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	*((struct ufs1_dinode *)bp->b_data +
	    ino_to_fsbo(fs, ip->i_number)) = ip->i_din;
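	/*
	 * Write synchronously if the caller asked to wait (and the mount is
	 * not async) or if memory or dirty buffers are scarce; otherwise
	 * schedule a delayed write.
	 */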
	if (waitfor && !DOINGASYNC(vp)) {
		return (bwrite(bp));
	} else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		return (bwrite(bp));
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		return (0);
	}
}
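
For context, a caller typically marks the inode's timestamp flags and then
pushes the inode out through ffs_update().  The sketch below is only an
illustration of that protocol; the helper name mark_inode_dirty_and_sync()
is hypothetical and not part of either example, while the flags and calls
match the header comment and code above.

/*
 * Hypothetical illustration: flag an inode as changed and force a
 * synchronous update, following the IN_CHANGE/IN_UPDATE/IN_MODIFIED
 * protocol described in the ffs_update() header comment above.
 */
static int
mark_inode_dirty_and_sync(struct vnode *vp)
{
	struct inode *ip = VTOI(vp);

	ip->i_flag |= IN_CHANGE | IN_UPDATE;	/* timestamps need refreshing */
	/* ufs_itimes() inside ffs_update() folds these into IN_MODIFIED. */
	return (ffs_update(vp, 1));		/* waitfor != 0: wait for the write */
}
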
Example #2
/*
 * Update the access, modified, and inode change times as specified by the
 * IN_ACCESS, IN_UPDATE, and IN_CHANGE flags respectively.  Write the inode
 * to disk if the IN_MODIFIED flag is set (it may be set initially, or by
 * the timestamp update).  The IN_LAZYMOD flag is set to force a write
 * later if not now.  The IN_LAZYACCESS flag is set instead of IN_MODIFIED if
 * the filesystem is currently being suspended (or is suspended) and the vnode
 * has been accessed.
 * If we write now, then clear IN_MODIFIED, IN_LAZYACCESS and IN_LAZYMOD to
 * reflect the presumably successful write, and if waitfor is set, then wait
 * for the write to complete.
 */
int
ffs_update(vnode *vp, int waitfor)
{
	int error = 0;
	print("HARVEY TODO: %s\n", __func__);
#if 0
	struct fs *fs;
	struct buf *bp;
	struct inode *ip;
	int flags, error;

	ASSERT_VOP_ELOCKED(vp, "ffs_update");
	ufs_itimes(vp);
	ip = VTOI(vp);
	if ((ip->i_flag & IN_MODIFIED) == 0 && waitfor == 0)
		return (0);
	ip->i_flag &= ~(IN_LAZYACCESS | IN_LAZYMOD | IN_MODIFIED);
	fs = ITOFS(ip);
	if (fs->fs_ronly && ITOUMP(ip)->um_fsckpid == 0)
		return (0);
	/*
	 * If we are updating a snapshot and another process is currently
	 * writing the buffer containing the inode for this snapshot then
	 * a deadlock can occur when it tries to check the snapshot to see
	 * if that block needs to be copied. Thus when updating a snapshot
	 * we check to see if the buffer is already locked, and if it is
	 * we drop the snapshot lock until the buffer has been written
	 * and is available to us. We have to grab a reference to the
	 * snapshot vnode to prevent it from being removed while we are
	 * waiting for the buffer.
	 */
	flags = 0;
	if (IS_SNAPSHOT(ip))
		flags = GB_LOCK_NOWAIT;
loop:
	error = breadn_flags(ITODEVVP(ip),
	     fsbtodb(fs, ino_to_fsba(fs, ip->i_number)),
	     (int) fs->fs_bsize, 0, 0, 0, NOCRED, flags, &bp);
	if (error != 0) {
		if (error != EBUSY)
			return (error);
		KASSERT((IS_SNAPSHOT(ip)), ("EBUSY from non-snapshot"));
		/*
		 * Wait for our inode block to become available.
		 *
		 * Hold a reference to the vnode to protect against
		 * ffs_snapgone(). Since we hold a reference, it can only
		 * get reclaimed (VI_DOOMED flag) in a forcible downgrade
		 * or unmount. For an unmount, the entire filesystem will be
		 * gone, so we cannot attempt to touch anything associated
		 * with it while the vnode is unlocked; all we can do is 
		 * pause briefly and try again. If when we relock the vnode
		 * we discover that it has been reclaimed, updating it is no
		 * longer necessary and we can just return an error.
		 */
		vref(vp);
		VOP_UNLOCK(vp, 0);
		pause("ffsupd", 1);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		vrele(vp);
		if ((vp->v_iflag & VI_DOOMED) != 0)
			return (ENOENT);
		goto loop;
	}
	if (DOINGSOFTDEP(vp))
		softdep_update_inodeblock(ip, bp, waitfor);
	else if (ip->i_effnlink != ip->i_nlink)
		panic("ffs_update: bad link cnt");
	if (I_IS_UFS1(ip)) {
		*((struct ufs1_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din1;
		/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
		random_harvest_queue(&(ip->i_din1), sizeof(ip->i_din1), 1, RANDOM_FS_ATIME);
	} else {
		*((struct ufs2_dinode *)bp->b_data +
		    ino_to_fsbo(fs, ip->i_number)) = *ip->i_din2;
		/* XXX: FIX? The entropy here is desirable, but the harvesting may be expensive */
		random_harvest_queue(&(ip->i_din2), sizeof(ip->i_din2), 1, RANDOM_FS_ATIME);
	}
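	/*
	 * Write strategy: synchronous when the caller waits, asynchronous
	 * when memory or dirty buffers are scarce, delayed otherwise.
	 */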
	if (waitfor)
		error = bwrite(bp);
	else if (vm_page_count_severe() || buf_dirty_count_severe()) {
		bawrite(bp);
		error = 0;
	} else {
		if (bp->b_bufsize == fs->fs_bsize)
			bp->b_flags |= B_CLUSTEROK;
		bdwrite(bp);
		error = 0;
	}
#endif // 0
	return (error);
}
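
Both examples locate the on-disk inode with the same two-step arithmetic:
ino_to_fsba() selects the filesystem block holding the inode, and
ino_to_fsbo() selects the inode's slot within that block.  A minimal sketch
of the second step, assuming the standard UFS macros and the UFS1 dinode
layout (the helper name dinode_slot() is hypothetical):

/*
 * Hypothetical sketch: given a buffer bp already read from the block
 * returned by ino_to_fsba(fs, ino), return a pointer to inode "ino"
 * inside it.  dinode_slot() is an illustrative name, not part of UFS.
 */
static struct ufs1_dinode *
dinode_slot(struct fs *fs, struct buf *bp, ino_t ino)
{
	/* ino_to_fsbo() yields the inode's index within its block. */
	return ((struct ufs1_dinode *)bp->b_data + ino_to_fsbo(fs, ino));
}

With such a helper, the assignment in both examples reduces to
*dinode_slot(fs, bp, ip->i_number) = ip->i_din (or *ip->i_din1 in the UFS1
branch of the second example).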