Example #1
/*
 * Given a locked inode, attach dquot(s) to it, taking U/G/P-QUOTAON
 * into account.
 * If XFS_QMOPT_DQALLOC, the dquot(s) will be allocated if needed.
 * Inode may get unlocked and relocked in here, and the caller must deal with
 * the consequences.
 */
int
xfs_qm_dqattach_locked(
	xfs_inode_t	*ip,
	uint		flags)
{
	xfs_mount_t	*mp = ip->i_mount;
	int		error = 0;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (XFS_IS_UQUOTA_ON(mp) && !ip->i_udquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_uid, XFS_DQ_USER,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_udquot);
		if (error)
			goto done;
		ASSERT(ip->i_udquot);
	}

	if (XFS_IS_GQUOTA_ON(mp) && !ip->i_gdquot) {
		error = xfs_qm_dqattach_one(ip, ip->i_d.di_gid, XFS_DQ_GROUP,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_gdquot);
		if (error)
			goto done;
		ASSERT(ip->i_gdquot);
	}

	if (XFS_IS_PQUOTA_ON(mp) && !ip->i_pdquot) {
		error = xfs_qm_dqattach_one(ip, xfs_get_projid(ip), XFS_DQ_PROJ,
						flags & XFS_QMOPT_DQALLOC,
						&ip->i_pdquot);
		if (error)
			goto done;
		ASSERT(ip->i_pdquot);
	}

done:
	/*
	 * Don't worry about the dquots that we may have attached before any
	 * error - they'll get detached later if it has not already been done.
	 */
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	return error;
}
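For reference, a minimal caller sketch, assuming the usual wrapper shape: take the ilock exclusively, attach, and unlock. This mirrors the locking contract asserted above; treat the wrapper itself as illustrative rather than a verbatim copy.

int
xfs_qm_dqattach(
	struct xfs_inode	*ip,
	uint			flags)
{
	int			error;

	if (!xfs_qm_need_dqattach(ip))
		return 0;

	/* xfs_qm_dqattach_locked() requires XFS_ILOCK_EXCL */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	error = xfs_qm_dqattach_locked(ip, flags);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	return error;
}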
Example #2
/*
 * This is called to asynchronously write the inode associated with this
 * inode log item out to disk. The inode will already have been locked by
 * a successful call to xfs_inode_item_trylock().
 */
STATIC void
xfs_inode_item_push(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
	ASSERT(!completion_done(&ip->i_flush));

	/*
	 * Since we were able to lock the inode's flush lock and
	 * we found it on the AIL, the inode must be dirty.  This
	 * is because the inode is removed from the AIL while still
	 * holding the flush lock in xfs_iflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the inode.
	 */
	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
	       iip->ili_format.ilf_fields != 0);

	/*
	 * Push the inode to its backing buffer. This will not remove the
	 * inode from the AIL - a further push will be required to trigger a
	 * buffer push. However, this allows all the dirty inodes to be pushed
	 * to the buffer before it is pushed to disk. The buffer IO completion
	 * will pull the inode from the AIL, mark it clean and unlock the flush
	 * lock.
	 */
	(void) xfs_iflush(ip, SYNC_TRYLOCK);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
STATIC void
xfs_inode_item_push(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
	ASSERT(xfs_isiflocked(ip));

	/*
	 * Since we were able to lock the inode's flush lock and
	 * we found it on the AIL, the inode must be dirty.  This
	 * is because the inode is removed from the AIL while still
	 * holding the flush lock in xfs_iflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the inode.
	 */
	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) || iip->ili_fields != 0);

	/*
	 * Push the inode to its backing buffer. This will not remove the
	 * inode from the AIL - a further push will be required to trigger a
	 * buffer push. However, this allows all the dirty inodes to be pushed
	 * to the buffer before it is pushed to disk. The buffer IO completion
	 * will pull the inode from the AIL, mark it clean and unlock the flush
	 * lock.
	 */
	(void) xfs_iflush(ip, SYNC_TRYLOCK);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
}
STATIC bool
xfs_inode_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp;
	bool			ret = true;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (!xfs_isiflocked(ip) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return true;
	}

	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
			iip->ili_format.ilf_len, XBF_TRYLOCK);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!bp)
		return true;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	if (xfs_buf_ispinned(bp))
		ret = false;
	xfs_buf_relse(bp);
	return ret;
}
Example #5
int
xfs_break_layouts(
	struct inode		*inode,
	uint			*iolock,
	enum layout_break_reason reason)
{
	bool			retry;
	int			error;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_IOLOCK_SHARED|XFS_IOLOCK_EXCL));

	do {
		retry = false;
		switch (reason) {
		case BREAK_UNMAP:
			error = xfs_break_dax_layouts(inode, &retry);
			if (error || retry)
				break;
			/* fall through */
		case BREAK_WRITE:
			error = xfs_break_leased_layouts(inode, iolock, &retry);
			break;
		default:
			WARN_ON_ONCE(1);
			error = -EINVAL;
		}
	} while (error == 0 && retry);

	return error;
}
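A hedged usage sketch: BREAK_UNMAP callers take both the IOLOCK and the MMAPLOCK exclusively (the DAX path asserts XFS_MMAPLOCK_EXCL), then break layouts before punching or truncating; the surrounding variable and label names are illustrative.

	/* hypothetical fragment from a hole-punch style operation */
	uint	iolock = XFS_IOLOCK_EXCL | XFS_MMAPLOCK_EXCL;

	xfs_ilock(ip, iolock);
	error = xfs_break_layouts(VFS_I(ip), &iolock, BREAK_UNMAP);
	if (error)
		goto out_unlock;	/* iolock may have cycled but is still held */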
Example #6
/*
 * This gets called by xfs_trans_push_ail(), when IOP_TRYLOCK
 * failed to get the inode flush lock but did get the inode locked SHARED.
 * Here we're trying to see if the inode buffer is incore, and if so whether it's
 * marked delayed write. If that's the case, we'll promote it and that will
 * allow the caller to write the buffer by triggering the xfsbufd to run.
 */
STATIC bool
xfs_inode_item_pushbuf(
	struct xfs_log_item	*lip)
{
	struct xfs_inode_log_item *iip = INODE_ITEM(lip);
	struct xfs_inode	*ip = iip->ili_inode;
	struct xfs_buf		*bp;
	bool			ret = true;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));

	/*
	 * If a flush is not in progress anymore, chances are that the
	 * inode was taken off the AIL. So, just get out.
	 */
	if (completion_done(&ip->i_flush) ||
	    !(lip->li_flags & XFS_LI_IN_AIL)) {
		xfs_iunlock(ip, XFS_ILOCK_SHARED);
		return true;
	}

	bp = xfs_incore(ip->i_mount->m_ddev_targp, iip->ili_format.ilf_blkno,
			iip->ili_format.ilf_len, XBF_TRYLOCK);

	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!bp)
		return true;
	if (XFS_BUF_ISDELAYWRITE(bp))
		xfs_buf_delwri_promote(bp);
	if (xfs_buf_ispinned(bp))
		ret = false;
	xfs_buf_relse(bp);
	return ret;
}
/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.  Do this by calling
 * xfs_ipin() to bump the pin count in the inode while holding the
 * inode pin lock.
 */
STATIC void
xfs_inode_item_pin(
	xfs_inode_log_item_t	*iip)
{
	ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
	xfs_ipin(iip->ili_inode);
}
Example #8
/*
 * Actually transfer ownership, and do dquot modifications.
 * These were already reserved.
 */
xfs_dquot_t *
xfs_qm_vop_chown(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	xfs_dquot_t	**IO_olddq,
	xfs_dquot_t	*newdq)
{
	xfs_dquot_t	*prevdq;
	uint		bfield = XFS_IS_REALTIME_INODE(ip) ?
				 XFS_TRANS_DQ_RTBCOUNT : XFS_TRANS_DQ_BCOUNT;


	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(ip->i_mount));

	/* old dquot */
	prevdq = *IO_olddq;
	ASSERT(prevdq);
	ASSERT(prevdq != newdq);

	xfs_trans_mod_dquot(tp, prevdq, bfield, -(ip->i_d.di_nblocks));
	xfs_trans_mod_dquot(tp, prevdq, XFS_TRANS_DQ_ICOUNT, -1);

	/* the sparkling new dquot */
	xfs_trans_mod_dquot(tp, newdq, bfield, ip->i_d.di_nblocks);
	xfs_trans_mod_dquot(tp, newdq, XFS_TRANS_DQ_ICOUNT, 1);

	/*
	 * Take an extra reference, because the inode is going to keep
	 * this dquot pointer even after the trans_commit.
	 */
	*IO_olddq = xfs_qm_dqhold(newdq);

	return prevdq;
}
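A hedged caller sketch: the returned dquot is the reference the inode used to hold, so the typical ownership-change path releases it after the transaction commits. The fragment and its condition names are illustrative.

	/* hypothetical fragment from a setattr-style ownership change */
	struct xfs_dquot	*olddquot = NULL;

	if (XFS_IS_UQUOTA_ON(mp) && uid_changed)
		olddquot = xfs_qm_vop_chown(tp, ip, &ip->i_udquot, udqp);

	error = xfs_trans_commit(tp);

	/* release the reference the inode held on the old dquot */
	if (olddquot)
		xfs_qm_dqrele(olddquot);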
Example #9
/*
 * Lock the dquot and change the reservation if we can.
 * This doesn't change the actual usage, just the reservation.
 * The inode sent in is locked.
 */
int
xfs_trans_reserve_quota_nblks(
    struct xfs_trans	*tp,
    struct xfs_inode	*ip,
    long			nblks,
    long			ninos,
    uint			flags)
{
    struct xfs_mount	*mp = ip->i_mount;

    if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
        return 0;
    if (XFS_IS_PQUOTA_ON(mp))
        flags |= XFS_QMOPT_ENOSPC;

    ASSERT(!xfs_is_quota_inode(&mp->m_sb, ip->i_ino));

    ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
    ASSERT((flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
           XFS_TRANS_DQ_RES_RTBLKS ||
           (flags & ~(XFS_QMOPT_FORCE_RES | XFS_QMOPT_ENOSPC)) ==
           XFS_TRANS_DQ_RES_BLKS);

    /*
     * Reserve nblks against these dquots, with trans as the mediator.
     */
    return xfs_trans_reserve_quota_bydquots(tp, mp,
                                            ip->i_udquot, ip->i_gdquot,
                                            ip->i_pdquot,
                                            nblks, ninos, flags);
}
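A hedged usage sketch: allocation paths reserve blocks against the attached dquots inside the transaction, commonly passing XFS_QMOPT_RES_REGBLKS (the regular-block reservation type corresponding to XFS_TRANS_DQ_RES_BLKS in the ASSERT above); the label is illustrative.

    /* hypothetical allocation-path fragment */
    error = xfs_trans_reserve_quota_nblks(tp, ip, (long)resblks, 0,
                                          XFS_QMOPT_RES_REGBLKS);
    if (error)
        goto out_trans_cancel;	/* quota exceeded: EDQUOT or ENOSPC */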
Example #10
static void
xfs_setattr_time(
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (iattr->ia_valid & ATTR_ATIME) {
		inode->i_atime = iattr->ia_atime;
		ip->i_d.di_atime.t_sec = iattr->ia_atime.tv_sec;
		ip->i_d.di_atime.t_nsec = iattr->ia_atime.tv_nsec;
	}
	if (iattr->ia_valid & ATTR_CTIME) {
		inode->i_ctime = iattr->ia_ctime;
		ip->i_d.di_ctime.t_sec = iattr->ia_ctime.tv_sec;
		ip->i_d.di_ctime.t_nsec = iattr->ia_ctime.tv_nsec;
	}
	if (iattr->ia_valid & ATTR_MTIME) {
		inode->i_mtime = iattr->ia_mtime;
		ip->i_d.di_mtime.t_sec = iattr->ia_mtime.tv_sec;
		ip->i_d.di_mtime.t_nsec = iattr->ia_mtime.tv_nsec;
	}
}
Example #11
void
xfs_qm_vop_create_dqattach(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	struct xfs_dquot	*udqp,
	struct xfs_dquot	*gdqp)
{
	struct xfs_mount	*mp = tp->t_mountp;

	if (!XFS_IS_QUOTA_RUNNING(mp) || !XFS_IS_QUOTA_ON(mp))
		return;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (udqp) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(XFS_IS_UQUOTA_ON(mp));
		ASSERT(ip->i_d.di_uid == be32_to_cpu(udqp->q_core.d_id));

		ip->i_udquot = xfs_qm_dqhold(udqp);
		xfs_trans_mod_dquot(tp, udqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
	if (gdqp) {
		ASSERT(ip->i_gdquot == NULL);
		ASSERT(XFS_IS_OQUOTA_ON(mp));
		ASSERT((XFS_IS_GQUOTA_ON(mp) ?
			ip->i_d.di_gid : xfs_get_projid(ip)) ==
				be32_to_cpu(gdqp->q_core.d_id));

		ip->i_gdquot = xfs_qm_dqhold(gdqp);
		xfs_trans_mod_dquot(tp, gdqp, XFS_TRANS_DQ_ICOUNT, 1);
	}
}
Example #12
/*
 * This gets called when the inode's version needs to be changed from 1 to 2.
 * Currently this happens when the nlink field overflows the old 16-bit value
 * or when chproj is called to change the project for the first time.
 * As a side effect the superblock version will also get rev'd
 * to contain the NLINK bit.
 */
void
xfs_bump_ino_vers2(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_mount_t	*mp;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	ASSERT(ip->i_d.di_version == 1);

	ip->i_d.di_version = 2;
	ip->i_d.di_onlink = 0;
	memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
	mp = tp->t_mountp;
	if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
		spin_lock(&mp->m_sb_lock);
		if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
			xfs_sb_version_addnlink(&mp->m_sb);
			spin_unlock(&mp->m_sb_lock);
			xfs_mod_sb(tp, XFS_SB_VERSIONNUM);
		} else {
			spin_unlock(&mp->m_sb_lock);
		}
	}
	/* Caller must log the inode */
}
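A hedged caller sketch built from the comment above: bumping the link count past what a v1 inode's 16-bit field can hold is the typical trigger for the conversion.

	/* hypothetical fragment from a link-count increment path */
	ip->i_d.di_nlink++;
	if (ip->i_d.di_version == 1 && ip->i_d.di_nlink > XFS_MAXLINK_1)
		xfs_bump_ino_vers2(tp, ip);
	/* per the comment above, the caller must still log the inode */
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);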
/*
 * Transactional inode timestamp update. Requires the inode to be locked and
 * joined to the transaction supplied. Relies on the transaction subsystem to
 * track dirty state and update/writeback the inode accordingly.
 */
void
xfs_trans_ichgtime(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	timespec_t		tv;

	ASSERT(tp);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	tv = current_fs_time(inode->i_sb);

	if ((flags & XFS_ICHGTIME_MOD) &&
	    !timespec_equal(&inode->i_mtime, &tv)) {
		inode->i_mtime = tv;
		ip->i_d.di_mtime.t_sec = tv.tv_sec;
		ip->i_d.di_mtime.t_nsec = tv.tv_nsec;
	}
	if ((flags & XFS_ICHGTIME_CHG) &&
	    !timespec_equal(&inode->i_ctime, &tv)) {
		inode->i_ctime = tv;
		ip->i_d.di_ctime.t_sec = tv.tv_sec;
		ip->i_d.di_ctime.t_nsec = tv.tv_nsec;
	}
}
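A hedged usage sketch: the canonical pairing updates the timestamps and then logs the inode core in the same transaction so the change reaches the log.

	/* hypothetical fragment, inode already joined to tp */
	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);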
Example #14
/*
 * Find the CoW reservation for a given byte offset of a file.
 */
bool
xfs_reflink_find_cow_mapping(
	struct xfs_inode		*ip,
	xfs_off_t			offset,
	struct xfs_bmbt_irec		*imap)
{
	struct xfs_ifork		*ifp = XFS_IFORK_PTR(ip, XFS_COW_FORK);
	xfs_fileoff_t			offset_fsb;
	struct xfs_bmbt_irec		got;
	xfs_extnum_t			idx;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL | XFS_ILOCK_SHARED));
	ASSERT(xfs_is_reflink_inode(ip));

	offset_fsb = XFS_B_TO_FSBT(ip->i_mount, offset);
	if (!xfs_iext_lookup_extent(ip, ifp, offset_fsb, &idx, &got))
		return false;
	if (got.br_startoff > offset_fsb)
		return false;

	trace_xfs_reflink_find_cow_mapping(ip, offset, 1, XFS_IO_OVERWRITE,
			&got);
	*imap = got;
	return true;
}
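A hedged usage sketch: a writeback-style caller takes the ilock shared, probes for a CoW reservation at the I/O offset, and falls back to the data fork when none covers it; the names are illustrative.

	/* hypothetical writeback fragment */
	xfs_ilock(ip, XFS_ILOCK_SHARED);
	found = xfs_reflink_find_cow_mapping(ip, offset, &imap);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	if (!found) {
		/* no CoW reservation here; map from the data fork instead */
	}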
/*
 * This is called to mark the fields indicated in fieldmask as needing
 * to be logged when the transaction is committed.  The inode must
 * already be associated with the given transaction.
 *
 * The values for fieldmask are defined in xfs_inode_item.h.  We always
 * log all of the core inode if any of it has changed, and we always log
 * all of the inline data/extents/b-tree root if any of them has changed.
 */
void
xfs_trans_log_inode(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		flags)
{
	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * First time we log the inode in a transaction, bump the inode change
	 * counter if it is configured for this to occur.
	 */
	if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
	    IS_I_VERSION(VFS_I(ip))) {
		inode_inc_iversion(VFS_I(ip));
		ip->i_d.di_changecount = VFS_I(ip)->i_version;
		flags |= XFS_ILOG_CORE;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;
	ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY;

	/*
	 * Always OR in the bits from the ili_last_fields field.
	 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
	 * routines in the eventual clearing of the ili_fields bits.
	 * See the big comment in xfs_iflush() for an explanation of
	 * this coordination mechanism.
	 */
	flags |= ip->i_itemp->ili_last_fields;
	ip->i_itemp->ili_fields |= flags;
}
/*
 * This is called to asynchronously write the inode associated with this
 * inode log item out to disk. The inode will already have been locked by
 * a successful call to xfs_inode_item_trylock().
 */
STATIC void
xfs_inode_item_push(
	xfs_inode_log_item_t	*iip)
{
	xfs_inode_t	*ip;

	ip = iip->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED));
	ASSERT(!completion_done(&ip->i_flush));
	/*
	 * Since we were able to lock the inode's flush lock and
	 * we found it on the AIL, the inode must be dirty.  This
	 * is because the inode is removed from the AIL while still
	 * holding the flush lock in xfs_iflush_done().  Thus, if
	 * we found it in the AIL and were able to obtain the flush
	 * lock without sleeping, then there must not have been
	 * anyone in the process of flushing the inode.
	 */
	ASSERT(XFS_FORCED_SHUTDOWN(ip->i_mount) ||
	       iip->ili_format.ilf_fields != 0);

	/*
	 * Write out the inode.  The completion routine ('iflush_done') will
	 * pull it from the AIL, mark it clean, unlock the flush lock.
	 */
	(void) xfs_iflush(ip, XFS_IFLUSH_ASYNC);
	xfs_iunlock(ip, XFS_ILOCK_SHARED);

	return;
}
Example #17
/*
 * Add a locked inode to the transaction.
 *
 * The inode must be locked, and it cannot be associated with any transaction.
 */
void
xfs_trans_ijoin(
    struct xfs_trans	*tp,
    struct xfs_inode	*ip)
{
    xfs_inode_log_item_t	*iip;

    ASSERT(ip->i_transp == NULL);
    ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
    if (ip->i_itemp == NULL)
        xfs_inode_item_init(ip, ip->i_mount);
    iip = ip->i_itemp;
    ASSERT(iip->ili_lock_flags == 0);

    /*
     * Get a log_item_desc to point at the new item.
     */
    xfs_trans_add_item(tp, &iip->ili_item);

    xfs_trans_inode_broot_debug(ip);

    /*
     * Initialize i_transp so we can find it with xfs_inode_incore()
     * in xfs_trans_iget() above.
     */
    ip->i_transp = tp;
}
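A hedged usage sketch for this era of the API (two-argument ijoin): since ili_lock_flags stays zero, the caller keeps lock ownership and unlocks after the commit.

	/* hypothetical transaction body */
	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);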
Example #18
/*
 * This is called to mark the fields indicated in fieldmask as needing
 * to be logged when the transaction is committed.  The inode must
 * already be associated with the given transaction.
 *
 * The values for fieldmask are defined in xfs_inode_item.h.  We always
 * log all of the core inode if any of it has changed, and we always log
 * all of the inline data/extents/b-tree root if any of them has changed.
 */
void
xfs_trans_log_inode(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		flags)
{
	struct inode	*inode = VFS_I(ip);

	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Don't bother with i_lock for the I_DIRTY_TIME check here, as races
	 * don't matter - we either will need an extra transaction in 24 hours
	 * to log the timestamps, or will clear already cleared fields in the
	 * worst case.
	 */
	if (inode->i_state & (I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED)) {
		spin_lock(&inode->i_lock);
		inode->i_state &= ~(I_DIRTY_TIME | I_DIRTY_TIME_EXPIRED);
		spin_unlock(&inode->i_lock);
	}

	/*
	 * Record the specific change for fdatasync optimisation. This
	 * allows fdatasync to skip log forces for inodes that are only
	 * timestamp dirty. We do this before the change count so that
	 * the core being logged in this case does not impact on fdatasync
	 * behaviour.
	 */
	ip->i_itemp->ili_fsync_fields |= flags;

	/*
	 * First time we log the inode in a transaction, bump the inode change
	 * counter if it is configured for this to occur. While we have the
	 * inode locked exclusively for metadata modification, we can usually
	 * avoid setting XFS_ILOG_CORE if no one has queried the value since
	 * the last time it was incremented. If we have XFS_ILOG_CORE already
	 * set however, then go ahead and bump the i_version counter
	 * unconditionally.
	 */
	if (!test_and_set_bit(XFS_LI_DIRTY, &ip->i_itemp->ili_item.li_flags) &&
	    IS_I_VERSION(VFS_I(ip))) {
		if (inode_maybe_inc_iversion(VFS_I(ip), flags & XFS_ILOG_CORE))
			flags |= XFS_ILOG_CORE;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;

	/*
	 * Always OR in the bits from the ili_last_fields field.
	 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
	 * routines in the eventual clearing of the ili_fields bits.
	 * See the big comment in xfs_iflush() for an explanation of
	 * this coordination mechanism.
	 */
	flags |= ip->i_itemp->ili_last_fields;
	ip->i_itemp->ili_fields |= flags;
}
/*
 * This routine is called to handle zeroing any space in the last
 * block of the file that is beyond the EOF.  We do this since the
 * size is being increased without writing anything to that block
 * and we don't want anyone to read the garbage on the disk.
 */
STATIC int				/* error (positive) */
xfs_zero_last_block(
	xfs_inode_t	*ip,
	xfs_fsize_t	offset,
	xfs_fsize_t	isize)
{
	xfs_fileoff_t	last_fsb;
	xfs_mount_t	*mp = ip->i_mount;
	int		nimaps;
	int		zero_offset;
	int		zero_len;
	int		error = 0;
	xfs_bmbt_irec_t	imap;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	zero_offset = XFS_B_FSB_OFFSET(mp, isize);
	if (zero_offset == 0) {
		/*
		 * There are no extra bytes in the last block on disk to
		 * zero, so return.
		 */
		return 0;
	}

	last_fsb = XFS_B_TO_FSBT(mp, isize);
	nimaps = 1;
	error = xfs_bmapi(NULL, ip, last_fsb, 1, 0, NULL, 0, &imap,
			  &nimaps, NULL, NULL);
	if (error) {
		return error;
	}
	ASSERT(nimaps > 0);
	/*
	 * If the block underlying isize is just a hole, then there
	 * is nothing to zero.
	 */
	if (imap.br_startblock == HOLESTARTBLOCK) {
		return 0;
	}
	/*
	 * Zero the part of the last block beyond the EOF, and write it
	 * out sync.  We need to drop the ilock while we do this so we
	 * don't deadlock when the buffer cache calls back to us.
	 */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);

	zero_len = mp->m_sb.sb_blocksize - zero_offset;
	if (isize + zero_len > offset)
		zero_len = offset - isize;
	error = xfs_iozero(ip, isize, zero_len);

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	ASSERT(error >= 0);
	return error;
}
Example #20
/*
 * This is called to pin the inode associated with the inode log
 * item in memory so it cannot be written out.
 */
STATIC void
xfs_inode_item_pin(
	struct xfs_log_item	*lip)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	trace_xfs_inode_pin(ip, _RET_IP_);
	atomic_inc(&ip->i_pincount);
}
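For symmetry, a hedged sketch of the matching unpin path: decrement the pin count and wake any waiters when it reaches zero. The wait-queue field name is an assumption.

STATIC void
xfs_inode_item_unpin(
	struct xfs_log_item	*lip,
	int			remove)
{
	struct xfs_inode	*ip = INODE_ITEM(lip)->ili_inode;

	trace_xfs_inode_unpin(ip, _RET_IP_);
	ASSERT(atomic_read(&ip->i_pincount) > 0);
	if (atomic_dec_and_test(&ip->i_pincount))
		wake_up(&ip->i_ipin_wait);	/* assumed waitqueue field */
}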
Example #21
static void
xfs_setattr_mode(
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	struct inode		*inode = VFS_I(ip);
	umode_t			mode = iattr->ia_mode;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	inode->i_mode &= S_IFMT;
	inode->i_mode |= mode & ~S_IFMT;
}
Example #22
/*
 * Zero any on disk space between the current EOF and the new, larger EOF.
 *
 * This handles the normal case of zeroing the remainder of the last block in
 * the file and the unusual case of zeroing blocks out beyond the size of the
 * file.  This second case only happens with fixed size extents and when the
 * system crashes before the inode size was updated but after blocks were
 * allocated.
 *
 * Expects the iolock to be held exclusive, and will take the ilock internally.
 */
int					/* error (positive) */
xfs_zero_eof(
	struct xfs_inode	*ip,
	xfs_off_t		offset,		/* starting I/O offset */
	xfs_fsize_t		isize,		/* current inode size */
	bool			*did_zeroing)
{
	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));
	ASSERT(offset > isize);

	trace_xfs_zero_eof(ip, isize, offset - isize);
	return xfs_zero_range(ip, isize, offset - isize, did_zeroing);
}
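A hedged caller sketch: an extending write checks whether it begins beyond the current on-disk size and zeroes the intervening range first; the surrounding names are illustrative.

	/* hypothetical write-checks fragment, IOLOCK held exclusive */
	if (pos > i_size_read(inode)) {
		bool	did_zeroing = false;

		error = xfs_zero_eof(ip, pos, i_size_read(inode),
				     &did_zeroing);
		if (error)
			return error;
	}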
Example #23
/*
 * Unlock the inode associated with the inode log item.
 * Clear the fields of the inode and inode log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the inode.
 */
STATIC void
xfs_inode_item_unlock(
    struct xfs_log_item	*lip)
{
    struct xfs_inode_log_item *iip = INODE_ITEM(lip);
    struct xfs_inode	*ip = iip->ili_inode;
    unsigned short		lock_flags;

    ASSERT(ip->i_itemp != NULL);
    ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

    lock_flags = iip->ili_lock_flags;
    iip->ili_lock_flags = 0;
    if (lock_flags)
        xfs_iunlock(ip, lock_flags);
}
Example #24
void
xfs_setattr_time(
	struct xfs_inode	*ip,
	struct iattr		*iattr)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	if (iattr->ia_valid & ATTR_ATIME)
		inode->i_atime = iattr->ia_atime;
	if (iattr->ia_valid & ATTR_CTIME)
		inode->i_ctime = iattr->ia_ctime;
	if (iattr->ia_valid & ATTR_MTIME)
		inode->i_mtime = iattr->ia_mtime;
}
Example #25
/* Retrieve an extended attribute and its value.  Must have ilock. */
int
xfs_attr_get_ilocked(
	struct xfs_inode	*ip,
	struct xfs_da_args	*args)
{
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_SHARED | XFS_ILOCK_EXCL));

	if (!xfs_inode_hasattr(ip))
		return -ENOATTR;
	else if (ip->i_d.di_aformat == XFS_DINODE_FMT_LOCAL)
		return xfs_attr_shortform_getvalue(args);
	else if (xfs_bmap_one_block(ip, XFS_ATTR_FORK))
		return xfs_attr_leaf_get(args);
	else
		return xfs_attr_node_get(args);
}
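A hedged wrapper sketch: the unlocked entry point brackets the locked variant with a shared ilock. xfs_ilock_attr_map_shared() is assumed as the lock helper here.

	/* hypothetical unlocked wrapper fragment */
	uint	lock_mode;

	lock_mode = xfs_ilock_attr_map_shared(ip);
	error = xfs_attr_get_ilocked(ip, &args);
	xfs_iunlock(ip, lock_mode);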
Example #26
STATIC int
xfs_qm_dqattach_one(
	xfs_inode_t	*ip,
	xfs_dqid_t	id,
	uint		type,
	uint		doalloc,
	xfs_dquot_t	**IO_idqpp)
{
	xfs_dquot_t	*dqp;
	int		error;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));
	error = 0;

	/*
	 * See if we already have it in the inode itself. IO_idqpp is &i_udquot
	 * or &i_gdquot. This made the code look weird, but made the logic a lot
	 * simpler.
	 */
	dqp = *IO_idqpp;
	if (dqp) {
		trace_xfs_dqattach_found(dqp);
		return 0;
	}

	/*
	 * Find the dquot from somewhere. This bumps the reference count of
	 * dquot and returns it locked.  This can return ENOENT if dquot didn't
	 * exist on disk and we didn't ask it to allocate; ESRCH if quotas got
	 * turned off suddenly.
	 */
	error = xfs_qm_dqget(ip->i_mount, ip, id, type,
			     doalloc | XFS_QMOPT_DOWARN, &dqp);
	if (error)
		return error;

	trace_xfs_dqattach_get(dqp);

	/*
	 * dqget may have dropped and re-acquired the ilock, but it guarantees
	 * that the dquot returned is the one that should go in the inode.
	 */
	*IO_idqpp = dqp;
	xfs_dqunlock(dqp);
	return 0;
}
Example #27
static int
xfs_break_dax_layouts(
	struct inode		*inode,
	bool			*retry)
{
	struct page		*page;

	ASSERT(xfs_isilocked(XFS_I(inode), XFS_MMAPLOCK_EXCL));

	page = dax_layout_busy_page(inode->i_mapping);
	if (!page)
		return 0;

	*retry = true;
	return ___wait_var_event(&page->_refcount,
			atomic_read(&page->_refcount) == 1, TASK_INTERRUPTIBLE,
			0, 0, xfs_wait_dax_page(inode));
}
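xfs_wait_dax_page() is referenced but not shown. A minimal sketch of its likely shape, assuming it cycles the MMAPLOCK asserted above so the busy page can be released while we sleep; this is an assumption, not the verbatim helper.

static void
xfs_wait_dax_page(
	struct inode		*inode)
{
	struct xfs_inode	*ip = XFS_I(inode);

	/* assumed behaviour: drop the mmap lock, yield, retake it */
	xfs_iunlock(ip, XFS_MMAPLOCK_EXCL);
	schedule();
	xfs_ilock(ip, XFS_MMAPLOCK_EXCL);
}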
Example #28
/*
 * Run eofblocks scans on the quotas applicable to the inode. For inodes with
 * multiple quotas, we don't know exactly which quota caused an allocation
 * failure. We make a best effort by including each quota under low free space
 * conditions (less than 1% free space) in the scan.
 */
static int
__xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip,
	int			(*execute)(struct xfs_mount *mp,
					   struct xfs_eofblocks	*eofb))
{
	int scan = 0;
	struct xfs_eofblocks eofb = {0};
	struct xfs_dquot *dq;

	ASSERT(xfs_isilocked(ip, XFS_IOLOCK_EXCL));

	/*
	 * Set the scan owner to avoid a potential livelock. Otherwise, the scan
	 * can repeatedly trylock on the inode we're currently processing. We
	 * run a sync scan to increase effectiveness and use the union filter to
	 * cover all applicable quotas in a single scan.
	 */
	eofb.eof_scan_owner = ip->i_ino;
	eofb.eof_flags = XFS_EOF_FLAGS_UNION|XFS_EOF_FLAGS_SYNC;

	if (XFS_IS_UQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_USER);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_uid = VFS_I(ip)->i_uid;
			eofb.eof_flags |= XFS_EOF_FLAGS_UID;
			scan = 1;
		}
	}

	if (XFS_IS_GQUOTA_ENFORCED(ip->i_mount)) {
		dq = xfs_inode_dquot(ip, XFS_DQ_GROUP);
		if (dq && xfs_dquot_lowsp(dq)) {
			eofb.eof_gid = VFS_I(ip)->i_gid;
			eofb.eof_flags |= XFS_EOF_FLAGS_GID;
			scan = 1;
		}
	}

	if (scan)
		execute(ip->i_mount, &eofb);

	return scan;
}
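A hedged wrapper sketch: the double-underscore form takes the scan function as a parameter so the same quota logic can drive different scans; the callback name is an assumption.

int
xfs_inode_free_quota_eofblocks(
	struct xfs_inode	*ip)
{
	/* assumed scan callback name */
	return __xfs_inode_free_quota_eofblocks(ip, xfs_icache_free_eofblocks);
}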
Example #29
/*
 * This is called to mark the fields indicated in fieldmask as needing
 * to be logged when the transaction is committed.  The inode must
 * already be associated with the given transaction.
 *
 * The values for fieldmask are defined in xfs_inode_item.h.  We always
 * log all of the core inode if any of it has changed, and we always log
 * all of the inline data/extents/b-tree root if any of them has changed.
 */
void
xfs_trans_log_inode(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		flags)
{
	ASSERT(ip->i_itemp != NULL);
	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/*
	 * Record the specific change for fdatasync optimisation. This
	 * allows fdatasync to skip log forces for inodes that are only
	 * timestamp dirty. We do this before the change count so that
	 * the core being logged in this case does not impact on fdatasync
	 * behaviour.
	 */
	ip->i_itemp->ili_fsync_fields |= flags;

	/*
	 * First time we log the inode in a transaction, bump the inode change
	 * counter if it is configured for this to occur. We don't use
	 * inode_inc_iversion() because there is no need for extra locking around
	 * i_version as we already hold the inode locked exclusively for
	 * metadata modification.
	 */
	if (!(ip->i_itemp->ili_item.li_desc->lid_flags & XFS_LID_DIRTY) &&
	    IS_I_VERSION(VFS_I(ip))) {
		VFS_I(ip)->i_version++;
		flags |= XFS_ILOG_CORE;
	}

	tp->t_flags |= XFS_TRANS_DIRTY;
	ip->i_itemp->ili_item.li_desc->lid_flags |= XFS_LID_DIRTY;

	/*
	 * Always OR in the bits from the ili_last_fields field.
	 * This is to coordinate with the xfs_iflush() and xfs_iflush_done()
	 * routines in the eventual clearing of the ili_fields bits.
	 * See the big comment in xfs_iflush() for an explanation of
	 * this coordination mechanism.
	 */
	flags |= ip->i_itemp->ili_last_fields;
	ip->i_itemp->ili_fields |= flags;
}
/*
 * Unlock the inode associated with the inode log item.
 * Clear the fields of the inode and inode log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the inode.
 */
STATIC void
xfs_inode_item_unlock(
    struct xfs_log_item	*lip)
{
    struct xfs_inode_log_item *iip = INODE_ITEM(lip);
    struct xfs_inode	*ip = iip->ili_inode;
    unsigned short		lock_flags;

    ASSERT(iip->ili_inode->i_itemp != NULL);
    ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));

    /*
     * Clear the transaction pointer in the inode.
     */
    ip->i_transp = NULL;

    /*
     * If the inode needed a separate buffer with which to log
     * its extents, then free it now.
     */
    if (iip->ili_extents_buf != NULL) {
        ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
        ASSERT(ip->i_d.di_nextents > 0);
        ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT);
        ASSERT(ip->i_df.if_bytes > 0);
        kmem_free(iip->ili_extents_buf);
        iip->ili_extents_buf = NULL;
    }
    if (iip->ili_aextents_buf != NULL) {
        ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
        ASSERT(ip->i_d.di_anextents > 0);
        ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT);
        ASSERT(ip->i_afp->if_bytes > 0);
        kmem_free(iip->ili_aextents_buf);
        iip->ili_aextents_buf = NULL;
    }

    lock_flags = iip->ili_lock_flags;
    iip->ili_lock_flags = 0;
    if (lock_flags) {
        xfs_iunlock(iip->ili_inode, lock_flags);
        IRELE(iip->ili_inode);
    }
}