Example #1
/*
 * Called from the vfsops layer.
 */
void
xfs_qm_unmount_quotas(
	xfs_mount_t	*mp)
{
	/*
	 * Release the dquots that root inode, et al might be holding,
	 * before we flush quotas and blow away the quotainfo structure.
	 */
	ASSERT(mp->m_rootip);
	xfs_qm_dqdetach(mp->m_rootip);
	if (mp->m_rbmip)
		xfs_qm_dqdetach(mp->m_rbmip);
	if (mp->m_rsumip)
		xfs_qm_dqdetach(mp->m_rsumip);

	/*
	 * Release the quota inodes.
	 */
	if (mp->m_quotainfo) {
		if (mp->m_quotainfo->qi_uquotaip) {
			IRELE(mp->m_quotainfo->qi_uquotaip);
			mp->m_quotainfo->qi_uquotaip = NULL;
		}
		if (mp->m_quotainfo->qi_gquotaip) {
			IRELE(mp->m_quotainfo->qi_gquotaip);
			mp->m_quotainfo->qi_gquotaip = NULL;
		}
	}
}
STATIC void
xfs_fstrm_free_func(
	unsigned long	ino,
	void		*data)
{
	fstrm_item_t	*item  = (fstrm_item_t *)data;
	xfs_inode_t	*ip = item->ip;

	ASSERT(ip->i_ino == ino);

	xfs_iflags_clear(ip, XFS_IFILESTREAM);

	/* Drop the reference taken on the AG when the item was added. */
	xfs_filestream_put_ag(ip->i_mount, item->ag);

	TRACE_FREE(ip->i_mount, ip, item->pip, item->ag,
		xfs_filestream_peek_ag(ip->i_mount, item->ag));

	IRELE(ip);

	if (item->pip)
		IRELE(item->pip);

	/* Finally, free the memory allocated for the item. */
	kmem_zone_free(item_zone, item);
}
/*
 * Gets called when unmounting a filesystem or when all quotas get
 * turned off.
 * This purges the quota inodes, destroys locks and frees itself.
 */
void
xfs_qm_destroy_quotainfo(
	xfs_mount_t	*mp)
{
	xfs_quotainfo_t *qi;

	qi = mp->m_quotainfo;
	ASSERT(qi != NULL);

	unregister_shrinker(&qi->qi_shrinker);

	if (qi->qi_uquotaip) {
		IRELE(qi->qi_uquotaip);
		qi->qi_uquotaip = NULL; /* paranoia */
	}
	if (qi->qi_gquotaip) {
		IRELE(qi->qi_gquotaip);
		qi->qi_gquotaip = NULL;
	}
	if (qi->qi_pquotaip) {
		IRELE(qi->qi_pquotaip);
		qi->qi_pquotaip = NULL;
	}
	mutex_destroy(&qi->qi_quotaofflock);
	kmem_free(qi);
	mp->m_quotainfo = NULL;
}
Example #4
int
xfs_qm_scall_trunc_qfiles(
	xfs_mount_t	*mp,
	uint		flags)
{
	int		error = 0, error2 = 0;
	xfs_inode_t	*qip;

	if (!xfs_sb_version_hasquota(&mp->m_sb) || flags == 0) {
		qdprintk("qtrunc flags=%x m_qflags=%x\n", flags, mp->m_qflags);
		return XFS_ERROR(EINVAL);
	}

	if ((flags & XFS_DQ_USER) && mp->m_sb.sb_uquotino != NULLFSINO) {
		error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino, 0, 0, &qip, 0);
		if (!error) {
			error = xfs_truncate_file(mp, qip);
			IRELE(qip);
		}
	}

	if ((flags & (XFS_DQ_GROUP|XFS_DQ_PROJ)) &&
	    mp->m_sb.sb_gquotino != NULLFSINO) {
		error2 = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino, 0, 0, &qip, 0);
		if (!error2) {
			error2 = xfs_truncate_file(mp, qip);
			IRELE(qip);
		}
	}

	return error ? error : error2;
}
Example #5
void
libxfs_rtmount_destroy(xfs_mount_t *mp)
{
	if (mp->m_rsumip)
		IRELE(mp->m_rsumip);
	if (mp->m_rbmip)
		IRELE(mp->m_rbmip);
	mp->m_rsumip = mp->m_rbmip = NULL;
}
STATIC struct inode *
xfs_nfs_get_inode(
	struct super_block	*sb,
	u64			ino,
	u32			generation)
{
	xfs_mount_t		*mp = XFS_M(sb);
	xfs_inode_t		*ip;
	int			error;

	if (ino == 0)
		return ERR_PTR(-ESTALE);

	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
	if (error) {
		if (error == EINVAL || error == ENOENT)
			error = ESTALE;
		return ERR_PTR(-error);
	}

	if (ip->i_d.di_gen != generation) {
		IRELE(ip);
		return ERR_PTR(-ESTALE);
	}

	return VFS_I(ip);
}
Example #7
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
			   XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}
Example #8
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
				0 : XFS_B_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}
Example #9
/* must be called with pag_ici_lock held and releases it */
int
xfs_sync_inode_valid(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag)
{
	struct inode		*inode = VFS_I(ip);
	int			error = EFSCORRUPTED;

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		goto out_unlock;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	error = ENOENT;
	if (xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		goto out_unlock;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		goto out_unlock;
	}

	/* inode is valid */
	error = 0;
out_unlock:
	read_unlock(&pag->pag_ici_lock);
	return error;
}
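
The igrab() check above is the usual take-a-reference-unless-dying pattern: if the reference cannot be taken, the inode is already headed for reclaim and the caller simply skips it. A minimal user-space sketch of the idea (all names invented; no real locking or RCU):

#include <stdbool.h>
#include <stdio.h>

/* Invented stand-in for an object with a lifetime; not kernel code. */
struct node {
	int	refcount;
	bool	dying;
};

/* Take a reference only if the object is not being torn down. */
static bool node_grab(struct node *n)
{
	if (n->dying)
		return false;	/* on its way to reclaim; skip it */
	n->refcount++;
	return true;
}

int main(void)
{
	struct node n = { .refcount = 1, .dying = false };

	printf("grabbed: %d\n", node_grab(&n));	/* prints 1 */
	n.dying = true;
	printf("grabbed: %d\n", node_grab(&n));	/* prints 0 */
	return 0;
}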
Example #10
/* xfs_fstrm_free_func(): callback for freeing cached stream items. */
STATIC void
xfs_fstrm_free_func(
	unsigned long	ino,
	void		*data)
{
	fstrm_item_t	*item  = (fstrm_item_t *)data;
	xfs_inode_t	*ip = item->ip;
	int ref;

	ASSERT(ip->i_ino == ino);

	xfs_iflags_clear(ip, XFS_IFILESTREAM);

	/* Drop the reference taken on the AG when the item was added. */
	ref = xfs_filestream_put_ag(ip->i_mount, item->ag);

	ASSERT(ref >= 0);
	TRACE_FREE(ip->i_mount, ip, item->pip, item->ag,
		xfs_filestream_peek_ag(ip->i_mount, item->ag));

	/*
	 * _xfs_filestream_update_ag() always takes a reference on the inode
	 * itself, whether it's a file or a directory.  Release it here.
	 * This can result in the inode being freed and so we must
	 * not hold any inode locks when freeing filestream objects,
	 * otherwise we can deadlock here.
	 */
	IRELE(ip);

	/*
	 * In the case of a regular file, _xfs_filestream_update_ag() also
	 * takes a ref on the parent inode to keep it in-core.  Release that
	 * too.
	 */
	if (item->pip)
		IRELE(item->pip);

	/* Finally, free the memory allocated for the item. */
	kmem_zone_free(item_zone, item);
}
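
The comment above is the crux: dropping the last reference may free the object, so none of its locks may be held across the release. A self-contained sketch of that rule (invented types; not kernel code):

#include <stdio.h>
#include <stdlib.h>

struct obj {
	int	refcount;
};

/* Dropping the final reference frees the object, as IRELE() can. */
static void obj_put(struct obj *o)
{
	if (--o->refcount == 0) {
		printf("last reference dropped, freeing\n");
		free(o);
	}
}

int main(void)
{
	struct obj *o = malloc(sizeof(*o));

	if (!o)
		return 1;
	o->refcount = 2;
	obj_put(o);	/* still alive */
	obj_put(o);	/* freed here; o must not be touched afterwards */
	return 0;
}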
Example #11
STATIC int
xfs_qm_scall_trunc_qfile(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;
	struct xfs_trans	*tp;
	int			error;

	if (ino == NULLFSINO)
		return 0;

	error = xfs_iget(mp, NULL, ino, 0, 0, &ip);
	if (error)
		return error;

	xfs_ilock(ip, XFS_IOLOCK_EXCL);

	tp = xfs_trans_alloc(mp, XFS_TRANS_TRUNCATE_FILE);
	error = xfs_trans_reserve(tp, 0, XFS_ITRUNCATE_LOG_RES(mp), 0,
				  XFS_TRANS_PERM_LOG_RES,
				  XFS_ITRUNCATE_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		xfs_iunlock(ip, XFS_IOLOCK_EXCL);
		goto out_put;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, 0);

	ip->i_d.di_size = 0;
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

	error = xfs_itruncate_extents(&tp, ip, XFS_DATA_FORK, 0);
	if (error) {
		xfs_trans_cancel(tp, XFS_TRANS_RELEASE_LOG_RES |
				     XFS_TRANS_ABORT);
		goto out_unlock;
	}

	ASSERT(ip->i_d.di_nextents == 0);

	xfs_trans_ichgtime(tp, ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);

out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_EXCL | XFS_IOLOCK_EXCL);
out_put:
	IRELE(ip);
	return error;
}
Example #12
/*
 * xfs_get_file_rt()
 *	This routine determines if the given file has realtime
 *	extents. If so, a 1 is written to the user memory pointed at
 *	by rt; if not, a 0 is written.
 *
 * RETURNS:
 *	0 on success
 *	non-zero on failure
 */
int
xfs_get_file_rt( 
	sysarg_t sysarg_file_id,
	sysarg_t sysarg_rt)
{
	int 		inodert = 0, error = 0;
	dev_t		fs_dev;
	xfs_ino_t	ino;
	xfs_inode_t 	*ip;
	xfs_caddr_t		rt;
	grio_file_id_t	fileid;


	if ( copy_from_user(&fileid, SYSARG_TO_PTR(sysarg_file_id), sizeof(grio_file_id_t))) {
		error = -XFS_ERROR(EFAULT);
		return( error );
	}

	rt		= (xfs_caddr_t)SYSARG_TO_PTR(sysarg_rt);
	fs_dev 		= fileid.fs_dev;
	ino		= fileid.ino;

	/*
 	 * Get the inode.
	 */
	if (!(ip = xfs_get_inode( fs_dev, ino ))) {
		return -XFS_ERROR( ENOENT );
	}

	/*
	 * Check if the inode is marked as real time.
	 */
	if (ip->i_d.di_flags & XFS_DIFLAG_REALTIME) {
		inodert = 1;
	}

	/*
 	 * Copy the results to user space.
 	 */
	if (copy_to_user((xfs_caddr_t)rt, &inodert, sizeof(int)) ) {
		error = -XFS_ERROR(EFAULT);
	}

	xfs_iunlock( ip, XFS_ILOCK_SHARED );
	IRELE( ip );
	return( error );

}
Example #13
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	/*
	 * check for stale RCU freed inode
	 *
	 * If the inode has been reallocated, it doesn't matter if it's not in
	 * the AG we are walking - we are walking for writeback, so if it
	 * passes all the "valid inode" checks and is dirty, then we'll write
	 * it back anyway.  If it has been reallocated and still being
	 * initialised, the XFS_INEW check below will catch it.
	 */
	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}
Example #14
/*
 * "Is this a cached inode that's also allocated?"
 *
 * Look up an inode by number in the given file system.  If the inode is
 * in cache and isn't in purgatory, return 1 if the inode is allocated
 * and 0 if it is not.  For all other cases (not in cache, being torn
 * down, etc.), return a negative error code.
 *
 * The caller has to prevent inode allocation and freeing activity,
 * presumably by locking the AGI buffer.   This is to ensure that an
 * inode cannot transition from allocated to freed until the caller is
 * ready to allow that.  If the inode is in an intermediate state (new,
 * reclaimable, or being reclaimed), -EAGAIN will be returned; if the
 * inode is not in the cache, -ENOENT will be returned.  The caller must
 * deal with these scenarios appropriately.
 *
 * This is a specialized use case for the online scrubber; if you're
 * reading this, you probably want xfs_iget.
 */
int
xfs_icache_inode_is_allocated(
	struct xfs_mount	*mp,
	struct xfs_trans	*tp,
	xfs_ino_t		ino,
	bool			*inuse)
{
	struct xfs_inode	*ip;
	int			error;

	error = xfs_iget(mp, tp, ino, XFS_IGET_INCORE, 0, &ip);
	if (error)
		return error;

	*inuse = !!(VFS_I(ip)->i_mode);
	IRELE(ip);
	return 0;
}
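
The tri-state contract described in the comment (0 plus *inuse on a hit, -EAGAIN in transition, -ENOENT on a miss) can be modeled in a few lines. This is an illustrative user-space sketch with invented names, not kernel code:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

/* Invented cache states standing in for the inode lifecycle. */
enum cache_state { CACHED, NEW, RECLAIMING, ABSENT };

static int cache_is_allocated(enum cache_state state, bool allocated,
			      bool *inuse)
{
	switch (state) {
	case CACHED:
		*inuse = allocated;
		return 0;
	case NEW:
	case RECLAIMING:
		return -EAGAIN;		/* transitional: caller backs off */
	default:
		return -ENOENT;		/* not in the cache at all */
	}
}

int main(void)
{
	bool inuse;

	if (cache_is_allocated(CACHED, true, &inuse) == 0)
		printf("inuse = %d\n", inuse);
	printf("transitional: %d\n", cache_is_allocated(NEW, true, &inuse));
	return 0;
}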
Example #15
/*
 * Unlock the inode associated with the inode log item.
 * Clear the fields of the inode and inode log item that
 * are specific to the current transaction.  If the
 * hold flag is set, do not unlock the inode.
 */
STATIC void
xfs_inode_item_unlock(
    struct xfs_log_item	*lip)
{
    struct xfs_inode_log_item *iip = INODE_ITEM(lip);
    struct xfs_inode	*ip = iip->ili_inode;
    unsigned short		lock_flags;

    ASSERT(iip->ili_inode->i_itemp != NULL);
    ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));

    /*
     * Clear the transaction pointer in the inode.
     */
    ip->i_transp = NULL;

    /*
     * If the inode needed a separate buffer with which to log
     * its extents, then free it now.
     */
    if (iip->ili_extents_buf != NULL) {
        ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
        ASSERT(ip->i_d.di_nextents > 0);
        ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT);
        ASSERT(ip->i_df.if_bytes > 0);
        kmem_free(iip->ili_extents_buf);
        iip->ili_extents_buf = NULL;
    }
    if (iip->ili_aextents_buf != NULL) {
        ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
        ASSERT(ip->i_d.di_anextents > 0);
        ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT);
        ASSERT(ip->i_afp->if_bytes > 0);
        kmem_free(iip->ili_aextents_buf);
        iip->ili_aextents_buf = NULL;
    }

    lock_flags = iip->ili_lock_flags;
    iip->ili_lock_flags = 0;
    if (lock_flags) {
        xfs_iunlock(iip->ili_inode, lock_flags);
        IRELE(iip->ili_inode);
    }
}
Example #16
/*
 * Find the right allocation group for a file, either by finding an
 * existing file stream or creating a new one.
 *
 * Returns NULLAGNUMBER in case of an error.
 */
xfs_agnumber_t
xfs_filestream_lookup_ag(
	struct xfs_inode	*ip)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_inode	*pip = NULL;
	xfs_agnumber_t		startag, ag = NULLAGNUMBER;
	struct xfs_mru_cache_elem *mru;

	ASSERT(S_ISREG(ip->i_d.di_mode));

	pip = xfs_filestream_get_parent(ip);
	if (!pip)
		return NULLAGNUMBER;

	mru = xfs_mru_cache_lookup(mp->m_filestream, pip->i_ino);
	if (mru) {
		ag = container_of(mru, struct xfs_fstrm_item, mru)->ag;
		xfs_mru_cache_done(mp->m_filestream);

		trace_xfs_filestream_lookup(ip, ag);
		goto out;
	}

	/*
	 * Set the starting AG using the rotor for inode32, otherwise
	 * use the directory inode's AG.
	 */
	if (mp->m_flags & XFS_MOUNT_32BITINODES) {
		xfs_agnumber_t	 rotorstep = xfs_rotorstep;
		startag = (mp->m_agfrotor / rotorstep) % mp->m_sb.sb_agcount;
		mp->m_agfrotor = (mp->m_agfrotor + 1) %
		                 (mp->m_sb.sb_agcount * rotorstep);
	} else
		startag = XFS_INO_TO_AGNO(mp, pip->i_ino);

	if (xfs_filestream_pick_ag(pip, startag, &ag, 0, 0))
		ag = NULLAGNUMBER;
out:
	IRELE(pip);
	return ag;
}
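
The inode32 rotor above advances the starting AG only once every xfs_rotorstep calls, spreading new streams across AGs slowly. A tiny standalone demonstration of the same arithmetic with made-up values:

#include <stdio.h>

int main(void)
{
	/* agcount and rotorstep values invented for illustration. */
	unsigned int agcount = 4, rotorstep = 3, rotor = 0;
	int i;

	for (i = 0; i < 8; i++) {
		unsigned int startag = (rotor / rotorstep) % agcount;

		printf("call %d: startag %u\n", i, startag);
		rotor = (rotor + 1) % (agcount * rotorstep);
	}
	return 0;
}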
Example #17
/*
 * Pick a new allocation group for the current file and its file stream.
 *
 * This is called when the allocator can't find a suitable extent in the
 * current AG, and we have to move the stream into a new AG with more space.
 */
int
xfs_filestream_new_ag(
	struct xfs_bmalloca	*ap,
	xfs_agnumber_t		*agp)
{
	struct xfs_inode	*ip = ap->ip, *pip;
	struct xfs_mount	*mp = ip->i_mount;
	xfs_extlen_t		minlen = ap->length;
	xfs_agnumber_t		startag = 0;
	int			flags, err = 0;
	struct xfs_mru_cache_elem *mru;

	*agp = NULLAGNUMBER;

	pip = xfs_filestream_get_parent(ip);
	if (!pip)
		goto exit;

	mru = xfs_mru_cache_remove(mp->m_filestream, pip->i_ino);
	if (mru) {
		struct xfs_fstrm_item *item =
			container_of(mru, struct xfs_fstrm_item, mru);
		startag = (item->ag + 1) % mp->m_sb.sb_agcount;
	}

	flags = (ap->userdata ? XFS_PICK_USERDATA : 0) |
	        (ap->flist->xbf_low ? XFS_PICK_LOWSPACE : 0);

	err = xfs_filestream_pick_ag(pip, startag, agp, flags, minlen);

	/*
	 * Only free the item here so we skip over the old AG earlier.
	 */
	if (mru)
		xfs_fstrm_free_func(mru);

	IRELE(pip);
exit:
	if (*agp == NULLAGNUMBER)
		*agp = 0;
	return err;
}
Example #18
STATIC struct inode *
xfs_nfs_get_inode(
	struct super_block	*sb,
	u64			ino,
	u32			generation)
{
	xfs_mount_t		*mp = XFS_M(sb);
	xfs_inode_t		*ip;
	int			error;

	/*
	 * NFS can sometimes send requests for ino 0.  Fail them gracefully.
	 */
	if (ino == 0)
		return ERR_PTR(-ESTALE);

	/*
	 * The XFS_IGET_UNTRUSTED means that an invalid inode number is just
	 * fine and not an indication of a corrupted filesystem as clients can
	 * send invalid file handles and we have to handle it gracefully.
	 */
	error = xfs_iget(mp, NULL, ino, XFS_IGET_UNTRUSTED, 0, &ip);
	if (error) {
		/*
		 * EINVAL means the inode cluster doesn't exist anymore.
		 * This implies the filehandle is stale, so we should
		 * translate it here.
		 * We don't use ESTALE directly down the chain to not
		 * confuse applications using bulkstat that expect EINVAL.
		 */
		if (error == EINVAL)
			error = ESTALE;
		return ERR_PTR(-error);
	}

	if (ip->i_d.di_gen != generation) {
		IRELE(ip);
		return ERR_PTR(-ENOENT);
	}

	return VFS_I(ip);
}
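
The generation comparison above is what makes reused inode numbers detectable. A user-space sketch of the same check (invented types; no VFS plumbing):

#include <stdio.h>

/* Invented NFS-style handle: inode number plus issue-time generation. */
struct handle {
	unsigned long	ino;
	unsigned int	gen;
};

/* A reused inode slot bumps the generation, exposing stale handles. */
static int handle_is_stale(const struct handle *h, unsigned int cur_gen)
{
	return h->gen != cur_gen;
}

int main(void)
{
	struct handle h = { .ino = 128, .gen = 1 };	/* issued earlier */

	printf("stale: %d\n", handle_is_stale(&h, 2));	/* slot reused: 1 */
	return 0;
}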
Example #19
STATIC int
xfs_inode_ag_walk_grab(
	struct xfs_inode	*ip)
{
	struct inode		*inode = VFS_I(ip);

	ASSERT(rcu_read_lock_held());

	spin_lock(&ip->i_flags_lock);
	if (!ip->i_ino)
		goto out_unlock_noent;

	/* avoid new or reclaimable inodes. Leave for reclaim code to flush */
	if (__xfs_iflags_test(ip, XFS_INEW | XFS_IRECLAIMABLE | XFS_IRECLAIM))
		goto out_unlock_noent;
	spin_unlock(&ip->i_flags_lock);

	/* nothing to sync during shutdown */
	if (XFS_FORCED_SHUTDOWN(ip->i_mount))
		return EFSCORRUPTED;

	/* If we can't grab the inode, it must be on its way to reclaim. */
	if (!igrab(inode))
		return ENOENT;

	if (is_bad_inode(inode)) {
		IRELE(ip);
		return ENOENT;
	}

	/* inode is valid */
	return 0;

out_unlock_noent:
	spin_unlock(&ip->i_flags_lock);
	return ENOENT;
}
/*
 * Return quota status information, such as uquota-off, enforcements, etc.
 * for Q_XGETQSTATV command, to support separate project quota field.
 */
int
xfs_qm_scall_getqstatv(
    struct xfs_mount	*mp,
    struct fs_quota_statv	*out)
{
    struct xfs_quotainfo	*q = mp->m_quotainfo;
    struct xfs_inode	*uip = NULL;
    struct xfs_inode	*gip = NULL;
    struct xfs_inode	*pip = NULL;
    bool                    tempuqip = false;
    bool                    tempgqip = false;
    bool                    temppqip = false;

    if (!xfs_sb_version_hasquota(&mp->m_sb)) {
        out->qs_uquota.qfs_ino = NULLFSINO;
        out->qs_gquota.qfs_ino = NULLFSINO;
        out->qs_pquota.qfs_ino = NULLFSINO;
        return (0);
    }

    out->qs_flags = (__uint16_t) xfs_qm_export_flags(mp->m_qflags &
                    (XFS_ALL_QUOTA_ACCT|
                     XFS_ALL_QUOTA_ENFD));
    out->qs_uquota.qfs_ino = mp->m_sb.sb_uquotino;
    out->qs_gquota.qfs_ino = mp->m_sb.sb_gquotino;
    out->qs_pquota.qfs_ino = mp->m_sb.sb_pquotino;

    if (q) {
        uip = q->qi_uquotaip;
        gip = q->qi_gquotaip;
        pip = q->qi_pquotaip;
    }
    if (!uip && mp->m_sb.sb_uquotino != NULLFSINO) {
        if (xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
                     0, 0, &uip) == 0)
            tempuqip = true;
    }
    if (!gip && mp->m_sb.sb_gquotino != NULLFSINO) {
        if (xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
                     0, 0, &gip) == 0)
            tempgqip = true;
    }
    if (!pip && mp->m_sb.sb_pquotino != NULLFSINO) {
        if (xfs_iget(mp, NULL, mp->m_sb.sb_pquotino,
                     0, 0, &pip) == 0)
            temppqip = true;
    }
    if (uip) {
        out->qs_uquota.qfs_nblks = uip->i_d.di_nblocks;
        out->qs_uquota.qfs_nextents = uip->i_d.di_nextents;
        if (tempuqip)
            IRELE(uip);
    }

    if (gip) {
        out->qs_gquota.qfs_nblks = gip->i_d.di_nblocks;
        out->qs_gquota.qfs_nextents = gip->i_d.di_nextents;
        if (tempgqip)
            IRELE(gip);
    }
    if (pip) {
        out->qs_pquota.qfs_nblks = pip->i_d.di_nblocks;
        out->qs_pquota.qfs_nextents = pip->i_d.di_nextents;
        if (temppqip)
            IRELE(pip);
    }
    if (q) {
        out->qs_incoredqs = q->qi_dquots;
        out->qs_btimelimit = q->qi_btimelimit;
        out->qs_itimelimit = q->qi_itimelimit;
        out->qs_rtbtimelimit = q->qi_rtbtimelimit;
        out->qs_bwarnlimit = q->qi_bwarnlimit;
        out->qs_iwarnlimit = q->qi_iwarnlimit;
    }
    return 0;
}
Example #21
/*
 * This is called after the superblock has been read in and we're ready to
 * iget the quota inodes.
 */
STATIC int
xfs_qm_init_quotainos(
	xfs_mount_t	*mp)
{
	xfs_inode_t	*uip, *gip;
	int		error;
	__int64_t	sbflags;
	uint		flags;

	ASSERT(mp->m_quotainfo);
	uip = gip = NULL;
	sbflags = 0;
	flags = 0;

	/*
	 * Get the uquota and gquota inodes
	 */
	if (xfs_sb_version_hasquota(&mp->m_sb)) {
		if (XFS_IS_UQUOTA_ON(mp) &&
		    mp->m_sb.sb_uquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_uquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_uquotino,
					     0, 0, &uip)))
				return XFS_ERROR(error);
		}
		if (XFS_IS_OQUOTA_ON(mp) &&
		    mp->m_sb.sb_gquotino != NULLFSINO) {
			ASSERT(mp->m_sb.sb_gquotino > 0);
			if ((error = xfs_iget(mp, NULL, mp->m_sb.sb_gquotino,
					     0, 0, &gip))) {
				if (uip)
					IRELE(uip);
				return XFS_ERROR(error);
			}
		}
	} else {
		flags |= XFS_QMOPT_SBVERSION;
		sbflags |= (XFS_SB_VERSIONNUM | XFS_SB_UQUOTINO |
			    XFS_SB_GQUOTINO | XFS_SB_QFLAGS);
	}

	/*
	 * Create the two inodes, if they don't exist already. The changes
	 * made above will get added to a transaction and logged in one of
	 * the qino_alloc calls below.  If the device is readonly,
	 * temporarily switch to read-write to do this.
	 */
	if (XFS_IS_UQUOTA_ON(mp) && uip == NULL) {
		if ((error = xfs_qm_qino_alloc(mp, &uip,
					      sbflags | XFS_SB_UQUOTINO,
					      flags | XFS_QMOPT_UQUOTA)))
			return XFS_ERROR(error);

		flags &= ~XFS_QMOPT_SBVERSION;
	}
	if (XFS_IS_OQUOTA_ON(mp) && gip == NULL) {
		flags |= (XFS_IS_GQUOTA_ON(mp) ?
				XFS_QMOPT_GQUOTA : XFS_QMOPT_PQUOTA);
		error = xfs_qm_qino_alloc(mp, &gip,
					  sbflags | XFS_SB_GQUOTINO, flags);
		if (error) {
			if (uip)
				IRELE(uip);

			return XFS_ERROR(error);
		}
	}

	mp->m_quotainfo->qi_uquotaip = uip;
	mp->m_quotainfo->qi_gquotaip = gip;

	return 0;
}
Example #22
/* ARGSUSED */
STATIC int
xfs_qm_dqusage_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	int		*ubused,	/* not used */
	int		*res)		/* result code value */
{
	xfs_inode_t	*ip;
	xfs_qcnt_t	nblks, rtblks = 0;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	/*
	 * rootino must have its resources accounted for, not so with the quota
	 * inodes.
	 */
	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(EINVAL);
	}

	/*
	 * We don't _need_ to take the ilock EXCL. However, the xfs_qm_dqget
	 * interface expects the inode to be exclusively locked because that's
	 * the case in all other instances. It's OK that we do this because
	 * quotacheck is done only at mount time.
	 */
	error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error) {
		*res = BULKSTAT_RV_NOTHING;
		return error;
	}

	ASSERT(ip->i_delayed_blks == 0);

	if (XFS_IS_REALTIME_INODE(ip)) {
		/*
		 * Walk thru the extent list and count the realtime blocks.
		 */
		error = xfs_qm_get_rtblks(ip, &rtblks);
		if (error)
			goto error0;
	}

	nblks = (xfs_qcnt_t)ip->i_d.di_nblocks - rtblks;

	/*
	 * Add the (disk blocks and inode) resources occupied by this
	 * inode to its dquots. We do this adjustment in the incore dquot,
	 * and also copy the changes to its buffer.
	 * We don't care about putting these changes in a transaction
	 * envelope because if we crash in the middle of a 'quotacheck'
	 * we have to start from the beginning anyway.
	 * Once we're done, we'll log all the dquot bufs.
	 *
	 * The *QUOTA_ON checks below may look pretty racy, but quotachecks
	 * and quotaoffs don't race. (Quotachecks happen at mount time only).
	 */
	if (XFS_IS_UQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_uid,
						   XFS_DQ_USER, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_GQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, ip->i_d.di_gid,
						   XFS_DQ_GROUP, nblks, rtblks);
		if (error)
			goto error0;
	}

	if (XFS_IS_PQUOTA_ON(mp)) {
		error = xfs_qm_quotacheck_dqadjust(ip, xfs_get_projid(ip),
						   XFS_DQ_PROJ, nblks, rtblks);
		if (error)
			goto error0;
	}

	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_DIDONE;
	return 0;

error0:
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);
	*res = BULKSTAT_RV_GIVEUP;
	return error;
}
Example #23
/*
 * The following routine will acquire the locks required for a rename
 * operation. The code understands the semantics of renames and will
 * validate that name1 exists under dp1 & that name2 may or may not
 * exist under dp2.
 *
 * We are renaming dp1/name1 to dp2/name2.
 *
 * Return ENOENT if dp1 does not exist, other lookup errors, or 0 for success.
 */
STATIC int
xfs_lock_for_rename(
	xfs_inode_t	*dp1,	/* old (source) directory inode */
	xfs_inode_t	*dp2,	/* new (target) directory inode */
	bhv_vname_t	*vname1,/* old entry name */
	bhv_vname_t	*vname2,/* new entry name */
	xfs_inode_t	**ipp1,	/* inode of old entry */
	xfs_inode_t	**ipp2,	/* inode of new entry, if it
				   already exists, NULL otherwise. */
	xfs_inode_t	**i_tab,/* array of inode returned, sorted */
	int		*num_inodes)  /* number of inodes in array */
{
	xfs_inode_t		*ip1, *ip2, *temp;
	xfs_ino_t		inum1, inum2;
	int			error;
	int			i, j;
	uint			lock_mode;
	int			diff_dirs = (dp1 != dp2);

	ip2 = NULL;

	/*
	 * First, find out the current inums of the entries so that we
	 * can determine the initial locking order.  We'll have to
	 * sanity check stuff after all the locks have been acquired
	 * to see if we still have the right inodes, directories, etc.
	 */
	lock_mode = xfs_ilock_map_shared(dp1);
	error = xfs_get_dir_entry(vname1, &ip1);
	if (error) {
		xfs_iunlock_map_shared(dp1, lock_mode);
		return error;
	}

	ASSERT(ip1);
	ITRACE(ip1);

	inum1 = ip1->i_ino;

	/*
	 * Unlock dp1 and lock dp2 if they are different.
	 */

	if (diff_dirs) {
		xfs_iunlock_map_shared(dp1, lock_mode);
		lock_mode = xfs_ilock_map_shared(dp2);
	}

	error = xfs_dir_lookup_int(XFS_ITOBHV(dp2), lock_mode,
				   vname2, &inum2, &ip2);
	if (error == ENOENT) {		/* target does not need to exist. */
		inum2 = 0;
	} else if (error) {
		/*
		 * If dp2 and dp1 are the same, the next line unlocks dp1.
		 * Got it?
		 */
		xfs_iunlock_map_shared(dp2, lock_mode);
		IRELE (ip1);
		return error;
	} else {
		ITRACE(ip2);
	}

	/*
	 * i_tab contains a list of pointers to inodes.  We initialize
	 * the table here & we'll sort it.  We will then use it to
	 * order the acquisition of the inode locks.
	 *
	 * Note that the table may contain duplicates.  e.g., dp1 == dp2.
	 */
	i_tab[0] = dp1;
	i_tab[1] = dp2;
	i_tab[2] = ip1;
	if (inum2 == 0) {
		*num_inodes = 3;
		i_tab[3] = NULL;
	} else {
		*num_inodes = 4;
		i_tab[3] = ip2;
	}

	/*
	 * Sort the elements via bubble sort.  (Remember, there are at
	 * most 4 elements to sort, so this is adequate.)
	 */
	for (i=0; i < *num_inodes; i++) {
		for (j=1; j < *num_inodes; j++) {
			if (i_tab[j]->i_ino < i_tab[j-1]->i_ino) {
				temp = i_tab[j];
				i_tab[j] = i_tab[j-1];
				i_tab[j-1] = temp;
			}
		}
	}

	/*
	 * We have dp2 locked. If it isn't first, unlock it.
	 * If it is first, tell xfs_lock_inodes so it can skip it
	 * when locking. If dp1 == dp2, xfs_lock_inodes will skip both
	 * since they are equal. xfs_lock_inodes needs all these inodes
	 * so that it can unlock and retry if there might be a deadlock
	 * potential with the log.
	 */

	if (i_tab[0] == dp2 && lock_mode == XFS_ILOCK_SHARED) {
#ifdef DEBUG
		xfs_rename_skip++;
#endif
		xfs_lock_inodes(i_tab, *num_inodes, 1, XFS_ILOCK_SHARED);
	} else {
#ifdef DEBUG
		xfs_rename_nskip++;
#endif
		xfs_iunlock_map_shared(dp2, lock_mode);
		xfs_lock_inodes(i_tab, *num_inodes, 0, XFS_ILOCK_SHARED);
	}

	/*
	 * Set the return value. Null out any unused entries in i_tab.
	 */
	*ipp1 = *ipp2 = NULL;
	for (i=0; i < *num_inodes; i++) {
		if (i_tab[i]->i_ino == inum1) {
			*ipp1 = i_tab[i];
		}
		if (i_tab[i]->i_ino == inum2) {
			*ipp2 = i_tab[i];
		}
	}
	for (;i < 4; i++) {
		i_tab[i] = NULL;
	}
	return 0;
}
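
The bubble sort exists purely to impose a global lock order: if every task locks inodes in ascending inode-number order, two renames over the same set cannot deadlock. A minimal standalone sketch (fake types; no real locking):

#include <stdint.h>
#include <stdio.h>

struct fake_inode {
	uint64_t	ino;
};

static void sort_by_ino(struct fake_inode **tab, int n)
{
	int		i, j;

	/* Bubble sort, as in xfs_lock_for_rename(): fine for n <= 4. */
	for (i = 0; i < n; i++) {
		for (j = 1; j < n; j++) {
			if (tab[j]->ino < tab[j - 1]->ino) {
				struct fake_inode *tmp = tab[j];

				tab[j] = tab[j - 1];
				tab[j - 1] = tmp;
			}
		}
	}
}

int main(void)
{
	struct fake_inode a = { 42 }, b = { 7 }, c = { 19 };
	struct fake_inode *tab[] = { &a, &b, &c };
	int		i;

	sort_by_ino(tab, 3);
	for (i = 0; i < 3; i++)
		printf("lock inode %llu\n", (unsigned long long)tab[i]->ino);
	return 0;
}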
Example #24
/*
 * xfs_rename
 */
int
xfs_rename(
	bhv_desc_t	*src_dir_bdp,
	bhv_vname_t	*src_vname,
	bhv_vnode_t	*target_dir_vp,
	bhv_vname_t	*target_vname,
	cred_t		*credp)
{
	xfs_trans_t	*tp;
	xfs_inode_t	*src_dp, *target_dp, *src_ip, *target_ip;
	xfs_mount_t	*mp;
	int		new_parent;		/* moving to a new dir */
	int		src_is_directory;	/* src_name is a directory */
	int		error;
	xfs_bmap_free_t free_list;
	xfs_fsblock_t   first_block;
	int		cancel_flags;
	int		committed;
	xfs_inode_t	*inodes[4];
	int		target_ip_dropped = 0;	/* dropped target_ip link? */
	bhv_vnode_t	*src_dir_vp;
	int		spaceres;
	int		target_link_zero = 0;
	int		num_inodes;
	char		*src_name = VNAME(src_vname);
	char		*target_name = VNAME(target_vname);
	int		src_namelen = VNAMELEN(src_vname);
	int		target_namelen = VNAMELEN(target_vname);

	src_dir_vp = BHV_TO_VNODE(src_dir_bdp);
	vn_trace_entry(src_dir_vp, "xfs_rename", (inst_t *)__return_address);
	vn_trace_entry(target_dir_vp, "xfs_rename", (inst_t *)__return_address);

	/*
	 * Find the XFS behavior descriptor for the target directory
	 * vnode since it was not handed to us.
	 */
	target_dp = xfs_vtoi(target_dir_vp);
	if (target_dp == NULL) {
		return XFS_ERROR(EXDEV);
	}

	src_dp = XFS_BHVTOI(src_dir_bdp);
	mp = src_dp->i_mount;

	if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_RENAME) ||
	    DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
				target_dp, DM_EVENT_RENAME)) {
		error = XFS_SEND_NAMESP(mp, DM_EVENT_RENAME,
					src_dir_vp, DM_RIGHT_NULL,
					target_dir_vp, DM_RIGHT_NULL,
					src_name, target_name,
					0, 0, 0);
		if (error) {
			return error;
		}
	}
	/* Return through std_return after this point. */

	/*
	 * Lock all the participating inodes. Depending upon whether
	 * the target_name exists in the target directory, and
	 * whether the target directory is the same as the source
	 * directory, we can lock from 2 to 4 inodes.
	 * xfs_lock_for_rename() will return ENOENT if src_name
	 * does not exist in the source directory.
	 */
	tp = NULL;
	error = xfs_lock_for_rename(src_dp, target_dp, src_vname,
			target_vname, &src_ip, &target_ip, inodes,
			&num_inodes);

	if (error) {
		/*
		 * We have nothing locked, no inode references, and
		 * no transaction, so just get out.
		 */
		goto std_return;
	}

	ASSERT(src_ip != NULL);

	if ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
		/*
		 * Check for link count overflow on target_dp
		 */
		if (target_ip == NULL && (src_dp != target_dp) &&
		    target_dp->i_d.di_nlink >= XFS_MAXLINK) {
			error = XFS_ERROR(EMLINK);
			xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
			goto rele_return;
		}
	}

	/*
	 * If we are using project inheritance, we only allow renames
	 * into our tree when the project IDs are the same; else the
	 * tree quota mechanism would be circumvented.
	 */
	if (unlikely((target_dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT) &&
		     (target_dp->i_d.di_projid != src_ip->i_d.di_projid))) {
		error = XFS_ERROR(EXDEV);
		xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);
		goto rele_return;
	}

	new_parent = (src_dp != target_dp);
	src_is_directory = ((src_ip->i_d.di_mode & S_IFMT) == S_IFDIR);

	/*
	 * Drop the locks on our inodes so that we can start the transaction.
	 */
	xfs_rename_unlock4(inodes, XFS_ILOCK_SHARED);

	XFS_BMAP_INIT(&free_list, &first_block);
	tp = xfs_trans_alloc(mp, XFS_TRANS_RENAME);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	spaceres = XFS_RENAME_SPACE_RES(mp, target_namelen);
	error = xfs_trans_reserve(tp, spaceres, XFS_RENAME_LOG_RES(mp), 0,
			XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
	if (error == ENOSPC) {
		spaceres = 0;
		error = xfs_trans_reserve(tp, 0, XFS_RENAME_LOG_RES(mp), 0,
				XFS_TRANS_PERM_LOG_RES, XFS_RENAME_LOG_COUNT);
	}
	if (error) {
		xfs_trans_cancel(tp, 0);
		goto rele_return;
	}

	/*
	 * Attach the dquots to the inodes
	 */
	if ((error = XFS_QM_DQVOPRENAME(mp, inodes))) {
		xfs_trans_cancel(tp, cancel_flags);
		goto rele_return;
	}

	/*
	 * Reacquire the inode locks we dropped above.
	 */
	xfs_lock_inodes(inodes, num_inodes, 0, XFS_ILOCK_EXCL);

	/*
	 * Join all the inodes to the transaction. From this point on,
	 * we can rely on either trans_commit or trans_cancel to unlock
	 * them.  Note that we need to add a vnode reference to the
	 * directories since trans_commit & trans_cancel will decrement
	 * them when they unlock the inodes.  Also, we need to be careful
	 * not to add an inode to the transaction more than once.
	 */
	VN_HOLD(src_dir_vp);
	xfs_trans_ijoin(tp, src_dp, XFS_ILOCK_EXCL);
	if (new_parent) {
		VN_HOLD(target_dir_vp);
		xfs_trans_ijoin(tp, target_dp, XFS_ILOCK_EXCL);
	}
	if ((src_ip != src_dp) && (src_ip != target_dp)) {
		xfs_trans_ijoin(tp, src_ip, XFS_ILOCK_EXCL);
	}
	if ((target_ip != NULL) &&
	    (target_ip != src_ip) &&
	    (target_ip != src_dp) &&
	    (target_ip != target_dp)) {
		xfs_trans_ijoin(tp, target_ip, XFS_ILOCK_EXCL);
	}

	/*
	 * Set up the target.
	 */
	if (target_ip == NULL) {
		/*
		 * If there's no space reservation, check the entry will
		 * fit before actually inserting it.
		 */
		if (spaceres == 0 &&
		    (error = xfs_dir_canenter(tp, target_dp, target_name,
						target_namelen)))
			goto error_return;
		/*
		 * If target does not exist and the rename crosses
		 * directories, adjust the target directory link count
		 * to account for the ".." reference from the new entry.
		 */
		error = xfs_dir_createname(tp, target_dp, target_name,
					   target_namelen, src_ip->i_ino,
					   &first_block, &free_list, spaceres);
		if (error == ENOSPC)
			goto error_return;
		if (error)
			goto abort_return;
		xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		if (new_parent && src_is_directory) {
			error = xfs_bumplink(tp, target_dp);
			if (error)
				goto abort_return;
		}
	} else { /* target_ip != NULL */
		/*
		 * If target exists and it's a directory, check that both
		 * target and source are directories and that target can be
		 * destroyed, or that neither is a directory.
		 */
		if ((target_ip->i_d.di_mode & S_IFMT) == S_IFDIR) {
			/*
			 * Make sure target dir is empty.
			 */
			if (!(xfs_dir_isempty(target_ip)) ||
			    (target_ip->i_d.di_nlink > 2)) {
				error = XFS_ERROR(EEXIST);
				goto error_return;
			}
		}

		/*
		 * Link the source inode under the target name.
		 * If the source inode is a directory and we are moving
		 * it across directories, its ".." entry will be
		 * inconsistent until we replace that down below.
		 *
		 * In case there is already an entry with the same
		 * name at the destination directory, remove it first.
		 */
		error = xfs_dir_replace(tp, target_dp, target_name,
					target_namelen, src_ip->i_ino,
					&first_block, &free_list, spaceres);
		if (error)
			goto abort_return;
		xfs_ichgtime(target_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

		/*
		 * Decrement the link count on the target since the target
		 * dir no longer points to it.
		 */
		error = xfs_droplink(tp, target_ip);
		if (error)
			goto abort_return;
		target_ip_dropped = 1;

		if (src_is_directory) {
			/*
			 * Drop the link from the old "." entry.
			 */
			error = xfs_droplink(tp, target_ip);
			if (error)
				goto abort_return;
		}

		/* Do this test while we still hold the locks */
		target_link_zero = (target_ip->i_d.di_nlink == 0);

	} /* target_ip != NULL */

	/*
	 * Remove the source.
	 */
	if (new_parent && src_is_directory) {
		/*
		 * Rewrite the ".." entry to point to the new
		 * directory.
		 */
		error = xfs_dir_replace(tp, src_ip, "..", 2, target_dp->i_ino,
					&first_block, &free_list, spaceres);
		ASSERT(error != EEXIST);
		if (error)
			goto abort_return;
		xfs_ichgtime(src_ip, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	} else {
		/*
		 * We always want to hit the ctime on the source inode.
		 * We do it in the if clause above for the 'new_parent &&
		 * src_is_directory' case, and here we get all the other
		 * cases.  This isn't strictly required by the standards
		 * since the source inode isn't really being changed,
		 * but old unix file systems did it and some incremental
		 * backup programs won't work without it.
		 */
		xfs_ichgtime(src_ip, XFS_ICHGTIME_CHG);
	}

	/*
	 * Adjust the link count on src_dp.  This is necessary when
	 * renaming a directory, either within one parent when
	 * the target existed, or across two parent directories.
	 */
	if (src_is_directory && (new_parent || target_ip != NULL)) {

		/*
		 * Decrement link count on src_directory since the
		 * entry that's moved no longer points to it.
		 */
		error = xfs_droplink(tp, src_dp);
		if (error)
			goto abort_return;
	}

	error = xfs_dir_removename(tp, src_dp, src_name, src_namelen,
			src_ip->i_ino, &first_block, &free_list, spaceres);
	if (error)
		goto abort_return;
	xfs_ichgtime(src_dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);

	/*
	 * Update the generation counts on all the directory inodes
	 * that we're modifying.
	 */
	src_dp->i_gen++;
	xfs_trans_log_inode(tp, src_dp, XFS_ILOG_CORE);

	if (new_parent) {
		target_dp->i_gen++;
		xfs_trans_log_inode(tp, target_dp, XFS_ILOG_CORE);
	}

	/*
	 * If there was a target inode, take an extra reference on
	 * it here so that it doesn't go to xfs_inactive() from
	 * within the commit.
	 */
	if (target_ip != NULL) {
		IHOLD(target_ip);
	}

	/*
	 * If this is a synchronous mount, make sure that the
	 * rename transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	/*
	 * Take refs. for vop_link_removed calls below.  No need to worry
	 * about directory refs. because the caller holds them.
	 *
	 * Do holds before the xfs_bmap_finish since it might rele them down
	 * to zero.
	 */

	if (target_ip_dropped)
		IHOLD(target_ip);
	IHOLD(src_ip);

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		xfs_bmap_cancel(&free_list);
		xfs_trans_cancel(tp, (XFS_TRANS_RELEASE_LOG_RES |
				 XFS_TRANS_ABORT));
		if (target_ip != NULL) {
			IRELE(target_ip);
		}
		if (target_ip_dropped) {
			IRELE(target_ip);
		}
		IRELE(src_ip);
		goto std_return;
	}

	/*
	 * trans_commit will unlock src_ip, target_ip & decrement
	 * the vnode references.
	 */
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	if (target_ip != NULL) {
		xfs_refcache_purge_ip(target_ip);
		IRELE(target_ip);
	}
	/*
	 * Let interposed file systems know about removed links.
	 */
	if (target_ip_dropped) {
		bhv_vop_link_removed(XFS_ITOV(target_ip), target_dir_vp,
					target_link_zero);
		IRELE(target_ip);
	}

	IRELE(src_ip);

	/*
	 * Fall through to std_return with error = 0 or errno from
	 * xfs_trans_commit.
	 */
std_return:
	if (DM_EVENT_ENABLED(src_dir_vp->v_vfsp, src_dp, DM_EVENT_POSTRENAME) ||
	    DM_EVENT_ENABLED(target_dir_vp->v_vfsp,
				target_dp, DM_EVENT_POSTRENAME)) {
		(void) XFS_SEND_NAMESP (mp, DM_EVENT_POSTRENAME,
					src_dir_vp, DM_RIGHT_NULL,
					target_dir_vp, DM_RIGHT_NULL,
					src_name, target_name,
					0, error, 0);
	}
	return error;

 abort_return:
	cancel_flags |= XFS_TRANS_ABORT;
	/* FALLTHROUGH */
 error_return:
	xfs_bmap_cancel(&free_list);
	xfs_trans_cancel(tp, cancel_flags);
	goto std_return;

 rele_return:
	IRELE(src_ip);
	if (target_ip != NULL) {
		IRELE(target_ip);
	}
	goto std_return;
}
Example #25
int
xfs_symlink(
	struct xfs_inode	*dp,
	struct xfs_name		*link_name,
	const char		*target_path,
	umode_t			mode,
	struct xfs_inode	**ipp)
{
	struct xfs_mount	*mp = dp->i_mount;
	struct xfs_trans	*tp = NULL;
	struct xfs_inode	*ip = NULL;
	int			error = 0;
	int			pathlen;
	struct xfs_bmap_free	free_list;
	xfs_fsblock_t		first_block;
	bool			unlock_dp_on_error = false;
	uint			cancel_flags;
	int			committed;
	xfs_fileoff_t		first_fsb;
	xfs_filblks_t		fs_blocks;
	int			nmaps;
	struct xfs_bmbt_irec	mval[XFS_SYMLINK_MAPS];
	xfs_daddr_t		d;
	const char		*cur_chunk;
	int			byte_cnt;
	int			n;
	xfs_buf_t		*bp;
	prid_t			prid;
	struct xfs_dquot	*udqp = NULL;
	struct xfs_dquot	*gdqp = NULL;
	struct xfs_dquot	*pdqp = NULL;
	uint			resblks;

	*ipp = NULL;

	trace_xfs_symlink(dp, link_name);

	if (XFS_FORCED_SHUTDOWN(mp))
		return XFS_ERROR(EIO);

	/*
	 * Check component lengths of the target path name.
	 */
	pathlen = strlen(target_path);
	if (pathlen >= MAXPATHLEN)      /* total string too long */
		return XFS_ERROR(ENAMETOOLONG);

	udqp = gdqp = NULL;
	if (dp->i_d.di_flags & XFS_DIFLAG_PROJINHERIT)
		prid = xfs_get_projid(dp);
	else
		prid = XFS_PROJID_DEFAULT;

	/*
	 * Make sure that we have allocated dquot(s) on disk.
	 */
	error = xfs_qm_vop_dqalloc(dp,
			xfs_kuid_to_uid(current_fsuid()),
			xfs_kgid_to_gid(current_fsgid()), prid,
			XFS_QMOPT_QUOTALL | XFS_QMOPT_INHERIT,
			&udqp, &gdqp, &pdqp);
	if (error)
		goto std_return;

	tp = xfs_trans_alloc(mp, XFS_TRANS_SYMLINK);
	cancel_flags = XFS_TRANS_RELEASE_LOG_RES;
	/*
	 * Can the symlink fit into the inode data fork?
	 * There can't be any attributes, so we get the whole variable part.
	 */
	if (pathlen <= XFS_LITINO(mp, dp->i_d.di_version))
		fs_blocks = 0;
	else
		fs_blocks = xfs_symlink_blocks(mp, pathlen);
	resblks = XFS_SYMLINK_SPACE_RES(mp, link_name->len, fs_blocks);
	error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, resblks, 0);
	if (error == ENOSPC && fs_blocks == 0) {
		resblks = 0;
		error = xfs_trans_reserve(tp, &M_RES(mp)->tr_symlink, 0, 0);
	}
	if (error) {
		cancel_flags = 0;
		goto error_return;
	}

	xfs_ilock(dp, XFS_ILOCK_EXCL | XFS_ILOCK_PARENT);
	unlock_dp_on_error = true;

	/*
	 * Check whether the directory allows new symlinks or not.
	 */
	if (dp->i_d.di_flags & XFS_DIFLAG_NOSYMLINKS) {
		error = XFS_ERROR(EPERM);
		goto error_return;
	}

	/*
	 * Reserve disk quota : blocks and inode.
	 */
	error = xfs_trans_reserve_quota(tp, mp, udqp, gdqp,
						pdqp, resblks, 1, 0);
	if (error)
		goto error_return;

	/*
	 * Check for ability to enter directory entry, if no space reserved.
	 */
	error = xfs_dir_canenter(tp, dp, link_name, resblks);
	if (error)
		goto error_return;
	/*
	 * Initialize the bmap freelist prior to calling either
	 * bmapi or the directory create code.
	 */
	xfs_bmap_init(&free_list, &first_block);

	/*
	 * Allocate an inode for the symlink.
	 */
	error = xfs_dir_ialloc(&tp, dp, S_IFLNK | (mode & ~S_IFMT), 1, 0,
			       prid, resblks > 0, &ip, NULL);
	if (error) {
		if (error == ENOSPC)
			goto error_return;
		goto error1;
	}

	/*
	 * An error after we've joined dp to the transaction will result in the
	 * transaction cancel unlocking dp so don't do it explicitly in the
	 * error path.
	 */
	xfs_trans_ijoin(tp, dp, XFS_ILOCK_EXCL);
	unlock_dp_on_error = false;

	/*
	 * Also attach the dquot(s) to it, if applicable.
	 */
	xfs_qm_vop_create_dqattach(tp, ip, udqp, gdqp, pdqp);

	if (resblks)
		resblks -= XFS_IALLOC_SPACE_RES(mp);
	/*
	 * If the symlink will fit into the inode, write it inline.
	 */
	if (pathlen <= XFS_IFORK_DSIZE(ip)) {
		xfs_idata_realloc(ip, pathlen, XFS_DATA_FORK);
		memcpy(ip->i_df.if_u1.if_data, target_path, pathlen);
		ip->i_d.di_size = pathlen;

		/*
		 * The inode was initially created in extent format.
		 */
		ip->i_df.if_flags &= ~(XFS_IFEXTENTS | XFS_IFBROOT);
		ip->i_df.if_flags |= XFS_IFINLINE;

		ip->i_d.di_format = XFS_DINODE_FMT_LOCAL;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_DDATA | XFS_ILOG_CORE);

	} else {
		int	offset;

		first_fsb = 0;
		nmaps = XFS_SYMLINK_MAPS;

		error = xfs_bmapi_write(tp, ip, first_fsb, fs_blocks,
				  XFS_BMAPI_METADATA, &first_block, resblks,
				  mval, &nmaps, &free_list);
		if (error)
			goto error2;

		if (resblks)
			resblks -= fs_blocks;
		ip->i_d.di_size = pathlen;
		xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);

		cur_chunk = target_path;
		offset = 0;
		for (n = 0; n < nmaps; n++) {
			char	*buf;

			d = XFS_FSB_TO_DADDR(mp, mval[n].br_startblock);
			byte_cnt = XFS_FSB_TO_B(mp, mval[n].br_blockcount);
			bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, d,
					       BTOBB(byte_cnt), 0);
			if (!bp) {
				error = ENOMEM;
				goto error2;
			}
			bp->b_ops = &xfs_symlink_buf_ops;

			byte_cnt = XFS_SYMLINK_BUF_SPACE(mp, byte_cnt);
			byte_cnt = min(byte_cnt, pathlen);

			buf = bp->b_addr;
			buf += xfs_symlink_hdr_set(mp, ip->i_ino, offset,
						   byte_cnt, bp);

			memcpy(buf, cur_chunk, byte_cnt);

			cur_chunk += byte_cnt;
			pathlen -= byte_cnt;
			offset += byte_cnt;

			xfs_trans_buf_set_type(tp, bp, XFS_BLFT_SYMLINK_BUF);
			xfs_trans_log_buf(tp, bp, 0, (buf + byte_cnt - 1) -
							(char *)bp->b_addr);
		}
		ASSERT(pathlen == 0);
	}

	/*
	 * Create the directory entry for the symlink.
	 */
	error = xfs_dir_createname(tp, dp, link_name, ip->i_ino,
					&first_block, &free_list, resblks);
	if (error)
		goto error2;
	xfs_trans_ichgtime(tp, dp, XFS_ICHGTIME_MOD | XFS_ICHGTIME_CHG);
	xfs_trans_log_inode(tp, dp, XFS_ILOG_CORE);

	/*
	 * If this is a synchronous mount, make sure that the
	 * symlink transaction goes to disk before returning to
	 * the user.
	 */
	if (mp->m_flags & (XFS_MOUNT_WSYNC|XFS_MOUNT_DIRSYNC)) {
		xfs_trans_set_sync(tp);
	}

	error = xfs_bmap_finish(&tp, &free_list, &committed);
	if (error) {
		goto error2;
	}
	error = xfs_trans_commit(tp, XFS_TRANS_RELEASE_LOG_RES);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	*ipp = ip;
	return 0;

 error2:
	IRELE(ip);
 error1:
	xfs_bmap_cancel(&free_list);
	cancel_flags |= XFS_TRANS_ABORT;
 error_return:
	xfs_trans_cancel(tp, cancel_flags);
	xfs_qm_dqrele(udqp);
	xfs_qm_dqrele(gdqp);
	xfs_qm_dqrele(pdqp);

	if (unlock_dp_on_error)
		xfs_iunlock(dp, XFS_ILOCK_EXCL);
 std_return:
	return error;
}
Example #26
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		/*
		 * Grab the inodes before we drop the lock. If we found
		 * nothing, nr == 0 and the loop will be skipped.
		 */
		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			/*
			 * Update the index for the next lookup. Catch
			 * overflows into the next AG range which can occur if
			 * we have inodes in the last block of the AG and we
			 * are currently pointing to the last inode.
			 *
			 * Because we may see inodes that are from the wrong AG
			 * due to RCU freeing and reallocation, only update the
			 * index if it lies in this AG. It was a race that led
			 * us to see this inode, so another lookup from the
			 * same index will not find it again.
			 */
			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
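
The index-advance-and-wrap check in the grab loop above is easy to get wrong. Here is a standalone sketch of the same 32-bit arithmetic (the helper is invented; the kernel open-codes it with XFS_INO_TO_AGINO()):

#include <stdint.h>
#include <stdio.h>

/*
 * Returns 1 when incrementing the 32-bit per-AG index wraps, which
 * means the walk has passed the last inode in the AG.
 */
static int advance_index(uint32_t *first_index, uint32_t agino)
{
	uint32_t next = agino + 1;

	if (next < agino)
		return 1;
	*first_index = next;
	return 0;
}

int main(void)
{
	uint32_t idx = 0;
	int done;

	done = advance_index(&idx, 100);
	printf("done=%d idx=%u\n", done, idx);	/* done=0 idx=101 */
	done = advance_index(&idx, UINT32_MAX);
	printf("done=%d idx=%u\n", done, idx);	/* done=1, idx unchanged */
	return 0;
}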
Example #27
/*
 * Process a bmap update intent item that was recovered from the log.
 * We need to update some inode's bmbt.
 */
int
xfs_bui_recover(
	struct xfs_mount		*mp,
	struct xfs_bui_log_item		*buip,
	struct xfs_defer_ops		*dfops)
{
	int				error = 0;
	unsigned int			bui_type;
	struct xfs_map_extent		*bmap;
	xfs_fsblock_t			startblock_fsb;
	xfs_fsblock_t			inode_fsb;
	xfs_filblks_t			count;
	bool				op_ok;
	struct xfs_bud_log_item		*budp;
	enum xfs_bmap_intent_type	type;
	int				whichfork;
	xfs_exntst_t			state;
	struct xfs_trans		*tp;
	struct xfs_inode		*ip = NULL;
	struct xfs_bmbt_irec		irec;

	ASSERT(!test_bit(XFS_BUI_RECOVERED, &buip->bui_flags));

	/* Only one mapping operation per BUI... */
	if (buip->bui_format.bui_nextents != XFS_BUI_MAX_FAST_EXTENTS) {
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	/*
	 * First check the validity of the extent described by the
	 * BUI.  If anything is bad, then toss the BUI.
	 */
	bmap = &buip->bui_format.bui_extents[0];
	startblock_fsb = XFS_BB_TO_FSB(mp,
			   XFS_FSB_TO_DADDR(mp, bmap->me_startblock));
	inode_fsb = XFS_BB_TO_FSB(mp, XFS_FSB_TO_DADDR(mp,
			XFS_INO_TO_FSB(mp, bmap->me_owner)));
	switch (bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		op_ok = true;
		break;
	default:
		op_ok = false;
		break;
	}
	if (!op_ok || startblock_fsb == 0 ||
	    bmap->me_len == 0 ||
	    inode_fsb == 0 ||
	    startblock_fsb >= mp->m_sb.sb_dblocks ||
	    bmap->me_len >= mp->m_sb.sb_agblocks ||
	    inode_fsb >= mp->m_sb.sb_dblocks ||
	    (bmap->me_flags & ~XFS_BMAP_EXTENT_FLAGS)) {
		/*
		 * This will pull the BUI from the AIL and
		 * free the memory associated with it.
		 */
		set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
		xfs_bui_release(buip);
		return -EIO;
	}

	error = xfs_trans_alloc(mp, &M_RES(mp)->tr_itruncate,
			XFS_EXTENTADD_SPACE_RES(mp, XFS_DATA_FORK), 0, 0, &tp);
	if (error)
		return error;
	budp = xfs_trans_get_bud(tp, buip);

	/* Grab the inode. */
	error = xfs_iget(mp, tp, bmap->me_owner, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		goto err_inode;

	if (VFS_I(ip)->i_nlink == 0)
		xfs_iflags_set(ip, XFS_IRECOVERY);

	/* Process deferred bmap item. */
	state = (bmap->me_flags & XFS_BMAP_EXTENT_UNWRITTEN) ?
			XFS_EXT_UNWRITTEN : XFS_EXT_NORM;
	whichfork = (bmap->me_flags & XFS_BMAP_EXTENT_ATTR_FORK) ?
			XFS_ATTR_FORK : XFS_DATA_FORK;
	bui_type = bmap->me_flags & XFS_BMAP_EXTENT_TYPE_MASK;
	switch (bui_type) {
	case XFS_BMAP_MAP:
	case XFS_BMAP_UNMAP:
		type = bui_type;
		break;
	default:
		error = -EFSCORRUPTED;
		goto err_inode;
	}
	xfs_trans_ijoin(tp, ip, 0);

	count = bmap->me_len;
	error = xfs_trans_log_finish_bmap_update(tp, budp, dfops, type,
			ip, whichfork, bmap->me_startoff,
			bmap->me_startblock, &count, state);
	if (error)
		goto err_inode;

	if (count > 0) {
		ASSERT(type == XFS_BMAP_UNMAP);
		irec.br_startblock = bmap->me_startblock;
		irec.br_blockcount = count;
		irec.br_startoff = bmap->me_startoff;
		irec.br_state = state;
		error = xfs_bmap_unmap_extent(tp->t_mountp, dfops, ip, &irec);
		if (error)
			goto err_inode;
	}

	set_bit(XFS_BUI_RECOVERED, &buip->bui_flags);
	error = xfs_trans_commit(tp);
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
	IRELE(ip);

	return error;

err_inode:
	xfs_trans_cancel(tp);
	if (ip) {
		xfs_iunlock(ip, XFS_ILOCK_EXCL);
		IRELE(ip);
	}
	return error;
}
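
The long validation block above treats the recovered record as untrusted input and range-checks every field against the filesystem geometry before acting on it. A compact user-space sketch of that pattern (limits and types invented):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Invented record layout and limits, standing in for the BUI fields. */
struct recovered_extent {
	uint64_t	startblock;
	uint64_t	len;
};

/* Range-check every field of an untrusted log record before use. */
static bool extent_ok(const struct recovered_extent *ex,
		      uint64_t total_blocks, uint64_t max_len)
{
	if (ex->startblock == 0 || ex->len == 0)
		return false;
	if (ex->startblock >= total_blocks || ex->len >= max_len)
		return false;
	return true;
}

int main(void)
{
	struct recovered_extent good = { .startblock = 10, .len = 4 };
	struct recovered_extent bad  = { .startblock = 1ULL << 40, .len = 4 };

	printf("good: %d\n", extent_ok(&good, 1ULL << 20, 1ULL << 16));
	printf("bad:  %d\n", extent_ok(&bad,  1ULL << 20, 1ULL << 16));
	return 0;
}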
Example #28
/*
 * Return stat information for one inode.
 * Return 0 if ok, else errno.
 */
int
xfs_bulkstat_one_int(
	struct xfs_mount	*mp,		/* mount point for filesystem */
	xfs_ino_t		ino,		/* inode to get data for */
	void __user		*buffer,	/* buffer to place output in */
	int			ubsize,		/* size of buffer */
	bulkstat_one_fmt_pf	formatter,	/* formatter, copy to user */
	int			*ubused,	/* bytes used by me */
	int			*stat)		/* BULKSTAT_RV_... */
{
	struct xfs_icdinode	*dic;		/* dinode core info pointer */
	struct xfs_inode	*ip;		/* incore inode pointer */
	struct xfs_bstat	*buf;		/* return buffer */
	int			error = 0;	/* error value */

	*stat = BULKSTAT_RV_NOTHING;

	if (!buffer || xfs_internal_inum(mp, ino))
		return -EINVAL;

	buf = kmem_alloc(sizeof(*buf), KM_SLEEP | KM_MAYFAIL);
	if (!buf)
		return -ENOMEM;

	error = xfs_iget(mp, NULL, ino,
			 (XFS_IGET_DONTCACHE | XFS_IGET_UNTRUSTED),
			 XFS_ILOCK_SHARED, &ip);
	if (error)
		goto out_free;

	ASSERT(ip != NULL);
	ASSERT(ip->i_imap.im_blkno != 0);

	dic = &ip->i_d;

	/* xfs_iget returns the following without needing
	 * further change.
	 */
	buf->bs_nlink = dic->di_nlink;
	buf->bs_projid_lo = dic->di_projid_lo;
	buf->bs_projid_hi = dic->di_projid_hi;
	buf->bs_ino = ino;
	buf->bs_mode = dic->di_mode;
	buf->bs_uid = dic->di_uid;
	buf->bs_gid = dic->di_gid;
	buf->bs_size = dic->di_size;
	buf->bs_atime.tv_sec = dic->di_atime.t_sec;
	buf->bs_atime.tv_nsec = dic->di_atime.t_nsec;
	buf->bs_mtime.tv_sec = dic->di_mtime.t_sec;
	buf->bs_mtime.tv_nsec = dic->di_mtime.t_nsec;
	buf->bs_ctime.tv_sec = dic->di_ctime.t_sec;
	buf->bs_ctime.tv_nsec = dic->di_ctime.t_nsec;
	buf->bs_xflags = xfs_ip2xflags(ip);
	buf->bs_extsize = dic->di_extsize << mp->m_sb.sb_blocklog;
	buf->bs_extents = dic->di_nextents;
	buf->bs_gen = dic->di_gen;
	memset(buf->bs_pad, 0, sizeof(buf->bs_pad));
	buf->bs_dmevmask = dic->di_dmevmask;
	buf->bs_dmstate = dic->di_dmstate;
	buf->bs_aextents = dic->di_anextents;
	buf->bs_forkoff = XFS_IFORK_BOFF(ip);

	switch (dic->di_format) {
	case XFS_DINODE_FMT_DEV:
		buf->bs_rdev = ip->i_df.if_u2.if_rdev;
		buf->bs_blksize = BLKDEV_IOSIZE;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_LOCAL:
	case XFS_DINODE_FMT_UUID:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = 0;
		break;
	case XFS_DINODE_FMT_EXTENTS:
	case XFS_DINODE_FMT_BTREE:
		buf->bs_rdev = 0;
		buf->bs_blksize = mp->m_sb.sb_blocksize;
		buf->bs_blocks = dic->di_nblocks + ip->i_delayed_blks;
		break;
	}
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);

	error = formatter(buffer, ubsize, ubused, buf);
	if (!error)
		*stat = BULKSTAT_RV_DIDONE;

 out_free:
	kmem_free(buf);
	return error;
}
Example #29
STATIC int
xfs_inode_ag_walk(
	struct xfs_mount	*mp,
	struct xfs_perag	*pag,
	int			(*execute)(struct xfs_inode *ip,
					   struct xfs_perag *pag, int flags),
	int			flags)
{
	uint32_t		first_index;
	int			last_error = 0;
	int			skipped;
	int			done;
	int			nr_found;

restart:
	done = 0;
	skipped = 0;
	first_index = 0;
	nr_found = 0;
	do {
		struct xfs_inode *batch[XFS_LOOKUP_BATCH];
		int		error = 0;
		int		i;

		rcu_read_lock();
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
					(void **)batch, first_index,
					XFS_LOOKUP_BATCH);
		if (!nr_found) {
			rcu_read_unlock();
			break;
		}

		for (i = 0; i < nr_found; i++) {
			struct xfs_inode *ip = batch[i];

			if (done || xfs_inode_ag_walk_grab(ip))
				batch[i] = NULL;

			if (XFS_INO_TO_AGNO(mp, ip->i_ino) != pag->pag_agno)
				continue;
			first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
			if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino))
				done = 1;
		}

		/* unlock now we've grabbed the inodes. */
		rcu_read_unlock();

		for (i = 0; i < nr_found; i++) {
			if (!batch[i])
				continue;
			error = execute(batch[i], pag, flags);
			IRELE(batch[i]);
			if (error == EAGAIN) {
				skipped++;
				continue;
			}
			if (error && last_error != EFSCORRUPTED)
				last_error = error;
		}

		/* bail out if the filesystem is corrupted.  */
		if (error == EFSCORRUPTED)
			break;

		cond_resched();

	} while (nr_found && !done);

	if (skipped) {
		delay(1);
		goto restart;
	}
	return last_error;
}
/*
 * Turn off quota accounting and/or enforcement for all udquots and/or
 * gdquots. Called only at unmount time.
 *
 * This assumes that there are no dquots of this file system cached
 * incore, and modifies the ondisk dquot directly. Therefore, for example,
 * it is an error to call this twice, without purging the cache.
 */
int
xfs_qm_scall_quotaoff(
    xfs_mount_t		*mp,
    uint			flags)
{
    struct xfs_quotainfo	*q = mp->m_quotainfo;
    uint			dqtype;
    int			error;
    uint			inactivate_flags;
    xfs_qoff_logitem_t	*qoffstart;

    /*
     * No file system can have quotas enabled on disk but not in core.
     * Note that quota utilities (like quotaoff) _expect_
     * errno == EEXIST here.
     */
    if ((mp->m_qflags & flags) == 0)
        return XFS_ERROR(EEXIST);
    error = 0;

    flags &= (XFS_ALL_QUOTA_ACCT | XFS_ALL_QUOTA_ENFD);

    /*
     * We don't want to deal with two quotaoffs messing up each other,
     * so we're going to serialize it. quotaoff isn't exactly a performance
     * critical thing.
     * If quotaoff, then we must be dealing with the root filesystem.
     */
    ASSERT(q);
    mutex_lock(&q->qi_quotaofflock);

    /*
     * If we're just turning off quota enforcement, change mp and go.
     */
    if ((flags & XFS_ALL_QUOTA_ACCT) == 0) {
        mp->m_qflags &= ~(flags);

        spin_lock(&mp->m_sb_lock);
        mp->m_sb.sb_qflags = mp->m_qflags;
        spin_unlock(&mp->m_sb_lock);
        mutex_unlock(&q->qi_quotaofflock);

        /* XXX what to do if error ? Revert back to old vals incore ? */
        error = xfs_qm_write_sb_changes(mp, XFS_SB_QFLAGS);
        return (error);
    }

    dqtype = 0;
    inactivate_flags = 0;
    /*
     * If accounting is off, we must turn enforcement off, clear the
     * quota 'CHKD' certificate to make it known that we have to
     * do a quotacheck the next time this quota is turned on.
     */
    if (flags & XFS_UQUOTA_ACCT) {
        dqtype |= XFS_QMOPT_UQUOTA;
        flags |= (XFS_UQUOTA_CHKD | XFS_UQUOTA_ENFD);
        inactivate_flags |= XFS_UQUOTA_ACTIVE;
    }
    if (flags & XFS_GQUOTA_ACCT) {
        dqtype |= XFS_QMOPT_GQUOTA;
        flags |= (XFS_GQUOTA_CHKD | XFS_GQUOTA_ENFD);
        inactivate_flags |= XFS_GQUOTA_ACTIVE;
    }
    if (flags & XFS_PQUOTA_ACCT) {
        dqtype |= XFS_QMOPT_PQUOTA;
        flags |= (XFS_PQUOTA_CHKD | XFS_PQUOTA_ENFD);
        inactivate_flags |= XFS_PQUOTA_ACTIVE;
    }

    /*
     * Nothing to do?  Don't complain. This happens when we're just
     * turning off quota enforcement.
     */
    if ((mp->m_qflags & flags) == 0)
        goto out_unlock;

    /*
     * Write the LI_QUOTAOFF log record, and do SB changes atomically,
     * and synchronously. If we fail to write, we should abort the
     * operation as it cannot be recovered safely if we crash.
     */
    error = xfs_qm_log_quotaoff(mp, &qoffstart, flags);
    if (error)
        goto out_unlock;

    /*
     * Next we clear the XFS_MOUNT_*DQ_ACTIVE bit(s) in the mount struct
     * to take care of the race between dqget and quotaoff. We don't take
     * any special locks to reset these bits. All processes need to check
     * these bits *after* taking inode lock(s) to see if the particular
     * quota type is in the process of being turned off. If *ACTIVE, it is
     * guaranteed that all dquot structures and all quotainode ptrs will
     * stay valid as long as that inode is kept locked.
     *
     * There is no turning back after this.
     */
    mp->m_qflags &= ~inactivate_flags;

    /*
     * Give back all the dquot reference(s) held by inodes.
     * Here we go thru every single incore inode in this file system, and
     * do a dqrele on the i_udquot/i_gdquot that it may have.
     * Essentially, as long as somebody has an inode locked, this guarantees
     * that quotas will not be turned off. This is handy because in a
     * transaction once we lock the inode(s) and check for quotaon, we can
     * depend on the quota inodes (and other things) being valid as long as
     * we keep the lock(s).
     */
    xfs_qm_dqrele_all_inodes(mp, flags);

    /*
     * Next we make the changes in the quota flag in the mount struct.
     * This isn't protected by a particular lock directly, because we
     * don't want to take a mrlock every time we depend on quotas being on.
     */
    mp->m_qflags &= ~flags;

    /*
     * Go through all the dquots of this file system and purge them,
     * according to what was turned off.
     */
    xfs_qm_dqpurge_all(mp, dqtype);

    /*
     * Transactions that had started before ACTIVE state bit was cleared
     * could have logged many dquots, so they'd have higher LSNs than
     * the first QUOTAOFF log record does. If we happen to crash when
     * the tail of the log has gone past the QUOTAOFF record, but
     * before the last dquot modification, those dquots __will__
     * recover, and that's not good.
     *
     * So, we have QUOTAOFF start and end logitems; the start
     * logitem won't get overwritten until the end logitem appears...
     */
    error = xfs_qm_log_quotaoff_end(mp, qoffstart, flags);
    if (error) {
        /* We're screwed now. Shutdown is the only option. */
        xfs_force_shutdown(mp, SHUTDOWN_CORRUPT_INCORE);
        goto out_unlock;
    }

    /*
     * If all quotas are completely turned off, close shop.
     */
    if (mp->m_qflags == 0) {
        mutex_unlock(&q->qi_quotaofflock);
        xfs_qm_destroy_quotainfo(mp);
        return (0);
    }

    /*
     * Release our quotainode references if we don't need them anymore.
     */
    if ((dqtype & XFS_QMOPT_UQUOTA) && q->qi_uquotaip) {
        IRELE(q->qi_uquotaip);
        q->qi_uquotaip = NULL;
    }
    if ((dqtype & XFS_QMOPT_GQUOTA) && q->qi_gquotaip) {
        IRELE(q->qi_gquotaip);
        q->qi_gquotaip = NULL;
    }
    if ((dqtype & XFS_QMOPT_PQUOTA) && q->qi_pquotaip) {
        IRELE(q->qi_pquotaip);
        q->qi_pquotaip = NULL;
    }

out_unlock:
    mutex_unlock(&q->qi_quotaofflock);
    return error;
}