/*
 * Flush dirty in-core inode metadata (attributes) for a single inode
 * during a sync pass over a per-AG inode radix tree.
 *
 * ip:    inode to flush
 * pag:   per-AG structure the inode was found in
 * flags: SYNC_WAIT selects a blocking, synchronous flush; otherwise the
 *        inode is queued as a delayed write.
 *
 * Returns 0 on success or a nonzero error from xfs_sync_inode_valid()
 * or xfs_iflush().
 *
 * NOTE(review): xfs_sync_inode_valid() is not visible here; it appears
 * to take an inode reference on success (IRELE() below drops one on
 * every exit path past it) — confirm against its definition.
 */
STATIC int
xfs_sync_inode_attr(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_SHARED);
	/* Nothing dirty - nothing to flush. */
	if (xfs_inode_clean(ip))
		goto out_unlock;
	if (!xfs_iflock_nowait(ip)) {
		/*
		 * Flush lock is held by someone else; only block waiting
		 * for it if the caller asked for a synchronous sync.
		 */
		if (!(flags & SYNC_WAIT))
			goto out_unlock;
		xfs_iflock(ip);
	}

	/*
	 * Re-check after acquiring the flush lock: whoever held it may
	 * have flushed the inode while we waited, leaving it clean.
	 */
	if (xfs_inode_clean(ip)) {
		xfs_ifunlock(ip);
		goto out_unlock;
	}

	error = xfs_iflush(ip, (flags & SYNC_WAIT) ?
			   XFS_IFLUSH_SYNC : XFS_IFLUSH_DELWRI);

 out_unlock:
	xfs_iunlock(ip, XFS_ILOCK_SHARED);
	IRELE(ip);
	return error;
}
/*
 * Drop the dquot references held by a single inode, used when quota
 * accounting of the kinds named in @flags is being torn down.
 *
 * ip:    inode whose dquot references are to be released
 * pag:   per-AG structure the inode was found in
 * flags: XFS_UQUOTA_ACCT releases the user dquot; XFS_PQUOTA_ACCT or
 *        XFS_GQUOTA_ACCT releases the group/project dquot (the two
 *        share the i_gdquot pointer).
 *
 * Returns 0 on success or a nonzero error from xfs_sync_inode_valid().
 */
STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error;

	/* skip quota inodes */
	if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) {
		/* The quota inodes themselves never carry dquot refs. */
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		/*
		 * NOTE(review): we drop pag_ici_lock by hand because we
		 * bail out before xfs_sync_inode_valid(), which presumably
		 * releases it on the normal path — confirm against its
		 * definition.
		 */
		read_unlock(&pag->pag_ici_lock);
		return 0;
	}

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	/*
	 * NOTE(review): xfs_iput() appears to drop both the ILOCK and the
	 * inode reference taken earlier — confirm; there is no separate
	 * IRELE() on this path.
	 */
	xfs_iput(ip, XFS_ILOCK_EXCL);
	return 0;
}
/*
 * Write back dirty pagecache data for a single inode during a sync
 * pass.
 *
 * ip:    inode whose data is to be flushed
 * pag:   per-AG structure the inode was found in
 * flags: SYNC_TRYLOCK makes the IOLOCK acquisition non-blocking (skip
 *        the inode on contention); SYNC_WAIT makes the page flush
 *        synchronous and additionally waits for outstanding ioends.
 *
 * Returns 0 on success or a nonzero error from xfs_sync_inode_valid()
 * or xfs_flush_pages().
 *
 * NOTE(review): as in xfs_sync_inode_attr(), xfs_sync_inode_valid()
 * appears to take an inode reference that IRELE() drops on every exit
 * path past it — confirm against its definition.
 */
STATIC int
xfs_sync_inode_data(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct inode		*inode = VFS_I(ip);
	struct address_space	*mapping = inode->i_mapping;
	int			error = 0;

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	/* No dirty pages - nothing to write, but may still need to wait. */
	if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
		goto out_wait;

	if (!xfs_ilock_nowait(ip, XFS_IOLOCK_SHARED)) {
		/* Lock contended: skip if non-blocking, else block. */
		if (flags & SYNC_TRYLOCK)
			goto out_wait;
		xfs_ilock(ip, XFS_IOLOCK_SHARED);
	}

	/* Flush the whole range; async unless SYNC_WAIT was requested. */
	error = xfs_flush_pages(ip, 0, -1, (flags & SYNC_WAIT) ?
						0 : XFS_B_ASYNC, FI_NONE);
	xfs_iunlock(ip, XFS_IOLOCK_SHARED);

 out_wait:
	/* For a synchronous sync, also wait for in-flight I/O to end. */
	if (flags & SYNC_WAIT)
		xfs_ioend_wait(ip);
	IRELE(ip);
	return error;
}