STATIC int
xfs_dqrele_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	int			error;

	/* skip quota inodes */
	if (ip == XFS_QI_UQIP(ip->i_mount) || ip == XFS_QI_GQIP(ip->i_mount)) {
		ASSERT(ip->i_udquot == NULL);
		ASSERT(ip->i_gdquot == NULL);
		read_unlock(&pag->pag_ici_lock);
		return 0;
	}

	error = xfs_sync_inode_valid(ip, pag);
	if (error)
		return error;

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	if ((flags & XFS_UQUOTA_ACCT) && ip->i_udquot) {
		xfs_qm_dqrele(ip->i_udquot);
		ip->i_udquot = NULL;
	}
	if (flags & (XFS_PQUOTA_ACCT|XFS_GQUOTA_ACCT) && ip->i_gdquot) {
		xfs_qm_dqrele(ip->i_gdquot);
		ip->i_gdquot = NULL;
	}
	xfs_iput(ip, XFS_ILOCK_EXCL);

	return 0;
}
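/*
 * A minimal sketch of how a caller might drive xfs_dqrele_inode() over
 * every in-core inode, e.g. before quota-off.  The per-AG iterator name,
 * tag constant, and argument list below are assumptions for illustration,
 * not a confirmed call site.  The key point is the locking contract:
 * the callback is entered with pag_ici_lock held, and either drops it
 * itself (quota-inode case) or via xfs_sync_inode_valid().
 */
STATIC void
xfs_dqrele_all_inodes_sketch(
	struct xfs_mount	*mp,
	uint			flags)
{
	/* hypothetical iterator over each AG's in-core inode radix tree */
	xfs_inode_ag_iterator(mp, xfs_dqrele_inode, flags, XFS_ICI_NO_TAG);
}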
STATIC int
xfs_qm_internalqcheck_adjust(
	xfs_mount_t	*mp,		/* mount point for filesystem */
	xfs_ino_t	ino,		/* inode number to get data for */
	void		__user *buffer,	/* not used */
	int		ubsize,		/* not used */
	void		*private_data,	/* not used */
	xfs_daddr_t	bno,		/* starting block of inode cluster */
	int		*ubused,	/* not used */
	void		*dip,		/* not used */
	int		*res)		/* bulkstat result code */
{
	xfs_inode_t	*ip;
	xfs_dqtest_t	*ud, *gd;
	uint		lock_flags;
	boolean_t	ipreleased;
	int		error;

	ASSERT(XFS_IS_QUOTA_RUNNING(mp));

	if (ino == mp->m_sb.sb_uquotino || ino == mp->m_sb.sb_gquotino) {
		*res = BULKSTAT_RV_NOTHING;
		qdprintk("internalqcheck: ino=%llu, uqino=%llu, gqino=%llu\n",
			(unsigned long long) ino,
			(unsigned long long) mp->m_sb.sb_uquotino,
			(unsigned long long) mp->m_sb.sb_gquotino);
		return XFS_ERROR(EINVAL);
	}
	ipreleased = B_FALSE;
 again:
	lock_flags = XFS_ILOCK_SHARED;
	if ((error = xfs_iget(mp, NULL, ino, 0, lock_flags, &ip, bno))) {
		*res = BULKSTAT_RV_NOTHING;
		return (error);
	}

	if (ip->i_d.di_mode == 0) {
		xfs_iput_new(ip, lock_flags);
		*res = BULKSTAT_RV_NOTHING;
		return XFS_ERROR(ENOENT);
	}

	/*
	 * This inode can have blocks after eof which can get released
	 * when we send it to inactive. Since we don't check the dquot
	 * until after all our calculations are done, we must get rid
	 * of those now.
	 */
	if (! ipreleased) {
		xfs_iput(ip, lock_flags);
		ipreleased = B_TRUE;
		goto again;
	}
	xfs_qm_internalqcheck_get_dquots(mp,
					(xfs_dqid_t) ip->i_d.di_uid,
					(xfs_dqid_t) ip->i_d.di_projid,
					(xfs_dqid_t) ip->i_d.di_gid,
					&ud, &gd);
	if (XFS_IS_UQUOTA_ON(mp)) {
		ASSERT(ud);
		xfs_qm_internalqcheck_dqadjust(ip, ud);
	}
	if (XFS_IS_OQUOTA_ON(mp)) {
		ASSERT(gd);
		xfs_qm_internalqcheck_dqadjust(ip, gd);
	}
	xfs_iput(ip, lock_flags);
	*res = BULKSTAT_RV_DIDONE;
	return (0);
}
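/*
 * Sketch: xfs_qm_internalqcheck_adjust() is shaped as a bulkstat
 * formatter, so the quota self-check would walk every inode in the
 * filesystem roughly as below.  The exact xfs_bulkstat() argument list
 * is illustrative only (an assumption); the formatter arguments marked
 * "not used" above are simply passed as NULL/0 by the walker, and *res
 * tells the walker whether the inode was counted.
 */
STATIC void
xfs_qm_internalqcheck_walk_sketch(
	xfs_mount_t	*mp)
{
	xfs_ino_t	lastino = 0;
	int		count = 64;	/* inodes per batch, arbitrary */
	int		done = 0;

	while (!done) {
		/* hypothetical call shape for the inode walker */
		if (xfs_bulkstat(mp, &lastino, &count,
				 xfs_qm_internalqcheck_adjust, NULL,
				 0, NULL, BULKSTAT_FG_IGET, &done))
			break;
	}
}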
/*
 * Unlock the inode associated with the inode log item.
 * Clear the fields of the inode and inode log item that
 * are specific to the current transaction.  If the hold
 * flag is set, do not unlock the inode.
 */
STATIC void
xfs_inode_item_unlock(
	xfs_inode_log_item_t	*iip)
{
	uint		hold;
	uint		iolocked;
	uint		lock_flags;
	xfs_inode_t	*ip;

	ASSERT(iip != NULL);
	ASSERT(iip->ili_inode->i_itemp != NULL);
	ASSERT(xfs_isilocked(iip->ili_inode, XFS_ILOCK_EXCL));
	ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
		  XFS_ILI_IOLOCKED_EXCL)) ||
	       xfs_isilocked(iip->ili_inode, XFS_IOLOCK_EXCL));
	ASSERT((!(iip->ili_inode->i_itemp->ili_flags &
		  XFS_ILI_IOLOCKED_SHARED)) ||
	       xfs_isilocked(iip->ili_inode, XFS_IOLOCK_SHARED));
	/*
	 * Clear the transaction pointer in the inode.
	 */
	ip = iip->ili_inode;
	ip->i_transp = NULL;

	/*
	 * If the inode needed a separate buffer with which to log
	 * its extents, then free it now.
	 */
	if (iip->ili_extents_buf != NULL) {
		ASSERT(ip->i_d.di_format == XFS_DINODE_FMT_EXTENTS);
		ASSERT(ip->i_d.di_nextents > 0);
		ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_DEXT);
		ASSERT(ip->i_df.if_bytes > 0);
		kmem_free(iip->ili_extents_buf);
		iip->ili_extents_buf = NULL;
	}
	if (iip->ili_aextents_buf != NULL) {
		ASSERT(ip->i_d.di_aformat == XFS_DINODE_FMT_EXTENTS);
		ASSERT(ip->i_d.di_anextents > 0);
		ASSERT(iip->ili_format.ilf_fields & XFS_ILOG_AEXT);
		ASSERT(ip->i_afp->if_bytes > 0);
		kmem_free(iip->ili_aextents_buf);
		iip->ili_aextents_buf = NULL;
	}

	/*
	 * Figure out if we should unlock the inode or not.
	 */
	hold = iip->ili_flags & XFS_ILI_HOLD;

	/*
	 * Before clearing out the flags, remember whether we
	 * are holding the inode's IO lock.
	 */
	iolocked = iip->ili_flags & XFS_ILI_IOLOCKED_ANY;

	/*
	 * Clear out the fields of the inode log item particular
	 * to the current transaction.
	 */
	iip->ili_ilock_recur = 0;
	iip->ili_iolock_recur = 0;
	iip->ili_flags = 0;

	/*
	 * Unlock the inode if XFS_ILI_HOLD was not set.
	 */
	if (!hold) {
		lock_flags = XFS_ILOCK_EXCL;
		if (iolocked & XFS_ILI_IOLOCKED_EXCL) {
			lock_flags |= XFS_IOLOCK_EXCL;
		} else if (iolocked & XFS_ILI_IOLOCKED_SHARED) {
			lock_flags |= XFS_IOLOCK_SHARED;
		}
		xfs_iput(iip->ili_inode, lock_flags);
	}
}
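/*
 * Sketch of the hold-flag interplay this unlock path implements: a
 * caller that marks the inode held via xfs_trans_ihold() causes commit
 * to reach xfs_inode_item_unlock() with XFS_ILI_HOLD set, so the inode
 * stays locked for the caller to unlock afterwards.  Reservation setup
 * and error handling are elided; this is illustrative, not a real
 * call site.
 */
STATIC void
xfs_hold_example_sketch(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip)
{
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);	/* join locked inode */
	xfs_trans_ihold(tp, ip);	/* sets XFS_ILI_HOLD on the item */

	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);	/* dirty the inode */
	xfs_trans_commit(tp, 0);

	/* commit did not drop the ILOCK because of the hold, so we must */
	xfs_iunlock(ip, XFS_ILOCK_EXCL);
}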
/*
 * Release the inode ip which was previously acquired with xfs_trans_iget()
 * or added with xfs_trans_ijoin().  This will decrement the lock
 * recursion count of the inode item.  If the count goes to less than 0,
 * the inode will be unlocked and disassociated from the transaction.
 *
 * If the inode has been modified within the transaction, it will not be
 * unlocked until the transaction commits.
 */
void
xfs_trans_iput(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	xfs_inode_log_item_t	*iip;
	xfs_log_item_desc_t	*lidp;

	/*
	 * If the transaction pointer is NULL, just call xfs_iput().
	 */
	if (tp == NULL) {
		xfs_iput(ip, lock_flags);
		return;
	}

	ASSERT(ip->i_transp == tp);
	iip = ip->i_itemp;
	ASSERT(iip != NULL);

	/*
	 * Find the item descriptor pointing to this inode's
	 * log item.  It must be there.
	 */
	lidp = xfs_trans_find_item(tp, (xfs_log_item_t*)iip);
	ASSERT(lidp != NULL);
	ASSERT(lidp->lid_item == (xfs_log_item_t*)iip);

	/*
	 * Be consistent about the bookkeeping for the inode's
	 * io lock, but it doesn't mean much really.
	 */
	ASSERT((iip->ili_flags & XFS_ILI_IOLOCKED_ANY) != XFS_ILI_IOLOCKED_ANY);
	if (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) {
		ASSERT(iip->ili_flags & XFS_ILI_IOLOCKED_ANY);
		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
		       (iip->ili_flags & XFS_ILI_IOLOCKED_EXCL));
		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
		       (iip->ili_flags &
			(XFS_ILI_IOLOCKED_EXCL | XFS_ILI_IOLOCKED_SHARED)));
		if (iip->ili_iolock_recur > 0) {
			iip->ili_iolock_recur--;
		}
	}

	/*
	 * If the release is just for a recursive lock on the inode lock,
	 * then decrement the count and return.  We can assert that
	 * the caller is dropping an EXCL lock on the inode, because
	 * inode must be locked EXCL within transactions.
	 */
	ASSERT(lock_flags & XFS_ILOCK_EXCL);
	if (iip->ili_ilock_recur > 0) {
		iip->ili_ilock_recur--;
		return;
	}
	ASSERT(iip->ili_iolock_recur == 0);

	/*
	 * If the inode was dirtied within this transaction, it cannot
	 * be released until the transaction commits.
	 */
	if (lidp->lid_flags & XFS_LID_DIRTY) {
		return;
	}

	xfs_trans_free_item(tp, lidp);

	/*
	 * Clear the hold and iolocked flags in the inode log item.
	 * We wouldn't want the next user of the inode to
	 * get confused.  Assert that if the iolocked flag is set
	 * in the item then we are unlocking it in the call to xfs_iput()
	 * below.
	 */
	ASSERT((!(iip->ili_flags & XFS_ILI_IOLOCKED_ANY)) ||
	       (lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)));
	if (iip->ili_flags & (XFS_ILI_HOLD | XFS_ILI_IOLOCKED_ANY)) {
		iip->ili_flags &= ~(XFS_ILI_HOLD | XFS_ILI_IOLOCKED_ANY);
	}

	/*
	 * Unlike xfs_brelse() the inode log item cannot be
	 * freed, because it is embedded within the inode.
	 * All we have to do is release the inode.
	 */
	xfs_iput(ip, lock_flags);
	return;
}
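/*
 * Usage sketch for xfs_trans_iput(): grab an inode within a transaction
 * and release it again when it turns out not to be needed (and has not
 * been dirtied).  The inode number and flags are placeholders and error
 * handling is minimal; this is a sketch, not a real call site.
 */
STATIC int
xfs_trans_iput_example(
	xfs_trans_t	*tp,
	xfs_mount_t	*mp,
	xfs_ino_t	ino)
{
	xfs_inode_t	*ip;
	int		error;

	error = xfs_trans_iget(mp, tp, ino, 0, XFS_ILOCK_EXCL, &ip);
	if (error)
		return error;

	/*
	 * Nothing was logged against ip, so its descriptor is not
	 * XFS_LID_DIRTY: xfs_trans_iput() frees the item descriptor
	 * and unlocks/releases the inode immediately.
	 */
	xfs_trans_iput(tp, ip, XFS_ILOCK_EXCL);
	return 0;
}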
/*
 * Sync all the inodes in the given AG according to the
 * direction given by the flags.
 */
STATIC int
xfs_sync_inodes_ag(
	xfs_mount_t	*mp,
	int		ag,
	int		flags)
{
	xfs_perag_t	*pag = &mp->m_perag[ag];
	int		nr_found;
	uint32_t	first_index = 0;
	int		error = 0;
	int		last_error = 0;
	int		fflag = XFS_B_ASYNC;

	if (flags & SYNC_DELWRI)
		fflag = XFS_B_DELWRI;
	if (flags & SYNC_WAIT)
		fflag = 0;		/* synchronous overrides all */

	do {
		struct inode	*inode;
		xfs_inode_t	*ip = NULL;
		int		lock_flags = XFS_ILOCK_SHARED;

		/*
		 * use a gang lookup to find the next inode in the tree
		 * as the tree is sparse and a gang lookup walks to find
		 * the number of objects requested.
		 */
		read_lock(&pag->pag_ici_lock);
		nr_found = radix_tree_gang_lookup(&pag->pag_ici_root,
				(void**)&ip, first_index, 1);

		if (!nr_found) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/*
		 * Update the index for the next lookup. Catch overflows
		 * into the next AG range which can occur if we have inodes
		 * in the last block of the AG and we are currently
		 * pointing to the last inode.
		 */
		first_index = XFS_INO_TO_AGINO(mp, ip->i_ino + 1);
		if (first_index < XFS_INO_TO_AGINO(mp, ip->i_ino)) {
			read_unlock(&pag->pag_ici_lock);
			break;
		}

		/* nothing to sync during shutdown */
		if (XFS_FORCED_SHUTDOWN(mp)) {
			read_unlock(&pag->pag_ici_lock);
			return 0;
		}

		/*
		 * If we can't get a reference on the inode, it must be
		 * in reclaim. Leave it for the reclaim code to flush.
		 */
		inode = VFS_I(ip);
		if (!igrab(inode)) {
			read_unlock(&pag->pag_ici_lock);
			continue;
		}
		read_unlock(&pag->pag_ici_lock);

		/* avoid new or bad inodes */
		if (is_bad_inode(inode) ||
		    xfs_iflags_test(ip, XFS_INEW)) {
			IRELE(ip);
			continue;
		}

		/*
		 * If we have to flush data or wait for I/O completion
		 * we need to hold the iolock.
		 */
		if ((flags & SYNC_DELWRI) && VN_DIRTY(inode)) {
			xfs_ilock(ip, XFS_IOLOCK_SHARED);
			lock_flags |= XFS_IOLOCK_SHARED;
			error = xfs_flush_pages(ip, 0, -1, fflag, FI_NONE);
			if (flags & SYNC_IOWAIT)
				xfs_ioend_wait(ip);
		}
		xfs_ilock(ip, XFS_ILOCK_SHARED);

		if ((flags & SYNC_ATTR) && !xfs_inode_clean(ip)) {
			if (flags & SYNC_WAIT) {
				xfs_iflock(ip);
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_SYNC);
				else
					xfs_ifunlock(ip);
			} else if (xfs_iflock_nowait(ip)) {
				if (!xfs_inode_clean(ip))
					error = xfs_iflush(ip, XFS_IFLUSH_DELWRI);
				else
					xfs_ifunlock(ip);
			}
		}
		xfs_iput(ip, lock_flags);

		if (error)
			last_error = error;
		/*
		 * bail out if the filesystem is corrupted.
		 */
		if (error == EFSCORRUPTED)
			return XFS_ERROR(error);

	} while (nr_found);

	return last_error;
}
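/*
 * Sketch of the caller side: syncing the whole filesystem is just a
 * loop over all AGs, feeding each to xfs_sync_inodes_ag() and keeping
 * the last error, mirroring the error accumulation inside the AG walk.
 * The function name and the pag_ici_init short-circuit are illustrative
 * assumptions.
 */
STATIC int
xfs_sync_inodes_sketch(
	xfs_mount_t	*mp,
	int		flags)
{
	int		error = 0;
	int		last_error = 0;
	int		i;

	for (i = 0; i < mp->m_sb.sb_agcount; i++) {
		if (!mp->m_perag[i].pag_ici_init)
			continue;	/* AG has no in-core inodes yet */
		error = xfs_sync_inodes_ag(mp, i, flags);
		if (error)
			last_error = error;
		if (error == EFSCORRUPTED)
			break;
	}
	return XFS_ERROR(last_error);
}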