/*
 * Get and join an inode to the transaction (libxfs version).
 *
 * If tp is NULL this degenerates to a plain libxfs_iget() so callers
 * that must handle both cases need no special logic.  Otherwise the
 * inode is looked up, its log item is initialized on first use, and
 * the item is added to the transaction.
 *
 * Returns 0 on success or the error returned by libxfs_iget().
 */
int
libxfs_trans_iget(
	xfs_mount_t		*mp,
	xfs_trans_t		*tp,
	xfs_ino_t		ino,
	uint			flags,
	uint			lock_flags,
	xfs_inode_t		**ipp)
{
	int			error;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;

	if (tp == NULL)
		return libxfs_iget(mp, tp, ino, lock_flags, ipp, 0);

	error = libxfs_iget(mp, tp, ino, lock_flags, &ip, 0);
	if (error)
		return error;
	ASSERT(ip != NULL);

	if (ip->i_itemp == NULL)
		xfs_inode_item_init(ip, mp);
	iip = ip->i_itemp;
	/*
	 * Pass the embedded log item directly instead of casting the
	 * inode log item pointer, which silently relied on ili_item
	 * being the first member; this matches xfs_trans_ijoin().
	 */
	xfs_trans_add_item(tp, &iip->ili_item);

	/* initialize i_transp so we can find it incore */
	ip->i_transp = tp;

	*ipp = ip;
	return 0;
}
/*
 * Initialize the inode create log item for a newly allocated (in-core)
 * inode chunk.
 *
 * Inode extents can only reside within an AG, so the chunk is described
 * by its AG number, the starting block offset within that AG, and the
 * length of the allocated extent.
 *
 * The item is joined to the transaction and marked dirty here, so the
 * caller needs no separate call and no knowledge of the icreate item.
 */
void
xfs_icreate_log(
	struct xfs_trans	*tp,
	xfs_agnumber_t		agno,
	xfs_agblock_t		agbno,
	unsigned int		count,
	unsigned int		inode_size,
	xfs_agblock_t		length,
	unsigned int		generation)
{
	struct xfs_icreate_item	*item;

	item = kmem_zone_zalloc(xfs_icreate_zone, KM_SLEEP);

	xfs_log_item_init(tp->t_mountp, &item->ic_item, XFS_LI_ICREATE,
			  &xfs_icreate_item_ops);

	/* Fill in the log format description of the new inode chunk. */
	item->ic_format.icl_type = XFS_LI_ICREATE;
	item->ic_format.icl_size = 1;	/* single vector */
	item->ic_format.icl_ag = cpu_to_be32(agno);
	item->ic_format.icl_agbno = cpu_to_be32(agbno);
	item->ic_format.icl_count = cpu_to_be32(count);
	item->ic_format.icl_isize = cpu_to_be32(inode_size);
	item->ic_format.icl_length = cpu_to_be32(length);
	item->ic_format.icl_gen = cpu_to_be32(generation);

	/* Join the item and immediately dirty both it and the transaction. */
	xfs_trans_add_item(tp, &item->ic_item);
	tp->t_flags |= XFS_TRANS_DIRTY;
	item->ic_item.li_desc->lid_flags |= XFS_LID_DIRTY;
}
/* * Add a locked inode to the transaction. * * The inode must be locked, and it cannot be associated with any transaction. */ void xfs_trans_ijoin( struct xfs_trans *tp, struct xfs_inode *ip) { xfs_inode_log_item_t *iip; ASSERT(ip->i_transp == NULL); ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL)); if (ip->i_itemp == NULL) xfs_inode_item_init(ip, ip->i_mount); iip = ip->i_itemp; ASSERT(iip->ili_lock_flags == 0); /* * Get a log_item_desc to point at the new item. */ xfs_trans_add_item(tp, &iip->ili_item); xfs_trans_inode_broot_debug(ip); /* * Initialize i_transp so we can find it with xfs_inode_incore() * in xfs_trans_iget() above. */ ip->i_transp = tp; }
/*
 * Allocate a "refcount update done" (CUD) log item for the given
 * intent and attach it to the transaction.
 */
struct xfs_cud_log_item *
xfs_trans_get_cud(
	struct xfs_trans		*tp,
	struct xfs_cui_log_item		*cuip)
{
	struct xfs_cud_log_item		*done_item;

	done_item = xfs_cud_init(tp->t_mountp, cuip);
	xfs_trans_add_item(tp, &done_item->cud_item);
	return done_item;
}
/*
 * Allocate a "bmap update done" (BUD) log item for the given intent
 * and attach it to the transaction.
 */
struct xfs_bud_log_item *
xfs_trans_get_bud(
	struct xfs_trans		*tp,
	struct xfs_bui_log_item		*buip)
{
	struct xfs_bud_log_item		*done_item;

	done_item = xfs_bud_init(tp->t_mountp, buip);
	xfs_trans_add_item(tp, &done_item->bud_item);
	return done_item;
}
/*
 * Allocate an "extent free intention" (EFI) log item sized for exactly
 * nextents extents and attach it to the transaction.  The caller must
 * fill in all nextents slots; partial use is not supported.
 */
xfs_efi_log_item_t *
xfs_trans_get_efi(xfs_trans_t *tp, uint nextents)
{
	xfs_efi_log_item_t	*intent;

	ASSERT(tp != NULL);
	ASSERT(nextents > 0);

	intent = xfs_efi_init(tp->t_mountp, nextents);
	ASSERT(intent != NULL);

	/* Attach the freshly allocated intent item to the transaction. */
	xfs_trans_add_item(tp, &intent->efi_item);
	return intent;
}
/*
 * Allocate a quotaoff log item and attach it to the transaction.
 */
xfs_qoff_logitem_t *
xfs_trans_get_qoff_item(
	xfs_trans_t		*tp,
	xfs_qoff_logitem_t	*startqoff,
	uint			flags)
{
	xfs_qoff_logitem_t	*qoff;

	ASSERT(tp != NULL);

	qoff = xfs_qm_qoff_logitem_init(tp->t_mountp, startqoff, flags);
	ASSERT(qoff != NULL);

	/* Attach the new quotaoff item to the transaction. */
	xfs_trans_add_item(tp, &qoff->qql_item);
	return qoff;
}
/* Allocate a CUI (refcount update intent) and join it to the transaction. */
STATIC void *
xfs_refcount_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_cui_log_item		*intent;

	ASSERT(tp != NULL);
	ASSERT(count > 0);

	intent = xfs_cui_init(tp->t_mountp, count);
	ASSERT(intent != NULL);

	/* Attach the new intent item to the transaction. */
	xfs_trans_add_item(tp, &intent->cui_item);
	return intent;
}
/* Allocate a BUI (bmap update intent) and join it to the transaction. */
STATIC void *
xfs_bmap_update_create_intent(
	struct xfs_trans		*tp,
	unsigned int			count)
{
	struct xfs_bui_log_item		*intent;

	ASSERT(count == XFS_BUI_MAX_FAST_EXTENTS);
	ASSERT(tp != NULL);

	intent = xfs_bui_init(tp->t_mountp);
	ASSERT(intent != NULL);

	/* Attach the new intent item to the transaction. */
	xfs_trans_add_item(tp, &intent->bui_item);
	return intent;
}
/*
 * Join an already-locked dquot to a transaction.
 *
 * The dquot must be locked and must not yet belong to this transaction.
 */
void
xfs_trans_dqjoin(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	ASSERT(dqp->q_transp != tp);
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(dqp->q_logitem.qli_dquot == dqp);

	/* Hook the dquot's log item into this transaction. */
	xfs_trans_add_item(tp, &dqp->q_logitem.qli_item);

	/*
	 * Record the owning transaction in q_transp so we can later
	 * tell whether this dquot is associated with it.
	 */
	dqp->q_transp = tp;
}
/*
 * Allocate an "extent free done" (EFD) log item sized for exactly
 * nextents extents and attach it to the transaction.  The caller must
 * fill in all nextents slots; partial use is not supported.
 */
xfs_efd_log_item_t *
xfs_trans_get_efd(xfs_trans_t *tp, xfs_efi_log_item_t *efip, uint nextents)
{
	xfs_efd_log_item_t	*done_item;

	ASSERT(tp != NULL);
	ASSERT(nextents > 0);

	done_item = xfs_efd_init(tp->t_mountp, efip, nextents);
	ASSERT(done_item != NULL);

	/* Attach the new done item to the transaction. */
	xfs_trans_add_item(tp, (xfs_log_item_t *)done_item);
	return done_item;
}
/*
 * Join an already-locked inode to a transaction.
 *
 * The caller must hold the ILOCK exclusively and the inode must not
 * belong to any other transaction.  A non-zero lock_flags asks the
 * commit path to unlock the inode when the transaction completes.
 */
void
xfs_trans_ijoin(
	struct xfs_trans	*tp,
	struct xfs_inode	*ip,
	uint			lock_flags)
{
	xfs_inode_log_item_t	*ilip;

	ASSERT(xfs_isilocked(ip, XFS_ILOCK_EXCL));

	/* Lazily initialize the log item embedded in the incore inode. */
	if (ip->i_itemp == NULL)
		xfs_inode_item_init(ip, ip->i_mount);
	ilip = ip->i_itemp;

	ASSERT(ilip->ili_lock_flags == 0);
	ilip->ili_lock_flags = lock_flags;

	/* Hook the inode log item into this transaction. */
	xfs_trans_add_item(tp, &ilip->ili_item);
}
/*
 * Join an already-locked inode to a transaction.
 *
 * The inode must be locked (at minimum ILOCK exclusive) and must not
 * belong to any transaction.  lock_flags describes the locks the
 * caller already holds on the inode.
 */
void
xfs_trans_ijoin(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	xfs_inode_log_item_t	*ilip;

	ASSERT(ip->i_transp == NULL);
	ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
	ASSERT(lock_flags & XFS_ILOCK_EXCL);

	/* Lazily initialize the log item embedded in the incore inode. */
	if (ip->i_itemp == NULL)
		xfs_inode_item_init(ip, ip->i_mount);
	ilip = ip->i_itemp;

	ASSERT(ilip->ili_flags == 0);
	ASSERT(ilip->ili_ilock_recur == 0);
	ASSERT(ilip->ili_iolock_recur == 0);

	/* Hook the inode log item into this transaction. */
	xfs_trans_add_item(tp, &ilip->ili_item);

	xfs_trans_inode_broot_debug(ip);

	/* Note in the log item any IO lock the caller already holds. */
	if (lock_flags & XFS_IOLOCK_EXCL)
		ilip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
	else if (lock_flags & XFS_IOLOCK_SHARED)
		ilip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;

	/*
	 * Record the owning transaction so xfs_inode_incore() can find
	 * this inode from xfs_trans_iget().
	 */
	ip->i_transp = tp;
}
/*
 * Join an already-locked inode to a transaction (libxfs version).
 * The inode must not currently belong to any transaction.
 */
void
libxfs_trans_ijoin(
	xfs_trans_t	*tp,
	xfs_inode_t	*ip,
	uint		lock_flags)
{
	xfs_inode_log_item_t	*ilip;

	ASSERT(ip->i_transp == NULL);

	/* Lazily initialize the log item embedded in the incore inode. */
	if (ip->i_itemp == NULL)
		xfs_inode_item_init(ip, ip->i_mount);
	ilip = ip->i_itemp;

	ASSERT(ilip->ili_flags == 0);
	ASSERT(ilip->ili_inode != NULL);

	/* Hook the inode log item into this transaction. */
	xfs_trans_add_item(tp, (xfs_log_item_t *)ilip);

	/* Record the owning transaction on the inode. */
	ip->i_transp = tp;
#ifdef XACT_DEBUG
	fprintf(stderr, "ijoin'd inode %llu, transaction %p\n", ip->i_ino, tp);
#endif
}
/*
 * Join an already-locked dquot to a transaction.
 *
 * The dquot must be locked, its log item must already be initialized,
 * and it must not belong to any transaction.
 */
void
xfs_trans_dqjoin(
	xfs_trans_t	*tp,
	xfs_dquot_t	*dqp)
{
	xfs_dq_logitem_t	*qlip;

	ASSERT(! XFS_DQ_IS_ADDEDTO_TRX(tp, dqp));
	ASSERT(XFS_DQ_IS_LOCKED(dqp));
	ASSERT(XFS_DQ_IS_LOGITEM_INITD(dqp));

	qlip = &dqp->q_logitem;

	/* Hook the dquot's log item into this transaction. */
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)qlip);

	/*
	 * Record the owning transaction in q_transp so we can later
	 * tell whether this dquot is associated with it.
	 */
	dqp->q_transp = tp;
}
/*
 * Get and lock the inode for the caller if it is not already
 * locked within the given transaction.  If it is already locked
 * within the transaction, just increment its lock recursion count
 * and return a pointer to it.
 *
 * For an inode to be locked in a transaction, the inode lock, as
 * opposed to the io lock, must be taken exclusively.  This ensures
 * that the inode can be involved in only 1 transaction at a time.
 * Lock recursion is handled on the io lock, but only for lock modes
 * of equal or lesser strength.  That is, you can recur on the io lock
 * held EXCL with a SHARED request but not vice versa.  Also, if
 * the inode is already a part of the transaction then you cannot
 * go from not holding the io lock to having it EXCL or SHARED.
 *
 * Use the inode cache routine xfs_inode_incore() to find the inode
 * if it is already owned by this transaction.
 *
 * If we don't already own the inode, use xfs_iget() to get it.
 * Since the inode log item structure is embedded in the incore
 * inode structure and is initialized when the inode is brought
 * into memory, there is nothing to do with it here.
 *
 * If the given transaction pointer is NULL, just call xfs_iget().
 * This simplifies code which must handle both cases.
 *
 * Returns 0 on success or the error from xfs_iget(); *ipp is only
 * valid on success.
 */
int
xfs_trans_iget(
	xfs_mount_t	*mp,
	xfs_trans_t	*tp,
	xfs_ino_t	ino,
	uint		lock_flags,
	xfs_inode_t	**ipp)
{
	int			error;
	xfs_inode_t		*ip;
	xfs_inode_log_item_t	*iip;

	/*
	 * If the transaction pointer is NULL, just call the normal
	 * xfs_iget().
	 */
	if (tp == NULL) {
		return (xfs_iget(mp, NULL, ino, lock_flags, ipp, 0));
	}

	/*
	 * If we find the inode in core with this transaction
	 * pointer in its i_transp field, then we know we already
	 * have it locked.  In this case we just increment the lock
	 * recursion count and return the inode to the caller.
	 * Assert that the inode is already locked in the mode requested
	 * by the caller.  We cannot do lock promotions yet, so
	 * die if someone gets this wrong.
	 */
	if ((ip = xfs_inode_incore(tp->t_mountp, ino, tp)) != NULL) {
		/*
		 * Make sure that the inode lock is held EXCL and
		 * that the io lock is never upgraded when the inode
		 * is already a part of the transaction.
		 */
		ASSERT(ip->i_itemp != NULL);
		ASSERT(lock_flags & XFS_ILOCK_EXCL);
		ASSERT(ismrlocked(&ip->i_lock, MR_UPDATE));
		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
		       ismrlocked(&ip->i_iolock, MR_UPDATE));
		ASSERT((!(lock_flags & XFS_IOLOCK_EXCL)) ||
		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_EXCL));
		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
		       ismrlocked(&ip->i_iolock, (MR_UPDATE | MR_ACCESS)));
		ASSERT((!(lock_flags & XFS_IOLOCK_SHARED)) ||
		       (ip->i_itemp->ili_flags & XFS_ILI_IOLOCKED_ANY));

		/* Bump the per-lock-class recursion counts. */
		if (lock_flags & (XFS_IOLOCK_SHARED | XFS_IOLOCK_EXCL)) {
			ip->i_itemp->ili_iolock_recur++;
		}
		if (lock_flags & XFS_ILOCK_EXCL) {
			ip->i_itemp->ili_ilock_recur++;
		}
		*ipp = ip;
		return 0;
	}

	ASSERT(lock_flags & XFS_ILOCK_EXCL);
	error = xfs_iget(tp->t_mountp, tp, ino, lock_flags, &ip, 0);
	if (error) {
		return error;
	}
	ASSERT(ip != NULL);

	/*
	 * Get a log_item_desc to point at the new item.
	 */
	if (ip->i_itemp == NULL)
		xfs_inode_item_init(ip, mp);
	iip = ip->i_itemp;
	(void) xfs_trans_add_item(tp, (xfs_log_item_t *)(iip));

	xfs_trans_inode_broot_debug(ip);

	/*
	 * If the IO lock has been acquired, mark that in
	 * the inode log item so we'll know to unlock it
	 * when the transaction commits.
	 */
	ASSERT(iip->ili_flags == 0);
	if (lock_flags & XFS_IOLOCK_EXCL) {
		iip->ili_flags |= XFS_ILI_IOLOCKED_EXCL;
	} else if (lock_flags & XFS_IOLOCK_SHARED) {
		iip->ili_flags |= XFS_ILI_IOLOCKED_SHARED;
	}

	/*
	 * Initialize i_transp so we can find it with xfs_inode_incore()
	 * above.
	 */
	ip->i_transp = tp;

	*ipp = ip;
	return 0;
}