/*
 * Return a pointer to the first descriptor in the chunk list.
 * This does not return NULL if there are none; it panics (via ASSERT
 * and a console warning) instead.
 *
 * The first in-use descriptor must live in the first or second chunk:
 * only the embedded first chunk may sit empty on the list; any other
 * chunk is freed as soon as it drains.
 *
 * At some point this and xfs_trans_next_item() should be optimized
 * to quickly look at the mask to determine if there is anything to
 * look at.
 */
xfs_log_item_desc_t *
xfs_trans_first_item(xfs_trans_t *tp)
{
	xfs_log_item_chunk_t	*licp;
	int			slot;

	licp = &tp->t_items;

	/*
	 * The embedded chunk is the only one allowed to be empty;
	 * if it is, the first item must be in the next chunk.
	 */
	if (XFS_LIC_ARE_ALL_FREE(licp))
		licp = licp->lic_next;

	/*
	 * Hand back the first in-use slot of this chunk.
	 */
	ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
	for (slot = 0; slot < licp->lic_unused; slot++) {
		if (!XFS_LIC_ISFREE(licp, slot))
			return XFS_LIC_SLOT(licp, slot);
	}

	cmn_err(CE_WARN, "xfs_trans_first_item() -- no first item");
	return NULL;
}
/*
 * Given a descriptor in a transaction's item list, return the next
 * in-use descriptor, or NULL if lidp was the last one.
 *
 * Searches the remainder of lidp's chunk first, then the following
 * chunk. A following chunk is never empty (empty chunks other than
 * the first are freed immediately), so finding nothing there is a bug.
 */
/*ARGSUSED*/
xfs_log_item_desc_t *
xfs_trans_next_item(xfs_trans_t *tp, xfs_log_item_desc_t *lidp)
{
	xfs_log_item_chunk_t	*licp;
	int			slot;

	licp = XFS_LIC_DESC_TO_CHUNK(lidp);

	/*
	 * Scan the rest of the current chunk. Bounding the loop by
	 * lic_unused keeps us from reading past the initialized slots.
	 */
	for (slot = (int)XFS_LIC_DESC_TO_SLOT(lidp) + 1;
	     slot < licp->lic_unused; slot++) {
		if (!XFS_LIC_ISFREE(licp, slot))
			return XFS_LIC_SLOT(licp, slot);
	}

	/*
	 * Nothing left here; if a next chunk exists it must hold an
	 * item, because it would have been freed had it gone empty.
	 */
	if (licp->lic_next == NULL)
		return NULL;

	licp = licp->lic_next;
	ASSERT(!XFS_LIC_ARE_ALL_FREE(licp));
	for (slot = 0; slot < licp->lic_unused; slot++) {
		if (!XFS_LIC_ISFREE(licp, slot))
			return XFS_LIC_SLOT(licp, slot);
	}

	ASSERT(0);
	/* NOTREACHED */
	return NULL;	/* keep gcc quiet */
}
/*
 * Unlock each item pointed to by an in-use descriptor in the given
 * chunk, stamping the commit lsn into each item first when one is
 * supplied (commit_lsn != NULLCOMMITLSN).
 *
 * Descriptors of items that are not dirty in this transaction are
 * released, but only when freeing_chunk is zero; a non-zero
 * freeing_chunk means the caller will discard the whole chunk, and
 * also forces unlocking of items marked XFS_LID_SYNC_UNLOCK.
 *
 * Returns the number of descriptors released.
 */
STATIC int
xfs_trans_unlock_chunk(
	xfs_log_item_chunk_t	*licp,
	int			freeing_chunk,
	int			abort,
	xfs_lsn_t		commit_lsn)
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	int			slot;
	int			nfreed;

	nfreed = 0;
	for (slot = 0, lidp = licp->lic_descs;
	     slot < licp->lic_unused; slot++, lidp++) {
		if (XFS_LIC_ISFREE(licp, slot))
			continue;

		lip = lidp->lid_item;
		lip->li_desc = NULL;

		if (commit_lsn != NULLCOMMITLSN)
			IOP_COMMITTING(lip, commit_lsn);
		if (abort)
			lip->li_flags |= XFS_LI_ABORTED;

		/*
		 * SYNC_UNLOCK items stay locked here unless we are
		 * tearing the chunk down or aborting.
		 */
		if (freeing_chunk || abort ||
		    !(lidp->lid_flags & XFS_LID_SYNC_UNLOCK)) {
			IOP_UNLOCK(lip);
		}

		/*
		 * Release the descriptor unless the caller is about to
		 * free the entire chunk regardless; dirty items keep
		 * their descriptors unless the transaction aborted.
		 */
		if (!freeing_chunk &&
		    (abort || !(lidp->lid_flags & XFS_LID_DIRTY))) {
			XFS_LIC_RELSE(licp, slot);
			nfreed++;
		}
	}

	return nfreed;
}
/*
 * This is called to perform the commit processing for each
 * item described by the given chunk.
 *
 * The commit processing consists of unlocking items which were
 * held locked with the SYNC_UNLOCK attribute, calling the committed
 * routine of each logged item, updating the item's position in the AIL
 * if necessary, and unpinning each item.  If the committed routine
 * returns -1, then do nothing further with the item because it
 * may have been freed.
 *
 * Since items are unlocked when they are copied to the incore
 * log, it is possible for two transactions to be completing
 * and manipulating the same item simultaneously.  The AIL lock
 * will protect the lsn field of each item.  The value of this
 * field can never go backwards.
 *
 * We unpin the items after repositioning them in the AIL, because
 * otherwise they could be immediately flushed and we'd have to race
 * with the flusher trying to pull the item from the AIL as we add it.
 */
STATIC void
xfs_trans_chunk_committed(
	xfs_log_item_chunk_t	*licp,	/* chunk whose items committed */
	xfs_lsn_t		lsn,	/* commit lsn for this transaction */
	int			aborted)	/* non-zero if the commit aborted */
{
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	xfs_lsn_t		item_lsn;
	struct xfs_mount	*mp;
	int			i;
	SPLDECL(s);		/* saved interrupt level for AIL_LOCK/UNLOCK */

	lidp = licp->lic_descs;
	for (i = 0; i < licp->lic_unused; i++, lidp++) {
		/* Skip slots with no item attached. */
		if (XFS_LIC_ISFREE(licp, i)) {
			continue;
		}

		lip = lidp->lid_item;
		if (aborted)
			lip->li_flags |= XFS_LI_ABORTED;

		/*
		 * Send in the ABORTED flag to the COMMITTED routine
		 * so that it knows whether the transaction was aborted
		 * or not.
		 */
		item_lsn = IOP_COMMITTED(lip, lsn);

		/*
		 * If the committed routine returns -1, make
		 * no more references to the item.
		 */
		if (XFS_LSN_CMP(item_lsn, (xfs_lsn_t)-1) == 0) {
			continue;
		}

		/*
		 * If the returned lsn is greater than what it
		 * contained before, update the location of the
		 * item in the AIL.  If it is not, then do nothing.
		 * Items can never move backwards in the AIL.
		 *
		 * While the new lsn should usually be greater, it
		 * is possible that a later transaction completing
		 * simultaneously with an earlier one using the
		 * same item could complete first with a higher lsn.
		 * This would cause the earlier transaction to fail
		 * the test below.
		 */
		mp = lip->li_mountp;
		AIL_LOCK(mp,s);
		if (XFS_LSN_CMP(item_lsn, lip->li_lsn) > 0) {
			/*
			 * This will set the item's lsn to item_lsn
			 * and update the position of the item in
			 * the AIL.
			 *
			 * xfs_trans_update_ail() drops the AIL lock.
			 */
			xfs_trans_update_ail(mp, lip, item_lsn, s);
		} else {
			AIL_UNLOCK(mp, s);
		}

		/*
		 * Now that we've repositioned the item in the AIL,
		 * unpin it so it can be flushed. Pass information
		 * about buffer stale state down from the log item
		 * flags, if anyone else stales the buffer we do not
		 * want to pay any attention to it.
		 */
		IOP_UNPIN(lip, lidp->lid_flags & XFS_LID_BUF_STALE);
	}
}
/*
 * Unlock all of the transaction's items and free the transaction.
 * The transaction must not have modified any of its items, because
 * there is no way to restore them to their previous state.
 *
 * If the transaction has made a log reservation, make sure to release
 * it as well.
 */
void
xfs_trans_cancel(
	xfs_trans_t	*tp,	/* transaction to cancel */
	int		flags)	/* XFS_TRANS_ABORT / XFS_TRANS_RELEASE_LOG_RES */
{
	int			log_flags;
#ifdef DEBUG
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	xfs_log_item_t		*lip;
	int			i;
#endif
	xfs_mount_t		*mp = tp->t_mountp;

	/*
	 * See if the caller is being too lazy to figure out if
	 * the transaction really needs an abort.
	 */
	if ((flags & XFS_TRANS_ABORT) && !(tp->t_flags & XFS_TRANS_DIRTY))
		flags &= ~XFS_TRANS_ABORT;
	/*
	 * See if the caller is relying on us to shut down the
	 * filesystem.  This happens in paths where we detect
	 * corruption and decide to give up.
	 */
	if ((tp->t_flags & XFS_TRANS_DIRTY) &&
	    !XFS_FORCED_SHUTDOWN(mp)) {
		XFS_ERROR_REPORT("xfs_trans_cancel", XFS_ERRLEVEL_LOW, mp);
		xfs_force_shutdown(mp, XFS_CORRUPT_INCORE);
	}
#ifdef DEBUG
	/*
	 * In debug kernels, verify that a non-aborting cancel holds no
	 * extent-free-done (EFD) items; cancelling one of those without
	 * an abort would leave the log inconsistent.  Skipped once the
	 * filesystem is already shut down.
	 */
	if (!(flags & XFS_TRANS_ABORT)) {
		licp = &(tp->t_items);
		while (licp != NULL) {
			lidp = licp->lic_descs;
			for (i = 0; i < licp->lic_unused; i++, lidp++) {
				if (XFS_LIC_ISFREE(licp, i)) {
					continue;
				}
				lip = lidp->lid_item;
				if (!XFS_FORCED_SHUTDOWN(mp))
					ASSERT(!(lip->li_type == XFS_LI_EFD));
			}
			licp = licp->lic_next;
		}
	}
#endif
	/* Back out superblock and dquot reservation deltas. */
	xfs_trans_unreserve_and_mod_sb(tp);
	XFS_TRANS_UNRESERVE_AND_MOD_DQUOTS(mp, tp);

	/*
	 * Release the log reservation, permanently if the caller asked
	 * for it (requires a permanent-log-reservation transaction).
	 */
	if (tp->t_ticket) {
		if (flags & XFS_TRANS_RELEASE_LOG_RES) {
			ASSERT(tp->t_flags & XFS_TRANS_PERM_LOG_RES);
			log_flags = XFS_LOG_REL_PERM_RESERV;
		} else {
			log_flags = 0;
		}
		xfs_log_done(mp, tp->t_ticket, NULL, log_flags);
	}

	/* mark this thread as no longer being in a transaction */
	PFLAGS_RESTORE_FSTRANS(&tp->t_pflags);

	/* Unlock/free the items, the busy extent list, then tp itself. */
	xfs_trans_free_items(tp, flags);
	xfs_trans_free_busy(tp);
	xfs_trans_free(tp);
}
/*
 * Out-of-line wrapper for the XFS_LIC_ISFREE() macro so the check can
 * be invoked where macros are unavailable (e.g. from a debugger).
 */
int
xfs_lic_isfree(xfs_log_item_chunk_t *chunkp, int idx)
{
	return XFS_LIC_ISFREE(chunkp, idx);
}
/*
 * Add the given log item to the transaction's list of log items.
 *
 * A free log item descriptor is located (allocating a fresh chunk when
 * none remain) and pointed at the item; the item's li_desc is pointed
 * back at that descriptor and its li_mountp set from the transaction.
 * Returns the descriptor now referring to the item.
 */
xfs_log_item_desc_t *
xfs_trans_add_item(xfs_trans_t *tp, xfs_log_item_t *lip)
{
	xfs_log_item_chunk_t	*licp;
	xfs_log_item_desc_t	*lidp;
	int			slot = 0;

	if (tp->t_items_free == 0) {
		/*
		 * No free descriptors anywhere: allocate a new chunk,
		 * push it onto the front of the chunk list, and take
		 * its slot 0 for this item.
		 */
		licp = (xfs_log_item_chunk_t*)
		       kmem_alloc(sizeof(xfs_log_item_chunk_t), KM_SLEEP);
		ASSERT(licp != NULL);

		XFS_LIC_INIT(licp);
		XFS_LIC_CLAIM(licp, 0);
		licp->lic_unused = 1;
		XFS_LIC_INIT_SLOT(licp, 0);
		lidp = XFS_LIC_SLOT(licp, 0);

		licp->lic_next = tp->t_items.lic_next;
		tp->t_items.lic_next = licp;
		tp->t_items_free = XFS_LIC_NUM_SLOTS - 1;

		/*
		 * Wire up the descriptor and the generic portion of
		 * the log item to each other.
		 */
		lidp->lid_item = lip;
		lidp->lid_flags = 0;
		lidp->lid_size = 0;
		lip->li_desc = lidp;
		lip->li_mountp = tp->t_mountp;
		return lidp;
	}

	/*
	 * Walk the chunk list for a chunk with a vacancy.  Prefer the
	 * never-used slot just past lic_unused; otherwise search the
	 * initialized range for a released slot.
	 */
	for (licp = &tp->t_items; licp != NULL; licp = licp->lic_next) {
		if (!XFS_LIC_VACANCY(licp))
			continue;
		if (licp->lic_unused <= XFS_LIC_MAX_SLOT) {
			slot = licp->lic_unused;
			ASSERT(XFS_LIC_ISFREE(licp, slot));
		} else {
			for (slot = 0; slot <= XFS_LIC_MAX_SLOT; slot++) {
				if (XFS_LIC_ISFREE(licp, slot))
					break;
			}
			ASSERT(slot <= XFS_LIC_MAX_SLOT);
		}
		break;
	}
	ASSERT(licp != NULL);

	/*
	 * Claim the slot we found, initializing it if it has never
	 * been used, then wire descriptor and item together.
	 */
	XFS_LIC_CLAIM(licp, slot);
	if (licp->lic_unused <= slot) {
		licp->lic_unused = slot + 1;
		XFS_LIC_INIT_SLOT(licp, slot);
	}
	lidp = XFS_LIC_SLOT(licp, slot);
	tp->t_items_free--;
	lidp->lid_item = lip;
	lidp->lid_flags = 0;
	lidp->lid_size = 0;
	lip->li_desc = lidp;
	lip->li_mountp = tp->t_mountp;
	return lidp;
}