STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	switch (VFS_I(ip)->i_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);
	if (ip->i_cowfp)
		xfs_idestroy_fork(ip, XFS_COW_FORK);

	if (ip->i_itemp) {
		ASSERT(!(ip->i_itemp->ili_item.li_flags & XFS_LI_IN_AIL));
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	kmem_zone_free(xfs_inode_zone, ip);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo);
	(tp)->t_dqinfo = NULL;
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * This is called when the transaction that should be committing the
 * EFD corresponding to the given EFI is aborted.  The committed and
 * canceled flags are used to coordinate the freeing of the EFI and
 * the references by the transaction that committed it.
 */
STATIC void
xfs_efi_cancel(
	xfs_efi_log_item_t	*efip)
{
	int		nexts;
	int		size;
	xfs_mount_t	*mp;
	SPLDECL(s);

	mp = efip->efi_item.li_mountp;
	AIL_LOCK(mp, s);
	if (efip->efi_flags & XFS_EFI_COMMITTED) {
		/*
		 * xfs_trans_delete_ail() drops the AIL lock.
		 */
		xfs_trans_delete_ail(mp, (xfs_log_item_t *)efip, s);

		nexts = efip->efi_format.efi_nextents;
		if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
			size = sizeof(xfs_efi_log_item_t);
			size += (nexts - 1) * sizeof(xfs_extent_t);
			kmem_free(efip, size);
		} else {
			kmem_zone_free(xfs_efi_zone, efip);
		}
	} else {
		efip->efi_flags |= XFS_EFI_CANCELED;
		AIL_UNLOCK(mp, s);
	}

	return;
}

/*
 * Tear down a filestream item: clear the inode flag, drop the AG and
 * inode references held by the item, then return it to its zone.
 */
STATIC void
xfs_fstrm_free_func(
	unsigned long	ino,
	void		*data)
{
	fstrm_item_t	*item = (fstrm_item_t *)data;
	xfs_inode_t	*ip = item->ip;

	ASSERT(ip->i_ino == ino);

	xfs_iflags_clear(ip, XFS_IFILESTREAM);

	xfs_filestream_put_ag(ip->i_mount, item->ag);

	TRACE_FREE(ip->i_mount, ip, item->pip, item->ag,
		   xfs_filestream_peek_ag(ip->i_mount, item->ag));

	IRELE(ip);
	if (item->pip)
		IRELE(item->pip);

	kmem_zone_free(item_zone, item);
}

STATIC void
xfs_efd_item_free(struct xfs_efd_log_item *efdp)
{
	if (efdp->efd_format.efd_nextents > XFS_EFD_MAX_FAST_EXTENTS)
		kmem_free(efdp);
	else
		kmem_zone_free(xfs_efd_zone, efdp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	xfs_trans_t	*tp)
{
	atomic_dec(&tp->t_mountp->m_active_trans);
	XFS_TRANS_FREE_DQINFO(tp->t_mountp, tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

/*
 * Free the transaction structure.  If there is more clean up
 * to do when the structure is freed, add it here.
 */
STATIC void
xfs_trans_free(
	xfs_trans_t	*tp)
{
	atomic_dec(&tp->t_mountp->m_active_trans);
	xfs_trans_free_dqinfo(tp);
	kmem_zone_free(xfs_trans_zone, tp);
}

void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_zone_free(xfs_qm_dqtrxzone, tp->t_dqinfo);
	tp->t_dqinfo = NULL;
}

void
xfs_cui_item_free(
	struct xfs_cui_log_item	*cuip)
{
	if (cuip->cui_format.cui_nextents > XFS_CUI_MAX_FAST_EXTENTS)
		kmem_free(cuip);
	else
		kmem_zone_free(xfs_cui_zone, cuip);
}

void
xfs_efi_item_free(
	struct xfs_efi_log_item	*efip)
{
	if (efip->efi_format.efi_nextents > XFS_EFI_MAX_FAST_EXTENTS)
		kmem_free(efip);
	else
		kmem_zone_free(xfs_efi_zone, efip);
}

/*
 * RCU callback that returns the inode to its zone once the grace
 * period has expired.
 */
STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	kmem_zone_free(xfs_inode_zone, ip);
}

STATIC void
xfs_trans_free_dqinfo(
	xfs_trans_t	*tp)
{
	if (!tp->t_dqinfo)
		return;
	kmem_zone_free(xfs_Gqm->qm_dqtrxzone, (tp)->t_dqinfo);
	(tp)->t_dqinfo = NULL;
}

STATIC void
xfs_inode_free_callback(
	struct rcu_head		*head)
{
	struct inode		*inode = container_of(head, struct inode, i_rcu);
	struct xfs_inode	*ip = XFS_I(inode);

	INIT_LIST_HEAD(&inode->i_dentry);
	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * Because we have ordered buffers being tracked in the AIL for the inode
 * creation, we don't need the create item after this. Hence we can free
 * the log item and return -1 to tell the caller we're done with the item.
 */
STATIC xfs_lsn_t
xfs_icreate_item_committed(
	struct xfs_log_item	*lip,
	xfs_lsn_t		lsn)
{
	struct xfs_icreate_item	*icp = ICR_ITEM(lip);

	kmem_zone_free(xfs_icreate_zone, icp);
	return (xfs_lsn_t)-1;
}

/*
 * If the transaction was aborted, free the inode create log item here.
 */
STATIC void
xfs_icreate_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_icreate_item	*icp = ICR_ITEM(lip);

	if (icp->ic_item.li_flags & XFS_LI_ABORTED)
		kmem_zone_free(xfs_icreate_zone, icp);
	return;
}

STATIC void
xfs_efd_item_free(xfs_efd_log_item_t *efdp)
{
	int	nexts = efdp->efd_format.efd_nextents;

	if (nexts > XFS_EFD_MAX_FAST_EXTENTS) {
		kmem_free(efdp);
	} else {
		kmem_zone_free(xfs_efd_zone, efdp);
	}
}

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	mutex_destroy(&dqp->q_qlock);
	kmem_zone_free(xfs_qm_dqzone, dqp);

	XFS_STATS_DEC(xs_qm_dquot);
}

/*
 * The BUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the BUI and free the
 * BUD.
 */
STATIC void
xfs_bud_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_bud_log_item	*budp = BUD_ITEM(lip);

	if (lip->li_flags & XFS_LI_ABORTED) {
		xfs_bui_release(budp->bud_buip);
		kmem_zone_free(xfs_bud_zone, budp);
	}
}

/*
 * Free an EFI log item.  Items whose extent count overflowed the fast
 * array were allocated directly, the rest come from the EFI zone.
 */
void
xfs_efi_item_free(xfs_efi_log_item_t *efip)
{
	int	nexts = efip->efi_format.efi_nextents;

	if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
		kmem_free(efip);
	} else {
		kmem_zone_free(xfs_efi_zone, efip);
	}
}

/*
 * Free the inode log item and any memory hanging off of it.
 */
void
xfs_inode_item_destroy(
	xfs_inode_t	*ip)
{
#ifdef XFS_TRANS_DEBUG
	if (ip->i_itemp->ili_root_size != 0) {
		kmem_free(ip->i_itemp->ili_orig_root);
	}
#endif
	kmem_zone_free(xfs_ili_zone, ip->i_itemp);
}

/*
 * The CUD is either committed or aborted if the transaction is cancelled. If
 * the transaction is cancelled, drop our reference to the CUI and free the
 * CUD.
 */
STATIC void
xfs_cud_item_unlock(
	struct xfs_log_item	*lip)
{
	struct xfs_cud_log_item	*cudp = CUD_ITEM(lip);

	if (lip->li_flags & XFS_LI_ABORTED) {
		xfs_cui_release(cudp->cud_cuip);
		kmem_zone_free(xfs_cud_zone, cudp);
	}
}

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(list_empty(&dqp->q_lru));

	kmem_free(dqp->q_logitem.qli_item.li_lv_shadow);
	mutex_destroy(&dqp->q_qlock);

	XFS_STATS_DEC(dqp->q_mount, xs_qm_dquot);
	kmem_zone_free(xfs_qm_dqzone, dqp);
}

STATIC void
xfs_buf_item_free(
	xfs_buf_log_item_t	*bip)
{
#ifdef XFS_TRANS_DEBUG
	kmem_free(bip->bli_orig);
	kmem_free(bip->bli_logged);
#endif /* XFS_TRANS_DEBUG */

	xfs_buf_item_free_format(bip);
	kmem_zone_free(xfs_buf_item_zone, bip);
}

/*
 * ktrace_free()
 *
 * Free up the ktrace header and buffer.  It is up to the caller
 * to ensure that no-one is referencing it.
 */
void
ktrace_free(ktrace_t *ktp)
{
	int	entries_size;

	if (ktp == (ktrace_t *)NULL)
		return;

	spinlock_destroy(&ktp->kt_lock);

	/*
	 * Special treatment for the Vnode trace buffer.
	 */
	if (ktp->kt_nentries == ktrace_zentries) {
		kmem_zone_free(ktrace_ent_zone, ktp->kt_entries);
	} else {
		entries_size = (int)(ktp->kt_nentries * sizeof(ktrace_entry_t));

		kmem_free(ktp->kt_entries, entries_size);
	}

	kmem_zone_free(ktrace_hdr_zone, ktp);
}

/*
 * This is called when the transaction logging the EFI is aborted.
 * Free up the EFI and return.  No need to clean up the slot for
 * the item in the transaction.  That was done by the unpin code
 * which is called prior to this routine in the abort/fs-shutdown path.
 */
STATIC void
xfs_efi_item_abort(xfs_efi_log_item_t *efip)
{
	int	nexts;
	int	size;

	nexts = efip->efi_format.efi_nextents;
	if (nexts > XFS_EFI_MAX_FAST_EXTENTS) {
		size = sizeof(xfs_efi_log_item_t);
		size += (nexts - 1) * sizeof(xfs_extent_t);
		kmem_free(efip, size);
	} else {
		kmem_zone_free(xfs_efi_zone, efip);
	}
	return;
}

/*
 * Allocate and initialise an xfs_inode.
 */
struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	/* VFS doesn't initialise i_mode! */
	VFS_I(ip)->i_mode = 0;

	XFS_STATS_INC(mp, vn_active);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(!xfs_isiflocked(ip));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	ip->i_cowfp = NULL;
	ip->i_cnextents = 0;
	ip->i_cformat = XFS_DINODE_FMT_EXTENTS;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(ip->i_d));

	return ip;
}

/*
 * This is called to free all the memory associated with a dquot
 */
void
xfs_qm_dqdestroy(
	xfs_dquot_t	*dqp)
{
	ASSERT(! XFS_DQ_IS_ON_FREELIST(dqp));

	mutex_destroy(&dqp->q_qlock);
	sv_destroy(&dqp->q_pinwait);

#ifdef XFS_DQUOT_TRACE
	if (dqp->q_trace)
		ktrace_free(dqp->q_trace);
	dqp->q_trace = NULL;
#endif
	kmem_zone_free(xfs_Gqm->qm_dqzone, dqp);
	atomic_dec(&xfs_Gqm->qm_totaldquots);
}

void
xfs_inode_free(
	struct xfs_inode	*ip)
{
	switch (ip->i_d.di_mode & S_IFMT) {
	case S_IFREG:
	case S_IFDIR:
	case S_IFLNK:
		xfs_idestroy_fork(ip, XFS_DATA_FORK);
		break;
	}

	if (ip->i_afp)
		xfs_idestroy_fork(ip, XFS_ATTR_FORK);

	if (ip->i_itemp) {
		/*
		 * Only if we are shutting down the fs will we see an
		 * inode still in the AIL. If it is there, we should remove
		 * it to prevent a use-after-free from occurring.
		 */
		xfs_log_item_t	*lip = &ip->i_itemp->ili_item;
		struct xfs_ail	*ailp = lip->li_ailp;

		ASSERT(((lip->li_flags & XFS_LI_IN_AIL) == 0) ||
				       XFS_FORCED_SHUTDOWN(ip->i_mount));
		if (lip->li_flags & XFS_LI_IN_AIL) {
			spin_lock(&ailp->xa_lock);
			if (lip->li_flags & XFS_LI_IN_AIL)
				xfs_trans_ail_delete(ailp, lip);
			else
				spin_unlock(&ailp->xa_lock);
		}
		xfs_inode_item_destroy(ip);
		ip->i_itemp = NULL;
	}

	/* asserts to verify all state is correct here */
	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));

	kmem_zone_free(xfs_inode_zone, ip);
}

/*
 * Allocate and initialise an xfs_inode.
 */
STATIC struct xfs_inode *
xfs_inode_alloc(
	struct xfs_mount	*mp,
	xfs_ino_t		ino)
{
	struct xfs_inode	*ip;

	/*
	 * if this didn't occur in transactions, we could use
	 * KM_MAYFAIL and return NULL here on ENOMEM. Set the
	 * code up to do this anyway.
	 */
	ip = kmem_zone_alloc(xfs_inode_zone, KM_SLEEP);
	if (!ip)
		return NULL;
	if (inode_init_always(mp->m_super, VFS_I(ip))) {
		kmem_zone_free(xfs_inode_zone, ip);
		return NULL;
	}

	ASSERT(atomic_read(&ip->i_iocount) == 0);
	ASSERT(atomic_read(&ip->i_pincount) == 0);
	ASSERT(!spin_is_locked(&ip->i_flags_lock));
	ASSERT(completion_done(&ip->i_flush));
	ASSERT(ip->i_ino == 0);

	mrlock_init(&ip->i_iolock, MRLOCK_BARRIER, "xfsio", ip->i_ino);
	lockdep_set_class_and_name(&ip->i_iolock.mr_lock,
			       &xfs_iolock_active, "xfs_iolock_active");

	/* initialise the xfs inode */
	ip->i_ino = ino;
	ip->i_mount = mp;
	memset(&ip->i_imap, 0, sizeof(struct xfs_imap));
	ip->i_afp = NULL;
	memset(&ip->i_df, 0, sizeof(xfs_ifork_t));
	ip->i_flags = 0;
	ip->i_update_core = 0;
	ip->i_delayed_blks = 0;
	memset(&ip->i_d, 0, sizeof(xfs_icdinode_t));
	ip->i_size = 0;
	ip->i_new_size = 0;

	return ip;
}