/*
 * Fast-path variant of xfs_ichgtime(): propagate timestamps that the
 * Linux inode already carries into the XFS incore inode.  Used when we
 * know the VFS has just updated inode->i_mtime/i_ctime for us, so we
 * can avoid querying the system clock again.
 */
void
xfs_ichgtime_fast(
    xfs_inode_t     *ip,
    struct inode    *inode,
    int             flags)
{
    /*
     * Atime updates for read() & friends are handled lazily elsewhere;
     * explicit atime updates must go through xfs_ichgtime(), never this
     * fast path.
     */
    ASSERT((flags & XFS_ICHGTIME_ACC) == 0);

    /*
     * Timestamps must never change on a readonly-mounted filesystem;
     * silently drop the request if anyone asks.
     */
    if (unlikely(IS_RDONLY(inode)))
        return;

    if (flags & XFS_ICHGTIME_MOD) {
        ip->i_d.di_mtime.t_sec = (__int32_t)inode->i_mtime.tv_sec;
        ip->i_d.di_mtime.t_nsec = (__int32_t)inode->i_mtime.tv_nsec;
    }
    if (flags & XFS_ICHGTIME_CHG) {
        ip->i_d.di_ctime.t_sec = (__int32_t)inode->i_ctime.tv_sec;
        ip->i_d.di_ctime.t_nsec = (__int32_t)inode->i_ctime.tv_nsec;
    }

    /*
     * i_update_core is set strictly AFTER the timestamp stores so that
     * xfs_iflush() cannot observe the flag without also seeing the new
     * timestamps; this coordination is what lets us skip taking the
     * inode lock here.  The SYNCHRONIZE() macro keeps the compiler from
     * reordering the flag update above the timestamp stores.
     */
    SYNCHRONIZE();
    ip->i_update_core = 1;
    if (!(inode->i_state & I_NEW))
        mark_inode_dirty_sync(inode);
}
/*
 * Stamp the requested timestamp(s) on the given inode with the current
 * filesystem time.  We do not lock across timestamp updates and we do
 * not log them; we merely record that the incore inode now carries
 * dirty (non-transactional) information.
 */
void
xfs_ichgtime(
    xfs_inode_t     *ip,
    int             flags)
{
    struct inode    *inode = VFS_I(ip);
    timespec_t      now;
    int             dirtied = 0;

    now = current_fs_time(inode->i_sb);

    if ((flags & XFS_ICHGTIME_MOD) &&
        !timespec_equal(&inode->i_mtime, &now)) {
        inode->i_mtime = now;
        ip->i_d.di_mtime.t_sec = (__int32_t)now.tv_sec;
        ip->i_d.di_mtime.t_nsec = (__int32_t)now.tv_nsec;
        dirtied = 1;
    }
    if ((flags & XFS_ICHGTIME_CHG) &&
        !timespec_equal(&inode->i_ctime, &now)) {
        inode->i_ctime = now;
        ip->i_d.di_ctime.t_sec = (__int32_t)now.tv_sec;
        ip->i_d.di_ctime.t_nsec = (__int32_t)now.tv_nsec;
        dirtied = 1;
    }

    /*
     * i_update_core must be set AFTER the timestamp stores so that
     * xfs_iflush() never sees the flag without the new timestamps;
     * that ordering is what lets us avoid holding the inode lock here.
     * SYNCHRONIZE() prevents the compiler from hoisting the flag update
     * above the timestamp stores.
     */
    if (dirtied) {
        SYNCHRONIZE();
        ip->i_update_core = 1;
        xfs_mark_inode_dirty_sync(ip);
    }
}
/*
 * Stamp the requested timestamp(s) on the given inode with the current
 * time.  Updates are neither locked nor logged; we only record that
 * there is now dirty information in core.
 *
 * NOTE -- callers MUST combine XFS_ICHGTIME_MOD or XFS_ICHGTIME_CHG
 * with XFS_ICHGTIME_ACC in a single call to be sure the access time
 * update takes.  Calling first with XFS_ICHGTIME_ACC and then with
 * XFS_ICHGTIME_MOD may fail to modify the access timestamp when the
 * filesystem is mounted noacctm.
 */
void
xfs_ichgtime(
    xfs_inode_t     *ip,
    int             flags)
{
    struct inode    *inode = vn_to_inode(XFS_ITOV(ip));
    timespec_t      now;

    nanotime(&now);

    if (flags & XFS_ICHGTIME_MOD) {
        inode->i_mtime = now;
        ip->i_d.di_mtime.t_sec = (__int32_t)now.tv_sec;
        ip->i_d.di_mtime.t_nsec = (__int32_t)now.tv_nsec;
    }
    if (flags & XFS_ICHGTIME_ACC) {
        inode->i_atime = now;
        ip->i_d.di_atime.t_sec = (__int32_t)now.tv_sec;
        ip->i_d.di_atime.t_nsec = (__int32_t)now.tv_nsec;
    }
    if (flags & XFS_ICHGTIME_CHG) {
        inode->i_ctime = now;
        ip->i_d.di_ctime.t_sec = (__int32_t)now.tv_sec;
        ip->i_d.di_ctime.t_nsec = (__int32_t)now.tv_nsec;
    }

    /*
     * i_update_core is set strictly AFTER the timestamp stores so that
     * xfs_iflush() cannot see the flag without the new timestamps; this
     * ordering is what lets us skip the inode lock here.  SYNCHRONIZE()
     * stops the compiler from reordering the flag update above the
     * timestamp stores.
     */
    SYNCHRONIZE();
    ip->i_update_core = 1;
    if (!(inode->i_state & I_NEW))
        mark_inode_dirty_sync(inode);
}
/*
 * This is called to fill in the vector of log iovecs for the
 * given inode log item.  It fills the first item with an inode
 * log format structure, the second with the on-disk inode structure,
 * and a possible third and/or fourth with the inode data/extents/b-tree
 * root and inode attributes data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_format(
    struct xfs_log_item     *lip,
    struct xfs_log_iovec    *vecp)
{
    struct xfs_inode_log_item *iip = INODE_ITEM(lip);
    struct xfs_inode    *ip = iip->ili_inode;
    uint                nvecs;          /* iovecs filled in so far */
    size_t              data_bytes;     /* local-format data length, word aligned */
    xfs_mount_t         *mp;

    /* Vector 1: the inode log format header itself. */
    vecp->i_addr = &iip->ili_format;
    vecp->i_len = sizeof(xfs_inode_log_format_t);
    vecp->i_type = XLOG_REG_TYPE_IFORMAT;
    vecp++;
    nvecs = 1;

    /*
     * Clear i_update_core if the timestamps (or any other
     * non-transactional modification) need flushing/logging
     * and we're about to log them with the rest of the core.
     *
     * This is the same logic as xfs_iflush() but this code can't
     * run at the same time as xfs_iflush because we're in commit
     * processing here and so we have the inode lock held in
     * exclusive mode.  Although it doesn't really matter
     * for the timestamps if both routines were to grab the
     * timestamps or not.  That would be ok.
     *
     * We clear i_update_core before copying out the data.
     * This is for coordination with our timestamp updates
     * that don't hold the inode lock.  They will always
     * update the timestamps BEFORE setting i_update_core,
     * so if we clear i_update_core after they set it we
     * are guaranteed to see their updates to the timestamps
     * either here.  Likewise, if they set it after we clear it
     * here, we'll see it either on the next commit of this
     * inode or the next time the inode gets flushed via
     * xfs_iflush().  This depends on strongly ordered memory
     * semantics, but we have that.  We use the SYNCHRONIZE
     * macro to make sure that the compiler does not reorder
     * the i_update_core access below the data copy below.
     */
    if (ip->i_update_core) {
        ip->i_update_core = 0;
        SYNCHRONIZE();
    }

    /*
     * Make sure to get the latest timestamps from the Linux inode.
     */
    xfs_synchronize_times(ip);

    /* Vector 2: the incore copy of the on-disk inode core. */
    vecp->i_addr = &ip->i_d;
    vecp->i_len = sizeof(struct xfs_icdinode);
    vecp->i_type = XLOG_REG_TYPE_ICORE;
    vecp++;
    nvecs++;
    iip->ili_format.ilf_fields |= XFS_ILOG_CORE;

    /*
     * If this is really an old format inode, then we need to
     * log it as such.  This means that we have to copy the link
     * count from the new field to the old.  We don't have to worry
     * about the new fields, because nothing trusts them as long as
     * the old inode version number is there.  If the superblock already
     * has a new version number, then we don't bother converting back.
     */
    mp = ip->i_mount;
    ASSERT(ip->i_d.di_version == 1 || xfs_sb_version_hasnlink(&mp->m_sb));
    if (ip->i_d.di_version == 1) {
        if (!xfs_sb_version_hasnlink(&mp->m_sb)) {
            /*
             * Convert it back.
             */
            ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
            ip->i_d.di_onlink = ip->i_d.di_nlink;
        } else {
            /*
             * The superblock version has already been bumped,
             * so just make the conversion to the new inode
             * format permanent.
             */
            ip->i_d.di_version = 2;
            ip->i_d.di_onlink = 0;
            memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
        }
    }

    /*
     * Vector 3 (optional): the data fork, in whatever representation the
     * inode currently uses.  Each case asserts that no log flags for the
     * other representations are set.
     */
    switch (ip->i_d.di_format) {
    case XFS_DINODE_FMT_EXTENTS:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
                  XFS_ILOG_DEV | XFS_ILOG_UUID)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_DEXT) {
            ASSERT(ip->i_df.if_bytes > 0);
            ASSERT(ip->i_df.if_u1.if_extents != NULL);
            ASSERT(ip->i_d.di_nextents > 0);
            ASSERT(iip->ili_extents_buf == NULL);
            ASSERT((ip->i_df.if_bytes /
                    (uint)sizeof(xfs_bmbt_rec_t)) > 0);
#ifdef XFS_NATIVE_HOST
            if (ip->i_d.di_nextents == ip->i_df.if_bytes /
                           (uint)sizeof(xfs_bmbt_rec_t)) {
                /*
                 * There are no delayed allocation
                 * extents, so just point to the
                 * real extents array.
                 */
                vecp->i_addr = ip->i_df.if_u1.if_extents;
                vecp->i_len = ip->i_df.if_bytes;
                vecp->i_type = XLOG_REG_TYPE_IEXT;
            } else
#endif
            {
                /* Copy/convert extents into a separate log buffer. */
                xfs_inode_item_format_extents(ip, vecp,
                    XFS_DATA_FORK, XLOG_REG_TYPE_IEXT);
            }
            ASSERT(vecp->i_len <= ip->i_df.if_bytes);
            iip->ili_format.ilf_dsize = vecp->i_len;
            vecp++;
            nvecs++;
        }
        break;

    case XFS_DINODE_FMT_BTREE:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DDATA | XFS_ILOG_DEXT |
                  XFS_ILOG_DEV | XFS_ILOG_UUID)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) {
            ASSERT(ip->i_df.if_broot_bytes > 0);
            ASSERT(ip->i_df.if_broot != NULL);
            /* Log the in-memory b-tree root block directly. */
            vecp->i_addr = ip->i_df.if_broot;
            vecp->i_len = ip->i_df.if_broot_bytes;
            vecp->i_type = XLOG_REG_TYPE_IBROOT;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
        }
        break;

    case XFS_DINODE_FMT_LOCAL:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
                  XFS_ILOG_DEV | XFS_ILOG_UUID)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_DDATA) {
            ASSERT(ip->i_df.if_bytes > 0);
            ASSERT(ip->i_df.if_u1.if_data != NULL);
            ASSERT(ip->i_d.di_size > 0);

            vecp->i_addr = ip->i_df.if_u1.if_data;
            /*
             * Round i_bytes up to a word boundary.
             * The underlying memory is guaranteed to
             * to be there by xfs_idata_realloc().
             */
            data_bytes = roundup(ip->i_df.if_bytes, 4);
            ASSERT((ip->i_df.if_real_bytes == 0) ||
                   (ip->i_df.if_real_bytes == data_bytes));
            vecp->i_len = (int)data_bytes;
            vecp->i_type = XLOG_REG_TYPE_ILOCAL;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_dsize = (unsigned)data_bytes;
        }
        break;

    case XFS_DINODE_FMT_DEV:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
                  XFS_ILOG_DDATA | XFS_ILOG_UUID)));
        /* Device numbers travel inside the format header, no extra vec. */
        if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
            iip->ili_format.ilf_u.ilfu_rdev =
                ip->i_df.if_u2.if_rdev;
        }
        break;

    case XFS_DINODE_FMT_UUID:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
                  XFS_ILOG_DDATA | XFS_ILOG_DEV)));
        /* As with DEV, the UUID rides in the format header. */
        if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
            iip->ili_format.ilf_u.ilfu_uuid =
                ip->i_df.if_u2.if_uuid;
        }
        break;

    default:
        ASSERT(0);
        break;
    }

    /*
     * If there are no attributes associated with the file,
     * then we're done.
     * Assert that no attribute-related log flags are set.
     */
    if (!XFS_IFORK_Q(ip)) {
        ASSERT(nvecs == lip->li_desc->lid_size);
        iip->ili_format.ilf_size = nvecs;
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
        return;
    }

    /*
     * Vector 4 (optional): the attribute fork, mirroring the data-fork
     * handling above (minus the DEV/UUID cases, which can't occur here).
     */
    switch (ip->i_d.di_aformat) {
    case XFS_DINODE_FMT_EXTENTS:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
#ifdef DEBUG
            int nrecs = ip->i_afp->if_bytes /
                (uint)sizeof(xfs_bmbt_rec_t);
            ASSERT(nrecs > 0);
            ASSERT(nrecs == ip->i_d.di_anextents);
            ASSERT(ip->i_afp->if_bytes > 0);
            ASSERT(ip->i_afp->if_u1.if_extents != NULL);
            ASSERT(ip->i_d.di_anextents > 0);
#endif
#ifdef XFS_NATIVE_HOST
            /*
             * There are not delayed allocation extents
             * for attributes, so just point at the array.
             */
            vecp->i_addr = ip->i_afp->if_u1.if_extents;
            vecp->i_len = ip->i_afp->if_bytes;
            vecp->i_type = XLOG_REG_TYPE_IATTR_EXT;
#else
            ASSERT(iip->ili_aextents_buf == NULL);
            xfs_inode_item_format_extents(ip, vecp,
                XFS_ATTR_FORK, XLOG_REG_TYPE_IATTR_EXT);
#endif
            iip->ili_format.ilf_asize = vecp->i_len;
            vecp++;
            nvecs++;
        }
        break;

    case XFS_DINODE_FMT_BTREE:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ADATA | XFS_ILOG_AEXT)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) {
            ASSERT(ip->i_afp->if_broot_bytes > 0);
            ASSERT(ip->i_afp->if_broot != NULL);
            vecp->i_addr = ip->i_afp->if_broot;
            vecp->i_len = ip->i_afp->if_broot_bytes;
            vecp->i_type = XLOG_REG_TYPE_IATTR_BROOT;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
        }
        break;

    case XFS_DINODE_FMT_LOCAL:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_ADATA) {
            ASSERT(ip->i_afp->if_bytes > 0);
            ASSERT(ip->i_afp->if_u1.if_data != NULL);

            vecp->i_addr = ip->i_afp->if_u1.if_data;
            /*
             * Round i_bytes up to a word boundary.
             * The underlying memory is guaranteed to
             * to be there by xfs_idata_realloc().
             */
            data_bytes = roundup(ip->i_afp->if_bytes, 4);
            ASSERT((ip->i_afp->if_real_bytes == 0) ||
                   (ip->i_afp->if_real_bytes == data_bytes));
            vecp->i_len = (int)data_bytes;
            vecp->i_type = XLOG_REG_TYPE_IATTR_LOCAL;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_asize = (unsigned)data_bytes;
        }
        break;

    default:
        ASSERT(0);
        break;
    }

    /* Record the final iovec count in the format header. */
    ASSERT(nvecs == lip->li_desc->lid_size);
    iip->ili_format.ilf_size = nvecs;
}
/*
 * This is called to fill in the vector of log iovecs for the
 * given inode log item.  It fills the first item with an inode
 * log format structure, the second with the on-disk inode structure,
 * and a possible third and/or fourth with the inode data/extents/b-tree
 * root and inode attributes data/extents/b-tree root.
 */
STATIC void
xfs_inode_item_format(
    xfs_inode_log_item_t    *iip,
    xfs_log_iovec_t         *log_vector)
{
    uint                nvecs;          /* iovecs filled in so far */
    xfs_log_iovec_t     *vecp;          /* cursor into log_vector */
    xfs_inode_t         *ip;
    size_t              data_bytes;     /* local-format data length, word aligned */
    xfs_bmbt_rec_t      *ext_buffer;    /* temp buffer for copied extents */
    int                 nrecs;          /* extent record count */
    xfs_mount_t         *mp;

    ip = iip->ili_inode;
    vecp = log_vector;

    /* Vector 1: the inode log format header itself. */
    vecp->i_addr = (xfs_caddr_t)&iip->ili_format;
    vecp->i_len = sizeof(xfs_inode_log_format_t);
    vecp++;
    nvecs = 1;

    /*
     * Clear i_update_core if the timestamps (or any other
     * non-transactional modification) need flushing/logging
     * and we're about to log them with the rest of the core.
     *
     * This is the same logic as xfs_iflush() but this code can't
     * run at the same time as xfs_iflush because we're in commit
     * processing here and so we have the inode lock held in
     * exclusive mode.  Although it doesn't really matter
     * for the timestamps if both routines were to grab the
     * timestamps or not.  That would be ok.
     *
     * We clear i_update_core before copying out the data.
     * This is for coordination with our timestamp updates
     * that don't hold the inode lock.  They will always
     * update the timestamps BEFORE setting i_update_core,
     * so if we clear i_update_core after they set it we
     * are guaranteed to see their updates to the timestamps
     * either here.  Likewise, if they set it after we clear it
     * here, we'll see it either on the next commit of this
     * inode or the next time the inode gets flushed via
     * xfs_iflush().  This depends on strongly ordered memory
     * semantics, but we have that.  We use the SYNCHRONIZE
     * macro to make sure that the compiler does not reorder
     * the i_update_core access below the data copy below.
     */
    if (ip->i_update_core) {
        ip->i_update_core = 0;
        SYNCHRONIZE();
    }

    /*
     * We don't have to worry about re-ordering here because
     * the update_size field is protected by the inode lock
     * and we have that held in exclusive mode.
     */
    if (ip->i_update_size)
        ip->i_update_size = 0;

    /* Vector 2: the incore copy of the on-disk inode core. */
    vecp->i_addr = (xfs_caddr_t)&ip->i_d;
    vecp->i_len = sizeof(xfs_dinode_core_t);
    vecp++;
    nvecs++;
    iip->ili_format.ilf_fields |= XFS_ILOG_CORE;

    /*
     * If this is really an old format inode, then we need to
     * log it as such.  This means that we have to copy the link
     * count from the new field to the old.  We don't have to worry
     * about the new fields, because nothing trusts them as long as
     * the old inode version number is there.  If the superblock already
     * has a new version number, then we don't bother converting back.
     */
    mp = ip->i_mount;
    ASSERT(ip->i_d.di_version == XFS_DINODE_VERSION_1 ||
           XFS_SB_VERSION_HASNLINK(&mp->m_sb));
    if (ip->i_d.di_version == XFS_DINODE_VERSION_1) {
        if (!XFS_SB_VERSION_HASNLINK(&mp->m_sb)) {
            /*
             * Convert it back.
             */
            ASSERT(ip->i_d.di_nlink <= XFS_MAXLINK_1);
            ip->i_d.di_onlink = ip->i_d.di_nlink;
        } else {
            /*
             * The superblock version has already been bumped,
             * so just make the conversion to the new inode
             * format permanent.
             */
            ip->i_d.di_version = XFS_DINODE_VERSION_2;
            ip->i_d.di_onlink = 0;
            memset(&(ip->i_d.di_pad[0]), 0, sizeof(ip->i_d.di_pad));
        }
    }

    /*
     * Vector 3 (optional): the data fork, in whatever representation the
     * inode currently uses.  Each case asserts that no log flags for the
     * other representations are set.
     */
    switch (ip->i_d.di_format) {
    case XFS_DINODE_FMT_EXTENTS:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DDATA | XFS_ILOG_DBROOT |
                  XFS_ILOG_DEV | XFS_ILOG_UUID)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_DEXT) {
            ASSERT(ip->i_df.if_bytes > 0);
            ASSERT(ip->i_df.if_u1.if_extents != NULL);
            ASSERT(ip->i_d.di_nextents > 0);
            ASSERT(iip->ili_extents_buf == NULL);
            nrecs = ip->i_df.if_bytes /
                (uint)sizeof(xfs_bmbt_rec_t);
            ASSERT(nrecs > 0);
#if __BYTE_ORDER == __BIG_ENDIAN
            if (nrecs == ip->i_d.di_nextents) {
                /*
                 * There are no delayed allocation
                 * extents, so just point to the
                 * real extents array.
                 */
                vecp->i_addr =
                    (char *)(ip->i_df.if_u1.if_extents);
                vecp->i_len = ip->i_df.if_bytes;
            } else
#endif
            {
                /*
                 * There are delayed allocation extents
                 * in the inode, or we need to convert
                 * the extents to on disk format.
                 * Use xfs_iextents_copy()
                 * to copy only the real extents into
                 * a separate buffer.  We'll free the
                 * buffer in the unlock routine.
                 */
                ext_buffer = kmem_alloc(ip->i_df.if_bytes,
                    KM_SLEEP);
                iip->ili_extents_buf = ext_buffer;
                vecp->i_addr = (xfs_caddr_t)ext_buffer;
                vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
                        XFS_DATA_FORK);
            }
            ASSERT(vecp->i_len <= ip->i_df.if_bytes);
            iip->ili_format.ilf_dsize = vecp->i_len;
            vecp++;
            nvecs++;
        }
        break;

    case XFS_DINODE_FMT_BTREE:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DDATA | XFS_ILOG_DEXT |
                  XFS_ILOG_DEV | XFS_ILOG_UUID)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_DBROOT) {
            ASSERT(ip->i_df.if_broot_bytes > 0);
            ASSERT(ip->i_df.if_broot != NULL);
            /* Log the in-memory b-tree root block directly. */
            vecp->i_addr = (xfs_caddr_t)ip->i_df.if_broot;
            vecp->i_len = ip->i_df.if_broot_bytes;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_dsize = ip->i_df.if_broot_bytes;
        }
        break;

    case XFS_DINODE_FMT_LOCAL:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
                  XFS_ILOG_DEV | XFS_ILOG_UUID)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_DDATA) {
            ASSERT(ip->i_df.if_bytes > 0);
            ASSERT(ip->i_df.if_u1.if_data != NULL);
            ASSERT(ip->i_d.di_size > 0);

            vecp->i_addr = (xfs_caddr_t)ip->i_df.if_u1.if_data;
            /*
             * Round i_bytes up to a word boundary.
             * The underlying memory is guaranteed to
             * to be there by xfs_idata_realloc().
             */
            data_bytes = roundup(ip->i_df.if_bytes, 4);
            ASSERT((ip->i_df.if_real_bytes == 0) ||
                   (ip->i_df.if_real_bytes == data_bytes));
            vecp->i_len = (int)data_bytes;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_dsize = (unsigned)data_bytes;
        }
        break;

    case XFS_DINODE_FMT_DEV:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
                  XFS_ILOG_DDATA | XFS_ILOG_UUID)));
        /* Device numbers travel inside the format header, no extra vec. */
        if (iip->ili_format.ilf_fields & XFS_ILOG_DEV) {
            iip->ili_format.ilf_u.ilfu_rdev =
                ip->i_df.if_u2.if_rdev;
        }
        break;

    case XFS_DINODE_FMT_UUID:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_DBROOT | XFS_ILOG_DEXT |
                  XFS_ILOG_DDATA | XFS_ILOG_DEV)));
        /* As with DEV, the UUID rides in the format header. */
        if (iip->ili_format.ilf_fields & XFS_ILOG_UUID) {
            iip->ili_format.ilf_u.ilfu_uuid =
                ip->i_df.if_u2.if_uuid;
        }
        break;

    default:
        ASSERT(0);
        break;
    }

    /*
     * If there are no attributes associated with the file,
     * then we're done.
     * Assert that no attribute-related log flags are set.
     */
    if (!XFS_IFORK_Q(ip)) {
        ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
        iip->ili_format.ilf_size = nvecs;
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
        return;
    }

    /*
     * Vector 4 (optional): the attribute fork, mirroring the data-fork
     * handling above (minus the DEV/UUID cases, which can't occur here).
     */
    switch (ip->i_d.di_aformat) {
    case XFS_DINODE_FMT_EXTENTS:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ADATA | XFS_ILOG_ABROOT)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_AEXT) {
            ASSERT(ip->i_afp->if_bytes > 0);
            ASSERT(ip->i_afp->if_u1.if_extents != NULL);
            ASSERT(ip->i_d.di_anextents > 0);
#ifdef DEBUG
            /* nrecs only recomputed for the ASSERTs below. */
            nrecs = ip->i_afp->if_bytes /
                (uint)sizeof(xfs_bmbt_rec_t);
#endif
            ASSERT(nrecs > 0);
            ASSERT(nrecs == ip->i_d.di_anextents);
#if __BYTE_ORDER == __BIG_ENDIAN
            /*
             * There are not delayed allocation extents
             * for attributes, so just point at the array.
             */
            vecp->i_addr = (char *)(ip->i_afp->if_u1.if_extents);
            vecp->i_len = ip->i_afp->if_bytes;
#else
            ASSERT(iip->ili_aextents_buf == NULL);
            /*
             * Need to endian flip before logging
             */
            ext_buffer = kmem_alloc(ip->i_afp->if_bytes,
                KM_SLEEP);
            iip->ili_aextents_buf = ext_buffer;
            vecp->i_addr = (xfs_caddr_t)ext_buffer;
            vecp->i_len = xfs_iextents_copy(ip, ext_buffer,
                    XFS_ATTR_FORK);
#endif
            iip->ili_format.ilf_asize = vecp->i_len;
            vecp++;
            nvecs++;
        }
        break;

    case XFS_DINODE_FMT_BTREE:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ADATA | XFS_ILOG_AEXT)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_ABROOT) {
            ASSERT(ip->i_afp->if_broot_bytes > 0);
            ASSERT(ip->i_afp->if_broot != NULL);
            vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_broot;
            vecp->i_len = ip->i_afp->if_broot_bytes;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_asize = ip->i_afp->if_broot_bytes;
        }
        break;

    case XFS_DINODE_FMT_LOCAL:
        ASSERT(!(iip->ili_format.ilf_fields &
                 (XFS_ILOG_ABROOT | XFS_ILOG_AEXT)));
        if (iip->ili_format.ilf_fields & XFS_ILOG_ADATA) {
            ASSERT(ip->i_afp->if_bytes > 0);
            ASSERT(ip->i_afp->if_u1.if_data != NULL);

            vecp->i_addr = (xfs_caddr_t)ip->i_afp->if_u1.if_data;
            /*
             * Round i_bytes up to a word boundary.
             * The underlying memory is guaranteed to
             * to be there by xfs_idata_realloc().
             */
            data_bytes = roundup(ip->i_afp->if_bytes, 4);
            ASSERT((ip->i_afp->if_real_bytes == 0) ||
                   (ip->i_afp->if_real_bytes == data_bytes));
            vecp->i_len = (int)data_bytes;
            vecp++;
            nvecs++;
            iip->ili_format.ilf_asize = (unsigned)data_bytes;
        }
        break;

    default:
        ASSERT(0);
        break;
    }

    /* Record the final iovec count in the format header. */
    ASSERT(nvecs == iip->ili_item.li_desc->lid_size);
    iip->ili_format.ilf_size = nvecs;
}