/*
 * Walk an ext4 extent tree to translate logical block 'lbn' of a file into
 * its physical (filesystem) block number.
 *
 * header      - extent-tree node to search (root lives in the inode, children
 *               are read from disk into heap buffers).
 * lbn         - logical file block number to look up.
 * isallocated - true when 'header' was heap-allocated by this function
 *               (a child node) and must be freed before returning.
 *
 * Returns the physical block number, or 0 if 'lbn' is not mapped (hole) or
 * the node is corrupt.
 *
 * Fixes vs. the original: 'header' was leaked on the invalid-magic path, on
 * the leaf-miss path, and before every recursive descent; and it was freed
 * with 'delete [] header' although it was allocated as 'new char[]' -
 * deleting through the wrong type is undefined behavior, so we delete
 * through a char* as allocated.
 */
lloff_t Ext2Partition::extent_binarysearch(EXT4_EXTENT_HEADER *header, lloff_t lbn, bool isallocated)
{
    EXT4_EXTENT *extent;
    EXT4_EXTENT_IDX *index;
    EXT4_EXTENT_HEADER *child;
    lloff_t physical_block = 0;
    lloff_t block;

    if(header->eh_magic != EXT4_EXT_MAGIC)
    {
        LOG("Invalid magic in Extent Header: %X\n", header->eh_magic);
        if(isallocated)
            delete [] (char *) header;      /* was leaked on this path */
        return 0;
    }

    if(header->eh_depth == 0)
    {
        /* Leaf node: entries are actual extents. */
        extent = EXT_FIRST_EXTENT(header);
        for(int i = 0; i < header->eh_entries; i++, extent++)
        {
            if((lbn >= extent->ee_block) &&
               (lbn < (extent->ee_block + extent->ee_len)))
            {
                physical_block = ext_to_block(extent) + lbn;
                physical_block = physical_block - (lloff_t)extent->ee_block;
                break;
            }
        }
        if(isallocated)
            delete [] (char *) header;      /* free on hit AND miss */
        return physical_block;              /* 0 => hole / not found */
    }

    /*
     * Index node: descend into the last child whose starting logical block
     * is <= lbn, i.e. stop when the NEXT index starts beyond lbn (or we are
     * at the last entry).
     */
    index = EXT_FIRST_INDEX(header);
    for(int i = 0; i < header->eh_entries; i++, index++)
    {
        if((i == (header->eh_entries - 1)) || (lbn < (index + 1)->ei_block))
        {
            block = idx_to_block(index);
            child = (EXT4_EXTENT_HEADER *) new char [blocksize];
            ext2_readblock(block, (void *) child);
            if(isallocated)
                delete [] (char *) header;  /* was leaked before recursing */
            return extent_binarysearch(child, lbn, true);
        }
    }

    /* We reach here only if no index entry matched. */
    if(isallocated)
        delete [] (char *) header;
    return physical_block;
}
/* * Migrate a simple extent-based inode to use the i_blocks[] array */ int ext4_ind_migrate(struct inode *inode) { struct ext4_extent_header *eh; struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es; struct ext4_inode_info *ei = EXT4_I(inode); struct ext4_extent *ex; unsigned int i, len; ext4_fsblk_t blk; handle_t *handle; int ret; if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_INCOMPAT_EXTENTS) || (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))) return -EINVAL; if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb, EXT4_FEATURE_RO_COMPAT_BIGALLOC)) return -EOPNOTSUPP; handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1); if (IS_ERR(handle)) return PTR_ERR(handle); down_write(&EXT4_I(inode)->i_data_sem); ret = ext4_ext_check_inode(inode); if (ret) goto errout; eh = ext_inode_hdr(inode); ex = EXT_FIRST_EXTENT(eh); if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS || eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) { ret = -EOPNOTSUPP; goto errout; } if (eh->eh_entries == 0) blk = len = 0; else { len = le16_to_cpu(ex->ee_len); blk = ext4_ext_pblock(ex); if (len > EXT4_NDIR_BLOCKS) { ret = -EOPNOTSUPP; goto errout; } } ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS); memset(ei->i_data, 0, sizeof(ei->i_data)); for (i=0; i < len; i++) ei->i_data[i] = cpu_to_le32(blk++); ext4_mark_inode_dirty(handle, inode); errout: ext4_journal_stop(handle); up_write(&EXT4_I(inode)->i_data_sem); return ret; }
/*
 * Walk an ext4 extent tree to translate logical block 'lbn' of a file into
 * its physical (filesystem) block number.
 *
 * header      - extent-tree node to search.
 * lbn         - logical file block number to look up.
 * isallocated - true when 'header' is a heap buffer owned by this function
 *               (a child node) and must be freed before returning.
 *
 * Returns the physical block number, or 0 if 'lbn' is unmapped or the node
 * is corrupt.
 *
 * Fixes vs. the original:
 *  - The index-node loop never advanced 'index', so every iteration looked
 *    at the same (first) entry.
 *  - The descend test 'lbn >= index->ei_block' chose the FIRST candidate;
 *    since index entries are sorted ascending, the correct child is the
 *    LAST entry whose ei_block <= lbn, i.e. stop when the next entry starts
 *    beyond lbn (or at the last entry).
 *  - 'header' was leaked on the invalid-magic path, the leaf-miss path and
 *    before each recursion, and freed through the wrong type (allocated as
 *    'new char[]'); freed via char* on all exit paths now.
 */
uint64_t Partition::ExtentBinarySearch(EXT4_EXTENT_HEADER *header, uint64_t lbn, bool isallocated)
{
    EXT4_EXTENT *extent;
    EXT4_EXTENT_IDX *index;
    EXT4_EXTENT_HEADER *child;
    uint64_t physical_block = 0;
    uint64_t block;

    if(header->eh_magic != EXT4_EXT_MAGIC)
    {
        LOG("Invalid magic in Extent Header: %X\n", header->eh_magic);
        if(isallocated)
            delete [] (char *) header;
        return 0;
    }

    if(header->eh_depth == 0)
    {
        /* Leaf node: entries are actual extents. */
        extent = EXT_FIRST_EXTENT(header);
        for(int i = 0; i < header->eh_entries; i++, extent++)
        {
            if((lbn >= extent->ee_block) &&
               (lbn < (extent->ee_block + extent->ee_len)))
            {
                physical_block = ext_to_block(extent) + lbn;
                physical_block = physical_block - (uint64_t)extent->ee_block;
                break;
            }
        }
        if(isallocated)
            delete [] (char *) header;
        return physical_block;              /* 0 => hole / not found */
    }

    /* Index node: descend into the last child covering lbn. */
    index = EXT_FIRST_INDEX(header);
    for(int i = 0; i < header->eh_entries; i++, index++)
    {
        if((i == (header->eh_entries - 1)) || (lbn < (index + 1)->ei_block))
        {
            block = idx_to_block(index);
            child = (EXT4_EXTENT_HEADER *) new char [blocksize];
            ReadBlock(block, (void *) child);
            if(isallocated)
                delete [] (char *) header;
            return ExtentBinarySearch(child, lbn, true);
        }
    }

    if(isallocated)
        delete [] (char *) header;
    return physical_block;
}
/*
 * Migrate a simple extent-based inode to use the i_blocks[] array
 *
 * Converts an inode whose extent tree is flat (eh_depth == 0) and holds at
 * most one extent into the classic direct-block mapping: the extent's
 * physical blocks are copied into i_data[start..end] and the EXTENTS inode
 * flag is cleared.
 *
 * Returns 0 on success, -EINVAL if the inode is not extent-mapped,
 * -EOPNOTSUPP when the layout cannot be expressed with direct blocks only
 * (bigalloc, deep tree, >1 extent, or blocks beyond EXT4_NDIR_BLOCKS), or
 * an error from journal start / extent-header validation.
 */
int ext4_ind_migrate(struct inode *inode)
{
	struct ext4_extent_header *eh;
	struct ext4_super_block *es = EXT4_SB(inode->i_sb)->s_es;
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent *ex;
	unsigned int i, len;
	ext4_lblk_t start, end;
	ext4_fsblk_t blk;
	handle_t *handle;
	int ret;

	/* Only meaningful for inodes actually using extents. */
	if (!EXT4_HAS_INCOMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_INCOMPAT_EXTENTS) ||
	    (!ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS)))
		return -EINVAL;

	/* Cluster-allocated (bigalloc) filesystems are not handled. */
	if (EXT4_HAS_RO_COMPAT_FEATURE(inode->i_sb,
				       EXT4_FEATURE_RO_COMPAT_BIGALLOC))
		return -EOPNOTSUPP;

	/*
	 * In order to get correct extent info, force all delayed allocation
	 * blocks to be allocated, otherwise delayed allocation blocks may not
	 * be reflected and bypass the checks on extent header.
	 */
	if (test_opt(inode->i_sb, DELALLOC))
		ext4_alloc_da_blocks(inode);

	/* Start the journal handle before taking i_data_sem (lock order). */
	handle = ext4_journal_start(inode, EXT4_HT_MIGRATE, 1);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	down_write(&EXT4_I(inode)->i_data_sem);
	ret = ext4_ext_check_inode(inode);
	if (ret)
		goto errout;

	eh = ext_inode_hdr(inode);
	ex = EXT_FIRST_EXTENT(eh);
	/* Bail out unless the tree is flat with at most one extent. */
	if (ext4_blocks_count(es) > EXT4_MAX_BLOCK_FILE_PHYS ||
	    eh->eh_depth != 0 || le16_to_cpu(eh->eh_entries) > 1) {
		ret = -EOPNOTSUPP;
		goto errout;
	}
	if (eh->eh_entries == 0)
		blk = len = start = end = 0;
	else {
		len = le16_to_cpu(ex->ee_len);
		blk = ext4_ext_pblock(ex);
		start = le32_to_cpu(ex->ee_block);
		end = start + len - 1;
		/* Every mapped block must fit in a direct i_data slot. */
		if (end >= EXT4_NDIR_BLOCKS) {
			ret = -EOPNOTSUPP;
			goto errout;
		}
	}

	ext4_clear_inode_flag(inode, EXT4_INODE_EXTENTS);
	memset(ei->i_data, 0, sizeof(ei->i_data));
	/* Logical blocks start..end map to consecutive physical blocks. */
	for (i = start; i <= end; i++)
		ei->i_data[i] = cpu_to_le32(blk++);
	ext4_mark_inode_dirty(handle, inode);
errout:
	/* Stop the handle before dropping i_data_sem (reverse of acquisition). */
	ext4_journal_stop(handle);
	up_write(&EXT4_I(inode)->i_data_sem);
	return ret;
}