/*
 * Recursively free the extent-tree metadata block referenced by @ix,
 * together with every index block beneath it.
 *
 * Returns 0 on success, -EIO if the block cannot be read, or the first
 * error reported by a recursive call.  Note that this block itself is
 * still released even when a child subtree reported an error, matching
 * the original behaviour.
 */
static int free_ext_idx(handle_t *handle, struct inode *inode,
			struct ext4_extent_idx *ix)
{
	struct ext4_extent_header *eh;
	struct buffer_head *bh;
	ext4_fsblk_t pblk;
	int i, err = 0;

	pblk = ext4_idx_pblock(ix);
	bh = sb_bread(inode->i_sb, pblk);
	if (!bh)
		return -EIO;

	eh = (struct ext4_extent_header *)bh->b_data;
	if (eh->eh_depth != 0) {
		/* Interior node: descend into each child index first. */
		struct ext4_extent_idx *child = EXT_FIRST_INDEX(eh);

		for (i = 0; i < le16_to_cpu(eh->eh_entries); i++, child++) {
			err = free_ext_idx(handle, inode, child);
			if (err)
				break;
		}
	}
	put_bh(bh);

	/* Release this metadata block back to the filesystem. */
	extend_credit_for_blkdel(handle, inode);
	ext4_free_blocks(handle, inode, NULL, pblk, 1,
			 EXT4_FREE_BLOCKS_METADATA | EXT4_FREE_BLOCKS_FORGET);
	return err;
}
/*
 * Walk the on-disk extent tree rooted at *header looking for the extent
 * that covers logical block lbn, and return the corresponding physical
 * block number (0 if the block is not mapped or the header is invalid).
 *
 * header      - extent header to search; ownership is taken when
 *               isallocated is true (it was allocated as new char[] by a
 *               previous level of this recursion and is freed here).
 * lbn         - logical block number to translate.
 * isallocated - true when *header is heap-allocated and must be freed
 *               before returning.
 *
 * Fixes over the previous version:
 *  - header was leaked on the invalid-magic path, on the not-found leaf
 *    path, and on every descent into a child node;
 *  - delete[] was applied through an EXT4_EXTENT_HEADER* although the
 *    buffer was allocated as new char[] (mismatched new[]/delete[] is
 *    undefined behavior) — the pointer is now cast back to char*.
 */
lloff_t Ext2Partition::extent_binarysearch(EXT4_EXTENT_HEADER *header, lloff_t lbn, bool isallocated)
{
    EXT4_EXTENT *extent;
    EXT4_EXTENT_IDX *index;
    EXT4_EXTENT_HEADER *child;
    lloff_t physical_block = 0;
    lloff_t block;

    if(header->eh_magic != EXT4_EXT_MAGIC)
    {
        LOG("Invalid magic in Extent Header: %X\n", header->eh_magic);
        if(isallocated)
            delete [] (char *) header;      // fix: this path used to leak
        return 0;
    }

    extent = EXT_FIRST_EXTENT(header);
    if(header->eh_depth == 0)
    {
        /* Leaf level: scan the extents for one covering lbn. */
        for(int i = 0; i < header->eh_entries; i++)
        {
            if((lbn >= extent->ee_block) &&
               (lbn < (extent->ee_block + extent->ee_len)))
            {
                physical_block = ext_to_block(extent) + lbn;
                physical_block = physical_block - (lloff_t)extent->ee_block;
                if(isallocated)
                    delete [] (char *) header;
                return physical_block;
            }
            extent++;   // Pointer increment by size of Extent.
        }
        if(isallocated)
            delete [] (char *) header;      // fix: not-found path used to leak
        return 0;
    }

    /* Interior level: find the child subtree whose range contains lbn. */
    index = EXT_FIRST_INDEX(header);
    for(int i = 0; i < header->eh_entries; i++)
    {
        /* Descend when this is the last index or lbn precedes the next one. */
        if((i == (header->eh_entries - 1)) || (lbn < (index + 1)->ei_block))
        {
            child = (EXT4_EXTENT_HEADER *) new char [blocksize];
            block = idx_to_block(index);
            ext2_readblock(block, (void *) child);
            if(isallocated)
                delete [] (char *) header;  // fix: descent used to leak
            return extent_binarysearch(child, lbn, true);
        }
        index++;
    }

    /* We reach here if we do not find the key. */
    if(isallocated)
        delete [] (char *) header;
    return physical_block;
}
/*
 * Translate logical block lbn to a physical block number by walking the
 * extent tree rooted at *header.  Returns 0 when lbn is not mapped or
 * the header magic is invalid.
 *
 * header      - extent header to search; freed here when isallocated.
 * lbn         - logical block number to translate.
 * isallocated - true when *header was heap-allocated (new char[]) by a
 *               previous recursion level and must be freed here.
 *
 * Fixes over the previous version:
 *  - the interior-node loop never advanced `index` and descended into
 *    the FIRST child whenever lbn >= its starting block, so any lbn in
 *    a later subtree resolved incorrectly; it now descends into the
 *    child whose key range actually contains lbn (last entry, or next
 *    entry starts past lbn);
 *  - header was leaked on the invalid-magic path, the not-found leaf
 *    path, and on every descent;
 *  - delete[] through EXT4_EXTENT_HEADER* of a new char[] buffer is
 *    undefined behavior — now cast back to char*.
 */
uint64_t Partition::ExtentBinarySearch(EXT4_EXTENT_HEADER *header, uint64_t lbn, bool isallocated)
{
    EXT4_EXTENT *extent;
    EXT4_EXTENT_IDX *index;
    EXT4_EXTENT_HEADER *child;
    uint64_t physical_block = 0;
    uint64_t block;

    if(header->eh_magic != EXT4_EXT_MAGIC)
    {
        LOG("Invalid magic in Extent Header: %X\n", header->eh_magic);
        if(isallocated)
            delete [] (char *) header;      // fix: this path used to leak
        return 0;
    }

    extent = EXT_FIRST_EXTENT(header);
    if(header->eh_depth == 0)
    {
        /* Leaf level: scan the extents for one covering lbn. */
        for(int i = 0; i < header->eh_entries; i++)
        {
            if((lbn >= extent->ee_block) &&
               (lbn < (extent->ee_block + extent->ee_len)))
            {
                physical_block = ext_to_block(extent) + lbn;
                physical_block = physical_block - (uint64_t)extent->ee_block;
                if(isallocated)
                    delete [] (char *) header;
                return physical_block;
            }
            extent++;
        }
        if(isallocated)
            delete [] (char *) header;      // fix: not-found path used to leak
        return 0;
    }

    /* Interior level: find the child subtree whose range contains lbn. */
    index = EXT_FIRST_INDEX(header);
    for(int i = 0; i < header->eh_entries; i++)
    {
        /* Descend when this is the last index or lbn precedes the next one. */
        if((i == (header->eh_entries - 1)) || (lbn < (index + 1)->ei_block))
        {
            child = (EXT4_EXTENT_HEADER *) new char [blocksize];
            block = idx_to_block(index);
            ReadBlock(block, (void *) child);
            if(isallocated)
                delete [] (char *) header;  // fix: descent used to leak
            return ExtentBinarySearch(child, lbn, true);
        }
        index++;                            // fix: index was never advanced
    }

    /* We reach here if we do not find the key. */
    if(isallocated)
        delete [] (char *) header;
    return physical_block;
}
/*
 * Rewrite the index entry pointing at metadata block @from so that it
 * points at @to instead, for the subtree whose first logical block is
 * @logical.  @at_block == 0 means "start at the inode's root extent
 * header" (which lives in the mmapped on-disk inode); otherwise the
 * header is read from disk block @at_block.  Recurses down index levels
 * until the matching entry is found.
 *
 * Returns 0 on success, -1 with errno set on failure (EINVAL when no
 * matching index entry exists at this level).
 *
 * Fixes over the previous version:
 *  - the errno = EINVAL failure paths left ret == 0, so callers saw
 *    success even though nothing was updated; they now return -1,
 *    consistent with the msync()/read_block() error convention;
 *  - the malloc() result is now checked.
 */
static int update_metadata_move(struct defrag_ctx *c, struct inode *inode,
                                blk64_t from, blk64_t to, __u32 logical,
                                blk64_t at_block)
{
	int ret = 0;
	struct ext3_extent_header *header;
	struct ext3_extent_idx *idx;

	if (at_block == 0) {
		/* Root header lives inside the mmapped on-disk inode. */
		header = &inode->on_disk->extents.hdr;
	} else {
		header = malloc(EXT2_BLOCK_SIZE(&c->sb));
		if (!header)            /* fix: result was not checked */
			return -1;
		ret = read_block(c, header, at_block);
		if (ret)
			goto out_noupdate;
	}
	if (!header->eh_depth) {
		/* A depth-0 header has no index entries to rewrite. */
		errno = EINVAL;
		ret = -1;               /* fix: used to return 0 (success) */
		goto out_noupdate;
	}
	for (idx = EXT_FIRST_INDEX(header); idx <= EXT_LAST_INDEX(header);
	     idx++) {
		if (idx->ei_block > logical) {
			errno = EINVAL;
			ret = -1;       /* fix: used to return 0 (success) */
			goto out_noupdate;
		}
		if (idx->ei_block == logical && EI_BLOCK(idx) == from) {
			EI_LEAF_SET(idx, to);
			goto out_update;
		}
		/* Not in this entry: recurse into the child that covers
		 * @logical (last entry, or next entry starts past it). */
		if (idx + 1 > EXT_LAST_INDEX(header)
		    || (idx + 1)->ei_block > logical) {
			ret = update_metadata_move(c, inode, from, to, logical,
						   EI_BLOCK(idx));
			goto out_noupdate;
		}
	}
	errno = EINVAL;
	ret = -1;                       /* fix: used to return 0 (success) */
	goto out_noupdate;

out_update:
	if (at_block) {
		ret = write_block(c, header, at_block);
	} else {
		/* Root header was edited in the mmapped inode: flush it. */
		ret = msync(PAGE_START(header), getpagesize(), MS_SYNC);
	}
out_noupdate:
	if (at_block)
		free(header);
	return ret;
}
/*
 * Free the extent meta data blocks only.
 *
 * Walks the root extent header held in the inode body and releases the
 * index blocks it references (recursively, via free_ext_idx).  The data
 * blocks themselves are untouched.  Returns 0 on success or the first
 * error from free_ext_idx.
 */
static int free_ext_block(handle_t *handle, struct inode *inode)
{
	struct ext4_inode_info *ei = EXT4_I(inode);
	struct ext4_extent_header *eh =
		(struct ext4_extent_header *)ei->i_data;
	struct ext4_extent_idx *ix;
	int i, err;

	/* Depth 0: no extra blocks allocated for extent meta data. */
	if (eh->eh_depth == 0)
		return 0;

	for (i = 0, ix = EXT_FIRST_INDEX(eh);
	     i < le16_to_cpu(eh->eh_entries); i++, ix++) {
		err = free_ext_idx(handle, inode, ix);
		if (err)
			return err;
	}
	return 0;
}
/*
 * This function is responsible for (optionally) moving through the
 * extent tree and then returning the current extent
 *
 * The requested movement (EXT2_EXTENT_NEXT/PREV/..., masked out of
 * @flags) is first translated into a primitive op (DOWN, UP, NEXT_SIB,
 * ...) based on the current position in handle->path, the op is applied
 * in the switch below, and the extent/index under the new cursor is
 * copied into *extent.  The *_LEAF variants loop (via "retry") until a
 * leaf-level entry is reached.
 */
errcode_t ext2fs_extent_get(ext2_extent_handle_t handle, int flags,
			    struct ext2fs_extent *extent)
{
	struct extent_path *path, *newpath;
	struct ext3_extent_header *eh;
	struct ext3_extent_idx *ix = 0;
	struct ext3_extent *ex;
	errcode_t retval;
	blk_t blk;
	blk64_t end_blk;
	int orig_op, op;

	EXT2_CHECK_MAGIC(handle, EXT2_ET_MAGIC_EXTENT_HANDLE);

	if (!handle->path)
		return EXT2_ET_NO_CURRENT_NODE;

	/* orig_op keeps the caller's request; op is the primitive step. */
	orig_op = op = flags & EXT2_EXTENT_MOVE_MASK;

retry:
	path = handle->path + handle->level;

	/* Translate NEXT / NEXT_LEAF into a primitive op. */
	if ((orig_op == EXT2_EXTENT_NEXT) ||
	    (orig_op == EXT2_EXTENT_NEXT_LEAF)) {
		if (handle->level < handle->max_depth) {
			/* interior node */
			if (path->visit_num == 0) {
				/* First visit: descend before moving on. */
				path->visit_num++;
				op = EXT2_EXTENT_DOWN;
			} else if (path->left > 0)
				op = EXT2_EXTENT_NEXT_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_NEXT;
		} else {
			/* leaf node */
			if (path->left > 0)
				op = EXT2_EXTENT_NEXT_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_NEXT;
		}
		if (op != EXT2_EXTENT_NEXT_SIB) {
#ifdef DEBUG
			printf("<<<< OP = %s\n",
			       (op == EXT2_EXTENT_DOWN) ? "down" :
			       ((op == EXT2_EXTENT_UP) ? "up" : "unknown"));
#endif
		}
	}

	/* Translate PREV / PREV_LEAF into a primitive op. */
	if ((orig_op == EXT2_EXTENT_PREV) ||
	    (orig_op == EXT2_EXTENT_PREV_LEAF)) {
		if (handle->level < handle->max_depth) {
			/* interior node */
			if (path->visit_num > 0 ) {
				/* path->visit_num = 0; */
				op = EXT2_EXTENT_DOWN_AND_LAST;
			} else if (path->left < path->entries-1)
				op = EXT2_EXTENT_PREV_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_PREV;
		} else {
			/* leaf node */
			if (path->left < path->entries-1)
				op = EXT2_EXTENT_PREV_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_PREV;
		}
		if (op != EXT2_EXTENT_PREV_SIB) {
#ifdef DEBUG
			printf("<<<< OP = %s\n",
			       (op == EXT2_EXTENT_DOWN_AND_LAST) ? "down/last" :
			       ((op == EXT2_EXTENT_UP) ? "up" : "unknown"));
#endif
		}
	}

	/* LAST_LEAF: keep descending along the rightmost edge. */
	if (orig_op == EXT2_EXTENT_LAST_LEAF) {
		if ((handle->level < handle->max_depth) &&
		    (path->left == 0))
			op = EXT2_EXTENT_DOWN;
		else
			op = EXT2_EXTENT_LAST_SIB;
#ifdef DEBUG
		printf("<<<< OP = %s\n",
		       (op == EXT2_EXTENT_DOWN) ? "down" : "last_sib");
#endif
	}

	/* Apply the primitive op, leaving ix at the new cursor entry. */
	switch (op) {
	case EXT2_EXTENT_CURRENT:
		ix = path->curr;
		break;
	case EXT2_EXTENT_ROOT:
		handle->level = 0;
		path = handle->path + handle->level;
		/* fallthrough: ROOT resets, then behaves like FIRST_SIB */
	case EXT2_EXTENT_FIRST_SIB:
		path->left = path->entries;
		path->curr = 0;
		/* fallthrough: FIRST_SIB is NEXT_SIB from before the start */
	case EXT2_EXTENT_NEXT_SIB:
		if (path->left <= 0)
			return EXT2_ET_EXTENT_NO_NEXT;
		if (path->curr) {
			ix = path->curr;
			ix++;
		} else {
			/* No current entry: start at the first index. */
			eh = (struct ext3_extent_header *) path->buf;
			ix = EXT_FIRST_INDEX(eh);
		}
		path->left--;
		path->curr = ix;
		path->visit_num = 0;
		break;
	case EXT2_EXTENT_PREV_SIB:
		if (!path->curr ||
		    path->left+1 >= path->entries)
			return EXT2_ET_EXTENT_NO_PREV;
		ix = path->curr;
		ix--;
		path->curr = ix;
		path->left++;
		/* Interior entry reached by moving backwards counts as
		 * already visited, so NEXT will descend into it. */
		if (handle->level < handle->max_depth)
			path->visit_num = 1;
		break;
	case EXT2_EXTENT_LAST_SIB:
		eh = (struct ext3_extent_header *) path->buf;
		path->curr = EXT_LAST_EXTENT(eh);
		ix = path->curr;
		path->left = 0;
		path->visit_num = 0;
		break;
	case EXT2_EXTENT_UP:
		if (handle->level <= 0)
			return EXT2_ET_EXTENT_NO_UP;
		handle->level--;
		path--;
		ix = path->curr;
		if ((orig_op == EXT2_EXTENT_PREV) ||
		    (orig_op == EXT2_EXTENT_PREV_LEAF))
			path->visit_num = 0;
		break;
	case EXT2_EXTENT_DOWN:
	case EXT2_EXTENT_DOWN_AND_LAST:
		if (!path->curr ||(handle->level >= handle->max_depth))
			return EXT2_ET_EXTENT_NO_DOWN;

		ix = path->curr;
		newpath = path + 1;
		/* Lazily allocate the child level's block buffer. */
		if (!newpath->buf) {
			retval = ext2fs_get_mem(handle->fs->blocksize,
						&newpath->buf);
			if (retval)
				return retval;
		}
		/* Child block number: 48-bit, split lo/hi on disk. */
		blk = ext2fs_le32_to_cpu(ix->ei_leaf) +
			((__u64) ext2fs_le16_to_cpu(ix->ei_leaf_hi) << 32);
		if ((handle->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
		    (handle->fs->io != handle->fs->image_io))
			/* Image files carry no data blocks: fake a zeroed
			 * child rather than reading it. */
			memset(newpath->buf, 0, handle->fs->blocksize);
		else {
			retval = io_channel_read_blk(handle->fs->io,
						     blk, 1, newpath->buf);
			if (retval)
				return retval;
		}
		handle->level++;

		eh = (struct ext3_extent_header *) newpath->buf;

		retval = ext2fs_extent_header_verify(eh, handle->fs->blocksize);
		if (retval) {
			/* Bad child: back out the level change. */
			handle->level--;
			return retval;
		}

		newpath->left = newpath->entries =
			ext2fs_le16_to_cpu(eh->eh_entries);
		newpath->max_entries = ext2fs_le16_to_cpu(eh->eh_max);

		/* Child's end block: start of the parent's next entry, or
		 * inherit the parent's own end when we are its last child. */
		if (path->left > 0) {
			ix++;
			newpath->end_blk = ext2fs_le32_to_cpu(ix->ei_block);
		} else
			newpath->end_blk = path->end_blk;

		path = newpath;
		if (op == EXT2_EXTENT_DOWN) {
			ix = EXT_FIRST_INDEX((struct ext3_extent_header *) eh);
			path->curr = ix;
			path->left = path->entries - 1;
			path->visit_num = 0;
		} else {
			/* DOWN_AND_LAST: land on the last child entry. */
			ix = EXT_LAST_INDEX((struct ext3_extent_header *) eh);
			path->curr = ix;
			path->left = 0;
			if (handle->level < handle->max_depth)
				path->visit_num = 1;
		}
#ifdef DEBUG
		printf("Down to level %d/%d, end_blk=%llu\n",
		       handle->level, handle->max_depth,
		       path->end_blk);
#endif
		break;
	default:
		return EXT2_ET_OP_NOT_SUPPORTED;
	}

	if (!ix)
		return EXT2_ET_NO_CURRENT_NODE;

	/* Fill *extent from the entry under the cursor. */
	extent->e_flags = 0;
#ifdef DEBUG
	printf("(Left %d)\n", path->left);
#endif
	if (handle->level == handle->max_depth) {
		/* Leaf level: the entry is an extent (lblk -> pblk, len). */
		ex = (struct ext3_extent *) ix;

		extent->e_pblk = ext2fs_le32_to_cpu(ex->ee_start) +
			((__u64) ext2fs_le16_to_cpu(ex->ee_start_hi) << 32);
		extent->e_lblk = ext2fs_le32_to_cpu(ex->ee_block);
		extent->e_len = ext2fs_le16_to_cpu(ex->ee_len);
		extent->e_flags |= EXT2_EXTENT_FLAGS_LEAF;
		/* Lengths above EXT_INIT_MAX_LEN encode uninitialized
		 * (preallocated) extents. */
		if (extent->e_len > EXT_INIT_MAX_LEN) {
			extent->e_len -= EXT_INIT_MAX_LEN;
			extent->e_flags |= EXT2_EXTENT_FLAGS_UNINIT;
		}
	} else {
		/* Interior level: the entry is an index; its logical length
		 * runs up to the next sibling (or the parent's end). */
		extent->e_pblk = ext2fs_le32_to_cpu(ix->ei_leaf) +
			((__u64) ext2fs_le16_to_cpu(ix->ei_leaf_hi) << 32);
		extent->e_lblk = ext2fs_le32_to_cpu(ix->ei_block);
		if (path->left > 0) {
			ix++;
			end_blk = ext2fs_le32_to_cpu(ix->ei_block);
		} else
			end_blk = path->end_blk;

		extent->e_len = end_blk - extent->e_lblk;
	}
	if (path->visit_num)
		extent->e_flags |= EXT2_EXTENT_FLAGS_SECOND_VISIT;

	/* The *_LEAF requests keep stepping until a leaf entry is current. */
	if (((orig_op == EXT2_EXTENT_NEXT_LEAF) ||
	     (orig_op == EXT2_EXTENT_PREV_LEAF)) &&
	    (handle->level != handle->max_depth))
		goto retry;

	if ((orig_op == EXT2_EXTENT_LAST_LEAF) &&
	    ((handle->level != handle->max_depth) ||
	     (path->left != 0)))
		goto retry;

	return 0;
}