static void dbg_show_index(struct ext3_extent_idx *ix) { printf("index: block=%u leaf=%u leaf_hi=%u unused=%u\n", ext2fs_le32_to_cpu(ix->ei_block), ext2fs_le32_to_cpu(ix->ei_leaf), ext2fs_le16_to_cpu(ix->ei_leaf_hi), ext2fs_le16_to_cpu(ix->ei_unused)); }
// On my current version of libext2 the extra time fields ar not bigendian corrected // We want this solved temporarily here with this function static void le_to_cpu_swap_extra_time(struct ext2_inode_large *inode, char *inode_buf){ //inode->i_pad1 = ext2fs_le16_to_cpu(((struct ext2_inode_large *))inode_buf->i_pad1); inode->i_ctime_extra = ext2fs_le32_to_cpu(((struct ext2_inode_large *)inode_buf)->i_ctime_extra); inode->i_mtime_extra = ext2fs_le32_to_cpu(((struct ext2_inode_large *)inode_buf)->i_mtime_extra ); inode->i_atime_extra = ext2fs_le32_to_cpu(((struct ext2_inode_large *)inode_buf)->i_atime_extra ); inode->i_crtime = ext2fs_le32_to_cpu(((struct ext2_inode_large *)inode_buf)->i_crtime ); inode->i_crtime_extra = ext2fs_le32_to_cpu(((struct ext2_inode_large *)inode_buf)->i_crtime_extra ); //inode->i_version_hi = ext2fs_le32_to_cpu(((struct ext2_inode_large *)inode_buf)->i_version_hi ); }
/*
 * Dump one interior (index) node of an htree directory to the pager:
 * the count/limit header, the metadata_csum tail when present, and
 * every hash->block entry.  Recurses via htree_dump_int_block() while
 * level > 0, otherwise dumps leaf nodes.
 */
static void htree_dump_int_node(ext2_filsys fs, ext2_ino_t ino,
				struct ext2_inode *inode,
				struct ext2_dx_root_info * rootnode,
				struct ext2_dx_entry *ent,
				char *buf, int level)
{
	struct ext2_dx_countlimit limit;
	struct ext2_dx_entry e;
	struct ext2_dx_tail *tail;
	int hash, i;
	int remainder;

	/* The count/limit header overlays the first dx_entry slot. */
	limit = *((struct ext2_dx_countlimit *) ent);
	limit.count = ext2fs_le16_to_cpu(limit.count);
	limit.limit = ext2fs_le16_to_cpu(limit.limit);

	fprintf(pager, "Number of entries (count): %d\n", limit.count);
	fprintf(pager, "Number of entries (limit): %d\n", limit.limit);

	/*
	 * Compute the bytes that follow the entry array.  The root block
	 * additionally holds the dx_root_info plus 24 bytes (the fake
	 * '.'/'..' dirents); interior blocks hold an 8-byte fake dirent.
	 * If exactly a dx_tail remains and the fs has metadata checksums,
	 * print the stored checksum.
	 */
	remainder = fs->blocksize - (limit.limit * sizeof(struct ext2_dx_entry));
	if (ent == (struct ext2_dx_entry *)(rootnode + 1))
		remainder -= sizeof(struct ext2_dx_root_info) + 24;
	else
		remainder -= 8;
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
				EXT4_FEATURE_RO_COMPAT_METADATA_CSUM) &&
	    remainder == sizeof(struct ext2_dx_tail)) {
		tail = (struct ext2_dx_tail *)(ent + limit.limit);
		fprintf(pager, "Checksum: 0x%08x\n",
			ext2fs_le32_to_cpu(tail->dt_checksum));
	}

	/* First pass: list entries, flagging odd (collision) hashes. */
	for (i=0; i < limit.count; i++) {
		hash = i ? ext2fs_le32_to_cpu(ent[i].hash) : 0;
		fprintf(pager, "Entry #%d: Hash 0x%08x%s, block %u\n", i,
			hash, (hash & 1) ? " (**)" : "",
			ext2fs_le32_to_cpu(ent[i].block));
	}
	fprintf(pager, "\n");

	/* Second pass: recurse into each child block. */
	for (i=0; i < limit.count; i++) {
		e.hash = ext2fs_le32_to_cpu(ent[i].hash);
		e.block = ext2fs_le32_to_cpu(ent[i].block);
		fprintf(pager, "Entry #%d: Hash 0x%08x, block %u\n", i,
			i ? e.hash : 0, e.block);
		if (level)
			htree_dump_int_block(fs, ino, inode, rootnode,
					     e.block, buf, level-1);
		else
			htree_dump_leaf_node(fs, ino, inode, rootnode,
					     e.block, buf);
	}

	fprintf(pager, "---------------------\n");
}
static void dbg_show_extent(struct ext3_extent *ex) { printf("extent: block=%u-%u len=%u start=%u start_hi=%u\n", ext2fs_le32_to_cpu(ex->ee_block), ext2fs_le32_to_cpu(ex->ee_block) + ext2fs_le16_to_cpu(ex->ee_len) - 1, ext2fs_le16_to_cpu(ex->ee_len), ext2fs_le32_to_cpu(ex->ee_start), ext2fs_le16_to_cpu(ex->ee_start_hi)); }
/*
 * Flush the node at the current level of the extent path back to disk.
 * Level 0 lives inside the inode itself; deeper levels are full blocks
 * whose 48-bit location is assembled from the parent index entry
 * (ei_leaf | ei_leaf_hi << 32).  The block checksum is refreshed
 * before the block is written.
 */
static errcode_t update_path(ext2_extent_handle_t handle)
{
	struct ext3_extent_header *eh;
	struct ext3_extent_idx *ix;
	errcode_t err;
	blk64_t blk;

	if (handle->level == 0)
		return ext2fs_write_inode(handle->fs, handle->ino,
					  handle->inode);

	ix = handle->path[handle->level - 1].curr;
	blk = ext2fs_le32_to_cpu(ix->ei_leaf) +
	      ((__u64) ext2fs_le16_to_cpu(ix->ei_leaf_hi) << 32);

	/* Recompute the extent block checksum before writing it out. */
	eh = (struct ext3_extent_header *) handle->path[handle->level].buf;
	err = ext2fs_extent_block_csum_set(handle->fs, handle->ino, eh);
	if (err)
		return err;

	return io_channel_write_blk64(handle->fs->io, blk, 1,
				      handle->path[handle->level].buf);
}
/* Debugging aid: print the fields of an extent tree node header. */
static void dbg_show_header(struct ext3_extent_header *eh)
{
	unsigned magic = ext2fs_le16_to_cpu(eh->eh_magic);
	unsigned entries = ext2fs_le16_to_cpu(eh->eh_entries);
	unsigned max = ext2fs_le16_to_cpu(eh->eh_max);
	unsigned depth = ext2fs_le16_to_cpu(eh->eh_depth);
	unsigned gen = ext2fs_le32_to_cpu(eh->eh_generation);

	printf("header: magic=%x entries=%u max=%u depth=%u generation=%u\n",
	       magic, entries, max, depth, gen);
}
/*
 * Return non-zero iff the on-disk quota entry at @dp is in use and
 * belongs to the id carried by @dquot.
 */
static int v2r1_is_id(void *dp, struct dquot *dquot)
{
	struct v2r1_disk_dqblk *entry = dp;
	struct qtree_mem_dqinfo *tree_info =
		&dquot->dq_h->qh_info.u.v2_mdqi.dqi_qtree;

	if (qtree_entry_unused(tree_info, dp))
		return 0;

	return ext2fs_le32_to_cpu(entry->dqb_id) == dquot->dq_id;
}
/*
 * Dump an interior htree index node to the pager: header counts, then
 * each hash->block mapping, then recurse into the children (interior
 * blocks while level > 0, leaf blocks at level 0).
 */
static void htree_dump_int_node(ext2_filsys fs, ext2_ino_t ino,
				struct ext2_inode *inode,
				struct ext2_dx_root_info * rootnode,
				struct ext2_dx_entry *ent,
				char *buf, int level)
{
	struct ext2_dx_countlimit hdr;
	int idx, hashval;

	/* The count/limit header overlays the first entry slot. */
	hdr = *((struct ext2_dx_countlimit *) ent);
	hdr.count = ext2fs_le16_to_cpu(hdr.count);
	hdr.limit = ext2fs_le16_to_cpu(hdr.limit);

	fprintf(pager, "Number of entries (count): %d\n", hdr.count);
	fprintf(pager, "Number of entries (limit): %d\n", hdr.limit);

	/* List every entry, marking odd (collision-continuation) hashes. */
	for (idx = 0; idx < hdr.count; idx++) {
		hashval = idx ? ext2fs_le32_to_cpu(ent[idx].hash) : 0;
		fprintf(pager, "Entry #%d: Hash 0x%08x%s, block %u\n",
			idx, hashval, (hashval & 1) ? " (**)" : "",
			ext2fs_le32_to_cpu(ent[idx].block));
	}

	fprintf(pager, "\n");

	/* Now walk the children. */
	for (idx = 0; idx < hdr.count; idx++) {
		struct ext2_dx_entry cur;

		cur.hash = ext2fs_le32_to_cpu(ent[idx].hash);
		cur.block = ext2fs_le32_to_cpu(ent[idx].block);
		fprintf(pager, "Entry #%d: Hash 0x%08x, block %u\n",
			idx, idx ? cur.hash : 0, cur.block);
		if (level)
			htree_dump_int_block(fs, ino, inode, rootnode,
					     cur.block, buf, level - 1);
		else
			htree_dump_leaf_node(fs, ino, inode, rootnode,
					     cur.block, buf);
	}

	fprintf(pager, "---------------------\n");
}
/* * Check whether given quota file is in our format */ static int v2_check_file(struct quota_handle *h, int type, int fmt) { struct v2_disk_dqheader dqh; int file_magics[] = INITQMAGICS; if (fmt != QFMT_VFS_V1) return 0; if (!v2_read_header(h, &dqh)) return 0; if (ext2fs_le32_to_cpu(dqh.dqh_magic) != file_magics[type]) { if (ext2fs_be32_to_cpu(dqh.dqh_magic) == file_magics[type]) log_err("Your quota file is stored in wrong endianity"); return 0; } if (V2_VERSION != ext2fs_le32_to_cpu(dqh.dqh_version)) return 0; return 1; }
/*
 * Convert an on-disk ext4 ACL extended attribute into the FUSE acl_ea
 * wire format.  On success, *facl points to a malloc'd buffer (caller
 * frees) and *facl_sz holds its size; returns 0, or a negative errno
 * (-EINVAL for malformed input, -ENOMEM on allocation failure).
 *
 * Fix: validate eacl_count and the header version BEFORE deriving
 * f_sz from the count — the old code computed acl_ea_size() on a
 * possibly negative count first.
 */
int ext4_to_fuse_acl(acl_ea_header **facl, size_t *facl_sz,
		const ext4_acl_header *eacl, size_t eacl_sz)
{
	int i, eacl_count;
	acl_ea_header *f;
	ext4_acl_entry *e;
	acl_ea_entry *a;
	size_t f_sz;
	unsigned char *hptr;
	int err = 0;

	eacl_count = ext4_acl_count(eacl_sz);
	if (eacl_count < 0 ||
	    eacl->a_version != ext2fs_cpu_to_le32(EXT4_ACL_VERSION))
		return -EINVAL;

	f_sz = acl_ea_size(eacl_count);
	f = malloc(f_sz);
	if (!f)
		return -ENOMEM;

	f->a_version = ACL_EA_VERSION;
	hptr = (unsigned char *) (eacl + 1);
	for (i = 0, a = f->a_entries; i < eacl_count; i++, a++) {
		e = (ext4_acl_entry *) hptr;
		a->e_tag = ext2fs_le16_to_cpu(e->e_tag);
		a->e_perm = ext2fs_le16_to_cpu(e->e_perm);

		switch (a->e_tag) {
		case ACL_USER:
		case ACL_GROUP:
			/* These tags carry a qualifier id on disk. */
			a->e_id = ext2fs_le32_to_cpu(e->e_id);
			hptr += sizeof(ext4_acl_entry);
			break;
		case ACL_USER_OBJ:
		case ACL_GROUP_OBJ:
		case ACL_MASK:
		case ACL_OTHER:
			/* Short entries have no id field on disk. */
			hptr += sizeof(ext4_acl_entry_short);
			break;
		default:
			err = -EINVAL;
			goto out;
		}
	}

	*facl = f;
	*facl_sz = f_sz;
	return err;
out:
	free(f);
	return err;
}
static void dump_header(struct undo_header *hdr) { printf("nr keys:\t%llu\n", ext2fs_le64_to_cpu(hdr->num_keys)); printf("super block:\t%llu\n", ext2fs_le64_to_cpu(hdr->super_offset)); printf("key block:\t%llu\n", ext2fs_le64_to_cpu(hdr->key_offset)); printf("block size:\t%u\n", ext2fs_le32_to_cpu(hdr->block_size)); printf("fs block size:\t%u\n", ext2fs_le32_to_cpu(hdr->fs_block_size)); printf("super crc:\t0x%x\n", ext2fs_le32_to_cpu(hdr->sb_crc)); printf("state:\t\t0x%x\n", ext2fs_le32_to_cpu(hdr->state)); printf("compat:\t\t0x%x\n", ext2fs_le32_to_cpu(hdr->f_compat)); printf("incompat:\t0x%x\n", ext2fs_le32_to_cpu(hdr->f_incompat)); printf("rocompat:\t0x%x\n", ext2fs_le32_to_cpu(hdr->f_rocompat)); printf("header crc:\t0x%x\n", ext2fs_le32_to_cpu(hdr->header_crc)); }
/*
 * Verify that the filesystem on @channel matches the superblock image
 * stored in the undo file, so e2undo "patches" cannot be applied to the
 * wrong filesystem or out of order.  Returns 0 on match, -1 on
 * mismatch/CRC failure, or an I/O error code.
 */
static int check_filesystem(struct undo_context *ctx, io_channel channel)
{
	struct ext2_super_block super, *sb;
	char *buf;
	__u32 sb_crc;
	errcode_t retval;

	/*
	 * Set the channel block size to the superblock offset so that
	 * block 1 lands exactly on the superblock; the negative count
	 * asks for SUPERBLOCK_SIZE bytes rather than blocks.
	 */
	io_channel_set_blksize(channel, SUPERBLOCK_OFFSET);
	retval = io_channel_read_blk64(channel, 1, -SUPERBLOCK_SIZE, &super);
	if (retval) {
		com_err(prg_name, retval, "%s",
			_("while reading filesystem superblock."));
		return retval;
	}

	/*
	 * Compare the FS and the undo file superblock so that we can't apply
	 * e2undo "patches" out of order.
	 */
	retval = ext2fs_get_mem(ctx->blocksize, &buf);
	if (retval) {
		com_err(prg_name, retval, "%s", _("while allocating memory"));
		return retval;
	}
	retval = io_channel_read_blk64(ctx->undo_file, ctx->super_block,
				       -SUPERBLOCK_SIZE, buf);
	if (retval) {
		com_err(prg_name, retval, "%s", _("while fetching superblock"));
		goto out;
	}
	sb = (struct ext2_super_block *)buf;
	/*
	 * The undo file's superblock copy is stored with the magic
	 * inverted (presumably so it is never mistaken for a live
	 * superblock — NOTE(review): confirm against the writer side);
	 * flip it back before the byte-for-byte comparison.
	 */
	sb->s_magic = ~sb->s_magic;
	if (memcmp(&super, buf, sizeof(super))) {
		print_undo_mismatch(&super, (struct ext2_super_block *)buf);
		retval = -1;
		goto out;
	}
	/* Cross-check the CRC recorded in the undo header. */
	sb_crc = ext2fs_crc32c_le(~0, (unsigned char *)buf, SUPERBLOCK_SIZE);
	if (ext2fs_le32_to_cpu(ctx->hdr.sb_crc) != sb_crc) {
		fprintf(stderr,
			_("Undo file superblock checksum doesn't match.\n"));
		retval = -1;
		goto out;
	}

out:
	ext2fs_free_mem(&buf);
	return retval;
}
/* * Copy dqinfo from disk to memory */ static inline void v2_disk2memdqinfo(struct util_dqinfo *m, struct v2_disk_dqinfo *d) { m->dqi_bgrace = ext2fs_le32_to_cpu(d->dqi_bgrace); m->dqi_igrace = ext2fs_le32_to_cpu(d->dqi_igrace); m->u.v2_mdqi.dqi_flags = ext2fs_le32_to_cpu(d->dqi_flags) & V2_DQF_MASK; m->u.v2_mdqi.dqi_qtree.dqi_blocks = ext2fs_le32_to_cpu(d->dqi_blocks); m->u.v2_mdqi.dqi_qtree.dqi_free_blk = ext2fs_le32_to_cpu(d->dqi_free_blk); m->u.v2_mdqi.dqi_qtree.dqi_free_entry = ext2fs_le32_to_cpu(d->dqi_free_entry); }
/*
 * Write the node at the current level of the extent path back to disk.
 * Level 0 is stored in the inode itself; deeper levels are blocks whose
 * 48-bit physical number is assembled from the parent index entry
 * (ei_leaf | ei_leaf_hi << 32).
 *
 * Fix: the assembled block number can exceed 2^32, so it must be
 * written with io_channel_write_blk64(); the previous 32-bit
 * io_channel_write_blk() silently truncated it.
 */
static errcode_t update_path(ext2_extent_handle_t handle)
{
	blk64_t blk;
	errcode_t retval;
	struct ext3_extent_idx *ix;

	if (handle->level == 0) {
		retval = ext2fs_write_inode_full(handle->fs, handle->ino,
				handle->inode,
				EXT2_INODE_SIZE(handle->fs->super));
	} else {
		ix = handle->path[handle->level - 1].curr;
		blk = ext2fs_le32_to_cpu(ix->ei_leaf) +
			((__u64) ext2fs_le16_to_cpu(ix->ei_leaf_hi) << 32);

		retval = io_channel_write_blk64(handle->fs->io, blk, 1,
				handle->path[handle->level].buf);
	}
	return retval;
}
/* * Copy dquot from disk to memory */ static void v2r1_disk2memdqblk(struct dquot *dquot, void *dp) { struct util_dqblk *m = &dquot->dq_dqb; struct v2r1_disk_dqblk *d = dp, empty; dquot->dq_id = ext2fs_le32_to_cpu(d->dqb_id); m->dqb_ihardlimit = ext2fs_le64_to_cpu(d->dqb_ihardlimit); m->dqb_isoftlimit = ext2fs_le64_to_cpu(d->dqb_isoftlimit); m->dqb_bhardlimit = ext2fs_le64_to_cpu(d->dqb_bhardlimit); m->dqb_bsoftlimit = ext2fs_le64_to_cpu(d->dqb_bsoftlimit); m->dqb_curinodes = ext2fs_le64_to_cpu(d->dqb_curinodes); m->dqb_curspace = ext2fs_le64_to_cpu(d->dqb_curspace); m->dqb_itime = ext2fs_le64_to_cpu(d->dqb_itime); m->dqb_btime = ext2fs_le64_to_cpu(d->dqb_btime); memset(&empty, 0, sizeof(struct v2r1_disk_dqblk)); empty.dqb_itime = ext2fs_cpu_to_le64(1); if (!memcmp(&empty, dp, sizeof(struct v2r1_disk_dqblk))) m->dqb_itime = 0; }
/*
 * Mark the undo file header as using the fs-offset feature.  The header
 * is stored little-endian on disk, so the CPU-order feature constant
 * must be converted with ext2fs_cpu_to_le32() before being OR-ed in.
 * (The previous ext2fs_le32_to_cpu() produced identical bytes — a
 * 32-bit byte swap is its own inverse — but was semantically backwards.)
 */
static inline void e2undo_set_feature_fs_offset(struct undo_header *header)
{
	header->f_compat |= ext2fs_cpu_to_le32(E2UNDO_FEATURE_COMPAT_FS_OFFSET);
}
/*
 * Open the gekko block device: start the DISC_INTERFACE, read and
 * validate the ext2 superblock, derive the device size in sectors from
 * the filesystem block count, and construct the sector cache.
 * Returns 0 on success, -1 (with errno set) on failure.
 */
static errcode_t device_gekko_io_open(const char *name, int flags, io_channel *dev)
{
	// Get the device driver descriptor
	gekko_fd *fd = DEV_FD((*dev));
	if (!fd) {
		errno = EBADF;
		return -1;
	}

	// Get the device interface
	const DISC_INTERFACE* interface = fd->interface;
	if (!interface) {
		errno = ENODEV;
		return -1;
	}

	// Start the device interface and ensure that it is inserted
	if (!interface->startup()) {
		ext2_log_trace("device failed to start\n");
		errno = EIO;
		return -1;
	}
	if (!interface->isInserted()) {
		ext2_log_trace("device media is not inserted\n");
		errno = EIO;
		return -1;
	}

	struct ext2_super_block * super = (struct ext2_super_block *) mem_alloc(SUPERBLOCK_SIZE); //1024 bytes
	if(!super) {
		ext2_log_trace("no memory for superblock");
		errno = ENOMEM;
		return -1;
	}

	// Check that there is a valid EXT boot sector at the start of the
	// device (the superblock sits SUPERBLOCK_OFFSET bytes in).
	if (!interface->readSectors(fd->startSector+SUPERBLOCK_OFFSET/BYTES_PER_SECTOR, SUPERBLOCK_SIZE/BYTES_PER_SECTOR, super)) {
		ext2_log_trace("read failure @ sector %d\n", fd->startSector);
		errno = EROFS;
		mem_free(super);
		return -1;
	}

	if(ext2fs_le16_to_cpu(super->s_magic) != EXT2_SUPER_MAGIC) {
		mem_free(super);
		errno = EROFS;
		return -1;
	}

	// Parse the boot sector
	fd->sectorSize = BYTES_PER_SECTOR;
	fd->offset = 0;
	fd->sectorCount = 0;
	/*
	 * s_log_block_size encodes the fs block size (1024 << n for
	 * n = 0..3); convert the total block count into device sectors.
	 * Unknown values fall through to the 1024-byte case.
	 */
	switch(ext2fs_le32_to_cpu(super->s_log_block_size))
	{
		case 1:
			fd->sectorCount = (sec_t) ((u64) ext2fs_le32_to_cpu(super->s_blocks_count) * (u64) 2048 / (u64) BYTES_PER_SECTOR);
			break;
		case 2:
			fd->sectorCount = (sec_t) ((u64) ext2fs_le32_to_cpu(super->s_blocks_count) * (u64) 4096 / (u64) BYTES_PER_SECTOR);
			break;
		case 3:
			fd->sectorCount = (sec_t) ((u64) ext2fs_le32_to_cpu(super->s_blocks_count) * (u64) 8192 / (u64) BYTES_PER_SECTOR);
			break;
		default:
		case 0:
			fd->sectorCount = (sec_t) ((u64) ext2fs_le32_to_cpu(super->s_blocks_count) * (u64) 1024 / (u64) BYTES_PER_SECTOR);
			break;
	}
	mem_free(super);

	// Create the cache
	fd->cache = _EXT2_cache_constructor(fd->cachePageCount, fd->cachePageSize, interface, fd->startSector + fd->sectorCount, fd->sectorSize);
	return 0;
}
/*
 * Expand an inline-data directory (@ibuf, @size bytes) into a full
 * directory block (@bbuf): synthesize real '.' and '..' entries (inline
 * data stores only the parent inode number for '..'), copy the
 * remaining dirents across, stretch the last entry's rec_len to fill
 * the block, and append a checksum tail when metadata_csum is enabled.
 */
static errcode_t ext2fs_inline_data_convert_dir(ext2_filsys fs, ext2_ino_t ino,
						char *bbuf, char *ibuf, int size)
{
	struct ext2_dir_entry *dir, *dir2;
	struct ext2_dir_entry_tail *t;
	errcode_t retval;
	unsigned int offset, rec_len;
	int csum_size = 0;
	int filetype = 0;

	/* Reserve room for the dirent checksum tail when needed. */
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext2_dir_entry_tail);

	/* Create '.' and '..' */
	if (EXT2_HAS_INCOMPAT_FEATURE(fs->super,
				      EXT2_FEATURE_INCOMPAT_FILETYPE))
		filetype = EXT2_FT_DIR;

	/*
	 * Set up entry for '.'
	 */
	dir = (struct ext2_dir_entry *) bbuf;
	dir->inode = ino;
	ext2fs_dirent_set_name_len(dir, 1);
	ext2fs_dirent_set_file_type(dir, filetype);
	dir->name[0] = '.';
	rec_len = (fs->blocksize - csum_size) - EXT2_DIR_REC_LEN(1);
	dir->rec_len = EXT2_DIR_REC_LEN(1);

	/*
	 * Set up entry for '..'; the parent inode number is the first
	 * 32-bit LE word of the inline data.
	 */
	dir = (struct ext2_dir_entry *) (bbuf + dir->rec_len);
	dir->rec_len = EXT2_DIR_REC_LEN(2);
	dir->inode = ext2fs_le32_to_cpu(((__u32 *)ibuf)[0]);
	ext2fs_dirent_set_name_len(dir, 2);
	ext2fs_dirent_set_file_type(dir, filetype);
	dir->name[0] = '.';
	dir->name[1] = '.';

	/*
	 * Adjust the last rec_len: copy the remaining inline dirents,
	 * walk to the final entry, and extend it to the end of the
	 * block (minus the checksum tail).
	 */
	offset = EXT2_DIR_REC_LEN(1) + EXT2_DIR_REC_LEN(2);
	dir = (struct ext2_dir_entry *) (bbuf + offset);
	memcpy(bbuf + offset, ibuf + EXT4_INLINE_DATA_DOTDOT_SIZE,
	       size - EXT4_INLINE_DATA_DOTDOT_SIZE);
	size += EXT2_DIR_REC_LEN(1) + EXT2_DIR_REC_LEN(2) -
		EXT4_INLINE_DATA_DOTDOT_SIZE;

	do {
		dir2 = dir;
		retval = ext2fs_get_rec_len(fs, dir, &rec_len);
		if (retval)
			goto err;
		offset += rec_len;
		dir = (struct ext2_dir_entry *) (bbuf + offset);
	} while (offset < size);
	rec_len += fs->blocksize - csum_size - offset;
	retval = ext2fs_set_rec_len(fs, rec_len, dir2);
	if (retval)
		goto err;

	if (csum_size) {
		t = EXT2_DIRENT_TAIL(bbuf, fs->blocksize);
		ext2fs_initialize_dirent_tail(fs, t);
	}

err:
	return retval;
}
/*
 * Functions to read and write a single inode.
 */
/*
 * Read inode @ino into @inode (@bufsize bytes of it).  Consults a small
 * in-memory inode cache first; on a miss, locates the inode either via
 * the image-file header or the group inode table, copies it out of the
 * block buffer, verifies its checksum, byte-swaps on big-endian hosts,
 * and caches it.  @flags may contain READ_INODE_NOCSUM to suppress the
 * checksum failure return.
 */
errcode_t ext2fs_read_inode2(ext2_filsys fs, ext2_ino_t ino,
			     struct ext2_inode * inode, int bufsize,
			     int flags)
{
	blk64_t block_nr;
	dgrp_t group;
	unsigned long block, offset;
	char *ptr;
	errcode_t retval;
	unsigned i;
	int clen, inodes_per_block;
	io_channel io;
	int length = EXT2_INODE_SIZE(fs->super);
	struct ext2_inode_large *iptr;
	int cache_slot, fail_csum;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	/* Check to see if user has an override function */
	if (fs->read_inode &&
	    ((bufsize == sizeof(struct ext2_inode)) ||
	     (EXT2_INODE_SIZE(fs->super) == sizeof(struct ext2_inode)))) {
		retval = (fs->read_inode)(fs, ino, inode);
		if (retval != EXT2_ET_CALLBACK_NOTHANDLED)
			return retval;
	}
	if ((ino == 0) || (ino > fs->super->s_inodes_count))
		return EXT2_ET_BAD_INODE_NUM;
	/* Create inode cache if not present */
	if (!fs->icache) {
		retval = ext2fs_create_inode_cache(fs, 4);
		if (retval)
			return retval;
	}
	/* Check to see if it's in the inode cache */
	for (i = 0; i < fs->icache->cache_size; i++) {
		if (fs->icache->cache[i].ino == ino) {
			memcpy(inode, fs->icache->cache[i].inode,
			       (bufsize > length) ? length : bufsize);
			return 0;
		}
	}
	if (fs->flags & EXT2_FLAG_IMAGE_FILE) {
		/* e2image file: inode table location comes from the header. */
		inodes_per_block = fs->blocksize / EXT2_INODE_SIZE(fs->super);
		block_nr = ext2fs_le32_to_cpu(fs->image_header->offset_inode) /
			fs->blocksize;
		block_nr += (ino - 1) / inodes_per_block;
		offset = ((ino - 1) % inodes_per_block) *
			EXT2_INODE_SIZE(fs->super);
		io = fs->image_io;
	} else {
		/* Regular fs: locate the inode in its group's table. */
		group = (ino - 1) / EXT2_INODES_PER_GROUP(fs->super);
		if (group > fs->group_desc_count)
			return EXT2_ET_BAD_INODE_NUM;
		offset = ((ino - 1) % EXT2_INODES_PER_GROUP(fs->super)) *
			EXT2_INODE_SIZE(fs->super);
		block = offset >> EXT2_BLOCK_SIZE_BITS(fs->super);
		block_nr = ext2fs_inode_table_loc(fs, group);
		if (!block_nr)
			return EXT2_ET_MISSING_INODE_TABLE;
		/* Sanity-check the table location against the fs extent. */
		if ((block_nr < fs->super->s_first_data_block) ||
		    (block_nr + fs->inode_blocks_per_group - 1 >=
		     ext2fs_blocks_count(fs->super)))
			return EXT2_ET_GDESC_BAD_INODE_TABLE;
		block_nr += block;
		io = fs->io;
	}
	offset &= (EXT2_BLOCK_SIZE(fs->super) - 1);

	/* Copy into the next cache slot, possibly spanning two blocks. */
	cache_slot = (fs->icache->cache_last + 1) % fs->icache->cache_size;
	iptr = (struct ext2_inode_large *)fs->icache->cache[cache_slot].inode;
	ptr = (char *) iptr;
	while (length) {
		clen = length;
		if ((offset + length) > fs->blocksize)
			clen = fs->blocksize - offset;
		if (block_nr != fs->icache->buffer_blk) {
			retval = io_channel_read_blk64(io, block_nr, 1,
						       fs->icache->buffer);
			if (retval)
				return retval;
			fs->icache->buffer_blk = block_nr;
		}
		memcpy(ptr, ((char *) fs->icache->buffer) + (unsigned) offset,
		       clen);
		offset = 0;
		length -= clen;
		ptr += clen;
		block_nr++;
	}
	length = EXT2_INODE_SIZE(fs->super);

	/* Verify the inode checksum. */
	fail_csum = !ext2fs_inode_csum_verify(fs, ino, iptr);

#ifdef WORDS_BIGENDIAN
	/* Checksum is computed on the on-disk (LE) form; swap afterwards. */
	ext2fs_swap_inode_full(fs, (struct ext2_inode_large *) iptr,
			       (struct ext2_inode_large *) iptr,
			       0, length);
#endif

	/* Update the inode cache bookkeeping (skip caching bad inodes). */
	if (!fail_csum) {
		fs->icache->cache_last = cache_slot;
		fs->icache->cache[cache_slot].ino = ino;
	}
	memcpy(inode, iptr, (bufsize > length) ? length : bufsize);

	if (!(fs->flags & EXT2_FLAG_IGNORE_CSUM_ERRORS) &&
	    !(flags & READ_INODE_NOCSUM) && fail_csum)
		return EXT2_ET_INODE_CSUM_INVALID;

	return 0;
}
/*
 * Load the on-disk block and/or inode allocation bitmaps into the
 * in-memory bitmaps of @fs.  For e2image files the bitmaps are read
 * from the locations given in the image header; otherwise they are
 * read per group, honoring BLOCK_UNINIT/INODE_UNINIT (treated as
 * all-zero) and verifying bitmap checksums when enabled.  On error all
 * partially-built bitmaps are torn down.
 */
static errcode_t read_bitmaps(ext2_filsys fs, int do_inode, int do_block)
{
	dgrp_t i;
	char *block_bitmap = 0, *inode_bitmap = 0;
	char *buf;
	errcode_t retval;
	int block_nbytes = EXT2_CLUSTERS_PER_GROUP(fs->super) / 8;
	int inode_nbytes = EXT2_INODES_PER_GROUP(fs->super) / 8;
	int csum_flag;
	unsigned int cnt;
	blk64_t blk;
	blk64_t blk_itr = EXT2FS_B2C(fs, fs->super->s_first_data_block);
	blk64_t blk_cnt;
	ext2_ino_t ino_itr = 1;
	ext2_ino_t ino_cnt;

	EXT2_CHECK_MAGIC(fs, EXT2_ET_MAGIC_EXT2FS_FILSYS);

	/* A per-group bitmap must fit inside one block. */
	if ((block_nbytes > (int) fs->blocksize) ||
	    (inode_nbytes > (int) fs->blocksize))
		return EXT2_ET_CORRUPT_SUPERBLOCK;

	fs->write_bitmaps = ext2fs_write_bitmaps;

	csum_flag = ext2fs_has_group_desc_csum(fs);

	/* Scratch buffer for the bitmap description strings. */
	retval = ext2fs_get_mem(strlen(fs->device_name) + 80, &buf);
	if (retval)
		return retval;
	if (do_block) {
		if (fs->block_map)
			ext2fs_free_block_bitmap(fs->block_map);
		strcpy(buf, "block bitmap for ");
		strcat(buf, fs->device_name);
		retval = ext2fs_allocate_block_bitmap(fs, buf, &fs->block_map);
		if (retval)
			goto cleanup;
		retval = io_channel_alloc_buf(fs->io, 0, &block_bitmap);
		if (retval)
			goto cleanup;
	} else
		block_nbytes = 0;
	if (do_inode) {
		if (fs->inode_map)
			ext2fs_free_inode_bitmap(fs->inode_map);
		strcpy(buf, "inode bitmap for ");
		strcat(buf, fs->device_name);
		retval = ext2fs_allocate_inode_bitmap(fs, buf, &fs->inode_map);
		if (retval)
			goto cleanup;
		retval = io_channel_alloc_buf(fs->io, 0, &inode_bitmap);
		if (retval)
			goto cleanup;
	} else
		inode_nbytes = 0;
	ext2fs_free_mem(&buf);

	if (fs->flags & EXT2_FLAG_IMAGE_FILE) {
		/*
		 * e2image: bitmaps are stored contiguously at the offsets
		 * recorded in the image header; slurp them block by block.
		 */
		blk = (ext2fs_le32_to_cpu(fs->image_header->offset_inodemap) /
		       fs->blocksize);
		ino_cnt = fs->super->s_inodes_count;
		while (inode_bitmap && ino_cnt > 0) {
			retval = io_channel_read_blk64(fs->image_io, blk++,
						       1, inode_bitmap);
			if (retval)
				goto cleanup;
			cnt = fs->blocksize << 3;
			if (cnt > ino_cnt)
				cnt = ino_cnt;
			retval = ext2fs_set_inode_bitmap_range2(fs->inode_map,
						ino_itr, cnt, inode_bitmap);
			if (retval)
				goto cleanup;
			ino_itr += cnt;
			ino_cnt -= cnt;
		}
		blk = (ext2fs_le32_to_cpu(fs->image_header->offset_blockmap) /
		       fs->blocksize);
		blk_cnt = EXT2_GROUPS_TO_CLUSTERS(fs->super,
						  fs->group_desc_count);
		while (block_bitmap && blk_cnt > 0) {
			retval = io_channel_read_blk64(fs->image_io, blk++,
						       1, block_bitmap);
			if (retval)
				goto cleanup;
			cnt = fs->blocksize << 3;
			if (cnt > blk_cnt)
				cnt = blk_cnt;
			retval = ext2fs_set_block_bitmap_range2(fs->block_map,
						blk_itr, cnt, block_bitmap);
			if (retval)
				goto cleanup;
			blk_itr += cnt;
			blk_cnt -= cnt;
		}
		goto success_cleanup;
	}

	/* Regular fs: read each group's bitmap block. */
	for (i = 0; i < fs->group_desc_count; i++) {
		if (block_bitmap) {
			blk = ext2fs_block_bitmap_loc(fs, i);
			/* Uninitialized groups have an implicit all-zero map. */
			if (csum_flag &&
			    ext2fs_bg_flags_test(fs, i, EXT2_BG_BLOCK_UNINIT) &&
			    ext2fs_group_desc_csum_verify(fs, i))
				blk = 0;
			if (blk) {
				retval = io_channel_read_blk64(fs->io, blk,
							       1, block_bitmap);
				if (retval) {
					retval = EXT2_ET_BLOCK_BITMAP_READ;
					goto cleanup;
				}
				/* verify block bitmap checksum */
				if (!(fs->flags &
				      EXT2_FLAG_IGNORE_CSUM_ERRORS) &&
				    !ext2fs_block_bitmap_csum_verify(fs, i,
						block_bitmap, block_nbytes)) {
					retval =
					EXT2_ET_BLOCK_BITMAP_CSUM_INVALID;
					goto cleanup;
				}
			} else
				memset(block_bitmap, 0, block_nbytes);
			cnt = block_nbytes << 3;
			retval = ext2fs_set_block_bitmap_range2(fs->block_map,
						blk_itr, cnt, block_bitmap);
			if (retval)
				goto cleanup;
			blk_itr += block_nbytes << 3;
		}
		if (inode_bitmap) {
			blk = ext2fs_inode_bitmap_loc(fs, i);
			if (csum_flag &&
			    ext2fs_bg_flags_test(fs, i, EXT2_BG_INODE_UNINIT) &&
			    ext2fs_group_desc_csum_verify(fs, i))
				blk = 0;
			if (blk) {
				retval = io_channel_read_blk64(fs->io, blk,
							       1, inode_bitmap);
				if (retval) {
					retval = EXT2_ET_INODE_BITMAP_READ;
					goto cleanup;
				}
				/* verify inode bitmap checksum */
				if (!(fs->flags &
				      EXT2_FLAG_IGNORE_CSUM_ERRORS) &&
				    !ext2fs_inode_bitmap_csum_verify(fs, i,
						inode_bitmap, inode_nbytes)) {
					retval =
					EXT2_ET_INODE_BITMAP_CSUM_INVALID;
					goto cleanup;
				}
			} else
				memset(inode_bitmap, 0, inode_nbytes);
			cnt = inode_nbytes << 3;
			retval = ext2fs_set_inode_bitmap_range2(fs->inode_map,
						ino_itr, cnt, inode_bitmap);
			if (retval)
				goto cleanup;
			ino_itr += inode_nbytes << 3;
		}
	}

	/* Mark group blocks for any BLOCK_UNINIT groups */
	if (do_block) {
		retval = mark_uninit_bg_group_blocks(fs);
		if (retval)
			goto cleanup;
	}

success_cleanup:
	if (inode_bitmap)
		ext2fs_free_mem(&inode_bitmap);
	if (block_bitmap)
		ext2fs_free_mem(&block_bitmap);
	return 0;

cleanup:
	if (do_block) {
		ext2fs_free_mem(&fs->block_map);
		fs->block_map = 0;
	}
	if (do_inode) {
		ext2fs_free_mem(&fs->inode_map);
		fs->inode_map = 0;
	}
	if (inode_bitmap)
		ext2fs_free_mem(&inode_bitmap);
	if (block_bitmap)
		ext2fs_free_mem(&block_bitmap);
	if (buf)
		ext2fs_free_mem(&buf);
	return retval;
}
/*
 * Iterate over the directory entries of an inline-data directory @ino,
 * feeding them to ext2fs_process_dir_block() via the dir_context in
 * @priv_data.  Synthesizes '.' and '..' first (inline data stores only
 * the parent inode for '..'), then walks the dirents packed into
 * i_block, then those stored in the system.data extended attribute.
 * Writes the inode / EA back if the callback changed anything.
 */
int ext2fs_inline_data_dir_iterate(ext2_filsys fs, ext2_ino_t ino,
				   void *priv_data)
{
	struct dir_context *ctx;
	struct ext2_inode inode;
	struct ext2_dir_entry dirent;
	struct ext2_inline_data data;
	int ret = BLOCK_ABORT;
	e2_blkcnt_t blockcnt = 0;
	char *old_buf;
	unsigned int old_buflen;
	int old_flags;

	/* Save the caller's context; we temporarily repoint its buffer. */
	ctx = (struct dir_context *)priv_data;
	old_buf = ctx->buf;
	old_buflen = ctx->buflen;
	old_flags = ctx->flags;
	ctx->flags |= DIRENT_FLAG_INCLUDE_INLINE_DATA;

	ctx->errcode = ext2fs_read_inode(fs, ino, &inode);
	if (ctx->errcode)
		goto out;

	if (!(inode.i_flags & EXT4_INLINE_DATA_FL)) {
		ctx->errcode = EXT2_ET_NO_INLINE_DATA;
		goto out;
	}

	if (!LINUX_S_ISDIR(inode.i_mode)) {
		ctx->errcode = EXT2_ET_NO_DIRECTORY;
		goto out;
	}
	ret = 0;

	/* we first check '.' and '..' dir */
	dirent.inode = ino;
	dirent.name_len = 1;
	ext2fs_set_rec_len(fs, EXT2_DIR_REC_LEN(2), &dirent);
	dirent.name[0] = '.';
	dirent.name[1] = '\0';
	ctx->buf = (char *)&dirent;
	ext2fs_get_rec_len(fs, &dirent, &ctx->buflen);
	ret |= ext2fs_process_dir_block(fs, 0, blockcnt++, 0, 0, priv_data);
	if (ret & BLOCK_ABORT)
		goto out;

	/* '..': inline data keeps only the parent inode number. */
	dirent.inode = ext2fs_le32_to_cpu(inode.i_block[0]);
	dirent.name_len = 2;
	ext2fs_set_rec_len(fs, EXT2_DIR_REC_LEN(3), &dirent);
	dirent.name[0] = '.';
	dirent.name[1] = '.';
	dirent.name[2] = '\0';
	ctx->buf = (char *)&dirent;
	ext2fs_get_rec_len(fs, &dirent, &ctx->buflen);
	ret |= ext2fs_process_dir_block(fs, 0, blockcnt++, 0, 0, priv_data);
	if (ret & BLOCK_INLINE_DATA_CHANGED) {
		errcode_t err;

		/* The callback renamed '..'; persist the new parent. */
		inode.i_block[0] = ext2fs_cpu_to_le32(dirent.inode);
		err = ext2fs_write_inode(fs, ino, &inode);
		if (err)
			goto out;
		ret &= ~BLOCK_INLINE_DATA_CHANGED;
	}
	if (ret & BLOCK_ABORT)
		goto out;

	/* Dirents packed directly into i_block, after the '..' word. */
	ctx->buf = (char *)inode.i_block + EXT4_INLINE_DATA_DOTDOT_SIZE;
	ctx->buflen = EXT4_MIN_INLINE_DATA_SIZE - EXT4_INLINE_DATA_DOTDOT_SIZE;
#ifdef WORDS_BIGENDIAN
	ctx->errcode = ext2fs_dirent_swab_in2(fs, ctx->buf, ctx->buflen, 0);
	if (ctx->errcode) {
		ret |= BLOCK_ABORT;
		goto out;
	}
#endif
	ret |= ext2fs_process_dir_block(fs, 0, blockcnt++, 0, 0, priv_data);
	if (ret & BLOCK_INLINE_DATA_CHANGED) {
#ifdef WORDS_BIGENDIAN
		ctx->errcode = ext2fs_dirent_swab_out2(fs, ctx->buf,
						       ctx->buflen, 0);
		if (ctx->errcode) {
			ret |= BLOCK_ABORT;
			goto out;
		}
#endif
		ctx->errcode = ext2fs_write_inode(fs, ino, &inode);
		if (ctx->errcode)
			ret |= BLOCK_ABORT;
		ret &= ~BLOCK_INLINE_DATA_CHANGED;
	}
	if (ret & BLOCK_ABORT)
		goto out;

	/* Remaining dirents live in the system.data extended attribute. */
	data.fs = fs;
	data.ino = ino;
	ctx->errcode = ext2fs_inline_data_ea_get(&data);
	if (ctx->errcode) {
		ret |= BLOCK_ABORT;
		goto out;
	}
	if (data.ea_size <= 0)
		goto out1;

	ctx->buf = data.ea_data;
	ctx->buflen = data.ea_size;
#ifdef WORDS_BIGENDIAN
	ctx->errcode = ext2fs_dirent_swab_in2(fs, ctx->buf, ctx->buflen, 0);
	if (ctx->errcode) {
		ret |= BLOCK_ABORT;
		goto out1;
	}
#endif

	ret |= ext2fs_process_dir_block(fs, 0, blockcnt++, 0, 0, priv_data);
	if (ret & BLOCK_INLINE_DATA_CHANGED) {
#ifdef WORDS_BIGENDIAN
		ctx->errcode = ext2fs_dirent_swab_out2(fs, ctx->buf,
						       ctx->buflen, 0);
		if (ctx->errcode) {
			ret |= BLOCK_ABORT;
			goto out1;
		}
#endif
		ctx->errcode = ext2fs_inline_data_ea_set(&data);
		if (ctx->errcode)
			ret |= BLOCK_ABORT;
	}

out1:
	ext2fs_free_mem(&data.ea_data);
out:
	/* Restore the caller's context and strip internal status bits. */
	ctx->buf = old_buf;
	ctx->buflen = old_buflen;
	ctx->flags = old_flags;
	ret &= ~(BLOCK_ABORT | BLOCK_INLINE_DATA_CHANGED);
	return ret;
}
/*
 * Open the gekko block device (newer variant): start the DISC_INTERFACE,
 * read and validate the ext2 superblock, record the fs block size on the
 * io_channel, derive the device size in sectors, and build the cache.
 * Returns 0 on success, -1 (with errno set) on failure.
 */
static errcode_t device_gekko_io_open(const char *name, int flags, io_channel *dev)
{
	// Get the device driver descriptor
	gekko_fd *fd = DEV_FD((*dev));
	if (!fd) {
		errno = EBADF;
		return -1;
	}

	// Get the device interface
	const DISC_INTERFACE* interface = fd->interface;
	if (!interface) {
		errno = ENODEV;
		return -1;
	}

	// Start the device interface and ensure that it is inserted
	if (!interface->startup()) {
		ext2_log_trace("device failed to start\n");
		errno = EIO;
		return -1;
	}
	if (!interface->isInserted()) {
		ext2_log_trace("device media is not inserted\n");
		errno = EIO;
		return -1;
	}

	// Allocate 4 x max sector size in case of 4096 sector size
	u8 *buffer = (u8 *) mem_alloc(4 * MAX_SECTOR_SIZE);
	if(!buffer) {
		ext2_log_trace("no memory for superblock");
		errno = ENOMEM;
		return -1;
	}

	// Check that there is a valid EXT boot sector at the start of the device
	if (!interface->readSectors(fd->startSector, 4, buffer)) {
		ext2_log_trace("read failure @ sector %d\n", fd->startSector);
		errno = EROFS;
		mem_free(buffer);
		return -1;
	}

	// The superblock sits SUPERBLOCK_OFFSET bytes into the partition.
	struct ext2_super_block * super = (struct ext2_super_block *) (buffer + SUPERBLOCK_OFFSET);
	if(ext2fs_le16_to_cpu(super->s_magic) != EXT2_SUPER_MAGIC) {
		ext2_log_trace("super mismatch: read %04X - expected %04X\n",
			       ext2fs_le16_to_cpu(super->s_magic), EXT2_SUPER_MAGIC);
		mem_free(buffer);
		errno = EROFS;
		return -1;
	}

	/*
	 * s_log_block_size encodes the fs block size (1024 << n for
	 * n = 0..3); unknown values fall through to 1024 bytes.
	 */
	switch(ext2fs_le32_to_cpu(super->s_log_block_size))
	{
		case 1:
			(*dev)->block_size = 2048;
			break;
		case 2:
			(*dev)->block_size = 4096;
			break;
		case 3:
			(*dev)->block_size = 8192;
			break;
		default:
		case 0:
			(*dev)->block_size = 1024;
			break;
	}

	// Parse the boot sector
	fd->sectorSize = readSectorSize(interface);
	fd->offset = 0;
	fd->sectorCount = 0;
	// Convert the fs block count into device sectors.
	fd->sectorCount = (sec_t) ((u64) ext2fs_le32_to_cpu(super->s_blocks_count) * (u64) ((*dev)->block_size) / (u64) fd->sectorSize);
	mem_free(buffer);

	// Create the cache
	fd->cache = cache_constructor(fd->cachePageCount, fd->cachePageSize, interface, fd->startSector, fd->startSector + fd->sectorCount, fd->sectorSize);
	return 0;
}
static inline int e2undo_has_feature_fs_offset(struct undo_header *header) { return ext2fs_le32_to_cpu(header->f_compat) & E2UNDO_FEATURE_COMPAT_FS_OFFSET; }
/*
 * e2undo entry point: validate an undo file, then replay its saved "before"
 * blocks back onto the target device.
 *
 * Options: -f force (ignore checksum/feature/sanity failures where possible),
 * -h dump the undo file header and exit, -n dry run (no writes), -o offset
 * of the filesystem within the device, -v verbose, -z record this replay
 * into a new undo file.  Exit status is non-zero on usage/validation errors;
 * otherwise the csum_error flag is returned.
 */
int main(int argc, char *argv[])
{
	int c, force = 0, dry_run = 0, verbose = 0, dump = 0;
	io_channel channel;
	errcode_t retval;
	int mount_flags, csum_error = 0, io_error = 0;
	size_t i, keys_per_block;
	char *device_name, *tdb_file;
	io_manager manager = unix_io_manager;
	struct undo_context undo_ctx;
	char *buf;
	struct undo_key_block *keyb;
	struct undo_key *dkey;
	struct undo_key_info *ikey;
	__u32 key_crc, blk_crc, hdr_crc;
	blk64_t lblk;
	ext2_filsys fs;
	__u64 offset = 0;
	char opt_offset_string[40] = { 0 };

#ifdef ENABLE_NLS
	setlocale(LC_MESSAGES, "");
	setlocale(LC_CTYPE, "");
	bindtextdomain(NLS_CAT_NAME, LOCALEDIR);
	textdomain(NLS_CAT_NAME);
	set_com_err_gettext(gettext);
#endif
	add_error_table(&et_ext2_error_table);

	prg_name = argv[0];
	while ((c = getopt(argc, argv, "fhno:vz:")) != EOF) {
		switch (c) {
		case 'f':
			force = 1;
			break;
		case 'h':
			dump = 1;
			break;
		case 'n':
			dry_run = 1;
			break;
		case 'o':
			/* buf is reused here as strtoull's end pointer */
			offset = strtoull(optarg, &buf, 0);
			if (*buf) {
				com_err(prg_name, 0, _("illegal offset - %s"), optarg);
				exit(1);
			}
			/* used to indicate that an offset was specified */
			opt_offset_string[0] = 1;
			break;
		case 'v':
			verbose = 1;
			break;
		case 'z':
			undo_file = optarg;
			break;
		default:
			usage();
		}
	}
	if (argc != optind + 2)
		usage();
	tdb_file = argv[optind];
	device_name = argv[optind+1];

	/* Writing the replay's own undo into the file being replayed would
	 * corrupt it mid-read. */
	if (undo_file && strcmp(tdb_file, undo_file) == 0) {
		printf(_("Will not write to an undo file while replaying it.\n"));
		exit(1);
	}

	/* Interpret the undo file */
	retval = manager->open(tdb_file, IO_FLAG_EXCLUSIVE, &undo_ctx.undo_file);
	if (retval) {
		com_err(prg_name, errno, _("while opening undo file `%s'\n"), tdb_file);
		exit(1);
	}
	/* Negative count = byte count, so the header read is size-exact. */
	retval = io_channel_read_blk64(undo_ctx.undo_file, 0,
				       -(int)sizeof(undo_ctx.hdr), &undo_ctx.hdr);
	if (retval) {
		com_err(prg_name, retval, _("while reading undo file"));
		exit(1);
	}
	if (memcmp(undo_ctx.hdr.magic, E2UNDO_MAGIC, sizeof(undo_ctx.hdr.magic))) {
		fprintf(stderr, _("%s: Not an undo file.\n"), tdb_file);
		exit(1);
	}
	if (dump) {
		dump_header(&undo_ctx.hdr);
		exit(1);
	}
	/* Header CRC covers everything except the trailing crc field itself. */
	hdr_crc = ext2fs_crc32c_le(~0, (unsigned char *)&undo_ctx.hdr,
				   sizeof(struct undo_header) - sizeof(__u32));
	if (!force && ext2fs_le32_to_cpu(undo_ctx.hdr.header_crc) != hdr_crc) {
		fprintf(stderr, _("%s: Header checksum doesn't match.\n"), tdb_file);
		exit(1);
	}
	undo_ctx.blocksize = ext2fs_le32_to_cpu(undo_ctx.hdr.block_size);
	undo_ctx.fs_blocksize = ext2fs_le32_to_cpu(undo_ctx.hdr.fs_block_size);
	if (undo_ctx.blocksize == 0 || undo_ctx.fs_blocksize == 0) {
		fprintf(stderr, _("%s: Corrupt undo file header.\n"), tdb_file);
		exit(1);
	}
	if (!force && undo_ctx.blocksize > E2UNDO_MAX_BLOCK_SIZE) {
		fprintf(stderr, _("%s: Undo block size too large.\n"), tdb_file);
		exit(1);
	}
	if (!force && undo_ctx.blocksize < E2UNDO_MIN_BLOCK_SIZE) {
		fprintf(stderr, _("%s: Undo block size too small.\n"), tdb_file);
		exit(1);
	}
	undo_ctx.super_block = ext2fs_le64_to_cpu(undo_ctx.hdr.super_offset);
	undo_ctx.num_keys = ext2fs_le64_to_cpu(undo_ctx.hdr.num_keys);
	io_channel_set_blksize(undo_ctx.undo_file, undo_ctx.blocksize);
	/*
	 * Do not compare undo_ctx.hdr.f_compat with the available compatible
	 * features set, because a "missing" compatible feature should
	 * not cause any problems.
	 */
	if (!force && (undo_ctx.hdr.f_incompat || undo_ctx.hdr.f_rocompat)) {
		fprintf(stderr, _("%s: Unknown undo file feature set.\n"), tdb_file);
		exit(1);
	}

	/* open the fs */
	retval = ext2fs_check_if_mounted(device_name, &mount_flags);
	if (retval) {
		com_err(prg_name, retval, _("Error while determining whether "
					    "%s is mounted."), device_name);
		exit(1);
	}
	if (mount_flags & EXT2_MF_MOUNTED) {
		com_err(prg_name, retval, "%s", _("e2undo should only be run "
						  "on unmounted filesystems"));
		exit(1);
	}
	if (undo_file) {
		/* Record this replay into its own undo file (-z). */
		retval = e2undo_setup_tdb(device_name, &manager);
		if (retval)
			exit(1);
	}
	retval = manager->open(device_name,
			       IO_FLAG_EXCLUSIVE | (dry_run ? 0 : IO_FLAG_RW),
			       &channel);
	if (retval) {
		com_err(prg_name, retval, _("while opening `%s'"), device_name);
		exit(1);
	}

	/* -o on the command line overrides the offset stored in the header. */
	if (*opt_offset_string || e2undo_has_feature_fs_offset(&undo_ctx.hdr)) {
		if (!*opt_offset_string)
			offset = ext2fs_le64_to_cpu(undo_ctx.hdr.fs_offset);
		retval = snprintf(opt_offset_string, sizeof(opt_offset_string),
				  "offset=%llu", offset);
		if ((size_t) retval >= sizeof(opt_offset_string)) {
			/* should not happen... */
			com_err(prg_name, 0, _("specified offset is too large"));
			exit(1);
		}
		io_channel_set_options(channel, opt_offset_string);
	}

	if (!force && check_filesystem(&undo_ctx, channel))
		exit(1);

	/* prepare to read keys */
	retval = ext2fs_get_mem(sizeof(struct undo_key_info) * undo_ctx.num_keys,
				&undo_ctx.keys);
	if (retval) {
		com_err(prg_name, retval, "%s", _("while allocating memory"));
		exit(1);
	}
	ikey = undo_ctx.keys;
	retval = ext2fs_get_mem(undo_ctx.blocksize, &keyb);
	if (retval) {
		com_err(prg_name, retval, "%s", _("while allocating memory"));
		exit(1);
	}
	retval = ext2fs_get_mem(E2UNDO_MAX_EXTENT_BLOCKS * undo_ctx.blocksize,
				&buf);
	if (retval) {
		com_err(prg_name, retval, "%s", _("while allocating memory"));
		exit(1);
	}

	/* load keys */
	keys_per_block = KEYS_PER_BLOCK(&undo_ctx);
	lblk = ext2fs_le64_to_cpu(undo_ctx.hdr.key_offset);
	dbg_printf("nr_keys=%lu, kpb=%zu, blksz=%u\n",
		   undo_ctx.num_keys, keys_per_block, undo_ctx.blocksize);
	for (i = 0; i < undo_ctx.num_keys; i += keys_per_block) {
		size_t j, max_j;
		__le32 crc;

		retval = io_channel_read_blk64(undo_ctx.undo_file,
					       lblk, 1, keyb);
		if (retval) {
			com_err(prg_name, retval, "%s", _("while reading keys"));
			if (force) {
				io_error = 1;
				undo_ctx.num_keys = i - 1;
				break;
			}
			exit(1);
		}

		/* check keys */
		if (!force &&
		    ext2fs_le32_to_cpu(keyb->magic) != KEYBLOCK_MAGIC) {
			fprintf(stderr, _("%s: wrong key magic at %llu\n"),
				tdb_file, lblk);
			exit(1);
		}
		/* Zero the crc field before recomputing, then restore. */
		crc = keyb->crc;
		keyb->crc = 0;
		key_crc = ext2fs_crc32c_le(~0, (unsigned char *)keyb,
					   undo_ctx.blocksize);
		if (!force && ext2fs_le32_to_cpu(crc) != key_crc) {
			fprintf(stderr, _("%s: key block checksum error at %llu.\n"),
				tdb_file, lblk);
			exit(1);
		}

		/* load keys from key block */
		lblk++;
		max_j = undo_ctx.num_keys - i;
		if (max_j > keys_per_block)
			max_j = keys_per_block;
		for (j = 0, dkey = keyb->keys;
		     j < max_j;
		     j++, ikey++, dkey++) {
			ikey->fsblk = ext2fs_le64_to_cpu(dkey->fsblk);
			ikey->fileblk = lblk;
			ikey->blk_crc = ext2fs_le32_to_cpu(dkey->blk_crc);
			ikey->size = ext2fs_le32_to_cpu(dkey->size);
			/* Data extents follow their key block; round size up
			 * to whole undo blocks to find the next extent. */
			lblk += (ikey->size + undo_ctx.blocksize - 1) /
				undo_ctx.blocksize;

			if (E2UNDO_MAX_EXTENT_BLOCKS * undo_ctx.blocksize <
			    ikey->size) {
				com_err(prg_name, retval,
					_("%s: block %llu is too long."),
					tdb_file, ikey->fsblk);
				exit(1);
			}

			/* check each block's crc */
			retval = io_channel_read_blk64(undo_ctx.undo_file,
						       ikey->fileblk,
						       -(int)ikey->size,
						       buf);
			if (retval) {
				com_err(prg_name, retval,
					_("while fetching block %llu."),
					ikey->fileblk);
				if (!force)
					exit(1);
				io_error = 1;
				continue;
			}

			blk_crc = ext2fs_crc32c_le(~0, (unsigned char *)buf,
						   ikey->size);
			if (blk_crc != ikey->blk_crc) {
				fprintf(stderr,
					_("checksum error in filesystem block "
					  "%llu (undo blk %llu)\n"),
					ikey->fsblk, ikey->fileblk);
				if (!force)
					exit(1);
				csum_error = 1;
			}
		}
	}
	ext2fs_free_mem(&keyb);

	/* sort keys in fs block order */
	qsort(undo_ctx.keys, undo_ctx.num_keys, sizeof(struct undo_key_info),
	      key_compare);

	/* replay */
	io_channel_set_blksize(channel, undo_ctx.fs_blocksize);
	for (i = 0, ikey = undo_ctx.keys; i < undo_ctx.num_keys; i++, ikey++) {
		retval = io_channel_read_blk64(undo_ctx.undo_file,
					       ikey->fileblk,
					       -(int)ikey->size,
					       buf);
		if (retval) {
			com_err(prg_name, retval,
				_("while fetching block %llu."),
				ikey->fileblk);
			io_error = 1;
			continue;
		}

		if (verbose)
			printf("Replayed block of size %u from %llu to %llu\n",
			       ikey->size, ikey->fileblk, ikey->fsblk);
		if (dry_run)
			continue;
		retval = io_channel_write_blk64(channel, ikey->fsblk,
						-(int)ikey->size, buf);
		if (retval) {
			com_err(prg_name, retval,
				_("while writing block %llu."), ikey->fsblk);
			io_error = 1;
		}
	}

	if (csum_error)
		fprintf(stderr, _("Undo file corruption; run e2fsck NOW!\n"));
	if (io_error)
		fprintf(stderr, _("IO error during replay; run e2fsck NOW!\n"));
	/* An unfinished undo file means the original operation died partway;
	 * treat that like -f so the fsck below is forced. */
	if (!(ext2fs_le32_to_cpu(undo_ctx.hdr.state) & E2UNDO_STATE_FINISHED)) {
		force = 1;
		fprintf(stderr, _("Incomplete undo record; run e2fsck.\n"));
	}
	ext2fs_free_mem(&buf);
	ext2fs_free_mem(&undo_ctx.keys);
	io_channel_close(channel);

	/* If there were problems, try to force a fsck */
	if (!dry_run && (force || csum_error || io_error)) {
		retval = ext2fs_open2(device_name, NULL,
				      EXT2_FLAG_RW | EXT2_FLAG_64BITS,
				      0, 0, manager, &fs);
		if (retval)
			goto out;
		fs->super->s_state &= ~EXT2_VALID_FS;
		if (csum_error || io_error)
			fs->super->s_state |= EXT2_ERROR_FS;
		ext2fs_mark_super_dirty(fs);
		ext2fs_close_free(&fs);
	}
out:
	io_channel_close(undo_ctx.undo_file);
	return csum_error;
}
/*
 * Capture the "before" image of blocks about to be overwritten.
 *
 * Prior to a write of `count' blocks (or -count bytes) at `block' through
 * this undo channel, read the current contents of each not-yet-recorded
 * tdb_data_size-sized chunk from the real device, append it to the undo
 * file, and record a key (fs block, crc32c, length) for it.  Chunks already
 * saved — tracked in data->written_block_map — are skipped, so only the
 * first write to a region is preserved.
 *
 * Returns 0 on success or an ext2fs error code.
 */
static errcode_t undo_write_tdb(io_channel channel,
				unsigned long long block, int count)
{
	int size, sz;
	/* FIX: actual_size was assigned and read below but never declared. */
	int actual_size = 0;
	unsigned long long block_num, backing_blk_num;
	errcode_t retval = 0;
	ext2_loff_t offset;
	struct undo_private_data *data;
	unsigned char *read_ptr;
	unsigned long long end_block;
	unsigned long long data_size;
	void *data_ptr;
	struct undo_key *key;
	__u32 blk_crc;

	data = (struct undo_private_data *) channel->private_data;

	if (data->undo_file == NULL) {
		/*
		 * Transaction database not initialized
		 */
		return 0;
	}

	/* Negative count means a byte count rather than a block count. */
	if (count == 1)
		size = channel->block_size;
	else {
		if (count < 0)
			size = -count;
		else
			size = count * channel->block_size;
	}
	retval = undo_setup_tdb(data);
	if (retval)
		return retval;
	/*
	 * Data is stored in tdb database as blocks of tdb_data_size size
	 * This helps in efficient lookup further.
	 *
	 * We divide the disk to blocks of tdb_data_size.
	 */
	offset = (block * channel->block_size) + data->offset;
	block_num = offset / data->tdb_data_size;
	end_block = (offset + size - 1) / data->tdb_data_size;

	while (block_num <= end_block) {
		__u32 keysz;

		/*
		 * Check if we have the record already
		 */
		if (ext2fs_test_block_bitmap2(data->written_block_map,
					      block_num)) {
			/* Try the next block */
			block_num++;
			continue;
		}
		ext2fs_mark_block_bitmap2(data->written_block_map, block_num);

		/*
		 * Read one block using the backing I/O manager
		 * The backing I/O manager block size may be
		 * different from the tdb_data_size.
		 * Also we need to recalcuate the block number with respect
		 * to the backing I/O manager.
		 */
		offset = block_num * data->tdb_data_size;
		backing_blk_num = (offset - data->offset) / channel->block_size;

		count = data->tdb_data_size +
			((offset - data->offset) % channel->block_size);
		retval = ext2fs_get_mem(count, &read_ptr);
		if (retval) {
			return retval;
		}
		memset(read_ptr, 0, count);
		actual_size = 0;
		/* Whole blocks: pass a block count; otherwise a byte count. */
		if ((count % channel->block_size) == 0)
			sz = count / channel->block_size;
		else
			sz = -count;
		retval = io_channel_read_blk64(data->real, backing_blk_num,
					       sz, read_ptr);
		if (retval) {
			if (retval != EXT2_ET_SHORT_READ) {
				free(read_ptr);
				return retval;
			}
			/*
			 * short read so update the record size
			 * accordingly
			 */
			data_size = actual_size;
		} else {
			data_size = data->tdb_data_size;
		}
		if (data_size == 0) {
			free(read_ptr);
			block_num++;
			continue;
		}
		dbg_printf("Read %llu bytes from FS block %llu (blk=%llu cnt=%u)\n",
			   data_size, backing_blk_num, block, count);
		if ((data_size % data->undo_file->block_size) == 0)
			sz = data_size / data->undo_file->block_size;
		else
			sz = -actual_size;
		data_ptr = read_ptr + ((offset - data->offset) %
				       data->undo_file->block_size);
		/* extend this key? */
		if (data->keys_in_block) {
			key = data->keyb->keys + data->keys_in_block - 1;
			keysz = ext2fs_le32_to_cpu(key->size);
		} else {
			key = NULL;
			keysz = 0;
		}
		if (key != NULL &&
		    ext2fs_le64_to_cpu(key->fsblk) +
		    ((keysz + data->tdb_data_size - 1) / data->tdb_data_size) ==
		    backing_blk_num &&
		    E2UNDO_MAX_EXTENT_BLOCKS * data->tdb_data_size >
		    keysz + sz) {
			/* Contiguous with the previous key: fold this chunk's
			 * crc into it and grow its size. */
			blk_crc = ext2fs_le32_to_cpu(key->blk_crc);
			blk_crc = ext2fs_crc32c_le(blk_crc,
						   (unsigned char *)data_ptr,
						   data_size);
			key->blk_crc = ext2fs_cpu_to_le32(blk_crc);
			key->size = ext2fs_cpu_to_le32(keysz + data_size);
		} else {
			/* Start a new key for this chunk. */
			data->num_keys++;
			key = data->keyb->keys + data->keys_in_block;
			data->keys_in_block++;
			key->fsblk = ext2fs_cpu_to_le64(backing_blk_num);
			blk_crc = ext2fs_crc32c_le(~0,
						   (unsigned char *)data_ptr,
						   data_size);
			key->blk_crc = ext2fs_cpu_to_le32(blk_crc);
			key->size = ext2fs_cpu_to_le32(data_size);
		}
		dbg_printf("Writing block %llu to offset %llu size %d key %zu\n",
			   block_num, data->undo_blk_num,
			   sz, data->num_keys - 1);
		retval = io_channel_write_blk64(data->undo_file,
						data->undo_blk_num, sz, data_ptr);
		if (retval) {
			free(read_ptr);
			return retval;
		}
		data->undo_blk_num++;
		free(read_ptr);

		/* Write out the key block */
		retval = write_undo_indexes(data, 0);
		if (retval)
			return retval;

		/* Next block */
		block_num++;
	}

	return retval;
}
/*
 * This function is responsible for (optionally) moving through the
 * extent tree and then returning the current extent.
 *
 * `flags' (masked with EXT2_EXTENT_MOVE_MASK) selects the motion:
 * CURRENT, ROOT, FIRST_SIB/NEXT_SIB/PREV_SIB/LAST_SIB, UP, DOWN, and the
 * composite NEXT/PREV/NEXT_LEAF/PREV_LEAF/LAST_LEAF operations, which are
 * translated below into a sequence of the primitive moves (re-entering via
 * the `retry' label until a leaf-level extent is reached).  The resulting
 * extent is decoded into `*extent'.
 */
errcode_t ext2fs_extent_get(ext2_extent_handle_t handle,
			    int flags, struct ext2fs_extent *extent)
{
	struct extent_path *path, *newpath;
	struct ext3_extent_header *eh;
	struct ext3_extent_idx *ix = 0;
	struct ext3_extent *ex;
	errcode_t retval;
	/* NOTE(review): blk is 32-bit but is assigned ei_leaf_hi << 32 below,
	 * which truncates on filesystems above 2^32 blocks — verify whether
	 * this version is expected to support 64-bit block numbers. */
	blk_t blk;
	blk64_t end_blk;
	int orig_op, op;

	EXT2_CHECK_MAGIC(handle, EXT2_ET_MAGIC_EXTENT_HANDLE);

	if (!handle->path)
		return EXT2_ET_NO_CURRENT_NODE;

	orig_op = op = flags & EXT2_EXTENT_MOVE_MASK;

retry:
	path = handle->path + handle->level;

	/* Translate the composite NEXT/NEXT_LEAF into one primitive step:
	 * first visit goes DOWN, then across siblings, then back UP. */
	if ((orig_op == EXT2_EXTENT_NEXT) ||
	    (orig_op == EXT2_EXTENT_NEXT_LEAF)) {
		if (handle->level < handle->max_depth) {
			/* interior node */
			if (path->visit_num == 0) {
				path->visit_num++;
				op = EXT2_EXTENT_DOWN;
			} else if (path->left > 0)
				op = EXT2_EXTENT_NEXT_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_NEXT;
		} else {
			/* leaf node */
			if (path->left > 0)
				op = EXT2_EXTENT_NEXT_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_NEXT;
		}
		if (op != EXT2_EXTENT_NEXT_SIB) {
#ifdef DEBUG
			printf("<<<< OP = %s\n",
			       (op == EXT2_EXTENT_DOWN) ? "down" :
			       ((op == EXT2_EXTENT_UP) ? "up" : "unknown"));
#endif
		}
	}

	/* Same translation for PREV/PREV_LEAF, mirrored: descend to the LAST
	 * child, walk siblings backwards, then go UP. */
	if ((orig_op == EXT2_EXTENT_PREV) ||
	    (orig_op == EXT2_EXTENT_PREV_LEAF)) {
		if (handle->level < handle->max_depth) {
			/* interior node */
			if (path->visit_num > 0 ) {
				/* path->visit_num = 0; */
				op = EXT2_EXTENT_DOWN_AND_LAST;
			} else if (path->left < path->entries-1)
				op = EXT2_EXTENT_PREV_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_PREV;
		} else {
			/* leaf node */
			if (path->left < path->entries-1)
				op = EXT2_EXTENT_PREV_SIB;
			else if (handle->level > 0)
				op = EXT2_EXTENT_UP;
			else
				return EXT2_ET_EXTENT_NO_PREV;
		}
		if (op != EXT2_EXTENT_PREV_SIB) {
#ifdef DEBUG
			printf("<<<< OP = %s\n",
			       (op == EXT2_EXTENT_DOWN_AND_LAST) ? "down/last" :
			       ((op == EXT2_EXTENT_UP) ? "up" : "unknown"));
#endif
		}
	}

	/* LAST_LEAF: keep descending through each node's last child until a
	 * leaf is reached (the retry at the bottom drives the repetition). */
	if (orig_op == EXT2_EXTENT_LAST_LEAF) {
		if ((handle->level < handle->max_depth) &&
		    (path->left == 0))
			op = EXT2_EXTENT_DOWN;
		else
			op = EXT2_EXTENT_LAST_SIB;
#ifdef DEBUG
		printf("<<<< OP = %s\n",
		       (op == EXT2_EXTENT_DOWN) ? "down" : "last_sib");
#endif
	}

	switch (op) {
	case EXT2_EXTENT_CURRENT:
		ix = path->curr;
		break;
	case EXT2_EXTENT_ROOT:
		handle->level = 0;
		path = handle->path + handle->level;
		/* fallthrough: ROOT resets to the first sibling of level 0 */
	case EXT2_EXTENT_FIRST_SIB:
		path->left = path->entries;
		path->curr = 0;
		/* fallthrough: FIRST_SIB is NEXT_SIB from "before the start" */
	case EXT2_EXTENT_NEXT_SIB:
		if (path->left <= 0)
			return EXT2_ET_EXTENT_NO_NEXT;
		if (path->curr) {
			ix = path->curr;
			ix++;
		} else {
			eh = (struct ext3_extent_header *) path->buf;
			ix = EXT_FIRST_INDEX(eh);
		}
		path->left--;
		path->curr = ix;
		path->visit_num = 0;
		break;
	case EXT2_EXTENT_PREV_SIB:
		if (!path->curr ||
		    path->left+1 >= path->entries)
			return EXT2_ET_EXTENT_NO_PREV;
		ix = path->curr;
		ix--;
		path->curr = ix;
		path->left++;
		if (handle->level < handle->max_depth)
			path->visit_num = 1;
		break;
	case EXT2_EXTENT_LAST_SIB:
		eh = (struct ext3_extent_header *) path->buf;
		path->curr = EXT_LAST_EXTENT(eh);
		ix = path->curr;
		path->left = 0;
		path->visit_num = 0;
		break;
	case EXT2_EXTENT_UP:
		if (handle->level <= 0)
			return EXT2_ET_EXTENT_NO_UP;
		handle->level--;
		path--;
		ix = path->curr;
		if ((orig_op == EXT2_EXTENT_PREV) ||
		    (orig_op == EXT2_EXTENT_PREV_LEAF))
			path->visit_num = 0;
		break;
	case EXT2_EXTENT_DOWN:
	case EXT2_EXTENT_DOWN_AND_LAST:
		if (!path->curr ||(handle->level >= handle->max_depth))
			return EXT2_ET_EXTENT_NO_DOWN;

		ix = path->curr;
		newpath = path + 1;
		if (!newpath->buf) {
			retval = ext2fs_get_mem(handle->fs->blocksize,
						&newpath->buf);
			if (retval)
				return retval;
		}
		blk = ext2fs_le32_to_cpu(ix->ei_leaf) +
			((__u64) ext2fs_le16_to_cpu(ix->ei_leaf_hi) << 32);
		/* In an image file without metadata, child blocks read as
		 * zeroes rather than from the (absent) data io channel. */
		if ((handle->fs->flags & EXT2_FLAG_IMAGE_FILE) &&
		    (handle->fs->io != handle->fs->image_io))
			memset(newpath->buf, 0, handle->fs->blocksize);
		else {
			retval = io_channel_read_blk(handle->fs->io,
						     blk, 1, newpath->buf);
			if (retval)
				return retval;
		}
		handle->level++;

		eh = (struct ext3_extent_header *) newpath->buf;

		retval = ext2fs_extent_header_verify(eh, handle->fs->blocksize);
		if (retval) {
			handle->level--;
			return retval;
		}

		newpath->left = newpath->entries =
			ext2fs_le16_to_cpu(eh->eh_entries);
		newpath->max_entries = ext2fs_le16_to_cpu(eh->eh_max);

		/* Child's end block = start of the parent's next index, or
		 * the parent's own end if this was the last index. */
		if (path->left > 0) {
			ix++;
			newpath->end_blk = ext2fs_le32_to_cpu(ix->ei_block);
		} else
			newpath->end_blk = path->end_blk;

		path = newpath;
		if (op == EXT2_EXTENT_DOWN) {
			ix = EXT_FIRST_INDEX((struct ext3_extent_header *) eh);
			path->curr = ix;
			path->left = path->entries - 1;
			path->visit_num = 0;
		} else {
			ix = EXT_LAST_INDEX((struct ext3_extent_header *) eh);
			path->curr = ix;
			path->left = 0;
			if (handle->level < handle->max_depth)
				path->visit_num = 1;
		}
#ifdef DEBUG
		printf("Down to level %d/%d, end_blk=%llu\n",
		       handle->level, handle->max_depth,
		       path->end_blk);
#endif
		break;
	default:
		return EXT2_ET_OP_NOT_SUPPORTED;
	}

	if (!ix)
		return EXT2_ET_NO_CURRENT_NODE;

	extent->e_flags = 0;
#ifdef DEBUG
	printf("(Left %d)\n", path->left);
#endif

	/* Decode the current entry: a real extent at leaf level, otherwise an
	 * index whose length is synthesized from the next index's start. */
	if (handle->level == handle->max_depth) {
		ex = (struct ext3_extent *) ix;

		extent->e_pblk = ext2fs_le32_to_cpu(ex->ee_start) +
			((__u64) ext2fs_le16_to_cpu(ex->ee_start_hi) << 32);
		extent->e_lblk = ext2fs_le32_to_cpu(ex->ee_block);
		extent->e_len = ext2fs_le16_to_cpu(ex->ee_len);
		extent->e_flags |= EXT2_EXTENT_FLAGS_LEAF;
		/* Lengths above EXT_INIT_MAX_LEN mark uninitialized extents. */
		if (extent->e_len > EXT_INIT_MAX_LEN) {
			extent->e_len -= EXT_INIT_MAX_LEN;
			extent->e_flags |= EXT2_EXTENT_FLAGS_UNINIT;
		}
	} else {
		extent->e_pblk = ext2fs_le32_to_cpu(ix->ei_leaf) +
			((__u64) ext2fs_le16_to_cpu(ix->ei_leaf_hi) << 32);
		extent->e_lblk = ext2fs_le32_to_cpu(ix->ei_block);
		if (path->left > 0) {
			ix++;
			end_blk = ext2fs_le32_to_cpu(ix->ei_block);
		} else
			end_blk = path->end_blk;

		extent->e_len = end_blk - extent->e_lblk;
	}
	if (path->visit_num)
		extent->e_flags |= EXT2_EXTENT_FLAGS_SECOND_VISIT;

	/* The *_LEAF composites keep iterating until we are at leaf level
	 * (and, for LAST_LEAF, at the last entry). */
	if (((orig_op == EXT2_EXTENT_NEXT_LEAF) ||
	     (orig_op == EXT2_EXTENT_PREV_LEAF)) &&
	    (handle->level != handle->max_depth))
		goto retry;

	if ((orig_op == EXT2_EXTENT_LAST_LEAF) &&
	    ((handle->level != handle->max_depth) ||
	     (path->left != 0)))
		goto retry;

	return 0;
}
/*
 * e2fsck pass 2: validate one interior (index) node of an htree directory.
 *
 * Checks the node's count/limit header, the range of each child block
 * number, and the hash ordering of the entries; records per-child min/max
 * hash bounds and parent links in dx_dir->dx_block[].  Any uncorrectable
 * problem (or an accepted fix_problem prompt) clears the INDEX flag on the
 * directory and schedules it for rehashing.
 */
static void parse_int_node(ext2_filsys fs,
			   struct ext2_db_entry2 *db,
			   struct check_dir_struct *cd,
			   struct dx_dir_info *dx_dir,
			   char *block_buf, int failed_csum)
{
	struct ext2_dx_root_info *root;
	struct ext2_dx_entry *ent;
	struct ext2_dx_countlimit *limit;
	struct dx_dirblock_info *dx_db;
	int i, expect_limit, count;
	blk_t blk;
	ext2_dirhash_t min_hash = 0xffffffff;
	ext2_dirhash_t max_hash = 0;
	ext2_dirhash_t hash = 0, prev_hash;
	int csum_size = 0;

	if (db->blockcnt == 0) {
		/* Root block: dx_root_info sits after the 24-byte fake
		 * "." / ".." dirent header. */
		root = (struct ext2_dx_root_info *) (block_buf + 24);

#ifdef DX_DEBUG
		printf("Root node dump:\n");
		printf("\t Reserved zero: %u\n", root->reserved_zero);
		printf("\t Hash Version: %d\n", root->hash_version);
		printf("\t Info length: %d\n", root->info_length);
		printf("\t Indirect levels: %d\n", root->indirect_levels);
		printf("\t Flags: %d\n", root->unused_flags);
#endif

		ent = (struct ext2_dx_entry *) (block_buf + 24 + root->info_length);

		if (failed_csum &&
		    (e2fsck_dir_will_be_rehashed(cd->ctx, cd->pctx.ino) ||
		     fix_problem(cd->ctx, PR_2_HTREE_ROOT_CSUM_INVALID,
				 &cd->pctx)))
			goto clear_and_exit;
	} else {
		/* Non-root index node: entries start after an 8-byte fake
		 * dirent header. */
		ent = (struct ext2_dx_entry *) (block_buf+8);

		if (failed_csum &&
		    (e2fsck_dir_will_be_rehashed(cd->ctx, cd->pctx.ino) ||
		     fix_problem(cd->ctx, PR_2_HTREE_NODE_CSUM_INVALID,
				 &cd->pctx)))
			goto clear_and_exit;
	}

	/* The first "entry" slot doubles as the count/limit header. */
	limit = (struct ext2_dx_countlimit *) ent;

#ifdef DX_DEBUG
	printf("Number of entries (count): %d\n",
	       ext2fs_le16_to_cpu(limit->count));
	printf("Number of entries (limit): %d\n",
	       ext2fs_le16_to_cpu(limit->limit));
#endif

	count = ext2fs_le16_to_cpu(limit->count);
	/* With metadata_csum, a dx_tail checksum occupies the last slot. */
	if (EXT2_HAS_RO_COMPAT_FEATURE(fs->super,
				       EXT4_FEATURE_RO_COMPAT_METADATA_CSUM))
		csum_size = sizeof(struct ext2_dx_tail);
	expect_limit = (fs->blocksize -
			(csum_size + ((char *) ent - block_buf))) /
		       sizeof(struct ext2_dx_entry);
	if (ext2fs_le16_to_cpu(limit->limit) != expect_limit) {
		cd->pctx.num = ext2fs_le16_to_cpu(limit->limit);
		if (fix_problem(cd->ctx, PR_2_HTREE_BAD_LIMIT, &cd->pctx))
			goto clear_and_exit;
	}
	if (count > expect_limit) {
		cd->pctx.num = count;
		if (fix_problem(cd->ctx, PR_2_HTREE_BAD_COUNT, &cd->pctx))
			goto clear_and_exit;
		/* Clamp so the loop below stays inside the block. */
		count = expect_limit;
	}

	for (i=0; i < count; i++) {
		prev_hash = hash;
		/* Entry 0 has no stored hash; low bit is the continuation
		 * flag, masked off for ordering checks. */
		hash = i ? (ext2fs_le32_to_cpu(ent[i].hash) & ~1) : 0;
#ifdef DX_DEBUG
		printf("Entry #%d: Hash 0x%08x, block %u\n", i, hash,
		       ext2fs_le32_to_cpu(ent[i].block));
#endif
		/* Only the low 24 bits of the block field are the logical
		 * directory block number. */
		blk = ext2fs_le32_to_cpu(ent[i].block) & 0x0ffffff;
		/* Check to make sure the block is valid */
		if (blk >= (blk_t) dx_dir->numblocks) {
			cd->pctx.blk = blk;
			if (fix_problem(cd->ctx, PR_2_HTREE_BADBLK,
					&cd->pctx))
				goto clear_and_exit;
			continue;
		}
		if (hash < prev_hash &&
		    fix_problem(cd->ctx, PR_2_HTREE_HASH_ORDER, &cd->pctx))
			goto clear_and_exit;
		dx_db = &dx_dir->dx_block[blk];
		/* A child referenced by two index entries is flagged; the
		 * first reference records the parent. */
		if (dx_db->flags & DX_FLAG_REFERENCED) {
			dx_db->flags |= DX_FLAG_DUP_REF;
		} else {
			dx_db->flags |= DX_FLAG_REFERENCED;
			dx_db->parent = db->blockcnt;
		}

		if (hash < min_hash)
			min_hash = hash;
		if (hash > max_hash)
			max_hash = hash;
		/* Each child covers [this entry's hash, next entry's hash). */
		dx_db->node_min_hash = hash;
		if ((i+1) < count)
			dx_db->node_max_hash =
				ext2fs_le32_to_cpu(ent[i+1].hash) & ~1;
		else {
			dx_db->node_max_hash = 0xfffffffe;
			dx_db->flags |= DX_FLAG_LAST;
		}
		if (i == 0)
			dx_db->flags |= DX_FLAG_FIRST;
	}
#ifdef DX_DEBUG
	printf("Blockcnt = %d, min hash 0x%08x, max hash 0x%08x\n",
	       db->blockcnt, min_hash, max_hash);
#endif
	/* Record this index node's own observed hash range. */
	dx_db = &dx_dir->dx_block[db->blockcnt];
	dx_db->min_hash = min_hash;
	dx_db->max_hash = max_hash;
	return;

clear_and_exit:
	/* Drop the htree index and rebuild the directory later. */
	clear_htree(cd->ctx, cd->pctx.ino);
	dx_dir->numblocks = 0;
	e2fsck_rehash_dir_later(cd->ctx, cd->pctx.ino);
}
// search inode by use imap (step1: flag 1 = only directory ; step2: flag 0 = only file) static void search_imap_inode(char* des_dir, __u32 t_after, __u32 t_before, int flag) { struct ext2_group_desc *gdp; struct ext2_inode_large *inode; //struct dir_list_head_t *dir = NULL; struct ring_buf* i_list = NULL; r_item* item = NULL; int zero_flag, retval, load, x ,i ; char *pathname = NULL; char *i_pathname = NULL; char *buf= NULL; unsigned char *tmp_buf = NULL; __u32 blocksize, inodesize, inode_max, inode_per_group, block_count; __u32 inode_per_block , inode_block_group, group; blk_t block_nr; __u32 c_time, d_time, mode; ext2_ino_t first_block_inode_nr , inode_nr; pathname = malloc(26); blocksize = current_fs->blocksize; inodesize = current_fs->super->s_inode_size; inode_max = current_fs->super->s_inodes_count; inode_per_group = current_fs->super->s_inodes_per_group; buf = malloc(blocksize); if (! (flag & 0x01) ){ tmp_buf = malloc (12 * blocksize); if (!tmp_buf) goto errout; cookie = magic_open(MAGIC_MIME | MAGIC_NO_CHECK_COMPRESS | MAGIC_NO_CHECK_ELF | MAGIC_CONTINUE); if ((! 
cookie) || magic_load(cookie, NULL)){ fprintf(stderr,"ERROR: can't find libmagic\n"); goto errout; } } inode_per_block = blocksize / inodesize; inode_block_group = inode_per_group / inode_per_block; for (group = 0 ; group < current_fs->group_desc_count ; group++){ #ifdef EXT2_FLAG_64BITS gdp = ext2fs_group_desc(current_fs, current_fs->group_desc, group); #else gdp = ¤t_fs->group_desc[group]; #endif zero_flag = 0; if (!(flag & 0x02)){ //skip this in disaster mode // NEXT GROUP IF INODE NOT INIT if (gdp->bg_flags & (EXT2_BG_INODE_UNINIT)) continue; // SET ZERO-FLAG IF FREE INODES == INODE/GROUP for fast ext3 if (gdp->bg_free_inodes_count == inode_per_group) zero_flag = 1; } //FIXME for struct ext4_group_desc 48/64BIT for (block_nr = gdp->bg_inode_table , block_count = 0 ; block_nr < (gdp->bg_inode_table + inode_block_group); block_nr++, block_count++) { if (!(flag & 0x02)){ //skip this in disaster mode // break if the first block only zero inode if ((block_count ==1) && (zero_flag == (inode_per_block + 1))) break; } //FIXME inode_max ???? first_block_inode_nr = (group * inode_per_group) + (block_count * inode_per_block) + 1; load = 0; for (i = 0; i<inode_per_block;i++){ if ( ! ext2fs_test_block_bitmap(imap,first_block_inode_nr + i)){ load++; break; } } if (load){ retval = read_block ( current_fs , &block_nr , buf); if (retval) return; for (inode_nr = first_block_inode_nr ,x = 0; x < inode_per_block ; inode_nr++ , x++){ if ( ! ext2fs_test_block_bitmap(imap,inode_nr)){ inode = (struct ext2_inode_large*) (buf + (x*inodesize)); c_time = ext2fs_le32_to_cpu(inode->i_ctime); mode = ext2fs_le32_to_cpu(inode->i_mode); if ( ! ( flag & 0x02)) { //no check this inode in disaster mode if ((! c_time ) && (!(inode->i_mode & LINUX_S_IFMT)) ) { if(zero_flag) zero_flag++ ; continue; } d_time = ext2fs_le32_to_cpu(inode->i_dtime); if ( (! d_time) || d_time <= t_after){ ext2fs_mark_generic_bitmap(imap,inode_nr); continue; } } // 1. 
magical step if (LINUX_S_ISDIR(mode) && ( flag & 0x01) && (pathname)){ sprintf(pathname,"<%lu>",(long unsigned int)inode_nr); struct dir_list_head_t * dir = NULL; if (flag & 0x02){ //disaster mode //only search for undeleted entry dir = get_dir3(NULL,0, inode_nr , "MAGIC-1",pathname, t_after,t_before, 0); if (dir) { lookup_local(des_dir, dir,t_after,t_before, RECOV_ALL | LOST_DIR_SEARCH ); clear_dir_list(dir); } } else{ //search for all dir = get_dir3(NULL,0, inode_nr , "MAGIC-1",pathname, t_after,t_before, DELETED_OPT); if (dir) { lookup_local(des_dir,dir,t_after,t_before,DELETED_OPT|RECOV_ALL|LOST_DIR_SEARCH); clear_dir_list(dir); } } } // 2. magical step if (! (flag & 0x01) ){ i_list = get_j_inode_list(current_fs->super, inode_nr); item = get_undel_inode(i_list,t_after,t_before); ext2fs_mark_generic_bitmap(imap,inode_nr); if (item) { if (! LINUX_S_ISDIR(item->inode->i_mode) ) { i_pathname = identify_filename(i_pathname, tmp_buf, (struct ext2_inode*)item->inode, inode_nr); sprintf(pathname,"<%lu>",(long unsigned int)inode_nr); recover_file(des_dir,"MAGIC-2", ((i_pathname)?i_pathname : pathname), (struct ext2_inode*)item->inode, inode_nr, 0); if(i_pathname){ free(i_pathname); i_pathname = NULL; } } } if (i_list) ring_del(i_list); } } } } } } errout: if (pathname) free(pathname); if(buf) { free(buf); buf = NULL; } if (tmp_buf){ free(tmp_buf); tmp_buf = NULL; } if (cookie){ magic_close(cookie); cookie = 0; } return; }