/** * ext2_alloc_block - Allocate a new block id * * This functions allocates a new block from device @dev, and applies * the allocation to the superblock. Outputs to @out */ void ext2_alloc_block(uint32_t *out, struct device *dev) { /* Algorithm: Loop through block group descriptors, * find which bg has a free block * and set that. */ struct ext2_priv_data *priv = EXT2_PRIV(dev); uint8_t *buffer = malloc(priv->blocksize); ext2_read_block(buffer, priv->first_bgd, dev); struct ext2_block_group_desc *bg = (struct ext2_block_group_desc *)buffer; for(int i = 0; i < priv->number_of_bgs; i++) { if(bg->num_of_unalloc_block) { *out = priv->sb.blocks - bg->num_of_unalloc_block + 1; bg->num_of_unalloc_block --; ext2_write_block(buffer, priv->first_bgd + i, dev); ext2_read_block(buffer, priv->sb.superblock_id, dev); struct ext2_superblock *sb = (struct ext2_superblock *)buffer; sb->unallocatedblocks --; ext2_write_block(buffer, priv->sb.superblock_id, dev); goto out; } bg++; } out: free(buffer); }
/*
 * Read up to @size bytes from @node starting at byte @offset into
 * @buffer.  Returns the number of bytes read, 0 when @offset is at or
 * past EOF (or the trimmed size is 0), or -EIO on a block read failure.
 */
static int ext2_read(vnode *node, off_t offset, size_t size, char *buffer)
{
	int blocksize = node->sb->blocksize;

	if (offset > node->size)
		return 0;
	if (offset + size > node->size)
		size = node->size - offset;
	/* Fix: with size == 0 the original still copied a partial first
	 * block into the caller's buffer. */
	if (size == 0)
		return 0;

	int b_s = offset / blocksize;               /* first file block       */
	int boff = offset % blocksize;              /* offset inside it       */
	int b_e = (offset + size - 1) / blocksize;  /* last file block        */
	int block;

	/* Fix: the original always copied (blocksize - boff) bytes from the
	 * first block, overrunning the caller's buffer whenever the whole
	 * read fits inside a single block. */
	int bsize = blocksize - boff;
	if ((size_t)bsize > size)
		bsize = size;

	char *buf = malloc(blocksize);
	if (!buf)
		return -EIO;

	/* Leading (possibly partial) block. */
	if (ext2_read_block(node->sb, file_ext2_block(node->u.ext2_i, b_s), buf) <= 0) {
		free(buf);
		return -EIO;
	}
	memcpy(buffer, buf + boff, bsize);
	buffer += bsize;

	/* Whole middle blocks. */
	for (block = b_s + 1; block < b_e; block++) {
		if (ext2_read_block(node->sb, file_ext2_block(node->u.ext2_i, block), buf) <= 0) {
			free(buf);
			return -EIO;
		}
		memcpy(buffer, buf, blocksize);
		buffer += blocksize;
	}

	/* Trailing block, when the read spans more than one block. */
	if (b_s != b_e) {
		if (ext2_read_block(node->sb, file_ext2_block(node->u.ext2_i, b_e), buf) <= 0) {
			free(buf);
			return -EIO;
		}
		/* Fix: the original copied (offset + size) % blocksize bytes,
		 * which is 0 when the read ends exactly on a block boundary;
		 * compute the tail length relative to the last block instead. */
		memcpy(buffer, buf, (size_t)(offset + size - (off_t)b_e * blocksize));
	}

	free(buf);
	return size;
}
/*
 * Read @length bytes of file data from @inode starting at byte @offset
 * into @buffer.  All data is staged through the volume-wide block
 * cache (volume->buffer), so this is not safe for concurrent callers.
 *
 * Returns the number of bytes copied, or -1 when @offset is at or past
 * EOF.  Reads extending past EOF are trimmed to the file size.
 */
int ext2_read_data(ext2_VOLUME* volume, struct ext2_inode *inode, off_t offset, char *buffer, size_t length)
{
	unsigned int logical, physical;
	int blocksize = EXT2_BLOCK_SIZE(volume->super);
	int shift;
	size_t read;

	if (offset >= inode->i_size)
		return -1;
	if (offset + length >= inode->i_size)
		length = inode->i_size - offset;

	read = 0;
	logical = offset / blocksize;   /* first file block to read        */
	shift = offset % blocksize;     /* byte offset inside that block   */

	/* Leading partial block when the read is not block-aligned. */
	if (shift) {
		physical = ext2_get_block_addr(volume, inode, logical);
		ext2_read_block(volume, physical);
		if (length < blocksize - shift) {
			/* The whole request fits inside this one block. */
			memcpy(buffer, volume->buffer + shift, length);
			return length;
		}
		/* read is 0 here, so this sets it to the head-chunk size. */
		read += blocksize - shift;
		memcpy(buffer, volume->buffer + shift, read);
		buffer += read;
		length -= read;
		logical++;
	}

	/* Whole blocks, then one trailing partial block. */
	while (length) {
		physical = ext2_get_block_addr(volume, inode, logical);
		ext2_read_block(volume, physical);
		if (length < blocksize) {
			memcpy(buffer, volume->buffer, length);
			read += length;
			return read;
		}
		memcpy(buffer, volume->buffer, blocksize);
		buffer += blocksize;
		length -= blocksize;
		read += blocksize;
		logical++;
	}
	return read;
}
/*
 * Read up to @size bytes of @inode's data starting at byte @offset
 * into @buffer_.  Aligned full blocks are read straight into the
 * caller's buffer; partial blocks are staged through a lazily
 * allocated bounce buffer.  Returns the number of bytes actually read
 * (may be short at EOF or on bounce-buffer allocation failure).
 */
off_t inode_read_at(struct block *d, struct inode *inode, void *buffer_, off_t size, off_t offset){
    struct ext2_meta_data *md;
    uint8_t *dst = buffer_;
    uint8_t *scratch = NULL;
    uint32_t bsize;
    off_t done = 0;

    ASSERT(d != NULL && inode != NULL);

    /* The filesystem block size comes from the device metadata. */
    md = ext2_get_meta(d);
    ASSERT(md != NULL && md->sb != NULL);
    bsize = ext2_get_block_size(md->sb);

    while (size > 0) {
        /* Locate the filesystem block holding the current offset. */
        uint32_t idx = offset / bsize;      /* nth data block of the inode */
        uint32_t in_blk = offset % bsize;   /* byte offset within it       */
        uint32_t blk = inode_get_data_block(d, inode, idx);
        ASSERT(blk != UINT32_MAX);

        /* Clamp the chunk to the file end, the block end and the request.
         * off_t is signed, so EOF yields chunk <= 0. */
        off_t left_in_file = inode->i_size - offset;
        off_t left_in_block = bsize - in_blk;
        off_t bound = left_in_file < left_in_block ? left_in_file : left_in_block;
        off_t chunk = size < bound ? size : bound;
        if (chunk <= 0)
            break;

        if (in_blk == 0 && chunk == bsize) {
            /* Aligned full block: read directly into the destination. */
            ext2_read_block(d, blk, bsize, dst + done);
        } else {
            /* Partial block: stage through the bounce buffer. */
            if (scratch == NULL) {
                scratch = kmalloc(bsize);
                if (scratch == NULL)
                    break;
            }
            ext2_read_block(d, blk, bsize, scratch);
            memcpy(dst + done, scratch + in_blk, chunk);
        }

        size -= chunk;
        offset += chunk;
        done += chunk;
    }

    if (scratch != NULL)
        kfree(scratch);
    return done;
}
/* Get IDX th entry from block specified by BLOCK ID recursively until LEVEL == 0
 *
 * @block_id names an index block of 32-bit entries; @level is the
 * recursion depth remaining below it.  At each step the entry stride is
 * items_per_block^level, so table_idx selects the entry to follow and
 * the remainder of @idx is resolved one level down.  Returns the data
 * block id reached at the bottom of the chain.
 */
static uint32_t inode_traverse_linklist(struct block *d, uint32_t block_id, uint32_t idx, uint32_t level){
    int i;
    uint32_t block_size, items_per_block, ids_per_entry, table_idx;
    struct ext2_meta_data *meta;
    uint32_t *array;

    ASSERT(d != NULL);

    // get device meta data
    meta = ext2_get_meta(d);
    ASSERT(meta != NULL && meta->sb != NULL);
    block_size = ext2_get_block_size(meta->sb);
    items_per_block = block_size / sizeof(uint32_t);

    // calculate number of ids per entry
    // and local table index
    ids_per_entry = 1;
    for(i=0;i<level;i++)
        ids_per_entry *= items_per_block;
    table_idx = idx / ids_per_entry;

    // get local table entry
    // (ext2_read_block allocates the buffer when passed NULL; free it here)
    array = ext2_read_block(d,block_id,block_size,NULL);
    block_id = array[table_idx];
    kfree(array);

    // if not reach leaf level, descend with the remainder of the index
    if(level > 0){
        idx = idx - table_idx * ids_per_entry;
        level = level - 1;
        block_id = inode_traverse_linklist(d,block_id,idx,level);
    }
    return block_id;
}
int ext2_read_directory(struct filesystem *fs, int dino, char *f) { /* read the directory inode in */ struct ext2_inode *inode = kmalloc(sizeof(*inode)); ext2_read_inode(fs, inode, dino); if (inode->type & 0x4000 == 0) return -ENOTDIR; /* the block pointers contain some 'struct ext2_dir's, so parse */ void *bbuf = kmalloc(EXT2_PRIV(fs)->blocksize); for (int i = 0; i < 12; i++) { ext2_read_block(fs, bbuf, inode->dbp[i]); struct ext2_dir *d = (void *)bbuf; if (d->size == 0 || d->namelength == 0) break; int r = 0; while (r < EXT2_PRIV(fs)->blocksize) { if (strncmp(&d->reserved + 1, f, d->namelength) == 0) { int k = d->inode; new_free(bbuf); return k; } r += d->size; if (d->size == 0 || d->namelength == 0) { goto c1; } d = (struct ext2_dir *)((uintptr_t)d + d->size); } c1:; } new_free(bbuf); return -ENOENT; }
/*
 * Free on-disk block @block_id: clear its bit in the owning block
 * group's block bitmap and increment the free-block counters in both
 * the group descriptor and the in-memory superblock.
 *
 * NOTE(review): the error exits between ext2_load_bgd and
 * ext2_free_bgd skip ext2_free_bgd -- verify the BGD ownership
 * semantics (possible leak).  The superblock counter is only updated
 * in memory here; presumably flushed elsewhere -- confirm.
 */
SVFUNC(ext2_free_block, ext2_device_t *device, uint32_t block_id)
{
	uint32_t b_count = device->superblock.block_count;
	uint32_t bgrp_id = 0;
	uint32_t bgrp_bcnt = device->superblock.blocks_per_group;
	/* Words per bitmap block (block_size / 4) ... */
	uint32_t bm_block_size = 256 << device->superblock.block_size_enc;
	/* ... hence bits (blocks) covered by one bitmap block. */
	uint32_t bm_block_bcnt = bm_block_size * 32;
	uint32_t bm_id;
	uint32_t block_map[bm_block_size];
	/* Block numbering starts at 1 on 1 KiB-block filesystems. */
	uint32_t first_b = device->superblock.block_size_enc ? 0 : 1;
	uint32_t idx ,nb;
	aoff_t rsize;
	int status;
	ext2_block_group_desc_t *bgd;

	assert ( device != NULL );
	assert (block_id < b_count);
	assert (block_id > first_b);

	/* Decompose the block id into: group, bitmap block within the
	 * group (bm_id), word within that bitmap block (idx), and bit
	 * within the word (nb). */
	bgrp_id = (block_id - first_b) / bgrp_bcnt;
	idx = block_id - first_b - bgrp_id * bgrp_bcnt;
	bm_id = idx / bm_block_bcnt;
	idx -= bm_id * bm_block_bcnt;
	nb = idx % 32;
	idx -= nb;
	idx /= 32;

	status = ext2_load_bgd(device, bgrp_id, &bgd);
	if (status)
		THROWV(status);

	status = ext2_read_block(device, bgd->block_bitmap + bm_id, 0, block_map, bm_block_size * 4, &rsize);
	if (status)
		THROWV(status);

	/* Mark the block free and write the bitmap back. */
	EXT2_BITMAP_CLR(block_map[idx], nb);

	status = ext2_write_block(device, bgd->block_bitmap + bm_id, 0, block_map, bm_block_size * 4, &rsize);
	if (status) {
		debugcon_printf("ext2: MAYDAY MAYDAY MAYDAY: write error (block bitmap), FILESYSTEM IS PROBABLY CORRUPTED NOW!");
		ext2_handle_error(device);
		THROWV(status);
	}

	bgd->free_block_count++;

	status = ext2_store_bgd(device, bgrp_id, bgd);
	if (status) {
		debugcon_printf("ext2: MAYDAY MAYDAY MAYDAY: write error (BGD), FILESYSTEM IS PROBABLY CORRUPTED NOW!");
		ext2_handle_error(device);
		THROWV(status);
	}

	ext2_free_bgd(device, bgd);

	device->superblock.free_block_count++;

	RETURNV;
}
/*
 * Load inode number @ino from disk into @inode, converting each field
 * from on-disk little-endian to host byte order.
 *
 * The raw inode is located via its block group descriptor and the
 * group's inode table, and is read through the shared volume->buffer
 * block cache (not safe for concurrent callers).  Always returns 0.
 */
int ext2_get_inode(ext2_VOLUME* volume, unsigned int ino, struct ext2_inode *inode)
{
	struct ext2_group_desc desc;
	unsigned int block;
	unsigned int group_id;
	unsigned int offset;
	struct ext2_inode *le_inode;
	int i;

	/* Inode numbers are 1-based on disk; switch to a 0-based index. */
	ino--;
	group_id = ino / EXT2_INODES_PER_GROUP(volume->super);
	ext2_get_group_desc(volume, group_id, &desc);
	ino %= EXT2_INODES_PER_GROUP(volume->super);

	/* Block within the group's inode table that holds this inode. */
	block = desc.bg_inode_table;
	block += ino / (EXT2_BLOCK_SIZE(volume->super) / EXT2_INODE_SIZE(volume->super));
	ext2_read_block(volume, block);

	/* Byte offset of the inode within that block. */
	offset = ino % (EXT2_BLOCK_SIZE(volume->super) / EXT2_INODE_SIZE(volume->super));
	offset *= EXT2_INODE_SIZE(volume->super);
	le_inode = (struct ext2_inode *)(volume->buffer + offset);

	inode->i_mode = __le16_to_cpu(le_inode->i_mode);
	inode->i_uid = __le16_to_cpu(le_inode->i_uid);
	inode->i_size = __le32_to_cpu(le_inode->i_size);
	inode->i_atime = __le32_to_cpu(le_inode->i_atime);
	inode->i_ctime = __le32_to_cpu(le_inode->i_ctime);
	inode->i_mtime = __le32_to_cpu(le_inode->i_mtime);
	inode->i_dtime = __le32_to_cpu(le_inode->i_dtime);
	inode->i_gid = __le16_to_cpu(le_inode->i_gid);
	inode->i_links_count = __le16_to_cpu(le_inode->i_links_count);
	inode->i_blocks = __le32_to_cpu(le_inode->i_blocks);
	inode->i_flags = __le32_to_cpu(le_inode->i_flags);

	if (S_ISLNK(inode->i_mode)) {
		/* Symlinks: i_block may hold the target text itself, so the
		 * bytes are copied verbatim instead of being byte-swapped. */
		memcpy(inode->i_block, le_inode->i_block, EXT2_N_BLOCKS * 4);
	} else {
		for (i = 0; i < EXT2_N_BLOCKS; i++)
			inode->i_block[i] = __le32_to_cpu(le_inode->i_block[i]);
	}

	inode->i_generation = __le32_to_cpu(le_inode->i_generation);
	inode->i_file_acl = __le32_to_cpu(le_inode->i_file_acl);
	inode->i_dir_acl = __le32_to_cpu(le_inode->i_dir_acl);
	inode->i_faddr = __le32_to_cpu(le_inode->i_faddr);

	/* Single-byte OS-dependent fields need no byte swapping. */
	inode->osd2.linux2.l_i_frag = le_inode->osd2.linux2.l_i_frag;
	inode->osd2.linux2.l_i_fsize = le_inode->osd2.linux2.l_i_fsize;
	inode->osd2.linux2.l_i_uid_high = __le16_to_cpu(le_inode->osd2.linux2.l_i_uid_high);
	inode->osd2.linux2.l_i_gid_high = __le16_to_cpu(le_inode->osd2.linux2.l_i_gid_high);

	return 0;
}
/*
 * Resolve the @Index-th entry beneath the indirect-block tree rooted
 * at block @dwContent, @layer levels deep, returning the block number
 * through @dwRet.  layer == 0 means @dwContent is already the data
 * block; layers 1-3 are single/double/triple indirection.
 * Returns true on success, false on allocation or read failure.
 */
bool ext2_get_block(PEXT2_FILESYS Ext2Sys, ULONG dwContent, ULONG Index, int layer, ULONG *dwRet)
{
	ULONG *pData = NULL;
	LONGLONG Offset = 0;
	ULONG i = 0, j = 0, temp = 1;
	ULONG dwBlk = 0;
	bool bRet = true;
	PEXT2_SUPER_BLOCK pExt2Sb = Ext2Sys->ext2_sb;

	/* NOTE(review): Offset is computed but never used afterwards. */
	Offset = (LONGLONG) dwContent;
	Offset = Offset * Ext2Sys->blocksize;

	pData = (ULONG *)RtlAllocateHeap(RtlGetProcessHeap(), 0, Ext2Sys->blocksize);
	if (!pData)
	{
		return false;
	}
	memset(pData, 0, Ext2Sys->blocksize);

	if (layer == 0)
	{
		/* Leaf: the content pointer itself is the answer. */
		dwBlk = dwContent;
	}
	else if (layer <= 3)
	{
		if (!ext2_read_block(Ext2Sys, dwContent, (void *)pData))
		{
			bRet = false;
			goto errorout;
		}

		/* temp = blocks addressed per entry at this layer:
		 * (blocksize / 4) ^ (layer - 1). */
		temp = 1 << ((10 + pExt2Sb->s_log_block_size - 2) * (layer - 1));

		i = Index / temp;   /* entry to follow in this table    */
		j = Index % temp;   /* index remaining for lower layers */

		if (!ext2_get_block(Ext2Sys, pData[i], j, layer - 1, &dwBlk))
		{
			bRet = false;
			DPRINT1("Mke2fs: ext2_get_block: ... error recuise...\n");
			goto errorout;
		}
	}

errorout:
	if (pData)
		RtlFreeHeap(RtlGetProcessHeap(), 0, pData);
	if (bRet && dwRet)
		*dwRet = dwBlk;
	return bRet;
}
/*
 * Read @length bytes of file data from @_inode starting at byte
 * @f_offset into @_buffer, one (possibly partial) block at a time.
 * Sparse blocks (block id 0) read back as zeros; reads that extend
 * past EOF are trimmed.
 *
 * NOTE(review): the offset-past-EOF path uses a bare "return EINVAL"
 * while every other exit goes through THROW/RETURN -- verify this is
 * valid under the SFUNC calling convention, and that a positive errno
 * is really the intended return for that case.
 */
SFUNC(aoff_t, ext2_read_inode, inode_t *_inode, void *_buffer, aoff_t f_offset, aoff_t length)
{
	ext2_device_t *device;
	ext2_vinode_t *inode;
	aoff_t count;        /* bytes copied so far                  */
	aoff_t block_size;
	aoff_t in_blk_size;  /* size of the current chunk            */
	aoff_t in_blk;       /* offset within the current block      */
	aoff_t in_file;      /* absolute file offset of the chunk    */
	aoff_t rsize;
	uint32_t block_addr;
	uint8_t *buffer = _buffer;
	int status;

	assert( _inode != NULL );
	assert( buffer != NULL );

	device = (ext2_device_t *) _inode->device;
	inode = (ext2_vinode_t *) _inode;

	if (f_offset > _inode->size)
		return EINVAL;
	if ((length + f_offset) > _inode->size)
		length = _inode->size - f_offset;

	/* ext2 block size is 1024 << s_log_block_size. */
	block_size = 1024 << device->superblock.block_size_enc;

	for (count = 0; count < length; count += in_blk_size) {
		in_file = count + f_offset;
		in_blk = in_file % block_size;

		/* Clamp the chunk to the request end and the block end. */
		in_blk_size = length - count;
		if (in_blk_size > (block_size - in_blk))
			in_blk_size = block_size - in_blk;

		status = ext2_decode_block_id (device, &(inode->inode), in_file / block_size, &block_addr);
		if (status)
			THROW(status, 0);

		if (block_addr) {
			status = ext2_read_block(device, block_addr, in_blk, &(buffer[count]), in_blk_size, &rsize);
			if (status)
				THROW(status, 0);
		} else {
			/* Sparse block: no on-disk storage, reads as zeros. */
			memset(&(buffer[count]), 0, in_blk_size);
		}
	}
	RETURN(count);
}
/*
 * Read up to @size bytes of file data for @inode into @buf, walking
 * the 12 direct block pointers only.  Returns the total number of
 * bytes reported read by ext2_read_block().
 */
int ext2_read_inode( ext2_device_t *dev, ext2_inode_t *inode, char *buf, unsigned int size ){
	unsigned int i, ptr, readsize, total = 0;

	for ( i = ptr = 0; i < size && ptr < 12; i += dev->block_size, ptr++ ){
		/* The last chunk may be shorter than a full block. */
		if ( size - i >= dev->block_size )
			readsize = dev->block_size;
		else
			readsize = size % dev->block_size;

		/* Fix: the original computed readsize but then always requested
		 * dev->block_size bytes, so a trailing partial block overran the
		 * caller's buffer.  Request only the clamped readsize. */
		total += ext2_read_block( dev, inode->d_ptr[ptr], buf + i, 0, readsize );
	}
	return total;
}
/* Write ENTRY item of the inode table from BLOCK_GROUP */ void ext2_write_inode(struct block *b, uint32_t ino_idx, struct inode *inode){ struct ext2_meta_data *meta = NULL; struct bg_desc_table *bg_desc_tabs = NULL; uint32_t block_size = 0; uint32_t inode_table = 0, inodes_per_group = 0, inodes_per_block = 0; uint32_t block_group = 0, block_idx = 0, block_offset = 0; struct inode *inode_tab = NULL; //get meta data ASSERT(b != NULL); meta = ext2_get_meta(b); ASSERT(meta != NULL); bg_desc_tabs = meta->bg_desc_tabs; ASSERT(bg_desc_tabs != NULL); ASSERT(block_group < DIV_ROUND_UP(meta->sb->s_blocks_count, meta->sb->s_blocks_per_group)); inodes_per_group = meta->sb->s_inodes_per_group; block_size = ext2_get_block_size(meta->sb); // get block group ino_idx = ino_idx - 1; // inode index starts from 1 !!! block_group = ino_idx / inodes_per_group; bg_desc_tabs = &bg_desc_tabs[block_group]; // get inode table inode_table = bg_desc_tabs->bg_inode_table; inodes_per_block = block_size/sizeof(struct inode); ASSERT((block_size % sizeof(struct inode)) == 0); // calculate inode index in local table ino_idx -= block_group * inodes_per_group; // get block location of inode block_idx = inode_table + ino_idx/inodes_per_block; block_offset = ino_idx % inodes_per_block; // read block data inode_tab = ext2_read_block(b,block_idx,block_size,NULL); // modify corresponding entry memcpy(&inode_tab[block_offset], inode, sizeof(struct inode)); // write to disk ext2_write_block(b,block_idx,block_size,inode_tab); // release memory kfree(inode_tab); }
/* Get the ENTRY item of the inode table from BLOCK_GROUP */ struct inode *ext2_get_inode(struct block *b, uint32_t ino_idx){ struct ext2_meta_data *meta = NULL; struct bg_desc_table *bg_desc_tabs = NULL; uint32_t block_size = 0; uint32_t inode_table = 0, inodes_per_group = 0, inodes_per_block = 0; uint32_t block_group = 0, block_idx = 0, block_offset = 0; struct inode *inode_tab = NULL, *inode = NULL; //get meta data ASSERT(b != NULL); meta = ext2_get_meta(b); ASSERT(meta != NULL); bg_desc_tabs = meta->bg_desc_tabs; ASSERT(bg_desc_tabs != NULL); ASSERT(block_group < DIV_ROUND_UP(meta->sb->s_blocks_count, meta->sb->s_blocks_per_group)); inodes_per_group = meta->sb->s_inodes_per_group; block_size = ext2_get_block_size(meta->sb); // get block group ino_idx = ino_idx - 1; // inode index starts from 1 !!! block_group = ino_idx / inodes_per_group; bg_desc_tabs = &bg_desc_tabs[block_group]; // get inode table inode_table = bg_desc_tabs->bg_inode_table; inodes_per_block = block_size/sizeof(struct inode); ASSERT((block_size % sizeof(struct inode)) == 0); // calculate inode index in local table ino_idx -= block_group * inodes_per_group; // get block location of inode block_idx = inode_table + ino_idx/inodes_per_block; block_offset = ino_idx % inodes_per_block; // read block data inode_tab = ext2_read_block(b,block_idx,block_size,NULL); inode = kmalloc(sizeof(struct inode)); memcpy(inode, &inode_tab[block_offset], sizeof(struct inode)); kfree(inode_tab); return inode; }
/*
 * Map a file-relative (logical) block number to an on-disk (physical)
 * block number by walking the inode's direct, indirect, double- and
 * triple-indirect pointer trees.  Indirect blocks are read through the
 * shared volume->buffer block cache.
 */
unsigned int ext2_get_block_addr(ext2_VOLUME* volume, struct ext2_inode *inode, unsigned int logical)
{
	unsigned int physical;
	unsigned int addr_per_block;

	/* direct */
	if (logical < EXT2_NDIR_BLOCKS) {
		physical = inode->i_block[logical];
		return physical;
	}

	/* indirect */
	logical -= EXT2_NDIR_BLOCKS;
	addr_per_block = EXT2_ADDR_PER_BLOCK (volume->super);
	if (logical < addr_per_block) {
		ext2_read_block(volume, inode->i_block[EXT2_IND_BLOCK]);
		physical = __le32_to_cpu(((unsigned int *)volume->buffer)[logical]);
		return physical;
	}

	/* double indirect */
	logical -= addr_per_block;
	if (logical < addr_per_block * addr_per_block) {
		ext2_read_block(volume, inode->i_block[EXT2_DIND_BLOCK]);
		physical = __le32_to_cpu(((unsigned int *)volume->buffer)
					 [logical / addr_per_block]);
		ext2_read_block(volume, physical);
		physical = __le32_to_cpu(((unsigned int *)volume->buffer)
					 [logical % addr_per_block]);
		return physical;
	}

	/* triple indirect */
	logical -= addr_per_block * addr_per_block;
	/* Fix: the original read the double-indirect root (EXT2_DIND_BLOCK)
	 * here, so every triple-indirect lookup resolved through the wrong
	 * tree. */
	ext2_read_block(volume, inode->i_block[EXT2_TIND_BLOCK]);
	physical = __le32_to_cpu(((unsigned int *)volume->buffer)
				 [logical / (addr_per_block * addr_per_block)]);
	ext2_read_block(volume, physical);
	logical = logical % (addr_per_block * addr_per_block);
	physical = __le32_to_cpu(((unsigned int *)volume->buffer)[logical / addr_per_block]);
	ext2_read_block(volume, physical);
	physical = __le32_to_cpu(((unsigned int *)volume->buffer)[logical % addr_per_block]);
	return physical;
}
/* Get N th data block of inode */ void *inode_get_block_data(struct block *d, struct inode *inode, uint32_t block_idx){ struct ext2_meta_data *meta; uint32_t block_size,block_id; void *block_data; ASSERT(d != NULL && inode != NULL); // get data block id block_id = inode_get_data_block(d,inode,block_idx); ASSERT(block_id != UINT32_MAX); // get device meta data meta = ext2_get_meta(d); ASSERT(meta != NULL && meta->sb != NULL); block_size = ext2_get_block_size(meta->sb); // read from disk block_data = ext2_read_block(d,block_id,block_size,NULL); return block_data; }
/*
 * Load the group descriptor for @group_id into @gdp, converting each
 * field from on-disk little-endian to host byte order.  The raw
 * descriptor is fetched through the shared volume->buffer block cache.
 */
void ext2_get_group_desc(ext2_VOLUME* volume, int group_id, struct ext2_group_desc *gdp)
{
	unsigned int desc_per_block = EXT2_DESC_PER_BLOCK(volume->super);
	/* Descriptor table starts right after the superblock's block. */
	unsigned int blk = 1 + volume->super->s_first_data_block + group_id / desc_per_block;
	unsigned int byte_off = (group_id % desc_per_block) * sizeof(*gdp);
	struct ext2_group_desc *raw;

	ext2_read_block(volume, blk);
	raw = (struct ext2_group_desc *)(volume->buffer + byte_off);

	gdp->bg_block_bitmap      = __le32_to_cpu(raw->bg_block_bitmap);
	gdp->bg_inode_bitmap      = __le32_to_cpu(raw->bg_inode_bitmap);
	gdp->bg_inode_table       = __le32_to_cpu(raw->bg_inode_table);
	gdp->bg_free_blocks_count = __le16_to_cpu(raw->bg_free_blocks_count);
	gdp->bg_free_inodes_count = __le16_to_cpu(raw->bg_free_inodes_count);
	gdp->bg_used_dirs_count   = __le16_to_cpu(raw->bg_used_dirs_count);
}
ext2_VOLUME* ext2_mount(int fd) { ext2_VOLUME *volume; struct ext2_super_block *super; char *buffer; super = (struct ext2_super_block*)malloc(sizeof(struct ext2_super_block)); if (super == NULL) return NULL; ext2_get_super(fd, super); if (super->s_magic != EXT2_SUPER_MAGIC) { free(super); return NULL; } buffer = (char*)malloc(EXT2_BLOCK_SIZE(super)); if (buffer == NULL) { free(super); return NULL; } volume = (ext2_VOLUME*)malloc(sizeof(ext2_VOLUME)); if (volume == NULL) { free(super); free(buffer); return NULL; } volume->buffer = buffer; volume->fd = fd; volume->super = super; volume->current = -1; ext2_read_block(volume, 0); return volume; }
/*
 * Allocate a free inode: scan block groups (starting at inode 11, the
 * first non-reserved ext2 inode) for a group with free inodes, then
 * scan that group's inode bitmap word by word and bit by bit for a
 * clear bit.  On success the bit is set, the bitmap and group
 * descriptor are written back, the in-memory superblock counter is
 * decremented, and the new inode number is returned.
 *
 * NOTE(review): error exits between ext2_load_bgd and ext2_free_bgd
 * skip ext2_free_bgd -- verify BGD ownership (possible leak).
 */
SFUNC(uint32_t, ext2_alloc_inode, ext2_device_t *device)
{
	uint32_t i_count = device->superblock.inode_count;
	uint32_t inode_id = 11;
	uint32_t bgrp_id = 0;
	uint32_t bgrp_icnt = device->superblock.inodes_per_group;
	uint32_t bgrp_count = ext2_divup(i_count,bgrp_icnt);//DIVUP
	/* Words per bitmap block, and inode bits covered per bitmap block. */
	uint32_t bm_block_size = 256 << device->superblock.block_size_enc;
	uint32_t bm_block_icnt = bm_block_size * 32;
	uint32_t bgrp_bmcnt = ext2_divup(bgrp_icnt,bm_block_icnt);//DIVUP
	uint32_t bm_id;
	uint32_t inode_map[bm_block_size];
	/* Inode numbering is 1-based. */
	uint32_t first_i = 1;
	uint32_t idx ,nb;
	aoff_t rsize;
	int status;
	ext2_block_group_desc_t *bgd;

	assert ( device != NULL );

	/* Seed the scan position (group / bitmap block / word / bit) from
	 * the first candidate inode id. */
	bgrp_id = (inode_id - first_i) / bgrp_icnt;
	idx = inode_id - first_i - bgrp_id * bgrp_icnt;
	bm_id = idx / bm_block_icnt;
	idx -= bm_id * bm_block_icnt;
	nb = idx % 32;
	idx -= nb;
	idx /= 32;

	for (; bgrp_id < bgrp_count; bgrp_id++) {
		status = ext2_load_bgd(device, bgrp_id, &bgd);
		if (status)
			THROW(status, 0);

		if (bgd->free_inode_count) {
			for (; bm_id < bgrp_bmcnt; bm_id++) {
				status = ext2_read_block(device, bgd->inode_bitmap + bm_id, 0, inode_map, bm_block_size * 4, &rsize);
				if (status)
					THROW(status, 0);

				for (; idx < bm_block_size; idx++) {
					/* Skip fully allocated 32-inode words. */
					if (inode_map[idx] != 0xFFFFFFFF) {
						for (; nb < 32; nb++)
							if (!EXT2_BITMAP_GET(inode_map[idx], nb))
								goto found_it;
					}
					nb = 0;
				}
				idx = 0;
			}
		}
		bm_id = 0;
		ext2_free_bgd(device, bgd);
	}
	bgrp_id = 0;

	/* Every group exhausted. */
	THROW(ENOSPC, 0);

found_it:
	/* Reassemble the inode number from the scan coordinates. */
	inode_id = nb + idx * 32 + bm_id * bm_block_icnt + bgrp_id * bgrp_icnt + first_i;

	EXT2_BITMAP_SET(inode_map[idx], nb);

	status = ext2_write_block(device, bgd->inode_bitmap + bm_id, 0, inode_map, bm_block_size * 4, &rsize);
	if (status) {
		debugcon_printf("ext2: MAYDAY MAYDAY MAYDAY: write error (inode bitmap), FILESYSTEM IS PROBABLY CORRUPTED NOW!");
		ext2_handle_error(device);
		THROW(status, 0);
	}

	bgd->free_inode_count--;

	status = ext2_store_bgd(device, bgrp_id, bgd);
	if (status) {
		debugcon_printf("ext2: MAYDAY MAYDAY MAYDAY: write error (BGD), FILESYSTEM IS PROBABLY CORRUPTED NOW!");
		ext2_handle_error(device);
		THROW(status, 0);
	}

	ext2_free_bgd(device, bgd);

	device->superblock.free_inode_count--;

	RETURN(inode_id);
}
/* Allocate (expand) all data blocks in [start, end] covered by the
 * index block BLOCK_ID at the given LEVEL of an inode's block tree.
 * items_per_block is the number of 32-bit ids per block; l0..l3 locate
 * this index block within the tree so the absolute block range of each
 * entry can be computed.  Returns 0 on success, -1 when the free map
 * runs out of blocks.
 */
static int inode_expand_range(uint32_t block_id, uint32_t level, uint32_t start, uint32_t end, uint32_t items_per_block,uint32_t l0, uint32_t l1, uint32_t l2, uint32_t l3){
    struct block *d = block_get_role(BLOCK_FILESYS);
    uint32_t item_start, item_end;
    uint32_t *level_data;
    uint32_t block_id2;
    int i, ret = 0;
    enum RANGE range_comparison;

    ASSERT(d != NULL);
    ASSERT(level > 0);

    // Load this index block's entry table (caller-owned buffer).
    level_data = ext2_read_block(d,block_id,items_per_block*sizeof(uint32_t),NULL);

    for(i = 0; i < items_per_block; i++){
        // Get Block range the item represents
        if(level == 1){
            item_start = inode_get_direct_block_idx(items_per_block,l0,i,0,0);
            item_end = inode_get_direct_block_idx(items_per_block,l0,i,items_per_block-1,items_per_block-1);
        }
        else if (level == 2){
            item_start = inode_get_direct_block_idx(items_per_block,l0,l1,i,0);
            item_end = inode_get_direct_block_idx(items_per_block,l0,l1,i,items_per_block-1);
        }
        else {
            // Deepest level: each entry is a single data block.
            item_start = inode_get_direct_block_idx(items_per_block,l0,l1,l2,i);
            item_end = item_start;
        }

        // Compare range
        range_comparison = inode_range_compare(item_start,item_end,start,end);

        if((range_comparison & RANGE_OVERLAP) > 0){
            // In Range
            block_id2 = level_data[i];
            // Allocate block in disk if not exist.
            // (leaf entries get a plain block; interior entries get a
            // zeroed block so their entry tables start empty)
            if(block_id2 == 0){
                if(item_start == item_end)
                    block_id2 = freemap_get_block(false);
                else
                    block_id2 = freemap_get_block(true);
#ifdef FILESYS_EXT2_DEBUG
                printf(" New block id: 0x%x for %d/%d/%d/%d:%d\n", block_id2,l0,l1,l2,l3,i);
#endif
            }
            if(block_id2 != FREEMAP_GET_ERROR) {level_data[i] = block_id2;}
            else {ret = -1; break;}

            /* If it is a leaf node */
            if(item_start == item_end) continue;

            /* If not leaf node*/
            if(level == 1)
                inode_expand_range(block_id2,level+1,start,end,items_per_block,l0,i,0,0);
            else if(level == 2)
                inode_expand_range(block_id2,level+1,start,end,items_per_block,l0,l1,i,0);
            else
                PANIC("Inode Fill Range Reach Unexpected Level.\n");
        }
        else if((range_comparison & RANGE_AHEAD) > 0){
            //Ahead of start
            continue;
        }
        else{
            // Passed End
            break;
        }
    }

    // Persist any entry updates made above, then release the table.
    ext2_write_block(d,block_id,items_per_block*sizeof(uint32_t),level_data);
    kfree(level_data);
    return ret;
}
/* inode write to given position
 * (original header said "read" -- this routine writes)
 *
 * Writes @size bytes from @buffer_ into @inode starting at byte
 * @offset, growing the inode first via inode_resize().  Aligned full
 * blocks are written directly; partial blocks go through a
 * read-modify-write bounce buffer.  Returns the number of bytes
 * actually written.
 */
off_t inode_write_at(struct block *d, struct inode *inode, const void *buffer_, off_t size, off_t offset){
    struct ext2_meta_data *meta;
    uint32_t block_size,block_id,block_idx,block_ofs;
    const uint8_t *buffer = buffer_;
    uint8_t *bounce = NULL;
    off_t bytes_written = 0, err;

    ASSERT(d != NULL && inode != NULL);

    // Resize inode
    err = inode_resize(inode,offset + size);
    if(err < 0) {
        printf("inode_write_at: resize failed.\n");
        return 0;
    }

    // get device meta data
    meta = ext2_get_meta(d);
    ASSERT(meta != NULL && meta->sb != NULL);
    block_size = ext2_get_block_size(meta->sb);

    // read from disk
    while(size > 0){
        // get data block id
        block_idx = offset / block_size; // the nth data block
        block_ofs = offset % block_size; // byte offset with data block
        block_id = inode_get_data_block(d,inode,block_idx); // data block id in fs
        ASSERT(block_id != UINT32_MAX);

        // Calculate bytes to write
        // off_t is signed.
        off_t inode_left = inode->i_size - offset;
        off_t block_left = block_size - block_ofs;
        off_t min_left = inode_left < block_left ? inode_left : block_left;
        off_t chunk_size = size < min_left ? size : min_left;

        // no bytes to be read
        if(chunk_size <= 0)
            break;

        // whole block data
        if(block_ofs == 0 && chunk_size == block_size)
            ext2_write_block(d,block_id,block_size,buffer+bytes_written);
        else{
            if(bounce == NULL){
                bounce = kmalloc(block_size);
                if(bounce == NULL)
                    break;
            }
            // First read the block from disk
            ext2_read_block(d,block_id,block_size,bounce);
            // Modify data read
            memcpy(bounce+block_ofs,buffer+bytes_written,chunk_size);
            // Write to disk
            ext2_write_block(d,block_id,block_size,bounce);
        }

        // advance.
        size -= chunk_size;
        offset += chunk_size;
        bytes_written += chunk_size;
    }
    if(bounce != NULL)
        kfree(bounce);
    return bytes_written;
}
/*
 * Read up to @len bytes from @inode starting at byte @offset into
 * @_buf.  Handles an unaligned head block, whole middle blocks, and a
 * partial tail; unmapped (sparse) blocks read back as zeros.  Returns
 * the number of bytes read (trimmed at EOF) or a negative error.
 */
ssize_t ext2_read_inode(ext2_t *ext2, struct ext2_inode *inode, void *_buf, off_t offset, size_t len)
{
    int err = 0;   /* NOTE(review): never set after init -- dead variable */
    size_t bytes_read = 0;
    uint8_t *buf = _buf;

    /* calculate the file size */
    off_t file_size = ext2_file_len(ext2, inode);

    LTRACEF("inode %p, offset %lld, len %zd, file_size %lld\n", inode, offset, len, file_size);

    /* trim the read */
    if (offset > file_size)
        return 0;
    if (offset + len >= file_size)
        len = file_size - offset;
    if (len == 0)
        return 0;

    /* calculate the starting file block */
    uint file_block = offset / EXT2_BLOCK_SIZE(ext2->sb);

    /* handle partial first block */
    if ((offset % EXT2_BLOCK_SIZE(ext2->sb)) != 0) {
        uint8_t temp[EXT2_BLOCK_SIZE(ext2->sb)];

        /* calculate the block and read it */
        blocknum_t phys_block = file_block_to_fs_block(ext2, inode, file_block);
        if (phys_block == 0) {
            /* sparse block: reads as zeros */
            memset(temp, 0, EXT2_BLOCK_SIZE(ext2->sb));
        } else {
            ext2_read_block(ext2, temp, phys_block);
        }

        /* copy out what we need */
        size_t block_offset = offset % EXT2_BLOCK_SIZE(ext2->sb);
        size_t tocopy = MIN(len, EXT2_BLOCK_SIZE(ext2->sb) - block_offset);
        memcpy(buf, temp + block_offset, tocopy);

        /* increment our stuff */
        file_block++;
        len -= tocopy;
        bytes_read += tocopy;
        buf += tocopy;
    }

    /* handle middle blocks (read straight into the caller's buffer) */
    while (len >= EXT2_BLOCK_SIZE(ext2->sb)) {
        /* calculate the block and read it */
        blocknum_t phys_block = file_block_to_fs_block(ext2, inode, file_block);
        if (phys_block == 0) {
            memset(buf, 0, EXT2_BLOCK_SIZE(ext2->sb));
        } else {
            ext2_read_block(ext2, buf, phys_block);
        }

        /* increment our stuff */
        file_block++;
        len -= EXT2_BLOCK_SIZE(ext2->sb);
        bytes_read += EXT2_BLOCK_SIZE(ext2->sb);
        buf += EXT2_BLOCK_SIZE(ext2->sb);
    }

    /* handle partial last block */
    if (len > 0) {
        uint8_t temp[EXT2_BLOCK_SIZE(ext2->sb)];

        /* calculate the block and read it */
        blocknum_t phys_block = file_block_to_fs_block(ext2, inode, file_block);
        if (phys_block == 0) {
            memset(temp, 0, EXT2_BLOCK_SIZE(ext2->sb));
        } else {
            ext2_read_block(ext2, temp, phys_block);
        }

        /* copy out what we need */
        memcpy(buf, temp, len);

        /* increment our stuff */
        bytes_read += len;
    }

    LTRACEF("err %d, bytes_read %zu\n", err, bytes_read);

    return (err < 0) ? err : (ssize_t)bytes_read;
}
/*
 * Mount an ext2 filesystem from device @dev at block @block (device
 * node path @node).  Validates the superblock magic, error state, and
 * feature flags, loads the root inode (inode 2) and returns it, or
 * returns 0 on any failure.
 */
struct inode *ext2_mount(dev_t dev, u64 block, char *node)
{
	ext2_fs_t *fs = get_new_fsvol();
	if(!fs) {
		/* Fix: the original called release_fsvol(fs) here with fs == NULL;
		 * there is nothing to release when allocation failed. */
		printk(5, "[ext2]: Unable to allocate new filesystem!\n");
		return 0;
	}
	/* Resolve the device from the node path when none was given. */
	struct inode *in = get_idir(node, 0);
	if(in && dev == -1)
		dev = in->dev;
	if(in && (int)in->dev != dev)
		printk(4, "[ext2]: Odd...node device is different from given device...\n");
	iput(in);
	fs->block = block;
	fs->dev = dev;
	fs->sb->block_size=0;
	if(node) strncpy(fs->node, node, 16);
	/* The superblock lives in block 1. */
	ext2_read_block(fs, 1, (unsigned char *)fs->sb);
	if(fs->sb->magic != EXT2_SB_MAGIC) {
		release_fsvol(fs);
		return 0;
	}
	/* Honor the superblock's error policy. */
	if(fs->sb->state == 2) {
		printk(5, "[ext2]: Filesystem has errors: ");
		if(fs->sb->errors == 2) {
			printk(5, "Mounting as read-only\n");
			fs->read_only=1;
		} else if(fs->sb->errors == 3)
			panic(0, "ext2 mount failed!");
		else
			printk(5, "Ignoring...\n");
	}
	/* Required-feature flags we cannot satisfy abort the mount. */
	unsigned reqf = fs->sb->features_req;
	if(!(reqf&0x2) || (reqf & 0x1) || (reqf &0x4) || (reqf&0x8)) {
		release_fsvol(fs);
		printk(5, "[ext2]: Cannot mount %s due to feature flags\n", node);
		return 0;
	}
	unsigned rof = fs->sb->features_ro;
	if(ext2_sb_inodesize(fs->sb) != 128) {
		release_fsvol(fs);
		printk(5, "[ext2]: Inode size %d is not supported\n", ext2_sb_inodesize(fs->sb));
		return 0;
	}
	/* Read-only feature flags we cannot satisfy force read-only mode. */
	if(!(rof&0x1) || (rof & 0x2) || (rof&0x4)) {
		printk(5, "[ext2]: Filesystem on %s must be mounted read-only due to feature flags\n", node);
		fs->read_only=1;
	}
	/* Inode 2 is the ext2 root directory. */
	ext2_inode_t root;
	ext2_inode_read(fs, 2, &root);
	fs->root = create_sea_inode(&root, "ext2");
	strncpy(fs->root->node_str, node, 128);
	if(fs->sb->mount_count > fs->sb->max_mount_count)
		fs->sb->mount_count=0;
	fs->sb->mount_time = get_epoch_time();
	ext2_sb_update(fs, fs->sb);
	printk(0, "[ext2]: Optional features flags are %x\n", fs->sb->features_opt);
	if(fs->sb->features_opt & 0x4)
		printk(0, "[ext2]: Hmm...looks like an ext3 filesystem to me. Oh well. It should still work.\n");
	if(fs->sb->features_opt & 0x20)
		printk(0, "[ext2]: Hmm...directories have a hash index. I'll look into that later...\n");
	return fs->root;
}
/*
 * inode_shrink_range - recursively free data blocks in [start, end] that
 * hang off the indirect-pointer block @block_id.
 *
 * @block_id:        physical block holding an array of block pointers.
 * @level:           depth of this pointer block (1..3); leaves live below
 *                   level 3 entries (a level-3 entry maps one data block).
 * @start, @end:     inclusive range of file block indices to free.
 * @items_per_block: number of uint32_t pointers per block.
 * @l0..@l3:         path of indices taken from the root to reach this
 *                   block; used to compute each entry's file-block range.
 *                   NOTE(review): @l3 is never read.
 *
 * Always returns 0 (ret is never changed); recursion results are ignored.
 * The (possibly modified) pointer block is written back before returning.
 */
static int inode_shrink_range(uint32_t block_id, uint32_t level, uint32_t start, uint32_t end, uint32_t items_per_block,uint32_t l0, uint32_t l1, uint32_t l2, uint32_t l3){
	struct block *d = block_get_role(BLOCK_FILESYS);
	uint32_t item_start, item_end;
	uint32_t *level_data;	/* caller-freed copy of the pointer block */
	uint32_t block_id2;
	int i, ret = 0;	/* NOTE(review): i is int vs. unsigned items_per_block */
	enum RANGE range_comparison;
	ASSERT(d != NULL);
	ASSERT(level > 0);
	level_data = ext2_read_block(d,block_id,items_per_block*sizeof(uint32_t),NULL);
	for(i = 0; i < items_per_block; i++){
		/* Compute the file-block range entry i covers at this level:
		 * level 1 entries span a full doubly-indirect subtree,
		 * level 2 entries a singly-indirect subtree,
		 * level 3 entries exactly one data block (leaf). */
		if(level == 1){
			item_start = inode_get_direct_block_idx(items_per_block,l0,i,0,0);
			item_end = inode_get_direct_block_idx(items_per_block,l0,i,items_per_block-1,items_per_block-1);
		} else if (level == 2){
			item_start = inode_get_direct_block_idx(items_per_block,l0,l1,i,0);
			item_end = inode_get_direct_block_idx(items_per_block,l0,l1,i,items_per_block-1);
		} else {
			item_start = inode_get_direct_block_idx(items_per_block,l0,l1,l2,i);
			item_end = item_start;
		}
		/* Compare the entry's range against [start, end]. */
		range_comparison = inode_range_compare(item_start,item_end,start,end);
		if((range_comparison & RANGE_OVERLAP) > 0){
			/* Entry overlaps the shrink range. */
			block_id2 = level_data[i];
			/* Skip entries that were already cleared (sparse files). */
			if(block_id2 == 0) continue;
			/* Leaf: free the data block and clear the pointer. */
			if(item_start == item_end) {
				freemap_free_block(block_id2);
				level_data[i] = 0;
				continue;
			}
			/* Interior entry: recurse into the sub-level first.
			 * NOTE(review): return values of the recursive calls
			 * are discarded. Level 3 entries are always leaves,
			 * so the PANIC branch should be unreachable. */
			if(level == 1)
				inode_shrink_range(block_id2,level+1,start,end,items_per_block,l0,i,0,0);
			else if(level == 2)
				inode_shrink_range(block_id2,level+1,start,end,items_per_block,l0,l1,i,0);
			else
				PANIC("Inode Shrink Range Reach Unexpected Level.\n");
			/* Free the pointer block itself only when the shrink
			 * range starts at or before this subtree.
			 * NOTE(review): this assumes @end always reaches past
			 * item_end when start <= item_start (i.e. shrink is
			 * truncate-to-EOF shaped) — TODO confirm with callers. */
			if(start <= item_start){
				freemap_free_block(block_id2);
				level_data[i] = 0;
			}
		} else if((range_comparison & RANGE_AHEAD) > 0){
			/* Entry lies entirely before @start: keep scanning. */
			continue;
		} else{
			/* Entry lies entirely past @end: later ones do too. */
			break;
		}
	}
	/* Persist the cleared pointers. */
	ext2_write_block(d,block_id,items_per_block*sizeof(uint32_t),level_data);
	kfree(level_data);
	return ret;
}
SFUNC(uint32_t, ext2_decode_block_id, ext2_device_t *device, ext2_inode_t *inode, uint32_t block_id) { uint32_t indirect_count = 256 << device->superblock.block_size_enc; uint32_t indirect_id, indirect_off, indirect_rd; uint32_t s_indir_l = indirect_count; uint32_t d_indir_l = indirect_count * indirect_count; uint32_t s_indir_s = 12; uint32_t d_indir_s = s_indir_s + s_indir_l; uint32_t t_indir_s = d_indir_s + d_indir_l; int status; aoff_t rsize; assert ( device != NULL ); if (block_id >= t_indir_s) { //Triply indirect indirect_id = inode->block[14]; indirect_off = (block_id - t_indir_s) / d_indir_l; if (!indirect_id) RETURN(0); status = ext2_read_block(device, indirect_id, indirect_off * 4, &indirect_rd, 4, &rsize); if (status || !indirect_rd) { THROW(status, 0); } indirect_id = indirect_rd; block_id -= indirect_off * d_indir_l + d_indir_l; } else if (block_id >= d_indir_s) { indirect_id = inode->block[13]; if (!indirect_id) RETURN(0); } if (block_id >= d_indir_s) { //Doubly indirect indirect_off = (block_id - d_indir_s) / s_indir_l; status = ext2_read_block(device, indirect_id, indirect_off * 4, &indirect_rd, 4, &rsize); if (status || !indirect_rd) { THROW(status, 0); } indirect_id = indirect_rd; block_id -= s_indir_l + indirect_off * s_indir_l; } else if (block_id >= s_indir_s) { indirect_id = inode->block[12]; if (!indirect_id) RETURN(indirect_id); } if (block_id >= s_indir_s) { //Singly Indirect indirect_off = block_id - s_indir_s; status = ext2_read_block(device, indirect_id, indirect_off * 4, &indirect_rd, 4, &rsize); if (status || !indirect_rd) { THROW(status, 0); } RETURN(indirect_rd); } RETURN(inode->block[block_id]); }
/*
 * ext2_set_block_id - map logical file block @block_id of @inode to the
 * physical block @block_v, allocating any missing intermediate indirect
 * blocks along the way.
 *
 * Mirrors ext2_decode_block_id: the if-chain falls through, with each
 * tier resolving (or allocating) one level of indirection and renumbering
 * block_id down into the next-lower tier's range.
 *
 * BUG FIX: when an intermediate pointer slot was empty, the old code
 * allocated the new block into `indirect_id` — clobbering the parent
 * block id — then wrote the stale zero `indirect_rd` at the parent's
 * offset *inside the newly allocated block*, and finally continued with
 * indirect_id = indirect_rd == 0. The new block was never linked into
 * the parent and the walk proceeded on block 0. Now the allocation goes
 * into a temporary, the new id is written into the PARENT slot, and the
 * walk continues through the new block.
 *
 * Throws the underlying I/O or allocation error; RETURNV on success.
 */
SVFUNC(ext2_set_block_id, ext2_device_t *device, ext2_inode_t *inode, uint32_t block_id, uint32_t block_v)
{
	uint32_t indirect_count = 256 << device->superblock.block_size_enc; /* pointers per block */
	uint32_t indirect_id, indirect_off, indirect_rd;
	uint32_t s_indir_l = indirect_count;
	uint32_t d_indir_l = indirect_count * indirect_count;
	uint32_t s_indir_s = 12;                    /* first singly-indirect logical block */
	uint32_t d_indir_s = s_indir_s + s_indir_l; /* first doubly-indirect logical block */
	uint32_t t_indir_s = d_indir_s + d_indir_l; /* first triply-indirect logical block */
	uint32_t new_id;
	int status;
	aoff_t rsize;

	assert ( device != NULL );

	if (block_id >= t_indir_s) {
		/* Triply indirect */
		indirect_id = inode->block[14];
		indirect_off = (block_id - t_indir_s) / d_indir_l;
		if (!indirect_id) {
			status = ext2_allocate_indirect_block(device, inode, &indirect_id);
			if (status)
				THROWV(status);
			inode->block[14] = indirect_id;
		}
		status = ext2_read_block(device, indirect_id, indirect_off * 4, &indirect_rd, 4, &rsize);
		if (status)
			THROWV(status);
		if (!indirect_rd) {
			/* Allocate the missing doubly-indirect block and link
			 * it into the parent slot we just read. */
			status = ext2_allocate_indirect_block(device, inode, &new_id);
			if (status)
				THROWV(status);
			status = ext2_write_block(device, indirect_id, indirect_off * 4, &new_id, 4, &rsize);
			if (status)
				THROWV(status);
			indirect_rd = new_id;
		}
		indirect_id = indirect_rd;
		/* Renumber into the doubly-indirect range. */
		block_id -= indirect_off * d_indir_l + d_indir_l;
	} else if (block_id >= d_indir_s) {
		indirect_id = inode->block[13];
		if (!indirect_id) {
			status = ext2_allocate_indirect_block(device, inode, &indirect_id);
			if (status)
				THROWV(status);
			inode->block[13] = indirect_id;
		}
	}

	if (block_id >= d_indir_s) {
		/* Doubly indirect */
		indirect_off = (block_id - d_indir_s) / s_indir_l;
		status = ext2_read_block(device, indirect_id, indirect_off * 4, &indirect_rd, 4, &rsize);
		if (status)
			THROWV(status);
		if (!indirect_rd) {
			/* Allocate the missing singly-indirect block and link
			 * it into the parent slot. */
			status = ext2_allocate_indirect_block(device, inode, &new_id);
			if (status)
				THROWV(status);
			status = ext2_write_block(device, indirect_id, indirect_off * 4, &new_id, 4, &rsize);
			if (status)
				THROWV(status);
			indirect_rd = new_id;
		}
		indirect_id = indirect_rd;
		/* Renumber into the singly-indirect range. */
		block_id -= s_indir_l + indirect_off * s_indir_l;
	} else if (block_id >= s_indir_s) {
		indirect_id = inode->block[12];
		if (!indirect_id) {
			status = ext2_allocate_indirect_block(device, inode, &indirect_id);
			if (status)
				THROWV(status);
			inode->block[12] = indirect_id;
		}
	}

	if (block_id >= s_indir_s) {
		/* Singly indirect: store the pointer in the indirect block. */
		indirect_off = block_id - s_indir_s;
		status = ext2_write_block(device, indirect_id, indirect_off * 4, &block_v, 4, &rsize);
		if (status)
			THROWV(status);
		RETURNV;
	}

	/* Direct block. */
	inode->block[block_id] = block_v;
	RETURNV;
}
/*
 * ext2_alloc_block - find, claim and zero one free block on the volume.
 *
 * Scans the block-group bitmaps starting at the position implied by
 * @start (an allocation hint); pass 0 runs from the hint to the end of
 * the volume, pass 1 wraps around and rescans from the first group.
 * On success the bit is set in the on-disk bitmap, the group descriptor
 * and in-core superblock free counts are decremented, the new block is
 * zero-filled on disk and its id is returned.
 *
 * Fixes vs. previous revision:
 *  - the verbatim duplicated wrap-around scan loop is folded into a
 *    two-pass loop;
 *  - hint offsets (bm_id/idx/nb) are reset after every group, so they
 *    can no longer leak into the next group when the starting group has
 *    free_block_count == 0;
 *  - the loaded BGD is released before throwing on a bitmap read error.
 *
 * Throws ENOSPC when no free block exists, or the underlying I/O error.
 */
SFUNC(uint32_t, ext2_alloc_block, ext2_device_t *device, uint32_t start)
{
	uint32_t b_count = device->superblock.block_count;
	uint32_t block_id = start;
	uint32_t bgrp_id = 0;
	uint32_t bgrp_bcnt = device->superblock.blocks_per_group;
	uint32_t bgrp_count = ext2_divup(b_count, bgrp_bcnt);           /* groups on volume */
	uint32_t bm_block_size = 256 << device->superblock.block_size_enc; /* bitmap words per block */
	uint32_t bm_block_bcnt = bm_block_size * 32;                    /* bits (blocks) per bitmap block */
	uint32_t bgrp_bmcnt = ext2_divup(bgrp_bcnt, bm_block_bcnt);     /* bitmap blocks per group */
	uint32_t bm_id;
	uint32_t block_map[bm_block_size]; /* NOTE: VLA = one full block on the stack */
	uint32_t first_b = device->superblock.block_size_enc ? 0 : 1;   /* block 0 reserved on 1K volumes */
	uint32_t idx, nb;
	int pass;
	aoff_t rsize;
	int status;
	ext2_block_group_desc_t *bgd;

	assert ( device != NULL );
	assert ( start < b_count );

	if (start == 0)
		block_id = first_b;

	/* Decompose the hint into group / bitmap block / word / bit. */
	bgrp_id = (block_id - first_b) / bgrp_bcnt;
	idx = block_id - first_b - bgrp_id * bgrp_bcnt;
	bm_id = idx / bm_block_bcnt;
	idx -= bm_id * bm_block_bcnt;
	nb = idx % 32;
	idx -= nb;
	idx /= 32;

	/* Pass 0: from the hint group to the last group.
	 * Pass 1: wrap around and rescan from group 0. */
	for (pass = 0; pass < 2; pass++) {
		for (bgrp_id = pass ? 0 : bgrp_id; bgrp_id < bgrp_count; bgrp_id++) {
			status = ext2_load_bgd(device, bgrp_id, &bgd);
			if (status)
				THROW(status, 0);
			if (bgd->free_block_count) {
				for (; bm_id < bgrp_bmcnt; bm_id++) {
					status = ext2_read_block(device, bgd->block_bitmap + bm_id, 0, block_map, bm_block_size * 4, &rsize);
					if (status) {
						ext2_free_bgd(device, bgd);
						THROW(status, 0);
					}
					for (; idx < bm_block_size; idx++) {
						if (block_map[idx] != 0xFFFFFFFF) {
							for (; nb < 32; nb++)
								if (!EXT2_BITMAP_GET(block_map[idx], nb))
									goto found_it;
						}
						nb = 0;
					}
					idx = 0;
				}
			}
			/* Hint offsets only apply to the first group scanned. */
			bm_id = 0;
			idx = 0;
			nb = 0;
			ext2_free_bgd(device, bgd);
		}
	}
	THROW(ENOSPC, 0);

found_it:
	/* Reassemble the absolute block id from group/word/bit position. */
	block_id = nb + idx * 32 + bm_id * bm_block_bcnt + bgrp_id * bgrp_bcnt + first_b;
	EXT2_BITMAP_SET(block_map[idx], nb);
	status = ext2_write_block(device, bgd->block_bitmap + bm_id, 0, block_map, bm_block_size * 4, &rsize);
	if (status) {
		debugcon_printf("ext2: MAYDAY MAYDAY MAYDAY: write error (block bitmap), FILESYSTEM IS PROBABLY CORRUPTED NOW!");
		ext2_handle_error(device);
		THROW(status, 0);
	}
	bgd->free_block_count--;
	status = ext2_store_bgd(device, bgrp_id, bgd);
	if (status) {
		debugcon_printf("ext2: MAYDAY MAYDAY MAYDAY: write error (BGD), FILESYSTEM IS PROBABLY CORRUPTED NOW!");
		ext2_handle_error(device);
		THROW(status, 0);
	}
	ext2_free_bgd(device, bgd);
	device->superblock.free_block_count--;
	/* Zero the freshly allocated block (bm_block_size * 4 == blocksize). */
	memset(block_map, 0, bm_block_size * 4);
	status = ext2_write_block(device, block_id, 0, block_map, bm_block_size * 4, &rsize);
	if (status) {
		debugcon_printf("ext2: MAYDAY MAYDAY MAYDAY: write error (block clear)!");
		ext2_handle_error(device);
		THROW(status, 0);
	}
	RETURN(block_id);
}
bool ext2_expand_block( PEXT2_FILESYS Ext2Sys, PEXT2_INODE Inode, ULONG dwContent, ULONG Index, int layer, ULONG newBlk, ULONG *dwRet, ULONG *off ) { ULONG *pData = NULL; ULONG i = 0, j = 0, temp = 1; ULONG dwBlk; ULONG dwNewBlk = newBlk; bool bDirty = false; bool bRet = true; ULONG Offset = 0; PEXT2_SUPER_BLOCK pExt2Sb = Ext2Sys->ext2_sb; pData = (ULONG *)RtlAllocateHeap(RtlGetProcessHeap(), 0, Ext2Sys->blocksize); if (!pData) { bRet = false; goto errorout; } if (!ext2_read_block(Ext2Sys, dwContent, (void *)pData)) { bRet = false; goto errorout; } if (layer == 1) { *dwRet = dwContent; *off = Index; pData[Index] = newBlk; bDirty = TRUE; } else if (layer <= 3) { temp = 1 << ((10 + pExt2Sb->s_log_block_size - 2) * (layer - 1)); i = Index / temp; j = Index % temp; dwBlk = pData[i]; if (dwBlk == 0) { if (ext2_alloc_block(Ext2Sys, 0, &dwBlk) ) { pData[i] = dwBlk; bDirty = true; Inode->i_blocks += (Ext2Sys->blocksize / SECTOR_SIZE); } if (!bDirty) goto errorout; } if (!ext2_expand_block(Ext2Sys, Inode, dwBlk, j, layer - 1, bDirty, &dwNewBlk, &Offset)) { bRet = false; DPRINT1("Mke2fs: ext2_expand_block: ... error recuise...\n"); goto errorout; } } if (bDirty) { bRet = ext2_write_block(Ext2Sys, dwContent, (void *)pData); } errorout: if (pData) RtlFreeHeap(RtlGetProcessHeap(), 0, pData); if (bRet && dwRet) *dwRet = dwNewBlk; return bRet; }