bool ext2_get_block(PEXT2_FILESYS Ext2Sys, ULONG dwContent, ULONG Index, int layer, ULONG *dwRet)
{
    ULONG *pData = NULL;
    LONGLONG Offset = 0;
    ULONG i = 0, j = 0, temp = 1;
    ULONG dwBlk = 0;
    bool bRet = true;
    PEXT2_SUPER_BLOCK pExt2Sb = Ext2Sys->ext2_sb;

    Offset = (LONGLONG) dwContent;
    Offset = Offset * Ext2Sys->blocksize;

    pData = (ULONG *)RtlAllocateHeap(RtlGetProcessHeap(), 0, Ext2Sys->blocksize);
    if (!pData)
    {
        return false;
    }
    memset(pData, 0, Ext2Sys->blocksize);

    if (layer == 0)
    {
        dwBlk = dwContent;
    }
    else if (layer <= 3)
    {
        /* Load the indirect block and recurse one level down. */
        if (!ext2_read_block(Ext2Sys, dwContent, (void *)pData))
        {
            bRet = false;
            goto errorout;
        }

        /* Number of blocks addressed by one slot at this level. */
        temp = 1 << ((10 + pExt2Sb->s_log_block_size - 2) * (layer - 1));

        i = Index / temp;
        j = Index % temp;

        if (!ext2_get_block(Ext2Sys, pData[i], j, layer - 1, &dwBlk))
        {
            bRet = false;
            DPRINT1("Mke2fs: ext2_get_block: ... error recurse ...\n");
            goto errorout;
        }
    }

errorout:

    if (pData)
        RtlFreeHeap(RtlGetProcessHeap(), 0, pData);

    if (bRet && dwRet)
        *dwRet = dwBlk;

    return bRet;
}
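As a quick sanity check on the indirection arithmetic above, the following standalone sketch (illustrative only, not part of Mke2fs) assumes a 1 KiB block size (s_log_block_size == 0), so each indirect block holds 256 32-bit block numbers:

#include <stdio.h>

int main(void)
{
    unsigned log_block_size = 0;   /* assumption: 1 KiB blocks */
    int layer = 2;                 /* doubly-indirect lookup */
    unsigned index = 700;          /* logical index within this indirect tree */

    /* same formula as above: blocks addressed by one slot at this level */
    unsigned temp = 1u << ((10 + log_block_size - 2) * (layer - 1));

    printf("temp=%u i=%u j=%u\n", temp, index / temp, index % temp);
    /* prints: temp=256 i=2 j=188 */
    return 0;
}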
/* Write to quotafile */
static ssize_t ext2_quota_write(struct super_block *sb, int type,
                                const char *data, size_t len, loff_t off)
{
    struct inode *inode = sb_dqopt(sb)->files[type];
    sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
    int err = 0;
    int offset = off & (sb->s_blocksize - 1);
    int tocopy;
    size_t towrite = len;
    struct buffer_head tmp_bh;
    struct buffer_head *bh;

    mutex_lock_nested(&inode->i_mutex, I_MUTEX_QUOTA);
    while (towrite > 0) {
        tocopy = sb->s_blocksize - offset < towrite ?
                 sb->s_blocksize - offset : towrite;

        tmp_bh.b_state = 0;
        err = ext2_get_block(inode, blk, &tmp_bh, 1);
        if (err < 0)
            goto out;
        if (offset || tocopy != EXT2_BLOCK_SIZE(sb))
            bh = sb_bread(sb, tmp_bh.b_blocknr);
        else
            bh = sb_getblk(sb, tmp_bh.b_blocknr);
        if (!bh) {
            err = -EIO;
            goto out;
        }
        lock_buffer(bh);
        memcpy(bh->b_data+offset, data, tocopy);
        flush_dcache_page(bh->b_page);
        set_buffer_uptodate(bh);
        mark_buffer_dirty(bh);
        unlock_buffer(bh);
        brelse(bh);
        offset = 0;
        towrite -= tocopy;
        data += tocopy;
        blk++;
    }
out:
    if (len == towrite) {
        mutex_unlock(&inode->i_mutex);
        return err;
    }
    if (inode->i_size < off+len-towrite)
        i_size_write(inode, off+len-towrite);
    inode->i_version++;
    inode->i_mtime = inode->i_ctime = CURRENT_TIME;
    mark_inode_dirty(inode);
    mutex_unlock(&inode->i_mutex);
    return len - towrite;
}
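ext2_quota_write above (and ext2_quota_read further down) split the byte offset into a logical block number and an in-block offset on each iteration; here is a minimal standalone sketch of that arithmetic, assuming a 4 KiB block size (i.e. EXT2_BLOCK_SIZE_BITS(sb) == 12):

#include <stdio.h>

int main(void)
{
    unsigned long long off = 5000;              /* assumed byte offset into the quota file */
    unsigned blocksize_bits = 12;               /* assumption: 4 KiB blocks */
    unsigned blocksize = 1u << blocksize_bits;
    unsigned long long len = 6000;              /* assumed request length */

    unsigned long long blk = off >> blocksize_bits;   /* logical block: 1 */
    unsigned offset = off & (blocksize - 1);          /* offset in block: 904 */
    unsigned long long tocopy =
        (blocksize - offset < len) ? blocksize - offset : len;

    printf("blk=%llu offset=%u tocopy=%llu\n", blk, offset, tocopy);
    /* prints: blk=1 offset=904 tocopy=3192 */
    return 0;
}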
uint32_t ext2_get_inode_block_num(ext2_inodetable_t *inode, uint32_t block)
{
    if (block < 12) {
        return inode->block[block];
    } else if (block < 12 + (1024 << sblock->log_block_size) / sizeof(uint32_t)) {
        return *(uint32_t *)((uintptr_t)ext2_get_block(inode->block[12]) +
                             (block - 12) * sizeof(uint32_t));
    }
    return 0;
}
// This function returns a pointer to the cache block that corresponds to the indirect block pointer.
int ext2_get_indirect_block_pointer_cache_block(ext2_t *ext2, struct ext2_inode *inode,
                                                blocknum_t **cache_block, uint32_t level,
                                                uint32_t pos[], uint *block_loaded)
{
    uint32_t current_level = 0;
    uint current_block = 0, last_block;
    blocknum_t *block = NULL;
    int err;

    if ((level > 3) || (level == 0)) {
        err = -1;
        goto error;
    }

    // Dig down into the indirect blocks. When done, current_block should point to the target.
    while (current_level < level) {
        if (current_level == 0) {
            // read the direct block, simulates a prior loop
            current_block = LE32(inode->i_block[pos[0]]);
        }

        if (current_block == 0) {
            err = -1;
            goto error;
        }

        last_block = current_block;
        current_level++;
        *block_loaded = current_block;

        err = ext2_get_block(ext2, (void **)(void *)&block, current_block);
        if (err < 0)
            goto error;

        if (current_level < level) {
            current_block = LE32(block[pos[current_level]]);
            ext2_put_block(ext2, last_block);
        }
    }

    *cache_block = block;
    return 0;

error:
    *cache_block = NULL;
    *block_loaded = 0;
    return err;
}
bool ext2_block_map(PEXT2_FILESYS Ext2Sys, PEXT2_INODE inode, ULONG block, ULONG *dwRet)
{
    ULONG dwSizes[4] = { 12, 1, 1, 1 };
    ULONG Index = 0;
    ULONG dwBlk = 0;
    PEXT2_SUPER_BLOCK pExt2Sb = Ext2Sys->ext2_sb;
    UINT i;
    bool bRet = false;

    Index = block;

    for (i = 0; i < 4; i++)
    {
        dwSizes[i] = dwSizes[i] << ((10 + pExt2Sb->s_log_block_size - 2) * i);
    }

    if (Index >= inode->i_blocks / (Ext2Sys->blocksize / SECTOR_SIZE))
    {
        DPRINT1("Mke2fs: ext2_block_map: beyond the size of the inode.\n");
        return false;
    }

    for (i = 0; i < 4; i++)
    {
        if (Index < dwSizes[i])
        {
            dwBlk = inode->i_block[i == 0 ? (Index) : (i + 12 - 1)];
            bRet = ext2_get_block(Ext2Sys, dwBlk, Index, i, &dwBlk);
            break;
        }

        Index -= dwSizes[i];
    }

    if (bRet && dwBlk)
    {
        if (dwRet)
            *dwRet = dwBlk;
        return true;
    }
    else
    {
        return false;
    }
}
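For reference, a small sketch (again illustrative, assuming 1 KiB blocks) of how the dwSizes table above partitions a logical block index into the direct, singly-, doubly- and triply-indirect ranges:

#include <stdio.h>

int main(void)
{
    unsigned log_block_size = 0;              /* assumption: 1 KiB blocks */
    unsigned long sizes[4] = { 12, 1, 1, 1 };
    unsigned i;

    for (i = 0; i < 4; i++)
        sizes[i] <<= (10 + log_block_size - 2) * i;

    /* sizes becomes { 12, 256, 65536, 16777216 }: the logical block index
     * is reduced range by range until it falls inside one of the four trees. */
    for (i = 0; i < 4; i++)
        printf("level %u covers %lu blocks\n", i, sizes[i]);
    return 0;
}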
/*
 * Read data from quotafile - avoid pagecache and such because we cannot afford
 * acquiring the locks... As quota files are never truncated and quota code
 * itself serializes the operations (and no one else should touch the files)
 * we don't have to be afraid of races.
 */
static ssize_t ext2_quota_read(struct super_block *sb, int type, char *data,
                               size_t len, loff_t off)
{
    struct inode *inode = sb_dqopt(sb)->files[type];
    sector_t blk = off >> EXT2_BLOCK_SIZE_BITS(sb);
    int err = 0;
    int offset = off & (sb->s_blocksize - 1);
    int tocopy;
    size_t toread;
    struct buffer_head tmp_bh;
    struct buffer_head *bh;
    loff_t i_size = i_size_read(inode);

    if (off > i_size)
        return 0;
    if (off+len > i_size)
        len = i_size-off;
    toread = len;
    while (toread > 0) {
        tocopy = sb->s_blocksize - offset < toread ?
                 sb->s_blocksize - offset : toread;

        tmp_bh.b_state = 0;
        tmp_bh.b_size = sb->s_blocksize;
        err = ext2_get_block(inode, blk, &tmp_bh, 0);
        if (err < 0)
            return err;
        if (!buffer_mapped(&tmp_bh))    /* A hole? */
            memset(data, 0, tocopy);
        else {
            bh = sb_bread(sb, tmp_bh.b_blocknr);
            if (!bh)
                return -EIO;
            memcpy(data, bh->b_data+offset, tocopy);
            brelse(bh);
        }
        offset = 0;
        toread -= tocopy;
        data += tocopy;
        blk++;
    }
    return len;
}
static inline int __ext2_get_block(struct inode *inode, pgoff_t pgoff, int create,
                                   sector_t *result)
{
    struct buffer_head tmp;
    int rc;

    memset(&tmp, 0, sizeof(struct buffer_head));
    rc = ext2_get_block(inode, pgoff, &tmp, create);
    *result = tmp.b_blocknr;

    /* did we get a sparse block (hole in the file)? */
    if (!tmp.b_blocknr && !rc) {
        BUG_ON(create);
        rc = -ENODATA;
    }

    return rc;
}
ext2_inodetable_t *ext2_finddir(ext2_inodetable_t *rnode, char *name)
{
    void *block;
    ext2_dir_t *direntry = NULL;
    block = (void *)ext2_get_block((rnode->block[0]));
    uint32_t dir_offset;
    dir_offset = 0;

    /*
     * Look through the requested entries until we find what we're looking for
     */
    while (dir_offset < rnode->size) {
        ext2_dir_t *d_ent = (ext2_dir_t *)((uintptr_t)block + dir_offset);
        char *dname = malloc(sizeof(char) * (d_ent->name_len + 1));
        memcpy(dname, &d_ent->name, d_ent->name_len);
        dname[d_ent->name_len] = '\0';
        if (!strcmp(dname, name)) {
            free(dname);
            direntry = d_ent;
            break;
        }
        free(dname);
        dir_offset += d_ent->rec_len;
    }
    if (!direntry) {
        /*
         * We could not find the requested entry in this directory.
         */
        fprintf(stderr, "Failed to locate %s!\n", name);
        return NULL;
    } else {
        return ext2_get_inode(direntry->inode);
    }
}
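The rec_len chaining that ext2_finddir relies on can be exercised in isolation; in the sketch below, the fake_dirent struct and the hand-built block are illustrative stand-ins, not the full on-disk ext2 layout:

#include <stdio.h>
#include <stdint.h>

/* Illustrative layout only; field order mirrors the classic ext2 dirent. */
struct fake_dirent {
    uint32_t inode;
    uint16_t rec_len;
    uint8_t  name_len;
    uint8_t  file_type;
    char     name[8];
};

int main(void)
{
    /* Hand-built "directory block" with two entries. */
    struct fake_dirent block[2] = {
        { 11, sizeof(struct fake_dirent), 1, 2, "." },
        { 12, sizeof(struct fake_dirent), 5, 1, "hello" },
    };
    uint32_t dir_size = sizeof(block);
    uint32_t dir_offset = 0;

    while (dir_offset < dir_size) {
        struct fake_dirent *d = (struct fake_dirent *)((uintptr_t)block + dir_offset);
        printf("inode %u name %.*s\n", d->inode, d->name_len, d->name);
        dir_offset += d->rec_len;   /* rec_len chains entries within the block */
    }
    return 0;
}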
long ext2_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
    struct inode *inode = filp->f_dentry->d_inode;
    struct ext2_inode_info *ei = EXT2_I(inode);
    unsigned int flags;
    unsigned short rsv_window_size;
    int ret;

    ext2_debug("cmd = %u, arg = %lu\n", cmd, arg);

    switch (cmd) {
    case EXT2_FAKE_B_ALLOC:
        /* Fake allocation for ext2 filesystem. */
    {
        struct ext2_fake_b_alloc_arg config;
        struct buffer_head bh_result;
        sector_t iblock, off;
        int ret = 0;

        ret = copy_from_user(&config,
                             (struct ext2_fake_b_alloc_arg __user *)arg,
                             sizeof(struct ext2_fake_b_alloc_arg));
        if (ret != 0) {
            printk(KERN_DEBUG "can't copy from user");
            return -EIO;
        } else
            ret = 0;

        /* Allocate blocks. */
        off = config.efba_off;
        iblock = config.efba_off >> inode->i_blkbits;
        while ((iblock << inode->i_blkbits) <
               (config.efba_off + config.efba_size)) {
            memset(&bh_result, 0, sizeof(struct buffer_head));
            ret = ext2_get_block(inode, iblock, &bh_result, 1);
            if (ret < 0) {
                printk(KERN_DEBUG "get_block_error %d, escaping", ret);
                break;
            }
            iblock++;
        }

        /* Set metadata */
        write_lock(&EXT2_I(inode)->i_meta_lock);
        if (ret == 0) {
            printk(KERN_DEBUG "ok, set size");
            inode->i_size = max_t(loff_t, inode->i_size,
                                  config.efba_off + config.efba_size);
        } else if (iblock != config.efba_off >> inode->i_blkbits) {
            /* Partially allocated, size must be fixed.
             * But `i_blocks` should contain actual information. */
            inode->i_size = inode->i_blocks << inode->i_blkbits;
        }
        inode->i_mtime = inode->i_atime = CURRENT_TIME_SEC;
        inode->i_version++;
        write_unlock(&EXT2_I(inode)->i_meta_lock);

        printk(KERN_DEBUG "returning %d", ret);
        return ret;
    }
    case EXT2_IOC_GETFLAGS:
        ext2_get_inode_flags(ei);
        flags = ei->i_flags & EXT2_FL_USER_VISIBLE;
        return put_user(flags, (int __user *) arg);
    case EXT2_IOC_SETFLAGS: {
        unsigned int oldflags;

        ret = mnt_want_write(filp->f_path.mnt);
        if (ret)
            return ret;

        if (!is_owner_or_cap(inode)) {
            ret = -EACCES;
            goto setflags_out;
        }

        if (get_user(flags, (int __user *) arg)) {
            ret = -EFAULT;
            goto setflags_out;
        }

        flags = ext2_mask_flags(inode->i_mode, flags);

        mutex_lock(&inode->i_mutex);
        /* Is it quota file? Do not allow user to mess with it */
        if (IS_NOQUOTA(inode)) {
            mutex_unlock(&inode->i_mutex);
            ret = -EPERM;
            goto setflags_out;
        }
        oldflags = ei->i_flags;

        /*
         * The IMMUTABLE and APPEND_ONLY flags can only be changed by
         * the relevant capability.
         *
         * This test looks nicer. Thanks to Pauline Middelink
         */
        if ((flags ^ oldflags) & (EXT2_APPEND_FL | EXT2_IMMUTABLE_FL)) {
            if (!capable(CAP_LINUX_IMMUTABLE)) {
                mutex_unlock(&inode->i_mutex);
                ret = -EPERM;
                goto setflags_out;
            }
        }

        flags = flags & EXT2_FL_USER_MODIFIABLE;
        flags |= oldflags & ~EXT2_FL_USER_MODIFIABLE;
        ei->i_flags = flags;
        mutex_unlock(&inode->i_mutex);

        ext2_set_inode_flags(inode);
        inode->i_ctime = CURRENT_TIME_SEC;
        mark_inode_dirty(inode);
setflags_out:
        mnt_drop_write(filp->f_path.mnt);
        return ret;
    }
    case EXT2_IOC_GETVERSION:
        return put_user(inode->i_generation, (int __user *) arg);
    case EXT2_IOC_SETVERSION:
        if (!is_owner_or_cap(inode))
            return -EPERM;
        ret = mnt_want_write(filp->f_path.mnt);
        if (ret)
            return ret;
        if (get_user(inode->i_generation, (int __user *) arg)) {
            ret = -EFAULT;
        } else {
            inode->i_ctime = CURRENT_TIME_SEC;
            mark_inode_dirty(inode);
        }
        mnt_drop_write(filp->f_path.mnt);
        return ret;
    case EXT2_IOC_GETRSVSZ:
        if (test_opt(inode->i_sb, RESERVATION)
            && S_ISREG(inode->i_mode)
            && ei->i_block_alloc_info) {
            rsv_window_size = ei->i_block_alloc_info->rsv_window_node.rsv_goal_size;
            return put_user(rsv_window_size, (int __user *)arg);
        }
        return -ENOTTY;
    case EXT2_IOC_SETRSVSZ: {
        if (!test_opt(inode->i_sb, RESERVATION) || !S_ISREG(inode->i_mode))
            return -ENOTTY;

        if (!is_owner_or_cap(inode))
            return -EACCES;

        if (get_user(rsv_window_size, (int __user *)arg))
            return -EFAULT;

        ret = mnt_want_write(filp->f_path.mnt);
        if (ret)
            return ret;

        if (rsv_window_size > EXT2_MAX_RESERVE_BLOCKS)
            rsv_window_size = EXT2_MAX_RESERVE_BLOCKS;

        /*
         * need to allocate reservation structure for this inode
         * before set the window size
         */
        /*
         * XXX What lock should protect the rsv_goal_size?
         * Accessed in ext2_get_block only.  ext3 uses i_truncate.
         */
        mutex_lock(&ei->truncate_mutex);
        if (!ei->i_block_alloc_info)
            ext2_init_block_alloc_info(inode);

        if (ei->i_block_alloc_info) {
            struct ext2_reserve_window_node *rsv =
                &ei->i_block_alloc_info->rsv_window_node;
            rsv->rsv_goal_size = rsv_window_size;
        }
        mutex_unlock(&ei->truncate_mutex);
        mnt_drop_write(filp->f_path.mnt);
        return 0;
    }
    default:
        return -ENOTTY;
    }
}
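For completeness, a userspace invocation of the fake-allocation ioctl might look roughly like the following; the request number, headers, and field types here are assumptions for illustration, and only the names EXT2_FAKE_B_ALLOC, efba_off, and efba_size come from the code above:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

/* Assumed definitions; the real ones would live in the patched ext2 headers. */
struct ext2_fake_b_alloc_arg {
    unsigned long long efba_off;    /* byte offset to start allocating at */
    unsigned long long efba_size;   /* number of bytes to pre-allocate    */
};
#define EXT2_FAKE_B_ALLOC _IOW('f', 64, struct ext2_fake_b_alloc_arg)

int main(void)
{
    struct ext2_fake_b_alloc_arg arg = { .efba_off = 0, .efba_size = 1 << 20 };
    int fd = open("/mnt/ext2/testfile", O_RDWR);   /* hypothetical path */

    if (fd < 0) {
        perror("open");
        return 1;
    }
    if (ioctl(fd, EXT2_FAKE_B_ALLOC, &arg) < 0)
        perror("ioctl");
    close(fd);
    return 0;
}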
/*
 * ext3301 regular to immediate: convert the file type.
 *
 * filesize should be <= EXT3301_IM_SIZE.
 * There should only be one block in the file, if it is small enough to
 * become an immediate file.
 * Returns 0 on success, <0 on failure.
 */
ssize_t ext3301_reg2im(struct file *filp)
{
    char *data;
    struct buffer_head *bh, search_bh;
    int err = 0;
    long block_offset = 0;
    struct inode *i = FILP_INODE(filp);
    ssize_t l = INODE_ISIZE(i);
    unsigned long blocksize = INODE_BLKSIZE(i);

    dbg_im(KERN_DEBUG "- reg2im l=%d\n", (int)INODE_ISIZE(i));

    // verify the immediate file size
    if (l > EXT3301_IM_SIZE(i)) {
        printk(KERN_DEBUG "IM file bad state, size>capacity, ino: %lu\n",
               INODE_INO(i));
        return -EIO;
    }

    // prepare a kernel buffer to store the file contents
    data = kmalloc((size_t)l, GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    // Lock the inode
    INODE_LOCK(i);

    // Special case: file length is zero, go straight to freeing blocks
    if (l == 0)
        goto free;

    // Use a buffer head to find the block number (of the first data block)
    // 'false' option: do not allocate new blocks with ext2_get_block
    search_bh.b_state = 0;
    search_bh.b_size = blocksize;
    err = ext2_get_block(i, block_offset, &search_bh, false);
    if (err < 0) {
        dbg_im(KERN_DEBUG "- ext2_get_block() failed\n");
        goto out;
    }

    // Retrieve and lock a paged buffer head to the block number
    bh = sb_getblk(INODE_SUPER(i), search_bh.b_blocknr);
    if (bh == NULL) {
        dbg_im(KERN_DEBUG "- sb_getblk() failed\n");
        err = -EIO;
        goto out;
    }
    lock_buffer(bh);

    // Copy directly from the buffer data segment to our kernel buffer
    memcpy((void *)data, (const void *)(bh->b_data), (size_t)l);
    dbg_im(KERN_WARNING "GOT DATA: %.*s\n", (int)l, data);

    // Release the paged buffer: unlock, release.
    flush_dcache_page(bh->b_page);
    set_buffer_uptodate(bh);
    unlock_buffer(bh);
    brelse(bh);

    // Write the kernel buffer into the block pointer area
    memcpy((void *)INODE_PAYLOAD(i), (const void *)data, (size_t)l);

free:
    // Free the block
    //

    // Set the file type to immediate
    INODE_MODE(i) = MODE_SET_IM(INODE_MODE(i));

out:
    // Finished - unlock the inode and mark it as dirty.
    // Note we haven't updated the ctime, filesize or anything else.
    // The previous write operation already did this
    mark_inode_dirty(i);
    INODE_UNLOCK(i);
    kfree(data);
    return err;
}
/*
 * ext3301 immediate to regular: convert the file type.
 * filesize should be > EXT3301_IM_SIZE.
 * Returns 0 on success, <0 on failure.
 */
ssize_t ext3301_im2reg(struct file *filp)
{
    char *data;
    struct buffer_head *bh, search_bh;
    int err = 0;
    long block_offset = 0;
    struct inode *i = FILP_INODE(filp);
    ssize_t l = INODE_ISIZE(i);
    unsigned long blocksize = INODE_BLKSIZE(i);

    dbg_im(KERN_DEBUG "- im2reg l=%d\n", (int)INODE_ISIZE(i));

    // verify the immediate file size
    if (l > EXT3301_IM_SIZE(i)) {
        printk(KERN_DEBUG "IM file bad state, size>capacity, ino: %lu\n",
               INODE_INO(i));
        return -EIO;
    }

    // prepare a kernel buffer to store the file contents
    data = kmalloc((size_t)l, GFP_KERNEL);
    if (!data)
        return -ENOMEM;

    // Lock the inode
    INODE_LOCK(i);

    // Set the file type to regular
    INODE_MODE(i) = MODE_SET_REG(INODE_MODE(i));

    // Special case: file length is zero, nothing else to do
    if (l == 0)
        goto out;

    // Read the payload (block pointer area) into a buffer
    memcpy((void *)data, (const void *)INODE_PAYLOAD(i), (size_t)l);

    // Zero the block pointer area (otherwise get_block will treat our old
    // immediate data as block pointers, and follow them...)
    memset((void *)INODE_PAYLOAD(i), 0, (size_t)EXT3301_IM_SIZE(i));

    // Use a buffer head to find the block number (of the first data block)
    // 'true' option: allocate new blocks with ext2_get_block
    search_bh.b_state = 0;
    search_bh.b_size = blocksize;
    err = ext2_get_block(i, block_offset, &search_bh, true);
    if (err < 0) {
        dbg_im(KERN_DEBUG "- ext2_get_block() failed\n");
        goto out;
    }

    // Retrieve and lock a paged buffer head to the block number
    bh = sb_getblk(INODE_SUPER(i), search_bh.b_blocknr);
    if (bh == NULL) {
        dbg_im(KERN_DEBUG "- sb_getblk() failed\n");
        err = -EIO;
        goto out;
    }
    lock_buffer(bh);

    // Write (directly) to the buffer
    memcpy((void *)(bh->b_data), (const void *)data, (size_t)l);

    // Flush the paged buffer to disk: mark as dirty, unlock, sync, release.
    flush_dcache_page(bh->b_page);
    set_buffer_uptodate(bh);
    mark_buffer_dirty(bh);
    unlock_buffer(bh);
    sync_dirty_buffer(bh);
    brelse(bh);

out:
    // Finished - unlock the inode and mark it as dirty.
    // Note we haven't updated the ctime, filesize or anything else.
    // The subsequent write operation will do this
    mark_inode_dirty(i);
    INODE_UNLOCK(i);
    kfree(data);
    return err;
}