/*
 * Create one huge file of "num" blocks (or as many blocks as possible
 * when num == 0), preferring physically contiguous runs, link it into
 * directory "dir" under a name derived from "idx", and return its new
 * inode number in *ino.
 *
 * Returns 0 on success or an errcode_t error code.  Once the extent
 * handle has been opened, all error paths exit through errout so the
 * handle is always released.
 *
 * NOTE(review): relies on file-scope state defined elsewhere in this
 * file: uid, gid, goal, zero_hugefile, program_name, fn_buf,
 * fn_numbuf, idx_digits, num_files.
 */
static errcode_t mk_hugefile(ext2_filsys fs, blk64_t num, ext2_ino_t dir,
			     unsigned long idx, ext2_ino_t *ino)
{
	errcode_t		retval;
	blk64_t			lblk, bend = 0;
	__u64			size;
	blk64_t			left;
	blk64_t			count = 0;
	struct ext2_inode	inode;
	ext2_extent_handle_t	handle;

	retval = ext2fs_new_inode(fs, 0, LINUX_S_IFREG, NULL, ino);
	if (retval)
		return retval;

	/* Initialize a fresh regular-file inode owned by uid:gid */
	memset(&inode, 0, sizeof(struct ext2_inode));
	inode.i_mode = LINUX_S_IFREG | (0666 & ~fs->umask);
	inode.i_links_count = 1;
	inode.i_uid = uid & 0xFFFF;
	ext2fs_set_i_uid_high(inode, (uid >> 16) & 0xffff);
	inode.i_gid = gid & 0xFFFF;
	ext2fs_set_i_gid_high(inode, (gid >> 16) & 0xffff);

	retval = ext2fs_write_new_inode(fs, *ino, &inode);
	if (retval)
		return retval;

	ext2fs_inode_alloc_stats2(fs, *ino, +1, 0);

	retval = ext2fs_extent_open2(fs, *ino, &inode, &handle);
	if (retval)
		return retval;

	/*
	 * We don't use ext2fs_fallocate() here because hugefiles are
	 * designed to be physically contiguous (if the block group
	 * descriptors are configured to be in a single block at the
	 * beginning of the file system, by using the
	 * packed_meta_blocks layout), with the extent tree blocks
	 * allocated near the beginning of the file system.
	 */
	lblk = 0;
	left = num ? num : 1;
	while (left) {
		blk64_t pblk, end;
		blk64_t n = left;

		/* Skip to the next free block at or after goal */
		retval = ext2fs_find_first_zero_block_bitmap2(fs->block_map,
				goal, ext2fs_blocks_count(fs->super) - 1,
				&end);
		if (retval)
			goto errout;
		goal = end;

		/* Find where this free run ends; ENOENT means the rest
		 * of the file system is free. */
		retval = ext2fs_find_first_set_block_bitmap2(fs->block_map,
				goal, ext2fs_blocks_count(fs->super) - 1,
				&bend);
		if (retval == ENOENT) {
			bend = ext2fs_blocks_count(fs->super);
			if (num == 0)
				left = 0;
		}
		if (!num || bend - goal < left)
			n = bend - goal;
		pblk = goal;
		if (num)
			left -= n;
		goal += n;
		count += n;
		ext2fs_block_alloc_stats_range(fs, pblk, n, +1);

		if (zero_hugefile) {
			blk64_t ret_blk;

			retval = ext2fs_zero_blocks2(fs, pblk, n,
						     &ret_blk, NULL);
			/* Best-effort: report but keep going */
			if (retval)
				com_err(program_name, retval,
					_("while zeroing block %llu "
					  "for hugefile"), ret_blk);
		}

		/*
		 * Insert the allocated run into the extent tree, in
		 * chunks no larger than EXT_INIT_MAX_LEN.
		 */
		while (n) {
			blk64_t l = n;
			struct ext2fs_extent newextent;

			if (l > EXT_INIT_MAX_LEN)
				l = EXT_INIT_MAX_LEN;

			newextent.e_len = l;
			newextent.e_pblk = pblk;
			newextent.e_lblk = lblk;
			newextent.e_flags = 0;

			retval = ext2fs_extent_insert(handle,
					EXT2_EXTENT_INSERT_AFTER, &newextent);
			/* Bug fix: was "return retval", which leaked
			 * the extent handle; go through errout so it
			 * is freed. */
			if (retval)
				goto errout;
			pblk += l;
			lblk += l;
			n -= l;
		}
	}

	/* Re-read the inode (extent ops updated it on disk), then set
	 * the block count and file size to match what we allocated. */
	retval = ext2fs_read_inode(fs, *ino, &inode);
	if (retval)
		goto errout;
	retval = ext2fs_iblk_add_blocks(fs, &inode,
					count / EXT2FS_CLUSTER_RATIO(fs));
	if (retval)
		goto errout;
	size = (__u64) count * fs->blocksize;
	retval = ext2fs_inode_size_set(fs, &inode, size);
	if (retval)
		goto errout;
	retval = ext2fs_write_new_inode(fs, *ino, &inode);
	if (retval)
		goto errout;

	/* Build the numeric filename suffix, then link the inode into
	 * the directory, expanding the directory if it is full. */
	if (idx_digits)
		sprintf(fn_numbuf, "%0*lu", idx_digits, idx);
	else if (num_files > 1)
		sprintf(fn_numbuf, "%lu", idx);

retry:
	retval = ext2fs_link(fs, dir, fn_buf, *ino, EXT2_FT_REG_FILE);
	if (retval == EXT2_ET_DIR_NO_SPACE) {
		retval = ext2fs_expand_dir(fs, dir);
		if (retval)
			goto errout;
		goto retry;
	}
	if (retval)
		goto errout;

errout:
	if (handle)
		ext2fs_extent_free(handle);
	return retval;
}
/*
 * Allocate (if not already assigned) the block bitmap, inode bitmap,
 * and inode table locations for block group "group", recording them
 * in the group descriptor and marking the chosen blocks in "bmap"
 * (defaults to fs->block_map when NULL).
 *
 * With the flex_bg feature enabled, metadata for all groups of a flex
 * group is packed together: the search start is derived from the
 * previous group's metadata placement (via flexbg_offset) and the
 * search window is widened to the last group of the flex group.
 *
 * Returns 0 on success or an errcode_t error from the block
 * allocator.
 */
errcode_t ext2fs_allocate_group_table(ext2_filsys fs, dgrp_t group,
				      ext2fs_block_bitmap bmap)
{
	errcode_t	retval;
	blk64_t		group_blk, start_blk, last_blk, new_blk;
	dgrp_t		last_grp = 0;
	int		rem_grps = 0, flexbg_size = 0;

	group_blk = ext2fs_group_first_block2(fs, group);
	last_blk = ext2fs_group_last_block2(fs, group);

	if (!bmap)
		bmap = fs->block_map;

	/* For flex_bg, compute the last group of this flex group and
	 * how many groups remain in it (clamped to the fs end). */
	if (EXT2_HAS_INCOMPAT_FEATURE(fs->super,
				      EXT4_FEATURE_INCOMPAT_FLEX_BG) &&
	    fs->super->s_log_groups_per_flex) {
		flexbg_size = 1 << fs->super->s_log_groups_per_flex;
		last_grp = group | (flexbg_size - 1);
		if (last_grp > fs->group_desc_count-1)
			last_grp = fs->group_desc_count-1;
		rem_grps = last_grp - group + 1;
	}

	/*
	 * Allocate the block and inode bitmaps, if necessary
	 */
	if (fs->stride) {
		/* RAID stride: offset the per-group search start so
		 * metadata is spread across stripe members. */
		retval = ext2fs_get_free_blocks2(fs, group_blk, last_blk,
						 1, bmap, &start_blk);
		if (retval)
			return retval;
		start_blk += fs->inode_blocks_per_group;
		start_blk += ((fs->stride * group) %
			      (last_blk - start_blk + 1));
		if (start_blk >= last_blk)
			start_blk = group_blk;
	} else
		/* NOTE(review): upstream e2fsprogs sets start_blk =
		 * last_blk here (search wraps over the whole fs);
		 * confirm group_blk is the intended search start. */
		start_blk = group_blk;

	if (flexbg_size) {
		blk64_t prev_block = 0;

		/* Pack this group's block bitmap right after the
		 * previous group's block bitmap. */
		if (group % flexbg_size)
			prev_block = ext2fs_block_bitmap_loc(fs,
						group - 1) + 1;
		start_blk = flexbg_offset(fs, group, prev_block, bmap,
					  rem_grps, 1);
		last_blk = ext2fs_group_last_block2(fs, last_grp);
	}

	if (!ext2fs_block_bitmap_loc(fs, group)) {
		retval = ext2fs_get_free_blocks2(fs, start_blk, last_blk,
						 1, bmap, &new_blk);
		/* Fall back to searching from the group's own start */
		if (retval == EXT2_ET_BLOCK_ALLOC_FAIL)
			retval = ext2fs_get_free_blocks2(fs, group_blk,
					last_blk, 1, bmap, &new_blk);
		if (retval)
			return retval;
		ext2fs_mark_block_bitmap2(bmap, new_blk);
		ext2fs_block_bitmap_loc_set(fs, group, new_blk);
		if (flexbg_size) {
			/* The block may land in a different group:
			 * adjust that group's accounting and checksum */
			dgrp_t gr = ext2fs_group_of_blk2(fs, new_blk);
			ext2fs_bg_free_blocks_count_set(fs, gr,
				ext2fs_bg_free_blocks_count(fs, gr) - 1);
			ext2fs_free_blocks_count_add(fs->super, -1);
			ext2fs_bg_flags_clear(fs, gr,
					      EXT2_BG_BLOCK_UNINIT);
			ext2fs_group_desc_csum_set(fs, gr);
		}
	}

	if (flexbg_size) {
		blk64_t prev_block = 0;

		/* Inode bitmaps follow the previous group's inode
		 * bitmap, or come after the flex group's run of block
		 * bitmaps for the first group of a flex group. */
		if (group % flexbg_size)
			prev_block = ext2fs_inode_bitmap_loc(fs,
						group - 1) + 1;
		else
			prev_block = ext2fs_block_bitmap_loc(fs, group) +
				flexbg_size;
		start_blk = flexbg_offset(fs, group, prev_block, bmap,
					  rem_grps, 1);
		last_blk = ext2fs_group_last_block2(fs, last_grp);
	}

	if (!ext2fs_inode_bitmap_loc(fs, group)) {
		retval = ext2fs_get_free_blocks2(fs, start_blk, last_blk,
						 1, bmap, &new_blk);
		if (retval == EXT2_ET_BLOCK_ALLOC_FAIL)
			retval = ext2fs_get_free_blocks2(fs, group_blk,
					last_blk, 1, bmap, &new_blk);
		if (retval)
			return retval;
		ext2fs_mark_block_bitmap2(bmap, new_blk);
		ext2fs_inode_bitmap_loc_set(fs, group, new_blk);
		if (flexbg_size) {
			dgrp_t gr = ext2fs_group_of_blk2(fs, new_blk);
			ext2fs_bg_free_blocks_count_set(fs, gr,
				ext2fs_bg_free_blocks_count(fs, gr) - 1);
			ext2fs_free_blocks_count_add(fs->super, -1);
			ext2fs_bg_flags_clear(fs, gr,
					      EXT2_BG_BLOCK_UNINIT);
			ext2fs_group_desc_csum_set(fs, gr);
		}
	}

	/*
	 * Allocate the inode table
	 */
	if (flexbg_size) {
		blk64_t prev_block = 0;

		/* Inode tables follow the previous group's table, or
		 * come after the flex group's inode bitmaps. */
		if (group % flexbg_size)
			prev_block = ext2fs_inode_table_loc(fs,
						group - 1) +
				fs->inode_blocks_per_group;
		else
			prev_block = ext2fs_inode_bitmap_loc(fs, group) +
				flexbg_size;
		group_blk = flexbg_offset(fs, group, prev_block, bmap,
					  rem_grps,
					  fs->inode_blocks_per_group);
		last_blk = ext2fs_group_last_block2(fs, last_grp);
	}

	if (!ext2fs_inode_table_loc(fs, group)) {
		retval = ext2fs_get_free_blocks2(fs, group_blk, last_blk,
						fs->inode_blocks_per_group,
						bmap, &new_blk);
		if (retval)
			return retval;
		if (flexbg_size)
			/* Range may span groups; this variant also
			 * updates free-count accounting per group. */
			ext2fs_block_alloc_stats_range(fs, new_blk,
					fs->inode_blocks_per_group, +1);
		else
			ext2fs_mark_block_bitmap_range2(fs->block_map,
					new_blk,
					fs->inode_blocks_per_group);
		ext2fs_inode_table_loc_set(fs, group, new_blk);
	}
	ext2fs_group_desc_csum_set(fs, group);
	return 0;
}