bool write_inode_tables(PEXT2_FILESYS fs)
{
    bool retval;
    ULONG blk, num;
    int i;

    for (i = 0; (ULONG)i < fs->group_desc_count; i++) {
        blk = fs->group_desc[i].bg_inode_table;
        num = fs->inode_blocks_per_group;

        retval = zero_blocks(fs, blk, num, &blk, &num);
        if (!retval) {
            DPRINT1("\nMke2fs: Could not write %lu blocks "
                    "in inode table starting at %lu.\n", num, blk);

            zero_blocks(0, 0, 0, 0, 0);
            return false;
        }
    }

    zero_blocks(0, 0, 0, 0, 0);
    return true;
}
static int zero_dev_end(int fd, u64 dev_size)
{
    size_t len = 2 * 1024 * 1024;
    off_t start = dev_size - len;

    return zero_blocks(fd, start, len);
}
int btrfs_prepare_device(int fd, char *file, int zero_end, u64 *block_count_ret,
                         u64 max_block_count, int *mixed, int nodiscard)
{
    u64 block_count;
    u64 bytenr;
    struct stat st;
    int i, ret;

    ret = fstat(fd, &st);
    if (ret < 0) {
        fprintf(stderr, "unable to stat %s\n", file);
        exit(1);
    }

    block_count = btrfs_device_size(fd, &st);
    if (block_count == 0) {
        fprintf(stderr, "unable to find %s size\n", file);
        exit(1);
    }
    if (max_block_count)
        block_count = min(block_count, max_block_count);
    zero_end = 1;

    if (block_count < 1024 * 1024 * 1024 && !(*mixed)) {
        printf("SMALL VOLUME: forcing mixed metadata/data groups\n");
        *mixed = 1;
    }

    if (!nodiscard) {
        /*
         * We intentionally ignore errors from the discard ioctl. It is
         * not necessary for the mkfs functionality but just an
         * optimization.
         */
        discard_blocks(fd, 0, block_count);
    }

    ret = zero_dev_start(fd);
    if (ret) {
        fprintf(stderr, "failed to zero device start %d\n", ret);
        exit(1);
    }

    for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
        bytenr = btrfs_sb_offset(i);
        if (bytenr >= block_count)
            break;
        zero_blocks(fd, bytenr, BTRFS_SUPER_INFO_SIZE);
    }

    if (zero_end) {
        ret = zero_dev_end(fd, block_count);
        if (ret) {
            fprintf(stderr, "failed to zero device end %d\n", ret);
            exit(1);
        }
    }
    *block_count_ret = block_count;
    return 0;
}
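/*
 * The discard_blocks() helper called above is not part of this listing.
 * The following is only a minimal sketch of what such a helper could look
 * like, assuming a Linux block device and the BLKDISCARD ioctl (which takes
 * a {start, length} pair in bytes); the real btrfs-progs helper may differ.
 * As the comment above notes, callers are free to ignore the return value.
 */
#include <errno.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/fs.h>       /* BLKDISCARD */

static int discard_blocks(int fd, uint64_t start, uint64_t len)
{
    uint64_t range[2] = { start, len };

    /* Ask the block layer to discard [start, start + len). */
    if (ioctl(fd, BLKDISCARD, &range) < 0)
        return -errno;
    return 0;
}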
/* free a block.
 * returns negative value on error. */
int testfs_free_block(struct super_block *sb, int block_nr)
{
    zero_blocks(sb, block_nr, 1);
    block_nr -= sb->sb.data_blocks_start;
    assert(block_nr >= 0);
    testfs_put_block_freemap(sb, block_nr);
    return 0;
}
static void write_inode_tables(ext2_filsys fs)
{
    errcode_t retval;
    blk_t blk;
    dgrp_t i;
    int num;
    struct progress_struct progress;
    int lazy_flag = 0;

    if (quiet)
        memset(&progress, 0, sizeof(progress));
    else
        progress_init(&progress, _("Writing inode tables: "),
                      fs->group_desc_count);

    if (EXT2_HAS_COMPAT_FEATURE(fs->super, EXT2_FEATURE_COMPAT_LAZY_BG))
        lazy_flag = 1;

    for (i = 0; i < fs->group_desc_count; i++) {
        progress_update(&progress, i);

        blk = fs->group_desc[i].bg_inode_table;
        num = fs->inode_blocks_per_group;

        /* Skip zeroing if lazy_bg marks this group's inode table as
         * uninitialized. */
        if (!(lazy_flag &&
              (fs->group_desc[i].bg_flags & EXT2_BG_INODE_UNINIT))) {
            retval = zero_blocks(fs, blk, num, 0, &blk, &num);
            if (retval) {
                fprintf(stderr, _("\nCould not write %d "
                        "blocks in inode table starting at %u: %s\n"),
                        num, blk, error_message(retval));
                exit(1);
            }
        }
        if (sync_kludge) {
            if (sync_kludge == 1)
                sync();
            else if ((i % sync_kludge) == 0)
                sync();
        }
    }
    /* A final call with all-zero arguments lets zero_blocks() release
     * its internal buffer. */
    zero_blocks(0, 0, 0, 0, 0, 0);
    progress_close(&progress);
}
void Carrier::clear ()
{
    memory_barrier();
    m_support.zero();
    zero_blocks(m_reps, 1 + item_dim());
    m_rep_count = 0;
    m_item_count = 0;
    memory_barrier();
}
static void create_journal_dev(ext2_filsys fs)
{
    struct progress_struct progress;
    errcode_t retval;
    char *buf;
    blk_t blk;
    int count;

    retval = ext2fs_create_journal_superblock(fs,
                                              fs->super->s_blocks_count,
                                              0, &buf);
    if (retval) {
        com_err("create_journal_dev", retval,
                _("while initializing journal superblock"));
        exit(1);
    }
    if (quiet)
        memset(&progress, 0, sizeof(progress));
    else
        progress_init(&progress, _("Zeroing journal device: "),
                      fs->super->s_blocks_count);

    retval = zero_blocks(fs, 0, fs->super->s_blocks_count,
                         &progress, &blk, &count);
    if (retval) {
        com_err("create_journal_dev", retval,
                _("while zeroing journal device (block %u, count %d)"),
                blk, count);
        exit(1);
    }
    /* Release zero_blocks()'s internal buffer. */
    zero_blocks(0, 0, 0, 0, 0, 0);

    retval = io_channel_write_blk(fs->io,
                                  fs->super->s_first_data_block + 1,
                                  1, buf);
    if (retval) {
        com_err("create_journal_dev", retval,
                _("while writing journal superblock"));
        exit(1);
    }
    progress_close(&progress);
}
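/*
 * The six-argument zero_blocks() called by write_inode_tables() and
 * create_journal_dev() above is not included in this listing.  The sketch
 * below is only an illustration of a helper matching those call sites,
 * assuming the mke2fs environment (ext2fs/ext2fs.h, the progress_* helpers
 * used above) and an assumed STRIDE_LENGTH of 8 blocks per write; the real
 * e2fsprogs code may differ in details.  Passing a NULL fs frees the static
 * zero buffer, which is what the trailing zero_blocks(0, ...) calls rely on.
 */
#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <ext2fs/ext2fs.h>

#define STRIDE_LENGTH 8

static errcode_t zero_blocks(ext2_filsys fs, blk_t blk, int num,
                             struct progress_struct *progress,
                             blk_t *ret_blk, int *ret_count)
{
    static char *buf;       /* shared zero-filled write buffer */
    errcode_t retval;
    int j, count;

    /* fs == NULL means "free the static buffer and return". */
    if (!fs) {
        free(buf);
        buf = NULL;
        return 0;
    }
    if (!buf) {
        buf = calloc(STRIDE_LENGTH, fs->blocksize);
        if (!buf)
            return ENOMEM;
    }
    /* Write zeros STRIDE_LENGTH blocks at a time, reporting the failing
     * block and count back to the caller on error. */
    for (j = 0; j < num; j += STRIDE_LENGTH, blk += STRIDE_LENGTH) {
        count = num - j;
        if (count > STRIDE_LENGTH)
            count = STRIDE_LENGTH;
        retval = io_channel_write_blk(fs->io, blk, count, buf);
        if (retval) {
            if (ret_blk)
                *ret_blk = blk;
            if (ret_count)
                *ret_count = count;
            return retval;
        }
        if (progress)
            progress_update(progress, blk);
    }
    return 0;
}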
static int zero_dev_start(int fd)
{
    off_t start = 0;
    size_t len = 2 * 1024 * 1024;

#ifdef __sparc__
    /* don't overwrite the disk labels on sparc */
    start = 1024;
    len -= 1024;
#endif
    return zero_blocks(fd, start, len);
}
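/*
 * The zero_blocks() used by zero_dev_start()/zero_dev_end() above takes a
 * file descriptor, a byte offset, and a length, and is not shown in this
 * listing.  A minimal sketch under the assumption that a plain POSIX
 * pwrite() of a zero-filled buffer is sufficient; the real btrfs-progs
 * helper may handle buffering and error codes differently.
 */
#include <errno.h>
#include <stdlib.h>
#include <unistd.h>
#include <sys/types.h>

static int zero_blocks(int fd, off_t start, size_t len)
{
    char *buf = calloc(1, len);     /* zero-filled scratch buffer */
    ssize_t written;
    int ret = 0;

    if (!buf)
        return -ENOMEM;
    written = pwrite(fd, buf, len, start);
    if (written < 0 || (size_t)written != len)
        ret = -EIO;                 /* failed or short write */
    free(buf);
    return ret;
}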
void testfs_make_inode_blocks(struct super_block *sb)
{
    const size_t num_bits_in_freemap = BLOCK_SIZE * INODE_FREEMAP_SIZE * 8;

    if (num_bits_in_freemap < NR_INODE_BLOCKS * INODES_PER_BLOCK) {
        EXIT("not enough inode freemap to support "
             STR(NR_INODE_BLOCKS) " inode blocks");
    }
    /* dinodes should not span blocks */
    assert((BLOCK_SIZE % sizeof(struct dinode)) == 0);
    zero_blocks(sb, sb->sb.inode_blocks_start, NR_INODE_BLOCKS);
}
/* free a block.
 * returns negative value on error. */
int testfs_free_block(struct super_block *sb, int block_nr)
{
    zero_blocks(sb, block_nr, 1);
    block_nr -= sb->sb.data_blocks_start;
    //printf("block nr: %d\n", block_nr);
    assert(block_nr >= 0);
    testfs_put_block_freemap(sb, block_nr);
    assert(sb->sb.used_block_count > 0);
    sb->sb.used_block_count--;
    testfs_write_super_block(sb);
    return 0;
}
int btrfs_prepare_device(int fd, char *file, int zero_end, u64 *block_count_ret)
{
    u64 block_count;
    u64 bytenr;
    struct stat st;
    int i, ret;

    ret = fstat(fd, &st);
    if (ret < 0) {
        fprintf(stderr, "unable to stat %s\n", file);
        exit(1);
    }

    block_count = device_size(fd, &st);
    if (block_count == 0) {
        fprintf(stderr, "unable to find %s size\n", file);
        exit(1);
    }
    zero_end = 1;

    if (block_count < 256 * 1024 * 1024) {
        fprintf(stderr, "device %s is too small "
                "(must be at least 256 MB)\n", file);
        exit(1);
    }

    ret = zero_dev_start(fd);
    if (ret) {
        fprintf(stderr, "failed to zero device start %d\n", ret);
        exit(1);
    }

    for (i = 0; i < BTRFS_SUPER_MIRROR_MAX; i++) {
        bytenr = btrfs_sb_offset(i);
        if (bytenr >= block_count)
            break;
        zero_blocks(fd, bytenr, BTRFS_SUPER_INFO_SIZE);
    }

    if (zero_end) {
        ret = zero_dev_end(fd, block_count);
        if (ret) {
            fprintf(stderr, "failed to zero device end %d\n", ret);
            exit(1);
        }
    }
    *block_count_ret = block_count;
    return 0;
}
void testfs_make_inode_blocks(struct super_block *sb)
{
    /* dinodes should not span blocks */
    assert((BLOCK_SIZE % sizeof(struct dinode)) == 0);
    zero_blocks(sb, sb->sb.inode_blocks_start, NR_INODE_BLOCKS);
}
void testfs_make_csum_table(struct super_block *sb)
{
    /* number of data blocks cannot exceed size of checksum table */
    assert(MAX_NR_CSUMS > NR_DATA_BLOCKS);
    zero_blocks(sb, sb->sb.csum_table_start, CSUM_TABLE_SIZE);
}
void testfs_make_block_freemap(struct super_block *sb)
{
    zero_blocks(sb, sb->sb.block_freemap_start, BLOCK_FREEMAP_SIZE);
}
void testfs_make_inode_freemap(struct super_block *sb)
{
    zero_blocks(sb, sb->sb.inode_freemap_start, INODE_FREEMAP_SIZE);
}
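/*
 * The testfs zero_blocks(sb, start, nr) used by the mkfs helpers and by
 * testfs_free_block() above is not part of this listing.  A minimal sketch,
 * assuming the file system provides a write_blocks(sb, buf, start, nr)
 * primitive (a hypothetical name here) and the BLOCK_SIZE constant seen
 * above: it writes nr zero-filled blocks starting at block 'start'.
 */
void zero_blocks(struct super_block *sb, int start, int nr)
{
    static char zero[BLOCK_SIZE];   /* never written to, stays all-zero */
    int i;

    for (i = 0; i < nr; i++)
        write_blocks(sb, zero, start + i, 1);   /* assumed helper */
}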
/*
 * grow_fork()
 *
 * Try to add enough allocation blocks to 'fork'
 * so that it is 'ablock' allocation blocks long.
 */
static int grow_fork(struct hfs_fork *fork, int ablocks)
{
    struct hfs_cat_entry *entry = fork->entry;
    struct hfs_mdb *mdb = entry->mdb;
    struct hfs_extent *ext;
    int i, start, err;
    hfs_u16 need, len = 0;
    hfs_u32 ablksz = mdb->alloc_blksz;
    hfs_u32 blocks, clumpablks;

    blocks = fork->psize;
    need = ablocks - blocks/ablksz;
    if (need < 1) { /* no need to grow the fork */
        return 0;
    }

    /* round up to clumpsize */
    if (entry->u.file.clumpablks) {
        clumpablks = entry->u.file.clumpablks;
    } else {
        clumpablks = mdb->clumpablks;
    }
    need = ((need + clumpablks - 1) / clumpablks) * clumpablks;

    /* find last extent record and try to extend it */
    if (!(ext = find_ext(fork, blocks/ablksz - 1))) {
        /* somehow we couldn't find the end of the file! */
        return -1;
    }

    /* determine which is the last used extent in the record */
    /* then try to allocate the blocks immediately following it */
    for (i = 2; (i >= 0) && !ext->length[i]; --i) {};

    if (i >= 0) {
        /* try to extend the last extent */
        start = ext->block[i] + ext->length[i];
        err = 0;
        lock_bitmap(mdb);
        len = hfs_vbm_count_free(mdb, start);
        if (!len) {
            unlock_bitmap(mdb);
            goto more_extents;
        }
        if (need < len) {
            len = need;
        }
        err = hfs_set_vbm_bits(mdb, start, len);
        unlock_bitmap(mdb);
        if (err) {
            relse_ext(ext);
            return -1;
        }

        zero_blocks(mdb, start, len);

        ext->length[i] += len;
        ext->end += len;
        blocks = (fork->psize += len * ablksz);
        need -= len;
        update_ext(fork, ext);
    }

more_extents:
    /* add some more extents */
    while (need) {
        len = need;
        err = 0;
        lock_bitmap(mdb);
        start = hfs_vbm_search_free(mdb, &len);
        if (need < len) {
            len = need;
        }
        err = hfs_set_vbm_bits(mdb, start, len);
        unlock_bitmap(mdb);
        if (!len || err) {
            relse_ext(ext);
            return -1;
        }
        zero_blocks(mdb, start, len);

        /* determine which is the first free extent in the record */
        for (i = 0; (i < 3) && ext->length[i]; ++i) {};

        if (i < 3) {
            ext->block[i] = start;
            ext->length[i] = len;
            ext->end += len;
            update_ext(fork, ext);
        } else {
            if (!(ext = new_extent(fork, ext, blocks/ablksz,
                                   start, len, ablksz))) {
                lock_bitmap(mdb);
                hfs_clear_vbm_bits(mdb, start, len);
                unlock_bitmap(mdb);
                return -1;
            }
        }
        blocks = (fork->psize += len * ablksz);
        need -= len;
    }

    set_cache(fork, ext);
    relse_ext(ext);
    return 0;
}