static void
scan_ag(
	xfs_agnumber_t	agno)
{
	xfs_agf_t	*agf;
	xfs_agi_t	*agi;

	push_cur();
	set_cur(&typtab[TYP_AGF],
		XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
		XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);
	if ((agf = iocur_top->data) == NULL) {
		dbprintf(_("can't read agf block for ag %u\n"), agno);
		pop_cur();
		return;
	}
	push_cur();
	set_cur(&typtab[TYP_AGI],
		XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
		XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);
	if ((agi = iocur_top->data) == NULL) {
		dbprintf(_("can't read agi block for ag %u\n"), agno);
		pop_cur();
		pop_cur();
		return;
	}
	scan_sbtree(agf,
		be32_to_cpu(agi->agi_root),
		be32_to_cpu(agi->agi_level),
		scanfunc_ino, TYP_INOBT);
	pop_cur();
	pop_cur();
}
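/*
 * Hedged usage sketch (not taken verbatim from xfs_db): scan_ag() above
 * only covers a single allocation group, so a caller is expected to walk
 * every AG in the mount.  scan_all_ags() is an illustrative name for that
 * driver loop; the surrounding command setup in xfs_db is omitted.
 */
static void
scan_all_ags(void)
{
	xfs_agnumber_t	agno;

	/* visit each allocation group's AGF/AGI pair in turn */
	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++)
		scan_ag(agno);
}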
static void
process_agi_unlinked(
	struct xfs_mount	*mp,
	xfs_agnumber_t		agno)
{
	struct xfs_buf		*bp;
	struct xfs_agi		*agip;
	xfs_agnumber_t		i;
	int			agi_dirty = 0;

	bp = libxfs_readbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE, 0, &xfs_agi_buf_ops);
	if (!bp)
		do_error(_("cannot read agi block %" PRId64 " for ag %u\n"),
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)), agno);

	agip = XFS_BUF_TO_AGI(bp);

	ASSERT(be32_to_cpu(agip->agi_seqno) == agno);

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++) {
		if (agip->agi_unlinked[i] != cpu_to_be32(NULLAGINO)) {
			agip->agi_unlinked[i] = cpu_to_be32(NULLAGINO);
			agi_dirty = 1;
		}
	}

	if (agi_dirty)
		libxfs_writebuf(bp, 0);
	else
		libxfs_putbuf(bp);
}
static int
get_sb(xfs_agnumber_t agno, xfs_sb_t *sb)
{
	push_cur();
	set_cur(&typtab[TYP_SB], XFS_AG_DADDR(mp, agno, XFS_SB_DADDR),
		XFS_FSS_TO_BB(mp, 1), DB_RING_IGN, NULL);

	if (!iocur_top->data) {
		dbprintf(_("can't read superblock for AG %u\n"), agno);
		pop_cur();
		return 0;
	}

	libxfs_sb_from_disk(sb, iocur_top->data);

	if (sb->sb_magicnum != XFS_SB_MAGIC) {
		dbprintf(_("bad sb magic # %#x in AG %u\n"),
			sb->sb_magicnum, agno);
		return 0;
	}
	if (!xfs_sb_good_version(sb)) {
		dbprintf(_("bad sb version # %#x in AG %u\n"),
			sb->sb_versionnum, agno);
		return 0;
	}
	if (agno == 0 && sb->sb_inprogress != 0) {
		dbprintf(_("mkfs not completed successfully\n"));
		return 0;
	}
	return 1;
}
static int
get_sb(xfs_agnumber_t agno, xfs_sb_t *sb)
{
	push_cur();
	set_cur(&typtab[TYP_SB], XFS_AG_DADDR(mp, agno, XFS_SB_DADDR), 1,
		DB_RING_IGN, NULL);

	if (!iocur_top->data) {
		dbprintf("can't read superblock for AG %u\n", agno);
		pop_cur();
		return 0;
	}

	libxfs_xlate_sb(iocur_top->data, sb, 1, ARCH_CONVERT, XFS_SB_ALL_BITS);

	if (sb->sb_magicnum != XFS_SB_MAGIC) {
		dbprintf("bad sb magic # %#x in AG %u\n",
			sb->sb_magicnum, agno);
		return 0;
	}
	if (!XFS_SB_GOOD_VERSION(sb)) {
		dbprintf("bad sb version # %#x in AG %u\n",
			sb->sb_versionnum, agno);
		return 0;
	}
	if (agno == 0 && sb->sb_inprogress != 0) {
		dbprintf("mkfs not completed successfully\n");
		return 0;
	}
	return 1;
}
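/*
 * Hedged usage sketch (illustrative only, not lifted from xfs_db): both
 * variants of get_sb() above read one AG's on-disk superblock into an
 * xfs_sb_t and validate it.  A caller that wants to sanity-check the
 * secondary superblocks could walk every AG like this; the name
 * check_secondary_sbs() is made up for the example, and the caller is
 * responsible for popping the cursor that get_sb() pushes.
 */
static int
check_secondary_sbs(void)
{
	xfs_sb_t	sb;
	xfs_agnumber_t	agno;
	int		bad = 0;

	for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
		if (!get_sb(agno, &sb)) {
			/* get_sb() already printed the reason */
			bad++;
			continue;
		}
		pop_cur();	/* release the cursor pushed by get_sb() */
	}
	return bad == 0;
}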
void
read_ag_header(int fd, xfs_agnumber_t agno, wbuf *buf, ag_header_t *ag,
		xfs_mount_t *mp, int blocksize, int sectorsize)
{
	xfs_daddr_t	off;
	int		length;
	xfs_off_t	newpos;
	size_t		diff;

	/* initial settings */

	diff = 0;
	off = XFS_AG_DADDR(mp, agno, XFS_SB_DADDR);
	buf->position = (xfs_off_t) off * (xfs_off_t) BBSIZE;
	length = buf->length = first_agbno * blocksize;
	if (length == 0) {
		do_log(_("ag header buffer invalid!\n"));
		exit(1);
	}

	/* handle alignment stuff */

	newpos = rounddown(buf->position, (xfs_off_t) buf->min_io_size);
	if (newpos != buf->position) {
		diff = buf->position - newpos;
		buf->position = newpos;
		buf->length += diff;
	}

	/* round up length for direct I/O if necessary */

	if (buf->length % buf->min_io_size != 0)
		buf->length = roundup(buf->length, buf->min_io_size);

	read_wbuf(fd, buf, mp);
	ASSERT(buf->length >= length);

	ag->xfs_sb = (xfs_dsb_t *) (buf->data + diff);
	ASSERT(be32_to_cpu(ag->xfs_sb->sb_magicnum) == XFS_SB_MAGIC);
	ag->xfs_agf = (xfs_agf_t *) (buf->data + diff + sectorsize);
	ASSERT(be32_to_cpu(ag->xfs_agf->agf_magicnum) == XFS_AGF_MAGIC);
	ag->xfs_agi = (xfs_agi_t *) (buf->data + diff + 2 * sectorsize);
	ASSERT(be32_to_cpu(ag->xfs_agi->agi_magicnum) == XFS_AGI_MAGIC);
	ag->xfs_agfl = (xfs_agfl_t *) (buf->data + diff + 3 * sectorsize);
}
/*
 * XXX: yet more code that can be shared with mkfs, growfs.
 */
static void
build_agi(
	xfs_mount_t		*mp,
	xfs_agnumber_t		agno,
	bt_status_t		*btree_curs,
	bt_status_t		*finobt_curs,
	struct agi_stat		*agi_stat)
{
	xfs_buf_t	*agi_buf;
	xfs_agi_t	*agi;
	int		i;

	agi_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGI_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agi_buf->b_ops = &xfs_agi_buf_ops;
	agi = XFS_BUF_TO_AGI(agi_buf);
	memset(agi, 0, mp->m_sb.sb_sectsize);

	agi->agi_magicnum = cpu_to_be32(XFS_AGI_MAGIC);
	agi->agi_versionnum = cpu_to_be32(XFS_AGI_VERSION);
	agi->agi_seqno = cpu_to_be32(agno);
	if (agno < mp->m_sb.sb_agcount - 1)
		agi->agi_length = cpu_to_be32(mp->m_sb.sb_agblocks);
	else
		agi->agi_length = cpu_to_be32(mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);
	agi->agi_count = cpu_to_be32(agi_stat->count);
	agi->agi_root = cpu_to_be32(btree_curs->root);
	agi->agi_level = cpu_to_be32(btree_curs->num_levels);
	agi->agi_freecount = cpu_to_be32(agi_stat->freecount);
	agi->agi_newino = cpu_to_be32(agi_stat->first_agino);
	agi->agi_dirino = cpu_to_be32(NULLAGINO);

	for (i = 0; i < XFS_AGI_UNLINKED_BUCKETS; i++)
		agi->agi_unlinked[i] = cpu_to_be32(NULLAGINO);

	if (xfs_sb_version_hascrc(&mp->m_sb))
		platform_uuid_copy(&agi->agi_uuid, &mp->m_sb.sb_meta_uuid);

	if (xfs_sb_version_hasfinobt(&mp->m_sb)) {
		agi->agi_free_root = cpu_to_be32(finobt_curs->root);
		agi->agi_free_level = cpu_to_be32(finobt_curs->num_levels);
	}

	libxfs_writebuf(agi_buf, 0);
}
static int
sb_f(
	int		argc,
	char		**argv)
{
	xfs_agnumber_t	agno;
	char		*p;

	if (argc > 1) {
		agno = (xfs_agnumber_t)strtoul(argv[1], &p, 0);
		if (*p != '\0' || agno >= mp->m_sb.sb_agcount) {
			dbprintf("bad allocation group number %s\n", argv[1]);
			return 0;
		}
		cur_agno = agno;
	} else if (cur_agno == NULLAGNUMBER)
		cur_agno = 0;
	ASSERT(typtab[TYP_SB].typnm == TYP_SB);
	set_cur(&typtab[TYP_SB],
		XFS_AG_DADDR(mp, cur_agno, XFS_SB_DADDR), 1,
		DB_RING_ADD, NULL);
	return 0;
}
xfs_daddr_t
xfs_ag_daddr(xfs_mount_t *mp, xfs_agnumber_t agno, xfs_daddr_t d)
{
	return XFS_AG_DADDR(mp, agno, d);
}
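/*
 * Hedged sketch of the arithmetic behind XFS_AG_DADDR(), which every
 * snippet in this collection uses: per the macro definitions in
 * xfs_format.h, the daddr of an AG header sector is the daddr of the AG's
 * first filesystem block plus a fixed sector offset (XFS_SB_DADDR,
 * XFS_AGF_DADDR(mp), XFS_AGI_DADDR(mp), ...).  ag_daddr_by_hand() is an
 * illustrative name, not part of any XFS tool; if the macros ever change,
 * trust the headers over this sketch.
 */
static xfs_daddr_t
ag_daddr_by_hand(xfs_mount_t *mp, xfs_agnumber_t agno, xfs_daddr_t d)
{
	/* first filesystem block of this AG ... */
	xfs_fsblock_t	agstart_fsb =
		(xfs_fsblock_t)agno * mp->m_sb.sb_agblocks;

	/* ... converted to 512-byte basic blocks, plus the sector offset */
	return XFS_FSB_TO_BB(mp, agstart_fsb) + d;
}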
extern void readbitmap(char* device, image_head image_hdr, unsigned long* bitmap, int pui)
{
    xfs_agnumber_t agno = 0;
    xfs_agblock_t first_agbno;
    xfs_agnumber_t num_ags;
    ag_header_t ag_hdr;
    xfs_daddr_t read_ag_off;
    int read_ag_length;
    void *read_ag_buf = NULL;
    xfs_off_t read_ag_position; /* xfs_types.h: typedef __s64 */
    uint64_t sk, s_pos = 0;
    ssize_t res;                /* read() returns ssize_t; keep it signed so the error check works */
    void *btree_buf_data = NULL;
    int btree_buf_length;
    xfs_off_t btree_buf_position;
    xfs_agblock_t bno;
    uint current_level;
    uint btree_levels;
    xfs_daddr_t begin, next_begin, ag_begin, new_begin, ag_end; /* xfs_types.h: typedef __s64 */
    xfs_off_t pos;
    xfs_alloc_ptr_t *ptr;
    xfs_alloc_rec_t *rec_ptr;
    int length;
    int i;
    uint64_t size, sizeb;
    xfs_off_t w_position;
    int w_length;
    int wblocks;
    int w_size = 1 * 1024 * 1024;
    uint64_t numblocks = 0;
    xfs_off_t logstart, logend;
    xfs_off_t logstart_pos, logend_pos;
    int log_length;
    struct xfs_btree_block *block;
    uint64_t current_block, block_count, prog_cur_block = 0;
    int start = 0;
    int bit_size = 1;
    progress_bar prog;
    uint64_t bused = 0;
    uint64_t bfree = 0;

    /// init progress
    progress_init(&prog, start, image_hdr.totalblock, image_hdr.totalblock, BITMAP, bit_size);

    fs_open(device);

    first_agbno = (((XFS_AGFL_DADDR(mp) + 1) * source_sectorsize) + first_residue) / source_blocksize;

    num_ags = mp->m_sb.sb_agcount;

    log_mesg(1, 0, 0, fs_opt.debug, "ags = %i\n", num_ags);
    for (agno = 0; agno < num_ags; agno++) {
        /* read in first blocks of the ag */
        /* initial settings */
        log_mesg(2, 0, 0, fs_opt.debug, "read ag %i header\n", agno);

        read_ag_off = XFS_AG_DADDR(mp, agno, XFS_SB_DADDR);
        read_ag_length = first_agbno * source_blocksize;
        read_ag_position = (xfs_off_t) read_ag_off * (xfs_off_t) BBSIZE;
        read_ag_buf = malloc(read_ag_length);
        if (read_ag_buf == NULL) {
            log_mesg(0, 1, 1, fs_opt.debug, "%s, %i, ERROR:%s", __func__, __LINE__, strerror(errno));
        }
        memset(read_ag_buf, 0, read_ag_length);

        log_mesg(2, 0, 0, fs_opt.debug, "seek to read_ag_position %lli\n", read_ag_position);
        sk = lseek(source_fd, read_ag_position, SEEK_SET);
        current_block = (sk/source_blocksize);
        block_count = (read_ag_length/source_blocksize);
        set_bitmap(bitmap, sk, read_ag_length);
        log_mesg(2, 0, 0, fs_opt.debug, "read ag header fd = %llu(%i), length = %i(%i)\n", sk, current_block, read_ag_length, block_count);
        if ((res = read(source_fd, read_ag_buf, read_ag_length)) < 0) {
            log_mesg(1, 0, 1, fs_opt.debug, "read failure at offset %lld\n", read_ag_position);
        }
        ag_hdr.xfs_sb = (xfs_dsb_t *) (read_ag_buf);
        ASSERT(be32_to_cpu(ag_hdr.xfs_sb->sb_magicnum) == XFS_SB_MAGIC);
        ag_hdr.xfs_agf = (xfs_agf_t *) (read_ag_buf + source_sectorsize);
        ASSERT(be32_to_cpu(ag_hdr.xfs_agf->agf_magicnum) == XFS_AGF_MAGIC);
        ag_hdr.xfs_agi = (xfs_agi_t *) (read_ag_buf + 2 * source_sectorsize);
        ASSERT(be32_to_cpu(ag_hdr.xfs_agi->agi_magicnum) == XFS_AGI_MAGIC);
        ag_hdr.xfs_agfl = (xfs_agfl_t *) (read_ag_buf + 3 * source_sectorsize);
        log_mesg(2, 0, 0, fs_opt.debug, "ag header read ok\n");

        /* save what we need (agf) in the btree buffer */
        btree_buf_data = malloc(source_blocksize);
        if (btree_buf_data == NULL) {
            log_mesg(0, 1, 1, fs_opt.debug, "%s, %i, ERROR:%s", __func__, __LINE__, strerror(errno));
        }
        memset(btree_buf_data, 0, source_blocksize);
        memmove(btree_buf_data, ag_hdr.xfs_agf, source_sectorsize);
        ag_hdr.xfs_agf = (xfs_agf_t *) btree_buf_data;
        btree_buf_length = source_blocksize;

        ///* traverse btree until we get to the leftmost leaf node */
        bno = be32_to_cpu(ag_hdr.xfs_agf->agf_roots[XFS_BTNUM_BNOi]);
        current_level = 0;
        btree_levels = be32_to_cpu(ag_hdr.xfs_agf->agf_levels[XFS_BTNUM_BNOi]);

        ag_end = XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(ag_hdr.xfs_agf->agf_length) - 1) + source_blocksize / BBSIZE;

        for (;;) {
            /* none of this touches the w_buf buffer */
            current_level++;
            btree_buf_position = pos = (xfs_off_t)XFS_AGB_TO_DADDR(mp, agno, bno) << BBSHIFT;
            btree_buf_length = source_blocksize;

            sk = lseek(source_fd, btree_buf_position, SEEK_SET);
            current_block = (sk/source_blocksize);
            block_count = (btree_buf_length/source_blocksize);
            set_bitmap(bitmap, sk, btree_buf_length);
            log_mesg(2, 0, 0, fs_opt.debug, "read btree sf = %llu(%i), length = %i(%i)\n", sk, current_block, btree_buf_length, block_count);
            read(source_fd, btree_buf_data, btree_buf_length);
            block = (struct xfs_btree_block *)((char *)btree_buf_data + pos - btree_buf_position);

            if (be16_to_cpu(block->bb_level) == 0)
                break;

            ptr = XFS_ALLOC_PTR_ADDR(mp, block, 1, mp->m_alloc_mxr[1]);
            bno = be32_to_cpu(ptr[0]);
        }
        log_mesg(2, 0, 0, fs_opt.debug, "btree read done\n");

        /* align first data copy but don't overwrite ag header */
        pos = read_ag_position >> BBSHIFT;
        length = read_ag_length >> BBSHIFT;
        next_begin = pos + length;
        ag_begin = next_begin;

        ///* handle the rest of the ag */
        for (;;) {
            if (be16_to_cpu(block->bb_level) != 0) {
                log_mesg(0, 1, 1, fs_opt.debug, "WARNING: source filesystem inconsistent.\nA leaf btree rec isn't a leaf. Aborting now.\n");
            }

            rec_ptr = XFS_ALLOC_REC_ADDR(mp, block, 1);
            for (i = 0; i < be16_to_cpu(block->bb_numrecs); i++, rec_ptr++) {
                /* calculate in daddr's */
                begin = next_begin;

                /*
                 * protect against pathological case of a
                 * hole right after the ag header in a
                 * mis-aligned case
                 */
                if (begin < ag_begin)
                    begin = ag_begin;

                /*
                 * round size up to ensure we copy a
                 * range bigger than required
                 */
                log_mesg(3, 0, 0, fs_opt.debug, "XFS_AGB_TO_DADDR = %llu, agno = %i, be32_to_cpu=%llu\n", XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(rec_ptr->ar_startblock)), agno, be32_to_cpu(rec_ptr->ar_startblock));
                sizeb = XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(rec_ptr->ar_startblock)) - begin;
                size = roundup(sizeb << BBSHIFT, source_sectorsize);
                log_mesg(3, 0, 0, fs_opt.debug, "BB = %i size %i and sizeb %llu begin = %llu\n", BBSHIFT, size, sizeb, begin);

                if (size > 0) {
                    /* copy extent */
                    log_mesg(2, 0, 0, fs_opt.debug, "copy extent\n");
                    w_position = (xfs_off_t)begin << BBSHIFT;
                    while (size > 0) {
                        /*
                         * let lower layer do alignment
                         */
                        if (size > w_size) {
                            w_length = w_size;
                            size -= w_size;
                            sizeb -= wblocks;
                            numblocks += wblocks;
                        } else {
                            w_length = size;
                            numblocks += sizeb;
                            size = 0;
                        }

                        //read_wbuf(source_fd, &w_buf, mp);
                        sk = lseek(source_fd, w_position, SEEK_SET);
                        current_block = (sk/source_blocksize);
                        block_count = (w_length/source_blocksize);
                        set_bitmap(bitmap, sk, w_length);
                        log_mesg(2, 0, 0, fs_opt.debug, "read ext sourcefd to w_buf source_fd=%llu(%i), length=%i(%i)\n", sk, current_block, w_length, block_count);
                        sk = lseek(source_fd, w_length, SEEK_CUR);

                        w_position += w_length;
                    }
                }

                /* round next starting point down */
                new_begin = XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(rec_ptr->ar_startblock) + be32_to_cpu(rec_ptr->ar_blockcount));
                next_begin = rounddown(new_begin, source_sectorsize >> BBSHIFT);
            }

            if (be32_to_cpu(block->bb_u.s.bb_rightsib) == NULLAGBLOCK) {
                log_mesg(2, 0, 0, fs_opt.debug, "NULLAGBLOCK\n");
                break;
            }

            /* read in next btree record block */
            btree_buf_position = pos = (xfs_off_t)XFS_AGB_TO_DADDR(mp, agno, be32_to_cpu(block->bb_u.s.bb_rightsib)) << BBSHIFT;
            btree_buf_length = source_blocksize;

            /* let read_wbuf handle alignment */
            //read_wbuf(source_fd, &btree_buf, mp);
            sk = lseek(source_fd, btree_buf_position, SEEK_SET);
            current_block = (sk/source_blocksize);
            block_count = (btree_buf_length/source_blocksize);
            set_bitmap(bitmap, sk, btree_buf_length);
            log_mesg(2, 0, 0, fs_opt.debug, "read btreebuf fd = %llu(%i), length = %i(%i) \n", sk, current_block, btree_buf_length, block_count);
            read(source_fd, btree_buf_data, btree_buf_length);
            block = (struct xfs_btree_block *)((char *) btree_buf_data + pos - btree_buf_position);
            ASSERT(be32_to_cpu(block->bb_magic) == XFS_ABTB_MAGIC);
        }

        /*
         * write out range of used blocks after last range
         * of free blocks in AG
         */
        if (next_begin < ag_end) {
            begin = next_begin;
            sizeb = ag_end - begin;
            size = roundup(sizeb << BBSHIFT, source_sectorsize);

            if (size > 0) {
                /* copy extent */
                w_position = (xfs_off_t) begin << BBSHIFT;
                while (size > 0) {
                    /*
                     * let lower layer do alignment
                     */
                    if (size > w_size) {
                        w_length = w_size;
                        size -= w_size;
                        sizeb -= wblocks;
                        numblocks += wblocks;
                    } else {
                        w_length = size;
                        numblocks += sizeb;
                        size = 0;
                    }

                    sk = lseek(source_fd, w_position, SEEK_SET);
                    current_block = (sk/source_blocksize);
                    block_count = (w_length/source_blocksize);
                    set_bitmap(bitmap, sk, w_length);
                    log_mesg(2, 0, 0, fs_opt.debug, "read ext fd = %llu(%i), length = %i(%i)\n", sk, current_block, w_length, block_count);
                    //read_wbuf(source_fd, &w_buf, mp);
                    lseek(source_fd, w_length, SEEK_CUR);

                    w_position += w_length;
                }
            }
        }

        log_mesg(2, 0, 0, fs_opt.debug, "write a clean log\n");
        log_length = 1 * 1024 * 1024;

        logstart = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
        logstart_pos = rounddown(logstart, (xfs_off_t)log_length);
        if (logstart % log_length) {    /* unaligned */
            sk = lseek(source_fd, logstart_pos, SEEK_SET);
            current_block = (sk/source_blocksize);
            block_count = (log_length/source_blocksize);
            set_bitmap(bitmap, sk, log_length);
            log_mesg(2, 0, 0, fs_opt.debug, "read log start from %llu(%i) %i(%i)\n", sk, current_block, log_length, block_count);
            sk = lseek(source_fd, log_length, SEEK_CUR);
        }

        logend = XFS_FSB_TO_DADDR(mp, mp->m_sb.sb_logstart) << BBSHIFT;
        logend += XFS_FSB_TO_B(mp, mp->m_sb.sb_logblocks);
        logend_pos = rounddown(logend, (xfs_off_t)log_length);
        if (logend % log_length) {      /* unaligned */
            sk = lseek(source_fd, logend_pos, SEEK_SET);
            current_block = (sk/source_blocksize);
            block_count = (log_length/source_blocksize);
            set_bitmap(bitmap, sk, log_length);
            log_mesg(2, 0, 0, fs_opt.debug, "read log end from %llu(%i) %i(%i)\n", sk, current_block, log_length, block_count);
            sk = lseek(source_fd, log_length, SEEK_CUR);
        }
        log_mesg(2, 0, 0, fs_opt.debug, "write a clean log done\n");

        prog_cur_block = image_hdr.totalblock/num_ags*(agno+1)-1;
        update_pui(&prog, prog_cur_block, prog_cur_block, 0);
    }

    for (current_block = 0; current_block <= image_hdr.totalblock; current_block++) {
        if (pc_test_bit(current_block, bitmap))
            bused++;
        else
            bfree++;
    }
    log_mesg(0, 0, 0, fs_opt.debug, "bused = %lli, bfree = %lli\n", bused, bfree);

    fs_close();
    update_pui(&prog, 1, 1, 1);
}
/*
 * build both the agf and the agfl for an agno given both
 * btree cursors.
 *
 * XXX: yet more common code that can be shared with mkfs/growfs.
 */
static void
build_agf_agfl(xfs_mount_t	*mp,
		xfs_agnumber_t	agno,
		bt_status_t	*bno_bt,
		bt_status_t	*bcnt_bt,
		xfs_extlen_t	freeblks,	/* # free blocks in tree */
		int		lostblocks)	/* # blocks that will be lost */
{
	extent_tree_node_t	*ext_ptr;
	xfs_buf_t		*agf_buf, *agfl_buf;
	int			i;
	int			j;
	xfs_agfl_t		*agfl;
	xfs_agf_t		*agf;
	__be32			*freelist;

	agf_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agf_buf->b_ops = &xfs_agf_buf_ops;
	agf = XFS_BUF_TO_AGF(agf_buf);
	memset(agf, 0, mp->m_sb.sb_sectsize);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "agf = 0x%p, agf_buf->b_addr = 0x%p\n",
		agf, agf_buf->b_addr);
#endif

	/*
	 * set up fixed part of agf
	 */
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(agno);

	if (agno < mp->m_sb.sb_agcount - 1)
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_agblocks);
	else
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);

	agf->agf_roots[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->root);
	agf->agf_levels[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->num_levels);
	agf->agf_roots[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->root);
	agf->agf_levels[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->num_levels);
	agf->agf_freeblks = cpu_to_be32(freeblks);

	/*
	 * Count and record the number of btree blocks consumed if required.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb)) {
		/*
		 * Don't count the root blocks as they are already
		 * accounted for.
		 */
		agf->agf_btreeblks = cpu_to_be32(
			(bno_bt->num_tot_blocks - bno_bt->num_free_blocks) +
			(bcnt_bt->num_tot_blocks - bcnt_bt->num_free_blocks) -
			2);
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "agf->agf_btreeblks = %u\n",
				be32_to_cpu(agf->agf_btreeblks));
#endif
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
			XFS_BTNUM_BNO,
			XFS_BTNUM_CNT);
#endif

	if (xfs_sb_version_hascrc(&mp->m_sb))
		platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);

	/* initialise the AGFL, then fill it if there are blocks left over. */
	agfl_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agfl_buf->b_ops = &xfs_agfl_buf_ops;
	agfl = XFS_BUF_TO_AGFL(agfl_buf);

	/* setting to 0xff results in initialisation to NULLAGBLOCK */
	memset(agfl, 0xff, mp->m_sb.sb_sectsize);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(agno);
		platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
		for (i = 0; i < XFS_AGFL_SIZE(mp); i++)
			agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
	}
	freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);

	/*
	 * do we have left-over blocks in the btree cursors that should
	 * be used to fill the AGFL?
	 */
	if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0) {
		/*
		 * yes, now grab as many blocks as we can
		 */
		i = j = 0;
		while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
			freelist[i] = cpu_to_be32(
					get_next_blockaddr(agno, 0, bno_bt));
			i++;
		}

		while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp)) {
			freelist[i] = cpu_to_be32(
					get_next_blockaddr(agno, 0, bcnt_bt));
			i++;
		}
		/*
		 * now throw the rest of the blocks away and complain
		 */
		while (bno_bt->num_free_blocks > 0) {
			(void) get_next_blockaddr(agno, 0, bno_bt);
			j++;
		}
		while (bcnt_bt->num_free_blocks > 0) {
			(void) get_next_blockaddr(agno, 0, bcnt_bt);
			j++;
		}

		if (j > 0) {
			if (j == lostblocks)
				do_warn(_("lost %d blocks in ag %u\n"),
					j, agno);
			else
				do_warn(_("thought we were going to lose %d "
					  "blocks in ag %u, actually lost "
					  "%d\n"),
					lostblocks, j, agno);
		}

		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(i - 1);
		agf->agf_flcount = cpu_to_be32(i);

#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "writing agfl for ag %u\n", agno);
#endif

	} else {
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
	}

	libxfs_writebuf(agfl_buf, 0);

	ext_ptr = findbiggest_bcnt_extent(agno);
	agf->agf_longest = cpu_to_be32((ext_ptr != NULL)
			? ext_ptr->ex_blockcount : 0);

	ASSERT(be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]) !=
		be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));

	libxfs_writebuf(agf_buf, 0);

	/*
	 * now fix up the free list appropriately
	 * XXX: code lifted from mkfs, should be shared.
	 */
	{
		xfs_alloc_arg_t	args;
		xfs_trans_t	*tp;
		struct xfs_trans_res tres = {0};
		int		error;

		memset(&args, 0, sizeof(args));
		args.tp = tp = libxfs_trans_alloc(mp, 0);
		args.mp = mp;
		args.agno = agno;
		args.alignment = 1;
		args.pag = xfs_perag_get(mp, agno);
		libxfs_trans_reserve(tp, &tres,
				     xfs_alloc_min_freelist(mp, args.pag), 0);
		error = libxfs_alloc_fix_freelist(&args, 0);
		xfs_perag_put(args.pag);
		if (error) {
			do_error(_("failed to fix AGFL on AG %d, error %d\n"),
					agno, error);
		}
		libxfs_trans_commit(tp);
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "wrote agf for ag %u\n", agno);
#endif
}