/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub	*sc)
{
	struct xfs_mount	*mp = sc->mp;
	struct xfs_scrub_metadata *sm = sc->sm;
	struct xfs_perag	*pag;
	struct xfs_buf		*bp;
	xfs_agino_t		icount = NULLAGINO;
	xfs_extlen_t		aglen = NULLAGBLOCK;
	xfs_extlen_t		usedlen;
	xfs_extlen_t		freelen;
	xfs_extlen_t		bnobt_sz;
	xfs_extlen_t		inobt_sz;
	xfs_extlen_t		rmapbt_sz;
	xfs_extlen_t		refcbt_sz;
	int			error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (pag->pagi_init) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
	if (!error) {
		aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length);
		freelen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}
	xfs_perag_put(pag);

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(mp, sm->sm_agno, icount)) {
		xfs_agino_t	first, last;

		xfs_agino_range(mp, sm->sm_agno, &first, &last);
		icount = last - first + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
	    freelen >= aglen) {
		aglen = xfs_ag_block_count(mp, sm->sm_agno);
		freelen = aglen;
		usedlen = aglen;
	}

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		inobt_sz *= 2;
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimation.
		 */
		if (xfs_sb_version_hasreflink(&mp->m_sb))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
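
/*
 * Illustrative sketch, not part of the original source: the
 * xfs_*_calc_size() helpers used above estimate a worst-case btree
 * block count from a record count.  A minimal standalone version of
 * that geometry, assuming a fixed minimum fanout (records per block),
 * might look like the hypothetical helper below; the real libxfs
 * helpers derive the fanout from the mount geometry and per-btree
 * header sizes.
 */
static unsigned long long
btree_worst_case_blocks(
	unsigned long long	nrecs,		/* records to index */
	unsigned int		min_fanout)	/* fewest records per block */
{
	unsigned long long	blocks = 0;
	unsigned long long	level_blocks = nrecs;

	/*
	 * Each pass computes ceil(level_blocks / min_fanout), i.e. the
	 * number of blocks needed at the next level up, until we reach
	 * a single root block.
	 */
	do {
		level_blocks = (level_blocks + min_fanout - 1) / min_fanout;
		blocks += level_blocks;
	} while (level_blocks > 1);

	return blocks;
}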
static char *
version_string(
	xfs_sb_t	*sbp)
{
	static char	s[1024];

	if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_1)
		strcpy(s, "V1");
	else if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_2)
		strcpy(s, "V2");
	else if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_3)
		strcpy(s, "V3");
	else if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_4)
		strcpy(s, "V4");
	else if (XFS_SB_VERSION_NUM(sbp) == XFS_SB_VERSION_5)
		strcpy(s, "V5");

	/*
	 * We assume the state of these features now, so macros don't exist for
	 * them any more.
	 */
	if (sbp->sb_versionnum & XFS_SB_VERSION_NLINKBIT)
		strcat(s, ",NLINK");
	if (sbp->sb_versionnum & XFS_SB_VERSION_SHAREDBIT)
		strcat(s, ",SHARED");
	if (sbp->sb_versionnum & XFS_SB_VERSION_DIRV2BIT)
		strcat(s, ",DIRV2");

	if (xfs_sb_version_hasattr(sbp))
		strcat(s, ",ATTR");
	if (xfs_sb_version_hasquota(sbp))
		strcat(s, ",QUOTA");
	if (xfs_sb_version_hasalign(sbp))
		strcat(s, ",ALIGN");
	if (xfs_sb_version_hasdalign(sbp))
		strcat(s, ",DALIGN");
	if (xfs_sb_version_haslogv2(sbp))
		strcat(s, ",LOGV2");
	if (xfs_sb_version_hasextflgbit(sbp))
		strcat(s, ",EXTFLG");
	if (xfs_sb_version_hassector(sbp))
		strcat(s, ",SECTOR");
	if (xfs_sb_version_hasasciici(sbp))
		strcat(s, ",ASCII_CI");
	if (xfs_sb_version_hasmorebits(sbp))
		strcat(s, ",MOREBITS");
	if (xfs_sb_version_hasattr2(sbp))
		strcat(s, ",ATTR2");
	if (xfs_sb_version_haslazysbcount(sbp))
		strcat(s, ",LAZYSBCOUNT");
	if (xfs_sb_version_hasprojid32bit(sbp))
		strcat(s, ",PROJID32BIT");
	if (xfs_sb_version_hascrc(sbp))
		strcat(s, ",CRC");
	if (xfs_sb_version_hasftype(sbp))
		strcat(s, ",FTYPE");
	if (xfs_sb_version_hasfinobt(sbp))
		strcat(s, ",FINOBT");
	if (xfs_sb_version_hassparseinodes(sbp))
		strcat(s, ",SPARSE_INODES");
	if (xfs_sb_version_hasmetauuid(sbp))
		strcat(s, ",META_UUID");

	return s;
}
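
/*
 * Hedged usage sketch (hypothetical caller, not from the original
 * source; assumes <stdio.h> and <string.h>): version_string() returns a
 * pointer to a static buffer, so the result is overwritten by the next
 * call and is not thread safe.  Copy it out if it needs to outlive the
 * call site.
 */
static void
report_sb_version(
	xfs_sb_t	*sbp)
{
	char	vbuf[1024];

	/* Take a private copy before version_string() is called again. */
	strncpy(vbuf, version_string(sbp), sizeof(vbuf) - 1);
	vbuf[sizeof(vbuf) - 1] = '\0';
	printf("versionnum [0x%x] = %s\n", sbp->sb_versionnum, vbuf);
}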
/*
 * rebuilds an inode tree given a cursor.  We're lazy here and call
 * the routine that builds the agi
 */
static void
build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, __uint32_t magic,
		struct agi_stat *agi_stat, int finobt)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	xfs_agino_t		first_agino;
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_rec_t		*bt_rec;
	ino_tree_node_t		*ino_rec;
	bt_stat_level_t		*lptr;
	xfs_agino_t		count = 0;
	xfs_agino_t		freecount = 0;
	int			inocnt;
	uint8_t			finocnt;
	int			k;
	int			level = btree_curs->num_levels;
	int			spmask;
	uint64_t		sparse;
	uint16_t		holemask;

	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
				XFS_AGB_TO_DADDR(mp, agno, agbno),
				XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_ino_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (finobt)
		ino_rec = findfirst_free_inode_rec(agno);
	else
		ino_rec = findfirst_inode_rec(agno);

	if (ino_rec != NULL)
		first_agino = ino_rec->ino_startnum;
	else
		first_agino = NULLAGINO;

	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));

		if (lptr->modulo > 0)
			lptr->modulo--;

		if (lptr->num_recs_pb > 0)
			prop_ino_cursor(mp, agno, btree_curs,
					ino_rec->ino_startnum, 0);

		bt_rec = (xfs_inobt_rec_t *)
			  ((char *)bt_hdr + XFS_INOBT_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ino_rec != NULL);
			bt_rec[j].ir_startino =
					cpu_to_be32(ino_rec->ino_startnum);
			bt_rec[j].ir_free = cpu_to_be64(ino_rec->ir_free);

			inocnt = finocnt = 0;
			for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++)  {
				ASSERT(is_inode_confirmed(ino_rec, k));

				if (is_inode_sparse(ino_rec, k))
					continue;
				if (is_inode_free(ino_rec, k))
					finocnt++;
				inocnt++;
			}

			/*
			 * Set the freecount and check whether we need to update
			 * the sparse format fields. Otherwise, skip to the next
			 * record.
			 */
			inorec_set_freecount(mp, &bt_rec[j], finocnt);
			if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
				goto nextrec;

			/*
			 * Convert the 64-bit in-core sparse inode state to the
			 * 16-bit on-disk holemask.
			 */
			holemask = 0;
			spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
			sparse = ino_rec->ir_sparse;
			for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
				if (sparse & spmask) {
					ASSERT((sparse & spmask) == spmask);
					holemask |= (1 << k);
				} else
					ASSERT((sparse & spmask) == 0);
				sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
			}

			bt_rec[j].ir_u.sp.ir_count = inocnt;
			bt_rec[j].ir_u.sp.ir_holemask = cpu_to_be16(holemask);

nextrec:
			freecount += finocnt;
			count += inocnt;

			if (finobt)
				ino_rec = next_free_ino_rec(ino_rec);
			else
				ino_rec = next_ino_rec(ino_rec);
		}

		if (ino_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_INO_TRACE
				fprintf(stderr, "writing inobt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	if (agi_stat) {
		agi_stat->first_agino = first_agino;
		agi_stat->count = count;
		agi_stat->freecount = freecount;
	}
}
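
/*
 * Standalone sketch of the sparse-state conversion in build_ino_tree()
 * above (illustrative only; this helper name is made up).  Each of the
 * XFS_INOBT_HOLEMASK_BITS (16) on-disk holemask bits covers
 * XFS_INODES_PER_HOLEMASK_BIT (4) inodes of a 64-inode chunk, so the
 * 64-bit per-inode sparse bitmap collapses four bits at a time; a set
 * holemask bit means those four inodes have no disk space allocated.
 */
static uint16_t
ir_sparse_to_holemask(
	uint64_t	sparse)		/* one bit per inode, 1 == sparse */
{
	uint64_t	spmask = (1ULL << XFS_INODES_PER_HOLEMASK_BIT) - 1;
	uint16_t	holemask = 0;
	int		k;

	for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
		/* A group must be entirely sparse or entirely allocated. */
		if (sparse & spmask)
			holemask |= (1U << k);
		sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
	}
	return holemask;
}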