/* Cross-reference with the other btrees. */
STATIC void
xchk_inode_xref(
	struct xfs_scrub	*sc,
	xfs_ino_t		ino,
	struct xfs_dinode	*dip)
{
	xfs_agnumber_t		agno;
	xfs_agblock_t		agbno;
	int			error;

	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	agno = XFS_INO_TO_AGNO(sc->mp, ino);
	agbno = XFS_INO_TO_AGBNO(sc->mp, ino);

	error = xchk_ag_init(sc, agno, &sc->sa);
	if (!xchk_xref_process_error(sc, agno, agbno, &error))
		return;

	xchk_xref_is_used_space(sc, agbno, 1);
	xchk_inode_xref_finobt(sc, ino);
	xchk_xref_is_owned_by(sc, agbno, 1, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, 1);
	xchk_inode_xref_bmap(sc, dip);

	xchk_ag_free(sc, &sc->sa);
}
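/*
 * Illustrative sketch only (not quoted from the scrub code above): the rough
 * shape of one cross-reference predicate of the kind xchk_inode_xref() calls,
 * which looks the block range up in the by-block free space btree and flags a
 * cross-reference corruption if any of it is free.  The helpers named below
 * are real XFS interfaces, but this body and the "_sketch" name are an
 * assumption about how such a check can be written, not the kernel's
 * implementation.
 */
STATIC void
xchk_xref_is_used_space_sketch(
	struct xfs_scrub	*sc,
	xfs_agblock_t		agbno,
	xfs_extlen_t		len)
{
	bool			is_freesp;
	int			error;

	/* No bnobt cursor set up for this AG?  Nothing to cross-reference. */
	if (!sc->sa.bno_cur)
		return;

	/* Does any part of [agbno, agbno + len) appear in the bnobt? */
	error = xfs_alloc_has_record(sc->sa.bno_cur, agbno, len, &is_freesp);
	if (!xchk_should_check_xref(sc, &error, &sc->sa.bno_cur))
		return;

	/* Inodes sitting in free space means the two btrees disagree. */
	if (is_freesp)
		xchk_btree_xref_set_corrupt(sc, sc->sa.bno_cur, 0);
}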
/* Cross-reference with the other btrees. */
STATIC void
xchk_iallocbt_chunk_xref(
	struct xfs_scrub		*sc,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_agblock_t			agbno,
	xfs_extlen_t			len)
{
	if (sc->sm->sm_flags & XFS_SCRUB_OFLAG_CORRUPT)
		return;

	xchk_xref_is_used_space(sc, agbno, len);
	xchk_iallocbt_chunk_xref_other(sc, irec, agino);
	xchk_xref_is_owned_by(sc, agbno, len, &XFS_RMAP_OINFO_INODES);
	xchk_xref_is_not_shared(sc, agbno, len);
}
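/*
 * Illustrative sketch only (an assumption, not code quoted from this file) of
 * how a per-record inobt callback might sanity-check an inode chunk's block
 * range before handing it to the cross-reference helper above.  The function
 * name and the bool return convention here are hypothetical; the helpers it
 * calls are the ones used elsewhere in this section.
 */
STATIC bool
xchk_iallocbt_chunk_sketch(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	xfs_agino_t			agino,
	xfs_extlen_t			len)
{
	struct xfs_mount		*mp = bs->cur->bc_mp;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			bno;

	/* The chunk must map to a valid, non-wrapping block range in this AG. */
	bno = XFS_AGINO_TO_AGBNO(mp, agino);
	if (bno + len <= bno ||
	    !xfs_verify_agbno(mp, agno, bno) ||
	    !xfs_verify_agbno(mp, agno, bno + len - 1))
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);

	/* Cross-reference the chunk's blocks with the other AG btrees. */
	xchk_iallocbt_chunk_xref(bs->sc, irec, agino, bno, len);

	return true;
}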
/*
 * Check that the holemask and freemask of a hypothetical inode cluster match
 * what's actually on disk.  If sparse inodes are enabled, the cluster does
 * not actually have to map to inodes if the corresponding holemask bit is set.
 *
 * @cluster_base is the first inode in the cluster within the @irec.
 */
STATIC int
xchk_iallocbt_check_cluster(
	struct xchk_btree		*bs,
	struct xfs_inobt_rec_incore	*irec,
	unsigned int			cluster_base)
{
	struct xfs_imap			imap;
	struct xfs_mount		*mp = bs->cur->bc_mp;
	struct xfs_dinode		*dip;
	struct xfs_buf			*cluster_bp;
	unsigned int			nr_inodes;
	xfs_agnumber_t			agno = bs->cur->bc_private.a.agno;
	xfs_agblock_t			agbno;
	unsigned int			cluster_index;
	uint16_t			cluster_mask = 0;
	uint16_t			ir_holemask;
	int				error = 0;

	nr_inodes = min_t(unsigned int, XFS_INODES_PER_CHUNK,
			mp->m_inodes_per_cluster);

	/* Map this inode cluster */
	agbno = XFS_AGINO_TO_AGBNO(mp, irec->ir_startino + cluster_base);

	/* Compute a bitmask for this cluster that can be used for holemask. */
	for (cluster_index = 0;
	     cluster_index < nr_inodes;
	     cluster_index += XFS_INODES_PER_HOLEMASK_BIT)
		cluster_mask |= XFS_INOBT_MASK((cluster_base + cluster_index) /
				XFS_INODES_PER_HOLEMASK_BIT);

	/*
	 * Map the first inode of this cluster to a buffer and offset.
	 * Be careful about inobt records that don't align with the start of
	 * the inode buffer when block sizes are large enough to hold multiple
	 * inode chunks.  When this happens, cluster_base will be zero but
	 * ir_startino can be large enough to make im_boffset nonzero.
	 */
	ir_holemask = (irec->ir_holemask & cluster_mask);
	imap.im_blkno = XFS_AGB_TO_DADDR(mp, agno, agbno);
	imap.im_len = XFS_FSB_TO_BB(mp, mp->m_blocks_per_cluster);
	imap.im_boffset = XFS_INO_TO_OFFSET(mp, irec->ir_startino) <<
			mp->m_sb.sb_inodelog;

	if (imap.im_boffset != 0 && cluster_base != 0) {
		ASSERT(imap.im_boffset == 0 || cluster_base == 0);
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	trace_xchk_iallocbt_check_cluster(mp, agno, irec->ir_startino,
			imap.im_blkno, imap.im_len, cluster_base, nr_inodes,
			cluster_mask, ir_holemask,
			XFS_INO_TO_OFFSET(mp, irec->ir_startino +
					  cluster_base));

	/* The whole cluster must be a hole or not a hole. */
	if (ir_holemask != cluster_mask && ir_holemask != 0) {
		xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
		return 0;
	}

	/* If any part of this is a hole, skip it. */
	if (ir_holemask) {
		xchk_xref_is_not_owned_by(bs->sc, agbno,
				mp->m_blocks_per_cluster,
				&XFS_RMAP_OINFO_INODES);
		return 0;
	}

	xchk_xref_is_owned_by(bs->sc, agbno, mp->m_blocks_per_cluster,
			&XFS_RMAP_OINFO_INODES);

	/* Grab the inode cluster buffer. */
	error = xfs_imap_to_bp(mp, bs->cur->bc_tp, &imap, &dip, &cluster_bp,
			0, 0);
	if (!xchk_btree_xref_process_error(bs->sc, bs->cur, 0, &error))
		return error;

	/* Check free status of each inode within this cluster. */
	for (cluster_index = 0; cluster_index < nr_inodes; cluster_index++) {
		struct xfs_dinode	*dip;

		if (imap.im_boffset >= BBTOB(cluster_bp->b_length)) {
			xchk_btree_set_corrupt(bs->sc, bs->cur, 0);
			break;
		}

		dip = xfs_buf_offset(cluster_bp, imap.im_boffset);
		error = xchk_iallocbt_check_cluster_ifree(bs, irec,
				cluster_base + cluster_index, dip);
		if (error)
			break;
		imap.im_boffset += mp->m_sb.sb_inodesize;
	}

	xfs_trans_brelse(bs->cur->bc_tp, cluster_bp);
	return error;
}
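/*
 * Worked example of the cluster_mask computation above (the numbers are
 * illustrative, not taken from the code): with the usual 64-inode chunk and a
 * 16-bit holemask, XFS_INODES_PER_HOLEMASK_BIT is 64 / 16 = 4, so each
 * holemask bit covers four inodes.  For a 16-inode cluster starting at
 * cluster_base = 16, the loop visits cluster_index = 0, 4, 8, 12 and sets
 * bits (16 + 0)/4 through (16 + 12)/4, i.e. bits 4-7, so cluster_mask ends up
 * as 0x00f0.  A record whose ir_holemask covers all of those bits describes a
 * fully sparse (hole) cluster, one that covers none of them describes a fully
 * allocated cluster, and any other combination is flagged as corruption by
 * the "whole cluster must be a hole or not a hole" check above.
 */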