Example 1: xrep_init_btblock()
/* Initialize a new AG btree root block with zero entries. */
int
xrep_init_btblock(
	struct xfs_scrub		*sc,
	xfs_fsblock_t			fsb,
	struct xfs_buf			**bpp,
	xfs_btnum_t			btnum,
	const struct xfs_buf_ops	*ops)
{
	struct xfs_trans		*tp = sc->tp;
	struct xfs_mount		*mp = sc->mp;
	struct xfs_buf			*bp;

	trace_xrep_init_btblock(mp, XFS_FSB_TO_AGNO(mp, fsb),
			XFS_FSB_TO_AGBNO(mp, fsb), btnum);

	ASSERT(XFS_FSB_TO_AGNO(mp, fsb) == sc->sa.agno);
	bp = xfs_trans_get_buf(tp, mp->m_ddev_targp, XFS_FSB_TO_DADDR(mp, fsb),
			XFS_FSB_TO_BB(mp, 1), 0);
	xfs_buf_zero(bp, 0, BBTOB(bp->b_length));
	xfs_btree_init_block(mp, bp, btnum, 0, 0, sc->sa.agno, 0);
	xfs_trans_buf_set_type(tp, bp, XFS_BLFT_BTREE_BUF);
	xfs_trans_log_buf(tp, bp, 0, bp->b_length);
	bp->b_ops = ops;
	*bpp = bp;

	return 0;
}
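All eight examples on this page feed the same initializer, xfs_btree_init_block(). The fragment below restates the call from Example 1 with one comment per argument position; the labels are inferred from the call sites on this page (the older xfsprogs examples further down pass an on-disk magic number where the kernel code passes a btree type), so treat them as an illustration rather than the authoritative prototype.

/* Example 1's call, annotated; labels inferred from this page, not from the XFS headers. */
xfs_btree_init_block(mp,		/* mount */
		     bp,		/* buffer that will hold the new block */
		     btnum,		/* btree type (the xfsprogs variants pass a magic number here) */
		     0,			/* level: 0, the new root is also a leaf */
		     0,			/* numrecs: the block starts out empty */
		     sc->sa.agno,	/* owning AG */
		     0);		/* flags, e.g. XFS_BTREE_CRC_BLOCKS in the v5 cases below */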
Example 2: xfs_btroot_init()
/*
 * Generic btree root block init function
 */
static void
xfs_btroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	xfs_btree_init_block(mp, bp, id->type, 0, 0, id->agno, 0);
}
Example 3: xfs_rmaproot_init()
/*
 * Reverse map root block init
 */
static void
xfs_rmaproot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_btree_block	*block = XFS_BUF_TO_BLOCK(bp);
	struct xfs_rmap_rec	*rrec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_RMAP, 0, 4, id->agno, 0);

	/*
	 * mark the AG header regions as static metadata.  The BNO
	 * btree block is the first block after the headers, so
	 * its location defines the size of the region the static
	 * metadata consumes.
	 *
	 * Note: unlike mkfs, we never have to account for log
	 * space when growing the data regions.
	 */
	rrec = XFS_RMAP_REC_ADDR(block, 1);
	rrec->rm_startblock = 0;
	rrec->rm_blockcount = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_FS);
	rrec->rm_offset = 0;

	/* account freespace btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 2);
	rrec->rm_startblock = cpu_to_be32(XFS_BNO_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(2);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account inode btree root blocks */
	rrec = XFS_RMAP_REC_ADDR(block, 3);
	rrec->rm_startblock = cpu_to_be32(XFS_IBT_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(XFS_RMAP_BLOCK(mp) -
					  XFS_IBT_BLOCK(mp));
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_INOBT);
	rrec->rm_offset = 0;

	/* account for rmap btree root */
	rrec = XFS_RMAP_REC_ADDR(block, 4);
	rrec->rm_startblock = cpu_to_be32(XFS_RMAP_BLOCK(mp));
	rrec->rm_blockcount = cpu_to_be32(1);
	rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_AG);
	rrec->rm_offset = 0;

	/* account for refc btree root */
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		rrec = XFS_RMAP_REC_ADDR(block, 5);
		rrec->rm_startblock = cpu_to_be32(xfs_refc_block(mp));
		rrec->rm_blockcount = cpu_to_be32(1);
		rrec->rm_owner = cpu_to_be64(XFS_RMAP_OWN_REFC);
		rrec->rm_offset = 0;
		be16_add_cpu(&block->bb_numrecs, 1);
	}
}
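The four records written unconditionally (plus the fifth on reflink filesystems) account for the statically-placed metadata at the front of every AG. Below is a rough sketch of the ranges they cover, with owners as recorded above; the concrete block numbers are whatever the XFS_*_BLOCK() macros resolve to on a given filesystem.

/*
 * Schematic layout of the AG region covered by the records above:
 *
 *   [0 .. XFS_BNO_BLOCK)               sb/agf/agi/agfl headers    rec 1, XFS_RMAP_OWN_FS
 *   [XFS_BNO_BLOCK .. +2)              bnobt + cntbt roots        rec 2, XFS_RMAP_OWN_AG
 *   [XFS_IBT_BLOCK .. XFS_RMAP_BLOCK)  inobt (and finobt) roots   rec 3, XFS_RMAP_OWN_INOBT
 *   [XFS_RMAP_BLOCK .. +1)             rmapbt root                rec 4, XFS_RMAP_OWN_AG
 *   [xfs_refc_block(mp) .. +1)         refcountbt root            rec 5, XFS_RMAP_OWN_REFC (reflink only)
 */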
Example 4: xfs_cntroot_init()
static void
xfs_cntroot_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_alloc_rec	*arec;

	xfs_btree_init_block(mp, bp, XFS_BTNUM_CNT, 0, 1, id->agno, 0);
	arec = XFS_ALLOC_REC_ADDR(mp, XFS_BUF_TO_BLOCK(bp), 1);
	arec->ar_startblock = cpu_to_be32(mp->m_ag_prealloc_blocks);
	arec->ar_blockcount = cpu_to_be32(id->agsize -
					  be32_to_cpu(arec->ar_startblock));
}
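The cntbt root starts life with a single record describing everything beyond the preallocated AG metadata as one free extent. A quick worked example with purely hypothetical numbers:

/*
 * Hypothetical numbers, for illustration only: with id->agsize == 245760
 * blocks and mp->m_ag_prealloc_blocks == 8, the single root record is
 *
 *   ar_startblock = 8
 *   ar_blockcount = 245760 - 8 = 245752
 *
 * i.e. one free extent covering the whole AG minus the reserved header
 * and root blocks.
 */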
Example 5: build_freespace_tree()
/*
 * rebuilds a freespace tree given a cursor and the magic number of the
 * type of tree to build (bno or bcnt).  returns the number of free
 * blocks represented by the tree.
 */
static xfs_extlen_t
build_freespace_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, __uint32_t magic)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_rec_t		*bt_rec;
	int			level;
	xfs_agblock_t		agbno;
	extent_tree_node_t	*ext_ptr;
	bt_stat_level_t		*lptr;
	xfs_extlen_t		freeblks;
	__uint32_t		crc_magic;

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "in build_freespace_tree, agno = %d\n", agno);
#endif
	level = btree_curs->num_levels;
	freeblks = 0;

	ASSERT(level > 0);
	if (magic == XFS_ABTB_MAGIC)
		crc_magic = XFS_ABTB_CRC_MAGIC;
	else
		crc_magic = XFS_ABTC_CRC_MAGIC;

	/*
	 * initialize the first block on each btree level
	 */
	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, i,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, i,
						0, agno, 0);
	}
	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_freespace_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (magic == XFS_ABTB_MAGIC)
		ext_ptr = findfirst_bno_extent(agno);
	else
		ext_ptr = findfirst_bcnt_extent(agno);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bft, agno = %d, start = %u, count = %u\n",
		agno, ext_ptr->ex_startblock, ext_ptr->ex_blockcount);
#endif

	lptr = &btree_curs->level[0];

	for (i = 0; i < btree_curs->level[0].num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, 0,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, 0,
						0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "bft, bb_numrecs = %d\n",
				be16_to_cpu(bt_hdr->bb_numrecs));
#endif

		if (lptr->modulo > 0)
			lptr->modulo--;

		/*
		 * initialize values in the path up to the root if
		 * this is a multi-level btree
		 */
		if (btree_curs->num_levels > 1)
			prop_freespace_cursor(mp, agno, btree_curs,
					ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount,
					0, magic);

		bt_rec = (xfs_alloc_rec_t *)
			  ((char *)bt_hdr + XFS_ALLOC_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ext_ptr != NULL);
			bt_rec[j].ar_startblock = cpu_to_be32(
							ext_ptr->ex_startblock);
			bt_rec[j].ar_blockcount = cpu_to_be32(
							ext_ptr->ex_blockcount);
			freeblks += ext_ptr->ex_blockcount;
			if (magic == XFS_ABTB_MAGIC)
				ext_ptr = findnext_bno_extent(ext_ptr);
			else
				ext_ptr = findnext_bcnt_extent(agno, ext_ptr);
#if 0
#ifdef XR_BLD_FREE_TRACE
			if (ext_ptr == NULL)
				fprintf(stderr, "null extent pointer, j = %d\n",
					j);
			else
				fprintf(stderr,
				"bft, agno = %d, start = %u, count = %u\n",
					agno, ext_ptr->ex_startblock,
					ext_ptr->ex_blockcount);
#endif
#endif
		}

		if (ext_ptr != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_FREE_TRACE
				fprintf(stderr, " writing fst agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	return(freeblks);
}
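build_freespace_tree() is a bottom-up bulk load: one block per level is initialized up front, the sorted extent stream is written into leaf blocks left to right, and each time a new block is started its first record is pushed into the parent level (recursing toward the root) while sibling pointers chain the blocks at each level. The standalone sketch below is not XFS code; it uses made-up types and a greedy fill (XFS spreads records evenly across blocks using num_recs_pb/modulo), but it shows the same control flow and compiles on its own.

/*
 * Schematic bottom-up bulk load: fill leaves from a sorted stream and,
 * whenever a new block starts at some level, push the key that opens it
 * into the parent level, recursing upward.
 */
#include <stdio.h>

#define FANOUT	4	/* records or keys per block, for illustration */
#define LEVELS	3	/* assumed tall enough for the input below */

struct block {
	int	nrecs;
	int	keys[FANOUT];
};

static struct block	cur[LEVELS];		/* block being filled per level */
static int		blocks_done[LEVELS];	/* "written out" block count */

/* Add @key at @level; if the current block is full, start a new one
 * and propagate the key that opens it one level up. */
static void add_key(int level, int key)
{
	if (level >= LEVELS)
		return;
	if (cur[level].nrecs == FANOUT) {
		blocks_done[level]++;		/* stands in for libxfs_writebuf() */
		cur[level].nrecs = 0;
		add_key(level + 1, key);	/* analogue of prop_freespace_cursor() */
	}
	cur[level].keys[cur[level].nrecs++] = key;
}

int main(void)
{
	int	recs[23];
	int	i;

	for (i = 0; i < 23; i++)
		recs[i] = i * 10;		/* a sorted record stream */

	for (i = 0; i < 23; i++) {
		if (cur[0].nrecs == FANOUT) {
			blocks_done[0]++;
			cur[0].nrecs = 0;
			/* first record of the new leaf becomes a parent key */
			add_key(1, recs[i]);
		}
		cur[0].keys[cur[0].nrecs++] = recs[i];
	}

	for (i = 0; i < LEVELS; i++)
		printf("level %d: %d full blocks + 1 partial\n",
		       i, blocks_done[i]);
	return 0;
}

One simplification to note: XFS stores a key/ptr pair for every child, and the bb_numrecs == 0 path in prop_freespace_cursor() seeds the leftmost entry at each level; in this sketch the leftmost block at each level simply has no separator key above it.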
Example 6: prop_freespace_cursor()
static void
prop_freespace_cursor(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, xfs_agblock_t startblock,
		xfs_extlen_t blockcount, int level, __uint32_t magic)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_alloc_key_t		*bt_key;
	xfs_alloc_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;
	__uint32_t		crc_magic;

	if (magic == XFS_ABTB_MAGIC)
		crc_magic = XFS_ABTB_CRC_MAGIC;
	else
		crc_magic = XFS_ABTC_CRC_MAGIC;

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * only happens once when initializing the
		 * left-hand side of the tree.
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, magic);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, " %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK) {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_allocbt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, crc_magic, level,
						0, agno, XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic, level,
						0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_freespace_cursor(mp, agno, btree_curs, startblock,
				blockcount, level, magic);
	}
	/*
	 * add extent info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_ALLOC_KEY_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_ALLOC_PTR_ADDR(mp, bt_hdr,
				be16_to_cpu(bt_hdr->bb_numrecs),
				mp->m_alloc_mxr[1]);

	bt_key->ar_startblock = cpu_to_be32(startblock);
	bt_key->ar_blockcount = cpu_to_be32(blockcount);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}
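Both propagation helpers address interior blocks with KEY/PTR macros whose last argument is the node max record count. A schematic of the node-block layout those macros assume makes clear why: keys are packed at the front, and the pointer array only begins after space for the maximum number of keys, so locating a pointer needs mp->m_alloc_mxr[1] here (and m_inobt_mxr[1] in Example 8). Sketch, not to scale:

/*
 * Interior (node) block layout assumed by XFS_ALLOC_KEY_ADDR() /
 * XFS_ALLOC_PTR_ADDR() above (schematic only):
 *
 *   +--------------+------------------------+------------------------+
 *   | block header | key[1] ... key[maxrec] | ptr[1] ... ptr[maxrec] |
 *   +--------------+------------------------+------------------------+
 *
 * The pointer array begins after room for maxrec keys, which is why the
 * PTR_ADDR macro takes the non-leaf max record count as its last argument.
 */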
Example 7: build_ino_tree()
/*
 * rebuilds an inode tree given a cursor.  We're lazy here and call
 * the routine that builds the agi.
 */
static void
build_ino_tree(xfs_mount_t *mp, xfs_agnumber_t agno,
		bt_status_t *btree_curs, __uint32_t magic,
		struct agi_stat *agi_stat, int finobt)
{
	xfs_agnumber_t		i;
	xfs_agblock_t		j;
	xfs_agblock_t		agbno;
	xfs_agino_t		first_agino;
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_rec_t		*bt_rec;
	ino_tree_node_t		*ino_rec;
	bt_stat_level_t		*lptr;
	xfs_agino_t		count = 0;
	xfs_agino_t		freecount = 0;
	int			inocnt;
	uint8_t			finocnt;
	int			k;
	int			level = btree_curs->num_levels;
	int			spmask;
	uint64_t		sparse;
	uint16_t		holemask;

	for (i = 0; i < level; i++)  {
		lptr = &btree_curs->level[i];

		agbno = get_next_blockaddr(agno, i, btree_curs);
		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));

		if (i == btree_curs->num_levels - 1)
			btree_curs->root = agbno;

		lptr->agbno = agbno;
		lptr->prev_agbno = NULLAGBLOCK;
		lptr->prev_buf_p = NULL;
		/*
		 * initialize block header
		 */

		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						i, 0, agno, 0);
	}

	/*
	 * run along leaf, setting up records.  as we have to switch
	 * blocks, call the prop_ino_cursor routine to set up the new
	 * pointers for the parent.  that can recurse up to the root
	 * if required.  set the sibling pointers for leaf level here.
	 */
	if (finobt)
		ino_rec = findfirst_free_inode_rec(agno);
	else
		ino_rec = findfirst_inode_rec(agno);

	if (ino_rec != NULL)
		first_agino = ino_rec->ino_startnum;
	else
		first_agino = NULLAGINO;

	lptr = &btree_curs->level[0];

	for (i = 0; i < lptr->num_blocks; i++)  {
		/*
		 * block initialization, lay in block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, magic,
						0, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);
		bt_hdr->bb_numrecs = cpu_to_be16(lptr->num_recs_pb +
							(lptr->modulo > 0));

		if (lptr->modulo > 0)
			lptr->modulo--;

		if (lptr->num_recs_pb > 0)
			prop_ino_cursor(mp, agno, btree_curs,
					ino_rec->ino_startnum, 0);

		bt_rec = (xfs_inobt_rec_t *)
			  ((char *)bt_hdr + XFS_INOBT_BLOCK_LEN(mp));
		for (j = 0; j < be16_to_cpu(bt_hdr->bb_numrecs); j++) {
			ASSERT(ino_rec != NULL);
			bt_rec[j].ir_startino =
					cpu_to_be32(ino_rec->ino_startnum);
			bt_rec[j].ir_free = cpu_to_be64(ino_rec->ir_free);

			inocnt = finocnt = 0;
			for (k = 0; k < sizeof(xfs_inofree_t)*NBBY; k++)  {
				ASSERT(is_inode_confirmed(ino_rec, k));

				if (is_inode_sparse(ino_rec, k))
					continue;
				if (is_inode_free(ino_rec, k))
					finocnt++;
				inocnt++;
			}

			/*
			 * Set the freecount and check whether we need to update
			 * the sparse format fields. Otherwise, skip to the next
			 * record.
			 */
			inorec_set_freecount(mp, &bt_rec[j], finocnt);
			if (!xfs_sb_version_hassparseinodes(&mp->m_sb))
				goto nextrec;

			/*
			 * Convert the 64-bit in-core sparse inode state to the
			 * 16-bit on-disk holemask.
			 */
			holemask = 0;
			spmask = (1 << XFS_INODES_PER_HOLEMASK_BIT) - 1;
			sparse = ino_rec->ir_sparse;
			for (k = 0; k < XFS_INOBT_HOLEMASK_BITS; k++) {
				if (sparse & spmask) {
					ASSERT((sparse & spmask) == spmask);
					holemask |= (1 << k);
				} else
					ASSERT((sparse & spmask) == 0);
				sparse >>= XFS_INODES_PER_HOLEMASK_BIT;
			}

			bt_rec[j].ir_u.sp.ir_count = inocnt;
			bt_rec[j].ir_u.sp.ir_holemask = cpu_to_be16(holemask);

nextrec:
			freecount += finocnt;
			count += inocnt;

			if (finobt)
				ino_rec = next_free_ino_rec(ino_rec);
			else
				ino_rec = next_ino_rec(ino_rec);
		}

		if (ino_rec != NULL)  {
			/*
			 * get next leaf level block
			 */
			if (lptr->prev_buf_p != NULL)  {
#ifdef XR_BLD_INO_TRACE
				fprintf(stderr, "writing inobt agbno %u\n",
					lptr->prev_agbno);
#endif
				ASSERT(lptr->prev_agbno != NULLAGBLOCK);
				libxfs_writebuf(lptr->prev_buf_p, 0);
			}
			lptr->prev_buf_p = lptr->buf_p;
			lptr->prev_agbno = lptr->agbno;
			lptr->agbno = get_next_blockaddr(agno, 0, btree_curs);
			bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(lptr->agbno);

			lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, lptr->agbno),
					XFS_FSB_TO_BB(mp, 1));
		}
	}

	if (agi_stat) {
		agi_stat->first_agino = first_agino;
		agi_stat->count = count;
		agi_stat->freecount = freecount;
	}
}
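The trickiest step in Example 7 is compressing the 64-bit in-core sparse map into the 16-bit on-disk holemask: the chunk holds 64 inodes, so each holemask bit stands for 4 consecutive inodes and may be set only when all 4 are sparse (which is what the ASSERTs enforce). The self-contained sketch below is not XFS code; the constants are written out literally (the real loop takes them from XFS_INODES_PER_HOLEMASK_BIT and XFS_INOBT_HOLEMASK_BITS), but it mirrors the same conversion and shows one worked value.

#include <stdint.h>
#include <stdio.h>

/* Collapse a 64-bit per-inode sparse bitmap into a 16-bit holemask,
 * one holemask bit per group of 4 inodes. */
static uint16_t sparse_to_holemask(uint64_t sparse)
{
	uint64_t	spmask = (1ULL << 4) - 1;	/* 4 inodes per holemask bit */
	uint16_t	holemask = 0;
	int		k;

	for (k = 0; k < 16; k++) {
		if ((sparse & spmask) == spmask)	/* whole group is sparse */
			holemask |= (uint16_t)(1u << k);
		sparse >>= 4;
	}
	return holemask;
}

int main(void)
{
	/* first 16 inodes of the chunk sparse -> low 4 holemask bits set */
	printf("0x%04x\n", sparse_to_holemask(0xffffULL));	/* prints 0x000f */
	return 0;
}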
Example 8: prop_ino_cursor()
static void
prop_ino_cursor(xfs_mount_t *mp, xfs_agnumber_t agno, bt_status_t *btree_curs,
	xfs_agino_t startino, int level)
{
	struct xfs_btree_block	*bt_hdr;
	xfs_inobt_key_t		*bt_key;
	xfs_inobt_ptr_t		*bt_ptr;
	xfs_agblock_t		agbno;
	bt_stat_level_t		*lptr;

	level++;

	if (level >= btree_curs->num_levels)
		return;

	lptr = &btree_curs->level[level];
	bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);

	if (be16_to_cpu(bt_hdr->bb_numrecs) == 0)  {
		/*
		 * this only happens once to initialize the
		 * first path up the left side of the tree
		 * where the agbno's are already set up
		 */
		prop_ino_cursor(mp, agno, btree_curs, startino, level);
	}

	if (be16_to_cpu(bt_hdr->bb_numrecs) ==
				lptr->num_recs_pb + (lptr->modulo > 0))  {
		/*
		 * write out current prev block, grab us a new block,
		 * and set the rightsib pointer of current block
		 */
#ifdef XR_BLD_INO_TRACE
		fprintf(stderr, " ino prop agbno %d ", lptr->prev_agbno);
#endif
		if (lptr->prev_agbno != NULLAGBLOCK)  {
			ASSERT(lptr->prev_buf_p != NULL);
			libxfs_writebuf(lptr->prev_buf_p, 0);
		}
		lptr->prev_agbno = lptr->agbno;
		lptr->prev_buf_p = lptr->buf_p;
		agbno = get_next_blockaddr(agno, level, btree_curs);

		bt_hdr->bb_u.s.bb_rightsib = cpu_to_be32(agbno);

		lptr->buf_p = libxfs_getbuf(mp->m_dev,
					XFS_AGB_TO_DADDR(mp, agno, agbno),
					XFS_FSB_TO_BB(mp, 1));
		lptr->agbno = agbno;

		if (lptr->modulo)
			lptr->modulo--;

		/*
		 * initialize block header
		 */
		lptr->buf_p->b_ops = &xfs_inobt_buf_ops;
		bt_hdr = XFS_BUF_TO_BLOCK(lptr->buf_p);
		memset(bt_hdr, 0, mp->m_sb.sb_blocksize);
		if (xfs_sb_version_hascrc(&mp->m_sb))
			xfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_CRC_MAGIC,
						level, 0, agno,
						XFS_BTREE_CRC_BLOCKS);
		else
			xfs_btree_init_block(mp, lptr->buf_p, XFS_IBT_MAGIC,
						level, 0, agno, 0);

		bt_hdr->bb_u.s.bb_leftsib = cpu_to_be32(lptr->prev_agbno);

		/*
		 * propagate extent record for first extent in new block up
		 */
		prop_ino_cursor(mp, agno, btree_curs, startino, level);
	}
	/*
	 * add inode info to current block
	 */
	be16_add_cpu(&bt_hdr->bb_numrecs, 1);

	bt_key = XFS_INOBT_KEY_ADDR(mp, bt_hdr,
				    be16_to_cpu(bt_hdr->bb_numrecs));
	bt_ptr = XFS_INOBT_PTR_ADDR(mp, bt_hdr,
				    be16_to_cpu(bt_hdr->bb_numrecs),
				    mp->m_inobt_mxr[1]);

	bt_key->ir_startino = cpu_to_be32(startino);
	*bt_ptr = cpu_to_be32(btree_curs->level[level-1].agbno);
}