Example #1
/*
 * Initialise an AGFL block: stamp the v5 header fields when the
 * filesystem has CRCs enabled, then mark every free-list slot empty.
 */
static void
xfs_agflblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agfl		*agfl = XFS_BUF_TO_AGFL(bp);
	__be32			*agfl_bno;
	int			bucket;

	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(id->agno);
		uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
	}

	agfl_bno = XFS_BUF_TO_AGFL_BNO(mp, bp);
	for (bucket = 0; bucket < xfs_agfl_size(mp); bucket++)
		agfl_bno[bucket] = cpu_to_be32(NULLAGBLOCK);
}
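
The pattern in Example #1 — stamp the v5 header fields only when CRCs are enabled, then write NULLAGBLOCK into every free-list slot — can be exercised outside the kernel. Below is a minimal, self-contained C sketch of the same idea; the struct layout, slot count, and names (fake_agfl, fake_agfl_init, FAKE_AGFL_MAGIC) are simplified stand-ins for illustration, not the real xfs_agfl definitions.

#include <arpa/inet.h>		/* htonl()/ntohl() stand in for cpu_to_be32()/be32_to_cpu() */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FAKE_AGFL_MAGIC		0x5841464cU	/* "XAFL" */
#define FAKE_NULLAGBLOCK	((uint32_t)-1)	/* all ones, like NULLAGBLOCK */
#define FAKE_AGFL_SLOTS		118		/* arbitrary slot count for the sketch */

struct fake_agfl {
	uint32_t	magicnum;		/* big-endian on disk */
	uint32_t	seqno;			/* owning AG number, big-endian */
	uint32_t	bno[FAKE_AGFL_SLOTS];	/* free-list slots */
};

static void
fake_agfl_init(struct fake_agfl *agfl, uint32_t agno, int has_crc)
{
	int	bucket;

	/* header fields only exist on v5 (CRC-enabled) filesystems */
	if (has_crc) {
		agfl->magicnum = htonl(FAKE_AGFL_MAGIC);
		agfl->seqno = htonl(agno);
	}

	/* every slot starts out empty; all-ones is the same in any byte order */
	for (bucket = 0; bucket < FAKE_AGFL_SLOTS; bucket++)
		agfl->bno[bucket] = htonl(FAKE_NULLAGBLOCK);
}

int
main(void)
{
	struct fake_agfl	agfl;

	memset(&agfl, 0, sizeof(agfl));
	fake_agfl_init(&agfl, 3, 1);
	printf("seqno %u, slot 0 = 0x%x\n",
		(unsigned)ntohl(agfl.seqno), (unsigned)ntohl(agfl.bno[0]));
	return 0;
}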
Example #2
/*
 * build both the AGF and the AGFL for the given AG, using the
 * bno and bcnt btree cursors.
 *
 * XXX: yet more common code that can be shared with mkfs/growfs.
 */
static void
build_agf_agfl(xfs_mount_t	*mp,
		xfs_agnumber_t	agno,
		bt_status_t	*bno_bt,
		bt_status_t	*bcnt_bt,
		xfs_extlen_t	freeblks,	/* # free blocks in tree */
		int		lostblocks)	/* # blocks that will be lost */
{
	extent_tree_node_t	*ext_ptr;
	xfs_buf_t		*agf_buf, *agfl_buf;
	int			i;
	int			j;
	xfs_agfl_t		*agfl;
	xfs_agf_t		*agf;
	__be32			*freelist;

	agf_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agf_buf->b_ops = &xfs_agf_buf_ops;
	agf = XFS_BUF_TO_AGF(agf_buf);
	memset(agf, 0, mp->m_sb.sb_sectsize);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "agf = %p, agf_buf->b_addr = %p\n",
		agf, agf_buf->b_addr);
#endif

	/*
	 * set up fixed part of agf
	 */
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(agno);

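	/* all AGs are sb_agblocks long except the last, which gets the remainder */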
	if (agno < mp->m_sb.sb_agcount - 1)
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_agblocks);
	else
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);

	agf->agf_roots[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->root);
	agf->agf_levels[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->num_levels);
	agf->agf_roots[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->root);
	agf->agf_levels[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->num_levels);
	agf->agf_freeblks = cpu_to_be32(freeblks);

	/*
	 * Count and record the number of btree blocks consumed if required.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb)) {
		/*
		 * Don't count the root blocks as they are already
		 * accounted for; the "- 2" below removes one root
		 * from each of the two btrees.
		 */
		agf->agf_btreeblks = cpu_to_be32(
			(bno_bt->num_tot_blocks - bno_bt->num_free_blocks) +
			(bcnt_bt->num_tot_blocks - bcnt_bt->num_free_blocks) -
			2);
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "agf->agf_btreeblks = %u\n",
				be32_to_cpu(agf->agf_btreeblks));
#endif
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
			XFS_BTNUM_BNO,
			XFS_BTNUM_CNT);
#endif

	if (xfs_sb_version_hascrc(&mp->m_sb))
		platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);

	/* initialise the AGFL, then fill it if there are blocks left over. */
	agfl_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agfl_buf->b_ops = &xfs_agfl_buf_ops;
	agfl = XFS_BUF_TO_AGFL(agfl_buf);

	/* setting to 0xff results in initialisation to NULLAGBLOCK */
	memset(agfl, 0xff, mp->m_sb.sb_sectsize);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(agno);
		platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
		for (i = 0; i < XFS_AGFL_SIZE(mp); i++)
			agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
	}
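	/*
	 * XFS_BUF_TO_AGFL_BNO() skips the AGFL header on v5 (CRC) filesystems
	 * and points at the start of the block on older ones.
	 */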
	freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);

	/*
	 * do we have left-over blocks in the btree cursors that should
	 * be used to fill the AGFL?
	 */
	if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0)  {
		/*
		 * yes, now grab as many blocks as we can
		 */
		i = j = 0;
		while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp))  {
			freelist[i] = cpu_to_be32(
					get_next_blockaddr(agno, 0, bno_bt));
			i++;
		}

		while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp))  {
			freelist[i] = cpu_to_be32(
					get_next_blockaddr(agno, 0, bcnt_bt));
			i++;
		}
		/*
		 * now throw the rest of the blocks away and complain
		 */
		while (bno_bt->num_free_blocks > 0)  {
			(void) get_next_blockaddr(agno, 0, bno_bt);
			j++;
		}
		while (bcnt_bt->num_free_blocks > 0)  {
			(void) get_next_blockaddr(agno, 0, bcnt_bt);
			j++;
		}

		if (j > 0)  {
			if (j == lostblocks)
				do_warn(_("lost %d blocks in ag %u\n"),
					j, agno);
			else
				do_warn(_("thought we were going to lose %d "
					  "blocks in ag %u, actually lost "
					  "%d\n"),
					lostblocks, j, agno);
		}

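		/*
		 * the AGFL is a circular buffer: agf_flfirst indexes the
		 * first active entry, agf_fllast the last, and agf_flcount
		 * holds the number of active entries.
		 */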
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(i - 1);
		agf->agf_flcount = cpu_to_be32(i);

#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "writing agfl for ag %u\n", agno);
#endif

	} else  {
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
	}

	libxfs_writebuf(agfl_buf, 0);

	ext_ptr = findbiggest_bcnt_extent(agno);
	agf->agf_longest = cpu_to_be32((ext_ptr != NULL) ?
						ext_ptr->ex_blockcount : 0);

	ASSERT(be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]) !=
		be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));

	libxfs_writebuf(agf_buf, 0);

	/*
	 * now fix up the free list appropriately: let the allocator
	 * grow or shrink the AGFL to its required length.
	 * XXX: code lifted from mkfs, should be shared.
	 */
	{
		xfs_alloc_arg_t	args;
		xfs_trans_t	*tp;
		struct xfs_trans_res tres = {0};
		int		error;

		memset(&args, 0, sizeof(args));
		args.tp = tp = libxfs_trans_alloc(mp, 0);
		args.mp = mp;
		args.agno = agno;
		args.alignment = 1;
		args.pag = xfs_perag_get(mp, agno);
		libxfs_trans_reserve(tp, &tres,
				     xfs_alloc_min_freelist(mp, args.pag), 0);
		error = libxfs_alloc_fix_freelist(&args, 0);
		xfs_perag_put(args.pag);
		if (error) {
			do_error(_("failed to fix AGFL on AG %d, error %d\n"),
					agno, error);
		}
		libxfs_trans_commit(tp);
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "wrote agf for ag %u\n", agno);
#endif
}
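
The middle of build_agf_agfl() spreads the blocks the two btree cursors did not consume: the AGFL is filled to capacity, bno cursor first, and anything that will not fit is discarded and reported as lost. Below is a self-contained sketch of just that accounting, with a made-up struct in place of the bt_status_t cursors and a plain counter in place of get_next_blockaddr().

#include <stdio.h>

/* stand-ins for the two btree cursors' leftover-block counts */
struct leftover {
	int	bno_free;	/* blocks left over from the bno btree cursor */
	int	bcnt_free;	/* blocks left over from the bcnt btree cursor */
};

/*
 * Fill a freelist of 'capacity' slots from the two leftover pools, bno
 * cursor first, and return how many blocks could not be placed (lost).
 */
static int
fill_freelist(struct leftover *lo, int capacity, int *used)
{
	int	i = 0;
	int	lost;

	while (lo->bno_free > 0 && i < capacity) {
		lo->bno_free--;		/* would be get_next_blockaddr() */
		i++;
	}
	while (lo->bcnt_free > 0 && i < capacity) {
		lo->bcnt_free--;
		i++;
	}

	/* anything still unclaimed is discarded, exactly like the j counter */
	lost = lo->bno_free + lo->bcnt_free;
	lo->bno_free = lo->bcnt_free = 0;

	*used = i;
	return lost;
}

int
main(void)
{
	struct leftover	lo = { .bno_free = 5, .bcnt_free = 3 };
	int		used, lost;

	lost = fill_freelist(&lo, 6, &used);	/* capacity smaller than 5 + 3 */
	printf("freelist entries %d, lost blocks %d\n", used, lost);
	return 0;
}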