Example #1
/*
 * Allocate a new allocation btree cursor.
 */
struct xfs_btree_cur *			/* new alloc btree cursor */
xfs_allocbt_init_cursor(
	struct xfs_mount	*mp,		/* file system mount point */
	struct xfs_trans	*tp,		/* transaction pointer */
	struct xfs_buf		*agbp,		/* buffer for agf structure */
	xfs_agnumber_t		agno,		/* allocation group number */
	xfs_btnum_t		btnum)		/* btree identifier */
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_btree_cur	*cur;

	ASSERT(btnum == XFS_BTNUM_BNO || btnum == XFS_BTNUM_CNT);

	cur = kmem_zone_zalloc(xfs_btree_cur_zone, KM_SLEEP);

	cur->bc_tp = tp;
	cur->bc_mp = mp;
	cur->bc_btnum = btnum;
	cur->bc_blocklog = mp->m_sb.sb_blocklog;
	cur->bc_ops = &xfs_allocbt_ops;

	if (btnum == XFS_BTNUM_CNT) {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_CNT]);
		cur->bc_flags = XFS_BTREE_LASTREC_UPDATE;
	} else {
		cur->bc_nlevels = be32_to_cpu(agf->agf_levels[XFS_BTNUM_BNO]);
	}

	cur->bc_private.a.agbp = agbp;
	cur->bc_private.a.agno = agno;

	return cur;
}
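A minimal usage sketch (hypothetical caller, not from the source): assuming mp, tp, agbp and agno are already set up as in the function above, and using xfs_alloc_lookup_ge() with an arbitrary key (bno, len) purely as an example query, a cursor is created, used, and torn down like this:

	struct xfs_btree_cur	*cur;
	int			error, stat;

	/* by-block-number btree; use XFS_BTNUM_CNT for the by-size btree */
	cur = xfs_allocbt_init_cursor(mp, tp, agbp, agno, XFS_BTNUM_BNO);
	error = xfs_alloc_lookup_ge(cur, bno, len, &stat);
	xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);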
Example #2
/* Find the roots of the per-AG btrees described in btree_info. */
int
xrep_find_ag_btree_roots(
	struct xfs_scrub		*sc,
	struct xfs_buf			*agf_bp,
	struct xrep_find_ag_btree	*btree_info,
	struct xfs_buf			*agfl_bp)
{
	struct xfs_mount		*mp = sc->mp;
	struct xrep_findroot		ri;
	struct xrep_find_ag_btree	*fab;
	struct xfs_btree_cur		*cur;
	int				error;

	ASSERT(xfs_buf_islocked(agf_bp));
	ASSERT(agfl_bp == NULL || xfs_buf_islocked(agfl_bp));

	ri.sc = sc;
	ri.btree_info = btree_info;
	ri.agf = XFS_BUF_TO_AGF(agf_bp);
	ri.agfl_bp = agfl_bp;
	for (fab = btree_info; fab->buf_ops; fab++) {
		ASSERT(agfl_bp || fab->rmap_owner != XFS_RMAP_OWN_AG);
		ASSERT(XFS_RMAP_NON_INODE_OWNER(fab->rmap_owner));
		fab->root = NULLAGBLOCK;
		fab->height = 0;
	}

	cur = xfs_rmapbt_init_cursor(mp, sc->tp, agf_bp, sc->sa.agno);
	error = xfs_rmap_query_all(cur, xrep_findroot_rmap, &ri);
	xfs_btree_del_cursor(cur, error);

	return error;
}
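A sketch of how a caller might fill in btree_info (hypothetical; the field names rmap_owner, buf_ops, root and height are the ones the loop above dereferences, and a NULL buf_ops entry is what terminates that loop):

	struct xrep_find_ag_btree	fab[] = {
		{
			/* look for free-space btree blocks owned by the AG */
			.rmap_owner	= XFS_RMAP_OWN_AG,
			.buf_ops	= &xfs_allocbt_buf_ops,
		},
		{
			.buf_ops	= NULL,	/* sentinel: ends the fab loop */
		},
	};
	int				error;

	error = xrep_find_ag_btree_roots(sc, agf_bp, fab, agfl_bp);
	/* on success, fab[0].root and fab[0].height describe the tree found */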
Example #3
STATIC void
xfs_allocbt_init_ptr_from_cur(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	ASSERT(cur->bc_private.a.agno == be32_to_cpu(agf->agf_seqno));
	ASSERT(agf->agf_roots[cur->bc_btnum] != 0);

	ptr->s = agf->agf_roots[cur->bc_btnum];
}
STATIC void
xfs_allocbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	int			btnum = cur->bc_btnum;

	ASSERT(ptr->s != 0);

	agf->agf_roots[btnum] = ptr->s;
	be32_add_cpu(&agf->agf_levels[btnum], inc);
	cur->bc_mp->m_perag[seqno].pagf_levels[btnum] += inc;

	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_ROOTS | XFS_AGF_LEVELS);
}
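The root/level pair that xfs_allocbt_set_root() logs in the AGF is exactly what the cursor constructor in Example #1 consumes, so the two must stay in sync. A hypothetical sanity-check helper (illustration only, not part of the source) makes the invariant explicit:

STATIC void
xfs_allocbt_check_agf_level(
	struct xfs_btree_cur	*cur)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(cur->bc_private.a.agbp);

	/* cursor height must match the level count logged in the AGF */
	ASSERT(cur->bc_nlevels ==
	       be32_to_cpu(agf->agf_levels[cur->bc_btnum]));
}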
Example #5
STATIC void
xfs_refcountbt_set_root(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*ptr,
	int			inc)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	xfs_agnumber_t		seqno = be32_to_cpu(agf->agf_seqno);
	struct xfs_perag	*pag = xfs_perag_get(cur->bc_mp, seqno);

	ASSERT(ptr->s != 0);

	agf->agf_refcount_root = ptr->s;
	be32_add_cpu(&agf->agf_refcount_level, inc);
	pag->pagf_refcount_level += inc;
	xfs_perag_put(pag);

	xfs_alloc_log_agf(cur->bc_tp, agbp,
			XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}
Example #6
static void
xfs_agfblock_init(
	struct xfs_mount	*mp,
	struct xfs_buf		*bp,
	struct aghdr_init_data	*id)
{
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(bp);
	xfs_extlen_t		tmpsize;

	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(id->agno);
	agf->agf_length = cpu_to_be32(id->agsize);
	agf->agf_roots[XFS_BTNUM_BNOi] = cpu_to_be32(XFS_BNO_BLOCK(mp));
	agf->agf_roots[XFS_BTNUM_CNTi] = cpu_to_be32(XFS_CNT_BLOCK(mp));
	agf->agf_levels[XFS_BTNUM_BNOi] = cpu_to_be32(1);
	agf->agf_levels[XFS_BTNUM_CNTi] = cpu_to_be32(1);
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		agf->agf_roots[XFS_BTNUM_RMAPi] =
					cpu_to_be32(XFS_RMAP_BLOCK(mp));
		agf->agf_levels[XFS_BTNUM_RMAPi] = cpu_to_be32(1);
		agf->agf_rmap_blocks = cpu_to_be32(1);
	}

	agf->agf_flfirst = cpu_to_be32(1);
	agf->agf_fllast = 0;
	agf->agf_flcount = 0;
	tmpsize = id->agsize - mp->m_ag_prealloc_blocks;
	agf->agf_freeblks = cpu_to_be32(tmpsize);
	agf->agf_longest = cpu_to_be32(tmpsize);
	if (xfs_sb_version_hascrc(&mp->m_sb))
		uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);
	if (xfs_sb_version_hasreflink(&mp->m_sb)) {
		agf->agf_refcount_root = cpu_to_be32(
				xfs_refc_block(mp));
		agf->agf_refcount_level = cpu_to_be32(1);
		agf->agf_refcount_blocks = cpu_to_be32(1);
	}
}
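Both agf_freeblks and agf_longest are written as tmpsize above, so a freshly initialized AG holds a single free extent whose length equals the total free space. A hypothetical big-endian-aware check (illustration only):

static bool
xfs_agf_is_freshly_initialized(
	struct xfs_agf		*agf)
{
	/* one free extent => total free space equals the longest extent */
	return be32_to_cpu(agf->agf_freeblks) ==
	       be32_to_cpu(agf->agf_longest);
}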
Example #7
STATIC int
xfs_refcountbt_alloc_block(
	struct xfs_btree_cur	*cur,
	union xfs_btree_ptr	*start,
	union xfs_btree_ptr	*new,
	int			*stat)
{
	struct xfs_buf		*agbp = cur->bc_private.a.agbp;
	struct xfs_agf		*agf = XFS_BUF_TO_AGF(agbp);
	struct xfs_alloc_arg	args;		/* block allocation args */
	int			error;		/* error return value */

	XFS_BTREE_TRACE_CURSOR(cur, XBT_ENTRY);

	memset(&args, 0, sizeof(args));
	args.tp = cur->bc_tp;
	args.mp = cur->bc_mp;
	args.type = XFS_ALLOCTYPE_NEAR_BNO;
	args.fsbno = XFS_AGB_TO_FSB(cur->bc_mp, cur->bc_private.a.agno,
			xfs_refc_block(args.mp));
	args.firstblock = args.fsbno;
	xfs_rmap_ag_owner(&args.oinfo, XFS_RMAP_OWN_REFC);
	args.minlen = args.maxlen = args.prod = 1;
	args.resv = XFS_AG_RESV_METADATA;

	error = xfs_alloc_vextent(&args);
	if (error)
		goto out_error;
	if (args.fsbno == NULLFSBLOCK) {
		/* allocation failed; report "no block" to the caller */
		XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
		*stat = 0;
		return 0;
	}
	ASSERT(args.agno == cur->bc_private.a.agno);
	ASSERT(args.len == 1);

	/* account the new block in the AGF and log the change */
	new->s = cpu_to_be32(args.agbno);
	be32_add_cpu(&agf->agf_refcount_blocks, 1);
	xfs_alloc_log_agf(cur->bc_tp, agbp, XFS_AGF_REFCOUNT_BLOCKS);

	XFS_BTREE_TRACE_CURSOR(cur, XBT_EXIT);
	*stat = 1;
	return 0;

out_error:
	XFS_BTREE_TRACE_CURSOR(cur, XBT_ERROR);
	return error;
}
Example #8
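/*
 * Thin wrapper (presumably for debugging or FFI use) that exposes the
 * XFS_BUF_TO_AGF() macro as an ordinary callable function.
 */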
xfs_agf_t *
xfs_buf_to_agf(xfs_buf_t *bp)
{
    return XFS_BUF_TO_AGF(bp);
}
Example #9
STATIC int
xfs_trim_extents(
    struct xfs_mount	*mp,
    xfs_agnumber_t		agno,
    xfs_fsblock_t		start,
    xfs_fsblock_t		len,
    xfs_fsblock_t		minlen,
    __uint64_t		*blocks_trimmed)
{
    struct block_device	*bdev = mp->m_ddev_targp->bt_bdev;
    struct xfs_btree_cur	*cur;
    struct xfs_buf		*agbp;
    struct xfs_perag	*pag;
    int			error;
    int			i;

    pag = xfs_perag_get(mp, agno);

    error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
    if (error || !agbp)
        goto out_put_perag;

    cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);

    /*
     * Force out the log.  This means any transactions that might have freed
     * space before we took the AGF buffer lock are now on disk, and the
     * volatile disk cache is flushed.
     */
    xfs_log_force(mp, XFS_LOG_SYNC);

    /*
     * Look up the longest btree in the AGF and start with it.
     */
    error = xfs_alloc_lookup_le(cur, 0,
                                be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest),
                                &i);
    if (error)
        goto out_del_cursor;

    /*
     * Loop until we are done with all extents that are large
     * enough to be worth discarding.
     */
    while (i) {
        xfs_agblock_t fbno;
        xfs_extlen_t flen;

        error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
        if (error)
            goto out_del_cursor;
        XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
        ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));

        /*
         * Too small?  Give up.
         */
        if (flen < minlen) {
            trace_xfs_discard_toosmall(mp, agno, fbno, flen);
            goto out_del_cursor;
        }

        /*
         * If the extent is entirely outside of the range we are
         * supposed to discard, skip it.  Do not bother to trim
         * down partially overlapping ranges for now.
         */
        if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start ||
                XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) {
            trace_xfs_discard_exclude(mp, agno, fbno, flen);
            goto next_extent;
        }

        /*
         * If any blocks in the range are still busy, skip the
         * discard and try again the next time.
         */
        if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
            trace_xfs_discard_busy(mp, agno, fbno, flen);
            goto next_extent;
        }

        trace_xfs_discard_extent(mp, agno, fbno, flen);
        error = -blkdev_issue_discard(bdev,
                                      XFS_AGB_TO_DADDR(mp, agno, fbno),
                                      XFS_FSB_TO_BB(mp, flen),
                                      GFP_NOFS, 0);
        if (error)
            goto out_del_cursor;
        *blocks_trimmed += flen;

next_extent:
        error = xfs_btree_decrement(cur, 0, &i);
        if (error)
            goto out_del_cursor;
    }

out_del_cursor:
    xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
    xfs_buf_relse(agbp);
out_put_perag:
    xfs_perag_put(pag);
    return error;
}
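A rough sketch of how an FITRIM-style caller might drive xfs_trim_extents() over every AG touched by the request (hypothetical driver loop; a real ioctl handler would also validate and clamp the range first):

	xfs_agnumber_t	agno;
	__uint64_t	blocks_trimmed = 0;
	int		error = 0;

	for (agno = XFS_FSB_TO_AGNO(mp, start);
	     agno < mp->m_sb.sb_agcount;
	     agno++) {
		error = xfs_trim_extents(mp, agno, start, len, minlen,
				&blocks_trimmed);
		if (error)
			break;
	}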
Example #10
/*
 * Figure out how many blocks to reserve for an AG repair.  We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
	struct xfs_scrub		*sc)
{
	struct xfs_mount		*mp = sc->mp;
	struct xfs_scrub_metadata	*sm = sc->sm;
	struct xfs_perag		*pag;
	struct xfs_buf			*bp;
	xfs_agino_t			icount = NULLAGINO;
	xfs_extlen_t			aglen = NULLAGBLOCK;
	xfs_extlen_t			usedlen;
	xfs_extlen_t			freelen;
	xfs_extlen_t			bnobt_sz;
	xfs_extlen_t			inobt_sz;
	xfs_extlen_t			rmapbt_sz;
	xfs_extlen_t			refcbt_sz;
	int				error;

	if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
		return 0;

	pag = xfs_perag_get(mp, sm->sm_agno);
	if (pag->pagi_init) {
		/* Use in-core icount if possible. */
		icount = pag->pagi_count;
	} else {
		/* Try to get the actual counters from disk. */
		error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
		if (!error) {
			icount = pag->pagi_count;
			xfs_buf_relse(bp);
		}
	}

	/* Now grab the block counters from the AGF. */
	error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
	if (!error) {
		aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length);
		freelen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_freeblks);
		usedlen = aglen - freelen;
		xfs_buf_relse(bp);
	}
	xfs_perag_put(pag);

	/* If the icount is impossible, make some worst-case assumptions. */
	if (icount == NULLAGINO ||
	    !xfs_verify_agino(mp, sm->sm_agno, icount)) {
		xfs_agino_t	first, last;

		xfs_agino_range(mp, sm->sm_agno, &first, &last);
		icount = last - first + 1;
	}

	/* If the block counts are impossible, make worst-case assumptions. */
	if (aglen == NULLAGBLOCK ||
	    aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
	    freelen >= aglen) {
		aglen = xfs_ag_block_count(mp, sm->sm_agno);
		freelen = aglen;
		usedlen = aglen;
	}

	trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
			freelen, usedlen);

	/*
	 * Figure out how many blocks we'd need worst case to rebuild
	 * each type of btree.  Note that we can only rebuild the
	 * bnobt/cntbt or inobt/finobt as pairs.
	 */
	bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
	if (xfs_sb_version_hassparseinodes(&mp->m_sb))
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_HOLEMASK_BIT);
	else
		inobt_sz = xfs_iallocbt_calc_size(mp, icount /
				XFS_INODES_PER_CHUNK);
	if (xfs_sb_version_hasfinobt(&mp->m_sb))
		inobt_sz *= 2;
	if (xfs_sb_version_hasreflink(&mp->m_sb))
		refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
	else
		refcbt_sz = 0;
	if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
		/*
		 * Guess how many blocks we need to rebuild the rmapbt.
		 * For non-reflink filesystems we can't have more records than
		 * used blocks.  However, with reflink it's possible to have
		 * more than one rmap record per AG block.  We don't know how
		 * many rmaps there could be in the AG, so we start off with
		 * what we hope is a generous over-estimation.
		 */
		if (xfs_sb_version_hasreflink(&mp->m_sb))
			rmapbt_sz = xfs_rmapbt_calc_size(mp,
					(unsigned long long)aglen * 2);
		else
			rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
	} else {
		rmapbt_sz = 0;
	}

	trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
			inobt_sz, rmapbt_sz, refcbt_sz);

	return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
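The returned figure is meant to feed the repair transaction's block reservation before any rebuilding starts. A plausible consumer, sketched after the scrub setup path (the choice of tr_itruncate here is an assumption, not taken from this excerpt):

	xfs_extlen_t	resblks;
	int		error;

	resblks = xrep_calc_ag_resblks(sc);
	error = xfs_trans_alloc(sc->mp, &M_RES(sc->mp)->tr_itruncate,
			resblks, 0, 0, &sc->tp);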
Example #11
/*
 * build both the agf and the agfl for an agno given both
 * btree cursors.
 *
 * XXX: yet more common code that can be shared with mkfs/growfs.
 */
static void
build_agf_agfl(xfs_mount_t	*mp,
		xfs_agnumber_t	agno,
		bt_status_t	*bno_bt,
		bt_status_t	*bcnt_bt,
		xfs_extlen_t	freeblks,	/* # free blocks in tree */
		int		lostblocks)	/* # blocks that will be lost */
{
	extent_tree_node_t	*ext_ptr;
	xfs_buf_t		*agf_buf, *agfl_buf;
	int			i;
	int			j;
	xfs_agfl_t		*agfl;
	xfs_agf_t		*agf;
	__be32			*freelist;

	agf_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGF_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agf_buf->b_ops = &xfs_agf_buf_ops;
	agf = XFS_BUF_TO_AGF(agf_buf);
	memset(agf, 0, mp->m_sb.sb_sectsize);

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "agf = 0x%p, agf_buf->b_addr = 0x%p\n",
		agf, agf_buf->b_addr);
#endif

	/*
	 * set up fixed part of agf
	 */
	agf->agf_magicnum = cpu_to_be32(XFS_AGF_MAGIC);
	agf->agf_versionnum = cpu_to_be32(XFS_AGF_VERSION);
	agf->agf_seqno = cpu_to_be32(agno);

	if (agno < mp->m_sb.sb_agcount - 1)
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_agblocks);
	else
		agf->agf_length = cpu_to_be32(mp->m_sb.sb_dblocks -
			(xfs_rfsblock_t) mp->m_sb.sb_agblocks * agno);

	agf->agf_roots[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->root);
	agf->agf_levels[XFS_BTNUM_BNO] = cpu_to_be32(bno_bt->num_levels);
	agf->agf_roots[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->root);
	agf->agf_levels[XFS_BTNUM_CNT] = cpu_to_be32(bcnt_bt->num_levels);
	agf->agf_freeblks = cpu_to_be32(freeblks);

	/*
	 * Count and record the number of btree blocks consumed if required.
	 */
	if (xfs_sb_version_haslazysbcount(&mp->m_sb)) {
		/*
		 * Don't count the root blocks as they are already
		 * accounted for.
		 */
		agf->agf_btreeblks = cpu_to_be32(
			(bno_bt->num_tot_blocks - bno_bt->num_free_blocks) +
			(bcnt_bt->num_tot_blocks - bcnt_bt->num_free_blocks) -
			2);
#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "agf->agf_btreeblks = %u\n",
				be32_to_cpu(agf->agf_btreeblks));
#endif
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "bno root = %u, bcnt root = %u, indices = %u %u\n",
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNO]),
			be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNT]),
			XFS_BTNUM_BNO,
			XFS_BTNUM_CNT);
#endif

	if (xfs_sb_version_hascrc(&mp->m_sb))
		platform_uuid_copy(&agf->agf_uuid, &mp->m_sb.sb_meta_uuid);

	/* initialise the AGFL, then fill it if there are blocks left over. */
	agfl_buf = libxfs_getbuf(mp->m_dev,
			XFS_AG_DADDR(mp, agno, XFS_AGFL_DADDR(mp)),
			mp->m_sb.sb_sectsize/BBSIZE);
	agfl_buf->b_ops = &xfs_agfl_buf_ops;
	agfl = XFS_BUF_TO_AGFL(agfl_buf);

	/* setting to 0xff results in initialisation to NULLAGBLOCK */
	memset(agfl, 0xff, mp->m_sb.sb_sectsize);
	if (xfs_sb_version_hascrc(&mp->m_sb)) {
		agfl->agfl_magicnum = cpu_to_be32(XFS_AGFL_MAGIC);
		agfl->agfl_seqno = cpu_to_be32(agno);
		platform_uuid_copy(&agfl->agfl_uuid, &mp->m_sb.sb_meta_uuid);
		for (i = 0; i < XFS_AGFL_SIZE(mp); i++)
			agfl->agfl_bno[i] = cpu_to_be32(NULLAGBLOCK);
	}
	freelist = XFS_BUF_TO_AGFL_BNO(mp, agfl_buf);

	/*
	 * do we have left-over blocks in the btree cursors that should
	 * be used to fill the AGFL?
	 */
	if (bno_bt->num_free_blocks > 0 || bcnt_bt->num_free_blocks > 0)  {
		/*
		 * yes, now grab as many blocks as we can
		 */
		i = j = 0;
		while (bno_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp))  {
			freelist[i] = cpu_to_be32(
					get_next_blockaddr(agno, 0, bno_bt));
			i++;
		}

		while (bcnt_bt->num_free_blocks > 0 && i < XFS_AGFL_SIZE(mp))  {
			freelist[i] = cpu_to_be32(
					get_next_blockaddr(agno, 0, bcnt_bt));
			i++;
		}
		/*
		 * now throw the rest of the blocks away and complain
		 */
		while (bno_bt->num_free_blocks > 0)  {
			(void) get_next_blockaddr(agno, 0, bno_bt);
			j++;
		}
		while (bcnt_bt->num_free_blocks > 0)  {
			(void) get_next_blockaddr(agno, 0, bcnt_bt);
			j++;
		}

		if (j > 0)  {
			if (j == lostblocks)
				do_warn(_("lost %d blocks in ag %u\n"),
					j, agno);
			else
				do_warn(_("thought we were going to lose %d "
					  "blocks in ag %u, actually lost "
					  "%d\n"),
					lostblocks, j, agno);
		}

		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(i - 1);
		agf->agf_flcount = cpu_to_be32(i);

#ifdef XR_BLD_FREE_TRACE
		fprintf(stderr, "writing agfl for ag %u\n", agno);
#endif

	} else  {
		agf->agf_flfirst = 0;
		agf->agf_fllast = cpu_to_be32(XFS_AGFL_SIZE(mp) - 1);
		agf->agf_flcount = 0;
	}

	libxfs_writebuf(agfl_buf, 0);

	ext_ptr = findbiggest_bcnt_extent(agno);
	agf->agf_longest = cpu_to_be32((ext_ptr != NULL) ?
						ext_ptr->ex_blockcount : 0);

	ASSERT(be32_to_cpu(agf->agf_roots[XFS_BTNUM_BNOi]) !=
		be32_to_cpu(agf->agf_roots[XFS_BTNUM_CNTi]));

	libxfs_writebuf(agf_buf, 0);

	/*
	 * now fix up the free list appropriately
	 * XXX: code lifted from mkfs, should be shared.
	 */
	{
		xfs_alloc_arg_t	args;
		xfs_trans_t	*tp;
		struct xfs_trans_res tres = {0};
		int		error;

		memset(&args, 0, sizeof(args));
		args.tp = tp = libxfs_trans_alloc(mp, 0);
		args.mp = mp;
		args.agno = agno;
		args.alignment = 1;
		args.pag = xfs_perag_get(mp, agno);
		libxfs_trans_reserve(tp, &tres,
				     xfs_alloc_min_freelist(mp, args.pag), 0);
		error = libxfs_alloc_fix_freelist(&args, 0);
		xfs_perag_put(args.pag);
		if (error) {
			do_error(_("failed to fix AGFL on AG %d, error %d\n"),
					agno, error);
		}
		libxfs_trans_commit(tp);
	}

#ifdef XR_BLD_FREE_TRACE
	fprintf(stderr, "wrote agf for ag %u\n", agno);
#endif
}
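Since build_agf_agfl() always packs the freelist from slot zero, the AGFL header fields it writes obey a simple invariant; a hypothetical checker (not in xfs_repair, illustration only):

static void
check_packed_agfl(
	xfs_mount_t	*mp,
	xfs_agf_t	*agf)
{
	uint32_t	count = be32_to_cpu(agf->agf_flcount);

	/* freelist always starts at slot 0 */
	ASSERT(be32_to_cpu(agf->agf_flfirst) == 0);
	if (count > 0)
		ASSERT(be32_to_cpu(agf->agf_fllast) == count - 1);
	else
		ASSERT(be32_to_cpu(agf->agf_fllast) == XFS_AGFL_SIZE(mp) - 1);
}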
Example #12
/* reference: xfs_alloc.c:xfs_alloc_read_agf() */
int xfs_sffs_freesp_init(xfs_mount_t *mp, xfs_sffs_freesp_t *freesp)
{

	xfs_extlen_t	longest = 0;
	xfs_buf_t	*bp;	/* agf buffer pointer */
	xfs_agf_t	*agf;

	/*
	 * To find the first block of free space we have to calculate the
	 * address where the internal log ends; the log location is given
	 * as an fsblock number.  Since we currently only consider one AG,
	 * the fsblock number is also the agblock number.
	 */
	xfs_fsblock_t	fs_free_start = 0;
	xfs_agblock_t	ag_free_start = 0;
	unsigned long	i;
	int		error;
	
	error = xfs_read_agf(mp, NULL, 0, 0, &bp);
	if (error)
		return error;
	if (!bp)
		return 0;

	agf = XFS_BUF_TO_AGF(bp);

	/* copy out everything we need before releasing the buffer */
	longest = be32_to_cpu(agf->agf_longest);
	printk("xfs_sffs: longest free extent %u blocks\n", longest);
	printk("xfs_sffs: free block num %u\n",
	       be32_to_cpu(agf->agf_freeblks));
	xfs_trans_brelse(NULL, bp);

	printk("xfs_sffs: logstart %lu, logblocks %u\n", 
	       mp->m_sb.sb_logstart,
	       mp->m_sb.sb_logblocks);
	
	fs_free_start = mp->m_sb.sb_logstart + mp->m_sb.sb_logblocks;
	ag_free_start = XFS_FSB_TO_AGBNO(mp, fs_free_start);
	printk("xfs_sffs: free space starts from %u\n", ag_free_start);

	freesp->size = longest / XFS_SFFS_SZONE_BLKS;
	/* any remainder smaller than one superzone is discarded */
	freesp->table = vmalloc(sizeof(struct xfs_sffs_superband_entry) *
				freesp->size);
	if (!freesp->table)
		return -ENOMEM;
	printk("xfs_sffs: vmalloc for freesp->table succeeded\n");


	/*
	 *  | |x|d|d|x|d| | | ... |
	 *     ^         ^
	 *     |         |
	 *   tail       head  ->
	 *
	 *  tail      first non-clean block
	 *  head      first clean block
	 *  |d|       allocated block
	 *  |x|       invalid block (stale data block)
	 *  | |       clean block
	 *  freecnt   #clean blocks + #invalid blocks
	 *
	 * Note that both the log head and tail pointers store AGBNOs,
	 * grow towards the right, and can wrap around from the start.
	 * We also avoid the case where the head pointer wraps around
	 * and catches up with the tail pointer, since then we could not
	 * tell whether the superband is full or empty.  So we cap the
	 * head pointer at tail - 1 (in a wrapping sense).  By sacrificing
	 * one block, we prevent the ambiguous case from happening.
	 */

	printk("%7s%11s%11s%11s%11s\n", "idx", "szonestart", "loghead", 
	       "logtail", "freecnt");
	for (i = 0; i < freesp->size; i++) {
		freesp->table[i].szonestart = ag_free_start + XFS_SFFS_SZONE_BLKS * i;
		freesp->table[i].loghead = freesp->table[i].szonestart;
		freesp->table[i].logtail = freesp->table[i].szonestart;
		/* leave at least one guard block to keep the head
		   pointer from hitting the tail pointer */
		freesp->table[i].freecnt = XFS_SFFS_SZONE_BLKS - 1;
		printk("%7lu%11u%11u%11u%11u\n",
		       i,
		       freesp->table[i].szonestart,
		       freesp->table[i].loghead,
		       freesp->table[i].logtail,
		       freesp->table[i].freecnt);
	}

	printk("xfs_sffs: init xfs_sffs_freesp....\n");
	printk("xfs_sffs: XFS_SFFS_ZONE_BLKS:%d\n", XFS_SFFS_ZONE_BLKS);
	printk("xfs_sffs: XFS_SFFS_SZONE_BLKS:%d\n", XFS_SFFS_SZONE_BLKS);
	printk("xfs_sffs: XFS_SFFS_SZONE_INIT_BLK_QUOTA:%d\n",
	       XFS_SFFS_SZONE_INIT_BLK_QUOTA);
	printk("xfs_sffs: %lu table entries created\n", freesp->size);

	return 0;
}
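Given the guard-block scheme described in the comment above, the number of clean blocks left in a superzone follows from the head and tail positions alone. A hypothetical helper (the entry layout and macro names come from the code above; the function itself is not in the source):

static inline xfs_agblock_t
xfs_sffs_szone_clean_blocks(
	struct xfs_sffs_superband_entry	*e)
{
	/* head == tail means empty, so one guard block is never usable */
	if (e->loghead >= e->logtail)
		return XFS_SFFS_SZONE_BLKS - 1 - (e->loghead - e->logtail);
	return e->logtail - e->loghead - 1;
}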