/*
 * Duplicate an allocation btree cursor: build a fresh cursor over the
 * same mount, transaction, AGF buffer, AG number, and btree type as the
 * one passed in.
 */
STATIC struct xfs_btree_cur *
xfs_allocbt_dup_cursor(
	struct xfs_btree_cur	*cur)
{
	struct xfs_mount	*mp = cur->bc_mp;
	struct xfs_trans	*tp = cur->bc_tp;

	return xfs_allocbt_init_cursor(mp, tp, cur->bc_private.a.agbp,
			cur->bc_private.a.agno, cur->bc_btnum);
}
/* Actually query the bno btree. */ STATIC int xfs_getfsmap_datadev_bnobt_query( struct xfs_trans *tp, struct xfs_getfsmap_info *info, struct xfs_btree_cur **curpp, void *priv) { struct xfs_alloc_rec_incore *key = priv; /* Report any gap at the end of the last AG. */ if (info->last) return xfs_getfsmap_datadev_bnobt_helper(*curpp, &key[1], info); /* Allocate cursor for this AG and query_range it. */ *curpp = xfs_allocbt_init_cursor(tp->t_mountp, tp, info->agf_bp, info->agno, XFS_BTNUM_BNO); key->ar_startblock = info->low.rm_startblock; key[1].ar_startblock = info->high.rm_startblock; return xfs_alloc_query_range(*curpp, key, &key[1], xfs_getfsmap_datadev_bnobt_helper, info); }
STATIC int xfs_trim_extents( struct xfs_mount *mp, xfs_agnumber_t agno, xfs_fsblock_t start, xfs_fsblock_t len, xfs_fsblock_t minlen, __uint64_t *blocks_trimmed) { struct block_device *bdev = mp->m_ddev_targp->bt_bdev; struct xfs_btree_cur *cur; struct xfs_buf *agbp; struct xfs_perag *pag; int error; int i; pag = xfs_perag_get(mp, agno); error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp); if (error || !agbp) goto out_put_perag; cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT); /* * Force out the log. This means any transactions that might have freed * space before we took the AGF buffer lock are now on disk, and the * volatile disk cache is flushed. */ xfs_log_force(mp, XFS_LOG_SYNC); /* * Look up the longest btree in the AGF and start with it. */ error = xfs_alloc_lookup_le(cur, 0, XFS_BUF_TO_AGF(agbp)->agf_longest, &i); if (error) goto out_del_cursor; /* * Loop until we are done with all extents that are large * enough to be worth discarding. */ while (i) { xfs_agblock_t fbno; xfs_extlen_t flen; error = xfs_alloc_get_rec(cur, &fbno, &flen, &i); if (error) goto out_del_cursor; XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor); ASSERT(flen <= XFS_BUF_TO_AGF(agbp)->agf_longest); /* * Too small? Give up. */ if (flen < minlen) { trace_xfs_discard_toosmall(mp, agno, fbno, flen); goto out_del_cursor; } /* * If the extent is entirely outside of the range we are * supposed to discard skip it. Do not bother to trim * down partially overlapping ranges for now. */ if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start || XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) { trace_xfs_discard_exclude(mp, agno, fbno, flen); goto next_extent; } /* * If any blocks in the range are still busy, skip the * discard and try again the next time. 
*/ if (xfs_alloc_busy_search(mp, agno, fbno, flen)) { trace_xfs_discard_busy(mp, agno, fbno, flen); goto next_extent; } trace_xfs_discard_extent(mp, agno, fbno, flen); error = -blkdev_issue_discard(bdev, XFS_AGB_TO_DADDR(mp, agno, fbno), XFS_FSB_TO_BB(mp, flen), GFP_NOFS, 0); if (error) goto out_del_cursor; *blocks_trimmed += flen; next_extent: error = xfs_btree_decrement(cur, 0, &i); if (error) goto out_del_cursor; } out_del_cursor: xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR); xfs_buf_relse(agbp); out_put_perag: xfs_perag_put(pag); return error; }