static void
__xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip,
        void            (*clear_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
                                    int error, unsigned long caller_ip),
        int             tag)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        spin_lock(&ip->i_flags_lock);
        ip->i_flags &= ~XFS_IEOFBLOCKS;
        spin_unlock(&ip->i_flags_lock);

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);

        radix_tree_tag_clear(&pag->pag_ici_root,
                             XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
        if (!radix_tree_tagged(&pag->pag_ici_root, tag)) {
                /* clear the eofblocks tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                     tag);
                spin_unlock(&ip->i_mount->m_perag_lock);
                clear_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}
int
xfs_inode_ag_iterator_tag(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip, int flags,
                                           void *args),
        int                     flags,
        void                    *args,
        int                     tag)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, tag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags, args, tag);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == -EFSCORRUPTED)
                                break;
                }
        }
        return last_error;
}
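/*
 * Illustrative use of xfs_inode_ag_iterator_tag() above: a hedged,
 * hypothetical callback that merely counts the tagged inodes it is handed.
 * Only the calling contract is being demonstrated -- return 0 to keep
 * walking, or a negative error to have it recorded (the walk aborts early
 * only on -EFSCORRUPTED). The helper name and counting behaviour are
 * assumptions for the example, not kernel code.
 */
static int
xfs_example_count_tagged(
        struct xfs_inode        *ip,
        int                     flags,
        void                    *args)
{
        int                     *count = args;

        (*count)++;
        return 0;
}

/* usage: error = xfs_inode_ag_iterator_tag(mp, xfs_example_count_tagged,
 *                                          0, &count, XFS_ICI_EOFBLOCKS_TAG);
 */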
void
xfs_inode_clear_eofblocks_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        trace_xfs_inode_clear_eofblocks_tag(ip);

        radix_tree_tag_clear(&pag->pag_ici_root,
                             XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                             XFS_ICI_EOFBLOCKS_TAG);
        if (!radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG)) {
                /* clear the eofblocks tag from the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_clear(&ip->i_mount->m_perag_tree,
                                     XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                     XFS_ICI_EOFBLOCKS_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);
                trace_xfs_perag_clear_eofblocks(ip->i_mount, pag->pag_agno,
                                                -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}
void
xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;
        int             tagged;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        trace_xfs_inode_set_eofblocks_tag(ip);

        tagged = radix_tree_tagged(&pag->pag_ici_root, XFS_ICI_EOFBLOCKS_TAG);
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
                           XFS_ICI_EOFBLOCKS_TAG);
        if (!tagged) {
                /* propagate the eofblocks tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                   XFS_ICI_EOFBLOCKS_TAG);
                spin_unlock(&ip->i_mount->m_perag_lock);

                /* kick off background trimming */
                xfs_queue_eofblocks(ip->i_mount);

                trace_xfs_perag_set_eofblocks(ip->i_mount, pag->pag_agno,
                                              -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}
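/*
 * The set/clear pair above keeps a per-AG radix tree tag coherent with a
 * per-mount one. A minimal sketch of that two-level tag propagation using
 * the generic kernel radix-tree API directly; all names and the tag number
 * are hypothetical, and both indices are assumed to have been inserted into
 * their trees beforehand (radix_tree_tag_set() requires a populated slot).
 */
#include <linux/radix-tree.h>
#include <linux/spinlock.h>

#define EXAMPLE_TAG             0

static RADIX_TREE(example_inner, GFP_ATOMIC);   /* stand-in for pag_ici_root */
static RADIX_TREE(example_outer, GFP_ATOMIC);   /* stand-in for m_perag_tree */
static DEFINE_SPINLOCK(example_lock);

static void
example_set_tag(unsigned long outer_index, unsigned long inner_index)
{
        int     was_tagged;

        spin_lock(&example_lock);
        was_tagged = radix_tree_tagged(&example_inner, EXAMPLE_TAG);
        radix_tree_tag_set(&example_inner, inner_index, EXAMPLE_TAG);
        /* propagate only on the 0 -> 1 transition, as the code above does */
        if (!was_tagged)
                radix_tree_tag_set(&example_outer, outer_index, EXAMPLE_TAG);
        spin_unlock(&example_lock);
}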
int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;

        ag = 0;
        while ((pag = xfs_perag_get(mp, ag))) {
                ag = pag->pag_agno + 1;
                error = xfs_inode_ag_walk(mp, pag, execute, flags);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
        }
        return XFS_ERROR(last_error);
}
static void
xfs_filestream_put_ag(
        xfs_mount_t     *mp,
        xfs_agnumber_t  agno)
{
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, agno);
        atomic_dec(&pag->pagf_fstrms);
        xfs_perag_put(pag);
}
static int
xfs_filestream_get_ag(
        xfs_mount_t     *mp,
        xfs_agnumber_t  agno)
{
        struct xfs_perag *pag;
        int             ret;

        pag = xfs_perag_get(mp, agno);
        ret = atomic_inc_return(&pag->pagf_fstrms);
        xfs_perag_put(pag);
        return ret;
}
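/*
 * The get/put helpers above pair a per-AG filestream counter with a
 * transient perag reference. A hedged sketch of how a caller claims an AG
 * exclusively for a new stream, mirroring the check used by the AG picking
 * loop later in this file; the helper name is hypothetical.
 */
static int
example_try_claim_ag(
        xfs_mount_t     *mp,
        xfs_agnumber_t  agno)
{
        /* the atomic_inc_return() result is the new stream count */
        if (xfs_filestream_get_ag(mp, agno) > 1) {
                /* another stream got here first: undo and report failure */
                xfs_filestream_put_ag(mp, agno);
                return 0;
        }
        return 1;
}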
int
xfs_inode_ag_iterator(
        struct xfs_mount        *mp,
        int                     (*execute)(struct xfs_inode *ip,
                                           struct xfs_perag *pag, int flags),
        int                     flags,
        int                     tag,
        int                     exclusive,
        int                     *nr_to_scan)
{
        int             error = 0;
        int             last_error = 0;
        xfs_agnumber_t  ag;
        int             nr;

        nr = nr_to_scan ? *nr_to_scan : INT_MAX;
        for (ag = 0; ag < mp->m_sb.sb_agcount; ag++) {
                struct xfs_perag        *pag;

                pag = xfs_perag_get(mp, ag);
                if (!pag->pag_ici_init) {
                        xfs_perag_put(pag);
                        continue;
                }
                error = xfs_inode_ag_walk(mp, pag, execute, flags, tag,
                                                exclusive, &nr);
                xfs_perag_put(pag);
                if (error) {
                        last_error = error;
                        if (error == EFSCORRUPTED)
                                break;
                }
                if (nr <= 0)
                        break;
        }
        if (nr_to_scan)
                *nr_to_scan = nr;
        return XFS_ERROR(last_error);
}
/*
 * xfs_initialize_perag_data
 *
 * Read in each per-ag structure so we can count up the number of
 * allocated inodes, free inodes and used filesystem blocks as this
 * information is no longer persistent in the superblock. Once we have
 * this information, write it into the in-core superblock structure.
 */
int
xfs_initialize_perag_data(
        struct xfs_mount *mp,
        xfs_agnumber_t  agcount)
{
        xfs_agnumber_t  index;
        xfs_perag_t     *pag;
        xfs_sb_t        *sbp = &mp->m_sb;
        uint64_t        ifree = 0;
        uint64_t        ialloc = 0;
        uint64_t        bfree = 0;
        uint64_t        bfreelst = 0;
        uint64_t        btree = 0;
        int             error;

        for (index = 0; index < agcount; index++) {
                /*
                 * read the agf, then the agi. This gets us
                 * all the information we need and populates the
                 * per-ag structures for us.
                 */
                error = xfs_alloc_pagf_init(mp, NULL, index, 0);
                if (error)
                        return error;

                error = xfs_ialloc_pagi_init(mp, NULL, index);
                if (error)
                        return error;
                pag = xfs_perag_get(mp, index);
                ifree += pag->pagi_freecount;
                ialloc += pag->pagi_count;
                bfree += pag->pagf_freeblks;
                bfreelst += pag->pagf_flcount;
                btree += pag->pagf_btreeblks;
                xfs_perag_put(pag);
        }

        /*
         * Overwrite incore superblock counters with just-read data
         */
        spin_lock(&mp->m_sb_lock);
        sbp->sb_ifree = ifree;
        sbp->sb_icount = ialloc;
        sbp->sb_fdblocks = bfree + bfreelst + btree;
        spin_unlock(&mp->m_sb_lock);

        /* Fixup the per-cpu counters as well. */
        xfs_icsb_reinit_counters(mp);

        return 0;
}
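/*
 * Hedged sketch of a mount-time call site for the function above: with
 * lazy superblock counters the on-disk summary values may be stale after a
 * crash, so they are recounted from the AGF/AGI headers. The guard and the
 * helper name are assumptions for illustration, not the exact kernel call
 * chain.
 */
static int
example_check_summary_counters(
        struct xfs_mount        *mp)
{
        if (xfs_sb_version_haslazysbcount(&mp->m_sb))
                return xfs_initialize_perag_data(mp, mp->m_sb.sb_agcount);
        return 0;
}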
int
xfs_reclaim_inodes_count(
        struct xfs_mount        *mp)
{
        struct xfs_perag        *pag;
        xfs_agnumber_t          ag = 0;
        int                     reclaimable = 0;

        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}
void
xfs_inode_set_reclaim_tag(
        xfs_inode_t     *ip)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);
        __xfs_inode_set_reclaim_tag(pag, ip);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);
        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}
static void
__xfs_inode_set_eofblocks_tag(
        xfs_inode_t     *ip,
        void            (*execute)(struct xfs_mount *mp),
        void            (*set_tp)(struct xfs_mount *mp, xfs_agnumber_t agno,
                                  int error, unsigned long caller_ip),
        int             tag)
{
        struct xfs_mount *mp = ip->i_mount;
        struct xfs_perag *pag;
        int             tagged;

        /*
         * Don't bother locking the AG and looking up in the radix trees
         * if we already know that we have the tag set.
         */
        if (ip->i_flags & XFS_IEOFBLOCKS)
                return;
        spin_lock(&ip->i_flags_lock);
        ip->i_flags |= XFS_IEOFBLOCKS;
        spin_unlock(&ip->i_flags_lock);

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);

        tagged = radix_tree_tagged(&pag->pag_ici_root, tag);
        radix_tree_tag_set(&pag->pag_ici_root,
                           XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino), tag);
        if (!tagged) {
                /* propagate the eofblocks tag up into the perag radix tree */
                spin_lock(&ip->i_mount->m_perag_lock);
                radix_tree_tag_set(&ip->i_mount->m_perag_tree,
                                   XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
                                   tag);
                spin_unlock(&ip->i_mount->m_perag_lock);

                /* kick off background trimming */
                execute(ip->i_mount);

                set_tp(ip->i_mount, pag->pag_agno, -1, _RET_IP_);
        }

        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}
/*
 * Do we have enough reserve in this AG to handle a reflink? The refcount
 * btree already reserved all the space it needs, but the rmap btree can grow
 * infinitely, so we won't allow more reflinks when the AG is down to the
 * btree reserves.
 */
static int
xfs_reflink_ag_has_free_space(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno)
{
        struct xfs_perag        *pag;
        int                     error = 0;

        if (!xfs_sb_version_hasrmapbt(&mp->m_sb))
                return 0;

        pag = xfs_perag_get(mp, agno);
        if (xfs_ag_resv_critical(pag, XFS_AG_RESV_AGFL) ||
            xfs_ag_resv_critical(pag, XFS_AG_RESV_METADATA))
                error = -ENOSPC;
        xfs_perag_put(pag);
        return error;
}
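/*
 * Illustrative caller for the reserve check above (hedged): before sharing
 * extents into an AG, map the destination block back to its AG and refuse
 * with -ENOSPC if the reservations are critical. The helper name and the
 * fsbno parameter are assumptions for the sketch.
 */
static int
example_can_reflink_into(
        struct xfs_mount        *mp,
        xfs_fsblock_t           fsbno)
{
        return xfs_reflink_ag_has_free_space(mp, XFS_FSB_TO_AGNO(mp, fsbno));
}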
/*
 * We set the inode flag atomically with the radix tree tag.
 * Once we get tag lookups on the radix tree, this inode flag
 * can go away.
 */
void
xfs_inode_set_reclaim_tag(
        struct xfs_inode        *ip)
{
        struct xfs_mount        *mp = ip->i_mount;
        struct xfs_perag        *pag;

        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ip->i_ino));
        spin_lock(&pag->pag_ici_lock);
        spin_lock(&ip->i_flags_lock);

        radix_tree_tag_set(&pag->pag_ici_root, XFS_INO_TO_AGINO(mp, ip->i_ino),
                           XFS_ICI_RECLAIM_TAG);
        xfs_perag_set_reclaim_tag(pag);
        __xfs_iflags_set(ip, XFS_IRECLAIMABLE);

        spin_unlock(&ip->i_flags_lock);
        spin_unlock(&pag->pag_ici_lock);
        xfs_perag_put(pag);
}
STATIC void
xfs_refcountbt_set_root(
        struct xfs_btree_cur    *cur,
        union xfs_btree_ptr     *ptr,
        int                     inc)
{
        struct xfs_buf          *agbp = cur->bc_private.a.agbp;
        struct xfs_agf          *agf = XFS_BUF_TO_AGF(agbp);
        xfs_agnumber_t          seqno = be32_to_cpu(agf->agf_seqno);
        struct xfs_perag        *pag = xfs_perag_get(cur->bc_mp, seqno);

        ASSERT(ptr->s != 0);

        agf->agf_refcount_root = ptr->s;
        be32_add_cpu(&agf->agf_refcount_level, inc);
        pag->pagf_refcount_level += inc;
        xfs_perag_put(pag);

        xfs_alloc_log_agf(cur->bc_tp, agbp,
                          XFS_AGF_REFCOUNT_ROOT | XFS_AGF_REFCOUNT_LEVEL);
}
/*
 * Inode cache shrinker.
 *
 * When called we make sure that there is a background (fast) inode reclaim
 * in progress, while we will throttle the speed of reclaim via doing
 * synchronous reclaim of inodes. That means if we come across dirty inodes,
 * we wait for them to be cleaned, which we hope will not be very long due to
 * the background walker having already kicked the IO off on those dirty
 * inodes.
 */
static int
xfs_reclaim_inode_shrink(
        struct shrinker         *shrink,
        struct shrink_control   *sc)
{
        struct xfs_mount *mp;
        struct xfs_perag *pag;
        xfs_agnumber_t  ag;
        int             reclaimable;
        int             nr_to_scan = sc->nr_to_scan;
        gfp_t           gfp_mask = sc->gfp_mask;

        mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
        if (nr_to_scan) {
                /* kick background reclaimer and push the AIL */
                xfs_syncd_queue_reclaim(mp);
                xfs_ail_push_all(mp->m_ail);

                if (!(gfp_mask & __GFP_FS))
                        return -1;

                xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
                                        &nr_to_scan);
                /* terminate if we don't exhaust the scan */
                if (nr_to_scan > 0)
                        return -1;
        }

        reclaimable = 0;
        ag = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                ag = pag->pag_agno + 1;
                reclaimable += pag->pag_ici_reclaimable;
                xfs_perag_put(pag);
        }
        return reclaimable;
}
static int
libxfs_initialize_perag(
        xfs_mount_t     *mp,
        xfs_agnumber_t  agcount,
        xfs_agnumber_t  *maxagi)
{
        xfs_agnumber_t  index, max_metadata;
        xfs_agnumber_t  first_initialised = 0;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;
        xfs_ino_t       ino;
        xfs_sb_t        *sbp = &mp->m_sb;
        int             error = -ENOMEM;

        /*
         * Walk the current per-ag tree so we don't try to initialise AGs
         * that already exist (growfs case). Allocate and insert all the
         * AGs we don't find ready for initialisation.
         */
        for (index = 0; index < agcount; index++) {
                pag = xfs_perag_get(mp, index);
                if (pag) {
                        xfs_perag_put(pag);
                        continue;
                }
                if (!first_initialised)
                        first_initialised = index;

                pag = kmem_zalloc(sizeof(*pag), KM_MAYFAIL);
                if (!pag)
                        goto out_unwind;
                pag->pag_agno = index;
                pag->pag_mount = mp;

                if (radix_tree_insert(&mp->m_perag_tree, index, pag)) {
                        error = -EEXIST;
                        goto out_unwind;
                }
        }

        /*
         * If we mount with the inode64 option, or if no inode number
         * overflows the legacy 32-bit address space, clear the inode32
         * option.
         */
        agino = XFS_OFFBNO_TO_AGINO(mp, sbp->sb_agblocks - 1, 0);
        ino = XFS_AGINO_TO_INO(mp, agcount - 1, agino);

        if ((mp->m_flags & XFS_MOUNT_SMALL_INUMS) && ino > XFS_MAXINUMBER_32)
                mp->m_flags |= XFS_MOUNT_32BITINODES;
        else
                mp->m_flags &= ~XFS_MOUNT_32BITINODES;

        if (mp->m_flags & XFS_MOUNT_32BITINODES) {
                /*
                 * Calculate how much should be reserved for inodes to meet
                 * the max inode percentage.
                 */
                if (mp->m_maxicount) {
                        __uint64_t      icount;

                        icount = sbp->sb_dblocks * sbp->sb_imax_pct;
                        do_div(icount, 100);
                        icount += sbp->sb_agblocks - 1;
                        do_div(icount, sbp->sb_agblocks);
                        max_metadata = icount;
                } else {
                        max_metadata = agcount;
                }

                for (index = 0; index < agcount; index++) {
                        ino = XFS_AGINO_TO_INO(mp, index, agino);
                        if (ino > XFS_MAXINUMBER_32) {
                                index++;
                                break;
                        }

                        pag = xfs_perag_get(mp, index);
                        pag->pagi_inodeok = 1;
                        if (index < max_metadata)
                                pag->pagf_metadata = 1;
                        xfs_perag_put(pag);
                }
        } else {
                for (index = 0; index < agcount; index++) {
                        pag = xfs_perag_get(mp, index);
                        pag->pagi_inodeok = 1;
                        xfs_perag_put(pag);
                }
        }

        if (maxagi)
                *maxagi = index;
        return 0;

out_unwind:
        kmem_free(pag);
        for (; index > first_initialised; index--) {
                pag = radix_tree_delete(&mp->m_perag_tree, index);
                kmem_free(pag);
        }
        return error;
}
int
xfs_reclaim_inodes_ag(
        struct xfs_mount        *mp,
        int                     flags,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     trylock = flags & SYNC_TRYLOCK;
        int                     skipped;

restart:
        ag = 0;
        skipped = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                unsigned long   first_index = 0;
                int             done = 0;
                int             nr_found = 0;

                ag = pag->pag_agno + 1;

                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
                                xfs_perag_put(pag);
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
                } else
                        mutex_lock(&pag->pag_ici_reclaim_lock);

                do {
                        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                        int     i;

                        rcu_read_lock();
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
                                done = 1;
                                rcu_read_unlock();
                                break;
                        }

                        for (i = 0; i < nr_found; i++) {
                                struct xfs_inode *ip = batch[i];

                                if (done || xfs_reclaim_inode_grab(ip, flags))
                                        batch[i] = NULL;

                                if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
                                                                pag->pag_agno)
                                        continue;
                                first_index = XFS_INO_TO_AGINO(mp,
                                                        ip->i_ino + 1);
                                if (first_index <
                                                XFS_INO_TO_AGINO(mp, ip->i_ino))
                                        done = 1;
                        }

                        rcu_read_unlock();

                        for (i = 0; i < nr_found; i++) {
                                if (!batch[i])
                                        continue;
                                error = xfs_reclaim_inode(batch[i], pag,
                                                                flags);
                                if (error && last_error != EFSCORRUPTED)
                                        last_error = error;
                        }

                        *nr_to_scan -= XFS_LOOKUP_BATCH;

                        cond_resched();

                } while (nr_found && !done && *nr_to_scan > 0);

                if (trylock && !done)
                        pag->pag_ici_reclaim_cursor = first_index;
                else
                        pag->pag_ici_reclaim_cursor = 0;
                mutex_unlock(&pag->pag_ici_reclaim_lock);
                xfs_perag_put(pag);
        }

        if (skipped && (flags & SYNC_WAIT) && *nr_to_scan > 0) {
                trylock = 0;
                goto restart;
        }
        return XFS_ERROR(last_error);
}
/*
 * Figure out how many blocks to reserve for an AG repair. We calculate the
 * worst case estimate for the number of blocks we'd need to rebuild one of
 * any type of per-AG btree.
 */
xfs_extlen_t
xrep_calc_ag_resblks(
        struct xfs_scrub                *sc)
{
        struct xfs_mount                *mp = sc->mp;
        struct xfs_scrub_metadata       *sm = sc->sm;
        struct xfs_perag                *pag;
        struct xfs_buf                  *bp;
        xfs_agino_t                     icount = NULLAGINO;
        xfs_extlen_t                    aglen = NULLAGBLOCK;
        xfs_extlen_t                    usedlen;
        xfs_extlen_t                    freelen;
        xfs_extlen_t                    bnobt_sz;
        xfs_extlen_t                    inobt_sz;
        xfs_extlen_t                    rmapbt_sz;
        xfs_extlen_t                    refcbt_sz;
        int                             error;

        if (!(sm->sm_flags & XFS_SCRUB_IFLAG_REPAIR))
                return 0;

        pag = xfs_perag_get(mp, sm->sm_agno);
        if (pag->pagi_init) {
                /* Use in-core icount if possible. */
                icount = pag->pagi_count;
        } else {
                /* Try to get the actual counters from disk. */
                error = xfs_ialloc_read_agi(mp, NULL, sm->sm_agno, &bp);
                if (!error) {
                        icount = pag->pagi_count;
                        xfs_buf_relse(bp);
                }
        }

        /* Now grab the block counters from the AGF. */
        error = xfs_alloc_read_agf(mp, NULL, sm->sm_agno, 0, &bp);
        if (!error) {
                aglen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_length);
                freelen = be32_to_cpu(XFS_BUF_TO_AGF(bp)->agf_freeblks);
                usedlen = aglen - freelen;
                xfs_buf_relse(bp);
        }
        xfs_perag_put(pag);

        /* If the icount is impossible, make some worst-case assumptions. */
        if (icount == NULLAGINO ||
            !xfs_verify_agino(mp, sm->sm_agno, icount)) {
                xfs_agino_t     first, last;

                xfs_agino_range(mp, sm->sm_agno, &first, &last);
                icount = last - first + 1;
        }

        /* If the block counts are impossible, make worst-case assumptions. */
        if (aglen == NULLAGBLOCK ||
            aglen != xfs_ag_block_count(mp, sm->sm_agno) ||
            freelen >= aglen) {
                aglen = xfs_ag_block_count(mp, sm->sm_agno);
                freelen = aglen;
                usedlen = aglen;
        }

        trace_xrep_calc_ag_resblks(mp, sm->sm_agno, icount, aglen,
                        freelen, usedlen);

        /*
         * Figure out how many blocks we'd need worst case to rebuild
         * each type of btree.  Note that we can only rebuild the
         * bnobt/cntbt or inobt/finobt as pairs.
         */
        bnobt_sz = 2 * xfs_allocbt_calc_size(mp, freelen);
        if (xfs_sb_version_hassparseinodes(&mp->m_sb))
                inobt_sz = xfs_iallocbt_calc_size(mp, icount /
                                XFS_INODES_PER_HOLEMASK_BIT);
        else
                inobt_sz = xfs_iallocbt_calc_size(mp, icount /
                                XFS_INODES_PER_CHUNK);
        if (xfs_sb_version_hasfinobt(&mp->m_sb))
                inobt_sz *= 2;
        if (xfs_sb_version_hasreflink(&mp->m_sb))
                refcbt_sz = xfs_refcountbt_calc_size(mp, usedlen);
        else
                refcbt_sz = 0;
        if (xfs_sb_version_hasrmapbt(&mp->m_sb)) {
                /*
                 * Guess how many blocks we need to rebuild the rmapbt.
                 * For non-reflink filesystems we can't have more records than
                 * used blocks.  However, with reflink it's possible to have
                 * more than one rmap record per AG block.  We don't know how
                 * many rmaps there could be in the AG, so we start off with
                 * what we hope is a generous over-estimation.
                 */
                if (xfs_sb_version_hasreflink(&mp->m_sb))
                        rmapbt_sz = xfs_rmapbt_calc_size(mp,
                                        (unsigned long long)aglen * 2);
                else
                        rmapbt_sz = xfs_rmapbt_calc_size(mp, usedlen);
        } else {
                rmapbt_sz = 0;
        }

        trace_xrep_calc_ag_resblks_btsize(mp, sm->sm_agno, bnobt_sz,
                        inobt_sz, rmapbt_sz, refcbt_sz);

        return max(max(bnobt_sz, inobt_sz), max(rmapbt_sz, refcbt_sz));
}
/*
 * Allocate an inode.
 *
 * The caller selected an AG for us, and made sure that free inodes are
 * available.
 */
STATIC int
xfs_dialloc_ag(
        struct xfs_trans        *tp,
        struct xfs_buf          *agbp,
        xfs_ino_t               parent,
        xfs_ino_t               *inop)
{
        struct xfs_mount                *mp = tp->t_mountp;
        struct xfs_agi                  *agi = XFS_BUF_TO_AGI(agbp);
        xfs_agnumber_t                  agno = be32_to_cpu(agi->agi_seqno);
        xfs_agnumber_t                  pagno = XFS_INO_TO_AGNO(mp, parent);
        xfs_agino_t                     pagino = XFS_INO_TO_AGINO(mp, parent);
        struct xfs_perag                *pag;
        struct xfs_btree_cur            *cur, *tcur;
        struct xfs_inobt_rec_incore     rec, trec;
        xfs_ino_t                       ino;
        int                             error;
        int                             offset;
        int                             i, j;

        pag = xfs_perag_get(mp, agno);

        ASSERT(pag->pagi_init);
        ASSERT(pag->pagi_inodeok);
        ASSERT(pag->pagi_freecount > 0);

restart_pagno:
        cur = xfs_inobt_init_cursor(mp, tp, agbp, agno);
        /*
         * If pagino is 0 (this is the root inode allocation) use newino.
         * This must work because we've just allocated some.
         */
        if (!pagino)
                pagino = be32_to_cpu(agi->agi_newino);

        error = xfs_check_agi_freecount(cur, agi);
        if (error)
                goto error0;

        /*
         * If in the same AG as the parent, try to get near the parent.
         */
        if (pagno == agno) {
                int             doneleft;       /* done, to the left */
                int             doneright;      /* done, to the right */
                int             searchdistance = 10;

                error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

                error = xfs_inobt_get_rec(cur, &rec, &j);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(j == 1, error0);

                if (rec.ir_freecount > 0) {
                        /*
                         * Found a free inode in the same chunk
                         * as the parent, done.
                         */
                        goto alloc_inode;
                }

                /*
                 * In the same AG as parent, but parent's chunk is full.
                 */

                /* duplicate the cursor, search left & right simultaneously */
                error = xfs_btree_dup_cursor(cur, &tcur);
                if (error)
                        goto error0;

                /*
                 * Skip to last blocks looked up if same parent inode.
                 */
                if (pagino != NULLAGINO &&
                    pag->pagl_pagino == pagino &&
                    pag->pagl_leftrec != NULLAGINO &&
                    pag->pagl_rightrec != NULLAGINO) {
                        error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
                                                   &trec, &doneleft);
                        if (error)
                                goto error1;

                        error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
                                                   &rec, &doneright);
                        if (error)
                                goto error1;
                } else {
                        /* search left with tcur, back up 1 record */
                        error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
                        if (error)
                                goto error1;

                        /* search right with cur, go forward 1 record. */
                        error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
                        if (error)
                                goto error1;
                }

                /*
                 * Loop until we find an inode chunk with a free inode.
                 */
                while (!doneleft || !doneright) {
                        int     useleft;  /* using left inode chunk this time */

                        if (!--searchdistance) {
                                /*
                                 * Not in range - save last search
                                 * location and allocate a new inode
                                 */
                                xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
                                goto newino;
                        }

                        /* figure out the closer block if both are valid. */
                        if (!doneleft && !doneright) {
                                useleft = pagino -
                                 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
                                  rec.ir_startino - pagino;
                        } else {
                                useleft = !doneleft;
                        }

                        /* free inodes to the left? */
                        if (useleft && trec.ir_freecount) {
                                rec = trec;
                                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                                cur = tcur;

                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
                                goto alloc_inode;
                        }

                        /* free inodes to the right? */
                        if (!useleft && rec.ir_freecount) {
                                xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
                                goto alloc_inode;
                        }

                        /* get next record to check */
                        if (useleft) {
                                error = xfs_ialloc_next_rec(tcur, &trec,
                                                                 &doneleft, 1);
                        } else {
                                error = xfs_ialloc_next_rec(cur, &rec,
                                                                 &doneright, 0);
                        }
                        if (error)
                                goto error1;
                }

                /*
                 * We've reached the end of the btree. Because we are only
                 * searching a small chunk of the btree each search, there are
                 * obviously free inodes closer to the parent inode than we
                 * are now. Restart the search again.
                 */
                pag->pagl_pagino = NULLAGINO;
                pag->pagl_leftrec = NULLAGINO;
                pag->pagl_rightrec = NULLAGINO;
                xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                goto restart_pagno;
        }

        /*
         * In a different AG from the parent.
         * See if the most recently allocated block has any free.
         */
newino:
        if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
                error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
                                         XFS_LOOKUP_EQ, &i);
                if (error)
                        goto error0;

                if (i == 1) {
                        error = xfs_inobt_get_rec(cur, &rec, &j);
                        if (error)
                                goto error0;

                        if (j == 1 && rec.ir_freecount > 0) {
                                /*
                                 * The last chunk allocated in the group
                                 * still has a free inode.
                                 */
                                goto alloc_inode;
                        }
                }
        }

        /*
         * None left in the last group, search the whole AG
         */
        error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
        if (error)
                goto error0;
        XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

        for (;;) {
                error = xfs_inobt_get_rec(cur, &rec, &i);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
                if (rec.ir_freecount > 0)
                        break;
                error = xfs_btree_increment(cur, 0, &i);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
        }

alloc_inode:
        offset = xfs_lowbit64(rec.ir_free);
        ASSERT(offset >= 0);
        ASSERT(offset < XFS_INODES_PER_CHUNK);
        ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
                                   XFS_INODES_PER_CHUNK) == 0);
        ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
        rec.ir_free &= ~XFS_INOBT_MASK(offset);
        rec.ir_freecount--;
        error = xfs_inobt_update(cur, &rec);
        if (error)
                goto error0;
        be32_add_cpu(&agi->agi_freecount, -1);
        xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
        pag->pagi_freecount--;

        error = xfs_check_agi_freecount(cur, agi);
        if (error)
                goto error0;

        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
        xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
        xfs_perag_put(pag);
        *inop = ino;
        return 0;
error1:
        xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
        xfs_perag_put(pag);
        return error;
}
/*
 * Look up an inode by number in the given file system.
 * The inode is looked up in the cache held in each AG.
 * If the inode is found in the cache, initialise the vfs inode
 * if necessary.
 *
 * If it is not in core, read it in from the file system's device,
 * add it to the cache and initialise the vfs inode.
 *
 * The inode is locked according to the value of the lock_flags parameter.
 * This flag parameter indicates how and if the inode's IO lock and inode lock
 * should be taken.
 *
 * mp -- the mount point structure for the current file system.  It points
 *       to the inode hash table.
 * tp -- a pointer to the current transaction if there is one.  This is
 *       simply passed through to the xfs_iread() call.
 * ino -- the number of the inode desired.  This is the unique identifier
 *        within the file system for the inode being requested.
 * lock_flags -- flags indicating how to lock the inode.  See the comment
 *               for xfs_ilock() for a list of valid values.
 */
int
xfs_iget(
        xfs_mount_t     *mp,
        xfs_trans_t     *tp,
        xfs_ino_t       ino,
        uint            flags,
        uint            lock_flags,
        xfs_inode_t     **ipp)
{
        xfs_inode_t     *ip;
        int             error;
        xfs_perag_t     *pag;
        xfs_agino_t     agino;

        /*
         * xfs_reclaim_inode() uses the ILOCK to ensure an inode
         * doesn't get freed while it's being referenced during a
         * radix tree traversal here.  It assumes this function
         * acquires only the ILOCK (and therefore it has no need to
         * involve the IOLOCK in this synchronization).
         */
        ASSERT((lock_flags & (XFS_IOLOCK_EXCL | XFS_IOLOCK_SHARED)) == 0);

        /* reject inode numbers outside existing AGs */
        if (!ino || XFS_INO_TO_AGNO(mp, ino) >= mp->m_sb.sb_agcount)
                return EINVAL;

        /* get the perag structure and ensure that it's inode capable */
        pag = xfs_perag_get(mp, XFS_INO_TO_AGNO(mp, ino));
        agino = XFS_INO_TO_AGINO(mp, ino);

again:
        error = 0;
        rcu_read_lock();
        ip = radix_tree_lookup(&pag->pag_ici_root, agino);

        if (ip) {
                error = xfs_iget_cache_hit(pag, ip, ino, flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        } else {
                rcu_read_unlock();
                XFS_STATS_INC(xs_ig_missed);

                error = xfs_iget_cache_miss(mp, pag, tp, ino, &ip,
                                                        flags, lock_flags);
                if (error)
                        goto out_error_or_again;
        }
        xfs_perag_put(pag);

        *ipp = ip;

        /*
         * If we have a real type for an on-disk inode, we can set ops(&unlock)
         * now.  If it's a new inode being created, xfs_ialloc will handle it.
         */
        if (xfs_iflags_test(ip, XFS_INEW) && ip->i_d.di_mode != 0)
                xfs_setup_inode(ip);
        return 0;

out_error_or_again:
        if (error == EAGAIN) {
                delay(1);
                goto again;
        }
        xfs_perag_put(pag);
        return error;
}
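/*
 * Illustrative xfs_iget() caller (hedged): look an inode up with the inode
 * lock held shared, inspect it, then unlock and release the reference.
 * Note that in this era of the code the error values (EINVAL, EAGAIN above)
 * are positive. The helper name is hypothetical.
 */
static int
example_read_inode(
        xfs_mount_t     *mp,
        xfs_ino_t       ino)
{
        xfs_inode_t     *ip;
        int             error;

        error = xfs_iget(mp, NULL, ino, 0, XFS_ILOCK_SHARED, &ip);
        if (error)
                return error;

        /* ... inspect ip->i_d fields under the shared ILOCK ... */

        xfs_iunlock(ip, XFS_ILOCK_SHARED);
        IRELE(ip);
        return 0;
}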
/*
 * Allocate an inode on disk.
 * Mode is used to tell whether the new inode will need space, and whether
 * it is a directory.
 *
 * The arguments IO_agbp and alloc_done are defined to work within
 * the constraint of one allocation per transaction.
 * xfs_dialloc() is designed to be called twice if it has to do an
 * allocation to make more free inodes.  On the first call,
 * IO_agbp should be set to NULL. If an inode is available,
 * i.e., xfs_dialloc() did not need to do an allocation, an inode
 * number is returned.  In this case, IO_agbp would be set to the
 * current ag_buf and alloc_done set to false.
 * If an allocation needed to be done, xfs_dialloc would return
 * the current ag_buf in IO_agbp and set alloc_done to true.
 * The caller should then commit the current transaction, allocate a new
 * transaction, and call xfs_dialloc() again, passing in the previous
 * value of IO_agbp.  IO_agbp should be held across the transactions.
 * Since the agbp is locked across the two calls, the second call is
 * guaranteed to have a free inode available.
 *
 * Once we successfully pick an inode its number is returned and the
 * on-disk data structures are updated.  The inode itself is not read
 * in, since doing so would break ordering constraints with xfs_reclaim.
 */
int
xfs_dialloc(
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ino_t       parent,         /* parent inode (directory) */
        umode_t         mode,           /* mode bits for new inode */
        int             okalloc,        /* ok to allocate more space */
        xfs_buf_t       **IO_agbp,      /* in/out ag header's buffer */
        boolean_t       *alloc_done,    /* true if we needed to replenish
                                           inode freelist */
        xfs_ino_t       *inop)          /* inode number allocated */
{
        xfs_agnumber_t  agcount;        /* number of allocation groups */
        xfs_buf_t       *agbp;          /* allocation group header's buffer */
        xfs_agnumber_t  agno;           /* allocation group number */
        xfs_agi_t       *agi;           /* allocation group header structure */
        xfs_btree_cur_t *cur;           /* inode allocation btree cursor */
        int             error;          /* error return value */
        int             i;              /* result code */
        int             ialloced;       /* inode allocation status */
        int             noroom = 0;     /* no space for inode blk allocation */
        xfs_ino_t       ino;            /* fs-relative inode to be returned */
        /* REFERENCED */
        int             j;              /* result code */
        xfs_mount_t     *mp;            /* file system mount structure */
        int             offset;         /* index of inode in chunk */
        xfs_agino_t     pagino;         /* parent's AG relative inode # */
        xfs_agnumber_t  pagno;          /* parent's AG number */
        xfs_inobt_rec_incore_t rec;     /* inode allocation record */
        xfs_agnumber_t  tagno;          /* testing allocation group number */
        xfs_btree_cur_t *tcur;          /* temp cursor */
        xfs_inobt_rec_incore_t trec;    /* temp inode allocation record */
        struct xfs_perag *pag;

        if (*IO_agbp == NULL) {
                /*
                 * We do not have an agbp, so select an initial allocation
                 * group for inode allocation.
                 */
                agbp = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
                /*
                 * Couldn't find an allocation group satisfying the
                 * criteria, give up.
                 */
                if (!agbp) {
                        *inop = NULLFSINO;
                        return 0;
                }
                agi = XFS_BUF_TO_AGI(agbp);
                ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
        } else {
                /*
                 * Continue where we left off before.  In this case, we
                 * know that the allocation group has free inodes.
                 */
                agbp = *IO_agbp;
                agi = XFS_BUF_TO_AGI(agbp);
                ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
                ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
        }
        mp = tp->t_mountp;
        agcount = mp->m_sb.sb_agcount;
        agno = be32_to_cpu(agi->agi_seqno);
        tagno = agno;
        pagno = XFS_INO_TO_AGNO(mp, parent);
        pagino = XFS_INO_TO_AGINO(mp, parent);

        /*
         * If we have already hit the ceiling of inode blocks then clear
         * okalloc so we scan all available agi structures for a free
         * inode.
         */
        if (mp->m_maxicount &&
            mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
                noroom = 1;
                okalloc = 0;
        }

        /*
         * Loop until we find an allocation group that either has free inodes
         * or in which we can allocate some inodes.  Iterate through the
         * allocation groups upward, wrapping at the end.
         */
        *alloc_done = B_FALSE;
        while (!agi->agi_freecount) {
                /*
                 * Don't do anything if we're not supposed to allocate
                 * any blocks, just go on to the next ag.
                 */
                if (okalloc) {
                        /*
                         * Try to allocate some new inodes in the allocation
                         * group.
                         */
                        if ((error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced))) {
                                xfs_trans_brelse(tp, agbp);
                                if (error == ENOSPC) {
                                        *inop = NULLFSINO;
                                        return 0;
                                } else
                                        return error;
                        }
                        if (ialloced) {
                                /*
                                 * We successfully allocated some inodes,
                                 * return the current context to the caller
                                 * so that it can commit the current
                                 * transaction and call us again where we
                                 * left off.
                                 */
                                ASSERT(be32_to_cpu(agi->agi_freecount) > 0);
                                *alloc_done = B_TRUE;
                                *IO_agbp = agbp;
                                *inop = NULLFSINO;
                                return 0;
                        }
                }
                /*
                 * If it failed, give up on this ag.
                 */
                xfs_trans_brelse(tp, agbp);
                /*
                 * Go on to the next ag: get its ag header.
                 */
nextag:
                if (++tagno == agcount)
                        tagno = 0;
                if (tagno == agno) {
                        *inop = NULLFSINO;
                        return noroom ? ENOSPC : 0;
                }
                pag = xfs_perag_get(mp, tagno);
                if (pag->pagi_inodeok == 0) {
                        xfs_perag_put(pag);
                        goto nextag;
                }
                error = xfs_ialloc_read_agi(mp, tp, tagno, &agbp);
                xfs_perag_put(pag);
                if (error)
                        goto nextag;
                agi = XFS_BUF_TO_AGI(agbp);
                ASSERT(agi->agi_magicnum == cpu_to_be32(XFS_AGI_MAGIC));
        }
        /*
         * Here with an allocation group that has a free inode.
         * Reset agno since we may have chosen a new ag in the
         * loop above.
         */
        agno = tagno;
        *IO_agbp = NULL;
        pag = xfs_perag_get(mp, agno);

restart_pagno:
        cur = xfs_inobt_init_cursor(mp, tp, agbp, be32_to_cpu(agi->agi_seqno));
        /*
         * If pagino is 0 (this is the root inode allocation) use newino.
         * This must work because we've just allocated some.
         */
        if (!pagino)
                pagino = be32_to_cpu(agi->agi_newino);

        error = xfs_check_agi_freecount(cur, agi);
        if (error)
                goto error0;

        /*
         * If in the same AG as the parent, try to get near the parent.
         */
        if (pagno == agno) {
                int             doneleft;       /* done, to the left */
                int             doneright;      /* done, to the right */
                int             searchdistance = 10;

                error = xfs_inobt_lookup(cur, pagino, XFS_LOOKUP_LE, &i);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

                error = xfs_inobt_get_rec(cur, &rec, &j);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(j == 1, error0);

                if (rec.ir_freecount > 0) {
                        /*
                         * Found a free inode in the same chunk
                         * as the parent, done.
                         */
                        goto alloc_inode;
                }

                /*
                 * In the same AG as parent, but parent's chunk is full.
                 */

                /* duplicate the cursor, search left & right simultaneously */
                error = xfs_btree_dup_cursor(cur, &tcur);
                if (error)
                        goto error0;

                /*
                 * Skip to last blocks looked up if same parent inode.
                 */
                if (pagino != NULLAGINO &&
                    pag->pagl_pagino == pagino &&
                    pag->pagl_leftrec != NULLAGINO &&
                    pag->pagl_rightrec != NULLAGINO) {
                        error = xfs_ialloc_get_rec(tcur, pag->pagl_leftrec,
                                                   &trec, &doneleft, 1);
                        if (error)
                                goto error1;

                        error = xfs_ialloc_get_rec(cur, pag->pagl_rightrec,
                                                   &rec, &doneright, 0);
                        if (error)
                                goto error1;
                } else {
                        /* search left with tcur, back up 1 record */
                        error = xfs_ialloc_next_rec(tcur, &trec, &doneleft, 1);
                        if (error)
                                goto error1;

                        /* search right with cur, go forward 1 record. */
                        error = xfs_ialloc_next_rec(cur, &rec, &doneright, 0);
                        if (error)
                                goto error1;
                }

                /*
                 * Loop until we find an inode chunk with a free inode.
                 */
                while (!doneleft || !doneright) {
                        int     useleft;  /* using left inode chunk this time */

                        if (!--searchdistance) {
                                /*
                                 * Not in range - save last search
                                 * location and allocate a new inode
                                 */
                                xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
                                goto newino;
                        }

                        /* figure out the closer block if both are valid. */
                        if (!doneleft && !doneright) {
                                useleft = pagino -
                                 (trec.ir_startino + XFS_INODES_PER_CHUNK - 1) <
                                  rec.ir_startino - pagino;
                        } else {
                                useleft = !doneleft;
                        }

                        /* free inodes to the left? */
                        if (useleft && trec.ir_freecount) {
                                rec = trec;
                                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                                cur = tcur;

                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
                                goto alloc_inode;
                        }

                        /* free inodes to the right? */
                        if (!useleft && rec.ir_freecount) {
                                xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);

                                pag->pagl_leftrec = trec.ir_startino;
                                pag->pagl_rightrec = rec.ir_startino;
                                pag->pagl_pagino = pagino;
                                goto alloc_inode;
                        }

                        /* get next record to check */
                        if (useleft) {
                                error = xfs_ialloc_next_rec(tcur, &trec,
                                                                 &doneleft, 1);
                        } else {
                                error = xfs_ialloc_next_rec(cur, &rec,
                                                                 &doneright, 0);
                        }
                        if (error)
                                goto error1;
                }

                /*
                 * We've reached the end of the btree. Because we are only
                 * searching a small chunk of the btree each search, there are
                 * obviously free inodes closer to the parent inode than we
                 * are now. Restart the search again.
                 */
                pag->pagl_pagino = NULLAGINO;
                pag->pagl_leftrec = NULLAGINO;
                pag->pagl_rightrec = NULLAGINO;
                xfs_btree_del_cursor(tcur, XFS_BTREE_NOERROR);
                xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
                goto restart_pagno;
        }

        /*
         * In a different AG from the parent.
         * See if the most recently allocated block has any free.
         */
newino:
        if (agi->agi_newino != cpu_to_be32(NULLAGINO)) {
                error = xfs_inobt_lookup(cur, be32_to_cpu(agi->agi_newino),
                                         XFS_LOOKUP_EQ, &i);
                if (error)
                        goto error0;

                if (i == 1) {
                        error = xfs_inobt_get_rec(cur, &rec, &j);
                        if (error)
                                goto error0;

                        if (j == 1 && rec.ir_freecount > 0) {
                                /*
                                 * The last chunk allocated in the group
                                 * still has a free inode.
                                 */
                                goto alloc_inode;
                        }
                }
        }

        /*
         * None left in the last group, search the whole AG
         */
        error = xfs_inobt_lookup(cur, 0, XFS_LOOKUP_GE, &i);
        if (error)
                goto error0;
        XFS_WANT_CORRUPTED_GOTO(i == 1, error0);

        for (;;) {
                error = xfs_inobt_get_rec(cur, &rec, &i);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
                if (rec.ir_freecount > 0)
                        break;
                error = xfs_btree_increment(cur, 0, &i);
                if (error)
                        goto error0;
                XFS_WANT_CORRUPTED_GOTO(i == 1, error0);
        }

alloc_inode:
        offset = xfs_ialloc_find_free(&rec.ir_free);
        ASSERT(offset >= 0);
        ASSERT(offset < XFS_INODES_PER_CHUNK);
        ASSERT((XFS_AGINO_TO_OFFSET(mp, rec.ir_startino) %
                                   XFS_INODES_PER_CHUNK) == 0);
        ino = XFS_AGINO_TO_INO(mp, agno, rec.ir_startino + offset);
        rec.ir_free &= ~XFS_INOBT_MASK(offset);
        rec.ir_freecount--;
        error = xfs_inobt_update(cur, &rec);
        if (error)
                goto error0;
        be32_add_cpu(&agi->agi_freecount, -1);
        xfs_ialloc_log_agi(tp, agbp, XFS_AGI_FREECOUNT);
        pag->pagi_freecount--;

        error = xfs_check_agi_freecount(cur, agi);
        if (error)
                goto error0;

        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
        xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, -1);
        xfs_perag_put(pag);
        *inop = ino;
        return 0;
error1:
        xfs_btree_del_cursor(tcur, XFS_BTREE_ERROR);
error0:
        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
        xfs_perag_put(pag);
        return error;
}
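/*
 * Hedged sketch of the two-transaction protocol described in the comment
 * above: keep agbp across both calls, committing the first transaction in
 * between when alloc_done comes back true. The helper name is hypothetical
 * and the commit/renew step is only indicated by a comment.
 */
static int
example_dialloc_old(
        xfs_trans_t     **tpp,
        xfs_ino_t       parent,
        umode_t         mode,
        xfs_ino_t       *inop)
{
        xfs_buf_t       *agbp = NULL;
        boolean_t       alloc_done = B_FALSE;
        int             error;

        error = xfs_dialloc(*tpp, parent, mode, 1, &agbp, &alloc_done, inop);
        if (error || !alloc_done)
                return error;

        /* commit *tpp and start a fresh transaction in *tpp here */

        return xfs_dialloc(*tpp, parent, mode, 1, &agbp, &alloc_done, inop);
}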
/*
 * Select an allocation group to look for a free inode in, based on the parent
 * inode and then mode.  Return the allocation group buffer.
 */
STATIC xfs_buf_t *                      /* allocation group buffer */
xfs_ialloc_ag_select(
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_ino_t       parent,         /* parent directory inode number */
        umode_t         mode,           /* bits set to indicate file type */
        int             okalloc)        /* ok to allocate more space */
{
        xfs_buf_t       *agbp;          /* allocation group header buffer */
        xfs_agnumber_t  agcount;        /* number of ag's in the filesystem */
        xfs_agnumber_t  agno;           /* current ag number */
        int             flags;          /* alloc buffer locking flags */
        xfs_extlen_t    ineed;          /* blocks needed for inode allocation */
        xfs_extlen_t    longest = 0;    /* longest extent available */
        xfs_mount_t     *mp;            /* mount point structure */
        int             needspace;      /* file mode implies space allocated */
        xfs_perag_t     *pag;           /* per allocation group data */
        xfs_agnumber_t  pagno;          /* parent (starting) ag number */

        /*
         * Files of these types need at least one block if length > 0
         * (and they won't fit in the inode, but that's hard to figure out).
         */
        needspace = S_ISDIR(mode) || S_ISREG(mode) || S_ISLNK(mode);
        mp = tp->t_mountp;
        agcount = mp->m_maxagi;
        if (S_ISDIR(mode))
                pagno = xfs_ialloc_next_ag(mp);
        else {
                pagno = XFS_INO_TO_AGNO(mp, parent);
                if (pagno >= agcount)
                        pagno = 0;
        }
        ASSERT(pagno < agcount);

        /*
         * Loop through allocation groups, looking for one with a little
         * free space in it.  Note we don't look for free inodes, exactly.
         * Instead, we include whether there is a need to allocate inodes
         * to mean that blocks must be allocated for them,
         * if none are currently free.
         */
        agno = pagno;
        flags = XFS_ALLOC_FLAG_TRYLOCK;
        for (;;) {
                pag = xfs_perag_get(mp, agno);
                if (!pag->pagi_init) {
                        if (xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
                                agbp = NULL;
                                goto nextag;
                        }
                } else
                        agbp = NULL;

                if (!pag->pagi_inodeok) {
                        xfs_ialloc_next_ag(mp);
                        goto unlock_nextag;
                }

                /*
                 * Is there enough free space for the file plus a block
                 * of inodes (if we need to allocate some)?
                 */
                ineed = pag->pagi_freecount ? 0 : XFS_IALLOC_BLOCKS(mp);
                if (ineed && !pag->pagf_init) {
                        if (agbp == NULL &&
                            xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
                                agbp = NULL;
                                goto nextag;
                        }
                        (void)xfs_alloc_pagf_init(mp, tp, agno, flags);
                }
                if (!ineed || pag->pagf_init) {
                        if (ineed && !(longest = pag->pagf_longest))
                                longest = pag->pagf_flcount > 0;
                        if (!ineed ||
                            (pag->pagf_freeblks >= needspace + ineed &&
                             longest >= ineed &&
                             okalloc)) {
                                if (agbp == NULL &&
                                    xfs_ialloc_read_agi(mp, tp, agno, &agbp)) {
                                        agbp = NULL;
                                        goto nextag;
                                }
                                xfs_perag_put(pag);
                                return agbp;
                        }
                }
unlock_nextag:
                if (agbp)
                        xfs_trans_brelse(tp, agbp);
nextag:
                xfs_perag_put(pag);
                /*
                 * No point in iterating over the rest, if we're shutting
                 * down.
                 */
                if (XFS_FORCED_SHUTDOWN(mp))
                        return NULL;
                agno++;
                if (agno >= agcount)
                        agno = 0;
                if (agno == pagno) {
                        if (flags == 0)
                                return NULL;
                        flags = 0;
                }
        }
}
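/*
 * For new directories the starting AG above comes from a rotor rather than
 * the parent's AG, spreading directories across the filesystem. A hedged
 * sketch of what such a rotor helper plausibly looks like; the field and
 * lock names follow the m_agirotor convention but are assumptions here,
 * not a reproduction of the real xfs_ialloc_next_ag().
 */
static xfs_agnumber_t
example_next_ag_rotor(
        xfs_mount_t     *mp)
{
        xfs_agnumber_t  agno;

        spin_lock(&mp->m_agirotor_lock);
        agno = mp->m_agirotor;
        if (++mp->m_agirotor == mp->m_maxagi)
                mp->m_agirotor = 0;
        spin_unlock(&mp->m_agirotor_lock);

        return agno;
}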
/*
 * Allocate new inodes in the allocation group specified by agbp.
 * Return 0 for success, else error code.
 */
STATIC int                              /* error code or 0 */
xfs_ialloc_ag_alloc(
        xfs_trans_t     *tp,            /* transaction pointer */
        xfs_buf_t       *agbp,          /* alloc group buffer */
        int             *alloc)
{
        xfs_agi_t       *agi;           /* allocation group header */
        xfs_alloc_arg_t args;           /* allocation argument structure */
        xfs_btree_cur_t *cur;           /* inode btree cursor */
        xfs_agnumber_t  agno;
        int             error;
        int             i;
        xfs_agino_t     newino;         /* new first inode's number */
        xfs_agino_t     newlen;         /* new number of inodes */
        xfs_agino_t     thisino;        /* current inode number, for loop */
        int             isaligned = 0;  /* inode allocation at stripe unit */
                                        /* boundary */
        struct xfs_perag *pag;

        args.tp = tp;
        args.mp = tp->t_mountp;

        /*
         * Locking will ensure that we don't have two callers in here
         * at one time.
         */
        newlen = XFS_IALLOC_INODES(args.mp);
        if (args.mp->m_maxicount &&
            args.mp->m_sb.sb_icount + newlen > args.mp->m_maxicount)
                return XFS_ERROR(ENOSPC);
        args.minlen = args.maxlen = XFS_IALLOC_BLOCKS(args.mp);
        /*
         * First try to allocate inodes contiguous with the last-allocated
         * chunk of inodes.  If the filesystem is striped, this will fill
         * an entire stripe unit with inodes.
         */
        agi = XFS_BUF_TO_AGI(agbp);
        newino = be32_to_cpu(agi->agi_newino);
        agno = be32_to_cpu(agi->agi_seqno);
        args.agbno = XFS_AGINO_TO_AGBNO(args.mp, newino) +
                        XFS_IALLOC_BLOCKS(args.mp);
        if (likely(newino != NULLAGINO &&
                  (args.agbno < be32_to_cpu(agi->agi_length)))) {
                args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
                args.type = XFS_ALLOCTYPE_THIS_BNO;
                args.mod = args.total = args.wasdel = args.isfl =
                        args.userdata = args.minalignslop = 0;
                args.prod = 1;

                /*
                 * We need to take into account alignment here to ensure that
                 * we don't modify the free list if we fail to have an exact
                 * block.  If we don't have an exact match, and every other
                 * allocation attempt fails, we'll end up cancelling a dirty
                 * transaction and shutting down.
                 *
                 * For an exact allocation, alignment must be 1,
                 * however we need to take cluster alignment into account when
                 * fixing up the freelist.  Use the minalignslop field to
                 * indicate that extra blocks might be required for alignment,
                 * but not to use them in the actual exact allocation.
                 */
                args.alignment = 1;
                args.minalignslop = xfs_ialloc_cluster_alignment(&args) - 1;

                /* Allow space for the inode btree to split. */
                args.minleft = args.mp->m_in_maxlevels - 1;
                if ((error = xfs_alloc_vextent(&args)))
                        return error;
        } else
                args.fsbno = NULLFSBLOCK;

        if (unlikely(args.fsbno == NULLFSBLOCK)) {
                /*
                 * Set the alignment for the allocation.
                 * If stripe alignment is turned on then align at stripe unit
                 * boundary.
                 * If the cluster size is smaller than a filesystem block
                 * then we're doing I/O for inodes in filesystem block size
                 * pieces, so don't need alignment anyway.
                 */
                isaligned = 0;
                if (args.mp->m_sinoalign) {
                        ASSERT(!(args.mp->m_flags & XFS_MOUNT_NOALIGN));
                        args.alignment = args.mp->m_dalign;
                        isaligned = 1;
                } else
                        args.alignment = xfs_ialloc_cluster_alignment(&args);
                /*
                 * Need to figure out where to allocate the inode blocks.
                 * Ideally they should be spaced out through the a.g.
                 * For now, just allocate blocks up front.
                 */
                args.agbno = be32_to_cpu(agi->agi_root);
                args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
                /*
                 * Allocate a fixed-size extent of inodes.
                 */
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
                args.mod = args.total = args.wasdel = args.isfl =
                        args.userdata = args.minalignslop = 0;
                args.prod = 1;
                /*
                 * Allow space for the inode btree to split.
                 */
                args.minleft = args.mp->m_in_maxlevels - 1;
                if ((error = xfs_alloc_vextent(&args)))
                        return error;
        }

        /*
         * If stripe alignment is turned on, then try again with cluster
         * alignment.
         */
        if (isaligned && args.fsbno == NULLFSBLOCK) {
                args.type = XFS_ALLOCTYPE_NEAR_BNO;
                args.agbno = be32_to_cpu(agi->agi_root);
                args.fsbno = XFS_AGB_TO_FSB(args.mp, agno, args.agbno);
                args.alignment = xfs_ialloc_cluster_alignment(&args);
                if ((error = xfs_alloc_vextent(&args)))
                        return error;
        }

        if (args.fsbno == NULLFSBLOCK) {
                *alloc = 0;
                return 0;
        }
        ASSERT(args.len == args.minlen);

        /*
         * Stamp and write the inode buffers.
         *
         * Seed the new inode cluster with a random generation number. This
         * prevents short-term reuse of generation numbers if a chunk is
         * freed and then immediately reallocated. We use random numbers
         * rather than a linear progression to prevent the next generation
         * number from being easily guessable.
         */
        error = xfs_ialloc_inode_init(args.mp, tp, agno, args.agbno,
                        args.len, prandom_u32());
        if (error)
                return error;

        /*
         * Convert the results.
         */
        newino = XFS_OFFBNO_TO_AGINO(args.mp, args.agbno, 0);
        be32_add_cpu(&agi->agi_count, newlen);
        be32_add_cpu(&agi->agi_freecount, newlen);
        pag = xfs_perag_get(args.mp, agno);
        pag->pagi_freecount += newlen;
        xfs_perag_put(pag);
        agi->agi_newino = cpu_to_be32(newino);

        /*
         * Insert records describing the new inode chunk into the btree.
         */
        cur = xfs_inobt_init_cursor(args.mp, tp, agbp, agno);
        for (thisino = newino;
             thisino < newino + newlen;
             thisino += XFS_INODES_PER_CHUNK) {
                cur->bc_rec.i.ir_startino = thisino;
                cur->bc_rec.i.ir_freecount = XFS_INODES_PER_CHUNK;
                cur->bc_rec.i.ir_free = XFS_INOBT_ALL_FREE;
                error = xfs_btree_lookup(cur, XFS_LOOKUP_EQ, &i);
                if (error) {
                        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                        return error;
                }
                ASSERT(i == 0);
                error = xfs_btree_insert(cur, &i);
                if (error) {
                        xfs_btree_del_cursor(cur, XFS_BTREE_ERROR);
                        return error;
                }
                ASSERT(i == 1);
        }
        xfs_btree_del_cursor(cur, XFS_BTREE_NOERROR);
        /*
         * Log allocation group header fields
         */
        xfs_ialloc_log_agi(tp, agbp,
                XFS_AGI_COUNT | XFS_AGI_FREECOUNT | XFS_AGI_NEWINO);
        /*
         * Modify/log superblock values for inode count and inode free count.
         */
        xfs_trans_mod_sb(tp, XFS_TRANS_SB_ICOUNT, (long)newlen);
        xfs_trans_mod_sb(tp, XFS_TRANS_SB_IFREE, (long)newlen);
        *alloc = 1;
        return 0;
}
/*
 * Allocate an inode on disk.
 *
 * Mode is used to tell whether the new inode will need space, and whether it
 * is a directory.
 *
 * This function is designed to be called twice if it has to do an allocation
 * to make more free inodes.  On the first call, *IO_agbp should be set to
 * NULL. If an inode is available without having to perform an allocation, an
 * inode number is returned.  In this case, *IO_agbp is set to NULL.  If an
 * allocation needs to be done, xfs_dialloc returns the current AGI buffer in
 * *IO_agbp.  The caller should then commit the current transaction, allocate
 * a new transaction, and call xfs_dialloc() again, passing in the previous
 * value of *IO_agbp.  IO_agbp should be held across the transactions.  Since
 * the AGI buffer is locked across the two calls, the second call is
 * guaranteed to have a free inode available.
 *
 * Once we successfully pick an inode its number is returned and the on-disk
 * data structures are updated.  The inode itself is not read in, since doing
 * so would break ordering constraints with xfs_reclaim.
 */
int
xfs_dialloc(
        struct xfs_trans        *tp,
        xfs_ino_t               parent,
        umode_t                 mode,
        int                     okalloc,
        struct xfs_buf          **IO_agbp,
        xfs_ino_t               *inop)
{
        struct xfs_mount        *mp = tp->t_mountp;
        struct xfs_buf          *agbp;
        xfs_agnumber_t          agno;
        int                     error;
        int                     ialloced;
        int                     noroom = 0;
        xfs_agnumber_t          start_agno;
        struct xfs_perag        *pag;

        if (*IO_agbp) {
                /*
                 * If the caller passes in a pointer to the AGI buffer,
                 * continue where we left off before.  In this case, we
                 * know that the allocation group has free inodes.
                 */
                agbp = *IO_agbp;
                goto out_alloc;
        }

        /*
         * We do not have an agbp, so select an initial allocation
         * group for inode allocation.
         */
        start_agno = xfs_ialloc_ag_select(tp, parent, mode, okalloc);
        if (start_agno == NULLAGNUMBER) {
                *inop = NULLFSINO;
                return 0;
        }

        /*
         * If we have already hit the ceiling of inode blocks then clear
         * okalloc so we scan all available agi structures for a free
         * inode.
         */
        if (mp->m_maxicount &&
            mp->m_sb.sb_icount + XFS_IALLOC_INODES(mp) > mp->m_maxicount) {
                noroom = 1;
                okalloc = 0;
        }

        /*
         * Loop until we find an allocation group that either has free inodes
         * or in which we can allocate some inodes.  Iterate through the
         * allocation groups upward, wrapping at the end.
         */
        agno = start_agno;
        for (;;) {
                pag = xfs_perag_get(mp, agno);
                if (!pag->pagi_inodeok) {
                        xfs_ialloc_next_ag(mp);
                        goto nextag;
                }

                if (!pag->pagi_init) {
                        error = xfs_ialloc_pagi_init(mp, tp, agno);
                        if (error)
                                goto out_error;
                }

                /*
                 * Do a first racy fast path check if this AG is usable.
                 */
                if (!pag->pagi_freecount && !okalloc)
                        goto nextag;

                /*
                 * Then read in the AGI buffer and recheck with the AGI buffer
                 * lock held.
                 */
                error = xfs_ialloc_read_agi(mp, tp, agno, &agbp);
                if (error)
                        goto out_error;

                if (pag->pagi_freecount) {
                        xfs_perag_put(pag);
                        goto out_alloc;
                }

                if (!okalloc)
                        goto nextag_relse_buffer;

                error = xfs_ialloc_ag_alloc(tp, agbp, &ialloced);
                if (error) {
                        xfs_trans_brelse(tp, agbp);

                        if (error != ENOSPC)
                                goto out_error;

                        xfs_perag_put(pag);
                        *inop = NULLFSINO;
                        return 0;
                }

                if (ialloced) {
                        /*
                         * We successfully allocated some inodes, return
                         * the current context to the caller so that it
                         * can commit the current transaction and call
                         * us again where we left off.
                         */
                        ASSERT(pag->pagi_freecount > 0);
                        xfs_perag_put(pag);

                        *IO_agbp = agbp;
                        *inop = NULLFSINO;
                        return 0;
                }

nextag_relse_buffer:
                xfs_trans_brelse(tp, agbp);
nextag:
                xfs_perag_put(pag);
                if (++agno == mp->m_sb.sb_agcount)
                        agno = 0;
                if (agno == start_agno) {
                        *inop = NULLFSINO;
                        return noroom ? ENOSPC : 0;
                }
        }

out_alloc:
        *IO_agbp = NULL;
        return xfs_dialloc_ag(tp, agbp, parent, inop);
out_error:
        xfs_perag_put(pag);
        return XFS_ERROR(error);
}
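/*
 * Hedged sketch of a caller for the newer two-call interface documented
 * above: a non-NULL *IO_agbp on return means "commit the transaction and
 * call again"; NULL means we either got an inode number or ran out of
 * space. The helper name is hypothetical and transaction renewal is only
 * indicated by a comment.
 */
static int
example_dialloc(
        struct xfs_trans        **tpp,
        xfs_ino_t               parent,
        umode_t                 mode,
        xfs_ino_t               *inop)
{
        struct xfs_buf          *agbp = NULL;
        int                     error;

        error = xfs_dialloc(*tpp, parent, mode, 1, &agbp, inop);
        if (error || !agbp)
                return error;

        /* commit *tpp and start a fresh transaction in *tpp here */

        return xfs_dialloc(*tpp, parent, mode, 1, &agbp, inop);
}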
/*
 * Walk the AGs and reclaim the inodes in them. Even if the filesystem is
 * corrupted, we still want to try to reclaim all the inodes. If we don't,
 * then a shutdown during the filesystem unmount reclaim walk will leak all
 * the unreclaimed inodes.
 */
int
xfs_reclaim_inodes_ag(
        struct xfs_mount        *mp,
        int                     flags,
        int                     *nr_to_scan)
{
        struct xfs_perag        *pag;
        int                     error = 0;
        int                     last_error = 0;
        xfs_agnumber_t          ag;
        int                     trylock = flags & SYNC_TRYLOCK;
        int                     skipped;

restart:
        ag = 0;
        skipped = 0;
        while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
                unsigned long   first_index = 0;
                int             done = 0;
                int             nr_found = 0;

                ag = pag->pag_agno + 1;

                if (trylock) {
                        if (!mutex_trylock(&pag->pag_ici_reclaim_lock)) {
                                skipped++;
                                xfs_perag_put(pag);
                                continue;
                        }
                        first_index = pag->pag_ici_reclaim_cursor;
                } else
                        mutex_lock(&pag->pag_ici_reclaim_lock);

                do {
                        struct xfs_inode *batch[XFS_LOOKUP_BATCH];
                        int     i;

                        rcu_read_lock();
                        nr_found = radix_tree_gang_lookup_tag(
                                        &pag->pag_ici_root,
                                        (void **)batch, first_index,
                                        XFS_LOOKUP_BATCH,
                                        XFS_ICI_RECLAIM_TAG);
                        if (!nr_found) {
                                done = 1;
                                rcu_read_unlock();
                                break;
                        }

                        /*
                         * Grab the inodes before we drop the lock. If we found
                         * nothing, nr == 0 and the loop will be skipped.
                         */
                        for (i = 0; i < nr_found; i++) {
                                struct xfs_inode *ip = batch[i];

                                if (done || xfs_reclaim_inode_grab(ip, flags))
                                        batch[i] = NULL;

                                /*
                                 * Update the index for the next lookup. Catch
                                 * overflows into the next AG range which can
                                 * occur if we have inodes in the last block of
                                 * the AG and we are currently pointing to the
                                 * last inode.
                                 *
                                 * Because we may see inodes that are from the
                                 * wrong AG due to RCU freeing and
                                 * reallocation, only update the index if it
                                 * lies in this AG. It was a race that led us
                                 * to see this inode, so another lookup from
                                 * the same index will not find it again.
                                 */
                                if (XFS_INO_TO_AGNO(mp, ip->i_ino) !=
                                                                pag->pag_agno)
                                        continue;
                                first_index = XFS_INO_TO_AGINO(mp,
                                                        ip->i_ino + 1);
                                if (first_index <
                                                XFS_INO_TO_AGINO(mp, ip->i_ino))
                                        done = 1;
                        }

                        /* unlock now we've grabbed the inodes. */
                        rcu_read_unlock();

                        for (i = 0; i < nr_found; i++) {
                                if (!batch[i])
                                        continue;
                                error = xfs_reclaim_inode(batch[i], pag,
                                                                flags);
                                if (error && last_error != EFSCORRUPTED)
                                        last_error = error;
                        }

                        *nr_to_scan -= XFS_LOOKUP_BATCH;

                } while (nr_found && !done && *nr_to_scan > 0);

                if (trylock && !done)
                        pag->pag_ici_reclaim_cursor = first_index;
                else
                        pag->pag_ici_reclaim_cursor = 0;
                mutex_unlock(&pag->pag_ici_reclaim_lock);
                xfs_perag_put(pag);
        }

        /*
         * If we skipped any AG, and we still have scan count remaining, do
         * another pass, this time using blocking reclaim semantics (i.e.
         * waiting on the reclaim locks and ignoring the reclaim cursors).
         * This ensures that when we get more reclaimers than AGs we block
         * rather than spin trying to execute reclaim.
         */
        if (trylock && skipped && *nr_to_scan > 0) {
                trylock = 0;
                goto restart;
        }
        return XFS_ERROR(last_error);
}
STATIC int
xfs_trim_extents(
        struct xfs_mount        *mp,
        xfs_agnumber_t          agno,
        xfs_fsblock_t           start,
        xfs_fsblock_t           len,
        xfs_fsblock_t           minlen,
        __uint64_t              *blocks_trimmed)
{
        struct block_device     *bdev = mp->m_ddev_targp->bt_bdev;
        struct xfs_btree_cur    *cur;
        struct xfs_buf          *agbp;
        struct xfs_perag        *pag;
        int                     error;
        int                     i;

        pag = xfs_perag_get(mp, agno);

        error = xfs_alloc_read_agf(mp, NULL, agno, 0, &agbp);
        if (error || !agbp)
                goto out_put_perag;

        cur = xfs_allocbt_init_cursor(mp, NULL, agbp, agno, XFS_BTNUM_CNT);

        /*
         * Force out the log.  This means any transactions that might have
         * freed space before we took the AGF buffer lock are now on disk,
         * and the volatile disk cache is flushed.
         */
        xfs_log_force(mp, XFS_LOG_SYNC);

        /*
         * Look up the longest btree in the AGF and start with it.
         */
        error = xfs_alloc_lookup_le(cur, 0,
                        be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest), &i);
        if (error)
                goto out_del_cursor;

        /*
         * Loop until we are done with all extents that are large
         * enough to be worth discarding.
         */
        while (i) {
                xfs_agblock_t   fbno;
                xfs_extlen_t    flen;

                error = xfs_alloc_get_rec(cur, &fbno, &flen, &i);
                if (error)
                        goto out_del_cursor;
                XFS_WANT_CORRUPTED_GOTO(i == 1, out_del_cursor);
                ASSERT(flen <= be32_to_cpu(XFS_BUF_TO_AGF(agbp)->agf_longest));

                /*
                 * Too small?  Give up.
                 */
                if (flen < minlen) {
                        trace_xfs_discard_toosmall(mp, agno, fbno, flen);
                        goto out_del_cursor;
                }

                /*
                 * If the extent is entirely outside of the range we are
                 * supposed to discard skip it.  Do not bother to trim
                 * down partially overlapping ranges for now.
                 */
                if (XFS_AGB_TO_FSB(mp, agno, fbno) + flen < start ||
                    XFS_AGB_TO_FSB(mp, agno, fbno) >= start + len) {
                        trace_xfs_discard_exclude(mp, agno, fbno, flen);
                        goto next_extent;
                }

                /*
                 * If any blocks in the range are still busy, skip the
                 * discard and try again the next time.
                 */
                if (xfs_alloc_busy_search(mp, agno, fbno, flen)) {
                        trace_xfs_discard_busy(mp, agno, fbno, flen);
                        goto next_extent;
                }

                trace_xfs_discard_extent(mp, agno, fbno, flen);
                error = -blkdev_issue_discard(bdev,
                                XFS_AGB_TO_DADDR(mp, agno, fbno),
                                XFS_FSB_TO_BB(mp, flen),
                                GFP_NOFS, 0);
                if (error)
                        goto out_del_cursor;
                *blocks_trimmed += flen;

next_extent:
                error = xfs_btree_decrement(cur, 0, &i);
                if (error)
                        goto out_del_cursor;
        }

out_del_cursor:
        xfs_btree_del_cursor(cur, error ? XFS_BTREE_ERROR : XFS_BTREE_NOERROR);
        xfs_buf_relse(agbp);
out_put_perag:
        xfs_perag_put(pag);
        return error;
}
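/*
 * Illustrative FITRIM-style driver for the function above (hedged): walk
 * every AG and trim each one's free space, accumulating the total blocks
 * discarded. Range clamping and granularity handling are deliberately
 * omitted; the helper name is hypothetical.
 */
static int
example_trim_all_ags(
        struct xfs_mount        *mp,
        xfs_fsblock_t           start,
        xfs_fsblock_t           len,
        xfs_fsblock_t           minlen,
        __uint64_t              *blocks_trimmed)
{
        xfs_agnumber_t          agno;
        int                     error = 0;

        for (agno = 0; agno < mp->m_sb.sb_agcount; agno++) {
                error = xfs_trim_extents(mp, agno, start, len, minlen,
                                         blocks_trimmed);
                if (error)
                        break;
        }
        return error;
}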
static int
_xfs_filestream_pick_ag(
        xfs_mount_t     *mp,
        xfs_agnumber_t  startag,
        xfs_agnumber_t  *agp,
        int             flags,
        xfs_extlen_t    minlen)
{
        int             streams, max_streams;
        int             err, trylock, nscan;
        xfs_extlen_t    longest, free, minfree, maxfree = 0;
        xfs_agnumber_t  ag, max_ag = NULLAGNUMBER;
        struct xfs_perag *pag;

        /* 2% of an AG's blocks must be free for it to be chosen. */
        minfree = mp->m_sb.sb_agblocks / 50;

        ag = startag;
        *agp = NULLAGNUMBER;

        /* For the first pass, don't sleep trying to init the per-AG. */
        trylock = XFS_ALLOC_FLAG_TRYLOCK;

        for (nscan = 0; 1; nscan++) {
                pag = xfs_perag_get(mp, ag);
                TRACE_AG_SCAN(mp, ag, atomic_read(&pag->pagf_fstrms));

                if (!pag->pagf_init) {
                        err = xfs_alloc_pagf_init(mp, NULL, ag, trylock);
                        if (err && !trylock) {
                                xfs_perag_put(pag);
                                return err;
                        }
                }

                /* Might fail sometimes during the 1st pass with trylock set. */
                if (!pag->pagf_init)
                        goto next_ag;

                /* Keep track of the AG with the most free blocks. */
                if (pag->pagf_freeblks > maxfree) {
                        maxfree = pag->pagf_freeblks;
                        max_streams = atomic_read(&pag->pagf_fstrms);
                        max_ag = ag;
                }

                /*
                 * The AG reference count does two things: it enforces mutual
                 * exclusion when examining the suitability of an AG in this
                 * loop, and it guards against two filestreams being
                 * established in the same AG as each other.
                 */
                if (xfs_filestream_get_ag(mp, ag) > 1) {
                        xfs_filestream_put_ag(mp, ag);
                        goto next_ag;
                }

                longest = xfs_alloc_longest_free_extent(mp, pag);
                if (((minlen && longest >= minlen) ||
                     (!minlen && pag->pagf_freeblks >= minfree)) &&
                    (!pag->pagf_metadata || !(flags & XFS_PICK_USERDATA) ||
                     (flags & XFS_PICK_LOWSPACE))) {

                        /* Break out, retaining the reference on the AG. */
                        free = pag->pagf_freeblks;
                        streams = atomic_read(&pag->pagf_fstrms);
                        xfs_perag_put(pag);
                        *agp = ag;
                        break;
                }

                /* Drop the reference on this AG, it's not usable. */
                xfs_filestream_put_ag(mp, ag);
next_ag:
                xfs_perag_put(pag);
                /* Move to the next AG, wrapping to AG 0 if necessary. */
                if (++ag >= mp->m_sb.sb_agcount)
                        ag = 0;

                /* If a full pass of the AGs hasn't been done yet, continue. */
                if (ag != startag)
                        continue;

                /* Allow sleeping in xfs_alloc_pagf_init() on the 2nd pass. */
                if (trylock != 0) {
                        trylock = 0;
                        continue;
                }

                /* Finally, if lowspace wasn't set, set it for the 3rd pass. */
                if (!(flags & XFS_PICK_LOWSPACE)) {
                        flags |= XFS_PICK_LOWSPACE;
                        continue;
                }

                /*
                 * Take the AG with the most free space, regardless of whether
                 * it's already in use by another filestream.
                 */
                if (max_ag != NULLAGNUMBER) {
                        xfs_filestream_get_ag(mp, max_ag);
                        TRACE_AG_PICK1(mp, max_ag, maxfree);
                        streams = max_streams;
                        free = maxfree;
                        *agp = max_ag;
                        break;
                }

                /* take AG 0 if none matched */
                TRACE_AG_PICK1(mp, max_ag, maxfree);
                *agp = 0;
                return 0;
        }

        TRACE_AG_PICK2(mp, startag, *agp, streams, free, nscan, flags);

        return 0;
}