void
__xfs_inode_set_reclaim_tag(
	struct xfs_perag	*pag,
	struct xfs_inode	*ip)
{
	radix_tree_tag_set(&pag->pag_ici_root,
			   XFS_INO_TO_AGINO(ip->i_mount, ip->i_ino),
			   XFS_ICI_RECLAIM_TAG);

	if (!pag->pag_ici_reclaimable) {
		/* propagate the reclaim tag up into the perag radix tree */
		spin_lock(&ip->i_mount->m_perag_lock);
		radix_tree_tag_set(&ip->i_mount->m_perag_tree,
				XFS_INO_TO_AGNO(ip->i_mount, ip->i_ino),
				XFS_ICI_RECLAIM_TAG);
		spin_unlock(&ip->i_mount->m_perag_lock);

		/* schedule periodic background inode reclaim */
		xfs_syncd_queue_reclaim(ip->i_mount);

		trace_xfs_perag_set_reclaim(ip->i_mount, pag->pag_agno,
							-1, _RET_IP_);
	}
	pag->pag_ici_reclaimable++;
}
STATIC void
xfs_reclaim_worker(
	struct work_struct	*work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_reclaim_work);

	xfs_reclaim_inodes(mp, SYNC_TRYLOCK);
	xfs_syncd_queue_reclaim(mp);
}
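/*
 * Both __xfs_inode_set_reclaim_tag() and the worker above re-arm background
 * reclaim through xfs_syncd_queue_reclaim(), which is not shown in this
 * excerpt. The sketch below is an assumption of what that helper looks like:
 * the xfs_syncd_wq workqueue and the xfs_syncd_centisecs-based delay are
 * guesses consistent with the surrounding syncd code, not taken from here.
 */
STATIC void
xfs_syncd_queue_reclaim(
	struct xfs_mount	*mp)
{
	/*
	 * Sketch: only (re)queue the delayed reclaim work while at least one
	 * AG still has the reclaim tag set in the per-mount perag tree.
	 */
	rcu_read_lock();
	if (radix_tree_tagged(&mp->m_perag_tree, XFS_ICI_RECLAIM_TAG)) {
		queue_delayed_work(xfs_syncd_wq, &mp->m_reclaim_work,
			msecs_to_jiffies(xfs_syncd_centisecs / 6 * 10));
	}
	rcu_read_unlock();
}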
/*
 * Scan a certain number of inodes for reclaim.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
void
xfs_reclaim_inodes_nr(
	struct xfs_mount	*mp,
	int			nr_to_scan)
{
	/* kick background reclaimer and push the AIL */
	xfs_syncd_queue_reclaim(mp);
	xfs_ail_push_all(mp->m_ail);

	xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT, &nr_to_scan);
}
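/*
 * The worker above calls xfs_reclaim_inodes() with no scan limit, while the
 * memory-pressure path uses xfs_reclaim_inodes_nr() with a count. A plausible
 * sketch of the unlimited wrapper, assuming xfs_reclaim_inodes_ag() takes the
 * scan count by reference as it does in the calls shown here:
 */
int
xfs_reclaim_inodes(
	struct xfs_mount	*mp,
	int			mode)
{
	/* Sketch: walk every AG without an externally imposed scan limit. */
	int			nr_to_scan = INT_MAX;

	return xfs_reclaim_inodes_ag(mp, mode, &nr_to_scan);
}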
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);
	xfs_syncd_queue_reclaim(mp);

	return 0;
}
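/*
 * xfs_syncd_init() arms both delayed work items at mount time, so the
 * unmount path has to make sure neither worker can run against a dying
 * mount. The teardown below is a sketch: the name xfs_syncd_stop() and its
 * body are assumptions that simply mirror the initialisation above.
 */
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	/* Sketch: cancel and wait so no sync/reclaim/flush work survives. */
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}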
/*
 * Inode cache shrinker.
 *
 * When called we make sure that there is a background (fast) inode reclaim in
 * progress, while we will throttle the speed of reclaim via doing synchronous
 * reclaim of inodes. That means if we come across dirty inodes, we wait for
 * them to be cleaned, which we hope will not be very long due to the
 * background walker having already kicked the IO off on those dirty inodes.
 */
static int
xfs_reclaim_inode_shrink(
	struct shrinker		*shrink,
	struct shrink_control	*sc)
{
	struct xfs_mount	*mp;
	struct xfs_perag	*pag;
	xfs_agnumber_t		ag;
	int			reclaimable;
	int			nr_to_scan = sc->nr_to_scan;
	gfp_t			gfp_mask = sc->gfp_mask;

	mp = container_of(shrink, struct xfs_mount, m_inode_shrink);
	if (nr_to_scan) {
		/* kick background reclaimer and push the AIL */
		xfs_syncd_queue_reclaim(mp);
		xfs_ail_push_all(mp->m_ail);

		if (!(gfp_mask & __GFP_FS))
			return -1;

		xfs_reclaim_inodes_ag(mp, SYNC_TRYLOCK | SYNC_WAIT,
					&nr_to_scan);
		/* terminate if we don't exhaust the scan */
		if (nr_to_scan > 0)
			return -1;
	}

	reclaimable = 0;
	ag = 0;
	while ((pag = xfs_perag_get_tag(mp, ag, XFS_ICI_RECLAIM_TAG))) {
		ag = pag->pag_agno + 1;
		reclaimable += pag->pag_ici_reclaimable;
		xfs_perag_put(pag);
	}
	return reclaimable;
}
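/*
 * For xfs_reclaim_inode_shrink() to be invoked by memory reclaim it must be
 * registered with the VM. A minimal sketch against the shrinker interface
 * used above (the helper name is illustrative; m_inode_shrink matches the
 * container_of() in the callback):
 */
void
xfs_inode_shrinker_register(
	struct xfs_mount	*mp)
{
	/* Sketch: hook the per-mount inode cache shrinker into the VM. */
	mp->m_inode_shrink.shrink = xfs_reclaim_inode_shrink;
	mp->m_inode_shrink.seeks = DEFAULT_SEEKS;
	register_shrinker(&mp->m_inode_shrink);
}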