Example #1
static void clear_exceptional_entry(struct address_space *mapping,
                                    pgoff_t index, void *entry)
{
    struct radix_tree_node *node;
    void **slot;

    /* Handled by shmem itself */
    if (shmem_mapping(mapping))
        return;

    spin_lock_irq(&mapping->tree_lock);

    if (dax_mapping(mapping)) {
        if (radix_tree_delete_item(&mapping->page_tree, index, entry))
            mapping->nrexceptional--;
    } else {
        /*
         * Regular page slots are stabilized by the page lock even
         * without the tree itself locked.  These unlocked entries
         * need verification under the tree lock.
         */
        if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
                                 &slot))
            goto unlock;
        if (*slot != entry)
            goto unlock;
        radix_tree_replace_slot(slot, NULL);
        mapping->nrexceptional--;
        if (!node)
            goto unlock;
        workingset_node_shadows_dec(node);
        /*
         * Don't track node without shadow entries.
         *
         * Avoid acquiring the list_lru lock if already untracked.
         * The list_empty() test is safe as node->private_list is
         * protected by mapping->tree_lock.
         */
        if (!workingset_node_shadows(node) &&
                !list_empty(&node->private_list))
            list_lru_del(&workingset_shadow_nodes,
                         &node->private_list);
        __radix_tree_delete_node(&mapping->page_tree, node);
    }
unlock:
    spin_unlock_irq(&mapping->tree_lock);
}
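
For context, here is a minimal sketch of how a truncation or invalidation path might hand work to clear_exceptional_entry() when it meets an exceptional (shadow or DAX) entry while walking a pagevec. The loop shape and the names pvec and indices are illustrative assumptions modeled on typical page-cache truncation code, not part of the example above.

	/*
	 * Hypothetical caller loop: anything in the pagevec that is not a
	 * real page (i.e. an exceptional shadow/DAX entry) is passed to
	 * clear_exceptional_entry() instead of being locked and freed.
	 */
	for (i = 0; i < pagevec_count(&pvec); i++) {
		struct page *page = pvec.pages[i];
		pgoff_t index = indices[i];

		if (radix_tree_exceptional_entry(page)) {
			clear_exceptional_entry(mapping, index, page);
			continue;
		}
		/* ... regular page handling: lock, truncate, release ... */
	}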
Example #2
static enum lru_status shadow_lru_isolate(struct list_head *item,
					  struct list_lru_one *lru,
					  spinlock_t *lru_lock,
					  void *arg)
{
	struct address_space *mapping;
	struct radix_tree_node *node;
	unsigned int i;
	int ret;

	/*
	 * Page cache insertions and deletions synchronously maintain
	 * the shadow node LRU under the mapping->tree_lock and the
	 * lru_lock.  Because the page cache tree is emptied before
	 * the inode can be destroyed, holding the lru_lock pins any
	 * address_space that has radix tree nodes on the LRU.
	 *
	 * We can then safely transition to the mapping->tree_lock to
	 * pin only the address_space of the particular node we want
	 * to reclaim, take the node off-LRU, and drop the lru_lock.
	 */

	node = container_of(item, struct radix_tree_node, private_list);
	mapping = node->private_data;

	/* Coming from the list, invert the lock order */
	if (!spin_trylock(&mapping->tree_lock)) {
		spin_unlock(lru_lock);
		ret = LRU_RETRY;
		goto out;
	}

	list_lru_isolate(lru, item);
	spin_unlock(lru_lock);

	/*
	 * The nodes should only contain one or more shadow entries,
	 * no pages, so we expect to be able to remove them all and
	 * delete and free the empty node afterwards.
	 */
	BUG_ON(!workingset_node_shadows(node));
	BUG_ON(workingset_node_pages(node));

	for (i = 0; i < RADIX_TREE_MAP_SIZE; i++) {
		if (node->slots[i]) {
			BUG_ON(!radix_tree_exceptional_entry(node->slots[i]));
			node->slots[i] = NULL;
			workingset_node_shadows_dec(node);
			BUG_ON(!mapping->nrshadows);
			mapping->nrshadows--;
		}
	}
	BUG_ON(workingset_node_shadows(node));
	inc_zone_state(page_zone(virt_to_page(node)), WORKINGSET_NODERECLAIM);
	if (!__radix_tree_delete_node(&mapping->page_tree, node))
		BUG();

	spin_unlock(&mapping->tree_lock);
	ret = LRU_REMOVED_RETRY;
out:
	local_irq_enable();
	cond_resched();
	local_irq_disable();
	spin_lock(lru_lock);
	return ret;
}
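
To see why this function re-enables and then re-disables interrupts around cond_resched() at its exit, here is a sketch of the shrinker side that typically drives the walk: interrupts are disabled around the whole list_lru walk because the lru_lock must nest inside the IRQ-safe mapping->tree_lock. This is a minimal sketch assuming the usual workingset shrinker wiring; exact details vary between kernel versions.

	static unsigned long scan_shadow_nodes(struct shrinker *shrinker,
					       struct shrink_control *sc)
	{
		unsigned long ret;

		/* list_lru lock nests inside the IRQ-safe mapping->tree_lock */
		local_irq_disable();
		ret = list_lru_shrink_walk(&workingset_shadow_nodes, sc,
					   shadow_lru_isolate, NULL);
		local_irq_enable();
		return ret;
	}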