Example no. 1
/*
 * Invalidate exceptional entry if clean. This handles exceptional entries for
 * invalidate_inode_pages2() so for DAX it evicts only clean entries.
 */
static int invalidate_exceptional_entry2(struct address_space *mapping,
					 pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return 1;
	if (dax_mapping(mapping))
		return dax_invalidate_mapping_entry_sync(mapping, index);
	clear_shadow_entry(mapping, index, entry);
	return 1;
}
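For context, the invalidate-path caller (invalidate_inode_pages2_range() in mm/truncate.c) hands every exceptional, i.e. non-page, radix tree entry it finds during its pagevec walk to this helper, and a zero return is reported as -EBUSY. A minimal sketch of that per-entry dispatch is shown below; the locals page, index, mapping and ret are assumed to come from the caller's loop, and exact details vary between kernel versions.

	if (radix_tree_exceptional_entry(page)) {
		/* 0 here means a dirty DAX entry could not be evicted */
		if (!invalidate_exceptional_entry2(mapping, index, page))
			ret = -EBUSY;
		continue;
	}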
Example no. 2
/*
 * Unconditionally remove exceptional entry. Usually called from truncate path.
 */
static void truncate_exceptional_entry(struct address_space *mapping,
				       pgoff_t index, void *entry)
{
	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	if (dax_mapping(mapping)) {
		dax_delete_mapping_entry(mapping, index);
		return;
	}
	clear_shadow_entry(mapping, index, entry);
}
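The truncate-path call site is simpler: truncation is unconditional, so the helper returns void and the caller just moves on to the next entry. A comparable sketch, again assuming the caller's pagevec loop and locals:

	if (radix_tree_exceptional_entry(page)) {
		truncate_exceptional_entry(mapping, index, page);
		continue;
	}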
Example no. 3
static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);

	if (dax_mapping(mapping)) {
		if (radix_tree_delete_item(&mapping->page_tree, index, entry))
			mapping->nrexceptional--;
	} else {
		/*
		 * Regular page slots are stabilized by the page lock even
		 * without the tree itself locked.  These unlocked entries
		 * need verification under the tree lock.
		 */
		if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
					 &slot))
			goto unlock;
		if (*slot != entry)
			goto unlock;
		radix_tree_replace_slot(slot, NULL);
		mapping->nrexceptional--;
		if (!node)
			goto unlock;
		workingset_node_shadows_dec(node);
		/*
		 * Don't track node without shadow entries.
		 *
		 * Avoid acquiring the list_lru lock if already untracked.
		 * The list_empty() test is safe as node->private_list is
		 * protected by mapping->tree_lock.
		 */
		if (!workingset_node_shadows(node) &&
		    !list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
				     &node->private_list);
		__radix_tree_delete_node(&mapping->page_tree, node);
	}
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}