static int p2m_mem_access_radix_set(struct p2m_domain *p2m, unsigned long pfn,
                                    p2m_access_t a)
{
    int rc;

    if ( !p2m->mem_access_enabled )
        return 0;

    if ( p2m_access_rwx == a )
    {
        radix_tree_delete(&p2m->mem_access_settings, pfn);
        return 0;
    }

    rc = radix_tree_insert(&p2m->mem_access_settings, pfn,
                           radix_tree_int_to_ptr(a));
    if ( rc == -EEXIST )
    {
        /* If a setting already exists, change it to the new one */
        radix_tree_replace_slot(
            radix_tree_lookup_slot(&p2m->mem_access_settings, pfn),
            radix_tree_int_to_ptr(a));
        rc = 0;
    }

    return rc;
}
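For context, the read side of this pattern decodes the stored integer with radix_tree_ptr_to_int(), Xen's counterpart to the radix_tree_int_to_ptr() used above. A minimal sketch, assuming a p2m_domain with a default_access field; the helper name is illustrative, not taken from the source:

/*
 * Illustrative read-side counterpart (hypothetical helper): look up the
 * per-pfn access setting, falling back to the domain default when no
 * entry was recorded in the radix tree.
 */
static p2m_access_t p2m_mem_access_radix_get(struct p2m_domain *p2m,
                                             unsigned long pfn)
{
    void *v;

    if ( !p2m->mem_access_enabled )
        return p2m_access_rwx;

    v = radix_tree_lookup(&p2m->mem_access_settings, pfn);
    if ( !v )
        return p2m->default_access;

    /* Entries were stored with radix_tree_int_to_ptr(); decode them. */
    return radix_tree_ptr_to_int(v);
}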
void replace_irq_desc(unsigned int irq, struct irq_desc *desc)
{
	void **ptr;

	ptr = radix_tree_lookup_slot(&irq_desc_tree, irq);
	if (ptr)
		radix_tree_replace_slot(ptr, desc);
}
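The same lookup-slot/replace-slot idiom in isolation, as a hedged sketch on a private tree. Names are illustrative; this assumes the two-argument radix_tree_replace_slot() of the kernel generation shown above, and that the caller serializes against other writers:

#include <linux/errno.h>
#include <linux/radix-tree.h>

static RADIX_TREE(example_tree, GFP_KERNEL);	/* statically initialized */

/*
 * Swap the item stored at @index for @new_item in place, rather than
 * doing a delete/insert pair, so concurrent RCU readers never observe
 * an empty slot.
 */
static int example_replace(unsigned long index, void *new_item)
{
	void **slot;

	slot = radix_tree_lookup_slot(&example_tree, index);
	if (!slot)
		return -ENOENT;
	radix_tree_replace_slot(slot, new_item);
	return 0;
}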
static void clear_exceptional_entry(struct address_space *mapping,
				    pgoff_t index, void *entry)
{
	struct radix_tree_node *node;
	void **slot;

	/* Handled by shmem itself */
	if (shmem_mapping(mapping))
		return;

	spin_lock_irq(&mapping->tree_lock);

	if (dax_mapping(mapping)) {
		if (radix_tree_delete_item(&mapping->page_tree, index, entry))
			mapping->nrexceptional--;
	} else {
		/*
		 * Regular page slots are stabilized by the page lock even
		 * without the tree itself locked. These unlocked entries
		 * need verification under the tree lock.
		 */
		if (!__radix_tree_lookup(&mapping->page_tree, index, &node,
					&slot))
			goto unlock;
		if (*slot != entry)
			goto unlock;
		radix_tree_replace_slot(slot, NULL);
		mapping->nrexceptional--;
		if (!node)
			goto unlock;
		workingset_node_shadows_dec(node);
		/*
		 * Don't track node without shadow entries.
		 *
		 * Avoid acquiring the list_lru lock if already untracked.
		 * The list_empty() test is safe as node->private_list is
		 * protected by mapping->tree_lock.
		 */
		if (!workingset_node_shadows(node) &&
		    !list_empty(&node->private_list))
			list_lru_del(&workingset_shadow_nodes,
					&node->private_list);
		__radix_tree_delete_node(&mapping->page_tree, node);
	}
unlock:
	spin_unlock_irq(&mapping->tree_lock);
}
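The non-DAX branch above is essentially a verify-then-clear under the tree lock. Abstracted away from the page cache, the pattern might look like the following sketch; the helper is hypothetical and assumes the same kernel-generation radix-tree API:

/*
 * Clear the slot at @index only if it still holds @expected, mirroring
 * the verification-under-lock pattern above. The caller must hold the
 * lock that serializes writers to @root. Returns true if the entry was
 * cleared, false if the slot was empty or had been repopulated.
 */
static bool clear_slot_if_matches(struct radix_tree_root *root,
				  unsigned long index, void *expected)
{
	struct radix_tree_node *node;
	void **slot;

	if (!__radix_tree_lookup(root, index, &node, &slot))
		return false;
	if (*slot != expected)
		return false;
	radix_tree_replace_slot(slot, NULL);
	/* Reclaim the node if clearing the slot left it empty. */
	if (node)
		__radix_tree_delete_node(root, node);
	return true;
}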
static void page_cache_tree_delete(struct address_space *mapping,
				   struct page *page, void *shadow)
{
	struct radix_tree_node *node;
	unsigned long index;
	unsigned int offset;
	unsigned int tag;
	void **slot;

	VM_BUG_ON(!PageLocked(page));

	__radix_tree_lookup(&mapping->page_tree, page->index, &node, &slot);

	if (shadow) {
		mapping->nrshadows++;
		/*
		 * Make sure the nrshadows update is committed before
		 * the nrpages update so that final truncate racing
		 * with reclaim does not see both counters 0 at the
		 * same time and miss a shadow entry.
		 */
		smp_wmb();
	}
	mapping->nrpages--;

	if (!node) {
		/* Clear direct pointer tags in root node */
		mapping->page_tree.gfp_mask &= __GFP_BITS_MASK;
		radix_tree_replace_slot(slot, shadow);
		return;
	}

	/* Clear tree tags for the removed page */
	index = page->index;
	offset = index & RADIX_TREE_MAP_MASK;
	for (tag = 0; tag < RADIX_TREE_MAX_TAGS; tag++) {
		if (test_bit(offset, node->tags[tag]))
			radix_tree_tag_clear(&mapping->page_tree, index, tag);
	}

	/* Delete page, swap shadow entry */
	radix_tree_replace_slot(slot, shadow);
	workingset_node_pages_dec(node);
	if (shadow)
		workingset_node_shadows_inc(node);
	else
		if (__radix_tree_delete_node(&mapping->page_tree, node))
			return;

	/*
	 * Track node that only contains shadow entries.
	 *
	 * Avoid acquiring the list_lru lock if already tracked. The
	 * list_empty() test is safe as node->private_list is
	 * protected by mapping->tree_lock.
	 */
	if (!workingset_node_pages(node) &&
	    list_empty(&node->private_list)) {
		node->private_data = mapping;
		list_lru_add(&workingset_shadow_nodes, &node->private_list);
	}
}
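The smp_wmb() above is one half of a barrier pairing; the matching read side lives on the truncate path of the same kernel generation and reads nrpages before nrshadows, so that seeing the final nrpages decrement implies seeing the shadow-count increment. A hedged sketch of that reader, with an illustrative helper name:

/*
 * Illustrative reader side of the barrier pairing above: if the final
 * nrpages decrement is visible, the earlier nrshadows increment must
 * be visible too, so truncate never sees both counters zero while a
 * shadow entry still sits in the tree.
 */
static bool mapping_has_entries(struct address_space *mapping)
{
	unsigned long nrpages, nrshadows;

	nrpages = mapping->nrpages;
	smp_rmb();	/* pairs with smp_wmb() in page_cache_tree_delete() */
	nrshadows = mapping->nrshadows;

	return nrpages || nrshadows;
}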