/*
 * Tear down a shadow PTE: drop it from the hardware MMU, unhash it from
 * every lookup list, release the backing page and free the cache entry
 * after an RCU grace period.
 *
 * Fix: the hpte_cache_count decrement must happen while mmu_lock is
 * still held.  The original decremented it after spin_unlock(), racing
 * with concurrent paths that read or update the counter under the lock,
 * which can leave the count permanently skewed.
 */
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	/* unhash from all four lookup lists; RCU readers may still walk them */
	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);

	/* keep the counter update under mmu_lock to avoid racing updaters */
	vcpu3s->hpte_cache_count--;

	spin_unlock(&vcpu3s->mmu_lock);

	/* defer the actual free until concurrent RCU walkers are done */
	call_rcu(&pte->rcu_head, free_pte_rcu);
}
Example #2
0
/**
 * bfq_drop_dead_cic - release a cic whose queue has already exited.
 * @bfqd: bfq data for the device in use.
 * @ioc: io_context owning @cic.
 * @cic: the @cic to free.
 *
 * cfq io contexts are dropped lazily, so a lookup may stumble on a dead
 * one; this disconnects and frees it.
 */
static void bfq_drop_dead_cic(struct bfq_data *bfqd, struct io_context *ioc,
			      struct cfq_io_context *cic)
{
	unsigned long irqflags;

	WARN_ON(!list_empty(&cic->queue_list));
	BUG_ON(cic->key != bfqd_dead_key(bfqd));

	spin_lock_irqsave(&ioc->lock, irqflags);

	BUG_ON(ioc->ioc_data == cic);

	/*
	 * With shared I/O contexts, two racing lookups may both reach the
	 * same dead cic.  RCU guarantees the storage is not reclaimed too
	 * early; the unhashed check under the ioc lock guarantees the cic
	 * is removed from the hashing structures exactly once.
	 */
	if (hlist_unhashed(&cic->cic_list))
		goto out_unlock;

	radix_tree_delete(&ioc->bfq_radix_root, bfqd->cic_index);
	hlist_del_init_rcu(&cic->cic_list);
	bfq_cic_free(cic);

out_unlock:
	spin_unlock_irqrestore(&ioc->lock, irqflags);
}
Example #3
0
/*
 * Unhook @kian from kvm's irq-ack notifier list.  The removal is done
 * under irq_lock; the subsequent synchronize_rcu() waits out any
 * lockless (RCU) walkers that may still hold a reference to the entry,
 * so the caller can safely free @kian afterwards.
 */
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				    struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	/* ensure concurrent RCU readers are done with @kian before return */
	synchronize_rcu();
}
Example #4
0
/**
 * cic_free_func - disconnect a cic ready to be freed.
 * @ioc: the io_context @cic belongs to.
 * @cic: the cic to be freed.
 *
 * Unhashes @cic from the @ioc radix tree and from its cic list under
 * the ioc lock, then hands it to bfq_cic_free() which defers the actual
 * deallocation to the end of the current RCU grace period.  Assumes
 * __bfq_exit_single_io_context() has already run for @cic, i.e. its key
 * carries CIC_DEAD_KEY.
 */
static void cic_free_func(struct io_context *ioc, struct cfq_io_context *cic)
{
	unsigned long key = (unsigned long) cic->key;
	unsigned long irqflags;

	/* only dead cics may be freed here */
	BUG_ON(!(key & CIC_DEAD_KEY));

	spin_lock_irqsave(&ioc->lock, irqflags);
	radix_tree_delete(&ioc->bfq_radix_root, key >> CIC_DEAD_INDEX_SHIFT);
	hlist_del_init_rcu(&cic->cic_list);
	spin_unlock_irqrestore(&ioc->lock, irqflags);

	bfq_cic_free(cic);
}
Example #5
0
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node) {
			hlist_del_init_rcu(&flow->hash_node);
			flow_free(flow);
		}
	}
Example #6
0
/*
 * NOTE: unmounting 'mnt' naturally propagates to all other mounts its
 * parent propagates to.
 */
static void __propagate_umount(struct mount *mnt)
{
	struct mount *parent = mnt->mnt_parent;
	struct mount *m;

	BUG_ON(parent == mnt);

	/* walk every mount that receives propagation from mnt's parent */
	for (m = propagation_next(parent, parent); m;
			m = propagation_next(m, parent)) {

		/* the propagated copy of mnt at the same mountpoint in m */
		struct mount *child = __lookup_mnt_last(&m->mnt,
						mnt->mnt_mountpoint);
		/*
		 * umount the child only if the child has no
		 * other children
		 */
		if (child && list_empty(&child->mnt_mounts)) {
			/*
			 * Move child into mnt's hash chain, inserted just
			 * before mnt — presumably the unmount list the
			 * caller is walking, so child gets disposed of
			 * along with mnt (NOTE(review): confirm against
			 * the callers of this function).
			 */
			hlist_del_init_rcu(&child->mnt_hash);
			hlist_add_before_rcu(&child->mnt_hash, &mnt->mnt_hash);
		}
	}
}
Example #7
0
/*
 * Remove a deviceid from cache
 *
 * @clp nfs_client associated with deviceid
 * @id the deviceid to unhash
 *
 * @ret the unhashed node, if found and dereferenced to zero, NULL otherwise.
 *
 * Looks the node up under nfs4_deviceid_lock, unhashes it, then waits a
 * full RCU grace period before dropping the initial reference, so that
 * lockless readers that found the node before the unhash have finished.
 */
void
nfs4_delete_deviceid(const struct pnfs_layoutdriver_type *ld,
			 const struct nfs_client *clp, const struct nfs4_deviceid *id)
{
	struct nfs4_deviceid_node *d;

	spin_lock(&nfs4_deviceid_lock);
	/* lookup walks an RCU-protected hash list */
	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, nfs4_deviceid_hash(id));
	rcu_read_unlock();
	if (!d) {
		spin_unlock(&nfs4_deviceid_lock);
		return;
	}
	hlist_del_init_rcu(&d->node);
	spin_unlock(&nfs4_deviceid_lock);
	/* wait out concurrent RCU readers before dropping the ref */
	synchronize_rcu();

	/* balance the initial ref set in pnfs_insert_deviceid */
	if (atomic_dec_and_test(&d->ref))
		d->ld->free_deviceid_node(d);
}
Example #8
0
/*
 * Remove tunnel @t from its hash list (RCU-safe unhash).
 *
 * NOTE(review): a second ip_tunnel_del() with a different signature
 * follows below; the two definitions cannot coexist in one translation
 * unit — they appear to be two versions of the same function pasted
 * together. Confirm which one this file should keep.
 */
static void ip_tunnel_del(struct ip_tunnel *t)
{
	hlist_del_init_rcu(&t->hash_node);
}
/*
 * Remove tunnel @t from its hash list.  If @t is the per-netns
 * collect_md tunnel, clear itn->collect_md_tun first (with RCU
 * publication semantics) so lockless readers stop finding it.
 */
static void ip_tunnel_del(struct ip_tunnel_net *itn, struct ip_tunnel *t)
{
	if (t->collect_md)
		rcu_assign_pointer(itn->collect_md_tun, NULL);
	hlist_del_init_rcu(&t->hash_node);
}