Example No. 1
void free_pid(struct pid *pid)
{
    /* We can be called with write_lock_irq(&tasklist_lock) held */
    int i;
    unsigned long flags;

    spin_lock_irqsave(&pidmap_lock, flags);
    for (i = 0; i <= pid->level; i++) {
        struct upid *upid = pid->numbers + i;
        struct pid_namespace *ns = upid->ns;
        hlist_del_rcu(&upid->pid_chain);
        switch(--ns->nr_hashed) {
        case 2:
        case 1:
            /* When all that is left in the pid namespace
             * is the reaper wake up the reaper.  The reaper
             * may be sleeping in zap_pid_ns_processes().
             */
            wake_up_process(ns->child_reaper);
            break;
        case 0:
            schedule_work(&ns->proc_work);
            break;
        }
    }
    spin_unlock_irqrestore(&pidmap_lock, flags);

    for (i = 0; i <= pid->level; i++)
        free_pidmap(pid->numbers + i);

    call_rcu(&pid->rcu, delayed_put_pid);
}
Example No. 2
/*
 * Try to find the vxlan client associated with the given MAC address.
 *
 * If b_rcu_protect is true, this routine acquires the RCU read lock and
 * traverses the entire list in an RCU context; otherwise it does not,
 * and b_rcu_protect should only be left unset when it is clear that
 * rcu_read_lock() has already been invoked somewhere before this
 * function is called.
 *
 * Most importantly, if b_age_check is true, the caller must make sure
 * the spinlock has already been acquired somewhere else beforehand.
 */
struct vxlan_client *vxlan_server_find_client(struct net *net, uint8_t mac[], int b_rcu_protect, int b_age_check)
{
	uint64_t diff_time;
	struct vxlan_client *cl=NULL,*cl_tmp=NULL;
	struct vxlan_server_net* vsn=net_generic(net,vxlan_server_id);
	struct hlist_head *hhead;
	int hash_idx=jhash(mac,6,0x12345678);
	hash_idx&=CLIENT_HASH_MASK;
	hhead=&vsn->client_hhead[hash_idx];

	if(b_rcu_protect){
		hlist_for_each_entry_rcu(cl_tmp,hhead,hnode){
			if(compare_ether_addr(cl_tmp->mac,mac)==0){
				cl=cl_tmp;
				cl->jiffie_cnt=jiffies_64;
				break;
			}
			if(!b_age_check)
				continue;
			/*here we check age expiry timer */
			if(cl_tmp->is_local_port)
				continue;//local port must be skipped and remain here for a long time
			diff_time=jiffies_64-cl_tmp->jiffie_cnt;
			if(diff_time>DEFAULT_CLIENT_AGE_TIME){
				hlist_del_rcu(&cl_tmp->hnode);
				call_rcu(&cl_tmp->rcu,vxlan_server_client_free);
			}
		}
	}else{
Example No. 3
/**
 * __batadv_dat_purge - delete entries from the DAT local storage
 * @bat_priv: the bat priv with all the soft interface information
 * @to_purge: function in charge of deciding whether an entry has to be purged
 *	      or not. This function takes the dat_entry as argument and has to
 *	      return a boolean value: true if the entry has to be deleted,
 *	      false otherwise
 *
 * Loops over each entry in the DAT local storage and deletes it if and only if
 * the to_purge function passed as argument returns true.
 */
static void __batadv_dat_purge(struct batadv_priv *bat_priv,
			       bool (*to_purge)(struct batadv_dat_entry *))
{
	spinlock_t *list_lock; /* protects write access to the hash lists */
	struct batadv_dat_entry *dat_entry;
	struct hlist_node *node, *node_tmp;
	struct hlist_head *head;
	uint32_t i;

	if (!bat_priv->dat.hash)
		return;

	for (i = 0; i < bat_priv->dat.hash->size; i++) {
		head = &bat_priv->dat.hash->table[i];
		list_lock = &bat_priv->dat.hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(dat_entry, node, node_tmp, head,
					  hash_entry) {
			/* if a helper function has been passed as parameter,
			 * ask it if the entry has to be purged or not
			 */
			if (to_purge && !to_purge(dat_entry))
				continue;

			hlist_del_rcu(node);
			batadv_dat_entry_free_ref(dat_entry);
		}
		spin_unlock_bh(list_lock);
	}
}
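The to_purge contract described in the header above can be illustrated with a small predicate. The sketch below is only an assumption-laden illustration: the last_update field, the BATADV_DAT_ENTRY_TIMEOUT constant and the batadv_has_timed_out() helper are assumed to exist in this form. It also shows that passing NULL purges every entry, since the loop only skips an entry when to_purge is non-NULL and returns false.

/* Illustrative predicate (field and helper names assumed): purge entries
 * that have not been refreshed within the timeout.
 */
static bool batadv_dat_entry_timed_out(struct batadv_dat_entry *dat_entry)
{
	return batadv_has_timed_out(dat_entry->last_update,
				    BATADV_DAT_ENTRY_TIMEOUT);
}

static void batadv_dat_purge_example(struct batadv_priv *bat_priv)
{
	/* purge only timed-out entries */
	__batadv_dat_purge(bat_priv, batadv_dat_entry_timed_out);

	/* purge everything: with to_purge == NULL no entry is ever skipped,
	 * because the loop only skips when to_purge is set and returns false
	 */
	__batadv_dat_purge(bat_priv, NULL);
}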
Example No. 4
/* delete all claims for a backbone */
static void
batadv_bla_del_backbone_claims(struct batadv_bla_backbone_gw *backbone_gw)
{
	struct batadv_hashtable *hash;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	int i;
	spinlock_t *list_lock;	/* protects write access to the hash lists */

	hash = backbone_gw->bat_priv->bla.claim_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(claim, node_tmp,
					  head, hash_entry) {
			if (claim->backbone_gw != backbone_gw)
				continue;

			batadv_claim_free_ref(claim);
			hlist_del_rcu(&claim->hash_entry);
		}
		spin_unlock_bh(list_lock);
	}

	/* all claims gone, initialize CRC */
	backbone_gw->crc = BATADV_BLA_CRC_INIT;
}
Example No. 5
/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  It is possible to fail
 * for reasons such as lack of memory.  RTNL lock must be held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_RTNL();

	hlist_del_rcu(&vport->hash_node);

	vport->ops->destroy(vport);
}
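Note that this variant asserts the RTNL lock, while the ovs_mutex-based variants in Examples No. 7 and No. 9 use ASSERT_OVSL() instead; in either case the caller is expected to take the corresponding lock around the deletion. A minimal, hypothetical caller sketch for this RTNL-protected variant:

/* hypothetical caller: RTNL must be held across the deletion,
 * matching the ASSERT_RTNL() in ovs_vport_del()
 */
static void example_delete_vport(struct vport *vport)
{
	rtnl_lock();
	ovs_vport_del(vport);
	rtnl_unlock();
}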
Example No. 6
void kvm_unregister_irq_mask_notifier(struct kvm *kvm, int irq,
				      struct kvm_irq_mask_notifier *kimn)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_rcu(&kimn->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_rcu();
}
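This example shows, in its most compact form, the writer side of the pattern that runs through this page: unlink with hlist_del_rcu() under a lock, then wait for a grace period before the object can be reused or freed. A minimal reader-side sketch with hypothetical names shows why the grace period is needed: lockless readers may still be traversing the list when the entry is unlinked.

/* hypothetical lockless reader: any entry it sees may be unlinked by a
 * concurrent hlist_del_rcu(); the writer's synchronize_rcu() waits for
 * this read-side critical section to end before freeing the entry
 */
struct my_entry {
	struct hlist_node link;
	int key;
	int data;
};

static int my_lookup_data(struct hlist_head *head, int key, int *data)
{
	struct my_entry *e;
	int ret = -ENOENT;

	rcu_read_lock();
	hlist_for_each_entry_rcu(e, head, link) {
		if (e->key == key) {
			*data = e->data;	/* use the entry while still protected */
			ret = 0;
			break;
		}
	}
	rcu_read_unlock();
	return ret;
}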
Example No. 7
File: vport.c Project: JunoZhu/ovs
/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  ovs_mutex must be
 * held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_OVSL();

	hlist_del_rcu(&vport->hash_node);
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}
Example No. 8
/* Called with RTNL lock. */
void ovs_dp_detach_port(struct vport *p)
{
	ASSERT_RTNL();

	/* First drop references to device. */
	hlist_del_rcu(&p->dp_hash_node);

	/* Then destroy it. */
	ovs_vport_del(p);
}
Example No. 9
/**
 *	ovs_vport_del - delete existing vport device
 *
 * @vport: vport to delete.
 *
 * Detaches @vport from its datapath and destroys it.  It is possible to fail
 * for reasons such as lack of memory.  ovs_mutex must be held.
 */
void ovs_vport_del(struct vport *vport)
{
	ASSERT_OVSL();

	/* remove vport->hash_node from its hash list */
	hlist_del_rcu(&vport->hash_node);
	/* drop the reference count on vport->ops->owner */
	module_put(vport->ops->owner);
	vport->ops->destroy(vport);
}
Example No. 10
/*
 * Unhash the session.
 * Caller holds ft_lport_lock.
 */
static void ft_sess_unhash(struct ft_sess *sess)
{
	struct ft_tport *tport = sess->tport;

	hlist_del_rcu(&sess->hash);
	BUG_ON(!tport->sess_count);
	tport->sess_count--;
	sess->port_id = -1;
	sess->params = 0;
}
Example No. 11
static void release_posix_timer(struct k_itimer *tmr, int it_id_set)
{
	if (it_id_set) {
		unsigned long flags;
		spin_lock_irqsave(&hash_lock, flags);
		hlist_del_rcu(&tmr->t_hash);
		spin_unlock_irqrestore(&hash_lock, flags);
	}
	put_pid(tmr->it_pid);
	sigqueue_free(tmr->sigq);
	call_rcu(&tmr->it.rcu, k_itimer_rcu_free);
}
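The delayed free callbacks used here and elsewhere on this page (k_itimer_rcu_free, delayed_put_pid, rfs_rule_rcu_free, ...) are not shown in the listing, but they typically follow one shape: recover the enclosing object from the embedded rcu_head with container_of() and release it. A hypothetical sketch of that shape, assuming an entry type with an embedded struct rcu_head:

struct my_rcu_entry {
	struct hlist_node link;
	struct rcu_head rcu;
	int key;
};

/* hypothetical RCU callback: runs after a grace period, once no reader
 * can still hold a pointer obtained from the hash list
 */
static void my_rcu_entry_free(struct rcu_head *head)
{
	kfree(container_of(head, struct my_rcu_entry, rcu));
}

static void my_rcu_entry_delete(struct my_rcu_entry *e)
{
	hlist_del_rcu(&e->link);		/* readers may still see the node */
	call_rcu(&e->rcu, my_rcu_entry_free);	/* free after a grace period */
}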
Example No. 12
/*
 * stop receiving the event interception. It is the opposed operation of
 * kvm_page_track_register_notifier().
 */
void
kvm_page_track_unregister_notifier(struct kvm *kvm,
				   struct kvm_page_track_notifier_node *n)
{
	struct kvm_page_track_notifier_head *head;

	head = &kvm->arch.track_notifier_head;

	spin_lock(&kvm->mmu_lock);
	hlist_del_rcu(&n->node);
	spin_unlock(&kvm->mmu_lock);
	synchronize_srcu(&head->track_srcu);
}
Example No. 13
/*
 * Remove an entry from the hash table.  Does not free any memory.
 */
int virt_hash_table_remove(struct virt_hash_table *table, struct hlist_node *entry, u32 hash)
{
    struct virt_hash_head *head;

    if(WARN_ON(hash >= table->size))
        return -1;

    head = &table->head[hash];

    spin_lock_bh(&head->lock);
    hlist_del_rcu(entry);
    spin_unlock_bh(&head->lock);

    return 0;
}
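Since virt_hash_table_remove() only unlinks the node and, as the comment says, does not free any memory, a caller that wants to release the entry has to defer the free until after a grace period. A hypothetical caller sketch, assuming the entry embeds both the hlist_node passed to the table and a struct rcu_head:

struct virt_entry {
	struct hlist_node node;		/* linked into the virt_hash_table */
	struct rcu_head rcu;
	/* ... payload ... */
};

/* hypothetical caller: the bucket lock is taken inside
 * virt_hash_table_remove(); the free is deferred past a grace period
 */
static void virt_entry_delete(struct virt_hash_table *table,
			      struct virt_entry *entry, u32 hash)
{
	if (virt_hash_table_remove(table, &entry->node, hash) == 0)
		kfree_rcu(entry, rcu);
}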
Example No. 14
void free_pid(struct pid *pid)
{
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}
Example No. 15
static
void pid_tracker_del_node_rcu(struct lttng_pid_hash_node *e)
{
	hlist_del_rcu(&e->hlist);
	/*
	 * We choose to use a heavyweight synchronize on removal here,
	 * since removal of a PID from the tracker mask is a rare
	 * operation, and we don't want to use more cache lines than
	 * what we really need when doing the PID lookups, so we don't
	 * want to afford adding a rcu_head field to those pid hash
	 * node.
	 */
	synchronize_trace();
	kfree(e);
}
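The comment above spells out a deliberate trade-off: no rcu_head is embedded in the pid hash nodes, so removal pays for a heavyweight synchronize instead. For comparison, the more common pattern, which that comment explicitly chose not to use, looks roughly like the following sketch (struct, field and function names are hypothetical):

/* Alternative, NOT what the code above does: embed an rcu_head in the
 * node and defer the free with call_rcu(), avoiding the blocking wait
 * at the cost of one extra field per node.
 */
struct lttng_pid_hash_node_alt {
	struct hlist_node hlist;
	struct rcu_head rcu;
	int pid;
};

static void pid_tracker_free_node_rcu(struct rcu_head *head)
{
	kfree(container_of(head, struct lttng_pid_hash_node_alt, rcu));
}

static void pid_tracker_del_node_alt(struct lttng_pid_hash_node_alt *e)
{
	hlist_del_rcu(&e->hlist);
	call_rcu(&e->rcu, pid_tracker_free_node_rcu);
}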
Example No. 16
void geneve_sock_release(struct geneve_sock *gs)
{
	struct net *net = sock_net(gs->sock->sk);
	struct geneve_net *gn = net_generic(net, geneve_net_id);

	if (!atomic_dec_and_test(&gs->refcnt))
		return;

	spin_lock(&gn->sock_lock);
	hlist_del_rcu(&gs->hlist);
	geneve_notify_del_rx_port(gs);
	spin_unlock(&gn->sock_lock);

	queue_work(geneve_wq, &gs->del_work);
}
Example No. 17
fastcall void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++)
		hlist_del_rcu(&pid->numbers[i].pid_chain);
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers[i].ns, pid->numbers[i].nr);

	call_rcu(&pid->rcu, delayed_put_pid);
}
Example No. 18
int bpf_dp_replicator_del_all(struct plum *plum, u32 replicator_id)
{
	struct hlist_head *head;
	struct hlist_node *n;
	struct plum_replicator_elem *elem;

	head = replicator_hash_bucket(plum, replicator_id);
	hlist_for_each_entry_safe(elem, n, head, hash_node) {
		if (elem->replicator_id == replicator_id) {
			hlist_del_rcu(&elem->hash_node);
			kfree_rcu(elem, rcu);
		}
	}

	return 0;
}
Example No. 19
		hlist_for_each_entry(cl_tmp,hhead,hnode){
			if(compare_ether_addr(cl_tmp->mac,mac)==0){
				cl=cl_tmp;
				cl->jiffie_cnt=jiffies_64;
				break;
			}
			if(!b_age_check)
				continue;
			if(cl_tmp->is_local_port)
				continue;
			/*here we check age expiry timer */
			diff_time=jiffies_64-cl_tmp->jiffie_cnt;
			if(diff_time>DEFAULT_CLIENT_AGE_TIME){
				hlist_del_rcu(&cl_tmp->hnode);
				call_rcu(&cl_tmp->rcu,vxlan_server_client_free);
			}
		}
Example No. 20
/*
 * rfs_rule_destroy_ip_rule
 */
int rfs_rule_destroy_ip_rule(int family, uint8_t *ipaddr, uint32_t is_static)
{
	struct hlist_head *head;
	struct rfs_rule_entry *re;
	struct rfs_rule *rr = &__rr;
	uint32_t type = RFS_RULE_TYPE_IP4_RULE;
	uint16_t cpu;

	head = &rr->hash[rfs_rule_hash(type, ipaddr)];

	spin_lock_bh(&rr->hash_lock);
	hlist_for_each_entry_rcu(re, head, hlist) {
		if (type != re->type)
			continue;

		if (rfs_rule_ip_equal(family, (uint8_t *)&re->u.ip4addr, ipaddr))
			break;
	}

	if (!re || (re->is_static && !is_static)) {
		spin_unlock_bh(&rr->hash_lock);
		return 0;
	}

	hlist_del_rcu(&re->hlist);
	cpu = re->cpu;

	if (family == AF_INET)
		RFS_DEBUG("Remove IP rule %pI4, cpu %d\n", ipaddr, cpu);
	else
		RFS_DEBUG("Remove IP rule %pI6, cpu %d\n", ipaddr, cpu);

	if (cpu != RPS_NO_CPU &&
		rfs_ess_update_ip_rule(re, RPS_NO_CPU) < 0) {
		if (re->type == RFS_RULE_TYPE_IP4_RULE)
			RFS_WARN("Failed to remove IP rule %pI4, cpu %d\n", ipaddr, re->cpu);
		else
			RFS_WARN("Failed to remove IP rule %pI6, cpu %d\n", ipaddr, re->cpu);
	}

	re->cpu = RPS_NO_CPU;
	call_rcu(&re->rcu, rfs_rule_rcu_free);
	spin_unlock_bh(&rr->hash_lock);

	return 0;
}
Example No. 21
void fastcall detach_pid(struct task_struct *task, enum pid_type type)
{
	struct pid_link *link;
	struct pid *pid;
	int tmp;

	link = &task->pids[type];
	pid = link->pid;

	hlist_del_rcu(&link->node);
	link->pid = NULL;

	for (tmp = PIDTYPE_MAX; --tmp >= 0; )
		if (!hlist_empty(&pid->tasks[tmp]))
			return;

	free_pid(pid);
}
Example No. 22
int bpf_dp_replicator_del_port(struct plum *plum, u32 replicator_id,
			       u32 port_id)
{
	struct plum_replicator_elem *elem;

	rcu_read_lock();
	elem = replicator_lookup_port(plum, replicator_id, port_id);
	if (!elem) {
		rcu_read_unlock();
		return -ENODEV;
	}

	hlist_del_rcu(&elem->hash_node);
	kfree_rcu(elem, rcu);
	rcu_read_unlock();

	return 0;
}
Example No. 23
/*
 * Deactivate a cell.
 */
static void afs_deactivate_cell(struct afs_net *net, struct afs_cell *cell)
{
	_enter("%s", cell->name);

	afs_proc_cell_remove(cell);

	mutex_lock(&net->proc_cells_lock);
	hlist_del_rcu(&cell->proc_link);
	afs_dynroot_rmdir(net, cell);
	mutex_unlock(&net->proc_cells_lock);

#ifdef CONFIG_AFS_FSCACHE
	fscache_relinquish_cookie(cell->cache, NULL, false);
	cell->cache = NULL;
#endif

	_leave("");
}
Example No. 24
/* Check when we last heard from other nodes, and remove them in case of
 * a time out, or clean all backbone gws if now is set.
 */
static void batadv_bla_purge_backbone_gw(struct batadv_priv *bat_priv, int now)
{
	struct batadv_bla_backbone_gw *backbone_gw;
	struct hlist_node *node_tmp;
	struct hlist_head *head;
	struct batadv_hashtable *hash;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	hash = bat_priv->bla.backbone_hash;
	if (!hash)
		return;

	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(backbone_gw, node_tmp,
					  head, hash_entry) {
			if (now)
				goto purge_now;
			if (!batadv_has_timed_out(backbone_gw->lasttime,
						  BATADV_BLA_BACKBONE_TIMEOUT))
				continue;

			batadv_dbg(BATADV_DBG_BLA, backbone_gw->bat_priv,
				   "bla_purge_backbone_gw(): backbone gw %pM timed out\n",
				   backbone_gw->orig);

purge_now:
			/* don't wait for the pending request anymore */
			if (atomic_read(&backbone_gw->request_sent))
				atomic_dec(&bat_priv->bla.num_requests);

			batadv_bla_del_backbone_claims(backbone_gw);

			hlist_del_rcu(&backbone_gw->hash_entry);
			batadv_backbone_gw_free_ref(backbone_gw);
		}
		spin_unlock_bh(list_lock);
	}
}
Example No. 25
int ws_hash_free(struct ws_hash *hash)
{
	struct ws_sta *ws_sta;
	struct hlist_node *node;
	struct hlist_head *head;
	spinlock_t *list_lock;	/* protects write access to the hash lists */
	int i;

	for (i = 0; i < WS_HASH_SIZE; i++) {
		head = &hash->table[i];
		list_lock = &hash->list_locks[i];

		spin_lock_bh(list_lock);
		hlist_for_each_entry_safe(ws_sta, node, head, hash_entry) {
			hlist_del_rcu(&ws_sta->hash_entry);
			ws_sta_free_ref(ws_sta);
		}
		spin_unlock_bh(list_lock);
	}
Example No. 26
void free_pid(struct pid *pid)
{
	/* We can be called with write_lock_irq(&tasklist_lock) held */
	int i;
	unsigned long flags;

	spin_lock_irqsave(&pidmap_lock, flags);
	for (i = 0; i <= pid->level; i++) {
		struct upid *upid = pid->numbers + i;
		hlist_del_rcu(&upid->pid_chain);
		if (--upid->ns->nr_hashed == 0)
			schedule_work(&upid->ns->proc_work);
	}
	spin_unlock_irqrestore(&pidmap_lock, flags);

	for (i = 0; i <= pid->level; i++)
		free_pidmap(pid->numbers + i);

	call_rcu(&pid->rcu, delayed_put_pid);
}
Example No. 27
/* called with BH disabled */
static void
__instance_destroy(struct nfulnl_instance *inst)
{
	/* first pull it out of the global list */
	hlist_del_rcu(&inst->hlist);

	/* then flush all pending packets from skb */

	spin_lock(&inst->lock);

	/* lockless readers wont be able to use us */
	inst->copy_mode = NFULNL_COPY_DISABLED;

	if (inst->skb)
		__nfulnl_flush(inst);
	spin_unlock(&inst->lock);

	/* and finally put the refcount */
	instance_put(inst);
}
Example No. 28
/*
 * rfs_rule_destroy_mac_rule
 */
int rfs_rule_destroy_mac_rule(uint8_t *addr, uint32_t is_static)
{
	struct hlist_head *head;
	struct rfs_rule_entry *re;
	struct rfs_rule *rr = &__rr;
	uint16_t cpu;
	uint32_t type = RFS_RULE_TYPE_MAC_RULE;

	head = &rr->hash[rfs_rule_hash(type, addr)];

	spin_lock_bh(&rr->hash_lock);
	hlist_for_each_entry_rcu(re, head, hlist) {
		if (type != re->type)
			continue;

		if (memcmp(re->mac, addr, ETH_ALEN) == 0) {
			break;
		}
	}

	if (!re || (re->is_static && !is_static)) {
		spin_unlock_bh(&rr->hash_lock);
		return 0;
	}

	hlist_del_rcu(&re->hlist);
	cpu = re->cpu;

	RFS_DEBUG("Remove rules: %pM, cpu %d\n", addr, cpu);
	if (rfs_ess_update_mac_rule(re, RPS_NO_CPU) < 0) {
		RFS_WARN("Failed to update mac rules: %pM, cpu %d\n", addr, cpu);
	}

	re->cpu = RPS_NO_CPU;
	call_rcu(&re->rcu, rfs_rule_rcu_free);

	__rfs_rule_update_iprule_by_mac(addr, RPS_NO_CPU);
	spin_unlock_bh(&rr->hash_lock);

	return 0;
}
Example No. 29
void ovs_flow_tbl_destroy(struct flow_table *table)
{
	int i;

	if (!table)
		return;

	if (table->keep_flows)
		goto skip_flows;

	for (i = 0; i < table->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(table->buckets, i);
		struct hlist_node *node, *n;
		int ver = table->node_ver;

		hlist_for_each_entry_safe(flow, node, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow);
		}
	}
Example No. 30
static void table_instance_destroy(struct table_instance *ti, bool deferred)
{
	int i;

	if (!ti)
		return;

	if (ti->keep_flows)
		goto skip_flows;

	for (i = 0; i < ti->n_buckets; i++) {
		struct sw_flow *flow;
		struct hlist_head *head = flex_array_get(ti->buckets, i);
		struct hlist_node *n;
		int ver = ti->node_ver;

		hlist_for_each_entry_safe(flow, n, head, hash_node[ver]) {
			hlist_del_rcu(&flow->hash_node[ver]);
			ovs_flow_free(flow, deferred);
		}
	}