static int __vlan_del(struct net_port_vlans *v, u16 vid)
{
	if (!test_bit(vid, v->vlan_bitmap))
		return -EINVAL;

	__vlan_delete_pvid(v, vid);
	clear_bit(vid, v->untagged_bitmap);

	if (v->port_idx && vid) {
		struct net_device *dev = v->parent.port->dev;
		const struct net_device_ops *ops = dev->netdev_ops;

		if (dev->features & NETIF_F_HW_VLAN_CTAG_FILTER)
			ops->ndo_vlan_rx_kill_vid(dev, htons(ETH_P_8021Q), vid);
	}

	clear_bit(vid, v->vlan_bitmap);
	v->num_vlans--;
	if (bitmap_empty(v->vlan_bitmap, BR_VLAN_BITMAP_LEN)) {
		if (v->port_idx)
			rcu_assign_pointer(v->parent.port->vlan_info, NULL);
		else
			rcu_assign_pointer(v->parent.br->vlan_info, NULL);
		kfree_rcu(v, rcu);
	}
	return 0;
}
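Every example on this page follows the same retire pattern: unpublish an RCU-protected object, then hand it to kfree_rcu() so the actual kfree() is deferred until a grace period has elapsed and concurrent readers have drained. A minimal sketch of that pattern, with hypothetical names (struct foo, foo_table and foo_del are illustrative, not kernel APIs), assuming the caller holds the update-side lock:

struct foo {
	int data;
	struct rcu_head rcu;		/* storage used for the deferred free */
};

static struct foo __rcu *foo_table;	/* pointer seen by RCU readers */

static void foo_del(void)
{
	struct foo *old;

	old = rcu_dereference_protected(foo_table, 1);
	RCU_INIT_POINTER(foo_table, NULL);	/* unpublish first */
	if (old)
		kfree_rcu(old, rcu);		/* freed after a grace period */
}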
Example #2
static void ieee80211_remove_tid_tx(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;

	lockdep_assert_held(&sta->ampdu_mlme.mtx);
	lockdep_assert_held(&sta->lock);

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * When we get here, the TX path will not be lockless any more wrt.
	 * aggregation, since the OPERATIONAL bit has long been cleared.
	 * Thus it will block on getting the lock, if it occurs. So if we
	 * stop the queue now, we will not get any more packets, and any
	 * that might be being processed will wait for us here, thereby
	 * guaranteeing that no packets go to the tid_tx pending queue any
	 * more.
	 */

	ieee80211_agg_splice_packets(sta->sdata, tid_tx, tid);

	/* future packets must not find the tid_tx struct any more */
	ieee80211_assign_tid_tx(sta, tid, NULL);

	ieee80211_agg_splice_finish(sta->sdata, tid);

	kfree_rcu(tid_tx, rcu_head);
}
Example #3
static void tcf_police_destroy(struct tcf_police *p)
{
	unsigned int h = tcf_hash(p->tcf_index, POL_TAB_MASK);
	struct tcf_common **p1p;

	for (p1p = &tcf_police_ht[h]; *p1p; p1p = &(*p1p)->tcfc_next) {
		if (*p1p == &p->common) {
			write_lock_bh(&police_lock);
			*p1p = p->tcf_next;
			write_unlock_bh(&police_lock);
			gen_kill_estimator(&p->tcf_bstats,
					   &p->tcf_rate_est);
			if (p->tcfp_R_tab)
				qdisc_put_rtab(p->tcfp_R_tab);
			if (p->tcfp_P_tab)
				qdisc_put_rtab(p->tcfp_P_tab);
			/*
			 * gen_estimator est_timer() might access p->tcf_lock
			 * or bstats, wait a RCU grace period before freeing p
			 */
			kfree_rcu(p, tcf_rcu);
			return;
		}
	}
	WARN_ON(1);
}
Example #4
/*
 * update a user defined key
 * - the key's semaphore is write-locked
 */
int user_update(struct key *key, const void *data, size_t datalen)
{
	struct user_key_payload *upayload, *zap;
	int ret;

	ret = -EINVAL;
	if (datalen <= 0 || datalen > 32767 || !data)
		goto error;

	/* construct a replacement payload */
	ret = -ENOMEM;
	upayload = kmalloc(sizeof(*upayload) + datalen, GFP_KERNEL);
	if (!upayload)
		goto error;

	upayload->datalen = datalen;
	memcpy(upayload->data, data, datalen);

	/* check the quota and attach the new data */
	zap = upayload;

	ret = key_payload_reserve(key, datalen);

	if (ret == 0) {
		/* attach the new data, displacing the old */
		zap = key->payload.data;
		rcu_assign_pointer(key->payload.data, upayload);
		key->expiry = 0;
	}

	kfree_rcu(zap, rcu);

error:
	return ret;
}
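The displaced payload above has to go through kfree_rcu() because readers dereference key->payload.data under rcu_read_lock() rather than under the key semaphore. A hedged reader-side sketch (not the actual key_type_user read path, only the shape of such a reader):

static size_t user_payload_datalen(const struct key *key)
{
	struct user_key_payload *upayload;
	size_t datalen;

	rcu_read_lock();
	upayload = rcu_dereference(key->payload.data);
	datalen = upayload ? upayload->datalen : 0;
	rcu_read_unlock();		/* grace period may end only after this */

	return datalen;
}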
static struct sock_reuseport *reuseport_grow(struct sock_reuseport *reuse)
{
	struct sock_reuseport *more_reuse;
	u32 more_socks_size, i;

	more_socks_size = reuse->max_socks * 2U;
	if (more_socks_size > U16_MAX)
		return NULL;

	more_reuse = __reuseport_alloc(more_socks_size);
	if (!more_reuse)
		return NULL;

	more_reuse->max_socks = more_socks_size;
	more_reuse->num_socks = reuse->num_socks;
	more_reuse->prog = reuse->prog;

	memcpy(more_reuse->socks, reuse->socks,
	       reuse->num_socks * sizeof(struct sock *));

	for (i = 0; i < reuse->num_socks; ++i)
		rcu_assign_pointer(reuse->socks[i]->sk_reuseport_cb,
				   more_reuse);

	/* Note: we use kfree_rcu here instead of reuseport_free_rcu so
	 * that reuse and more_reuse can temporarily share a reference
	 * to prog.
	 */
	kfree_rcu(reuse, rcu);
	return more_reuse;
}
void ieee80211_tx_ba_session_handle_start(struct sta_info *sta, int tid)
{
	struct tid_ampdu_tx *tid_tx;
	struct ieee80211_local *local = sta->local;
	struct ieee80211_sub_if_data *sdata = sta->sdata;
	u16 start_seq_num;
	int ret;

	tid_tx = rcu_dereference_protected_tid_tx(sta, tid);

	/*
	 * Start queuing up packets for this aggregation session.
	 * We're going to release them once the driver is OK with
	 * that.
	 */
	clear_bit(HT_AGG_STATE_WANT_START, &tid_tx->state);

	/*
	 * Make sure no packets are being processed. This ensures that
	 * we have a valid starting sequence number and that in-flight
	 * packets have been flushed out and no packets for this TID
	 * will go into the driver during the ampdu_action call.
	 */
	synchronize_net();

	start_seq_num = sta->tid_seq[tid] >> 4;

	ret = drv_ampdu_action(local, sdata, IEEE80211_AMPDU_TX_START,
			       &sta->sta, tid, &start_seq_num, 0);
	if (ret) {
#ifdef CONFIG_MAC80211_HT_DEBUG
		printk(KERN_DEBUG "BA request denied - HW unavailable for"
					" tid %d\n", tid);
#endif
		spin_lock_bh(&sta->lock);
		ieee80211_agg_splice_packets(local, tid_tx, tid);
		ieee80211_assign_tid_tx(sta, tid, NULL);
		ieee80211_agg_splice_finish(local, tid);
		spin_unlock_bh(&sta->lock);

		kfree_rcu(tid_tx, rcu_head);
		return;
	}

	/* activate the timer for the recipient's addBA response */
	mod_timer(&tid_tx->addba_resp_timer, jiffies + ADDBA_RESP_INTERVAL);
#ifdef CONFIG_MAC80211_HT_DEBUG
	printk(KERN_DEBUG "activated addBA response timer on tid %d\n", tid);
#endif

	spin_lock_bh(&sta->lock);
	sta->ampdu_mlme.addba_req_num[tid]++;
	spin_unlock_bh(&sta->lock);

	/* send AddBA request */
	ieee80211_send_addba_request(sdata, sta->sta.addr, tid,
				     tid_tx->dialog_token, start_seq_num,
				     local->hw.max_tx_aggregation_subframes,
				     tid_tx->timeout);
}
/**
 * sel_netnode_insert - Insert a new node into the table
 * @node: the new node record
 *
 * Description:
 * Add a new node record to the network address hash table.
 *
 */
static void sel_netnode_insert(struct sel_netnode *node)
{
    unsigned int idx;

    switch (node->nsec.family) {
    case PF_INET:
        idx = sel_netnode_hashfn_ipv4(node->nsec.addr.ipv4);
        break;
    case PF_INET6:
        idx = sel_netnode_hashfn_ipv6(&node->nsec.addr.ipv6);
        break;
    default:
        BUG();
    }

    /* we need to impose a limit on the growth of the hash table so check
     * this bucket to make sure it is within the specified bounds */
    list_add_rcu(&node->list, &sel_netnode_hash[idx].list);
    if (sel_netnode_hash[idx].size == SEL_NETNODE_HASH_BKT_LIMIT) {
        struct sel_netnode *tail;
        tail = list_entry(
                   rcu_dereference(sel_netnode_hash[idx].list.prev),
                   struct sel_netnode, list);
        list_del_rcu(&tail->list);
        kfree_rcu(tail, rcu);
    } else
        sel_netnode_hash[idx].size += 1;
}
Example #8
/**
 * bus1_queue_entry_free() - free a queue entry
 * @entry:	entry to free, or NULL
 *
 * This destroys an existing queue entry and releases all associated resources.
 * Any files that were put into entry->files are released as well.
 *
 * If NULL is passed, this is a no-op.
 *
 * The caller must make sure the queue-entry is unlinked before calling this.
 * Furthermore, the slice must be released and reset to NULL by the caller.
 *
 * Return: NULL is returned.
 */
struct bus1_queue_entry *
bus1_queue_entry_free(struct bus1_queue_entry *entry)
{
	size_t i;

	if (!entry)
		return NULL;

	for (i = 0; i < entry->n_files; ++i)
		if (entry->files[i])
			fput(entry->files[i]);

	WARN_ON(entry->slice);
	WARN_ON(entry->transaction.peer);
	WARN_ON(!RB_EMPTY_NODE(&entry->transaction.rb));

	/*
	 * Entry must be unlinked by the caller. The rb-storage is re-used by
	 * the rcu-head to enforce delayed memory release. This guarantees
	 * that the entry is accessible via rcu-protected readers.
	 */
	WARN_ON(!RB_EMPTY_NODE(&entry->rb));
	kfree_rcu(entry, rcu);

	return NULL;
}
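The comment above relies on overlaying the rcu_head on storage that is dead once the entry has been unlinked. A hedged sketch of how such an overlay is typically declared (illustrative field layout, not necessarily bus1's actual definition):

struct example_entry {
	union {
		struct rb_node rb;	/* used while the entry is linked */
		struct rcu_head rcu;	/* reused by kfree_rcu() after unlink */
	};
	/* ... payload ... */
};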
Example #9
static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node) {
			rc = -ENOMEM;
			return NULL;
		}

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}
Example #10
static void
reservation_object_add_shared_replace(struct reservation_object *obj,
				      struct reservation_object_list *old,
				      struct reservation_object_list *fobj,
				      struct fence *fence)
{
	unsigned i;
	struct fence *old_fence = NULL;

	fence_get(fence);

	if (!old) {
		RCU_INIT_POINTER(fobj->shared[0], fence);
		fobj->shared_count = 1;
		goto done;
	}

	/*
	 * no need to bump fence refcounts, rcu_read access
	 * requires the use of kref_get_unless_zero, and the
	 * references from the old struct are carried over to
	 * the new.
	 */
	fobj->shared_count = old->shared_count;

	for (i = 0; i < old->shared_count; ++i) {
		struct fence *check;

		check = rcu_dereference_protected(old->shared[i],
						reservation_object_held(obj));

		if (!old_fence && check->context == fence->context) {
			old_fence = check;
			RCU_INIT_POINTER(fobj->shared[i], fence);
		} else
			RCU_INIT_POINTER(fobj->shared[i], check);
	}
	if (!old_fence) {
		RCU_INIT_POINTER(fobj->shared[fobj->shared_count], fence);
		fobj->shared_count++;
	}

done:
	preempt_disable();
	write_seqcount_begin(&obj->seq);
	/*
	 * RCU_INIT_POINTER can be used here,
	 * seqcount provides the necessary barriers
	 */
	RCU_INIT_POINTER(obj->fence, fobj);
	write_seqcount_end(&obj->seq);
	preempt_enable();

	if (old)
		kfree_rcu(old, rcu);

	if (old_fence)
		fence_put(old_fence);
}
Example #11
static void tcf_skbmod_cleanup(struct tc_action *a, int bind)
{
	struct tcf_skbmod *d = to_skbmod(a);
	struct tcf_skbmod_params  *p;

	p = rcu_dereference_protected(d->skbmod_p, 1);
	kfree_rcu(p, rcu);
}
Example #12
static void xprt_switch_free(struct kref *kref)
{
	struct rpc_xprt_switch *xps = container_of(kref,
			struct rpc_xprt_switch, xps_kref);

	xprt_switch_free_entries(xps);
	kfree_rcu(xps, xps_rcu);
}
/**
 * batadv_dat_entry_release - release dat_entry from lists and queue for free
 *  after rcu grace period
 * @ref: kref pointer of the dat_entry
 */
static void batadv_dat_entry_release(struct kref *ref)
{
	struct batadv_dat_entry *dat_entry;

	dat_entry = container_of(ref, struct batadv_dat_entry, refcount);

	kfree_rcu(dat_entry, rcu);
}
Example #14
static void geneve_del_work(struct work_struct *work)
{
	struct geneve_sock *gs = container_of(work, struct geneve_sock,
					      del_work);

	udp_tunnel_sock_release(gs->sock);
	kfree_rcu(gs, rcu);
}
Example #15
/**
 * gen_new_estimator - create a new rate estimator
 * @bstats: basic statistics
 * @cpu_bstats: bstats per cpu
 * @rate_est: rate estimator statistics
 * @stats_lock: statistics lock
 * @running: qdisc running seqcount
 * @opt: rate estimator configuration TLV
 *
 * Creates a new rate estimator with &bstats as source and &rate_est
 * as destination. A new timer with the interval specified in the
 * configuration TLV is created. Upon each interval, the latest statistics
 * will be read from &bstats and the estimated rate will be stored in
 * &rate_est with the statistics lock grabbed during this period.
 *
 * Returns 0 on success or a negative error code.
 *
 */
int gen_new_estimator(struct gnet_stats_basic_packed *bstats,
		      struct gnet_stats_basic_cpu __percpu *cpu_bstats,
		      struct net_rate_estimator __rcu **rate_est,
		      spinlock_t *stats_lock,
		      seqcount_t *running,
		      struct nlattr *opt)
{
	struct gnet_estimator *parm = nla_data(opt);
	struct net_rate_estimator *old, *est;
	struct gnet_stats_basic_packed b;
	int intvl_log;

	if (nla_len(opt) < sizeof(*parm))
		return -EINVAL;

	/* allowed timer periods are :
	 * -2 : 250ms,   -1 : 500ms,    0 : 1 sec
	 *  1 : 2 sec,    2 : 4 sec,    3 : 8 sec
	 */
	if (parm->interval < -2 || parm->interval > 3)
		return -EINVAL;

	est = kzalloc(sizeof(*est), GFP_KERNEL);
	if (!est)
		return -ENOBUFS;

	seqcount_init(&est->seq);
	intvl_log = parm->interval + 2;
	est->bstats = bstats;
	est->stats_lock = stats_lock;
	est->running  = running;
	est->ewma_log = parm->ewma_log;
	est->intvl_log = intvl_log;
	est->cpu_bstats = cpu_bstats;

	if (stats_lock)
		local_bh_disable();
	est_fetch_counters(est, &b);
	if (stats_lock)
		local_bh_enable();
	est->last_bytes = b.bytes;
	est->last_packets = b.packets;
	old = rcu_dereference_protected(*rate_est, 1);
	if (old) {
		del_timer_sync(&old->timer);
		est->avbps = old->avbps;
		est->avpps = old->avpps;
	}

	est->next_jiffies = jiffies + ((HZ/4) << intvl_log);
	timer_setup(&est->timer, est_timer, 0);
	mod_timer(&est->timer, est->next_jiffies);

	rcu_assign_pointer(*rate_est, est);
	if (old)
		kfree_rcu(old, rcu);
	return 0;
}
Example #16
void
bl_free_deviceid_node(struct nfs4_deviceid_node *d)
{
	struct pnfs_block_dev *dev =
		container_of(d, struct pnfs_block_dev, node);

	bl_free_device(dev);
	kfree_rcu(dev, node.rcu);
}
Example #17
static ssize_t store_rps_map(struct netdev_rx_queue *queue,
			     const char *buf, size_t len)
{
	struct rps_map *old_map, *map;
	cpumask_var_t mask;
	int err, cpu, i;
	static DEFINE_MUTEX(rps_map_mutex);

	if (!capable(CAP_NET_ADMIN))
		return -EPERM;

	if (!alloc_cpumask_var(&mask, GFP_KERNEL))
		return -ENOMEM;

	err = bitmap_parse(buf, len, cpumask_bits(mask), nr_cpumask_bits);
	if (err) {
		free_cpumask_var(mask);
		return err;
	}

	map = kzalloc(max_t(unsigned int,
			    RPS_MAP_SIZE(cpumask_weight(mask)), L1_CACHE_BYTES),
		      GFP_KERNEL);
	if (!map) {
		free_cpumask_var(mask);
		return -ENOMEM;
	}

	i = 0;
	for_each_cpu_and(cpu, mask, cpu_online_mask)
		map->cpus[i++] = cpu;

	if (i) {
		map->len = i;
	} else {
		kfree(map);
		map = NULL;
	}

	mutex_lock(&rps_map_mutex);
	old_map = rcu_dereference_protected(queue->rps_map,
					    mutex_is_locked(&rps_map_mutex));
	rcu_assign_pointer(queue->rps_map, map);

	if (map)
		static_key_slow_inc(&rps_needed);
	if (old_map)
		static_key_slow_dec(&rps_needed);

	mutex_unlock(&rps_map_mutex);

	if (old_map)
		kfree_rcu(old_map, rcu);

	free_cpumask_var(mask);
	return len;
}
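kfree_rcu(old_map, rcu) above is shorthand for queueing a call_rcu() callback that does nothing but kfree(). The equivalent open-coded form would look roughly like this (rps_map_free_rcu is a hypothetical callback name; kfree_rcu() exists precisely so it never has to be written):

static void rps_map_free_rcu(struct rcu_head *head)
{
	struct rps_map *map = container_of(head, struct rps_map, rcu);

	kfree(map);
}

with call_rcu(&old_map->rcu, rps_map_free_rcu) replacing the kfree_rcu() call.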
Example #18
/*
 * Called from a server thread as it's exiting. Caller must hold the "service
 * mutex" for the service.
 */
void
svc_rqst_free(struct svc_rqst *rqstp)
{
	svc_release_buffer(rqstp);
	kfree(rqstp->rq_resp);
	kfree(rqstp->rq_argp);
	kfree(rqstp->rq_auth_data);
	kfree_rcu(rqstp, rq_rcu_head);
}
Example #19
static void ovl_dentry_release(struct dentry *dentry)
{
	struct ovl_entry *oe = dentry->d_fsdata;

	if (oe) {
		ovl_entry_stack_free(oe);
		kfree_rcu(oe, rcu);
	}
}
Example #20
static void fl_free(struct ip6_flowlabel *fl)
{
	if (fl) {
		if (fl->share == IPV6_FL_S_PROCESS)
			put_pid(fl->owner.pid);
		kfree(fl->opt);
		kfree_rcu(fl, rcu);
	}
}
static void tunnel_key_release_params(struct tcf_tunnel_key_params *p)
{
	if (!p)
		return;
	if (p->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
		dst_release(&p->tcft_enc_metadata->dst);

	kfree_rcu(p, rcu);
}
Example #22
static void bss_free(struct cfg80211_internal_bss *bss)
{
	struct cfg80211_bss_ies *ies;

	if (WARN_ON(atomic_read(&bss->hold)))
		return;

	ies = (void *)rcu_access_pointer(bss->pub.beacon_ies);
	if (ies && !bss->pub.hidden_beacon_bss)
		kfree_rcu(ies, rcu_head);
	ies = (void *)rcu_access_pointer(bss->pub.proberesp_ies);
	if (ies)
		kfree_rcu(ies, rcu_head);

	if (!list_empty(&bss->hidden_list))
		list_del(&bss->hidden_list);

	kfree(bss);
}
Example #23
static void ovl_dentry_release(struct dentry *dentry)
{
    struct ovl_entry *oe = dentry->d_fsdata;

    if (oe) {
        dput(oe->__upperdentry);
        dput(oe->lowerdentry);
        kfree_rcu(oe, rcu);
    }
}
Example #24
/**
 * gen_kill_estimator - remove a rate estimator
 * @rate_est: rate estimator
 *
 * Removes the rate estimator.
 *
 */
void gen_kill_estimator(struct net_rate_estimator __rcu **rate_est)
{
	struct net_rate_estimator *est;

	est = xchg((__force struct net_rate_estimator **)rate_est, NULL);
	if (est) {
		del_timer_sync(&est->timer);
		kfree_rcu(est, rcu);
	}
}
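gen_kill_estimator() clears the __rcu pointer with xchg() and retires the estimator through kfree_rcu() because the data path may still be reading it. A hedged caller sketch (struct my_class is hypothetical; real users such as qdisc classes embed the __rcu estimator pointer and pass its address at destroy time):

struct my_class {
	struct net_rate_estimator __rcu *rate_est;
	/* ... */
};

static void my_class_destroy(struct my_class *cl)
{
	gen_kill_estimator(&cl->rate_est);	/* clears and kfree_rcu()s it */
}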
Example #25
static void __vlan_flush(struct net_port_vlans *v)
{
	smp_wmb();
	v->pvid = 0;
	bitmap_zero(v->vlan_bitmap, VLAN_N_VID);
	if (v->port_idx)
		RCU_INIT_POINTER(v->parent.port->vlan_info, NULL);
	else
		RCU_INIT_POINTER(v->parent.br->vlan_info, NULL);
	kfree_rcu(v, rcu);
}
static void __vlan_flush(struct net_port_vlans *v)
{
	smp_wmb();
	v->pvid = 0;
	bitmap_zero(v->vlan_bitmap, BR_VLAN_BITMAP_LEN);
	if (v->port_idx)
		rcu_assign_pointer(v->parent.port->vlan_info, NULL);
	else
		rcu_assign_pointer(v->parent.br->vlan_info, NULL);
	kfree_rcu(v, rcu);
}
Example #27
static void ip_vs_sh_done_svc(struct ip_vs_service *svc)
{
	struct ip_vs_sh_state *s = svc->sched_data;

	/* got to clean up hash buckets here */
	ip_vs_sh_flush(s);

	/* release the table itself */
	kfree_rcu(s, rcu_head);
	IP_VS_DBG(6, "SH hash table (memory=%Zdbytes) released\n",
		  sizeof(struct ip_vs_sh_bucket)*IP_VS_SH_TAB_SIZE);
}
/*
 * dispose of the links from a revoked keyring
 * - called with the key sem write-locked
 */
void user_revoke(struct key *key)
{
	struct user_key_payload *upayload = key->payload.data;

	/* clear the quota */
	key_payload_reserve(key, 0);

	if (upayload) {
		rcu_assign_keypointer(key, NULL);
		kfree_rcu(upayload, rcu);
	}
}
Example #29
void lwtstate_free(struct lwtunnel_state *lws)
{
	const struct lwtunnel_encap_ops *ops = lwtun_encaps[lws->type];

	if (ops->destroy_state) {
		ops->destroy_state(lws);
		kfree_rcu(lws, rcu);
	} else {
		kfree(lws);
	}
	module_put(ops->owner);
}
Example #30
static void tunnel_key_release(struct tc_action *a, int bind)
{
	struct tcf_tunnel_key *t = to_tunnel_key(a);
	struct tcf_tunnel_key_params *params;

	params = rcu_dereference_protected(t->params, 1);

	if (params->tcft_action == TCA_TUNNEL_KEY_ACT_SET)
		dst_release(&params->tcft_enc_metadata->dst);

	kfree_rcu(params, rcu);
}