Example #1
/**
 * qib_lkey_ok - check IB SGE for validity and initialize
 * @rkt: table containing lkey to check SGE against
 * @pd: protection domain
 * @isge: outgoing internal SGE
 * @sge: SGE to check
 * @acc: access flags
 *
 * Return 1 if valid and successful, otherwise returns 0.
 *
 * increments the reference count upon success
 *
 * Check the IB SGE for validity and initialize our internal version
 * of it.
 */
int qib_lkey_ok(struct qib_lkey_table *rkt, struct qib_pd *pd,
		struct qib_sge *isge, struct ib_sge *sge, int acc)
{
	struct qib_mregion *mr;
	unsigned n, m;
	size_t off;

	/*
	 * We use LKEY == zero for kernel virtual addresses
	 * (see qib_get_dma_mr and qib_dma.c).
	 */
	rcu_read_lock();
	if (sge->lkey == 0) {
		struct qib_ibdev *dev = to_idev(pd->ibpd.device);

		if (pd->user)
			goto bail;
		mr = rcu_dereference(dev->dma_mr);
		if (!mr)
			goto bail;
		if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
			goto bail;
		rcu_read_unlock();

		isge->mr = mr;
		isge->vaddr = (void *) sge->addr;
		isge->length = sge->length;
		isge->sge_length = sge->length;
		isge->m = 0;
		isge->n = 0;
		goto ok;
	}
	mr = rcu_dereference(
		rkt->table[(sge->lkey >> (32 - ib_qib_lkey_table_size))]);
	if (unlikely(!mr || mr->lkey != sge->lkey || mr->pd != &pd->ibpd))
		goto bail;

	off = sge->addr - mr->user_base;
	if (unlikely(sge->addr < mr->user_base ||
		     off + sge->length > mr->length ||
		     (mr->access_flags & acc) != acc))
		goto bail;
	if (unlikely(!atomic_inc_not_zero(&mr->refcount)))
		goto bail;
	rcu_read_unlock();

	off += mr->offset;
	if (mr->page_shift) {
		/*
		 * page sizes are uniform power of 2 so no loop is necessary
		 * entries_spanned_by_off is the number of times the loop
		 * below would have executed.
		 */
		size_t entries_spanned_by_off;

		entries_spanned_by_off = off >> mr->page_shift;
		off -= (entries_spanned_by_off << mr->page_shift);
		m = entries_spanned_by_off / QIB_SEGSZ;
		n = entries_spanned_by_off % QIB_SEGSZ;
	} else {
		m = 0;
		n = 0;
		while (off >= mr->map[m]->segs[n].length) {
			off -= mr->map[m]->segs[n].length;
			n++;
			if (n >= QIB_SEGSZ) {
				m++;
				n = 0;
			}
		}
	}
	isge->mr = mr;
	isge->vaddr = mr->map[m]->segs[n].vaddr + off;
	isge->length = mr->map[m]->segs[n].length - off;
	isge->sge_length = sge->length;
	isge->m = m;
	isge->n = n;
ok:
	return 1;
bail:
	rcu_read_unlock();
	return 0;
}
Example #2
/*
 * this lookup function only uses RCU to protect the skiplist indexing
 * structs.  The actual slots are protected by full locks.
 */
struct sl_slot *skiplist_lookup(struct sl_list *list, unsigned long key,
				unsigned long size)
{
	struct sl_leaf *leaf;
	struct sl_slot *slot_ret = NULL;
	struct sl_node *p;
	int slot;
	int ret;

again:
	rcu_read_lock();
	leaf = __skiplist_lookup_leaf(list, &p, key, size);
	if (leaf) {
		sl_lock_node(&leaf->node);
		if (!verify_key_in_leaf(leaf, key, size)) {
			sl_unlock_node(&leaf->node);
			rcu_read_unlock();
			goto again;
		}
		ret = skiplist_search_leaf(leaf, key, size, &slot);
		if (ret == 0) {
			slot_ret = leaf->ptrs[slot];
			if (atomic_inc_not_zero(&slot_ret->refs) == 0)
				slot_ret = NULL;
		}
		sl_unlock_node(&leaf->node);
	}
	rcu_read_unlock();
	return slot_ret;

}
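The examples above share one shape: find the object under rcu_read_lock(), then try to take a reference with atomic_inc_not_zero(), treating failure as "the object is already being torn down". The sketch below reduces that shape to a minimal, hedged form; struct obj, obj_find_get() and the key field are hypothetical names, not taken from any of the modules quoted here.

struct obj {
	struct hlist_node node;
	atomic_t refcount;
	u32 key;
};

/* look up key under RCU; return the object with a reference held, or NULL */
static struct obj *obj_find_get(struct hlist_head *head, u32 key)
{
	struct obj *o, *ret = NULL;

	rcu_read_lock();
	hlist_for_each_entry_rcu(o, head, node) {
		if (o->key != key)
			continue;

		/* refcount already dropped to zero: skip the dying object */
		if (!atomic_inc_not_zero(&o->refcount))
			continue;

		ret = o;
		break;
	}
	rcu_read_unlock();

	return ret;
}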
Example #3
/**
 * batadv_dat_entry_hash_find - looks for a given dat_entry in the local hash
 * table
 * @bat_priv: the bat priv with all the soft interface information
 * @ip: search key
 *
 * Returns the dat_entry if found, NULL otherwise
 */
static struct batadv_dat_entry *
batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip)
{
	struct hlist_head *head;
	struct hlist_node *node;
	struct batadv_dat_entry *dat_entry, *dat_entry_tmp = NULL;
	struct batadv_hashtable *hash = bat_priv->dat.hash;
	uint32_t index;

	if (!hash)
		return NULL;

	index = batadv_hash_dat(&ip, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(dat_entry, node, head, hash_entry) {
		if (dat_entry->ip != ip)
			continue;

		if (!atomic_inc_not_zero(&dat_entry->refcount))
			continue;

		dat_entry_tmp = dat_entry;
		break;
	}
	rcu_read_unlock();

	return dat_entry_tmp;
}
Example #4
/* try to find an element in the hash, return NULL if not found */
struct ws_sta *ws_hash_find(struct ws_hash *hash, u8 *mac)
{
	struct ws_sta *res = NULL, *tmp_sta;
	spinlock_t *list_lock; /* spinlock to protect write access */
	struct hlist_head *head;
	u32 index;

	index = ws_hash_choose(mac);
	head = &hash->table[index];
	list_lock = &hash->list_locks[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(tmp_sta, head, hash_entry) {
		if (!ether_addr_equal(mac, tmp_sta->mac))
			continue;

		if (!atomic_inc_not_zero(&tmp_sta->refcount))
			continue;

		res = tmp_sta;
		break;
	}
	rcu_read_unlock();

	return res;
}
Example #5
/**
 * batadv_claim_hash_find
 * @bat_priv: the bat priv with all the soft interface information
 * @data: search data (may be local/static data)
 *
 * looks for a claim in the hash, and returns it if found
 * or NULL otherwise.
 */
static struct batadv_bla_claim
*batadv_claim_hash_find(struct batadv_priv *bat_priv,
			struct batadv_bla_claim *data)
{
	struct batadv_hashtable *hash = bat_priv->bla.claim_hash;
	struct hlist_head *head;
	struct batadv_bla_claim *claim;
	struct batadv_bla_claim *claim_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	index = batadv_choose_claim(data, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(claim, head, hash_entry) {
		if (!batadv_compare_claim(&claim->hash_entry, data))
			continue;

		if (!atomic_inc_not_zero(&claim->refcount))
			continue;

		claim_tmp = claim;
		break;
	}
	rcu_read_unlock();

	return claim_tmp;
}
Example #6
/**
 * batadv_dat_entry_hash_find - look for a given dat_entry in the local hash
 * table
 * @bat_priv: the bat priv with all the soft interface information
 * @ip: search key
 * @vid: VLAN identifier
 *
 * Returns the dat_entry if found, NULL otherwise.
 */
static struct batadv_dat_entry *
batadv_dat_entry_hash_find(struct batadv_priv *bat_priv, __be32 ip,
			   unsigned short vid)
{
	struct hlist_head *head;
	struct batadv_dat_entry to_find, *dat_entry, *dat_entry_tmp = NULL;
	struct batadv_hashtable *hash = bat_priv->dat.hash;
	u32 index;

	if (!hash)
		return NULL;

	to_find.ip = ip;
	to_find.vid = vid;

	index = batadv_hash_dat(&to_find, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(dat_entry, head, hash_entry) {
		if (dat_entry->ip != ip)
			continue;

		if (!atomic_inc_not_zero(&dat_entry->refcount))
			continue;

		dat_entry_tmp = dat_entry;
		break;
	}
	rcu_read_unlock();

	return dat_entry_tmp;
}
Example #7
int ptrace_get_breakpoints(struct task_struct *tsk)
{
	if (atomic_inc_not_zero(&tsk->ptrace_bp_refcnt))
		return 0;

	return -1;
}
Example #8
static struct sock *__llc_lookup_listener(struct llc_sap *sap,
					  struct llc_addr *laddr)
{
	struct sock *rc;
	struct hlist_nulls_node *node;
	int slot = llc_sk_laddr_hashfn(sap, laddr);
	struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];

	rcu_read_lock();
again:
	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
		if (llc_listener_match(sap, laddr, rc)) {
			/* Extra checks required by SLAB_DESTROY_BY_RCU */
			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
				goto again;
			if (unlikely(llc_sk(rc)->sap != sap ||
				     !llc_listener_match(sap, laddr, rc))) {
				sock_put(rc);
				continue;
			}
			goto found;
		}
	}
	rc = NULL;
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (unlikely(get_nulls_value(node) != slot))
		goto again;
found:
	rcu_read_unlock();
	return rc;
}
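The "extra checks" above are what make SLAB_DESTROY_BY_RCU correct: with that flag, the allocator may reuse the memory for a new object of the same type before the RCU grace period ends, so the match has to be re-verified after the reference is taken. Below is a hedged sketch of that order of operations, reusing the hypothetical struct obj from the earlier sketch; obj_cache and obj_put() are likewise illustrative names.

static struct kmem_cache *obj_cache;	/* created with SLAB_DESTROY_BY_RCU */

static void obj_put(struct obj *o)
{
	if (atomic_dec_and_test(&o->refcount))
		kmem_cache_free(obj_cache, o);
}

static struct obj *obj_find_get_reuse_safe(struct hlist_head *head, u32 key)
{
	struct obj *o;

	rcu_read_lock();
	hlist_for_each_entry_rcu(o, head, node) {
		if (o->key != key)
			continue;
		/* the slot may be freed and reused under us ... */
		if (!atomic_inc_not_zero(&o->refcount))
			continue;
		/* ... so re-check identity now that the object is pinned */
		if (unlikely(o->key != key)) {
			obj_put(o);
			continue;
		}
		rcu_read_unlock();
		return o;
	}
	rcu_read_unlock();
	return NULL;
}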
Example #9
static void *m_start(struct seq_file *m, loff_t *pos)
{
	struct proc_maps_private *priv = m->private;
	struct mm_struct *mm;
	struct rb_node *p;
	loff_t n = *pos;

	/* pin the task and mm whilst we play with them */
	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	/* start from the Nth VMA */
	for (p = rb_first(&mm->mm_rb); p; p = rb_next(p))
		if (n-- == 0)
			return p;

	up_read(&mm->mmap_sem);
	mmput(mm);
	return NULL;
}
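m_start() pins the mm only if mm_users has not already dropped to zero, and every exit path that took the reference releases it with mmput(). A hedged reduction of that pairing rule follows; with_task_mm() is a hypothetical helper, not part of fs/proc.

static int with_task_mm(struct mm_struct *mm)
{
	/* refuse mms that are already being torn down */
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return -EINVAL;

	down_read(&mm->mmap_sem);
	/* ... walk mm->mmap / mm->mm_rb here ... */
	up_read(&mm->mmap_sem);

	mmput(mm);	/* drop the mm_users reference on every path out */
	return 0;
}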
Example #10
static struct sock *__llc_lookup_listener(struct llc_sap *sap,
					  struct llc_addr *laddr)
{
	struct sock *rc;
	struct hlist_nulls_node *node;
	int slot = llc_sk_laddr_hashfn(sap, laddr);
	struct hlist_nulls_head *laddr_hb = &sap->sk_laddr_hash[slot];

	rcu_read_lock();
again:
	sk_nulls_for_each_rcu(rc, node, laddr_hb) {
		if (llc_listener_match(sap, laddr, rc)) {
			/* Extra checks required by SLAB_DESTROY_BY_RCU */
			if (unlikely(!atomic_inc_not_zero(&rc->sk_refcnt)))
				goto again;
			if (unlikely(llc_sk(rc)->sap != sap ||
				     !llc_listener_match(sap, laddr, rc))) {
				sock_put(rc);
				continue;
			}
			goto found;
		}
	}
	rc = NULL;
	/*
	 * if the nulls value we got at the end of this lookup is
	 * not the expected one, we must restart lookup.
	 * We probably met an item that was moved to another chain.
	 */
	if (unlikely(get_nulls_value(node) != slot))
		goto again;
found:
	rcu_read_unlock();
	return rc;
}
Example #11
/**
 * batadv_backbone_hash_find - looks for a claim in the hash
 * @bat_priv: the bat priv with all the soft interface information
 * @addr: the address of the originator
 * @vid: the VLAN ID
 *
 * Returns claim if found or NULL otherwise.
 */
static struct batadv_bla_backbone_gw *
batadv_backbone_hash_find(struct batadv_priv *bat_priv,
			  uint8_t *addr, unsigned short vid)
{
	struct batadv_hashtable *hash = bat_priv->bla.backbone_hash;
	struct hlist_head *head;
	struct batadv_bla_backbone_gw search_entry, *backbone_gw;
	struct batadv_bla_backbone_gw *backbone_gw_tmp = NULL;
	int index;

	if (!hash)
		return NULL;

	ether_addr_copy(search_entry.orig, addr);
	search_entry.vid = vid;

	index = batadv_choose_backbone_gw(&search_entry, hash->size);
	head = &hash->table[index];

	rcu_read_lock();
	hlist_for_each_entry_rcu(backbone_gw, head, hash_entry) {
		if (!batadv_compare_backbone_gw(&backbone_gw->hash_entry,
						&search_entry))
			continue;

		if (!atomic_inc_not_zero(&backbone_gw->refcount))
			continue;

		backbone_gw_tmp = backbone_gw;
		break;
	}
	rcu_read_unlock();

	return backbone_gw_tmp;
}
Example #12
void jump_label_inc(struct jump_label_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_add_return(1, &key->enabled) == 1)
		jump_label_update(key, JUMP_LABEL_ENABLE);
	jump_label_unlock();
}
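jump_label_inc() uses the same primitive for lock-free lazy initialization: atomic_inc_not_zero() is the fast path, and only the 0 -> 1 transition falls back to a lock so the one-time work races with nothing. A minimal sketch of that idiom, assuming a hypothetical resource whose one-time setup is res_init():

static atomic_t res_users = ATOMIC_INIT(0);
static DEFINE_MUTEX(res_lock);

static void res_get(void)
{
	/* fast path: someone already initialized the resource */
	if (atomic_inc_not_zero(&res_users))
		return;

	mutex_lock(&res_lock);
	if (atomic_read(&res_users) == 0)
		res_init();	/* hypothetical one-time setup */
	atomic_inc(&res_users);
	mutex_unlock(&res_lock);
}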
Example #13
struct nouveau_channel *
nouveau_channel_get_unlocked(struct nouveau_channel *ref)
{
	struct nouveau_channel *chan = NULL;

	if (likely(ref && atomic_inc_not_zero(&ref->users)))
		nouveau_channel_ref(ref, &chan);

	return chan;
}
Example #14
static struct ip6_flowlabel *fl_lookup(struct net *net, __be32 label)
{
	struct ip6_flowlabel *fl;

	rcu_read_lock_bh();
	fl = __fl_lookup(net, label);
	if (fl && !atomic_inc_not_zero(&fl->users))
		fl = NULL;
	rcu_read_unlock_bh();
	return fl;
}
Example #15
void rxrpc_reject_packet(struct rxrpc_local *local, struct sk_buff *skb)
{
	CHECK_SLAB_OKAY(&local->usage);

	if (!atomic_inc_not_zero(&local->usage)) {
		printk("resurrected on reject\n");
		BUG();
	}

	skb_queue_tail(&local->reject_queue, skb);
	rxrpc_queue_work(&local->rejecter);
}
Example #16
int
linux_alloc_current(struct thread *td, int flags)
{
	struct proc *proc;
	struct thread *td_other;
	struct task_struct *ts;
	struct task_struct *ts_other;
	struct mm_struct *mm;
	struct mm_struct *mm_other;

	MPASS(td->td_lkpi_task == NULL);

	ts = malloc(sizeof(*ts), M_LINUX_CURRENT, flags | M_ZERO);
	if (ts == NULL)
		return (ENOMEM);

	mm = malloc(sizeof(*mm), M_LINUX_CURRENT, flags | M_ZERO);
	if (mm == NULL) {
		free(ts, M_LINUX_CURRENT);
		return (ENOMEM);
	}

	/* setup new task structure */
	atomic_set(&ts->kthread_flags, 0);
	ts->task_thread = td;
	ts->comm = td->td_name;
	ts->pid = td->td_tid;
	atomic_set(&ts->usage, 1);
	atomic_set(&ts->state, TASK_RUNNING);
	init_completion(&ts->parked);
	init_completion(&ts->exited);

	proc = td->td_proc;

	/* check if another thread already has a mm_struct */
	PROC_LOCK(proc);
	FOREACH_THREAD_IN_PROC(proc, td_other) {
		ts_other = td_other->td_lkpi_task;
		if (ts_other == NULL)
			continue;

		mm_other = ts_other->mm;
		if (mm_other == NULL)
			continue;

		/* try to share other mm_struct */
		if (atomic_inc_not_zero(&mm_other->mm_users)) {
			/* set mm_struct pointer */
			ts->mm = mm_other;
			break;
		}
	}
	PROC_UNLOCK(proc);

	/* fall back to the newly allocated mm_struct if none was shared */
	if (ts->mm == NULL) {
		atomic_set(&mm->mm_users, 1);
		ts->mm = mm;
	} else {
		free(mm, M_LINUX_CURRENT);
	}

	/* store pointer to task struct */
	td->td_lkpi_task = ts;

	return (0);
}
Example #17
static struct nfulnl_instance *
instance_lookup_get(u_int16_t group_num)
{
	struct nfulnl_instance *inst;

	rcu_read_lock_bh();
	inst = __instance_lookup(group_num);
	if (inst && !atomic_inc_not_zero(&inst->use))
		inst = NULL;
	rcu_read_unlock_bh();

	return inst;
}
Example #18
/*
 * Lookup a deviceid in cache and get a reference count on it if found
 *
 * @clp nfs_client associated with deviceid
 * @id deviceid to look up
 */
struct nfs4_deviceid_node *
_find_get_deviceid(const struct pnfs_layoutdriver_type *ld,
		   const struct nfs_client *clp, const struct nfs4_deviceid *id,
		   long hash)
{
	struct nfs4_deviceid_node *d;

	rcu_read_lock();
	d = _lookup_deviceid(ld, clp, id, hash);
	if (d && !atomic_inc_not_zero(&d->ref))
		d = NULL;
	rcu_read_unlock();
	return d;
}
Example #19
void static_key_slow_inc(struct static_key *key)
{
	if (atomic_inc_not_zero(&key->enabled))
		return;

	jump_label_lock();
	if (atomic_read(&key->enabled) == 0) {
		if (!jump_label_get_branch_default(key))
			jump_label_update(key, JUMP_LABEL_ENABLE);
		else
			jump_label_update(key, JUMP_LABEL_DISABLE);
	}
	atomic_inc(&key->enabled);
	jump_label_unlock();
}
Example #20
/**
 * talitos_submit - submits a descriptor to the device for processing
 * @dev:	the SEC device to be used
 * @desc:	the descriptor to be processed by the device
 * @callback:	whom to call when processing is complete
 * @context:	a handle for use by caller (optional)
 *
 * desc must contain valid dma-mapped (bus physical) address pointers.
 * callback must check err and feedback in descriptor header
 * for device processing status.
 */
static int talitos_submit(struct device *dev, struct talitos_desc *desc,
			  void (*callback)(struct device *dev,
					   struct talitos_desc *desc,
					   void *context, int error),
			  void *context)
{
	struct talitos_private *priv = dev_get_drvdata(dev);
	struct talitos_request *request;
	unsigned long flags, ch;
	int head;

	/* select done notification */
	desc->hdr |= DESC_HDR_DONE_NOTIFY;

	/* emulate SEC's round-robin channel fifo polling scheme */
	ch = atomic_inc_return(&priv->last_chan) & (priv->num_channels - 1);

	spin_lock_irqsave(&priv->head_lock[ch], flags);

	if (!atomic_inc_not_zero(&priv->submit_count[ch])) {
		/* h/w fifo is full */
		spin_unlock_irqrestore(&priv->head_lock[ch], flags);
		return -EAGAIN;
	}

	head = priv->head[ch];
	request = &priv->fifo[ch][head];

	/* map descriptor and save caller data */
	request->dma_desc = dma_map_single(dev, desc, sizeof(*desc),
					   DMA_BIDIRECTIONAL);
	request->callback = callback;
	request->context = context;

	/* increment fifo head */
	priv->head[ch] = (priv->head[ch] + 1) & (priv->fifo_len - 1);

	smp_wmb();
	request->desc = desc;

	/* GO! */
	wmb();
	out_be32(priv->reg + TALITOS_FF_LO(ch), request->dma_desc);

	spin_unlock_irqrestore(&priv->head_lock[ch], flags);

	return -EINPROGRESS;
}
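A hedged sketch of a caller honoring the contract documented above: the callback receives the device's processing status, and a return of -EINPROGRESS from talitos_submit() means the descriptor was queued, not completed. struct my_req, my_done() and my_submit_and_wait() are illustrative names, not part of the driver.

struct my_req {
	struct completion done;
	int status;
};

static void my_done(struct device *dev, struct talitos_desc *desc,
		    void *context, int error)
{
	struct my_req *req = context;

	/* error carries the device processing status for this descriptor */
	req->status = error;
	complete(&req->done);
}

/* submit one dma-mapped descriptor and wait for its completion callback */
static int my_submit_and_wait(struct device *dev, struct talitos_desc *desc)
{
	struct my_req req;
	int ret;

	init_completion(&req.done);
	ret = talitos_submit(dev, desc, my_done, &req);
	if (ret != -EINPROGRESS)
		return ret;

	wait_for_completion(&req.done);
	return req.status;
}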
Example #21
//====================================================================
// Iterator function for adding an entry to the super block map for
// each ext2 file system.
static void build_super_block_map_iter_fn (struct super_block *sb, void *arg) {
    if (num_super_blocks >= kSuperBlockLimit) {
        DBG("Hit limit of %d file systems", kSuperBlockLimit);
        return;
    }

    // Attempt to obtain a reference to this file system's super block.
    if (!atomic_inc_not_zero(&sb->s_active)) {
        DBG("Couldn't increment s_active for %s", sb->s_id);
        return;
    }

    // Add the super block to the map.
    super_block_map[num_super_blocks++] = sb;
    DBG("Adding mapping %d -> %s", num_super_blocks, sb->s_id);
}
Example #22
struct posix_acl *get_cached_acl(struct inode *inode, int type)
{
	struct posix_acl **p = acl_by_type(inode, type);
	struct posix_acl *acl;

	for (;;) {
		rcu_read_lock();
		acl = rcu_dereference(*p);
		if (!acl || is_uncached_acl(acl) ||
		    atomic_inc_not_zero(&acl->a_refcount))
			break;
		rcu_read_unlock();
		cpu_relax();
	}
	rcu_read_unlock();
	return acl;
}
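get_cached_acl() shows a third variant: when the target can only be transiently unreferenced (it sits in a cache slot that will be refilled), a failed atomic_inc_not_zero() is a reason to spin and retry rather than give up. A hedged reduction of that loop, reusing the hypothetical struct obj from the first sketch and omitting the uncached-sentinel check:

/* return the cached object with a reference held, or NULL if the slot is empty */
static struct obj *obj_get_cached(struct obj __rcu **slot)
{
	struct obj *o;

	for (;;) {
		rcu_read_lock();
		o = rcu_dereference(*slot);
		if (!o || atomic_inc_not_zero(&o->refcount))
			break;
		/* the object is mid-teardown: back off and look again */
		rcu_read_unlock();
		cpu_relax();
	}
	rcu_read_unlock();
	return o;
}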
Example #23
static void *m_start(struct seq_file *m, loff_t *ppos)
{
	struct proc_maps_private *priv = m->private;
	unsigned long last_addr = m->version;
	struct mm_struct *mm;
	struct vm_area_struct *vma;
	unsigned int pos = *ppos;

	/* See m_cache_vma(). Zero at the start or after lseek. */
	if (last_addr == -1UL)
		return NULL;

	priv->task = get_proc_task(priv->inode);
	if (!priv->task)
		return ERR_PTR(-ESRCH);

	mm = priv->mm;
	if (!mm || !atomic_inc_not_zero(&mm->mm_users))
		return NULL;

	down_read(&mm->mmap_sem);
	hold_task_mempolicy(priv);
	priv->tail_vma = get_gate_vma(mm);

	if (last_addr) {
		vma = find_vma(mm, last_addr);
		if (vma && (vma = m_next_vma(priv, vma)))
			return vma;
	}

	m->version = 0;
	if (pos < mm->map_count) {
		for (vma = mm->mmap; pos; pos--) {
			m->version = vma->vm_start;
			vma = vma->vm_next;
		}
		return vma;
	}

	/* we do not bother to update m->version in this case */
	if (pos == mm->map_count && priv->tail_vma)
		return priv->tail_vma;

	vma_stop(priv);
	return NULL;
}
Example #24
/**
 * batadv_choose_next_candidate - select the next DHT candidate
 * @bat_priv: the bat priv with all the soft interface information
 * @cands: candidates array
 * @select: number of candidates already present in the array
 * @ip_key: key to look up in the DHT
 * @last_max: pointer where the address of the selected candidate will be saved
 */
static void batadv_choose_next_candidate(struct batadv_priv *bat_priv,
					 struct batadv_dat_candidate *cands,
					 int select, batadv_dat_addr_t ip_key,
					 batadv_dat_addr_t *last_max)
{
	batadv_dat_addr_t max = 0, tmp_max = 0;
	struct batadv_orig_node *orig_node, *max_orig_node = NULL;
	struct batadv_hashtable *hash = bat_priv->orig_hash;
	struct hlist_node *node;
	struct hlist_head *head;
	int i;

	/* if no node is eligible as candidate, leave the candidate type as
	 * NOT_FOUND
	 */
	cands[select].type = BATADV_DAT_CANDIDATE_NOT_FOUND;

	/* iterate over the originator list and find the node with closest
	 * dat_address which has not been selected yet
	 */
	for (i = 0; i < hash->size; i++) {
		head = &hash->table[i];

		rcu_read_lock();
		hlist_for_each_entry_rcu(orig_node, node, head, hash_entry) {
			/* the dht space is a ring and addresses are unsigned */
			tmp_max = BATADV_DAT_ADDR_MAX - orig_node->dat_addr +
				  ip_key;

			if (!batadv_is_orig_node_eligible(cands, select,
							  tmp_max, max,
							  *last_max, orig_node,
							  max_orig_node))
				continue;

			if (!atomic_inc_not_zero(&orig_node->refcount))
				continue;

			max = tmp_max;
			if (max_orig_node)
				batadv_orig_node_free_ref(max_orig_node);
			max_orig_node = orig_node;
		}
		rcu_read_unlock();
	}

	if (max_orig_node) {
		cands[select].type = BATADV_DAT_CANDIDATE_ORIG;
		cands[select].orig_node = max_orig_node;
		batadv_dbg(BATADV_DBG_DAT, bat_priv,
			   "dat_select_candidates() %d: selected %pM addr=%u dist=%u\n",
			   select, max_orig_node->orig, max_orig_node->dat_addr,
			   max);
	}
	*last_max = max;
}
Example #25
static struct rtable *tunnel_rtable_get(struct ip_tunnel *t, u32 cookie)
{
	struct dst_entry *dst;

	rcu_read_lock();
	dst = rcu_dereference(this_cpu_ptr(t->dst_cache)->dst);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	if (dst) {
		if (dst->obsolete && dst->ops->check(dst, cookie) == NULL) {
			tunnel_dst_reset(t);
			dst_release(dst);
			dst = NULL;
		}
	}
	rcu_read_unlock();
	return (struct rtable *)dst;
}
Example #26
/* hook function for retrieving the cached socket */
static unsigned int ipv4_conntrack_restore_sock(unsigned int hooknum,
                                      struct sk_buff *skb,
                                      const struct net_device *in,
                                      const struct net_device *out,
                                      int (*okfn)(struct sk_buff *))
{
	struct nf_conn *ct;  
	enum ip_conntrack_info ctinfo;  
	struct nf_conntrack_ext *exts;
	ct = nf_ct_get(skb, &ctinfo);  
	if (!ct || ct == &nf_conntrack_untracked){
		goto out;
	}
	if ((ip_hdr(skb)->protocol != IPPROTO_UDP) && 
			(ip_hdr(skb)->protocol != IPPROTO_TCP)) {
		goto out;
	}

	exts = nf_conn_exts_find(ct);
	if (exts) {  
		/* fetch the cached socket */
		if (exts->bits_idx[CONN_SOCK] != -1) {
			struct sock *sk = (struct sock *)nf_ct_exts_get(ct, exts->bits_idx[CONN_SOCK]);
			if (sk) {
				if ((ip_hdr(skb)->protocol == IPPROTO_TCP) && sk->sk_state != TCP_ESTABLISHED) {
					goto out;
				}
				if (unlikely(!atomic_inc_not_zero(&sk->sk_refcnt))) {
					goto out;
				}
				skb_orphan(skb);
				skb->sk = sk;
				/* we took a reference with atomic_inc_not_zero above;
				 * it must be put when the skb is handed to its next owner
				 */
				skb->destructor = nf_ext_destructor;
			}
		}
	}
out:
	return NF_ACCEPT;
}
Example #27
static struct rtable *tunnel_rtable_get(struct ip_tunnel *t,
					u32 cookie, __be32 *saddr)
{
	struct ip_tunnel_dst *idst;
	struct dst_entry *dst;

	rcu_read_lock();
	idst = raw_cpu_ptr(t->dst_cache);
	dst = rcu_dereference(idst->dst);
	if (dst && !atomic_inc_not_zero(&dst->__refcnt))
		dst = NULL;
	if (dst) {
		if (!dst->obsolete || dst->ops->check(dst, cookie)) {
			*saddr = idst->saddr;
		} else {
			tunnel_dst_reset(t);
			dst_release(dst);
			dst = NULL;
		}
	}
	rcu_read_unlock();
	return (struct rtable *)dst;
}
Example #28
void skb_clone_tx_timestamp(struct sk_buff *skb)
{
	struct phy_device *phydev;
	struct sk_buff *clone;
	struct sock *sk = skb->sk;
	unsigned int type;

	if (!sk)
		return;

	type = classify(skb);

	switch (type) {
	case PTP_CLASS_V1_IPV4:
	case PTP_CLASS_V1_IPV6:
	case PTP_CLASS_V2_IPV4:
	case PTP_CLASS_V2_IPV6:
	case PTP_CLASS_V2_L2:
	case PTP_CLASS_V2_VLAN:
		phydev = skb->dev->phydev;
		if (likely(phydev->drv->txtstamp)) {
			if (!atomic_inc_not_zero(&sk->sk_refcnt))
				return;
			clone = skb_clone(skb, GFP_ATOMIC);
			if (!clone) {
				sock_put(sk);
				return;
			}
			clone->sk = sk;
			phydev->drv->txtstamp(phydev, clone, type);
		}
		break;
	default:
		break;
	}
}
Example #29
/*
 * For each partition that XPC has established communications with, there is
 * a minimum of one kernel thread assigned to perform any operation that
 * may potentially sleep or block (basically the callouts to the asynchronous
 * functions registered via xpc_connect()).
 *
 * Additional kthreads are created and destroyed by XPC as the workload
 * demands.
 *
 * A kthread is assigned to one of the active channels that exists for a given
 * partition.
 */
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
				xpc_indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				xpc_indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}
Example #30
void
xpc_create_kthreads(struct xpc_channel *ch, int needed,
		    int ignore_disconnecting)
{
	unsigned long irq_flags;
	u64 args = XPC_PACK_ARGS(ch->partid, ch->number);
	struct xpc_partition *part = &xpc_partitions[ch->partid];
	struct task_struct *kthread;
	void (*indicate_partition_disengaged) (struct xpc_partition *) =
		xpc_arch_ops.indicate_partition_disengaged;

	while (needed-- > 0) {

		/*
		 * The following is done on behalf of the newly created
		 * kthread. That kthread is responsible for doing the
		 * counterpart to the following before it exits.
		 */
		if (ignore_disconnecting) {
			if (!atomic_inc_not_zero(&ch->kthreads_assigned)) {
				/* kthreads assigned had gone to zero */
				BUG_ON(!(ch->flags &
					 XPC_C_DISCONNECTINGCALLOUT_MADE));
				break;
			}

		} else if (ch->flags & XPC_C_DISCONNECTING) {
			break;

		} else if (atomic_inc_return(&ch->kthreads_assigned) == 1 &&
			   atomic_inc_return(&part->nchannels_engaged) == 1) {
			xpc_arch_ops.indicate_partition_engaged(part);
		}
		(void)xpc_part_ref(part);
		xpc_msgqueue_ref(ch);

		kthread = kthread_run(xpc_kthread_start, (void *)args,
				      "xpc%02dc%d", ch->partid, ch->number);
		if (IS_ERR(kthread)) {
			/* the fork failed */

			/*
			 * NOTE: if (ignore_disconnecting &&
			 * !(ch->flags & XPC_C_DISCONNECTINGCALLOUT)) is true,
			 * then we'll deadlock if all other kthreads assigned
			 * to this channel are blocked in the channel's
			 * registerer, because the only thing that will unblock
			 * them is the xpDisconnecting callout that this
			 * failed kthread_run() would have made.
			 */

			if (atomic_dec_return(&ch->kthreads_assigned) == 0 &&
			    atomic_dec_return(&part->nchannels_engaged) == 0) {
				indicate_partition_disengaged(part);
			}
			xpc_msgqueue_deref(ch);
			xpc_part_deref(part);

			if (atomic_read(&ch->kthreads_assigned) <
			    ch->kthreads_idle_limit) {
				/*
				 * Flag this as an error only if we have an
				 * insufficient #of kthreads for the channel
				 * to function.
				 */
				spin_lock_irqsave(&ch->lock, irq_flags);
				XPC_DISCONNECT_CHANNEL(ch, xpLackOfResources,
						       &irq_flags);
				spin_unlock_irqrestore(&ch->lock, irq_flags);
			}
			break;
		}
	}
}