Example no. 1
0
/**  /proc/web100/<connection>/spec_ascii  **/
static ssize_t connection_spec_ascii_read(struct file * file, char * buf,
	size_t nbytes, loff_t *ppos)
{
	__u32 local_addr, remote_addr;
	__u16 local_port, remote_port;
	int cid;
	struct web100stats *stats;
	struct web100directs *vars;
	char tmpbuf[100];
	int len = 0;
	
	if (*ppos != 0)
		return 0;
	
	cid = cid_from_ino(file->f_dentry->d_parent->d_inode->i_ino);
	
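	/* web100_linkage_lock guards only the lookup; the stats pointer
	 * is used after the lock is dropped. */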
	read_lock_bh(&web100_linkage_lock);
	stats = web100stats_lookup(cid);
	read_unlock_bh(&web100_linkage_lock);
	if (stats == NULL)
		return -ENOENT;
	vars = &stats->wc_vars;
	
	if (vars->LocalAddressType == WC_ADDRTYPE_IPV4) {
		/* These values should not change while stats are linked.
		 * We don't need to lock the sock. */
		local_addr = ntohl(vars->LocalAddress.v4addr);
		remote_addr = ntohl(vars->RemAddress.v4addr);
		local_port = vars->LocalPort;
		remote_port = vars->RemPort;
		
		len = sprintf(tmpbuf, "%d.%d.%d.%d:%d %d.%d.%d.%d:%d\n",
			(local_addr >> 24) & 0xff,
			(local_addr >> 16) & 0xff,
			(local_addr >> 8) & 0xff,
			local_addr & 0xff,
			local_port,
			(remote_addr >> 24) & 0xff,
			(remote_addr >> 16) & 0xff,
			(remote_addr >> 8) & 0xff,
			remote_addr & 0xff,
			remote_port);
	} else if (vars->LocalAddressType == WC_ADDRTYPE_IPV6) {
Example no. 2
0
static int pool_change(ip_pool_t index, u_int32_t addr, int isdel)
{
	struct ip_pool *pool;
	int res = -1;

	pool = lookup(index);
	if (    !pool || !pool->members
	     || addr < pool->first_ip || addr > pool->last_ip)
		return -1;
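	/* Re-check the pool and range under the lock before touching the
	 * member bitmap. */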
	read_lock_bh(&pool->lock);
	if (pool->members && addr >= pool->first_ip && addr <= pool->last_ip) {
		addr -= pool->first_ip;
		res = isdel
			? (0 != test_and_clear_bit(addr, pool->members))
			: (0 != test_and_set_bit(addr, pool->members));
	}
	read_unlock_bh(&pool->lock);
	return res;
}
Example no. 3
0
static void
bitmap_ip_gc(unsigned long ul_set)
{
	struct ip_set *set = (struct ip_set *) ul_set;
	struct bitmap_ip *map = set->data;
	unsigned long *table = map->members;
	u32 id;

	/* We run parallel with other readers (test element)
	 * but adding/deleting new entries is locked out */
	read_lock_bh(&set->lock);
	for (id = 0; id < map->elements; id++)
		if (ip_set_timeout_expired(table[id]))
			table[id] = IPSET_ELEM_UNSET;
	read_unlock_bh(&set->lock);

	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
	add_timer(&map->gc);
}
Example no. 4
0
static int tcp_to_nfattr(struct sk_buff *skb, struct nfattr *nfa,
			 const struct nf_conn *ct)
{
	struct nfattr *nest_parms;

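	/* NFA_NEST and NFA_PUT jump to nfattr_failure on error, so both
	 * exit paths must drop tcp_lock. */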
	read_lock_bh(&tcp_lock);
	nest_parms = NFA_NEST(skb, CTA_PROTOINFO_TCP);
	NFA_PUT(skb, CTA_PROTOINFO_TCP_STATE, sizeof(u_int8_t),
		&ct->proto.tcp.state);
	read_unlock_bh(&tcp_lock);

	NFA_NEST_END(skb, nest_parms);

	return 0;

nfattr_failure:
	read_unlock_bh(&tcp_lock);
	return -1;
}
Example no. 5
0
static in_cache_entry *in_cache_get_by_vcc(struct atm_vcc *vcc,
					   struct mpoa_client *client)
{
	in_cache_entry *entry;

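	/* Walk the ingress cache under the lock and take a reference
	 * before returning an entry. */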
	read_lock_bh(&client->ingress_lock);
	entry = client->in_cache;
	while (entry != NULL) {
		if (entry->shortcut == vcc) {
			atomic_inc(&entry->use);
			read_unlock_bh(&client->ingress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_bh(&client->ingress_lock);

	return NULL;
}
Example no. 6
0
static inline int ip6_output_finish(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

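	/* Use the cached hardware header when present; otherwise hand
	 * the packet to the neighbour layer. */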
	if (hh) {
		read_lock_bh(&hh->hh_lock);
		memcpy(skb->data - 16, hh->hh_data, 16);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		return hh->hh_output(skb);
	} else if (dst->neighbour)
		return dst->neighbour->output(skb);

	kfree_skb(skb);
	return -EINVAL;
}
Example no. 7
0
struct inet6_ifaddr * ipv6_get_ifaddr(struct in6_addr *addr, struct net_device *dev)
{
	struct inet6_ifaddr * ifp;
	u8 hash = ipv6_addr_hash(addr);

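	/* Scan the address hash bucket under the lock; a matching entry is
	 * pinned with in6_ifa_hold() before the lock is dropped. */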
	read_lock_bh(&addrconf_hash_lock);
	for(ifp = inet6_addr_lst[hash]; ifp; ifp=ifp->lst_next) {
		if (ipv6_addr_cmp(&ifp->addr, addr) == 0) {
			if (dev == NULL || ifp->idev->dev == dev ||
			    !(ifp->scope&(IFA_LINK|IFA_HOST))) {
				in6_ifa_hold(ifp);
				break;
			}
		}
	}
	read_unlock_bh(&addrconf_hash_lock);

	return ifp;
}
Example no. 8
0
/*
 * Unfortunately, the neighbour code uses the device in its hash
 * function, so we don't get any advantage from it. This function
 * basically does a neigh_lookup(), but without comparing the device
 * field. This is required for the On-Ethernet cache
 */
struct neighbour *dn_neigh_lookup(struct neigh_table *tbl, const void *ptr)
{
    struct neighbour *neigh;
    u32 hash_val;

    hash_val = tbl->hash(ptr, NULL);

    read_lock_bh(&tbl->lock);
    for(neigh = tbl->hash_buckets[hash_val]; neigh != NULL; neigh = neigh->next) {
        if (memcmp(neigh->primary_key, ptr, tbl->key_len) == 0) {
            atomic_inc(&neigh->refcnt);
            read_unlock_bh(&tbl->lock);
            return neigh;
        }
    }
    read_unlock_bh(&tbl->lock);

    return NULL;
}
Example no. 9
0
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	bool conns;

	spin_lock_bh(&smc_lgr_list.lock);
	if (list_empty(&lgr->list))
		goto free;
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(&smc_lgr_list.lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
	spin_unlock_bh(&smc_lgr_list.lock);

	if (!lgr->is_smcd && !lgr->terminating) {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		/* try to send del link msg, on error free lgr immediately */
		if (lnk->state == SMC_LNK_ACTIVE &&
		    !smc_link_send_delete(lnk)) {
			/* reschedule in case we never receive a response */
			smc_lgr_schedule_free_work(lgr);
			return;
		}
	}

	if (!delayed_work_pending(&lgr->free_work)) {
		struct smc_link *lnk = &lgr->lnk[SMC_SINGLE_LINK];

		if (!lgr->is_smcd && lnk->state != SMC_LNK_INACTIVE)
			smc_llc_link_inactive(lnk);
		if (lgr->is_smcd)
			smc_ism_signal_shutdown(lgr);
		smc_lgr_free(lgr);
	}
}
Example no. 10
0
static struct dst_entry *
__xfrm4_find_bundle(struct flowi *fl, struct xfrm_policy *policy)
{
	struct dst_entry *dst;

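	/* Search the policy's bundle list under its lock; dst_clone()
	 * takes a reference before the lock is released. */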
	read_lock_bh(&policy->lock);
	for (dst = policy->bundles; dst; dst = dst->next) {
		struct xfrm_dst *xdst = (struct xfrm_dst*)dst;
		if (xdst->u.rt.fl.oif == fl->oif &&	/*XXX*/
		    xdst->u.rt.fl.fl4_dst == fl->fl4_dst &&
		    xdst->u.rt.fl.fl4_src == fl->fl4_src &&
		    xfrm_bundle_ok(xdst, fl, AF_INET)) {
			dst_clone(dst);
			break;
		}
	}
	read_unlock_bh(&policy->lock);
	return dst;
}
Example no. 11
0
static int arp_req_get(struct arpreq *r, struct net_device *dev)
{
	__be32 ip = ((struct sockaddr_in *) &r->arp_pa)->sin_addr.s_addr;
	struct neighbour *neigh;
	int err = -ENXIO;

	neigh = neigh_lookup(&arp_tbl, &ip, dev);
	if (neigh) {
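		/* neigh->lock keeps the hardware address and state stable
		 * while they are copied out. */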
		read_lock_bh(&neigh->lock);
		memcpy(r->arp_ha.sa_data, neigh->ha, dev->addr_len);
		r->arp_flags = arp_state_to_flags(neigh);
		read_unlock_bh(&neigh->lock);
		r->arp_ha.sa_family = dev->type;
		strlcpy(r->arp_dev, dev->name, sizeof(r->arp_dev));
		neigh_release(neigh);
		err = 0;
	}
	return err;
}
Example no. 12
0
static in_cache_entry *in_cache_get(__be32 dst_ip,
				    struct mpoa_client *client)
{
	in_cache_entry *entry;

	read_lock_bh(&client->ingress_lock);
	entry = client->in_cache;
	while (entry != NULL) {
		if (entry->ctrl_info.in_dst_ip == dst_ip) {
			atomic_inc(&entry->use);
			read_unlock_bh(&client->ingress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_bh(&client->ingress_lock);

	return NULL;
}
Example no. 13
0
/* Stolen from ip6_output_finish
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		read_lock_bh(&hh->hh_lock);
		memcpy(skb->data - 16, hh->hh_data, 16);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ip6t_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
Example no. 14
0
/*
 * Called when the host's ARP layer makes a change to some entry that is
 * loaded into the HW L2 table.
 */
void t3_l2t_update(struct t3cdev *dev, struct neighbour *neigh)
{
	struct l2t_entry *e;
	struct sk_buff *arpq = NULL;
	struct l2t_data *d = L2DATA(dev);
	u32 addr = *(u32 *) neigh->primary_key;
	int ifidx = neigh->dev->ifindex;
	int hash = arp_hash(addr, ifidx, d);

	read_lock_bh(&d->lock);
	for (e = d->l2tab[hash].first; e; e = e->next)
		if (e->addr == addr && e->ifindex == ifidx) {
			spin_lock(&e->lock);
			goto found;
		}
	read_unlock_bh(&d->lock);
	return;

found:
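	/* Drop the table lock but leave BHs disabled: e->lock was taken
	 * inside the BH-disabled region and is released below with
	 * spin_unlock_bh(), which re-enables them. */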
	read_unlock(&d->lock);
	if (atomic_read(&e->refcnt)) {
		if (neigh != e->neigh)
			neigh_replace(e, neigh);

		if (e->state == L2T_STATE_RESOLVING) {
			if (neigh->nud_state & NUD_FAILED) {
				arpq = e->arpq_head;
				e->arpq_head = e->arpq_tail = NULL;
			} else if (neigh->nud_state & (NUD_CONNECTED|NUD_STALE))
				setup_l2e_send_pending(dev, NULL, e);
		} else {
			e->state = neigh->nud_state & NUD_CONNECTED ?
			    L2T_STATE_VALID : L2T_STATE_STALE;
			if (memcmp(e->dmac, neigh->ha, 6))
				setup_l2e_send_pending(dev, NULL, e);
		}
	}
	spin_unlock_bh(&e->lock);

	if (arpq)
		handle_failed_resolution(dev, arpq);
}
Example no. 15
0
/* Do some firewalling */
unsigned int ebt_do_table (unsigned int hook, struct sk_buff **pskb,
   const struct net_device *in, const struct net_device *out,
   struct ebt_table *table)
{
	int i, nentries;
	struct ebt_entry *point;
	struct ebt_counter *counter_base, *cb_base;
	struct ebt_entry_target *t;
	int verdict, sp = 0;
	struct ebt_chainstack *cs;
	struct ebt_entries *chaininfo;
	char *base;
	struct ebt_table_info *private = table->private;

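	/* table->lock keeps table->private (counters, chainstack) stable
	 * against a concurrent table replace during traversal. */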
	read_lock_bh(&table->lock);
	cb_base = COUNTER_BASE(private->counters, private->nentries,
	   smp_processor_id());
	if (private->chainstack)
		cs = private->chainstack[smp_processor_id()];
	else
Example no. 16
0
struct ip6_flowlabel * fl6_sock_lookup(struct sock *sk, __be32 label)
{
	struct ipv6_fl_socklist *sfl;
	struct ipv6_pinfo *np = inet6_sk(sk);

	label &= IPV6_FLOWLABEL_MASK;

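	/* Walk the per-socket flow-label list under the lock; fl->users is
	 * bumped before the lock is dropped. */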
	read_lock_bh(&ip6_sk_fl_lock);
	for (sfl=np->ipv6_fl_list; sfl; sfl = sfl->next) {
		struct ip6_flowlabel *fl = sfl->fl;
		if (fl->label == label) {
			fl->lastuse = jiffies;
			atomic_inc(&fl->users);
			read_unlock_bh(&ip6_sk_fl_lock);
			return fl;
		}
	}
	read_unlock_bh(&ip6_sk_fl_lock);
	return NULL;
}
Example no. 17
0
/*
 *	check if the interface/address pair is valid
 */
int ipv6_chk_mcast_addr(struct net_device *dev, struct in6_addr *addr)
{
	struct inet6_dev *idev;
	struct ifmcaddr6 *mc;

	idev = in6_dev_get(dev);
	if (idev) {
		read_lock_bh(&idev->lock);
		for (mc = idev->mc_list; mc; mc=mc->next) {
			if (ipv6_addr_cmp(&mc->mca_addr, addr) == 0) {
				read_unlock_bh(&idev->lock);
				in6_dev_put(idev);
				return 1;
			}
		}
		read_unlock_bh(&idev->lock);
		in6_dev_put(idev);
	}
	return 0;
}
Example no. 18
0
struct rt6_info *rt6_lookup(struct in6_addr *daddr, struct in6_addr *saddr,
			    int oif, int flags)
{
	struct fib6_node *fn;
	struct rt6_info *rt;

	read_lock_bh(&rt6_lock);
	fn = fib6_lookup(&ip6_routing_table, daddr, saddr);
	rt = rt6_device_match(fn->leaf, oif, !!(flags & RT6_LOOKUP_FLAG_STRICT));
	dst_hold(&rt->u.dst);
	rt->u.dst.__use++;
	read_unlock_bh(&rt6_lock);

	if (!(flags & RT6_LOOKUP_FLAG_NOUSE))
		rt->u.dst.lastuse = jiffies;
	if (rt->u.dst.error == 0)
		return rt;
	dst_release(&rt->u.dst);
	return NULL;
}
Example no. 19
0
static int dccp_to_nlattr(struct sk_buff *skb, struct nlattr *nla,
			  const struct nf_conn *ct)
{
	struct nlattr *nest_parms;

	read_lock_bh(&dccp_lock);
	nest_parms = nla_nest_start(skb, CTA_PROTOINFO_DCCP | NLA_F_NESTED);
	if (!nest_parms)
		goto nla_put_failure;
	NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_STATE, ct->proto.dccp.state);
	NLA_PUT_U8(skb, CTA_PROTOINFO_DCCP_ROLE,
		   ct->proto.dccp.role[IP_CT_DIR_ORIGINAL]);
	nla_nest_end(skb, nest_parms);
	read_unlock_bh(&dccp_lock);
	return 0;

nla_put_failure:
	read_unlock_bh(&dccp_lock);
	return -1;
}
Example no. 20
0
static in_cache_entry *in_cache_get_with_mask(uint32_t dst_ip,
					      struct mpoa_client *client,
					      uint32_t mask)
{
	in_cache_entry *entry;

	read_lock_bh(&client->ingress_lock);
	entry = client->in_cache;
	while (entry != NULL) {
		if ((entry->ctrl_info.in_dst_ip & mask) == (dst_ip & mask)) {
			atomic_inc(&entry->use);
			read_unlock_bh(&client->ingress_lock);
			return entry;
		}
		entry = entry->next;
	}
	read_unlock_bh(&client->ingress_lock);

	return NULL;
}
Example no. 21
0
/* Stolen from ip_finish_output2 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		printk(KERN_DEBUG "khm in MIRROR\n");
		kfree_skb(skb);
	}
}
Example no. 22
0
s32 mpls_vpws_output(struct sk_buff * skb)
{
    s32 ret = 0;
    struct mpls_vpws_table *vpws;
    struct mpls_vpws_net *vpws_net;

    if (!(vpws_net = skb->dev->vpws_info))
        return MPLS_L2VPN_IGNORE;

    MPLS_DEBUG_COUNTER_INC(vpws_output);

    read_lock_bh(&vpws_net->lock);

    vpws = &vpws_net->vpws;
    
    ret = mpls_vpws_xmit(skb, vpws);

    read_unlock_bh(&vpws_net->lock);
    
    return ret;
}
Example no. 23
0
/* ----- Proc fs support ------ */
static int l2cap_sock_dump(char *buf, struct bluez_sock_list *list)
{
	struct l2cap_pinfo *pi;
	struct sock *sk;
	char *ptr = buf;

	read_lock_bh(&list->lock);

	for (sk = list->head; sk; sk = sk->next) {
		pi = l2cap_pi(sk);
		ptr += sprintf(ptr, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d 0x%x\n",
				batostr(&bluez_pi(sk)->src), batostr(&bluez_pi(sk)->dst), 
				sk->state, pi->psm, pi->scid, pi->dcid, pi->imtu, pi->omtu,
				pi->link_mode);
	}

	read_unlock_bh(&list->lock);

	ptr += sprintf(ptr, "\n");
	return ptr - buf;
}
Example no. 24
0
static int lowpan_ndisc_opt_addr_space(const struct net_device *dev,
                                       u8 icmp6_type, struct neighbour *neigh,
                                       u8 *ha_buf, u8 **ha)
{
    struct lowpan_802154_neigh *n;
    struct wpan_dev *wpan_dev;
    int addr_space = 0;

    if (!lowpan_is_ll(dev, LOWPAN_LLTYPE_IEEE802154))
        return 0;

    switch (icmp6_type) {
    case NDISC_REDIRECT:
        n = lowpan_802154_neigh(neighbour_priv(neigh));

        read_lock_bh(&neigh->lock);
        if (lowpan_802154_is_valid_src_short_addr(n->short_addr)) {
            memcpy(ha_buf, &n->short_addr,
                   IEEE802154_SHORT_ADDR_LEN);
            read_unlock_bh(&neigh->lock);
            addr_space += __ndisc_opt_addr_space(IEEE802154_SHORT_ADDR_LEN, 0);
            *ha = ha_buf;
        } else {
            read_unlock_bh(&neigh->lock);
        }
        break;
    case NDISC_NEIGHBOUR_ADVERTISEMENT:
    case NDISC_NEIGHBOUR_SOLICITATION:
    case NDISC_ROUTER_SOLICITATION:
        wpan_dev = lowpan_802154_dev(dev)->wdev->ieee802154_ptr;

        if (lowpan_802154_is_valid_src_short_addr(wpan_dev->short_addr))
            addr_space = __ndisc_opt_addr_space(IEEE802154_SHORT_ADDR_LEN, 0);
        break;
    default:
        break;
    }

    return addr_space;
}
Example no. 25
0
static bool
match(const struct sk_buff *skb, struct xt_action_param *par)
{
	const struct xt_helper_info *info = par->matchinfo;
	struct nf_conn *ct;
	struct nf_conn_help *master_help;
	enum ip_conntrack_info ctinfo;
	bool ret = info->invert;

	ct = nf_ct_get((struct sk_buff *)skb, &ctinfo);
	if (!ct) {
		DEBUGP("xt_helper: Eek! invalid conntrack?\n");
		return ret;
	}

	if (!ct->master) {
		DEBUGP("xt_helper: conntrack %p has no master\n", ct);
		return ret;
	}

	read_lock_bh(&nf_conntrack_lock);
	master_help = nfct_help(ct->master);
	if (!master_help || !master_help->helper) {
		DEBUGP("xt_helper: master ct %p has no helper\n",
			exp->expectant);
		goto out_unlock;
	}

	DEBUGP("master's name = %s , info->name = %s\n",
		ct->master->helper->name, info->name);

	if (info->name[0] == '\0')
		ret = !ret;
	else
		ret ^= !strncmp(master_help->helper->name, info->name,
				strlen(master_help->helper->name));
out_unlock:
	read_unlock_bh(&nf_conntrack_lock);
	return ret;
}
Example no. 26
0
static void
bitmap_ipmac_gc(unsigned long ul_set)
{
	struct ip_set *set = (struct ip_set *) ul_set;
	struct bitmap_ipmac *map = set->data;
	struct ipmac_telem *elem;
	u32 id, last = map->last_ip - map->first_ip;

	/* We run parallel with other readers (test element)
	 * but adding/deleting new entries is locked out */
	read_lock_bh(&set->lock);
	for (id = 0; id <= last; id++) {
		elem = bitmap_ipmac_elem(map, id);
		if (elem->match == MAC_FILLED &&
		    ip_set_timeout_expired(elem->timeout))
			elem->match = MAC_EMPTY;
	}
	read_unlock_bh(&set->lock);

	map->gc.expires = jiffies + IPSET_GC_PERIOD(map->timeout) * HZ;
	add_timer(&map->gc);
}
Example no. 27
0
int ipv6_get_lladdr(struct net_device *dev, struct in6_addr *addr)
{
	struct inet6_dev *idev;
	int err = -EADDRNOTAVAIL;

	read_lock(&addrconf_lock);
	if ((idev = __in6_dev_get(dev)) != NULL) {
		struct inet6_ifaddr *ifp;

		read_lock_bh(&idev->lock);
		for (ifp=idev->addr_list; ifp; ifp=ifp->if_next) {
			if (ifp->scope == IFA_LINK && !(ifp->flags&IFA_F_TENTATIVE)) {
				ipv6_addr_copy(addr, &ifp->addr);
				err = 0;
				break;
			}
		}
		read_unlock_bh(&idev->lock);
	}
	read_unlock(&addrconf_lock);
	return err;
}
Example no. 28
0
static inline int lapd_pass_frame_to_socket_te(
	struct sk_buff *skb)
{
	struct sock *sk;
	struct hlist_node *node;
	struct lapd_data_hdr *hdr = (struct lapd_data_hdr *)skb->data;
	struct lapd_device *dev = to_lapd_dev(skb->dev);
	int queued = 0;

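	/* Deliver the frame to every matching socket under lapd_hash_lock:
	 * the first match gets the original skb, later ones a clone. */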
	read_lock_bh(&lapd_hash_lock);

	sk_for_each(sk, node, lapd_get_hash(dev)) {
		struct lapd_sock *lapd_sock = to_lapd_sock(sk);

		if (lapd_sock->dev == dev &&
		    (sk->sk_state == LAPD_SK_STATE_NORMAL_DLC ||
		    sk->sk_state == LAPD_SK_STATE_BROADCAST_DLC) &&
		    lapd_sock->sapi == hdr->addr.sapi &&
		    lapd_sock->tei == hdr->addr.tei) {

			struct sk_buff *new_skb;

			if (!queued) {
				new_skb = skb;
				queued = TRUE;
			} else {
				new_skb = skb_clone(skb, GFP_ATOMIC);
				if (!new_skb)
					continue; /* clone failed: skip this socket */
			}

			new_skb->sk = sk;

			queued = lapd_pass_frame_to_socket(
					to_lapd_sock(sk), new_skb);
		}
	}
	read_unlock_bh(&lapd_hash_lock);

	return queued;
}
Example no. 29
0
/* Stolen from ip_finish_output2
 * PRE : skb->dev is set to the device we are leaving by
 *       skb->dst is not NULL
 * POST: the packet is sent with the link layer header pushed
 *       the packet is destroyed
 */
static void ip_direct_send(struct sk_buff *skb)
{
	struct dst_entry *dst = skb->dst;
	struct hh_cache *hh = dst->hh;
	struct net_device *dev = dst->dev;
	int hh_len = LL_RESERVED_SPACE(dev);

	/* Be paranoid, rather than too clever. */
	if (unlikely(skb_headroom(skb) < hh_len && dev->hard_header)) {
		struct sk_buff *skb2;

		skb2 = skb_realloc_headroom(skb, LL_RESERVED_SPACE(dev));
		if (skb2 == NULL) {
			kfree_skb(skb);
			return;
		}
		if (skb->sk)
			skb_set_owner_w(skb2, skb->sk);
		kfree_skb(skb);
		skb = skb2;
	}

	if (hh) {
		int hh_alen;

		read_lock_bh(&hh->hh_lock);
		hh_alen = HH_DATA_ALIGN(hh->hh_len);
		memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
		read_unlock_bh(&hh->hh_lock);
		skb_push(skb, hh->hh_len);
		hh->hh_output(skb);
	} else if (dst->neighbour)
		dst->neighbour->output(skb);
	else {
		if (net_ratelimit())
			DEBUGP(KERN_DEBUG "ipt_ROUTE: no hdr & no neighbour cache!\n");
		kfree_skb(skb);
	}
}
Example no. 30
0
static void smc_lgr_free_work(struct work_struct *work)
{
	struct smc_link_group *lgr = container_of(to_delayed_work(work),
						  struct smc_link_group,
						  free_work);
	bool conns;

	spin_lock_bh(&smc_lgr_list.lock);
	if (list_empty(&lgr->list))
		goto free;
	read_lock_bh(&lgr->conns_lock);
	conns = RB_EMPTY_ROOT(&lgr->conns_all);
	read_unlock_bh(&lgr->conns_lock);
	if (!conns) { /* number of lgr connections is no longer zero */
		spin_unlock_bh(&smc_lgr_list.lock);
		return;
	}
	list_del_init(&lgr->list); /* remove from smc_lgr_list */
free:
	spin_unlock_bh(&smc_lgr_list.lock);
	smc_lgr_free(lgr);
}