static int raw_send_hdrinc(struct sock *sk, void *from, size_t length,
			struct rtable *rt, 
			unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	int hh_len;
	struct iphdr *iph;
	struct sk_buff *skb;
	int err;

	if (length > rt->u.dst.dev->mtu) {
		ip_local_error(sk, EMSGSIZE, rt->rt_dst, inet->dport,
			       rt->u.dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	skb = sock_alloc_send_skb(sk, length+hh_len+15,
				  flags&MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error; 
	skb_reserve(skb, hh_len);

	skb->priority = sk->sk_priority;
	skb->dst = dst_clone(&rt->u.dst);

	skb->nh.iph = iph = (struct iphdr *)skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->h.raw = skb->nh.raw;
	err = memcpy_fromiovecend((void *)iph, from, 0, length);
	if (err)
		goto error_fault;

	/* We don't modify an invalid header */
	if (length >= sizeof(*iph) && iph->ihl * 4U <= length) {
		if (!iph->saddr)
			iph->saddr = rt->rt_src;
		iph->check   = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(iph, &rt->u.dst, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}

	err = NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, skb, NULL, rt->u.dst.dev,
		      dst_output);
	if (err > 0)
		err = inet->recverr ? net_xmit_errno(err) : 0;
	if (err)
		goto error;
out:
	return 0;

error_fault:
	err = -EFAULT;
	kfree_skb(skb);
error:
	IP_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err; 
}
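All of the examples collected here lean on the same sk_buff geometry: skb_reserve() opens headroom by advancing both the data and tail pointers, skb_put() appends at the tail, and skb_push() prepends into the reserved headroom. Below is a minimal, runnable userspace sketch of that pointer arithmetic. It is not kernel code; struct buf and the buf_* helpers are invented stand-ins for illustration.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

struct buf {
	unsigned char *head, *data, *tail, *end;
};

static struct buf *buf_alloc(size_t size)
{
	struct buf *b = malloc(sizeof(*b));

	if (!b)
		return NULL;
	b->head = malloc(size);
	if (!b->head) {
		free(b);
		return NULL;
	}
	b->data = b->tail = b->head;
	b->end = b->head + size;
	return b;
}

static void buf_reserve(struct buf *b, size_t len)	/* like skb_reserve() */
{
	b->data += len;
	b->tail += len;
}

static void *buf_put(struct buf *b, size_t len)		/* like skb_put() */
{
	void *old = b->tail;

	b->tail += len;
	assert(b->tail <= b->end);	/* the kernel would panic here */
	return old;
}

static void *buf_push(struct buf *b, size_t len)	/* like skb_push() */
{
	b->data -= len;
	assert(b->data >= b->head);
	return b->data;
}

int main(void)
{
	struct buf *b = buf_alloc(128);

	if (!b)
		return 1;
	buf_reserve(b, 16);		/* headroom for the link-layer header */
	memset(buf_put(b, 20), 0, 20);	/* payload appended at the tail */
	buf_push(b, 14);		/* Ethernet header prepended into headroom */
	printf("headroom=%td len=%td\n", b->data - b->head, b->tail - b->data);
	free(b->head);
	free(b);
	return 0;
}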
static ssize_t ieee80211_if_parse_tkip_mic_test(
	struct ieee80211_sub_if_data *sdata, const char *buf, int buflen)
{
	struct ieee80211_local *local = sdata->local;
	u8 addr[ETH_ALEN];
	struct sk_buff *skb;
	struct ieee80211_hdr *hdr;
	__le16 fc;

	if (!mac_pton(buf, addr))
		return -EINVAL;

	if (!ieee80211_sdata_running(sdata))
		return -ENOTCONN;

	skb = dev_alloc_skb(local->hw.extra_tx_headroom + 24 + 100);
	if (!skb)
		return -ENOMEM;
	skb_reserve(skb, local->hw.extra_tx_headroom);

	hdr = (struct ieee80211_hdr *) skb_put(skb, 24);
	memset(hdr, 0, 24);
	fc = cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);

	switch (sdata->vif.type) {
	case NL80211_IFTYPE_AP:
		fc |= cpu_to_le16(IEEE80211_FCTL_FROMDS);
		/* DA BSSID SA */
		memcpy(hdr->addr1, addr, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, sdata->vif.addr, ETH_ALEN);
		break;
	case NL80211_IFTYPE_STATION:
		fc |= cpu_to_le16(IEEE80211_FCTL_TODS);
		/* BSSID SA DA */
		sdata_lock(sdata);
		if (!sdata->u.mgd.associated) {
			sdata_unlock(sdata);
			dev_kfree_skb(skb);
			return -ENOTCONN;
		}
		memcpy(hdr->addr1, sdata->u.mgd.associated->bssid, ETH_ALEN);
		memcpy(hdr->addr2, sdata->vif.addr, ETH_ALEN);
		memcpy(hdr->addr3, addr, ETH_ALEN);
		sdata_unlock(sdata);
		break;
	default:
		dev_kfree_skb(skb);
		return -EOPNOTSUPP;
	}
	hdr->frame_control = fc;

	/*
	 * Add some length to the test frame to make it look a bit more
	 * valid. The exact contents do not matter since the recipient is
	 * required to drop the frame because of the Michael MIC failure.
	 */
	memset(skb_put(skb, 50), 0, 50);

	IEEE80211_SKB_CB(skb)->flags |= IEEE80211_TX_INTFL_TKIP_MIC_FAILURE;

	ieee80211_tx_skb(sdata, skb);

	return buflen;
}
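The switch above follows the standard 802.11 data-frame address layout selected by the ToDS/FromDS bits. For reference, here is a small self-contained sketch of that mapping; struct hdr80211, enum dir, and fill_addrs are illustrative names, not mac80211 API.

#include <stdio.h>
#include <string.h>

#define ETH_ALEN 6

struct hdr80211 {
	unsigned char a1[ETH_ALEN], a2[ETH_ALEN], a3[ETH_ALEN];
};

enum dir { NO_DS, TO_DS, FROM_DS };	/* the ToDS/FromDS bits */

static void fill_addrs(struct hdr80211 *h, enum dir d,
		       const unsigned char *da, const unsigned char *sa,
		       const unsigned char *bssid)
{
	switch (d) {
	case TO_DS:		/* station -> AP: BSSID SA DA */
		memcpy(h->a1, bssid, ETH_ALEN);
		memcpy(h->a2, sa, ETH_ALEN);
		memcpy(h->a3, da, ETH_ALEN);
		break;
	case FROM_DS:		/* AP -> station: DA BSSID SA */
		memcpy(h->a1, da, ETH_ALEN);
		memcpy(h->a2, bssid, ETH_ALEN);
		memcpy(h->a3, sa, ETH_ALEN);
		break;
	case NO_DS:		/* IBSS: DA SA BSSID */
		memcpy(h->a1, da, ETH_ALEN);
		memcpy(h->a2, sa, ETH_ALEN);
		memcpy(h->a3, bssid, ETH_ALEN);
		break;
	}
}

int main(void)
{
	const unsigned char da[ETH_ALEN] = { 2, 0, 0, 0, 0, 1 };
	const unsigned char sa[ETH_ALEN] = { 2, 0, 0, 0, 0, 2 };
	const unsigned char bssid[ETH_ALEN] = { 2, 0, 0, 0, 0, 3 };
	struct hdr80211 h;

	fill_addrs(&h, TO_DS, da, sa, bssid);	/* like the STATION case above */
	printf("addr1 ends in %02x (expect BSSID byte 03)\n", h.a1[ETH_ALEN - 1]);
	return 0;
}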
Example #3
/**
 * kim_int_recv - receive function called during firmware download
 *	Firmware download responses have been observed to arrive in
 *	bursts whose sizes vary across UART drivers' tty_receive
 *	callbacks, hence the incremental parsing logic below.
 */
void kim_int_recv(struct kim_data_s *kim_gdata,
	const unsigned char *data, long count)
{
	const unsigned char *ptr;
	int len = 0, type = 0;
	unsigned char *plen;

	pr_debug("%s", __func__);
	/* Decode received bytes here */
	ptr = data;
	if (unlikely(ptr == NULL)) {
		pr_err("received NULL from TTY\n");
		return;
	}

	while (count) {
		if (kim_gdata->rx_count) {
			len = min_t(unsigned int, kim_gdata->rx_count, count);
			memcpy(skb_put(kim_gdata->rx_skb, len), ptr, len);
			kim_gdata->rx_count -= len;
			count -= len;
			ptr += len;

			if (kim_gdata->rx_count)
				continue;

			/* Check the ST RX state machine: where are we? */
			switch (kim_gdata->rx_state) {
				/* Waiting for complete packet ? */
			case ST_W4_DATA:
				pr_debug("Complete pkt received");
				validate_firmware_response(kim_gdata);
				kim_gdata->rx_state = ST_W4_PACKET_TYPE;
				kim_gdata->rx_skb = NULL;
				continue;
				/* Waiting for Bluetooth event header ? */
			case ST_W4_HEADER:
				plen =
				(unsigned char *)&kim_gdata->rx_skb->data[1];
				pr_debug("event hdr: plen 0x%02x\n", *plen);
				kim_check_data_len(kim_gdata, *plen);
				continue;
			}	/* end of switch */
		}		/* end of if rx_state */
		switch (*ptr) {
			/* Bluetooth event packet? */
		case 0x04:
			kim_gdata->rx_state = ST_W4_HEADER;
			kim_gdata->rx_count = 2;
			type = *ptr;
			break;
		default:
			pr_info("unknown packet");
			ptr++;
			count--;
			continue;
		}
		ptr++;
		count--;
		kim_gdata->rx_skb =
			alloc_skb(1024+8, GFP_ATOMIC);
		if (!kim_gdata->rx_skb) {
			pr_err("can't allocate mem for new packet");
			kim_gdata->rx_state = ST_W4_PACKET_TYPE;
			kim_gdata->rx_count = 0;
			return;
		}
		skb_reserve(kim_gdata->rx_skb, 8);
		kim_gdata->rx_skb->cb[0] = 4;
		kim_gdata->rx_skb->cb[1] = 0;

	}
}
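As the comment block above notes, responses can be split across tty_receive bursts of arbitrary size, so the driver runs a byte-count state machine: accumulate rx_count bytes, then decide what to expect next. Here is a minimal userspace sketch of that pattern, assuming a Bluetooth-event-style header whose byte 1 holds the payload length; struct reasm and reasm_feed are illustrative names, not driver API.

#include <stdio.h>
#include <string.h>

enum rx_state { W4_HEADER, W4_DATA };

struct reasm {
	enum rx_state state;
	unsigned char pkt[2 + 255];
	size_t have;	/* bytes collected so far */
	size_t need;	/* bytes still expected, like rx_count */
};

static void reasm_feed(struct reasm *r, const unsigned char *p, size_t n)
{
	while (n) {
		size_t len = r->need < n ? r->need : n;	/* min_t() */

		memcpy(r->pkt + r->have, p, len);
		r->have += len;
		r->need -= len;
		p += len;
		n -= len;
		if (r->need)
			continue;	/* packet still incomplete */
		if (r->state == W4_HEADER && r->pkt[1]) {
			r->need = r->pkt[1];	/* byte 1 = payload length */
			r->state = W4_DATA;
			continue;
		}
		printf("complete packet, %zu bytes\n", r->have);
		r->have = 0;
		r->need = 2;
		r->state = W4_HEADER;
	}
}

int main(void)
{
	struct reasm r = { .state = W4_HEADER, .need = 2 };
	const unsigned char ev[] = { 0x0e, 0x03, 0x01, 0x20, 0x00 };

	reasm_feed(&r, ev, 1);		/* bytes arrive in arbitrary bursts, */
	reasm_feed(&r, ev + 1, 4);	/* exactly as the comment describes  */
	return 0;
}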
Example #4
static void rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	if ((skb = alloc_skb (size + NET_IP_ALIGN, flags)) == NULL) {
		if (netif_msg_rx_err (dev))
			devdbg (dev, "no rx skb");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return;
	}
	skb_reserve (skb, NET_IP_ALIGN);

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = rx_start;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			if (netif_msg_ifdown (dev))
				devdbg (dev, "device gone");
			netif_device_detach (dev->net);
			break;
		default:
			if (netif_msg_rx_err (dev))
				devdbg (dev, "rx submit, %d", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__skb_queue_tail (&dev->rxq, skb);
		}
	} else {
		if (netif_msg_ifdown (dev))
			devdbg (dev, "rx: stopped");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
}
Example #5
struct sk_buff *ndisc_build_skb(struct net_device *dev,
				const struct in6_addr *daddr,
				const struct in6_addr *saddr,
				struct icmp6hdr *icmp6h,
				const struct in6_addr *target,
				int llinfo)
{
	struct net *net = dev_net(dev);
	struct sock *sk = net->ipv6.ndisc_sk;
	struct sk_buff *skb;
	struct icmp6hdr *hdr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;
	int len;
	int err;
	u8 *opt;

	if (!dev->addr_len)
		llinfo = 0;

	len = sizeof(struct icmp6hdr) + (target ? sizeof(*target) : 0);
	if (llinfo)
		len += ndisc_opt_addr_space(dev);

	skb = sock_alloc_send_skb(sk,
				  (MAX_HEADER + sizeof(struct ipv6hdr) +
				   len + hlen + tlen),
				  1, &err);
	if (!skb) {
		ND_PRINTK0(KERN_ERR
			   "ICMPv6 ND: %s() failed to allocate an skb, err=%d.\n",
			   __func__, err);
		return NULL;
	}

	skb_reserve(skb, hlen);
	ip6_nd_hdr(sk, skb, dev, saddr, daddr, IPPROTO_ICMPV6, len);

	skb->transport_header = skb->tail;
	skb_put(skb, len);

	hdr = (struct icmp6hdr *)skb_transport_header(skb);
	memcpy(hdr, icmp6h, sizeof(*hdr));

	opt = skb_transport_header(skb) + sizeof(struct icmp6hdr);
	if (target) {
		*(struct in6_addr *)opt = *target;
		opt += sizeof(*target);
	}

	if (llinfo)
		ndisc_fill_addr_option(opt, llinfo, dev->dev_addr,
				       dev->addr_len, dev->type);

	hdr->icmp6_cksum = csum_ipv6_magic(saddr, daddr, len,
					   IPPROTO_ICMPV6,
					   csum_partial(hdr,
							len, 0));

	return skb;
}
Example #6
int ip_build_xmit(struct sock *sk,
		   void getfrag (const void *,
				 __u32,
				 char *,
				 unsigned int,	
				 unsigned int),
		   const void *frag,
		   unsigned short int length,
		   __u32 daddr,
		   __u32 user_saddr,
		   struct options * opt,
		   int flags,
		   int type,
		   int noblock) 
{
	struct rtable *rt;
	unsigned int fraglen, maxfraglen, fragheaderlen;
	int offset, mf;
	__u32 saddr;
	unsigned short id;
	struct iphdr *iph;
	__u32 raddr;
	struct device *dev = NULL;
	struct hh_cache * hh=NULL;
	int nfrags=0;
	__u32 true_daddr = daddr;

	if (opt && opt->srr && !sk->ip_hdrincl)
	  daddr = opt->faddr;
	
	ip_statistics.IpOutRequests++;

#ifdef CONFIG_IP_MULTICAST	
	if(MULTICAST(daddr) && *sk->ip_mc_name)
	{
		dev=dev_get(sk->ip_mc_name);
		if(!dev)
			return -ENODEV;
		rt=NULL;
		if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
			saddr = sk->saddr;
		else
			saddr = dev->pa_addr;
	}
	else
	{
#endif	
		rt = ip_check_route(&sk->ip_route_cache, daddr,
				    sk->localroute || (flags&MSG_DONTROUTE) ||
				    (opt && opt->is_strictroute), sk->bound_device);
		if (rt == NULL) 
		{
			ip_statistics.IpOutNoRoutes++;
			return(-ENETUNREACH);
		}
		saddr = rt->rt_src;

		hh = rt->rt_hh;
	
		if (sk->saddr && (!LOOPBACK(sk->saddr) || LOOPBACK(daddr)))
			saddr = sk->saddr;
			
		dev=rt->rt_dev;
#ifdef CONFIG_IP_MULTICAST
	}
	if (rt && !dev)
		dev = rt->rt_dev;
#endif		
	if (user_saddr)
		saddr = user_saddr;

	raddr = rt ? rt->rt_gateway : daddr;
	/*
	 *	Now compute the buffer space we require
	 */ 
	 
	/*
	 *	Try the simple case first. This leaves broadcast, multicast,
	 *	fragmented frames, and by choice RAW frames within 20 bytes
	 *	of maximum size (rare) to the long path.
	 */

	if (!sk->ip_hdrincl) {
		length += sizeof(struct iphdr);
		if (opt) {
			/* make sure not to exceed maximum packet size */
			if (0xffff - length < opt->optlen)
				return -EMSGSIZE;
			length += opt->optlen;
		}
	}

	if(length <= dev->mtu && !MULTICAST(daddr) && daddr!=0xFFFFFFFF && daddr!=dev->pa_brdaddr)
	{	
		int error;
		struct sk_buff *skb=sock_alloc_send_skb(sk, length+15+dev->hard_header_len,0, noblock, &error);
		if(skb==NULL)
		{
			ip_statistics.IpOutDiscards++;
			return error;
		}
		skb->dev=dev;
		skb->protocol = htons(ETH_P_IP);
		skb->free=1;
		skb->when=jiffies;
		skb->sk=sk;
		skb->arp=0;
		skb->saddr=saddr;
		skb->raddr = raddr;
		skb_reserve(skb,(dev->hard_header_len+15)&~15);
		if (hh)
		{
			skb->arp=1;
			memcpy(skb_push(skb,dev->hard_header_len),hh->hh_data,dev->hard_header_len);
			if (!hh->hh_uptodate)
			{
				skb->arp = 0;
#if RT_CACHE_DEBUG >= 2
				printk("ip_build_xmit: hh miss %08x via %08x\n", rt->rt_dst, rt->rt_gateway);
#endif				
			}
		}
		else if(dev->hard_header)
		{
			if(dev->hard_header(skb,dev,ETH_P_IP,NULL,NULL,0)>0)
				skb->arp=1;
		}
		else
			skb->arp=1;
		skb->ip_hdr=iph=(struct iphdr *)skb_put(skb,length);
		dev_lock_list();
		if(!sk->ip_hdrincl)
		{
			iph->version=4;
			iph->ihl=5;
			iph->tos=sk->ip_tos;
			iph->tot_len = htons(length);
			iph->id=htons(ip_id_count++);
			iph->frag_off = 0;
			iph->ttl=sk->ip_ttl;
			iph->protocol=type;
			iph->saddr=saddr;
			iph->daddr=daddr;
			if (opt) 
			{
				iph->ihl += opt->optlen>>2;
				ip_options_build(skb, opt,
						 true_daddr, dev->pa_addr, 0);
			}
			iph->check=0;
			iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
			getfrag(frag,saddr,((char *)iph)+iph->ihl*4,0, length-iph->ihl*4);
		}
Example #7
File: pg3.c Project: 2mac/iputils
struct sk_buff *
fill_packet(struct net_device *odev, __u32 saddr)
{
	struct sk_buff *skb;
	__u8 *eth;
	struct udphdr *udph;
	int datalen, iplen;
	struct iphdr *iph;

	skb = alloc_skb(pkt_size+64+16, GFP_ATOMIC);
	if (!skb) {
		sprintf(pg_result, "No memory");
		return NULL;
	}

	skb_reserve(skb, 16);

	/*  Reserve for ethernet and IP header  */
	eth = (__u8 *) skb_push(skb, 14);
	iph = (struct iphdr*)skb_put(skb, sizeof( struct iphdr));
	udph = (struct udphdr*)skb_put(skb, sizeof( struct udphdr));

	/*  Copy the ethernet header  */
	memcpy(eth, hh, 14);

	datalen = pkt_size-14-20-8; /* Eth + IPh + UDPh */
	if (datalen < 0)
		datalen = 0;

	udph->source= htons(9);
	udph->dest= htons(9);
	udph->len= htons(datalen+8); /* DATA + udphdr */
	udph->check=0;  /* No checksum */

	iph->ihl=5;
	iph->version=4;
	iph->ttl=3;
	iph->tos=0;
	iph->protocol = IPPROTO_UDP; /* UDP */
	iph->saddr =  saddr;
	iph->daddr =  in_aton(pg_dst);
	iph->frag_off = 0;
	iplen = 20 + 8 + datalen;
	iph->tot_len = htons(iplen);
	iph->check = 0;
	iph->check = ip_fast_csum((void *)iph, iph->ihl);
	skb->protocol = __constant_htons(ETH_P_IP);
	skb->mac.raw = ((u8*)iph) - 14;
	skb->dev = odev;
	skb->pkt_type = PACKET_HOST;

	if (nfrags<=0) {
		skb_put(skb, datalen);
	} else {
		int frags = nfrags;
		int i;

		if (frags > MAX_SKB_FRAGS)
			frags = MAX_SKB_FRAGS;
		if (datalen > frags*PAGE_SIZE) {
			skb_put(skb, datalen-frags*PAGE_SIZE);
			datalen = frags*PAGE_SIZE;
		}

		i = 0;
		while (datalen > 0) {
			struct page *page = alloc_pages(GFP_KERNEL, 0);
			skb_shinfo(skb)->frags[i].page = page;
			skb_shinfo(skb)->frags[i].page_offset = 0;
			skb_shinfo(skb)->frags[i].size = (datalen < PAGE_SIZE ? datalen : PAGE_SIZE);
			datalen -= skb_shinfo(skb)->frags[i].size;
			skb->len += skb_shinfo(skb)->frags[i].size;
			skb->data_len += skb_shinfo(skb)->frags[i].size;
			i++;
			skb_shinfo(skb)->nr_frags = i;
		}

		while (i < frags) {
			int rem;

			if (i == 0)
				break;

			rem = skb_shinfo(skb)->frags[i-1].size/2;
			if (rem == 0)
				break;

			skb_shinfo(skb)->frags[i-1].size -= rem;

			skb_shinfo(skb)->frags[i] = skb_shinfo(skb)->frags[i-1];
			get_page(skb_shinfo(skb)->frags[i].page);
			skb_shinfo(skb)->frags[i].page = skb_shinfo(skb)->frags[i-1].page;
			skb_shinfo(skb)->frags[i].page_offset += skb_shinfo(skb)->frags[i-1].size;
			skb_shinfo(skb)->frags[i].size = rem;
			i++;
			skb_shinfo(skb)->nr_frags = i;
		}
	}

	return skb;
}
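fill_packet() finishes the IPv4 header by zeroing iph->check and recomputing it with ip_fast_csum(), an arch-optimized kernel helper. The underlying algorithm is the RFC 1071 16-bit one's-complement sum, sketched portably below; ip_checksum here is an illustrative userspace re-implementation, not the kernel routine.

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

static uint16_t ip_checksum(const void *buf, size_t len)
{
	const uint8_t *p = buf;
	uint32_t sum = 0;

	while (len > 1) {
		sum += (uint32_t)p[0] << 8 | p[1];
		p += 2;
		len -= 2;
	}
	if (len)			/* odd trailing byte */
		sum += (uint32_t)p[0] << 8;
	while (sum >> 16)		/* fold the carries back in */
		sum = (sum & 0xffff) + (sum >> 16);
	return (uint16_t)~sum;
}

int main(void)
{
	/* 20-byte IPv4 header with the checksum field (bytes 10-11) zeroed */
	uint8_t iph[20] = {
		0x45, 0x00, 0x00, 0x54, 0x00, 0x00, 0x40, 0x00,
		0x40, 0x01, 0x00, 0x00, 0xc0, 0xa8, 0x00, 0x01,
		0xc0, 0xa8, 0x00, 0xc7,
	};
	uint16_t c = ip_checksum(iph, sizeof(iph));

	iph[10] = c >> 8;
	iph[11] = c & 0xff;
	/* re-summing a header that includes its own checksum yields 0 */
	printf("verify: %#x\n", ip_checksum(iph, sizeof(iph)));
	return 0;
}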
Example #8
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
				  const u8 *orig_addr, u32 orig_sn,
				  u8 target_flags, const u8 *target,
				  u32 target_sn, const u8 *da,
				  u8 hop_count, u8 ttl,
				  u32 lifetime, u32 metric, u32 preq_id,
				  struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			put_unaligned_le32(preq_id, pos);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}
	put_unaligned_le32(lifetime, pos); /* interval for RANN */
	pos += 4;
	put_unaligned_le32(metric, pos);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}
Example #9
/**
 * mesh_path_error_tx - Sends a PERR mesh management frame
 *
 * @ttl: allowed remaining hops
 * @target: broken destination
 * @target_sn: SN of the broken destination
 * @target_rcode: reason code for this PERR
 * @ra: node this frame is addressed to
 * @sdata: local mesh subif
 *
 * Note: This function may be called with driver locks taken that the driver
 * also acquires in the TX path.  To avoid a deadlock we don't transmit the
 * frame directly but add it to the pending queue instead.
 */
int mesh_path_error_tx(struct ieee80211_sub_if_data *sdata,
		       u8 ttl, const u8 *target, u32 target_sn,
		       u16 target_rcode, const u8 *ra)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_if_mesh *ifmsh = &sdata->u.mesh;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetof(struct ieee80211_mgmt, u.action.u.mesh_action) +
		      sizeof(mgmt->u.action.u.mesh_action);

	if (time_before(jiffies, ifmsh->next_perr))
		return -EAGAIN;

	skb = dev_alloc_skb(local->tx_headroom +
			    sdata->encrypt_headroom +
			    IEEE80211_ENCRYPT_TAILROOM +
			    hdr_len +
			    2 + 15 /* PERR IE */);
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom + sdata->encrypt_headroom);
	mgmt = (struct ieee80211_mgmt *) skb_put(skb, hdr_len);
	memset(mgmt, 0, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, ra, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;
	ie_len = 15;
	pos = skb_put(skb, 2 + ie_len);
	*pos++ = WLAN_EID_PERR;
	*pos++ = ie_len;
	/* ttl */
	*pos++ = ttl;
	/* number of destinations */
	*pos++ = 1;
	/* Flags field has AE bit only as defined in
	 * sec 8.4.2.117 IEEE802.11-2012
	 */
	*pos = 0;
	pos++;
	memcpy(pos, target, ETH_ALEN);
	pos += ETH_ALEN;
	put_unaligned_le32(target_sn, pos);
	pos += 4;
	put_unaligned_le16(target_rcode, pos);

	/* see note in function header */
	prepare_frame_for_deferred_tx(sdata, skb);
	ifmsh->next_perr = TU_TO_EXP_TIME(
				   ifmsh->mshcfg.dot11MeshHWMPperrMinInterval);
	ieee80211_add_pending_skb(local, skb);
	return 0;
}
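The time_before(jiffies, ifmsh->next_perr) check above rate-limits PERR frames with wrap-safe tick arithmetic. The same idea can be modeled in userspace with signed subtraction on an unsigned counter; struct limiter and try_send below are invented names for a sketch, assuming a 32-bit tick counter.

#include <stdint.h>
#include <stdio.h>

/* wrap-safe comparison, the same idea as the kernel's time_before() */
static int time_before(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) < 0;
}

struct limiter {
	uint32_t next;		/* like ifmsh->next_perr */
	uint32_t min_interval;	/* like dot11MeshHWMPperrMinInterval */
};

static int try_send(struct limiter *l, uint32_t now)
{
	if (time_before(now, l->next))
		return -1;		/* -EAGAIN in the driver */
	l->next = now + l->min_interval;
	return 0;			/* the frame may go out */
}

int main(void)
{
	struct limiter l = { .next = 0, .min_interval = 100 };
	uint32_t t;

	for (t = 0; t <= 250; t += 50)
		printf("t=%u -> %s\n", t, try_send(&l, t) ? "drop" : "send");
	return 0;
}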
Example #10
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;
	unsigned short reserve_headroom;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	if (dev->ul_max_pkts_per_xfer)
		size *= dev->ul_max_pkts_per_xfer;

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	if (dev->rx_needed_headroom)
		reserve_headroom = dev->rx_needed_headroom;
	else
		reserve_headroom = NET_IP_ALIGN;

	pr_debug("%s: size: %zu + %d(hr)", __func__, size, reserve_headroom);

	skb = alloc_skb(size + reserve_headroom, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	skb_reserve(skb, reserve_headroom);

	req->buf = skb->data;
	req->length = size;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
	}
	return retval;
}
Example #11
static int rt2x00mac_tx_rts_cts(struct rt2x00_dev *rt2x00dev,
				struct data_queue *queue,
				struct sk_buff *frag_skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(frag_skb);
	struct ieee80211_tx_info *rts_info;
	struct sk_buff *skb;
	unsigned int data_length;
	int retval = 0;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		data_length = sizeof(struct ieee80211_cts);
	else
		data_length = sizeof(struct ieee80211_rts);

	skb = dev_alloc_skb(data_length + rt2x00dev->hw->extra_tx_headroom);
	if (unlikely(!skb)) {
		rt2x00_warn(rt2x00dev, "Failed to create RTS/CTS frame\n");
		return -ENOMEM;
	}

	skb_reserve(skb, rt2x00dev->hw->extra_tx_headroom);
	skb_put(skb, data_length);

	/*
	 * Copy TX information over from original frame to
	 * RTS/CTS frame. Note that we set the no encryption flag
	 * since we don't want this frame to be encrypted.
	 * RTS frames should be acked, while CTS-to-self frames
	 * should not. The ready for TX flag is cleared to prevent
	 * the frame from being sent automatically when the descriptor is
	 * written to the hardware.
	 */
	memcpy(skb->cb, frag_skb->cb, sizeof(skb->cb));
	rts_info = IEEE80211_SKB_CB(skb);
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_RTS_CTS;
	rts_info->control.rates[0].flags &= ~IEEE80211_TX_RC_USE_CTS_PROTECT;

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		rts_info->flags |= IEEE80211_TX_CTL_NO_ACK;
	else
		rts_info->flags &= ~IEEE80211_TX_CTL_NO_ACK;

	/* Disable hardware encryption */
	rts_info->control.hw_key = NULL;

	/*
	 * RTS/CTS frame should use the length of the frame plus any
	 * encryption overhead that will be added by the hardware.
	 */
	data_length += rt2x00crypto_tx_overhead(rt2x00dev, skb);

	if (tx_info->control.rates[0].flags & IEEE80211_TX_RC_USE_CTS_PROTECT)
		ieee80211_ctstoself_get(rt2x00dev->hw, tx_info->control.vif,
					frag_skb->data, data_length, tx_info,
					(struct ieee80211_cts *)(skb->data));
	else
		ieee80211_rts_get(rt2x00dev->hw, tx_info->control.vif,
				  frag_skb->data, data_length, tx_info,
				  (struct ieee80211_rts *)(skb->data));

	retval = rt2x00queue_write_tx_frame(queue, skb, NULL, true);
	if (retval) {
		dev_kfree_skb_any(skb);
		rt2x00_warn(rt2x00dev, "Failed to send RTS/CTS frame\n");
	}

	return retval;
}
Example #12
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single_for_cpu(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->data,
				       pkt_len);
#endif
				pci_dma_sync_single_for_device(tp->pdev,
							       tp->rx_buffers[entry].mapping,
							       pkt_len, PCI_DMA_FROMDEVICE);
			} else { 	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
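tulip_rx() applies the classic copybreak heuristic: frames shorter than tulip_rx_copybreak are copied into a freshly allocated, minimally sized buffer so the full-size DMA buffer stays on the ring, while longer frames are passed up as-is and the ring slot is refilled. A hedged userspace sketch of that trade-off follows; ring_slot, rx_one, and the constants are illustrative, and allocation failure is reduced to assert() for brevity.

#include <assert.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>

#define PKT_BUF_SZ 1536	/* full-size ring buffer, as in the driver */
#define COPYBREAK  200	/* stand-in for tulip_rx_copybreak */

struct ring_slot {
	unsigned char *buf;	/* stands in for the DMA-mapped skb data */
};

/* Returns the buffer to hand up the stack; refills the slot if consumed. */
static unsigned char *rx_one(struct ring_slot *slot, int pkt_len)
{
	unsigned char *up;

	if (pkt_len < COPYBREAK) {
		/* Small frame: copy into a right-sized buffer and keep the
		 * big ring buffer in place for the next packet. */
		up = malloc(pkt_len + 2);
		assert(up);
		memcpy(up + 2, slot->buf, pkt_len);	/* +2 aligns the IP header */
		return up;
	}
	/* Large frame: pass the ring buffer up and replace it. */
	up = slot->buf;
	slot->buf = malloc(PKT_BUF_SZ);
	assert(slot->buf);
	return up;
}

int main(void)
{
	struct ring_slot slot = { .buf = malloc(PKT_BUF_SZ) };
	unsigned char *a, *b;

	assert(slot.buf);
	a = rx_one(&slot, 60);		/* copied; ring buffer untouched */
	b = rx_one(&slot, 1400);	/* passed up; ring slot refilled */
	printf("copied=%d passed-up=%d\n", a != slot.buf, b != slot.buf);
	free(a);
	free(b);
	free(slot.buf);
	return 0;
}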
Example #13
int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = netdev_priv(dev);
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

        if (rx_work_limit >= RX_RING_SIZE)
                rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);

       do {
		if (ioread32(tp->base_addr + CSR5) == 0xffffffff) {
			printk(KERN_DEBUG " In tulip_poll(), hardware disappeared.\n");
			break;
		}
               /* Acknowledge current RX interrupt sources. */
               iowrite32((RxIntr | RxNoBuf), tp->base_addr + CSR5);


               /* If we own the next entry, it is a new packet. Send it up. */
               while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
                       s32 status = le32_to_cpu(tp->rx_ring[entry].status);


                       if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
                               break;

                       if (tulip_debug > 5)
                               printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
                                      dev->name, entry, status);
                       if (--rx_work_limit < 0)
                               goto not_done;

                       if ((status & 0x38008300) != 0x0300) {
                               if ((status & 0x38000300) != 0x0300) {
                                /* Ignore earlier buffers. */
                                       if ((status & 0xffff) != 0x7fff) {
                                               if (tulip_debug > 1)
                                                       printk(KERN_WARNING "%s: Oversized Ethernet frame "
                                                              "spanned multiple buffers, status %8.8x!\n",
                                                              dev->name, status);
                                               tp->stats.rx_length_errors++;
                                       }
                               } else if (status & RxDescFatalErr) {
                                /* There was a fatal error. */
                                       if (tulip_debug > 2)
                                               printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
                                                      dev->name, status);
                                       tp->stats.rx_errors++; /* end of a packet.*/
                                       if (status & 0x0890) tp->stats.rx_length_errors++;
                                       if (status & 0x0004) tp->stats.rx_frame_errors++;
                                       if (status & 0x0002) tp->stats.rx_crc_errors++;
                                       if (status & 0x0001) tp->stats.rx_fifo_errors++;
                               }
                       } else {
                               /* Omit the four octet CRC from the length. */
                               short pkt_len = ((status >> 16) & 0x7ff) - 4;
                               struct sk_buff *skb;

#ifndef final_version
                               if (pkt_len > 1518) {
                                       printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
                                              dev->name, pkt_len, pkt_len);
                                       pkt_len = 1518;
                                       tp->stats.rx_length_errors++;
                               }
#endif
                               /* Check if the packet is long enough to accept without copying
                                  to a minimally-sized skbuff. */
                               if (pkt_len < tulip_rx_copybreak
                                   && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                                       skb_reserve(skb, 2);    /* 16 byte align the IP header */
                                       pci_dma_sync_single_for_cpu(tp->pdev,
								   tp->rx_buffers[entry].mapping,
								   pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
                                       eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->data,
                                                        pkt_len, 0);
                                       skb_put(skb, pkt_len);
#else
                                       memcpy(skb_put(skb, pkt_len),
                                              tp->rx_buffers[entry].skb->data,
                                              pkt_len);
#endif
                                       pci_dma_sync_single_for_device(tp->pdev,
								      tp->rx_buffers[entry].mapping,
								      pkt_len, PCI_DMA_FROMDEVICE);
                               } else {        /* Pass up the skb already on the Rx ring. */
                                       char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
                                                            pkt_len);

#ifndef final_version
                                       if (tp->rx_buffers[entry].mapping !=
                                           le32_to_cpu(tp->rx_ring[entry].buffer1)) {
                                               printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
                                                      "do not match in tulip_rx: %08x vs. %08llx %p / %p.\n",
                                                      dev->name,
                                                      le32_to_cpu(tp->rx_ring[entry].buffer1),
                                                      (unsigned long long)tp->rx_buffers[entry].mapping,
                                                      skb->head, temp);
                                       }
#endif

                                       pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
                                                        PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

                                       tp->rx_buffers[entry].skb = NULL;
                                       tp->rx_buffers[entry].mapping = 0;
                               }
                               skb->protocol = eth_type_trans(skb, dev);

                               netif_receive_skb(skb);

                               tp->stats.rx_packets++;
                               tp->stats.rx_bytes += pkt_len;
                       }
                       received++;

                       entry = (++tp->cur_rx) % RX_RING_SIZE;
                       if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
                               tulip_refill_rx(dev);

                }

               /* New ack strategy... irq does not ack Rx any longer;
                  hopefully this helps */

               /* Really bad things can happen here... If a new packet arrives
                * and an irq arrives (tx or just due to an occasionally unset
                * mask), it will be acked by the irq handler, but the new
                * thread is not scheduled. It is a major hole in the design.
                * No idea how to fix this if "playing with fire" fails
                * tomorrow (night 011029). If it does not fail, we won,
                * finally: the amount of IO did not increase at all. */
       } while ((ioread32(tp->base_addr + CSR5) & RxIntr));

done:

 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

          /* We use this simplistic scheme for interrupt mitigation (IM).
             It's proven by real-life installations. We could keep IM
             enabled continuously, but this would cause unnecessary latency.
             Unfortunately we can't use all the NET_RX_* feedback here.
             That would turn on IM for devices that are not contributing
             to backlog congestion, at the cost of unnecessary latency.

             We monitor the device RX ring and have:

             HW Interrupt Mitigation either ON or OFF.

             ON:  more than 1 pkt received (per intr.) OR we are dropping
             OFF: only 1 pkt received

             Note: we only use the min and max (0, 15) settings from
             mit_table */


          if( tp->flags &  HAS_INTR_MITIGATION) {
                 if( received > 1 ) {
                         if( ! tp->mit_on ) {
                                 tp->mit_on = 1;
                                 iowrite32(mit_table[MIT_TABLE], tp->base_addr + CSR11);
                         }
                  }
                 else {
                         if( tp->mit_on ) {
                                 tp->mit_on = 0;
                                 iowrite32(0, tp->base_addr + CSR11);
                         }
                  }
          }

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

         dev->quota -= received;
         *budget -= received;

         tulip_refill_rx(dev);

         /* If RX ring is not full we are out of memory. */
         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;

         /* Remove us from polling list and enable RX intr. */

         netif_rx_complete(dev);
         iowrite32(tulip_tbl[tp->chip_id].valid_intrs, tp->base_addr+CSR7);

         /* The last op happens after poll completion, which means the following:
          * 1. it can race with disabling irqs in the irq handler
          * 2. it can race with disabling/enabling irqs in other poll threads
          * 3. if an irq raised after beginning loop, it will be immediately
          *    triggered here.
          *
          * Summarizing: the logic results in some redundant irqs both
          * due to races in masking and due to too late acking of already
          * processed irqs. But it must not result in losing events.
          */

         return 0;

 not_done:
         if (!received) {

                 received = dev->quota; /* Should not happen */
         }
         dev->quota -= received;
         *budget -= received;

         if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
             tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
                 tulip_refill_rx(dev);

         if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;

         return 1;


 oom:    /* Executed with RX ints disabled */


         /* Start timer, stop polling, but do not enable rx interrupts. */
         mod_timer(&tp->oom_timer, jiffies+1);

         /* Think: timer_pending() was an explicit signature of a bug.
          * The timer can be pending now, but fire and complete
          * before we do netif_rx_complete(). See? We would lose it. */

         /* remove ourselves from the polling list */
         netif_rx_complete(dev);

         return 0;
}
Example #14
void nf_send_reset6(struct net *net, struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct tcphdr _otcph;
	const struct tcphdr *otcph;
	unsigned int otcplen, hh_len;
	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
	struct ipv6hdr *ip6h;
	struct dst_entry *dst = NULL;
	struct flowi6 fl6;

	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
		pr_debug("addr is not unicast.\n");
		return;
	}

	otcph = nf_reject_ip6_tcphdr_get(oldskb, &_otcph, &otcplen, hook);
	if (!otcph)
		return;

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.saddr = oip6h->daddr;
	fl6.daddr = oip6h->saddr;
	fl6.fl6_sport = otcph->dest;
	fl6.fl6_dport = otcph->source;
	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == NULL || dst->error) {
		dst_release(dst);
		return;
	}
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
	if (IS_ERR(dst))
		return;

	hh_len = (dst->dev->hard_header_len + 15)&~15;
	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
			 + sizeof(struct tcphdr) + dst->trailer_len,
			 GFP_ATOMIC);

	if (!nskb) {
		net_dbg_ratelimited("cannot alloc skb\n");
		dst_release(dst);
		return;
	}

	skb_dst_set(nskb, dst);

	skb_reserve(nskb, hh_len + dst->header_len);
	ip6h = nf_reject_ip6hdr_put(nskb, oldskb, IPPROTO_TCP,
				    ip6_dst_hoplimit(dst));
	nf_reject_ip6_tcphdr_put(nskb, oldskb, otcph, otcplen);

	nf_ct_attach(nskb, oldskb);

#if IS_ENABLED(CONFIG_BRIDGE_NETFILTER)
	/* If we use ip6_local_out for bridged traffic, the MAC source on
	 * the RST will be ours, instead of the destination's.  This confuses
	 * some routers/firewalls, and they drop the packet.  So we need to
	 * build the eth header using the original destination's MAC as the
	 * source, and send the RST packet directly.
	 */
	if (oldskb->nf_bridge) {
		struct ethhdr *oeth = eth_hdr(oldskb);

		nskb->dev = nf_bridge_get_physindev(oldskb);
		nskb->protocol = htons(ETH_P_IPV6);
		ip6h->payload_len = htons(sizeof(struct tcphdr));
		if (dev_hard_header(nskb, nskb->dev, ntohs(nskb->protocol),
				    oeth->h_source, oeth->h_dest, nskb->len) < 0)
			return;
		dev_queue_xmit(nskb);
	} else
#endif
		ip6_local_out(net, nskb->sk, nskb);
}
Example #15
File: data.c Project: 19Dan01/linux
static int nci_queue_tx_data_frags(struct nci_dev *ndev,
				   __u8 conn_id,
				   struct sk_buff *skb) {
	struct nci_conn_info    *conn_info;
	int total_len = skb->len;
	unsigned char *data = skb->data;
	unsigned long flags;
	struct sk_buff_head frags_q;
	struct sk_buff *skb_frag;
	int frag_len;
	int rc = 0;

	pr_debug("conn_id 0x%x, total_len %d\n", conn_id, total_len);

	/* Initialize the fragment queue before the first path that can
	 * jump to free_exit, which drains it. */
	__skb_queue_head_init(&frags_q);

	conn_info = nci_get_conn_info_by_conn_id(ndev, conn_id);
	if (!conn_info) {
		rc = -EPROTO;
		goto free_exit;
	}

	while (total_len) {
		frag_len =
			min_t(int, total_len, conn_info->max_pkt_payload_len);

		skb_frag = nci_skb_alloc(ndev,
					 (NCI_DATA_HDR_SIZE + frag_len),
					 GFP_KERNEL);
		if (skb_frag == NULL) {
			rc = -ENOMEM;
			goto free_exit;
		}
		skb_reserve(skb_frag, NCI_DATA_HDR_SIZE);

		/* first, copy the data */
		memcpy(skb_put(skb_frag, frag_len), data, frag_len);

		/* second, set the header */
		nci_push_data_hdr(ndev, conn_id, skb_frag,
				  ((total_len == frag_len) ?
				   (NCI_PBF_LAST) : (NCI_PBF_CONT)));

		__skb_queue_tail(&frags_q, skb_frag);

		data += frag_len;
		total_len -= frag_len;

		pr_debug("frag_len %d, remaining total_len %d\n",
			 frag_len, total_len);
	}

	/* queue all fragments atomically */
	spin_lock_irqsave(&ndev->tx_q.lock, flags);

	while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
		__skb_queue_tail(&ndev->tx_q, skb_frag);

	spin_unlock_irqrestore(&ndev->tx_q.lock, flags);

	/* free the original skb */
	kfree_skb(skb);

	goto exit;

free_exit:
	while ((skb_frag = __skb_dequeue(&frags_q)) != NULL)
		kfree_skb(skb_frag);

exit:
	return rc;
}
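The while loop above carves the payload into chunks of at most conn_info->max_pkt_payload_len and marks only the final chunk NCI_PBF_LAST (the rest are NCI_PBF_CONT). Below is that carving logic in isolation, as a runnable userspace sketch; send_frags and MAX_PAYLOAD are illustrative names.

#include <stdio.h>

#define MAX_PAYLOAD 4	/* stand-in for conn_info->max_pkt_payload_len */

static void send_frags(const unsigned char *data, int total_len)
{
	while (total_len) {
		int frag_len = total_len < MAX_PAYLOAD ? total_len : MAX_PAYLOAD;
		int last = (total_len == frag_len);

		printf("frag of %d bytes, pbf=%s\n",
		       frag_len, last ? "LAST" : "CONT");
		data += frag_len;
		total_len -= frag_len;
	}
}

int main(void)
{
	const unsigned char payload[10] = "0123456789";

	send_frags(payload, 10);	/* 4 + 4 + 2 -> CONT, CONT, LAST */
	return 0;
}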
Example #16
static struct sk_buff *rtllib_ADDBA(struct rtllib_device *ieee, u8 *Dst,
				    struct ba_record *pBA,
				    u16 StatusCode, u8 type)
{
	struct sk_buff *skb = NULL;
	struct rtllib_hdr_3addr *BAReq = NULL;
	u8 *tag = NULL;
	u16 len = ieee->tx_headroom + 9;

	netdev_dbg(ieee->dev, "%s(): frame(%d) sent to: %pM, ieee->dev:%p\n",
		   __func__, type, Dst, ieee->dev);

	if (pBA == NULL) {
		netdev_warn(ieee->dev, "pBA is NULL\n");
		return NULL;
	}
	skb = dev_alloc_skb(len + sizeof(struct rtllib_hdr_3addr));
	if (skb == NULL)
		return NULL;

	memset(skb->data, 0, sizeof(struct rtllib_hdr_3addr));

	skb_reserve(skb, ieee->tx_headroom);

	BAReq = (struct rtllib_hdr_3addr *)skb_put(skb,
		 sizeof(struct rtllib_hdr_3addr));

	ether_addr_copy(BAReq->addr1, Dst);
	ether_addr_copy(BAReq->addr2, ieee->dev->dev_addr);

	ether_addr_copy(BAReq->addr3, ieee->current_network.bssid);
	BAReq->frame_ctl = cpu_to_le16(RTLLIB_STYPE_MANAGE_ACT);

	tag = (u8 *)skb_put(skb, 9);
	*tag++ = ACT_CAT_BA;
	*tag++ = type;
	*tag++ = pBA->DialogToken;

	if (ACT_ADDBARSP == type) {
		RT_TRACE(COMP_DBG, "====>to send ADDBARSP\n");

		put_unaligned_le16(StatusCode, tag);
		tag += 2;
	}

	put_unaligned_le16(pBA->BaParamSet.shortData, tag);
	tag += 2;

	put_unaligned_le16(pBA->BaTimeoutValue, tag);
	tag += 2;

	if (ACT_ADDBAREQ == type) {
		memcpy(tag, (u8 *)&(pBA->BaStartSeqCtrl), 2);
		tag += 2;
	}

#ifdef VERBOSE_DEBUG
	print_hex_dump_bytes("rtllib_ADDBA(): ", DUMP_PREFIX_NONE, skb->data,
			     skb->len);
#endif
	return skb;
}
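rtllib_ADDBA() serializes its fixed fields with put_unaligned_le16() because the tag cursor lands at odd offsets inside the frame. What those helpers guarantee is byte-wise little-endian stores that are valid at any alignment; put_le16 and put_le32 below are illustrative userspace re-implementations, not the kernel's.

#include <stdint.h>
#include <stdio.h>

static void put_le16(uint16_t v, uint8_t *p)	/* safe at any alignment */
{
	p[0] = v & 0xff;
	p[1] = v >> 8;
}

static void put_le32(uint32_t v, uint8_t *p)
{
	put_le16(v & 0xffff, p);
	put_le16(v >> 16, p + 2);
}

int main(void)
{
	uint8_t tag[9];
	uint8_t *pos = tag;
	size_t i;

	*pos++ = 3;			/* a category byte, ACT_CAT_BA-style */
	put_le16(0x1234, pos);		/* odd offset 1: still well-defined */
	pos += 2;
	put_le32(0xdeadbeef, pos);
	for (i = 0; i < 7; i++)
		printf("%02x ", tag[i]);
	printf("\n");
	return 0;
}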
int rtllib_xmit_inter(struct sk_buff *skb, struct net_device *dev)
{
	struct rtllib_device *ieee = (struct rtllib_device *)
				     netdev_priv_rsl(dev);
	struct rtllib_txb *txb = NULL;
	struct rtllib_hdr_3addrqos *frag_hdr;
	int i, bytes_per_frag, nr_frags, bytes_last_frag, frag_size;
	unsigned long flags;
	struct net_device_stats *stats = &ieee->stats;
	int ether_type = 0, encrypt;
	int bytes, fc, qos_ctl = 0, hdr_len;
	struct sk_buff *skb_frag;
	struct rtllib_hdr_3addrqos header = { /* Ensure zero initialized */
		.duration_id = 0,
		.seq_ctl = 0,
		.qos_ctl = 0
	};
	u8 dest[ETH_ALEN], src[ETH_ALEN];
	int qos_actived = ieee->current_network.qos_data.active;
	struct lib80211_crypt_data *crypt = NULL;
	struct cb_desc *tcb_desc;
	u8 bIsMulticast = false;
	u8 IsAmsdu = false;
	bool	bdhcp = false;

	spin_lock_irqsave(&ieee->lock, flags);

	/* If there is no driver handler to take the TXB, don't bother
	 * creating it... */
	if ((!ieee->hard_start_xmit && !(ieee->softmac_features &
	   IEEE_SOFTMAC_TX_QUEUE)) ||
	   ((!ieee->softmac_data_hard_start_xmit &&
	   (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE)))) {
		printk(KERN_WARNING "%s: No xmit handler.\n",
		       ieee->dev->name);
		goto success;
	}


	if (likely(ieee->raw_tx == 0)) {
		if (unlikely(skb->len < SNAP_SIZE + sizeof(u16))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}
		/* Save source and destination addresses */
		memcpy(dest, skb->data, ETH_ALEN);
		memcpy(src, skb->data+ETH_ALEN, ETH_ALEN);

		memset(skb->cb, 0, sizeof(skb->cb));
		ether_type = ntohs(((struct ethhdr *)skb->data)->h_proto);

		if (ieee->iw_mode == IW_MODE_MONITOR) {
			txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
			if (unlikely(!txb)) {
				printk(KERN_WARNING "%s: Could not allocate "
				       "TXB\n",
				ieee->dev->name);
				goto failed;
			}

			txb->encrypted = 0;
			txb->payload_size = skb->len;
			memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
			       skb->len);

			goto success;
		}

		if (skb->len > 282) {
			if (ETH_P_IP == ether_type) {
				const struct iphdr *ip = (struct iphdr *)
					((u8 *)skb->data+14);
				if (IPPROTO_UDP == ip->protocol) {
					struct udphdr *udp;

					udp = (struct udphdr *)((u8 *)ip +
					      (ip->ihl << 2));
					if (((((u8 *)udp)[1] == 68) &&
					   (((u8 *)udp)[3] == 67)) ||
					   ((((u8 *)udp)[1] == 67) &&
					   (((u8 *)udp)[3] == 68))) {
						bdhcp = true;
						ieee->LPSDelayCnt = 200;
					}
				}
			} else if (ETH_P_ARP == ether_type) {
				printk(KERN_INFO "=================>DHCP "
				       "Protocol start tx ARP pkt!!\n");
				bdhcp = true;
				ieee->LPSDelayCnt =
					 ieee->current_network.tim.tim_count;
			}
		}

		skb->priority = rtllib_classify(skb, IsAmsdu);
		crypt = ieee->crypt_info.crypt[ieee->crypt_info.tx_keyidx];
		encrypt = !(ether_type == ETH_P_PAE && ieee->ieee802_1x) &&
			ieee->host_encrypt && crypt && crypt->ops;
		if (!encrypt && ieee->ieee802_1x &&
		    ieee->drop_unencrypted && ether_type != ETH_P_PAE) {
			stats->tx_dropped++;
			goto success;
		}
		if (crypt && !encrypt && ether_type == ETH_P_PAE) {
			struct eapol *eap = (struct eapol *)(skb->data +
				sizeof(struct ethhdr) - SNAP_SIZE -
				sizeof(u16));
			RTLLIB_DEBUG_EAP("TX: IEEE 802.11 EAPOL frame: %s\n",
				eap_get_type(eap->type));
		}

		/* Advance the SKB to the start of the payload */
		skb_pull(skb, sizeof(struct ethhdr));

		/* Determine total amount of storage required for TXB packets */
		bytes = skb->len + SNAP_SIZE + sizeof(u16);

		if (encrypt)
			fc = RTLLIB_FTYPE_DATA | RTLLIB_FCTL_WEP;
		else
			fc = RTLLIB_FTYPE_DATA;

		if (qos_actived)
			fc |= RTLLIB_STYPE_QOS_DATA;
		else
			fc |= RTLLIB_STYPE_DATA;

		if (ieee->iw_mode == IW_MODE_INFRA) {
			fc |= RTLLIB_FCTL_TODS;
			/* To DS: Addr1 = BSSID, Addr2 = SA,
			Addr3 = DA */
			memcpy(&header.addr1, ieee->current_network.bssid,
			       ETH_ALEN);
			memcpy(&header.addr2, &src, ETH_ALEN);
			if (IsAmsdu)
				memcpy(&header.addr3,
				       ieee->current_network.bssid, ETH_ALEN);
			else
				memcpy(&header.addr3, &dest, ETH_ALEN);
		} else if (ieee->iw_mode == IW_MODE_ADHOC) {
			/* not From/To DS: Addr1 = DA, Addr2 = SA,
			Addr3 = BSSID */
			memcpy(&header.addr1, dest, ETH_ALEN);
			memcpy(&header.addr2, src, ETH_ALEN);
			memcpy(&header.addr3, ieee->current_network.bssid,
			       ETH_ALEN);
		}

		bIsMulticast = is_multicast_ether_addr(header.addr1);

		header.frame_ctl = cpu_to_le16(fc);

		/* Determine fragmentation size based on destination (multicast
		* and broadcast are not fragmented) */
		if (bIsMulticast) {
			frag_size = MAX_FRAG_THRESHOLD;
			qos_ctl |= QOS_CTL_NOTCONTAIN_ACK;
		} else {
			frag_size = ieee->fts;
			qos_ctl = 0;
		}

		if (qos_actived) {
			hdr_len = RTLLIB_3ADDR_LEN + 2;

			/* in case we are a client, verify ACM is not set for this AC */
			while (unlikely(ieee->wmm_acm & (0x01 << skb->priority))) {
				printk(KERN_INFO "skb->priority = %x\n",
				       skb->priority);
				if (wme_downgrade_ac(skb))
					break;
				printk(KERN_INFO "converted skb->priority = %x\n",
				       skb->priority);
			}
			qos_ctl |= skb->priority;
			header.qos_ctl = cpu_to_le16(qos_ctl & RTLLIB_QOS_TID);
		} else {
			hdr_len = RTLLIB_3ADDR_LEN;
		}
		/* Determine the amount of payload per fragment. Regardless of
		 * whether this stack is providing the full 802.11 header, one
		 * will eventually be affixed to this fragment, so we must
		 * account for it when determining the amount of payload space. */
		bytes_per_frag = frag_size - hdr_len;
		if (ieee->config &
		   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
			bytes_per_frag -= RTLLIB_FCS_LEN;

		/* Each fragment may need to have room for encrypting
		 * pre/postfix */
		if (encrypt) {
			bytes_per_frag -= crypt->ops->extra_mpdu_prefix_len +
				crypt->ops->extra_mpdu_postfix_len +
				crypt->ops->extra_msdu_prefix_len +
				crypt->ops->extra_msdu_postfix_len;
		}
		/* Number of fragments is the total number of bytes divided by
		 * the payload per fragment */
		nr_frags = bytes / bytes_per_frag;
		bytes_last_frag = bytes % bytes_per_frag;
		if (bytes_last_frag)
			nr_frags++;
		else
			bytes_last_frag = bytes_per_frag;

		/* When we allocate the TXB we allocate enough space for the
		 * reserve and full fragment bytes (bytes_per_frag doesn't
		 * include prefix, postfix, header, FCS, etc.) */
		txb = rtllib_alloc_txb(nr_frags, frag_size +
				       ieee->tx_headroom, GFP_ATOMIC);
		if (unlikely(!txb)) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}
		txb->encrypted = encrypt;
		txb->payload_size = bytes;

		if (qos_actived)
			txb->queue_index = UP2AC(skb->priority);
		else
			txb->queue_index = WME_AC_BE;

		for (i = 0; i < nr_frags; i++) {
			skb_frag = txb->fragments[i];
			tcb_desc = (struct cb_desc *)(skb_frag->cb +
				    MAX_DEV_ADDR_SIZE);
			if (qos_actived) {
				skb_frag->priority = skb->priority;
				tcb_desc->queue_index =  UP2AC(skb->priority);
			} else {
				skb_frag->priority = WME_AC_BE;
				tcb_desc->queue_index = WME_AC_BE;
			}
			skb_reserve(skb_frag, ieee->tx_headroom);

			if (encrypt) {
				if (ieee->hwsec_active)
					tcb_desc->bHwSec = 1;
				else
					tcb_desc->bHwSec = 0;
				skb_reserve(skb_frag,
					    crypt->ops->extra_mpdu_prefix_len +
					    crypt->ops->extra_msdu_prefix_len);
			} else {
				tcb_desc->bHwSec = 0;
			}
			frag_hdr = (struct rtllib_hdr_3addrqos *)
				   skb_put(skb_frag, hdr_len);
			memcpy(frag_hdr, &header, hdr_len);

			/* If this is not the last fragment, then add the
			 * MOREFRAGS bit to the frame control */
			if (i != nr_frags - 1) {
				frag_hdr->frame_ctl = cpu_to_le16(
					fc | RTLLIB_FCTL_MOREFRAGS);
				bytes = bytes_per_frag;

			} else {
				/* The last fragment has the remaining length */
				bytes = bytes_last_frag;
			}
			if ((qos_actived) && (!bIsMulticast)) {
				frag_hdr->seq_ctl =
					 rtllib_query_seqnum(ieee, skb_frag,
							     header.addr1);
				frag_hdr->seq_ctl =
					 cpu_to_le16(frag_hdr->seq_ctl<<4 | i);
			} else {
				frag_hdr->seq_ctl =
					 cpu_to_le16(ieee->seq_ctrl[0]<<4 | i);
			}
			/* Put a SNAP header on the first fragment */
			if (i == 0) {
				rtllib_put_snap(
					skb_put(skb_frag, SNAP_SIZE +
					sizeof(u16)), ether_type);
				bytes -= SNAP_SIZE + sizeof(u16);
			}

			memcpy(skb_put(skb_frag, bytes), skb->data, bytes);

			/* Advance the SKB... */
			skb_pull(skb, bytes);

			/* Encryption routine will move the header forward in
			 * order to insert the IV between the header and the
			 * payload */
			if (encrypt)
				rtllib_encrypt_fragment(ieee, skb_frag,
							hdr_len);
			if (ieee->config &
			   (CFG_RTLLIB_COMPUTE_FCS | CFG_RTLLIB_RESERVE_FCS))
				skb_put(skb_frag, 4);
		}

		if ((qos_actived) && (!bIsMulticast)) {
			if (ieee->seq_ctrl[UP2AC(skb->priority) + 1] == 0xFFF)
				ieee->seq_ctrl[UP2AC(skb->priority) + 1] = 0;
			else
				ieee->seq_ctrl[UP2AC(skb->priority) + 1]++;
		} else {
			if (ieee->seq_ctrl[0] == 0xFFF)
				ieee->seq_ctrl[0] = 0;
			else
				ieee->seq_ctrl[0]++;
		}
	} else {
		if (unlikely(skb->len < sizeof(struct rtllib_hdr_3addr))) {
			printk(KERN_WARNING "%s: skb too small (%d).\n",
			ieee->dev->name, skb->len);
			goto success;
		}

		txb = rtllib_alloc_txb(1, skb->len, GFP_ATOMIC);
		if (!txb) {
			printk(KERN_WARNING "%s: Could not allocate TXB\n",
			ieee->dev->name);
			goto failed;
		}

		txb->encrypted = 0;
		txb->payload_size = skb->len;
		memcpy(skb_put(txb->fragments[0], skb->len), skb->data,
		       skb->len);
	}

 success:
	if (txb) {
		struct cb_desc *tcb_desc = (struct cb_desc *)
				(txb->fragments[0]->cb + MAX_DEV_ADDR_SIZE);
		tcb_desc->bTxEnableFwCalcDur = 1;
		tcb_desc->priority = skb->priority;

		if (ether_type == ETH_P_PAE) {
			if (ieee->pHTInfo->IOTAction &
			    HT_IOT_ACT_WA_IOT_Broadcom) {
				tcb_desc->data_rate =
					 MgntQuery_TxRateExcludeCCKRates(ieee);
				tcb_desc->bTxDisableRateFallBack = false;
			} else {
				tcb_desc->data_rate = ieee->basic_rate;
				tcb_desc->bTxDisableRateFallBack = 1;
			}


			tcb_desc->RATRIndex = 7;
			tcb_desc->bTxUseDriverAssingedRate = 1;
		} else {
			if (is_multicast_ether_addr(header.addr1))
				tcb_desc->bMulticast = 1;
			if (is_broadcast_ether_addr(header.addr1))
				tcb_desc->bBroadcast = 1;
			rtllib_txrate_selectmode(ieee, tcb_desc);
			if (tcb_desc->bMulticast ||  tcb_desc->bBroadcast)
				tcb_desc->data_rate = ieee->basic_rate;
			else
				tcb_desc->data_rate = CURRENT_RATE(ieee->mode,
					ieee->rate, ieee->HTCurrentOperaRate);

			if (bdhcp == true) {
				if (ieee->pHTInfo->IOTAction &
				    HT_IOT_ACT_WA_IOT_Broadcom) {
					tcb_desc->data_rate =
					   MgntQuery_TxRateExcludeCCKRates(ieee);
					tcb_desc->bTxDisableRateFallBack = false;
				} else {
					tcb_desc->data_rate = MGN_1M;
					tcb_desc->bTxDisableRateFallBack = 1;
				}


				tcb_desc->RATRIndex = 7;
				tcb_desc->bTxUseDriverAssingedRate = 1;
				tcb_desc->bdhcp = 1;
			}

			rtllib_qurey_ShortPreambleMode(ieee, tcb_desc);
			rtllib_tx_query_agg_cap(ieee, txb->fragments[0],
						tcb_desc);
			rtllib_query_HTCapShortGI(ieee, tcb_desc);
			rtllib_query_BandwidthMode(ieee, tcb_desc);
			rtllib_query_protectionmode(ieee, tcb_desc,
						    txb->fragments[0]);
		}
	}
	spin_unlock_irqrestore(&ieee->lock, flags);
	dev_kfree_skb_any(skb);
	if (txb) {
		if (ieee->softmac_features & IEEE_SOFTMAC_TX_QUEUE) {
			dev->stats.tx_packets++;
			dev->stats.tx_bytes += txb->payload_size;
			rtllib_softmac_xmit(txb, ieee);
		} else {
			if ((*ieee->hard_start_xmit)(txb, dev) == 0) {
				stats->tx_packets++;
				stats->tx_bytes += txb->payload_size;
				return 0;
			}
			rtllib_txb_free(txb);
		}
	}

	return 0;

 failed:
	spin_unlock_irqrestore(&ieee->lock, flags);
	netif_stop_queue(dev);
	stats->tx_errors++;
	return 1;

}
int rtllib_xmit(struct sk_buff *skb, struct net_device *dev)
{
	memset(skb->cb, 0, sizeof(skb->cb));
	return rtllib_xmit_inter(skb, dev);
}
static int ip6_fragment(struct sk_buff *skb, int (*output)(struct sk_buff *))
{
	struct net_device *dev;
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info*)skb->dst;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	u32 frag_id = 0;
	int ptr, offset = 0, err=0;
	u8 *prevhdr, nexthdr = 0;

	dev = rt->u.dst.dev;
	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = dst_mtu(&rt->u.dst) - hlen - sizeof(struct frag_hdr);

	if (skb_shinfo(skb)->frag_list) {
		int first_len = skb_pagelen(skb);

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb))
			goto slow_path;

		for (frag = skb_shinfo(skb)->frag_list; frag; frag = frag->next) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < hlen)
			    goto slow_path;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path;

			BUG_ON(frag->sk);
			if (skb->sk) {
				sock_hold(skb->sk);
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
				skb->truesize -= frag->truesize;
			}
		}

		err = 0;
		offset = 0;
		frag = skb_shinfo(skb)->frag_list;
		skb_shinfo(skb)->frag_list = NULL;
		/* BUILD HEADER */

		tmp_hdr = kmalloc(hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			return -ENOMEM;
		}

		*prevhdr = NEXTHDR_FRAGMENT;
		memcpy(tmp_hdr, skb->nh.raw, hlen);
		__skb_pull(skb, hlen);
		fh = (struct frag_hdr*)__skb_push(skb, sizeof(struct frag_hdr));
		skb->nh.raw = __skb_push(skb, hlen);
		memcpy(skb->nh.raw, tmp_hdr, hlen);

		ipv6_select_ident(skb, fh);
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		frag_id = fh->identification;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		skb->nh.ipv6h->payload_len = htons(first_len - sizeof(struct ipv6hdr));
		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				frag->h.raw = frag->data;
				fh = (struct frag_hdr*)__skb_push(frag, sizeof(struct frag_hdr));
				frag->nh.raw = __skb_push(frag, hlen);
				memcpy(frag->nh.raw, tmp_hdr, hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next != NULL)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}
			
			err = output(skb);
			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
			return 0;
		}

		while (frag) {
			skb = frag->next;
			kfree_skb(frag);
			frag = skb;
		}

		IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
		return err;
	}

slow_path:
	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;

	/*
	 *	Keep copying data until we run out.
	 */
	while(left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}
		/*
		 *	Allocate buffer.
		 */

		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 LL_RESERVED_SPACE(rt->u.dst.dev), GFP_ATOMIC);
		if (frag == NULL) {
			NETDEBUG(KERN_INFO "IPv6: frag: no memory for new fragment!\n");
			IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, LL_RESERVED_SPACE(rt->u.dst.dev));
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		frag->nh.raw = frag->data;
		fh = (struct frag_hdr*)(frag->data + hlen);
		frag->h.raw = frag->data + hlen + sizeof(struct frag_hdr);

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		memcpy(frag->nh.raw, skb->data, hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		if (!frag_id) {
			ipv6_select_ident(skb, fh);
			frag_id = fh->identification;
		} else
			fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		if (skb_copy_bits(skb, ptr, frag->h.raw, len))
			BUG();
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		frag->nh.ipv6h->payload_len = htons(frag->len - sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */

		IP6_INC_STATS(IPSTATS_MIB_FRAGCREATES);

		err = output(frag);
		if (err)
			goto fail;
	}
	kfree_skb(skb);
	IP6_INC_STATS(IPSTATS_MIB_FRAGOKS);
	return err;

fail:
	kfree_skb(skb); 
	IP6_INC_STATS(IPSTATS_MIB_FRAGFAILS);
	return err;
}
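/*
 * Worked example of the slow-path arithmetic above (a hedged sketch,
 * assuming a 1500-byte link MTU and a bare IPv6 header, hlen == 40):
 * every fragment except the last must carry a multiple of 8 bytes,
 * which is what the "len &= ~7" above enforces.
 */
static unsigned int ip6_frag_payload_per_fragment(void)
{
	unsigned int mtu = 1500 - 40 - sizeof(struct frag_hdr);	/* 1452 */

	return mtu & ~7;	/* 1448 bytes in each full-sized fragment */
}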
Example #19
int wl1271_cmd_build_arp_rsp(struct wl1271 *wl, struct wl12xx_vif *wlvif)
{
	int ret, extra;
	u16 fc;
	struct ieee80211_vif *vif = wl12xx_wlvif_to_vif(wlvif);
	struct sk_buff *skb;
	struct wl12xx_arp_rsp_template *tmpl;
	struct ieee80211_hdr_3addr *hdr;
	struct arphdr *arp_hdr;

	skb = dev_alloc_skb(sizeof(*hdr) + sizeof(__le16) + sizeof(*tmpl) +
			    WL1271_EXTRA_SPACE_MAX);
	if (!skb) {
		wl1271_error("failed to allocate buffer for arp rsp template");
		return -ENOMEM;
	}

	skb_reserve(skb, sizeof(*hdr) + WL1271_EXTRA_SPACE_MAX);

	tmpl = (struct wl12xx_arp_rsp_template *)skb_put(skb, sizeof(*tmpl));
	memset(tmpl, 0, sizeof(*tmpl));

	/* llc layer */
	memcpy(tmpl->llc_hdr, rfc1042_header, sizeof(rfc1042_header));
	tmpl->llc_type = cpu_to_be16(ETH_P_ARP);

	/* arp header */
	arp_hdr = &tmpl->arp_hdr;
	arp_hdr->ar_hrd = cpu_to_be16(ARPHRD_ETHER);
	arp_hdr->ar_pro = cpu_to_be16(ETH_P_IP);
	arp_hdr->ar_hln = ETH_ALEN;
	arp_hdr->ar_pln = 4;
	arp_hdr->ar_op = cpu_to_be16(ARPOP_REPLY);

	/* arp payload */
	memcpy(tmpl->sender_hw, vif->addr, ETH_ALEN);
	tmpl->sender_ip = wlvif->ip_addr;

	/* encryption space */
	switch (wlvif->encryption_type) {
	case KEY_TKIP:
		extra = WL1271_EXTRA_SPACE_TKIP;
		break;
	case KEY_AES:
		extra = WL1271_EXTRA_SPACE_AES;
		break;
	case KEY_NONE:
	case KEY_WEP:
	case KEY_GEM:
		extra = 0;
		break;
	default:
		wl1271_warning("Unknown encryption type: %d",
			       wlvif->encryption_type);
		ret = -EINVAL;
		goto out;
	}

	if (extra) {
		u8 *space = skb_push(skb, extra);
		memset(space, 0, extra);
	}

	/* QoS header - BE */
	if (wlvif->sta.qos)
		memset(skb_push(skb, sizeof(__le16)), 0, sizeof(__le16));

	/* mac80211 header */
	hdr = (struct ieee80211_hdr_3addr *)skb_push(skb, sizeof(*hdr));
	memset(hdr, 0, sizeof(*hdr));
	fc = IEEE80211_FTYPE_DATA | IEEE80211_FCTL_TODS;
	if (wlvif->sta.qos)
		fc |= IEEE80211_STYPE_QOS_DATA;
	else
		fc |= IEEE80211_STYPE_DATA;
	if (wlvif->encryption_type != KEY_NONE)
		fc |= IEEE80211_FCTL_PROTECTED;

	hdr->frame_control = cpu_to_le16(fc);
	memcpy(hdr->addr1, vif->bss_conf.bssid, ETH_ALEN);
	memcpy(hdr->addr2, vif->addr, ETH_ALEN);
	memset(hdr->addr3, 0xff, ETH_ALEN);

	ret = wl1271_cmd_template_set(wl, wlvif->role_id, CMD_TEMPL_ARP_RSP,
				      skb->data, skb->len, 0,
				      wlvif->basic_rate);
out:
	dev_kfree_skb(skb);
	return ret;
}
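/*
 * The template above is assembled back to front: reserve worst-case
 * headroom, skb_put() the innermost payload, then skb_push() each
 * header in front of it. Generic sketch of the pattern (the names
 * here are illustrative, not taken from the driver):
 */
static void *build_back_to_front(struct sk_buff *skb, unsigned int head_max,
				 unsigned int pay_len, unsigned int hdr_len)
{
	void *hdr;

	skb_reserve(skb, head_max);			/* room for all headers */
	memset(skb_put(skb, pay_len), 0, pay_len);	/* payload first */
	hdr = skb_push(skb, hdr_len);			/* prepend header last */
	memset(hdr, 0, hdr_len);
	return hdr;
}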
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi *fl,
	struct rt6_info *rt, unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;

	if (flags&MSG_PROBE)
		return 0;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (np->cork.opt == NULL) {
				np->cork.opt = kmalloc(opt->tot_len,
						       sk->sk_allocation);
				if (unlikely(np->cork.opt == NULL))
					return -ENOBUFS;
			} else if (np->cork.opt->tot_len < opt->tot_len) {
				printk(KERN_DEBUG "ip6_append_data: invalid option length\n");
				return -EINVAL;
			}
			memcpy(np->cork.opt, opt, opt->tot_len);
			inet->cork.flags |= IPCORK_OPT;
			/* need source address above miyazawa*/
		}
		dst_hold(&rt->u.dst);
		np->cork.rt = rt;
		inet->cork.fl = *fl;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		inet->cork.fragsize = mtu = dst_mtu(rt->u.dst.path);
		if (dst_allfrag(rt->u.dst.path))
			inet->cork.flags |= IPCORK_ALLFRAG;
		inet->cork.length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->u.dst.header_len + (opt ? opt->opt_flen : 0);
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = np->cork.rt;
		fl = &inet->cork.fl;
		if (inet->cork.flags & IPCORK_OPT)
			opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = inet->cork.fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + (opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (inet->cork.length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks 
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji 
	 */

	inet->cork.length += length;
	if (((length > mtu) && (sk->sk_protocol == IPPROTO_UDP)) &&
	    (rt->u.dst.dev->features & NETIF_F_UFO)) {

		if(ip6_ufo_append_data(sk, getfrag, from, length, hh_len,
				fragheaderlen, transhdrlen, mtu, flags))
			goto error;

		return 0;
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (inet->cork.length <= mtu && !(inet->cork.flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->u.dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
	 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->u.dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message 
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb->nh.raw = data + exthdrlen;
			data += fragheaderlen;
			skb->h.raw = data + exthdrlen;

			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				skb_trim(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->u.dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
				skb->truesize += PAGE_SIZE;
				atomic_add(PAGE_SIZE, &sk->sk_wmem_alloc);
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page)+frag->page_offset+frag->size, offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	inet->cork.length -= length;
	IP6_INC_STATS(IPSTATS_MIB_OUTDISCARDS);
	return err;
}
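/*
 * Userspace view of the corking path above (hedged sketch): sends
 * flagged MSG_MORE only accumulate on sk_write_queue; the first send
 * without the flag lets the stack flush the whole queue as a single
 * datagram, fragmenting it if needed.
 */
#include <netinet/in.h>
#include <sys/socket.h>

static void send_in_two_parts(int fd, const struct sockaddr_in6 *dst,
			      const void *p1, size_t l1,
			      const void *p2, size_t l2)
{
	sendto(fd, p1, l1, MSG_MORE,
	       (const struct sockaddr *)dst, sizeof(*dst));
	sendto(fd, p2, l2, 0,
	       (const struct sockaddr *)dst, sizeof(*dst));
}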
Example #21
/* Called in soft-irq context */
static void smd_net_data_handler(unsigned long arg)
{
    struct net_device *dev = (struct net_device *) arg;
    struct rmnet_private *p = netdev_priv(dev);
    struct sk_buff *skb;
    void *ptr = 0;
    int sz;
    u32 opmode = p->operation_mode;
    unsigned long flags;

    for (;;) {
        sz = smd_cur_packet_size(p->ch);
        if (sz == 0) break;
        if (smd_read_avail(p->ch) < sz) break;

        if (RMNET_IS_MODE_IP(opmode) ? (sz > dev->mtu) :
                (sz > (dev->mtu + ETH_HLEN))) {
            pr_err("[%s] rmnet_recv() discarding packet len %d (%d mtu)\n",
                   dev->name, sz, RMNET_IS_MODE_IP(opmode) ?
                   dev->mtu : (dev->mtu + ETH_HLEN));
            ptr = 0;
        } else {
            skb = dev_alloc_skb(sz + NET_IP_ALIGN);
            if (skb == NULL) {
                pr_err("[%s] rmnet_recv() cannot allocate skb\n",
                       dev->name);
                ptr = 0;    /* drain the packet below instead */
            } else {
                skb->dev = dev;
                skb_reserve(skb, NET_IP_ALIGN);
                ptr = skb_put(skb, sz);
                wake_lock_timeout(&p->wake_lock, HZ / 2);
                if (smd_read(p->ch, ptr, sz) != sz) {
                    pr_err("[%s] rmnet_recv() smd lied about avail?!",
                           dev->name);
                    ptr = 0;
                    dev_kfree_skb_irq(skb);
                } else {
                    /* Handle Rx frame format */
                    spin_lock_irqsave(&p->lock, flags);
                    opmode = p->operation_mode;
                    spin_unlock_irqrestore(&p->lock, flags);

                    if (RMNET_IS_MODE_IP(opmode)) {
                        /* Driver in IP mode */
                        skb->protocol =
                            rmnet_ip_type_trans(skb, dev);
                    } else {
                        /* Driver in Ethernet mode */
                        skb->protocol =
                            eth_type_trans(skb, dev);
                    }
                    if (RMNET_IS_MODE_IP(opmode) ||
                            count_this_packet(ptr, skb->len)) {
#ifdef CONFIG_MSM_RMNET_DEBUG
                        p->wakeups_rcv +=
                            rmnet_cause_wakeup(p);
#endif
                        p->stats.rx_packets++;
                        p->stats.rx_bytes += skb->len;
                    }
                    DBG1("[%s] Rx packet #%lu len=%d\n",
                         dev->name, p->stats.rx_packets,
                         skb->len);

                    /* Deliver to network stack */
                    netif_rx(skb);
                }
                continue;
            }
        }
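        /* Both failure paths above leave ptr == 0: smd_read() with a
         * NULL buffer is assumed to just drain sz bytes from the
         * channel so the packet stream stays in sync.
         */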
        if (smd_read(p->ch, ptr, sz) != sz)
            pr_err("[%s] rmnet_recv() smd lied about avail?!",
                   dev->name);
    }
}
Example #22
/*
 *	Create an arp packet. If (dest_hw == NULL), we create a broadcast
 *	message.
 */
struct sk_buff *arp_create(int type, int ptype, __be32 dest_ip,
			   struct net_device *dev, __be32 src_ip,
			   const unsigned char *dest_hw,
			   const unsigned char *src_hw,
			   const unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;
	int hlen = LL_RESERVED_SPACE(dev);
	int tlen = dev->needed_tailroom;

	/*
	 *	Allocate a buffer
	 */

	skb = alloc_skb(arp_hdr_len(dev) + hlen + tlen, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, hlen);
	skb_reset_network_header(skb);
	arp = (struct arphdr *) skb_put(skb, arp_hdr_len(dev));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_ARP);
	if (src_hw == NULL)
		src_hw = dev->dev_addr;
	if (dest_hw == NULL)
		dest_hw = dev->broadcast;

	/*
	 *	Fill the device header for the ARP frame
	 */
	if (dev_hard_header(skb, dev, ptype, dest_hw, src_hw, skb->len) < 0)
		goto out;

	/*
	 * Fill out the arp protocol part.
	 *
	 * The arp hardware type should match the device type, except for FDDI,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */
	/*
	 *	Exceptions everywhere. AX.25 uses the AX.25 PID value not the
	 *	DIX code for the protocol. Make these device structure fields.
	 */
	switch (dev->type) {
	default:
		arp->ar_hrd = htons(dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		break;

#if IS_ENABLED(CONFIG_AX25)
	case ARPHRD_AX25:
		arp->ar_hrd = htons(ARPHRD_AX25);
		arp->ar_pro = htons(AX25_P_IP);
		break;

#if IS_ENABLED(CONFIG_NETROM)
	case ARPHRD_NETROM:
		arp->ar_hrd = htons(ARPHRD_NETROM);
		arp->ar_pro = htons(AX25_P_IP);
		break;
#endif
#endif

#if IS_ENABLED(CONFIG_FDDI)
	case ARPHRD_FDDI:
		arp->ar_hrd = htons(ARPHRD_ETHER);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
#if IS_ENABLED(CONFIG_TR)
	case ARPHRD_IEEE802_TR:
		arp->ar_hrd = htons(ARPHRD_IEEE802);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
	}

	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr = (unsigned char *)(arp + 1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr += dev->addr_len;
	memcpy(arp_ptr, &src_ip, 4);
	arp_ptr += 4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr += dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}
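/*
 * In-tree callers pair arp_create() with arp_xmit(). Hedged sketch of
 * a gratuitous ARP announcing dev's own address (dest_hw == NULL
 * selects dev->broadcast, per the comment above):
 */
static void arp_announce_self(struct net_device *dev, __be32 ip)
{
	struct sk_buff *skb = arp_create(ARPOP_REQUEST, ETH_P_ARP, ip, dev,
					 ip, NULL, dev->dev_addr, NULL);

	if (skb)
		arp_xmit(skb);
}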
Example #23
static void delude_send_reset(struct sk_buff *oldskb, unsigned int hook)
{
	struct tcphdr _otcph, *tcph;
	const struct tcphdr *oth;
	const struct iphdr *oiph;
	unsigned int addr_type;
	struct sk_buff *nskb;
	struct iphdr *niph;

	oiph = ip_hdr(oldskb);

	/* IP header checks: fragment. */
	if (oiph->frag_off & htons(IP_OFFSET))
		return;

	oth = skb_header_pointer(oldskb, ip_hdrlen(oldskb),
				 sizeof(_otcph), &_otcph);
	if (oth == NULL)
		return;

	/* No RST for RST. */
	if (oth->rst)
		return;

	/* Check checksum */
	if (nf_ip_checksum(oldskb, hook, ip_hdrlen(oldskb), IPPROTO_TCP))
		return;

	nskb = alloc_skb(sizeof(struct iphdr) + sizeof(struct tcphdr) +
	                 LL_MAX_HEADER, GFP_ATOMIC);
	if (nskb == NULL)
		return;

	skb_reserve(nskb, LL_MAX_HEADER);
	skb_reset_network_header(nskb);
	niph = (struct iphdr *)skb_put(nskb, sizeof(struct iphdr));
	niph->version  = 4;
	niph->ihl      = sizeof(struct iphdr) / 4;
	niph->tos      = 0;
	niph->id       = 0;
	niph->frag_off = htons(IP_DF);
	niph->protocol = IPPROTO_TCP;
	niph->check    = 0;
	niph->saddr    = oiph->daddr;
	niph->daddr    = oiph->saddr;

	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	memset(tcph, 0, sizeof(*tcph));
	tcph->source = oth->dest;
	tcph->dest   = oth->source;
	tcph->doff   = sizeof(struct tcphdr) / 4;

	/* DELUDE essential part */
	if (oth->syn && !oth->ack && !oth->rst && !oth->fin) {
		tcph->syn     = true;
		tcph->seq     = 0;
		tcph->ack     = true;
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin +
		                oldskb->len - ip_hdrlen(oldskb) -
		                (oth->doff << 2));
	} else {
		tcph->rst = true;
		if (!oth->ack) {
			tcph->seq     = 0;
			tcph->ack     = true;
			tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn +
			                oth->fin + oldskb->len -
			                ip_hdrlen(oldskb) - (oth->doff << 2));
		} else {
			tcph->seq     = oth->ack_seq;
			tcph->ack     = false;
			tcph->ack_seq = 0;
		}
	}

	tcph->check = tcp_v4_check(sizeof(struct tcphdr), niph->saddr,
	              niph->daddr, csum_partial((char *)tcph,
	              sizeof(struct tcphdr), 0));

	addr_type = RTN_UNSPEC;
#ifdef CONFIG_BRIDGE_NETFILTER
	if (hook != NF_INET_FORWARD || (nskb->nf_bridge != NULL &&
	    nskb->nf_bridge->mask & BRNF_BRIDGED))
#else
	if (hook != NF_INET_FORWARD)
#endif
		addr_type = RTN_LOCAL;

	/* ip_route_me_harder expects skb->dst to be set */
	skb_dst_set(nskb, dst_clone(skb_dst(oldskb)));

	if (ip_route_me_harder(nskb, addr_type))
		goto free_nskb;
	else
		niph = ip_hdr(nskb);

	niph->ttl       = dst_metric(skb_dst(nskb), RTAX_HOPLIMIT);
	nskb->ip_summed = CHECKSUM_NONE;

	/* "Never happens" */
	if (nskb->len > dst_mtu(skb_dst(nskb)))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

	ip_local_out(nskb);
	return;

 free_nskb:
	kfree_skb(nskb);
}
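/*
 * The acknowledgment numbers above count SYN and FIN as one sequence
 * unit each on top of the TCP payload length. The same computation,
 * pulled out as a helper (the name is illustrative, not from the
 * source):
 */
static __be32 delude_ack_seq(const struct tcphdr *oth,
			     const struct sk_buff *oldskb)
{
	return htonl(ntohl(oth->seq) + oth->syn + oth->fin +
		     oldskb->len - ip_hdrlen(oldskb) - (oth->doff << 2));
}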
Example #24
static int raw_send_hdrinc(struct sock *sk, struct flowi4 *fl4,
			   void *from, size_t length,
			   struct rtable **rtp,
			   unsigned int flags)
{
	struct inet_sock *inet = inet_sk(sk);
	struct net *net = sock_net(sk);
	struct iphdr *iph;
	struct sk_buff *skb;
	unsigned int iphlen;
	int err;
	struct rtable *rt = *rtp;
	int hlen, tlen;

	if (length > rt->dst.dev->mtu) {
		ip_local_error(sk, EMSGSIZE, fl4->daddr, inet->inet_dport,
			       rt->dst.dev->mtu);
		return -EMSGSIZE;
	}
	if (flags&MSG_PROBE)
		goto out;

	hlen = LL_RESERVED_SPACE(rt->dst.dev);
	tlen = rt->dst.dev->needed_tailroom;
	skb = sock_alloc_send_skb(sk,
				  length + hlen + tlen + 15,
				  flags & MSG_DONTWAIT, &err);
	if (skb == NULL)
		goto error;
	skb_reserve(skb, hlen);

	skb->priority = sk->sk_priority;
	skb->mark = sk->sk_mark;
	skb_dst_set(skb, &rt->dst);
	*rtp = NULL;

	skb_reset_network_header(skb);
	iph = ip_hdr(skb);
	skb_put(skb, length);

	skb->ip_summed = CHECKSUM_NONE;

	skb->transport_header = skb->network_header;
	err = -EFAULT;
	if (memcpy_fromiovecend((void *)iph, from, 0, length))
		goto error_free;

	iphlen = iph->ihl * 4;

	/*
	 * We don't want to modify the ip header, but we do need to
	 * be sure that it won't cause problems later along the network
	 * stack.  Specifically we want to make sure that iph->ihl is a
	 * sane value.  If ihl points beyond the length of the buffer passed
	 * in, reject the frame as invalid
	 */
	err = -EINVAL;
	if (iphlen > length)
		goto error_free;

	if (iphlen >= sizeof(*iph)) {
		if (!iph->saddr)
			iph->saddr = fl4->saddr;
		iph->check   = 0;
		iph->tot_len = htons(length);
		if (!iph->id)
			ip_select_ident(skb, &rt->dst, NULL);

		iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
	}
	if (iph->protocol == IPPROTO_ICMP)
		icmp_out_count(net, ((struct icmphdr *)
			skb_transport_header(skb))->type);

	err = NF_HOOK(NFPROTO_IPV4, NF_INET_LOCAL_OUT, skb, NULL,
		      rt->dst.dev, dst_output);
	if (err > 0)
		err = net_xmit_errno(err);
	if (err)
		goto error;
out:
	return 0;

error_free:
	kfree_skb(skb);
error:
	IP_INC_STATS(net, IPSTATS_MIB_OUTDISCARDS);
	if (err == -ENOBUFS && !inet->recverr)
		err = 0;
	return err;
}
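/*
 * Userspace counterpart (hedged sketch): a SOCK_RAW socket opened
 * with IPPROTO_RAW implies IP_HDRINCL, so every buffer handed to
 * sendto() must begin with a complete struct iphdr; that is the
 * header raw_send_hdrinc() above validates and fixes up (saddr, id,
 * checksum).
 */
#include <netinet/ip.h>
#include <sys/socket.h>

static int open_hdrincl_socket(void)
{
	return socket(AF_INET, SOCK_RAW, IPPROTO_RAW);	/* needs CAP_NET_RAW */
}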
Example #25
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align ip header to a 16 bytes boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
Example #26
static int ar2313_rx_int(struct net_device *dev)
{
	struct ar2313_private *sp = netdev_priv(dev);
	struct sk_buff *skb, *skb_new;
	ar2313_descr_t *rxdesc;
	unsigned int status;
	u32 idx;
	int pkts = 0;
	int rval;

	idx = sp->cur_rx;

	/* process at most the entire ring and then wait for another interrupt
	 */
	while (1) {

		rxdesc = &sp->rx_ring[idx];
		status = rxdesc->status;
		if (status & DMA_RX_OWN) {
			/* SiByte owns descriptor or descr not yet filled in */
			rval = 0;
			break;
		}

		if (++pkts > AR2313_MAX_PKTS_PER_CALL) {
			rval = 1;
			break;
		}
#if DEBUG_RX
		printk("index %d\n", idx);
		printk("RX status %08x\n", rxdesc->status);
		printk("RX devcs  %08x\n", rxdesc->devcs);
		printk("RX addr   %08x\n", rxdesc->addr);
		printk("RX descr  %08x\n", rxdesc->descr);
#endif

		if ((status & DMA_RX_ERROR) && !(status & DMA_RX_LONG)) {
#if DEBUG_RX
			printk("%s: rx ERROR %08x\n", __FUNCTION__, status);
#endif
			dev->stats.rx_errors++;
			dev->stats.rx_dropped++;

			/* add statistics counters */
			if (status & DMA_RX_ERR_CRC)
				dev->stats.rx_crc_errors++;
			if (status & DMA_RX_ERR_COL)
				dev->stats.rx_over_errors++;
			if (status & DMA_RX_ERR_LENGTH)
				dev->stats.rx_length_errors++;
			if (status & DMA_RX_ERR_RUNT)
				dev->stats.rx_over_errors++;
			if (status & DMA_RX_ERR_DESC)
				dev->stats.rx_over_errors++;

		} else {
			/* alloc new buffer. */
			skb_new = dev_alloc_skb(AR2313_BUFSIZE + RX_OFFSET + 128);
			if (skb_new != NULL) {

				skb = sp->rx_skb[idx];
				/* set skb */
				skb_put(skb,
						((status >> DMA_RX_LEN_SHIFT) & 0x3fff) - CRC_LEN);

				dev->stats.rx_bytes += skb->len;
				skb->protocol = eth_type_trans(skb, dev);
				/* pass the packet to upper layers */
				netif_rx(skb);

				skb_new->dev = dev;
				/* 16 bit align */
				skb_reserve(skb_new, RX_OFFSET + 32);
				/* reset descriptor's curr_addr */
				rxdesc->addr = virt_to_phys(skb_new->data);

				dev->stats.rx_packets++;
				sp->rx_skb[idx] = skb_new;
			} else {
				dev->stats.rx_dropped++;
			}
		}

		/* Reconstructed tail (assumed from the ar2313 driver; the
		 * listing is truncated here): return the descriptor to the
		 * DMA engine and advance to the next ring entry.
		 */
		rxdesc->status = DMA_RX_OWN;
		idx = DSC_NEXT(idx);
	}

	sp->cur_rx = idx;

	return rval;
}
Example #27
static void fr_lmi_send(hdlc_device *hdlc, int fullrep)
{
	struct sk_buff *skb;
	pvc_device *pvc = hdlc->first_pvc;
	int len = mode_is(hdlc, MODE_FR_ANSI) ? LMI_ANSI_LENGTH : LMI_LENGTH;
	int stat_len = 3;
	u8 *data;
	int i = 0;

	if (mode_is(hdlc, MODE_DCE) && fullrep) {
		len += hdlc->pvc_count * (2 + stat_len);
		if (len > HDLC_MAX_MTU) {
			printk(KERN_WARNING "%s: Too many PVCs while sending "
			       "LMI full report\n", hdlc_to_name(hdlc));
			return;
		}
	}

	skb = dev_alloc_skb(len);
	if (!skb) {
		printk(KERN_WARNING "%s: Memory squeeze on fr_lmi_send()\n",
			       hdlc_to_name(hdlc));
		return;
	}
	memset(skb->data, 0, len);
	skb_reserve(skb, 4);
	fr_hard_header(skb, hdlc_to_dev(hdlc), LMI_PROTO, NULL, NULL, 0);
	data = skb->tail;
	data[i++] = LMI_CALLREF;
	data[i++] = mode_is(hdlc, MODE_DCE) ? LMI_STATUS : LMI_STATUS_ENQUIRY;
	if (mode_is(hdlc, MODE_FR_ANSI))
		data[i++] = LMI_ANSI_LOCKSHIFT;
	data[i++] = mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_REPTYPE :
		LMI_REPTYPE;
	data[i++] = LMI_REPT_LEN;
	data[i++] = fullrep ? LMI_FULLREP : LMI_INTEGRITY;

	data[i++] = mode_is(hdlc, MODE_FR_CCITT) ? LMI_CCITT_ALIVE : LMI_ALIVE;
	data[i++] = LMI_INTEG_LEN;
	data[i++] = hdlc->lmi.txseq = fr_lmi_nextseq(hdlc->lmi.txseq);
	data[i++] = hdlc->lmi.rxseq;

	if (mode_is(hdlc, MODE_DCE) && fullrep) {
		while (pvc) {
			data[i++] = mode_is(hdlc, MODE_FR_CCITT) ?
				LMI_CCITT_PVCSTAT:LMI_PVCSTAT;
			data[i++] = stat_len;

			if ((hdlc->lmi.state & LINK_STATE_RELIABLE) &&
			    (pvc->netdev.flags & IFF_UP) &&
			    !(pvc->state & (PVC_STATE_ACTIVE|PVC_STATE_NEW))) {
				pvc->state |= PVC_STATE_NEW;
				fr_log_dlci_active(pvc);
			}

			dlci_to_status(hdlc, netdev_dlci(&pvc->netdev),
				       data+i, pvc->state);
			i += stat_len;
			pvc = pvc->next;
		}
	}

	skb_put(skb, i);
	skb->priority = TC_PRIO_CONTROL;
	skb->dev = hdlc_to_dev(hdlc);

	dev_queue_xmit(skb);
}
Example #28
/*
 * Function kingsun_net_open (dev)
 *
 *    Network device is taken up. Usually this is done by "ifconfig irda0 up"
 */
static int kingsun_net_open(struct net_device *netdev)
{
	struct kingsun_cb *kingsun = netdev_priv(netdev);
	int err = -ENOMEM;
	char hwname[16];

	/* At this point, urbs are NULL, and skb is NULL (see kingsun_probe) */
	kingsun->receiving = 0;

	/* Initialize for SIR to copy data directly into skb.  */
	kingsun->rx_buff.in_frame = FALSE;
	kingsun->rx_buff.state = OUTSIDE_FRAME;
	kingsun->rx_buff.truesize = IRDA_SKB_MAX_MTU;
	kingsun->rx_buff.skb = dev_alloc_skb(IRDA_SKB_MAX_MTU);
	if (!kingsun->rx_buff.skb)
		goto free_mem;

	skb_reserve(kingsun->rx_buff.skb, 1);
	kingsun->rx_buff.head = kingsun->rx_buff.skb->data;
	do_gettimeofday(&kingsun->rx_time);

	kingsun->rx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->rx_urb)
		goto free_mem;

	kingsun->tx_urb = usb_alloc_urb(0, GFP_KERNEL);
	if (!kingsun->tx_urb)
		goto free_mem;

	/*
	 * Now that everything should be initialized properly,
	 * Open new IrLAP layer instance to take care of us...
	 */
	sprintf(hwname, "usb#%d", kingsun->usbdev->devnum);
	kingsun->irlap = irlap_open(netdev, &kingsun->qos, hwname);
	if (!kingsun->irlap) {
		err("kingsun-sir: irlap_open failed");
		goto free_mem;
	}

	/* Start first reception */
	usb_fill_int_urb(kingsun->rx_urb, kingsun->usbdev,
			  usb_rcvintpipe(kingsun->usbdev, kingsun->ep_in),
			  kingsun->in_buf, kingsun->max_rx,
			  kingsun_rcv_irq, kingsun, 1);
	kingsun->rx_urb->status = 0;
	err = usb_submit_urb(kingsun->rx_urb, GFP_KERNEL);
	if (err) {
		err("kingsun-sir: first urb-submit failed: %d", err);
		goto close_irlap;
	}

	netif_start_queue(netdev);

	/* Situation at this point:
	   - all work buffers allocated
	   - urbs allocated and ready to fill
	   - max rx packet known (in max_rx)
	   - unwrap state machine initialized, in state outside of any frame
	   - receive request in progress
	   - IrLAP layer started, about to hand over packets to send
	 */

	return 0;

 close_irlap:
	irlap_close(kingsun->irlap);
 free_mem:
	if (kingsun->tx_urb) {
		usb_free_urb(kingsun->tx_urb);
		kingsun->tx_urb = NULL;
	}
	if (kingsun->rx_urb) {
		usb_free_urb(kingsun->rx_urb);
		kingsun->rx_urb = NULL;
	}
	if (kingsun->rx_buff.skb) {
		kfree_skb(kingsun->rx_buff.skb);
		kingsun->rx_buff.skb = NULL;
		kingsun->rx_buff.head = NULL;
	}
	return err;
}
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't only start after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;
	size += out->maxpacket - 1;
	size -= size % out->maxpacket;

	skb = alloc_skb(size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	/*
	 * RX: do not shift the data by NET_IP_ALIGN if the DMA
	 * controller cannot handle misaligned buffers.
	 */
	if (!gadget_dma32(dev->gadget))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
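/*
 * The size arithmetic above rounds the rx buffer up to a whole number
 * of USB packets, so a transfer can only end on a short packet, which
 * is how the host marks end-of-frame. Equivalent sketch using the
 * kernel's roundup() helper:
 */
static size_t rx_buf_size(size_t needed, unsigned int maxpacket)
{
	return roundup(needed, maxpacket);
}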
Example #30
static int sendup_buffer (struct net_device *dev)
{
    /* on entry, command is in ltdmacbuf, data in ltdmabuf */
    /* called from idle, non-reentrant */

    int dnode, snode, llaptype, len; 
    int sklen;
    struct sk_buff *skb;
    struct net_device_stats *stats = &((struct ltpc_private *)dev->priv)->stats;
    struct lt_rcvlap *ltc = (struct lt_rcvlap *) ltdmacbuf;

    if (ltc->command != LT_RCVLAP) {
        printk("unknown command 0x%02x from ltpc card\n",ltc->command);
        return(-1);
    }
    dnode = ltc->dnode;
    snode = ltc->snode;
    llaptype = ltc->laptype;
    len = ltc->length; 

    sklen = len;
    if (llaptype == 1) 
        sklen += 8;  /* correct for short ddp */
    if(sklen > 800) {
        printk(KERN_INFO "%s: nonsense length in ltpc command 0x14: 0x%08x\n",
            dev->name,sklen);
        return -1;
    }

    if ( (llaptype==0) || (llaptype>2) ) {
        printk(KERN_INFO "%s: unknown LLAP type: %d\n",dev->name,llaptype);
        return -1;
    }
    skb = dev_alloc_skb(3+sklen);
    if (skb == NULL) 
    {
        printk("%s: dropping packet due to memory squeeze.\n",
            dev->name);
        return -1;
    }
    skb->dev = dev;

    if (sklen > len)
        skb_reserve(skb,8);
    skb_put(skb,len+3);
    skb->protocol = htons(ETH_P_LOCALTALK);
    /* add LLAP header */
    skb->data[0] = dnode;
    skb->data[1] = snode;
    skb->data[2] = llaptype;
    skb_reset_mac_header(skb);    /* save pointer to llap header */
    skb_pull(skb,3);

    /* copy ddp(s,e)hdr + contents */
    skb_copy_to_linear_data(skb, ltdmabuf, len);

    skb_reset_transport_header(skb);

    stats->rx_packets++;
    stats->rx_bytes+=skb->len;

    /* toss it onwards */
    netif_rx(skb);
    dev->last_rx = jiffies;
    return 0;
}