Example #1
static int r6040_rx(struct net_device *dev, int limit)
{
	struct r6040_private *priv = netdev_priv(dev);
	struct r6040_descriptor *descptr = priv->rx_remove_ptr;
	struct sk_buff *skb_ptr, *new_skb;
	int count = 0;
	u16 err;

	/* Limit not reached and the descriptor belongs to the CPU */
	while (count < limit && !(descptr->status & DSC_OWNER_MAC)) {
		/* Read the descriptor status */
		err = descptr->status;
		/* Global error status set */
		if (err & DSC_RX_ERR) {
			/* RX dribble */
			if (err & DSC_RX_ERR_DRI)
				dev->stats.rx_frame_errors++;
			/* Buffer length exceeded */
			if (err & DSC_RX_ERR_BUF)
				dev->stats.rx_length_errors++;
			/* Packet too long */
			if (err & DSC_RX_ERR_LONG)
				dev->stats.rx_length_errors++;
			/* Packet < 64 bytes */
			if (err & DSC_RX_ERR_RUNT)
				dev->stats.rx_length_errors++;
			/* CRC error */
			if (err & DSC_RX_ERR_CRC) {
				spin_lock(&priv->lock);
				dev->stats.rx_crc_errors++;
				spin_unlock(&priv->lock);
			}
			goto next_descr;
		}

		/* Packet successfully received */
		new_skb = netdev_alloc_skb(dev, MAX_BUF_SIZE);
		if (!new_skb) {
			dev->stats.rx_dropped++;
			goto next_descr;
		}
		skb_ptr = descptr->skb_ptr;
		skb_ptr->dev = priv->dev;

		/* Do not count the CRC */
		skb_put(skb_ptr, descptr->len - 4);
		pci_unmap_single(priv->pdev, le32_to_cpu(descptr->buf),
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE);
		skb_ptr->protocol = eth_type_trans(skb_ptr, priv->dev);

		/* Send to upper layer */
		netif_receive_skb(skb_ptr);
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += descptr->len - 4;

		/* put new skb into descriptor */
		descptr->skb_ptr = new_skb;
		descptr->buf = cpu_to_le32(pci_map_single(priv->pdev,
						descptr->skb_ptr->data,
					MAX_BUF_SIZE, PCI_DMA_FROMDEVICE));

next_descr:
		/* put the descriptor back to the MAC */
		descptr->status = DSC_OWNER_MAC;
		descptr = descptr->vndescp;
		count++;
	}
	priv->rx_remove_ptr = descptr;

	return count;
}
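
The loop above shows the standard ring-refill discipline: allocate the replacement skb before touching the filled one, and drop the frame (keeping the old buffer in the ring) if allocation fails, so the ring never loses a slot. A minimal sketch of that swap under those assumptions; my_rx_swap and MY_BUF_SIZE are illustrative names, not part of the driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

#define MY_BUF_SIZE 1536	/* illustrative, stands in for MAX_BUF_SIZE */

/* Swap a filled ring buffer against a fresh one. Returns the filled
 * skb (ready to pass up the stack) or NULL if the frame was dropped. */
static struct sk_buff *my_rx_swap(struct net_device *dev,
				  struct sk_buff **ring_slot,
				  unsigned int frame_len)
{
	struct sk_buff *new_skb = netdev_alloc_skb(dev, MY_BUF_SIZE);
	struct sk_buff *full_skb;

	if (!new_skb) {
		dev->stats.rx_dropped++;	/* drop; old buffer stays in the ring */
		return NULL;
	}
	full_skb = *ring_slot;
	skb_put(full_skb, frame_len);		/* mark the received bytes as data */
	*ring_slot = new_skb;			/* ring now owns the fresh buffer */
	return full_skb;
}
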
Example #2
/*
 *	Create an arp packet. If (dest_hw == NULL), we create a broadcast
 *	message.
 */
struct sk_buff *arp_create(int type, int ptype, u32 dest_ip,
			   struct net_device *dev, u32 src_ip,
			   unsigned char *dest_hw, unsigned char *src_hw,
			   unsigned char *target_hw)
{
	struct sk_buff *skb;
	struct arphdr *arp;
	unsigned char *arp_ptr;

	/*
	 *	Allocate a buffer
	 */
	
	skb = alloc_skb(sizeof(struct arphdr)+ 2*(dev->addr_len+4)
				+ LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	skb_reserve(skb, LL_RESERVED_SPACE(dev));
	skb->nh.raw = skb->data;
	arp = (struct arphdr *) skb_put(skb,sizeof(struct arphdr) + 2*(dev->addr_len+4));
	skb->dev = dev;
	skb->protocol = htons(ETH_P_ARP);
	if (src_hw == NULL)
		src_hw = dev->dev_addr;
	if (dest_hw == NULL)
		dest_hw = dev->broadcast;

	/*
	 *	Fill the device header for the ARP frame
	 */
	if (dev->hard_header &&
	    dev->hard_header(skb,dev,ptype,dest_hw,src_hw,skb->len) < 0)
		goto out;

	/*
	 * Fill out the arp protocol part.
	 *
	 * The arp hardware type should match the device type, except for FDDI,
	 * which (according to RFC 1390) should always equal 1 (Ethernet).
	 */
	/*
	 *	Exceptions everywhere. AX.25 uses the AX.25 PID value not the
	 *	DIX code for the protocol. Make these device structure fields.
	 */
	switch (dev->type) {
	default:
		arp->ar_hrd = htons(dev->type);
		arp->ar_pro = htons(ETH_P_IP);
		break;

#if defined(CONFIG_AX25) || defined(CONFIG_AX25_MODULE)
	case ARPHRD_AX25:
		arp->ar_hrd = htons(ARPHRD_AX25);
		arp->ar_pro = htons(AX25_P_IP);
		break;

#if defined(CONFIG_NETROM) || defined(CONFIG_NETROM_MODULE)
	case ARPHRD_NETROM:
		arp->ar_hrd = htons(ARPHRD_NETROM);
		arp->ar_pro = htons(AX25_P_IP);
		break;
#endif
#endif

#ifdef CONFIG_FDDI
	case ARPHRD_FDDI:
		arp->ar_hrd = htons(ARPHRD_ETHER);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
#ifdef CONFIG_TR
	case ARPHRD_IEEE802_TR:
		arp->ar_hrd = htons(ARPHRD_IEEE802);
		arp->ar_pro = htons(ETH_P_IP);
		break;
#endif
	}

	arp->ar_hln = dev->addr_len;
	arp->ar_pln = 4;
	arp->ar_op = htons(type);

	arp_ptr=(unsigned char *)(arp+1);

	memcpy(arp_ptr, src_hw, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &src_ip,4);
	arp_ptr+=4;
	if (target_hw != NULL)
		memcpy(arp_ptr, target_hw, dev->addr_len);
	else
		memset(arp_ptr, 0, dev->addr_len);
	arp_ptr+=dev->addr_len;
	memcpy(arp_ptr, &dest_ip, 4);

	return skb;

out:
	kfree_skb(skb);
	return NULL;
}
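
The same reserve-then-put layout recurs in every packet builder: alloc_skb() sizes the buffer, skb_reserve() leaves headroom for the link-layer header, and skb_put() claims tail space for the protocol payload. A minimal, self-contained sketch of that pattern; build_payload_skb is a hypothetical helper, not a kernel API:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/string.h>

/* Build an skb for 'dev' with link-layer headroom reserved and 'len'
 * payload bytes appended. Sketch only; the caller fills in the rest. */
static struct sk_buff *build_payload_skb(struct net_device *dev, __be16 proto,
					 const void *payload, unsigned int len)
{
	struct sk_buff *skb;

	skb = alloc_skb(len + LL_RESERVED_SPACE(dev), GFP_ATOMIC);
	if (skb == NULL)
		return NULL;
	skb_reserve(skb, LL_RESERVED_SPACE(dev));	/* headroom for dev header */
	skb->dev = dev;
	skb->protocol = proto;
	memcpy(skb_put(skb, len), payload, len);	/* append payload at tail */
	return skb;
}
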
Example #3
int capi_conn_req(const char * calledPN, struct sk_buff **skb, int proto)
{
        ushort len;

        /*
         * length
         *   AppInfoMask - 2
         *   BC0         - 3
         *   BC1         - 1
         *   Chan        - 2
         *   Keypad      - 1
         *   CPN         - 1
         *   CPSA        - 1
         *   CalledPN    - 2 + strlen
         *   CalledPSA   - 1
         *   rest...     - 4
         *   ----------------
         *   Total        18 + strlen
         */

        len = 18 + strlen(calledPN);

	if (proto == ISDN_PROTO_L2_TRANS)
		len++;

	if ((*skb = dev_alloc_skb(len)) == NULL) {
		printk(KERN_WARNING "capi_conn_req: alloc_skb failed\n");
		return -1;
	}

        /* InfoElmMask */
        *((ushort*) skb_put(*skb, 2)) = AppInfoMask; 

	if (proto == ISDN_PROTO_L2_TRANS)
	{
		/* Bearer Capability - Mandatory*/
		*(skb_put(*skb, 1)) = 3;        /* BC0.Length		*/
		*(skb_put(*skb, 1)) = 0x80;     /* Speech		*/
		*(skb_put(*skb, 1)) = 0x10;     /* Circuit Mode		*/
		*(skb_put(*skb, 1)) = 0x23;     /* A-law		*/
	}
	else
	{
		/* Bearer Capability - Mandatory*/
		*(skb_put(*skb, 1)) = 2;        /* BC0.Length		*/
		*(skb_put(*skb, 1)) = 0x88;     /* Digital Information	*/
		*(skb_put(*skb, 1)) = 0x90;     /* BC0.Octet4		*/
	}

        /* Bearer Capability - Optional*/
        *(skb_put(*skb, 1)) = 0;        /* BC1.Length = 0                    */

        *(skb_put(*skb, 1)) = 1;        /* ChannelID.Length = 1              */
        *(skb_put(*skb, 1)) = 0x83;     /* Basic Interface - Any Channel     */

        *(skb_put(*skb, 1)) = 0;        /* Keypad.Length = 0                 */

        *(skb_put(*skb, 1)) = 0;        /* CallingPN.Length = 0              */
        *(skb_put(*skb, 1)) = 0;        /* CallingPSA.Length = 0             */

        /* Called Party Number */
        *(skb_put(*skb, 1)) = strlen(calledPN) + 1;
        *(skb_put(*skb, 1)) = 0x81;
        memcpy(skb_put(*skb, strlen(calledPN)), calledPN, strlen(calledPN));

        /* '#' */

        *(skb_put(*skb, 1)) = 0;       /* CalledPSA.Length = 0     */

        /* LLC.Length  = 0; */
        /* HLC0.Length = 0; */
        /* HLC1.Length = 0; */ 
        /* UTUS.Length = 0; */
        memset(skb_put(*skb, 4), 0, 4);

        return len;
}
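
Each `*(skb_put(*skb, 1)) = ...` above appends exactly one information-element octet: skb_put() advances the tail pointer by one and returns the old tail, which the store then fills. A tiny helper spells the idiom out (hypothetical name; recent kernels ship an equivalent skb_put_u8()):

#include <linux/skbuff.h>

/* Append one octet at the tail of the skb. skb_put() returns a pointer
 * to the newly claimed byte, which the store then fills in. */
static inline void put_octet(struct sk_buff *skb, u8 val)
{
	*(u8 *)skb_put(skb, 1) = val;
}
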
Example #4
static void rx_complete (struct urb *urb)
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;
	int			urb_status = urb->status;

	skb_put (skb, urb->actual_length);
	entry->state = rx_done;
	entry->urb = NULL;

	switch (urb_status) {
	/* success */
	case 0:
		if (skb->len < dev->net->hard_header_len) {
			entry->state = rx_cleanup;
			dev->net->stats.rx_errors++;
			dev->net->stats.rx_length_errors++;
			netif_dbg(dev, rx_err, dev->net,
				  "rx length %d\n", skb->len);
		}
		break;

	/* stalls need manual reset. this is rare ... except that
	 * when going through USB 2.0 TTs, unplug appears this way.
	 * we avoid the highspeed version of the ETIMEDOUT/EILSEQ
	 * storm, recovering as needed.
	 */
	case -EPIPE:
		dev->net->stats.rx_errors++;
		usbnet_defer_kevent (dev, EVENT_RX_HALT);
		// FALLTHROUGH

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* async unlink */
	case -ESHUTDOWN:		/* hardware gone */
		netif_dbg(dev, ifdown, dev->net,
			  "rx shutdown, code %d\n", urb_status);
		goto block;

	/* we get controller i/o faults during khubd disconnect() delays.
	 * throttle down resubmits, to avoid log floods; just temporarily,
	 * so we still recover when the fault isn't a khubd delay.
	 */
	case -EPROTO:
	case -ETIME:
	case -EILSEQ:
		dev->net->stats.rx_errors++;
		if (!timer_pending (&dev->delay)) {
			mod_timer (&dev->delay, jiffies + THROTTLE_JIFFIES);
			netif_dbg(dev, link, dev->net,
				  "rx throttle %d\n", urb_status);
		}
block:
		entry->state = rx_cleanup;
		entry->urb = urb;
		urb = NULL;
		break;

	/* data overrun ... flush fifo? */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		// FALLTHROUGH

	default:
		entry->state = rx_cleanup;
		dev->net->stats.rx_errors++;
		netif_dbg(dev, rx_err, dev->net, "rx status %d\n", urb_status);
		break;
	}

	defer_bh(dev, skb, &dev->rxq);

	if (urb) {
		if (netif_running (dev->net) &&
		    !test_bit (EVENT_RX_HALT, &dev->flags)) {
			rx_submit (dev, urb, GFP_ATOMIC);
			return;
		}
		usb_free_urb (urb);
	}
	netif_dbg(dev, rx_err, dev->net, "no read resubmitted\n");
}
Example #5
static void fjn_rx(struct net_device *dev)
{
    struct local_info_t *lp = (struct local_info_t *)dev->priv;
    ioaddr_t ioaddr = dev->base_addr;
    int boguscount = 10;	/* 5 -> 10: by agy 19940922 */

    DEBUG(4, "%s: in rx_packet(), rx_status %02x.\n",
	  dev->name, inb(ioaddr + RX_STATUS));

    while ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == 0) {
	u_short status = inw(ioaddr + DATAPORT);

	DEBUG(4, "%s: Rxing packet mode %02x status %04x.\n",
	      dev->name, inb(ioaddr + RX_MODE), status);
#ifndef final_version
	if (status == 0) {
	    outb(F_SKP_PKT, ioaddr + RX_SKIP);
	    break;
	}
#endif
	if ((status & 0xF0) != 0x20) {	/* There was an error. */
	    lp->stats.rx_errors++;
	    if (status & F_LEN_ERR) lp->stats.rx_length_errors++;
	    if (status & F_ALG_ERR) lp->stats.rx_frame_errors++;
	    if (status & F_CRC_ERR) lp->stats.rx_crc_errors++;
	    if (status & F_OVR_FLO) lp->stats.rx_over_errors++;
	} else {
	    u_short pkt_len = inw(ioaddr + DATAPORT);
	    /* Malloc up new buffer. */
	    struct sk_buff *skb;

	    if (pkt_len > 1550) {
		printk(KERN_NOTICE "%s: The FMV-18x claimed a very "
		       "large packet, size %d.\n", dev->name, pkt_len);
		outb(F_SKP_PKT, ioaddr + RX_SKIP);
		lp->stats.rx_errors++;
		break;
	    }
	    skb = dev_alloc_skb(pkt_len+2);
	    if (skb == NULL) {
		printk(KERN_NOTICE "%s: Memory squeeze, dropping "
		       "packet (len %d).\n", dev->name, pkt_len);
		outb(F_SKP_PKT, ioaddr + RX_SKIP);
		lp->stats.rx_dropped++;
		break;
	    }
	    skb->dev = dev;

	    skb_reserve(skb, 2);
	    insw(ioaddr + DATAPORT, skb_put(skb, pkt_len),
		    (pkt_len + 1) >> 1);
	    skb->protocol = eth_type_trans(skb, dev);

#ifdef PCMCIA_DEBUG
	    if (pc_debug > 5) {
		int i;
		printk(KERN_DEBUG "%s: Rxed packet of length %d: ",
		       dev->name, pkt_len);
		for (i = 0; i < 14; i++)
		    printk(" %02x", skb->data[i]);
		printk(".\n");
	    }
#endif

	    netif_rx(skb);
	    lp->stats.rx_packets++;
	    lp->stats.rx_bytes += skb->len;
	}
	if (--boguscount <= 0)
	    break;
    }

    /* If any worth-while packets have been received, dev_rint()
	   has done a netif_wake_queue() for us and will work on them
	   when we get to the bottom-half routine. */
/*
    if( lp->cardtype != TDK ) {
	int i;
	for (i = 0; i < 20; i++) {
	    if ((inb(ioaddr + RX_MODE) & F_BUF_EMP) == F_BUF_EMP)
		break;
	    (void)inw(ioaddr + DATAPORT);  /+ dummy status read +/
	    outb(F_SKP_PKT, ioaddr + RX_SKIP);
	}

	if (i > 0)
	    DEBUG(5, "%s: Exint Rx packet with mode %02x after "
		  "%d ticks.\n", dev->name, inb(ioaddr + RX_MODE), i);
    }
*/

    return;
} /* fjn_rx */
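
For PIO hardware like this, the region claimed by skb_put() doubles as the I/O target: insw() streams 16-bit words from the data port straight into the skb, rounding odd lengths up to a whole word. The core of that in isolation (a sketch; ioaddr stands for the card's data register):

#include <linux/skbuff.h>
#include <linux/io.h>

/* Read a pkt_len-byte frame from a 16-bit data port into the skb.
 * (pkt_len + 1) >> 1 rounds odd byte counts up to full 16-bit words. */
static void pio_read_frame(struct sk_buff *skb, unsigned int ioaddr,
			   unsigned int pkt_len)
{
	insw(ioaddr, skb_put(skb, pkt_len), (pkt_len + 1) >> 1);
}
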
Example #6
static int __ip6_append_data(struct sock *sk,
			     struct flowi6 *fl6,
			     struct sk_buff_head *queue,
			     struct inet_cork *cork,
			     struct inet6_cork *v6_cork,
			     struct page_frag *pfrag,
			     int getfrag(void *from, char *to, int offset,
					 int len, int odd, struct sk_buff *skb),
			     void *from, int length, int transhdrlen,
			     unsigned int flags, int dontfrag)
{
	struct sk_buff *skb, *skb_prev = NULL;
	unsigned int maxfraglen, fragheaderlen, mtu, orig_mtu;
	int exthdrlen = 0;
	int dst_exthdrlen = 0;
	int hh_len;
	int copy;
	int err;
	int offset = 0;
	__u8 tx_flags = 0;
	u32 tskey = 0;
	struct rt6_info *rt = (struct rt6_info *)cork->dst;
	struct ipv6_txoptions *opt = v6_cork->opt;
	int csummode = CHECKSUM_NONE;

	skb = skb_peek_tail(queue);
	if (!skb) {
		exthdrlen = opt ? opt->opt_flen : 0;
		dst_exthdrlen = rt->dst.header_len - rt->rt6i_nfheader_len;
	}

	mtu = cork->fragsize;
	orig_mtu = mtu;

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen -
		     sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		unsigned int maxnonfragsize, headersize;

		headersize = sizeof(struct ipv6hdr) +
			     (opt ? opt->opt_flen + opt->opt_nflen : 0) +
			     (dst_allfrag(&rt->dst) ?
			      sizeof(struct frag_hdr) : 0) +
			     rt->rt6i_nfheader_len;

		if (ip6_sk_ignore_df(sk))
			maxnonfragsize = sizeof(struct ipv6hdr) + IPV6_MAXPLEN;
		else
			maxnonfragsize = mtu;

		/* dontfrag active */
		if ((cork->length + length > mtu - headersize) && dontfrag &&
		    (sk->sk_protocol == IPPROTO_UDP ||
		     sk->sk_protocol == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu - headersize +
						   sizeof(struct ipv6hdr));
			goto emsgsize;
		}

		if (cork->length + length > maxnonfragsize - headersize) {
emsgsize:
			ipv6_local_error(sk, EMSGSIZE, fl6,
					 mtu - headersize +
					 sizeof(struct ipv6hdr));
			return -EMSGSIZE;
		}
	}

	if (sk->sk_type == SOCK_DGRAM || sk->sk_type == SOCK_RAW) {
		sock_tx_timestamp(sk, &tx_flags);
		if (tx_flags & SKBTX_ANY_SW_TSTAMP &&
		    sk->sk_tsflags & SOF_TIMESTAMPING_OPT_ID)
			tskey = sk->sk_tskey++;
	}

	/* If this is the first and only packet and device
	 * supports checksum offloading, let's use it.
	 * Use transhdrlen, same as IPv4, because partial
	 * sums only work when transhdrlen is set.
	 */
	if (transhdrlen && sk->sk_protocol == IPPROTO_UDP &&
	    length + fragheaderlen < mtu &&
	    rt->dst.dev->features & NETIF_F_V6_CSUM &&
	    !exthdrlen)
		csummode = CHECKSUM_PARTIAL;
	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (((length > mtu) ||
	     (skb && skb_is_gso(skb))) &&
	    (sk->sk_protocol == IPPROTO_UDP) &&
	    (rt->dst.dev->features & NETIF_F_UFO) &&
	    (sk->sk_type == SOCK_DGRAM)) {
		err = ip6_ufo_append_data(sk, queue, getfrag, from, length,
					  hh_len, fragheaderlen,
					  transhdrlen, mtu, flags, fl6);
		if (err)
			goto error;
		return 0;
	}

	if (!skb)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
alloc_new_skb:
			/* There's no room in the current skb */
			if (skb)
				fraggap = skb->len - maxfraglen;
			else
				fraggap = 0;
			/* update mtu and maxfraglen if necessary */
			if (!skb || !skb_prev)
				ip6_append_data_mtu(&mtu, &maxfraglen,
						    fragheaderlen, skb, rt,
						    orig_mtu);

			skb_prev = skb;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;

			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen - rt->dst.trailer_len;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			alloclen += dst_exthdrlen;

			if (datalen != length + fraggap) {
				/*
				 * this is not the last fragment, the trailer
				 * space is regarded as data space.
				 */
				datalen += rt->dst.trailer_len;
			}

			alloclen += rt->dst.trailer_len;
			fraglen = datalen + fragheaderlen;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(!skb))
					err = -ENOBUFS;
			}
			if (!skb)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->protocol = htons(ETH_P_IPV6);
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation and ipsec header */
			skb_reserve(skb, hh_len + sizeof(struct frag_hdr) +
				    dst_exthdrlen);

			/* Only the initial fragment is time stamped */
			skb_shinfo(skb)->tx_flags = tx_flags;
			tx_flags = 0;
			skb_shinfo(skb)->tskey = tskey;
			tskey = 0;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;

			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			dst_exthdrlen = 0;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;

			err = -ENOMEM;
			if (!sk_page_frag_refill(sk, pfrag))
				goto error;

			if (!skb_can_coalesce(skb, i, pfrag->page,
					      pfrag->offset)) {
				err = -EMSGSIZE;
				if (i == MAX_SKB_FRAGS)
					goto error;

				__skb_fill_page_desc(skb, i, pfrag->page,
						     pfrag->offset, 0);
				skb_shinfo(skb)->nr_frags = ++i;
				get_page(pfrag->page);
			}
			copy = min_t(int, copy, pfrag->size - pfrag->offset);
			if (getfrag(from,
				    page_address(pfrag->page) + pfrag->offset,
				    offset, copy, skb->len, skb) < 0)
				goto error_efault;

			pfrag->offset += copy;
			skb_frag_size_add(&skb_shinfo(skb)->frags[i - 1], copy);
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}

	return 0;

error_efault:
	err = -EFAULT;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
Example #7
static int bgmac_dma_rx_read(struct bgmac *bgmac, struct bgmac_dma_ring *ring,
			     int weight)
{
	u32 end_slot;
	int handled = 0;

	end_slot = bgmac_read(bgmac, ring->mmio_base + BGMAC_DMA_RX_STATUS);
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot -= ring->index_base;
	end_slot &= BGMAC_DMA_RX_STATDPTR;
	end_slot /= sizeof(struct bgmac_dma_desc);

	while (ring->start != end_slot) {
		struct device *dma_dev = bgmac->core->dma_dev;
		struct bgmac_slot_info *slot = &ring->slots[ring->start];
		struct bgmac_rx_header *rx = slot->buf + BGMAC_RX_BUF_OFFSET;
		struct sk_buff *skb;
		void *buf = slot->buf;
		dma_addr_t dma_addr = slot->dma_addr;
		u16 len, flags;

		do {
			/* Prepare new skb as replacement */
			if (bgmac_dma_rx_skb_for_slot(bgmac, slot)) {
				bgmac_dma_rx_poison_buf(dma_dev, slot);
				break;
			}

			/* Unmap buffer to make it accessible to the CPU */
			dma_unmap_single(dma_dev, dma_addr,
					 BGMAC_RX_BUF_SIZE, DMA_FROM_DEVICE);

			/* Get info from the header */
			len = le16_to_cpu(rx->len);
			flags = le16_to_cpu(rx->flags);

			/* Check for poison and drop or pass the packet */
			if (len == 0xdead && flags == 0xbeef) {
				bgmac_err(bgmac, "Found poisoned packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			if (len > BGMAC_RX_ALLOC_SIZE) {
				bgmac_err(bgmac, "Found oversized packet at slot %d, DMA issue!\n",
					  ring->start);
				put_page(virt_to_head_page(buf));
				break;
			}

			/* Omit CRC. */
			len -= ETH_FCS_LEN;

			skb = build_skb(buf, BGMAC_RX_ALLOC_SIZE);
			skb_put(skb, BGMAC_RX_FRAME_OFFSET +
				BGMAC_RX_BUF_OFFSET + len);
			skb_pull(skb, BGMAC_RX_FRAME_OFFSET +
				 BGMAC_RX_BUF_OFFSET);

			skb_checksum_none_assert(skb);
			skb->protocol = eth_type_trans(skb, bgmac->net_dev);
			napi_gro_receive(&bgmac->napi, skb);
			handled++;
		} while (0);

		bgmac_dma_rx_setup_desc(bgmac, ring, ring->start);

		if (++ring->start >= BGMAC_RX_RING_SLOTS)
			ring->start = 0;

		if (handled >= weight) /* Should never be greater */
			break;
	}

	bgmac_dma_rx_update_index(bgmac, ring);

	return handled;
}
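
Unlike the copying drivers above, bgmac wraps the already-DMA-mapped page buffer with build_skb() and then uses skb_put()/skb_pull() to carve the frame out of the fixed-offset layout, avoiding a copy. The carve-out step in isolation (a sketch; unlike the driver it also checks the build_skb() return value):

#include <linux/skbuff.h>

/* Wrap an existing buffer in an skb and trim it to the 'len' payload
 * bytes that start at 'offset' from the head of the buffer. */
static struct sk_buff *carve_frame(void *buf, unsigned int buf_size,
				   unsigned int offset, unsigned int len)
{
	struct sk_buff *skb = build_skb(buf, buf_size);

	if (!skb)
		return NULL;
	skb_put(skb, offset + len);	/* total valid bytes from head */
	skb_pull(skb, offset);		/* skip the header/offset region */
	return skb;
}
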
Example #8
static int bfusb_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	struct bfusb_data *data;
	struct sk_buff *nskb;
	unsigned char buf[3];
	int sent = 0, size, count;

	BT_DBG("hdev %p skb %p type %d len %d", hdev, skb, bt_cb(skb)->pkt_type, skb->len);

	if (!hdev) {
		BT_ERR("Frame for unknown HCI device (hdev=NULL)");
		return -ENODEV;
	}

	if (!test_bit(HCI_RUNNING, &hdev->flags))
		return -EBUSY;

	data = hdev->driver_data;

	switch (bt_cb(skb)->pkt_type) {
	case HCI_COMMAND_PKT:
		hdev->stat.cmd_tx++;
		break;
	case HCI_ACLDATA_PKT:
		hdev->stat.acl_tx++;
		break;
	case HCI_SCODATA_PKT:
		hdev->stat.sco_tx++;
		break;
	}

	/* Prepend skb with frame type */
	memcpy(skb_push(skb, 1), &bt_cb(skb)->pkt_type, 1);

	count = skb->len;

	/* Max HCI frame size seems to be 1511 + 1 */
	nskb = bt_skb_alloc(count + 32, GFP_ATOMIC);
	if (!nskb) {
		BT_ERR("Can't allocate memory for new packet");
		return -ENOMEM;
	}

	nskb->dev = (void *) data;

	while (count) {
		size = min_t(uint, count, BFUSB_MAX_BLOCK_SIZE);

		buf[0] = 0xc1 | ((sent == 0) ? 0x04 : 0) | ((count == size) ? 0x08 : 0);
		buf[1] = 0x00;
		buf[2] = (size == BFUSB_MAX_BLOCK_SIZE) ? 0 : size;

		memcpy(skb_put(nskb, 3), buf, 3);
		skb_copy_from_linear_data_offset(skb, sent, skb_put(nskb, size), size);

		sent  += size;
		count -= size;
	}

	/* Don't send frame with multiple size of bulk max packet */
	if ((nskb->len % data->bulk_pkt_size) == 0) {
		buf[0] = 0xdd;
		buf[1] = 0x00;
		memcpy(skb_put(nskb, 2), buf, 2);
	}

	read_lock(&data->lock);

	skb_queue_tail(&data->transmit_q, nskb);
	bfusb_tx_wakeup(data);

	read_unlock(&data->lock);

	kfree_skb(skb);

	return 0;
}
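
The transmit loop above splits one HCI frame into blocks of at most BFUSB_MAX_BLOCK_SIZE, prefixing each with a 3-byte block header whose flags mark the first and last block. The chunking skeleton with the driver specifics stripped out (a sketch; emit_block is a placeholder callback):

#include <linux/kernel.h>
#include <linux/types.h>

/* Walk 'count' bytes of 'buf' in blocks of at most 'max_block',
 * telling the callback whether each block is the first and/or last. */
static void chunk_frame(const u8 *buf, unsigned int count,
			unsigned int max_block,
			void (*emit_block)(const u8 *blk, unsigned int size,
					   bool first, bool last))
{
	unsigned int sent = 0;

	while (count) {
		unsigned int size = min(count, max_block);

		emit_block(buf + sent, size, sent == 0, count == size);
		sent  += size;
		count -= size;
	}
}
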
Example #9
/* Send RST reply */
static void send_reset(struct net *net, struct sk_buff *oldskb)
{
	struct sk_buff *nskb;
	struct tcphdr otcph, *tcph;
	unsigned int otcplen, hh_len;
	int tcphoff, needs_ack;
	const struct ipv6hdr *oip6h = ipv6_hdr(oldskb);
	struct ipv6hdr *ip6h;
#define DEFAULT_TOS_VALUE	0x0U
	const __u8 tclass = DEFAULT_TOS_VALUE;
	struct dst_entry *dst = NULL;
	u8 proto;
	__be16 frag_off;
	struct flowi6 fl6;

	if ((!(ipv6_addr_type(&oip6h->saddr) & IPV6_ADDR_UNICAST)) ||
	    (!(ipv6_addr_type(&oip6h->daddr) & IPV6_ADDR_UNICAST))) {
		pr_debug("addr is not unicast.\n");
		return;
	}

	proto = oip6h->nexthdr;
	tcphoff = ipv6_skip_exthdr(oldskb, ((u8*)(oip6h+1) - oldskb->data), &proto, &frag_off);

	if ((tcphoff < 0) || (tcphoff > oldskb->len)) {
		pr_debug("Cannot get TCP header.\n");
		return;
	}

	otcplen = oldskb->len - tcphoff;

	/* IP header checks: fragment, too short. */
	if (proto != IPPROTO_TCP || otcplen < sizeof(struct tcphdr)) {
		pr_debug("proto(%d) != IPPROTO_TCP, "
			 "or too short. otcplen = %d\n",
			 proto, otcplen);
		return;
	}

	if (skb_copy_bits(oldskb, tcphoff, &otcph, sizeof(struct tcphdr)))
		BUG();

	/* No RST for RST. */
	if (otcph.rst) {
		pr_debug("RST is set\n");
		return;
	}

	/* Check checksum. */
	if (csum_ipv6_magic(&oip6h->saddr, &oip6h->daddr, otcplen, IPPROTO_TCP,
			    skb_checksum(oldskb, tcphoff, otcplen, 0))) {
		pr_debug("TCP checksum is invalid\n");
		return;
	}

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_proto = IPPROTO_TCP;
	fl6.saddr = oip6h->daddr;
	fl6.daddr = oip6h->saddr;
	fl6.fl6_sport = otcph.dest;
	fl6.fl6_dport = otcph.source;
	security_skb_classify_flow(oldskb, flowi6_to_flowi(&fl6));
	dst = ip6_route_output(net, NULL, &fl6);
	if (dst == NULL || dst->error) {
		dst_release(dst);
		return;
	}
	dst = xfrm_lookup(net, dst, flowi6_to_flowi(&fl6), NULL, 0);
	if (IS_ERR(dst))
		return;

	hh_len = (dst->dev->hard_header_len + 15)&~15;
	nskb = alloc_skb(hh_len + 15 + dst->header_len + sizeof(struct ipv6hdr)
			 + sizeof(struct tcphdr) + dst->trailer_len,
			 GFP_ATOMIC);

	if (!nskb) {
		net_dbg_ratelimited("cannot alloc skb\n");
		dst_release(dst);
		return;
	}

	skb_dst_set(nskb, dst);

	skb_reserve(nskb, hh_len + dst->header_len);

	skb_put(nskb, sizeof(struct ipv6hdr));
	skb_reset_network_header(nskb);
	ip6h = ipv6_hdr(nskb);
	ip6_flow_hdr(ip6h, tclass, 0);
	ip6h->hop_limit = ip6_dst_hoplimit(dst);
	ip6h->nexthdr = IPPROTO_TCP;
	ip6h->saddr = oip6h->daddr;
	ip6h->daddr = oip6h->saddr;

	skb_reset_transport_header(nskb);
	tcph = (struct tcphdr *)skb_put(nskb, sizeof(struct tcphdr));
	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	tcph->source = otcph.dest;
	tcph->dest = otcph.source;

	if (otcph.ack) {
		needs_ack = 0;
		tcph->seq = otcph.ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
				      + otcplen - (otcph.doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;
	tcph->window = 0;
	tcph->urg_ptr = 0;
	tcph->check = 0;

	/* Adjust TCP checksum */
	tcph->check = csum_ipv6_magic(&ipv6_hdr(nskb)->saddr,
				      &ipv6_hdr(nskb)->daddr,
				      sizeof(struct tcphdr), IPPROTO_TCP,
				      csum_partial(tcph,
						   sizeof(struct tcphdr), 0));

	nf_ct_attach(nskb, oldskb);

	ip6_local_out(nskb);
}
Example #10
static inline int bfusb_recv_block(struct bfusb_data *data, int hdr, unsigned char *buf, int len)
{
	BT_DBG("bfusb %p hdr 0x%02x data %p len %d", data, hdr, buf, len);

	if (hdr & 0x10) {
		BT_ERR("%s error in block", data->hdev->name);
		kfree_skb(data->reassembly);
		data->reassembly = NULL;
		return -EIO;
	}

	if (hdr & 0x04) {
		struct sk_buff *skb;
		unsigned char pkt_type;
		int pkt_len = 0;

		if (data->reassembly) {
			BT_ERR("%s unexpected start block", data->hdev->name);
			kfree_skb(data->reassembly);
			data->reassembly = NULL;
		}

		if (len < 1) {
			BT_ERR("%s no packet type found", data->hdev->name);
			return -EPROTO;
		}

		pkt_type = *buf++; len--;

		switch (pkt_type) {
		case HCI_EVENT_PKT:
			if (len >= HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *hdr = (struct hci_event_hdr *) buf;
				pkt_len = HCI_EVENT_HDR_SIZE + hdr->plen;
			} else {
				BT_ERR("%s event block is too short", data->hdev->name);
				return -EILSEQ;
			}
			break;

		case HCI_ACLDATA_PKT:
			if (len >= HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *hdr = (struct hci_acl_hdr *) buf;
				pkt_len = HCI_ACL_HDR_SIZE + __le16_to_cpu(hdr->dlen);
			} else {
				BT_ERR("%s data block is too short", data->hdev->name);
				return -EILSEQ;
			}
			break;

		case HCI_SCODATA_PKT:
			if (len >= HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *hdr = (struct hci_sco_hdr *) buf;
				pkt_len = HCI_SCO_HDR_SIZE + hdr->dlen;
			} else {
				BT_ERR("%s audio block is too short", data->hdev->name);
				return -EILSEQ;
			}
			break;
		}

		skb = bt_skb_alloc(pkt_len, GFP_ATOMIC);
		if (!skb) {
			BT_ERR("%s no memory for the packet", data->hdev->name);
			return -ENOMEM;
		}

		skb->dev = (void *) data->hdev;
		bt_cb(skb)->pkt_type = pkt_type;

		data->reassembly = skb;
	} else {
		if (!data->reassembly) {
			BT_ERR("%s unexpected continuation block", data->hdev->name);
			return -EIO;
		}
	}

	if (len > 0)
		memcpy(skb_put(data->reassembly, len), buf, len);

	if (hdr & 0x08) {
		hci_recv_frame(data->reassembly);
		data->reassembly = NULL;
	}

	return 0;
}
Example #11
static void bfusb_rx_complete(struct urb *urb)
{
	struct sk_buff *skb = (struct sk_buff *) urb->context;
	struct bfusb_data *data = (struct bfusb_data *) skb->dev;
	unsigned char *buf = urb->transfer_buffer;
	int count = urb->actual_length;
	int err, hdr, len;

	BT_DBG("bfusb %p urb %p skb %p len %d", data, urb, skb, skb->len);

	read_lock(&data->lock);

	if (!test_bit(HCI_RUNNING, &data->hdev->flags))
		goto unlock;

	if (urb->status || !count)
		goto resubmit;

	data->hdev->stat.byte_rx += count;

	skb_put(skb, count);

	while (count) {
		hdr = buf[0] | (buf[1] << 8);

		if (hdr & 0x4000) {
			len = 0;
			count -= 2;
			buf   += 2;
		} else {
			len = (buf[2] == 0) ? 256 : buf[2];
			count -= 3;
			buf   += 3;
		}

		if (count < len) {
			BT_ERR("%s block extends over URB buffer ranges",
					data->hdev->name);
		}

		if ((hdr & 0xe1) == 0xc1)
			bfusb_recv_block(data, hdr, buf, len);

		count -= len;
		buf   += len;
	}

	skb_unlink(skb, &data->pending_q);
	kfree_skb(skb);

	bfusb_rx_submit(data, urb);

	read_unlock(&data->lock);

	return;

resubmit:
	urb->dev = data->udev;

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (err) {
		BT_ERR("%s bulk resubmit failed urb %p err %d",
					data->hdev->name, urb, err);
	}

unlock:
	read_unlock(&data->lock);
}
Example #12
/*
 * Receive a message with payloads from the USB bus into an skb
 *
 * @i2400mu: USB device descriptor
 * @rx_skb: skb where to place the received message
 *
 * Deals with all the USB-specifics of receiving, dynamically
 * increasing the buffer size if so needed. Returns the payload in the
 * skb, ready to process. On a zero-length packet, we retry.
 *
 * On soft USB errors, we retry (until they become too frequent and
 * then are promoted to hard); on hard USB errors, we reset the
 * device. On other errors (skb reallocation), we just drop it and
 * hope for the next invocation to solve it.
 *
 * Returns: pointer to the skb if ok, ERR_PTR on error.
 *   NOTE: this function might realloc the skb (if it is too small),
 *   so always update with the one returned.
 *   ERR_PTR() is < 0 on error.
 *   Will return NULL if it cannot reallocate -- this can be
 *   considered a transient retryable error.
 */
static
struct sk_buff *i2400mu_rx(struct i2400mu *i2400mu, struct sk_buff *rx_skb)
{
	int result = 0;
	struct device *dev = &i2400mu->usb_iface->dev;
	int usb_pipe, read_size, rx_size, do_autopm;
	struct usb_endpoint_descriptor *epd;
	const size_t max_pkt_size = 512;

	d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
	do_autopm = atomic_read(&i2400mu->do_autopm);
	result = do_autopm ?
		usb_autopm_get_interface(i2400mu->usb_iface) : 0;
	if (result < 0) {
		dev_err(dev, "RX: can't get autopm: %d\n", result);
		do_autopm = 0;
	}
	epd = usb_get_epd(i2400mu->usb_iface, i2400mu->endpoint_cfg.bulk_in);
	usb_pipe = usb_rcvbulkpipe(i2400mu->usb_dev, epd->bEndpointAddress);
retry:
	rx_size = skb_end_pointer(rx_skb) - rx_skb->data - rx_skb->len;
	if (unlikely(rx_size % max_pkt_size == 0)) {
		rx_size -= 8;
		d_printf(1, dev, "RX: rx_size adapted to %d [-8]\n", rx_size);
	}
	result = usb_bulk_msg(
		i2400mu->usb_dev, usb_pipe, rx_skb->data + rx_skb->len,
		rx_size, &read_size, 200);
	usb_mark_last_busy(i2400mu->usb_dev);
	switch (result) {
	case 0:
		if (read_size == 0)
			goto retry;	/* ZLP, just resubmit */
		skb_put(rx_skb, read_size);
		break;
	case -EPIPE:
		/*
		 * Stall -- maybe the device is choking with our
		 * requests. Clear it and give it some time. If they
		 * happen too often, it might be another symptom, so we
		 * reset.
		 *
		 * No error handling for usb_clear_halt(); if it
		 * works, the retry works; if it fails, this switch
		 * does the error handling for us.
		 */
		if (edc_inc(&i2400mu->urb_edc,
			    10 * EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
			dev_err(dev, "BM-CMD: too many stalls in "
				"URB; resetting device\n");
			goto do_reset;
		}
		usb_clear_halt(i2400mu->usb_dev, usb_pipe);
		msleep(10);	/* give the device some time */
		goto retry;
	case -EINVAL:			/* while removing driver */
	case -ENODEV:			/* dev disconnect ... */
	case -ENOENT:			/* just ignore it */
	case -ESHUTDOWN:
	case -ECONNRESET:
		break;
	case -EOVERFLOW: {		/* too small, reallocate */
		struct sk_buff *new_skb;
		rx_size = i2400mu_rx_size_grow(i2400mu);
		if (rx_size <= (1 << 16))	/* cap it */
			i2400mu->rx_size = rx_size;
		else if (printk_ratelimit()) {
			dev_err(dev, "BUG? rx_size up to %d\n", rx_size);
			result = -EINVAL;
			goto out;
		}
		skb_put(rx_skb, read_size);
		new_skb = skb_copy_expand(rx_skb, 0, rx_size - rx_skb->len,
					  GFP_KERNEL);
		if (new_skb == NULL) {
			if (printk_ratelimit())
				dev_err(dev, "RX: Can't reallocate skb to %d; "
					"RX dropped\n", rx_size);
			kfree_skb(rx_skb);
			rx_skb = NULL;
			goto out;	/* drop it...*/
		}
		kfree_skb(rx_skb);
		rx_skb = new_skb;
		i2400mu->rx_size_cnt = 0;
		i2400mu->rx_size_acc = i2400mu->rx_size;
		d_printf(1, dev, "RX: size changed to %d, received %d, "
			 "copied %d, capacity %ld\n",
			 rx_size, read_size, rx_skb->len,
			 (long) (skb_end_pointer(new_skb) - new_skb->head));
		goto retry;
	}
		/* In most cases, it happens due to the hardware scheduling a
		 * read when there was no data - unfortunately, we have no way
		 * to tell this timeout from a USB timeout. So we just ignore
		 * it. */
	case -ETIMEDOUT:
		dev_err(dev, "RX: timeout: %d\n", result);
		result = 0;
		break;
	default:			/* Any error */
		if (edc_inc(&i2400mu->urb_edc,
			    EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME))
			goto error_reset;
		dev_err(dev, "RX: error receiving URB: %d, retrying\n", result);
		goto retry;
	}
out:
	if (do_autopm)
		usb_autopm_put_interface(i2400mu->usb_iface);
	d_fnend(4, dev, "(i2400mu %p) = %p\n", i2400mu, rx_skb);
	return rx_skb;

error_reset:
	dev_err(dev, "RX: maximum errors in URB exceeded; "
		"resetting device\n");
do_reset:
	usb_queue_reset_device(i2400mu->usb_iface);
	rx_skb = ERR_PTR(result);
	goto out;
}
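
The -EOVERFLOW branch is the interesting part: the buffer was too small, so the function grows the skb with skb_copy_expand(), which preserves the bytes already received, and retries the bulk read. That grow step in isolation (a sketch; the 64 KiB cap loosely mirrors the code above):

#include <linux/skbuff.h>

/* Grow an rx skb so its total capacity reaches 'new_size', keeping the
 * data received so far. Returns the new skb, or NULL on failure (the
 * caller then drops the message, as above). */
static struct sk_buff *grow_rx_skb(struct sk_buff *rx_skb, int new_size)
{
	struct sk_buff *new_skb;

	if (new_size > (1 << 16))	/* cap runaway growth at 64 KiB */
		return rx_skb;
	new_skb = skb_copy_expand(rx_skb, 0, new_size - rx_skb->len,
				  GFP_KERNEL);
	if (!new_skb)
		return NULL;
	kfree_skb(rx_skb);		/* old copy no longer needed */
	return new_skb;
}
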
Example #13
static inline void brf6150_rx(struct brf6150_info *info)
{
	u8 byte;

	NBT_DBG_TRANSFER("rx_tasklet woke up\ndata ");

	while (brf6150_inb(info, UART_LSR) & UART_LSR_DR) {
		if (info->rx_skb == NULL) {
			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
			if (!info->rx_skb) {
				printk(KERN_WARNING "brf6150: Can't allocate memory for new packet\n");
				return;
			}
			info->rx_state = WAIT_FOR_PKT_TYPE;
			info->rx_skb->dev = (void *)info->hdev;
			brf6150_disable_pm_rx(info);
			clk_enable(info->uart_ck);
		}

		byte = brf6150_inb(info, UART_RX);
		if (info->garbage_bytes) {
			info->garbage_bytes--;
			info->hdev->stat.err_rx++;
			continue;
		}
		info->hdev->stat.byte_rx++;
		NBT_DBG_TRANSFER_NF("0x%.2x  ", byte);
		switch (info->rx_state) {
		case WAIT_FOR_PKT_TYPE:
			bt_cb(info->rx_skb)->pkt_type = byte;
			info->rx_count = brf6150_get_hdr_len(byte);
			if (info->rx_count >= 0) {
				info->rx_state = WAIT_FOR_HEADER;
			} else {
				info->hdev->stat.err_rx++;
				kfree_skb(info->rx_skb);
				info->rx_skb = NULL;
				clk_disable(info->uart_ck);
			}
			break;
		case WAIT_FOR_HEADER:
			info->rx_count--;
			*skb_put(info->rx_skb, 1) = byte;
			if (info->rx_count == 0) {
				info->rx_count = brf6150_get_data_len(info, info->rx_skb);
				if (info->rx_count > skb_tailroom(info->rx_skb)) {
					printk(KERN_WARNING "brf6150: Frame is %ld bytes too long.\n",
					       info->rx_count - skb_tailroom(info->rx_skb));
					info->garbage_bytes = info->rx_count - skb_tailroom(info->rx_skb);
					kfree_skb(info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					break;
				}
				info->rx_state = WAIT_FOR_DATA;
				if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) {
					brf6150_negotiation_packet(info, info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					return;
				}
				if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) {
					brf6150_alive_packet(info, info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					return;
				}
			}
			break;
		case WAIT_FOR_DATA:
			info->rx_count--;
			*skb_put(info->rx_skb, 1) = byte;
			if (info->rx_count == 0) {
				brf6150_recv_frame(info, info->rx_skb);
				info->rx_skb = NULL;
				clk_disable(info->uart_ck);
			}
			break;
		default:
			WARN_ON(1);
			break;
		}
	}

	NBT_DBG_TRANSFER_NF("\n");
}
Example #14
static int vnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct pdp_info *dev = (struct pdp_info *)net->ml_priv;

#ifdef USE_LOOPBACK_PING
	int ret;
	struct sk_buff *skb2;
	struct icmphdr *icmph;
	struct iphdr *iph;
#endif

   DPRINTK(2, "BEGIN\n");

#ifdef USE_LOOPBACK_PING
	dev->vn_dev.stats.tx_bytes += skb->len;
	dev->vn_dev.stats.tx_packets++;

	skb2 = alloc_skb(skb->len, GFP_ATOMIC);
	if (skb2 == NULL) {
		DPRINTK(1, "alloc_skb() failed\n");
		dev_kfree_skb_any(skb);
		return -ENOMEM;
	}

	memcpy(skb2->data, skb->data, skb->len);
	skb_put(skb2, skb->len);
	dev_kfree_skb_any(skb);

	icmph = (struct icmphdr *)(skb2->data + sizeof(struct iphdr));
	iph = (struct iphdr *)skb2->data;

	icmph->type = __constant_htons(ICMP_ECHOREPLY);

	ret = iph->daddr;
	iph->daddr = iph->saddr;
	iph->saddr = ret;
	iph->check = 0;
	iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);

	skb2->dev = net;
	skb2->protocol = __constant_htons(ETH_P_IP);

	netif_rx(skb2);

	dev->vn_dev.stats.rx_packets++;
	dev->vn_dev.stats.rx_bytes += skb->len;
#else
#if 0
   if (vnet_start_xmit_flag != 0) {
        if (vnet_start_xmit_count > 100000) {
            vnet_start_xmit_flag = 0;
            DPRINTK(1, "vnet_start_xmit_count exceed.. clear vnet_start_xmit_flag \n");
        }
        EPRINTK("vnet_start_xmit() return -EAGAIN \n");
        vnet_start_xmit_count++; 
        return -EAGAIN;
    }
    vnet_start_xmit_count = 0;
   vnet_start_xmit_flag = 1; 
#endif
	workqueue_data = (unsigned long)skb;
	PREPARE_WORK(&dev->vn_dev.xmit_task,vnet_defer_xmit);
	schedule_work(&dev->vn_dev.xmit_task);
	netif_stop_queue(net);
#endif

   DPRINTK(2, "END\n");
	return 0;
}
Example #15
static int mesh_path_sel_frame_tx(enum mpath_frame_type action, u8 flags,
				  const u8 *orig_addr, u32 orig_sn,
				  u8 target_flags, const u8 *target,
				  u32 target_sn, const u8 *da,
				  u8 hop_count, u8 ttl,
				  u32 lifetime, u32 metric, u32 preq_id,
				  struct ieee80211_sub_if_data *sdata)
{
	struct ieee80211_local *local = sdata->local;
	struct sk_buff *skb;
	struct ieee80211_mgmt *mgmt;
	u8 *pos, ie_len;
	int hdr_len = offsetofend(struct ieee80211_mgmt,
				  u.action.u.mesh_action);

	skb = dev_alloc_skb(local->tx_headroom +
			    hdr_len +
			    2 + 37); /* max HWMP IE */
	if (!skb)
		return -1;
	skb_reserve(skb, local->tx_headroom);
	mgmt = skb_put_zero(skb, hdr_len);
	mgmt->frame_control = cpu_to_le16(IEEE80211_FTYPE_MGMT |
					  IEEE80211_STYPE_ACTION);

	memcpy(mgmt->da, da, ETH_ALEN);
	memcpy(mgmt->sa, sdata->vif.addr, ETH_ALEN);
	/* BSSID == SA */
	memcpy(mgmt->bssid, sdata->vif.addr, ETH_ALEN);
	mgmt->u.action.category = WLAN_CATEGORY_MESH_ACTION;
	mgmt->u.action.u.mesh_action.action_code =
					WLAN_MESH_ACTION_HWMP_PATH_SELECTION;

	switch (action) {
	case MPATH_PREQ:
		mhwmp_dbg(sdata, "sending PREQ to %pM\n", target);
		ie_len = 37;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREQ;
		break;
	case MPATH_PREP:
		mhwmp_dbg(sdata, "sending PREP to %pM\n", orig_addr);
		ie_len = 31;
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_PREP;
		break;
	case MPATH_RANN:
		mhwmp_dbg(sdata, "sending RANN from %pM\n", orig_addr);
		ie_len = sizeof(struct ieee80211_rann_ie);
		pos = skb_put(skb, 2 + ie_len);
		*pos++ = WLAN_EID_RANN;
		break;
	default:
		kfree_skb(skb);
		return -ENOTSUPP;
	}
	*pos++ = ie_len;
	*pos++ = flags;
	*pos++ = hop_count;
	*pos++ = ttl;
	if (action == MPATH_PREP) {
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else {
		if (action == MPATH_PREQ) {
			put_unaligned_le32(preq_id, pos);
			pos += 4;
		}
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}
	put_unaligned_le32(lifetime, pos); /* interval for RANN */
	pos += 4;
	put_unaligned_le32(metric, pos);
	pos += 4;
	if (action == MPATH_PREQ) {
		*pos++ = 1; /* destination count */
		*pos++ = target_flags;
		memcpy(pos, target, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(target_sn, pos);
		pos += 4;
	} else if (action == MPATH_PREP) {
		memcpy(pos, orig_addr, ETH_ALEN);
		pos += ETH_ALEN;
		put_unaligned_le32(orig_sn, pos);
		pos += 4;
	}

	ieee80211_tx_skb(sdata, skb);
	return 0;
}
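
The IE body is written through a moving pos pointer over a region claimed by a single skb_put() call; multi-byte fields go through put_unaligned_le32() because nothing guarantees 4-byte alignment inside the element. A condensed sketch of one such TLV (assumes body_len >= 4; fill_tlv is a hypothetical helper):

#include <linux/skbuff.h>
#include <linux/string.h>
#include <asm/unaligned.h>

/* Claim 2 + body_len bytes, write the TLV header, then fill the body
 * through 'pos'; the first field is an unaligned little-endian u32. */
static void fill_tlv(struct sk_buff *skb, u8 eid, u8 body_len, u32 first_word)
{
	u8 *pos = skb_put(skb, 2 + body_len);

	*pos++ = eid;				/* element ID */
	*pos++ = body_len;			/* body length */
	put_unaligned_le32(first_word, pos);	/* unaligned LE store */
	pos += 4;
	memset(pos, 0, body_len - 4);		/* zero the remaining body */
}
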
Example #16
/* During a receive, the cur_rx points to the current incoming buffer.
 * When we update through the ring, if the next incoming buffer has
 * not been given to the system, we just set the empty indicator,
 * effectively tossing the packet.
 */
static int
fec_enet_rx(struct net_device *ndev, int budget)
{
	struct fec_enet_private *fep = netdev_priv(ndev);
	const struct platform_device_id *id_entry =
				platform_get_device_id(fep->pdev);
	struct bufdesc *bdp;
	unsigned short status;
	struct	sk_buff	*skb;
	ushort	pkt_len;
	__u8 *data;
	int	pkt_received = 0;

#ifdef CONFIG_M532x
	flush_cache_all();
#endif

	/* First, grab all of the stats for the incoming packet.
	 * These get messed up if we get called due to a busy condition.
	 */
	bdp = fep->cur_rx;

	while (!((status = bdp->cbd_sc) & BD_ENET_RX_EMPTY)) {

		if (pkt_received >= budget)
			break;
		pkt_received++;

		/* Since we have allocated space to hold a complete frame,
		 * the last indicator should be set.
		 */
		if ((status & BD_ENET_RX_LAST) == 0)
			printk("FEC ENET: rcv is not +last\n");

		if (!fep->opened)
			goto rx_processing_done;

		/* Check for errors. */
		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
			ndev->stats.rx_errors++;
			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
				/* Frame too long or too short. */
				ndev->stats.rx_length_errors++;
			}
			if (status & BD_ENET_RX_NO)	/* Frame alignment */
				ndev->stats.rx_frame_errors++;
			if (status & BD_ENET_RX_CR)	/* CRC Error */
				ndev->stats.rx_crc_errors++;
			if (status & BD_ENET_RX_OV)	/* FIFO overrun */
				ndev->stats.rx_fifo_errors++;
		}

		/* Report late collisions as a frame error.
		 * On this error, the BD is closed, but we don't know what we
		 * have in the buffer.  So, just drop this frame on the floor.
		 */
		if (status & BD_ENET_RX_CL) {
			ndev->stats.rx_errors++;
			ndev->stats.rx_frame_errors++;
			goto rx_processing_done;
		}

		/* Process the incoming frame. */
		ndev->stats.rx_packets++;
		pkt_len = bdp->cbd_datlen;
		ndev->stats.rx_bytes += pkt_len;
		data = (__u8*)__va(bdp->cbd_bufaddr);

		dma_unmap_single(&fep->pdev->dev, bdp->cbd_bufaddr,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);

		if (id_entry->driver_data & FEC_QUIRK_SWAP_FRAME)
			swap_buffer(data, pkt_len);

		/* This does 16 byte alignment, exactly what we need.
		 * The packet length includes FCS, but we don't want to
		 * include that when passing upstream as it messes up
		 * bridging applications.
		 */
		skb = netdev_alloc_skb(ndev, pkt_len - 4 + NET_IP_ALIGN);

		if (unlikely(!skb)) {
			printk("%s: Memory squeeze, dropping packet.\n",
					ndev->name);
			ndev->stats.rx_dropped++;
		} else {
			skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len - 4);	/* Make room */
			skb_copy_to_linear_data(skb, data, pkt_len - 4);
			skb->protocol = eth_type_trans(skb, ndev);

			/* Get receive timestamp from the skb */
			if (fep->hwts_rx_en && fep->bufdesc_ex) {
				struct skb_shared_hwtstamps *shhwtstamps =
							    skb_hwtstamps(skb);
				unsigned long flags;
				struct bufdesc_ex *ebdp =
					(struct bufdesc_ex *)bdp;

				memset(shhwtstamps, 0, sizeof(*shhwtstamps));

				spin_lock_irqsave(&fep->tmreg_lock, flags);
				shhwtstamps->hwtstamp = ns_to_ktime(
				    timecounter_cyc2time(&fep->tc, ebdp->ts));
				spin_unlock_irqrestore(&fep->tmreg_lock, flags);
			}

			if (!skb_defer_rx_timestamp(skb))
				napi_gro_receive(&fep->napi, skb);
		}

		bdp->cbd_bufaddr = dma_map_single(&fep->pdev->dev, data,
				FEC_ENET_TX_FRSIZE, DMA_FROM_DEVICE);
rx_processing_done:
		/* Clear the status flags for this buffer */
		status &= ~BD_ENET_RX_STATS;

		/* Mark the buffer empty */
		status |= BD_ENET_RX_EMPTY;
		bdp->cbd_sc = status;

		if (fep->bufdesc_ex) {
			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;

			ebdp->cbd_esc = BD_ENET_RX_INT;
			ebdp->cbd_prot = 0;
			ebdp->cbd_bdu = 0;
		}

		/* Update BD pointer to next entry */
		if (status & BD_ENET_RX_WRAP)
			bdp = fep->rx_bd_base;
		else
			bdp = fec_enet_get_nextdesc(bdp, fep->bufdesc_ex);
		/* Doing this here will keep the FEC running while we process
		 * incoming frames.  On a heavily loaded network, we should be
		 * able to keep up at the expense of system resources.
		 */
		writel(0, fep->hwp + FEC_R_DES_ACTIVE);
	}
	fep->cur_rx = bdp;

	return pkt_received;
}
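
This driver takes the opposite approach to Example #1: copy-break receive. A fresh skb sized to the frame (plus NET_IP_ALIGN) is allocated per packet and the DMA buffer itself is never handed upstream, which keeps the ring buffers in place at the cost of a copy. The per-packet copy, condensed (a sketch):

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Copy-break RX: copy 'len' frame bytes (FCS already stripped by the
 * caller) out of the DMA buffer into a fresh, IP-aligned skb. */
static struct sk_buff *copybreak_rx(struct net_device *ndev,
				    const void *data, unsigned int len)
{
	struct sk_buff *skb = netdev_alloc_skb(ndev, len + NET_IP_ALIGN);

	if (unlikely(!skb))
		return NULL;		/* caller counts rx_dropped */
	skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	skb_put(skb, len);
	skb_copy_to_linear_data(skb, data, len);
	skb->protocol = eth_type_trans(skb, ndev);
	return skb;
}
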
Example #17
static struct sk_buff_head *msm_ipc_router_build_msg(unsigned int num_sect,
					  struct iovec const *msg_sect,
					  size_t total_len)
{
	struct sk_buff_head *msg_head;
	struct sk_buff *msg;
	int i, copied, first = 1;
	int data_size = 0, request_size, offset;
	void *data;

	for (i = 0; i < num_sect; i++)
		data_size += msg_sect[i].iov_len;

	if (!data_size)
		return NULL;

	msg_head = kmalloc(sizeof(struct sk_buff_head), GFP_KERNEL);
	if (!msg_head) {
		pr_err("%s: cannot allocate skb_head\n", __func__);
		return NULL;
	}
	skb_queue_head_init(msg_head);

	for (copied = 1, i = 0; copied && (i < num_sect); i++) {
		data_size = msg_sect[i].iov_len;
		offset = 0;
		while (offset != msg_sect[i].iov_len) {
			request_size = data_size;
			if (first)
				request_size += IPC_ROUTER_HDR_SIZE;

			msg = alloc_skb(request_size, GFP_KERNEL);
			if (!msg) {
				if (request_size <= (PAGE_SIZE/2)) {
					pr_err("%s: cannot allocated skb\n",
						__func__);
					goto msg_build_failure;
				}
				data_size = data_size / 2;
				continue;
			}

			if (first) {
				skb_reserve(msg, IPC_ROUTER_HDR_SIZE);
				first = 0;
			}

			data = skb_put(msg, data_size);
			copied = !copy_from_user(msg->data,
					msg_sect[i].iov_base + offset,
					data_size);
			if (!copied) {
				pr_err("%s: copy_from_user failed\n",
					__func__);
				kfree_skb(msg);
				goto msg_build_failure;
			}
			skb_queue_tail(msg_head, msg);
			offset += data_size;
			data_size = msg_sect[i].iov_len - offset;
		}
	}
	return msg_head;

msg_build_failure:
	while (!skb_queue_empty(msg_head)) {
		msg = skb_dequeue(msg_head);
		kfree_skb(msg);
	}
	kfree(msg_head);
	return NULL;
}
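
Copying user memory into a region just claimed by skb_put() is the core move here; on a failed copy the claim has to be undone before the skb is discarded or reused. In isolation (a sketch; skb_append_from_user is a hypothetical helper):

#include <linux/skbuff.h>
#include <linux/uaccess.h>

/* Append 'len' bytes of user data at the tail of 'skb'.
 * Returns 0 on success, -EFAULT if the user copy faulted (the claimed
 * tail space is trimmed off again so the skb stays consistent). */
static int skb_append_from_user(struct sk_buff *skb,
				const void __user *from, unsigned int len)
{
	void *to = skb_put(skb, len);

	if (copy_from_user(to, from, len)) {
		__skb_trim(skb, skb->len - len);	/* undo the put */
		return -EFAULT;
	}
	return 0;
}
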
Example #18
/* receive a single frame and assemble datagram
 * (this is the heart of the interrupt routine)
 */
static inline int
sb1000_rx(struct net_device *dev)
{

#define FRAMESIZE 184
	unsigned char st[2], buffer[FRAMESIZE], session_id, frame_id;
	short dlen;
	int ioaddr, ns;
	unsigned int skbsize;
	struct sk_buff *skb;
	struct sb1000_private *lp = netdev_priv(dev);
	struct net_device_stats *stats = &lp->stats;

	/* SB1000 frame constants */
	const int FrameSize = FRAMESIZE;
	const int NewDatagramHeaderSkip = 8;
	const int NewDatagramHeaderSize = NewDatagramHeaderSkip + 18;
	const int NewDatagramDataSize = FrameSize - NewDatagramHeaderSize;
	const int ContDatagramHeaderSkip = 7;
	const int ContDatagramHeaderSize = ContDatagramHeaderSkip + 1;
	const int ContDatagramDataSize = FrameSize - ContDatagramHeaderSize;
	const int TrailerSize = 4;

	ioaddr = dev->base_addr;

	insw(ioaddr, (unsigned short*) st, 1);
#ifdef XXXDEBUG
printk("cm0: received: %02x %02x\n", st[0], st[1]);
#endif /* XXXDEBUG */
	lp->rx_frames++;

	/* decide if it is a good or bad frame */
	for (ns = 0; ns < NPIDS; ns++) {
		session_id = lp->rx_session_id[ns];
		frame_id = lp->rx_frame_id[ns];
		if (st[0] == session_id) {
			if (st[1] == frame_id || (!frame_id && (st[1] & 0xf0) == 0x30)) {
				goto good_frame;
			} else if ((st[1] & 0xf0) == 0x30 && (st[0] & 0x40)) {
				goto skipped_frame;
			} else {
				goto bad_frame;
			}
		} else if (st[0] == (session_id | 0x40)) {
			if ((st[1] & 0xf0) == 0x30) {
				goto skipped_frame;
			} else {
				goto bad_frame;
			}
		}
	}
	goto bad_frame;

skipped_frame:
	stats->rx_frame_errors++;
	skb = lp->rx_skb[ns];
	if (sb1000_debug > 1)
		printk(KERN_WARNING "%s: missing frame(s): got %02x %02x "
			"expecting %02x %02x\n", dev->name, st[0], st[1],
			skb ? session_id : session_id | 0x40, frame_id);
	if (skb) {
		dev_kfree_skb(skb);
		skb = NULL;
	}

good_frame:
	lp->rx_frame_id[ns] = 0x30 | ((st[1] + 1) & 0x0f);
	/* new datagram */
	if (st[0] & 0x40) {
		/* get data length */
		insw(ioaddr, buffer, NewDatagramHeaderSize / 2);
#ifdef XXXDEBUG
printk("cm0: IP identification: %02x%02x  fragment offset: %02x%02x\n", buffer[30], buffer[31], buffer[32], buffer[33]);
#endif /* XXXDEBUG */
		if (buffer[0] != NewDatagramHeaderSkip) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: new datagram header skip error: "
					"got %02x expecting %02x\n", dev->name, buffer[0],
					NewDatagramHeaderSkip);
			stats->rx_length_errors++;
			insw(ioaddr, buffer, NewDatagramDataSize / 2);
			goto bad_frame_next;
		}
		dlen = ((buffer[NewDatagramHeaderSkip + 3] & 0x0f) << 8 |
			buffer[NewDatagramHeaderSkip + 4]) - 17;
		if (dlen > SB1000_MRU) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: datagram length (%d) greater "
					"than MRU (%d)\n", dev->name, dlen, SB1000_MRU);
			stats->rx_length_errors++;
			insw(ioaddr, buffer, NewDatagramDataSize / 2);
			goto bad_frame_next;
		}
		lp->rx_dlen[ns] = dlen;
		/* compute size to allocate for datagram */
		skbsize = dlen + FrameSize;
		if ((skb = alloc_skb(skbsize, GFP_ATOMIC)) == NULL) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: can't allocate %d bytes long "
					"skbuff\n", dev->name, skbsize);
			stats->rx_dropped++;
			insw(ioaddr, buffer, NewDatagramDataSize / 2);
			goto dropped_frame;
		}
		skb->dev = dev;
		skb->mac.raw = skb->data;
		skb->protocol = (unsigned short) buffer[NewDatagramHeaderSkip + 16];
		insw(ioaddr, skb_put(skb, NewDatagramDataSize),
			NewDatagramDataSize / 2);
		lp->rx_skb[ns] = skb;
	} else {
		/* continuation of previous datagram */
		insw(ioaddr, buffer, ContDatagramHeaderSize / 2);
		if (buffer[0] != ContDatagramHeaderSkip) {
			if (sb1000_debug > 1)
				printk(KERN_WARNING "%s: cont datagram header skip error: "
					"got %02x expecting %02x\n", dev->name, buffer[0],
					ContDatagramHeaderSkip);
			stats->rx_length_errors++;
			insw(ioaddr, buffer, ContDatagramDataSize / 2);
			goto bad_frame_next;
		}
		skb = lp->rx_skb[ns];
		insw(ioaddr, skb_put(skb, ContDatagramDataSize),
			ContDatagramDataSize / 2);
		dlen = lp->rx_dlen[ns];
	}
	if (skb->len < dlen + TrailerSize) {
		lp->rx_session_id[ns] &= ~0x40;
		return 0;
	}

	/* datagram completed: send to upper level */
	skb_trim(skb, dlen);
	netif_rx(skb);
	dev->last_rx = jiffies;
	stats->rx_bytes+=dlen;
	stats->rx_packets++;
	lp->rx_skb[ns] = NULL;
	lp->rx_session_id[ns] |= 0x40;
	return 0;

bad_frame:
	insw(ioaddr, buffer, FrameSize / 2);
	if (sb1000_debug > 1)
		printk(KERN_WARNING "%s: frame error: got %02x %02x\n",
			dev->name, st[0], st[1]);
	stats->rx_frame_errors++;
bad_frame_next:
	if (sb1000_debug > 2)
		sb1000_print_status_buffer(dev->name, st, buffer, FrameSize);
dropped_frame:
	stats->rx_errors++;
	if (ns < NPIDS) {
		if ((skb = lp->rx_skb[ns])) {
			dev_kfree_skb(skb);
			lp->rx_skb[ns] = NULL;
		}
		lp->rx_session_id[ns] |= 0x40;
	}
	return -1;
}
Example #19
int ip6_fragment(struct sock *sk, struct sk_buff *skb,
		 int (*output)(struct sock *, struct sk_buff *))
{
	struct sk_buff *frag;
	struct rt6_info *rt = (struct rt6_info *)skb_dst(skb);
	struct ipv6_pinfo *np = skb->sk && !dev_recursion_level() ?
				inet6_sk(skb->sk) : NULL;
	struct ipv6hdr *tmp_hdr;
	struct frag_hdr *fh;
	unsigned int mtu, hlen, left, len;
	int hroom, troom;
	__be32 frag_id;
	int ptr, offset = 0, err = 0;
	u8 *prevhdr, nexthdr = 0;
	struct net *net = dev_net(skb_dst(skb)->dev);

	hlen = ip6_find_1stfragopt(skb, &prevhdr);
	nexthdr = *prevhdr;

	mtu = ip6_skb_dst_mtu(skb);

	/* We must not fragment if the socket is set to force MTU discovery
	 * or if the skb it not generated by a local socket.
	 */
	if (unlikely(!skb->ignore_df && skb->len > mtu))
		goto fail_toobig;

	if (IP6CB(skb)->frag_max_size) {
		if (IP6CB(skb)->frag_max_size > mtu)
			goto fail_toobig;

		/* don't send fragments larger than what we received */
		mtu = IP6CB(skb)->frag_max_size;
		if (mtu < IPV6_MIN_MTU)
			mtu = IPV6_MIN_MTU;
	}

	if (np && np->frag_size < mtu) {
		if (np->frag_size)
			mtu = np->frag_size;
	}
	mtu -= hlen + sizeof(struct frag_hdr);

	frag_id = ipv6_select_ident(net, &ipv6_hdr(skb)->daddr,
				    &ipv6_hdr(skb)->saddr);

	hroom = LL_RESERVED_SPACE(rt->dst.dev);
	if (skb_has_frag_list(skb)) {
		int first_len = skb_pagelen(skb);
		struct sk_buff *frag2;

		if (first_len - hlen > mtu ||
		    ((first_len - hlen) & 7) ||
		    skb_cloned(skb) ||
		    skb_headroom(skb) < (hroom + sizeof(struct frag_hdr)))
			goto slow_path;

		skb_walk_frags(skb, frag) {
			/* Correct geometry. */
			if (frag->len > mtu ||
			    ((frag->len & 7) && frag->next) ||
			    skb_headroom(frag) < (hlen + hroom + sizeof(struct frag_hdr)))
				goto slow_path_clean;

			/* Partially cloned skb? */
			if (skb_shared(frag))
				goto slow_path_clean;

			BUG_ON(frag->sk);
			if (skb->sk) {
				frag->sk = skb->sk;
				frag->destructor = sock_wfree;
			}
			skb->truesize -= frag->truesize;
		}

		err = 0;
		offset = 0;
		/* BUILD HEADER */

		*prevhdr = NEXTHDR_FRAGMENT;
		tmp_hdr = kmemdup(skb_network_header(skb), hlen, GFP_ATOMIC);
		if (!tmp_hdr) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}
		frag = skb_shinfo(skb)->frag_list;
		skb_frag_list_init(skb);

		__skb_pull(skb, hlen);
		fh = (struct frag_hdr *)__skb_push(skb, sizeof(struct frag_hdr));
		__skb_push(skb, hlen);
		skb_reset_network_header(skb);
		memcpy(skb_network_header(skb), tmp_hdr, hlen);

		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->frag_off = htons(IP6_MF);
		fh->identification = frag_id;

		first_len = skb_pagelen(skb);
		skb->data_len = first_len - skb_headlen(skb);
		skb->len = first_len;
		ipv6_hdr(skb)->payload_len = htons(first_len -
						   sizeof(struct ipv6hdr));

		dst_hold(&rt->dst);

		for (;;) {
			/* Prepare header of the next frame,
			 * before previous one went down. */
			if (frag) {
				frag->ip_summed = CHECKSUM_NONE;
				skb_reset_transport_header(frag);
				fh = (struct frag_hdr *)__skb_push(frag, sizeof(struct frag_hdr));
				__skb_push(frag, hlen);
				skb_reset_network_header(frag);
				memcpy(skb_network_header(frag), tmp_hdr,
				       hlen);
				offset += skb->len - hlen - sizeof(struct frag_hdr);
				fh->nexthdr = nexthdr;
				fh->reserved = 0;
				fh->frag_off = htons(offset);
				if (frag->next)
					fh->frag_off |= htons(IP6_MF);
				fh->identification = frag_id;
				ipv6_hdr(frag)->payload_len =
						htons(frag->len -
						      sizeof(struct ipv6hdr));
				ip6_copy_metadata(frag, skb);
			}

			err = output(sk, skb);
			if (!err)
				IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
					      IPSTATS_MIB_FRAGCREATES);

			if (err || !frag)
				break;

			skb = frag;
			frag = skb->next;
			skb->next = NULL;
		}

		kfree(tmp_hdr);

		if (err == 0) {
			IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
				      IPSTATS_MIB_FRAGOKS);
			ip6_rt_put(rt);
			return 0;
		}

		kfree_skb_list(frag);

		IP6_INC_STATS(net, ip6_dst_idev(&rt->dst),
			      IPSTATS_MIB_FRAGFAILS);
		ip6_rt_put(rt);
		return err;

slow_path_clean:
		skb_walk_frags(skb, frag2) {
			if (frag2 == frag)
				break;
			frag2->sk = NULL;
			frag2->destructor = NULL;
			skb->truesize += frag2->truesize;
		}
	}

slow_path:
	if ((skb->ip_summed == CHECKSUM_PARTIAL) &&
	    skb_checksum_help(skb))
		goto fail;

	left = skb->len - hlen;		/* Space per frame */
	ptr = hlen;			/* Where to start from */

	/*
	 *	Fragment the datagram.
	 */

	*prevhdr = NEXTHDR_FRAGMENT;
	troom = rt->dst.dev->needed_tailroom;

	/*
	 *	Keep copying data until we run out.
	 */
	while (left > 0)	{
		len = left;
		/* IF: it doesn't fit, use 'mtu' - the data space left */
		if (len > mtu)
			len = mtu;
		/* IF: we are not sending up to and including the packet end
		   then align the next start on an eight byte boundary */
		if (len < left)	{
			len &= ~7;
		}

		/* Allocate buffer */
		frag = alloc_skb(len + hlen + sizeof(struct frag_hdr) +
				 hroom + troom, GFP_ATOMIC);
		if (!frag) {
			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
				      IPSTATS_MIB_FRAGFAILS);
			err = -ENOMEM;
			goto fail;
		}

		/*
		 *	Set up data on packet
		 */

		ip6_copy_metadata(frag, skb);
		skb_reserve(frag, hroom);
		skb_put(frag, len + hlen + sizeof(struct frag_hdr));
		skb_reset_network_header(frag);
		fh = (struct frag_hdr *)(skb_network_header(frag) + hlen);
		frag->transport_header = (frag->network_header + hlen +
					  sizeof(struct frag_hdr));

		/*
		 *	Charge the memory for the fragment to any owner
		 *	it might possess
		 */
		if (skb->sk)
			skb_set_owner_w(frag, skb->sk);

		/*
		 *	Copy the packet header into the new buffer.
		 */
		skb_copy_from_linear_data(skb, skb_network_header(frag), hlen);

		/*
		 *	Build fragment header.
		 */
		fh->nexthdr = nexthdr;
		fh->reserved = 0;
		fh->identification = frag_id;

		/*
		 *	Copy a block of the IP datagram.
		 */
		BUG_ON(skb_copy_bits(skb, ptr, skb_transport_header(frag),
				     len));
		left -= len;

		fh->frag_off = htons(offset);
		if (left > 0)
			fh->frag_off |= htons(IP6_MF);
		ipv6_hdr(frag)->payload_len = htons(frag->len -
						    sizeof(struct ipv6hdr));

		ptr += len;
		offset += len;

		/*
		 *	Put this fragment into the sending queue.
		 */
		err = output(sk, frag);
		if (err)
			goto fail;

		IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
			      IPSTATS_MIB_FRAGCREATES);
	}
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGOKS);
	consume_skb(skb);
	return err;

fail_toobig:
	if (skb->sk && dst_allfrag(skb_dst(skb)))
		sk_nocaps_add(skb->sk, NETIF_F_GSO_MASK);

	skb->dev = skb_dst(skb)->dev;
	icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, mtu);
	err = -EMSGSIZE;

fail:
	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
		      IPSTATS_MIB_FRAGFAILS);
	kfree_skb(skb);
	return err;
}
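
The per-fragment budget above subtracts the unfragmentable header length and the fragment header from the path MTU, then truncates every non-final fragment to a multiple of 8 bytes, since IPv6 fragment offsets are expressed in 8-byte units. A minimal standalone sketch of that arithmetic (the helper name and the fixed 8-byte frag_hdr size are illustrative, not kernel API):

#include <stdio.h>

#define FRAG_HDR_LEN 8	/* sizeof(struct frag_hdr) */

/* Hypothetical helper: payload bytes carried by a non-final fragment. */
static unsigned int frag_payload(unsigned int mtu, unsigned int hlen)
{
	/* Budget left after the unfragmentable part and the fragment header,
	 * mirroring: mtu -= hlen + sizeof(struct frag_hdr); */
	unsigned int space = mtu - hlen - FRAG_HDR_LEN;

	/* Non-final fragments must be a multiple of 8 bytes (len &= ~7). */
	return space & ~7u;
}

int main(void)
{
	printf("%u\n", frag_payload(1500, 40));	/* 1452 -> 1448 */
	printf("%u\n", frag_payload(1492, 40));	/* 1444 -> 1440 */
	return 0;
}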
Beispiel #20
0
static inline int ip6_ufo_append_data(struct sock *sk,
			int getfrag(void *from, char *to, int offset, int len,
			int odd, struct sk_buff *skb),
			void *from, int length, int hh_len, int fragheaderlen,
			int transhdrlen, int mtu, unsigned int flags)
{
	struct sk_buff *skb;
	int err;

	/* The network device supports UDP large send offload, so
	 * create a single skb containing the complete UDP datagram.
	 */
	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL) {
		skb = sock_alloc_send_skb(sk,
			hh_len + fragheaderlen + transhdrlen + 20,
			(flags & MSG_DONTWAIT), &err);
		if (skb == NULL)
			return -ENOMEM;

		/* reserve space for Hardware header */
		skb_reserve(skb, hh_len);

		/* create space for UDP/IP header */
		skb_put(skb, fragheaderlen + transhdrlen);

		/* initialize network header pointer */
		skb_reset_network_header(skb);

		/* initialize protocol header pointer */
		skb->transport_header = skb->network_header + fragheaderlen;

		skb->ip_summed = CHECKSUM_PARTIAL;
		skb->csum = 0;
	}

	err = skb_append_datato_frags(sk, skb, getfrag, from,
				      (length - transhdrlen));
	if (!err) {
		struct frag_hdr fhdr;

		/* Specify the length of each IPv6 datagram fragment.
		 * It has to be a multiple of 8.
		 */
		skb_shinfo(skb)->gso_size = (mtu - fragheaderlen -
					     sizeof(struct frag_hdr)) & ~7;
		skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
		ipv6_select_ident(&fhdr);
		skb_shinfo(skb)->ip6_frag_id = fhdr.identification;
		__skb_queue_tail(&sk->sk_write_queue, skb);

		return 0;
	}
	/* There is not enough support to do UDP LSO,
	 * so follow the normal path.
	 */
	kfree_skb(skb);

	return err;
}
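
The comment above notes that each fragment length must be a multiple of 8; gso_size is therefore the largest 8-byte-aligned payload that still fits the MTU after the IPv6 header chain and the fragment header are subtracted. A hedged arithmetic sketch (the header length and datagram size are illustrative):

#include <stdio.h>

#define FRAG_HDR_LEN 8	/* sizeof(struct frag_hdr) */

int main(void)
{
	unsigned int mtu = 1500, fragheaderlen = 48;	/* illustrative */
	unsigned int datagram = 65000;			/* UDP payload bytes */

	/* Mirrors: (mtu - fragheaderlen - sizeof(struct frag_hdr)) & ~7 */
	unsigned int gso_size = (mtu - fragheaderlen - FRAG_HDR_LEN) & ~7u;
	unsigned int nfrags = (datagram + gso_size - 1) / gso_size;

	printf("gso_size=%u -> %u fragments\n", gso_size, nfrags);	/* 1440 -> 46 */
	return 0;
}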
/*
 * This function checks the current interrupt status.
 *
 * The following interrupts are checked and handled by this function -
 *      - Data sent
 *      - Command sent
 *      - Packets received
 *
 * Since the firmware does not generate download ready interrupt if the
 * port updated is command port only, command sent interrupt checking
 * should be done manually, and for every SDIO interrupt.
 *
 * In case of Rx packets received, the packets are uploaded from card to
 * host and processed accordingly.
 */
static int mwifiex_process_int_status(struct mwifiex_adapter *adapter)
{
	struct sdio_mmc_card *card = adapter->card;
	int ret = 0;
	u8 sdio_ireg;
	struct sk_buff *skb;
	u8 port = CTRL_PORT;
	u32 len_reg_l, len_reg_u;
	u32 rx_blocks;
	u16 rx_len;
	unsigned long flags;

	spin_lock_irqsave(&adapter->int_lock, flags);
	sdio_ireg = adapter->int_status;
	adapter->int_status = 0;
	spin_unlock_irqrestore(&adapter->int_lock, flags);

	if (!sdio_ireg)
		return ret;

	if (sdio_ireg & DN_LD_HOST_INT_STATUS) {
		card->mp_wr_bitmap = ((u16) card->mp_regs[WR_BITMAP_U]) << 8;
		card->mp_wr_bitmap |= (u16) card->mp_regs[WR_BITMAP_L];
		dev_dbg(adapter->dev, "int: DNLD: wr_bitmap=0x%04x\n",
			card->mp_wr_bitmap);
		if (adapter->data_sent &&
		    (card->mp_wr_bitmap & card->mp_data_port_mask)) {
			dev_dbg(adapter->dev,
				"info:  <--- Tx DONE Interrupt --->\n");
			adapter->data_sent = false;
		}
	}

	/* The firmware will not generate a download ready interrupt if the
	   only port updated is the command port, so the cmd_sent check must
	   be done for every SDIO interrupt. */
	if (adapter->cmd_sent) {
		/* Check if the firmware has attached a buffer at the command
		   port and update just that bit in wr_bitmap. */
		card->mp_wr_bitmap |=
			(u16) card->mp_regs[WR_BITMAP_L] & CTRL_PORT_MASK;
		if (card->mp_wr_bitmap & CTRL_PORT_MASK)
			adapter->cmd_sent = false;
	}

	dev_dbg(adapter->dev, "info: cmd_sent=%d data_sent=%d\n",
		adapter->cmd_sent, adapter->data_sent);
	if (sdio_ireg & UP_LD_HOST_INT_STATUS) {
		card->mp_rd_bitmap = ((u16) card->mp_regs[RD_BITMAP_U]) << 8;
		card->mp_rd_bitmap |= (u16) card->mp_regs[RD_BITMAP_L];
		dev_dbg(adapter->dev, "int: UPLD: rd_bitmap=0x%04x\n",
			card->mp_rd_bitmap);

		while (true) {
			ret = mwifiex_get_rd_port(adapter, &port);
			if (ret) {
				dev_dbg(adapter->dev,
					"info: no more rd_port available\n");
				break;
			}
			len_reg_l = RD_LEN_P0_L + (port << 1);
			len_reg_u = RD_LEN_P0_U + (port << 1);
			rx_len = ((u16) card->mp_regs[len_reg_u]) << 8;
			rx_len |= (u16) card->mp_regs[len_reg_l];
			dev_dbg(adapter->dev, "info: RX: port=%d rx_len=%u\n",
				port, rx_len);
			rx_blocks =
				(rx_len + MWIFIEX_SDIO_BLOCK_SIZE -
				 1) / MWIFIEX_SDIO_BLOCK_SIZE;
			if (rx_len <= INTF_HEADER_LEN ||
			    (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE) >
			     MWIFIEX_RX_DATA_BUF_SIZE) {
				dev_err(adapter->dev, "invalid rx_len=%d\n",
					rx_len);
				return -1;
			}
			rx_len = (u16) (rx_blocks * MWIFIEX_SDIO_BLOCK_SIZE);

			skb = dev_alloc_skb(rx_len);

			if (!skb) {
				dev_err(adapter->dev, "%s: failed to alloc skb",
					__func__);
				return -1;
			}

			skb_put(skb, rx_len);

			dev_dbg(adapter->dev, "info: rx_len = %d skb->len = %d\n",
				rx_len, skb->len);

			if (mwifiex_sdio_card_to_host_mp_aggr(adapter, skb,
							      port)) {
				u32 cr = 0;

				dev_err(adapter->dev, "card_to_host_mpa failed:"
					" int status=%#x\n", sdio_ireg);
				if (mwifiex_read_reg(adapter,
						     CONFIGURATION_REG, &cr))
					dev_err(adapter->dev,
						"read CFG reg failed\n");

				dev_dbg(adapter->dev,
					"info: CFG reg val = %d\n", cr);
				if (mwifiex_write_reg(adapter,
						      CONFIGURATION_REG,
						      (cr | 0x04)))
					dev_err(adapter->dev,
						"write CFG reg failed\n");

				dev_dbg(adapter->dev, "info: write success\n");
				if (mwifiex_read_reg(adapter,
						     CONFIGURATION_REG, &cr))
					dev_err(adapter->dev,
						"read CFG reg failed\n");

				dev_dbg(adapter->dev,
					"info: CFG reg val =%x\n", cr);
				return -1;
			}
		}
	}

	return 0;
}
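
The rx_blocks computation in the upload loop above is the standard round-up division idiom, after which rx_len is padded back up to a whole number of SDIO blocks before the transfer. A standalone sketch (the block size value is illustrative, standing in for MWIFIEX_SDIO_BLOCK_SIZE):

#include <stdio.h>

#define BLOCK_SIZE 256u	/* illustrative SDIO block size */

int main(void)
{
	unsigned int rx_len = 600;

	/* Round up to a whole number of blocks... */
	unsigned int rx_blocks = (rx_len + BLOCK_SIZE - 1) / BLOCK_SIZE;

	/* ...then pad the transfer length to block granularity. */
	unsigned int padded = rx_blocks * BLOCK_SIZE;

	printf("%u bytes -> %u blocks -> %u bytes\n", rx_len, rx_blocks, padded);
	/* prints: 600 bytes -> 3 blocks -> 768 bytes */
	return 0;
}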
Beispiel #22
0
int ip6_append_data(struct sock *sk, int getfrag(void *from, char *to,
	int offset, int len, int odd, struct sk_buff *skb),
	void *from, int length, int transhdrlen,
	int hlimit, int tclass, struct ipv6_txoptions *opt, struct flowi6 *fl6,
	struct rt6_info *rt, unsigned int flags, int dontfrag)
{
	struct inet_sock *inet = inet_sk(sk);
	struct ipv6_pinfo *np = inet6_sk(sk);
	struct inet_cork *cork;
	struct sk_buff *skb;
	unsigned int maxfraglen, fragheaderlen;
	int exthdrlen;
	int hh_len;
	int mtu;
	int copy;
	int err;
	int offset = 0;
	int csummode = CHECKSUM_NONE;
	__u8 tx_flags = 0;

	if (flags&MSG_PROBE)
		return 0;
	cork = &inet->cork.base;
	if (skb_queue_empty(&sk->sk_write_queue)) {
		/*
		 * setup for corking
		 */
		if (opt) {
			if (WARN_ON(np->cork.opt))
				return -EINVAL;

			np->cork.opt = kmalloc(opt->tot_len, sk->sk_allocation);
			if (unlikely(np->cork.opt == NULL))
				return -ENOBUFS;

			np->cork.opt->tot_len = opt->tot_len;
			np->cork.opt->opt_flen = opt->opt_flen;
			np->cork.opt->opt_nflen = opt->opt_nflen;

			np->cork.opt->dst0opt = ip6_opt_dup(opt->dst0opt,
							    sk->sk_allocation);
			if (opt->dst0opt && !np->cork.opt->dst0opt)
				return -ENOBUFS;

			np->cork.opt->dst1opt = ip6_opt_dup(opt->dst1opt,
							    sk->sk_allocation);
			if (opt->dst1opt && !np->cork.opt->dst1opt)
				return -ENOBUFS;

			np->cork.opt->hopopt = ip6_opt_dup(opt->hopopt,
							   sk->sk_allocation);
			if (opt->hopopt && !np->cork.opt->hopopt)
				return -ENOBUFS;

			np->cork.opt->srcrt = ip6_rthdr_dup(opt->srcrt,
							    sk->sk_allocation);
			if (opt->srcrt && !np->cork.opt->srcrt)
				return -ENOBUFS;

			/* need source address above miyazawa */
		}
		dst_hold(&rt->dst);
		cork->dst = &rt->dst;
		inet->cork.fl.u.ip6 = *fl6;
		np->cork.hop_limit = hlimit;
		np->cork.tclass = tclass;
		mtu = np->pmtudisc == IPV6_PMTUDISC_PROBE ?
		      rt->dst.dev->mtu : dst_mtu(rt->dst.path);
		if (np->frag_size < mtu) {
			if (np->frag_size)
				mtu = np->frag_size;
		}
		cork->fragsize = mtu;
		if (dst_allfrag(rt->dst.path))
			cork->flags |= IPCORK_ALLFRAG;
		cork->length = 0;
		sk->sk_sndmsg_page = NULL;
		sk->sk_sndmsg_off = 0;
		exthdrlen = rt->dst.header_len + (opt ? opt->opt_flen : 0) -
			    rt->rt6i_nfheader_len;
		length += exthdrlen;
		transhdrlen += exthdrlen;
	} else {
		rt = (struct rt6_info *)cork->dst;
		fl6 = &inet->cork.fl.u.ip6;
		opt = np->cork.opt;
		transhdrlen = 0;
		exthdrlen = 0;
		mtu = cork->fragsize;
	}

	hh_len = LL_RESERVED_SPACE(rt->dst.dev);

	fragheaderlen = sizeof(struct ipv6hdr) + rt->rt6i_nfheader_len +
			(opt ? opt->opt_nflen : 0);
	maxfraglen = ((mtu - fragheaderlen) & ~7) + fragheaderlen - sizeof(struct frag_hdr);

	if (mtu <= sizeof(struct ipv6hdr) + IPV6_MAXPLEN) {
		if (cork->length + length > sizeof(struct ipv6hdr) + IPV6_MAXPLEN - fragheaderlen) {
			ipv6_local_error(sk, EMSGSIZE, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}
	}

	/* For UDP, check if TX timestamp is enabled */
	if (sk->sk_type == SOCK_DGRAM) {
		err = sock_tx_timestamp(sk, &tx_flags);
		if (err)
			goto error;
	}

	/*
	 * Let's try using as much space as possible.
	 * Use MTU if total length of the message fits into the MTU.
	 * Otherwise, we need to reserve fragment header and
	 * fragment alignment (= 8-15 octets, in total).
	 *
	 * Note that we may need to "move" the data from the tail
	 * of the buffer to the new fragment when we split
	 * the message.
	 *
	 * FIXME: It may be fragmented into multiple chunks
	 *        at once if non-fragmentable extension headers
	 *        are too large.
	 * --yoshfuji
	 */

	cork->length += length;
	if (length > mtu) {
		int proto = sk->sk_protocol;
		if (dontfrag && (proto == IPPROTO_UDP || proto == IPPROTO_RAW)) {
			ipv6_local_rxpmtu(sk, fl6, mtu-exthdrlen);
			return -EMSGSIZE;
		}

		if (proto == IPPROTO_UDP &&
		    (rt->dst.dev->features & NETIF_F_UFO)) {

			err = ip6_ufo_append_data(sk, getfrag, from, length,
						  hh_len, fragheaderlen,
						  transhdrlen, mtu, flags);
			if (err)
				goto error;
			return 0;
		}
	}

	if ((skb = skb_peek_tail(&sk->sk_write_queue)) == NULL)
		goto alloc_new_skb;

	while (length > 0) {
		/* Check if the remaining data fits into current packet. */
		copy = (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - skb->len;
		if (copy < length)
			copy = maxfraglen - skb->len;

		if (copy <= 0) {
			char *data;
			unsigned int datalen;
			unsigned int fraglen;
			unsigned int fraggap;
			unsigned int alloclen;
			struct sk_buff *skb_prev;
alloc_new_skb:
			skb_prev = skb;

			/* There's no room in the current skb */
			if (skb_prev)
				fraggap = skb_prev->len - maxfraglen;
			else
				fraggap = 0;

			/*
			 * If remaining data exceeds the mtu,
			 * we know we need more fragment(s).
			 */
			datalen = length + fraggap;
			if (datalen > (cork->length <= mtu && !(cork->flags & IPCORK_ALLFRAG) ? mtu : maxfraglen) - fragheaderlen)
				datalen = maxfraglen - fragheaderlen;

			fraglen = datalen + fragheaderlen;
			if ((flags & MSG_MORE) &&
			    !(rt->dst.dev->features&NETIF_F_SG))
				alloclen = mtu;
			else
				alloclen = datalen + fragheaderlen;

			/*
			 * The last fragment gets additional space at tail.
			 * Note: we overallocate on fragments with MSG_MORE
			 * because we have no idea if we're the last one.
			 */
			if (datalen == length + fraggap)
				alloclen += rt->dst.trailer_len;

			/*
			 * We just reserve space for fragment header.
			 * Note: this may be overallocation if the message
			 * (without MSG_MORE) fits into the MTU.
			 */
			alloclen += sizeof(struct frag_hdr);

			if (transhdrlen) {
				skb = sock_alloc_send_skb(sk,
						alloclen + hh_len,
						(flags & MSG_DONTWAIT), &err);
			} else {
				skb = NULL;
				if (atomic_read(&sk->sk_wmem_alloc) <=
				    2 * sk->sk_sndbuf)
					skb = sock_wmalloc(sk,
							   alloclen + hh_len, 1,
							   sk->sk_allocation);
				if (unlikely(skb == NULL))
					err = -ENOBUFS;
				else {
					/* Only the initial fragment
					 * is time stamped.
					 */
					tx_flags = 0;
				}
			}
			if (skb == NULL)
				goto error;
			/*
			 *	Fill in the control structures
			 */
			skb->ip_summed = csummode;
			skb->csum = 0;
			/* reserve for fragmentation */
			skb_reserve(skb, hh_len+sizeof(struct frag_hdr));

			if (sk->sk_type == SOCK_DGRAM)
				skb_shinfo(skb)->tx_flags = tx_flags;

			/*
			 *	Find where to start putting bytes
			 */
			data = skb_put(skb, fraglen);
			skb_set_network_header(skb, exthdrlen);
			data += fragheaderlen;
			skb->transport_header = (skb->network_header +
						 fragheaderlen);
			if (fraggap) {
				skb->csum = skb_copy_and_csum_bits(
					skb_prev, maxfraglen,
					data + transhdrlen, fraggap, 0);
				skb_prev->csum = csum_sub(skb_prev->csum,
							  skb->csum);
				data += fraggap;
				pskb_trim_unique(skb_prev, maxfraglen);
			}
			copy = datalen - transhdrlen - fraggap;
			if (copy < 0) {
				err = -EINVAL;
				kfree_skb(skb);
				goto error;
			} else if (copy > 0 && getfrag(from, data + transhdrlen, offset, copy, fraggap, skb) < 0) {
				err = -EFAULT;
				kfree_skb(skb);
				goto error;
			}

			offset += copy;
			length -= datalen - fraggap;
			transhdrlen = 0;
			exthdrlen = 0;
			csummode = CHECKSUM_NONE;

			/*
			 * Put the packet on the pending queue
			 */
			__skb_queue_tail(&sk->sk_write_queue, skb);
			continue;
		}

		if (copy > length)
			copy = length;

		if (!(rt->dst.dev->features&NETIF_F_SG)) {
			unsigned int off;

			off = skb->len;
			if (getfrag(from, skb_put(skb, copy),
						offset, copy, off, skb) < 0) {
				__skb_trim(skb, off);
				err = -EFAULT;
				goto error;
			}
		} else {
			int i = skb_shinfo(skb)->nr_frags;
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i-1];
			struct page *page = sk->sk_sndmsg_page;
			int off = sk->sk_sndmsg_off;
			unsigned int left;

			if (page && (left = PAGE_SIZE - off) > 0) {
				if (copy >= left)
					copy = left;
				if (page != frag->page) {
					if (i == MAX_SKB_FRAGS) {
						err = -EMSGSIZE;
						goto error;
					}
					get_page(page);
					skb_fill_page_desc(skb, i, page, sk->sk_sndmsg_off, 0);
					frag = &skb_shinfo(skb)->frags[i];
				}
			} else if (i < MAX_SKB_FRAGS) {
				if (copy > PAGE_SIZE)
					copy = PAGE_SIZE;
				page = alloc_pages(sk->sk_allocation, 0);
				if (page == NULL) {
					err = -ENOMEM;
					goto error;
				}
				sk->sk_sndmsg_page = page;
				sk->sk_sndmsg_off = 0;

				skb_fill_page_desc(skb, i, page, 0, 0);
				frag = &skb_shinfo(skb)->frags[i];
			} else {
				err = -EMSGSIZE;
				goto error;
			}
			if (getfrag(from, page_address(frag->page) +
					frag->page_offset + frag->size,
				    offset, copy, skb->len, skb) < 0) {
				err = -EFAULT;
				goto error;
			}
			sk->sk_sndmsg_off += copy;
			frag->size += copy;
			skb->len += copy;
			skb->data_len += copy;
			skb->truesize += copy;
			atomic_add(copy, &sk->sk_wmem_alloc);
		}
		offset += copy;
		length -= copy;
	}
	return 0;
error:
	cork->length -= length;
	IP6_INC_STATS(sock_net(sk), rt->rt6i_idev, IPSTATS_MIB_OUTDISCARDS);
	return err;
}
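
maxfraglen, computed near the top of this function, caps how far each queued skb may grow: the MTU minus the per-fragment headers is truncated to an 8-byte boundary, the header length is added back, and the fragment header that every fragment will eventually need is subtracted. A hedged sketch of that expression with illustrative values:

#include <stdio.h>

#define FRAG_HDR_LEN 8	/* sizeof(struct frag_hdr) */

int main(void)
{
	unsigned int mtu = 1500;
	unsigned int fragheaderlen = 40;	/* IPv6 header plus nf headers */

	/* Mirrors: ((mtu - fragheaderlen) & ~7) + fragheaderlen
	 *          - sizeof(struct frag_hdr)                     */
	unsigned int maxfraglen = ((mtu - fragheaderlen) & ~7u)
				  + fragheaderlen - FRAG_HDR_LEN;

	printf("maxfraglen = %u\n", maxfraglen);	/* 1488 */
	return 0;
}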
Beispiel #23
0
void
isac_interrupt(struct IsdnCardState *cs, u_char val)
{
	u_char exval, v1;
	struct sk_buff *skb;
	unsigned int count;

	if (cs->debug & L1_DEB_ISAC)
		debugl1(cs, "ISAC interrupt %x", val);
	if (val & 0x80) {	/* RME */
		exval = cs->readisac(cs, ISAC_RSTA);
		if ((exval & 0x70) != 0x20) {
			if (exval & 0x40) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "ISAC RDO");
#ifdef ERROR_STATISTIC
				cs->err_rx++;
#endif
			}
			if (!(exval & 0x20)) {
				if (cs->debug & L1_DEB_WARN)
					debugl1(cs, "ISAC CRC error");
#ifdef ERROR_STATISTIC
				cs->err_crc++;
#endif
			}
			cs->writeisac(cs, ISAC_CMDR, 0x80);
		} else {
			count = cs->readisac(cs, ISAC_RBCL) & 0x1f;
			if (count == 0)
				count = 32;
			isac_empty_fifo(cs, count);
			if ((count = cs->rcvidx) > 0) {
				cs->rcvidx = 0;
				if (!(skb = alloc_skb(count, GFP_ATOMIC)))
					printk(KERN_WARNING "HiSax: D receive out of memory\n");
				else {
					memcpy(skb_put(skb, count), cs->rcvbuf, count);
					skb_queue_tail(&cs->rq, skb);
				}
			}
		}
		cs->rcvidx = 0;
		schedule_event(cs, D_RCVBUFREADY);
	}
	if (val & 0x40) {	/* RPF */
		isac_empty_fifo(cs, 32);
	}
	if (val & 0x20) {	/* RSC */
		/* never */
		if (cs->debug & L1_DEB_WARN)
			debugl1(cs, "ISAC RSC interrupt");
	}
	if (val & 0x10) {	/* XPR */
		if (test_and_clear_bit(FLG_DBUSY_TIMER, &cs->HW_Flags))
			del_timer(&cs->dbusytimer);
		if (test_and_clear_bit(FLG_L1_DBUSY, &cs->HW_Flags))
			schedule_event(cs, D_CLEARBUSY);
		if (cs->tx_skb) {
			if (cs->tx_skb->len) {
				isac_fill_fifo(cs);
				goto afterXPR;
			} else {
				dev_kfree_skb_irq(cs->tx_skb);
				cs->tx_cnt = 0;
				cs->tx_skb = NULL;
			}
		}
		if ((cs->tx_skb = skb_dequeue(&cs->sq))) {
			cs->tx_cnt = 0;
			isac_fill_fifo(cs);
		} else
			schedule_event(cs, D_XMTBUFREADY);
	}
      afterXPR:
	if (val & 0x04) {	/* CISQ */
		exval = cs->readisac(cs, ISAC_CIR0);
		if (cs->debug & L1_DEB_ISAC)
			debugl1(cs, "ISAC CIR0 %02X", exval );
		if (exval & 2) {
			cs->dc.isac.ph_state = (exval >> 2) & 0xf;
			if (cs->debug & L1_DEB_ISAC)
				debugl1(cs, "ph_state change %x", cs->dc.isac.ph_state);
			schedule_event(cs, D_L1STATECHANGE);
		}
	}
}
Beispiel #24
0
/*
 * Decode frames received on the B/D channel.
 * Note that this function will be called continuously,
 * with 64 kbit/s / 16 kbit/s of data, and hence it will be
 * called 50 times per second with 20 ISOC descriptors.
 * Called at interrupt level.
 */
static void usb_in_complete(struct urb *urb)
{
	struct st5481_in *in = urb->context;
	unsigned char *ptr;
	struct sk_buff *skb;
	int len, count, status;

	if (unlikely(urb->status < 0)) {
		switch (urb->status) {
		case -ENOENT:
		case -ESHUTDOWN:
		case -ECONNRESET:
			DBG(1, "urb killed status %d", urb->status);
			return; // Give up
		default:
			WARNING("urb status %d", urb->status);
			break;
		}
	}

	DBG_ISO_PACKET(0x80, urb);

	len = st5481_isoc_flatten(urb);
	ptr = urb->transfer_buffer;
	while (len > 0) {
		if (in->mode == L1_MODE_TRANS) {
			memcpy(in->rcvbuf, ptr, len);
			status = len;
			len = 0;
		} else {
			status = isdnhdlc_decode(&in->hdlc_state, ptr, len, &count,
				in->rcvbuf, in->bufsize);
			ptr += count;
			len -= count;
		}
		
		if (status > 0) {
			// Good frame received
			DBG(4,"count=%d",status);
			DBG_PACKET(0x400, in->rcvbuf, status);
			if (!(skb = dev_alloc_skb(status))) {
				WARNING("receive out of memory\n");
				break;
			}
			memcpy(skb_put(skb, status), in->rcvbuf, status);
			in->hisax_if->l1l2(in->hisax_if, PH_DATA | INDICATION, skb);
		} else if (status == -HDLC_CRC_ERROR) {
			INFO("CRC error");
		} else if (status == -HDLC_FRAMING_ERROR) {
			INFO("framing error");
		} else if (status == -HDLC_LENGTH_ERROR) {
			INFO("length error");
		}
	}

	// Prepare URB for next transfer
	urb->dev = in->adapter->usb_dev;
	urb->actual_length = 0;

	SUBMIT_URB(urb, GFP_ATOMIC);
}
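
The loop above drains the flattened ISOC buffer by repeatedly offering the remaining bytes to the HDLC decoder, advancing by however many it consumed and emitting a frame whenever the return value is positive. A toy sketch of that consume-and-advance pattern, using a hypothetical flag-delimited decoder in place of the real isdnhdlc API:

#include <stdio.h>
#include <string.h>

/* Hypothetical decoder: scans for a 0x7e frame delimiter, copies the frame
 * body into out, and reports how many input bytes it consumed. Returns the
 * frame length (> 0) on a complete frame, 0 if more input is needed. */
static int toy_decode(const unsigned char *in, int len, int *count,
		      unsigned char *out, int outsize)
{
	int i;

	for (i = 0; i < len; i++) {
		if (in[i] == 0x7e) {
			*count = i + 1;		/* consume body + delimiter */
			if (i > outsize)
				i = outsize;
			memcpy(out, in, i);
			return i;
		}
	}
	*count = len;				/* consumed all, no frame yet */
	return 0;
}

int main(void)
{
	unsigned char buf[] = { 'a', 'b', 0x7e, 'c', 'd', 'e', 0x7e };
	unsigned char frame[16];
	const unsigned char *ptr = buf;
	int len = sizeof(buf), count, status;

	while (len > 0) {
		status = toy_decode(ptr, len, &count, frame, sizeof(frame));
		ptr += count;
		len -= count;
		if (status > 0)
			printf("frame of %d bytes\n", status);
	}
	return 0;
}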
Beispiel #25
0
/* EIF(Error in FIFO/End in Frame) handler for FIR */
static void pxa_irda_fir_irq_eif(struct pxa_irda *si, struct net_device *dev, int icsr0)
{
	unsigned int len, stat, data;

	/* Get the current data position. */
	len = DTADR(si->rxdma) - si->dma_rx_buff_phy;

	do {
		/* Read Status, and then Data. 	 */
		stat = ICSR1;
		rmb();
		data = ICDR;

		if (stat & (ICSR1_CRE | ICSR1_ROR)) {
			dev->stats.rx_errors++;
			if (stat & ICSR1_CRE) {
				printk(KERN_DEBUG "pxa_ir: fir receive CRC error\n");
				dev->stats.rx_crc_errors++;
			}
			if (stat & ICSR1_ROR) {
				printk(KERN_DEBUG "pxa_ir: fir receive overrun\n");
				dev->stats.rx_over_errors++;
			}
		} else	{
			si->dma_rx_buff[len++] = data;
		}
		/* If we hit the end of frame, there's no point in continuing. */
		if (stat & ICSR1_EOF)
			break;
	} while (ICSR0 & ICSR0_EIF);

	if (stat & ICSR1_EOF) {
		/* end of frame. */
		struct sk_buff *skb;

		if (icsr0 & ICSR0_FRE) {
			printk(KERN_ERR "pxa_ir: dropping erroneous frame\n");
			dev->stats.rx_dropped++;
			return;
		}

		skb = alloc_skb(len + 1, GFP_ATOMIC);
		if (!skb)  {
			printk(KERN_ERR "pxa_ir: fir out of memory for receive skb\n");
			dev->stats.rx_dropped++;
			return;
		}

		/* Align IP header to 20 bytes  */
		skb_reserve(skb, 1);
		skb_copy_to_linear_data(skb, si->dma_rx_buff, len);
		skb_put(skb, len);

		/* Feed it to IrLAP  */
		skb->dev = dev;
		skb_reset_mac_header(skb);
		skb->protocol = htons(ETH_P_IRDA);
		netif_rx(skb);

		dev->stats.rx_packets++;
		dev->stats.rx_bytes += len;
	}
}
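
The receive path above relies on skb_put() only moving the tail pointer, which is why copying the payload before calling skb_put() is harmless, and on skb_reserve() shifting both data and tail to create alignment headroom. A runnable toy model of that data/tail accounting (toy_skb and its helpers are illustrative, not kernel API):

#include <stdio.h>
#include <string.h>

/* Toy model of sk_buff data/tail accounting. */
struct toy_skb {
	unsigned char buf[64];
	unsigned char *data;	/* start of payload */
	unsigned char *tail;	/* end of payload */
};

static void toy_reserve(struct toy_skb *s, int n)
{
	s->data += n;		/* shift both pointers: headroom */
	s->tail += n;
}

static unsigned char *toy_put(struct toy_skb *s, int n)
{
	unsigned char *old = s->tail;
	s->tail += n;		/* only moves the tail pointer */
	return old;
}

int main(void)
{
	struct toy_skb s;
	const char payload[] = "frame";

	s.data = s.tail = s.buf;
	toy_reserve(&s, 1);		/* align a later header */
	memcpy(s.data, payload, 5);	/* copy into the linear area... */
	toy_put(&s, 5);			/* ...then account for the bytes */

	printf("len=%ld offset=%ld\n",
	       (long)(s.tail - s.data), (long)(s.data - s.buf));
	return 0;
}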
Beispiel #26
0
static int btusb_probe(struct usb_interface *intf,
				const struct usb_device_id *id)
{
	struct usb_endpoint_descriptor *ep_desc;
	struct btusb_data *data;
	struct hci_dev *hdev;
	int i, err;

	BT_DBG("intf %p id %p", intf, id);

	/* interface numbers are hardcoded in the spec */
	if (intf->cur_altsetting->desc.bInterfaceNumber != 0)
		return -ENODEV;

	if (!id->driver_info) {
		const struct usb_device_id *match;
		match = usb_match_id(intf, blacklist_table);
		if (match)
			id = match;
	}

	if (id->driver_info == BTUSB_IGNORE)
		return -ENODEV;

	if (ignore_dga && id->driver_info & BTUSB_DIGIANSWER)
		return -ENODEV;

	if (ignore_csr && id->driver_info & BTUSB_CSR)
		return -ENODEV;

	if (ignore_sniffer && id->driver_info & BTUSB_SNIFFER)
		return -ENODEV;

	if (id->driver_info & BTUSB_ATH3012) {
		struct usb_device *udev = interface_to_usbdev(intf);

		/* Old firmware would otherwise let ath3k driver load
		 * patch and sysconfig files */
		if (le16_to_cpu(udev->descriptor.bcdDevice) <= 0x0001)
			return -ENODEV;
	}

	data = kzalloc(sizeof(*data), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	for (i = 0; i < intf->cur_altsetting->desc.bNumEndpoints; i++) {
		ep_desc = &intf->cur_altsetting->endpoint[i].desc;

		if (!data->intr_ep && usb_endpoint_is_int_in(ep_desc)) {
			data->intr_ep = ep_desc;
			continue;
		}

		if (!data->bulk_tx_ep && usb_endpoint_is_bulk_out(ep_desc)) {
			data->bulk_tx_ep = ep_desc;
			continue;
		}

		if (!data->bulk_rx_ep && usb_endpoint_is_bulk_in(ep_desc)) {
			data->bulk_rx_ep = ep_desc;
			continue;
		}
	}

	if (!data->intr_ep || !data->bulk_tx_ep || !data->bulk_rx_ep) {
		kfree(data);
		return -ENODEV;
	}

	data->cmdreq_type = USB_TYPE_CLASS;

	data->udev = interface_to_usbdev(intf);
	data->intf = intf;

	spin_lock_init(&data->lock);

	INIT_WORK(&data->work, btusb_work);
	INIT_WORK(&data->waker, btusb_waker);
	spin_lock_init(&data->txlock);

	init_usb_anchor(&data->tx_anchor);
	init_usb_anchor(&data->intr_anchor);
	init_usb_anchor(&data->bulk_anchor);
	init_usb_anchor(&data->isoc_anchor);
	init_usb_anchor(&data->deferred);

	hdev = hci_alloc_dev();
	if (!hdev) {
		kfree(data);
		return -ENOMEM;
	}

	hdev->bus = HCI_USB;
	hdev->driver_data = data;

	data->hdev = hdev;

	SET_HCIDEV_DEV(hdev, &intf->dev);

	hdev->open     = btusb_open;
	hdev->close    = btusb_close;
	hdev->flush    = btusb_flush;
	hdev->send     = btusb_send_frame;
	hdev->destruct = btusb_destruct;
	hdev->notify   = btusb_notify;

	hdev->owner = THIS_MODULE;

	/* Interface numbers are hardcoded in the specification */
	data->isoc = usb_ifnum_to_if(data->udev, 1);

	if (!reset)
		set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);

	if (force_scofix || id->driver_info & BTUSB_WRONG_SCO_MTU) {
		if (!disable_scofix)
			set_bit(HCI_QUIRK_FIXUP_BUFFER_SIZE, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_BROKEN_ISOC)
		data->isoc = NULL;

	if (id->driver_info & BTUSB_DIGIANSWER) {
		data->cmdreq_type = USB_TYPE_VENDOR;
		set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_CSR) {
		struct usb_device *udev = data->udev;

		/* Old firmware would otherwise execute USB reset */
		if (le16_to_cpu(udev->descriptor.bcdDevice) < 0x117)
			set_bit(HCI_QUIRK_NO_RESET, &hdev->quirks);
	}

	if (id->driver_info & BTUSB_SNIFFER) {
		struct usb_device *udev = data->udev;

		/* New sniffer firmware has crippled HCI interface */
		if (le16_to_cpu(udev->descriptor.bcdDevice) > 0x997)
			set_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks);

		data->isoc = NULL;
	}

	if (id->driver_info & BTUSB_BCM92035) {
		unsigned char cmd[] = { 0x3b, 0xfc, 0x01, 0x00 };
		struct sk_buff *skb;

		skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL);
		if (skb) {
			memcpy(skb_put(skb, sizeof(cmd)), cmd, sizeof(cmd));
			skb_queue_tail(&hdev->driver_init, skb);
		}
	}

	if (data->isoc) {
		err = usb_driver_claim_interface(&btusb_driver,
							data->isoc, data);
		if (err < 0) {
			hci_free_dev(hdev);
			kfree(data);
			return err;
		}
	}

	err = hci_register_dev(hdev);
	if (err < 0) {
		hci_free_dev(hdev);
		kfree(data);
		return err;
	}

	usb_set_intfdata(intf, data);

	return 0;
}
Beispiel #27
0
int capi_select_proto_req(struct pcbit_chan *chan, struct sk_buff **skb, 
                          int outgoing)
{

        /*
         * 18 bytes
         */

	if ((*skb = dev_alloc_skb(18)) == NULL) {
    
		printk(KERN_WARNING "capi_select_proto_req: alloc_skb failed\n");
		return -1;
	}

        *((ushort *)skb_put(*skb, 2)) = chan->callref;

        /* Layer2 protocol */

        switch (chan->proto) {
        case ISDN_PROTO_L2_X75I: 
                *(skb_put(*skb, 1)) = 0x05;            /* LAPB */
                break;
        case ISDN_PROTO_L2_HDLC:
                *(skb_put(*skb, 1)) = 0x02;
                break;
	case ISDN_PROTO_L2_TRANS:
		/* 
		 *	Voice (a-law)
		 */
		*(skb_put(*skb, 1)) = 0x06;
		break;
        default:
#ifdef DEBUG 
                printk(KERN_DEBUG "Transparent\n");
#endif
                *(skb_put(*skb, 1)) = 0x03;
                break;
        }

        *(skb_put(*skb, 1)) = (outgoing ? 0x02 : 0x42);    /* Don't ask */
        *(skb_put(*skb, 1)) = 0x00;
  
        *((ushort *) skb_put(*skb, 2)) = MRU;

 
        *(skb_put(*skb, 1)) = 0x08;           /* Modulo */
        *(skb_put(*skb, 1)) = 0x07;           /* Max Window */
  
        *(skb_put(*skb, 1)) = 0x01;           /* No Layer3 Protocol */

        /*
         * 2 - layer3 MTU       [10]
         *   - Modulo           [12]
         *   - Window           
         *   - layer1 proto     [14]
         *   - bitrate
         *   - sub-channel      [16]
         *   - layer1dataformat [17]
         */

        memset(skb_put(*skb, 8), 0, 8);

        return 18;
}
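
The function above assembles its fixed 18-byte request with one skb_put() per field, each call claiming space and advancing the write cursor. A standalone sketch of the same cursor-based builder (field values follow the LAPB branch above; the callref and MRU values are illustrative, and put_u16 writes in host byte order just as the original casts do):

#include <stdio.h>
#include <string.h>

static unsigned char msg[18];
static int off;

/* Each helper claims space and advances the write offset, like skb_put(). */
static void put_u8(unsigned char v)   { msg[off++] = v; }
static void put_u16(unsigned short v) { memcpy(msg + off, &v, 2); off += 2; }

int main(void)
{
	put_u16(0x1234);	/* callref (illustrative) */
	put_u8(0x05);		/* layer-2 protocol: LAPB */
	put_u8(0x02);		/* outgoing */
	put_u8(0x00);
	put_u16(2048);		/* MRU (illustrative) */
	put_u8(0x08);		/* modulo */
	put_u8(0x07);		/* max window */
	put_u8(0x01);		/* no layer-3 protocol */
	memset(msg + off, 0, 8);	/* trailing parameters */
	off += 8;

	printf("built %d bytes\n", off);	/* 18, matching the return value */
	return 0;
}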
Beispiel #28
0
void
hycapi_rx_capipkt(hysdn_card * card, unsigned char *buf, unsigned short len)
{
    struct sk_buff *skb;
    hycapictrl_info *cinfo = card->hyctrlinfo;
    struct capi_ctr *ctrl;
    __u16 ApplId;
    __u16 MsgLen, info;
    __u16 len2, CapiCmd;
    __u32 CP64[2] = {0,0};
#ifdef HYCAPI_PRINTFNAMES
    printk(KERN_NOTICE "hycapi_rx_capipkt\n");    
#endif
    if(!cinfo) {
        return;
    }
    ctrl = &cinfo->capi_ctrl;
    if(len < CAPI_MSG_BASELEN) {
        printk(KERN_ERR "HYSDN Card%d: invalid CAPI-message, length %d!\n",
               card->myid, len);
        return;
    }    
    MsgLen = CAPIMSG_LEN(buf);
    ApplId = CAPIMSG_APPID(buf);
    CapiCmd = CAPIMSG_CMD(buf);
    
    if((CapiCmd == CAPI_DATA_B3_IND) && (MsgLen < 30)) {
        len2 = len + (30 - MsgLen);
        if (!(skb = alloc_skb(len2, GFP_ATOMIC))) {
            printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
                   card->myid);
            return;
        }
        memcpy(skb_put(skb, MsgLen), buf, MsgLen);
        memcpy(skb_put(skb, 2*sizeof(__u32)), CP64, 2* sizeof(__u32));
        memcpy(skb_put(skb, len - MsgLen), buf + MsgLen,
               len - MsgLen);
        CAPIMSG_SETLEN(skb->data, 30);
    } else {
        if (!(skb = alloc_skb(len, GFP_ATOMIC))) {
            printk(KERN_ERR "HYSDN Card%d: incoming packet dropped\n",
                   card->myid);
            return;
        }
        memcpy(skb_put(skb, len), buf, len);
    }
    switch(CAPIMSG_CMD(skb->data)) 
    {
        case CAPI_CONNECT_B3_CONF:
/* Check info-field for error-indication: */
            info = CAPIMSG_U16(skb->data, 12);
            switch(info)
            {
                case 0:
                    capilib_new_ncci(&cinfo->ncci_head, ApplId, CAPIMSG_NCCI(skb->data), 
                             hycapi_applications[ApplId-1].rp.datablkcnt); 
                    
                    break;
                case 0x0001:
                    printk(KERN_ERR "HYSDN Card%d: NCPI not supported by current "
                           "protocol. NCPI ignored.\n", card->myid);
                    break;
                case 0x2001:
                    printk(KERN_ERR "HYSDN Card%d: Message not supported in"
                           " current state\n", card->myid);
                    break;
                case 0x2002:
                    printk(KERN_ERR "HYSDN Card%d: invalid PLCI\n", card->myid);
                    break;        
                case 0x2004:
                    printk(KERN_ERR "HYSDN Card%d: out of NCCI\n", card->myid);
                    break;                
                case 0x3008:
                    printk(KERN_ERR "HYSDN Card%d: NCPI not supported\n", 
                           card->myid);
                    break;    
                default:
                    printk(KERN_ERR "HYSDN Card%d: Info in CONNECT_B3_CONF: %d\n", 
                           card->myid, info);
                    break;            
            }
            break;
        case CAPI_CONNECT_B3_IND:
            capilib_new_ncci(&cinfo->ncci_head, ApplId, 
                     CAPIMSG_NCCI(skb->data), 
                     hycapi_applications[ApplId-1].rp.datablkcnt);
            break;
        case CAPI_DATA_B3_CONF:
            capilib_data_b3_conf(&cinfo->ncci_head, ApplId,
                         CAPIMSG_NCCI(skb->data),
                         CAPIMSG_MSGID(skb->data));
            break;
        default:
            break;
    }
    capi_ctr_handle_message(ctrl, ApplId, skb);
}
Beispiel #29
0
static void rx_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct sk_buff	*skb = req->context;
	struct eth_dev	*dev = ep->driver_data;
	int		status = req->status;
	bool		queue = 0;

	switch (status) {

	/* normal completion */
	case 0:
		skb_put(skb, req->actual);

		if (dev->unwrap) {
			unsigned long	flags;

			spin_lock_irqsave(&dev->lock, flags);
			if (dev->port_usb) {
				status = dev->unwrap(dev->port_usb,
							skb,
							&dev->rx_frames);
				if (status == -EINVAL)
					dev->net->stats.rx_errors++;
				else if (status == -EOVERFLOW)
					dev->net->stats.rx_over_errors++;
			} else {
				dev_kfree_skb_any(skb);
				status = -ENOTCONN;
			}
			spin_unlock_irqrestore(&dev->lock, flags);
		} else {
			skb_queue_tail(&dev->rx_frames, skb);
		}

		if (!status)
			queue = 1;
		break;

	/* software-driven interface shutdown */
	case -ECONNRESET:		/* unlink */
	case -ESHUTDOWN:		/* disconnect etc */
		VDBG(dev, "rx shutdown, code %d\n", status);
		goto quiesce;

	/* for hardware automagic (such as pxa) */
	case -ECONNABORTED:		/* endpoint reset */
		DBG(dev, "rx %s reset\n", ep->name);
		defer_kevent(dev, WORK_RX_MEMORY);
quiesce:
		dev_kfree_skb_any(skb);
		goto clean;

	/* data overrun */
	case -EOVERFLOW:
		dev->net->stats.rx_over_errors++;
		/* FALLTHROUGH */

	default:
		queue = 1;
		dev_kfree_skb_any(skb);
		dev->net->stats.rx_errors++;
		DBG(dev, "rx status %d\n", status);
		break;
	}

clean:
	spin_lock(&dev->req_lock);
	list_add(&req->list, &dev->rx_reqs);
	spin_unlock(&dev->req_lock);

	if (queue)
		queue_work(uether_wq, &dev->rx_work);
}
static bool pio_rx_frame(struct b43_pio_rxqueue *q)
{
	struct b43_wldev *dev = q->dev;
	struct b43_wl *wl = dev->wl;
	u16 len;
	u32 macstat = 0;
	unsigned int i, padding;
	struct sk_buff *skb;
	const char *err_msg = NULL;
	struct b43_rxhdr_fw4 *rxhdr =
		(struct b43_rxhdr_fw4 *)wl->pio_scratchspace;
	size_t rxhdr_size = sizeof(*rxhdr);

	BUILD_BUG_ON(sizeof(wl->pio_scratchspace) < sizeof(*rxhdr));
	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		rxhdr_size -= sizeof(rxhdr->format_598) -
			sizeof(rxhdr->format_351);
		break;
	case B43_FW_HDR_598:
		break;
	}
	memset(rxhdr, 0, rxhdr_size);

	/* Poll the RX control register until a received frame is ready. */
	if (q->rev >= 8) {
		u32 ctl;

		ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
		if (!(ctl & B43_PIO8_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write32(q, B43_PIO8_RXCTL,
				  B43_PIO8_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read32(q, B43_PIO8_RXCTL);
			if (ctl & B43_PIO8_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	} else {
		u16 ctl;

		ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
		if (!(ctl & B43_PIO_RXCTL_FRAMERDY))
			return 0;
		b43_piorx_write16(q, B43_PIO_RXCTL,
				  B43_PIO_RXCTL_FRAMERDY);
		for (i = 0; i < 10; i++) {
			ctl = b43_piorx_read16(q, B43_PIO_RXCTL);
			if (ctl & B43_PIO_RXCTL_DATARDY)
				goto data_ready;
			udelay(10);
		}
	}
	b43dbg(q->dev->wl, "PIO RX timed out\n");
	return 1;
data_ready:

	/* Read the RX header (preamble) out of the FIFO. */
	if (q->rev >= 8) {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
	} else {
		b43_block_read(dev, rxhdr, rxhdr_size,
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
	}
	/* Sanity-check the frame length from the RX header. */
	len = le16_to_cpu(rxhdr->frame_len);
	if (unlikely(len > 0x700)) {
		err_msg = "len > 0x700";
		goto rx_error;
	}
	if (unlikely(len == 0)) {
		err_msg = "len == 0";
		goto rx_error;
	}

	switch (dev->fw.hdr_format) {
	case B43_FW_HDR_598:
		macstat = le32_to_cpu(rxhdr->format_598.mac_status);
		break;
	case B43_FW_HDR_410:
	case B43_FW_HDR_351:
		macstat = le32_to_cpu(rxhdr->format_351.mac_status);
		break;
	}
	if (macstat & B43_RX_MAC_FCSERR) {
		if (!(q->dev->wl->filter_flags & FIF_FCSFAIL)) {
			/* Drop frames with a failed FCS. */
			err_msg = "Frame FCS error";
			goto rx_error;
		}
	}

	padding = (macstat & B43_RX_MAC_PADDING) ? 2 : 0;
	skb = dev_alloc_skb(len + padding + 2);
	if (unlikely(!skb)) {
		err_msg = "Out of memory";
		goto rx_error;
	}
	skb_reserve(skb, 2);
	skb_put(skb, len + padding);
	if (q->rev >= 8) {
		b43_block_read(dev, skb->data + padding, (len & ~3),
			       q->mmio_base + B43_PIO8_RXDATA,
			       sizeof(u32));
		if (len & 3) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 4);

			/* Read the last few bytes as one final word. */
			b43_block_read(dev, tail, 4,
				       q->mmio_base + B43_PIO8_RXDATA,
				       sizeof(u32));
			switch (len & 3) {
			case 3:
				skb->data[len + padding - 3] = tail[0];
				skb->data[len + padding - 2] = tail[1];
				skb->data[len + padding - 1] = tail[2];
				break;
			case 2:
				skb->data[len + padding - 2] = tail[0];
				skb->data[len + padding - 1] = tail[1];
				break;
			case 1:
				skb->data[len + padding - 1] = tail[0];
				break;
			}
		}
	} else {
		b43_block_read(dev, skb->data + padding, (len & ~1),
			       q->mmio_base + B43_PIO_RXDATA,
			       sizeof(u16));
		if (len & 1) {
			u8 *tail = wl->pio_tailspace;
			BUILD_BUG_ON(sizeof(wl->pio_tailspace) < 2);

			/* Read the final odd byte as one more halfword. */
			b43_block_read(dev, tail, 2,
				       q->mmio_base + B43_PIO_RXDATA,
				       sizeof(u16));
			skb->data[len + padding - 1] = tail[0];
		}
	}

	b43_rx(q->dev, skb, rxhdr);

	return 1;

rx_error:
	if (err_msg)
		b43dbg(q->dev->wl, "PIO RX error: %s\n", err_msg);
	if (q->rev >= 8)
		b43_piorx_write32(q, B43_PIO8_RXCTL, B43_PIO8_RXCTL_DATARDY);
	else
		b43_piorx_write16(q, B43_PIO_RXCTL, B43_PIO_RXCTL_DATARDY);

	return 1;
}
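
The tail handling above reads whole FIFO words for len & ~3 bytes, then fetches one final word and keeps only the len & 3 leftover bytes. A userspace sketch of that split (memcpy stands in for b43_block_read(); like the hardware FIFO, the tail step always fetches a full word):

#include <stdio.h>
#include <string.h>

/* Copy len bytes from a word-granular FIFO: word-sized portion first,
 * then one more word from which only the remainder bytes are kept. */
static void copy_from_fifo(unsigned char *dst, const unsigned char *fifo,
			   unsigned int len)
{
	unsigned int whole = len & ~3u;	/* word-aligned portion */
	unsigned int rem = len & 3u;	/* 0..3 leftover bytes */
	unsigned char tail[4];

	memcpy(dst, fifo, whole);
	if (rem) {
		memcpy(tail, fifo + whole, 4);	/* fetch one full word */
		memcpy(dst + whole, tail, rem);	/* keep only rem bytes */
	}
}

int main(void)
{
	unsigned char fifo[12] = "abcdefghijk";	/* backing data */
	unsigned char out[12] = { 0 };

	copy_from_fifo(out, fifo, 7);	/* 4 whole + 3 tail bytes */
	printf("%.7s\n", out);		/* abcdefg */
	return 0;
}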