static void ipt_ulog_packet(unsigned int hooknum,
			    const struct sk_buff *skb,
			    const struct net_device *in,
			    const struct net_device *out,
			    const struct ipt_ulog_info *loginfo,
			    const char *prefix)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	struct timeval tv;

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if (loginfo->copy_range == 0 || loginfo->copy_range > skb->len)
		copy_len = skb->len;
	else
		copy_len = loginfo->copy_range;

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);

	ub = &ulog_buffers[groupnum];

	spin_lock_bh(&ulog_lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	pr_debug("ipt_ULOG: qlen %d, qthreshold %zu\n", ub->qlen,
		 loginfo->qthreshold);

	/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT,
			sizeof(*pm)+copy_len);
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	/* We might not have a timestamp, get one */
	if (skb->tstamp.tv64 == 0)
		__net_timestamp((struct sk_buff *)skb);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	tv = ktime_to_timeval(skb->tstamp);
	put_unaligned(tv.tv_sec, &pm->timestamp_sec);
	put_unaligned(tv.tv_usec, &pm->timestamp_usec);
	put_unaligned(skb->mark, &pm->mark);
	pm->hook = hooknum;
	if (prefix != NULL)
		strncpy(pm->prefix, prefix, sizeof(pm->prefix));
	else if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';

	if (in && in->hard_header_len > 0 &&
	    skb->mac_header != skb->network_header &&
	    in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, skb_mac_header(skb), in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	/* copy_len <= skb->len, so can't fail. */
	if (skb_copy_bits(skb, 0, pm->payload, copy_len) < 0)
		BUG();

	/* check if we are building multi-part messages */
	if (ub->qlen > 1)
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout * HZ / 100;
		add_timer(&ub->timer);
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(groupnum);
	}

	spin_unlock_bh(&ulog_lock);

	return;

nlmsg_failure:
	PRINTR("ipt_ULOG: error during NLMSG_PUT\n");

alloc_failure:
	PRINTR("ipt_ULOG: Error building netlink message\n");

	spin_unlock_bh(&ulog_lock);
}
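
The batching rule above recurs in several of these examples: messages accumulate in one netlink skb and are flushed when either the queue-length threshold is reached or the next message no longer fits in the remaining tailroom. A minimal user-space sketch of just that decision (the struct and the numbers are made up for illustration; this is not kernel API):

#include <stdio.h>
#include <stddef.h>

struct batch {
	size_t used;		/* bytes already queued */
	size_t cap;		/* total buffer capacity */
	unsigned int qlen;	/* queued message count */
};

/* Return 1 if the buffer must be flushed before appending `need` bytes. */
static int must_flush(const struct batch *b, size_t need,
		      unsigned int qthreshold)
{
	return b->qlen >= qthreshold || need > b->cap - b->used;
}

int main(void)
{
	struct batch b = { .used = 3800, .cap = 4096, .qlen = 9 };

	/* 1: the 512-byte message exceeds the remaining tailroom */
	printf("flush first: %d\n", must_flush(&b, 512, 10));
	return 0;
}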
Example #2
netdev_tx_t
islpci_eth_transmit(struct sk_buff *skb, struct net_device *ndev)
{
	islpci_private *priv = netdev_priv(ndev);
	isl38xx_control_block *cb = priv->control_block;
	u32 index;
	dma_addr_t pci_map_address;
	int frame_size;
	isl38xx_fragment *fragment;
	int offset;
	struct sk_buff *newskb;
	int newskb_offset;
	unsigned long flags;
	unsigned char wds_mac[6];
	u32 curr_frag;

#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_FUNCTION_CALLS, "islpci_eth_transmit\n");
#endif

	/* lock the driver code */
	spin_lock_irqsave(&priv->slock, flags);

	/* check whether the destination queue has enough fragments for the frame */
	curr_frag = le32_to_cpu(cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ]);
	if (unlikely(curr_frag - priv->free_data_tx >= ISL38XX_CB_TX_QSIZE)) {
		printk(KERN_ERR "%s: transmit device queue full when awake\n",
		       ndev->name);
		netif_stop_queue(ndev);

		/* trigger the device */
		isl38xx_w32_flush(priv->device_base, ISL38XX_DEV_INT_UPDATE,
				  ISL38XX_DEV_INT_REG);
		udelay(ISL38XX_WRITEIO_DELAY);
		goto drop_free;
	}
	/* Check alignment and WDS frame formatting. The start of the packet should
	 * be aligned on a 4-byte boundary. If WDS is enabled add another 6 bytes
	 * and add WDS address information */
	if (likely(((long) skb->data & 0x03) | init_wds)) {
		/* get the number of bytes to add and re-align */
		offset = (4 - (long) skb->data) & 0x03;
		offset += init_wds ? 6 : 0;

		/* check whether the current skb can be used  */
		if (!skb_cloned(skb) && (skb_tailroom(skb) >= offset)) {
			unsigned char *src = skb->data;

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "skb offset %i wds %i\n", offset,
			      init_wds);
#endif

			/* align the buffer on 4-byte boundary */
			skb_reserve(skb, (4 - (long) skb->data) & 0x03);
			if (init_wds) {
				/* wds requires an additional address field of 6 bytes */
				skb_put(skb, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
				memmove(skb->data + 6, src, skb->len);
				skb_copy_to_linear_data(skb, wds_mac, 6);
			} else {
				memmove(skb->data, src, skb->len);
			}

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memmove %p %p %i\n", skb->data,
			      src, skb->len);
#endif
		} else {
			newskb =
			    dev_alloc_skb(init_wds ? skb->len + 6 : skb->len);
			if (unlikely(newskb == NULL)) {
				printk(KERN_ERR "%s: Cannot allocate skb\n",
				       ndev->name);
				goto drop_free;
			}
			newskb_offset = (4 - (long) newskb->data) & 0x03;

			/* Check if newskb->data is aligned */
			if (newskb_offset)
				skb_reserve(newskb, newskb_offset);

			skb_put(newskb, init_wds ? skb->len + 6 : skb->len);
			if (init_wds) {
				skb_copy_from_linear_data(skb,
							  newskb->data + 6,
							  skb->len);
				skb_copy_to_linear_data(newskb, wds_mac, 6);
#ifdef ISLPCI_ETH_DEBUG
				printk("islpci_eth_transmit:wds_mac\n");
#endif
			} else
				skb_copy_from_linear_data(skb, newskb->data,
							  skb->len);

#if VERBOSE > SHOW_ERROR_MESSAGES
			DEBUG(SHOW_TRACING, "memcpy %p %p %i wds %i\n",
			      newskb->data, skb->data, skb->len, init_wds);
#endif

			newskb->dev = skb->dev;
			dev_kfree_skb_irq(skb);
			skb = newskb;
		}
	}
	/* display the buffer contents for debugging */
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_BUFFER_CONTENTS, "\ntx %p ", skb->data);
	display_buffer((char *) skb->data, skb->len);
#endif

	/* map the skb buffer to pci memory for DMA operation */
	pci_map_address = pci_map_single(priv->pdev,
					 (void *) skb->data, skb->len,
					 PCI_DMA_TODEVICE);
	if (unlikely(pci_map_address == 0)) {
		printk(KERN_WARNING "%s: cannot map buffer to PCI\n",
		       ndev->name);
		goto drop_free;
	}
	/* Place the fragment in the control block structure. */
	index = curr_frag % ISL38XX_CB_TX_QSIZE;
	fragment = &cb->tx_data_low[index];

	priv->pci_map_tx_address[index] = pci_map_address;
	/* store the skb address for future freeing  */
	priv->data_low_tx[index] = skb;
	/* set the proper fragment start address and size information */
	frame_size = skb->len;
	fragment->size = cpu_to_le16(frame_size);
	fragment->flags = cpu_to_le16(0);	/* set to 1 if more fragments */
	fragment->address = cpu_to_le32(pci_map_address);
	curr_frag++;

	/* The fragment address in the control block must have been
	 * written before announcing the frame buffer to device. */
	wmb();
	cb->driver_curr_frag[ISL38XX_CB_TX_DATA_LQ] = cpu_to_le32(curr_frag);

	if (curr_frag - priv->free_data_tx + ISL38XX_MIN_QTHRESHOLD
	    > ISL38XX_CB_TX_QSIZE) {
		/* stop sends from upper layers */
		netif_stop_queue(ndev);

		/* set the full flag for the transmission queue */
		priv->data_low_tx_full = 1;
	}

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	/* trigger the device */
	islpci_trigger(priv);

	/* unlock the driver code */
	spin_unlock_irqrestore(&priv->slock, flags);

	return NETDEV_TX_OK;

drop_free:
	ndev->stats.tx_dropped++;
	spin_unlock_irqrestore(&priv->slock, flags);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
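
The driver above re-aligns frames with the expression (4 - (long) skb->data) & 0x03. A stand-alone sketch of that arithmetic, assuming only that the result is the byte count to the next 4-byte boundary:

#include <stdio.h>
#include <stdint.h>

/* Bytes to skip so that p lands on a 4-byte boundary; 0 if already aligned.
 * (4 - addr) & 3 is congruent to -addr mod 4, which is exactly that gap. */
static unsigned int align4_offset(const void *p)
{
	return (unsigned int)((4 - (uintptr_t)p) & 0x03);
}

int main(void)
{
	char buf[8];
	int i;

	for (i = 0; i < 4; i++)
		printf("%p -> skip %u\n", (void *)(buf + i),
		       align4_offset(buf + i));
	return 0;
}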
Example #3
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
                            const unsigned char *buffer, int count,
                            const struct h4_recv_pkt *pkts, int pkts_count)
{
    while (count) {
        int i, len;

        if (!skb) {
            for (i = 0; i < pkts_count; i++) {
                if (buffer[0] != (&pkts[i])->type)
                    continue;

                skb = bt_skb_alloc((&pkts[i])->maxlen,
                                   GFP_ATOMIC);
                if (!skb)
                    return ERR_PTR(-ENOMEM);

                bt_cb(skb)->pkt_type = (&pkts[i])->type;
                bt_cb(skb)->expect = (&pkts[i])->hlen;
                break;
            }

            /* Check for invalid packet type */
            if (!skb)
                return ERR_PTR(-EILSEQ);

            count -= 1;
            buffer += 1;
        }

        len = min_t(uint, bt_cb(skb)->expect - skb->len, count);
        memcpy(skb_put(skb, len), buffer, len);

        count -= len;
        buffer += len;

        /* Check for partial packet */
        if (skb->len < bt_cb(skb)->expect)
            continue;

        for (i = 0; i < pkts_count; i++) {
            if (bt_cb(skb)->pkt_type == (&pkts[i])->type)
                break;
        }

        if (i >= pkts_count) {
            kfree_skb(skb);
            return ERR_PTR(-EILSEQ);
        }

        if (skb->len == (&pkts[i])->hlen) {
            u16 dlen;

            switch ((&pkts[i])->lsize) {
            case 0:
                /* No variable data length */
                dlen = 0;
                break;
            case 1:
                /* Single octet variable length */
                dlen = skb->data[(&pkts[i])->loff];
                bt_cb(skb)->expect += dlen;

                if (skb_tailroom(skb) < dlen) {
                    kfree_skb(skb);
                    return ERR_PTR(-EMSGSIZE);
                }
                break;
            case 2:
                /* Double octet variable length */
                dlen = get_unaligned_le16(skb->data +
                                          (&pkts[i])->loff);
                bt_cb(skb)->expect += dlen;

                if (skb_tailroom(skb) < dlen) {
                    kfree_skb(skb);
                    return ERR_PTR(-EMSGSIZE);
                }
                break;
            default:
                /* Unsupported variable length */
                kfree_skb(skb);
                return ERR_PTR(-EILSEQ);
            }

            if (!dlen) {
                /* No more data, complete frame */
                (&pkts[i])->recv(hdev, skb);
                skb = NULL;
            }
        } else {
            /* Complete frame */
            (&pkts[i])->recv(hdev, skb);
            skb = NULL;
        }
    }

    return skb;
}
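
The reassembly loop above is worth restating in isolation: read a type octet, collect a fixed header, extend the expected length by a one-octet payload-length field, and emit the frame once complete. A rough user-space model under simplifying assumptions (one hypothetical packet type, a one-byte length field, a fixed 64-byte buffer):

#include <stdio.h>
#include <string.h>

#define MY_TYPE	0x04	/* hypothetical packet-type octet */
#define HDR_LEN	2	/* fixed header: opcode + one-byte payload length */

struct frame {
	unsigned char buf[64];
	size_t len;	/* bytes collected so far */
	size_t expect;	/* bytes needed for a complete frame */
	int active;
};

/* Feed `count` stream bytes; return completed frames, or < 0 on error. */
static int feed(struct frame *f, const unsigned char *p, size_t count)
{
	int done = 0;

	while (count) {
		size_t n;

		if (!f->active) {
			if (*p != MY_TYPE)
				return -1;	/* like ERR_PTR(-EILSEQ) */
			f->len = 0;
			f->expect = HDR_LEN;
			f->active = 1;
			p++;
			count--;
			continue;
		}

		n = f->expect - f->len;
		if (n > count)
			n = count;
		memcpy(f->buf + f->len, p, n);
		f->len += n;
		p += n;
		count -= n;

		if (f->len < f->expect)
			continue;	/* partial frame, wait for more */

		if (f->len == HDR_LEN) {
			f->expect += f->buf[1];	/* variable payload length */
			if (f->expect > sizeof(f->buf))
				return -2;	/* like ERR_PTR(-EMSGSIZE) */
		}
		if (f->len == f->expect) {	/* complete frame */
			done++;
			f->active = 0;
		}
	}
	return done;
}

int main(void)
{
	struct frame f = { 0 };
	const unsigned char stream[] = { MY_TYPE, 0x0e, 0x02, 0xaa, 0xbb };

	printf("frames: %d\n", feed(&f, stream, sizeof(stream)));	/* 1 */
	return 0;
}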
Example #4
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
static void kevent (void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent (struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit (EVENT_TX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->txq);
		status = usb_clear_halt (dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err (dev))
				deverr (dev, "can't clear tx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue (dev->net);
		}
	}
	if (test_bit (EVENT_RX_HALT, &dev->flags)) {

		unlink_urbs (dev, &dev->rxq);
		status = usb_clear_halt (dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err (dev))
				deverr (dev, "can't clear rx halt, status %d",
					status);
		} else {
			clear_bit (EVENT_RX_HALT, &dev->flags);
			tasklet_schedule (&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit (EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running (dev->net))
			urb = usb_alloc_urb (0, GFP_KERNEL);
		else
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit (EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit (dev, urb, GFP_KERNEL);
			tasklet_schedule (&dev->bh);
		}
	}

	if (test_bit (EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit (EVENT_LINK_RESET, &dev->flags);
		if(info->link_reset && (retval = info->link_reset(dev)) < 0) {
			devinfo(dev, "link reset failed (%d) usbnet usb-%s-%s, %s",
				retval,
				dev->udev->bus->bus_name, dev->udev->devpath,
				info->description);
		}
	}

	if (dev->flags)
		devdbg (dev, "kevent done, flags = 0x%lx",
			dev->flags);
}

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,19)
static void tx_complete (struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete (struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent (dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		// async unlink
		case -ESHUTDOWN:		// hardware gone
			break;

		// like rx, tx gets controller i/o faults during khubd delays
		// and so it uses the same throttling mechanism.
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending (&dev->delay)) {
				mod_timer (&dev->delay,
					jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link (dev))
					devdbg (dev, "tx throttle %d",
							urb->status);
			}
			netif_stop_queue (dev->net);
			break;
		default:
			if (netif_msg_tx_err (dev))
				devdbg (dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout (struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);

	unlink_urbs (dev, &dev->txq);
	tasklet_schedule (&dev->bh);

	// FIXME: device recovery -- reset?
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		axusbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return NETDEV_TX_OK;
}
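
The ZLP workaround in axusbnet_start_xmit is a common USB bulk-out idiom: when the frame length is an exact multiple of the endpoint's max packet size, one extra pad byte makes the transfer end in a short packet, so the device does not wait for a zero-length packet. A hedged stand-alone restatement of just that length rule:

#include <stdio.h>

/* Length actually submitted for a bulk-out transfer: add one pad byte when
 * the frame is an exact multiple of maxpacket and the device cannot be
 * trusted to accept a zero-length packet. */
static unsigned int tx_len(unsigned int len, unsigned int maxpacket,
			   int send_zlp_ok)
{
	if (!send_zlp_ok && len % maxpacket == 0)
		return len + 1;	/* force a short final packet */
	return len;
}

int main(void)
{
	printf("%u\n", tx_len(512, 512, 0));	/* 513 */
	printf("%u\n", tx_len(500, 512, 0));	/* 500 */
	return 0;
}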

/*-------------------------------------------------------------------------*/

// tasklet (work deferred from completions, in_irq) or timer

static void axusbnet_bh (unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue (&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process (dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb (entry->urb);
			dev_kfree_skb (skb);
			continue;
		default:
			devdbg (dev, "bogus skb state %d", entry->state);
		}
	}

	// waiting for all pending urbs to complete?
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0) {
			wake_up (dev->wait);
		}

	// or are we maybe short a few urbs?
	} else if (netif_running (dev->net)
			&& netif_device_present (dev->net)
			&& !timer_pending (&dev->delay)
			&& !test_bit (EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN (dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			// don't refill the queue all at once
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb (0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit (dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link (dev))
				devdbg (dev, "rxqlen %d --> %d",
						temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule (&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN (dev))
			netif_wake_queue (dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static
void axusbnet_disconnect (struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev (intf);

	if (netif_msg_probe (dev))
		devinfo (dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev (net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work ();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind (dev, intf);

	free_netdev(net);
	usb_put_dev (xdev);
}

/*-------------------------------------------------------------------------*/

// precondition: never called in_interrupt

static int
axusbnet_probe (struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk (KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev (udev);
	interface = udev->cur_altsetting;

	usb_get_dev (xdev);

	status = -ENOMEM;

	// set up our own records
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		dbg ("can't kmalloc dev");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init (msg_level, NETIF_MSG_DRV
				| NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init (&dev->rxq);
	skb_queue_head_init (&dev->txq);
	skb_queue_head_init (&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,20)
	INIT_WORK (&dev->kevent, kevent, dev);
#else
	INIT_WORK (&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer (&dev->delay);
//	mutex_init (&dev->phy_mutex);

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
// dma_supported() is deeply broken on almost all architectures
	// possible with some EHCI controllers
	if (dma_supported (&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,30)
	net->open		= axusbnet_open;
	net->stop		= axusbnet_stop;
	net->hard_start_xmit	= axusbnet_start_xmit;
	net->tx_timeout		= axusbnet_tx_timeout;
	net->get_stats		= axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	// allow device-specific bind/init procedures
	// NOTE net->name still not usable ...
	status = info->bind (dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status (dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket (dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev (net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe (dev))
		devinfo (dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	// ok, it's ready to go.
	usb_set_intfdata (udev, dev);

	// start as if the link is up
	netif_device_attach (net);

	return 0;

out3:
	if (info->unbind)
		info->unbind (dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend (struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2,6,10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach (dev->net);
		(void) unlink_urbs (dev, &dev->rxq);
		(void) unlink_urbs (dev, &dev->txq);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach (dev->net);
	}
	return 0;
}

static int
axusbnet_resume (struct usb_interface *intf)
{
	struct usbnet		*dev = usb_get_intfdata(intf);

	if (!--dev->suspend_count)
		tasklet_schedule (&dev->bh);

	return 0;
}
Example #5
/* log handler for internal netfilter logging api */
void
nfulnl_log_packet(struct net *net,
		  u_int8_t pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int plen;
	struct nfnl_log_net *log = nfnl_log_pernet(net);

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG)
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(log, li->u.ulog.group);
	if (!inst)
		return;

	plen = 0;
	if (prefix)
		plen = strlen(prefix) + 1;

	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    nlmsg_total_size(sizeof(struct nfgenmsg))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hdr))
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
		+ nla_total_size(sizeof(u_int32_t))	/* ifindex */
#endif
		+ nla_total_size(sizeof(u_int32_t))	/* mark */
		+ nla_total_size(sizeof(u_int32_t))	/* uid */
		+ nla_total_size(sizeof(u_int32_t))	/* gid */
		+ nla_total_size(plen)			/* prefix */
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_hw))
		+ nla_total_size(sizeof(struct nfulnl_msg_packet_timestamp))
		+ nla_total_size(sizeof(struct nfgenmsg));	/* NLMSG_DONE */

	if (in && skb_mac_header_was_set(skb)) {
		size +=   nla_total_size(skb->dev->hard_header_len)
			+ nla_total_size(sizeof(u_int16_t))	/* hwtype */
			+ nla_total_size(sizeof(u_int16_t));	/* hwlen */
	}

	spin_lock_bh(&inst->lock);

	if (inst->flags & NFULNL_CFG_F_SEQ)
		size += nla_total_size(sizeof(u_int32_t));
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		size += nla_total_size(sizeof(u_int32_t));

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (li->u.ulog.qthreshold)
		if (qthreshold > li->u.ulog.qthreshold)
			qthreshold = li->u.ulog.qthreshold;


	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;

	case NFULNL_COPY_PACKET:
		if (inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;

		size += nla_total_size(data_len);
		break;

	case NFULNL_COPY_DISABLED:
	default:
		goto unlock_and_release;
	}

	if (inst->skb && size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		__nfulnl_flush(inst);
	}

	if (!inst->skb) {
		inst->skb = nfulnl_alloc_skb(inst->peer_portid, inst->nlbufsiz,
					     size);
		if (!inst->skb)
			goto alloc_failure;
	}

	inst->qlen++;

	__build_packet_message(log, inst, skb, data_len, pf,
				hooknum, in, out, prefix, plen);

	if (inst->qlen >= qthreshold)
		__nfulnl_flush(inst);
	/* timer_pending always called within inst->lock, so there
	 * is no chance of a race here */
	else if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}

unlock_and_release:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	return;

alloc_failure:
	/* FIXME: statistics */
	goto unlock_and_release;
}
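
The worst-case size reservation above relies on netlink attribute sizing: each attribute is a 4-byte header plus its payload, rounded up to a 4-byte boundary. A user-space re-statement of that accounting (macro names are prefixed MY_/my_ to mark them as local stand-ins for the kernel macros):

#include <stdio.h>

#define MY_NLA_ALIGN(len)	(((len) + 3) & ~3)
#define MY_NLA_HDRLEN		4
#define my_nla_total_size(len)	MY_NLA_ALIGN(MY_NLA_HDRLEN + (len))

int main(void)
{
	unsigned int size = 0;

	size += my_nla_total_size(4);	/* ifindex: 8 */
	size += my_nla_total_size(4);	/* mark: 8 */
	size += my_nla_total_size(11);	/* prefix "log-prefix\0": 16 */
	printf("reserved: %u bytes\n", size);	/* 32 */
	return 0;
}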
Example #6
/* Called only from software IRQ */
static struct sk_buff * hostap_tx_encrypt(struct sk_buff *skb,
					  struct lib80211_crypt_data *crypt)
{
	struct hostap_interface *iface;
	local_info_t *local;
	struct ieee80211_hdr *hdr;
	int prefix_len, postfix_len, hdr_len, res;

	iface = netdev_priv(skb->dev);
	local = iface->local;

	if (skb->len < IEEE80211_DATA_HDR3_LEN) {
		kfree_skb(skb);
		return NULL;
	}

	if (local->tkip_countermeasures &&
	    strcmp(crypt->ops->name, "TKIP") == 0) {
		hdr = (struct ieee80211_hdr *) skb->data;
		if (net_ratelimit()) {
			printk(KERN_DEBUG "%s: TKIP countermeasures: dropped "
			       "TX packet to %pM\n",
			       local->dev->name, hdr->addr1);
		}
		kfree_skb(skb);
		return NULL;
	}

	skb = skb_unshare(skb, GFP_ATOMIC);
	if (skb == NULL)
		return NULL;

	prefix_len = crypt->ops->extra_mpdu_prefix_len +
		crypt->ops->extra_msdu_prefix_len;
	postfix_len = crypt->ops->extra_mpdu_postfix_len +
		crypt->ops->extra_msdu_postfix_len;
	if ((skb_headroom(skb) < prefix_len ||
	     skb_tailroom(skb) < postfix_len) &&
	    pskb_expand_head(skb, prefix_len, postfix_len, GFP_ATOMIC)) {
		kfree_skb(skb);
		return NULL;
	}

	hdr = (struct ieee80211_hdr *) skb->data;
	hdr_len = hostap_80211_get_hdrlen(hdr->frame_control);

	/* Host-based IEEE 802.11 fragmentation for TX is not yet supported, so
	 * call both MSDU and MPDU encryption functions from here. */
	atomic_inc(&crypt->refcnt);
	res = 0;
	if (crypt->ops->encrypt_msdu)
		res = crypt->ops->encrypt_msdu(skb, hdr_len, crypt->priv);
	if (res == 0 && crypt->ops->encrypt_mpdu)
		res = crypt->ops->encrypt_mpdu(skb, hdr_len, crypt->priv);
	atomic_dec(&crypt->refcnt);
	if (res < 0) {
		kfree_skb(skb);
		return NULL;
	}

	return skb;
}
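
The headroom/tailroom check before encryption generalizes: grow the buffer only when the cipher's prefix or postfix will not fit in place. A minimal sketch; the TKIP-like numbers in main() are an assumption for illustration:

#include <stdio.h>

/* Grow the buffer only if the crypto prefix or postfix will not fit. */
static int needs_expand(unsigned int headroom, unsigned int tailroom,
			unsigned int prefix, unsigned int postfix)
{
	return headroom < prefix || tailroom < postfix;
}

int main(void)
{
	/* assumed TKIP-like overhead: 8-byte IV prefix, 12-byte ICV+MIC */
	printf("%d\n", needs_expand(4, 20, 8, 12));	/* 1: headroom short */
	printf("%d\n", needs_expand(16, 20, 8, 12));	/* 0: fits in place */
	return 0;
}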
Example #7
static inline int 
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb, 
			unsigned int data_len,
			unsigned int pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const struct nf_loginfo *li,
			const char *prefix)
{
	unsigned char *old_tail;
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	u_int32_t tmp_uint;

	UDEBUG("entered\n");
		
	old_tail = inst->skb->tail;
	nlh = NLMSG_PUT(inst->skb, 0, 0, 
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= htons(skb->protocol);
	pmsg.hook		= hooknum;

	NFA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix) {
		int slen = strlen(prefix);
		if (slen > NFULNL_PREFIXLEN)
			slen = NFULNL_PREFIXLEN;
		NFA_PUT(inst->skb, NFULA_PREFIX, slen, prefix);
	}

	if (indev) {
		tmp_uint = htonl(indev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we
			 * need to look for the bridge group (when called
			 * from netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(indev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_INDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge && skb->nf_bridge->physindev) {
				tmp_uint = 
				    htonl(skb->nf_bridge->physindev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (outdev) {
		tmp_uint = htonl(outdev->ifindex);
#ifndef CONFIG_BRIDGE_NETFILTER
		NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV, sizeof(tmp_uint),
			&tmp_uint);
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			/* this is the bridge group "brX" */
			tmp_uint = htonl(outdev->br_port->br->dev->ifindex);
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
		} else {
			/* Case 2: outdev is a bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			NFA_PUT(inst->skb, NFULA_IFINDEX_OUTDEV,
				sizeof(tmp_uint), &tmp_uint);
			if (skb->nf_bridge) {
				tmp_uint = 
				    htonl(skb->nf_bridge->physoutdev->ifindex);
				NFA_PUT(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					sizeof(tmp_uint), &tmp_uint);
			}
		}
#endif
	}

	if (skb->nfmark) {
		tmp_uint = htonl(skb->nfmark);
		NFA_PUT(inst->skb, NFULA_MARK, sizeof(tmp_uint), &tmp_uint);
	}

	if (indev && skb->dev && skb->dev->hard_header_parse) {
		struct nfulnl_msg_packet_hw phw;

		phw.hw_addrlen = 
			skb->dev->hard_header_parse((struct sk_buff *)skb, 
						    phw.hw_addr);
		phw.hw_addrlen = htons(phw.hw_addrlen);
		NFA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
	}

	if (skb->tstamp.off_sec) {
		struct nfulnl_msg_packet_timestamp ts;

		ts.sec = cpu_to_be64(skb->tstamp.off_sec);
		ts.usec = cpu_to_be64(skb->tstamp.off_usec);

		NFA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			u_int32_t uid = htonl(skb->sk->sk_socket->file->f_uid);
			/* need to unlock here since NFA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NFA_PUT(inst->skb, NFULA_UID, sizeof(uid), &uid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	if (data_len) {
		struct nfattr *nfa;
		int size = NFA_LENGTH(data_len);

		if (skb_tailroom(inst->skb) < (int)NFA_SPACE(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nfa = (struct nfattr *)skb_put(inst->skb, NFA_ALIGN(size));
		nfa->nfa_type = NFULA_PAYLOAD;
		nfa->nfa_len = size;

		if (skb_copy_bits(skb, 0, NFA_DATA(nfa), data_len))
			BUG();
	}
		
	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
	UDEBUG("nlmsg_failure\n");
nfattr_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
Example #8
int msm_sdio_dmux_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (id >= SDIO_DMUX_NUM_CHANNELS)
		return -EINVAL;
	if (!skb)
		return -EINVAL;
	if (!sdio_mux_initialized)
		return -ENODEV;
	if (fatal_error)
		return -ENODEV;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (sdio_ch_is_in_reset(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port is in reset: %d\n", __func__,
				sdio_ch[id].status);
		return -ENETRESET;
	}
	if (!sdio_ch_is_local_open(id)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		return -ENODEV;
	}
	if (sdio_ch[id].use_wm &&
			(sdio_ch[id].num_tx_pkts >= HIGH_WATERMARK)) {
		spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
		pr_err("%s: watermark exceeded: %d\n", __func__, id);
		return -EAGAIN;
	}
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);

	spin_lock_irqsave(&sdio_mux_write_lock, flags);
	/* if the skb does not have enough tailroom for the padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb plus memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_ATOMIC);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* the caller should allocate room for the header and the padding;
	   the header is easy, the padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	__skb_queue_tail(&sdio_mux_write_pool, skb);

	spin_lock(&sdio_ch[id].lock);
	sdio_ch[id].num_tx_pkts++;
	spin_unlock(&sdio_ch[id].lock);

	queue_work_on(0, sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_mux_write_lock, flags);
	return rc;
}
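
The pad-to-4 rule used by this mux (and by msm_rmnet_sdio_write below) in one stand-alone helper; the loop in main() is only a demonstration:

#include <stdio.h>

/* Filler bytes needed to round `len` up to a 4-byte multiple. */
static unsigned int pad_to_4(unsigned int len)
{
	return (len & 0x3) ? 4 - (len & 0x3) : 0;
}

int main(void)
{
	unsigned int len;

	for (len = 20; len < 24; len++)
		printf("len %u -> pad %u -> total %u\n",
		       len, pad_to_4(len), len + pad_to_4(len));
	return 0;
}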
Example #9
uint
osl_pkttailroom(osl_t *osh, void *skb)
{
	return (uint) skb_tailroom((struct sk_buff *) skb);
}
Example #10
int msm_rmnet_sdio_write(uint32_t id, struct sk_buff *skb)
{
	int rc = 0;
	struct sdio_mux_hdr *hdr;
	unsigned long flags;
	struct sk_buff *new_skb;

	if (!skb)
		return -EINVAL;

	DBG("%s: writing to ch %d len %d\n", __func__, id, skb->len);
	spin_lock_irqsave(&sdio_ch[id].lock, flags);
	if (!sdio_ch_is_local_open(id)) {
		pr_err("%s: port not open: %d\n", __func__, sdio_ch[id].status);
		rc = -ENODEV;
		goto write_done;
	}

	if (sdio_ch[id].skb) {
		pr_err("%s: packet pending ch: %d\n", __func__, id);
		rc = -EPERM;
		goto write_done;
	}

	/* if the skb does not have enough tailroom for the padding,
	   copy it into a new, expanded skb */
	if ((skb->len & 0x3) && (skb_tailroom(skb) < (4 - (skb->len & 0x3)))) {
		/* revisit: dev_alloc_skb plus memcpy is probably more efficient */
		new_skb = skb_copy_expand(skb, skb_headroom(skb),
					  4 - (skb->len & 0x3), GFP_KERNEL);
		if (new_skb == NULL) {
			pr_err("%s: cannot allocate skb\n", __func__);
			rc = -ENOMEM;
			goto write_done;
		}
		dev_kfree_skb_any(skb);
		skb = new_skb;
		DBG_INC_WRITE_CPY(skb->len);
	}

	hdr = (struct sdio_mux_hdr *)skb_push(skb, sizeof(struct sdio_mux_hdr));

	/* the caller should allocate room for the header and the padding;
	   the header is easy, the padding is tricky */
	hdr->magic_num = SDIO_MUX_HDR_MAGIC_NO;
	hdr->cmd = SDIO_MUX_HDR_CMD_DATA;
	hdr->reserved = 0;
	hdr->ch_id = id;
	hdr->pkt_len = skb->len - sizeof(struct sdio_mux_hdr);
	if (skb->len & 0x3)
		skb_put(skb, 4 - (skb->len & 0x3));

	hdr->pad_len = skb->len - (sizeof(struct sdio_mux_hdr) + hdr->pkt_len);

	DBG("%s: data %p, tail %p skb len %d pkt len %d pad len %d\n",
	    __func__, skb->data, skb->tail, skb->len,
	    hdr->pkt_len, hdr->pad_len);
	sdio_ch[id].skb = skb;
	queue_work(sdio_mux_workqueue, &work_sdio_mux_write);

write_done:
	spin_unlock_irqrestore(&sdio_ch[id].lock, flags);
	return rc;
}
Example #11
void wlRecv(struct net_device *netdev)
{
	struct wlprivate *wlpptr = NETDEV_PRIV_P(struct wlprivate, netdev);
	int work_done = 0;
	wlrxdesc_t *pCurrent = ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc;
	static Bool_e isFunctionBusy = WL_FALSE;
	int receivedHandled = 0;
	u_int32_t rxRdPtr;
	u_int32_t rxWrPtr;
	struct sk_buff *pRxSkBuff=NULL;
	WL_BUFF *wlb = NULL;
	void *pCurrentData;
	u_int8_t rxRate;
	int rxCount;
	int rssi;
	vmacApInfo_t *vmacSta_p = wlpptr->vmacSta_p;
	u_int32_t status;
	u_int32_t rssi_paths;
	WLDBG_ENTER(DBG_LEVEL_14);

	/* In a corner case the descriptors may be uninitialized and not
	 * usable; accessing them may cause a crash. */
	if (isFunctionBusy || (pCurrent == NULL))
	{
		return;
	}
	isFunctionBusy = WL_TRUE;

	rxRdPtr = readl(wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
	rxWrPtr = readl(wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescWrite);

	while ((pCurrent->RxControl ==EAGLE_RXD_CTRL_DMA_OWN)
		&& (work_done < vmacSta_p->work_to_do)
		)
	{		
		/* AUTOCHANNEL */
		{
			if(vmacSta_p->StopTraffic)
				goto out;
		}

		rxCount = ENDIAN_SWAP16(pCurrent->PktLen);
		pRxSkBuff = pCurrent->pSkBuff;
		if (pRxSkBuff == NULL)
		{
			goto out;
		}
		pci_unmap_single(wlpptr->pPciDev, 
			ENDIAN_SWAP32(pCurrent->pPhysBuffData),
			((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize,
			PCI_DMA_FROMDEVICE);
		pCurrentData = pCurrent->pBuffData;
		rxRate = pCurrent->Rate;
		status = (u_int32_t)pCurrent->Status;
		pRxSkBuff->protocol = 0;
		if(pCurrent->QosCtrl & IEEE_QOS_CTL_AMSDU)
		{
			pRxSkBuff->protocol |= WL_WLAN_TYPE_AMSDU;
		}
		rssi = (int)pCurrent->RSSI + W836X_RSSI_OFFSET;
		rssi_paths = *((u_int32_t *)&pCurrent->HwRssiInfo);
		if (skb_tailroom(pRxSkBuff) >= rxCount)
		{
			skb_put(pRxSkBuff, rxCount ); 
			skb_pull(pRxSkBuff, 2); 
		}
		else
		{
			WLDBG_INFO(DBG_LEVEL_14,"Not enough tail room =%x recvlen=%x, pCurrent=%x, pCurrentData=%x", WL_BUFF_TAILROOM(pRxSkBuff), rxCount,pCurrent, pCurrentData);
			WL_SKB_FREE(pRxSkBuff);
			goto out;
		}

		wlpptr->netDevStats->rx_packets++;
		wlb = WL_BUFF_PTR(pRxSkBuff);
		WL_PREPARE_BUF_INFO(pRxSkBuff);
		if(pCurrent->HtSig2 & 0x8 )
		{
			u_int8_t ampdu_qos;
			/** use bit 3 for ampdu flag, and 0,1,2,3 for qos so as to save a register **/	
			ampdu_qos = 8|(pCurrent->QosCtrl&0x7);
			work_done+=ieee80211_input(wlpptr, wlb,rssi,rssi_paths,ampdu_qos,status);
		}	
		else
		{
			u_int8_t ampdu_qos;
			/** use bit 3 for ampdu flag, and 0,1,2,3 for qos so as to save a register **/	
			ampdu_qos = 0|(pCurrent->QosCtrl&0x7); 
			work_done+=ieee80211_input(wlpptr, wlb,rssi,rssi_paths,ampdu_qos,status);
		}

		wlpptr->netDevStats->rx_bytes += pRxSkBuff->len;
		{
			pCurrent->pSkBuff   = dev_alloc_skb(((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize);
			if (pCurrent->pSkBuff != NULL)
			{
				if(skb_linearize(pCurrent->pSkBuff))
				{
					WL_SKB_FREE(pCurrent->pSkBuff);
					printk(KERN_ERR "%s: Need linearize memory\n", netdev->name);
					goto out;
				}
				skb_reserve(pCurrent->pSkBuff , MIN_BYTES_HEADROOM);
				pCurrent->Status    = EAGLE_RXD_STATUS_OK;
				pCurrent->QosCtrl   = 0x0000;
				pCurrent->Channel   = 0x00;
				pCurrent->RSSI      = 0x00;
				pCurrent->SQ2       = 0x00;

				pCurrent->PktLen    = 6*netdev->mtu + NUM_EXTRA_RX_BYTES;
				pCurrent->pBuffData = pCurrent->pSkBuff->data;
				pCurrent->pPhysBuffData =
					ENDIAN_SWAP32(pci_map_single(wlpptr->pPciDev,
					pCurrent->pSkBuff->data,
					((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxBufSize/*+sizeof(struct skb_shared_info)*/,
					PCI_DMA_BIDIRECTIONAL));
			}
		}
out:

		receivedHandled++;
		pCurrent->RxControl = EAGLE_RXD_CTRL_DRIVER_OWN;
		pCurrent->QosCtrl =0;
		rxRdPtr = ENDIAN_SWAP32(pCurrent->pPhysNext);
		pCurrent = pCurrent->pNext;
	}
	writel(rxRdPtr, wlpptr->ioBase0 + ((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].rxDescRead);
	((struct wlprivate_data *)(wlpptr->wlpd_p))->descData[0].pNextRxDesc = pCurrent;
	isFunctionBusy = WL_FALSE;
	WLDBG_EXIT(DBG_LEVEL_14);
}
Example #12
struct sk_buff *h4_recv_buf(struct hci_dev *hdev, struct sk_buff *skb,
			    const unsigned char *buffer, int count,
			    const struct h4_recv_pkt *pkts, int pkts_count)
{
	struct hci_uart *hu = hci_get_drvdata(hdev);
	u8 alignment = hu->alignment ? hu->alignment : 1;

	while (count) {
		int i, len;

		/* remove padding bytes from buffer */
		for (; hu->padding && count > 0; hu->padding--) {
			count--;
			buffer++;
		}
		if (!count)
			break;

		if (!skb) {
			for (i = 0; i < pkts_count; i++) {
				if (buffer[0] != (&pkts[i])->type)
					continue;

				skb = bt_skb_alloc((&pkts[i])->maxlen,
						   GFP_ATOMIC);
				if (!skb)
					return ERR_PTR(-ENOMEM);

				hci_skb_pkt_type(skb) = (&pkts[i])->type;
				hci_skb_expect(skb) = (&pkts[i])->hlen;
				break;
			}

			/* Check for invalid packet type */
			if (!skb)
				return ERR_PTR(-EILSEQ);

			count -= 1;
			buffer += 1;
		}

		len = min_t(uint, hci_skb_expect(skb) - skb->len, count);
		skb_put_data(skb, buffer, len);

		count -= len;
		buffer += len;

		/* Check for partial packet */
		if (skb->len < hci_skb_expect(skb))
			continue;

		for (i = 0; i < pkts_count; i++) {
			if (hci_skb_pkt_type(skb) == (&pkts[i])->type)
				break;
		}

		if (i >= pkts_count) {
			kfree_skb(skb);
			return ERR_PTR(-EILSEQ);
		}

		if (skb->len == (&pkts[i])->hlen) {
			u16 dlen;

			switch ((&pkts[i])->lsize) {
			case 0:
				/* No variable data length */
				dlen = 0;
				break;
			case 1:
				/* Single octet variable length */
				dlen = skb->data[(&pkts[i])->loff];
				hci_skb_expect(skb) += dlen;

				if (skb_tailroom(skb) < dlen) {
					kfree_skb(skb);
					return ERR_PTR(-EMSGSIZE);
				}
				break;
			case 2:
				/* Double octet variable length */
				dlen = get_unaligned_le16(skb->data +
							  (&pkts[i])->loff);
				hci_skb_expect(skb) += dlen;

				if (skb_tailroom(skb) < dlen) {
					kfree_skb(skb);
					return ERR_PTR(-EMSGSIZE);
				}
				break;
			default:
				/* Unsupported variable length */
				kfree_skb(skb);
				return ERR_PTR(-EILSEQ);
			}

			if (!dlen) {
				hu->padding = (skb->len - 1) % alignment;
				hu->padding = (alignment - hu->padding) % alignment;

				/* No more data, complete frame */
				(&pkts[i])->recv(hdev, skb);
				skb = NULL;
			}
		} else {
			hu->padding = (skb->len - 1) % alignment;
			hu->padding = (alignment - hu->padding) % alignment;

			/* Complete frame */
			(&pkts[i])->recv(hdev, skb);
			skb = NULL;
		}
	}

	return skb;
}
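
The padding bookkeeping added in this newer h4_recv_buf can be stated on its own: the type octet is not counted (hence len - 1), and the result is how many filler octets follow the frame so the next one starts aligned. A sketch, assuming the 4-byte alignment used by some UART controllers:

#include <stdio.h>

/* Filler octets after a frame of `frame_len` bytes (type octet included)
 * so that the next frame starts on an `alignment` boundary. */
static unsigned int h4_padding(unsigned int frame_len, unsigned int alignment)
{
	unsigned int rem = (frame_len - 1) % alignment;

	return (alignment - rem) % alignment;
}

int main(void)
{
	printf("%u\n", h4_padding(7, 4));	/* 6 counted bytes -> pad 2 */
	printf("%u\n", h4_padding(9, 4));	/* 8 counted bytes -> pad 0 */
	return 0;
}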
Example #13
static unsigned int ipt_ulog_target(struct sk_buff **pskb,
				    unsigned int hooknum,
				    const struct net_device *in,
				    const struct net_device *out,
				    const void *targinfo, void *userinfo)
{
	ulog_buff_t *ub;
	ulog_packet_msg_t *pm;
	size_t size, copy_len;
	struct nlmsghdr *nlh;
	struct ipt_ulog_info *loginfo = (struct ipt_ulog_info *) targinfo;

	/* ffs == find first bit set, necessary because userspace
	 * is already shifting groupnumber, but we need unshifted.
	 * ffs() returns [1..32], we need [0..31] */
	unsigned int groupnum = ffs(loginfo->nl_group) - 1;

	/* calculate the size of the skb needed */
	if ((loginfo->copy_range == 0) ||
	    (loginfo->copy_range > (*pskb)->len)) {
		copy_len = (*pskb)->len;
	} else {
		copy_len = loginfo->copy_range;
	}

	size = NLMSG_SPACE(sizeof(*pm) + copy_len);

	ub = &ulog_buffers[groupnum];
	
	LOCK_BH(&ulog_lock);

	if (!ub->skb) {
		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	} else if (ub->qlen >= loginfo->qthreshold ||
		   size > skb_tailroom(ub->skb)) {
		/* either the queue len is too high or we don't have 
		 * enough room in nlskb left. send it to userspace. */

		ulog_send(groupnum);

		if (!(ub->skb = ulog_alloc_skb(size)))
			goto alloc_failure;
	}

	DEBUGP("ipt_ULOG: qlen %d, qthreshold %zu\n", ub->qlen,
		loginfo->qthreshold);

	/* NLMSG_PUT contains a hidden goto nlmsg_failure !!! */
	nlh = NLMSG_PUT(ub->skb, 0, ub->qlen, ULOG_NL_EVENT, 
			size - sizeof(*nlh));
	ub->qlen++;

	pm = NLMSG_DATA(nlh);

	/* copy hook, prefix, timestamp, payload, etc. */
	pm->data_len = copy_len;
	pm->timestamp_sec = (*pskb)->stamp.tv_sec;
	pm->timestamp_usec = (*pskb)->stamp.tv_usec;
	pm->mark = (*pskb)->nfmark;
	pm->hook = hooknum;
	if (loginfo->prefix[0] != '\0')
		strncpy(pm->prefix, loginfo->prefix, sizeof(pm->prefix));
	else
		*(pm->prefix) = '\0';

	if (in && in->hard_header_len > 0
	    && (*pskb)->mac.raw != (void *) (*pskb)->nh.iph
	    && in->hard_header_len <= ULOG_MAC_LEN) {
		memcpy(pm->mac, (*pskb)->mac.raw, in->hard_header_len);
		pm->mac_len = in->hard_header_len;
	} else
		pm->mac_len = 0;

	if (in)
		strncpy(pm->indev_name, in->name, sizeof(pm->indev_name));
	else
		pm->indev_name[0] = '\0';

	if (out)
		strncpy(pm->outdev_name, out->name, sizeof(pm->outdev_name));
	else
		pm->outdev_name[0] = '\0';

	if (copy_len)
		memcpy(pm->payload, (*pskb)->data, copy_len);
	
	/* check if we are building multi-part messages */
	if (ub->qlen > 1) {
		ub->lastnlh->nlmsg_flags |= NLM_F_MULTI;
	}

	ub->lastnlh = nlh;

	/* if timer isn't already running, start it */
	if (!timer_pending(&ub->timer)) {
		ub->timer.expires = jiffies + flushtimeout;
		add_timer(&ub->timer);
	}

	/* if threshold is reached, send message to userspace */
	if (ub->qlen >= loginfo->qthreshold) {
		if (loginfo->qthreshold > 1)
			nlh->nlmsg_type = NLMSG_DONE;
		ulog_send(groupnum);
	}

	UNLOCK_BH(&ulog_lock);

	return IPT_CONTINUE;


nlmsg_failure:
	PRINTR("ipt_ULOG: error during NLMSG_PUT\n");

alloc_failure:
	PRINTR("ipt_ULOG: Error building netlink message\n");

	UNLOCK_BH(&ulog_lock);

	return IPT_CONTINUE;
}
Example #14
static int ipcomp_decompress(struct xfrm_state *x, struct sk_buff *skb)
{
	struct ipcomp_data *ipcd = x->data;
	const int plen = skb->len;
	int dlen = IPCOMP_SCRATCH_SIZE;
	const u8 *start = skb->data;
	const int cpu = get_cpu();
	u8 *scratch = *per_cpu_ptr(ipcomp_scratches, cpu);
	struct crypto_comp *tfm = *per_cpu_ptr(ipcd->tfms, cpu);
	int err = crypto_comp_decompress(tfm, start, plen, scratch, &dlen);
	int len;

	if (err)
		goto out;

	if (dlen < (plen + sizeof(struct ip_comp_hdr))) {
		err = -EINVAL;
		goto out;
	}

	len = dlen - plen;
	if (len > skb_tailroom(skb))
		len = skb_tailroom(skb);

	__skb_put(skb, len);

	len += plen;
	skb_copy_to_linear_data(skb, scratch, len);

	while ((scratch += len, dlen -= len) > 0) {
		skb_frag_t *frag;
		struct page *page;

		err = -EMSGSIZE;
		if (WARN_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS))
			goto out;

		frag = skb_shinfo(skb)->frags + skb_shinfo(skb)->nr_frags;
		page = alloc_page(GFP_ATOMIC);

		err = -ENOMEM;
		if (!page)
			goto out;

		__skb_frag_set_page(frag, page);

		len = PAGE_SIZE;
		if (dlen < len)
			len = dlen;

		frag->page_offset = 0;
		skb_frag_size_set(frag, len);
		memcpy(skb_frag_address(frag), scratch, len);

		skb->truesize += len;
		skb->data_len += len;
		skb->len += len;

		skb_shinfo(skb)->nr_frags++;
	}

	err = 0;

out:
	put_cpu();
	return err;
}
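
The spill logic above, once the linear tailroom is exhausted, carves the remaining decompressed bytes into page-sized fragments. A user-space sketch of only that split (the real function also copies over the compressed prefix and updates the skb accounting, which is omitted here):

#include <stdio.h>

#define PAGE_SZ 4096

int main(void)
{
	unsigned int dlen = 9000;	/* decompressed bytes still to place */
	unsigned int tailroom = 1200;	/* linear room left in the skb */
	unsigned int linear, chunk;
	int frag;

	linear = dlen < tailroom ? dlen : tailroom;
	dlen -= linear;
	printf("linear part: %u\n", linear);

	for (frag = 0; dlen; frag++) {
		chunk = dlen < PAGE_SZ ? dlen : PAGE_SZ;
		printf("frag %d: %u bytes\n", frag, chunk);	/* 4096, 3704 */
		dlen -= chunk;
	}
	return 0;
}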
Example #15
/**
 * ctcm_unpack_skb - unpack a just-received skb and hand it over to the
 * upper layers.
 *
 * @ch:	  the channel on which this skb was received
 * @pskb: the received skb
 */
void ctcm_unpack_skb(struct channel *ch, struct sk_buff *pskb)
{
	struct net_device *dev = ch->netdev;
	struct ctcm_priv *priv = dev->ml_priv;
	__u16 len = *((__u16 *) pskb->data);

	skb_put(pskb, 2 + LL_HEADER_LENGTH);
	skb_pull(pskb, 2);
	pskb->dev = dev;
	pskb->ip_summed = CHECKSUM_UNNECESSARY;
	while (len > 0) {
		struct sk_buff *skb;
		int skblen;
		struct ll_header *header = (struct ll_header *)pskb->data;

		skb_pull(pskb, LL_HEADER_LENGTH);
		if ((ch->protocol == CTCM_PROTO_S390) &&
		    (header->type != ETH_P_IP)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALPKT)) {
				ch->logflags |= LOG_FLAG_ILLEGALPKT;
				/*
				 * Check packet type only if we stick strictly
				 * to S/390's protocol of OS390. This only
				 * supports IP. Otherwise allow any packet
				 * type.
				 */
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet type 0x%04x"
					" - dropping",
					CTCM_FUNTAIL, dev->name, header->type);
			}
			priv->stats.rx_dropped++;
			priv->stats.rx_frame_errors++;
			return;
		}
		pskb->protocol = ntohs(header->type);
		if ((header->length <= LL_HEADER_LENGTH) ||
		    (len <= LL_HEADER_LENGTH)) {
			if (!(ch->logflags & LOG_FLAG_ILLEGALSIZE)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Illegal packet size %d(%d,%d)"
					"- dropping",
					CTCM_FUNTAIL, dev->name,
					header->length, dev->mtu, len);
				ch->logflags |= LOG_FLAG_ILLEGALSIZE;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		header->length -= LL_HEADER_LENGTH;
		len -= LL_HEADER_LENGTH;
		if ((header->length > skb_tailroom(pskb)) ||
			(header->length > len)) {
			if (!(ch->logflags & LOG_FLAG_OVERRUN)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): Packet size %d (overrun)"
					" - dropping", CTCM_FUNTAIL,
						dev->name, header->length);
				ch->logflags |= LOG_FLAG_OVERRUN;
			}

			priv->stats.rx_dropped++;
			priv->stats.rx_length_errors++;
			return;
		}
		skb_put(pskb, header->length);
		skb_reset_mac_header(pskb);
		len -= header->length;
		skb = dev_alloc_skb(pskb->len);
		if (!skb) {
			if (!(ch->logflags & LOG_FLAG_NOMEM)) {
				CTCM_DBF_TEXT_(ERROR, CTC_DBF_ERROR,
					"%s(%s): MEMORY allocation error",
						CTCM_FUNTAIL, dev->name);
				ch->logflags |= LOG_FLAG_NOMEM;
			}
			priv->stats.rx_dropped++;
			return;
		}
		skb_copy_from_linear_data(pskb, skb_put(skb, pskb->len),
					  pskb->len);
		skb_reset_mac_header(skb);
		skb->dev = pskb->dev;
		skb->protocol = pskb->protocol;
		pskb->ip_summed = CHECKSUM_UNNECESSARY;
		skblen = skb->len;
		/*
		 * reset logflags
		 */
		ch->logflags = 0;
		priv->stats.rx_packets++;
		priv->stats.rx_bytes += skblen;
		netif_rx_ni(skb);
		if (len > 0) {
			skb_pull(pskb, header->length);
			if (skb_tailroom(pskb) < LL_HEADER_LENGTH) {
				CTCM_DBF_DEV_NAME(TRACE, dev,
					"Overrun in ctcm_unpack_skb");
				ch->logflags |= LOG_FLAG_OVERRUN;
				return;
			}
			skb_put(pskb, LL_HEADER_LENGTH);
		}
	}
}
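
The unpacking loop walks length-prefixed records inside one received block, with the same kind of bounds checks. A simplified user-space model; the 2-byte LL_HDR and the little-endian length read are assumptions of this sketch, not the CTCM wire format:

#include <stdio.h>
#include <string.h>
#include <stdint.h>

#define LL_HDR 2	/* assumed 2-byte length field per record */

int main(void)
{
	/* one block: two records carrying "abc" and "xy" */
	const uint8_t block[] = { 3 + LL_HDR, 0, 'a', 'b', 'c',
				  2 + LL_HDR, 0, 'x', 'y' };
	size_t off = 0;

	while (off + LL_HDR <= sizeof(block)) {
		uint16_t len;

		memcpy(&len, block + off, 2);	/* little-endian host assumed */
		if (len <= LL_HDR || off + len > sizeof(block))
			break;	/* mirrors the illegal-size/overrun checks */
		printf("record: %.*s\n", (int)(len - LL_HDR),
		       (const char *)(block + off + LL_HDR));
		off += len;
	}
	return 0;
}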
Example #16
void sc_capwap_parsingdatapacket(struct sc_capwap_session* session, struct sk_buff* skb) {
	uint8_t* pos;
	uint8_t* dstaddress;
	struct net_device* dev;
	struct sc_capwap_header* header = (struct sc_capwap_header*)skb->data;
	int is80211 = (IS_FLAG_T_HEADER(header) ? 1 : 0);
	struct sc_capwap_radio_addr* radioaddr = NULL;
	int radioaddrsize = 0;
	struct sc_capwap_wireless_information* winfo = NULL;
	struct sc_capwap_destination_wlans* destwlan = NULL;
	int winfosize = 0;

	TRACEKMOD("### sc_capwap_parsingdatapacket\n");

	/* Retrieve optional attribute */
	pos = skb->data + sizeof(struct sc_capwap_header);
	if (IS_FLAG_M_HEADER(header)) {
		radioaddr = (struct sc_capwap_radio_addr*)pos;
		radioaddrsize = (sizeof(struct sc_capwap_radio_addr) + radioaddr->length + 3) & ~3;
		pos += radioaddrsize;
	}

	if (IS_FLAG_W_HEADER(header)) {
		winfo = (struct sc_capwap_wireless_information*)pos;
		destwlan = (struct sc_capwap_destination_wlans*)(pos + sizeof(struct sc_capwap_wireless_information));
		winfosize = (sizeof(struct sc_capwap_wireless_information) + winfo->length + 3) & ~3;
		pos += winfosize;
	}

	/* Body packet */
	skb_pull(skb, GET_HLEN_HEADER(header) * 4);

	dstaddress = (is80211 ? ieee80211_get_DA((struct ieee80211_hdr*)skb->data) : (uint8_t*)((struct ethhdr*)skb->data)->h_dest);
	if (is_multicast_ether_addr(dstaddress)) {
		/* Accept only broadcast packet with wireless information */
		if (winfo) {
			uint8_t wlanid = 1;
			uint16_t bitmask = be16_to_cpu(destwlan->wlanidbitmap);
			while (bitmask) {
				if (bitmask & 0x01) {
					dev = sc_netlink_getdev_from_wlanid(GET_RID_HEADER(header), wlanid);
					if (dev) {
						struct sk_buff* clone = skb_copy_expand(skb, skb_headroom(skb), skb_tailroom(skb), GFP_KERNEL);
						if (!clone) {
							goto error;
						}

						/* */
						if (!is80211) { 
							if (sc_capwap_8023_to_80211(clone, dev->dev_addr)) {
								kfree_skb(clone);
								goto error;
							}
						}

						TRACEKMOD("*** Send broadcast packet to interface: %d\n", dev->ifindex);

						/* Send packet */
						local_bh_disable();
						ieee80211_inject_xmit(clone, dev);
						local_bh_enable();
					} else {
						TRACEKMOD("*** Unknown wlanid: %d\n", (int)wlanid);
					}
				}

				/* Next */
				wlanid++;
				bitmask >>= 1;
			}
		} else {
			TRACEKMOD("*** Invalid broadcast packet\n");
		}

		/* Free broadcast packet */
		kfree_skb(skb);
	} else {
Example #17
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const struct nf_loginfo *li,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;

	nlh = NLMSG_PUT(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg));
	nfmsg = NLMSG_DATA(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	NLA_PUT(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg);

	if (prefix)
		NLA_PUT(inst->skb, NFULA_PREFIX, plen, prefix);

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
			     htonl(indev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we
			 * need to look for the bridge group (when called
			 * from netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
				     htonl(indev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_INDEV,
				     htonl(indev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physindev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					     htonl(skb->nf_bridge->physindev->ifindex));
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
			     htonl(outdev->ifindex));
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
				     htonl(outdev->ifindex));
			/* this is the bridge group "brX" */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->br_port->br->dev->ifindex));
		} else {
			/* Case 2: outdev is a bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_OUTDEV,
				     htonl(outdev->ifindex));
			if (skb->nf_bridge && skb->nf_bridge->physoutdev)
				NLA_PUT_BE32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					     htonl(skb->nf_bridge->physoutdev->ifindex));
		}
#endif
	}

	if (skb->mark)
		NLA_PUT_BE32(inst->skb, NFULA_MARK, htonl(skb->mark));

	if (indev && skb->dev) {
		struct nfulnl_msg_packet_hw phw;
		int len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			NLA_PUT(inst->skb, NFULA_HWADDR, sizeof(phw), &phw);
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		NLA_PUT_BE16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type));
		NLA_PUT_BE16(inst->skb, NFULA_HWLEN,
			     htons(skb->dev->hard_header_len));
		NLA_PUT(inst->skb, NFULA_HWHEADER, skb->dev->hard_header_len,
			skb_mac_header(skb));
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		NLA_PUT(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts);
	}

	/* UID */
	if (skb->sk) {
		read_lock_bh(&skb->sk->sk_callback_lock);
		if (skb->sk->sk_socket && skb->sk->sk_socket->file) {
			struct file *file = skb->sk->sk_socket->file;
			__be32 uid = htonl(file->f_cred->fsuid);
			__be32 gid = htonl(file->f_cred->fsgid);
			/* need to unlock here since NLA_PUT may goto */
			read_unlock_bh(&skb->sk->sk_callback_lock);
			NLA_PUT_BE32(inst->skb, NFULA_UID, uid);
			NLA_PUT_BE32(inst->skb, NFULA_GID, gid);
		} else
			read_unlock_bh(&skb->sk->sk_callback_lock);
	}

	/* local sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ, htonl(inst->seq++));

	/* global sequence number */
	if (inst->flags & NFULNL_CFG_F_SEQ_GLOBAL)
		NLA_PUT_BE32(inst->skb, NFULA_SEQ_GLOBAL,
			     htonl(atomic_inc_return(&global_seq)));

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			goto nlmsg_failure;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nlmsg_failure:
nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
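
The payload block above reserves nla_total_size(data_len) bytes of tailroom but records nla_attr_size(data_len) in nla_len. A standalone sketch of that arithmetic, assuming it mirrors the helpers in include/net/netlink.h:

#include <stdio.h>

struct nlattr {
	unsigned short nla_len;
	unsigned short nla_type;
};

#define NLA_ALIGNTO	4
#define NLA_ALIGN(len)	(((len) + NLA_ALIGNTO - 1) & ~(NLA_ALIGNTO - 1))
#define NLA_HDRLEN	((int) NLA_ALIGN(sizeof(struct nlattr)))

/* nla_attr_size(): header plus payload; nla_total_size(): the same,
 * padded out to a 4-byte boundary for the next attribute */
static int nla_attr_size(int payload)  { return NLA_HDRLEN + payload; }
static int nla_total_size(int payload) { return NLA_ALIGN(nla_attr_size(payload)); }

int main(void)
{
	int data_len = 41;	/* hypothetical payload length */

	/* the tailroom check reserves total_size (48) while nla_len
	 * records attr_size (45), exactly as in the data_len block */
	printf("attr_size=%d total_size=%d\n",
	       nla_attr_size(data_len), nla_total_size(data_len));
	return 0;
}
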
Example #18
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct iphdr *iph = oldskb->nh.iph;
	struct tcphdr _otcph, *oth, *tcph;
	__be16 tmp_port;
	__be32 tmp_addr;
	int needs_ack;
	unsigned int addr_type;

	/* IP header checks: fragment. */
	if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
		return;

	oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4,
				 sizeof(_otcph), &_otcph);
	if (oth == NULL)
		return;

	/* No RST for RST. */
	if (oth->rst)
		return;

	/* Check checksum */
	if (nf_ip_checksum(oldskb, hook, iph->ihl * 4, IPPROTO_TCP))
		return;

	/* We need a linear, writeable skb.  We also need to expand
	   headroom in case hh_len of incoming interface < hh_len of
	   outgoing interface */
	nskb = skb_copy_expand(oldskb, LL_MAX_HEADER, skb_tailroom(oldskb),
			       GFP_ATOMIC);
	if (!nskb)
		return;

	/* This packet will not be the same as the other: clear nf fields */
	nf_reset(nskb);
	nskb->mark = 0;
	skb_init_secmark(nskb);

	tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);

	/* Swap source and dest */
	tmp_addr = nskb->nh.iph->saddr;
	nskb->nh.iph->saddr = nskb->nh.iph->daddr;
	nskb->nh.iph->daddr = tmp_addr;
	tmp_port = tcph->source;
	tcph->source = tcph->dest;
	tcph->dest = tmp_port;

	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
	nskb->nh.iph->tot_len = htons(nskb->len);

	if (tcph->ack) {
		needs_ack = 0;
		tcph->seq = oth->ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin
				      + oldskb->len - oldskb->nh.iph->ihl*4
				      - (oth->doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;

	tcph->window = 0;
	tcph->urg_ptr = 0;

	/* Adjust TCP checksum */
	tcph->check = 0;
	tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
				   nskb->nh.iph->saddr,
				   nskb->nh.iph->daddr,
				   csum_partial((char *)tcph,
						sizeof(struct tcphdr), 0));

	/* Set DF, id = 0 */
	nskb->nh.iph->frag_off = htons(IP_DF);
	nskb->nh.iph->id = 0;

	addr_type = RTN_UNSPEC;
	if (hook != NF_IP_FORWARD
#ifdef CONFIG_BRIDGE_NETFILTER
	    || (nskb->nf_bridge && nskb->nf_bridge->mask & BRNF_BRIDGED)
#endif
	   )
		addr_type = RTN_LOCAL;

	if (ip_route_me_harder(&nskb, addr_type))
		goto free_nskb;

	nskb->ip_summed = CHECKSUM_NONE;

	/* Adjust IP TTL */
	nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);

	/* Adjust IP checksum */
	nskb->nh.iph->check = 0;
	nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, 
					   nskb->nh.iph->ihl);

	/* "Never happens" */
	if (nskb->len > dst_mtu(nskb->dst))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		dst_output);
	return;

 free_nskb:
	kfree_skb(nskb);
}
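
The seq/ack_seq selection above follows RFC 793: a segment that carried an ACK already tells us the sequence number the peer expects, while any other segment forces the RST to acknowledge everything the offending segment occupied. A standalone model of the arithmetic (host byte order, hypothetical values):

#include <stdio.h>
#include <stdint.h>

struct seg {
	uint32_t seq, ack_seq;
	int syn, fin, ack;
	int ip_len, ihl_bytes, doff_bytes;
};

/* pick the RST's seq/ack exactly as the branches above do */
static void rst_numbers(const struct seg *s, uint32_t *seq, uint32_t *ack)
{
	if (s->ack) {
		*seq = s->ack_seq;	/* peer told us what it expects */
		*ack = 0;
	} else {
		*seq = 0;
		/* SYN and FIN each occupy one sequence number, plus the
		 * payload: IP total length minus IP and TCP header bytes */
		*ack = s->seq + s->syn + s->fin
		       + s->ip_len - s->ihl_bytes - s->doff_bytes;
	}
}

int main(void)
{
	/* a bare SYN: the RST must carry an ACK of seq + 1 */
	struct seg syn = { .seq = 1000, .syn = 1,
			   .ip_len = 40, .ihl_bytes = 20, .doff_bytes = 20 };
	uint32_t seq, ack;

	rst_numbers(&syn, &seq, &ack);
	printf("rst: seq=%u ack_seq=%u\n", seq, ack);	/* seq=0 ack_seq=1001 */
	return 0;
}
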
/* hard_start_xmit function for data interfaces (wlan#, wlan#wds#, wlan#sta)
 * Convert Ethernet header into a suitable IEEE 802.11 header depending on
 * device configuration. */
netdev_tx_t hostap_data_start_xmit(struct sk_buff *skb,
				   struct net_device *dev)
{
	struct hostap_interface *iface;
	local_info_t *local;
	int need_headroom, need_tailroom = 0;
	struct ieee80211_hdr hdr;
	u16 fc, ethertype = 0;
	enum {
		WDS_NO = 0, WDS_OWN_FRAME, WDS_COMPLIANT_FRAME
	} use_wds = WDS_NO;
	u8 *encaps_data;
	int hdr_len, encaps_len, skip_header_bytes;
	int to_assoc_ap = 0;
	struct hostap_skb_tx_data *meta;

	iface = netdev_priv(dev);
	local = iface->local;

	if (skb->len < ETH_HLEN) {
		printk(KERN_DEBUG "%s: hostap_data_start_xmit: short skb "
		       "(len=%d)\n", dev->name, skb->len);
		kfree_skb(skb);
		return NETDEV_TX_OK;
	}

	if (local->ddev != dev) {
		use_wds = (local->iw_mode == IW_MODE_MASTER &&
			   !(local->wds_type & HOSTAP_WDS_STANDARD_FRAME)) ?
			WDS_OWN_FRAME : WDS_COMPLIANT_FRAME;
		if (dev == local->stadev) {
			to_assoc_ap = 1;
			use_wds = WDS_NO;
		} else if (dev == local->apdev) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "AP device with Ethernet net dev\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	} else {
		if (local->iw_mode == IW_MODE_REPEAT) {
			printk(KERN_DEBUG "%s: prism2_tx: trying to use "
			       "non-WDS link in Repeater mode\n", dev->name);
			kfree_skb(skb);
			return NETDEV_TX_OK;
		} else if (local->iw_mode == IW_MODE_INFRA &&
			   (local->wds_type & HOSTAP_WDS_AP_CLIENT) &&
			   memcmp(skb->data + ETH_ALEN, dev->dev_addr,
				  ETH_ALEN) != 0) {
			/* AP client mode: send frames with foreign src addr
			 * using 4-addr WDS frames */
			use_wds = WDS_COMPLIANT_FRAME;
		}
	}

	/* Incoming skb->data: dst_addr[6], src_addr[6], proto[2], payload
	 * ==>
	 * Prism2 TX frame with 802.11 header:
	 * txdesc (address order depending on the mode used; includes dst_addr
	 * and src_addr), possible encapsulation (RFC1042/Bridge-Tunnel),
	 * proto[2], payload {, possible addr4[6]} */

	ethertype = (skb->data[12] << 8) | skb->data[13];

	memset(&hdr, 0, sizeof(hdr));

	/* Length of data after IEEE 802.11 header */
	encaps_data = NULL;
	encaps_len = 0;
	skip_header_bytes = ETH_HLEN;
	if (ethertype == ETH_P_AARP || ethertype == ETH_P_IPX) {
		encaps_data = bridge_tunnel_header;
		encaps_len = sizeof(bridge_tunnel_header);
		skip_header_bytes -= 2;
	} else if (ethertype >= 0x600) {
		encaps_data = rfc1042_header;
		encaps_len = sizeof(rfc1042_header);
		skip_header_bytes -= 2;
	}

	fc = IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA;
	hdr_len = IEEE80211_DATA_HDR3_LEN;

	if (use_wds != WDS_NO) {
		/* Note! Prism2 station firmware has problems with sending real
		 * 802.11 frames with four addresses; until these problems can
		 * be fixed or worked around, 4-addr frames needed for WDS use
		 * an incompatible format: the FromDS flag is not set and the
		 * fourth address is added after the frame payload; it is
		 * assumed that the receiving station knows how to handle this
		 * frame format */

		if (use_wds == WDS_COMPLIANT_FRAME) {
			fc |= IEEE80211_FCTL_FROMDS | IEEE80211_FCTL_TODS;
			/* From&To DS: Addr1 = RA, Addr2 = TA, Addr3 = DA,
			 * Addr4 = SA */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			hdr_len += ETH_ALEN;
		} else {
			/* bogus 4-addr format to workaround Prism2 station
			 * f/w bug */
			fc |= IEEE80211_FCTL_TODS;
			/* From DS: Addr1 = DA (used as RA),
			 * Addr2 = BSSID (used as TA), Addr3 = SA (used as DA),
			 */

			/* SA from skb->data + ETH_ALEN will be added after
			 * frame payload; use hdr.addr4 as a temporary buffer
			 */
			skb_copy_from_linear_data_offset(skb, ETH_ALEN,
							 &hdr.addr4, ETH_ALEN);
			need_tailroom += ETH_ALEN;
		}

		/* send broadcast and multicast frames to broadcast RA, if
		 * configured; otherwise, use unicast RA of the WDS link */
		if ((local->wds_type & HOSTAP_WDS_BROADCAST_RA) &&
		    skb->data[0] & 0x01)
			memset(&hdr.addr1, 0xff, ETH_ALEN);
		else if (iface->type == HOSTAP_INTERFACE_WDS)
			memcpy(&hdr.addr1, iface->u.wds.remote_addr,
			       ETH_ALEN);
		else
			memcpy(&hdr.addr1, local->bssid, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_MASTER && !to_assoc_ap) {
		fc |= IEEE80211_FCTL_FROMDS;
		/* From DS: Addr1 = DA, Addr2 = BSSID, Addr3 = SA */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		memcpy(&hdr.addr2, dev->dev_addr, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr3,
						 ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_INFRA || to_assoc_ap) {
		fc |= IEEE80211_FCTL_TODS;
		/* To DS: Addr1 = BSSID, Addr2 = SA, Addr3 = DA */
		memcpy(&hdr.addr1, to_assoc_ap ?
		       local->assoc_ap_addr : local->bssid, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		skb_copy_from_linear_data(skb, &hdr.addr3, ETH_ALEN);
	} else if (local->iw_mode == IW_MODE_ADHOC) {
		/* not From/To DS: Addr1 = DA, Addr2 = SA, Addr3 = BSSID */
		skb_copy_from_linear_data(skb, &hdr.addr1, ETH_ALEN);
		skb_copy_from_linear_data_offset(skb, ETH_ALEN, &hdr.addr2,
						 ETH_ALEN);
		memcpy(&hdr.addr3, local->bssid, ETH_ALEN);
	}

	hdr.frame_control = cpu_to_le16(fc);

	skb_pull(skb, skip_header_bytes);
	need_headroom = local->func->need_tx_headroom + hdr_len + encaps_len;
	if (skb_tailroom(skb) < need_tailroom) {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
		if (pskb_expand_head(skb, need_headroom, need_tailroom,
				     GFP_ATOMIC)) {
			kfree_skb(skb);
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else if (skb_headroom(skb) < need_headroom) {
		struct sk_buff *tmp = skb;
		skb = skb_realloc_headroom(skb, need_headroom);
		kfree_skb(tmp);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	} else {
		skb = skb_unshare(skb, GFP_ATOMIC);
		if (skb == NULL) {
			iface->stats.tx_dropped++;
			return NETDEV_TX_OK;
		}
	}

	if (encaps_data)
		memcpy(skb_push(skb, encaps_len), encaps_data, encaps_len);
	memcpy(skb_push(skb, hdr_len), &hdr, hdr_len);
	if (use_wds == WDS_OWN_FRAME) {
		memcpy(skb_put(skb, ETH_ALEN), &hdr.addr4, ETH_ALEN);
	}

	iface->stats.tx_packets++;
	iface->stats.tx_bytes += skb->len;

	skb_reset_mac_header(skb);
	meta = (struct hostap_skb_tx_data *) skb->cb;
	memset(meta, 0, sizeof(*meta));
	meta->magic = HOSTAP_SKB_TX_DATA_MAGIC;
	if (use_wds)
		meta->flags |= HOSTAP_TX_FLAGS_WDS;
	meta->ethertype = ethertype;
	meta->iface = iface;

	/* Send IEEE 802.11 encapsulated frame using the master radio device */
	skb->dev = local->dev;
	dev_queue_xmit(skb);
	return NETDEV_TX_OK;
}
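
The four addressing branches above implement the standard 802.11 ToDS/FromDS address layout. A standalone summary table, condensed from those branches (the WDS row describes the compliant 4-address format, not the Prism2 workaround):

#include <stdio.h>

static const char *addr_table[][5] = {
	/* mode             Addr1    Addr2    Addr3    Addr4 */
	{ "IBSS (ad-hoc)",  "DA",    "SA",    "BSSID", "-"  },
	{ "To DS (STA)",    "BSSID", "SA",    "DA",    "-"  },
	{ "From DS (AP)",   "DA",    "BSSID", "SA",    "-"  },
	{ "WDS (4-addr)",   "RA",    "TA",    "DA",    "SA" },
};

int main(void)
{
	unsigned int i;

	for (i = 0; i < sizeof(addr_table) / sizeof(addr_table[0]); i++)
		printf("%-14s %-6s %-6s %-6s %s\n", addr_table[i][0],
		       addr_table[i][1], addr_table[i][2],
		       addr_table[i][3], addr_table[i][4]);
	return 0;
}
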
Example #20
int skb_cow_data(struct sk_buff *skb, int tailbits, struct sk_buff **trailer)
{
	int copyflag;
	int elt;
	struct sk_buff *skb1, **skb_p;

	/* If skb is cloned or its head is paged, reallocate
	 * head pulling out all the pages (pages are considered not writable
	 * at the moment even if they are anonymous).
	 */
	if ((skb_cloned(skb) || skb_shinfo(skb)->nr_frags) &&
	    __pskb_pull_tail(skb, skb_pagelen(skb)-skb_headlen(skb)) == NULL)
		return -ENOMEM;

	/* Easy case. Most of packets will go this way. */
	if (!skb_shinfo(skb)->frag_list) {
		/* A little trouble: not enough space for the trailer.
		 * This should not happen when the stack is tuned to generate
		 * good frames. OK, on a miss we reallocate and reserve even
		 * more space, 128 bytes is fair. */

		if (skb_tailroom(skb) < tailbits &&
		    pskb_expand_head(skb, 0, tailbits-skb_tailroom(skb)+128, GFP_ATOMIC))
			return -ENOMEM;

		/* Voila! */
		*trailer = skb;
		return 1;
	}

	/* Misery. We are in trouble, going to mince the fragments... */

	elt = 1;
	skb_p = &skb_shinfo(skb)->frag_list;
	copyflag = 0;

	while ((skb1 = *skb_p) != NULL) {
		int ntail = 0;

		/* The fragment is partially pulled by someone,
		 * this can happen on input. Copy it and everything
		 * after it. */

		if (skb_shared(skb1))
			copyflag = 1;

		/* If the skb is the last, worry about trailer. */

		if (skb1->next == NULL && tailbits) {
			if (skb_shinfo(skb1)->nr_frags ||
			    skb_shinfo(skb1)->frag_list ||
			    skb_tailroom(skb1) < tailbits)
				ntail = tailbits + 128;
		}

		if (copyflag ||
		    skb_cloned(skb1) ||
		    ntail ||
		    skb_shinfo(skb1)->nr_frags ||
		    skb_shinfo(skb1)->frag_list) {
			struct sk_buff *skb2;

			/* F**k, we are miserable poor guys... */
			if (ntail == 0)
				skb2 = skb_copy(skb1, GFP_ATOMIC);
			else
				skb2 = skb_copy_expand(skb1,
						       skb_headroom(skb1),
						       ntail,
						       GFP_ATOMIC);
			if (unlikely(skb2 == NULL))
				return -ENOMEM;

			if (skb1->sk)
				skb_set_owner_w(skb2, skb1->sk);

			/* Looking around. Are we still alive?
			 * OK, link new skb, drop old one */

			skb2->next = skb1->next;
			*skb_p = skb2;
			kfree_skb(skb1);
			skb1 = skb2;
		}
		elt++;
		*trailer = skb1;
		skb_p = &skb1->next;
	}

	return elt;
}
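
A minimal kernel-style sketch of a typical caller (reserve_icv and icv_len are hypothetical names; skb_cow_data and pskb_put are the kernel's own helpers): skb_cow_data() returns the number of skbs covering the data, or a negative errno, and points *trailer at the writable skb that owns the tail with the requested tailroom.

#include <linux/skbuff.h>
#include <linux/string.h>

static int reserve_icv(struct sk_buff *skb, int icv_len)
{
	struct sk_buff *trailer;
	int nfrags = skb_cow_data(skb, icv_len, &trailer);

	if (nfrags < 0)
		return nfrags;		/* typically -ENOMEM */

	/* pskb_put() grows the overall packet through the trailer skb,
	 * which skb_cow_data() guaranteed to be private and writable */
	memset(pskb_put(skb, trailer, icv_len), 0, icv_len);
	return nfrags;
}
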
Example #21
/* log handler for internal netfilter logging api */
static void
nfulnl_log_packet(unsigned int pf,
		  unsigned int hooknum,
		  const struct sk_buff *skb,
		  const struct net_device *in,
		  const struct net_device *out,
		  const struct nf_loginfo *li_user,
		  const char *prefix)
{
	unsigned int size, data_len;
	struct nfulnl_instance *inst;
	const struct nf_loginfo *li;
	unsigned int qthreshold;
	unsigned int nlbufsiz;

	if (li_user && li_user->type == NF_LOG_TYPE_ULOG) 
		li = li_user;
	else
		li = &default_loginfo;

	inst = instance_lookup_get(li->u.ulog.group);
	if (!inst)
		inst = instance_lookup_get(0);
	if (!inst) {
		PRINTR("nfnetlink_log: trying to log packet, "
			"but no instance for group %u\n", li->u.ulog.group);
		return;
	}

	/* all macros expand to constant values at compile time */
	/* FIXME: do we want to make the size calculation conditional based on
	 * what is actually present?  way more branches and checks, but more
	 * memory efficient... */
	size =    NLMSG_SPACE(sizeof(struct nfgenmsg))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hdr))
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#ifdef CONFIG_BRIDGE_NETFILTER
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
		+ NFA_SPACE(sizeof(u_int32_t))	/* ifindex */
#endif
		+ NFA_SPACE(sizeof(u_int32_t))	/* mark */
		+ NFA_SPACE(sizeof(u_int32_t))	/* uid */
		+ NFA_SPACE(NFULNL_PREFIXLEN)	/* prefix */
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_hw))
		+ NFA_SPACE(sizeof(struct nfulnl_msg_packet_timestamp));

	UDEBUG("initial size=%u\n", size);

	spin_lock_bh(&inst->lock);

	qthreshold = inst->qthreshold;
	/* per-rule qthreshold overrides per-instance */
	if (qthreshold > li->u.ulog.qthreshold)
		qthreshold = li->u.ulog.qthreshold;
	
	switch (inst->copy_mode) {
	case NFULNL_COPY_META:
	case NFULNL_COPY_NONE:
		data_len = 0;
		break;
	
	case NFULNL_COPY_PACKET:
		if (inst->copy_range == 0 
		    || inst->copy_range > skb->len)
			data_len = skb->len;
		else
			data_len = inst->copy_range;
		
		size += NFA_SPACE(data_len);
		UDEBUG("copy_packet, therefore size now %u\n", size);
		break;
	
	default:
		spin_unlock_bh(&inst->lock);
		instance_put(inst);
		return;
	}

	if (size > inst->nlbufsiz)
		nlbufsiz = size;
	else
		nlbufsiz = inst->nlbufsiz;

	if (!inst->skb) {
		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	} else if (inst->qlen >= qthreshold ||
		   size > skb_tailroom(inst->skb)) {
		/* either the queue len is too high or we don't have
		 * enough room in the skb left. flush to userspace. */
		UDEBUG("flushing old skb\n");

		__nfulnl_send(inst);

		if (!(inst->skb = nfulnl_alloc_skb(nlbufsiz, size))) {
			UDEBUG("error in nfulnl_alloc_skb(%u, %u)\n",
				inst->nlbufsiz, size);
			goto alloc_failure;
		}
	}

	UDEBUG("qlen %d, qthreshold %d\n", inst->qlen, qthreshold);
	inst->qlen++;

	__build_packet_message(inst, skb, data_len, pf,
				hooknum, in, out, li, prefix);

	/* timer_pending() is always called while holding inst->lock, so
	 * there is no chance of a race here */
	if (!timer_pending(&inst->timer)) {
		instance_get(inst);
		inst->timer.expires = jiffies + (inst->flushtimeout*HZ/100);
		add_timer(&inst->timer);
	}
	spin_unlock_bh(&inst->lock);

	return;

alloc_failure:
	spin_unlock_bh(&inst->lock);
	instance_put(inst);
	UDEBUG("error allocating skb\n");
	/* FIXME: statistics */
}
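
Both ipt_ULOG and nfnetlink_log share the same batching policy: allocate the netlink skb lazily, then flush early when either the queued-message threshold is reached or the next message no longer fits in the remaining tailroom. A standalone model of the flush decision (hypothetical numbers):

#include <stdio.h>

struct batch {
	int qlen;	/* messages already queued in the skb */
	int room;	/* skb_tailroom() remaining */
};

/* flush before queueing when the threshold is hit or the message
 * no longer fits -- the two conditions tested in both loggers above */
static int must_flush(const struct batch *b, int qthreshold, int size)
{
	return b->qlen >= qthreshold || size > b->room;
}

int main(void)
{
	struct batch b = { .qlen = 3, .room = 200 };

	printf("%d\n", must_flush(&b, 4, 300));	/* 1: no tailroom left */
	printf("%d\n", must_flush(&b, 4, 100));	/* 0: still fits */
	printf("%d\n", must_flush(&b, 3, 100));	/* 1: threshold reached */
	return 0;
}
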
Example #22
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct tcphdr otcph, *tcph;
	struct rtable *rt;
	u_int16_t tmp_port;
	u_int32_t tmp_addr;
	int needs_ack;
	int hh_len;

	/* IP header checks: fragment. */
	if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
		return;

	if (skb_copy_bits(oldskb, oldskb->nh.iph->ihl*4,
			  &otcph, sizeof(otcph)) < 0)
		return;

	/* No RST for RST. */
	if (otcph.rst)
		return;

	/* FIXME: Check checksum --RR */
	if ((rt = route_reverse(oldskb, hook)) == NULL)
		return;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	/* We need a linear, writeable skb.  We also need to expand
	   headroom in case hh_len of incoming interface < hh_len of
	   outgoing interface */
	nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb),
			       GFP_ATOMIC);
	if (!nskb) {
		dst_release(&rt->u.dst);
		return;
	}

	dst_release(nskb->dst);
	nskb->dst = &rt->u.dst;

	/* This packet will not be the same as the other: clear nf fields */
	nf_conntrack_put(nskb->nfct);
	nskb->nfct = NULL;
	nskb->nfcache = 0;
#ifdef CONFIG_NETFILTER_DEBUG
	nskb->nf_debug = 0;
#endif
	nskb->nfmark = 0;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(nskb->nf_bridge);
	nskb->nf_bridge = NULL;
#endif

	tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);

	/* Swap source and dest */
	tmp_addr = nskb->nh.iph->saddr;
	nskb->nh.iph->saddr = nskb->nh.iph->daddr;
	nskb->nh.iph->daddr = tmp_addr;
	tmp_port = tcph->source;
	tcph->source = tcph->dest;
	tcph->dest = tmp_port;

	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
	nskb->nh.iph->tot_len = htons(nskb->len);

	if (tcph->ack) {
		needs_ack = 0;
		tcph->seq = otcph.ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(otcph.seq) + otcph.syn + otcph.fin
				      + oldskb->len - oldskb->nh.iph->ihl*4
				      - (otcph.doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;

	tcph->window = 0;
	tcph->urg_ptr = 0;

	/* Adjust TCP checksum */
	tcph->check = 0;
	tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
				   nskb->nh.iph->saddr,
				   nskb->nh.iph->daddr,
				   csum_partial((char *)tcph,
						sizeof(struct tcphdr), 0));

	/* Adjust IP TTL, DF */
	nskb->nh.iph->ttl = MAXTTL;
	/* Set DF, id = 0 */
	nskb->nh.iph->frag_off = htons(IP_DF);
	nskb->nh.iph->id = 0;

	/* Adjust IP checksum */
	nskb->nh.iph->check = 0;
	nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, 
					   nskb->nh.iph->ihl);

	/* "Never happens" */
	if (nskb->len > dst_pmtu(nskb->dst))
		goto free_nskb;

	connection_attach(nskb, oldskb->nfct);

	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		ip_finish_output);
	return;

 free_nskb:
	kfree_skb(nskb);
}
/* This is an inline function, we don't really care about a long
 * list of arguments */
static inline int
__build_packet_message(struct nfnl_log_net *log,
			struct nfulnl_instance *inst,
			const struct sk_buff *skb,
			unsigned int data_len,
			u_int8_t pf,
			unsigned int hooknum,
			const struct net_device *indev,
			const struct net_device *outdev,
			const char *prefix, unsigned int plen)
{
	struct nfulnl_msg_packet_hdr pmsg;
	struct nlmsghdr *nlh;
	struct nfgenmsg *nfmsg;
	sk_buff_data_t old_tail = inst->skb->tail;
	struct sock *sk;
	const unsigned char *hwhdrp;

	nlh = nlmsg_put(inst->skb, 0, 0,
			NFNL_SUBSYS_ULOG << 8 | NFULNL_MSG_PACKET,
			sizeof(struct nfgenmsg), 0);
	if (!nlh)
		return -1;
	nfmsg = nlmsg_data(nlh);
	nfmsg->nfgen_family = pf;
	nfmsg->version = NFNETLINK_V0;
	nfmsg->res_id = htons(inst->group_num);

	memset(&pmsg, 0, sizeof(pmsg));
	pmsg.hw_protocol	= skb->protocol;
	pmsg.hook		= hooknum;

	if (nla_put(inst->skb, NFULA_PACKET_HDR, sizeof(pmsg), &pmsg))
		goto nla_put_failure;

	if (prefix &&
	    nla_put(inst->skb, NFULA_PREFIX, plen, prefix))
		goto nla_put_failure;

	if (indev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
				 htonl(indev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: indev is the physical input device, we need
			 * to look for the bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(indev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(br_port_get_rcu(indev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: indev is bridge group, we need to look for
			 * physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_INDEV,
					 htonl(indev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physindev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSINDEV,
					 htonl(skb->nf_bridge->physindev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (outdev) {
#ifndef CONFIG_BRIDGE_NETFILTER
		if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
				 htonl(outdev->ifindex)))
			goto nla_put_failure;
#else
		if (pf == PF_BRIDGE) {
			/* Case 1: outdev is physical output device, we need to
			 * look for bridge group (when called from
			 * netfilter_bridge) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(outdev->ifindex)) ||
			/* this is the bridge group "brX" */
			/* rcu_read_lock()ed by nf_hook_slow or nf_log_packet */
			    nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(br_port_get_rcu(outdev)->br->dev->ifindex)))
				goto nla_put_failure;
		} else {
			/* Case 2: outdev is a bridge group, we need to look
			 * for the physical device (when called from ipv4) */
			if (nla_put_be32(inst->skb, NFULA_IFINDEX_OUTDEV,
					 htonl(outdev->ifindex)))
				goto nla_put_failure;
			if (skb->nf_bridge && skb->nf_bridge->physoutdev &&
			    nla_put_be32(inst->skb, NFULA_IFINDEX_PHYSOUTDEV,
					 htonl(skb->nf_bridge->physoutdev->ifindex)))
				goto nla_put_failure;
		}
#endif
	}

	if (skb->mark &&
	    nla_put_be32(inst->skb, NFULA_MARK, htonl(skb->mark)))
		goto nla_put_failure;

	if (indev && skb->dev &&
	    skb->mac_header != skb->network_header) {
		struct nfulnl_msg_packet_hw phw;
		int len;

		memset(&phw, 0, sizeof(phw));
		len = dev_parse_header(skb, phw.hw_addr);
		if (len > 0) {
			phw.hw_addrlen = htons(len);
			if (nla_put(inst->skb, NFULA_HWADDR, sizeof(phw), &phw))
				goto nla_put_failure;
		}
	}

	if (indev && skb_mac_header_was_set(skb)) {
		if (nla_put_be16(inst->skb, NFULA_HWTYPE, htons(skb->dev->type)) ||
		    nla_put_be16(inst->skb, NFULA_HWLEN,
				 htons(skb->dev->hard_header_len)))
			goto nla_put_failure;

		hwhdrp = skb_mac_header(skb);

		if (skb->dev->type == ARPHRD_SIT)
			hwhdrp -= ETH_HLEN;

		if (hwhdrp >= skb->head &&
		    nla_put(inst->skb, NFULA_HWHEADER,
			    skb->dev->hard_header_len, hwhdrp))
			goto nla_put_failure;
	}

	if (skb->tstamp.tv64) {
		struct nfulnl_msg_packet_timestamp ts;
		struct timeval tv = ktime_to_timeval(skb->tstamp);
		ts.sec = cpu_to_be64(tv.tv_sec);
		ts.usec = cpu_to_be64(tv.tv_usec);

		if (nla_put(inst->skb, NFULA_TIMESTAMP, sizeof(ts), &ts))
			goto nla_put_failure;
	}

	/* UID */
	sk = skb->sk;
	if (sk && sk->sk_state != TCP_TIME_WAIT) {
		read_lock_bh(&sk->sk_callback_lock);
		if (sk->sk_socket && sk->sk_socket->file) {
			struct file *file = sk->sk_socket->file;
			const struct cred *cred = file->f_cred;
			struct user_namespace *user_ns = inst->peer_user_ns;
			__be32 uid = htonl(from_kuid_munged(user_ns, cred->fsuid));
			__be32 gid = htonl(from_kgid_munged(user_ns, cred->fsgid));
			read_unlock_bh(&sk->sk_callback_lock);
			if (nla_put_be32(inst->skb, NFULA_UID, uid) ||
			    nla_put_be32(inst->skb, NFULA_GID, gid))
				goto nla_put_failure;
		} else
			read_unlock_bh(&sk->sk_callback_lock);
	}

	/* local sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ) &&
	    nla_put_be32(inst->skb, NFULA_SEQ, htonl(inst->seq++)))
		goto nla_put_failure;

	/* global sequence number */
	if ((inst->flags & NFULNL_CFG_F_SEQ_GLOBAL) &&
	    nla_put_be32(inst->skb, NFULA_SEQ_GLOBAL,
			 htonl(atomic_inc_return(&log->global_seq))))
		goto nla_put_failure;

	if (data_len) {
		struct nlattr *nla;
		int size = nla_attr_size(data_len);

		if (skb_tailroom(inst->skb) < nla_total_size(data_len)) {
			printk(KERN_WARNING "nfnetlink_log: no tailroom!\n");
			return -1;
		}

		nla = (struct nlattr *)skb_put(inst->skb, nla_total_size(data_len));
		nla->nla_type = NFULA_PAYLOAD;
		nla->nla_len = size;

		if (skb_copy_bits(skb, 0, nla_data(nla), data_len))
			BUG();
	}

	nlh->nlmsg_len = inst->skb->tail - old_tail;
	return 0;

nla_put_failure:
	PRINTR(KERN_ERR "nfnetlink_log: error creating log nlmsg\n");
	return -1;
}
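
Compared with Example #17 above, this later version replaces the NLA_PUT() macros, which jump to a caller-supplied nla_put_failure label on overflow, with nla_put() calls whose return value is checked; that hidden jump is why the older UID block had to unlock sk_callback_lock before invoking the macro. A standalone sketch of the macro idiom (PUT_OR_FAIL is hypothetical, shaped like the old NLA_PUT):

#include <stdio.h>

#define PUT_OR_FAIL(ok)				\
	do {					\
		if (!(ok))			\
			goto nla_put_failure;	\
	} while (0)

static int build(int room_left)
{
	PUT_OR_FAIL(room_left >= 4);	/* hidden control transfer */
	return 0;

nla_put_failure:			/* label the macro silently targets */
	return -1;
}

int main(void)
{
	printf("%d %d\n", build(8), build(2));	/* 0 -1 */
	return 0;
}
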
Example #24
static inline void brf6150_rx(struct brf6150_info *info)
{
	u8 byte;

	NBT_DBG_TRANSFER("rx_tasklet woke up\ndata ");

	while (brf6150_inb(info, UART_LSR) & UART_LSR_DR) {
		if (info->rx_skb == NULL) {
			info->rx_skb = bt_skb_alloc(HCI_MAX_FRAME_SIZE, GFP_ATOMIC);
			if (!info->rx_skb) {
				printk(KERN_WARNING "brf6150: Can't allocate memory for new packet\n");
				return;
			}
			info->rx_state = WAIT_FOR_PKT_TYPE;
			info->rx_skb->dev = (void *)info->hdev;
			brf6150_disable_pm_rx(info);
			clk_enable(info->uart_ck);
		}

		byte = brf6150_inb(info, UART_RX);
		if (info->garbage_bytes) {
			info->garbage_bytes--;
			info->hdev->stat.err_rx++;
			continue;
		}
		info->hdev->stat.byte_rx++;
		NBT_DBG_TRANSFER_NF("0x%.2x  ", byte);
		switch (info->rx_state) {
		case WAIT_FOR_PKT_TYPE:
			bt_cb(info->rx_skb)->pkt_type = byte;
			info->rx_count = brf6150_get_hdr_len(byte);
			if (info->rx_count >= 0) {
				info->rx_state = WAIT_FOR_HEADER;
			} else {
				info->hdev->stat.err_rx++;
				kfree_skb(info->rx_skb);
				info->rx_skb = NULL;
				clk_disable(info->uart_ck);
			}
			break;
		case WAIT_FOR_HEADER:
			info->rx_count--;
			*skb_put(info->rx_skb, 1) = byte;
			if (info->rx_count == 0) {
				info->rx_count = brf6150_get_data_len(info, info->rx_skb);
				if (info->rx_count > skb_tailroom(info->rx_skb)) {
					printk(KERN_WARNING "brf6150: Frame is %ld bytes too long.\n",
					       info->rx_count - skb_tailroom(info->rx_skb));
					/* compute the surplus before dropping the skb:
					 * the original order dereferenced rx_skb after
					 * setting it to NULL and leaked the buffer */
					info->garbage_bytes = info->rx_count - skb_tailroom(info->rx_skb);
					kfree_skb(info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					break;
				}
				info->rx_state = WAIT_FOR_DATA;
				if (bt_cb(info->rx_skb)->pkt_type == H4_NEG_PKT) {
					brf6150_negotiation_packet(info, info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					return;
				}
				if (bt_cb(info->rx_skb)->pkt_type == H4_ALIVE_PKT) {
					brf6150_alive_packet(info, info->rx_skb);
					info->rx_skb = NULL;
					clk_disable(info->uart_ck);
					return;
				}
			}
			break;
		case WAIT_FOR_DATA:
			info->rx_count--;
			*skb_put(info->rx_skb, 1) = byte;
			if (info->rx_count == 0) {
				brf6150_recv_frame(info, info->rx_skb);
				info->rx_skb = NULL;
				clk_disable(info->uart_ck);
			}
			break;
		default:
			WARN_ON(1);
			break;
		}
	}

	NBT_DBG_TRANSFER_NF("\n");
}
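
When a frame advertises more payload than the receive skb can hold, the handler above drops the skb and swallows the surplus bytes from the UART before resynchronising on the next packet-type byte. A standalone model of that bookkeeping (hypothetical sizes):

#include <stdio.h>

/* surplus bytes to discard before resynchronising; 0 when the frame fits */
static long garbage_bytes(long rx_count, int tailroom)
{
	return rx_count > tailroom ? rx_count - tailroom : 0;
}

int main(void)
{
	printf("%ld\n", garbage_bytes(300, 256));	/* 44 bytes to discard */
	printf("%ld\n", garbage_bytes(100, 256));	/* 0: frame fits */
	return 0;
}
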
Example #25
/*
 *  This routine is called when the HDLC layer internally generates a
 *  control frame.
 */
void nr_write_internal(struct sock *sk, int frametype)
{
	struct nr_sock *nr = nr_sk(sk);
	struct sk_buff *skb;
	unsigned char  *dptr;
	int len, timeout;

	len = NR_NETWORK_LEN + NR_TRANSPORT_LEN;

	switch (frametype & 0x0F) {
	case NR_CONNREQ:
		len += 17;
		break;
	case NR_CONNACK:
		len += (nr->bpqext) ? 2 : 1;
		break;
	case NR_DISCREQ:
	case NR_DISCACK:
	case NR_INFOACK:
		break;
	default:
		printk(KERN_ERR "NET/ROM: nr_write_internal - invalid frame type %d\n", frametype);
		return;
	}

	if ((skb = alloc_skb(len, GFP_ATOMIC)) == NULL)
		return;

	/*
	 *	Space for AX.25 and NET/ROM network header
	 */
	skb_reserve(skb, NR_NETWORK_LEN);

	dptr = skb_put(skb, skb_tailroom(skb));

	switch (frametype & 0x0F) {
	case NR_CONNREQ:
		timeout  = nr->t1 / HZ;
		*dptr++  = nr->my_index;
		*dptr++  = nr->my_id;
		*dptr++  = 0;
		*dptr++  = 0;
		*dptr++  = frametype;
		*dptr++  = nr->window;
		memcpy(dptr, &nr->user_addr, AX25_ADDR_LEN);
		dptr[6] &= ~AX25_CBIT;
		dptr[6] &= ~AX25_EBIT;
		dptr[6] |= AX25_SSSID_SPARE;
		dptr    += AX25_ADDR_LEN;
		memcpy(dptr, &nr->source_addr, AX25_ADDR_LEN);
		dptr[6] &= ~AX25_CBIT;
		dptr[6] &= ~AX25_EBIT;
		dptr[6] |= AX25_SSSID_SPARE;
		dptr    += AX25_ADDR_LEN;
		*dptr++  = timeout % 256;
		*dptr++  = timeout / 256;
		break;

	case NR_CONNACK:
		*dptr++ = nr->your_index;
		*dptr++ = nr->your_id;
		*dptr++ = nr->my_index;
		*dptr++ = nr->my_id;
		*dptr++ = frametype;
		*dptr++ = nr->window;
		if (nr->bpqext) *dptr++ = sysctl_netrom_network_ttl_initialiser;
		break;

	case NR_DISCREQ:
	case NR_DISCACK:
		*dptr++ = nr->your_index;
		*dptr++ = nr->your_id;
		*dptr++ = 0;
		*dptr++ = 0;
		*dptr++ = frametype;
		break;

	case NR_INFOACK:
		*dptr++ = nr->your_index;
		*dptr++ = nr->your_id;
		*dptr++ = 0;
		*dptr++ = nr->vr;
		*dptr++ = frametype;
		break;
	}

	nr_transmit_buffer(sk, skb);
}
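
The NR_CONNREQ branch above stores the T1 timeout as two bytes, low byte first. A standalone check of that split (hypothetical timeout value):

#include <stdio.h>

int main(void)
{
	int timeout = 300;	/* hypothetical nr->t1 / HZ, in seconds */
	unsigned char lo = timeout % 256;	/* *dptr++ = timeout % 256 */
	unsigned char hi = timeout / 256;	/* *dptr++ = timeout / 256 */

	printf("lo=0x%02x hi=0x%02x -> %d\n", lo, hi, hi * 256 + lo);
	return 0;
}
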
/* Perform WEP encryption on given skb that has at least 4 bytes of headroom
 * for IV and 4 bytes of tailroom for ICV. Both IV and ICV will be transmitted,
 * so the payload length increases by 8 bytes.
 *
 * WEP frame payload: IV + TX key idx, RC4(data), ICV = RC4(CRC32(data))
 */
static int prism2_wep_encrypt(struct sk_buff *skb, int hdr_len, void *priv)
{
	struct prism2_wep_data *wep = priv;
	u32 klen, len;
	u8 key[WEP_KEY_LEN + 3];
	u8 *pos;
	cb_desc *tcb_desc = (cb_desc *)(skb->cb + MAX_DEV_ADDR_SIZE);
	#if ( !defined(BUILT_IN_CRYPTO) && ((LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,21)) || (OPENSUSE_SLED)) )
	struct blkcipher_desc desc = {.tfm = wep->tx_tfm};
	#endif
	u32 crc;
	u8 *icv;
#ifdef _RTL8192_EXT_PATCH_	
	u8 broadcastaddr[6] = {0xff,0xff,0xff,0xff,0xff,0xff};
	struct rtllib_hdr_3addr* tmp_header = (struct rtllib_hdr_3addr*)(skb->data);
	u8 is_broadcast_data = 0;
	u8 is_multicast_data = 0;
#endif
	struct scatterlist sg;
	if (skb_headroom(skb) < 4 || skb_tailroom(skb) < 4 ||
	    skb->len < hdr_len){
		printk("Error!!!headroom=%d tailroom=%d skblen=%d hdr_len=%d\n",skb_headroom(skb),skb_tailroom(skb),skb->len,hdr_len);
		return -1;
	}
#ifdef _RTL8192_EXT_PATCH_	
	if(tcb_desc->badhoc==0){
		if(memcmp(tmp_header->addr1,broadcastaddr,6) == 0){
			is_broadcast_data = 1;
			tcb_desc->bHwSec = 0;
		}
		if(is_multicast_ether_addr(tmp_header->addr1)){
			is_multicast_data = 1;
			tcb_desc->bHwSec = 0;
		}
	}
#endif
	len = skb->len - hdr_len;
	pos = skb_push(skb, 4);
	memmove(pos, pos + 4, hdr_len);
	pos += hdr_len;

	klen = 3 + wep->key_len;

	wep->iv++;

	/* Fluhrer, Mantin, and Shamir have reported weaknesses in the key
	 * scheduling algorithm of RC4. At least IVs (KeyByte + 3, 0xff, N)
	 * can be used to speed up attacks, so avoid using them. */
	if ((wep->iv & 0xff00) == 0xff00) {
		u8 B = (wep->iv >> 16) & 0xff;
		if (B >= 3 && B < klen)
			wep->iv += 0x0100;
	}

	/* Prepend 24-bit IV to RC4 key and TX frame */
	*pos++ = key[0] = (wep->iv >> 16) & 0xff;
	*pos++ = key[1] = (wep->iv >> 8) & 0xff;
	*pos++ = key[2] = wep->iv & 0xff;
	*pos++ = wep->key_idx << 6;

	/* Copy rest of the WEP key (the secret part) */
	memcpy(key + 3, wep->key, wep->key_len);

	if (!tcb_desc->bHwSec)
	{
	
		/* Append little-endian CRC32 and encrypt it to produce ICV */
	#if (LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0))
		crc = ~crc32_le(~0, pos, len);
	#else
		crc = ~ether_crc_le(len, pos);
	#endif
		icv = skb_put(skb, 4);
		icv[0] = crc;
		icv[1] = crc >> 8;
		icv[2] = crc >> 16;
		icv[3] = crc >> 24;
		
	#if(LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
		sg.page = virt_to_page(pos);
		sg.offset = offset_in_page(pos);
		sg.length = len + 4;
	#else
		sg_init_one(&sg, pos, len+4);
	#endif
	#if ( defined(BUILT_IN_CRYPTO) || ((LINUX_VERSION_CODE < KERNEL_VERSION(2,6,21)) && (!OPENSUSE_SLED)) )
		crypto_cipher_setkey(wep->tfm, key, klen);
		crypto_cipher_encrypt(wep->tfm, &sg, &sg, len + 4);
		return 0;
	#else
		crypto_blkcipher_setkey(wep->tx_tfm, key, klen);
		return crypto_blkcipher_encrypt(&desc, &sg, &sg, len + 4);
	#endif
	}
	return 0;
}
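
The IV step above avoids the weak (KeyByte + 3, 0xff, N) IV class reported by Fluhrer, Mantin, and Shamir. A standalone model of the skip (hypothetical IV and key length):

#include <stdio.h>
#include <stdint.h>

/* advance the IV, stepping over the weak (B, 0xff, N) class where the
 * leading byte B would index a secret key byte (3 <= B < klen) */
static uint32_t next_iv(uint32_t iv, unsigned int klen)
{
	iv++;
	if ((iv & 0xff00) == 0xff00) {
		uint8_t B = (iv >> 16) & 0xff;

		if (B >= 3 && B < klen)
			iv += 0x0100;	/* clears the weak 0xff middle byte */
	}
	return iv;
}

int main(void)
{
	/* klen = 3 IV bytes + 5 key bytes (WEP-40); 0x04ff00 is weak */
	printf("0x%06x\n", (unsigned int)next_iv(0x04feff, 8));	/* 0x050000 */
	return 0;
}
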
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev)) {
				netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
				goto drop;
			} else {
				/* cdc_ncm collected packet; waits for more */
				goto not_drop;
			}
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 * NOTE2: CDC NCM specification is different from CDC ECM when
	 * handling ZLP/short packets, so cdc_ncm driver will make short
	 * packet itself if needed.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			if (!(info->flags & FLAG_MULTI_PACKET)) {
				urb->transfer_buffer_length++;
				if (skb_tailroom(skb)) {
					skb->data[skb->len] = 0;
					__skb_put(skb, 1);
				}
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers, the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
not_drop:
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
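
The ZLP block above pads transfers that are an exact multiple of the endpoint packet size, since such transfers would otherwise need a zero-length packet to terminate. A standalone model of the length adjustment (hypothetical sizes; the real code also requires tailroom for the pad byte):

#include <stdio.h>

/* grow exact-multiple transfers by one pad byte so the host never has
 * to send a terminating zero-length packet */
static int padded_len(int len, int maxpacket)
{
	return (len % maxpacket == 0) ? len + 1 : len;
}

int main(void)
{
	printf("%d\n", padded_len(512, 512));	/* 513: pad byte appended */
	printf("%d\n", padded_len(500, 512));	/* 500: already short */
	return 0;
}
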
/* Send RST reply */
static void send_reset(struct sk_buff *oldskb, int hook)
{
	struct sk_buff *nskb;
	struct iphdr *iph = oldskb->nh.iph;
	struct tcphdr _otcph, *oth, *tcph;
	struct rtable *rt;
	u_int16_t tmp_port;
	u_int32_t tmp_addr;
	unsigned int tcplen;
	int needs_ack;
	int hh_len;

	/* IP header checks: fragment. */
	if (oldskb->nh.iph->frag_off & htons(IP_OFFSET))
		return;

	oth = skb_header_pointer(oldskb, oldskb->nh.iph->ihl * 4,
				 sizeof(_otcph), &_otcph);
	if (oth == NULL)
		return;

	/* No RST for RST. */
	if (oth->rst)
		return;

	/* Check checksum */
	tcplen = oldskb->len - iph->ihl * 4;
	if (((hook != NF_IP_LOCAL_IN && oldskb->ip_summed != CHECKSUM_HW) ||
	     (hook == NF_IP_LOCAL_IN &&
	      oldskb->ip_summed != CHECKSUM_UNNECESSARY)) &&
	    csum_tcpudp_magic(iph->saddr, iph->daddr, tcplen, IPPROTO_TCP,
	                      oldskb->ip_summed == CHECKSUM_HW ? oldskb->csum :
	                      skb_checksum(oldskb, iph->ihl * 4, tcplen, 0)))
		return;

	if ((rt = route_reverse(oldskb, oth, hook)) == NULL)
		return;

	hh_len = LL_RESERVED_SPACE(rt->u.dst.dev);

	/* We need a linear, writeable skb.  We also need to expand
	   headroom in case hh_len of incoming interface < hh_len of
	   outgoing interface */
	nskb = skb_copy_expand(oldskb, hh_len, skb_tailroom(oldskb),
			       GFP_ATOMIC);
	if (!nskb) {
		dst_release(&rt->u.dst);
		return;
	}

	dst_release(nskb->dst);
	nskb->dst = &rt->u.dst;

	/* This packet will not be the same as the other: clear nf fields */
	nf_reset(nskb);
	nskb->nfmark = 0;
#ifdef CONFIG_BRIDGE_NETFILTER
	nf_bridge_put(nskb->nf_bridge);
	nskb->nf_bridge = NULL;
#endif

	tcph = (struct tcphdr *)((u_int32_t*)nskb->nh.iph + nskb->nh.iph->ihl);

	/* Swap source and dest */
	tmp_addr = nskb->nh.iph->saddr;
	nskb->nh.iph->saddr = nskb->nh.iph->daddr;
	nskb->nh.iph->daddr = tmp_addr;
	tmp_port = tcph->source;
	tcph->source = tcph->dest;
	tcph->dest = tmp_port;

	/* Truncate to length (no data) */
	tcph->doff = sizeof(struct tcphdr)/4;
	skb_trim(nskb, nskb->nh.iph->ihl*4 + sizeof(struct tcphdr));
	nskb->nh.iph->tot_len = htons(nskb->len);

	if (tcph->ack) {
		needs_ack = 0;
		tcph->seq = oth->ack_seq;
		tcph->ack_seq = 0;
	} else {
		needs_ack = 1;
		tcph->ack_seq = htonl(ntohl(oth->seq) + oth->syn + oth->fin
				      + oldskb->len - oldskb->nh.iph->ihl*4
				      - (oth->doff<<2));
		tcph->seq = 0;
	}

	/* Reset flags */
	((u_int8_t *)tcph)[13] = 0;
	tcph->rst = 1;
	tcph->ack = needs_ack;

	tcph->window = 0;
	tcph->urg_ptr = 0;

	/* Adjust TCP checksum */
	tcph->check = 0;
	tcph->check = tcp_v4_check(tcph, sizeof(struct tcphdr),
				   nskb->nh.iph->saddr,
				   nskb->nh.iph->daddr,
				   csum_partial((char *)tcph,
						sizeof(struct tcphdr), 0));

	/* Adjust IP TTL, DF */
	nskb->nh.iph->ttl = dst_metric(nskb->dst, RTAX_HOPLIMIT);
	/* Set DF, id = 0 */
	nskb->nh.iph->frag_off = htons(IP_DF);
	nskb->nh.iph->id = 0;

	/* Adjust IP checksum */
	nskb->nh.iph->check = 0;
	nskb->nh.iph->check = ip_fast_csum((unsigned char *)nskb->nh.iph, 
					   nskb->nh.iph->ihl);

	/* "Never happens" */
	if (nskb->len > dst_mtu(nskb->dst))
		goto free_nskb;

	nf_ct_attach(nskb, oldskb);

	NF_HOOK(PF_INET, NF_IP_LOCAL_OUT, nskb, NULL, nskb->dst->dev,
		dst_output);
	return;

 free_nskb:
	kfree_skb(nskb);
}