Example #1
static int rx_submit (struct usbnet *dev, struct urb *urb, gfp_t flags)
{
	struct sk_buff		*skb;
	struct skb_data		*entry;
	int			retval = 0;
	unsigned long		lockflags;
	size_t			size = dev->rx_urb_size;

	/* prevent rx skb allocation when error ratio is high */
	if (test_bit(EVENT_RX_KILL, &dev->flags)) {
		usb_free_urb(urb);
		return -ENOLINK;
	}

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, flags);
	if (NET_IP_ALIGN && skb)
		skb_reserve(skb, NET_IP_ALIGN);

	if (!skb) {
		netif_dbg(dev, rx_err, dev->net, "no rx skb\n");
		usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
		usb_free_urb (urb);
		return -ENOMEM;
	}

	/* per-URB bookkeeping lives in the skb control buffer (skb->cb) */
	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->length = 0;

	usb_fill_bulk_urb (urb, dev->udev, dev->in,
		skb->data, size, rx_complete, skb);

	spin_lock_irqsave (&dev->rxq.lock, lockflags);

	if (netif_running (dev->net) &&
	    netif_device_present (dev->net) &&
	    !test_bit (EVENT_RX_HALT, &dev->flags) &&
	    !test_bit (EVENT_DEV_ASLEEP, &dev->flags)) {
		switch (retval = usb_submit_urb (urb, GFP_ATOMIC)) {
		case -EPIPE:
			usbnet_defer_kevent (dev, EVENT_RX_HALT);
			break;
		case -ENOMEM:
			usbnet_defer_kevent (dev, EVENT_RX_MEMORY);
			break;
		case -ENODEV:
			netif_dbg(dev, ifdown, dev->net, "device gone\n");
			netif_device_detach (dev->net);
			break;
		case -EHOSTUNREACH:
			retval = -ENOLINK;
			break;
		default:
			netif_dbg(dev, rx_err, dev->net,
				  "rx submit, %d\n", retval);
			tasklet_schedule (&dev->bh);
			break;
		case 0:
			__usbnet_queue_skb(&dev->rxq, skb, rx_start);
		}
	} else {
		netif_dbg(dev, ifdown, dev->net, "rx: stopped\n");
		retval = -ENOLINK;
	}
	spin_unlock_irqrestore (&dev->rxq.lock, lockflags);
	if (retval) {
		dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	}
	return retval;
}
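
All four examples on this page share the same allocation step: __netdev_alloc_skb() ties the skb to the net_device, and skb_reserve(NET_IP_ALIGN) adds headroom so the IP header ends up aligned. Below is a minimal sketch of just that step; the helper name and parameters are illustrative and not part of usbnet.

#include <linux/netdevice.h>
#include <linux/skbuff.h>

/* Illustrative helper: allocate an RX skb with NET_IP_ALIGN bytes of
 * headroom, as rx_submit() does above. */
static struct sk_buff *rx_alloc_aligned_skb(struct net_device *net,
					    size_t size, gfp_t flags)
{
	struct sk_buff *skb = __netdev_alloc_skb(net, size + NET_IP_ALIGN, flags);

	if (skb && NET_IP_ALIGN)
		skb_reserve(skb, NET_IP_ALIGN);	/* align the IP header */
	return skb;	/* NULL on failure; the caller defers and retries */
}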
Example #2
/*
 * Kernel thread for USB reception of data
 *
 * This thread waits for a kick; once kicked, it will allocate an skb
 * and receive a single message to it from USB (using
 * i2400mu_rx()). Once received, it is passed to the generic i2400m RX
 * code for processing.
 *
 * When done processing, it runs some dirty statistics to verify if
 * the last 100 messages received were smaller than half of the
 * current RX buffer size. In that case, the RX buffer size is
 * halved. This helps lower the pressure on the memory
 * allocator.
 *
 * Hard errors force the thread to exit.
 */
static
int i2400mu_rxd(void *_i2400mu)
{
    int result = 0;
    struct i2400mu *i2400mu = _i2400mu;
    struct i2400m *i2400m = &i2400mu->i2400m;
    struct device *dev = &i2400mu->usb_iface->dev;
    struct net_device *net_dev = i2400m->wimax_dev.net_dev;
    size_t pending;
    int rx_size;
    struct sk_buff *rx_skb;
    unsigned long flags;

    d_fnstart(4, dev, "(i2400mu %p)\n", i2400mu);
    spin_lock_irqsave(&i2400m->rx_lock, flags);
    BUG_ON(i2400mu->rx_kthread != NULL);
    i2400mu->rx_kthread = current;
    spin_unlock_irqrestore(&i2400m->rx_lock, flags);
    while (1) {
        d_printf(2, dev, "RX: waiting for messages\n");
        pending = 0;
        wait_event_interruptible(
            i2400mu->rx_wq,
            (kthread_should_stop()	/* check this first! */
             || (pending = atomic_read(&i2400mu->rx_pending_count)))
        );
        if (kthread_should_stop())
            break;
        if (pending == 0)
            continue;
        rx_size = i2400mu->rx_size;
        d_printf(2, dev, "RX: reading up to %d bytes\n", rx_size);
        rx_skb = __netdev_alloc_skb(net_dev, rx_size, GFP_KERNEL);
        if (rx_skb == NULL) {
            dev_err(dev, "RX: can't allocate skb [%d bytes]\n",
                    rx_size);
            msleep(50);	/* give it some time? */
            continue;
        }

        /* Receive the message with the payloads */
        rx_skb = i2400mu_rx(i2400mu, rx_skb);
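        /* i2400mu_rx() may hand back an ERR_PTR(); PTR_ERR() is taken up
         * front but only used (via the goto) when IS_ERR() is true */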
        result = PTR_ERR(rx_skb);
        if (IS_ERR(rx_skb))
            goto out;
        atomic_dec(&i2400mu->rx_pending_count);
        if (rx_skb == NULL || rx_skb->len == 0) {
            /* some "ignorable" condition */
            kfree_skb(rx_skb);
            continue;
        }

        /* Deliver the message to the generic i2400m code */
        i2400mu->rx_size_cnt++;
        i2400mu->rx_size_acc += rx_skb->len;
        result = i2400m_rx(i2400m, rx_skb);
        if (result == -EIO
                && edc_inc(&i2400mu->urb_edc,
                           EDC_MAX_ERRORS, EDC_ERROR_TIMEFRAME)) {
            goto error_reset;
        }

        /* Maybe adjust RX buffer size */
        i2400mu_rx_size_maybe_shrink(i2400mu);
    }
    result = 0;
out:
    spin_lock_irqsave(&i2400m->rx_lock, flags);
    i2400mu->rx_kthread = NULL;
    spin_unlock_irqrestore(&i2400m->rx_lock, flags);
    d_fnend(4, dev, "(i2400mu %p) = %d\n", i2400mu, result);
    return result;

error_reset:
    dev_err(dev, "RX: maximum errors in received buffer exceeded; "
            "resetting device\n");
    usb_queue_reset_device(i2400mu->usb_iface);
    goto out;
}
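
The shrinking heuristic mentioned in the comment lives in i2400mu_rx_size_maybe_shrink(), which is not shown here. The sketch below is a hedged reconstruction based only on the comment above and the rx_size / rx_size_cnt / rx_size_acc fields visible in the example; the real implementation may differ in details such as a lower bound on rx_size.

/* Sketch only: halve the RX buffer once the last 100 messages averaged
 * less than half of the current buffer size, then restart the window. */
static void rx_size_maybe_shrink_sketch(struct i2400mu *i2400mu)
{
    if (i2400mu->rx_size_cnt < 100)
        return;                        /* not enough samples yet */
    if (i2400mu->rx_size_acc / i2400mu->rx_size_cnt < i2400mu->rx_size / 2)
        i2400mu->rx_size /= 2;         /* demand is low: shrink the buffer */
    i2400mu->rx_size_cnt = 0;          /* reset the sampling window */
    i2400mu->rx_size_acc = 0;
}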
Example #3
static void xennet_alloc_rx_buffers(struct net_device *dev)
{
	unsigned short id;
	struct netfront_info *np = netdev_priv(dev);
	struct sk_buff *skb;
	struct page *page;
	int i, batch_target, notify;
	RING_IDX req_prod = np->rx.req_prod_pvt;
	grant_ref_t ref;
	unsigned long pfn;
	void *vaddr;
	struct xen_netif_rx_request *req;

	if (unlikely(!netif_carrier_ok(dev)))
		return;

	/*
	 * Allocate skbuffs greedily, even though we batch updates to the
	 * receive ring. This creates a less bursty demand on the memory
	 * allocator, so should reduce the chance of failed allocation requests
	 * both for ourself and for other kernel subsystems.
	 */
	batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
	for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
		skb = __netdev_alloc_skb(dev, RX_COPY_THRESHOLD + NET_IP_ALIGN,
					 GFP_ATOMIC | __GFP_NOWARN);
		if (unlikely(!skb))
			goto no_skb;

		/* Align IP header to a 16-byte boundary */
		skb_reserve(skb, NET_IP_ALIGN);

		page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
		if (!page) {
			kfree_skb(skb);
no_skb:
			/* Any skbuffs queued for refill? Force them out. */
			if (i != 0)
				goto refill;
			/* Could not allocate any skbuffs. Try again later. */
			mod_timer(&np->rx_refill_timer,
				  jiffies + (HZ/10));
			break;
		}

		skb_shinfo(skb)->frags[0].page = page;
		skb_shinfo(skb)->nr_frags = 1;
		__skb_queue_tail(&np->rx_batch, skb);
	}

	/* Is the batch large enough to be worthwhile? */
	if (i < (np->rx_target/2)) {
		if (req_prod > np->rx.sring->req_prod)
			goto push;
		return;
	}

	/* Adjust our fill target if we risked running out of buffers. */
	if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
	    ((np->rx_target *= 2) > np->rx_max_target))
		np->rx_target = np->rx_max_target;

 refill:
	for (i = 0; ; i++) {
		skb = __skb_dequeue(&np->rx_batch);
		if (skb == NULL)
			break;

		skb->dev = dev;

		id = xennet_rxidx(req_prod + i);

		BUG_ON(np->rx_skbs[id]);
		np->rx_skbs[id] = skb;

		ref = gnttab_claim_grant_reference(&np->gref_rx_head);
		BUG_ON((signed short)ref < 0);
		np->grant_rx_ref[id] = ref;

		pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
		vaddr = page_address(skb_shinfo(skb)->frags[0].page);

		req = RING_GET_REQUEST(&np->rx, req_prod + i);
		gnttab_grant_foreign_access_ref(ref,
						np->xbdev->otherend_id,
						pfn_to_mfn(pfn),
						0);

		req->id = id;
		req->gref = ref;
	}

	wmb();		/* barrier so backend sees requests */

	/* Above is a suitable barrier to ensure backend will see requests. */
	np->rx.req_prod_pvt = req_prod + i;
 push:
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
	if (notify)
		notify_remote_via_irq(np->netdev->irq);
}
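
The fill-target adjustment above packs a doubling and a clamp into one condition by assigning inside the && chain. The following is an equivalent, more explicit restatement, purely for illustration; the fields are the same ones used in xennet_alloc_rx_buffers().

/* Same effect as the original condition: if the ring ran low, double the
 * fill target, but never beyond rx_max_target. */
static void xennet_adjust_rx_target_sketch(struct netfront_info *np,
					   RING_IDX req_prod)
{
	if (req_prod - np->rx.sring->rsp_prod < np->rx_target / 4) {
		np->rx_target *= 2;
		if (np->rx_target > np->rx_max_target)
			np->rx_target = np->rx_max_target;
	}
}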
Example #4
static int
rx_submit(struct eth_dev *dev, struct usb_request *req, gfp_t gfp_flags)
{
	struct usb_gadget *g = dev->gadget;
	struct sk_buff	*skb;
	int		retval = -ENOMEM;
	size_t		size = 0;
	struct usb_ep	*out;
	unsigned long	flags;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb)
		out = dev->port_usb->out_ep;
	else
		out = NULL;
	spin_unlock_irqrestore(&dev->lock, flags);

	if (!out)
		return -ENOTCONN;


	/* Padding up to RX_EXTRA handles minor disagreements with host.
	 * Normally we use the USB "terminate on short read" convention;
	 * so allow up to (N*maxpacket), since that memory is normally
	 * already allocated.  Some hardware doesn't deal well with short
	 * reads (e.g. DMA must be N*maxpacket), so for now don't trim a
	 * byte off the end (to force hardware errors on overflow).
	 *
	 * RNDIS uses internal framing, and explicitly allows senders to
	 * pad to end-of-packet.  That's potentially nice for speed, but
	 * means receivers can't recover lost synch on their own (because
	 * new packets don't necessarily start right after a short RX).
	 */
	size += sizeof(struct ethhdr) + dev->net->mtu + RX_EXTRA;
	size += dev->port_usb->header_len;

	if (g->quirk_ep_out_aligned_size) {
		size += out->maxpacket - 1;
		size -= size % out->maxpacket;
	}

	if (dev->port_usb->is_fixed)
		size = max_t(size_t, size, dev->port_usb->fixed_out_len);

	skb = __netdev_alloc_skb(dev->net, size + NET_IP_ALIGN, gfp_flags);
	if (skb == NULL) {
		DBG(dev, "no rx skb\n");
		goto enomem;
	}

	/* Some platforms perform better when IP packets are aligned,
	 * but on at least one, checksumming fails otherwise.  Note:
	 * RNDIS headers involve variable numbers of LE32 values.
	 */
	if (likely(!dev->no_skb_reserve))
		skb_reserve(skb, NET_IP_ALIGN);

	req->buf = skb->data;
	req->length = size;
	req->complete = rx_complete;
	req->context = skb;

	retval = usb_ep_queue(out, req, gfp_flags);
	if (retval == -ENOMEM)
enomem:
		defer_kevent(dev, WORK_RX_MEMORY);
	if (retval) {
		DBG(dev, "rx submit --> %d\n", retval);
		if (skb)
			dev_kfree_skb_any(skb);
		spin_lock_irqsave(&dev->req_lock, flags);
		list_add(&req->list, &dev->rx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return retval;
}
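
The size computation explained by the long comment is spread across several branches. Here is a sketch that isolates just the arithmetic; the parameter names are illustrative, RX_EXTRA and the header length come from the driver and the active port as in rx_submit(), and the is_fixed clamp is omitted for brevity.

#include <linux/types.h>
#include <linux/if_ether.h>

/* Sketch of the RX buffer sizing used above: Ethernet header + MTU +
 * RX_EXTRA padding + link-layer header, optionally rounded up to a
 * multiple of the endpoint's maxpacket for quirky UDC hardware. */
static size_t rx_buf_size_sketch(unsigned int mtu, unsigned int header_len,
				 unsigned int maxpacket, bool align_to_maxpacket)
{
	size_t size = sizeof(struct ethhdr) + mtu + RX_EXTRA + header_len;

	if (align_to_maxpacket) {
		size += maxpacket - 1;
		size -= size % maxpacket;	/* round up to N * maxpacket */
	}
	return size;
}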