Example #1
0
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(dev, queue);
#else
	netif_stop_queue(dev);
#endif

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
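	/* Fill the tx descriptor: map the buffer for DMA and mark it owned by the hardware */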
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
Example #2
0
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

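	/* Fill the next free tx descriptor with this skb */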
	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = virt_to_phys(skb->data);
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
Example #3
0
static netdev_tx_t bcm_transmit(struct sk_buff *skb, struct net_device *dev)
{
	struct bcm_mini_adapter *Adapter = GET_BCM_ADAPTER(dev);
	u16 qindex = skb_get_queue_mapping(skb);

	if (Adapter->device_removed || !Adapter->LinkUpStatus)
		goto drop;

	if (Adapter->TransferMode != IP_PACKET_ONLY_MODE)
		goto drop;

	if (INVALID_QUEUE_INDEX == qindex)
		goto drop;

	if (Adapter->PackInfo[qindex].uiCurrentPacketsOnHost >=
	    SF_MAX_ALLOWED_PACKETS_TO_BACKUP)
		return NETDEV_TX_BUSY;

	/* Now Enqueue the packet */
	if (netif_msg_tx_queued(Adapter))
		pr_info(PFX "%s: enqueueing packet to queue %d\n",
			dev->name, qindex);

	spin_lock(&Adapter->PackInfo[qindex].SFQueueLock);
	Adapter->PackInfo[qindex].uiCurrentBytesOnHost += skb->len;
	Adapter->PackInfo[qindex].uiCurrentPacketsOnHost++;

	*((B_UINT32 *) skb->cb + SKB_CB_LATENCY_OFFSET) = jiffies;
	ENQUEUEPACKET(Adapter->PackInfo[qindex].FirstTxQueue,
		      Adapter->PackInfo[qindex].LastTxQueue, skb);
	atomic_inc(&Adapter->TotalPacketCount);
	spin_unlock(&Adapter->PackInfo[qindex].SFQueueLock);

	/* FIXME - this is racy and incorrect, replace with work queue */
	if (!atomic_read(&Adapter->TxPktAvail)) {
		atomic_set(&Adapter->TxPktAvail, 1);
		wake_up(&Adapter->tx_packet_wait_queue);
	}
	return NETDEV_TX_OK;

 drop:
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example #4
0
static int rionet_queue_tx_msg(struct sk_buff *skb, struct net_device *ndev,
			       struct rio_dev *rdev)
{
	struct rionet_private *rnet = netdev_priv(ndev);

	rio_add_outb_message(rnet->mport, rdev, 0, skb->data, skb->len);
	rnet->tx_skb[rnet->tx_slot] = skb;

	ndev->stats.tx_packets++;
	ndev->stats.tx_bytes += skb->len;

	if (++rnet->tx_cnt == RIONET_TX_RING_SIZE)
		netif_stop_queue(ndev);

	++rnet->tx_slot;
	rnet->tx_slot &= (RIONET_TX_RING_SIZE - 1);

	if (netif_msg_tx_queued(rnet))
		printk(KERN_INFO "%s: queued skb len %8.8x\n", DRV_NAME,
		       skb->len);

	return 0;
}
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
static void kevent(void *data)
{
	struct usbnet *dev = (struct usbnet *)data;
#else
static void kevent(struct work_struct *work)
{
	struct usbnet		*dev =
		container_of(work, struct usbnet, kevent);
#endif
	int			status;

	/* usb_clear_halt() needs a thread context */
	if (test_bit(EVENT_TX_HALT, &dev->flags)) {

		unlink_urbs(dev, &dev->txq);
		status = usb_clear_halt(dev->udev, dev->out);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_tx_err(dev))
				deverr(dev, "can't clear tx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_TX_HALT, &dev->flags);
			if (status != -ESHUTDOWN)
				netif_wake_queue(dev->net);
		}
	}
	if (test_bit(EVENT_RX_HALT, &dev->flags)) {

		unlink_urbs(dev, &dev->rxq);
		status = usb_clear_halt(dev->udev, dev->in);
		if (status < 0
				&& status != -EPIPE
				&& status != -ESHUTDOWN) {
			if (netif_msg_rx_err(dev))
				deverr(dev, "can't clear rx halt, status %d",
				       status);
		} else {
			clear_bit(EVENT_RX_HALT, &dev->flags);
			tasklet_schedule(&dev->bh);
		}
	}

	/* tasklet could resubmit itself forever if memory is tight */
	if (test_bit(EVENT_RX_MEMORY, &dev->flags)) {
		struct urb	*urb = NULL;

		if (netif_running(dev->net))
			urb = usb_alloc_urb(0, GFP_KERNEL);
		else
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
		if (urb != NULL) {
			clear_bit(EVENT_RX_MEMORY, &dev->flags);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
			urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
			rx_submit(dev, urb, GFP_KERNEL);
			tasklet_schedule(&dev->bh);
		}
	}

	if (test_bit(EVENT_LINK_RESET, &dev->flags)) {
		struct driver_info	*info = dev->driver_info;
		int			retval = 0;

		clear_bit(EVENT_LINK_RESET, &dev->flags);

		if (info->link_reset) {
			retval = info->link_reset(dev);
			if (retval < 0) {
				devinfo(dev,
					"link reset failed (%d) usbnet usb-%s-%s, %s",
					retval,
					dev->udev->bus->bus_name,
					dev->udev->devpath,
					info->description);
			}
		}
	}

	if (dev->flags)
		devdbg(dev, "kevent done, flags = 0x%lx", dev->flags);
}

/*-------------------------------------------------------------------------*/

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 19)
static void tx_complete(struct urb *urb, struct pt_regs *regs)
#else
static void tx_complete(struct urb *urb)
#endif
{
	struct sk_buff		*skb = (struct sk_buff *) urb->context;
	struct skb_data		*entry = (struct skb_data *) skb->cb;
	struct usbnet		*dev = entry->dev;

	if (urb->status == 0) {
		dev->stats.tx_packets++;
		dev->stats.tx_bytes += entry->length;
	} else {
		dev->stats.tx_errors++;

		switch (urb->status) {
		case -EPIPE:
			axusbnet_defer_kevent(dev, EVENT_TX_HALT);
			break;

		/* software-driven interface shutdown */
		case -ECONNRESET:		/* async unlink */
		case -ESHUTDOWN:		/* hardware gone */
			break;

		/* like rx, tx gets controller i/o faults during khubd delays */
		/* and so it uses the same throttling mechanism. */
		case -EPROTO:
		case -ETIME:
		case -EILSEQ:
			if (!timer_pending(&dev->delay)) {
				mod_timer(&dev->delay,
					  jiffies + THROTTLE_JIFFIES);
				if (netif_msg_link(dev))
					devdbg(dev, "tx throttle %d",
					       urb->status);
			}
			netif_stop_queue(dev->net);
			break;
		default:
			if (netif_msg_tx_err(dev))
				devdbg(dev, "tx err %d", entry->urb->status);
			break;
		}
	}

	urb->dev = NULL;
	entry->state = tx_done;
	defer_bh(dev, skb, &dev->txq);
}

/*-------------------------------------------------------------------------*/

static
void axusbnet_tx_timeout(struct net_device *net)
{
	struct usbnet *dev = netdev_priv(net);

	unlink_urbs(dev, &dev->txq);
	tasklet_schedule(&dev->bh);

	/* FIXME: device recovery -- reset? */
}

/*-------------------------------------------------------------------------*/
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 32)
static int
#else
static netdev_tx_t
#endif
axusbnet_start_xmit(struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	/* some devices want funky USB-level framing, for */
	/* win32 driver (usually) and/or hardware quirks */
	if (info->tx_fixup) {
		skb = info->tx_fixup(dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err(dev))
				devdbg(dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb(urb, dev->udev, dev->out, skb->data,
			  skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave(&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb(urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue(net);
		axusbnet_defer_kevent(dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err(dev))
			devdbg(dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail(&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN(dev))
			netif_stop_queue(net);
	}
	spin_unlock_irqrestore(&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err(dev))
			devdbg(dev, "drop, code %d", retval);
drop:
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any(skb);
		usb_free_urb(urb);
	} else if (netif_msg_tx_queued(dev)) {
		devdbg(dev, "> tx, len %d, type 0x%x",
		       length, skb->protocol);
	}
	return NETDEV_TX_OK;
}

/*-------------------------------------------------------------------------*/

/* tasklet (work deferred from completions, in_irq) or timer */

static void axusbnet_bh(unsigned long param)
{
	struct usbnet		*dev = (struct usbnet *) param;
	struct sk_buff		*skb;
	struct skb_data		*entry;

	while ((skb = skb_dequeue(&dev->done))) {
		entry = (struct skb_data *) skb->cb;
		switch (entry->state) {
		case rx_done:
			entry->state = rx_cleanup;
			rx_process(dev, skb);
			continue;
		case tx_done:
		case rx_cleanup:
			usb_free_urb(entry->urb);
			dev_kfree_skb(skb);
			continue;
		default:
			devdbg(dev, "bogus skb state %d", entry->state);
		}
	}

	/* waiting for all pending urbs to complete? */
	if (dev->wait) {
		if ((dev->txq.qlen + dev->rxq.qlen + dev->done.qlen) == 0)
			wake_up(dev->wait);

	/* or are we maybe short a few urbs? */
	} else if (netif_running(dev->net)
			&& netif_device_present(dev->net)
			&& !timer_pending(&dev->delay)
			&& !test_bit(EVENT_RX_HALT, &dev->flags)) {
		int	temp = dev->rxq.qlen;
		int	qlen = RX_QLEN(dev);

		if (temp < qlen) {
			struct urb	*urb;
			int		i;

			/* don't refill the queue all at once */
			for (i = 0; i < 10 && dev->rxq.qlen < qlen; i++) {
				urb = usb_alloc_urb(0, GFP_ATOMIC);
				if (urb != NULL) {
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 14)
					urb->transfer_flags |= URB_ASYNC_UNLINK;
#endif
					rx_submit(dev, urb, GFP_ATOMIC);
				}
			}
			if (temp != dev->rxq.qlen && netif_msg_link(dev))
				devdbg(dev, "rxqlen %d --> %d",
				       temp, dev->rxq.qlen);
			if (dev->rxq.qlen < qlen)
				tasklet_schedule(&dev->bh);
		}
		if (dev->txq.qlen < TX_QLEN(dev))
			netif_wake_queue(dev->net);
	}
}


/*-------------------------------------------------------------------------
 *
 * USB Device Driver support
 *
 *-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static
void axusbnet_disconnect(struct usb_interface *intf)
{
	struct usbnet		*dev;
	struct usb_device	*xdev;
	struct net_device	*net;

	dev = usb_get_intfdata(intf);
	usb_set_intfdata(intf, NULL);
	if (!dev)
		return;

	xdev = interface_to_usbdev(intf);

	if (netif_msg_probe(dev))
		devinfo(dev, "unregister '%s' usb-%s-%s, %s",
			intf->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description);

	net = dev->net;
	unregister_netdev(net);

	/* we don't hold rtnl here ... */
	flush_scheduled_work();

	if (dev->driver_info->unbind)
		dev->driver_info->unbind(dev, intf);

	free_netdev(net);
	usb_put_dev(xdev);
}

/*-------------------------------------------------------------------------*/

/* precondition: never called in_interrupt */

static int
axusbnet_probe(struct usb_interface *udev, const struct usb_device_id *prod)
{
	struct usbnet			*dev;
	struct net_device		*net;
	struct usb_host_interface	*interface;
	struct driver_info		*info;
	struct usb_device		*xdev;
	int				status;
	const char			*name;

	name = udev->dev.driver->name;
	info = (struct driver_info *) prod->driver_info;
	if (!info) {
		printk(KERN_ERR "blacklisted by %s\n", name);
		return -ENODEV;
	}
	xdev = interface_to_usbdev(udev);
	interface = udev->cur_altsetting;

	usb_get_dev(xdev);

	status = -ENOMEM;

	/* set up our own records */
	net = alloc_etherdev(sizeof(*dev));
	if (!net) {
		printk(KERN_ERR "can't kmalloc dev\n");
		goto out;
	}

	dev = netdev_priv(net);
	dev->udev = xdev;
	dev->intf = udev;
	dev->driver_info = info;
	dev->driver_name = name;
	dev->msg_enable = netif_msg_init(msg_level, NETIF_MSG_DRV |
					 NETIF_MSG_PROBE | NETIF_MSG_LINK);
	skb_queue_head_init(&dev->rxq);
	skb_queue_head_init(&dev->txq);
	skb_queue_head_init(&dev->done);
	dev->bh.func = axusbnet_bh;
	dev->bh.data = (unsigned long) dev;
#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 20)
	INIT_WORK(&dev->kevent, kevent, dev);
#else
	INIT_WORK(&dev->kevent, kevent);
#endif

	dev->delay.function = axusbnet_bh;
	dev->delay.data = (unsigned long) dev;
	init_timer(&dev->delay);
	/* mutex_init(&dev->phy_mutex); */

	dev->net = net;

	/* rx and tx sides can use different message sizes;
	 * bind() should set rx_urb_size in that case.
	 */
	dev->hard_mtu = net->mtu + net->hard_header_len;

#if 0
	/* dma_supported() is deeply broken on almost all architectures */
	/* possible with some EHCI controllers */
	if (dma_supported(&udev->dev, DMA_BIT_MASK(64)))
		net->features |= NETIF_F_HIGHDMA;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2, 6, 30)
	net->open		= axusbnet_open;
	net->stop		= axusbnet_stop;
	net->hard_start_xmit	= axusbnet_start_xmit;
	net->tx_timeout		= axusbnet_tx_timeout;
	net->get_stats		= axusbnet_get_stats;
#endif

	net->watchdog_timeo = TX_TIMEOUT_JIFFIES;
	net->ethtool_ops = &axusbnet_ethtool_ops;

	/* allow device-specific bind/init procedures */
	/* NOTE net->name still not usable ... */
	status = info->bind(dev, udev);
	if (status < 0) {
		deverr(dev, "Binding device failed: %d", status);
		goto out1;
	}

	/* maybe the remote can't receive an Ethernet MTU */
	if (net->mtu > (dev->hard_mtu - net->hard_header_len))
		net->mtu = dev->hard_mtu - net->hard_header_len;

	status = init_status(dev, udev);
	if (status < 0)
		goto out3;

	if (!dev->rx_urb_size)
		dev->rx_urb_size = dev->hard_mtu;
	dev->maxpacket = usb_maxpacket(dev->udev, dev->out, 1);

	SET_NETDEV_DEV(net, &udev->dev);
	status = register_netdev(net);
	if (status) {
		deverr(dev, "net device registration failed: %d", status);
		goto out3;
	}

	if (netif_msg_probe(dev))
		devinfo(dev, "register '%s' at usb-%s-%s, %s, %pM",
			udev->dev.driver->name,
			xdev->bus->bus_name, xdev->devpath,
			dev->driver_info->description,
			net->dev_addr);

	/* ok, it's ready to go. */
	usb_set_intfdata(udev, dev);

	/* start as if the link is up */
	netif_device_attach(net);

	return 0;

out3:
	if (info->unbind)
		info->unbind(dev, udev);
out1:
	free_netdev(net);
out:
	usb_put_dev(xdev);
	return status;
}

/*-------------------------------------------------------------------------*/

/*
 * suspend the whole driver as soon as the first interface is suspended
 * resume only when the last interface is resumed
 */

static int axusbnet_suspend(struct usb_interface *intf,
#if LINUX_VERSION_CODE > KERNEL_VERSION(2, 6, 10)
pm_message_t message)
#else
u32 message)
#endif
{
	struct usbnet *dev = usb_get_intfdata(intf);

	if (!dev->suspend_count++) {
		/*
		 * accelerate emptying of the rx and tx queues, to avoid
		 * having everything error out.
		 */
		netif_device_detach(dev->net);
		(void) unlink_urbs(dev, &dev->rxq);
		(void) unlink_urbs(dev, &dev->txq);
		usb_kill_urb(dev->interrupt);
		/*
		 * reattach so runtime management can use and
		 * wake the device
		 */
		netif_device_attach(dev->net);
	}
	return 0;
}

static int
axusbnet_resume(struct usb_interface *intf)
{
	struct usbnet	*dev = usb_get_intfdata(intf);
	int	retval = 0;

	if (!--dev->suspend_count)
		tasklet_schedule(&dev->bh);

	retval = init_status(dev, intf);
	if (retval < 0)
		return retval;

	if (dev->interrupt) {
		retval = usb_submit_urb(dev->interrupt, GFP_KERNEL);
		if (retval < 0 && netif_msg_ifup(dev))
			deverr(dev, "intr submit %d", retval);
	}

	return retval;
}
Example #6
0
static int usbnet_start_xmit (struct sk_buff *skb, struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	int			retval = NET_XMIT_SUCCESS;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if ((length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

	spin_lock_irqsave (&dev->txq.lock, flags);

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		break;
	default:
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		retval = NET_XMIT_SUCCESS;
		dev->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
	return retval;
}
static int c2_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct c2_port *c2_port = netdev_priv(netdev);
	struct c2_dev *c2dev = c2_port->c2dev;
	struct c2_ring *tx_ring = &c2_port->tx_ring;
	struct c2_element *elem;
	dma_addr_t mapaddr;
	u32 maplen;
	unsigned long flags;
	unsigned int i;

	spin_lock_irqsave(&c2_port->tx_lock, flags);

	if (unlikely(c2_port->tx_avail < (skb_shinfo(skb)->nr_frags + 1))) {
		netif_stop_queue(netdev);
		spin_unlock_irqrestore(&c2_port->tx_lock, flags);

		pr_debug("%s: Tx ring full when queue awake!\n",
			netdev->name);
		return NETDEV_TX_BUSY;
	}

	maplen = skb_headlen(skb);
	mapaddr =
	    pci_map_single(c2dev->pcidev, skb->data, maplen, PCI_DMA_TODEVICE);

	elem = tx_ring->to_use;
	elem->skb = skb;
	elem->mapaddr = mapaddr;
	elem->maplen = maplen;

	/* Tell the HW to transmit: post address, length and the READY flag */
	__raw_writeq((__force u64) cpu_to_be64(mapaddr),
		     elem->hw_desc + C2_TXP_ADDR);
	__raw_writew((__force u16) cpu_to_be16(maplen),
		     elem->hw_desc + C2_TXP_LEN);
	__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
		     elem->hw_desc + C2_TXP_FLAGS);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += maplen;

	/* Map and post any additional fragments */
	if (skb_shinfo(skb)->nr_frags) {
		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			maplen = skb_frag_size(frag);
			mapaddr = skb_frag_dma_map(&c2dev->pcidev->dev, frag,
						   0, maplen, DMA_TO_DEVICE);
			elem = elem->next;
			elem->skb = NULL;
			elem->mapaddr = mapaddr;
			elem->maplen = maplen;

			/* Tell the HW to transmit this fragment */
			__raw_writeq((__force u64) cpu_to_be64(mapaddr),
				     elem->hw_desc + C2_TXP_ADDR);
			__raw_writew((__force u16) cpu_to_be16(maplen),
				     elem->hw_desc + C2_TXP_LEN);
			__raw_writew((__force u16) cpu_to_be16(TXP_HTXD_READY),
				     elem->hw_desc + C2_TXP_FLAGS);

			netdev->stats.tx_packets++;
			netdev->stats.tx_bytes += maplen;
		}
	}

	tx_ring->to_use = elem->next;
	c2_port->tx_avail -= (skb_shinfo(skb)->nr_frags + 1);

	if (c2_port->tx_avail <= MAX_SKB_FRAGS + 1) {
		netif_stop_queue(netdev);
		if (netif_msg_tx_queued(c2_port))
			pr_debug("%s: transmit queue full\n",
				netdev->name);
	}

	spin_unlock_irqrestore(&c2_port->tx_lock, flags);

	netdev->trans_start = jiffies;

	return NETDEV_TX_OK;
}
Example #8
0
netdev_tx_t mpodp_start_xmit(struct sk_buff *skb,
			     struct net_device *netdev)
{
	struct mpodp_if_priv *priv = netdev_priv(netdev);
	struct mpodp_tx *tx;
	struct dma_async_tx_descriptor *dma_txd;
	struct mpodp_cache_entry *entry;
	int ret;
	uint8_t fifo_mode;
	int16_t requested_engine;
	struct mpodp_pkt_hdr *hdr;
	uint32_t tx_autoloop_next;
	uint32_t tx_submitted, tx_next, tx_done;
	uint32_t tx_mppa_idx;
	int qidx;
	unsigned long flags = 0;
	struct mpodp_txq *txq;

	/* Fetch HW queue selected by the kernel */
	qidx = skb_get_queue_mapping(skb);
	txq = &priv->txqs[qidx];

	if (atomic_read(&priv->reset) == 1) {
		mpodp_clean_tx_unlocked(priv, txq, -1);
		goto addr_error;
	}

	tx_submitted = atomic_read(&txq->submitted);
	/* Compute txd id */
	tx_next = (tx_submitted + 1);
	if (tx_next == txq->size)
		tx_next = 0;

	/* MPPA H2C Entry to use */
	tx_mppa_idx = atomic_read(&txq->autoloop_cur);

	tx_done = atomic_read(&txq->done);
	if (tx_done != tx_submitted &&
	    ((txq->ring[tx_done].jiffies + msecs_to_jiffies(5) >= jiffies) ||
	     (tx_submitted < tx_done && tx_submitted + txq->size - tx_done >= TX_POLL_THRESHOLD) ||
	     (tx_submitted >= tx_done && tx_submitted - tx_done >= TX_POLL_THRESHOLD))) {
		mpodp_clean_tx_unlocked(priv, txq, -1);
	}

	/* Check if there are txd available */
	if (tx_next == atomic_read(&txq->done)) {
		/* Ring is full */
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d]: ring full\n", txq->id);
		netif_tx_stop_queue(txq->txq);
		return NETDEV_TX_BUSY;
	}

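	/* Pick the tx descriptor and the MPPA H2C cache entry computed above */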
	tx = &(txq->ring[tx_submitted]);
	entry = &(txq->cache[tx_mppa_idx]);

	/* take the time */
	mppa_pcie_time_get(priv->tx_time, &tx->time);

	/* configure channel */
	tx->dst_addr = entry->addr;

	/* Check the provided address */
	ret =
	    mppa_pcie_dma_check_addr(priv->pdata, tx->dst_addr, &fifo_mode,
				     &requested_engine);
	if (ret) {
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: invalid send address %llx\n",
				   txq->id, tx_submitted, tx->dst_addr);
		goto addr_error;
	}
	if (!fifo_mode) {
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: %llx is not a PCI2Noc address\n",
				   txq->id, tx_submitted, tx->dst_addr);
		goto addr_error;
	}
	if (requested_engine >= MPODP_NOC_CHAN_COUNT) {
		if (netif_msg_tx_err(priv))
			netdev_err(netdev,
				   "txq[%d] tx[%d]: address %llx using NoC engine out of range (%d >= %d)\n",
				   txq->id, tx_submitted, tx->dst_addr,
				   requested_engine, MPODP_NOC_CHAN_COUNT);
		goto addr_error;
	}

	tx->chanidx = requested_engine;

	/* The packet needs a header to determine size, timestamp, etc.
	 * Add it */
	if (skb_headroom(skb) < sizeof(struct mpodp_pkt_hdr)) {
		struct sk_buff *skb_new;

		skb_new =
			skb_realloc_headroom(skb, sizeof(struct mpodp_pkt_hdr));
		if (!skb_new) {
			netdev->stats.tx_errors++;
			kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		kfree_skb(skb);
		skb = skb_new;
	}

	hdr = (struct mpodp_pkt_hdr *)
		skb_push(skb, sizeof(struct mpodp_pkt_hdr));
	hdr->timestamp = priv->packet_id;
	hdr->info.dword = 0ULL;
	hdr->info._.pkt_size = skb->len; /* Also count the header size */
	hdr->info._.pkt_id = priv->packet_id;
	priv->packet_id++;

	/* save skb to free it later */
	tx->skb = skb;
	tx->len = skb->len;

	/* prepare sg */
	if (map_skb(&priv->pdev->dev, skb, tx)) {
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "tx %d: failed to map skb to dma\n",
				   tx_submitted);
		goto busy;
	}

	if (priv->n_txqs > MPODP_NOC_CHAN_COUNT)
		spin_lock_irqsave(&priv->tx_lock[requested_engine], flags);

	/* Prepare slave args */
	priv->tx_config[requested_engine].cfg.dst_addr = tx->dst_addr;
	priv->tx_config[requested_engine].requested_engine = requested_engine;
	/* FIFO mode, direction, latency were filled at setup */

	if (dmaengine_slave_config(priv->tx_chan[requested_engine],
				   &priv->tx_config[requested_engine].cfg)) {
		/* board has reset, wait for reset of netdev */
		netif_tx_stop_queue(txq->txq);
		netif_carrier_off(netdev);
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: cannot configure channel\n",
				   txq->id, tx_submitted);
		goto busy;
	}

	/* get transfer descriptor */
	dma_txd =
	    dmaengine_prep_slave_sg(priv->tx_chan[requested_engine], tx->sg,
				    tx->sg_len, DMA_MEM_TO_DEV, 0);
	if (dma_txd == NULL) {
		/* dmaengine_prep_slave_sg failed, retry */
		if (netif_msg_tx_err(priv))
			netdev_err(netdev, "txq[%d] tx[%d]: cannot get dma descriptor\n",
				   txq->id, tx_submitted);
		goto busy;
	}
	if (netif_msg_tx_queued(priv))
		netdev_info(netdev,
			    "txq[%d] tx[%d]: transfer start (submitted: %d done: %d) len=%d, sg_len=%d\n",
			    txq->id, tx_submitted, tx_next, atomic_read(&txq->done),
			    tx->len, tx->sg_len);

	skb_orphan(skb);

	/* submit and issue descriptor */
	tx->jiffies = jiffies;
	tx->cookie = dmaengine_submit(dma_txd);
	dma_async_issue_pending(priv->tx_chan[requested_engine]);

	if (priv->n_txqs > MPODP_NOC_CHAN_COUNT)
		spin_unlock_irqrestore(&priv->tx_lock[requested_engine], flags);

	/* Count number of bytes on the fly for DQL */
	netdev_tx_sent_queue(txq->txq, skb->len);
	if (test_bit(__QUEUE_STATE_STACK_XOFF, &txq->txq->state)) {
		/* We went over the DQL limit. Try to clean some completed
		 * tx so the queue is rescheduled right away */
		mpodp_clean_tx_unlocked(priv, txq, -1);
	}

	/* Increment tail pointer locally */
	atomic_set(&txq->submitted, tx_next);

	/* Update H2C entry offset */
	tx_autoloop_next = tx_mppa_idx + 1;
	if (tx_autoloop_next == txq->cached_head)
		tx_autoloop_next = 0;
	atomic_set(&txq->autoloop_cur, tx_autoloop_next);

	skb_tx_timestamp(skb);

	/* Check if there is room for another txd
	 * or stop the queue if there is not */
	tx_next = (tx_next + 1);
	if (tx_next == txq->size)
		tx_next = 0;

	if (tx_next == atomic_read(&txq->done)) {
		if (netif_msg_tx_queued(priv))
			netdev_info(netdev, "txq[%d]: ring full\n", txq->id);
		netif_tx_stop_queue(txq->txq);
	}

	return NETDEV_TX_OK;

 busy:
	unmap_skb(&priv->pdev->dev, skb, tx);
	return NETDEV_TX_BUSY;

 addr_error:
	netdev->stats.tx_dropped++;
	dev_kfree_skb(skb);
	/* We can't do anything, just stop the queue artificially */
	netif_tx_stop_queue(txq->txq);
	return NETDEV_TX_OK;
}
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			if (netif_msg_tx_err (dev))
				devdbg (dev, "can't tx_fixup skb");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "no urb");
		goto drop;
	}

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (!(info->flags & FLAG_SEND_ZLP) && (length % dev->maxpacket) == 0) {
		urb->transfer_buffer_length++;
		if (skb_tailroom(skb)) {
			skb->data[skb->len] = 0;
			__skb_put(skb, 1);
		}
	}

#if !defined(CONFIG_ERICSSON_F3307_ENABLE)
	spin_lock_irqsave (&dev->txq.lock, flags);
#else
	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		devdbg(dev, "Delaying transmission for resumption");
		goto deferred;
	}
#endif
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
#if defined(CONFIG_ERICSSON_F3307_ENABLE)
		usb_autopm_put_interface_async(dev->intf);
#endif
		break;
	default:
#if defined(CONFIG_ERICSSON_F3307_ENABLE)
		usb_autopm_put_interface_async(dev->intf);
#endif
		if (netif_msg_tx_err (dev))
			devdbg (dev, "tx: submit urb err %d", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		if (netif_msg_tx_err (dev))
			devdbg (dev, "drop, code %d", retval);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else if (netif_msg_tx_queued (dev)) {
		devdbg (dev, "> tx, len %d, type 0x%x",
			length, skb->protocol);
	}
#if defined(CONFIG_ERICSSON_F3307_ENABLE)
#ifdef CONFIG_PM
deferred:
#endif
#endif
	return NETDEV_TX_OK;
}
Example #10
0
static int cp_start_xmit (struct sk_buff *skb, struct net_device *dev)
{
	struct cp_private *cp = netdev_priv(dev);
	unsigned entry;
	u32 eor;
#if CP_VLAN_TAG_USED
	u32 vlan_tag = 0;
#endif

	spin_lock_irq(&cp->lock);

	/* This is a hard error, log it. */
	if (TX_BUFFS_AVAIL(cp) <= (skb_shinfo(skb)->nr_frags + 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&cp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

#if CP_VLAN_TAG_USED
	if (cp->vlgrp && vlan_tx_tag_present(skb))
		vlan_tag = TxVlanTag | cpu_to_be16(vlan_tx_tag_get(skb));
#endif

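	/* Build one or more descriptors for this skb, starting at the current tx head */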
	entry = cp->tx_head;
	eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;
	if (skb_shinfo(skb)->nr_frags == 0) {
		struct cp_desc *txd = &cp->tx_ring[entry];
		u32 len;
		dma_addr_t mapping;

		len = skb->len;
		mapping = pci_map_single(cp->pdev, skb->data, len, PCI_DMA_TODEVICE);
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			const struct iphdr *ip = skb->nh.iph;
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(eor | len | DescOwn |
							 FirstFrag | LastFrag |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(eor | len | DescOwn |
						 FirstFrag | LastFrag);
		wmb();

		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = mapping;
		cp->tx_skb[entry].frag = 0;
		entry = NEXT_TX(entry);
	} else {
		struct cp_desc *txd;
		u32 first_len, first_eor;
		dma_addr_t first_mapping;
		int frag, first_entry = entry;
		const struct iphdr *ip = skb->nh.iph;

		/* We must give this initial chunk to the device last.
		 * Otherwise we could race with the device.
		 */
		first_eor = eor;
		first_len = skb_headlen(skb);
		first_mapping = pci_map_single(cp->pdev, skb->data,
					       first_len, PCI_DMA_TODEVICE);
		cp->tx_skb[entry].skb = skb;
		cp->tx_skb[entry].mapping = first_mapping;
		cp->tx_skb[entry].frag = 1;
		entry = NEXT_TX(entry);

		for (frag = 0; frag < skb_shinfo(skb)->nr_frags; frag++) {
			skb_frag_t *this_frag = &skb_shinfo(skb)->frags[frag];
			u32 len;
			u32 ctrl;
			dma_addr_t mapping;

			len = this_frag->size;
			mapping = pci_map_single(cp->pdev,
						 ((void *) page_address(this_frag->page) +
						  this_frag->page_offset),
						 len, PCI_DMA_TODEVICE);
			eor = (entry == (CP_TX_RING_SIZE - 1)) ? RingEnd : 0;

			if (skb->ip_summed == CHECKSUM_HW) {
				ctrl = eor | len | DescOwn | IPCS;
				if (ip->protocol == IPPROTO_TCP)
					ctrl |= TCPCS;
				else if (ip->protocol == IPPROTO_UDP)
					ctrl |= UDPCS;
				else
					BUG();
			} else
				ctrl = eor | len | DescOwn;

			if (frag == skb_shinfo(skb)->nr_frags - 1)
				ctrl |= LastFrag;

			txd = &cp->tx_ring[entry];
			CP_VLAN_TX_TAG(txd, vlan_tag);
			txd->addr = cpu_to_le64(mapping);
			wmb();

			txd->opts1 = cpu_to_le32(ctrl);
			wmb();

			cp->tx_skb[entry].skb = skb;
			cp->tx_skb[entry].mapping = mapping;
			cp->tx_skb[entry].frag = frag + 2;
			entry = NEXT_TX(entry);
		}

		txd = &cp->tx_ring[first_entry];
		CP_VLAN_TX_TAG(txd, vlan_tag);
		txd->addr = cpu_to_le64(first_mapping);
		wmb();

		if (skb->ip_summed == CHECKSUM_HW) {
			if (ip->protocol == IPPROTO_TCP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | TCPCS);
			else if (ip->protocol == IPPROTO_UDP)
				txd->opts1 = cpu_to_le32(first_eor | first_len |
							 FirstFrag | DescOwn |
							 IPCS | UDPCS);
			else
				BUG();
		} else
			txd->opts1 = cpu_to_le32(first_eor | first_len |
						 FirstFrag | DescOwn);
		wmb();
	}
	cp->tx_head = entry;
	if (netif_msg_tx_queued(cp))
		printk(KERN_DEBUG "%s: tx queued, slot %d, skblen %d\n",
		       dev->name, entry, skb->len);
	if (TX_BUFFS_AVAIL(cp) <= (MAX_SKB_FRAGS + 1))
		netif_stop_queue(dev);

	spin_unlock_irq(&cp->lock);

	cpw8(TxPoll, NormalTxPoll);
	dev->trans_start = jiffies;

	return 0;
}