Example #1
static void sun3_82586_timeout(struct net_device *dev)
{
	struct priv *p = netdev_priv(dev);
#ifndef NO_NOPCOMMANDS
	if(p->scb->cus & CU_ACTIVE) /* COMMAND-UNIT active? */
	{
		netif_wake_queue(dev);
#ifdef DEBUG
		printk("%s: strange ... timeout with CU active?!?\n",dev->name);
		printk("%s: X0: %04x N0: %04x N1: %04x %d\n",dev->name,(int)swab16(p->xmit_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[0]->cmd_status),(int)swab16(p->nop_cmds[1]->cmd_status),(int)p->nop_point);
#endif
		p->scb->cmd_cuc = CUC_ABORT;
		sun3_attn586();
		WAIT_4_SCB_CMD();
		p->scb->cbl_offset = make16(p->nop_cmds[p->nop_point]);
		p->scb->cmd_cuc = CUC_START;
		sun3_attn586();
		WAIT_4_SCB_CMD();
		netif_trans_update(dev); /* prevent tx timeout */
		return;
	}
#endif
	{
#ifdef DEBUG
		printk("%s: xmitter timed out, try to restart! stat: %02x\n",dev->name,p->scb->cus);
		printk("%s: command-stats: %04x %04x\n",dev->name,swab16(p->xmit_cmds[0]->cmd_status),swab16(p->xmit_cmds[1]->cmd_status));
		printk("%s: check, whether you set the right interrupt number!\n",dev->name);
#endif
		sun3_82586_close(dev);
		sun3_82586_open(dev);
	}
	netif_trans_update(dev); /* prevent tx timeout */
}
Example #2
/* Hardware start transmission.
 * Send a packet to media from the upper layer.
 */
static netdev_tx_t emac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct emac_board_info *db = netdev_priv(dev);
	unsigned long channel;
	unsigned long flags;

	channel = db->tx_fifo_stat & 3;
	if (channel == 3)
		return NETDEV_TX_BUSY;

	channel = (channel == 1 ? 1 : 0);

	spin_lock_irqsave(&db->lock, flags);

	writel(channel, db->membase + EMAC_TX_INS_REG);

	emac_outblk_32bit(db->membase + EMAC_TX_IO_DATA_REG,
			skb->data, skb->len);
	dev->stats.tx_bytes += skb->len;

	db->tx_fifo_stat |= 1 << channel;
	/* TX control: send the first packet immediately, queue the second */
	if (channel == 0) {
		/* set TX len */
		writel(skb->len, db->membase + EMAC_TX_PL0_REG);
		/* start the transfer from FIFO to PHY */
		writel(readl(db->membase + EMAC_TX_CTL0_REG) | 1,
		       db->membase + EMAC_TX_CTL0_REG);

		/* save the time stamp */
		netif_trans_update(dev);
	} else if (channel == 1) {
		/* set TX len */
		writel(skb->len, db->membase + EMAC_TX_PL1_REG);
		/* start the transfer from FIFO to PHY */
		writel(readl(db->membase + EMAC_TX_CTL1_REG) | 1,
		       db->membase + EMAC_TX_CTL1_REG);

		/* save the time stamp */
		netif_trans_update(dev);
	}

	if ((db->tx_fifo_stat & 3) == 3) {
		/* Second packet */
		netif_stop_queue(dev);
	}

	spin_unlock_irqrestore(&db->lock, flags);

	/* free this SKB */
	dev_consume_skb_any(skb);

	return NETDEV_TX_OK;
}
Example #3
static void bfin_sir_send_work(struct work_struct *work)
{
	struct bfin_sir_self  *self = container_of(work, struct bfin_sir_self, work);
	struct net_device *dev = self->sir_port->dev;
	struct bfin_sir_port *port = self->sir_port;
	unsigned short val;
	int tx_cnt = 10;

	while (bfin_sir_is_receiving(dev) && --tx_cnt)
		turnaround_delay(dev->last_rx, self->mtt);

	bfin_sir_stop_rx(port);

	/* To avoid losing the RX interrupt, we reset the IR function before
	 * sending data. We could also set the speed, which would
	 * reset the whole UART.
	 */
	val = UART_GET_GCTL(port);
	val &= ~(UMOD_MASK | RPOLC);
	UART_PUT_GCTL(port, val);
	SSYNC();
	val |= UMOD_IRDA | RPOLC;
	UART_PUT_GCTL(port, val);
	SSYNC();
	/* bfin_sir_set_speed(port, self->speed); */

#ifdef CONFIG_SIR_BFIN_DMA
	bfin_sir_dma_tx_chars(dev);
#endif
	bfin_sir_enable_tx(port);
	netif_trans_update(dev);
}
Example #4
/*
 * callback for bulk IN urb
 */
static void ems_usb_write_bulk_callback(struct urb *urb)
{
	struct ems_tx_urb_context *context = urb->context;
	struct ems_usb *dev;
	struct net_device *netdev;

	BUG_ON(!context);

	dev = context->dev;
	netdev = dev->netdev;

	/* free up our allocated buffer */
	usb_free_coherent(urb->dev, urb->transfer_buffer_length,
			  urb->transfer_buffer, urb->transfer_dma);

	atomic_dec(&dev->active_tx_urbs);

	if (!netif_device_present(netdev))
		return;

	if (urb->status)
		netdev_info(netdev, "Tx URB aborted (%d)\n", urb->status);

	netif_trans_update(netdev);

	/* transmission complete interrupt */
	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += context->dlc;

	can_get_echo_skb(netdev, context->echo_index);

	/* Release context */
	context->echo_index = MAX_TX_URBS;

}
Example #5
void dev_activate(struct net_device *dev)
{
	int need_watchdog;

	/* No queueing discipline is attached to device;
	 * create default one for devices, which need queueing
	 * and noqueue_qdisc for virtual interfaces
	 */

	if (dev->qdisc == &noop_qdisc)
		attach_default_qdiscs(dev);

	if (!netif_carrier_ok(dev))
		/* Delay activation until next carrier-on event */
		return;

	need_watchdog = 0;
	netdev_for_each_tx_queue(dev, transition_one_qdisc, &need_watchdog);
	if (dev_ingress_queue(dev))
		transition_one_qdisc(dev, dev_ingress_queue(dev), NULL);

	if (need_watchdog) {
		netif_trans_update(dev);
		dev_watchdog_up(dev);
	}
}
Example #6
static void timeout(struct net_device *dev)
{
	printk(KERN_NOTICE "%s: transmit timed out, resetting\n", dev->name);
	sgiseeq_reset(dev);

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);
}
Example #7
/*
 * This function sets trans_start per tx_queue
 */
void mwifiex_set_trans_start(struct net_device *dev)
{
	int i;

	for (i = 0; i < dev->num_tx_queues; i++)
		netdev_get_tx_queue(dev, i)->trans_start = jiffies;

	netif_trans_update(dev);
}
Example #8
static int octeon_mgmt_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);
	union mgmt_port_ring_entry re;
	unsigned long flags;
	int rv = NETDEV_TX_BUSY;

	re.d64 = 0;
	re.s.tstamp = ((skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP) != 0);
	re.s.len = skb->len;
	re.s.addr = dma_map_single(p->dev, skb->data,
				   skb->len,
				   DMA_TO_DEVICE);

	spin_lock_irqsave(&p->tx_list.lock, flags);

	if (unlikely(p->tx_current_fill >= ring_max_fill(OCTEON_MGMT_TX_RING_SIZE) - 1)) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		netif_stop_queue(netdev);
		spin_lock_irqsave(&p->tx_list.lock, flags);
	}

	if (unlikely(p->tx_current_fill >=
		     ring_max_fill(OCTEON_MGMT_TX_RING_SIZE))) {
		spin_unlock_irqrestore(&p->tx_list.lock, flags);
		dma_unmap_single(p->dev, re.s.addr, re.s.len,
				 DMA_TO_DEVICE);
		goto out;
	}

	__skb_queue_tail(&p->tx_list, skb);

	/* Put it in the ring.  */
	p->tx_ring[p->tx_next] = re.d64;
	p->tx_next = (p->tx_next + 1) % OCTEON_MGMT_TX_RING_SIZE;
	p->tx_current_fill++;

	spin_unlock_irqrestore(&p->tx_list.lock, flags);

	dma_sync_single_for_device(p->dev, p->tx_ring_handle,
				   ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
				   DMA_BIDIRECTIONAL);

	netdev->stats.tx_packets++;
	netdev->stats.tx_bytes += skb->len;

	/* Ring the bell.  */
	cvmx_write_csr(p->mix + MIX_ORING2, 1);

	netif_trans_update(netdev);
	rv = NETDEV_TX_OK;
out:
	octeon_mgmt_update_tx_stats(netdev);
	return rv;
}
Example #9
File: dl2k.c  Project: Lyude/linux
static void
rio_tx_timeout (struct net_device *dev)
{
	struct netdev_private *np = netdev_priv(dev);
	void __iomem *ioaddr = np->ioaddr;

	printk (KERN_INFO "%s: Tx timed out (%4.4x), is buffer full?\n",
		dev->name, dr32(TxStatus));
	rio_free_tx(dev, 0);
	dev->if_port = 0;
	netif_trans_update(dev); /* prevent tx timeout */
}
Example #10
static void el3_tx_timeout(struct net_device *dev)
{
	unsigned int ioaddr = dev->base_addr;

	netdev_warn(dev, "Transmit timed out!\n");
	dump_status(dev);
	dev->stats.tx_errors++;
	netif_trans_update(dev); /* prevent tx timeout */
	/* Issue TX_RESET and TX_START commands. */
	tc589_wait_for_completion(dev, TxReset);
	outw(TxEnable, ioaddr + EL3_CMD);
	netif_wake_queue(dev);
}
Example #11
static inline int sgiseeq_reset(struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct sgiseeq_regs *sregs = sp->sregs;
	int err;

	err = init_seeq(dev, sp, sregs);
	if (err)
		return err;

	netif_trans_update(dev); /* prevent tx timeout */
	netif_wake_queue(dev);

	return 0;
}
Example #12
//---------------------------------------------------------------------------
void nasmt_tx_timeout(struct net_device *dev)
{
  //---------------------------------------------------------------------------
  /* Transmitter timeout, serious problems. */
  printk("nasmt_tx_timeout: begin\n");
  //((struct nas_priv *)(dev->priv))->stats.tx_errors++;
  (gpriv->stats).tx_errors++;
#if  LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
  netif_trans_update(dev);
#else
  dev->trans_start = jiffies;
#endif
  netif_wake_queue(dev);
  printk("nasmt_tx_timeout: transmit timed out %s\n",dev->name);
}
Example #13
static inline void korina_abort_dma(struct net_device *dev,
					struct dma_reg *ch)
{
	if (readl(&ch->dmac) & DMA_CHAN_RUN_BIT) {
		writel(0x10, &ch->dmac);

		while (!(readl(&ch->dmas) & DMA_STAT_HALT))
			netif_trans_update(dev);

		writel(0, &ch->dmas);
	}

	writel(0, &ch->dmadptr);
	writel(0, &ch->dmandptr);
}
Example #14
/*
 * Transmit a packet to the base station on behalf of the network stack.
 *
 * Returns: 0 if ok, < 0 errno code on error.
 *
 * We need to pull the ethernet header and add the hardware header,
 * which is currently set to all zeroes and reserved.
 */
static
int i2400m_net_tx(struct i2400m *i2400m, struct net_device *net_dev,
		  struct sk_buff *skb)
{
	int result;
	struct device *dev = i2400m_dev(i2400m);

	d_fnstart(3, dev, "(i2400m %p net_dev %p skb %p)\n",
		  i2400m, net_dev, skb);
	/* FIXME: check eth hdr, only IPv4 is routed by the device as of now */
	netif_trans_update(net_dev);
	i2400m_tx_prep_header(skb);
	d_printf(3, dev, "NETTX: skb %p sending %d bytes to radio\n",
		 skb, skb->len);
	d_dump(4, dev, skb->data, skb->len);
	result = i2400m_tx(i2400m, skb->data, skb->len, I2400M_PT_DATA);
	d_fnend(3, dev, "(i2400m %p net_dev %p skb %p) = %d\n",
		i2400m, net_dev, skb, result);
	return result;
}
Example #15
/* Our watchdog timed out. Called by the networking layer */
static void emac_timeout(struct net_device *dev)
{
	struct emac_board_info *db = netdev_priv(dev);
	unsigned long flags;

	if (netif_msg_timer(db))
		dev_err(db->dev, "tx time out.\n");

	/* Save previous register address */
	spin_lock_irqsave(&db->lock, flags);

	netif_stop_queue(dev);
	emac_reset(db);
	emac_init_device(dev);
	/* We can accept TX packets again */
	netif_trans_update(dev);
	netif_wake_queue(dev);

	/* Restore previous register address */
	spin_unlock_irqrestore(&db->lock, flags);
}
Example #16
//------------------------------------------------------------------------------
static int vethStartXmit(struct sk_buff* pSkb_p, struct net_device* pNetDevice_p)
{
    tOplkError      ret;
    tFrameInfo      frameInfo;

    //transmit function
    struct net_device_stats* pStats = netdev_priv(pNetDevice_p);

    //save time stamp
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 7, 0))
    pNetDevice_p->trans_start = jiffies;
#else
    netif_trans_update(pNetDevice_p);
#endif

    frameInfo.frame.pBuffer = (tPlkFrame*)pSkb_p->data;
    frameInfo.frameSize = pSkb_p->len;

    //call send fkt on DLL
    ret = dllkcal_sendAsyncFrame(&frameInfo, kDllAsyncReqPrioGeneric);
    if (ret != kErrorOk)
    {
        DEBUG_LVL_VETH_TRACE("%s(): dllkcal_sendAsyncFrame returned 0x%04X\n", __func__, ret);
        netif_stop_queue(pNetDevice_p);
        goto Exit;
    }
    else
    {
        DEBUG_LVL_VETH_TRACE("%s(): frame passed to DLL\n", __func__);
        dev_kfree_skb(pSkb_p);

        //set stats for the device
        pStats->tx_packets++;
        pStats->tx_bytes += frameInfo.frameSize;
    }

Exit:
    return 0;
}
Example #17
//---------------------------------------------------------------------------
//
int nasmt_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
  //---------------------------------------------------------------------------
  // Start debug information
#ifdef NAS_DEBUG_DEVICE
  printk("nasmt_hard_start_xmit: begin\n");
#endif

  if ((!skb)||(!dev)) {
    printk("nasmt_hard_start_xmit - input parameter skb or dev is NULL \n");
    return -1;
  }

  // End debug information
  netif_stop_queue(dev);
#if  LINUX_VERSION_CODE >= KERNEL_VERSION(4,7,0)
  netif_trans_update(dev);
#else
  dev->trans_start = jiffies;
#endif
#ifdef NAS_DEBUG_SEND_DETAIL
  printk("nasmt_hard_start_xmit: step 1\n");
#endif
  nasmt_CLASS_send(skb);
#ifdef NAS_DEBUG_SEND_DETAIL
  printk("nasmt_hard_start_xmit: step 2\n");
#endif
  dev_kfree_skb(skb);
#ifdef NAS_DEBUG_SEND_DETAIL
  printk("nasmt_hard_start_xmit: step 3\n");
#endif
  netif_wake_queue(dev);
#ifdef NAS_DEBUG_DEVICE
  printk("nasmt_hard_start_xmit: end\n");
#endif
  return 0;
}
Example #18
static netdev_tx_t ems_usb_start_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct ems_usb *dev = netdev_priv(netdev);
	struct ems_tx_urb_context *context = NULL;
	struct net_device_stats *stats = &netdev->stats;
	struct can_frame *cf = (struct can_frame *)skb->data;
	struct ems_cpc_msg *msg;
	struct urb *urb;
	u8 *buf;
	int i, err;
	size_t size = CPC_HEADER_SIZE + CPC_MSG_HEADER_LEN
			+ sizeof(struct cpc_can_msg);

	if (can_dropped_invalid_skb(netdev, skb))
		return NETDEV_TX_OK;

	/* create a URB, and a buffer for it, and copy the data to the URB */
	urb = usb_alloc_urb(0, GFP_ATOMIC);
	if (!urb) {
		netdev_err(netdev, "No memory left for URBs\n");
		goto nomem;
	}

	buf = usb_alloc_coherent(dev->udev, size, GFP_ATOMIC, &urb->transfer_dma);
	if (!buf) {
		netdev_err(netdev, "No memory left for USB buffer\n");
		usb_free_urb(urb);
		goto nomem;
	}

	msg = (struct ems_cpc_msg *)&buf[CPC_HEADER_SIZE];

	msg->msg.can_msg.id = cpu_to_le32(cf->can_id & CAN_ERR_MASK);
	msg->msg.can_msg.length = cf->can_dlc;

	if (cf->can_id & CAN_RTR_FLAG) {
		msg->type = cf->can_id & CAN_EFF_FLAG ?
			CPC_CMD_TYPE_EXT_RTR_FRAME : CPC_CMD_TYPE_RTR_FRAME;

		msg->length = CPC_CAN_MSG_MIN_SIZE;
	} else {
		msg->type = cf->can_id & CAN_EFF_FLAG ?
			CPC_CMD_TYPE_EXT_CAN_FRAME : CPC_CMD_TYPE_CAN_FRAME;

		for (i = 0; i < cf->can_dlc; i++)
			msg->msg.can_msg.msg[i] = cf->data[i];

		msg->length = CPC_CAN_MSG_MIN_SIZE + cf->can_dlc;
	}

	for (i = 0; i < MAX_TX_URBS; i++) {
		if (dev->tx_contexts[i].echo_index == MAX_TX_URBS) {
			context = &dev->tx_contexts[i];
			break;
		}
	}

	/*
	 * May never happen! If it did, we'd have more URBs in flight than
	 * allowed (MAX_TX_URBS).
	 */
	if (!context) {
		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
		usb_free_urb(urb);

		netdev_warn(netdev, "couldn't find free context\n");

		return NETDEV_TX_BUSY;
	}

	context->dev = dev;
	context->echo_index = i;
	context->dlc = cf->can_dlc;

	usb_fill_bulk_urb(urb, dev->udev, usb_sndbulkpipe(dev->udev, 2), buf,
			  size, ems_usb_write_bulk_callback, context);
	urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	usb_anchor_urb(urb, &dev->tx_submitted);

	can_put_echo_skb(skb, netdev, context->echo_index);

	atomic_inc(&dev->active_tx_urbs);

	err = usb_submit_urb(urb, GFP_ATOMIC);
	if (unlikely(err)) {
		can_free_echo_skb(netdev, context->echo_index);

		usb_unanchor_urb(urb);
		usb_free_coherent(dev->udev, size, buf, urb->transfer_dma);
		dev_kfree_skb(skb);

		atomic_dec(&dev->active_tx_urbs);

		if (err == -ENODEV) {
			netif_device_detach(netdev);
		} else {
			netdev_warn(netdev, "failed tx_urb %d\n", err);

			stats->tx_dropped++;
		}
	} else {
		netif_trans_update(netdev);

		/* Slow down tx path */
		if (atomic_read(&dev->active_tx_urbs) >= MAX_TX_URBS ||
		    dev->free_slots < CPC_TX_QUEUE_TRIGGER_LOW) {
			netif_stop_queue(netdev);
		}
	}

	/*
	 * Release our reference to this URB, the USB core will eventually free
	 * it entirely.
	 */
	usb_free_urb(urb);

	return NETDEV_TX_OK;

nomem:
	dev_kfree_skb(skb);
	stats->tx_dropped++;

	return NETDEV_TX_OK;
}
Example #19
static int vector_send(struct vector_queue *qi)
{
	struct vector_private *vp = netdev_priv(qi->dev);
	struct mmsghdr *send_from;
	int result = 0, send_len, queue_depth = qi->max_depth;

	if (spin_trylock(&qi->head_lock)) {
		if (spin_trylock(&qi->tail_lock)) {
			/* update queue_depth to current value */
			queue_depth = qi->queue_depth;
			spin_unlock(&qi->tail_lock);
			while (queue_depth > 0) {
				/* Calculate the start of the vector */
				send_len = queue_depth;
				send_from = qi->mmsg_vector;
				send_from += qi->head;
				/* Adjust vector size if wraparound */
				if (send_len + qi->head > qi->max_depth)
					send_len = qi->max_depth - qi->head;
				/* Try to TX as many packets as possible */
				if (send_len > 0) {
					result = uml_vector_sendmmsg(
						 vp->fds->tx_fd,
						 send_from,
						 send_len,
						 0
					);
					vp->in_write_poll =
						(result != send_len);
				}
				/* For some of the sendmmsg error scenarios
				 * we may end up being unsure of the TX success
				 * for all packets. It is safer to declare
				 * them all TX-ed and blame the network.
				 */
				if (result < 0) {
					if (net_ratelimit())
						netdev_err(vp->dev, "sendmmsg err=%i\n",
							result);
					result = send_len;
				}
				if (result > 0) {
					queue_depth =
						consume_vector_skbs(qi, result);
					/* This is equivalent to a TX IRQ.
					 * Restart the upper layers to feed us
					 * more packets.
					 */
					if (result > vp->estats.tx_queue_max)
						vp->estats.tx_queue_max = result;
					vp->estats.tx_queue_running_average =
						(vp->estats.tx_queue_running_average + result) >> 1;
				}
				netif_trans_update(qi->dev);
				netif_wake_queue(qi->dev);
				/* if TX is busy, break out of the send loop,
				 *  poll write IRQ will reschedule xmit for us
				 */
				if (result != send_len) {
					vp->estats.tx_restart_queue++;
					break;
				}
			}
		}
		spin_unlock(&qi->head_lock);
	}
	return queue_depth;
}
Example #20
/*----------------------------------------------------------------
 * p80211knetdev_hard_start_xmit
 *
 * Linux netdevice method for transmitting a frame.
 *
 * Arguments:
 *	skb	Linux sk_buff containing the frame.
 *	netdev	Linux netdevice.
 *
 * Side effects:
 *	If the lower layers report that buffers are full, netdev->tbusy
 *	will be set to prevent higher layers from sending more traffic.
 *
 *	Note: If this function returns non-zero, higher layers retain
 *	      ownership of the skb.
 *
 * Returns:
 *	zero on success, non-zero on failure.
 *----------------------------------------------------------------
 */
static int p80211knetdev_hard_start_xmit(struct sk_buff *skb,
					 struct net_device *netdev)
{
	int result = 0;
	int txresult = -1;
	struct wlandevice *wlandev = netdev->ml_priv;
	union p80211_hdr p80211_hdr;
	struct p80211_metawep p80211_wep;

	p80211_wep.data = NULL;

	if (!skb)
		return NETDEV_TX_OK;

	if (wlandev->state != WLAN_DEVICE_OPEN) {
		result = 1;
		goto failed;
	}

	memset(&p80211_hdr, 0, sizeof(p80211_hdr));
	memset(&p80211_wep, 0, sizeof(p80211_wep));

	if (netif_queue_stopped(netdev)) {
		netdev_dbg(netdev, "called when queue stopped.\n");
		result = 1;
		goto failed;
	}

	netif_stop_queue(netdev);

	/* Check to see that a valid mode is set */
	switch (wlandev->macmode) {
	case WLAN_MACMODE_IBSS_STA:
	case WLAN_MACMODE_ESS_STA:
	case WLAN_MACMODE_ESS_AP:
		break;
	default:
		/* Mode isn't set yet, just drop the frame
		 * and return success.
		 * TODO: we need a saner way to handle this
		 */
		if (be16_to_cpu(skb->protocol) != ETH_P_80211_RAW) {
			netif_start_queue(wlandev->netdev);
			netdev_notice(netdev, "Tx attempt prior to association, frame dropped.\n");
			netdev->stats.tx_dropped++;
			result = 0;
			goto failed;
		}
		break;
	}

	/* Check for raw transmits */
	if (be16_to_cpu(skb->protocol) == ETH_P_80211_RAW) {
		if (!capable(CAP_NET_ADMIN)) {
			result = 1;
			goto failed;
		}
		/* move the header over */
		memcpy(&p80211_hdr, skb->data, sizeof(p80211_hdr));
		skb_pull(skb, sizeof(p80211_hdr));
	} else {
		if (skb_ether_to_p80211
		    (wlandev, wlandev->ethconv, skb, &p80211_hdr,
		     &p80211_wep) != 0) {
			/* convert failed */
			netdev_dbg(netdev, "ether_to_80211(%d) failed.\n",
				   wlandev->ethconv);
			result = 1;
			goto failed;
		}
	}
	if (!wlandev->txframe) {
		result = 1;
		goto failed;
	}

	netif_trans_update(netdev);

	netdev->stats.tx_packets++;
	/* count only the packet payload */
	netdev->stats.tx_bytes += skb->len;

	txresult = wlandev->txframe(wlandev, skb, &p80211_hdr, &p80211_wep);

	if (txresult == 0) {
		/* success and more buf */
		/* avail, re: hw_txdata */
		netif_wake_queue(wlandev->netdev);
		result = NETDEV_TX_OK;
	} else if (txresult == 1) {
		/* success, no more avail */
		netdev_dbg(netdev, "txframe success, no more bufs\n");
		/* netdev->tbusy = 1;  don't set here, irqhdlr */
		/*   may have already cleared it */
		result = NETDEV_TX_OK;
	} else if (txresult == 2) {
		/* alloc failure, drop frame */
		netdev_dbg(netdev, "txframe returned alloc_fail\n");
		result = NETDEV_TX_BUSY;
	} else {
		/* buffer full or queue busy, drop frame. */
		netdev_dbg(netdev, "txframe returned full or busy\n");
		result = NETDEV_TX_BUSY;
	}

failed:
	/* Free up the WEP buffer if it's not the same as the skb */
	if ((p80211_wep.data) && (p80211_wep.data != skb->data))
		kzfree(p80211_wep.data);

	/* we always free the skb here, never in a lower level. */
	if (!result)
		dev_kfree_skb(skb);

	return result;
}
Example #21
/* transmit packet */
static int korina_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct korina_private *lp = netdev_priv(dev);
	unsigned long flags;
	u32 length;
	u32 chain_prev, chain_next;
	struct dma_desc *td;

	spin_lock_irqsave(&lp->lock, flags);

	td = &lp->td_ring[lp->tx_chain_tail];

	/* stop queue when full, drop pkts if queue already full */
	if (lp->tx_count >= (KORINA_NUM_TDS - 2)) {
		lp->tx_full = 1;

		if (lp->tx_count == (KORINA_NUM_TDS - 2))
			netif_stop_queue(dev);
		else {
			dev->stats.tx_dropped++;
			dev_kfree_skb_any(skb);
			spin_unlock_irqrestore(&lp->lock, flags);

			return NETDEV_TX_BUSY;
		}
	}

	lp->tx_count++;

	lp->tx_skb[lp->tx_chain_tail] = skb;

	length = skb->len;
	dma_cache_wback((u32)skb->data, skb->len);

	/* Setup the transmit descriptor. */
	dma_cache_inv((u32) td, sizeof(*td));
	td->ca = CPHYSADDR(skb->data);
	chain_prev = (lp->tx_chain_tail - 1) & KORINA_TDS_MASK;
	chain_next = (lp->tx_chain_tail + 1) & KORINA_TDS_MASK;

	if (readl(&(lp->tx_dma_regs->dmandptr)) == 0) {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&lp->tx_dma_regs->dmandptr);
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Link to prev */
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			/* Link to prev */
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			/* Write to NDPTR */
			writel(CPHYSADDR(&lp->td_ring[lp->tx_chain_head]),
					&(lp->tx_dma_regs->dmandptr));
			/* Move head to tail */
			lp->tx_chain_head = lp->tx_chain_tail;
			lp->tx_chain_status = desc_empty;
		}
	} else {
		if (lp->tx_chain_status == desc_empty) {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			/* Move tail */
			lp->tx_chain_tail = chain_next;
			lp->tx_chain_status = desc_filled;
		} else {
			/* Update tail */
			td->control = DMA_COUNT(length) |
					DMA_DESC_COF | DMA_DESC_IOF;
			lp->td_ring[chain_prev].control &=
					~DMA_DESC_COF;
			lp->td_ring[chain_prev].link =  CPHYSADDR(td);
			lp->tx_chain_tail = chain_next;
		}
	}
	dma_cache_wback((u32) td, sizeof(*td));

	netif_trans_update(dev);
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
Example #22
static netdev_tx_t
qcaspi_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
{
	u32 frame_len;
	u8 *ptmp;
	struct qcaspi *qca = netdev_priv(dev);
	u16 new_tail;
	struct sk_buff *tskb;
	u8 pad_len = 0;

	if (skb->len < QCAFRM_MIN_LEN)
		pad_len = QCAFRM_MIN_LEN - skb->len;

	if (qca->txr.skb[qca->txr.tail]) {
		netdev_warn(qca->net_dev, "queue was unexpectedly full!\n");
		netif_stop_queue(qca->net_dev);
		qca->stats.ring_full++;
		return NETDEV_TX_BUSY;
	}

	if ((skb_headroom(skb) < QCAFRM_HEADER_LEN) ||
	    (skb_tailroom(skb) < QCAFRM_FOOTER_LEN + pad_len)) {
		tskb = skb_copy_expand(skb, QCAFRM_HEADER_LEN,
				       QCAFRM_FOOTER_LEN + pad_len, GFP_ATOMIC);
		if (!tskb) {
			qca->stats.out_of_mem++;
			return NETDEV_TX_BUSY;
		}
		dev_kfree_skb(skb);
		skb = tskb;
	}

	frame_len = skb->len + pad_len;

	ptmp = skb_push(skb, QCAFRM_HEADER_LEN);
	qcafrm_create_header(ptmp, frame_len);

	if (pad_len) {
		ptmp = skb_put_zero(skb, pad_len);
	}

	ptmp = skb_put(skb, QCAFRM_FOOTER_LEN);
	qcafrm_create_footer(ptmp);

	netdev_dbg(qca->net_dev, "Tx-ing packet: Size: 0x%08x\n",
		   skb->len);

	qca->txr.size += skb->len + QCASPI_HW_PKT_LEN;

	new_tail = qca->txr.tail + 1;
	if (new_tail >= qca->txr.count)
		new_tail = 0;

	qca->txr.skb[qca->txr.tail] = skb;
	qca->txr.tail = new_tail;

	if (!qcaspi_tx_ring_has_space(&qca->txr)) {
		netif_stop_queue(qca->net_dev);
		qca->stats.ring_full++;
	}

	netif_trans_update(dev);

	if (qca->spi_thread &&
	    qca->spi_thread->state != TASK_RUNNING)
		wake_up_process(qca->spi_thread);

	return NETDEV_TX_OK;
}
Example #23
static int
kni_net_tx(struct sk_buff *skb, struct net_device *dev)
{
	int len = 0;
	unsigned ret;
	struct kni_dev *kni = netdev_priv(dev);
	struct rte_kni_mbuf *pkt_kva = NULL;
	struct rte_kni_mbuf *pkt_va = NULL;

	/* save the timestamp */
#ifdef HAVE_TRANS_START_HELPER
	netif_trans_update(dev);
#else
	dev->trans_start = jiffies;
#endif

	/* Check if the length of skb is less than mbuf size */
	if (skb->len > kni->mbuf_size)
		goto drop;

	/**
	 * Check if it has at least one free entry in tx_q and
	 * one entry in alloc_q.
	 */
	if (kni_fifo_free_count(kni->tx_q) == 0 ||
			kni_fifo_count(kni->alloc_q) == 0) {
		/**
		 * If no free entry in tx_q or no entry in alloc_q,
		 * drops skb and goes out.
		 */
		goto drop;
	}

	/* dequeue a mbuf from alloc_q */
	ret = kni_fifo_get(kni->alloc_q, (void **)&pkt_va, 1);
	if (likely(ret == 1)) {
		void *data_kva;

		pkt_kva = (void *)pkt_va - kni->mbuf_va + kni->mbuf_kva;
		data_kva = pkt_kva->buf_addr + pkt_kva->data_off - kni->mbuf_va
				+ kni->mbuf_kva;

		len = skb->len;
		memcpy(data_kva, skb->data, len);
		if (unlikely(len < ETH_ZLEN)) {
			memset(data_kva + len, 0, ETH_ZLEN - len);
			len = ETH_ZLEN;
		}
		pkt_kva->pkt_len = len;
		pkt_kva->data_len = len;

		/* enqueue mbuf into tx_q */
		ret = kni_fifo_put(kni->tx_q, (void **)&pkt_va, 1);
		if (unlikely(ret != 1)) {
			/* Failing should not happen */
			KNI_ERR("Fail to enqueue mbuf into tx_q\n");
			goto drop;
		}
	} else {
		/* Failing should not happen */
		KNI_ERR("Fail to dequeue mbuf from alloc_q\n");
		goto drop;
	}

	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_bytes += len;
	kni->stats.tx_packets++;

	return NETDEV_TX_OK;

drop:
	/* Free skb and update statistics */
	dev_kfree_skb(skb);
	kni->stats.tx_dropped++;

	return NETDEV_TX_OK;
}
Example #24
static netdev_tx_t eth_start_xmit(struct sk_buff *skb,
					struct net_device *net)
{
	struct eth_dev		*dev = netdev_priv(net);
	int			length = 0;
	int			retval;
	struct usb_request	*req = NULL;
	unsigned long		flags;
	struct usb_ep		*in;
	u16			cdc_filter;

	spin_lock_irqsave(&dev->lock, flags);
	if (dev->port_usb) {
		in = dev->port_usb->in_ep;
		cdc_filter = dev->port_usb->cdc_filter;
	} else {
		in = NULL;
		cdc_filter = 0;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (skb && !in) {
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	/* apply outgoing CDC or RNDIS filters */
	if (skb && !is_promisc(cdc_filter)) {
		u8		*dest = skb->data;

		if (is_multicast_ether_addr(dest)) {
			u16	type;

			/* ignores USB_CDC_PACKET_TYPE_MULTICAST and host
			 * SET_ETHERNET_MULTICAST_FILTERS requests
			 */
			if (is_broadcast_ether_addr(dest))
				type = USB_CDC_PACKET_TYPE_BROADCAST;
			else
				type = USB_CDC_PACKET_TYPE_ALL_MULTICAST;
			if (!(cdc_filter & type)) {
				dev_kfree_skb_any(skb);
				return NETDEV_TX_OK;
			}
		}
		/* ignores USB_CDC_PACKET_TYPE_DIRECTED */
	}

	spin_lock_irqsave(&dev->req_lock, flags);
	/*
	 * this freelist can be empty if an interrupt triggered disconnect()
	 * and reconfigured the gadget (shutting down this queue) after the
	 * network stack decided to xmit but before we got the spinlock.
	 */
	if (list_empty(&dev->tx_reqs)) {
		spin_unlock_irqrestore(&dev->req_lock, flags);
		return NETDEV_TX_BUSY;
	}

	req = list_first_entry(&dev->tx_reqs, struct usb_request, list);
	list_del(&req->list);

	/* temporarily stop TX queue when the freelist empties */
	if (list_empty(&dev->tx_reqs))
		netif_stop_queue(net);
	spin_unlock_irqrestore(&dev->req_lock, flags);

	/* no buffer copies needed, unless the network stack did it
	 * or the hardware can't use skb buffers.
	 * or there's not enough space for extra headers we need
	 */
	if (dev->wrap) {
		unsigned long	flags;

		spin_lock_irqsave(&dev->lock, flags);
		if (dev->port_usb)
			skb = dev->wrap(dev->port_usb, skb);
		spin_unlock_irqrestore(&dev->lock, flags);
		if (!skb) {
			/* Multi frame CDC protocols may store the frame for
			 * later which is not a dropped frame.
			 */
			if (dev->port_usb &&
					dev->port_usb->supports_multi_frame)
				goto multiframe;
			goto drop;
		}
	}

	length = skb->len;
	req->buf = skb->data;
	req->context = skb;
	req->complete = tx_complete;

	/* NCM requires no zlp if transfer is dwNtbInMaxSize */
	if (dev->port_usb &&
	    dev->port_usb->is_fixed &&
	    length == dev->port_usb->fixed_in_len &&
	    (length % in->maxpacket) == 0)
		req->zero = 0;
	else
		req->zero = 1;

	/* use zlp framing on tx for strict CDC-Ether conformance,
	 * though any robust network rx path ignores extra padding.
	 * and some hardware doesn't like to write zlps.
	 */
	if (req->zero && !dev->zlp && (length % in->maxpacket) == 0)
		length++;

	req->length = length;

	retval = usb_ep_queue(in, req, GFP_ATOMIC);
	switch (retval) {
	default:
		DBG(dev, "tx queue err %d\n", retval);
		break;
	case 0:
		netif_trans_update(net);
		atomic_inc(&dev->tx_qlen);
	}

	if (retval) {
		dev_kfree_skb_any(skb);
drop:
		dev->net->stats.tx_dropped++;
multiframe:
		spin_lock_irqsave(&dev->req_lock, flags);
		if (list_empty(&dev->tx_reqs))
			netif_start_queue(net);
		list_add(&req->list, &dev->tx_reqs);
		spin_unlock_irqrestore(&dev->req_lock, flags);
	}
	return NETDEV_TX_OK;
}
Example #25
static netdev_tx_t
fjes_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
	struct fjes_adapter *adapter = netdev_priv(netdev);
	struct fjes_hw *hw = &adapter->hw;

	int max_epid, my_epid, dest_epid;
	enum ep_partner_status pstatus;
	struct netdev_queue *cur_queue;
	char shortpkt[VLAN_ETH_HLEN];
	bool is_multi, vlan;
	struct ethhdr *eth;
	u16 queue_no = 0;
	u16 vlan_id = 0;
	netdev_tx_t ret;
	char *data;
	int len;

	ret = NETDEV_TX_OK;
	is_multi = false;
	cur_queue = netdev_get_tx_queue(netdev, queue_no);

	eth = (struct ethhdr *)skb->data;
	my_epid = hw->my_epid;

	vlan = (vlan_get_tag(skb, &vlan_id) == 0) ? true : false;

	data = skb->data;
	len = skb->len;

	if (is_multicast_ether_addr(eth->h_dest)) {
		dest_epid = 0;
		max_epid = hw->max_epid;
		is_multi = true;
	} else if (is_local_ether_addr(eth->h_dest)) {
		dest_epid = eth->h_dest[ETH_ALEN - 1];
		max_epid = dest_epid + 1;

		if ((eth->h_dest[0] == 0x02) &&
		    (0x00 == (eth->h_dest[1] | eth->h_dest[2] |
			      eth->h_dest[3] | eth->h_dest[4])) &&
		    (dest_epid < hw->max_epid)) {
			;
		} else {
			dest_epid = 0;
			max_epid = 0;
			ret = NETDEV_TX_OK;

			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	} else {
		dest_epid = 0;
		max_epid = 0;
		ret = NETDEV_TX_OK;

		adapter->stats64.tx_packets += 1;
		hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
		adapter->stats64.tx_bytes += len;
		hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
	}

	for (; dest_epid < max_epid; dest_epid++) {
		if (my_epid == dest_epid)
			continue;

		pstatus = fjes_hw_get_partner_ep_status(hw, dest_epid);
		if (pstatus != EP_PARTNER_SHARED) {
			if (!is_multi)
				hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_not_shared += 1;
			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_epbuf_version(
				&adapter->hw.ep_shm_info[dest_epid].rx, 0)) {
			/* version is NOT 0 */
			adapter->stats64.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats
						.tx_carrier_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_ver_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (!fjes_hw_check_mtu(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				netdev->mtu)) {
			adapter->stats64.tx_dropped += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_dropped += 1;
			adapter->stats64.tx_errors += 1;
			hw->ep_shm_info[dest_epid].net_stats.tx_errors += 1;
			hw->ep_shm_info[dest_epid].ep_stats
					.tx_dropped_buf_size_mismatch += 1;

			ret = NETDEV_TX_OK;
		} else if (vlan &&
			   !fjes_hw_check_vlan_id(
				&adapter->hw.ep_shm_info[dest_epid].rx,
				vlan_id)) {
			hw->ep_shm_info[dest_epid].ep_stats
				.tx_dropped_vlanid_mismatch += 1;
			ret = NETDEV_TX_OK;
		} else {
			if (len < VLAN_ETH_HLEN) {
				memset(shortpkt, 0, VLAN_ETH_HLEN);
				memcpy(shortpkt, skb->data, skb->len);
				len = VLAN_ETH_HLEN;
				data = shortpkt;
			}

			if (adapter->tx_retry_count == 0) {
				adapter->tx_start_jiffies = jiffies;
				adapter->tx_retry_count = 1;
			} else {
				adapter->tx_retry_count++;
			}

			if (fjes_tx_send(adapter, dest_epid, data, len)) {
				if (is_multi) {
					ret = NETDEV_TX_OK;
				} else if (
					   ((long)jiffies -
					    (long)adapter->tx_start_jiffies) >=
					    FJES_TX_RETRY_TIMEOUT) {
					adapter->stats64.tx_fifo_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_fifo_errors += 1;
					adapter->stats64.tx_errors += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_errors += 1;

					ret = NETDEV_TX_OK;
				} else {
					netif_trans_update(netdev);
					hw->ep_shm_info[dest_epid].ep_stats
						.tx_buffer_full += 1;
					netif_tx_stop_queue(cur_queue);

					if (!work_pending(&adapter->tx_stall_task))
						queue_work(adapter->txrx_wq,
							   &adapter->tx_stall_task);

					ret = NETDEV_TX_BUSY;
				}
			} else {
				if (!is_multi) {
					adapter->stats64.tx_packets += 1;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_packets += 1;
					adapter->stats64.tx_bytes += len;
					hw->ep_shm_info[dest_epid].net_stats
								.tx_bytes += len;
				}

				adapter->tx_retry_count = 0;
				ret = NETDEV_TX_OK;
			}
		}
	}

	if (ret == NETDEV_TX_OK) {
		dev_kfree_skb(skb);
		if (is_multi) {
			adapter->stats64.tx_packets += 1;
			hw->ep_shm_info[my_epid].net_stats.tx_packets += 1;
			adapter->stats64.tx_bytes += len;
			hw->ep_shm_info[my_epid].net_stats.tx_bytes += len;
		}
	}

	return ret;
}
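All of these examples revolve around the same helper: netif_trans_update() refreshes the transmit timestamp (trans_start of TX queue 0) so the netdev watchdog does not raise a spurious TX timeout while the driver is legitimately resetting or feeding the hardware. As a summary, below is a minimal, hypothetical .ndo_tx_timeout handler in the same style (the old single-argument signature used by the examples above); foo_tx_timeout and foo_hw_reset are made-up names, not taken from any driver shown here.

#include <linux/netdevice.h>

/* Hypothetical helper; a real driver would reset its TX ring/DMA here. */
static void foo_hw_reset(struct net_device *dev)
{
	/* device-specific reset of the transmitter would go here */
}

/* Typical watchdog callback pattern seen throughout the examples above:
 * reset the TX path, refresh trans_start, then let the stack send again.
 */
static void foo_tx_timeout(struct net_device *dev)
{
	netdev_warn(dev, "transmit timed out, resetting\n");

	foo_hw_reset(dev);		/* driver-specific TX path reset */

	netif_trans_update(dev);	/* prevent an immediate repeat timeout */
	netif_wake_queue(dev);		/* re-enable queuing from the stack */
}

Such a handler would typically be wired up through struct net_device_ops as .ndo_tx_timeout = foo_tx_timeout;.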