Example no. 1
static int net_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = dev->priv;
	int ioaddr = dev->base_addr;
	short length = skb->len;
	unsigned char *buf;
	unsigned long flags;

	/* Block a transmit from overlapping.  */
	
	if (length > ETH_FRAME_LEN) {
		if (net_debug)
			printk("%s: Attempting to send a large packet (%d bytes).\n",
				dev->name, length);
		return 1;
	}
	
	if (length < ETH_ZLEN)
	{
		skb = skb_padto(skb, ETH_ZLEN);
		if(skb == NULL)
			return 0;
		length = ETH_ZLEN;
	}
	buf = skb->data;
	
	if (net_debug > 4)
		printk("%s: Transmitting a packet of length %lu.\n", dev->name,
			   (unsigned long)skb->len);
	/* We must not start transmitting until we have finished transferring
	   the packet into the Tx queue. While the following code runs we may
	   take a Tx interrupt, so we clear tx_queue_ready to prevent the
	   interrupt routine (net_interrupt) from starting a transmission. */
	spin_lock_irqsave(&lp->lock, flags);
	lp->tx_queue_ready = 0;
	{
		outw(length, ioaddr + DATAPORT);
		outsw(ioaddr + DATAPORT, buf, (length + 1) >> 1);
		lp->tx_queue++;
		lp->tx_queue_len += length + 2;
	}
	lp->tx_queue_ready = 1;
	spin_unlock_irqrestore(&lp->lock, flags);

	if (lp->tx_started == 0) {
		/* If the Tx is idle, always trigger a transmit. */
		outb(0x80 | lp->tx_queue, ioaddr + TX_START);
		lp->tx_queue = 0;
		lp->tx_queue_len = 0;
		dev->trans_start = jiffies;
		lp->tx_started = 1;
	} else if (lp->tx_queue_len < 4096 - 1502)
		/* Yes, there is room for one more packet. */
		;
	else
		netif_stop_queue(dev);

	dev_kfree_skb(skb);
	return 0;
}
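
The short-frame branch above uses the older skb_padto() that returned a struct sk_buff * (NULL on allocation failure). A minimal sketch of the same idiom against the later int-returning skb_padto(), which frees the skb itself on failure; foo_start_xmit() and foo_hw_tx() are hypothetical stand-ins, not part of any example above:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>

static void foo_hw_tx(struct net_device *dev, const void *buf, unsigned int len)
{
	/* device-specific FIFO/DMA hand-off would go here */
}

static netdev_tx_t foo_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	unsigned int len = skb->len;

	if (len < ETH_ZLEN) {
		/* skb_padto() zero-fills the tail up to ETH_ZLEN but does
		 * not update skb->len, so track the padded length here. */
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;	/* skb was already freed */
		len = ETH_ZLEN;
	}

	foo_hw_tx(dev, skb->data, len);
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}
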
Example no. 2
static int seeq8005_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct net_local *lp = netdev_priv(dev);
	short length = skb->len;
	unsigned char *buf;

	if (length < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return 0;
		length = ETH_ZLEN;
	}
	buf = skb->data;

	/* Block a timer-based transmit from overlapping */
	netif_stop_queue(dev);
	
	hardware_send_packet(dev, buf, length); 
	dev->trans_start = jiffies;
	lp->stats.tx_bytes += length;
	dev_kfree_skb (skb);
	/* You might need to clean up and record Tx statistics here. */

	return 0;
}
Example no. 3
static netdev_tx_t cpsw_ndo_start_xmit(struct sk_buff *skb,
				       struct net_device *ndev)
{
	struct cpsw_priv *priv = netdev_priv(ndev);
	int ret;

	ndev->trans_start = jiffies;

	if (skb_padto(skb, CPSW_MIN_PACKET_SIZE)) {
		cpsw_err(priv, tx_err, "packet pad failed\n");
		priv->stats.tx_dropped++;
		return NETDEV_TX_OK;
	}

	ret = cpdma_chan_submit(priv->txch, skb, skb->data,
				skb->len, GFP_KERNEL);
	if (unlikely(ret != 0)) {
		cpsw_err(priv, tx_err, "desc submit failed\n");
		goto fail;
	}

	return NETDEV_TX_OK;
fail:
	priv->stats.tx_dropped++;
	netif_stop_queue(ndev);
	return NETDEV_TX_BUSY;
}
/**********************************************************************
*%FUNCTION: addTag
*%ARGUMENTS:
* m -- a PPPoE packet
* tag -- tag to add
*%RETURNS:
* -1 if no room in packet; 0 on success.
*%DESCRIPTION:
* Inserts a tag as the first tag in a PPPoE packet.
***********************************************************************/
int
addTag(struct sk_buff **m, PPPoETag const *tag)
{
    int len = ntohs(tag->length) + TAG_HDR_SIZE;
    PPPoEPacket *packet = MTOD(*m, PPPoEPacket *);
    int new_size, offset;
    struct sk_buff *skb;
    
    if (len + ntohs(packet->length) > MAX_PPPOE_PAYLOAD)
	return -1;
    offset = HDR_SIZE + ntohs(packet->length);
    new_size = len + offset;
    skb = skb_padto(*m, new_size);
    if (!skb)
	return -1;
    if (new_size > skb->len)
	skb_put(skb, new_size - skb->len);
    memcpy(skb->data + offset, (u8 *)tag, len);
    *m = skb;
    
    /* In case buf was realloced... */
    packet = MTOD(*m, PPPoEPacket *);
    packet->length = htons(ntohs(packet->length) + len);
    return 0;
}
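
addTag() has to grow the skb before it can copy the tag in, because skb_padto() enlarges the buffer without changing skb->len. The same grow-and-append step in isolation, as a sketch assuming the older struct sk_buff *-returning skb_padto() used here (append_bytes() and its arguments are illustrative, not part of the PPPoE code):

#include <linux/skbuff.h>
#include <linux/string.h>

static struct sk_buff *append_bytes(struct sk_buff *skb,
				    const u8 *extra, unsigned int len)
{
	/* Ensure room for len more bytes; on failure the old-style
	 * skb_padto() has already freed the original skb. */
	skb = skb_padto(skb, skb->len + len);
	if (!skb)
		return NULL;

	/* skb_padto() left skb->len untouched; skb_put() extends it and
	 * returns a pointer to the freshly reserved tail. */
	memcpy(skb_put(skb, len), extra, len);
	return skb;
}
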
Example no. 5
static int
ramips_eth_hard_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	struct raeth_tx_info *txi, *txi_next;
	struct ramips_tx_dma *txd, *txd_next;
	unsigned long tx;
	unsigned int tx_next;
	dma_addr_t mapped_addr;

	if (re->plat->min_pkt_len) {
		if (skb->len < re->plat->min_pkt_len) {
			if (skb_padto(skb, re->plat->min_pkt_len)) {
				/* skb_padto() has already freed the skb on failure */
				printk(KERN_ERR
				       "ramips_eth: skb_padto failed\n");
				return 0;
			}
			skb_put(skb, re->plat->min_pkt_len - skb->len);
		}
	}

	dev->trans_start = jiffies;
	mapped_addr = dma_map_single(&re->netdev->dev, skb->data, skb->len,
				     DMA_TO_DEVICE);

	spin_lock(&re->page_lock);
	tx = ramips_fe_trr(RAETH_REG_TX_CTX_IDX0);
	tx_next = (tx + 1) % NUM_TX_DESC;

	txi = &re->tx_info[tx];
	txd = txi->tx_desc;
	txi_next = &re->tx_info[tx_next];
	txd_next = txi_next->tx_desc;

	if ((txi->tx_skb) || (txi_next->tx_skb) ||
	    !(txd->txd2 & TX_DMA_DONE) ||
	    !(txd_next->txd2 & TX_DMA_DONE))
		goto out;

	txi->tx_skb = skb;

	txd->txd1 = (unsigned int) mapped_addr;
	wmb();
	txd->txd2 = TX_DMA_LSO | TX_DMA_PLEN0(skb->len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	ramips_fe_twr(tx_next, RAETH_REG_TX_CTX_IDX0);
	netdev_sent_queue(dev, skb->len);
	spin_unlock(&re->page_lock);
	return NETDEV_TX_OK;

 out:
	spin_unlock(&re->page_lock);
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example no. 6
/**
 * arc_emac_tx - Starts the data transmission.
 * @skb:	sk_buff pointer that contains data to be Transmitted.
 * @ndev:	Pointer to net_device structure.
 *
 * returns: NETDEV_TX_OK, on success
 *		NETDEV_TX_BUSY, if any of the descriptors are not free.
 *
 * This function is invoked from upper layers to initiate transmission.
 */
static int arc_emac_tx(struct sk_buff *skb, struct net_device *ndev)
{
	struct arc_emac_priv *priv = netdev_priv(ndev);
	unsigned int len, *txbd_curr = &priv->txbd_curr;
	struct net_device_stats *stats = &priv->stats;
	__le32 *info = &priv->txbd[*txbd_curr].info;
	dma_addr_t addr;

	if (skb_padto(skb, ETH_ZLEN))
		return NETDEV_TX_OK;

	len = max_t(unsigned int, ETH_ZLEN, skb->len);

	/* EMAC still holds this buffer in its possession.
	 * CPU must not modify this buffer descriptor
	 */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC)) {
		netif_stop_queue(ndev);
		return NETDEV_TX_BUSY;
	}

	addr = dma_map_single(&ndev->dev, (void *)skb->data, len,
			      DMA_TO_DEVICE);

	if (unlikely(dma_mapping_error(&ndev->dev, addr))) {
		stats->tx_dropped++;
		stats->tx_errors++;
		dev_kfree_skb(skb);
		return NETDEV_TX_OK;
	}
	dma_unmap_addr_set(&priv->tx_buff[*txbd_curr], addr, addr);
	dma_unmap_len_set(&priv->tx_buff[*txbd_curr], len, len);

	priv->tx_buff[*txbd_curr].skb = skb;
	priv->txbd[*txbd_curr].data = cpu_to_le32(addr);

	/* Make sure pointer to data buffer is set */
	wmb();

	skb_tx_timestamp(skb);

	*info = cpu_to_le32(FOR_EMAC | FIRST_OR_LAST_MASK | len);

	/* Increment index to point to the next BD */
	*txbd_curr = (*txbd_curr + 1) % TX_BD_NUM;

	/* Get "info" of the next BD */
	info = &priv->txbd[*txbd_curr].info;

	/* Check if if Tx BD ring is full - next BD is still owned by EMAC */
	if (unlikely((le32_to_cpu(*info) & OWN_MASK) == FOR_EMAC))
		netif_stop_queue(ndev);

	arc_reg_set(priv, R_STATUS, TXPL_MASK);

	return NETDEV_TX_OK;
}
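
The dma_mapping_error() check right after dma_map_single() above is the step several of the older examples in this list skip. A minimal sketch of just that step; foo_map_and_queue() and foo_queue_desc() are hypothetical, not the arc_emac API:

#include <linux/netdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>

static void foo_queue_desc(struct net_device *ndev, struct sk_buff *skb,
			   dma_addr_t addr, unsigned int len)
{
	/* hypothetical: fill a free Tx descriptor with addr/len and kick DMA */
}

static netdev_tx_t foo_map_and_queue(struct sk_buff *skb, struct net_device *ndev)
{
	dma_addr_t addr;

	addr = dma_map_single(&ndev->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(&ndev->dev, addr)) {
		/* Count the drop and consume the skb; NETDEV_TX_OK tells the
		 * stack not to resubmit a buffer that can never be mapped. */
		ndev->stats.tx_dropped++;
		dev_kfree_skb_any(skb);
		return NETDEV_TX_OK;
	}

	foo_queue_desc(ndev, skb, addr, skb->len);
	return NETDEV_TX_OK;
}
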
Example no. 7
static int cpmac_start_xmit(struct sk_buff *skb)
{
	int queue, len, ret;
	lock_s(l1);
	//struct cpmac_desc *desc;
	//struct cpmac_priv *priv = netdev_priv(dev);

	//if (unlikely(atomic_read(reset_pending)))
	//	return NETDEV_TX_BUSY;


        //cpmac_write(CPMAC_TX_PTR(queue), (u32)desc_ring[queue].mapping);

        // BUG: move this line to the  *** location below
        notify(cond_irq_can_happen);

	if (unlikely(skb_padto(skb, ETH_ZLEN))) {
            ret = NETDEV_TX_OK;
        } else {
            len = max(skb->len, ETH_ZLEN);
            //queue = skb_get_queue_mapping(skb);
            netif_stop_subqueue(/*queue*/);

            //desc = &desc_ring[queue];
            if (unlikely(desc_ring[queue].dataflags & CPMAC_OWN)) {
    //		if (netif_msg_tx_err(priv) && net_ratelimit())
    //			netdev_warn(dev, "tx dma ring full\n");

                    ret = NETDEV_TX_BUSY;
            } else {

                spin_lock(cplock);
                spin_unlock(cplock);
                desc_ring[queue].dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
                desc_ring[queue].skb = skb;
                desc_ring[queue].data_mapping = dma_map_single(skb->data, len,
                                                    DMA_TO_DEVICE);
                desc_ring[queue].hw_data = (u32)desc_ring[queue].data_mapping;
                desc_ring[queue].datalen = len;
                desc_ring[queue].buflen = len;
        //	if (unlikely(netif_msg_tx_queued(priv)))
        //		netdev_dbg(dev, "sending 0x%p, len=%d\n", skb, skb->len);
        //	if (unlikely(netif_msg_hw(priv)))
        //		cpmac_dump_desc(dev, &desc_ring[queue]);
        //	if (unlikely(netif_msg_pktdata(priv)))
        //		cpmac_dump_skb(dev, skb);
	

                ret = NETDEV_TX_OK;
            }
        }
        // ***
        unlock_s(l1);
        return ret;
}
Example no. 8
static int epic_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct epic_private *ep = dev->priv;
	int entry, free_count;
	u32 ctrl_word;
	unsigned long flags;

	if (skb_padto(skb, ETH_ZLEN))
		return 0;

	/* Caution: the write order is important here, set the field with the
	   "ownership" bit last. */

	/* Calculate the next Tx descriptor entry. */
	spin_lock_irqsave(&ep->lock, flags);
	free_count = ep->cur_tx - ep->dirty_tx;
	entry = ep->cur_tx % TX_RING_SIZE;

	ep->tx_skbuff[entry] = skb;
	ep->tx_ring[entry].bufaddr = pci_map_single(ep->pci_dev, skb->data,
		 			            skb->len, PCI_DMA_TODEVICE);
	if (free_count < TX_QUEUE_LEN/2) {/* Typical path */
		ctrl_word = cpu_to_le32(0x100000); /* No interrupt */
	} else if (free_count == TX_QUEUE_LEN/2) {
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
	} else if (free_count < TX_QUEUE_LEN - 1) {
		ctrl_word = cpu_to_le32(0x100000); /* No Tx-done intr. */
	} else {
		/* Leave room for an additional entry. */
		ctrl_word = cpu_to_le32(0x140000); /* Tx-done intr. */
		ep->tx_full = 1;
	}
	ep->tx_ring[entry].buflength = ctrl_word | cpu_to_le32(skb->len);
	ep->tx_ring[entry].txstatus =
		((skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN) << 16)
		| cpu_to_le32(DescOwn);

	ep->cur_tx++;
	if (ep->tx_full)
		netif_stop_queue(dev);

	spin_unlock_irqrestore(&ep->lock, flags);
	/* Trigger an immediate transmit demand. */
	outl(TxQueued, dev->base_addr + COMMAND);

	dev->trans_start = jiffies;
	if (debug > 4)
		printk(KERN_DEBUG "%s: Queued Tx packet size %d to slot %d, "
			   "flag %2.2x Tx status %8.8x.\n",
			   dev->name, (int)skb->len, entry, ctrl_word,
			   (int)inl(dev->base_addr + TxSTAT));

	return 0;
}
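
The cur_tx/dirty_tx pair above is the classic ring-accounting idiom: both counters only ever increase, their difference is the number of descriptors still in flight, and the slot to use is the counter modulo the ring size (unsigned wrap-around keeps the subtraction well defined). A small sketch with hypothetical names:

#include <linux/types.h>

#define FOO_TX_RING_SIZE	16	/* hypothetical ring size */

struct foo_ring {
	unsigned int cur_tx;	/* bumped when a frame is queued */
	unsigned int dirty_tx;	/* bumped when a frame is reclaimed */
};

/* descriptors currently owned by the hardware */
static unsigned int foo_tx_inflight(const struct foo_ring *r)
{
	return r->cur_tx - r->dirty_tx;
}

/* ring slot the next frame will occupy */
static unsigned int foo_tx_slot(const struct foo_ring *r)
{
	return r->cur_tx % FOO_TX_RING_SIZE;
}

/* leave one slot free, as epic_start_xmit() does above */
static bool foo_tx_ring_full(const struct foo_ring *r)
{
	return foo_tx_inflight(r) >= FOO_TX_RING_SIZE - 1;
}
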
Example no. 9
File: r8169.c Project: wxlong/Test
static int
rtl8169_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
    struct rtl8169_private *tp = dev->priv;
    void *ioaddr = tp->mmio_addr;
    int entry = tp->cur_tx % NUM_TX_DESC;
    u32 len = skb->len;

    if (unlikely(skb->len < ETH_ZLEN)) {
        skb = skb_padto(skb, ETH_ZLEN);
        if (!skb)
            goto err_update_stats;
        len = ETH_ZLEN;
    }

    spin_lock_irq(&tp->lock);

    if (!(le32_to_cpu(tp->TxDescArray[entry].status) & OWNbit)) {
        dma_addr_t mapping;

        mapping = pci_map_single(tp->pci_dev, skb->data, len,
                                 PCI_DMA_TODEVICE);

        tp->Tx_skbuff[entry] = skb;
        tp->TxDescArray[entry].addr = cpu_to_le64(mapping);

        tp->TxDescArray[entry].status = cpu_to_le32(OWNbit | FSbit |
                                        LSbit | len | (EORbit * !((entry + 1) % NUM_TX_DESC)));

        RTL_W8(TxPoll, 0x40);	//set polling bit

        dev->trans_start = jiffies;

        tp->cur_tx++;
    } else
        goto err_drop;


    if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx) {
        netif_stop_queue(dev);
    }
out:
    spin_unlock_irq(&tp->lock);

    return 0;

err_drop:
    dev_kfree_skb(skb);
err_update_stats:
    tp->stats.tx_dropped++;
    goto out;
}
Example no. 10
static int
ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	unsigned long tx;
	unsigned int tx_next;
	unsigned int mapped_addr;
	unsigned long flags;

	if (priv->plat->min_pkt_len) {
		if (skb->len < priv->plat->min_pkt_len) {
			if (skb_padto(skb, priv->plat->min_pkt_len)) {
				/* skb_padto() has already freed the skb on failure */
				printk(KERN_ERR "ramips_eth: skb_padto failed\n");
				return 0;
			}
			skb_put(skb, priv->plat->min_pkt_len - skb->len);
		}
	}
	dev->trans_start = jiffies;
	mapped_addr = (unsigned int)dma_map_single(NULL, skb->data, skb->len,
			DMA_TO_DEVICE);
	dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE);
	spin_lock_irqsave(&priv->page_lock, flags);
	tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0);
	if(tx == NUM_TX_DESC - 1)
		tx_next = 0;
	else
		tx_next = tx + 1;
	if((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) ||
		!(priv->tx[tx].txd2 & TX_DMA_DONE) || !(priv->tx[tx_next].txd2 & TX_DMA_DONE))
		goto out;
	priv->tx[tx].txd1 = mapped_addr;
	priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE);
	priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len);
	dev->stats.tx_packets++;
	dev->stats.tx_bytes += skb->len;
	priv->tx_skb[tx] = skb;
	wmb();
	ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0);
	spin_unlock_irqrestore(&priv->page_lock, flags);
	return NETDEV_TX_OK;
out:
	spin_unlock_irqrestore(&priv->page_lock, flags);
	dev->stats.tx_dropped++;
	kfree_skb(skb);
	return NETDEV_TX_OK;
}
Example no. 11
static int cpmac_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	int queue, len;
	struct cpmac_desc *desc;
	struct cpmac_priv *priv = netdev_priv(dev);

	if (unlikely(atomic_read(&priv->reset_pending)))
		return NETDEV_TX_BUSY;

	if (unlikely(skb_padto(skb, ETH_ZLEN)))
		return NETDEV_TX_OK;

	len = max(skb->len, ETH_ZLEN);
	queue = skb_get_queue_mapping(skb);
#ifdef CONFIG_NETDEVICES_MULTIQUEUE
	netif_stop_subqueue(dev, queue);
#else
	netif_stop_queue(dev);
#endif

	desc = &priv->desc_ring[queue];
	if (unlikely(desc->dataflags & CPMAC_OWN)) {
		if (netif_msg_tx_err(priv) && net_ratelimit())
			printk(KERN_WARNING "%s: tx dma ring full\n",
			       dev->name);
		return NETDEV_TX_BUSY;
	}

	spin_lock(&priv->lock);
	dev->trans_start = jiffies;
	spin_unlock(&priv->lock);
	desc->dataflags = CPMAC_SOP | CPMAC_EOP | CPMAC_OWN;
	desc->skb = skb;
	desc->data_mapping = dma_map_single(&dev->dev, skb->data, len,
					    DMA_TO_DEVICE);
	desc->hw_data = (u32)desc->data_mapping;
	desc->datalen = len;
	desc->buflen = len;
	if (unlikely(netif_msg_tx_queued(priv)))
		printk(KERN_DEBUG "%s: sending 0x%p, len=%d\n", dev->name, skb,
		       skb->len);
	if (unlikely(netif_msg_hw(priv)))
		cpmac_dump_desc(dev, desc);
	if (unlikely(netif_msg_pktdata(priv)))
		cpmac_dump_skb(dev, skb);
	cpmac_write(priv->regs, CPMAC_TX_PTR(queue), (u32)desc->mapping);

	return NETDEV_TX_OK;
}
Example no. 12
static int mc32_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	struct mc32_local *lp = netdev_priv(dev);
	u32 head = atomic_read(&lp->tx_ring_head);
	
	volatile struct skb_header *p, *np;

	netif_stop_queue(dev);

	if(atomic_read(&lp->tx_count)==0) {
		return 1;
	}

	skb = skb_padto(skb, ETH_ZLEN);
	if (skb == NULL) {
		netif_wake_queue(dev);
		return 0;
	}

	atomic_dec(&lp->tx_count); 

	/* P is the last sending/sent buffer as a pointer */
	p=lp->tx_ring[head].p;
		
	head = next_tx(head);

	/* NP is the buffer we will be loading */
	np=lp->tx_ring[head].p; 
	
	/* We will need this to flush the buffer out */
	lp->tx_ring[head].skb=skb;

	np->length      = unlikely(skb->len < ETH_ZLEN) ? ETH_ZLEN : skb->len;			
	np->data	= isa_virt_to_bus(skb->data);
	np->status	= 0;
	np->control     = CONTROL_EOP | CONTROL_EOL;     
	wmb();
		
	/*
	 * The new frame has been setup; we can now
	 * let the interrupt handler and card "see" it
	 */

	atomic_set(&lp->tx_ring_head, head); 
	p->control     &= ~CONTROL_EOL;

	netif_wake_queue(dev);
	return 0;
}
Example no. 13
static netdev_tx_t eepro_send_packet(struct sk_buff *skb,
				     struct net_device *dev)
{
	struct eepro_local *lp = netdev_priv(dev);
	unsigned long flags;
	int ioaddr = dev->base_addr;
	short length = skb->len;

	if (net_debug > 5)
		printk(KERN_DEBUG  "%s: entering eepro_send_packet routine.\n", dev->name);

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	netif_stop_queue (dev);

	eepro_dis_int(ioaddr);
	spin_lock_irqsave(&lp->lock, flags);

	{
		unsigned char *buf = skb->data;

		if (hardware_send_packet(dev, buf, length))
			/* we won't wake queue here because we're out of space */
			dev->stats.tx_dropped++;
		else {
			dev->stats.tx_bytes+=skb->len;
			netif_wake_queue(dev);
		}

	}

	dev_kfree_skb (skb);

	/* You might need to clean up and record Tx statistics here. */
	/* dev->stats.tx_aborted_errors++; */

	if (net_debug > 5)
		printk(KERN_DEBUG "%s: exiting eepro_send_packet routine.\n", dev->name);

	eepro_en_int(ioaddr);
	spin_unlock_irqrestore(&lp->lock, flags);

	return NETDEV_TX_OK;
}
Example no. 14
static int znet_send_packet(struct sk_buff *skb, struct net_device *dev)
{
	int ioaddr = dev->base_addr;
	struct znet_private *znet = dev->priv;
	unsigned long flags;
	short length = skb->len;

	if (znet_debug > 4)
		printk(KERN_DEBUG "%s: ZNet_send_packet.\n", dev->name);

	if (length < ETH_ZLEN) {
		skb = skb_padto(skb, ETH_ZLEN);
		if (skb == NULL)
			return 0;
		length = ETH_ZLEN;
	}
	
	netif_stop_queue (dev);
	
	/* Check that the part hasn't reset itself, probably from suspend. */
	outb(CR0_STATUS_0, ioaddr);
	if (inw(ioaddr) == 0x0010 &&
	    inw(ioaddr) == 0x0000 &&
	    inw(ioaddr) == 0x0010) {
		if (znet_debug > 1)
			printk (KERN_WARNING "%s : waking up\n", dev->name);
		hardware_init(dev);
		znet_transceiver_power (dev, 1);
	}

	if (1) {
		unsigned char *buf = (void *)skb->data;
		ushort *tx_link = znet->tx_cur - 1;
		ushort rnd_len = (length + 1)>>1;
		
		znet->stats.tx_bytes+=length;

		if (znet->tx_cur >= znet->tx_end)
		  znet->tx_cur = znet->tx_start;
		*znet->tx_cur++ = length;
		if (znet->tx_cur + rnd_len + 1 > znet->tx_end) {
			int semi_cnt = (znet->tx_end - znet->tx_cur)<<1; /* Cvrt to byte cnt. */
			memcpy(znet->tx_cur, buf, semi_cnt);
			rnd_len -= semi_cnt>>1;
			memcpy(znet->tx_start, buf + semi_cnt, length - semi_cnt);
			znet->tx_cur = znet->tx_start + rnd_len;
		} else {
Example no. 15
static bool rt2x00usb_kick_tx_entry(struct queue_entry *entry, void *data)
{
	struct rt2x00_dev *rt2x00dev = entry->queue->rt2x00dev;
	struct usb_device *usb_dev = to_usb_device_intf(rt2x00dev->dev);
	struct queue_entry_priv_usb *entry_priv = entry->priv_data;
	u32 length;
	int status;

	if (!test_and_clear_bit(ENTRY_DATA_PENDING, &entry->flags) ||
	    test_bit(ENTRY_DATA_STATUS_PENDING, &entry->flags))
		return false;

	/*
	 * USB devices require certain padding at the end of each frame
	 * and urb. Those paddings are not included in skbs. Pass entry
	 * to the driver to determine what the overall length should be.
	 */
	length = rt2x00dev->ops->lib->get_tx_data_len(entry);

	status = skb_padto(entry->skb, length);
	if (unlikely(status)) {
		/* TODO: report something more appropriate than IO_FAILED. */
		rt2x00_warn(rt2x00dev, "TX SKB padding error, out of memory\n");
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);

		return false;
	}

	usb_fill_bulk_urb(entry_priv->urb, usb_dev,
			  usb_sndbulkpipe(usb_dev, entry->queue->usb_endpoint),
			  entry->skb->data, length,
			  rt2x00usb_interrupt_txdone, entry);

	usb_anchor_urb(entry_priv->urb, rt2x00dev->anchor);
	status = usb_submit_urb(entry_priv->urb, GFP_ATOMIC);
	if (status) {
		usb_unanchor_urb(entry_priv->urb);
		if (status == -ENODEV)
			clear_bit(DEVICE_STATE_PRESENT, &rt2x00dev->flags);
		set_bit(ENTRY_DATA_IO_FAILED, &entry->flags);
		rt2x00lib_dmadone(entry);
	}

	return false;
}
Example no. 16
/* opa_netdev_start_xmit - transmit function */
static netdev_tx_t opa_netdev_start_xmit(struct sk_buff *skb,
					 struct net_device *netdev)
{
	struct opa_vnic_adapter *adapter = opa_vnic_priv(netdev);

	v_dbg("xmit: queue %d skb len %d\n", skb->queue_mapping, skb->len);
	/* pad to ensure minimum ethernet packet length */
	if (unlikely(skb->len < ETH_ZLEN)) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;

		skb_put(skb, ETH_ZLEN - skb->len);
	}

	opa_vnic_encap_skb(adapter, skb);
	return adapter->rn_ops->ndo_start_xmit(skb, netdev);
}
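
The skb_padto()-plus-skb_put() pair above is the manual form of what newer kernels wrap in a single helper. A minimal sketch, assuming eth_skb_pad() from linux/etherdevice.h is available; foo_xmit() and foo_hw_queue() are hypothetical:

#include <linux/netdevice.h>
#include <linux/etherdevice.h>

static netdev_tx_t foo_hw_queue(struct sk_buff *skb, struct net_device *netdev)
{
	/* hypothetical device-specific transmit path */
	dev_kfree_skb(skb);
	return NETDEV_TX_OK;
}

static netdev_tx_t foo_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	/* Pads to ETH_ZLEN and updates skb->len; on allocation failure the
	 * skb is freed and a non-zero value is returned. */
	if (eth_skb_pad(skb))
		return NETDEV_TX_OK;

	return foo_hw_queue(skb, netdev);
}
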
static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
					struct net_device *dev)
{
	short length = skb->len;
	unsigned char *buf;

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	buf = skb->data;

	
	netif_stop_queue(dev);

	hardware_send_packet(dev, buf, length);
	dev->stats.tx_bytes += length;
	dev_kfree_skb (skb);
	

	return NETDEV_TX_OK;
}
Example no. 18
static netdev_tx_t seeq8005_send_packet(struct sk_buff *skb,
					struct net_device *dev)
{
	short length = skb->len;
	unsigned char *buf;

	if (length < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			return NETDEV_TX_OK;
		length = ETH_ZLEN;
	}
	buf = skb->data;

	/* Block a timer-based transmit from overlapping */
	netif_stop_queue(dev);

	hardware_send_packet(dev, buf, length);
	dev->stats.tx_bytes += length;
	dev_kfree_skb (skb);
	/* You might need to clean up and record Tx statistics here. */

	return NETDEV_TX_OK;
}
static void rt2800usb_write_tx_data(struct queue_entry *entry,
					struct txentry_desc *txdesc)
{
	unsigned int len;
	int err;

	rt2800_write_tx_data(entry, txdesc);

	/*
	 * pad(1~3 bytes) is added after each 802.11 payload.
	 * USB end pad(4 bytes) is added at each USB bulk out packet end.
	 * TX frame format is :
	 * | TXINFO | TXWI | 802.11 header | L2 pad | payload | pad | USB end pad |
	 *                 |<------------- tx_pkt_len ------------->|
	 */
	len = roundup(entry->skb->len, 4) + 4;
	err = skb_padto(entry->skb, len);
	if (unlikely(err)) {
		WARNING(entry->queue->rt2x00dev, "TX SKB padding error, out of memory\n");
		return;
	}

	entry->skb->len = len;
}
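
The length computed above is simply the frame rounded up to a 4-byte boundary plus the 4-byte USB end pad, e.g. a 61-byte frame becomes roundup(61, 4) + 4 = 68 bytes on the bulk-out pipe. A standalone illustration of that arithmetic only (plain C, not driver code):

#include <stdio.h>

/* same result as the kernel's roundup(x, 4) for this power-of-two case */
static unsigned int roundup4(unsigned int x)
{
	return (x + 3) & ~3u;
}

int main(void)
{
	unsigned int frame_len = 61;				/* arbitrary example */
	unsigned int bulk_len = roundup4(frame_len) + 4;	/* pad + USB end pad */

	printf("frame %u bytes -> bulk-out %u bytes\n", frame_len, bulk_len);
	return 0;
}
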
Example no. 20
/*
 * Transmit a packet
 */
static int
ether3_sendpacket(struct sk_buff *skb, struct net_device *dev)
{
	unsigned long flags;
	unsigned int length = ETH_ZLEN < skb->len ? skb->len : ETH_ZLEN;
	unsigned int ptr, next_ptr;

	if (priv(dev)->broken) {
		dev_kfree_skb(skb);
		dev->stats.tx_dropped++;
		netif_start_queue(dev);
		return NETDEV_TX_OK;
	}

	length = (length + 1) & ~1;
	if (length != skb->len) {
		if (skb_padto(skb, length))
			goto out;
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;

	local_irq_save(flags);

	if (priv(dev)->tx_tail == next_ptr) {
		local_irq_restore(flags);
		return NETDEV_TX_BUSY;	/* unable to queue */
	}

	ptr		 = 0x600 * priv(dev)->tx_head;
	priv(dev)->tx_head = next_ptr;
	next_ptr	*= 0x600;

#define TXHDR_FLAGS (TXHDR_TRANSMIT|TXHDR_CHAINCONTINUE|TXHDR_DATAFOLLOWS|TXHDR_ENSUCCESS)

	ether3_setbuffer(dev, buffer_write, next_ptr);
	ether3_writelong(dev, 0);
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writelong(dev, 0);
	ether3_writebuffer(dev, skb->data, length);
	ether3_writeword(dev, htons(next_ptr));
	ether3_writeword(dev, TXHDR_CHAINCONTINUE >> 16);
	ether3_setbuffer(dev, buffer_write, ptr);
	ether3_writeword(dev, htons((ptr + length + 4)));
	ether3_writeword(dev, TXHDR_FLAGS >> 16);
	ether3_ledon(dev);

	if (!(ether3_inw(REG_STATUS) & STAT_TXON)) {
		ether3_outw(ptr, REG_TRANSMITPTR);
		ether3_outw(priv(dev)->regs.command | CMD_TXON, REG_COMMAND);
	}

	next_ptr = (priv(dev)->tx_head + 1) & 15;
	local_irq_restore(flags);

	dev_kfree_skb(skb);

	if (priv(dev)->tx_tail == next_ptr)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
Example no. 21
static int sonic_send_packet(struct sk_buff *skb, struct net_device *dev)
{
    struct sonic_local *lp = netdev_priv(dev);
    dma_addr_t laddr;
    int length;
    int entry = lp->next_tx;

    if (sonic_debug > 2)
        printk("sonic_send_packet: skb=%p, dev=%p\n", skb, dev);

    length = skb->len;
    if (length < ETH_ZLEN) {
        if (skb_padto(skb, ETH_ZLEN))
            return NETDEV_TX_OK;
        length = ETH_ZLEN;
    }

    /*
     * Map the packet data into the logical DMA address space
     */

    laddr = dma_map_single(lp->device, skb->data, length, DMA_TO_DEVICE);
    if (!laddr) {
        printk(KERN_ERR "%s: failed to map tx DMA buffer.\n", dev->name);
        dev_kfree_skb(skb);
        return NETDEV_TX_BUSY;
    }

    sonic_tda_put(dev, entry, SONIC_TD_STATUS, 0);       /* clear status */
    sonic_tda_put(dev, entry, SONIC_TD_FRAG_COUNT, 1);   /* single fragment */
    sonic_tda_put(dev, entry, SONIC_TD_PKTSIZE, length); /* length of packet */
    sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_L, laddr & 0xffff);
    sonic_tda_put(dev, entry, SONIC_TD_FRAG_PTR_H, laddr >> 16);
    sonic_tda_put(dev, entry, SONIC_TD_FRAG_SIZE, length);
    sonic_tda_put(dev, entry, SONIC_TD_LINK,
                  sonic_tda_get(dev, entry, SONIC_TD_LINK) | SONIC_EOL);

    /*
     * Must set tx_skb[entry] only after clearing status, and
     * before clearing EOL and before stopping queue
     */
    wmb();
    lp->tx_len[entry] = length;
    lp->tx_laddr[entry] = laddr;
    lp->tx_skb[entry] = skb;

    wmb();
    sonic_tda_put(dev, lp->eol_tx, SONIC_TD_LINK,
                  sonic_tda_get(dev, lp->eol_tx, SONIC_TD_LINK) & ~SONIC_EOL);
    lp->eol_tx = entry;

    lp->next_tx = (entry + 1) & SONIC_TDS_MASK;
    if (lp->tx_skb[lp->next_tx] != NULL) {
        /* The ring is full, the ISR has yet to process the next TD. */
        if (sonic_debug > 3)
            printk("%s: stopping queue\n", dev->name);
        netif_stop_queue(dev);
        /* after this packet, wait for ISR to free up some TDAs */
    } else netif_start_queue(dev);

    if (sonic_debug > 2)
        printk("sonic_send_packet: issuing Tx command\n");

    SONIC_WRITE(SONIC_CMD, SONIC_CR_TXP);

    return NETDEV_TX_OK;
}
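
The two wmb() calls above enforce the usual descriptor-publish order: fill in every field the hardware and the ISR will read, issue a write barrier, and only then make the descriptor visible (here by clearing SONIC_EOL on the previous end-of-list descriptor and issuing the TXP command). A minimal sketch of that pattern; struct foo_txd and FOO_TXD_OWN are a hypothetical descriptor layout, not the SONIC one:

#include <linux/types.h>
#include <asm/byteorder.h>
#include <asm/barrier.h>

#define FOO_TXD_OWN	0x80000000u	/* hypothetical "owned by NIC" bit */

struct foo_txd {
	__le32 addr;
	__le32 len;
	__le32 flags;	/* hardware starts on this descriptor once OWN is set */
};

static void foo_publish_txd(struct foo_txd *txd, u32 addr, u32 len)
{
	txd->addr = cpu_to_le32(addr);
	txd->len  = cpu_to_le32(len);
	wmb();		/* descriptor body must be visible before ownership */
	txd->flags |= cpu_to_le32(FOO_TXD_OWN);
}
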
Example no. 22
static int
ether1_sendpacket (struct sk_buff *skb, struct net_device *dev)
{
	int tmp, tst, nopaddr, txaddr, tbdaddr, dataddr;
	unsigned long flags;
	tx_t tx;
	tbd_t tbd;
	nop_t nop;

	if (priv(dev)->restart) {
		printk(KERN_WARNING "%s: resetting device\n", dev->name);

		ether1_reset(dev);

		if (ether1_init_for_open(dev))
			printk(KERN_ERR "%s: unable to restart interface\n", dev->name);
		else
			priv(dev)->restart = 0;
	}

	if (skb->len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN))
			goto out;
	}

	/*
	 * insert packet followed by a nop
	 */
	txaddr = ether1_txalloc (dev, TX_SIZE);
	tbdaddr = ether1_txalloc (dev, TBD_SIZE);
	dataddr = ether1_txalloc (dev, skb->len);
	nopaddr = ether1_txalloc (dev, NOP_SIZE);

	tx.tx_status = 0;
	tx.tx_command = CMD_TX | CMD_INTR;
	tx.tx_link = nopaddr;
	tx.tx_tbdoffset = tbdaddr;
	tbd.tbd_opts = TBD_EOL | skb->len;
	tbd.tbd_link = I82586_NULL;
	tbd.tbd_bufl = dataddr;
	tbd.tbd_bufh = 0;
	nop.nop_status = 0;
	nop.nop_command = CMD_NOP;
	nop.nop_link = nopaddr;

	local_irq_save(flags);
	ether1_writebuffer (dev, &tx, txaddr, TX_SIZE);
	ether1_writebuffer (dev, &tbd, tbdaddr, TBD_SIZE);
	ether1_writebuffer (dev, skb->data, dataddr, skb->len);
	ether1_writebuffer (dev, &nop, nopaddr, NOP_SIZE);
	tmp = priv(dev)->tx_link;
	priv(dev)->tx_link = nopaddr;

	/* now reset the previous nop pointer */
	ether1_writew(dev, txaddr, tmp, nop_t, nop_link, NORMALIRQS);

	local_irq_restore(flags);

	/* handle transmit */

	/* check to see if we have room for a full sized ether frame */
	tmp = priv(dev)->tx_head;
	tst = ether1_txalloc (dev, TX_SIZE + TBD_SIZE + NOP_SIZE + ETH_FRAME_LEN);
	priv(dev)->tx_head = tmp;
	dev_kfree_skb (skb);

	if (tst == -1)
		netif_stop_queue(dev);

 out:
	return NETDEV_TX_OK;
}
Example no. 23
static int sgiseeq_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct sgiseeq_private *sp = netdev_priv(dev);
	struct hpc3_ethregs *hregs = sp->hregs;
	unsigned long flags;
	struct sgiseeq_tx_desc *td;
	int len, entry;

	spin_lock_irqsave(&sp->tx_lock, flags);

	/* Setup... */
	len = skb->len;
	if (len < ETH_ZLEN) {
		if (skb_padto(skb, ETH_ZLEN)) {
			spin_unlock_irqrestore(&sp->tx_lock, flags);
			return NETDEV_TX_OK;
		}
		len = ETH_ZLEN;
	}

	dev->stats.tx_bytes += len;
	entry = sp->tx_new;
	td = &sp->tx_desc[entry];
	dma_sync_desc_cpu(dev, td);

	/* Create entry.  There are so many races with adding a new
	 * descriptor to the chain:
	 * 1) Assume that the HPC is off processing a DMA chain while
	 *    we are changing all of the following.
	 * 2) Do not allow the HPC to look at a new descriptor until
	 *    we have completely set up its state.  This means, do
	 *    not clear HPCDMA_EOX in the current last descriptor
	 *    until the one we are adding looks consistent and could
	 *    be processed right now.
	 * 3) The tx interrupt code must notice when we've added a new
	 *    entry and the HPC got to the end of the chain before we
	 *    added this new entry and restarted it.
	 */
	td->skb = skb;
	td->tdma.pbuf = dma_map_single(dev->dev.parent, skb->data,
				       len, DMA_TO_DEVICE);
	td->tdma.cntinfo = (len & HPCDMA_BCNT) |
	                   HPCDMA_XIU | HPCDMA_EOXP | HPCDMA_XIE | HPCDMA_EOX;
	dma_sync_desc_dev(dev, td);
	if (sp->tx_old != sp->tx_new) {
		struct sgiseeq_tx_desc *backend;

		backend = &sp->tx_desc[PREV_TX(sp->tx_new)];
		dma_sync_desc_cpu(dev, backend);
		backend->tdma.cntinfo &= ~HPCDMA_EOX;
		dma_sync_desc_dev(dev, backend);
	}
	sp->tx_new = NEXT_TX(sp->tx_new); /* Advance. */

	/* Maybe kick the HPC back into motion. */
	if (!(hregs->tx_ctrl & HPC3_ETXCTRL_ACTIVE))
		kick_tx(dev, sp, hregs);

	dev->trans_start = jiffies;

	if (!TX_BUFFS_AVAIL(sp))
		netif_stop_queue(dev);
	spin_unlock_irqrestore(&sp->tx_lock, flags);

	return NETDEV_TX_OK;
}
Example no. 24
static inline int
dma_xmit(struct sk_buff* skb, struct net_device *dev, END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 next_idx, desc_odd = 0;
	u32 txd_info2 = 0, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

#if !defined (RAETH_HW_PADPKT)
	if (skb->len < ei_local->min_pkt_len) {
		if (skb_padto(skb, ei_local->min_pkt_len)) {
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: skb_padto failed\n", RAETH_DEV_NAME);
#endif
			return NETDEV_TX_OK;
		}
		skb_put(skb, ei_local->min_pkt_len - skb->len);
	}
#endif

#if defined (CONFIG_RALINK_MT7620)
	if (gmac_no == PSE_PORT_PPE)
		txd_info4 = TX4_DMA_FP_BMAP(0x80); /* P7 */
	else
#if defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x20); /* P5 */
#elif defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x10); /* P4 */
#else
		txd_info4 = 0; /* routing by DA */
#endif
#elif defined (CONFIG_RALINK_MT7621)
	txd_info4 = TX4_DMA_FPORT(gmac_no);
#else
	txd_info4 = (TX4_DMA_QN(3) | TX4_DMA_PN(gmac_no));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && !defined (RAETH_SDMA)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb)) {
#if defined (RAETH_HW_VLAN4K)
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#else
		u32 vlan_tci = skb_vlan_tag_get(skb);
		txd_info4 |= (TX4_DMA_INSV | TX4_DMA_VPRI(vlan_tci));
		txd_info4 |= (u32)ei_local->vlan_4k_map[(vlan_tci & VLAN_VID_MASK)];
#endif
	}
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;
		
		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}
		
		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif
	nr_desc = DIV_ROUND_UP(nr_desc, 2);

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+1 free descriptors */
	next_idx = (ei_local->txd_last_idx + nr_desc) % NUM_TX_DESC;
	if (ei_local->txd_buff[ei_local->txd_last_idx] || ei_local->txd_buff[next_idx]) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: PDMA TX ring is full! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
				&txd_info2, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
					&txd_info2, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* smp_mb() already inlined in netdev_tx_sent_queue */
	wmb();
#endif

	/* kick the DMA TX */
	sysRegWrite(TX_CTX_IDX0, cpu_to_le32(ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
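
netdev_tx_sent_queue() above is the transmit-time half of BQL; it only does something useful if the completion path reports the same byte counts back. A minimal sketch of the pairing; foo_tx_reclaim() and foo_next_completed_skb() are hypothetical reclaim-path names, not part of this driver:

#include <linux/netdevice.h>
#include <linux/skbuff.h>

static struct sk_buff *foo_next_completed_skb(struct net_device *dev)
{
	/* hypothetical: pop the next skb whose descriptor the hardware
	 * has finished with, or NULL if none */
	return NULL;
}

static void foo_tx_reclaim(struct net_device *dev, struct netdev_queue *txq)
{
	unsigned int pkts = 0, bytes = 0;
	struct sk_buff *skb;

	while ((skb = foo_next_completed_skb(dev)) != NULL) {
		pkts++;
		bytes += skb->len;
		dev_kfree_skb(skb);
	}

	/* release the BQL budget taken by netdev_tx_sent_queue() at xmit time */
	netdev_tx_completed_queue(txq, pkts, bytes);
}
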