Example #1
0
/*
========================================================================
Routine Description:
    The entry point through which the Linux kernel sends a packet to our driver.

Arguments:
    sk_buff *skb		pointer to the sk_buff to be transmitted.

Return Value:
    0

Note:
	This function is the entry point of the Tx path used by the OS to deliver
	packets to our driver. Only OS-dependent and STA/AP-common handling
	procedures may be placed here.
========================================================================
*/
int rt28xx_packet_xmit(struct sk_buff *skb)
{
	struct net_device *net_dev = skb->dev;
	PRTMP_ADAPTER pAd = NULL;
	int status = 0;
	PNDIS_PACKET pPacket = (PNDIS_PACKET) skb;

	GET_PAD_FROM_NET_DEV(pAd, net_dev);	

	/* RT2870STA does this in RTMPSendPackets() */
#ifdef RALINK_ATE
	if (ATE_ON(pAd))
	{
		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_RESOURCES);
		return 0;
	}
#endif // RALINK_ATE //


	// reject runt frames shorter than an Ethernet header (14 bytes); an EapolStart frame is 18 bytes
	if (skb->len < 14)
	{
		hex_dump("bad packet", skb->data, skb->len);
		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
		goto done;
	}

#if !defined(CONFIG_RA_NAT_NONE)	// ASUS EXT
	/* add tx hook point */
	if (ra_sw_nat_hook_tx != NULL)
	{
		ra_sw_nat_hook_tx(pPacket);
	}
#endif

	RTMP_SET_PACKET_5VT(pPacket, 0);
#ifdef CONFIG_5VT_ENHANCE
	if (*(int *)(skb->cb) == BRIDGE_TAG) {
		RTMP_SET_PACKET_5VT(pPacket, 1);
	}
#endif


#ifdef CONFIG_AP_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_AP(pAd)
		APSendPackets((NDIS_HANDLE)pAd, (PPNDIS_PACKET) &pPacket, 1);
#endif // CONFIG_AP_SUPPORT //


	status = 0;
done:
	return status;
}
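
For context, a minimal sketch of the kind of hard_start_xmit wrapper that would hand frames to rt28xx_packet_xmit(); the wrapper name rt28xx_send_packets and the IFF_UP guard are illustrative assumptions, not taken from this driver.

/* Hedged sketch of a possible caller; names and checks are assumptions. */
static int rt28xx_send_packets(struct sk_buff *skb, struct net_device *net_dev)
{
	/* if the interface is not up, consume the skb so the stack does not retry */
	if (!(net_dev->flags & IFF_UP)) {
		dev_kfree_skb_any(skb);
		return 0;
	}

	/* rt28xx_packet_xmit() derives pAd from skb->dev, which the stack set to net_dev */
	return rt28xx_packet_xmit(skb);
}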
Example #2
0
static inline int
dma_xmit(struct sk_buff *skb, struct net_device *dev, END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 txd_info3, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

	txd_info3 = TX3_QDMA_SWC;
	if (gmac_no != PSE_PORT_PPE) {
		u32 QID = M2Q_table[(skb->mark & 0x3f)];
		if (QID < 8 && M2Q_wan_lan) {
#if defined (CONFIG_PSEUDO_SUPPORT)
			if (gmac_no == PSE_PORT_GMAC2)
				QID += 8;
#elif defined (CONFIG_RAETH_HW_VLAN_TX)
			if ((skb_vlan_tag_get(skb) & VLAN_VID_MASK) > 1)
				QID += 8;
#endif
		}
		txd_info3 |= TX3_QDMA_QID(QID);
	}

	txd_info4 = TX4_DMA_FPORT(gmac_no);

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb))
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;
		
		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}
		
		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+2 free descriptors (2 need to prevent head/tail overlap) */
	if (ei_local->txd_pool_free_num < (nr_desc+2)) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: QDMA TX pool is run out! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
				txd_info3, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		qdma_write_skb_fragment(ei_local, frag_addr, frag_size,
					txd_info3, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* when both BQL and SMP are enabled, netdev_tx_sent_queue() above already issued smp_mb() */
	wmb();
#endif

	/* kick the QDMA TX */
	sysRegWrite(QTX_CTX_PTR, (u32)get_txd_ptr_phy(ei_local, ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
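
For context, a minimal sketch of an ndo_start_xmit handler that could feed dma_xmit(); the name ei_start_xmit, reaching END_DEVICE through netdev_priv(), and PSE_PORT_GMAC1 as the default destination port are illustrative assumptions.

/* Hedged sketch of a possible caller; names and the port choice are assumptions. */
static netdev_tx_t ei_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	END_DEVICE *ei_local = netdev_priv(dev);

	/* dma_xmit() stops the queue and returns NETDEV_TX_BUSY when fewer than
	   nr_desc+2 descriptors are free; the stack then requeues the skb */
	return dma_xmit(skb, dev, ei_local, PSE_PORT_GMAC1);
}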
Example #3
0
/*
========================================================================
Routine Description:
    The entry point through which the Linux kernel sends a packet to our driver.

Arguments:
    sk_buff *skb		pointer to the sk_buff to be transmitted.

Return Value:
    0

Note:
	This function is the entry point of the Tx path used by the OS to deliver
	packets to our driver. Only OS-dependent and STA/AP-common handling
	procedures may be placed here.
========================================================================
*/
int rt28xx_packet_xmit(struct sk_buff *skb)
{
	struct net_device *net_dev = skb->dev;
	PRTMP_ADAPTER pAd = NULL;
	PNDIS_PACKET pPacket = (PNDIS_PACKET) skb;

	GET_PAD_FROM_NET_DEV(pAd, net_dev);

	/* RT2870STA does this in RTMPSendPackets() */
#ifdef RALINK_ATE
	if (ATE_ON(pAd))
	{
		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_RESOURCES);
		return 0;
	}
#endif // RALINK_ATE //

#ifdef CONFIG_STA_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_STA(pAd)
	{
		// Drop send request since we are in monitor mode
		if (MONITOR_ON(pAd))
		{
			RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
			goto done;
		}
	}
#endif // CONFIG_STA_SUPPORT //

	// reject runt frames shorter than an Ethernet header (14 bytes); an EapolStart frame is 18 bytes
	if (skb->len < 14)
	{
		hex_dump("bad packet", skb->data, skb->len);
		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
		goto done;
	}

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
#if !defined (CONFIG_RA_NAT_NONE)
	if (ra_sw_nat_hook_tx != NULL)
	{
		ra_sw_nat_hook_tx(pPacket, 0);
	}
#endif
#endif

#ifdef CONFIG_5VT_ENHANCE
	RTMP_SET_PACKET_5VT(pPacket, 0);
	if (*(int*)(skb->cb) == BRIDGE_TAG) {
		RTMP_SET_PACKET_5VT(pPacket, 1);
	}
#endif

#ifdef CONFIG_AP_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_AP(pAd)
		APSendPackets((NDIS_HANDLE)pAd, (PPNDIS_PACKET) &pPacket, 1);
#endif // CONFIG_AP_SUPPORT //

#ifdef CONFIG_STA_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_STA(pAd)
	{
		STASendPackets((NDIS_HANDLE)pAd, (PPNDIS_PACKET) &pPacket, 1);
	}
#endif // CONFIG_STA_SUPPORT //

done:

	return 0;
}
Example #4
0
static inline int
dma_xmit(struct sk_buff* skb, struct net_device *dev, END_DEVICE *ei_local, int gmac_no)
{
	struct netdev_queue *txq;
	dma_addr_t frag_addr;
	u32 frag_size, nr_desc;
	u32 next_idx, desc_odd = 0;
	u32 txd_info2 = 0, txd_info4;
#if defined (CONFIG_RAETH_SG_DMA_TX)
	u32 i, nr_frags;
	const skb_frag_t *tx_frag;
	const struct skb_shared_info *shinfo;
#else
#define nr_frags 0
#endif

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	if (ra_sw_nat_hook_tx != NULL) {
#if defined (CONFIG_RA_HW_NAT_WIFI) || defined (CONFIG_RA_HW_NAT_PCI)
		if (IS_DPORT_PPE_VALID(skb))
			gmac_no = PSE_PORT_PPE;
		else
#endif
		if (ra_sw_nat_hook_tx(skb, gmac_no) == 0) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
	}
#endif

#if !defined (RAETH_HW_PADPKT)
	if (skb->len < ei_local->min_pkt_len) {
		if (skb_padto(skb, ei_local->min_pkt_len)) {
#if defined (CONFIG_RAETH_DEBUG)
			if (net_ratelimit())
				printk(KERN_ERR "%s: skb_padto failed\n", RAETH_DEV_NAME);
#endif
			return NETDEV_TX_OK;
		}
		skb_put(skb, ei_local->min_pkt_len - skb->len);
	}
#endif

#if defined (CONFIG_RALINK_MT7620)
	if (gmac_no == PSE_PORT_PPE)
		txd_info4 = TX4_DMA_FP_BMAP(0x80); /* P7 */
	else
#if defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x20); /* P5 */
#elif defined (CONFIG_RAETH_HAS_PORT4) && !defined (CONFIG_RAETH_HAS_PORT5) && !defined (CONFIG_RAETH_ESW)
		txd_info4 = TX4_DMA_FP_BMAP(0x10); /* P4 */
#else
		txd_info4 = 0; /* routing by DA */
#endif
#elif defined (CONFIG_RALINK_MT7621)
	txd_info4 = TX4_DMA_FPORT(gmac_no);
#else
	txd_info4 = (TX4_DMA_QN(3) | TX4_DMA_PN(gmac_no));
#endif

#if defined (CONFIG_RAETH_CHECKSUM_OFFLOAD) && !defined (RAETH_SDMA)
	if (skb->ip_summed == CHECKSUM_PARTIAL)
		txd_info4 |= TX4_DMA_TUI_CO(7);
#endif

#if defined (CONFIG_RAETH_HW_VLAN_TX)
	if (skb_vlan_tag_present(skb)) {
#if defined (RAETH_HW_VLAN4K)
		txd_info4 |= (0x10000 | skb_vlan_tag_get(skb));
#else
		u32 vlan_tci = skb_vlan_tag_get(skb);
		txd_info4 |= (TX4_DMA_INSV | TX4_DMA_VPRI(vlan_tci));
		txd_info4 |= (u32)ei_local->vlan_4k_map[(vlan_tci & VLAN_VID_MASK)];
#endif
	}
#endif

#if defined (CONFIG_RAETH_SG_DMA_TX)
	shinfo = skb_shinfo(skb);
#endif

#if defined (CONFIG_RAETH_TSO)
	/* fill MSS info in tcp checksum field */
	if (shinfo->gso_size) {
		u32 hdr_len;
		
		if (!(shinfo->gso_type & (SKB_GSO_TCPV4|SKB_GSO_TCPV6))) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		if (skb_header_cloned(skb)) {
			if (pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
				dev_kfree_skb(skb);
				return NETDEV_TX_OK;
			}
		}
		
		hdr_len = (skb_transport_offset(skb) + tcp_hdrlen(skb));
		if (hdr_len >= skb->len) {
			dev_kfree_skb(skb);
			return NETDEV_TX_OK;
		}
		
		tcp_hdr(skb)->check = htons(shinfo->gso_size);
		txd_info4 |= TX4_DMA_TSO;
	}
#endif

	nr_desc = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	nr_frags = (u32)shinfo->nr_frags;

	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		nr_desc += DIV_ROUND_UP(skb_frag_size(tx_frag), TXD_MAX_SEG_SIZE);
	}
#endif
	nr_desc = DIV_ROUND_UP(nr_desc, 2);

	txq = netdev_get_tx_queue(dev, 0);

	/* flush main skb part before spin_lock() */
	frag_size = (u32)skb_headlen(skb);
	frag_addr = dma_map_single(NULL, skb->data, frag_size, DMA_TO_DEVICE);

	/* protect TX ring access (from eth2/eth3 queues) */
	spin_lock(&ei_local->page_lock);

	/* check nr_desc+1 free descriptors */
	next_idx = (ei_local->txd_last_idx + nr_desc) % NUM_TX_DESC;
	if (ei_local->txd_buff[ei_local->txd_last_idx] || ei_local->txd_buff[next_idx]) {
		spin_unlock(&ei_local->page_lock);
		netif_tx_stop_queue(txq);
#if defined (CONFIG_RAETH_DEBUG)
		if (net_ratelimit())
			printk("%s: PDMA TX ring is full! (GMAC: %d)\n", RAETH_DEV_NAME, gmac_no);
#endif
		return NETDEV_TX_BUSY;
	}

	pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
				&txd_info2, txd_info4, skb, nr_frags == 0);
#if defined (CONFIG_RAETH_SG_DMA_TX)
	for (i = 0; i < nr_frags; i++) {
		tx_frag = &shinfo->frags[i];
		frag_size = skb_frag_size(tx_frag);
		frag_addr = skb_frag_dma_map(NULL, tx_frag, 0, frag_size, DMA_TO_DEVICE);
		pdma_write_skb_fragment(ei_local, frag_addr, frag_size, &desc_odd,
					&txd_info2, txd_info4, skb, i == nr_frags - 1);
	}
#endif

#if defined (CONFIG_RAETH_BQL)
	netdev_tx_sent_queue(txq, skb->len);
#endif

#if !defined (CONFIG_RAETH_BQL) || !defined (CONFIG_SMP)
	/* when both BQL and SMP are enabled, netdev_tx_sent_queue() above already issued smp_mb() */
	wmb();
#endif

	/* kick the DMA TX */
	sysRegWrite(TX_CTX_IDX0, cpu_to_le32(ei_local->txd_last_idx));

	spin_unlock(&ei_local->page_lock);

	return NETDEV_TX_OK;
}
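
A small sketch of the descriptor accounting used above: the linear part and every fragment are split into TXD_MAX_SEG_SIZE-sized segments, and PDMA packs two segments per descriptor, hence the final division by two. The helper pdma_count_descs() is illustrative, not a function from this driver.

/* Illustrative helper mirroring the nr_desc computation above. */
static u32 pdma_count_descs(const struct sk_buff *skb)
{
	const struct skb_shared_info *shinfo = skb_shinfo(skb);
	u32 i, nr_seg = DIV_ROUND_UP(skb_headlen(skb), TXD_MAX_SEG_SIZE);

	for (i = 0; i < shinfo->nr_frags; i++)
		nr_seg += DIV_ROUND_UP(skb_frag_size(&shinfo->frags[i]), TXD_MAX_SEG_SIZE);

	/* each PDMA TX descriptor carries up to two DMA segments */
	return DIV_ROUND_UP(nr_seg, 2);
}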
Example #5
0
int	RTMPSendPackets(
	IN NDIS_HANDLE dev_hnd,
	IN PPNDIS_PACKET ppPacketArray,
	IN UINT NumberOfPackets,
	IN UINT32 PktTotalLen,
	IN RTMP_NET_ETH_CONVERT_DEV_SEARCH Func)
{
	RTMP_ADAPTER *pAd = (RTMP_ADAPTER *)dev_hnd;
	PNDIS_PACKET pPacket = ppPacketArray[0];


	INC_COUNTER64(pAd->WlanCounters.TransmitCountFrmOs);

	if (pPacket == NULL)
		goto done;

	/* RT2870STA does this in RTMPSendPackets() */
#ifdef RALINK_ATE
	if (ATE_ON(pAd))
	{
		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_RESOURCES);
		return 0;
	}
#endif /* RALINK_ATE */


	/* reject runt frames shorter than an Ethernet header (14 bytes); an EapolStart frame is 18 bytes */
	if (PktTotalLen < 14)
	{
		hex_dump("bad packet", GET_OS_PKT_DATAPTR(pPacket), PktTotalLen);
		RELEASE_NDIS_PACKET(pAd, pPacket, NDIS_STATUS_FAILURE);
		return 0;
	}

#if !defined(CONFIG_RA_NAT_NONE)
	if (ra_sw_nat_hook_tx != NULL)
	{
		unsigned long flags;

		RTMP_INT_LOCK(&pAd->page_lock, flags);
		ra_sw_nat_hook_tx(pPacket);
		RTMP_INT_UNLOCK(&pAd->page_lock, flags);
	}
#endif


	RTMP_SET_PACKET_5VT(pPacket, 0);
#ifdef CONFIG_5VT_ENHANCE
	if (*(int *)(GET_OS_PKT_CB(pPacket)) == BRIDGE_TAG) {
		RTMP_SET_PACKET_5VT(pPacket, 1);
	}
#endif

#ifdef CONFIG_AP_SUPPORT
	IF_DEV_CONFIG_OPMODE_ON_AP(pAd)
		APSendPackets((NDIS_HANDLE)pAd, (PPNDIS_PACKET) &pPacket, 1);
#endif /* CONFIG_AP_SUPPORT */


done:
	return 0;
}
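
A minimal sketch of how an OS glue layer might hand one packet to this NDIS-style entry point; the wrapper name rtmp_os_send_one, the one-element packet array, and passing NULL for the converter callback are illustrative assumptions.

/* Hedged caller sketch; names and the NULL converter argument are assumptions. */
static int rtmp_os_send_one(RTMP_ADAPTER *pAd, struct sk_buff *skb)
{
	PNDIS_PACKET pPacket = (PNDIS_PACKET)skb;

	/* RTMPSendPackets() expects an array of packets; pass a one-element array */
	return RTMPSendPackets((NDIS_HANDLE)pAd, (PPNDIS_PACKET)&pPacket, 1,
			       skb->len, NULL);
}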
Example #6
0
netdev_tx_t usbnet_start_xmit (struct sk_buff *skb,
				     struct net_device *net)
{
	struct usbnet		*dev = netdev_priv(net);
	int			length;
	struct urb		*urb = NULL;
	struct skb_data		*entry;
	struct driver_info	*info = dev->driver_info;
	unsigned long		flags;
	int retval;

	// some devices want funky USB-level framing, for
	// win32 driver (usually) and/or hardware quirks
	if (info->tx_fixup) {
		skb = info->tx_fixup (dev, skb, GFP_ATOMIC);
		if (!skb) {
			netif_dbg(dev, tx_err, dev->net, "can't tx_fixup skb\n");
			goto drop;
		}
	}
	length = skb->len;

	if (!(urb = usb_alloc_urb (0, GFP_ATOMIC))) {
		netif_dbg(dev, tx_err, dev->net, "no urb\n");
		goto drop;
	}

#if defined (CONFIG_RA_HW_NAT) || defined (CONFIG_RA_HW_NAT_MODULE)
	/* add tx hook point */
	if (ra_sw_nat_hook_tx != NULL) {
		skb->data += 4;	/* temporarily advance skb->data to the Ethernet DA for the hook */
		ra_sw_nat_hook_tx(skb, 1);
		skb->data -= 4;
	}
#endif

	entry = (struct skb_data *) skb->cb;
	entry->urb = urb;
	entry->dev = dev;
	entry->state = tx_start;
	entry->length = length;

	usb_fill_bulk_urb (urb, dev->udev, dev->out,
			skb->data, skb->len, tx_complete, skb);

	/* don't assume the hardware handles USB_ZERO_PACKET
	 * NOTE:  strictly conforming cdc-ether devices should expect
	 * the ZLP here, but ignore the one-byte packet.
	 */
	if (length % dev->maxpacket == 0) {
		if (!(info->flags & FLAG_SEND_ZLP)) {
			urb->transfer_buffer_length++;
			if (skb_tailroom(skb)) {
				skb->data[skb->len] = 0;
				__skb_put(skb, 1);
			}
		} else
			urb->transfer_flags |= URB_ZERO_PACKET;
	}

	spin_lock_irqsave(&dev->txq.lock, flags);
	retval = usb_autopm_get_interface_async(dev->intf);
	if (retval < 0) {
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		goto drop;
	}

#ifdef CONFIG_PM
	/* if this triggers, the device is still asleep */
	if (test_bit(EVENT_DEV_ASLEEP, &dev->flags)) {
		/* transmission will be done in resume */
		usb_anchor_urb(urb, &dev->deferred);
		/* no use to process more packets */
		netif_stop_queue(net);
		spin_unlock_irqrestore(&dev->txq.lock, flags);
		netdev_dbg(dev->net, "Delaying transmission for resumption\n");
		goto deferred;
	}
#endif

	switch ((retval = usb_submit_urb (urb, GFP_ATOMIC))) {
	case -EPIPE:
		netif_stop_queue (net);
		usbnet_defer_kevent (dev, EVENT_TX_HALT);
		usb_autopm_put_interface_async(dev->intf);
		break;
	default:
		usb_autopm_put_interface_async(dev->intf);
		netif_dbg(dev, tx_err, dev->net,
			  "tx: submit urb err %d\n", retval);
		break;
	case 0:
		net->trans_start = jiffies;
		__skb_queue_tail (&dev->txq, skb);
		if (dev->txq.qlen >= TX_QLEN (dev))
			netif_stop_queue (net);
	}
	spin_unlock_irqrestore (&dev->txq.lock, flags);

	if (retval) {
		netif_dbg(dev, tx_err, dev->net, "drop, code %d\n", retval);
drop:
		dev->net->stats.tx_dropped++;
		if (skb)
			dev_kfree_skb_any (skb);
		usb_free_urb (urb);
	} else
		netif_dbg(dev, tx_queued, dev->net,
			  "> tx, len %d, type 0x%x\n", length, skb->protocol);
#ifdef CONFIG_PM
deferred:
#endif
	return NETDEV_TX_OK;
}
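
For reference, a minimal sketch of the kind of tx_fixup hook that usbnet_start_xmit() invokes at the top of the function; the name rndis_like_tx_fixup and the 4-byte length header are illustrative assumptions, not the framing of any particular device.

/* Hedged tx_fixup sketch; the header format is an assumption. */
static struct sk_buff *rndis_like_tx_fixup(struct usbnet *dev, struct sk_buff *skb,
					   gfp_t flags)
{
	__le32 len = cpu_to_le32(skb->len);

	/* make headroom for a 4-byte framing header; on failure drop the skb,
	   since usbnet_start_xmit() treats a NULL return as "can't tx_fixup skb" */
	if (skb_cow_head(skb, sizeof(len))) {
		dev_kfree_skb_any(skb);
		return NULL;
	}
	memcpy(skb_push(skb, sizeof(len)), &len, sizeof(len));
	return skb;
}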