Example 1
static int ethoc_rx(struct eth_device *dev, int limit)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	int count;

	for (count = 0; count < limit; ++count) {
		u32 entry;
		struct ethoc_bd bd;

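		/* RX buffer descriptors are stored after the num_tx TX
		 * descriptors in the controller's BD table, so the RX ring
		 * index is offset by num_tx */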
		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(dev, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;

		debug("%s(): RX buffer %d, %x received\n",
		      __func__, priv->cur_rx, bd.stat);
		if (ethoc_update_rx_stats(&bd) == 0) {
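			/* the upper 16 bits of the BD status word hold the
			 * length of the received frame */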
			int size = bd.stat >> 16;
			size -= 4;	/* strip the CRC */
			NetReceive((void *)bd.addr, size);
		}

		/* clear the buffer descriptor so it can be reused */
		flush_dcache(bd.addr, PKTSIZE_ALIGN);
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(dev, entry, &bd);
		priv->cur_rx++;
	}

	return count;
}
Example 2
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If a packet (interrupt) came in between checking
			 * BD_EMPTY and clearing the interrupt source, then we
			 * risk missing the packet, as the RX interrupt won't
			 * trigger right away when we re-enable it; hence, check
			 * BD_EMPTY here again to make sure there isn't such a
			 * packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
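			/* netdev_alloc_skb_ip_align() adds NET_IP_ALIGN bytes
			 * of headroom so the IP header lands word-aligned */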
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
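				/* priv->vma[] holds the CPU-side mapping of
				 * this BD's buffer, which may be I/O memory,
				 * hence memcpy_fromio() */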
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
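		/* advance the RX ring index, wrapping in place instead of
		 * reducing modulo num_rx at lookup time */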
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}
Example 3
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If a packet (interrupt) came in between checking
			 * BD_EMPTY and clearing the interrupt source, then we
			 * risk missing the packet, as the RX interrupt won't
			 * trigger right away when we re-enable it; hence, check
			 * BD_EMPTY here again to make sure there isn't such a
			 * packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}
Example 4
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			/* allocate after stripping the CRC, with two bytes of
			 * headroom so the IP header lands word-aligned; only
			 * touch the skb once allocation is known to have
			 * succeeded */
			skb = netdev_alloc_skb(dev, size + 2);

			if (likely(skb)) {
				void *src = phys_to_virt(bd.addr);

				skb_reserve(skb, 2); /* align TCP/IP header */
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				priv->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		priv->cur_rx++;
	}

	return count;
}
Example 5
static int ethoc_rx(struct eth_device *edev, int limit)
{
	struct ethoc *priv = edev->priv;
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If a packet (interrupt) came in between checking
			 * BD_EMPTY and clearing the interrupt source, then we
			 * risk missing the packet, as the RX interrupt won't
			 * trigger right away when we re-enable it; hence, check
			 * BD_EMPTY here again to make sure there isn't such a
			 * packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(edev, &bd) == 0) {
			int size = bd.stat >> 16;

			size -= 4; /* strip the CRC */
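			/* invalidate the cache range so the CPU reads the
			 * freshly DMA'd packet data instead of stale cache
			 * lines */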
			invalidate_dcache_range(bd.addr, bd.addr + PKTSIZE);
			net_receive((unsigned char *)bd.addr, size);
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}
Example 6
static int ethoc_rx_common(struct ethoc *priv, uchar **packetp)
{
	struct ethoc_bd bd;
	u32 i = priv->cur_rx % priv->num_rx;
	u32 entry = priv->num_tx + i;

	ethoc_read_bd(priv, entry, &bd);
	if (bd.stat & RX_BD_EMPTY)
		return -EAGAIN;

	debug("%s(): RX buffer %d, %x received\n",
	      __func__, priv->cur_rx, bd.stat);
	if (ethoc_update_rx_stats(&bd) == 0) {
		int size = bd.stat >> 16;

		size -= 4;	/* strip the CRC */
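		/* return a pointer into the driver's private packet area when
		 * one was allocated, otherwise into the shared
		 * net_rx_packets[] pool */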
		if (priv->packet)
			*packetp = priv->packet + entry * PKTSIZE_ALIGN;
		else
			*packetp = net_rx_packets[i];
		return size;
	} else {
		return 0;
	}
}
Example 7
struct pbuf *ethoc_rx(struct eth_device *dev, int limit)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	int count;
	struct pbuf *p = RT_NULL;

	for (count = 0; count < limit; ++count) {
		u32 entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(dev, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;
#if 0
		debug("%s(): RX buffer %d, %x received\n",
		      __func__, priv->cur_rx, bd.stat);
#endif
		if (ethoc_update_rx_stats(&bd) == 0) {
			int size = bd.stat >> 16;
			size -= 4;	/* strip the CRC */
			//netreceive((void *)bd.addr, size);
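			/* hand the frame to the uIP stack by copying it into
			 * uIP's global uip_buf and recording its length */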

			rt_memcpy(uip_buf, (char *)bd.addr, size);
			uip_len = size;
		}

		/* clear the buffer descriptor so it can be reused */
		flush_dcache_range(bd.addr, bd.addr + PKTSIZE_ALIGN);
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(dev, entry, &bd);
		priv->cur_rx++;
	}

	/* this port copies each frame into uIP's global uip_buf rather
	 * than building a pbuf, so p is still RT_NULL here */
	return p;
}