static int ethoc_init_ring(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	struct ethoc_bd bd;
	int i;

	priv->cur_tx = 0;
	priv->dty_tx = 0;
	priv->cur_rx = 0;

	/* setup transmission buffers */
	bd.addr = 0;	/* TX addresses are programmed at send time */
	bd.stat = TX_BD_IRQ | TX_BD_CRC;

	for (i = 0; i < priv->num_tx; i++) {
		if (i == priv->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < priv->num_rx; i++) {
		bd.addr = (u32)NetRxPackets[i];
		if (i == priv->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		flush_dcache(bd.addr, PKTSIZE_ALIGN);
		ethoc_write_bd(dev, priv->num_tx + i, &bd);
	}

	return 0;
}
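All of these snippets drive the OpenCores 10/100 MAC ("ethoc") through its buffer descriptor table via ethoc_read_bd()/ethoc_write_bd(). For reference, a minimal sketch of the descriptor format and the write accessor, modeled on the Linux ethoc driver; the 0x400 table offset and the two-word BD layout follow the ETHMAC spec, and ethoc_write() is assumed to be the driver's 32-bit register accessor:

#define ETHOC_BD_BASE	0x400	/* BD table sits after the CSR block */

struct ethoc_bd {
	u32 stat;	/* control/status bits; frame length in the upper half */
	u32 addr;	/* bus address of the packet buffer */
};

static void ethoc_write_bd(struct ethoc *dev, int index,
			   const struct ethoc_bd *bd)
{
	loff_t offset = ETHOC_BD_BASE + (index * sizeof(struct ethoc_bd));
	ethoc_write(dev, offset + 0, bd->stat);
	ethoc_write(dev, offset + 4, bd->addr);
}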
Example #2
static int ethoc_init_ring(struct ethoc *dev)
{
	struct ethoc_bd bd;
	int i;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	/* setup transmission buffers */
	bd.addr = virt_to_phys(dev->membase);
	bd.stat = TX_BD_IRQ | TX_BD_CRC;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;
	}

	return 0;
}
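This variant carves both rings out of one contiguous region at dev->membase, so buffer i simply lives at i * ETHOC_BUFSIZ from the base and bd.addr keeps advancing straight from the TX ring into the RX ring. A hypothetical sanity check one might add when porting (the helper name and -ENOMEM convention are ours, not the driver's):

/* Hypothetical: verify the buffer RAM can hold both rings. */
static int ethoc_check_mem(struct ethoc *dev, size_t mem_size)
{
	size_t need = (dev->num_tx + dev->num_rx) * ETHOC_BUFSIZ;

	return (need > mem_size) ? -ENOMEM : 0;
}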
static int ethoc_rx(struct eth_device *dev, int limit)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	int count;

	for (count = 0; count < limit; ++count) {
		u32 entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(dev, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;

		debug("%s(): RX buffer %d, %x received\n",
		      __func__, priv->cur_rx, bd.stat);
		if (ethoc_update_rx_stats(&bd) == 0) {
			int size = bd.stat >> 16;
			size -= 4;	/* strip the CRC */
			NetReceive((void *)bd.addr, size);
		}

		/* clear the buffer descriptor so it can be reused */
		flush_dcache(bd.addr, PKTSIZE_ALIGN);
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(dev, entry, &bd);
		priv->cur_rx++;
	}

	return 0;
}
static int ethoc_init_ring(struct eth_device *dev)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	struct ethoc_bd bd;
	int i;

	priv->cur_tx = 0;
	priv->dty_tx = 0;
	priv->cur_rx = 0;

	/* setup transmission buffers */
	bd.addr = 0;	/* TX addresses are programmed at send time */
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	for (i = 0; i < priv->num_tx; i++) {
		if (i == priv->num_tx - 1)
			bd.stat |= TX_BD_WRAP;
		ethoc_write_bd(dev, i, &bd);
	}

	/* packet buffer setup copied from U-Boot's net core; NetTxPacket is
	 * cleared first so the pool below is always (re)initialized */
	NetTxPacket = RT_NULL;
	if (!NetTxPacket) {
		int	i;
		/*
		 *	Setup packet buffers, aligned correctly.
		 */
		NetTxPacket = &PktBuf[0] + (PKTALIGN - 1);
		NetTxPacket -= (ulong)NetTxPacket % PKTALIGN;
		for (i = 0; i < PKTBUFSRX; i++)
			NetRxPackets[i] = NetTxPacket + (i+1)*PKTSIZE_ALIGN;
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < priv->num_rx; i++) {
		bd.addr = (u32)NetRxPackets[i];
		if (i == priv->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		/* write back cached data before the MAC owns the buffer */
		flush_dcache_range(bd.addr, bd.addr + PKTSIZE_ALIGN);
		ethoc_write_bd(dev, priv->num_tx + i, &bd);
	}

	return 0;
}
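The buffer pool copied from U-Boot over-allocates by PKTALIGN - 1 bytes and then rounds the pointer back with a modulo, which is an align-up in disguise; the RX buffers then follow at PKTSIZE_ALIGN strides. The same idiom as a generic macro (the macro name is illustrative, not U-Boot API):

/* Align pointer p up to a bytes (a must be a power of two); equivalent
 * to the NetTxPacket rounding above. */
#define ALIGN_UP_PTR(p, a) \
	((uchar *)((((ulong)(p)) + ((a) - 1)) & ~((ulong)(a) - 1)))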
Example #5
static int ethoc_init_ring(struct ethoc *priv)
{
	struct ethoc_bd bd;
	phys_addr_t addr = priv->packet_phys;
	int i;

	priv->cur_tx = 0;
	priv->dty_tx = 0;
	priv->cur_rx = 0;

	/* setup transmission buffers */
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	bd.addr = 0;

	for (i = 0; i < priv->num_tx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		}
		if (i == priv->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(priv, i, &bd);
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < priv->num_rx; i++) {
		if (addr) {
			bd.addr = addr;
			addr += PKTSIZE_ALIGN;
		} else {
			bd.addr = virt_to_phys(net_rx_packets[i]);
		}
		if (i == priv->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		flush_dcache_range((ulong)net_rx_packets[i],
				   (ulong)net_rx_packets[i] + PKTSIZE_ALIGN);
		ethoc_write_bd(priv, priv->num_tx + i, &bd);
	}

	return 0;
}
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If a packet (interrupt) came in between checking
			 * RX_BD_EMPTY and clearing the interrupt source, we
			 * risk missing the packet, as the RX interrupt won't
			 * trigger right away when we reenable it; hence, check
			 * RX_BD_EMPTY here again to make sure there isn't such
			 * a packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb;

			size -= 4; /* strip the CRC */
			skb = netdev_alloc_skb_ip_align(dev, size);

			if (likely(skb)) {
				void *src = priv->vma[entry];
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				dev->stats.rx_packets++;
				dev->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				dev->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}
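The re-read after ethoc_ack_irq() closes a small race: if a frame arrives between reading the descriptor and clearing the interrupt source, the RX interrupt won't fire again for it, so the descriptor is checked once more before giving up. The ack itself is a single write-one-to-clear register access; a sketch matching the Linux driver (INT_SOURCE is the ETHMAC interrupt source register):

static inline void ethoc_ack_irq(struct ethoc *dev, u32 mask)
{
	/* writing 1s to INT_SOURCE clears the corresponding pending bits */
	ethoc_write(dev, INT_SOURCE, mask);
}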
static int ethoc_init_ring(struct ethoc *dev, unsigned long mem_start)
{
	struct ethoc_bd bd;
	int i;
	void *vma;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	ethoc_write(dev, TX_BD_NUM, dev->num_tx);

	/* setup transmission buffers */
	bd.addr = mem_start;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;
	vma = dev->membase;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		ethoc_write_bd(dev, dev->num_tx + i, &bd);
		bd.addr += ETHOC_BUFSIZ;

		dev->vma[dev->num_tx + i] = vma;
		vma += ETHOC_BUFSIZ;
	}

	return 0;
}
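This version keeps two views of every buffer: bd.addr advances through the device-visible bus addresses starting at mem_start, while dev->vma[] records the matching CPU mappings starting at dev->membase, which ethoc_rx() later reads through with memcpy_fromio(). Both walk in ETHOC_BUFSIZ steps, so the two views stay in lockstep; illustratively (this helper is ours, not the driver's):

/* Illustrative only: the CPU-side pointer stored in dev->vma[i]. */
static inline void *ethoc_slot_vma(struct ethoc *dev, unsigned int i)
{
	return dev->membase + i * ETHOC_BUFSIZ;
}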
Example #9
static int ethoc_init_ring(struct ethoc *dev)
{
	struct ethoc_bd bd;
	int i;

	dev->num_tx = 1;
	dev->num_rx = PKTBUFSRX;

	dev->cur_tx = 0;
	dev->dty_tx = 0;
	dev->cur_rx = 0;

	ethoc_write(dev, TX_BD_NUM, dev->num_tx);

	/* setup transmission buffers */
	bd.addr = 0;
	bd.stat = TX_BD_IRQ | TX_BD_CRC;

	for (i = 0; i < dev->num_tx; i++) {
		if (i == dev->num_tx - 1)
			bd.stat |= TX_BD_WRAP;

		ethoc_write_bd(dev, i, &bd);
	}

	bd.stat = RX_BD_EMPTY | RX_BD_IRQ;

	for (i = 0; i < dev->num_rx; i++) {
		if (i == dev->num_rx - 1)
			bd.stat |= RX_BD_WRAP;

		bd.addr = (u32)NetRxPackets[i];
		ethoc_write_bd(dev, dev->num_tx + i, &bd);

		flush_dcache_range(bd.addr, bd.addr + PKTSIZE);
	}

	return 0;
}
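Writing TX_BD_NUM tells the MAC how its fixed-size descriptor table is split: BDs 0 .. num_tx - 1 belong to TX and the remainder to RX, which is why every RX access in these examples indexes num_tx + i. A single TX descriptor is plenty for a bootloader that transmits synchronously. The implied index mapping, as hypothetical helpers:

/* BD table split implied by TX_BD_NUM (helpers are illustrative). */
static inline unsigned int ethoc_tx_bd(struct ethoc *dev, unsigned int i)
{
	return i;			/* TX ring starts at BD 0 */
}

static inline unsigned int ethoc_rx_bd(struct ethoc *dev, unsigned int i)
{
	return dev->num_tx + i;		/* RX ring follows the TX ring */
}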
Example #10
static int ethoc_rx(struct net_device *dev, int limit)
{
	struct ethoc *priv = netdev_priv(dev);
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;

		if (ethoc_update_rx_stats(priv, &bd) == 0) {
			int size = bd.stat >> 16;
			struct sk_buff *skb = netdev_alloc_skb(dev, size);

			size -= 4; /* strip the CRC */

			if (likely(skb)) {
				skb_reserve(skb, 2); /* align TCP/IP header */
				void *src = phys_to_virt(bd.addr);
				memcpy_fromio(skb_put(skb, size), src, size);
				skb->protocol = eth_type_trans(skb, dev);
				priv->stats.rx_packets++;
				priv->stats.rx_bytes += size;
				netif_receive_skb(skb);
			} else {
				if (net_ratelimit())
					dev_warn(&dev->dev, "low on memory - "
							"packet dropped\n");

				priv->stats.rx_dropped++;
				break;
			}
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		priv->cur_rx++;
	}

	return count;
}
Example #11
static int ethoc_rx(struct eth_device *edev, int limit)
{
	struct ethoc *priv = edev->priv;
	int count;

	for (count = 0; count < limit; ++count) {
		unsigned int entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + priv->cur_rx;
		ethoc_read_bd(priv, entry, &bd);
		if (bd.stat & RX_BD_EMPTY) {
			ethoc_ack_irq(priv, INT_MASK_RX);
			/* If a packet (interrupt) came in between checking
			 * RX_BD_EMPTY and clearing the interrupt source, we
			 * risk missing the packet, as the RX interrupt won't
			 * trigger right away when we reenable it; hence, check
			 * RX_BD_EMPTY here again to make sure there isn't such
			 * a packet waiting for us...
			 */
			ethoc_read_bd(priv, entry, &bd);
			if (bd.stat & RX_BD_EMPTY)
				break;
		}

		if (ethoc_update_rx_stats(edev, &bd) == 0) {
			int size = bd.stat >> 16;

			size -= 4; /* strip the CRC */
			invalidate_dcache_range(bd.addr, bd.addr + PKTSIZE);
			net_receive((unsigned char *)bd.addr, size);
		}

		/* clear the buffer descriptor so it can be reused */
		bd.stat &= ~RX_BD_STATS;
		bd.stat |=  RX_BD_EMPTY;
		ethoc_write_bd(priv, entry, &bd);
		if (++priv->cur_rx == priv->num_rx)
			priv->cur_rx = 0;
	}

	return count;
}
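Note the cache discipline in the barebox version: the RX buffer is invalidated before the CPU reads the freshly DMA'd frame, whereas the ring-setup code flushes before handing a buffer to the MAC. On a non-coherent CPU both directions matter; a sketch of the rule using the same cache API (the helper names are ours, not driver API):

/* Non-coherent DMA handoff rules (sketch): */
static void buf_to_device(unsigned long addr, unsigned long len)
{
	/* CPU wrote the buffer: write back so the MAC's DMA sees it */
	flush_dcache_range(addr, addr + len);
}

static void buf_from_device(unsigned long addr, unsigned long len)
{
	/* MAC wrote the buffer: drop stale lines before the CPU reads */
	invalidate_dcache_range(addr, addr + len);
}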
struct pbuf *ethoc_rx(struct eth_device *dev, int limit)
{
	struct ethoc *priv = (struct ethoc *)dev->priv;
	int count;
	struct pbuf *p = RT_NULL;

	for (count = 0; count < limit; ++count) {
		u32 entry;
		struct ethoc_bd bd;

		entry = priv->num_tx + (priv->cur_rx % priv->num_rx);
		ethoc_read_bd(dev, entry, &bd);
		if (bd.stat & RX_BD_EMPTY)
			break;
#if 0
		debug("%s(): RX buffer %d, %x received\n",
		      __func__, priv->cur_rx, bd.stat);
#endif
		if (ethoc_update_rx_stats(&bd) == 0) {
			int size = bd.stat >> 16;
			size -= 4;	/* strip the CRC */
			/* hand the frame to the uIP stack rather than
			 * U-Boot's NetReceive() */
			rt_memcpy(uip_buf, (char *)bd.addr, size);
			uip_len = size;
		}

		/* clear the buffer descriptor so it can be reused */
		flush_dcache_range(bd.addr, bd.addr + PKTSIZE_ALIGN);
		bd.stat &= ~RX_BD_STATS;
		bd.stat |= RX_BD_EMPTY;
		ethoc_write_bd(dev, entry, &bd);
		priv->cur_rx++;

	}

	return p;
}