Example #1
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
	struct net_device *dev = (struct net_device*)ptr;
	struct raeth_priv *re = netdev_priv(dev);
	unsigned int bytes_compl = 0, pkts_compl = 0;

	spin_lock(&re->page_lock);
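	/* Reclaim SKBs for TX descriptors the DMA engine has completed */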
	while (1) {
		struct raeth_tx_info *txi;
		struct ramips_tx_dma *txd;

		txi = &re->tx_info[re->skb_free_idx];
		txd = txi->tx_desc;

		if (!(txd->txd2 & TX_DMA_DONE) || !(txi->tx_skb))
			break;

		pkts_compl++;
		bytes_compl += txi->tx_skb->len;

		dev_kfree_skb_irq(txi->tx_skb);
		txi->tx_skb = NULL;
		re->skb_free_idx++;
		if (re->skb_free_idx >= NUM_TX_DESC)
			re->skb_free_idx = 0;
	}
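	/* Report the reclaimed packets and bytes to BQL (byte queue limits) */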
	netdev_completed_queue(dev, pkts_compl, bytes_compl);
	spin_unlock(&re->page_lock);

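	/* Housekeeping done; re-enable the TX delay interrupt */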
	ramips_fe_int_enable(TX_DLY_INT);
}
Example #2
static void
ramips_eth_rx_hw(unsigned long ptr)
{
    struct net_device *dev = (struct net_device *) ptr;
    struct raeth_priv *priv = netdev_priv(dev);
    int rx;
    int max_rx = 16;

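    /* Handle at most 16 received frames per call */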
    while (max_rx) {
        struct sk_buff *rx_skb, *new_skb;
        int pktlen;

        rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
        if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
            break;
        max_rx--;

        rx_skb = priv->rx_skb[rx];
        pktlen = RX_DMA_PLEN0(priv->rx[rx].rxd2);

        new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
        /* Reuse the buffer on allocation failures */
        if (new_skb) {
            dma_addr_t dma_addr;

            dma_unmap_single(&priv->netdev->dev, priv->rx_dma[rx],
                             MAX_RX_LENGTH, DMA_FROM_DEVICE);

            skb_put(rx_skb, pktlen);
            rx_skb->dev = dev;
            rx_skb->protocol = eth_type_trans(rx_skb, dev);
            rx_skb->ip_summed = CHECKSUM_NONE;
            dev->stats.rx_packets++;
            dev->stats.rx_bytes += pktlen;
            netif_rx(rx_skb);

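            /* Install the replacement buffer in the ring and map it for DMA */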
            priv->rx_skb[rx] = new_skb;
            skb_reserve(new_skb, NET_IP_ALIGN);

            dma_addr = dma_map_single(&priv->netdev->dev,
                                      new_skb->data,
                                      MAX_RX_LENGTH,
                                      DMA_FROM_DEVICE);
            priv->rx_dma[rx] = dma_addr;
            priv->rx[rx].rxd1 = (unsigned int) dma_addr;
        } else {
            dev->stats.rx_dropped++;
        }

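        /* Clear the DONE bit and publish the updated RX index to the hardware */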
        priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
        wmb();
        ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
    }

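    /* Reschedule if the budget was used up, otherwise re-enable the RX delay interrupt */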
    if (max_rx == 0)
        tasklet_schedule(&priv->rx_tasklet);
    else
        ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}
Example #3
static void
ramips_eth_tx_housekeeping(unsigned long ptr)
{
    struct net_device *dev = (struct net_device*)ptr;
    struct raeth_priv *priv = netdev_priv(dev);

    spin_lock(&priv->page_lock);
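    /* Free SKBs for all TX descriptors the hardware has marked done */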
    while ((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) &&
            (priv->tx_skb[priv->skb_free_idx])) {
        dev_kfree_skb_irq(priv->tx_skb[priv->skb_free_idx]);
        priv->tx_skb[priv->skb_free_idx] = NULL;
        priv->skb_free_idx++;
        if (priv->skb_free_idx >= NUM_TX_DESC)
            priv->skb_free_idx = 0;
    }
    spin_unlock(&priv->page_lock);

    ramips_fe_int_enable(RAMIPS_TX_DLY_INT);
}
Example #4
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *re = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	rx = ramips_fe_trr(RAETH_REG_RX_CALC_IDX0);

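	/* Handle at most 16 received frames per call */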
	while (max_rx) {
		struct raeth_rx_info *rxi;
		struct ramips_rx_dma *rxd;
		struct sk_buff *rx_skb, *new_skb;
		int pktlen;

		rx = (rx + 1) % NUM_RX_DESC;

		rxi = &re->rx_info[rx];
		rxd = rxi->rx_desc;
		if (!(rxd->rxd2 & RX_DMA_DONE))
			break;

		rx_skb = rxi->rx_skb;
		pktlen = RX_DMA_PLEN0(rxd->rxd2);

		new_skb = ramips_alloc_skb(re);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			dma_addr_t dma_addr;

			dma_unmap_single(&re->netdev->dev, rxi->rx_dma,
					 MAX_RX_LENGTH, DMA_FROM_DEVICE);

			skb_put(rx_skb, pktlen);
			rx_skb->dev = dev;
			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(rx_skb);

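			/* Install the replacement buffer and map it for the DMA engine */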
			rxi->rx_skb = new_skb;

			dma_addr = dma_map_single(&re->netdev->dev,
						  new_skb->data,
						  MAX_RX_LENGTH,
						  DMA_FROM_DEVICE);
			rxi->rx_dma = dma_addr;
			rxd->rxd1 = (unsigned int) dma_addr;
			wmb();
		} else {
			dev->stats.rx_dropped++;
		}

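		/* Hand the descriptor back to the DMA engine and advance the RX index register */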
		rxd->rxd2 = RX_DMA_LSO;
		ramips_fe_twr(rx, RAETH_REG_RX_CALC_IDX0);
		max_rx--;
	}

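	/* Reschedule if the budget was used up, otherwise re-enable the RX delay interrupt */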
	if (max_rx == 0)
		tasklet_schedule(&re->rx_tasklet);
	else
		ramips_fe_int_enable(RX_DLY_INT);
}