Example #1
/* free Tx skb function */
static int sh_eth_txfree(struct net_device *ndev)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    struct sh_eth_txdesc *txdesc;
    int freeNum = 0;
    int entry = 0;

    for (; mdp->cur_tx - mdp->dirty_tx > 0; mdp->dirty_tx++) {
        entry = mdp->dirty_tx % TX_RING_SIZE;
        txdesc = &mdp->tx_ring[entry];
        if (txdesc->status & cpu_to_edmac(mdp, TD_TACT))
            break;
        /* Free the original skb. */
        if (mdp->tx_skbuff[entry]) {
            dev_kfree_skb_irq(mdp->tx_skbuff[entry]);
            mdp->tx_skbuff[entry] = NULL;
            freeNum++;
        }
        txdesc->status = cpu_to_edmac(mdp, TD_TFP);
        if (entry >= TX_RING_SIZE - 1)
            txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);

        mdp->stats.tx_packets++;
        mdp->stats.tx_bytes += txdesc->buffer_length;
    }
    return freeNum;
}
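Examples #1-#3 drive the Tx ring with two free-running counters, cur_tx and dirty_tx: their difference counts descriptors still owned by the hardware, and a modulo by TX_RING_SIZE recovers the ring slot. Because only the difference and the modulo are used, the arithmetic stays correct even after the unsigned counters wrap. A minimal standalone sketch of that arithmetic (the ring size and counter values are illustrative, not the driver's):

#include <stdio.h>

#define TX_RING_SIZE 64				/* illustrative size only */

int main(void)
{
	/* Free-running counters, as in sh_eth_private: never reset, only the
	 * difference and the modulo matter, so wraparound is harmless. */
	unsigned int cur_tx = 130;		/* next slot the driver will fill */
	unsigned int dirty_tx = 70;		/* oldest slot not yet reclaimed */

	unsigned int in_flight = cur_tx - dirty_tx;		/* owned by hardware */
	unsigned int fill_slot = cur_tx % TX_RING_SIZE;		/* index to fill next */
	unsigned int reclaim_slot = dirty_tx % TX_RING_SIZE;	/* index to free next */

	printf("in flight %u, fill slot %u, reclaim slot %u\n",
	       in_flight, fill_slot, reclaim_slot);

	/* sh_eth_start_xmit stops the queue when the ring is nearly full: */
	if (in_flight >= TX_RING_SIZE - 4)
		printf("queue stopped until sh_eth_txfree() reclaims entries\n");

	return 0;
}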
Example #2
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_txdesc *txdesc;
	u32 entry;
	unsigned long flags;

	spin_lock_irqsave(&mdp->lock, flags);
	if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
		if (!sh_eth_txfree(ndev)) {
			if (netif_msg_tx_queued(mdp))
				dev_warn(&ndev->dev, "TxFD exhausted.\n");
			netif_stop_queue(ndev);
			spin_unlock_irqrestore(&mdp->lock, flags);
			return NETDEV_TX_BUSY;
		}
	}
	spin_unlock_irqrestore(&mdp->lock, flags);

	entry = mdp->cur_tx % TX_RING_SIZE;
	mdp->tx_skbuff[entry] = skb;
	txdesc = &mdp->tx_ring[entry];
	txdesc->addr = virt_to_phys(skb->data);
	/* soft swap. */
	if (!mdp->cd->hw_swap)
		sh_eth_soft_swap(phys_to_virt(ALIGN(txdesc->addr, 4)),
				 skb->len + 2);
	/* write back */
	__flush_purge_region(skb->data, skb->len);
	if (skb->len < ETHERSMALL)
		txdesc->buffer_length = ETHERSMALL;
	else
		txdesc->buffer_length = skb->len;

	if (entry >= TX_RING_SIZE - 1)
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
	else
		txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

	mdp->cur_tx++;

	if (!(sh_eth_read(ndev, EDTRR) & sh_eth_get_edtrr_trns(mdp)))
		sh_eth_write(ndev, sh_eth_get_edtrr_trns(mdp), EDTRR);

	return NETDEV_TX_OK;
}
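Example #2 byte-swaps the frame in software when the controller cannot swap in hardware (mdp->cd->hw_swap is false). sh_eth_soft_swap itself is not shown in these examples; a plausible sketch, assuming it swaps the buffer one 32-bit word at a time on little-endian CPUs (the guard macro and the rounding are assumptions):

/* Hypothetical reconstruction of the soft-swap helper called in Example #2.
 * Assumption: the EDMAC consumes big-endian data, so on a little-endian CPU
 * every 32-bit word of the buffer is byte-swapped in place. */
static inline void sh_eth_soft_swap(char *src, int len)
{
#ifdef __LITTLE_ENDIAN__
	u32 *p = (u32 *)src;
	u32 *maxp = p + DIV_ROUND_UP(len, sizeof(u32));

	for (; p < maxp; p++)
		*p = swab32(*p);	/* kernel 32-bit byte swap */
#endif
}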
Example #3
/* Packet transmit function */
static int sh_eth_start_xmit(struct sk_buff *skb, struct net_device *ndev)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    struct sh_eth_txdesc *txdesc;
    u32 entry;
    unsigned long flags;

    spin_lock_irqsave(&mdp->lock, flags);
    if ((mdp->cur_tx - mdp->dirty_tx) >= (TX_RING_SIZE - 4)) {
        if (!sh_eth_txfree(ndev)) {
            netif_stop_queue(ndev);
            spin_unlock_irqrestore(&mdp->lock, flags);
            return 1;
        }
    }
    spin_unlock_irqrestore(&mdp->lock, flags);

    entry = mdp->cur_tx % TX_RING_SIZE;
    mdp->tx_skbuff[entry] = skb;
    txdesc = &mdp->tx_ring[entry];
    txdesc->addr = (u32)(skb->data);
    /* soft swap. */
    swaps((char *)(txdesc->addr & ~0x3), skb->len + 2);
    /* write back */
    __flush_purge_region(skb->data, skb->len);
    if (skb->len < ETHERSMALL)
        txdesc->buffer_length = ETHERSMALL;
    else
        txdesc->buffer_length = skb->len;

    if (entry >= TX_RING_SIZE - 1)
        txdesc->status |= cpu_to_edmac(mdp, TD_TACT | TD_TDLE);
    else
        txdesc->status |= cpu_to_edmac(mdp, TD_TACT);

    mdp->cur_tx++;

    if (!(ctrl_inl(ndev->base_addr + EDTRR) & EDTRR_TRNS))
        ctrl_outl(EDTRR_TRNS, ndev->base_addr + EDTRR);

    ndev->trans_start = jiffies;

    return 0;
}
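These start_xmit functions are installed as the device's transmit hook. In the older kernels matching Example #3 that is ndev->hard_start_xmit; newer kernels go through struct net_device_ops. A minimal, hypothetical registration sketch for the newer scheme (the ops-table name and the open/stop helper names are assumptions, not taken from the examples):

/* Hypothetical wiring of the transmit path; only .ndo_start_xmit points at a
 * function shown above, the other callbacks are placeholders. */
static const struct net_device_ops sh_eth_netdev_ops = {
	.ndo_open	= sh_eth_open,		/* assumed name */
	.ndo_stop	= sh_eth_close,		/* assumed name */
	.ndo_start_xmit	= sh_eth_start_xmit,
};

/* ...in the probe routine, before register_netdev(ndev): */
ndev->netdev_ops = &sh_eth_netdev_ops;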
Example #4
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    struct sh_eth_rxdesc *rxdesc;

    int entry = mdp->cur_rx % RX_RING_SIZE;
    int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
    struct sk_buff *skb;
    u16 pkt_len = 0;
    u32 desc_status, reserve = 0;

    rxdesc = &mdp->rx_ring[entry];
    while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
        desc_status = edmac_to_cpu(mdp, rxdesc->status);
        pkt_len = rxdesc->frame_length;

        if (--boguscnt < 0)
            break;

        if (!(desc_status & RDFEND))
            mdp->stats.rx_length_errors++;

        if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
                           RD_RFS5 | RD_RFS6 | RD_RFS10)) {
            mdp->stats.rx_errors++;
            if (desc_status & RD_RFS1)
                mdp->stats.rx_crc_errors++;
            if (desc_status & RD_RFS2)
                mdp->stats.rx_frame_errors++;
            if (desc_status & RD_RFS3)
                mdp->stats.rx_length_errors++;
            if (desc_status & RD_RFS4)
                mdp->stats.rx_length_errors++;
            if (desc_status & RD_RFS6)
                mdp->stats.rx_missed_errors++;
            if (desc_status & RD_RFS10)
                mdp->stats.rx_over_errors++;
        } else {
            swaps((char *)(rxdesc->addr & ~0x3), pkt_len + 2);
            skb = mdp->rx_skbuff[entry];
            mdp->rx_skbuff[entry] = NULL;
            skb_put(skb, pkt_len);
            skb->protocol = eth_type_trans(skb, ndev);
            netif_rx(skb);
            mdp->stats.rx_packets++;
            mdp->stats.rx_bytes += pkt_len;
        }
        rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
        entry = (++mdp->cur_rx) % RX_RING_SIZE;
        rxdesc = &mdp->rx_ring[entry];
    }

    /* Refill the Rx ring buffers. */
    for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
        entry = mdp->dirty_rx % RX_RING_SIZE;
        rxdesc = &mdp->rx_ring[entry];
        /* The buffer length must be a multiple of 16 bytes. */
        rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;

        if (mdp->rx_skbuff[entry] == NULL) {
            skb = dev_alloc_skb(mdp->rx_buf_sz);
            mdp->rx_skbuff[entry] = skb;
            if (skb == NULL)
                break;	/* Better luck next round. */
            skb->dev = ndev;
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
            reserve = SH7763_SKB_ALIGN
                      - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
            if (reserve)
                skb_reserve(skb, reserve);
#else
            skb_reserve(skb, RX_OFFSET);
#endif
            skb->ip_summed = CHECKSUM_NONE;
            rxdesc->addr = (u32)skb->data & ~0x3UL;
        }
        if (entry >= RX_RING_SIZE - 1)
            rxdesc->status |=
                cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
        else
            rxdesc->status |=
                cpu_to_edmac(mdp, RD_RACT | RD_RFP);
    }

    /* Restart Rx engine if stopped. */
    /* If we don't need to check status, don't. -KDU */
    if (!(ctrl_inl(ndev->base_addr + EDRRR) & EDRRR_R))
        ctrl_outl(EDRRR_R, ndev->base_addr + EDRRR);

    return 0;
}
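Descriptor words are converted with cpu_to_edmac()/edmac_to_cpu() throughout these examples, but the helpers themselves are not shown. A sketch under the assumption that each chip declares a fixed EDMAC descriptor endianness (the edmac_endian field and the enum names are assumptions):

/* Hypothetical converters between CPU byte order and the EDMAC's descriptor
 * byte order; the selector field and constants are assumed names. */
static inline u32 cpu_to_edmac(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return cpu_to_le32(x);
	case EDMAC_BIG_ENDIAN:
		return cpu_to_be32(x);
	}
	return x;
}

static inline u32 edmac_to_cpu(struct sh_eth_private *mdp, u32 x)
{
	switch (mdp->edmac_endian) {
	case EDMAC_LITTLE_ENDIAN:
		return le32_to_cpu(x);
	case EDMAC_BIG_ENDIAN:
		return be32_to_cpu(x);
	}
	return x;
}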
Example #5
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
    u32 ioaddr = ndev->base_addr, reserve = 0;
    struct sh_eth_private *mdp = netdev_priv(ndev);
    int i;
    struct sk_buff *skb;
    struct sh_eth_rxdesc *rxdesc = NULL;
    struct sh_eth_txdesc *txdesc = NULL;
    int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
    int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

    mdp->cur_rx = mdp->cur_tx = 0;
    mdp->dirty_rx = mdp->dirty_tx = 0;

    memset(mdp->rx_ring, 0, rx_ringsize);

    /* build Rx ring buffer */
    for (i = 0; i < RX_RING_SIZE; i++) {
        /* skb */
        mdp->rx_skbuff[i] = NULL;
        skb = dev_alloc_skb(mdp->rx_buf_sz);
        mdp->rx_skbuff[i] = skb;
        if (skb == NULL)
            break;
        skb->dev = ndev; /* Mark as being used by this device. */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
        reserve = SH7763_SKB_ALIGN
                  - ((uint32_t)skb->data & (SH7763_SKB_ALIGN-1));
        if (reserve)
            skb_reserve(skb, reserve);
#else
        skb_reserve(skb, RX_OFFSET);
#endif
        /* RX descriptor */
        rxdesc = &mdp->rx_ring[i];
        rxdesc->addr = (u32)skb->data & ~0x3UL;
        rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

        /* The buffer length must be a multiple of 16 bytes. */
        rxdesc->buffer_length = (mdp->rx_buf_sz + 16) & ~0x0F;
        /* Rx descriptor address set */
        if (i == 0) {
            ctrl_outl((u32)rxdesc, ioaddr + RDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
            ctrl_outl((u32)rxdesc, ioaddr + RDFAR);
#endif
        }
    }

    /* Rx descriptor address set */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
    ctrl_outl((u32)rxdesc, ioaddr + RDFXR);
    ctrl_outl(0x1, ioaddr + RDFFR);
#endif

    mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

    /* Mark the last entry as wrapping the ring. */
    rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

    memset(mdp->tx_ring, 0, tx_ringsize);

    /* build Tx ring buffer */
    for (i = 0; i < TX_RING_SIZE; i++) {
        mdp->tx_skbuff[i] = NULL;
        txdesc = &mdp->tx_ring[i];
        txdesc->status = cpu_to_edmac(mdp, TD_TFP);
        txdesc->buffer_length = 0;
        if (i == 0) {
            /* Tx descriptor address set */
            ctrl_outl((u32)txdesc, ioaddr + TDLAR);
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
            ctrl_outl((u32)txdesc, ioaddr + TDFAR);
#endif
        }
    }

    /* Tx descriptor address set */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
    ctrl_outl((u32)txdesc, ioaddr + TDFXR);
    ctrl_outl(0x1, ioaddr + TDFFR);
#endif

    txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}
Example #6
/* Packet receive function */
static int sh_eth_rx(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	struct sh_eth_rxdesc *rxdesc;

	int entry = mdp->cur_rx % RX_RING_SIZE;
	int boguscnt = (mdp->dirty_rx + RX_RING_SIZE) - mdp->cur_rx;
	struct sk_buff *skb;
	u16 pkt_len = 0;
	u32 desc_status;

	rxdesc = &mdp->rx_ring[entry];
	while (!(rxdesc->status & cpu_to_edmac(mdp, RD_RACT))) {
		desc_status = edmac_to_cpu(mdp, rxdesc->status);
		pkt_len = rxdesc->frame_length;

		if (--boguscnt < 0)
			break;

		if (!(desc_status & RDFEND))
			mdp->stats.rx_length_errors++;

		if (desc_status & (RD_RFS1 | RD_RFS2 | RD_RFS3 | RD_RFS4 |
				   RD_RFS5 | RD_RFS6 | RD_RFS10)) {
			mdp->stats.rx_errors++;
			if (desc_status & RD_RFS1)
				mdp->stats.rx_crc_errors++;
			if (desc_status & RD_RFS2)
				mdp->stats.rx_frame_errors++;
			if (desc_status & RD_RFS3)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS4)
				mdp->stats.rx_length_errors++;
			if (desc_status & RD_RFS6)
				mdp->stats.rx_missed_errors++;
			if (desc_status & RD_RFS10)
				mdp->stats.rx_over_errors++;
		} else {
			if (!mdp->cd->hw_swap)
				sh_eth_soft_swap(
					phys_to_virt(ALIGN(rxdesc->addr, 4)),
					pkt_len + 2);
			skb = mdp->rx_skbuff[entry];
			mdp->rx_skbuff[entry] = NULL;
			if (mdp->cd->rpadir)
				skb_reserve(skb, NET_IP_ALIGN);
			skb_put(skb, pkt_len);
			skb->protocol = eth_type_trans(skb, ndev);
			netif_rx(skb);
			mdp->stats.rx_packets++;
			mdp->stats.rx_bytes += pkt_len;
		}
		rxdesc->status |= cpu_to_edmac(mdp, RD_RACT);
		entry = (++mdp->cur_rx) % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
	}

	/* Refill the Rx ring buffers. */
	for (; mdp->cur_rx - mdp->dirty_rx > 0; mdp->dirty_rx++) {
		entry = mdp->dirty_rx % RX_RING_SIZE;
		rxdesc = &mdp->rx_ring[entry];
		/* The buffer length must be a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);

		if (mdp->rx_skbuff[entry] == NULL) {
			skb = dev_alloc_skb(mdp->rx_buf_sz);
			mdp->rx_skbuff[entry] = skb;
			if (skb == NULL)
				break;	/* Better luck next round. */
			dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
					DMA_FROM_DEVICE);
			skb->dev = ndev;
			sh_eth_set_receive_align(skb);

			skb_checksum_none_assert(skb);
			rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		}
		if (entry >= RX_RING_SIZE - 1)
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP | RD_RDEL);
		else
			rxdesc->status |=
				cpu_to_edmac(mdp, RD_RACT | RD_RFP);
	}

	/* Restart Rx engine if stopped. */
	/* If we don't need to check status, don't. -KDU */
	if (!(sh_eth_read(ndev, EDRRR) & EDRRR_R))
		sh_eth_write(ndev, EDRRR_R, EDRRR);

	return 0;
}
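Examples #6 and #7 call sh_eth_set_receive_align(), which replaces the inline #ifdef alignment that Examples #4 and #5 perform on freshly allocated Rx skbs. Based on that inline code, a plausible sketch of the helper (the consolidation into one function is an assumption):

/* Hypothetical consolidation of the Rx-buffer alignment from Examples #4/#5:
 * on SH7763, push skb->data up to the next SH7763_SKB_ALIGN boundary so the
 * EDMAC's DMA start address is suitably aligned; otherwise reserve RX_OFFSET. */
#if defined(CONFIG_CPU_SUBTYPE_SH7763)
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	int reserve;

	reserve = SH7763_SKB_ALIGN - ((u32)skb->data & (SH7763_SKB_ALIGN - 1));
	if (reserve)
		skb_reserve(skb, reserve);
}
#else
static void sh_eth_set_receive_align(struct sk_buff *skb)
{
	skb_reserve(skb, RX_OFFSET);
}
#endif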
Example #7
/* format skb and descriptor buffer */
static void sh_eth_ring_format(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	int i;
	struct sk_buff *skb;
	struct sh_eth_rxdesc *rxdesc = NULL;
	struct sh_eth_txdesc *txdesc = NULL;
	int rx_ringsize = sizeof(*rxdesc) * RX_RING_SIZE;
	int tx_ringsize = sizeof(*txdesc) * TX_RING_SIZE;

	mdp->cur_rx = mdp->cur_tx = 0;
	mdp->dirty_rx = mdp->dirty_tx = 0;

	memset(mdp->rx_ring, 0, rx_ringsize);

	/* build Rx ring buffer */
	for (i = 0; i < RX_RING_SIZE; i++) {
		/* skb */
		mdp->rx_skbuff[i] = NULL;
		skb = dev_alloc_skb(mdp->rx_buf_sz);
		mdp->rx_skbuff[i] = skb;
		if (skb == NULL)
			break;
		dma_map_single(&ndev->dev, skb->tail, mdp->rx_buf_sz,
				DMA_FROM_DEVICE);
		skb->dev = ndev; /* Mark as being used by this device. */
		sh_eth_set_receive_align(skb);

		/* RX descriptor */
		rxdesc = &mdp->rx_ring[i];
		rxdesc->addr = virt_to_phys(PTR_ALIGN(skb->data, 4));
		rxdesc->status = cpu_to_edmac(mdp, RD_RACT | RD_RFP);

		/* The buffer length must be a multiple of 16 bytes. */
		rxdesc->buffer_length = ALIGN(mdp->rx_buf_sz, 16);
		/* Rx descriptor address set */
		if (i == 0) {
			sh_eth_write(ndev, mdp->rx_desc_dma, RDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->rx_desc_dma, RDFAR);
		}
	}

	mdp->dirty_rx = (u32) (i - RX_RING_SIZE);

	/* Mark the last entry as wrapping the ring. */
	rxdesc->status |= cpu_to_edmac(mdp, RD_RDEL);

	memset(mdp->tx_ring, 0, tx_ringsize);

	/* build Tx ring buffer */
	for (i = 0; i < TX_RING_SIZE; i++) {
		mdp->tx_skbuff[i] = NULL;
		txdesc = &mdp->tx_ring[i];
		txdesc->status = cpu_to_edmac(mdp, TD_TFP);
		txdesc->buffer_length = 0;
		if (i == 0) {
			/* Tx descriptor address set */
			sh_eth_write(ndev, mdp->tx_desc_dma, TDLAR);
			if (sh_eth_is_gether(mdp))
				sh_eth_write(ndev, mdp->tx_desc_dma, TDFAR);
		}
	}

	txdesc->status |= cpu_to_edmac(mdp, TD_TDLE);
}