Example #1
0
/*
 * sh_eth_close - device close function (ndo_stop handler)
 * @ndev: network device being shut down
 *
 * Teardown order is deliberate: stop the TX queue, mask all device
 * interrupts, halt the chip's DMA engines, detach the PHY, release the
 * IRQ, and only then free the software resources (timer, skb rings,
 * descriptor rings).
 *
 * Always returns 0.
 */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	/* Stop the stack from handing us new TX packets. */
	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + EDTRR);
	writel(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	/* Wait for any running timer callback to finish before freeing
	 * the structures it may touch. (Timer's purpose is not visible
	 * in this chunk.)
	 */
	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	/* NOTE(review): dma_free_coherent() is passed a NULL device here
	 * (matching the NULL used at allocation time in sh_eth_ring_init);
	 * modern kernels require the real &pdev->dev — confirm against the
	 * target kernel version.
	 */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}
Example #2
0
/*
 * sh_eth_ring_init - allocate skb pointer rings and DMA descriptor rings
 * @ndev: network device to initialize rings for
 *
 * Allocates the RX/TX skb pointer arrays and the coherent DMA memory
 * for the RX/TX descriptor rings.  On failure every resource acquired
 * so far is released via the goto cleanup ladder.
 *
 * Returns 0 on success or -ENOMEM on allocation failure.
 */
static int sh_eth_ring_init(struct net_device *ndev)
{
    struct sh_eth_private *mdp = netdev_priv(ndev);
    int rx_ringsize, tx_ringsize, ret = 0;

    /*
     * +26 gets the maximum ethernet encapsulation, +7 & ~7 because the
     * card needs room to do 8 byte alignment, +2 so we can reserve
     * the first 2 bytes, and +16 gets room for the status word from the
     * card.
     */
    mdp->rx_buf_sz = (ndev->mtu <= 1492 ? PKT_BUF_SZ :
                      (((ndev->mtu + 26 + 7) & ~7) + 2 + 16));

    /* Allocate RX and TX skb rings */
    mdp->rx_skbuff = kmalloc(sizeof(*mdp->rx_skbuff) * RX_RING_SIZE,
                             GFP_KERNEL);
    if (!mdp->rx_skbuff) {
        printk(KERN_ERR "%s: Cannot allocate Rx skb\n", ndev->name);
        ret = -ENOMEM;
        return ret;
    }

    mdp->tx_skbuff = kmalloc(sizeof(*mdp->tx_skbuff) * TX_RING_SIZE,
                             GFP_KERNEL);
    if (!mdp->tx_skbuff) {
        printk(KERN_ERR "%s: Cannot allocate Tx skb\n", ndev->name);
        ret = -ENOMEM;
        goto skb_ring_free;
    }

    /* Allocate all Rx descriptors. */
    rx_ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
    mdp->rx_ring = dma_alloc_coherent(NULL, rx_ringsize, &mdp->rx_desc_dma,
                                      GFP_KERNEL);

    if (!mdp->rx_ring) {
        printk(KERN_ERR "%s: Cannot allocate Rx Ring (size %d bytes)\n",
               ndev->name, rx_ringsize);
        ret = -ENOMEM;
        /*
         * Nothing DMA-allocated yet: jumping to desc_ring_free here
         * would hand a NULL rx_ring to dma_free_coherent(), which
         * (unlike kfree) does not tolerate NULL.  Only the skb rings
         * need freeing.
         */
        goto skb_ring_free;
    }

    mdp->dirty_rx = 0;

    /* Allocate all Tx descriptors. */
    tx_ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
    mdp->tx_ring = dma_alloc_coherent(NULL, tx_ringsize, &mdp->tx_desc_dma,
                                      GFP_KERNEL);
    if (!mdp->tx_ring) {
        printk(KERN_ERR "%s: Cannot allocate Tx Ring (size %d bytes)\n",
               ndev->name, tx_ringsize);
        ret = -ENOMEM;
        goto desc_ring_free;
    }
    return ret;

desc_ring_free:
    /* free DMA buffer (rx_ring is valid on every path reaching here) */
    dma_free_coherent(NULL, rx_ringsize, mdp->rx_ring, mdp->rx_desc_dma);

skb_ring_free:
    /* Free Rx and Tx skb ring buffer */
    sh_eth_ring_free(ndev);

    return ret;
}