Example #1
0
/*
 * Allocate the tx/rx descriptor rings (coherent DMA memory) and
 * pre-fill the rx ring with mapped receive buffers.
 *
 * Returns 0 on success or -ENOMEM on any allocation/mapping failure;
 * everything acquired so far is released via ramips_cleanup_dma().
 */
static int
ramips_alloc_dma(struct raeth_priv *re)
{
    int err = -ENOMEM;
    int i;

    re->skb_free_idx = 0;

    /* setup tx ring */
    re->tx = dma_alloc_coherent(&re->netdev->dev,
                                NUM_TX_DESC * sizeof(struct ramips_tx_dma),
                                &re->tx_desc_dma, GFP_ATOMIC);
    if (!re->tx)
        goto err_cleanup;

    memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
    for (i = 0; i < NUM_TX_DESC; i++) {
        /* DONE set: descriptor starts out owned by the CPU, i.e. free */
        re->tx[i].txd2 = TX_DMA_LSO | TX_DMA_DONE;
        re->tx[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
    }

    /* setup rx ring */
    re->rx = dma_alloc_coherent(&re->netdev->dev,
                                NUM_RX_DESC * sizeof(struct ramips_rx_dma),
                                &re->rx_desc_dma, GFP_ATOMIC);
    if (!re->rx)
        goto err_cleanup;

    memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
    for (i = 0; i < NUM_RX_DESC; i++) {
        dma_addr_t dma_addr;
        struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH +
                                                NET_IP_ALIGN);

        if (!new_skb)
            goto err_cleanup;

        /* headroom so the IP header of received frames is aligned */
        skb_reserve(new_skb, NET_IP_ALIGN);

        dma_addr = dma_map_single(&re->netdev->dev, new_skb->data,
                                  MAX_RX_LENGTH, DMA_FROM_DEVICE);
        /* never hand a failed mapping to the hardware */
        if (dma_mapping_error(&re->netdev->dev, dma_addr)) {
            dev_kfree_skb(new_skb);
            goto err_cleanup;
        }
        re->rx_dma[i] = dma_addr;
        re->rx[i].rxd1 = (unsigned int) re->rx_dma[i];
        re->rx[i].rxd2 |= RX_DMA_LSO;
        re->rx_skb[i] = new_skb;
    }

    return 0;

err_cleanup:
    ramips_cleanup_dma(re);
    return err;
}
Example #2
0
/*
 * Allocate the tx/rx DMA descriptor rings and pre-fill the rx ring
 * with receive buffers.
 *
 * Returns 0 on success or -ENOMEM; partially allocated resources are
 * released through ramips_cleanup_dma() on failure.
 */
static int
ramips_alloc_dma(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	int err = -ENOMEM;
	int i;

	priv->skb_free_idx = 0;

	/* setup tx ring */
	priv->tx = dma_alloc_coherent(NULL,
		NUM_TX_DESC * sizeof(struct ramips_tx_dma), &priv->phy_tx, GFP_ATOMIC);
	if (!priv->tx)
		goto err_cleanup;

	for (i = 0; i < NUM_TX_DESC; i++) {
		memset(&priv->tx[i], 0, sizeof(struct ramips_tx_dma));
		/* DONE set: descriptor starts out owned by the CPU, i.e. free */
		priv->tx[i].txd2 |= TX_DMA_LSO | TX_DMA_DONE;
		priv->tx[i].txd4 &= (TX_DMA_QN_MASK | TX_DMA_PN_MASK);
		priv->tx[i].txd4 |= TX_DMA_QN(3) | TX_DMA_PN(1);
	}

	/* setup rx ring */
	priv->rx = dma_alloc_coherent(NULL,
		NUM_RX_DESC * sizeof(struct ramips_rx_dma), &priv->phy_rx, GFP_ATOMIC);
	if (!priv->rx)
		goto err_cleanup;

	memset(priv->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);

		if (!new_skb)
			goto err_cleanup;

		/* 2 bytes of headroom align the IP header of received frames */
		skb_reserve(new_skb, 2);
		/*
		 * Map only the MAX_RX_LENGTH bytes of payload space that
		 * remain after the 2-byte reserve.  The previous code mapped
		 * skb_put(new_skb, 2) for MAX_RX_LENGTH + 2 bytes, which ran
		 * 2 bytes past the allocated buffer and marked 2 bytes of
		 * bogus data as present in the skb.
		 */
		priv->rx[i].rxd1 =
			dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH,
				DMA_FROM_DEVICE);
		priv->rx[i].rxd2 |= RX_DMA_LSO;
		priv->rx_skb[i] = new_skb;
	}

	return 0;

 err_cleanup:
	ramips_cleanup_dma(dev);
	return err;
}
Example #3
0
static int
ramips_eth_stop(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);

	ramips_fe_wr(RAMIPS_PDMA_GLO_CFG, ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
		~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN));
	free_irq(dev->irq, dev);
	netif_stop_queue(dev);
	tasklet_kill(&priv->tx_housekeeping_tasklet);
	tasklet_kill(&priv->rx_tasklet);
	ramips_cleanup_dma(dev);
	printk(KERN_DEBUG "ramips_eth: stopped\n");
	return 0;
}
Example #4
0
/*
 * ndo_stop handler: halt the PDMA engine, mask all frontend
 * interrupts, release the irq, kill the housekeeping tasklets and
 * free the DMA rings.  Always returns 0.
 *
 * NOTE(review): this revision's ramips_fe_wr() takes (value, reg) —
 * the reverse of the older variant in this file; confirm against the
 * helper's definition before copying code between revisions.
 */
static int
ramips_eth_stop(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);

	/* clear the dma-enable/writeback bits to halt the PDMA engine */
	ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) &
		     ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN),
		     RAMIPS_PDMA_GLO_CFG);

	/* disable all interrupts in the hw */
	ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE);

	free_irq(dev->irq, dev);
	netif_stop_queue(dev);
	/* irq is already gone, so the tasklets cannot be rescheduled */
	tasklet_kill(&priv->tx_housekeeping_tasklet);
	tasklet_kill(&priv->rx_tasklet);
	ramips_cleanup_dma(priv);
	printk(KERN_DEBUG "ramips_eth: stopped\n");
	return 0;
}