Example #1
static int
ramips_alloc_dma(struct raeth_priv *re)
{
    int err = -ENOMEM;
    int i;

    re->skb_free_idx = 0;

    /* setup tx ring */
    re->tx = dma_alloc_coherent(&re->netdev->dev,
                                NUM_TX_DESC * sizeof(struct ramips_tx_dma),
                                &re->tx_desc_dma, GFP_ATOMIC);
    if (!re->tx)
        goto err_cleanup;

    memset(re->tx, 0, NUM_TX_DESC * sizeof(struct ramips_tx_dma));
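    /* mark every tx descriptor as done so the ring starts out empty */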
    for (i = 0; i < NUM_TX_DESC; i++) {
        re->tx[i].txd2 = TX_DMA_LSO | TX_DMA_DONE;
        re->tx[i].txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
    }

    /* setup rx ring */
    re->rx = dma_alloc_coherent(&re->netdev->dev,
                                NUM_RX_DESC * sizeof(struct ramips_rx_dma),
                                &re->rx_desc_dma, GFP_ATOMIC);
    if (!re->rx)
        goto err_cleanup;

    memset(re->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
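    /* allocate one rx skb per descriptor and map it for device DMA */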
    for (i = 0; i < NUM_RX_DESC; i++) {
        dma_addr_t dma_addr;
        struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH +
                                                NET_IP_ALIGN);

        if (!new_skb)
            goto err_cleanup;

        skb_reserve(new_skb, NET_IP_ALIGN);

        dma_addr = dma_map_single(&re->netdev->dev, new_skb->data,
                                  MAX_RX_LENGTH, DMA_FROM_DEVICE);
        re->rx_dma[i] = dma_addr;
        re->rx[i].rxd1 = (unsigned int) re->rx_dma[i];
        re->rx[i].rxd2 |= RX_DMA_LSO;
        re->rx_skb[i] = new_skb;
    }

    return 0;

err_cleanup:
    ramips_cleanup_dma(re);
    return err;
}
Example #2
static int
ramips_alloc_dma(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	int err = -ENOMEM;
	int i;

	priv->skb_free_idx = 0;

	/* setup tx ring */
	priv->tx = dma_alloc_coherent(NULL,
		NUM_TX_DESC * sizeof(struct ramips_tx_dma), &priv->phy_tx, GFP_ATOMIC);
	if (!priv->tx)
		goto err_cleanup;

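	/* mark each tx descriptor done and assign it to queue 3 on port 1 */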
	for (i = 0; i < NUM_TX_DESC; i++) {
		memset(&priv->tx[i], 0, sizeof(struct ramips_tx_dma));
		priv->tx[i].txd2 |= TX_DMA_LSO | TX_DMA_DONE;
		priv->tx[i].txd4 &= (TX_DMA_QN_MASK | TX_DMA_PN_MASK);
		priv->tx[i].txd4 |= TX_DMA_QN(3) | TX_DMA_PN(1);
	}

	/* setup rx ring */
	priv->rx = dma_alloc_coherent(NULL,
		NUM_RX_DESC * sizeof(struct ramips_rx_dma), &priv->phy_rx, GFP_ATOMIC);
	if (!priv->rx)
		goto err_cleanup;

	memset(priv->rx, 0, sizeof(struct ramips_rx_dma) * NUM_RX_DESC);
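	/* allocate and DMA-map a receive buffer for every rx descriptor */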
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct sk_buff *new_skb = dev_alloc_skb(MAX_RX_LENGTH + 2);

		if (!new_skb)
			goto err_cleanup;

		skb_reserve(new_skb, 2);
		priv->rx[i].rxd1 =
			dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH,
				DMA_FROM_DEVICE);
		priv->rx[i].rxd2 |= RX_DMA_LSO;
		priv->rx_skb[i] = new_skb;
	}

	return 0;

 err_cleanup:
	ramips_cleanup_dma(dev);
	return err;
}
Example #3
static void
ramips_ring_setup(struct raeth_priv *re)
{
	int len;
	int i;

	memset(re->tx_info, 0, NUM_TX_DESC * sizeof(struct raeth_tx_info));

	len = NUM_TX_DESC * sizeof(struct ramips_tx_dma);
	memset(re->tx, 0, len);

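	/* reinitialize the tx descriptors and warn about any stale skbs */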
	for (i = 0; i < NUM_TX_DESC; i++) {
		struct raeth_tx_info *txi;
		struct ramips_tx_dma *txd;

		txd = &re->tx[i];
		txd->txd4 = TX_DMA_QN(3) | TX_DMA_PN(1);
		txd->txd2 = TX_DMA_LSO | TX_DMA_DONE;

		txi = &re->tx_info[i];
		txi->tx_desc = txd;
		if (txi->tx_skb != NULL) {
			netdev_warn(re->netdev,
				    "dirty skb for TX desc %d\n", i);
			txi->tx_skb = NULL;
		}
	}

	len = NUM_RX_DESC * sizeof(struct ramips_rx_dma);
	memset(re->rx, 0, len);

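	/* map each preallocated rx skb and point its descriptor at the buffer */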
	for (i = 0; i < NUM_RX_DESC; i++) {
		struct raeth_rx_info *rxi;
		struct ramips_rx_dma *rxd;
		dma_addr_t dma_addr;

		rxd = &re->rx[i];
		rxi = &re->rx_info[i];
		BUG_ON(rxi->rx_skb == NULL);
		dma_addr = dma_map_single(&re->netdev->dev, rxi->rx_skb->data,
					  MAX_RX_LENGTH, DMA_FROM_DEVICE);
		rxi->rx_dma = dma_addr;
		rxi->rx_desc = rxd;

		rxd->rxd1 = (unsigned int) dma_addr;
		rxd->rxd2 = RX_DMA_LSO;
	}

	/* flush descriptors */
	wmb();
}