static int bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i, err;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;
		for (j = 0; j < BGMAC_RX_RING_SLOTS; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err)
				goto error;

			bgmac_dma_rx_setup_desc(bgmac, ring, j);
		}

		bgmac_dma_rx_update_index(bgmac, ring);
	}

	return 0;

error:
	bgmac_dma_cleanup(bgmac);
	return err;
}
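bgmac_dma_rx_update_index() itself is not part of this listing. A minimal sketch of what it plausibly does, reconstructed from the inline BGMAC_DMA_RX_INDEX write in the older variant further below (the register takes a byte offset relative to ring->index_base); the dma_wmb() barrier and the exact body are assumptions, not taken from the listing:

/* Sketch only: assumed helper that publishes the RX ring state to the
 * hardware by writing the ring's current end offset, relative to
 * ring->index_base, into the RX index register.
 */
static void bgmac_dma_rx_update_index(struct bgmac *bgmac,
				      struct bgmac_dma_ring *ring)
{
	/* Make descriptor writes visible before telling the hardware */
	dma_wmb();

	bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
		    ring->index_base +
		    ring->end * sizeof(struct bgmac_dma_desc));
}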
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipinit */
static void bgmac_chip_init(struct bgmac *bgmac, bool full_init)
{
	struct bgmac_dma_ring *ring;
	int i;

	/* 1 interrupt per received frame */
	bgmac_write(bgmac, BGMAC_INT_RECV_LAZY, 1 << BGMAC_IRL_FC_SHIFT);

	/* Enable 802.3x tx flow control (honor received PAUSE frames) */
	bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_RPI, 0, true);

	bgmac_set_rx_mode(bgmac->net_dev);

	bgmac_write_mac_address(bgmac, bgmac->net_dev->dev_addr);

	if (bgmac->loopback)
		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
	else
		bgmac_cmdcfg_maskset(bgmac, ~BGMAC_CMDCFG_ML, 0, false);

	bgmac_write(bgmac, BGMAC_RXMAX_LENGTH, 32 + ETHER_MAX_LEN);

	if (full_init) {
		bgmac_dma_init(bgmac);
		if (1) /* FIXME: is there any case we don't want IRQs? */
			bgmac_chip_intrs_on(bgmac);
	} else {
		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
			ring = &bgmac->rx_ring[i];

			bgmac_dma_rx_enable(bgmac, ring);
		}
	}

	bgmac_enable(bgmac);
}
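bgmac_cmdcfg_maskset() is used above as a read-modify-write helper for the CMDCFG register: the current value is ANDed with the mask, ORed with the set bits, and written back, with the force flag requesting a write even when nothing changed. A simplified sketch of that semantics; the real helper also toggles the MAC software-reset bit around the update, which is omitted here as an assumption about detail not shown in the listing:

/* Simplified sketch of the read-modify-write semantics only; the reset
 * sequencing the real driver performs around the write is left out.
 */
static void bgmac_cmdcfg_maskset(struct bgmac *bgmac, u32 mask, u32 set,
				 bool force)
{
	u32 cmdcfg = bgmac_read(bgmac, BGMAC_CMDCFG);
	u32 new_val = (cmdcfg & mask) | set;

	/* Skip the register write if nothing changed, unless forced */
	if (new_val == cmdcfg && !force)
		return;

	bgmac_write(bgmac, BGMAC_CMDCFG, new_val);
}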
static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_tx_enable(bgmac, ring);

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		if (!ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));
		if (ring->unaligned)
			bgmac_dma_rx_enable(bgmac, ring);

		for (j = 0; j < ring->num_slots; j++)
			bgmac_dma_rx_setup_desc(bgmac, ring, j);

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->index_base +
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}
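The ring->index_base offset used above exists because of the unaligned-addressing workaround: when a ring's base address is only written after the engine is enabled, the hardware index registers are relative to the ring's DMA address rather than to zero. A sketch of how the field could be initialized during ring allocation; the placement and exact form are assumptions, only the field and register names come from the listing:

	/* Sketch, assumed to live in ring allocation code: with unaligned
	 * addressing the hardware index registers are offset by the low
	 * 32 bits of the ring's DMA base, so remember that offset and add
	 * it to every BGMAC_DMA_RX_INDEX / BGMAC_DMA_TX_INDEX write.
	 */
	if (ring->unaligned)
		ring->index_base = lower_32_bits(ring->dma_base);
	else
		ring->index_base = 0;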
static void bgmac_dma_init(struct bgmac *bgmac)
{
	struct bgmac_dma_ring *ring;
	struct bgmac_dma_desc *dma_desc;
	u32 ctl0, ctl1;
	int i;

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];

		/* We don't implement unaligned addressing, so enable first */
		bgmac_dma_tx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_TX_RINGHI,
			    upper_32_bits(ring->dma_base));

		ring->start = 0;
		ring->end = 0;	/* Points the slot that should *not* be read */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];

		/* We don't implement unaligned addressing, so enable first */
		bgmac_dma_rx_enable(bgmac, ring);
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGLO,
			    lower_32_bits(ring->dma_base));
		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_RINGHI,
			    upper_32_bits(ring->dma_base));

		for (j = 0, dma_desc = ring->cpu_base; j < ring->num_slots;
		     j++, dma_desc++) {
			ctl0 = ctl1 = 0;

			if (j == ring->num_slots - 1)
				ctl0 |= BGMAC_DESC_CTL0_EOT;
			ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;
			/* Is there any BGMAC device that requires extension? */
			/* ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT) &
			 * B43_DMA64_DCTL1_ADDREXT_MASK;
			 */

			dma_desc->addr_low =
				cpu_to_le32(lower_32_bits(ring->slots[j].dma_addr));
			dma_desc->addr_high =
				cpu_to_le32(upper_32_bits(ring->slots[j].dma_addr));
			dma_desc->ctl0 = cpu_to_le32(ctl0);
			dma_desc->ctl1 = cpu_to_le32(ctl1);
		}

		bgmac_write(bgmac, ring->mmio_base + BGMAC_DMA_RX_INDEX,
			    ring->num_slots * sizeof(struct bgmac_dma_desc));

		ring->start = 0;
		ring->end = 0;
	}
}
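The two newer variants above call bgmac_dma_rx_setup_desc() instead of filling each descriptor inline. A sketch of that helper, reconstructed from the loop body of this oldest variant; the signature follows the call sites above, while the body is an assumption based on the inline code:

/* Sketch: per-slot RX descriptor setup, reconstructed from the inline loop
 * above. Writes the buffer address and length and marks the last slot with
 * the end-of-table flag.
 */
static void bgmac_dma_rx_setup_desc(struct bgmac *bgmac,
				    struct bgmac_dma_ring *ring, int desc_idx)
{
	struct bgmac_dma_desc *dma_desc = ring->cpu_base + desc_idx;
	u32 ctl0 = 0, ctl1 = 0;

	if (desc_idx == ring->num_slots - 1)
		ctl0 |= BGMAC_DESC_CTL0_EOT;
	ctl1 |= BGMAC_RX_BUF_SIZE & BGMAC_DESC_CTL1_LEN;

	dma_desc->addr_low =
		cpu_to_le32(lower_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->addr_high =
		cpu_to_le32(upper_32_bits(ring->slots[desc_idx].dma_addr));
	dma_desc->ctl0 = cpu_to_le32(ctl0);
	dma_desc->ctl1 = cpu_to_le32(ctl1);
}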