bool bcma_core_is_enabled(struct bcma_device *core)
{
	if ((bcma_aread32(core, BCMA_IOCTL) & (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC))
	    != BCMA_IOCTL_CLK)
		return false;
	if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
		return false;
	return true;
}
static void bcma_core_disable(struct bcma_device *core, u32 flags)
{
	if (bcma_aread32(core, BCMA_RESET_CTL) & BCMA_RESET_CTL_RESET)
		return;

	bcma_awrite32(core, BCMA_IOCTL, flags);
	bcma_aread32(core, BCMA_IOCTL);
	udelay(10);

	bcma_awrite32(core, BCMA_RESET_CTL, BCMA_RESET_CTL_RESET);
	udelay(1);
}
static u64 supported_dma_mask(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CPTCFG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
#ifdef CPTCFG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return DMA_BIT_MASK(64);
		break;
#endif
	}

	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return DMA_BIT_MASK(32);

	return DMA_BIT_MASK(30);
}
int bcma_core_enable(struct bcma_device *core, u32 flags)
{
	bcma_core_disable(core, flags);

	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | BCMA_IOCTL_FGC | flags));
	bcma_aread32(core, BCMA_IOCTL);

	bcma_awrite32(core, BCMA_RESET_CTL, 0);
	udelay(1);

	bcma_awrite32(core, BCMA_IOCTL, (BCMA_IOCTL_CLK | flags));
	bcma_aread32(core, BCMA_IOCTL);
	udelay(1);

	return 0;
}
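Taken together, bcma_core_disable() and bcma_core_enable() form the core reset handshake that bus drivers use at probe time. A minimal sketch of that call order, assuming a hypothetical my_gmac_probe() that is not part of the code above:

/* Hypothetical probe-time reset, shown only to illustrate the call order. */
static int my_gmac_probe(struct bcma_device *core)
{
	int err;

	/* bcma_core_enable() calls bcma_core_disable() internally, so one
	 * call is enough to get a cleanly reset, clocked core.
	 */
	err = bcma_core_enable(core, 0);
	if (err)
		return err;

	/* Sanity check: the core should now report clock-on and not-in-reset */
	if (!bcma_core_is_enabled(core))
		return -ENODEV;

	return 0;
}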
u32 bcma_core_dma_translation(struct bcma_device *core)
{
	switch (core->bus->hosttype) {
	case BCMA_HOSTTYPE_SOC:
		return 0;
	case BCMA_HOSTTYPE_PCI:
		if (bcma_aread32(core, BCMA_IOST) & BCMA_IOST_DMA64)
			return BCMA_DMA_TRANSLATION_DMA64_CMT;
		else
			return BCMA_DMA_TRANSLATION_DMA32_CMT;
	default:
		pr_err("DMA translation unknown for host %d\n",
		       core->bus->hosttype);
	}
	return BCMA_DMA_TRANSLATION_NONE;
}
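On PCI hosts the returned value selects the backplane-to-PCI translation window, and drivers fold it into the address word they program into a DMA engine (the high word for the 64-bit window, the low word for the 32-bit one). A minimal sketch of that pattern; my_dma_addr_word() and MY_TRANSLATION_MASK are illustrative names, not taken from the bcma headers:

/* Hypothetical helper: fold the translation bits into one 32-bit address
 * word before writing it to a ring register. The mask is an assumption.
 */
#define MY_TRANSLATION_MASK	0xc0000000

static u32 my_dma_addr_word(struct bcma_device *core, dma_addr_t addr,
			    bool high_word)
{
	u32 trans = bcma_core_dma_translation(core);
	u32 word = high_word ? upper_32_bits(addr) : lower_32_bits(addr);

	/* Replace the window-select bits with the host's translation */
	word &= ~MY_TRANSLATION_MASK;
	word |= trans;

	return word;
}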
static void bgmac_miiconfig(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	u8 imode;

	if (bgmac_is_bcm4707_family(bgmac)) {
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
			      BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}
static void bgmac_miiconfig(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_chipinfo *ci = &core->bus->chipinfo;
	u8 imode;

	if (ci->id == BCMA_CHIP_ID_BCM4707 ||
	    ci->id == BCMA_CHIP_ID_BCM53018) {
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) | 0x40 |
			      BGMAC_BCMA_IOCTL_SW_CLKEN);
		bgmac->mac_speed = SPEED_2500;
		bgmac->mac_duplex = DUPLEX_FULL;
		bgmac_mac_speed(bgmac);
	} else {
		imode = (bgmac_read(bgmac, BGMAC_DEV_STATUS) &
			BGMAC_DS_MM_MASK) >> BGMAC_DS_MM_SHIFT;
		if (imode == 0 || imode == 1) {
			bgmac->mac_speed = SPEED_100;
			bgmac->mac_duplex = DUPLEX_FULL;
			bgmac_mac_speed(bgmac);
		}
	}
}
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_TX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		ring = &bgmac->rx_ring[i];
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = BGMAC_RX_RING_SLOTS * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}
/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/chipreset */
static void bgmac_chip_reset(struct bgmac *bgmac)
{
	struct bcma_device *core = bgmac->core;
	struct bcma_bus *bus = core->bus;
	struct bcma_chipinfo *ci = &bus->chipinfo;
	u32 flags;
	u32 iost;
	int i;

	if (bcma_core_is_enabled(core)) {
		if (!bgmac->stats_grabbed) {
			/* bgmac_chip_stats_update(bgmac); */
			bgmac->stats_grabbed = true;
		}

		for (i = 0; i < BGMAC_MAX_TX_RINGS; i++)
			bgmac_dma_tx_reset(bgmac, &bgmac->tx_ring[i]);

		bgmac_cmdcfg_maskset(bgmac, ~0, BGMAC_CMDCFG_ML, false);
		udelay(1);

		for (i = 0; i < BGMAC_MAX_RX_RINGS; i++)
			bgmac_dma_rx_reset(bgmac, &bgmac->rx_ring[i]);

		/* TODO: Clear software multicast filter list */
	}

	iost = bcma_aread32(core, BCMA_IOST);
	if ((ci->id == BCMA_CHIP_ID_BCM5357 &&
	     ci->pkg == BCMA_PKG_ID_BCM47186) ||
	    (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
	    (ci->id == BCMA_CHIP_ID_BCM53572 &&
	     ci->pkg == BCMA_PKG_ID_BCM47188))
		iost &= ~BGMAC_BCMA_IOST_ATTACHED;

	/* 3GMAC: for BCM4707 & BCM47094, only do core reset at bgmac_probe() */
	if (ci->id != BCMA_CHIP_ID_BCM4707 &&
	    ci->id != BCMA_CHIP_ID_BCM47094) {
		flags = 0;
		if (iost & BGMAC_BCMA_IOST_ATTACHED) {
			flags = BGMAC_BCMA_IOCTL_SW_CLKEN;
			if (!bgmac->has_robosw)
				flags |= BGMAC_BCMA_IOCTL_SW_RESET;
		}
		bcma_core_enable(core, flags);
	}

	/* Request Misc PLL for corerev > 2 */
	if (core->id.rev > 2 && !bgmac_is_bcm4707_family(bgmac)) {
		bgmac_set(bgmac, BCMA_CLKCTLST,
			  BGMAC_BCMA_CLKCTLST_MISC_PLL_REQ);
		bgmac_wait_value(bgmac->core, BCMA_CLKCTLST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 BGMAC_BCMA_CLKCTLST_MISC_PLL_ST,
				 1000);
	}

	if (ci->id == BCMA_CHIP_ID_BCM5357 ||
	    ci->id == BCMA_CHIP_ID_BCM4749 ||
	    ci->id == BCMA_CHIP_ID_BCM53572) {
		struct bcma_drv_cc *cc = &bgmac->core->bus->drv_cc;
		u8 et_swtype = 0;
		u8 sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHY |
			     BGMAC_CHIPCTL_1_IF_TYPE_MII;
		char buf[4];

		if (bcm47xx_nvram_getenv("et_swtype", buf, sizeof(buf)) > 0) {
			if (kstrtou8(buf, 0, &et_swtype))
				bgmac_err(bgmac, "Failed to parse et_swtype (%s)\n",
					  buf);
			et_swtype &= 0x0f;
			et_swtype <<= 4;
			sw_type = et_swtype;
		} else if (ci->id == BCMA_CHIP_ID_BCM5357 &&
			   ci->pkg == BCMA_PKG_ID_BCM5358) {
			sw_type = BGMAC_CHIPCTL_1_SW_TYPE_EPHYRMII;
		} else if ((ci->id == BCMA_CHIP_ID_BCM5357 &&
			    ci->pkg == BCMA_PKG_ID_BCM47186) ||
			   (ci->id == BCMA_CHIP_ID_BCM4749 && ci->pkg == 10) ||
			   (ci->id == BCMA_CHIP_ID_BCM53572 &&
			    ci->pkg == BCMA_PKG_ID_BCM47188)) {
			sw_type = BGMAC_CHIPCTL_1_IF_TYPE_RGMII |
				  BGMAC_CHIPCTL_1_SW_TYPE_RGMII;
		}
		bcma_chipco_chipctl_maskset(cc, 1,
					    ~(BGMAC_CHIPCTL_1_IF_TYPE_MASK |
					      BGMAC_CHIPCTL_1_SW_TYPE_MASK),
					    sw_type);
	}

	if (iost & BGMAC_BCMA_IOST_ATTACHED && !bgmac->has_robosw)
		bcma_awrite32(core, BCMA_IOCTL,
			      bcma_aread32(core, BCMA_IOCTL) &
			      ~BGMAC_BCMA_IOCTL_SW_RESET);

	/* http://bcm-v4.sipsolutions.net/mac-gbit/gmac/gmac_reset
	 * Specs don't say about using BGMAC_CMDCFG_SR, but in this routine
	 * BGMAC_CMDCFG is read _after_ putting the chip in reset, so it has
	 * to be kept until the MAC is taken out of reset.
	 */
	bgmac_cmdcfg_maskset(bgmac,
			     ~(BGMAC_CMDCFG_TE |
			       BGMAC_CMDCFG_RE |
			       BGMAC_CMDCFG_RPI |
			       BGMAC_CMDCFG_TAI |
			       BGMAC_CMDCFG_HD |
			       BGMAC_CMDCFG_ML |
			       BGMAC_CMDCFG_CFE |
			       BGMAC_CMDCFG_RL |
			       BGMAC_CMDCFG_RED |
			       BGMAC_CMDCFG_PE |
			       BGMAC_CMDCFG_TPI |
			       BGMAC_CMDCFG_PAD_EN |
			       BGMAC_CMDCFG_PF),
			     BGMAC_CMDCFG_PROM |
			     BGMAC_CMDCFG_NLC |
			     BGMAC_CMDCFG_CFE |
			     BGMAC_CMDCFG_SR(core->id.rev),
			     false);
	bgmac->mac_speed = SPEED_UNKNOWN;
	bgmac->mac_duplex = DUPLEX_UNKNOWN;

	bgmac_clear_mib(bgmac);
	if (core->id.id == BCMA_CORE_4706_MAC_GBIT)
		bcma_maskset32(bgmac->cmn, BCMA_GMAC_CMN_PHY_CTL, ~0,
			       BCMA_GMAC_CMN_PC_MTE);
	else
		bgmac_set(bgmac, BGMAC_PHY_CNTL, BGMAC_PC_MTE);
	bgmac_miiconfig(bgmac);
	bgmac_phy_init(bgmac);

	netdev_reset_queue(bgmac->net_dev);
}
static int bgmac_dma_alloc(struct bgmac *bgmac)
{
	struct device *dma_dev = bgmac->core->dma_dev;
	struct bgmac_dma_ring *ring;
	static const u16 ring_base[] = { BGMAC_DMA_BASE0, BGMAC_DMA_BASE1,
					 BGMAC_DMA_BASE2, BGMAC_DMA_BASE3, };
	int size; /* ring size: different for Tx and Rx */
	int err;
	int i;

	BUILD_BUG_ON(BGMAC_MAX_TX_RINGS > ARRAY_SIZE(ring_base));
	BUILD_BUG_ON(BGMAC_MAX_RX_RINGS > ARRAY_SIZE(ring_base));

	if (!(bcma_aread32(bgmac->core, BCMA_IOST) & BCMA_IOST_DMA64)) {
		bgmac_err(bgmac, "Core does not report 64-bit DMA\n");
		return -ENOTSUPP;
	}

	for (i = 0; i < BGMAC_MAX_TX_RINGS; i++) {
		ring = &bgmac->tx_ring[i];
		ring->num_slots = BGMAC_TX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of TX ring 0x%X failed\n",
				  ring->mmio_base);
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_TX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* No need to alloc TX slots yet */
	}

	for (i = 0; i < BGMAC_MAX_RX_RINGS; i++) {
		int j;

		ring = &bgmac->rx_ring[i];
		ring->num_slots = BGMAC_RX_RING_SLOTS;
		ring->mmio_base = ring_base[i];

		/* Alloc ring of descriptors */
		size = ring->num_slots * sizeof(struct bgmac_dma_desc);
		ring->cpu_base = dma_zalloc_coherent(dma_dev, size,
						     &ring->dma_base,
						     GFP_KERNEL);
		if (!ring->cpu_base) {
			bgmac_err(bgmac, "Allocation of RX ring 0x%X failed\n",
				  ring->mmio_base);
			err = -ENOMEM;
			goto err_dma_free;
		}
		if (ring->dma_base & 0xC0000000)
			bgmac_warn(bgmac, "DMA address using 0xC0000000 bit(s), it may need translation trick\n");

		ring->unaligned = bgmac_dma_unaligned(bgmac, ring,
						      BGMAC_DMA_RING_RX);
		if (ring->unaligned)
			ring->index_base = lower_32_bits(ring->dma_base);
		else
			ring->index_base = 0;

		/* Alloc RX slots */
		for (j = 0; j < ring->num_slots; j++) {
			err = bgmac_dma_rx_skb_for_slot(bgmac, &ring->slots[j]);
			if (err) {
				bgmac_err(bgmac, "Can't allocate skb for slot in RX ring\n");
				goto err_dma_free;
			}
		}
	}

	return 0;

err_dma_free:
	bgmac_dma_free(bgmac);
	return -ENOMEM;
}
static u32 bcma_bgmac_idm_read(struct bgmac *bgmac, u16 offset)
{
	return bcma_aread32(bgmac->bcma.core, offset);
}
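The read accessor above would be paired with a matching write accessor that forwards to bcma_awrite32(); a sketch of the expected mirror image is shown here for completeness (the function name is assumed, not taken from the listing above):

/* Companion to bcma_bgmac_idm_read(): forward agent/wrapper ("IDM")
 * register writes to the bcma bus accessor.
 */
static void bcma_bgmac_idm_write(struct bgmac *bgmac, u16 offset, u32 value)
{
	bcma_awrite32(bgmac->bcma.core, offset, value);
}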