static void __b44_set_rx_mode(struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        u32 val;
        int i = 0;
        unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };

        val = br32(bp, B44_RXCONFIG);
        val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
        if (dev->flags & IFF_PROMISC) {
                val |= RXCONFIG_PROMISC;
                bw32(bp, B44_RXCONFIG, val);
        } else {
                __b44_set_mac_addr(bp);

                if (dev->flags & IFF_ALLMULTI)
                        val |= RXCONFIG_ALLMULTI;
                else
                        i = __b44_load_mcast(bp, dev);

                for (; i < 64; i++)
                        __b44_cam_write(bp, zero, i);

                bw32(bp, B44_RXCONFIG, val);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}
static void ssb_core_reset(struct b44_private *bp)
{
        u32 val;
        const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

        ssb_core_disable(bp);

        bw32(bp, B44_SBTMSLOW, mask);
        bflush(bp, B44_SBTMSLOW, 1);

        /* Clear SERR if set, this is a hw bug workaround. */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & SBIMSTATE_BAD)
                bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, SBTMSLOW_CLOCK);
        bflush(bp, B44_SBTMSLOW, 1);
}
static void ssb_core_reset(struct b44 *bp)
{
        u32 val;

        ssb_core_disable(bp);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        /* Clear SERR if set, this is a hw bug workaround. */
        if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
                bw32(bp, B44_SBTMSHIGH, 0);

        val = br32(bp, B44_SBIMSTATE);
        if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
                bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
        br32(bp, B44_SBTMSLOW);
        udelay(1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
        unsigned long reg;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                br32(bp, reg);
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                br32(bp, reg);
}
static void b44_stats_update(struct b44 *bp)
{
        unsigned long reg;
        u32 *val;

        val = &bp->hw_stats.tx_good_octets;
        for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
                *val++ += br32(bp, reg);

        val = &bp->hw_stats.rx_good_octets;
        for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
                *val++ += br32(bp, reg);
}
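/*
 * The pointer walk in b44_stats_update() above only works if the counters
 * in struct b44_hw_stats are consecutive u32 fields laid out in exactly the
 * same order as the hardware MIB registers between B44_TX_GOOD_O/B44_TX_PAUSE
 * and B44_RX_GOOD_O/B44_RX_NPAUSE.  A minimal sketch of that assumed layout
 * follows; only tx_good_octets and rx_good_octets appear in the code above,
 * the other field names here are illustrative placeholders.
 */
#if 0   /* illustrative sketch only, not the driver's actual definition */
struct b44_hw_stats {
        u32 tx_good_octets;     /* B44_TX_GOOD_O                          */
        /* ... one u32 per TX MIB register, in register order ...         */
        u32 tx_pause;           /* B44_TX_PAUSE  (assumed last TX field)  */
        u32 rx_good_octets;     /* B44_RX_GOOD_O                          */
        /* ... one u32 per RX MIB register, in register order ...         */
        u32 rx_npause;          /* B44_RX_NPAUSE (assumed last RX field)  */
};
#endif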
static void ssb_core_disable(struct b44 *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                                SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
        br32(bp, B44_SBTMSLOW);
        udelay(1);
}
static void b44_init_hw(struct b44 *bp)
{
        u32 val;

        b44_chip_reset(bp);
        b44_phy_reset(bp);
        b44_setup_phy(bp);

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too. */
        __b44_set_rx_mode(bp->dev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, bp->dev->mtu + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_WMARK, 56); /* XXX magic */
        bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
        bw32(bp, B44_DMATX_ADDR, bp->tx_ring_dma + bp->dma_offset);
        bw32(bp, B44_DMARX_CTRL, (DMARX_CTRL_ENABLE |
                                  (bp->rx_offset << DMARX_CTRL_ROSHIFT)));
        bw32(bp, B44_DMARX_ADDR, bp->rx_ring_dma + bp->dma_offset);

        bw32(bp, B44_DMARX_PTR, bp->rx_pending);
        bp->rx_prod = bp->rx_pending;

        bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
}
static void b44_tx(struct b44 *bp)
{
        u32 cur, cons;

        cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
        cur /= sizeof(struct dma_desc);

        /* XXX needs updating when NETIF_F_SG is supported */
        for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
                struct ring_info *rp = &bp->tx_buffers[cons];
                struct sk_buff *skb = rp->skb;

                if (unlikely(skb == NULL))
                        BUG();

                pci_unmap_single(bp->pdev,
                                 pci_unmap_addr(rp, mapping),
                                 skb->len,
                                 PCI_DMA_TODEVICE);
                rp->skb = NULL;
                dev_kfree_skb_irq(skb);
        }

        bp->tx_cons = cons;
        if (netif_queue_stopped(bp->dev) &&
            TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
                netif_wake_queue(bp->dev);

        bw32(bp, B44_GPTIMER, 0);
}
/** Poll for completed and received packets
 *
 * @v netdev	Network device
 */
static void b44_poll(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        u32 istat;

        /* Interrupt status */
        istat = br32(bp, B44_ISTAT);
        istat &= IMASK_DEF;     /* only the events we care about */

        if (!istat)
                return;
        if (istat & ISTAT_TX)
                b44_tx_complete(bp);
        if (istat & ISTAT_RX)
                b44_process_rx_packets(bp);
        if (istat & ISTAT_ERRORS) {
                DBG("b44 error istat=0x%08x\n", istat);

                /* Reset B44 core partially to avoid long waits */
                b44_irq(bp->netdev, 0);
                b44_halt(bp);
                b44_init_tx_ring(bp);
                b44_init_rx_ring(bp);
                b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
        }

        /* Acknowledge interrupt */
        bw32(bp, B44_ISTAT, 0);
        bflush(bp, B44_ISTAT, 1);
}
static void b44_disable_ints(struct b44 *bp)
{
        __b44_disable_ints(bp);

        /* Flush posted writes. */
        br32(bp, B44_IMASK);
}
/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access.  We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessarily long time in
 * that case.
 *
 * Called-by: b44_close, b44_halt, b44_init_hw (b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);

                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_dirty = bp->tx_cur = 0;

                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);

                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_cur = 0;
        } else {
                ssb_pci_setup(bp, SBINTVEC_ENET0);
        }

        ssb_core_reset(bp);

        /* Don't enable PHY if we are only doing a partial reset. */
        if (reset_kind == B44_CHIP_RESET_PARTIAL)
                return;

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL,
             (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
        bflush(bp, B44_MDIO_CTRL, 1);

        /* Enable internal or external PHY */
        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                bflush(bp, B44_ENET_CTRL, 1);
        } else {
                u32 val = br32(bp, B44_DEVCTRL);
                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        bflush(bp, B44_DEVCTRL, 100);
                }
        }
}
static void b44_set_mac_addr(struct b44_private *bp)
{
        u32 val;

        bw32(bp, B44_CAM_CTRL, 0);
        b44_cam_write(bp, bp->netdev->ll_addr, 0);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
/**
 * Ring cells waiting to be processed are between the 'rx_cur' and 'pending'
 * indexes in the ring.
 */
static u32 pending_rx_index(struct b44_private *bp)
{
        u32 pending = br32(bp, B44_DMARX_STAT);

        pending &= DMARX_STAT_CDMASK;
        pending /= sizeof(struct dma_desc);
        return pending & (B44_RING_SIZE - 1);
}
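/*
 * A minimal sketch of how the pending index is typically consumed, assuming
 * the RX handler walks the ring from rx_cur up to the index reported by the
 * hardware and hands each completed buffer to the stack.  b44_rx_one() is a
 * hypothetical per-descriptor helper used only for illustration; it does not
 * appear in the driver above.
 */
#if 0   /* illustrative sketch only */
static void b44_rx_sketch(struct b44_private *bp)
{
        u32 pending = pending_rx_index(bp);

        while (bp->rx_cur != pending) {
                b44_rx_one(bp, bp->rx_cur);     /* hypothetical helper */
                bp->rx_cur = (bp->rx_cur + 1) & (B44_RING_SIZE - 1);
        }
}
#endif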
static void b44_set_rx_mode(struct net_device *netdev)
{
        struct b44_private *bp = netdev_priv(netdev);
        unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
        u32 val;
        int i;

        val = br32(bp, B44_RXCONFIG);
        val &= ~RXCONFIG_PROMISC;
        val |= RXCONFIG_ALLMULTI;

        b44_set_mac_addr(bp);

        for (i = 1; i < 64; i++)
                b44_cam_write(bp, zero, i);

        bw32(bp, B44_RXCONFIG, val);
        val = br32(bp, B44_CAM_CTRL);
        bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
        bw32(bp, B44_CAM_CTRL, 0);
        if (!(bp->dev->flags & IFF_PROMISC)) {
                u32 val;

                __b44_cam_write(bp, bp->dev->dev_addr, 0);
                val = br32(bp, B44_CAM_CTRL);
                bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
        }
}
static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pci, SSB_BAR0_WIN, BCM4400_PCI_CORE_ADDR);
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct b44 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        u32 len, entry, ctrl;

        len = skb->len;
        spin_lock_irq(&bp->lock);

        /* This is a hard error, log it. */
        if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
                netif_stop_queue(dev);
                spin_unlock_irq(&bp->lock);
                printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
                       dev->name);
                return 1;
        }

        entry = bp->tx_prod;
        mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

        bp->tx_buffers[entry].skb = skb;
        pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

        ctrl  = (len & DESC_CTRL_LEN);
        ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
        if (entry == (B44_TX_RING_SIZE - 1))
                ctrl |= DESC_CTRL_EOT;

        bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
        bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

        entry = NEXT_TX(entry);

        bp->tx_prod = entry;

        wmb();

        bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_BUGGY_TXPTR)
                bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
        if (bp->flags & B44_FLAG_REORDER_BUG)
                br32(bp, B44_DMATX_PTR);

        if (TX_BUFFS_AVAIL(bp) < 1)
                netif_stop_queue(dev);

        spin_unlock_irq(&bp->lock);

        dev->trans_start = jiffies;

        return 0;
}
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
        u32 bar_orig, pci_rev, val;

        pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
                               ssb_get_addr(bp, SBID_REG_PCI, 0));
        pci_rev = ssb_get_core_rev(bp);

        val = br32(bp, B44_SBINTVEC);
        val |= cores;
        bw32(bp, B44_SBINTVEC, val);

        val = br32(bp, SSB_PCI_TRANS_2);
        val |= SSB_PCI_PREF | SSB_PCI_BURST;
        bw32(bp, SSB_PCI_TRANS_2, val);

        pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

        return pci_rev;
}
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
        u32 val;

        bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
        bp->flags |= pause_flags;

        val = br32(bp, B44_RXCONFIG);
        if (pause_flags & B44_FLAG_RX_PAUSE)
                val |= RXCONFIG_FLOW;
        else
                val &= ~RXCONFIG_FLOW;
        bw32(bp, B44_RXCONFIG, val);

        val = br32(bp, B44_MAC_FLOW);
        if (pause_flags & B44_FLAG_TX_PAUSE)
                val |= (MAC_FLOW_PAUSE_ENAB |
                        (0xc0 & MAC_FLOW_RX_HI_WATER));
        else
                val &= ~MAC_FLOW_PAUSE_ENAB;
        bw32(bp, B44_MAC_FLOW, val);
}
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
        struct net_device *dev = dev_id;
        struct b44 *bp = netdev_priv(dev);
        unsigned long flags;
        u32 istat, imask;
        int handled = 0;

        spin_lock_irqsave(&bp->lock, flags);

        istat = br32(bp, B44_ISTAT);
        imask = br32(bp, B44_IMASK);

        /* Even with the interrupt mask register programmed, the status
         * still has to be masked by hand here.
         */
        istat &= imask;
        if (istat) {
                handled = 1;
                if (netif_rx_schedule_prep(dev)) {
                        /* NOTE: These writes are posted by the readback of
                         *       the ISTAT register below.
                         */
                        bp->istat = istat;
                        __b44_disable_ints(bp);
                        __netif_rx_schedule(dev);
                } else {
                        printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
                               dev->name);
                }

                bw32(bp, B44_ISTAT, istat);
                br32(bp, B44_ISTAT);
        }
        spin_unlock_irqrestore(&bp->lock, flags);
        return IRQ_RETVAL(handled);
}
static void b44_check_phy(struct b44 *bp)
{
        u32 bmsr, aux;

        if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
            !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
            (bmsr != 0xffff)) {
                if (aux & MII_AUXCTRL_SPEED)
                        bp->flags |= B44_FLAG_100_BASE_T;
                else
                        bp->flags &= ~B44_FLAG_100_BASE_T;
                if (aux & MII_AUXCTRL_DUPLEX)
                        bp->flags |= B44_FLAG_FULL_DUPLEX;
                else
                        bp->flags &= ~B44_FLAG_FULL_DUPLEX;

                if (!netif_carrier_ok(bp->dev) &&
                    (bmsr & BMSR_LSTATUS)) {
                        u32 val = br32(bp, B44_TX_CTRL);
                        u32 local_adv, remote_adv;

                        if (bp->flags & B44_FLAG_FULL_DUPLEX)
                                val |= TX_CTRL_DUPLEX;
                        else
                                val &= ~TX_CTRL_DUPLEX;
                        bw32(bp, B44_TX_CTRL, val);

                        if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
                            !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
                            !b44_readphy(bp, MII_LPA, &remote_adv))
                                b44_set_flow_ctrl(bp, local_adv, remote_adv);

                        /* Link now up */
                        netif_carrier_on(bp->dev);
                        b44_link_report(bp);
                } else if (netif_carrier_ok(bp->dev) &&
                           !(bmsr & BMSR_LSTATUS)) {
                        /* Link now down */
                        netif_carrier_off(bp->dev);
                        b44_link_report(bp);
                }

                if (bmsr & BMSR_RFAULT)
                        printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
                               bp->dev->name);
                if (bmsr & BMSR_JCD)
                        printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
                               bp->dev->name);
        }
}
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
        int err;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
                                 (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
                                 (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
                                 (reg << MDIO_DATA_RA_SHIFT) |
                                 (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}
static void ssb_core_disable(struct b44_private *bp)
{
        if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
                return;

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
        b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
        b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

        bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
                                SSB_CORE_DOWN));
        bflush(bp, B44_SBTMSLOW, 1);

        bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
        bflush(bp, B44_SBTMSLOW, 1);
}
static int b44_phy_read(struct b44_private *bp, int reg, u32 *val)
{
        int err;
        u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
        u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
        u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
        u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
        u32 argv = arg1 | arg2 | arg3 | arg4;

        bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
        bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
        err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
        *val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

        return err;
}
/**
 * Wait until the given bit is set/cleared.
 */
static int b44_wait_bit(struct b44_private *bp, unsigned long reg, u32 bit,
                        unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout)
                return -ENODEV;
        return 0;
}
/*
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
        u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

        b44_chip_reset(bp, B44_CHIP_RESET_FULL);
        if (reset_kind == B44_FULL_RESET)
                b44_phy_reset(bp);

        /* Enable CRC32, set proper LED modes and power on PHY */
        bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
        bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

        /* This sets the MAC address too. */
        b44_set_rx_mode(bp->netdev);

        /* MTU + eth header + possible VLAN tag + struct rx_header */
        bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
        bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

        bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
        if (reset_kind == B44_PARTIAL_RESET) {
                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
        } else {
                bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
                bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

                bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
                bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
                bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

                bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
        }

        val = br32(bp, B44_ENET_CTRL);
        bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
#undef CTRL_MASK
}
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
        if (ssb_is_core_up(bp)) {
                bw32(bp, B44_RCV_LAZY, 0);
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
                b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
                bw32(bp, B44_DMATX_CTRL, 0);
                bp->tx_prod = bp->tx_cons = 0;
                if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
                        b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
                                     100, 0);
                }
                bw32(bp, B44_DMARX_CTRL, 0);
                bp->rx_prod = bp->rx_cons = 0;
        } else {
                ssb_pci_setup(bp, (bp->core_unit == 0 ?
                                   SBINTVEC_ENET0 :
                                   SBINTVEC_ENET1));
        }

        ssb_core_reset(bp);

        b44_clear_stats(bp);

        /* Make PHY accessible. */
        bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
                                 (0x0d & MDIO_CTRL_MAXF_MASK)));
        br32(bp, B44_MDIO_CTRL);

        if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
                bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
                br32(bp, B44_ENET_CTRL);
                bp->flags &= ~B44_FLAG_INTERNAL_PHY;
        } else {
                u32 val = br32(bp, B44_DEVCTRL);
                if (val & DEVCTRL_EPR) {
                        bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
                        br32(bp, B44_DEVCTRL);
                        udelay(100);
                }
                bp->flags |= B44_FLAG_INTERNAL_PHY;
        }
}
static int b44_wait_bit(struct b44 *bp, unsigned long reg,
                        u32 bit, unsigned long timeout, const int clear)
{
        unsigned long i;

        for (i = 0; i < timeout; i++) {
                u32 val = br32(bp, reg);

                if (clear && !(val & bit))
                        break;
                if (!clear && (val & bit))
                        break;
                udelay(10);
        }
        if (i == timeout) {
                printk(KERN_ERR PFX "%s: BUG! Timeout waiting for bit %08x "
                       "of register %lx to %s.\n",
                       bp->dev->name,
                       bit, reg,
                       (clear ? "clear" : "set"));
                return -ENODEV;
        }
        return 0;
}
static int ssb_core_unit(struct b44 *bp)
{
#if 0
        u32 val = br32(bp, B44_SBADMATCH0);
        u32 base, type;

        type = val & SBADMATCH0_TYPE_MASK;
        switch (type) {
        case 0:
                base = val & SBADMATCH0_BS0_MASK;
                break;

        case 1:
                base = val & SBADMATCH0_BS1_MASK;
                break;

        case 2:
        default:
                base = val & SBADMATCH0_BS2_MASK;
                break;
        }
#endif
        return 0;
}
static inline int ssb_is_core_up(struct b44_private *bp)
{
        return ((br32(bp, B44_SBTMSLOW) & (SSB_CORE_DOWN | SBTMSLOW_CLOCK))
                == SBTMSLOW_CLOCK);
}