Example #1
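/* bp->lock is held. */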
static void __b44_set_rx_mode(struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	u32 val;
	int i = 0;
	unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };

	val = br32(bp, B44_RXCONFIG);
	val &= ~(RXCONFIG_PROMISC | RXCONFIG_ALLMULTI);
	if (dev->flags & IFF_PROMISC) {
		val |= RXCONFIG_PROMISC;
		bw32(bp, B44_RXCONFIG, val);
	} else {
		__b44_set_mac_addr(bp);

		if (dev->flags & IFF_ALLMULTI)
			val |= RXCONFIG_ALLMULTI;
		else
			i = __b44_load_mcast(bp, dev);

		for (; i < 64; i++)
			__b44_cam_write(bp, zero, i);

		bw32(bp, B44_RXCONFIG, val);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
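Both drivers funnel every register access on this page through two tiny
MMIO accessors, br32() and bw32(), which none of the examples define. A
minimal sketch, assuming bp->regs holds the ioremapped BAR 0 base:

static inline u32 br32(const struct b44 *bp, unsigned long reg)
{
	return readl(bp->regs + reg);	/* 32-bit register read */
}

static inline void bw32(const struct b44 *bp, unsigned long reg, u32 val)
{
	writel(val, bp->regs + reg);	/* 32-bit register write (posted) */
}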
Example #2
static void ssb_core_reset(struct b44 *bp)
{
	u32 val;

	ssb_core_disable(bp);
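	/* Assert reset with the clock forced on; the read back flushes the
	 * posted write before the settling delay.
	 */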
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_RESET | SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	/* Clear SERR if set; this is a hw bug workaround. */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_IBE | SBIMSTATE_TO))
		bw32(bp, B44_SBIMSTATE, val & ~(SBIMSTATE_IBE | SBIMSTATE_TO));

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	br32(bp, B44_SBTMSLOW);
	udelay(1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
Example #3
static void ssb_core_reset(struct b44_private *bp)
{
	u32 val;
	const u32 mask = (SBTMSLOW_CLOCK | SBTMSLOW_FGC | SBTMSLOW_RESET);

	ssb_core_disable(bp);

	bw32(bp, B44_SBTMSLOW, mask);
	bflush(bp, B44_SBTMSLOW, 1);

	/* Clear SERR if set; this is a hw bug workaround. */
	if (br32(bp, B44_SBTMSHIGH) & SBTMSHIGH_SERR)
		bw32(bp, B44_SBTMSHIGH, 0);

	val = br32(bp, B44_SBIMSTATE);
	if (val & (SBIMSTATE_BAD)) {
		bw32(bp, B44_SBIMSTATE, val & ~SBIMSTATE_BAD);
	}

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK | SBTMSLOW_FGC));
	bflush(bp, B44_SBTMSLOW, 1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_CLOCK));
	bflush(bp, B44_SBTMSLOW, 1);
}
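This gPXE port collapses the kernel driver's recurring "write, read back,
udelay" idiom (compare Example #2) into a bflush() helper. Its definition
is not among these excerpts; presumably it is little more than:

/* Read the register back to flush the posted write, then wait. */
static void bflush(struct b44_private *bp, u32 reg, u32 timeout)
{
	readl(bp->regs + reg);
	udelay(timeout);
}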
Example #4
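/** Write the MAC address into CAM slot 0, then re-enable the CAM */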
static void b44_set_mac_addr(struct b44_private *bp)
{
	u32 val;

	bw32(bp, B44_CAM_CTRL, 0);
	b44_cam_write(bp, bp->netdev->ll_addr, 0);
	val = br32(bp, B44_CAM_CTRL);
	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
Example #5
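/* Acknowledge any pending MII event, start one MDIO write frame, then
 * wait for the MII interrupt bit to signal completion.
 */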
static int b44_writephy(struct b44 *bp, int reg, u32 val)
{
	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT) |
			     (val & MDIO_DATA_DATA)));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
Example #6
/* bp->lock is held. */
static void __b44_set_mac_addr(struct b44 *bp)
{
	bw32(bp, B44_CAM_CTRL, 0);
	if (!(bp->dev->flags & IFF_PROMISC)) {
		u32 val;

		__b44_cam_write(bp, bp->dev->dev_addr, 0);
		val = br32(bp, B44_CAM_CTRL);
		bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
	}
}
Example #7
static int b44_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct b44 *bp = netdev_priv(dev);
	dma_addr_t mapping;
	u32 len, entry, ctrl;

	len = skb->len;
	spin_lock_irq(&bp->lock);

	/* This is a hard error, log it. */
	if (unlikely(TX_BUFFS_AVAIL(bp) < 1)) {
		netif_stop_queue(dev);
		spin_unlock_irq(&bp->lock);
		printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
		       dev->name);
		return 1;
	}

	entry = bp->tx_prod;
	mapping = pci_map_single(bp->pdev, skb->data, len, PCI_DMA_TODEVICE);

	bp->tx_buffers[entry].skb = skb;
	pci_unmap_addr_set(&bp->tx_buffers[entry], mapping, mapping);

	ctrl  = (len & DESC_CTRL_LEN);
	ctrl |= DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;
	if (entry == (B44_TX_RING_SIZE - 1))
		ctrl |= DESC_CTRL_EOT;

	bp->tx_ring[entry].ctrl = cpu_to_le32(ctrl);
	bp->tx_ring[entry].addr = cpu_to_le32((u32) mapping + bp->dma_offset);

	entry = NEXT_TX(entry);

	bp->tx_prod = entry;

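	/* Descriptor and producer-index updates must reach memory before
	 * the doorbell write below.
	 */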
	wmb();

	bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_BUGGY_TXPTR)
		bw32(bp, B44_DMATX_PTR, entry * sizeof(struct dma_desc));
	if (bp->flags & B44_FLAG_REORDER_BUG)
		br32(bp, B44_DMATX_PTR);

	if (TX_BUFFS_AVAIL(bp) < 1)
		netif_stop_queue(dev);

	spin_unlock_irq(&bp->lock);

	dev->trans_start = jiffies;

	return 0;
}
Example #8
static int b44_phy_write(struct b44_private *bp, int reg, u32 val)
{
	u32 arg1 = (MDIO_OP_WRITE << MDIO_DATA_OP_SHIFT);
	u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
	u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
	u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
	u32 arg5 = (val & MDIO_DATA_DATA);
	u32 argv = arg1 | arg2 | arg3 | arg4 | arg5;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
	return b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
}
Example #9
static int b44_readphy(struct b44 *bp, int reg, u32 *val)
{
	int err;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START |
			     (MDIO_OP_READ << MDIO_DATA_OP_SHIFT) |
			     (bp->phy_addr << MDIO_DATA_PMD_SHIFT) |
			     (reg << MDIO_DATA_RA_SHIFT) |
			     (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT)));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
Example #10
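/* Put the core into reset: reject new backplane transactions, wait for
 * pending ones to drain, then assert RESET.
 */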
static void ssb_core_disable(struct b44 *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
			    SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_RESET));
	br32(bp, B44_SBTMSLOW);
	udelay(1);
}
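Nearly every example above polls hardware through b44_wait_bit(), which is
not shown on this page. Reconstructed from its call sites, where the last
argument selects waiting for the bit to clear (1) rather than to be set
(0), the helper is roughly:

static int b44_wait_bit(struct b44 *bp, unsigned long reg,
			u32 bit, unsigned long timeout, const int clear)
{
	unsigned long i;

	for (i = 0; i < timeout; i++) {
		u32 val = br32(bp, reg);

		if (clear && !(val & bit))
			break;
		if (!clear && (val & bit))
			break;
		udelay(10);	/* timeout counts 10us polling steps */
	}
	return (i == timeout) ? -ENODEV : 0;
}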
Example #11
/** Poll for completed and received packets
 *
 * @v netdev	Network device
 */
static void b44_poll(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	u32 istat;

	/* Interrupt status */
	istat = br32(bp, B44_ISTAT);
	istat &= IMASK_DEF;	/* only the events we care about */

	if (!istat)
		return;
	if (istat & ISTAT_TX)
		b44_tx_complete(bp);
	if (istat & ISTAT_RX)
		b44_process_rx_packets(bp);
	if (istat & ISTAT_ERRORS) {
		DBG("b44 error istat=0x%08x\n", istat);

		/* Reset B44 core partially to avoid long waits */
		b44_irq(bp->netdev, 0);
		b44_halt(bp);
		b44_init_tx_ring(bp);
		b44_init_rx_ring(bp);
		b44_init_hw(bp, B44_FULL_RESET_SKIP_PHY);
	}

	/* Acknowledge interrupt */
	bw32(bp, B44_ISTAT, 0);
	bflush(bp, B44_ISTAT, 1);
}
Example #12
/** Enable or disable interrupts
 *
 * @v netdev	Network device
 * @v enable	Interrupts should be enabled
 */
static void b44_irq(struct net_device *netdev, int enable)
{
	struct b44_private *bp = netdev_priv(netdev);

	/* Interrupt mask specifies which events generate interrupts */
	bw32(bp, B44_IMASK, enable ? IMASK_DEF : IMASK_DISABLE);
}
Example #13
static void ssb_core_disable(struct b44_private *bp)
{
	if (br32(bp, B44_SBTMSLOW) & SBTMSLOW_RESET)
		return;

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_REJECT | SBTMSLOW_CLOCK));
	b44_wait_bit(bp, B44_SBTMSLOW, SBTMSLOW_REJECT, 100000, 0);
	b44_wait_bit(bp, B44_SBTMSHIGH, SBTMSHIGH_BUSY, 100000, 1);

	bw32(bp, B44_SBTMSLOW, (SBTMSLOW_FGC | SBTMSLOW_CLOCK |
	                                        SSB_CORE_DOWN));
	bflush(bp, B44_SBTMSLOW, 1);

	bw32(bp, B44_SBTMSLOW, SSB_CORE_DOWN);
	bflush(bp, B44_SBTMSLOW, 1);
}
Example #14
/** Transmit packet
 *
 * @v netdev	Network device
 * @v iobuf	I/O buffer
 * @ret rc	Return status code
 */
static int b44_transmit(struct net_device *netdev, struct io_buffer *iobuf)
{
	struct b44_private *bp = netdev_priv(netdev);
	u32 cur = bp->tx_cur;
	u32 ctrl;

	/* Check for TX ring overflow */
	if (bp->tx[cur].ctrl) {
		DBG("tx overflow\n");
		return -ENOBUFS;
	}

	/* Will call netdev_tx_complete() on the iobuf later */
	bp->tx_iobuf[cur] = iobuf;

	/* Set up TX descriptor */
	ctrl = (iob_len(iobuf) & DESC_CTRL_LEN) |
	    DESC_CTRL_IOC | DESC_CTRL_SOF | DESC_CTRL_EOF;

	if (cur == B44_RING_LAST)
		ctrl |= DESC_CTRL_EOT;

	bp->tx[cur].ctrl = cpu_to_le32(ctrl);
	bp->tx[cur].addr = cpu_to_le32(VIRT_TO_B44(iobuf->data));

	/* Update next available descriptor index */
	cur = ring_next(cur);
	bp->tx_cur = cur;
	wmb();

	/* Tell card that a new TX descriptor is ready */
	bw32(bp, B44_DMATX_PTR, cur * sizeof(struct dma_desc));
	return 0;
}
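VIRT_TO_B44() converts a virtual buffer address into the bus address the
core's DMA engine expects. It is not defined in these excerpts; since the
kernel driver adds bp->dma_offset for the same purpose (Example #7), it is
presumably a thin wrapper along the lines of:

/* The core sees host memory through a window at SB_PCI_DMA, so bus
 * addresses must be offset into that window.
 */
#define VIRT_TO_B44(addr)	(virt_to_bus(addr) + SB_PCI_DMA)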
Example #15
static void b44_tx(struct b44 *bp)
{
	u32 cur, cons;

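	/* The chip's current-descriptor offset marks how far transmission
	 * has completed.
	 */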
	cur  = br32(bp, B44_DMATX_STAT) & DMATX_STAT_CDMASK;
	cur /= sizeof(struct dma_desc);

	/* XXX needs updating when NETIF_F_SG is supported */
	for (cons = bp->tx_cons; cons != cur; cons = NEXT_TX(cons)) {
		struct ring_info *rp = &bp->tx_buffers[cons];
		struct sk_buff *skb = rp->skb;

		if (unlikely(skb == NULL))
			BUG();

		pci_unmap_single(bp->pdev,
				 pci_unmap_addr(rp, mapping),
				 skb->len,
				 PCI_DMA_TODEVICE);
		rp->skb = NULL;
		dev_kfree_skb_irq(skb);
	}

	bp->tx_cons = cons;
	if (netif_queue_stopped(bp->dev) &&
	    TX_BUFFS_AVAIL(bp) > B44_TX_WAKEUP_THRESH)
		netif_wake_queue(bp->dev);

	bw32(bp, B44_GPTIMER, 0);
}
Example #16
/**
 * called by b44_poll in the error path
 */
static void b44_halt(struct b44_private *bp)
{
	/* disable ints */
	bw32(bp, B44_IMASK, 0);
	bflush(bp, B44_IMASK, 1);

	DBG("b44: powering down PHY\n");
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_PHY_PDOWN);

	/*
	 * Now reset the chip, but without enabling the MAC & PHY
	 * parts of it. This has to be done _after_ we shut down
	 * the PHY.
	 */
	b44_chip_reset(bp, B44_CHIP_RESET_PARTIAL);
}
Example #17
static int b44_phy_read(struct b44_private *bp, int reg, u32 *val)
{
	int err;

	u32 arg1 = (MDIO_OP_READ << MDIO_DATA_OP_SHIFT);
	u32 arg2 = (bp->phy_addr << MDIO_DATA_PMD_SHIFT);
	u32 arg3 = (reg << MDIO_DATA_RA_SHIFT);
	u32 arg4 = (MDIO_TA_VALID << MDIO_DATA_TA_SHIFT);
	u32 argv = arg1 | arg2 | arg3 | arg4;

	bw32(bp, B44_EMAC_ISTAT, EMAC_INT_MII);
	bw32(bp, B44_MDIO_DATA, (MDIO_DATA_SB_START | argv));
	err = b44_wait_bit(bp, B44_EMAC_ISTAT, EMAC_INT_MII, 100, 0);
	*val = br32(bp, B44_MDIO_DATA) & MDIO_DATA_DATA;

	return err;
}
Example #18
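/* Load one six-byte MAC address into the CAM entry at @index and wait
 * for the CAM write to finish.
 */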
static void __b44_cam_write(struct b44 *bp, unsigned char *data, int index)
{
	u32 val;

	val  = ((u32) data[2]) << 24;
	val |= ((u32) data[3]) << 16;
	val |= ((u32) data[4]) <<  8;
	val |= ((u32) data[5]) <<  0;
	bw32(bp, B44_CAM_DATA_LO, val);
	val = (CAM_DATA_HI_VALID | 
	       (((u32) data[0]) << 8) |
	       (((u32) data[1]) << 0));
	bw32(bp, B44_CAM_DATA_HI, val);
	bw32(bp, B44_CAM_CTRL, (CAM_CTRL_WRITE |
			    (index << CAM_CTRL_INDEX_SHIFT)));
	b44_wait_bit(bp, B44_CAM_CTRL, CAM_CTRL_BUSY, 100, 1);	
}
Example #19
/* bp->lock is held. */
static void b44_clear_stats(struct b44 *bp)
{
	unsigned long reg;

	bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
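	/* With clear-on-read enabled, reading each MIB counter zeroes it. */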
	for (reg = B44_TX_GOOD_O; reg <= B44_TX_PAUSE; reg += 4UL)
		br32(bp, reg);
	for (reg = B44_RX_GOOD_O; reg <= B44_RX_NPAUSE; reg += 4UL)
		br32(bp, reg);
}
Example #20
static void b44_set_rx_mode(struct net_device *netdev)
{
	struct b44_private *bp = netdev_priv(netdev);
	unsigned char zero[6] = { 0, 0, 0, 0, 0, 0 };
	u32 val;
	int i;

	val = br32(bp, B44_RXCONFIG);
	val &= ~RXCONFIG_PROMISC;
	val |= RXCONFIG_ALLMULTI;

	b44_set_mac_addr(bp);

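	/* CAM slot 0 now holds our MAC address; blank the remaining slots. */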
	for (i = 1; i < 64; i++)
		b44_cam_write(bp, zero, i);

	bw32(bp, B44_RXCONFIG, val);
	val = br32(bp, B44_CAM_CTRL);
	bw32(bp, B44_CAM_CTRL, val | CAM_CTRL_ENABLE);
}
Example #21
static u32 ssb_pci_setup(struct b44_private *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pci, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pci, SSB_BAR0_WIN,
	                       BCM4400_PCI_CORE_ADDR);
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pci, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}
Example #22
static u32 ssb_pci_setup(struct b44 *bp, u32 cores)
{
	u32 bar_orig, pci_rev, val;

	pci_read_config_dword(bp->pdev, SSB_BAR0_WIN, &bar_orig);
	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN,
			       ssb_get_addr(bp, SBID_REG_PCI, 0));
	pci_rev = ssb_get_core_rev(bp);

	val = br32(bp, B44_SBINTVEC);
	val |= cores;
	bw32(bp, B44_SBINTVEC, val);

	val = br32(bp, SSB_PCI_TRANS_2);
	val |= SSB_PCI_PREF | SSB_PCI_BURST;
	bw32(bp, SSB_PCI_TRANS_2, val);

	pci_write_config_dword(bp->pdev, SSB_BAR0_WIN, bar_orig);

	return pci_rev;
}
Example #23
static void __b44_set_flow_ctrl(struct b44 *bp, u32 pause_flags)
{
	u32 val;

	bp->flags &= ~(B44_FLAG_TX_PAUSE | B44_FLAG_RX_PAUSE);
	bp->flags |= pause_flags;

	val = br32(bp, B44_RXCONFIG);
	if (pause_flags & B44_FLAG_RX_PAUSE)
		val |= RXCONFIG_FLOW;
	else
		val &= ~RXCONFIG_FLOW;
	bw32(bp, B44_RXCONFIG, val);

	val = br32(bp, B44_MAC_FLOW);
	if (pause_flags & B44_FLAG_TX_PAUSE)
		val |= (MAC_FLOW_PAUSE_ENAB |
			(0xc0 & MAC_FLOW_RX_HI_WATER));
	else
		val &= ~MAC_FLOW_PAUSE_ENAB;
	bw32(bp, B44_MAC_FLOW, val);
}
Example #24
static void b44_check_phy(struct b44 *bp)
{
	u32 bmsr, aux;

	if (!b44_readphy(bp, MII_BMSR, &bmsr) &&
	    !b44_readphy(bp, B44_MII_AUXCTRL, &aux) &&
	    (bmsr != 0xffff)) {
		if (aux & MII_AUXCTRL_SPEED)
			bp->flags |= B44_FLAG_100_BASE_T;
		else
			bp->flags &= ~B44_FLAG_100_BASE_T;
		if (aux & MII_AUXCTRL_DUPLEX)
			bp->flags |= B44_FLAG_FULL_DUPLEX;
		else
			bp->flags &= ~B44_FLAG_FULL_DUPLEX;

		if (!netif_carrier_ok(bp->dev) &&
		    (bmsr & BMSR_LSTATUS)) {
			u32 val = br32(bp, B44_TX_CTRL);
			u32 local_adv, remote_adv;

			if (bp->flags & B44_FLAG_FULL_DUPLEX)
				val |= TX_CTRL_DUPLEX;
			else
				val &= ~TX_CTRL_DUPLEX;
			bw32(bp, B44_TX_CTRL, val);

			if (!(bp->flags & B44_FLAG_FORCE_LINK) &&
			    !b44_readphy(bp, MII_ADVERTISE, &local_adv) &&
			    !b44_readphy(bp, MII_LPA, &remote_adv))
				b44_set_flow_ctrl(bp, local_adv, remote_adv);

			/* Link now up */
			netif_carrier_on(bp->dev);
			b44_link_report(bp);
		} else if (netif_carrier_ok(bp->dev) && !(bmsr & BMSR_LSTATUS)) {
			/* Link now down */
			netif_carrier_off(bp->dev);
			b44_link_report(bp);
		}

		if (bmsr & BMSR_RFAULT)
			printk(KERN_WARNING PFX "%s: Remote fault detected in PHY\n",
			       bp->dev->name);
		if (bmsr & BMSR_JCD)
			printk(KERN_WARNING PFX "%s: Jabber detected in PHY\n",
			       bp->dev->name);
	}
}
Example #25
static void b44_populate_rx_descriptor(struct b44_private *bp, u32 idx)
{
	struct rx_header *rh;
	u32 ctrl, addr;

	rh = bp->rx_iobuf[idx]->data;
	rh->len = 0;
	rh->flags = 0;
	ctrl = DESC_CTRL_LEN & (RX_PKT_BUF_SZ - RX_PKT_OFFSET);
	if (idx == B44_RING_LAST) {
		ctrl |= DESC_CTRL_EOT;
	}
	addr = VIRT_TO_B44(bp->rx_iobuf[idx]->data);

	bp->rx[idx].ctrl = cpu_to_le32(ctrl);
	bp->rx[idx].addr = cpu_to_le32(addr);
	bw32(bp, B44_DMARX_PTR, idx * sizeof(struct dma_desc));
}
Example #26
/*
 * Chip reset provides power to the b44 MAC & PCI cores, which
 * is necessary for MAC register access. We only do a partial
 * reset in case of transmit/receive errors (ISTAT_ERRORS) to
 * avoid the chip being hung for an unnecessarily long time in
 * this case.
 *
 * Called-by: b44_close, b44_halt, b44_inithw(b44_open), b44_probe
 */
static void b44_chip_reset(struct b44_private *bp, int reset_kind)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);

		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);

		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 200, 1);

		bw32(bp, B44_DMATX_CTRL, 0);

		bp->tx_dirty = bp->tx_cur = 0;

		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK)
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
			                                          100, 0);

		bw32(bp, B44_DMARX_CTRL, 0);

		bp->rx_cur = 0;
	} else {
		ssb_pci_setup(bp, SBINTVEC_ENET0);
	}

	ssb_core_reset(bp);

	/* Don't enable PHY if we are only doing a partial reset. */
	if (reset_kind == B44_CHIP_RESET_PARTIAL)
		return;

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL,
	     (MDIO_CTRL_PREAMBLE | (0x0d & MDIO_CTRL_MAXF_MASK)));
	bflush(bp, B44_MDIO_CTRL, 1);

	/* Enable internal or external PHY */
	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		bflush(bp, B44_ENET_CTRL, 1);
	} else {
		u32 val = br32(bp, B44_DEVCTRL);
		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			bflush(bp, B44_DEVCTRL, 100);
		}
	}
}
Example #27
/* bp->lock is held. */
static void b44_chip_reset(struct b44 *bp)
{
	if (ssb_is_core_up(bp)) {
		bw32(bp, B44_RCV_LAZY, 0);
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE);
		b44_wait_bit(bp, B44_ENET_CTRL, ENET_CTRL_DISABLE, 100, 1);
		bw32(bp, B44_DMATX_CTRL, 0);
		bp->tx_prod = bp->tx_cons = 0;
		if (br32(bp, B44_DMARX_STAT) & DMARX_STAT_EMASK) {
			b44_wait_bit(bp, B44_DMARX_STAT, DMARX_STAT_SIDLE,
				     100, 0);
		}
		bw32(bp, B44_DMARX_CTRL, 0);
		bp->rx_prod = bp->rx_cons = 0;
	} else {
		ssb_pci_setup(bp, (bp->core_unit == 0 ?
				   SBINTVEC_ENET0 :
				   SBINTVEC_ENET1));
	}

	ssb_core_reset(bp);

	b44_clear_stats(bp);

	/* Make PHY accessible. */
	bw32(bp, B44_MDIO_CTRL, (MDIO_CTRL_PREAMBLE |
			     (0x0d & MDIO_CTRL_MAXF_MASK)));
	br32(bp, B44_MDIO_CTRL);

	if (!(br32(bp, B44_DEVCTRL) & DEVCTRL_IPP)) {
		bw32(bp, B44_ENET_CTRL, ENET_CTRL_EPSEL);
		br32(bp, B44_ENET_CTRL);
		bp->flags &= ~B44_FLAG_INTERNAL_PHY;
	} else {
		u32 val = br32(bp, B44_DEVCTRL);

		if (val & DEVCTRL_EPR) {
			bw32(bp, B44_DEVCTRL, (val & ~DEVCTRL_EPR));
			br32(bp, B44_DEVCTRL);
			udelay(100);
		}
		bp->flags |= B44_FLAG_INTERNAL_PHY;
	}
}
Example #28
static irqreturn_t b44_interrupt(int irq, void *dev_id, struct pt_regs *regs)
{
	struct net_device *dev = dev_id;
	struct b44 *bp = netdev_priv(dev);
	unsigned long flags;
	u32 istat, imask;
	int handled = 0;

	spin_lock_irqsave(&bp->lock, flags);

	istat = br32(bp, B44_ISTAT);
	imask = br32(bp, B44_IMASK);

	/* ??? What the f**k is the purpose of the interrupt mask
	 * ??? register if we have to mask it out by hand anyways?
	 */
	istat &= imask;
	if (istat) {
		handled = 1;
		if (netif_rx_schedule_prep(dev)) {
			/* NOTE: These writes are posted by the readback of
			 *       the ISTAT register below.
			 */
			bp->istat = istat;
			__b44_disable_ints(bp);
			__netif_rx_schedule(dev);
		} else {
			printk(KERN_ERR PFX "%s: Error, poll already scheduled\n",
			       dev->name);
		}

		bw32(bp, B44_ISTAT, istat);
		br32(bp, B44_ISTAT);
	}
	spin_unlock_irqrestore(&bp->lock, flags);
	return IRQ_RETVAL(handled);
}
Example #29
static int b44_rx(struct b44 *bp, int budget)
{
	int received;
	u32 cons, prod;

	received = 0;
	prod  = br32(bp, B44_DMARX_STAT) & DMARX_STAT_CDMASK;
	prod /= sizeof(struct dma_desc);
	cons = bp->rx_cons;

	while (cons != prod && budget > 0) {
		struct ring_info *rp = &bp->rx_buffers[cons];
		struct sk_buff *skb = rp->skb;
		dma_addr_t map = pci_unmap_addr(rp, mapping);
		struct rx_header *rh;
		u16 len;

		pci_dma_sync_single_for_cpu(bp->pdev, map,
					    RX_PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
		rh = (struct rx_header *) skb->data;
		len = le16_to_cpu(rh->len);
		if ((len > (RX_PKT_BUF_SZ - bp->rx_offset)) ||
		    (rh->flags & cpu_to_le16(RX_FLAG_ERRORS))) {
		drop_it:
			b44_recycle_rx(bp, cons, bp->rx_prod);
		drop_it_no_recycle:
			bp->stats.rx_dropped++;
			goto next_pkt;
		}

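		/* A zero length means the chip has not yet written the
		 * rx_header back; give it a few microseconds.
		 */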
		if (len == 0) {
			int i = 0;

			do {
				udelay(2);
				barrier();
				len = le16_to_cpu(rh->len);
			} while (len == 0 && i++ < 5);
			if (len == 0)
				goto drop_it;
		}

		/* Omit CRC. */
		len -= 4;

		if (len > RX_COPY_THRESHOLD) {
			int skb_size;
			skb_size = b44_alloc_rx_skb(bp, cons, bp->rx_prod);
			if (skb_size < 0)
				goto drop_it;
			pci_unmap_single(bp->pdev, map,
					 skb_size, PCI_DMA_FROMDEVICE);
			/* Leave out rx_header */
			skb_put(skb, len + bp->rx_offset);
			skb_pull(skb, bp->rx_offset);
		} else {
			struct sk_buff *copy_skb;

			b44_recycle_rx(bp, cons, bp->rx_prod);
			copy_skb = dev_alloc_skb(len + 2);
			if (copy_skb == NULL)
				goto drop_it_no_recycle;

			copy_skb->dev = bp->dev;
			skb_reserve(copy_skb, 2);
			skb_put(copy_skb, len);
			/* DMA sync done above, copy just the actual packet */
			memcpy(copy_skb->data, skb->data + bp->rx_offset, len);

			skb = copy_skb;
		}
		skb->ip_summed = CHECKSUM_NONE;
		skb->protocol = eth_type_trans(skb, bp->dev);
		netif_receive_skb(skb);
		bp->dev->last_rx = jiffies;
		received++;
		budget--;
	next_pkt:
		bp->rx_prod = (bp->rx_prod + 1) &
			(B44_RX_RING_SIZE - 1);
		cons = (cons + 1) & (B44_RX_RING_SIZE - 1);
	}

	bp->rx_cons = cons;
	bw32(bp, B44_DMARX_PTR, cons * sizeof(struct dma_desc));

	return received;
}
Example #30
/*
 * Called at device open time to get the chip ready for
 * packet processing.
 *
 * Called-by: b44_open
 */
static void b44_init_hw(struct b44_private *bp, int reset_kind)
{
	u32 val;
#define CTRL_MASK (DMARX_CTRL_ENABLE | (RX_PKT_OFFSET << DMARX_CTRL_ROSHIFT))

	b44_chip_reset(bp, B44_CHIP_RESET_FULL);
	if (reset_kind == B44_FULL_RESET) {
		b44_phy_reset(bp);
	}

	/* Enable CRC32, set proper LED modes and power on PHY */
	bw32(bp, B44_MAC_CTRL, MAC_CTRL_CRC32_ENAB | MAC_CTRL_PHY_LEDCTRL);
	bw32(bp, B44_RCV_LAZY, (1 << RCV_LAZY_FC_SHIFT));

	/* This sets the MAC address too.  */
	b44_set_rx_mode(bp->netdev);

	/* MTU + eth header + possible VLAN tag + struct rx_header */
	bw32(bp, B44_RXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);
	bw32(bp, B44_TXMAXLEN, B44_MAX_MTU + ETH_HLEN + 8 + RX_HEADER_LEN);

	bw32(bp, B44_TX_HIWMARK, TX_HIWMARK_DEFLT);
	if (reset_kind == B44_PARTIAL_RESET) {
		bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
	} else {
		bw32(bp, B44_DMATX_CTRL, DMATX_CTRL_ENABLE);
		bw32(bp, B44_DMATX_ADDR, VIRT_TO_B44(bp->tx));

		bw32(bp, B44_DMARX_CTRL, CTRL_MASK);
		bw32(bp, B44_DMARX_ADDR, VIRT_TO_B44(bp->rx));
		bw32(bp, B44_DMARX_PTR, B44_RX_RING_LEN_BYTES);

		bw32(bp, B44_MIB_CTRL, MIB_CTRL_CLR_ON_READ);
	}

	val = br32(bp, B44_ENET_CTRL);
	bw32(bp, B44_ENET_CTRL, (val | ENET_CTRL_ENABLE));
#undef CTRL_MASK
}