Ejemplo n.º 1
0
/*
 * Read one phy register over the mdio bus.
 *
 * Waits (bounded busy-wait) for the controller to be idle, latches the
 * phy/register address, triggers the read and waits for completion.
 * Returns the 16-bit register value, or -1 on timeout or reported error.
 */
static __inline int enet_mdio_read(struct net_device *dev, int phy_id,
				   int location)
{
	struct tangox_enet_priv *priv = netdev_priv(dev);
	int cmd, status, loops;

	/* wait for any previous mdio command to complete */
	for (loops = 0; loops < MDIO_TIMEOUT; loops++) {
		if (!(enet_readl(ENET_MDIO_CMD1(priv->enet_mac_base)) & MDIO_CMD_GO))
			break;
		udelay(1);
	}
	if (loops >= MDIO_TIMEOUT)
		return -1;

	/* latch phy address and register number, then trigger the read */
	cmd = MIIAR_ADDR(phy_id) | MIIAR_REG(location);
	enet_writel(ENET_MDIO_CMD1(priv->enet_mac_base), cmd);

	udelay(10);

	enet_writel(ENET_MDIO_CMD1(priv->enet_mac_base), cmd | MDIO_CMD_GO);

	/* wait for this command to complete */
	for (loops = 0; loops < MDIO_TIMEOUT; loops++) {
		if (!(enet_readl(ENET_MDIO_CMD1(priv->enet_mac_base)) & MDIO_CMD_GO))
			break;
		udelay(1);
	}
	if (loops >= MDIO_TIMEOUT)
		return -1;

	/* fetch the result, bail out if the controller flagged an error */
	status = enet_readl(ENET_MDIO_STS1(priv->enet_mac_base));
	if (status & MDIO_STS_ERR)
		return -1;

	return status & 0xffff;
}
Ejemplo n.º 2
0
/*
 * Write one phy register over the mdio bus.
 *
 * Waits for the controller to go idle, latches data/address, flags the
 * command as a write, triggers it and waits for completion.  Silently
 * gives up on timeout (the callback has no way to report failure).
 */
static void enet_mdio_write(struct net_device *dev, int phy_id,
				     int location, int val)
{
	struct tangox_enet_priv *priv = netdev_priv(dev);
	int cmd, loops;

	/* wait for any previous mdio command to complete */
	for (loops = 0; loops < MDIO_TIMEOUT; loops++) {
		if (!(enet_readl(ENET_MDIO_CMD1(priv->enet_mac_base)) & MDIO_CMD_GO))
			break;
		udelay(1);
	}
	if (loops >= MDIO_TIMEOUT)
		return;

	/* latch data, phy address and register number */
	cmd = MIIAR_DATA(val) | MIIAR_ADDR(phy_id) | MIIAR_REG(location);
	enet_writel(ENET_MDIO_CMD1(priv->enet_mac_base), cmd);

	udelay(10);

	/* mark the command as a write ... */
	enet_writel(ENET_MDIO_CMD1(priv->enet_mac_base), cmd | MDIO_CMD_WR);

	udelay(10);

	/* ... and trigger it */
	enet_writel(ENET_MDIO_CMD1(priv->enet_mac_base), cmd | MDIO_CMD_WR | MDIO_CMD_GO);

	/* wait for completion; nothing to report either way */
	for (loops = 0; loops < MDIO_TIMEOUT; loops++) {
		if (!(enet_readl(ENET_MDIO_CMD1(priv->enet_mac_base)) & MDIO_CMD_GO))
			break;
		udelay(1);
	}
}
Ejemplo n.º 3
0
/*
 * open callback
 *
 * Brings the interface up: checks phy link state, enables mac rx/tx,
 * clears pending dma completion status, arms the periodic link-check
 * timer, starts the tx queue and finally kicks the rx dma engine.
 * Always returns 0.
 */
static int enet_open(struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	unsigned char val;

	priv = netdev_priv(dev);

#ifdef CONFIG_ETHERENET_LED_ON_OFF_IP101A
	{
	/* clear BMCR bit 11 on the IP101A phy -- presumably the
	 * power-down bit, so the link led comes back on; NOTE(review):
	 * confirm bit meaning against the IP101A datasheet */
	int mii_bmcr_val;
	mii_bmcr_val=priv->mii.mdio_read(dev,priv->mii.phy_id,MII_BMCR);
	mii_bmcr_val &= ~(1<<11);
	priv->mii.mdio_write(dev,priv->mii.phy_id,MII_BMCR,mii_bmcr_val);
	}
#endif

	/* check link; reconfigure mac speed/duplex if it changed */
	if (mii_check_media(&priv->mii, 1, 1))
		enet_link_reconfigure(dev);

	/* enable mac rx & tx (read-modify-write of the control bytes) */
	val = enet_readb(ENET_RX_CTL(priv->enet_mac_base));
	val |= RX_EN;
	enet_writeb(ENET_RX_CTL(priv->enet_mac_base), val);

	val = enet_readb(ENET_TX_CTL1(priv->enet_mac_base));
	val |= TX_EN;
	enet_writeb(ENET_TX_CTL1(priv->enet_mac_base), val);

	/*
	 * clear & enable interrupts, we want:
	 * - receive complete
	 * - transmit complete
	 */
	enet_writel(ENET_TXC_SR(priv->enet_mac_base), 0xff);
	enet_writel(ENET_RXC_SR(priv->enet_mac_base), 0xff);

	/* start link check & tx reclaim timer */
	priv->link_check_timer.expires = jiffies + LINK_CHECK_TIMER_FREQ;
	add_timer(&priv->link_check_timer);

	//priv->tx_reclaim_timer.expires = jiffies + TX_RECLAIM_TIMER_FREQ;
	//add_timer(&priv->tx_reclaim_timer);

	/* and finally start tx queue */
	netif_start_queue(dev);

	/* start rx dma engine */
	enet_start_rx(priv);

	return 0;
}
Ejemplo n.º 4
0
/*
 * our  irq handler, just  ack it  and schedule  the right  tasklet to
 * handle this
 *
 * Acks tx/rx dma status, defers tx reclaim to a tasklet and rx
 * processing to NAPI, and clears the wake-on-lan latch if set.
 * Always returns IRQ_HANDLED (shared-line semantics not used here).
 */
static irqreturn_t enet_isr(int irq, void *dev_id)
{
	struct net_device *dev;
	struct tangox_enet_priv *priv;
	unsigned long val = 0;

	dev = (struct net_device *)dev_id;
	priv = netdev_priv(dev);

	/* tx interrupt: ack all status bits, then dispatch on what was set */
	if ((val = enet_readl(ENET_TXC_SR(priv->enet_mac_base))) != 0) {
		enet_writel(ENET_TXC_SR(priv->enet_mac_base), 0xff);
		//if (likely(val & TSR_DI)) {
		if (likely(val & TSR_TI)) {
			/* transmit complete: reclaim finished skbs outside irq context */
			tasklet_schedule(&priv->tx_reclaim_tasklet);
		}
		if (unlikely(val & TSR_DE))
			printk("TX DMA error\n");
		if (unlikely(val & TSR_TO))
			printk("TX FIFO overflow\n");
	}
	/* rx interrupt */
	if ((val = enet_readl(ENET_RXC_SR(priv->enet_mac_base))) != 0) {
		enet_writel(ENET_RXC_SR(priv->enet_mac_base), 0xff);
		if (likely(val & RSR_RI)) {
			/* hand rx processing to the NAPI poll loop */
			if (netif_rx_schedule_prep(dev)) {
				/*todo: disable rx interrupt */
				/*avoid reentering */
				enet_writel(ENET_RXC_SR(priv->enet_mac_base), 0xff);
				__netif_rx_schedule(dev);
			}
		}

		if (unlikely(val & RSR_DI)) 
			DBG("RX EOC\n");			
		if (unlikely(val & RSR_DE))
			DBG("RX DMA error\n");
		if (unlikely(val & RSR_RO))
			DBG("RX FIFO overflow\n");
	}

 	/* wake on lan: leaving sleep state, clear both mode latches */
 	if ((val = enet_readb(ENET_WAKEUP(priv->enet_mac_base))) == 1) {
 		/* clear sleeping mode */
 		enet_writeb(ENET_SLEEP_MODE(priv->enet_mac_base), 0);
 		/* clear wakeup mode */
 		enet_writeb(ENET_WAKEUP(priv->enet_mac_base), 0);
 	}

	return IRQ_HANDLED;
}
Ejemplo n.º 5
0
/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 *
 * NOTE(review): this snippet appears truncated -- the loop that clears
 * the remaining perfect-match registers, the final ENET_RXCFG_REG
 * write-back and the closing brace are missing compared with the
 * dev_mc_list variant of this function elsewhere in this file.
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;
		/* update perfect match registers: low 4 mac bytes in PML,
		 * high 2 bytes plus valid bit in PMH */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}
Ejemplo n.º 6
0
/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 *
 * Returns 0 when the transfer completed, 1 on timeout.
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout.
	 *
	 * fix: the loop condition was "limit-- >= 0", which let limit
	 * reach -1 before the final iteration's break, so a transfer
	 * completing on the last poll was misreported as a timeout.
	 * with "limit-- > 0" a successful break always leaves
	 * limit >= 0 and only a true timeout drives it to -1. */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- > 0);

	return (limit < 0) ? 1 : 0;
}
Ejemplo n.º 7
0
/*
 * start the rx dma engine
 *
 * Sets RCR_EN under the ier lock; a no-op if the engine already runs,
 * so the register is only written when the bit actually changes.
 */
static __inline void enet_start_rx(struct tangox_enet_priv *priv)
{
	unsigned long flags, cr;

	spin_lock_irqsave(&priv->ier_lock, flags);
	cr = enet_readl(ENET_RXC_CR(priv->enet_mac_base));
	if (!(cr & RCR_EN))
		enet_writel(ENET_RXC_CR(priv->enet_mac_base), cr | RCR_EN);
	spin_unlock_irqrestore(&priv->ier_lock, flags);
}
Ejemplo n.º 8
0
/*
 * Change the interface's mac address.
 *
 * Copies the new address into the netdevice and mirrors it into
 * perfect-match slot 0 (low four bytes in PML, high two bytes plus
 * the valid bit in PMH).  Always returns 0.
 */
static int bcm_enet_set_mac_address(struct net_device *dev, void *p)
{
	struct bcm_enet_priv *priv = netdev_priv(dev);
	struct sockaddr *addr = p;
	u32 lo, hi;

	memcpy(dev->dev_addr, addr->sa_data, ETH_ALEN);

	/* use perfect match register 0 to store my mac address */
	lo = (dev->dev_addr[2] << 24) | (dev->dev_addr[3] << 16) |
		(dev->dev_addr[4] << 8) | dev->dev_addr[5];
	enet_writel(priv, lo, ENET_PML_REG(0));

	hi = (dev->dev_addr[0] << 8 | dev->dev_addr[1]);
	hi |= ENET_PMH_DATAVALID_MASK;
	enet_writel(priv, hi, ENET_PMH_REG(0));

	return 0;
}
Ejemplo n.º 9
0
/*
 * set mac duplex parameters
 *
 * Read-modify-write of the tx control register: the full-duplex bit
 * follows the fullduplex argument.
 */
static void bcm_enet_set_duplex(struct bcm_enet_priv *priv, int fullduplex)
{
	u32 txctl;

	txctl = enet_readl(priv, ENET_TXCTL_REG);
	if (fullduplex)
		txctl |= ENET_TXCTL_FD_MASK;
	else
		txctl &= ~ENET_TXCTL_FD_MASK;
	enet_writel(priv, txctl, ENET_TXCTL_REG);
}
Ejemplo n.º 10
0
/*
 * mac interrupt handler
 *
 * Only the MIB-counter interrupt is expected here; anything else is
 * reported as IRQ_NONE.  The slow mib register reads are deferred to
 * a workqueue, with mac interrupts masked until the work re-enables
 * them.
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev = dev_id;
	struct bcm_enet_priv *priv = netdev_priv(dev);
	u32 irq_stat;

	irq_stat = enet_readl(priv, ENET_IR_REG);
	if (!(irq_stat & ENET_IR_MIB))
		return IRQ_NONE;	/* not our interrupt */

	/* ack the mib interrupt and mask all mac interrupts until the
	 * deferred work has drained the counters */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}
Ejemplo n.º 11
0
/*
 * deferred mib-counter refresh, scheduled from the mac irq handler
 *
 * Serializes against other readers with mib_update_lock, then
 * re-enables the mib interrupt if the interface is still up.
 */
static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv =
		container_of(t, struct bcm_enet_priv, mib_update_task);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* reenable mib interrupt */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}
Ejemplo n.º 12
0
/*
 * preinit hardware to allow mii operation while device is down
 *
 * Sequence: disable mac, soft-reset it (bounded busy-wait for the
 * self-clearing reset bit), pick the internal/external mii interface,
 * turn on the mdc clock and make mib counters clear-on-read.
 */
static void bcm_enet_hw_preinit(struct bcm_enet_priv *priv)
{
	u32 val;
	int limit;

	/* make sure mac is disabled */
	bcm_enet_disable_mac(priv);

	/* soft reset mac; the hardware clears the bit when done */
	val = ENET_CTL_SRESET_MASK;
	enet_writel(priv, val, ENET_CTL_REG);
	wmb();

	/* bounded wait for reset completion (~1ms worst case) */
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_SRESET_MASK))
			break;
		udelay(1);
	} while (limit--);

	/* select correct mii interface */
	val = enet_readl(priv, ENET_CTL_REG);
	if (priv->use_external_mii)
		val |= ENET_CTL_EPHYSEL_MASK;
	else
		val &= ~ENET_CTL_EPHYSEL_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* turn on mdc clock */
	enet_writel(priv, (0x1f << ENET_MIISC_MDCFREQDIV_SHIFT) |
		    ENET_MIISC_PREAMBLEEN_MASK, ENET_MIISC_REG);

	/* set mib counters to self-clear when read */
	val = enet_readl(priv, ENET_MIBCTL_REG);
	val |= ENET_MIBCTL_RDCLEAR_MASK;
	enet_writel(priv, val, ENET_MIBCTL_REG);
}
Ejemplo n.º 13
0
/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 *
 * Programs the rx config promisc/allmulti bits, then either relies on
 * the all-multicast filter or loads up to three multicast addresses
 * into the perfect-match registers (slot 0 is reserved for our own
 * mac address); unused slots are cleared.
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct dev_mc_list *mc_list;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || dev->mc_count > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	for (i = 0, mc_list = dev->mc_list;
	     (mc_list != NULL) && (i < dev->mc_count) && (i < 3);
	     i++, mc_list = mc_list->next) {
		u8 *dmi_addr;
		u32 tmp;

		/* filter non ethernet address */
		if (mc_list->dmi_addrlen != 6)
			continue;

		/* update perfect match registers: low 4 mac bytes in
		 * PML, high 2 bytes plus the valid bit in PMH */
		dmi_addr = mc_list->dmi_addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i + 1));
	}

	/* invalidate the remaining perfect-match slots */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	/* commit the new rx config last */
	enet_writel(priv, val, ENET_RXCFG_REG);
}
Ejemplo n.º 14
0
/*
 * exit func, stops hardware and unregisters netdevice
 *
 * Teardown order matters: unregister the netdevice first (stops
 * traffic and callbacks), then release the mdio layer, io mappings,
 * clocks, and finally the netdevice memory itself.
 */
static int __devexit bcm_enet_remove(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct resource *res;

	/* stop netdevice */
	dev = platform_get_drvdata(pdev);
	priv = netdev_priv(dev);
	unregister_netdev(dev);

	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);

	if (priv->has_phy) {
		/* phylib-managed phy: tear down the mdio bus */
		mdiobus_unregister(priv->mii_bus);
		kfree(priv->mii_bus->irq);
		mdiobus_free(priv->mii_bus);
	} else {
		struct bcm63xx_enet_platform_data *pd;

		/* platform-managed phy: let board code deinit it */
		pd = pdev->dev.platform_data;
		if (pd && pd->mii_config)
			pd->mii_config(dev, 0, bcm_enet_mdio_read_mii,
				       bcm_enet_mdio_write_mii);
	}

	/* release device resources */
	iounmap(priv->base);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	release_mem_region(res->start, res->end - res->start + 1);

	/* disable hw block clocks */
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

	platform_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return 0;
}
Ejemplo n.º 15
0
/*
 * disable mac
 *
 * Sets the disable-request bit and busy-waits (bounded, ~1ms) for the
 * hardware to clear it, confirming the mac has actually stopped.
 */
static void bcm_enet_disable_mac(struct bcm_enet_priv *priv)
{
	int limit;
	u32 val;

	val = enet_readl(priv, ENET_CTL_REG);
	val |= ENET_CTL_DISABLE_MASK;
	enet_writel(priv, val, ENET_CTL_REG);

	/* fix: the loop body used to re-declare 'val', shadowing the
	 * outer variable; reuse the outer one instead */
	limit = 1000;
	do {
		val = enet_readl(priv, ENET_CTL_REG);
		if (!(val & ENET_CTL_DISABLE_MASK))
			break;
		udelay(1);
	} while (limit--);
}
Ejemplo n.º 16
0
/*
 * set mac flow control parameters
 *
 * rx_en toggles pause-frame handling in the mac rx config; tx_en
 * toggles pause-frame generation for our rx dma channel in the dma
 * engine config.
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 rxcfg, dmacfg;

	/* rx flow control (pause frame handling) */
	rxcfg = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		rxcfg |= ENET_RXCFG_ENFLOW_MASK;
	else
		rxcfg &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, rxcfg, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	dmacfg = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		dmacfg |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		dmacfg &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, dmacfg, ENETDMA_CFG_REG);
}
Ejemplo n.º 17
0
/*
 * write one hardware statistics counter: select the counter through
 * the index register, then store the value through the data register
 */
static void enet_stat_write(struct net_device *dev, unsigned long val, unsigned char index)
{
	struct tangox_enet_priv *priv = netdev_priv(dev);
	/* the index write selects which counter the data write targets */
	enet_writeb(ENET_STAT_INDEX(priv->enet_mac_base), index);
	enet_writel(ENET_STAT_DATA1(priv->enet_mac_base), val);
}
Ejemplo n.º 18
0
/*
 * mac hw init is done here
 *
 * Resets the phy and the mac IP block, programs timing/threshold
 * registers, configures the tx/rx dma channels and the mac rx/tx
 * control bytes, optionally clears the multicast table, and finally
 * applies the mac mode and probes gmii support.
 * Returns 0 on success, -EBUSY if the phy reset fails.
 */
static int enet_hw_init(struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	unsigned int val = 0;

	if(phy_reset(dev))
		return -EBUSY;

	priv = netdev_priv(dev);

	/* set pad_mode according to rgmii or not*/
	val = enet_readb(priv->enet_mac_base + 0x400) & 0xf0;
	if(priv->rgmii)
		enet_writeb(priv->enet_mac_base + 0x400, val | 0x01);

	/* software reset IP: pulse the reset line low then high */
	enet_writeb(priv->enet_mac_base + 0x424, 0);
	udelay(10);
	enet_writeb(priv->enet_mac_base + 0x424, 1);

	/*set threshold for internal clock 0x1*/
	enet_writeb(ENET_IC_THRESHOLD(priv->enet_mac_base), 1);

	/*set Random seed 0x8 (backoff randomization)*/
	enet_writeb(ENET_RANDOM_SEED(priv->enet_mac_base), 0x08);

	/*set TX single deferral params 0xc*/
	enet_writeb(ENET_TX_SDP(priv->enet_mac_base), 0xc);

	/*set slot time  0x7f for 10/100Mbps*/
	enet_writeb(ENET_SLOT_TIME(priv->enet_mac_base), 0x7f);

	/*set Threshold for partial full  0x7f */
	enet_writeb(ENET_PF_THRESHOLD(priv->enet_mac_base), 0x7f);

	/* configure TX DMA Channels (descriptor mode, burst size 2,
	 * interrupt coalescing of 1 frame) */
	val = enet_readl(ENET_TXC_CR(priv->enet_mac_base));
	val |=	TCR_RS  | TCR_LE  | TCR_TFI(1) | 
		/*TCR_DIE |*/ TCR_BTS(2);
	val |=	TCR_DM;
 
	enet_writel(ENET_TXC_CR(priv->enet_mac_base), val);
	val = enet_readl(ENET_TXC_CR(priv->enet_mac_base));

 	/* configure RX DMA Channels (not enabled yet; enet_start_rx
 	 * sets RCR_EN when the interface opens) */
	val = enet_readl(ENET_RXC_CR(priv->enet_mac_base));
	val |= (RCR_RS    | RCR_LE | RCR_RFI(1) | 
		RCR_BTS(2) | RCR_FI | RCR_DIE /* | RCR_EN*/); 
	val |=	RCR_DM;

	/* rx buffer size lives in the upper halfword */
	val |=  RX_BUF_SIZE << 16;
	enet_writel(ENET_RXC_CR(priv->enet_mac_base), val); 

	/* configure MAC ctrller */
	val = enet_readb(ENET_TX_CTL1(priv->enet_mac_base));
	val |= (TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS);
	enet_writeb(ENET_TX_CTL1(priv->enet_mac_base), (unsigned char)val);

	/* set retry 5 time when collision occurs*/
	enet_writeb(ENET_TX_CTL2(priv->enet_mac_base), 5);

	val = enet_readb(ENET_RX_CTL(priv->enet_mac_base));
	val |= (RX_RUNT | RX_PAD_STRIP | RX_SEND_CRC 
	                | RX_PAUSE_EN| RX_AF_EN);
	enet_writeb(ENET_RX_CTL(priv->enet_mac_base), (unsigned char)val);

#ifdef ENABLE_MULTICAST
	/* clear internal multicast address table; the MC_INIT register
	 * reads non-zero until the hardware finishes */
	enet_writeb(ENET_MC_INIT(priv->enet_mac_base),  0x00);
	while(enet_readb(ENET_MC_INIT(priv->enet_mac_base)));
	DBG("Internal multicast address table is cleared\n");
#endif

	/* unicast */
	/* Threshold for internal clock*/
	/* threshold for partial empty*/
	/* threshold for partial full */

	/* buffer size for transmit must be 1 from the doc
	   however, it's said that using 0xff ??*/
	enet_writeb(ENET_TX_BUFSIZE(priv->enet_mac_base), 0xff);

	/* fifo control */

	/*MAC mode*/
	enet_mac_config(dev);

	/* check gmii mode support */
	priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);
	DBG("gmii support=0x%x id=0x%x\n", priv->mii.supports_gmii, priv->mii.phy_id);

	return 0;
}
Ejemplo n.º 19
0
/*
 * tx request callback
 *
 * Queues one skb on the tx descriptor ring.  With tx chaining enabled
 * (ENABLE_TX_CHAINING) descriptors are accumulated in a pending queue
 * while the dma engine is busy and kicked in one batch once it idles;
 * a NULL skb is a special internal call that just terminates the
 * pending chain (sets DESC_EOC on the last queued descriptor).
 * Without chaining, the function busy-waits for the engine to idle.
 * Returns NETDEV_TX_OK or NETDEV_TX_BUSY.
 */
static int enet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	volatile struct enet_desc *tx=NULL, *ptx=NULL;
	unsigned long tconfig_cache;
	unsigned long val = 0;
	volatile u32 *r_addr;
	int len = 0;
	int tx_busy = 0;
	unsigned char *txbuf;

	priv = netdev_priv(dev);
	spin_lock(&priv->tx_lock);

	/* low halfword of TXC_CR holds the control bits incl. TCR_EN */
	val = enet_readl(ENET_TXC_CR(priv->enet_mac_base)) & 0xffff;
#ifndef ENABLE_TX_CHAINING
#ifdef CONFIG_TANGOX_ENET_TX_DELAY_1000US
#define MAX_TX_TIMEOUT	1000	/* usec */
#else
#define MAX_TX_TIMEOUT	100	/* usec */
#endif
	/* no chaining: busy-wait for the dma engine to go idle */
	for (len = 0; len < MAX_TX_TIMEOUT; len++) {
		val = enet_readl(ENET_TXC_CR(priv->enet_mac_base)) & 0xffff;
		if (val & TCR_EN)
			udelay(1);
		else
			break;
	}
	if (len >= MAX_TX_TIMEOUT) {
		priv->stats.tx_dropped++;
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_BUSY;
	}
#else
	/* chaining: if the engine is running, remember where the
	 * pending chain starts so it can be kicked later */
	if (val & TCR_EN){ 
		//BUG_ON(skb == NULL);
		tx_busy = 1;
		if (priv->pending_tx < 0)
			priv->pending_tx = priv->next_tx_desc;
	} 

	/* pending queue full (one slot kept free): push back */
	if (tx_busy && (priv->pending_tx >= 0) && (priv->pending_tx_cnt >= (TX_DESC_COUNT -1))) {
		DBG(KERN_WARNING PFX "no more tx desc can be scheduled in pending queue.\n");
		netif_stop_queue(dev);
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_BUSY;
	}
		
	/* NULL skb: terminate the chain at the last queued descriptor */
	if (skb == NULL) {
		unsigned int last_tx;		
		last_tx = (priv->next_tx_desc - 1 + TX_DESC_COUNT) % TX_DESC_COUNT;
		tx = &priv->tx_descs[last_tx];
		tx->config |= DESC_EOC;
		priv->tx_eoc = last_tx;
		mb();
		goto tx_pending;
	}
#endif
	len = skb->len;
	tx = &priv->tx_descs[priv->next_tx_desc];

	/* fill the tx desc with this skb address */
	tconfig_cache = 0;
	tconfig_cache |= DESC_BTS(2);
	tconfig_cache |= DESC_EOF;
	tconfig_cache |= len; 

	if (((unsigned long)(skb->data) & 0x7) != 0) { /* not align by 8 bytes */
		/* dma needs 8-byte alignment: bounce through a private buffer */
		txbuf = priv->tx_bufs[priv->next_tx_desc];
		memcpy(txbuf, skb->data, len); 
		dma_cache_wback((unsigned long)txbuf, len);
		tx->s_addr = PHYSADDR((void *)txbuf);
	} else {
		dma_cache_wback((unsigned long)skb->data, len);
		tx->s_addr = PHYSADDR(skb->data);
	}

	if (tx_busy != 0) {
		/* chain onto the next descriptor; EOC set later */
		tx->n_addr = PHYSADDR((void *)&(priv->tx_descs[(priv->next_tx_desc + 1) % TX_DESC_COUNT]));
	} else {
		/* single descriptor: close the chain here */
		tx->n_addr = 0;
		tconfig_cache |= DESC_EOC;
		priv->tx_eoc = priv->next_tx_desc;
	}
	tx->config = tconfig_cache;

	/* keep a pointer to it for later and give it to dma  */
	priv->tx_skbs[priv->next_tx_desc] = skb;

	/* clear the (uncached) report word the dma will complete into */
	r_addr = (volatile u32 *)KSEG1ADDR((u32)(&(priv->tx_report[priv->next_tx_desc])));
	__raw_writel(0, r_addr);
	priv->next_tx_desc++;
	priv->next_tx_desc %= TX_DESC_COUNT;

#ifdef ETH_DEBUG
	{
	int i;			
 	for(i=0; i<len; i++){
		if(i%16==0 && i>0)
			DBG("\n");
		DBG("%02x ", txbuf[i] & 0xff);
	}
	DBG("\n");

	DBG("DESC Mode:  TXC_CR=0x%x  desc_addr=0x%x s_addr=0x%x n_addr=0x%x r_addr=0x%x config=0x%x\n",
			enet_readl(ENET_TXC_CR(priv->enet_mac_base)), tx,
			tx->s_addr, tx->n_addr,
			tx->r_addr, tx->config); 
	}
#endif

tx_pending:
	/* engine idle: program the descriptor address and kick the dma;
	 * otherwise just count the descriptor as pending */
	if (tx_busy == 0) {
		if (priv->pending_tx >= 0) {
			/* kick from the head of the pending chain */
			ptx = &priv->tx_descs[priv->pending_tx];
			len = ptx->config & 0xffff;

			enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)ptx));
			priv->reclaim_limit = priv->pending_tx;
			priv->pending_tx = -1;
		} else {
			priv->reclaim_limit = (priv->next_tx_desc - 1 + TX_DESC_COUNT) % TX_DESC_COUNT;
			enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)tx));
		}

		enet_writel(ENET_TX_SAR(priv->enet_mac_base), 0);
		enet_writel(ENET_TX_REPORT_ADDR(priv->enet_mac_base), 0);

		/* kick tx dma in case it was suspended */
		val |= TCR_EN; 
		val |= TCR_BTS(2); 
		val |= (len << 16); 
		enet_writel(ENET_TXC_CR(priv->enet_mac_base), val);

		/* no pending at this stage*/
		priv->pending_tx_cnt = 0;
	} else 
		priv->pending_tx_cnt++;

	/* if next tx descriptor is not  clean, then we have to stop
	 * queue */
	if (unlikely(--priv->free_tx_desc_count == 0))
		netif_stop_queue(dev);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}
Ejemplo n.º 20
0
/*
 * update hash table to reflect new device multicast address list
 *
 * Programs the mac rx filter bits (promisc / accept-all-multicast /
 * address-filter) and, when filtering is active, reloads the internal
 * multicast address table one entry at a time via the MC_INIT
 * handshake (the register reads non-zero until the hardware is done).
 */
static void enet_set_multicast_list(struct net_device *dev)
{
#ifdef ENABLE_MULTICAST
	struct tangox_enet_priv *priv;
	struct dev_mc_list *mclist;
	unsigned char val;
	uint32_t mc_filter[2];
	int i;

	priv = netdev_priv(dev);

	/* the link check timer might change RX control, we need to protect
	 * against it */
	spin_lock_bh(&priv->maccr_lock);
	val = enet_readl(ENET_RX_CTL(priv->enet_mac_base));

	if (dev->flags & IFF_PROMISC) {
		/* promiscuous: accept everything, disable filtering */
		val &= ~(RX_BC_DISABLE | RX_AF_EN);
	} else {
		val |= RX_AF_EN	;
		/* if we want all multicast or if address count is too
		 * high, don't try to compute hash value */
		if (dev->mc_count > 64 || dev->flags & IFF_ALLMULTI) {
			val &= ~(RX_BC_DISABLE | RX_AF_EN);
		}
	}

	enet_writel(ENET_RX_CTL(priv->enet_mac_base), val);
	spin_unlock_bh(&priv->maccr_lock);

	/* we  don't  need  to  update  hash  table  if  we  pass  all
	 * multicast */
	if (!(val & RX_BC_DISABLE) && !(val & RX_AF_EN))
		return;

	/* clear internal multicast address table; MC_INIT reads
	 * non-zero until the hardware finishes */
	enet_writeb(ENET_MC_INIT(priv->enet_mac_base), 0x0);
	while(enet_readb(ENET_MC_INIT(priv->enet_mac_base)));

	mc_filter[0] = mc_filter[1] = 0;
	mclist = dev->mc_list;

	for (i = 0; i < dev->mc_count; i++) {
		char *addr;

		addr = mclist->dmi_addr;
		mclist = mclist->next;
		/* skip addresses without the multicast bit set */
		if (!(*addr & 1))
			continue;

		/* load one address and trigger the table update */
		enet_writeb(ENET_MC_ADDR1(priv->enet_mac_base), addr[0]);
		enet_writeb(ENET_MC_ADDR2(priv->enet_mac_base), addr[1]);
		enet_writeb(ENET_MC_ADDR3(priv->enet_mac_base), addr[2]);
		enet_writeb(ENET_MC_ADDR4(priv->enet_mac_base), addr[3]);
		enet_writeb(ENET_MC_ADDR5(priv->enet_mac_base), addr[4]);
		enet_writeb(ENET_MC_ADDR6(priv->enet_mac_base), addr[5]);
		enet_writeb(ENET_MC_INIT(priv->enet_mac_base),	0xff);
		while(enet_readb(ENET_MC_INIT(priv->enet_mac_base)));
	}
#endif
}
Ejemplo n.º 21
0
/*
 * dma ring allocation is done here
 *
 * Allocates the rx descriptor ring plus one skb per rx descriptor,
 * and the tx descriptor ring plus one bounce page per tx descriptor
 * (used when an skb is not 8-byte aligned).  Rings are accessed
 * through an uncached alias.  Returns 0 or -ENOMEM; on failure all
 * partially-allocated resources are released (the original leaked
 * rx pages/skbs on mid-loop failures and never checked the tx
 * bounce-page allocation).
 */
static int enet_dma_init(struct tangox_enet_priv *priv)
{
	unsigned int size;
	int i, rx_order, tx_order;
	
	/*
	 * allocate rx descriptor list & rx buffers
	 */
	size = RX_DESC_COUNT * sizeof (struct enet_desc);
	for (rx_order = 0; (PAGE_SIZE << rx_order) < size; rx_order++);

	if (!(priv->rx_descs_cached = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, rx_order)))
		return -ENOMEM;
	dma_cache_wback_inv((unsigned long)priv->rx_descs_cached, size);
	priv->rx_descs = (volatile struct enet_desc *)
		CACHE_TO_NONCACHE((unsigned long)priv->rx_descs_cached);

	/*
	 * initialize all rx descs, one receive skb each
	 */
	for (i = 0; i < RX_DESC_COUNT; i++) {
		volatile struct enet_desc *rx;
		struct sk_buff *skb;

		rx = &priv->rx_descs[i];
		rx->config = RX_BUF_SIZE | DESC_BTS(2) | DESC_EOF/* | DESC_ID*/;

		skb = dev_alloc_skb(RX_BUF_SIZE + SKB_RESERVE_SIZE);
		if (!skb)
			goto err_free_rx_skbs;
		
		skb_reserve(skb, SKB_RESERVE_SIZE);
		*((volatile unsigned long *)KSEG1ADDR(&(priv->rx_report[i]))) = 0; 
		rx->s_addr = PHYSADDR((void *)skb->data);
		rx->r_addr = PHYSADDR((void *)&priv->rx_report[i]);
		rx->n_addr = PHYSADDR((void *)&priv->rx_descs[i+1]);
		if (i == (RX_DESC_COUNT - 1)) {
			/* close the ring and mark end-of-chain */
			rx->n_addr = PHYSADDR((void *)&priv->rx_descs[0]);
			rx->config |= DESC_EOC ;
			priv->rx_eoc = i;
		}
#ifdef ETH_DEBUG
		DBG("rx[%d]=0x%08x\n", i, (unsigned int)rx);
		DBG("  s_addr=0x%08x\n", (unsigned int)rx->s_addr);
		DBG("  n_addr=0x%08x\n", (unsigned int)rx->n_addr);
		DBG("  r_addr=0x%08x\n", (unsigned int)rx->r_addr);
		DBG("  config=0x%08x\n", (unsigned int)rx->config);
#endif
		dma_cache_inv((unsigned long)skb->data, RX_BUF_SIZE);
		priv->rx_skbs[i] = skb;
	}
	priv->last_rx_desc = 0;

	/*
	 * allocate tx descriptor list
	 *
	 * We allocate  only the descriptor list and  prepare them for
	 * further use. When tx is needed, we will set the right flags
	 * and kick the dma.
	 */
	size = TX_DESC_COUNT * sizeof (struct enet_desc);
	for (tx_order = 0; (PAGE_SIZE << tx_order) < size; tx_order++);

	if (!(priv->tx_descs_cached = (void *)__get_free_pages(GFP_KERNEL | GFP_DMA, tx_order)))
		goto err_free_rx_skbs;	/* i == RX_DESC_COUNT here */
	dma_cache_wback_inv((unsigned long)priv->tx_descs_cached, size);
	priv->tx_descs = (volatile struct enet_desc *)
		CACHE_TO_NONCACHE((unsigned long)priv->tx_descs_cached);

	/*
	 * initialize tx descs and their aligned bounce buffers
	 */
	for (i = 0; i < TX_DESC_COUNT; i++) {
		volatile struct enet_desc *tx;

		priv->tx_bufs[i] = (unsigned char *)__get_free_page(GFP_KERNEL | GFP_DMA);
		if (!priv->tx_bufs[i])
			goto err_free_tx;
		dma_cache_wback_inv((unsigned long)priv->tx_bufs[i], PAGE_SIZE);

		tx = &priv->tx_descs[i];
		*((volatile unsigned long *)KSEG1ADDR(&(priv->tx_report[i]))) = 0; 
		tx->r_addr = PHYSADDR((void *)&priv->tx_report[i]);
		tx->s_addr = 0;
		tx->config = DESC_EOF;
		if (i == (TX_DESC_COUNT - 1)) {
			tx->config |= DESC_EOC;
			tx->n_addr = PHYSADDR((void *)&priv->tx_descs[0]);
			priv->tx_eoc = i;
		}
		//DBG("tx[%d]=0x%08x\n", i, (unsigned int)tx);
	}
	priv->dirty_tx_desc = priv->next_tx_desc = 0;
	priv->pending_tx = -1;
	priv->pending_tx_cnt  = 0;
	priv->reclaim_limit  = -1;
	priv->free_tx_desc_count = TX_DESC_COUNT;

	/*
	 * write rx desc list & tx desc list addresses in registers
	 */
	enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->tx_descs[0]));
	enet_writel(ENET_RX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->rx_descs[0]));
	return 0;

err_free_tx:
	/* undo tx side: bounce pages [0, i) and the descriptor ring */
	while (--i >= 0)
		free_page((unsigned long)priv->tx_bufs[i]);
	free_pages((u32)priv->tx_descs_cached, tx_order);
	i = RX_DESC_COUNT;
err_free_rx_skbs:
	/* undo rx side: skbs [0, i) and the descriptor ring */
	while (--i >= 0)
		dev_kfree_skb(priv->rx_skbs[i]);
	free_pages((u32)priv->rx_descs_cached, rx_order);
	return -ENOMEM;
}
Ejemplo n.º 22
0
/*
 * allocate netdevice, request register memory and register device.
 *
 * Probe order: map registers, grab clocks, minimal hw preinit, then
 * either register a phylib mdio bus (has_phy) or let platform code
 * configure the phy, and finally register the netdevice.  Error paths
 * unwind in strict reverse order through the labels at the bottom.
 */
static int __devinit bcm_enet_probe(struct platform_device *pdev)
{
	struct bcm_enet_priv *priv;
	struct net_device *dev;
	struct bcm63xx_enet_platform_data *pd;
	struct resource *res_mem, *res_irq, *res_irq_rx, *res_irq_tx;
	struct mii_bus *bus;
	const char *clk_name;
	unsigned int iomem_size;
	int i, ret;

	/* stop if shared driver failed, assume driver->probe will be
	 * called in the same order we register devices (correct ?) */
	if (!bcm_enet_shared_base)
		return -ENODEV;

	res_mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	res_irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	res_irq_rx = platform_get_resource(pdev, IORESOURCE_IRQ, 1);
	res_irq_tx = platform_get_resource(pdev, IORESOURCE_IRQ, 2);
	if (!res_mem || !res_irq || !res_irq_rx || !res_irq_tx)
		return -ENODEV;

	ret = 0;
	dev = alloc_etherdev(sizeof(*priv));
	if (!dev)
		return -ENOMEM;
	priv = netdev_priv(dev);
	memset(priv, 0, sizeof(*priv));

	ret = compute_hw_mtu(priv, dev->mtu);
	if (ret)
		goto out;

	iomem_size = res_mem->end - res_mem->start + 1;
	if (!request_mem_region(res_mem->start, iomem_size, "bcm63xx_enet")) {
		ret = -EBUSY;
		goto out;
	}

	priv->base = ioremap(res_mem->start, iomem_size);
	if (priv->base == NULL) {
		ret = -ENOMEM;
		goto out_release_mem;
	}
	dev->irq = priv->irq = res_irq->start;
	priv->irq_rx = res_irq_rx->start;
	priv->irq_tx = res_irq_tx->start;
	priv->mac_id = pdev->id;

	/* get rx & tx dma channel id for this mac */
	if (priv->mac_id == 0) {
		priv->rx_chan = 0;
		priv->tx_chan = 1;
		clk_name = "enet0";
	} else {
		priv->rx_chan = 2;
		priv->tx_chan = 3;
		clk_name = "enet1";
	}

	priv->mac_clk = clk_get(&pdev->dev, clk_name);
	if (IS_ERR(priv->mac_clk)) {
		ret = PTR_ERR(priv->mac_clk);
		goto out_unmap;
	}
	clk_enable(priv->mac_clk);

	/* initialize default and fetch platform data */
	priv->rx_ring_size = BCMENET_DEF_RX_DESC;
	priv->tx_ring_size = BCMENET_DEF_TX_DESC;

	pd = pdev->dev.platform_data;
	if (pd) {
		memcpy(dev->dev_addr, pd->mac_addr, ETH_ALEN);
		priv->has_phy = pd->has_phy;
		priv->phy_id = pd->phy_id;
		priv->has_phy_interrupt = pd->has_phy_interrupt;
		priv->phy_interrupt = pd->phy_interrupt;
		priv->use_external_mii = !pd->use_internal_phy;
		priv->pause_auto = pd->pause_auto;
		priv->pause_rx = pd->pause_rx;
		priv->pause_tx = pd->pause_tx;
		priv->force_duplex_full = pd->force_duplex_full;
		priv->force_speed_100 = pd->force_speed_100;
	}

	if (priv->mac_id == 0 && priv->has_phy && !priv->use_external_mii) {
		/* using internal PHY, enable clock */
		priv->phy_clk = clk_get(&pdev->dev, "ephy");
		if (IS_ERR(priv->phy_clk)) {
			ret = PTR_ERR(priv->phy_clk);
			priv->phy_clk = NULL;
			goto out_put_clk_mac;
		}
		clk_enable(priv->phy_clk);
	}

	/* do minimal hardware init to be able to probe mii bus */
	bcm_enet_hw_preinit(priv);

	/* MII bus registration */
	if (priv->has_phy) {

		priv->mii_bus = mdiobus_alloc();
		if (!priv->mii_bus) {
			ret = -ENOMEM;
			goto out_uninit_hw;
		}

		bus = priv->mii_bus;
		bus->name = "bcm63xx_enet MII bus";
		bus->parent = &pdev->dev;
		bus->priv = priv;
		bus->read = bcm_enet_mdio_read_phylib;
		bus->write = bcm_enet_mdio_write_phylib;
		sprintf(bus->id, "%d", priv->mac_id);

		/* only probe bus where we think the PHY is, because
		 * the mdio read operation return 0 instead of 0xffff
		 * if a slave is not present on hw */
		bus->phy_mask = ~(1 << priv->phy_id);

		bus->irq = kmalloc(sizeof(int) * PHY_MAX_ADDR, GFP_KERNEL);
		if (!bus->irq) {
			ret = -ENOMEM;
			goto out_free_mdio;
		}

		if (priv->has_phy_interrupt)
			bus->irq[priv->phy_id] = priv->phy_interrupt;
		else
			bus->irq[priv->phy_id] = PHY_POLL;

		ret = mdiobus_register(bus);
		if (ret) {
			dev_err(&pdev->dev, "unable to register mdio bus\n");
			goto out_free_mdio;
		}
	} else {

		/* run platform code to initialize PHY device */
		if (pd->mii_config &&
		    pd->mii_config(dev, 1, bcm_enet_mdio_read_mii,
				   bcm_enet_mdio_write_mii)) {
			dev_err(&pdev->dev, "unable to configure mdio bus\n");
			/* fix: ret was still 0 here, making probe report
			 * success on a freed netdevice */
			ret = -ENODEV;
			goto out_uninit_hw;
		}
	}

	spin_lock_init(&priv->rx_lock);

	/* init rx timeout (used for oom) */
	init_timer(&priv->rx_timeout);
	priv->rx_timeout.function = bcm_enet_refill_rx_timer;
	priv->rx_timeout.data = (unsigned long)dev;

	/* init the mib update lock&work */
	mutex_init(&priv->mib_update_lock);
	INIT_WORK(&priv->mib_update_task, bcm_enet_update_mib_counters_defer);

	/* zero mib counters */
	for (i = 0; i < ENET_MIB_REG_COUNT; i++)
		enet_writel(priv, 0, ENET_MIB_REG(i));

	/* register netdevice */
	dev->netdev_ops = &bcm_enet_ops;
	netif_napi_add(dev, &priv->napi, bcm_enet_poll, 16);

	SET_ETHTOOL_OPS(dev, &bcm_enet_ethtool_ops);
	SET_NETDEV_DEV(dev, &pdev->dev);

	ret = register_netdev(dev);
	if (ret)
		goto out_unregister_mdio;

	netif_carrier_off(dev);
	platform_set_drvdata(pdev, dev);
	priv->pdev = pdev;
	priv->net_dev = dev;

	return 0;

out_unregister_mdio:
	if (priv->mii_bus)
		mdiobus_unregister(priv->mii_bus);

out_free_mdio:
	/* fix: bus->irq used to leak when mdiobus_register failed;
	 * kfree(NULL) is a no-op on the alloc-failure path */
	if (priv->mii_bus) {
		kfree(priv->mii_bus->irq);
		mdiobus_free(priv->mii_bus);
	}

out_uninit_hw:
	/* turn off mdc clock */
	enet_writel(priv, 0, ENET_MIISC_REG);
	if (priv->phy_clk) {
		clk_disable(priv->phy_clk);
		clk_put(priv->phy_clk);
	}

out_put_clk_mac:
	clk_disable(priv->mac_clk);
	clk_put(priv->mac_clk);

out_unmap:
	iounmap(priv->base);

out_release_mem:
	release_mem_region(res_mem->start, iomem_size);
out:
	free_netdev(dev);
	return ret;
}
Ejemplo n.º 23
0
/*
 * stop callback
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	/* stop queueing new tx work and rx polling before touching hw */
	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled or running; cancel only
	 * our own work item instead of flushing the whole system
	 * workqueue (flush_scheduled_work is deprecated and can
	 * deadlock on unrelated work) */
	cancel_work_sync(&priv->mib_update_task);

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring: unmap each still-posted buffer before
	 * freeing its skb */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}
Ejemplo n.º 24
0
/*
 * open callback, allocate dma rings & buffers and start rx operation
 */
/*
 * open callback: attach the PHY (when present), request IRQs,
 * allocate dma rings & buffers and start rx operation.
 *
 * Returns 0 on success or a negative errno; on failure every
 * resource acquired so far is released in reverse order.
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		/* advertise pause only when flow control is fully enabled */
		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt  */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	/* priv->phydev is only valid when a PHY was attached above;
	 * without this check a request_irq failure on a non-PHY
	 * configuration would call phy_disconnect(NULL) and oops */
	if (priv->has_phy)
		phy_disconnect(priv->phydev);

	return ret;
}
Ejemplo n.º 25
0
/*
 * stop callback
 */
/*
 * stop callback for the tangox MAC: stop the link timer and tx queue,
 * disable rx/tx at the MAC, then rewind the descriptor rings so a
 * subsequent open starts from a clean state.  Always returns 0.
 */
static int enet_stop(struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	unsigned char val;
	volatile struct enet_desc *rx;
	int i;

	priv = netdev_priv(dev);

	/* stop link timer */
	del_timer_sync(&priv->link_check_timer);

	/* stop tx queue */
	netif_stop_queue(dev);

	/* stop dma */
	//enet_stop_rx(priv);

	/* stop mac rx & tx: read-modify-write the enable bit in each
	 * control register */
	val = enet_readb(ENET_RX_CTL(priv->enet_mac_base));
	val &= ~RX_EN;
	enet_writeb(ENET_RX_CTL(priv->enet_mac_base), val);

	val = enet_readb(ENET_TX_CTL1(priv->enet_mac_base));
	val &= ~TX_EN;
	enet_writeb(ENET_TX_CTL1(priv->enet_mac_base), val);

	/* while we were stopping it,  the rx dma may have filled some
	 * buffer, consider it junk and rearm all descriptor */
	priv->dirty_tx_desc = priv->next_tx_desc = 0;
	priv->pending_tx  = -1;
	priv->pending_tx_cnt  = 0;
	priv->reclaim_limit  = -1;
	priv->last_rx_desc = 0;
	priv->free_tx_desc_count = TX_DESC_COUNT;

	/* point the hardware back at the start of both rings
	 * (PHYSADDR: ring descriptors live in kernel virtual memory) */
	enet_writel(ENET_RX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->rx_descs[0]));
	enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base), PHYSADDR((void *)&priv->tx_descs[0]));
	/* clear eoc and set it to the last one*/
        for(i=0; i< RX_DESC_COUNT; i++){
		int cnt;
		/* walk descriptors starting from last_rx_desc (rotated
		 * index), clearing the end-of-chain flag on each */
		cnt = (priv->last_rx_desc + i) % RX_DESC_COUNT;
		rx = &priv->rx_descs[cnt];
		rx->config &= ~DESC_EOC;
		/* clear the rx status report through an uncached (KSEG1)
		 * alias so the write reaches memory immediately.
		 * NOTE(review): reports are indexed by raw i while
		 * descriptors use the rotated cnt — all entries are
		 * cleared either way, so the order mismatch is benign */
		*((volatile unsigned long *)KSEG1ADDR(&(priv->rx_report[i]))) = 0; 
	}
	/* re-terminate the ring: EOC on the physically last descriptor */
	rx = &priv->rx_descs[RX_DESC_COUNT-1];
	rx->config |= DESC_EOC;
	priv->rx_eoc = RX_DESC_COUNT - 1;
	/* make sure descriptor updates are visible before the hw restarts */
	mb();

#ifdef CONFIG_ETHERENET_LED_ON_OFF_IP101A
	{
	/* set BMCR bit 11 (power down) to turn the IP101A PHY LED off
	 * while the interface is down; enet_open clears it again */
	int mii_bmcr_val;
	mii_bmcr_val=priv->mii.mdio_read(dev,priv->mii.phy_id,MII_BMCR);
	mii_bmcr_val |= 1<<11;
	priv->mii.mdio_write(dev,priv->mii.phy_id,MII_BMCR,mii_bmcr_val);
	}
#endif

	return 0;
}