/*
 * Change rx mode (promiscuous/allmulti) and update multicast list
 */
static void bcm_enet_set_multicast_list(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct netdev_hw_addr *ha;
	u32 val;
	int i;

	priv = netdev_priv(dev);

	val = enet_readl(priv, ENET_RXCFG_REG);

	if (dev->flags & IFF_PROMISC)
		val |= ENET_RXCFG_PROMISC_MASK;
	else
		val &= ~ENET_RXCFG_PROMISC_MASK;

	/* only 3 perfect match registers left, first one is used for
	 * own mac address */
	if ((dev->flags & IFF_ALLMULTI) || netdev_mc_count(dev) > 3)
		val |= ENET_RXCFG_ALLMCAST_MASK;
	else
		val &= ~ENET_RXCFG_ALLMCAST_MASK;

	/* no need to set perfect match registers if we catch all
	 * multicast */
	if (val & ENET_RXCFG_ALLMCAST_MASK) {
		enet_writel(priv, val, ENET_RXCFG_REG);
		return;
	}

	i = 0;
	netdev_for_each_mc_addr(ha, dev) {
		u8 *dmi_addr;
		u32 tmp;

		if (i == 3)
			break;

		/* update perfect match registers */
		dmi_addr = ha->addr;
		tmp = (dmi_addr[2] << 24) | (dmi_addr[3] << 16) |
			(dmi_addr[4] << 8) | dmi_addr[5];
		enet_writel(priv, tmp, ENET_PML_REG(i + 1));

		tmp = (dmi_addr[0] << 8 | dmi_addr[1]);
		tmp |= ENET_PMH_DATAVALID_MASK;
		enet_writel(priv, tmp, ENET_PMH_REG(i++ + 1));
	}

	/* invalidate any remaining perfect match registers and commit the
	 * new rx configuration */
	for (; i < 3; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i + 1));
		enet_writel(priv, 0, ENET_PMH_REG(i + 1));
	}

	enet_writel(priv, val, ENET_RXCFG_REG);
}
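/*
 * Illustration only (not part of the driver): how a 6-byte multicast
 * address is split across one PML/PMH perfect-match register pair in the
 * loop above.  The struct and helper below are hypothetical; the bit
 * packing simply mirrors the ENET_PML_REG/ENET_PMH_REG writes.
 */
struct enet_pm_regs {
	u32 pml;	/* low register: address bytes 2..5 */
	u32 pmh;	/* high register: address bytes 0..1 plus valid bit */
};

static struct enet_pm_regs enet_pack_perfect_match(const u8 *addr)
{
	struct enet_pm_regs pm;

	pm.pml = (addr[2] << 24) | (addr[3] << 16) |
		 (addr[4] << 8) | addr[5];
	pm.pmh = (addr[0] << 8) | addr[1];
	pm.pmh |= ENET_PMH_DATAVALID_MASK;

	return pm;
}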
/*
 * MII internal read callback
 */
static int bcm_enet_mdio_read(struct bcm_enet_priv *priv, int mii_id,
			      int regnum)
{
	u32 tmp, val;

	tmp = regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_READ_MASK;

	if (do_mdio_op(priv, tmp))
		return -1;

	val = enet_readl(priv, ENET_MIIDATA_REG);
	val &= 0xffff;
	return val;
}
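/*
 * Sketch only: a complementary MII write callback in the style of
 * bcm_enet_mdio_read() above.  It assumes ENET_MIIDATA_DATA_SHIFT and
 * ENET_MIIDATA_OP_WRITE_MASK definitions from the same register layout;
 * treat those names as assumptions rather than verified parts of this
 * driver's headers.
 */
static int bcm_enet_mdio_write(struct bcm_enet_priv *priv, int mii_id,
			       int regnum, u16 value)
{
	u32 tmp;

	tmp = (value & 0xffff) << ENET_MIIDATA_DATA_SHIFT;
	tmp |= 0x2 << ENET_MIIDATA_TA_SHIFT;
	tmp |= regnum << ENET_MIIDATA_REG_SHIFT;
	tmp |= mii_id << ENET_MIIDATA_PHYID_SHIFT;
	tmp |= ENET_MIIDATA_OP_WRITE_MASK;

	/* a write failure only means the MII transfer timed out */
	(void)do_mdio_op(priv, tmp);

	return 0;
}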
/*
 * write given data into mii register and wait for transfer to end
 * with timeout (average measured transfer time is 25us)
 */
static int do_mdio_op(struct bcm_enet_priv *priv, unsigned int data)
{
	int limit;

	/* make sure mii interrupt status is cleared */
	enet_writel(priv, ENET_IR_MII, ENET_IR_REG);

	enet_writel(priv, data, ENET_MIIDATA_REG);
	wmb();

	/* busy wait on mii interrupt bit, with timeout */
	limit = 1000;
	do {
		if (enet_readl(priv, ENET_IR_REG) & ENET_IR_MII)
			break;
		udelay(1);
	} while (limit-- >= 0);

	return (limit < 0) ? 1 : 0;
}
/*
 * set mac flow control parameters
 */
static void bcm_enet_set_flow(struct bcm_enet_priv *priv, int rx_en, int tx_en)
{
	u32 val;

	/* rx flow control (pause frame handling) */
	val = enet_readl(priv, ENET_RXCFG_REG);
	if (rx_en)
		val |= ENET_RXCFG_ENFLOW_MASK;
	else
		val &= ~ENET_RXCFG_ENFLOW_MASK;
	enet_writel(priv, val, ENET_RXCFG_REG);

	/* tx flow control (pause frame generation) */
	val = enet_dma_readl(priv, ENETDMA_CFG_REG);
	if (tx_en)
		val |= ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	else
		val &= ~ENETDMA_CFG_FLOWCH_MASK(priv->rx_chan);
	enet_dma_writel(priv, val, ENETDMA_CFG_REG);
}
/*
 * reconfigure mac for new link state
 */
static void enet_link_reconfigure(struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	unsigned char val;

	priv = netdev_priv(dev);

	/* reflect duplex status in the mac mode register */
	spin_lock(&priv->maccr_lock);
	val = enet_readl(ENET_MAC_MODE(priv->enet_mac_base));
	if (priv->mii.full_duplex)
		val &= ~HALF_DUPLEX;
	else
		val |= HALF_DUPLEX;
	enet_writeb(ENET_MAC_MODE(priv->enet_mac_base), val);

	enet_mac_config(dev);
	spin_unlock(&priv->maccr_lock);
}
/*
 * mac interrupt handler
 */
static irqreturn_t bcm_enet_isr_mac(int irq, void *dev_id)
{
	struct net_device *dev;
	struct bcm_enet_priv *priv;
	u32 stat;

	dev = dev_id;
	priv = netdev_priv(dev);

	stat = enet_readl(priv, ENET_IR_REG);
	if (!(stat & ENET_IR_MIB))
		return IRQ_NONE;

	/* clear & mask interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, 0, ENET_IRMASK_REG);

	/* read mib registers in workqueue */
	schedule_work(&priv->mib_update_task);

	return IRQ_HANDLED;
}
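/*
 * Sketch only: the ISR above masks the MIB interrupt and defers the
 * counter read to process context, so the paired work handler typically
 * reads the counters and re-arms the interrupt.  The mib_update_lock
 * mutex, update_mib_counters() helper and net_dev back-pointer used here
 * are assumptions, not confirmed fields of this driver.
 */
static void bcm_enet_update_mib_counters_defer(struct work_struct *t)
{
	struct bcm_enet_priv *priv;

	priv = container_of(t, struct bcm_enet_priv, mib_update_task);

	mutex_lock(&priv->mib_update_lock);
	update_mib_counters(priv);
	mutex_unlock(&priv->mib_update_lock);

	/* re-enable the mib interrupt that the ISR masked */
	if (netif_running(priv->net_dev))
		enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);
}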
/*
 * update hash table to reflect new device multicast address list
 */
static void enet_set_multicast_list(struct net_device *dev)
{
#ifdef ENABLE_MULTICAST
	struct tangox_enet_priv *priv;
	struct dev_mc_list *mclist;
	unsigned char val;
	uint32_t mc_filter[2];
	int i;

	priv = netdev_priv(dev);

	/* the link check timer might change RX control, we need to protect
	 * against it */
	spin_lock_bh(&priv->maccr_lock);

	val = enet_readl(ENET_RX_CTL(priv->enet_mac_base));

	if (dev->flags & IFF_PROMISC) {
		val &= ~(RX_BC_DISABLE | RX_AF_EN);
	} else {
		val |= RX_AF_EN;

		/* if we want all multicast or if address count is too
		 * high, don't try to compute hash value */
		if (dev->mc_count > 64 || dev->flags & IFF_ALLMULTI) {
			val &= ~(RX_BC_DISABLE | RX_AF_EN);
		}
	}

	enet_writel(ENET_RX_CTL(priv->enet_mac_base), val);
	spin_unlock_bh(&priv->maccr_lock);

	/* we don't need to update hash table if we pass all
	 * multicast */
	if (!(val & RX_BC_DISABLE) && !(val & RX_AF_EN))
		return;

	/* clear internal multicast address table */
	enet_writeb(ENET_MC_INIT(priv->enet_mac_base), 0x0);
	while (enet_readb(ENET_MC_INIT(priv->enet_mac_base)))
		;

	mc_filter[0] = mc_filter[1] = 0;

	mclist = dev->mc_list;
	for (i = 0; i < dev->mc_count; i++) {
		char *addr;

		addr = mclist->dmi_addr;
		mclist = mclist->next;

		if (!(*addr & 1))
			continue;

		enet_writeb(ENET_MC_ADDR1(priv->enet_mac_base), addr[0]);
		enet_writeb(ENET_MC_ADDR2(priv->enet_mac_base), addr[1]);
		enet_writeb(ENET_MC_ADDR3(priv->enet_mac_base), addr[2]);
		enet_writeb(ENET_MC_ADDR4(priv->enet_mac_base), addr[3]);
		enet_writeb(ENET_MC_ADDR5(priv->enet_mac_base), addr[4]);
		enet_writeb(ENET_MC_ADDR6(priv->enet_mac_base), addr[5]);
		enet_writeb(ENET_MC_INIT(priv->enet_mac_base), 0xff);
		while (enet_readb(ENET_MC_INIT(priv->enet_mac_base)))
			;
	}
#endif
}
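/*
 * Illustration only: the (*addr & 1) test above checks the I/G bit of the
 * first address octet, which is set for group (multicast) addresses.  The
 * helper below is hypothetical and just names that check; mainline kernels
 * provide the equivalent is_multicast_ether_addr() in <linux/etherdevice.h>.
 */
static inline int enet_addr_is_multicast(const unsigned char *addr)
{
	/* bit 0 of the first octet distinguishes group from unicast */
	return addr[0] & 1;
}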
/*
 * tx request callback
 */
static int enet_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	volatile struct enet_desc *tx = NULL, *ptx = NULL;
	unsigned long tconfig_cache;
	unsigned long val = 0;
	volatile u32 *r_addr;
	int len = 0;
	int tx_busy = 0;
	unsigned char *txbuf;

	priv = netdev_priv(dev);

	spin_lock(&priv->tx_lock);

	val = enet_readl(ENET_TXC_CR(priv->enet_mac_base)) & 0xffff;

#ifndef ENABLE_TX_CHAINING
#ifdef CONFIG_TANGOX_ENET_TX_DELAY_1000US
#define MAX_TX_TIMEOUT	1000	/* usec */
#else
#define MAX_TX_TIMEOUT	100	/* usec */
#endif
	/* no chaining: wait for the previous transmit to finish */
	for (len = 0; len < MAX_TX_TIMEOUT; len++) {
		val = enet_readl(ENET_TXC_CR(priv->enet_mac_base)) & 0xffff;
		if (val & TCR_EN)
			udelay(1);
		else
			break;
	}
	if (len >= MAX_TX_TIMEOUT) {
		priv->stats.tx_dropped++;
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_BUSY;
	}
#else
	if (val & TCR_EN) {
		/* BUG_ON(skb == NULL); */
		tx_busy = 1;
		if (priv->pending_tx < 0)
			priv->pending_tx = priv->next_tx_desc;
	}

	if (tx_busy && (priv->pending_tx >= 0) &&
	    (priv->pending_tx_cnt >= (TX_DESC_COUNT - 1))) {
		DBG(KERN_WARNING PFX
		    "no more tx desc can be scheduled in pending queue.\n");
		netif_stop_queue(dev);
		spin_unlock(&priv->tx_lock);
		return NETDEV_TX_BUSY;
	}

	if (skb == NULL) {
		unsigned int last_tx;

		last_tx = (priv->next_tx_desc - 1 + TX_DESC_COUNT) % TX_DESC_COUNT;
		tx = &priv->tx_descs[last_tx];
		tx->config |= DESC_EOC;
		priv->tx_eoc = last_tx;
		mb();
		goto tx_pending;
	}
#endif

	len = skb->len;
	tx = &priv->tx_descs[priv->next_tx_desc];

	/* fill the tx desc with this skb address */
	tconfig_cache = 0;
	tconfig_cache |= DESC_BTS(2);
	tconfig_cache |= DESC_EOF;
	tconfig_cache |= len;

	if (((unsigned long)(skb->data) & 0x7) != 0) {
		/* not aligned to 8 bytes, bounce through an aligned buffer */
		txbuf = priv->tx_bufs[priv->next_tx_desc];
		memcpy(txbuf, skb->data, len);
		dma_cache_wback((unsigned long)txbuf, len);
		tx->s_addr = PHYSADDR((void *)txbuf);
	} else {
		dma_cache_wback((unsigned long)skb->data, len);
		tx->s_addr = PHYSADDR(skb->data);
	}

	if (tx_busy != 0) {
		tx->n_addr = PHYSADDR((void *)&(priv->tx_descs[(priv->next_tx_desc + 1) % TX_DESC_COUNT]));
	} else {
		tx->n_addr = 0;
		tconfig_cache |= DESC_EOC;
		priv->tx_eoc = priv->next_tx_desc;
	}
	tx->config = tconfig_cache;

	/* keep a pointer to it for later and give it to dma */
	priv->tx_skbs[priv->next_tx_desc] = skb;

	r_addr = (volatile u32 *)KSEG1ADDR((u32)(&(priv->tx_report[priv->next_tx_desc])));
	__raw_writel(0, r_addr);

	priv->next_tx_desc++;
	priv->next_tx_desc %= TX_DESC_COUNT;

#ifdef ETH_DEBUG
	{
		int i;

		for (i = 0; i < len; i++) {
			if (i % 16 == 0 && i > 0)
				DBG("\n");
			DBG("%02x ", txbuf[i] & 0xff);
		}
		DBG("\n");
		DBG("DESC Mode: TXC_CR=0x%x desc_addr=0x%x s_addr=0x%x n_addr=0x%x r_addr=0x%x config=0x%x\n",
		    enet_readl(ENET_TXC_CR(priv->enet_mac_base)), tx,
		    tx->s_addr, tx->n_addr, tx->r_addr, tx->config);
	}
#endif

tx_pending:
	if (tx_busy == 0) {
		if (priv->pending_tx >= 0) {
			ptx = &priv->tx_descs[priv->pending_tx];
			len = ptx->config & 0xffff;
			enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base),
				    PHYSADDR((void *)ptx));
			priv->reclaim_limit = priv->pending_tx;
			priv->pending_tx = -1;
		} else {
			priv->reclaim_limit = (priv->next_tx_desc - 1 + TX_DESC_COUNT) % TX_DESC_COUNT;
			enet_writel(ENET_TX_DESC_ADDR(priv->enet_mac_base),
				    PHYSADDR((void *)tx));
		}
		enet_writel(ENET_TX_SAR(priv->enet_mac_base), 0);
		enet_writel(ENET_TX_REPORT_ADDR(priv->enet_mac_base), 0);

		/* kick tx dma in case it was suspended */
		val |= TCR_EN;
		val |= TCR_BTS(2);
		val |= (len << 16);
		enet_writel(ENET_TXC_CR(priv->enet_mac_base), val);

		/* no pending at this stage */
		priv->pending_tx_cnt = 0;
	} else {
		priv->pending_tx_cnt++;
	}

	/* if the next tx descriptor is not clean, then we have to stop
	 * the queue */
	if (unlikely(--priv->free_tx_desc_count == 0))
		netif_stop_queue(dev);

	spin_unlock(&priv->tx_lock);

	return NETDEV_TX_OK;
}
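/*
 * Illustration only: the tx path above treats tx_descs[] as a fixed-size
 * ring, so "next" and "previous" indices are computed modulo TX_DESC_COUNT,
 * e.g. (i - 1 + TX_DESC_COUNT) % TX_DESC_COUNT for the last queued
 * descriptor.  The helpers below are hypothetical and just name that
 * arithmetic.
 */
static inline unsigned int tx_ring_next(unsigned int i)
{
	return (i + 1) % TX_DESC_COUNT;
}

static inline unsigned int tx_ring_prev(unsigned int i)
{
	/* add TX_DESC_COUNT before subtracting so the value never goes negative */
	return (i + TX_DESC_COUNT - 1) % TX_DESC_COUNT;
}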
/*
 * statistic counter read and write functions
 *
 * 44 counters are included for tracking occurrences of frame status
 * events.
 */
static unsigned long enet_stat_read(struct net_device *dev, unsigned char index)
{
	struct tangox_enet_priv *priv = netdev_priv(dev);

	enet_writeb(ENET_STAT_INDEX(priv->enet_mac_base), index);
	return enet_readl(ENET_STAT_DATA1(priv->enet_mac_base));
}
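/*
 * Sketch only: since enet_stat_read() selects a counter by index and
 * returns its value, a stats refresh can simply walk the whole index
 * range.  The ENET_STAT_COUNT constant and the function below are
 * hypothetical, not part of the driver.
 */
#define ENET_STAT_COUNT	44

static void enet_stat_gather(struct net_device *dev,
			     unsigned long counters[ENET_STAT_COUNT])
{
	unsigned char i;

	/* latch each hardware counter into the caller's array */
	for (i = 0; i < ENET_STAT_COUNT; i++)
		counters[i] = enet_stat_read(dev, i);
}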
/*
 * mac hw init is done here
 */
static int enet_hw_init(struct net_device *dev)
{
	struct tangox_enet_priv *priv;
	unsigned int val = 0;

	if (phy_reset(dev))
		return -EBUSY;

	priv = netdev_priv(dev);

	/* set pad_mode according to rgmii or not */
	val = enet_readb(priv->enet_mac_base + 0x400) & 0xf0;
	if (priv->rgmii)
		enet_writeb(priv->enet_mac_base + 0x400, val | 0x01);

	/* software reset IP */
	enet_writeb(priv->enet_mac_base + 0x424, 0);
	udelay(10);
	enet_writeb(priv->enet_mac_base + 0x424, 1);

	/* set threshold for internal clock: 0x1 */
	enet_writeb(ENET_IC_THRESHOLD(priv->enet_mac_base), 1);

	/* set random seed: 0x8 */
	enet_writeb(ENET_RANDOM_SEED(priv->enet_mac_base), 0x08);

	/* set TX single deferral params: 0xc */
	enet_writeb(ENET_TX_SDP(priv->enet_mac_base), 0xc);

	/* set slot time 0x7f for 10/100Mbps */
	enet_writeb(ENET_SLOT_TIME(priv->enet_mac_base), 0x7f);

	/* set threshold for partial full: 0x7f */
	enet_writeb(ENET_PF_THRESHOLD(priv->enet_mac_base), 0x7f);

	/* configure TX DMA channel */
	val = enet_readl(ENET_TXC_CR(priv->enet_mac_base));
	val |= TCR_RS | TCR_LE | TCR_TFI(1) | /* TCR_DIE | */ TCR_BTS(2);
	val |= TCR_DM;
	enet_writel(ENET_TXC_CR(priv->enet_mac_base), val);
	val = enet_readl(ENET_TXC_CR(priv->enet_mac_base));

	/* configure RX DMA channel */
	val = enet_readl(ENET_RXC_CR(priv->enet_mac_base));
	val |= (RCR_RS | RCR_LE | RCR_RFI(1) | RCR_BTS(2) | RCR_FI |
		RCR_DIE /* | RCR_EN */);
	val |= RCR_DM;
	val |= RX_BUF_SIZE << 16;
	enet_writel(ENET_RXC_CR(priv->enet_mac_base), val);

	/* configure MAC controller */
	val = enet_readb(ENET_TX_CTL1(priv->enet_mac_base));
	val |= (TX_RETRY_EN | TX_PAD_EN | TX_APPEND_FCS);
	enet_writeb(ENET_TX_CTL1(priv->enet_mac_base), (unsigned char)val);

	/* retry 5 times when a collision occurs */
	enet_writeb(ENET_TX_CTL2(priv->enet_mac_base), 5);

	val = enet_readb(ENET_RX_CTL(priv->enet_mac_base));
	val |= (RX_RUNT | RX_PAD_STRIP | RX_SEND_CRC | RX_PAUSE_EN | RX_AF_EN);
	enet_writeb(ENET_RX_CTL(priv->enet_mac_base), (unsigned char)val);

#ifdef ENABLE_MULTICAST
	/* clear internal multicast address table */
	enet_writeb(ENET_MC_INIT(priv->enet_mac_base), 0x00);
	while (enet_readb(ENET_MC_INIT(priv->enet_mac_base)))
		;
	DBG("Internal multicast address table is cleared\n");
#endif
	/* unicast */

	/* threshold for internal clock */
	/* threshold for partial empty */
	/* threshold for partial full */

	/* buffer size for transmit: the doc says this must be 1, but 0xff is
	 * used here */
	enet_writeb(ENET_TX_BUFSIZE(priv->enet_mac_base), 0xff);

	/* fifo control */

	/* MAC mode */
	enet_mac_config(dev);

	/* check gmii mode support */
	priv->mii.supports_gmii = mii_check_gmii_support(&priv->mii);
	DBG("gmii support=0x%x id=0x%x\n", priv->mii.supports_gmii,
	    priv->mii.phy_id);

	return 0;
}