/*
 * ramips_mdio_read - read a 16-bit PHY register via the FE MDIO interface.
 *
 * Returns the register value, or 0xffff if the MDIO unit did not become
 * ready before or after the access (mirrors "no PHY" reads).
 */
static int
ramips_mdio_read(struct mii_bus *bus, int phy_addr, int phy_reg)
{
	struct raeth_priv *re = bus->priv;
	int err;
	u32 t;
	u32 val;

	err = ramips_mdio_wait_ready(re);
	if (err)
		return 0xffff;

	t = (phy_addr << 24) | (phy_reg << 16);
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);
	/* set the trigger bit; unsigned constant avoids UB from shifting
	 * into the sign bit of a signed int */
	t |= (0x1u << 31);
	ramips_fe_wr(t, RAMIPS_MDIO_ACCESS);

	err = ramips_mdio_wait_ready(re);
	if (err)
		return 0xffff;

	/* read the result register exactly once; re-reading it for the
	 * debug print could return a different value than the one we
	 * hand back to the caller */
	val = ramips_fe_rr(RAMIPS_MDIO_ACCESS) & 0xffff;
	RADEBUG("%s: addr=%04x, reg=%04x, value=%04x\n",
		__func__, phy_addr, phy_reg, val);

	return val;
}
/* Set the given bits in the FE interrupt enable register. */
static inline void
ramips_fe_int_enable(u32 mask)
{
	u32 val = ramips_fe_rr(RAMIPS_FE_INT_ENABLE);

	ramips_fe_wr(val | mask, RAMIPS_FE_INT_ENABLE);
	/* flush write */
	ramips_fe_rr(RAMIPS_FE_INT_ENABLE);
}
/*
 * ramips_eth_hw_init - one-time hardware bring-up for the FE ethernet core.
 *
 * Claims the device IRQ, allocates and wires up the DMA rings, programs the
 * MAC address and DMA engine, sets up delayed-interrupt coalescing, and
 * resets the frame engine.  Returns 0 on success or a negative errno; on
 * ring allocation failure the IRQ is released again.
 */
static int
ramips_eth_hw_init(struct net_device *dev)
{
	struct raeth_priv *re = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
			  dev->name, dev);
	if (err)
		return err;

	err = ramips_ring_alloc(re);
	if (err)
		goto err_free_irq;

	ramips_ring_setup(re);
	ramips_hw_set_macaddr(dev->dev_addr);

	ramips_setup_dma(re);
	/* program the microsecond cycle counter from the system clock so
	 * the delayed-interrupt timers tick in real time */
	ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
		      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
		     ((re->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) <<
		      RAMIPS_US_CYC_CNT_SHIFT),
		     RAMIPS_FE_GLO_CFG);

	tasklet_init(&re->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
		     (unsigned long)dev);
	tasklet_init(&re->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);

	/* enable TX/RX delayed (coalesced) interrupts */
	ramips_fe_twr(RAMIPS_DELAY_INIT, RAETH_REG_DLY_INT_CFG);
	ramips_fe_twr(TX_DLY_INT | RX_DLY_INT, RAETH_REG_FE_INT_ENABLE);

	if (soc_is_rt5350()) {
		/* RT5350: disable hardware checksum generation and clear the
		 * low 16 config bits in the switch DMA config */
		ramips_fe_wr(ramips_fe_rr(RT5350_SDM_CFG) &
			     ~(RT5350_SDM_ICS_EN | RT5350_SDM_TCS_EN |
			       RT5350_SDM_UCS_EN | 0xffff),
			     RT5350_SDM_CFG);
	} else {
		/* other SoCs: disable checksum offload on GDMA1 and the CDMA
		 * checksum generators, then load the PSE free-queue config */
		ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
			     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN |
			       RAMIPS_GDM1_UCS_EN | 0xffff),
			     RAMIPS_GDMA1_FWD_CFG);
		ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
			     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN |
			       RAMIPS_UCS_GEN_EN),
			     RAMIPS_CDMA_CSG_CFG);
		ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);
	}

	/* pulse the frame-engine global reset */
	ramips_fe_wr(1, RAMIPS_FE_RST_GL);
	ramips_fe_wr(0, RAMIPS_FE_RST_GL);

	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
	return err;
}
/*
 * ramips_eth_open - ndo_open: bring the interface up.
 *
 * Claims the IRQ, allocates DMA rings, programs MAC/DMA/interrupt state,
 * starts the PHY and the TX queue.  Returns 0 or a negative errno; on DMA
 * allocation failure the IRQ is released again.
 */
static int
ramips_eth_open(struct net_device *dev)
{
	struct raeth_priv *priv = netdev_priv(dev);
	int err;

	err = request_irq(dev->irq, ramips_eth_irq, IRQF_DISABLED,
			  dev->name, dev);
	if (err)
		return err;

	err = ramips_alloc_dma(priv);
	if (err)
		goto err_free_irq;

	ramips_hw_set_macaddr(dev->dev_addr);

	ramips_setup_dma(priv);
	/* enable both DMA engines and descriptor write-back, keeping only
	 * the low byte of the previous config */
	ramips_fe_wr((ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & 0xff) |
		     (RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN |
		      RAMIPS_TX_DMA_EN | RAMIPS_PDMA_SIZE_4DWORDS),
		     RAMIPS_PDMA_GLO_CFG);
	/* program the microsecond cycle counter from the system clock */
	ramips_fe_wr((ramips_fe_rr(RAMIPS_FE_GLO_CFG) &
		      ~(RAMIPS_US_CYC_CNT_MASK << RAMIPS_US_CYC_CNT_SHIFT)) |
		     ((priv->plat->sys_freq / RAMIPS_US_CYC_CNT_DIVISOR) <<
		      RAMIPS_US_CYC_CNT_SHIFT),
		     RAMIPS_FE_GLO_CFG);

	tasklet_init(&priv->tx_housekeeping_tasklet, ramips_eth_tx_housekeeping,
		     (unsigned long)dev);
	tasklet_init(&priv->rx_tasklet, ramips_eth_rx_hw, (unsigned long)dev);

	ramips_phy_start(priv);

	/* enable TX/RX delayed (coalesced) interrupts */
	ramips_fe_wr(RAMIPS_DELAY_INIT, RAMIPS_DLY_INT_CFG);
	ramips_fe_wr(RAMIPS_TX_DLY_INT | RAMIPS_RX_DLY_INT,
		     RAMIPS_FE_INT_ENABLE);

	/* disable checksum offload generation; clear low 16 GDMA1 bits */
	ramips_fe_wr(ramips_fe_rr(RAMIPS_GDMA1_FWD_CFG) &
		     ~(RAMIPS_GDM1_ICS_EN | RAMIPS_GDM1_TCS_EN |
		       RAMIPS_GDM1_UCS_EN | 0xffff),
		     RAMIPS_GDMA1_FWD_CFG);
	ramips_fe_wr(ramips_fe_rr(RAMIPS_CDMA_CSG_CFG) &
		     ~(RAMIPS_ICS_GEN_EN | RAMIPS_TCS_GEN_EN |
		       RAMIPS_UCS_GEN_EN),
		     RAMIPS_CDMA_CSG_CFG);
	ramips_fe_wr(RAMIPS_PSE_FQFC_CFG_INIT, RAMIPS_PSE_FQ_CFG);

	/* pulse the frame-engine global reset */
	ramips_fe_wr(1, RAMIPS_FE_RST_GL);
	ramips_fe_wr(0, RAMIPS_FE_RST_GL);

	netif_start_queue(dev);
	return 0;

err_free_irq:
	free_irq(dev->irq, dev);
	return err;
}
/*
 * ramips_eth_rx_hw - RX tasklet: drain completed RX descriptors.
 *
 * Processes at most 16 completed descriptors per run.  For each done
 * descriptor a replacement buffer is allocated first; only if that succeeds
 * is the received skb passed up the stack, otherwise the buffer is reused
 * and the packet counted as dropped.  If the 16-packet budget is exhausted
 * the tasklet reschedules itself; otherwise the RX delay interrupt is
 * re-enabled.
 */
static void
ramips_eth_rx_hw(unsigned long ptr)
{
	struct net_device *dev = (struct net_device *) ptr;
	struct raeth_priv *priv = netdev_priv(dev);
	int rx;
	int max_rx = 16;

	while (max_rx) {
		struct sk_buff *rx_skb, *new_skb;
		int pktlen;

		/* hardware owns CALC_IDX0; the next descriptor to look at
		 * is the one after it */
		rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC;
		if (!(priv->rx[rx].rxd2 & RX_DMA_DONE))
			break;
		max_rx--;

		rx_skb = priv->rx_skb[rx];
		pktlen = RX_DMA_PLEN0(priv->rx[rx].rxd2);

		new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + NET_IP_ALIGN);
		/* Reuse the buffer on allocation failures */
		if (new_skb) {
			dma_addr_t dma_addr;

			/* unmap before the CPU touches the packet data */
			dma_unmap_single(&priv->netdev->dev,
					 priv->rx_dma[rx], MAX_RX_LENGTH,
					 DMA_FROM_DEVICE);

			skb_put(rx_skb, pktlen);
			rx_skb->dev = dev;
			rx_skb->protocol = eth_type_trans(rx_skb, dev);
			rx_skb->ip_summed = CHECKSUM_NONE;
			dev->stats.rx_packets++;
			dev->stats.rx_bytes += pktlen;
			netif_rx(rx_skb);

			priv->rx_skb[rx] = new_skb;
			skb_reserve(new_skb, NET_IP_ALIGN);
			/* NOTE(review): dma_map_single() result is not
			 * checked with dma_mapping_error() — confirm the
			 * platform cannot fail the mapping */
			dma_addr = dma_map_single(&priv->netdev->dev,
						  new_skb->data,
						  MAX_RX_LENGTH,
						  DMA_FROM_DEVICE);
			priv->rx_dma[rx] = dma_addr;
			priv->rx[rx].rxd1 = (unsigned int) dma_addr;
		} else {
			dev->stats.rx_dropped++;
		}

		/* hand the descriptor back to hardware; the barrier orders
		 * the descriptor update before the index write */
		priv->rx[rx].rxd2 &= ~RX_DMA_DONE;
		wmb();
		ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0);
	}

	if (max_rx == 0)
		tasklet_schedule(&priv->rx_tasklet);
	else
		ramips_fe_int_enable(RAMIPS_RX_DLY_INT);
}
static irqreturn_t ramips_eth_irq(int irq, void *dev) { struct raeth_priv *priv = netdev_priv(dev); unsigned long fe_int = ramips_fe_rr(RAMIPS_FE_INT_STATUS); ramips_fe_wr(0xFFFFFFFF, RAMIPS_FE_INT_STATUS); if(fe_int & RAMIPS_RX_DLY_INT) { ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) & ~(RAMIPS_RX_DLY_INT), RAMIPS_FE_INT_ENABLE); tasklet_schedule(&priv->rx_tasklet); } if(fe_int & RAMIPS_TX_DLY_INT) ramips_eth_tx_housekeeping((unsigned long)dev); return IRQ_HANDLED; }
static void ramips_eth_rx_hw(unsigned long ptr) { struct net_device *dev = (struct net_device*)ptr; struct raeth_priv *priv = netdev_priv(dev); int rx; int max_rx = 16; while(max_rx) { struct sk_buff *rx_skb, *new_skb; rx = (ramips_fe_rr(RAMIPS_RX_CALC_IDX0) + 1) % NUM_RX_DESC; if(!(priv->rx[rx].rxd2 & RX_DMA_DONE)) break; max_rx--; rx_skb = priv->rx_skb[rx]; rx_skb->len = RX_DMA_PLEN0(priv->rx[rx].rxd2); rx_skb->dev = dev; rx_skb->protocol = eth_type_trans(rx_skb, dev); rx_skb->ip_summed = CHECKSUM_NONE; dev->stats.rx_packets++; dev->stats.rx_bytes += rx_skb->len; netif_rx(rx_skb); new_skb = netdev_alloc_skb(dev, MAX_RX_LENGTH + 2); priv->rx_skb[rx] = new_skb; BUG_ON(!new_skb); skb_reserve(new_skb, 2); priv->rx[rx].rxd1 = dma_map_single(NULL, new_skb->data, MAX_RX_LENGTH + 2, DMA_FROM_DEVICE); priv->rx[rx].rxd2 &= ~RX_DMA_DONE; wmb(); ramips_fe_wr(rx, RAMIPS_RX_CALC_IDX0); } if(max_rx == 0) tasklet_schedule(&priv->rx_tasklet); else ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_RX_DLY_INT, RAMIPS_FE_INT_ENABLE); }
static int ramips_eth_hard_start_xmit(struct sk_buff* skb, struct net_device *dev) { struct raeth_priv *priv = netdev_priv(dev); unsigned long tx; unsigned int tx_next; unsigned int mapped_addr; unsigned long flags; if(priv->plat->min_pkt_len) { if(skb->len < priv->plat->min_pkt_len) { if(skb_padto(skb, priv->plat->min_pkt_len)) { printk(KERN_ERR "ramips_eth: skb_padto failed\n"); kfree_skb(skb); return 0; } skb_put(skb, priv->plat->min_pkt_len - skb->len); } } dev->trans_start = jiffies; mapped_addr = (unsigned int)dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE); dma_sync_single_for_device(NULL, mapped_addr, skb->len, DMA_TO_DEVICE); spin_lock_irqsave(&priv->page_lock, flags); tx = ramips_fe_rr(RAMIPS_TX_CTX_IDX0); if(tx == NUM_TX_DESC - 1) tx_next = 0; else tx_next = tx + 1; if((priv->tx_skb[tx]) || (priv->tx_skb[tx_next]) || !(priv->tx[tx].txd2 & TX_DMA_DONE) || !(priv->tx[tx_next].txd2 & TX_DMA_DONE)) goto out; priv->tx[tx].txd1 = mapped_addr; priv->tx[tx].txd2 &= ~(TX_DMA_PLEN0_MASK | TX_DMA_DONE); priv->tx[tx].txd2 |= TX_DMA_PLEN0(skb->len); dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; priv->tx_skb[tx] = skb; wmb(); ramips_fe_wr((tx + 1) % NUM_TX_DESC, RAMIPS_TX_CTX_IDX0); spin_unlock_irqrestore(&priv->page_lock, flags); return NETDEV_TX_OK; out: spin_unlock_irqrestore(&priv->page_lock, flags); dev->stats.tx_dropped++; kfree_skb(skb); return NETDEV_TX_OK; }
static int ramips_eth_stop(struct net_device *dev) { struct raeth_priv *priv = netdev_priv(dev); ramips_fe_wr(RAMIPS_PDMA_GLO_CFG, ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN)); free_irq(dev->irq, dev); netif_stop_queue(dev); tasklet_kill(&priv->tx_housekeeping_tasklet); tasklet_kill(&priv->rx_tasklet); ramips_cleanup_dma(dev); printk(KERN_DEBUG "ramips_eth: stopped\n"); return 0; }
static void ramips_eth_tx_housekeeping(unsigned long ptr) { struct net_device *dev = (struct net_device*)ptr; struct raeth_priv *priv = netdev_priv(dev); while((priv->tx[priv->skb_free_idx].txd2 & TX_DMA_DONE) && (priv->tx_skb[priv->skb_free_idx])) { dev_kfree_skb_irq((struct sk_buff*)priv->tx_skb[priv->skb_free_idx]); priv->tx_skb[priv->skb_free_idx] = 0; priv->skb_free_idx++; if(priv->skb_free_idx >= NUM_TX_DESC) priv->skb_free_idx = 0; } ramips_fe_wr(ramips_fe_rr(RAMIPS_FE_INT_ENABLE) | RAMIPS_TX_DLY_INT, RAMIPS_FE_INT_ENABLE); }
static int ramips_eth_stop(struct net_device *dev) { struct raeth_priv *priv = netdev_priv(dev); ramips_fe_wr(ramips_fe_rr(RAMIPS_PDMA_GLO_CFG) & ~(RAMIPS_TX_WB_DDONE | RAMIPS_RX_DMA_EN | RAMIPS_TX_DMA_EN), RAMIPS_PDMA_GLO_CFG); /* disable all interrupts in the hw */ ramips_fe_wr(0, RAMIPS_FE_INT_ENABLE); free_irq(dev->irq, dev); netif_stop_queue(dev); tasklet_kill(&priv->tx_housekeeping_tasklet); tasklet_kill(&priv->rx_tasklet); ramips_cleanup_dma(priv); printk(KERN_DEBUG "ramips_eth: stopped\n"); return 0; }
/*
 * ramips_mdio_wait_ready - poll until the MDIO busy bit clears.
 *
 * Polls bit 31 of RAMIPS_MDIO_ACCESS up to RAMIPS_MDIO_RETRY times with a
 * 1us delay between reads.  Returns 0 when ready, -ETIMEDOUT otherwise.
 */
static int
ramips_mdio_wait_ready(struct raeth_priv *re)
{
	int retries = RAMIPS_MDIO_RETRY;

	while (1) {
		u32 t;

		t = ramips_fe_rr(RAMIPS_MDIO_ACCESS);
		/* unsigned constant: (0x1 << 31) left-shifts into the sign
		 * bit of a signed int, which is undefined behavior */
		if ((t & (0x1u << 31)) == 0)
			return 0;

		if (retries-- == 0)
			break;

		udelay(1);
	}

	dev_err(re->parent, "MDIO operation timed out\n");
	return -ETIMEDOUT;
}
/* Translate an abstract register id to its SoC-specific offset, then
 * read that FE register. */
static inline u32
ramips_fe_trr(enum raeth_reg reg)
{
	return ramips_fe_rr(get_reg_offset(reg));
}