/*
 * ag71xx_hw_init - bring the MAC out of reset and program its
 * configuration and FIFO registers, then reset the DMA engine.
 *
 * Sequence: MAC soft-reset, device-block reset pulse (with the long
 * settle delays the hardware needs), MAC/FIFO register setup, DMA reset.
 * The ordering of these steps matters; do not reorder.
 */
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

	/* assert MAC soft reset */
	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	/* pulse the device-level reset line for this MAC */
	ar71xx_device_stop(pdata->reset_bit);
	mdelay(100);
	ar71xx_device_start(pdata->reset_bit);
	mdelay(100);

	/* setup MAC configuration registers */
	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);
	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);

	/* setup max frame length */
	ag71xx_wr(ag, AG71XX_REG_MAC_MFL, AG71XX_TX_MTU_LEN);

	/* setup MII interface type */
	ag71xx_mii_ctrl_set_if(ag, pdata->mii_if);

	/* setup FIFO configuration registers */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, FIFO_CFG0_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, FIFO_CFG4_INIT);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, FIFO_CFG5_INIT);

	ag71xx_dma_reset(ag);
}
/*
 * ag71xx_hw_init - reset the MAC and program configuration/FIFO
 * registers (older revision: hard-coded FIFO_CFG0/4/5 values, no
 * max-frame-length setup and no trailing DMA reset).
 */
static void ag71xx_hw_init(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

	/* assert MAC soft reset */
	ag71xx_sb(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_SR);
	udelay(20);

	/* pulse the device-level reset line for this MAC */
	ar71xx_device_stop(pdata->reset_bit);
	mdelay(100);
	ar71xx_device_start(pdata->reset_bit);
	mdelay(100);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG1, MAC_CFG1_INIT);
	/* TODO: set max packet size */

	ag71xx_sb(ag, AG71XX_REG_MAC_CFG2,
		  MAC_CFG2_PAD_CRC_EN | MAC_CFG2_LEN_CHECK);
	/* NOTE(review): magic FIFO values below — presumably the same
	 * settings later named FIFO_CFG*_INIT; confirm against datasheet */
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG0, 0x00001f00);

	ag71xx_mii_ctrl_set_if(ag, pdata->mii_if);

	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG1, 0x0fff0000);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG2, 0x00001fff);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG4, 0x0000ffff);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, 0x0007ffef);
}
/*
 * ag71xx_open - net_device ndo_open: allocate rings, (re)init the
 * hardware on AR724x, program descriptor base addresses and the MAC
 * address, then start the MAC and the TX queue.
 *
 * Returns 0 on success or a negative errno from ring allocation.
 */
static int ag71xx_open(struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	int ret;

	ret = ag71xx_rings_init(ag);
	if (ret)
		goto err;

	/* AR724x needs a full hardware re-init on every open */
	if (pdata->is_ar724x)
		ag71xx_hw_init(ag);

	napi_enable(&ag->napi);

	/* carrier comes up later via the link-adjust path */
	netif_carrier_off(dev);
	ag71xx_phy_start(ag);

	/* point the DMA engines at the freshly allocated rings */
	ag71xx_wr(ag, AG71XX_REG_TX_DESC, ag->tx_ring.descs_dma);
	ag71xx_wr(ag, AG71XX_REG_RX_DESC, ag->rx_ring.descs_dma);

	ag71xx_hw_set_macaddr(ag, dev->dev_addr);

	ag71xx_hw_start(ag);

	netif_start_queue(dev);

	return 0;

err:
	ag71xx_rings_cleanup(ag);
	return ret;
}
/*
 * ag71xx_phy_connect_fixed - no PHY attached; take the fixed link
 * speed/duplex from the platform data.
 *
 * Returns 0, or -EINVAL when the configured speed is not 10/100/1000.
 * The duplex/speed fields are copied into @ag in either case.
 */
static int ag71xx_phy_connect_fixed(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct device *dev = &ag->pdev->dev;
	int err = 0;

	/* use fixed settings */
	if (pdata->speed != SPEED_10 &&
	    pdata->speed != SPEED_100 &&
	    pdata->speed != SPEED_1000) {
		dev_err(dev, "invalid speed specified\n");
		err = -EINVAL;
	}

	dev_dbg(dev, "using fixed link parameters\n");

	ag->duplex = pdata->duplex;
	ag->speed = pdata->speed;

	return err;
}
/*
 * ag71xx_hard_start_xmit - queue one skb on the TX ring and kick the
 * TX DMA engine.
 *
 * Drops the packet (and counts tx_dropped) when the next descriptor is
 * still owned by hardware or the skb ends up empty after header
 * mangling. Always returns 0 (packet consumed either way).
 */
static int ag71xx_hard_start_xmit(struct sk_buff *skb,
				  struct net_device *dev)
{
	struct ag71xx *ag = netdev_priv(dev);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct ag71xx_ring *ring = &ag->tx_ring;
	struct ag71xx_desc *desc;
	unsigned long flags;
	int i;

	i = ring->curr % AG71XX_TX_RING_SIZE;
	desc = &ring->descs[i];

	/* DDR flush is done under the lock; presumably it must not race
	 * with the flush in the poll path — TODO confirm */
	spin_lock_irqsave(&ag->lock, flags);
	pdata->ddr_flush();
	spin_unlock_irqrestore(&ag->lock, flags);

	/* ring slot still owned by hardware -> no room, drop */
	if (!ag71xx_desc_empty(desc))
		goto err_drop;

	ag71xx_add_ar8216_header(ag, skb);

	if (skb->len <= 0) {
		DBG("%s: packet len is too small\n", ag->dev->name);
		goto err_drop;
	}

	/* NOTE(review): the dma_map_single() return value is discarded and
	 * virt_to_phys() is used for the descriptor instead — looks like it
	 * is called only for its cache-flush side effect; verify on a
	 * platform with an IOMMU/non-trivial DMA mapping */
	dma_map_single(NULL, skb->data, skb->len, DMA_TO_DEVICE);

	ring->buf[i].skb = skb;

	/* setup descriptor fields */
	desc->data = virt_to_phys(skb->data);
	desc->ctrl = (skb->len & DESC_PKTLEN_M);

	/* flush descriptor */
	wmb();

	ring->curr++;
	if (ring->curr == (ring->dirty + AG71XX_TX_THRES_STOP)) {
		DBG("%s: tx queue full\n", ag->dev->name);
		netif_stop_queue(dev);
	}

	DBG("%s: packet injected into TX queue\n", ag->dev->name);

	/* enable TX engine */
	ag71xx_wr(ag, AG71XX_REG_TX_CTRL, TX_CTRL_TXE);

	dev->trans_start = jiffies;

	return 0;

err_drop:
	dev->stats.tx_dropped++;

	dev_kfree_skb(skb);
	return 0;
}
/*
 * ag71xx_poll - NAPI poll handler (older revision): optionally reap TX
 * under AG71XX_NAPI_TX, process up to @limit RX packets, then either
 * re-enable interrupts (work done) or stay in polling mode.
 *
 * Returns 0 when polling is complete, 1 to remain scheduled.
 */
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
#ifdef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
#endif
	struct net_device *dev = ag->dev;
	unsigned long flags;
	u32 status;
	int done;

#ifdef AG71XX_NAPI_TX
	ar71xx_ddr_flush(pdata->flush_reg);
	ag71xx_tx_packets(ag);
#endif

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	/* TODO: add OOM handler */

	status = ag71xx_rr(ag, AG71XX_REG_INT_STATUS);
	status &= AG71XX_INT_POLL;

	if ((done < limit) && (!status)) {
		DBG("%s: disable polling mode, done=%d, status=%x\n",
		    dev->name, done, status);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return 0;
	}

	if (status & AG71XX_INT_RX_OF) {
		if (netif_msg_rx_err(ag))
			printk(KERN_ALERT
			       "%s: rx owerflow, restarting dma\n",
			       dev->name);

		/* ack interrupt */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	DBG("%s: stay in polling mode, done=%d, status=%x\n",
	    dev->name, done, status);
	return 1;
}
/*
 * ag71xx_phy_start - start the link layer: a real PHY if one is
 * attached, the AR7240 built-in switch if configured, otherwise force
 * the link up with the current settings.
 */
void ag71xx_phy_start(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

	if (ag->phy_dev) {
		phy_start(ag->phy_dev);
		return;
	}

	if (pdata->mii_bus_dev && pdata->switch_data) {
		ag71xx_ar7240_start(ag);
		return;
	}

	/* no PHY and no switch: assume the link is always up */
	ag->link = 1;
	ag71xx_link_adjust(ag);
}
/*
 * ag71xx_phy_start - start the attached PHY, or, when none exists,
 * force the link up using the fixed speed/duplex from platform data.
 */
void ag71xx_phy_start(struct ag71xx *ag)
{
	if (ag->phy_dev) {
		phy_start(ag->phy_dev);
		return;
	}

	{
		struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);

		/* no PHY: apply fixed link parameters and report link up */
		ag->duplex = pdata->duplex;
		ag->speed = pdata->speed;
		ag->link = 1;
		ag71xx_phy_link_update(ag);
	}
}
/*
 * ag71xx_rx_reserve - headroom (in bytes) to reserve in RX skbs.
 *
 * On AR724x an extra 0-3 byte pad is added so the payload lands at the
 * alignment the hardware/PHY expects (2 bytes unless an AR8216 header
 * is in use, adjusted by the PHY's pkt_align, folded modulo 4).
 */
static int ag71xx_rx_reserve(struct ag71xx *ag)
{
	int pad = 0;

	if (!ag71xx_get_pdata(ag)->is_ar724x)
		return AG71XX_RX_PKT_RESERVE;

	if (!ag71xx_has_ar8216(ag))
		pad = 2;

	if (ag->phy_dev)
		pad += 4 - (ag->phy_dev->pkt_align % 4);

	pad %= 4;

	return pad + AG71XX_RX_PKT_RESERVE;
}
/*
 * ag71xx_phy_stop - stop the PHY (or the AR7240 switch) and, under the
 * driver lock, force the link down through ag71xx_link_adjust().
 */
void ag71xx_phy_stop(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	unsigned long flags;

	if (ag->phy_dev) {
		phy_stop(ag->phy_dev);
	} else {
		if (pdata->mii_bus_dev && pdata->switch_data)
			ag71xx_ar7240_stop(ag);
	}

	spin_lock_irqsave(&ag->lock, flags);
	if (ag->link != 0) {
		ag->link = 0;
		ag71xx_link_adjust(ag);
	}
	spin_unlock_irqrestore(&ag->lock, flags);
}
static int ag71xx_phy_connect_fixed(struct ag71xx *ag) { struct platform_device *pdev = ag->pdev; struct device *dev = NULL; struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag); int ret = 0; if (!pdev) return -ENODEV; dev = &pdev->dev; if (!dev) return -ENODEV; if (!ag->phy_dev) { pr_err("Missing PHY for %s", dev_name(dev)); return -ENODEV; } /* use fixed settings */ switch (pdata->speed) { case SPEED_10: case SPEED_100: case SPEED_1000: break; default: dev_err(dev, "invalid speed specified\n"); ret = -EINVAL; break; } dev_dbg(dev, "using fixed link parameters\n"); ag->duplex = pdata->duplex; ag->speed = pdata->speed; if (!ret) { dev_info(dev, "connected to fixed PHY at %s [uid=%08x, driver=%s]\n", phydev_name(ag->phy_dev), ag->phy_dev->phy_id, ag->phy_dev->drv->name); } else { pr_err("Failed to connect to fixed PHY\n"); } return ret; }
/*
 * ag71xx_phy_connect_fixed - sanity-check the fixed link speed from
 * the platform data.
 *
 * Returns 0 when the speed is 10/100/1000, -EINVAL otherwise.
 */
static int ag71xx_phy_connect_fixed(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;

	/* use fixed settings */
	if (pdata->speed == SPEED_10 ||
	    pdata->speed == SPEED_100 ||
	    pdata->speed == SPEED_1000)
		return 0;

	printk(KERN_ERR "%s: invalid speed specified\n", dev->name);
	return -EINVAL;
}
/*
 * ag71xx_tx_packets - reap completed TX descriptors: ack the TX-done
 * status, update stats, free the skbs and advance the dirty pointer;
 * wake the queue when enough ring space has been recovered.
 */
static void ag71xx_tx_packets(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct ag71xx_ring *ring = &ag->tx_ring;
	unsigned int sent;

	DBG("%s: processing TX ring\n", ag->dev->name);

#ifdef AG71XX_NAPI_TX
	ar71xx_ddr_flush(pdata->flush_reg);
#endif

	sent = 0;
	while (ring->dirty != ring->curr) {
		unsigned int i = ring->dirty % AG71XX_TX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb = ring->buf[i].skb;

		/* descriptor still owned by hardware: stop reaping */
		if (!ag71xx_desc_empty(desc))
			break;

		/* ack one packet-sent event per reaped descriptor */
		ag71xx_wr(ag, AG71XX_REG_TX_STATUS, TX_STATUS_PS);

		ag->dev->stats.tx_bytes += skb->len;
		ag->dev->stats.tx_packets++;

		dev_kfree_skb_any(skb);
		ring->buf[i].skb = NULL;

		ring->dirty++;
		sent++;
	}

	DBG("%s: %d packets sent out\n", ag->dev->name, sent);

	if ((ring->curr - ring->dirty) < AG71XX_TX_THRES_WAKEUP)
		netif_wake_queue(ag->dev);
}
static void ag71xx_phy_link_adjust(struct net_device *dev) { struct ag71xx *ag = netdev_priv(dev); struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag); struct phy_device *phydev = ag->phy_dev; unsigned long flags; int status_change = 0; spin_lock_irqsave(&ag->lock, flags); if (phydev->link) { if (ag->duplex != phydev->duplex || ag->speed != phydev->speed) { status_change = 1; } } if (phydev->link != ag->link) status_change = 1; if (pdata->force_link) { ag->link = 1; ag->duplex = pdata->duplex; ag->speed = pdata->speed; }else{ ag->link = phydev->link; ag->duplex = phydev->duplex; ag->speed = phydev->speed; } if (status_change) ag71xx_link_adjust(ag); spin_unlock_irqrestore(&ag->lock, flags); }
/*
 * ag71xx_rx_packets - receive up to @limit packets from the RX ring,
 * hand them to the network stack and refill the ring.
 *
 * Returns the number of packets processed.
 */
static int ag71xx_rx_packets(struct ag71xx *ag, int limit)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *ring = &ag->rx_ring;
#ifndef AG71XX_NAPI_TX
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	unsigned long flags;
#endif
	int done = 0;

#ifndef AG71XX_NAPI_TX
	/* when TX is not reaped in NAPI, flush DDR here under the lock */
	spin_lock_irqsave(&ag->lock, flags);
	ar71xx_ddr_flush(pdata->flush_reg);
	spin_unlock_irqrestore(&ag->lock, flags);
#endif

	DBG("%s: rx packets, limit=%d, curr=%u, dirty=%u\n",
	    dev->name, limit, ring->curr, ring->dirty);

	while (done < limit) {
		unsigned int i = ring->curr % AG71XX_RX_RING_SIZE;
		struct ag71xx_desc *desc = &ring->descs[i];
		struct sk_buff *skb;
		int pktlen;

		/* descriptor still owned by hardware: nothing more to do */
		if (ag71xx_desc_empty(desc))
			break;

		/* curr must never lap dirty by a full ring */
		if ((ring->dirty + AG71XX_RX_RING_SIZE) == ring->curr) {
			ag71xx_assert(0);
			break;
		}

		skb = ring->buf[i].skb;
		pktlen = ag71xx_desc_pktlen(desc);
		pktlen -= ETH_FCS_LEN;

		/* TODO: move it into the refill function */
		dma_cache_wback_inv((unsigned long)skb->data, pktlen);

		skb_put(skb, pktlen);

		skb->dev = dev;
		skb->protocol = eth_type_trans(skb, dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		netif_receive_skb(skb);

		dev->last_rx = jiffies;
		dev->stats.rx_packets++;
		dev->stats.rx_bytes += pktlen;

		ring->buf[i].skb = NULL;
		done++;

		/* ack one packet-received event */
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_PR);

		ring->curr++;

		/* refill early once a quarter of the ring is consumed */
		if ((ring->curr - ring->dirty) > (AG71XX_RX_RING_SIZE / 4))
			ag71xx_ring_rx_refill(ag);
	}

	ag71xx_ring_rx_refill(ag);

	DBG("%s: rx finish, curr=%u, dirty=%u, done=%d\n",
	    dev->name, ring->curr, ring->dirty, done);

	return done;
}
/*
 * ar7240_probe - detect and register the AR7240/AR934x built-in
 * switch behind the MAC's MDIO bus.
 *
 * Verifies the PHY ID at address 0, allocates the switch state,
 * configures the chip-specific operating mode and registers the
 * swconfig device. Returns the new ar7240sw on success, NULL on any
 * failure (allocation freed via the err_free path).
 */
static struct ar7240sw *ar7240_probe(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct mii_bus *mii = ag->mii_bus;
	struct ar7240sw *as;
	struct switch_dev *swdev;
	u32 ctrl;
	u16 phy_id1;
	u16 phy_id2;
	int i;

	/* the built-in switch answers with a known PHY ID at address 0 */
	phy_id1 = ar7240sw_phy_read(mii, 0, MII_PHYSID1);
	phy_id2 = ar7240sw_phy_read(mii, 0, MII_PHYSID2);
	if ((phy_id1 != AR7240_PHY_ID1 || phy_id2 != AR7240_PHY_ID2) &&
	    (phy_id1 != AR934X_PHY_ID1 || phy_id2 != AR934X_PHY_ID2)) {
		pr_err("%s: unknown phy id '%04x:%04x'\n",
		       ag->dev->name, phy_id1, phy_id2);
		return NULL;
	}

	as = kzalloc(sizeof(*as), GFP_KERNEL);
	if (!as)
		return NULL;

	as->mii_bus = mii;
	as->swdata = pdata->switch_data;

	swdev = &as->swdev;

	/* chip revision lives in the mask control register */
	ctrl = ar7240sw_reg_read(mii, AR7240_REG_MASK_CTRL);
	as->ver = (ctrl >> AR7240_MASK_CTRL_VERSION_S) &
		  AR7240_MASK_CTRL_VERSION_M;

	if (sw_is_ar7240(as)) {
		swdev->name = "AR7240/AR9330 built-in switch";
		swdev->ports = AR7240_NUM_PORTS - 1;
	} else if (sw_is_ar934x(as)) {
		swdev->name = "AR934X built-in switch";

		/* select how the MAC/PHY side of the switch is wired up */
		if (pdata->phy_if_mode == PHY_INTERFACE_MODE_GMII) {
			ar7240sw_reg_set(mii, AR934X_REG_OPER_MODE0,
					 AR934X_OPER_MODE0_MAC_GMII_EN);
		} else if (pdata->phy_if_mode == PHY_INTERFACE_MODE_MII) {
			ar7240sw_reg_set(mii, AR934X_REG_OPER_MODE0,
					 AR934X_OPER_MODE0_PHY_MII_EN);
		} else {
			pr_err("%s: invalid PHY interface mode\n",
			       ag->dev->name);
			goto err_free;
		}

		/* PHY4 can be split out as a separate MII port */
		if (as->swdata->phy4_mii_en) {
			ar7240sw_reg_set(mii, AR934X_REG_OPER_MODE1,
					 AR934X_REG_OPER_MODE1_PHY4_MII_EN);
			swdev->ports = AR7240_NUM_PORTS - 1;
		} else {
			swdev->ports = AR7240_NUM_PORTS;
		}
	} else {
		pr_err("%s: unsupported chip, ctrl=%08x\n",
		       ag->dev->name, ctrl);
		goto err_free;
	}

	swdev->cpu_port = AR7240_PORT_CPU;
	swdev->vlans = AR7240_MAX_VLANS;
	swdev->ops = &ar7240_ops;

	if (register_switch(&as->swdev, ag->dev) < 0)
		goto err_free;

	pr_info("%s: Found an %s\n", ag->dev->name, swdev->name);

	/* initialize defaults */
	for (i = 0; i < AR7240_MAX_VLANS; i++)
		as->vlan_id[i] = i;

	as->vlan_table[0] = ar7240sw_port_mask_all(as);

	return as;

err_free:
	kfree(as);
	return NULL;
}
static int ag71xx_phy_connect_multi(struct ag71xx *ag) { struct device *dev = &ag->pdev->dev; struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag); struct phy_device *phydev = NULL; int phy_addr; int ret = 0; int phyadd=0; for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { if (!(pdata->phy_mask & (1 << phy_addr))) continue; if (ag->mii_bus->phy_map[phy_addr] == NULL) continue; DBG("%s: PHY found at %s, uid=%08x\n", dev_name(dev), dev_name(&ag->mii_bus->phy_map[phy_addr]->dev), ag->mii_bus->phy_map[phy_addr]->phy_id); phyadd = phy_addr; if (phydev == NULL) phydev = ag->mii_bus->phy_map[phy_addr]; } if (!phydev) { dev_err(dev, "no PHY found with phy_mask=%08x\n", pdata->phy_mask); return -ENODEV; } if (ag->mii_bus->phy_map[phyadd]->phy_id == 0x4dd072 && phyadd == 1) { printk(KERN_INFO "fixup Atheros F1 Phy\n"); ag71xx_mdio_mii_write(ag->mii_bus->priv, phyadd, 0x1d, 0); ag71xx_mdio_mii_write(ag->mii_bus->priv, phyadd, 0x1e, 0x82ee); ag71xx_mdio_mii_write(ag->mii_bus->priv, phyadd, 0x1d, 5); ag71xx_mdio_mii_write(ag->mii_bus->priv, phyadd, 0x1e, 0x2d47); ag71xx_mdio_mii_write(ag->mii_bus->priv, phyadd, 0, 0x8000|0x1000); udelay(50000); } ag->phy_dev = phy_connect(ag->dev, dev_name(&phydev->dev), &ag71xx_phy_link_adjust, pdata->phy_if_mode); if (IS_ERR(ag->phy_dev)) { dev_err(dev, "could not connect to PHY at %s\n", dev_name(&phydev->dev)); return PTR_ERR(ag->phy_dev); } /* mask with MAC supported features */ if (pdata->has_gbit) phydev->supported &= PHY_GBIT_FEATURES; else phydev->supported &= PHY_BASIC_FEATURES; phydev->advertising = phydev->supported; dev_info(dev, "connected to PHY at %s [uid=%08x, driver=%s]\n", dev_name(&phydev->dev), phydev->phy_id, phydev->drv->name); ag->link = 0; ag->speed = 0; ag->duplex = -1; return ret; }
static int ag71xx_phy_connect_multi(struct ag71xx *ag) { struct device *dev = &ag->pdev->dev; struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag); struct phy_device *phydev = NULL; int phy_addr; int ret = 0; for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) { if (!(pdata->phy_mask & (1 << phy_addr))) continue; #if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) if (ag->mii_bus->phy_map[phy_addr] == NULL) continue; DBG("%s: PHY found at %s, uid=%08x\n", dev_name(dev), dev_name(&ag->mii_bus->phy_map[phy_addr]->dev), &ag->mii_bus->phy_map[phy_addr]->phy_id), &ag->mii_bus->phy_map[phy_addr]->phy_id : 0); if (phydev == NULL) phydev = ag->mii_bus->phy_map[phy_addr]; #else if (ag->mii_bus->mdio_map[phy_addr] == NULL) continue; DBG("%s: PHY found at %s, uid=%08x\n", dev_name(dev), dev_name(&ag->mii_bus->mdio_map[phy_addr]->dev), mdiobus_get_phy(ag->mii_bus, phy_addr) ? mdiobus_get_phy(ag->mii_bus, phy_addr)->phy_id : 0); if (phydev == NULL) phydev = mdiobus_get_phy(ag->mii_bus, phy_addr); #endif } if (!phydev) { dev_err(dev, "no PHY found with phy_mask=%08x\n", pdata->phy_mask); return -ENODEV; } #if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) ag->phy_dev = phy_connect(ag->dev, dev_name(&phydev->dev), #else ag->phy_dev = phy_connect(ag->dev, phydev_name(phydev), #endif &ag71xx_phy_link_adjust, pdata->phy_if_mode); if (IS_ERR(ag->phy_dev)) { dev_err(dev, "could not connect to PHY at %s\n", #if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0) dev_name(&phydev->dev)); #else phydev_name(phydev)); #endif return PTR_ERR(ag->phy_dev); }
/*
 * ag71xx_phy_connect_multi - count PHYs matching the platform
 * phy_mask: connect directly when exactly one is found, fall back to
 * fixed link parameters when several are (e.g. a switch), and fail
 * with -ENODEV when none match.
 *
 * Uses the old 5-argument phy_connect() (pre-phylib-flags-removal
 * kernels).
 */
static int ag71xx_phy_connect_multi(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct phy_device *phydev = NULL;
	int phy_count = 0;
	int phy_addr;
	int ret = 0;

	for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
		if (!(pdata->phy_mask & (1 << phy_addr)))
			continue;

		if (ag->mii_bus->phy_map[phy_addr] == NULL)
			continue;

		DBG("%s: PHY found at %s, uid=%08x\n",
		    dev->name,
		    dev_name(&ag->mii_bus->phy_map[phy_addr]->dev),
		    ag->mii_bus->phy_map[phy_addr]->phy_id);

		/* keep the first match */
		if (phydev == NULL)
			phydev = ag->mii_bus->phy_map[phy_addr];

		phy_count++;
	}

	switch (phy_count) {
	case 0:
		printk(KERN_ERR "%s: no PHY found with phy_mask=%08x\n",
		       dev->name, pdata->phy_mask);
		ret = -ENODEV;
		break;
	case 1:
		ag->phy_dev = phy_connect(dev, dev_name(&phydev->dev),
					  &ag71xx_phy_link_adjust, 0,
					  pdata->phy_if_mode);

		if (IS_ERR(ag->phy_dev)) {
			printk(KERN_ERR
			       "%s: could not connect to PHY at %s\n",
			       dev->name, dev_name(&phydev->dev));
			return PTR_ERR(ag->phy_dev);
		}

		/* mask with MAC supported features */
		if (pdata->has_gbit)
			phydev->supported &= PHY_GBIT_FEATURES;
		else
			phydev->supported &= PHY_BASIC_FEATURES;

		phydev->advertising = phydev->supported;

		printk(KERN_DEBUG "%s: connected to PHY at %s "
		       "[uid=%08x, driver=%s]\n",
		       dev->name, dev_name(&phydev->dev),
		       phydev->phy_id, phydev->drv->name);

		ag->link = 0;
		ag->speed = 0;
		ag->duplex = -1;
		break;

	default:
		/* multiple PHYs (likely a switch): use fixed settings */
		printk(KERN_DEBUG "%s: connected to %d PHYs\n",
		       dev->name, phy_count);
		ret = ag71xx_phy_connect_fixed(ag);
		break;
	}

	return ret;
}
/*
 * ag71xx_link_adjust - program the MAC for the current link state.
 *
 * On link down: just drop the carrier. On link up: derive the
 * interface mode, duplex flag, MII speed and FIFO burst mode from
 * ag->speed/ag->duplex, write the chip-specific FIFO_CFG3 value,
 * invoke the board PLL hook if present, then commit the registers and
 * raise the carrier.
 */
void ag71xx_link_adjust(struct ag71xx *ag)
{
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	u32 cfg2;
	u32 ifctl;
	u32 fifo5;
	u32 mii_speed;

	if (!ag->link) {
		netif_carrier_off(ag->dev);
		if (netif_msg_link(ag))
			printk(KERN_INFO "%s: link down\n", ag->dev->name);
		return;
	}

	/* start from the current config, clearing the speed/duplex bits */
	cfg2 = ag71xx_rr(ag, AG71XX_REG_MAC_CFG2);
	cfg2 &= ~(MAC_CFG2_IF_1000 | MAC_CFG2_IF_10_100 | MAC_CFG2_FDX);
	cfg2 |= (ag->duplex) ? MAC_CFG2_FDX : 0;

	ifctl = ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL);
	ifctl &= ~(MAC_IFCTL_SPEED);

	fifo5 = ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5);
	fifo5 &= ~FIFO_CFG5_BM;

	switch (ag->speed) {
	case SPEED_1000:
		mii_speed = MII_CTRL_SPEED_1000;
		cfg2 |= MAC_CFG2_IF_1000;
		fifo5 |= FIFO_CFG5_BM;
		break;
	case SPEED_100:
		mii_speed = MII_CTRL_SPEED_100;
		cfg2 |= MAC_CFG2_IF_10_100;
		ifctl |= MAC_IFCTL_SPEED;
		break;
	case SPEED_10:
		mii_speed = MII_CTRL_SPEED_10;
		cfg2 |= MAC_CFG2_IF_10_100;
		break;
	default:
		/* speed was validated at connect time; anything else
		 * is a driver bug */
		BUG();
		return;
	}

	/* chip-specific FIFO_CFG3 thresholds */
	if (pdata->is_ar91xx)
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x00780fff);
	else if (pdata->is_ar724x)
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, pdata->fifo_cfg3);
	else
		ag71xx_wr(ag, AG71XX_REG_FIFO_CFG3, 0x008001ff);

	/* let the board adjust its ethernet PLL for the new speed */
	if (pdata->set_pll)
		pdata->set_pll(ag->speed);

	ag71xx_mii_ctrl_set_speed(ag, mii_speed);

	ag71xx_wr(ag, AG71XX_REG_MAC_CFG2, cfg2);
	ag71xx_wr(ag, AG71XX_REG_FIFO_CFG5, fifo5);
	ag71xx_wr(ag, AG71XX_REG_MAC_IFCTL, ifctl);

	netif_carrier_on(ag->dev);
	if (netif_msg_link(ag))
		printk(KERN_INFO "%s: link up (%sMbps/%s duplex)\n",
		       ag->dev->name,
		       ag71xx_speed_str(ag),
		       (DUPLEX_FULL == ag->duplex) ? "Full" : "Half");

	DBG("%s: fifo_cfg0=%#x, fifo_cfg1=%#x, fifo_cfg2=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG0),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG1),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG2));

	DBG("%s: fifo_cfg3=%#x, fifo_cfg4=%#x, fifo_cfg5=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG3),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG4),
	    ag71xx_rr(ag, AG71XX_REG_FIFO_CFG5));

	DBG("%s: mac_cfg2=%#x, mac_ifctl=%#x, mii_ctrl=%#x\n",
	    ag->dev->name,
	    ag71xx_rr(ag, AG71XX_REG_MAC_CFG2),
	    ag71xx_rr(ag, AG71XX_REG_MAC_IFCTL),
	    ag71xx_mii_ctrl_rr(ag));
}
/*
 * ag71xx_poll - NAPI poll handler: flush DDR, reap TX completions,
 * process up to @limit RX packets, handle RX overflow and RX-buffer
 * OOM, and decide whether to stay in polling mode.
 *
 * Returns the number of RX packets processed; 0 after scheduling the
 * OOM refill timer.
 */
static int ag71xx_poll(struct napi_struct *napi, int limit)
{
	struct ag71xx *ag = container_of(napi, struct ag71xx, napi);
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct net_device *dev = ag->dev;
	struct ag71xx_ring *rx_ring;
	unsigned long flags;
	u32 status;
	int done;

	pdata->ddr_flush();
	ag71xx_tx_packets(ag);

	DBG("%s: processing RX ring\n", dev->name);
	done = ag71xx_rx_packets(ag, limit);

	/* the next slot to refill has no skb -> allocation failed: back
	 * off to the OOM timer instead of spinning in the poll loop */
	rx_ring = &ag->rx_ring;
	if (rx_ring->buf[rx_ring->dirty % AG71XX_RX_RING_SIZE].skb == NULL)
		goto oom;

	status = ag71xx_rr(ag, AG71XX_REG_RX_STATUS);
	if (unlikely(status & RX_STATUS_OF)) {
		ag71xx_wr(ag, AG71XX_REG_RX_STATUS, RX_STATUS_OF);
		dev->stats.rx_fifo_errors++;

		/* restart RX */
		ag71xx_wr(ag, AG71XX_REG_RX_CTRL, RX_CTRL_RXE);
	}

	if (done < limit) {
		/* more RX or TX work already pending: keep polling */
		if (status & RX_STATUS_PR)
			goto more;

		status = ag71xx_rr(ag, AG71XX_REG_TX_STATUS);
		if (status & TX_STATUS_PS)
			goto more;

		DBG("%s: disable polling mode, done=%d, limit=%d\n",
		    dev->name, done, limit);

		netif_rx_complete(dev, napi);

		/* enable interrupts */
		spin_lock_irqsave(&ag->lock, flags);
		ag71xx_int_enable(ag, AG71XX_INT_POLL);
		spin_unlock_irqrestore(&ag->lock, flags);
		return done;
	}

more:
	DBG("%s: stay in polling mode, done=%d, limit=%d\n",
	    dev->name, done, limit);
	return done;

oom:
	if (netif_msg_rx_err(ag))
		printk(KERN_DEBUG "%s: out of memory\n", dev->name);

	mod_timer(&ag->oom_timer, jiffies + AG71XX_OOM_REFILL);
	netif_rx_complete(dev, napi);
	return 0;
}
/*
 * ag71xx_phy_connect - scan the MDIO bus using the platform phy_mask
 * and connect to a single PHY, or fall back to fixed link parameters
 * when zero or several PHYs match.
 *
 * Oldest revision in this file: uses dev.bus_id (pre-dev_name kernels)
 * and the 5-argument phy_connect().
 *
 * Returns 0 on success, PTR_ERR from phy_connect() or -EINVAL for an
 * invalid fixed speed.
 */
int ag71xx_phy_connect(struct ag71xx *ag)
{
	struct net_device *dev = ag->dev;
	struct ag71xx_platform_data *pdata = ag71xx_get_pdata(ag);
	struct phy_device *phydev = NULL;
	int phy_count = 0;
	int phy_addr;

	if (ag->mii_bus && pdata->phy_mask) {
		/* TODO: use mutex of the mdio bus? */
		for (phy_addr = 0; phy_addr < PHY_MAX_ADDR; phy_addr++) {
			if (!(pdata->phy_mask & (1 << phy_addr)))
				continue;

			if (ag->mii_bus->phy_map[phy_addr] == NULL)
				continue;

			DBG("%s: PHY found at %s, uid=%08x\n",
			    dev->name,
			    ag->mii_bus->phy_map[phy_addr]->dev.bus_id,
			    ag->mii_bus->phy_map[phy_addr]->phy_id);

			/* keep the first match */
			if (phydev == NULL)
				phydev = ag->mii_bus->phy_map[phy_addr];

			phy_count++;
		}
	}

	switch (phy_count) {
	case 1:
		ag->phy_dev = phy_connect(dev, phydev->dev.bus_id,
					  &ag71xx_phy_link_adjust, 0,
					  pdata->phy_if_mode);

		if (IS_ERR(ag->phy_dev)) {
			printk(KERN_ERR
			       "%s: could not connect to PHY at %s\n",
			       dev->name, phydev->dev.bus_id);
			return PTR_ERR(ag->phy_dev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half
				      | SUPPORTED_10baseT_Full
				      | SUPPORTED_100baseT_Half
				      | SUPPORTED_100baseT_Full
				      | SUPPORTED_Autoneg
				      | SUPPORTED_MII
				      | SUPPORTED_TP);

		phydev->advertising = phydev->supported;

		printk(KERN_DEBUG "%s: connected to PHY at %s "
		       "[uid=%08x, driver=%s]\n",
		       dev->name, phydev->dev.bus_id,
		       phydev->phy_id, phydev->drv->name);

		ag->link = 0;
		ag->speed = 0;
		ag->duplex = -1;
		break;

	default:
		/* zero or many PHYs: use the fixed platform settings */
		switch (pdata->speed) {
		case SPEED_10:
		case SPEED_100:
		case SPEED_1000:
			break;
		default:
			printk(KERN_ERR "%s: invalid speed specified\n",
			       dev->name);
			return -EINVAL;
		}

		ag->phy_dev = NULL;

		printk(KERN_DEBUG "%s: connected to %d PHYs\n",
		       dev->name, phy_count);
		break;
	}

	return 0;
}