/* Attach the GENET MAC to its PHY (via DT or a pre-bound phydev) and
 * configure the port multiplexer.  Returns 0 on success or a negative
 * errno; on mux-config failure the PHY is disconnected again.
 */
int bcmgenet_mii_probe(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device_node *dn = priv->pdev->dev.of_node;
	struct phy_device *phydev;
	u32 phy_flags;
	int ret;

	/* Communicate the integrated PHY revision */
	phy_flags = priv->gphy_rev;

	/* Initialize link state variables that bcmgenet_mii_setup() uses */
	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;

	if (dn) {
		/* DT path: connect through the phandle resolved earlier */
		phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
					phy_flags, priv->phy_interface);
		if (!phydev) {
			pr_err("could not attach to PHY\n");
			return -ENODEV;
		}
	} else {
		/* Non-DT path: a phydev was already bound to the netdev */
		phydev = dev->phydev;
		phydev->dev_flags = phy_flags;

		ret = phy_connect_direct(dev, phydev, bcmgenet_mii_setup,
					 priv->phy_interface);
		if (ret) {
			pr_err("could not attach to PHY\n");
			return -ENODEV;
		}
	}

	/* Configure port multiplexer based on what the probed PHY device since
	 * reading the 'max-speed' property determines the maximum supported
	 * PHY speed which is needed for bcmgenet_mii_config() to configure
	 * things appropriately.
	 */
	ret = bcmgenet_mii_config(dev, true);
	if (ret) {
		phy_disconnect(dev->phydev);
		return ret;
	}

	/* Advertise everything the PHY supports */
	linkmode_copy(phydev->advertising, phydev->supported);

	/* The internal PHY has its link interrupts routed to the
	 * Ethernet MAC ISRs. On GENETv5 there is a hardware issue
	 * that prevents the signaling of link UP interrupts when
	 * the link operates at 10Mbps, so fallback to polling for
	 * those versions of GENET.
	 */
	if (priv->internal_phy && !GENET_IS_V5(priv))
		dev->phydev->irq = PHY_IGNORE_INTERRUPT;

	return 0;
}
/* Detach the PHY bound to this EMAC and drop the driver's reference. */
static void emac_mdio_remove(struct net_device *dev)
{
	struct emac_board_info *db;

	db = netdev_priv(dev);
	phy_disconnect(db->phy_dev);
	db->phy_dev = NULL;
}
/* Undo hieth_platdev_probe_port() for one port; a no-op when the port
 * was never probed.  Always returns 0.
 */
static int hieth_platdev_remove_port(struct platform_device *pdev, int port)
{
	struct net_device *ndev;
	struct hieth_netdev_local *ld;

	ndev = hieth_devs_save[port];
	if (!ndev)
		goto _ndev_exit;	/* port not in use */

	ld = netdev_priv(ndev);

	/* Tear down in reverse order of probe */
	unregister_netdev(ndev);
	hieth_destroy_skb_buffers(ld);
	phy_disconnect(ld->phy);
	ld->phy = NULL;
	iounmap((void *)ld->iobase);
	local_lock_exit();
	hieth_devs_save[port] = NULL;
	free_netdev(ndev);

_ndev_exit:
	return 0;
}
static int yatse_stop(struct net_device *ndev){ struct yatse_private *priv = netdev_priv(ndev); unsigned long flags; napi_disable(&priv->napi); printk(KERN_INFO "yatse: shutdown\n"); spin_lock_irqsave(&priv->mac_lock, flags); priv->link = 0; if(priv->phy_irq != PHY_POLL) phy_stop_interrupts(priv->phydev); phy_disconnect(priv->phydev); spin_unlock_irqrestore(&priv->mac_lock, flags); disable_irq(priv->dma.rx_irq); disable_irq(priv->dma.tx_irq); netif_stop_queue(ndev); spin_lock_irqsave(&priv->dma.rx_lock, flags); spin_lock(&priv->dma.tx_lock); yatse_dma_stop(&priv->dma); spin_unlock(&priv->dma.tx_lock); spin_unlock_irqrestore(&priv->dma.rx_lock, flags); tasklet_kill(&priv->tx_tasklet); napi_disable(&priv->napi); printk(KERN_INFO "yatse: shutdown complete\n"); return 0; }
/* Common teardown: drop the PHY connection if one was established. */
void cvm_oct_common_uninit(struct net_device *dev)
{
	struct octeon_ethernet *priv = netdev_priv(dev);
	struct phy_device *phydev = priv->phydev;

	if (phydev)
		phy_disconnect(phydev);
}
/* ndo_stop: quiesce NAPI and the TX queue, detach the PHY, reset the
 * MAC and release IRQ plus DMA ring resources.  Always returns 0.
 */
static int octeon_mgmt_stop(struct net_device *netdev)
{
	struct octeon_mgmt *p = netdev_priv(netdev);

	napi_disable(&p->napi);
	netif_stop_queue(netdev);

	if (p->phydev)
		phy_disconnect(p->phydev);
	p->phydev = NULL;

	netif_carrier_off(netdev);

	octeon_mgmt_reset_hw(p);

	free_irq(p->irq, netdev);

	/* dma_unmap is a nop on Octeon, so just free everything.  */
	skb_queue_purge(&p->tx_list);
	skb_queue_purge(&p->rx_list);
	dma_unmap_single(p->dev, p->rx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_RX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->rx_ring);

	dma_unmap_single(p->dev, p->tx_ring_handle,
			 ring_size_to_bytes(OCTEON_MGMT_TX_RING_SIZE),
			 DMA_BIDIRECTIONAL);
	kfree(p->tx_ring);

	return 0;
}
/* Stop and detach a slave port's PHY, if one is attached. */
static void cpsw_slave_stop(struct cpsw_slave *slave, struct cpsw_priv *priv)
{
	struct phy_device *phy = slave->phy;

	if (!phy)
		return;

	phy_stop(phy);
	phy_disconnect(phy);
	slave->phy = NULL;
}
/* Tear down a DSA slave interface: force carrier off, detach its PHY
 * (if any), then unregister and free the net_device.
 */
void dsa_slave_destroy(struct net_device *slave_dev)
{
	struct dsa_slave_priv *priv = netdev_priv(slave_dev);
	struct phy_device *phy = priv->phy;

	netif_carrier_off(slave_dev);
	if (phy)
		phy_disconnect(phy);
	unregister_netdev(slave_dev);
	free_netdev(slave_dev);
}
/* platform_driver remove: disconnect the PHY, remove the MDIO bus and
 * unregister/free the net_device.  Always returns 0.
 */
static int arc_emac_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct arc_emac_priv *priv = netdev_priv(ndev);

	phy_disconnect(priv->phy_dev);
	priv->phy_dev = NULL;
	arc_mdio_remove(priv);
	unregister_netdev(ndev);
	netif_napi_del(&priv->napi);
	free_netdev(ndev);

	return 0;
}
/* platform_driver remove (IXP4xx): unregister the netdev, detach the
 * PHY and release NPE and MMIO resources.  Always returns 0.
 */
static int __devexit eth_remove_one(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct port *port = netdev_priv(dev);

	unregister_netdev(dev);
	phy_disconnect(port->phydev);
	npe_port_tab[NPE_ID(port->id)] = NULL;	/* forget this port */
	platform_set_drvdata(pdev, NULL);
	npe_release(port->npe);
	release_resource(port->mem_res);
	free_netdev(dev);
	return 0;
}
/* usbnet stop hook: quiesce and detach the PHY if one is connected.
 * Always returns 0.
 */
static int ax88172a_stop(struct usbnet *dev)
{
	struct ax88172a_private *priv = dev->driver_priv;

	netdev_dbg(dev->net, "Stopping interface\n");

	if (!priv->phydev)
		return 0;

	netdev_info(dev->net, "Disconnecting from phy %s\n",
		    priv->phy_name);
	phy_stop(priv->phydev);
	phy_disconnect(priv->phydev);

	return 0;
}
/* platform_driver remove: detach the PHY, tear down the MII bus, then
 * unregister the netdev and release the clock.  Always returns 0.
 */
static int at91ether_remove(struct platform_device *pdev)
{
	struct net_device *dev = platform_get_drvdata(pdev);
	struct macb *lp = netdev_priv(dev);

	if (lp->phy_dev)
		phy_disconnect(lp->phy_dev);

	mdiobus_unregister(lp->mii_bus);
	kfree(lp->mii_bus->irq);	/* irq table allocated at probe */
	mdiobus_free(lp->mii_bus);
	unregister_netdev(dev);
	clk_disable(lp->pclk);
	free_netdev(dev);

	return 0;
}
/**
 * phylink_disconnect_phy() - disconnect any PHY attached to the phylink
 * instance.
 * @pl: a pointer to a &struct phylink returned from phylink_create()
 *
 * Disconnect any current PHY from the phylink instance described by @pl.
 * Must be called with the RTNL held (asserted below).
 */
void phylink_disconnect_phy(struct phylink *pl)
{
	struct phy_device *phy;

	ASSERT_RTNL();

	phy = pl->phydev;
	if (phy) {
		/* Clear pl->phydev under both the PHY's lock and the
		 * phylink state mutex so concurrent users see a
		 * consistent state.
		 */
		mutex_lock(&phy->lock);
		mutex_lock(&pl->state_mutex);
		pl->phydev = NULL;
		mutex_unlock(&pl->state_mutex);
		mutex_unlock(&phy->lock);

		/* Let any in-flight resolve work finish before the
		 * actual disconnect.
		 */
		flush_work(&pl->resolve);

		phy_disconnect(phy);
	}
}
/* platform_driver remove: detach the PHY, free DMA rings and IRQ, and
 * release the netdev.  Always returns 0.
 */
static int hip04_remove(struct platform_device *pdev)
{
	struct net_device *ndev = platform_get_drvdata(pdev);
	struct hip04_priv *priv = netdev_priv(ndev);
	struct device *d = &pdev->dev;

	if (priv->phy)
		phy_disconnect(priv->phy);

	/* NOTE(review): rings are freed before unregister_netdev();
	 * confirm the interface is guaranteed down at this point.
	 */
	hip04_free_ring(ndev, d);
	unregister_netdev(ndev);
	free_irq(ndev->irq, ndev);
	of_node_put(priv->phy_node);
	cancel_work_sync(&priv->tx_timeout_task);
	free_netdev(ndev);

	return 0;
}
/* Disconnect any PHY attached to this phylink instance.
 * Caller must hold the RTNL (checked via lockdep below).  Clears both
 * the phylink and netdev back-pointers to the PHY before disconnecting.
 */
void phylink_disconnect_phy(struct phylink *pl)
{
	struct phy_device *phy;

	WARN_ON(!lockdep_rtnl_is_held());

	phy = pl->phydev;
	if (phy) {
		/* Clear both back-pointers under the PHY lock and the
		 * phylink state mutex for a consistent view.
		 */
		mutex_lock(&phy->lock);
		mutex_lock(&pl->state_mutex);
		pl->netdev->phydev = NULL;
		pl->phydev = NULL;
		mutex_unlock(&pl->state_mutex);
		mutex_unlock(&phy->lock);

		/* Make sure no resolve work still references the PHY */
		flush_work(&pl->resolve);

		phy_disconnect(phy);
	}
}
/**
 * stmmac_init_phy - PHY initialization
 * @dev: net device structure
 * Description: it initializes the driver's PHY state, and attaches the PHY
 * to the mac driver.
 * Return value:
 * 0 on success, negative errno on failure (no PHY found or attach error).
 */
static int stmmac_init_phy(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);
	struct phy_device *phydev;
	char phy_id[MII_BUS_ID_SIZE + 3];
	char bus_id[MII_BUS_ID_SIZE];

	/* Reset cached link state used by the adjust_link callback */
	priv->oldlink = 0;
	priv->speed = 0;
	priv->oldduplex = -1;

	/* Build the "<bus>:<addr>" identifier from platform data */
	snprintf(bus_id, MII_BUS_ID_SIZE, "%x", priv->plat->bus_id);
	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, bus_id,
		 priv->plat->phy_addr);
	pr_debug("stmmac_init_phy: trying to attach to %s\n", phy_id);

	phydev = phy_connect(dev, phy_id, &stmmac_adjust_link, 0,
			     priv->plat->interface);

	if (IS_ERR(phydev)) {
		pr_err("%s: Could not attach to PHY\n", dev->name);
		return PTR_ERR(phydev);
	}

	/*
	 * Broken HW is sometimes missing the pull-up resistor on the
	 * MDIO line, which results in reads to non-existent devices returning
	 * 0 rather than 0xffff. Catch this here and treat 0 as a non-existent
	 * device as well.
	 * Note: phydev->phy_id is the result of reading the UID PHY registers.
	 */
	if (phydev->phy_id == 0) {
		phy_disconnect(phydev);
		return -ENODEV;
	}
	pr_debug("stmmac_init_phy: %s: attached to PHY (UID 0x%x)"
		 " Link = %d\n", dev->name, phydev->phy_id, phydev->link);

	priv->phydev = phydev;

	return 0;
}
/* ndo_stop: stop the queue and PHY state machine, halt the controller
 * under the driver lock, then detach the PHY and release the interrupt.
 * Always returns 0.
 */
static int fs_enet_close(struct net_device *dev)
{
	struct fs_enet_private *fep = netdev_priv(dev);
	unsigned long flags;

	netif_stop_queue(dev);
	netif_carrier_off(dev);
	phy_stop(fep->phydev);

	spin_lock_irqsave(&fep->lock, flags);
	(*fep->ops->stop)(dev);		/* controller-specific halt */
	spin_unlock_irqrestore(&fep->lock, flags);

	/* release any irqs */
	phy_disconnect(fep->phydev);
	fep->phydev = NULL;
	fs_free_irq(dev, fep->interrupt);

	return 0;
}
/* device close function: mask interrupts, stop TX/RX engines, detach
 * the PHY and free all ring/DMA resources.  Always returns 0.
 */
static int sh_eth_close(struct net_device *ndev)
{
	struct sh_eth_private *mdp = netdev_priv(ndev);
	u32 ioaddr = ndev->base_addr;
	int ringsize;

	netif_stop_queue(ndev);

	/* Disable interrupts by clearing the interrupt mask. */
	writel(0x0000, ioaddr + EESIPR);

	/* Stop the chip's Tx and Rx processes. */
	writel(0, ioaddr + EDTRR);
	writel(0, ioaddr + EDRRR);

	/* PHY Disconnect */
	if (mdp->phydev) {
		phy_stop(mdp->phydev);
		phy_disconnect(mdp->phydev);
	}

	free_irq(ndev->irq, ndev);

	del_timer_sync(&mdp->timer);

	/* Free all the skbuffs in the Rx queue. */
	sh_eth_ring_free(ndev);

	/* free DMA buffer */
	/* NOTE(review): dma_free_coherent() is passed a NULL device;
	 * confirm this matches how the rings were allocated.
	 */
	ringsize = sizeof(struct sh_eth_rxdesc) * RX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->rx_ring, mdp->rx_desc_dma);

	/* free DMA buffer */
	ringsize = sizeof(struct sh_eth_txdesc) * TX_RING_SIZE;
	dma_free_coherent(NULL, ringsize, mdp->tx_ring, mdp->tx_desc_dma);

	pm_runtime_put_sync(&mdp->pdev->dev);

	return 0;
}
/**
 * stmmac_release - close entry point of the driver
 * @dev : device pointer.
 * Description:
 * This is the stop entry point of the driver.  Stops the PHY, NAPI and
 * DMA engines, frees IRQ/descriptor resources and disables the MAC.
 * Always returns 0.
 */
static int stmmac_release(struct net_device *dev)
{
	struct stmmac_priv *priv = netdev_priv(dev);

	/* Stop and disconnect the PHY */
	if (priv->phydev) {
		phy_stop(priv->phydev);
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	netif_stop_queue(dev);

#ifdef CONFIG_STMMAC_TIMER
	/* Stop and release the timer */
	stmmac_close_ext_timer();
	if (priv->tm != NULL)
		kfree(priv->tm);
#endif
	napi_disable(&priv->napi);
	skb_queue_purge(&priv->rx_recycle);

	/* Free the IRQ lines */
	free_irq(dev->irq, dev);

	/* Stop TX/RX DMA and clear the descriptors */
	priv->hw->dma->stop_tx(priv->ioaddr);
	priv->hw->dma->stop_rx(priv->ioaddr);

	/* Release and free the Rx/Tx resources */
	free_dma_desc_resources(priv);

	/* Disable the MAC Rx/Tx */
	stmmac_disable_mac(priv->ioaddr);

	netif_carrier_off(dev);

	return 0;
}
/* Disconnect/detach every port's PHY.
 *
 * Fixed-link ports just have their link state forced down under the PHY
 * lock; ports with a real connected PHY are disconnected; otherwise any
 * device still attached on the MII bus is detached.
 *
 * Fix: the original text contained mojibake -- "&eth" had been decoded
 * as the HTML entity into the character 'ð' in both spinlock calls,
 * which does not compile.  Restored to "&eth->phy->lock".
 */
static void mtk_phy_disconnect(struct mtk_mac *mac)
{
	struct mtk_eth *eth = mac->hw;
	unsigned long flags;
	int i;

	for (i = 0; i < 8; i++)
		if (eth->phy->phy_fixed[i]) {
			spin_lock_irqsave(&eth->phy->lock, flags);
			eth->link[i] = 0;
			if (eth->soc->mdio_adjust_link)
				eth->soc->mdio_adjust_link(eth, i);
			spin_unlock_irqrestore(&eth->phy->lock, flags);
		} else if (eth->phy->phy[i]) {
			phy_disconnect(eth->phy->phy[i]);
		} else if (eth->mii_bus) {
			struct phy_device *phy =
				mdiobus_get_phy(eth->mii_bus, i);

			if (phy)
				phy_detach(phy);
		}
}
/* Stop the SGMII port: disable the GMX hardware, stop polling, detach
 * the PHY and force the recorded link state down.  Always returns 0.
 */
int cvm_oct_sgmii_stop(struct net_device *dev)
{
	union cvmx_gmxx_prtx_cfg gmx_cfg;
	struct octeon_ethernet *priv = netdev_priv(dev);
	cvmx_helper_link_info_t link_info;

	/* Clear the port-enable bit in the GMX config CSR */
	gmx_cfg.u64 = cvmx_read_csr(CVMX_GMXX_PRTX_CFG(priv->interface_port,
						       priv->interface));
	gmx_cfg.s.en = 0;
	cvmx_write_csr(CVMX_GMXX_PRTX_CFG(priv->interface_port,
					  priv->interface), gmx_cfg.u64);

	priv->poll = NULL;

	if (priv->phydev) {
		phy_disconnect(priv->phydev);
	}
	priv->phydev = NULL;

	/* If we previously reported link up, report it down now */
	if (priv->last_link) {
		link_info.u64 = 0;
		priv->last_link = 0;
		cvmx_helper_link_set(priv->ipd_port, link_info);
	}

	return 0;
}
/* Allocate, configure and register one DSA slave net_device for @port
 * of switch @ds.  The slave inherits features and MAC address from the
 * master interface.  Returns the new net_device, or NULL on failure
 * (all partial allocations are released).
 */
struct net_device *
dsa_slave_create(struct dsa_switch *ds, struct device *parent,
		 int port, char *name)
{
	struct net_device *master = ds->dst->master_netdev;
	struct net_device *slave_dev;
	struct dsa_slave_priv *p;
	int ret;

	slave_dev = alloc_netdev(sizeof(struct dsa_slave_priv), name,
				 NET_NAME_UNKNOWN, ether_setup);
	if (slave_dev == NULL)
		return slave_dev;

	/* Mirror the master's capabilities and MAC address */
	slave_dev->features = master->vlan_features;
	slave_dev->ethtool_ops = &dsa_slave_ethtool_ops;
	eth_hw_addr_inherit(slave_dev, master);
	slave_dev->tx_queue_len = 0;
	slave_dev->netdev_ops = &dsa_slave_netdev_ops;

	SET_NETDEV_DEV(slave_dev, parent);
	slave_dev->dev.of_node = ds->pd->port_dn[port];
	slave_dev->vlan_features = master->vlan_features;

	p = netdev_priv(slave_dev);
	p->dev = slave_dev;
	p->parent = ds;
	p->port = port;

	/* Pick the xmit function matching the tree's tagging protocol */
	switch (ds->dst->tag_protocol) {
#ifdef CONFIG_NET_DSA_TAG_DSA
	case DSA_TAG_PROTO_DSA:
		p->xmit = dsa_netdev_ops.xmit;
		break;
#endif
#ifdef CONFIG_NET_DSA_TAG_EDSA
	case DSA_TAG_PROTO_EDSA:
		p->xmit = edsa_netdev_ops.xmit;
		break;
#endif
#ifdef CONFIG_NET_DSA_TAG_TRAILER
	case DSA_TAG_PROTO_TRAILER:
		p->xmit = trailer_netdev_ops.xmit;
		break;
#endif
#ifdef CONFIG_NET_DSA_TAG_BRCM
	case DSA_TAG_PROTO_BRCM:
		p->xmit = brcm_netdev_ops.xmit;
		break;
#endif
	default:
		p->xmit = dsa_slave_notag_xmit;
		break;
	}

	/* Unknown link state until the first adjust_link callback */
	p->old_pause = -1;
	p->old_link = -1;
	p->old_duplex = -1;

	ret = dsa_slave_phy_setup(p, slave_dev);
	if (ret) {
		free_netdev(slave_dev);
		return NULL;
	}

	ret = register_netdev(slave_dev);
	if (ret) {
		netdev_err(master, "error %d registering interface %s\n",
			   ret, slave_dev->name);
		phy_disconnect(p->phy);
		free_netdev(slave_dev);
		return NULL;
	}

	netif_carrier_off(slave_dev);

	return slave_dev;
}
/* Detach the PHY from the raeth device, if one is connected. */
static void ramips_phy_disconnect(struct raeth_priv *re)
{
	struct phy_device *phydev = re->phy_dev;

	if (phydev)
		phy_disconnect(phydev);
}
/* Attach the GENET MAC to its PHY described by device tree (including
 * the fixed-link case) and configure the port multiplexer.  Returns 0
 * on success or a negative errno; idempotent if already attached.
 */
static int bcmgenet_mii_probe(struct net_device *dev)
{
	struct bcmgenet_priv *priv = netdev_priv(dev);
	struct device_node *dn = priv->pdev->dev.of_node;
	struct phy_device *phydev;
	u32 phy_flags;
	int ret;

	if (priv->phydev) {
		pr_info("PHY already attached\n");
		return 0;
	}

	/* In the case of a fixed PHY, the DT node associated
	 * to the PHY is the Ethernet MAC DT node.
	 */
	if (!priv->phy_dn && of_phy_is_fixed_link(dn)) {
		ret = of_phy_register_fixed_link(dn);
		if (ret)
			return ret;

		priv->phy_dn = of_node_get(dn);
	}

	/* Communicate the integrated PHY revision */
	phy_flags = priv->gphy_rev;

	/* Initialize link state variables that bcmgenet_mii_setup() uses */
	priv->old_link = -1;
	priv->old_speed = -1;
	priv->old_duplex = -1;
	priv->old_pause = -1;

	phydev = of_phy_connect(dev, priv->phy_dn, bcmgenet_mii_setup,
				phy_flags, priv->phy_interface);
	if (!phydev) {
		pr_err("could not attach to PHY\n");
		return -ENODEV;
	}

	priv->phydev = phydev;

	/* Configure port multiplexer based on what the probed PHY device since
	 * reading the 'max-speed' property determines the maximum supported
	 * PHY speed which is needed for bcmgenet_mii_config() to configure
	 * things appropriately.
	 */
	ret = bcmgenet_mii_config(dev, true);
	if (ret) {
		phy_disconnect(priv->phydev);
		return ret;
	}

	/* Advertise everything the PHY supports */
	phydev->advertising = phydev->supported;

	/* The internal PHY has its link interrupts routed to the
	 * Ethernet MAC ISRs */
	if (phy_is_internal(priv->phydev))
		priv->mii_bus->irq[phydev->addr] = PHY_IGNORE_INTERRUPT;
	else
		priv->mii_bus->irq[phydev->addr] = PHY_POLL;

	pr_info("attached PHY at address %d [%s]\n",
		phydev->addr, phydev->drv->name);

	return 0;
}
/*
 * open callback, allocate dma rings & buffers and start rx operation
 *
 * Returns 0 on success or a negative errno, unwinding every resource
 * acquired so far on failure.
 */
static int bcm_enet_open(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct sockaddr addr;
	struct device *kdev;
	struct phy_device *phydev;
	int i, ret;
	unsigned int size;
	char phy_id[MII_BUS_ID_SIZE + 3];
	void *p;
	u32 val;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	if (priv->has_phy) {
		/* connect to PHY */
		snprintf(phy_id, sizeof(phy_id), PHY_ID_FMT,
			 priv->mac_id ? "1" : "0", priv->phy_id);

		phydev = phy_connect(dev, phy_id, &bcm_enet_adjust_phy_link, 0,
				     PHY_INTERFACE_MODE_MII);

		if (IS_ERR(phydev)) {
			dev_err(kdev, "could not attach to PHY\n");
			return PTR_ERR(phydev);
		}

		/* mask with MAC supported features */
		phydev->supported &= (SUPPORTED_10baseT_Half |
				      SUPPORTED_10baseT_Full |
				      SUPPORTED_100baseT_Half |
				      SUPPORTED_100baseT_Full |
				      SUPPORTED_Autoneg |
				      SUPPORTED_Pause |
				      SUPPORTED_MII);
		phydev->advertising = phydev->supported;

		if (priv->pause_auto && priv->pause_rx && priv->pause_tx)
			phydev->advertising |= SUPPORTED_Pause;
		else
			phydev->advertising &= ~SUPPORTED_Pause;

		dev_info(kdev, "attached PHY at address %d [%s]\n",
			 phydev->addr, phydev->drv->name);

		/* unknown link state until first adjust_link callback */
		priv->old_link = 0;
		priv->old_duplex = -1;
		priv->old_pause = -1;
		priv->phydev = phydev;
	}

	/* mask all interrupts and request them */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	ret = request_irq(dev->irq, bcm_enet_isr_mac, 0, dev->name, dev);
	if (ret)
		goto out_phy_disconnect;

	ret = request_irq(priv->irq_rx, bcm_enet_isr_dma,
			  IRQF_SAMPLE_RANDOM | IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq;

	ret = request_irq(priv->irq_tx, bcm_enet_isr_dma,
			  IRQF_DISABLED, dev->name, dev);
	if (ret)
		goto out_freeirq_rx;

	/* initialize perfect match registers */
	for (i = 0; i < 4; i++) {
		enet_writel(priv, 0, ENET_PML_REG(i));
		enet_writel(priv, 0, ENET_PMH_REG(i));
	}

	/* write device mac address */
	memcpy(addr.sa_data, dev->dev_addr, ETH_ALEN);
	bcm_enet_set_mac_address(dev, &addr);

	/* allocate rx dma ring */
	size = priv->rx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->rx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate rx ring %u\n", size);
		ret = -ENOMEM;
		goto out_freeirq_tx;
	}

	memset(p, 0, size);
	priv->rx_desc_alloc_size = size;
	priv->rx_desc_cpu = p;

	/* allocate tx dma ring */
	size = priv->tx_ring_size * sizeof(struct bcm_enet_desc);
	p = dma_alloc_coherent(kdev, size, &priv->tx_desc_dma, GFP_KERNEL);
	if (!p) {
		dev_err(kdev, "cannot allocate tx ring\n");
		ret = -ENOMEM;
		goto out_free_rx_ring;
	}

	memset(p, 0, size);
	priv->tx_desc_alloc_size = size;
	priv->tx_desc_cpu = p;

	priv->tx_skb = kzalloc(sizeof(struct sk_buff *) * priv->tx_ring_size,
			       GFP_KERNEL);
	if (!priv->tx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_ring;
	}

	priv->tx_desc_count = priv->tx_ring_size;
	priv->tx_dirty_desc = 0;
	priv->tx_curr_desc = 0;
	spin_lock_init(&priv->tx_lock);

	/* init & fill rx ring with skbs */
	priv->rx_skb = kzalloc(sizeof(struct sk_buff *) * priv->rx_ring_size,
			       GFP_KERNEL);
	if (!priv->rx_skb) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out_free_tx_skb;
	}

	priv->rx_desc_count = 0;
	priv->rx_dirty_desc = 0;
	priv->rx_curr_desc = 0;

	/* initialize flow control buffer allocation */
	enet_dma_writel(priv, ENETDMA_BUFALLOC_FORCE_MASK | 0,
			ENETDMA_BUFALLOC_REG(priv->rx_chan));

	if (bcm_enet_refill_rx(dev)) {
		dev_err(kdev, "cannot allocate rx skb queue\n");
		ret = -ENOMEM;
		goto out;
	}

	/* write rx & tx ring addresses */
	enet_dma_writel(priv, priv->rx_desc_dma,
			ENETDMA_RSTART_REG(priv->rx_chan));
	enet_dma_writel(priv, priv->tx_desc_dma,
			ENETDMA_RSTART_REG(priv->tx_chan));

	/* clear remaining state ram for rx & tx channel */
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM2_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM3_REG(priv->tx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_SRAM4_REG(priv->tx_chan));

	/* set max rx/tx length */
	enet_writel(priv, priv->hw_mtu, ENET_RXMAXLEN_REG);
	enet_writel(priv, priv->hw_mtu, ENET_TXMAXLEN_REG);

	/* set dma maximum burst len */
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->rx_chan));
	enet_dma_writel(priv, BCMENET_DMA_MAXBURST,
			ENETDMA_MAXBURST_REG(priv->tx_chan));

	/* set correct transmit fifo watermark */
	enet_writel(priv, BCMENET_TX_FIFO_TRESH, ENET_TXWMARK_REG);

	/* set flow control low/high threshold to 1/3 / 2/3 */
	val = priv->rx_ring_size / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCL_REG(priv->rx_chan));
	val = (priv->rx_ring_size * 2) / 3;
	enet_dma_writel(priv, val, ENETDMA_FLOWCH_REG(priv->rx_chan));

	/* all set, enable mac and interrupts, start dma engine and
	 * kick rx dma channel
	 */
	wmb();
	enet_writel(priv, ENET_CTL_ENABLE_MASK, ENET_CTL_REG);
	enet_dma_writel(priv, ENETDMA_CFG_EN_MASK, ENETDMA_CFG_REG);
	enet_dma_writel(priv, ENETDMA_CHANCFG_EN_MASK,
			ENETDMA_CHANCFG_REG(priv->rx_chan));

	/* watch "mib counters about to overflow" interrupt */
	enet_writel(priv, ENET_IR_MIB, ENET_IR_REG);
	enet_writel(priv, ENET_IR_MIB, ENET_IRMASK_REG);

	/* watch "packet transferred" interrupt in rx and tx */
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IR_REG(priv->tx_chan));

	/* make sure we enable napi before rx interrupt  */
	napi_enable(&priv->napi);

	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, ENETDMA_IR_PKTDONE_MASK,
			ENETDMA_IRMASK_REG(priv->tx_chan));

	if (priv->has_phy)
		phy_start(priv->phydev);
	else
		bcm_enet_adjust_link(dev);

	netif_start_queue(dev);
	return 0;

out:
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}
	kfree(priv->rx_skb);

out_free_tx_skb:
	kfree(priv->tx_skb);

out_free_tx_ring:
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);

out_free_rx_ring:
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);

out_freeirq_tx:
	free_irq(priv->irq_tx, dev);

out_freeirq_rx:
	free_irq(priv->irq_rx, dev);

out_freeirq:
	free_irq(dev->irq, dev);

out_phy_disconnect:
	/* NOTE(review): this path is reached even when !priv->has_phy,
	 * in which case priv->phydev is NULL and phy_disconnect() will
	 * dereference it -- guard with `if (priv->has_phy)` upstream.
	 */
	phy_disconnect(priv->phydev);

	return ret;
}
/* Probe one IXP4xx Ethernet port: map the NPE registers, reset the MAC,
 * connect the MII PHY and register the net_device.  Returns 0 on
 * success or a negative errno, releasing everything acquired on failure.
 *
 * Fix: the original wrote `if ((err = IS_ERR(port->phydev)))`, storing
 * IS_ERR()'s boolean result (1) as the return value, so a failed
 * phy_connect() made probe return +1 instead of a negative errno.
 * Now uses PTR_ERR() to propagate the real error code.
 */
static int __devinit eth_init_one(struct platform_device *pdev)
{
	struct port *port;
	struct net_device *dev;
	struct eth_plat_info *plat = pdev->dev.platform_data;
	u32 regs_phys;
	char phy_id[MII_BUS_ID_SIZE + 3];
	int err;

	if (!(dev = alloc_etherdev(sizeof(struct port))))
		return -ENOMEM;

	SET_NETDEV_DEV(dev, &pdev->dev);
	port = netdev_priv(dev);
	port->netdev = dev;
	port->id = pdev->id;

	/* Select the MMIO window matching this NPE */
	switch (port->id) {
	case IXP4XX_ETH_NPEA:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthA_BASE_VIRT;
		regs_phys = IXP4XX_EthA_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEB:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthB_BASE_VIRT;
		regs_phys = IXP4XX_EthB_BASE_PHYS;
		break;
	case IXP4XX_ETH_NPEC:
		port->regs = (struct eth_regs __iomem *)IXP4XX_EthC_BASE_VIRT;
		regs_phys = IXP4XX_EthC_BASE_PHYS;
		break;
	default:
		err = -ENODEV;
		goto err_free;
	}

	dev->netdev_ops = &ixp4xx_netdev_ops;
	dev->ethtool_ops = &ixp4xx_ethtool_ops;
	dev->tx_queue_len = 100;

	netif_napi_add(dev, &port->napi, eth_poll, NAPI_WEIGHT);

	if (!(port->npe = npe_request(NPE_ID(port->id)))) {
		err = -EIO;
		goto err_free;
	}

	port->mem_res = request_mem_region(regs_phys, REGS_SIZE, dev->name);
	if (!port->mem_res) {
		err = -EBUSY;
		goto err_npe_rel;
	}

	port->plat = plat;
	npe_port_tab[NPE_ID(port->id)] = port;
	memcpy(dev->dev_addr, plat->hwaddr, ETH_ALEN);

	platform_set_drvdata(pdev, dev);

	/* Reset the MAC core before first use */
	__raw_writel(DEFAULT_CORE_CNTRL | CORE_RESET,
		     &port->regs->core_control);
	udelay(50);
	__raw_writel(DEFAULT_CORE_CNTRL, &port->regs->core_control);
	udelay(50);

	snprintf(phy_id, MII_BUS_ID_SIZE + 3, PHY_ID_FMT, "0", plat->phy);
	port->phydev = phy_connect(dev, phy_id, &ixp4xx_adjust_link, 0,
				   PHY_INTERFACE_MODE_MII);
	if (IS_ERR(port->phydev)) {
		err = PTR_ERR(port->phydev);	/* was: err = IS_ERR(...) */
		goto err_free_mem;
	}

	port->phydev->irq = PHY_POLL;

	if ((err = register_netdev(dev)))
		goto err_phy_dis;

	printk(KERN_INFO "%s: MII PHY %i on %s\n", dev->name, plat->phy,
	       npe_name(port->npe));

	return 0;

err_phy_dis:
	phy_disconnect(port->phydev);
err_free_mem:
	npe_port_tab[NPE_ID(port->id)] = NULL;
	platform_set_drvdata(pdev, NULL);
	release_resource(port->mem_res);
err_npe_rel:
	npe_release(port->npe);
err_free:
	free_netdev(dev);
	return err;
}
/* Break the association between the net device and its attached PHY. */
static void emac_mdio_remove(struct net_device *dev)
{
	struct phy_device *phydev = dev->phydev;

	phy_disconnect(phydev);
}
int bgmac_phy_connect_direct(struct bgmac *bgmac) { struct fixed_phy_status fphy_status = { .link = 1, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; struct phy_device *phy_dev; int err; phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, -1, NULL); if (!phy_dev || IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; } err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (err) { dev_err(bgmac->dev, "Connecting PHY failed\n"); return err; } return err; } EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct); struct bgmac *bgmac_alloc(struct device *dev) { struct net_device *net_dev; struct bgmac *bgmac; /* Allocation and references */ net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac)); if (!net_dev) return NULL; net_dev->netdev_ops = &bgmac_netdev_ops; net_dev->ethtool_ops = &bgmac_ethtool_ops; bgmac = netdev_priv(net_dev); bgmac->dev = dev; bgmac->net_dev = net_dev; return bgmac; } EXPORT_SYMBOL_GPL(bgmac_alloc); int bgmac_enet_probe(struct bgmac *bgmac) { struct net_device *net_dev = bgmac->net_dev; int err; net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); dev_set_drvdata(bgmac->dev, bgmac); if (!is_valid_ether_addr(net_dev->dev_addr)) { dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", net_dev->dev_addr); eth_hw_addr_random(net_dev); dev_warn(bgmac->dev, "Using random MAC: %pM\n", net_dev->dev_addr); } /* This (reset &) enable is not preset in specs or reference driver but * Broadcom does it in arch PCI code when enabling fake PCI device. 
*/ bgmac_clk_enable(bgmac, 0); /* This seems to be fixing IRQ by assigning OOB #6 to the core */ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); } bgmac_chip_reset(bgmac); err = bgmac_dma_alloc(bgmac); if (err) { dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); goto err_out; } bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) bgmac->int_mask &= ~BGMAC_IS_TX_MASK; netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); err = bgmac_phy_connect(bgmac); if (err) { dev_err(bgmac->dev, "Cannot connect to phy\n"); goto err_dma_free; } net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; net_dev->hw_features = net_dev->features; net_dev->vlan_features = net_dev->features; err = register_netdev(bgmac->net_dev); if (err) { dev_err(bgmac->dev, "Cannot register net device\n"); goto err_phy_disconnect; } netif_carrier_off(net_dev); return 0; err_phy_disconnect: phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); err_out: return err; } EXPORT_SYMBOL_GPL(bgmac_enet_probe); void bgmac_enet_remove(struct bgmac *bgmac) { unregister_netdev(bgmac->net_dev); phy_disconnect(bgmac->net_dev->phydev); netif_napi_del(&bgmac->napi); bgmac_dma_free(bgmac); free_netdev(bgmac->net_dev); }
/*
 * stop callback
 *
 * Reverses bcm_enet_open(): stops the PHY and DMA engines, reclaims all
 * buffers, frees rings/IRQs and finally disconnects the PHY.  Always
 * returns 0.
 */
static int bcm_enet_stop(struct net_device *dev)
{
	struct bcm_enet_priv *priv;
	struct device *kdev;
	int i;

	priv = netdev_priv(dev);
	kdev = &priv->pdev->dev;

	netif_stop_queue(dev);
	napi_disable(&priv->napi);
	if (priv->has_phy)
		phy_stop(priv->phydev);
	del_timer_sync(&priv->rx_timeout);

	/* mask all interrupts */
	enet_writel(priv, 0, ENET_IRMASK_REG);
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->rx_chan));
	enet_dma_writel(priv, 0, ENETDMA_IRMASK_REG(priv->tx_chan));

	/* make sure no mib update is scheduled */
	flush_scheduled_work();

	/* disable dma & mac */
	bcm_enet_disable_dma(priv, priv->tx_chan);
	bcm_enet_disable_dma(priv, priv->rx_chan);
	bcm_enet_disable_mac(priv);

	/* force reclaim of all tx buffers */
	bcm_enet_tx_reclaim(dev, 1);

	/* free the rx skb ring */
	for (i = 0; i < priv->rx_ring_size; i++) {
		struct bcm_enet_desc *desc;

		if (!priv->rx_skb[i])
			continue;

		desc = &priv->rx_desc_cpu[i];
		dma_unmap_single(kdev, desc->address, priv->rx_skb_size,
				 DMA_FROM_DEVICE);
		kfree_skb(priv->rx_skb[i]);
	}

	/* free remaining allocated memory */
	kfree(priv->rx_skb);
	kfree(priv->tx_skb);
	dma_free_coherent(kdev, priv->rx_desc_alloc_size,
			  priv->rx_desc_cpu, priv->rx_desc_dma);
	dma_free_coherent(kdev, priv->tx_desc_alloc_size,
			  priv->tx_desc_cpu, priv->tx_desc_dma);
	free_irq(priv->irq_tx, dev);
	free_irq(priv->irq_rx, dev);
	free_irq(dev->irq, dev);

	/* release phy */
	if (priv->has_phy) {
		phy_disconnect(priv->phydev);
		priv->phydev = NULL;
	}

	return 0;
}
static int hieth_platdev_probe_port(struct platform_device *pdev, int port) { int ret = -1; struct net_device *netdev = NULL; struct hieth_netdev_local *ld; if ((UP_PORT != port) && (DOWN_PORT != port)) { hieth_error("port error!"); ret = -ENODEV; goto _error_exit; } netdev = alloc_etherdev(sizeof(*ld)); if (netdev == NULL) { hieth_error("alloc_etherdev fail!"); ret = -ENOMEM; goto _error_alloc_etherdev; } SET_NETDEV_DEV(netdev, &pdev->dev); netdev->irq = CONFIG_HIETH_IRQNUM; netdev->watchdog_timeo = 3*HZ; netdev->netdev_ops = &hieth_netdev_ops; netdev->ethtool_ops = &hieth_ethtools_ops; /* init hieth_global somethings... */ hieth_devs_save[port] = netdev; /* init hieth_local_driver */ ld = netdev_priv(netdev); memset(ld, 0, sizeof(*ld)); local_lock_init(ld); ld->iobase = (unsigned long)ioremap_nocache(CONFIG_HIETH_IOBASE, \ CONFIG_HIETH_IOSIZE); if (!ld->iobase) { hieth_error("ioremap_nocache err, base=0x%.8x, size=0x%.8x\n", CONFIG_HIETH_IOBASE, CONFIG_HIETH_IOSIZE); ret = -EFAULT; goto _error_ioremap_nocache; } ld->iobase_phys = CONFIG_HIETH_IOBASE; ld->port = port; ld->dev = &(pdev->dev); /* reset and init port */ hieth_port_reset(ld, ld->port); hieth_port_init(ld, ld->port); ld->depth.hw_xmitq = CONFIG_HIETH_HWQ_XMIT_DEPTH; memset(ld->phy_name, 0, sizeof(ld->phy_name)); snprintf(ld->phy_name, MII_BUS_ID_SIZE, PHY_ID_FMT, \ HIETH_MIIBUS_NAME, UD_PHY_NAME(CONFIG_HIETH_PHYID)); ld->phy = phy_connect(netdev, ld->phy_name, hieth_adjust_link, 0, \ UD_BIT_NAME(CONFIG_HIETH_MII_RMII_MODE) ? 
\ PHY_INTERFACE_MODE_MII : PHY_INTERFACE_MODE_MII); if (IS_ERR(ld->phy)) { hieth_error("connect to phy_device %s failed!", ld->phy_name); ld->phy = NULL; goto _error_phy_connect; } skb_queue_head_init(&ld->rx_head); skb_queue_head_init(&ld->rx_hw); skb_queue_head_init(&ld->tx_hw); ld->tx_hw_cnt = 0; ret = hieth_init_skb_buffers(ld); if (ret) { hieth_error("hieth_init_skb_buffers failed!"); goto _error_init_skb_buffers; } ret = register_netdev(netdev); if (ret) { hieth_error("register_netdev %s failed!", netdev->name); goto _error_register_netdev; } return ret; _error_register_netdev: hieth_destroy_skb_buffers(ld); _error_init_skb_buffers: phy_disconnect(ld->phy); ld->phy = NULL; _error_phy_connect: iounmap((void *)ld->iobase); _error_ioremap_nocache: local_lock_exit(); hieth_devs_save[port] = NULL; free_netdev(netdev); _error_alloc_etherdev: _error_exit: return ret; }