/*
 * Module unload hook for the DE-600 driver: detach the network device
 * from the kernel, then return its I/O port range to the system.
 */
void cleanup_module(void)
{
	/* Unregister first: the region must stay claimed while the device is live. */
	unregister_netdev(&de600_dev);
	release_region(DE600_IO, 3);
}
/** * lbs_add_mesh - add mshX interface * * @priv: A pointer to the &struct lbs_private structure * returns: 0 if successful, -X otherwise */ static int lbs_add_mesh(struct lbs_private *priv) { struct net_device *mesh_dev = NULL; struct wireless_dev *mesh_wdev; int ret = 0; /* Allocate a virtual mesh device */ mesh_wdev = kzalloc(sizeof(struct wireless_dev), GFP_KERNEL); if (!mesh_wdev) { lbs_deb_mesh("init mshX wireless device failed\n"); ret = -ENOMEM; goto done; } mesh_dev = alloc_netdev(0, "msh%d", NET_NAME_UNKNOWN, ether_setup); if (!mesh_dev) { lbs_deb_mesh("init mshX device failed\n"); ret = -ENOMEM; goto err_free_wdev; } mesh_wdev->iftype = NL80211_IFTYPE_MESH_POINT; mesh_wdev->wiphy = priv->wdev->wiphy; mesh_wdev->netdev = mesh_dev; mesh_dev->ml_priv = priv; mesh_dev->ieee80211_ptr = mesh_wdev; priv->mesh_dev = mesh_dev; mesh_dev->netdev_ops = &mesh_netdev_ops; mesh_dev->ethtool_ops = &lbs_ethtool_ops; eth_hw_addr_inherit(mesh_dev, priv->dev); SET_NETDEV_DEV(priv->mesh_dev, priv->dev->dev.parent); mesh_dev->flags |= IFF_BROADCAST | IFF_MULTICAST; /* Register virtual mesh interface */ ret = register_netdev(mesh_dev); if (ret) { pr_err("cannot register mshX virtual interface\n"); goto err_free_netdev; } ret = sysfs_create_group(&(mesh_dev->dev.kobj), &lbs_mesh_attr_group); if (ret) goto err_unregister; lbs_persist_config_init(mesh_dev); /* Everything successful */ ret = 0; goto done; err_unregister: unregister_netdev(mesh_dev); err_free_netdev: free_netdev(mesh_dev); err_free_wdev: kfree(mesh_wdev); done: return ret; }
/*
 * ag71xx_probe - map the AG71xx MAC registers, wire up the net_device and
 * register it with the kernel.
 *
 * Fixes vs. previous version:
 *  - the "mac_base2" ioremap result was validated via the wrong pointer
 *    (!ag->mac_base instead of !ag->mac_base2), so a failed second mapping
 *    went undetected;
 *  - the missing "mac_base" resource path jumped to err_out and leaked dev;
 *  - err_free_dev used kfree() on a net_device allocated with
 *    alloc_etherdev(); it must be free_netdev().
 */
static int __init ag71xx_probe(struct platform_device *pdev)
{
	struct net_device *dev;
	struct resource *res;
	struct ag71xx *ag;
	struct ag71xx_platform_data *pdata;
	int err;

	pdata = pdev->dev.platform_data;
	if (!pdata) {
		dev_err(&pdev->dev, "no platform data specified\n");
		err = -ENXIO;
		goto err_out;
	}

	dev = alloc_etherdev(sizeof(*ag));
	if (!dev) {
		dev_err(&pdev->dev, "alloc_etherdev failed\n");
		err = -ENOMEM;
		goto err_out;
	}

	SET_NETDEV_DEV(dev, &pdev->dev);

	ag = netdev_priv(dev);
	ag->pdev = pdev;
	ag->dev = dev;
	ag->mii_bus = &ag71xx_mdio_bus->mii_bus;
	ag->msg_enable = netif_msg_init(ag71xx_debug,
					AG71XX_DEFAULT_MSG_ENABLE);
	spin_lock_init(&ag->lock);

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base resource found\n");
		err = -ENXIO;
		goto err_free_dev;	/* was err_out: leaked dev */
	}

	ag->mac_base = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base) {
		dev_err(&pdev->dev, "unable to ioremap mac_base\n");
		err = -ENOMEM;
		goto err_free_dev;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mac_base2");
	if (!res) {
		dev_err(&pdev->dev, "no mac_base2 resource found\n");
		err = -ENXIO;
		goto err_unmap_base1;
	}

	ag->mac_base2 = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mac_base2) {	/* was !ag->mac_base: wrong pointer checked */
		dev_err(&pdev->dev, "unable to ioremap mac_base2\n");
		err = -ENOMEM;
		goto err_unmap_base1;
	}

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mii_ctrl");
	if (!res) {
		dev_err(&pdev->dev, "no mii_ctrl resource found\n");
		err = -ENXIO;
		goto err_unmap_base2;
	}

	ag->mii_ctrl = ioremap_nocache(res->start, res->end - res->start + 1);
	if (!ag->mii_ctrl) {
		dev_err(&pdev->dev, "unable to ioremap mii_ctrl\n");
		err = -ENOMEM;
		goto err_unmap_base2;
	}

	dev->irq = platform_get_irq(pdev, 0);
	err = request_irq(dev->irq, ag71xx_interrupt,
			  IRQF_DISABLED | IRQF_SAMPLE_RANDOM,
			  dev->name, dev);
	if (err) {
		dev_err(&pdev->dev, "unable to request IRQ %d\n", dev->irq);
		goto err_unmap_mii_ctrl;
	}

	dev->base_addr = (unsigned long)ag->mac_base;
	dev->open = ag71xx_open;
	dev->stop = ag71xx_stop;
	dev->hard_start_xmit = ag71xx_hard_start_xmit;
	dev->set_multicast_list = ag71xx_set_multicast_list;
	dev->do_ioctl = ag71xx_do_ioctl;
	dev->ethtool_ops = &ag71xx_ethtool_ops;

	netif_napi_add(dev, &ag->napi, ag71xx_poll, AG71XX_NAPI_WEIGHT);

	if (is_valid_ether_addr(pdata->mac_addr))
		memcpy(dev->dev_addr, pdata->mac_addr, ETH_ALEN);
	else {
		/* No valid address supplied: synthesize a locally-visible one. */
		dev->dev_addr[0] = 0xde;
		dev->dev_addr[1] = 0xad;
		get_random_bytes(&dev->dev_addr[2], 3);
		dev->dev_addr[5] = pdev->id & 0xff;
	}

	err = register_netdev(dev);
	if (err) {
		dev_err(&pdev->dev, "unable to register net device\n");
		goto err_free_irq;
	}

	printk(KERN_INFO "%s: Atheros AG71xx at 0x%08lx, irq %d\n",
	       dev->name, dev->base_addr, dev->irq);

	ag71xx_dump_regs(ag);

	ag71xx_hw_init(ag);

	ag71xx_dump_regs(ag);

	/* Reset the mdio bus explicitly */
	if (ag->mii_bus) {
		mutex_lock(&ag->mii_bus->mdio_lock);
		ag->mii_bus->reset(ag->mii_bus);
		mutex_unlock(&ag->mii_bus->mdio_lock);
	}

	err = ag71xx_phy_connect(ag);
	if (err)
		goto err_unregister_netdev;

	platform_set_drvdata(pdev, dev);

	return 0;

err_unregister_netdev:
	unregister_netdev(dev);
err_free_irq:
	free_irq(dev->irq, dev);
err_unmap_mii_ctrl:
	iounmap(ag->mii_ctrl);
err_unmap_base2:
	iounmap(ag->mac_base2);
err_unmap_base1:
	iounmap(ag->mac_base);
err_free_dev:
	free_netdev(dev);	/* was kfree(dev): wrong destructor for a netdev */
err_out:
	platform_set_drvdata(pdev, NULL);
	return err;
}
/*
 * nss_probe - probe one NSS core: load its firmware, map its register
 * spaces, register dummy netdevs + NAPI contexts for its interrupts,
 * record which feature handlers this core owns, and bring the core out
 * of reset.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): the signature is inside a preprocessor conditional whose
 * #if half is outside this chunk; the trailing #endif below closes it.
 */
static int nss_probe(struct platform_device *nss_dev)
#endif
{
	struct nss_top_instance *nss_top = &nss_top_main;
	struct nss_ctx_instance *nss_ctx = NULL;
	struct nss_platform_data *npd = NULL;
	struct netdev_priv_instance *ndev_priv;
#if (NSS_DT_SUPPORT == 1)
	struct reset_control *rstctl = NULL;
#endif
	int i, err = 0;
	const struct firmware *nss_fw = NULL;
	int rc = -ENODEV;
	void __iomem *load_mem;

#if (NSS_DT_SUPPORT == 1)
	struct device_node *np = NULL;

	if (nss_top_main.nss_hal_common_init_done == false) {
		/*
		 * Perform clock init common to all NSS cores
		 */
		struct clk *nss_tcm_src = NULL;
		struct clk *nss_tcm_clk = NULL;

		/*
		 * Attach debug interface to TLMM
		 */
		nss_write_32((uint32_t)nss_top_main.nss_fpb_base, NSS_REGS_FPB_CSR_CFG_OFFSET, 0x360);

		/*
		 * NSS TCM CLOCK
		 */
		nss_tcm_src = clk_get(&nss_dev->dev, NSS_TCM_SRC_CLK);
		if (IS_ERR(nss_tcm_src)) {
			pr_err("nss-driver: cannot get clock: " NSS_TCM_SRC_CLK);
			return -EFAULT;
		}

		clk_set_rate(nss_tcm_src, NSSTCM_FREQ);
		clk_prepare(nss_tcm_src);
		clk_enable(nss_tcm_src);

		nss_tcm_clk = clk_get(&nss_dev->dev, NSS_TCM_CLK);
		if (IS_ERR(nss_tcm_clk)) {
			pr_err("nss-driver: cannot get clock: " NSS_TCM_CLK);
			return -EFAULT;
		}

		clk_prepare(nss_tcm_clk);
		clk_enable(nss_tcm_clk);

		nss_top_main.nss_hal_common_init_done = true;
		nss_info("nss_hal_common_reset Done.\n");
	}

	if (nss_dev->dev.of_node) {
		/*
		 * Device Tree based init
		 */
		np = of_node_get(nss_dev->dev.of_node);
		npd = nss_drv_of_get_pdata(np, nss_dev);
		of_node_put(np);
		if (!npd) {
			return -EFAULT;
		}
		nss_ctx = &nss_top->nss[npd->id];
		nss_ctx->id = npd->id;
		nss_dev->id = nss_ctx->id;
	} else {
		/*
		 * Platform Device based init
		 */
		npd = (struct nss_platform_data *) nss_dev->dev.platform_data;
		nss_ctx = &nss_top->nss[nss_dev->id];
		nss_ctx->id = nss_dev->id;
	}
#else
	npd = (struct nss_platform_data *) nss_dev->dev.platform_data;
	nss_ctx = &nss_top->nss[nss_dev->id];
	nss_ctx->id = nss_dev->id;
#endif
	nss_ctx->nss_top = nss_top;

	nss_info("%p: NSS_DEV_ID %s \n", nss_ctx, dev_name(&nss_dev->dev));

	/*
	 * F/W load from NSS Driver
	 */
	if (nss_ctx->id == 0) {
		rc = request_firmware(&nss_fw, NETAP0_IMAGE, &(nss_dev->dev));
	} else if (nss_ctx->id == 1) {
		rc = request_firmware(&nss_fw, NETAP1_IMAGE, &(nss_dev->dev));
	} else {
		nss_warning("%p: Invalid nss dev: %d \n", nss_ctx, nss_dev->id);
	}

	/*
	 * Check if the file read is successful
	 */
	if (rc) {
		nss_warning("%p: request_firmware failed with err code: %d", nss_ctx, rc);
		err = rc;
		goto err_init_0;
	}

	if (nss_fw->size < MIN_IMG_SIZE) {
		/* NOTE(review): warns but still proceeds to load a truncated image. */
		nss_warning("%p: nss firmware is truncated, size:%d", nss_ctx, nss_fw->size);
	}

	load_mem = ioremap_nocache(npd->load_addr, nss_fw->size);
	if (load_mem == NULL) {
		nss_warning("%p: ioremap_nocache failed: %x", nss_ctx, npd->load_addr);
		release_firmware(nss_fw);
		/* NOTE(review): err is still 0 here, so this failure reports success — confirm intent. */
		goto err_init_0;
	}

	printk("nss_driver - fw of size %u bytes copied to load addr: %x, nss_id : %d\n", nss_fw->size, npd->load_addr, nss_dev->id);
	memcpy_toio(load_mem, nss_fw->data, nss_fw->size);
	release_firmware(nss_fw);
	iounmap(load_mem);

	/*
	 * Both NSS cores controlled by same regulator, Hook only Once
	 */
	if (!nss_ctx->id) {
		nss_core0_clk = clk_get(&nss_dev->dev, "nss_core_clk");
		if (IS_ERR(nss_core0_clk)) {

			err = PTR_ERR(nss_core0_clk);
			nss_info("%p: Regulator %s get failed, err=%d\n", nss_ctx, dev_name(&nss_dev->dev), err);
			return err;
		}
		clk_set_rate(nss_core0_clk, NSS_FREQ_550);
		clk_prepare(nss_core0_clk);
		clk_enable(nss_core0_clk);

#if (NSS_PM_SUPPORT == 1)
		/*
		 * Check if turbo is supported
		 */
		if (npd->turbo_frequency) {
			/*
			 * Turbo is supported
			 */
			printk("nss_driver - Turbo Support %d\n", npd->turbo_frequency);
			nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES;
			nss_pm_set_turbo();
		} else {
			printk("nss_driver - Turbo No Support %d\n", npd->turbo_frequency);
			nss_runtime_samples.freq_scale_sup_max = NSS_MAX_CPU_SCALES - 1;
		}
#else
		printk("nss_driver - Turbo Not Supported\n");
#endif
	}

	/*
	 * Get load address of NSS firmware
	 */
	nss_info("%p: Setting NSS%d Firmware load address to %x\n", nss_ctx, nss_ctx->id, npd->load_addr);
	nss_top->nss[nss_ctx->id].load = npd->load_addr;

	/*
	 * Get virtual and physical memory addresses for nss logical/hardware address maps
	 */

	/*
	 * Virtual address of CSM space
	 */
	nss_ctx->nmap = npd->nmap;
	nss_assert(nss_ctx->nmap);

	/*
	 * Physical address of CSM space
	 */
	nss_ctx->nphys = npd->nphys;
	nss_assert(nss_ctx->nphys);

	/*
	 * Virtual address of logical registers space
	 */
	nss_ctx->vmap = npd->vmap;
	nss_assert(nss_ctx->vmap);

	/*
	 * Physical address of logical registers space
	 */
	nss_ctx->vphys = npd->vphys;
	nss_assert(nss_ctx->vphys);
	nss_info("%d:ctx=%p, vphys=%x, vmap=%x, nphys=%x, nmap=%x", nss_ctx->id, nss_ctx, nss_ctx->vphys, nss_ctx->vmap, nss_ctx->nphys, nss_ctx->nmap);

	/*
	 * Register netdevice handlers
	 */
	nss_ctx->int_ctx[0].ndev = alloc_netdev(sizeof(struct netdev_priv_instance),
					"qca-nss-dev%d", nss_dummy_netdev_setup);
	if (nss_ctx->int_ctx[0].ndev == NULL) {
		nss_warning("%p: Could not allocate net_device #0", nss_ctx);
		err = -ENOMEM;
		goto err_init_0;
	}

	nss_ctx->int_ctx[0].ndev->netdev_ops = &nss_netdev_ops;
	nss_ctx->int_ctx[0].ndev->ethtool_ops = &nss_ethtool_ops;
	err = register_netdev(nss_ctx->int_ctx[0].ndev);
	if (err) {
		nss_warning("%p: Could not register net_device #0", nss_ctx);
		goto err_init_1;
	}

	/*
	 * request for IRQs
	 *
	 * WARNING: CPU affinities should be set using OS supported methods
	 */
	nss_ctx->int_ctx[0].nss_ctx = nss_ctx;
	nss_ctx->int_ctx[0].shift_factor = 0;
	nss_ctx->int_ctx[0].irq = npd->irq[0];
	err = request_irq(npd->irq[0], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[0]);
	if (err) {
		nss_warning("%d: IRQ0 request failed", nss_dev->id);
		goto err_init_2;
	}

	/*
	 * Register NAPI for NSS core interrupt #0
	 */
	ndev_priv = netdev_priv(nss_ctx->int_ctx[0].ndev);
	ndev_priv->int_ctx = &nss_ctx->int_ctx[0];
	netif_napi_add(nss_ctx->int_ctx[0].ndev, &nss_ctx->int_ctx[0].napi,
			nss_core_handle_napi, 64);
	napi_enable(&nss_ctx->int_ctx[0].napi);
	nss_ctx->int_ctx[0].napi_active = true;

	/*
	 * Check if second interrupt is supported on this nss core
	 */
	if (npd->num_irq > 1) {
		nss_info("%d: This NSS core supports two interrupts", nss_dev->id);

		/*
		 * Register netdevice handlers
		 */
		nss_ctx->int_ctx[1].ndev = alloc_netdev(sizeof(struct netdev_priv_instance),
						"qca-nss-dev%d", nss_dummy_netdev_setup);
		if (nss_ctx->int_ctx[1].ndev == NULL) {
			nss_warning("%p: Could not allocate net_device #1", nss_ctx);
			err = -ENOMEM;
			goto err_init_3;
		}

		nss_ctx->int_ctx[1].ndev->netdev_ops = &nss_netdev_ops;
		nss_ctx->int_ctx[1].ndev->ethtool_ops = &nss_ethtool_ops;
		err = register_netdev(nss_ctx->int_ctx[1].ndev);
		if (err) {
			nss_warning("%p: Could not register net_device #1", nss_ctx);
			goto err_init_4;
		}

		nss_ctx->int_ctx[1].nss_ctx = nss_ctx;
		nss_ctx->int_ctx[1].shift_factor = 15;
		nss_ctx->int_ctx[1].irq = npd->irq[1];
		err = request_irq(npd->irq[1], nss_handle_irq, IRQF_DISABLED, "nss", &nss_ctx->int_ctx[1]);
		if (err) {
			nss_warning("%d: IRQ1 request failed for nss", nss_dev->id);
			goto err_init_5;
		}

		/*
		 * Register NAPI for NSS core interrupt #1
		 */
		ndev_priv = netdev_priv(nss_ctx->int_ctx[1].ndev);
		ndev_priv->int_ctx = &nss_ctx->int_ctx[1];
		netif_napi_add(nss_ctx->int_ctx[1].ndev, &nss_ctx->int_ctx[1].napi,
				nss_core_handle_napi, 64);
		napi_enable(&nss_ctx->int_ctx[1].napi);
		nss_ctx->int_ctx[1].napi_active = true;
	}

	spin_lock_bh(&(nss_top->lock));

	/*
	 * Check functionalities are supported by this NSS core
	 */
	if (npd->shaping_enabled == NSS_FEATURE_ENABLED) {
		nss_top->shaping_handler_id = nss_dev->id;
		/* NOTE(review): "%p" is used with __func__ (a string) — should be "%s". */
		printk(KERN_INFO "%p: NSS Shaping is enabled, handler id: %u\n", __func__, nss_top->shaping_handler_id);
	}

	if (npd->ipv4_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv4_handler_id = nss_dev->id;
		nss_ipv4_register_handler();
		nss_pppoe_register_handler();
		nss_eth_rx_register_handler();
		nss_n2h_register_handler();
		nss_virt_if_register_handler();
		nss_lag_register_handler();
		nss_dynamic_interface_register_handler();
		nss_top->capwap_handler_id = nss_dev->id;
		nss_capwap_init();

		for (i = 0; i < NSS_MAX_VIRTUAL_INTERFACES; i++) {
			nss_top->virt_if_handler_id[i] = nss_dev->id;
		}

		nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_802_3_REDIR] = nss_dev->id;
	}

	if (npd->ipv4_reasm_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv4_reasm_handler_id = nss_dev->id;
		nss_ipv4_reasm_register_handler();
	}

	if (npd->ipv6_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipv6_handler_id = nss_dev->id;
		nss_ipv6_register_handler();
	}

	if (npd->crypto_enabled == NSS_FEATURE_ENABLED) {
		nss_top->crypto_handler_id = nss_dev->id;
		nss_crypto_register_handler();
	}

	if (npd->ipsec_enabled == NSS_FEATURE_ENABLED) {
		nss_top->ipsec_handler_id = nss_dev->id;
		nss_ipsec_register_handler();
	}

	if (npd->wlan_enabled == NSS_FEATURE_ENABLED) {
		nss_top->wlan_handler_id = nss_dev->id;
	}

	if (npd->tun6rd_enabled == NSS_FEATURE_ENABLED) {
		nss_top->tun6rd_handler_id = nss_dev->id;
	}

	if (npd->tunipip6_enabled == NSS_FEATURE_ENABLED) {
		nss_top->tunipip6_handler_id = nss_dev->id;
		nss_tunipip6_register_handler();
	}

	if (npd->gre_redir_enabled == NSS_FEATURE_ENABLED) {
		nss_top->gre_redir_handler_id = nss_dev->id;
		nss_top->dynamic_interface_table[NSS_DYNAMIC_INTERFACE_TYPE_GRE_REDIR] = nss_dev->id;
		nss_gre_redir_register_handler();
		nss_sjack_register_handler();
	}

	/*
	 * Mark data plane enabled so when nss core init done we call register to nss-gmac
	 */
	for (i = 0 ; i < NSS_MAX_PHYSICAL_INTERFACES ; i ++) {
		if (npd->gmac_enabled[i] == NSS_FEATURE_ENABLED) {
			nss_data_plane_set_enabled(i);
		}
	}

#if (NSS_PM_SUPPORT == 1)
	nss_freq_register_handler();
#endif
	nss_lso_rx_register_handler();

	nss_top->frequency_handler_id = nss_dev->id;

	spin_unlock_bh(&(nss_top->lock));

	/*
	 * Initialize decongestion callbacks to NULL
	 */
	for (i = 0; i< NSS_MAX_CLIENTS; i++) {
		nss_ctx->queue_decongestion_callback[i] = 0;
		nss_ctx->queue_decongestion_ctx[i] = 0;
	}

	spin_lock_init(&(nss_ctx->decongest_cb_lock));
	nss_ctx->magic = NSS_CTX_MAGIC;

	nss_info("%p: Reseting NSS core %d now", nss_ctx, nss_ctx->id);

	/*
	 * Enable clocks and bring NSS core out of reset
	 */
#if (NSS_DT_SUPPORT == 1)
	/*
	 * Remove UBI32 reset clamp
	 *
	 * NOTE(review): every failure below jumps to err_init_5, which
	 * unregisters int_ctx[1].ndev — but that netdev only exists when
	 * npd->num_irq > 1, and even then it was fully registered here, so
	 * err_init_5 both tears down a possibly-NULL netdev and never
	 * free_irq()s npd->irq[1]. Also, each nss_info() passes an extra
	 * nss_ctx->id argument with no matching conversion specifier.
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "clkrst_clamp");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert UBI32 reset clamp failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 core clamp
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "clamp");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert UBI32 core clamp failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 AHB reset
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "ahb");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert AHB reset failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	/*
	 * Remove UBI32 AXI reset
	 */
	rstctl = devm_reset_control_get(&nss_dev->dev, "axi");
	if (IS_ERR(rstctl)) {
		nss_info("%p: Deassert AXI reset failed", nss_ctx, nss_ctx->id);
		err = -EFAULT;
		goto err_init_5;
	}
	reset_control_deassert(rstctl);
	mdelay(1);
	reset_control_put(rstctl);

	nss_hal_core_reset(nss_ctx->nmap, nss_ctx->load);
#else
	nss_hal_core_reset(nss_dev->id, nss_ctx->nmap, nss_ctx->load, nss_top->clk_src);
#endif

	/*
	 * Enable interrupts for NSS core
	 */
	nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[0].irq,
					nss_ctx->int_ctx[0].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);

	if (npd->num_irq > 1) {
		nss_hal_enable_interrupt(nss_ctx->nmap, nss_ctx->int_ctx[1].irq,
					nss_ctx->int_ctx[1].shift_factor, NSS_HAL_SUPPORTED_INTERRUPTS);
	}

	/*
	 * Initialize max buffer size for NSS core
	 */
	nss_ctx->max_buf_size = NSS_NBUF_PAYLOAD_SIZE;
	nss_info("%p: All resources initialized and nss core%d has been brought out of reset", nss_ctx, nss_dev->id);
	/* Success also exits through err_init_0 so the DT pdata copy is released. */
	goto err_init_0;

err_init_5:
	unregister_netdev(nss_ctx->int_ctx[1].ndev);

err_init_4:
	free_netdev(nss_ctx->int_ctx[1].ndev);

err_init_3:
	free_irq(npd->irq[0], &nss_ctx->int_ctx[0]);

err_init_2:
	unregister_netdev(nss_ctx->int_ctx[0].ndev);

err_init_1:
	free_netdev(nss_ctx->int_ctx[0].ndev);

#if (NSS_DT_SUPPORT == 1)
	if (nss_dev->dev.of_node) {
		if (npd->nmap) {
			iounmap((void *)npd->nmap);
		}

		if (npd->vmap) {
			iounmap((void *)npd->vmap);
		}
	}
#endif

err_init_0:
#if (NSS_DT_SUPPORT == 1)
	if (nss_dev->dev.of_node) {
		devm_kfree(&nss_dev->dev, npd);
	}
#endif
	return err;
}
/*
 * drv_disconnect - tear down driver state when the device goes away:
 * remove the sysfs attributes, then detach the network device.
 *
 * NOTE(review): unregister_netdev() is called with no argument; the
 * kernel's unregister_netdev() takes a struct net_device *. Presumably
 * this resolves to a local zero-argument wrapper or macro — confirm,
 * otherwise this will not compile against <linux/netdevice.h>.
 */
void drv_disconnect() { remove_sysfs_files(); unregister_netdev(); }
int bgmac_phy_connect_direct(struct bgmac *bgmac) { struct fixed_phy_status fphy_status = { .link = 1, .speed = SPEED_1000, .duplex = DUPLEX_FULL, }; struct phy_device *phy_dev; int err; phy_dev = fixed_phy_register(PHY_POLL, &fphy_status, NULL); if (!phy_dev || IS_ERR(phy_dev)) { dev_err(bgmac->dev, "Failed to register fixed PHY device\n"); return -ENODEV; } err = phy_connect_direct(bgmac->net_dev, phy_dev, bgmac_adjust_link, PHY_INTERFACE_MODE_MII); if (err) { dev_err(bgmac->dev, "Connecting PHY failed\n"); return err; } return err; } EXPORT_SYMBOL_GPL(bgmac_phy_connect_direct); struct bgmac *bgmac_alloc(struct device *dev) { struct net_device *net_dev; struct bgmac *bgmac; /* Allocation and references */ net_dev = devm_alloc_etherdev(dev, sizeof(*bgmac)); if (!net_dev) return NULL; net_dev->netdev_ops = &bgmac_netdev_ops; net_dev->ethtool_ops = &bgmac_ethtool_ops; bgmac = netdev_priv(net_dev); bgmac->dev = dev; bgmac->net_dev = net_dev; return bgmac; } EXPORT_SYMBOL_GPL(bgmac_alloc); int bgmac_enet_probe(struct bgmac *bgmac) { struct net_device *net_dev = bgmac->net_dev; int err; bgmac_chip_intrs_off(bgmac); net_dev->irq = bgmac->irq; SET_NETDEV_DEV(net_dev, bgmac->dev); dev_set_drvdata(bgmac->dev, bgmac); if (!is_valid_ether_addr(net_dev->dev_addr)) { dev_err(bgmac->dev, "Invalid MAC addr: %pM\n", net_dev->dev_addr); eth_hw_addr_random(net_dev); dev_warn(bgmac->dev, "Using random MAC: %pM\n", net_dev->dev_addr); } /* This (reset &) enable is not preset in specs or reference driver but * Broadcom does it in arch PCI code when enabling fake PCI device. 
*/ bgmac_clk_enable(bgmac, 0); /* This seems to be fixing IRQ by assigning OOB #6 to the core */ if (!(bgmac->feature_flags & BGMAC_FEAT_IDM_MASK)) { if (bgmac->feature_flags & BGMAC_FEAT_IRQ_ID_OOB_6) bgmac_idm_write(bgmac, BCMA_OOB_SEL_OUT_A30, 0x86); } bgmac_chip_reset(bgmac); err = bgmac_dma_alloc(bgmac); if (err) { dev_err(bgmac->dev, "Unable to alloc memory for DMA\n"); goto err_out; } bgmac->int_mask = BGMAC_IS_ERRMASK | BGMAC_IS_RX | BGMAC_IS_TX_MASK; if (bcm47xx_nvram_getenv("et0_no_txint", NULL, 0) == 0) bgmac->int_mask &= ~BGMAC_IS_TX_MASK; netif_napi_add(net_dev, &bgmac->napi, bgmac_poll, BGMAC_WEIGHT); err = bgmac_phy_connect(bgmac); if (err) { dev_err(bgmac->dev, "Cannot connect to phy\n"); goto err_dma_free; } net_dev->features = NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM; net_dev->hw_features = net_dev->features; net_dev->vlan_features = net_dev->features; err = register_netdev(bgmac->net_dev); if (err) { dev_err(bgmac->dev, "Cannot register net device\n"); goto err_phy_disconnect; } netif_carrier_off(net_dev); return 0; err_phy_disconnect: phy_disconnect(net_dev->phydev); err_dma_free: bgmac_dma_free(bgmac); err_out: return err; } EXPORT_SYMBOL_GPL(bgmac_enet_probe); void bgmac_enet_remove(struct bgmac *bgmac) { unregister_netdev(bgmac->net_dev); phy_disconnect(bgmac->net_dev->phydev); netif_napi_del(&bgmac->napi); bgmac_dma_free(bgmac); free_netdev(bgmac->net_dev); }
/*
 * zd_netdev_disconnect - detach the given net_device from the kernel's
 * network stack when the underlying device disappears.
 */
void zd_netdev_disconnect(struct net_device *netdev)
{
	unregister_netdev(netdev);
}
/*
 * prism54_probe - PCI probe for Prism54 devices: enable the PCI device,
 * sanity-check latency timer and BAR0, set up DMA/bus mastering, build
 * the net_device via islpci_setup() and install the IRQ handler.
 *
 * Returns 0 on success, -ENODEV if the device cannot be enabled, or -EIO
 * for any later failure.
 *
 * NOTE(review): every failure after pci_enable_device() collapses to
 * -EIO even when rvalue holds a more specific error code — consider
 * propagating rvalue instead.
 */
static int prism54_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct net_device *ndev;
	u8 latency_tmr;
	u32 mem_addr;
	islpci_private *priv;
	int rvalue;

	/* Enable the pci device */
	if (pci_enable_device(pdev)) {
		printk(KERN_ERR "%s: pci_enable_device() failed.\n", DRV_NAME);
		return -ENODEV;
	}

	/* Bump the latency timer up to the minimum the hardware tolerates. */
	pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &latency_tmr);
#if VERBOSE > SHOW_ERROR_MESSAGES
	DEBUG(SHOW_TRACING, "latency timer: %x\n", latency_tmr);
#endif
	if (latency_tmr < PCIDEVICE_LATENCY_TIMER_MIN) {
		pci_write_config_byte(pdev, PCI_LATENCY_TIMER,
				      PCIDEVICE_LATENCY_TIMER_VAL);
	}

	/* The device only does 32-bit DMA. */
	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) {
		printk(KERN_ERR "%s: 32-bit PCI DMA not supported", DRV_NAME);
		goto do_pci_disable_device;
	}

	/* Optional module-parameter override of the PCI TRDY/RETRY timers. */
	if ( init_pcitm >= 0 ) {
		pci_write_config_byte(pdev, 0x40, (u8)init_pcitm);
		pci_write_config_byte(pdev, 0x41, (u8)init_pcitm);
	} else {
		printk(KERN_INFO "PCI TRDY/RETRY unchanged\n");
	}

	rvalue = pci_request_regions(pdev, DRV_NAME);
	if (rvalue) {
		printk(KERN_ERR "%s: pci_request_regions failure (rc=%d)\n",
		       DRV_NAME, rvalue);
		goto do_pci_disable_device;
	}

	/* Sanity-check that firmware actually assigned BAR0. */
	rvalue = pci_read_config_dword(pdev, PCI_BASE_ADDRESS_0, &mem_addr);
	if (rvalue || !mem_addr) {
		printk(KERN_ERR "%s: PCI device memory region not configured; fix your BIOS or CardBus bridge/drivers\n",
		       DRV_NAME);
		goto do_pci_release_regions;
	}

	DEBUG(SHOW_TRACING, "%s: pci_set_master(pdev)\n", DRV_NAME);
	pci_set_master(pdev);

	/* Memory-write-invalidate is best-effort; failure is ignored. */
	pci_try_set_mwi(pdev);

	if (!(ndev = islpci_setup(pdev))) {
		printk(KERN_ERR "%s: could not configure network device\n",
		       DRV_NAME);
		goto do_pci_clear_mwi;
	}

	priv = netdev_priv(ndev);
	islpci_set_state(priv, PRV_STATE_PREBOOT);

	/* Quiesce the hardware before installing the IRQ handler. */
	isl38xx_disable_interrupts(priv->device_base);

	rvalue = request_irq(pdev->irq, islpci_interrupt,
			     IRQF_SHARED, ndev->name, priv);
	if (rvalue) {
		/* NOTE(review): returns -EIO below, not rvalue. */
		printk(KERN_ERR "%s: could not install IRQ handler\n",
		       ndev->name);
		goto do_unregister_netdev;
	}

	return 0;

      do_unregister_netdev:
	unregister_netdev(ndev);
	islpci_free_memory(priv);
	pci_set_drvdata(pdev, NULL);
	free_netdev(ndev);
	priv = NULL;
      do_pci_clear_mwi:
	pci_clear_mwi(pdev);
      do_pci_release_regions:
	pci_release_regions(pdev);
      do_pci_disable_device:
	pci_disable_device(pdev);
	return -EIO;
}
/*
 * c2_probe - PCI probe for the AMSO1100 RNIC: validate the BARs and the
 * adapter's magic/version/IVN, allocate the IB device, map the descriptor
 * rings and register spaces, and register the net device and RNIC.
 *
 * Fixes vs. previous version: when c2_devinit() or c2_register_device()
 * failed, ret was still 0 from the preceding successful call, so the probe
 * reported success while bailing out. Both paths now return a real error.
 *
 * Returns 0 on success or a negative error code; bailN labels unwind in
 * reverse acquisition order.
 */
static int __devinit c2_probe(struct pci_dev *pcidev, const struct pci_device_id *ent)
{
	int ret = 0, i;
	unsigned long reg0_start, reg0_flags, reg0_len;
	unsigned long reg2_start, reg2_flags, reg2_len;
	unsigned long reg4_start, reg4_flags, reg4_len;
	unsigned kva_map_size;
	struct net_device *netdev = NULL;
	struct c2_dev *c2dev = NULL;
	void __iomem *mmio_regs = NULL;

	printk(KERN_INFO PFX "AMSO1100 Gigabit Ethernet driver v%s loaded\n",
		DRV_VERSION);

	/* Enable PCI device */
	ret = pci_enable_device(pcidev);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to enable PCI device\n",
			pci_name(pcidev));
		goto bail0;
	}

	reg0_start = pci_resource_start(pcidev, BAR_0);
	reg0_len = pci_resource_len(pcidev, BAR_0);
	reg0_flags = pci_resource_flags(pcidev, BAR_0);

	reg2_start = pci_resource_start(pcidev, BAR_2);
	reg2_len = pci_resource_len(pcidev, BAR_2);
	reg2_flags = pci_resource_flags(pcidev, BAR_2);

	reg4_start = pci_resource_start(pcidev, BAR_4);
	reg4_len = pci_resource_len(pcidev, BAR_4);
	reg4_flags = pci_resource_flags(pcidev, BAR_4);

	pr_debug("BAR0 size = 0x%lX bytes\n", reg0_len);
	pr_debug("BAR2 size = 0x%lX bytes\n", reg2_len);
	pr_debug("BAR4 size = 0x%lX bytes\n", reg4_len);

	/* Make sure PCI base addr are MMIO */
	if (!(reg0_flags & IORESOURCE_MEM) ||
	    !(reg2_flags & IORESOURCE_MEM) || !(reg4_flags & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "PCI regions not an MMIO resource\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Check for weird/broken PCI region reporting */
	if ((reg0_len < C2_REG0_SIZE) ||
	    (reg2_len < C2_REG2_SIZE) || (reg4_len < C2_REG4_SIZE)) {
		printk(KERN_ERR PFX "Invalid PCI region sizes\n");
		ret = -ENODEV;
		goto bail1;
	}

	/* Reserve PCI I/O and memory resources */
	ret = pci_request_regions(pcidev, DRV_NAME);
	if (ret) {
		printk(KERN_ERR PFX "%s: Unable to request regions\n",
			pci_name(pcidev));
		goto bail1;
	}

	if ((sizeof(dma_addr_t) > 4)) {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(64));
		if (ret < 0) {
			printk(KERN_ERR PFX "64b DMA configuration failed\n");
			goto bail2;
		}
	} else {
		ret = pci_set_dma_mask(pcidev, DMA_BIT_MASK(32));
		if (ret < 0) {
			printk(KERN_ERR PFX "32b DMA configuration failed\n");
			goto bail2;
		}
	}

	/* Enables bus-mastering on the device */
	pci_set_master(pcidev);

	/* Remap the adapter PCI registers in BAR4 */
	mmio_regs = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				    sizeof(struct c2_adapter_pci_regs));
	if (!mmio_regs) {
		printk(KERN_ERR PFX
			"Unable to remap adapter PCI registers in BAR4\n");
		ret = -EIO;
		goto bail2;
	}

	/* Validate PCI regs magic */
	for (i = 0; i < sizeof(c2_magic); i++) {
		if (c2_magic[i] != readb(mmio_regs + C2_REGS_MAGIC + i)) {
			printk(KERN_ERR PFX "Downlevel Firmware boot loader "
				"[%d/%Zd: got 0x%x, exp 0x%x]. Use the cc_flash "
			       "utility to update your boot loader\n",
			       i + 1, sizeof(c2_magic),
			       readb(mmio_regs + C2_REGS_MAGIC + i),
			       c2_magic[i]);
			printk(KERN_ERR PFX "Adapter not claimed\n");
			iounmap(mmio_regs);
			ret = -EIO;
			goto bail2;
		}
	}

	/* Validate the adapter version */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)) != C2_VERSION) {
		printk(KERN_ERR PFX "Version mismatch "
			"[fw=%u, c2=%u], Adapter not claimed\n",
			be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_VERS)),
			C2_VERSION);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Validate the adapter IVN */
	if (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)) != C2_IVN) {
		printk(KERN_ERR PFX "Downlevel FIrmware level. You should be using "
		       "the OpenIB device support kit. "
		       "[fw=0x%x, c2=0x%x], Adapter not claimed\n",
		       be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_IVN)),
		       C2_IVN);
		ret = -EINVAL;
		iounmap(mmio_regs);
		goto bail2;
	}

	/* Allocate hardware structure */
	c2dev = (struct c2_dev *) ib_alloc_device(sizeof(*c2dev));
	if (!c2dev) {
		printk(KERN_ERR PFX "%s: Unable to alloc hardware struct\n",
			pci_name(pcidev));
		ret = -ENOMEM;
		iounmap(mmio_regs);
		goto bail2;
	}

	memset(c2dev, 0, sizeof(*c2dev));
	spin_lock_init(&c2dev->lock);
	c2dev->pcidev = pcidev;
	c2dev->cur_tx = 0;

	/* Get the last RX index */
	c2dev->cur_rx =
	    (be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_HRX_CUR)) -
	     0xffffc000) / sizeof(struct c2_rxp_desc);

	/* Request an interrupt line for the driver */
	ret = request_irq(pcidev->irq, c2_interrupt, IRQF_SHARED, DRV_NAME, c2dev);
	if (ret) {
		printk(KERN_ERR PFX "%s: requested IRQ %u is busy\n",
			pci_name(pcidev), pcidev->irq);
		iounmap(mmio_regs);
		goto bail3;
	}

	/* Set driver specific data */
	pci_set_drvdata(pcidev, c2dev);

	/* Initialize network device */
	if ((netdev = c2_devinit(c2dev, mmio_regs)) == NULL) {
		ret = -ENOMEM;	/* was missing: probe used to return 0 here */
		iounmap(mmio_regs);
		goto bail4;
	}

	/* Save off the actual size prior to unmapping mmio_regs */
	kva_map_size = be32_to_cpu((__force __be32) readl(mmio_regs + C2_REGS_PCI_WINSIZE));

	/* Unmap the adapter PCI registers in BAR4 */
	iounmap(mmio_regs);

	/* Register network device */
	ret = register_netdev(netdev);
	if (ret) {
		printk(KERN_ERR PFX "Unable to register netdev, ret = %d\n",
			ret);
		goto bail5;
	}

	/* Disable network packets */
	netif_stop_queue(netdev);

	/* Remap the adapter HRXDQ PA space to kernel VA space */
	c2dev->mmio_rxp_ring = ioremap_nocache(reg4_start + C2_RXP_HRXDQ_OFFSET,
					       C2_RXP_HRXDQ_SIZE);
	if (!c2dev->mmio_rxp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HRXDQ region\n");
		ret = -EIO;
		goto bail6;
	}

	/* Remap the adapter HTXDQ PA space to kernel VA space */
	c2dev->mmio_txp_ring = ioremap_nocache(reg4_start + C2_TXP_HTXDQ_OFFSET,
					       C2_TXP_HTXDQ_SIZE);
	if (!c2dev->mmio_txp_ring) {
		printk(KERN_ERR PFX "Unable to remap MMIO HTXDQ region\n");
		ret = -EIO;
		goto bail7;
	}

	/* Save off the current RX index in the last 4 bytes of the TXP Ring */
	C2_SET_CUR_RX(c2dev, c2dev->cur_rx);

	/* Remap the PCI registers in adapter BAR0 to kernel VA space */
	c2dev->regs = ioremap_nocache(reg0_start, reg0_len);
	if (!c2dev->regs) {
		printk(KERN_ERR PFX "Unable to remap BAR0\n");
		ret = -EIO;
		goto bail8;
	}

	/* Remap the PCI registers in adapter BAR4 to kernel VA space */
	c2dev->pa = reg4_start + C2_PCI_REGS_OFFSET;
	c2dev->kva = ioremap_nocache(reg4_start + C2_PCI_REGS_OFFSET,
				     kva_map_size);
	if (!c2dev->kva) {
		printk(KERN_ERR PFX "Unable to remap BAR4\n");
		ret = -EIO;
		goto bail9;
	}

	/* Print out the MAC address */
	c2_print_macaddr(netdev);

	ret = c2_rnic_init(c2dev);
	if (ret) {
		printk(KERN_ERR PFX "c2_rnic_init failed: %d\n", ret);
		goto bail10;
	}

	/* Propagate the real error instead of returning 0 on failure. */
	ret = c2_register_device(c2dev);
	if (ret)
		goto bail10;

	return 0;

 bail10:
	iounmap(c2dev->kva);
 bail9:
	iounmap(c2dev->regs);
 bail8:
	iounmap(c2dev->mmio_txp_ring);
 bail7:
	iounmap(c2dev->mmio_rxp_ring);
 bail6:
	unregister_netdev(netdev);
 bail5:
	free_netdev(netdev);
 bail4:
	free_irq(pcidev->irq, c2dev);
 bail3:
	ib_dealloc_device(&c2dev->ibdev);
 bail2:
	pci_release_regions(pcidev);
 bail1:
	pci_disable_device(pcidev);
 bail0:
	return ret;
}
/* * Jeff: this function should be called under ioctl (rtnl_lock is accquired) while * LINUX_VERSION_CODE < KERNEL_VERSION(2,6,26) */ int rtw_change_ifname(_adapter *padapter, const char *ifname) { struct net_device *pnetdev; struct net_device *old_pnetdev = padapter->pnetdev; int ret; if(!padapter) goto error; #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) if(!rtnl_is_locked()) unregister_netdev(old_pnetdev); else #endif unregister_netdevice(old_pnetdev); rtw_proc_remove_one(old_pnetdev); pnetdev = rtw_init_netdev(padapter); if (!pnetdev) { ret = -1; goto error; } #ifdef CONFIG_USB_HCI SET_NETDEV_DEV(pnetdev, &padapter->dvobjpriv.pusbintf->dev); usb_set_intfdata(padapter->dvobjpriv.pusbintf, pnetdev); #elif defined(CONFIG_PCI_HCI) #if LINUX_VERSION_CODE > KERNEL_VERSION(2,5,0) SET_NETDEV_DEV(pnetdev, &padapter->dvobjpriv.ppcidev->dev); #endif pci_set_drvdata(padapter->dvobjpriv.ppcidev, pnetdev); #endif rtw_init_netdev_name(pnetdev, ifname); _rtw_memcpy(pnetdev->dev_addr, padapter->eeprompriv.mac_addr, ETH_ALEN); #if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,26)) if(!rtnl_is_locked()) ret = register_netdev(pnetdev); else #endif ret = register_netdevice(pnetdev); if ( ret != 0) { RT_TRACE(_module_hci_intfs_c_,_drv_err_,("register_netdev() failed\n")); goto error; } rtw_proc_init_one(pnetdev); //do free_netdev outside return 0; error: return -1; }