static int mt76_dma_init(struct mt76_dev *dev)
{
	int i;

	init_dummy_netdev(&dev->napi_dev);

	for (i = 0; i < ARRAY_SIZE(dev->q_rx); i++) {
		netif_napi_add(&dev->napi_dev, &dev->napi[i], mt76_dma_rx_poll,
			       64);
		mt76_dma_rx_fill(dev, &dev->q_rx[i], false);
		skb_queue_head_init(&dev->rx_skb[i]);
		napi_enable(&dev->napi[i]);
	}

	return 0;
}
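/*
 * A minimal sketch of the shape a poll callback registered by the
 * netif_napi_add() call above takes. The body is illustrative only:
 * example_rx_process() and example_rx_irq_enable() are hypothetical
 * helpers, not part of mt76; only the NAPI budget/complete protocol
 * shown here is the real contract.
 */
static int example_rx_poll(struct napi_struct *napi, int budget)
{
	struct mt76_dev *dev = container_of(napi->dev, struct mt76_dev,
					    napi_dev);
	int done;

	/* Process up to 'budget' received frames. */
	done = example_rx_process(dev, budget);

	/* Re-arm the RX interrupt once the queue is drained. */
	if (done < budget && napi_complete_done(napi, done))
		example_rx_irq_enable(dev);

	return done;
}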
static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qtnf_pcie_bus_priv *pcie_priv;
	struct qtnf_bus *bus;
	int ret;

	bus = devm_kzalloc(&pdev->dev,
			   sizeof(*bus) + sizeof(*pcie_priv), GFP_KERNEL);
	if (!bus)
		return -ENOMEM;

	pcie_priv = get_bus_priv(bus);

	pci_set_drvdata(pdev, bus);
	bus->bus_ops = &qtnf_pcie_bus_ops;
	bus->dev = &pdev->dev;
	bus->fw_state = QTNF_FW_STATE_RESET;
	pcie_priv->pdev = pdev;

	strcpy(bus->fwname, QTN_PCI_PEARL_FW_NAME);
	init_completion(&bus->firmware_init_complete);
	mutex_init(&bus->bus_lock);
	spin_lock_init(&pcie_priv->tx0_lock);
	spin_lock_init(&pcie_priv->irq_lock);
	spin_lock_init(&pcie_priv->tx_reclaim_lock);

	/* init stats */
	pcie_priv->tx_full_count = 0;
	pcie_priv->tx_done_count = 0;
	pcie_priv->pcie_irq_count = 0;
	pcie_priv->pcie_irq_rx_count = 0;
	pcie_priv->pcie_irq_tx_count = 0;
	pcie_priv->pcie_irq_uf_count = 0;
	pcie_priv->tx_reclaim_done = 0;
	pcie_priv->tx_reclaim_req = 0;

	tasklet_init(&pcie_priv->reclaim_tq, qtnf_reclaim_tasklet_fn,
		     (unsigned long)pcie_priv);

	init_dummy_netdev(&bus->mux_dev);
	netif_napi_add(&bus->mux_dev, &bus->mux_napi,
		       qtnf_rx_poll, 10);

	pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PEARL_PCIE");
	if (!pcie_priv->workqueue) {
		pr_err("failed to alloc bus workqueue\n");
		ret = -ENODEV;
		goto err_init;
	}

	if (!pci_is_pcie(pdev)) {
		pr_err("device %s is not PCI Express\n", pci_name(pdev));
		ret = -EIO;
		goto err_base;
	}

	qtnf_tune_pcie_mps(pcie_priv);

	ret = pcim_enable_device(pdev);
	if (ret) {
		pr_err("failed to init PCI device %x\n", pdev->device);
		goto err_base;
	} else {
		pr_debug("successful init of PCI device %x\n", pdev->device);
	}

#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
#else
	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
#endif
	if (ret) {
		pr_err("PCIE DMA coherent mask init failed\n");
		goto err_base;
	}

	pci_set_master(pdev);
	qtnf_pcie_init_irq(pcie_priv);

	ret = qtnf_pcie_init_memory(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE memory init failed\n");
		goto err_base;
	}

	pci_save_state(pdev);

	ret = qtnf_pcie_init_shm_ipc(pcie_priv);
	if (ret < 0) {
		pr_err("PCIE SHM IPC init failed\n");
		goto err_base;
	}

	ret = qtnf_pcie_init_xfer(pcie_priv);
	if (ret) {
		pr_err("PCIE xfer init failed\n");
		goto err_ipc;
	}

	/* init default irq settings */
	qtnf_init_hdp_irqs(pcie_priv);

	/* start with disabled irqs */
	qtnf_disable_hdp_irqs(pcie_priv);

	ret = devm_request_irq(&pdev->dev, pdev->irq, &qtnf_interrupt, 0,
			       "qtnf_pcie_irq", (void *)bus);
	if (ret) {
		pr_err("failed to request pcie irq %d\n", pdev->irq);
		goto err_xfer;
	}

	qtnf_bringup_fw_async(bus);

	return 0;

err_xfer:
	qtnf_free_xfer_buffers(pcie_priv);

err_ipc:
	qtnf_pcie_free_shm_ipc(pcie_priv);

err_base:
	flush_workqueue(pcie_priv->workqueue);
	destroy_workqueue(pcie_priv->workqueue);
	netif_napi_del(&bus->mux_napi);

err_init:
	tasklet_kill(&pcie_priv->reclaim_tq);
	pci_set_drvdata(pdev, NULL);

	return ret;
}
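/*
 * Sketch of how a probe routine like the one above is typically hooked
 * into the PCI core via module_pci_driver(). The vendor/device ID
 * macros and the remove callback are illustrative placeholders,
 * not the driver's actual definitions.
 */
#define EXAMPLE_VENDOR_ID_QUANTENNA	0x1bb5	/* illustrative */
#define EXAMPLE_DEVICE_ID_PEARL		0x0008	/* illustrative */

static const struct pci_device_id qtnf_pcie_devid_table[] = {
	{ PCI_DEVICE(EXAMPLE_VENDOR_ID_QUANTENNA, EXAMPLE_DEVICE_ID_PEARL) },
	{ /* sentinel */ },
};
MODULE_DEVICE_TABLE(pci, qtnf_pcie_devid_table);

static struct pci_driver qtnf_pcie_drv_data = {
	.name = "qtnf_pcie",
	.id_table = qtnf_pcie_devid_table,
	.probe = qtnf_pcie_probe,
	.remove = qtnf_pcie_remove,	/* hypothetical teardown hook */
};
module_pci_driver(qtnf_pcie_drv_data);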
struct ieee80211_hw *ieee80211_alloc_hw(size_t priv_data_len,
					const struct ieee80211_ops *ops)
{
	struct ieee80211_local *local;
	int priv_size, i;
	struct wiphy *wiphy;

	/* Ensure 32-byte alignment of our private data and hw private data.
	 * We use the wiphy priv data for both our ieee80211_local and for
	 * the driver's private data
	 *
	 * In memory it'll be like this:
	 *
	 * +-------------------------+
	 * | struct wiphy            |
	 * +-------------------------+
	 * | struct ieee80211_local  |
	 * +-------------------------+
	 * | driver's private data   |
	 * +-------------------------+
	 *
	 */
	priv_size = ALIGN(sizeof(*local), NETDEV_ALIGN) + priv_data_len;

	wiphy = wiphy_new(&mac80211_config_ops, priv_size);
	if (!wiphy)
		return NULL;

	wiphy->mgmt_stypes = ieee80211_default_mgmt_stypes;
	wiphy->privid = mac80211_wiphy_privid;

	wiphy->flags |= WIPHY_FLAG_NETNS_OK |
			WIPHY_FLAG_4ADDR_AP |
			WIPHY_FLAG_4ADDR_STATION;

	if (!ops->set_key)
		wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	wiphy->bss_priv_size = sizeof(struct ieee80211_bss);

	local = wiphy_priv(wiphy);
	local->hw.wiphy = wiphy;

	local->hw.priv = (char *)local + ALIGN(sizeof(*local), NETDEV_ALIGN);

	BUG_ON(!ops->tx);
	BUG_ON(!ops->start);
	BUG_ON(!ops->stop);
	BUG_ON(!ops->config);
	BUG_ON(!ops->add_interface);
	BUG_ON(!ops->remove_interface);
	BUG_ON(!ops->configure_filter);
	local->ops = ops;

	/* set up some defaults */
	local->hw.queues = 1;
	local->hw.max_rates = 1;
	local->hw.max_report_rates = 0;
	local->hw.max_rx_aggregation_subframes = IEEE80211_MAX_AMPDU_BUF;
	local->hw.conf.long_frame_max_tx_count = wiphy->retry_long;
	local->hw.conf.short_frame_max_tx_count = wiphy->retry_short;
	local->user_power_level = -1;
	local->uapsd_queues = IEEE80211_DEFAULT_UAPSD_QUEUES;
	local->uapsd_max_sp_len = IEEE80211_DEFAULT_MAX_SP_LEN;

	INIT_LIST_HEAD(&local->interfaces);
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	__hw_addr_init(&local->mc_list);
#endif

	mutex_init(&local->iflist_mtx);
	mutex_init(&local->mtx);

	mutex_init(&local->key_mtx);
	spin_lock_init(&local->filter_lock);
	spin_lock_init(&local->queue_stop_reason_lock);

	/*
	 * The rx_skb_queue is only accessed from tasklets,
	 * but other SKB queues are used from within IRQ
	 * context. Therefore, this one needs a different
	 * locking class so our direct, non-irq-safe use of
	 * the queue's lock doesn't throw lockdep warnings.
	 */
	skb_queue_head_init_class(&local->rx_skb_queue,
				  &ieee80211_rx_skb_queue_class);

	INIT_DELAYED_WORK(&local->scan_work, ieee80211_scan_work);

	ieee80211_work_init(local);

	INIT_WORK(&local->restart_work, ieee80211_restart_work);

	INIT_WORK(&local->reconfig_filter, ieee80211_reconfig_filter);
	INIT_WORK(&local->recalc_smps, ieee80211_recalc_smps_work);
	local->smps_mode = IEEE80211_SMPS_OFF;

	INIT_WORK(&local->dynamic_ps_enable_work,
		  ieee80211_dynamic_ps_enable_work);
	INIT_WORK(&local->dynamic_ps_disable_work,
		  ieee80211_dynamic_ps_disable_work);
	setup_timer(&local->dynamic_ps_timer,
		    ieee80211_dynamic_ps_timer, (unsigned long) local);

	INIT_WORK(&local->sched_scan_stopped_work,
		  ieee80211_sched_scan_stopped_work);

	sta_info_init(local);

	for (i = 0; i < IEEE80211_MAX_QUEUES; i++) {
		skb_queue_head_init(&local->pending[i]);
		atomic_set(&local->agg_queue_stop[i], 0);
	}
	tasklet_init(&local->tx_pending_tasklet, ieee80211_tx_pending,
		     (unsigned long)local);

	tasklet_init(&local->tasklet,
		     ieee80211_tasklet_handler,
		     (unsigned long) local);

	skb_queue_head_init(&local->skb_queue);
	skb_queue_head_init(&local->skb_queue_unreliable);

	/* init dummy netdev for use w/ NAPI */
	init_dummy_netdev(&local->napi_dev);

	ieee80211_led_names(local);

	ieee80211_hw_roc_setup(local);

	return local_to_hw(local);
}
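/*
 * Sketch of a driver-side caller, illustrating the layout described in
 * the comment above: one allocation holds the wiphy, the
 * ieee80211_local, and the driver's private area, with hw->priv
 * pointing at the aligned tail. struct example_priv and example_ops
 * are hypothetical names; a real ops table must provide every callback
 * the BUG_ON() checks above demand.
 */
struct example_priv {
	u32 chip_rev;
};

static struct ieee80211_hw *example_alloc(void)
{
	struct ieee80211_hw *hw;
	struct example_priv *priv;

	hw = ieee80211_alloc_hw(sizeof(*priv), &example_ops);
	if (!hw)
		return NULL;

	priv = hw->priv;	/* driver's slice of the allocation */
	priv->chip_rev = 1;

	return hw;
}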
static int qtnf_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct qtnf_pcie_bus_priv *pcie_priv;
	struct qtnf_bus *bus;
	void __iomem *sysctl_bar;
	void __iomem *epmem_bar;
	void __iomem *dmareg_bar;
	unsigned int chipid;
	int ret;

	if (!pci_is_pcie(pdev)) {
		pr_err("device %s is not PCI Express\n", pci_name(pdev));
		return -EIO;
	}

	qtnf_tune_pcie_mps(pdev);

	ret = pcim_enable_device(pdev);
	if (ret) {
		pr_err("failed to init PCI device %x\n", pdev->device);
		return ret;
	}

	pci_set_master(pdev);

	/* qtnf_map_bar() returns an ERR_PTR on failure; propagate it
	 * rather than 'ret', which is 0 at this point.
	 */
	sysctl_bar = qtnf_map_bar(pdev, QTN_SYSCTL_BAR);
	if (IS_ERR(sysctl_bar)) {
		pr_err("failed to map BAR%u\n", QTN_SYSCTL_BAR);
		return PTR_ERR(sysctl_bar);
	}

	dmareg_bar = qtnf_map_bar(pdev, QTN_DMA_BAR);
	if (IS_ERR(dmareg_bar)) {
		pr_err("failed to map BAR%u\n", QTN_DMA_BAR);
		return PTR_ERR(dmareg_bar);
	}

	epmem_bar = qtnf_map_bar(pdev, QTN_SHMEM_BAR);
	if (IS_ERR(epmem_bar)) {
		pr_err("failed to map BAR%u\n", QTN_SHMEM_BAR);
		return PTR_ERR(epmem_bar);
	}

	chipid = qtnf_chip_id_get(sysctl_bar);

	pr_info("identified device: %s\n", qtnf_chipid_to_string(chipid));

	switch (chipid) {
	case QTN_CHIP_ID_PEARL:
	case QTN_CHIP_ID_PEARL_B:
	case QTN_CHIP_ID_PEARL_C:
		bus = qtnf_pcie_pearl_alloc(pdev);
		break;
	case QTN_CHIP_ID_TOPAZ:
		bus = qtnf_pcie_topaz_alloc(pdev);
		break;
	default:
		pr_err("unsupported chip ID 0x%x\n", chipid);
		return -ENOTSUPP;
	}

	if (!bus)
		return -ENOMEM;

	pcie_priv = get_bus_priv(bus);
	pci_set_drvdata(pdev, bus);
	bus->dev = &pdev->dev;
	bus->fw_state = QTNF_FW_STATE_RESET;
	pcie_priv->pdev = pdev;
	pcie_priv->tx_stopped = 0;
	pcie_priv->rx_bd_num = rx_bd_size_param;
	pcie_priv->flashboot = flashboot;

	if (fw_blksize_param > QTN_PCIE_MAX_FW_BUFSZ)
		pcie_priv->fw_blksize = QTN_PCIE_MAX_FW_BUFSZ;
	else
		pcie_priv->fw_blksize = fw_blksize_param;

	mutex_init(&bus->bus_lock);
	spin_lock_init(&pcie_priv->tx_lock);
	spin_lock_init(&pcie_priv->tx_reclaim_lock);

	pcie_priv->tx_full_count = 0;
	pcie_priv->tx_done_count = 0;
	pcie_priv->pcie_irq_count = 0;
	pcie_priv->tx_reclaim_done = 0;
	pcie_priv->tx_reclaim_req = 0;

	pcie_priv->workqueue = create_singlethread_workqueue("QTNF_PCIE");
	if (!pcie_priv->workqueue) {
		pr_err("failed to alloc bus workqueue\n");
		return -ENODEV;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev,
					pcie_priv->dma_mask_get_cb());
	if (ret) {
		pr_err("PCIE DMA coherent mask init failed 0x%llx\n",
		       pcie_priv->dma_mask_get_cb());
		goto error;
	}

	init_dummy_netdev(&bus->mux_dev);
	qtnf_pcie_init_irq(pcie_priv, use_msi);
	pcie_priv->sysctl_bar = sysctl_bar;
	pcie_priv->dmareg_bar = dmareg_bar;
	pcie_priv->epmem_bar = epmem_bar;
	pci_save_state(pdev);

	ret = pcie_priv->probe_cb(bus, tx_bd_size_param);
	if (ret)
		goto error;

	qtnf_pcie_bringup_fw_async(bus);
	return 0;

error:
	flush_workqueue(pcie_priv->workqueue);
	destroy_workqueue(pcie_priv->workqueue);
	pci_set_drvdata(pdev, NULL);
	return ret;
}
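/*
 * Sketch of the chip-specific side this common probe assumes: the
 * per-chip alloc routine (e.g. qtnf_pcie_pearl_alloc() above) fills in
 * the dma_mask_get_cb and probe_cb hooks that qtnf_pcie_probe()
 * invokes later. All names in the body are illustrative, not the
 * driver's actual implementation.
 */
static u64 example_dma_mask_get(void)
{
	return DMA_BIT_MASK(32);	/* this sketch assumes 32-bit DMA */
}

static int example_chip_probe(struct qtnf_bus *bus, unsigned int tx_bd_num)
{
	/* chip-specific descriptor ring and interrupt setup would go here */
	return 0;
}

static struct qtnf_bus *example_pcie_alloc(struct pci_dev *pdev)
{
	struct qtnf_bus *bus;
	struct qtnf_pcie_bus_priv *priv;

	bus = devm_kzalloc(&pdev->dev, sizeof(*bus) + sizeof(*priv),
			   GFP_KERNEL);
	if (!bus)
		return NULL;

	priv = get_bus_priv(bus);
	priv->dma_mask_get_cb = example_dma_mask_get;
	priv->probe_cb = example_chip_probe;

	return bus;
}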