static int fnic_notify_set(struct fnic *fnic)
{
	int err;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		err = vnic_dev_notify_set(fnic->vdev, -1);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(fnic->vdev, FNIC_MSIX_ERR_NOTIFY);
		break;
	default:
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Interrupt mode should be set up"
			     " before devcmd notify set %d\n",
			     vnic_dev_get_intr_mode(fnic->vdev));
		err = -1;
		break;
	}

	return err;
}
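/*
 * In MSI mode the notify area is programmed with interrupt offset -1,
 * i.e. no interrupt is raised on notification; the driver polls the
 * notify area from a timer instead (see fnic_notify_timer_start(),
 * which arms the timer only for MSI).
 */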
static void enic_notify_timer_start(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		mod_timer(&enic->notify_timer, jiffies);
		break;
	default:
		break;
	}
}
static void __devexit fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	unsigned long flags;

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);

	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fc_lport_destroy(fnic->lport);
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_vnic_resources(fnic);
	fnic_free_intr(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	scsi_host_put(fnic->lport->host);
}
static int enic_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	int ret;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	ret = enic_coalesce_valid(enic, ecmd);
	if (ret)
		return ret;
	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX) {
		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
						       tx_coalesce_usecs);
		}
		enic->tx_coalesce_usecs = tx_coalesce_usecs;
	}
	rxcoal->use_adaptive_rx_coalesce = !!ecmd->use_adaptive_rx_coalesce;
	if (!rxcoal->use_adaptive_rx_coalesce)
		enic_intr_coal_set_rx(enic, rx_coalesce_usecs);
	if (ecmd->rx_coalesce_usecs_high) {
		rxcoal->range_end = rx_coalesce_usecs_high;
		rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
		rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
						ENIC_AIC_LARGE_PKT_DIFF;
	}
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}
static void enic_clear_intr_mode(struct enic *enic)
{
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(enic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(enic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(enic->vdev, VNIC_DEV_INTR_MODE_UNKNOWN);
}
static void fnic_notify_timer_start(struct fnic *fnic)
{
	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSI:
		/*
		 * Schedule first timeout immediately. The driver is
		 * initialized and ready to look for link up notification
		 */
		mod_timer(&fnic->notify_timer, jiffies);
		break;
	default:
		/* Using intr for notification for INTx/MSI-X */
		break;
	}
}
void fnic_clear_intr_mode(struct fnic *fnic)
{
	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		pci_disable_msix(fnic->pdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		pci_disable_msi(fnic->pdev);
		break;
	default:
		break;
	}

	vnic_dev_set_intr_mode(fnic->vdev, VNIC_DEV_INTR_MODE_INTX);
}
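/*
 * Note the asymmetry with enic_clear_intr_mode() above: enic leaves the
 * device in VNIC_DEV_INTR_MODE_UNKNOWN, while fnic falls back to legacy
 * INTx after disabling MSI/MSI-X.
 */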
static int enic_get_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	if (vnic_dev_get_intr_mode(enic->vdev) == VNIC_DEV_INTR_MODE_MSIX)
		ecmd->tx_coalesce_usecs = enic->tx_coalesce_usecs;
	ecmd->rx_coalesce_usecs = enic->rx_coalesce_usecs;
	if (rxcoal->use_adaptive_rx_coalesce)
		ecmd->use_adaptive_rx_coalesce = 1;
	ecmd->rx_coalesce_usecs_low = rxcoal->small_pkt_range_start;
	ecmd->rx_coalesce_usecs_high = rxcoal->range_end;

	return 0;
}
static int enic_coalesce_valid(struct enic *enic,
			       struct ethtool_coalesce *ec)
{
	u32 coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	u32 rx_coalesce_usecs_high = min_t(u32, coalesce_usecs_max,
					   ec->rx_coalesce_usecs_high);
	u32 rx_coalesce_usecs_low = min_t(u32, coalesce_usecs_max,
					  ec->rx_coalesce_usecs_low);

	if (ec->rx_max_coalesced_frames		||
	    ec->rx_coalesce_usecs_irq		||
	    ec->rx_max_coalesced_frames_irq	||
	    ec->tx_max_coalesced_frames		||
	    ec->tx_coalesce_usecs_irq		||
	    ec->tx_max_coalesced_frames_irq	||
	    ec->stats_block_coalesce_usecs	||
	    ec->use_adaptive_tx_coalesce	||
	    ec->pkt_rate_low			||
	    ec->rx_max_coalesced_frames_low	||
	    ec->tx_coalesce_usecs_low		||
	    ec->tx_max_coalesced_frames_low	||
	    ec->pkt_rate_high			||
	    ec->rx_max_coalesced_frames_high	||
	    ec->tx_coalesce_usecs_high		||
	    ec->tx_max_coalesced_frames_high	||
	    ec->rate_sample_interval)
		return -EINVAL;

	if ((vnic_dev_get_intr_mode(enic->vdev) != VNIC_DEV_INTR_MODE_MSIX) &&
	    ec->tx_coalesce_usecs)
		return -EINVAL;

	if ((ec->tx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_low > coalesce_usecs_max)	||
	    (ec->rx_coalesce_usecs_high > coalesce_usecs_max))
		netdev_info(enic->netdev, "ethtool_set_coalesce: adaptor supports max coalesce value of %d. Setting max value.\n",
			    coalesce_usecs_max);

	if (ec->rx_coalesce_usecs_high &&
	    (rx_coalesce_usecs_high <
	     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
		return -EINVAL;

	return 0;
}
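/*
 * For illustration only (not part of the driver source): the fields
 * accepted above map to standard ethtool -C options, so a configuration
 * that passes enic_coalesce_valid() under MSI-X might look like
 * (interface name hypothetical):
 *
 *   ethtool -C eth0 rx-usecs 30 tx-usecs 30
 *   ethtool -C eth0 adaptive-rx on rx-usecs-low 10 rx-usecs-high 100
 *
 * Everything outside this set (frame-count triggers, adaptive tx,
 * packet-rate sampling, ...) is rejected with -EINVAL by the first
 * check above.
 */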
static int enic_notify_set(struct enic *enic)
{
	int err;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}

	return err;
}
static void enic_poll_controller(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	struct vnic_dev *vdev = enic->vdev;

	switch (vnic_dev_get_intr_mode(vdev)) {
	case VNIC_DEV_INTR_MODE_MSIX:
		enic_isr_msix_rq(enic->pdev->irq, enic);
		enic_isr_msix_wq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		enic_isr_msi(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_INTX:
		enic_isr_legacy(enic->pdev->irq, netdev);
		break;
	default:
		break;
	}
}
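/*
 * enic_poll_controller() backs the netdev ndo_poll_controller hook:
 * netpoll clients such as netconsole use it to service the device when
 * interrupts cannot be taken, so it simply calls the ISR that matches
 * the currently configured interrupt mode.
 */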
static int enic_notify_set(struct enic *enic)
{
	int err;

	spin_lock(&enic->devcmd_lock);
	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_INTX_NOTIFY);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		err = vnic_dev_notify_set(enic->vdev, ENIC_MSIX_NOTIFY);
		break;
	default:
		err = vnic_dev_notify_set(enic->vdev, -1 /* no intr */);
		break;
	}
	spin_unlock(&enic->devcmd_lock);

	return err;
}
void fnic_free_intr(struct fnic *fnic)
{
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(fnic->pdev->irq, fnic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++)
			if (fnic->msix[i].requested)
				free_irq(fnic->msix_entry[i].vector,
					 fnic->msix[i].devid);
		break;
	default:
		break;
	}
}
static void enic_free_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		free_irq(enic->pdev->irq, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		free_irq(enic->pdev->irq, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		for (i = 0; i < ARRAY_SIZE(enic->msix); i++)
			if (enic->msix[i].requested)
				free_irq(enic->msix_entry[i].vector,
					 enic->msix[i].devid);
		break;
	default:
		break;
	}
}
static int __devinit enic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct net_device *netdev;
	struct enic *enic;
	int using_dac = 0;
	unsigned int i;
	int err;

	const u8 rss_default_cpu = 0;
	const u8 rss_hash_type = 0;
	const u8 rss_hash_bits = 0;
	const u8 rss_base_cpu = 0;
	const u8 rss_enable = 0;
	const u8 tso_ipid_split_en = 0;
	const u8 ig_vlan_strip_en = 1;

	/* Allocate net device structure and initialize. Private
	 * instance data is initialized to zero.
	 */

	netdev = alloc_etherdev(sizeof(struct enic));
	if (!netdev) {
		printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
		return -ENOMEM;
	}

	pci_set_drvdata(pdev, netdev);

	SET_NETDEV_DEV(netdev, &pdev->dev);

	enic = netdev_priv(netdev);
	enic->netdev = netdev;
	enic->pdev = pdev;

	/* Setup PCI resources */

	err = pci_enable_device(pdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting.\n");
		goto err_out_free_netdev;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		printk(KERN_ERR PFX "Cannot request PCI regions, aborting.\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fail to 32-bit.
	 */

	err = pci_set_dma_mask(pdev, DMA_40BIT_MASK);
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX
				"No usable DMA configuration, aborting.\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_32BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 32-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_40BIT_MASK);
		if (err) {
			printk(KERN_ERR PFX
				"Unable to obtain 40-bit DMA "
				"for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
		using_dac = 1;
	}

	/* Map vNIC resources from BAR0 */

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	enic->bar0.vaddr = pci_iomap(pdev, 0, enic->bar0.len);
	enic->bar0.bus_addr = pci_resource_start(pdev, 0);
	enic->bar0.len = pci_resource_len(pdev, 0);

	if (!enic->bar0.vaddr) {
		printk(KERN_ERR PFX
			"Cannot memory-map BAR0 res hdr, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	/* Register vNIC device */

	enic->vdev = vnic_dev_register(NULL, enic, pdev, &enic->bar0);
	if (!enic->vdev) {
		printk(KERN_ERR PFX "vNIC registration failed, aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	/* Issue device open to get device in known state */

	err = enic_dev_open(enic);
	if (err) {
		printk(KERN_ERR PFX "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	/* Issue device init to initialize the vnic-to-switch link.
	 * We'll start with carrier off and wait for link UP
	 * notification later to turn on carrier. We don't need
	 * to wait here for the vnic-to-switch link initialization
	 * to complete; link UP notification is the indication that
	 * the process is complete.
	 */

	netif_carrier_off(netdev);

	err = vnic_dev_init(enic->vdev, 0);
	if (err) {
		printk(KERN_ERR PFX "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Get vNIC configuration */

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Get vNIC configuration failed, aborting.\n");
		goto err_out_dev_close;
	}

	/* Get available resource counts */

	enic_get_res_counts(enic);

	/* Set interrupt mode based on resource counts and system
	 * capabilities
	 */

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX "Failed to set intr mode, aborting.\n");
		goto err_out_dev_close;
	}

	/* Allocate and configure vNIC resources */

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	/* Enable VLAN tag stripping. RSS not enabled (yet). */

	err = enic_set_nic_cfg(enic,
		rss_default_cpu, rss_hash_type,
		rss_hash_bits, rss_base_cpu,
		rss_enable, tso_ipid_split_en,
		ig_vlan_strip_en);
	if (err) {
		printk(KERN_ERR PFX "Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	/* Setup notification timer, HW reset task, and locks */

	init_timer(&enic->notify_timer);
	enic->notify_timer.function = enic_notify_timer;
	enic->notify_timer.data = (unsigned long)enic;

	INIT_WORK(&enic->reset, enic_reset);

	for (i = 0; i < enic->wq_count; i++)
		spin_lock_init(&enic->wq_lock[i]);

	spin_lock_init(&enic->devcmd_lock);

	/* Register net device */

	enic->port_mtu = enic->config.mtu;
	(void)enic_change_mtu(netdev, enic->port_mtu);

	err = enic_set_mac_addr(netdev, enic->mac_addr);
	if (err) {
		printk(KERN_ERR PFX "Invalid MAC address, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	netdev->netdev_ops = &enic_netdev_ops;
	netdev->watchdog_timeo = 2 * HZ;
	netdev->ethtool_ops = &enic_ethtool_ops;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	netdev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
	if (ENIC_SETTING(enic, TXCSUM))
		netdev->features |= NETIF_F_SG | NETIF_F_HW_CSUM;
	if (ENIC_SETTING(enic, TSO))
		netdev->features |= NETIF_F_TSO |
			NETIF_F_TSO6 | NETIF_F_TSO_ECN;
	if (ENIC_SETTING(enic, LRO))
		netdev->features |= NETIF_F_LRO;
	if (using_dac)
		netdev->features |= NETIF_F_HIGHDMA;

	enic->csum_rx_enabled = ENIC_SETTING(enic, RXCSUM);

	enic->lro_mgr.max_aggr = ENIC_LRO_MAX_AGGR;
	enic->lro_mgr.max_desc = ENIC_LRO_MAX_DESC;
	enic->lro_mgr.lro_arr = enic->lro_desc;
	enic->lro_mgr.get_skb_header = enic_get_skb_header;
	enic->lro_mgr.features = LRO_F_NAPI | LRO_F_EXTRACT_VLAN_ID;
	enic->lro_mgr.dev = netdev;
	enic->lro_mgr.ip_summed = CHECKSUM_COMPLETE;
	enic->lro_mgr.ip_summed_aggr = CHECKSUM_UNNECESSARY;

	err = register_netdev(netdev);
	if (err) {
		printk(KERN_ERR PFX "Cannot register net device, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	return 0;

err_out_free_vnic_resources:
	enic_free_vnic_resources(enic);
err_out_dev_close:
	vnic_dev_close(enic->vdev);
err_out_vnic_unregister:
	enic_clear_intr_mode(enic);
	vnic_dev_unregister(enic->vdev);
err_out_iounmap:
	enic_iounmap(enic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_netdev:
	pci_set_drvdata(pdev, NULL);
	free_netdev(netdev);

	return err;
}
int fnic_request_intr(struct fnic *fnic)
{
	int err = 0;
	int i;

	switch (vnic_dev_get_intr_mode(fnic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = request_irq(fnic->pdev->irq, &fnic_isr_legacy,
				  IRQF_SHARED, DRV_NAME, fnic);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		err = request_irq(fnic->pdev->irq, &fnic_isr_msi,
				  0, fnic->name, fnic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		sprintf(fnic->msix[FNIC_MSIX_RQ].devname,
			"%.11s-fcs-rq", fnic->name);
		fnic->msix[FNIC_MSIX_RQ].isr = fnic_isr_msix_rq;
		fnic->msix[FNIC_MSIX_RQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ].devname,
			"%.11s-fcs-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ].isr = fnic_isr_msix_wq;
		fnic->msix[FNIC_MSIX_WQ].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_WQ_COPY].devname,
			"%.11s-scsi-wq", fnic->name);
		fnic->msix[FNIC_MSIX_WQ_COPY].isr = fnic_isr_msix_wq_copy;
		fnic->msix[FNIC_MSIX_WQ_COPY].devid = fnic;

		sprintf(fnic->msix[FNIC_MSIX_ERR_NOTIFY].devname,
			"%.11s-err-notify", fnic->name);
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].isr =
			fnic_isr_msix_err_notify;
		fnic->msix[FNIC_MSIX_ERR_NOTIFY].devid = fnic;

		for (i = 0; i < ARRAY_SIZE(fnic->msix); i++) {
			err = request_irq(fnic->msix_entry[i].vector,
					  fnic->msix[i].isr, 0,
					  fnic->msix[i].devname,
					  fnic->msix[i].devid);
			if (err) {
				shost_printk(KERN_ERR, fnic->lport->host,
					     "MSIX: request_irq"
					     " failed %d\n", err);
				fnic_free_intr(fnic);
				break;
			}
			fnic->msix[i].requested = 1;
		}
		break;
	default:
		break;
	}

	return err;
}
void enic_init_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int cq_index;
	unsigned int i;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	/* Init RQ/WQ resources.
	 *
	 * RQ[0 - n-1] point to CQ[0 - n-1]
	 * WQ[0 - m-1] point to CQ[n - n+m-1]
	 *
	 * Error interrupt is not enabled for MSI.
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSIX:
		error_interrupt_enable = 1;
		error_interrupt_offset = enic->intr_count - 2;
		break;
	default:
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < enic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&enic->rq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	for (i = 0; i < enic->wq_count; i++) {
		cq_index = enic->rq_count + i;
		vnic_wq_init(&enic->wq[i],
			cq_index,
			error_interrupt_enable,
			error_interrupt_offset);
	}

	/* Init CQ resources
	 *
	 * CQ[0 - n+m-1] point to INTR[0] for INTx, MSI
	 * CQ[0 - n+m-1] point to INTR[0 - n+m-1] for MSI-X
	 */

	for (i = 0; i < enic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&enic->cq[i],
			0 /* flow_control_enable */,
			1 /* color_enable */,
			0 /* cq_head */,
			0 /* cq_tail */,
			1 /* cq_tail_color */,
			1 /* interrupt_enable */,
			1 /* cq_entry_enable */,
			0 /* cq_message_enable */,
			interrupt_offset,
			0 /* cq_message_addr */);
	}

	/* Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < enic->intr_count; i++) {
		vnic_intr_init(&enic->intr[i],
			enic->config.intr_timer_usec,
			enic->config.intr_timer_type,
			mask_on_assertion);
	}
}
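/*
 * Worked example of the mapping set up above, with hypothetical counts
 * rq_count n = 2 and wq_count m = 2: RQ[0..1] complete into CQ[0..1]
 * and WQ[0..1] into CQ[2..3]. Under MSI-X each CQ[i] raises INTR[i];
 * under INTx/MSI all four CQs share INTR[0], with the error interrupt
 * (when enabled) at offset intr_count - 2.
 */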
int enic_alloc_vnic_resources(struct enic *enic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int i;
	int err;

	intr_mode = vnic_dev_get_intr_mode(enic->vdev);

	dev_info(enic_get_dev(enic), "vNIC resources used: "
		"wq %d rq %d cq %d intr %d intr mode %s\n",
		enic->wq_count, enic->rq_count,
		enic->cq_count, enic->intr_count,
		intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
		"unknown");

	/* Allocate queue resources */

	for (i = 0; i < enic->wq_count; i++) {
		err = vnic_wq_alloc(enic->vdev, &enic->wq[i], i,
			enic->config.wq_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_alloc(enic->vdev, &enic->rq[i], i,
			enic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->cq_count; i++) {
		if (i < enic->rq_count)
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.rq_desc_count,
				sizeof(struct cq_enet_rq_desc));
		else
			err = vnic_cq_alloc(enic->vdev, &enic->cq[i], i,
				enic->config.wq_desc_count,
				sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < enic->intr_count; i++) {
		err = vnic_intr_alloc(enic->vdev, &enic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	/* Hook remaining resource */

	enic->legacy_pba = vnic_dev_get_res(enic->vdev,
		RES_TYPE_INTR_PBA_LEGACY, 0);
	if (!enic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		dev_err(enic_get_dev(enic),
			"Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	return 0;

err_out_cleanup:
	enic_free_vnic_resources(enic);

	return err;
}
static void fnic_remove(struct pci_dev *pdev)
{
	struct fnic *fnic = pci_get_drvdata(pdev);
	struct fc_lport *lp = fnic->lport;
	unsigned long flags;

	/*
	 * Mark state so that the workqueue thread stops forwarding
	 * received frames and link events to the local port. ISR and
	 * other threads that can queue work items will also stop
	 * creating work items on the fnic workqueue
	 */
	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->stop_rx_link_events = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		del_timer_sync(&fnic->notify_timer);

	/*
	 * Flush the fnic event queue. After this call, there should
	 * be no event queued for this fnic device in the workqueue
	 */
	flush_workqueue(fnic_event_queue);
	skb_queue_purge(&fnic->frame_queue);
	skb_queue_purge(&fnic->tx_queue);

	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		del_timer_sync(&fnic->fip_timer);
		skb_queue_purge(&fnic->fip_frame_queue);
		fnic_fcoe_reset_vlans(fnic);
		fnic_fcoe_evlist_free(fnic);
	}

	/*
	 * Log off the fabric. This stops all remote ports, dns port,
	 * logs off the fabric. This flushes all rport, disc, lport work
	 * before returning
	 */
	fc_fabric_logoff(fnic->lport);

	spin_lock_irqsave(&fnic->fnic_lock, flags);
	fnic->in_remove = 1;
	spin_unlock_irqrestore(&fnic->fnic_lock, flags);

	fcoe_ctlr_destroy(&fnic->ctlr);
	fc_lport_destroy(lp);
	fnic_stats_debugfs_remove(fnic);

	/*
	 * This stops the fnic device, masks all interrupts. Completed
	 * CQ entries are drained. Posted WQ/RQ/Copy-WQ entries are
	 * cleaned up
	 */
	fnic_cleanup(fnic);

	BUG_ON(!skb_queue_empty(&fnic->frame_queue));
	BUG_ON(!skb_queue_empty(&fnic->tx_queue));

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_del(&fnic->list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
	fc_exch_mgr_free(fnic->lport);
	vnic_dev_notify_unset(fnic->vdev);
	fnic_free_intr(fnic);
	fnic_free_vnic_resources(fnic);
	fnic_clear_intr_mode(fnic);
	vnic_dev_close(fnic->vdev);
	vnic_dev_unregister(fnic->vdev);
	fnic_iounmap(fnic);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	scsi_host_put(lp->host);
}
int fnic_alloc_vnic_resources(struct fnic *fnic)
{
	enum vnic_dev_intr_mode intr_mode;
	unsigned int mask_on_assertion;
	unsigned int interrupt_offset;
	unsigned int error_interrupt_enable;
	unsigned int error_interrupt_offset;
	unsigned int i, cq_index;
	unsigned int wq_copy_cq_desc_count;
	int err;

	intr_mode = vnic_dev_get_intr_mode(fnic->vdev);

	shost_printk(KERN_INFO, fnic->lport->host, "vNIC interrupt mode: %s\n",
		     intr_mode == VNIC_DEV_INTR_MODE_INTX ? "legacy PCI INTx" :
		     intr_mode == VNIC_DEV_INTR_MODE_MSI ? "MSI" :
		     intr_mode == VNIC_DEV_INTR_MODE_MSIX ? "MSI-X" :
		     "unknown");

	shost_printk(KERN_INFO, fnic->lport->host, "vNIC resources avail: "
		     "wq %d cp_wq %d raw_wq %d rq %d cq %d intr %d\n",
		     fnic->wq_count, fnic->wq_copy_count, fnic->raw_wq_count,
		     fnic->rq_count, fnic->cq_count, fnic->intr_count);

	/* Allocate Raw WQ used for FCS frames */
	for (i = 0; i < fnic->raw_wq_count; i++) {
		err = vnic_wq_alloc(fnic->vdev, &fnic->wq[i], i,
			fnic->config.wq_enet_desc_count,
			sizeof(struct wq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* Allocate Copy WQs used for SCSI IOs */
	for (i = 0; i < fnic->wq_copy_count; i++) {
		err = vnic_wq_copy_alloc(fnic->vdev, &fnic->wq_copy[i],
			(fnic->raw_wq_count + i),
			fnic->config.wq_copy_desc_count,
			sizeof(struct fcpio_host_req));
		if (err)
			goto err_out_cleanup;
	}

	/* RQ for receiving FCS frames */
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_alloc(fnic->vdev, &fnic->rq[i], i,
			fnic->config.rq_desc_count,
			sizeof(struct rq_enet_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each RQ */
	for (i = 0; i < fnic->rq_count; i++) {
		cq_index = i;
		err = vnic_cq_alloc(fnic->vdev,
			&fnic->cq[cq_index], cq_index,
			fnic->config.rq_desc_count,
			sizeof(struct cq_enet_rq_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each WQ */
	for (i = 0; i < fnic->raw_wq_count; i++) {
		cq_index = fnic->rq_count + i;
		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index], cq_index,
			fnic->config.wq_enet_desc_count,
			sizeof(struct cq_enet_wq_desc));
		if (err)
			goto err_out_cleanup;
	}

	/* CQ for each COPY WQ */
	wq_copy_cq_desc_count = (fnic->config.wq_copy_desc_count * 3);
	for (i = 0; i < fnic->wq_copy_count; i++) {
		cq_index = fnic->raw_wq_count + fnic->rq_count + i;
		err = vnic_cq_alloc(fnic->vdev, &fnic->cq[cq_index],
			cq_index,
			wq_copy_cq_desc_count,
			sizeof(struct fcpio_fw_req));
		if (err)
			goto err_out_cleanup;
	}

	for (i = 0; i < fnic->intr_count; i++) {
		err = vnic_intr_alloc(fnic->vdev, &fnic->intr[i], i);
		if (err)
			goto err_out_cleanup;
	}

	fnic->legacy_pba = vnic_dev_get_res(fnic->vdev,
				RES_TYPE_INTR_PBA_LEGACY, 0);
	if (!fnic->legacy_pba && intr_mode == VNIC_DEV_INTR_MODE_INTX) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to hook legacy pba resource\n");
		err = -ENODEV;
		goto err_out_cleanup;
	}

	/*
	 * Init RQ/WQ resources.
	 *
	 * RQ[0 to n-1] point to CQ[0 to n-1]
	 * WQ[0 to m-1] point to CQ[n to n+m-1]
	 * WQ_COPY[0 to k-1] points to CQ[n+m to n+m+k-1]
	 *
	 * Note for copy wq we always initialize with cq_index = 0
	 *
	 * Error interrupt is not enabled for MSI.
	 */

	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_INTX:
	case VNIC_DEV_INTR_MODE_MSIX:
		error_interrupt_enable = 1;
		error_interrupt_offset = fnic->err_intr_offset;
		break;
	default:
		error_interrupt_enable = 0;
		error_interrupt_offset = 0;
		break;
	}

	for (i = 0; i < fnic->rq_count; i++) {
		cq_index = i;
		vnic_rq_init(&fnic->rq[i],
			     cq_index,
			     error_interrupt_enable,
			     error_interrupt_offset);
	}

	for (i = 0; i < fnic->raw_wq_count; i++) {
		cq_index = i + fnic->rq_count;
		vnic_wq_init(&fnic->wq[i],
			     cq_index,
			     error_interrupt_enable,
			     error_interrupt_offset);
	}

	for (i = 0; i < fnic->wq_copy_count; i++) {
		vnic_wq_copy_init(&fnic->wq_copy[i],
				  0 /* cq_index 0 - always */,
				  error_interrupt_enable,
				  error_interrupt_offset);
	}

	for (i = 0; i < fnic->cq_count; i++) {

		switch (intr_mode) {
		case VNIC_DEV_INTR_MODE_MSIX:
			interrupt_offset = i;
			break;
		default:
			interrupt_offset = 0;
			break;
		}

		vnic_cq_init(&fnic->cq[i],
			     0 /* flow_control_enable */,
			     1 /* color_enable */,
			     0 /* cq_head */,
			     0 /* cq_tail */,
			     1 /* cq_tail_color */,
			     1 /* interrupt_enable */,
			     1 /* cq_entry_enable */,
			     0 /* cq_message_enable */,
			     interrupt_offset,
			     0 /* cq_message_addr */);
	}

	/*
	 * Init INTR resources
	 *
	 * mask_on_assertion is not used for INTx due to the level-
	 * triggered nature of INTx
	 */
	switch (intr_mode) {
	case VNIC_DEV_INTR_MODE_MSI:
	case VNIC_DEV_INTR_MODE_MSIX:
		mask_on_assertion = 1;
		break;
	default:
		mask_on_assertion = 0;
		break;
	}

	for (i = 0; i < fnic->intr_count; i++) {
		vnic_intr_init(&fnic->intr[i],
			       fnic->config.intr_timer,
			       fnic->config.intr_timer_type,
			       mask_on_assertion);
	}

	/* init the stats memory by making the first call here */
	err = vnic_dev_stats_dump(fnic->vdev, &fnic->stats);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vnic_dev_stats_dump failed - x%x\n", err);
		goto err_out_cleanup;
	}

	/* Clear LIF stats */
	vnic_dev_stats_clear(fnic->vdev);

	return 0;

err_out_cleanup:
	fnic_free_vnic_resources(fnic);

	return err;
}
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to initialize debugfs for stats\n");
		fnic_stats_debugfs_remove(fnic);
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 64-bit first, and
	 * fail to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 64-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed\n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs*/
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to alloc shared tag map\n");
		goto err_out_dev_close;
	}

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}

	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
			    (unsigned long)fnic);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) =
		fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
int enic_dev_init(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	int err;

	err = enic_get_vnic_config(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Get vNIC configuration failed, aborting.\n");
		return err;
	}

	enic_get_res_counts(enic);

	err = enic_set_intr_mode(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set intr mode, aborting.\n");
		return err;
	}

	err = enic_alloc_vnic_resources(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to alloc vNIC resources, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	enic_init_vnic_resources(enic);

	err = enic_set_rq_alloc_buf(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to set RQ buffer allocator, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	err = enic_set_niccfg(enic);
	if (err) {
		printk(KERN_ERR PFX
			"Failed to config nic, aborting.\n");
		goto err_out_free_vnic_resources;
	}

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	default:
		netif_napi_add(netdev, &enic->napi, enic_poll, 64);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		netif_napi_add(netdev, &enic->napi, enic_poll_msix, 64);
		break;
	}

	return 0;

err_out_free_vnic_resources:
	enic_clear_intr_mode(enic);
	enic_free_vnic_resources(enic);

	return err;
}
static int __devinit fnic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	host = scsi_host_alloc(&fnic_host_template,
			       sizeof(struct fc_lport) + sizeof(struct fnic));
	if (!host) {
		printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
		err = -ENOMEM;
		goto err_out;
	}
	lp = shost_priv(host);
	lp->host = host;
	fnic = lport_priv(lp);
	fnic->lport = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to alloc shared tag map\n");
		goto err_out_free_hba;
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device. Try 40-bit first, and
	 * fail to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 40-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-map'able, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed\n");
		goto err_out_dev_close;
	}

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_clear_intr;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_free_intr;
	}

	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	fnic->flogi_oxid = FC_XID_UNKNOWN;
	fnic->flogi = NULL;
	fnic->flogi_resp = NULL;
	fnic->state = FNIC_IN_FC_MODE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;
	lp->tt = fnic_transport_template;

	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
				    FCPIO_HOST_EXCH_RANGE_START,
				    FCPIO_HOST_EXCH_RANGE_END);
	if (!lp->emp) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fc_exch_init(lp);
	fc_lport_init(lp);
	fc_elsct_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp->emp);
err_out_remove_scsi_host:
	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_free_intr:
	fnic_free_intr(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	scsi_host_put(lp->host);
err_out:
	return err;
}
static int enic_request_intr(struct enic *enic)
{
	struct net_device *netdev = enic->netdev;
	unsigned int i;
	int err = 0;

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		err = request_irq(enic->pdev->irq, enic_isr_legacy,
			IRQF_SHARED, netdev->name, netdev);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		err = request_irq(enic->pdev->irq, enic_isr_msi,
			0, netdev->name, enic);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		sprintf(enic->msix[ENIC_MSIX_RQ].devname,
			"%.11s-rx-0", netdev->name);
		enic->msix[ENIC_MSIX_RQ].isr = enic_isr_msix_rq;
		enic->msix[ENIC_MSIX_RQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_WQ].devname,
			"%.11s-tx-0", netdev->name);
		enic->msix[ENIC_MSIX_WQ].isr = enic_isr_msix_wq;
		enic->msix[ENIC_MSIX_WQ].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_ERR].devname,
			"%.11s-err", netdev->name);
		enic->msix[ENIC_MSIX_ERR].isr = enic_isr_msix_err;
		enic->msix[ENIC_MSIX_ERR].devid = enic;

		sprintf(enic->msix[ENIC_MSIX_NOTIFY].devname,
			"%.11s-notify", netdev->name);
		enic->msix[ENIC_MSIX_NOTIFY].isr = enic_isr_msix_notify;
		enic->msix[ENIC_MSIX_NOTIFY].devid = enic;

		for (i = 0; i < ARRAY_SIZE(enic->msix); i++) {
			err = request_irq(enic->msix_entry[i].vector,
				enic->msix[i].isr, 0,
				enic->msix[i].devname,
				enic->msix[i].devid);
			if (err) {
				enic_free_intr(enic);
				break;
			}
			enic->msix[i].requested = 1;
		}
		break;
	default:
		break;
	}

	return err;
}
static int enic_set_coalesce(struct net_device *netdev,
			     struct ethtool_coalesce *ecmd)
{
	struct enic *enic = netdev_priv(netdev);
	u32 tx_coalesce_usecs;
	u32 rx_coalesce_usecs;
	u32 rx_coalesce_usecs_low;
	u32 rx_coalesce_usecs_high;
	u32 coalesce_usecs_max;
	unsigned int i, intr;
	struct enic_rx_coal *rxcoal = &enic->rx_coalesce_setting;

	coalesce_usecs_max = vnic_dev_get_intr_coal_timer_max(enic->vdev);
	tx_coalesce_usecs = min_t(u32, ecmd->tx_coalesce_usecs,
				  coalesce_usecs_max);
	rx_coalesce_usecs = min_t(u32, ecmd->rx_coalesce_usecs,
				  coalesce_usecs_max);

	rx_coalesce_usecs_low = min_t(u32, ecmd->rx_coalesce_usecs_low,
				      coalesce_usecs_max);
	rx_coalesce_usecs_high = min_t(u32, ecmd->rx_coalesce_usecs_high,
				       coalesce_usecs_max);

	switch (vnic_dev_get_intr_mode(enic->vdev)) {
	case VNIC_DEV_INTR_MODE_INTX:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;
		if (ecmd->use_adaptive_rx_coalesce	||
		    ecmd->rx_coalesce_usecs_low		||
		    ecmd->rx_coalesce_usecs_high)
			return -EINVAL;

		intr = enic_legacy_io_intr();
		vnic_intr_coalescing_timer_set(&enic->intr[intr],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSI:
		if (tx_coalesce_usecs != rx_coalesce_usecs)
			return -EINVAL;
		if (ecmd->use_adaptive_rx_coalesce	||
		    ecmd->rx_coalesce_usecs_low		||
		    ecmd->rx_coalesce_usecs_high)
			return -EINVAL;

		vnic_intr_coalescing_timer_set(&enic->intr[0],
			tx_coalesce_usecs);
		break;
	case VNIC_DEV_INTR_MODE_MSIX:
		if (ecmd->rx_coalesce_usecs_high &&
		    (rx_coalesce_usecs_high <
		     rx_coalesce_usecs_low + ENIC_AIC_LARGE_PKT_DIFF))
			return -EINVAL;

		for (i = 0; i < enic->wq_count; i++) {
			intr = enic_msix_wq_intr(enic, i);
			vnic_intr_coalescing_timer_set(&enic->intr[intr],
				tx_coalesce_usecs);
		}

		rxcoal->use_adaptive_rx_coalesce =
					!!ecmd->use_adaptive_rx_coalesce;
		if (!rxcoal->use_adaptive_rx_coalesce)
			enic_intr_coal_set_rx(enic, rx_coalesce_usecs);

		if (ecmd->rx_coalesce_usecs_high) {
			rxcoal->range_end = rx_coalesce_usecs_high;
			rxcoal->small_pkt_range_start = rx_coalesce_usecs_low;
			rxcoal->large_pkt_range_start = rx_coalesce_usecs_low +
							ENIC_AIC_LARGE_PKT_DIFF;
		}
		break;
	default:
		break;
	}

	enic->tx_coalesce_usecs = tx_coalesce_usecs;
	enic->rx_coalesce_usecs = rx_coalesce_usecs;

	return 0;
}