Example 1
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY))
		enic_notify_check(enic);

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (netif_rx_schedule_prep(&enic->napi))
			__netif_rx_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
Example 2
static irqreturn_t enic_isr_msix_notify(int irq, void *data)
{
	struct enic *enic = data;

	enic_notify_check(enic);
	vnic_intr_unmask(&enic->intr[ENIC_MSIX_NOTIFY]);

	return IRQ_HANDLED;
}
Example 3
/* rtnl lock is held, process context */
static int enic_open(struct net_device *netdev)
{
	struct enic *enic = netdev_priv(netdev);
	unsigned int i;
	int err;

	err = enic_request_intr(enic);
	if (err) {
		printk(KERN_ERR PFX "%s: Unable to request irq.\n",
			netdev->name);
		return err;
	}

	err = enic_notify_set(enic);
	if (err) {
		printk(KERN_ERR PFX
			"%s: Failed to alloc notify buffer, aborting.\n",
			netdev->name);
		goto err_out_free_intr;
	}

	for (i = 0; i < enic->rq_count; i++) {
		err = vnic_rq_fill(&enic->rq[i], enic_rq_alloc_buf);
		if (err) {
			printk(KERN_ERR PFX
				"%s: Unable to alloc receive buffers.\n",
				netdev->name);
			goto err_out_notify_unset;
		}
	}

	for (i = 0; i < enic->wq_count; i++)
		vnic_wq_enable(&enic->wq[i]);
	for (i = 0; i < enic->rq_count; i++)
		vnic_rq_enable(&enic->rq[i]);

	enic_add_station_addr(enic);
	enic_set_multicast_list(netdev);

	netif_wake_queue(netdev);
	napi_enable(&enic->napi);
	vnic_dev_enable(enic->vdev);

	for (i = 0; i < enic->intr_count; i++)
		vnic_intr_unmask(&enic->intr[i]);

	enic_notify_timer_start(enic);

	return 0;

err_out_notify_unset:
	vnic_dev_notify_unset(enic->vdev);
err_out_free_intr:
	enic_free_intr(enic);

	return err;
}
Example 4
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int  work_done, rq_work_done, wq_work_done;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of
	 * a WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	if (rq_work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic_rq_alloc_buf);

	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		netif_rx_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_MSIX_RQ]);
	}

	return rq_work_done;
}
Example 5
static irqreturn_t enic_isr_legacy(int irq, void *data)
{
	struct net_device *netdev = data;
	struct enic *enic = netdev_priv(netdev);
	u32 pba;

	vnic_intr_mask(&enic->intr[ENIC_INTX_WQ_RQ]);

	pba = vnic_intr_legacy_pba(enic->legacy_pba);
	if (!pba) {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
		return IRQ_NONE;	/* not our interrupt */
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_NOTIFY)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_NOTIFY]);
		enic_notify_check(enic);
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_ERR)) {
		vnic_intr_return_all_credits(&enic->intr[ENIC_INTX_ERR]);
		enic_log_q_error(enic);
		/* schedule recovery from WQ/RQ error */
		schedule_work(&enic->reset);
		return IRQ_HANDLED;
	}

	if (ENIC_TEST_INTR(pba, ENIC_INTX_WQ_RQ)) {
		if (napi_schedule_prep(&enic->napi))
			__napi_schedule(&enic->napi);
	} else {
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return IRQ_HANDLED;
}
Example 6
static int enic_poll(struct napi_struct *napi, int budget)
{
	struct enic *enic = container_of(napi, struct enic, napi);
	struct net_device *netdev = enic->netdev;
	unsigned int rq_work_to_do = budget;
	unsigned int wq_work_to_do = -1; /* no limit */
	unsigned int  work_done, rq_work_done, wq_work_done;

	/* Service RQ (first) and WQ
	 */

	rq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_RQ],
		rq_work_to_do, enic_rq_service, NULL);

	wq_work_done = vnic_cq_service(&enic->cq[ENIC_CQ_WQ],
		wq_work_to_do, enic_wq_service, NULL);

	/* Accumulate intr event credits for this polling
	 * cycle.  An intr event is the completion of
	 * a WQ or RQ packet.
	 */

	work_done = rq_work_done + wq_work_done;

	if (work_done > 0)
		vnic_intr_return_credits(&enic->intr[ENIC_INTX_WQ_RQ],
			work_done,
			0 /* don't unmask intr */,
			0 /* don't reset intr timer */);

	if (rq_work_done > 0) {

		/* Replenish RQ
		 */

		vnic_rq_fill(&enic->rq[0], enic->rq_alloc_buf);

	} else {

		/* If no work done, flush all LROs and exit polling
		 */

		if (netdev->features & NETIF_F_LRO)
			lro_flush_all(&enic->lro_mgr);

		napi_complete(napi);
		vnic_intr_unmask(&enic->intr[ENIC_INTX_WQ_RQ]);
	}

	return rq_work_done;
}
Example 7
int enic_enable(struct enic *enic)
{
	unsigned int index;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed."\
			"Flow director feature will not work\n");

	/* Fill RQ bufs */
	for (index = 0; index < enic->rq_count; index++) {
		vnic_rq_fill(&enic->rq[index], enic_rq_alloc_buf);

		/* Need at least one buffer on ring to get going
		*/
		if (vnic_rq_desc_used(&enic->rq[index]) == 0) {
			dev_err(enic, "Unable to alloc receive buffers\n");
			return -1;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		vnic_wq_enable(&enic->wq[index]);
	for (index = 0; index < enic->rq_count; index++)
		vnic_rq_enable(&enic->rq[index]);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}
Example 8
int enic_enable(struct enic *enic)
{
	unsigned int index;
	int err;
	struct rte_eth_dev *eth_dev = enic->rte_dev;

	eth_dev->data->dev_link.link_speed = vnic_dev_port_speed(enic->vdev);
	eth_dev->data->dev_link.link_duplex = ETH_LINK_FULL_DUPLEX;
	vnic_dev_notify_set(enic->vdev, -1); /* No Intr for notify */

	if (enic_clsf_init(enic))
		dev_warning(enic, "Init of hash table for clsf failed."\
			"Flow director feature will not work\n");

	for (index = 0; index < enic->rq_count; index++) {
		err = enic_alloc_rx_queue_mbufs(enic, &enic->rq[index]);
		if (err) {
			dev_err(enic, "Failed to alloc RX queue mbufs\n");
			return err;
		}
	}

	for (index = 0; index < enic->wq_count; index++)
		vnic_wq_enable(&enic->wq[index]);
	for (index = 0; index < enic->rq_count; index++)
		vnic_rq_enable(&enic->rq[index]);

	vnic_dev_enable_wait(enic->vdev);

	/* Register and enable error interrupt */
	rte_intr_callback_register(&(enic->pdev->intr_handle),
		enic_intr_handler, (void *)enic->rte_dev);

	rte_intr_enable(&(enic->pdev->intr_handle));
	vnic_intr_unmask(&enic->intr);

	return 0;
}
Example 9
static int fnic_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	lp = libfc_host_alloc(&fnic_host_template, sizeof(struct fnic));
	if (!lp) {
		printk(KERN_ERR PFX "Unable to alloc libfc local port\n");
		err = -ENOMEM;
		goto err_out;
	}
	host = lp->host;
	fnic = lport_priv(lp);
	fnic->lport = lp;
	fnic->ctlr.lp = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = fnic_stats_debugfs_init(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
				"Failed to initialize debugfs for stats\n");
		fnic_stats_debugfs_remove(fnic);
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 64-bit first, and
	 * fall back to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(64));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 64-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-mappable, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}
	/* set data_src for point-to-point mode and to keep it non-zero */
	memcpy(fnic->data_src_addr, fnic->ctlr.ctl_src_addr, ETH_ALEN);

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	/* Configure Maximum Outstanding IO reqs*/
	if (fnic->config.io_throttle_count != FNIC_UCSM_DFLT_THROTTLE_CNT_BLD) {
		host->can_queue = min_t(u32, FNIC_MAX_IO_REQ,
					max_t(u32, FNIC_MIN_IO_REQ,
					fnic->config.io_throttle_count));
	}
	fnic->fnic_max_tag_id = host->can_queue;

	err = scsi_init_shared_tag_map(host, fnic->fnic_max_tag_id);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			  "Unable to alloc shared tag map\n");
		goto err_out_dev_close;
	}

	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;
	host->max_cmd_len = FCOE_MAX_CMD_LEN;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_clear_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create_slab_pool(2, fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	/* Initialize the FIP fcoe_ctrl struct */
	fnic->ctlr.send = fnic_eth_send;
	fnic->ctlr.update_mac = fnic_update_mac;
	fnic->ctlr.get_src_addr = fnic_get_mac;
	if (fnic->config.flags & VFCF_FIP_CAPABLE) {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware supports FIP\n");
		/* enable directed and multicast */
		vnic_dev_packet_filter(fnic->vdev, 1, 1, 0, 0, 0);
		vnic_dev_add_addr(fnic->vdev, FIP_ALL_ENODE_MACS);
		vnic_dev_add_addr(fnic->vdev, fnic->ctlr.ctl_src_addr);
		fnic->set_vlan = fnic_set_vlan;
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_AUTO);
		setup_timer(&fnic->fip_timer, fnic_fip_notify_timer,
							(unsigned long)fnic);
		spin_lock_init(&fnic->vlans_lock);
		INIT_WORK(&fnic->fip_frame_work, fnic_handle_fip_frame);
		INIT_WORK(&fnic->event_work, fnic_handle_event);
		skb_queue_head_init(&fnic->fip_frame_queue);
		INIT_LIST_HEAD(&fnic->evlist);
		INIT_LIST_HEAD(&fnic->vlans);
	} else {
		shost_printk(KERN_INFO, fnic->lport->host,
			     "firmware uses non-FIP mode\n");
		fcoe_ctlr_init(&fnic->ctlr, FIP_MODE_NON_FIP);
	}
	fnic->state = FNIC_IN_FC_MODE;

	atomic_set(&fnic->in_flight, 0);
	fnic->state_flags = FNIC_FLAGS_NONE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fcoe_libfc_config(lp, &fnic->ctlr, &fnic_transport_template, 0);

	if (!fc_exch_mgr_alloc(lp, FC_CLASS_3, FCPIO_HOST_EXCH_RANGE_START,
			       FCPIO_HOST_EXCH_RANGE_END, NULL)) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	fc_lport_init_stats(lp);
	fnic->stats_reset_time = jiffies;

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;
	fc_host_dev_loss_tmo(lp->host) = fnic->config.port_down_timeout / 1000;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);
	skb_queue_head_init(&fnic->tx_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_free_exch_mgr;
	}

	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp);
err_out_remove_scsi_host:
	fc_remove_host(lp->host);
	scsi_remove_host(lp->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	fnic_stats_debugfs_remove(fnic);
	scsi_host_put(lp->host);
err_out:
	return err;
}
Example 10
static void enicpmd_dev_info_get(struct rte_eth_dev *eth_dev,
	struct rte_eth_dev_info *device_info)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	/* Scattered Rx uses two receive queues per rx queue exposed to dpdk */
	device_info->max_rx_queues = enic->conf_rq_count / 2;
	device_info->max_tx_queues = enic->conf_wq_count;
	device_info->min_rx_bufsize = ENIC_MIN_MTU;
	/* "Max" mtu is not a typo. HW receives packet sizes up to the
	 * max mtu regardless of the current mtu (vNIC's mtu). vNIC mtu is
	 * a hint to the driver to size receive buffers accordingly so that
	 * larger-than-vnic-mtu packets get truncated. For DPDK, we let
	 * the user decide the buffer size via rxmode.max_rx_pkt_len, basically
	 * ignoring vNIC mtu.
	 */
	device_info->max_rx_pktlen = enic_mtu_to_max_rx_pktlen(enic->max_mtu);
	device_info->max_mac_addrs = ENIC_UNICAST_PERFECT_FILTERS;
	device_info->rx_offload_capa = enic->rx_offload_capa;
	device_info->tx_offload_capa = enic->tx_offload_capa;
	device_info->tx_queue_offload_capa = enic->tx_queue_offload_capa;
	device_info->default_rxconf = (struct rte_eth_rxconf) {
		.rx_free_thresh = ENIC_DEFAULT_RX_FREE_THRESH
	};
	device_info->reta_size = enic->reta_size;
	device_info->hash_key_size = enic->hash_key_size;
	device_info->flow_type_rss_offloads = enic->flow_type_rss_offloads;
	device_info->rx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.rq_desc_count,
		.nb_min = ENIC_MIN_RQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
	};
	device_info->tx_desc_lim = (struct rte_eth_desc_lim) {
		.nb_max = enic->config.wq_desc_count,
		.nb_min = ENIC_MIN_WQ_DESCS,
		.nb_align = ENIC_ALIGN_DESCS,
		.nb_seg_max = ENIC_TX_XMIT_MAX,
		.nb_mtu_seg_max = ENIC_NON_TSO_MAX_DESC,
	};
	device_info->default_rxportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_RX_BURST,
		.ring_size = RTE_MIN(device_info->rx_desc_lim.nb_max,
			ENIC_DEFAULT_RX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_RX_RINGS,
	};
	device_info->default_txportconf = (struct rte_eth_dev_portconf) {
		.burst_size = ENIC_DEFAULT_TX_BURST,
		.ring_size = RTE_MIN(device_info->tx_desc_lim.nb_max,
			ENIC_DEFAULT_TX_RING_SIZE),
		.nb_queues = ENIC_DEFAULT_TX_RINGS,
	};
}

static const uint32_t *enicpmd_dev_supported_ptypes_get(struct rte_eth_dev *dev)
{
	static const uint32_t ptypes[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};
	static const uint32_t ptypes_overlay[] = {
		RTE_PTYPE_L2_ETHER,
		RTE_PTYPE_L2_ETHER_VLAN,
		RTE_PTYPE_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_L4_TCP,
		RTE_PTYPE_L4_UDP,
		RTE_PTYPE_L4_FRAG,
		RTE_PTYPE_L4_NONFRAG,
		RTE_PTYPE_TUNNEL_GRENAT,
		RTE_PTYPE_INNER_L2_ETHER,
		RTE_PTYPE_INNER_L3_IPV4_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L3_IPV6_EXT_UNKNOWN,
		RTE_PTYPE_INNER_L4_TCP,
		RTE_PTYPE_INNER_L4_UDP,
		RTE_PTYPE_INNER_L4_FRAG,
		RTE_PTYPE_INNER_L4_NONFRAG,
		RTE_PTYPE_UNKNOWN
	};

	if (dev->rx_pkt_burst != enic_dummy_recv_pkts &&
	    dev->rx_pkt_burst != NULL) {
		struct enic *enic = pmd_priv(dev);
		if (enic->overlay_offload)
			return ptypes_overlay;
		else
			return ptypes;
	}
	return NULL;
}

static void enicpmd_dev_promiscuous_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();

	enic->promisc = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_promiscuous_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->promisc = 0;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_enable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 1;
	enic_add_packet_filter(enic);
}

static void enicpmd_dev_allmulticast_disable(struct rte_eth_dev *eth_dev)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	enic->allmulti = 0;
	enic_add_packet_filter(enic);
}

static int enicpmd_add_mac_addr(struct rte_eth_dev *eth_dev,
	struct ether_addr *mac_addr,
	__rte_unused uint32_t index, __rte_unused uint32_t pool)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	return enic_set_mac_address(enic, mac_addr->addr_bytes);
}

static void enicpmd_remove_mac_addr(struct rte_eth_dev *eth_dev, uint32_t index)
{
	struct enic *enic = pmd_priv(eth_dev);

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return;

	ENICPMD_FUNC_TRACE();
	if (enic_del_mac_address(enic, index))
		dev_err(enic, "del mac addr failed\n");
}

static int enicpmd_set_mac_addr(struct rte_eth_dev *eth_dev,
				struct ether_addr *addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	if (rte_eal_process_type() != RTE_PROC_PRIMARY)
		return -E_RTE_SECONDARY;

	ENICPMD_FUNC_TRACE();
	ret = enic_del_mac_address(enic, 0);
	if (ret)
		return ret;
	return enic_set_mac_address(enic, addr->addr_bytes);
}

static void debug_log_add_del_addr(struct ether_addr *addr, bool add)
{
	char mac_str[ETHER_ADDR_FMT_SIZE];

	ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
	PMD_INIT_LOG(DEBUG, " %s address %s\n",
		     add ? "add" : "remove", mac_str);
}

static int enicpmd_set_mc_addr_list(struct rte_eth_dev *eth_dev,
				    struct ether_addr *mc_addr_set,
				    uint32_t nb_mc_addr)
{
	struct enic *enic = pmd_priv(eth_dev);
	char mac_str[ETHER_ADDR_FMT_SIZE];
	struct ether_addr *addr;
	uint32_t i, j;
	int ret;

	ENICPMD_FUNC_TRACE();

	/* Validate the given addresses first */
	for (i = 0; i < nb_mc_addr && mc_addr_set != NULL; i++) {
		addr = &mc_addr_set[i];
		if (!is_multicast_ether_addr(addr) ||
		    is_broadcast_ether_addr(addr)) {
			ether_format_addr(mac_str, ETHER_ADDR_FMT_SIZE, addr);
			PMD_INIT_LOG(ERR, " invalid multicast address %s\n",
				     mac_str);
			return -EINVAL;
		}
	}

	/* Flush all if requested */
	if (nb_mc_addr == 0 || mc_addr_set == NULL) {
		PMD_INIT_LOG(DEBUG, " flush multicast addresses\n");
		for (i = 0; i < enic->mc_count; i++) {
			addr = &enic->mc_addrs[i];
			debug_log_add_del_addr(addr, false);
			ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
			if (ret)
				return ret;
		}
		enic->mc_count = 0;
		return 0;
	}

	if (nb_mc_addr > ENIC_MULTICAST_PERFECT_FILTERS) {
		PMD_INIT_LOG(ERR, " too many multicast addresses: max=%d\n",
			     ENIC_MULTICAST_PERFECT_FILTERS);
		return -ENOSPC;
	}
	/*
	 * devcmd is slow, so apply the difference instead of flushing and
	 * adding everything.
	 * 1. Delete addresses on the NIC but not on the host
	 */
	for (i = 0; i < enic->mc_count; i++) {
		addr = &enic->mc_addrs[i];
		for (j = 0; j < nb_mc_addr; j++) {
			if (is_same_ether_addr(addr, &mc_addr_set[j]))
				break;
		}
		if (j < nb_mc_addr)
			continue;
		debug_log_add_del_addr(addr, false);
		ret = vnic_dev_del_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* 2. Add addresses on the host but not on the NIC */
	for (i = 0; i < nb_mc_addr; i++) {
		addr = &mc_addr_set[i];
		for (j = 0; j < enic->mc_count; j++) {
			if (is_same_ether_addr(addr, &enic->mc_addrs[j]))
				break;
		}
		if (j < enic->mc_count)
			continue;
		debug_log_add_del_addr(addr, true);
		ret = vnic_dev_add_addr(enic->vdev, addr->addr_bytes);
		if (ret)
			return ret;
	}
	/* Keep a copy so we can flush/apply later on. */
	memcpy(enic->mc_addrs, mc_addr_set,
	       nb_mc_addr * sizeof(struct ether_addr));
	enic->mc_count = nb_mc_addr;
	return 0;
}

static int enicpmd_mtu_set(struct rte_eth_dev *eth_dev, uint16_t mtu)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_mtu(enic, mtu);
}

static int enicpmd_dev_rss_reta_query(struct rte_eth_dev *dev,
				      struct rte_eth_rss_reta_entry64
				      *reta_conf,
				      uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_query: wrong reta_size. given=%u expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}

	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			reta_conf[idx].reta[shift] = enic_sop_rq_idx_to_rte_idx(
				enic->rss_cpu.cpu[i / 4].b[i % 4]);
	}

	return 0;
}

static int enicpmd_dev_rss_reta_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_reta_entry64
				       *reta_conf,
				       uint16_t reta_size)
{
	struct enic *enic = pmd_priv(dev);
	union vnic_rss_cpu rss_cpu;
	uint16_t i, idx, shift;

	ENICPMD_FUNC_TRACE();
	if (reta_size != ENIC_RSS_RETA_SIZE) {
		dev_err(enic, "reta_update: wrong reta_size. given=%u"
			" expected=%u\n",
			reta_size, ENIC_RSS_RETA_SIZE);
		return -EINVAL;
	}
	/*
	 * Start with the current reta and modify it per reta_conf, as we
	 * need to push the entire reta even if we only modify one entry.
	 */
	rss_cpu = enic->rss_cpu;
	for (i = 0; i < reta_size; i++) {
		idx = i / RTE_RETA_GROUP_SIZE;
		shift = i % RTE_RETA_GROUP_SIZE;
		if (reta_conf[idx].mask & (1ULL << shift))
			rss_cpu.cpu[i / 4].b[i % 4] =
				enic_rte_rq_idx_to_sop_idx(
					reta_conf[idx].reta[shift]);
	}
	return enic_set_rss_reta(enic, &rss_cpu);
}

static int enicpmd_dev_rss_hash_update(struct rte_eth_dev *dev,
				       struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	return enic_set_rss_conf(enic, rss_conf);
}

static int enicpmd_dev_rss_hash_conf_get(struct rte_eth_dev *dev,
					 struct rte_eth_rss_conf *rss_conf)
{
	struct enic *enic = pmd_priv(dev);

	ENICPMD_FUNC_TRACE();
	if (rss_conf == NULL)
		return -EINVAL;
	if (rss_conf->rss_key != NULL &&
	    rss_conf->rss_key_len < ENIC_RSS_HASH_KEY_SIZE) {
		dev_err(enic, "rss_hash_conf_get: wrong rss_key_len. given=%u"
			" expected=%u+\n",
			rss_conf->rss_key_len, ENIC_RSS_HASH_KEY_SIZE);
		return -EINVAL;
	}
	rss_conf->rss_hf = enic->rss_hf;
	if (rss_conf->rss_key != NULL) {
		int i;
		for (i = 0; i < ENIC_RSS_HASH_KEY_SIZE; i++) {
			rss_conf->rss_key[i] =
				enic->rss_key.key[i / 10].b[i % 10];
		}
		rss_conf->rss_key_len = ENIC_RSS_HASH_KEY_SIZE;
	}
	return 0;
}

static void enicpmd_dev_rxq_info_get(struct rte_eth_dev *dev,
				     uint16_t rx_queue_id,
				     struct rte_eth_rxq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_rq *rq_sop;
	struct vnic_rq *rq_data;
	struct rte_eth_rxconf *conf;
	uint16_t sop_queue_idx;
	uint16_t data_queue_idx;

	ENICPMD_FUNC_TRACE();
	sop_queue_idx = enic_rte_rq_idx_to_sop_idx(rx_queue_id);
	data_queue_idx = enic_rte_rq_idx_to_data_idx(rx_queue_id);
	rq_sop = &enic->rq[sop_queue_idx];
	rq_data = &enic->rq[data_queue_idx]; /* valid if data_queue_enable */
	qinfo->mp = rq_sop->mp;
	qinfo->scattered_rx = rq_sop->data_queue_enable;
	qinfo->nb_desc = rq_sop->ring.desc_count;
	if (qinfo->scattered_rx)
		qinfo->nb_desc += rq_data->ring.desc_count;
	conf = &qinfo->conf;
	memset(conf, 0, sizeof(*conf));
	conf->rx_free_thresh = rq_sop->rx_free_thresh;
	conf->rx_drop_en = 1;
	/*
	 * Except VLAN stripping (port setting), all the checksum offloads
	 * are always enabled.
	 */
	conf->offloads = enic->rx_offload_capa;
	if (!enic->ig_vlan_strip_en)
		conf->offloads &= ~DEV_RX_OFFLOAD_VLAN_STRIP;
	/* rx_thresh and other fields are not applicable for enic */
}

static void enicpmd_dev_txq_info_get(struct rte_eth_dev *dev,
				     uint16_t tx_queue_id,
				     struct rte_eth_txq_info *qinfo)
{
	struct enic *enic = pmd_priv(dev);
	struct vnic_wq *wq = &enic->wq[tx_queue_id];

	ENICPMD_FUNC_TRACE();
	qinfo->nb_desc = wq->ring.desc_count;
	memset(&qinfo->conf, 0, sizeof(qinfo->conf));
	qinfo->conf.offloads = wq->offloads;
	/* tx_thresh, and all the other fields are not applicable for enic */
}

static int enicpmd_dev_rx_queue_intr_enable(struct rte_eth_dev *eth_dev,
					    uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_unmask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int enicpmd_dev_rx_queue_intr_disable(struct rte_eth_dev *eth_dev,
					     uint16_t rx_queue_id)
{
	struct enic *enic = pmd_priv(eth_dev);

	ENICPMD_FUNC_TRACE();
	vnic_intr_mask(&enic->intr[rx_queue_id + ENICPMD_RXQ_INTR_OFFSET]);
	return 0;
}

static int udp_tunnel_common_check(struct enic *enic,
				   struct rte_eth_udp_tunnel *tnl)
{
	if (tnl->prot_type != RTE_TUNNEL_TYPE_VXLAN)
		return -ENOTSUP;
	if (!enic->overlay_offload) {
		PMD_INIT_LOG(DEBUG, " vxlan (overlay offload) is not "
			     "supported\n");
		return -ENOTSUP;
	}
	return 0;
}

static int update_vxlan_port(struct enic *enic, uint16_t port)
{
	if (vnic_dev_overlay_offload_cfg(enic->vdev,
					 OVERLAY_CFG_VXLAN_PORT_UPDATE,
					 port)) {
		PMD_INIT_LOG(DEBUG, " failed to update vxlan port\n");
		return -EINVAL;
	}
	PMD_INIT_LOG(DEBUG, " updated vxlan port to %u\n", port);
	enic->vxlan_port = port;
	return 0;
}

static int enicpmd_dev_udp_tunnel_port_add(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * The NIC has 1 configurable VXLAN port number. "Adding" a new port
	 * number replaces it.
	 */
	if (tnl->udp_port == enic->vxlan_port || tnl->udp_port == 0) {
		PMD_INIT_LOG(DEBUG, " %u is already configured or invalid\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, tnl->udp_port);
}

static int enicpmd_dev_udp_tunnel_port_del(struct rte_eth_dev *eth_dev,
					   struct rte_eth_udp_tunnel *tnl)
{
	struct enic *enic = pmd_priv(eth_dev);
	int ret;

	ENICPMD_FUNC_TRACE();
	ret = udp_tunnel_common_check(enic, tnl);
	if (ret)
		return ret;
	/*
	 * Clear the previously set port number and restore the
	 * hardware default port number. Some drivers disable VXLAN
	 * offloads when there are no configured port numbers. But
	 * enic does not do that as VXLAN is part of overlay offload,
	 * which is tied to inner RSS and TSO.
	 */
	if (tnl->udp_port != enic->vxlan_port) {
		PMD_INIT_LOG(DEBUG, " %u is not a configured vxlan port\n",
			     tnl->udp_port);
		return -EINVAL;
	}
	return update_vxlan_port(enic, ENIC_DEFAULT_VXLAN_PORT);
}

static int enicpmd_dev_fw_version_get(struct rte_eth_dev *eth_dev,
				      char *fw_version, size_t fw_size)
{
	struct vnic_devcmd_fw_info *info;
	struct enic *enic;
	int ret;

	ENICPMD_FUNC_TRACE();
	if (fw_version == NULL || fw_size <= 0)
		return -EINVAL;
	enic = pmd_priv(eth_dev);
	ret = vnic_dev_fw_info(enic->vdev, &info);
	if (ret)
		return ret;
	snprintf(fw_version, fw_size, "%s %s",
		 info->fw_version, info->fw_build);
	fw_version[fw_size - 1] = '\0';
	return 0;
}

static const struct eth_dev_ops enicpmd_eth_dev_ops = {
	.dev_configure        = enicpmd_dev_configure,
	.dev_start            = enicpmd_dev_start,
	.dev_stop             = enicpmd_dev_stop,
	.dev_set_link_up      = NULL,
	.dev_set_link_down    = NULL,
	.dev_close            = enicpmd_dev_close,
	.promiscuous_enable   = enicpmd_dev_promiscuous_enable,
	.promiscuous_disable  = enicpmd_dev_promiscuous_disable,
	.allmulticast_enable  = enicpmd_dev_allmulticast_enable,
	.allmulticast_disable = enicpmd_dev_allmulticast_disable,
	.link_update          = enicpmd_dev_link_update,
	.stats_get            = enicpmd_dev_stats_get,
	.stats_reset          = enicpmd_dev_stats_reset,
	.queue_stats_mapping_set = NULL,
	.dev_infos_get        = enicpmd_dev_info_get,
	.dev_supported_ptypes_get = enicpmd_dev_supported_ptypes_get,
	.mtu_set              = enicpmd_mtu_set,
	.vlan_filter_set      = NULL,
	.vlan_tpid_set        = NULL,
	.vlan_offload_set     = enicpmd_vlan_offload_set,
	.vlan_strip_queue_set = NULL,
	.rx_queue_start       = enicpmd_dev_rx_queue_start,
	.rx_queue_stop        = enicpmd_dev_rx_queue_stop,
	.tx_queue_start       = enicpmd_dev_tx_queue_start,
	.tx_queue_stop        = enicpmd_dev_tx_queue_stop,
	.rx_queue_setup       = enicpmd_dev_rx_queue_setup,
	.rx_queue_release     = enicpmd_dev_rx_queue_release,
	.rx_queue_count       = enicpmd_dev_rx_queue_count,
	.rx_descriptor_done   = NULL,
	.tx_queue_setup       = enicpmd_dev_tx_queue_setup,
	.tx_queue_release     = enicpmd_dev_tx_queue_release,
	.rx_queue_intr_enable = enicpmd_dev_rx_queue_intr_enable,
	.rx_queue_intr_disable = enicpmd_dev_rx_queue_intr_disable,
	.rxq_info_get         = enicpmd_dev_rxq_info_get,
	.txq_info_get         = enicpmd_dev_txq_info_get,
	.dev_led_on           = NULL,
	.dev_led_off          = NULL,
	.flow_ctrl_get        = NULL,
	.flow_ctrl_set        = NULL,
	.priority_flow_ctrl_set = NULL,
	.mac_addr_add         = enicpmd_add_mac_addr,
	.mac_addr_remove      = enicpmd_remove_mac_addr,
	.mac_addr_set         = enicpmd_set_mac_addr,
	.set_mc_addr_list     = enicpmd_set_mc_addr_list,
	.filter_ctrl          = enicpmd_dev_filter_ctrl,
	.reta_query           = enicpmd_dev_rss_reta_query,
	.reta_update          = enicpmd_dev_rss_reta_update,
	.rss_hash_conf_get    = enicpmd_dev_rss_hash_conf_get,
	.rss_hash_update      = enicpmd_dev_rss_hash_update,
	.udp_tunnel_port_add  = enicpmd_dev_udp_tunnel_port_add,
	.udp_tunnel_port_del  = enicpmd_dev_udp_tunnel_port_del,
	.fw_version_get       = enicpmd_dev_fw_version_get,
};

static int enic_parse_zero_one(const char *key,
			       const char *value,
			       void *opaque)
{
	struct enic *enic;
	bool b;

	enic = (struct enic *)opaque;
	if (strcmp(value, "0") == 0) {
		b = false;
	} else if (strcmp(value, "1") == 0) {
		b = true;
	} else {
		dev_err(enic, "Invalid value for %s"
			": expected=0|1 given=%s\n", key, value);
		return -EINVAL;
	}
	if (strcmp(key, ENIC_DEVARG_DISABLE_OVERLAY) == 0)
		enic->disable_overlay = b;
	if (strcmp(key, ENIC_DEVARG_ENABLE_AVX2_RX) == 0)
		enic->enable_avx2_rx = b;
	return 0;
}

static int enic_parse_ig_vlan_rewrite(__rte_unused const char *key,
				      const char *value,
				      void *opaque)
{
	struct enic *enic;

	enic = (struct enic *)opaque;
	if (strcmp(value, "trunk") == 0) {
		/* Trunk mode: always tag */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_DEFAULT_TRUNK;
	} else if (strcmp(value, "untag") == 0) {
		/* Untag default VLAN mode: untag if VLAN = default VLAN */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_UNTAG_DEFAULT_VLAN;
	} else if (strcmp(value, "priority") == 0) {
		/*
		 * Priority-tag default VLAN mode: priority tag (VLAN header
		 * with ID=0) if VLAN = default
		 */
		enic->ig_vlan_rewrite_mode =
			IG_VLAN_REWRITE_MODE_PRIORITY_TAG_DEFAULT_VLAN;
	} else if (strcmp(value, "pass") == 0) {
		/* Pass through mode: do not touch tags */
		enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	} else {
		dev_err(enic, "Invalid value for " ENIC_DEVARG_IG_VLAN_REWRITE
			": expected=trunk|untag|priority|pass given=%s\n",
			value);
		return -EINVAL;
	}
	return 0;
}

static int enic_check_devargs(struct rte_eth_dev *dev)
{
	static const char *const valid_keys[] = {
		ENIC_DEVARG_DISABLE_OVERLAY,
		ENIC_DEVARG_ENABLE_AVX2_RX,
		ENIC_DEVARG_IG_VLAN_REWRITE,
		NULL};
	struct enic *enic = pmd_priv(dev);
	struct rte_kvargs *kvlist;

	ENICPMD_FUNC_TRACE();

	enic->disable_overlay = false;
	enic->enable_avx2_rx = false;
	enic->ig_vlan_rewrite_mode = IG_VLAN_REWRITE_MODE_PASS_THRU;
	if (!dev->device->devargs)
		return 0;
	kvlist = rte_kvargs_parse(dev->device->devargs->args, valid_keys);
	if (!kvlist)
		return -EINVAL;
	if (rte_kvargs_process(kvlist, ENIC_DEVARG_DISABLE_OVERLAY,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_ENABLE_AVX2_RX,
			       enic_parse_zero_one, enic) < 0 ||
	    rte_kvargs_process(kvlist, ENIC_DEVARG_IG_VLAN_REWRITE,
			       enic_parse_ig_vlan_rewrite, enic) < 0) {
		rte_kvargs_free(kvlist);
		return -EINVAL;
	}
	rte_kvargs_free(kvlist);
	return 0;
}

/* Initialize the driver
 * It returns 0 on success.
 */
static int eth_enicpmd_dev_init(struct rte_eth_dev *eth_dev)
{
	struct rte_pci_device *pdev;
	struct rte_pci_addr *addr;
	struct enic *enic = pmd_priv(eth_dev);
	int err;

	ENICPMD_FUNC_TRACE();

	enic->port_id = eth_dev->data->port_id;
	enic->rte_dev = eth_dev;
	eth_dev->dev_ops = &enicpmd_eth_dev_ops;
	eth_dev->rx_pkt_burst = &enic_recv_pkts;
	eth_dev->tx_pkt_burst = &enic_xmit_pkts;
	eth_dev->tx_pkt_prepare = &enic_prep_pkts;
	/* Let rte_eth_dev_close() release the port resources */
	eth_dev->data->dev_flags |= RTE_ETH_DEV_CLOSE_REMOVE;

	pdev = RTE_ETH_DEV_TO_PCI(eth_dev);
	rte_eth_copy_pci_info(eth_dev, pdev);
	enic->pdev = pdev;
	addr = &pdev->addr;

	snprintf(enic->bdf_name, ENICPMD_BDF_LENGTH, "%04x:%02x:%02x.%x",
		addr->domain, addr->bus, addr->devid, addr->function);

	err = enic_check_devargs(eth_dev);
	if (err)
		return err;
	return enic_probe(enic);
}

static int eth_enic_pci_probe(struct rte_pci_driver *pci_drv __rte_unused,
	struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_probe(pci_dev, sizeof(struct enic),
		eth_enicpmd_dev_init);
}

static int eth_enic_pci_remove(struct rte_pci_device *pci_dev)
{
	return rte_eth_dev_pci_generic_remove(pci_dev, NULL);
}

static struct rte_pci_driver rte_enic_pmd = {
	.id_table = pci_id_enic_map,
	.drv_flags = RTE_PCI_DRV_NEED_MAPPING | RTE_PCI_DRV_INTR_LSC |
		     RTE_PCI_DRV_IOVA_AS_VA,
	.probe = eth_enic_pci_probe,
	.remove = eth_enic_pci_remove,
};

RTE_PMD_REGISTER_PCI(net_enic, rte_enic_pmd);
RTE_PMD_REGISTER_PCI_TABLE(net_enic, pci_id_enic_map);
RTE_PMD_REGISTER_KMOD_DEP(net_enic, "* igb_uio | uio_pci_generic | vfio-pci");
RTE_PMD_REGISTER_PARAM_STRING(net_enic,
	ENIC_DEVARG_DISABLE_OVERLAY "=0|1 "
	ENIC_DEVARG_ENABLE_AVX2_RX "=0|1 "
	ENIC_DEVARG_IG_VLAN_REWRITE "=trunk|untag|priority|pass");
Example 11
static int __devinit fnic_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct Scsi_Host *host;
	struct fc_lport *lp;
	struct fnic *fnic;
	mempool_t *pool;
	int err;
	int i;
	unsigned long flags;

	/*
	 * Allocate SCSI Host and set up association between host,
	 * local port, and fnic
	 */
	host = scsi_host_alloc(&fnic_host_template,
			       sizeof(struct fc_lport) + sizeof(struct fnic));
	if (!host) {
		printk(KERN_ERR PFX "Unable to alloc SCSI host\n");
		err = -ENOMEM;
		goto err_out;
	}
	lp = shost_priv(host);
	lp->host = host;
	fnic = lport_priv(lp);
	fnic->lport = lp;

	snprintf(fnic->name, sizeof(fnic->name) - 1, "%s%d", DRV_NAME,
		 host->host_no);

	host->transportt = fnic_fc_transport;

	err = scsi_init_shared_tag_map(host, FNIC_MAX_IO_REQ);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to alloc shared tag map\n");
		goto err_out_free_hba;
	}

	/* Setup PCI resources */
	pci_set_drvdata(pdev, fnic);

	fnic->pdev = pdev;

	err = pci_enable_device(pdev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI device, aborting.\n");
		goto err_out_free_hba;
	}

	err = pci_request_regions(pdev, DRV_NAME);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot enable PCI resources, aborting\n");
		goto err_out_disable_device;
	}

	pci_set_master(pdev);

	/* Query PCI controller on system for DMA addressing
	 * limitation for the device.  Try 40-bit first, and
	 * fall back to 32-bit.
	 */
	err = pci_set_dma_mask(pdev, DMA_BIT_MASK(40));
	if (err) {
		err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "No usable DMA configuration "
				     "aborting\n");
			goto err_out_release_regions;
		}
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(32));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 32-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	} else {
		err = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(40));
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "Unable to obtain 40-bit DMA "
				     "for consistent allocations, aborting.\n");
			goto err_out_release_regions;
		}
	}

	/* Map vNIC resources from BAR0 */
	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "BAR0 not memory-mappable, aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->bar0.vaddr = pci_iomap(pdev, 0, 0);
	fnic->bar0.bus_addr = pci_resource_start(pdev, 0);
	fnic->bar0.len = pci_resource_len(pdev, 0);

	if (!fnic->bar0.vaddr) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Cannot memory-map BAR0 res hdr, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_release_regions;
	}

	fnic->vdev = vnic_dev_register(NULL, fnic, pdev, &fnic->bar0);
	if (!fnic->vdev) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC registration failed, "
			     "aborting.\n");
		err = -ENODEV;
		goto err_out_iounmap;
	}

	err = fnic_dev_wait(fnic->vdev, vnic_dev_open,
			    vnic_dev_open_done, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev open failed, aborting.\n");
		goto err_out_vnic_unregister;
	}

	err = vnic_dev_init(fnic->vdev, 0);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC dev init failed, aborting.\n");
		goto err_out_dev_close;
	}

	err = vnic_dev_mac_addr(fnic->vdev, fnic->mac_addr);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "vNIC get MAC addr failed \n");
		goto err_out_dev_close;
	}

	/* Get vNIC configuration */
	err = fnic_get_vnic_config(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Get vNIC configuration failed, "
			     "aborting.\n");
		goto err_out_dev_close;
	}
	host->max_lun = fnic->config.luns_per_tgt;
	host->max_id = FNIC_MAX_FCP_TARGET;

	fnic_get_res_counts(fnic);

	err = fnic_set_intr_mode(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to set intr mode, "
			     "aborting.\n");
		goto err_out_dev_close;
	}

	err = fnic_request_intr(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Unable to request irq.\n");
		goto err_out_clear_intr;
	}

	err = fnic_alloc_vnic_resources(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc vNIC resources, "
			     "aborting.\n");
		goto err_out_free_intr;
	}


	/* initialize all fnic locks */
	spin_lock_init(&fnic->fnic_lock);

	for (i = 0; i < FNIC_WQ_MAX; i++)
		spin_lock_init(&fnic->wq_lock[i]);

	for (i = 0; i < FNIC_WQ_COPY_MAX; i++) {
		spin_lock_init(&fnic->wq_copy_lock[i]);
		fnic->wq_copy_desc_low[i] = DESC_CLEAN_LOW_WATERMARK;
		fnic->fw_ack_recd[i] = 0;
		fnic->fw_ack_index[i] = -1;
	}

	for (i = 0; i < FNIC_IO_LOCKS; i++)
		spin_lock_init(&fnic->io_req_lock[i]);

	fnic->io_req_pool = mempool_create_slab_pool(2, fnic_io_req_cache);
	if (!fnic->io_req_pool)
		goto err_out_free_resources;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_DFLT]);
	if (!pool)
		goto err_out_free_ioreq_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT] = pool;

	pool = mempool_create(2, fnic_alloc_slab_dma, mempool_free_slab,
			      fnic_sgl_cache[FNIC_SGL_CACHE_MAX]);
	if (!pool)
		goto err_out_free_dflt_pool;
	fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX] = pool;

	/* setup vlan config, hw inserts vlan header */
	fnic->vlan_hw_insert = 1;
	fnic->vlan_id = 0;

	fnic->flogi_oxid = FC_XID_UNKNOWN;
	fnic->flogi = NULL;
	fnic->flogi_resp = NULL;
	fnic->state = FNIC_IN_FC_MODE;

	/* Enable hardware stripping of vlan header on ingress */
	fnic_set_nic_config(fnic, 0, 0, 0, 0, 0, 0, 1);

	/* Setup notification buffer area */
	err = fnic_notify_set(fnic);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "Failed to alloc notify buffer, aborting.\n");
		goto err_out_free_max_pool;
	}

	/* Setup notify timer when using MSI interrupts */
	if (vnic_dev_get_intr_mode(fnic->vdev) == VNIC_DEV_INTR_MODE_MSI)
		setup_timer(&fnic->notify_timer,
			    fnic_notify_timer, (unsigned long)fnic);

	/* allocate RQ buffers and post them to RQ*/
	for (i = 0; i < fnic->rq_count; i++) {
		err = vnic_rq_fill(&fnic->rq[i], fnic_alloc_rq_frame);
		if (err) {
			shost_printk(KERN_ERR, fnic->lport->host,
				     "fnic_alloc_rq_frame can't alloc "
				     "frame\n");
			goto err_out_free_rq_buf;
		}
	}

	/*
	 * Initialization done with PCI system, hardware, firmware.
	 * Add host to SCSI
	 */
	err = scsi_add_host(lp->host, &pdev->dev);
	if (err) {
		shost_printk(KERN_ERR, fnic->lport->host,
			     "fnic: scsi_add_host failed...exiting\n");
		goto err_out_free_rq_buf;
	}

	/* Start local port initialization */

	lp->link_up = 0;
	lp->tt = fnic_transport_template;

	lp->emp = fc_exch_mgr_alloc(lp, FC_CLASS_3,
				    FCPIO_HOST_EXCH_RANGE_START,
				    FCPIO_HOST_EXCH_RANGE_END);
	if (!lp->emp) {
		err = -ENOMEM;
		goto err_out_remove_scsi_host;
	}

	lp->max_retry_count = fnic->config.flogi_retries;
	lp->max_rport_retry_count = fnic->config.plogi_retries;
	lp->service_params = (FCP_SPPF_INIT_FCN | FCP_SPPF_RD_XRDY_DIS |
			      FCP_SPPF_CONF_COMPL);
	if (fnic->config.flags & VFCF_FCP_SEQ_LVL_ERR)
		lp->service_params |= FCP_SPPF_RETRY;

	lp->boot_time = jiffies;
	lp->e_d_tov = fnic->config.ed_tov;
	lp->r_a_tov = fnic->config.ra_tov;
	lp->link_supported_speeds = FC_PORTSPEED_10GBIT;
	fc_set_wwnn(lp, fnic->config.node_wwn);
	fc_set_wwpn(lp, fnic->config.port_wwn);

	fc_exch_init(lp);
	fc_lport_init(lp);
	fc_elsct_init(lp);
	fc_rport_init(lp);
	fc_disc_init(lp);

	fc_lport_config(lp);

	if (fc_set_mfs(lp, fnic->config.maxdatafieldsize +
		       sizeof(struct fc_frame_header))) {
		err = -EINVAL;
		goto err_out_free_exch_mgr;
	}
	fc_host_maxframe_size(lp->host) = lp->mfs;

	sprintf(fc_host_symbolic_name(lp->host),
		DRV_NAME " v" DRV_VERSION " over %s", fnic->name);

	spin_lock_irqsave(&fnic_list_lock, flags);
	list_add_tail(&fnic->list, &fnic_list);
	spin_unlock_irqrestore(&fnic_list_lock, flags);

	INIT_WORK(&fnic->link_work, fnic_handle_link);
	INIT_WORK(&fnic->frame_work, fnic_handle_frame);
	skb_queue_head_init(&fnic->frame_queue);

	/* Enable all queues */
	for (i = 0; i < fnic->raw_wq_count; i++)
		vnic_wq_enable(&fnic->wq[i]);
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_enable(&fnic->rq[i]);
	for (i = 0; i < fnic->wq_copy_count; i++)
		vnic_wq_copy_enable(&fnic->wq_copy[i]);

	fc_fabric_login(lp);

	vnic_dev_enable(fnic->vdev);
	for (i = 0; i < fnic->intr_count; i++)
		vnic_intr_unmask(&fnic->intr[i]);

	fnic_notify_timer_start(fnic);

	return 0;

err_out_free_exch_mgr:
	fc_exch_mgr_free(lp->emp);
err_out_remove_scsi_host:
	fc_remove_host(fnic->lport->host);
	scsi_remove_host(fnic->lport->host);
err_out_free_rq_buf:
	for (i = 0; i < fnic->rq_count; i++)
		vnic_rq_clean(&fnic->rq[i], fnic_free_rq_buf);
	vnic_dev_notify_unset(fnic->vdev);
err_out_free_max_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_MAX]);
err_out_free_dflt_pool:
	mempool_destroy(fnic->io_sgl_pool[FNIC_SGL_CACHE_DFLT]);
err_out_free_ioreq_pool:
	mempool_destroy(fnic->io_req_pool);
err_out_free_resources:
	fnic_free_vnic_resources(fnic);
err_out_free_intr:
	fnic_free_intr(fnic);
err_out_clear_intr:
	fnic_clear_intr_mode(fnic);
err_out_dev_close:
	vnic_dev_close(fnic->vdev);
err_out_vnic_unregister:
	vnic_dev_unregister(fnic->vdev);
err_out_iounmap:
	fnic_iounmap(fnic);
err_out_release_regions:
	pci_release_regions(pdev);
err_out_disable_device:
	pci_disable_device(pdev);
err_out_free_hba:
	scsi_host_put(lp->host);
err_out:
	return err;
}