Example #1
/**
 * genwqe_bus_reset() - Card recovery
 *
 * pci_reset_function() will recover the device and ensure that the
 * registers are accessible again when it completes successfully. If
 * it fails, the card stays dead and its registers remain
 * inaccessible.
 */
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
	int bars, rc = 0;
	struct pci_dev *pci_dev = cd->pci_dev;
	void __iomem *mmio;

	if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
		return -EIO;

	mmio = cd->mmio;
	cd->mmio = NULL;
	pci_iounmap(pci_dev, mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);

	/*
	 * Firmware/BIOS might change memory mapping during bus reset.
	 * Settings like bus-mastering, ... are backed up and
	 * restored by pci_reset_function().
	 */
	dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
	rc = pci_reset_function(pci_dev);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed reset func (rc %d)\n", __func__, rc);
		return rc;
	}
	dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);

	/*
	 * This is the right spot to clear the injected register-read
	 * failures. On real systems, pci_bus_reset() does this job.
	 */
	cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
			    GENWQE_INJECT_GFIR_FATAL |
			    GENWQE_INJECT_GFIR_INFO);

	rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, rc);
		return -EIO;
	}

	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
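
The unmap/release, reset, re-request/re-map sequence above generalizes beyond this driver: registers must not be touched while the function reset is in flight, and the BAR mapping is only re-established once pci_reset_function() reports success. A minimal sketch of the same pattern, with hypothetical names (my_dev_reset, "my_driver") that are not from the source:

/* Hedged sketch: unmap/release -> pci_reset_function() -> re-request/re-map. */
static int my_dev_reset(struct pci_dev *pdev, void __iomem **mmio)
{
	int bars = pci_select_bars(pdev, IORESOURCE_MEM);
	int rc;

	/* Tear down our view of the device before resetting it. */
	pci_iounmap(pdev, *mmio);
	*mmio = NULL;
	pci_release_selected_regions(pdev, bars);

	/* pci_reset_function() saves and restores config space itself. */
	rc = pci_reset_function(pdev);
	if (rc)
		return rc;

	rc = pci_request_selected_regions(pdev, bars, "my_driver");
	if (rc)
		return -EIO;

	*mmio = pci_iomap(pdev, 0, 0);	/* length 0: map all of BAR0 */
	return *mmio ? 0 : -ENOMEM;
}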
Example #2
/**
 * genwqe_pci_remove() - Free PCIe related resources for our card
 */
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
	int bars;
	struct pci_dev *pci_dev = cd->pci_dev;

	if (cd->mmio)
		pci_iounmap(pci_dev, cd->mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);
	pci_disable_device(pci_dev);
}
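
Note that the bars mask is recomputed here rather than cached: pci_select_bars() is deterministic for a given device, so the mask released at remove time matches the one requested at setup time (see Example #6). A minimal sketch of a .remove callback driving this teardown; my_remove and the pci_get_drvdata() lookup are assumptions, not from the source:

static void my_remove(struct pci_dev *pci_dev)
{
	/* assumed: probe stored the genwqe_dev via pci_set_drvdata() */
	struct genwqe_dev *cd = pci_get_drvdata(pci_dev);

	genwqe_pci_remove(cd);	/* unmap BAR0, release regions, disable device */
}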
Example #3
/*
 * To backport commit b718989d correctly, pcibios_enable_device()
 * is required, but modules cannot access it: it is an
 * architecture-specific routine that is not exported, so only
 * core kernel code can call it. We implement a sloppy
 * workaround for backporting this.
 */
int pci_enable_device_mem(struct pci_dev *dev)
{
	int bars = pci_select_bars(dev, IORESOURCE_MEM);

	return pci_enable_device_bars(dev, bars);
}
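
Callers can treat the backported helper as drop-in compatible with the upstream API. A hedged sketch of a probe path using it, with illustrative names (my_probe, "my_driver") that are not from the source:

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	int bars, err;

	err = pci_enable_device_mem(pdev);	/* enables only the memory BARs */
	if (err)
		return err;

	bars = pci_select_bars(pdev, IORESOURCE_MEM);
	err = pci_request_selected_regions(pdev, bars, "my_driver");
	if (err) {
		pci_disable_device(pdev);
		return err;
	}
	return 0;
}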
Example #4
static int xgbe_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct xgbe_prv_data *pdata;
	struct device *dev = &pdev->dev;
	void __iomem * const *iomap_table;
	struct pci_dev *rdev;
	unsigned int ma_lo, ma_hi;
	unsigned int reg;
	int bar_mask;
	int ret;

	pdata = xgbe_alloc_pdata(dev);
	if (IS_ERR(pdata)) {
		ret = PTR_ERR(pdata);
		goto err_alloc;
	}

	pdata->pcidev = pdev;
	pci_set_drvdata(pdev, pdata);

	/* Get the version data */
	pdata->vdata = (struct xgbe_version_data *)id->driver_data;

	ret = pcim_enable_device(pdev);
	if (ret) {
		dev_err(dev, "pcim_enable_device failed\n");
		goto err_pci_enable;
	}

	/* Obtain the mmio areas for the device */
	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, XGBE_DRV_NAME);
	if (ret) {
		dev_err(dev, "pcim_iomap_regions failed\n");
		goto err_pci_enable;
	}

	iomap_table = pcim_iomap_table(pdev);
	if (!iomap_table) {
		dev_err(dev, "pcim_iomap_table failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}

	pdata->xgmac_regs = iomap_table[XGBE_XGMAC_BAR];
	if (!pdata->xgmac_regs) {
		dev_err(dev, "xgmac ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	pdata->xprop_regs = pdata->xgmac_regs + XGBE_MAC_PROP_OFFSET;
	pdata->xi2c_regs = pdata->xgmac_regs + XGBE_I2C_CTRL_OFFSET;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xgmac_regs = %p\n", pdata->xgmac_regs);
		dev_dbg(dev, "xprop_regs = %p\n", pdata->xprop_regs);
		dev_dbg(dev, "xi2c_regs  = %p\n", pdata->xi2c_regs);
	}

	pdata->xpcs_regs = iomap_table[XGBE_XPCS_BAR];
	if (!pdata->xpcs_regs) {
		dev_err(dev, "xpcs ioremap failed\n");
		ret = -ENOMEM;
		goto err_pci_enable;
	}
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "xpcs_regs  = %p\n", pdata->xpcs_regs);

	/* Set the PCS indirect addressing definition registers */
	rdev = pci_get_domain_bus_and_slot(0, 0, PCI_DEVFN(0, 0));
	if (rdev &&
	    (rdev->vendor == PCI_VENDOR_ID_AMD) && (rdev->device == 0x15d0)) {
		pdata->xpcs_window_def_reg = PCS_V2_RV_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_RV_WINDOW_SELECT;
	} else {
		pdata->xpcs_window_def_reg = PCS_V2_WINDOW_DEF;
		pdata->xpcs_window_sel_reg = PCS_V2_WINDOW_SELECT;
	}
	pci_dev_put(rdev);

	/* Configure the PCS indirect addressing support */
	reg = XPCS32_IOREAD(pdata, pdata->xpcs_window_def_reg);
	pdata->xpcs_window = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, OFFSET);
	pdata->xpcs_window <<= 6;
	pdata->xpcs_window_size = XPCS_GET_BITS(reg, PCS_V2_WINDOW_DEF, SIZE);
	pdata->xpcs_window_size = 1 << (pdata->xpcs_window_size + 7);
	pdata->xpcs_window_mask = pdata->xpcs_window_size - 1;
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "xpcs window def  = %#010x\n",
			pdata->xpcs_window_def_reg);
		dev_dbg(dev, "xpcs window sel  = %#010x\n",
			pdata->xpcs_window_sel_reg);
		dev_dbg(dev, "xpcs window      = %#010x\n",
			pdata->xpcs_window);
		dev_dbg(dev, "xpcs window size = %#010x\n",
			pdata->xpcs_window_size);
		dev_dbg(dev, "xpcs window mask = %#010x\n",
			pdata->xpcs_window_mask);
	}

	pci_set_master(pdev);

	/* Enable all interrupts in the hardware */
	XP_IOWRITE(pdata, XP_INT_EN, 0x1fffff);

	/* Retrieve the MAC address */
	ma_lo = XP_IOREAD(pdata, XP_MAC_ADDR_LO);
	ma_hi = XP_IOREAD(pdata, XP_MAC_ADDR_HI);
	pdata->mac_addr[0] = ma_lo & 0xff;
	pdata->mac_addr[1] = (ma_lo >> 8) & 0xff;
	pdata->mac_addr[2] = (ma_lo >> 16) & 0xff;
	pdata->mac_addr[3] = (ma_lo >> 24) & 0xff;
	pdata->mac_addr[4] = ma_hi & 0xff;
	pdata->mac_addr[5] = (ma_hi >> 8) & 0xff;
	if (!XP_GET_BITS(ma_hi, XP_MAC_ADDR_HI, VALID) ||
	    !is_valid_ether_addr(pdata->mac_addr)) {
		dev_err(dev, "invalid mac address\n");
		ret = -EINVAL;
		goto err_pci_enable;
	}

	/* Clock settings */
	pdata->sysclk_rate = XGBE_V2_DMA_CLOCK_FREQ;
	pdata->ptpclk_rate = XGBE_V2_PTP_CLOCK_FREQ;

	/* Set the DMA coherency values */
	pdata->coherent = 1;
	pdata->arcr = XGBE_DMA_PCI_ARCR;
	pdata->awcr = XGBE_DMA_PCI_AWCR;
	pdata->awarcr = XGBE_DMA_PCI_AWARCR;

	/* Set the maximum channels and queues */
	reg = XP_IOREAD(pdata, XP_PROP_1);
	pdata->tx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_DMA);
	pdata->rx_max_channel_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_DMA);
	pdata->tx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_TX_QUEUES);
	pdata->rx_max_q_count = XP_GET_BITS(reg, XP_PROP_1, MAX_RX_QUEUES);
	if (netif_msg_probe(pdata)) {
		dev_dbg(dev, "max tx/rx channel count = %u/%u\n",
			pdata->tx_max_channel_count,
			pdata->tx_max_channel_count);
		dev_dbg(dev, "max tx/rx hw queue count = %u/%u\n",
			pdata->tx_max_q_count, pdata->rx_max_q_count);
	}

	/* Set the hardware channel and queue counts */
	xgbe_set_counts(pdata);

	/* Set the maximum fifo amounts */
	reg = XP_IOREAD(pdata, XP_PROP_2);
	pdata->tx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, TX_FIFO_SIZE);
	pdata->tx_max_fifo_size *= 16384;
	pdata->tx_max_fifo_size = min(pdata->tx_max_fifo_size,
				      pdata->vdata->tx_max_fifo_size);
	pdata->rx_max_fifo_size = XP_GET_BITS(reg, XP_PROP_2, RX_FIFO_SIZE);
	pdata->rx_max_fifo_size *= 16384;
	pdata->rx_max_fifo_size = min(pdata->rx_max_fifo_size,
				      pdata->vdata->rx_max_fifo_size);
	if (netif_msg_probe(pdata))
		dev_dbg(dev, "max tx/rx max fifo size = %u/%u\n",
			pdata->tx_max_fifo_size, pdata->rx_max_fifo_size);

	/* Configure interrupt support */
	ret = xgbe_config_irqs(pdata);
	if (ret)
		goto err_pci_enable;

	/* Configure the netdev resource */
	ret = xgbe_config_netdev(pdata);
	if (ret)
		goto err_irq_vectors;

	netdev_notice(pdata->netdev, "net device enabled\n");

	return 0;

err_irq_vectors:
	pci_free_irq_vectors(pdata->pcidev);

err_pci_enable:
	xgbe_free_pdata(pdata);

err_alloc:
	dev_notice(dev, "net device not enabled\n");

	return ret;
}
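
Because the probe uses the managed pcim_* variants, there is no explicit unwind of the enable/iomap steps: devres releases them automatically when probe fails or the device is unbound, which is why every early failure above can jump to the same err_pci_enable label. A minimal sketch of the managed pattern in isolation; my_probe and "my_driver" are illustrative, not from the source:

static int my_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	void __iomem * const *tbl;
	int bar_mask, ret;

	ret = pcim_enable_device(pdev);		/* managed pci_enable_device() */
	if (ret)
		return ret;

	bar_mask = pci_select_bars(pdev, IORESOURCE_MEM);
	ret = pcim_iomap_regions(pdev, bar_mask, "my_driver");
	if (ret)
		return ret;	/* no unwind needed: devres cleans up */

	tbl = pcim_iomap_table(pdev);		/* per-BAR __iomem pointers */
	if (!tbl || !tbl[0])
		return -ENOMEM;
	return 0;
}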
Example #5
#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 8, 0)
static int __devinit
#else
static int
#endif
igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
{
	struct rte_uio_pci_dev *udev;
	struct msix_entry msix_entry;
	int err;

	/* essential vars for configuring the device with net_device */
	struct net_device *netdev;
	struct net_adapter *adapter = NULL;
	struct ixgbe_hw *hw_i = NULL;
	struct e1000_hw *hw_e = NULL;

	udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL);
	if (!udev)
		return -ENOMEM;

	/*
	 * enable device: ask low-level code to enable I/O and
	 * memory
	 */
	err = pci_enable_device(dev);
	if (err != 0) {
		dev_err(&dev->dev, "Cannot enable PCI device\n");
		goto fail_free;
	}

	/*
	 * reserve device's PCI memory regions for use by this
	 * module
	 */
	err = pci_request_regions(dev, "igb_uio");
	if (err != 0) {
		dev_err(&dev->dev, "Cannot request regions\n");
		goto fail_disable;
	}

	/* enable bus mastering on the device */
	pci_set_master(dev);

	/* remap IO memory */
	err = igbuio_setup_bars(dev, &udev->info);
	if (err != 0)
		goto fail_release_iomem;

	/* set 64-bit DMA mask */
	err = pci_set_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set DMA mask\n");
		goto fail_release_iomem;
	}

	err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64));
	if (err != 0) {
		dev_err(&dev->dev, "Cannot set consistent DMA mask\n");
		goto fail_release_iomem;
	}

	/* fill uio infos */
	udev->info.name = "igb_uio";
	udev->info.version = "0.1";
	udev->info.handler = igbuio_pci_irqhandler;
	udev->info.irqcontrol = igbuio_pci_irqcontrol;
#ifdef CONFIG_XEN_DOM0
	/* check if the driver runs on Xen Dom0 */
	if (xen_initial_domain())
		udev->info.mmap = igbuio_dom0_pci_mmap;
#endif
	udev->info.priv = udev;
	udev->pdev = dev;

	switch (igbuio_intr_mode_preferred) {
	case RTE_INTR_MODE_MSIX:
		/* Only 1 msi-x vector needed */
		msix_entry.entry = 0;
		if (pci_enable_msix(dev, &msix_entry, 1) == 0) {
			dev_dbg(&dev->dev, "using MSI-X");
			udev->info.irq = msix_entry.vector;
			udev->mode = RTE_INTR_MODE_MSIX;
			break;
		}
		/* fall back to INTX */
	case RTE_INTR_MODE_LEGACY:
		if (pci_intx_mask_supported(dev)) {
			dev_dbg(&dev->dev, "using INTX");
			udev->info.irq_flags = IRQF_SHARED;
			udev->info.irq = dev->irq;
			udev->mode = RTE_INTR_MODE_LEGACY;
			break;
		}
		dev_notice(&dev->dev, "PCI INTX mask not supported\n");
		/* fall back to no IRQ */
	case RTE_INTR_MODE_NONE:
		udev->mode = RTE_INTR_MODE_NONE;
		udev->info.irq = 0;
		break;

	default:
		dev_err(&dev->dev, "invalid IRQ mode %u",
			igbuio_intr_mode_preferred);
		err = -EINVAL;
		goto fail_release_iomem;
	}

	err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp);
	if (err != 0)
		goto fail_release_iomem;

	/* initialize the corresponding netdev */
	netdev = alloc_etherdev(sizeof(struct net_adapter));
	if (!netdev) {
		err = -ENOMEM;
		goto fail_alloc_etherdev;
	}
	SET_NETDEV_DEV(netdev, pci_dev_to_dev(dev));
	adapter = netdev_priv(netdev);
	adapter->netdev = netdev;
	adapter->pdev = dev;
	udev->adapter = adapter;
	adapter->type = retrieve_dev_specs(id);
	/* retrieve the device-specific MAC address */
	switch (adapter->type) {
	case IXGBE:
		hw_i = &adapter->hw._ixgbe_hw;
		hw_i->back = adapter;
		hw_i->hw_addr = ioremap(pci_resource_start(dev, 0),
				       pci_resource_len(dev, 0));
		if (!hw_i->hw_addr) {
			err = -EIO;
			goto fail_ioremap;
		}
		break;
	case IGB:
		hw_e = &adapter->hw._e1000_hw;
		hw_e->back = adapter;
		hw_e->hw_addr = ioremap(pci_resource_start(dev, 0),
				      pci_resource_len(dev, 0));
		if (!hw_e->hw_addr) {
			err = -EIO;
			goto fail_ioremap;
		}
		break;
	}

	netdev_assign_netdev_ops(netdev);
	strncpy(netdev->name, pci_name(dev), sizeof(netdev->name) - 1);
	retrieve_dev_addr(netdev, adapter);
	strcpy(netdev->name, "dpdk%d");
	err = register_netdev(netdev);
	if (err)
		goto fail_ioremap;
	adapter->netdev_registered = true;

	if (sscanf(netdev->name, "dpdk%hu", &adapter->bd_number) <= 0) {
		err = -EINVAL;
		goto fail_bdnumber;
	}

	dev_info(&dev->dev, "ifindex picked: %hu\n", adapter->bd_number);

	/* register uio driver */
	err = uio_register_device(&dev->dev, &udev->info);
	if (err != 0)
		goto fail_bdnumber;

	pci_set_drvdata(dev, udev);

	dev_info(&dev->dev, "uio device registered with irq %lx\n",
		 udev->info.irq);

	/* reset nstats */
	memset(&adapter->nstats, 0, sizeof(struct net_device_stats));

	return 0;

fail_bdnumber:
	unregister_netdev(netdev);
fail_ioremap:
	free_netdev(netdev);
fail_alloc_etherdev:
	sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp);
fail_release_iomem:
	igbuio_pci_release_iomem(&udev->info);
	if (udev->mode == RTE_INTR_MODE_MSIX)
		pci_disable_msix(udev->pdev);
	pci_release_regions(dev);
fail_disable:
	pci_disable_device(dev);
fail_free:
	kfree(udev);

	return err;
}
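
The MSI-X-then-INTx fallback in the switch above predates pci_alloc_irq_vectors(); on kernels that provide it, the same policy fits in one call. A hedged sketch, not part of the original driver (my_setup_irq is illustrative):

static int my_setup_irq(struct pci_dev *pdev)
{
	/* one vector, preferring MSI-X, falling back to legacy INTx */
	int nvec = pci_alloc_irq_vectors(pdev, 1, 1,
					 PCI_IRQ_MSIX | PCI_IRQ_LEGACY);

	if (nvec < 0)
		return nvec;

	return pci_irq_vector(pdev, 0);	/* the Linux IRQ number to request */
}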
Example #6
/**
 * genwqe_pci_setup() - Allocate PCIe related resources for our card
 */
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
	int err, bars;
	struct pci_dev *pci_dev = cd->pci_dev;

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	err = pci_enable_device_mem(pci_dev);
	if (err) {
		dev_err(&pci_dev->dev,
			"err: failed to enable pci memory (err=%d)\n", err);
		goto err_out;
	}

	/* Reserve PCI I/O and memory resources */
	err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (err) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, err);
		err = -EIO;
		goto err_disable_device;
	}

	/* check for 64-bit DMA address support (DAC) */
	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
		if (err) {
			dev_err(&pci_dev->dev,
				"err: DMA64 consistent mask error\n");
			err = -EIO;
			goto out_release_resources;
		}
	/* check for 32-bit DMA address support (SAC) */
	} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
		err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pci_dev->dev,
				"err: DMA32 consistent mask error\n");
			err = -EIO;
			goto out_release_resources;
		}
	} else {
		dev_err(&pci_dev->dev,
			"err: neither DMA32 nor DMA64 supported\n");
		err = -EIO;
		goto out_release_resources;
	}

	pci_set_master(pci_dev);
	pci_enable_pcie_error_reporting(pci_dev);

	/* EEH recovery requires PCIe fundamental reset */
	pci_dev->needs_freset = 1;

	/* request complete BAR-0 space (length = 0) */
	cd->mmio_len = pci_resource_len(pci_dev, 0);
	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		err = -ENOMEM;
		goto out_release_resources;
	}

	cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
	if (cd->num_vfs < 0)
		cd->num_vfs = 0;

	err = genwqe_read_ids(cd);
	if (err)
		goto out_iounmap;

	return 0;

 out_iounmap:
	pci_iounmap(pci_dev, cd->mmio);
 out_release_resources:
	pci_release_selected_regions(pci_dev, bars);
 err_disable_device:
	pci_disable_device(pci_dev);
 err_out:
	return err;
}
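
The cascaded pci_set_dma_mask()/pci_set_consistent_dma_mask() calls above are the older idiom; current kernels collapse each pair into dma_set_mask_and_coherent(), which turns the 64-then-32-bit fallback into a few lines. A hedged sketch, not from the source:

static int my_set_dma_masks(struct pci_dev *pdev)
{
	/* try 64-bit DMA first (DAC), then fall back to 32-bit (SAC) */
	int err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

	if (err)
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
	return err;
}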