/*
 * scc_remove() - detach the SCC PATA controller from the PCI core.
 *
 * Undoes probe-time setup in reverse order: unregister the IDE host,
 * unmap both register windows, release the claimed BARs (mask covers
 * BARs 0 and 1) and wipe the per-device bookkeeping.
 */
static void __devexit scc_remove(struct pci_dev *dev)
{
	struct scc_ports *scc = pci_get_drvdata(dev);

	ide_host_remove(scc->host);

	iounmap((void *)scc->dma);
	iounmap((void *)scc->ctl);
	pci_release_selected_regions(dev, (1 << 2) - 1);
	memset(scc, 0, sizeof(*scc));
}
/** * genwqe_bus_reset() - Card recovery * * pci_reset_function() will recover the device and ensure that the * registers are accessible again when it completes with success. If * not, the card will stay dead and registers will be unaccessible * still. */ static int genwqe_bus_reset(struct genwqe_dev *cd) { int bars, rc = 0; struct pci_dev *pci_dev = cd->pci_dev; void __iomem *mmio; if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE) return -EIO; mmio = cd->mmio; cd->mmio = NULL; pci_iounmap(pci_dev, mmio); bars = pci_select_bars(pci_dev, IORESOURCE_MEM); pci_release_selected_regions(pci_dev, bars); /* * Firmware/BIOS might change memory mapping during bus reset. * Settings like enable bus-mastering, ... are backuped and * restored by the pci_reset_function(). */ dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__); rc = pci_reset_function(pci_dev); if (rc) { dev_err(&pci_dev->dev, "[%s] err: failed reset func (rc %d)\n", __func__, rc); return rc; } dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc); /* * Here is the right spot to clear the register read * failure. pci_bus_reset() does this job in real systems. */ cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE | GENWQE_INJECT_GFIR_FATAL | GENWQE_INJECT_GFIR_INFO); rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); if (rc) { dev_err(&pci_dev->dev, "[%s] err: request bars failed (%d)\n", __func__, rc); return -EIO; } cd->mmio = pci_iomap(pci_dev, 0, 0); if (cd->mmio == NULL) { dev_err(&pci_dev->dev, "[%s] err: mapping BAR0 failed\n", __func__); return -ENOMEM; } return 0; }
/**
 * genwqe_pci_remove() - Free PCIe related resources for our card
 */
static void genwqe_pci_remove(struct genwqe_dev *cd)
{
	struct pci_dev *pdev = cd->pci_dev;

	/* mmio may already be gone, e.g. after a failed bus reset */
	if (cd->mmio)
		pci_iounmap(pdev, cd->mmio);

	pci_release_selected_regions(pdev,
				     pci_select_bars(pdev, IORESOURCE_MEM));
	pci_disable_device(pdev);
}
/*
 * I/O Port BAR access.
 *
 * Transfers up to @count bytes between the user buffer @buf and the I/O
 * port BAR selected by the offset encoded in @ppos, using the widest
 * naturally-aligned accesses possible (4/2/1 bytes). Returns the number
 * of bytes transferred, or a negative errno.
 */
ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev, char __user *buf,
			      size_t count, loff_t *ppos, bool iswrite)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	void __iomem *io;
	size_t done = 0;

	if (!pci_resource_start(pdev, bar))
		return -EINVAL;

	if (pos + count > pci_resource_len(pdev, bar))
		return -EINVAL;

	/* Lazily reserve and map the BAR on first access. */
	if (!vdev->barmap[bar]) {
		int ret;

		ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
		if (ret)
			return ret;

		vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
		if (!vdev->barmap[bar]) {
			pci_release_selected_regions(pdev, 1 << bar);
			return -EINVAL;
		}
	}

	io = vdev->barmap[bar];

	while (count) {
		int filled;

		/*
		 * BUGFIX: a dword access needs count >= 4, not >= 3. The
		 * old test copied 4 bytes to/from a possibly 3-byte user
		 * buffer and then underflowed the unsigned 'count'
		 * (count -= 4 with count == 3 wraps size_t).
		 */
		if (count >= 4 && !(pos % 4)) {
			__le32 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 4))
					return -EFAULT;

				iowrite32(le32_to_cpu(val), io + pos);
			} else {
				val = cpu_to_le32(ioread32(io + pos));

				if (copy_to_user(buf, &val, 4))
					return -EFAULT;
			}

			filled = 4;
		} else if ((pos % 2) == 0 && count >= 2) {
			__le16 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 2))
					return -EFAULT;

				iowrite16(le16_to_cpu(val), io + pos);
			} else {
				val = cpu_to_le16(ioread16(io + pos));

				if (copy_to_user(buf, &val, 2))
					return -EFAULT;
			}

			filled = 2;
		} else {
			u8 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 1))
					return -EFAULT;

				iowrite8(val, io + pos);
			} else {
				val = ioread8(io + pos);

				if (copy_to_user(buf, &val, 1))
					return -EFAULT;
			}

			filled = 1;
		}

		count -= filled;
		done += filled;
		buf += filled;
		pos += filled;
	}

	*ppos += done;

	return done;
}
/*
 * MMIO BAR access
 * We handle two excluded ranges here as well, if the user tries to read
 * the ROM beyond what PCI tells us is available or the MSI-X table region,
 * we return 0xFF and writes are dropped.
 */
ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
			       size_t count, loff_t *ppos, bool iswrite)
{
	struct pci_dev *pdev = vdev->pdev;
	loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
	int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
	void __iomem *io;
	resource_size_t end;
	size_t done = 0;
	size_t x_start = 0, x_end = 0;	/* excluded range */

	if (!pci_resource_start(pdev, bar))
		return -EINVAL;

	end = pci_resource_len(pdev, bar);

	if (pos > end)
		return -EINVAL;

	if (pos == end)
		return 0;

	if (pos + count > end)
		count = end - pos;

	if (bar == PCI_ROM_RESOURCE) {
		/* x_start receives the mapped ROM size; reads beyond it FF */
		io = pci_map_rom(pdev, &x_start);
		x_end = end;
	} else {
		/* Lazily reserve and map the BAR on first access. */
		if (!vdev->barmap[bar]) {
			int ret;

			ret = pci_request_selected_regions(pdev, 1 << bar,
							   "vfio");
			if (ret)
				return ret;

			vdev->barmap[bar] = pci_iomap(pdev, bar, 0);
			if (!vdev->barmap[bar]) {
				pci_release_selected_regions(pdev, 1 << bar);
				return -EINVAL;
			}
		}

		io = vdev->barmap[bar];

		if (bar == vdev->msix_bar) {
			x_start = vdev->msix_offset;
			x_end = vdev->msix_offset + vdev->msix_size;
		}
	}

	if (!io)
		return -EINVAL;

	while (count) {
		size_t fillable, filled;

		if (pos < x_start)
			fillable = x_start - pos;
		else if (pos >= x_end)
			fillable = end - pos;
		else
			fillable = 0;

		if (fillable >= 4 && !(pos % 4) && (count >= 4)) {
			__le32 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 4))
					goto out;

				iowrite32(le32_to_cpu(val), io + pos);
			} else {
				val = cpu_to_le32(ioread32(io + pos));

				if (copy_to_user(buf, &val, 4))
					goto out;
			}

			filled = 4;
		} else if (fillable >= 2 && !(pos % 2) && (count >= 2)) {
			__le16 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 2))
					goto out;

				iowrite16(le16_to_cpu(val), io + pos);
			} else {
				val = cpu_to_le16(ioread16(io + pos));

				if (copy_to_user(buf, &val, 2))
					goto out;
			}

			filled = 2;
		} else if (fillable) {
			u8 val;

			if (iswrite) {
				if (copy_from_user(&val, buf, 1))
					goto out;

				iowrite8(val, io + pos);
			} else {
				val = ioread8(io + pos);

				if (copy_to_user(buf, &val, 1))
					goto out;
			}

			filled = 1;
		} else {
			/*
			 * Drop writes, fill reads with FF.
			 *
			 * BUGFIX: clamp the fill length to 'count'. The
			 * excluded range may extend past the user buffer;
			 * filling x_end - pos bytes unconditionally both
			 * overran the buffer and made the unsigned 'count'
			 * underflow in 'count -= filled' below.
			 */
			filled = x_end - pos;
			if (filled > count)
				filled = count;

			if (!iswrite) {
				char val = 0xFF;
				size_t i;

				for (i = 0; i < filled; i++) {
					if (put_user(val, buf + i))
						goto out;
				}
			}
		}

		count -= filled;
		done += filled;
		buf += filled;
		pos += filled;
	}

	*ppos += done;

out:
	if (bar == PCI_ROM_RESOURCE)
		pci_unmap_rom(pdev, io);

	return count ? -EFAULT : done;
}
static int __devinit #else static int #endif igbuio_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) { struct rte_uio_pci_dev *udev; struct msix_entry msix_entry; int err; /* essential vars for configuring the device with net_device */ struct net_device *netdev; struct net_adapter *adapter = NULL; struct ixgbe_hw *hw_i = NULL; struct e1000_hw *hw_e = NULL; udev = kzalloc(sizeof(struct rte_uio_pci_dev), GFP_KERNEL); if (!udev) return -ENOMEM; /* * enable device: ask low-level code to enable I/O and * memory */ err = pci_enable_device(dev); if (err != 0) { dev_err(&dev->dev, "Cannot enable PCI device\n"); goto fail_free; } /* * reserve device's PCI memory regions for use by this * module */ err = pci_request_regions(dev, "igb_uio"); if (err != 0) { dev_err(&dev->dev, "Cannot request regions\n"); goto fail_disable; } /* enable bus mastering on the device */ pci_set_master(dev); /* remap IO memory */ err = igbuio_setup_bars(dev, &udev->info); if (err != 0) goto fail_release_iomem; /* set 64-bit DMA mask */ err = pci_set_dma_mask(dev, DMA_BIT_MASK(64)); if (err != 0) { dev_err(&dev->dev, "Cannot set DMA mask\n"); goto fail_release_iomem; } err = pci_set_consistent_dma_mask(dev, DMA_BIT_MASK(64)); if (err != 0) { dev_err(&dev->dev, "Cannot set consistent DMA mask\n"); goto fail_release_iomem; } /* fill uio infos */ udev->info.name = "igb_uio"; udev->info.version = "0.1"; udev->info.handler = igbuio_pci_irqhandler; udev->info.irqcontrol = igbuio_pci_irqcontrol; #ifdef CONFIG_XEN_DOM0 /* check if the driver run on Xen Dom0 */ if (xen_initial_domain()) udev->info.mmap = igbuio_dom0_pci_mmap; #endif udev->info.priv = udev; udev->pdev = dev; switch (igbuio_intr_mode_preferred) { case RTE_INTR_MODE_MSIX: /* Only 1 msi-x vector needed */ msix_entry.entry = 0; if (pci_enable_msix(dev, &msix_entry, 1) == 0) { dev_dbg(&dev->dev, "using MSI-X"); udev->info.irq = msix_entry.vector; udev->mode = RTE_INTR_MODE_MSIX; break; } /* fall back to INTX */ case 
RTE_INTR_MODE_LEGACY: if (pci_intx_mask_supported(dev)) { dev_dbg(&dev->dev, "using INTX"); udev->info.irq_flags = IRQF_SHARED; udev->info.irq = dev->irq; udev->mode = RTE_INTR_MODE_LEGACY; break; } dev_notice(&dev->dev, "PCI INTX mask not supported\n"); /* fall back to no IRQ */ case RTE_INTR_MODE_NONE: udev->mode = RTE_INTR_MODE_NONE; udev->info.irq = 0; break; default: dev_err(&dev->dev, "invalid IRQ mode %u", igbuio_intr_mode_preferred); err = -EINVAL; goto fail_release_iomem; } err = sysfs_create_group(&dev->dev.kobj, &dev_attr_grp); if (err != 0) goto fail_release_iomem; /* initialize the corresponding netdev */ netdev = alloc_etherdev(sizeof(struct net_adapter)); if (!netdev) { err = -ENOMEM; goto fail_alloc_etherdev; } SET_NETDEV_DEV(netdev, pci_dev_to_dev(dev)); adapter = netdev_priv(netdev); adapter->netdev = netdev; adapter->pdev = dev; udev->adapter = adapter; adapter->type = retrieve_dev_specs(id); /* recover device-specific mac address */ switch (adapter->type) { case IXGBE: hw_i = &adapter->hw._ixgbe_hw; hw_i->back = adapter; hw_i->hw_addr = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); if (!hw_i->hw_addr) { err = -EIO; goto fail_ioremap; } break; case IGB: hw_e = &adapter->hw._e1000_hw; hw_e->back = adapter; hw_e->hw_addr = ioremap(pci_resource_start(dev, 0), pci_resource_len(dev, 0)); if (!hw_e->hw_addr) { err = -EIO; goto fail_ioremap; } break; } netdev_assign_netdev_ops(netdev); strncpy(netdev->name, pci_name(dev), sizeof(netdev->name) - 1); retrieve_dev_addr(netdev, adapter); strcpy(netdev->name, "dpdk%d"); err = register_netdev(netdev); if (err) goto fail_ioremap; adapter->netdev_registered = true; if (sscanf(netdev->name, "dpdk%hu", &adapter->bd_number) <= 0) goto fail_bdnumber; //printk(KERN_DEBUG "ifindex picked: %hu\n", adapter->bd_number); dev_info(&dev->dev, "ifindex picked: %hu\n", adapter->bd_number); /* register uio driver */ err = uio_register_device(&dev->dev, &udev->info); if (err != 0) goto fail_remove_group; 
pci_set_drvdata(dev, udev); dev_info(&dev->dev, "uio device registered with irq %lx\n", udev->info.irq); /* reset nstats */ memset(&adapter->nstats, 0, sizeof(struct net_device_stats)); return 0; fail_bdnumber: fail_ioremap: free_netdev(netdev); fail_alloc_etherdev: pci_release_selected_regions(dev, pci_select_bars(dev, IORESOURCE_MEM)); fail_remove_group: sysfs_remove_group(&dev->dev.kobj, &dev_attr_grp); fail_release_iomem: igbuio_pci_release_iomem(&udev->info); if (udev->mode == RTE_INTR_MODE_MSIX) pci_disable_msix(udev->pdev); pci_release_regions(dev); fail_disable: pci_disable_device(dev); fail_free: kfree(udev); return err; }
/** * genwqe_pci_setup() - Allocate PCIe related resources for our card */ static int genwqe_pci_setup(struct genwqe_dev *cd) { int err, bars; struct pci_dev *pci_dev = cd->pci_dev; bars = pci_select_bars(pci_dev, IORESOURCE_MEM); err = pci_enable_device_mem(pci_dev); if (err) { dev_err(&pci_dev->dev, "err: failed to enable pci memory (err=%d)\n", err); goto err_out; } /* Reserve PCI I/O and memory resources */ err = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name); if (err) { dev_err(&pci_dev->dev, "[%s] err: request bars failed (%d)\n", __func__, err); err = -EIO; goto err_disable_device; } /* check for 64-bit DMA address supported (DAC) */ if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) { err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64)); if (err) { dev_err(&pci_dev->dev, "err: DMA64 consistent mask error\n"); err = -EIO; goto out_release_resources; } /* check for 32-bit DMA address supported (SAC) */ } else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) { err = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32)); if (err) { dev_err(&pci_dev->dev, "err: DMA32 consistent mask error\n"); err = -EIO; goto out_release_resources; } } else { dev_err(&pci_dev->dev, "err: neither DMA32 nor DMA64 supported\n"); err = -EIO; goto out_release_resources; } pci_set_master(pci_dev); pci_enable_pcie_error_reporting(pci_dev); /* EEH recovery requires PCIe fundamental reset */ pci_dev->needs_freset = 1; /* request complete BAR-0 space (length = 0) */ cd->mmio_len = pci_resource_len(pci_dev, 0); cd->mmio = pci_iomap(pci_dev, 0, 0); if (cd->mmio == NULL) { dev_err(&pci_dev->dev, "[%s] err: mapping BAR0 failed\n", __func__); err = -ENOMEM; goto out_release_resources; } cd->num_vfs = pci_sriov_get_totalvfs(pci_dev); if (cd->num_vfs < 0) cd->num_vfs = 0; err = genwqe_read_ids(cd); if (err) goto out_iounmap; return 0; out_iounmap: pci_iounmap(pci_dev, cd->mmio); out_release_resources: pci_release_selected_regions(pci_dev, bars); err_disable_device: 
pci_disable_device(pci_dev); err_out: return err; }