Example #1
0
/**
 * genwqe_bus_reset() - Card recovery
 *
 * pci_reset_function() will recover the device and ensure that the
 * registers are accessible again when it completes with success. If
 * not, the card will stay dead and registers will be unaccessible
 * still.
 */
static int genwqe_bus_reset(struct genwqe_dev *cd)
{
	int bars, rc = 0;
	struct pci_dev *pci_dev = cd->pci_dev;
	void __iomem *mmio;

	if (cd->err_inject & GENWQE_INJECT_BUS_RESET_FAILURE)
		return -EIO;

	mmio = cd->mmio;
	cd->mmio = NULL;
	pci_iounmap(pci_dev, mmio);

	bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	pci_release_selected_regions(pci_dev, bars);

	/*
	 * Firmware/BIOS might change memory mapping during bus reset.
	 * Settings like enable bus-mastering, ... are backuped and
	 * restored by the pci_reset_function().
	 */
	dev_dbg(&pci_dev->dev, "[%s] pci_reset function ...\n", __func__);
	rc = pci_reset_function(pci_dev);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: failed reset func (rc %d)\n", __func__, rc);
		return rc;
	}
	dev_dbg(&pci_dev->dev, "[%s] done with rc=%d\n", __func__, rc);

	/*
	 * Here is the right spot to clear the register read
	 * failure. pci_bus_reset() does this job in real systems.
	 */
	cd->err_inject &= ~(GENWQE_INJECT_HARDWARE_FAILURE |
			    GENWQE_INJECT_GFIR_FATAL |
			    GENWQE_INJECT_GFIR_INFO);

	rc = pci_request_selected_regions(pci_dev, bars, genwqe_driver_name);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, rc);
		return -EIO;
	}

	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		return -ENOMEM;
	}
	return 0;
}
Example #2
0
/**
 * setup_mmio_scc() - Reserve and map the SCC controller's MMIO BARs
 * @dev:  PCI device providing BAR0 (control regs) and BAR1 (DMA regs)
 * @name: owner name used when reserving the PCI regions
 *
 * Finds a free slot in scc_ports[], reserves BARs 0 and 1, maps both
 * register windows and records the cookies in the slot.
 *
 * Return: 1 on success; -ENOMEM if no slot is free or a mapping fails;
 * the negative error from pci_request_selected_regions() otherwise.
 */
static int setup_mmio_scc (struct pci_dev *dev, const char *name)
{
	unsigned long ctl_base = pci_resource_start(dev, 0);
	unsigned long dma_base = pci_resource_start(dev, 1);
	unsigned long ctl_size = pci_resource_len(dev, 0);
	unsigned long dma_size = pci_resource_len(dev, 1);
	void __iomem *ctl_addr;
	void __iomem *dma_addr;
	int i, ret;

	/* Find the first unused port slot (ctl == 0 marks it free) */
	for (i = 0; i < MAX_HWIFS; i++) {
		if (scc_ports[i].ctl == 0)
			break;
	}
	if (i >= MAX_HWIFS)
		return -ENOMEM;

	/* (1 << 2) - 1 == 0x3: reserve BAR0 and BAR1 only */
	ret = pci_request_selected_regions(dev, (1 << 2) - 1, name);
	if (ret < 0) {
		printk(KERN_ERR "%s: can't reserve resources\n", name);
		return ret;
	}

	if ((ctl_addr = ioremap(ctl_base, ctl_size)) == NULL)
		goto fail_0;

	if ((dma_addr = ioremap(dma_base, dma_size)) == NULL)
		goto fail_1;

	pci_set_master(dev);
	scc_ports[i].ctl = (unsigned long)ctl_addr;
	scc_ports[i].dma = (unsigned long)dma_addr;
	pci_set_drvdata(dev, (void *) &scc_ports[i]);

	return 1;

 fail_1:
	iounmap(ctl_addr);
 fail_0:
	/*
	 * Fix: the regions reserved above were leaked on the ioremap
	 * failure paths; release them before bailing out.
	 */
	pci_release_selected_regions(dev, (1 << 2) - 1);
	return -ENOMEM;
}
Example #3
0
/*
 * I/O Port BAR access
 *
 * Transfers up to @count bytes between the user buffer @buf and the
 * I/O-port BAR selected by the index encoded in *@ppos.  The BAR is
 * lazily reserved and mapped on first access and the mapping is cached
 * in vdev->barmap[].  The transfer is split into naturally aligned
 * 4-, 2- and 1-byte chunks.
 *
 * Returns the number of bytes transferred on success, negative errno
 * on failure.
 */
ssize_t vfio_pci_io_readwrite(struct vfio_pci_device *vdev, char __user *buf,
                              size_t count, loff_t *ppos, bool iswrite)
{
    struct pci_dev *pdev = vdev->pdev;
    loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
    int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
    void __iomem *io;
    size_t done = 0;

    if (!pci_resource_start(pdev, bar))
        return -EINVAL;

    if (pos + count > pci_resource_len(pdev, bar))
        return -EINVAL;

    /* First access: reserve the region and map the BAR */
    if (!vdev->barmap[bar]) {
        int ret;

        ret = pci_request_selected_regions(pdev, 1 << bar, "vfio");
        if (ret)
            return ret;

        vdev->barmap[bar] = pci_iomap(pdev, bar, 0);

        if (!vdev->barmap[bar]) {
            pci_release_selected_regions(pdev, 1 << bar);
            return -EINVAL;
        }
    }

    io = vdev->barmap[bar];

    while (count) {
        int filled;

        /*
         * Fix: this used to test "count >= 3", which let a 3-byte
         * request fall into the 4-byte path, overrunning the user
         * buffer and underflowing the unsigned count.
         */
        if (!(pos % 4) && count >= 4) {
            __le32 val;

            if (iswrite) {
                if (copy_from_user(&val, buf, 4))
                    return -EFAULT;

                iowrite32(le32_to_cpu(val), io + pos);
            } else {
                val = cpu_to_le32(ioread32(io + pos));

                if (copy_to_user(buf, &val, 4))
                    return -EFAULT;
            }

            filled = 4;

        } else if ((pos % 2) == 0 && count >= 2) {
            __le16 val;

            if (iswrite) {
                if (copy_from_user(&val, buf, 2))
                    return -EFAULT;

                iowrite16(le16_to_cpu(val), io + pos);
            } else {
                val = cpu_to_le16(ioread16(io + pos));

                if (copy_to_user(buf, &val, 2))
                    return -EFAULT;
            }

            filled = 2;
        } else {
            u8 val;

            if (iswrite) {
                if (copy_from_user(&val, buf, 1))
                    return -EFAULT;

                iowrite8(val, io + pos);
            } else {
                val = ioread8(io + pos);

                if (copy_to_user(buf, &val, 1))
                    return -EFAULT;
            }

            filled = 1;
        }

        count -= filled;
        done += filled;
        buf += filled;
        pos += filled;
    }

    *ppos += done;

    return done;
}
Example #4
0
/*
 * MMIO BAR access
 * We handle two excluded ranges here as well, if the user tries to read
 * the ROM beyond what PCI tells us is available or the MSI-X table region,
 * we return 0xFF and writes are dropped.
 *
 * The excluded window is [x_start, x_end): for the ROM BAR it is the
 * tail past the mapped ROM size; for the MSI-X BAR it is the table
 * region.  Accesses inside the window are emulated, everything else is
 * forwarded to the device in aligned 4-/2-/1-byte chunks.
 *
 * Returns the number of bytes transferred, -EFAULT if a user copy
 * failed mid-transfer, or a negative errno on setup failure.
 */
ssize_t vfio_pci_mem_readwrite(struct vfio_pci_device *vdev, char __user *buf,
                               size_t count, loff_t *ppos, bool iswrite)
{
    struct pci_dev *pdev = vdev->pdev;
    loff_t pos = *ppos & VFIO_PCI_OFFSET_MASK;
    int bar = VFIO_PCI_OFFSET_TO_INDEX(*ppos);
    void __iomem *io;
    resource_size_t end;
    size_t done = 0;
    size_t x_start = 0, x_end = 0; /* excluded range */

    if (!pci_resource_start(pdev, bar))
        return -EINVAL;

    end = pci_resource_len(pdev, bar);

    if (pos > end)
        return -EINVAL;

    if (pos == end)
        return 0;

    /* Truncate the request to the BAR size */
    if (pos + count > end)
        count = end - pos;

    if (bar == PCI_ROM_RESOURCE) {
        /* pci_map_rom() reports the actual ROM size in x_start */
        io = pci_map_rom(pdev, &x_start);
        x_end = end;
    } else {
        /* First access: reserve the region and map the BAR */
        if (!vdev->barmap[bar]) {
            int ret;

            ret = pci_request_selected_regions(pdev, 1 << bar,
                                               "vfio");
            if (ret)
                return ret;

            vdev->barmap[bar] = pci_iomap(pdev, bar, 0);

            if (!vdev->barmap[bar]) {
                pci_release_selected_regions(pdev, 1 << bar);
                return -EINVAL;
            }
        }

        io = vdev->barmap[bar];

        if (bar == vdev->msix_bar) {
            x_start = vdev->msix_offset;
            x_end = vdev->msix_offset + vdev->msix_size;
        }
    }

    if (!io)
        return -EINVAL;

    while (count) {
        size_t fillable, filled;

        /* How many bytes until the excluded window (0 if inside it) */
        if (pos < x_start)
            fillable = x_start - pos;
        else if (pos >= x_end)
            fillable = end - pos;
        else
            fillable = 0;

        if (fillable >= 4 && !(pos % 4) && (count >= 4)) {
            __le32 val;

            if (iswrite) {
                if (copy_from_user(&val, buf, 4))
                    goto out;

                iowrite32(le32_to_cpu(val), io + pos);
            } else {
                val = cpu_to_le32(ioread32(io + pos));

                if (copy_to_user(buf, &val, 4))
                    goto out;
            }

            filled = 4;
        } else if (fillable >= 2 && !(pos % 2) && (count >= 2)) {
            __le16 val;

            if (iswrite) {
                if (copy_from_user(&val, buf, 2))
                    goto out;

                iowrite16(le16_to_cpu(val), io + pos);
            } else {
                val = cpu_to_le16(ioread16(io + pos));

                if (copy_to_user(buf, &val, 2))
                    goto out;
            }

            filled = 2;
        } else if (fillable) {
            u8 val;

            if (iswrite) {
                if (copy_from_user(&val, buf, 1))
                    goto out;

                iowrite8(val, io + pos);
            } else {
                val = ioread8(io + pos);

                if (copy_to_user(buf, &val, 1))
                    goto out;
            }

            filled = 1;
        } else {
            /* Drop writes, fill reads with FF */

            /*
             * Fix: clamp to the requested count.  The unclamped
             * "x_end - pos" could exceed count, underflowing the
             * unsigned count below and running past the request.
             */
            filled = x_end - pos;
            if (filled > count)
                filled = count;

            if (!iswrite) {
                char val = 0xFF;
                size_t i;

                for (i = 0; i < filled; i++) {
                    if (put_user(val, buf + i))
                        goto out;
                }
            }
        }

        count -= filled;
        done += filled;
        buf += filled;
        pos += filled;
    }

    *ppos += done;

out:
    if (bar == PCI_ROM_RESOURCE)
        pci_unmap_rom(pdev, io);

    return count ? -EFAULT : done;
}
Example #5
0
/**
 * genwqe_pci_setup() - Allocate PCIe related resources for our card
 *
 * Enables the device, reserves its memory BARs, configures the DMA
 * mask (64-bit preferred, 32-bit fallback), maps BAR0 and reads the
 * card IDs.  On any failure all previously acquired resources are
 * released in reverse order via the goto chain.
 */
static int genwqe_pci_setup(struct genwqe_dev *cd)
{
	struct pci_dev *pci_dev = cd->pci_dev;
	int rc, mem_bars;

	mem_bars = pci_select_bars(pci_dev, IORESOURCE_MEM);
	rc = pci_enable_device_mem(pci_dev);
	if (rc) {
		dev_err(&pci_dev->dev,
			"err: failed to enable pci memory (err=%d)\n", rc);
		goto out;
	}

	/* Reserve PCI I/O and memory resources */
	rc = pci_request_selected_regions(pci_dev, mem_bars,
					  genwqe_driver_name);
	if (rc) {
		dev_err(&pci_dev->dev,
			"[%s] err: request bars failed (%d)\n", __func__, rc);
		rc = -EIO;
		goto out_disable_device;
	}

	/* check for 64-bit DMA address supported (DAC) */
	if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(64))) {
		rc = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(64));
		if (rc) {
			dev_err(&pci_dev->dev,
				"err: DMA64 consistent mask error\n");
			rc = -EIO;
			goto out_release_regions;
		}
	/* check for 32-bit DMA address supported (SAC) */
	} else if (!pci_set_dma_mask(pci_dev, DMA_BIT_MASK(32))) {
		rc = pci_set_consistent_dma_mask(pci_dev, DMA_BIT_MASK(32));
		if (rc) {
			dev_err(&pci_dev->dev,
				"err: DMA32 consistent mask error\n");
			rc = -EIO;
			goto out_release_regions;
		}
	} else {
		dev_err(&pci_dev->dev,
			"err: neither DMA32 nor DMA64 supported\n");
		rc = -EIO;
		goto out_release_regions;
	}

	pci_set_master(pci_dev);
	pci_enable_pcie_error_reporting(pci_dev);

	/* EEH recovery requires PCIe fundamental reset */
	pci_dev->needs_freset = 1;

	/* request complete BAR-0 space (length = 0) */
	cd->mmio_len = pci_resource_len(pci_dev, 0);
	cd->mmio = pci_iomap(pci_dev, 0, 0);
	if (cd->mmio == NULL) {
		dev_err(&pci_dev->dev,
			"[%s] err: mapping BAR0 failed\n", __func__);
		rc = -ENOMEM;
		goto out_release_regions;
	}

	cd->num_vfs = pci_sriov_get_totalvfs(pci_dev);
	if (cd->num_vfs < 0)
		cd->num_vfs = 0;

	rc = genwqe_read_ids(cd);
	if (rc)
		goto out_unmap_bar0;

	return 0;

 out_unmap_bar0:
	pci_iounmap(pci_dev, cd->mmio);
 out_release_regions:
	pci_release_selected_regions(pci_dev, mem_bars);
 out_disable_device:
	pci_disable_device(pci_dev);
 out:
	return rc;
}