Example #1
void
pci_iov_cfg_save(device_t dev, struct pci_devinfo *dinfo)
{
	struct pcicfg_iov *iov;

	iov = dinfo->cfg.iov;

	iov->iov_page_size = IOV_READ(dinfo, PCIR_SRIOV_PAGE_SIZE, 4);
	iov->iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
}
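
For context: the IOV_READ and IOV_WRITE helpers used throughout these examples are thin wrappers around the standard PCI config-space accessors that offset every register by iov_pos, the saved location of the PF's SR-IOV extended capability. A rough sketch of the macros, paraphrased from sys/dev/pci/pci_iov_private.h (check the header for the authoritative definitions):

/*
 * Approximate IOV_READ/IOV_WRITE definitions (paraphrased): each SR-IOV
 * register is addressed relative to iov_pos, the config-space offset of
 * the SR-IOV extended capability found at attach time.
 */
#define	IOV_READ(d, r, w) \
	pci_read_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + (r), (w))

#define	IOV_WRITE(d, r, v, w) \
	pci_write_config((d)->cfg.dev, (d)->cfg.iov->iov_pos + (r), (v), (w))
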
Example #2
static void
pci_iov_enumerate_vfs(struct pci_devinfo *dinfo, const nvlist_t *config,
    uint16_t first_rid, uint16_t rid_stride)
{
	char device_name[VF_MAX_NAME];
	const nvlist_t *device, *driver_config, *iov_config;
	device_t bus, dev, vf;
	struct pcicfg_iov *iov;
	struct pci_devinfo *vfinfo;
	int i, error;
	uint16_t vid, did, next_rid;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	next_rid = first_rid;
	vid = pci_get_vendor(dev);
	did = IOV_READ(dinfo, PCIR_SRIOV_VF_DID, 2);

	for (i = 0; i < iov->iov_num_vfs; i++, next_rid += rid_stride) {
		snprintf(device_name, sizeof(device_name), VF_PREFIX"%d", i);
		device = nvlist_get_nvlist(config, device_name);
		iov_config = nvlist_get_nvlist(device, IOV_CONFIG_NAME);
		driver_config = nvlist_get_nvlist(device, DRIVER_CONFIG_NAME);

		vf = PCI_CREATE_IOV_CHILD(bus, dev, next_rid, vid, did);
		if (vf == NULL)
			break;

		/*
		 * If we are creating passthrough devices then force the ppt
		 * driver to attach to prevent a VF driver from claiming the
		 * VFs.
		 */
		if (nvlist_get_bool(iov_config, "passthrough"))
			device_set_devclass_fixed(vf, "ppt");

		vfinfo = device_get_ivars(vf);

		vfinfo->cfg.iov = iov;
		vfinfo->cfg.vf.index = i;

		pci_iov_add_bars(iov, vfinfo);

		error = PCI_IOV_ADD_VF(dev, i, driver_config);
		if (error != 0) {
			device_printf(dev, "Failed to add VF %d\n", i);
			pci_delete_child(bus, vf);
		}
	}

	bus_generic_attach(bus);
}
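
The config nvlist walked above is expected to hold one sub-nvlist per VF, keyed "VF-0", "VF-1", and so on, each containing an infrastructure section (IOV_CONFIG_NAME) with keys such as "passthrough" and a driver-specific section (DRIVER_CONFIG_NAME). Below is a hypothetical sketch of assembling one such entry with the nv(9) API; the literal key names "VF-0", "iov" and "driver" are assumptions that should be checked against VF_PREFIX, IOV_CONFIG_NAME and DRIVER_CONFIG_NAME in the PCI IOV sources.

#include <sys/param.h>
#include <sys/nv.h>

/*
 * Hypothetical helper: build a configuration entry for VF 0 that asks
 * pci_iov_enumerate_vfs() to pin the VF to the ppt driver.  Key names
 * are assumptions; see the note above.
 */
static nvlist_t *
example_build_vf0_config(void)
{
	nvlist_t *config, *vf, *iov_cfg, *driver_cfg;

	config = nvlist_create(0);
	vf = nvlist_create(0);
	iov_cfg = nvlist_create(0);
	driver_cfg = nvlist_create(0);

	/* Infrastructure section: request passthrough (ppt) attachment. */
	nvlist_add_bool(iov_cfg, "passthrough", true);

	/* The driver section is left empty; a real PF driver defines it. */

	nvlist_move_nvlist(vf, "iov", iov_cfg);
	nvlist_move_nvlist(vf, "driver", driver_cfg);
	nvlist_move_nvlist(config, "VF-0", vf);

	return (config);
}
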
Example #3
static int
pci_iov_config_page_size(struct pci_devinfo *dinfo)
{
	uint32_t page_cap, page_size;

	page_cap = IOV_READ(dinfo, PCIR_SRIOV_PAGE_CAP, 4);

	/*
	 * If the system page size is less than the smallest SR-IOV page size
	 * then round up to the smallest SR-IOV page size.
	 */
	if (PAGE_SHIFT < PCI_SRIOV_BASE_PAGE_SHIFT)
		page_size = (1 << 0);
	else
		page_size = (1 << (PAGE_SHIFT - PCI_SRIOV_BASE_PAGE_SHIFT));

	/* Check that the device supports the system page size. */
	if (!(page_size & page_cap))
		return (ENXIO);

	IOV_WRITE(dinfo, PCIR_SRIOV_PAGE_SIZE, page_size, 4);
	return (0);
}
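
The capability register read here is a bit mask in which, per the SR-IOV specification, bit n corresponds to a supported page size of 2^(n + 12) bytes, which is why the code shifts by PAGE_SHIFT - PCI_SRIOV_BASE_PAGE_SHIFT (the base shift being 12, i.e. 4 KB). A worked example, assuming those values:

/*
 * Worked example, assuming PCI_SRIOV_BASE_PAGE_SHIFT == 12:
 *
 *   4 KB system pages (PAGE_SHIFT == 12):
 *       page_size = 1 << (12 - 12) = 0x1   -> request bit 0 (4 KB)
 *
 *   16 KB system pages (PAGE_SHIFT == 14):
 *       page_size = 1 << (14 - 12) = 0x4   -> request bit 2 (16 KB)
 *
 * The request fails with ENXIO unless the same bit is also set in the
 * device's PCIR_SRIOV_PAGE_CAP mask read above.
 */
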
Example #4
static int
pci_iov_delete(struct cdev *cdev)
{
	device_t bus, dev, vf, *devlist;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int i, error, devcount;
	uint32_t iov_ctl;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	devlist = NULL;

	if (iov->iov_flags & IOV_BUSY) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}

	if (iov->iov_num_vfs == 0) {
		mtx_unlock(&Giant);
		return (ECHILD);
	}

	iov->iov_flags |= IOV_BUSY;

	error = device_get_children(bus, &devlist, &devcount);

	if (error != 0)
		goto out;

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (!pci_iov_is_child_vf(iov, vf))
			continue;

		error = device_detach(vf);
		if (error != 0) {
			device_printf(dev,
			   "Could not disable SR-IOV: failed to detach VF %s\n",
			    device_get_nameunit(vf));
			goto out;
		}
	}

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (pci_iov_is_child_vf(iov, vf))
			device_delete_child(bus, vf);
	}
	PCI_IOV_UNINIT(dev);

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2);

	iov->iov_num_vfs = 0;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	error = 0;
out:
	free(devlist, M_TEMP);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
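
This teardown path runs when userland asks for SR-IOV to be disabled on the PF, which iovctl(8) does through the PF's node under /dev/iov. A minimal, hypothetical userland sketch of reaching it follows; "/dev/iov/ix0" is a placeholder node name, and the IOV_DELETE request is assumed to come from sys/iov.h and take no argument, as suggested by the cdev-only signature above.

#include <sys/ioctl.h>
#include <sys/iov.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
	int fd;

	/* Placeholder PF node; substitute the real device name. */
	fd = open("/dev/iov/ix0", O_RDWR);
	if (fd < 0)
		err(1, "open(/dev/iov/ix0)");

	/* Ask the kernel to detach and destroy all VFs of this PF. */
	if (ioctl(fd, IOV_DELETE) != 0)
		err(1, "IOV_DELETE");

	close(fd);
	return (0);
}
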
Example #5
static int
pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
{
	device_t bus, dev;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *config;
	int i, error;
	uint16_t rid_off, rid_stride;
	uint16_t first_rid, last_rid;
	uint16_t iov_ctl;
	uint16_t num_vfs, total_vfs;
	int iov_inited;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	iov_inited = 0;
	config = NULL;

	if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}
	iov->iov_flags |= IOV_BUSY;

	error = pci_iov_parse_config(iov, arg, &config);
	if (error != 0)
		goto out;

	num_vfs = pci_iov_config_get_num_vfs(config);
	total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2);
	if (num_vfs > total_vfs) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_config_page_size(dinfo);
	if (error != 0)
		goto out;

	error = pci_iov_set_ari(bus);
	if (error != 0)
		goto out;

	error = pci_iov_init(dev, num_vfs, config);
	if (error != 0)
		goto out;
	iov_inited = 1;

	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2);

	rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2);
	rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2);

	first_rid = pci_get_rid(dev) + rid_off;
	last_rid = first_rid + (num_vfs - 1) * rid_stride;

	/* We don't yet support allocating extra bus numbers for VFs. */
	if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
		error = ENOSPC;
		goto out;
	}

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	error = pci_iov_init_rman(dev, iov);
	if (error != 0)
		goto out;

	iov->iov_num_vfs = num_vfs;

	error = pci_iov_setup_bars(dinfo);
	if (error != 0)
		goto out;

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE;
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	/* Per specification, we must wait 100ms before accessing VFs. */
	pause("iov", roundup(hz, 10));
	pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride);

	nvlist_destroy(config);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);

	return (0);
out:
	if (iov_inited)
		PCI_IOV_UNINIT(dev);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	nvlist_destroy(config);
	iov->iov_num_vfs = 0;
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
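
One step worth spelling out is the routing-ID arithmetic: VF RIDs are derived from the PF's RID using the VF Offset and VF Stride registers, and the ENOSPC check rejects any configuration whose last VF would fall on a different bus number (the bus being bits 15:8 of the RID). A worked example with assumed register values:

/*
 * Worked example (values are assumptions for illustration): the PF sits
 * at bus 3, slot 0, function 0, so pci_get_rid(dev) == 0x0300.  Suppose
 * PCIR_SRIOV_VF_OFF reads 0x0080, PCIR_SRIOV_VF_STRIDE reads 0x0002 and
 * num_vfs == 4:
 *
 *   first_rid = 0x0300 + 0x0080           = 0x0380
 *   last_rid  = 0x0380 + (4 - 1) * 0x0002 = 0x0386
 *
 * PCI_RID2BUS(0x0386) == 3 == pci_get_bus(dev), so every VF stays on
 * the PF's bus and the check passes.  An offset, stride or VF count
 * that pushed last_rid past 0x03FF would need an extra bus number,
 * which this code does not yet support, hence ENOSPC.
 */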