/*
 * Set up an SR-IOV BAR whose space was pre-allocated through PCI
 * Enhanced Allocation (EA).  The EA entry covers all VFs at once, so
 * the per-VF size is the entry size divided by the VF count.
 */
static int
pci_iov_alloc_bar_ea(struct pci_devinfo *dinfo, int bar)
{
	struct pcicfg_iov *iov;
	rman_res_t start, end;
	struct resource *res;
	struct resource_list *rl;
	struct resource_list_entry *rle;

	rl = &dinfo->resources;
	iov = dinfo->cfg.iov;

	rle = resource_list_find(rl, SYS_RES_MEMORY,
	    iov->iov_pos + PCIR_SRIOV_BAR(bar));
	if (rle == NULL)
		rle = resource_list_find(rl, SYS_RES_IOPORT,
		    iov->iov_pos + PCIR_SRIOV_BAR(bar));
	if (rle == NULL)
		return (ENXIO);
	res = rle->res;

	iov->iov_bar[bar].res = res;
	iov->iov_bar[bar].bar_size = rman_get_size(res) / iov->iov_num_vfs;
	iov->iov_bar[bar].bar_shift = pci_mapsize(iov->iov_bar[bar].bar_size);

	start = rman_get_start(res);
	end = rman_get_end(res);
	return (rman_manage_region(&iov->rman, start, end));
}
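/*
 * Worked example (hypothetical numbers): if EA pre-allocated a 64 KB
 * window for this BAR and 16 VFs are being created, then:
 *
 *	bar_size  = 65536 / 16 = 4096 bytes per VF
 *	bar_shift = pci_mapsize(4096) = 12
 *
 * Each VF is later carved its own 4 KB slice out of the rman region
 * registered above.
 */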
/*
 * Set up an SR-IOV BAR the legacy way: allocate one contiguous,
 * suitably aligned window from the parent bus that is large enough
 * for all VFs, then register it with the rman.
 */
static int
pci_iov_alloc_bar(struct pci_devinfo *dinfo, int bar, pci_addr_t bar_shift)
{
	struct resource *res;
	struct pcicfg_iov *iov;
	device_t dev, bus;
	rman_res_t start, end;
	pci_addr_t bar_size;
	int rid;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	rid = iov->iov_pos + PCIR_SRIOV_BAR(bar);
	bar_size = 1 << bar_shift;

	res = pci_alloc_multi_resource(bus, dev, SYS_RES_MEMORY, &rid, 0,
	    ~0, 1, iov->iov_num_vfs, RF_ACTIVE);
	if (res == NULL)
		return (ENXIO);

	iov->iov_bar[bar].res = res;
	iov->iov_bar[bar].bar_size = bar_size;
	iov->iov_bar[bar].bar_shift = bar_shift;

	start = rman_get_start(res);
	end = rman_get_end(res);
	return (rman_manage_region(&iov->rman, start, end));
}
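/*
 * Worked example (hypothetical numbers): for bar_shift = 14 the
 * per-VF BAR size is 1 << 14 = 16 KB, so enabling 8 VFs asks
 * pci_alloc_multi_resource() for 8 consecutive 16 KB copies, i.e.
 * a single 128 KB window.  SR-IOV requires VF n to decode the slice
 * at window_start + n * bar_size, which is why the copies must be
 * contiguous.
 */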
static int
pci_iov_setup_bars(struct pci_devinfo *dinfo)
{
	device_t dev;
	struct pcicfg_iov *iov;
	pci_addr_t bar_value, testval;
	int i, last_64, error;

	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	last_64 = 0;

	pci_add_resources_ea(device_get_parent(dev), dev, 1);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		/* First, try to use BARs allocated with EA */
		error = pci_iov_alloc_bar_ea(dinfo, i);
		if (error == 0)
			continue;

		/* Allocate legacy-BAR only if EA is not enabled */
		if (pci_ea_is_enabled(dev, iov->iov_pos + PCIR_SRIOV_BAR(i)))
			continue;

		/*
		 * If a PCI BAR is a 64-bit wide BAR, then it spans two
		 * consecutive registers.  Therefore if the last BAR that
		 * we looked at was a 64-bit BAR, we need to skip this
		 * register as it's the second half of the last BAR.
		 */
		if (!last_64) {
			pci_read_bar(dev,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    &bar_value, &testval, &last_64);
			if (testval != 0) {
				error = pci_iov_alloc_bar(dinfo, i,
				    pci_mapsize(testval));
				if (error != 0)
					return (error);
			}
		} else
			last_64 = 0;
	}

	return (0);
}
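/*
 * Example of the sizing probe above (hypothetical value):
 * pci_read_bar() writes all-ones to the VF BAR and returns the
 * readback in testval.  A readback of 0xffffc000 has its low 14
 * bits hardwired to zero, so pci_mapsize(0xffffc000) = 14 and each
 * VF needs a 16 KB mapping; a testval of 0 means the BAR is
 * unimplemented and is simply skipped.
 */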
/*
 * Handler for the IOV_DELETE ioctl: detach and delete all VF
 * children, clear VF_EN/VF_MSE in the SR-IOV control register, and
 * release the BAR resources and rman set up at config time.
 */
static int
pci_iov_delete(struct cdev *cdev)
{
	device_t bus, dev, vf, *devlist;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	int i, error, devcount;
	uint32_t iov_ctl;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	devlist = NULL;

	if (iov->iov_flags & IOV_BUSY) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}
	if (iov->iov_num_vfs == 0) {
		mtx_unlock(&Giant);
		return (ECHILD);
	}

	iov->iov_flags |= IOV_BUSY;

	error = device_get_children(bus, &devlist, &devcount);
	if (error != 0)
		goto out;

	/*
	 * Detach all VFs first; a failed detach aborts the delete
	 * before any device_t has been destroyed.
	 */
	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (!pci_iov_is_child_vf(iov, vf))
			continue;

		error = device_detach(vf);
		if (error != 0) {
			device_printf(dev,
			    "Could not disable SR-IOV: failed to detach VF %s\n",
			    device_get_nameunit(vf));
			goto out;
		}
	}

	for (i = 0; i < devcount; i++) {
		vf = devlist[i];

		if (pci_iov_is_child_vf(iov, vf))
			device_delete_child(bus, vf);
	}
	PCI_IOV_UNINIT(dev);

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);
	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2);

	iov->iov_num_vfs = 0;

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	error = 0;
out:
	free(devlist, M_TEMP);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
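/*
 * A minimal sketch of driving the delete path from userland,
 * assuming a PF named foo0; in practice iovctl(8) does this
 * (roughly "iovctl -D -d foo0"):
 *
 *	int fd = open("/dev/iov/foo0", O_RDWR);
 *	if (fd >= 0 && ioctl(fd, IOV_DELETE) == -1)
 *		warn("IOV_DELETE");
 *
 * EBUSY is returned while another config or delete is in flight,
 * ECHILD when no VFs are currently enabled.
 */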
/*
 * Handler for the IOV_CONFIG ioctl: validate the user-supplied
 * nvlist, initialize the PF driver, size and allocate the VF BARs,
 * set VF_EN, and enumerate the new VFs.  Any failure unwinds to the
 * fully disabled state.
 */
static int
pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
{
	device_t bus, dev;
	struct pci_devinfo *dinfo;
	struct pcicfg_iov *iov;
	nvlist_t *config;
	int i, error;
	uint16_t rid_off, rid_stride;
	uint16_t first_rid, last_rid;
	uint16_t iov_ctl;
	uint16_t num_vfs, total_vfs;
	int iov_inited;

	mtx_lock(&Giant);
	dinfo = cdev->si_drv1;
	iov = dinfo->cfg.iov;
	dev = dinfo->cfg.dev;
	bus = device_get_parent(dev);
	iov_inited = 0;
	config = NULL;

	if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) {
		mtx_unlock(&Giant);
		return (EBUSY);
	}
	iov->iov_flags |= IOV_BUSY;

	error = pci_iov_parse_config(iov, arg, &config);
	if (error != 0)
		goto out;

	num_vfs = pci_iov_config_get_num_vfs(config);
	total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2);
	if (num_vfs > total_vfs) {
		error = EINVAL;
		goto out;
	}

	error = pci_iov_config_page_size(dinfo);
	if (error != 0)
		goto out;

	error = pci_iov_set_ari(bus);
	if (error != 0)
		goto out;

	error = pci_iov_init(dev, num_vfs, config);
	if (error != 0)
		goto out;
	iov_inited = 1;

	IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2);

	rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2);
	rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2);

	first_rid = pci_get_rid(dev) + rid_off;
	last_rid = first_rid + (num_vfs - 1) * rid_stride;

	/* We don't yet support allocating extra bus numbers for VFs. */
	if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
		error = ENOSPC;
		goto out;
	}

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	error = pci_iov_init_rman(dev, iov);
	if (error != 0)
		goto out;

	iov->iov_num_vfs = num_vfs;

	error = pci_iov_setup_bars(dinfo);
	if (error != 0)
		goto out;

	iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE;
	IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

	/* Per specification, we must wait 100ms before accessing VFs. */
	pause("iov", roundup(hz, 10));
	pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride);

	nvlist_destroy(config);
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);

	return (0);
out:
	if (iov_inited)
		PCI_IOV_UNINIT(dev);

	for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
		if (iov->iov_bar[i].res != NULL) {
			pci_release_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i),
			    iov->iov_bar[i].res);
			pci_delete_resource(bus, dev, SYS_RES_MEMORY,
			    iov->iov_pos + PCIR_SRIOV_BAR(i));
			iov->iov_bar[i].res = NULL;
		}
	}

	if (iov->iov_flags & IOV_RMAN_INITED) {
		rman_fini(&iov->rman);
		iov->iov_flags &= ~IOV_RMAN_INITED;
	}

	nvlist_destroy(config);
	iov->iov_num_vfs = 0;
	iov->iov_flags &= ~IOV_BUSY;
	mtx_unlock(&Giant);
	return (error);
}
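/*
 * Worked example for the bus-number check above (hypothetical
 * values): a PF at bus 3, device 0, function 0 has RID 0x0300.
 * With a VF offset of 0x80 and stride of 0x2, enabling 64 VFs
 * gives:
 *
 *	first_rid = 0x0300 + 0x80       = 0x0380 (bus 3)
 *	last_rid  = 0x0380 + 63 * 0x2   = 0x03fe (bus 3)
 *
 * so every VF still decodes on the PF's bus.  One more VF would
 * reach RID 0x0400 (bus 4) and the request is rejected with
 * ENOSPC, since allocating extra bus numbers is not supported here.
 */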