static int
lca_pcib_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{

        if (type == SYS_RES_IRQ)
                return isa_release_intr(bus, child, r);
        else
                return pci_release_resource(bus, child, type, rid, r);
}
static int
apecs_pcib_release_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{

        /*
         * Only ST_DEC_2100_A50 (Avanti-class) systems route PCI interrupts
         * through the ISA interrupt code; everything else uses the generic
         * PCI bus method.
         */
        if ((hwrpb->rpb_type == ST_DEC_2100_A50) && (type == SYS_RES_IRQ))
                return isa_release_intr(bus, child, r);
        else
                return pci_release_resource(bus, child, type, rid, r);
}
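/*
 * Added sketch, not part of the original drivers: a bus_release_resource
 * implementation like the two above is normally exported through the
 * bridge's newbus method table.  Only bus_release_resource is taken from
 * the code above; the other entries and the table name are illustrative
 * assumptions (requires <sys/param.h>, <sys/kernel.h>, <sys/bus.h>).
 */
static device_method_t lca_pcib_example_methods[] = {
        /* Bus interface (illustrative subset) */
        DEVMETHOD(bus_release_resource,    lca_pcib_release_resource),
        DEVMETHOD(bus_activate_resource,   bus_generic_activate_resource),
        DEVMETHOD(bus_deactivate_resource, bus_generic_deactivate_resource),

        DEVMETHOD_END
};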
static int
pci_iov_delete(struct cdev *cdev)
{
        device_t bus, dev, vf, *devlist;
        struct pci_devinfo *dinfo;
        struct pcicfg_iov *iov;
        int i, error, devcount;
        uint32_t iov_ctl;

        mtx_lock(&Giant);
        dinfo = cdev->si_drv1;
        iov = dinfo->cfg.iov;
        dev = dinfo->cfg.dev;
        bus = device_get_parent(dev);
        devlist = NULL;

        if (iov->iov_flags & IOV_BUSY) {
                mtx_unlock(&Giant);
                return (EBUSY);
        }
        if (iov->iov_num_vfs == 0) {
                mtx_unlock(&Giant);
                return (ECHILD);
        }
        iov->iov_flags |= IOV_BUSY;

        error = device_get_children(bus, &devlist, &devcount);
        if (error != 0)
                goto out;

        /* Detach every VF first; bail out if any VF refuses to detach. */
        for (i = 0; i < devcount; i++) {
                vf = devlist[i];

                if (!pci_iov_is_child_vf(iov, vf))
                        continue;

                error = device_detach(vf);
                if (error != 0) {
                        device_printf(dev,
                            "Could not disable SR-IOV: failed to detach VF %s\n",
                            device_get_nameunit(vf));
                        goto out;
                }
        }

        /* All VFs are detached; now delete their device_t nodes. */
        for (i = 0; i < devcount; i++) {
                vf = devlist[i];

                if (pci_iov_is_child_vf(iov, vf))
                        device_delete_child(bus, vf);
        }
        PCI_IOV_UNINIT(dev);

        /* Clear VF Enable and VF MSE in the SR-IOV capability. */
        iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
        iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
        IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);
        IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, 0, 2);
        iov->iov_num_vfs = 0;

        /* Release and delete the VF BAR windows allocated at configuration time. */
        for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
                if (iov->iov_bar[i].res != NULL) {
                        pci_release_resource(bus, dev, SYS_RES_MEMORY,
                            iov->iov_pos + PCIR_SRIOV_BAR(i),
                            iov->iov_bar[i].res);
                        pci_delete_resource(bus, dev, SYS_RES_MEMORY,
                            iov->iov_pos + PCIR_SRIOV_BAR(i));
                        iov->iov_bar[i].res = NULL;
                }
        }

        if (iov->iov_flags & IOV_RMAN_INITED) {
                rman_fini(&iov->rman);
                iov->iov_flags &= ~IOV_RMAN_INITED;
        }
        error = 0;
out:
        free(devlist, M_TEMP);
        iov->iov_flags &= ~IOV_BUSY;
        mtx_unlock(&Giant);
        return (error);
}
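/*
 * Added usage sketch, not part of the original source: pci_iov_delete() is
 * reached from userland through the PF's /dev/iov node.  The IOV_DELETE
 * ioctl name comes from <sys/iov.h>; the "foo0" PF device is a hypothetical
 * placeholder.  iovctl(8) -D performs roughly the same operation.
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/iov.h>

#include <err.h>
#include <fcntl.h>
#include <unistd.h>

int
main(void)
{
        int fd;

        fd = open("/dev/iov/foo0", O_RDWR);     /* hypothetical PF node */
        if (fd < 0)
                err(1, "open");

        /* Tears down all VFs; fails with EBUSY or ECHILD as seen above. */
        if (ioctl(fd, IOV_DELETE) < 0)
                err(1, "IOV_DELETE");

        close(fd);
        return (0);
}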
static int
pci_iov_config(struct cdev *cdev, struct pci_iov_arg *arg)
{
        device_t bus, dev;
        struct pci_devinfo *dinfo;
        struct pcicfg_iov *iov;
        nvlist_t *config;
        int i, error;
        uint16_t rid_off, rid_stride;
        uint16_t first_rid, last_rid;
        uint16_t iov_ctl;
        uint16_t num_vfs, total_vfs;
        int iov_inited;

        mtx_lock(&Giant);
        dinfo = cdev->si_drv1;
        iov = dinfo->cfg.iov;
        dev = dinfo->cfg.dev;
        bus = device_get_parent(dev);
        iov_inited = 0;
        config = NULL;

        if ((iov->iov_flags & IOV_BUSY) || iov->iov_num_vfs != 0) {
                mtx_unlock(&Giant);
                return (EBUSY);
        }
        iov->iov_flags |= IOV_BUSY;

        error = pci_iov_parse_config(iov, arg, &config);
        if (error != 0)
                goto out;

        /* The requested VF count must not exceed the device's TotalVFs. */
        num_vfs = pci_iov_config_get_num_vfs(config);
        total_vfs = IOV_READ(dinfo, PCIR_SRIOV_TOTAL_VFS, 2);
        if (num_vfs > total_vfs) {
                error = EINVAL;
                goto out;
        }

        error = pci_iov_config_page_size(dinfo);
        if (error != 0)
                goto out;

        error = pci_iov_set_ari(bus);
        if (error != 0)
                goto out;

        error = pci_iov_init(dev, num_vfs, config);
        if (error != 0)
                goto out;
        iov_inited = 1;

        IOV_WRITE(dinfo, PCIR_SRIOV_NUM_VFS, num_vfs, 2);

        rid_off = IOV_READ(dinfo, PCIR_SRIOV_VF_OFF, 2);
        rid_stride = IOV_READ(dinfo, PCIR_SRIOV_VF_STRIDE, 2);

        first_rid = pci_get_rid(dev) + rid_off;
        last_rid = first_rid + (num_vfs - 1) * rid_stride;

        /* We don't yet support allocating extra bus numbers for VFs. */
        if (pci_get_bus(dev) != PCI_RID2BUS(last_rid)) {
                error = ENOSPC;
                goto out;
        }

        /* Keep VFs disabled while the BAR windows are set up. */
        iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
        iov_ctl &= ~(PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE);
        IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

        error = pci_iov_init_rman(dev, iov);
        if (error != 0)
                goto out;

        iov->iov_num_vfs = num_vfs;

        error = pci_iov_setup_bars(dinfo);
        if (error != 0)
                goto out;

        iov_ctl = IOV_READ(dinfo, PCIR_SRIOV_CTL, 2);
        iov_ctl |= PCIM_SRIOV_VF_EN | PCIM_SRIOV_VF_MSE;
        IOV_WRITE(dinfo, PCIR_SRIOV_CTL, iov_ctl, 2);

        /* Per specification, we must wait 100ms before accessing VFs. */
        pause("iov", roundup(hz, 10));
        pci_iov_enumerate_vfs(dinfo, config, first_rid, rid_stride);

        nvlist_destroy(config);
        iov->iov_flags &= ~IOV_BUSY;
        mtx_unlock(&Giant);

        return (0);
out:
        /* Undo any partial configuration before reporting the error. */
        if (iov_inited)
                PCI_IOV_UNINIT(dev);

        for (i = 0; i <= PCIR_MAX_BAR_0; i++) {
                if (iov->iov_bar[i].res != NULL) {
                        pci_release_resource(bus, dev, SYS_RES_MEMORY,
                            iov->iov_pos + PCIR_SRIOV_BAR(i),
                            iov->iov_bar[i].res);
                        pci_delete_resource(bus, dev, SYS_RES_MEMORY,
                            iov->iov_pos + PCIR_SRIOV_BAR(i));
                        iov->iov_bar[i].res = NULL;
                }
        }

        if (iov->iov_flags & IOV_RMAN_INITED) {
                rman_fini(&iov->rman);
                iov->iov_flags &= ~IOV_RMAN_INITED;
        }

        nvlist_destroy(config);
        iov->iov_num_vfs = 0;
        iov->iov_flags &= ~IOV_BUSY;
        mtx_unlock(&Giant);
        return (error);
}
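/*
 * Added usage sketch, not part of the original source: pci_iov_config()
 * consumes a packed nvlist describing the requested VF configuration.  The
 * IOV_CONFIG ioctl and the libnv calls are real interfaces (link with
 * -lnv), but the nvlist layout ("PF" node with "num_vfs") and the
 * pci_iov_arg field names used here are assumptions recalled from
 * iovctl(8)/<sys/iov.h>; the real layout is validated against the PF
 * driver's schema by pci_iov_parse_config().
 */
#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/iov.h>
#include <sys/nv.h>

#include <err.h>
#include <fcntl.h>
#include <stdlib.h>
#include <unistd.h>

int
main(void)
{
        struct pci_iov_arg arg;
        nvlist_t *config, *pf;
        int fd;

        /* Ask for four VFs; driver-specific parameters would go here too. */
        pf = nvlist_create(0);
        nvlist_add_number(pf, "num_vfs", 4);

        config = nvlist_create(0);
        nvlist_add_nvlist(config, "PF", pf);    /* copies pf into config */
        nvlist_destroy(pf);

        arg.config = nvlist_pack(config, &arg.len);
        if (arg.config == NULL)
                err(1, "nvlist_pack");

        fd = open("/dev/iov/foo0", O_RDWR);     /* hypothetical PF node */
        if (fd < 0)
                err(1, "open");
        if (ioctl(fd, IOV_CONFIG, &arg) < 0)
                err(1, "IOV_CONFIG");

        free(arg.config);
        nvlist_destroy(config);
        close(fd);
        return (0);
}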