void pcie_cap_slot_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
        hotplug_event_clear(dev);
    }

    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
        return;
    }

    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_EIC)) {
        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
                        "sltsta -> 0x%02"PRIx16"\n",
                        sltsta);
    }

    /*
     * If the slot is populated, the power indicator is off, and the power
     * controller is off, it is safe to detach the devices.
     */
    if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
        ((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) {
        PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
        pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
                            pcie_unplug_device, NULL);

        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                     PCI_EXP_SLTSTA_PDS);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDC);
    }

    hotplug_event_notify(dev);

    /*
     * 6.7.3.2 Command Completed Events
     *
     * Software issues a command to a hot-plug capable Downstream Port by
     * issuing a write transaction that targets any portion of the Port's Slot
     * Control register. A single write to the Slot Control register is
     * considered to be a single command, even if the write affects more than
     * one field in the Slot Control register. In response to this transaction,
     * the Port must carry out the requested actions and then set the
     * associated status field for the command completed event.
     */

    /*
     * Real hardware might take a while to complete a requested command because
     * physical movement would be involved, like locking the electromechanical
     * lock. However, in our case the command is completed instantaneously
     * above, so send a command completion event right now.
     */
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
}
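/*
 * For reference, a minimal sketch of the unplug callback handed to
 * pci_for_each_device() above, assuming the device is simply torn down via
 * object_unparent(); the exact body in the tree may differ.
 */
static void pcie_unplug_device(PCIBus *bus, PCIDevice *dev, void *opaque)
{
    /* Detach the device from its parent, which finalizes and frees it. */
    object_unparent(OBJECT(dev));
}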
static AddressSpace *q35_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace **pvtd_as;
    int bus_num = pci_bus_num(bus);

    assert(0 <= bus_num && bus_num <= VTD_PCI_BUS_MAX);
    assert(0 <= devfn && devfn <= VTD_PCI_DEVFN_MAX);

    pvtd_as = s->address_spaces[bus_num];
    if (!pvtd_as) {
        /* No corresponding free() */
        pvtd_as = g_malloc0(sizeof(VTDAddressSpace *) * VTD_PCI_DEVFN_MAX);
        s->address_spaces[bus_num] = pvtd_as;
    }
    if (!pvtd_as[devfn]) {
        pvtd_as[devfn] = g_malloc0(sizeof(VTDAddressSpace));
        pvtd_as[devfn]->bus_num = (uint8_t)bus_num;
        pvtd_as[devfn]->devfn = (uint8_t)devfn;
        pvtd_as[devfn]->iommu_state = s;
        pvtd_as[devfn]->context_cache_entry.context_cache_gen = 0;
        memory_region_init_iommu(&pvtd_as[devfn]->iommu, OBJECT(s),
                                 &s->iommu_ops, "intel_iommu", UINT64_MAX);
        address_space_init(&pvtd_as[devfn]->as,
                           &pvtd_as[devfn]->iommu, "intel_iommu");
    }
    return &pvtd_as[devfn]->as;
}
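/*
 * For context, a minimal sketch of how a callback like the one above is
 * typically registered, assuming the pci_setup_iommu() hook that takes an
 * AddressSpace *(*)(PCIBus *, void *, int) callback; the wrapper function
 * and variable names here are illustrative, not taken from the tree.
 */
static void q35_wire_dma_iommu(PCIBus *pci_bus, IntelIOMMUState *iommu)
{
    /* After this call, DMA from devices on pci_bus is routed through
     * q35_host_dma_iommu() to pick the per-BDF address space. */
    pci_setup_iommu(pci_bus, q35_host_dma_iommu, iommu);
}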
void pci_device_hot_add(Monitor *mon, const QDict *qdict)
{
    PCIDevice *dev = NULL;
    const char *pci_addr = qdict_get_str(qdict, "pci_addr");
    const char *type = qdict_get_str(qdict, "type");
    const char *opts = qdict_get_try_str(qdict, "opts");

    /* strip legacy tag */
    if (!strncmp(pci_addr, "pci_addr=", 9)) {
        pci_addr += 9;
    }

    if (!opts) {
        opts = "";
    }

    if (!strcmp(pci_addr, "auto")) {
        pci_addr = NULL;
    }

    if (strcmp(type, "nic") == 0) {
        dev = qemu_pci_hot_add_nic(mon, pci_addr, opts);
    } else if (strcmp(type, "storage") == 0) {
        dev = qemu_pci_hot_add_storage(mon, pci_addr, opts);
    } else {
        monitor_printf(mon, "invalid type: %s\n", type);
    }

    if (dev) {
        monitor_printf(mon, "OK domain %d, bus %d, slot %d, function %d\n",
                       pci_find_domain(dev->bus),
                       pci_bus_num(dev->bus), PCI_SLOT(dev->devfn),
                       PCI_FUNC(dev->devfn));
    } else {
        monitor_printf(mon, "failed to add %s\n", opts);
    }
}
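/*
 * Usage sketch, assuming this handler is wired to the legacy "pci_add"
 * human-monitor command; the exact option strings below are illustrative:
 *
 *   (qemu) pci_add auto nic model=e1000
 *   (qemu) pci_add auto storage file=disk.img,if=virtio
 *
 * "auto" lets the code above pick a free slot; a fixed "<bus>:<slot>"
 * address may be passed instead, optionally prefixed with "pci_addr=",
 * which the handler strips.
 */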
uint32_t igd_pci_read(PCIDevice *pci_dev, uint32_t config_addr, int len)
{
    uint32_t val;

    assert(pci_dev->devfn == 0x00);
    if (!igd_passthru) {
        return pci_default_read_config(pci_dev, config_addr, len);
    }

    switch (config_addr) {
    case 0x00:        /* vendor id */
    case 0x02:        /* device id */
    case 0x52:        /* processor graphics control register */
    case 0xa0:        /* top of memory */
    case 0xb0:        /* ILK: BSM: should read from dev 2 offset 0x5c */
    case 0x58:        /* SNB: PAVPC Offset */
    case 0xa4:        /* SNB: graphics base of stolen memory */
    case 0xa8:        /* SNB: base of GTT stolen memory */
        val = pt_pci_host_read(0, PCI_SLOT(pci_dev->devfn), 0,
                               config_addr, len);
        PT_LOG("pci_config_read: %x:%x.%x: addr=%x len=%x val=%x\n",
               pci_bus_num(pci_dev->bus), PCI_SLOT(pci_dev->devfn),
               PCI_FUNC(pci_dev->devfn), config_addr, len, val);
        break;
    default:
        val = pci_default_read_config(pci_dev, config_addr, len);
    }

    return val;
}
void xen_pt_log(const PCIDevice *d, const char *f, ...)
{
    va_list ap;

    va_start(ap, f);
    if (d) {
        fprintf(stderr, "[%02x:%02x.%d] ", pci_bus_num(d->bus),
                PCI_SLOT(d->devfn), PCI_FUNC(d->devfn));
    }
    vfprintf(stderr, f, ap);
    va_end(ap);
}
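/*
 * A hypothetical convenience wrapper, shown only to illustrate how callers
 * might prepend the function name to each message; the real tree defines its
 * own logging macros, which may differ from this sketch.
 */
#define XEN_PT_LOG_EXAMPLE(d, fmt, ...) \
    xen_pt_log(d, "%s: " fmt, __func__, ## __VA_ARGS__)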
void pci_emulation_init(PCIBus *bus, PCI_EMULATION_INFO *pci_emulation_info)
{
    int instance_id;
    PCI_EMULATION_State *d;
    uint8_t *pci_conf;

#ifdef DEBUG
    fprintf(logfile, "qemu: pciinit\n");
#endif

    d = (PCI_EMULATION_State *)pci_register_device(bus,
                                                   pci_emulation_info->name,
                                                   sizeof(PCI_EMULATION_State),
                                                   -1, NULL, NULL);

    pci_conf = d->dev.config;

    pci_conf[0x00] = pci_emulation_info->vendorid & 0xff;
    pci_conf[0x01] = (pci_emulation_info->vendorid & 0xff00) >> 8;
    pci_conf[0x02] = pci_emulation_info->deviceid & 0xff;
    pci_conf[0x03] = (pci_emulation_info->deviceid & 0xff00) >> 8;
    pci_conf[0x04] = pci_emulation_info->command & 0xff;
    pci_conf[0x05] = (pci_emulation_info->command & 0xff00) >> 8;
    pci_conf[0x06] = pci_emulation_info->status & 0xff;
    pci_conf[0x07] = (pci_emulation_info->status & 0xff00) >> 8;
    pci_conf[0x08] = pci_emulation_info->revision & 0xff;
    pci_conf[0x09] = pci_emulation_info->classcode & 0xff;
    pci_conf[0x0a] = (pci_emulation_info->classcode & 0xff00) >> 8;
    pci_conf[0x0b] = (pci_emulation_info->classcode & 0xff0000) >> 16;
    pci_conf[0x0e] = pci_emulation_info->headertype & 0xff;
    pci_conf[0x2c] = pci_emulation_info->subvendorid & 0xff;
    pci_conf[0x2d] = (pci_emulation_info->subvendorid & 0xff00) >> 8;
    pci_conf[0x2e] = pci_emulation_info->subsystemid & 0xff;
    pci_conf[0x2f] = (pci_emulation_info->subsystemid & 0xff00) >> 8;
    pci_conf[0x3c] = pci_emulation_info->interruputline & 0xff;
    pci_conf[0x3d] = pci_emulation_info->interruputpin & 0xff;

    instance_id = pci_bus_num(bus) << 8 | d->dev.devfn;
    register_savevm(pci_emulation_info->name, instance_id, 1,
                    pci_emulation_save, pci_emulation_load, d);

    return;
}
void igd_pci_write(PCIDevice *pci_dev, uint32_t config_addr,
                   uint32_t val, int len)
{
    assert(pci_dev->devfn == 0x00);
    if (!igd_passthru) {
        pci_default_write_config(pci_dev, config_addr, val, len);
        return;
    }

    switch (config_addr) {
    case 0x58:        /* PAVPC Offset */
        pt_pci_host_write(0, 0, 0, config_addr, val, len);
        PT_LOG("pci_config_write: %x:%x.%x: addr=%x len=%x val=%x\n",
               pci_bus_num(pci_dev->bus), PCI_SLOT(pci_dev->devfn),
               PCI_FUNC(pci_dev->devfn), config_addr, len, val);
        break;
    default:
        pci_default_write_config(pci_dev, config_addr, val, len);
    }
}
static void spapr_phb_vfio_finish_realize(sPAPRPHBState *sphb, Error **errp)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_iommu_spapr_tce_info info = { .argsz = sizeof(info) };
    int ret;
    sPAPRTCETable *tcet;
    uint32_t liobn = svphb->phb.dma_liobn;

    if (svphb->iommugroupid == -1) {
        error_setg(errp, "Wrong IOMMU group ID %d", svphb->iommugroupid);
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_CHECK_EXTENSION,
                               (void *) VFIO_SPAPR_TCE_IOMMU);
    if (ret != 1) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: SPAPR extension is not supported");
        return;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_IOMMU_SPAPR_TCE_GET_INFO, &info);
    if (ret) {
        error_setg_errno(errp, -ret,
                         "spapr-vfio: get info from container failed");
        return;
    }

    tcet = spapr_tce_new_table(DEVICE(sphb), liobn, info.dma32_window_start,
                               SPAPR_TCE_PAGE_SHIFT,
                               info.dma32_window_size >> SPAPR_TCE_PAGE_SHIFT,
                               true);
    if (!tcet) {
        error_setg(errp, "spapr-vfio: failed to create VFIO TCE table");
        return;
    }

    /* Register default 32bit DMA window */
    memory_region_add_subregion(&sphb->iommu_root, tcet->bus_offset,
                                spapr_tce_get_iommu(tcet));
}

static void spapr_phb_vfio_eeh_reenable(sPAPRPHBVFIOState *svphb)
{
    struct vfio_eeh_pe_op op = {
        .argsz = sizeof(op),
        .op = VFIO_EEH_PE_ENABLE
    };

    vfio_container_ioctl(&svphb->phb.iommu_as,
                         svphb->iommugroupid, VFIO_EEH_PE_OP, &op);
}

static void spapr_phb_vfio_reset(DeviceState *qdev)
{
    /*
     * The PE might be in frozen state. Reenabling the EEH
     * functionality on it will clear the frozen state, which
     * ensures that the contained PCI devices will work properly
     * after reboot.
     */
    spapr_phb_vfio_eeh_reenable(SPAPR_PCI_VFIO_HOST_BRIDGE(qdev));
}

static int spapr_phb_vfio_eeh_set_option(sPAPRPHBState *sphb,
                                         unsigned int addr, int option)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    switch (option) {
    case RTAS_EEH_DISABLE:
        op.op = VFIO_EEH_PE_DISABLE;
        break;
    case RTAS_EEH_ENABLE: {
        PCIHostState *phb;
        PCIDevice *pdev;

        /*
         * The EEH functionality is enabled on the basis of the PCI
         * device, instead of the PE. We need to check the validity of
         * the PCI device address.
         */
        phb = PCI_HOST_BRIDGE(sphb);
        pdev = pci_find_device(phb->bus,
                               (addr >> 16) & 0xFF, (addr >> 8) & 0xFF);
        if (!pdev) {
            return RTAS_OUT_PARAM_ERROR;
        }

        op.op = VFIO_EEH_PE_ENABLE;
        break;
    }
    case RTAS_EEH_THAW_IO:
        op.op = VFIO_EEH_PE_UNFREEZE_IO;
        break;
    case RTAS_EEH_THAW_DMA:
        op.op = VFIO_EEH_PE_UNFREEZE_DMA;
        break;
    default:
        return RTAS_OUT_PARAM_ERROR;
    }

    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_HW_ERROR;
    }

    return RTAS_OUT_SUCCESS;
}

static int spapr_phb_vfio_eeh_get_state(sPAPRPHBState *sphb, int *state)
{
    sPAPRPHBVFIOState *svphb = SPAPR_PCI_VFIO_HOST_BRIDGE(sphb);
    struct vfio_eeh_pe_op op = { .argsz = sizeof(op) };
    int ret;

    op.op = VFIO_EEH_PE_GET_STATE;
    ret = vfio_container_ioctl(&svphb->phb.iommu_as, svphb->iommugroupid,
                               VFIO_EEH_PE_OP, &op);
    if (ret < 0) {
        return RTAS_OUT_PARAM_ERROR;
    }

    *state = ret;
    return RTAS_OUT_SUCCESS;
}

static void spapr_phb_vfio_eeh_clear_dev_msix(PCIBus *bus,
                                              PCIDevice *pdev,
                                              void *opaque)
{
    /* Check if the device is a VFIO PCI device */
    if (!object_dynamic_cast(OBJECT(pdev), "vfio-pci")) {
        return;
    }

    /*
     * The MSIx table will be cleaned out by reset. We need to
     * disable it so that it can be reenabled properly. Also,
     * the cached MSIx table should be cleared as it's not
     * reflecting the contents in hardware.
     */
    if (msix_enabled(pdev)) {
        uint16_t flags;

        flags = pci_host_config_read_common(pdev,
                                            pdev->msix_cap + PCI_MSIX_FLAGS,
                                            pci_config_size(pdev), 2);
        flags &= ~PCI_MSIX_FLAGS_ENABLE;
        pci_host_config_write_common(pdev,
                                     pdev->msix_cap + PCI_MSIX_FLAGS,
                                     pci_config_size(pdev), flags, 2);
    }

    msix_reset(pdev);
}

static void spapr_phb_vfio_eeh_clear_bus_msix(PCIBus *bus, void *opaque)
{
    pci_for_each_device(bus, pci_bus_num(bus),
                        spapr_phb_vfio_eeh_clear_dev_msix, NULL);
}
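/*
 * For context, a minimal sketch of how the bus-level helper above would
 * typically be driven from the PHB reset path; the wrapping function below
 * is illustrative, not the actual reset handler from the tree.
 */
static void spapr_phb_vfio_clear_msix_example(sPAPRPHBState *sphb)
{
    PCIHostState *phb = PCI_HOST_BRIDGE(sphb);

    /* Walk the root bus and every bus below it, disabling and resetting
     * MSIx state on each vfio-pci device before the PE reset. */
    pci_for_each_bus(phb->bus, spapr_phb_vfio_eeh_clear_bus_msix, NULL);
}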