/* reset bridge specific configuration registers */
void pci_bridge_reset(DeviceState *qdev)
{
    PCIDevice *dev = PCI_DEVICE(qdev);
    uint8_t *conf = dev->config;

    conf[PCI_PRIMARY_BUS] = 0;
    conf[PCI_SECONDARY_BUS] = 0;
    conf[PCI_SUBORDINATE_BUS] = 0;
    conf[PCI_SEC_LATENCY_TIMER] = 0;

    /*
     * The default values for base/limit registers aren't specified
     * in the PCI-to-PCI bridge spec, so we don't touch them here.
     * Each implementation can override them: a typical implementation
     * either zeroes the base/limit registers or disables forwarding.
     * If disabled forwarding is wanted, call
     * pci_bridge_disable_base_limit() after this function.
     */
    pci_byte_test_and_clear_mask(conf + PCI_IO_BASE,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_BASE,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_BASE,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0);
    pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0);

    pci_set_word(conf + PCI_BRIDGE_CONTROL, 0);
}
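/*
 * For reference: the helper the comment above points at. This is a minimal
 * sketch, assuming QEMU's pci_*_test_and_{set,clear}_mask helpers; it
 * disables forwarding by programming each base above its limit so the
 * bridge windows decode nothing. The exact version in a given tree may
 * differ.
 */
void pci_bridge_disable_base_limit(PCIDevice *dev)
{
    uint8_t *conf = dev->config;

    pci_byte_test_and_set_mask(conf + PCI_IO_BASE,
                               PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_word_test_and_set_mask(conf + PCI_MEMORY_BASE,
                               PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_set_mask(conf + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0);
    pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0);
}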
static bool pcie_aer_inject_uncor_error(PCIEAERInject *inj, bool is_fatal)
{
    PCIDevice *dev = inj->dev;
    uint16_t cmd;

    if (is_fatal) {
        inj->devsta |= PCI_EXP_DEVSTA_FED;
    } else {
        inj->devsta |= PCI_EXP_DEVSTA_NFED;
    }
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask = pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
        if (mask & inj->error_status) {
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       inj->error_status);
            return false;
        }

        inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                   inj->error_status);
    }

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    if (inj->unsupported_request &&
        !(inj->devctl & PCI_EXP_DEVCTL_URRE) && !(cmd & PCI_COMMAND_SERR)) {
        return false;
    }

    if (is_fatal) {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_FERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_FATAL_EN;
    } else {
        if (!((cmd & PCI_COMMAND_SERR) ||
              (inj->devctl & PCI_EXP_DEVCTL_NFERE))) {
            return false;
        }
        inj->msg.severity = PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    return true;
}
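/*
 * The boolean result above tells the caller whether an error Message should
 * travel toward the Root Complex. A hedged sketch of a plausible call site:
 * pcie_aer_msg() is the routine that walks a message up the PCIe hierarchy,
 * and inject_and_forward() is a hypothetical wrapper.
 */
static void inject_and_forward(PCIEAERInject *inj, bool is_fatal)
{
    if (!pcie_aer_inject_uncor_error(inj, is_fatal)) {
        return; /* masked, or reporting disabled: no message is sent */
    }
    /* msg.severity was filled in by the helper; route the message upward */
    pcie_aer_msg(inj->dev, &inj->msg);
}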
/* via ide func */
static void vt82c686b_ide_realize(PCIDevice *dev, Error **errp)
{
    PCIIDEState *d = PCI_IDE(dev);
    uint8_t *pci_conf = dev->config;

    pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy ATA mode */
    pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0);

    qemu_register_reset(via_reset, d);
    bmdma_setup_bar(d);
    pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);

    vmstate_register(DEVICE(dev), 0, &vmstate_ide_pci, d);

    vt82c686b_init_ports(d);
}
/* via ide func */
static int vt82c686b_ide_initfn(PCIDevice *dev)
{
    PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
    uint8_t *pci_conf = d->dev.config;

    pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy ATA mode */
    pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0);

    qemu_register_reset(via_reset, d);
    bmdma_setup_bar(d);
    pci_register_bar(&d->dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);

    vmstate_register(&dev->qdev, 0, &vmstate_ide_pci, d);

    vt82c686b_init_ports(d);

    return 0;
}
static void msix_mmio_write(void *opaque, target_phys_addr_t addr,
                            uint64_t val, unsigned size)
{
    PCIDevice *dev = opaque;
    unsigned int offset = addr & (MSIX_PAGE_SIZE - 1) & ~0x3;
    int vector = offset / PCI_MSIX_ENTRY_SIZE;
    int was_masked = msix_is_masked(dev, vector);

    pci_set_long(dev->msix_table_page + offset, val);
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_update(dev, vector, was_masked, msix_is_masked(dev, vector));
    }
    if (was_masked != msix_is_masked(dev, vector) &&
        dev->msix_mask_notifier) {
        int r = dev->msix_mask_notifier(dev, vector,
                                        msix_is_masked(dev, vector));
        assert(r >= 0);
    }
    msix_handle_mask_update(dev, vector);
}
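/*
 * The mask state consulted above lives in the Vector Control dword of each
 * 16-byte MSI-X table entry. A minimal sketch of the per-vector test,
 * assuming the msix_table_page layout used by this writer; the real
 * msix_is_masked() may also fold in the function-wide MASKALL state.
 */
static bool msix_vector_masked_sketch(PCIDevice *dev, int vector)
{
    uint8_t *entry = dev->msix_table_page + vector * PCI_MSIX_ENTRY_SIZE;

    return pci_get_long(entry + PCI_MSIX_ENTRY_VECTOR_CTRL) &
           PCI_MSIX_ENTRY_CTRL_MASKBIT;
}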
static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp)
{
    struct AHCIPCIState *d;
    int sata_cap_offset;
    uint8_t *sata_cap;

    d = ICH_AHCI(dev);
    ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6);

    pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1);

    dev->config[PCI_CACHE_LINE_SIZE] = 0x08; /* Cache line size */
    dev->config[PCI_LATENCY_TIMER] = 0x00;   /* Latency timer */
    pci_config_set_interrupt_pin(dev->config, 1);

    /* XXX Software should program this register */
    dev->config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */

    d->ahci.irq = pci_allocate_irq(dev);

    pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO,
                     &d->ahci.idp);
    pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &d->ahci.mem);

    sata_cap_offset = pci_add_capability2(dev, PCI_CAP_ID_SATA,
                                          ICH9_SATA_CAP_OFFSET,
                                          SATA_CAP_SIZE, errp);
    if (sata_cap_offset < 0) {
        return;
    }

    sata_cap = dev->config + sata_cap_offset;
    pci_set_word(sata_cap + SATA_CAP_REV, 0x10);
    pci_set_long(sata_cap + SATA_CAP_BAR,
                 (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4));
    d->ahci.idp_offset = ICH9_IDP_INDEX;

    /* Although the AHCI 1.3 specification states that the first capability
     * should be PMCAP, the Intel ICH9 data sheet specifies that the ICH9
     * AHCI device puts the MSI capability first, pointing to 0x80. */
    msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false);
}
static void via_ide_realize(PCIDevice *dev, Error **errp)
{
    PCIIDEState *d = PCI_IDE(dev);
    uint8_t *pci_conf = dev->config;
    int i;

    pci_config_set_prog_interface(pci_conf, 0x8f); /* native PCI ATA mode */
    pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0);
    dev->wmask[PCI_INTERRUPT_LINE] = 0xf;

    qemu_register_reset(via_ide_reset, d);

    memory_region_init_io(&d->data_bar[0], OBJECT(d), &pci_ide_data_le_ops,
                          &d->bus[0], "via-ide0-data", 8);
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[0]);

    memory_region_init_io(&d->cmd_bar[0], OBJECT(d), &pci_ide_cmd_le_ops,
                          &d->bus[0], "via-ide0-cmd", 4);
    pci_register_bar(dev, 1, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[0]);

    memory_region_init_io(&d->data_bar[1], OBJECT(d), &pci_ide_data_le_ops,
                          &d->bus[1], "via-ide1-data", 8);
    pci_register_bar(dev, 2, PCI_BASE_ADDRESS_SPACE_IO, &d->data_bar[1]);

    memory_region_init_io(&d->cmd_bar[1], OBJECT(d), &pci_ide_cmd_le_ops,
                          &d->bus[1], "via-ide1-cmd", 4);
    pci_register_bar(dev, 3, PCI_BASE_ADDRESS_SPACE_IO, &d->cmd_bar[1]);

    bmdma_setup_bar(d);
    pci_register_bar(dev, 4, PCI_BASE_ADDRESS_SPACE_IO, &d->bmdma_bar);

    vmstate_register(DEVICE(dev), 0, &vmstate_ide_pci, d);

    for (i = 0; i < 2; i++) {
        ide_bus_new(&d->bus[i], sizeof(d->bus[i]), DEVICE(d), i, 2);
        ide_init2(&d->bus[i], qemu_allocate_irq(via_ide_set_irq, d, i));

        bmdma_init(&d->bus[i], &d->bmdma[i], d);
        d->bmdma[i].bus = &d->bus[i];
        ide_register_restart_cb(&d->bus[i]);
    }
}
static int pci_ich9_ahci_init(PCIDevice *dev)
{
    struct AHCIPCIState *d;
    int sata_cap_offset;
    uint8_t *sata_cap;

    d = DO_UPCAST(struct AHCIPCIState, card, dev);
    ahci_init(&d->ahci, &dev->qdev, 6);

    pci_config_set_prog_interface(d->card.config, AHCI_PROGMODE_MAJOR_REV_1);

    d->card.config[PCI_CACHE_LINE_SIZE] = 0x08; /* Cache line size */
    d->card.config[PCI_LATENCY_TIMER] = 0x00;   /* Latency timer */
    pci_config_set_interrupt_pin(d->card.config, 1);

    /* XXX Software should program this register */
    d->card.config[0x90] = 1 << 6; /* Address Map Register - AHCI mode */

    qemu_register_reset(ahci_reset, d);

    msi_init(dev, 0x50, 1, true, false);
    d->ahci.irq = d->card.irq[0];

    pci_register_bar(&d->card, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO,
                     &d->ahci.idp);
    pci_register_bar(&d->card, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &d->ahci.mem);

    sata_cap_offset = pci_add_capability(&d->card, PCI_CAP_ID_SATA,
                                         ICH9_SATA_CAP_OFFSET,
                                         SATA_CAP_SIZE);
    if (sata_cap_offset < 0) {
        return sata_cap_offset;
    }

    sata_cap = d->card.config + sata_cap_offset;
    pci_set_word(sata_cap + SATA_CAP_REV, 0x10);
    pci_set_long(sata_cap + SATA_CAP_BAR,
                 (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4));
    d->ahci.idp_offset = ICH9_IDP_INDEX;

    return 0;
}
static void pcie_aer_update_log(PCIDevice *dev, const PCIEAERErr *err)
{
    uint8_t *aer_cap = dev->config + dev->exp.aer_cap;
    uint8_t first_bit = ctz32(err->status);
    uint32_t errcap = pci_get_long(aer_cap + PCI_ERR_CAP);
    int i;

    assert(err->status);
    assert(!(err->status & (err->status - 1)));

    errcap &= ~(PCI_ERR_CAP_FEP_MASK | PCI_ERR_CAP_TLP);
    errcap |= PCI_ERR_CAP_FEP(first_bit);

    if (err->flags & PCIE_AER_ERR_HEADER_VALID) {
        for (i = 0; i < ARRAY_SIZE(err->header); ++i) {
            /* 7.10.8 Header Log Register */
            uint8_t *header_log =
                aer_cap + PCI_ERR_HEADER_LOG + i * sizeof err->header[0];
            stl_be_p(header_log, err->header[i]);
        }
    } else {
        assert(!(err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT));
        memset(aer_cap + PCI_ERR_HEADER_LOG, 0, PCI_ERR_HEADER_LOG_SIZE);
    }

    if ((err->flags & PCIE_AER_ERR_TLP_PREFIX_PRESENT) &&
        (pci_get_long(dev->config + dev->exp.exp_cap + PCI_EXP_DEVCAP2) &
         PCI_EXP_DEVCAP2_EETLPP)) {
        for (i = 0; i < ARRAY_SIZE(err->prefix); ++i) {
            /* 7.10.12 TLP Prefix Log Register */
            uint8_t *prefix_log =
                aer_cap + PCI_ERR_TLP_PREFIX_LOG + i * sizeof err->prefix[0];
            stl_be_p(prefix_log, err->prefix[i]);
        }
        errcap |= PCI_ERR_CAP_TLP;
    } else {
        memset(aer_cap + PCI_ERR_TLP_PREFIX_LOG, 0,
               PCI_ERR_TLP_PREFIX_LOG_SIZE);
    }
    pci_set_long(aer_cap + PCI_ERR_CAP, errcap);
}
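/*
 * The First Error Pointer written above is simply the bit index of the
 * single status bit, packed into the low five bits of the Advanced Error
 * Capabilities and Control register. A hedged worked example, assuming the
 * usual register definitions (PCI_ERR_UNC_ECRC is bit 19 of the
 * uncorrectable status); fep_for_ecrc_example() is illustrative only.
 */
static uint32_t fep_for_ecrc_example(void)
{
    uint32_t status = PCI_ERR_UNC_ECRC; /* 0x00080000: bit 19 */
    uint8_t first_bit = ctz32(status);  /* -> 19 */

    return PCI_ERR_CAP_FEP(first_bit);  /* 19 lands in the FEP field */
}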
/* via ide func */
static int vt82c686b_ide_initfn(PCIDevice *dev)
{
    PCIIDEState *d = DO_UPCAST(PCIIDEState, dev, dev);
    uint8_t *pci_conf = d->dev.config;

    pci_config_set_vendor_id(pci_conf, PCI_VENDOR_ID_VIA);
    pci_config_set_device_id(pci_conf, PCI_DEVICE_ID_VIA_IDE);
    pci_config_set_class(pci_conf, PCI_CLASS_STORAGE_IDE);
    pci_config_set_prog_interface(pci_conf, 0x8a); /* legacy ATA mode */
    pci_config_set_revision(pci_conf, 0x06); /* Revision 0.6 */
    pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x000000c0);

    qemu_register_reset(via_reset, d);
    pci_register_bar(&d->dev, 4, 0x10, PCI_BASE_ADDRESS_SPACE_IO, bmdma_map);

    vmstate_register(&dev->qdev, 0, &vmstate_ide_pci, d);

    vt82c686b_init_ports(d);

    return 0;
}
static void via_reset(void *opaque)
{
    PCIIDEState *d = opaque;
    PCIDevice *pd = PCI_DEVICE(d);
    uint8_t *pci_conf = pd->config;
    int i;

    for (i = 0; i < 2; i++) {
        ide_bus_reset(&d->bus[i]);
    }

    pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_WAIT);
    pci_set_word(pci_conf + PCI_STATUS,
                 PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_0, 0x000001f0);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_1, 0x000003f4);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_2, 0x00000170);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_3, 0x00000374);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_4, 0x0000cc01); /* BMIBA: 20-23h */
    pci_set_long(pci_conf + PCI_INTERRUPT_LINE, 0x0000010e);

    /* IDE chip enable, IDE configuration 1/2, IDE FIFO Configuration */
    pci_set_long(pci_conf + 0x40, 0x0a090600);
    /* IDE misc configuration 1/2/3 */
    pci_set_long(pci_conf + 0x44, 0x00c00068);
    /* IDE Timing control */
    pci_set_long(pci_conf + 0x48, 0xa8a8a8a8);
    /* IDE Address Setup Time */
    pci_set_long(pci_conf + 0x4c, 0x000000ff);
    /* UltraDMA Extended Timing Control */
    pci_set_long(pci_conf + 0x50, 0x07070707);
    /* UltraDMA FIFO Control */
    pci_set_long(pci_conf + 0x54, 0x00000004);
    /* IDE primary sector size */
    pci_set_long(pci_conf + 0x60, 0x00000200);
    /* IDE secondary sector size */
    pci_set_long(pci_conf + 0x68, 0x00000200);
    /* PCI PM Block */
    pci_set_long(pci_conf + 0xc0, 0x00020001);
}
/* ARI */
void pcie_ari_init(PCIDevice *dev, uint16_t offset, uint16_t nextfn)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_ARI, PCI_ARI_VER,
                        offset, PCI_ARI_SIZEOF);
    pci_set_long(dev->config + offset + PCI_ARI_CAP, (nextfn & 0xff) << 8);
}
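/*
 * The ARI Capability register keeps the Next Function Number in bits 15:8,
 * which is what the (nextfn & 0xff) << 8 above encodes. A hedged sketch of
 * the reverse decode; pcie_ari_nextfn_example() is illustrative only.
 */
static uint8_t pcie_ari_nextfn_example(PCIDevice *dev, uint16_t offset)
{
    uint32_t ari_cap = pci_get_long(dev->config + offset + PCI_ARI_CAP);

    return (ari_cap >> 8) & 0xff; /* Next Function Number, bits 15:8 */
}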
int pcie_aer_init(PCIDevice *dev, uint16_t offset)
{
    PCIExpressDevice *exp;

    pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, PCI_ERR_VER,
                        offset, PCI_ERR_SIZEOF);
    exp = &dev->exp;
    exp->aer_cap = offset;

    /* log_max is property */
    if (dev->exp.aer_log.log_max == PCIE_AER_LOG_MAX_UNSET) {
        dev->exp.aer_log.log_max = PCIE_AER_LOG_MAX_DEFAULT;
    }
    /* clip down the value to avoid unreasonable memory usage */
    if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
        return -EINVAL;
    }
    dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] *
                                     dev->exp.aer_log.log_max);

    pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                 PCI_ERR_UNC_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SEVERITY_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SUPPORTED);

    pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                               PCI_ERR_COR_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_MASK_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_SUPPORTED);

    /* capabilities and control. multiple header logging is supported */
    if (dev->exp.aer_log.log_max > 0) {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                     PCI_ERR_CAP_MHRC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                     PCI_ERR_CAP_MHRE);
    } else {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
    }

    switch (pcie_cap_get_type(dev)) {
    case PCI_EXP_TYPE_ROOT_PORT:
        /* this case will be set by pcie_aer_root_init() */
        /* fallthrough */
    case PCI_EXP_TYPE_DOWNSTREAM:
    case PCI_EXP_TYPE_UPSTREAM:
        pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                   PCI_BRIDGE_CTL_SERR);
        pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
        break;
    default:
        /* nothing */
        break;
    }
    return 0;
}
/*
 * 6.2.6 Error Message Control
 * Figure 6-3
 * root port part
 */
static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t cmd;
    uint8_t *aer_cap;
    uint32_t root_cmd;
    uint32_t root_status, prev_status;

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    aer_cap = dev->config + dev->exp.aer_cap;
    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

    if (cmd & PCI_COMMAND_SERR) {
        /* System Error.
         *
         * The way to report System Error is platform specific and
         * it isn't implemented in qemu right now.
         * So just discard the error for now.
         * An OS that cares about AER would receive errors via
         * the native AER mechanism, so this shouldn't matter.
         */
    }

    /* Error Message Received: Root Error Status register */
    switch (msg->severity) {
    case PCI_ERR_ROOT_CMD_COR_EN:
        if (root_status & PCI_ERR_ROOT_COR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC +
                         PCI_ERR_SRC_COR_OFFS, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_COR_RCV;
        break;
    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
        break;
    case PCI_ERR_ROOT_CMD_FATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
        }
        root_status |= PCI_ERR_ROOT_FATAL_RCV;
        break;
    default:
        abort();
        break;
    }
    if (pcie_aer_msg_is_uncor(msg)) {
        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC +
                         PCI_ERR_SRC_UNCOR_OFFS, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
    }
    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

    /* 6.2.4.1.2 Interrupt Generation */
    /* All the above did was set some bits in the status register,
     * specifically the ones that match the message severity.
     * The code below relies on this fact. */
    if (!(root_cmd & msg->severity) ||
        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
        /* Condition is not being set or was already true so nothing to do. */
        return;
    }

    pcie_aer_root_notify(dev);
}
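/*
 * The interrupt gating above relies on pcie_aer_status_to_cmd() translating
 * already-set status bits into their matching command-enable bits, so the
 * notification fires only on the first 0 -> 1 transition of an enabled
 * severity. A minimal sketch of that mapping:
 */
static uint32_t pcie_aer_status_to_cmd(uint32_t root_status)
{
    uint32_t cmd = 0;

    if (root_status & PCI_ERR_ROOT_COR_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_COR_EN;
    }
    if (root_status & PCI_ERR_ROOT_NONFATAL_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_NONFATAL_EN;
    }
    if (root_status & PCI_ERR_ROOT_FATAL_RCV) {
        cmd |= PCI_ERR_ROOT_CMD_FATAL_EN;
    }
    return cmd;
}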
/* Initialize the MSI-X structures */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_supported) {
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, cap_pos, MSIX_CAP_LENGTH);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev),
                          &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset,
                                &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev),
                          &msix_pba_mmio_ops, dev, "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}
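/*
 * Note the PBA sizing: the vector count is rounded up to a multiple of 64
 * and divided by 8, so e.g. 5 vectors still occupy 8 bytes of PBA. Below is
 * a hedged usage sketch of a device placing table and PBA in one memory
 * BAR; my_device_msix_setup and MY_MSIX_BAR_NR are hypothetical, the caller
 * is assumed to have created `bar` large enough (at least 0x1000 plus the
 * PBA size) and to register it as a BAR afterwards. With nentries <= 256
 * the table fits below the PBA, both offsets keep the low BIR bits clear,
 * and cap_pos 0 lets pci_add_capability() pick a free config offset.
 */
#define MY_MSIX_BAR_NR 1
static int my_device_msix_setup(PCIDevice *dev, MemoryRegion *bar,
                                unsigned short nentries)
{
    return msix_init(dev, nentries,
                     bar, MY_MSIX_BAR_NR, 0,      /* table at BAR start */
                     bar, MY_MSIX_BAR_NR, 0x1000, /* PBA in second page */
                     0);
}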
/* Initialize the SHPC structure in bridge's BAR. */
int shpc_init(PCIDevice *d, PCIBus *sec_bus, MemoryRegion *bar,
              unsigned offset, Error **errp)
{
    int i, ret;
    int nslots = SHPC_MAX_SLOTS; /* TODO: qdev property? */
    SHPCDevice *shpc = d->shpc = g_malloc0(sizeof(*d->shpc));

    shpc->sec_bus = sec_bus;
    ret = shpc_cap_add_config(d, errp);
    if (ret) {
        g_free(d->shpc);
        return ret;
    }
    if (nslots < SHPC_MIN_SLOTS) {
        return 0;
    }
    if (nslots > SHPC_MAX_SLOTS ||
        SHPC_IDX_TO_PCI(nslots) > PCI_SLOT_MAX) {
        /* TODO: report an error message that makes sense. */
        return -EINVAL;
    }
    shpc->nslots = nslots;
    shpc->config = g_malloc0(SHPC_SIZEOF(d));
    shpc->cmask = g_malloc0(SHPC_SIZEOF(d));
    shpc->wmask = g_malloc0(SHPC_SIZEOF(d));
    shpc->w1cmask = g_malloc0(SHPC_SIZEOF(d));

    shpc_reset(d);

    pci_set_long(shpc->config + SHPC_BASE_OFFSET, offset);

    pci_set_byte(shpc->wmask + SHPC_CMD_CODE, 0xff);
    pci_set_byte(shpc->wmask + SHPC_CMD_TRGT, SHPC_CMD_TRGT_MAX);
    pci_set_long(shpc->wmask + SHPC_SERR_INT,
                 SHPC_INT_DIS | SHPC_SERR_DIS |
                 SHPC_CMD_INT_DIS | SHPC_ARB_SERR_DIS);
    pci_set_long(shpc->w1cmask + SHPC_SERR_INT,
                 SHPC_CMD_DETECTED | SHPC_ARB_DETECTED);
    for (i = 0; i < nslots; ++i) {
        pci_set_byte(shpc->wmask + SHPC_SLOT_EVENT_SERR_INT_DIS(d, i),
                     SHPC_SLOT_EVENT_PRESENCE |
                     SHPC_SLOT_EVENT_ISOLATED_FAULT |
                     SHPC_SLOT_EVENT_BUTTON |
                     SHPC_SLOT_EVENT_MRL |
                     SHPC_SLOT_EVENT_CONNECTED_FAULT |
                     SHPC_SLOT_EVENT_MRL_SERR_DIS |
                     SHPC_SLOT_EVENT_CONNECTED_FAULT_SERR_DIS);
        pci_set_byte(shpc->w1cmask + SHPC_SLOT_EVENT_LATCH(i),
                     SHPC_SLOT_EVENT_PRESENCE |
                     SHPC_SLOT_EVENT_ISOLATED_FAULT |
                     SHPC_SLOT_EVENT_BUTTON |
                     SHPC_SLOT_EVENT_MRL |
                     SHPC_SLOT_EVENT_CONNECTED_FAULT);
    }

    /* TODO: init cmask */
    memory_region_init_io(&shpc->mmio, OBJECT(d), &shpc_mmio_ops,
                          d, "shpc-mmio", SHPC_SIZEOF(d));
    shpc_cap_update_dword(d);
    memory_region_add_subregion(bar, offset, &shpc->mmio);

    qbus_set_hotplug_handler(BUS(sec_bus), DEVICE(d), NULL);

    d->cap_present |= QEMU_PCI_CAP_SHPC;
    return 0;
}
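/*
 * The bounds check above assumes a fixed mapping between SHPC slot indices
 * (counted from 0) and PCI slot numbers (counted from 1). A hedged sketch
 * of definitions consistent with that check:
 */
#define SHPC_IDX_TO_PCI(slot)      ((slot) + 1)
#define SHPC_PCI_TO_IDX(pci_slot)  ((pci_slot) - 1)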
/* Update dword data capability register */
static void shpc_cap_update_dword(PCIDevice *d)
{
    unsigned data;

    data = shpc_read(d, shpc_cap_dword(d) * 4, 4);
    pci_set_long(d->config + d->shpc->cap + SHPC_CAP_DWORD_DATA, data);
}
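/*
 * The DWORD select/data pair lets config-space software window into the
 * SHPC working register set. A minimal sketch of the selector read assumed
 * above, where SHPC_CAP_DWORD_SELECT is the byte register naming which
 * dword of the working registers to mirror:
 */
static uint8_t shpc_cap_dword(PCIDevice *d)
{
    return pci_get_byte(d->config + d->shpc->cap + SHPC_CAP_DWORD_SELECT);
}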
int pcie_aer_init(PCIDevice *dev, uint8_t cap_ver, uint16_t offset,
                  uint16_t size, Error **errp)
{
    pcie_add_capability(dev, PCI_EXT_CAP_ID_ERR, cap_ver, offset, size);
    dev->exp.aer_cap = offset;

    /* clip down the value to avoid unreasonable memory usage */
    if (dev->exp.aer_log.log_max > PCIE_AER_LOG_MAX_LIMIT) {
        error_setg(errp, "Invalid aer_log_max %d. The max number of aer log "
                   "is %d", dev->exp.aer_log.log_max, PCIE_AER_LOG_MAX_LIMIT);
        return -EINVAL;
    }
    dev->exp.aer_log.log = g_malloc0(sizeof dev->exp.aer_log.log[0] *
                                     dev->exp.aer_log.log_max);

    pci_set_long(dev->w1cmask + offset + PCI_ERR_UNCOR_STATUS,
                 PCI_ERR_UNC_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SEVERITY_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_UNCOR_SEVER,
                 PCI_ERR_UNC_SUPPORTED);

    pci_long_test_and_set_mask(dev->w1cmask + offset + PCI_ERR_COR_STATUS,
                               PCI_ERR_COR_SUPPORTED);

    pci_set_long(dev->config + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_MASK_DEFAULT);
    pci_set_long(dev->wmask + offset + PCI_ERR_COR_MASK,
                 PCI_ERR_COR_SUPPORTED);

    /* capabilities and control. multiple header logging is supported */
    if (dev->exp.aer_log.log_max > 0) {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC |
                     PCI_ERR_CAP_MHRC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE |
                     PCI_ERR_CAP_MHRE);
    } else {
        pci_set_long(dev->config + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENC | PCI_ERR_CAP_ECRC_CHKC);
        pci_set_long(dev->wmask + offset + PCI_ERR_CAP,
                     PCI_ERR_CAP_ECRC_GENE | PCI_ERR_CAP_ECRC_CHKE);
    }

    switch (pcie_cap_get_type(dev)) {
    case PCI_EXP_TYPE_ROOT_PORT:
        /* this case will be set by pcie_aer_root_init() */
        /* fallthrough */
    case PCI_EXP_TYPE_DOWNSTREAM:
    case PCI_EXP_TYPE_UPSTREAM:
        pci_word_test_and_set_mask(dev->wmask + PCI_BRIDGE_CONTROL,
                                   PCI_BRIDGE_CTL_SERR);
        pci_long_test_and_set_mask(dev->w1cmask + PCI_STATUS,
                                   PCI_SEC_STATUS_RCV_SYSTEM_ERROR);
        break;
    default:
        /* nothing */
        break;
    }
    return 0;
}
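/*
 * Hedged usage sketch: wiring AER into a PCIe device model's realize
 * callback. my_pcie_dev_realize is hypothetical; 0x100 is the first
 * extended-capability offset, and the PCIe capability (e.g. via
 * pcie_endpoint_cap_init()) must already be in place before AER is added.
 */
static void my_pcie_dev_realize(PCIDevice *dev, Error **errp)
{
    /* ... device-specific setup and pcie_endpoint_cap_init() ... */
    if (pcie_aer_init(dev, PCI_ERR_VER, 0x100, PCI_ERR_SIZEOF, errp) < 0) {
        return; /* errp has been set by pcie_aer_init() */
    }
}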