/*
 * Reset the hot-plug slot registers in the PCIe capability of @dev.
 *
 * Clears all Slot Control interrupt-enable and indicator bits, forces the
 * power and attention indicators to the "off" encoding, and clears the
 * write-1-to-clear Slot Status event bits.  Finally re-evaluates whether a
 * hot-plug interrupt/event should currently be asserted.
 */
void pcie_cap_slot_reset(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    PCIE_DEV_PRINTF(dev, "reset\n");

    /* Drop indicator control fields and all hot-plug interrupt enables. */
    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_EIC |
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC |
                                 PCI_EXP_SLTCTL_HPIE |
                                 PCI_EXP_SLTCTL_CCIE |
                                 PCI_EXP_SLTCTL_PDCE |
                                 PCI_EXP_SLTCTL_ABPE);
    /* Both indicators come back up in the "off" state. */
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PIC_OFF |
                               PCI_EXP_SLTCTL_AIC_OFF);

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_EIS |/* on reset,
                                                        the lock is released */
                                 PCI_EXP_SLTSTA_CC |
                                 PCI_EXP_SLTSTA_PDC |
                                 PCI_EXP_SLTSTA_ABP);

    hotplug_event_update_event_status(dev);
}
/*
 * Handle a guest config-space write that may touch the hot-plug slot
 * registers (Slot Status / Slot Control) of @dev.
 *
 * @addr/@len describe the written range; @val is the written value.
 * Reacts to electromechanical-interlock toggles, performs the actual
 * device unplug when the guest powers the slot off, and raises a
 * Command Completed event for every Slot Control write.
 */
void pcie_cap_slot_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    /* Snapshot of Slot Status taken before this write is applied. */
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    /* A write to Slot Status may have cleared (w1c) pending event bits. */
    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
        hotplug_event_clear(dev);
    }

    /* Everything below only concerns Slot Control writes. */
    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
        return;
    }

    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_EIC)) {
        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
                        "sltsta -> 0x%02"PRIx16"\n",
                        sltsta);
    }

    /*
     * If the slot is populated, power indicator is off and power
     * controller is off, it is safe to detach the devices.
     */
    if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
        ((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) {
        PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
        pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
                            pcie_unplug_device, NULL);

        /* Slot is now empty; record a Presence Detect Changed event. */
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                     PCI_EXP_SLTSTA_PDS);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDC);
    }

    hotplug_event_notify(dev);

    /*
     * 6.7.3.2 Command Completed Events
     *
     * Software issues a command to a hot-plug capable Downstream Port by
     * issuing a write transaction that targets any portion of the Port's Slot
     * Control register. A single write to the Slot Control register is
     * considered to be a single command, even if the write affects more than
     * one field in the Slot Control register. In response to this transaction,
     * the Port must carry out the requested actions and then set the
     * associated status field for the command completed event.
     */

    /* Real hardware might take a while to complete requested command because
     * physical movement would be involved like locking the electromechanical
     * lock.  However in our case, command is completed instantaneously above,
     * so send a command completion event right now.
     */
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
}
/* pci express slot for pci express root/downstream port
   PCI express capability slot registers */
/*
 * Initialize the hot-plug slot registers of a root/downstream port.
 *
 * @slot is the Physical Slot Number advertised in Slot Capabilities.
 * Sets up Slot Capabilities, the guest-writable mask (wmask) for Slot
 * Control, the write-1-to-clear mask (w1cmask) for Slot Status event
 * bits, and registers @dev as the hotplug handler of its secondary bus.
 */
void pcie_cap_slot_init(PCIDevice *dev, uint16_t slot)
{
    uint32_t pos = dev->exp.exp_cap;

    /* Advertise that this port implements a slot. */
    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_FLAGS,
                               PCI_EXP_FLAGS_SLOT);

    /* Rebuild Slot Capabilities, preserving only the slot-number field. */
    pci_long_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                 ~PCI_EXP_SLTCAP_PSN);
    pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                               (slot << PCI_EXP_SLTCAP_PSN_SHIFT) |
                               PCI_EXP_SLTCAP_EIP |
                               PCI_EXP_SLTCAP_HPS |
                               PCI_EXP_SLTCAP_HPC |
                               PCI_EXP_SLTCAP_PIP |
                               PCI_EXP_SLTCAP_AIP |
                               PCI_EXP_SLTCAP_ABP);

    /* Optional power controller: advertise it and make PCC guest-writable. */
    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        pci_long_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCAP,
                                   PCI_EXP_SLTCAP_PCP);
        pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_PCC);
        pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                                   PCI_EXP_SLTCTL_PCC);
    }

    /* Indicators start in the "off" encoding. */
    pci_word_test_and_clear_mask(dev->config + pos + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC);
    pci_word_test_and_set_mask(dev->config + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PIC_OFF |
                               PCI_EXP_SLTCTL_AIC_OFF);
    /* Let the guest program indicators and hot-plug interrupt enables. */
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_PIC |
                               PCI_EXP_SLTCTL_AIC |
                               PCI_EXP_SLTCTL_HPIE |
                               PCI_EXP_SLTCTL_CCIE |
                               PCI_EXP_SLTCTL_PDCE |
                               PCI_EXP_SLTCTL_ABPE);
    /* Although reading PCI_EXP_SLTCTL_EIC returns always 0,
     * make the bit writable here in order to detect 1b is written.
     * pcie_cap_slot_write_config() test-and-clear the bit, so
     * this bit always returns 0 to the guest.
     */
    pci_word_test_and_set_mask(dev->wmask + pos + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_EIC);

    /* Slot Status event bits are write-1-to-clear. */
    pci_word_test_and_set_mask(dev->w1cmask + pos + PCI_EXP_SLTSTA,
                               PCI_EXP_HP_EV_SUPPORTED);

    dev->exp.hpev_notified = false;
    qbus_set_hotplug_handler(BUS(pci_bridge_get_sec_bus(PCI_BRIDGE(dev))),
                             DEVICE(dev), NULL);
}
/*
 * Sync the PCIe Link Status negotiated speed and width of a bridge with the
 * downstream device. If downstream device is not present, re-write with the
 * Link Capability fields. If downstream device reports invalid width or
 * speed, replace with minimum values (LnkSta fields are RsvdZ on VFs but such
 * values interfere with PCIe native hotplug detecting new devices). Limit
 * width and speed to bridge capabilities for compatibility. Use config_read
 * to access the downstream device since it could be an assigned device with
 * volatile link information.
 */
void pcie_sync_bridge_lnk(PCIDevice *bridge_dev)
{
    PCIBridge *br = PCI_BRIDGE(bridge_dev);
    PCIBus *bus = pci_bridge_get_sec_bus(br);
    /* Downstream ports enforce device/function 0 on the secondary bus. */
    PCIDevice *target = bus->devices[0];
    uint8_t *exp_cap = bridge_dev->config + bridge_dev->exp.exp_cap;
    /* Only the low word of LNKCAP is needed: it holds SLS and MLW. */
    uint16_t lnksta, lnkcap = pci_get_word(exp_cap + PCI_EXP_LNKCAP);

    if (!target || !target->exp.exp_cap) {
        /* Nothing (PCIe) downstream: report our own capability values. */
        lnksta = lnkcap;
    } else {
        lnksta = target->config_read(target,
                                     target->exp.exp_cap + PCI_EXP_LNKSTA,
                                     sizeof(lnksta));

        /* Clamp negotiated width into [x1, our max width]. */
        if ((lnksta & PCI_EXP_LNKSTA_NLW) > (lnkcap & PCI_EXP_LNKCAP_MLW)) {
            lnksta &= ~PCI_EXP_LNKSTA_NLW;
            lnksta |= lnkcap & PCI_EXP_LNKCAP_MLW;
        } else if (!(lnksta & PCI_EXP_LNKSTA_NLW)) {
            lnksta |= QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1);
        }

        /* Clamp negotiated speed into [2.5GT/s, our max speed]. */
        if ((lnksta & PCI_EXP_LNKSTA_CLS) > (lnkcap & PCI_EXP_LNKCAP_SLS)) {
            lnksta &= ~PCI_EXP_LNKSTA_CLS;
            lnksta |= lnkcap & PCI_EXP_LNKCAP_SLS;
        } else if (!(lnksta & PCI_EXP_LNKSTA_CLS)) {
            lnksta |= QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT);
        }
    }

    /* Rewrite only the speed/width fields of our Link Status. */
    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKSTA,
                                 PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA, lnksta &
                               (PCI_EXP_LNKSTA_CLS | PCI_EXP_LNKSTA_NLW));
}
/*
 * Write @value into the slot-status field selected by @msk for @slot.
 *
 * @msk is a contiguous bit-field mask within the 16-bit SHPC slot status
 * register; @value is the field value before shifting into position.
 */
static void shpc_set_status(SHPCDevice *shpc,
                            int slot, uint8_t value, uint16_t msk)
{
    uint8_t *reg = shpc->config + SHPC_SLOT_STATUS(slot);
    unsigned shift = ctz32(msk);

    /* Clear the whole field first, then OR in the shifted new value. */
    pci_word_test_and_clear_mask(reg, msk);
    pci_word_test_and_set_mask(reg, value << shift);
}
/*
 * Realize handler for the generic PCIe root port.
 *
 * Chains to the parent class realize, installs the vendor-specific
 * resource-reservation capability from the user-configured @res_reserve
 * properties, and — when no I/O space is reserved — makes the I/O
 * base/limit registers read-only zero so the guest cannot enable
 * I/O forwarding.
 */
static void gen_rp_realize(DeviceState *dev, Error **errp)
{
    PCIDevice *d = PCI_DEVICE(dev);
    GenPCIERootPort *grp = GEN_PCIE_ROOT_PORT(d);
    PCIERootPortClass *rpc = PCIE_ROOT_PORT_GET_CLASS(d);
    Error *local_err = NULL;

    rpc->parent_realize(dev, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        return;
    }

    int rc = pci_bridge_qemu_reserve_cap_init(d, 0,
                                              grp->res_reserve, errp);
    if (rc < 0) {
        /* Undo the parent realize; errp was already set by the cap init. */
        rpc->parent_class.exit(d);
        return;
    }

    if (!grp->res_reserve.io) {
        /* No I/O window reserved: lock out I/O decoding entirely. */
        pci_word_test_and_clear_mask(d->wmask + PCI_COMMAND,
                                     PCI_COMMAND_IO);
        d->wmask[PCI_IO_BASE] = 0;
        d->wmask[PCI_IO_LIMIT] = 0;
    }
}
/*
 * Reset the hot-plug slot registers of a root/downstream port.
 *
 * Clears Slot Control interrupt enables and indicator fields, and when a
 * power controller is present (QEMU_PCIE_SLTCAP_PCP) re-derives the power
 * controller and power indicator state from whether the slot is occupied.
 * Slot Status event bits are cleared and the pending-event state is
 * re-evaluated.
 */
void pcie_cap_slot_reset(PCIDevice *dev)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t port_type = pcie_cap_get_type(dev);

    /* Only ports that can own a slot may be reset through this path. */
    assert(port_type == PCI_EXP_TYPE_DOWNSTREAM ||
           port_type == PCI_EXP_TYPE_ROOT_PORT);

    PCIE_DEV_PRINTF(dev, "reset\n");

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                 PCI_EXP_SLTCTL_EIC |
                                 PCI_EXP_SLTCTL_PIC |
                                 PCI_EXP_SLTCTL_AIC |
                                 PCI_EXP_SLTCTL_HPIE |
                                 PCI_EXP_SLTCTL_CCIE |
                                 PCI_EXP_SLTCTL_PDCE |
                                 PCI_EXP_SLTCTL_ABPE);
    pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                               PCI_EXP_SLTCTL_AIC_OFF);

    if (dev->cap_present & QEMU_PCIE_SLTCAP_PCP) {
        /* Downstream ports enforce device number 0. */
        bool populated = pci_bridge_get_sec_bus(PCI_BRIDGE(dev))->devices[0];
        uint16_t pic;

        /* Power on (PCC=0) when occupied, power off (PCC=1) when empty. */
        if (populated) {
            pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                         PCI_EXP_SLTCTL_PCC);
        } else {
            pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL,
                                       PCI_EXP_SLTCTL_PCC);
        }

        /* Power indicator tracks the power controller state. */
        pic = populated ? PCI_EXP_SLTCTL_PIC_ON : PCI_EXP_SLTCTL_PIC_OFF;
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTCTL, pic);
    }

    pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                 PCI_EXP_SLTSTA_EIS |/* on reset,
                                                        the lock is released */
                                 PCI_EXP_SLTSTA_CC |
                                 PCI_EXP_SLTSTA_PDC |
                                 PCI_EXP_SLTSTA_ABP);

    hotplug_event_update_event_status(dev);
}
/* 7.8.2 PCI Express Capabilities Register: Interrupt Message Number
 *
 * Store @vector into the Interrupt Message Number field of the PCI
 * Express Capabilities register of @dev.
 */
void pcie_cap_flags_set_vector(PCIDevice *dev, uint8_t vector)
{
    uint8_t *flags = dev->config + dev->exp.exp_cap + PCI_EXP_FLAGS;

    /* The Interrupt Message Number field is only 5 bits wide. */
    assert(vector < 32);

    pci_word_test_and_clear_mask(flags, PCI_EXP_FLAGS_IRQ);
    pci_word_test_and_set_mask(flags, vector << PCI_EXP_FLAGS_IRQ_SHIFT);
}
/*
 * Disable address forwarding on a PCI-to-PCI bridge.
 *
 * For each window (I/O, memory, prefetchable memory) the base is forced
 * above the limit, which per the PCI-to-PCI Bridge spec means the window
 * decodes nothing.  The 64-bit prefetchable upper registers are zeroed so
 * base > limit also holds for the full 64-bit comparison.
 */
void pci_bridge_disable_base_limit(PCIDevice *dev)
{
    uint8_t *conf = dev->config;

    pci_byte_test_and_set_mask(conf + PCI_IO_BASE,
                               PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_word_test_and_set_mask(conf + PCI_MEMORY_BASE,
                               PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_set_mask(conf + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    /*
     * PCI_PREF_BASE_UPPER32/PCI_PREF_LIMIT_UPPER32 are 32-bit registers;
     * use pci_set_long (not pci_set_word) so the full register is cleared,
     * consistent with pci_bridge_reset().
     */
    pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0);
    pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0);
}
/*
 * Handle a config write that may have set the Function Level Reset /
 * Bridge Configuration Retry bit in Device Control.
 *
 * If the bit is now set, perform a full device reset.  The bit is cleared
 * only afterwards, so reset handlers can inspect it to distinguish an FLR
 * from other reset causes.
 */
void pcie_cap_flr_write_config(PCIDevice *dev, uint32_t addr,
                               uint32_t val, int len)
{
    uint8_t *devctl = dev->config + dev->exp.exp_cap + PCI_EXP_DEVCTL;
    if (pci_get_word(devctl) & PCI_EXP_DEVCTL_BCR_FLR) {
        /* Clear PCI_EXP_DEVCTL_BCR_FLR after invoking the reset handler
           so the handler can detect FLR by looking at this bit. */
        pci_device_reset(dev);
        pci_word_test_and_clear_mask(devctl, PCI_EXP_DEVCTL_BCR_FLR);
    }
}
/* reset bridge specific configuration registers */
/*
 * Reset the type-1 (bridge) header registers of @qdev: bus numbers,
 * secondary latency timer, the forwarding windows, and bridge control.
 */
void pci_bridge_reset(DeviceState *qdev)
{
    PCIDevice *dev = PCI_DEVICE(qdev);
    uint8_t *conf = dev->config;

    conf[PCI_PRIMARY_BUS] = 0;
    conf[PCI_SECONDARY_BUS] = 0;
    conf[PCI_SUBORDINATE_BUS] = 0;
    conf[PCI_SEC_LATENCY_TIMER] = 0;

    /*
     * the default values for base/limit registers aren't specified
     * in the PCI-to-PCI-bridge spec. So we don't touch them here.
     * Each implementation can override it.
     * typical implementation does
     * zero base/limit registers or
     * disable forwarding: pci_bridge_disable_base_limit()
     * If disable forwarding is wanted, call pci_bridge_disable_base_limit()
     * after this function.
     */
    pci_byte_test_and_clear_mask(conf + PCI_IO_BASE,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_BASE,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_BASE,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_long(conf + PCI_PREF_BASE_UPPER32, 0);
    pci_set_long(conf + PCI_PREF_LIMIT_UPPER32, 0);

    pci_set_word(conf + PCI_BRIDGE_CONTROL, 0);
}
/*
 * Hotplug callback for the slot owned by @qdev (a PCIe port): update
 * Presence Detect State and raise Presence Detect Changed events as
 * @pci_dev is plugged into or unplugged from the slot.
 *
 * Returns 0 on success, -EBUSY if the electromechanical interlock is
 * engaged (error propagated up to qdev and then to HMP/QMP).
 */
static int pcie_cap_slot_hotplug(DeviceState *qdev,
                                 PCIDevice *pci_dev, PCIHotplugState state)
{
    PCIDevice *d = PCI_DEVICE(qdev);
    uint8_t *exp_cap = d->config + d->exp.exp_cap;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    /* Don't send event when device is enabled during qemu machine creation:
     * it is present on boot, no hotplug event is necessary. We do send an
     * event when the device is disabled later. */
    if (state == PCI_COLDPLUG_ENABLED) {
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDS);
        return 0;
    }

    PCIE_DEV_PRINTF(pci_dev, "hotplug state: %d\n", state);
    if (sltsta & PCI_EXP_SLTSTA_EIS) {
        /* the slot is electromechanically locked.
         * This error is propagated up to qdev and then to HMP/QMP.
         */
        return -EBUSY;
    }

    /* TODO: multifunction hot-plug.
     * Right now, only a device of function = 0 is allowed to be
     * hot plugged/unplugged.
     */
    assert(PCI_FUNC(pci_dev->devfn) == 0);

    if (state == PCI_HOTPLUG_ENABLED) {
        /* Mark the slot occupied and notify the guest. */
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                   PCI_EXP_SLTSTA_PDS);
        pcie_cap_slot_event(d, PCI_EXP_HP_EV_PDC);
    } else {
        /* Detach the device first, then mark the slot empty and notify. */
        object_unparent(OBJECT(pci_dev));
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                     PCI_EXP_SLTSTA_PDS);
        pcie_cap_slot_event(d, PCI_EXP_HP_EV_PDC);
    }
    return 0;
}
/*
 * Fill the link-related capability registers (LNKCAP, LNKCAP2, LNKSTA,
 * LNKCTL2) of @dev from the width/speed properties of its PCIESlot.
 * A no-op for devices that are not PCIESlots.
 */
static void pcie_cap_fill_slot_lnk(PCIDevice *dev)
{
    PCIESlot *s = (PCIESlot *)object_dynamic_cast(OBJECT(dev), TYPE_PCIE_SLOT);
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;

    /* Skip anything that isn't a PCIESlot */
    if (!s) {
        return;
    }

    /* Clear and fill LNKCAP from what was configured above */
    pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP,
                                 PCI_EXP_LNKCAP_MLW | PCI_EXP_LNKCAP_SLS);
    pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                               QEMU_PCI_EXP_LNKCAP_MLW(s->width) |
                               QEMU_PCI_EXP_LNKCAP_MLS(s->speed));

    /*
     * Link bandwidth notification is required for all root ports and
     * downstream ports supporting links wider than x1 or multiple link
     * speeds.
     */
    if (s->width > QEMU_PCI_EXP_LNK_X1 ||
        s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNKCAP_LBNC);
    }

    if (s->speed > QEMU_PCI_EXP_LNK_2_5GT) {
        /*
         * Hot-plug capable downstream ports and downstream ports supporting
         * link speeds greater than 5GT/s must hardwire PCI_EXP_LNKCAP_DLLLARC
         * to 1b.  PCI_EXP_LNKCAP_DLLLARC implies PCI_EXP_LNKSTA_DLLLA, which
         * we also hardwire to 1b here.  2.5GT/s hot-plug slots should also
         * technically implement this, but it's not done here for
         * compatibility.
         */
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP,
                                   PCI_EXP_LNKCAP_DLLLARC);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
                                   PCI_EXP_LNKSTA_DLLLA);

        /*
         * Target Link Speed defaults to the highest link speed supported by
         * the component.  2.5GT/s devices are permitted to hardwire to zero.
         */
        pci_word_test_and_clear_mask(exp_cap + PCI_EXP_LNKCTL2,
                                     PCI_EXP_LNKCTL2_TLS);
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKCTL2,
                                   QEMU_PCI_EXP_LNKCAP_MLS(s->speed) &
                                   PCI_EXP_LNKCTL2_TLS);
    }

    /*
     * 2.5 & 5.0GT/s can be fully described by LNKCAP, but 8.0GT/s is
     * actually a reference to the highest bit supported in this register.
     * We assume the device supports all link speeds.
     */
    if (s->speed > QEMU_PCI_EXP_LNK_5GT) {
        pci_long_test_and_clear_mask(exp_cap + PCI_EXP_LNKCAP2, ~0U);
        pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                   PCI_EXP_LNKCAP2_SLS_2_5GB |
                                   PCI_EXP_LNKCAP2_SLS_5_0GB |
                                   PCI_EXP_LNKCAP2_SLS_8_0GB);
        if (s->speed > QEMU_PCI_EXP_LNK_8GT) {
            pci_long_test_and_set_mask(exp_cap + PCI_EXP_LNKCAP2,
                                       PCI_EXP_LNKCAP2_SLS_16_0GB);
        }
    }
}
/*
 * Execute the SHPC command currently latched in the controller's
 * command-code register.
 *
 * Decodes the code into per-slot commands (0x00-0x3f), bus-speed changes
 * (0x40-0x47), "power only all slots" (0x48) or "enable all slots" (0x49);
 * any other code records an invalid-command status.  Always ends by
 * setting the command-detected bit in the SERR/interrupt register.
 */
static void shpc_command(SHPCDevice *shpc)
{
    uint8_t code = pci_get_byte(shpc->config + SHPC_CMD_CODE);
    uint8_t speed;
    uint8_t target;
    uint8_t attn;
    uint8_t power;
    uint8_t state;
    int i;

    /* Clear status from the previous command. */
    pci_word_test_and_clear_mask(shpc->config + SHPC_CMD_STATUS,
                                 SHPC_CMD_STATUS_BUSY |
                                 SHPC_CMD_STATUS_MRL_OPEN |
                                 SHPC_CMD_STATUS_INVALID_CMD |
                                 SHPC_CMD_STATUS_INVALID_MODE);
    switch (code) {
    case 0x00 ... 0x3f:
        /* Per-slot command: fields packed into the code byte. */
        target = shpc->config[SHPC_CMD_TRGT] & SHPC_CMD_TRGT_MAX;
        state = (code & SHPC_SLOT_STATE_MASK) >> SHPC_SLOT_STATE_SHIFT;
        power = (code & SHPC_SLOT_PWR_LED_MASK) >> SHPC_SLOT_PWR_LED_SHIFT;
        attn = (code & SHPC_SLOT_ATTN_LED_MASK) >> SHPC_SLOT_ATTN_LED_SHIFT;
        shpc_slot_command(shpc, target, state, power, attn);
        break;
    case 0x40 ... 0x47:
        speed = code & SHPC_SEC_BUS_MASK;
        shpc_set_sec_bus_speed(shpc, speed);
        break;
    case 0x48:
        /* Power only all slots */
        /* first verify no slots are enabled */
        for (i = 0; i < shpc->nslots; ++i) {
            state = shpc_get_status(shpc, i, SHPC_SLOT_STATE_MASK);
            if (state == SHPC_STATE_ENABLED) {
                shpc_invalid_command(shpc);
                goto done;
            }
        }
        /* Power up slots whose MRL is closed; leave the rest off. */
        for (i = 0; i < shpc->nslots; ++i) {
            if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) {
                shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
                                  SHPC_STATE_PWRONLY, SHPC_LED_ON,
                                  SHPC_LED_NO);
            } else {
                shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
                                  SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO);
            }
        }
        break;
    case 0x49:
        /* Enable all slots */
        /* TODO: Spec says this shall fail if some are already enabled.
         * This doesn't make sense - why not? a spec bug?
         */
        for (i = 0; i < shpc->nslots; ++i) {
            state = shpc_get_status(shpc, i, SHPC_SLOT_STATE_MASK);
            if (state == SHPC_STATE_ENABLED) {
                shpc_invalid_command(shpc);
                goto done;
            }
        }
        /* Enable slots whose MRL is closed; leave the rest off. */
        for (i = 0; i < shpc->nslots; ++i) {
            if (!(shpc_get_status(shpc, i, SHPC_SLOT_STATUS_MRL_OPEN))) {
                shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
                                  SHPC_STATE_ENABLED, SHPC_LED_ON,
                                  SHPC_LED_NO);
            } else {
                shpc_slot_command(shpc, i + SHPC_CMD_TRGT_MIN,
                                  SHPC_STATE_NO, SHPC_LED_OFF, SHPC_LED_NO);
            }
        }
        break;
    default:
        shpc_invalid_command(shpc);
        break;
    }
done:
    pci_long_test_and_set_mask(shpc->config + SHPC_SERR_INT,
                               SHPC_CMD_DETECTED);
}