Example #1
static void xen_platform_realize(PCIDevice *dev, Error **errp)
{
    PCIXenPlatformState *d = XEN_PLATFORM(dev);
    uint8_t *pci_conf;

    /* Device will crash on reset if xen is not initialized */
    if (!xen_enabled()) {
        error_setg(errp, "xen-platform device requires the Xen accelerator");
        return;
    }

    pci_conf = dev->config;

    pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY);

    pci_config_set_prog_interface(pci_conf, 0);

    pci_conf[PCI_INTERRUPT_PIN] = 1;

    platform_ioport_bar_setup(d);
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->bar);

    /* reserve 16MB mmio address for shared memory */
    platform_mmio_setup(d);
    pci_register_bar(dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH,
                     &d->mmio_bar);

    platform_fixed_ioport_init(d);
}
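
All of these examples funnel their register updates through QEMU's config-space accessor helpers. For reference, here is a minimal sketch of how pci_set_word and pci_get_word are typically defined in include/hw/pci/pci.h, assuming the generic little-endian store/load helpers stw_le_p() and lduw_le_p(); treat the exact bodies as illustrative rather than authoritative.

/* Sketch of the config-space word accessors (cf. include/hw/pci/pci.h).
 * PCI configuration space is little-endian regardless of host byte order,
 * so the helpers wrap the little-endian store/load primitives. */
static inline void pci_set_word(uint8_t *config, uint16_t val)
{
    stw_le_p(config, val);
}

static inline uint16_t pci_get_word(const uint8_t *config)
{
    return lduw_le_p(config);
}

The pci_set_byte/pci_set_long variants follow the same pattern with 8-bit and 32-bit accesses.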
Example #2
static int versatile_pci_host_init(PCIDevice *d)
{
    pci_set_word(d->config + PCI_STATUS,
                 PCI_STATUS_66MHZ | PCI_STATUS_DEVSEL_MEDIUM);
    pci_set_byte(d->config + PCI_LATENCY_TIMER, 0x10);
    return 0;
}
Example #3
void pcie_cap_slot_write_config(PCIDevice *dev,
                                uint32_t addr, uint32_t val, int len)
{
    uint32_t pos = dev->exp.exp_cap;
    uint8_t *exp_cap = dev->config + pos;
    uint16_t sltsta = pci_get_word(exp_cap + PCI_EXP_SLTSTA);

    if (ranges_overlap(addr, len, pos + PCI_EXP_SLTSTA, 2)) {
        hotplug_event_clear(dev);
    }

    if (!ranges_overlap(addr, len, pos + PCI_EXP_SLTCTL, 2)) {
        return;
    }

    if (pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTCTL,
                                     PCI_EXP_SLTCTL_EIC)) {
        sltsta ^= PCI_EXP_SLTSTA_EIS; /* toggle PCI_EXP_SLTSTA_EIS bit */
        pci_set_word(exp_cap + PCI_EXP_SLTSTA, sltsta);
        PCIE_DEV_PRINTF(dev, "PCI_EXP_SLTCTL_EIC: "
                        "sltsta -> 0x%02"PRIx16"\n",
                        sltsta);
    }

    /*
     * If the slot is populated and both the power indicator and the
     * power controller are off, it is safe to detach the devices.
     */
    if ((sltsta & PCI_EXP_SLTSTA_PDS) && (val & PCI_EXP_SLTCTL_PCC) &&
        ((val & PCI_EXP_SLTCTL_PIC_OFF) == PCI_EXP_SLTCTL_PIC_OFF)) {
            PCIBus *sec_bus = pci_bridge_get_sec_bus(PCI_BRIDGE(dev));
            pci_for_each_device(sec_bus, pci_bus_num(sec_bus),
                                pcie_unplug_device, NULL);

            pci_word_test_and_clear_mask(exp_cap + PCI_EXP_SLTSTA,
                                         PCI_EXP_SLTSTA_PDS);
            pci_word_test_and_set_mask(exp_cap + PCI_EXP_SLTSTA,
                                       PCI_EXP_SLTSTA_PDC);
    }

    hotplug_event_notify(dev);

    /* 
     * 6.7.3.2 Command Completed Events
     *
     * Software issues a command to a hot-plug capable Downstream Port by
     * issuing a write transaction that targets any portion of the Port’s Slot
     * Control register. A single write to the Slot Control register is
     * considered to be a single command, even if the write affects more than
     * one field in the Slot Control register. In response to this transaction,
     * the Port must carry out the requested actions and then set the
     * associated status field for the command completed event. */

    /* Real hardware might take a while to complete requested command because
     * physical movement would be involved like locking the electromechanical
     * lock.  However in our case, command is completed instantaneously above,
     * so send a command completion event right now.
     */
    pcie_cap_slot_event(dev, PCI_EXP_HP_EV_CCI);
}
Example #4
static int xen_platform_initfn(PCIDevice *dev)
{
    PCIXenPlatformState *d = XEN_PLATFORM(dev);
    uint8_t *pci_conf;

    pci_conf = dev->config;

    pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_IO | PCI_COMMAND_MEMORY);

    pci_config_set_prog_interface(pci_conf, 0);

    pci_conf[PCI_INTERRUPT_PIN] = 1;

    platform_ioport_bar_setup(d);
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->bar);

    /* reserve 16MB mmio address for shared memory */
    platform_mmio_setup(d);
    pci_register_bar(dev, 1, PCI_BASE_ADDRESS_MEM_PREFETCH,
                     &d->mmio_bar);

    platform_fixed_ioport_init(d);

    return 0;
}
Example #5
static int pci_pcnet_init(PCIDevice *pci_dev)
{
    PCIPCNetState *d = DO_UPCAST(PCIPCNetState, pci_dev, pci_dev);
    PCNetState *s = &d->state;
    uint8_t *pci_conf;

#if 0
    printf("sizeof(RMD)=%d, sizeof(TMD)=%d\n",
        sizeof(struct pcnet_RMD), sizeof(struct pcnet_TMD));
#endif

    pci_conf = pci_dev->config;

    pci_set_word(pci_conf + PCI_STATUS,
                 PCI_STATUS_FAST_BACK | PCI_STATUS_DEVSEL_MEDIUM);

    pci_set_word(pci_conf + PCI_SUBSYSTEM_VENDOR_ID, 0x0);
    pci_set_word(pci_conf + PCI_SUBSYSTEM_ID, 0x0);

    pci_conf[PCI_INTERRUPT_PIN] = 1; /* interrupt pin A */
    pci_conf[PCI_MIN_GNT] = 0x06;
    pci_conf[PCI_MAX_LAT] = 0xff;

    /* Handler for memory-mapped I/O */
    memory_region_init_io(&d->state.mmio, &pcnet_mmio_ops, s, "pcnet-mmio",
                          PCNET_PNPMMIO_SIZE);

    memory_region_init_io(&d->io_bar, &pcnet_io_ops, s, "pcnet-io",
                          PCNET_IOPORT_SIZE);
    pci_register_bar(pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO, &d->io_bar);

    pci_register_bar(pci_dev, 1, 0, &s->mmio);

    s->irq = pci_dev->irq[0];
    s->phys_mem_read = pci_physical_memory_read;
    s->phys_mem_write = pci_physical_memory_write;

    if (!pci_dev->qdev.hotplugged) {
        static int loaded = 0;
        if (!loaded) {
            rom_add_option("pxe-pcnet.rom", -1);
            loaded = 1;
        }
    }

    return pcnet_common_init(&pci_dev->qdev, s, &net_pci_pcnet_info);
}
Example #6
/***************************************************************************
 * pci express capability helper functions
 */
int pcie_cap_init(PCIDevice *dev, uint8_t offset, uint8_t type, uint8_t port)
{
    int pos;
    uint8_t *exp_cap;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
                                 PCI_EXP_VER2_SIZEOF);
    if (pos < 0) {
        return pos;
    }
    dev->exp.exp_cap = pos;
    exp_cap = dev->config + pos;

    /* capability register
       interrupt message number defaults to 0 */
    pci_set_word(exp_cap + PCI_EXP_FLAGS,
                 ((type << PCI_EXP_FLAGS_TYPE_SHIFT) & PCI_EXP_FLAGS_TYPE) |
                 PCI_EXP_FLAGS_VER2);

    /* device capability register
     * Table 7-12:
     * the role-based error reporting bit must be set by all
     * Functions conforming to the ECN, PCI Express Base
     * Specification, Revision 1.1, or subsequent PCI Express Base
     * Specification revisions.
     */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER);

    pci_set_long(exp_cap + PCI_EXP_LNKCAP,
                 (port << PCI_EXP_LNKCAP_PN_SHIFT) |
                 PCI_EXP_LNKCAP_ASPMS_0S |
                 PCI_EXP_LNK_MLW_1 |
                 PCI_EXP_LNK_LS_25);

    pci_set_word(exp_cap + PCI_EXP_LNKSTA,
                 PCI_EXP_LNK_MLW_1 | PCI_EXP_LNK_LS_25);

    pci_set_long(exp_cap + PCI_EXP_DEVCAP2,
                 PCI_EXP_DEVCAP2_EFF | PCI_EXP_DEVCAP2_EETLPP);

    pci_set_word(dev->wmask + pos + PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_EETLPPB);
    return pos;
}
Example #7
/**
 * A typical PCI OHCI will additionally set PERR in its configspace to
 * signal that it got an error.
 */
static void ohci_pci_die(struct OHCIState *ohci)
{
    OHCIPCIState *dev = container_of(ohci, OHCIPCIState, state);

    ohci_sysbus_die(ohci);

    pci_set_word(dev->parent_obj.config + PCI_STATUS,
                 PCI_STATUS_DETECTED_PARITY);
}
Example #8
void pci_bridge_disable_base_limit(PCIDevice *dev)
{
    uint8_t *conf = dev->config;

    pci_byte_test_and_set_mask(conf + PCI_IO_BASE,
                               PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_word_test_and_set_mask(conf + PCI_MEMORY_BASE,
                               PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_set_mask(conf + PCI_PREF_MEMORY_BASE,
                               PCI_PREF_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(conf + PCI_PREF_BASE_UPPER32, 0);
    pci_set_word(conf + PCI_PREF_LIMIT_UPPER32, 0);
}
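
The bridge code above uses read-modify-write mask helpers instead of plain pci_set_word. A minimal sketch of the word variants follows, under the assumption that they mirror the other accessors in include/hw/pci/pci.h (the byte and long variants are analogous); the return value reports the previous state of the masked bits.

/* Sketch of the test-and-set/test-and-clear helpers used above
 * (cf. include/hw/pci/pci.h). Each reads the current register value,
 * sets or clears the masked bits, writes the result back, and returns
 * which of the masked bits were set beforehand. */
static inline uint16_t pci_word_test_and_set_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);

    pci_set_word(config, val | mask);
    return val & mask;
}

static inline uint16_t pci_word_test_and_clear_mask(uint8_t *config, uint16_t mask)
{
    uint16_t val = pci_get_word(config);

    pci_set_word(config, val & ~mask);
    return val & mask;
}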
Example #9
static void
pcie_cap_v1_fill(PCIDevice *dev, uint8_t port, uint8_t type, uint8_t version)
{
    uint8_t *exp_cap = dev->config + dev->exp.exp_cap;
    uint8_t *cmask = dev->cmask + dev->exp.exp_cap;

    /* capability register
    interrupt message number defaults to 0 */
    pci_set_word(exp_cap + PCI_EXP_FLAGS,
                 ((type << PCI_EXP_FLAGS_TYPE_SHIFT) & PCI_EXP_FLAGS_TYPE) |
                 version);

    /* device capability register
     * Table 7-12:
     * the role-based error reporting bit must be set by all
     * Functions conforming to the ECN, PCI Express Base
     * Specification, Revision 1.1, or subsequent PCI Express Base
     * Specification revisions.
     */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP, PCI_EXP_DEVCAP_RBER);

    pci_set_long(exp_cap + PCI_EXP_LNKCAP,
                 (port << PCI_EXP_LNKCAP_PN_SHIFT) |
                 PCI_EXP_LNKCAP_ASPMS_0S |
                 QEMU_PCI_EXP_LNKCAP_MLW(QEMU_PCI_EXP_LNK_X1) |
                 QEMU_PCI_EXP_LNKCAP_MLS(QEMU_PCI_EXP_LNK_2_5GT));

    pci_set_word(exp_cap + PCI_EXP_LNKSTA,
                 QEMU_PCI_EXP_LNKSTA_NLW(QEMU_PCI_EXP_LNK_X1) |
                 QEMU_PCI_EXP_LNKSTA_CLS(QEMU_PCI_EXP_LNK_2_5GT));

    if (dev->cap_present & QEMU_PCIE_LNKSTA_DLLLA) {
        pci_word_test_and_set_mask(exp_cap + PCI_EXP_LNKSTA,
                                   PCI_EXP_LNKSTA_DLLLA);
    }

    /* We changed link status bits over time, and changing them across
     * migrations is generally fine as hardware changes them too.
     * Let's not bother checking.
     */
    pci_set_word(cmask + PCI_EXP_LNKSTA, 0);
}
Example #10
static int versatile_pci_host_init(PCIDevice *d)
{
    pci_config_set_vendor_id(d->config, PCI_VENDOR_ID_XILINX);
    /* Both boards have the same device ID.  Oh well.  */
    pci_config_set_device_id(d->config, PCI_DEVICE_ID_XILINX_XC2VP30);
    pci_set_word(d->config + PCI_STATUS,
                 PCI_STATUS_66MHZ | PCI_STATUS_DEVSEL_MEDIUM);
    pci_config_set_class(d->config, PCI_CLASS_PROCESSOR_CO);
    pci_set_byte(d->config + PCI_LATENCY_TIMER, 0x10);
    return 0;
}
Example #11
static void via_reset(void *opaque)
{
    PCIIDEState *d = opaque;
    PCIDevice *pd = PCI_DEVICE(d);
    uint8_t *pci_conf = pd->config;
    int i;

    for (i = 0; i < 2; i++) {
        ide_bus_reset(&d->bus[i]);
    }

    pci_set_word(pci_conf + PCI_COMMAND, PCI_COMMAND_WAIT);
    pci_set_word(pci_conf + PCI_STATUS, PCI_STATUS_FAST_BACK |
                 PCI_STATUS_DEVSEL_MEDIUM);

    pci_set_long(pci_conf + PCI_BASE_ADDRESS_0, 0x000001f0);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_1, 0x000003f4);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_2, 0x00000170);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_3, 0x00000374);
    pci_set_long(pci_conf + PCI_BASE_ADDRESS_4, 0x0000cc01); /* BMIBA: 20-23h */
    pci_set_long(pci_conf + PCI_INTERRUPT_LINE, 0x0000010e);

    /* IDE chip enable, IDE configuration 1/2, IDE FIFO Configuration */
    pci_set_long(pci_conf + 0x40, 0x0a090600);
    /* IDE misc configuration 1/2/3 */
    pci_set_long(pci_conf + 0x44, 0x00c00068);
    /* IDE Timing control */
    pci_set_long(pci_conf + 0x48, 0xa8a8a8a8);
    /* IDE Address Setup Time */
    pci_set_long(pci_conf + 0x4c, 0x000000ff);
    /* UltraDMA Extended Timing Control */
    pci_set_long(pci_conf + 0x50, 0x07070707);
    /* UltraDMA FIFO Control */
    pci_set_long(pci_conf + 0x54, 0x00000004);
    /* IDE primary sector size */
    pci_set_long(pci_conf + 0x60, 0x00000200);
    /* IDE secondary sector size */
    pci_set_long(pci_conf + 0x68, 0x00000200);
    /* PCI PM Block */
    pci_set_long(pci_conf + 0xc0, 0x00020001);
}
Example #12
static int
e1000e_add_pm_capability(PCIDevice *pdev, uint8_t offset, uint16_t pmc)
{
    int ret = pci_add_capability(pdev, PCI_CAP_ID_PM, offset, PCI_PM_SIZEOF);

    if (ret >= 0) {
        pci_set_word(pdev->config + offset + PCI_PM_PMC,
                     PCI_PM_CAP_VER_1_1 |
                     pmc);

        pci_set_word(pdev->wmask + offset + PCI_PM_CTRL,
                     PCI_PM_CTRL_STATE_MASK |
                     PCI_PM_CTRL_PME_ENABLE |
                     PCI_PM_CTRL_DATA_SEL_MASK);

        pci_set_word(pdev->w1cmask + offset + PCI_PM_CTRL,
                     PCI_PM_CTRL_PME_STATUS);
    }

    return ret;
}
Example #13
/* reset bridge specific configuration registers */
void pci_bridge_reset_reg(PCIDevice *dev)
{
    uint8_t *conf = dev->config;

    conf[PCI_PRIMARY_BUS] = 0;
    conf[PCI_SECONDARY_BUS] = 0;
    conf[PCI_SUBORDINATE_BUS] = 0;
    conf[PCI_SEC_LATENCY_TIMER] = 0;

    /*
     * The default values for the base/limit registers aren't specified
     * in the PCI-to-PCI bridge spec, so we don't touch them here.
     * Each implementation can override them: a typical implementation
     * either zeroes the base/limit registers or disables forwarding.
     * If forwarding should be disabled, call pci_bridge_disable_base_limit()
     * after this function.
     */
    pci_byte_test_and_clear_mask(conf + PCI_IO_BASE,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_byte_test_and_clear_mask(conf + PCI_IO_LIMIT,
                                 PCI_IO_RANGE_MASK & 0xff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_BASE,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_MEMORY_LIMIT,
                                 PCI_MEMORY_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_BASE,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_word_test_and_clear_mask(conf + PCI_PREF_MEMORY_LIMIT,
                                 PCI_PREF_RANGE_MASK & 0xffff);
    pci_set_word(conf + PCI_PREF_BASE_UPPER32, 0);
    pci_set_word(conf + PCI_PREF_LIMIT_UPPER32, 0);

    pci_set_word(conf + PCI_BRIDGE_CONTROL, 0);
}
Example #14
/* Given a bar and its size, add MSI-X table on top of it
 * and fill MSI-X capability in the config space.
 * Original bar size must be a power of 2 or 0.
 * New bar size is returned. */
static int msix_add_config(struct PCIDevice *pdev, unsigned short nentries,
                           unsigned bar_nr, unsigned bar_size)
{
    int config_offset;
    uint8_t *config;

    pdev->msix_bar_size = bar_size;

    config_offset = pci_find_capability(pdev, PCI_CAP_ID_MSIX);

    if (!config_offset) {
        uint32_t new_size;

        if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1)
            return -EINVAL;
        if (bar_size > 0x80000000)
            return -ENOSPC;

        /* Add space for MSI-X structures */
        if (!bar_size) {
            new_size = MSIX_PAGE_SIZE;
        } else if (bar_size < MSIX_PAGE_SIZE) {
            bar_size = MSIX_PAGE_SIZE;
            new_size = MSIX_PAGE_SIZE * 2;
        } else {
            new_size = bar_size * 2;
        }

        pdev->msix_bar_size = new_size;
        config_offset = pci_add_capability(pdev, PCI_CAP_ID_MSIX,
                                           0, MSIX_CAP_LENGTH);
        if (config_offset < 0)
            return config_offset;
        config = pdev->config + config_offset;

        pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
        /* Table on top of BAR */
        pci_set_long(config + PCI_MSIX_TABLE, bar_size | bar_nr);
        /* Pending bits on top of that */
        pci_set_long(config + PCI_MSIX_PBA, (bar_size + MSIX_PAGE_PENDING) |
                     bar_nr);
    }
    pdev->msix_cap = config_offset;
    /* Make flags bit writable. */
    pdev->wmask[config_offset + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
        MSIX_MASKALL_MASK;
    return 0;
}
Example #15
static int usb_ehci_pci_initfn(PCIDevice *dev)
{
    EHCIPCIState *i = DO_UPCAST(EHCIPCIState, pcidev, dev);
    EHCIState *s = &i->ehci;
    uint8_t *pci_conf = dev->config;

    pci_set_byte(&pci_conf[PCI_CLASS_PROG], 0x20);

    /* capabilities pointer */
    pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x00);
    /* pci_set_byte(&pci_conf[PCI_CAPABILITY_LIST], 0x50); */

    pci_set_byte(&pci_conf[PCI_INTERRUPT_PIN], 4); /* interrupt pin D */
    pci_set_byte(&pci_conf[PCI_MIN_GNT], 0);
    pci_set_byte(&pci_conf[PCI_MAX_LAT], 0);

    /* pci_conf[0x50] = 0x01; *//* power management caps */

    pci_set_byte(&pci_conf[USB_SBRN], USB_RELEASE_2); /* release # (2.1.4) */
    pci_set_byte(&pci_conf[0x61], 0x20);  /* frame length adjustment (2.1.5) */
    pci_set_word(&pci_conf[0x62], 0x00);  /* port wake up capability (2.1.6) */

    pci_conf[0x64] = 0x00;
    pci_conf[0x65] = 0x00;
    pci_conf[0x66] = 0x00;
    pci_conf[0x67] = 0x00;
    pci_conf[0x68] = 0x01;
    pci_conf[0x69] = 0x00;
    pci_conf[0x6a] = 0x00;
    pci_conf[0x6b] = 0x00;  /* USBLEGSUP */
    pci_conf[0x6c] = 0x00;
    pci_conf[0x6d] = 0x00;
    pci_conf[0x6e] = 0x00;
    pci_conf[0x6f] = 0xc0;  /* USBLEGCTLSTS */

    s->caps[0x09] = 0x68;        /* EECP */

    s->irq = dev->irq[3];
    s->dma = pci_dma_context(dev);

    s->capsbase = 0x00;
    s->opregbase = 0x20;

    usb_ehci_initfn(s, DEVICE(dev));
    pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mem);

    return 0;
}
Example #16
static void tnetw1130_pci_config(uint8_t *pci_conf)
{
    pci_set_word(pci_conf + PCI_STATUS, 0x0210);
    pci_set_long(pci_conf + PCI_CARDBUS_CIS, 0x00001c02);
    /* Address registers are set by pci_register_bar. */
    /* Capabilities Pointer, CLOFS */
    pci_set_long(pci_conf + PCI_CAPABILITY_LIST, 0x00000040);
    /* 0x38 reserved, returns 0 */
    /* MNGNT = 11, MXLAT = 52, IPIN = 0 */
    /* TODO: Split next command using pci_set_byte. */
    pci_set_long(pci_conf + PCI_INTERRUPT_LINE, 0x00000100);
    /* Power Management Capabilities */
    pci_set_long(pci_conf + 0x40, 0x7e020001);
    /* Power Management Control and Status */
    //~ pci_set_long(pci_conf + 0x44, 0x00000000);
    /* 0x48...0xff reserved, returns 0 */
}
Example #17
static void xilinx_pcie_root_realize(PCIDevice *pci_dev, Error **errp)
{
    BusState *bus = qdev_get_parent_bus(DEVICE(pci_dev));
    XilinxPCIEHost *s = XILINX_PCIE_HOST(bus->parent);

    pci_set_word(pci_dev->config + PCI_COMMAND,
                 PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
    pci_set_word(pci_dev->config + PCI_MEMORY_BASE, s->mmio_base >> 16);
    pci_set_word(pci_dev->config + PCI_MEMORY_LIMIT,
                 ((s->mmio_base + s->mmio_size - 1) >> 16) & 0xfff0);

    pci_bridge_initfn(pci_dev, TYPE_PCI_BUS);

    if (pcie_endpoint_cap_v1_init(pci_dev, 0x80) < 0) {
        error_setg(errp, "Failed to initialize PCIe capability");
    }
}
Example #18
static void pci_ich9_ahci_realize(PCIDevice *dev, Error **errp)
{
    struct AHCIPCIState *d;
    int sata_cap_offset;
    uint8_t *sata_cap;
    d = ICH_AHCI(dev);

    ahci_realize(&d->ahci, DEVICE(dev), pci_get_address_space(dev), 6);

    pci_config_set_prog_interface(dev->config, AHCI_PROGMODE_MAJOR_REV_1);

    dev->config[PCI_CACHE_LINE_SIZE] = 0x08;  /* Cache line size */
    dev->config[PCI_LATENCY_TIMER]   = 0x00;  /* Latency timer */
    pci_config_set_interrupt_pin(dev->config, 1);

    /* XXX Software should program this register */
    dev->config[0x90]   = 1 << 6; /* Address Map Register - AHCI mode */

    d->ahci.irq = pci_allocate_irq(dev);

    pci_register_bar(dev, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO,
                     &d->ahci.idp);
    pci_register_bar(dev, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &d->ahci.mem);

    sata_cap_offset = pci_add_capability2(dev, PCI_CAP_ID_SATA,
                                          ICH9_SATA_CAP_OFFSET, SATA_CAP_SIZE,
                                          errp);
    if (sata_cap_offset < 0) {
        return;
    }

    sata_cap = dev->config + sata_cap_offset;
    pci_set_word(sata_cap + SATA_CAP_REV, 0x10);
    pci_set_long(sata_cap + SATA_CAP_BAR,
                 (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4));
    d->ahci.idp_offset = ICH9_IDP_INDEX;

    /* Although the AHCI 1.3 specification states that the first capability
     * should be PMCAP, the Intel ICH9 data sheet specifies that the ICH9
     * AHCI device puts the MSI capability first, pointing to 0x80. */
    msi_init(dev, ICH9_MSI_CAP_OFFSET, 1, true, false);
}
Example #19
static int pci_ich9_ahci_init(PCIDevice *dev)
{
    struct AHCIPCIState *d;
    int sata_cap_offset;
    uint8_t *sata_cap;
    d = DO_UPCAST(struct AHCIPCIState, card, dev);

    ahci_init(&d->ahci, &dev->qdev, 6);

    pci_config_set_prog_interface(d->card.config, AHCI_PROGMODE_MAJOR_REV_1);

    d->card.config[PCI_CACHE_LINE_SIZE] = 0x08;  /* Cache line size */
    d->card.config[PCI_LATENCY_TIMER]   = 0x00;  /* Latency timer */
    pci_config_set_interrupt_pin(d->card.config, 1);

    /* XXX Software should program this register */
    d->card.config[0x90]   = 1 << 6; /* Address Map Register - AHCI mode */

    qemu_register_reset(ahci_reset, d);

    msi_init(dev, 0x50, 1, true, false);
    d->ahci.irq = d->card.irq[0];

    pci_register_bar(&d->card, ICH9_IDP_BAR, PCI_BASE_ADDRESS_SPACE_IO,
                     &d->ahci.idp);
    pci_register_bar(&d->card, ICH9_MEM_BAR, PCI_BASE_ADDRESS_SPACE_MEMORY,
                     &d->ahci.mem);

    sata_cap_offset = pci_add_capability(&d->card, PCI_CAP_ID_SATA,
                                         ICH9_SATA_CAP_OFFSET, SATA_CAP_SIZE);
    if (sata_cap_offset < 0) {
        return sata_cap_offset;
    }

    sata_cap = d->card.config + sata_cap_offset;
    pci_set_word(sata_cap + SATA_CAP_REV, 0x10);
    pci_set_long(sata_cap + SATA_CAP_BAR,
                 (ICH9_IDP_BAR + 0x4) | (ICH9_IDP_INDEX_LOG2 << 4));
    d->ahci.idp_offset = ICH9_IDP_INDEX;

    return 0;
}
Example #20
static bool pcie_aer_inject_cor_error(PCIEAERInject *inj,
                                      uint32_t uncor_status,
                                      bool is_advisory_nonfatal)
{
    PCIDevice *dev = inj->dev;

    inj->devsta |= PCI_EXP_DEVSTA_CED;
    if (inj->unsupported_request) {
        inj->devsta |= PCI_EXP_DEVSTA_URD;
    }
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_DEVSTA, inj->devsta);

    if (inj->aer_cap) {
        uint32_t mask;
        pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_COR_STATUS,
                                   inj->error_status);
        mask = pci_get_long(inj->aer_cap + PCI_ERR_COR_MASK);
        if (mask & inj->error_status) {
            return false;
        }
        if (is_advisory_nonfatal) {
            uint32_t uncor_mask =
                pci_get_long(inj->aer_cap + PCI_ERR_UNCOR_MASK);
            if (!(uncor_mask & uncor_status)) {
                inj->log_overflow = !!pcie_aer_record_error(dev, inj->err);
            }
            pci_long_test_and_set_mask(inj->aer_cap + PCI_ERR_UNCOR_STATUS,
                                       uncor_status);
        }
    }

    if (inj->unsupported_request && !(inj->devctl & PCI_EXP_DEVCTL_URRE)) {
        return false;
    }
    if (!(inj->devctl & PCI_EXP_DEVCTL_CERE)) {
        return false;
    }

    inj->msg.severity = PCI_ERR_ROOT_CMD_COR_EN;
    return true;
}
Example #21
/* default qdev initialization function for PCI-to-PCI bridge */
int pci_bridge_initfn(PCIDevice *dev)
{
    PCIBus *parent = dev->bus;
    PCIBridge *br = DO_UPCAST(PCIBridge, dev, dev);
    PCIBus *sec_bus = &br->sec_bus;

    pci_word_test_and_set_mask(dev->config + PCI_STATUS,
                               PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK);
    pci_config_set_class(dev->config, PCI_CLASS_BRIDGE_PCI);
    dev->config[PCI_HEADER_TYPE] =
        (dev->config[PCI_HEADER_TYPE] & PCI_HEADER_TYPE_MULTI_FUNCTION) |
        PCI_HEADER_TYPE_BRIDGE;
    pci_set_word(dev->config + PCI_SEC_STATUS,
                 PCI_STATUS_66MHZ | PCI_STATUS_FAST_BACK);

    /*
     * If we don't specify the name, the bus will be addressed as <id>.0, where
     * id is the device id.
     * Since PCI Bridge devices have a single bus each, we don't need the index:
     * let users address the bus using the device name.
     */
    if (!br->bus_name && dev->qdev.id && *dev->qdev.id) {
        br->bus_name = dev->qdev.id;
    }

    qbus_create_inplace(&sec_bus->qbus, TYPE_PCI_BUS, &dev->qdev,
                        br->bus_name);
    sec_bus->parent_dev = dev;
    sec_bus->map_irq = br->map_irq;
    sec_bus->address_space_mem = &br->address_space_mem;
    memory_region_init(&br->address_space_mem, "pci_bridge_pci", INT64_MAX);
    sec_bus->address_space_io = &br->address_space_io;
    memory_region_init(&br->address_space_io, "pci_bridge_io", 65536);
    pci_bridge_region_init(br);
    QLIST_INIT(&sec_bus->child);
    QLIST_INSERT_HEAD(&parent->child, sec_bus, sibling);
    return 0;
}
Example #22
int pcie_cap_init(PCIDevice *dev, uint8_t offset,
                  uint8_t type, uint8_t port,
                  Error **errp)
{
    /* PCIe cap v2 init */
    int pos;
    uint8_t *exp_cap;

    assert(pci_is_express(dev));

    pos = pci_add_capability(dev, PCI_CAP_ID_EXP, offset,
                             PCI_EXP_VER2_SIZEOF, errp);
    if (pos < 0) {
        return pos;
    }
    dev->exp.exp_cap = pos;
    exp_cap = dev->config + pos;

    /* Filling values common with v1 */
    pcie_cap_v1_fill(dev, port, type, PCI_EXP_FLAGS_VER2);

    /* Fill link speed and width options */
    pcie_cap_fill_slot_lnk(dev);

    /* Filling v2 specific values */
    pci_set_long(exp_cap + PCI_EXP_DEVCAP2,
                 PCI_EXP_DEVCAP2_EFF | PCI_EXP_DEVCAP2_EETLPP);

    pci_set_word(dev->wmask + pos + PCI_EXP_DEVCTL2, PCI_EXP_DEVCTL2_EETLPPB);

    if (dev->cap_present & QEMU_PCIE_EXTCAP_INIT) {
        /* read-only to behave like a 'NULL' Extended Capability Header */
        pci_set_long(dev->wmask + PCI_CONFIG_SPACE_SIZE, 0);
    }

    return pos;
}
Example #23
void pcie_cap_root_reset(PCIDevice *dev)
{
    pci_set_word(dev->config + dev->exp.exp_cap + PCI_EXP_RTCTL, 0);
}
Example #24
/* root control/capabilities/status. PME isn't emulated for now */
void pcie_cap_root_init(PCIDevice *dev)
{
    pci_set_word(dev->wmask + dev->exp.exp_cap + PCI_EXP_RTCTL,
                 PCI_EXP_RTCTL_SECEE | PCI_EXP_RTCTL_SENFEE |
                 PCI_EXP_RTCTL_SEFEE);
}
Example #25
static void e1000e_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    static const uint16_t e1000e_pmrb_offset = 0x0C8;
    static const uint16_t e1000e_pcie_offset = 0x0E0;
    static const uint16_t e1000e_aer_offset =  0x100;
    static const uint16_t e1000e_dsn_offset =  0x140;
    E1000EState *s = E1000E(pci_dev);
    uint8_t *macaddr;
    int ret;

    trace_e1000e_cb_pci_realize();

    pci_dev->config_write = e1000e_write_config;

    pci_dev->config[PCI_CACHE_LINE_SIZE] = 0x10;
    pci_dev->config[PCI_INTERRUPT_PIN] = 1;

    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_VENDOR_ID, s->subsys_ven);
    pci_set_word(pci_dev->config + PCI_SUBSYSTEM_ID, s->subsys);

    s->subsys_ven_used = s->subsys_ven;
    s->subsys_used = s->subsys;

    /* Define IO/MMIO regions */
    memory_region_init_io(&s->mmio, OBJECT(s), &mmio_ops, s,
                          "e1000e-mmio", E1000E_MMIO_SIZE);
    pci_register_bar(pci_dev, E1000E_MMIO_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->mmio);

    /*
     * We provide a dummy implementation for the flash BAR
     * for drivers that may theoretically probe for its presence.
     */
    memory_region_init(&s->flash, OBJECT(s),
                       "e1000e-flash", E1000E_FLASH_SIZE);
    pci_register_bar(pci_dev, E1000E_FLASH_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->flash);

    memory_region_init_io(&s->io, OBJECT(s), &io_ops, s,
                          "e1000e-io", E1000E_IO_SIZE);
    pci_register_bar(pci_dev, E1000E_IO_IDX,
                     PCI_BASE_ADDRESS_SPACE_IO, &s->io);

    memory_region_init(&s->msix, OBJECT(s), "e1000e-msix",
                       E1000E_MSIX_SIZE);
    pci_register_bar(pci_dev, E1000E_MSIX_IDX,
                     PCI_BASE_ADDRESS_SPACE_MEMORY, &s->msix);

    /* Create networking backend */
    qemu_macaddr_default_if_unset(&s->conf.macaddr);
    macaddr = s->conf.macaddr.a;

    e1000e_init_msix(s);

    if (pcie_endpoint_cap_v1_init(pci_dev, e1000e_pcie_offset) < 0) {
        hw_error("Failed to initialize PCIe capability");
    }

    ret = msi_init(PCI_DEVICE(s), 0xD0, 1, true, false, NULL);
    if (ret) {
        trace_e1000e_msi_init_fail(ret);
    }

    if (e1000e_add_pm_capability(pci_dev, e1000e_pmrb_offset,
                                  PCI_PM_CAP_DSI) < 0) {
        hw_error("Failed to initialize PM capability");
    }

    if (pcie_aer_init(pci_dev, PCI_ERR_VER, e1000e_aer_offset,
                      PCI_ERR_SIZEOF, NULL) < 0) {
        hw_error("Failed to initialize AER capability");
    }

    pcie_dev_ser_num_init(pci_dev, e1000e_dsn_offset,
                          e1000e_gen_dsn(macaddr));

    e1000e_init_net_peer(s, pci_dev, macaddr);

    /* Initialize core */
    e1000e_core_realize(s);

    e1000e_core_pci_realize(&s->core,
                            e1000e_eeprom_template,
                            sizeof(e1000e_eeprom_template),
                            macaddr);
}
Example #26
/*
 * 6.2.6 Error Message Control
 * Figure 6-3
 * root port part
 */
static void pcie_aer_msg_root_port(PCIDevice *dev, const PCIEAERMsg *msg)
{
    uint16_t cmd;
    uint8_t *aer_cap;
    uint32_t root_cmd;
    uint32_t root_status, prev_status;

    cmd = pci_get_word(dev->config + PCI_COMMAND);
    aer_cap = dev->config + dev->exp.aer_cap;
    root_cmd = pci_get_long(aer_cap + PCI_ERR_ROOT_COMMAND);
    prev_status = root_status = pci_get_long(aer_cap + PCI_ERR_ROOT_STATUS);

    if (cmd & PCI_COMMAND_SERR) {
        /* System Error.
         *
         * The way to report a System Error is platform specific and
         * isn't implemented in qemu right now, so just discard the
         * error for now.
         * An OS that cares about AER would receive errors via the
         * native AER mechanism, so this shouldn't matter.
         */
    }

    /* Error Message Received: Root Error Status register */
    switch (msg->severity) {
    case PCI_ERR_ROOT_CMD_COR_EN:
        if (root_status & PCI_ERR_ROOT_COR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_COR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC + PCI_ERR_SRC_COR_OFFS,
                         msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_COR_RCV;
        break;
    case PCI_ERR_ROOT_CMD_NONFATAL_EN:
        root_status |= PCI_ERR_ROOT_NONFATAL_RCV;
        break;
    case PCI_ERR_ROOT_CMD_FATAL_EN:
        if (!(root_status & PCI_ERR_ROOT_UNCOR_RCV)) {
            root_status |= PCI_ERR_ROOT_FIRST_FATAL;
        }
        root_status |= PCI_ERR_ROOT_FATAL_RCV;
        break;
    default:
        abort();
        break;
    }
    if (pcie_aer_msg_is_uncor(msg)) {
        if (root_status & PCI_ERR_ROOT_UNCOR_RCV) {
            root_status |= PCI_ERR_ROOT_MULTI_UNCOR_RCV;
        } else {
            pci_set_word(aer_cap + PCI_ERR_ROOT_ERR_SRC +
                         PCI_ERR_SRC_UNCOR_OFFS, msg->source_id);
        }
        root_status |= PCI_ERR_ROOT_UNCOR_RCV;
    }
    pci_set_long(aer_cap + PCI_ERR_ROOT_STATUS, root_status);

    /* 6.2.4.1.2 Interrupt Generation */
    /* All the above did was set some bits in the status register,
     * specifically those that match the message severity.
     * The code below relies on this fact. */
    if (!(root_cmd & msg->severity) ||
        (pcie_aer_status_to_cmd(prev_status) & root_cmd)) {
        /* Condition is not being set or was already true so nothing to do. */
        return;
    }

    pcie_aer_root_notify(dev);
}
Example #27
/* Initialize the MSI-X structures */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *table_bar, uint8_t table_bar_nr,
              unsigned table_offset, MemoryRegion *pba_bar,
              uint8_t pba_bar_nr, unsigned pba_offset, uint8_t cap_pos)
{
    int cap;
    unsigned table_size, pba_size;
    uint8_t *config;

    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msi_supported) {
        return -ENOTSUP;
    }

    if (nentries < 1 || nentries > PCI_MSIX_FLAGS_QSIZE + 1) {
        return -EINVAL;
    }

    table_size = nentries * PCI_MSIX_ENTRY_SIZE;
    pba_size = QEMU_ALIGN_UP(nentries, 64) / 8;

    /* Sanity test: table & pba don't overlap, fit within BARs, min aligned */
    if ((table_bar_nr == pba_bar_nr &&
         ranges_overlap(table_offset, table_size, pba_offset, pba_size)) ||
        table_offset + table_size > memory_region_size(table_bar) ||
        pba_offset + pba_size > memory_region_size(pba_bar) ||
        (table_offset | pba_offset) & PCI_MSIX_FLAGS_BIRMASK) {
        return -EINVAL;
    }

    cap = pci_add_capability(dev, PCI_CAP_ID_MSIX, cap_pos, MSIX_CAP_LENGTH);
    if (cap < 0) {
        return cap;
    }

    dev->msix_cap = cap;
    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    config = dev->config + cap;

    pci_set_word(config + PCI_MSIX_FLAGS, nentries - 1);
    dev->msix_entries_nr = nentries;
    dev->msix_function_masked = true;

    pci_set_long(config + PCI_MSIX_TABLE, table_offset | table_bar_nr);
    pci_set_long(config + PCI_MSIX_PBA, pba_offset | pba_bar_nr);

    /* Make flags bit writable. */
    dev->wmask[cap + MSIX_CONTROL_OFFSET] |= MSIX_ENABLE_MASK |
                                             MSIX_MASKALL_MASK;

    dev->msix_table = g_malloc0(table_size);
    dev->msix_pba = g_malloc0(pba_size);
    dev->msix_entry_used = g_malloc0(nentries * sizeof *dev->msix_entry_used);

    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_table_mmio, OBJECT(dev), &msix_table_mmio_ops, dev,
                          "msix-table", table_size);
    memory_region_add_subregion(table_bar, table_offset, &dev->msix_table_mmio);
    memory_region_init_io(&dev->msix_pba_mmio, OBJECT(dev), &msix_pba_mmio_ops, dev,
                          "msix-pba", pba_size);
    memory_region_add_subregion(pba_bar, pba_offset, &dev->msix_pba_mmio);

    return 0;
}