/* Program a PIIX3/PIIX4 PCI-to-ISA bridge: route the four PCI interrupt
 * links (PIRQA-D) to the ISA IRQs listed in pci_irqs[], and mark each
 * routed IRQ as level-triggered in the ELCR (ports 0x4d0/0x4d1).
 * Called per-device with the bridge's bus/dev/fn handle.
 * NOTE(review): the function's closing brace is not visible in this
 * chunk — the definition continues past this view. */
static void pci_bios_init_bridges(u16 bdf)
{
    u16 vendor_id = pci_config_readw(bdf, PCI_VENDOR_ID);
    u16 device_id = pci_config_readw(bdf, PCI_DEVICE_ID);
    if (vendor_id == PCI_VENDOR_ID_INTEL
        && (device_id == PCI_DEVICE_ID_INTEL_82371SB_0
            || device_id == PCI_DEVICE_ID_INTEL_82371AB_0)) {
        int i, irq;
        u8 elcr[2];   /* shadow of the two ELCR edge/level registers */

        /* PIIX3/PIIX4 PCI to ISA bridge */
        elcr[0] = 0x00;
        elcr[1] = 0x00;
        for (i = 0; i < 4; i++) {
            irq = pci_irqs[i];
            /* set to trigger level */
            elcr[irq >> 3] |= (1 << (irq & 7));
            /* activate irq remapping in PIIX (PIRQx routing regs 0x60-0x63) */
            pci_config_writeb(bdf, 0x60 + i, irq);
        }
        /* Commit the shadow to the real ELCR ports. */
        outb(elcr[0], 0x4d0);
        outb(elcr[1], 0x4d1);
        dprintf(1, "PIIX3/PIIX4 init: elcr=%02x %02x\n", elcr[0], elcr[1]);
    }
static void qemu_detect(void) { if (!CONFIG_QEMU_HARDWARE) return; // check northbridge @ 00:00.0 u16 v = pci_config_readw(0, PCI_VENDOR_ID); if (v == 0x0000 || v == 0xffff) return; u16 d = pci_config_readw(0, PCI_DEVICE_ID); u16 sv = pci_config_readw(0, PCI_SUBSYSTEM_VENDOR_ID); u16 sd = pci_config_readw(0, PCI_SUBSYSTEM_ID); if (sv != 0x1af4 || /* Red Hat, Inc */ sd != 0x1100) /* Qemu virtual machine */ return; PlatformRunningOn |= PF_QEMU; switch (d) { case 0x1237: dprintf(1, "Running on QEMU (i440fx)\n"); break; case 0x29c0: dprintf(1, "Running on QEMU (q35)\n"); break; default: dprintf(1, "Running on QEMU (unknown nb: %04x:%04x)\n", v, d); break; } kvm_detect(); }
static void pci_setup_device(int bdf, uint32_t *p_io_base, uint32_t *p_mem_base) { int vendor_id, device_id, class_id, region; vendor_id = pci_config_readw(bdf, PCI_VENDOR_ID); device_id = pci_config_readw(bdf, PCI_DEVICE_ID); class_id = pci_config_readw(bdf, PCI_CLASS_DEVICE); printf("PCI: %02x:%02x:%x class %04x id %04x:%04x\r\n", PCI_BUS(bdf), PCI_SLOT(bdf), PCI_FUNC(bdf), class_id, vendor_id, device_id); for (region = 0; region < PCI_REGION_ROM; region++) { int ofs = PCI_BASE_ADDRESS_0 + region * 4; uint32_t old, mask, val, size, align; uint32_t *p_base; old = pci_config_readl(bdf, ofs); if (old & PCI_BASE_ADDRESS_SPACE_IO) { mask = PCI_BASE_ADDRESS_IO_MASK; p_base = p_io_base; } else { mask = PCI_BASE_ADDRESS_MEM_MASK; p_base = p_mem_base; } pci_config_writel(bdf, ofs, -1); val = pci_config_readl(bdf, ofs); pci_config_writel(bdf, ofs, old); align = size = ~(val & mask) + 1; if (val != 0) { uint32_t addr = *p_base; addr = (addr + align - 1) & ~(align - 1); *p_base = addr + size; pci_config_writel(bdf, ofs, addr); printf("PCI: region %d: %08x\r\n", region, addr); if ((val & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) { pci_config_writel(bdf, ofs + 4, 0); region++; } } } pci_config_maskw(bdf, PCI_COMMAND, 0, PCI_COMMAND_IO | PCI_COMMAND_MEMORY); /* Map the interrupt. */ }
/* Advance a bus scan to the next present PCI device at or after bdf.
 * Returns the found bdf, or -1 when the scan reaches *pmax.  Skips the
 * remaining functions of a slot when function 0 is not multi-function. */
int pci_next(int bdf, int *pmax)
{
    if (PCI_FUNC(bdf) == 1) {
        /* If the last device was not a multi-function device, skip to next. */
        if ((pci_config_readb(bdf - 1, PCI_HEADER_TYPE) & 0x80) == 0)
            bdf += 7;
    }

    int limit = *pmax;
    for (;;) {
        /* ??? Support multiple PCI busses here at some point. */
        if (bdf >= limit)
            return -1;

        /* Check if there is a device present at the location. */
        uint16_t vendor = pci_config_readw(bdf, PCI_VENDOR_ID);
        if (vendor != 0x0000 && vendor != 0xffff)
            return bdf;

        /* Absent function 0 means an empty slot: jump a whole slot;
         * otherwise probe the next function. */
        if (PCI_FUNC(bdf) == 0)
            bdf += 8;
        else
            bdf += 1;
    }
}
/* Read-modify-write a 16-bit config register: clear the bits in 'off',
 * then set the bits in 'on'. */
void pci_config_maskw(int bdf, int addr, uint16_t off, uint16_t on)
{
    uint16_t cur = pci_config_readw(bdf, addr);
    cur &= ~off;
    cur |= on;
    pci_config_writew(bdf, addr, cur);
}
/* Initialize the bochs/QEMU "DISPI" VBE extension on top of stdvga.
 * Probes the DISPI ID register, locates the linear framebuffer (via the
 * PCI BAR when available), records VBE globals, and prunes modes the
 * hardware cannot satisfy.  Returns stdvga_setup()'s error on failure,
 * otherwise 0 (including the no-DISPI fallback case). */
int bochsvga_setup(void)
{
    int ret = stdvga_setup();
    if (ret)
        return ret;

    /* Sanity checks: the ID register must echo back what we write. */
    dispi_write(VBE_DISPI_INDEX_ID, VBE_DISPI_ID0);
    if (dispi_read(VBE_DISPI_INDEX_ID) != VBE_DISPI_ID0) {
        dprintf(1, "No VBE DISPI interface detected, falling back to stdvga\n");
        return 0;
    }

    /* Select the newest interface revision we support. */
    dispi_write(VBE_DISPI_INDEX_ID, VBE_DISPI_ID5);
    SET_VGA(dispi_found, 1);

    /* On a re-init, the globals below are already populated. */
    if (GET_GLOBAL(HaveRunInit))
        return 0;

    u32 lfb_addr = VBE_DISPI_LFB_PHYSICAL_ADDRESS;  /* legacy fixed address */
    int bdf = GET_GLOBAL(VgaBDF);
    if (CONFIG_VGA_PCI && bdf >= 0) {
        /* The framebuffer BAR index varies by device model. */
        u16 vendor = pci_config_readw(bdf, PCI_VENDOR_ID);
        int barid;
        switch (vendor) {
        case 0x15ad: /* qemu vmware vga */
            barid = 1;
            break;
        case 0x1af4: /* virtio-vga */
            barid = 2;
            break;
        default: /* stdvga, qxl */
            barid = 0;
            break;
        }
        u32 bar = pci_config_readl(bdf, PCI_BASE_ADDRESS_0 + barid * 4);
        lfb_addr = bar & PCI_BASE_ADDRESS_MEM_MASK;
        dprintf(1, "VBE DISPI: bdf %02x:%02x.%x, bar %d\n",
                pci_bdf_to_bus(bdf), pci_bdf_to_dev(bdf),
                pci_bdf_to_fn(bdf), barid);
    }
    SET_VGA(VBE_framebuffer, lfb_addr);
    /* Video memory is reported in 64K units. */
    u32 totalmem = dispi_read(VBE_DISPI_INDEX_VIDEO_MEMORY_64K) * 64 * 1024;
    SET_VGA(VBE_total_memory, totalmem);
    SET_VGA(VBE_win_granularity, 64);
    SET_VGA(VBE_capabilities, VBE_CAPABILITY_8BIT_DAC);

    dprintf(1, "VBE DISPI: lfb_addr=%x, size %d MB\n",
            lfb_addr, totalmem >> 20);

    // Validate modes
    /* With GETCAPS set, the XRES/BPP registers report hardware limits
     * instead of the current mode; restore the enable register after. */
    u16 en = dispi_read(VBE_DISPI_INDEX_ENABLE);
    dispi_write(VBE_DISPI_INDEX_ENABLE, en | VBE_DISPI_GETCAPS);
    u16 max_xres = dispi_read(VBE_DISPI_INDEX_XRES);
    u16 max_bpp = dispi_read(VBE_DISPI_INDEX_BPP);
    dispi_write(VBE_DISPI_INDEX_ENABLE, en);

    /* Drop every mode table entry that exceeds the reported limits. */
    struct bochsvga_mode *m = bochsvga_modes;
    for (; m < &bochsvga_modes[ARRAY_SIZE(bochsvga_modes)]; m++) {
        u16 width = GET_GLOBAL(m->info.width);
        u16 height = GET_GLOBAL(m->info.height);
        u8 depth = GET_GLOBAL(m->info.depth);
        /* Framebuffer bytes needed for this mode (scaled by the vram
         * ratio helper — presumably accounts for multi-plane modes;
         * TODO confirm against stdvga_vram_ratio). */
        u32 mem = (height * DIV_ROUND_UP(width * vga_bpp(&m->info), 8)
                   * stdvga_vram_ratio(&m->info));

        if (width > max_xres || depth > max_bpp || mem > totalmem) {
            dprintf(1, "Removing mode %x\n", GET_GLOBAL(m->mode));
            /* 0xffff marks the entry as invalid/removed. */
            SET_VGA(m->mode, 0xffff);
        }
    }
    return 0;
}