/*
 * Stop monitoring @state on @cpu: latch the raw residency counter and
 * clear the state's enable bit in the monitor control register.
 *
 * The pseudo-state NBP1 has no counter; for it we only record whether
 * NBP1 was active/entered and return without touching the enable bits.
 */
static int amd_fam14h_disable(cstate_t *state, unsigned int cpu)
{
	int bit, offset, err;
	uint32_t reg;

	err = amd_fam14h_get_pci_info(state, &offset, &bit, cpu);
	if (err)
		return err;

	reg = pci_read_long(amd_fam14h_pci_dev, offset);
	dprint("%s: offset: 0x%x %u\n", state->name, offset, reg);

	if (state->id == NBP1) {
		/* Record whether NBP1 was active or entered at least once. */
		nbp1_entered = (reg & (1 << PCI_NBP1_ACTIVE_BIT)) |
			       (reg & (1 << PCI_NBP1_ENTERED_BIT));
		dprint("NBP1 was %sentered - 0x%x - enable_bit: "
		       "%d - pci_offset: 0x%x\n",
		       nbp1_entered ? "" : "not ", reg, bit, offset);
		return err;
	}

	/* Latch the counter value for the later delta computation. */
	current_count[state->id][cpu] = reg;
	dprint("%s: Current - %llu (%u)\n", state->name,
	       current_count[state->id][cpu], cpu);
	dprint("%s: Previous - %llu (%u)\n", state->name,
	       previous_count[state->id][cpu], cpu);

	/* Turn the monitor for this state off. */
	reg = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG);
	reg &= ~(1 << bit);
	pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, reg);

	return 0;
}
/*
 * Start monitoring @state on @cpu.
 *
 * NBP1 only needs its enable bit set in its own config register; the
 * other states are enabled via PCI_MONITOR_ENABLE_REG and have their
 * counter register reset to zero.
 *
 * Returns 0 on success, or the error from amd_fam14h_get_pci_info().
 */
static int amd_fam14h_init(cstate_t *state, unsigned int cpu)
{
	int enable_bit, pci_offset, ret;
	uint32_t val;

	ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
	if (ret)
		return ret;

	if (state->id == NBP1) {
		val = pci_read_long(amd_fam14h_pci_dev, pci_offset);
		val |= 1 << enable_bit;
		/* Fix: pci_write_long() returns a status code, not a register
		 * value; do not store it back into val. */
		pci_write_long(amd_fam14h_pci_dev, pci_offset, val);
		return ret;
	}

	/* Enable the monitor for this state. */
	val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG);
	dprint("Init %s: read at offset: 0x%x val: %u\n", state->name,
	       PCI_MONITOR_ENABLE_REG, (unsigned int) val);
	val |= 1 << enable_bit;
	pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val);

	dprint("Init %s: offset: 0x%x enable_bit: %d - val: %u (%u)\n",
	       state->name, PCI_MONITOR_ENABLE_REG, enable_bit,
	       (unsigned int) val, cpu);

	/* Reset the counter so the next read starts from zero. */
	pci_write_long(amd_fam14h_pci_dev, pci_offset, 0);
	previous_count[state->id][cpu] = 0;

	return 0;
}
/*
 * Start monitoring @state on @cpu (duplicate variant with original
 * comments retained).
 *
 * Returns 0 on success, or the error from amd_fam14h_get_pci_info().
 */
static int amd_fam14h_init(cstate_t *state, unsigned int cpu)
{
	int enable_bit, pci_offset, ret;
	uint32_t val;

	ret = amd_fam14h_get_pci_info(state, &pci_offset, &enable_bit, cpu);
	if (ret)
		return ret;

	/* NBP1 needs extra treating -> write 1 to D18F6x98 bit 1 for init */
	if (state->id == NBP1) {
		val = pci_read_long(amd_fam14h_pci_dev, pci_offset);
		val |= 1 << enable_bit;
		/* Fix: pci_write_long() returns a status code, not a register
		 * value; do not store it back into val. */
		pci_write_long(amd_fam14h_pci_dev, pci_offset, val);
		return ret;
	}

	/* Enable monitor */
	val = pci_read_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG);
	dprint("Init %s: read at offset: 0x%x val: %u\n", state->name,
	       PCI_MONITOR_ENABLE_REG, (unsigned int) val);
	val |= 1 << enable_bit;
	pci_write_long(amd_fam14h_pci_dev, PCI_MONITOR_ENABLE_REG, val);

	dprint("Init %s: offset: 0x%x enable_bit: %d - val: %u (%u)\n",
	       state->name, PCI_MONITOR_ENABLE_REG, enable_bit,
	       (unsigned int) val, cpu);

	/* Set counter to zero */
	pci_write_long(amd_fam14h_pci_dev, pci_offset, 0);
	previous_count[state->id][cpu] = 0;

	return 0;
}
/* * MCH-ICH Serial Interconnect Ingress Root Complex MMIO configuration space */ int print_dmibar(struct pci_dev *nb) { int i, size = (4 * 1024); volatile uint8_t *dmibar; uint64_t dmibar_phys; printf("\n============= DMIBAR ============\n\n"); switch (nb->device_id) { case PCI_DEVICE_ID_INTEL_82915: case PCI_DEVICE_ID_INTEL_82945GM: case PCI_DEVICE_ID_INTEL_82945GSE: case PCI_DEVICE_ID_INTEL_82945P: case PCI_DEVICE_ID_INTEL_82975X: dmibar_phys = pci_read_long(nb, 0x4c) & 0xfffffffe; break; case PCI_DEVICE_ID_INTEL_PM965: case PCI_DEVICE_ID_INTEL_Q965: case PCI_DEVICE_ID_INTEL_82Q35: case PCI_DEVICE_ID_INTEL_82G33: case PCI_DEVICE_ID_INTEL_82Q33: case PCI_DEVICE_ID_INTEL_GS45: case PCI_DEVICE_ID_INTEL_ATOM_DXXX: case PCI_DEVICE_ID_INTEL_ATOM_NXXX: dmibar_phys = pci_read_long(nb, 0x68) & 0xfffffffe; dmibar_phys |= ((uint64_t)pci_read_long(nb, 0x6c)) << 32; break; case PCI_DEVICE_ID_INTEL_82810: case PCI_DEVICE_ID_INTEL_82810DC: case PCI_DEVICE_ID_INTEL_82810E_MC: case PCI_DEVICE_ID_INTEL_82865: printf("This northbridge does not have DMIBAR.\n"); return 1; case PCI_DEVICE_ID_INTEL_X58: dmibar_phys = pci_read_long(nb, 0x50) & 0xfffff000; break; default: printf("Error: Dumping DMIBAR on this northbridge is not (yet) supported.\n"); return 1; } dmibar = map_physical(dmibar_phys, size); if (dmibar == NULL) { perror("Error mapping DMIBAR"); exit(1); } printf("DMIBAR = 0x%08llx (MEM)\n\n", dmibar_phys); for (i = 0; i < size; i += 4) { if (*(uint32_t *)(dmibar + i)) printf("0x%04x: 0x%08x\n", i, *(uint32_t *)(dmibar+i)); } unmap_physical((void *)dmibar, size); return 0; }
/*
 * Probe an ICH-family chipset for GPIO access.
 *
 * Reads GPIOBASE and the GPIO control byte (GC) from D31:F0, enables
 * GPIO decoding if needed, requests I/O-port permission for the GPIO
 * range and dumps the USE_SEL/IO_SEL/LVL register sets.
 *
 * @pci:          libpci access handle
 * @reg_gpiobase: config-space offset of the GPIOBASE register
 * @reg_gc:       config-space offset of the GPIO control register
 * @desc:         human-readable chipset description for messages
 * @fatal:        out: set to 1 on errors that should stop further probing
 *
 * NOTE(review): d31f0 from pci_find_dev() is used without a NULL check —
 * confirm pci_find_dev() cannot return NULL here.
 * NOTE(review): execution continues past several ERR() calls below —
 * presumably ERR() returns from this function; verify the macro.
 */
int try_ich(struct pci_access *pci, uint16_t reg_gpiobase, uint16_t reg_gc, const char *desc, int *fatal)
{
	MSG("Checking for a %s system", desc);
	struct pci_dev *d31f0 = pci_find_dev(pci, 0, 31, 0);
	uint32_t gpiobase = pci_read_long(d31f0, reg_gpiobase);
	uint8_t gc = pci_read_byte(d31f0, reg_gc);
	MSG("GPIOBASE=%08x, GC=%02x", gpiobase, gc);
	/* All-ones usually means the config read itself failed. */
	if(gpiobase == 0xffffffff) {
		*fatal = 1;
		ERR("Cannot read GPIOBASE, are you running me as root?");
	} else if(gpiobase == 0) {
		ERR("GPIOBASE not implemented at %04x", reg_gpiobase);
	} else if(!(gpiobase & 1)) {
		/* Bit 0 set marks an I/O-space BAR. */
		*fatal = 1;
		ERR("GPIOBASE is not an I/O BAR");
	}
	/* No base address programmed: try a conventional default. */
	if(!(gpiobase & 0xfffc)) {
		const uint32_t DEFAULT_GPIOBASE = 0x0480;
		MSG("GPIOBASE is not configured, setting to %08x and hoping this works", DEFAULT_GPIOBASE);
		pci_write_long(d31f0, reg_gpiobase, DEFAULT_GPIOBASE);
		gpiobase = pci_read_long(d31f0, reg_gpiobase);
		if((gpiobase & 0xfffc) != DEFAULT_GPIOBASE) {
			ERR("Cannot set GPIOBASE");
		}
	}
	MSG("GPIO decoding is %s", (gc & REG_ICHx_GC_EN) ? "enabled" : "disabled");
	MSG("GPIO lockdown is %s", (gc & REG_ICHx_GC_GLE) ? "enabled" : "disabled");
	/* Turn on GPIO decoding if the firmware left it off. */
	if(!(gc & REG_ICHx_GC_EN)) {
		MSG("Enabling GPIO decoding");
		pci_write_byte(d31f0, reg_gc, gc | REG_ICHx_GC_EN);
		gc = pci_read_byte(d31f0, reg_gc);
		if(!(gc & REG_ICHx_GC_EN)) {
			ERR("Cannot enable GPIO decoding");
		}
	}
	/* Strip the BAR flag bits to obtain the port base. */
	gpiobase &= 0xfffc;
	if(ioperm(gpiobase, 128, 1) == -1) {
		ERR("Cannot access I/O ports %04x:%04x", gpiobase, gpiobase + 128);
	}
	/* Dump GPIO banks 1 and 2 (bank 0 presumably handled elsewhere —
	 * confirm against the ichx_regs table). */
	for(int n = 1; n < 3; n++) {
		MSG("USE_SEL%d=%08x", n, inl(gpiobase + ichx_regs[GPIO_USE_SEL][n]));
		MSG("IO_SEL%d=%08x", n, inl(gpiobase + ichx_regs[GPIO_IO_SEL][n]));
		MSG("LVL%d=%08x", n, inl(gpiobase + ichx_regs[GPIO_LVL][n]));
	}
	return 0;
}
int print_ambs(struct pci_dev *dev, struct pci_access *pacc) { struct pci_dev *dev16; int branch, channel, amb; int max_branch, max_channel, max_amb; volatile void *ambconfig; uint64_t ambconfig_phys; printf("\n============= AMBs ============\n\n"); switch (dev->device_id) { case PCI_DEVICE_ID_INTEL_I5000P: case PCI_DEVICE_ID_INTEL_I5000X: case PCI_DEVICE_ID_INTEL_I5000Z: max_branch = 2; if (!(dev16 = pci_get_dev(pacc, 0, 0, 0x10, 0))) { perror("Error: no device 0:16.0\n"); return 1; } ambconfig_phys = ((uint64_t)pci_read_long(dev16, 0x4c) << 32) | pci_read_long(dev16, 0x48); max_channel = pci_read_byte(dev16, 0x56)/max_branch; max_amb = pci_read_byte(dev16, 0x57); pci_free_dev(dev16); break; default: fprintf(stderr, "Error: Dumping AMBs on this MCH is not (yet) supported.\n"); return 1; } if (!(ambconfig = map_physical(ambconfig_phys, AMB_CONFIG_SPACE_SIZE))) { fprintf(stderr, "Error mapping AMB config space\n"); return 1; } for(branch = 0; branch < max_branch; branch++) { for(channel = 0; channel < max_channel; channel++) { for(amb = 0; amb < max_amb; amb++) { dump_amb(ambconfig, branch, channel, amb); } } } unmap_physical((void *)ambconfig, AMB_CONFIG_SPACE_SIZE); return 0; }
/*
 * Resolve the sideband register (SBREG) MMIO base on a Series 10 PCH.
 *
 * The P2SB bridge at D31:F1 may be hidden by firmware; if so it is
 * temporarily unhidden via the HIDE bit in P2SB_CTRL, its BAR0 is read,
 * and the device is hidden again.
 *
 * @pci:        libpci access handle
 * @sbreg_addr: out: SBREG MMIO base with the BAR flag bits masked off
 *
 * Returns 0 on success. NOTE(review): execution continues past several
 * ERR() calls — presumably ERR() returns from this function; verify the
 * macro. The pci_dev obtained from pci_get_dev() is never freed here.
 */
int get_pch_sbreg_addr(struct pci_access *pci, pciaddr_t *sbreg_addr)
{
	MSG("Checking for a Series 10 PCH system");
	struct pci_dev *d31f1 = pci_get_dev(pci, 0, 0, 31, 1);
	pci_fill_info(d31f1, PCI_FILL_IDENT);
	/* Vendor 0xffff means the function does not enumerate - it is
	 * presumably hidden by firmware rather than absent. */
	if(d31f1->vendor_id == 0xffff) {
		MSG("Cannot find D31:F1, assuming it is hidden by firmware");
		uint32_t p2sb_ctrl = pci_read_long(d31f1, REG_P2SB_CTRL);
		MSG("P2SB_CTRL=%02x", p2sb_ctrl);
		if(!(p2sb_ctrl & REG_P2SB_CTRL_HIDE)) {
			ERR("D31:F1 is hidden but P2SB_E1 is not 0xff, bailing out");
		}
		MSG("Unhiding P2SB");
		pci_write_long(d31f1, REG_P2SB_CTRL, p2sb_ctrl & ~REG_P2SB_CTRL_HIDE);
		p2sb_ctrl = pci_read_long(d31f1, REG_P2SB_CTRL);
		MSG("P2SB_CTRL=%02x", p2sb_ctrl);
		if(p2sb_ctrl & REG_P2SB_CTRL_HIDE) {
			/* NOTE(review): "PS2B" looks like a typo for "P2SB" —
			 * confirm before changing the user-visible message. */
			ERR("Cannot unhide PS2B");
		}
		pci_fill_info(d31f1, PCI_FILL_RESCAN | PCI_FILL_IDENT);
		if(d31f1->vendor_id == 0xffff) {
			ERR("P2SB unhidden but does not enumerate, bailing out");
		}
	}
	pci_fill_info(d31f1, PCI_FILL_RESCAN | PCI_FILL_IDENT | PCI_FILL_BASES);
	if(d31f1->vendor_id != 0x8086) {
		ERR("Vendor of D31:F1 is not Intel");
	} else if((uint32_t)d31f1->base_addr[0] == 0xffffffff) {
		ERR("SBREG_BAR is not implemented in D31:F1");
	}
	/* Mask the BAR flag bits (low nibble) to get the MMIO base. */
	*sbreg_addr = d31f1->base_addr[0] &~ 0xf;
	MSG("SBREG_ADDR=%08lx", *sbreg_addr);

	/* Restore the firmware's hidden state. */
	MSG("Hiding P2SB again");
	uint32_t p2sb_ctrl = pci_read_long(d31f1, REG_P2SB_CTRL);
	pci_write_long(d31f1, REG_P2SB_CTRL, p2sb_ctrl | REG_P2SB_CTRL_HIDE);
	pci_fill_info(d31f1, PCI_FILL_RESCAN | PCI_FILL_IDENT);
	if(d31f1->vendor_id != 0xffff) {
		ERR("Cannot hide P2SB");
	}
	return 0;
}
/*
 * Walk the PCIe extended capability list (config space offset 0x100 and
 * up) and register every capability found on the device.
 *
 * The list is only scanned when the device has an ordinary PCI Express
 * capability. been_there[] guards against malformed lists that loop
 * back on themselves.
 */
static void pci_scan_ext_caps(struct pci_dev *d)
{
	byte been_there[0x1000];
	int where = 0x100;

	if (!pci_find_cap(d, PCI_CAP_ID_EXP, PCI_CAP_NORMAL))
		return;

	memset(been_there, 0, 0x1000);
	do {
		u32 header;
		int id;

		header = pci_read_long(d, where);
		/* All-zero or all-ones header: end of list / read failure. */
		if (!header || header == 0xffffffff)
			break;
		id = header & 0xffff;
		if (been_there[where]++)
			break;
		pci_add_cap(d, where, id, PCI_CAP_EXTENDED);
		/* Next Capability Offset lives in header bits 31:20; its two
		 * low bits are reserved (offsets are dword-aligned), so mask
		 * them to keep a buggy device from yielding a misaligned
		 * offset. */
		where = (header >> 20) & ~3;
	} while (where);
}
/*
 * Initialize flash access through an NVIDIA graphics card (legacy
 * flashrom programmer API).
 *
 * Locates the card, derives the flash window at BAR0 + 0x300000, clears
 * bit 0 of config register 0x50 to expose the flash interface (this
 * disables the screen), and maps a 16 MiB window.
 *
 * NOTE(review): get_io_perms() result is not checked — confirm it exits
 * on failure in this code base. pcidev_init() here presumably returns
 * the BAR value directly (older API); verify against its prototype.
 * Returns 0.
 */
int gfxnvidia_init(void)
{
	uint32_t reg32;

	get_io_perms();

	io_base_addr = pcidev_init(PCI_VENDOR_ID_NVIDIA, PCI_BASE_ADDRESS_0, gfx_nvidia);
	io_base_addr += 0x300000;
	msg_pinfo("Detected NVIDIA I/O base address: 0x%x.\n", io_base_addr);

	/* Allow access to flash interface (will disable screen). */
	reg32 = pci_read_long(pcidev_dev, 0x50);
	reg32 &= ~(1 << 0);
	rpci_write_long(pcidev_dev, 0x50, reg32);

	nvidia_bar = physmap("NVIDIA", io_base_addr, 16 * 1024 * 1024);

	buses_supported = CHIP_BUSTYPE_PARALLEL;

	/* Write/erase doesn't work. */
	programmer_may_write = 0;

	return 0;
}
int atahpt_init(void) { struct pci_dev *dev = NULL; uint32_t reg32; if (rget_io_perms()) return 1; dev = pcidev_init(ata_hpt, PCI_BASE_ADDRESS_4); if (!dev) return 1; io_base_addr = pcidev_readbar(dev, PCI_BASE_ADDRESS_4); if (!io_base_addr) return 1; /* Enable flash access. */ reg32 = pci_read_long(dev, REG_FLASH_ACCESS); reg32 |= (1 << 24); rpci_write_long(dev, REG_FLASH_ACCESS, reg32); register_par_master(&par_master_atahpt, BUS_PARALLEL); return 0; }
int gfxnvidia_init(void) { uint32_t reg32; if (rget_io_perms()) return 1; io_base_addr = pcidev_init(PCI_BASE_ADDRESS_0, gfx_nvidia); io_base_addr += 0x300000; msg_pinfo("Detected NVIDIA I/O base address: 0x%x.\n", io_base_addr); nvidia_bar = physmap("NVIDIA", io_base_addr, GFXNVIDIA_MEMMAP_SIZE); /* Must be done before rpci calls. */ if (register_shutdown(gfxnvidia_shutdown, NULL)) return 1; /* Allow access to flash interface (will disable screen). */ reg32 = pci_read_long(pcidev_dev, 0x50); reg32 &= ~(1 << 0); rpci_write_long(pcidev_dev, 0x50, reg32); /* Write/erase doesn't work. */ programmer_may_write = 0; register_par_programmer(&par_programmer_gfxnvidia, BUS_PARALLEL); return 0; }
/*
 * Execute one configuration-space operation on @dev.
 *
 * If op->num_values >= 0 this is a write: each value is written at
 * successive offsets (stride = op->width bytes). Otherwise it is a read
 * of a single value at op->addr. In demo_mode no hardware access is
 * performed (reads print '?'); in verbose mode the device address and
 * values are echoed.
 *
 * The format string is picked by width from mm[] (1 -> %02x, 2 -> %04x,
 * 4 -> %08x); indices 0 and 3 are intentionally NULL since those widths
 * do not occur. printf(m, ...) with a non-literal format is deliberate
 * here — m is always one of the table entries above.
 */
static void exec_op(struct op *op, struct pci_dev *dev)
{
	char *mm[] = { NULL, "%02x", "%04x", NULL, "%08x" };
	char *m = mm[op->width];
	unsigned int x;
	int i, addr;

	if (verbose)
		printf("%02x:%02x.%x:%02x", dev->bus, dev->dev, dev->func, op->addr);
	addr = op->addr;
	if (op->num_values >= 0)
		/* Write path: one hardware write per value. */
		for (i = 0; i < op->num_values; i++) {
			if (verbose) {
				putchar(' ');
				printf(m, op->values[i]);
			}
			if (demo_mode)
				continue;
			switch (op->width) {
			case 1:
				pci_write_byte(dev, addr, op->values[i]);
				break;
			case 2:
				pci_write_word(dev, addr, op->values[i]);
				break;
			default:
				pci_write_long(dev, addr, op->values[i]);
				break;
			}
			addr += op->width;
		}
	else {
		/* Read path: fetch and print a single value. */
		if (verbose)
			printf(" = ");
		if (!demo_mode) {
			switch (op->width) {
			case 1:
				x = pci_read_byte(dev, addr);
				break;
			case 2:
				x = pci_read_word(dev, addr);
				break;
			default:
				x = pci_read_long(dev, addr);
				break;
			}
			printf(m, x);
		} else
			putchar('?');
	}
	putchar('\n');
}
/*
 * Issue a PCIe Function Level Reset (FLR) on @pci_dev.
 *
 * Only acts if the device has a PCI Express capability and advertises
 * FLR support in its Device Capabilities register. After initiating the
 * reset, polls the Device Status register until the Transactions
 * Pending bit clears.
 */
void pdev_flr(struct pci_dev *pci_dev)
{
	int pos;
	int dev_cap;
	int dev_status;

	pos = find_cap_offset(pci_dev, PCI_CAP_ID_EXP);
	if ( pos )
	{
		/* Device Capabilities is a 32-bit register. */
		dev_cap = pci_read_long(pci_dev, pos + PCI_EXP_DEVCAP);
		if ( dev_cap & PCI_EXP_DEVCAP_FLR )
		{
			pci_write_word(pci_dev, pos + PCI_EXP_DEVCTL,
			               PCI_EXP_DEVCTL_FLR);
			/* Fix: Device Status is a 16-bit register at a
			 * non-dword-aligned offset; read it with a word
			 * access instead of a misaligned dword read.
			 * NOTE(review): there is no timeout — a hung device
			 * would spin forever; consider bounding this loop. */
			do {
				dev_status = pci_read_word(pci_dev,
				                           pos + PCI_EXP_DEVSTA);
			} while (dev_status & PCI_EXP_DEVSTA_TRPND);
		}
	}
}
/*
 * Recursively scan PCI bus @bus for devices and link every function
 * found into access structure @a.
 *
 * @busmap marks buses already scanned so that firmware bugs (the same
 * bus reachable twice) do not cause infinite recursion. A scratch
 * pci_dev @t is reused for the probing reads; a real pci_dev is
 * allocated and linked only for functions that respond. Functions 1-7
 * of a device are probed only when function 0 reports the multifunction
 * bit (0x80) in its header type. Bridge and CardBus headers trigger a
 * recursive scan of their secondary bus.
 */
void pci_generic_scan_bus(struct pci_access *a, byte *busmap, int bus)
{
	int dev, multi, ht;
	struct pci_dev *t;

	a->debug("Scanning bus %02x for devices...\n", bus);
	if (busmap[bus]) {
		a->warning("Bus %02x seen twice (firmware bug). Ignored.", bus);
		return;
	}
	busmap[bus] = 1;
	t = pci_alloc_dev(a);
	t->bus = bus;
	for (dev = 0; dev < 32; dev++) {
		t->dev = dev;
		multi = 0;
		/* Note: condition relies on && binding tighter than ||. */
		for (t->func = 0; !t->func || multi && t->func < 8; t->func++) {
			u32 vd = pci_read_long(t, PCI_VENDOR_ID);
			struct pci_dev *d;

			/* 0 or all-ones vendor/device: no function here. */
			if (!vd || vd == 0xffffffff)
				continue;
			ht = pci_read_byte(t, PCI_HEADER_TYPE);
			if (!t->func)
				multi = ht & 0x80;
			ht &= 0x7f;
			d = pci_alloc_dev(a);
			d->bus = t->bus;
			d->dev = t->dev;
			d->func = t->func;
			d->vendor_id = vd & 0xffff;
			d->device_id = vd >> 16U;
			d->known_fields = PCI_FILL_IDENT;
			d->hdrtype = ht;
			pci_link_dev(a, d);
			switch (ht) {
			case PCI_HEADER_TYPE_NORMAL:
				break;
			case PCI_HEADER_TYPE_BRIDGE:
			case PCI_HEADER_TYPE_CARDBUS:
				/* Descend into the bridge's secondary bus. */
				pci_generic_scan_bus(a, busmap, pci_read_byte(t, PCI_SECONDARY_BUS));
				break;
			default:
				a->debug("Device %04x:%02x:%02x.%d has unknown header type %02x.\n",
					 d->domain, d->bus, d->dev, d->func, ht);
			}
		}
	}
	pci_free_dev(t);
}
static size_t get_bar_size(struct pci_access* a, struct pci_dev* d, size_t i) { const enum pci_access_type saved_method = a->method; const int addr = PCI_BASE_ADDRESS_0 + (int)i * 4; uint32_t size; uint32_t saved_long; a->method = PCI_ACCESS_I386_TYPE1; saved_long = pci_read_long(d, addr); pci_write_long(d, addr, (uint32_t)-1); size = pci_read_long(d, addr); size &= ~PCI_ADDR_FLAG_MASK; size = ~size + 1; pci_write_long(d, addr, saved_long); a->method = saved_method; return size; }
/*
 * Read and decode PCI BAR @bar of @dev, returning the usable base
 * address (flag bits masked off) or 0 on failure.
 *
 * The BAR type (I/O, memory, expansion ROM) is derived from the header
 * type and the requested register; sanity warnings are printed when the
 * matching decode (I/O or memory) is disabled in the command register.
 * 64-bit memory BARs are assembled from two consecutive dwords and
 * rejected when uintptr_t cannot hold 64 bits.
 */
uintptr_t pcidev_readbar(struct pci_dev *dev, int bar)
{
	uint64_t addr;
	uint32_t upperaddr;
	uint8_t headertype;
	uint16_t supported_cycles;
	enum pci_bartype bartype = TYPE_UNKNOWN;

	headertype = pci_read_byte(dev, PCI_HEADER_TYPE) & 0x7f;
	msg_pspew("PCI header type 0x%02x\n", headertype);

	/* Don't use dev->base_addr[x] (as value for 'bar'), won't work on older libpci. */
	addr = pci_read_long(dev, bar);

	/* Sanity checks. */
	switch (headertype) {
	case PCI_HEADER_TYPE_NORMAL:
		switch (bar) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
		case PCI_BASE_ADDRESS_2:
		case PCI_BASE_ADDRESS_3:
		case PCI_BASE_ADDRESS_4:
		case PCI_BASE_ADDRESS_5:
			/* Bit 0 distinguishes I/O (1) from memory (0) BARs. */
			if ((addr & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
				bartype = TYPE_IOBAR;
			else
				bartype = TYPE_MEMBAR;
			break;
		case PCI_ROM_ADDRESS:
			bartype = TYPE_ROMBAR;
			break;
		}
		break;
	case PCI_HEADER_TYPE_BRIDGE:
		switch (bar) {
		case PCI_BASE_ADDRESS_0:
		case PCI_BASE_ADDRESS_1:
			if ((addr & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
				bartype = TYPE_IOBAR;
			else
				bartype = TYPE_MEMBAR;
			break;
		case PCI_ROM_ADDRESS1:
			bartype = TYPE_ROMBAR;
			break;
		}
		break;
	case PCI_HEADER_TYPE_CARDBUS:
		/* CardBus BARs are not decoded; bartype stays TYPE_UNKNOWN. */
		break;
	default:
		msg_perr("Unknown PCI header type 0x%02x, BAR type cannot be determined reliably.\n",
			 headertype);
		break;
	}

	supported_cycles = pci_read_word(dev, PCI_COMMAND);

	msg_pdbg("Requested BAR is of type ");
	switch (bartype) {
	case TYPE_MEMBAR:
		msg_pdbg("MEM");
		if (!(supported_cycles & PCI_COMMAND_MEMORY)) {
			msg_perr("MEM BAR access requested, but device has MEM space accesses disabled.\n");
			/* TODO: Abort here? */
		}
		/* Bits 2:1 encode the width (00 = 32-bit, 10 = 64-bit),
		 * bit 3 the prefetchable attribute. */
		msg_pdbg(", %sbit, %sprefetchable\n",
			 ((addr & 0x6) == 0x0) ? "32" : (((addr & 0x6) == 0x4) ? "64" : "reserved"),
			 (addr & 0x8) ? "" : "not ");
		if ((addr & 0x6) == 0x4) {
			/* The spec says that a 64-bit register consumes
			 * two subsequent dword locations.
			 */
			upperaddr = pci_read_long(dev, bar + 4);
			if (upperaddr != 0x00000000) {
				/* Fun! A real 64-bit resource. */
				if (sizeof(uintptr_t) != sizeof(uint64_t)) {
					msg_perr("BAR unreachable!");
					/* TODO: Really abort here? If multiple PCI devices match,
					 * we might never tell the user about the other devices.
					 */
					return 0;
				}
				addr |= (uint64_t)upperaddr << 32;
			}
		}
		addr &= PCI_BASE_ADDRESS_MEM_MASK;
		break;
	case TYPE_IOBAR:
		msg_pdbg("I/O\n");
#if __FLASHROM_HAVE_OUTB__
		if (!(supported_cycles & PCI_COMMAND_IO)) {
			msg_perr("I/O BAR access requested, but device has I/O space accesses disabled.\n");
			/* TODO: Abort here? */
		}
#else
		msg_perr("I/O BAR access requested, but flashrom does not support I/O BAR access on this "
			 "platform (yet).\n");
#endif
		addr &= PCI_BASE_ADDRESS_IO_MASK;
		break;
	case TYPE_ROMBAR:
		msg_pdbg("ROM\n");
		/* Not sure if this check is needed. */
		if (!(supported_cycles & PCI_COMMAND_MEMORY)) {
			msg_perr("MEM BAR access requested, but device has MEM space accesses disabled.\n");
			/* TODO: Abort here? */
		}
		addr &= PCI_ROM_ADDRESS_MASK;
		break;
	case TYPE_UNKNOWN:
		msg_perr("BAR type unknown, please report a bug at [email protected]\n");
	}

	return (uintptr_t)addr;
}
/*
 * PCIe MMIO configuration space
 *
 * Locate PCIEXBAR on the given northbridge, decode its size field
 * (256/128/64 MiB determine how many buses are mapped), map the window
 * and hex-dump the full 4 KiB extended config space of every device
 * that responds and passes the PCIe heuristic. Returns 0 on success,
 * 1 when unsupported/absent; exits on mapping failure.
 */
int print_pciexbar(struct pci_dev *nb)
{
	uint64_t pciexbar_reg;
	uint64_t pciexbar_phys;
	volatile uint8_t *pciexbar;
	int max_busses, devbase, i;
	int bus, dev, fn;

	printf("========= PCIEXBAR ========\n\n");

	switch (nb->device_id) {
	case PCI_DEVICE_ID_INTEL_82915:
	case PCI_DEVICE_ID_INTEL_82945GM:
	case PCI_DEVICE_ID_INTEL_82945GSE:
	case PCI_DEVICE_ID_INTEL_82945P:
	case PCI_DEVICE_ID_INTEL_82975X:
		pciexbar_reg = pci_read_long(nb, 0x48);
		break;
	case PCI_DEVICE_ID_INTEL_82946:
	case PCI_DEVICE_ID_INTEL_82965PM:
	case PCI_DEVICE_ID_INTEL_82Q965:
	case PCI_DEVICE_ID_INTEL_82Q35:
	case PCI_DEVICE_ID_INTEL_82G33:
	case PCI_DEVICE_ID_INTEL_82Q33:
	case PCI_DEVICE_ID_INTEL_82X38:
	case PCI_DEVICE_ID_INTEL_32X0:
	case PCI_DEVICE_ID_INTEL_82XX4X:
	case PCI_DEVICE_ID_INTEL_82Q45:
	case PCI_DEVICE_ID_INTEL_82G45:
	case PCI_DEVICE_ID_INTEL_82G41:
	case PCI_DEVICE_ID_INTEL_82B43:
	case PCI_DEVICE_ID_INTEL_82B43_2:
	case PCI_DEVICE_ID_INTEL_ATOM_DXXX:
	case PCI_DEVICE_ID_INTEL_ATOM_NXXX:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_E3:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_E3:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_015c:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_E3:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_5TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_D2:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_WST:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_Y:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_U_Q:
		/* 64-bit register: low dword at 0x60, high dword at 0x64. */
		pciexbar_reg = pci_read_long(nb, 0x60);
		pciexbar_reg |= ((uint64_t)pci_read_long(nb, 0x64)) << 32;
		break;
	case PCI_DEVICE_ID_INTEL_82810:
	case PCI_DEVICE_ID_INTEL_82810_DC:
	case PCI_DEVICE_ID_INTEL_82810E_DC:
	case PCI_DEVICE_ID_INTEL_82865:
		printf("Error: This northbridge does not have PCIEXBAR.\n");
		return 1;
	default:
		printf("Error: Dumping PCIEXBAR on this northbridge is not (yet) supported.\n");
		return 1;
	}

	/* Bit 0 is the enable bit. */
	if (!(pciexbar_reg & (1 << 0))) {
		printf("PCIEXBAR register is disabled.\n");
		return 0;
	}

	/* Bits 2:1 select the window length and thus the bus count. */
	switch ((pciexbar_reg >> 1) & 3) {
	case 0: // 256MB
		pciexbar_phys = pciexbar_reg & (0xffULL << 28);
		max_busses = 256;
		break;
	case 1: // 128M
		pciexbar_phys = pciexbar_reg & (0x1ffULL << 27);
		max_busses = 128;
		break;
	case 2: // 64M
		pciexbar_phys = pciexbar_reg & (0x3ffULL << 26);
		max_busses = 64;
		break;
	default: // RSVD
		printf("Undefined address base. Bailing out.\n");
		return 1;
	}

	printf("PCIEXBAR: 0x%08" PRIx64 "\n", pciexbar_phys);

	pciexbar = map_physical(pciexbar_phys, (max_busses * 1024 * 1024));
	if (pciexbar == NULL) {
		perror("Error mapping PCIEXBAR");
		exit(1);
	}

	for (bus = 0; bus < max_busses; bus++) {
		for (dev = 0; dev < 32; dev++) {
			for (fn = 0; fn < 8; fn++) {
				/* 1 MiB per bus, 32 KiB per device, 4 KiB per function. */
				devbase = (bus * 1024 * 1024) + (dev * 32 * 1024) + (fn * 4 * 1024);

				/* Vendor ID 0xffff: no function present. */
				if (*(uint16_t *)(pciexbar + devbase) == 0xffff)
					continue;

				/* This is a heuristics. Anyone got a better check? */
				if( (*(uint32_t *)(pciexbar + devbase + 256) == 0xffffffff) &&
					(*(uint32_t *)(pciexbar + devbase + 512) == 0xffffffff) ) {
#if DEBUG
					printf("Skipped non-PCIe device %02x:%02x.%01x\n", bus, dev, fn);
#endif
					continue;
				}

				printf("\nPCIe %02x:%02x.%01x extended config space:", bus, dev, fn);
				for (i = 0; i < 4096; i++) {
					if((i % 0x10) == 0)
						printf("\n%04x:", i);
					printf(" %02x", *(pciexbar+devbase+i));
				}
				printf("\n");
			}
		}
	}

	unmap_physical((void *)pciexbar, (max_busses * 1024 * 1024));
	return 0;
}
/*
 * MCH-ICH Serial Interconnect Ingress Root Complex MMIO configuration space
 *
 * Locate DMIBAR on the given northbridge and dump it: either
 * pretty-printed from a per-generation register table (dmi_registers)
 * or as a raw hexdump of non-zero dwords. Returns 0 on success, 1 when
 * unsupported/absent; exits on mapping failure.
 */
int print_dmibar(struct pci_dev *nb)
{
	int i, size = (4 * 1024);
	volatile uint8_t *dmibar;
	uint64_t dmibar_phys;
	const io_register_t *dmi_registers = NULL;

	printf("\n============= DMIBAR ============\n\n");

	switch (nb->device_id) {
	case PCI_DEVICE_ID_INTEL_82915:
	case PCI_DEVICE_ID_INTEL_82945GM:
	case PCI_DEVICE_ID_INTEL_82945GSE:
	case PCI_DEVICE_ID_INTEL_82945P:
	case PCI_DEVICE_ID_INTEL_82975X:
		dmibar_phys = pci_read_long(nb, 0x4c) & 0xfffffffe;
		break;
	case PCI_DEVICE_ID_INTEL_82946:
	case PCI_DEVICE_ID_INTEL_82965PM:
	case PCI_DEVICE_ID_INTEL_82Q965:
	case PCI_DEVICE_ID_INTEL_82Q35:
	case PCI_DEVICE_ID_INTEL_82G33:
	case PCI_DEVICE_ID_INTEL_82Q33:
	case PCI_DEVICE_ID_INTEL_82X38:
	case PCI_DEVICE_ID_INTEL_32X0:
	case PCI_DEVICE_ID_INTEL_82XX4X:
	case PCI_DEVICE_ID_INTEL_82Q45:
	case PCI_DEVICE_ID_INTEL_82G45:
	case PCI_DEVICE_ID_INTEL_82G41:
	case PCI_DEVICE_ID_INTEL_82B43:
	case PCI_DEVICE_ID_INTEL_82B43_2:
	case PCI_DEVICE_ID_INTEL_ATOM_DXXX:
	case PCI_DEVICE_ID_INTEL_ATOM_NXXX:
		/* 64-bit BAR: low dword at 0x68, high dword at 0x6c. */
		dmibar_phys = pci_read_long(nb, 0x68) & 0xfffffffe;
		dmibar_phys |= ((uint64_t)pci_read_long(nb, 0x6c)) << 32;
		break;
	case PCI_DEVICE_ID_INTEL_82810:
	case PCI_DEVICE_ID_INTEL_82810_DC:
	case PCI_DEVICE_ID_INTEL_82810E_DC:
	case PCI_DEVICE_ID_INTEL_82865:
		printf("This northbridge does not have DMIBAR.\n");
		return 1;
	case PCI_DEVICE_ID_INTEL_82X58:
		dmibar_phys = pci_read_long(nb, 0x50) & 0xfffff000;
		break;
	case PCI_DEVICE_ID_INTEL_CORE_0TH_GEN:
		/* DMIBAR is called DMIRCBAR in Nehalem */
		dmibar_phys = pci_read_long(nb, 0x50) & 0xfffff000; /* 31:12 */
		dmi_registers = nehalem_dmi_registers;
		/* NOTE(review): from here on 'size' holds a register count,
		 * yet it is also passed to map_physical() below as a byte
		 * count — confirm this is intended. */
		size = ARRAY_SIZE(nehalem_dmi_registers);
		break;
	case PCI_DEVICE_ID_INTEL_CORE_1ST_GEN:
		dmibar_phys = pci_read_long(nb, 0x68);
		dmibar_phys |= ((uint64_t)pci_read_long(nb, 0x6c)) << 32;
		dmibar_phys &= 0x0000000ffffff000UL; /* 35:12 */
		dmi_registers = westmere_dmi_registers;
		size = ARRAY_SIZE(westmere_dmi_registers);
		break;
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_E3:
		dmi_registers = sandybridge_dmi_registers;
		size = ARRAY_SIZE(sandybridge_dmi_registers);
		/* fall through */
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_D: /* pretty printing not implemented yet */
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_E3:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_015c:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_E3:
		dmibar_phys = pci_read_long(nb, 0x68);
		dmibar_phys |= ((uint64_t)pci_read_long(nb, 0x6c)) << 32;
		dmibar_phys &= 0x0000007ffffff000UL; /* 38:12 */
		break;
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_5TH_GEN_U:
		dmi_registers = haswell_ult_dmi_registers;
		size = ARRAY_SIZE(haswell_ult_dmi_registers);
		dmibar_phys = pci_read_long(nb, 0x68);
		dmibar_phys |= ((uint64_t)pci_read_long(nb, 0x6c)) << 32;
		dmibar_phys &= 0x0000007ffffff000UL; /* 38:12 */
		break;
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_D2:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_WST:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_Y:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_U_Q:
		dmi_registers = skylake_dmi_registers;
		size = ARRAY_SIZE(skylake_dmi_registers);
		dmibar_phys = pci_read_long(nb, 0x68);
		dmibar_phys |= ((uint64_t)pci_read_long(nb, 0x6c)) << 32;
		dmibar_phys &= 0x0000007ffffff000UL; /* 38:12 */
		break;
	default:
		printf("Error: Dumping DMIBAR on this northbridge is not (yet) supported.\n");
		return 1;
	}

	dmibar = map_physical(dmibar_phys, size);
	if (dmibar == NULL) {
		perror("Error mapping DMIBAR");
		exit(1);
	}

	printf("DMIBAR = 0x%08" PRIx64 " (MEM)\n\n", dmibar_phys);
	if (dmi_registers != NULL) {
		/* Pretty-print the known per-generation register table. */
		for (i = 0; i < size; i++) {
			switch (dmi_registers[i].size) {
			case 4:
				printf("dmibase+0x%04x: 0x%08x (%s)\n",
					dmi_registers[i].addr,
					*(uint32_t *)(dmibar+dmi_registers[i].addr),
					dmi_registers[i].name);
				break;
			case 2:
				printf("dmibase+0x%04x: 0x%04x (%s)\n",
					dmi_registers[i].addr,
					*(uint16_t *)(dmibar+dmi_registers[i].addr),
					dmi_registers[i].name);
				break;
			case 1:
				printf("dmibase+0x%04x: 0x%02x (%s)\n",
					dmi_registers[i].addr,
					*(uint8_t *)(dmibar+dmi_registers[i].addr),
					dmi_registers[i].name);
				break;
			}
		}
	} else {
		/* No table: hexdump all non-zero dwords. */
		for (i = 0; i < size; i += 4) {
			if (*(uint32_t *)(dmibar + i))
				printf("0x%04x: 0x%08x\n", i, *(uint32_t *)(dmibar+i));
		}
	}
	unmap_physical((void *)dmibar, size);
	return 0;
}
/*
 * Egress Port Root Complex MMIO configuration space
 *
 * Locate EPBAR on the given northbridge, map the 4 KiB window and
 * hex-dump every non-zero dword. Returns 0 on success, 1 when the
 * northbridge has no EPBAR or is unsupported; exits on mapping failure.
 */
int print_epbar(struct pci_dev *nb)
{
	int i, size = (4 * 1024);
	volatile uint8_t *epbar;
	uint64_t epbar_phys;

	printf("\n============= EPBAR =============\n\n");

	switch (nb->device_id) {
	case PCI_DEVICE_ID_INTEL_82915:
	case PCI_DEVICE_ID_INTEL_82945GM:
	case PCI_DEVICE_ID_INTEL_82945GSE:
	case PCI_DEVICE_ID_INTEL_82945P:
	case PCI_DEVICE_ID_INTEL_82946:
	case PCI_DEVICE_ID_INTEL_82975X:
		epbar_phys = pci_read_long(nb, 0x40) & 0xfffffffe;
		break;
	case PCI_DEVICE_ID_INTEL_82965PM:
	case PCI_DEVICE_ID_INTEL_82Q965:
	case PCI_DEVICE_ID_INTEL_82Q35:
	case PCI_DEVICE_ID_INTEL_82G33:
	case PCI_DEVICE_ID_INTEL_82Q33:
	case PCI_DEVICE_ID_INTEL_82X38:
	case PCI_DEVICE_ID_INTEL_32X0:
	case PCI_DEVICE_ID_INTEL_82XX4X:
	case PCI_DEVICE_ID_INTEL_82Q45:
	case PCI_DEVICE_ID_INTEL_82G45:
	case PCI_DEVICE_ID_INTEL_82G41:
	case PCI_DEVICE_ID_INTEL_82B43:
	case PCI_DEVICE_ID_INTEL_82B43_2:
	case PCI_DEVICE_ID_INTEL_ATOM_DXXX:
	case PCI_DEVICE_ID_INTEL_ATOM_NXXX:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_2ND_GEN_E3:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_E3:
	case PCI_DEVICE_ID_INTEL_CORE_3RD_GEN_015c:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_E3:
	case PCI_DEVICE_ID_INTEL_CORE_4TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_5TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_D2:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_M:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_WST:
	case PCI_DEVICE_ID_INTEL_CORE_6TH_GEN_D:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_U:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_Y:
	case PCI_DEVICE_ID_INTEL_CORE_7TH_GEN_U_Q:
		/* 64-bit BAR: low dword at 0x40, high dword at 0x44. */
		epbar_phys = pci_read_long(nb, 0x40) & 0xfffffffe;
		epbar_phys |= ((uint64_t)pci_read_long(nb, 0x44)) << 32;
		break;
	case PCI_DEVICE_ID_INTEL_82810:
	case PCI_DEVICE_ID_INTEL_82810_DC:
	case PCI_DEVICE_ID_INTEL_82810E_DC:
	case PCI_DEVICE_ID_INTEL_82830M:
	case PCI_DEVICE_ID_INTEL_82865:
		printf("This northbridge does not have EPBAR.\n");
		return 1;
	default:
		printf("Error: Dumping EPBAR on this northbridge is not (yet) supported.\n");
		return 1;
	}

	epbar = map_physical(epbar_phys, size);
	if (epbar == NULL) {
		perror("Error mapping EPBAR");
		exit(1);
	}

	printf("EPBAR = 0x%08" PRIx64 " (MEM)\n\n", epbar_phys);
	/* Dump all non-zero dwords of the window. */
	for (i = 0; i < size; i += 4) {
		if (*(uint32_t *)(epbar + i))
			printf("0x%04x: 0x%08x\n", i, *(uint32_t *)(epbar+i));
	}

	unmap_physical((void *)epbar, size);
	return 0;
}
/*
 * Initialize bitbanged SPI on an NVIDIA MCP6x chipset.
 *
 * Locates the SMBus device by vendor/class, reads the SPI BAR from its
 * config register 0x74, maps the relevant part of the window and brings
 * up the bitbang SPI master.
 *
 * @want_spi: nonzero when the chipset strap indicates SPI flash; this
 *            decides whether a missing/invalid BAR is fatal.
 *
 * Returns 0 on success (including the benign "SPI not used" cases),
 * 1 on error.
 */
int mcp6x_spi_init(int want_spi)
{
	uint16_t status;
	uint32_t mcp6x_spibaraddr;
	struct pci_dev *smbusdev;

	/* Look for the SMBus device (SMBus PCI class) */
	smbusdev = pci_dev_find_vendorclass(0x10de, 0x0c05);
	if (!smbusdev) {
		if (want_spi) {
			msg_perr("ERROR: SMBus device not found. Not enabling "
				 "SPI.\n");
			return 1;
		} else {
			msg_pinfo("Odd. SMBus device not found.\n");
			return 0;
		}
	}
	msg_pdbg("Found SMBus device %04x:%04x at %02x:%02x:%01x\n",
		 smbusdev->vendor_id, smbusdev->device_id,
		 smbusdev->bus, smbusdev->dev, smbusdev->func);

	/* Locate the BAR where the SPI interface lives. */
	mcp6x_spibaraddr = pci_read_long(smbusdev, 0x74);
	/* BAR size is 64k, bits 15..4 are zero, bit 3..0 declare a
	 * 32-bit non-prefetchable memory BAR. */
	mcp6x_spibaraddr &= ~0xffff;
	msg_pdbg("MCP SPI BAR is at 0x%08x\n", mcp6x_spibaraddr);

	/* Accessing a NULL pointer BAR is evil. Don't do it. */
	if (!mcp6x_spibaraddr && want_spi) {
		msg_perr("Error: Chipset is strapped for SPI, but MCP SPI BAR is invalid.\n");
		return 1;
	} else if (!mcp6x_spibaraddr && !want_spi) {
		msg_pdbg("MCP SPI is not used.\n");
		return 0;
	} else if (mcp6x_spibaraddr && !want_spi) {
		msg_pdbg("Strange. MCP SPI BAR is valid, but chipset apparently doesn't have SPI enabled.\n");
		/* FIXME: Should we enable SPI anyway? */
		return 0;
	}

	/* Map the BAR. Bytewise/wordwise access at 0x530 and 0x540. */
	mcp6x_spibar = rphysmap("NVIDIA MCP6x SPI", mcp6x_spibaraddr, 0x544);
	if (mcp6x_spibar == ERROR_PTR)
		return 1;

	status = mmio_readw(mcp6x_spibar + 0x530);
	msg_pdbg("SPI control is 0x%04x, req=%i, gnt=%i\n",
		 status, (status >> MCP6X_SPI_REQUEST) & 0x1,
		 (status >> MCP6X_SPI_GRANT) & 0x1);
	/* Remember the GPIO state for later restore/toggling. */
	mcp_gpiostate = status & 0xff;

	if (bitbang_spi_init(&bitbang_spi_master_mcp6x)) {
		/* This should never happen. */
		msg_perr("MCP6X bitbang SPI master init failed!\n");
		return 1;
	}

	return 0;
}
/*
 * Dump the SPI BAR registers of an Intel southbridge.
 *
 * The SPI registers live inside the RCBA window (base read from config
 * register 0xf0); the SPIBAR offset within RCBA and the register table
 * depend on the chipset generation (ICH7 vs. ICH8 vs. ICH9+). Returns
 * 0 on success, 1 when the southbridge has no SPI controller/RCBA or is
 * unsupported; exits on mapping failure.
 *
 * NOTE(review): the "%08x" format is used for 1/2/4-byte reads alike —
 * narrow values are zero-extended to 8 hex digits; confirm this output
 * width is intended.
 */
int print_spibar(struct pci_dev *sb) {
	int i, size = 0, rcba_size = 0x4000;
	volatile uint8_t *rcba;
	uint32_t rcba_phys;
	const io_register_t *spi_register = NULL;
	uint32_t spibaroffset;

	printf("\n============= SPI Bar ==============\n\n");

	switch (sb->device_id) {
	case PCI_DEVICE_ID_INTEL_ICH6:
		printf("This southbridge does not have a SPI controller.\n");
		return 1;
	case PCI_DEVICE_ID_INTEL_ICH7:
	case PCI_DEVICE_ID_INTEL_ICH7M:
	case PCI_DEVICE_ID_INTEL_ICH7DH:
	case PCI_DEVICE_ID_INTEL_ICH7MDH:
		spibaroffset = ICH78_SPIBAR;
		rcba_phys = pci_read_long(sb, 0xf0) & 0xfffffffe;
		size = ARRAY_SIZE(ich7_spi_bar_registers);
		spi_register = ich7_spi_bar_registers;
		break;
	case PCI_DEVICE_ID_INTEL_ICH8:
		spibaroffset = ICH78_SPIBAR;
		rcba_phys = pci_read_long(sb, 0xf0) & 0xfffffffe;
		size = ARRAY_SIZE(spi_bar_registers);
		spi_register = spi_bar_registers;
		break;
	case PCI_DEVICE_ID_INTEL_ICH8M:
	case PCI_DEVICE_ID_INTEL_ICH8ME:
	case PCI_DEVICE_ID_INTEL_ICH9DH:
	case PCI_DEVICE_ID_INTEL_ICH9DO:
	case PCI_DEVICE_ID_INTEL_ICH9R:
	case PCI_DEVICE_ID_INTEL_ICH9:
	case PCI_DEVICE_ID_INTEL_ICH9M:
	case PCI_DEVICE_ID_INTEL_ICH9ME:
	case PCI_DEVICE_ID_INTEL_ICH10:
	case PCI_DEVICE_ID_INTEL_ICH10R:
	case PCI_DEVICE_ID_INTEL_NM10:
	case PCI_DEVICE_ID_INTEL_I63XX:
	case PCI_DEVICE_ID_INTEL_3400:
	case PCI_DEVICE_ID_INTEL_3420:
	case PCI_DEVICE_ID_INTEL_3450:
	case PCI_DEVICE_ID_INTEL_3400_DESKTOP:
	case PCI_DEVICE_ID_INTEL_3400_MOBILE:
	case PCI_DEVICE_ID_INTEL_3400_MOBILE_SFF:
	case PCI_DEVICE_ID_INTEL_B55_A:
	case PCI_DEVICE_ID_INTEL_B55_B:
	case PCI_DEVICE_ID_INTEL_H55:
	case PCI_DEVICE_ID_INTEL_H57:
	case PCI_DEVICE_ID_INTEL_HM55:
	case PCI_DEVICE_ID_INTEL_HM57:
	case PCI_DEVICE_ID_INTEL_P55:
	case PCI_DEVICE_ID_INTEL_PM55:
	case PCI_DEVICE_ID_INTEL_Q57:
	case PCI_DEVICE_ID_INTEL_QM57:
	case PCI_DEVICE_ID_INTEL_QS57:
	case PCI_DEVICE_ID_INTEL_Z68:
	case PCI_DEVICE_ID_INTEL_P67:
	case PCI_DEVICE_ID_INTEL_UM67:
	case PCI_DEVICE_ID_INTEL_HM65:
	case PCI_DEVICE_ID_INTEL_H67:
	case PCI_DEVICE_ID_INTEL_HM67:
	case PCI_DEVICE_ID_INTEL_Q65:
	case PCI_DEVICE_ID_INTEL_QS67:
	case PCI_DEVICE_ID_INTEL_Q67:
	case PCI_DEVICE_ID_INTEL_QM67:
	case PCI_DEVICE_ID_INTEL_B65:
	case PCI_DEVICE_ID_INTEL_C202:
	case PCI_DEVICE_ID_INTEL_C204:
	case PCI_DEVICE_ID_INTEL_C206:
	case PCI_DEVICE_ID_INTEL_H61:
	case PCI_DEVICE_ID_INTEL_Z77:
	case PCI_DEVICE_ID_INTEL_Z75:
	case PCI_DEVICE_ID_INTEL_Q77:
	case PCI_DEVICE_ID_INTEL_Q75:
	case PCI_DEVICE_ID_INTEL_B75:
	case PCI_DEVICE_ID_INTEL_H77:
	case PCI_DEVICE_ID_INTEL_C216:
	case PCI_DEVICE_ID_INTEL_QM77:
	case PCI_DEVICE_ID_INTEL_QS77:
	case PCI_DEVICE_ID_INTEL_HM77:
	case PCI_DEVICE_ID_INTEL_UM77:
	case PCI_DEVICE_ID_INTEL_HM76:
	case PCI_DEVICE_ID_INTEL_HM75:
	case PCI_DEVICE_ID_INTEL_HM70:
	case PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_FULL:
	case PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_PREM:
	case PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_BASE:
	case PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_PREM:
	case PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP:
	case PCI_DEVICE_ID_INTEL_C8_MOBILE:
	case PCI_DEVICE_ID_INTEL_C8_DESKTOP:
	case PCI_DEVICE_ID_INTEL_Z87:
	case PCI_DEVICE_ID_INTEL_Z85:
	case PCI_DEVICE_ID_INTEL_HM86:
	case PCI_DEVICE_ID_INTEL_H87:
	case PCI_DEVICE_ID_INTEL_HM87:
	case PCI_DEVICE_ID_INTEL_Q85:
	case PCI_DEVICE_ID_INTEL_Q87:
	case PCI_DEVICE_ID_INTEL_QM87:
	case PCI_DEVICE_ID_INTEL_B85:
	case PCI_DEVICE_ID_INTEL_C222:
	case PCI_DEVICE_ID_INTEL_C224:
	case PCI_DEVICE_ID_INTEL_C226:
	case PCI_DEVICE_ID_INTEL_H81:
		spibaroffset = ICH9_SPIBAR;
		rcba_phys = pci_read_long(sb, 0xf0) & 0xfffffffe;
		size = ARRAY_SIZE(spi_bar_registers);
		spi_register = spi_bar_registers;
		break;
	case PCI_DEVICE_ID_INTEL_ICH:
	case PCI_DEVICE_ID_INTEL_ICH0:
	case PCI_DEVICE_ID_INTEL_ICH2:
	case PCI_DEVICE_ID_INTEL_ICH4:
	case PCI_DEVICE_ID_INTEL_ICH4M:
	case PCI_DEVICE_ID_INTEL_ICH5:
		printf("This southbridge does not have RCBA.\n");
		return 1;
	default:
		printf("Error: Dumping RCBA on this southbridge is not (yet) supported.\n");
		return 1;
	}

	rcba = map_physical(rcba_phys, rcba_size);
	if (rcba == NULL) {
		perror("Error mapping RCBA");
		exit(1);
	}

	/* Print each register from the table at its width. */
	for (i = 0; i < size; i++) {
		switch (spi_register[i].size) {
		case 1:
			printf("0x%08x = %s\n", *(uint8_t *)(rcba + spibaroffset + spi_register[i].addr), spi_register[i].name);
			break;
		case 2:
			printf("0x%08x = %s\n", *(uint16_t *)(rcba + spibaroffset + spi_register[i].addr), spi_register[i].name);
			break;
		case 4:
			printf("0x%08x = %s\n", *(uint32_t *)(rcba + spibaroffset + spi_register[i].addr), spi_register[i].name);
			break;
		case 8:
			/* 64-bit register printed as high dword then low dword. */
			printf("0x%08x%08x = %s\n",
				*(uint32_t *)(rcba + spibaroffset + spi_register[i].addr + 4),
				*(uint32_t *)(rcba + spibaroffset + spi_register[i].addr),
				spi_register[i].name);
			break;
		}
	}
	unmap_physical((void *)rcba, rcba_size);
	return 0;
}
/*
 * Probe bit 31 of the NBP1 capability register in PCI config space to
 * find out whether this CPU supports the NBP1 monitor.
 *
 * Returns 1 if NBP1 is supported, 0 otherwise.
 */
static int is_nbp1_capable(void)
{
	uint32_t val;

	val = pci_read_long(amd_fam14h_pci_dev, PCI_NBP1_CAP_OFFSET);
	/*
	 * Use an unsigned constant: (1 << 31) shifts into the sign bit of a
	 * signed int, which is undefined behavior in C (CERT INT34-C).  The
	 * !! also normalizes the result to 0/1 instead of relying on the
	 * implementation-defined uint32_t -> int conversion of 0x80000000.
	 */
	return !!(val & (1U << 31));
}
/* * (G)MCH MMIO Config Space */ int print_mchbar(struct pci_dev *nb) { int i, size = (16 * 1024); volatile uint8_t *mchbar; uint64_t mchbar_phys; printf("\n============= MCHBAR ============\n\n"); switch (nb->device_id) { case PCI_DEVICE_ID_INTEL_82915: case PCI_DEVICE_ID_INTEL_82945GM: case PCI_DEVICE_ID_INTEL_82945GSE: case PCI_DEVICE_ID_INTEL_82945P: case PCI_DEVICE_ID_INTEL_82975X: mchbar_phys = pci_read_long(nb, 0x44) & 0xfffffffe; break; case PCI_DEVICE_ID_INTEL_PM965: case PCI_DEVICE_ID_INTEL_82Q35: case PCI_DEVICE_ID_INTEL_82G33: case PCI_DEVICE_ID_INTEL_82Q33: mchbar_phys = pci_read_long(nb, 0x48) & 0xfffffffe; mchbar_phys |= ((uint64_t)pci_read_long(nb, 0x4c)) << 32; break; case PCI_DEVICE_ID_INTEL_Q965: case PCI_DEVICE_ID_INTEL_ATOM_DXXX: case PCI_DEVICE_ID_INTEL_ATOM_NXXX: mchbar_phys = pci_read_long(nb, 0x48); /* Test if bit 0 of the MCHBAR reg is 1 to enable memory reads. * If it isn't, try to set it. This may fail, because there is * some bit that locks that bit, and isn't in the public * datasheets. 
*/ if(!(mchbar_phys & 1)) { printf("Access to the MCHBAR is currently disabled, "\ "attempting to enable.\n"); mchbar_phys |= 0x1; pci_write_long(nb, 0x48, mchbar_phys); if(pci_read_long(nb, 0x48) & 1) printf("Enabled successfully.\n"); else printf("Enable FAILED!\n"); } mchbar_phys &= 0xfffffffe; mchbar_phys |= ((uint64_t)pci_read_long(nb, 0x4c)) << 32; break; case PCI_DEVICE_ID_INTEL_82443LX: case PCI_DEVICE_ID_INTEL_82443BX: case PCI_DEVICE_ID_INTEL_82810: case PCI_DEVICE_ID_INTEL_82810E_MC: case PCI_DEVICE_ID_INTEL_82810DC: case PCI_DEVICE_ID_INTEL_82830M: printf("This northbrigde does not have MCHBAR.\n"); return 1; case PCI_DEVICE_ID_INTEL_GS45: mchbar_phys = pci_read_long(nb, 0x48) & 0xfffffffe; mchbar_phys |= ((uint64_t)pci_read_long(nb, 0x4c)) << 32; break; default: printf("Error: Dumping MCHBAR on this northbridge is not (yet) supported.\n"); return 1; } mchbar = map_physical(mchbar_phys, size); if (mchbar == NULL) { perror("Error mapping MCHBAR"); exit(1); } printf("MCHBAR = 0x%08llx (MEM)\n\n", mchbar_phys); for (i = 0; i < size; i += 4) { if (*(uint32_t *)(mchbar + i)) printf("0x%04x: 0x%08x\n", i, *(uint32_t *)(mchbar+i)); } unmap_physical((void *)mchbar, size); return 0; }
/*
 * Execute one parsed setpci operation on a single device: either dump a
 * register or write one or more values, honoring per-value masks and the
 * global demo_mode (which suppresses actual writes).  Fatal errors go
 * through die(); verbose progress goes through trace().
 */
static void exec_op(struct op *op, struct pci_dev *dev)
{
  /* printf formats indexed by access width in bytes (1, 2, 4) */
  const char * const formats[] = { NULL, " %02x", " %04x", NULL, " %08x" };
  /* formats for masked writes: old->(value:mask)->new */
  const char * const mask_formats[] = { NULL, " %02x->(%02x:%02x)->%02x", " %04x->(%04x:%04x)->%04x", NULL, " %08x->(%08x:%08x)->%08x" };
  unsigned int i, x, y;
  int addr = 0;
  int width = op->width;
  char slot[16];

  sprintf(slot, "%04x:%02x:%02x.%x", dev->domain, dev->bus, dev->dev, dev->func);
  trace("%s ", slot);

  /* If the operation is capability-relative, resolve the capability's base
   * address first; die if the device lacks it. */
  if (op->cap_type)
    {
      struct pci_cap *cap;
      cap = pci_find_cap(dev, op->cap_id, op->cap_type);
      if (cap)
	addr = cap->addr;
      else
	die("%s: %s %04x not found", slot,
	    ((op->cap_type == PCI_CAP_NORMAL) ? "Capability" : "Extended capability"),
	    op->cap_id);
      trace(((op->cap_type == PCI_CAP_NORMAL) ? "(cap %02x @%02x) " : "(ecap %04x @%03x) "),
	    op->cap_id, addr);
    }
  addr += op->addr;
  trace("@%02x", addr);

  /* We have already checked it when parsing, but addressing relative to
     capabilities can change the address. */
  if (addr & (width-1))
    die("%s: Unaligned access of width %d to register %04x", slot, width, addr);
  if (addr + width > 0x1000)
    die("%s: Access of width %d to register %04x out of range", slot, width, addr);

  if (op->num_values)
    {
      /* Write path: consecutive registers, one value per step of `width`. */
      for (i=0; i<op->num_values; i++)
	{
	  if ((op->values[i].mask & max_values[width]) == max_values[width])
	    {
	      /* Full mask: plain write, no read needed. */
	      x = op->values[i].value;
	      trace(formats[width], op->values[i].value);
	    }
	  else
	    {
	      /* Partial mask: read-modify-write. */
	      switch (width)
		{
		case 1: y = pci_read_byte(dev, addr); break;
		case 2: y = pci_read_word(dev, addr); break;
		default: y = pci_read_long(dev, addr); break;
		}
	      x = (y & ~op->values[i].mask) | op->values[i].value;
	      trace(mask_formats[width], y, op->values[i].value, op->values[i].mask, x);
	    }
	  /* In demo mode, compute and trace but never touch the device. */
	  if (!demo_mode)
	    {
	      switch (width)
		{
		case 1: pci_write_byte(dev, addr, x); break;
		case 2: pci_write_word(dev, addr, x); break;
		default: pci_write_long(dev, addr, x); break;
		}
	    }
	  addr += width;
	}
      trace("\n");
    }
  else
    {
      /* Read path: dump one register of the requested width.
       * formats[width]+1 skips the leading space of the format string. */
      trace(" = ");
      switch (width)
	{
	case 1: x = pci_read_byte(dev, addr); break;
	case 2: x = pci_read_word(dev, addr); break;
	default: x = pci_read_long(dev, addr); break;
	}
      printf(formats[width]+1, x);
      putchar('\n');
    }
}
/*
 * Generic implementation of pci_fill_info(): reads the requested items
 * (IDs, class, IRQ, BARs, ROM base, capability lists) straight from the
 * device's config space.  Returns the set of flags actually filled;
 * PCI_FILL_SIZES is never satisfied here and is masked out.
 */
int pci_generic_fill_info(struct pci_dev *d, int flags)
{
  struct pci_access *a = d->access;

  /* BAR and ROM layout depend on the header type; read it lazily. */
  if ((flags & (PCI_FILL_BASES | PCI_FILL_ROM_BASE)) && d->hdrtype < 0)
    d->hdrtype = pci_read_byte(d, PCI_HEADER_TYPE) & 0x7f;
  if (flags & PCI_FILL_IDENT)
    {
      d->vendor_id = pci_read_word(d, PCI_VENDOR_ID);
      d->device_id = pci_read_word(d, PCI_DEVICE_ID);
    }
  if (flags & PCI_FILL_CLASS)
    d->device_class = pci_read_word(d, PCI_CLASS_DEVICE);
  if (flags & PCI_FILL_IRQ)
    d->irq = pci_read_byte(d, PCI_INTERRUPT_LINE);
  if (flags & PCI_FILL_BASES)
    {
      int cnt = 0, i;
      memset(d->base_addr, 0, sizeof(d->base_addr));
      /* Number of BARs depends on the header type. */
      switch (d->hdrtype)
	{
	case PCI_HEADER_TYPE_NORMAL: cnt = 6; break;
	case PCI_HEADER_TYPE_BRIDGE: cnt = 2; break;
	case PCI_HEADER_TYPE_CARDBUS: cnt = 1; break;
	}
      if (cnt)
	{
	  for (i=0; i<cnt; i++)
	    {
	      u32 x = pci_read_long(d, PCI_BASE_ADDRESS_0 + i*4);
	      /* All-zeros or all-ones means the BAR is unimplemented. */
	      if (!x || x == (u32) ~0)
		continue;
	      if ((x & PCI_BASE_ADDRESS_SPACE) == PCI_BASE_ADDRESS_SPACE_IO)
		d->base_addr[i] = x;
	      else
		{
		  if ((x & PCI_BASE_ADDRESS_MEM_TYPE_MASK) != PCI_BASE_ADDRESS_MEM_TYPE_64)
		    d->base_addr[i] = x;
		  else if (i >= cnt-1)
		    /* 64-bit BAR needs a second dword, but none is left. */
		    a->warning("%04x:%02x:%02x.%d: Invalid 64-bit address seen for BAR %d.",
			       d->domain, d->bus, d->dev, d->func, i);
		  else
		    {
		      /* Consume the following dword as the upper 32 bits. */
		      u32 y = pci_read_long(d, PCI_BASE_ADDRESS_0 + (++i)*4);
#ifdef PCI_HAVE_64BIT_ADDRESS
		      d->base_addr[i-1] = x | (((pciaddr_t) y) << 32);
#else
		      /* pciaddr_t is 32-bit here: keep the BAR only if the
		       * upper half is zero, otherwise warn and drop it. */
		      if (y)
			a->warning("%04x:%02x:%02x.%d 64-bit device address ignored.",
				   d->domain, d->bus, d->dev, d->func);
		      else
			d->base_addr[i-1] = x;
#endif
		    }
		}
	    }
	}
    }
  if (flags & PCI_FILL_ROM_BASE)
    {
      int reg = 0;
      d->rom_base_addr = 0;
      switch (d->hdrtype)
	{
	case PCI_HEADER_TYPE_NORMAL: reg = PCI_ROM_ADDRESS; break;
	case PCI_HEADER_TYPE_BRIDGE: reg = PCI_ROM_ADDRESS1; break;
	}
      if (reg)
	{
	  u32 u = pci_read_long(d, reg);
	  /* All-ones means no expansion ROM implemented. */
	  if (u != 0xffffffff)
	    d->rom_base_addr = u;
	}
    }
  if (flags & (PCI_FILL_CAPS | PCI_FILL_EXT_CAPS))
    flags |= pci_scan_caps(d, flags);
  return flags & ~PCI_FILL_SIZES;
}
int pci_generic_fill_info(struct pci_dev *d, int flags) { struct pci_access *a = d->access; if (flags & PCI_FILL_IDENT) { d->vendor_id = pci_read_word(d, PCI_VENDOR_ID); d->device_id = pci_read_word(d, PCI_DEVICE_ID); } if (flags & PCI_FILL_IRQ) d->irq = pci_read_byte(d, PCI_INTERRUPT_LINE); if (flags & PCI_FILL_BASES) { int cnt = 0, i; //bzero(d->base_addr, sizeof(d->base_addr)); memset(d->base_addr,0, sizeof(d->base_addr)); switch (d->hdrtype) { case PCI_HEADER_TYPE_NORMAL: cnt = 6; break; case PCI_HEADER_TYPE_BRIDGE: cnt = 2; break; case PCI_HEADER_TYPE_CARDBUS: cnt = 1; break; } if (cnt) { u16 cmd = pci_read_word(d, PCI_COMMAND); for(i=0; i<cnt; i++) { u32 x = pci_read_long(d, PCI_BASE_ADDRESS_0 + i*4); if (!x || x == (u32) ~0) continue; d->base_addr[i] = x; if (x & PCI_BASE_ADDRESS_SPACE_IO) { if (!a->buscentric && !(cmd & PCI_COMMAND_IO)) d->base_addr[i] = 0; } else if (a->buscentric || (cmd & PCI_COMMAND_MEMORY)) { if ((x & PCI_BASE_ADDRESS_MEM_TYPE_MASK) == PCI_BASE_ADDRESS_MEM_TYPE_64) { if (i >= cnt-1) a->warning("%02x:%02x.%d: Invalid 64-bit address seen.", d->bus, d->dev, d->func); else { u32 y = pci_read_long(d, PCI_BASE_ADDRESS_0 + (++i)*4); #ifdef HAVE_64BIT_ADDRESS d->base_addr[i-1] |= ((pciaddr_t) y) << 32; #else if (y) { a->warning("%02x:%02x.%d 64-bit device address ignored.", d->bus, d->dev, d->func); d->base_addr[i-1] = 0; } #endif } } } else d->base_addr[i] = 0; } } } if (flags & PCI_FILL_ROM_BASE) { int reg = 0; d->rom_base_addr = 0; switch (d->hdrtype) { case PCI_HEADER_TYPE_NORMAL: reg = PCI_ROM_ADDRESS; break; case PCI_HEADER_TYPE_BRIDGE: reg = PCI_ROM_ADDRESS1; break; } if (reg) { u32 a = pci_read_long(d, reg); if (a & PCI_ROM_ADDRESS_ENABLE) d->rom_base_addr = a; } } return flags & ~PCI_FILL_SIZES; }
/*
 * Locate the Root Complex Base Address (RCBA) of the given Intel
 * southbridge from config register 0xf0, map the 16 KiB window, and dump
 * every non-zero 32-bit register in it.
 *
 * Returns 0 on success, 1 if the chipset has no RCBA or is unsupported.
 * Exits the program if the physical mapping fails.
 */
int print_rcba(struct pci_dev *sb)
{
	int i, size = 0x4000;
	volatile uint8_t *rcba;
	uint32_t rcba_phys;

	printf("\n============= RCBA ==============\n\n");

	/* Every ICH6+ southbridge keeps RCBA at config offset 0xf0;
	 * bit 0 is the enable bit and is masked off below. */
	switch (sb->device_id) {
	case PCI_DEVICE_ID_INTEL_ICH6:
	case PCI_DEVICE_ID_INTEL_ICH7:
	case PCI_DEVICE_ID_INTEL_ICH7M:
	case PCI_DEVICE_ID_INTEL_ICH7DH:
	case PCI_DEVICE_ID_INTEL_ICH7MDH:
	case PCI_DEVICE_ID_INTEL_ICH8:
	case PCI_DEVICE_ID_INTEL_ICH8M:
	case PCI_DEVICE_ID_INTEL_ICH8ME:
	case PCI_DEVICE_ID_INTEL_ICH9DH:
	case PCI_DEVICE_ID_INTEL_ICH9DO:
	case PCI_DEVICE_ID_INTEL_ICH9R:
	case PCI_DEVICE_ID_INTEL_ICH9:
	case PCI_DEVICE_ID_INTEL_ICH9M:
	case PCI_DEVICE_ID_INTEL_ICH9ME:
	case PCI_DEVICE_ID_INTEL_ICH10:
	case PCI_DEVICE_ID_INTEL_ICH10R:
	case PCI_DEVICE_ID_INTEL_NM10:
	case PCI_DEVICE_ID_INTEL_I63XX:
	case PCI_DEVICE_ID_INTEL_3400:
	case PCI_DEVICE_ID_INTEL_3420:
	case PCI_DEVICE_ID_INTEL_3450:
	case PCI_DEVICE_ID_INTEL_3400_DESKTOP:
	case PCI_DEVICE_ID_INTEL_3400_MOBILE:
	case PCI_DEVICE_ID_INTEL_3400_MOBILE_SFF:
	case PCI_DEVICE_ID_INTEL_B55_A:
	case PCI_DEVICE_ID_INTEL_B55_B:
	case PCI_DEVICE_ID_INTEL_H55:
	case PCI_DEVICE_ID_INTEL_H57:
	case PCI_DEVICE_ID_INTEL_HM55:
	case PCI_DEVICE_ID_INTEL_HM57:
	case PCI_DEVICE_ID_INTEL_P55:
	case PCI_DEVICE_ID_INTEL_PM55:
	case PCI_DEVICE_ID_INTEL_Q57:
	case PCI_DEVICE_ID_INTEL_QM57:
	case PCI_DEVICE_ID_INTEL_QS57:
	case PCI_DEVICE_ID_INTEL_Z68:
	case PCI_DEVICE_ID_INTEL_P67:
	case PCI_DEVICE_ID_INTEL_UM67:
	case PCI_DEVICE_ID_INTEL_HM65:
	case PCI_DEVICE_ID_INTEL_H67:
	case PCI_DEVICE_ID_INTEL_HM67:
	case PCI_DEVICE_ID_INTEL_Q65:
	case PCI_DEVICE_ID_INTEL_QS67:
	case PCI_DEVICE_ID_INTEL_Q67:
	case PCI_DEVICE_ID_INTEL_QM67:
	case PCI_DEVICE_ID_INTEL_B65:
	case PCI_DEVICE_ID_INTEL_C202:
	case PCI_DEVICE_ID_INTEL_C204:
	case PCI_DEVICE_ID_INTEL_C206:
	case PCI_DEVICE_ID_INTEL_H61:
	case PCI_DEVICE_ID_INTEL_Z77:
	case PCI_DEVICE_ID_INTEL_Z75:
	case PCI_DEVICE_ID_INTEL_Q77:
	case PCI_DEVICE_ID_INTEL_Q75:
	case PCI_DEVICE_ID_INTEL_B75:
	case PCI_DEVICE_ID_INTEL_H77:
	case PCI_DEVICE_ID_INTEL_C216:
	case PCI_DEVICE_ID_INTEL_QM77:
	case PCI_DEVICE_ID_INTEL_QS77:
	case PCI_DEVICE_ID_INTEL_HM77:
	case PCI_DEVICE_ID_INTEL_UM77:
	case PCI_DEVICE_ID_INTEL_HM76:
	case PCI_DEVICE_ID_INTEL_HM75:
	case PCI_DEVICE_ID_INTEL_HM70:
	case PCI_DEVICE_ID_INTEL_NM70:
	case PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_FULL:
	case PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_PREM:
	case PCI_DEVICE_ID_INTEL_LYNXPOINT_LP_BASE:
	case PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP_PREM:
	case PCI_DEVICE_ID_INTEL_WILDCATPOINT_LP:
	case PCI_DEVICE_ID_INTEL_C8_MOBILE:
	case PCI_DEVICE_ID_INTEL_C8_DESKTOP:
	case PCI_DEVICE_ID_INTEL_Z87:
	case PCI_DEVICE_ID_INTEL_Z85:
	case PCI_DEVICE_ID_INTEL_HM86:
	case PCI_DEVICE_ID_INTEL_H87:
	case PCI_DEVICE_ID_INTEL_HM87:
	case PCI_DEVICE_ID_INTEL_Q85:
	case PCI_DEVICE_ID_INTEL_Q87:
	case PCI_DEVICE_ID_INTEL_QM87:
	case PCI_DEVICE_ID_INTEL_B85:
	case PCI_DEVICE_ID_INTEL_C222:
	case PCI_DEVICE_ID_INTEL_C224:
	case PCI_DEVICE_ID_INTEL_C226:
	case PCI_DEVICE_ID_INTEL_H81:
		/* Mask off the enable bit (bit 0) to get the base address. */
		rcba_phys = pci_read_long(sb, 0xf0) & 0xfffffffe;
		break;
	case PCI_DEVICE_ID_INTEL_ICH:
	case PCI_DEVICE_ID_INTEL_ICH0:
	case PCI_DEVICE_ID_INTEL_ICH2:
	case PCI_DEVICE_ID_INTEL_ICH4:
	case PCI_DEVICE_ID_INTEL_ICH4M:
	case PCI_DEVICE_ID_INTEL_ICH5:
		/* Pre-ICH6 parts predate the RCBA mechanism. */
		printf("This southbridge does not have RCBA.\n");
		return 1;
	default:
		printf("Error: Dumping RCBA on this southbridge is not (yet) supported.\n");
		return 1;
	}

	rcba = map_physical(rcba_phys, size);
	if (rcba == NULL) {
		perror("Error mapping RCBA");
		exit(1);
	}

	printf("RCBA = 0x%08x (MEM)\n\n", rcba_phys);

	/* Dump only non-zero dwords to keep the output readable. */
	for (i = 0; i < size; i += 4) {
		if (*(uint32_t *)(rcba + i))
			printf("0x%04x: 0x%08x\n", i, *(uint32_t *)(rcba + i));
	}

	unmap_physical((void *)rcba, size);
	return 0;
}