void Memory_hook(void* arg1, void* arg2, void* arg3, void* arg4) { /* our code only works on Intel chipsets so make sure here */ if (pci_config_read16(PCIADDR(0, 0x00, 0), 0x00) != 0x8086) bootInfo->memDetect = false; else bootInfo->memDetect = true; /* manually */ getBoolForKey(kUseMemDetectKey, &bootInfo->memDetect, &bootInfo->bootConfig); if (bootInfo->memDetect) { if (dram_controller_dev != NULL) { // Rek: pci dev ram controller direct and fully informative scan ... scan_dram_controller(dram_controller_dev); } //Azi: gone on Kabyl's smbios update - reminder // unfortunately still necesary for some comp where spd cant read correct speed // scan_memory(&Platform); scan_spd(&Platform); // check Mek's implementation! } }
/*
 * Read a value from PCI configuration space behind the Sabre PBM.
 * @bus_dev: PCI bus the target device sits on.
 * @devfn:   encoded device/function number.
 * @where:   config-space register offset.
 * @size:    access width in bytes (1, 2 or 4).
 * @value:   out parameter receiving the value read.
 *
 * Always returns PCIBIOS_SUCCESSFUL; unmapped or out-of-range accesses
 * leave *value filled with all-ones (the conventional "no device" pattern).
 */
static int __sabre_read_pci_cfg(struct pci_bus *bus_dev, unsigned int devfn,
				int where, int size, u32 *value)
{
	struct pci_pbm_info *pbm = bus_dev->sysdata;
	unsigned char bus = bus_dev->number;
	u32 *addr;
	u16 tmp16;
	u8 tmp8;

	/* Pre-fill *value with all-ones so the early-return paths below can
	 * report "success" while the caller still sees 0xff/0xffff/0xffffffff. */
	switch (size) {
	case 1:
		*value = 0xff;
		break;
	case 2:
		*value = 0xffff;
		break;
	case 4:
		*value = 0xffffffff;
		break;
	}

	addr = sabre_pci_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	if (__sabre_out_of_range(pbm, bus, devfn))
		return PCIBIOS_SUCCESSFUL;

	/* Perform the access at its natural size. Misaligned 16/32-bit reads
	 * are rejected (logged) and return the all-ones value set above. */
	switch (size) {
	case 1:
		pci_config_read8((u8 *) addr, &tmp8);
		*value = tmp8;
		break;

	case 2:
		if (where & 0x01) {
			printk("pci_read_config_word: misaligned reg [%x]\n", where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read16((u16 *) addr, &tmp16);
		*value = tmp16;
		break;

	case 4:
		if (where & 0x03) {
			printk("pci_read_config_dword: misaligned reg [%x]\n", where);
			return PCIBIOS_SUCCESSFUL;
		}
		pci_config_read32(addr, value);
		break;
	}

	return PCIBIOS_SUCCESSFUL;
}
/* Module entry point: registers the memory-detection hook callbacks.
 * Does nothing on non-Intel chipsets (host bridge vendor ID != 0x8086). */
void Memory_start(void)
{
	/* Guard clause: our detection code only supports Intel chipsets. */
	if (pci_config_read16(PCIADDR(0, 0x00, 0), 0x00) != 0x8086)
		return;

	register_hook_callback("PCIDevice",          Memory_PCIDevice_hook);
	register_hook_callback("ScanMemory",         Memory_hook);
	register_hook_callback("isMemoryRegistred",  is_Memory_Registred_Hook);
}
// Initializes the graphics translation table (GTT): obtains the bus
// transaction initiator (BTI), sizes the GTT from the GMCH graphics control
// register, maps every PTE to a pinned scratch page, and seeds the region
// allocator with the resulting graphics memory range.
// Returns ZX_OK on success, or the first failing zx_status_t otherwise.
// NOTE(review): this chunk appears truncated — the function's closing brace
// is not visible here.
zx_status_t Gtt::Init(Controller* controller) {
    controller_ = controller;

    zx_status_t status = pci_get_bti(controller->pci(), 0, bti_.reset_and_get_address());
    if (status != ZX_OK) {
        LOG_ERROR("Failed to get bti (%d)\n", status);
        return status;
    }

    zx_info_bti_t info;
    status = bti_.get_info(ZX_INFO_BTI, &info, sizeof(zx_info_bti_t), nullptr, nullptr);
    if (status != ZX_OK) {
        LOG_ERROR("Failed to fetch bti info (%d)\n", status);
        return status;
    }
    // Remember the minimum contiguity the BTI guarantees for pinned memory.
    min_contiguity_ = info.minimum_contiguity;

    // Calculate the size of the gtt.
    auto gmch_gfx_ctrl = registers::GmchGfxControl::Get().FromValue(0);
    status = pci_config_read16(controller_->pci(), gmch_gfx_ctrl.kAddr, gmch_gfx_ctrl.reg_value_ptr());
    if (status != ZX_OK) {
        LOG_ERROR("Failed to read GfxControl\n");
        return status;
    }
    uint32_t gtt_size = gmch_gfx_ctrl.gtt_mappable_mem_size();
    LOG_TRACE("Gtt::Init gtt_size (for page tables) 0x%x\n", gtt_size);

    // Allocate and pin one scratch page; unused GTT entries will point here.
    status = zx::vmo::create(PAGE_SIZE, 0, &scratch_buffer_);
    if (status != ZX_OK) {
        LOG_ERROR("Failed to alloc scratch buffer (%d)\n", status);
        return status;
    }
    status = bti_.pin(ZX_BTI_PERM_READ, scratch_buffer_, 0, PAGE_SIZE, &scratch_buffer_paddr_, 1, &scratch_buffer_pmt_);
    if (status != ZX_OK) {
        LOG_ERROR("Failed to look up scratch buffer (%d)\n", status);
        return status;
    }
    scratch_buffer_.op_range(ZX_VMO_OP_CACHE_CLEAN, 0, PAGE_SIZE, nullptr, 0);

    // Populate the gtt with the scratch buffer.
    uint64_t pte = gen_pte_encode(scratch_buffer_paddr_);
    unsigned i;
    for (i = 0; i < gtt_size / sizeof(uint64_t); i++) {
        controller_->mmio_space()->Write<uint64_t>(pte, get_pte_offset(i));
    }
    // NOTE(review): "i - i" is always 0, so this posting read always targets
    // entry 0; it likely should be "i - 1" (the last entry written) — confirm
    // against the upstream driver before changing.
    controller_->mmio_space()->Read<uint32_t>(get_pte_offset(i - i)); // Posting read

    // Each 64-bit PTE maps one page of graphics memory.
    gfx_mem_size_ = gtt_size / sizeof(uint64_t) * PAGE_SIZE;
    return region_allocator_.AddRegion({ .base = 0, .size = gfx_mem_size_ });
/* Probe an Intel SMBus controller device: enable its I/O decoding via the
 * PCI command register, then read and log the MMIO BAR, I/O port base and
 * host configuration byte. The per-slot SPD scan loop is currently
 * commented out.
 * NOTE(review): i, speed, spd_size, spd_type and slot are unused while the
 * scan loop below remains disabled. */
static void read_smb_intel(pci_dt_t *smbus_dev)
{
	int i, speed;
	uint8_t spd_size, spd_type;
	uint32_t base, mmio, hostc;
	// bool dump = false;
	RamSlotInfo_t* slot;

	uint16_t cmd = pci_config_read16(smbus_dev->dev.addr, 0x04);
	DBG("SMBus CmdReg: 0x%x\n", cmd);
	/* Set bit 0 (I/O space enable) of the PCI command register. */
	pci_config_write16(smbus_dev->dev.addr, 0x04, cmd | 1);

	mmio = pci_config_read32(smbus_dev->dev.addr, 0x10);// & ~0x0f;
	/* I/O BAR at 0x20; mask off the resource-type indicator bit. */
	base = pci_config_read16(smbus_dev->dev.addr, 0x20) & 0xFFFE;
	hostc = pci_config_read8(smbus_dev->dev.addr, 0x40);

	IOLog("Scanning SMBus [%04x:%04x], mmio: 0x%x, ioport: 0x%x, hostc: 0x%x\n",
		smbus_dev->vendor_id, smbus_dev->device_id, mmio, base, hostc);

	// Search MAX_RAM_SLOTS slots
	// for (i = 0; i < MAX_RAM_SLOTS; i++){
	// spd_size = smb_read_byte_intel(base, 0x50 + i, 0);
} // for
/** scan mem for memory autodection purpose */ void scan_mem() { static bool done = false; if (done) return; /* our code only works on Intel chipsets so make sure here */ if (pci_config_read16(PCIADDR(0, 0x00, 0), 0x00) != 0x8086) bootInfo->memDetect = false; else bootInfo->memDetect = true; /* manually */ getBoolForKey(kUseMemDetect, &bootInfo->memDetect, &bootInfo->chameleonConfig); if (bootInfo->memDetect) { if (dram_controller_dev != NULL) { scan_dram_controller(dram_controller_dev); // Rek: pci dev ram controller direct and fully informative scan ... } scan_spd(&Platform); } done = true; }
/* At least on Sabre, it is necessary to access all PCI host controller
 * registers at their natural size, otherwise zeros are returned.
 * Strange but true, and I see no language in the UltraSPARC-IIi
 * programmer's manual that mentions this even indirectly.
 */
static int sun4u_read_pci_cfg_host(struct pci_pbm_info *pbm,
				   unsigned char bus, unsigned int devfn,
				   int where, int size, u32 *value)
{
	u32 tmp32, *addr;
	u16 tmp16;
	u8 tmp8;

	addr = sun4u_config_mkaddr(pbm, bus, devfn, where);
	if (!addr)
		return PCIBIOS_SUCCESSFUL;

	switch (size) {
	case 1:
		if (where < 8) {
			/* The first 8 config bytes must be read with a
			 * naturally-aligned 16-bit access; extract the
			 * requested byte from the halfword afterwards. */
			unsigned long align = (unsigned long) addr;

			align &= ~1;
			pci_config_read16((u16 *)align, &tmp16);
			if (where & 1)
				*value = tmp16 >> 8;
			else
				*value = tmp16 & 0xff;
		} else {
		/* NOTE(review): function continues past this chunk; the
		 * remainder is not visible here. */
/** Register a new PCI ATA channel.
 * @param pci_device	PCI device the channel is on.
 * @param idx		Channel index.
 * @param ctrl_base	Control registers base address.
 * @param cmd_base	Command registers base address.
 * @param bm_base	Bus master base address.
 * @param irq		IRQ number.
 * @return		Pointer to ATA channel structure if present. */
static ata_channel_t *pci_ata_channel_add(pci_device_t *pci_device, int idx,
	uint32_t ctrl_base, uint32_t cmd_base, uint32_t bm_base, uint32_t irq)
{
	uint16_t pci_cmd_old, pci_cmd_new;
	pci_ata_channel_t *channel;
	bool dma = true;
	status_t ret;

	/* Configure the PCI device appropriately: enable I/O decoding and bus
	 * mastering, ensure interrupts are not disabled, and only write the
	 * command register back if something actually changed. */
	pci_cmd_old = pci_cmd_new = pci_config_read16(pci_device, PCI_CONFIG_COMMAND);
	pci_cmd_new &= ~PCI_COMMAND_INT_DISABLE;
	pci_cmd_new |= (PCI_COMMAND_IO | PCI_COMMAND_BUS_MASTER);
	if(pci_cmd_new != pci_cmd_old) {
		pci_config_write16(pci_device, PCI_CONFIG_COMMAND, pci_cmd_new);
		kprintf(LOG_DEBUG, "ata: reconfigured PCI device %d:%02x.%d (old: 0x%04x, new: 0x%04x)\n",
			pci_device->bus, pci_device->device, pci_device->function,
			pci_cmd_old, pci_cmd_new);
	}

	/* Check presence by writing a value to the low LBA port on the channel,
	 * then reading it back. If the value is the same, it is present. */
	out8(cmd_base + ATA_CMD_REG_LBA_LOW, 0xAB);
	if(in8(cmd_base + ATA_CMD_REG_LBA_LOW) != 0xAB) {
		/* Not present: restore the original PCI command register. */
		if(pci_cmd_new != pci_cmd_old) {
			pci_config_write16(pci_device, PCI_CONFIG_COMMAND, pci_cmd_old);
		}
		return NULL;
	}

	/* Allocate our information structure. */
	channel = kmalloc(sizeof(*channel), MM_WAIT);
	channel->channel = NULL;
	channel->pci_device = pci_device;
	channel->ctrl_base = ctrl_base;
	channel->cmd_base = cmd_base;
	/* Each channel owns an 8-byte block of bus-master registers. */
	channel->bus_master_base = bm_base + (idx * 8);
	channel->irq = irq;
	channel->prdt = NULL;

	/* If the bus master is in simplex mode, disable DMA on the second
	 * channel. According to the Haiku code, Intel controllers use this for
	 * something other than simplex mode.
	 * NOTE(review): with channel indices 0 and 1 this "idx > 1" test can
	 * never fire — possibly intended as "idx > 0"; confirm against the
	 * channel numbering used by callers before changing. */
	if(pci_device->vendor_id != 0x8086) {
		if(in8(bm_base + PCI_ATA_BM_REG_STATUS) & PCI_ATA_BM_STATUS_SIMPLEX && idx > 1) {
			dma = false;
		}
	}

	/* Allocate a PRDT if necessary (placed below 4GB for the controller). */
	if(dma) {
		phys_alloc(PRDT_SIZE, 0, 0, 0, (phys_ptr_t)0x100000000, MM_WAIT, &channel->prdt_phys);
		channel->prdt = phys_map(channel->prdt_phys, PRDT_SIZE, MM_WAIT);
	}

	/* Register the IRQ handler. */
	ret = irq_register(channel->irq, pci_ata_irq_handler, NULL, channel);
	if(ret != STATUS_SUCCESS) {
		kprintf(LOG_WARN, "ata: failed to register PCI ATA IRQ handler %u\n", channel->irq);
		/* Unwind the PRDT allocation before failing. */
		if(dma) {
			phys_unmap(channel->prdt, PRDT_SIZE, true);
			phys_free(channel->prdt_phys, PRDT_SIZE);
		}
		kfree(channel);
		return NULL;
	}

	/* Try to register the ATA channel. */
	channel->channel = ata_sff_channel_add(pci_device->node, idx, &pci_ata_channel_ops,
		channel, dma, PRDT_ENTRIES, (phys_ptr_t)0x100000000);
	if(!channel->channel) {
		/* Unwind the IRQ registration and PRDT allocation. */
		irq_unregister(channel->irq, pci_ata_irq_handler, NULL, channel);
		if(dma) {
			phys_unmap(channel->prdt, PRDT_SIZE, true);
			phys_free(channel->prdt_phys, PRDT_SIZE);
		}
		kfree(channel);
		return NULL;
	}

	return channel->channel;
}
/*
 * Fill *value with the processor bus (QPI) speed in MT/s for the SMBIOS OEM
 * table. Only meaningful on Intel Nehalem/Westmere-class CPUs; returns false
 * for CPUs where the value does not apply or cannot be determined.
 */
bool getSMBOemProcessorBusSpeed(returnType *value)
{
	if (Platform.CPU.Vendor == CPUID_VENDOR_INTEL) // Intel
	{
		switch (Platform.CPU.Family)
		{
			case 0x06:
			{
				switch (Platform.CPU.Model)
				{
					case CPU_MODEL_DOTHAN:		// Intel Pentium M
					case CPU_MODEL_YONAH:		// Intel Mobile Core Solo, Duo
					case CPU_MODEL_MEROM:		// Intel Mobile Core 2 Solo, Duo, Xeon 30xx, Xeon 51xx, Xeon X53xx, Xeon E53xx, Xeon X32xx
					case CPU_MODEL_PENRYN:		// Intel Core 2 Solo, Duo, Quad, Extreme, Xeon X54xx, Xeon X33xx
					case CPU_MODEL_ATOM:		// Intel Atom (45nm)
						return false;

					case CPU_MODEL_NEHALEM:		// Intel Core i7, Xeon W35xx, Xeon X55xx, Xeon E55xx LGA1366 (45nm)
					case CPU_MODEL_FIELDS:		// Intel Core i5, i7, Xeon X34xx LGA1156 (45nm)
					case CPU_MODEL_DALES:
					case CPU_MODEL_DALES_32NM:	// Intel Core i3, i5 LGA1156 (32nm)
					case CPU_MODEL_WESTMERE:	// Intel Core i7, Xeon X56xx, Xeon E56xx, Xeon W36xx LGA1366 (32nm) 6 Core
					case CPU_MODEL_NEHALEM_EX:	// Intel Xeon X75xx, Xeon X65xx, Xeon E75xx, Xeon E65x
					case CPU_MODEL_WESTMERE_EX:	// Intel Xeon E7
					{
						// thanks to dgobe for i3/i5/i7 bus speed detection
						int nhm_bus = 0x3F;
						static long possible_nhm_bus[] = {0xFF, 0x7F, 0x3F};
						unsigned long did, vid;
						unsigned int i;

						// Nehalem supports Scrubbing
						// First, locate the PCI bus where the MCH is located.
						// BUGFIX: iterate over the element count, not the byte
						// count — sizeof(possible_nhm_bus) alone walked past
						// the end of the 3-element array.
						for (i = 0; i < sizeof(possible_nhm_bus) / sizeof(possible_nhm_bus[0]); i++)
						{
							vid = pci_config_read16(PCIADDR(possible_nhm_bus[i], 3, 4), 0x00);
							did = pci_config_read16(PCIADDR(possible_nhm_bus[i], 3, 4), 0x02);
							vid &= 0xFFFF;
							did &= 0xFF00;

							if (vid == 0x8086 && did >= 0x2C00)
								nhm_bus = possible_nhm_bus[i];
						}

						unsigned long qpimult, qpibusspeed;
						/* QPI multiplier lives in the low 7 bits of config
						 * register 0x50 of device 2, function 1. */
						qpimult = pci_config_read32(PCIADDR(nhm_bus, 2, 1), 0x50);
						qpimult &= 0x7F;
						DBG("qpimult %lu\n", qpimult);	/* %lu: qpimult is unsigned long */
						qpibusspeed = (qpimult * 2 * (Platform.CPU.FSBFrequency / 1000000));
						// Rek: rounding decimals to match original mac profile info
						if (qpibusspeed % 100 != 0)
							qpibusspeed = ((qpibusspeed + 50) / 100) * 100;
						DBG("qpibusspeed %lu\n", qpibusspeed);
						value->word = qpibusspeed;
						return true;
					}
				}
			}
		}
	}
	return false;
}
static int sm_get_bus_speed (const char *name, int table_num) { if (Platform.CPU.Vendor == 0x756E6547) // Intel { switch (Platform.CPU.Family) { case 0x06: { switch (Platform.CPU.Model) { case CPU_MODEL_PENTIUM_M: // Pentium M 0x0D case CPU_MODEL_YONAH: // Yonah 0x0E case CPU_MODEL_MEROM: // Merom 0x0F case CPU_MODEL_PENRYN: // Penryn 0x17 case CPU_MODEL_ATOM: // Atom 45nm 0x1C return 0; // TODO: populate bus speed for these processors // case CPU_MODEL_FIELDS: // Intel Core i5, i7 LGA1156 (45nm) // if (strstr(Platform.CPU.BrandString, "Core(TM) i5")) // return 2500; // Core i5 // return 4800; // Core i7 // case CPU_MODEL_NEHALEM: // Intel Core i7 LGA1366 (45nm) // case CPU_MODEL_NEHALEM_EX: // case CPU_MODEL_DALES: // Intel Core i5, i7 LGA1156 (45nm) ??? // return 4800; // GT/s / 1000 // case CPU_MODEL_WESTMERE_EX: // Intel Core i7 LGA1366 (45nm) 6 Core ??? return 0; // TODO: populate bus speed for these processors // case 0x19: // Intel Core i5 650 @3.20 Ghz // return 2500; // why? Intel spec says 2.5GT/s case 0x19: // Intel Core i5 650 @3.20 Ghz case CPU_MODEL_NEHALEM: // Intel Core i7 LGA1366 (45nm) case CPU_MODEL_FIELDS: // Intel Core i5, i7 LGA1156 (45nm) case CPU_MODEL_DALES: // Intel Core i5, i7 LGA1156 (45nm) ??? case CPU_MODEL_DALES_32NM: // Intel Core i3, i5, i7 LGA1156 (32nm) case CPU_MODEL_WESTMERE: // Intel Core i7 LGA1366 (32nm) 6 Core case CPU_MODEL_NEHALEM_EX: // Intel Core i7 LGA1366 (45nm) 6 Core ??? 
{ // thanks to dgobe for i3/i5/i7 bus speed detection int nhm_bus = 0x3F; static long possible_nhm_bus[] = {0xFF, 0x7F, 0x3F}; unsigned long did, vid; int i; // Nehalem supports Scrubbing // First, locate the PCI bus where the MCH is located for(i = 0; i < sizeof(possible_nhm_bus); i++) { vid = pci_config_read16(PCIADDR(possible_nhm_bus[i], 3, 4), 0x00); did = pci_config_read16(PCIADDR(possible_nhm_bus[i], 3, 4), 0x02); vid &= 0xFFFF; did &= 0xFF00; if(vid == 0x8086 && did >= 0x2C00) nhm_bus = possible_nhm_bus[i]; } unsigned long qpimult, qpibusspeed; qpimult = pci_config_read32(PCIADDR(nhm_bus, 2, 1), 0x50); qpimult &= 0x7F; DBG("qpimult %d\n", qpimult); qpibusspeed = (qpimult * 2 * (Platform.CPU.FSBFrequency/1000000)); // Rek: rounding decimals to match original mac profile info if (qpibusspeed%100 != 0)qpibusspeed = ((qpibusspeed+50)/100)*100; DBG("qpibusspeed %d\n", qpibusspeed); return qpibusspeed; } } } } } return 0; }
/*
 * Fill *value with the processor bus speed in MT/s for the SMBIOS OEM table:
 * detected QPI speed on Nehalem/Westmere-class CPUs, fixed DMI2 speed on
 * Jaketown/Ivy Bridge-E/Haswell-E, false for CPUs where it does not apply.
 */
bool getSMBOemProcessorBusSpeed(returnType *value)
{
	if (Platform.CPU.Vendor == CPUID_VENDOR_INTEL) { // Intel
		switch (Platform.CPU.Family)
		{
			case 0x06:
			{
				switch (Platform.CPU.Model)
				{
					case CPU_MODEL_PENTIUM_M:
					case CPU_MODEL_DOTHAN:		// Intel Pentium M
					case CPU_MODEL_YONAH:		// Intel Mobile Core Solo, Duo
					case CPU_MODEL_MEROM:		// Intel Mobile Core 2 Solo, Duo, Xeon 30xx, Xeon 51xx, Xeon X53xx, Xeon E53xx, Xeon X32xx
					case CPU_MODEL_PENRYN:		// Intel Core 2 Solo, Duo, Quad, Extreme, Xeon X54xx, Xeon X33xx
					case CPU_MODEL_ATOM:		// Intel Atom (45nm)
						return false;

					case 0x19:
					case CPU_MODEL_NEHALEM:		// Intel Core i7, Xeon W35xx, Xeon X55xx, Xeon E55xx LGA1366 (45nm)
					case CPU_MODEL_FIELDS:		// Intel Core i5, i7, Xeon X34xx LGA1156 (45nm)
					case CPU_MODEL_DALES:
					case CPU_MODEL_DALES_32NM:	// Intel Core i3, i5 LGA1156 (32nm)
					case CPU_MODEL_WESTMERE:	// Intel Core i7, Xeon X56xx, Xeon E56xx, Xeon W36xx LGA1366 (32nm) 6 Core
					case CPU_MODEL_NEHALEM_EX:	// Intel Xeon X75xx, Xeon X65xx, Xeon E75xx, Xeon E65x
					case CPU_MODEL_WESTMERE_EX:	// Intel Xeon E7
//					case CPU_MODEL_SANDYBRIDGE:	// Intel Core i3, i5, i7 LGA1155 (32nm) // MacMan removed not valid for this CPU
//					case CPU_MODEL_IVYBRIDGE:	// Intel Core i3, i5, i7 LGA1155 (22nm) // MacMan removed not valid for this CPU
//					case CPU_MODEL_IVYBRIDGE_XEON:	// MacMan moved
//					case CPU_MODEL_HASWELL:		// MacMan removed not valid for this CPU
//					case CPU_MODEL_JAKETOWN:	// Intel Core i7, Xeon E5 LGA2011 (32nm) // MacMan moved
					{
						// thanks to dgobe for i3/i5/i7 bus speed detection
						int nhm_bus = 0x3F;
						static long possible_nhm_bus[] = {0xFF, 0x7F, 0x3F};
						unsigned long did, vid;
						unsigned int i;

						// Nehalem supports Scrubbing
						// First, locate the PCI bus where the MCH is located
						for (i = 0; i < (sizeof(possible_nhm_bus) / sizeof(possible_nhm_bus[0])); i++)
						{
							vid = pci_config_read16(PCIADDR(possible_nhm_bus[i], 3, 4), 0x00);
							did = pci_config_read16(PCIADDR(possible_nhm_bus[i], 3, 4), 0x02);
							vid &= 0xFFFF;
							did &= 0xFF00;

							if (vid == 0x8086 && did >= 0x2C00)
							{
								nhm_bus = possible_nhm_bus[i];
							}
						}

						unsigned long qpimult, qpibusspeed;
						/* QPI multiplier lives in the low 7 bits of config
						 * register 0x50 of device 2, function 1. */
						qpimult = pci_config_read32(PCIADDR(nhm_bus, 2, 1), 0x50);
						qpimult &= 0x7F;
						/* BUGFIX: %lu, not %d — qpimult/qpibusspeed are
						 * unsigned long; mismatched printf specifiers are
						 * undefined behavior. */
						DBG("qpimult %lu\n", qpimult);
						qpibusspeed = (qpimult * 2 * (Platform.CPU.FSBFrequency / 1000000LL));
						// Rek: rounding decimals to match original mac profile info
						if (qpibusspeed % 100 != 0)
						{
							qpibusspeed = ((qpibusspeed + 50) / 100) * 100;
						}
						DBG("qpibusspeed %lu\n", qpibusspeed);
						value->word = qpibusspeed;
						return true;
					}

					// MacMan the following CPUs have fixed DMI2 speeds
					case CPU_MODEL_IVYBRIDGE_XEON:	// Intel Core i7, Xeon E5 v2 LGA2011 (22nm)
					case CPU_MODEL_JAKETOWN:	// Intel Core i7, Xeon E5 LGA2011 (32nm)
					case CPU_MODEL_HASWELL_SVR:	// Intel Core i7, Xeon E5 LGA2011v3
					{
						unsigned long dmi2speed;
						dmi2speed = 5000;
						DBG("dmi2speed %lu\n", dmi2speed);	/* %lu: dmi2speed is unsigned long */
						value->word = dmi2speed;
						return true;
					}

					default:
						break; //Unsupported CPU type
				}
			}
			default:
				break;
		}
	}
	return false;
}
/* vmm PCI backend: 16-bit config-space read at the given offset of the
 * device addressed by (bus, dev, fun). The cookie argument is unused. */
static uint16_t camkes_pci_read16(void *cookie, vmm_pci_address_t addr, unsigned int offset)
{
    (void)cookie; /* required by the callback signature; not used */
    return pci_config_read16(addr.bus, addr.dev, addr.fun, offset);
}