/*
 * Program a region into the outbound ATU
 * The ATU supports 16 regions that can be programmed independently.
 *   pcie,     PCIe Device Struct
 *   index,    Which iATU region are we programming?
 *   type,     Type of PCIe txn being generated on the PCIe bus
 *   cpu_addr, Physical source address to translate in the CPU's address space
 *   pci_addr, Destination address in the PCIe address space
 *   size,     Size of the aperture that we're translating.
 */
zx_status_t DwPcie::ProgramOutboundAtu(const uint32_t index,
                                       const uint32_t type,
                                       const zx_paddr_t cpu_addr,
                                       const uintptr_t pci_addr,
                                       const size_t size) {
    // The ATU supports a limited number of regions.
    ZX_DEBUG_ASSERT(index < kAtuRegionCount);

    // Each ATU region has its own bank of registers at this offset from the
    // DBI base.
    const size_t bank_offset = (0x3 << 20) | (index << 9);
    volatile uint8_t* atu_base =
        reinterpret_cast<volatile uint8_t*>(dbi_.get()) + bank_offset;
    volatile atu_ctrl_regs_t* regs =
        reinterpret_cast<volatile atu_ctrl_regs_t*>(atu_base);

    // Memory transactions that are in the following range will get translated
    // to PCI bus transactions:
    //
    //   [cpu_addr, cpu_addr + size - 1]
    regs->unroll_lower_base = lo32(cpu_addr);
    regs->unroll_upper_base = hi32(cpu_addr);
    regs->unroll_limit = lo32(cpu_addr + size - 1);

    // Target of the transactions above.
    regs->unroll_lower_target = lo32(pci_addr);
    regs->unroll_upper_target = hi32(pci_addr);

    // Region Ctrl 1 contains a number of fields. The low 5 bits of the field
    // indicate the type of transaction to dispatch onto the PCIe bus.
    regs->region_ctrl1 = type;

    // Each region can individually be marked as Enabled or Disabled.
    regs->region_ctrl2 |= kAtuRegionCtrlEnable;
    regs->region_ctrl2 |= kAtuCfgShiftMode;

    // Wait for the enable to take effect.
    for (unsigned int i = 0; i < kAtuProgramRetries; ++i) {
        if (regs->region_ctrl2 & kAtuRegionCtrlEnable) {
            return ZX_OK;
        }

        // Wait a little bit before trying again.
        zx_nanosleep(zx_deadline_after(ZX_USEC(kAtuWaitEnableTimeoutUs)));
    }

    return ZX_ERR_TIMED_OUT;
}
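/*
 * The lo32()/hi32() helpers used above, and throughout the OPAL snippets
 * below, split a 64-bit value into the 32-bit halves that the ATU register
 * bank and device-tree cells expect. A minimal sketch, assuming the
 * conventional low/high split; both projects ship their own definitions,
 * so treat these as illustrative rather than the real API.
 */
#include <stdint.h>

static inline uint32_t lo32(uint64_t val)
{
        /* Low 32 bits of the value. */
        return (uint32_t)(val & 0xffffffffu);
}

static inline uint32_t hi32(uint64_t val)
{
        /* High 32 bits of the value. */
        return (uint32_t)(val >> 32);
}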
void memcons_add_properties(void)
{
        uint64_t addr = (u64)&memcons;

        dt_add_property_cells(opal_node, "ibm,opal-memcons",
                              hi32(addr), lo32(addr));
}
static void trace_add_dt_props(void)
{
        unsigned int i;
        u64 *prop, tmask;

        prop = malloc(sizeof(u64) * 2 * debug_descriptor.num_traces);

        for (i = 0; i < debug_descriptor.num_traces; i++) {
                prop[i * 2] = cpu_to_fdt64(debug_descriptor.trace_phys[i]);
                prop[i * 2 + 1] = cpu_to_fdt64(debug_descriptor.trace_size[i]);
        }

        dt_add_property(opal_node, "ibm,opal-traces",
                        prop, sizeof(u64) * 2 * i);
        free(prop);

        tmask = (uint64_t)&debug_descriptor.trace_mask;
        dt_add_property_cells(opal_node, "ibm,opal-trace-mask",
                              hi32(tmask), lo32(tmask));
}
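/*
 * A consumer-side sketch of the "ibm,opal-traces" layout built above: the
 * property is a flat array of (physical address, size) pairs, each stored
 * as a big-endian u64 by cpu_to_fdt64(). parse_opal_traces() is a
 * hypothetical helper for illustration only; fdt64_t and fdt64_to_cpu()
 * come from libfdt.
 */
#include <inttypes.h>
#include <stdio.h>
#include <libfdt.h>

static void parse_opal_traces(const fdt64_t *prop, size_t prop_len)
{
        size_t i, nr_traces = prop_len / (sizeof(fdt64_t) * 2);

        for (i = 0; i < nr_traces; i++) {
                uint64_t phys = fdt64_to_cpu(prop[i * 2]);
                uint64_t size = fdt64_to_cpu(prop[i * 2 + 1]);

                /* Each trace buffer occupies [phys, phys + size). */
                printf("trace %zu: 0x%016" PRIx64 " + 0x%" PRIx64 "\n",
                       i, phys, size);
        }
}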
struct dt_node *add_core_common(struct dt_node *cpus,
                                const struct sppcia_cpu_cache *cache,
                                const struct sppaca_cpu_timebase *tb,
                                uint32_t int_server, bool okay)
{
        const char *name;
        struct dt_node *cpu;
        uint32_t version;
        uint64_t freq;
        const uint8_t pa_features[] = {
                6, 0, 0xf6, 0x3f, 0xc7, 0x00, 0x80, 0xc0 };

        printf("  Cache: I=%u D=%u/%u/%u/%u\n",
               be32_to_cpu(cache->icache_size_kb),
               be32_to_cpu(cache->l1_dcache_size_kb),
               be32_to_cpu(cache->l2_dcache_size_kb),
               be32_to_cpu(cache->l3_dcache_size_kb),
               be32_to_cpu(cache->l35_dcache_size_kb));

        /* Use the boot CPU PVR to make up a CPU name in the device-tree
         * since the HDAT doesn't seem to tell....
         */
        version = mfspr(SPR_PVR);
        switch (PVR_TYPE(version)) {
        case PVR_TYPE_P7:
                name = "PowerPC,POWER7";
                break;
        case PVR_TYPE_P7P:
                name = "PowerPC,POWER7+";
                break;
        case PVR_TYPE_P8E:
        case PVR_TYPE_P8:
                name = "PowerPC,POWER8";
                break;
        default:
                name = "PowerPC,Unknown";
        }

        cpu = dt_new_addr(cpus, name, int_server);
        assert(cpu);
        dt_add_property_string(cpu, "device_type", "cpu");
        dt_add_property_string(cpu, "status", okay ? "okay" : "bad");
        dt_add_property_cells(cpu, "reg", int_server);
        dt_add_property_cells(cpu, "cpu-version", version);
        dt_add_property(cpu, "64-bit", NULL, 0);
        dt_add_property(cpu, "32-64-bridge", NULL, 0);
        dt_add_property(cpu, "graphics", NULL, 0);
        dt_add_property(cpu, "general-purpose", NULL, 0);
        dt_add_property_cells(cpu, "ibm,processor-segment-sizes",
                              0x1c, 0x28, 0xffffffff, 0xffffffff);
        dt_add_property_cells(cpu, "ibm,processor-page-sizes",
                              0xc, 0x10, 0x18, 0x22);

        /* Page size encodings appear to be the same for P7 and P8 */
        dt_add_property_cells(cpu, "ibm,segment-page-sizes",
                0x0c, 0x000, 3, 0x0c, 0x0000, /*  4K seg  4k pages */
                                0x10, 0x0007, /*  4K seg 64k pages */
                                0x18, 0x0038, /*  4K seg 16M pages */
                0x10, 0x110, 2, 0x10, 0x0001, /* 64K seg 64k pages */
                                0x18, 0x0008, /* 64K seg 16M pages */
                0x18, 0x100, 1, 0x18, 0x0000, /* 16M seg 16M pages */
                0x22, 0x120, 1, 0x22, 0x0003); /* 16G seg 16G pages */

        dt_add_property(cpu, "ibm,pa-features",
                        pa_features, sizeof(pa_features));
        dt_add_property_cells(cpu, "ibm,slb-size", 0x20);

        dt_add_property_cells(cpu, "ibm,vmx", 0x2);
        dt_add_property_cells(cpu, "ibm,dfp", 0x2);
        dt_add_property_cells(cpu, "ibm,purr", 0x1);
        dt_add_property_cells(cpu, "ibm,spurr", 0x1);

        /*
         * Do not create "clock-frequency" if the frequency doesn't
         * fit in a single cell
         */
        freq = ((uint64_t)be32_to_cpu(tb->actual_clock_speed)) * 1000000ul;
        if (freq <= 0xfffffffful)
                dt_add_property_cells(cpu, "clock-frequency", freq);
        dt_add_property_cells(cpu, "ibm,extended-clock-frequency",
                              hi32(freq), lo32(freq));

        /* FIXME: Hardcoding is bad. */
        dt_add_property_cells(cpu, "timebase-frequency", 512000000);
        dt_add_property_cells(cpu, "ibm,extended-timebase-frequency",
                              0, 512000000);

        dt_add_property_cells(cpu, "reservation-granule-size",
                              be32_to_cpu(cache->reservation_size));

        dt_add_property_cells(cpu, "d-tlb-size",
                              be32_to_cpu(cache->dtlb_entries));
        dt_add_property_cells(cpu, "i-tlb-size",
                              be32_to_cpu(cache->itlb_entries));
        /* Assume unified TLB */
        dt_add_property_cells(cpu, "tlb-size",
                              be32_to_cpu(cache->dtlb_entries));
        dt_add_property_cells(cpu, "d-tlb-sets",
                              be32_to_cpu(cache->dtlb_assoc_sets));
        dt_add_property_cells(cpu, "i-tlb-sets",
                              be32_to_cpu(cache->itlb_assoc_sets));
        dt_add_property_cells(cpu, "tlb-sets",
                              be32_to_cpu(cache->dtlb_assoc_sets));

        dt_add_property_cells(cpu, "d-cache-block-size",
                              be32_to_cpu(cache->dcache_block_size));
        dt_add_property_cells(cpu, "i-cache-block-size",
                              be32_to_cpu(cache->icache_block_size));
        dt_add_property_cells(cpu, "d-cache-size",
                              be32_to_cpu(cache->l1_dcache_size_kb) * 1024);
        dt_add_property_cells(cpu, "i-cache-size",
                              be32_to_cpu(cache->icache_size_kb) * 1024);
        dt_add_property_cells(cpu, "i-cache-sets",
                              be32_to_cpu(cache->icache_assoc_sets));
        dt_add_property_cells(cpu, "d-cache-sets",
                              be32_to_cpu(cache->dcache_assoc_sets));

        if (cache->icache_line_size != cache->icache_block_size)
                dt_add_property_cells(cpu, "i-cache-line-size",
                                      be32_to_cpu(cache->icache_line_size));
        if (cache->l1_dcache_line_size != cache->dcache_block_size)
                dt_add_property_cells(cpu, "d-cache-line-size",
                                      be32_to_cpu(cache->l1_dcache_line_size));

        return cpu;
}
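/*
 * A worked example of the single-cell check above (clock speeds assumed,
 * not taken from a real HDAT): a part reporting actual_clock_speed = 5000
 * (MHz) yields freq = 5,000,000,000 = 0x1_2a05_f200, which overflows one
 * 32-bit cell, so only "ibm,extended-clock-frequency" = <0x1 0x2a05f200>
 * is created. A 3000 MHz part yields freq = 0xb2d05e00, which fits, so it
 * gets both "clock-frequency" and the extended property.
 */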
static void firenze_send_pci_inventory(void)
{
        uint64_t base, abase, end, aend, offset;
        int64_t rc;

        if (!fsp_pcie_inv)
                return;

        prlog(PR_DEBUG, "PLAT: Sending PCI inventory to FSP, table has"
              " %d entries\n", fsp_pcie_inv->num_entries);

        {
                unsigned int i;

                prlog(PR_DEBUG, "HWP SLT VDID DVID SVID SDID\n");
                prlog(PR_DEBUG, "---------------------------\n");

                for (i = 0; i < fsp_pcie_inv->num_entries; i++) {
                        struct fsp_pcie_entry *e = &fsp_pcie_inv->entries[i];

                        prlog(PR_DEBUG, "%03d %03d %04x %04x %04x %04x\n",
                              e->hw_proc_id, e->slot_idx,
                              e->vendor_id, e->device_id,
                              e->subsys_vendor_id, e->subsys_device_id);
                }
        }

        /*
         * Get the location of the table in a form we can send
         * to the FSP
         */
        base = (uint64_t)fsp_pcie_inv;
        end = base + sizeof(struct fsp_pcie_inventory) +
                fsp_pcie_inv->num_entries * fsp_pcie_inv->entry_size;
        abase = base & ~0xffful;
        aend = (end + 0xffful) & ~0xffful;
        offset = PSI_DMA_PCIE_INVENTORY + (base & 0xfff);

        /* We can only accommodate so many entries in the PSI map */
        if ((aend - abase) > PSI_DMA_PCIE_INVENTORY_SIZE) {
                prerror("PLAT: PCIe inventory too large (%lld bytes)\n",
                        aend - abase);
                goto bail;
        }

        /* Map this in the TCEs */
        fsp_tce_map(PSI_DMA_PCIE_INVENTORY, (void *)abase, aend - abase);

        /* Send FSP message */
        rc = fsp_sync_msg(fsp_mkmsg(FSP_CMD_PCI_POWER_CONF, 3,
                                    hi32(offset), lo32(offset),
                                    end - base), true);
        if (rc)
                prerror("PLAT: FSP error %lld sending inventory\n", rc);

        /* Unmap */
        fsp_tce_unmap(PSI_DMA_PCIE_INVENTORY, aend - abase);

bail:
        /*
         * We free the inventory. We'll have to redo that on hotplug
         * when we support it but that isn't the case yet
         */
        free(fsp_pcie_inv);
        fsp_pcie_inv = NULL;
}
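/*
 * A worked example of the TCE alignment math above (addresses assumed for
 * illustration): with base = 0x30011234 and end = 0x30011334,
 *
 *   abase  = base & ~0xffful            = 0x30011000  (round down to 4K)
 *   aend   = (end + 0xffful) & ~0xffful = 0x30012000  (round up to 4K)
 *   offset = PSI_DMA_PCIE_INVENTORY + (base & 0xfff)
 *          = PSI_DMA_PCIE_INVENTORY + 0x234
 *
 * The TCE mapping must cover whole 4K pages, while the FSP is handed the
 * DMA-window address plus the table's sub-page offset, so both sides agree
 * on where the inventory actually starts.
 */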