static void map_debug_areas(void)
{
	uint64_t t, i;

	/* Our memcons is in a section of its own and already
	 * aligned to 4K. The buffers are mapped as a whole.
	 */
	fsp_tce_map(PSI_DMA_MEMCONS, &memcons, 0x1000);
	fsp_tce_map(PSI_DMA_LOG_BUF, (void *)INMEM_CON_START, INMEM_CON_LEN);

	debug_descriptor.memcons_tce = PSI_DMA_MEMCONS;
	t = memcons.obuf_phys - INMEM_CON_START + PSI_DMA_LOG_BUF;
	debug_descriptor.memcons_obuf_tce = t;
	t = memcons.ibuf_phys - INMEM_CON_START + PSI_DMA_LOG_BUF;
	debug_descriptor.memcons_ibuf_tce = t;

	/* We only have space in the TCE table for the trace
	 * areas on P8.
	 */
	if (proc_gen != proc_gen_p8)
		return;

	t = PSI_DMA_TRACE_BASE;
	for (i = 0; i < debug_descriptor.num_traces; i++) {
		/*
		 * Trace buffers are misaligned by 0x10 due to the lock
		 * in the trace structure, and their size is also not
		 * completely aligned. (They are allocated so that, with
		 * the lock included, they do cover an entire multiple of
		 * a 4K page, however.)
		 *
		 * This means we have to map the lock into the TCEs and
		 * align everything. Not a huge deal, but it needs to be
		 * taken into account.
		 *
		 * Note: Maybe we should map them read-only...
		 */
		uint64_t tstart, tend, toff, tsize;

		tstart = ALIGN_DOWN(debug_descriptor.trace_phys[i], 0x1000);
		tend = ALIGN_UP(debug_descriptor.trace_phys[i] +
				debug_descriptor.trace_size[i], 0x1000);
		toff = debug_descriptor.trace_phys[i] - tstart;
		tsize = tend - tstart;

		fsp_tce_map(t, (void *)tstart, tsize);
		debug_descriptor.trace_tce[i] = t + toff;
		t += tsize;
	}
}
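/*
 * A minimal sketch of the align-and-map pattern used for the trace
 * buffers above, assuming only the existing fsp_tce_map() and the
 * ALIGN_DOWN/ALIGN_UP helpers. tce_map_unaligned() is illustrative,
 * not a skiboot API: it maps the 4K-rounded superset of an unaligned
 * buffer and returns the TCE-space address of the buffer's first byte.
 */
static uint64_t tce_map_unaligned(uint64_t tce, uint64_t phys, uint64_t len)
{
	uint64_t start = ALIGN_DOWN(phys, 0x1000);
	uint64_t end = ALIGN_UP(phys + len, 0x1000);

	fsp_tce_map(tce, (void *)start, end - start);

	/* The caller advances its TCE cursor by (end - start) */
	return tce + (phys - start);
}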
static inline void code_update_tce_map(uint32_t tce_offset, void *buffer,
				       uint32_t size)
{
	uint32_t tlen = ALIGN_UP(size, TCE_PSIZE);

	fsp_tce_map(PSI_DMA_CODE_UPD + tce_offset, buffer, tlen);
}
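/*
 * An illustrative caller for the wrapper above. lid_a/lid_b and their
 * sizes are hypothetical placeholders, not skiboot symbols; the point
 * is that successive buffers land at TCE_PSIZE granularity within the
 * code-update window, which is why the second offset is rounded up.
 */
static void code_update_map_lids(void *lid_a, uint32_t size_a,
				 void *lid_b, uint32_t size_b)
{
	/* First buffer at the start of the code-update window */
	code_update_tce_map(0, lid_a, size_a);
	/* Second buffer at the next page-aligned offset in the window */
	code_update_tce_map(ALIGN_UP(size_a, TCE_PSIZE), lid_b, size_b);
}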
static int dump_region_tce_map(void)
{
	int i;
	uint32_t t_size = 0, size;
	uint64_t addr;

	for (i = 0; i < cur_mdst_entry; i++) {
		addr = dump_mem_region[i].addr & ~TCE_MASK;
		size = get_dump_region_map_size(dump_mem_region[i].addr,
						dump_mem_region[i].size);

		if (t_size + size > max_dump_size)
			break;

		/* TCE mapping */
		fsp_tce_map(PSI_DMA_HYP_DUMP + t_size, (void *)addr, size);

		/* Add entry to MDST table */
		mdst_table[i].type = dump_mem_region[i].type;
		mdst_table[i].size = dump_mem_region[i].size;
		mdst_table[i].addr = PSI_DMA_HYP_DUMP + t_size;
		/* TCE alignment adjustment */
		mdst_table[i].addr += dump_mem_region[i].addr & 0xfff;

		t_size += size;
	}
	return i;
}
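/*
 * A hedged sketch of the matching teardown, assuming the whole
 * hypervisor dump window is dropped at once. dump_region_tce_unmap()
 * is illustrative here; max_dump_size is the same bound the mapper
 * above stays within, so it covers everything that could be mapped.
 */
static void dump_region_tce_unmap(void)
{
	fsp_tce_unmap(PSI_DMA_HYP_DUMP, max_dump_size);
}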
static void firenze_send_pci_inventory(void)
{
	uint64_t base, abase, end, aend, offset;
	int64_t rc;

	if (!fsp_pcie_inv)
		return;

	prlog(PR_DEBUG, "PLAT: Sending PCI inventory to FSP, table has"
	      " %d entries\n", fsp_pcie_inv->num_entries);
	{
		unsigned int i;

		prlog(PR_DEBUG, "HWP SLT VDID DVID SVID SDID\n");
		prlog(PR_DEBUG, "---------------------------\n");
		for (i = 0; i < fsp_pcie_inv->num_entries; i++) {
			struct fsp_pcie_entry *e = &fsp_pcie_inv->entries[i];

			prlog(PR_DEBUG, "%03d %03d %04x %04x %04x %04x\n",
			      e->hw_proc_id, e->slot_idx, e->vendor_id,
			      e->device_id, e->subsys_vendor_id,
			      e->subsys_device_id);
		}
	}

	/*
	 * Get the location of the table in a form we can send
	 * to the FSP.
	 */
	base = (uint64_t)fsp_pcie_inv;
	end = base + sizeof(struct fsp_pcie_inventory) +
		fsp_pcie_inv->num_entries * fsp_pcie_inv->entry_size;
	abase = base & ~0xffful;
	aend = (end + 0xffful) & ~0xffful;
	offset = PSI_DMA_PCIE_INVENTORY + (base & 0xfff);

	/* We can only accommodate so many entries in the PSI map */
	if ((aend - abase) > PSI_DMA_PCIE_INVENTORY_SIZE) {
		prerror("PLAT: PCIe inventory too large (%lld bytes)\n",
			aend - abase);
		goto bail;
	}

	/* Map this in the TCEs */
	fsp_tce_map(PSI_DMA_PCIE_INVENTORY, (void *)abase, aend - abase);

	/* Send FSP message */
	rc = fsp_sync_msg(fsp_mkmsg(FSP_CMD_PCI_POWER_CONF, 3,
				    hi32(offset), lo32(offset),
				    end - base), true);
	if (rc)
		prerror("PLAT: FSP error %lld sending inventory\n", rc);

	/* Unmap */
	fsp_tce_unmap(PSI_DMA_PCIE_INVENTORY, aend - abase);
 bail:
	/*
	 * We free the inventory. We'll have to redo that on hotplug
	 * when we support it, but that isn't the case yet.
	 */
	free(fsp_pcie_inv);
	fsp_pcie_inv = NULL;
}
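/*
 * The base/end/offset arithmetic above is the generic "expose an
 * unaligned buffer through a 4K TCE window" computation. A minimal
 * restatement as a hypothetical helper (struct tce_span and
 * tce_span_for() are illustrative, not skiboot APIs):
 */
struct tce_span {
	uint64_t abase;		/* 4K-aligned start of the mapping */
	uint64_t asize;		/* 4K-rounded length to map */
	uint64_t dma_addr;	/* TCE-space address of the first byte */
};

static struct tce_span tce_span_for(uint64_t window, void *buf, uint64_t len)
{
	struct tce_span s;
	uint64_t base = (uint64_t)buf;

	s.abase = base & ~0xffful;
	s.asize = ((base + len + 0xffful) & ~0xffful) - s.abase;
	s.dma_addr = window + (base & 0xfff);
	return s;
}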