static void ipmi_get_chassis_boot_opt_resp(struct ipmi_msg *msg) { bmc_boot_opt_waiting = false; if (msg->cc != IPMI_CC_NO_ERROR) { prlog(PR_INFO, "IPMI: IPMI_CHASSIS_GET_BOOT_OPT cmd returned error" " [rc : 0x%x]\n", msg->data[0]); ipmi_free_msg(msg); return; } if (msg->resp_size == sizeof(struct ipmi_sys_boot_opt)) { bmc_boot_opt_valid = true; memcpy(ipmi_sys_boot_opt, msg->data, msg->resp_size); } else { prlog(PR_WARNING, "IPMI: IPMI_CHASSIS_GET_BOOT_OPT unexpected response size\n"); } ipmi_free_msg(msg); }
/*
 * Register the FSP EPOW client and OPAL call, and publish the EPOW
 * device-tree node with its supported classes.
 */
void fsp_epow_init(void)
{
	struct dt_node *node;

	fsp_register_client(&fsp_epow_client, FSP_MCLASS_SERVICE);
	opal_register(OPAL_GET_EPOW_STATUS, fsp_opal_get_epow_status, 2);

	node = dt_new(opal_node, "epow");
	dt_add_property_strings(node, "compatible", "ibm,opal-v3-epow");
	dt_add_property_strings(node, "epow-classes", "power", "temperature",
				"cooling");

	prlog(PR_TRACE, "FSP EPOW support initialized\n");
}
/* Process FSP sent EPOW based information */
static void epow_process_ex1_event(u8 *epow)
{
	/* Clear the bits this event type owns before applying new state */
	epow_status[OPAL_SYSEPOW_POWER] &= ~OPAL_SYSPOWER_UPS;
	epow_status[OPAL_SYSEPOW_TEMP] &= ~(OPAL_SYSTEMP_AMB | OPAL_SYSTEMP_INT);

	/* Byte 4 carries the event sub-type; only one can match */
	switch (epow[4]) {
	case EPOW_ON_UPS:
		prlog(PR_TRACE, "FSP message with EPOW_ON_UPS\n");
		epow_status[OPAL_SYSEPOW_POWER] |= OPAL_SYSPOWER_UPS;
		break;
	case EPOW_TMP_AMB:
		prlog(PR_TRACE, "FSP message with EPOW_TMP_AMB\n");
		epow_status[OPAL_SYSEPOW_TEMP] |= OPAL_SYSTEMP_AMB;
		break;
	case EPOW_TMP_INT:
		prlog(PR_TRACE, "FSP message with EPOW_TMP_INT\n");
		epow_status[OPAL_SYSEPOW_TEMP] |= OPAL_SYSTEMP_INT;
		break;
	default:
		break;
	}
}
/*
 * Dump NPU FIRs and a selection of SCOM registers for debug.
 *
 * @npu:         NPU instance to dump
 * @brick_index: specific brick to restrict the dump to, or -1 for all
 */
static void show_all_regs(struct npu2 *npu, int brick_index)
{
	int i, stack, stack_min, stack_max;
	uint64_t fir_val, mask_val, fir_addr, mask_addr;
	struct npu2_dev *dev;
	npu2_scom_dump_t scom_reg;

	if (brick_index != -1) {
		/* Two bricks per stack, hence the divide by 2 */
		stack_min = stack_max = NPU2_STACK_STCK_0 + brick_index / 2;
	} else {
		stack_min = NPU2_STACK_STCK_0;
		stack_max = NPU2_STACK_STCK_2;
		/* Avoid dumping unused stacks for opencapi on Lagrange */
		if (npu->total_devices == 2)
			stack_min = stack_max = NPU2_STACK_STCK_1;
	}

	/* NPU FIRs */
	for (i = 0; i < NPU2_TOTAL_FIR_REGISTERS; i++) {
		fir_addr = NPU2_FIR_REGISTER_0 + i * NPU2_FIR_OFFSET;
		mask_addr = fir_addr + NPU2_FIR_MASK_OFFSET;
		/* NOTE(review): xscom_read return codes are ignored here;
		 * a failed read would print stale stack values */
		xscom_read(npu->chip_id, fir_addr, &fir_val);
		xscom_read(npu->chip_id, mask_addr, &mask_val);
		/* Print raw, mask and post-mask values to ease triage */
		prlog(PR_ERR, "NPU[%d] FIR%d = 0x%016llx (mask 0x%016llx => 0x%016llx)\n",
		      npu->chip_id, i, fir_val, mask_val, fir_val & ~mask_val);
	}

	/* NPU global, per-stack registers */
	for (i = 0; i < ARRAY_SIZE(npu2_scom_dump_global); i++) {
		for (stack = stack_min; stack <= stack_max; stack++)
			print_one_npu_reg(npu, &npu2_scom_dump_global[i], stack);
	}

	/*
	 * NPU global registers, stack independent
	 * We have only one for now, so dump it directly
	 */
	scom_reg.name = "XTS.REG.ERR_HOLD";
	scom_reg.block = NPU2_BLOCK_XTS;
	scom_reg.offset = 0;
	print_one_npu_reg(npu, &scom_reg, NPU2_STACK_MISC);

	/* nvlink- or opencapi-specific registers */
	for (i = 0; i < npu->total_devices; i++) {
		dev = &npu->devices[i];
		if (brick_index == -1 || dev->brick_index == brick_index) {
			if (dev->type == NPU2_DEV_TYPE_NVLINK)
				show_nvlink_regs(npu, dev->brick_index);
			else if (dev->type == NPU2_DEV_TYPE_OPENCAPI)
				show_opencapi_regs(npu, dev->brick_index);
		}
	}
}
/*
 * Terminal handler for a failed assert(): log the failed expression
 * text and abort the system. Never returns.
 */
void __noreturn assert_fail(const char *msg)
{
	/**
	 * @fwts-label FailedAssert
	 * @fwts-advice OPAL hit an assert(). During normal usage (even
	 * testing) we should never hit an assert. There are other code
	 * paths for controlled shutdown/panic in the event of catastrophic
	 * errors.
	 */
	prlog(PR_EMERG, "Assert fail: %s\n", msg);
	_abort(msg);
}
/*
 * Return the FRU ID string for an SLCA index, or NULL (after logging)
 * when no entry exists for that index.
 */
const char *slca_get_vpd_name(uint16_t slca_index)
{
	const struct slca_entry *entry = slca_get_entry(slca_index);

	if (!entry) {
		prlog(PR_NOTICE, "SLCA: Can't find fru_id for index %d\n",
		      slca_index);
		return NULL;
	}

	return (const char *)entry->fru_id;
}
/*
 * Return the location code string for an SLCA index, or NULL (after
 * logging) when the entry cannot be found.
 */
const char *slca_get_loc_code_index(uint16_t slca_index)
{
	const struct slca_entry *entry = slca_get_entry(slca_index);

	if (!entry) {
		prlog(PR_NOTICE, "SLCA: Entry %d bad idata\n", slca_index);
		return NULL;
	}

	return entry->loc_code;
}
static void hmi_print_debug(const uint8_t *msg, uint64_t hmer) { const char *loc; uint32_t core_id, thread_index; core_id = pir_to_core_id(this_cpu()->pir); thread_index = cpu_get_thread_index(this_cpu()); loc = chip_loc_code(this_cpu()->chip_id); if (!loc) loc = "Not Available"; if (hmer & (SPR_HMER_TFAC_ERROR | SPR_HMER_TFMR_PARITY_ERROR)) { prlog(PR_DEBUG, "[Loc: %s]: P:%d C:%d T:%d: TFMR(%016lx) %s\n", loc, this_cpu()->chip_id, core_id, thread_index, mfspr(SPR_TFMR), msg); } else { prlog(PR_DEBUG, "[Loc: %s]: P:%d C:%d T:%d: %s\n", loc, this_cpu()->chip_id, core_id, thread_index, msg); } }
/*
 * Populate the OPAL branch and argument-count tables from the
 * linker-collected call table.
 */
void opal_table_init(void)
{
	struct opal_table_entry *entry;

	prlog(PR_DEBUG, "OPAL table: %p .. %p, branch table: %p\n",
	      __opal_table_start, __opal_table_end, opal_branch_table);

	for (entry = __opal_table_start; entry < __opal_table_end; entry++) {
		opal_branch_table[entry->token] =
			function_entry_address(entry->func);
		opal_num_args[entry->token] = entry->nargs;
	}
}
/*
 * Sanity-check the gcov info list at boot and report where it lives.
 *
 * Fixes two defects in the original: gcov_info_list was dereferenced
 * without a NULL check, and the "doesn't look sane" warning was missing
 * its trailing newline.
 */
void skiboot_gcov_done(void)
{
	struct gcov_info *i = gcov_info_list;

	if (!i) {
		/* No gcov data linked in: don't dereference a NULL head */
		prlog(PR_WARNING, "GCOV: gcov_info_list is NULL\n");
		return;
	}

	if (i->filename)
		printf("GCOV: gcov_info_list looks sane (first file: %s)\n",
		       i->filename);
	else
		prlog(PR_WARNING, "GCOV: gcov_info_list doesn't look sane. "
		      "i->filename == NULL.\n");

	printf("GCOV: gcov_info_list at 0x%p\n", gcov_info_list);
}
/*
 * Parse a v1004-format slot map keyword into lxvpd slot records.
 *
 * @phb:       PHB whose slots are being described
 * @sm:        raw keyword bytes (array of struct pci_slot_entry_1004)
 * @size:      length in bytes of the keyword data
 * @slot_size: per-slot allocation size requested by the caller
 */
static void lxvpd_parse_1004_map(struct phb *phb, const uint8_t *sm,
				 uint8_t size, uint32_t slot_size)
{
	struct lxvpd_pci_slot_data *sdata;
	struct lxvpd_pci_slot *s;
	const struct pci_slot_entry_1004 *entry;
	uint8_t num_slots, slot;

	num_slots = (size / sizeof(struct pci_slot_entry_1004));
	sdata = lxvpd_alloc_slots(phb, num_slots, slot_size);

	/* Iterate through the entries in the keyword */
	entry = (const struct pci_slot_entry_1004 *)sm;
	for (slot = 0; slot < num_slots; slot++, entry++) {
		s = sdata->slots + slot * sdata->entry_size;

		/* Figure out PCI slot info */
		lxvpd_format_label(s->label, entry->label, 3);
		s->slot_index = entry->slot_index;
		/* pba packs switch id (high nibble) / vswitch id (low) */
		s->switch_id = entry->pba >> 4;
		s->vswitch_id = entry->pba & 0xf;
		s->dev_id = entry->sba;
		/* p0 bit 0x20 set means NOT pluggable; 0x40 = power control */
		s->pluggable = ((entry->p0.byte & 0x20) == 0);
		s->power_ctl = !!(entry->p0.byte & 0x40);
		/*
		 * The 1004 encoding stores these with fixed offsets —
		 * presumably per the LX VPD spec; TODO confirm (the 1005
		 * format below stores the same fields directly).
		 */
		s->bus_clock = entry->p2.bus_clock - 4;
		s->connector_type = entry->p2.connector_type - 5;
		s->card_desc = entry->p3.byte >> 6;
		if (entry->p3.byte < 0xc0)
			s->card_desc -= 4;
		s->card_mech = (entry->p3.byte >> 4) & 0x3;
		s->pwr_led_ctl = (entry->p3.byte & 0xf) >> 2;
		s->attn_led_ctl = entry->p3.byte & 0x3;

		/* Map the 1004 wired-lanes code onto the generic enum */
		switch(entry->p1.wired_lanes) {
		case 1: s->wired_lanes = PCI_SLOT_WIRED_LANES_PCIX_32; break;
		case 2: /* fall through */
		case 3: s->wired_lanes = PCI_SLOT_WIRED_LANES_PCIX_64; break;
		case 4: s->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X1; break;
		case 5: s->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X4; break;
		case 6: s->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X8; break;
		case 7: s->wired_lanes = PCI_SLOT_WIRED_LANES_PCIE_X16; break;
		default:
			s->wired_lanes = PCI_SLOT_WIRED_LANES_UNKNOWN;
		}

		prlog(PR_DEBUG, "1004 Platform data [%s] %02x %02x on PHB%04x\n",
		      s->label, s->switch_id, s->dev_id, phb->opal_id);
	}
}
static int flash_nvram_probe(struct flash *flash, struct ffs_handle *ffs) { uint32_t start, size, part; bool ecc; int rc; prlog(PR_INFO, "FLASH: probing for NVRAM\n"); rc = ffs_lookup_part(ffs, "NVRAM", &part); if (rc) { prlog(PR_WARNING, "FLASH: no NVRAM partition found\n"); return OPAL_HARDWARE; } rc = ffs_part_info(ffs, part, NULL, &start, &size, NULL, &ecc); if (rc) { /** * @fwts-label NVRAMNoPartition * @fwts-advice OPAL could not find an NVRAM partition * on the system flash. Check that the system flash * has a valid partition table, and that the firmware * build process has added a NVRAM partition. */ prlog(PR_ERR, "FLASH: Can't parse ffs info for NVRAM\n"); return OPAL_HARDWARE; } nvram_flash = flash; nvram_offset = start; nvram_size = ecc ? ecc_buffer_size_minus_ecc(size) : size; platform.nvram_info = flash_nvram_info; platform.nvram_start_read = flash_nvram_start_read; platform.nvram_write = flash_nvram_write; return 0; }
/* incoming message handlers */

/*
 * Handle an attention ack from the PRD daemon: unmask the acked ipoll
 * bits for the given chip. Returns the mask-update result.
 */
static int prd_msg_handle_attn_ack(struct opal_prd_msg *msg)
{
	uint64_t ack_bits = msg->attn_ack.ipoll_ack & PRD_IPOLL_MASK;
	int rc;

	lock(&ipoll_lock);
	rc = __ipoll_update_mask(msg->attn_ack.proc, false, ack_bits);
	unlock(&ipoll_lock);

	if (rc)
		prlog(PR_ERR, "PRD: Unable to unmask ipoll!\n");

	return rc;
}
/* Entry point for interrupts */

/*
 * PSI interrupt handler for PRD: record and mask the pending ipoll
 * bits, then queue an attention event for the daemon.
 */
void prd_psi_interrupt(uint32_t proc)
{
	lock(&events_lock);

	if (ipoll_record_and_mask_pending(proc))
		prlog(PR_ERR, "PRD: Failed to update IPOLL mask\n");

	__prd_event(proc, EVENT_ATTN);
	unlock(&events_lock);
}
/*
 * Select the OPAL console driver, initialise it, and register the
 * console-related OPAL calls. Must be called exactly once.
 */
void init_opal_console(void)
{
	assert(!opal_cons_init);

	opal_cons_init = true;

	/* A forced dummy console overrides whatever driver was selected */
	if (dummy_console_enabled() && opal_con_driver != &dummy_opal_con) {
		prlog(PR_WARNING, "OPAL: Dummy console forced, %s ignored\n",
		      opal_con_driver->name);

		opal_con_driver = &dummy_opal_con;
	}

	prlog(PR_NOTICE, "OPAL: Using %s\n", opal_con_driver->name);

	/* init is optional; the other ops are registered unconditionally */
	if (opal_con_driver->init)
		opal_con_driver->init();

	opal_register(OPAL_CONSOLE_READ, opal_con_driver->read, 3);
	opal_register(OPAL_CONSOLE_WRITE, opal_con_driver->write, 3);
	opal_register(OPAL_CONSOLE_FLUSH, opal_con_driver->flush, 1);
	opal_register(OPAL_CONSOLE_WRITE_BUFFER_SPACE,
		      opal_con_driver->space, 2);
}
/* Dump SLW timer related SCOM registers to help debug a stuck timer. */
static void p8_sbe_dump_timer_ffdc(void)
{
	uint64_t i, val;
	int64_t rc;

	/* SCOM addresses of the timer-related registers worth capturing */
	static const uint32_t dump_regs[] = {
		0xe0000, 0xe0001, 0xe0002, 0xe0003, 0xe0004, 0xe0005, 0xe0006,
		0xe0007, 0xe0008, 0xe0009, 0xe000a, 0xe000b, 0xe000c, 0xe000d,
		0xe000e, 0xe000f, 0xe0010, 0xe0011, 0xe0012, 0xe0013, 0xe0014,
		0xe0015, 0xe0016, 0xe0017, 0xe0018, 0xe0019, 0x5001c, 0x50038,
		0x50039, 0x5003a, 0x5003b
	};

	/**
	 * @fwts-label SLWRegisterDump
	 * @fwts-advice An error condition occurred in sleep/winkle
	 * engines timer state machine. Dumping debug information to
	 * root-cause. OPAL/skiboot may be stuck on some operation that
	 * requires SLW timer state machine (e.g. core powersaving)
	 */
	prlog(PR_DEBUG, "SLW: Register state:\n");

	for (i = 0; i < ARRAY_SIZE(dump_regs); i++) {
		uint32_t reg = dump_regs[i];

		rc = xscom_read(sbe_timer_chip, reg, &val);
		if (rc) {
			/* Bail on the first SCOM failure; subsequent reads
			 * would most likely fail the same way */
			prlog(PR_DEBUG, "SLW: XSCOM error %lld reading"
			      " reg 0x%x\n", rc, reg);
			break;
		}
		prlog(PR_DEBUG, "SLW: %5x = %016llx\n", reg, val);
	}
}
/* * Get System Attention Indicator SLCA entry */ static const struct slca_entry *slca_get_sai_entry(void) { int count; unsigned int i; struct HDIF_common_hdr *slca_hdr; slca_hdr = get_hdif(&spira.ntuples.slca, SLCA_HDIF_SIG); if (!slca_hdr) { prerror("SLCA Invalid\n"); return NULL; } count = HDIF_get_iarray_size(slca_hdr, SLCA_IDATA_ARRAY); if (count < 0) { prerror("SLCA: Can't find SLCA array size!\n"); return NULL; } for (i = 0; i < count; i++) { const struct slca_entry *s_entry; unsigned int entry_sz; s_entry = HDIF_get_iarray_item(slca_hdr, SLCA_IDATA_ARRAY, i, &entry_sz); if (s_entry && VPD_ID(s_entry->fru_id[0], s_entry->fru_id[1]) == SLCA_SAI_INDICATOR_ID) { prlog(PR_TRACE, "SLCA: SAI index: 0x%x\n", s_entry->my_index); prlog(PR_TRACE, "SLCA: SAI location code: %s\n", s_entry->loc_code); return s_entry; } } return NULL; }
/*
 * Get IPL side
 *
 * Reads "cec-ipl-side" from ipl-params in the device tree; defaults to
 * the temporary side when the node or property is absent.
 *
 * Fix: the original passed a possibly-NULL `side` to a %s conversion,
 * which is undefined behavior; log a fallback string instead.
 */
static void get_ipl_side(void)
{
	struct dt_node *iplp;
	const char *side = NULL;

	iplp = dt_find_by_path(dt_root, "ipl-params/ipl-params");
	if (iplp)
		side = dt_prop_get_def(iplp, "cec-ipl-side", NULL);

	prlog(PR_NOTICE, "CUPD: IPL SIDE = %s\n", side ? side : "unknown");

	/* Missing property is treated as the temp side */
	if (!side || !strcmp(side, "temp"))
		ipl_side = FW_IPL_SIDE_TEMP;
	else
		ipl_side = FW_IPL_SIDE_PERM;
}
static bool read_pba_bar(struct proc_chip *chip, unsigned int bar_no, uint64_t *base, uint64_t *size) { uint64_t bar, mask; int rc; rc = xscom_read(chip->id, pba_bar0 + bar_no, &bar); if (rc) { prerror("SLW: Error %d reading PBA BAR%d on chip %d\n", rc, bar_no, chip->id); return false; } rc = xscom_read(chip->id, pba_barmask0 + bar_no, &mask); if (rc) { prerror("SLW: Error %d reading PBA BAR MASK%d on chip %d\n", rc, bar_no, chip->id); return false; } prlog(PR_DEBUG, " PBA BAR%d : 0x%016llx\n", bar_no, bar); prlog(PR_DEBUG, " PBA MASK%d: 0x%016llx\n", bar_no, mask); if (mask == PBA_MASK_ALL_BITS) { /* * This could happen if all HOMER users are not enabled during * early system bringup. Skip using the PBA BAR. */ mask = 0; bar = 0; prerror(" PBA MASK%d uninitalized skipping BAR\n", bar_no); } *base = bar & 0x0ffffffffffffffful; *size = (mask | 0xfffff) + 1; return (*base) != 0; }
/*
 * Default handler for OPAL calls made with an out-of-range or
 * unimplemented token: log it and fail with OPAL_PARAMETER.
 */
long opal_bad_token(uint64_t token)
{
	/**
	 * @fwts-label OPALBadToken
	 * @fwts-advice OPAL was called with a bad token. On POWER8 and
	 * earlier, Linux kernels had a bug where they wouldn't check
	 * if firmware supported particular OPAL calls before making them.
	 * It is, in fact, harmless for these cases. On systems newer than
	 * POWER8, this should never happen and indicates a kernel bug
	 * where OPAL_CHECK_TOKEN isn't being called where it should be.
	 */
	prlog(PR_ERR, "OPAL: Called with bad token %lld !\n", token);

	return OPAL_PARAMETER;
}
/*
 * Spin (running pollers) until the given resource finishes loading.
 * Returns the final resource_loaded() status.
 */
int wait_for_resource_loaded(enum resource_id id, uint32_t idx)
{
	int waited = 0;
	int r;

	for (r = resource_loaded(id, idx); r == OPAL_BUSY;
	     r = resource_loaded(id, idx)) {
		opal_run_pollers();
		time_wait_ms_nopoll(5);
		waited += 5;
	}

	prlog(PR_TRACE, "PLATFORM: wait_for_resource_loaded %x/%x %u ms\n",
	      id, idx, waited);

	return r;
}
/*
 * Terminal failure path: log, backtrace, ask the platform to
 * terminate, then spin forever. A re-entrant call spins immediately
 * instead of recursing through the handlers.
 */
void __noreturn _abort(const char *msg)
{
	static bool in_abort = false;

	/* Already aborting (once set, never cleared): just spin */
	while (in_abort)
		;
	in_abort = true;

	prlog(PR_EMERG, "Aborting!\n");
	backtrace();

	if (platform.terminate)
		platform.terminate(msg);

	while (true)
		;
}
static bool add_address_range(struct dt_node *root, const struct HDIF_ms_area_id *id, const struct HDIF_ms_area_address_range *arange) { struct dt_node *mem; u64 reg[2]; char *name; u32 chip_id; size_t namesz = sizeof("memory@") + STR_MAX_CHARS(reg[0]); name = (char*)malloc(namesz); prlog(PR_DEBUG, " Range: 0x%016llx..0x%016llx " "on Chip 0x%x mattr: 0x%x\n", (long long)arange->start, (long long)arange->end, pcid_to_chip_id(arange->chip), arange->mirror_attr); /* reg contains start and length */ reg[0] = cleanup_addr(be64_to_cpu(arange->start)); reg[1] = cleanup_addr(be64_to_cpu(arange->end)) - reg[0]; chip_id = pcid_to_chip_id(be32_to_cpu(arange->chip)); if (be16_to_cpu(id->flags) & MS_AREA_SHARED) { /* Only enter shared nodes once. */ mem = find_shared(root, be16_to_cpu(id->share_id), reg[0], reg[1]); if (mem) { append_chip_id(mem, chip_id); return true; } } snprintf(name, namesz, "memory@%llx", (long long)reg[0]); mem = dt_new(root, name); dt_add_property_string(mem, "device_type", "memory"); dt_add_property_cells(mem, "ibm,chip-id", chip_id); dt_add_property_u64s(mem, "reg", reg[0], reg[1]); if (be16_to_cpu(id->flags) & MS_AREA_SHARED) dt_add_property_cells(mem, DT_PRIVATE "share-id", be16_to_cpu(id->share_id)); free(name); return true; }
/*
 * Parse the MS VPD ntuple and populate memory nodes under @root.
 *
 * Validates the MS VPD header plus its MSAC (address configuration)
 * and TCMS (total configured memory) idata blocks, records the maximum
 * configured address under a private DT property, then walks the MS
 * areas to create the memory@ nodes.
 *
 * Returns false — after posting a fatal op-panel code — on any
 * malformed structure.
 */
static bool __memory_parse(struct dt_node *root)
{
	struct HDIF_common_hdr *ms_vpd;
	const struct msvpd_ms_addr_config *msac;
	const struct msvpd_total_config_ms *tcms;
	unsigned int size;

	ms_vpd = get_hdif(&spira.ntuples.ms_vpd, MSVPD_HDIF_SIG);
	if (!ms_vpd) {
		prerror("MS VPD: invalid\n");
		op_display(OP_FATAL, OP_MOD_MEM, 0x0000);
		return false;
	}
	if (be32_to_cpu(spira.ntuples.ms_vpd.act_len) < sizeof(*ms_vpd)) {
		prerror("MS VPD: invalid size %u\n",
			be32_to_cpu(spira.ntuples.ms_vpd.act_len));
		op_display(OP_FATAL, OP_MOD_MEM, 0x0001);
		return false;
	}

	prlog(PR_DEBUG, "MS VPD: is at %p\n", ms_vpd);

	msac = HDIF_get_idata(ms_vpd, MSVPD_IDATA_MS_ADDR_CONFIG, &size);
	if (!CHECK_SPPTR(msac) || size < sizeof(*msac)) {
		prerror("MS VPD: bad msac size %u @ %p\n", size, msac);
		op_display(OP_FATAL, OP_MOD_MEM, 0x0002);
		return false;
	}
	prlog(PR_DEBUG, "MS VPD: MSAC is at %p\n", msac);

	/* Stash the maximum configured address for later consumers */
	dt_add_property_u64(dt_root, DT_PRIVATE "maxmem",
			    be64_to_cpu(msac->max_configured_ms_address));

	tcms = HDIF_get_idata(ms_vpd, MSVPD_IDATA_TOTAL_CONFIG_MS, &size);
	if (!CHECK_SPPTR(tcms) || size < sizeof(*tcms)) {
		prerror("MS VPD: Bad tcms size %u @ %p\n", size, tcms);
		op_display(OP_FATAL, OP_MOD_MEM, 0x0003);
		return false;
	}
	prlog(PR_DEBUG, "MS VPD: TCMS is at %p\n", tcms);

	prlog(PR_DEBUG, "MS VPD: Maximum configured address: 0x%llx\n",
	      (long long)be64_to_cpu(msac->max_configured_ms_address));
	prlog(PR_DEBUG, "MS VPD: Maximum possible address: 0x%llx\n",
	      (long long)be64_to_cpu(msac->max_possible_ms_address));

	get_msareas(root, ms_vpd);

	prlog(PR_INFO, "MS VPD: Total MB of RAM: 0x%llx\n",
	      (long long)be64_to_cpu(tcms->total_in_mb));

	return true;
}
static int populate_ipoll_msg(struct opal_prd_msg *msg, uint32_t proc) { uint64_t ipoll_mask; int rc; lock(&ipoll_lock); rc = xscom_read(proc, PRD_IPOLL_REG_MASK, &ipoll_mask); unlock(&ipoll_lock); if (rc) { prlog(PR_ERR, "PRD: Unable to read ipoll status (chip %d)!\n", proc); return -1; } msg->attn.proc = proc; msg->attn.ipoll_status = ipoll_status[proc]; msg->attn.ipoll_mask = ipoll_mask; return 0; }
/*
 * Queue an IPMI chassis control request (power off, reset, etc.) to
 * the BMC.
 *
 * @request: chassis control code; must be <= IPMI_CHASSIS_SOFT_SHUTDOWN
 *
 * Returns OPAL_CLOSED when no BMC interface is present, OPAL_PARAMETER
 * for an out-of-range request, OPAL_HARDWARE when the message cannot
 * be allocated, otherwise the result of ipmi_queue_msg().
 */
int ipmi_chassis_control(uint8_t request)
{
	struct ipmi_msg *msg;

	if (!ipmi_present())
		return OPAL_CLOSED;

	/* Reject anything past the last defined control code */
	if (request > IPMI_CHASSIS_SOFT_SHUTDOWN)
		return OPAL_PARAMETER;

	msg = ipmi_mkmsg_simple(IPMI_CHASSIS_CONTROL, &request,
				sizeof(request));
	if (!msg)
		return OPAL_HARDWARE;

	prlog(PR_INFO, "IPMI: sending chassis control request 0x%02x\n",
	      request);

	return ipmi_queue_msg(msg);
}
/*
 * Enable CRB processing in the NX UMAC status/control register by a
 * read-modify-write of the SCOM register at @xcfg.
 */
static int nx_cfg_umac_status_ctrl(u32 gcid, u64 xcfg)
{
	u64 val;
	int rc;

#define CRB_ENABLE	1

	rc = xscom_read(gcid, xcfg, &val);
	if (rc)
		return rc;

	val = SETFIELD(NX_P9_UMAC_STATUS_CTRL_CRB_ENABLE, val, CRB_ENABLE);

	rc = xscom_write(gcid, xcfg, val);
	if (!rc) {
		prlog(PR_DEBUG, "NX%d: Setting UMAC Status Control 0x%016lx\n",
		      gcid, (unsigned long)val);
	} else {
		prerror("NX%d: ERROR: Setting UMAC Status Control failure %d\n",
			gcid, rc);
	}

	return rc;
}
/*
 * Parse a v1005-format slot map keyword into lxvpd slot records.
 * Unlike the 1004 format, the fields here are stored directly with no
 * offset adjustment.
 *
 * @phb:       PHB whose slots are being described
 * @sm:        raw keyword bytes (array of struct pci_slot_entry_1005)
 * @size:      length in bytes of the keyword data
 * @slot_size: per-slot allocation size requested by the caller
 */
static void lxvpd_parse_1005_map(struct phb *phb, const uint8_t *sm,
				 uint8_t size, uint32_t slot_size)
{
	struct lxvpd_pci_slot_data *sdata;
	struct lxvpd_pci_slot *s;
	const struct pci_slot_entry_1005 *entry;
	uint8_t num_slots, slot;

	num_slots = (size / sizeof(struct pci_slot_entry_1005));
	sdata = lxvpd_alloc_slots(phb, num_slots, slot_size);

	/* Iterate through the entries in the keyword */
	entry = (const struct pci_slot_entry_1005 *)sm;
	for (slot = 0; slot < num_slots; slot++, entry++) {
		s = sdata->slots + slot * sdata->entry_size;

		/* Put slot info into pci device structure */
		lxvpd_format_label(s->label, entry->label, 8);
		s->slot_index = entry->slot_index;
		/* pba packs switch id (high nibble) / vswitch id (low) */
		s->switch_id = entry->pba >> 4;
		s->vswitch_id = entry->pba & 0xf;
		s->dev_id = entry->switch_device_id;
		s->pluggable = (entry->p0.pluggable == 0);
		s->power_ctl = entry->p0.power_ctl;
		s->bus_clock = entry->p2.bus_clock;
		s->connector_type = entry->p2.connector_type;
		s->card_desc = entry->p3.byte >> 6;
		s->card_mech = (entry->p3.byte >> 4) & 0x3;
		s->pwr_led_ctl = (entry->p3.byte & 0xf) >> 2;
		s->attn_led_ctl = entry->p3.byte & 0x3;
		/* 1005 stores the generic enum directly; clamp bad values */
		s->wired_lanes = entry->p1.wired_lanes;
		if (s->wired_lanes > PCI_SLOT_WIRED_LANES_PCIE_X32)
			s->wired_lanes = PCI_SLOT_WIRED_LANES_UNKNOWN;

		prlog(PR_DEBUG, "1005 Platform data [%s] %02x %02x on PHB%04x\n",
		      s->label, s->switch_id, s->dev_id, phb->opal_id);
	}
}
void nx_create_compress_node(struct dt_node *node) { u32 gcid, pb_base; int rc; gcid = dt_get_chip_id(node); pb_base = dt_get_address(node, 0, NULL); prlog(PR_INFO, "NX%d: 842 at 0x%x\n", gcid, pb_base); if (dt_node_is_compatible(node, "ibm,power9-nx")) { u64 cfg_mmio, cfg_txwc, cfg_uctrl, cfg_dma; printf("Found ibm,power9-nx\n"); cfg_mmio = pb_base + NX_P9_UMAC_VAS_MMIO_BAR; cfg_dma = pb_base + NX_P9_DMA_VAS_MMIO_BAR; cfg_txwc = pb_base + NX_P9_UMAC_TX_WINDOW_CONTEXT_BAR; cfg_uctrl = pb_base + NX_P9_UMAC_STATUS_CTRL; rc = nx_cfg_umac_vas_mmio(gcid, cfg_mmio); if (rc) return; rc = nx_cfg_dma_vas_mmio(gcid, cfg_dma); if (rc) return; rc = nx_cfg_umac_tx_wc(gcid, cfg_txwc); if (rc) return; rc = nx_cfg_umac_status_ctrl(gcid, cfg_uctrl); if (rc) return; p9_nx_enable_842(node, gcid, pb_base); p9_nx_enable_gzip(node, gcid, pb_base); } else nx_enable_842(node, gcid, pb_base); }
/*
 * Point NX at the VAS HVWC MMIO BAR by writing the full BAR address to
 * the SCOM register at @xcfg.
 */
static int nx_cfg_umac_vas_mmio(u32 gcid, u64 xcfg)
{
	u64 bar = vas_get_hvwc_mmio_bar(gcid);
	int rc;

	/*
	 * NOTE: Write the entire bar address to SCOM. VAS/NX will extract
	 * the relevant (NX_P9_UMAC_VAS_MMIO_ADDR) bits. IOW, _don't_
	 * just write the bit field like:
	 *
	 *	cfg = SETFIELD(NX_P9_UMAC_VAS_MMIO_ADDR, 0ULL, cfg);
	 */
	rc = xscom_write(gcid, xcfg, bar);
	if (rc) {
		prerror("NX%d: ERROR: UMAC VAS MMIO BAR, %d\n", gcid, rc);
	} else {
		prlog(PR_DEBUG, "NX%d: UMAC VAS MMIO BAR, 0x%016lx, "
		      "xcfg 0x%llx\n", gcid, (unsigned long)bar, xcfg);
	}

	return rc;
}