static int64_t opb_read(struct proc_chip *chip, uint32_t addr, uint32_t *data, uint32_t sz) { uint64_t ctl = ECCB_CTL_MAGIC | ECCB_CTL_READ, stat; int64_t rc, tout; if (sz != 1 && sz != 2 && sz != 4) { prerror("LPC: Invalid data size %d\n", sz); return OPAL_PARAMETER; } ctl = SETFIELD(ECCB_CTL_DATASZ, ctl, sz); ctl = SETFIELD(ECCB_CTL_ADDRLEN, ctl, ECCB_ADDRLEN_4B); ctl = SETFIELD(ECCB_CTL_ADDR, ctl, addr); rc = xscom_write(chip->id, chip->lpc_xbase + ECCB_CTL, ctl); if (rc) { log_simple_error(&e_info(OPAL_RC_LPC_READ), "LPC: XSCOM write to ECCB CTL error %lld\n", rc); return rc; } for (tout = 0; tout < ECCB_TIMEOUT; tout++) { rc = xscom_read(chip->id, chip->lpc_xbase + ECCB_STAT, &stat); if (rc) { log_simple_error(&e_info(OPAL_RC_LPC_READ), "LPC: XSCOM read from ECCB STAT err %lld\n", rc); return rc; } if (stat & ECCB_STAT_OP_DONE) { uint32_t rdata = GETFIELD(ECCB_STAT_RD_DATA, stat); if (stat & ECCB_STAT_ERR_MASK) { log_simple_error(&e_info(OPAL_RC_LPC_READ), "LPC: Error status: 0x%llx\n", stat); return OPAL_HARDWARE; } switch(sz) { case 1: *data = rdata >> 24; break; case 2: *data = rdata >> 16; break; default: *data = rdata; break; } return 0; } time_wait(100); }
/*
 * Format an error message, echo it to the Sapphire console, and (when an
 * error-log buffer can be obtained) attach it to a platform error log
 * that is committed to the service processor.
 */
void log_simple_error(struct opal_err_info *e_info, const char *fmt, ...)
{
	const int desc_tag = 0x44455343;	/* ASCII of DESC */
	struct errorlog *elog;
	char msg[250];
	va_list args;

	va_start(args, fmt);
	vsnprintf(msg, sizeof(msg), fmt, args);
	va_end(args);

	/* Log the error on to Sapphire console */
	prerror("%s", msg);

	elog = opal_elog_create(e_info);
	if (!elog) {
		prerror("ELOG: Error getting buffer to log error\n");
		return;
	}

	opal_elog_update_user_dump(elog, msg, desc_tag, strlen(msg));
	if (platform.elog_commit(elog))
		prerror("ELOG: Re-try error logging\n");
}
/*
 * Replace the first occurrence of @oldep in the global edge list with
 * @newep. Aborts if @oldep is not present (caller invariant violated).
 */
static void listreplace (Pedge_t *oldep, Pedge_t *newep)
{
	elist_t *lp;

	for (lp = elist; lp; lp = lp->next) {
		if (lp->ep != oldep)
			continue;
		lp->ep = newep;
		return;
	}
	/*
	 * BUG FIX: the loop only falls through with lp == NULL, so the
	 * old "if (!lp)" wrapper around this error path was dead code —
	 * report the failure unconditionally.
	 */
	prerror ("cannot find list element to replace");
	abort ();
}
/*
 * Register trace buffer @t (of @size bytes) in the global debug
 * descriptor so external tools can locate it. Silently drops the buffer
 * (with a console error) if the descriptor table is full.
 */
static void trace_add_desc(struct trace_info *t, uint64_t size)
{
	unsigned int slot = debug_descriptor.num_traces;

	if (slot >= DEBUG_DESC_MAX_TRACES) {
		prerror("TRACE: Debug descriptor trace list full !\n");
		return;
	}
	debug_descriptor.num_traces = slot + 1;

	debug_descriptor.trace_phys[slot] = (uint64_t)&t->tb;
	debug_descriptor.trace_tce[slot] = 0;	/* populated later */
	debug_descriptor.trace_size[slot] = size;
}
/*
 * Configure the DMA VAS MMIO BAR for the NX unit on chip @gcid by
 * writing the HVWC MMIO BAR address to the SCOM register @xcfg.
 *
 * Returns the xscom_write() result (0 on success).
 */
static int nx_cfg_dma_vas_mmio(u32 gcid, u64 xcfg)
{
	int rc = 0;
	u64 cfg;

	cfg = vas_get_hvwc_mmio_bar(gcid);
	/*
	 * NOTE: Write the entire bar address to SCOM. VAS/NX will extract
	 * the relevant (NX_P9_UMAC_VAS_MMIO_ADDR) bits. IOW, _don't_
	 * just write the bit field like:
	 *
	 *	cfg = SETFIELD(NX_P9_DMA_VAS_MMIO_ADDR, 0ULL, cfg);
	 */
	rc = xscom_write(gcid, xcfg, cfg);
	if (rc)
		prerror("NX%d: ERROR: DMA VAS MMIO BAR, %d\n", gcid, rc);
	else
		/*
		 * BUG FIX: this success message was emitted with prerror();
		 * use prlog(PR_DEBUG) like nx_cfg_umac_status_ctrl() does,
		 * so routine configuration doesn't show up as an error.
		 */
		prlog(PR_DEBUG, "NX%d: DMA VAS MMIO BAR, 0x%016lx, xcfg 0x%llx\n",
		      gcid, (unsigned long)cfg, xcfg);

	return rc;
}
/* * Get System Attention Indicator SLCA entry */ static const struct slca_entry *slca_get_sai_entry(void) { int count; unsigned int i; struct HDIF_common_hdr *slca_hdr; slca_hdr = get_hdif(&spira.ntuples.slca, SLCA_HDIF_SIG); if (!slca_hdr) { prerror("SLCA Invalid\n"); return NULL; } count = HDIF_get_iarray_size(slca_hdr, SLCA_IDATA_ARRAY); if (count < 0) { prerror("SLCA: Can't find SLCA array size!\n"); return NULL; } for (i = 0; i < count; i++) { const struct slca_entry *s_entry; unsigned int entry_sz; s_entry = HDIF_get_iarray_item(slca_hdr, SLCA_IDATA_ARRAY, i, &entry_sz); if (s_entry && VPD_ID(s_entry->fru_id[0], s_entry->fru_id[1]) == SLCA_SAI_INDICATOR_ID) { prlog(PR_TRACE, "SLCA: SAI index: 0x%x\n", s_entry->my_index); prlog(PR_TRACE, "SLCA: SAI location code: %s\n", s_entry->loc_code); return s_entry; } } return NULL; }
static bool read_pba_bar(struct proc_chip *chip, unsigned int bar_no, uint64_t *base, uint64_t *size) { uint64_t bar, mask; int rc; rc = xscom_read(chip->id, pba_bar0 + bar_no, &bar); if (rc) { prerror("SLW: Error %d reading PBA BAR%d on chip %d\n", rc, bar_no, chip->id); return false; } rc = xscom_read(chip->id, pba_barmask0 + bar_no, &mask); if (rc) { prerror("SLW: Error %d reading PBA BAR MASK%d on chip %d\n", rc, bar_no, chip->id); return false; } prlog(PR_DEBUG, " PBA BAR%d : 0x%016llx\n", bar_no, bar); prlog(PR_DEBUG, " PBA MASK%d: 0x%016llx\n", bar_no, mask); if (mask == PBA_MASK_ALL_BITS) { /* * This could happen if all HOMER users are not enabled during * early system bringup. Skip using the PBA BAR. */ mask = 0; bar = 0; prerror(" PBA MASK%d uninitalized skipping BAR\n", bar_no); } *base = bar & 0x0ffffffffffffffful; *size = (mask | 0xfffff) + 1; return (*base) != 0; }
/*
 * Allocate and initialize an empty device-tree node named @name.
 * Aborts on allocation failure (callers never see NULL).
 */
static struct dt_node *new_node(const char *name)
{
	struct dt_node *n;

	n = malloc(sizeof(*n));
	if (!n) {
		prerror("Failed to allocate node\n");
		abort();
	}

	n->name = take_name(name);
	n->parent = NULL;
	list_head_init(&n->properties);
	list_head_init(&n->children);
	/* FIXME: locking? */
	n->phandle = new_phandle();

	return n;
}
/*
 * Allocate and queue a response message @cmd_sub_mod to the FSP.
 * Returns true on success, false if the message could not be allocated
 * or queued (the message is freed on queueing failure).
 */
static bool send_response_to_fsp(u32 cmd_sub_mod)
{
	struct fsp_msg *rsp;
	int rc;

	rsp = fsp_mkmsg(cmd_sub_mod, 0);
	if (!rsp) {
		/*
		 * BUG FIX: the old code fell into the common error path and
		 * called fsp_freemsg(NULL) here; handle allocation failure
		 * separately and never pass a NULL message to fsp_freemsg().
		 */
		/* XXX Generate error logs */
		prerror("Error %d queueing FSP memory error reply\n", -ENOMEM);
		return false;
	}

	rc = fsp_queue_msg(rsp, fsp_freemsg);
	if (rc) {
		fsp_freemsg(rsp);
		/* XXX Generate error logs */
		prerror("Error %d queueing FSP memory error reply\n", rc);
		return false;
	}

	return true;
}
/*
 * MMIO-mapped OPB read of @sz bytes (1, 2 or 4, big-endian for the
 * wider accesses) at @addr, result stored in @data.
 * Returns OPAL_SUCCESS or OPAL_PARAMETER for an unsupported size.
 */
static int64_t opb_mmio_read(struct lpcm *lpc, uint32_t addr, uint32_t *data,
			     uint32_t sz)
{
	if (sz == 1) {
		*data = in_8(lpc->mbase + addr);
		return OPAL_SUCCESS;
	}
	if (sz == 2) {
		*data = in_be16(lpc->mbase + addr);
		return OPAL_SUCCESS;
	}
	if (sz == 4) {
		*data = in_be32(lpc->mbase + addr);
		return OPAL_SUCCESS;
	}

	prerror("Invalid data size %d\n", sz);
	return OPAL_PARAMETER;
}
/*
 * MMIO-mapped OPB write of @sz bytes (1, 2 or 4, big-endian for the
 * wider accesses) of @data at @addr.
 * Returns OPAL_SUCCESS or OPAL_PARAMETER for an unsupported size.
 */
static int64_t opb_mmio_write(struct lpcm *lpc, uint32_t addr, uint32_t data,
			      uint32_t sz)
{
	if (sz == 1) {
		out_8(lpc->mbase + addr, data);
		return OPAL_SUCCESS;
	}
	if (sz == 2) {
		out_be16(lpc->mbase + addr, data);
		return OPAL_SUCCESS;
	}
	if (sz == 4) {
		out_be32(lpc->mbase + addr, data);
		return OPAL_SUCCESS;
	}

	prerror("Invalid data size %d\n", sz);
	return OPAL_PARAMETER;
}
/* Allocate trace buffers once we know memory topology */
void init_trace_buffers(void)
{
	struct cpu_thread *t;
	/* Fallback buffer shared by CPUs whose allocation fails; starts
	 * as the static boot trace buffer */
	struct trace_info *any = &boot_tracebuf.trace_info;
	uint64_t size;

	/* Register the boot trace in the debug descriptor */
	trace_add_desc(any, sizeof(boot_tracebuf.buf));

	/* Allocate a trace buffer for each primary cpu. */
	for_each_cpu(t) {
		if (t->is_secondary)
			continue;

		/* Use a 4K alignment for TCE mapping */
		size = ALIGN_UP(sizeof(*t->trace) + tracebuf_extra(), 0x1000);
		t->trace = local_alloc(t->chip_id, size, 0x1000);
		if (t->trace) {
			/* Remember the most recent successful allocation
			 * as the fallback for CPUs that fail below */
			any = t->trace;
			memset(t->trace, 0, size);
			init_lock(&t->trace->lock);
			t->trace->tb.mask = cpu_to_be64(TBUF_SZ - 1);
			t->trace->tb.max_size = cpu_to_be32(MAX_SIZE);
			trace_add_desc(any, sizeof(t->trace->tb) +
				       tracebuf_extra());
		} else
			prerror("TRACE: cpu 0x%x allocation failed\n", t->pir);
	}

	/* In case any allocations failed, share trace buffers. */
	for_each_cpu(t) {
		if (!t->is_secondary && !t->trace)
			t->trace = any;
	}

	/* And copy those to the secondaries. */
	for_each_cpu(t) {
		if (!t->is_secondary)
			continue;
		t->trace = t->primary->trace;
	}

	/* Trace node in DT. */
	trace_add_dt_props();
}
static void decode_malfunction(struct OpalHMIEvent *hmi_evt, uint64_t *out_flags) { int i; uint64_t malf_alert, flags; flags = 0; if (!setup_scom_addresses()) { prerror("Failed to setup scom addresses\n"); /* Send an unknown HMI event. */ hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_UNKNOWN; hmi_evt->u.xstop_error.xstop_reason = 0; queue_hmi_event(hmi_evt, false, out_flags); return; } xscom_read(this_cpu()->chip_id, malf_alert_scom, &malf_alert); if (!malf_alert) return; for (i = 0; i < 64; i++) { if (malf_alert & PPC_BIT(i)) { xscom_write(this_cpu()->chip_id, malf_alert_scom, ~PPC_BIT(i)); find_capp_checkstop_reason(i, hmi_evt, &flags); find_nx_checkstop_reason(i, hmi_evt, &flags); find_npu_checkstop_reason(i, hmi_evt, &flags); } } find_core_checkstop_reason(hmi_evt, &flags); /* * If we fail to find checkstop reason, send an unknown HMI event. */ if (!(flags & OPAL_HMI_FLAGS_NEW_EVENT)) { hmi_evt->u.xstop_error.xstop_type = CHECKSTOP_TYPE_UNKNOWN; hmi_evt->u.xstop_error.xstop_reason = 0; queue_hmi_event(hmi_evt, false, &flags); } *out_flags |= flags; }
/* * Check that the nvram partition layout is sane and that it * contains our required partitions. If not, we re-format the * lot of it */ int nvram_check(void *nvram_image, const uint32_t nvram_size) { unsigned int offset = 0; bool found_common = false; bool found_skiboot = false; while (offset + sizeof(struct chrp_nvram_hdr) < nvram_size) { struct chrp_nvram_hdr *h = nvram_image + offset; if (chrp_nv_cksum(h) != h->cksum) { prerror("NVRAM: Partition at offset 0x%x" " has bad checksum\n", offset); goto failed; } if (h->len < 1) { prerror("NVRAM: Partition at offset 0x%x" " has incorrect 0 length\n", offset); goto failed; } if (h->sig == NVRAM_SIG_SYSTEM && strcmp(h->name, NVRAM_NAME_COMMON) == 0) found_common = true; if (h->sig == NVRAM_SIG_FW_PRIV && strcmp(h->name, NVRAM_NAME_FW_PRIV) == 0) found_skiboot = true; offset += h->len << 4; if (offset > nvram_size) { prerror("NVRAM: Partition at offset 0x%x" " extends beyond end of nvram !\n", offset); goto failed; } } if (!found_common) { prerror("NVRAM: Common partition not found !\n"); goto failed; } if (!found_skiboot) { prerror("NVRAM: Skiboot private partition " "not found !\n"); goto failed; } prerror("NVRAM: Layout appears sane\n"); return 0; failed: return -1; }
static void set_wdt(uint8_t action, uint16_t count, uint8_t pretimeout) { struct ipmi_msg *ipmi_msg; ipmi_msg = ipmi_mkmsg(IPMI_DEFAULT_INTERFACE, IPMI_SET_WDT, ipmi_wdt_complete, NULL, NULL, 6, 0); if (!ipmi_msg) { prerror("Unable to allocate set wdt message\n"); return; } ipmi_msg->error = ipmi_wdt_complete; ipmi_msg->data[0] = TIMER_USE_POST | TIMER_USE_DONT_LOG; /* Timer Use */ ipmi_msg->data[1] = action; /* Timer Actions */ ipmi_msg->data[2] = pretimeout; /* Pre-timeout Interval */ ipmi_msg->data[3] = 0; /* Timer Use Flags */ ipmi_msg->data[4] = count & 0xff; /* Initial countdown (lsb) */ ipmi_msg->data[5] = (count >> 8) & 0xff; /* Initial countdown (msb) */ ipmi_queue_msg(ipmi_msg); }
/*
 * Unlink and free the list element referencing @ep from the global edge
 * list. Aborts if @ep is not present (caller invariant violated).
 */
static void listdelete (Pedge_t *ep)
{
	elist_t *lp;

	for (lp = elist; lp; lp = lp->next) {
		if (lp->ep != ep)
			continue;
		if (lp->prev)
			lp->prev->next = lp->next;
		if (lp->next)
			lp->next->prev = lp->prev;
		if (elist == lp)
			elist = lp->next;
		free (lp);
		return;
	}
	/*
	 * BUG FIX: the loop only falls through with lp == NULL, so the
	 * old "if (!lp)" wrapper around this error path was dead code —
	 * report the failure unconditionally.
	 */
	prerror ("cannot find list element to delete");
	abort ();
}
static const struct slot_table_entry *match_slot_phb_entry(struct phb *phb) { uint32_t chip_id = dt_get_chip_id(phb->dt_node); uint32_t phb_idx = dt_prop_get_u32_def(phb->dt_node, "ibm,phb-index", 0); const struct slot_table_entry *ent; if (!slot_top_table) return NULL; for (ent = slot_top_table; ent->etype != st_end; ent++) { if (ent->etype != st_phb) { prerror("SLOT: Bad DEV entry type in table !\n"); continue; } if (ent->location == ST_LOC_PHB(chip_id, phb_idx)) return ent; } return NULL; }
/*
 * Enable CRB handling in the NX UMAC Status Control register at SCOM
 * address @xcfg on chip @gcid (read-modify-write of the CRB enable
 * field). Returns the XSCOM result (0 on success).
 */
static int nx_cfg_umac_status_ctrl(u32 gcid, u64 xcfg)
{
#define CRB_ENABLE	1
	u64 val;
	int rc;

	rc = xscom_read(gcid, xcfg, &val);
	if (rc)
		return rc;

	val = SETFIELD(NX_P9_UMAC_STATUS_CTRL_CRB_ENABLE, val, CRB_ENABLE);
	rc = xscom_write(gcid, xcfg, val);
	if (rc) {
		prerror("NX%d: ERROR: Setting UMAC Status Control failure %d\n",
			gcid, rc);
		return rc;
	}

	prlog(PR_DEBUG, "NX%d: Setting UMAC Status Control 0x%016lx\n",
	      gcid, (unsigned long)val);
	return rc;
}
static int setup_scom_addresses(void) { switch (proc_gen) { case proc_gen_p8: malf_alert_scom = P8_MALFUNC_ALERT; nx_status_reg = P8_NX_STATUS_REG; nx_dma_engine_fir = P8_NX_DMA_ENGINE_FIR; nx_pbi_fir = P8_NX_PBI_FIR; return 1; case proc_gen_p9: malf_alert_scom = P9_MALFUNC_ALERT; nx_status_reg = P9_NX_STATUS_REG; nx_dma_engine_fir = P9_NX_DMA_ENGINE_FIR; nx_pbi_fir = P9_NX_PBI_FIR; return 1; default: prerror("%s: Unknown CPU type\n", __func__); break; } return 0; }
int nvram_format(void *nvram_image, uint32_t nvram_size) { struct chrp_nvram_hdr *h; unsigned int offset = 0; prerror("NVRAM: Re-initializing\n"); memset(nvram_image, 0, nvram_size); /* Create private partition */ if (nvram_size - offset < NVRAM_SIZE_FW_PRIV) return -1; h = nvram_image + offset; h->sig = NVRAM_SIG_FW_PRIV; h->len = NVRAM_SIZE_FW_PRIV >> 4; strcpy(h->name, NVRAM_NAME_FW_PRIV); h->cksum = chrp_nv_cksum(h); offset += NVRAM_SIZE_FW_PRIV; /* Create common partition */ if (nvram_size - offset < NVRAM_SIZE_COMMON) return -1; h = nvram_image + offset; h->sig = NVRAM_SIG_SYSTEM; h->len = NVRAM_SIZE_COMMON >> 4; strcpy(h->name, NVRAM_NAME_COMMON); h->cksum = chrp_nv_cksum(h); offset += NVRAM_SIZE_COMMON; /* Create free space partition */ if (nvram_size - offset < sizeof(struct chrp_nvram_hdr)) return -1; h = nvram_image + offset; h->sig = NVRAM_SIG_FREE; h->len = (nvram_size - offset) >> 4; /* We have the full 12 bytes here */ memcpy(h->name, NVRAM_NAME_FREE, 12); h->cksum = chrp_nv_cksum(h); return 0; }
struct cpu_job *__cpu_queue_job(struct cpu_thread *cpu, const char *name, void (*func)(void *data), void *data, bool no_return) { struct cpu_job *job; if (cpu && !cpu_is_available(cpu)) { prerror("CPU: Tried to queue job on unavailable CPU 0x%04x\n", cpu->pir); return NULL; } job = zalloc(sizeof(struct cpu_job)); if (!job) return NULL; job->func = func; job->data = data; job->name = name; job->complete = false; job->no_return = no_return; if (cpu == NULL) { lock(&global_job_queue_lock); list_add_tail(&global_job_queue, &job->link); unlock(&global_job_queue_lock); } else if (cpu != this_cpu()) { lock(&cpu->job_lock); list_add_tail(&cpu->job_queue, &job->link); unlock(&cpu->job_lock); } else { func(data); job->complete = true; } /* XXX Add poking of CPU with interrupt */ return job; }
/*
 * Load the VPD LID described by the "ibm,vpd-lx-info" property of
 * @hub_node and, on success, attach its contents to the node as the
 * "ibm,io-vpd" property.
 */
void vpd_iohub_load(struct dt_node *hub_node)
{
	const uint32_t *prop;
	unsigned int idx;
	const char *lx_kw;
	void *vpd;
	size_t sz;

	prop = dt_prop_get_def(hub_node, "ibm,vpd-lx-info", NULL);
	if (!prop)
		return;

	/* First cell is the LXRn index, the rest is the LX keyword */
	idx = prop[0];
	lx_kw = (const char *)&prop[1];

	vpd = vpd_lid_load(lx_kw, idx, &sz);
	if (vpd) {
		dt_add_property(hub_node, "ibm,io-vpd", vpd, sz);
		free(vpd);
	} else {
		prerror("VPD: Failed to load VPD LID\n");
	}
}
static int flash_nvram_start_read(void *dst, uint32_t src, uint32_t len) { int rc; if (!try_lock(&flash_lock)) return OPAL_BUSY; if (!nvram_flash) { rc = OPAL_HARDWARE; goto out; } if (nvram_flash->busy) { rc = OPAL_BUSY; goto out; } if ((src + len) > nvram_size) { prerror("FLASH_NVRAM: read out of bound (0x%x,0x%x)\n", src, len); rc = OPAL_PARAMETER; goto out; } nvram_flash->busy = true; unlock(&flash_lock); rc = blocklevel_read(nvram_flash->bl, nvram_offset + src, dst, len); lock(&flash_lock); nvram_flash->busy = false; out: unlock(&flash_lock); if (!rc) nvram_read_complete(true); return rc; }
/*
 * Parse the HDAT host I2C device array (idata @idata_index of @hdr) and
 * populate i2cm/bus/device nodes under the @xscom device-tree node.
 *
 * Returns 0 on success, -1 if the array is absent or its version is
 * newer than we understand.
 */
int parse_i2c_devs(const struct HDIF_common_hdr *hdr, int idata_index,
		   struct dt_node *xscom)
{
	struct dt_node *i2cm, *bus, *node;
	const struct hdat_i2c_type *type;
	const struct hdat_i2c_info *info;
	const struct i2c_dev *dev;
	const char *name, *compat;
	const struct host_i2c_hdr *ahdr;
	uint32_t dev_addr;
	uint32_t version;
	uint32_t size;
	uint32_t purpose;
	int i, count;

	/*
	 * This code makes a few assumptions about XSCOM addrs, etc
	 * and will need updating for new processors
	 */
	assert(proc_gen == proc_gen_p9);

	/*
	 * Emit an error if we get a newer version. This is an interim measure
	 * until the new version format is finalised.
	 */
	ahdr = HDIF_get_idata(hdr, idata_index, &size);
	if (!ahdr || !size)
		return -1;

	/*
	 * Some hostboots don't correctly fill the version field. On these
	 * the offset from the start of the header to the start of the array
	 * is 16 bytes.
	 */
	if (be32_to_cpu(ahdr->hdr.offset) == 16) {
		version = 1;
		prerror("I2C: HDAT device array has no version! Assuming v1\n");
	} else {
		version = be32_to_cpu(ahdr->version);
	}

	if (version == 2) {
		prlog(PR_INFO,
		      "I2C: v%d found, but not supported. Parsing as v1\n",
		      version);
	} else if (version > 2) {
		prerror("I2C: v%d found, but not supported! THIS IS A BUG\n",
			version);
		return -1;
	}

	count = HDIF_get_iarray_size(hdr, idata_index);
	for (i = 0; i < count; i++) {
		dev = HDIF_get_iarray_item(hdr, idata_index, i, &size);

		/*
		 * XXX: Some broken hostboots populate i2c devs with zeros.
		 * Workaround them for now.
		 */
		if (is_zeros(dev, size)) {
			prerror("I2C: Ignoring broken i2c dev %d\n", i);
			continue;
		}

		/*
		 * On some systems the CFAM I2C master is represented in the
		 * host I2C table as engine 6. There are only 4 (0, 1, 2, 3)
		 * engines accessible to the host via XSCOM so filter out
		 * engines outside this range so we don't create bogus
		 * i2cm@<addr> nodes.
		 */
		if (dev->i2cm_engine >= 4 && proc_gen == proc_gen_p9)
			continue;

		i2cm = get_i2cm_node(xscom, dev->i2cm_engine);
		bus = get_bus_node(i2cm, dev->i2cm_port,
				   be16_to_cpu(dev->i2c_bus_freq));

		/*
		 * Looks like hostboot gives the address as an 8 bit, left
		 * justified quantity (i.e it includes the R/W bit). So we need
		 * to strip it off to get an address linux can use.
		 */
		dev_addr = dev->dev_addr >> 1;

		purpose = be32_to_cpu(dev->purpose);
		type = map_type(dev->type);
		info = get_info(purpose);

		/* HACK: Hostboot doesn't export the correct type information
		 * for the DIMM SPD EEPROMs. This is a problem because SPD
		 * EEPROMs have a different wire protocol to the atmel,24XXXX
		 * series. The main difference being that SPD EEPROMs have an
		 * 8bit offset rather than a 16bit offset. This means that the
		 * driver will send 2 bytes when doing a random read,
		 * potentially overwriting part of the SPD information.
		 *
		 * Just to make things interested the FSP also gets the device
		 * type wrong. To work around both just set the device-type to
		 * "spd" for anything in the 0x50 to 0x57 range since that's the
		 * SPD eeprom range.
		 *
		 * XXX: Future chips might not use engine 3 for the DIMM buses.
		 */
		if (dev->i2cm_engine == 3 && dev_addr >= 0x50
		    && dev_addr < 0x58) {
			compat = "spd";
			name = "eeprom";
		} else if (type) {
			compat = type->compat;
			name = type->name;
		} else {
			name = "unknown";
			compat = NULL;
		}

		/*
		 * An i2c device is unknown if either the i2c device list is
		 * outdated or the device is marked as unknown (0xFF) in the
		 * hdat. Log both cases to see what/where/why.
		 */
		if (!type || dev->type == 0xFF) {
			/* Unknown device: log it and skip node creation */
			prlog(PR_NOTICE,
			      "HDAT I2C: found e%dp%d - %s@%x dp:%02x (%#x:%s)\n",
			      dev->i2cm_engine, dev->i2cm_port, name, dev_addr,
			      dev->dev_port, purpose, info->label);
			continue;
		}

		prlog(PR_DEBUG,
		      "HDAT I2C: found e%dp%d - %s@%x dp:%02x (%#x:%s)\n",
		      dev->i2cm_engine, dev->i2cm_port, name, dev_addr,
		      dev->dev_port, purpose, info->label);

		/*
		 * Multi-port device require special handling since we need to
		 * generate the device-specific DT bindings. For now we're just
		 * going to ignore them since these devices are owned by FW
		 * any way.
		 */
		if (dev->dev_port != 0xff)
			continue;

		node = dt_new_addr(bus, name, dev_addr);
		if (!node)
			continue;

		dt_add_property_cells(node, "reg", dev_addr);
		dt_add_property_cells(node, "link-id",
				      be32_to_cpu(dev->i2c_link));
		if (compat)
			dt_add_property_string(node, "compatible", compat);
		if (info->label)
			dt_add_property_string(node, "label", info->label);
		if (!info->whitelist)
			dt_add_property_string(node, "status", "reserved");

		/*
		 * Set a default timeout of 2s on the ports with a TPM. This is
		 * to work around a bug with certain TPM firmwares that can
		 * clock stretch for long periods of time and will lock up
		 * until they are power cycled if a STOP condition is sent
		 * during this period.
		 */
		if (dev->type == 0x3)
			dt_add_property_cells(bus, "timeout-ms", 2000);

		/* XXX: SLCA index? */
	}

	return 0;
}
/* Present a g_error message to the user */
void os_show_error(g_error e)
{
	/*
	 * NOTE(review): if prerror() here is printf-style, passing 'e'
	 * directly as the format string is a format-string hazard
	 * (prefer prerror("%s", e)); if prerror() takes a plain
	 * g_error/string argument, this is fine — confirm its signature.
	 */
	prerror(e);
}
/*
 * Report an unrecoverable internal bug described by @msg and terminate
 * the program with a failure status.
 */
void internal_error(const char *msg)
{
	prerror("Internal programming error: %s\n", msg);
	exit(1);
}
/* * load a resource from FLASH * buf and len shouldn't account for ECC even if partition is ECCed. * * The API here is a bit strange. * If resource has a STB container, buf will contain it * If loading subpartition with STB container, buff will *NOT* contain it * For trusted boot, the whole partition containing the subpart is measured. * * Additionally, the logic to work out how much to read from flash is insane. */ static int flash_load_resource(enum resource_id id, uint32_t subid, void *buf, size_t *len) { int i; int rc = OPAL_RESOURCE; struct ffs_handle *ffs; struct flash *flash; const char *name; bool status = false; bool ecc; bool part_signed = false; void *bufp = buf; size_t bufsz = *len; int ffs_part_num, ffs_part_start, ffs_part_size; int content_size = 0; int offset = 0; lock(&flash_lock); if (!system_flash) { /** * @fwts-label SystemFlashNotFound * @fwts-advice No system flash was found. Check for missing * calls flash_register(...). */ prlog(PR_WARNING, "FLASH: Can't load resource id:%i. " "No system flash found\n", id); goto out_unlock; } flash = system_flash; if (flash->busy) goto out_unlock; for (i = 0, name = NULL; i < ARRAY_SIZE(part_name_map); i++) { if (part_name_map[i].id == id) { name = part_name_map[i].name; break; } } if (!name) { prerror("FLASH: Couldn't find partition for id %d\n", id); goto out_unlock; } /* * If partition doesn't have a subindex but the caller specifies one, * we fail. eg. 
kernel partition doesn't have a subindex */ if ((part_name_map[i].subid == RESOURCE_SUBID_NONE) && (subid != RESOURCE_SUBID_NONE)) { prerror("PLAT: Partition %s doesn't have subindex\n", name); goto out_unlock; } rc = ffs_init(0, flash->size, flash->bl, &ffs, 1); if (rc) { prerror("FLASH: Can't open ffs handle: %d\n", rc); goto out_unlock; } rc = ffs_lookup_part(ffs, name, &ffs_part_num); if (rc) { /* This is not an error per-se, some partitions * are purposefully absent, don't spam the logs */ prlog(PR_DEBUG, "FLASH: No %s partition\n", name); goto out_free_ffs; } rc = ffs_part_info(ffs, ffs_part_num, NULL, &ffs_part_start, NULL, &ffs_part_size, &ecc); if (rc) { prerror("FLASH: Failed to get %s partition info\n", name); goto out_free_ffs; } prlog(PR_DEBUG,"FLASH: %s partition %s ECC\n", name, ecc ? "has" : "doesn't have"); if (ffs_part_size < SECURE_BOOT_HEADERS_SIZE) { prerror("FLASH: secboot headers bigger than " "partition size 0x%x\n", ffs_part_size); goto out_free_ffs; } rc = blocklevel_read(flash->bl, ffs_part_start, bufp, SECURE_BOOT_HEADERS_SIZE); if (rc) { prerror("FLASH: failed to read the first 0x%x from " "%s partition, rc %d\n", SECURE_BOOT_HEADERS_SIZE, name, rc); goto out_free_ffs; } part_signed = stb_is_container(bufp, SECURE_BOOT_HEADERS_SIZE); prlog(PR_DEBUG, "FLASH: %s partition %s signed\n", name, part_signed ? "is" : "isn't"); /* * part_start/size are raw pointers into the partition. * ie. they will account for ECC if included. 
*/ if (part_signed) { bufp += SECURE_BOOT_HEADERS_SIZE; bufsz -= SECURE_BOOT_HEADERS_SIZE; content_size = stb_sw_payload_size(buf, SECURE_BOOT_HEADERS_SIZE); *len = content_size + SECURE_BOOT_HEADERS_SIZE; if (content_size > bufsz) { prerror("FLASH: content size > buffer size\n"); rc = OPAL_PARAMETER; goto out_free_ffs; } ffs_part_start += SECURE_BOOT_HEADERS_SIZE; rc = blocklevel_read(flash->bl, ffs_part_start, bufp, content_size); if (rc) { prerror("FLASH: failed to read content size %d" " %s partition, rc %d\n", content_size, name, rc); goto out_free_ffs; } if (subid == RESOURCE_SUBID_NONE) goto done_reading; rc = flash_subpart_info(bufp, content_size, ffs_part_size, NULL, subid, &offset, &content_size); if (rc) { prerror("FLASH: Failed to parse subpart info for %s\n", name); goto out_free_ffs; } bufp += offset; goto done_reading; } else /* stb_signed */ { /* * Back to the old way of doing things, no STB header. */ if (subid == RESOURCE_SUBID_NONE) { if (id == RESOURCE_ID_KERNEL || id == RESOURCE_ID_INITRAMFS) { /* * Because actualSize is a lie, we compute the * size of the BOOTKERNEL based on what the ELF * headers say. 
Otherwise we end up reading more * than we should */ content_size = sizeof_elf_from_hdr(buf); if (!content_size) { prerror("FLASH: Invalid ELF header part" " %s\n", name); rc = OPAL_RESOURCE; goto out_free_ffs; } } else { content_size = ffs_part_size; } if (content_size > bufsz) { prerror("FLASH: %s content size %d > " " buffer size %lu\n", name, content_size, bufsz); rc = OPAL_PARAMETER; goto out_free_ffs; } prlog(PR_DEBUG, "FLASH: computed %s size %u\n", name, content_size); rc = blocklevel_read(flash->bl, ffs_part_start, buf, content_size); if (rc) { prerror("FLASH: failed to read content size %d" " %s partition, rc %d\n", content_size, name, rc); goto out_free_ffs; } *len = content_size; goto done_reading; } BUILD_ASSERT(FLASH_SUBPART_HEADER_SIZE <= SECURE_BOOT_HEADERS_SIZE); rc = flash_subpart_info(bufp, SECURE_BOOT_HEADERS_SIZE, ffs_part_size, &ffs_part_size, subid, &offset, &content_size); if (rc) { prerror("FLASH: FAILED reading subpart info. rc=%d\n", rc); goto out_free_ffs; } *len = ffs_part_size; prlog(PR_DEBUG, "FLASH: Computed %s partition size: %u " "(subpart %u size %u offset %u)\n", name, ffs_part_size, subid, content_size, offset); /* * For a sub partition, we read the whole (computed) * partition, and then measure that. * Afterwards, we memmove() things back into place for * the caller. */ rc = blocklevel_read(flash->bl, ffs_part_start, buf, ffs_part_size); bufp += offset; } done_reading: /* * Verify and measure the retrieved PNOR partition as part of the * secure boot and trusted boot requirements */ secureboot_verify(id, buf, *len); trustedboot_measure(id, buf, *len); /* Find subpartition */ if (subid != RESOURCE_SUBID_NONE) { memmove(buf, bufp, content_size); *len = content_size; } status = true; out_free_ffs: ffs_close(ffs); out_unlock: unlock(&flash_lock); return status ? OPAL_SUCCESS : rc; }
static struct dt_node *flash_add_dt_node(struct flash *flash, int id) { int i; int rc; const char *name; bool ecc; struct ffs_handle *ffs; int ffs_part_num, ffs_part_start, ffs_part_size; struct dt_node *flash_node; struct dt_node *partition_container_node; struct dt_node *partition_node; flash_node = dt_new_addr(opal_node, "flash", id); dt_add_property_strings(flash_node, "compatible", "ibm,opal-flash"); dt_add_property_cells(flash_node, "ibm,opal-id", id); dt_add_property_u64(flash_node, "reg", flash->size); dt_add_property_cells(flash_node, "ibm,flash-block-size", flash->block_size); if (flash->no_erase) dt_add_property(flash_node, "no-erase", NULL, 0); /* we fix to 32-bits */ dt_add_property_cells(flash_node, "#address-cells", 1); dt_add_property_cells(flash_node, "#size-cells", 1); /* Add partition container node */ partition_container_node = dt_new(flash_node, "partitions"); dt_add_property_strings(partition_container_node, "compatible", "fixed-partitions"); /* we fix to 32-bits */ dt_add_property_cells(partition_container_node, "#address-cells", 1); dt_add_property_cells(partition_container_node, "#size-cells", 1); /* Add partitions */ for (i = 0, name = NULL; i < ARRAY_SIZE(part_name_map); i++) { name = part_name_map[i].name; rc = ffs_init(0, flash->size, flash->bl, &ffs, 1); if (rc) { prerror("FLASH: Can't open ffs handle\n"); continue; } rc = ffs_lookup_part(ffs, name, &ffs_part_num); if (rc) { /* This is not an error per-se, some partitions * are purposefully absent, don't spam the logs */ prlog(PR_DEBUG, "FLASH: No %s partition\n", name); continue; } rc = ffs_part_info(ffs, ffs_part_num, NULL, &ffs_part_start, NULL, &ffs_part_size, &ecc); if (rc) { prerror("FLASH: Failed to get %s partition info\n", name); continue; } partition_node = dt_new_addr(partition_container_node, "partition", ffs_part_start); dt_add_property_strings(partition_node, "label", name); dt_add_property_cells(partition_node, "reg", ffs_part_start, ffs_part_size); if 
(part_name_map[i].id != RESOURCE_ID_KERNEL_FW) { /* Mark all partitions other than the full PNOR and the boot kernel * firmware as read only. These two partitions are the only partitions * that are properly erase block aligned at this time. */ dt_add_property(partition_node, "read-only", NULL, 0); } } partition_node = dt_new_addr(partition_container_node, "partition", 0); dt_add_property_strings(partition_node, "label", "PNOR"); dt_add_property_cells(partition_node, "reg", 0, flash->size); return flash_node; }
/* This is called with the timer lock held, so there is no
 * issue with re-entrancy or concurrence
 */
void p8_sbe_update_timer_expiry(uint64_t new_target)
{
	uint64_t count, gen, gen2, req, now;
	int64_t rc;

	/* Nothing to do if the SBE timer is dead or already targeted */
	if (!sbe_has_timer || new_target == sbe_timer_target)
		return;

	sbe_timer_target = new_target;

	_xscom_lock();
	now = mftb();
	/* Calculate how many increments from now, rounded up */
	if (now < new_target)
		count = (new_target - now + sbe_timer_inc - 1) / sbe_timer_inc;
	else
		count = 1;

	/* Max counter is 24-bit */
	if (count > 0xffffff)
		count = 0xffffff;
	/* Fabricate update request: valid bit + 24-bit count */
	req = (1ull << 63) | (count << 32);

	prlog(PR_TRACE, "SLW: TMR expiry: 0x%llx, req: %016llx\n", count, req);

	/* The request is only accepted when the generation count read
	 * before and after the write matches (and is even) — retry the
	 * whole sequence otherwise. */
	do {
		/* Grab generation and spin if odd */
		for (;;) {
			/* NOTE(review): 0xE0006 appears to be the SBE timer
			 * generation count register — confirm against the
			 * chip's SCOM map */
			rc = _xscom_read(sbe_timer_chip, 0xE0006, &gen, false);
			if (rc) {
				prerror("SLW: Error %lld reading tmr gen "
					" count\n", rc);
				_xscom_unlock();
				return;
			}
			if (!(gen & 1))
				break;
			/* Give up after ~1ms stuck at an odd generation */
			if (tb_compare(now + msecs_to_tb(1),
				       mftb()) == TB_ABEFOREB) {
				/**
				 * @fwts-label SLWTimerStuck
				 * @fwts-advice The SLeep/Winkle Engine (SLW)
				 * failed to increment the generation number
				 * within our timeout period (it *should* have
				 * done so within ~10us, not >1ms. OPAL uses
				 * the SLW timer to schedule some operations,
				 * but can fall back to the (much less frequent
				 * OPAL poller, which although does not affect
				 * functionality, runs *much* less frequently.
				 * This could have the effect of slow I2C
				 * operations (for example). It may also mean
				 * that you *had* an increase in jitter, due
				 * to slow interactions with SLW.
				 * This error may also occur if the machine
				 * is connected to via soft FSI.
				 */
				prerror("SLW: timer stuck, falling back to OPAL pollers. You will likely have slower I2C and may have experienced increased jitter.\n");
				prlog(PR_DEBUG,
				      "SLW: Stuck with odd generation !\n");
				_xscom_unlock();
				sbe_has_timer = false;
				p8_sbe_dump_timer_ffdc();
				return;
			}
		}

		/* NOTE(review): 0x5003A appears to be the SBE timer request
		 * register — confirm against the chip's SCOM map */
		rc = _xscom_write(sbe_timer_chip, 0x5003A, req, false);
		if (rc) {
			prerror("SLW: Error %lld writing tmr request\n", rc);
			_xscom_unlock();
			return;
		}

		/* Re-check gen count */
		rc = _xscom_read(sbe_timer_chip, 0xE0006, &gen2, false);
		if (rc) {
			prerror("SLW: Error %lld re-reading tmr gen "
				" count\n", rc);
			_xscom_unlock();
			return;
		}
	} while(gen != gen2);
	_xscom_unlock();

	/* Check if the timer is working. If at least 1ms has elapsed
	 * since the last call to this function, check that the gen
	 * count has changed
	 */
	if (tb_compare(sbe_last_gen_stamp + msecs_to_tb(1), now)
	    == TB_ABEFOREB) {
		if (sbe_last_gen == gen) {
			prlog(PR_ERR,
			      "SLW: Timer appears to not be running !\n");
			sbe_has_timer = false;
			p8_sbe_dump_timer_ffdc();
		}
		sbe_last_gen = gen;
		sbe_last_gen_stamp = mftb();
	}

	prlog(PR_TRACE, "SLW: gen: %llx\n", gen);
}
/* Helper to load a VPD LID. Pass a ptr to the corresponding LX keyword */ static void *vpd_lid_load(const uint8_t *lx, uint8_t lxrn, size_t *size) { /* Now this is a guess game as we don't have the info from the * pHyp folks. But basically, it seems to boil down to loading * a LID whose name is 0x80e000yy where yy is the last 2 digits * of the LX record in hex. * * [ Correction: After a chat with some folks, it looks like it's * actually 4 digits, though the lid number is limited to fff * so we weren't far off. ] * * For safety, we look for a matching LX record in an LXRn * (n = lxrn argument) or in VINI if lxrn=0xff */ uint32_t lid_no = 0x80e00000 | ((lx[6] & 0xf) << 8) | lx[7]; /* We don't quite know how to get to the LID directory so * we don't know the size. Let's allocate 16K. All the VPD LIDs * I've seen so far are much smaller. */ #define VPD_LID_MAX_SIZE 0x4000 void *data = malloc(VPD_LID_MAX_SIZE); char record[4] = "LXR0"; const void *valid_lx; uint8_t lx_size; int rc; if (!data) { prerror("VPD: Failed to allocate memory for LID\n"); return NULL; } /* Adjust LID number for flash side */ lid_no = fsp_adjust_lid_side(lid_no); printf("VPD: Trying to load VPD LID 0x%08x...\n", lid_no); *size = VPD_LID_MAX_SIZE; /* Load it from the FSP */ rc = fsp_fetch_data(0, FSP_DATASET_NONSP_LID, lid_no, 0, data, size); if (rc) { prerror("VPD: Error %d loading VPD LID\n", rc); goto fail; } /* Validate it */ if (lxrn < 9) record[3] = '0' + lxrn; else memcpy(record, "VINI", 4); valid_lx = vpd_find(data, *size, record, "LX", &lx_size); if (!valid_lx || lx_size != 8) { prerror("VPD: Cannot find validation LX record\n"); goto fail; } if (memcmp(valid_lx, lx, 8) != 0) { prerror("VPD: LX record mismatch !\n"); goto fail; } printf("VPD: Loaded %zu bytes\n", *size); /* Got it ! */ return realloc(data, *size); fail: free(data); return NULL; }