static bool __memory_parse(struct dt_node *root) { struct HDIF_common_hdr *ms_vpd; const struct msvpd_ms_addr_config *msac; const struct msvpd_total_config_ms *tcms; unsigned int size; ms_vpd = get_hdif(&spira.ntuples.ms_vpd, MSVPD_HDIF_SIG); if (!ms_vpd) { prerror("MS VPD: invalid\n"); op_display(OP_FATAL, OP_MOD_MEM, 0x0000); return false; } if (be32_to_cpu(spira.ntuples.ms_vpd.act_len) < sizeof(*ms_vpd)) { prerror("MS VPD: invalid size %u\n", be32_to_cpu(spira.ntuples.ms_vpd.act_len)); op_display(OP_FATAL, OP_MOD_MEM, 0x0001); return false; } prlog(PR_DEBUG, "MS VPD: is at %p\n", ms_vpd); msac = HDIF_get_idata(ms_vpd, MSVPD_IDATA_MS_ADDR_CONFIG, &size); if (!CHECK_SPPTR(msac) || size < sizeof(*msac)) { prerror("MS VPD: bad msac size %u @ %p\n", size, msac); op_display(OP_FATAL, OP_MOD_MEM, 0x0002); return false; } prlog(PR_DEBUG, "MS VPD: MSAC is at %p\n", msac); dt_add_property_u64(dt_root, DT_PRIVATE "maxmem", be64_to_cpu(msac->max_configured_ms_address)); tcms = HDIF_get_idata(ms_vpd, MSVPD_IDATA_TOTAL_CONFIG_MS, &size); if (!CHECK_SPPTR(tcms) || size < sizeof(*tcms)) { prerror("MS VPD: Bad tcms size %u @ %p\n", size, tcms); op_display(OP_FATAL, OP_MOD_MEM, 0x0003); return false; } prlog(PR_DEBUG, "MS VPD: TCMS is at %p\n", tcms); prlog(PR_DEBUG, "MS VPD: Maximum configured address: 0x%llx\n", (long long)be64_to_cpu(msac->max_configured_ms_address)); prlog(PR_DEBUG, "MS VPD: Maximum possible address: 0x%llx\n", (long long)be64_to_cpu(msac->max_possible_ms_address)); get_msareas(root, ms_vpd); prlog(PR_INFO, "MS VPD: Total MB of RAM: 0x%llx\n", (long long)be64_to_cpu(tcms->total_in_mb)); return true; }
static void vpd_add_ram_area(const struct HDIF_common_hdr *msarea) { unsigned int i; unsigned int ram_sz; const struct HDIF_common_hdr *ramarea; const struct HDIF_child_ptr *ramptr; const struct HDIF_ram_area_id *ram_id; struct dt_node *ram_node; ramptr = HDIF_child_arr(msarea, 0); if (!CHECK_SPPTR(ramptr)) { prerror("MS AREA: No RAM area at %p\n", msarea); return; } for (i = 0; i < be32_to_cpu(ramptr->count); i++) { ramarea = HDIF_child(msarea, ramptr, i, "RAM "); if (!CHECK_SPPTR(ramarea)) continue; ram_id = HDIF_get_idata(ramarea, 2, &ram_sz); if (!CHECK_SPPTR(ram_id)) continue; if ((be16_to_cpu(ram_id->flags) & RAM_AREA_INSTALLED) && (be16_to_cpu(ram_id->flags) & RAM_AREA_FUNCTIONAL)) { ram_node = dt_add_vpd_node(ramarea, 0, 1); if (ram_node) { add_chip_id_to_ram_area(msarea, ram_node); add_size_to_ram_area(ram_node, ramarea, 1); } } } }
/*
 * Attach an "ibm,chip-id" property to a RAM area node, derived from
 * the chip field of the MS area's first address-range entry.
 */
static void add_chip_id_to_ram_area(const struct HDIF_common_hdr *msarea,
				    struct dt_node *ram_area)
{
	const struct HDIF_array_hdr *ranges;
	const struct HDIF_ms_area_address_range *first_range;
	unsigned int len;

	/* Safe to assume pointers are valid here. */
	ranges = HDIF_get_idata(msarea, 4, &len);
	first_range = (const void *)ranges + be32_to_cpu(ranges->offset);

	dt_add_property_cells(ram_area, "ibm,chip-id",
			      pcid_to_chip_id(be32_to_cpu(first_range->chip)));
}
/*
 * Read the DIMM size keyword ("SZ" in the "VINI" record) from a RAM
 * area's FRU VPD blob and publish it as a "size" string property on
 * @ram_node.
 *
 * @ram_node: device-tree node to receive the property
 * @hdr:      HDIF header of the RAM area
 * @indx_vpd: idata index of the FRU VPD blob within @hdr
 *
 * Silently does nothing if the VPD blob or keyword is missing.
 */
static void add_size_to_ram_area(struct dt_node *ram_node,
				 const struct HDIF_common_hdr *hdr,
				 int indx_vpd)
{
	const void *fruvpd;
	unsigned int fruvpd_sz;
	const void *kw;
	char *str;
	uint8_t kwsz;

	fruvpd = HDIF_get_idata(hdr, indx_vpd, &fruvpd_sz);
	if (!CHECK_SPPTR(fruvpd))
		return;

	/* DIMM Size */
	kw = vpd_find(fruvpd, fruvpd_sz, "VINI", "SZ", &kwsz);
	if (!kw)
		return;

	/*
	 * The keyword data isn't NUL terminated, so copy it into a
	 * freshly zeroed buffer one byte larger.  Fix: check the
	 * allocation before memcpy() — the original wrote through a
	 * NULL pointer on allocation failure.
	 */
	str = zalloc(kwsz + 1);
	if (!str)
		return;
	memcpy(str, kw, kwsz);
	dt_add_property_string(ram_node, "size", str);
	free(str);
}
/*
 * Parse the HDAT host I2C device array attached to @hdr at @idata_index
 * and create i2c master / bus / device nodes underneath @xscom.
 *
 * Handles two HDAT quirks explicitly: arrays with a missing/garbage
 * version field (treated as v1) and device entries that are all
 * zeroes (skipped).  Returns 0 on success, -1 if the idata is absent
 * or the array version is unsupported.
 */
int parse_i2c_devs(const struct HDIF_common_hdr *hdr, int idata_index,
		   struct dt_node *xscom)
{
	struct dt_node *i2cm, *bus, *node;
	const struct hdat_i2c_type *type;
	const struct hdat_i2c_info *info;
	const struct i2c_dev *dev;
	const char *name, *compat;
	const struct host_i2c_hdr *ahdr;
	uint32_t dev_addr;
	uint32_t version;
	uint32_t size;
	uint32_t purpose;
	int i, count;

	/*
	 * This code makes a few assumptions about XSCOM addrs, etc
	 * and will need updating for new processors
	 */
	assert(proc_gen == proc_gen_p9);

	/*
	 * Emit an error if we get a newer version. This is an interim measure
	 * until the new version format is finalised.
	 */
	ahdr = HDIF_get_idata(hdr, idata_index, &size);
	if (!ahdr || !size)
		return -1;

	/*
	 * Some hostboots don't correctly fill the version field. On these
	 * the offset from the start of the header to the start of the array
	 * is 16 bytes.
	 */
	if (be32_to_cpu(ahdr->hdr.offset) == 16) {
		version = 1;
		prerror("I2C: HDAT device array has no version! Assuming v1\n");
	} else {
		version = be32_to_cpu(ahdr->version);
	}

	/* v2 entries parse as v1; anything newer is rejected outright. */
	if (version == 2) {
		prlog(PR_INFO,
		      "I2C: v%d found, but not supported. Parsing as v1\n",
		      version);
	} else if (version > 2) {
		prerror("I2C: v%d found, but not supported! THIS IS A BUG\n",
			version);
		return -1;
	}

	count = HDIF_get_iarray_size(hdr, idata_index);
	for (i = 0; i < count; i++) {
		/*
		 * NOTE(review): dev is not NULL-checked before is_zeros();
		 * presumably HDIF_get_iarray_item() can't fail once the
		 * array size was read — confirm against the helper.
		 */
		dev = HDIF_get_iarray_item(hdr, idata_index, i, &size);

		/*
		 * XXX: Some broken hostboots populate i2c devs with zeros.
		 * Workaround them for now.
		 */
		if (is_zeros(dev, size)) {
			prerror("I2C: Ignoring broken i2c dev %d\n", i);
			continue;
		}

		/*
		 * On some systems the CFAM I2C master is represented in the
		 * host I2C table as engine 6. There are only 4 (0, 1, 2, 3)
		 * engines accessible to the host via XSCOM so filter out
		 * engines outside this range so we don't create bogus
		 * i2cm@<addr> nodes.
		 */
		if (dev->i2cm_engine >= 4 && proc_gen == proc_gen_p9)
			continue;

		i2cm = get_i2cm_node(xscom, dev->i2cm_engine);
		bus = get_bus_node(i2cm, dev->i2cm_port,
				   be16_to_cpu(dev->i2c_bus_freq));

		/*
		 * Looks like hostboot gives the address as an 8 bit, left
		 * justified quantity (i.e it includes the R/W bit). So we need
		 * to strip it off to get an address linux can use.
		 */
		dev_addr = dev->dev_addr >> 1;

		purpose = be32_to_cpu(dev->purpose);
		type = map_type(dev->type);
		info = get_info(purpose);

		/* HACK: Hostboot doesn't export the correct type information
		 * for the DIMM SPD EEPROMs. This is a problem because SPD
		 * EEPROMs have a different wire protocol to the atmel,24XXXX
		 * series. The main difference being that SPD EEPROMs have an
		 * 8bit offset rather than a 16bit offset. This means that the
		 * driver will send 2 bytes when doing a random read,
		 * potentially overwriting part of the SPD information.
		 *
		 * Just to make things interested the FSP also gets the device
		 * type wrong. To work around both just set the device-type to
		 * "spd" for anything in the 0x50 to 0x57 range since that's the
		 * SPD eeprom range.
		 *
		 * XXX: Future chips might not use engine 3 for the DIMM buses.
		 */
		if (dev->i2cm_engine == 3 && dev_addr >= 0x50
		    && dev_addr < 0x58) {
			compat = "spd";
			name = "eeprom";
		} else if (type) {
			compat = type->compat;
			name = type->name;
		} else {
			name = "unknown";
			compat = NULL;
		}

		/*
		 * An i2c device is unknown if either the i2c device list is
		 * outdated or the device is marked as unknown (0xFF) in the
		 * hdat. Log both cases to see what/where/why.
		 */
		if (!type || dev->type == 0xFF) {
			/* Unknown devices are logged but get no DT node. */
			prlog(PR_NOTICE,
			      "HDAT I2C: found e%dp%d - %s@%x dp:%02x (%#x:%s)\n",
			      dev->i2cm_engine, dev->i2cm_port, name, dev_addr,
			      dev->dev_port, purpose, info->label);
			continue;
		}

		prlog(PR_DEBUG,
		      "HDAT I2C: found e%dp%d - %s@%x dp:%02x (%#x:%s)\n",
		      dev->i2cm_engine, dev->i2cm_port, name, dev_addr,
		      dev->dev_port, purpose, info->label);

		/*
		 * Multi-port device require special handling since we need to
		 * generate the device-specific DT bindings. For now we're just
		 * going to ignore them since these devices are owned by FW
		 * any way.
		 */
		if (dev->dev_port != 0xff)
			continue;

		node = dt_new_addr(bus, name, dev_addr);
		if (!node)
			continue;

		dt_add_property_cells(node, "reg", dev_addr);
		dt_add_property_cells(node, "link-id",
				      be32_to_cpu(dev->i2c_link));
		if (compat)
			dt_add_property_string(node, "compatible", compat);
		if (info->label)
			dt_add_property_string(node, "label", info->label);
		if (!info->whitelist)
			dt_add_property_string(node, "status", "reserved");

		/*
		 * Set a default timeout of 2s on the ports with a TPM. This is
		 * to work around a bug with certain TPM firmwares that can
		 * clock stretch for long periods of time and will lock up
		 * until they are power cycled if a STOP condition is sent
		 * during this period.
		 */
		if (dev->type == 0x3)
			dt_add_property_cells(bus, "timeout-ms", 2000);

		/* XXX: SLCA index? */
	}

	return 0;
}
/*
 * Walk the MS area children of the MS VPD tuple, adding VPD nodes and
 * memory address ranges to the device tree under @root.
 *
 * Errors that indicate corrupt structural data (bad child, bad idata,
 * short sizes) abort the whole walk with `return`; a not-installed or
 * not-functional area merely `continue`s to the next one.
 */
static void get_msareas(struct dt_node *root,
			const struct HDIF_common_hdr *ms_vpd)
{
	unsigned int i;
	const struct HDIF_child_ptr *msptr;

	/* First childptr refers to msareas. */
	msptr = HDIF_child_arr(ms_vpd, MSVPD_CHILD_MS_AREAS);
	if (!CHECK_SPPTR(msptr)) {
		prerror("MS VPD: no children at %p\n", ms_vpd);
		return;
	}

	for (i = 0; i < be32_to_cpu(msptr->count); i++) {
		const struct HDIF_common_hdr *msarea;
		const struct HDIF_array_hdr *arr;
		const struct HDIF_ms_area_address_range *arange;
		const struct HDIF_ms_area_id *id;
		const void *fruid;
		unsigned int size, j;
		u16 flags;

		msarea = HDIF_child(ms_vpd, msptr, i, "MSAREA");
		if (!CHECK_SPPTR(msarea))
			return;

		/* idata 2 is the MS area id/flags block. */
		id = HDIF_get_idata(msarea, 2, &size);
		if (!CHECK_SPPTR(id))
			return;
		if (size < sizeof(*id)) {
			prerror("MS VPD: %p msarea #%i id size too small!\n",
				ms_vpd, i);
			return;
		}

		flags = be16_to_cpu(id->flags);
		prlog(PR_DEBUG, "MS VPD: %p, area %i: %s %s %s\n",
		      ms_vpd, i,
		      flags & MS_AREA_INSTALLED ?
		      "installed" : "not installed",
		      flags & MS_AREA_FUNCTIONAL ?
		      "functional" : "not functional",
		      flags & MS_AREA_SHARED ?
		      "shared" : "not shared");

		/* Skip areas that aren't both installed and functional. */
		if ((flags & (MS_AREA_INSTALLED|MS_AREA_FUNCTIONAL)) !=
		    (MS_AREA_INSTALLED|MS_AREA_FUNCTIONAL))
			continue;

		/* idata 4 is the address-range array. */
		arr = HDIF_get_idata(msarea, 4, &size);
		if (!CHECK_SPPTR(arr))
			continue;

		if (size < sizeof(*arr)) {
			prerror("MS VPD: %p msarea #%i arr size too small!\n",
				ms_vpd, i);
			return;
		}

		if (be32_to_cpu(arr->eactsz) < sizeof(*arange)) {
			prerror("MS VPD: %p msarea #%i arange size too small!\n",
				ms_vpd, i);
			return;
		}

		/* idata 0 is the FRU id; fetched only as a validity check. */
		fruid = HDIF_get_idata(msarea, 0, &size);
		if (!CHECK_SPPTR(fruid))
			return;

		/* Add Raiser card VPD */
		if (be16_to_cpu(id->parent_type) & MS_PTYPE_RISER_CARD)
			dt_add_vpd_node(msarea, 0, 1);

		/* Add RAM Area VPD */
		vpd_add_ram_area(msarea);

		/* This offset is from the arr, not the header! */
		arange = (void *)arr + be32_to_cpu(arr->offset);

		/* Entries are esize bytes apart; ecnt of them in total. */
		for (j = 0; j < be32_to_cpu(arr->ecnt); j++) {
			if (!add_address_range(root, id, arange))
				return;
			arange = (void *)arange + be32_to_cpu(arr->esize);
		}
	}
}
/*
 * Create a device-tree CPU node under @cpus from an SP PACA entry.
 *
 * @paca: HDIF header of the PACA entry (used for index/idata lookups)
 * @id:   the PACA's cpu-id idata block
 * @okay: whether the core should be marked usable (passed through to
 *        add_core_common / add_core_cache_info)
 *
 * Returns the new node, or NULL if the timebase or cache idata is
 * missing/short.
 *
 * NOTE(review): the return of add_core_common() is passed straight to
 * dt_add_property_cells() without a NULL check — presumably it cannot
 * fail; confirm against its definition.
 */
static struct dt_node *add_cpu_node(struct dt_node *cpus,
				    const struct HDIF_common_hdr *paca,
				    const struct sppaca_cpu_id *id,
				    bool okay)
{
	const struct sppaca_cpu_timebase *timebase;
	const struct sppaca_cpu_cache *cache;
	const struct sppaca_cpu_attr *attr;
	struct dt_node *cpu;
	u32 no, size, ve_flags, l2_phandle, chip_id;

	/* We use the process_interrupt_line as the res id */
	no = be32_to_cpu(id->process_interrupt_line);

	ve_flags = be32_to_cpu(id->verify_exists_flags);
	prlog(PR_INFO, "CPU[%i]: PIR=%i RES=%i %s %s(%u threads)\n",
	      paca_index(paca), be32_to_cpu(id->pir), no,
	      ve_flags & CPU_ID_PACA_RESERVED
	      ? "**RESERVED**" : cpu_state(ve_flags),
	      ve_flags & CPU_ID_SECONDARY_THREAD
	      ? "[secondary] " :
	      (be32_to_cpu(id->pir) == boot_cpu->pir ? "[boot] " : ""),
	      ((ve_flags & CPU_ID_NUM_SECONDARY_THREAD_MASK)
	       >> CPU_ID_NUM_SECONDARY_THREAD_SHIFT) + 1);

	timebase = HDIF_get_idata(paca, SPPACA_IDATA_TIMEBASE, &size);
	if (!timebase || size < sizeof(*timebase)) {
		prerror("CPU[%i]: bad timebase size %u @ %p\n",
			paca_index(paca), size, timebase);
		return NULL;
	}

	cache = HDIF_get_idata(paca, SPPACA_IDATA_CACHE_SIZE, &size);
	if (!cache || size < sizeof(*cache)) {
		prerror("CPU[%i]: bad cache size %u @ %p\n",
			paca_index(paca), size, cache);
		return NULL;
	}

	cpu = add_core_common(cpus, cache, timebase, no, okay);

	/* Core attributes */
	attr = HDIF_get_idata(paca, SPPACA_IDATA_CPU_ATTR, &size);
	if (attr)
		add_core_attr(cpu, be32_to_cpu(attr->attr));

	/* Add cache info */
	l2_phandle = add_core_cache_info(cpus, cache, no, okay);
	dt_add_property_cells(cpu, "l2-cache", l2_phandle);

	/* We append the secondary cpus in __cpu_parse */
	dt_add_property_cells(cpu, "ibm,ppc-interrupt-server#s", no);

	dt_add_property_cells(cpu, DT_PRIVATE "hw_proc_id",
			      be32_to_cpu(id->hardware_proc_id));
	dt_add_property_cells(cpu, "ibm,pir", be32_to_cpu(id->pir));

	chip_id = pcid_to_chip_id(be32_to_cpu(id->processor_chip_id));
	dt_add_property_cells(cpu, "ibm,chip-id", chip_id);

	return cpu;
}