static struct dt_node *dt_create_i2c_device(struct dt_node *bus, uint8_t addr,
					    const char *name,
					    const char *compat,
					    const char *label)
{
	struct dt_node *dev;

	dev = dt_new_addr(bus, name, addr);
	if (!dev)
		return NULL;

	dt_add_property_string(dev, "compatible", compat);
	dt_add_property_string(dev, "label", label);
	dt_add_property_cells(dev, "reg", addr);
	dt_add_property_string(dev, "status", "reserved");

	return dev;
}
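/*
 * Illustrative sketch only: one way the helper above could be called when
 * describing a device on an existing bus node. The bus argument, the 0x50
 * address and the "atmel,24c128" compatible/label strings are assumptions
 * made up for this example, not values taken from any platform table.
 */
static void __unused example_add_eeprom(struct dt_node *bus)
{
	/* 7-bit address 0x50, marked "reserved" by the helper */
	if (!dt_create_i2c_device(bus, 0x50, "eeprom", "atmel,24c128",
				  "example-eeprom"))
		prerror("I2C: failed to create example eeprom node\n");
}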
static void trace_add_dt_props(void)
{
	unsigned int i;
	u64 *prop, tmask;

	prop = malloc(sizeof(u64) * 2 * debug_descriptor.num_traces);

	for (i = 0; i < debug_descriptor.num_traces; i++) {
		prop[i * 2] = cpu_to_fdt64(debug_descriptor.trace_phys[i]);
		prop[i * 2 + 1] = cpu_to_fdt64(debug_descriptor.trace_size[i]);
	}

	dt_add_property(opal_node, "ibm,opal-traces",
			prop, sizeof(u64) * 2 * i);
	free(prop);

	tmask = (uint64_t)&debug_descriptor.trace_mask;
	dt_add_property_cells(opal_node, "ibm,opal-trace-mask",
			      hi32(tmask), lo32(tmask));
}
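/*
 * Sketch of the consumer side (an assumption for illustration, not skiboot
 * code): "ibm,opal-traces" as built above is a flat array of (address, size)
 * pairs, each a big-endian u64, so a reader walks it two entries at a time.
 */
static void __unused example_dump_trace_prop(const u64 *prop, size_t len)
{
	unsigned int i, entries = len / (2 * sizeof(u64));

	for (i = 0; i < entries; i++)
		prlog(PR_DEBUG, "trace buffer %u: phys=0x%llx size=0x%llx\n",
		      i, (unsigned long long)fdt64_to_cpu(prop[i * 2]),
		      (unsigned long long)fdt64_to_cpu(prop[i * 2 + 1]));
}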
void nvram_read_complete(bool success)
{
	struct dt_node *np;

	/* Read not successful, error out and free the buffer */
	if (!success) {
		free(nvram_image);
		nvram_size = 0;
		return;
	}

	/* Check and maybe format nvram */
	nvram_check();

	/* Add nvram node */
	np = dt_new(opal_node, "nvram");
	dt_add_property_cells(np, "#bytes", nvram_size);
	dt_add_property_string(np, "compatible", "ibm,opal-nvram");

	/* Mark ready */
	nvram_ready = true;
}
int parse_i2c_devs(const struct HDIF_common_hdr *hdr, int idata_index,
		   struct dt_node *xscom)
{
	struct dt_node *i2cm, *bus, *node;
	const struct hdat_i2c_type *type;
	const struct hdat_i2c_info *info;
	const struct i2c_dev *dev;
	const char *name, *compat;
	const struct host_i2c_hdr *ahdr;
	uint32_t dev_addr;
	uint32_t version;
	uint32_t size;
	uint32_t purpose;
	int i, count;

	/*
	 * This code makes a few assumptions about XSCOM addrs, etc
	 * and will need updating for new processors
	 */
	assert(proc_gen == proc_gen_p9);

	/*
	 * Emit an error if we get a newer version. This is an interim measure
	 * until the new version format is finalised.
	 */
	ahdr = HDIF_get_idata(hdr, idata_index, &size);
	if (!ahdr || !size)
		return -1;

	/*
	 * Some hostboots don't correctly fill the version field. On these
	 * the offset from the start of the header to the start of the array
	 * is 16 bytes.
	 */
	if (be32_to_cpu(ahdr->hdr.offset) == 16) {
		version = 1;
		prerror("I2C: HDAT device array has no version! Assuming v1\n");
	} else {
		version = be32_to_cpu(ahdr->version);
	}

	if (version == 2) {
		prlog(PR_INFO,
		      "I2C: v%d found, but not supported. Parsing as v1\n",
		      version);
	} else if (version > 2) {
		prerror("I2C: v%d found, but not supported! THIS IS A BUG\n",
			version);
		return -1;
	}

	count = HDIF_get_iarray_size(hdr, idata_index);
	for (i = 0; i < count; i++) {
		dev = HDIF_get_iarray_item(hdr, idata_index, i, &size);

		/*
		 * XXX: Some broken hostboots populate i2c devs with zeros.
		 * Workaround them for now.
		 */
		if (is_zeros(dev, size)) {
			prerror("I2C: Ignoring broken i2c dev %d\n", i);
			continue;
		}

		/*
		 * On some systems the CFAM I2C master is represented in the
		 * host I2C table as engine 6. There are only 4 (0, 1, 2, 3)
		 * engines accessible to the host via XSCOM so filter out
		 * engines outside this range so we don't create bogus
		 * i2cm@<addr> nodes.
		 */
		if (dev->i2cm_engine >= 4 && proc_gen == proc_gen_p9)
			continue;

		i2cm = get_i2cm_node(xscom, dev->i2cm_engine);
		bus = get_bus_node(i2cm, dev->i2cm_port,
				   be16_to_cpu(dev->i2c_bus_freq));

		/*
		 * Looks like hostboot gives the address as an 8-bit, left
		 * justified quantity (i.e. it includes the R/W bit). So we
		 * need to strip it off to get an address Linux can use.
		 */
		dev_addr = dev->dev_addr >> 1;

		purpose = be32_to_cpu(dev->purpose);
		type = map_type(dev->type);
		info = get_info(purpose);

		/*
		 * HACK: Hostboot doesn't export the correct type information
		 * for the DIMM SPD EEPROMs. This is a problem because SPD
		 * EEPROMs have a different wire protocol to the atmel,24XXXX
		 * series. The main difference being that SPD EEPROMs have an
		 * 8-bit offset rather than a 16-bit offset. This means that
		 * the driver will send 2 bytes when doing a random read,
		 * potentially overwriting part of the SPD information.
		 *
		 * Just to make things interesting the FSP also gets the
		 * device type wrong. To work around both just set the
		 * device-type to "spd" for anything in the 0x50 to 0x57
		 * range since that's the SPD eeprom range.
		 *
		 * XXX: Future chips might not use engine 3 for the DIMM
		 * buses.
		 */
		if (dev->i2cm_engine == 3 && dev_addr >= 0x50 &&
		    dev_addr < 0x58) {
			compat = "spd";
			name = "eeprom";
		} else if (type) {
			compat = type->compat;
			name = type->name;
		} else {
			name = "unknown";
			compat = NULL;
		}

		/*
		 * An i2c device is unknown if either the i2c device list is
		 * outdated or the device is marked as unknown (0xFF) in the
		 * hdat. Log both cases to see what/where/why.
		 */
		if (!type || dev->type == 0xFF) {
			prlog(PR_NOTICE,
			      "HDAT I2C: found e%dp%d - %s@%x dp:%02x (%#x:%s)\n",
			      dev->i2cm_engine, dev->i2cm_port, name, dev_addr,
			      dev->dev_port, purpose, info->label);
			continue;
		}

		prlog(PR_DEBUG,
		      "HDAT I2C: found e%dp%d - %s@%x dp:%02x (%#x:%s)\n",
		      dev->i2cm_engine, dev->i2cm_port, name, dev_addr,
		      dev->dev_port, purpose, info->label);

		/*
		 * Multi-port devices require special handling since we need
		 * to generate the device-specific DT bindings. For now we're
		 * just going to ignore them since these devices are owned by
		 * FW anyway.
		 */
		if (dev->dev_port != 0xff)
			continue;

		node = dt_new_addr(bus, name, dev_addr);
		if (!node)
			continue;

		dt_add_property_cells(node, "reg", dev_addr);
		dt_add_property_cells(node, "link-id",
				      be32_to_cpu(dev->i2c_link));
		if (compat)
			dt_add_property_string(node, "compatible", compat);
		if (info->label)
			dt_add_property_string(node, "label", info->label);

		if (!info->whitelist)
			dt_add_property_string(node, "status", "reserved");

		/*
		 * Set a default timeout of 2s on the ports with a TPM. This
		 * is to work around a bug with certain TPM firmwares that
		 * can clock stretch for long periods of time and will lock
		 * up until they are power cycled if a STOP condition is sent
		 * during this period.
		 */
		if (dev->type == 0x3)
			dt_add_property_cells(bus, "timeout-ms", 2000);

		/* XXX: SLCA index? */
	}

	return 0;
}
int main(void)
{
	const struct dt_property *names, *ranges;
	struct mem_region *r;
	unsigned int i, l, c;
	uint64_t *rangep;
	const char *name;
	void *buf;

	/* Use malloc for the heap, so valgrind can find issues. */
	skiboot_heap.start = (long)real_malloc(TEST_HEAP_SIZE);
	skiboot_heap.len = TEST_HEAP_SIZE;
	skiboot_os_reserve.len = skiboot_heap.start;

	dt_root = dt_new_root("");
	dt_add_property_cells(dt_root, "#address-cells", 2);
	dt_add_property_cells(dt_root, "#size-cells", 2);

	buf = real_malloc(1024*1024);
	add_mem_node((unsigned long)buf, 1024*1024);

	/* Now convert. */
	mem_region_init();

	/* create our reservations */
	for (i = 0; i < ARRAY_SIZE(test_regions); i++)
		mem_reserve(test_regions[i].name,
			    test_regions[i].addr, 0x1000);

	/* release unused */
	mem_region_release_unused();

	/* and create reservations */
	mem_region_add_dt_reserved();

	/* ensure we can't create further reservations */
	r = new_region("test.4", 0x5000, 0x1000, NULL, REGION_RESERVED);
	assert(!add_region(r));

	/* check dt properties */
	names = dt_find_property(dt_root, "reserved-names");
	ranges = dt_find_property(dt_root, "reserved-ranges");

	assert(names && ranges);

	/* walk through names & ranges properties, ensuring that the test
	 * regions are all present */
	for (name = names->prop, rangep = (uint64_t *)ranges->prop, c = 0;
	     name < names->prop + names->len;
	     name += l, rangep += 2) {
		uint64_t addr;

		addr = dt_get_number(rangep, 2);
		l = strlen(name) + 1;

		for (i = 0; i < ARRAY_SIZE(test_regions); i++) {
			if (strcmp(test_regions[i].name, name))
				continue;

			assert(test_regions[i].addr == addr);
			assert(!test_regions[i].found);
			test_regions[i].found = true;
			c++;
		}
	}

	assert(c == ARRAY_SIZE(test_regions));

	dt_free(dt_root);
	real_free(buf);
	real_free((void *)(long)skiboot_heap.start);
	return 0;
}
static struct dt_node *flash_add_dt_node(struct flash *flash, int id)
{
	int i;
	int rc;
	const char *name;
	bool ecc;
	struct ffs_handle *ffs;
	int ffs_part_num, ffs_part_start, ffs_part_size;
	struct dt_node *flash_node;
	struct dt_node *partition_container_node;
	struct dt_node *partition_node;

	flash_node = dt_new_addr(opal_node, "flash", id);
	dt_add_property_strings(flash_node, "compatible", "ibm,opal-flash");
	dt_add_property_cells(flash_node, "ibm,opal-id", id);
	dt_add_property_u64(flash_node, "reg", flash->size);
	dt_add_property_cells(flash_node, "ibm,flash-block-size",
			      flash->block_size);
	if (flash->no_erase)
		dt_add_property(flash_node, "no-erase", NULL, 0);

	/* we fix to 32-bits */
	dt_add_property_cells(flash_node, "#address-cells", 1);
	dt_add_property_cells(flash_node, "#size-cells", 1);

	/* Add partition container node */
	partition_container_node = dt_new(flash_node, "partitions");
	dt_add_property_strings(partition_container_node, "compatible",
				"fixed-partitions");

	/* we fix to 32-bits */
	dt_add_property_cells(partition_container_node, "#address-cells", 1);
	dt_add_property_cells(partition_container_node, "#size-cells", 1);

	/* Add partitions */
	for (i = 0, name = NULL; i < ARRAY_SIZE(part_name_map); i++) {
		name = part_name_map[i].name;

		rc = ffs_init(0, flash->size, flash->bl, &ffs, 1);
		if (rc) {
			prerror("FLASH: Can't open ffs handle\n");
			continue;
		}

		rc = ffs_lookup_part(ffs, name, &ffs_part_num);
		if (rc) {
			/* This is not an error per-se, some partitions
			 * are purposefully absent, don't spam the logs
			 */
			prlog(PR_DEBUG, "FLASH: No %s partition\n", name);
			ffs_close(ffs);
			continue;
		}

		rc = ffs_part_info(ffs, ffs_part_num, NULL, &ffs_part_start,
				   NULL, &ffs_part_size, &ecc);
		/* The handle is only needed for the lookups above, release it
		 * before moving on so we don't leak one per iteration. */
		ffs_close(ffs);
		if (rc) {
			prerror("FLASH: Failed to get %s partition info\n",
				name);
			continue;
		}

		partition_node = dt_new_addr(partition_container_node,
					     "partition", ffs_part_start);
		dt_add_property_strings(partition_node, "label", name);
		dt_add_property_cells(partition_node, "reg",
				      ffs_part_start, ffs_part_size);
		if (part_name_map[i].id != RESOURCE_ID_KERNEL_FW) {
			/* Mark all partitions other than the full PNOR and
			 * the boot kernel firmware as read only. These two
			 * partitions are the only partitions that are
			 * properly erase block aligned at this time.
			 */
			dt_add_property(partition_node, "read-only", NULL, 0);
		}
	}

	partition_node = dt_new_addr(partition_container_node, "partition", 0);
	dt_add_property_strings(partition_node, "label", "PNOR");
	dt_add_property_cells(partition_node, "reg", 0, flash->size);

	return flash_node;
}
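/*
 * For reference, the tree built above ends up looking roughly like the
 * sketch below; the partition offset, size and label are made-up example
 * values, not anything a real PNOR layout guarantees.
 *
 *   flash@0 {
 *           compatible = "ibm,opal-flash";
 *           ibm,opal-id = <0>;
 *           reg = < flash size, as a u64 >;
 *           ibm,flash-block-size = < block size >;
 *           #address-cells = <1>;
 *           #size-cells = <1>;
 *           partitions {
 *                   compatible = "fixed-partitions";
 *                   #address-cells = <1>;
 *                   #size-cells = <1>;
 *                   partition@a1000 {
 *                           label = "EXAMPLE";
 *                           reg = <0xa1000 0x10000>;
 *                           read-only;
 *                   };
 *                   partition@0 {
 *                           label = "PNOR";
 *                           reg = <0x0 full-flash-size>;
 *                   };
 *           };
 *   };
 */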
int nx_cfg_rx_fifo(struct dt_node *node, const char *compat,
		   const char *priority, u32 gcid, u32 pid, u32 tid,
		   u64 umac_bar, u64 umac_notify)
{
	u64 cfg;
	int rc, size;
	uint64_t fifo;
	u32 lpid = 0xfff; /* All 1's for 12 bits in UMAC notify match reg */
#define MATCH_ENABLE  1

	fifo = (uint64_t)local_alloc(gcid, RX_FIFO_SIZE, RX_FIFO_SIZE);
	assert(fifo);

	/*
	 * When configuring the address of the Rx FIFO into the Receive FIFO
	 * BAR, we should _NOT_ shift the address into bits 8:53. Instead we
	 * should copy the address as is and VAS/NX will extract relevant
	 * bits.
	 */

	/*
	 * Section 5.21 of P9 NX Workbook Version 2.42 shows Receive FIFO BAR
	 * 54:56 represents FIFO size
	 * 000 = 1KB, 8 CRBs
	 * 001 = 2KB, 16 CRBs
	 * 010 = 4KB, 32 CRBs
	 * 011 = 8KB, 64 CRBs
	 * 100 = 16KB, 128 CRBs
	 * 101 = 32KB, 256 CRBs
	 * 110, 111 = reserved
	 */
	size = RX_FIFO_SIZE / 1024;
	cfg = SETFIELD(NX_P9_RX_FIFO_BAR_SIZE, fifo, ilog2(size));

	rc = xscom_write(gcid, umac_bar, cfg);
	if (rc) {
		prerror("NX%d: ERROR: Setting UMAC FIFO bar failure %d\n",
			gcid, rc);
		return rc;
	} else
		prlog(PR_DEBUG, "NX%d: Setting UMAC FIFO bar 0x%016lx\n",
		      gcid, (unsigned long)cfg);

	rc = xscom_read(gcid, umac_notify, &cfg);
	if (rc)
		return rc;

	/*
	 * VAS issues asb_notify with the unique ID to identify the target
	 * co-processor/engine. The logical partition ID (lpid), process ID
	 * (pid) and thread ID (tid) combination is used to define this
	 * unique ID in the system. Export these values in the device tree
	 * so that the driver can configure the RxFIFO with VAS. Set these
	 * values in the RxFIFO notify match register for each engine, which
	 * compares the ID against each request.
	 *
	 * To define the unique identification, 0xfff (1's for 12 bits), the
	 * co-processor type, and the counter within the coprocessor type are
	 * used for lpid, pid and tid respectively.
	 */
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_LPID, cfg, lpid);
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_PID, cfg, pid);
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_TID, cfg, tid);
	cfg = SETFIELD(NX_P9_RX_FIFO_NOTIFY_MATCH_MATCH_ENABLE, cfg,
		       MATCH_ENABLE);

	rc = xscom_write(gcid, umac_notify, cfg);
	if (rc) {
		prerror("NX%d: ERROR: Setting UMAC notify match failure %d\n",
			gcid, rc);
		return rc;
	} else
		prlog(PR_DEBUG, "NX%d: Setting UMAC notify match 0x%016lx\n",
		      gcid, (unsigned long)cfg);

	dt_add_property_string(node, "compatible", compat);
	dt_add_property_string(node, "priority", priority);
	dt_add_property_u64(node, "rx-fifo-address", fifo);
	dt_add_property_cells(node, "rx-fifo-size", RX_FIFO_SIZE);
	dt_add_property_cells(node, "lpid", lpid);
	dt_add_property_cells(node, "pid", pid);
	dt_add_property_cells(node, "tid", tid);

	return 0;
}
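/*
 * Worked example (sketch, not part of the driver): the BAR size field set
 * above is ilog2(RX_FIFO_SIZE / 1024), matching the workbook table quoted
 * in the function. The 32KB figure below is an assumed example value, not
 * the actual RX_FIFO_SIZE used by skiboot.
 */
static void __unused example_fifo_size_encoding(void)
{
	const int example_fifo_bytes = 32 * 1024;

	/* 32KB / 1KB = 32, ilog2(32) = 5 -> 0b101, "32KB, 256 CRBs" */
	assert(ilog2(example_fifo_bytes / 1024) == 5);
}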
struct dt_node *add_core_common(struct dt_node *cpus,
				const struct sppcia_cpu_cache *cache,
				const struct sppaca_cpu_timebase *tb,
				uint32_t int_server, bool okay)
{
	const char *name;
	struct dt_node *cpu;
	uint32_t version;
	uint64_t freq;
	const uint8_t pa_features[] = {
		6, 0, 0xf6, 0x3f, 0xc7, 0x00, 0x80, 0xc0 };

	printf(" Cache: I=%u D=%u/%u/%u/%u\n",
	       be32_to_cpu(cache->icache_size_kb),
	       be32_to_cpu(cache->l1_dcache_size_kb),
	       be32_to_cpu(cache->l2_dcache_size_kb),
	       be32_to_cpu(cache->l3_dcache_size_kb),
	       be32_to_cpu(cache->l35_dcache_size_kb));

	/* Use the boot CPU PVR to make up a CPU name in the device-tree
	 * since the HDAT doesn't seem to tell....
	 */
	version = mfspr(SPR_PVR);
	switch(PVR_TYPE(version)) {
	case PVR_TYPE_P7:
		name = "PowerPC,POWER7";
		break;
	case PVR_TYPE_P7P:
		name = "PowerPC,POWER7+";
		break;
	case PVR_TYPE_P8E:
	case PVR_TYPE_P8:
		name = "PowerPC,POWER8";
		break;
	default:
		name = "PowerPC,Unknown";
	}

	cpu = dt_new_addr(cpus, name, int_server);
	assert(cpu);
	dt_add_property_string(cpu, "device_type", "cpu");
	dt_add_property_string(cpu, "status", okay ? "okay" : "bad");
	dt_add_property_cells(cpu, "reg", int_server);
	dt_add_property_cells(cpu, "cpu-version", version);
	dt_add_property(cpu, "64-bit", NULL, 0);
	dt_add_property(cpu, "32-64-bridge", NULL, 0);
	dt_add_property(cpu, "graphics", NULL, 0);
	dt_add_property(cpu, "general-purpose", NULL, 0);
	dt_add_property_cells(cpu, "ibm,processor-segment-sizes",
			      0x1c, 0x28, 0xffffffff, 0xffffffff);
	dt_add_property_cells(cpu, "ibm,processor-page-sizes",
			      0xc, 0x10, 0x18, 0x22);

	/* Page size encodings appear to be the same for P7 and P8 */
	dt_add_property_cells(cpu, "ibm,segment-page-sizes",
		0x0c, 0x000, 3, 0x0c, 0x0000,  /*  4K seg  4k pages */
				0x10, 0x0007,  /*  4K seg 64k pages */
				0x18, 0x0038,  /*  4K seg 16M pages */
		0x10, 0x110, 2, 0x10, 0x0001,  /* 64K seg 64k pages */
				0x18, 0x0008,  /* 64K seg 16M pages */
		0x18, 0x100, 1, 0x18, 0x0000,  /* 16M seg 16M pages */
		0x22, 0x120, 1, 0x22, 0x0003); /* 16G seg 16G pages */

	dt_add_property(cpu, "ibm,pa-features",
			pa_features, sizeof(pa_features));
	dt_add_property_cells(cpu, "ibm,slb-size", 0x20);

	dt_add_property_cells(cpu, "ibm,vmx", 0x2);
	dt_add_property_cells(cpu, "ibm,dfp", 0x2);
	dt_add_property_cells(cpu, "ibm,purr", 0x1);
	dt_add_property_cells(cpu, "ibm,spurr", 0x1);

	/*
	 * Do not create "clock-frequency" if the frequency doesn't
	 * fit in a single cell
	 */
	freq = ((uint64_t)be32_to_cpu(tb->actual_clock_speed)) * 1000000ul;
	if (freq <= 0xfffffffful)
		dt_add_property_cells(cpu, "clock-frequency", freq);
	dt_add_property_cells(cpu, "ibm,extended-clock-frequency",
			      hi32(freq), lo32(freq));

	/* FIXME: Hardcoding is bad. */
	dt_add_property_cells(cpu, "timebase-frequency", 512000000);
	dt_add_property_cells(cpu, "ibm,extended-timebase-frequency",
			      0, 512000000);

	dt_add_property_cells(cpu, "reservation-granule-size",
			      be32_to_cpu(cache->reservation_size));

	dt_add_property_cells(cpu, "d-tlb-size",
			      be32_to_cpu(cache->dtlb_entries));
	dt_add_property_cells(cpu, "i-tlb-size",
			      be32_to_cpu(cache->itlb_entries));
	/* Assume unified TLB */
	dt_add_property_cells(cpu, "tlb-size",
			      be32_to_cpu(cache->dtlb_entries));
	dt_add_property_cells(cpu, "d-tlb-sets",
			      be32_to_cpu(cache->dtlb_assoc_sets));
	dt_add_property_cells(cpu, "i-tlb-sets",
			      be32_to_cpu(cache->itlb_assoc_sets));
	dt_add_property_cells(cpu, "tlb-sets",
			      be32_to_cpu(cache->dtlb_assoc_sets));

	dt_add_property_cells(cpu, "d-cache-block-size",
			      be32_to_cpu(cache->dcache_block_size));
	dt_add_property_cells(cpu, "i-cache-block-size",
			      be32_to_cpu(cache->icache_block_size));
	dt_add_property_cells(cpu, "d-cache-size",
			      be32_to_cpu(cache->l1_dcache_size_kb)*1024);
	dt_add_property_cells(cpu, "i-cache-size",
			      be32_to_cpu(cache->icache_size_kb)*1024);
	dt_add_property_cells(cpu, "i-cache-sets",
			      be32_to_cpu(cache->icache_assoc_sets));
	dt_add_property_cells(cpu, "d-cache-sets",
			      be32_to_cpu(cache->dcache_assoc_sets));

	if (cache->icache_line_size != cache->icache_block_size)
		dt_add_property_cells(cpu, "i-cache-line-size",
				      be32_to_cpu(cache->icache_line_size));
	if (cache->l1_dcache_line_size != cache->dcache_block_size)
		dt_add_property_cells(cpu, "d-cache-line-size",
				      be32_to_cpu(cache->l1_dcache_line_size));
	return cpu;
}
int main(void)
{
	struct dt_node *root, *c1, *c2, *gc1, *gc2, *gc3, *ggc1, *i;
	const struct dt_property *p;
	struct dt_property *p2;
	unsigned int n;

	root = dt_new_root("root");
	assert(!list_top(&root->properties, struct dt_property, list));
	c1 = dt_new(root, "c1");
	assert(!list_top(&c1->properties, struct dt_property, list));
	c2 = dt_new(root, "c2");
	assert(!list_top(&c2->properties, struct dt_property, list));
	gc1 = dt_new(c1, "gc1");
	assert(!list_top(&gc1->properties, struct dt_property, list));
	gc2 = dt_new(c1, "gc2");
	assert(!list_top(&gc2->properties, struct dt_property, list));
	gc3 = dt_new(c1, "gc3");
	assert(!list_top(&gc3->properties, struct dt_property, list));
	ggc1 = dt_new(gc1, "ggc1");
	assert(!list_top(&ggc1->properties, struct dt_property, list));

	for (n = 0, i = dt_first(root); i; i = dt_next(root, i), n++) {
		assert(!list_top(&i->properties, struct dt_property, list));
		dt_add_property_cells(i, "visited", 1);
	}
	assert(n == 6);

	for (n = 0, i = dt_first(root); i; i = dt_next(root, i), n++) {
		p = list_top(&i->properties, struct dt_property, list);
		assert(strcmp(p->name, "visited") == 0);
		assert(p->len == sizeof(u32));
		assert(fdt32_to_cpu(*(u32 *)p->prop) == 1);
	}
	assert(n == 6);

	dt_add_property_cells(c1, "some-property", 1, 2, 3);
	p = dt_find_property(c1, "some-property");
	assert(p);
	assert(strcmp(p->name, "some-property") == 0);
	assert(p->len == sizeof(u32) * 3);
	assert(fdt32_to_cpu(*(u32 *)p->prop) == 1);
	assert(fdt32_to_cpu(*((u32 *)p->prop + 1)) == 2);
	assert(fdt32_to_cpu(*((u32 *)p->prop + 2)) == 3);

	/* Test freeing a single node */
	assert(!list_empty(&gc1->children));
	dt_free(ggc1);
	assert(list_empty(&gc1->children));

	/* Test rodata logic. */
	assert(!is_rodata("hello"));
	assert(is_rodata(__rodata_start));
	strcpy(__rodata_start, "name");
	ggc1 = dt_new(root, __rodata_start);
	assert(ggc1->name == __rodata_start);

	/* Test string node. */
	dt_add_property_string(ggc1, "somestring", "someval");
	assert(dt_has_node_property(ggc1, "somestring", "someval"));
	assert(!dt_has_node_property(ggc1, "somestrin", "someval"));
	assert(!dt_has_node_property(ggc1, "somestring", "someva"));
	assert(!dt_has_node_property(ggc1, "somestring", "somevale"));

	/* Test resizing property. */
	p = p2 = __dt_find_property(c1, "some-property");
	assert(p);
	n = p2->len;
	while (p2 == p) {
		n *= 2;
		dt_resize_property(&p2, n);
	}

	assert(dt_find_property(c1, "some-property") == p2);
	list_check(&c1->properties, "properties after resizing");

	dt_del_property(c1, p2);
	list_check(&c1->properties, "properties after delete");

	/* No leaks for valgrind! */
	dt_free(root);
	return 0;
}
struct dt_node *add_core_common(struct dt_node *cpus,
				const struct sppcia_cpu_cache *cache,
				const struct sppaca_cpu_timebase *tb,
				uint32_t int_server, bool okay)
{
	const char *name;
	struct dt_node *cpu;
	uint32_t version;
	uint64_t freq;
	const uint8_t pa_features_p7[] = {
		6, 0, 0xf6, 0x3f, 0xc7, 0x00, 0x80, 0xc0 };
	const uint8_t pa_features_p7p[] = {
		6, 0, 0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xc0 };
	const uint8_t pa_features_p8[] = {
		24, 0,
		0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xd0, 0x80, 0x00,
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00,
	};
	const uint8_t pa_features_p9n_dd20[] = {
		64, 0,
		0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xd0, 0x80, 0x00, /*  0 ..  7 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*  8 .. 15 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 16 .. 23 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 24 .. 31 */
		0x80, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, /* 32 .. 39 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 40 .. 47 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 .. 55 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 56 .. 63 */
	};
	const uint8_t pa_features_p9[] = {
		64, 0,
		0xf6, 0x3f, 0xc7, 0xc0, 0x80, 0xd0, 0x80, 0x00, /*  0 ..  7 */
		0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, /*  8 .. 15 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 16 .. 23 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x00, 0x00, /* 24 .. 31 */
		0x80, 0x00, 0x80, 0x00, 0x00, 0x00, 0x80, 0x00, /* 32 .. 39 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 40 .. 47 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 48 .. 55 */
		0x80, 0x00, 0x80, 0x00, 0x80, 0x00, 0x80, 0x00, /* 56 .. 63 */
	};
	const uint8_t *pa_features;
	size_t pa_features_size;

	prlog(PR_INFO, " Cache: I=%u D=%u/%u/%u/%u\n",
	      be32_to_cpu(cache->icache_size_kb),
	      be32_to_cpu(cache->l1_dcache_size_kb),
	      be32_to_cpu(cache->l2_dcache_size_kb),
	      be32_to_cpu(cache->l3_dcache_size_kb),
	      be32_to_cpu(cache->l35_dcache_size_kb));

	/* Use the boot CPU PVR to make up a CPU name in the device-tree
	 * since the HDAT doesn't seem to tell....
	 */
	version = mfspr(SPR_PVR);
	switch(PVR_TYPE(version)) {
	case PVR_TYPE_P7:
		name = "PowerPC,POWER7";
		pa_features = pa_features_p7;
		pa_features_size = sizeof(pa_features_p7);
		break;
	case PVR_TYPE_P7P:
		name = "PowerPC,POWER7+";
		pa_features = pa_features_p7p;
		pa_features_size = sizeof(pa_features_p7p);
		break;
	case PVR_TYPE_P8E:
	case PVR_TYPE_P8:
	case PVR_TYPE_P8NVL:
		name = "PowerPC,POWER8";
		pa_features = pa_features_p8;
		pa_features_size = sizeof(pa_features_p8);
		break;
	case PVR_TYPE_P9:
	case PVR_TYPE_P9P:
		name = "PowerPC,POWER9";
		if (is_power9n(version) &&
		    (PVR_VERS_MAJ(version) == 2) &&
		    (PVR_VERS_MIN(version) == 0)) {
			/* P9N DD2.0 */
			pa_features = pa_features_p9n_dd20;
			pa_features_size = sizeof(pa_features_p9n_dd20);
		} else {
			pa_features = pa_features_p9;
			pa_features_size = sizeof(pa_features_p9);
		}
		break;
	default:
		name = "PowerPC,Unknown";
		pa_features = NULL;
	}

	cpu = dt_new_addr(cpus, name, int_server);
	assert(cpu);
	dt_add_property_string(cpu, "device_type", "cpu");
	dt_add_property_string(cpu, "status", okay ? "okay" : "bad");
	dt_add_property_cells(cpu, "reg", int_server);
	dt_add_property_cells(cpu, "cpu-version", version);
	dt_add_property(cpu, "64-bit", NULL, 0);
	dt_add_property(cpu, "32-64-bridge", NULL, 0);
	dt_add_property(cpu, "graphics", NULL, 0);
	dt_add_property(cpu, "general-purpose", NULL, 0);
	dt_add_property_cells(cpu, "ibm,processor-segment-sizes",
			      0x1c, 0x28, 0xffffffff, 0xffffffff);
	dt_add_property_cells(cpu, "ibm,processor-page-sizes",
			      0xc, 0x10, 0x18, 0x22);
	if (proc_gen == proc_gen_p9)
		dt_add_property_cells(cpu, "ibm,processor-radix-AP-encodings",
				      0x0000000c, 0xa0000010,
				      0x20000015, 0x4000001e);

	/* Page size encodings appear to be the same for P7 and P8 */
	dt_add_property_cells(cpu, "ibm,segment-page-sizes",
		0x0c, 0x000, 3, 0x0c, 0x0000,  /*  4K seg  4k pages */
				0x10, 0x0007,  /*  4K seg 64k pages */
				0x18, 0x0038,  /*  4K seg 16M pages */
		0x10, 0x110, 2, 0x10, 0x0001,  /* 64K seg 64k pages */
				0x18, 0x0008,  /* 64K seg 16M pages */
		0x18, 0x100, 1, 0x18, 0x0000,  /* 16M seg 16M pages */
		0x22, 0x120, 1, 0x22, 0x0003); /* 16G seg 16G pages */

	if (pa_features) {
		dt_add_property(cpu, "ibm,pa-features",
				pa_features, pa_features_size);
	}

	dt_add_property_cells(cpu, "ibm,slb-size", 0x20);

	dt_add_property_cells(cpu, "ibm,vmx", 0x2);
	dt_add_property_cells(cpu, "ibm,dfp", 0x2);
	dt_add_property_cells(cpu, "ibm,purr", 0x1);
	dt_add_property_cells(cpu, "ibm,spurr", 0x1);

	/*
	 * Do not create "clock-frequency" if the frequency doesn't
	 * fit in a single cell
	 */
	freq = ((uint64_t)be32_to_cpu(tb->actual_clock_speed)) * 1000000ul;
	if (freq <= 0xfffffffful)
		dt_add_property_cells(cpu, "clock-frequency", freq);
	dt_add_property_u64(cpu, "ibm,extended-clock-frequency", freq);

	/* FIXME: Hardcoding is bad. */
	dt_add_property_cells(cpu, "timebase-frequency", 512000000);
	dt_add_property_cells(cpu, "ibm,extended-timebase-frequency",
			      0, 512000000);

	dt_add_property_cells(cpu, "reservation-granule-size",
			      be32_to_cpu(cache->reservation_size));

	dt_add_property_cells(cpu, "d-tlb-size",
			      be32_to_cpu(cache->dtlb_entries));
	dt_add_property_cells(cpu, "i-tlb-size",
			      be32_to_cpu(cache->itlb_entries));
	/* Assume unified TLB */
	dt_add_property_cells(cpu, "tlb-size",
			      be32_to_cpu(cache->dtlb_entries));
	dt_add_property_cells(cpu, "d-tlb-sets",
			      be32_to_cpu(cache->dtlb_assoc_sets));
	dt_add_property_cells(cpu, "i-tlb-sets",
			      be32_to_cpu(cache->itlb_assoc_sets));
	dt_add_property_cells(cpu, "tlb-sets",
			      be32_to_cpu(cache->dtlb_assoc_sets));

	dt_add_property_cells(cpu, "d-cache-block-size",
			      be32_to_cpu(cache->dcache_block_size));
	dt_add_property_cells(cpu, "i-cache-block-size",
			      be32_to_cpu(cache->icache_block_size));
	dt_add_property_cells(cpu, "d-cache-size",
			      be32_to_cpu(cache->l1_dcache_size_kb)*1024);
	dt_add_property_cells(cpu, "i-cache-size",
			      be32_to_cpu(cache->icache_size_kb)*1024);
	dt_add_property_cells(cpu, "i-cache-sets",
			      be32_to_cpu(cache->icache_assoc_sets));
	dt_add_property_cells(cpu, "d-cache-sets",
			      be32_to_cpu(cache->dcache_assoc_sets));

	if (cache->icache_line_size != cache->icache_block_size)
		dt_add_property_cells(cpu, "i-cache-line-size",
				      be32_to_cpu(cache->icache_line_size));
	if (cache->l1_dcache_line_size != cache->dcache_block_size)
		dt_add_property_cells(cpu, "d-cache-line-size",
				      be32_to_cpu(cache->l1_dcache_line_size));
	return cpu;
}
static struct dt_node *add_cpu_node(struct dt_node *cpus,
				    const struct HDIF_common_hdr *paca,
				    const struct sppaca_cpu_id *id,
				    bool okay)
{
	const struct sppaca_cpu_timebase *timebase;
	const struct sppaca_cpu_cache *cache;
	const struct sppaca_cpu_attr *attr;
	struct dt_node *cpu;
	u32 no, size, ve_flags, l2_phandle, chip_id;

	/* We use the process_interrupt_line as the res id */
	no = be32_to_cpu(id->process_interrupt_line);

	ve_flags = be32_to_cpu(id->verify_exists_flags);
	prlog(PR_INFO, "CPU[%i]: PIR=%i RES=%i %s %s(%u threads)\n",
	      paca_index(paca), be32_to_cpu(id->pir), no,
	      ve_flags & CPU_ID_PACA_RESERVED
	      ? "**RESERVED**" : cpu_state(ve_flags),
	      ve_flags & CPU_ID_SECONDARY_THREAD
	      ? "[secondary] " :
	      (be32_to_cpu(id->pir) == boot_cpu->pir ? "[boot] " : ""),
	      ((ve_flags & CPU_ID_NUM_SECONDARY_THREAD_MASK)
	       >> CPU_ID_NUM_SECONDARY_THREAD_SHIFT) + 1);

	timebase = HDIF_get_idata(paca, SPPACA_IDATA_TIMEBASE, &size);
	if (!timebase || size < sizeof(*timebase)) {
		prerror("CPU[%i]: bad timebase size %u @ %p\n",
			paca_index(paca), size, timebase);
		return NULL;
	}

	cache = HDIF_get_idata(paca, SPPACA_IDATA_CACHE_SIZE, &size);
	if (!cache || size < sizeof(*cache)) {
		prerror("CPU[%i]: bad cache size %u @ %p\n",
			paca_index(paca), size, cache);
		return NULL;
	}

	cpu = add_core_common(cpus, cache, timebase, no, okay);

	/* Core attributes */
	attr = HDIF_get_idata(paca, SPPACA_IDATA_CPU_ATTR, &size);
	if (attr)
		add_core_attr(cpu, be32_to_cpu(attr->attr));

	/* Add cache info */
	l2_phandle = add_core_cache_info(cpus, cache, no, okay);
	dt_add_property_cells(cpu, "l2-cache", l2_phandle);

	/* We append the secondary cpus in __cpu_parse */
	dt_add_property_cells(cpu, "ibm,ppc-interrupt-server#s", no);

	dt_add_property_cells(cpu, DT_PRIVATE "hw_proc_id",
			      be32_to_cpu(id->hardware_proc_id));
	dt_add_property_cells(cpu, "ibm,pir", be32_to_cpu(id->pir));

	chip_id = pcid_to_chip_id(be32_to_cpu(id->processor_chip_id));
	dt_add_property_cells(cpu, "ibm,chip-id", chip_id);

	return cpu;
}