static void of_bus_default_count_cells(struct device_node *dev,
                                       int *addrc, int *sizec)
{
        if (addrc)
                *addrc = prom_n_addr_cells(dev);
        if (sizec)
                *sizec = prom_n_size_cells(dev);
}
/*
 * Fold prom_n_size_cells() consecutive big-endian cells into one value,
 * advancing the caller's buffer pointer past them.
 */
static unsigned long read_cell_ul(struct device_node *device,
                                  unsigned int **buf)
{
        int i;
        unsigned long result = 0;

        i = prom_n_size_cells(device);
        /* bug on i > 2 ?? */
        while (i--) {
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}
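/*
 * Standalone sketch (user-space C, not kernel code) of the cell concatenation
 * read_cell_ul() performs: consecutive big-endian 32-bit cells are folded into
 * one 64-bit value and the buffer pointer is advanced.  The cell count of 2
 * and the "reg" contents below are assumptions standing in for
 * prom_n_size_cells() and the real device tree.
 */
#include <stdint.h>
#include <stdio.h>

static uint64_t read_cells(const uint32_t **buf, int cells)
{
        uint64_t result = 0;

        while (cells--) {
                /* the earlier cell is the more significant one */
                result = (result << 32) | **buf;
                (*buf)++;
        }
        return result;
}

int main(void)
{
        /* hypothetical "reg" entry: start 0x1_00000000, size 0x8000_0000 */
        const uint32_t reg[] = { 0x1, 0x0, 0x0, 0x80000000 };
        const uint32_t *p = reg;
        uint64_t start = read_cells(&p, 2);
        uint64_t size = read_cells(&p, 2);

        printf("start=0x%llx size=0x%llx\n",
               (unsigned long long)start, (unsigned long long)size);
        return 0;
}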
void of_parse_dma_window(struct device_node *dn, unsigned char *dma_window_prop,
                         unsigned long *busno, unsigned long *phys,
                         unsigned long *size)
{
        u32 *dma_window, cells;
        unsigned char *prop;

        dma_window = (u32 *)dma_window_prop;

        /* busno is always one cell */
        *busno = *(dma_window++);

        prop = get_property(dn, "ibm,#dma-address-cells", NULL);
        if (!prop)
                prop = get_property(dn, "#address-cells", NULL);

        cells = prop ? *(u32 *)prop : prom_n_addr_cells(dn);
        *phys = of_read_number(dma_window, cells);

        dma_window += cells;

        prop = get_property(dn, "ibm,#dma-size-cells", NULL);
        cells = prop ? *(u32 *)prop : prom_n_size_cells(dn);
        *size = of_read_number(dma_window, cells);
}
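/*
 * Standalone sketch of the "ibm,dma-window" layout of_parse_dma_window()
 * expects: one bus-number cell, then the DMA address (#address-cells cells),
 * then the window size (#size-cells cells).  The 1/2/2 split and the values
 * below are assumptions for illustration; the real cell counts come from the
 * device tree as shown above.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
        const uint32_t dma_window[] = {
                0x80000000,             /* bus number: always 1 cell       */
                0x0, 0x0,               /* DMA address: 2 cells assumed    */
                0x0, 0x10000000,        /* window size: 2 cells (256MB)    */
        };
        uint64_t busno = dma_window[0];
        uint64_t phys  = ((uint64_t)dma_window[1] << 32) | dma_window[2];
        uint64_t size  = ((uint64_t)dma_window[3] << 32) | dma_window[4];

        printf("busno=0x%llx phys=0x%llx size=0x%llx\n",
               (unsigned long long)busno, (unsigned long long)phys,
               (unsigned long long)size);
        return 0;
}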
struct pci_controller * __devinit init_phb_dynamic(struct device_node *dn)
{
        struct device_node *root = of_find_node_by_path("/");
        unsigned int root_size_cells = 0;
        struct pci_controller *phb;
        int primary;

        root_size_cells = prom_n_size_cells(root);

        primary = list_empty(&hose_list);
        phb = pcibios_alloc_controller(dn);
        if (!phb) {
                /* drop the root reference on the error path too */
                of_node_put(root);
                return NULL;
        }
        setup_phb(dn, phb, root_size_cells);

        pci_process_bridge_OF_ranges(phb, dn, primary);

        pci_setup_phb_io_dynamic(phb, primary);
        of_node_put(root);

        pci_devs_phb_init_dynamic(phb);
        scan_phb(phb);

        return phb;
}
/**
 * setup_cpu_maps - initialize the following cpu maps:
 *                  cpu_possible_map
 *                  cpu_present_map
 *                  cpu_sibling_map
 *
 * Having the possible map set up early allows us to restrict allocations
 * of things like irqstacks to num_possible_cpus() rather than NR_CPUS.
 *
 * We do not initialize the online map here; cpus set their own bits in
 * cpu_online_map as they come up.
 *
 * This function is valid only for Open Firmware systems.  finish_device_tree
 * must be called before using this.
 *
 * While we're here, we may as well set the "physical" cpu ids in the paca.
 */
static void __init setup_cpu_maps(void)
{
        struct device_node *dn = NULL;
        int cpu = 0;
        int swap_cpuid = 0;

        check_smt_enabled();

        while ((dn = of_find_node_by_type(dn, "cpu")) && cpu < NR_CPUS) {
                u32 *intserv;
                int j, len = sizeof(u32), nthreads;

                intserv = (u32 *)get_property(dn, "ibm,ppc-interrupt-server#s",
                                              &len);
                if (!intserv)
                        intserv = (u32 *)get_property(dn, "reg", NULL);

                nthreads = len / sizeof(u32);

                for (j = 0; j < nthreads && cpu < NR_CPUS; j++) {
                        /*
                         * Only spin up secondary threads if SMT is enabled.
                         * We must leave space in the logical map for the
                         * threads.
                         */
                        if (j == 0 || smt_enabled_at_boot) {
                                cpu_set(cpu, cpu_present_map);
                                set_hard_smp_processor_id(cpu, intserv[j]);
                        }
                        if (intserv[j] == boot_cpuid_phys)
                                swap_cpuid = cpu;
                        cpu_set(cpu, cpu_possible_map);
                        cpu++;
                }
        }

        /* Swap CPU id 0 with boot_cpuid_phys, so we can always assume that
         * boot cpu is logical 0.
         */
        if (boot_cpuid_phys != get_hard_smp_processor_id(0)) {
                u32 tmp;
                tmp = get_hard_smp_processor_id(0);
                set_hard_smp_processor_id(0, boot_cpuid_phys);
                set_hard_smp_processor_id(swap_cpuid, tmp);
        }

        /*
         * On pSeries LPAR, we need to know how many cpus
         * could possibly be added to this partition.
         */
        if (systemcfg->platform == PLATFORM_PSERIES_LPAR &&
            (dn = of_find_node_by_path("/rtas"))) {
                int num_addr_cell, num_size_cell, maxcpus;
                unsigned int *ireg;

                num_addr_cell = prom_n_addr_cells(dn);
                num_size_cell = prom_n_size_cells(dn);

                ireg = (unsigned int *)
                        get_property(dn, "ibm,lrdr-capacity", NULL);

                if (!ireg)
                        goto out;

                maxcpus = ireg[num_addr_cell + num_size_cell];

                /* Double maxcpus for processors which have SMT capability */
                if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
                        maxcpus *= 2;

                if (maxcpus > NR_CPUS) {
                        printk(KERN_WARNING
                               "Partition configured for %d cpus, "
                               "operating system maximum is %d.\n",
                               maxcpus, NR_CPUS);
                        maxcpus = NR_CPUS;
                } else
                        printk(KERN_INFO "Partition configured for %d cpus.\n",
                               maxcpus);

                for (cpu = 0; cpu < maxcpus; cpu++)
                        cpu_set(cpu, cpu_possible_map);
        out:
                of_node_put(dn);
        }

        /*
         * Do the sibling map; assume only two threads per processor.
         */
        for_each_cpu(cpu) {
                cpu_set(cpu, cpu_sibling_map[cpu]);
                if (cur_cpu_spec->cpu_features & CPU_FTR_SMT)
                        cpu_set(cpu ^ 0x1, cpu_sibling_map[cpu]);
        }

        systemcfg->processorCount = num_present_cpus();
}
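/*
 * Standalone sketch of the "ibm,lrdr-capacity" handling above: the
 * maximum-cpus cell is read from the slot right after the first
 * num_addr_cell + num_size_cell cells, doubled when the processor is
 * SMT-capable, then clamped to NR_CPUS.  The 2/2 cell counts, property
 * contents and NR_CPUS value are assumptions for illustration only.
 */
#include <stdio.h>

int main(void)
{
        /* made-up property: addr cells, size cells, then the cpu count */
        const unsigned int ireg[] = { 0x10, 0x0, 0x0, 0x0, 32 };
        const int num_addr_cell = 2, num_size_cell = 2;
        const int nr_cpus = 128;        /* assumed NR_CPUS */
        const int smt_capable = 1;
        int maxcpus = ireg[num_addr_cell + num_size_cell];

        if (smt_capable)
                maxcpus *= 2;           /* two threads per core */
        if (maxcpus > nr_cpus)
                maxcpus = nr_cpus;

        printf("partition configured for %d cpus\n", maxcpus);
        return 0;
}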
static int __init parse_numa_properties(void)
{
        struct device_node *cpu;
        struct device_node *memory;
        int *cpu_associativity;
        int *memory_associativity;
        int depth;
        int max_domain = 0;

        cpu = find_type_devices("cpu");
        if (!cpu)
                return -1;

        memory = find_type_devices("memory");
        if (!memory)
                return -1;

        cpu_associativity = (int *)get_property(cpu, "ibm,associativity", NULL);
        if (!cpu_associativity)
                return -1;

        memory_associativity = (int *)get_property(memory, "ibm,associativity",
                                                   NULL);
        if (!memory_associativity)
                return -1;

        /* find common depth */
        if (cpu_associativity[0] < memory_associativity[0])
                depth = cpu_associativity[0];
        else
                depth = memory_associativity[0];

        for (cpu = find_type_devices("cpu"); cpu; cpu = cpu->next) {
                int *tmp;
                int cpu_nr, numa_domain;

                tmp = (int *)get_property(cpu, "reg", NULL);
                if (!tmp)
                        continue;
                cpu_nr = *tmp;

                tmp = (int *)get_property(cpu, "ibm,associativity", NULL);
                if (!tmp)
                        continue;
                numa_domain = tmp[depth];

                /* FIXME */
                if (numa_domain == 0xffff) {
                        dbg("cpu %d has no numa domain\n", cpu_nr);
                        numa_domain = 0;
                }

                if (numa_domain >= MAX_NUMNODES)
                        BUG();

                if (max_domain < numa_domain)
                        max_domain = numa_domain;

                map_cpu_to_node(cpu_nr, numa_domain);
        }

        for (memory = find_type_devices("memory"); memory;
             memory = memory->next) {
                int *tmp1, *tmp2;
                unsigned long i;
                unsigned long start = 0;
                unsigned long size = 0;
                int numa_domain;
                int ranges;

                tmp1 = (int *)get_property(memory, "reg", NULL);
                if (!tmp1)
                        continue;

                ranges = memory->n_addrs;
new_range:
                /*
                 * Each "reg" entry is an (address, size) pair: the address
                 * uses #address-cells cells, the size #size-cells cells.
                 */
                i = prom_n_addr_cells(memory);
                while (i--) {
                        start = (start << 32) | *tmp1;
                        tmp1++;
                }

                i = prom_n_size_cells(memory);
                while (i--) {
                        size = (size << 32) | *tmp1;
                        tmp1++;
                }

                start = _ALIGN_DOWN(start, MEMORY_INCREMENT);
                size = _ALIGN_UP(size, MEMORY_INCREMENT);

                if ((start + size) > MAX_MEMORY)
                        BUG();

                tmp2 = (int *)get_property(memory, "ibm,associativity", NULL);
                if (!tmp2)
                        continue;
                numa_domain = tmp2[depth];

                /* FIXME */
                if (numa_domain == 0xffff) {
                        dbg("memory has no numa domain\n");
                        numa_domain = 0;
                }

                if (numa_domain >= MAX_NUMNODES)
                        BUG();

                if (max_domain < numa_domain)
                        max_domain = numa_domain;

                /*
                 * For backwards compatibility, OF splits the first node
                 * into two regions (the first being 0-4GB).  Check for
                 * this simple case and complain if there is a gap in
                 * memory.
                 */
                if (node_data[numa_domain].node_spanned_pages) {
                        unsigned long shouldstart =
                                node_data[numa_domain].node_start_pfn +
                                node_data[numa_domain].node_spanned_pages;
                        if (shouldstart != (start / PAGE_SIZE)) {
                                printk(KERN_ERR "Hole in node, disabling "
                                                "region start %lx length %lx\n",
                                                start, size);
                                continue;
                        }
                        node_data[numa_domain].node_spanned_pages +=
                                size / PAGE_SIZE;
                } else {
                        node_data[numa_domain].node_start_pfn =
                                start / PAGE_SIZE;
                        node_data[numa_domain].node_spanned_pages =
                                size / PAGE_SIZE;
                }

                for (i = start; i < (start + size); i += MEMORY_INCREMENT)
                        numa_memory_lookup_table[i >> MEMORY_INCREMENT_SHIFT] =
                                numa_domain;

                dbg("memory region %lx to %lx maps to domain %d\n",
                    start, start + size, numa_domain);

                ranges--;
                if (ranges)
                        goto new_range;
        }

        numnodes = max_domain + 1;

        return 0;
}
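/*
 * Standalone sketch of the lookup-table fill above: every MEMORY_INCREMENT
 * sized chunk of a region is stamped with its NUMA domain so that a later
 * physical-address-to-node lookup is a single array index.  The 16MB
 * granularity, table size and regions below are assumptions for
 * illustration, not the kernel's actual constants.
 */
#include <stdio.h>

#define MEM_INC_SHIFT   24                      /* assumed 16MB granularity */
#define MEM_INC         (1UL << MEM_INC_SHIFT)

static int numa_lookup[64];                     /* covers 1GB in this sketch */

static void map_region(unsigned long start, unsigned long size, int domain)
{
        unsigned long i;

        for (i = start; i < start + size; i += MEM_INC)
                numa_lookup[i >> MEM_INC_SHIFT] = domain;
}

int main(void)
{
        map_region(0x00000000, 0x10000000, 0);  /* first 256MB -> node 0 */
        map_region(0x10000000, 0x10000000, 1);  /* next 256MB  -> node 1 */

        printf("0x18000000 is on node %d\n",
               numa_lookup[0x18000000 >> MEM_INC_SHIFT]);
        return 0;
}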
unsigned long __init find_and_init_phbs(void)
{
        struct device_node *node;
        struct pci_controller *phb;
        unsigned int root_size_cells = 0;
        unsigned int index;
        unsigned int *opprop = NULL;
        struct device_node *root = of_find_node_by_path("/");

        if (ppc64_interrupt_controller == IC_OPEN_PIC) {
                opprop = (unsigned int *)get_property(root,
                                "platform-open-pic", NULL);
        }

        root_size_cells = prom_n_size_cells(root);

        index = 0;

        for (node = of_get_next_child(root, NULL);
             node != NULL;
             node = of_get_next_child(root, node)) {
                if (node->type == NULL || strcmp(node->type, "pci") != 0)
                        continue;

                phb = pcibios_alloc_controller(node);
                if (!phb)
                        continue;

                setup_phb(node, phb, root_size_cells);

                pci_process_bridge_OF_ranges(phb, node, 0);
                pci_setup_phb_io(phb, index == 0);
#ifdef CONFIG_PPC_PSERIES
                if (ppc64_interrupt_controller == IC_OPEN_PIC && pSeries_mpic) {
                        int addr = root_size_cells * (index + 2) - 1;
                        mpic_assign_isu(pSeries_mpic, index, opprop[addr]);
                }
#endif
                index++;
        }

        of_node_put(root);
        pci_devs_phb_init();

        /*
         * pci_probe_only and pci_assign_all_buses can be set via properties
         * in chosen.
         */
        if (of_chosen) {
                int *prop;

                prop = (int *)get_property(of_chosen, "linux,pci-probe-only",
                                           NULL);
                if (prop)
                        pci_probe_only = *prop;

                prop = (int *)get_property(of_chosen,
                                           "linux,pci-assign-all-buses", NULL);
                if (prop)
                        pci_assign_all_buses = *prop;
        }

        return 0;
}
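/*
 * Standalone sketch of the "platform-open-pic" indexing used above: for PHB
 * number `index`, the ISU base is read from cell root_size_cells*(index+2)-1
 * of the property, i.e. the last cell of the (index+2)th root-sized value.
 * The property contents and root_size_cells value below are assumptions.
 */
#include <stdio.h>

int main(void)
{
        const unsigned int opprop[] = {
                0x0, 0xf8040000,        /* made-up leading entry            */
                0x0, 0xf8200000,        /* ... ISU base for PHB index 0     */
                0x0, 0xf8210000,        /* ... ISU base for PHB index 1     */
        };
        const unsigned int root_size_cells = 2;
        unsigned int index;

        for (index = 0; index < 2; index++) {
                int addr = root_size_cells * (index + 2) - 1;
                printf("PHB %u: isu base cell %d = 0x%x\n",
                       index, addr, opprop[addr]);
        }
        return 0;
}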