/*
 * Flush TLB entries for the address range [start, end) of @mm on every
 * node where this mm has run.  @nbits is the page-size shift for the range.
 *
 * NOTE(review): this block appears truncated in the current view -- only
 * the setup (optional IPI-based fast path, then collection of the set of
 * nodes to flush) is visible; the body continues past what is shown here.
 */
void sn2_global_tlb_purge(struct mm_struct *mm, unsigned long start,
			  unsigned long end, unsigned long nbits)
{
	int i, ibegin, shub1, cnode, mynasid, cpu, lcpu = 0, nasid;
	/* true only when flushing the mm we are currently running in */
	int mymm = (mm == current->active_mm && mm == current->mm);
	int use_cpu_ptcga;
	volatile unsigned long *ptc0, *ptc1;
	unsigned long itc, itc2, flags, data0 = 0, data1 = 0, rr_value, old_rr = 0;
	short nasids[MAX_NUMNODES], nix;
	nodemask_t nodes_flushed;
	int active, max_active, deadlock, flush_opt = sn2_flush_opt;

	/* tunable override: flush_opt > 2 selects the IPI-everything path */
	if (flush_opt > 2) {
		sn2_ipi_flush_all_tlb(mm);
		return;
	}

	/* build the set of nodes this mm has been scheduled on */
	nodes_clear(nodes_flushed);
	i = 0;

	for_each_cpu(cpu, mm_cpumask(mm)) {
		cnode = cpu_to_node(cpu);
		node_set(cnode, nodes_flushed);
		lcpu = cpu;		/* remember one cpu in the mask */
		i++;			/* count of cpus in the mask */
	}
/* unregister memory section under all nodes that it spans */ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk, unsigned long phys_index) { NODEMASK_ALLOC(nodemask_t, unlinked_nodes, GFP_KERNEL); unsigned long pfn, sect_start_pfn, sect_end_pfn; if (!mem_blk) { NODEMASK_FREE(unlinked_nodes); return -EFAULT; } if (!unlinked_nodes) return -ENOMEM; nodes_clear(*unlinked_nodes); sect_start_pfn = section_nr_to_pfn(phys_index); sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { int nid; nid = get_nid_for_pfn(pfn); if (nid < 0) continue; if (!node_online(nid)) continue; if (node_test_and_set(nid, *unlinked_nodes)) continue; sysfs_remove_link(&node_devices[nid]->dev.kobj, kobject_name(&mem_blk->dev.kobj)); sysfs_remove_link(&mem_blk->dev.kobj, kobject_name(&node_devices[nid]->dev.kobj)); } NODEMASK_FREE(unlinked_nodes); return 0; }
/*
 * Function: smp_dump_qct()
 *
 * Description: gets memory layout from the quad config table. This
 * function also updates node_online_map with the nodes (quads) present.
 */
static void __init smp_dump_qct(void)
{
	int node;
	struct eachquadmem *eq;
	/* the firmware's system config table lives at a fixed physical address */
	struct sys_cfg_data *scd =
		(struct sys_cfg_data *)__va(SYS_CFG_DATA_PRIV_ADDR);

	/* rebuild the online map from scratch using the quad-present bitmap */
	nodes_clear(node_online_map);
	for_each_node(node) {
		/* NOTE(review): (1 << node) assumes node < 32 here, since
		 * quads_present31_0 only describes quads 0-31 -- confirm
		 * MAX_NUMNODES <= 32 on this platform. */
		if (scd->quads_present31_0 & (1 << node)) {
			node_set_online(node);
			eq = &scd->eq[node];
			/* Convert to pages */
			node_start_pfn[node] = MB_TO_PAGES(
				eq->hi_shrd_mem_start - eq->priv_mem_size);
			node_end_pfn[node] = MB_TO_PAGES(
				eq->hi_shrd_mem_start + eq->hi_shrd_mem_size);

			memory_present(node,
				node_start_pfn[node], node_end_pfn[node]);
			node_remap_size[node] = node_memmap_size_bytes(node,
				node_start_pfn[node], node_end_pfn[node]);
		}
	}
}
/*
 * Walk the GDA nasid table, build the compact-node <-> nasid mappings,
 * mark each discovered node online, and count the cpus found on them.
 */
void cpu_node_probe(void)
{
	int i, highest = 0;
	gda_t *gdap = GDA;

	/*
	 * Initialize the arrays to invalid nodeid (-1)
	 */
	for (i = 0; i < MAX_COMPACT_NODES; i++)
		compact_to_nasid_node[i] = INVALID_NASID;
	for (i = 0; i < MAX_NASIDS; i++)
		nasid_to_compact_node[i] = INVALID_CNODEID;
	for (i = 0; i < MAXCPUS; i++)
		cpuid_to_compact_node[i] = INVALID_CNODEID;

	/*
	 * MCD - this whole "compact node" stuff can probably be dropped,
	 * as we can handle sparse numbering now
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_COMPACT_NODES; i++) {
		nasid_t nasid = gdap->g_nasidtable[i];
		/* the firmware table ends at the first invalid entry */
		if (nasid == INVALID_NASID)
			break;
		compact_to_nasid_node[i] = nasid;
		nasid_to_compact_node[nasid] = i;
		/* online nodes sequentially: the next node id to online is
		 * exactly the current count of online nodes */
		node_set_online(num_online_nodes());
		highest = do_cpumask(i, nasid, highest);
	}
	printk("Discovered %d cpus on %d nodes\n", highest + 1,
	       num_online_nodes());
}
/* unregister memory section under all nodes that it spans */ int unregister_mem_sect_under_nodes(struct memory_block *mem_blk) { nodemask_t unlinked_nodes; unsigned long pfn, sect_start_pfn, sect_end_pfn; if (!mem_blk) return -EFAULT; nodes_clear(unlinked_nodes); sect_start_pfn = section_nr_to_pfn(mem_blk->phys_index); sect_end_pfn = sect_start_pfn + PAGES_PER_SECTION - 1; for (pfn = sect_start_pfn; pfn <= sect_end_pfn; pfn++) { int nid; nid = get_nid_for_pfn(pfn); if (nid < 0) continue; if (!node_online(nid)) continue; if (node_test_and_set(nid, unlinked_nodes)) continue; sysfs_remove_link(&node_devices[nid].sysdev.kobj, kobject_name(&mem_blk->sysdev.kobj)); } return 0; }
/*
 * Copy a user-supplied node mask of @len bytes into @new_mask.
 * A short buffer is accepted (the remainder of the mask is cleared
 * beforehand); an oversized length is clamped to sizeof(nodemask_t).
 *
 * Returns 0 on success, -EFAULT if the user copy faults.
 */
static int get_user_node_mask(unsigned long __user *user_mask_ptr, unsigned len,
			      nodemask_t *new_mask)
{
	if (len > sizeof(nodemask_t))
		len = sizeof(nodemask_t);
	else if (len < sizeof(nodemask_t))
		nodes_clear(*new_mask);	/* zero the bits the copy won't reach */

	if (copy_from_user(new_mask, user_mask_ptr, len))
		return -EFAULT;
	return 0;
}
/*
 * smp_dump_qct() - read the memory layout from the quad config table.
 *
 * Rebuilds node_online_map from the firmware's quad-present bitmap and
 * registers each present quad (node) with the NUMA-Q layer.
 */
static void __init smp_dump_qct(void)
{
	struct sys_cfg_data *scd = (void *)__va(SYS_CFG_DATA_PRIV_ADDR);
	int node;

	nodes_clear(node_online_map);
	for_each_node(node)
		if (scd->quads_present31_0 & (1 << node))
			numaq_register_node(node, scd);
}
/*
 * Early architecture setup for m32r: record clocks, ramdisk flags, console,
 * node map, kernel memory layout and resources, then bring up memory
 * management (bootmem + paging).
 */
void __init setup_arch(char **cmdline_p)
{
	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

	/* platform clock parameters come from build-time constants */
	boot_cpu_data.cpu_clock = M32R_CPUCLK;
	boot_cpu_data.bus_clock = M32R_BUSCLK;
	boot_cpu_data.timer_divide = M32R_TIMER_DIVIDE;

#ifdef CONFIG_BLK_DEV_RAM
	/* decode the bootloader's ramdisk word into its three fields */
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

#ifdef CONFIG_VT
#if defined(CONFIG_VGA_CONSOLE)
	conswitchp = &vga_con;
#elif defined(CONFIG_DUMMY_CONSOLE)
	conswitchp = &dummy_con;
#endif
#endif

#ifdef CONFIG_DISCONTIGMEM
	/* this platform has exactly two fixed memory nodes */
	nodes_clear(node_online_map);
	node_set_online(0);
	node_set_online(1);
#endif	/* CONFIG_DISCONTIGMEM */

	/* describe the kernel image layout to init_mm */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* resource ends are inclusive, hence the -1 */
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;

	parse_mem_cmdline(cmdline_p);

	setup_memory();

	paging_init();
}
/*
 * Build the NUMA memory configuration from the ACPI SRAT: online a node
 * per proximity domain, map memory chunks and apicids to nodes, register
 * the active memory regions, and record per-node memmap sizes.
 *
 * Returns 1 on success, 0 if SRAT is disabled/absent or unusable.
 */
int __init get_memcfg_from_srat(void)
{
	int i, j, nid;

	if (srat_disabled())
		goto out_fail;

	if (acpi_numa_init() < 0)
		goto out_fail;

	if (num_memory_chunks == 0) {
		printk(KERN_DEBUG
			 "could not find any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk(KERN_DEBUG "pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk(KERN_CONT "%02x ", pxm_bitmap[i]);
	}
	printk(KERN_CONT "\n");
	printk(KERN_DEBUG "Number of logical nodes in system = %d\n",
			 num_online_nodes());
	printk(KERN_DEBUG "Number of memory chunks in system = %d\n",
			 num_memory_chunks);

	for (i = 0; i < MAX_LOCAL_APIC; i++)
		set_apicid_to_node(i, pxm_to_node(apicid_to_pxm[i]));

	for (j = 0; j < num_memory_chunks; j++){
		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
		printk(KERN_DEBUG
			"chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
			 j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		/* skip chunks the node layer rejects */
		if (node_read_chunk(chunk->nid, chunk))
			continue;

		/* clip each chunk to the usable pfn range */
		memblock_x86_register_active_regions(chunk->nid, chunk->start_pfn,
					     min(chunk->end_pfn, max_pfn));
	}
	/* for out of order entries in SRAT */
	sort_node_map();

	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = min(node_end_pfn[nid], max_pfn);

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	printk(KERN_DEBUG "failed to get NUMA memory information from SRAT"
			" table\n");
	return 0;
}
/*
 * Post-process parsed ACPI SRAT/SLIT data: online nodes per proximity
 * domain, translate pxm ids to logical node ids in the memblk and cpu
 * tables, assign per-node memory bank numbers, and fill the node
 * distance matrix from the SLIT (if present).
 */
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/*
	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table)
		return;
	/* -1 marks distances the SLIT does not cover */
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			/* SLIT is a flat locality_count x locality_count matrix */
			node_distance(node_from, node_to) =
			    slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
			printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}
/*
 * Parse the ACPI Static Resource Affinity Table: walk each sub-table
 * entry, recording cpu and memory affinity, then online a node per
 * proximity domain, map apicids and memory chunks to nodes, register
 * active ranges, and record per-node memmap sizes.
 *
 * Returns 1 on success, 0 if no usable memory affinity data was found.
 */
static int __init acpi20_parse_srat(struct acpi_table_srat *sratp)
{
	u8 *start, *end, *p;
	int i, j, nid;

	start = (u8 *)(&(sratp->reserved) + 1);	/* skip header */
	p = start;
	end = (u8 *)sratp + sratp->header.length;

	memset(pxm_bitmap, 0, sizeof(pxm_bitmap));	/* init proximity domain bitmap */
	memset(node_memory_chunk, 0, sizeof(node_memory_chunk));

	num_memory_chunks = 0;
	while (p < end) {
		switch (*p) {
		case ACPI_SRAT_TYPE_CPU_AFFINITY:
			parse_cpu_affinity_structure(p);
			break;
		case ACPI_SRAT_TYPE_MEMORY_AFFINITY:
			parse_memory_affinity_structure(p);
			break;
		default:
			printk("ACPI 2.0 SRAT: unknown entry skipped: type=0x%02X, len=%d\n", p[0], p[1]);
			break;
		}
		/*
		 * Validate the entry length BEFORE advancing.  The old code
		 * did "p += p[1]" first and then read p[1], which on a
		 * well-formed table leaves p == end after the last entry and
		 * reads one byte past the table.
		 */
		if (p[1] == 0) {
			printk("acpi20_parse_srat: Entry length value is zero;"
				" can't parse any further!\n");
			break;
		}
		p += p[1];
	}

	if (num_memory_chunks == 0) {
		printk("could not find any ACPI SRAT memory areas.\n");
		goto out_fail;
	}

	/* Calculate total number of nodes in system from PXM bitmap and create
	 * a set of sequential node IDs starting at zero.  (ACPI doesn't seem
	 * to specify the range of _PXM values.)
	 */
	/*
	 * MCD - we no longer HAVE to number nodes sequentially.  PXM domain
	 * numbers could go as high as 256, and MAX_NUMNODES for i386 is typically
	 * 32, so we will continue numbering them in this manner until MAX_NUMNODES
	 * approaches MAX_PXM_DOMAINS for i386.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (BMAP_TEST(pxm_bitmap, i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}
	BUG_ON(num_online_nodes() == 0);

	/* set cnode id in memory chunk structure */
	for (i = 0; i < num_memory_chunks; i++)
		node_memory_chunk[i].nid = pxm_to_node(node_memory_chunk[i].pxm);

	printk("pxm bitmap: ");
	for (i = 0; i < sizeof(pxm_bitmap); i++) {
		printk("%02X ", pxm_bitmap[i]);
	}
	printk("\n");
	printk("Number of logical nodes in system = %d\n", num_online_nodes());
	printk("Number of memory chunks in system = %d\n", num_memory_chunks);

	for (i = 0; i < MAX_APICID; i++)
		apicid_2_node[i] = pxm_to_node(apicid_to_pxm[i]);

	for (j = 0; j < num_memory_chunks; j++){
		struct node_memory_chunk_s * chunk = &node_memory_chunk[j];
		printk("chunk %d nid %d start_pfn %08lx end_pfn %08lx\n",
		       j, chunk->nid, chunk->start_pfn, chunk->end_pfn);
		node_read_chunk(chunk->nid, chunk);
		add_active_range(chunk->nid, chunk->start_pfn,
				 chunk->end_pfn);
	}

	for_each_online_node(nid) {
		unsigned long start = node_start_pfn[nid];
		unsigned long end = node_end_pfn[nid];

		memory_present(nid, start, end);
		node_remap_size[nid] = node_memmap_size_bytes(nid, start, end);
	}
	return 1;
out_fail:
	return 0;
}
/*
 * Early architecture setup for sh: enable the MMU, decode boot
 * parameters, record the kernel image layout, determine the pfn range,
 * then bring up bootmem, sparsemem, the machine vector and paging.
 */
void __init setup_arch(char **cmdline_p)
{
	enable_mmu();

	ROOT_DEV = old_decode_dev(ORIG_ROOT_DEV);

#ifdef CONFIG_BLK_DEV_RAM
	/* decode the bootloader's ramdisk word into its three fields */
	rd_image_start = RAMDISK_FLAGS & RAMDISK_IMAGE_START_MASK;
	rd_prompt = ((RAMDISK_FLAGS & RAMDISK_PROMPT_FLAG) != 0);
	rd_doload = ((RAMDISK_FLAGS & RAMDISK_LOAD_FLAG) != 0);
#endif

	if (!MOUNT_ROOT_RDONLY)
		root_mountflags &= ~MS_RDONLY;

	/* describe the kernel image layout to init_mm */
	init_mm.start_code = (unsigned long) _text;
	init_mm.end_code = (unsigned long) _etext;
	init_mm.end_data = (unsigned long) _edata;
	init_mm.brk = (unsigned long) _end;

	/* resource ends are inclusive, hence the -1 */
	code_resource.start = virt_to_phys(_text);
	code_resource.end = virt_to_phys(_etext)-1;
	data_resource.start = virt_to_phys(_etext);
	data_resource.end = virt_to_phys(_edata)-1;

	memory_start = (unsigned long)PAGE_OFFSET+__MEMORY_START;
	memory_end = memory_start + __MEMORY_SIZE;

#ifdef CONFIG_CMDLINE_BOOL
	/* build-time command line overrides the bootloader's */
	strlcpy(command_line, CONFIG_CMDLINE, sizeof(command_line));
#else
	strlcpy(command_line, COMMAND_LINE, sizeof(command_line));
#endif

	/* Save unparsed command line copy for /proc/cmdline */
	memcpy(boot_command_line, command_line, COMMAND_LINE_SIZE);
	*cmdline_p = command_line;

	parse_early_param();

	sh_mv_setup();

	/*
	 * Find the highest page frame number we have available
	 */
	max_pfn = PFN_DOWN(__pa(memory_end));

	/*
	 * Determine low and high memory ranges:
	 */
	max_low_pfn = max_pfn;
	min_low_pfn = __MEMORY_START >> PAGE_SHIFT;

	nodes_clear(node_online_map);

	/* Setup bootmem with available RAM */
	setup_memory();
	sparse_init();

#ifdef CONFIG_DUMMY_CONSOLE
	conswitchp = &dummy_con;
#endif

	/* Perform the machine specific initialisation */
	if (likely(sh_mv.mv_setup))
		sh_mv.mv_setup(cmdline_p);

	paging_init();
}
/*
 * Post-process parsed ACPI SRAT/SLIT data: online nodes per proximity
 * domain, translate pxm ids to logical node ids in the memblk and cpu
 * tables, assign per-node memory bank numbers, and fill the node
 * distance matrix -- from the SLIT when present, otherwise with the
 * LOCAL/REMOTE defaults.
 */
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* no SRAT cpus: single-node system, record our own phys id */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/* online one logical node per proximity domain seen in the SRAT */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* rewrite pxm ids stored in .nid into logical node ids */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign sequential bank numbers to each node's memory chunks */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* same pxm -> node translation for the cpu table */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table) {
		/* no SLIT: assume uniform default distances */
		for (i = 0; i < MAX_NUMNODES; i++)
			for (j = 0; j < MAX_NUMNODES; j++)
				node_distance(i, j) = i == j ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
		return;
	}

	/* -1 marks distances the SLIT does not cover */
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			/* SLIT is a flat locality_count x locality_count matrix */
			node_distance(node_from, node_to) =
			    slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
			printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}