/*
 * topology_init - register nodes and present CPUs with the driver model.
 *
 * Every present CPU is marked hotpluggable before registration.  On
 * NUMA kernels built without SMP, the single CPU is additionally linked
 * under each remote online node so the sysfs node<->cpu association is
 * complete for nodes other than the boot node.
 */
static int __init topology_init(void)
{
	int id, err;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(id)
		register_one_node(id);
#endif

	for_each_present_cpu(id) {
		struct cpu *cpu_dev = &per_cpu(cpu_devices, id);

		cpu_dev->hotpluggable = 1;
		err = register_cpu(cpu_dev, id);
		if (unlikely(err))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, id, err);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/* UP/NUMA: attach the lone CPU to every other online node too. */
	for_each_online_node(id)
		if (id != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), id);
#endif

	return 0;
}
/*
 * topology_init - register NUMA nodes and present CPUs with sysfs.
 *
 * Fix: replaced the deprecated GNU extension __FUNCTION__ with the
 * standard C99 __func__ (same expansion; matches kernel style and the
 * sibling topology_init variant in this file).
 */
static int __init topology_init(void)
{
	int i, ret;

#ifdef CONFIG_NEED_MULTIPLE_NODES
	for_each_online_node(i)
		register_one_node(i);
#endif

	for_each_present_cpu(i) {
		ret = register_cpu(&per_cpu(cpu_devices, i), i);
		if (unlikely(ret))
			printk(KERN_WARNING "%s: register_cpu %d failed (%d)\n",
			       __func__, i, ret);
	}

#if defined(CONFIG_NUMA) && !defined(CONFIG_SMP)
	/*
	 * In the UP case, make sure the CPU association is still
	 * registered under each node. Without this, sysfs fails
	 * to make the connection between nodes other than node0
	 * and cpu0.
	 */
	for_each_online_node(i)
		if (i != numa_node_id())
			register_cpu_under_node(raw_smp_processor_id(), i);
#endif

	return 0;
}
/*
 * numa_free_all_bootmem - release bootmem on every online node.
 *
 * Returns the total number of pages freed across all online nodes.
 *
 * NOTE(review): the original text was cut off by an unresolved
 * "<<<<<<< HEAD" merge-conflict marker.  The marker is removed and the
 * function completed with the plain per-node total; verify against the
 * intended merge result whether an additional
 * free_low_memory_core_early(MAX_NUMNODES) pass belongs here as well.
 */
unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long pages = 0;
	int i;

	for_each_online_node(i)
		pages += free_all_bootmem_node(NODE_DATA(i));

	return pages;
}
/**
 * sn_init_pdas - setup node data areas
 *
 * One time setup for Node Data Area.  Called by sn_setup().
 */
static void __init sn_init_pdas(char **cmdline_p)
{
	cnodeid_t cnode;

	/* Build the compact-node-id -> NASID table; -1 marks unused slots. */
	memset(pda->cnodeid_to_nasid_table, -1,
	       sizeof(pda->cnodeid_to_nasid_table));
	for_each_online_node(cnode)
		pda->cnodeid_to_nasid_table[cnode] =
		    pxm_to_nasid(nid_to_pxm_map[cnode]);

	/*
	 * Compact node ids >= num_online_nodes() are I/O (TIO) nodes
	 * discovered by scan_for_ionodes() below.
	 */
	numionodes = num_online_nodes();
	scan_for_ionodes();

	/*
	 * Allocate & initialize the nodepda for each node.
	 */
	for_each_online_node(cnode) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(cnode), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
		/* -1 == no physical CPU mapped to this slot yet */
		memset(nodepdaindr[cnode]->phys_cpuid, -1,
		       sizeof(nodepdaindr[cnode]->phys_cpuid));
	}

	/*
	 * Allocate & initialize nodepda for TIOs.  For now, put them on node 0.
	 */
	for (cnode = num_online_nodes(); cnode < numionodes; cnode++) {
		nodepdaindr[cnode] =
		    alloc_bootmem_node(NODE_DATA(0), sizeof(nodepda_t));
		memset(nodepdaindr[cnode], 0, sizeof(nodepda_t));
	}

	/*
	 * Now copy the array of nodepda pointers to each nodepda.
	 */
	for (cnode = 0; cnode < numionodes; cnode++)
		memcpy(nodepdaindr[cnode]->pernode_pdaindr, nodepdaindr,
		       sizeof(nodepdaindr));

	/*
	 * Set up IO related platform-dependent nodepda fields.
	 * The following routine actually sets up the hubinfo struct
	 * in nodepda.
	 */
	for_each_online_node(cnode) {
		bte_init_node(nodepdaindr[cnode], cnode);
	}

	/*
	 * Initialize the per node hubdev.  This includes IO Nodes and
	 * headless/memless nodes.
	 */
	for (cnode = 0; cnode < numionodes; cnode++) {
		hubdev_init_node(nodepdaindr[cnode], cnode);
	}
}
/*
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches.  This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
 *
 * Which pruning mechanism is used is chosen at compile time from the
 * HAVE_* feature tests; on success *objects receives the number of
 * objects pruned and 0 is returned.
 */
int zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int error = 0;
#if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};
#endif

	ZFS_ENTER(zsb);

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
	defined(SHRINK_CONTROL_HAS_NID) && \
	defined(SHRINKER_NUMA_AWARE)
	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
		/* NUMA-aware shrinkers are invoked once per online node. */
		*objects = 0;
		for_each_online_node(sc.nid)
			*objects += (*shrinker->scan_objects)(shrinker, &sc);
	} else {
		*objects = (*shrinker->scan_objects)(shrinker, &sc);
	}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SHRINK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
	/* Marker so the fallback below is skipped when this is primary. */
#define D_PRUNE_ALIASES_IS_DEFAULT
	*objects = zfs_sb_prune_aliases(zsb, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif

#if defined(HAVE_D_PRUNE_ALIASES) && !defined(D_PRUNE_ALIASES_IS_DEFAULT)
#undef D_PRUNE_ALIASES_IS_DEFAULT
	/*
	 * Fall back to zfs_sb_prune_aliases if the kernel's per-superblock
	 * shrinker couldn't free anything, possibly due to the inodes being
	 * allocated in a different memcg.
	 */
	if (*objects == 0)
		*objects = zfs_sb_prune_aliases(zsb, nr_to_scan);
#endif

	ZFS_EXIT(zsb);

	dprintf_ds(zsb->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);

	return (error);
}
static inline unsigned long get_num_physpages(void) { int nid; unsigned long phys_pages = 0; for_each_online_node(nid) phys_pages += node_present_pages(nid); return phys_pages; }
/* Register each online node and each CPU with the architecture layer. */
static int __init topology_init(void)
{
	int id;

	for_each_online_node(id)
		arch_register_node(id);

	for_each_cpu(id)
		arch_register_cpu(id);

	return 0;
}
/*
 * Release bootmem on every online node, plus the low memory handled by
 * the early core allocator.  Returns the total number of pages freed.
 */
unsigned long __init numa_free_all_bootmem(void)
{
	unsigned long total = 0;
	int nid;

	for_each_online_node(nid)
		total += free_all_bootmem_node(NODE_DATA(nid));

	total += free_low_memory_core_early(MAX_NUMNODES);

	return total;
}
/*
 * Halt the machine: stop the other CPUs, hand each node's hub back to
 * the PROM, then reset the local NI port.
 */
static void ip27_machine_halt(void)
{
	int i;

#ifdef CONFIG_SMP
	/* Quiesce all other CPUs before touching the hub registers. */
	smp_send_stop();
#endif
	/* Write PROMOP_RESTART to every online node's PROM opcode register. */
	for_each_online_node(i)
		REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
			     PROMOP_RESTART);
	/* Finally reset this node's network-interface port. */
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
	noreturn;	/* presumably never returns — confirm the macro */
}
/* Register online NUMA nodes and all present CPUs with sysfs. */
static int __init topology_init(void)
{
	int id;

#ifdef CONFIG_NUMA
	for_each_online_node(id)
		register_one_node(id);
#endif /* CONFIG_NUMA */

	for_each_present_cpu(id)
		arch_register_cpu(id);

	return 0;
}
static ssize_t node_read_distance(struct sys_device * dev, char * buf) { int nid = dev->id; int len = 0; int i; /* buf currently PAGE_SIZE, need ~4 chars per node */ BUILD_BUG_ON(MAX_NUMNODES*4 > PAGE_SIZE/2); for_each_online_node(i) len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i)); len += sprintf(buf + len, "\n"); return len; }
/*
 * Early SMP preparation: clear per-node hub interrupts, replicate the
 * kernel text, and allocate the boot CPU's per-CPU data area.
 */
void __init prom_prepare_cpus(unsigned int max_cpus)
{
	cnodeid_t cnode;

	/* Clear all interrupt state on every online node's hub. */
	for_each_online_node(cnode)
		intr_clear_all(COMPACT_TO_NASID_NODEID(cnode));

	replicate_kernel_text();

	/*
	 * Assumption to be fixed: we're always booted on logical / physical
	 * processor 0.  While we're always running on logical processor 0
	 * this still means this is physical processor zero; it might for
	 * example be disabled in the firmware.
	 */
	alloc_cpupda(0, 0);
}
/*
 * Return the DMA32 bootmem reservation to the allocator once it is no
 * longer needed (only relevant when memory extends past MAX_DMA32_PFN).
 */
static void __init dma32_free_bootmem(void)
{
	int nid;

	/* Nothing reserved, or all memory fits below 4GB: nothing to do. */
	if (end_pfn <= MAX_DMA32_PFN || !dma32_bootmem_ptr)
		return;

	for_each_online_node(nid)
		free_bootmem_node(NODE_DATA(nid), __pa(dma32_bootmem_ptr),
				  dma32_bootmem_size);

	dma32_bootmem_ptr = NULL;
	dma32_bootmem_size = 0;
}
/* Register NUMA nodes and present CPUs with the driver core. */
static int __init topology_init(void)
{
	int id, err;

#ifdef CONFIG_NUMA
	for_each_online_node(id)
		register_one_node(id);
#endif /* CONFIG_NUMA */

	for_each_present_cpu(id) {
		err = register_cpu(&per_cpu(cpu_devices, id), id);
		if (err)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", id, err);
	}

	return 0;
}
static ssize_t node_read_distance(struct device *dev, struct device_attribute *attr, char * buf) { int nid = dev->id; int len = 0; int i; /* * buf is currently PAGE_SIZE in length and each node needs 4 chars * at the most (distance + space or newline). */ BUILD_BUG_ON(MAX_NUMNODES * 4 > PAGE_SIZE); for_each_online_node(i) len += sprintf(buf + len, "%s%d", i ? " " : "", node_distance(nid, i)); len += sprintf(buf + len, "\n"); return len; }
/* XXX How to pass the reboot command to the firmware??? */
static void ip27_machine_restart(char *command)
{
#if 0
	int i;
#endif

	printk("Reboot started from CPU %d\n", smp_processor_id());
#ifdef CONFIG_SMP
	/* Stop the other CPUs before resetting. */
	smp_send_stop();
#endif
#if 0
	/*
	 * Disabled path: ask every online node's PROM to reboot.
	 * Kept for reference until the firmware reboot path is sorted out.
	 */
	for_each_online_node(i)
		REMOTE_HUB_S(COMPACT_TO_NASID_NODEID(i), PROMOP_REG,
			     PROMOP_REBOOT);
#else
	/* Fallback: reset the local NI port instead. */
	LOCAL_HUB_S(NI_PORT_RESET, NPR_PORTRESET | NPR_LOCALRESET);
#endif
	noreturn;	/* presumably never returns — confirm the macro */
}
/*
 * The ARC has requested that the filesystem drop entries from the dentry
 * and inode caches.  This can occur when the ARC needs to free meta data
 * blocks but can't because they are all pinned by entries in these caches.
 *
 * Which pruning mechanism is used is chosen at compile time from the
 * HAVE_* feature tests; on success *objects receives the number of
 * objects pruned and 0 is returned.
 */
int zfs_sb_prune(struct super_block *sb, unsigned long nr_to_scan, int *objects)
{
	zfs_sb_t *zsb = sb->s_fs_info;
	int error = 0;
#if defined(HAVE_SHRINK) || defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	struct shrinker *shrinker = &sb->s_shrink;
	struct shrink_control sc = {
		.nr_to_scan = nr_to_scan,
		.gfp_mask = GFP_KERNEL,
	};
#endif

	ZFS_ENTER(zsb);

#if defined(HAVE_SPLIT_SHRINKER_CALLBACK) && \
	defined(SHRINK_CONTROL_HAS_NID) && \
	defined(SHRINKER_NUMA_AWARE)
	if (sb->s_shrink.flags & SHRINKER_NUMA_AWARE) {
		/* NUMA-aware shrinkers are invoked once per online node. */
		*objects = 0;
		for_each_online_node(sc.nid)
			*objects += (*shrinker->scan_objects)(shrinker, &sc);
	} else {
		*objects = (*shrinker->scan_objects)(shrinker, &sc);
	}
#elif defined(HAVE_SPLIT_SHRINKER_CALLBACK)
	*objects = (*shrinker->scan_objects)(shrinker, &sc);
#elif defined(HAVE_SHRINK)
	*objects = (*shrinker->shrink)(shrinker, &sc);
#elif defined(HAVE_D_PRUNE_ALIASES)
	*objects = zfs_sb_prune_aliases(zsb, nr_to_scan);
#else
#error "No available dentry and inode cache pruning mechanism."
#endif

	ZFS_EXIT(zsb);

	dprintf_ds(zsb->z_os->os_dsl_dataset,
	    "pruning, nr_to_scan=%lu objects=%d error=%d\n",
	    nr_to_scan, *objects, error);

	return (error);
}
/*
 * Register NUMA nodes and mark every present CPU hotpluggable before
 * registering it with the driver core.
 */
static int __init topology_init(void)
{
	int id, err;

#ifdef CONFIG_NUMA
	for_each_online_node(id)
		register_one_node(id);
#endif

	for_each_present_cpu(id) {
		struct cpu *cpu_dev = &per_cpu(cpu_devices, id);

		cpu_dev->hotpluggable = 1;
		err = register_cpu(cpu_dev, id);
		if (err)
			printk(KERN_WARNING "topology_init: register_cpu %d "
			       "failed (%d)\n", id, err);
	}

	return 0;
}
/*
 * Fix up SRAT/SLIT-derived NUMA data: map proximity domains (PXMs) to
 * logical node ids, bring those nodes online, renumber memory blocks
 * and CPUs accordingly, and load the SLIT distance matrix (falling back
 * to LOCAL_DISTANCE/REMOTE_DISTANCE when no SLIT table exists).
 */
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* No SRAT CPU entries: single-node system, boot CPU on node 0. */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/* Map each PXM seen in the SRAT to a logical node and set it online. */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* Convert memory-block nids from PXM values to logical node ids. */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* Number each node's memory chunks with sequential bank ids. */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* Convert per-CPU nids from PXM values to logical node ids. */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table) {
		/* No SLIT: assume uniform local/remote distances. */
		for (i = 0; i < MAX_NUMNODES; i++)
			for (j = 0; j < MAX_NUMNODES; j++)
				node_distance(i, j) = i == j ?
					LOCAL_DISTANCE : REMOTE_DISTANCE;
		return;
	}

	/* Copy SLIT entries into numa_slit, indexed by logical node ids. */
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			node_distance(node_from, node_to) =
				slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	/* Dump the resulting distance matrix for debugging. */
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
			printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}
/*
 * Fix up SRAT/SLIT-derived NUMA data: map proximity domains (PXMs) to
 * logical node ids, bring those nodes online, renumber memory blocks
 * and CPUs accordingly, and load the SLIT distance matrix into
 * numa_slit.  If there is no SLIT table, node_distance values are left
 * untouched.
 */
void __init acpi_numa_arch_fixup(void)
{
	int i, j, node_from, node_to;

	/* If there's no SRAT, fix the phys_id and mark node 0 online */
	if (srat_num_cpus == 0) {
		node_set_online(0);
		node_cpuid[0].phys_id = hard_smp_processor_id();
		return;
	}

	/*
	 * MCD - This can probably be dropped now.  No need for pxm ID to node ID
	 * mapping with sparse node numbering iff MAX_PXM_DOMAINS <= MAX_NUMNODES.
	 */
	nodes_clear(node_online_map);
	for (i = 0; i < MAX_PXM_DOMAINS; i++) {
		if (pxm_bit_test(i)) {
			int nid = acpi_map_pxm_to_node(i);
			node_set_online(nid);
		}
	}

	/* set logical node id in memory chunk structure */
	for (i = 0; i < num_node_memblks; i++)
		node_memblk[i].nid = pxm_to_node(node_memblk[i].nid);

	/* assign memory bank numbers for each chunk on each node */
	for_each_online_node(i) {
		int bank;

		bank = 0;
		for (j = 0; j < num_node_memblks; j++)
			if (node_memblk[j].nid == i)
				node_memblk[j].bank = bank++;
	}

	/* set logical node id in cpu structure */
	for_each_possible_early_cpu(i)
		node_cpuid[i].nid = pxm_to_node(node_cpuid[i].nid);

	printk(KERN_INFO "Number of logical nodes in system = %d\n",
	       num_online_nodes());
	printk(KERN_INFO "Number of memory chunks in system = %d\n",
	       num_node_memblks);

	if (!slit_table)
		return;

	/* Copy SLIT entries into numa_slit, indexed by logical node ids. */
	memset(numa_slit, -1, sizeof(numa_slit));
	for (i = 0; i < slit_table->locality_count; i++) {
		if (!pxm_bit_test(i))
			continue;
		node_from = pxm_to_node(i);
		for (j = 0; j < slit_table->locality_count; j++) {
			if (!pxm_bit_test(j))
				continue;
			node_to = pxm_to_node(j);
			node_distance(node_from, node_to) =
				slit_table->entry[i * slit_table->locality_count + j];
		}
	}

#ifdef SLIT_DEBUG
	/* Dump the resulting distance matrix for debugging. */
	printk("ACPI 2.0 SLIT locality table:\n");
	for_each_online_node(i) {
		for_each_online_node(j)
			printk("%03d ", node_distance(i, j));
		printk("\n");
	}
#endif
}