void __init vmi_time_init(void)
{
	unsigned int cpu;

	/* Disable PIT: BIOSes start PIT CH0 with 18.2hz periodic. */
	outb_pit(0x3a, PIT_MODE); /* binary, mode 5, LSB/MSB, ch 0 */

	vmi_time_init_clockevent();
	setup_irq(0, &vmi_clock_action);
	for_each_possible_cpu(cpu)
		per_cpu(vector_irq, cpu)[vmi_get_timer_vector()] = 0;
}
static __init int blk_iopoll_setup(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_iopoll, i));

	open_softirq(BLOCK_IOPOLL_SOFTIRQ, blk_iopoll_softirq);
	register_hotcpu_notifier(&blk_iopoll_cpu_notifier);
	return 0;
}
static void b15_rac_enable(void)
{
	unsigned int cpu;
	u32 enable = 0;

	for_each_possible_cpu(cpu)
		enable |= (RAC_DATA_INST_EN_MASK << (cpu * RAC_CPU_SHIFT));

	b15_rac_disable_and_flush();
	__b15_rac_enable(enable);
}
static void setup_iommu_pool_hash(void)
{
	unsigned int i;
	static bool do_once;

	if (do_once)
		return;

	do_once = true;

	for_each_possible_cpu(i)
		per_cpu(iommu_hash_common, i) = hash_32(i, IOMMU_POOL_HASHBITS);
}
static __init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	register_hotcpu_notifier(&blk_cpu_notifier);
	return 0;
}
static int __init check_nmi_watchdog(void)
{
	volatile int endflag = 0;
	unsigned int *prev_nmi_count;
	int cpu;

	if (nmi_watchdog == NMI_NONE)
		return 0;

	prev_nmi_count = kmalloc(NR_CPUS * sizeof(int), GFP_KERNEL);
	if (!prev_nmi_count)
		return -1;

	printk(KERN_INFO "Testing NMI watchdog ... ");

	if (nmi_watchdog == NMI_LOCAL_APIC)
		smp_call_function(nmi_cpu_busy, (void *)&endflag, 0, 0);

	for_each_possible_cpu(cpu)
		prev_nmi_count[cpu] = per_cpu(irq_stat, cpu).__nmi_count;
	local_irq_enable();
	mdelay((100*1000)/nmi_hz); // wait 100 ticks

	for_each_possible_cpu(cpu) {
#ifdef CONFIG_SMP
		/* Check cpu_callin_map here because that is set
		   after the timer is started. */
		if (!cpu_isset(cpu, cpu_callin_map))
			continue;
#endif
		if (nmi_count(cpu) - prev_nmi_count[cpu] <= 5) {
			endflag = 1;
			printk("CPU#%d: NMI appears to be stuck (%d->%d)!\n",
				cpu, prev_nmi_count[cpu], nmi_count(cpu));
			nmi_active = 0;
			lapic_nmi_owner &= ~LAPIC_NMI_WATCHDOG;
			kfree(prev_nmi_count);
			return -1;
		}
	}
	endflag = 1;
	printk("OK.\n");

	/* now that we know it works we can reduce NMI frequency to
	   something more reasonable; makes a difference in some configs */
	if (nmi_watchdog == NMI_LOCAL_APIC)
		nmi_hz = 1;

	kfree(prev_nmi_count);
	return 0;
}
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc || !desc->kstat_irqs)
		return 0;
	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
static void cpufreq_early_suspend(struct early_suspend *h)
{
	int cpu;

	if (screen_off_cap) {
		pr_debug(CPUFREQ_CAP_TAG "%s: limit freq to %d\n",
			 __func__, screen_off_max_freq);
		screen_off_cap_active = true;
		for_each_possible_cpu(cpu)
			msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT,
						    screen_off_max_freq);
	}
}
static void cpufreq_late_resume(struct early_suspend *h)
{
	int cpu;

	if (screen_off_cap) {
		pr_info(CPUFREQ_CAP_TAG "%s: release limit freq to %d\n",
			__func__, screen_off_max_freq);
		screen_off_cap_active = false;
		for_each_possible_cpu(cpu)
			msm_cpufreq_set_freq_limits(cpu, MSM_CPUFREQ_NO_LIMIT,
						    MSM_CPUFREQ_NO_LIMIT);
	}
}
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	int cpu;
	int sum = 0;

	if (!desc)
		return 0;
	for_each_possible_cpu(cpu)
		sum += desc->kstat_irqs[cpu];
	return sum;
}
/* RCU callback: free each CPU's callchain buffer, then the container itself. */
static void release_callchain_buffers_rcu(struct rcu_head *head)
{
	struct callchain_cpus_entries *entries;
	int cpu;

	entries = container_of(head, struct callchain_cpus_entries, rcu_head);

	for_each_possible_cpu(cpu)
		kfree(entries->cpu_entries[cpu]);

	kfree(entries);
}
/*
** inventory.c:do_inventory() hasn't yet been run and thus we
** don't 'discover' the additional CPUs until later.
*/
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(ipi_lock, cpu));

	init_cpu_present(cpumask_of(0));

	parisc_max_cpus = max_cpus;
	if (!max_cpus)
		printk(KERN_INFO "SMP mode deactivated.\n");
}
static __init int blk_softirq_init(void)
{
	int i;

	for_each_possible_cpu(i)
		INIT_LIST_HEAD(&per_cpu(blk_cpu_done, i));

	open_softirq(BLOCK_SOFTIRQ, blk_done_softirq);
	cpuhp_setup_state_nocalls(CPUHP_BLOCK_SOFTIRQ_DEAD,
				  "block/softirq:dead", NULL,
				  blk_softirq_cpu_dead);
	return 0;
}
static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	long snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	snooze = simple_strtol(str, NULL, 10);
	for_each_possible_cpu(cpu)
		per_cpu(smt_snooze_delay, cpu) = snooze;

	return 1;
}
static void init_evtchn_cpu_bindings(void)
{
	int i;

	/* By default all event channels notify CPU#0. */
	for (i = 0; i < NR_IRQS; i++)
		set_native_irq_info(i, cpumask_of_cpu(0));

	memset(cpu_evtchn, 0, sizeof(cpu_evtchn));
	for_each_possible_cpu(i)
		memset(cpu_evtchn_mask[i],
		       (i == 0) ? ~0 : 0, sizeof(cpu_evtchn_mask[i]));
}
void __init setup_cpu_entry_areas(void)
{
	unsigned int cpu;

	setup_cpu_entry_area_ptes();

	for_each_possible_cpu(cpu)
		setup_cpu_entry_area(cpu);

	/*
	 * This is the last essential update to swapper_pgdir which needs
	 * to be synchronized to initial_page_table on 32bit.
	 */
	sync_initial_page_table();
}
/* Reset the per-cpu trace buffers and drop the reference to the traced task. */
static void __wakeup_reset(struct trace_array *tr)
{
	int cpu;

	for_each_possible_cpu(cpu)
		tracing_reset(tr, cpu);

	wakeup_cpu = -1;
	wakeup_prio = -1;

	if (wakeup_task)
		put_task_struct(wakeup_task);

	wakeup_task = NULL;
}
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	if (have_vcpu_info_placement) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}
static void __init xen_pv_smp_prepare_cpus(unsigned int max_cpus)
{
	unsigned cpu;
	unsigned int i;

	if (skip_ioapic_setup) {
		char *m = (max_cpus == 0) ?
			"The nosmp parameter is incompatible with Xen; " \
			"use Xen dom0_max_vcpus=1 parameter" :
			"The noapic parameter is incompatible with Xen";

		xen_raw_printk(m);
		panic(m);
	}
	xen_init_lock_cpu(0);

	smp_store_boot_cpu_info();
	cpu_data(0).x86_max_cores = 1;

	for_each_possible_cpu(i) {
		zalloc_cpumask_var(&per_cpu(cpu_sibling_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_core_map, i), GFP_KERNEL);
		zalloc_cpumask_var(&per_cpu(cpu_llc_shared_map, i), GFP_KERNEL);
	}
	set_cpu_sibling_map(0);

	speculative_store_bypass_ht_init();

	xen_pmu_init(0);

	if (xen_smp_intr_init(0) || xen_smp_intr_init_pv(0))
		BUG();

	if (!alloc_cpumask_var(&xen_cpu_initialized_map, GFP_KERNEL))
		panic("could not allocate xen_cpu_initialized_map\n");

	cpumask_copy(xen_cpu_initialized_map, cpumask_of(0));

	/* Restrict the possible_map according to max_cpus. */
	while ((num_possible_cpus() > 1) && (num_possible_cpus() > max_cpus)) {
		for (cpu = nr_cpu_ids - 1; !cpu_possible(cpu); cpu--)
			continue;
		set_cpu_possible(cpu, false);
	}

	for_each_possible_cpu(cpu)
		set_cpu_present(cpu, true);
}
void touch_nmi_watchdog(void)
{
	int i;

	/*
	 * Just reset the alert counters, (other CPUs might be
	 * spinning on locks we hold):
	 */
	for_each_possible_cpu(i)
		alert_counter[i] = 0;

	/*
	 * Tickle the softlockup detector too:
	 */
	touch_softlockup_watchdog();
}
/**
 * kstat_irqs - Get the statistics for an interrupt
 * @irq:	The interrupt number
 *
 * Returns the sum of interrupt counts on all cpus since boot for
 * @irq. The caller must ensure that the interrupt is not removed
 * concurrently.
 */
unsigned int kstat_irqs(unsigned int irq)
{
	struct irq_desc *desc = irq_to_desc(irq);
	unsigned int sum = 0;
	int cpu;

	if (!desc || !desc->kstat_irqs)
		return 0;
	if (!irq_settings_is_per_cpu_devid(desc) &&
	    !irq_settings_is_per_cpu(desc))
		return desc->tot_count;

	for_each_possible_cpu(cpu)
		sum += *per_cpu_ptr(desc->kstat_irqs, cpu);
	return sum;
}
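As a minimal usage sketch of the API documented above: the caller only ever sees the boot-to-now sum, so detecting activity means comparing two snapshots. The helper name irq_fired_since() and the snapshot variable are hypothetical, not part of the kernel API.

/*
 * Hypothetical helper (illustration only): compare two kstat_irqs()
 * snapshots to see whether @irq fired in between. As the kernel-doc
 * above states, the caller must ensure the interrupt is not removed
 * concurrently.
 */
static bool irq_fired_since(unsigned int irq, unsigned int *snapshot)
{
	unsigned int now = kstat_irqs(irq);
	bool fired = (now != *snapshot);

	*snapshot = now;
	return fired;
}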
static int __init setup_smt_snooze_delay(char *str)
{
	unsigned int cpu;
	int snooze;

	if (!cpu_has_feature(CPU_FTR_SMT))
		return 1;

	smt_snooze_cmdline = 1;

	if (get_option(&str, &snooze)) {
		for_each_possible_cpu(cpu)
			per_cpu(smt_snooze_delay, cpu) = snooze;
	}

	return 1;
}
/* This is called once we have the cpu_possible_map */
void xen_setup_vcpu_info_placement(void)
{
	int cpu;

	for_each_possible_cpu(cpu)
		xen_vcpu_setup(cpu);

	/* xen_vcpu_setup managed to place the vcpu_info within the
	   percpu area for all cpus, so make use of it */
	if (have_vcpu_info_placement) {
		pv_irq_ops.save_fl = __PV_IS_CALLEE_SAVE(xen_save_fl_direct);
		pv_irq_ops.restore_fl = __PV_IS_CALLEE_SAVE(xen_restore_fl_direct);
		pv_irq_ops.irq_disable = __PV_IS_CALLEE_SAVE(xen_irq_disable_direct);
		pv_irq_ops.irq_enable = __PV_IS_CALLEE_SAVE(xen_irq_enable_direct);
		pv_mmu_ops.read_cr2 = xen_read_cr2_direct;
	}
}
static int set_input_boost_freq(const char *buf, const struct kernel_param *kp)
{
	int i, ntokens = 0;
	unsigned int val, cpu;
	const char *cp = buf;
	bool enabled = false;

	while ((cp = strpbrk(cp + 1, " :")))
		ntokens++;

	/* single number: apply to all CPUs */
	if (!ntokens) {
		if (sscanf(buf, "%u\n", &val) != 1)
			return -EINVAL;
		for_each_possible_cpu(i)
			per_cpu(sync_info, i).input_boost_freq = val;
		goto check_enable;
	}

	/* CPU:value pairs */
	if (!(ntokens % 2))
		return -EINVAL;

	cp = buf;
	for (i = 0; i < ntokens; i += 2) {
		if (sscanf(cp, "%u:%u", &cpu, &val) != 2)
			return -EINVAL;
		/* valid CPU numbers are 0..num_possible_cpus()-1 */
		if (cpu >= num_possible_cpus())
			return -EINVAL;

		per_cpu(sync_info, cpu).input_boost_freq = val;
		cp = strchr(cp, ' ');
		cp++;
	}

check_enable:
	for_each_possible_cpu(i) {
		if (per_cpu(sync_info, i).input_boost_freq) {
			enabled = true;
			break;
		}
	}
	input_boost_enabled = enabled;

	return 0;
}
static void ipcomp_free_scratches(void)
{
	int i;
	void * __percpu *scratches;

	if (--ipcomp_scratch_users)
		return;

	scratches = ipcomp_scratches;
	if (!scratches)
		return;

	for_each_possible_cpu(i)
		vfree(*per_cpu_ptr(scratches, i));

	free_percpu(scratches);
}
void switch_APIC_timer_to_ipi(void *cpumask)
{
	cpumask_t mask = *(cpumask_t *)cpumask;
	int cpu = smp_processor_id();

	if (cpu_isset(cpu, mask) &&
	    !cpu_isset(cpu, timer_interrupt_broadcast_ipi_mask)) {
		disable_APIC_timer();
		cpu_set(cpu, timer_interrupt_broadcast_ipi_mask);
#ifdef CONFIG_HIGH_RES_TIMERS
		printk("Disabling NO_HZ and high resolution timers "
		       "due to timer broadcasting\n");
		for_each_possible_cpu(cpu)
			per_cpu(lapic_events, cpu).features &=
				~CLOCK_EVT_FEAT_ONESHOT;
#endif
	}
}
int mips_cpc_probe(void)
{
	phys_addr_t addr;
	unsigned cpu;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu(cpc_core_lock, cpu));

	addr = mips_cpc_phys_base();
	if (!addr)
		return -ENODEV;

	mips_cpc_base = ioremap_nocache(addr, 0x8000);
	if (!mips_cpc_base)
		return -ENXIO;

	return 0;
}
void set_pgdat_percpu_threshold(pg_data_t *pgdat,
				int (*calculate_pressure)(struct zone *))
{
	struct zone *zone;
	int cpu;
	int threshold;
	int i;

	for (i = 0; i < pgdat->nr_zones; i++) {
		zone = &pgdat->node_zones[i];
		if (!zone->percpu_drift_mark)
			continue;

		threshold = (*calculate_pressure)(zone);
		for_each_possible_cpu(cpu)
			per_cpu_ptr(zone->pageset, cpu)->stat_threshold
							= threshold;
	}
}
static ssize_t show_memcpy_count(struct device *dev,
				 struct device_attribute *attr, char *buf)
{
	struct dma_chan *chan;
	unsigned long count = 0;
	int i;
	int err;

	mutex_lock(&dma_list_mutex);
	chan = dev_to_dma_chan(dev);
	if (chan) {
		for_each_possible_cpu(i)
			count += per_cpu_ptr(chan->local, i)->memcpy_count;
		err = sprintf(buf, "%lu\n", count);
	} else
		err = -ENODEV;
	mutex_unlock(&dma_list_mutex);

	return err;
}
/**
 * percpu_ida_init - initialize a percpu tag pool
 * @pool: pool to initialize
 * @nr_tags: number of tags that will be available for allocation
 *
 * Initializes @pool so that it can be used to allocate tags - integers in the
 * range [0, nr_tags). Typically, they'll be used by driver code to refer to a
 * preallocated array of tag structures.
 *
 * Allocation is percpu, but sharding is limited by nr_tags - for best
 * performance, the workload should not span more cpus than nr_tags / 128.
 */
int __percpu_ida_init(struct percpu_ida *pool, unsigned long nr_tags,
	unsigned long max_size, unsigned long batch_size)
{
	unsigned i, cpu, order;

	memset(pool, 0, sizeof(*pool));

	init_waitqueue_head(&pool->wait);
	spin_lock_init(&pool->lock);
	pool->nr_tags = nr_tags;
	pool->percpu_max_size = max_size;
	pool->percpu_batch_size = batch_size;

	/* Guard against overflow */
	if (nr_tags > (unsigned) INT_MAX + 1) {
		pr_err("percpu_ida_init(): nr_tags too large\n");
		return -EINVAL;
	}

	order = get_order(nr_tags * sizeof(unsigned));
	pool->freelist = (void *) __get_free_pages(GFP_KERNEL, order);
	if (!pool->freelist)
		return -ENOMEM;

	for (i = 0; i < nr_tags; i++)
		pool->freelist[i] = i;

	pool->nr_free = nr_tags;

	pool->tag_cpu = __alloc_percpu(sizeof(struct percpu_ida_cpu) +
				       pool->percpu_max_size * sizeof(unsigned),
				       sizeof(unsigned));
	if (!pool->tag_cpu)
		goto err;

	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(pool->tag_cpu, cpu)->lock);

	return 0;
err:
	percpu_ida_destroy(pool);
	return -ENOMEM;
}
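To make the parameters above concrete, a brief hypothetical usage sketch: the my_tag_pool and my_driver_* names and the 256/32/8 sizing are illustrative assumptions, not values from the source; only __percpu_ida_init() and percpu_ida_destroy() come from the code above.

/*
 * Illustrative only: set up a pool of 256 tags, with each CPU caching up to
 * 32 tags locally (percpu_max_size) and moving 8 at a time between the
 * global freelist and the per-cpu cache (percpu_batch_size).
 */
static struct percpu_ida my_tag_pool;	/* hypothetical driver-owned pool */

static int my_driver_init_tags(void)
{
	return __percpu_ida_init(&my_tag_pool, 256, 32, 8);
}

static void my_driver_exit_tags(void)
{
	/* Same teardown the error path of __percpu_ida_init() uses. */
	percpu_ida_destroy(&my_tag_pool);
}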