static irqreturn_t ipi_interrupt_handler(int irq, void *arg)
{
	unsigned int message = (unsigned int)(long)arg;
	unsigned int cpu = hard_smp_processor_id();
	unsigned int offs = 4 * cpu;
	unsigned int x;

	/* Each message occupies a 4-bit field in the per-CPU ICI register,
	 * so (message << 2) selects the first bit of that field. */
	x = __raw_readl(0xfe410070 + offs);
	x &= (1 << (message << 2));
	__raw_writel(x, 0xfe410080 + offs);	/* ack via the clear register */

	smp_message_recv(message);

	return IRQ_HANDLED;
}
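/*
 * Hedged sketch of the sending side of this scheme: the handler above
 * acks a message by writing its bit to the per-CPU clear register, so
 * the sender presumably raises the IPI by setting the same bit in the
 * per-CPU set register. The base address and 4-byte-per-CPU stride are
 * inferred from the handler; the function name send_ipi_message is a
 * hypothetical placeholder, not taken from this source.
 */
static void send_ipi_message(unsigned int cpu, unsigned int message)
{
	unsigned int offs = 4 * cpu;

	/* raise the message's bit in the target CPU's ICI register */
	__raw_writel(1 << (message << 2), 0xfe410070 + offs);
}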
int safe_smp_processor_id(void)
{
	int apicid, cpuid;

	if (!boot_cpu_has(X86_FEATURE_APIC))
		return 0;

	apicid = hard_smp_processor_id();
	if (apicid == BAD_APICID)
		return 0;

	cpuid = convert_apicid_to_cpu(apicid);

	return cpuid >= 0 ? cpuid : 0;
}
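/*
 * Hedged sketch of what convert_apicid_to_cpu() plausibly does: scan
 * the recorded APIC IDs for a match and return -1 on a miss. The
 * x86_cpu_to_apicid array is borrowed from the older variant further
 * below; the real helper may use a per-cpu variable instead.
 */
int convert_apicid_to_cpu(int apic_id)
{
	int i;

	for (i = 0; i < NR_CPUS; ++i) {
		if (x86_cpu_to_apicid[i] == apic_id)
			return i;
	}
	return -1;
}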
static void __cpuinit srat_detect_node(struct cpuinfo_x86 *c)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = cpu_has_apic ? hard_smp_processor_id() : c->apicid;

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);
	numa_set_node(cpu, node);
#endif
}
/*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*=*/
void __devinit smp_prepare_boot_cpu(void)
{
	bsp_phys_id = hard_smp_processor_id();
	physid_set(bsp_phys_id, phys_cpu_present_map);
	cpu_set(0, cpu_online_map);	/* BSP's cpu_id == 0 */
	cpu_set(0, cpu_callout_map);
	cpu_set(0, cpu_callin_map);

	/*
	 * Initialize the logical to physical CPU number mapping
	 */
	init_cpu_to_physid();
	map_cpu_to_physid(0, bsp_phys_id);
	current_thread_info()->cpu = 0;
}
/* Check if this thread is the owner for PerfCounters in this core */
int nlm_common_pmc_owner(void)
{
	int cpu_id;
	unsigned long flags;

	/* Allow only one thread in each core to set perfcounter events */
	spin_lock_irqsave(&nlm_common_perf_lock, flags);
	cpu_id = netlogic_cpu_id();
	if (nlm_common_perf_core_setup[cpu_id] == hard_smp_processor_id()) {
		spin_unlock_irqrestore(&nlm_common_perf_lock, flags);
		return 1;
	}
	spin_unlock_irqrestore(&nlm_common_perf_lock, flags);
	return 0;
}
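/*
 * Hedged usage sketch: a caller would gate programming of the shared
 * per-core counters on this ownership test, so only one hardware
 * thread per core touches them. nlm_setup_perf_event() is a
 * hypothetical name; write_c0_perfctrl0() is the standard MIPS CP0
 * accessor for the $25 control register used elsewhere in this code.
 */
static void nlm_setup_perf_event(unsigned int control)
{
	if (!nlm_common_pmc_owner())
		return;	/* another thread in this core owns the counters */

	write_c0_perfctrl0(control);
}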
void __init smp_prepare_boot_cpu(void)
{
	int cpuid = hard_smp_processor_id();

	if (cpuid >= NR_CPUS) {
		prom_printf("Serious problem, boot cpu id >= NR_CPUS\n");
		prom_halt();
	}
	if (cpuid != 0)
		printk("boot cpu id != 0, this could work but is untested\n");

	current_thread_info()->cpu = cpuid;
	cpu_set(cpuid, cpu_online_map);
	cpu_set(cpuid, phys_cpu_present_map);
}
void armadaxp_fabric_restore_deepIdle(void)
{
	unsigned int processor_id = hard_smp_processor_id();
	MV_U32 reg;

	/* Cancel the power-down request */
	reg = MV_REG_READ(PM_CONTROL_AND_CONFIG_REG(processor_id));
	reg &= ~PM_CONTROL_AND_CONFIG_PWDDN_REQ;
	MV_REG_WRITE(PM_CONTROL_AND_CONFIG_REG(processor_id), reg);

#ifdef CONFIG_CACHE_AURORA_L2
	/* Stop asking the HW to power down the L2 cache */
	reg = MV_REG_READ(PM_CONTROL_AND_CONFIG_REG(processor_id));
	reg &= ~PM_CONTROL_AND_CONFIG_L2_PWDDN;
	MV_REG_WRITE(PM_CONTROL_AND_CONFIG_REG(processor_id), reg);
#endif

	/* Re-enable delivery of other CPU cores' cache maintenance, TLB,
	 * and instruction synchronization operations to this CPU core */
	/* TODO */

	/* Undo the wakeup-event setup done on deep-idle entry */
	reg = MV_REG_READ(PM_STATUS_AND_MASK_REG(processor_id));
	reg &= ~(PM_STATUS_AND_MASK_IRQ_WAKEUP | PM_STATUS_AND_MASK_FIQ_WAKEUP);
	reg &= ~PM_STATUS_AND_MASK_CPU_IDLE_WAIT;
	reg &= ~PM_STATUS_AND_MASK_SNP_Q_EMPTY_WAIT;
	/* reg &= ~PM_STATUS_AND_MASK_DBG_WAKEUP; */

	/* Unmask interrupts (clear the IRQ/FIQ mask bits) */
	reg &= ~(PM_STATUS_AND_MASK_IRQ_MASK | PM_STATUS_AND_MASK_FIQ_MASK);
	MV_REG_WRITE(PM_STATUS_AND_MASK_REG(processor_id), reg);

#if defined(CONFIG_AURORA_IO_CACHE_COHERENCY) || defined(CONFIG_SMP)
	/* Re-enable delivery of snoop requests to this CPU core */
	hw_sem_lock();
	reg = MV_REG_READ(MV_COHERENCY_FABRIC_CTRL_REG);
	reg |= 1 << (24 + processor_id);
	MV_REG_WRITE(MV_COHERENCY_FABRIC_CTRL_REG, reg);
	hw_sem_unlock();
#endif

#ifdef CONFIG_ARMADA_XP_DEEP_IDLE_L2_WA
	/* Restore the CIB Control and Configuration register except CIB Ack */
	MV_REG_WRITE(MV_CIB_CTRL_CFG_REG, cib_ctrl_cfg_reg & ~(1 << 9));
	/* Delay needed for the erratum "Dunit MBus starvation causes poor
	 * performance during deep-idle" */
	udelay(10);
	/* Restore CIB Control including CIB Ack */
	MV_REG_WRITE(MV_CIB_CTRL_CFG_REG, cib_ctrl_cfg_reg);
#endif
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __cpuinit start_secondary(void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef CONFIG_PRINTK_TIME
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
#endif
	efi_map_pal_code();
	cpu_init();
	preempt_disable();
	smp_callin();

	cpu_idle();
	return 0;
}
static irqreturn_t grp1_tmr1_irq(int irq, void *dev_id)
{
	struct clock_event_device *c = dev_id;
	u32 cpuid = hard_smp_processor_id();
	unsigned long flags;

	/* group-1 timer-1 serves as CPU 3's private timer */
	WARN_ON(cpuid != 3);

	spin_lock_irqsave(&soc_tmr_lock[1], flags);
	disable_timer(group1_base, 1);
	spin_unlock_irqrestore(&soc_tmr_lock[1], flags);

	c->event_handler(c);
	return IRQ_HANDLED;
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __init start_secondary(void *unused)
{
	extern int cpu_idle(void);

	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
	cpu_init();
	smp_callin();
	Dprintk("CPU %d is set to go.\n", smp_processor_id());
	while (!atomic_read(&smp_commenced))
		;
	Dprintk("CPU %d is starting idle.\n", smp_processor_id());
	return cpu_idle();
}
static int percpu_timer_set_next_event(unsigned long delta,
				       struct clock_event_device *dev)
{
	unsigned long flags;
	u32 cpuid = hard_smp_processor_id();
	/* two timers per group: cpuid >> 1 selects the group,
	 * cpuid & 0x1 the timer within it */
	u32 timern = cpuid & 0x1;
	void __iomem *base = irq_map[cpuid].base;

	/* if the cpu is offline, we must not program its local timer */
	BUG_ON(!cpu_online(cpuid));

	spin_lock_irqsave(&soc_tmr_lock[cpuid >> 1], flags);
	reprogram_timer(base, timern, delta);
	spin_unlock_irqrestore(&soc_tmr_lock[cpuid >> 1], flags);

	return 0;
}
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void __ref platform_cpu_die(unsigned int cpu)
{
#ifdef DEBUG
	unsigned int this_cpu = hard_smp_processor_id();

	if (cpu != this_cpu) {
		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
		       this_cpu, cpu);
	}
#endif

	complete(&cpu_killed);

	/*
	 * we're ready for shutdown now, so do it
	 */
	platform_do_lowpower(cpu);
}
/*
 * platform-specific code to shutdown a CPU
 *
 * Called with IRQs disabled
 */
void platform_cpu_die(unsigned int cpu)
{
#ifdef DEBUG
	unsigned int this_cpu = hard_smp_processor_id();

	if (cpu != this_cpu) {
		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
		       this_cpu, cpu);
		BUG();
	}
#endif

	complete(&cpu_killed);

	/* directly enter low power state, skipping secure registers */
	platform_do_lowpower(cpu);
}
static irqreturn_t j2_ipi_interrupt_handler(int irq, void *arg)
{
	unsigned cpu = hard_smp_processor_id();
	volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
	unsigned messages, i;

	/* atomically fetch and clear the pending-message bits */
	do messages = *pmsg;
	while (cmpxchg(pmsg, messages, 0) != messages);

	if (!messages)
		return IRQ_NONE;

	for (i = 0; i < SMP_MSG_NR; i++)
		if (messages & (1U << i))
			smp_message_recv(i);

	return IRQ_HANDLED;
}
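/*
 * Hedged sketch of the sending side of this scheme: publish the message
 * bit with the same cmpxchg loop the handler uses to drain it, then
 * raise the shared IPI. j2_ipi_trigger() is a hypothetical stand-in for
 * the actual mailbox write, which this source does not show.
 */
static void j2_send_ipi(unsigned int cpu, unsigned int message)
{
	volatile unsigned *pmsg = &per_cpu(j2_ipi_messages, cpu);
	unsigned old;

	/* publish the message bit before raising the interrupt */
	do old = *pmsg;
	while (cmpxchg(pmsg, old, old | (1U << message)) != old);

	j2_ipi_trigger(cpu);	/* hypothetical mailbox poke */
}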
static void __cpuinit srat_detect_node(void)
{
#if defined(CONFIG_NUMA) && defined(CONFIG_X86_64)
	unsigned node;
	int cpu = smp_processor_id();
	int apicid = hard_smp_processor_id();

	/* Don't do the funky fallback heuristics the AMD version employs
	   for now. */
	node = apicid_to_node[apicid];
	if (node == NUMA_NO_NODE || !node_online(node))
		node = first_node(node_online_map);
	numa_set_node(cpu, node);

	printk(KERN_INFO "CPU %d/0x%x -> Node %d\n", cpu, apicid, node);
#endif
}
int __cpuinit xlr_wakeup_secondary_cpus(void)
{
	unsigned int i, boot_cpu;

	/*
	 * In case of RMI boot, hit with NMI to get the cores
	 * from bootloader to linux code.
	 */
	boot_cpu = hard_smp_processor_id();
	nlm_set_nmi_handler(nlm_rmiboot_preboot);
	for (i = 0; i < NR_CPUS; i++) {
		if (i == boot_cpu || (nlm_cpumask & (1u << i)) == 0)
			continue;
		nlm_pic_send_ipi(nlm_pic_base, i, 1, 1);	/* send NMI */
	}

	return 0;
}
inline void perfctr_cpu_resume(struct perfctr_cpu_state *state)
{
	int cpu_id;

	cpu_id = hard_smp_processor_id() / 4;
	spin_lock(&pmc_resource[cpu_id].lock);
	if (pmc_resource[cpu_id].current_thread != -1) {
		/* printk(KERN_INFO "PMCounters unavailable for process %d\n",
		          current->pid); */
		spin_unlock(&pmc_resource[cpu_id].lock);
		return;
	}
	pmc_resource[cpu_id].current_thread = netlogic_thr_id();
	spin_unlock(&pmc_resource[cpu_id].lock);

	if (perfctr_cstatus_has_ictrs(state->cstatus))
		perfctr_cpu_iresume(state);

	/* The interrupt-mode counters were frozen by _iresume() above;
	 * writing the control registers starts them collecting again,
	 * and recording the start values below completes the 'resume'
	 * process. */
	perfctr_cpu_write_control(state);
	{
		struct perfctr_low_ctrs now;
		unsigned int i, cstatus, nrctrs;

		perfctr_cpu_read_counters(state, &now);
		cstatus = state->cstatus;
		/* the start state of the counters has to be recorded
		 * only in resume(), and that is what is done here */
		if (perfctr_cstatus_has_tsc(cstatus))
			state->tsc_start = now.tsc;
		nrctrs = perfctr_cstatus_nractrs(cstatus);
		for (i = 0; i < nrctrs; ++i)
			state->pmc[i].start = now.pmc[i];
	}
	/* XXX: if (SMP && start.tsc == now.tsc) ++now.tsc; */
}
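/*
 * Hedged sketch of the matching suspend path in this model: read the
 * counters one last time and fold the deltas since the start values
 * recorded by resume() into running sums. The .sum and tsc_sum fields
 * are assumptions about the state layout; only the accessors used in
 * the function above are taken from this source.
 */
static void perfctr_cpu_suspend_sketch(struct perfctr_cpu_state *state)
{
	struct perfctr_low_ctrs now;
	unsigned int i, cstatus, nrctrs;

	perfctr_cpu_read_counters(state, &now);
	cstatus = state->cstatus;
	if (perfctr_cstatus_has_tsc(cstatus))
		state->tsc_sum += now.tsc - state->tsc_start;	/* assumed field */
	nrctrs = perfctr_cstatus_nractrs(cstatus);
	for (i = 0; i < nrctrs; ++i)
		state->pmc[i].sum += now.pmc[i] - state->pmc[i].start;
}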
void platform_cpu_die(unsigned int cpu)
{
#ifdef DEBUG
	unsigned int this_cpu = hard_smp_processor_id();

	if (cpu != this_cpu) {
		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
		       this_cpu, cpu);
		BUG();
	}
#endif

	tegra_sleep_reset();
	/*
	 * tegra_cpu_suspend can return through tegra_cpu_resume, but that
	 * should never happen for a hotplugged cpu
	 */
	BUG();
}
void die_if_kernel(char *str, struct pt_regs *regs, long err,
		   unsigned long *r9_15)
{
	if (regs->ps & 8)
		return;
#ifdef __SMP__
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, current->pid, str, err);
	dik_show_regs(regs, r9_15);
	dik_show_code((unsigned int *)regs->pc);
	dik_show_trace((unsigned long *)(regs + 1));

	if (current->tss.flags & (1UL << 63)) {
		printk("die_if_kernel recursion detected.\n");
		sti();
		while (1)
			;
	}
	current->tss.flags |= (1UL << 63);
	do_exit(SIGSEGV);
}
void die_if_kernel(char *str, struct pt_regs *regs, long err,
		   unsigned long *r9_15)
{
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE);
	dik_show_trace((unsigned long *)(regs + 1));
	dik_show_code((unsigned int *)regs->pc);

	if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1)
			;
	}
	do_exit(SIGSEGV);
}
/*
 * cpu_init() initializes state that is per-CPU.
 */
void __devinit cpu_init(void)
{
	int addr = hard_smp_processor_id();

	/*
	 * Store processor id in lowcore (used e.g. in timer_interrupt)
	 */
	get_cpu_id(&S390_lowcore.cpu_data.cpu_id);
	S390_lowcore.cpu_data.cpu_addr = addr;

	/*
	 * Force FPU initialization:
	 */
	clear_thread_flag(TIF_USEDFPU);
	clear_used_math();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;
	if (current->mm)
		BUG();
	enter_lazy_tlb(&init_mm, current);
}
/*
 * Activate a secondary processor.  head.S calls this.
 */
int __devinit start_secondary(void *unused)
{
	/* Early console may use I/O ports */
	ia64_set_kr(IA64_KR_IO_BASE, __pa(ia64_iobase));
#ifndef XEN
	Dprintk("start_secondary: starting CPU 0x%x\n", hard_smp_processor_id());
	efi_map_pal_code();
#endif
	cpu_init();
	smp_callin();

#ifdef XEN
	if (vmx_enabled)
		vmx_init_env(0, 0);

	startup_cpu_idle_loop();
#else
	cpu_idle();
#endif
	return 0;
}
/*
 * Cycle through the APs sending Wakeup IPIs to boot each.
 */
void __init smp_prepare_cpus(unsigned int max_cpus)
{
	int boot_cpu_id = hard_smp_processor_id();

	/*
	 * Initialize the per-CPU profiling counter/multiplier
	 */
	smp_setup_percpu_timer();

	/*
	 * We have the boot CPU online for sure.
	 */
	cpu_set(0, cpu_online_map);
	cpu_set(0, cpu_callin_map);

	local_cpu_data->loops_per_jiffy = loops_per_jiffy;
	ia64_cpu_to_sapicid[0] = boot_cpu_id;

	printk(KERN_INFO "Boot processor id 0x%x/0x%x\n", 0, boot_cpu_id);

	current_thread_info()->cpu = 0;

	/*
	 * If SMP should be disabled, then really disable it!
	 */
	if (!max_cpus) {
		printk(KERN_INFO "SMP mode deactivated.\n");
		cpus_clear(cpu_online_map);
		cpus_clear(cpu_present_map);
		cpus_clear(cpu_possible_map);
		cpu_set(0, cpu_online_map);
		cpu_set(0, cpu_present_map);
		cpu_set(0, cpu_possible_map);
		return;
	}
}
static void percpu_timer_set_mode(enum clock_event_mode mode,
				  struct clock_event_device *dev)
{
	unsigned long flags;
	u32 cpuid = hard_smp_processor_id();
	u32 timern = cpuid & 0x1;
	void __iomem *base = irq_map[cpuid].base;

	spin_lock_irqsave(&soc_tmr_lock[cpuid >> 1], flags);
	switch (mode) {
	case CLOCK_EVT_MODE_ONESHOT:
	case CLOCK_EVT_MODE_UNUSED:
	case CLOCK_EVT_MODE_SHUTDOWN:
		disable_timer(base, timern);
		break;
	case CLOCK_EVT_MODE_RESUME:
	case CLOCK_EVT_MODE_PERIODIC:
		break;
	}
	spin_unlock_irqrestore(&soc_tmr_lock[cpuid >> 1], flags);
}
/*
 * Initialize the logical CPU number to SAPICID mapping
 */
void __init smp_build_cpu_map(void)
{
	int sapicid, cpu, i;
	int boot_cpu_id = hard_smp_processor_id();

	for (cpu = 0; cpu < NR_CPUS; cpu++)
		ia64_cpu_to_sapicid[cpu] = -1;

	ia64_cpu_to_sapicid[0] = boot_cpu_id;
	init_cpu_present(cpumask_of(0));
	set_cpu_possible(0, true);
	for (cpu = 1, i = 0; i < smp_boot_data.cpu_count; i++) {
		sapicid = smp_boot_data.cpu_phys_id[i];
		if (sapicid == boot_cpu_id)
			continue;
		set_cpu_present(cpu, true);
		set_cpu_possible(cpu, true);
		ia64_cpu_to_sapicid[cpu] = sapicid;
		cpu++;
	}
}
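/*
 * Hedged sketch of how the table built above is consumed: on ia64 the
 * cpu_physical_id() accessor is believed to be a direct index into
 * ia64_cpu_to_sapicid. The exact macro form is an assumption, not
 * quoted from this source.
 */
#define cpu_physical_id(i)	ia64_cpu_to_sapicid[i]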
static void netlogic_cpu_stop(void *args)
{
	int cpu_id = netlogic_cpu_id();
	int core_start = cpu_id * 4;
	int i;
	unsigned long flags;

	local_irq_save(flags);
	g_stop_pmc[hard_smp_processor_id()] = 1;
	local_irq_restore(flags);

	if (nlm_common_pmc_owned())
		return;

	/* Stop all counters on current CPU */
	local_irq_save(flags);
	__write_32bit_c0_register($25, 0, 0);
	__write_32bit_c0_register($25, 2, 0);
	for (i = 0; i < 4; i++) {
		nlm_common_pc_of_mask1[core_start + i] = 0;
		nlm_common_pc_of_mask2[core_start + i] = 0;
	}
	local_irq_restore(flags);
}
void platform_cpu_die(unsigned int cpu)
{
#ifdef DEBUG
	unsigned int this_cpu = hard_smp_processor_id();

	if (cpu != this_cpu) {
		printk(KERN_CRIT "Eek! platform_cpu_die running on %u, should be %u\n",
		       this_cpu, cpu);
		BUG();
	}
#endif

	gic_cpu_exit(0);
	barrier();
	complete(&per_cpu(cpu_killed, cpu));
	flush_cache_all();
	barrier();
	__cortex_a9_save(0);

	/* return happens from __cortex_a9_restore */
	barrier();
	writel(smp_processor_id(), EVP_CPU_RESET_VECTOR);
}
int safe_smp_processor_id(void)
{
	int apicid, i;

	if (disable_apic)
		return 0;

	apicid = hard_smp_processor_id();

	/* fast path: logical and physical IDs coincide */
	if (x86_cpu_to_apicid[apicid] == apicid)
		return apicid;

	for (i = 0; i < NR_CPUS; ++i) {
		if (x86_cpu_to_apicid[i] == apicid)
			return i;
	}

	/*
	 * No entries in x86_cpu_to_apicid?  Either no MPS/ACPI,
	 * or called too early.  Either way, we must be CPU 0.
	 */
	if (x86_cpu_to_apicid[0] == BAD_APICID)
		return 0;

	return 0;	/* Should not happen */
}
/* cpuid returns the value latched in the HW at reset, not the APIC ID
 * register's value.  For any box whose BIOS changes APIC IDs, like
 * clustered APIC systems, we must use hard_smp_processor_id.
 *
 * See Intel's IA-32 SW Dev's Manual Vol2 under CPUID.
 */
static unsigned int phys_pkg_id(int index_msb)
{
	return hard_smp_processor_id() >> index_msb;
}
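/*
 * Hedged usage sketch: index_msb is the number of APIC-ID bits that
 * address logical CPUs within one package, so shifting them away
 * leaves the package number. get_count_order() is the standard kernel
 * helper for that bit count; the sibling count of 8 is illustrative
 * only, not taken from this source.
 */
static unsigned int example_pkg_of_this_cpu(void)
{
	unsigned int siblings = 8;			/* assumed logical CPUs per package */
	int index_msb = get_count_order(siblings);	/* = 3 for 8 siblings */

	/* e.g. APIC ID 0x1a >> 3 == package 3 */
	return phys_pkg_id(index_msb);
}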