static inline s64 idle_loop_epilog(unsigned long in_purr, ktime_t kt_before)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;

	return ktime_to_us(ktime_sub(ktime_get_real(), kt_before));
}
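/*
 * Editorial note: the epilog above pairs with a prolog that records the same
 * two values before the CPU idles. A minimal sketch of what that prolog
 * presumably looks like is given below; the body is inferred from the callers
 * in this collection (see dedicated_cede_loop further down), not copied
 * verbatim from the kernel source.
 */
static inline void idle_loop_prolog(unsigned long *in_purr, ktime_t *kt_before)
{
	*kt_before = ktime_get_real();
	*in_purr = mfspr(SPRN_PURR);
	/* Tell the hypervisor this vCPU is idle so it can dispatch other work. */
	get_lppaca()->idle = 1;
}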
static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;
}
static inline void idle_loop_epilog(unsigned long in_purr)
{
	u64 wait_cycles;

	wait_cycles = be64_to_cpu(get_lppaca()->wait_state_cycles);
	wait_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->wait_state_cycles = cpu_to_be64(wait_cycles);
	get_lppaca()->idle = 0;

	if (irqs_disabled())
		local_irq_enable();
	ppc64_runlatch_on();
}
static void pseries_dedicated_idle_sleep(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned long in_purr, out_purr;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->cpuctls_task_attrs = 1;
	in_purr = mfspr(SPRN_PURR);

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently.  If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->cpuctls_task_attrs = 0;
	out_purr = mfspr(SPRN_PURR);
	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
	/* Don't risk a hypervisor call if we're crashing */
	if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
		int ret;
		int cpu = smp_processor_id();
		int hwcpu = hard_smp_processor_id();

		if (get_lppaca()->dtl_enable_mask) {
			ret = unregister_dtl(hwcpu);
			if (ret) {
				pr_err("WARNING: DTL deregistration for cpu "
				       "%d (hw %d) failed with %d\n",
				       cpu, hwcpu, ret);
			}
		}

		ret = unregister_slb_shadow(hwcpu);
		if (ret) {
			pr_err("WARNING: SLB shadow buffer deregistration "
			       "for cpu %d (hw %d) failed with %d\n",
			       cpu, hwcpu, ret);
		}

		ret = unregister_vpa(hwcpu);
		if (ret) {
			pr_err("WARNING: VPA deregistration for cpu %d "
			       "(hw %d) failed with %d\n",
			       cpu, hwcpu, ret);
		}
	}
}
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	get_lppaca()->donate_dedicated_cpu = 1;

	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;
	idle_loop_epilog(in_purr);

	return index;
}
static inline void idle_loop_prolog(unsigned long *in_purr)
{
	*in_purr = mfspr(SPRN_PURR);
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
}
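/*
 * Editorial note: a hedged sketch of how this prolog/epilog pair is typically
 * consumed by the polling "snooze" cpuidle state. The body is an illustration
 * reconstructed from the surrounding functions in this collection, not a
 * verbatim copy of the driver.
 */
static int snooze_loop(struct cpuidle_device *dev,
			struct cpuidle_driver *drv,
			int index)
{
	unsigned long in_purr;

	idle_loop_prolog(&in_purr);
	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	/* Spin at low SMT priority until there is work to do. */
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
	}

	HMT_medium();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	idle_loop_epilog(in_purr);

	return index;
}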
static void pseries_shared_idle_sleep(void)
{
	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;

	/*
	 * Yield the processor to the hypervisor.  We return if
	 * an external interrupt occurs (which are driven prior
	 * to returning here) or if a prod occurs from another
	 * processor. When returning here, external interrupts
	 * are enabled.
	 */
	cede_processor();

	get_lppaca()->idle = 0;
}
static int dedicated_cede_loop(struct cpuidle_device *dev,
				struct cpuidle_driver *drv,
				int index)
{
	unsigned long in_purr;
	ktime_t kt_before;

	idle_loop_prolog(&in_purr, &kt_before);
	get_lppaca()->donate_dedicated_cpu = 1;

	ppc64_runlatch_off();
	HMT_medium();
	check_and_cede_processor();

	get_lppaca()->donate_dedicated_cpu = 0;
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);

	return index;
}
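/*
 * Editorial note: check_and_cede_processor() and cede_processor() are used
 * above but not shown in this collection. As a rough, assumed sketch:
 * cede_processor() is a thin wrapper around the H_CEDE hypercall, and
 * check_and_cede_processor() only cedes once the lazy interrupt-disable state
 * has been reconciled, so a pending interrupt is not lost across the cede.
 */
static inline void cede_processor(void)
{
	plpar_hcall_norets(H_CEDE);
}

static void check_and_cede_processor(void)
{
	/*
	 * Make sure the soft-disabled interrupt state is properly tracked and
	 * that no interrupt arrived while we were soft-disabled; only then is
	 * it safe to cede the processor to the hypervisor.
	 */
	if (prep_irq_for_idle())
		cede_processor();
}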
static void pseries_lpar_enable_pmcs(void)
{
	unsigned long set, reset;

	set = 1UL << 63;
	reset = 0;
	plpar_hcall_norets(H_PERFMON, set, reset);

	/* instruct hypervisor to maintain PMCs */
	if (firmware_has_feature(FW_FEATURE_SPLPAR))
		get_lppaca()->pmcregs_in_use = 1;
}
static void __init pSeries_setup_arch(void)
{
	/* Discover PIC type and setup ppc_md accordingly */
	pseries_discover_pic();

	/* openpic global configuration register (64-bit format). */
	/* openpic Interrupt Source Unit pointer (64-bit format). */
	/* python0 facility area (mmio) (64-bit format) REAL address. */

	/* init to some ~sane value until calibrate_delay() runs */
	loops_per_jiffy = 50000000;

	if (ROOT_DEV == 0) {
		printk("No ramdisk, default root is /dev/sda2\n");
		ROOT_DEV = Root_SDA2;
	}

	fwnmi_init();

	/* Find and initialize PCI host bridges */
	init_pci_config_tokens();
	find_and_init_phbs();
	eeh_init();
	pSeries_nvram_init();

	/* Choose an idle loop */
	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		vpa_init(boot_cpuid);
		if (get_lppaca()->shared_proc) {
			printk(KERN_DEBUG "Using shared processor idle loop\n");
			ppc_md.power_save = pseries_shared_idle_sleep;
		} else {
			printk(KERN_DEBUG "Using dedicated idle loop\n");
			ppc_md.power_save = pseries_dedicated_idle_sleep;
		}
	} else {
		printk(KERN_DEBUG "Using default idle loop\n");
	}

	if (firmware_has_feature(FW_FEATURE_LPAR))
		ppc_md.enable_pmcs = pseries_lpar_enable_pmcs;
	else
		ppc_md.enable_pmcs = power4_enable_pmcs;
}
/*
 * Document me.
 */
static void __init iSeries_setup_arch(void)
{
	if (get_lppaca()->shared_proc) {
		ppc_md.idle_loop = iseries_shared_idle;
		printk(KERN_DEBUG "Using shared processor idle loop\n");
	} else {
		ppc_md.idle_loop = iseries_dedicated_idle;
		printk(KERN_DEBUG "Using dedicated idle loop\n");
	}

	/* Setup the Lp Event Queue */
	setup_hvlpevent_queue();

	printk("Max logical processors = %d\n",
			itVpdAreas.xSlicMaxLogicalProcs);
	printk("Max physical processors = %d\n",
			itVpdAreas.xSlicMaxPhysicalProcs);
}
/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{
	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (firmware_has_feature(FW_FEATURE_SPLPAR)) {
		if (lppaca_shared_proc(get_lppaca())) {
			cpuidle_state_table = shared_states;
			max_idle_state = ARRAY_SIZE(shared_states);
		} else {
			cpuidle_state_table = dedicated_states;
			max_idle_state = ARRAY_SIZE(dedicated_states);
		}
	} else
		return -ENODEV;

	return 0;
}
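/*
 * Editorial note: lppaca_shared_proc() abstracts the "is this a shared
 * processor partition?" test that the other probe variant below performs by
 * reading get_lppaca()->shared_proc directly. A minimal sketch, assuming the
 * flag still lives in the lppaca; the real accessor may read a different
 * field or bit.
 */
static inline bool lppaca_shared_proc(struct lppaca *l)
{
	/* Non-zero when the partition runs on shared (micro-partitioned) CPUs. */
	return !!l->shared_proc;
}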
static void yield_shared_processor(void)
{
	unsigned long tb;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb + tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_lppaca()->int_dword.fields.decr_int = 1;
	ppc64_runlatch_on();
	process_iSeries_events();
}
static void pseries_kexec_cpu_down(int crash_shutdown, int secondary)
{
	/* Don't risk a hypervisor call if we're crashing */
	if (firmware_has_feature(FW_FEATURE_SPLPAR) && !crash_shutdown) {
		unsigned long addr;

		addr = __pa(get_slb_shadow());
		if (unregister_slb_shadow(hard_smp_processor_id(), addr))
			printk("SLB shadow buffer deregistration of "
			       "cpu %u (hw_cpu_id %d) failed\n",
			       smp_processor_id(),
			       hard_smp_processor_id());

		addr = __pa(get_lppaca());
		if (unregister_vpa(hard_smp_processor_id(), addr)) {
			printk("VPA deregistration of cpu %u (hw_cpu_id %d) "
			       "failed\n", smp_processor_id(),
			       hard_smp_processor_id());
		}
	}
}
/*
 * pseries_idle_probe()
 * Choose state table for shared versus dedicated partition
 */
static int pseries_idle_probe(void)
{
	if (!firmware_has_feature(FW_FEATURE_SPLPAR))
		return -ENODEV;

	if (cpuidle_disable != IDLE_NO_OVERRIDE)
		return -ENODEV;

	if (max_idle_state == 0) {
		printk(KERN_DEBUG "pseries processor idle disabled.\n");
		return -EPERM;
	}

	if (get_lppaca()->shared_proc)
		cpuidle_state_table = shared_states;
	else
		cpuidle_state_table = dedicated_states;

	return 0;
}
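/*
 * Editorial note: shared_states and dedicated_states are the cpuidle state
 * tables selected above. A hedged sketch of their shape follows; the names,
 * latencies, residencies and the shared_cede_loop callback are illustrative
 * assumptions rather than the driver's actual tuning.
 */
static struct cpuidle_state dedicated_states[] = {
	{ /* Snooze: poll at low SMT priority */
		.name = "snooze",
		.desc = "snooze",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &snooze_loop },
	{ /* CEDE: yield the vCPU to the hypervisor */
		.name = "CEDE",
		.desc = "CEDE",
		.exit_latency = 10,
		.target_residency = 100,
		.enter = &dedicated_cede_loop },
};

static struct cpuidle_state shared_states[] = {
	{ /* Shared Cede: the only deep state on a shared-processor LPAR */
		.name = "Shared Cede",
		.desc = "Shared Cede",
		.exit_latency = 0,
		.target_residency = 0,
		.enter = &shared_cede_loop },
};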
static inline void idle_loop_epilog(unsigned long in_purr)
{
	get_lppaca()->wait_state_cycles += mfspr(SPRN_PURR) - in_purr;
	get_lppaca()->idle = 0;
}
static void pseries_mach_cpu_die(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned int hwcpu = hard_smp_processor_id();
	u8 cede_latency_hint = 0;

	local_irq_disable();
	idle_task_exit();
	if (xive_enabled())
		xive_teardown_cpu();
	else
		xics_teardown_cpu();

	if (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
		set_cpu_current_state(cpu, CPU_STATE_INACTIVE);
		if (ppc_md.suspend_disable_cpu)
			ppc_md.suspend_disable_cpu();

		cede_latency_hint = 2;

		get_lppaca()->idle = 1;
		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 1;

		while (get_preferred_offline_state(cpu) == CPU_STATE_INACTIVE) {
			while (!prep_irq_for_idle()) {
				local_irq_enable();
				local_irq_disable();
			}

			extended_cede_processor(cede_latency_hint);
		}

		local_irq_disable();

		if (!lppaca_shared_proc(get_lppaca()))
			get_lppaca()->donate_dedicated_cpu = 0;
		get_lppaca()->idle = 0;

		if (get_preferred_offline_state(cpu) == CPU_STATE_ONLINE) {
			unregister_slb_shadow(hwcpu);

			hard_irq_disable();
			/*
			 * Call to start_secondary_resume() will not return.
			 * Kernel stack will be reset and start_secondary()
			 * will be called to continue the online operation.
			 */
			start_secondary_resume();
		}
	}

	/* Requested state is CPU_STATE_OFFLINE at this point */
	WARN_ON(get_preferred_offline_state(cpu) != CPU_STATE_OFFLINE);

	set_cpu_current_state(cpu, CPU_STATE_OFFLINE);
	unregister_slb_shadow(hwcpu);
	rtas_stop_self();

	/* Should never get here... */
	BUG();
	for (;;)
		;
}
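/*
 * Editorial note: extended_cede_processor() is used above but not defined in
 * this collection. A hedged sketch of the assumed wrapper: it temporarily
 * publishes a cede latency hint in the lppaca, so the hypervisor knows how
 * deep a nap it may take, and then performs the ordinary H_CEDE. The
 * cede_latency_hint field name is an assumption for illustration.
 */
static inline long extended_cede_processor(unsigned long latency_hint)
{
	long rc;
	u8 old_latency_hint = get_lppaca()->cede_latency_hint;

	/* Advertise how long we expect to stay ceded, then cede. */
	get_lppaca()->cede_latency_hint = latency_hint;
	rc = plpar_hcall_norets(H_CEDE);
	get_lppaca()->cede_latency_hint = old_latency_hint;

	return rc;
}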