static void yield_shared_processor(void)
{
	unsigned long tb;
	unsigned long yieldTime;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb + tb_ticks_per_jiffy);

	yieldTime = get_tb() - tb;
	if (yieldTime > maxYieldTime)
		maxYieldTime = yieldTime;

	if (yieldTime < minYieldTime)
		minYieldTime = yieldTime;

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_paca()->lppaca.xIntDword.xFields.xDecrInt = 1;
	process_iSeries_events();
}
unsigned long __init rtas_get_boot_time(void)
{
	int ret[8];
	int error;
	unsigned int wait_time;
	u64 max_wait_tb;

	max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);

		wait_time = rtas_busy_delay_time(error);
		if (wait_time) {
			/* This is boot time, so just spin. */
			udelay(wait_time * 1000);
		}
	} while (wait_time && (get_tb() < max_wait_tb));

	if (error != 0) {
		printk_ratelimited(KERN_WARNING
				   "error: reading the clock failed (%d)\n",
				   error);
		return 0;
	}

	return mktime(ret[0], ret[1], ret[2], ret[3], ret[4], ret[5]);
}
int rtas_set_rtc_time(struct rtc_time *tm)
{
	int error, wait_time;
	u64 max_wait_tb;

	max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("set-time-of-day"), 7, 1, NULL,
				  tm->tm_year + 1900, tm->tm_mon + 1,
				  tm->tm_mday, tm->tm_hour, tm->tm_min,
				  tm->tm_sec, 0);

		wait_time = rtas_busy_delay_time(error);
		if (wait_time) {
			if (in_interrupt())
				return 1;	/* can't sleep in interrupt context */
			msleep(wait_time);
		}
	} while (wait_time && (get_tb() < max_wait_tb));

	if (error != 0)
		printk_ratelimited(KERN_WARNING
				   "error: setting the clock failed (%d)\n",
				   error);

	return 0;
}
static void smp_chrp_setup_cpu(int cpu_nr)
{
	static atomic_t ready = ATOMIC_INIT(1);
	static volatile int frozen = 0;

	if (systemcfg->platform == PLATFORM_PSERIES_LPAR) {
		/* timebases already synced under the hypervisor. */
		paca[cpu_nr].next_jiffy_update_tb = tb_last_stamp = get_tb();
		if (cpu_nr == 0) {
			systemcfg->tb_orig_stamp = tb_last_stamp;
			/* Should update naca->stamp_xsec.
			 * For now we leave it which means the time can be some
			 * number of msecs off until someone does a settimeofday()
			 */
		}
		smp_tb_synchronized = 1;
	} else {
		if (cpu_nr == 0) {
			/* wait for all the others */
			while (atomic_read(&ready) < smp_num_cpus)
				barrier();
			atomic_set(&ready, 1);
			/* freeze the timebase */
			rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
			mb();
			frozen = 1;
			set_tb(0, 0);
			paca[0].next_jiffy_update_tb = 0;
			smp_space_timers(smp_num_cpus);
			while (atomic_read(&ready) < smp_num_cpus)
				barrier();
			/* thaw the timebase again */
			rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
			mb();
			frozen = 0;
			tb_last_stamp = get_tb();
			systemcfg->tb_orig_stamp = tb_last_stamp;
			smp_tb_synchronized = 1;
		} else {
			atomic_inc(&ready);
			while (!frozen)
				barrier();
			set_tb(0, 0);
			mb();
			atomic_inc(&ready);
			while (frozen)
				barrier();
		}
	}

	if (OpenPIC_Addr) {
		do_openpic_setup_cpu();
	} else {
		if (cpu_nr > 0)
			xics_setup_cpu();
	}
}
/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	/*
	 * vcpu_put would just call us again because in_use hasn't
	 * been updated yet.
	 */
	preempt_disable();

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
	svcpu->in_use = false;

out:
	preempt_enable();
}
static void pseries_dedicated_idle_sleep(void)
{
	unsigned int cpu = smp_processor_id();
	unsigned long start_snooze;
	unsigned long *smt_snooze_delay = &__get_cpu_var(smt_snooze_delay);
	unsigned long in_purr, out_purr;

	/*
	 * Indicate to the HV that we are idle. Now would be
	 * a good time to find other work to dispatch.
	 */
	get_lppaca()->idle = 1;
	get_lppaca()->cpuctls_task_attrs = 1;
	in_purr = mfspr(SPRN_PURR);

	/*
	 * We come in with interrupts disabled, and need_resched()
	 * has been checked recently.  If we should poll for a little
	 * while, do so.
	 */
	if (*smt_snooze_delay) {
		start_snooze = get_tb() +
			*smt_snooze_delay * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while (get_tb() < start_snooze) {
			if (need_resched() || cpu_is_offline(cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
		if (need_resched() || cpu_is_offline(cpu))
			goto out;
	}

	cede_processor();

out:
	HMT_medium();
	get_lppaca()->cpuctls_task_attrs = 0;
	out_purr = mfspr(SPRN_PURR);
	get_lppaca()->wait_state_cycles += out_purr - in_purr;
	get_lppaca()->idle = 0;
}
/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}
void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
{
	u64 now;
	unsigned long dec_nsec;

	now = get_tb();
	if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
		kvmppc_core_queue_dec(vcpu);
	if (vcpu->arch.pending_exceptions)
		return;
	if (vcpu->arch.dec_expires != ~(u64)0) {
		dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
			tb_ticks_per_sec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
	}

	kvmppc_vcpu_blocked(vcpu);

	kvm_vcpu_block(vcpu);
	vcpu->stat.halt_wakeup++;

	if (vcpu->arch.dec_expires != ~(u64)0)
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

	kvmppc_vcpu_unblocked(vcpu);
}
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif
	if (kvmppc_dec_enabled(vcpu)) {
		/*
		 * The decrementer ticks at the same rate as the timebase, so
		 * that's how we convert the guest DEC value to the number of
		 * host ticks.
		 */
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		dec_nsec = vcpu->arch.dec;
		dec_nsec *= 1000;
		dec_nsec /= tb_ticks_per_usec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
		vcpu->arch.dec_jiffies = get_tb();
	} else {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
	}
}
/* Activate a secondary processor. */
int __devinit start_secondary(void *unused)
{
	unsigned int cpu = smp_processor_id();

	atomic_inc(&init_mm.mm_count);
	current->active_mm = &init_mm;

	smp_store_cpu_info(cpu);
	set_dec(tb_ticks_per_jiffy);
	preempt_disable();
	cpu_callin_map[cpu] = 1;

	smp_ops->setup_cpu(cpu);
	if (smp_ops->take_timebase)
		smp_ops->take_timebase();

	if (system_state > SYSTEM_BOOTING)
		per_cpu(last_jiffy, cpu) = get_tb();

	spin_lock(&call_lock);
	cpu_set(cpu, cpu_online_map);
	spin_unlock(&call_lock);

	local_irq_enable();

	cpu_idle();
	return 0;
}
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;
}
void ps3_set_pm_bookmark(u64 tag, u64 incident, u64 th_id)
{
	u64 bookmark;

	bookmark = (get_tb() & 0x00000000FFFFFFFFULL) |
		PS3_PM_BOOKMARK_TAG_KERNEL;
	bookmark = ((tag << 56) & PS3_PM_BOOKMARK_TAG_MASK_LO) |
		(incident << 48) | (th_id << 32) | bookmark;
	ps3_set_bookmark(bookmark);
}
static void __devinit cell_give_timebase(void)
{
	spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	while (timebase)
		barrier();

	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
}
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u64 now;

	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	preempt_disable();

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1) {
		int cpu = smp_processor_id();
		int thr = cpu_thread_in_core(cpu);

		if (thr)
			goto out;
		while (++thr < threads_per_core)
			if (cpu_online(cpu + thr))
				goto out;
	}

	kvm_guest_enter();

	__kvmppc_vcore_entry(NULL, vcpu);

	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	/* cancel pending dec exception if dec is positive */
	if (now < vcpu->arch.dec_expires && kvmppc_core_pending_dec(vcpu))
		kvmppc_core_dequeue_dec(vcpu);

	return kvmppc_handle_exit(run, vcpu, current);

out:
	preempt_enable();
	return -EBUSY;
}
void rtas_get_rtc_time(struct rtc_time *rtc_tm)
{
	int ret[8];
	int error;
	unsigned int wait_time;
	u64 max_wait_tb;

	max_wait_tb = get_tb() + tb_ticks_per_usec * 1000 * MAX_RTC_WAIT;
	do {
		error = rtas_call(rtas_token("get-time-of-day"), 0, 8, ret);

		wait_time = rtas_busy_delay_time(error);
		if (wait_time) {
			if (in_interrupt()) {
				memset(rtc_tm, 0, sizeof(struct rtc_time));
				printk_ratelimited(KERN_WARNING
						   "error: reading clock "
						   "would delay interrupt\n");
				return;	/* delay not allowed */
			}
			msleep(wait_time);
		}
	} while (wait_time && (get_tb() < max_wait_tb));

	if (error != 0) {
		printk_ratelimited(KERN_WARNING
				   "error: reading the clock failed (%d)\n",
				   error);
		return;
	}

	rtc_tm->tm_sec = ret[5];
	rtc_tm->tm_min = ret[4];
	rtc_tm->tm_hour = ret[3];
	rtc_tm->tm_mday = ret[2];
	rtc_tm->tm_mon = ret[1] - 1;
	rtc_tm->tm_year = ret[0] - 1900;
}
/*
 * timer_interrupt - gets called when the decrementer overflows,
 * with interrupts disabled.
 */
int timer_interrupt(struct pt_regs *regs)
{
	int next_dec;
	unsigned long cur_tb;
	struct paca_struct *lpaca = get_paca();
	unsigned long cpu = lpaca->xPacaIndex;

	irq_enter();

#ifndef CONFIG_PPC_ISERIES
	ppc64_do_profile(regs);
#endif

	lpaca->xLpPaca.xIntDword.xFields.xDecrInt = 0;

	while (lpaca->next_jiffy_update_tb <= (cur_tb = get_tb())) {

#ifdef CONFIG_SMP
		smp_local_timer_interrupt(regs);
#endif
		if (cpu == boot_cpuid) {
			write_seqlock(&xtime_lock);
			tb_last_stamp = lpaca->next_jiffy_update_tb;
			do_timer(regs);
			timer_sync_xtime(cur_tb);
			timer_check_rtc();
			write_sequnlock(&xtime_lock);
			if (adjusting_time && (time_adjust == 0))
				ppc_adjtimex();
		}
		lpaca->next_jiffy_update_tb += tb_ticks_per_jiffy;
	}

	next_dec = lpaca->next_jiffy_update_tb - cur_tb;
	if (next_dec > lpaca->default_decr)
		next_dec = lpaca->default_decr;
	set_dec(next_dec);

#ifdef CONFIG_PPC_ISERIES
	{
		struct ItLpQueue *lpq = lpaca->lpQueuePtr;

		if (lpq && ItLpQueue_isLpIntPending(lpq))
			lpEvent_count += ItLpQueue_process(lpq, regs);
	}
#endif

	irq_exit();

	return 1;
}
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && (VCORE_EXIT_COUNT(vc) == 0)) {
		threads_running = VCORE_ENTRY_COUNT(vc);
		threads_ceded = hweight32(vc->napping_threads);
		threads_conferring = hweight32(vc->conferring_threads);
		if (threads_ceded + threads_conferring >= threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
	return rv;
}
static int snooze_loop(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv,
		       int index)
{
	u64 snooze_exit_time;

	local_irq_enable();
	set_thread_flag(TIF_POLLING_NRFLAG);

	snooze_exit_time = get_tb() + snooze_timeout;
	ppc64_runlatch_off();
	while (!need_resched()) {
		HMT_low();
		HMT_very_low();
		if (snooze_timeout_en && get_tb() > snooze_exit_time)
			break;
	}
	HMT_medium();
	ppc64_runlatch_on();
	clear_thread_flag(TIF_POLLING_NRFLAG);
	smp_mb();

	return index;
}
static int snooze_loop(struct cpuidle_device *dev,
		       struct cpuidle_driver *drv,
		       int index)
{
	unsigned long in_purr;
	ktime_t kt_before;
	unsigned long start_snooze;
	long snooze = drv->states[0].target_residency;

	idle_loop_prolog(&in_purr, &kt_before);

	if (snooze) {
		start_snooze = get_tb() + snooze * tb_ticks_per_usec;
		local_irq_enable();
		set_thread_flag(TIF_POLLING_NRFLAG);

		while ((snooze < 0) || (get_tb() < start_snooze)) {
			if (need_resched() || cpu_is_offline(dev->cpu))
				goto out;
			ppc64_runlatch_off();
			HMT_low();
			HMT_very_low();
		}

		HMT_medium();
		clear_thread_flag(TIF_POLLING_NRFLAG);
		smp_mb();
		local_irq_disable();
	}

out:
	HMT_medium();
	dev->last_residency =
		(int)idle_loop_epilog(in_purr, kt_before);
	return index;
}
/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded.  If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}
void rtas_give_timebase(void)
{
	unsigned long flags;

	local_irq_save(flags);
	hard_irq_disable();
	arch_spin_lock(&timebase_lock);
	rtas_call(rtas_token("freeze-time-base"), 0, 1, NULL);
	timebase = get_tb();
	arch_spin_unlock(&timebase_lock);

	while (timebase)
		barrier();
	rtas_call(rtas_token("thaw-time-base"), 0, 1, NULL);
	local_irq_restore(flags);
}
static int proc_titantod_show(struct seq_file *m, void *v)
{
	unsigned long tb0, titan_tod;

	tb0 = get_tb();
	titan_tod = HvCallXm_loadTod();

	seq_printf(m, "Titan\n");
	seq_printf(m, " time base = %016lx\n", tb0);
	seq_printf(m, " titan tod = %016lx\n", titan_tod);
	seq_printf(m, " xProcFreq = %016x\n",
		   xIoHriProcessorVpd[0].xProcFreq);
	seq_printf(m, " xTimeBaseFreq = %016x\n",
		   xIoHriProcessorVpd[0].xTimeBaseFreq);
	seq_printf(m, " tb_ticks_per_jiffy = %lu\n", tb_ticks_per_jiffy);
	seq_printf(m, " tb_ticks_per_usec = %lu\n", tb_ticks_per_usec);

	if (!startTitan) {
		startTitan = titan_tod;
		startTb = tb0;
	} else {
		unsigned long titan_usec = (titan_tod - startTitan) >> 12;
		unsigned long tb_ticks = (tb0 - startTb);
		unsigned long titan_jiffies = titan_usec / (1000000/HZ);
		unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
		unsigned long titan_jiff_rem_usec =
			titan_usec - titan_jiff_usec;
		unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
		unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
		unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
		unsigned long tb_jiff_rem_usec =
			tb_jiff_rem_ticks / tb_ticks_per_usec;
		unsigned long new_tb_ticks_per_jiffy =
			(tb_ticks * (1000000/HZ)) / titan_usec;

		seq_printf(m, " titan elapsed = %lu uSec\n", titan_usec);
		seq_printf(m, " tb elapsed = %lu ticks\n", tb_ticks);
		seq_printf(m, " titan jiffies = %lu.%04lu\n", titan_jiffies,
			   titan_jiff_rem_usec);
		seq_printf(m, " tb jiffies = %lu.%04lu\n", tb_jiffies,
			   tb_jiff_rem_usec);
		seq_printf(m, " new tb_ticks_per_jiffy = %lu\n",
			   new_tb_ticks_per_jiffy);
	}
	return 0;
}
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = vcpu->arch.shared->esr;
	sregs->u.e.dear = vcpu->arch.shared->dar;
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}
int proc_get_titanTod(char *page, char **start, off_t off,
		      int count, int *eof, void *data)
{
	int len = 0;
	unsigned long tb0, titan_tod;

	tb0 = get_tb();
	titan_tod = HvCallXm_loadTod();

	len += sprintf(page+len, "Titan\n");
	len += sprintf(page+len, " time base = %016lx\n", tb0);
	len += sprintf(page+len, " titan tod = %016lx\n", titan_tod);
	len += sprintf(page+len, " xProcFreq = %016x\n",
		       xIoHriProcessorVpd[0].xProcFreq);
	len += sprintf(page+len, " xTimeBaseFreq = %016x\n",
		       xIoHriProcessorVpd[0].xTimeBaseFreq);
	len += sprintf(page+len, " tb_ticks_per_jiffy = %lu\n",
		       tb_ticks_per_jiffy);
	len += sprintf(page+len, " tb_ticks_per_usec = %lu\n",
		       tb_ticks_per_usec);

	if (!startTitan) {
		startTitan = titan_tod;
		startTb = tb0;
	} else {
		unsigned long titan_usec = (titan_tod - startTitan) >> 12;
		unsigned long tb_ticks = (tb0 - startTb);
		unsigned long titan_jiffies = titan_usec / (1000000/HZ);
		unsigned long titan_jiff_usec = titan_jiffies * (1000000/HZ);
		unsigned long titan_jiff_rem_usec =
			titan_usec - titan_jiff_usec;
		unsigned long tb_jiffies = tb_ticks / tb_ticks_per_jiffy;
		unsigned long tb_jiff_ticks = tb_jiffies * tb_ticks_per_jiffy;
		unsigned long tb_jiff_rem_ticks = tb_ticks - tb_jiff_ticks;
		unsigned long tb_jiff_rem_usec =
			tb_jiff_rem_ticks / tb_ticks_per_usec;
		unsigned long new_tb_ticks_per_jiffy =
			(tb_ticks * (1000000/HZ)) / titan_usec;

		len += sprintf(page+len, " titan elapsed = %lu uSec\n",
			       titan_usec);
		len += sprintf(page+len, " tb elapsed = %lu ticks\n",
			       tb_ticks);
		len += sprintf(page+len, " titan jiffies = %lu.%04lu\n",
			       titan_jiffies, titan_jiff_rem_usec);
		len += sprintf(page+len, " tb jiffies = %lu.%04lu\n",
			       tb_jiffies, tb_jiff_rem_usec);
		len += sprintf(page+len, " new tb_ticks_per_jiffy = %lu\n",
			       new_tb_ticks_per_jiffy);
	}
	return pmc_calc_metrics(page, start, off, count, eof, len);
}
void kvmppc_emulate_dec(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec;
	unsigned long long dec_time;

	pr_debug("mtDEC: %x\n", vcpu->arch.dec);
	hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

#ifdef CONFIG_PPC_BOOK3S
	/* mtdec lowers the interrupt line when positive. */
	kvmppc_core_dequeue_dec(vcpu);

	/* POWER4+ triggers a dec interrupt if the value is < 0 */
	if (vcpu->arch.dec & 0x80000000) {
		kvmppc_core_queue_dec(vcpu);
		return;
	}
#endif

#ifdef CONFIG_BOOKE
	/* On BOOKE, DEC = 0 is as good as decrementer not enabled */
	if (vcpu->arch.dec == 0)
		return;
#endif

	/*
	 * The decrementer ticks at the same rate as the timebase, so
	 * that's how we convert the guest DEC value to the number of
	 * host ticks.
	 */
	dec_time = vcpu->arch.dec;
	/*
	 * Guest timebase ticks at the same frequency as host decrementer.
	 * So use the host decrementer calculations for decrementer emulation.
	 */
	dec_time = dec_time << decrementer_clockevent.shift;
	do_div(dec_time, decrementer_clockevent.mult);
	dec_nsec = do_div(dec_time, NSEC_PER_SEC);
	hrtimer_start(&vcpu->arch.dec_timer,
		      ktime_set(dec_time, dec_nsec), HRTIMER_MODE_REL);
	vcpu->arch.dec_jiffies = get_tb();
}
static void iSeries_tb_recal(void)
{
	struct div_result divres;
	unsigned long titan, tb;

	tb = get_tb();
	titan = HvCallXm_loadTod();
	if (iSeries_recal_titan) {
		unsigned long tb_ticks = tb - iSeries_recal_tb;
		unsigned long titan_usec = (titan - iSeries_recal_titan) >> 12;
		unsigned long new_tb_ticks_per_sec =
			(tb_ticks * USEC_PER_SEC) / titan_usec;
		unsigned long new_tb_ticks_per_jiffy =
			(new_tb_ticks_per_sec + (HZ / 2)) / HZ;
		long tick_diff = new_tb_ticks_per_jiffy - tb_ticks_per_jiffy;
		char sign = '+';

		/* make sure tb_ticks_per_sec and tb_ticks_per_jiffy are consistent */
		new_tb_ticks_per_sec = new_tb_ticks_per_jiffy * HZ;

		if (tick_diff < 0) {
			tick_diff = -tick_diff;
			sign = '-';
		}
		if (tick_diff) {
			if (tick_diff < tb_ticks_per_jiffy / 25) {
				printk("Titan recalibrate: new tb_ticks_per_jiffy = %lu (%c%ld)\n",
				       new_tb_ticks_per_jiffy, sign, tick_diff);
				tb_ticks_per_jiffy = new_tb_ticks_per_jiffy;
				tb_ticks_per_sec = new_tb_ticks_per_sec;
				div128_by_32(XSEC_PER_SEC, 0, tb_ticks_per_sec, &divres);
				do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
				tb_to_xs = divres.result_low;
				do_gtod.varp->tb_to_xs = tb_to_xs;
			} else {
				printk("Titan recalibrate: FAILED (difference > 4 percent)\n"
				       " new tb_ticks_per_jiffy = %lu\n"
				       " old tb_ticks_per_jiffy = %lu\n",
				       new_tb_ticks_per_jiffy, tb_ticks_per_jiffy);
			}
		}
	}
	iSeries_recal_titan = titan;
	iSeries_recal_tb = tb;
}
void ps3_disable_pm(u32 cpu)
{
	int result;
	u64 tmp;

	ps3_set_bookmark(get_tb() | PS3_PM_BOOKMARK_STOP);

	result = lv1_stop_lpm(lpm_priv->lpm_id, &tmp);

	if (result) {
		if (result != LV1_WRONG_STATE)
			dev_err(sbd_core(), "%s:%u: lv1_stop_lpm failed: %s\n",
				__func__, __LINE__, ps3_result(result));
		return;
	}

	lpm_priv->tb_count = tmp;

	dev_dbg(sbd_core(), "%s:%u: tb_count %llu (%llxh)\n", __func__,
		__LINE__, lpm_priv->tb_count, lpm_priv->tb_count);
}
static void yield_shared_processor(void)
{
	unsigned long tb;

	HvCall_setEnabledInterrupts(HvCall_MaskIPI |
				    HvCall_MaskLpEvent |
				    HvCall_MaskLpProd |
				    HvCall_MaskTimeout);

	tb = get_tb();
	/* Compute future tb value when yield should expire */
	HvCall_yieldProcessor(HvCall_YieldTimed, tb + tb_ticks_per_jiffy);

	/*
	 * The decrementer stops during the yield.  Force a fake decrementer
	 * here and let the timer_interrupt code sort out the actual time.
	 */
	get_lppaca()->int_dword.fields.decr_int = 1;
	ppc64_runlatch_on();
	process_iSeries_events();
}
static void smp_core99_give_timebase(void)
{
	/* Open i2c bus for synchronous access */
	if (pmac_low_i2c_open(pmac_tb_clock_chip_host, 0))
		panic("Can't open i2c for TB sync !\n");

	spin_lock(&timebase_lock);
	(*pmac_tb_freeze)(1);
	mb();
	timebase = get_tb();
	spin_unlock(&timebase_lock);

	while (timebase)
		barrier();

	spin_lock(&timebase_lock);
	(*pmac_tb_freeze)(0);
	spin_unlock(&timebase_lock);

	/* Close i2c bus */
	pmac_low_i2c_close(pmac_tb_clock_chip_host);
}
void __init time_init(void)
{
	/* This function is only called on the boot processor */
	unsigned long flags;
	struct rtc_time tm;

	ppc_md.calibrate_decr();

#ifdef CONFIG_PPC_ISERIES
	if (!piranha_simulator)
#endif
		ppc_md.get_boot_time(&tm);

	write_seqlock_irqsave(&xtime_lock, flags);
	xtime.tv_sec = mktime(tm.tm_year + 1900, tm.tm_mon + 1, tm.tm_mday,
			      tm.tm_hour, tm.tm_min, tm.tm_sec);
	tb_last_stamp = get_tb();
	do_gtod.tb_orig_stamp = tb_last_stamp;
	do_gtod.varp = &do_gtod.vars[0];
	do_gtod.var_idx = 0;
	do_gtod.varp->stamp_xsec = xtime.tv_sec * XSEC_PER_SEC;
	do_gtod.tb_ticks_per_sec = tb_ticks_per_sec;
	do_gtod.varp->tb_to_xs = tb_to_xs;
	do_gtod.tb_to_us = tb_to_us;
	xtime_sync_interval = tb_ticks_per_sec - (tb_ticks_per_sec / 8);
	next_xtime_sync_tb = tb_last_stamp + xtime_sync_interval;

	time_freq = 0;

	xtime.tv_nsec = 0;
	last_rtc_update = xtime.tv_sec;

	set_normalized_timespec(&wall_to_monotonic, -xtime.tv_sec,
				-xtime.tv_nsec);

	write_sequnlock_irqrestore(&xtime_lock, flags);

	/* Not exact, but the timer interrupt takes care of this */
	set_dec(tb_ticks_per_jiffy);
}