/*
 * linux/arch/arm/kernel/vst.c
 *
 * VST code for ARM.
 *
 * 2004 VST and IDLE code, by George Anzinger
 *
 * 2004 (c) MontaVista Software, Inc.
 * Copyright 2004 Sony Corporation.
 * Copyright 2004 Matsushita Electric Industrial Co., Ltd.
 *
 * This file is licensed under the terms of the GNU General Public
 * License version 2. This program is licensed "as is" without any
 * warranty of any kind, whether express or implied.
 */
#include <linux/vst.h>
#include <linux/hrtime.h>	/* get_arch_cycles, arch_cycles_per_jiffy */
#include <linux/time.h>		/* xtime_lock */
#include <asm/irq.h>		/* to get the disable/enable irq code */
#include <asm/mach/irq.h>

/* Nothing to stop on ARM: the armed timer is simply left to expire. */
#define stop_timer()	/* just let it expire.... */

/*
 * do_vst_wakeup - bring the system back from a VST (tickless) sleep.
 * @regs:	interrupted register context (not used here; kept for the
 *		common wakeup-hook signature).
 * @irq_flag:	non-zero when the wakeup came from the expected timer
 *		interrupt (counted as a successful VST exit); zero for any
 *		other, external interrupt.
 *
 * Returns immediately unless we are actually in a VST sleep.  Otherwise
 * updates the VST exit statistics and advances jiffies by however many
 * ticks elapsed while asleep, using the HRT cycle counter to measure the
 * gap.  All bookkeeping is done under xtime_lock with interrupts saved.
 */
void do_vst_wakeup(struct pt_regs *regs, int irq_flag)
{
	unsigned long jiffies_delta, jiffies_f = jiffies;
	unsigned long flags;

	if (!in_vst_sleep())
		return;

	vst_waking();

	write_seqlock_irqsave(&xtime_lock, flags);

	/* Account which kind of event ended the sleep. */
	if (irq_flag )
		vst_successful_exit++;
	else
		vst_external_intr_exit++;

	stop_timer();
	/*
	 * OK, now we need to get jiffies up to the right value.  Here
	 * we lean on the HRT patch to give us some notion of where we
	 * are.
	 */
	jiffies_delta = get_arch_cycles(jiffies_f) / arch_cycles_per_jiffy;

	if (jiffies_delta) {
		/*
		 * One or more jiffie has elapsed.  Do all but the last one
		 * here and then call do_timer() to get the last and update
		 * the wall clock.
		 *
		 * NOTE(review): the comment above mentions do_timer(), but
		 * the code below ends with run_local_timers() — presumably
		 * the regular tick path completes the last jiffie; confirm
		 * against the caller.
		 */
		jiffies_delta--;
		vst_bump_jiffies_by(jiffies_delta);
		vst_skipped_interrupts += jiffies_delta;
		run_local_timers();
	} else {
		/* No full tick was missed; run timers only if due. */
		conditional_run_timers();
	}
	write_sequnlock_irqrestore(&xtime_lock, flags);
	return;
}
static unsigned long consider_steal_time(unsigned long new_itm) { unsigned long stolen, blocked; unsigned long delta_itm = 0, stolentick = 0; int cpu = smp_processor_id(); struct vcpu_runstate_info runstate; struct task_struct *p = current; get_runstate_snapshot(&runstate); /* * Check for vcpu migration effect * In this case, itc value is reversed. * This causes huge stolen value. * This function just checks and reject this effect. */ if (!time_after_eq(runstate.time[RUNSTATE_blocked], per_cpu(xen_blocked_time, cpu))) blocked = 0; if (!time_after_eq(runstate.time[RUNSTATE_runnable] + runstate.time[RUNSTATE_offline], per_cpu(xen_stolen_time, cpu))) stolen = 0; if (!time_after(delta_itm + new_itm, ia64_get_itc())) stolentick = ia64_get_itc() - new_itm; do_div(stolentick, NS_PER_TICK); stolentick++; do_div(stolen, NS_PER_TICK); if (stolen > stolentick) stolen = stolentick; stolentick -= stolen; do_div(blocked, NS_PER_TICK); if (blocked > stolentick) blocked = stolentick; if (stolen > 0 || blocked > 0) { account_steal_ticks(stolen); account_idle_ticks(blocked); run_local_timers(); rcu_check_callbacks(cpu, user_mode(get_irq_regs())); scheduler_tick(); run_posix_cpu_timers(p); delta_itm += local_cpu_data->itm_delta * (stolen + blocked); if (cpu == time_keeper_id) xtime_update(stolen + blocked); local_cpu_data->itm_next = delta_itm + new_itm; per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; } return delta_itm; }