static void do_stolen_accounting(void) { struct vcpu_runstate_info state; struct vcpu_runstate_info *snap; s64 runnable, offline, stolen; cputime_t ticks; get_runstate_snapshot(&state); WARN_ON(state.state != RUNSTATE_running); snap = this_cpu_ptr(&xen_runstate_snapshot); /* work out how much time the VCPU has not been runn*ing* */ runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline]; *snap = state; /* Add the appropriate number of ticks of stolen time, including any left-overs from last time. */ stolen = runnable + offline + __this_cpu_read(xen_residual_stolen); if (stolen < 0) stolen = 0; ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); __this_cpu_write(xen_residual_stolen, stolen); account_steal_ticks(ticks); }
/* * Xen sched_clock implementation. Returns the number of unstolen * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED * states. */ unsigned long long xen_sched_clock(void) { struct vcpu_runstate_info state; cycle_t now; u64 ret; s64 offset; /* * Ideally sched_clock should be called on a per-cpu basis * anyway, so preempt should already be disabled, but that's * not current practice at the moment. */ preempt_disable(); now = xen_clocksource_read(); get_runstate_snapshot(&state); WARN_ON(state.state != RUNSTATE_running); offset = now - state.state_entry_time; if (offset < 0) offset = 0; ret = state.time[RUNSTATE_blocked] + state.time[RUNSTATE_running] + offset; preempt_enable(); return ret; }
/*
 * Account stolen and blocked time for this VCPU since the previous
 * runstate snapshot, in whole scheduler ticks.  Sub-tick remainders
 * are carried across calls in residual_stolen / residual_blocked.
 *
 * Uses the legacy two-argument account_steal_time() API: a NULL task
 * accounts the ticks as stolen, the idle task accounts them as
 * idle/wait time.
 */
static void do_stolen_accounting(void)
{
	struct vcpu_runstate_info state;
	struct vcpu_runstate_info *snap;
	s64 blocked, runnable, offline, stolen;
	cputime_t ticks;

	get_runstate_snapshot(&state);

	/* Should only be called while this VCPU is actually running. */
	WARN_ON(state.state != RUNSTATE_running);

	snap = &__get_cpu_var(runstate_snapshot);

	/* work out how much time the VCPU has not been runn*ing* */
	blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
	runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
	offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

	*snap = state;

	/* Add the appropriate number of ticks of stolen time,
	   including any left-overs from last time.  Passing NULL to
	   account_steal_time accounts the time as stolen. */
	stolen = runnable + offline + __get_cpu_var(residual_stolen);

	/* Deltas can go negative (e.g. after VCPU migration); clamp. */
	if (stolen < 0)
		stolen = 0;

	ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
	__get_cpu_var(residual_stolen) = stolen;
	account_steal_time(NULL, ticks);

	/* Add the appropriate number of ticks of blocked time,
	   including any left-overs from last time.  Passing idle to
	   account_steal_time accounts the time as idle/wait. */
	blocked += __get_cpu_var(residual_blocked);
	if (blocked < 0)
		blocked = 0;

	ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
	__get_cpu_var(residual_blocked) = blocked;
	account_steal_time(idle_task(smp_processor_id()), ticks);
}
static void do_stolen_accounting(void) { struct vcpu_runstate_info state; struct vcpu_runstate_info *snap; s64 blocked, runnable, offline, stolen; cputime_t ticks; get_runstate_snapshot(&state); WARN_ON(state.state != RUNSTATE_running); snap = &__get_cpu_var(xen_runstate_snapshot); blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked]; runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable]; offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline]; *snap = state; stolen = runnable + offline + __this_cpu_read(xen_residual_stolen); if (stolen < 0) stolen = 0; ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen); __this_cpu_write(xen_residual_stolen, stolen); account_steal_ticks(ticks); blocked += __this_cpu_read(xen_residual_blocked); if (blocked < 0) blocked = 0; ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked); __this_cpu_write(xen_residual_blocked, blocked); account_idle_ticks(ticks); }
/* * Xen sched_clock implementation. Returns the number of unstolen * nanoseconds, which is nanoseconds the VCPU spent in RUNNING+BLOCKED * states. */ static unsigned long long xen_sched_clock(void) { struct vcpu_runstate_info runstate; unsigned long long now; unsigned long long offset; unsigned long long ret; /* * Ideally sched_clock should be called on a per-cpu basis * anyway, so preempt should already be disabled, but that's * not current practice at the moment. */ preempt_disable(); /* * both ia64_native_sched_clock() and xen's runstate are * based on mAR.ITC. So difference of them makes sense. */ now = ia64_native_sched_clock(); get_runstate_snapshot(&runstate); WARN_ON(runstate.state != RUNSTATE_running); offset = 0; if (now > runstate.state_entry_time) offset = now - runstate.state_entry_time; ret = runstate.time[RUNSTATE_blocked] + runstate.time[RUNSTATE_running] + offset; preempt_enable(); return ret; }
static unsigned long consider_steal_time(unsigned long new_itm) { unsigned long stolen, blocked; unsigned long delta_itm = 0, stolentick = 0; int cpu = smp_processor_id(); struct vcpu_runstate_info runstate; struct task_struct *p = current; get_runstate_snapshot(&runstate); /* * Check for vcpu migration effect * In this case, itc value is reversed. * This causes huge stolen value. * This function just checks and reject this effect. */ if (!time_after_eq(runstate.time[RUNSTATE_blocked], per_cpu(xen_blocked_time, cpu))) blocked = 0; if (!time_after_eq(runstate.time[RUNSTATE_runnable] + runstate.time[RUNSTATE_offline], per_cpu(xen_stolen_time, cpu))) stolen = 0; if (!time_after(delta_itm + new_itm, ia64_get_itc())) stolentick = ia64_get_itc() - new_itm; do_div(stolentick, NS_PER_TICK); stolentick++; do_div(stolen, NS_PER_TICK); if (stolen > stolentick) stolen = stolentick; stolentick -= stolen; do_div(blocked, NS_PER_TICK); if (blocked > stolentick) blocked = stolentick; if (stolen > 0 || blocked > 0) { account_steal_ticks(stolen); account_idle_ticks(blocked); run_local_timers(); rcu_check_callbacks(cpu, user_mode(get_irq_regs())); scheduler_tick(); run_posix_cpu_timers(p); delta_itm += local_cpu_data->itm_delta * (stolen + blocked); if (cpu == time_keeper_id) xtime_update(stolen + blocked); local_cpu_data->itm_next = delta_itm + new_itm; per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen; per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked; } return delta_itm; }