static void do_stolen_accounting(void)
{
        struct vcpu_runstate_info state;
        struct vcpu_runstate_info *snap;
        s64 runnable, offline, stolen;
        cputime_t ticks;

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        snap = this_cpu_ptr(&xen_runstate_snapshot);

        /* work out how much time the VCPU has not been runn*ing* */
        runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
        offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

        *snap = state;

        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
        stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

        if (stolen < 0)
                stolen = 0;

        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
        __this_cpu_write(xen_residual_stolen, stolen);
        account_steal_ticks(ticks);
}

static void do_stolen_accounting(void)
{
        struct vcpu_runstate_info state;
        struct vcpu_runstate_info *snap;
        s64 blocked, runnable, offline, stolen;
        cputime_t ticks;

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        snap = &__get_cpu_var(runstate_snapshot);

        /* work out how much time the VCPU has not been runn*ing* */
        blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
        runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
        offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

        *snap = state;

        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
        stolen = runnable + offline + __get_cpu_var(residual_stolen);

        if (stolen < 0)
                stolen = 0;

        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
        __get_cpu_var(residual_stolen) = stolen;
        account_steal_ticks(ticks);

        /* Add the appropriate number of ticks of blocked time,
           including any left-overs from last time. */
        blocked += __get_cpu_var(residual_blocked);

        if (blocked < 0)
                blocked = 0;

        ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
        __get_cpu_var(residual_blocked) = blocked;
        account_idle_ticks(ticks);
}

static void do_stolen_accounting(void)
{
        struct vcpu_runstate_info state;
        struct vcpu_runstate_info *snap;
        s64 blocked, runnable, offline, stolen;
        cputime_t ticks;

        get_runstate_snapshot(&state);

        WARN_ON(state.state != RUNSTATE_running);

        snap = &__get_cpu_var(xen_runstate_snapshot);

        /* work out how much time the VCPU has not been runn*ing* */
        blocked = state.time[RUNSTATE_blocked] - snap->time[RUNSTATE_blocked];
        runnable = state.time[RUNSTATE_runnable] - snap->time[RUNSTATE_runnable];
        offline = state.time[RUNSTATE_offline] - snap->time[RUNSTATE_offline];

        *snap = state;

        /* Add the appropriate number of ticks of stolen time,
           including any left-overs from last time. */
        stolen = runnable + offline + __this_cpu_read(xen_residual_stolen);

        if (stolen < 0)
                stolen = 0;

        ticks = iter_div_u64_rem(stolen, NS_PER_TICK, &stolen);
        __this_cpu_write(xen_residual_stolen, stolen);
        account_steal_ticks(ticks);

        /* Add the appropriate number of ticks of blocked time,
           including any left-overs from last time. */
        blocked += __this_cpu_read(xen_residual_blocked);

        if (blocked < 0)
                blocked = 0;

        ticks = iter_div_u64_rem(blocked, NS_PER_TICK, &blocked);
        __this_cpu_write(xen_residual_blocked, blocked);
        account_idle_ticks(ticks);
}

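All three do_stolen_accounting() variants above share one residual-carry pattern: the nanosecond delta is converted to whole ticks with iter_div_u64_rem(), and the sub-tick remainder is written back to a per-CPU variable so it is credited on a later pass instead of being lost to rounding. Below is a minimal user-space sketch of that pattern, assuming a 100 Hz tick; the helper name accumulate_steal_ticks() and the plain global standing in for the per-CPU residual are invented for illustration, not taken from the kernel.

#include <stdint.h>
#include <stdio.h>

#define NS_PER_TICK (1000000000 / 100)  /* illustrative: 100 Hz tick */

static int64_t residual_stolen;         /* sub-tick leftover, carried over */

/* Convert a nanosecond delta to whole ticks, carrying the remainder. */
static uint64_t accumulate_steal_ticks(int64_t stolen_ns)
{
        int64_t stolen = stolen_ns + residual_stolen;
        uint64_t ticks;

        if (stolen < 0)
                stolen = 0;

        ticks = stolen / NS_PER_TICK;   /* iter_div_u64_rem() in the kernel */
        residual_stolen = stolen % NS_PER_TICK;
        return ticks;
}

int main(void)
{
        /* 2.5 ticks of stolen time arrive in two pieces. */
        printf("%llu\n", (unsigned long long)accumulate_steal_ticks(15000000)); /* 1 */
        printf("%llu\n", (unsigned long long)accumulate_steal_ticks(10000000)); /* 1 */
        printf("residual=%lld ns\n", (long long)residual_stolen);  /* 5000000 */
        return 0;
}

Called with 15 ms and then 10 ms of stolen time, the sketch accounts one tick each time and leaves 5 ms in the residual, matching the 2.5 ticks that actually accrued; nothing is dropped at a tick boundary.
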
static unsigned long consider_steal_time(unsigned long new_itm)
{
        unsigned long stolen, blocked;
        unsigned long delta_itm = 0, stolentick = 0;
        int cpu = smp_processor_id();
        struct vcpu_runstate_info runstate;
        struct task_struct *p = current;

        get_runstate_snapshot(&runstate);

        /*
         * Check for the vcpu migration effect: after a migration the itc
         * value can go backwards, which would produce a huge bogus stolen
         * value. Compute each delta only when the runstate counter has
         * advanced, and reject the effect (treat the delta as zero)
         * otherwise.
         */
        if (time_after_eq(runstate.time[RUNSTATE_blocked],
                          per_cpu(xen_blocked_time, cpu)))
                blocked = runstate.time[RUNSTATE_blocked] -
                        per_cpu(xen_blocked_time, cpu);
        else
                blocked = 0;

        if (time_after_eq(runstate.time[RUNSTATE_runnable] +
                          runstate.time[RUNSTATE_offline],
                          per_cpu(xen_stolen_time, cpu)))
                stolen = runstate.time[RUNSTATE_runnable] +
                        runstate.time[RUNSTATE_offline] -
                        per_cpu(xen_stolen_time, cpu);
        else
                stolen = 0;

        if (!time_after(delta_itm + new_itm, ia64_get_itc()))
                stolentick = ia64_get_itc() - new_itm;

        do_div(stolentick, NS_PER_TICK);
        stolentick++;

        do_div(stolen, NS_PER_TICK);
        if (stolen > stolentick)
                stolen = stolentick;

        stolentick -= stolen;
        do_div(blocked, NS_PER_TICK);
        if (blocked > stolentick)
                blocked = stolentick;

        if (stolen > 0 || blocked > 0) {
                account_steal_ticks(stolen);
                account_idle_ticks(blocked);
                run_local_timers();

                rcu_check_callbacks(cpu, user_mode(get_irq_regs()));

                scheduler_tick();
                run_posix_cpu_timers(p);
                delta_itm += local_cpu_data->itm_delta * (stolen + blocked);

                if (cpu == time_keeper_id)
                        xtime_update(stolen + blocked);

                local_cpu_data->itm_next = delta_itm + new_itm;

                per_cpu(xen_stolen_time, cpu) += NS_PER_TICK * stolen;
                per_cpu(xen_blocked_time, cpu) += NS_PER_TICK * blocked;
        }
        return delta_itm;
}
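
The ia64 variant additionally bounds what it accounts by the ticks that actually elapsed since the programmed timer match (stolentick): stolen time is clamped first, and blocked time only gets whatever tick budget remains. A standalone sketch of that ordering follows; clamp_accounted_ticks() is an invented name for illustration.

#include <stdint.h>
#include <stdio.h>

/*
 * Split an elapsed-tick budget between stolen and blocked time the way
 * consider_steal_time() does: stolen is clamped first, then blocked is
 * limited to whatever budget stolen left over.
 */
static void clamp_accounted_ticks(uint64_t elapsed_ticks,
                                  uint64_t *stolen, uint64_t *blocked)
{
        if (*stolen > elapsed_ticks)
                *stolen = elapsed_ticks;
        elapsed_ticks -= *stolen;

        if (*blocked > elapsed_ticks)
                *blocked = elapsed_ticks;
}

int main(void)
{
        uint64_t stolen = 7, blocked = 5;

        /* Only 10 ticks elapsed: stolen keeps its 7, blocked is cut to 3. */
        clamp_accounted_ticks(10, &stolen, &blocked);
        printf("stolen=%llu blocked=%llu\n",
               (unsigned long long)stolen, (unsigned long long)blocked);
        return 0;
}

Giving stolen time priority over blocked time in the clamp means steal accounting stays accurate even when the combined deltas overshoot the elapsed interval, at the cost of under-reporting idle time in that case.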