void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu) { u64 exit = vcpu->arch.timing_last_exit; u64 enter = vcpu->arch.timing_last_enter.tv64; /* save exit time, used next exit when the reenter time is known */ vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64; if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0)) return; /* skip incomplete cycle (e.g. after reset) */ /* update statistics for average and standard deviation */ add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type); /* enter -> timing_last_exit is time spent in guest - log this too */ add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter), TIMEINGUEST); }
/* Update per-vcpu exit-timing statistics for the cycle that just completed. */
void kvmppc_update_timing_stats(struct kvm_vcpu *vcpu)
{
	u64 exit = vcpu->arch.timing_last_exit;
	u64 enter = vcpu->arch.timing_last_enter.tv64;

	/* save exit time, used next exit when the reenter time is known */
	vcpu->arch.timing_last_exit = vcpu->arch.timing_exit.tv64;

	/* skip incomplete cycle (e.g. after reset, last_exit_type unset) */
	if (unlikely(vcpu->arch.last_exit_type == 0xDEAD || exit == 0))
		return;

	/* previous exit -> this entry: time spent handling the exit in host */
	add_exit_timing(vcpu, (enter - exit), vcpu->arch.last_exit_type);

	/* enter -> timing_last_exit is time spent in guest - log this too */
	add_exit_timing(vcpu, (vcpu->arch.timing_last_exit - enter),
			TIMEINGUEST);
}