/*
 * Ensure the TOD clock is running during early boot.
 *
 * If STORE CLOCK succeeds the clock is already usable and nothing needs
 * to be done.  Otherwise program the clock to the Unix epoch and verify
 * that it now runs; if even that fails, the machine cannot keep time
 * and is stopped via a disabled wait.
 */
static void __init reset_tod_clock(void)
{
	u64 clk;

	if (!store_tod_clock(&clk))
		return;		/* clock already running */

	/* TOD clock not running. Set the clock to the Unix epoch. */
	if (set_tod_clock(TOD_UNIX_EPOCH) || store_tod_clock(&clk))
		disabled_wait();

	/* Record the epoch in the extended clock base and the lowcore. */
	memset(tod_clock_base, 0, 16);
	*(__u64 *) &tod_clock_base[1] = TOD_UNIX_EPOCH;
	S390_lowcore.last_update_clock = TOD_UNIX_EPOCH;
}
/*
 * Handle SCK (SET CLOCK) interception.
 *
 * The guest's SET CLOCK instruction is emulated without touching the
 * host TOD clock: the difference between the requested guest time and
 * the current host time is written into the epoch field of every
 * vcpu's SIE control block, so the whole VM observes the new time.
 *
 * Checks are performed in architectural exception-priority order:
 * privileged-operation, then operand alignment (specification), then
 * guest memory access.  Returns 0 with a condition code set in the
 * guest PSW, or the result of injecting a program interruption.
 */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	u64 op2;
	int i;

	/* SCK is privileged: reject execution from the problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	/* Fetch the requested TOD value from guest memory. */
	if (get_guest(vcpu, val, (u64 __user *) op2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	/* Host clock unavailable: report cc 3 (clock not operational). */
	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	/*
	 * Epoch offset = requested guest time - host time.  The low six
	 * bits are cleared (NOTE(review): presumably below the usable
	 * clock resolution — confirm against the architecture).
	 */
	val = (val - hostclk) & ~0x3fUL;

	/* Apply the new epoch to all vcpus under the VM-wide lock. */
	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}
/* Create the trailer data at the end of a page. */ static void cf_diag_trailer(struct cf_trailer_entry *te) { struct cpu_cf_events *cpuhw = this_cpu_ptr(&cpu_cf_events); struct cpuid cpuid; te->cfvn = cpuhw->info.cfvn; /* Counter version numbers */ te->csvn = cpuhw->info.csvn; get_cpu_id(&cpuid); /* Machine type */ te->mach_type = cpuid.machine; te->cpu_speed = cf_diag_cpu_speed; if (te->cpu_speed) te->speed = 1; te->clock_base = 1; /* Save clock base */ memcpy(&te->tod_base, &tod_clock_base[1], 8); store_tod_clock((__u64 *)&te->timestamp); }