static int handle_test_block(struct kvm_vcpu *vcpu) { unsigned long hva; gpa_t addr; int reg2; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); kvm_s390_get_regs_rre(vcpu, NULL, ®2); addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; addr = kvm_s390_real_to_abs(vcpu, addr); hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr)); if (kvm_is_error_hva(hva)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); /* * We don't expect errors on modern systems, and do not care * about storage keys (yet), so let's just clear the page. */ if (clear_user((void __user *)hva, PAGE_SIZE) != 0) return -EFAULT; kvm_s390_set_psw_cc(vcpu, 0); vcpu->run->s.regs.gprs[0] = 0; return 0; }
/*
 * Handle the EXTRACT PSW (EPSW) instruction: store the high half of
 * the guest PSW mask into the low 32 bits of reg1 and, if reg2 is
 * non-zero, the low half of the PSW mask into the low 32 bits of reg2.
 * The upper 32 bits of the target registers are preserved.
 *
 * Always returns 0.
 */
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	/* reg2 == 0 means "no second operand" for EPSW, not gpr0. */
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}
static int handle_pfmf(struct kvm_vcpu *vcpu) { int reg1, reg2; unsigned long start, end; vcpu->stat.instruction_pfmf++; kvm_s390_get_regs_rre(vcpu, ®1, ®2); if (!MACHINE_HAS_PFMF) return kvm_s390_inject_program_int(vcpu, PGM_OPERATION); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* Only provide non-quiescing support if the host supports it */ if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && S390_lowcore.stfl_fac_list & 0x00020000) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* No support for conditional-SSKE */ if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) { case 0x00000000: end = (start + (1UL << 12)) & ~((1UL << 12) - 1); break; case 0x00001000: end = (start + (1UL << 20)) & ~((1UL << 20) - 1); break; /* We dont support EDAT2 case 0x00002000: end = (start + (1UL << 31)) & ~((1UL << 31) - 1); break;*/ default: return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); } while (start < end) { unsigned long useraddr; useraddr = gmap_translate(start, vcpu->arch.gmap); if (IS_ERR((void *)useraddr)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) { if (clear_user((void __user *)useraddr, PAGE_SIZE)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) { if (set_guest_storage_key(current->mm, useraddr, vcpu->run->s.regs.gprs[reg1] & PFMF_KEY, vcpu->run->s.regs.gprs[reg1] & PFMF_NQ)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } start += PAGE_SIZE; } if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) vcpu->run->s.regs.gprs[reg2] = end; return 0; }