static int handle_test_block(struct kvm_vcpu *vcpu) { unsigned long hva; gpa_t addr; int reg2; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); kvm_s390_get_regs_rre(vcpu, NULL, ®2); addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK; addr = kvm_s390_real_to_abs(vcpu, addr); hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr)); if (kvm_is_error_hva(hva)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); /* * We don't expect errors on modern systems, and do not care * about storage keys (yet), so let's just clear the page. */ if (clear_user((void __user *)hva, PAGE_SIZE) != 0) return -EFAULT; kvm_s390_set_psw_cc(vcpu, 0); vcpu->run->s.regs.gprs[0] = 0; return 0; }
static int handle_set_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address = 0; u8 tmp; vcpu->stat.instruction_spx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); /* get the value */ if (get_guest(vcpu, address, (u32 __user *) operand2)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); address = address & 0x7fffe000u; /* make sure that the new value is valid memory */ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); kvm_s390_set_prefix(vcpu, address); VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 1, address); return 0; }
/* Handle SCK (SET CLOCK) interception */ static int handle_set_clock(struct kvm_vcpu *vcpu) { struct kvm_vcpu *cpup; s64 hostclk, val; u64 op2; int i; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); op2 = kvm_s390_get_base_disp_s(vcpu); if (op2 & 7) /* Operand must be on a doubleword boundary */ return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (get_guest(vcpu, val, (u64 __user *) op2)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (store_tod_clock(&hostclk)) { kvm_s390_set_psw_cc(vcpu, 3); return 0; } val = (val - hostclk) & ~0x3fUL; mutex_lock(&vcpu->kvm->lock); kvm_for_each_vcpu(i, cpup, vcpu->kvm) cpup->arch.sie_block->epoch = val; mutex_unlock(&vcpu->kvm->lock); kvm_s390_set_psw_cc(vcpu, 0); return 0; }
static int handle_store_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address; vcpu->stat.instruction_stpx++; operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } address = vcpu->arch.sie_block->prefix; address = address & 0x7fffe000u; /* get the value */ if (put_guest_u32(vcpu, operand2, address)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 0, address); out: return 0; }
static int handle_store_prefix(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; u32 address; vcpu->stat.instruction_stpx++; operand2 = disp2; if (base2) operand2 += vcpu->arch.guest_gprs[base2]; /* must be word boundary */ if (operand2 & 3) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } address = vcpu->arch.sie_block->prefix; address = address & 0x7fffe000u; /* get the value */ if (put_guest_u32(vcpu, operand2, address)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); out: return 0; }
/* Intercept STORE CPU ADDRESS: write the vcpu id to guest storage. */
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	/* The operand must be halfword aligned. */
	if (useraddr & 1) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		return 0;
	}

	if (put_guest_u16(vcpu, useraddr, vcpu->vcpu_id) == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		return 0;
	}

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}
static int handle_stidp(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; int rc; vcpu->stat.instruction_stidp++; operand2 = disp2; if (base2) operand2 += vcpu->arch.guest_gprs[base2]; if (operand2 & 7) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } rc = put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data); if (rc == -EFAULT) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); out: return 0; }
static int handle_store_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address; vcpu->stat.instruction_stpx++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); address = vcpu->arch.sie_block->prefix; address = address & 0x7fffe000u; /* get the value */ if (put_guest(vcpu, address, (u32 __user *)operand2)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "storing prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 0, address); return 0; }
/*
 * Intercept STORE SYSTEM INFORMATION (STSI).
 *
 * fc/sel1/sel2 select the requested information block.  fc 0 only reports
 * the highest supported function code in gr0; fc 1/2 are answered by the
 * host's stsi(); fc 3 (subcode 2.2) is synthesized by handle_stsi_3_2_2().
 * On success the block is copied to the guest at the operand address and
 * cc 0 is set; any failure sets cc 3.  Always returns 0 to the caller.
 */
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->arch.guest_gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->arch.guest_gprs[0] & 0xff;
	int sel2 = vcpu->arch.guest_gprs[1] & 0xffff;
	int base2 = vcpu->arch.sie_block->ipb >> 28;
	int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);
	u64 operand2;
	unsigned long mem;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	/* Effective operand address = displacement + base register. */
	operand2 = disp2;
	if (base2)
		operand2 += vcpu->arch.guest_gprs[base2];

	/* For fc > 0 the operand must be page (4k) aligned. */
	if (operand2 & 0xfff && fc > 0)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 0:
		/* Report the highest supported fc (3) and set cc 0. */
		vcpu->arch.guest_gprs[0] = 3 << 28;
		vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
		return 0;
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		if (stsi((void *) mem, fc, sel1, sel2) == -ENOSYS)
			goto out_mem;
		break;
	case 3:
		/* Only subcode 2.2 is emulated. */
		if (sel1 != 2 || sel2 != 2)
			goto out_fail;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_fail;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	default:
		goto out_fail;
	}

	/* Copy the assembled info block into guest absolute storage. */
	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_mem;
	}
	free_page(mem);
	/* Success: cc 0, gr0 cleared. */
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	vcpu->arch.guest_gprs[0] = 0;
	return 0;
out_mem:
	free_page(mem);
out_fail:
	/* condition code 3 */
	vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
	return 0;
}
static int handle_store_cpu_address(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 useraddr; int rc; vcpu->stat.instruction_stap++; useraddr = disp2; if (base2) useraddr += vcpu->arch.guest_gprs[base2]; if (useraddr & 1) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } rc = put_guest_u16(vcpu, useraddr, vcpu->vcpu_id); if (rc == -EFAULT) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); out: return 0; }
static int handle_io_inst(struct kvm_vcpu *vcpu) { VCPU_EVENT(vcpu, 4, "%s", "I/O instruction"); if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); if (vcpu->kvm->arch.css_support) { /* * Most I/O instructions will be handled by userspace. * Exceptions are tpi and the interrupt portion of tsch. */ if (vcpu->arch.sie_block->ipa == 0xb236) return handle_tpi(vcpu); if (vcpu->arch.sie_block->ipa == 0xb235) return handle_tsch(vcpu); /* Handle in userspace. */ return -EOPNOTSUPP; } else { /* * Set condition code 3 to stop the guest from issueing channel * I/O instructions. */ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44; return 0; } }
/* Handle SET CLOCK PROGRAMMABLE FIELD: load the TOD programmable reg. */
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 todpr;

	/* Privileged instruction: reject in problem state. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Only the low 16 bits of gr0 may carry a value. */
	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	todpr = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = todpr;

	return 0;
}
static int handle_stfl(struct kvm_vcpu *vcpu) { int rc; vcpu->stat.instruction_stfl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), vfacilities, 4); if (rc) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "store facility list value %x", *(unsigned int *) vfacilities); trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities); return 0; }
/*
 * Intercept LOAD PSW (31-bit compat format).
 *
 * Reads an 8-byte compat PSW from the operand address, widens its mask
 * into the guest PSW, and validates the result.  All guest-visible
 * failures are reported as program interrupts; the function itself
 * always returns 0.
 */
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_compat_t new_psw;

	/* Privileged instruction: problem state gets a program check. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_PRIVILEGED_OPERATION);

	addr = kvm_s390_get_base_disp_s(vcpu);

	/* The operand must be doubleword aligned. */
	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	/* The base bit must be set in the compat-format PSW mask. */
	if (!(new_psw.mask & PSW32_MASK_BASE)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	/* Widen the compat mask into the guest PSW mask. */
	vcpu->arch.sie_block->gpsw.mask =
		(new_psw.mask & ~PSW32_MASK_BASE) << 32;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	/*
	 * Reject PSWs with unassigned mask bits set, an address outside
	 * 24 bits when no addressing-mode bits are set, or the EA-only
	 * addressing-mode combination.
	 */
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}
static int handle_lpswe(struct kvm_vcpu *vcpu) { psw_t new_psw; u64 addr; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); vcpu->arch.sie_block->gpsw = new_psw; if (!is_valid_psw(&vcpu->arch.sie_block->gpsw)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); handle_new_psw(vcpu); return 0; }
static int handle_set_prefix(struct kvm_vcpu *vcpu) { int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 operand2; u32 address = 0; u8 tmp; vcpu->stat.instruction_spx++; operand2 = disp2; if (base2) operand2 += vcpu->arch.guest_gprs[base2]; /* must be word boundary */ if (operand2 & 3) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } /* get the value */ if (get_guest_u32(vcpu, operand2, &address)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } address = address & 0x7fffe000u; /* make sure that the new value is valid memory */ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } vcpu->arch.sie_block->prefix = address; vcpu->arch.sie_block->ihcpu = 0xffff; VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); out: return 0; }
static int handle_tprot(struct kvm_vcpu *vcpu) { u64 address1, address2; struct vm_area_struct *vma; unsigned long user_address; vcpu->stat.instruction_tprot++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); kvm_s390_get_base_disp_sse(vcpu, &address1, &address2); /* we only handle the Linux memory detection case: * access key == 0 * guest DAT == off * everything else goes to userspace. */ if (address2 & 0xf0) return -EOPNOTSUPP; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT) return -EOPNOTSUPP; down_read(¤t->mm->mmap_sem); user_address = __gmap_translate(address1, vcpu->arch.gmap); if (IS_ERR_VALUE(user_address)) goto out_inject; vma = find_vma(current->mm, user_address); if (!vma) goto out_inject; vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ)) vcpu->arch.sie_block->gpsw.mask |= (1ul << 44); if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ)) vcpu->arch.sie_block->gpsw.mask |= (2ul << 44); up_read(¤t->mm->mmap_sem); return 0; out_inject: up_read(¤t->mm->mmap_sem); return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); }
static int handle_stidp(struct kvm_vcpu *vcpu) { u64 operand2; vcpu->stat.instruction_stidp++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); operand2 = kvm_s390_get_base_disp_s(vcpu); if (operand2 & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "%s", "store cpu id"); return 0; }
/* Storage-key instruction intercept: rewind the PSW so the instruction
 * is executed again (after the privilege check). */
static int handle_skey(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_storage_key++;

	/* Storage-key instructions are privileged. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/* Step the PSW back over the 4-byte instruction. */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}
static int handle_stfl(struct kvm_vcpu *vcpu) { unsigned int facility_list; int rc; vcpu->stat.instruction_stfl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); /* only pass the facility bits, which we can handle */ facility_list = S390_lowcore.stfl_fac_list & 0xff82fff3; rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list), &facility_list, sizeof(facility_list)); if (rc) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "store facility list value %x", facility_list); trace_kvm_s390_handle_stfl(vcpu, facility_list); return 0; }
static int handle_store_cpu_address(struct kvm_vcpu *vcpu) { u64 useraddr; vcpu->stat.instruction_stap++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); useraddr = kvm_s390_get_base_disp_s(vcpu); if (useraddr & 1) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr)) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr); trace_kvm_s390_handle_stap(vcpu, useraddr); return 0; }
static int handle_lctlg(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) + ((vcpu->arch.sie_block->ipb & 0xff00) << 4); u64 useraddr; int reg, rc; vcpu->stat.instruction_lctlg++; if ((vcpu->arch.sie_block->ipb & 0xff) != 0x2f) return -ENOTSUPP; useraddr = disp2; if (base2) useraddr += vcpu->arch.guest_gprs[base2]; if (useraddr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); reg = reg1; VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, disp2); do { rc = get_guest_u64(vcpu, useraddr, &vcpu->arch.sie_block->gcr[reg]); if (rc == -EFAULT) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; } useraddr += 8; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); return 0; }
static int handle_tpi(struct kvm_vcpu *vcpu) { struct kvm_s390_interrupt_info *inti; u64 addr; int cc; addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); cc = 0; inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0); if (!inti) goto no_interrupt; cc = 1; if (addr) { /* * Store the two-word I/O interruption code into the * provided area. */ if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr) || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2)) || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); } else { /* * Store the three-word I/O interruption code into * the appropriate lowcore area. */ put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID); put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR); put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM); put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD); } kfree(inti); no_interrupt: /* Set condition code and we're done. */ vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44); vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44; return 0; }
int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu) { intercept_handler_t handler; /* All eb instructions that end up here are privileged. */ if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OPERATION); handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff]; if (handler) return handler(vcpu); return -EOPNOTSUPP; }
static int handle_set_prefix(struct kvm_vcpu *vcpu) { u64 operand2; u32 address = 0; u8 tmp; vcpu->stat.instruction_spx++; operand2 = kvm_s390_get_base_disp_s(vcpu); /* must be word boundary */ if (operand2 & 3) { kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); goto out; } /* get the value */ if (get_guest_u32(vcpu, operand2, &address)) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } address = address & 0x7fffe000u; /* make sure that the new value is valid memory */ if (copy_from_guest_absolute(vcpu, &tmp, address, 1) || (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); goto out; } kvm_s390_set_prefix(vcpu, address); VCPU_EVENT(vcpu, 5, "setting prefix to %x", address); trace_kvm_s390_handle_prefix(vcpu, 1, address); out: return 0; }
static int handle_lctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; int base2 = vcpu->arch.sie_block->ipb >> 28; int disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16); u64 useraddr; u32 val = 0; int reg, rc; vcpu->stat.instruction_lctl++; useraddr = disp2; if (base2) useraddr += vcpu->arch.guest_gprs[base2]; if (useraddr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x,b2:%x,d2:%x", reg1, reg3, base2, disp2); reg = reg1; do { rc = get_guest_u32(vcpu, useraddr, &val); if (rc == -EFAULT) { kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); break; } vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; vcpu->arch.sie_block->gcr[reg] |= val; useraddr += 4; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); return 0; }
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu) { int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4; int reg3 = vcpu->arch.sie_block->ipa & 0x000f; u64 useraddr; u32 val = 0; int reg, rc; vcpu->stat.instruction_lctl++; if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); useraddr = kvm_s390_get_base_disp_rs(vcpu); if (useraddr & 3) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3, useraddr); trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr); reg = reg1; do { rc = get_guest(vcpu, val, (u32 __user *) useraddr); if (rc) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul; vcpu->arch.sie_block->gcr[reg] |= val; useraddr += 4; if (reg == reg3) break; reg = (reg + 1) % 16; } while (1); return 0; }
/*
 * Intercept LOAD PSW EXTENDED: load a full PSW from guest memory and
 * validate the resulting mask/address combination.  All guest-visible
 * failures are reported as program interrupts; the function itself
 * always returns 0.
 */
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	u64 addr;
	psw_t new_psw;

	addr = kvm_s390_get_base_disp_s(vcpu);

	/* The operand must be doubleword aligned. */
	if (addr & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out;
	}

	vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
	vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

	/*
	 * Reject PSWs with unassigned mask bits set, an address outside
	 * 31 bits in BA mode, an address outside 24 bits when no
	 * addressing-mode bits are set, or the EA-only mode combination.
	 */
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
	    (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	      PSW_MASK_BA) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
	    (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
	     (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
	    ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
	     PSW_MASK_EA)) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		goto out;
	}

	handle_new_psw(vcpu);
out:
	return 0;
}
/* Intercept STORE CPU ID: write the cpu id to guest storage. */
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* The operand must be doubleword aligned. */
	if (operand2 & 7) {
		kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		return 0;
	}

	if (put_guest_u64(vcpu, operand2, vcpu->arch.stidp_data) == -EFAULT) {
		kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		return 0;
	}

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu) { psw_t *gpsw = &vcpu->arch.sie_block->gpsw; psw_compat_t new_psw; u64 addr; if (gpsw->mask & PSW_MASK_PSTATE) return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP); addr = kvm_s390_get_base_disp_s(vcpu); if (addr & 7) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING); if (!(new_psw.mask & PSW32_MASK_BASE)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32; gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE; gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE; if (!is_valid_psw(gpsw)) return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION); handle_new_psw(vcpu); return 0; }