static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
		      struct kvm_exit_mmio *mmio)
{
	unsigned long rt;
	int len;	/* signed, so the error check below can actually fire */
	bool is_write, sign_extend;

	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/* cache operation on I/O addr, tell guest unsupported */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	len = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(len < 0))
		return len;

	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
		/* IO memory trying to read/write pc */
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	mmio->is_write = is_write;
	mmio->phys_addr = fault_ipa;
	mmio->len = len;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 0;
}

static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
	unsigned long rt;
	int access_size;
	bool sign_extend;

	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/* cache operation on I/O addr, tell guest unsupported */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	access_size = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(access_size < 0))
		return access_size;

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	*len = access_size;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 0;
}
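
/*
 * Hypothetical caller sketch (not in the original source): shows how the
 * refactored decode_hsr() above hands its results back through the
 * is_write/len out-parameters instead of filling a struct kvm_exit_mmio.
 * Loosely modelled on the way io_mem_abort() consumes it; the function
 * name and surrounding logic here are illustrative assumptions, not the
 * kernel's exact code.
 */
static int handle_mmio_fault(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa)
{
	bool is_write;
	int len;
	int ret;

	/* Only syndrome-valid data aborts carry enough info to decode. */
	if (!kvm_vcpu_dabt_isvalid(vcpu))
		return -ENOSYS;

	ret = decode_hsr(vcpu, &is_write, &len);
	if (ret)
		return ret;

	/*
	 * The caller would now perform the MMIO access of 'len' bytes at
	 * fault_ipa, honouring is_write and the mmio_decode state saved in
	 * vcpu->arch by decode_hsr().
	 */
	return 0;
}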