static void __hyp_text __vgic_v3_read_iar(struct kvm_vcpu *vcpu, u32 vmcr, int rt)
{
        u64 lr_val;
        u8 lr_prio, pmr;
        int lr, grp;

        grp = __vgic_v3_get_group(vcpu);

        lr = __vgic_v3_highest_priority_lr(vcpu, vmcr, &lr_val);
        if (lr < 0)
                goto spurious;

        /* An IAR read only acknowledges interrupts of its own group */
        if (grp != !!(lr_val & ICH_LR_GROUP))
                goto spurious;

        /* Lower values mean higher priority; PMR masks anything at or below it */
        pmr = (vmcr & ICH_VMCR_PMR_MASK) >> ICH_VMCR_PMR_SHIFT;
        lr_prio = (lr_val & ICH_LR_PRIORITY_MASK) >> ICH_LR_PRIORITY_SHIFT;
        if (pmr <= lr_prio)
                goto spurious;

        if (__vgic_v3_get_highest_active_priority() <=
            __vgic_v3_pri_to_pre(lr_prio, vmcr, grp))
                goto spurious;

        /* Acknowledge: move the LR from pending to active */
        lr_val &= ~ICH_LR_STATE;
        /* No active state for LPIs */
        if ((lr_val & ICH_LR_VIRTUAL_ID_MASK) <= VGIC_MAX_SPI)
                lr_val |= ICH_LR_ACTIVE_BIT;
        __gic_v3_set_lr(lr_val, lr);
        __vgic_v3_set_active_priority(lr_prio, vmcr, grp);
        vcpu_set_reg(vcpu, rt, lr_val & ICH_LR_VIRTUAL_ID_MASK);
        return;

spurious:
        vcpu_set_reg(vcpu, rt, ICC_IAR1_EL1_SPURIOUS);
}
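/*
 * A minimal standalone sketch of the list-register decoding that
 * __vgic_v3_read_iar() relies on, assuming the standard GICv3 ICH_LR_EL2
 * layout (virtual ID in bits [31:0], priority in bits [55:48], group in
 * bit 60, pending/active state in bits [63:62]). The DEMO_* names and
 * helper are illustrative only, not part of the kernel API.
 */
#include <stdint.h>
#include <stdio.h>

#define DEMO_LR_VIRTUAL_ID_MASK ((1ULL << 32) - 1)
#define DEMO_LR_PRIORITY_SHIFT  48
#define DEMO_LR_PRIORITY_MASK   (0xffULL << DEMO_LR_PRIORITY_SHIFT)
#define DEMO_LR_GROUP           (1ULL << 60)
#define DEMO_LR_PENDING_BIT     (1ULL << 62)
#define DEMO_LR_ACTIVE_BIT      (1ULL << 63)

static void demo_decode_lr(uint64_t lr_val)
{
        uint32_t intid = lr_val & DEMO_LR_VIRTUAL_ID_MASK;
        uint8_t prio = (lr_val & DEMO_LR_PRIORITY_MASK) >> DEMO_LR_PRIORITY_SHIFT;

        printf("intid=%u prio=0x%02x group%d %s%s\n",
               intid, prio, !!(lr_val & DEMO_LR_GROUP),
               (lr_val & DEMO_LR_PENDING_BIT) ? "P" : "",
               (lr_val & DEMO_LR_ACTIVE_BIT) ? "A" : "");
}

int main(void)
{
        /* A pending group-1 interrupt 27 at priority 0xa0 */
        demo_decode_lr(DEMO_LR_PENDING_BIT | DEMO_LR_GROUP |
                       (0xa0ULL << DEMO_LR_PRIORITY_SHIFT) | 27);
        return 0;
}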
/**
 * kvm_handle_mmio_return -- Handle MMIO loads after user space emulation
 * @vcpu: The VCPU pointer
 * @run:  The VCPU run struct containing the mmio data
 *
 * This should only be called after returning from userspace for MMIO load
 * emulation.
 */
int kvm_handle_mmio_return(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        unsigned long data;
        unsigned int len;
        int mask;

        if (!run->mmio.is_write) {
                len = run->mmio.len;
                if (len > sizeof(unsigned long))
                        return -EINVAL;

                data = mmio_read_buf(run->mmio.data, len);

                if (vcpu->arch.mmio_decode.sign_extend &&
                    len < sizeof(unsigned long)) {
                        /* XOR/subtract the sign bit to extend it upwards */
                        mask = 1U << ((len * 8) - 1);
                        data = (data ^ mask) - mask;
                }

                trace_kvm_mmio(KVM_TRACE_MMIO_READ, len, run->mmio.phys_addr,
                               data);
                data = vcpu_data_host_to_guest(vcpu, data, len);
                vcpu_set_reg(vcpu, vcpu->arch.mmio_decode.rt, data);
        }

        return 0;
}
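/*
 * A standalone sketch of the sign-extension trick used above, assuming
 * only standard C: XORing the loaded value with its sign bit and then
 * subtracting that bit propagates the sign through the upper bytes.
 * demo_sign_extend() is an illustrative name, not kernel API.
 */
#include <stdio.h>

static unsigned long demo_sign_extend(unsigned long data, unsigned int len)
{
        unsigned long mask = 1UL << ((len * 8) - 1);

        return (data ^ mask) - mask;
}

int main(void)
{
        /* A 1-byte load of 0xff becomes (unsigned long)-1 ... */
        printf("%lx\n", demo_sign_extend(0xffUL, 1));
        /* ... while 0x7f has a clear sign bit and is unchanged */
        printf("%lx\n", demo_sign_extend(0x7fUL, 1));
        return 0;
}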
static int handle_smc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        /*
         * "If an SMC instruction executed at Non-secure EL1 is
         * trapped to EL2 because HCR_EL2.TSC is 1, the exception is a
         * Trap exception, not a Secure Monitor Call exception [...]"
         *
         * We need to advance the PC after the trap, as it would
         * otherwise return to the same address...
         */
        vcpu_set_reg(vcpu, 0, ~0UL);
        kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
        return 1;
}
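/*
 * A sketch of the PC adjustment handle_smc() depends on, assuming the
 * architectural rule that a trap leaves the return address pointing at
 * the trapped instruction itself: skipping means adding the instruction
 * length, which the ESR_EL2.IL bit reports as 32-bit (4 bytes) or
 * 16-bit (2 bytes). The helper name is hypothetical.
 */
static inline unsigned long demo_next_pc(unsigned long pc, int il_is_32bit)
{
        return pc + (il_is_32bit ? 4 : 2);
}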
static int handle_hvc(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int ret;

        trace_kvm_hvc_arm64(*vcpu_pc(vcpu), vcpu_get_reg(vcpu, 0),
                            kvm_vcpu_hvc_get_imm(vcpu));
        vcpu->stat.hvc_exit_stat++;

        ret = kvm_hvc_call_handler(vcpu);
        if (ret < 0) {
                /* Unknown/unsupported call: return NOT_SUPPORTED (-1) in x0 */
                vcpu_set_reg(vcpu, 0, ~0UL);
                return 1;
        }

        return ret;
}
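/*
 * A guest-side sketch of the convention handle_hvc() implements,
 * assuming the SMCCC register usage: the function ID goes in x0 and
 * the result comes back in x0, with all-ones (NOT_SUPPORTED, -1) for
 * calls the hypervisor rejects. demo_hvc() is hypothetical and would
 * only build for AArch64.
 */
static inline unsigned long demo_hvc(unsigned long func_id)
{
        register unsigned long x0 asm("x0") = func_id;

        /*
         * A real SMCCC call must also pass and clobber x1-x17; this
         * sketch only models the x0 function-ID/result register.
         */
        asm volatile("hvc #0" : "+r" (x0) : : "memory");
        return x0;
}

/* Usage: if (demo_hvc(DEMO_FN_ID) == ~0UL) the call isn't supported. */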
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *                                   guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = kern_hyp_va(vcpu->kvm);
        struct vgic_dist *vgic = &kvm->arch.vgic;
        phys_addr_t fault_ipa;
        void __iomem *addr;
        int rd;

        /* Build the full address */
        fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);
        fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

        /* If not for GICV, move on */
        if (fault_ipa < vgic->vgic_cpu_base ||
            fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
                return 0;

        /* Reject anything but a 32bit access */
        if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
                return -1;

        /* Not aligned? Don't bother */
        if (fault_ipa & 3)
                return -1;

        rd = kvm_vcpu_dabt_get_rd(vcpu);
        addr = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
        addr += fault_ipa - vgic->vgic_cpu_base;

        if (kvm_vcpu_dabt_iswrite(vcpu)) {
                u32 data = vcpu_get_reg(vcpu, rd);
                if (__is_be(vcpu)) {
                        /* guest pre-swabbed data, undo this for writel() */
                        data = swab32(data);
                }
                writel_relaxed(data, addr);
        } else {
                u32 data = readl_relaxed(addr);
                if (__is_be(vcpu)) {
                        /* guest expects swabbed data */
                        data = swab32(data);
                }
                vcpu_set_reg(vcpu, rd, data);
        }

        return 1;
}
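/*
 * A standalone sketch of the endianness fix-up above, assuming only
 * standard C: a big-endian guest's register holds byte-reversed data
 * relative to the little-endian writel_relaxed()/readl_relaxed()
 * accessors, so one swab on each path restores device byte order.
 * demo_swab32() mirrors the kernel's swab32(); the name is illustrative.
 */
#include <stdint.h>

static uint32_t demo_swab32(uint32_t x)
{
        return ((x & 0x000000ffU) << 24) |
               ((x & 0x0000ff00U) <<  8) |
               ((x & 0x00ff0000U) >>  8) |
               ((x & 0xff000000U) >> 24);
}

/* demo_swab32(0x12345678) == 0x78563412, and swabbing twice is a no-op. */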