/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	unsigned long flags;

	/*
	 * Mask IRQs for the whole sequence: the FPSIMD/SVE state and the
	 * trap controls must not change under us between the save/flush
	 * and the TIF_SVE restore below.
	 */
	local_irq_save(flags);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		/* Clean guest FP state to memory and invalidate cpu view */
		fpsimd_save();
		fpsimd_flush_cpu_state();
	} else if (system_supports_sve()) {
		/*
		 * The FPSIMD/SVE state in the CPU has not been touched, and we
		 * have SVE (and VHE): CPACR_EL1 (alias CPTR_EL2) has been
		 * reset to CPACR_EL1_DEFAULT by the Hyp code, disabling SVE
		 * for EL0. To avoid spurious traps, restore the trap state
		 * seen by kvm_arch_vcpu_load_fp():
		 */
		if (vcpu->arch.flags & KVM_ARM64_HOST_SVE_ENABLED)
			sysreg_clear_set(CPACR_EL1, 0, CPACR_EL1_ZEN_EL0EN);
		else
			sysreg_clear_set(CPACR_EL1, CPACR_EL1_ZEN_EL0EN, 0);
	}

	/*
	 * Put the host task's TIF_SVE back to the value captured in
	 * vcpu->arch.flags when the vcpu FP state was loaded.
	 */
	update_thread_flag(TIF_SVE,
			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);

	local_irq_restore(flags);
}
/*
 * Write back the vcpu FPSIMD regs if they are dirty, and invalidate the
 * cpu FPSIMD regs so that they can't be spuriously reused if this vcpu
 * disappears and another task or vcpu appears that recycles the same
 * struct fpsimd_state.
 *
 * NOTE(review): this is a second definition of kvm_arch_vcpu_put_fp in
 * this file — it looks like an earlier revision (bh-disable based) pasted
 * alongside the later IRQ-masked one above; confirm which one is intended,
 * as both cannot be compiled into the same object.
 */
void kvm_arch_vcpu_put_fp(struct kvm_vcpu *vcpu)
{
	/*
	 * Disable softirqs: they can also touch the FPSIMD state, so keep
	 * them out while we save/rebind it.
	 */
	local_bh_disable();

	/*
	 * Restore the host task's TIF_SVE first: fpsimd_bind_task_to_cpu()
	 * below reprograms the EL0 trap controls based on it.
	 */
	update_thread_flag(TIF_SVE,
			   vcpu->arch.flags & KVM_ARM64_HOST_SVE_IN_USE);

	if (vcpu->arch.flags & KVM_ARM64_FP_ENABLED) {
		/* Clean guest FP state to memory and invalidate cpu view */
		fpsimd_save();
		fpsimd_flush_cpu_state();
	} else if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
		/* Ensure user trap controls are correctly restored */
		fpsimd_bind_task_to_cpu();
	}

	local_bh_enable();
}
/*
 * Set the SVE vector length for @task, either immediately or deferred
 * until the next exec (PR_SVE_SET_VL_ONEXEC).
 *
 * @task:  task whose vector length is being configured
 * @vl:    requested vector length in bytes (clamped/rounded to a
 *         supported value)
 * @flags: PR_SVE_VL_INHERIT and/or PR_SVE_SET_VL_ONEXEC; any other bit
 *         is rejected
 *
 * Returns 0 on success, -EINVAL on a bad @vl or unknown flag bits.
 *
 * NOTE(review): presumably the caller serialises against @task's FP
 * state (ptrace/prctl paths) — verify, since only the task == current
 * case takes local_bh_disable() here.
 */
int sve_set_vector_length(struct task_struct *task,
			  unsigned long vl, unsigned long flags)
{
	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
				     PR_SVE_SET_VL_ONEXEC))
		return -EINVAL;

	if (!sve_vl_valid(vl))
		return -EINVAL;

	/*
	 * Clamp to the maximum vector length that VL-agnostic SVE code can
	 * work with. A flag may be assigned in the future to allow setting
	 * of larger vector lengths without confusing older software.
	 */
	if (vl > SVE_VL_ARCH_MAX)
		vl = SVE_VL_ARCH_MAX;

	/* Round the request to a length the hardware actually supports. */
	vl = find_supported_vector_length(vl);

	if (flags & (PR_SVE_VL_INHERIT |
		     PR_SVE_SET_VL_ONEXEC))
		task->thread.sve_vl_onexec = vl;
	else
		/* Reset VL to system default on next exec: */
		task->thread.sve_vl_onexec = 0;

	/* Only actually set the VL if not deferred: */
	if (flags & PR_SVE_SET_VL_ONEXEC)
		goto out;

	/* No change requested: nothing to flush or reallocate. */
	if (vl == task->thread.sve_vl)
		goto out;

	/*
	 * To ensure the FPSIMD bits of the SVE vector registers are preserved,
	 * write any live register state back to task_struct, and convert to a
	 * non-SVE thread.
	 */
	if (task == current) {
		/* Keep softirq FPSIMD users away while we save our state. */
		local_bh_disable();

		fpsimd_save();
		set_thread_flag(TIF_FOREIGN_FPSTATE);
	}

	/* Invalidate any stale CPU-side copy of the task's state. */
	fpsimd_flush_task_state(task);
	if (test_and_clear_tsk_thread_flag(task, TIF_SVE))
		/* Preserve the shared FPSIMD view before dropping SVE state. */
		sve_to_fpsimd(task);

	if (task == current)
		local_bh_enable();

	/*
	 * Force reallocation of task SVE state to the correct size
	 * on next use:
	 */
	sve_free(task);

	task->thread.sve_vl = vl;

out:
	/* Record whether the VL should be inherited across exec. */
	update_tsk_thread_flag(task, TIF_SVE_VL_INHERIT,
			       flags & PR_SVE_VL_INHERIT);

	return 0;
}