void fpsimd_flush_thread(void) { preempt_disable(); memset(¤t->thread.fpsimd_state, 0, sizeof(struct fpsimd_state)); fpsimd_load_state(¤t->thread.fpsimd_state); preempt_enable(); }
/*
 * Leave a kernel-mode NEON section: reload the task's saved userland
 * FPSIMD state (kernel threads have no mm, hence no user state to
 * restore) and drop the preemption disable taken by kernel_neon_begin().
 *
 * Fix: restored "&current", which had been garbled to "¤t" by an
 * HTML-entity mangling of the source text.
 */
void kernel_neon_end(void)
{
	if (current->mm)
		fpsimd_load_state(&current->thread.fpsimd_state);
	preempt_enable();
}
/*
 * Restore current's FPSIMD register state from a user-supplied signal
 * frame record.
 *
 * Returns 0 on success, -EFAULT if any userspace access faults, and
 * -EINVAL if the record header's magic/size do not match.
 */
static int restore_fpsimd_context(struct fpsimd_context __user *ctx)
{
	struct fpsimd_state new_state;
	__u32 magic, size;
	int ret = 0;

	/* Validate the record header before trusting the payload. */
	__get_user_error(magic, &ctx->head.magic, ret);
	__get_user_error(size, &ctx->head.size, ret);
	if (ret)
		return -EFAULT;
	if (magic != FPSIMD_MAGIC || size != sizeof(struct fpsimd_context))
		return -EINVAL;

	/* Stage the vector registers and status/control words locally. */
	ret = __copy_from_user(new_state.vregs, ctx->vregs,
			       sizeof(new_state.vregs));
	__get_user_error(new_state.fpsr, &ctx->fpsr, ret);
	__get_user_error(new_state.fpcr, &ctx->fpcr, ret);

	/* Only touch the hardware once everything copied in cleanly. */
	if (!ret) {
		preempt_disable();
		fpsimd_load_state(&new_state);
		preempt_enable();
	}

	return ret ? -EFAULT : 0;
}
/*
 * Context-switch hook: save the outgoing task's FPSIMD registers and
 * load the incoming task's saved state. Kernel threads (mm == NULL)
 * have no user FPSIMD state, so they are skipped in both directions.
 *
 * Fix: restored "&current", which had been garbled to "¤t" by an
 * HTML-entity mangling of the source text.
 */
void fpsimd_thread_switch(struct task_struct *next)
{
	/* Only user tasks own register state worth saving/loading. */
	if (current->mm)
		fpsimd_save_state(&current->thread.fpsimd_state);
	if (next->mm)
		fpsimd_load_state(&next->thread.fpsimd_state);
}
/* * Update current's FPSIMD/SVE registers from thread_struct. * * This function should be called only when the FPSIMD/SVE state in * thread_struct is known to be up to date, when preparing to enter * userspace. * * Softirqs (and preemption) must be disabled. */ static void task_fpsimd_load(void) { WARN_ON(!in_softirq() && !irqs_disabled()); if (system_supports_sve() && test_thread_flag(TIF_SVE)) sve_load_state(sve_pffr(¤t->thread), ¤t->thread.uw.fpsimd_state.fpsr, sve_vq_from_vl(current->thread.sve_vl) - 1); else fpsimd_load_state(¤t->thread.uw.fpsimd_state); }
/* * Load an updated userland FPSIMD state for 'current' from memory and set the * flag that indicates that the FPSIMD register contents are the most recent * FPSIMD state of 'current' */ void fpsimd_update_current_state(struct fpsimd_state *state) { preempt_disable(); fpsimd_load_state(state); if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { struct fpsimd_state *st = ¤t->thread.fpsimd_state; this_cpu_write(fpsimd_last_state, st); st->cpu = smp_processor_id(); } preempt_enable(); }
/*
 * Leave a kernel-mode NEON section (interrupt-aware variant).
 *
 * In interrupt context, restore the partial state stashed per-CPU by
 * kernel_neon_begin_partial() (hardirq vs softirq banks). In task
 * context, reload the task's userland FPSIMD state (kernel threads have
 * none) and re-enable preemption.
 *
 * Fix: restored "&current", which had been garbled to "¤t" by an
 * HTML-entity mangling of the source text.
 */
void kernel_neon_end(void)
{
	if (in_interrupt()) {
		struct fpsimd_partial_state *s = this_cpu_ptr(
			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);
		fpsimd_load_partial_state(s);
	} else {
		if (current->mm)
			fpsimd_load_state(&current->thread.fpsimd_state);
		preempt_enable();
	}
}
/* * Load the userland FPSIMD state of 'current' from memory, but only if the * FPSIMD state already held in the registers is /not/ the most recent FPSIMD * state of 'current' */ void fpsimd_restore_current_state(void) { if (!system_supports_fpsimd()) return; preempt_disable(); if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) { struct fpsimd_state *st = ¤t->thread.fpsimd_state; fpsimd_load_state(st); this_cpu_write(fpsimd_last_state, st); st->cpu = smp_processor_id(); } preempt_enable(); }
/*
 * CPU PM notifier: save the interrupted task's FPSIMD state before the
 * CPU powers down (registers may be lost) and reload it on exit.
 * Kernel threads (mm == NULL) carry no user FPSIMD state and are skipped.
 *
 * Fix: restored "&current", which had been garbled to "¤t" by an
 * HTML-entity mangling of the source text (two occurrences).
 */
static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
				  unsigned long cmd, void *v)
{
	switch (cmd) {
	case CPU_PM_ENTER:
		if (current->mm)
			fpsimd_save_state(&current->thread.fpsimd_state);
		break;
	case CPU_PM_EXIT:
		if (current->mm)
			fpsimd_load_state(&current->thread.fpsimd_state);
		break;
	case CPU_PM_ENTER_FAILED:
	default:
		return NOTIFY_DONE;
	}
	return NOTIFY_OK;
}