void disable_TSC(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOTSC))
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOTSC in the current running context.
		 */
		hard_disable_TSC();
	preempt_enable();
}
static void disable_cpuid(void)
{
	preempt_disable();
	if (!test_and_set_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(true);
	}
	preempt_enable();
}
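/*
 * For context, a minimal sketch of the symmetric enable path: the same
 * idiom is used in reverse, clearing the flag with
 * test_and_clear_thread_flag() and flipping the CPU state back while
 * preemption is still disabled. The helper name and flag follow the
 * snippet above; this is an illustration, not taken verbatim from the
 * source.
 */
static void enable_cpuid(void)
{
	preempt_disable();
	if (test_and_clear_thread_flag(TIF_NOCPUID)) {
		/*
		 * Must flip the CPU state synchronously with
		 * TIF_NOCPUID in the current running context.
		 */
		set_cpuid_faulting(false);
	}
	preempt_enable();
}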
void
die_if_kernel(char * str, struct pt_regs *regs, long err, unsigned long *r9_15)
{
	/* Nothing to do if the trap happened in user mode. */
	if (regs->ps & 8)
		return;
#ifdef CONFIG_SMP
	printk("CPU %d ", hard_smp_processor_id());
#endif
	printk("%s(%d): %s %ld\n", current->comm, task_pid_nr(current), str, err);
	dik_show_regs(regs, r9_15);
	add_taint(TAINT_DIE);
	dik_show_trace((unsigned long *)(regs+1));
	dik_show_code((unsigned int *)regs->pc);

	/* Catch die_if_kernel() being re-entered while dumping state. */
	if (test_and_set_thread_flag(TIF_DIE_IF_KERNEL)) {
		printk("die_if_kernel recursion detected.\n");
		local_irq_enable();
		while (1);
	}
	do_exit(SIGSEGV);
}
/*
 * Kernel-side NEON support functions
 */
void kernel_neon_begin_partial(u32 num_regs)
{
	if (in_interrupt()) {
		struct fpsimd_partial_state *s = this_cpu_ptr(
			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);

		BUG_ON(num_regs > 32);
		fpsimd_save_partial_state(s, roundup(num_regs, 2));
	} else {
		/*
		 * Save the userland FPSIMD state if we have one and if we
		 * haven't done so already. Clear fpsimd_last_state to indicate
		 * that there is no longer userland FPSIMD state in the
		 * registers.
		 */
		preempt_disable();
		if (current->mm &&
		    !test_and_set_thread_flag(TIF_FOREIGN_FPSTATE))
			fpsimd_save_state(&current->thread.fpsimd_state);
		this_cpu_write(fpsimd_last_state, NULL);
	}
}
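/*
 * A sketch of the matching end path, assuming the per-CPU partial-state
 * buffers above and an fpsimd_load_partial_state() counterpart to the
 * save helper; it is an illustration, not taken verbatim from the source.
 * Interrupt context restores the registers it saved, while task context
 * only re-enables preemption: the user state is reloaded lazily because
 * TIF_FOREIGN_FPSTATE marks the register contents as stale.
 */
void kernel_neon_end(void)
{
	if (in_interrupt()) {
		struct fpsimd_partial_state *s = this_cpu_ptr(
			in_irq() ? &hardirq_fpsimdstate : &softirq_fpsimdstate);

		fpsimd_load_partial_state(s);
	} else {
		preempt_enable();
	}
}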