static int vfp_hotplug_notifier(struct notifier_block *b, unsigned long action,
				void *data)
{
	if (action == CPU_STARTING)
		vfp_enable(NULL);
	return NOTIFY_OK;
}
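A notifier like this only fires once it is hung on the CPU notifier chain. A minimal registration sketch, assuming the old register_cpu_notifier() interface; the vfp_hotplug_nb name and the init wrapper are hypothetical:

/* Hypothetical registration for the notifier above, using the old
 * CPU notifier chain; the names here are illustrative only.
 */
static struct notifier_block vfp_hotplug_nb = {
	.notifier_call = vfp_hotplug_notifier,
};

static int __init vfp_hotplug_init(void)
{
	register_cpu_notifier(&vfp_hotplug_nb);
	return 0;
}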
static void vfp_pm_resume(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
void vfp_pm_resume(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
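This void-returning variant matches the resume hook of struct syscore_ops. A sketch of the likely wiring, assuming a matching vfp_pm_suspend() exists:

static struct syscore_ops vfp_pm_syscore_ops = {
	.suspend	= vfp_pm_suspend,	/* assumed counterpart, not shown here */
	.resume		= vfp_pm_resume,
};

static void vfp_pm_init(void)
{
	register_syscore_ops(&vfp_pm_syscore_ops);
}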
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
		vfp_force_reload((long)hcpu, current_thread_info());
	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}
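vfp_force_reload() is not shown in this collection. A hedged sketch of what it plausibly does, built from the vfp_current_hw_state[] bookkeeping used elsewhere here; all details are assumptions, not the kernel's actual body:

/* Sketch only: drop any VFP state this thread holds in the hardware so
 * the next VFP instruction traps and reloads a clean context.
 */
static void vfp_force_reload(unsigned int cpu, struct thread_info *thread)
{
	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);	/* disable VFP */
		vfp_current_hw_state[cpu] = NULL;	/* forget cached state */
	}
#ifdef CONFIG_SMP
	thread->vfpstate.hard.cpu = NR_CPUS;	/* no CPU holds this state now */
#endif
}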
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		smp_call_function(vfp_enable, NULL, 1);

		/* Extract the architecture version */
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_NEON
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations.
		 */
		if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
			elf_hwcap |= HWCAP_NEON;
#endif
	}
	return 0;
}
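The elf_hwcap bits set above surface to userspace through the ELF auxiliary vector. A small self-contained check; getauxval() is glibc 2.16+, and the HWCAP_* constants come from the 32-bit ARM asm/hwcap.h:

#include <stdio.h>
#include <sys/auxv.h>
#include <asm/hwcap.h>	/* HWCAP_VFP, HWCAP_NEON on 32-bit ARM */

int main(void)
{
	unsigned long hwcap = getauxval(AT_HWCAP);

	printf("VFP:  %s\n", (hwcap & HWCAP_VFP)  ? "yes" : "no");
	printf("NEON: %s\n", (hwcap & HWCAP_NEON) ? "yes" : "no");
	return 0;
}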
/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN)
		vfp_current_hw_state[(long)hcpu] = NULL;
	else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}
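For reference, the per-CPU pointer this notifier clears is declared roughly as follows; the exact declaration varies by kernel version, so treat this as a sketch:

/* VFP state currently loaded in each CPU's registers, or NULL when the
 * hardware contents are stale and must not be trusted.
 */
union vfp_state *vfp_current_hw_state[NR_CPUS];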
static int vfp_pm_resume(struct sys_device *dev)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

	return 0;
}
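Taking a struct sys_device means this resume hook belongs to the old sysdev class interface. A sketch of the plumbing, assuming a matching vfp_pm_suspend() and treating the class name as illustrative:

static struct sysdev_class vfp_pm_sysclass = {
	.name		= "vfp",		/* illustrative class name */
	.suspend	= vfp_pm_suspend,	/* assumed counterpart, not shown */
	.resume		= vfp_pm_resume,
};

static struct sys_device vfp_pm_sysdev = {
	.cls	= &vfp_pm_sysclass,
};

static void vfp_pm_init(void)
{
	sysdev_class_register(&vfp_pm_sysclass);
	sysdev_register(&vfp_pm_sysdev);
}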
/*
 * VFP hardware can lose all context when a CPU goes offline.
 * As we will be running in SMP mode with CPU hotplug, we will save the
 * hardware state at every thread switch.  We clear our held state when
 * a CPU has been killed, indicating that the VFP hardware doesn't contain
 * a thread's VFP state.  When a CPU starts up, we re-enable access to the
 * VFP hardware.
 *
 * Both CPU_DYING and CPU_STARTING are called on the CPU which
 * is being offlined/onlined.
 */
static int vfp_hotplug(struct notifier_block *b, unsigned long action,
	void *hcpu)
{
	if (action == CPU_DYING || action == CPU_DYING_FROZEN) {
		unsigned int cpu = (long)hcpu;
		last_VFP_context[cpu] = NULL;
	} else if (action == CPU_STARTING || action == CPU_STARTING_FROZEN)
		vfp_enable(NULL);
	return NOTIFY_OK;
}
int vfp_flush_context(void)
{
	unsigned long flags;
	struct thread_info *ti;
	u32 fpexc;
	u32 cpu;
	int saved = 0;

	local_irq_save(flags);

	ti = current_thread_info();
	fpexc = fmrx(FPEXC);
	cpu = ti->cpu;

#ifdef CONFIG_SMP
	/* On SMP, if VFP is enabled, save the old state */
	if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
		vfp_current_hw_state[cpu]->hard.cpu = cpu;
#else
	/* If there is a VFP context we must save it. */
	if (vfp_current_hw_state[cpu]) {
		/* Enable VFP so we can save the old state. */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		isb();
#endif
		vfp_save_state(vfp_current_hw_state[cpu], fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		saved = 1;
	}

	/* clear any information we had about the last context state */
	vfp_current_hw_state[cpu] = NULL;

	local_irq_restore(flags);

	return saved;
}

void vfp_reinit(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
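A hedged sketch of how a platform low-power path might pair these two helpers; the surrounding function is hypothetical:

/* Hypothetical idle/off-mode path: push the live VFP state to memory
 * before register contents are lost, re-enable the unit afterwards.
 */
static void platform_enter_off_mode(void)
{
	int saved = vfp_flush_context();

	/* ... power down the core; VFP registers are lost here ... */

	vfp_reinit();		/* next VFP use traps and restores state */
	(void)saved;		/* could drive a fast-path restore instead */
}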
static int vfp_pm_resume(struct sys_device *dev)
{
	unsigned int cpu_arch = cpu_architecture();

	/* ensure we have access to the vfp */
	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

	return 0;
}
void vfp_pm_save_context(void)
{
	u32 fpexc = fmrx(FPEXC);
	unsigned int cpu = get_cpu();

	/* Save last_VFP_context if needed */
	if (last_VFP_context[cpu]) {
		/* Enable vfp to save context */
		if (!(fpexc & FPEXC_EN)) {
			vfp_enable(NULL);
			fmxr(FPEXC, fpexc | FPEXC_EN);
		}

		vfp_save_state(last_VFP_context[cpu], fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);

		last_VFP_context[cpu] = NULL;
	}

	put_cpu();
}
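vfp_pm_save_context() is designed to be called from a platform suspend path before the CPU loses state. A minimal hypothetical caller; the hook name is an assumption:

/* Hypothetical platform suspend hook: get the owning thread's VFP
 * registers into memory before entering a state that drops them.
 */
static int platform_pm_enter(suspend_state_t state)
{
	vfp_pm_save_context();
	/* ... program the power controller and enter the low-power state ... */
	return 0;
}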
int vfp_flush_context(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);
	u32 cpu = ti->cpu;
	int saved = 0;

#ifdef CONFIG_SMP
	/* On SMP, if VFP is enabled, save the old state */
	if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
#else
	/* If there is a VFP context we must save it. */
	if (last_VFP_context[cpu]) {
		/* Enable VFP so we can save the old state. */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		isb();
#endif
		vfp_save_state(last_VFP_context[cpu], fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);

		last_VFP_context[cpu] = NULL;
		saved = 1;
	}

	return saved;
}

void vfp_reinit(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
/*
 * VFP support code initialisation.
 */
static int __init vfp_init(void)
{
	unsigned int vfpsid;
	unsigned int cpu_arch = cpu_architecture();

	if (cpu_arch >= CPU_ARCH_ARMv6)
		vfp_enable(NULL);

	/*
	 * First check that there is a VFP that we can use.
	 * The handler is already set up to just log calls, so
	 * we just need to read the VFPSID register.
	 */
	vfp_vector = vfp_testing_entry;
	barrier();
	vfpsid = fmrx(FPSID);
	barrier();
	vfp_vector = vfp_null_entry;

	printk(KERN_INFO "VFP support v0.3: ");
	if (VFP_arch)
		printk("not present\n");
	else if (vfpsid & FPSID_NODOUBLE) {
		printk("no double precision support\n");
	} else {
		hotcpu_notifier(vfp_hotplug, 0);

		smp_call_function(vfp_enable, NULL, 1);

		/* Extract the architecture version */
		VFP_arch = (vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT;
		printk("implementor %02x architecture %d part %02x variant %x rev %x\n",
			(vfpsid & FPSID_IMPLEMENTER_MASK) >> FPSID_IMPLEMENTER_BIT,
			(vfpsid & FPSID_ARCH_MASK) >> FPSID_ARCH_BIT,
			(vfpsid & FPSID_PART_MASK) >> FPSID_PART_BIT,
			(vfpsid & FPSID_VARIANT_MASK) >> FPSID_VARIANT_BIT,
			(vfpsid & FPSID_REV_MASK) >> FPSID_REV_BIT);

		vfp_vector = vfp_support_entry;

		thread_register_notifier(&vfp_notifier_block);
		vfp_pm_init();

		/*
		 * We detected VFP, and the support code is
		 * in place; report VFP support to userspace.
		 */
		elf_hwcap |= HWCAP_VFP;
#ifdef CONFIG_VFPv3
		if (VFP_arch >= 2) {
			elf_hwcap |= HWCAP_VFPv3;

			/*
			 * Check for VFPv3 D16 and VFPv4 D16.  CPUs in
			 * this configuration only have 16 x 64bit
			 * registers.
			 */
			if ((fmrx(MVFR0) & MVFR0_A_SIMD_MASK) == 1)
				elf_hwcap |= HWCAP_VFPv3D16; /* also v4-D16 */
		}
#endif
		/*
		 * Check for the presence of the Advanced SIMD
		 * load/store instructions, integer and single
		 * precision floating point operations. Only check
		 * for NEON if the hardware has the MVFR registers.
		 */
		if ((read_cpuid_id() & 0x000f0000) == 0x000f0000) {
#ifdef CONFIG_NEON
			if ((fmrx(MVFR1) & 0x000fff00) == 0x00011100)
				elf_hwcap |= HWCAP_NEON;
#endif
			if ((fmrx(MVFR1) & 0xf0000000) == 0x10000000 ||
			    (read_cpuid_id() & 0xff00fc00) == 0x51000400)
				elf_hwcap |= HWCAP_VFPv4;
		}
	}
	return 0;
}
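In the kernel tree, vfp_init() is registered as a late initcall so that it runs after the core CPU and notifier infrastructure is up:

late_initcall(vfp_init);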