Example No. 1
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}
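
For context: a thread notifier like this is normally hooked into the scheduler's switch path with thread_register_notifier(). The registration below is a minimal sketch modelled on mainline vfpmodule.c; the init-function body is abbreviated.

static struct notifier_block vfp_notifier_block = {
	.notifier_call = vfp_notifier,
};

static int __init vfp_init(void)
{
	/* ... VFP hardware detection elided ... */
	thread_register_notifier(&vfp_notifier_block);
	return 0;
}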
/*
 * Ensure that the VFP state stored in 'thread->vfpstate' is up to date
 * with the hardware state.
 */
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	if (vfp_state_in_hw(cpu, thread)) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	put_cpu();
}
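
vfp_state_in_hw() is a small ownership test. A sketch consistent with the mainline helper is shown below; on SMP the CPU recorded at save time must also match, since the thread may have migrated after its state was last loaded into hardware.

static bool vfp_state_in_hw(unsigned int cpu, struct thread_info *thread)
{
#ifdef CONFIG_SMP
	if (thread->vfpstate.hard.cpu != cpu)
		return false;
#endif
	return vfp_current_hw_state[cpu] == &thread->vfpstate;
}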
int vfp_flush_context(void)
{
	unsigned long flags;
	struct thread_info *ti;
	u32 fpexc;
	u32 cpu;
	int saved = 0;

	local_irq_save(flags);

	ti = current_thread_info();
	fpexc = fmrx(FPEXC);
	cpu = ti->cpu;

#ifdef CONFIG_SMP
	/* On SMP, if VFP is enabled, save the old state */
	if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
		last_VFP_context[cpu]->hard.cpu = cpu;
#else
	/* If there is a VFP context we must save it. */
	if (last_VFP_context[cpu]) {
		/* Enable VFP so we can save the old state. */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		isb();
#endif
		vfp_save_state(last_VFP_context[cpu], fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		saved = 1;
	}
	last_VFP_context[cpu] = NULL;

	local_irq_restore(flags);

	return saved;
}

void vfp_reinit(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
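
A hedged sketch of how vfp_flush_context() and vfp_reinit() pair up around a power transition; plat_vfp_suspend() and plat_power_down() are hypothetical names used only for illustration.

static int plat_vfp_suspend(void)
{
	int saved = vfp_flush_context();	/* save and disown any live VFP state */

	plat_power_down();	/* hypothetical: VFP register contents are lost here */

	vfp_reinit();		/* regain access; the next VFP use reloads lazily */

	return saved;		/* whether a context was actually saved */
}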
/*
 * When this function is called with the following 'cmd's, the following
 * is true while this function is being run:
 *  THREAD_NOTIFY_SWITCH:
 *   - the previously running thread will not be scheduled onto another CPU.
 *   - the next thread to be run (v) will not be running on another CPU.
 *   - thread->cpu is the local CPU number
 *   - not preemptible as we're called in the middle of a thread switch
 *  THREAD_NOTIFY_FLUSH:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 *  THREAD_NOTIFY_EXIT:
 *   - the thread (v) will be running on the local CPU, so
 *	v === current_thread_info()
 *   - thread->cpu is the local CPU number at the time it is accessed,
 *	but may change at any time.
 *   - we could be preempted if tree preempt rcu is enabled, so
 *	it is unsafe to use thread->cpu.
 */
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		unsigned int cpu = thread->cpu;

		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

	if (cmd == THREAD_NOTIFY_FLUSH)
		vfp_thread_flush(thread);
	else
		vfp_thread_exit(thread);

	return NOTIFY_DONE;
}
static int vfp_pm_suspend(struct sys_device *dev, pm_message_t state)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* clear any information we had about last context state */
	memset(last_VFP_context, 0, sizeof(last_VFP_context));

	return 0;
}
static int vfp_pm_suspend(void)
{
	struct thread_info *ti = current_thread_info();
	u32 fpexc = fmrx(FPEXC);

	/* if vfp is on, then save state for resumption */
	if (fpexc & FPEXC_EN) {
		printk(KERN_DEBUG "%s: saving vfp state\n", __func__);
		vfp_save_state(&ti->vfpstate, fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* clear any information we had about last context state */
	vfp_current_hw_state[ti->cpu] = NULL;

	return 0;
}
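
The second variant matches the syscore_ops calling convention (no device argument). A registration sketch in the style of mainline vfpmodule.c follows, assuming a matching vfp_pm_resume() exists (it is not shown in this example).

static struct syscore_ops vfp_pm_syscore_ops = {
	.suspend	= vfp_pm_suspend,
	.resume		= vfp_pm_resume,	/* assumed counterpart, not shown */
};

static void vfp_pm_init(void)
{
	register_syscore_ops(&vfp_pm_syscore_ops);
}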
Example No. 7
void vfp_sync_hwstate(struct thread_info *thread)
{
	unsigned int cpu = get_cpu();

	/*
	 * If the thread we're interested in is the current owner of the
	 * hardware VFP state, then we need to save its state.
	 */
	if (vfp_current_hw_state[cpu] == &thread->vfpstate) {
		u32 fpexc = fmrx(FPEXC);

		/*
		 * Save the last VFP state on this CPU.
		 */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(&thread->vfpstate, fpexc | FPEXC_EN);
		fmxr(FPEXC, fpexc);
	}

	put_cpu();
}
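
A typical caller is the ptrace/regset path, which must flush the live registers into memory before copying them out. The sketch below is loosely modelled on the ARM vfp_get() regset handler; the signature follows the older regset API and the copy-out step is elided.

static int vfp_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct thread_info *thread = task_thread_info(target);

	vfp_sync_hwstate(thread);	/* flush live registers into vfpstate */

	/* ... copy thread->vfpstate out to kbuf/ubuf ... */
	return 0;
}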
Example No. 8
int
fill_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	if ((pcb->pcb_fpflags & PCB_FP_STARTED) != 0) {
		/*
		 * If we have just been running VFP instructions we will
		 * need to save the state to memcpy it below.
		 */
		vfp_save_state(td, pcb);

		memcpy(regs->fp_q, pcb->pcb_vfp, sizeof(regs->fp_q));
		regs->fp_cr = pcb->pcb_fpcr;
		regs->fp_sr = pcb->pcb_fpsr;
	} else
#endif
		memset(regs->fp_q, 0, sizeof(regs->fp_q));
	return (0);
}
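
The natural counterpart writes a debugger-supplied register set back into the PCB. The sketch below simply mirrors fill_fpregs() under the same VFP guard; it is an assumption about the surrounding API, not a verbatim quote.

int
set_fpregs(struct thread *td, struct fpreg *regs)
{
#ifdef VFP
	struct pcb *pcb;

	pcb = td->td_pcb;
	memcpy(pcb->pcb_vfp, regs->fp_q, sizeof(regs->fp_q));
	pcb->pcb_fpcr = regs->fp_cr;
	pcb->pcb_fpsr = regs->fp_sr;
#endif
	return (0);
}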
Example No. 9
void vfp_pm_save_context(void)
{
	u32 fpexc = fmrx(FPEXC);
	unsigned int cpu = get_cpu();

	/* Save last_VFP_context if needed */
	if (last_VFP_context[cpu]) {
		/* Enable vfp to save context */
		if (!(fpexc & FPEXC_EN)) {
			vfp_enable(NULL);
			fmxr(FPEXC, fpexc | FPEXC_EN);
		}

		vfp_save_state(last_VFP_context[cpu], fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);

		last_VFP_context[cpu] = NULL;
	}

	put_cpu();
}
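
A hedged caller sketch: platform low-power code invokes vfp_pm_save_context() just before the VFP register bank loses power, and nothing needs restoring on the way out because the NULLed last_VFP_context[] entry forces a lazy reload on the next VFP use. plat_enter_lowpower() and plat_do_wfi() are hypothetical.

static void plat_enter_lowpower(void)
{
	vfp_pm_save_context();	/* VFP bank contents are about to be lost */

	plat_do_wfi();		/* hypothetical: power down, then wake up */

	/* No explicit restore: the next VFP instruction traps and reloads. */
}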
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	u32 fpexc;
#ifdef CONFIG_SMP
	unsigned int cpu;
#endif

	switch (cmd) {
	case THREAD_NOTIFY_SWITCH:
		fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		cpu = thread->cpu;

		if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu])
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
#endif

		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;

	case THREAD_NOTIFY_FLUSH:
		vfp_thread_flush(thread);
		break;

	case THREAD_NOTIFY_EXIT:
		vfp_thread_exit(thread);
		break;

	case THREAD_NOTIFY_COPY:
		vfp_thread_copy(thread);
		break;
	}

	return NOTIFY_DONE;
}
Example No. 11
static int vfp_cpu_pm_notifier(struct notifier_block *self, unsigned long cmd,
	void *v)
{
	u32 fpexc = fmrx(FPEXC);
	unsigned int cpu = smp_processor_id();

	switch (cmd) {
	case CPU_PM_ENTER:
		if (vfp_current_hw_state[cpu]) {
			fmxr(FPEXC, fpexc | FPEXC_EN);
			vfp_save_state(vfp_current_hw_state[cpu], fpexc);
			/* force a reload when coming back from idle */
			vfp_current_hw_state[cpu] = NULL;
			fmxr(FPEXC, fpexc & ~FPEXC_EN);
		}
		break;
	case CPU_PM_ENTER_FAILED:
	case CPU_PM_EXIT:
		/* make sure VFP is disabled when leaving idle */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		break;
	}
	return NOTIFY_OK;
}
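
This CPU-PM notifier is registered through the cpu_pm framework; a minimal registration sketch in the style of mainline vfpmodule.c:

static struct notifier_block vfp_cpu_pm_notifier_block = {
	.notifier_call = vfp_cpu_pm_notifier,
};

static void vfp_pm_init(void)
{
	cpu_pm_register_notifier(&vfp_cpu_pm_notifier_block);
}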
Example No. 12
static void ctxt_switch_from(struct vcpu *p)
{
    p2m_save_state(p);

    /* CP 15 */
    p->arch.csselr = READ_SYSREG(CSSELR_EL1);

    /* Control Registers */
    p->arch.cpacr = READ_SYSREG(CPACR_EL1);

    p->arch.contextidr = READ_SYSREG(CONTEXTIDR_EL1);
    p->arch.tpidr_el0 = READ_SYSREG(TPIDR_EL0);
    p->arch.tpidrro_el0 = READ_SYSREG(TPIDRRO_EL0);
    p->arch.tpidr_el1 = READ_SYSREG(TPIDR_EL1);

    /* Arch timer */
    p->arch.cntkctl = READ_SYSREG32(CNTKCTL_EL1);
    virt_timer_save(p);

    if ( is_32bit_domain(p->domain) && cpu_has_thumbee )
    {
        p->arch.teecr = READ_SYSREG32(TEECR32_EL1);
        p->arch.teehbr = READ_SYSREG32(TEEHBR32_EL1);
    }

#ifdef CONFIG_ARM_32
    p->arch.joscr = READ_CP32(JOSCR);
    p->arch.jmcr = READ_CP32(JMCR);
#endif

    isb();

    /* MMU */
    p->arch.vbar = READ_SYSREG(VBAR_EL1);
    p->arch.ttbcr = READ_SYSREG(TCR_EL1);
    p->arch.ttbr0 = READ_SYSREG64(TTBR0_EL1);
    p->arch.ttbr1 = READ_SYSREG64(TTBR1_EL1);
    if ( is_32bit_domain(p->domain) )
        p->arch.dacr = READ_SYSREG(DACR32_EL2);
    p->arch.par = READ_SYSREG64(PAR_EL1);
#if defined(CONFIG_ARM_32)
    p->arch.mair0 = READ_CP32(MAIR0);
    p->arch.mair1 = READ_CP32(MAIR1);
    p->arch.amair0 = READ_CP32(AMAIR0);
    p->arch.amair1 = READ_CP32(AMAIR1);
#else
    p->arch.mair = READ_SYSREG64(MAIR_EL1);
    p->arch.amair = READ_SYSREG64(AMAIR_EL1);
#endif

    /* Fault Status */
#if defined(CONFIG_ARM_32)
    p->arch.dfar = READ_CP32(DFAR);
    p->arch.ifar = READ_CP32(IFAR);
    p->arch.dfsr = READ_CP32(DFSR);
#elif defined(CONFIG_ARM_64)
    p->arch.far = READ_SYSREG64(FAR_EL1);
    p->arch.esr = READ_SYSREG64(ESR_EL1);
#endif

    if ( is_32bit_domain(p->domain) )
        p->arch.ifsr  = READ_SYSREG(IFSR32_EL2);
    p->arch.afsr0 = READ_SYSREG(AFSR0_EL1);
    p->arch.afsr1 = READ_SYSREG(AFSR1_EL1);

    /* XXX MPU */

    /* VFP */
    vfp_save_state(p);

    /* VGIC */
    gic_save_state(p);

    isb();
    context_saved(p);
}
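
For orientation, the symmetric restore path looks roughly like the truncated sketch below, mirroring the save order above with WRITE_SYSREG and the *_restore_state() counterparts; most registers are elided.

static void ctxt_switch_to(struct vcpu *n)
{
    p2m_restore_state(n);

    WRITE_SYSREG(n->arch.csselr, CSSELR_EL1);
    WRITE_SYSREG(n->arch.cpacr, CPACR_EL1);
    /* ... remaining system registers, mirrored from ctxt_switch_from() ... */

    vfp_restore_state(n);
    gic_restore_state(n);

    isb();
}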
Example No. 13
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	union vfp_state *vfp;
	__u32 cpu = thread->cpu;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc = fmrx(FPEXC);

#ifdef CONFIG_SMP
		/*
		 * RCU locking is needed in case last_VFP_context[cpu] is
		 * released on a different CPU.
		 */
		rcu_read_lock();
		vfp = last_VFP_context[cpu];
		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && vfp) {
			vfp_save_state(vfp, fpexc);
			vfp->hard.cpu = cpu;
		}
		rcu_read_unlock();
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		return NOTIFY_DONE;
	}

	vfp = &thread->vfpstate;
	if (cmd == THREAD_NOTIFY_FLUSH) {
		/*
		 * Per-thread VFP initialisation.
		 */
		memset(vfp, 0, sizeof(union vfp_state));

		vfp->hard.fpexc = FPEXC_EN;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	}

	/* flush and release case: Per-thread VFP cleanup. */
#ifndef CONFIG_SMP
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
#else
	/*
	 * Since release_thread() may be called from a different CPU, we use
	 * cmpxchg() here to avoid a race with the vfp_support_entry() code
	 * which modifies last_VFP_context[cpu]. Note that on SMP systems, a
	 * STR instruction on a different CPU clears the global exclusive
	 * monitor state.
	 */
	(void)cmpxchg(&last_VFP_context[cpu], vfp, NULL);
#endif

	return NOTIFY_DONE;
}
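
A note on the cmpxchg(): it atomically performs the equivalent of "if (last_VFP_context[cpu] == vfp) last_VFP_context[cpu] = NULL;". Open-coded, another CPU running vfp_support_entry() could install a new owner between the comparison and the store, and the store would then wrongly clear that new owner; the atomic compare-and-swap closes that window.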
Example No. 14
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	union vfp_state *vfp;
	unsigned long flags;
	__u32 cpu = thread->cpu;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc;

		local_irq_save_hw_cond(flags);
		fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		local_irq_restore_hw_cond(flags);
		return NOTIFY_DONE;
	}

	vfp = &thread->vfpstate;
	if (cmd == THREAD_NOTIFY_FLUSH) {
		/*
		 * Per-thread VFP initialisation.
		 */
		memset(vfp, 0, sizeof(union vfp_state));

		vfp->hard.fpexc = FPEXC_EN;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
		local_irq_save_hw_cond(flags);
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	} else
		local_irq_save_hw_cond(flags);

	/* flush and release case: Per-thread VFP cleanup. */
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	local_irq_restore_hw_cond(flags);

	return NOTIFY_DONE;
}
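
A note on this last variant: local_irq_save_hw_cond()/local_irq_restore_hw_cond() are not mainline APIs; they appear to come from I-pipe (Adeos) patched kernels, where the "hw" variants mask the real hardware IRQ line even when the kernel's virtualized IRQ state is already disabled. Aside from that, the logic matches the SMP variant in the previous example, minus the RCU read lock.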