Example #1
asmlinkage void smp_invalidate_interrupt(void)
{
	unsigned long cpu, flags;

	local_irq_save_hw_cond(flags);
	
	cpu = smp_processor_id();
	
	if (!test_bit(cpu, &flush_cpumask)) {
		/*
		 * This was a BUG() but until someone can quote me the
		 * line from the intel manual that guarantees an IPI to
		 * multiple CPUs is retried _only_ on the erroring CPUs
		 * it's staying as a return
		 *
		 * BUG();
		 */
		local_irq_restore_hw_cond(flags);
		return;
	}

	if (flush_mm == cpu_tlbstate[cpu].active_mm) {
		if (cpu_tlbstate[cpu].state == TLBSTATE_OK) {
			if (flush_va == FLUSH_ALL)
				local_flush_tlb();
			else
				__flush_tlb_one(flush_va);
		} else
			leave_mm(cpu);
	}
	ack_APIC_irq();
	clear_bit(cpu, &flush_cpumask);
	
	local_irq_restore_hw_cond(flags);
}
Example #2
static inline void __send_IPI_shortcut(unsigned int shortcut, int vector)
{
	/*
	 * Subtle. In the case of the 'never do double writes' workaround
	 * we have to lock out interrupts to be safe.  As we don't care
	 * about the value read we use an atomic rmw access to avoid costly
	 * cli/sti.  Otherwise we use an even cheaper single atomic write
	 * to the APIC.
	 */
	unsigned int cfg;
	unsigned long flags;

	local_irq_save_hw_cond(flags);	

	/*
	 * Wait for idle.
	 */
	apic_wait_icr_idle();

	/*
	 * No need to touch the target chip field
	 */
	cfg = __prepare_ICR(shortcut, vector);

	/*
	 * Send the IPI. The write to APIC_ICR fires this off.
	 */
	apic_write_around(APIC_ICR, cfg);

	local_irq_restore_hw_cond(flags);
}
Example #3
void flush_tlb_current_task(void)
{
	struct mm_struct *mm = current->mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
	unsigned long flags;

	local_irq_save_hw_cond(flags);
	
	local_flush_tlb();
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, FLUSH_ALL);

	local_irq_restore_hw_cond(flags);
}
Example #4
void flush_tlb_page(struct vm_area_struct * vma, unsigned long va)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long cpu_mask = mm->cpu_vm_mask & ~(1 << smp_processor_id());
	unsigned long flags;

	local_irq_save_hw_cond(flags);
	
	if (current->active_mm == mm) {
		if (current->mm)
			__flush_tlb_one(va);
		else
			leave_mm(smp_processor_id());
	}

	local_irq_restore_hw_cond(flags);
	
	if (cpu_mask)
		flush_tlb_others(cpu_mask, mm, va);
}
Example #5
static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
{
	struct thread_info *thread = v;
	union vfp_state *vfp;
	unsigned long flags;
	__u32 cpu = thread->cpu;

	if (likely(cmd == THREAD_NOTIFY_SWITCH)) {
		u32 fpexc;

		local_irq_save_hw_cond(flags);
		fpexc = fmrx(FPEXC);
#ifdef CONFIG_SMP
		/*
		 * On SMP, if VFP is enabled, save the old state in
		 * case the thread migrates to a different CPU. The
		 * restoring is done lazily.
		 */
		if ((fpexc & FPEXC_EN) && last_VFP_context[cpu]) {
			vfp_save_state(last_VFP_context[cpu], fpexc);
			last_VFP_context[cpu]->hard.cpu = cpu;
		}
		/*
		 * Thread migration, just force the reloading of the
		 * state on the new CPU in case the VFP registers
		 * contain stale data.
		 */
		if (thread->vfpstate.hard.cpu != cpu)
			last_VFP_context[cpu] = NULL;
#endif

		/*
		 * Always disable VFP so we can lazily save/restore the
		 * old state.
		 */
		fmxr(FPEXC, fpexc & ~FPEXC_EN);
		local_irq_restore_hw_cond(flags);
		return NOTIFY_DONE;
	}

	vfp = &thread->vfpstate;
	if (cmd == THREAD_NOTIFY_FLUSH) {
		/*
		 * Per-thread VFP initialisation.
		 */
		memset(vfp, 0, sizeof(union vfp_state));

		vfp->hard.fpexc = FPEXC_EN;
		vfp->hard.fpscr = FPSCR_ROUND_NEAREST;

		/*
		 * Disable VFP to ensure we initialise it first.
		 */
		local_irq_save_hw_cond(flags);
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
	} else
		local_irq_save_hw_cond(flags);

	/* flush and release case: Per-thread VFP cleanup. */
	if (last_VFP_context[cpu] == vfp)
		last_VFP_context[cpu] = NULL;
	local_irq_restore_hw_cond(flags);

	return NOTIFY_DONE;
}
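
All of the snippets above share one pattern: hardware interrupts are conditionally masked with local_irq_save_hw_cond() around a short critical section and restored with local_irq_restore_hw_cond() on every exit path. Below is a minimal sketch of that pattern in isolation; the function name and the protected counter are hypothetical, and only the two helpers are taken from the examples above.

static unsigned long example_hits;	/* hypothetical counter shared with an IRQ path */

static void example_critical_section(void)
{
	unsigned long flags;

	/* Mask hardware interrupts only if they are not already masked. */
	local_irq_save_hw_cond(flags);

	/* Work that must not race with an interrupt handler on this CPU. */
	example_hits++;

	/* Restore the previous hardware interrupt state. */
	local_irq_restore_hw_cond(flags);
}

As Example #1 shows, any early exit from the protected region must also go through local_irq_restore_hw_cond() so that the saved interrupt state is never leaked.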