Example 1
void mips_cm_lock_other(unsigned int cluster, unsigned int core,
			unsigned int vp, unsigned int block)
{
	unsigned int curr_core, cm_rev;
	u32 val;

	cm_rev = mips_cm_revision();
	preempt_disable();

	if (cm_rev >= CM_REV_CM3) {
		/* CM 3.x: select the target core & VP in the redirect register */
		val = core << __ffs(CM3_GCR_Cx_OTHER_CORE);
		val |= vp << __ffs(CM3_GCR_Cx_OTHER_VP);

		if (cm_rev >= CM_REV_CM3_5) {
			val |= CM_GCR_Cx_OTHER_CLUSTER_EN;
			val |= cluster << __ffs(CM_GCR_Cx_OTHER_CLUSTER);
			val |= block << __ffs(CM_GCR_Cx_OTHER_BLOCK);
		} else {
			WARN_ON(cluster != 0);
			WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);
		}

		/*
		 * We need to disable interrupts in SMP systems in order to
		 * ensure that we don't interrupt the caller with code which
		 * may modify the redirect register. We do so here in a
		 * slightly obscure way by using a spin lock, since this has
		 * the neat property of also catching any nested uses of
		 * mips_cm_lock_other() leading to a deadlock or a nice warning
		 * with lockdep enabled.
		 */
		spin_lock_irqsave(this_cpu_ptr(&cm_core_lock),
				  *this_cpu_ptr(&cm_core_lock_flags));
	} else {
		WARN_ON(cluster != 0);
		WARN_ON(block != CM_GCR_Cx_OTHER_BLOCK_LOCAL);

		/*
		 * We only have a GCR_CL_OTHER per core in systems with
		 * CM 2.5 & older, so have to ensure other VP(E)s don't
		 * race with us.
		 */
		curr_core = cpu_core(&current_cpu_data);
		spin_lock_irqsave(&per_cpu(cm_core_lock, curr_core),
				  per_cpu(cm_core_lock_flags, curr_core));

		/* CM 2.x & older: the redirect register selects only a core */
		val = core << __ffs(CM_GCR_Cx_OTHER_CORENUM);
	}

	write_gcr_cl_other(val);

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
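For reference, the per-CPU lock and the saved-IRQ-flags word that spin_lock_irqsave() fills in above are per-CPU variables declared alongside this function. A minimal sketch of those declarations (the names match the ones used above; the exact form is an assumption here):

/* Assumed declarations, one lock and one flags word per CPU */
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cm_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cm_core_lock_flags);

Keeping the flags in a per-CPU variable rather than on the caller's stack lets the matching mips_cm_unlock_other() (Example 3) restore them without the caller having to thread them through.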
Example 2
void mips_cpc_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	curr_core = cpu_core(&current_cpu_data);
	spin_unlock_irqrestore(&per_cpu(cpc_core_lock, curr_core),
			       per_cpu(cpc_core_lock_flags, curr_core));
	preempt_enable();
}
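The CPC side mirrors the CM arrangement: on pre-CM3 systems each core gets its own lock and saved-flags word. A sketch of the assumed declarations, matching the names used above:

/* Assumed per-CPU lock state for the CPC core-other region */
static DEFINE_PER_CPU_ALIGNED(spinlock_t, cpc_core_lock);
static DEFINE_PER_CPU_ALIGNED(unsigned long, cpc_core_lock_flags);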
Example 3
void mips_cm_unlock_other(void)
{
	unsigned int curr_core;

	if (mips_cm_revision() < CM_REV_CM3) {
		curr_core = cpu_core(&current_cpu_data);
		spin_unlock_irqrestore(&per_cpu(cm_core_lock, curr_core),
				       per_cpu(cm_core_lock_flags, curr_core));
	} else {
		spin_unlock_irqrestore(this_cpu_ptr(&cm_core_lock),
				       *this_cpu_ptr(&cm_core_lock_flags));
	}

	preempt_enable();
}
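Examples 1 and 3 form a lock/unlock bracket around accesses to the CM's core-other (redirect) region. A minimal, hypothetical caller is sketched below; read_gcr_co_config() stands in for whichever core-other GCR accessor the caller actually needs and is an assumption, not part of the examples above.

static u32 read_other_core_config(unsigned int cluster, unsigned int core)
{
	u32 cfg;

	/* Redirect the core-other region at the target core, VP 0 */
	mips_cm_lock_other(cluster, core, 0, CM_GCR_Cx_OTHER_BLOCK_LOCAL);

	/* Core-other accesses must happen while the lock is held */
	cfg = read_gcr_co_config();

	mips_cm_unlock_other();

	return cfg;
}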
Example 4
void mips_cpc_lock_other(unsigned int core)
{
	unsigned int curr_core;

	if (mips_cm_revision() >= CM_REV_CM3)
		/* Systems with CM >= 3 lock the CPC via mips_cm_lock_other */
		return;

	preempt_disable();
	curr_core = cpu_core(&current_cpu_data);
	spin_lock_irqsave(&per_cpu(cpc_core_lock, curr_core),
			  per_cpu(cpc_core_lock_flags, curr_core));
	write_cpc_cl_other(core << __ffs(CPC_Cx_OTHER_CORENUM));

	/*
	 * Ensure the core-other region reflects the appropriate core &
	 * VP before any accesses to it occur.
	 */
	mb();
}
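As with the CM pair, mips_cpc_lock_other()/mips_cpc_unlock_other() bracket accesses to the CPC's core-other region. A hypothetical caller, sketched under the assumption that write_cpc_co_cmd() and CPC_Cx_CMD_PWRUP are the usual CPC core-other command accessors:

static void power_up_core(unsigned int core)
{
	mips_cpc_lock_other(core);

	/* Issue a power-up command to the target core's CPC block */
	write_cpc_co_cmd(CPC_Cx_CMD_PWRUP);

	mips_cpc_unlock_other();
}

Note that on CM >= 3 both calls return early, since mips_cm_lock_other() (Example 1) already covers the CPC there.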