Example #1
/* Flush the generated TLB refill/exception handlers from the icache. */
void __cpuinit flush_tlb_handlers(void)
{
	local_flush_icache_range((unsigned long)handle_tlbl,
			   (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
			   (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
			   (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}
Example #2
/* Copy an exception vector to dst and make it visible to instruction fetch. */
void __cpuinit brcm_wr_vec(unsigned long dst, char *start, char *end)
{
	memcpy((void *)dst, start, end - start);
	dma_cache_wback((unsigned long)start, end - start);
	local_flush_icache_range(dst, dst + (end - start));
	instruction_hazard();
}
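Both helpers follow the same self-modifying-code discipline on MIPS: the new instructions are written through the dcache, so they must be made visible to instruction fetch with local_flush_icache_range() and the instruction hazard cleared before the code runs. A minimal sketch of that pattern, with a hypothetical helper name:

/* Hypothetical helper: patch n instructions at dst and make them fetchable. */
static void patch_insns(u32 *dst, const u32 *src, unsigned int n)
{
	memcpy(dst, src, n * sizeof(u32));
	local_flush_icache_range((unsigned long)dst,
				 (unsigned long)dst + n * sizeof(u32));
	instruction_hazard();	/* ensure the pipeline fetches the new code */
}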
Example #3
/**
 * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
 * @vcpu:	Virtual CPU.
 * @opc:	PC of instruction to replace.
 * @replace:	Instruction to write.
 *
 * Returns:	0 on success, -EFAULT if @opc is not a valid guest KSEG0 or
 *		KSEG23 address.
 */
static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
				  union mips_instruction replace)
{
	unsigned long kseg0_opc, flags;

	if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG0) {
		kseg0_opc =
		    CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
			       (vcpu, (unsigned long) opc));
		memcpy((void *)kseg0_opc, (void *)&replace, sizeof(u32));
		local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
	} else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		memcpy((void *)opc, (void *)&replace, sizeof(u32));
		local_flush_icache_range((unsigned long)opc,
					 (unsigned long)opc + 32);
		local_irq_restore(flags);
	} else {
		kvm_err("%s: Invalid address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}
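For a guest KSEG0 address the helper patches through a cached CKSEG0 alias of the translated host physical address; for KSEG23 it writes the guest mapping directly with interrupts disabled. A hedged sketch of a caller, where the NOP replacement is illustrative only (union mips_instruction exposes the raw encoding via its word member):

	union mips_instruction nop_insn;

	nop_insn.word = 0;	/* encodes sll zero, zero, 0, i.e. a nop */
	if (kvm_mips_trans_replace(vcpu, opc, nop_insn))
		kvm_err("replacement failed\n");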
Example #4
/* Take this CPU offline; CPU0 (the boot CPU) cannot be unplugged. */
static int brcmstb_cpu_disable(void)
{
	unsigned int cpu = smp_processor_id();

	if (cpu == 0)
		return -EBUSY;

	printk(KERN_INFO "SMP: CPU%d is offline\n", cpu);

	cpu_clear(cpu, cpu_online_map);

	local_flush_tlb_all();
	local_flush_icache_range(0, ~0);

	return 0;
}
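Passing the range 0 to ~0 asks for the whole icache to be flushed; the MIPS r4k implementation falls back to a full-cache flush once the requested range exceeds the icache size. Note the example predates the current cpumask API; on recent kernels the online-map update would read as below (a hedged equivalent, not part of the original):

	/* Modern spelling of cpu_clear(cpu, cpu_online_map): */
	set_cpu_online(cpu, false);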
Example #5
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)_gcmp_base + GCMPCLCBOFS(COHCTL));

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
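		/*
		 * Retry the ll/sc sequence if the store failed (t2 == 0).
		 * The addiu in the branch delay slot leaves the incremented
		 * count in t1 for the beq against r_online below.
		 */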
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
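			/* Halt this TC via the CP0 TCHalt register (reg 2, sel 4). */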
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		/* TODO: determine whether required based on CPC version */
		cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu].dcache,
				  lbl_flush_fsb);

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, GCMP_CCB_COHCTL_DOMAIN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
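		/*
		 * Retry on sc failure. The andi in the delay slot masks off
		 * the top "continue" bit of the pre-decrement count, leaving
		 * the plain VPE count in v0 as the return value.
		 */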
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}
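The generated buffer is executed directly as the low-power entry function, with its arguments arriving in a0/a1 to match r_online and r_nc_count above. A hedged sketch of how a caller might consume it; the function-pointer type and helper name are assumptions:

/* Assumed signature matching the a0/a1 usage in the generated code. */
typedef unsigned int (*cps_nc_entry_fn)(unsigned int online,
					u32 *nc_ready_count);

static int cps_pm_prepare(unsigned int cpu)
{
	cps_nc_entry_fn enter;

	enter = (cps_nc_entry_fn)cps_gen_entry_code(cpu, CPS_PM_NC_WAIT);
	return enter ? 0 : -ENOMEM;
}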