Example #1
/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		 uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		 uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
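	 /* Delay slot: allocate the standard C call frame before the handler runs */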
	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}
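
A minimal sketch of how a caller might chain these uasm builders, assuming only what the code above shows: each builder writes instructions at the address it is given and returns the first free address after them, so successive handlers end up contiguous in one buffer. The wrapper name build_guest_handlers() is hypothetical; kvm_mips_build_vcpu_run() and flush_icache_range() are existing kernel symbols, but the real call site does more than this.

/* Hypothetical wrapper, for illustration only */
static void *build_guest_handlers(void *handler_area)
{
	void *p = handler_area;

	/* Common exit path shared by every exception vector */
	p = kvm_mips_build_exit(p);

	/* Guest entry routine, emitted immediately after the exit handler */
	p = kvm_mips_build_vcpu_run(p);

	/* Make the freshly written instructions visible to instruction fetch */
	flush_icache_range((unsigned long)handler_area, (unsigned long)p);

	return p;
}
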
Example #2
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)_gcmp_base + GCMPCLCBOFS(COHCTL));

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
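		/* Delay slot: keep the incremented count in t1 for the check below */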
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		/* TODO: determine whether required based on CPC version */
		cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu].dcache,
				  lbl_flush_fsb);

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, GCMP_CCB_COHCTL_DOMAIN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
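		/* Delay slot: mask ready_count into v0, the generated code's return value */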
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}
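
A hedged sketch of how the buffer returned above might be used, assuming the calling convention implied by the register choices in the generator (online VPE count in a0, pointer to the uncached ready_count word in a1, result in v0). The typedef and helper names here are illustrative, not taken from the source.

/* Illustrative typedef matching the a0/a1/v0 convention used above */
typedef unsigned int (*cps_pm_entry_fn)(unsigned int online, u32 *nc_ready_count);

/* Hypothetical cache of generated entry points, filled once at init time */
static void *pm_entry_code[NR_CPUS][CPS_PM_STATE_COUNT] __read_mostly;

static int __init cps_pm_prepare(unsigned int cpu, enum cps_pm_state state)
{
	pm_entry_code[cpu][state] = cps_gen_entry_code(cpu, state);
	return pm_entry_code[cpu][state] ? 0 : -ENOMEM;
}

static unsigned int cps_pm_run_entry(unsigned int cpu, enum cps_pm_state state,
				     unsigned int online, u32 *nc_ready_count)
{
	cps_pm_entry_fn entry = pm_entry_code[cpu][state];

	/* Jump into the generated routine; it returns once coherence is restored */
	return entry(online, nc_ready_count);
}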