Example 1
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}
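A hedged C-level analogue of the SMP path above, assuming a conventional ll/sc retry pattern: the ori/sc/beqz sequence atomically ORs the mode bits into the PTE, branching back to the earlier load at label_smp_pgtable_change whenever the store-conditional fails. The sketch below uses the kernel's cmpxchg() in place of raw ll/sc and is illustrative only.

static void example_pte_set_bits(unsigned long *ptep, unsigned long mode)
{
	unsigned long old, new;

	do {
		old = READ_ONCE(*ptep);			/* the ll at label_smp_pgtable_change */
		new = old | mode;			/* uasm_i_ori(p, pte, pte, mode) */
	} while (cmpxchg(ptep, old, new) != old);	/* sc + beqz retry */
}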
Example 2
/**
 * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
 * function generated by kvm_mips_build_vcpu_run().
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_host(void *addr)
{
	u32 *p = addr;
	unsigned int i;

	/* EBASE is already pointing to Linux */
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
	UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));

	/*
	 * r2/v0 is the return code, shift it down by 2 (arithmetic)
	 * to recover the err code
	 */
	uasm_i_sra(&p, K0, V0, 2);
	uasm_i_move(&p, V0, K0);

	/* Load context saved on the host stack */
	for (i = 16; i < 31; ++i) {
		if (i == 24)
			i = 28;
		UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
	}

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Restore RA, which is the address we will return to */
	UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
	uasm_i_jr(&p, RA);
	 uasm_i_nop(&p);

	return p;
}
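A hedged illustration of the v0 decoding above: the exit handler is assumed to pack its result as (err << 2) | resume-flags, so the arithmetic shift right by 2 recovers err for the caller of vcpu_run. The flag value below is an assumption for illustration, not taken from this file.

#define EXAMPLE_RESUME_HOST	(1 << 1)	/* assumed flag meaning "return to the host" */

static inline int example_pack_exit_code(int err)
{
	return (err << 2) | EXAMPLE_RESUME_HOST;
}

static inline int example_unpack_exit_err(int packed)
{
	return packed >> 2;	/* matches uasm_i_sra(&p, K0, V0, 2) */
}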
Example 3
static inline void build_copy_store_pref(u32 **buf, int off)
{
	if (off & cache_line_mask())
		return;

	if (pref_bias_copy_store) {
		uasm_i_pref(buf, pref_dst_mode, pref_bias_copy_store + off,
			    A0);
	} else if (cache_line_size == (half_copy_loop_size << 1)) {
		if (cpu_has_cache_cdex_s) {
			uasm_i_cache(buf, Create_Dirty_Excl_SD, off, A0);
		} else if (cpu_has_cache_cdex_p) {
			if (R4600_V1_HIT_CACHEOP_WAR && cpu_is_r4600_v1_x()) {
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
				uasm_i_nop(buf);
			}

			if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
				uasm_i_lw(buf, ZERO, ZERO, AT);

			uasm_i_cache(buf, Create_Dirty_Excl_D, off, A0);
		}
	}
}
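A small hedged helper restating the early return above: the prefetch or cache-create operation is emitted at most once per cache line, i.e. only when the store offset is aligned to the line size (cache_line_mask() is assumed to be cache_line_size - 1).

static inline bool example_emit_store_pref(int off, int line_size)
{
	return (off & (line_size - 1)) == 0;	/* analogue of !(off & cache_line_mask()) */
}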
Example 4
static void __cpuinit
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef  CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}
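A hedged C-level equivalent of the address computed above in the non-SMP case, where pgd_base is the value loaded from pgd_current: the faulting address indexes the pgd, with PGD_T_LOG2 scaling the index to a byte offset.

static unsigned long example_pgde32_addr(unsigned long pgd_base,
					 unsigned long badvaddr)
{
	unsigned long idx = badvaddr >> PGDIR_SHIFT;	/* "get pgd only bits" */

	return pgd_base + (idx << PGD_T_LOG2);		/* "add in pgd offset" */
}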
Example 5
static void build_copy_load(u32 **buf, int reg, int off)
{
	if (cpu_has_64bit_gp_regs) {
		uasm_i_ld(buf, reg, off, A1);
	} else {
		uasm_i_lw(buf, reg, off, A1);
	}
}
Example 6
static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}
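A hedged C-level equivalent of the R3000 two-level walk above: the pgd index comes from BadVAddr >> 22 scaled by 4, and the pte address is the loaded pde plus the low bits of C0_CONTEXT masked with 0xffc.

static unsigned long example_r3000_pte_addr(unsigned long pgd_base,
					    unsigned long badvaddr,
					    unsigned long context)
{
	unsigned long pgd_off = (badvaddr >> 22) << 2;
	unsigned long pde = *(unsigned long *)(pgd_base + pgd_off);

	return pde + (context & 0xffc);
}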
Example 7
static __init void build_update_entries(u32 **p, unsigned int tmp,
					unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		uasm_i_dsrl(p, tmp, tmp, 6); /* convert to entrylo0 */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl(p, ptep, ptep, 6); /* convert to entrylo1 */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	UASM_i_SRL(p, tmp, tmp, 6); /* convert to entrylo0 */
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO0);
	uasm_i_mtc0(p, tmp, C0_ENTRYLO0); /* load it */
	UASM_i_SRL(p, ptep, ptep, 6); /* convert to entrylo1 */
	if (r45k_bvahwbug())
		uasm_i_mfc0(p, tmp, C0_INDEX);
	if (r4k_250MHZhwbug())
		uasm_i_mtc0(p, 0, C0_ENTRYLO1);
	uasm_i_mtc0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}
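A hedged one-liner for the "convert to entrylo" shifts above: the software PTE is assumed to be laid out so that dropping its low six bits yields the value written to C0_ENTRYLO0/1.

static inline unsigned long example_pte_to_entrylo(unsigned long pte)
{
	return pte >> 6;	/* matches uasm_i_dsrl(..., 6) / UASM_i_SRL(..., 6) */
}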
Example 8
/*
 * The R3000 TLB handler is simple.
 */
static void __init build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;
	int i;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (p - tlb_handler); i++)
		pr_debug("\t.word 0x%08x\n", tlb_handler[i]);
	pr_debug("\t.set pop\n");

	memcpy((void *)ebase, tlb_handler, 0x80);
}
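A hedged sketch of the expected follow-up (not part of the function above): once the handler has been copied to the exception base, the instruction cache must be flushed over that range before a refill exception can safely be taken. The call below reuses local_flush_icache_range() as seen in Example 11; the exact call site is an assumption.

static void example_install_r3000_refill_handler(void)
{
	build_r3000_tlb_refill_handler();
	local_flush_icache_range(ebase, ebase + 0x80);	/* assumed caller responsibility */
}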
Example 9
/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
 * depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		 uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		 uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	 UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}
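A hedged sketch of the C function reached by the jalr above: A0 and A1 were loaded with vcpu->run and the vcpu pointer respectively, so the handler is assumed to take roughly this shape (the exact signature is not shown in this file).

int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);

static int example_call_exit_handler(struct kvm_vcpu *vcpu)
{
	/* mirrors the register setup above: A0 = vcpu->run, A1 = vcpu */
	return kvm_mips_handle_exit(vcpu->run, vcpu);
}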
Example 10
/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between the
 * initial entry into the guest from the host, and returning from the exit
 * handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
		  T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	 UASM_i_ADDIU(&p, T1, K1,
		      offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);

	/* t1: contains the base of the ASID array, need to get the cpu id  */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x4 */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	uasm_i_lw(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
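A hedged C-level restatement of the ASID selection above: the guest counts as being in user mode only when Status.KSU is USER and neither ERL nor EXL is set; any other combination selects the guest kernel ASID array.

static size_t example_guest_asid_offset(unsigned long guest_status)
{
	if ((guest_status & (KSU_USER | ST0_ERL | ST0_EXL)) == KSU_USER)
		return offsetof(struct kvm_vcpu_arch, guest_user_asid);

	return offsetof(struct kvm_vcpu_arch, guest_kernel_asid);
}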
Example 11
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)_gcmp_base + GCMPCLCBOFS(COHCTL));

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		/* TODO: determine whether required based on CPC version */
		cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu].dcache,
				  lbl_flush_fsb);

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, GCMP_CCB_COHCTL_DOMAIN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;
out_err:
	kfree(buf);
	return NULL;
}
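A hedged C-level analogue of the coupled-coherence entry above: each VPE atomically increments ready_count via the ll/sc loop at lbl_incready, and only the VPE that brings the count up to the number of online VPEs proceeds to disable coherence.

static bool example_last_vpe_to_arrive(atomic_t *nc_count, unsigned int online)
{
	/* the ll/addiu/sc retry loop collapsed into a single atomic increment */
	return atomic_inc_return(nc_count) == online;
}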
Example 12
static void __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
				     struct uasm_reloc **pr,
				     const struct cache_desc *dcache,
				     int lbl)
{
	unsigned i, fsb_size = 8;
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* TODO: this is interAptiv-specific, generalise it */

	/* Preserve perf counter 1 setup */
	uasm_i_mfc0(pp, t2, 25, 2); /* PerfCtl1 */
	uasm_i_mfc0(pp, t3, 25, 3); /* PerfCnt1 */

	/* Setup perf counter 1 to count FSB full pipeline stalls */
	uasm_i_addiu(pp, t0, zero, 0x66f);
	uasm_i_mtc0(pp, t0, 25, 2); /* PerfCtl1 */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, 3); /* PerfCnt1 */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * dcache->linesz * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * dcache->linesz * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * dcache->linesz * line_stride, t0);
	}

	/* Completion barrier */
	uasm_i_sync(pp, stype_memory);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, 3); /* PerfCnt1 */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore perf counter 1. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, 2); /* PerfCtl1 */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, 3); /* PerfCnt1 */
	uasm_i_ehb(pp);
}
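A hedged decoding of the magic PerfCtl1 value 0x66f programmed above, based on the parameterised form in the next example: event 51 in the event field (shifted left by 5) with counting enabled in the low four mode bits.

#define EXAMPLE_FSB_STALL_PERFCTL	((51 << 5) | 0xf)	/* == 0x66f */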
Example 13
static int __init cps_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
				    struct uasm_reloc **pr,
				    const struct cpuinfo_mips *cpu_info,
				    int lbl)
{
	unsigned i, fsb_size = 8;
	unsigned num_loads = (fsb_size * 3) / 2;
	unsigned line_stride = 2;
	unsigned line_size = cpu_info->dcache.linesz;
	unsigned perf_counter, perf_event;
	unsigned revision = cpu_info->processor_id & PRID_REV_MASK;

	/*
	 * Determine whether this CPU requires an FSB flush, and if so which
	 * performance counter/event reflect stalls due to a full FSB.
	 */
	switch (__get_cpu_type(cpu_info->cputype)) {
	case CPU_INTERAPTIV:
		perf_counter = 1;
		perf_event = 51;
		break;

	case CPU_PROAPTIV:
		/* Newer proAptiv cores don't require this workaround */
		if (revision >= PRID_REV_ENCODE_332(1, 1, 0))
			return 0;

		/* On older ones it's unavailable */
		return -1;

	/* CPUs which do not require the workaround */
	case CPU_P5600:
	case CPU_I6400:
		return 0;

	default:
		WARN_ONCE(1, "pm-cps: FSB flush unsupported for this CPU\n");
		return -1;
	}

	/*
	 * Ensure that the fill/store buffer (FSB) is not holding the results
	 * of a prefetch, since if it is then the CPC sequencer may become
	 * stuck in the D3 (ClrBus) state whilst entering a low power state.
	 */

	/* Preserve perf counter setup */
	uasm_i_mfc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_mfc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Setup perf counter to count FSB full pipeline stalls */
	uasm_i_addiu(pp, t0, zero, (perf_event << 5) | 0xf);
	uasm_i_mtc0(pp, t0, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, zero, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	/* Base address for loads */
	UASM_i_LA(pp, t0, (long)CKSEG0);

	/* Start of clear loop */
	uasm_build_label(pl, *pp, lbl);

	/* Perform some loads to fill the FSB */
	for (i = 0; i < num_loads; i++)
		uasm_i_lw(pp, zero, i * line_size * line_stride, t0);

	/*
	 * Invalidate the new D-cache entries so that the cache will need
	 * refilling (via the FSB) if the loop is executed again.
	 */
	for (i = 0; i < num_loads; i++) {
		uasm_i_cache(pp, Hit_Invalidate_D,
			     i * line_size * line_stride, t0);
		uasm_i_cache(pp, Hit_Writeback_Inv_SD,
			     i * line_size * line_stride, t0);
	}

	/* Completion barrier */
	uasm_i_sync(pp, stype_memory);
	uasm_i_ehb(pp);

	/* Check whether the pipeline stalled due to the FSB being full */
	uasm_i_mfc0(pp, t1, 25, (perf_counter * 2) + 1); /* PerfCntN */

	/* Loop if it didn't */
	uasm_il_beqz(pp, pr, t1, lbl);
	uasm_i_nop(pp);

	/* Restore the perf counter. The count may well now be wrong... */
	uasm_i_mtc0(pp, t2, 25, (perf_counter * 2) + 0); /* PerfCtlN */
	uasm_i_ehb(pp);
	uasm_i_mtc0(pp, t3, 25, (perf_counter * 2) + 1); /* PerfCntN */
	uasm_i_ehb(pp);

	return 0;
}
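A hedged sketch of how a caller might consume the return value above: zero means the workaround was either emitted or is not needed, while a negative value means this CPU cannot safely use the FSB-flush path and code generation should be abandoned.

static int example_maybe_gen_flush_fsb(u32 **pp, struct uasm_label **pl,
				       struct uasm_reloc **pr, unsigned int cpu,
				       int lbl)
{
	int err = cps_gen_flush_fsb(pp, pl, pr, &cpu_data[cpu], lbl);

	if (err)
		return err;	/* abandon generating the low-power entry code */

	return 0;
}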