/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector
 * will branch to the common exception handler generated by
 * kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}
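/*
 * Illustrative sketch (not part of the original file): a caller would
 * typically build the common handler once with kvm_mips_build_exit() and
 * then stamp one such vector at each guest exception entry point, all
 * branching to it. The example_* name, the 0x180/0x200 offsets and the
 * 0x100 vector spacing are assumptions for this sketch, not the kernel's
 * actual setup code.
 */
static void *example_install_guest_vectors(void *gebase, void *handler_area)
{
	const unsigned int spacing = 0x100;	/* assumed EI/VI spacing */
	void *p = handler_area;
	void *handler = p;
	int i;

	/* Common exit handler that every vector branches to */
	p = kvm_mips_build_exit(p);

	/* General exception entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* Vectored interrupt entry points */
	for (i = 0; i < 8; i++)
		kvm_mips_build_exception(gebase + 0x200 + i * spacing,
					 handler);

	return p;
}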
static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
static void __cpuinit build_r3000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_modifiable(&p, &l, &r, K0, K1, label_nopage_tlbm);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_pte_reload_tlbwi(&p, K0, K1);

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}
/**
 * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle the return from kvm_mips_handle_exit(), either
 * resuming the guest or returning to the host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_from_exit(void *addr)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Return from handler; make sure interrupts are disabled */
	uasm_i_di(&p, ZERO);
	uasm_i_ehb(&p);

	/*
	 * XXXKYMA: k0/k1 could have been blown away if we processed
	 * an exception while we were handling the exception from the
	 * guest, reload k1
	 */
	uasm_i_move(&p, K1, S1);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/*
	 * Check return value, should tell us if we are returning to the
	 * host (handle I/O etc.) or resuming the guest
	 */
	uasm_i_andi(&p, T0, V0, RESUME_HOST);
	uasm_il_bnez(&p, &r, T0, label_return_to_host);
	uasm_i_nop(&p);

	p = kvm_mips_build_ret_to_guest(p);

	uasm_l_return_to_host(&l, p);
	p = kvm_mips_build_ret_to_host(p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &l, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}
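/*
 * Added commentary: the bcm1250_m3_war() preamble above works around the
 * BCM1250 M3 erratum. It XORs BadVAddr with EntryHi and shifts out the
 * in-page bits; a non-zero result means the two registers disagree on the
 * virtual page number, i.e. the exception is spurious, so the handler
 * simply branches to label_leave (which eret's) and lets the access be
 * replayed. The branch needs no nop because the delay slot is harmlessly
 * filled by the first instruction of the following handler head.
 */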
/**
 * kvm_mips_build_exit() - Assemble common guest exit handler.
 * @addr:	Address to start writing code.
 *
 * Assemble the generic guest exit handling code. This is called by the
 * exception vectors (generated by kvm_mips_build_exception()), and calls
 * kvm_mips_handle_exit(), then either resumes the guest or returns to the
 * host depending on the return value.
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exit(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[3];
	struct uasm_reloc relocs[3];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/*
	 * Generic Guest exception handler. We end up here when the guest
	 * does something that causes a trap to kernel mode.
	 *
	 * Both k0/k1 registers will have already been saved (k0 into the vcpu
	 * structure, and k1 into the scratch_tmp register).
	 *
	 * The k1 register will already contain the kvm_vcpu_arch pointer.
	 */

	/* Start saving Guest context to VCPU */
	for (i = 0; i < 32; ++i) {
		/* Guest k0/k1 saved later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* We need to save hi/lo and restore them on the way out */
	uasm_i_mfhi(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);

	uasm_i_mflo(&p, T0);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
#endif

	/* Finally save guest k1 to VCPU */
	uasm_i_ehb(&p);
	UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
	UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Now that context has been saved, we can use other registers */

	/* Restore vcpu */
	UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
	uasm_i_move(&p, S1, A1);

	/* Restore run (vcpu->run) */
	UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
	/* Save pointer to run in s0, will be saved by the compiler */
	uasm_i_move(&p, S0, A0);

	/*
	 * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
	 * the exception
	 */
	UASM_i_MFC0(&p, K0, C0_EPC);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);

	UASM_i_MFC0(&p, K0, C0_BADVADDR);
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
		  K1);

	uasm_i_mfc0(&p, K0, C0_CAUSE);
	uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);

	/* Now restore the host state just enough to run the handlers */

	/* Switch EBASE to the one used by Linux */
	/* load up the host EBASE */
	uasm_i_mfc0(&p, V0, C0_STATUS);

	uasm_i_lui(&p, AT, ST0_BEV >> 16);
	uasm_i_or(&p, K0, V0, AT);

	uasm_i_mtc0(&p, K0, C0_STATUS);
	uasm_i_ehb(&p);

	UASM_i_LA_mostly(&p, K0, (long)&ebase);
	UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
	build_set_exc_base(&p, K0);

	if (raw_cpu_has_fpu) {
		/*
		 * If FPU is enabled, save FCR31 and clear it so that later
		 * ctc1's don't trigger FPE for pending exceptions.
		 */
		uasm_i_lui(&p, AT, ST0_CU1 >> 16);
		uasm_i_and(&p, V1, V0, AT);
		uasm_il_beqz(&p, &r, V1, label_fpu_1);
		uasm_i_nop(&p);
		uasm_i_cfc1(&p, T0, 31);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
			  K1);
		uasm_i_ctc1(&p, ZERO, 31);
		uasm_l_fpu_1(&l, p);
	}

	if (cpu_has_msa) {
		/*
		 * If MSA is enabled, save MSACSR and clear it so that later
		 * instructions don't trigger MSAFPE for pending exceptions.
		 */
		uasm_i_mfc0(&p, T0, C0_CONFIG5);
		uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
		uasm_il_beqz(&p, &r, T0, label_msa_1);
		uasm_i_nop(&p);
		uasm_i_cfcmsa(&p, T0, MSA_CSR);
		uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
			  K1);
		uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
		uasm_l_msa_1(&l, p);
	}

	/* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
	uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
	uasm_i_and(&p, V0, V0, AT);
	uasm_i_lui(&p, AT, ST0_CU0 >> 16);
	uasm_i_or(&p, V0, V0, AT);
	uasm_i_mtc0(&p, V0, C0_STATUS);
	uasm_i_ehb(&p);

	/* Load up host GP */
	UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);

	/* Need a stack before we can jump to "C" */
	UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);

	/* Saved host state */
	UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));

	/*
	 * XXXKYMA do we need to load the host ASID, maybe not because the
	 * kernel entries are marked GLOBAL, need to verify
	 */

	/* Restore host scratch registers, as we'll have clobbered them */
	kvm_mips_build_restore_scratch(&p, K0, SP);

	/* Restore RDHWR access */
	UASM_i_LA_mostly(&p, K0, (long)&hwrena);
	uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
	uasm_i_mtc0(&p, K0, C0_HWRENA);

	/* Jump to handler */
	/*
	 * XXXKYMA: not sure if this is safe, how large is the stack??
	 * Now jump to the kvm_mips_handle_exit() to see if we can deal
	 * with this in the kernel
	 */
	UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
	uasm_i_jalr(&p, RA, T9);
	UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);

	uasm_resolve_relocs(relocs, labels);

	p = kvm_mips_build_ret_from_exit(p);

	return p;
}
/**
 * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to resume guest execution. This code is common between
 * the initial entry into the guest from the host, and returning from the
 * exit handler back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_enter_guest(void *addr)
{
	u32 *p = addr;
	unsigned int i;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Set Guest EPC */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
	UASM_i_MTC0(&p, T0, C0_EPC);

	/* Set the ASID for the Guest Kernel */
	UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
	UASM_i_LW(&p, T0, offsetof(struct mips_coproc,
				   reg[MIPS_CP0_STATUS][0]), T0);
	uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
	uasm_i_xori(&p, T0, T0, KSU_USER);
	uasm_il_bnez(&p, &r, T0, label_kernel_asid);
	UASM_i_ADDIU(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
	/* else user */
	UASM_i_ADDIU(&p, T1, K1,
		     offsetof(struct kvm_vcpu_arch, guest_user_asid));
	uasm_l_kernel_asid(&l, p);

	/* t1: contains the base of the ASID array, need to get the cpu id */
	/* smp_processor_id */
	uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
	/* x4 */
	uasm_i_sll(&p, T2, T2, 2);
	UASM_i_ADDU(&p, T3, T1, T2);
	uasm_i_lw(&p, K0, 0, T3);
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	/* x sizeof(struct cpuinfo_mips)/4 */
	uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
	uasm_i_mul(&p, T2, T2, T3);

	UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
	UASM_i_ADDU(&p, AT, AT, T2);
	UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
	uasm_i_and(&p, K0, K0, T2);
#else
	uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
#endif
	uasm_i_mtc0(&p, K0, C0_ENTRYHI);
	uasm_i_ehb(&p);

	/* Disable RDHWR access */
	uasm_i_mtc0(&p, ZERO, C0_HWRENA);

	/* load the guest context from VCPU and return */
	for (i = 1; i < 32; ++i) {
		/* Guest k0/k1 loaded later */
		if (i == K0 || i == K1)
			continue;
		UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
	}

#ifndef CONFIG_CPU_MIPSR6
	/* Restore hi/lo */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
	uasm_i_mthi(&p, K0);

	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
	uasm_i_mtlo(&p, K0);
#endif

	/* Restore the guest's k0/k1 registers */
	UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
	UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);

	/* Jump to guest */
	uasm_i_eret(&p);

	uasm_resolve_relocs(relocs, labels);

	return p;
}
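/*
 * For reference, the ASID selection generated above corresponds roughly to
 * the C below. This is a sketch only: the example_* helper is hypothetical,
 * the array element width is assumed to match the 32-bit loads emitted
 * above, and the kern_mode test mirrors the andi/xori/bnez sequence (the
 * branch delay slot preloads the kernel ASID base, which the fall-through
 * path overwrites with the user base).
 */
static u32 example_pick_guest_asid(struct kvm_vcpu *vcpu, int cpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 status = cop0->reg[MIPS_CP0_STATUS][0];
	u32 *asid_base;
	u32 asid;

	/* Guest is in kernel mode unless KSU == user with ERL/EXL clear */
	if ((status & (KSU_USER | ST0_ERL | ST0_EXL)) != KSU_USER)
		asid_base = vcpu->arch.guest_kernel_asid;
	else
		asid_base = vcpu->arch.guest_user_asid;

	asid = asid_base[cpu];
#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
	asid &= cpu_data[cpu].asid_mask;	/* per-CPU variable mask */
#else
	asid &= MIPS_ENTRYHI_ASID;		/* fixed mask */
#endif
	return asid;
}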
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)_gcmp_base + GCMPCLCBOFS(COHCTL));

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will
			 * do by polling for the top bit of ready_count being
			 * set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not
			 * continue so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		/* TODO: determine whether required based on CPC version */
		cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu].dcache,
				  lbl_flush_fsb);

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, GCMP_CCB_COHCTL_DOMAIN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as
		 * well be the one to re-enable it. The rest will continue
		 * from here after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf, (unsigned long)p);

	return buf;

out_err:
	kfree(buf);
	return NULL;
}
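/*
 * The lbl_incready/lbl_decready sequences above are open-coded ll/sc
 * read-modify-write loops on ready_count. As a point of reference, they
 * are equivalent to the C below (a sketch; the example_* names are
 * hypothetical) - on MIPS, atomic_add_return() expands to the same
 * ll/addiu/sc/beqz pattern:
 */
static bool example_vpe_ready(atomic_t *nc_count, int online_vpes)
{
	/* Matches lbl_incready plus the beq of t1 against r_online */
	return atomic_add_return(1, nc_count) == online_vpes;
}

static void example_vpe_done(atomic_t *nc_count)
{
	/* Matches lbl_decready (ignoring the sibling-mask delay slot) */
	atomic_sub(1, nc_count);
}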
void build_copy_page(void)
{
	int off;
	u32 *buf = (u32 *)&copy_page_array;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - All prefetch biases are multiples of 8 words.
	 *   - The prefetch biases are less than one page.
	 *   - The store prefetch bias isn't greater than the load
	 *     prefetch bias.
	 */
	BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
	BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_copy_load);
	BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

	off = PAGE_SIZE - pref_bias_copy_load;
	if (off > 0xffff || !pref_bias_copy_load)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, 0xa000);

	off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_load_pref(&buf, -off);
		off -= cache_line_size;
	}
	off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
				cache_line_size : 0;
	while (off) {
		build_copy_store_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_copy_pref_both(&l, buf);
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < half_copy_loop_size);
	pg_addiu(&buf, A1, A1, 2 * off);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_copy_load_pref(&buf, off);
		build_copy_load(&buf, T0, off);
		build_copy_load_pref(&buf, off + copy_word_size);
		build_copy_load(&buf, T1, off + copy_word_size);
		build_copy_load_pref(&buf, off + 2 * copy_word_size);
		build_copy_load(&buf, T2, off + 2 * copy_word_size);
		build_copy_load_pref(&buf, off + 3 * copy_word_size);
		build_copy_load(&buf, T3, off + 3 * copy_word_size);
		build_copy_store_pref(&buf, off);
		build_copy_store(&buf, T0, off);
		build_copy_store_pref(&buf, off + copy_word_size);
		build_copy_store(&buf, T1, off + copy_word_size);
		build_copy_store_pref(&buf, off + 2 * copy_word_size);
		build_copy_store(&buf, T2, off + 2 * copy_word_size);
		build_copy_store_pref(&buf, off + 3 * copy_word_size);
		if (off == -(4 * copy_word_size))
			uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
		build_copy_store(&buf, T3, off + 3 * copy_word_size);
		off += 4 * copy_word_size;
	} while (off < 0);

	if (pref_bias_copy_load - pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0,
			 pref_bias_copy_load - pref_bias_copy_store);
		uasm_l_copy_pref_store(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store_pref(&buf, off);
			build_copy_store(&buf, T0, off);
			build_copy_store_pref(&buf, off + copy_word_size);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store_pref(&buf, off + 2 * copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store_pref(&buf, off + 3 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_pref_store);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	if (pref_bias_copy_store) {
		pg_addiu(&buf, A2, A0, pref_bias_copy_store);
		uasm_l_copy_nopref(&l, buf);
		off = 0;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < half_copy_loop_size);
		pg_addiu(&buf, A1, A1, 2 * off);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_copy_load(&buf, T0, off);
			build_copy_load(&buf, T1, off + copy_word_size);
			build_copy_load(&buf, T2, off + 2 * copy_word_size);
			build_copy_load(&buf, T3, off + 3 * copy_word_size);
			build_copy_store(&buf, T0, off);
			build_copy_store(&buf, T1, off + copy_word_size);
			build_copy_store(&buf, T2, off + 2 * copy_word_size);
			if (off == -(4 * copy_word_size))
				uasm_il_bne(&buf, &r, A2, A0,
					    label_copy_nopref);
			build_copy_store(&buf, T3, off + 3 * copy_word_size);
			off += 4 * copy_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized copy page handler (%u instructions).\n",
		 (u32)(buf - copy_page_array));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - copy_page_array); i++)
		pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
	pr_debug("\t.set pop\n");
}
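/*
 * Usage sketch (hypothetical, not from this file): once the buffer has been
 * synthesized and the icache flushed, it is entered like an ordinary
 * two-argument function. In the kernel the entry point is provided by an
 * assembly stub, but conceptually:
 */
static void example_copy_page(void *to, void *from)
{
	void (*fn)(void *to, void *from) = (void *)copy_page_array;

	fn(to, from);	/* A0 = to, A1 = from, per the MIPS calling ABIs */
}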
void __cpuinit build_clear_page(void)
{
	int off;
	u32 *buf = (u32 *)&clear_page_array;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON2) {
		const unsigned int wb_nudge = 26;

		pg_addiu(&buf, T0, A0, PAGE_SIZE);
		UASM_i_ADDIU(&buf, A1, A0, 128);
		uasm_l_clear_pref(&l, buf);
		uasm_i_zcbt(&buf, A0);
		uasm_i_pref(&buf, wb_nudge, 0, A0);
		UASM_i_ADDIU(&buf, A0, A0, 256);
		uasm_i_zcbt(&buf, A1);
		uasm_i_pref(&buf, wb_nudge, 0, A1);
		UASM_i_ADDIU(&buf, A1, A1, 256);
		uasm_i_zcbt(&buf, A0);
		uasm_i_pref(&buf, wb_nudge, 0, A0);
		UASM_i_ADDIU(&buf, A0, A0, 256);
		uasm_i_zcbt(&buf, A1);
		uasm_i_pref(&buf, wb_nudge, 0, A1);
		UASM_i_ADDIU(&buf, A1, A1, 256);
		uasm_i_zcbt(&buf, A0);
		uasm_i_pref(&buf, wb_nudge, 0, A0);
		UASM_i_ADDIU(&buf, A0, A0, 256);
		uasm_i_zcbt(&buf, A1);
		uasm_i_pref(&buf, wb_nudge, 0, A1);
		UASM_i_ADDIU(&buf, A1, A1, 256);
		uasm_i_zcbt(&buf, A0);
		uasm_i_pref(&buf, wb_nudge, 0, A0);
		UASM_i_ADDIU(&buf, A0, A0, 256);
		uasm_i_zcbt(&buf, A1);
		uasm_i_pref(&buf, wb_nudge, 0, A1);
		uasm_il_bne(&buf, &r, A0, T0, label_clear_pref);
		UASM_i_ADDIU(&buf, A1, A1, 256);
	} else {
		set_prefetch_parameters();

		/*
		 * This algorithm makes the following assumptions:
		 *   - The prefetch bias is a multiple of 2 words.
		 *   - The prefetch bias is less than one page.
		 */
		BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
		BUG_ON(PAGE_SIZE < pref_bias_clear_store);

		off = PAGE_SIZE - pref_bias_clear_store;
		if (off > 0xffff || !pref_bias_clear_store)
			pg_addiu(&buf, A2, A0, off);
		else
			uasm_i_ori(&buf, A2, A0, off);

		if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
			uasm_i_lui(&buf, AT, 0xa000);

		off = cache_line_size ? min(8, pref_bias_clear_store /
					    cache_line_size) *
					cache_line_size : 0;
		while (off) {
			build_clear_pref(&buf, -off);
			off -= cache_line_size;
		}
		uasm_l_clear_pref(&l, buf);
		do {
			build_clear_pref(&buf, off);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			build_clear_pref(&buf, off);
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_pref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);

		if (pref_bias_clear_store) {
			pg_addiu(&buf, A2, A0, pref_bias_clear_store);
			uasm_l_clear_nopref(&l, buf);
			off = 0;
			do {
				build_clear_store(&buf, off);
				off += clear_word_size;
			} while (off < half_clear_loop_size);
			pg_addiu(&buf, A0, A0, 2 * off);
			off = -off;
			do {
				if (off == -clear_word_size)
					uasm_il_bne(&buf, &r, A0, A2,
						    label_clear_nopref);
				build_clear_store(&buf, off);
				off += clear_word_size;
			} while (off < 0);
		}
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - clear_page_array));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - clear_page_array); i++)
		pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
	pr_debug("\t.set pop\n");
}
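/*
 * Note on the Octeon2 path above (added commentary; line size is an
 * assumption): each zcbt zeroes one cache block, and the loop interleaves
 * two pointers 128 bytes apart, issuing eight zcbt/pref pairs per
 * iteration. Assuming 128-byte lines, one iteration therefore clears 1 KiB
 * (A0 advances 4 * 256 bytes), and the bne against T0 = A0 + PAGE_SIZE
 * terminates the loop after PAGE_SIZE/1024 iterations. Hint 26
 * ("wb_nudge", per the variable name) asks the cache to write the freshly
 * zeroed block back early.
 */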
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	int i;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (uasm_insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		uasm_l_split(&l, final_handler);
		uasm_il_b(&f, &r, label_split);
		if (uasm_insn_has_bdelay(relocs, split))
			uasm_i_nop(&f);
		else {
			uasm_copy_handler(relocs, labels, split, split + 1, f);
			uasm_move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	f = final_handler;
#if defined(CONFIG_64BIT) && !defined(CONFIG_CPU_LOONGSON2)
	if (final_len > 32)
		final_len = 64;
	else
		f = final_handler + 32;
#endif /* CONFIG_64BIT */
	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < final_len; i++)
		pr_debug("\t.word 0x%08x\n", f[i]);
	pr_debug("\t.set pop\n");

	memcpy((void *)ebase, final_handler, 0x100);
}
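/*
 * Sizing note (added commentary): the refill region at ebase is 0x100
 * bytes, i.e. 64 instructions (64 * 4 bytes), which is exactly what the
 * final memcpy installs and what the 64-instruction overflow checks
 * protect. On 64-bit, execution is believed to start at the XTLB refill
 * vector at ebase + 0x80 (final_handler + 32), so the linear handler must
 * fit in the upper 32 slots; any overflow is folded into the lower half of
 * the region and reached via the wrap-around branch inserted at the split
 * point, with the delay-slot probing ensuring the branch never lands
 * inside another branch's delay slot.
 */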
void build_clear_page(void)
{
	int off;
	u32 *buf = &__clear_page_start;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	int i;
	static atomic_t run_once = ATOMIC_INIT(0);

	if (atomic_xchg(&run_once, 1))
		return;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	set_prefetch_parameters();

	/*
	 * This algorithm makes the following assumptions:
	 *   - The prefetch bias is a multiple of 2 words.
	 *   - The prefetch bias is less than one page.
	 */
	BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
	BUG_ON(PAGE_SIZE < pref_bias_clear_store);

	off = PAGE_SIZE - pref_bias_clear_store;
	if (off > 0xffff || !pref_bias_clear_store)
		pg_addiu(&buf, A2, A0, off);
	else
		uasm_i_ori(&buf, A2, A0, off);

	if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
		uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

	off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
				* cache_line_size : 0;
	while (off) {
		build_clear_pref(&buf, -off);
		off -= cache_line_size;
	}
	uasm_l_clear_pref(&l, buf);
	do {
		build_clear_pref(&buf, off);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < half_clear_loop_size);
	pg_addiu(&buf, A0, A0, 2 * off);
	off = -off;
	do {
		build_clear_pref(&buf, off);
		if (off == -clear_word_size)
			uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
		build_clear_store(&buf, off);
		off += clear_word_size;
	} while (off < 0);

	if (pref_bias_clear_store) {
		pg_addiu(&buf, A2, A0, pref_bias_clear_store);
		uasm_l_clear_nopref(&l, buf);
		off = 0;
		do {
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < half_clear_loop_size);
		pg_addiu(&buf, A0, A0, 2 * off);
		off = -off;
		do {
			if (off == -clear_word_size)
				uasm_il_bne(&buf, &r, A0, A2,
					    label_clear_nopref);
			build_clear_store(&buf, off);
			off += clear_word_size;
		} while (off < 0);
	}

	uasm_i_jr(&buf, RA);
	uasm_i_nop(&buf);

	BUG_ON(buf > &__clear_page_end);

	uasm_resolve_relocs(relocs, labels);

	pr_debug("Synthesized clear page handler (%u instructions).\n",
		 (u32)(buf - &__clear_page_start));

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < (buf - &__clear_page_start); i++)
		pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
	pr_debug("\t.set pop\n");
}
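/*
 * Two usage notes (added commentary, with a hypothetical example_* name):
 * the atomic_xchg() guard at the top makes this builder idempotent, so
 * only the first caller generates code and later calls return immediately;
 * and, as with build_copy_page(), the synthesized buffer is conceptually
 * entered as a one-argument function:
 */
static void example_clear_page(void *page)
{
	void (*fn)(void *to) = (void *)&__clear_page_start;

	fn(page);	/* A0 = page */
}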