/**
 * kvm_mips_build_exception() - Assemble first level guest exception handler.
 * @addr:	Address to start writing code.
 * @handler:	Address of common handler (within range of @addr).
 *
 * Assemble exception vector code for guest execution. The generated vector
 * will branch to the common exception handler generated by
 * kvm_mips_build_exit().
 *
 * Returns:	Next address after end of written function.
 */
void *kvm_mips_build_exception(void *addr, void *handler)
{
	u32 *p = addr;
	struct uasm_label labels[2];
	struct uasm_reloc relocs[2];
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	/* Save guest k1 into scratch register */
	UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);

	/* Get the VCPU pointer from the VCPU scratch register */
	UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
	UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));

	/* Save guest k0 into VCPU structure */
	UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);

	/* Branch to the common handler */
	uasm_il_b(&p, &r, label_exit_common);
	uasm_i_nop(&p);

	uasm_l_exit_common(&l, handler);
	uasm_resolve_relocs(relocs, labels);

	return p;
}
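/*
 * Illustrative sketch (not part of the source above): one plausible way a
 * caller could use kvm_mips_build_exception() together with
 * kvm_mips_build_exit() to populate a block of guest exception vectors.
 * The gebase parameter, the 0x2000 offset chosen for the common handler,
 * the 0x200 base and 0x80 spacing for the vectored entries, and the helper
 * name are assumptions made for this example; only the 0x180 general
 * exception offset is architectural.
 */
static void example_install_guest_vectors(void *gebase)
{
	void *handler = gebase + 0x2000;	/* assumed common handler slot */
	unsigned int i;

	/* Emit the common exit handler first so the vectors have a target */
	kvm_mips_build_exit(handler);

	/* General exception vector at the architectural 0x180 offset */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* Vectored interrupt entries (assumed base/spacing) */
	for (i = 0; i < 8; i++)
		kvm_mips_build_exception(gebase + 0x200 + i * 0x80, handler);
}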
static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	int i;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
	if (bcm1250_m3_war()) {
		UASM_i_MFC0(&p, K0, C0_BADVADDR);
		UASM_i_MFC0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		UASM_i_SRL(&p, K0, K0, PAGE_SHIFT + 1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > 63)
	    || (((p - tlb_handler) > 61)
		&& uasm_insn_has_bdelay(relocs, tlb_handler + 29)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + 32;
	if ((p - tlb_handler) <= 32) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
		u32 *split = tlb_handler + 30;

		/*
		 * Find the split point.
		 */
		if (uasm_insn_has_bdelay(relocs, split - 1))
			split--;

		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		/* Insert branch. */
		uasm_l_split(&l, final_handler);
		uasm_il_b(&f, &r, label_split);
		if (uasm_insn_has_bdelay(relocs, split))
			uasm_i_nop(&f);
		else {
			uasm_copy_handler(relocs, labels, split, split + 1, f);
			uasm_move_labels(labels, f, f + 1, -1);
			f++;
			split++;
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + 32)) + (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	f = final_handler;
#if defined(CONFIG_64BIT) && !defined(CONFIG_CPU_LOONGSON2)
	if (final_len > 32)
		final_len = 64;
	else
		f = final_handler + 32;
#endif /* CONFIG_64BIT */
	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");
	for (i = 0; i < final_len; i++)
		pr_debug("\t.word 0x%08x\n", f[i]);
	pr_debug("\t.set pop\n");

	memcpy((void *)ebase, final_handler, 0x100);
}
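/*
 * Minimal sketch of the uasm label/reloc workflow the builders above rely
 * on: emit instructions into a buffer, record branches against numeric
 * label ids, mark where each label lands, then fix up the branch offsets
 * with uasm_resolve_relocs(). The buffer, the label id, the register names
 * (as used elsewhere in these excerpts) and the loop itself are arbitrary
 * choices for illustration only.
 */
static void example_count_down_loop(u32 *buf)
{
	struct uasm_label labels[2], *l = labels;
	struct uasm_reloc relocs[2], *r = relocs;
	u32 *p = buf;
	enum { lbl_loop = 1 };

	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	uasm_i_addiu(&p, t0, zero, 16);		/* t0 = 16 */
	uasm_build_label(&l, p, lbl_loop);	/* loop: */
	uasm_i_addiu(&p, t0, t0, -1);		/* t0-- */
	uasm_il_bnez(&p, &r, t0, lbl_loop);	/* branch back while t0 != 0 */
	uasm_i_nop(&p);				/* delay slot */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

	/* Patch the bnez offset now that the label address is known */
	uasm_resolve_relocs(relocs, labels);
}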
static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
{
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *buf, *p;
	const unsigned r_online = a0;
	const unsigned r_nc_count = a1;
	const unsigned r_pcohctl = t7;
	const unsigned max_instrs = 256;
	unsigned cpc_cmd;
	enum {
		lbl_incready = 1,
		lbl_poll_cont,
		lbl_secondary_hang,
		lbl_disable_coherence,
		lbl_flush_fsb,
		lbl_invicache,
		lbl_flushdcache,
		lbl_hang,
		lbl_set_cont,
		lbl_secondary_cont,
		lbl_decready,
	};

	/* Allocate a buffer to hold the generated code */
	p = buf = kcalloc(max_instrs, sizeof(u32), GFP_KERNEL);
	if (!buf)
		return NULL;

	/* Clear labels & relocs ready for (re)use */
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (state == CPS_PM_POWER_GATED) {
		/* Power gating relies upon CPS SMP */
		if (!mips_cps_smp_in_use())
			goto out_err;

		/*
		 * Save CPU state. Note the non-standard calling convention
		 * with the return address placed in v0 to avoid clobbering
		 * the ra register before it is saved.
		 */
		UASM_i_LA(&p, t0, (long)mips_cps_pm_save);
		uasm_i_jalr(&p, v0, t0);
		uasm_i_nop(&p);
	}

	/*
	 * Load addresses of required CM & CPC registers. This is done early
	 * because they're needed in both the enable & disable coherence steps
	 * but in the coupled case the enable step will only run on one VPE.
	 */
	UASM_i_LA(&p, r_pcohctl, (long)_gcmp_base + GCMPCLCBOFS(COHCTL));

	if (coupled_coherence) {
		/* Increment ready_count */
		uasm_i_sync(&p, stype_ordering);
		uasm_build_label(&l, p, lbl_incready);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, 1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_incready);
		uasm_i_addiu(&p, t1, t1, 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);

		/*
		 * If this is the last VPE to become ready for non-coherence
		 * then it should branch below.
		 */
		uasm_il_beq(&p, &r, t1, r_online, lbl_disable_coherence);
		uasm_i_nop(&p);

		if (state < CPS_PM_POWER_GATED) {
			/*
			 * Otherwise this is not the last VPE to become ready
			 * for non-coherence. It needs to wait until coherence
			 * has been disabled before proceeding, which it will do
			 * by polling for the top bit of ready_count being set.
			 */
			uasm_i_addiu(&p, t1, zero, -1);
			uasm_build_label(&l, p, lbl_poll_cont);
			uasm_i_lw(&p, t0, 0, r_nc_count);
			uasm_il_bltz(&p, &r, t0, lbl_secondary_cont);
			uasm_i_ehb(&p);
			uasm_i_yield(&p, zero, t1);
			uasm_il_b(&p, &r, lbl_poll_cont);
			uasm_i_nop(&p);
		} else {
			/*
			 * The core will lose power & this VPE will not continue
			 * so it can simply halt here.
			 */
			uasm_i_addiu(&p, t0, zero, TCHALT_H);
			uasm_i_mtc0(&p, t0, 2, 4);
			uasm_build_label(&l, p, lbl_secondary_hang);
			uasm_il_b(&p, &r, lbl_secondary_hang);
			uasm_i_nop(&p);
		}
	}

	/*
	 * This is the point of no return - this VPE will now proceed to
	 * disable coherence. At this point we *must* be sure that no other
	 * VPE within the core will interfere with the L1 dcache.
	 */
	uasm_build_label(&l, p, lbl_disable_coherence);

	/* Invalidate the L1 icache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].icache,
			      Index_Invalidate_I, lbl_invicache);

	/* Writeback & invalidate the L1 dcache */
	cps_gen_cache_routine(&p, &l, &r, &cpu_data[cpu].dcache,
			      Index_Writeback_Inv_D, lbl_flushdcache);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	/*
	 * Disable all but self interventions. The load from COHCTL is defined
	 * by the interAptiv & proAptiv SUMs as ensuring that the operation
	 * resulting from the preceding store is complete.
	 */
	uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Sync to ensure previous interventions are complete */
	uasm_i_sync(&p, stype_intervention);
	uasm_i_ehb(&p);

	/* Disable coherence */
	uasm_i_sw(&p, zero, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	if (state >= CPS_PM_CLOCK_GATED) {
		/* TODO: determine whether required based on CPC version */
		cps_gen_flush_fsb(&p, &l, &r, &cpu_data[cpu].dcache,
				  lbl_flush_fsb);

		/* Determine the CPC command to issue */
		switch (state) {
		case CPS_PM_CLOCK_GATED:
			cpc_cmd = CPC_Cx_CMD_CLOCKOFF;
			break;
		case CPS_PM_POWER_GATED:
			cpc_cmd = CPC_Cx_CMD_PWRDOWN;
			break;
		default:
			BUG();
			goto out_err;
		}

		/* Issue the CPC command */
		UASM_i_LA(&p, t0, (long)addr_cpc_cl_cmd());
		uasm_i_addiu(&p, t1, zero, cpc_cmd);
		uasm_i_sw(&p, t1, 0, t0);

		if (state == CPS_PM_POWER_GATED) {
			/* If anything goes wrong just hang */
			uasm_build_label(&l, p, lbl_hang);
			uasm_il_b(&p, &r, lbl_hang);
			uasm_i_nop(&p);

			/*
			 * There's no point generating more code, the core is
			 * powered down & if powered back up will run from the
			 * reset vector not from here.
			 */
			goto gen_done;
		}

		/* Completion barrier */
		uasm_i_sync(&p, stype_memory);
		uasm_i_ehb(&p);
	}

	if (state == CPS_PM_NC_WAIT) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		if (coupled_coherence)
			cps_gen_set_top_bit(&p, &l, &r, r_nc_count,
					    lbl_set_cont);

		/*
		 * VPEs which did not disable coherence will continue
		 * executing, after coherence has been disabled, from this
		 * point.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Now perform our wait */
		uasm_i_wait(&p, 0);
	}

	/*
	 * Re-enable coherence. Note that for CPS_PM_NC_WAIT all coupled VPEs
	 * will run this. The first will actually re-enable coherence & the
	 * rest will just be performing a rather unusual nop.
	 */
	uasm_i_addiu(&p, t0, zero, GCMP_CCB_COHCTL_DOMAIN_MSK);
	uasm_i_sw(&p, t0, 0, r_pcohctl);
	uasm_i_lw(&p, t0, 0, r_pcohctl);

	/* Completion barrier */
	uasm_i_sync(&p, stype_memory);
	uasm_i_ehb(&p);

	if (coupled_coherence && (state == CPS_PM_NC_WAIT)) {
		/* Decrement ready_count */
		uasm_build_label(&l, p, lbl_decready);
		uasm_i_sync(&p, stype_ordering);
		uasm_i_ll(&p, t1, 0, r_nc_count);
		uasm_i_addiu(&p, t2, t1, -1);
		uasm_i_sc(&p, t2, 0, r_nc_count);
		uasm_il_beqz(&p, &r, t2, lbl_decready);
		uasm_i_andi(&p, v0, t1, (1 << fls(smp_num_siblings)) - 1);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	if (coupled_coherence && (state == CPS_PM_CLOCK_GATED)) {
		/*
		 * At this point it is safe for all VPEs to proceed with
		 * execution. This VPE will set the top bit of ready_count
		 * to indicate to the other VPEs that they may continue.
		 */
		cps_gen_set_top_bit(&p, &l, &r, r_nc_count, lbl_set_cont);

		/*
		 * This core will be reliant upon another core sending a
		 * power-up command to the CPC in order to resume operation.
		 * Thus an arbitrary VPE can't trigger the core leaving the
		 * idle state and the one that disables coherence might as well
		 * be the one to re-enable it. The rest will continue from here
		 * after that has been done.
		 */
		uasm_build_label(&l, p, lbl_secondary_cont);

		/* Ordering barrier */
		uasm_i_sync(&p, stype_ordering);
	}

	/* The core is coherent, time to return to C code */
	uasm_i_jr(&p, ra);
	uasm_i_nop(&p);

gen_done:
	/* Ensure the code didn't exceed the resources allocated for it */
	BUG_ON((p - buf) > max_instrs);
	BUG_ON((l - labels) > ARRAY_SIZE(labels));
	BUG_ON((r - relocs) > ARRAY_SIZE(relocs));

	/* Patch branch offsets */
	uasm_resolve_relocs(relocs, labels);

	/* Flush the icache */
	local_flush_icache_range((unsigned long)buf,
				 (unsigned long)p);

	return buf;

out_err:
	kfree(buf);
	return NULL;
}
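/*
 * Illustrative sketch (not from the source): the generated code takes its
 * arguments in a0 (number of coupled VPEs to rendezvous) and a1 (pointer to
 * the shared ready count), leaves a value in v0 and returns through ra, so
 * the buffer returned by cps_gen_entry_code() can be treated as an ordinary
 * C function. The typedef name, the caching strategy and the wrapper below
 * are assumptions for this example; a real caller would cache one entry
 * point per core and per state.
 */
typedef unsigned int (*example_nc_entry_fn)(unsigned int online,
					    u32 *nc_ready_count);

static int example_enter_state(unsigned int cpu, enum cps_pm_state state,
			       unsigned int online, u32 *ready_count)
{
	static void *entry_code;	/* would normally be per core/state */
	example_nc_entry_fn entry;

	if (!entry_code) {
		entry_code = cps_gen_entry_code(cpu, state);
		if (!entry_code)
			return -ENOMEM;
	}

	entry = (example_nc_entry_fn)entry_code;

	/* Jump into the generated code; it returns once coherence is back */
	return entry(online, ready_count);
}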
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static __init void
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr)
{
	long swpd = (long)swapper_pg_dir;

#ifdef MODULE_START
	long modd = (long)module_pg_dir;

	uasm_l_module_alloc(l, *p);
	/*
	 * Assumption:
	 *   VMALLOC_START >= 0xc000000000000000UL
	 *   MODULE_START  >= 0xe000000000000000UL
	 */
	UASM_i_SLL(p, ptr, bvaddr, 2);
	uasm_il_bgez(p, r, ptr, label_vmalloc);

	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START)) {
		uasm_i_lui(p, ptr, uasm_rel_hi(MODULE_START)); /* delay slot */
	} else {
		/* unlikely configuration */
		uasm_i_nop(p); /* delay slot */
		UASM_i_LA(p, ptr, MODULE_START);
	}
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(modd) && !uasm_rel_lo(modd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(modd));
	} else {
		UASM_i_LA_mostly(p, ptr, modd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(modd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(modd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(modd));
	}

	uasm_l_vmalloc(l, *p);
	if (uasm_in_compat_space_p(MODULE_START) &&
	    !uasm_rel_lo(MODULE_START) &&
	    MODULE_START << 32 == VMALLOC_START)
		uasm_i_dsll32(p, ptr, ptr, 0);	/* typical case */
	else
		UASM_i_LA(p, ptr, VMALLOC_START);
#else
	uasm_l_vmalloc(l, *p);
	UASM_i_LA(p, ptr, VMALLOC_START);
#endif
	uasm_i_dsubu(p, bvaddr, bvaddr, ptr);

	if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
		uasm_il_b(p, r, label_vmalloc_done);
		uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
	} else {
		UASM_i_LA_mostly(p, ptr, swpd);
		uasm_il_b(p, r, label_vmalloc_done);
		if (uasm_in_compat_space_p(swpd))
			uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
		else
			uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
	}
}
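/*
 * Illustrative helper (not from the source): the swapper_pg_dir /
 * module_pg_dir handling above is an instance of a general pattern,
 * "materialize a kernel symbol address in as few instructions as possible"
 * using the uasm_in_compat_space_p()/uasm_rel_hi()/uasm_rel_lo() split.
 * This sketch shows only that decision, without the branch-delay-slot
 * scheduling the real code performs; the helper name is hypothetical.
 */
static void __init example_load_symbol_addr(u32 **p, unsigned int reg,
					    long addr)
{
	if (uasm_in_compat_space_p(addr) && !uasm_rel_lo(addr)) {
		/* Sign-extended 32-bit address, zero low half: a single lui */
		uasm_i_lui(p, reg, uasm_rel_hi(addr));
	} else if (uasm_in_compat_space_p(addr)) {
		/* Sign-extended 32-bit address: lui + addiu */
		uasm_i_lui(p, reg, uasm_rel_hi(addr));
		uasm_i_addiu(p, reg, reg, uasm_rel_lo(addr));
	} else {
		/* Arbitrary 64-bit constant: let uasm emit the long sequence */
		UASM_i_LA(p, reg, addr);
	}
}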