static void __cpuinit iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte,
        unsigned int ptr, unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

        uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_scd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SC(p, pte, 0, ptr);

        if (r10000_llsc_war())
                uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
        else
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                /* no uasm_i_nop needed */
                uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
                /* no uasm_i_nop needed */
                uasm_i_lw(p, pte, 0, ptr);
        } else
                uasm_i_nop(p);
# else
        uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_sd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_lw(p, pte, 0, ptr);
        }
# endif
#endif
}
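/*
 * Illustrative sketch only (not part of the kernel source, names are
 * hypothetical): on SMP, the load-linked emitted earlier in the handler (at
 * label_smp_pgtable_change) plus the SC and retry branch emitted by iPTE_SW()
 * behave like an atomic OR of the software mode bits into the PTE, retried
 * when another CPU updated the entry in between.  __atomic_fetch_or()
 * compiles to just such an ll/or/sc retry loop on MIPS.
 */
#include <stdint.h>
#include <stdio.h>

static void pte_or_bits_model(uint32_t *pte, uint32_t mode)
{
        /* atomic read-modify-write; lowered to an ll/or/sc retry loop */
        __atomic_fetch_or(pte, mode, __ATOMIC_RELAXED);
}

int main(void)
{
        uint32_t pte = 0x1000;          /* pretend PTE value */

        pte_or_bits_model(&pte, 0x6);   /* pretend _PAGE_VALID | _PAGE_DIRTY */
        printf("0x%08x\n", pte);        /* prints 0x00001006 */
        return 0;
}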
/**
 * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
 * @addr:	Address to start writing code.
 *
 * Assemble the code to handle return from the guest exit handler
 * (kvm_mips_handle_exit()) back to the guest.
 *
 * Returns:	Next address after end of written function.
 */
static void *kvm_mips_build_ret_to_guest(void *addr)
{
        u32 *p = addr;

        /* Put the saved pointer to vcpu (s1) back into the scratch register */
        UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);

        /* Load up the Guest EBASE to minimize the window where BEV is set */
        UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);

        /* Switch EBASE back to the one used by KVM */
        uasm_i_mfc0(&p, V1, C0_STATUS);
        uasm_i_lui(&p, AT, ST0_BEV >> 16);
        uasm_i_or(&p, K0, V1, AT);
        uasm_i_mtc0(&p, K0, C0_STATUS);
        uasm_i_ehb(&p);
        build_set_exc_base(&p, T0);

        /* Setup status register for running guest in UM */
        uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
        UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
        uasm_i_and(&p, V1, V1, AT);
        uasm_i_mtc0(&p, V1, C0_STATUS);
        uasm_i_ehb(&p);

        p = kvm_mips_build_enter_guest(p);

        return p;
}
/**
 * build_set_exc_base() - Assemble code to write exception base address.
 * @p:		Code buffer pointer.
 * @reg:	Source register (generated code may set WG bit in @reg).
 *
 * Assemble code to modify the exception base address in the EBase register,
 * using the appropriately sized access and setting the WG bit if necessary.
 */
static inline void build_set_exc_base(u32 **p, unsigned int reg)
{
        if (cpu_has_ebase_wg) {
                /* Set WG so that all the bits get written */
                uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
                UASM_i_MTC0(p, reg, C0_EBASE);
        } else {
                uasm_i_mtc0(p, reg, C0_EBASE);
        }
}
/* Load a u32 immediate to a register */
static inline void emit_load_imm(unsigned int dst, u32 imm, struct jit_ctx *ctx)
{
        if (ctx->target != NULL) {
                /* addiu can only handle s16 */
                if (!is_range16(imm)) {
                        u32 *p = &ctx->target[ctx->idx];
                        uasm_i_lui(&p, r_tmp_imm, (s32)imm >> 16);
                        p = &ctx->target[ctx->idx + 1];
                        uasm_i_ori(&p, dst, r_tmp_imm, imm & 0xffff);
                } else {
                        u32 *p = &ctx->target[ctx->idx];
                        uasm_i_addiu(&p, dst, r_zero, imm);
                }
        }
        ctx->idx++;

        /* a wide immediate costs a second instruction slot */
        if (!is_range16(imm))
                ctx->idx++;
}
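/*
 * Standalone sketch (not part of the JIT): for immediates outside the signed
 * 16-bit addiu range, emit_load_imm() above splits the constant into a lui of
 * the upper half followed by an ori of the lower half.  ori is used rather
 * than addiu because it zero-extends its 16-bit operand, so the low half can
 * never disturb the value placed by lui and no carry fixup is needed.  The
 * helper below just re-derives the two halves and checks they recombine.
 */
#include <assert.h>
#include <stdint.h>

static uint32_t rebuild_imm32(uint32_t imm)
{
        uint16_t hi = imm >> 16;        /* lui operand */
        uint16_t lo = imm & 0xffff;     /* ori operand */

        return ((uint32_t)hi << 16) | lo;
}

int main(void)
{
        assert(rebuild_imm32(0xdeadbeef) == 0xdeadbeef);
        assert(rebuild_imm32(0x00007fff) == 0x00007fff);
        assert(rebuild_imm32(0xffff8000) == 0xffff8000);
        return 0;
}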
static void __cpuinit build_r4000_tlbchange_handler_tail(u32 **p,
                                                         struct uasm_label **l,
                                                         struct uasm_reloc **r,
                                                         unsigned int tmp,
                                                         unsigned int ptr)
{
        uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
        uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
        build_update_entries(p, tmp, ptr);
        build_tlb_write_entry(p, l, r, tlb_indexed);
        uasm_l_leave(l, *p);
        uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(p, l, r, tmp, ptr);
#endif
}
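/*
 * Standalone sketch (not kernel code, helper name is hypothetical): the
 * leading ori/xori pair in the handler tail clears the sizeof(pte_t) bit of
 * the PTE pointer, i.e. rounds it down to the base of an even/odd PTE pair
 * for build_update_entries(), using two immediate-form instructions instead
 * of materialising an inverted mask:  (ptr | bit) ^ bit  ==  ptr & ~bit.
 */
#include <assert.h>
#include <stdint.h>

static uintptr_t align_down_to_pair(uintptr_t ptr, uintptr_t pte_size)
{
        return (ptr | pte_size) ^ pte_size;     /* pte_size is a power of two */
}

int main(void)
{
        assert(align_down_to_pair(0x1008, 8) == 0x1000); /* odd entry -> pair base */
        assert(align_down_to_pair(0x1000, 8) == 0x1000); /* even entry unchanged */
        assert(align_down_to_pair(0x2004, 4) == 0x2000); /* 32-bit pte_t case */
        return 0;
}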
static inline void __cpuinit pg_addiu(u32 **buf, unsigned int reg1,
                                      unsigned int reg2, unsigned int off)
{
        if (cpu_has_64bit_gp_regs && DADDI_WAR && r4k_daddiu_bug()) {
                if (off > 0x7fff) {
                        uasm_i_lui(buf, T9, uasm_rel_hi(off));
                        uasm_i_addiu(buf, T9, T9, uasm_rel_lo(off));
                } else
                        uasm_i_addiu(buf, T9, ZERO, off);
                uasm_i_daddu(buf, reg1, reg2, T9);
        } else {
                if (off > 0x7fff) {
                        if (off == 0x8000) {
                                uasm_i_ori(buf, T9, ZERO, 0x8000);
                        } else {
                                uasm_i_lui(buf, T9, uasm_rel_hi(off));
                                if (uasm_rel_lo(off) != 0)
                                        uasm_i_addiu(buf, T9, T9,
                                                     uasm_rel_lo(off));
                        }
                        UASM_i_ADDU(buf, reg1, reg2, T9);
                } else
                        UASM_i_ADDIU(buf, reg1, reg2, off);
        }
}
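/*
 * Standalone sketch (an assumption about the semantics of uasm_rel_hi() and
 * uasm_rel_lo(), not the kernel's implementation): pg_addiu() materialises
 * offsets above the 15-bit addiu range with a lui/addiu pair.  Because addiu
 * sign-extends its 16-bit immediate, the low half is the sign-extended bottom
 * 16 bits and the high half must be rounded up by one whenever that low half
 * is negative, so that (hi << 16) + lo recombines to the original offset.
 */
#include <assert.h>
#include <stdint.h>

static int32_t rel_lo(int64_t val)
{
        return (int16_t)(val & 0xffff);         /* sign-extended low half */
}

static int32_t rel_hi(int64_t val)
{
        return (int32_t)((val - rel_lo(val)) >> 16);    /* compensated high half */
}

int main(void)
{
        for (int64_t off = 0; off <= 0x20000; off += 0x1fff) {
                int64_t rebuilt = ((int64_t)rel_hi(off) << 16) + rel_lo(off);

                assert(rebuilt == off);
        }
        return 0;
}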
void build_copy_page(void)
{
        int off;
        u32 *buf = (u32 *)&copy_page_array;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - All prefetch biases are multiples of 8 words.
         *   - The prefetch biases are less than one page.
         *   - The store prefetch bias isn't greater than the load
         *     prefetch bias.
         */
        BUG_ON(pref_bias_copy_load % (8 * copy_word_size));
        BUG_ON(pref_bias_copy_store % (8 * copy_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_copy_load);
        BUG_ON(pref_bias_copy_store > pref_bias_copy_load);

        off = PAGE_SIZE - pref_bias_copy_load;
        if (off > 0xffff || !pref_bias_copy_load)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, 0xa000);

        off = cache_line_size ? min(8, pref_bias_copy_load / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_load_pref(&buf, -off);
                off -= cache_line_size;
        }
        off = cache_line_size ? min(8, pref_bias_copy_store / cache_line_size) *
                                cache_line_size : 0;
        while (off) {
                build_copy_store_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_copy_pref_both(&l, buf);
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < half_copy_loop_size);
        pg_addiu(&buf, A1, A1, 2 * off);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_copy_load_pref(&buf, off);
                build_copy_load(&buf, T0, off);
                build_copy_load_pref(&buf, off + copy_word_size);
                build_copy_load(&buf, T1, off + copy_word_size);
                build_copy_load_pref(&buf, off + 2 * copy_word_size);
                build_copy_load(&buf, T2, off + 2 * copy_word_size);
                build_copy_load_pref(&buf, off + 3 * copy_word_size);
                build_copy_load(&buf, T3, off + 3 * copy_word_size);
                build_copy_store_pref(&buf, off);
                build_copy_store(&buf, T0, off);
                build_copy_store_pref(&buf, off + copy_word_size);
                build_copy_store(&buf, T1, off + copy_word_size);
                build_copy_store_pref(&buf, off + 2 * copy_word_size);
                build_copy_store(&buf, T2, off + 2 * copy_word_size);
                build_copy_store_pref(&buf, off + 3 * copy_word_size);
                if (off == -(4 * copy_word_size))
                        uasm_il_bne(&buf, &r, A2, A0, label_copy_pref_both);
                build_copy_store(&buf, T3, off + 3 * copy_word_size);
                off += 4 * copy_word_size;
        } while (off < 0);

        if (pref_bias_copy_load - pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0,
                         pref_bias_copy_load - pref_bias_copy_store);
                uasm_l_copy_pref_store(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store_pref(&buf, off);
                        build_copy_store(&buf, T0, off);
                        build_copy_store_pref(&buf, off + copy_word_size);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store_pref(&buf, off + 2 * copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store_pref(&buf, off + 3 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_pref_store);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        if (pref_bias_copy_store) {
                pg_addiu(&buf, A2, A0, pref_bias_copy_store);
                uasm_l_copy_nopref(&l, buf);
                off = 0;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < half_copy_loop_size);
                pg_addiu(&buf, A1, A1, 2 * off);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_copy_load(&buf, T0, off);
                        build_copy_load(&buf, T1, off + copy_word_size);
                        build_copy_load(&buf, T2, off + 2 * copy_word_size);
                        build_copy_load(&buf, T3, off + 3 * copy_word_size);
                        build_copy_store(&buf, T0, off);
                        build_copy_store(&buf, T1, off + copy_word_size);
                        build_copy_store(&buf, T2, off + 2 * copy_word_size);
                        if (off == -(4 * copy_word_size))
                                uasm_il_bne(&buf, &r, A2, A0,
                                            label_copy_nopref);
                        build_copy_store(&buf, T3, off + 3 * copy_word_size);
                        off += 4 * copy_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
        BUG_ON(buf > copy_page_array + ARRAY_SIZE(copy_page_array));

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized copy page handler (%u instructions).\n",
                 (u32)(buf - copy_page_array));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - copy_page_array); i++)
                pr_debug("\t.word 0x%08x\n", copy_page_array[i]);
        pr_debug("\t.set pop\n");
}
void __cpuinit build_clear_page(void)
{
        int off;
        u32 *buf = (u32 *)&clear_page_array;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        if (current_cpu_data.cputype == CPU_CAVIUM_OCTEON2) {
                const unsigned int wb_nudge = 26;

                pg_addiu(&buf, T0, A0, PAGE_SIZE);
                UASM_i_ADDIU(&buf, A1, A0, 128);
                uasm_l_clear_pref(&l, buf);

                uasm_i_zcbt(&buf, A0);
                uasm_i_pref(&buf, wb_nudge, 0, A0);
                UASM_i_ADDIU(&buf, A0, A0, 256);

                uasm_i_zcbt(&buf, A1);
                uasm_i_pref(&buf, wb_nudge, 0, A1);
                UASM_i_ADDIU(&buf, A1, A1, 256);

                uasm_i_zcbt(&buf, A0);
                uasm_i_pref(&buf, wb_nudge, 0, A0);
                UASM_i_ADDIU(&buf, A0, A0, 256);

                uasm_i_zcbt(&buf, A1);
                uasm_i_pref(&buf, wb_nudge, 0, A1);
                UASM_i_ADDIU(&buf, A1, A1, 256);

                uasm_i_zcbt(&buf, A0);
                uasm_i_pref(&buf, wb_nudge, 0, A0);
                UASM_i_ADDIU(&buf, A0, A0, 256);

                uasm_i_zcbt(&buf, A1);
                uasm_i_pref(&buf, wb_nudge, 0, A1);
                UASM_i_ADDIU(&buf, A1, A1, 256);

                uasm_i_zcbt(&buf, A0);
                uasm_i_pref(&buf, wb_nudge, 0, A0);
                UASM_i_ADDIU(&buf, A0, A0, 256);

                uasm_i_zcbt(&buf, A1);
                uasm_i_pref(&buf, wb_nudge, 0, A1);

                uasm_il_bne(&buf, &r, A0, T0, label_clear_pref);
                UASM_i_ADDIU(&buf, A1, A1, 256);
        } else {
                set_prefetch_parameters();

                /*
                 * This algorithm makes the following assumptions:
                 *   - The prefetch bias is a multiple of 2 words.
                 *   - The prefetch bias is less than one page.
                 */
                BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
                BUG_ON(PAGE_SIZE < pref_bias_clear_store);

                off = PAGE_SIZE - pref_bias_clear_store;
                if (off > 0xffff || !pref_bias_clear_store)
                        pg_addiu(&buf, A2, A0, off);
                else
                        uasm_i_ori(&buf, A2, A0, off);

                if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                        uasm_i_lui(&buf, AT, 0xa000);

                off = cache_line_size ?
                        min(8, pref_bias_clear_store / cache_line_size) *
                        cache_line_size : 0;
                while (off) {
                        build_clear_pref(&buf, -off);
                        off -= cache_line_size;
                }
                uasm_l_clear_pref(&l, buf);
                do {
                        build_clear_pref(&buf, off);
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < half_clear_loop_size);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        build_clear_pref(&buf, off);
                        if (off == -clear_word_size)
                                uasm_il_bne(&buf, &r, A0, A2,
                                            label_clear_pref);
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < 0);

                if (pref_bias_clear_store) {
                        pg_addiu(&buf, A2, A0, pref_bias_clear_store);
                        uasm_l_clear_nopref(&l, buf);
                        off = 0;
                        do {
                                build_clear_store(&buf, off);
                                off += clear_word_size;
                        } while (off < half_clear_loop_size);
                        pg_addiu(&buf, A0, A0, 2 * off);
                        off = -off;
                        do {
                                if (off == -clear_word_size)
                                        uasm_il_bne(&buf, &r, A0, A2,
                                                    label_clear_nopref);
                                build_clear_store(&buf, off);
                                off += clear_word_size;
                        } while (off < 0);
                }
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
        BUG_ON(buf > clear_page_array + ARRAY_SIZE(clear_page_array));

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized clear page handler (%u instructions).\n",
                 (u32)(buf - clear_page_array));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - clear_page_array); i++)
                pr_debug("\t.word 0x%08x\n", clear_page_array[i]);
        pr_debug("\t.set pop\n");
}
void build_clear_page(void)
{
        int off;
        u32 *buf = &__clear_page_start;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        int i;
        static atomic_t run_once = ATOMIC_INIT(0);

        if (atomic_xchg(&run_once, 1))
                return;

        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        set_prefetch_parameters();

        /*
         * This algorithm makes the following assumptions:
         *   - The prefetch bias is a multiple of 2 words.
         *   - The prefetch bias is less than one page.
         */
        BUG_ON(pref_bias_clear_store % (2 * clear_word_size));
        BUG_ON(PAGE_SIZE < pref_bias_clear_store);

        off = PAGE_SIZE - pref_bias_clear_store;
        if (off > 0xffff || !pref_bias_clear_store)
                pg_addiu(&buf, A2, A0, off);
        else
                uasm_i_ori(&buf, A2, A0, off);

        if (R4600_V2_HIT_CACHEOP_WAR && cpu_is_r4600_v2_x())
                uasm_i_lui(&buf, AT, uasm_rel_hi(0xa0000000));

        off = cache_line_size ? min(8, pref_bias_clear_store / cache_line_size)
                                * cache_line_size : 0;
        while (off) {
                build_clear_pref(&buf, -off);
                off -= cache_line_size;
        }
        uasm_l_clear_pref(&l, buf);
        do {
                build_clear_pref(&buf, off);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < half_clear_loop_size);
        pg_addiu(&buf, A0, A0, 2 * off);
        off = -off;
        do {
                build_clear_pref(&buf, off);
                if (off == -clear_word_size)
                        uasm_il_bne(&buf, &r, A0, A2, label_clear_pref);
                build_clear_store(&buf, off);
                off += clear_word_size;
        } while (off < 0);

        if (pref_bias_clear_store) {
                pg_addiu(&buf, A2, A0, pref_bias_clear_store);
                uasm_l_clear_nopref(&l, buf);
                off = 0;
                do {
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < half_clear_loop_size);
                pg_addiu(&buf, A0, A0, 2 * off);
                off = -off;
                do {
                        if (off == -clear_word_size)
                                uasm_il_bne(&buf, &r, A0, A2,
                                            label_clear_nopref);
                        build_clear_store(&buf, off);
                        off += clear_word_size;
                } while (off < 0);
        }

        uasm_i_jr(&buf, RA);
        uasm_i_nop(&buf);
        BUG_ON(buf > &__clear_page_end);

        uasm_resolve_relocs(relocs, labels);

        pr_debug("Synthesized clear page handler (%u instructions).\n",
                 (u32)(buf - &__clear_page_start));

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");
        for (i = 0; i < (buf - &__clear_page_start); i++)
                pr_debug("\t.word 0x%08x\n", (&__clear_page_start)[i]);
        pr_debug("\t.set pop\n");
}
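/*
 * Hypothetical plain-C model (illustration only, names are invented; it
 * ignores the prefetch instructions and the pref_bias_clear_store tail) of
 * the loop structure the generic path of build_clear_page() generates: the
 * pointer held in A0 is advanced by a whole loop's worth of bytes in the
 * middle of the unrolled body, and the second half of the stores uses
 * negative offsets, so the single bne against the end pointer in A2 doubles
 * as the loop-termination check with no separate counter register.
 */
#include <assert.h>
#include <stddef.h>
#include <string.h>

static void clear_page_model(unsigned char *page, size_t page_size,
                             size_t loop_bytes)
{
        unsigned char *p = page;
        unsigned char *end = page + page_size;  /* role of A2 */

        do {
                size_t off;

                /* first half: positive offsets from the old pointer */
                for (off = 0; off < loop_bytes / 2; off++)
                        p[off] = 0;

                p += loop_bytes;        /* pg_addiu(A0, A0, 2 * off) */

                /* second half: negative offsets from the advanced pointer */
                for (off = loop_bytes / 2; off > 0; off--)
                        p[-(long)off] = 0;
        } while (p != end);             /* uasm_il_bne(A0, A2, ...) */
}

int main(void)
{
        unsigned char page[4096];

        memset(page, 0xff, sizeof(page));
        clear_page_model(page, sizeof(page), 128);
        for (size_t i = 0; i < sizeof(page); i++)
                assert(page[i] == 0);
        return 0;
}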