/* Emit native code for the CFC1 instruction (move control word from CP1):
 * reads FCR31 (or FCR0 for any other control-register index) and stores it,
 * sign-extended to 64 bits, into GPR rt.  Dual x86-64 / x86 implementation. */
void gencfc1(void)
{
#ifdef INTERPRET_CFC1
   /* fall back to the cached interpreter for this opcode */
   gencallinterp((native_type)cached_interpreter_table.CFC1, 0);
#else
   gencheck_cop1_unusable();
#ifdef __x86_64__
   /* only FCR0 and FCR31 are implemented; any nrd other than 31 reads FCR0 */
   if(dst->f.r.nrd == 31) mov_xreg32_m32rel(EAX, (unsigned int*)&FCR31);
   else mov_xreg32_m32rel(EAX, (unsigned int*)&FCR0);
   /* store low word of rt, then sign-extend EAX and store the high word */
   mov_m32rel_xreg32((unsigned int*)dst->f.r.rt, EAX);
   sar_reg32_imm8(EAX, 31);
   mov_m32rel_xreg32(((unsigned int*)dst->f.r.rt)+1, EAX);
#else
   /* 32-bit x86 path: same sequence using absolute memory offsets */
   if(dst->f.r.nrd == 31) mov_eax_memoffs32((unsigned int*)&FCR31);
   else mov_eax_memoffs32((unsigned int*)&FCR0);
   mov_memoffs32_eax((unsigned int*)dst->f.r.rt);
   sar_reg32_imm8(EAX, 31);
   mov_memoffs32_eax(((unsigned int*)dst->f.r.rt)+1);
#endif
#endif
}
/* Emit native (x86-64) code for the CFC1 instruction: reads FCR31 (or FCR0
 * for any other control-register index) and stores it, sign-extended to
 * 64 bits, into GPR rt. */
void gencfc1(void)
{
#if defined(COUNT_INSTR)
   /* optional per-opcode execution counter; 113 is this opcode's slot */
   inc_m32rel(&instr_count[113]);
#endif
#ifdef INTERPRET_CFC1
   /* fall back to the cached interpreter for this opcode */
   gencallinterp((unsigned long long)cached_interpreter_table.CFC1, 0);
#else
   gencheck_cop1_unusable();
   /* only FCR0 and FCR31 are implemented; any nrd other than 31 reads FCR0 */
   if(dst->f.r.nrd == 31) mov_xreg32_m32rel(EAX, (unsigned int*)&FCR31);
   else mov_xreg32_m32rel(EAX, (unsigned int*)&FCR0);
   /* store low word of rt, then sign-extend EAX and store the high word */
   mov_m32rel_xreg32((unsigned int*)dst->f.r.rt, EAX);
   sar_reg32_imm8(EAX, 31);
   mov_m32rel_xreg32(((unsigned int*)dst->f.r.rt)+1, EAX);
#endif
}
/* Emit native (x86-64) code for the CTC1 instruction: writes GPR rt into
 * FCR31, then translates the MIPS rounding-mode field (FCR31 bits 0-1) into
 * an x87 control word and loads it with FLDCW.
 *
 * The numeric trailing comments (// 11, // 2, ...) are the byte lengths of
 * the emitted instructions; the jne_rj()/jmp_imm_short() relative offsets
 * are hand-summed from them, so the statement order and emitter choice must
 * not change without recomputing every offset. */
void genctc1(void)
{
#if defined(COUNT_INSTR)
   inc_m32rel(&instr_count[116]);
#endif
#ifdef INTERPRET_CTC1
   gencallinterp((unsigned long long)cached_interpreter_table.CTC1, 0);
#else
   gencheck_cop1_unusable();
   /* only control register 31 (FCR31) is writable; ignore everything else */
   if (dst->f.r.nrd != 31) return;
   mov_xreg32_m32rel(EAX, (unsigned int*)dst->f.r.rt);
   mov_m32rel_xreg32((unsigned int*)&FCR31, EAX);
   /* dispatch on the 2-bit MIPS rounding mode: 0=nearest, 1=toward zero,
      2=toward +inf, 3=toward -inf; each case stores the matching x87
      control word (RC bits differ) into rounding_mode */
   and_eax_imm32(3);
   cmp_eax_imm32(0);
   jne_rj(13);
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0x33F); // 11
   jmp_imm_short(51); // 2
   cmp_eax_imm32(1); // 5
   jne_rj(13); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0xF3F); // 11
   jmp_imm_short(31); // 2
   cmp_eax_imm32(2); // 5
   jne_rj(13); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0xB3F); // 11
   jmp_imm_short(11); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0x73F); // 11
   /* activate the selected rounding mode on the x87 FPU */
   fldcw_m16rel((unsigned short*)&rounding_mode);
#endif
}
void gendmtc1(void) { #if defined(COUNT_INSTR) inc_m32rel(&instr_count[115]); #endif #ifdef INTERPRET_DMTC1 gencallinterp((unsigned long long)cached_interpreter_table.DMTC1, 0); #else gencheck_cop1_unusable(); mov_xreg32_m32rel(EAX, (unsigned int*)dst->f.r.rt); mov_xreg32_m32rel(EBX, ((unsigned int*)dst->f.r.rt)+1); mov_xreg64_m64rel(RDX, (unsigned long long *)(®_cop1_double[dst->f.r.nrd])); mov_preg64_reg32(RDX, EAX); mov_preg64pimm32_reg32(RDX, 4, EBX); #endif }
/* Emit native code for the CTC1 instruction (dual x86-64 / x86
 * implementation): writes GPR rt into FCR31, then translates the MIPS
 * rounding-mode field (FCR31 bits 0-1) into an x87 control word and loads
 * it with FLDCW.
 *
 * The numeric trailing comments (// 11, // 10, ...) are the byte lengths of
 * the emitted instructions; the jne_rj()/jmp_imm_short() relative offsets
 * are hand-summed from them (they differ between the two arches because the
 * RIP-relative and absolute encodings have different sizes), so statement
 * order must not change without recomputing every offset. */
void genctc1(void)
{
#ifdef INTERPRET_CTC1
   gencallinterp((native_type)cached_interpreter_table.CTC1, 0);
#else
   gencheck_cop1_unusable();
   /* only control register 31 (FCR31) is writable; ignore everything else */
   if (dst->f.r.nrd != 31) return;
#ifdef __x86_64__
   mov_xreg32_m32rel(EAX, (unsigned int*)dst->f.r.rt);
   mov_m32rel_xreg32((unsigned int*)&FCR31, EAX);
   /* dispatch on the 2-bit MIPS rounding mode: 0=nearest, 1=toward zero,
      2=toward +inf, 3=toward -inf; store the matching x87 control word */
   and_eax_imm32(3);
   cmp_eax_imm32(0);
   jne_rj(13);
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0x33F); // 11
   jmp_imm_short(51); // 2
   cmp_eax_imm32(1); // 5
   jne_rj(13); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0xF3F); // 11
   jmp_imm_short(31); // 2
   cmp_eax_imm32(2); // 5
   jne_rj(13); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0xB3F); // 11
   jmp_imm_short(11); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0x73F); // 11
   fldcw_m16rel((unsigned short*)&rounding_mode);
#else
   /* 32-bit x86 path: same dispatch with absolute addressing (shorter
      encodings, hence the different offsets) */
   mov_eax_memoffs32((unsigned int*)dst->f.r.rt);
   mov_memoffs32_eax((unsigned int*)&FCR31);
   and_eax_imm32(3);
   cmp_eax_imm32(0);
   jne_rj(12);
   mov_m32_imm32((unsigned int*)&rounding_mode, 0x33F); // 10
   jmp_imm_short(48); // 2
   cmp_eax_imm32(1); // 5
   jne_rj(12); // 2
   mov_m32_imm32((unsigned int*)&rounding_mode, 0xF3F); // 10
   jmp_imm_short(29); // 2
   cmp_eax_imm32(2); // 5
   jne_rj(12); // 2
   mov_m32_imm32((unsigned int*)&rounding_mode, 0xB3F); // 10
   jmp_imm_short(10); // 2
   mov_m32_imm32((unsigned int*)&rounding_mode, 0x73F); // 10
   fldcw_m16((unsigned short*)&rounding_mode);
#endif
#endif
}
// this function finds a register to put the data contained in addr, // if there was another value before it's cleanly removed of the // register cache. After that, the register number is returned. // If data are already cached, the function only returns the register number int allocate_register_32(unsigned int *addr) { int reg = 0, i; // is it already cached ? if (addr != NULL) { for (i = 0; i < 8; i++) { if (last_access[i] != NULL && (unsigned int *) reg_content[i] == addr) { precomp_instr *last = last_access[i]+1; while (last <= dst) { last->reg_cache_infos.needed_registers[i] = reg_content[i]; last++; } last_access[i] = dst; is64bits[i] = 0; return i; } } } // it's not cached, so take the least recently used register reg = lru_register(); if (last_access[reg]) free_register(reg); else { while (free_since[reg] <= dst) { free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL; free_since[reg]++; } } last_access[reg] = dst; reg_content[reg] = (unsigned long long *) addr; dirty[reg] = 0; is64bits[reg] = 0; if (addr != NULL) { if (addr == (unsigned int *) r0) xor_reg32_reg32(reg, reg); else mov_xreg32_m32rel(reg, addr); } return reg; }
void gendmtc1(void) { #ifdef INTERPRET_DMTC1 gencallinterp((native_type)cached_interpreter_table.DMTC1, 0); #else gencheck_cop1_unusable(); #ifdef __x86_64__ mov_xreg32_m32rel(EAX, (unsigned int*)dst->f.r.rt); mov_xreg32_m32rel(EBX, ((unsigned int*)dst->f.r.rt)+1); mov_xreg64_m64rel(RDX, (unsigned long long *)(®_cop1_double[dst->f.r.nrd])); mov_preg64_reg32(RDX, EAX); mov_preg64pimm32_reg32(RDX, 4, EBX); #else mov_eax_memoffs32((unsigned int*)dst->f.r.rt); mov_reg32_m32(EBX, ((unsigned int*)dst->f.r.rt)+1); mov_reg32_m32(EDX, (unsigned int*)(®_cop1_double[dst->f.r.nrd])); mov_preg32_reg32(EDX, EAX); mov_preg32pimm32_reg32(EDX, 4, EBX); #endif #endif }
/* Emit native (x86-64) code for the MTC1 instruction (word move to CP1):
 * stores the low 32 bits of GPR rt into single-precision FPR nrd via the
 * pointer table returned by r4300_cp1_regs_simple().  This variant uses the
 * g_dev device-object API rather than file-scope globals. */
void genmtc1(void)
{
#if defined(COUNT_INSTR)
   /* optional per-opcode execution counter; 114 is this opcode's slot */
   inc_m32rel(&instr_count[114]);
#endif
#ifdef INTERPRET_MTC1
   /* fall back to the cached interpreter for this opcode */
   gencallinterp((unsigned long long)cached_interpreter_table.MTC1, 0);
#else
   gencheck_cop1_unusable();
   /* EAX = low word of GPR rt; RBX = pointer to the destination FPR */
   mov_xreg32_m32rel(EAX, (unsigned int*)g_dev.r4300.recomp.dst->f.r.rt);
   mov_xreg64_m64rel(RBX, (unsigned long long *)(&(r4300_cp1_regs_simple())[g_dev.r4300.recomp.dst->f.r.nrd]));
   mov_preg64_reg32(RBX, EAX);
#endif
}
/* Force the r4300 register at addr into a SPECIFIC x86 register (reg),
 * for instructions that need their operand in a fixed register (e.g.
 * shifts needing ECX).  Handles three cases in order: already cached in
 * the requested register, cached in a different register (move + retag),
 * or not cached at all (load from memory). */
void allocate_register_32_manually(int reg, unsigned int *addr)
{
   int i;

   /* check if we just happen to already have this r4300 reg cached in the
      requested x86 reg */
   if (last_access[reg] != NULL && reg_content[reg] == (unsigned long long *) addr)
   {
      /* backfill needed_registers for every instruction between the
         previous access and dst, so the mapping survives that span */
      precomp_instr *last = last_access[reg] + 1;

      while (last <= dst)
      {
         last->reg_cache_infos.needed_registers[reg] = reg_content[reg];
         last++;
      }
      last_access[reg] = dst;

      /* we won't touch is64bits or dirty; the register returned is
         "read-only" */
      return;
   }

   /* otherwise free up the requested x86 register */
   if (last_access[reg])
      free_register(reg);
   else
   {
      /* register was already free: mark it unneeded for every instruction
         since it was released */
      while (free_since[reg] <= dst)
      {
         free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
         free_since[reg]++;
      }
   }

   /* if the r4300 register is already cached in a different x86 register,
      then copy it to the requested x86 register */
   for (i=0; i<8; i++)
   {
      if (last_access[i] != NULL && reg_content[i] == (unsigned long long *) addr)
      {
         precomp_instr *last = last_access[i]+1;

         while (last <= dst)
         {
            last->reg_cache_infos.needed_registers[i] = reg_content[i];
            last++;
         }
         last_access[i] = dst;
         /* emit a register-to-register move of the cached width */
         if (is64bits[i])
            mov_reg64_reg64(reg, i);
         else
            mov_reg32_reg32(reg, i);
         /* transfer the cache entry's full state to the new register */
         last_access[reg] = dst;
         is64bits[reg] = is64bits[i];
         dirty[reg] = dirty[i];
         reg_content[reg] = reg_content[i];

         /* free the previous x86 register used to cache this r4300
            register */
         free_since[i] = dst + 1;
         last_access[i] = NULL;

         return;
      }
   }

   /* otherwise just load the 32-bit value into the requested register */
   last_access[reg] = dst;
   reg_content[reg] = (unsigned long long *) addr;
   dirty[reg] = 0;
   is64bits[reg] = 0;

   /* r0 is hardwired to zero, so clearing the register is cheaper than a
      memory load */
   if ((unsigned long long *) addr == r0)
      xor_reg32_reg32(reg, reg);
   else
      mov_xreg32_m32rel(reg, addr);
}
/* Emit native (x86-64) code for the JALR instruction (jump-and-link
 * register): captures the target from GPR rs before the delay slot can
 * clobber it, executes the delay slot, writes the sign-extended return
 * address (PC of delay slot + 4) into GPR rd, then either jumps directly
 * into the current compiled block (same 4KB page) or exits through
 * jump_to_func for a cross-block jump.
 *
 * The trailing // byte-count comments feed the hand-computed jne_rj(11)
 * offset; do not reorder these emitter calls without recomputing it. */
void genjalr(void)
{
#if defined(COUNT_INSTR)
   inc_m32rel(&instr_count[62]);
#endif
#ifdef INTERPRET_JALR
   gencallinterp((unsigned long long)cached_interpreter_table.JALR, 0);
#else
   static unsigned int precomp_instr_size = sizeof(precomp_instr);
   /* offsets used to index into the target block's precomp_instr array */
   unsigned int diff = (unsigned int) offsetof(precomp_instr, local_addr);
   unsigned int diff_need = (unsigned int) offsetof(precomp_instr, reg_cache_infos.need_map);
   unsigned int diff_wrap = (unsigned int) offsetof(precomp_instr, reg_cache_infos.jump_wrapper);

   /* delay slot on the last word of a non-cached page, or compiled jumps
      disabled: punt to the interpreter */
   if (((dst->addr & 0xFFF) == 0xFFC &&
        (dst->addr < 0x80000000 || dst->addr >= 0xC0000000))||no_compiled_jump)
   {
      gencallinterp((unsigned long long)cached_interpreter_table.JALR, 1);
      return;
   }

   free_registers_move_start();

   /* save rs now: the delay slot instruction may overwrite it */
   mov_xreg32_m32rel(EAX, (unsigned int *)dst->f.r.rs);
   mov_m32rel_xreg32((unsigned int *)&local_rs, EAX);

   gendelayslot();

   /* link: rd = address after the delay slot, sign-extended to 64 bits */
   mov_m32rel_imm32((unsigned int *)(dst-1)->f.r.rd, dst->addr+4);
   if ((dst->addr+4) & 0x80000000)
      mov_m32rel_imm32(((unsigned int *)(dst-1)->f.r.rd)+1, 0xFFFFFFFF);
   else
      mov_m32rel_imm32(((unsigned int *)(dst-1)->f.r.rd)+1, 0);

   mov_xreg32_m32rel(EAX, (unsigned int *)&local_rs);
   mov_m32rel_xreg32((unsigned int *)&last_addr, EAX);

   gencheck_interupt_reg();

   /* EBX = jump target; compare its 4KB page against this block's page */
   mov_xreg32_m32rel(EAX, (unsigned int *)&local_rs);
   mov_reg32_reg32(EBX, EAX);
   and_eax_imm32(0xFFFFF000);
   cmp_eax_imm32(dst_block->start & 0xFFFFF000);
   je_near_rj(0);  /* offset patched by jump_start/end_rel32 below */

   jump_start_rel32();

   /* cross-block path: store the target, set PC, and tail-call
      jump_to_func to locate/compile the destination block */
   mov_m32rel_xreg32(&jump_to_address, EBX);
   mov_reg64_imm64(RAX, (unsigned long long) (dst+1));
   mov_m64rel_xreg64((unsigned long long *)(&PC), RAX);
   mov_reg64_imm64(RAX, (unsigned long long) jump_to_func);
   call_reg64(RAX); /* will never return from call */

   jump_end_rel32();

   /* same-block path: RAX = index of the target instruction scaled by
      sizeof(precomp_instr), RSI = base of the block's instruction array */
   mov_reg64_imm64(RSI, (unsigned long long) dst_block->block);
   mov_reg32_reg32(EAX, EBX);
   sub_eax_imm32(dst_block->start);
   shr_reg32_imm8(EAX, 2);
   mul_m32rel((unsigned int *)(&precomp_instr_size));

   /* if need_map is set, enter through the register-restoring
      jump_wrapper instead of the raw code address */
   mov_reg32_preg64preg64pimm32(EBX, RAX, RSI, diff_need);
   cmp_reg32_imm32(EBX, 1);
   jne_rj(11);

   add_reg32_imm32(EAX, diff_wrap); // 6
   add_reg64_reg64(RAX, RSI); // 3
   jmp_reg64(RAX); // 2

   /* otherwise jump to code base + the instruction's local_addr */
   mov_reg32_preg64preg64pimm32(EBX, RAX, RSI, diff);
   mov_rax_memoffs64((unsigned long long *) &dst_block->code);
   add_reg64_reg64(RAX, RBX);
   jmp_reg64(RAX);
#endif
}