/* Emit x86 code for the MIPS XOR instruction: rd = rs ^ rt.
 * A 64-bit guest register is cached in a pair of 32-bit x86 registers
 * (register1 = low word, register2 = high word), so the xor is emitted
 * once per half.  With INTERPRET_XOR defined the op falls back to a call
 * into the cached interpreter instead. */
void genxor(void)
{
#ifdef INTERPRET_XOR
    gencallinterp((unsigned int)cached_interpreter_table.XOR, 0);
#else
    int rs1 = allocate_64_register1((unsigned int *)g_dev.r4300.recomp.dst->f.r.rs);
    int rs2 = allocate_64_register2((unsigned int *)g_dev.r4300.recomp.dst->f.r.rs);
    int rt1 = allocate_64_register1((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt);
    int rt2 = allocate_64_register2((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt);
    int rd1 = allocate_64_register1_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);
    int rd2 = allocate_64_register2_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);

    if (rt1 != rd1 && rs1 != rd1)
    {
        /* rd does not alias either source's low half: copy rs into rd,
         * then xor rt directly into it. */
        mov_reg32_reg32(rd1, rs1);
        mov_reg32_reg32(rd2, rs2);
        xor_reg32_reg32(rd1, rt1);
        xor_reg32_reg32(rd2, rt2);
    }
    else
    {
        /* rd aliases a source: compute each half in a scratch register so
         * writing rd cannot clobber a still-needed source half.
         * NOTE(review): only the low halves are compared; presumably the
         * allocator pairs halves so the high halves alias the same way --
         * confirm against allocate_64_register1/2. */
        int temp = lru_register();
        free_register(temp);

        mov_reg32_reg32(temp, rs1);
        xor_reg32_reg32(temp, rt1);
        mov_reg32_reg32(rd1, temp);
        mov_reg32_reg32(temp, rs2);
        xor_reg32_reg32(temp, rt2);
        mov_reg32_reg32(rd2, temp);
    }
#endif
}
/* Emit x86 code for the MIPS XOR instruction: rd = rs ^ rt.
 * Older variant using file-global recompiler state (dst) and unsigned long
 * casts.  The 64-bit guest register is held in two 32-bit x86 registers
 * (register1 = low word, register2 = high word); the xor is done per half. */
void genxor()
{
#ifdef INTERPRET_XOR
    gencallinterp((unsigned long)XOR, 0);
#else
    int rs1 = allocate_64_register1((unsigned long *)dst->f.r.rs);
    int rs2 = allocate_64_register2((unsigned long *)dst->f.r.rs);
    int rt1 = allocate_64_register1((unsigned long *)dst->f.r.rt);
    int rt2 = allocate_64_register2((unsigned long *)dst->f.r.rt);
    int rd1 = allocate_64_register1_w((unsigned long *)dst->f.r.rd);
    int rd2 = allocate_64_register2_w((unsigned long *)dst->f.r.rd);

    if (rt1 != rd1 && rs1 != rd1)
    {
        /* Destination does not alias a source low half: copy then xor. */
        mov_reg32_reg32(rd1, rs1);
        mov_reg32_reg32(rd2, rs2);
        xor_reg32_reg32(rd1, rt1);
        xor_reg32_reg32(rd2, rt2);
    }
    else
    {
        /* Destination aliases a source: go through one scratch register,
         * one half at a time, so no source is overwritten early. */
        int temp = lru_register();
        free_register(temp);

        mov_reg32_reg32(temp, rs1);
        xor_reg32_reg32(temp, rt1);
        mov_reg32_reg32(rd1, temp);
        mov_reg32_reg32(temp, rs2);
        xor_reg32_reg32(temp, rt2);
        mov_reg32_reg32(rd2, temp);
    }
#endif
}
/* Emit x86 code for the MIPS XOR instruction: rd = rs ^ rt.
 * usf_state_t variant: all recompiler state and emitters are threaded
 * through the explicit state pointer.  64-bit guest registers live in two
 * 32-bit x86 registers (low/high halves), so the xor is emitted per half. */
void genxor(usf_state_t * state)
{
#ifdef INTERPRET_XOR
    gencallinterp(state, (unsigned int)state->current_instruction_table.XOR, 0);
#else
    int rs1 = allocate_64_register1(state, (unsigned int *)state->dst->f.r.rs);
    int rs2 = allocate_64_register2(state, (unsigned int *)state->dst->f.r.rs);
    int rt1 = allocate_64_register1(state, (unsigned int *)state->dst->f.r.rt);
    int rt2 = allocate_64_register2(state, (unsigned int *)state->dst->f.r.rt);
    int rd1 = allocate_64_register1_w(state, (unsigned int *)state->dst->f.r.rd);
    int rd2 = allocate_64_register2_w(state, (unsigned int *)state->dst->f.r.rd);

    if (rt1 != rd1 && rs1 != rd1)
    {
        /* rd does not alias a source low half: copy rs, xor rt in place. */
        mov_reg32_reg32(state, rd1, rs1);
        mov_reg32_reg32(state, rd2, rs2);
        xor_reg32_reg32(state, rd1, rt1);
        xor_reg32_reg32(state, rd2, rt2);
    }
    else
    {
        /* rd aliases a source: compute each half in a scratch register
         * before storing, so no needed source half is clobbered. */
        int temp = lru_register(state);
        free_register(state, temp);

        mov_reg32_reg32(state, temp, rs1);
        xor_reg32_reg32(state, temp, rt1);
        mov_reg32_reg32(state, rd1, temp);
        mov_reg32_reg32(state, temp, rs2);
        xor_reg32_reg32(state, temp, rt2);
        mov_reg32_reg32(state, rd2, temp);
    }
#endif
}
/* Emit x86 code for DSRLV: rd = rt >> (rs & 63), 64-bit logical right shift
 * by a variable amount.  The count is pinned in ECX because the x86
 * shrd/shr-by-CL forms require it.  shrd+shr handle counts 0-31; since x86
 * CL shifts only use the low 5 bits, the test/je sequence fixes up counts
 * 32-63 by moving the high word down and zeroing it. */
void gendsrlv(usf_state_t * state)
{
#ifdef INTERPRET_DSRLV
    gencallinterp(state, (unsigned int)state->current_instruction_table.DSRLV, 0);
#else
    int rt1, rt2, rd1, rd2;

    allocate_register_manually(state, ECX, (unsigned int *)state->dst->f.r.rs);

    rt1 = allocate_64_register1(state, (unsigned int *)state->dst->f.r.rt);
    rt2 = allocate_64_register2(state, (unsigned int *)state->dst->f.r.rt);
    rd1 = allocate_64_register1_w(state, (unsigned int *)state->dst->f.r.rd);
    rd2 = allocate_64_register2_w(state, (unsigned int *)state->dst->f.r.rd);

    if (rd1 != ECX && rd2 != ECX)
    {
        mov_reg32_reg32(state, rd1, rt1);
        mov_reg32_reg32(state, rd2, rt2);
        shrd_reg32_reg32_cl(state, rd1,rd2);
        shr_reg32_cl(state, rd2);
        /* Count >= 32: low word takes the shifted high word, high word
         * becomes zero.  je_rj(4) skips exactly the two 2-byte
         * instructions below (byte sizes noted in the trailing comments). */
        test_reg32_imm32(state, ECX, 0x20);
        je_rj(state, 4);
        mov_reg32_reg32(state, rd1, rd2); // 2
        xor_reg32_reg32(state, rd2, rd2); // 2
    }
    else
    {
        /* Destination was allocated in ECX (the count register): shift in
         * two scratch registers, then copy the result into rd. */
        int temp1, temp2;
        force_32(state, ECX);
        temp1 = lru_register(state);
        temp2 = lru_register_exc1(state, temp1);
        free_register(state, temp1);
        free_register(state, temp2);

        mov_reg32_reg32(state, temp1, rt1);
        mov_reg32_reg32(state, temp2, rt2);
        shrd_reg32_reg32_cl(state, temp1, temp2);
        shr_reg32_cl(state, temp2);
        test_reg32_imm32(state, ECX, 0x20);
        je_rj(state, 4);
        mov_reg32_reg32(state, temp1, temp2); // 2
        xor_reg32_reg32(state, temp2, temp2); // 2

        mov_reg32_reg32(state, rd1, temp1);
        mov_reg32_reg32(state, rd2, temp2);
    }
#endif
}
/* Emit x86 code for DSLLV: rd = rt << (rs & 63), 64-bit logical left shift
 * by a variable amount.  The count is pinned in ECX for the shld/shl-by-CL
 * forms.  shld+shl cover counts 0-31; x86 CL shifts use only the low 5
 * bits, so the test/je sequence fixes up counts 32-63 by moving the low
 * word up and zeroing the low word. */
void gendsllv(void)
{
#ifdef INTERPRET_DSLLV
    gencallinterp((unsigned int)cached_interpreter_table.DSLLV, 0);
#else
    int rt1, rt2, rd1, rd2;

    allocate_register_manually(ECX, (unsigned int *)g_dev.r4300.recomp.dst->f.r.rs);

    rt1 = allocate_64_register1((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt);
    rt2 = allocate_64_register2((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt);
    rd1 = allocate_64_register1_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);
    rd2 = allocate_64_register2_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);

    if (rd1 != ECX && rd2 != ECX)
    {
        mov_reg32_reg32(rd1, rt1);
        mov_reg32_reg32(rd2, rt2);
        shld_reg32_reg32_cl(rd2,rd1);
        shl_reg32_cl(rd1);
        /* Count >= 32: high word takes the shifted low word, low word
         * becomes zero.  je_rj(4) skips exactly the two 2-byte
         * instructions below (byte sizes in the trailing comments). */
        test_reg32_imm32(ECX, 0x20);
        je_rj(4);
        mov_reg32_reg32(rd2, rd1); // 2
        xor_reg32_reg32(rd1, rd1); // 2
    }
    else
    {
        /* Destination was allocated in ECX (the count register): shift in
         * two scratch registers, then copy the result into rd. */
        int temp1, temp2;
        force_32(ECX);
        temp1 = lru_register();
        temp2 = lru_register_exc1(temp1);
        free_register(temp1);
        free_register(temp2);

        mov_reg32_reg32(temp1, rt1);
        mov_reg32_reg32(temp2, rt2);
        shld_reg32_reg32_cl(temp2, temp1);
        shl_reg32_cl(temp1);
        test_reg32_imm32(ECX, 0x20);
        je_rj(4);
        mov_reg32_reg32(temp2, temp1); // 2
        xor_reg32_reg32(temp1, temp1); // 2

        mov_reg32_reg32(rd1, temp1);
        mov_reg32_reg32(rd2, temp2);
    }
#endif
}
/* Emit x86 code for DSRLV: rd = rt >> (rs & 63), 64-bit logical right shift
 * by a variable amount.  Older variant using file-global recompiler state.
 * The count is pinned in ECX for the shrd/shr-by-CL forms; the test/je
 * sequence fixes up counts 32-63 (x86 CL shifts use only the low 5 bits). */
void gendsrlv()
{
#ifdef INTERPRET_DSRLV
    gencallinterp((unsigned long)DSRLV, 0);
#else
    int rt1, rt2, rd1, rd2;

    allocate_register_manually(ECX, (unsigned long *)dst->f.r.rs);

    rt1 = allocate_64_register1((unsigned long *)dst->f.r.rt);
    rt2 = allocate_64_register2((unsigned long *)dst->f.r.rt);
    rd1 = allocate_64_register1_w((unsigned long *)dst->f.r.rd);
    rd2 = allocate_64_register2_w((unsigned long *)dst->f.r.rd);

    if (rd1 != ECX && rd2 != ECX)
    {
        mov_reg32_reg32(rd1, rt1);
        mov_reg32_reg32(rd2, rt2);
        shrd_reg32_reg32_cl(rd1,rd2);
        shr_reg32_cl(rd2);
        /* Count >= 32: low word takes the shifted high word, high word is
         * zeroed.  je_rj(4) skips the two 2-byte instructions below. */
        test_reg32_imm32(ECX, 0x20);
        je_rj(4);
        mov_reg32_reg32(rd1, rd2); // 2
        xor_reg32_reg32(rd2, rd2); // 2
    }
    else
    {
        /* Destination was allocated in ECX (the count register): shift in
         * two scratch registers, then copy the result into rd. */
        int temp1, temp2;
        force_32(ECX);
        temp1 = lru_register();
        temp2 = lru_register_exc1(temp1);
        free_register(temp1);
        free_register(temp2);

        mov_reg32_reg32(temp1, rt1);
        mov_reg32_reg32(temp2, rt2);
        shrd_reg32_reg32_cl(temp1, temp2);
        shr_reg32_cl(temp2);
        test_reg32_imm32(ECX, 0x20);
        je_rj(4);
        mov_reg32_reg32(temp1, temp2); // 2
        xor_reg32_reg32(temp2, temp2); // 2

        mov_reg32_reg32(rd1, temp1);
        mov_reg32_reg32(rd2, temp2);
    }
#endif
}
// this function finds a register to put the data contained in addr, // if there was another value before it's cleanly removed of the // register cache. After that, the register number is returned. // If data are already cached, the function only returns the register number int allocate_register_32(unsigned int *addr) { int reg = 0, i; // is it already cached ? if (addr != NULL) { for (i = 0; i < 8; i++) { if (last_access[i] != NULL && (unsigned int *) reg_content[i] == addr) { precomp_instr *last = last_access[i]+1; while (last <= dst) { last->reg_cache_infos.needed_registers[i] = reg_content[i]; last++; } last_access[i] = dst; is64bits[i] = 0; return i; } } } // it's not cached, so take the least recently used register reg = lru_register(); if (last_access[reg]) free_register(reg); else { while (free_since[reg] <= dst) { free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL; free_since[reg]++; } } last_access[reg] = dst; reg_content[reg] = (unsigned long long *) addr; dirty[reg] = 0; is64bits[reg] = 0; if (addr != NULL) { if (addr == (unsigned int *) r0) xor_reg32_reg32(reg, reg); else mov_xreg32_m32rel(reg, addr); } return reg; }
/* Emit x86 code for DSRL32: rd = rt >> (sa + 32).
 * After a 64-bit right shift of 32+sa bits, only the source's high word
 * survives, shifted right by sa, in the destination's low word; the
 * destination's high word is always zero. */
void gendsrl32()
{
#ifdef INTERPRET_DSRL32
    gencallinterp((unsigned long)DSRL32, 0);
#else
    int src_hi = allocate_64_register2((unsigned long *)dst->f.r.rt);
    int dst_lo = allocate_64_register1_w((unsigned long *)dst->f.r.rd);
    int dst_hi = allocate_64_register2_w((unsigned long *)dst->f.r.rd);

    /* low result = source high word >> sa */
    mov_reg32_reg32(dst_lo, src_hi);
    shr_reg32_imm8(dst_lo, dst->f.r.sa);
    /* high result is always zero */
    xor_reg32_reg32(dst_hi, dst_hi);
#endif
}
/* Emit x86 code for DSRL32: rd = rt >> (sa + 32).
 * Shifting right by 32+sa leaves only the source high word, shifted by sa,
 * in the destination low word; the destination high word is zero. */
void gendsrl32(usf_state_t * state)
{
#ifdef INTERPRET_DSRL32
    gencallinterp(state, (unsigned int)state->current_instruction_table.DSRL32, 0);
#else
    int src_hi = allocate_64_register2(state, (unsigned int *)state->dst->f.r.rt);
    int dst_lo = allocate_64_register1_w(state, (unsigned int *)state->dst->f.r.rd);
    int dst_hi = allocate_64_register2_w(state, (unsigned int *)state->dst->f.r.rd);

    /* low result = source high word >> sa */
    mov_reg32_reg32(state, dst_lo, src_hi);
    shr_reg32_imm8(state, dst_lo, state->dst->f.r.sa);
    /* high result is always zero */
    xor_reg32_reg32(state, dst_hi, dst_hi);
#endif
}
/* Emit x86 code for DSLL32: rd = rt << (sa + 32).
 * Shifting left by 32+sa moves the source's low word, shifted left by sa,
 * into the destination's high word; the destination's low word is zero. */
void gendsll32(void)
{
#ifdef INTERPRET_DSLL32
    gencallinterp((unsigned int)cached_interpreter_table.DSLL32, 0);
#else
    int src_lo = allocate_64_register1((unsigned int *)dst->f.r.rt);
    int dst_lo = allocate_64_register1_w((unsigned int *)dst->f.r.rd);
    int dst_hi = allocate_64_register2_w((unsigned int *)dst->f.r.rd);

    /* high result = source low word << sa */
    mov_reg32_reg32(dst_hi, src_lo);
    shl_reg32_imm8(dst_hi, dst->f.r.sa);
    /* low result is always zero */
    xor_reg32_reg32(dst_lo, dst_lo);
#endif
}
/* Emit x86 code for DSRL32: rd = rt >> (sa + 32).
 * Only the source's high word survives the shift, moved into the
 * destination's low word and shifted by sa; the high word is zeroed. */
void gendsrl32(void)
{
#ifdef INTERPRET_DSRL32
    gencallinterp((unsigned int)cached_interpreter_table.DSRL32, 0);
#else
    int src_hi = allocate_64_register2((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt);
    int dst_lo = allocate_64_register1_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);
    int dst_hi = allocate_64_register2_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);

    /* low result = source high word >> sa */
    mov_reg32_reg32(dst_lo, src_hi);
    shr_reg32_imm8(dst_lo, g_dev.r4300.recomp.dst->f.r.sa);
    /* high result is always zero */
    xor_reg32_reg32(dst_hi, dst_hi);
#endif
}
/* Emit x86 code for DIVU: lo = rs / rt, hi = rs % rt (32-bit unsigned).
 * EAX/EDX are claimed for lo/hi because the x86 div instruction writes the
 * quotient to EAX and the remainder to EDX.  A zero divisor skips the
 * division (MIPS leaves the result undefined for divide-by-zero, and this
 * avoids a host #DE fault); the je_rj displacement is the total byte size
 * of the skipped instructions, noted in the trailing comments. */
void gendivu(usf_state_t * state)
{
#ifdef INTERPRET_DIVU
    gencallinterp(state, (unsigned int)state->current_instruction_table.DIVU, 0);
#else
    int rs, rt;

    allocate_register_manually_w(state, EAX, (unsigned int *)&state->lo, 0);
    allocate_register_manually_w(state, EDX, (unsigned int *)&state->hi, 0);

    rs = allocate_register(state, (unsigned int*)state->dst->f.r.rs);
    rt = allocate_register(state, (unsigned int*)state->dst->f.r.rt);

    cmp_reg32_imm32(state, rt, 0);
    /* mov is 0 bytes when rs already sits in EAX */
    je_rj(state, (rs == EAX ? 0 : 2) + 2 + 2);
    mov_reg32_reg32(state, EAX, rs); // 0 or 2
    xor_reg32_reg32(state, EDX, EDX); // 2
    div_reg32(state, rt); // 2
#endif
}
/* Emit x86 code for DIVU: lo = rs / rt, hi = rs % rt (32-bit unsigned).
 * EAX/EDX are claimed for the mult/div result registers because x86 div
 * writes the quotient to EAX and the remainder to EDX.  A zero divisor
 * skips the division (result undefined on MIPS; avoids a host #DE fault);
 * the je_rj displacement is the byte size of the skipped instructions. */
void gendivu(void)
{
#ifdef INTERPRET_DIVU
    gencallinterp((unsigned int)cached_interpreter_table.DIVU, 0);
#else
    int rs, rt;

    allocate_register_manually_w(EAX, (unsigned int *)r4300_mult_lo(), 0);
    allocate_register_manually_w(EDX, (unsigned int *)r4300_mult_hi(), 0);

    rs = allocate_register((unsigned int*)g_dev.r4300.recomp.dst->f.r.rs);
    rt = allocate_register((unsigned int*)g_dev.r4300.recomp.dst->f.r.rt);

    cmp_reg32_imm32(rt, 0);
    /* mov is 0 bytes when rs already sits in EAX */
    je_rj((rs == EAX ? 0 : 2) + 2 + 2);
    mov_reg32_reg32(EAX, rs); // 0 or 2
    xor_reg32_reg32(EDX, EDX); // 2
    div_reg32(rt); // 2
#endif
}
/* Emit x86 code for DIVU: lo = rs / rt, hi = rs % rt (32-bit unsigned).
 * Older variant using file-global lo/hi.  EAX/EDX are claimed because x86
 * div writes quotient to EAX and remainder to EDX.  A zero divisor skips
 * the division (result undefined on MIPS; avoids a host #DE fault); the
 * je_rj displacement totals the byte sizes of the skipped instructions. */
void gendivu()
{
#ifdef INTERPRET_DIVU
    gencallinterp((unsigned long)DIVU, 0);
#else
    int rs, rt;

    allocate_register_manually_w(EAX, (unsigned long *)&lo, 0);
    allocate_register_manually_w(EDX, (unsigned long *)&hi, 0);

    rs = allocate_register((unsigned long*)dst->f.r.rs);
    rt = allocate_register((unsigned long*)dst->f.r.rt);

    cmp_reg32_imm32(rt, 0);
    /* mov is 0 bytes when rs already sits in EAX */
    je_rj((rs == EAX ? 0 : 2) + 2 + 2);
    mov_reg32_reg32(EAX, rs); // 0 or 2
    xor_reg32_reg32(EDX, EDX); // 2
    div_reg32(rt); // 2
#endif
}
/* Emit code for DIVU: lo = rs / rt, hi = rs % rt (32-bit unsigned).
 * x86-64 variant (32-bit register-cache entry points, unsigned long long
 * interpreter cast).  EAX/EDX are claimed for lo/hi because x86 div writes
 * quotient to EAX and remainder to EDX.  A zero divisor skips the division
 * (result undefined on MIPS; avoids a host #DE fault). */
void gendivu(void)
{
#if defined(COUNT_INSTR)
    /* NOTE(review): 74 is presumably DIVU's slot in the instruction-count
     * table -- confirm against the table's index list. */
    inc_m32abs(&instr_count[74]);
#endif
#ifdef INTERPRET_DIVU
    gencallinterp((unsigned long long)DIVU, 0);
#else
    int rs, rt;

    allocate_register_32_manually_w(EAX, (unsigned int *)&lo);
    allocate_register_32_manually_w(EDX, (unsigned int *)&hi);

    rs = allocate_register_32((unsigned int*)dst->f.r.rs);
    rt = allocate_register_32((unsigned int*)dst->f.r.rt);

    cmp_reg32_imm32(rt, 0);
    /* je_rj skips mov (0 bytes when rs is already EAX) + xor + div */
    je_rj((rs == EAX ? 0 : 2) + 2 + 2);
    mov_reg32_reg32(EAX, rs); // 0 or 2
    xor_reg32_reg32(EDX, EDX); // 2
    div_reg32(rt); // 2
#endif
}
/* Emit x86 code for DSLL: rd = rt << sa (64-bit, immediate shift amount).
 * shld carries bits from the low word into the high word, then the low
 * word itself is shifted.  The sa & 0x20 fixup is resolved at recompile
 * time -- NOTE(review): DSLL encodes sa in 5 bits (0-31), so this branch
 * looks defensive; confirm whether sa can ever carry bit 5 here. */
void gendsll()
{
#ifdef INTERPRET_DSLL
    gencallinterp((unsigned long)DSLL, 0);
#else
    int rt1 = allocate_64_register1((unsigned long *)dst->f.r.rt);
    int rt2 = allocate_64_register2((unsigned long *)dst->f.r.rt);
    int rd1 = allocate_64_register1_w((unsigned long *)dst->f.r.rd);
    int rd2 = allocate_64_register2_w((unsigned long *)dst->f.r.rd);

    mov_reg32_reg32(rd1, rt1);
    mov_reg32_reg32(rd2, rt2);
    shld_reg32_reg32_imm8(rd2, rd1, dst->f.r.sa);
    shl_reg32_imm8(rd1, dst->f.r.sa);

    if (dst->f.r.sa & 0x20)
    {
        /* Shift amount >= 32: high word takes the shifted low word,
         * low word becomes zero. */
        mov_reg32_reg32(rd2, rd1);
        xor_reg32_reg32(rd1, rd1);
    }
#endif
}
/* Emit x86 code for DSRL: rd = rt >> sa (64-bit logical, immediate shift).
 * shrd carries bits from the high word into the low word, then the high
 * word itself is shifted.  The sa & 0x20 fixup is resolved at recompile
 * time -- NOTE(review): DSRL encodes sa in 5 bits (0-31), so this branch
 * looks defensive; confirm whether sa can ever carry bit 5 here. */
void gendsrl(void)
{
#ifdef INTERPRET_DSRL
    gencallinterp((unsigned int)cached_interpreter_table.DSRL, 0);
#else
    int rt1 = allocate_64_register1((unsigned int *)dst->f.r.rt);
    int rt2 = allocate_64_register2((unsigned int *)dst->f.r.rt);
    int rd1 = allocate_64_register1_w((unsigned int *)dst->f.r.rd);
    int rd2 = allocate_64_register2_w((unsigned int *)dst->f.r.rd);

    mov_reg32_reg32(rd1, rt1);
    mov_reg32_reg32(rd2, rt2);
    shrd_reg32_reg32_imm8(rd1, rd2, dst->f.r.sa);
    shr_reg32_imm8(rd2, dst->f.r.sa);

    if (dst->f.r.sa & 0x20)
    {
        /* Shift amount >= 32: low word takes the shifted high word,
         * high word becomes zero. */
        mov_reg32_reg32(rd1, rd2);
        xor_reg32_reg32(rd2, rd2);
    }
#endif
}
/* Emit x86 code for DSLL: rd = rt << sa (64-bit, immediate shift amount).
 * usf_state_t variant.  shld carries bits from the low word into the high
 * word, then the low word is shifted.  The sa & 0x20 fixup is resolved at
 * recompile time -- NOTE(review): DSLL's sa field is 5 bits (0-31), so the
 * branch looks defensive; confirm whether sa can ever carry bit 5 here. */
void gendsll(usf_state_t * state)
{
#ifdef INTERPRET_DSLL
    gencallinterp(state, (unsigned int)state->current_instruction_table.DSLL, 0);
#else
    int rt1 = allocate_64_register1(state, (unsigned int *)state->dst->f.r.rt);
    int rt2 = allocate_64_register2(state, (unsigned int *)state->dst->f.r.rt);
    int rd1 = allocate_64_register1_w(state, (unsigned int *)state->dst->f.r.rd);
    int rd2 = allocate_64_register2_w(state, (unsigned int *)state->dst->f.r.rd);

    mov_reg32_reg32(state, rd1, rt1);
    mov_reg32_reg32(state, rd2, rt2);
    shld_reg32_reg32_imm8(state, rd2, rd1, state->dst->f.r.sa);
    shl_reg32_imm8(state, rd1, state->dst->f.r.sa);

    if (state->dst->f.r.sa & 0x20)
    {
        /* Shift amount >= 32: high word takes the shifted low word,
         * low word becomes zero. */
        mov_reg32_reg32(state, rd2, rd1);
        xor_reg32_reg32(state, rd1, rd1);
    }
#endif
}
/* Emit x86 code for DSLL: rd = rt << sa (64-bit, immediate shift amount).
 * g_dev variant.  shld carries bits from the low word into the high word,
 * then the low word is shifted.  The sa & 0x20 fixup is resolved at
 * recompile time -- NOTE(review): DSLL's sa field is 5 bits (0-31), so the
 * branch looks defensive; confirm whether sa can ever carry bit 5 here. */
void gendsll(void)
{
#ifdef INTERPRET_DSLL
    gencallinterp((unsigned int)cached_interpreter_table.DSLL, 0);
#else
    int rt1 = allocate_64_register1((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt);
    int rt2 = allocate_64_register2((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt);
    int rd1 = allocate_64_register1_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);
    int rd2 = allocate_64_register2_w((unsigned int *)g_dev.r4300.recomp.dst->f.r.rd);

    mov_reg32_reg32(rd1, rt1);
    mov_reg32_reg32(rd2, rt2);
    shld_reg32_reg32_imm8(rd2, rd1, g_dev.r4300.recomp.dst->f.r.sa);
    shl_reg32_imm8(rd1, g_dev.r4300.recomp.dst->f.r.sa);

    if (g_dev.r4300.recomp.dst->f.r.sa & 0x20)
    {
        /* Shift amount >= 32: high word takes the shifted low word,
         * low word becomes zero. */
        mov_reg32_reg32(rd2, rd1);
        xor_reg32_reg32(rd1, rd1);
    }
#endif
}
/* Cache the 32-bit r4300 value at addr in a SPECIFIC x86 register (reg),
 * for reading.  Handles three cases in order: the value is already in reg;
 * the value is cached in some other x86 register (copy + retire the old
 * one); the value is not cached at all (load from memory). */
void allocate_register_32_manually(int reg, unsigned int *addr)
{
    int i;

    /* check if we just happen to already have this r4300 reg cached in the
       requested x86 reg */
    if (last_access[reg] != NULL && reg_content[reg] == (unsigned long long *) addr)
    {
        /* Propagate the "needed" mark to every instruction recompiled
         * since reg was last touched. */
        precomp_instr *last = last_access[reg] + 1;

        while (last <= dst)
        {
            last->reg_cache_infos.needed_registers[reg] = reg_content[reg];
            last++;
        }
        last_access[reg] = dst;
        /* we won't touch is64bits or dirty; the register returned is
           "read-only" */
        return;
    }
    /* otherwise free up the requested x86 register */
    if (last_access[reg])
        free_register(reg);
    else
    {
        /* reg was free: record that nothing needs restoring into it for
         * every instruction since it was freed. */
        while (free_since[reg] <= dst)
        {
            free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
            free_since[reg]++;
        }
    }

    /* if the r4300 register is already cached in a different x86 register,
       then copy it to the requested x86 register */
    for (i=0; i<8; i++)
    {
        if (last_access[i] != NULL && reg_content[i] == (unsigned long long *) addr)
        {
            precomp_instr *last = last_access[i]+1;

            while (last <= dst)
            {
                last->reg_cache_infos.needed_registers[i] = reg_content[i];
                last++;
            }
            last_access[i] = dst;
            /* copy at the cached register's full width */
            if (is64bits[i])
                mov_reg64_reg64(reg, i);
            else
                mov_reg32_reg32(reg, i);
            /* reg inherits the old register's cache state */
            last_access[reg] = dst;
            is64bits[reg] = is64bits[i];
            dirty[reg] = dirty[i];
            reg_content[reg] = reg_content[i];
            /* free the previous x86 register used to cache this r4300
               register */
            free_since[i] = dst + 1;
            last_access[i] = NULL;

            return;
        }
    }

    /* otherwise just load the 32-bit value into the requested register */
    last_access[reg] = dst;
    reg_content[reg] = (unsigned long long *) addr;
    dirty[reg] = 0;
    is64bits[reg] = 0;
    if ((unsigned long long *) addr == r0)
        xor_reg32_reg32(reg, reg);    /* guest r0 always reads as zero */
    else
        mov_xreg32_m32rel(reg, addr);
}
// this function finds a register to put the data contained in addr, // if there was another value before it's cleanly removed of the // register cache. After that, the register number is returned. // If data are already cached, the function only returns the register number int allocate_register(usf_state_t * state, unsigned int *addr) { unsigned int oldest_access = 0xFFFFFFFF; int reg = 0, i; // is it already cached ? if (addr != NULL) { for (i=0; i<8; i++) { if (state->last_access[i] != NULL && state->reg_content[i] == addr) { precomp_instr *last = state->last_access[i]+1; while (last <= state->dst) { last->reg_cache_infos.needed_registers[i] = state->reg_content[i]; last++; } state->last_access[i] = state->dst; if (state->r64[i] != -1) { last = state->last_access[state->r64[i]]+1; while (last <= state->dst) { last->reg_cache_infos.needed_registers[state->r64[i]] = state->reg_content[state->r64[i]]; last++; } state->last_access[state->r64[i]] = state->dst; } return i; } } } // if it's not cached, we take the least recently used register for (i=0; i<8; i++) { if (i != ESP && i != ESI && (unsigned int)state->last_access[i] < oldest_access) { oldest_access = (int)state->last_access[i]; reg = i; } } if (oldest_access == 0xFFFFFFFF) { int i = rand(); } if (state->last_access[reg]) free_register(state, reg); else { while (state->free_since[reg] <= state->dst) { state->free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL; state->free_since[reg]++; } } state->last_access[reg] = state->dst; state->reg_content[reg] = addr; state->dirty[reg] = 0; state->r64[reg] = -1; if (addr != NULL) { if (addr == state->r0 || addr == state->r0+1) xor_reg32_reg32(state, reg, reg); else mov_reg32_m32(state, reg, addr); } return reg; }
/* Cache the r4300 value at addr in a SPECIFIC x86 register (reg) for
 * WRITING: the register is marked dirty and any 64-bit pairing is broken
 * (the upper half's cache slot is retired).  With load == 0 the old value
 * is not read in, since the caller will overwrite it entirely. */
void allocate_register_manually_w(int reg, unsigned int *addr, int load)
{
    int i;

    /* Case 1: addr is already cached in the requested register. */
    if (last_access[reg] != NULL && reg_content[reg] == addr)
    {
        /* Propagate the "needed" mark since reg was last touched. */
        precomp_instr *last = last_access[reg]+1;

        while (last <= dst)
        {
            last->reg_cache_infos.needed_registers[reg] = reg_content[reg];
            last++;
        }
        last_access[reg] = dst;
        /* Break the 64-bit pairing: retire the upper-half register. */
        if (r64[reg] != -1)
        {
            last = last_access[r64[reg]]+1;
            while (last <= dst)
            {
                last->reg_cache_infos.needed_registers[r64[reg]] = reg_content[r64[reg]];
                last++;
            }
            last_access[r64[reg]] = NULL;
            free_since[r64[reg]] = dst+1;
            r64[reg] = -1;
        }
        dirty[reg] = 1;
        return;
    }

    /* Free up the requested x86 register. */
    if (last_access[reg])
        free_register(reg);
    else
    {
        while (free_since[reg] <= dst)
        {
            free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
            free_since[reg]++;
        }
    }

    // is it already cached ?
    /* Case 2: addr is cached in a different x86 register i: move the cache
     * entry (and optionally the value) into reg and retire i. */
    for (i=0; i<8; i++)
    {
        if (last_access[i] != NULL && reg_content[i] == addr)
        {
            precomp_instr *last = last_access[i]+1;

            while (last <= dst)
            {
                last->reg_cache_infos.needed_registers[i] = reg_content[i];
                last++;
            }
            last_access[i] = dst;
            if (r64[i] != -1)
            {
                /* NOTE(review): here the upper-half slot is cleared to NULL
                 * rather than propagated as in case 1 -- presumably fine
                 * because the 32-bit write discards the upper half, but
                 * the asymmetry is worth confirming. */
                last = last_access[r64[i]]+1;
                while (last <= dst)
                {
                    last->reg_cache_infos.needed_registers[r64[i]] = NULL;
                    last++;
                }
                free_since[r64[i]] = dst+1;
                last_access[r64[i]] = NULL;
                r64[i] = -1;
            }

            if (load)
                mov_reg32_reg32(reg, i);
            last_access[reg] = dst;
            dirty[reg] = 1;
            r64[reg] = -1;
            reg_content[reg] = reg_content[i];
            free_since[i] = dst+1;
            last_access[i] = NULL;

            return;
        }
    }

    /* Case 3: not cached anywhere: claim reg and load if requested. */
    last_access[reg] = dst;
    reg_content[reg] = addr;
    dirty[reg] = 1;
    r64[reg] = -1;

    if (addr != NULL && load)
    {
        if (addr == r0 || addr == r0+1)
            xor_reg32_reg32(reg, reg);   /* guest r0 reads as zero */
        else
            mov_reg32_m32(reg, addr);
    }
}
// this function finds a register to put the data contained in addr, // if there was another value before it's cleanly removed of the // register cache. After that, the register number is returned. // If data are already cached, the function only returns the register number int allocate_register(unsigned int *addr) { unsigned int oldest_access = 0xFFFFFFFF; int reg = 0, i; // is it already cached ? if (addr != NULL) { for (i=0; i<8; i++) { if (last_access[i] != NULL && reg_content[i] == addr) { precomp_instr *last = last_access[i]+1; while (last <= dst) { last->reg_cache_infos.needed_registers[i] = reg_content[i]; last++; } last_access[i] = dst; if (r64[i] != -1) { last = last_access[r64[i]]+1; while (last <= dst) { last->reg_cache_infos.needed_registers[r64[i]] = reg_content[r64[i]]; last++; } last_access[r64[i]] = dst; } return i; } } } // if it's not cached, we take the least recently used register for (i=0; i<8; i++) { if (i != ESP && (unsigned int)last_access[i] < oldest_access) { oldest_access = (int)last_access[i]; reg = i; } } if (last_access[reg]) free_register(reg); else { while (free_since[reg] <= dst) { free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL; free_since[reg]++; } } last_access[reg] = dst; reg_content[reg] = addr; dirty[reg] = 0; r64[reg] = -1; if (addr != NULL) { if (addr == r0 || addr == r0+1) xor_reg32_reg32(reg, reg); else mov_reg32_m32(reg, addr); } return reg; }
/* Cache the r4300 value at addr in a SPECIFIC x86 register (reg) for
 * WRITING (usf_state_t variant): reg is marked dirty and any 64-bit
 * pairing is broken.  With load == 0 the previous value is not read in,
 * since the caller will overwrite the register entirely. */
void allocate_register_manually_w(usf_state_t * state, int reg, unsigned int *addr, int load)
{
    int i;

    /* Case 1: addr is already cached in the requested register. */
    if (state->last_access[reg] != NULL && state->reg_content[reg] == addr)
    {
        /* Propagate the "needed" mark since reg was last touched. */
        precomp_instr *last = state->last_access[reg]+1;

        while (last <= state->dst)
        {
            last->reg_cache_infos.needed_registers[reg] = state->reg_content[reg];
            last++;
        }
        state->last_access[reg] = state->dst;
        /* Break the 64-bit pairing: retire the upper-half register. */
        if (state->r64[reg] != -1)
        {
            last = state->last_access[state->r64[reg]]+1;
            while (last <= state->dst)
            {
                last->reg_cache_infos.needed_registers[state->r64[reg]] = state->reg_content[state->r64[reg]];
                last++;
            }
            state->last_access[state->r64[reg]] = NULL;
            state->free_since[state->r64[reg]] = state->dst+1;
            state->r64[reg] = -1;
        }
        state->dirty[reg] = 1;
        return;
    }

    /* Free up the requested x86 register. */
    if (state->last_access[reg])
        free_register(state, reg);
    else
    {
        while (state->free_since[reg] <= state->dst)
        {
            state->free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
            state->free_since[reg]++;
        }
    }

    // is it already cached ?
    /* Case 2: addr is cached in another x86 register i: move the cache
     * entry (and optionally the value) into reg and retire i. */
    for (i=0; i<8; i++)
    {
        if (state->last_access[i] != NULL && state->reg_content[i] == addr)
        {
            precomp_instr *last = state->last_access[i]+1;

            while (last <= state->dst)
            {
                last->reg_cache_infos.needed_registers[i] = state->reg_content[i];
                last++;
            }
            state->last_access[i] = state->dst;
            if (state->r64[i] != -1)
            {
                /* NOTE(review): the upper-half slot is cleared to NULL
                 * rather than propagated as in case 1 -- presumably fine
                 * because a 32-bit write discards the upper half; confirm. */
                last = state->last_access[state->r64[i]]+1;
                while (last <= state->dst)
                {
                    last->reg_cache_infos.needed_registers[state->r64[i]] = NULL;
                    last++;
                }
                state->free_since[state->r64[i]] = state->dst+1;
                state->last_access[state->r64[i]] = NULL;
                state->r64[i] = -1;
            }

            if (load)
                mov_reg32_reg32(state, reg, i);
            state->last_access[reg] = state->dst;
            state->dirty[reg] = 1;
            state->r64[reg] = -1;
            state->reg_content[reg] = state->reg_content[i];
            state->free_since[i] = state->dst+1;
            state->last_access[i] = NULL;

            return;
        }
    }

    /* Case 3: not cached anywhere: claim reg and load if requested. */
    state->last_access[reg] = state->dst;
    state->reg_content[reg] = addr;
    state->dirty[reg] = 1;
    state->r64[reg] = -1;

    if (addr != NULL && load)
    {
        if (addr == state->r0 || addr == state->r0+1)
            xor_reg32_reg32(state, reg, reg);   /* guest r0 reads as zero */
        else
            mov_reg32_m32(state, reg, addr);
    }
}
/* Cache the r4300 value at addr in a SPECIFIC x86 register (reg) for
 * WRITING (g_dev variant): reg is marked dirty and any 64-bit pairing is
 * broken.  With load == 0 the previous value is not read in, since the
 * caller will overwrite the register entirely. */
void allocate_register_manually_w(int reg, unsigned int *addr, int load)
{
    int i;

    /* Case 1: addr is already cached in the requested register. */
    if (g_dev.r4300.regcache_state.last_access[reg] != NULL && g_dev.r4300.regcache_state.reg_content[reg] == addr)
    {
        /* Propagate the "needed" mark since reg was last touched. */
        struct precomp_instr *last = g_dev.r4300.regcache_state.last_access[reg]+1;

        while (last <= g_dev.r4300.recomp.dst)
        {
            last->reg_cache_infos.needed_registers[reg] = g_dev.r4300.regcache_state.reg_content[reg];
            last++;
        }
        g_dev.r4300.regcache_state.last_access[reg] = g_dev.r4300.recomp.dst;
        /* Break the 64-bit pairing: retire the upper-half register. */
        if (g_dev.r4300.regcache_state.r64[reg] != -1)
        {
            last = g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[reg]]+1;
            while (last <= g_dev.r4300.recomp.dst)
            {
                last->reg_cache_infos.needed_registers[g_dev.r4300.regcache_state.r64[reg]] = g_dev.r4300.regcache_state.reg_content[g_dev.r4300.regcache_state.r64[reg]];
                last++;
            }
            g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[reg]] = NULL;
            g_dev.r4300.regcache_state.free_since[g_dev.r4300.regcache_state.r64[reg]] = g_dev.r4300.recomp.dst+1;
            g_dev.r4300.regcache_state.r64[reg] = -1;
        }
        g_dev.r4300.regcache_state.dirty[reg] = 1;
        return;
    }

    /* Free up the requested x86 register. */
    if (g_dev.r4300.regcache_state.last_access[reg])
        free_register(reg);
    else
    {
        while (g_dev.r4300.regcache_state.free_since[reg] <= g_dev.r4300.recomp.dst)
        {
            g_dev.r4300.regcache_state.free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
            g_dev.r4300.regcache_state.free_since[reg]++;
        }
    }

    // is it already cached ?
    /* Case 2: addr is cached in another x86 register i: move the cache
     * entry (and optionally the value) into reg and retire i. */
    for (i=0; i<8; i++)
    {
        if (g_dev.r4300.regcache_state.last_access[i] != NULL && g_dev.r4300.regcache_state.reg_content[i] == addr)
        {
            struct precomp_instr *last = g_dev.r4300.regcache_state.last_access[i]+1;

            while (last <= g_dev.r4300.recomp.dst)
            {
                last->reg_cache_infos.needed_registers[i] = g_dev.r4300.regcache_state.reg_content[i];
                last++;
            }
            g_dev.r4300.regcache_state.last_access[i] = g_dev.r4300.recomp.dst;
            if (g_dev.r4300.regcache_state.r64[i] != -1)
            {
                /* NOTE(review): the upper-half slot is cleared to NULL
                 * rather than propagated as in case 1 -- presumably fine
                 * because a 32-bit write discards the upper half; confirm. */
                last = g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[i]]+1;
                while (last <= g_dev.r4300.recomp.dst)
                {
                    last->reg_cache_infos.needed_registers[g_dev.r4300.regcache_state.r64[i]] = NULL;
                    last++;
                }
                g_dev.r4300.regcache_state.free_since[g_dev.r4300.regcache_state.r64[i]] = g_dev.r4300.recomp.dst+1;
                g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[i]] = NULL;
                g_dev.r4300.regcache_state.r64[i] = -1;
            }

            if (load)
                mov_reg32_reg32(reg, i);
            g_dev.r4300.regcache_state.last_access[reg] = g_dev.r4300.recomp.dst;
            g_dev.r4300.regcache_state.dirty[reg] = 1;
            g_dev.r4300.regcache_state.r64[reg] = -1;
            g_dev.r4300.regcache_state.reg_content[reg] = g_dev.r4300.regcache_state.reg_content[i];
            g_dev.r4300.regcache_state.free_since[i] = g_dev.r4300.recomp.dst+1;
            g_dev.r4300.regcache_state.last_access[i] = NULL;

            return;
        }
    }

    /* Case 3: not cached anywhere: claim reg and load if requested. */
    g_dev.r4300.regcache_state.last_access[reg] = g_dev.r4300.recomp.dst;
    g_dev.r4300.regcache_state.reg_content[reg] = addr;
    g_dev.r4300.regcache_state.dirty[reg] = 1;
    g_dev.r4300.regcache_state.r64[reg] = -1;

    if (addr != NULL && load)
    {
        if (addr == g_dev.r4300.regcache_state.r0 || addr == g_dev.r4300.regcache_state.r0+1)
            xor_reg32_reg32(reg, reg);   /* guest r0 reads as zero */
        else
            mov_reg32_m32(reg, addr);
    }
}