void gendmtc1(void) { #ifdef INTERPRET_DMTC1 gencallinterp((unsigned int)DMTC1, 0); #else gencheck_cop1_unusable(); mov_eax_memoffs32((unsigned int*)dst->f.r.rt); mov_reg32_m32(EBX, ((unsigned int*)dst->f.r.rt)+1); mov_reg32_m32(EDX, (unsigned int*)(®_cop1_double[dst->f.r.nrd])); mov_preg32_reg32(EDX, EAX); mov_preg32pimm32_reg32(EDX, 4, EBX); #endif }
void genmtc1(void) { #ifdef INTERPRET_MTC1 gencallinterp((unsigned int)MTC1, 0); #else gencheck_cop1_unusable(); mov_eax_memoffs32((unsigned int*)dst->f.r.rt); mov_reg32_m32(EBX, (unsigned int*)(®_cop1_simple[dst->f.r.nrd])); mov_preg32_reg32(EBX, EAX); #endif }
void gendmtc1(void) { #ifdef INTERPRET_DMTC1 gencallinterp((native_type)cached_interpreter_table.DMTC1, 0); #else gencheck_cop1_unusable(); #ifdef __x86_64__ mov_xreg32_m32rel(EAX, (unsigned int*)dst->f.r.rt); mov_xreg32_m32rel(EBX, ((unsigned int*)dst->f.r.rt)+1); mov_xreg64_m64rel(RDX, (unsigned long long *)(®_cop1_double[dst->f.r.nrd])); mov_preg64_reg32(RDX, EAX); mov_preg64pimm32_reg32(RDX, 4, EBX); #else mov_eax_memoffs32((unsigned int*)dst->f.r.rt); mov_reg32_m32(EBX, ((unsigned int*)dst->f.r.rt)+1); mov_reg32_m32(EDX, (unsigned int*)(®_cop1_double[dst->f.r.nrd])); mov_preg32_reg32(EDX, EAX); mov_preg32pimm32_reg32(EDX, 4, EBX); #endif #endif }
/* Force x86 register 'reg' to be the cached, writable copy of the 32-bit
 * word at 'addr'.  The mapping is marked dirty so it will be written back.
 * If 'load' is non-zero the current value is brought into 'reg' first
 * (read-modify-write); otherwise the old contents are discarded.
 *
 * NOTE(review): the cache arrays (last_access, reg_content, dirty, r64,
 * free_since) appear to be indexed 0..7 by x86 GPR number — presumably a
 * 32-bit-x86-only code path; confirm against the emitter. */
void allocate_register_manually_w(int reg, unsigned int *addr, int load)
{
   int i;

   /* Case 1: 'reg' already caches 'addr' — just refresh the bookkeeping. */
   if (last_access[reg] != NULL && reg_content[reg] == addr)
   {
      /* Back-propagate "this register is live" to every instruction
       * compiled since its last access, so a mid-block entry point
       * knows to restore it. */
      precomp_instr *last = last_access[reg]+1;
      while (last <= dst)
      {
         last->reg_cache_infos.needed_registers[reg] = reg_content[reg];
         last++;
      }
      last_access[reg] = dst;
      /* 'reg' was half of a 64-bit pair: propagate liveness for the
       * partner register too, then release it (the pairing is broken
       * by this 32-bit write). */
      if (r64[reg] != -1)
      {
         last = last_access[r64[reg]]+1;
         while (last <= dst)
         {
            last->reg_cache_infos.needed_registers[r64[reg]] = reg_content[r64[reg]];
            last++;
         }
         last_access[r64[reg]] = NULL;
         free_since[r64[reg]] = dst+1;
         r64[reg] = -1;
      }
      dirty[reg] = 1;
      return;
   }

   /* Evict whatever currently occupies 'reg' (spilling it if mapped);
    * if it was free, mark it not-needed over the idle interval. */
   if (last_access[reg]) free_register(reg);
   else
   {
      while (free_since[reg] <= dst)
      {
         free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
         free_since[reg]++;
      }
   }

   // is it already cached ?
   for (i=0; i<8; i++)
   {
      if (last_access[i] != NULL && reg_content[i] == addr)
      {
         /* 'addr' lives in register 'i': migrate the mapping from i to reg. */
         precomp_instr *last = last_access[i]+1;
         while (last <= dst)
         {
            last->reg_cache_infos.needed_registers[i] = reg_content[i];
            last++;
         }
         last_access[i] = dst;
         /* Break any 64-bit pairing involving 'i'; note the partner is
          * marked NOT needed here (its value is about to be overwritten). */
         if (r64[i] != -1)
         {
            last = last_access[r64[i]]+1;
            while (last <= dst)
            {
               last->reg_cache_infos.needed_registers[r64[i]] = NULL;
               last++;
            }
            free_since[r64[i]] = dst+1;
            last_access[r64[i]] = NULL;
            r64[i] = -1;
         }
         if (load) mov_reg32_reg32(reg, i);   /* emit: reg <- i */
         last_access[reg] = dst;
         dirty[reg] = 1;
         r64[reg] = -1;
         reg_content[reg] = reg_content[i];
         free_since[i] = dst+1;
         last_access[i] = NULL;
         return;
      }
   }

   /* Not cached anywhere: claim 'reg' outright for 'addr'. */
   last_access[reg] = dst;
   reg_content[reg] = addr;
   dirty[reg] = 1;
   r64[reg] = -1;
   if (addr != NULL && load)
   {
      /* MIPS r0 is hard-wired to zero: clear the register instead of loading. */
      if (addr == r0 || addr == r0+1) xor_reg32_reg32(reg, reg);
      else mov_reg32_m32(reg, addr);
   }
}
// this function finds a register to put the data contained in addr, // if there was another value before it's cleanly removed of the // register cache. After that, the register number is returned. // If data are already cached, the function only returns the register number int allocate_register(unsigned int *addr) { unsigned int oldest_access = 0xFFFFFFFF; int reg = 0, i; // is it already cached ? if (addr != NULL) { for (i=0; i<8; i++) { if (last_access[i] != NULL && reg_content[i] == addr) { precomp_instr *last = last_access[i]+1; while (last <= dst) { last->reg_cache_infos.needed_registers[i] = reg_content[i]; last++; } last_access[i] = dst; if (r64[i] != -1) { last = last_access[r64[i]]+1; while (last <= dst) { last->reg_cache_infos.needed_registers[r64[i]] = reg_content[r64[i]]; last++; } last_access[r64[i]] = dst; } return i; } } } // if it's not cached, we take the least recently used register for (i=0; i<8; i++) { if (i != ESP && (unsigned int)last_access[i] < oldest_access) { oldest_access = (int)last_access[i]; reg = i; } } if (last_access[reg]) free_register(reg); else { while (free_since[reg] <= dst) { free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL; free_since[reg]++; } } last_access[reg] = dst; reg_content[reg] = addr; dirty[reg] = 0; r64[reg] = -1; if (addr != NULL) { if (addr == r0 || addr == r0+1) xor_reg32_reg32(reg, reg); else mov_reg32_m32(reg, addr); } return reg; }
/* Force x86 register 'reg' to be the cached, writable copy of the 32-bit
 * word at 'addr' (g_dev-namespaced variant).  The mapping is marked dirty
 * so it will be written back.  If 'load' is non-zero the current value is
 * brought into 'reg' first (read-modify-write); otherwise the old
 * contents are discarded.
 *
 * NOTE(review): the regcache_state arrays appear to be indexed 0..7 by
 * x86 GPR number — presumably a 32-bit-x86-only code path; confirm. */
void allocate_register_manually_w(int reg, unsigned int *addr, int load)
{
   int i;

   /* Case 1: 'reg' already caches 'addr' — just refresh the bookkeeping. */
   if (g_dev.r4300.regcache_state.last_access[reg] != NULL &&
       g_dev.r4300.regcache_state.reg_content[reg] == addr)
   {
      /* Back-propagate "this register is live" to every instruction
       * compiled since its last access, so a mid-block entry point
       * knows to restore it. */
      struct precomp_instr *last = g_dev.r4300.regcache_state.last_access[reg]+1;
      while (last <= g_dev.r4300.recomp.dst)
      {
         last->reg_cache_infos.needed_registers[reg] = g_dev.r4300.regcache_state.reg_content[reg];
         last++;
      }
      g_dev.r4300.regcache_state.last_access[reg] = g_dev.r4300.recomp.dst;
      /* 'reg' was half of a 64-bit pair: propagate liveness for the
       * partner register too, then release it (the pairing is broken
       * by this 32-bit write). */
      if (g_dev.r4300.regcache_state.r64[reg] != -1)
      {
         last = g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[reg]]+1;
         while (last <= g_dev.r4300.recomp.dst)
         {
            last->reg_cache_infos.needed_registers[g_dev.r4300.regcache_state.r64[reg]] =
               g_dev.r4300.regcache_state.reg_content[g_dev.r4300.regcache_state.r64[reg]];
            last++;
         }
         g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[reg]] = NULL;
         g_dev.r4300.regcache_state.free_since[g_dev.r4300.regcache_state.r64[reg]] = g_dev.r4300.recomp.dst+1;
         g_dev.r4300.regcache_state.r64[reg] = -1;
      }
      g_dev.r4300.regcache_state.dirty[reg] = 1;
      return;
   }

   /* Evict whatever currently occupies 'reg' (spilling it if mapped);
    * if it was free, mark it not-needed over the idle interval. */
   if (g_dev.r4300.regcache_state.last_access[reg]) free_register(reg);
   else
   {
      while (g_dev.r4300.regcache_state.free_since[reg] <= g_dev.r4300.recomp.dst)
      {
         g_dev.r4300.regcache_state.free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
         g_dev.r4300.regcache_state.free_since[reg]++;
      }
   }

   // is it already cached ?
   for (i=0; i<8; i++)
   {
      if (g_dev.r4300.regcache_state.last_access[i] != NULL &&
          g_dev.r4300.regcache_state.reg_content[i] == addr)
      {
         /* 'addr' lives in register 'i': migrate the mapping from i to reg. */
         struct precomp_instr *last = g_dev.r4300.regcache_state.last_access[i]+1;
         while (last <= g_dev.r4300.recomp.dst)
         {
            last->reg_cache_infos.needed_registers[i] = g_dev.r4300.regcache_state.reg_content[i];
            last++;
         }
         g_dev.r4300.regcache_state.last_access[i] = g_dev.r4300.recomp.dst;
         /* Break any 64-bit pairing involving 'i'; note the partner is
          * marked NOT needed here (its value is about to be overwritten). */
         if (g_dev.r4300.regcache_state.r64[i] != -1)
         {
            last = g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[i]]+1;
            while (last <= g_dev.r4300.recomp.dst)
            {
               last->reg_cache_infos.needed_registers[g_dev.r4300.regcache_state.r64[i]] = NULL;
               last++;
            }
            g_dev.r4300.regcache_state.free_since[g_dev.r4300.regcache_state.r64[i]] = g_dev.r4300.recomp.dst+1;
            g_dev.r4300.regcache_state.last_access[g_dev.r4300.regcache_state.r64[i]] = NULL;
            g_dev.r4300.regcache_state.r64[i] = -1;
         }
         if (load) mov_reg32_reg32(reg, i);   /* emit: reg <- i */
         g_dev.r4300.regcache_state.last_access[reg] = g_dev.r4300.recomp.dst;
         g_dev.r4300.regcache_state.dirty[reg] = 1;
         g_dev.r4300.regcache_state.r64[reg] = -1;
         g_dev.r4300.regcache_state.reg_content[reg] = g_dev.r4300.regcache_state.reg_content[i];
         g_dev.r4300.regcache_state.free_since[i] = g_dev.r4300.recomp.dst+1;
         g_dev.r4300.regcache_state.last_access[i] = NULL;
         return;
      }
   }

   /* Not cached anywhere: claim 'reg' outright for 'addr'. */
   g_dev.r4300.regcache_state.last_access[reg] = g_dev.r4300.recomp.dst;
   g_dev.r4300.regcache_state.reg_content[reg] = addr;
   g_dev.r4300.regcache_state.dirty[reg] = 1;
   g_dev.r4300.regcache_state.r64[reg] = -1;
   if (addr != NULL && load)
   {
      /* MIPS r0 is hard-wired to zero: clear the register instead of loading. */
      if (addr == g_dev.r4300.regcache_state.r0 || addr == g_dev.r4300.regcache_state.r0+1)
         xor_reg32_reg32(reg, reg);
      else
         mov_reg32_m32(reg, addr);
   }
}
/* Force x86 register 'reg' to be the cached, writable copy of the 32-bit
 * word at 'addr' (usf_state-threaded variant).  The mapping is marked
 * dirty so it will be written back.  If 'load' is non-zero the current
 * value is brought into 'reg' first (read-modify-write); otherwise the
 * old contents are discarded.
 *
 * NOTE(review): the state cache arrays appear to be indexed 0..7 by x86
 * GPR number — presumably a 32-bit-x86-only code path; confirm. */
void allocate_register_manually_w(usf_state_t * state, int reg, unsigned int *addr, int load)
{
   int i;

   /* Case 1: 'reg' already caches 'addr' — just refresh the bookkeeping. */
   if (state->last_access[reg] != NULL && state->reg_content[reg] == addr)
   {
      /* Back-propagate "this register is live" to every instruction
       * compiled since its last access, so a mid-block entry point
       * knows to restore it. */
      precomp_instr *last = state->last_access[reg]+1;
      while (last <= state->dst)
      {
         last->reg_cache_infos.needed_registers[reg] = state->reg_content[reg];
         last++;
      }
      state->last_access[reg] = state->dst;
      /* 'reg' was half of a 64-bit pair: propagate liveness for the
       * partner register too, then release it (the pairing is broken
       * by this 32-bit write). */
      if (state->r64[reg] != -1)
      {
         last = state->last_access[state->r64[reg]]+1;
         while (last <= state->dst)
         {
            last->reg_cache_infos.needed_registers[state->r64[reg]] = state->reg_content[state->r64[reg]];
            last++;
         }
         state->last_access[state->r64[reg]] = NULL;
         state->free_since[state->r64[reg]] = state->dst+1;
         state->r64[reg] = -1;
      }
      state->dirty[reg] = 1;
      return;
   }

   /* Evict whatever currently occupies 'reg' (spilling it if mapped);
    * if it was free, mark it not-needed over the idle interval. */
   if (state->last_access[reg]) free_register(state, reg);
   else
   {
      while (state->free_since[reg] <= state->dst)
      {
         state->free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL;
         state->free_since[reg]++;
      }
   }

   // is it already cached ?
   for (i=0; i<8; i++)
   {
      if (state->last_access[i] != NULL && state->reg_content[i] == addr)
      {
         /* 'addr' lives in register 'i': migrate the mapping from i to reg. */
         precomp_instr *last = state->last_access[i]+1;
         while (last <= state->dst)
         {
            last->reg_cache_infos.needed_registers[i] = state->reg_content[i];
            last++;
         }
         state->last_access[i] = state->dst;
         /* Break any 64-bit pairing involving 'i'; note the partner is
          * marked NOT needed here (its value is about to be overwritten). */
         if (state->r64[i] != -1)
         {
            last = state->last_access[state->r64[i]]+1;
            while (last <= state->dst)
            {
               last->reg_cache_infos.needed_registers[state->r64[i]] = NULL;
               last++;
            }
            state->free_since[state->r64[i]] = state->dst+1;
            state->last_access[state->r64[i]] = NULL;
            state->r64[i] = -1;
         }
         if (load) mov_reg32_reg32(state, reg, i);   /* emit: reg <- i */
         state->last_access[reg] = state->dst;
         state->dirty[reg] = 1;
         state->r64[reg] = -1;
         state->reg_content[reg] = state->reg_content[i];
         state->free_since[i] = state->dst+1;
         state->last_access[i] = NULL;
         return;
      }
   }

   /* Not cached anywhere: claim 'reg' outright for 'addr'. */
   state->last_access[reg] = state->dst;
   state->reg_content[reg] = addr;
   state->dirty[reg] = 1;
   state->r64[reg] = -1;
   if (addr != NULL && load)
   {
      /* MIPS r0 is hard-wired to zero: clear the register instead of loading. */
      if (addr == state->r0 || addr == state->r0+1)
         xor_reg32_reg32(state, reg, reg);
      else
         mov_reg32_m32(state, reg, addr);
   }
}
// this function finds a register to put the data contained in addr, // if there was another value before it's cleanly removed of the // register cache. After that, the register number is returned. // If data are already cached, the function only returns the register number int allocate_register(usf_state_t * state, unsigned int *addr) { unsigned int oldest_access = 0xFFFFFFFF; int reg = 0, i; // is it already cached ? if (addr != NULL) { for (i=0; i<8; i++) { if (state->last_access[i] != NULL && state->reg_content[i] == addr) { precomp_instr *last = state->last_access[i]+1; while (last <= state->dst) { last->reg_cache_infos.needed_registers[i] = state->reg_content[i]; last++; } state->last_access[i] = state->dst; if (state->r64[i] != -1) { last = state->last_access[state->r64[i]]+1; while (last <= state->dst) { last->reg_cache_infos.needed_registers[state->r64[i]] = state->reg_content[state->r64[i]]; last++; } state->last_access[state->r64[i]] = state->dst; } return i; } } } // if it's not cached, we take the least recently used register for (i=0; i<8; i++) { if (i != ESP && i != ESI && (unsigned int)state->last_access[i] < oldest_access) { oldest_access = (int)state->last_access[i]; reg = i; } } if (oldest_access == 0xFFFFFFFF) { int i = rand(); } if (state->last_access[reg]) free_register(state, reg); else { while (state->free_since[reg] <= state->dst) { state->free_since[reg]->reg_cache_infos.needed_registers[reg] = NULL; state->free_since[reg]++; } } state->last_access[reg] = state->dst; state->reg_content[reg] = addr; state->dirty[reg] = 0; state->r64[reg] = -1; if (addr != NULL) { if (addr == state->r0 || addr == state->r0+1) xor_reg32_reg32(state, reg, reg); else mov_reg32_m32(state, reg, addr); } return reg; }