Example no. 1
void gencfc1(void)
{
#ifdef INTERPRET_CFC1
   gencallinterp((native_type)cached_interpreter_table.CFC1, 0);
#else
   gencheck_cop1_unusable();
#ifdef __x86_64__
   if(dst->f.r.nrd == 31)
      mov_xreg32_m32rel(EAX, (unsigned int*)&FCR31);
   else
      mov_xreg32_m32rel(EAX, (unsigned int*)&FCR0);
   mov_m32rel_xreg32((unsigned int*)dst->f.r.rt, EAX);
   sar_reg32_imm8(EAX, 31);
   mov_m32rel_xreg32(((unsigned int*)dst->f.r.rt)+1, EAX);
#else
   if(dst->f.r.nrd == 31)
      mov_eax_memoffs32((unsigned int*)&FCR31);
   else
      mov_eax_memoffs32((unsigned int*)&FCR0);
   mov_memoffs32_eax((unsigned int*)dst->f.r.rt);
   sar_reg32_imm8(EAX, 31);
   mov_memoffs32_eax(((unsigned int*)dst->f.r.rt)+1);
#endif
#endif
}
Example no. 2
void gencfc1(void)
{
#ifdef INTERPRET_CFC1
   gencallinterp((unsigned int)CFC1, 0);
#else
   gencheck_cop1_unusable();
   if(dst->f.r.nrd == 31) mov_eax_memoffs32((unsigned int*)&FCR31);
   else mov_eax_memoffs32((unsigned int*)&FCR0);
   mov_memoffs32_eax((unsigned int*)dst->f.r.rt);
   sar_reg32_imm8(EAX, 31);
   mov_memoffs32_eax(((unsigned int*)dst->f.r.rt)+1);
#endif
}
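
Both gencfc1 variants above emit the same computation: the selected FPU control register (FCR31 when nrd is 31, otherwise FCR0) is copied into the destination GPR and sign-extended to 64 bits by the arithmetic shift by 31. A minimal C sketch of that effect (my own illustration; fcr0, fcr31 and rt only stand in for the recompiler's FCR0, FCR31 and dst->f.r.rt):

#include <stdint.h>

/* Illustration only: the value computed by the code emitted by gencfc1. */
static void cfc1_effect(uint32_t nrd, uint32_t fcr0, uint32_t fcr31, int64_t *rt)
{
    uint32_t value = (nrd == 31) ? fcr31 : fcr0;
    /* storing the low word, then SAR 31 and storing the high word,
       is a sign extension to 64 bits */
    *rt = (int64_t)(int32_t)value;
}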
Example no. 3
void genctc1(void)
{
#ifdef INTERPRET_CTC1
   gencallinterp((unsigned int)CTC1, 0);
#else
   gencheck_cop1_unusable();
   
   if (dst->f.r.nrd != 31) return;
   mov_eax_memoffs32((unsigned int*)dst->f.r.rt);
   mov_memoffs32_eax((unsigned int*)&FCR31);
   and_eax_imm32(3);
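   /* Dispatch on the two rounding-mode bits just isolated in EAX.  The
      numeric comments below are the byte lengths of the instructions emitted
      by each call; the relative-jump distances passed to jne_rj() and
      jmp_imm_short() are sums of those lengths. */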
   
   cmp_eax_imm32(0);
   jne_rj(12);
   mov_m32_imm32((unsigned int*)&rounding_mode, 0x33F); // 10
   jmp_imm_short(48); // 2
   
   cmp_eax_imm32(1); // 5
   jne_rj(12); // 2
   mov_m32_imm32((unsigned int*)&rounding_mode, 0xF3F); // 10
   jmp_imm_short(29); // 2
   
   cmp_eax_imm32(2); // 5
   jne_rj(12); // 2
   mov_m32_imm32((unsigned int*)&rounding_mode, 0xB3F); // 10
   jmp_imm_short(10); // 2
   
   mov_m32_imm32((unsigned int*)&rounding_mode, 0x73F); // 10
   
   fldcw_m16((unsigned short*)&rounding_mode);
#endif
}
Example no. 4
void genctc1(void)
{
#ifdef INTERPRET_CTC1
   gencallinterp((native_type)cached_interpreter_table.CTC1, 0);
#else
   gencheck_cop1_unusable();
   
   if (dst->f.r.nrd != 31)
      return;
#ifdef __x86_64__
   mov_xreg32_m32rel(EAX, (unsigned int*)dst->f.r.rt);
   mov_m32rel_xreg32((unsigned int*)&FCR31, EAX);
   and_eax_imm32(3);
   
   cmp_eax_imm32(0);
   jne_rj(13);
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0x33F); // 11
   jmp_imm_short(51); // 2
   
   cmp_eax_imm32(1); // 5
   jne_rj(13); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0xF3F); // 11
   jmp_imm_short(31); // 2
   
   cmp_eax_imm32(2); // 5
   jne_rj(13); // 2
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0xB3F); // 11
   jmp_imm_short(11); // 2
   
   mov_m32rel_imm32((unsigned int*)&rounding_mode, 0x73F); // 11
   
   fldcw_m16rel((unsigned short*)&rounding_mode);
#else
   mov_eax_memoffs32((unsigned int*)dst->f.r.rt);
   mov_memoffs32_eax((unsigned int*)&FCR31);
   and_eax_imm32(3);
   
   cmp_eax_imm32(0);
   jne_rj(12);
   mov_m32_imm32((unsigned int*)&rounding_mode, 0x33F); // 10
   jmp_imm_short(48); // 2
   
   cmp_eax_imm32(1); // 5
   jne_rj(12); // 2
   mov_m32_imm32((unsigned int*)&rounding_mode, 0xF3F); // 10
   jmp_imm_short(29); // 2
   
   cmp_eax_imm32(2); // 5
   jne_rj(12); // 2
   mov_m32_imm32((unsigned int*)&rounding_mode, 0xB3F); // 10
   jmp_imm_short(10); // 2
   
   mov_m32_imm32((unsigned int*)&rounding_mode, 0x73F); // 10
   
   fldcw_m16((unsigned short*)&rounding_mode);
#endif
#endif
}
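
Both genctc1 variants translate the two low bits of FCR31 (the MIPS rounding mode) into an x87 control word, store it in rounding_mode, and load it with FLDCW. A minimal C sketch of the mapping implemented by the emitted compare/branch chain (my own illustration, not code from either tree):

#include <stdint.h>

/* Illustration only: FCR31 rounding mode -> x87 control word, as selected
 * by the emitted compare/branch chain.  The x87 RC field is in bits 11:10
 * of the control word: 00 = nearest, 01 = down, 10 = up, 11 = truncate. */
static uint16_t x87_control_word_for(uint32_t fcr31)
{
    switch (fcr31 & 3) {
    case 0:  return 0x33F; /* round to nearest             */
    case 1:  return 0xF3F; /* round toward zero (truncate) */
    case 2:  return 0xB3F; /* round toward +infinity       */
    default: return 0x73F; /* round toward -infinity       */
    }
}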
Example no. 5
void gendmultu(void)
{
#ifdef INTERPRET_DMULTU
   gencallinterp((unsigned long)DMULTU, 0);
#else
   free_all_registers();
   simplify_access();
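   
   /* 64x64 -> 128-bit unsigned multiply built from four 32x32 MULs; the
      running values are the temp1..temp4 / result2 / result3 named in the
      comments below, accumulated into hi:lo. */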
   
   mov_eax_memoffs32((unsigned long *)dst->f.r.rs);
   mul_m32((unsigned long *)dst->f.r.rt); // EDX:EAX = temp1
   mov_memoffs32_eax((unsigned long *)(&lo));
   
   mov_reg32_reg32(EBX, EDX); // EBX = temp1>>32
   mov_eax_memoffs32((unsigned long *)dst->f.r.rs);
   mul_m32((unsigned long *)(dst->f.r.rt)+1);
   add_reg32_reg32(EBX, EAX);
   adc_reg32_imm32(EDX, 0);
   mov_reg32_reg32(ECX, EDX); // ECX:EBX = temp2
   
   mov_eax_memoffs32((unsigned long *)(dst->f.r.rs)+1);
   mul_m32((unsigned long *)dst->f.r.rt); // EDX:EAX = temp3
   
   add_reg32_reg32(EBX, EAX);
   adc_reg32_imm32(ECX, 0); // ECX:EBX = result2
   mov_m32_reg32((unsigned long*)(&lo)+1, EBX);
   
   mov_reg32_reg32(ESI, EDX); // ESI = temp3>>32
   mov_eax_memoffs32((unsigned long *)(dst->f.r.rs)+1);
   mul_m32((unsigned long *)(dst->f.r.rt)+1);
   add_reg32_reg32(EAX, ESI);
   adc_reg32_imm32(EDX, 0); // EDX:EAX = temp4
   
   add_reg32_reg32(EAX, ECX);
   adc_reg32_imm32(EDX, 0); // EDX:EAX = result3
   mov_memoffs32_eax((unsigned long *)(&hi));
   mov_m32_reg32((unsigned long *)(&hi)+1, EDX);
#endif
}
Example no. 6
void gendmultu(usf_state_t * state)
{
#ifdef INTERPRET_DMULTU
   gencallinterp(state, (unsigned int)state->current_instruction_table.DMULTU, 0);
#else
   free_all_registers(state);
   simplify_access(state);
   
   mov_eax_memoffs32(state, (unsigned int *)state->dst->f.r.rs);
   mul_m32(state, (unsigned int *)state->dst->f.r.rt); // EDX:EAX = temp1
   mov_memoffs32_eax(state, (unsigned int *)(&state->lo));
   
   mov_reg32_reg32(state, EBX, EDX); // EBX = temp1>>32
   mov_eax_memoffs32(state, (unsigned int *)state->dst->f.r.rs);
   mul_m32(state, (unsigned int *)(state->dst->f.r.rt)+1);
   add_reg32_reg32(state, EBX, EAX);
   adc_reg32_imm32(state, EDX, 0);
   mov_reg32_reg32(state, ECX, EDX); // ECX:EBX = temp2
   
   mov_eax_memoffs32(state, (unsigned int *)(state->dst->f.r.rs)+1);
   mul_m32(state, (unsigned int *)state->dst->f.r.rt); // EDX:EAX = temp3
   
   add_reg32_reg32(state, EBX, EAX);
   adc_reg32_imm32(state, ECX, 0); // ECX:EBX = result2
   mov_m32_reg32(state, (unsigned int*)(&state->lo)+1, EBX);
   
   mov_reg32_reg32(state, EDI, EDX); // EDI = temp3>>32
   mov_eax_memoffs32(state, (unsigned int *)(state->dst->f.r.rs)+1);
   mul_m32(state, (unsigned int *)(state->dst->f.r.rt)+1);
   add_reg32_reg32(state, EAX, EDI);
   adc_reg32_imm32(state, EDX, 0); // EDX:EAX = temp4
   
   add_reg32_reg32(state, EAX, ECX);
   adc_reg32_imm32(state, EDX, 0); // EDX:EAX = result3
   mov_memoffs32_eax(state,(unsigned int *)(&state->hi));
   mov_m32_reg32(state, (unsigned int *)(&state->hi)+1, EDX);
#endif
}
Example no. 7
void gendmultu(void)
{
#ifdef INTERPRET_DMULTU
   gencallinterp((unsigned int)cached_interpreter_table.DMULTU, 0);
#else
   free_all_registers();
   simplify_access();
   
   mov_eax_memoffs32((unsigned int *)g_dev.r4300.recomp.dst->f.r.rs);
   mul_m32((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt); // EDX:EAX = temp1
   mov_memoffs32_eax((unsigned int *)(r4300_mult_lo()));
   
   mov_reg32_reg32(EBX, EDX); // EBX = temp1>>32
   mov_eax_memoffs32((unsigned int *)g_dev.r4300.recomp.dst->f.r.rs);
   mul_m32((unsigned int *)(g_dev.r4300.recomp.dst->f.r.rt)+1);
   add_reg32_reg32(EBX, EAX);
   adc_reg32_imm32(EDX, 0);
   mov_reg32_reg32(ECX, EDX); // ECX:EBX = temp2
   
   mov_eax_memoffs32((unsigned int *)(g_dev.r4300.recomp.dst->f.r.rs)+1);
   mul_m32((unsigned int *)g_dev.r4300.recomp.dst->f.r.rt); // EDX:EAX = temp3
   
   add_reg32_reg32(EBX, EAX);
   adc_reg32_imm32(ECX, 0); // ECX:EBX = result2
   mov_m32_reg32((unsigned int*)(r4300_mult_lo())+1, EBX);
   
   mov_reg32_reg32(ESI, EDX); // ESI = temp3>>32
   mov_eax_memoffs32((unsigned int *)(g_dev.r4300.recomp.dst->f.r.rs)+1);
   mul_m32((unsigned int *)(g_dev.r4300.recomp.dst->f.r.rt)+1);
   add_reg32_reg32(EAX, ESI);
   adc_reg32_imm32(EDX, 0); // EDX:EAX = temp4
   
   add_reg32_reg32(EAX, ECX);
   adc_reg32_imm32(EDX, 0); // EDX:EAX = result3
   mov_memoffs32_eax((unsigned int *)(r4300_mult_hi()));
   mov_m32_reg32((unsigned int *)(r4300_mult_hi())+1, EDX);
#endif
}
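
All three gendmultu variants emit the same schoolbook decomposition of the 64x64 -> 128-bit unsigned product into four 32x32 multiplications. As a reference, a minimal C sketch of the computation using the same intermediate names (my own illustration, not code from any of the trees above):

#include <stdint.h>

/* Illustration only: the value computed by the emitted DMULTU code. */
static void dmultu_effect(uint64_t rs, uint64_t rt, uint64_t *lo, uint64_t *hi)
{
    uint64_t temp1   = (uint64_t)(uint32_t)rs * (uint32_t)rt;                 /* lo*lo   */
    uint64_t temp2   = (temp1 >> 32)
                     + (uint64_t)(uint32_t)rs * (uint32_t)(rt >> 32);         /* + lo*hi */
    uint64_t temp3   = (uint64_t)(uint32_t)(rs >> 32) * (uint32_t)rt;         /* hi*lo   */
    uint64_t result2 = temp2 + (uint32_t)temp3;
    uint64_t temp4   = (temp3 >> 32)
                     + (uint64_t)(uint32_t)(rs >> 32) * (uint32_t)(rt >> 32); /* + hi*hi */

    *lo = (uint32_t)temp1 | (result2 << 32); /* low words of temp1 and result2 */
    *hi = temp4 + (result2 >> 32);           /* result3 in the comments above  */
}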
Example no. 8
void genjalr(void)
{
#ifdef INTERPRET_JALR
   gencallinterp((unsigned long)JALR, 0);
#else
   static unsigned long precomp_instr_size = sizeof(precomp_instr);
   unsigned long diff = 
     (unsigned long)(&dst->local_addr) - (unsigned long)(dst);
   unsigned long diff_need = 
     (unsigned long)(&dst->reg_cache_infos.need_map) - (unsigned long)(dst);
   unsigned long diff_wrap = 
     (unsigned long)(&dst->reg_cache_infos.jump_wrapper) - (unsigned long)(dst);
   unsigned long temp, temp2;
   
   if (((dst->addr & 0xFFF) == 0xFFC && 
       (dst->addr < 0x80000000 || dst->addr >= 0xC0000000))||no_compiled_jump)
     {
	gencallinterp((unsigned long)JALR, 1);
	return;
     }
   
   free_all_registers();
   simplify_access();
   mov_eax_memoffs32((unsigned long *)dst->f.r.rs);
   mov_memoffs32_eax((unsigned long *)&local_rs);
   
   gendelayslot();
   
   mov_m32_imm32((unsigned long *)(dst-1)->f.r.rd, dst->addr+4);
   if ((dst->addr+4) & 0x80000000)
     mov_m32_imm32(((unsigned long *)(dst-1)->f.r.rd)+1, 0xFFFFFFFF);
   else
     mov_m32_imm32(((unsigned long *)(dst-1)->f.r.rd)+1, 0);
   
   mov_eax_memoffs32((unsigned long *)&local_rs);
   mov_memoffs32_eax((unsigned long *)&last_addr);
   
   gencheck_interupt_reg();
   
   mov_eax_memoffs32((unsigned long *)&local_rs);
   mov_reg32_reg32(EBX, EAX);
   and_eax_imm32(0xFFFFF000);
   cmp_eax_imm32(dst_block->start & 0xFFFFF000);
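   /* je_near_rj(0) emits a near jump with a placeholder displacement; the
      code_length bookkeeping below (temp/temp2 and put32) patches the 4-byte
      offset once the length of the out-of-block path is known. */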
   je_near_rj(0);
   temp = code_length;
   
   mov_m32_reg32(&jump_to_address, EBX);
   mov_m32_imm32((unsigned long*)(&PC), (unsigned long)(dst+1));
   mov_reg32_imm32(EAX, (unsigned long)jump_to_func);
   call_reg32(EAX);
   
   temp2 = code_length;
   code_length = temp-4;
   put32(temp2 - temp);
   code_length = temp2;
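   
   /* Target lies inside the current block: compute its precomp_instr index,
      ((addr - start) / 4) * sizeof(precomp_instr), then either go through the
      register-cache jump_wrapper when need_map is set, or jump directly to
      the translated code at dst_block->code + local_addr. */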
   
   mov_reg32_reg32(EAX, EBX);
   sub_eax_imm32(dst_block->start);
   shr_reg32_imm8(EAX, 2);
   mul_m32((unsigned long *)(&precomp_instr_size));
   
   mov_reg32_preg32pimm32(EBX, EAX, (unsigned long)(dst_block->block)+diff_need);
   cmp_reg32_imm32(EBX, 1);
   jne_rj(7);
   
   add_eax_imm32((unsigned long)(dst_block->block)+diff_wrap); // 5
   jmp_reg32(EAX); // 2
   
   mov_reg32_preg32pimm32(EAX, EAX, (unsigned long)(dst_block->block)+diff);
   add_reg32_m32(EAX, (unsigned long *)(&dst_block->code));
   
   jmp_reg32(EAX);
#endif
}
Example no. 9
void genjr(void)
{
#ifdef INTERPRET_JR
    gencallinterp((unsigned int)cached_interpreter_table.JR, 1);
#else
    static unsigned int precomp_instr_size = sizeof(precomp_instr);
    unsigned int diff =
        (unsigned int)(&dst->local_addr) - (unsigned int)(dst);
    unsigned int diff_need =
        (unsigned int)(&dst->reg_cache_infos.need_map) - (unsigned int)(dst);
    unsigned int diff_wrap =
        (unsigned int)(&dst->reg_cache_infos.jump_wrapper) - (unsigned int)(dst);

    if (((dst->addr & 0xFFF) == 0xFFC &&
            (dst->addr < 0x80000000 || dst->addr >= 0xC0000000))||no_compiled_jump)
    {
        gencallinterp((unsigned int)cached_interpreter_table.JR, 1);
        return;
    }

    free_all_registers();
    simplify_access();
    mov_eax_memoffs32((unsigned int *)dst->f.i.rs);
    mov_memoffs32_eax((unsigned int *)&local_rs);

    gendelayslot();

    mov_eax_memoffs32((unsigned int *)&local_rs);
    mov_memoffs32_eax((unsigned int *)&last_addr);

    gencheck_interupt_reg();

    mov_eax_memoffs32((unsigned int *)&local_rs);
    mov_reg32_reg32(EBX, EAX);
    and_eax_imm32(0xFFFFF000);
    cmp_eax_imm32(dst_block->start & 0xFFFFF000);
    je_near_rj(0);

    jump_start_rel32();

    mov_m32_reg32(&jump_to_address, EBX);
    mov_m32_imm32((unsigned int*)(&PC), (unsigned int)(dst+1));
    mov_reg32_imm32(EAX, (unsigned int)jump_to_func);
    call_reg32(EAX);

    jump_end_rel32();

    mov_reg32_reg32(EAX, EBX);
    sub_eax_imm32(dst_block->start);
    shr_reg32_imm8(EAX, 2);
    mul_m32((unsigned int *)(&precomp_instr_size));

    mov_reg32_preg32pimm32(EBX, EAX, (unsigned int)(dst_block->block)+diff_need);
    cmp_reg32_imm32(EBX, 1);
    jne_rj(7);

    add_eax_imm32((unsigned int)(dst_block->block)+diff_wrap); // 5
    jmp_reg32(EAX); // 2

    mov_reg32_preg32pimm32(EAX, EAX, (unsigned int)(dst_block->block)+diff);
    add_reg32_m32(EAX, (unsigned int *)(&dst_block->code));

    jmp_reg32(EAX);
#endif
}
Example no. 10
void genjalr(usf_state_t * state)
{
#ifdef INTERPRET_JALR
   gencallinterp(state, (unsigned int)state->current_instruction_table.JALR, 0);
#else
   unsigned int diff =
     (unsigned int)(&state->dst->local_addr) - (unsigned int)(state->dst);
   unsigned int diff_need =
     (unsigned int)(&state->dst->reg_cache_infos.need_map) - (unsigned int)(state->dst);
   unsigned int diff_wrap =
     (unsigned int)(&state->dst->reg_cache_infos.jump_wrapper) - (unsigned int)(state->dst);
   
   if (((state->dst->addr & 0xFFF) == 0xFFC &&
       (state->dst->addr < 0x80000000 || state->dst->addr >= 0xC0000000))||state->no_compiled_jump)
     {
    gencallinterp(state, (unsigned int)state->current_instruction_table.JALR, 1);
    return;
     }
   
   free_all_registers(state);
   simplify_access(state);
   mov_eax_memoffs32(state, (unsigned int *)state->dst->f.r.rs);
   mov_memoffs32_eax(state, (unsigned int *)&state->local_rs);
   
   gendelayslot(state);
   
   mov_m32_imm32(state, (unsigned int *)(state->dst-1)->f.r.rd, state->dst->addr+4);
   if ((state->dst->addr+4) & 0x80000000)
     mov_m32_imm32(state, ((unsigned int *)(state->dst-1)->f.r.rd)+1, 0xFFFFFFFF);
   else
     mov_m32_imm32(state, ((unsigned int *)(state->dst-1)->f.r.rd)+1, 0);
   
   mov_eax_memoffs32(state, (unsigned int *)&state->local_rs);
   mov_memoffs32_eax(state, (unsigned int *)&state->last_addr);
   
   gencheck_interupt_reg(state);
   
   mov_eax_memoffs32(state, (unsigned int *)&state->local_rs);
   mov_reg32_reg32(state, EBX, EAX);
   and_eax_imm32(state, 0xFFFFF000);
   cmp_eax_imm32(state, state->dst_block->start & 0xFFFFF000);
   je_near_rj(state, 0);

   jump_start_rel32(state);
   
   mov_m32_reg32(state, &state->jump_to_address, EBX);
   mov_m32_imm32(state, (unsigned int*)(&state->PC), (unsigned int)(state->dst+1));
   mov_reg32_imm32(state, EBX, (unsigned int)jump_to_func);
   mov_reg32_reg32(state, RP0, ESI);
   call_reg32(state, EBX);

   jump_end_rel32(state);
   
   mov_reg32_reg32(state, EAX, EBX);
   sub_eax_imm32(state, state->dst_block->start);
   shr_reg32_imm8(state, EAX, 2);
   mul_m32(state, (unsigned int *)(&state->precomp_instr_size));
   
   mov_reg32_preg32pimm32(state, EBX, EAX, (unsigned int)(state->dst_block->block)+diff_need);
   cmp_reg32_imm32(state, EBX, 1);
   jne_rj(state, 7);
   
   add_eax_imm32(state, (unsigned int)(state->dst_block->block)+diff_wrap); // 5
   jmp_reg32(state, EAX); // 2
   
   mov_reg32_preg32pimm32(state, EAX, EAX, (unsigned int)(state->dst_block->block)+diff);
   add_reg32_m32(state, EAX, (unsigned int *)(&state->dst_block->code));
   
   jmp_reg32(state, EAX);
#endif
}
Example no. 11
void genjalr(void)
{
#ifdef INTERPRET_JALR
   gencallinterp((unsigned int)cached_interpreter_table.JALR, 0);
#else
   unsigned int diff =
     (unsigned int)(&g_dev.r4300.recomp.dst->local_addr) - (unsigned int)(g_dev.r4300.recomp.dst);
   unsigned int diff_need =
     (unsigned int)(&g_dev.r4300.recomp.dst->reg_cache_infos.need_map) - (unsigned int)(g_dev.r4300.recomp.dst);
   unsigned int diff_wrap =
     (unsigned int)(&g_dev.r4300.recomp.dst->reg_cache_infos.jump_wrapper) - (unsigned int)(g_dev.r4300.recomp.dst);
   
   if (((g_dev.r4300.recomp.dst->addr & 0xFFF) == 0xFFC && 
       (g_dev.r4300.recomp.dst->addr < 0x80000000 || g_dev.r4300.recomp.dst->addr >= 0xC0000000))||g_dev.r4300.recomp.no_compiled_jump)
     {
    gencallinterp((unsigned int)cached_interpreter_table.JALR, 1);
    return;
     }
   
   free_all_registers();
   simplify_access();
   mov_eax_memoffs32((unsigned int *)g_dev.r4300.recomp.dst->f.r.rs);
   mov_memoffs32_eax((unsigned int *)&g_dev.r4300.local_rs);
   
   gendelayslot();
   
   mov_m32_imm32((unsigned int *)(g_dev.r4300.recomp.dst-1)->f.r.rd, g_dev.r4300.recomp.dst->addr+4);
   if ((g_dev.r4300.recomp.dst->addr+4) & 0x80000000)
     mov_m32_imm32(((unsigned int *)(g_dev.r4300.recomp.dst-1)->f.r.rd)+1, 0xFFFFFFFF);
   else
     mov_m32_imm32(((unsigned int *)(g_dev.r4300.recomp.dst-1)->f.r.rd)+1, 0);
   
   mov_eax_memoffs32((unsigned int *)&g_dev.r4300.local_rs);
   mov_memoffs32_eax((unsigned int *)&g_dev.r4300.cp0.last_addr);
   
   gencheck_interupt_reg();
   
   mov_eax_memoffs32((unsigned int *)&g_dev.r4300.local_rs);
   mov_reg32_reg32(EBX, EAX);
   and_eax_imm32(0xFFFFF000);
   cmp_eax_imm32(g_dev.r4300.recomp.dst_block->start & 0xFFFFF000);
   je_near_rj(0);

   jump_start_rel32();
   
   mov_m32_reg32(&g_dev.r4300.cached_interp.jump_to_address, EBX);
   mov_m32_imm32((unsigned int*)(&(*r4300_pc_struct())), (unsigned int)(g_dev.r4300.recomp.dst+1));
   mov_reg32_imm32(EAX, (unsigned int)jump_to_func);
   call_reg32(EAX);
   
   jump_end_rel32();
   
   mov_reg32_reg32(EAX, EBX);
   sub_eax_imm32(g_dev.r4300.recomp.dst_block->start);
   shr_reg32_imm8(EAX, 2);
   mul_m32((unsigned int *)(&precomp_instr_size));
   
   mov_reg32_preg32pimm32(EBX, EAX, (unsigned int)(g_dev.r4300.recomp.dst_block->block)+diff_need);
   cmp_reg32_imm32(EBX, 1);
   jne_rj(7);
   
   add_eax_imm32((unsigned int)(g_dev.r4300.recomp.dst_block->block)+diff_wrap); // 5
   jmp_reg32(EAX); // 2
   
   mov_reg32_preg32pimm32(EAX, EAX, (unsigned int)(g_dev.r4300.recomp.dst_block->block)+diff);
   add_reg32_m32(EAX, (unsigned int *)(&g_dev.r4300.recomp.dst_block->code));
   
   jmp_reg32(EAX);
#endif
}
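
All three genjalr variants compile the same architectural effect: the link register receives the address of the instruction after the delay slot (JALR address + 8), stored sign-extended to 64 bits, and control transfers to the value rs held before the delay slot executed, which is why rs is copied to local_rs before gendelayslot(). A minimal C sketch of that effect (my own illustration; the emitted code additionally dispatches the jump through the compiled-block lookup shown above):

#include <stdint.h>

/* Illustration only: the effect compiled by genjalr (genjr is the same
 * without the link write).  'jalr_addr' is the address of the JALR
 * instruction; the code above writes the same value as dst->addr + 4,
 * because dst has already advanced to the delay-slot instruction. */
static void jalr_effect(uint32_t jalr_addr, uint64_t rs_before_delay_slot,
                        int64_t *rd, uint32_t *pc)
{
    uint32_t link = jalr_addr + 8;        /* instruction after the delay slot */
    *rd = (int64_t)(int32_t)link;         /* stored sign-extended to 64 bits  */
    *pc = (uint32_t)rs_before_delay_slot; /* jump target saved in local_rs    */
}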