/* Decode SH opcodes whose first nibble is 8: conditional branches (bt/bf,
 * with or without delay slot) and byte/word moves between R0 and @(disp,Rm).
 * Fills the relevant fields of op and returns op->size. */
static int first_nibble_is_8(RAnal* anal, RAnalOp* op, ut16 code){
	if (IS_BT_OR_BF(code)) {
		op->type = R_ANAL_OP_TYPE_CJMP; //Jump if true or jump if false insns
		op->jump = disarm_8bit_offset (op->addr, GET_BTF_OFFSET(code));
		op->fail = op->addr + 2 ; // fall-through: next 16-bit instruction
		op->eob = true;
		if (IS_BTS(code) || IS_BFS(code))
			op->delay = 1; //Only /S versions have a delay slot
	} else if (IS_MOVB_REGDISP_R0(code)) {
		// 10000100mmmmi4*1 mov.b @(<disp>,<REG_M>),R0
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->dst = anal_fill_ai_rg (anal, 0); // destination is always R0
		op->src[0] = anal_fill_reg_disp_mem (anal, GET_SOURCE_REG(code), code&0x0F, BYTE_SIZE);
	} else if (IS_MOVW_REGDISP_R0(code)) {
		// 10000101mmmmi4*2 mov.w @(<disp>,<REG_M>),R0
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->dst = anal_fill_ai_rg (anal, 0);
		op->src[0] = anal_fill_reg_disp_mem (anal, GET_SOURCE_REG(code), code&0x0F, WORD_SIZE);
	} else if (IS_CMPIMM(code)) {
		op->type = R_ANAL_OP_TYPE_CMP; //todo : finish implementing
	} else if (IS_MOVB_R0_REGDISP(code)) {
		/* 10000000mmmmi4*1 mov.b R0,@(<disp>,<REG_M>)*/
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, 0); // source is always R0
		op->dst = anal_fill_reg_disp_mem (anal, GET_SOURCE_REG(code), code&0x0F, BYTE_SIZE);
	} else if (IS_MOVW_R0_REGDISP(code)) {
		// 10000001mmmmi4*2 mov.w R0,@(<disp>,<REG_M>))
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, 0);
		op->dst = anal_fill_reg_disp_mem (anal, GET_SOURCE_REG(code), code&0x0F, WORD_SIZE);
	}
	return op->size;
}
/* Decode SH opcodes whose first nibble is 3: add/sub (plus the carry and
 * overflow variants), the register compare family, div1 and dmul{s,u}.l.
 * Returns op->size. */
static int first_nibble_is_3(RAnal* anal, RAnalOp* op, ut16 code){
	//TODO Handle carry/overflow , CMP/xx?
	if( IS_ADD(code) || IS_ADDC(code) || IS_ADDV(code) ) {
		op->type = R_ANAL_OP_TYPE_ADD;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if ( IS_SUB(code) || IS_SUBC(code) || IS_SUBV(code)) {
		op->type = R_ANAL_OP_TYPE_SUB;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_CMPEQ(code) || IS_CMPGE(code) || IS_CMPGT(code) ||
			IS_CMPHI(code) || IS_CMPHS(code)) {
		//TODO : finish implementing
		op->type = R_ANAL_OP_TYPE_CMP;
		// compares only set the T flag, so both registers go in src[]
		op->src[0] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		op->src[1] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
	} else if (IS_DIV1(code)) {
		op->type = R_ANAL_OP_TYPE_DIV;
		op->src[0] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		op->src[1] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		//todo: dest ?
	} else if (IS_DMULU(code) || IS_DMULS(code)) {
		op->type = R_ANAL_OP_TYPE_MUL;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->src[1] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		//todo: dest=MACL,MACH
	}
	return op->size;
}
/* Classify an x86 ADD decoded by x86im: fills op->dst/src per addressing
 * form and tracks stack effects (set via [ebp+disp], resize via add esp,imm).
 * The "TODO: Deprecate" sections keep the legacy op->ref/stackop fields. */
static void anal_add(RAnal *anal, RAnalOp *op, x86im_instr_object io) {
	st64 imm, disp;
	imm = r_hex_bin_truncate (io.imm, io.imm_size);
	disp = r_hex_bin_truncate (io.disp, io.disp_size);
	op->type = R_ANAL_OP_TYPE_ADD;
	switch (io.id) {
	case X86IM_IO_ID_ADD_MM_RG: /* add [0x0ff | reg1+reg2+0x0ff], reg */
		op->dst = anal_fill_ai_mm (anal, io);
		op->src[0] = anal_fill_ai_rg (anal, io, 0);
		/* TODO: Deprecate */
		if (io.mem_base == 0) { /* add [0x0ff], reg */
			op->ref = disp;
		} else if ((X86IM_IO_ROP_GET_ID (io.mem_base) == X86IM_IO_ROP_ID_EBP) &&
				io.mem_index == 0) { /* add [ebp+0x0ff], reg*/
			op->stackop = R_ANAL_STACK_SET; // writes a stack-frame slot
			op->ref = disp;
		}
		break;
	case X86IM_IO_ID_ADD_RG_MM: /* add reg, [0x0ff | reg1+reg2+0x0ff] */
		op->dst = anal_fill_ai_rg (anal, io, 0);
		op->src[0] = anal_fill_ai_mm (anal, io);
		/* TODO: Deprecate */
		if (io.mem_base == 0) { /* add reg, [0x0ff] */
			op->ref = disp;
		} else if ((X86IM_IO_ROP_GET_ID (io.mem_base) == X86IM_IO_ROP_ID_EBP) &&
				io.mem_index == 0) { /* add reg, [ebp+0x0ff] */
			op->stackop = R_ANAL_STACK_GET; // reads a stack-frame slot
			op->ref = disp;
		}
		break;
	case X86IM_IO_ID_ADD_R1_R2: /* add reg2, reg1 */
	case X86IM_IO_ID_ADD_R2_R1:
		op->dst = anal_fill_ai_rg (anal, io, 0);
		op->src[0] = anal_fill_ai_rg (anal, io, 1);
		break;
	case X86IM_IO_ID_ADD_MM_IM: /* add [0x0ff | reg1+reg2+0x0ff], 0x1 */
		op->dst = anal_fill_ai_mm (anal, io);
		op->src[0] = anal_fill_im (anal, io);
		/* TODO: Deprecate */
		if (io.mem_base == 0) { /* add [0x0ff], 0x1 */
			op->ref = disp;
		}
		break;
	case X86IM_IO_ID_ADD_RG_IM: /* add reg, 0x1 */
	case X86IM_IO_ID_ADD_AC_IM:
		op->dst = anal_fill_ai_rg (anal, io, 0);
		op->src[0] = anal_fill_im (anal, io);
		/* TODO: Deprecate */
		if (X86IM_IO_ROP_GET_ID (io.rop[0]) == X86IM_IO_ROP_ID_ESP) {
			/* add esp, 0x1 — shrink the tracked stack frame */
			op->stackop = R_ANAL_STACK_INCSTACK;
			op->value = imm;
			op->stackptr = -imm;
		}
		break;
	}
}
/* Decode SH opcodes whose first nibble is 4: shift/rotate forms are matched
 * exactly via the 0xF0FF mask switch; jsr/jmp-through-register and the
 * ldc/lds/stc/sts system-register moves are matched via the IS_* macros.
 * Returns op->size. */
static int first_nibble_is_4(RAnal* anal, RAnalOp* op, ut16 code){
	switch (code & 0xF0FF) { //todo: implement
	case 0x4020: //shal
		op->type = R_ANAL_OP_TYPE_SAL;
		break;
	case 0x4021: //shar
		op->type = R_ANAL_OP_TYPE_SAR;
		break;
	case 0x4000: //shll
	case 0x4008: //shll2
	case 0x4018: //shll8
	case 0x4028: //shll16
		op->type = R_ANAL_OP_TYPE_SHL;
		break;
	case 0x4001: //shlr
	case 0x4009: //shlr2
	case 0x4019: //shlr8
	case 0x4029: //shlr16
		op->type = R_ANAL_OP_TYPE_SHR;
		break;
	default:
		break;
	}
	if (IS_JSR(code)) {
		op->type = R_ANAL_OP_TYPE_UCALL; //call to reg
		op->delay = 1;
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if ( IS_JMP(code) ) {
		op->type = R_ANAL_OP_TYPE_UJMP; //jmp to reg
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		op->delay = 1;
		op->eob = true;
	} else if (IS_CMPPL(code) || IS_CMPPZ(code)) {
		op->type = R_ANAL_OP_TYPE_CMP; //todo: implement
	} else if (IS_LDCLSR1(code) || IS_LDSLMAC(code) || IS_LDSLPR(code)) {
		op->type = R_ANAL_OP_TYPE_POP; //todo: implement
	} else if (IS_LDCSR1(code) || IS_LDSMAC(code) || IS_LDSPR(code)) {
		op->type = R_ANAL_OP_TYPE_MOV; //todo: implement
	} else if (IS_ROT(code)) {
		// low bit selects rotate direction
		op->type = (code&1)? R_ANAL_OP_TYPE_ROR:R_ANAL_OP_TYPE_ROL; //todo: implement rot* vs rotc*
	} else if (IS_STCLSR1(code) || IS_STSLMAC(code) || IS_STSLPR(code)) {
		op->type = R_ANAL_OP_TYPE_PUSH; //todo: implement st*.l *,@-Rn
	} else if (IS_TASB(code)) {
		op->type = R_ANAL_OP_TYPE_UNK; //todo: implement
	} else if (IS_DT(code)) {
		op->type = R_ANAL_OP_TYPE_UNK; //todo: implement
	}
	return op->size;
}
/* Decode SH opcodes whose first nibble is 2 (older variant): register-to-
 * memory moves of all widths and the and/xor/or register forms.
 * Returns op->size. */
static int first_nibble_is_2(RAnal* anal, RAnalOp* op, ut16 code){
	//TODO handle mov.x Rm, @Rn, (X)OR/AND regs,
	if (IS_MOVB_REG_TO_REGREF(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_reg_ref(anal,GET_TARGET_REG(code),BYTE_SIZE);
	} else if (IS_MOVW_REG_TO_REGREF(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_reg_ref(anal,GET_TARGET_REG(code),WORD_SIZE);
	} else if (IS_MOVL_REG_TO_REGREF(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_reg_ref(anal,GET_TARGET_REG(code),LONG_SIZE);
	} else if (IS_AND_REGS(code)){
		op->type = R_ANAL_OP_TYPE_AND;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
	} else if (IS_XOR_REGS(code)){
		op->type = R_ANAL_OP_TYPE_XOR;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
	} else if (IS_OR_REGS(code)){
		op->type = R_ANAL_OP_TYPE_OR;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
	}
	//TODO Handle 'pushes' (mov Rm,@-Rn)
	//TODO Handle CMP/STR ??
	return op->size;
}
/* Decode SH opcodes whose first nibble is 3 (older variant): only the
 * add/sub register families are handled here. Returns op->size. */
static int first_nibble_is_3(RAnal* anal, RAnalOp* op, ut16 code){
	//TODO Handle carry/overflow , CMP/xx?
	int is_add = IS_ADD (code) || IS_ADDC (code) || IS_ADDV (code);
	int is_sub = !is_add && (IS_SUB (code) || IS_SUBC (code) || IS_SUBV (code));
	if (is_add || is_sub) {
		/* Both families share the same operand layout: Rm -> Rn. */
		op->type = is_add ? R_ANAL_OP_TYPE_ADD : R_ANAL_OP_TYPE_SUB;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG (code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG (code));
	}
	return op->size;
}
/* Decode SH opcodes whose first nibble is 4 (older variant): only jsr/jmp
 * through a register are handled. Returns op->size. */
static int first_nibble_is_4(RAnal* anal, RAnalOp* op, ut16 code){
	/* jmp @Rn and jsr @Rn are mutually exclusive encodings, so the
	 * order of the checks does not matter. Both have a delay slot. */
	if (IS_JMP (code)) {
		op->type = R_ANAL_OP_TYPE_UJMP; //jmp to reg
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG (code));
		op->delay = 1;
		op->eob = R_TRUE;
	} else if (IS_JSR (code)) {
		op->type = R_ANAL_OP_TYPE_UCALL; //call to reg
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG (code));
		op->delay = 1;
	}
	//TODO shifts + many system insns + CMP/P[L|Z]??
	return op->size;
}
//nibble=9; 1001nnnni8p2.... mov.w @(<disp>,PC),<REG_N> static int movw_pcdisp_reg(RAnal* anal, RAnalOp* op, ut16 code){ op->type = R_ANAL_OP_TYPE_LOAD; op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code)); //op->src[0] = anal_fill_reg_disp_mem(anal,PC_IDX,code&0xFF,WORD_SIZE); //XXX trash in 2 commits op->src[0] = anal_pcrel_disp_mov (anal, op, code&0xFF, WORD_SIZE); return op->size; }
/* Classify an x86 CMP decoded by x86im. CMP only sets flags, so both
 * operands go into op->src[] and op->dst is left untouched. */
static void anal_cmp(RAnal *anal, RAnalOp *op, x86im_instr_object io) {
	//st64 imm = r_hex_bin_truncate (io.imm, io.imm_size);
	st64 disp = r_hex_bin_truncate (io.disp, io.disp_size);
	op->type = R_ANAL_OP_TYPE_CMP;
	switch (io.id) {
	case X86IM_IO_ID_CMP_MM_RG: /* cmp [0x0ff | reg1+reg2+0x0ff], reg */
		op->src[0] = anal_fill_ai_mm (anal, io);
		op->src[1] = anal_fill_ai_rg (anal, io, 0);
		/* TODO: Deprecate */
		if (io.mem_base == 0) { /* cmp [0x0ff], reg */
			op->ref = disp;
		}
		break;
	case X86IM_IO_ID_CMP_R1_R2: /* cmp reg2, reg1 */
	case X86IM_IO_ID_CMP_R2_R1:
		op->src[0] = anal_fill_ai_rg (anal, io, 0);
		op->src[1] = anal_fill_ai_rg (anal, io, 1);
		break;
	case X86IM_IO_ID_CMP_RG_MM: /* cmp reg, [0x0ff | reg1+reg2+0x0ff] */
		op->src[0] = anal_fill_ai_rg (anal, io, 0);
		op->src[1] = anal_fill_ai_mm (anal, io);
		/* TODO: Deprecate */
		if (io.mem_base == 0) { /* cmp reg, [0x0ff] */
			op->ref = disp;
		}
		break;
	case X86IM_IO_ID_CMP_MM_IM: /* cmp [0x0ff | reg1+reg2+0x0ff], 0x1 */
		op->src[0] = anal_fill_ai_mm (anal, io);
		op->src[1] = anal_fill_im (anal, io);
		/* TODO: Deprecate */
		if (io.mem_base == 0) { /* cmp [0x0ff], 0x1 */
			op->ref = disp;
		} else if ((X86IM_IO_ROP_GET_ID (io.mem_base) == X86IM_IO_ROP_ID_EBP) &&
				io.mem_index == 0) { /* cmp [ebp+0x0ff], 0x1*/
			op->stackop = R_ANAL_STACK_GET; // reads a stack-frame slot
			op->ref = disp;
		}
		break;
	case X86IM_IO_ID_CMP_RG_IM: /* cmp reg, 0x1 */
	case X86IM_IO_ID_CMP_AC_IM:
		op->src[0] = anal_fill_ai_rg (anal, io, 0);
		op->src[1] = anal_fill_im (anal, io);
		break;
	}
}
/* Classify an x86 LEA: pure address computation, no memory access. */
static void anal_lea(RAnal *anal, RAnalOp *op, x86im_instr_object io) {
	/* lea reg, [0x0ff | reg1+reg2+0x0ff] */
	op->type = R_ANAL_OP_TYPE_LEA;
	op->src[0] = anal_fill_ai_mm (anal, io);
	op->dst = anal_fill_ai_rg (anal, io, 0);
}
/* Decode SH opcodes whose first nibble is 8 (older variant): bt/bf
 * conditional branches and @(disp,Rm) -> R0 moves. Returns op->size. */
static int first_nibble_is_8(RAnal* anal, RAnalOp* op, ut16 code){
	if (IS_BT_OR_BF(code)){
		op->type = R_ANAL_OP_TYPE_CJMP; //Jump if true or jump if false insns
		/* Target = PC+4 + sign-extended 8-bit displacement * 2 (insns are
		 * 16-bit). The previous code shifted the *address* left by one
		 * instead of scaling the displacement, yielding bogus targets. */
		op->jump = op->addr + 4 + ((st8)GET_BTF_OFFSET(code)) * 2;
		op->fail = op->addr + 2 ; // fall-through address
		op->eob = R_TRUE;
		if (IS_BTS(code) || IS_BFS(code))
			op->delay = 1; //Only /S versions have a delay slot
	} else if (IS_MOVB_REGDISP_R0(code)){
		/* 10000100mmmmi4*1 mov.b @(<disp>,<REG_M>),R0 — the destination is
		 * architecturally R0; bits 8-11 are part of the opcode here, so
		 * GET_TARGET_REG would have returned a constant opcode nibble. */
		op->type = R_ANAL_OP_TYPE_MOV;
		op->dst = anal_fill_ai_rg(anal,0);
		op->src[0] = anal_fill_reg_disp_mem(anal,GET_SOURCE_REG(code),code&0x0F,BYTE_SIZE);
	} else if (IS_MOVW_REGDISP_R0(code)){
		/* 10000101mmmmi4*2 mov.w @(<disp>,<REG_M>),R0 */
		op->type = R_ANAL_OP_TYPE_MOV;
		op->dst = anal_fill_ai_rg(anal,0);
		op->src[0] = anal_fill_reg_disp_mem(anal,GET_SOURCE_REG(code),code&0x0F,WORD_SIZE);
	}
	//TODO some movs + CMP/EQ??
	return op->size;
}
/* Decode SH opcodes whose first nibble is 6 (older variant): register move
 * and @Rm -> Rn loads of all widths. Returns op->size. */
static int first_nibble_is_6(RAnal* anal, RAnalOp* op, ut16 code){
	if (IS_MOV_REGS (code)) {
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG (code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG (code));
	} else {
		/* The three dereferencing loads differ only in access width. */
		int width = 0;
		if (IS_MOVB_REGREF_TO_REG (code)) {
			width = BYTE_SIZE;
		} else if (IS_MOVW_REGREF_TO_REG (code)) {
			width = WORD_SIZE;
		} else if (IS_MOVL_REGREF_TO_REG (code)) {
			width = LONG_SIZE;
		}
		if (width != 0) {
			op->type = R_ANAL_OP_TYPE_MOV;
			op->src[0] = anal_fill_reg_ref (anal, GET_SOURCE_REG (code), width);
			op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG (code));
		}
	}
	//TODO neg(c) + MOV.L @Rm+,Rn
	return op->size;
}
/* Classify an x86 CALL decoded by x86im. Relative calls get a concrete
 * op->jump; indirect calls through memory are resolved by dereferencing
 * the pointer via the IO backend when one is attached. */
static void anal_call(RAnal *anal, RAnalOp *op, x86im_instr_object io) {
	st64 imm, disp;
	imm = r_hex_bin_truncate (io.imm, io.imm_size);
	disp = r_hex_bin_truncate (io.disp, io.disp_size);
	switch (io.id) {
	case X86IM_IO_ID_CALL_N_R: /* call 0x0ff */
		op->type = R_ANAL_OP_TYPE_CALL;
		op->dst = anal_fill_r (anal, io, op->addr);
		op->jump = op->addr + io.len + imm; // relative to next insn
		op->fail = op->addr + io.len;
		break;
	case X86IM_IO_ID_CALL_N_AI_MM: /* call [0x0ff | reg1+reg2+0x0ff] */
	case X86IM_IO_ID_CALL_F_AI_MM: /* call dword far [0x0ff | reg1+reg2+0x0ff] */
		op->dst = anal_fill_ai_mm (anal, io);
		op->type = R_ANAL_OP_TYPE_UCALL;
		op->fail = op->addr + io.len;
		/* TODO: Deprecate */
		if (io.mem_base == 0)
			op->ref = disp;
		if (anal->iob.io != NULL) {
			/* Dereference the call target for RIP-relative and absolute
			 * memory operands, upgrading UCALL to a resolved CALL. */
			if (io.mem_base == X86IM_IO_ROP_ID_RIP) {
				op->type = R_ANAL_OP_TYPE_CALL;
				op->jump = 0LL;
				anal->iob.read_at(anal->iob.io, op->addr + io.len + disp,
					(ut8*)&op->jump, anal->bits==64?8:4);
			} else if (io.mem_base == 0) {
				op->type = R_ANAL_OP_TYPE_CALL;
				op->jump = 0LL;
				anal->iob.read_at(anal->iob.io, disp,
					(ut8*)&op->jump, anal->bits==64?8:4);
			}
		}
		break;
	case X86IM_IO_ID_CALL_N_AI_RG: /* call reg */
		op->type = R_ANAL_OP_TYPE_UCALL;
		op->dst = anal_fill_ai_rg (anal, io, 0);
		op->fail = op->addr + io.len;
		break;
	case X86IM_IO_ID_CALL_F_A: /* call dword sel:0x0ff */
		op->type = R_ANAL_OP_TYPE_UCALL;
		op->dst = anal_fill_f (anal, io);
		/* TODO: Deprecate */
		op->selector = io.selector;
		op->ref = imm;
		op->fail = op->addr + io.len;
		break;
	}
}
/* Classify an x86 PUSH decoded by x86im and record how much it grows the
 * stack (op->stackptr, in bytes). */
static void anal_push(RAnal *anal, RAnalOp *op, x86im_instr_object io) {
	st64 imm = r_hex_bin_truncate (io.imm, io.imm_size);
	st64 disp = r_hex_bin_truncate (io.disp, io.disp_size);
	switch (io.id) {
	case X86IM_IO_ID_PUSH_MM: /* push [0x0ff | reg1+reg2+0x0ff] */
		op->type = R_ANAL_OP_TYPE_UPUSH;
		op->src[0] = anal_fill_ai_mm (anal, io);
		/* TODO: Deprecate */
		op->stackptr = io.mem_size;
		if (io.mem_base == 0) { /* push [0x0ff] */
			op->ref = disp;
		} else if ((X86IM_IO_ROP_GET_ID (io.mem_base) == X86IM_IO_ROP_ID_EBP) &&
				io.mem_index == 0) { /* push [ebp+0x0ff] */
			op->stackop = R_ANAL_STACK_GET;
			op->ref = disp;
		}
		break;
	case X86IM_IO_ID_PUSH_RG1: /* push reg */
	case X86IM_IO_ID_PUSH_RG2:
		op->type = R_ANAL_OP_TYPE_UPUSH;
		op->src[0] = anal_fill_ai_rg (anal, io, 0);
		/* TODO: Deprecate */
		/* stack growth depends on the width of the pushed register */
		if (X86IM_IO_ROP_IS_GPR16(io.rop[0]))
			op->stackptr = 2;
		else if (X86IM_IO_ROP_IS_GPR32(io.rop[0]))
			op->stackptr = 4;
		else if (X86IM_IO_ROP_IS_GPR64(io.rop[0]))
			op->stackptr = 8;
		break;
	case X86IM_IO_ID_PUSH_IM: /* push 0x1 */
		op->type = R_ANAL_OP_TYPE_PUSH;
		op->src[0] = anal_fill_im (anal, io);
		/* TODO: Deprecate */
		op->value = imm;
		op->stackptr = io.imm_size;
		break;
	case X86IM_IO_ID_PUSH_SR1: /* push sr */
	case X86IM_IO_ID_PUSH_SR2:
		/* io.rop[0] = sr */
		op->type = R_ANAL_OP_TYPE_UPUSH;
		break;
	case X86IM_IO_ID_PUSHAD: /* pushad */
	case X86IM_IO_ID_PUSHF: /* pushf */
		op->type = R_ANAL_OP_TYPE_UPUSH;
		break;
	}
}
/* Decode SH opcodes whose first nibble is C (older variant): trapa, mova
 * and the immediate-with-R0 logic ops. Returns op->size. */
static int first_nibble_is_c(RAnal* anal, RAnalOp* op, ut16 code){
	if (IS_TRAP(code)){
		op->type = R_ANAL_OP_TYPE_SWI;
		op->val = (ut8)(code&0xFF); // trap vector number
	} else if (IS_MOVA_PCREL_R0(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		/* NOTE(review): anal_pcrel_disp_mov is called with 3 arguments here
		 * but with an extra size argument at other call sites — confirm the
		 * helper's signature matches this variant. */
		op->src[0] = anal_pcrel_disp_mov(anal,op,code&0xFF);
		op->dst = anal_fill_ai_rg(anal,0); //Always R0
	} else if (IS_AND_IMM_R0(code)){
		op->type = R_ANAL_OP_TYPE_AND;
		op->src[0] = anal_fill_im(anal,code&0xFF);
		op->dst = anal_fill_ai_rg(anal,0); //Always R0
	} else if (IS_OR_IMM_R0(code)){
		op->type = R_ANAL_OP_TYPE_OR;
		op->src[0] = anal_fill_im(anal,code&0xFF);
		op->dst = anal_fill_ai_rg(anal,0); //Always R0
	} else if (IS_XOR_IMM_R0(code)){
		op->type = R_ANAL_OP_TYPE_XOR;
		op->src[0] = anal_fill_im(anal,code&0xFF);
		op->dst = anal_fill_ai_rg(anal,0); //Always R0
	}
	//TODO Logic insns referencing GBR
	return op->size;
}
/* Classify an x86 JMP decoded by x86im. Relative jumps get a concrete
 * op->jump; indirect jumps through RIP-relative or absolute memory are
 * resolved by dereferencing the pointer via the IO backend. Returns the
 * instruction length. */
static int anal_jmp(RAnal *anal, RAnalOp *op, x86im_instr_object io) {
	st64 imm = r_hex_bin_truncate (io.imm, io.imm_size);
	st64 disp = r_hex_bin_truncate (io.disp, io.disp_size);
	op->eob = R_TRUE; // unconditional transfer always ends the block
	switch (io.id) {
	case X86IM_IO_ID_JMP_N_R_S: /* jmp short 0x0ff */
	case X86IM_IO_ID_JMP_N_R: /* jmp 0x0ff */
		op->type = R_ANAL_OP_TYPE_JMP;
		op->jump = op->addr + io.len + imm; // relative to next insn
		op->dst = anal_fill_r (anal, io, op->addr);
		break;
	case X86IM_IO_ID_JMP_N_AI_MM: /* jmp [0x0ff | reg1+reg2+0x0ff] */
	case X86IM_IO_ID_JMP_F_AI_MM: /* jmp dword far [0x0ff | reg1+reg2+0x0ff] */
		op->dst = anal_fill_ai_mm (anal, io);
		op->type = R_ANAL_OP_TYPE_UJMP;
		/* TODO: Deprecate */
		if (io.mem_base == 0)
			op->ref = disp;
		if (anal->iob.io != NULL) {
			/* Dereference the jump target, upgrading UJMP to JMP. */
			if (io.mem_base == X86IM_IO_ROP_ID_RIP) {
				op->type = R_ANAL_OP_TYPE_JMP;
				op->jump = 0LL;
				anal->iob.read_at(anal->iob.io, op->addr + io.len + disp,
					(ut8*)&op->jump, anal->bits==64?8:4);
			} else if (io.mem_base == 0) {
				op->type = R_ANAL_OP_TYPE_JMP;
				op->jump = 0LL;
				anal->iob.read_at(anal->iob.io, disp,
					(ut8*)&op->jump, anal->bits==64?8:4);
			}
		}
		break;
	case X86IM_IO_ID_JMP_N_AI_RG: /* jmp reg */
		op->type = R_ANAL_OP_TYPE_UJMP;
		op->dst = anal_fill_ai_rg (anal, io, 0);
		break;
	case X86IM_IO_ID_JMP_F_A: /* jmp dword sel:0x0ff */
		op->type = R_ANAL_OP_TYPE_UJMP;
		op->dst = anal_fill_f (anal, io);
		/* TODO: Deprecate */
		op->selector = io.selector;
		op->ref = imm;
		break;
	}
	return io.len;
}
/* 16 decoder routines, based on 1st nibble value */
/* Decode SH opcodes whose first nibble is 0 (older variant): register-
 * relative branches/calls, returns, and @(R0,Rn) moves. Returns op->size. */
static int first_nibble_is_0(RAnal* anal, RAnalOp* op, ut16 code){
	if(IS_BSRF(code)){
		/* Call 'far' subroutine Rn+PC+4 */
		op->type = R_ANAL_OP_TYPE_UCALL;
		op->delay = 1;
		op->dst = anal_regrel_jump(anal,op,GET_TARGET_REG(code));
	} else if (IS_BRAF(code)){
		/* Unconditional branch to Rn+PC+4, no delay slot */
		op->type = R_ANAL_OP_TYPE_UJMP;
		op->dst = anal_regrel_jump(anal,op,GET_TARGET_REG(code));
		op->eob = R_TRUE;
	} else if( IS_RTS(code) ){
		/* Ret from subroutine. Returns to pr */
		//TODO Convert into jump pr?
		op->type = R_ANAL_OP_TYPE_RET;
		op->delay = 1;
		op->eob = R_TRUE;
	} else if (IS_RTE(code)){
		//TODO Convert into jmp spc? Indicate ssr->sr as well?
		op->type = R_ANAL_OP_TYPE_RET;
		op->delay = 1;
		op->eob = R_TRUE;
	} else if (IS_MOVB_REG_TO_R0REL(code)){
		/* mov.b Rm,@(R0,Rn) */
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_r0_reg_ref(anal,GET_TARGET_REG(code),BYTE_SIZE);
	} else if (IS_MOVW_REG_TO_R0REL(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_r0_reg_ref(anal,GET_TARGET_REG(code),WORD_SIZE);
	} else if (IS_MOVL_REG_TO_R0REL(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->dst = anal_fill_r0_reg_ref(anal,GET_TARGET_REG(code),LONG_SIZE);
	} else if (IS_MOVB_R0REL_TO_REG(code)){
		/* mov.b @(R0,Rm),Rn */
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_r0_reg_ref(anal,GET_SOURCE_REG(code),BYTE_SIZE);
		op->dst = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
	} else if (IS_MOVW_R0REL_TO_REG(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_r0_reg_ref(anal,GET_SOURCE_REG(code),WORD_SIZE);
		op->dst = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
	} else if (IS_MOVL_R0REL_TO_REG(code)){
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_r0_reg_ref(anal,GET_SOURCE_REG(code),LONG_SIZE);
		op->dst = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
	}
	//TODO Check missing insns, specially STC might be interesting
	return op->size;
}
/* Classify an x86 POP decoded by x86im and record how much it shrinks the
 * stack (negative op->stackptr, in bytes). */
static void anal_pop(RAnal *anal, RAnalOp *op, x86im_instr_object io) {
	//st64 imm = r_hex_bin_truncate (io.imm, io.imm_size);
	st64 disp = r_hex_bin_truncate (io.disp, io.disp_size);
	op->type = R_ANAL_OP_TYPE_POP;
	switch (io.id) {
	case X86IM_IO_ID_POP_MM: /* pop [0x0ff | reg1+reg2+0x0ff] */
		op->dst = anal_fill_ai_mm (anal, io);
		/* TODO: Deprecate */
		if (io.mem_base == 0) { /* pop [0x0ff] */
			op->ref = disp;
		}
		op->stackptr = -io.mem_size;
		break;
	case X86IM_IO_ID_POP_RG1: /* pop reg */
	case X86IM_IO_ID_POP_RG2:
		op->dst = anal_fill_ai_rg (anal, io, 0);
		/* TODO: Deprecate */
		/* stack shrink depends on the width of the popped register */
		if (X86IM_IO_ROP_IS_GPR16 (io.rop[0]))
			op->stackptr = -2;
		else if (X86IM_IO_ROP_IS_GPR32 (io.rop[0]))
			op->stackptr = -4;
		else if (X86IM_IO_ROP_IS_GPR64 (io.rop[0]))
			op->stackptr = -8;
		break;
	case X86IM_IO_ID_POP_SR2: /* pop sr */
	case X86IM_IO_ID_POP_SR1:
		/* io.rop[0] = sr */
		break;
	case X86IM_IO_ID_POPAD: /* popad */
	case X86IM_IO_ID_POPF: /* popf */
		break;
	}
}
/* Decode SH opcodes whose first nibble is C: trapa, mova, immediate logic
 * ops against R0 or @(R0,GBR), and GBR-relative moves. Returns op->size. */
static int first_nibble_is_c(RAnal* anal, RAnalOp* op, ut16 code){
	if (IS_TRAP(code)) {
		op->type = R_ANAL_OP_TYPE_SWI;
		op->val = (ut8)(code&0xFF); // trap vector number
	} else if (IS_MOVA_PCREL_R0(code)) {
		// 11000111i8p4.... mova @(<disp>,PC),R0
		op->type = R_ANAL_OP_TYPE_LEA;
		op->src[0] = anal_pcrel_disp_mov (anal, op, code&0xFF, LONG_SIZE); //this is wrong !
		op->dst = anal_fill_ai_rg (anal, 0); //Always R0
	} else if (IS_BINLOGIC_IMM_R0(code)) { // 110010__i8 (binop) #imm, R0
		op->src[0] = anal_fill_im (anal, code&0xFF);
		op->src[1] = anal_fill_ai_rg (anal, 0); //Always R0
		op->dst = anal_fill_ai_rg (anal, 0); //Always R0 except tst #imm, R0
		// bits 8-9 select the logic operation
		switch (code & 0xFF00) {
		case 0xC800: //tst
			//TODO : get correct op->dst ! (T flag)
			op->type = R_ANAL_OP_TYPE_ACMP;
			break;
		case 0xC900: //and
			op->type = R_ANAL_OP_TYPE_AND;
			break;
		case 0xCA00: //xor
			op->type = R_ANAL_OP_TYPE_XOR;
			break;
		case 0xCB00: //or
			op->type = R_ANAL_OP_TYPE_OR;
			break;
		}
	} else if (IS_BINLOGIC_IMM_GBR(code)) { //110011__i8 (binop).b #imm, @(R0,GBR)
		op->src[0] = anal_fill_im (anal, code&0xFF);
		switch (code & 0xFF00) {
		case 0xCC00: //tst
			//TODO : get correct op->dst ! (T flag)
			op->type = R_ANAL_OP_TYPE_ACMP;
			break;
		case 0xCD00: //and
			op->type = R_ANAL_OP_TYPE_AND;
			break;
		case 0xCE00: //xor
			op->type = R_ANAL_OP_TYPE_XOR;
			break;
		case 0xCF00: //or
			op->type = R_ANAL_OP_TYPE_OR;
			break;
		}
		//TODO : implement @(R0,GBR) dest and src[1]
	} else if (IS_MOVB_R0_GBRREF(code)) { //11000000i8*1.... mov.b R0,@(<disp>,GBR)
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, 0);
		//todo: implement @(disp,GBR) dest
	} else if (IS_MOVW_R0_GBRREF(code)) { //11000001i8*2.... mov.w R0,@(<disp>,GBR)
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, 0);
		//todo: implement @(disp,GBR) dest
	} else if (IS_MOVL_R0_GBRREF(code)) { //11000010i8*4.... mov.l R0,@(<disp>,GBR)
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, 0);
		//todo: implement @(disp,GBR) dest
	} else if (IS_MOVB_GBRREF_R0(code)) { //11000100i8*1.... mov.b @(<disp>,GBR),R0
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->dst = anal_fill_ai_rg (anal, 0);
		//todo: implement @(disp,GBR) src
	} else if (IS_MOVW_GBRREF_R0(code)) { //11000101i8*2.... mov.w @(<disp>,GBR),R0
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->dst = anal_fill_ai_rg (anal, 0);
		//todo: implement @(disp,GBR) src
	} else if (IS_MOVL_GBRREF_R0(code)) { //11000110i8*4.... mov.l @(<disp>,GBR),R0
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->dst = anal_fill_ai_rg (anal, 0);
		//todo: implement @(disp,GBR) src
	}
	return op->size;
}
/* Decode SH opcodes whose first nibble is 6: register moves, @Rm loads of
 * all widths, sign/zero extension, post-increment pops, neg(c), not and
 * swap. Returns op->size. */
static int first_nibble_is_6(RAnal* anal, RAnalOp* op, ut16 code){
	if (IS_MOV_REGS(code)) {
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_MOVB_REGREF_TO_REG(code)) {
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->src[0] = anal_fill_reg_ref (anal, GET_SOURCE_REG(code), BYTE_SIZE);
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_MOVW_REGREF_TO_REG(code)) {
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->src[0] = anal_fill_reg_ref (anal, GET_SOURCE_REG(code), WORD_SIZE);
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_MOVL_REGREF_TO_REG(code)) {
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->src[0] = anal_fill_reg_ref (anal, GET_SOURCE_REG(code), LONG_SIZE);
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_EXT(code)) {
		//ext{s,u}.{b,w} instructs. todo : more detail ?
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_MOVB_POP(code) || IS_MOVW_POP(code) || IS_MOVL_POP(code)) {
		/* 0110nnnnmmmm0100 mov.b @<REG_M>+,<REG_N>*/
		/* 0110nnnnmmmm0101 mov.w @<REG_M>+,<REG_N>*/
		/* 0110nnnnmmmm0110 mov.l @<REG_M>+,<REG_N>*/
		op->type = R_ANAL_OP_TYPE_POP;
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		//todo : op->src for pop = ?
	} else if (IS_NEG(code)) {
		//todo: neg and negc details
		op->type = R_ANAL_OP_TYPE_UNK;
		/* 0110nnnnmmmm1010 negc*/
		/* 0110nnnnmmmm1010 neg */
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_NOT(code)) {
		//todo : details?
		op->type = R_ANAL_OP_TYPE_NOT;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_SWAP(code)) {
		/* 0110nnnnmmmm1000 swap.b <REG_M>,<REG_N>*/
		/* 0110nnnnmmmm1001 swap.w <REG_M>,<REG_N>*/
		op->type = R_ANAL_OP_TYPE_MOV; //todo : details
	}
	return op->size;
}
//nibble=7; 0111nnnni8*1.... add #<imm>,<REG_N> static int add_imm(RAnal* anal, RAnalOp* op, ut16 code){ op->type = R_ANAL_OP_TYPE_ADD; op->src[0] = anal_fill_im (anal, (st8)(code&0xFF)); //Casting to (st8) forces sign-extension. op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code)); return op->size; }
/* mov.w @(<disp>,PC),Rn — older variant: models the source as a
 * displacement off the PC pseudo-register. */
static int movw_pcdisp_reg(RAnal* anal, RAnalOp* op, ut16 code){
	int disp = code & 0xFF;
	op->type = R_ANAL_OP_TYPE_MOV;
	op->src[0] = anal_fill_reg_disp_mem (anal, PC_IDX, disp, WORD_SIZE);
	op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG (code));
	return op->size;
}
//nibble=5; 0101nnnnmmmmi4*4 mov.l @(<disp>,<REG_M>),<REG_N> static int movl_rdisp_reg(RAnal* anal, RAnalOp* op, ut16 code){ op->type = R_ANAL_OP_TYPE_LOAD; op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code)); op->src[0] = anal_fill_reg_disp_mem (anal, GET_SOURCE_REG(code), code&0x0F, LONG_SIZE); return op->size; }
//nibble=d; 1101nnnni8 : mov.l @(<disp>,PC), Rn static int movl_pcdisp_reg(RAnal* anal, RAnalOp* op, ut16 code){ op->type = R_ANAL_OP_TYPE_LOAD; op->src[0] = anal_pcrel_disp_mov (anal, op, code&0xFF, LONG_SIZE); op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code)); return op->size; }
/* Decode SH opcodes whose first nibble is 2: register stores, logic ops,
 * pre-decrement pushes, tst, cmp/str, xtrct, div0s and mul{s,u}.w.
 * Returns op->size. */
static int first_nibble_is_2(RAnal* anal, RAnalOp* op, ut16 code){
	if (IS_MOVB_REG_TO_REGREF(code)) {
		// 0010nnnnmmmm0000 mov.b <REG_M>,@<REG_N>
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_reg_ref (anal, GET_TARGET_REG(code), BYTE_SIZE);
	} else if (IS_MOVW_REG_TO_REGREF(code)) {
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_reg_ref (anal, GET_TARGET_REG(code), WORD_SIZE);
	} else if (IS_MOVL_REG_TO_REGREF(code)) {
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_reg_ref (anal, GET_TARGET_REG(code), LONG_SIZE);
	} else if (IS_AND_REGS(code)) {
		op->type = R_ANAL_OP_TYPE_AND;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_XOR_REGS(code)) {
		op->type = R_ANAL_OP_TYPE_XOR;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_OR_REGS(code)) {
		op->type = R_ANAL_OP_TYPE_OR;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_PUSHB(code) || IS_PUSHW(code) || IS_PUSHL(code)) {
		op->type = R_ANAL_OP_TYPE_PUSH;
		//TODO Handle 'pushes' (mov Rm,@-Rn)
	} else if (IS_TSTRR(code)) {
		op->type = R_ANAL_OP_TYPE_ACMP;
		//TODO: handle tst reg,reg
	} else if (IS_CMPSTR(code)) { //0010nnnnmmmm1100 cmp/str <REG_M>,<REG_N>
		op->type = R_ANAL_OP_TYPE_ACMP; //maybe not?
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->src[1] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		//todo: handle cmp/str byte-per-byte cmp?
	} else if (IS_XTRCT(code)) { //0010nnnnmmmm1101 xtrct <REG_M>,<REG_N>
		op->type = R_ANAL_OP_TYPE_MOV;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->src[1] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		//todo: add details ?
	} else if (IS_DIV0S(code)) {
		op->type = R_ANAL_OP_TYPE_DIV;
		//todo: add details?
	} else if (IS_MULUW(code) || IS_MULSW(code)) {
		//0010nnnnmmmm111_ mul{s,u}.w <REG_M>,<REG_N>
		op->type = R_ANAL_OP_TYPE_MUL;
		op->src[0] = anal_fill_ai_rg(anal,GET_SOURCE_REG(code));
		op->src[1] = anal_fill_ai_rg(anal,GET_TARGET_REG(code));
		//todo: dest=MACL
	}
	return op->size;
}
/* 16 decoder routines, based on 1st nibble value */
/* Decode SH opcodes whose first nibble is 0: register-relative branches,
 * returns, @(R0,Rn) moves, and the one-word system instructions (nop,
 * clrt/sett, clrmac, div0u, movt, mul.l, sleep, sts/stc). Returns op->size. */
static int first_nibble_is_0(RAnal* anal, RAnalOp* op, ut16 code){
	if(IS_BSRF(code)) {
		/* Call 'far' subroutine Rn+PC+4 */
		op->type = R_ANAL_OP_TYPE_UCALL;
		op->delay = 1;
		op->dst = anal_regrel_jump (anal, op, GET_TARGET_REG(code));
	} else if (IS_BRAF(code)) {
		/* Unconditional branch to Rn+PC+4, no delay slot */
		op->type = R_ANAL_OP_TYPE_UJMP;
		op->dst = anal_regrel_jump (anal, op, GET_TARGET_REG(code));
		op->eob = true;
	} else if( IS_RTS(code) ) {
		/* Ret from subroutine. Returns to pr */
		//TODO Convert into jump pr?
		op->type = R_ANAL_OP_TYPE_RET;
		op->delay = 1;
		op->eob = true;
	} else if (IS_RTE(code)) {
		//TODO Convert into jmp spc? Indicate ssr->sr as well?
		op->type = R_ANAL_OP_TYPE_RET;
		op->delay = 1;
		op->eob = true;
	} else if (IS_MOVB_REG_TO_R0REL(code)) {
		//0000nnnnmmmm0100 mov.b <REG_M>,@(R0,<REG_N>)
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_r0_reg_ref (anal, GET_TARGET_REG(code), BYTE_SIZE);
	} else if (IS_MOVW_REG_TO_R0REL(code)) {
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_r0_reg_ref (anal, GET_TARGET_REG(code), WORD_SIZE);
	} else if (IS_MOVL_REG_TO_R0REL(code)) {
		op->type = R_ANAL_OP_TYPE_STORE;
		op->src[0] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		op->dst = anal_fill_r0_reg_ref(anal, GET_TARGET_REG(code), LONG_SIZE);
	} else if (IS_MOVB_R0REL_TO_REG(code)) {
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->src[0] = anal_fill_r0_reg_ref (anal, GET_SOURCE_REG(code), BYTE_SIZE);
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_MOVW_R0REL_TO_REG(code)) {
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->src[0] = anal_fill_r0_reg_ref (anal, GET_SOURCE_REG(code), WORD_SIZE);
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_MOVL_R0REL_TO_REG(code)) {
		op->type = R_ANAL_OP_TYPE_LOAD;
		op->src[0] = anal_fill_r0_reg_ref (anal, GET_SOURCE_REG(code), LONG_SIZE);
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_NOP(code)) {
		op->type = R_ANAL_OP_TYPE_NOP;
	} else if (IS_CLRT(code)) {
		op->type = R_ANAL_OP_TYPE_UNK; //TODO : implement flag
	} else if (IS_SETT(code)) {
		op->type = R_ANAL_OP_TYPE_UNK;
	} else if (IS_CLRMAC(code)) {
		op->type = R_ANAL_OP_TYPE_UNK; //TODO : type_mov ?
	} else if (IS_DIV0U(code)) {
		op->type = R_ANAL_OP_TYPE_DIV;
	} else if (IS_MOVT(code)) {
		op->type = R_ANAL_OP_TYPE_MOV;
		//op->src[0] = //TODO: figure out how to get T flag from sr reg
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_MULL(code)) {
		op->type = R_ANAL_OP_TYPE_MUL;
		op->src[0] = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		op->src[1] = anal_fill_ai_rg (anal, GET_SOURCE_REG(code));
		//op->dst = //TODO: figure out how to set MACL + MACH
	} else if (IS_SLEEP(code)) {
		op->type = R_ANAL_OP_TYPE_UNK;
	} else if (IS_STSMAC(code)) { //0000nnnn0000101_ sts MAC*,<REG_N>
		op->type = R_ANAL_OP_TYPE_MOV;
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
	} else if (IS_STCSR1(code)) { //0000nnnn00010010 stc {sr,gbr,vbr,ssr},<REG_N>
		op->type = R_ANAL_OP_TYPE_MOV;
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		//todo: plug in src
	} else if (IS_STSPR(code)) { //0000nnnn00101010 sts PR,<REG_N>
		op->type = R_ANAL_OP_TYPE_MOV;
		op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code));
		//todo: plug in src
	}
	//TODO Check missing insns, especially STC might be interesting
	return op->size;
}
/* @(R0,Rx) references for all sizes */
/* Builds a memory reference whose base is R0 and whose index register
 * (stored in regdelta) is Rx; size is the access width in bytes. */
static RAnalValue *anal_fill_r0_reg_ref(RAnal *anal, int reg, st64 size){
	/* Base register is always R0. */
	RAnalValue *val = anal_fill_ai_rg (anal, 0);
	val->memref = size;
	val->regdelta = r_reg_get (anal->reg, regs[reg], R_REG_TYPE_GPR);
	return val;
}
/* @Rn — memory reference through a single register; size is the access
 * width in bytes. */
static RAnalValue *anal_fill_reg_ref(RAnal *anal, int reg, st64 size){
	RAnalValue *val = anal_fill_ai_rg (anal, reg);
	val->memref = size;
	return val;
}
//nibble=e; 1110nnnni8*1.... mov #<imm>,<REG_N> static int mov_imm_reg(RAnal* anal, RAnalOp* op, ut16 code){ op->type = R_ANAL_OP_TYPE_MOV; op->dst = anal_fill_ai_rg (anal, GET_TARGET_REG(code)); op->src[0] = anal_fill_im (anal, (st8)(code & 0xFF)); return op->size; }
/* Implements @(disp,Rn) , size=1 for .b, 2 for .w, 4 for .l */
/* The raw displacement is scaled by the access size, as the ISA encodes
 * displacements in units of the operand width. */
static RAnalValue *anal_fill_reg_disp_mem(RAnal *anal, int reg, st64 delta, st64 size) {
	RAnalValue *val = anal_fill_ai_rg (anal, reg);
	val->delta = size * delta;
	val->memref = size;
	return val;
}