/* inp_uint16() - Read a little-endian 16-bit value from the input source. */
extern uint16_t
inp_uint16(struct ud* u)
{
  uint16_t lo, hi;
  lo = inp_next(u);        /* least-significant byte first */
  hi = inp_next(u);
  return lo | (hi << 8);
}
/* inp_uint32() - Read a little-endian 32-bit value from the input source. */
extern uint32_t
inp_uint32(struct ud* u)
{
  unsigned int i;
  uint32_t value = 0;
  /* assemble four bytes, least-significant first */
  for (i = 0; i < 4; ++i) {
    value |= ((uint32_t) inp_next(u)) << (i * 8);
  }
  return value;
}
/* ----------------------------------------------------------------------------- * inp_peek() - Peek into the next byte in source. * ----------------------------------------------------------------------------- */ extern uint8_t inp_peek(struct ud* u) { uint8_t r = inp_next(u); if ( !u->error ) inp_back(u); /* Don't backup if there was an error */ return r; }
/* modrm() - Return the instruction's mod/rm byte, fetching it from the
 * input stream on first use and caching it for subsequent calls. */
static __inline unsigned int
modrm( struct ud * u )
{
  if ( u->have_modrm ) {
    return u->modrm;
  }
  u->modrm      = inp_next( u );
  u->have_modrm = 1;
  return u->modrm;
}
/*
 * decode_3dnow()
 *
 * Decoding 3dnow is a little tricky because of its strange opcode
 * structure. The final opcode disambiguation depends on the last
 * byte that comes after the operands have been decoded. Fortunately,
 * all 3dnow instructions have the same set of operand types. So we
 * go ahead and decode the instruction by picking an arbitrarily chosen
 * valid entry in the table, decode the operands, and read the final
 * byte to resolve the mnemonic.
 *
 * Returns 0 on success, -1 on input error.
 */
static __inline int
decode_3dnow(struct ud* u)
{
  uint16_t ptr;
  assert(u->le->type == UD_TAB__OPC_3DNOW);
  assert(u->le->table[0xc] != 0);
  /* decode using an arbitrary valid entry (0x0c); operand layout is
   * identical for every 3dnow instruction */
  decode_insn(u, u->le->table[0xc]);
  /* the suffix byte following the operands selects the real mnemonic */
  inp_next(u);
  if (u->error) {
    return -1;
  }
  ptr = u->le->table[inp_curr(u)];
  /* entry must be a leaf (itab index), not a sub-table reference */
  assert((ptr & 0x8000) == 0);
  u->mnemonic = ud_itab[ptr].mnemonic;
  return 0;
}
/*
 * decode_opcode()
 *
 * Walks the opcode lookup tables starting from the current input byte.
 * Table entries with the high bit (0x8000) set refer to a nested lookup
 * table; plain entries are resolved through decode_ext(). Recurses for
 * each additional opcode byte. Returns the status of decode_ext(), or
 * bails out early if the decoder is already in an error state.
 */
static int
decode_opcode(struct ud *u)
{
  uint16_t ptr;
  UD_ASSERT(u->le->type == UD_TAB__OPC_TABLE);
  UD_RETURN_ON_ERROR(u);
  /* NOTE(review): caller is expected to have already fetched the opcode
   * byte via inp_next(); this reads the current byte, not the next one. */
  u->primary_opcode = inp_curr(u);
  ptr = u->le->table[inp_curr(u)];
  if (ptr & 0x8000) {
    /* high bit set => index into the lookup-table list, not a leaf */
    u->le = &ud_lookup_table_list[ptr & ~0x8000];
    if (u->le->type == UD_TAB__OPC_TABLE) {
      /* another opcode byte follows: consume it and continue the walk */
      inp_next(u);
      return decode_opcode(u);
    }
  }
  return decode_ext(u, ptr);
}
/*
 * decode_opcode()  (inline variant)
 *
 * Fetches the next opcode byte and walks the opcode lookup tables.
 * Entries with the high bit (0x8000) set refer to a nested lookup
 * table; recursion handles multi-byte opcodes. Plain entries are
 * resolved through decode_ext(). Returns -1 on input error, otherwise
 * the status of decode_ext().
 */
static __inline int
decode_opcode(struct ud *u)
{
  uint16_t ptr;
  assert(u->le->type == UD_TAB__OPC_TABLE);
  inp_next(u);
  if (u->error) {
    return -1;
  }
  ptr = u->le->table[inp_curr(u)];
  if (ptr & 0x8000) {
    /* high bit set => index into the lookup-table list, not a leaf */
    u->le = &ud_lookup_table_list[ptr & ~0x8000];
    if (u->le->type == UD_TAB__OPC_TABLE) {
      return decode_opcode(u);
    }
  }
  return decode_ext(u, ptr);
}
/* inp_uint64() - Read a little-endian 64-bit value from the input source. */
extern uint64_t
inp_uint64(struct ud* u)
{
  unsigned int i;
  uint64_t value = 0;
  /* assemble eight bytes, least-significant first */
  for (i = 0; i < 8; ++i) {
    value |= ((uint64_t) inp_next(u)) << (i * 8);
  }
  return value;
}
/*
 * get_prefixes()
 *
 * Extracts instruction prefixes. Consumes bytes from the input stream
 * until a non-prefix byte is found, recording segment/operand/address/
 * lock/rep(ne)/rex prefixes into the decode state. On return the stream
 * is rewound one byte so the opcode byte is read next, and the effective
 * operand and address modes are speculatively derived from the prefixes.
 * Returns 0 on success, -1 on input error or over-long prefix run.
 */
static int get_prefixes( struct ud* u )
{
  unsigned int have_pfx = 1;
  unsigned int i;
  uint8_t curr;

  /* if in error state, bail out */
  if ( u->error )
    return -1;

  /* keep going as long as there are prefixes available */
  for ( i = 0; have_pfx ; ++i ) {

    /* Get next byte. */
    inp_next(u);
    if ( u->error )
      return -1;
    curr = inp_curr( u );

    /* rex prefixes in 64bit mode */
    if ( u->dis_mode == 64 && ( curr & 0xF0 ) == 0x40 ) {
      u->pfx_rex = curr;
    } else {
      /* any non-rex prefix clears pfx_rex: a rex prefix is only valid
       * when it is the last prefix before the opcode */
      switch ( curr ) {
      case 0x2E : u->pfx_seg = UD_R_CS; u->pfx_rex = 0; break;
      case 0x36 : u->pfx_seg = UD_R_SS; u->pfx_rex = 0; break;
      case 0x3E : u->pfx_seg = UD_R_DS; u->pfx_rex = 0; break;
      case 0x26 : u->pfx_seg = UD_R_ES; u->pfx_rex = 0; break;
      case 0x64 : u->pfx_seg = UD_R_FS; u->pfx_rex = 0; break;
      case 0x65 : u->pfx_seg = UD_R_GS; u->pfx_rex = 0; break;
      case 0x67 : /* address-size override prefix */
        u->pfx_adr = 0x67;
        u->pfx_rex = 0;
        break;
      case 0xF0 :
        u->pfx_lock = 0xF0;
        u->pfx_rex = 0;
        break;
      case 0x66:
        /* the 0x66 sse prefix is only effective if no other sse prefix
         * has already been specified. */
        if ( !u->pfx_insn ) u->pfx_insn = 0x66;
        u->pfx_opr = 0x66;
        u->pfx_rex = 0;
        break;
      case 0xF2:
        u->pfx_insn = 0xF2;
        u->pfx_repne = 0xF2;
        u->pfx_rex = 0;
        break;
      case 0xF3:
        u->pfx_insn = 0xF3;
        u->pfx_rep = 0xF3;
        u->pfx_repe = 0xF3;
        u->pfx_rex = 0;
        break;
      default :
        /* No more prefixes */
        have_pfx = 0;
        break;
      }
    }

    /* check if we reached max instruction length */
    if ( i + 1 == MAX_INSN_LENGTH ) {
      u->error = 1;
      break;
    }
  }

  /* return status */
  if ( u->error )
    return -1;

  /* rewind back one byte in stream, since the above loop
   * stops with a non-prefix byte. */
  inp_back(u);

  /* speculatively determine the effective operand mode,
   * based on the prefixes and the current disassembly
   * mode. This may be inaccurate, but useful for mode
   * dependent decoding. */
  if ( u->dis_mode == 64 ) {
    /* REX.W wins over 0x66; 0x67 shrinks addressing to 32-bit */
    u->opr_mode = REX_W( u->pfx_rex ) ? 64 : ( ( u->pfx_opr ) ? 16 : 32 ) ;
    u->adr_mode = ( u->pfx_adr ) ? 32 : 64;
  } else if ( u->dis_mode == 32 ) {
    u->opr_mode = ( u->pfx_opr ) ? 16 : 32;
    u->adr_mode = ( u->pfx_adr ) ? 16 : 32;
  } else if ( u->dis_mode == 16 ) {
    u->opr_mode = ( u->pfx_opr ) ? 32 : 16;
    u->adr_mode = ( u->pfx_adr ) ? 32 : 16;
  }
  return 0;
}
static int do_prefixes( struct ud* u ) { int have_pfx = 1; int i; uint8_t last_pfx = -1; if ( u->error ) return -1; /* if in error state, bail out */ for ( i = 0; have_pfx ; ++i ) { uint8_t curr; /* Get next byte. */ inp_next(u); if ( u->error ) return -1; curr = inp_curr( u ); /* Rex prefixes in 64bit mode */ if ( u->dis_mode == 64 && ( curr & 0xF0 ) == 0x40 ) { u->pfx_rex = curr; } else { switch ( curr ) { /* TBD: Need to find out the behavior in the case of multiple * segment prefixes. */ case 0x2E : u->pfx_seg = UD_R_CS; break; case 0x36 : u->pfx_seg = UD_R_SS; break; case 0x3E : u->pfx_seg = UD_R_DS; break; case 0x26 : u->pfx_seg = UD_R_ES; break; case 0x64 : u->pfx_seg = UD_R_FS; break; case 0x65 : u->pfx_seg = UD_R_GS; break; case 0x67 : /* adress-size override prefix */ u->pfx_adr = 0x67; break; case 0xF0 : u->pfx_lock= 0xF0; break; case 0x66 : { /* operand-size override, and SSE modifier */ /* if there was already an F2, F3 prefix, 66 becomes * in effective. */ if ( u->pfx_insn != 0xF2 && u->pfx_insn != 0xF3 ) { u->pfx_insn = 0x66; } /* operand size prefix */ u->pfx_opr = 0x66; break; } /* 0xF2 is an SSE instruction modifier */ case 0xF2 : { u->pfx_insn = 0xF2; u->pfx_repne= 0xF2; break; } /* 0xF3 is an SSE instruction modifier */ case 0xF3 : { u->pfx_insn = 0xF3; u->pfx_rep = 0xF3; break; } default : { /* No more prefixes */ have_pfx = 0; } } } /* check if we reached max instruction length */ if ( i == 14 ) { u->error = 1; break; } /* we keep the last prefix for checking 0x66 insn modifier. */ last_pfx = curr; } /* return status */ if ( u->error ) return -1; /* rewind back one byte in stream, since the above loop stopped * with a non-prefix byte. */ inp_back(u); return 0; }
/*------------------------------------------------------------------------------
 * inp_uintN() - return uintN from source.
 *
 * 8-bit case: a single byte needs no endianness handling.
 *------------------------------------------------------------------------------
 */
extern uint8_t
inp_uint8(struct ud* u)
{
  uint8_t value = inp_next(u);
  return value;
}
/* -----------------------------------------------------------------------------
 * decode_modrm() - Decodes ModRM Byte
 *
 * Consumes the modrm byte (and, when present, the SIB byte and any
 * displacement) from the input stream and fills in:
 *   op    - the r/m operand (register or memory), sized per `s`
 *   opreg - if non-NULL, the register operand encoded in modrm.reg,
 *           sized per `reg_size` and typed per `reg_type`
 * rm_type/reg_type select the register file (T_GPR vs mmx/sse/etc.).
 * -----------------------------------------------------------------------------
 */
static void
decode_modrm(struct ud* u, struct ud_operand *op, unsigned int s,
             unsigned char rm_type, struct ud_operand *opreg,
             unsigned int reg_size, unsigned char reg_type)
{
  unsigned char mod, rm, reg;

  inp_next(u);

  /* get mod, r/m and reg fields; rex.b/rex.r extend rm/reg to 4 bits */
  mod = MODRM_MOD(inp_curr(u));
  rm  = (REX_B(u->pfx_rex) << 3) | MODRM_RM(inp_curr(u));
  reg = (REX_R(u->pfx_rex) << 3) | MODRM_REG(inp_curr(u));

  op->size = (uint8_t) resolve_operand_size(u, s);

  /* if mod is 11b, then the UD_R_m specifies a gpr/mmx/sse/control/debug */
  if (mod == 3) {
    op->type = UD_OP_REG;
    if (rm_type == T_GPR)
      op->base = decode_gpr(u, op->size, rm);
    else
      op->base = resolve_reg(u, rm_type, (REX_B(u->pfx_rex) << 3) | (rm&7));
  }
  /* else its memory addressing */
  else {
    op->type = UD_OP_MEM;

    /* 64bit addressing */
    if (u->adr_mode == 64) {
      op->base = UD_R_RAX + rm;

      /* get offset type (displacement width in bits) */
      if (mod == 1)
        op->offset = 8;
      else if (mod == 2)
        op->offset = 32;
      else if (mod == 0 && (rm & 7) == 5) {
        /* mod=00, rm=101 is rip-relative disp32 in 64-bit mode */
        op->base = UD_R_RIP;
        op->offset = 32;
      } else
        op->offset = 0;

      /* Scale-Index-Base (SIB): rm=100 means a SIB byte follows */
      if ((rm & 7) == 4) {
        inp_next(u);
        /* scale 1 is encoded as 0; "& ~1" maps 1 -> 0 (no scale) */
        op->scale = (1 << SIB_S(inp_curr(u))) & ~1;
        op->index = UD_R_RAX + (SIB_I(inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
        op->base  = UD_R_RAX + (SIB_B(inp_curr(u)) | (REX_B(u->pfx_rex) << 3));

        /* special conditions for base reference */
        /* index=rsp encodes "no index" */
        if (op->index == UD_R_RSP) {
          op->index = UD_NONE;
          op->scale = UD_NONE;
        }

        /* base=rbp/r13 with mod=00 means disp32 with no base */
        if (op->base == UD_R_RBP || op->base == UD_R_R13) {
          if (mod == 0)
            op->base = UD_NONE;
          if (mod == 1)
            op->offset = 8;
          else
            op->offset = 32;
        }
      }
    }

    /* 32-Bit addressing mode */
    else if (u->adr_mode == 32) {
      /* get base */
      op->base = UD_R_EAX + rm;

      /* get offset type (displacement width in bits) */
      if (mod == 1)
        op->offset = 8;
      else if (mod == 2)
        op->offset = 32;
      else if (mod == 0 && rm == 5) {
        /* mod=00, rm=101 is absolute disp32 */
        op->base = UD_NONE;
        op->offset = 32;
      } else
        op->offset = 0;

      /* Scale-Index-Base (SIB) */
      if ((rm & 7) == 4) {
        inp_next(u);
        op->scale = (1 << SIB_S(inp_curr(u))) & ~1;
        op->index = UD_R_EAX + (SIB_I(inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
        op->base  = UD_R_EAX + (SIB_B(inp_curr(u)) | (REX_B(u->pfx_rex) << 3));

        /* index=esp encodes "no index" */
        if (op->index == UD_R_ESP) {
          op->index = UD_NONE;
          op->scale = UD_NONE;
        }

        /* special condition for base reference */
        if (op->base == UD_R_EBP) {
          if (mod == 0)
            op->base = UD_NONE;
          if (mod == 1)
            op->offset = 8;
          else
            op->offset = 32;
        }
      }
    }

    /* 16bit addressing mode: fixed base/index pairs per rm */
    else {
      switch (rm) {
      case 0: op->base = UD_R_BX; op->index = UD_R_SI; break;
      case 1: op->base = UD_R_BX; op->index = UD_R_DI; break;
      case 2: op->base = UD_R_BP; op->index = UD_R_SI; break;
      case 3: op->base = UD_R_BP; op->index = UD_R_DI; break;
      case 4: op->base = UD_R_SI; break;
      case 5: op->base = UD_R_DI; break;
      case 6: op->base = UD_R_BP; break;
      case 7: op->base = UD_R_BX; break;
      }

      /* mod=00, rm=110 is absolute disp16 */
      if (mod == 0 && rm == 6) {
        op->offset= 16;
        op->base = UD_NONE;
      }
      else if (mod == 1)
        op->offset = 8;
      else if (mod == 2)
        op->offset = 16;
    }
  }

  /* extract offset (displacement), if any */
  switch(op->offset) {
  case 8 : op->lval.ubyte  = inp_uint8(u);  break;
  case 16: op->lval.uword  = inp_uint16(u); break;
  case 32: op->lval.udword = inp_uint32(u); break;
  default: break;
  }

  /* resolve register encoded in reg field */
  if (opreg) {
    opreg->type = UD_OP_REG;
    opreg->size = (uint8_t)resolve_operand_size(u, reg_size);
    if (reg_type == T_GPR)
      opreg->base = decode_gpr(u, opreg->size, reg);
    else
      opreg->base = resolve_reg(u, reg_type, reg);
  }
}
/*
 * inp_uint8
 * inp_uint16
 * inp_uint32
 * inp_uint64
 * Load little-endian values from input.
 * 8-bit case: a single byte, no endianness handling needed.
 */
static uint8_t
inp_uint8(struct ud* u)
{
  uint8_t value = inp_next(u);
  return value;
}
/* -----------------------------------------------------------------------------
 * inp_move() - Move ahead n input bytes (discarding them).
 * -----------------------------------------------------------------------------
 */
extern void
inp_move(struct ud* u, size_t n)
{
  size_t i;
  for (i = 0; i < n; ++i) {
    inp_next(u);
  }
}
/* * decode_prefixes * * Extracts instruction prefixes. */ static int decode_prefixes(struct ud *u) { int done = 0; uint8_t curr, last = 0; UD_RETURN_ON_ERROR(u); do { last = curr; curr = inp_next(u); UD_RETURN_ON_ERROR(u); if (u->inp_ctr == MAX_INSN_LENGTH) { UD_RETURN_WITH_ERROR(u, "max instruction length"); } switch (curr) { case 0x2E: u->pfx_seg = UD_R_CS; break; case 0x36: u->pfx_seg = UD_R_SS; break; case 0x3E: u->pfx_seg = UD_R_DS; break; case 0x26: u->pfx_seg = UD_R_ES; break; case 0x64: u->pfx_seg = UD_R_FS; break; case 0x65: u->pfx_seg = UD_R_GS; break; case 0x67: /* adress-size override prefix */ u->pfx_adr = 0x67; break; case 0xF0: u->pfx_lock = 0xF0; break; case 0x66: u->pfx_opr = 0x66; break; case 0xF2: u->pfx_str = 0xf2; break; case 0xF3: u->pfx_str = 0xf3; break; default: /* consume if rex */ done = (u->dis_mode == 64 && (curr & 0xF0) == 0x40) ? 0 : 1; break; } } while (!done); /* rex prefixes in 64bit mode, must be the last prefix */ if (u->dis_mode == 64 && (last & 0xF0) == 0x40) { u->pfx_rex = last; } return 0; }
/* * decode_modrm_rm * * Decodes rm field of mod/rm byte * */ static void decode_modrm_rm(struct ud *u, struct ud_operand *op, unsigned char type, unsigned int size) { unsigned char mod, rm, reg; /* get mod, r/m and reg fields */ mod = MODRM_MOD(modrm(u)); rm = (REX_B(u->pfx_rex) << 3) | MODRM_RM(modrm(u)); reg = (REX_R(u->pfx_rex) << 3) | MODRM_REG(modrm(u)); op->size = resolve_operand_size(u, size); /* * If mod is 11b, then the modrm.rm specifies a register. * */ if (mod == 3) { op->type = UD_OP_REG; if (type == T_GPR) { op->base = decode_gpr(u, op->size, rm); } else { op->base = resolve_reg(u, type, (REX_B(u->pfx_rex) << 3) | (rm & 7)); } return; } /* * !11 => Memory Address */ op->type = UD_OP_MEM; if (u->adr_mode == 64) { op->base = UD_R_RAX + rm; if (mod == 1) { op->offset = 8; } else if (mod == 2) { op->offset = 32; } else if (mod == 0 && (rm & 7) == 5) { op->base = UD_R_RIP; op->offset = 32; } else { op->offset = 0; } /* * Scale-Index-Base (SIB) */ if ((rm & 7) == 4) { inp_next(u); op->scale = (1 << SIB_S(inp_curr(u))) & ~1; op->index = UD_R_RAX + (SIB_I(inp_curr(u)) | (REX_X(u->pfx_rex) << 3)); op->base = UD_R_RAX + (SIB_B(inp_curr(u)) | (REX_B(u->pfx_rex) << 3)); /* special conditions for base reference */ if (op->index == UD_R_RSP) { op->index = UD_NONE; op->scale = UD_NONE; } if (op->base == UD_R_RBP || op->base == UD_R_R13) { if (mod == 0) { op->base = UD_NONE; } if (mod == 1) { op->offset = 8; } else { op->offset = 32; } } } } else if (u->adr_mode == 32) { op->base = UD_R_EAX + rm; if (mod == 1) { op->offset = 8; } else if (mod == 2) { op->offset = 32; } else if (mod == 0 && rm == 5) { op->base = UD_NONE; op->offset = 32; } else { op->offset = 0; } /* Scale-Index-Base (SIB) */ if ((rm & 7) == 4) { inp_next(u); op->scale = (1 << SIB_S(inp_curr(u))) & ~1; op->index = UD_R_EAX + (SIB_I(inp_curr(u)) | (REX_X(u->pfx_rex) << 3)); op->base = UD_R_EAX + (SIB_B(inp_curr(u)) | (REX_B(u->pfx_rex) << 3)); if (op->index == UD_R_ESP) { op->index = UD_NONE; 
op->scale = UD_NONE; } /* special condition for base reference */ if (op->base == UD_R_EBP) { if (mod == 0) { op->base = UD_NONE; } if (mod == 1) { op->offset = 8; } else { op->offset = 32; } } } } else { const unsigned int bases[] = { UD_R_BX, UD_R_BX, UD_R_BP, UD_R_BP, UD_R_SI, UD_R_DI, UD_R_BP, UD_R_BX }; const unsigned int indices[] = { UD_R_SI, UD_R_DI, UD_R_SI, UD_R_DI, UD_NONE, UD_NONE, UD_NONE, UD_NONE }; op->base = bases[rm & 7]; op->index = indices[rm & 7]; if (mod == 0 && rm == 6) { op->offset= 16; op->base = UD_NONE; } else if (mod == 1) { op->offset = 8; } else if (mod == 2) { op->offset = 16; } } /* * extract offset, if any */ switch (op->offset) { case 8 : op->lval.ubyte = inp_uint8(u); break; case 16: op->lval.uword = inp_uint16(u); break; case 32: op->lval.udword = inp_uint32(u); break; case 64: op->lval.uqword = inp_uint64(u); break; default: break; } }
/*
 * decode_modrm_rm
 *
 * Decodes rm field of mod/rm byte.
 *
 * Fills `op` with the r/m operand: a register (via decode_reg) when mod
 * is 11b, otherwise a memory operand, consuming any SIB byte from the
 * stream and delegating displacement extraction to decode_mem_disp.
 */
static void
decode_modrm_rm(struct ud *u,
                struct ud_operand *op,
                unsigned char type,    /* register type */
                unsigned int size)     /* operand size */
{
  size_t offset = 0;    /* displacement width in bits; 0 = none */
  unsigned char mod, rm;

  /* get mod and r/m fields; rex.b extends rm to 4 bits */
  mod = MODRM_MOD(modrm(u));
  rm  = (REX_B(u->pfx_rex) << 3) | MODRM_RM(modrm(u));

  /*
   * If mod is 11b, then the modrm.rm specifies a register.
   */
  if (mod == 3) {
    decode_reg(u, op, type, rm, size);
    return;
  }

  /*
   * !11b => Memory Address
   */
  op->type = UD_OP_MEM;
  op->size = resolve_operand_size(u, size);

  if (u->adr_mode == 64) {
    op->base = UD_R_RAX + rm;
    if (mod == 1) {
      offset = 8;
    } else if (mod == 2) {
      offset = 32;
    } else if (mod == 0 && (rm & 7) == 5) {
      /* mod=00, rm=101 is rip-relative disp32 in 64-bit mode */
      op->base = UD_R_RIP;
      offset = 32;
    } else {
      offset = 0;
    }

    /*
     * Scale-Index-Base (SIB): rm=100 means a SIB byte follows
     */
    if ((rm & 7) == 4) {
      inp_next(u);

      /* scale 1 is encoded as 0; "& ~1" maps 1 -> 0 (no scale) */
      op->scale = (1 << SIB_S(inp_curr(u))) & ~1;
      op->index = UD_R_RAX + (SIB_I(inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
      op->base  = UD_R_RAX + (SIB_B(inp_curr(u)) | (REX_B(u->pfx_rex) << 3));

      /* special conditions for base reference: index=rsp means none */
      if (op->index == UD_R_RSP) {
        op->index = UD_NONE;
        op->scale = UD_NONE;
      }

      /* base=rbp/r13 with mod=00 means disp32 with no base */
      if (op->base == UD_R_RBP || op->base == UD_R_R13) {
        if (mod == 0) {
          op->base = UD_NONE;
        }
        if (mod == 1) {
          offset = 8;
        } else {
          offset = 32;
        }
      }
    }
  } else if (u->adr_mode == 32) {
    op->base = UD_R_EAX + rm;
    if (mod == 1) {
      offset = 8;
    } else if (mod == 2) {
      offset = 32;
    } else if (mod == 0 && rm == 5) {
      /* mod=00, rm=101 is absolute disp32 */
      op->base = UD_NONE;
      offset = 32;
    } else {
      offset = 0;
    }

    /* Scale-Index-Base (SIB) */
    if ((rm & 7) == 4) {
      inp_next(u);

      op->scale = (1 << SIB_S(inp_curr(u))) & ~1;
      op->index = UD_R_EAX + (SIB_I(inp_curr(u)) | (REX_X(u->pfx_rex) << 3));
      op->base  = UD_R_EAX + (SIB_B(inp_curr(u)) | (REX_B(u->pfx_rex) << 3));

      /* index=esp means no index */
      if (op->index == UD_R_ESP) {
        op->index = UD_NONE;
        op->scale = UD_NONE;
      }

      /* special condition for base reference */
      if (op->base == UD_R_EBP) {
        if (mod == 0) {
          op->base = UD_NONE;
        }
        if (mod == 1) {
          offset = 8;
        } else {
          offset = 32;
        }
      }
    }
  } else {
    /* 16-bit addressing: fixed base/index pairs per rm */
    const unsigned int bases[]   = { UD_R_BX, UD_R_BX, UD_R_BP, UD_R_BP,
                                     UD_R_SI, UD_R_DI, UD_R_BP, UD_R_BX };
    const unsigned int indices[] = { UD_R_SI, UD_R_DI, UD_R_SI, UD_R_DI,
                                     UD_NONE, UD_NONE, UD_NONE, UD_NONE };
    op->base  = bases[rm & 7];
    op->index = indices[rm & 7];
    if (mod == 0 && rm == 6) {
      /* mod=00, rm=110 is absolute disp16 */
      offset = 16;
      op->base = UD_NONE;
    } else if (mod == 1) {
      offset = 8;
    } else if (mod == 2) {
      offset = 16;
    }
  }

  if (offset) {
    decode_mem_disp(u, offset, op);
  }
}
/*
 * search_itab()
 *
 * Searches the instruction tables for the right entry. Starts from the
 * 1-byte table (or a 0F / sse-prefixed 0F table), then repeatedly
 * follows "group" pseudo-entries that re-index into sub-tables by
 * modrm.reg, modrm.mod, modrm.rm, x87 opcode byte, operand/address/
 * disassembly mode, or vendor, until a leaf mnemonic is found.
 * On success stores the entry and mnemonic in `u` and returns 0;
 * returns -1 on input error.
 */
static int search_itab( struct ud * u )
{
  struct ud_itab_entry * e = NULL;
  enum ud_itab_index table;
  uint8_t peek;
  uint8_t did_peek = 0;   /* nonzero if the modrm byte was peeked, not consumed */
  uint8_t curr;
  uint8_t index;

  /* if in state of error, return */
  if ( u->error )
    return -1;

  /* get first byte of opcode. */
  inp_next(u);
  if ( u->error )
    return -1;
  curr = inp_curr(u);

  /* resolve xchg, nop, pause crazyness:
   * 0x90 without rex.b is nop, or pause when prefixed with F3 */
  if ( 0x90 == curr ) {
    if ( !( u->dis_mode == 64 && REX_B( u->pfx_rex ) ) ) {
      if ( u->pfx_rep ) {
        u->pfx_rep = 0;
        e = & ie_pause;
      } else {
        e = & ie_nop;
      }
      goto found_entry;
    }
  }

  /* get top-level table */
  if ( 0x0F == curr ) {
    table = ITAB__0F;
    curr = inp_next(u);
    if ( u->error )
      return -1;

    /* 2byte opcodes can be modified by 0x66, F3, and F2 prefixes;
     * when the prefixed table has a valid entry, switch to it and
     * consume the prefix's other role */
    if ( 0x66 == u->pfx_insn ) {
      if ( ud_itab_list[ ITAB__PFX_SSE66__0F ][ curr ].mnemonic != UD_Iinvalid ) {
        table = ITAB__PFX_SSE66__0F;
        u->pfx_opr = 0;
      }
    } else if ( 0xF2 == u->pfx_insn ) {
      if ( ud_itab_list[ ITAB__PFX_SSEF2__0F ][ curr ].mnemonic != UD_Iinvalid ) {
        table = ITAB__PFX_SSEF2__0F;
        u->pfx_repne = 0;
      }
    } else if ( 0xF3 == u->pfx_insn ) {
      if ( ud_itab_list[ ITAB__PFX_SSEF3__0F ][ curr ].mnemonic != UD_Iinvalid ) {
        table = ITAB__PFX_SSEF3__0F;
        u->pfx_repe = 0;
        u->pfx_rep = 0;
      }
    }
  /* pick an instruction from the 1byte table */
  } else {
    table = ITAB__1BYTE;
  }

  index = curr;

search:
  e = & ud_itab_list[ table ][ index ];

  /* if mnemonic constant is a standard instruction constant
   * our search is over.
   */
  if ( e->mnemonic < UD_Id3vil ) {
    if ( e->mnemonic == UD_Iinvalid ) {
      /* consume the peeked byte so the stream stays consistent */
      if ( did_peek ) {
        inp_next( u );
        if ( u->error )
          return -1;
      }
      goto found_entry;
    }
    goto found_entry;
  }

  /* group pseudo-entry: its prefix field names the sub-table */
  table = e->prefix;

  switch ( e->mnemonic ) {
  case UD_Igrp_reg:
    /* index sub-table by modrm.reg (peek: modrm not consumed yet) */
    peek = inp_peek( u );
    did_peek = 1;
    index = MODRM_REG( peek );
    break;
  case UD_Igrp_mod:
    /* index sub-table by whether modrm.mod is 11b */
    peek = inp_peek( u );
    did_peek = 1;
    index = MODRM_MOD( peek );
    if ( index == 3 )
      index = ITAB__MOD_INDX__11;
    else
      index = ITAB__MOD_INDX__NOT_11;
    break;
  case UD_Igrp_rm:
    /* index sub-table by modrm.rm (modrm consumed here) */
    curr = inp_next( u );
    did_peek = 0;
    if ( u->error )
      return -1;
    index = MODRM_RM( curr );
    break;
  case UD_Igrp_x87:
    /* x87: opcode extension byte in range 0xC0.. */
    curr = inp_next( u );
    did_peek = 0;
    if ( u->error )
      return -1;
    index = curr - 0xC0;
    break;
  case UD_Igrp_osize:
    if ( u->opr_mode == 64 )
      index = ITAB__MODE_INDX__64;
    else if ( u->opr_mode == 32 )
      index = ITAB__MODE_INDX__32;
    else
      index = ITAB__MODE_INDX__16;
    break;
  case UD_Igrp_asize:
    if ( u->adr_mode == 64 )
      index = ITAB__MODE_INDX__64;
    else if ( u->adr_mode == 32 )
      index = ITAB__MODE_INDX__32;
    else
      index = ITAB__MODE_INDX__16;
    break;
  case UD_Igrp_mode:
    if ( u->dis_mode == 64 )
      index = ITAB__MODE_INDX__64;
    else if ( u->dis_mode == 32 )
      index = ITAB__MODE_INDX__32;
    else
      index = ITAB__MODE_INDX__16;
    break;
  case UD_Igrp_vendor:
    if ( u->vendor == UD_VENDOR_INTEL )
      index = ITAB__VENDOR_INDX__INTEL;
    else if ( u->vendor == UD_VENDOR_AMD )
      index = ITAB__VENDOR_INDX__AMD;
    /* else */
    /*   assert( !"unrecognized vendor id" ); */
    break;
  case UD_Id3vil:
    /* assert( !"invalid instruction mnemonic constant Id3vil" ); */
    break;
  default:
    /* assert( !"invalid instruction mnemonic constant" ); */
    break;
  }

  goto search;

found_entry:
  u->itab_entry = e;
  u->mnemonic = u->itab_entry->mnemonic;
  return 0;
}
/*
 * decode_prefixes
 *
 * Extracts instruction prefixes. Consumes bytes from the input stream
 * until a non-prefix byte is found, recording segment/operand/address/
 * lock/rep(ne)/rex prefixes in the decode state. On return the stream
 * is rewound one byte so the opcode byte is read next.
 * Returns 0 on success, -1 on input error or over-long prefix run.
 */
static int decode_prefixes(struct ud *u)
{
  unsigned int have_pfx = 1;
  unsigned int i;
  uint8_t curr;

  /* if in error state, bail out */
  if ( u->error )
    return -1;

  /* keep going as long as there are prefixes available */
  for ( i = 0; have_pfx ; ++i ) {

    /* Get next byte. */
    inp_next(u);
    if ( u->error )
      return -1;
    curr = inp_curr( u );

    /* rex prefixes in 64bit mode */
    if ( u->dis_mode == 64 && ( curr & 0xF0 ) == 0x40 ) {
      u->pfx_rex = curr;
    } else {
      /* any non-rex prefix clears pfx_rex: a rex prefix is only valid
       * when it is the last prefix before the opcode */
      switch ( curr ) {
      case 0x2E : u->pfx_seg = UD_R_CS; u->pfx_rex = 0; break;
      case 0x36 : u->pfx_seg = UD_R_SS; u->pfx_rex = 0; break;
      case 0x3E : u->pfx_seg = UD_R_DS; u->pfx_rex = 0; break;
      case 0x26 : u->pfx_seg = UD_R_ES; u->pfx_rex = 0; break;
      case 0x64 : u->pfx_seg = UD_R_FS; u->pfx_rex = 0; break;
      case 0x65 : u->pfx_seg = UD_R_GS; u->pfx_rex = 0; break;
      case 0x67 : /* address-size override prefix */
        u->pfx_adr = 0x67;
        u->pfx_rex = 0;
        break;
      case 0xF0 :
        u->pfx_lock = 0xF0;
        u->pfx_rex = 0;
        break;
      case 0x66:
        /* the 0x66 sse prefix is only effective if no other sse prefix
         * has already been specified. */
        if ( !u->pfx_insn ) u->pfx_insn = 0x66;
        u->pfx_opr = 0x66;
        u->pfx_rex = 0;
        break;
      case 0xF2:
        u->pfx_insn = 0xF2;
        u->pfx_repne = 0xF2;
        u->pfx_rex = 0;
        break;
      case 0xF3:
        u->pfx_insn = 0xF3;
        u->pfx_rep = 0xF3;
        u->pfx_repe = 0xF3;
        u->pfx_rex = 0;
        break;
      default :
        /* No more prefixes */
        have_pfx = 0;
        break;
      }
    }

    /* check if we reached max instruction length */
    if ( i + 1 == MAX_INSN_LENGTH ) {
      u->error = 1;
      break;
    }
  }

  /* return status */
  if ( u->error )
    return -1;

  /* rewind back one byte in stream, since the above loop
   * stops with a non-prefix byte. */
  inp_back(u);
  return 0;
}