/* return the branch type of the (branch) inst */
uint
instr_branch_type(instr_t *cti_instr)
{
    instr_get_opcode(cti_instr); /* ensure opcode is valid */
    if (instr_get_opcode(cti_instr) == OP_blx) {
        /* To handle the mode switch we go through the ibl.
         * FIXME i#1551: once we have far linking through stubs we should
         * remove this and have a faster link through the stub.
         */
        return LINK_INDIRECT | LINK_CALL;
    }
    /* We treat a predicated call as a cbr, not a call. */
    else if (instr_is_cbr_arch(cti_instr) || instr_is_ubr_arch(cti_instr))
        return LINK_DIRECT | LINK_JMP;
    else if (instr_is_call_direct(cti_instr))
        return LINK_DIRECT | LINK_CALL;
    else if (instr_is_call_indirect(cti_instr))
        return LINK_INDIRECT | LINK_CALL;
    else if (instr_is_return(cti_instr))
        return LINK_INDIRECT | LINK_RETURN;
    else if (instr_is_mbr_arch(cti_instr))
        return LINK_INDIRECT | LINK_JMP;
    else
        CLIENT_ASSERT(false, "instr_branch_type: unknown opcode");
    return LINK_INDIRECT;
}
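/* Illustrative usage sketch, not part of the build: a hypothetical caller
 * dispatching on the LINK_ flags returned above. TEST() is DynamoRIO's
 * bit-test macro; the helper name and the policy shown are assumptions,
 * not code from this file.
 */
#if 0
static bool
cti_needs_ibl(instr_t *cti)
{
    uint type = instr_branch_type(cti);
    /* Indirect branches (including the OP_blx mode switch above) must be
     * linked through the indirect branch lookup rather than directly.
     */
    return TEST(LINK_INDIRECT, type);
}
#endif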
/* Here we attempt to combine a loop involving ldex (load exclusive) and
 * stex (store exclusive) into an OP_ldstex macro-instruction. The algorithm
 * is roughly this:
 *
 * Decode up to (2 * N) instructions while:
 * - none of them is an indirect branch or a system call
 * - none of them is a direct branch out of these (2 * N) instructions
 * - none of them is OP_xx (to be safe)
 * - there is, or might yet be, both ldex and stex in the first N
 * - none of them is a non-branch PC-relative instruction: ADR, ADRP,
 *   PC-relative PRFM, literal load (this last condition could be removed
 *   if we mangled such instructions as we encountered them)
 *
 * To save time, give up if the first instruction is neither ldex nor stex
 * and there is no branch to it.
 * Take a sub-block containing both ldex and stex from the first N instructions.
 * Expand this sub-block to a minimal single-entry single-exit block.
 * Give up if the sub-block grows beyond N instructions.
 * Finally, give up if the sub-block does not contain the first instruction.
 * Also give up if the sub-block uses all of X0-X5 and the stolen register
 * because we would be unable to mangle such a block.
 *
 * XXX: This function uses a lot of CPU time. It could be made faster in
 * several ways, for example by caching decoded instructions or using a
 * custom decoder to recognise the particular instructions that we care
 * about here.
 */
byte *
decode_ldstex(dcontext_t *dcontext, byte *pc_, byte *orig_pc_, instr_t *instr_ldstex)
{
#define N (MAX_INSTR_LENGTH / AARCH64_INSTR_SIZE)
    instr_t ibuf[2 * N];
    uint *pc = (uint *)pc_;
    uint *orig_pc = (uint *)orig_pc_;
    bool seen_ldex = false;
    bool seen_stex = false;
    bool seen_branch_to_start = false;
    bool failed = false;
    int ldstex_beg = -1;
    int ldstex_end = -1;
    int i, len;

    /* Decode up to 2 * N instructions. */
    for (i = 0; i < 2 * N; i++) {
        instr_t *instr = &ibuf[i];
        instr_init(dcontext, instr);
        decode_from_copy(dcontext, (byte *)(pc + i), (byte *)(orig_pc + i), instr);
        if (instr_is_mbr_arch(instr) || instr_is_syscall(instr) ||
            instr_get_opcode(instr) == OP_xx || instr_is_nonbranch_pcrel(instr))
            break;
        if (instr_is_ubr_arch(instr) || instr_is_cbr_arch(instr)) {
            ptr_uint_t target = (ptr_uint_t)instr_get_branch_target_pc(instr);
            if (target < (ptr_uint_t)pc || target > (ptr_uint_t)(pc + 2 * N))
                break;
            if (target == (ptr_uint_t)pc)
                seen_branch_to_start = true;
        }
        if (instr_is_exclusive_load(instr))
            seen_ldex = true;
        if (instr_is_exclusive_store(instr))
            seen_stex = true;
        /* Stop at N instructions unless we have already seen both ldex and stex. */
        if (i + 1 >= N && !(seen_ldex && seen_stex))
            break;
        if (ldstex_beg == -1 && (seen_ldex || seen_stex))
            ldstex_beg = i;
        if (ldstex_end == -1 && (seen_ldex && seen_stex))
            ldstex_end = i + 1;
    }
    if (i < 2 * N) {
        instr_reset(dcontext, &ibuf[i]);
        len = i;
    } else
        len = 2 * N;

    /* Quick check for hopeless situations. */
    if (len == 0 || !(seen_ldex && seen_stex) ||
        !(seen_branch_to_start ||
          (instr_is_exclusive_load(&ibuf[0]) || instr_is_exclusive_store(&ibuf[0])))) {
        for (i = 0; i < len; i++)
            instr_reset(dcontext, &ibuf[i]);
        return NULL;
    }

    /* There are several ways we could choose a sub-block containing both ldex
     * and stex from the first N instructions. Investigate further, perhaps.
     * We have already set ldstex_beg and ldstex_end.
     */
    ASSERT(ldstex_beg != -1 && ldstex_end != -1 && ldstex_beg < ldstex_end);
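
    /* Worked example (hypothetical input, not from this file): for a loop like
     *
     *   0: ldaxr w0, [x1]     ; exclusive load: ldstex_beg = 0
     *   1: cbz   w0, #4       ; forward branch past the store
     *   2: stxr  w2, w0, [x1] ; exclusive store: ldstex_end = 3
     *   3: cbnz  w2, #0       ; retry branch back to the start
     *   4: ...
     *
     * the expansion below first grows the sub-block to [0, 4) because the cbz
     * at 1 exits past ldstex_end; the cbnz at 3 then targets the entry, so
     * [0, 4) is already single-entry single-exit and becomes one OP_ldstex.
     */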
    /* Expand ldstex sub-block until it is a single-entry single-exit block. */
    for (;;) {
        int new_beg = ldstex_beg;
        int new_end = ldstex_end;
        for (i = ldstex_beg; i < ldstex_end; i++) {
            instr_t *instr = &ibuf[i];
            if (instr_is_ubr_arch(instr) || instr_is_cbr_arch(instr)) {
                int target = (uint *)instr_get_branch_target_pc(instr) - pc;
                if (target > len) {
                    failed = true;
                    break;
                }
                if (target < new_beg)
                    new_beg = target;
                if (target > new_end)
                    new_end = target;
            }
        }
        if (new_beg == ldstex_beg && new_end == ldstex_end)
            break;
        ldstex_beg = new_beg;
        ldstex_end = new_end;
    }
    /* Give up if the sub-block does not contain the first instruction, or if
     * it has grown beyond N instructions and so cannot fit in one OP_ldstex.
     */
    if (ldstex_beg != 0 || ldstex_end - ldstex_beg > N)
        failed = true;
    if (!failed) {
        /* Check whether the sub-block uses the stolen register and all of X0-X5.
         * If it does, it would be impossible to mangle it so it is better not to
         * create an OP_ldstex.
         */
        reg_id_t regs[] = { dr_reg_stolen, DR_REG_X0, DR_REG_X1, DR_REG_X2,
                            DR_REG_X3,     DR_REG_X4, DR_REG_X5 };
        int r;
        for (r = 0; r < sizeof(regs) / sizeof(*regs); r++) {
            for (i = ldstex_beg; i < ldstex_end; i++) {
                if (instr_uses_reg(&ibuf[i], regs[r]))
                    break;
            }
            /* This register is unused, so mangling would have a scratch register. */
            if (i >= ldstex_end)
                break;
        }
        if (r >= sizeof(regs) / sizeof(*regs))
            failed = true;
    }
    if (!failed) {
        instr_create_ldstex(dcontext, ldstex_end - ldstex_beg, pc + ldstex_beg,
                            &ibuf[ldstex_beg], instr_ldstex);
    }
    for (i = 0; i < len; i++)
        instr_reset(dcontext, &ibuf[i]);
    return failed ? NULL : (byte *)(pc + ldstex_end);
}
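
/* Hypothetical caller sketch, not part of the build: a block builder is
 * assumed to try decode_ldstex() first and fall back to ordinary decoding
 * when it returns NULL. instr_create(), instrlist_append() and
 * instr_destroy() are real DynamoRIO routines; the surrounding loop and the
 * "bb", "pc" and "orig_pc" variables here are illustrative only.
 */
#if 0
    instr_t *ldstex = instr_create(dcontext);
    byte *next_pc = decode_ldstex(dcontext, pc, orig_pc, ldstex);
    if (next_pc != NULL) {
        /* The whole exclusive-monitor sequence is now one OP_ldstex. */
        instrlist_append(bb, ldstex);
        pc = next_pc;
    } else {
        instr_destroy(dcontext, ldstex);
        /* Decode a single instruction as usual. */
    }
#endif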