/*
 * Disassembles the insn at @pc and sets @next_pc to next PC (which could be
 * @pc +2/4/6 (ARCompact ISA allows free intermixing of 16/32 bit insns).
 *
 * If @pc is a branch
 *	-@tgt_if_br is set to branch target.
 *	-If branch has delay slot, @next_pc updated with actual next PC.
 */
int __kprobes disasm_next_pc(unsigned long pc, struct pt_regs *regs,
			     struct callee_regs *cregs,
			     unsigned long *next_pc, unsigned long *tgt_if_br)
{
	struct disasm_state state;

	memset(&state, 0, sizeof(state));
	disasm_instr(pc, &state, 0, regs, cregs);

	/* fall-through is simply the instruction's own length */
	*next_pc = pc + state.instr_len;

	/* branch/jump/loop insns report their taken target */
	if (state.is_branch)
		*tgt_if_br = state.target;

	/*
	 * A delay-slot insn executes before the branch takes effect, so the
	 * fall-through is the insn AFTER the one sitting in the delay slot.
	 */
	if (state.delay_slot) {
		struct disasm_state dslot;

		disasm_instr(*next_pc, &dslot, 0, regs, cregs);
		*next_pc += dslot.instr_len;
	}

	/* Zero Overhead Loop: wrap back to lp_start at the loop end */
	if (!(regs->status32 & STATUS32_L) && *next_pc == regs->lp_end &&
	    regs->lp_count > 1)
		*next_pc = regs->lp_start;

	return state.is_branch;
}
/*
 * Translate exactly one instruction at the current PC into a fresh basic
 * block, wiring its branch-taken / fall-through successors to single-step
 * return blocks. Returns the block containing the translated instruction.
 */
BasicBlock *
cpu_translate_singlestep(cpu_t *cpu, BasicBlock *bb_ret, BasicBlock *bb_trap)
{
	addr_t pc = cpu->f.get_pc(cpu, cpu->rf.grf);

	BasicBlock *cur_bb = BasicBlock::Create(_CTX(), "instruction",
		cpu->dyncom_engine->cur_func, 0);

	if (LOGGING)
		disasm_instr(cpu, pc);

	tag_t tag;
	addr_t new_pc, next_pc;
	cpu->f.tag_instr(cpu, pc, &tag, &new_pc, &next_pc);

	/* successor for the taken path */
	BasicBlock *bb_target = NULL;
	if ((tag & TAG_RET) || new_pc == NEW_PC_NONE)
		bb_target = bb_ret;	/* translate_instr() will set PC */
	else if (tag & (TAG_CALL | TAG_BRANCH))
		bb_target = create_singlestep_return_basicblock(cpu, new_pc, bb_ret);

	/* successor for the not-taken path of a conditional instruction */
	BasicBlock *bb_next = NULL;
	if (tag & TAG_CONDITIONAL)
		bb_next = create_singlestep_return_basicblock(cpu, next_pc, bb_ret);

	BasicBlock *bb_cont = translate_instr(cpu, pc, next_pc, tag, bb_target,
		bb_trap, bb_next, bb_ret, cur_bb);

	/* non-branch instruction: append "store PC & return" */
	if (bb_cont)
		emit_store_pc_return(cpu, bb_cont, next_pc, bb_ret);

	return cur_bb;
}
/* * Handle an unaligned access * Returns 0 if successfully handled, 1 if some error happened */ int misaligned_fixup(unsigned long address, struct pt_regs *regs, struct callee_regs *cregs) { struct disasm_state state; char buf[TASK_COMM_LEN]; /* handle user mode only and only if enabled by sysadmin */ if (!user_mode(regs) || !unaligned_enabled) return 1; if (no_unaligned_warning) { pr_warn_once("%s(%d) made unaligned access which was emulated" " by kernel assist\n. This can degrade application" " performance significantly\n. To enable further" " logging of such instances, please \n" " echo 0 > /proc/sys/kernel/ignore-unaligned-usertrap\n", get_task_comm(buf, current), task_pid_nr(current)); } else { /* Add rate limiting if it gets down to it */ pr_warn("%s(%d): unaligned access to/from 0x%lx by PC: 0x%lx\n", get_task_comm(buf, current), task_pid_nr(current), address, regs->ret); } disasm_instr(regs->ret, &state, 1, regs, cregs); if (state.fault) goto fault; /* ldb/stb should not have unaligned exception */ if ((state.zz == 1) || (state.di)) goto fault; if (!state.write) fixup_load(&state, regs, cregs); else fixup_store(&state, regs, cregs); if (state.fault) goto fault; if (delay_mode(regs)) { regs->ret = regs->bta; regs->status32 &= ~STATUS_DE_MASK; } else { regs->ret += state.instr_len; } return 0; fault: pr_err("Alignment trap: fault in fix-up %08lx at [<%08lx>]\n", state.words[0], address); return 1; }
/*
 * Disassemble the buffer @mem from address 0 up to @max_addr into @f,
 * printing a "name:" label line wherever the symbol table defines one.
 * disasm_instr() prints one instruction and returns the next address.
 */
void disasm(FILE* f, symbol_table_t* symtab, void* mem, size_t max_addr)
{
	for (unsigned_t addr = 0; addr < max_addr;
	     addr = disasm_instr(f, symtab, addr, mem)) {
		symbol_t* sym = symbol_table_find_label(symtab, addr);
		if (sym != NULL)
			fprintf(f, "%s:\n", sym->name);
	}
}
/*
 * disassemble a code stream
 */
void CTcT3Unasm::disasm(CTcUnasSrc *src, CTcUnasOut *out)
{
    char opc;

    /*
     * Pull opcode bytes until next_byte() reports end-of-source
     * (non-zero return), disassembling one instruction per byte fetched.
     */
    while (!src->next_byte(&opc))
        disasm_instr(src, out, opc);
}
/*
 * Translate a run of instructions starting at the current PC into one basic
 * block, stepping linearly (pc = next_pc) until control leaves the code
 * area or translate_instr() reports a control-flow change (NULL bb_cont).
 * Returns the block created for the entry PC.
 *
 * NOTE(review): bb_target/bb_next are declared outside the loop, so a value
 * set on one iteration carries into the next unless overwritten — presumably
 * intentional; confirm against translate_instr()'s contract.
 */
BasicBlock *
cpu_translate_singlestep_bb(cpu_t *cpu, BasicBlock *bb_ret, BasicBlock *bb_trap)
{
	addr_t entry = cpu->f.get_pc(cpu, cpu->rf.grf);
	addr_t pc = entry;
	BasicBlock *cur_bb = create_basicblock(cpu, pc, cpu->cur_func, BB_TYPE_NORMAL);
	tag_t tag;
	BasicBlock *bb_target = NULL, *bb_next = NULL, *bb_cont = NULL;

	do {
		//printf("%s:%d\n", __func__, __LINE__);
		addr_t new_pc, next_pc;

		if (LOGGING)
			disasm_instr(cpu, pc);

		/* classify the instruction and learn its successors */
		cpu->f.tag_instr(cpu, pc, &tag, &new_pc, &next_pc);

		/* get target basic block */
		if (tag & TAG_RET)
			bb_target = bb_ret;
		if (tag & (TAG_CALL|TAG_BRANCH)) {
			if (new_pc == NEW_PC_NONE) {
				/* translate_instr() will set PC */
				bb_target = bb_ret;
			} else {
				if (new_pc == entry) /* tight loop back to the entry: branch to ourselves */
					bb_target = cur_bb;
				else
					bb_target = create_singlestep_return_basicblock(cpu, new_pc, bb_ret);
			}
		}

		/* get not-taken basic block */
		if (tag & TAG_CONDITIONAL)
			bb_next = create_singlestep_return_basicblock(cpu, next_pc, bb_ret);

		bb_cont = translate_instr(cpu, pc, tag, bb_target, bb_trap, bb_next, cur_bb);

		/* advance to the fall-through instruction */
		pc = next_pc;
	} while (is_inside_code_area(cpu, pc) && /* end of code section */
			bb_cont); /* last instruction jumped away */

	return cur_bb;
}
/*
 * Translate the whole code region [code_start, code_end):
 *  1. create a basic block for every address that starts one (skipping
 *     blocks already translated into some other function),
 *  2. build a "dispatch" block that switches on the PC to reach any block,
 *  3. translate each block instruction-by-instruction, wiring branch
 *     targets through lookup_basicblock() or back to the dispatcher.
 * Returns the dispatch basic block.
 */
BasicBlock *
cpu_translate_all(cpu_t *cpu, BasicBlock *bb_ret, BasicBlock *bb_trap)
{
	// find all instructions that need labels and create basic blocks for them
	int bbs = 0;
	addr_t pc;
	pc = cpu->code_start;
	while (pc < cpu->code_end) {
		// Do not create the basic block if it is already present in some other function.
		if (is_start_of_basicblock(cpu, pc) && !(get_tag(cpu, pc) & TAG_TRANSLATED)) {
			create_basicblock(cpu, pc, cpu->cur_func, BB_TYPE_NORMAL);
			bbs++;
		}
		pc++;
	}
	LOG("bbs: %d\n", bbs);

	// create dispatch basicblock: load the emulated PC and switch on it,
	// defaulting to bb_ret for addresses with no translated block
	BasicBlock* bb_dispatch = BasicBlock::Create(_CTX(), "dispatch", cpu->cur_func, 0);
	Value *v_pc = new LoadInst(cpu->ptr_PC, "", false, bb_dispatch);
	SwitchInst* sw = SwitchInst::Create(v_pc, bb_ret, bbs, bb_dispatch);

	// translate basic blocks
	bbaddr_map &bb_addr = cpu->func_bb[cpu->cur_func];
	bbaddr_map::const_iterator it;
	for (it = bb_addr.begin(); it != bb_addr.end(); it++) {
		pc = it->first;
		BasicBlock *cur_bb = it->second;
		tag_t tag;
		// NOTE(review): bb_target/bb_next persist across the inner
		// do/while iterations; a stale value may be reused when a later
		// instruction's tag doesn't set them — confirm this is intended.
		BasicBlock *bb_target = NULL, *bb_next = NULL, *bb_cont = NULL;

		// Tag the function as translated.
		or_tag(cpu, pc, TAG_TRANSLATED);
		LOG("basicblock: L%08llx\n", (unsigned long long)pc);

		// Add dispatch switch case for basic block.
		ConstantInt* c = ConstantInt::get(getIntegerType(cpu->info.address_size), pc);
		sw->addCase(c, cur_bb);

		do {
			tag_t dummy1;

			if (LOGGING)
				disasm_instr(cpu, pc);

			tag = get_tag(cpu, pc);

			/* get address of the following instruction */
			addr_t new_pc, next_pc;
			cpu->f.tag_instr(cpu, pc, &dummy1, &new_pc, &next_pc);

			/* get target basic block */
			if (tag & TAG_RET)
				bb_target = bb_dispatch;
			if (tag & (TAG_CALL|TAG_BRANCH)) {
				if (new_pc == NEW_PC_NONE) /* translate_instr() will set PC */
					bb_target = bb_dispatch;
				else
					bb_target = (BasicBlock*)lookup_basicblock(cpu, cpu->cur_func, new_pc, bb_ret, BB_TYPE_NORMAL);
			}

			/* get not-taken basic block */
			if (tag & TAG_CONDITIONAL)
				bb_next = (BasicBlock*)lookup_basicblock(cpu, cpu->cur_func, next_pc, bb_ret, BB_TYPE_NORMAL);

			bb_cont = translate_instr(cpu, pc, tag, bb_target, bb_trap, bb_next, cur_bb);

			pc = next_pc;
		} while (
			/* new basic block starts here (and we haven't translated it yet)*/
			(!is_start_of_basicblock(cpu, pc)) &&
			/* end of code section */ //XXX no: this is whether it's TAG_CODE
			is_code(cpu, pc) &&
			/* last instruction jumped away */
			bb_cont
		);

		/* link with next basic block if there isn't a control flow instr. already */
		if (bb_cont) {
			BasicBlock *target = (BasicBlock*)lookup_basicblock(cpu, cpu->cur_func, pc, bb_ret, BB_TYPE_NORMAL);
			LOG("info: linking continue $%04llx!\n", (unsigned long long)pc);
			BranchInst::Create(target, bb_cont);
		}
	}

	return bb_dispatch;
}