/**
 * Translate exactly one guest instruction into a fresh LLVM basic block,
 * for single-step execution.  The current PC is read from the guest
 * register file, the instruction is tagged, and all control-flow
 * successors are routed to per-target "return" basic blocks so the
 * generated code exits back to the driver after one instruction.
 *
 * @param cpu     CPU core descriptor
 * @param bb_ret  basic block that stores PC and returns to the driver
 * @param bb_trap basic block taken for trapping instructions
 * @return the newly created basic block holding the translated instruction
 */
BasicBlock *
cpu_translate_singlestep(cpu_t *cpu, BasicBlock *bb_ret, BasicBlock *bb_trap)
{
	addr_t new_pc;
	tag_t tag;
	BasicBlock *cur_bb = NULL, *bb_target = NULL, *bb_next = NULL, *bb_cont = NULL;
	addr_t next_pc, pc = cpu->f.get_pc(cpu, cpu->rf.grf);

	cur_bb = BasicBlock::Create(_CTX(), "instruction", cpu->dyncom_engine->cur_func, 0);

	if (LOGGING)
		disasm_instr(cpu, pc);

	/* classify the instruction; obtain branch target and fall-through PCs */
	cpu->f.tag_instr(cpu, pc, &tag, &new_pc, &next_pc);

	/* get target basic block */
	if ((tag & TAG_RET) || (new_pc == NEW_PC_NONE)) /* translate_instr() will set PC */
		bb_target = bb_ret;
	else if (tag & (TAG_CALL|TAG_BRANCH))
		bb_target = create_singlestep_return_basicblock(cpu, new_pc, bb_ret);

	/* get not-taken & conditional basic block */
	if (tag & TAG_CONDITIONAL)
		bb_next = create_singlestep_return_basicblock(cpu, next_pc, bb_ret);

	bb_cont = translate_instr(cpu, pc, next_pc, tag, bb_target, bb_trap, bb_next, bb_ret, cur_bb);

	/* If it's not a branch, append "store PC & return" to basic block */
	if (bb_cont)
		emit_store_pc_return(cpu, bb_cont, next_pc, bb_ret);

	return cur_bb;
}
/*
 * Emit trap exit code for the 6502 frontend: persist the trapping
 * instruction's PC into the guest CPU state, then leave the JITted
 * function with the JIT_RETURN_TRAP status code.
 */
static void
arch_6502_trap(cpu_t *cpu, addr_t pc, BasicBlock *bb)
{
	/* the 6502 program counter is 16 bits wide */
	new StoreInst(CONST16(pc), cpu->ptr_PC, bb);
	/* signal the trap to the execution driver */
	ReturnInst::Create(_CTX(), CONST32(JIT_RETURN_TRAP), bb);
}
/*
 * Emit a call to the registered debug callback into basic block @bb,
 * passing the host address of the cpu_t as an opaque pointer constant.
 * Does nothing when no callback has been installed.
 */
void
arch_debug_me(cpu_t *cpu, BasicBlock *bb)
{
	if (cpu->ptr_func_debug == NULL)
		return;

	Type const *word_ty = cpu->exec_engine->getTargetData()->getIntPtrType(_CTX());
	/* bake the host-side cpu_t address into the IR as an inttoptr constant */
	Constant *cpu_addr = ConstantInt::get(word_ty, (uintptr_t)cpu);
	Value *cpu_ptr = ConstantExpr::getIntToPtr(cpu_addr, PointerType::getUnqual(word_ty));

	// XXX synchronize cpu context!
	CallInst::Create(cpu->ptr_func_debug, cpu_ptr, "", bb);
}
/*
 * Register the "syscall_func" callout in the module.
 * The callout's type is void(intptr *cpu, i32 syscall_number); the
 * resulting llvm::Function is cached in ptr_arch_func[1] so translated
 * code can call out of the JIT for guest system calls.
 */
static void syscall_func_init(cpu_t *cpu){
	//types
	std::vector<const Type*> type_func_syscall_args;
	PointerType *type_intptr = PointerType::get(cpu->dyncom_engine->exec_engine->getTargetData()->getIntPtrType(_CTX()), 0);
	const IntegerType *type_i32 = IntegerType::get(_CTX(), 32);
	type_func_syscall_args.push_back(type_intptr);	/* intptr *cpu */
	type_func_syscall_args.push_back(type_i32);	/* unsigned int */
	FunctionType *type_func_syscall_callout = FunctionType::get(
		Type::getVoidTy(cpu->dyncom_engine->mod->getContext()),	//return
		type_func_syscall_args,	/* Params */
		false);	/* isVarArg */
	Constant *syscall_const = cpu->dyncom_engine->mod->getOrInsertFunction("syscall_func",	//function name
		type_func_syscall_callout);	//return
	if(syscall_const == NULL) {
		/* bail out: proceeding would hand a NULL constant to cast<Function>() */
		fprintf(stderr, "Error:cannot insert function:syscall.\n");
		return;
	}
	Function *syscall_func = cast<Function>(syscall_const);
	syscall_func->setCallingConv(CallingConv::C);
	cpu->dyncom_engine->ptr_arch_func[1] = syscall_func;
}
/*
 * Allocate and initialize a cpu_t for the given architecture.
 * Selects the architecture frontend from the arch enum, zeroes the
 * bookkeeping state, lets the frontend initialize the register file,
 * allocates per-register Value* slot arrays, then creates the LLVM
 * Module and ExecutionEngine and probes the host target for FP80/FP128
 * support and endianness.  Exits the process on an unknown arch.
 */
cpu_t *
cpu_new(cpu_arch_t arch, uint32_t flags, uint32_t arch_flags)
{
	cpu_t *cpu;

	llvm::InitializeNativeTarget();

	cpu = new cpu_t;
	assert(cpu != NULL);
	memset(&cpu->info, 0, sizeof(cpu->info));
	memset(&cpu->rf, 0, sizeof(cpu->rf));

	cpu->info.type = arch;
	cpu->info.name = "noname";
	cpu->info.common_flags = flags;
	cpu->info.arch_flags = arch_flags;

	/* bind the architecture-specific frontend function table */
	switch (arch) {
		case CPU_ARCH_6502:
			cpu->f = arch_func_6502;
			break;
		case CPU_ARCH_M68K:
			cpu->f = arch_func_m68k;
			break;
		case CPU_ARCH_MIPS:
			cpu->f = arch_func_mips;
			break;
		case CPU_ARCH_M88K:
			cpu->f = arch_func_m88k;
			break;
		case CPU_ARCH_ARM:
			cpu->f = arch_func_arm;
			break;
		case CPU_ARCH_8086:
			cpu->f = arch_func_8086;
			break;
		case CPU_ARCH_FAPRA:
			cpu->f = arch_func_fapra;
			break;
		default:
			printf("illegal arch: %d\n", arch);
			exit(1);
	}

	cpu->code_start = 0;
	cpu->code_end = 0;
	cpu->code_entry = 0;
	cpu->tag = NULL;

	uint32_t i;
	for (i = 0; i < sizeof(cpu->func)/sizeof(*cpu->func); i++)
		cpu->func[i] = NULL;
	for (i = 0; i < sizeof(cpu->fp)/sizeof(*cpu->fp); i++)
		cpu->fp[i] = NULL;
	cpu->functions = 0;

	cpu->flags_codegen = CPU_CODEGEN_OPTIMIZE;
	cpu->flags_debug = CPU_DEBUG_NONE;
	cpu->flags_hint = CPU_HINT_NONE;
	cpu->flags = 0;

	// init the frontend
	cpu->f.init(cpu, &cpu->info, &cpu->rf);

	/* the frontend must have chosen register sizes the backend can JIT */
	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_GPR]) &&
		"the specified GPR size is not guaranteed to work");
	assert(is_valid_fpr_size(cpu->info.register_size[CPU_REG_FPR]) &&
		"the specified FPR size is not guaranteed to work");
	assert(is_valid_vr_size(cpu->info.register_size[CPU_REG_VR]) &&
		"the specified VR size is not guaranteed to work");
	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_XR]) &&
		"the specified XR size is not guaranteed to work");

	/* allocate one Value* slot per register, per register class */
	uint32_t count = cpu->info.register_count[CPU_REG_GPR];
	if (count != 0) {
		cpu->ptr_gpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_gpr = (Value **)calloc(count, sizeof(Value *));
	} else {
		cpu->ptr_gpr = NULL;
		cpu->in_ptr_gpr = NULL;
	}
	count = cpu->info.register_count[CPU_REG_XR];
	if (count != 0) {
		cpu->ptr_xr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_xr = (Value **)calloc(count, sizeof(Value *));
	} else {
		cpu->ptr_xr = NULL;
		cpu->in_ptr_xr = NULL;
	}
	count = cpu->info.register_count[CPU_REG_FPR];
	if (count != 0) {
		cpu->ptr_fpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_fpr = (Value **)calloc(count, sizeof(Value *));
	} else {
		cpu->ptr_fpr = NULL;
		cpu->in_ptr_fpr = NULL;
	}
	if (cpu->info.psr_size != 0) {
		cpu->ptr_FLAG = (Value **)calloc(cpu->info.flags_count, sizeof(Value*));
		assert(cpu->ptr_FLAG != NULL);
	}

	// init LLVM
	cpu->mod = new Module(cpu->info.name, _CTX());
	assert(cpu->mod != NULL);
	cpu->exec_engine = ExecutionEngine::create(cpu->mod);
	assert(cpu->exec_engine != NULL);

	// check if FP80 and FP128 are supported by this architecture.
	// XXX there is a better way to do this?
	std::string data_layout = cpu->exec_engine->getDataLayout()->getStringRepresentation();
	if (data_layout.find("f80") != std::string::npos) {
		LOG("INFO: FP80 supported.\n");
		cpu->flags |= CPU_FLAG_FP80;
	}
	if (data_layout.find("f128") != std::string::npos) {
		LOG("INFO: FP128 supported.\n");
		cpu->flags |= CPU_FLAG_FP128;
	}

	// check if we need to swap guest memory.
	if (cpu->exec_engine->getDataLayout()->isLittleEndian() ^ IS_LITTLE_ENDIAN(cpu))
		cpu->flags |= CPU_FLAG_SWAPMEM;

	cpu->timer_total[TIMER_TAG] = 0;
	cpu->timer_total[TIMER_FE] = 0;
	cpu->timer_total[TIMER_BE] = 0;
	cpu->timer_total[TIMER_RUN] = 0;

	return cpu;
}
/*
 * Translate all tagged-but-untranslated code into the current function.
 * First creates one LLVM basic block per guest basic-block start, then
 * builds a dispatch block that switches on the guest PC, and finally
 * translates each basic block instruction-by-instruction until it ends
 * in a control-flow instruction, a new basic-block boundary, or
 * non-code.  Returns the dispatch basic block.
 *
 * @param cpu     CPU core descriptor
 * @param bb_ret  default switch target: stores PC and returns to driver
 * @param bb_trap basic block taken for trapping instructions
 */
BasicBlock *
cpu_translate_all(cpu_t *cpu, BasicBlock *bb_ret, BasicBlock *bb_trap)
{
	// find all instructions that need labels and create basic blocks for them
	int bbs = 0;
	addr_t pc;
	pc = cpu->code_start;
	while (pc < cpu->code_end) {
		// Do not create the basic block if it is already present in some other function.
		if (is_start_of_basicblock(cpu, pc) && !(get_tag(cpu, pc) & TAG_TRANSLATED)) {
			create_basicblock(cpu, pc, cpu->cur_func, BB_TYPE_NORMAL);
			bbs++;
		}
		pc++;
	}
	LOG("bbs: %d\n", bbs);

	// create dispatch basicblock: switch on the guest PC to find where to resume
	BasicBlock* bb_dispatch = BasicBlock::Create(_CTX(), "dispatch", cpu->cur_func, 0);
	Value *v_pc = new LoadInst(cpu->ptr_PC, "", false, bb_dispatch);
	SwitchInst* sw = SwitchInst::Create(v_pc, bb_ret, bbs, bb_dispatch);

	// translate basic blocks
	bbaddr_map &bb_addr = cpu->func_bb[cpu->cur_func];
	bbaddr_map::const_iterator it;
	for (it = bb_addr.begin(); it != bb_addr.end(); it++) {
		pc = it->first;
		BasicBlock *cur_bb = it->second;
		tag_t tag;
		BasicBlock *bb_target = NULL, *bb_next = NULL, *bb_cont = NULL;

		// Tag the function as translated.
		or_tag(cpu, pc, TAG_TRANSLATED);

		LOG("basicblock: L%08llx\n", (unsigned long long)pc);

		// Add dispatch switch case for basic block.
		ConstantInt* c = ConstantInt::get(getIntegerType(cpu->info.address_size), pc);
		sw->addCase(c, cur_bb);

		/* translate one instruction at a time until the basic block ends */
		do {
			tag_t dummy1;

			if (LOGGING)
				disasm_instr(cpu, pc);

			tag = get_tag(cpu, pc);

			/* get address of the following instruction */
			addr_t new_pc, next_pc;
			cpu->f.tag_instr(cpu, pc, &dummy1, &new_pc, &next_pc);

			/* get target basic block */
			if (tag & TAG_RET)
				bb_target = bb_dispatch;
			if (tag & (TAG_CALL|TAG_BRANCH)) {
				if (new_pc == NEW_PC_NONE) /* translate_instr() will set PC */
					bb_target = bb_dispatch;
				else
					bb_target = (BasicBlock*)lookup_basicblock(cpu, cpu->cur_func, new_pc, bb_ret, BB_TYPE_NORMAL);
			}
			/* get not-taken basic block */
			if (tag & TAG_CONDITIONAL)
				bb_next = (BasicBlock*)lookup_basicblock(cpu, cpu->cur_func, next_pc, bb_ret, BB_TYPE_NORMAL);

			bb_cont = translate_instr(cpu, pc, tag, bb_target, bb_trap, bb_next, cur_bb);

			pc = next_pc;
		} while (
			/* new basic block starts here (and we haven't translated it yet)*/
			(!is_start_of_basicblock(cpu, pc)) &&
			/* end of code section */ //XXX no: this is whether it's TAG_CODE
			is_code(cpu, pc) &&
			/* last intruction jumped away */
			bb_cont
		);

		/* link with next basic block if there isn't a control flow instr. already */
		if (bb_cont) {
			BasicBlock *target = (BasicBlock*)lookup_basicblock(cpu, cpu->cur_func, pc, bb_ret, BB_TYPE_NORMAL);
			LOG("info: linking continue $%04llx!\n", (unsigned long long)pc);
			BranchInst::Create(target, bb_cont);
		}
	}

	return bb_dispatch;
}
/**
 * @brief Create a new CPU core structure and initialize the llvm Module/ExecutionEngine
 *
 * @param flags some flags, such as floating point, little/big endian
 * @param arch_flags target machine bits
 * @param arch_func architecture-specific frontend function table
 *
 * @return pointer of CPU core structure
 */
cpu_t *
cpu_new(uint32_t flags, uint32_t arch_flags, arch_func_t arch_func)
{
	cpu_t *cpu;

	llvm::InitializeNativeTarget();

	cpu = new cpu_t;
	assert(cpu != NULL);
	memset(&cpu->info, 0, sizeof(cpu->info));
	memset(&cpu->rf, 0, sizeof(cpu->rf));

	cpu->info.name = "noname";
	cpu->info.common_flags = flags;
	cpu->info.arch_flags = arch_flags;
	cpu->f = arch_func;
	cpu->icounter = 0;

	cpu->dyncom_engine = new dyncom_engine_t;
	cpu->dyncom_engine->code_start = 0;
	cpu->dyncom_engine->code_end = 0;
	cpu->dyncom_engine->code_entry = 0;
	cpu->dyncom_engine->tag = NULL;

	/* init hash fast map */
#ifdef HASH_FAST_MAP
	cpu->dyncom_engine->fmap = (fast_map)malloc(sizeof(void*) * HASH_FAST_MAP_SIZE);
	/* clear exactly what was allocated: the memset size must use
	 * sizeof(void*) to match the malloc above (the previous
	 * sizeof(addr_t) over/under-runs when addr_t and void* differ). */
	memset(cpu->dyncom_engine->fmap, 0, sizeof(void*) * HASH_FAST_MAP_SIZE);
#endif
	uint32_t i;
	for (i = 0; i < 4; i++) {
		cpu->dyncom_engine->tag_array[i] = NULL;
		cpu->dyncom_engine->code_size[i] = 0;
	}
	cpu->dyncom_engine->tag_table = (tag_t ***)malloc(TAG_LEVEL1_TABLE_SIZE * sizeof(tag_t **));
	memset(cpu->dyncom_engine->tag_table, 0, TAG_LEVEL1_TABLE_SIZE * sizeof(tag_t **));

	for (i = 0; i < sizeof(cpu->dyncom_engine->func)/sizeof(*cpu->dyncom_engine->func); i++)
		cpu->dyncom_engine->func[i] = NULL;
	for (i = 0; i < sizeof(cpu->dyncom_engine->fp)/sizeof(*cpu->dyncom_engine->fp); i++)
		cpu->dyncom_engine->fp[i] = NULL;
	cpu->dyncom_engine->functions = 0;

	cpu->dyncom_engine->flags_codegen = CPU_CODEGEN_OPTIMIZE;
	cpu->dyncom_engine->flags_debug = CPU_DEBUG_NONE;
	cpu->dyncom_engine->flags_hint = CPU_HINT_NONE;
	cpu->dyncom_engine->flags = 0;

	// init the frontend
	cpu->f.init(cpu, &cpu->info, &cpu->rf);

	/* the frontend must have chosen register sizes the backend can JIT */
	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_GPR]) &&
		"the specified GPR size is not guaranteed to work");
	assert(is_valid_fpr_size(cpu->info.register_size[CPU_REG_FPR]) &&
		"the specified FPR size is not guaranteed to work");
	assert(is_valid_vr_size(cpu->info.register_size[CPU_REG_VR]) &&
		"the specified VR size is not guaranteed to work");
	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_XR]) &&
		"the specified XR size is not guaranteed to work");

	/* allocate one Value* slot per register, per register class */
	uint32_t count = cpu->info.register_count[CPU_REG_GPR];
	if (count != 0) {
		cpu->ptr_gpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_gpr = (Value **)calloc(count, sizeof(Value *));
	} else {
		cpu->ptr_gpr = NULL;
		cpu->in_ptr_gpr = NULL;
	}
	count = cpu->info.register_count[CPU_REG_XR];
	if (count != 0) {
		cpu->ptr_xr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_xr = (Value **)calloc(count, sizeof(Value *));
	} else {
		cpu->ptr_xr = NULL;
		cpu->in_ptr_xr = NULL;
	}
	count = cpu->info.register_count[CPU_REG_SPR];
	if (count != 0) {
		cpu->ptr_spr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_spr = (Value **)calloc(count, sizeof(Value *));
	} else {
		cpu->ptr_spr = NULL;
		cpu->in_ptr_spr = NULL;
	}
	count = cpu->info.register_count[CPU_REG_FPR];
	if (count != 0) {
		cpu->ptr_fpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_fpr = (Value **)calloc(count, sizeof(Value *));
	} else {
		cpu->ptr_fpr = NULL;
		cpu->in_ptr_fpr = NULL;
	}
	if (cpu->info.psr_size != 0) {
		cpu->ptr_FLAG = (Value **)calloc(cpu->info.flags_count, sizeof(Value*));
		assert(cpu->ptr_FLAG != NULL);
	}

	// init LLVM
	cpu->dyncom_engine->mod = new Module(cpu->info.name, _CTX());
	assert(cpu->dyncom_engine->mod != NULL);
	cpu->dyncom_engine->exec_engine = ExecutionEngine::create(cpu->dyncom_engine->mod);
	assert(cpu->dyncom_engine->exec_engine != NULL);

	// check if FP80 and FP128 are supported by this architecture.
	// XXX there is a better way to do this?
	std::string data_layout = cpu->dyncom_engine->exec_engine->getTargetData()->getStringRepresentation();
	if (data_layout.find("f80") != std::string::npos) {
		LOG("INFO: FP80 supported.\n");
		cpu->dyncom_engine->flags |= CPU_FLAG_FP80;
	}
	if (data_layout.find("f128") != std::string::npos) {
		LOG("INFO: FP128 supported.\n");
		cpu->dyncom_engine->flags |= CPU_FLAG_FP128;
	}

	// check if we need to swap guest memory.
	if (cpu->dyncom_engine->exec_engine->getTargetData()->isLittleEndian() ^ IS_LITTLE_ENDIAN(cpu))
		cpu->dyncom_engine->flags |= CPU_FLAG_SWAPMEM;

	cpu->timer_total[TIMER_TAG] = 0;
	cpu->timer_total[TIMER_FE] = 0;
	cpu->timer_total[TIMER_BE] = 0;
	cpu->timer_total[TIMER_RUN] = 0;
	cpu->timer_total[TIMER_OPT] = 0;

	/* register the default callout functions (debug = slot 0, syscall = slot 1) */
	debug_func_init(cpu);
	syscall_func_init(cpu);
	return cpu;
}
/*
 * init the global functions.
 * By default the first callout function is the debug function:
 * "debug_output" of type void(intptr *cpu), cached in ptr_arch_func[0].
 */
static void debug_func_init(cpu_t *cpu){
	//types
	std::vector<const Type*> type_func_debug_args;
	PointerType *type_intptr = PointerType::get(cpu->dyncom_engine->exec_engine->getTargetData()->getIntPtrType(_CTX()), 0);
	type_func_debug_args.push_back(type_intptr);	/* intptr *cpu */
	FunctionType *type_func_debug_callout = FunctionType::get(
		Type::getVoidTy(cpu->dyncom_engine->mod->getContext()),	//return
		type_func_debug_args,	/* Params */
		false);	/* isVarArg */
	Constant *debug_const = cpu->dyncom_engine->mod->getOrInsertFunction("debug_output",	//function name
		type_func_debug_callout);	//return
	if(debug_const == NULL) {
		/* bail out: proceeding would hand a NULL constant to cast<Function>() */
		fprintf(stderr, "Error:cannot insert function:debug.\n");
		return;
	}
	Function *debug_func = cast<Function>(debug_const);
	debug_func->setCallingConv(CallingConv::C);
	cpu->dyncom_engine->ptr_arch_func[0] = debug_func;
}