/**
 * @brief Create a new CPU core structure for the given architecture and
 *        initialize its LLVM Module and ExecutionEngine.
 *
 * @param arch       architecture type of the CPU core
 * @param flags      common flags (e.g. floating point, little/big endian)
 * @param arch_flags architecture-specific flags
 *
 * @return pointer to the new CPU core structure (aborts via assert/exit on
 *         failure, so a non-NULL return is guaranteed)
 */
cpu_t *
cpu_new(cpu_arch_t arch, uint32_t flags, uint32_t arch_flags)
{
	cpu_t *cpu;

	llvm::InitializeNativeTarget();

	cpu = new cpu_t;
	assert(cpu != NULL);
	memset(&cpu->info, 0, sizeof(cpu->info));
	memset(&cpu->rf, 0, sizeof(cpu->rf));

	cpu->info.type = arch;
	cpu->info.name = "noname";
	cpu->info.common_flags = flags;
	cpu->info.arch_flags = arch_flags;

	// bind the architecture-specific frontend function table
	switch (arch) {
		case CPU_ARCH_6502:
			cpu->f = arch_func_6502;
			break;
		case CPU_ARCH_M68K:
			cpu->f = arch_func_m68k;
			break;
		case CPU_ARCH_MIPS:
			cpu->f = arch_func_mips;
			break;
		case CPU_ARCH_M88K:
			cpu->f = arch_func_m88k;
			break;
		case CPU_ARCH_ARM:
			cpu->f = arch_func_arm;
			break;
		case CPU_ARCH_8086:
			cpu->f = arch_func_8086;
			break;
		case CPU_ARCH_FAPRA:
			cpu->f = arch_func_fapra;
			break;
		default:
			// FIX: fatal diagnostics belong on stderr, not stdout
			fprintf(stderr, "illegal arch: %d\n", arch);
			exit(1);
	}

	cpu->code_start = 0;
	cpu->code_end = 0;
	cpu->code_entry = 0;
	cpu->tag = NULL;

	uint32_t i;
	for (i = 0; i < sizeof(cpu->func)/sizeof(*cpu->func); i++)
		cpu->func[i] = NULL;
	for (i = 0; i < sizeof(cpu->fp)/sizeof(*cpu->fp); i++)
		cpu->fp[i] = NULL;
	cpu->functions = 0;

	cpu->flags_codegen = CPU_CODEGEN_OPTIMIZE;
	cpu->flags_debug = CPU_DEBUG_NONE;
	cpu->flags_hint = CPU_HINT_NONE;
	cpu->flags = 0;

	// init the frontend; this fills in register sizes/counts in cpu->info
	cpu->f.init(cpu, &cpu->info, &cpu->rf);

	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_GPR]) &&
		"the specified GPR size is not guaranteed to work");
	assert(is_valid_fpr_size(cpu->info.register_size[CPU_REG_FPR]) &&
		"the specified FPR size is not guaranteed to work");
	assert(is_valid_vr_size(cpu->info.register_size[CPU_REG_VR]) &&
		"the specified VR size is not guaranteed to work");
	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_XR]) &&
		"the specified XR size is not guaranteed to work");

	// allocate the per-register Value* arrays announced by the frontend
	uint32_t count = cpu->info.register_count[CPU_REG_GPR];
	if (count != 0) {
		cpu->ptr_gpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_gpr = (Value **)calloc(count, sizeof(Value *));
		// FIX: check the allocations (previously only ptr_FLAG was checked)
		assert(cpu->ptr_gpr != NULL && cpu->in_ptr_gpr != NULL);
	} else {
		cpu->ptr_gpr = NULL;
		cpu->in_ptr_gpr = NULL;
	}

	count = cpu->info.register_count[CPU_REG_XR];
	if (count != 0) {
		cpu->ptr_xr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_xr = (Value **)calloc(count, sizeof(Value *));
		assert(cpu->ptr_xr != NULL && cpu->in_ptr_xr != NULL); // FIX: was unchecked
	} else {
		cpu->ptr_xr = NULL;
		cpu->in_ptr_xr = NULL;
	}

	count = cpu->info.register_count[CPU_REG_FPR];
	if (count != 0) {
		cpu->ptr_fpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_fpr = (Value **)calloc(count, sizeof(Value *));
		assert(cpu->ptr_fpr != NULL && cpu->in_ptr_fpr != NULL); // FIX: was unchecked
	} else {
		cpu->ptr_fpr = NULL;
		cpu->in_ptr_fpr = NULL;
	}

	if (cpu->info.psr_size != 0) {
		cpu->ptr_FLAG = (Value **)calloc(cpu->info.flags_count, sizeof(Value*));
		assert(cpu->ptr_FLAG != NULL);
	}

	// init LLVM
	cpu->mod = new Module(cpu->info.name, _CTX());
	assert(cpu->mod != NULL);
	cpu->exec_engine = ExecutionEngine::create(cpu->mod);
	assert(cpu->exec_engine != NULL);

	// check if FP80 and FP128 are supported by this architecture.
	// XXX there is a better way to do this?
	std::string data_layout =
		cpu->exec_engine->getDataLayout()->getStringRepresentation();
	if (data_layout.find("f80") != std::string::npos) {
		LOG("INFO: FP80 supported.\n");
		cpu->flags |= CPU_FLAG_FP80;
	}
	if (data_layout.find("f128") != std::string::npos) {
		LOG("INFO: FP128 supported.\n");
		cpu->flags |= CPU_FLAG_FP128;
	}

	// check if we need to swap guest memory (host endianness differs
	// from the guest's).
	if (cpu->exec_engine->getDataLayout()->isLittleEndian()
			^ IS_LITTLE_ENDIAN(cpu))
		cpu->flags |= CPU_FLAG_SWAPMEM;

	cpu->timer_total[TIMER_TAG] = 0;
	cpu->timer_total[TIMER_FE] = 0;
	cpu->timer_total[TIMER_BE] = 0;
	cpu->timer_total[TIMER_RUN] = 0;

	return cpu;
}
/**
 * @brief Create a new CPU core structure and initialize the LLVM Module
 *        and ExecutionEngine.
 *
 * @param flags      common flags, such as floating point, little/big endian
 * @param arch_flags target machine bits
 * @param arch_func  architecture-specific frontend function table
 *
 * @return pointer of CPU core structure
 */
cpu_t *
cpu_new(uint32_t flags, uint32_t arch_flags, arch_func_t arch_func)
{
	cpu_t *cpu;

	llvm::InitializeNativeTarget();

	cpu = new cpu_t;
	assert(cpu != NULL);
	memset(&cpu->info, 0, sizeof(cpu->info));
	memset(&cpu->rf, 0, sizeof(cpu->rf));

	cpu->info.name = "noname";
	cpu->info.common_flags = flags;
	cpu->info.arch_flags = arch_flags;

	cpu->f = arch_func;
	cpu->icounter = 0;

	cpu->dyncom_engine = new dyncom_engine_t;
	cpu->dyncom_engine->code_start = 0;
	cpu->dyncom_engine->code_end = 0;
	cpu->dyncom_engine->code_entry = 0;
	cpu->dyncom_engine->tag = NULL;

	/* init hash fast map */
#ifdef HASH_FAST_MAP
	cpu->dyncom_engine->fmap = (fast_map)malloc(sizeof(void*) * HASH_FAST_MAP_SIZE);
	assert(cpu->dyncom_engine->fmap != NULL); // FIX: malloc was unchecked
	// FIX: clear exactly the number of bytes allocated; the memset
	// previously used sizeof(addr_t), which need not equal sizeof(void*).
	memset(cpu->dyncom_engine->fmap, 0, sizeof(void*) * HASH_FAST_MAP_SIZE);
#endif

	uint32_t i;
	for (i = 0; i < 4; i++) {
		cpu->dyncom_engine->tag_array[i] = NULL;
		cpu->dyncom_engine->code_size[i] = 0;
	}

	// two-level tag table; level-1 entries are allocated lazily elsewhere
	cpu->dyncom_engine->tag_table =
		(tag_t ***)malloc(TAG_LEVEL1_TABLE_SIZE * sizeof(tag_t **));
	assert(cpu->dyncom_engine->tag_table != NULL); // FIX: malloc was unchecked
	memset(cpu->dyncom_engine->tag_table, 0, TAG_LEVEL1_TABLE_SIZE * sizeof(tag_t **));

	for (i = 0; i < sizeof(cpu->dyncom_engine->func)/sizeof(*cpu->dyncom_engine->func); i++)
		cpu->dyncom_engine->func[i] = NULL;
	for (i = 0; i < sizeof(cpu->dyncom_engine->fp)/sizeof(*cpu->dyncom_engine->fp); i++)
		cpu->dyncom_engine->fp[i] = NULL;
	cpu->dyncom_engine->functions = 0;

	cpu->dyncom_engine->flags_codegen = CPU_CODEGEN_OPTIMIZE;
	cpu->dyncom_engine->flags_debug = CPU_DEBUG_NONE;
	cpu->dyncom_engine->flags_hint = CPU_HINT_NONE;
	cpu->dyncom_engine->flags = 0;

	// init the frontend; this fills in register sizes/counts in cpu->info
	cpu->f.init(cpu, &cpu->info, &cpu->rf);

	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_GPR]) &&
		"the specified GPR size is not guaranteed to work");
	assert(is_valid_fpr_size(cpu->info.register_size[CPU_REG_FPR]) &&
		"the specified FPR size is not guaranteed to work");
	assert(is_valid_vr_size(cpu->info.register_size[CPU_REG_VR]) &&
		"the specified VR size is not guaranteed to work");
	assert(is_valid_gpr_size(cpu->info.register_size[CPU_REG_XR]) &&
		"the specified XR size is not guaranteed to work");

	// allocate the per-register Value* arrays announced by the frontend
	uint32_t count = cpu->info.register_count[CPU_REG_GPR];
	if (count != 0) {
		cpu->ptr_gpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_gpr = (Value **)calloc(count, sizeof(Value *));
		// FIX: check the allocations (previously only ptr_FLAG was checked)
		assert(cpu->ptr_gpr != NULL && cpu->in_ptr_gpr != NULL);
	} else {
		cpu->ptr_gpr = NULL;
		cpu->in_ptr_gpr = NULL;
	}

	count = cpu->info.register_count[CPU_REG_XR];
	if (count != 0) {
		cpu->ptr_xr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_xr = (Value **)calloc(count, sizeof(Value *));
		assert(cpu->ptr_xr != NULL && cpu->in_ptr_xr != NULL); // FIX: was unchecked
	} else {
		cpu->ptr_xr = NULL;
		cpu->in_ptr_xr = NULL;
	}

	count = cpu->info.register_count[CPU_REG_SPR];
	if (count != 0) {
		cpu->ptr_spr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_spr = (Value **)calloc(count, sizeof(Value *));
		assert(cpu->ptr_spr != NULL && cpu->in_ptr_spr != NULL); // FIX: was unchecked
	} else {
		cpu->ptr_spr = NULL;
		cpu->in_ptr_spr = NULL;
	}

	count = cpu->info.register_count[CPU_REG_FPR];
	if (count != 0) {
		cpu->ptr_fpr = (Value **)calloc(count, sizeof(Value *));
		cpu->in_ptr_fpr = (Value **)calloc(count, sizeof(Value *));
		assert(cpu->ptr_fpr != NULL && cpu->in_ptr_fpr != NULL); // FIX: was unchecked
	} else {
		cpu->ptr_fpr = NULL;
		cpu->in_ptr_fpr = NULL;
	}

	if (cpu->info.psr_size != 0) {
		cpu->ptr_FLAG = (Value **)calloc(cpu->info.flags_count, sizeof(Value*));
		assert(cpu->ptr_FLAG != NULL);
	}

	// init LLVM
	cpu->dyncom_engine->mod = new Module(cpu->info.name, _CTX());
	assert(cpu->dyncom_engine->mod != NULL);
	cpu->dyncom_engine->exec_engine = ExecutionEngine::create(cpu->dyncom_engine->mod);
	assert(cpu->dyncom_engine->exec_engine != NULL);

	// check if FP80 and FP128 are supported by this architecture.
	// XXX there is a better way to do this?
	std::string data_layout =
		cpu->dyncom_engine->exec_engine->getTargetData()->getStringRepresentation();
	if (data_layout.find("f80") != std::string::npos) {
		LOG("INFO: FP80 supported.\n");
		cpu->dyncom_engine->flags |= CPU_FLAG_FP80;
	}
	if (data_layout.find("f128") != std::string::npos) {
		LOG("INFO: FP128 supported.\n");
		cpu->dyncom_engine->flags |= CPU_FLAG_FP128;
	}

	// check if we need to swap guest memory (host endianness differs
	// from the guest's).
	if (cpu->dyncom_engine->exec_engine->getTargetData()->isLittleEndian()
			^ IS_LITTLE_ENDIAN(cpu))
		cpu->dyncom_engine->flags |= CPU_FLAG_SWAPMEM;

	cpu->timer_total[TIMER_TAG] = 0;
	cpu->timer_total[TIMER_FE] = 0;
	cpu->timer_total[TIMER_BE] = 0;
	cpu->timer_total[TIMER_RUN] = 0;
	cpu->timer_total[TIMER_OPT] = 0;

	debug_func_init(cpu);
	syscall_func_init(cpu);

	return cpu;
}