/* The cpu state corresponding to 'searched_pc' is restored. */
int cpu_restore_state(CPUState *env, TranslationBlock *tb,
                      unsigned long searched_pc)
{
    TCGContext *s = ctx->tcg_ctx;
    int j;
    unsigned long tc_ptr;

    tcg_func_start(s);

    gen_intermediate_code(env, tb, 1);

    /* find opc index corresponding to search_pc */
    tc_ptr = (unsigned long)tb->tc_ptr;
    if (searched_pc < tc_ptr)
        return -1;

    s->tb_next_offset = tb->tb_next_offset;
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
    j = tcg_gen_code_search_pc(s, (uint8_t *)tc_ptr, searched_pc - tc_ptr);
    if (j < 0)
        return -1;
    /* now find start of instruction before */
    while (ctx->gen_opc_instr_start[j] == 0)
        j--;

    restore_state_to_opc(env, tb, j);

    return 0;
}
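/*
 * Hedged sketch (not taken from this tree): a typical caller of the
 * function above.  When a helper raises a guest exception, it looks up
 * the TranslationBlock that contains the faulting host PC and asks
 * cpu_restore_state() to rebuild the guest CPU state at that point.
 * tb_find_pc() is assumed to exist as in stock QEMU of this era; the
 * wrapper itself is illustrative only.
 */
static void restore_guest_state_at(CPUState *env, void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;   /* host PC inside generated code */
    TranslationBlock *tb = tb_find_pc(pc);       /* which TB was executing? */

    if (tb) {
        /* re-translate the TB and map 'pc' back to a guest instruction */
        cpu_restore_state(env, tb, pc);
    }
}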
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = &tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif
    tcg_func_start(s);

#if 1 /* yclin */
    code_marker_begin();
#endif
    gen_intermediate_code(env, tb);
#if 1 /* yclin */
    code_marker_end();
#endif

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}
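/*
 * Hedged sketch (not taken from this tree): how cpu_gen_code() is
 * normally driven.  In stock QEMU, tb_gen_code() allocates a
 * TranslationBlock, points tb->tc_ptr at the current end of the code
 * cache, calls cpu_gen_code(), and then bumps the code-cache pointer by
 * the number of host bytes that were emitted.  tb_alloc(), code_gen_ptr
 * and CODE_GEN_ALIGN are borrowed from stock QEMU and are assumptions
 * here; this helper is illustrative only.
 */
static TranslationBlock *translate_one_tb(CPUState *env, target_ulong pc,
                                          target_ulong cs_base, int flags,
                                          int cflags)
{
    TranslationBlock *tb;
    int code_gen_size;

    tb = tb_alloc(pc);                    /* may be NULL if the TB pool is full */
    if (!tb)
        return NULL;
    tb->tc_ptr = code_gen_ptr;            /* host buffer for the generated code */
    tb->cs_base = cs_base;
    tb->flags = flags;
    tb->cflags = cflags;
    cpu_gen_code(env, tb, &code_gen_size);
    /* advance the code-cache pointer past what was just emitted, keeping alignment */
    code_gen_ptr = (void *)(((unsigned long)code_gen_ptr + code_gen_size +
                             CODE_GEN_ALIGN - 1) & ~(CODE_GEN_ALIGN - 1));
    return tb;
}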
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    uint8_t *gen_code_buf;
    int gen_code_size;

    if (gen_intermediate_code(env, tb) < 0)
        return -1;

    /* generate machine code */
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    gen_code_buf = tb->tc_ptr;
#ifdef USE_DIRECT_JUMP
    /* the following two entries are optional (only used for string ops) */
    tb->tb_jmp_offset[2] = 0xffff;
    tb->tb_jmp_offset[3] = 0xffff;
#endif
    dyngen_labels(gen_labels, nb_gen_labels, gen_code_buf, gen_opc_buf);

    gen_code_size = dyngen_code(gen_code_buf, tb->tb_next_offset,
#ifdef USE_DIRECT_JUMP
                                tb->tb_jmp_offset,
#else
                                NULL,
#endif
                                gen_opc_buf, gen_opparam_buf, gen_labels);
    *gen_code_size_ptr = gen_code_size;
    return 0;
}
/* '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
void cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = ctx->tcg_ctx;
    uint8_t *gen_code_buf;
    int gen_code_size;

    tcg_func_start(s);

    gen_intermediate_code(env, tb, 0);

    /* generate machine code */
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
    gen_code_size = tcg_gen_code(s, gen_code_buf);
    *gen_code_size_ptr = gen_code_size;
}
/* return non zero if the very first instruction is invalid so that
   the virtual CPU can trigger an exception.

   '*gen_code_size_ptr' contains the size of the generated code (host
   code).
*/
int cpu_gen_code(CPUState *env, TranslationBlock *tb, int *gen_code_size_ptr)
{
    TCGContext *s = tcg_ctx_env;
    uint8_t *gen_code_buf;
    int gen_code_size;
#ifdef CONFIG_PROFILER
    int64_t ti;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count1++; /* includes aborted translations because of
                       exceptions */
    ti = profile_getclock();
#endif

#if defined(CONFIG_HYBRID)
    int64_t start_cycle = 0, end_cycle = 0;
    if (llvm_profile_enabled)
        start_cycle = cpu_get_real_ticks();
#endif

    tcg_func_start(env, s);

    gen_intermediate_code(env, tb);

    /* generate machine code */
    s->tb = tb;
    gen_code_buf = tb->tc_ptr;
    tb->tb_next_offset[0] = 0xffff;
    tb->tb_next_offset[1] = 0xffff;
    s->tb_next_offset = tb->tb_next_offset;
#ifdef USE_DIRECT_JUMP
    s->tb_jmp_offset = tb->tb_jmp_offset;
    s->tb_next = NULL;
#else
    s->tb_jmp_offset = NULL;
    s->tb_next = tb->tb_next;
#endif

#ifdef CONFIG_PROFILER
    s->tb_count++;
    s->interm_time += profile_getclock() - ti;
    s->code_time -= profile_getclock();
#endif

#if defined(CONFIG_LLVM) && !defined(CONFIG_HYBRID)
    gen_code_size = llvm_gen_block(env, s, tb);
#else
    gen_code_size = tcg_gen_code(s, gen_code_buf);
#endif
    *gen_code_size_ptr = gen_code_size;

#ifdef CONFIG_PROFILER
    s->code_time += profile_getclock();
    s->code_in_len += tb->size;
    s->code_out_len += gen_code_size;
#endif

#if defined(CONFIG_HYBRID)
    if (llvm_profile_enabled) {
        end_cycle = cpu_get_real_ticks();
        llvm_tb_record(tb, gen_code_size, end_cycle - start_cycle);
    }
#endif

#ifdef DEBUG_DISAS
    if (qemu_loglevel_mask(CPU_LOG_TB_OUT_ASM)) {
        qemu_log("OUT: [size=%d]\n", *gen_code_size_ptr);
        log_disas(tb->tc_ptr, *gen_code_size_ptr);
        qemu_log("\n");
        qemu_log_flush();
    }
#endif
    return 0;
}