/* Write a control register from the gdb buffer.  Control registers can
 * affect address translation, so flush the TLB when running under TCG. */
static int cpu_write_c_reg(CPUS390XState *env, uint8_t *mem_buf, int n)
{
    switch (n) {
    case S390_C0_REGNUM ... S390_C15_REGNUM:
        env->cregs[n] = ldtul_p(mem_buf);
        if (tcg_enabled()) {
            tlb_flush(ENV_GET_CPU(env), 1);
        }
        cpu_synchronize_post_init(ENV_GET_CPU(env));
        return 8;
    default:
        return 0;
    }
}
static void superh_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    SuperHCPU *cpu = SUPERH_CPU(obj);
    CPUSH4State *env = &cpu->env;

    cs->env_ptr = env;
    cpu_exec_init(env);

    env->movcal_backup_tail = &(env->movcal_backup);

    if (tcg_enabled()) {
        sh4_translate_init();
    }
}
static void cap_safe_bounds_check_apply(sPAPRMachineState *spapr, uint8_t val,
                                        Error **errp)
{
    uint8_t kvm_val = kvmppc_get_cap_safe_bounds_check();

    if (tcg_enabled() && val) {
        /* TODO - for now only allow broken for TCG */
        error_setg(errp,
                   "Requested safe bounds check capability level not supported by tcg, try a different value for cap-sbbc");
    } else if (kvm_enabled() && (val > kvm_val)) {
        error_setg(errp,
                   "Requested safe bounds check capability level not supported by kvm, try cap-sbbc=%s",
                   cap_sbbc_possible.vals[kvm_val]);
    }
}
static void tilegx_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    TileGXCPU *cpu = TILEGX_CPU(obj);
    CPUTLGState *env = &cpu->env;
    static bool tcg_initialized;

    cs->env_ptr = env;
    cpu_exec_init(cs, &error_abort);

    if (tcg_enabled() && !tcg_initialized) {
        tcg_initialized = true;
        tilegx_tcg_init();
    }
}
void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    env->stopped = 1;

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(env);
    } else {
        qemu_dummy_start_vcpu(env);
    }
}
bool s390_cpu_has_int(S390CPU *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (!tcg_enabled()) {
        return false;
    }
    return s390_cpu_has_mcck_int(cpu) ||
           s390_cpu_has_ext_int(cpu) ||
           s390_cpu_has_io_int(cpu) ||
           s390_cpu_has_restart_int(cpu) ||
           s390_cpu_has_stop_int(cpu);
#else
    return false;
#endif
}
static void cap_htm_apply(sPAPRMachineState *spapr, uint8_t val, Error **errp)
{
    if (!val) {
        /* TODO: We don't support disabling htm yet */
        return;
    }
    if (tcg_enabled()) {
        error_setg(errp,
                   "No Transactional Memory support in TCG, try cap-htm=off");
    } else if (kvm_enabled() && !kvmppc_has_cap_htm()) {
        error_setg(errp,
                   "KVM implementation does not support Transactional Memory, try cap-htm=off");
    }
}
uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        /* Fold the lazily computed condition code into PSW bits 44-45 */
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);
        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }
    return r;
}
static void xtensa_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    XtensaCPU *cpu = XTENSA_CPU(obj);
    XtensaCPUClass *xcc = XTENSA_CPU_GET_CLASS(obj);
    CPUXtensaState *env = &cpu->env;
    static bool tcg_inited;

    cs->env_ptr = env;
    env->config = xcc->config;

    if (tcg_enabled() && !tcg_inited) {
        tcg_inited = true;
        xtensa_translate_init();
    }
}
static void xtensa_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    XtensaCPU *cpu = XTENSA_CPU(obj);
    CPUXtensaState *env = &cpu->env;
    static bool tcg_inited;

    cs->env_ptr = env;
    cpu_exec_init(env);

    if (tcg_enabled() && !tcg_inited) {
        tcg_inited = true;
        xtensa_translate_init();
        cpu_set_debug_excp_handler(xtensa_breakpoint_handler);
    }
}
void qemu_init_vcpu(void *_env)
{
    CPUArchState *env = _env;
    CPUState *cpu = ENV_GET_CPU(env);

    env->nr_cores = smp_cores;
    env->nr_threads = smp_threads;
    cpu->stopped = true;

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(env);
    } else if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
    } else {
        qemu_dummy_start_vcpu(env);
    }
}
static void arm_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    ARMCPU *cpu = ARM_CPU(obj);
    static bool inited;

    cs->env_ptr = &cpu->env;
    cpu_exec_init(&cpu->env);
    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                         g_free, g_free);

    if (tcg_enabled() && !inited) {
        inited = true;
        arm_translate_init();
    }
}
static void mb_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
    CPUMBState *env = &cpu->env;
    static bool tcg_initialized;

    cs->env_ptr = env;
    cpu_exec_init(env);

    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);

    if (tcg_enabled() && !tcg_initialized) {
        tcg_initialized = true;
        mb_tcg_init();
    }
}
static void lm32_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    LM32CPU *cpu = LM32_CPU(obj);
    CPULM32State *env = &cpu->env;
    static bool tcg_initialized;

    cs->env_ptr = env;
    cpu_exec_init(env);

    env->flags = 0;

    if (tcg_enabled() && !tcg_initialized) {
        tcg_initialized = true;
        lm32_translate_init();
    }
}
static void openrisc_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    OpenRISCCPU *cpu = OPENRISC_CPU(obj);
    static int inited;

    cs->env_ptr = &cpu->env;
    cpu_exec_init(&cpu->env);

#ifndef CONFIG_USER_ONLY
    cpu_openrisc_mmu_init(cpu);
#endif

    if (tcg_enabled() && !inited) {
        inited = 1;
        openrisc_translate_init();
    }
}
void s390_realize_cpu_model(CPUState *cs, Error **errp)
{
    S390CPUClass *xcc = S390_CPU_GET_CLASS(cs);
    S390CPU *cpu = S390_CPU(cs);
    const S390CPUModel *max_model;

    if (xcc->kvm_required && !kvm_enabled()) {
        error_setg(errp, "CPU definition requires KVM");
        return;
    }

    if (!cpu->model) {
        /* no host model support -> perform compatibility stuff */
        apply_cpu_model(NULL, errp);
        return;
    }

    max_model = get_max_cpu_model(errp);
    if (*errp) {
        error_prepend(errp, "CPU models are not available: ");
        return;
    }

    /* copy over properties that can vary */
    cpu->model->lowest_ibc = max_model->lowest_ibc;
    cpu->model->cpu_id = max_model->cpu_id;
    cpu->model->cpu_id_format = max_model->cpu_id_format;
    cpu->model->cpu_ver = max_model->cpu_ver;

    check_consistency(cpu->model);
    check_compatibility(max_model, cpu->model, errp);
    if (*errp) {
        return;
    }

    apply_cpu_model(cpu->model, errp);

    cpu->env.cpuid = s390_cpuid_from_cpu_model(cpu->model);
    if (tcg_enabled()) {
        /* basic mode, write the cpu address into the first 4 bits of the ID */
        cpu->env.cpuid = deposit64(cpu->env.cpuid, 54, 4, cpu->env.cpu_num);
    }
}
static void cap_safe_indirect_branch_apply(sPAPRMachineState *spapr,
                                           uint8_t val, Error **errp)
{
    uint8_t kvm_val = kvmppc_get_cap_safe_indirect_branch();

    if (val == SPAPR_CAP_WORKAROUND) { /* Can only be Broken or Fixed */
        error_setg(errp,
                   "Requested safe indirect branch capability level \"workaround\" not valid, try cap-ibs=%s",
                   cap_ibs_possible.vals[kvm_val]);
    } else if (tcg_enabled() && val) {
        /* TODO - for now only allow broken for TCG */
        error_setg(errp,
                   "Requested safe indirect branch capability level not supported by tcg, try a different value for cap-ibs");
    } else if (kvm_enabled() && val && (val != kvm_val)) {
        error_setg(errp,
                   "Requested safe indirect branch capability level not supported by kvm, try cap-ibs=%s",
                   cap_ibs_possible.vals[kvm_val]);
    }
}
static int cpu_post_load(void *opaque, int version_id)
{
    S390CPU *cpu = opaque;

    /*
     * As the cpu state is pushed to kvm via kvm_set_mp_state rather
     * than via cpu_synchronize_state, we need to update kvm here.
     */
    if (kvm_enabled()) {
        kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
        return kvm_s390_vcpu_interrupt_post_load(cpu);
    }

    if (tcg_enabled()) {
        /* Rearm the CKC timer if necessary */
        tcg_s390_tod_updated(CPU(cpu), RUN_ON_CPU_NULL);
    }

    return 0;
}
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;

    /* KVM will handle all WAITs and trigger a WAIT exit on disabled_wait */
    if (!tcg_enabled()) {
        return;
    }
    env->cc_op = (mask >> 44) & 3;

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        s390_handle_wait(s390_env_get_cpu(env));
    }
}
static void arm_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    ARMCPU *cpu = ARM_CPU(obj);
    static bool inited;

    cs->env_ptr = &cpu->env;
    cpu_exec_init(&cpu->env);
    cpu->cp_regs = g_hash_table_new_full(g_int_hash, g_int_equal,
                                         g_free, g_free);

#ifndef CONFIG_USER_ONLY
    /* Our inbound IRQ and FIQ lines */
    if (kvm_enabled()) {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_kvm_set_irq, 2);
    } else {
        qdev_init_gpio_in(DEVICE(cpu), arm_cpu_set_irq, 2);
    }

    cpu->gt_timer[GTIMER_PHYS] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_ptimer_cb, cpu);
    cpu->gt_timer[GTIMER_VIRT] = timer_new(QEMU_CLOCK_VIRTUAL, GTIMER_SCALE,
                                           arm_gt_vtimer_cb, cpu);
    qdev_init_gpio_out(DEVICE(cpu), cpu->gt_timer_outputs,
                       ARRAY_SIZE(cpu->gt_timer_outputs));
#endif

    /* DTB consumers generally don't in fact care what the 'compatible'
     * string is, so always provide some string and trust that a hypothetical
     * picky DTB consumer will also provide a helpful error message.
     */
    cpu->dtb_compatible = "qemu,unknown";
    cpu->psci_version = 1; /* By default assume PSCI v0.1 */
    cpu->kvm_target = QEMU_KVM_ARM_TARGET_NONE;

    if (tcg_enabled() && !inited) {
        inited = true;
        arm_translate_init();
    }
}
static void mb_cpu_initfn(Object *obj)
{
    CPUState *cs = CPU(obj);
    MicroBlazeCPU *cpu = MICROBLAZE_CPU(obj);
    CPUMBState *env = &cpu->env;
    static bool tcg_initialized;

    cs->env_ptr = env;
    cpu_exec_init(env);

    set_float_rounding_mode(float_round_nearest_even, &env->fp_status);

#ifndef CONFIG_USER_ONLY
    /* Inbound IRQ and FIR lines */
    qdev_init_gpio_in(DEVICE(cpu), microblaze_cpu_set_irq, 2);
#endif

    if (tcg_enabled() && !tcg_initialized) {
        tcg_initialized = true;
        mb_tcg_init();
    }
}
CPUXtensaState *cpu_xtensa_init(const char *cpu_model)
{
    static int tcg_inited;
    static int debug_handler_inited;
    XtensaCPU *cpu;
    CPUXtensaState *env;
    const XtensaConfig *config = NULL;
    XtensaConfigList *core = xtensa_cores;

    for (; core; core = core->next) {
        if (strcmp(core->config->name, cpu_model) == 0) {
            config = core->config;
            break;
        }
    }

    if (config == NULL) {
        return NULL;
    }

    cpu = XTENSA_CPU(object_new(TYPE_XTENSA_CPU));
    env = &cpu->env;
    env->config = config;

    if (!tcg_inited) {
        tcg_inited = 1;
        xtensa_translate_init();
    }

    if (!debug_handler_inited && tcg_enabled()) {
        debug_handler_inited = 1;
        prev_debug_excp_handler =
            cpu_set_debug_excp_handler(breakpoint_handler);
    }

    xtensa_irq_init(env);
    qemu_init_vcpu(env);
    return env;
}
int s390_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t val;
    int cc_op;

    switch (n) {
    case S390_PSWM_REGNUM:
        if (tcg_enabled()) {
            cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                            env->cc_vr);
            val = deposit64(env->psw.mask, 44, 2, cc_op);
            return gdb_get_regl(mem_buf, val);
        }
        return gdb_get_regl(mem_buf, env->psw.mask);
    case S390_PSWA_REGNUM:
        return gdb_get_regl(mem_buf, env->psw.addr);
    case S390_R0_REGNUM ... S390_R15_REGNUM:
        return gdb_get_regl(mem_buf, env->regs[n - S390_R0_REGNUM]);
    }
    return 0;
}
void qemu_mutex_lock_iothread(void)
{
    LOGD_CPUS("%s1\n", __func__);
    if (!tcg_enabled()) {
        LOGD_CPUS("%s2\n", __func__);
        qemu_mutex_lock(&qemu_global_mutex);
        LOGD_CPUS("%s3\n", __func__);
    } else {
        LOGD_CPUS("%s4\n", __func__);
        iothread_requesting_mutex = true;
        if (qemu_mutex_trylock(&qemu_global_mutex)) {
            LOGD_CPUS("%s5\n", __func__);
            qemu_cpu_kick_thread(first_cpu);
            LOGD_CPUS("%s6\n", __func__);
            qemu_mutex_lock(&qemu_global_mutex);
            LOGD_CPUS("%s7\n", __func__);
        }
        LOGD_CPUS("%s8\n", __func__);
        iothread_requesting_mutex = false;
        LOGD_CPUS("%s9\n", __func__);
        qemu_cond_broadcast(&qemu_io_proceeded_cond);
        LOGD_CPUS("%s10\n", __func__);
    }
}
int s390_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong tmpl = ldtul_p(mem_buf);

    switch (n) {
    case S390_PSWM_REGNUM:
        env->psw.mask = tmpl;
        if (tcg_enabled()) {
            env->cc_op = extract64(tmpl, 44, 2);
        }
        break;
    case S390_PSWA_REGNUM:
        env->psw.addr = tmpl;
        break;
    case S390_R0_REGNUM ... S390_R15_REGNUM:
        env->regs[n - S390_R0_REGNUM] = tmpl;
        break;
    default:
        return 0;
    }
    return 8;
}
void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3)
{
    uint64_t addr = env->regs[r1];
    uint64_t subcode = env->regs[r3];
    IplParameterBlock *iplb;

    if (env->psw.mask & PSW_MASK_PSTATE) {
        program_interrupt(env, PGM_PRIVILEGED, ILEN_LATER_INC);
        return;
    }

    if ((subcode & ~0x0ffffULL) || (subcode > 6)) {
        program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
        return;
    }

    switch (subcode) {
    case 0:
        modified_clear_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 1:
        load_normal_reset(s390_env_get_cpu(env));
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 3:
        s390_reipl_request();
        if (tcg_enabled()) {
            cpu_loop_exit(CPU(s390_env_get_cpu(env)));
        }
        break;
    case 5:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        if (!address_space_access_valid(&address_space_memory, addr,
                                        sizeof(IplParameterBlock), false)) {
            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
            return;
        }
        iplb = g_malloc0(sizeof(struct IplParameterBlock));
        cpu_physical_memory_read(addr, iplb, sizeof(struct IplParameterBlock));
        if (!s390_ipl_update_diag308(iplb)) {
            env->regs[r1 + 1] = DIAG_308_RC_OK;
        } else {
            env->regs[r1 + 1] = DIAG_308_RC_INVALID;
        }
        g_free(iplb);
        return;
    case 6:
        if ((r1 & 1) || (addr & 0x0fffULL)) {
            program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER_INC);
            return;
        }
        if (!address_space_access_valid(&address_space_memory, addr,
                                        sizeof(IplParameterBlock), true)) {
            program_interrupt(env, PGM_ADDRESSING, ILEN_LATER_INC);
            return;
        }
        iplb = s390_ipl_get_iplb();
        if (iplb) {
            cpu_physical_memory_write(addr, iplb,
                                      sizeof(struct IplParameterBlock));
            env->regs[r1 + 1] = DIAG_308_RC_OK;
        } else {
            env->regs[r1 + 1] = DIAG_308_RC_NO_CONF;
        }
        return;
    default:
        hw_error("Unhandled diag308 subcode %" PRIx64, subcode);
        break;
    }
}