/*
 * Load the binary device tree blob for the bamboo board and patch in
 * runtime properties (memory size, initrd location, kernel command line).
 *
 * Returns the in-memory FDT on success, NULL on failure (or always NULL
 * when QEMU was built without libfdt support).
 */
static void *bamboo_load_device_tree(void *addr, uint32_t ramsize,
                                     target_phys_addr_t initrd_base,
                                     target_phys_addr_t initrd_size,
                                     const char *kernel_cmdline)
{
    void *fdt = NULL;
#ifdef HAVE_FDT
    uint32_t mem_reg_property[] = { 0, 0, ramsize };
    char *path;
    int pathlen;
    int ret;

    /* Measure first, then format: snprintf(NULL, 0, ...) returns the
     * required length excluding the terminating NUL. */
    pathlen = snprintf(NULL, 0, "%s/%s", bios_dir, BINARY_DEVICE_TREE_FILE) + 1;
    path = qemu_malloc(pathlen);
    if (path == NULL)
        return NULL;

    snprintf(path, pathlen, "%s/%s", bios_dir, BINARY_DEVICE_TREE_FILE);

    fdt = load_device_tree(path, addr);
    /* Fix: pair qemu_malloc() with qemu_free(), not plain free(), for
     * allocator consistency with the rest of the code base. */
    qemu_free(path);
    if (fdt == NULL)
        goto out;

    /* Manipulate device tree in memory. */
    ret = qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property,
                               sizeof(mem_reg_property));
    if (ret < 0)
        fprintf(stderr, "couldn't set /memory/reg\n");

    ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start",
                                    initrd_base);
    if (ret < 0)
        fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");

    ret = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end",
                                    (initrd_base + initrd_size));
    if (ret < 0)
        fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");

    ret = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs",
                                      kernel_cmdline);
    if (ret < 0)
        fprintf(stderr, "couldn't set /chosen/bootargs\n");

    if (kvm_enabled())
        kvmppc_fdt_update(fdt);

out:
#endif
    return fdt;
}
/*
 * Load the bamboo device tree blob, patch runtime properties into it
 * (memory layout, initrd location, kernel command line), and copy the
 * result into guest memory at @addr.  Returns the host copy of the
 * FDT, or NULL on failure / when built without libfdt.
 */
static void *bamboo_load_device_tree(target_phys_addr_t addr,
                                     uint32_t ramsize,
                                     target_phys_addr_t initrd_base,
                                     target_phys_addr_t initrd_size,
                                     const char *kernel_cmdline)
{
    void *fdt = NULL;
#ifdef HAVE_FDT
    uint32_t mem_reg_property[] = { 0, 0, ramsize };
    char *dtb_path;
    int fdt_size;
    int rc;

    dtb_path = qemu_find_file(QEMU_FILE_TYPE_BIOS, BINARY_DEVICE_TREE_FILE);
    if (!dtb_path) {
        goto out;
    }

    fdt = load_device_tree(dtb_path, &fdt_size);
    qemu_free(dtb_path);
    if (!fdt) {
        goto out;
    }

    /* Patch the blob in host memory before handing it to the guest. */
    rc = qemu_devtree_setprop(fdt, "/memory", "reg", mem_reg_property,
                              sizeof(mem_reg_property));
    if (rc < 0) {
        fprintf(stderr, "couldn't set /memory/reg\n");
    }

    rc = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-start",
                                   initrd_base);
    if (rc < 0) {
        fprintf(stderr, "couldn't set /chosen/linux,initrd-start\n");
    }

    rc = qemu_devtree_setprop_cell(fdt, "/chosen", "linux,initrd-end",
                                   (initrd_base + initrd_size));
    if (rc < 0) {
        fprintf(stderr, "couldn't set /chosen/linux,initrd-end\n");
    }

    rc = qemu_devtree_setprop_string(fdt, "/chosen", "bootargs",
                                     kernel_cmdline);
    if (rc < 0) {
        fprintf(stderr, "couldn't set /chosen/bootargs\n");
    }

    if (kvm_enabled()) {
        kvmppc_fdt_update(fdt);
    }

    cpu_physical_memory_write(addr, (void *)fdt, fdt_size);

out:
#endif
    return fdt;
}
/*
 * Prepare a vCPU for execution: record the SMP topology on the CPU
 * state, then hand the CPU to the active execution backend.
 */
void qemu_init_vcpu(void *_env)
{
    CPUState *cpu = _env;

    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;

    if (kvm_enabled()) {
        kvm_start_vcpu(cpu);
    } else {
        tcg_init_vcpu(cpu);
    }
}
/*
 * Instantiate the KVM in-kernel FLIC (floating interrupt controller)
 * device and attach it to the machine.  Returns NULL when not running
 * under KVM.
 */
DeviceState *s390_flic_kvm_create(void)
{
    DeviceState *flic;

    if (!kvm_enabled()) {
        return NULL;
    }

    flic = qdev_create(NULL, TYPE_KVM_S390_FLIC);
    object_property_add_child(qdev_get_machine(), TYPE_KVM_S390_FLIC,
                              OBJECT(flic), NULL);
    return flic;
}
/*
 * Deliver a RESTART interrupt to @cpu.  Under KVM the kernel injects
 * it; under TCG we set the pending flag and kick the CPU ourselves.
 */
void cpu_inject_restart(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;

    if (!kvm_enabled()) {
        env->pending_int |= INTERRUPT_RESTART;
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
        return;
    }
    kvm_s390_restart_interrupt(cpu);
}
/*
 * Hand a freshly created vCPU to whichever hardware-acceleration
 * backend is active (KVM, or HAX when compiled in).
 */
void qemu_init_vcpu(void *_env)
{
    CPUOldState *cpu = _env;

    if (kvm_enabled()) {
        kvm_init_vcpu(cpu);
    }
#ifdef CONFIG_HAX
    if (hax_enabled()) {
        hax_init_vcpu(cpu);
    }
#endif
}
/*
 * Machine init for the s390x virtio-ccw machine: round the RAM size to
 * what the architecture can express, create the virtual channel
 * subsystem, set up the IPL (boot) device, register hypercalls,
 * allocate guest RAM and storage keys, and bring up the CPUs.
 * Steps are order-sensitive; do not reorder without care.
 */
static void ccw_init(QEMUMachineInitArgs *args)
{
    ram_addr_t my_ram_size = args->ram_size;
    MemoryRegion *sysmem = get_system_memory();
    MemoryRegion *ram = g_new(MemoryRegion, 1);
    int shift = 0;
    uint8_t *storage_keys;
    int ret;
    VirtualCssBus *css_bus;

    /* s390x ram size detection needs a 16bit multiplier + an increment.
       So guests > 64GB can be specified in 2MB steps etc. */
    while ((my_ram_size >> (20 + shift)) > 65535) {
        shift++;
    }
    my_ram_size = my_ram_size >> (20 + shift) << (20 + shift);

    /* let's propagate the changed ram size into the global variable. */
    ram_size = my_ram_size;

    /* get a BUS */
    css_bus = virtual_css_bus_init();
    s390_sclp_init();
    s390_init_ipl_dev(args->kernel_filename, args->kernel_cmdline,
                      args->initrd_filename, "s390-ccw.img");

    /* register hypercalls */
    virtio_ccw_register_hcalls();

    /* allocate RAM */
    memory_region_init_ram(ram, NULL, "s390.ram", my_ram_size);
    vmstate_register_ram_global(ram);
    memory_region_add_subregion(sysmem, 0, ram);

    /* allocate storage keys (one byte per guest page) */
    storage_keys = g_malloc0(my_ram_size / TARGET_PAGE_SIZE);

    /* init CPUs */
    s390_init_cpus(args->cpu_model, storage_keys);

    if (kvm_enabled()) {
        /* enable in-kernel handling of channel-subsystem instructions */
        kvm_s390_enable_css_support(s390_cpu_addr2state(0));
    }

    /*
     * Create virtual css and set it as default so that non mcss-e
     * enabled guests only see virtio devices.
     */
    ret = css_create_css_image(VIRTUAL_CSSID, true);
    assert(ret == 0);

    /* Create VirtIO network adapters */
    s390_create_virtio_net(BUS(css_bus), "virtio-net-ccw");
}
/*
 * Deliver a STOP interrupt to @cpu.  Under KVM the kernel injects it;
 * under TCG we set the pending flag and kick the CPU ourselves.
 */
void cpu_inject_stop(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;

    if (!kvm_enabled()) {
        env->pending_int |= INTERRUPT_STOP;
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
        return;
    }
    kvm_s390_stop_interrupt(cpu);
}
/*
 * Mark vector as unused: drop one reference and, when the last
 * reference goes away, tear down the vector's routing and pending bit.
 */
void msix_vector_unuse(PCIDevice *dev, unsigned vector)
{
    if (vector >= dev->msix_entries_nr) {
        return;
    }
    if (!dev->msix_entry_used[vector]) {
        return;
    }
    if (--dev->msix_entry_used[vector] != 0) {
        return; /* still referenced by other users */
    }
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_vector_del(dev, vector);
    }
    msix_clr_pending(dev, vector);
}
/*
 * Acquire the big QEMU lock.  Under KVM it is taken directly; under
 * TCG the fair mutex is taken first and the vCPU thread is kicked if
 * it currently holds the global lock, so I/O is not starved.
 */
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
        return;
    }

    qemu_mutex_lock(&qemu_fair_mutex);
    if (qemu_mutex_trylock(&qemu_global_mutex)) {
        /* Lock is held by a vCPU thread: ask it to yield, then wait. */
        qemu_cpu_kick_thread(first_cpu);
        qemu_mutex_lock(&qemu_global_mutex);
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}
/*
 * Report whether the runtime-instrumentation facility may be exposed
 * to the guest.  This is only ever allowed under KVM on the s390 CCW
 * machine (per that machine class's ri_allowed flag).
 */
bool ri_allowed(void)
{
    if (kvm_enabled()) {
        MachineClass *mc = MACHINE_GET_CLASS(qdev_get_machine());

        if (object_class_dynamic_cast(OBJECT_CLASS(mc),
                                      TYPE_S390_CCW_MACHINE)) {
            S390CcwMachineClass *s390mc = S390_MACHINE_CLASS(mc);

            return s390mc->ri_allowed;
        }
    }
    /* Fix: use the bool literal rather than 0 in a bool function. */
    return false;
}
/*
 * Acquire the big QEMU lock.  Under KVM it is taken directly; under
 * TCG the fair mutex is taken first and the TCG thread is signalled
 * (SIG_IPI) if it currently holds the global lock, so I/O can proceed.
 */
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
        return;
    }

    qemu_mutex_lock(&qemu_fair_mutex);
    if (qemu_mutex_trylock(&qemu_global_mutex)) {
        /* TCG thread holds the lock: interrupt it, then block. */
        qemu_thread_signal(tcg_cpu_thread, SIG_IPI);
        qemu_mutex_lock(&qemu_global_mutex);
    }
    qemu_mutex_unlock(&qemu_fair_mutex);
}
/*
 * Copy the current ECX register of each vCPU into @ecxs (TCG only;
 * under KVM register state is not available here and nothing is
 * written).  At most @ecxs_size entries are filled — previously the
 * size argument was ignored, allowing a buffer overflow when there
 * were more vCPUs than output slots.
 */
void vcpus_get_ecxs(uint64_t *ecxs, size_t ecxs_size)
{
    CPUState *penv = first_cpu;
    size_t i = 0;

    for (; penv; penv = penv->next_cpu) {
        if (!kvm_enabled()) {
            if (i >= ecxs_size) {
                break; /* do not write past the caller's buffer */
            }
            ecxs[i] = penv->regs[R_ECX];
            i++;
        }
    }
}
/*
 * Copy the current EIP of each vCPU into @eips (TCG only; under KVM
 * nothing is written).  At most @eips_size entries are filled —
 * previously the size argument was ignored, allowing a buffer
 * overflow when there were more vCPUs than output slots.
 */
void vcpus_get_eips(uint64_t *eips, size_t eips_size)
{
    CPUState *penv = first_cpu;
    size_t i = 0;

    for (; penv; penv = penv->next_cpu) {
        if (!kvm_enabled()) {
            if (i >= eips_size) {
                break; /* do not write past the caller's buffer */
            }
            eips[i] = penv->eip;
            i++;
        }
    }
}
/*
 * Set the per-vCPU n_branches counter from @n_branches (TCG only;
 * under KVM nothing is done).  At most @n_branches_size entries are
 * consumed — previously the size argument was ignored, reading past
 * the end of the input array when there were more vCPUs than entries.
 */
void vcpus_set_n_branches(uint64_t *n_branches, size_t n_branches_size)
{
    CPUState *penv = first_cpu;
    size_t i = 0;

    for (; penv; penv = penv->next_cpu) {
        if (!kvm_enabled()) {
            if (i >= n_branches_size) {
                break; /* do not read past the caller's buffer */
            }
            penv->n_branches = n_branches[i];
            i++;
        }
    }
}
/*
 * Finish vCPU setup: record the SMP topology, mark the CPU stopped
 * until the run-state machine releases it, then start the matching
 * backend thread (KVM, TCG, or the dummy backend when neither is
 * active).
 */
void qemu_init_vcpu(CPUState *cpu)
{
    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = true;

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
        return;
    }
    if (tcg_enabled()) {
        qemu_tcg_init_vcpu(cpu);
        return;
    }
    qemu_dummy_start_vcpu(cpu);
}
/*
 * Release every MSI-X vector of @dev: free the in-kernel KVM routes
 * (when applicable), then clear all use counts and pending bits.
 */
static void msix_free_irq_entries(PCIDevice *dev)
{
    int v;

    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        kvm_msix_free(dev);
    }

    for (v = 0; v < dev->msix_entries_nr; v++) {
        dev->msix_entry_used[v] = 0;
        msix_clr_pending(dev, v);
    }
}
/*
 * Raise a program interrupt with @code on the CPU owning @env,
 * dispatching to the active backend.  @ilen/@ra are only consumed by
 * the TCG path; reaching here with neither backend active is a bug.
 */
void s390_program_interrupt(CPUS390XState *env, uint32_t code, int ilen,
                            uintptr_t ra)
{
    S390CPU *cpu = s390_env_get_cpu(env);

    if (kvm_enabled()) {
        kvm_s390_program_interrupt(cpu, code);
        return;
    }
    if (tcg_enabled()) {
        tcg_s390_program_interrupt(env, code, ilen, ra);
        return;
    }
    g_assert_not_reached();
}
/*
 * Finish vCPU setup: record the SMP topology, mark the CPU stopped
 * until released, then start the KVM vCPU thread or register the CPU
 * with the TCG backend.
 */
void qemu_init_vcpu(void *_env)
{
    CPUState *cpu = _env;

    cpu->nr_cores = smp_cores;
    cpu->nr_threads = smp_threads;
    cpu->stopped = 1;

    if (kvm_enabled()) {
        qemu_kvm_start_vcpu(cpu);
        return;
    }
    qemu_tcg_init_vcpu(cpu);
}
/* Initialize the MSI-X structures. Note: if MSI-X is supported, BAR size is * modified, it should be retrieved with msix_bar_size. */ int msix_init(struct PCIDevice *dev, unsigned short nentries, unsigned bar_nr, unsigned bar_size) { int ret; fprintf(stderr, "CDY msix_init, dev %p, vec_count 0x%x\n", dev, nentries); /* Nothing to do if MSI is not supported by interrupt controller */ if (!msix_supported) return -ENOTSUP; if (nentries > MSIX_MAX_ENTRIES) return -EINVAL; #ifdef KVM_CAP_IRQCHIP if (kvm_enabled() && qemu_kvm_irqchip_in_kernel()) { fprintf(stderr, "CDY kvm_enabled and qemu_kvm_irqchip\n"); dev->msix_irq_entries = qemu_malloc(nentries * sizeof *dev->msix_irq_entries); } #endif dev->msix_entry_used = qemu_mallocz(MSIX_MAX_ENTRIES * sizeof *dev->msix_entry_used); dev->msix_table_page = qemu_mallocz(MSIX_PAGE_SIZE); dev->msix_mmio_index = cpu_register_io_memory(msix_mmio_read, msix_mmio_write, dev); fprintf(stderr, "CDY after cpu_register_io_memory, index = 0x%x\n", dev->msix_mmio_index); if (dev->msix_mmio_index == -1) { ret = -EBUSY; goto err_index; } dev->msix_entries_nr = nentries; ret = msix_add_config(dev, nentries, bar_nr, bar_size); fprintf(stderr, "CDY after msix_add_config, ret = 0x%x\n", ret); if (ret) goto err_config; dev->cap_present |= QEMU_PCI_CAP_MSIX; return 0; err_config: dev->msix_entries_nr = 0; cpu_unregister_io_memory(dev->msix_mmio_index); err_index: qemu_free(dev->msix_table_page); dev->msix_table_page = NULL; qemu_free(dev->msix_entry_used); dev->msix_entry_used = NULL; return ret; }
/*
 * Block the signals QEMU consumes through a signalfd and register the
 * signalfd handler with the main loop.  Returns 0 on success, -errno
 * on failure.  The signal set differs depending on whether the I/O
 * thread is compiled in; the mask ordering is deliberate.
 */
static int qemu_signal_init(void)
{
    int sigfd;
    sigset_t set;

#ifdef CONFIG_IOTHREAD
    /* SIGUSR2 used by posix-aio-compat.c */
    sigemptyset(&set);
    sigaddset(&set, SIGUSR2);
    pthread_sigmask(SIG_UNBLOCK, &set, NULL);

    /*
     * SIG_IPI must be blocked in the main thread and must not be caught
     * by sigwait() in the signal thread. Otherwise, the cpu thread will
     * not catch it reliably.
     */
    sigemptyset(&set);
    sigaddset(&set, SIG_IPI);
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigemptyset(&set);
    sigaddset(&set, SIGIO);
    sigaddset(&set, SIGALRM);
    sigaddset(&set, SIGBUS);
#else
    sigemptyset(&set);
    sigaddset(&set, SIGBUS);
    if (kvm_enabled()) {
        /*
         * We need to process timer signals synchronously to avoid a race
         * between exit_request check and KVM vcpu entry.
         */
        sigaddset(&set, SIGIO);
        sigaddset(&set, SIGALRM);
    }
#endif
    /* Block the set so these signals arrive via the signalfd instead
     * of asynchronous delivery. */
    pthread_sigmask(SIG_BLOCK, &set, NULL);

    sigfd = qemu_signalfd(&set);
    if (sigfd == -1) {
        fprintf(stderr, "failed to create signalfd\n");
        return -errno;
    }

    /* Non-blocking so the main-loop handler can drain it safely. */
    fcntl_setfl(sigfd, O_NONBLOCK);

    qemu_set_fd_handler2(sigfd, NULL, sigfd_handler, NULL,
                         (void *)(intptr_t)sigfd);

    return 0;
}
/* service interrupts are floating therefore we must not pass an cpustate */ void s390_sclp_extint(uint32_t parm) { S390CPU *dummy_cpu = s390_cpu_addr2state(0); CPUS390XState *env = &dummy_cpu->env; if (kvm_enabled()) { #ifdef CONFIG_KVM kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE, parm, 0, 1); #endif } else { env->psw.addr += 4; cpu_inject_ext(env, EXT_SERVICE, parm, 0); } }
static bool have_balloon(Error **errp) { if (kvm_enabled() && !kvm_has_sync_mmu()) { error_set(errp, ERROR_CLASS_KVM_MISSING_CAP, "Using KVM without synchronous MMU, balloon unavailable"); return false; } if (!balloon_event_fn) { error_set(errp, ERROR_CLASS_DEVICE_NOT_ACTIVE, "No balloon device has been activated"); return false; } return true; }
/*
 * Run each vCPU for one slice, resuming from where the previous call
 * left off (@next_cpu).  Returns true while at least one CPU thread
 * still has work pending (i.e. not all threads are idle).
 */
bool cpu_exec_all(void)
{
    int r;

    /* Account partial waits to the vm_clock. */
    qemu_clock_warp(vm_clock);

    if (next_cpu == NULL) {
        next_cpu = first_cpu;
    }
    for (; next_cpu != NULL && !exit_request; next_cpu = next_cpu->next_cpu) {
        CPUState *env = next_cpu;

        /* Stop the vm_clock while single-stepping with timers disabled. */
        qemu_clock_enable(vm_clock,
                          (env->singlestep_enabled & SSTEP_NOTIMER) == 0);

#ifndef CONFIG_IOTHREAD
        if (qemu_alarm_pending()) {
            break;
        }
#endif
        if (cpu_can_run(env)) {
            if (kvm_enabled()) {
                r = kvm_cpu_exec(env);
                /* Consume pending signals synchronously after KVM exit. */
                qemu_kvm_eat_signals(env);
            } else {
                r = tcg_cpu_exec(env);
            }
            if (r == EXCP_TRIPLE) {
                /* Triple fault: dump state and park the VM for inspection
                 * (or report to gdb if a gdbserver is attached). */
                cpu_dump_state(env, stderr, fprintf, 0);
                fprintf(stderr, "Triple fault. Halting for inspection via"
                        " QEMU monitor.\n");
                if (gdbserver_running())
                    r = EXCP_DEBUG;
                else {
                    vm_stop(0);
                    break;
                }
            }
            if (r == EXCP_DEBUG) {
                cpu_handle_guest_debug(env);
                break;
            }
        } else if (env->stop || env->stopped) {
            break;
        }
    }
    exit_request = 0;
    return !all_cpu_threads_idle();
}
/*
 * Decide whether a vCPU thread may go idle: no stop request or queued
 * work in flight, and nothing that would require it to keep running.
 */
static bool cpu_thread_is_idle(CPUState *env)
{
    if (env->stop || env->queued_work_first) {
        return false;   /* pending request or queued work */
    }
    if (env->stopped || !vm_running) {
        return true;    /* definitely parked */
    }
    if (!env->halted) {
        return false;
    }
    if (qemu_cpu_has_work(env)) {
        return false;
    }
    /* NOTE(review): with the in-kernel irqchip the thread is kept
     * running even while halted — confirm against the KVM halt path. */
    if (kvm_enabled() && kvm_irqchip_in_kernel()) {
        return false;
    }
    return true;
}
/*
 * Acquire the big QEMU lock.  Under KVM it is taken directly; under
 * TCG we advertise the request, kick the vCPU holding the lock if
 * necessary, and wake any waiters once the request flag is cleared.
 */
void qemu_mutex_lock_iothread(void)
{
    if (kvm_enabled()) {
        qemu_mutex_lock(&qemu_global_mutex);
        return;
    }

    iothread_requesting_mutex = true;
    if (qemu_mutex_trylock(&qemu_global_mutex)) {
        /* A vCPU thread holds the lock: ask it to yield, then block. */
        qemu_cpu_kick_thread(first_cpu);
        qemu_mutex_lock(&qemu_global_mutex);
    }
    iothread_requesting_mutex = false;
    qemu_cond_broadcast(&qemu_io_proceeded_cond);
}
/*
 * Incoming-migration hook for ARM CPUs: merge the migrated coprocessor
 * register values into this CPU's cpreg list (matching by register
 * index) and push them into the active backend (KVM or TCG).
 * Returns 0 on success, -1 to fail the migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    ARMCPU *cpu = opaque;
    int i, v;

    /* Update the values list from the incoming migration data.
     * Anything in the incoming data which we don't know about is
     * a migration failure; anything we know about but the incoming
     * data doesn't specify retains its current (reset) value.
     * The indexes list remains untouched -- we only inspect the
     * incoming migration index list so we can match the values array
     * entries with the right slots in our own values array.
     */
    for (i = 0, v = 0; i < cpu->cpreg_array_len
             && v < cpu->cpreg_vmstate_array_len; i++) {
        if (cpu->cpreg_vmstate_indexes[v] > cpu->cpreg_indexes[i]) {
            /* register in our list but not incoming : skip it */
            continue;
        }
        if (cpu->cpreg_vmstate_indexes[v] < cpu->cpreg_indexes[i]) {
            /* register in their list but not ours: fail migration */
            return -1;
        }
        /* matching register, copy the value over */
        cpu->cpreg_values[i] = cpu->cpreg_vmstate_values[v];
        v++;
    }

    if (kvm_enabled()) {
        if (!write_list_to_kvmstate(cpu)) {
            return -1;
        }
        /* Note that it's OK for the TCG side not to know about
         * every register in the list; KVM is authoritative if
         * we're using it.
         */
        write_list_to_cpustate(cpu);
    } else {
        if (!write_list_to_cpustate(cpu)) {
            return -1;
        }
    }

    /* Re-arm debug hardware from the freshly loaded register state. */
    hw_breakpoint_update_all(cpu);
    hw_watchpoint_update_all(cpu);

    return 0;
}
static void cap_safe_bounds_check_apply(sPAPRMachineState *spapr, uint8_t val, Error **errp) { uint8_t kvm_val = kvmppc_get_cap_safe_bounds_check(); if (tcg_enabled() && val) { /* TODO - for now only allow broken for TCG */ error_setg(errp, "Requested safe bounds check capability level not supported by tcg, try a different value for cap-sbbc"); } else if (kvm_enabled() && (val > kvm_val)) { error_setg(errp, "Requested safe bounds check capability level not supported by kvm, try cap-sbbc=%s", cap_sbbc_possible.vals[kvm_val]); } }
/*
 * Create the storage-key device appropriate for the active backend
 * (KVM-backed or pure-QEMU), attach it to the machine, and realize it.
 */
void s390_skeys_init(void)
{
    const char *type = kvm_enabled() ? TYPE_KVM_S390_SKEYS
                                     : TYPE_QEMU_S390_SKEYS;
    Object *obj = object_new(type);

    object_property_add_child(qdev_get_machine(), TYPE_S390_SKEYS,
                              obj, NULL);
    object_unref(obj);

    qdev_init_nofail(DEVICE(obj));
}
/*
 * Incoming-migration hook for s390 CPUs.  Returns 0 on success or a
 * negative value to fail the migration.
 */
static int cpu_post_load(void *opaque, int version_id)
{
    S390CPU *cpu = opaque;

    if (!kvm_enabled()) {
        return 0;
    }

    /*
     * As the cpu state is pushed to kvm via kvm_set_mp_state rather
     * than via cpu_synchronize_state, we need update kvm here.
     */
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    return kvm_s390_vcpu_interrupt_post_load(cpu);
}