void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg) { PS2KbdState *s = (PS2KbdState *)qemu_mallocz(sizeof(PS2KbdState)); s->common.update_irq = update_irq; s->common.update_arg = update_arg; s->scancode_set = 2; ps2_reset(&s->common); register_savevm("ps2kbd", 0, 3, ps2_kbd_save, ps2_kbd_load, s); gui_register_dev_key_callback(ps2_put_keycode, s); qemu_register_reset(ps2_reset, &s->common); return s; }
/* Register a new device class.
 *
 * name:   class name (duplicated, so the caller's copy may be freed)
 * create: factory callback used to instantiate devices of this class
 * nirq:   number of IRQ lines devices of this class expose
 * Returns the newly allocated class, which is also pushed onto the
 * global all_dc list.
 */
QEMUDeviceClass *qdev_new(const char *name, QDEVCreateFn create, int nirq)
{
    QEMUDeviceClass *klass = qemu_mallocz(sizeof(*klass));

    klass->num_irqs = nirq;
    klass->create = create;
    klass->name = qemu_strdup(name);

    /* Prepend to the singly-linked list of all known classes. */
    klass->next = all_dc;
    all_dc = klass;

    return klass;
}
void *ps2_kbd_init(void (*update_irq)(void *, int), void *update_arg) { PS2KbdState *s = (PS2KbdState *)qemu_mallocz(sizeof(PS2KbdState)); TEMU_KbdState = s; s->common.update_irq = update_irq; s->common.update_arg = update_arg; ps2_reset(&s->common); register_savevm("ps2kbd", 0, 2, ps2_kbd_save, ps2_kbd_load, s); qemu_add_kbd_event_handler(ps2_put_keycode, s); qemu_register_reset(ps2_reset, &s->common); return s; }
/* Initialize the MSI-X structures.  Note: if MSI-X is supported, BAR size is
 * modified, it should be retrieved with msix_bar_size.
 *
 * dev:      PCI device to enable MSI-X on
 * nentries: number of MSI-X vectors requested (capped at MSIX_MAX_ENTRIES)
 * bar/bar_nr/bar_size: BAR that will host the MSI-X table MMIO page
 * Returns 0 on success, -ENOTSUP / -EINVAL / the msix_add_config() error
 * otherwise; on error all allocations made here are rolled back.
 */
int msix_init(struct PCIDevice *dev, unsigned short nentries,
              MemoryRegion *bar,
              unsigned bar_nr, unsigned bar_size)
{
    int ret;
    /* Nothing to do if MSI is not supported by interrupt controller */
    if (!msix_supported)
        return -ENOTSUP;

    if (nentries > MSIX_MAX_ENTRIES)
        return -EINVAL;

    /* Usage bitmap is sized for the maximum so vectors can be enabled
     * later without reallocation. */
    dev->msix_entry_used = qemu_mallocz(MSIX_MAX_ENTRIES *
                                        sizeof *dev->msix_entry_used);

    dev->msix_table_page = qemu_mallocz(MSIX_PAGE_SIZE);
    msix_mask_all(dev, nentries);

    memory_region_init_io(&dev->msix_mmio, &msix_mmio_ops, dev,
                          "msix", MSIX_PAGE_SIZE);

    dev->msix_entries_nr = nentries;
    ret = msix_add_config(dev, nentries, bar_nr, bar_size);
    if (ret)
        goto err_config;

    dev->cap_present |= QEMU_PCI_CAP_MSIX;
    msix_mmio_setup(dev, bar);
    return 0;

err_config:
    /* Undo everything above, in reverse order of construction. */
    dev->msix_entries_nr = 0;
    memory_region_destroy(&dev->msix_mmio);
    qemu_free(dev->msix_table_page);
    dev->msix_table_page = NULL;
    qemu_free(dev->msix_entry_used);
    dev->msix_entry_used = NULL;
    return ret;
}
UserDefTwo * qmp_user_def_cmd2(UserDefOne * ud1a, UserDefOne * ud1b, Error **errp) { UserDefTwo *ret; UserDefOne *ud1c = qemu_mallocz(sizeof(UserDefOne)); UserDefOne *ud1d = qemu_mallocz(sizeof(UserDefOne)); ud1c->string = strdup(ud1a->string); ud1c->integer = ud1a->integer; ud1d->string = strdup(ud1b->string); ud1d->integer = ud1b->integer; ret = qemu_mallocz(sizeof(UserDefTwo)); ret->string = strdup("blah1"); ret->dict.string = strdup("blah2"); ret->dict.dict.userdef = ud1c; ret->dict.dict.string = strdup("blah3"); ret->dict.has_dict2 = true; ret->dict.dict2.userdef = ud1d; ret->dict.dict2.string = strdup("blah4"); return ret; }
/* Initialize a HostMem tracker: zero the state, create its lock and an
 * (initially empty) region table, then register the physical-memory
 * client callbacks so guest memory changes are reported to us. */
void hostmem_init(HostMem *hostmem)
{
    memset(hostmem, 0, sizeof(*hostmem));

    qemu_mutex_init(&hostmem->mem_lock);
    hostmem->mem = qemu_mallocz(sizeof(*hostmem->mem));

    hostmem->client.set_memory = hostmem_client_set_memory;
    hostmem->client.sync_dirty_bitmap = hostmem_client_sync_dirty_bitmap;
    hostmem->client.migration_log = hostmem_client_migration_log;
    cpu_register_phys_memory_client(&hostmem->client);
}
MigrationState *exec_start_outgoing_migration(const char *command, int64_t bandwidth_limit, int detach) { FdMigrationState *s; FILE *f; s = qemu_mallocz(sizeof(*s)); f = popen(command, "w"); if (f == NULL) { dprintf("Unable to popen exec target\n"); goto err_after_alloc; } s->fd = fileno(f); if (s->fd == -1) { dprintf("Unable to retrieve file descriptor for popen'd handle\n"); goto err_after_open; } if (fcntl(s->fd, F_SETFD, O_NONBLOCK) == -1) { dprintf("Unable to set nonblocking mode on file descriptor\n"); goto err_after_open; } s->opaque = qemu_popen(f, "w"); s->close = exec_close; s->get_error = file_errno; s->write = file_write; s->mig_state.cancel = migrate_fd_cancel; s->mig_state.get_status = migrate_fd_get_status; s->mig_state.release = migrate_fd_release; s->state = MIG_STATE_ACTIVE; s->mon_resume = NULL; s->bandwidth_limit = bandwidth_limit; if (!detach) migrate_fd_monitor_suspend(s); migrate_fd_connect(s); return &s->mig_state; err_after_open: pclose(f); err_after_alloc: qemu_free(s); return NULL; }
/* Load a flattened device tree file into guest memory at load_addr.
 *
 * The in-guest copy is opened with twice the file's size so later
 * manipulation has room to grow.  The temporary host-side buffer is
 * freed on all paths.
 * Returns the guest-side fdt pointer on success, NULL on failure.
 */
void *load_device_tree(const char *filename_path, void *load_addr)
{
    int dt_file_size;
    int dt_file_load_size;
    int new_dt_size;
    int ret;
    void *dt_file = NULL;
    void *fdt;

    dt_file_size = get_image_size(filename_path);
    if (dt_file_size < 0) {
        printf("Unable to get size of device tree file '%s'\n",
               filename_path);
        goto fail;
    }

    /* First allocate space in qemu for device tree */
    dt_file = qemu_mallocz(dt_file_size);
    if (dt_file == NULL) {
        printf("Unable to allocate memory in qemu for device tree\n");
        goto fail;
    }

    dt_file_load_size = load_image(filename_path, dt_file);
    /* Bug fix: the load_image() result was previously ignored, so a
     * truncated or unreadable file was passed on to fdt_open_into(). */
    if (dt_file_load_size < 0) {
        printf("Unable to load device tree file '%s'\n", filename_path);
        goto fail;
    }

    /* Second we place new copy of 2x size in guest memory
     * This give us enough room for manipulation. */
    new_dt_size = dt_file_size * 2;

    fdt = load_addr;
    ret = fdt_open_into(dt_file, fdt, new_dt_size);
    if (ret) {
        printf("Unable to copy device tree in memory\n");
        goto fail;
    }

    /* Check sanity of device tree */
    if (fdt_check_header(fdt)) {
        printf ("Device tree file loaded into memory is invalid: %s\n",
            filename_path);
        goto fail;
    }

    /* free qemu memory with old device tree */
    qemu_free(dt_file);
    return fdt;

fail:
    qemu_free(dt_file);
    return NULL;
}
/* initialize the QFB device */ void nand_dev_init(uint32_t base) { int iomemtype; static int instance_id = 0; nand_dev_controller_state *s; s = (nand_dev_controller_state *)qemu_mallocz(sizeof(nand_dev_controller_state)); iomemtype = cpu_register_io_memory(nand_dev_readfn, nand_dev_writefn, s); cpu_register_physical_memory(base, 0x00000fff, iomemtype); s->base = base; register_savevm( "nand_dev", instance_id++, NAND_DEV_STATE_SAVE_VERSION, nand_dev_controller_state_save, nand_dev_controller_state_load, s); }
/* Allocate a new QEMUClock of the given type, enabled by default. */
static QEMUClock *qemu_new_clock(int type)
{
    QEMUClock *clock = qemu_mallocz(sizeof(QEMUClock));

    clock->type = type;
    clock->enabled = 1;
    notifier_list_init(&clock->reset_notifiers);

    /* Host clock only: remember the current real time so backward
     * jumps of the host clock can be detected and reported later. */
    if (type == QEMU_CLOCK_HOST) {
        clock->last = get_clock_realtime();
    }

    return clock;
}
/* Create the R2D board FPGA at 'base' and return its inbound IRQ array.
 * irl is the outgoing interrupt line toward the CPU. */
static qemu_irq *r2d_fpga_init(target_phys_addr_t base, qemu_irq irl)
{
    r2d_fpga_t *fpga = qemu_mallocz(sizeof(r2d_fpga_t));
    int mmio;

    fpga->irl = irl;

    mmio = cpu_register_io_memory(r2d_fpga_readfn, r2d_fpga_writefn, fpga);
    cpu_register_physical_memory(base, 0x40, mmio);

    return qemu_allocate_irqs(r2d_fpga_irq_set, fpga, NR_IRQS);
}
/* Wrap a block device in a QEMUFile whose data starts at 'offset'.
 * Writable handles get only a put_buffer op, read-only ones only a
 * get_buffer op; both are closed via bdrv_fclose. */
static QEMUFile *qemu_fopen_bdrv(BlockDriverState *bs, int64_t offset,
                                 int is_writable)
{
    QEMUFileBdrv *bdrv_file = qemu_mallocz(sizeof(QEMUFileBdrv));

    bdrv_file->bs = bs;
    bdrv_file->base_offset = offset;

    if (is_writable) {
        return qemu_fopen_ops(bdrv_file, block_put_buffer, NULL,
                              bdrv_fclose, NULL, NULL);
    }

    return qemu_fopen_ops(bdrv_file, NULL, block_get_buffer,
                          bdrv_fclose, NULL, NULL);
}
/* Realize the Syborg interrupt controller sysbus device: one outgoing
 * parent IRQ, num_irqs inbound GPIO lines, a 4K MMIO window, per-line
 * flag storage, and savevm registration. */
static void syborg_int_init(SysBusDevice *dev)
{
    SyborgIntState *s = FROM_SYSBUS(SyborgIntState, dev);
    int mmio;

    sysbus_init_irq(dev, &s->parent_irq);
    qdev_init_gpio_in(&dev->qdev, syborg_int_set_irq, s->num_irqs);

    mmio = cpu_register_io_memory(syborg_int_readfn, syborg_int_writefn, s);
    sysbus_init_mmio(dev, 0x1000, mmio);

    /* One flags record per interrupt line. */
    s->flags = qemu_mallocz(s->num_irqs * sizeof(syborg_int_flags));

    register_savevm("syborg_int", -1, 1, syborg_int_save, syborg_int_load, s);
}
static arm_timer_state *arm_timer_init(uint32_t freq) { arm_timer_state *s; QEMUBH *bh; s = (arm_timer_state *)qemu_mallocz(sizeof(arm_timer_state)); s->freq = freq; s->control = TIMER_CTRL_IE; bh = qemu_bh_new(arm_timer_tick, s); s->timer = ptimer_init(bh); register_savevm("arm_timer", -1, 1, arm_timer_save, arm_timer_load, s); return s; }
/* Start an outgoing migration over a file descriptor previously handed
 * to the monitor under 'fdname'.
 *
 * bandwidth_limit: throttle for the migration stream
 * detach:          when zero, the monitor is suspended until completion
 * blk / inc:       block-migration and incremental-block flags
 * Returns the embedded MigrationState on success, NULL on failure
 * (the partially built state is freed and the fd closed if open).
 */
MigrationState *fd_start_outgoing_migration(Monitor *mon,
                                            const char *fdname,
                                            int64_t bandwidth_limit,
                                            int detach,
                                            int blk,
                                            int inc)
{
    FdMigrationState *s = qemu_mallocz(sizeof(*s));

    s->fd = monitor_get_fd(mon, fdname);
    if (s->fd == -1) {
        DPRINTF("fd_migration: invalid file descriptor identifier\n");
        goto err_after_alloc;
    }

    if (fcntl(s->fd, F_SETFL, O_NONBLOCK) == -1) {
        DPRINTF("Unable to set nonblocking mode on file descriptor\n");
        goto err_after_open;
    }

    /* Low-level transport ops. */
    s->get_error = fd_errno;
    s->write = fd_write;
    s->close = fd_close;

    /* Generic migration-state ops and parameters. */
    s->mig_state.cancel = migrate_fd_cancel;
    s->mig_state.get_status = migrate_fd_get_status;
    s->mig_state.release = migrate_fd_release;
    s->mig_state.blk = blk;
    s->mig_state.shared = inc;

    s->state = MIG_STATE_ACTIVE;
    s->mon = NULL;
    s->bandwidth_limit = bandwidth_limit;

    if (!detach) {
        migrate_fd_monitor_suspend(s, mon);
    }

    migrate_fd_connect(s);
    return &s->mig_state;

err_after_open:
    close(s->fd);
err_after_alloc:
    qemu_free(s);
    return NULL;
}
/* test enum values nested in schema-defined structs:
 * round-trip a NestedEnumsOne through the QMP output visitor (C -> QObject)
 * and back through the input visitor (QObject -> C), then verify that all
 * present enum members and the has_* flags survive unchanged. */
static void test_nested_enums(void)
{
    QmpOutputVisitor *mo;
    QmpInputVisitor *mi;
    Visitor *v;
    NestedEnumsOne *nested_enums, *nested_enums_cpy = NULL;
    Error *err = NULL;
    QObject *obj;
    QString *str;

    nested_enums = qemu_mallocz(sizeof(NestedEnumsOne));
    nested_enums->enum1 = ENUM_ONE_VALUE1;
    nested_enums->enum2 = ENUM_ONE_VALUE2;
    nested_enums->enum3 = ENUM_ONE_VALUE3;
    nested_enums->enum4 = ENUM_ONE_VALUE3;
    /* enum2 is deliberately marked absent although a value is set,
     * so the visitor must skip it; enum4 is present. */
    nested_enums->has_enum2 = false;
    nested_enums->has_enum4 = true;

    /* C type -> QObject */
    mo = qmp_output_visitor_new();
    v = qmp_output_get_visitor(mo);
    visit_type_NestedEnumsOne(v, &nested_enums, NULL, &err);
    if (err) {
        g_error("%s", error_get_pretty(err));
    }
    obj = qmp_output_get_qobject(mo);
    g_assert(obj);
    str = qobject_to_json_pretty(obj);
    g_print("%s\n", qstring_get_str(str));
    QDECREF(str);

    /* QObject -> C type */
    mi = qmp_input_visitor_new(obj);
    v = qmp_input_get_visitor(mi);
    visit_type_NestedEnumsOne(v, &nested_enums_cpy, NULL, &err);
    if (err) {
        g_error("%s", error_get_pretty(err));
    }
    g_assert(nested_enums_cpy);
    g_assert(nested_enums_cpy->enum1 == nested_enums->enum1);
    g_assert(nested_enums_cpy->enum3 == nested_enums->enum3);
    g_assert(nested_enums_cpy->enum4 == nested_enums->enum4);
    /* Only the has_ flags are checked for enum2: its value was absent. */
    g_assert(nested_enums_cpy->has_enum2 == false);
    g_assert(nested_enums_cpy->has_enum4 == true);

    qobject_decref(obj);
    qapi_free_NestedEnumsOne(nested_enums);
    qapi_free_NestedEnumsOne(nested_enums_cpy);
}
/* Create the ColdFire interrupt controller at 'base' and return its
 * array of 64 inbound IRQ lines. */
qemu_irq *mcf_intc_init(target_phys_addr_t base, CPUState *env)
{
    mcf_intc_state *intc;
    int mmio;

    intc = qemu_mallocz(sizeof(mcf_intc_state));
    intc->env = env;
    mcf_intc_reset(intc);

    mmio = cpu_register_io_memory(0, mcf_intc_readfn, mcf_intc_writefn,
                                  intc);
    cpu_register_physical_memory(base, 0x100, mmio);

    return qemu_allocate_irqs(mcf_intc_set_irq, intc, 64);
}
/* Create the ARM system controller at 'base' with identity register
 * value 'sys_id'. */
void arm_sysctl_init(uint32_t base, uint32_t sys_id)
{
    int mmio;
    arm_sysctl_state *sysctl;

    sysctl = (arm_sysctl_state *)qemu_mallocz(sizeof(arm_sysctl_state));
    sysctl->sys_id = sys_id;
    /* The MPcore bootloader uses these flags to start secondary CPUs.
       We don't use a bootloader, so do this here.  */
    sysctl->flags = 3;

    mmio = cpu_register_io_memory(0, arm_sysctl_readfn, arm_sysctl_writefn,
                                  sysctl);
    cpu_register_physical_memory(base, 0x00001000, mmio);
    /* ??? Save/restore.  */
}
/* Create a memory-mapped parallel port at 'base'.
 * it_shift scales the register spacing (8 << it_shift bytes total);
 * chr is the backing character device.  (If fd is zero, it means that
 * the parallel device uses the console.) */
ParallelState *parallel_mm_init(target_phys_addr_t base, int it_shift,
                                qemu_irq irq, CharDriverState *chr)
{
    int mmio;
    ParallelState *port = qemu_mallocz(sizeof(ParallelState));

    port->irq = irq;
    port->chr = chr;
    port->it_shift = it_shift;
    qemu_register_reset(parallel_reset, port);

    mmio = cpu_register_io_memory(parallel_mm_read_sw, parallel_mm_write_sw,
                                  port);
    cpu_register_physical_memory(base, 8 << it_shift, mmio);
    return port;
}
SkinScreen* skin_load_configuration(const char* file, int portrait) { //printf("skin_config.c: >> skin_load_configuration\n"); SkinScreen *skin = (SkinScreen*) qemu_mallocz(sizeof(SkinScreen)); skin->es = (EmulatedScreen*) qemu_mallocz(sizeof(EmulatedScreen)); //printf("skin_load_configuration 1\n"); skin->config = (SkinConfig*) qemu_mallocz(sizeof(SkinConfig)); //printf("skin_load_configuration 2\n"); if (portrait) skin->rotate_req = skin->rotate = on; skin->zoom_factor = 100; if (skin_load_file(skin, file)) { skin_screen_free(skin); skin = NULL; } else { //printf("skin_config.c: skin_load_configuration, skin loaded '%s'\n", file); skin->path = (char *)qemu_malloc(strlen(file) + 1); strcpy(skin->path, file); } return skin; }
/* Create the CS4231 audio codec at 'base', register its savevm and
 * reset hooks, and perform an initial reset.  irq/intctl are currently
 * unused by this init path. */
void cs_init(target_phys_addr_t base, int irq, void *intctl)
{
    CSState *codec;
    int mmio;

    codec = qemu_mallocz(sizeof(CSState));
    if (!codec)
        return;

    mmio = cpu_register_io_memory(0, cs_mem_read, cs_mem_write, codec);
    cpu_register_physical_memory(base, CS_MAXADDR, mmio);

    register_savevm("cs4231", base, 1, cs_save, cs_load, codec);
    qemu_register_reset(cs_reset, codec);
    cs_reset(codec);
}
/* initialize the trace device */ void trace_dev_init() { trace_dev_state *s; s = (trace_dev_state *)qemu_mallocz(sizeof(trace_dev_state)); s->dev.name = "qemu_trace"; s->dev.id = -1; s->dev.base = 0; // will be allocated dynamically s->dev.size = 0x2000; s->dev.irq = 0; s->dev.irq_count = 0; goldfish_device_add(&s->dev, trace_dev_readfn, trace_dev_writefn, s); exec_path[0] = exec_arg[0] = '\0'; }
/* Create a memory-mapped parallel port at 'base'.
 * it_shift scales the register spacing (8 << it_shift bytes total);
 * chr is the backing character device.  (If fd is zero, it means that
 * the parallel device uses the console.)
 * Returns the port state, or NULL if allocation fails. */
ParallelState *parallel_mm_init(target_phys_addr_t base, int it_shift,
                                qemu_irq irq, CharDriverState *chr)
{
    int mmio;
    ParallelState *port = (ParallelState *)qemu_mallocz(sizeof(ParallelState));

    if (!port)
        return NULL;

    parallel_reset(port, irq, chr);
    port->base = base;
    port->it_shift = it_shift;

    mmio = cpu_register_io_memory(0, parallel_mm_read_sw,
                                  parallel_mm_write_sw, port);
    cpu_register_physical_memory(base, 8 << it_shift, mmio);
    return port;
}
/* Create a KVM virtual machine and stash its fd in kvm_state->vmfd.
 * Returns 0 on success, -1 on failure. */
int kvm_create_vm(kvm_context_t kvm)
{
    int fd;

#ifdef KVM_CAP_IRQ_ROUTING
    kvm->irq_routes = qemu_mallocz(sizeof(*kvm->irq_routes));
    kvm->nr_allocated_irq_routes = 0;
#endif

    fd = kvm_ioctl(kvm_state, KVM_CREATE_VM, 0);
    if (fd < 0) {
        fprintf(stderr, "kvm_create_vm: %m\n");
#ifdef KVM_CAP_IRQ_ROUTING
        /* Fix: don't leak the routing table allocated above when VM
         * creation fails. */
        qemu_free(kvm->irq_routes);
        kvm->irq_routes = NULL;
#endif
        return -1;
    }

    kvm_state->vmfd = fd;
    return 0;
}
/* Start a vhost device: negotiate features, hand the memory table to the
 * kernel, set up dirty logging if enabled, and initialize every virtqueue.
 * Returns 0 on success or a negative errno; on virtqueue failure all
 * queues initialized so far are torn down again. */
int vhost_dev_start(struct vhost_dev *hdev, VirtIODevice *vdev)
{
    int i, r;

    r = vhost_dev_set_features(hdev, hdev->log_enabled);
    if (r < 0) {
        goto fail;
    }
    r = ioctl(hdev->control, VHOST_SET_MEM_TABLE, hdev->mem);
    if (r < 0) {
        r = -errno;
        goto fail;
    }
    if (hdev->log_enabled) {
        /* A zero-sized log is legal: pass NULL rather than allocating. */
        hdev->log_size = vhost_get_log_size(hdev);
        hdev->log = hdev->log_size ?
            qemu_mallocz(hdev->log_size * sizeof *hdev->log) : NULL;
        r = ioctl(hdev->control, VHOST_SET_LOG_BASE,
                  (uint64_t)(unsigned long)hdev->log);
        if (r < 0) {
            r = -errno;
            goto fail;
        }
    }

    for (i = 0; i < hdev->nvqs; ++i) {
        r = vhost_virtqueue_init(hdev, vdev, hdev->vqs + i, i);
        if (r < 0) {
            goto fail_vq;
        }
    }
    hdev->started = true;

    return 0;

fail_vq:
    /* Unwind only the queues that were successfully initialized. */
    while (--i >= 0) {
        vhost_virtqueue_cleanup(hdev, vdev, hdev->vqs + i, i);
    }
fail:
    return r;
}
/* Allocate a CPU timer driven by vm_clock.
 *
 * name:          human-readable label (not copied; must outlive the timer)
 * env:           CPU passed as the callback's opaque argument
 * cb:            function invoked when the timer fires
 * frequency:     tick rate of the emulated counter
 * disabled_mask: bit used to represent the disabled state in registers
 * The timer starts disabled, with its offset anchored at the current
 * vm_clock value.
 */
static CPUTimer* cpu_timer_create(const char* name, CPUState *env,
                                  QEMUBHFunc *cb, uint32_t frequency,
                                  uint64_t disabled_mask)
{
    CPUTimer *t = qemu_mallocz(sizeof (CPUTimer));

    t->name = name;
    t->frequency = frequency;
    t->disabled_mask = disabled_mask;
    t->disabled = 1;

    t->clock_offset = qemu_get_clock(vm_clock);
    t->qtimer = qemu_new_timer(vm_clock, cb, env);

    return t;
}
/* Map an assigned PCI device's I/O port BAR into the guest I/O space.
 * On the first mapping of this region, grant the vcpus ioperm access to
 * the host port range; on every (re)mapping, register byte/word/long
 * read and write handlers for the guest port window. */
static void assigned_dev_ioport_map(PCIDevice *pci_dev, int region_num,
                                    uint32_t addr, uint32_t size, int type)
{
    AssignedDevice *r_dev = container_of(pci_dev, AssignedDevice, dev);
    AssignedDevRegion *region = &r_dev->v_addrs[region_num];
    /* e_size == 0 means this region has never been mapped before. */
    int first_map = (region->e_size == 0);
    CPUState *env;

    region->e_physbase = addr;
    region->e_size = size;

    DEBUG("e_phys=0x%x r_baseport=%x type=0x%x len=%d region_num=%d \n",
          addr, region->u.r_baseport, type, size, region_num);

    if (first_map) {
        struct ioperm_data *data;

        data = qemu_mallocz(sizeof(struct ioperm_data));
        if (data == NULL) {
            fprintf(stderr, "%s: Out of memory\n", __func__);
            exit(1);
        }

        data->start_port = region->u.r_baseport;
        data->num = region->r_size;
        data->turn_on = 1;

        kvm_add_ioperm_data(data);

        /* Grant the host port range to every vcpu. */
        for (env = first_cpu; env; env = env->next_cpu)
            kvm_ioperm(env, data);
    }

    /* Install handlers for all three access widths, read and write. */
    register_ioport_read(addr, size, 1, assigned_dev_ioport_readb,
                         (r_dev->v_addrs + region_num));
    register_ioport_read(addr, size, 2, assigned_dev_ioport_readw,
                         (r_dev->v_addrs + region_num));
    register_ioport_read(addr, size, 4, assigned_dev_ioport_readl,
                         (r_dev->v_addrs + region_num));
    register_ioport_write(addr, size, 1, assigned_dev_ioport_writeb,
                          (r_dev->v_addrs + region_num));
    register_ioport_write(addr, size, 2, assigned_dev_ioport_writew,
                          (r_dev->v_addrs + region_num));
    register_ioport_write(addr, size, 4, assigned_dev_ioport_writel,
                          (r_dev->v_addrs + region_num));
}
/* Create a PL022 SSP controller at 'base'.
 * xfer_cb/opaque: callback invoked to exchange a data word with the
 * attached peripheral.  irq is raised per the controller's FIFO state. */
void pl022_init(uint32_t base, qemu_irq irq, int (*xfer_cb)(void *, int),
                void *opaque)
{
    pl022_state *ssp;
    int mmio;

    ssp = (pl022_state *)qemu_mallocz(sizeof(pl022_state));

    mmio = cpu_register_io_memory(0, pl022_readfn, pl022_writefn, ssp);
    cpu_register_physical_memory(base, 0x00001000, mmio);

    ssp->base = base;
    ssp->irq = irq;
    ssp->xfer_cb = xfer_cb;
    ssp->opaque = opaque;
    pl022_reset(ssp);
    /* ??? Save/restore.  */
}
/* Create the goldfish LED/backlight device at 'base' and register it on
 * the goldfish bus.  Fields not set here are zeroed by qemu_mallocz(). */
void goldfish_leds_init(uint32_t base)
{
    struct goldfish_leds_state *leds;

    leds = (struct goldfish_leds_state *)qemu_mallocz(sizeof(*leds));
    leds->dev.name = "goldfish_leds";
    leds->dev.id = 0;
    leds->dev.base = base;
    leds->dev.size = 0x1000;
    leds->dev.irq_count = 0;

    goldfish_device_add(&leds->dev, leds_readfn, leds_writefn, leds);

#ifdef CONFIG_VPMU
    /* Expose the brightness register to the VPMU statistics collector. */
    GlobalVPMU.adev_stat_ptr->lcd_brightness = &(leds->brightness);
#endif
}
struct HCIInfo *bt_host_hci(const char *id) { struct bt_host_hci_s *s; int fd = -1; # ifdef CONFIG_BLUEZ int dev_id = hci_devid(id); struct hci_filter flt; if (dev_id < 0) { fprintf(stderr, "qemu: `%s' not available\n", id); return 0; } fd = hci_open_dev(dev_id); /* XXX: can we ensure nobody else has the device opened? */ # endif if (fd < 0) { fprintf(stderr, "qemu: Can't open `%s': %s (%i)\n", id, strerror(errno), errno); return NULL; } # ifdef CONFIG_BLUEZ hci_filter_clear(&flt); hci_filter_all_ptypes(&flt); hci_filter_all_events(&flt); if (setsockopt(fd, SOL_HCI, HCI_FILTER, &flt, sizeof(flt)) < 0) { fprintf(stderr, "qemu: Can't set HCI filter on socket (%i)\n", errno); return 0; } # endif s = qemu_mallocz(sizeof(struct bt_host_hci_s)); s->fd = fd; s->hci.cmd_send = bt_host_cmd; s->hci.sco_send = bt_host_sco; s->hci.acl_send = bt_host_acl; s->hci.bdaddr_set = bt_host_bdaddr_set; qemu_set_fd_handler2(s->fd, bt_host_read_poll, bt_host_read, NULL, s); return &s->hci; }