int vmm_vcpu_irq_deinit(struct vmm_vcpu *vcpu)
{
	/* A NULL VCPU cannot be de-initialized */
	if (!vcpu) {
		return VMM_EFAIL;
	}

	/* Orphan VCPUs carry no guest IRQ state; nothing to tear down */
	if (!vcpu->is_normal) {
		return VMM_OK;
	}

	/* Cancel the pending WFI timeout event, then release it */
	vmm_timer_event_stop(vcpu->irqs.wfi.priv);
	vmm_free(vcpu->irqs.wfi.priv);
	vcpu->irqs.wfi.priv = NULL;

	/* Release the per-IRQ flags array */
	vmm_free(vcpu->irqs.irq);
	vcpu->irqs.irq = NULL;

	return VMM_OK;
}
int vmm_fb_unregister(struct vmm_fb_info *info) { int rc; struct vmm_fb_event event; struct vmm_classdev *cd; if (info == NULL) { return VMM_EFAIL; } if (info->dev == NULL) { return VMM_EFAIL; } cd = vmm_devdrv_find_classdev(VMM_FB_CLASS_NAME, info->dev->node->name); if (!cd) { return VMM_EFAIL; } rc = vmm_devdrv_unregister_classdev(VMM_FB_CLASS_NAME, cd); if (!rc) { vmm_free(cd); } if (info->pixmap.addr && (info->pixmap.flags & FB_PIXMAP_DEFAULT)) { vmm_free(info->pixmap.addr); } vmm_fb_destroy_modelist(&info->modelist); event.info = info; vmm_fb_notifier_call_chain(FB_EVENT_FB_UNREGISTERED, &event); return rc; }
int mempool_destroy(struct mempool *mp) { int rc = VMM_OK; if (!mp) { return VMM_EFAIL; } switch (mp->type) { case MEMPOOL_TYPE_RAW: rc = vmm_host_memunmap(mp->entity_base); break; case MEMPOOL_TYPE_RAM: rc = vmm_host_free_pages(mp->entity_base, mp->d.ram.page_count); break; case MEMPOOL_TYPE_HEAP: vmm_free((void *)mp->entity_base); break; default: return VMM_EINVALID; }; fifo_free(mp->f); vmm_free(mp); return rc; }
int vmm_ringbuf_free(struct vmm_ringbuf *rb) { vmm_free(rb->keys); vmm_free(rb); return VMM_OK; }
static void virtio_blk_req_done(struct virtio_blk_dev_req *req, u8 status)
{
	struct virtio_device *dev = req->bdev->vdev;
	int queueid = req->vq - req->bdev->vqs;
	bool copy_back;

	/* For a successfully completed read, copy the payload back
	 * into the guest's iovec list before releasing buffers */
	copy_back = (status == VIRTIO_BLK_S_OK) &&
		    (req->r.type == VMM_REQUEST_READ) &&
		    req->read_iov && req->len && req->r.data;
	if (copy_back) {
		virtio_buf_to_iovec_write(dev, req->read_iov,
					  req->read_iov_cnt,
					  req->r.data, req->len);
	}

	/* Release the request's iovec copy and data buffer */
	if (req->read_iov) {
		vmm_free(req->read_iov);
		req->read_iov = NULL;
		req->read_iov_cnt = 0;
	}
	if (req->r.data) {
		vmm_free(req->r.data);
		req->r.data = NULL;
	}

	/* Report completion status and push the used ring entry */
	virtio_buf_to_iovec_write(dev, &req->status_iov, 1, &status, 1);
	virtio_queue_set_used_elem(req->vq, req->head, req->len);

	if (virtio_queue_should_signal(req->vq)) {
		dev->tra->notify(dev, queueid);
	}
}
int vmm_host_irqext_dispose_mapping(u32 hirq)
{
	int rc = VMM_OK;
	u32 index;
	irq_flags_t flags;
	struct vmm_host_irq *irq;

	/* Non-extended IRQs just get their hwirq mapping reset */
	if (hirq < CONFIG_HOST_IRQ_COUNT) {
		return vmm_host_irq_set_hwirq(hirq, hirq);
	}

	index = hirq - CONFIG_HOST_IRQ_COUNT;

	vmm_write_lock_irqsave_lite(&iectrl.lock, flags);

	if (index >= iectrl.count) {
		rc = VMM_EINVALID;
		goto done;
	}

	/* Detach the descriptor, then release it and its name copy */
	irq = iectrl.irqs[index];
	iectrl.irqs[index] = NULL;
	if (irq) {
		if (irq->name) {
			vmm_free((void *)irq->name);
		}
		vmm_free(irq);
	}

done:
	vmm_write_unlock_irqrestore_lite(&iectrl.lock, flags);

	return rc;
}
static void __blockdev_free(struct vmm_blockdev *bdev, bool free_rq)
{
	/* Release the request queue first (only when we own it),
	 * then the block device itself */
	if (free_rq) {
		vmm_free(bdev->rq);
	}

	vmm_free(bdev);
}
static int ne2k_driver_probe(struct vmm_driver *dev, const struct vmm_devid *devid) { int rc; struct vmm_netdev *ndev; struct nic_priv_data *priv_data; ndev = vmm_malloc(sizeof(struct vmm_netdev)); if(!ndev) { rc = VMM_EFAIL; goto free_nothing; } vmm_memset(ndev,0, sizeof(struct vmm_netdev)); priv_data = vmm_malloc(sizeof(struct nic_priv_data)); if(!priv_data) { rc = VMM_EFAIL; goto free_chardev; } vmm_memset(priv_data,0, sizeof(struct nic_priv_data)); if (ne2k_init(priv_data)) { rc = VMM_EFAIL; goto free_chardev; } priv_data->txrx_thread = vmm_hyperthread_create("ne2k-isa-driver", dp83902a_poll, priv_data); if (priv_data == NULL) { rc = VMM_EFAIL; goto free_chardev; } vmm_hyperthread_run(priv_data->txrx_thread); vmm_strcpy(ndev->name, dev->node->name); ndev->dev = dev; ndev->ioctl = NULL; ndev->read = ne2k_read; ndev->write = ne2k_write; ndev->priv = (void *)priv_data; rc = vmm_netdev_register(ndev); if(rc) { goto free_port; } dev->priv = (void *)ndev; return VMM_OK; free_port: vmm_free(priv_data); free_chardev: vmm_free(ndev); free_nothing: return rc; }
void vmm_fb_release(struct vmm_fb_info *info)
{
	/* Releasing a NULL info is a silent no-op */
	if (!info) {
		return;
	}

	/* Release the apertures list when one was attached */
	if (info->apertures) {
		vmm_free(info->apertures);
	}

	vmm_free(info);
}
static int iso9660fs_unmount(struct mount *m)
{
	struct iso9660_mount_data *mdata = m->m_data;

	/* Release per-mount state, if any was allocated */
	if (mdata) {
		if (mdata->root_dir) {
			vmm_free(mdata->root_dir);
		}
		vmm_free(mdata);
	}

	m->m_data = NULL;

	return VMM_OK;
}
static int uart_driver_remove(struct vmm_device *dev)
{
	int rc = VMM_OK;
	struct vmm_chardev *cd = (struct vmm_chardev *)dev->priv;

	if (cd) {
		rc = vmm_chardev_unregister(cd);
		/* BUGFIX: only release the chardev when unregistering
		 * succeeded; the old code freed it unconditionally,
		 * leaving the chardev framework with dangling pointers
		 * on failure. */
		if (rc == VMM_OK) {
			vmm_free(cd->priv);
			vmm_free(cd);
			dev->priv = NULL;
		}
	}

	return rc;
}
void m_ext_free(struct vmm_mbuf *m)
{
	/* Drop one reference on the external buffer; on the last
	 * reference release the storage via the owner's callback,
	 * or straight to the heap when none was registered. */
	if (--(m->m_extref) == 0) {
		if (m->m_extfree) {
			(*m->m_extfree)(m, m->m_extbuf, m->m_extlen,
					m->m_extarg);
		} else {
			vmm_free(m->m_extbuf);
		}
	}

	/* Drop one reference on the mbuf itself */
	if (--(m->m_ref) == 0) {
		vmm_free(m);
	}
}
int arch_guest_init(struct vmm_guest *guest) { if (!guest->reset_count) { guest->arch_priv = vmm_malloc(sizeof(struct arm_guest_priv)); if (!guest->arch_priv) { return VMM_ENOMEM; } arm_guest_priv(guest)->ttbl = mmu_lpae_ttbl_alloc(TTBL_STAGE2); if (!arm_guest_priv(guest)->ttbl) { vmm_free(guest->arch_priv); guest->arch_priv = NULL; return VMM_ENOMEM; } if (vmm_devtree_read_u32(guest->node, "psci_version", &arm_guest_priv(guest)->psci_version)) { /* By default, assume PSCI v0.1 */ arm_guest_priv(guest)->psci_version = 1; } } return VMM_OK; }
int vmm_rtcdev_register(struct vmm_rtcdev *rdev) { int rc; struct vmm_classdev *cd; if (!(rdev && rdev->set_time && rdev->get_time)) { return VMM_EFAIL; } cd = vmm_zalloc(sizeof(struct vmm_classdev)); if (!cd) { return VMM_EFAIL; } INIT_LIST_HEAD(&cd->head); strcpy(cd->name, rdev->name); cd->dev = rdev->dev; cd->priv = rdev; rc = vmm_devdrv_register_classdev(VMM_RTCDEV_CLASS_NAME, cd); if (rc != VMM_OK) { vmm_free(cd); } return rc; }
void* unpack_elf(void* elf)
{
	struct elf_header* header = elf;
	struct elf_program_header* ph;

	/* Is this actually an ELF file? */
	if (header->magic != ELF_MAGIC) {
		show_cod(0, "No ELF");
		return 0;
	}

	void* elf_mod_entry = (void*) (header->entry);
	ph = (struct elf_program_header*)
		(((char*) header) + header->ph_offset);

	for (uint32_t n = 0; n < header->ph_entry_count; n++, ph++) {
		void* dest = (void*) ph->virt_addr;
		void* src = ((char*) header) + ph->offset;

		/* Only program headers of type LOAD (1) are mapped */
		if (ph->type != 1) {
			continue;
		}

		/* Map fresh pages over the segment's whole memory image */
		for (uint32_t offset = 0; offset < ph->mem_size;
		     offset += 0x1000) {
			vmm_free(dest + offset);
			vmm_alloc_addr(dest + offset, 0);
		}

		memcpy(dest, src, ph->file_size);

		/* BUGFIX: the ELF spec requires the tail of the segment
		 * beyond file_size (.bss) to be zero-filled. Harmless if
		 * vmm_alloc_addr already hands out zeroed pages, but we
		 * cannot rely on that here. */
		if (ph->mem_size > ph->file_size) {
			memset(((char*) dest) + ph->file_size, 0,
			       ph->mem_size - ph->file_size);
		}
	}

	return elf_mod_entry;
}
struct vmm_ringbuf *vmm_ringbuf_alloc(u32 key_size, u32 key_count)
{
	struct vmm_ringbuf *rb;

	rb = vmm_malloc(sizeof(struct vmm_ringbuf));
	if (!rb) {
		return NULL;
	}

	INIT_SPIN_LOCK(&rb->lock);

	/* Key storage holds key_count fixed-size slots */
	rb->keys = vmm_malloc(key_size * key_count);
	if (!rb->keys) {
		/* Key allocation failed: undo the ring buffer allocation */
		vmm_free(rb);
		return NULL;
	}

	rb->key_size = key_size;
	rb->key_count = key_count;
	rb->read_pos = 0;
	rb->write_pos = 0;
	rb->avail_count = 0;

	return rb;
}
int netstack_socket_accept(struct netstack_socket *sk, struct netstack_socket **new_sk) { err_t err; struct netconn *newconn; struct netstack_socket *tsk; if (!sk || !sk->priv || !new_sk) { return VMM_EINVALID; } tsk = vmm_zalloc(sizeof(struct netstack_socket)); if (!tsk) { return VMM_ENOMEM; } memcpy(tsk, sk, sizeof(struct netstack_socket)); tsk->priv = NULL; err = netconn_accept(sk->priv, &newconn); if (err != ERR_OK) { vmm_free(tsk); return VMM_EFAIL; } tsk->priv = newconn; *new_sk = tsk; return VMM_OK; }
int arch_guest_init(struct vmm_guest *guest) { struct riscv_guest_priv *priv; if (!guest->reset_count) { guest->arch_priv = vmm_malloc(sizeof(struct riscv_guest_priv)); if (!guest->arch_priv) { return VMM_ENOMEM; } priv = riscv_guest_priv(guest); priv->time_offset = vmm_manager_guest_reset_timestamp(guest); priv->time_offset = priv->time_offset * vmm_timer_clocksource_frequency(); priv->time_offset = udiv64(priv->time_offset, 1000000000ULL); priv->pgtbl = cpu_mmu_pgtbl_alloc(PGTBL_STAGE2); if (!priv->pgtbl) { vmm_free(guest->arch_priv); guest->arch_priv = NULL; return VMM_ENOMEM; } } return VMM_OK; }
static int __init vmm_blockdev_init(void)
{
	struct vmm_class *c;
	int rc;

	vmm_printf("Initialize Block Device Framework\n");

	c = vmm_malloc(sizeof(struct vmm_class));
	if (!c) {
		return VMM_ENOMEM;
	}

	INIT_LIST_HEAD(&c->head);
	/* strlcpy reports the full source length; >= size means truncation */
	if (strlcpy(c->name, VMM_BLOCKDEV_CLASS_NAME, sizeof(c->name)) >=
	    sizeof(c->name)) {
		rc = VMM_EOVERFLOW;
		goto free_class;
	}
	INIT_LIST_HEAD(&c->classdev_list);

	rc = vmm_devdrv_register_class(c);
	if (rc) {
		goto free_class;
	}

	return VMM_OK;

free_class:
	vmm_free(c);
	return rc;
}
static int virtio_pci_bar_probe(struct vmm_guest *guest, struct vmm_emudev *edev, const struct vmm_devtree_nodeid *eid) { int rc = VMM_OK; struct virtio_pci_dev *vdev; vdev = vmm_zalloc(sizeof(struct virtio_pci_dev)); if (!vdev) { rc = VMM_ENOMEM; goto virtio_pci_probe_done; } vdev->guest = guest; vmm_snprintf(vdev->dev.name, VMM_VIRTIO_DEVICE_MAX_NAME_LEN, "%s/%s", guest->name, edev->node->name); vdev->dev.edev = edev; vdev->dev.tra = &pci_tra; vdev->dev.tra_data = vdev; vdev->dev.guest = guest; vdev->config = (struct vmm_virtio_pci_config) { .queue_num = 256, }; rc = vmm_devtree_read_u32(edev->node, "virtio_type", &vdev->dev.id.type); if (rc) { goto virtio_pci_probe_freestate_fail; } rc = vmm_devtree_read_u32_atindex(edev->node, VMM_DEVTREE_INTERRUPTS_ATTR_NAME, &vdev->irq, 0); if (rc) { goto virtio_pci_probe_freestate_fail; } if ((rc = vmm_virtio_register_device(&vdev->dev))) { goto virtio_pci_probe_freestate_fail; } edev->priv = vdev; goto virtio_pci_probe_done; virtio_pci_probe_freestate_fail: vmm_free(vdev); virtio_pci_probe_done: return rc; } static struct vmm_devtree_nodeid virtio_pci_emuid_table[] = { { .type = "virtio", .compatible = "virtio,pci", }, { /* end of list */ }, };
static struct vmm_blockdev *__blockdev_alloc(bool alloc_rq)
{
	struct vmm_blockdev *bdev;

	bdev = vmm_zalloc(sizeof(struct vmm_blockdev));
	if (!bdev) {
		return NULL;
	}

	INIT_LIST_HEAD(&bdev->head);
	INIT_MUTEX(&bdev->child_lock);
	bdev->child_count = 0;
	INIT_LIST_HEAD(&bdev->child_list);

	/* Optionally attach a freshly allocated request queue */
	bdev->rq = NULL;
	if (alloc_rq) {
		bdev->rq = vmm_zalloc(sizeof(struct vmm_request_queue));
		if (!bdev->rq) {
			vmm_free(bdev);
			return NULL;
		}
		INIT_SPIN_LOCK(&bdev->rq->lock);
	}

	return bdev;
}
static int _irqext_expand(void) { unsigned int old_size = iectrl.count; unsigned int new_size = iectrl.count + HOST_IRQEXT_CHUNK; struct vmm_host_irq **irqs = NULL; unsigned long *bitmap = NULL; irqs = realloc(iectrl.irqs, old_size * sizeof (struct vmm_host_irq *), new_size * sizeof (struct vmm_host_irq *)); if (!irqs) { vmm_printf("%s: Failed to reallocate extended IRQ array from " "%d to %d bytes\n", __func__, old_size, new_size); return VMM_ENOMEM; } old_size = BITMAP_SIZE(old_size); new_size = BITMAP_SIZE(new_size); bitmap = realloc(iectrl.bitmap, old_size, new_size); if (!bitmap) { vmm_printf("%s: Failed to reallocate extended IRQ bitmap from " "%d to %d bytes\n", __func__, old_size, new_size); vmm_free(irqs); return VMM_ENOMEM; } iectrl.irqs = irqs; iectrl.bitmap = bitmap; iectrl.count += HOST_IRQEXT_CHUNK; return VMM_OK; }
int vmm_chardev_register(struct vmm_chardev * cdev) { int rc; struct vmm_classdev *cd; if (!(cdev && cdev->read && cdev->write)) { return VMM_EFAIL; } cd = vmm_malloc(sizeof(struct vmm_classdev)); if (!cd) { return VMM_EFAIL; } vmm_memset(cd, 0, sizeof(struct vmm_classdev)); INIT_LIST_HEAD(&cd->head); vmm_strcpy(cd->name, cdev->name); cd->dev = cdev->dev; cd->priv = cdev; rc = vmm_devdrv_register_classdev(VMM_CHARDEV_CLASS_NAME, cd); if (rc != VMM_OK) { vmm_free(cd); } return rc; }
static void virtio_blk_disconnect(struct virtio_device *dev)
{
	struct virtio_blk_dev *bdev = dev->emu_data;

	/* Detach from the block layer before releasing the device */
	vmm_blockdev_unregister_client(&bdev->blk_client);
	vmm_free(bdev);
}
int vmm_blockdev_register(struct vmm_blockdev * bdev) { int rc; struct vmm_classdev *cd; if (bdev == NULL) { return VMM_EFAIL; } if (bdev->readblk == NULL || bdev->writeblk == NULL) { return VMM_EFAIL; } cd = vmm_malloc(sizeof(struct vmm_classdev)); if (!cd) { return VMM_EFAIL; } INIT_LIST_HEAD(&cd->head); strcpy(cd->name, bdev->name); cd->dev = bdev->dev; cd->priv = bdev; rc = vmm_devdrv_register_classdev(VMM_BLOCKDEV_CLASS_NAME, cd); if (rc) { cd->dev = NULL; cd->priv = NULL; vmm_free(cd); return rc; } return VMM_OK; }
int vmm_netport_init(void) { int rc; struct vmm_class *c; vmm_printf("Initialize Network Port Framework\n"); c = vmm_malloc(sizeof(struct vmm_class)); if (!c) return VMM_EFAIL; INIT_LIST_HEAD(&c->head); strcpy(c->name, VMM_NETPORT_CLASS_NAME); INIT_LIST_HEAD(&c->classdev_list); rc = vmm_devdrv_register_class(c); if (rc) { vmm_printf("Failed to register %s class\n", VMM_NETPORT_CLASS_NAME); vmm_free(c); return rc; } return VMM_OK; }
void rbd_destroy(struct rbd *d)
{
	/* Mark the backing RAM region as reserved again.
	 * NOTE(review): confirm this is the intended inverse of the
	 * create path for this RAM-backed block device. */
	vmm_host_ram_reserve(d->addr, d->size);

	/* Tear down the associated block device */
	vmm_blockdev_unregister(d->bdev);
	vmm_blockdev_free(d->bdev);

	vmm_free(d);
}
/**
 * clk_register_fixed_rate - register fixed-rate clock with the clock framework
 * @dev: device that is registering this clock
 * @name: name of this clock
 * @parent_name: name of clock's parent
 * @flags: framework-specific flags
 * @fixed_rate: non-adjustable clock rate
 *
 * Returns the registered clock, or NULL on allocation/registration failure.
 */
struct clk *clk_register_fixed_rate(struct vmm_device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		unsigned long fixed_rate)
{
	struct clk_fixed_rate *fixed;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate fixed-rate clock */
	fixed = vmm_zalloc(sizeof(struct clk_fixed_rate));
	if (!fixed) {
		vmm_printf("%s: could not allocate fixed clk\n", __func__);
		return NULL;
	}

	/* init lives on this stack frame; clk_register() presumably copies
	 * the init data before returning -- NOTE(review): confirm, since
	 * fixed->hw.init would otherwise dangle after this function exits.
	 * A single parent is described via the address of parent_name. */
	init.name = name;
	init.ops = &clk_fixed_rate_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = (parent_name ? &parent_name: NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct clk_fixed_rate assignments */
	fixed->fixed_rate = fixed_rate;
	fixed->hw.init = &init;

	/* register the clock; on failure, release the wrapper we built
	 * since the framework did not take ownership of it */
	clk = clk_register(dev, &fixed->hw);
	if (!clk)
		vmm_free(fixed);

	return clk;
}
int vmm_netport_unregister(struct vmm_netport *port)
{
	struct vmm_classdev *cd;
	int rc;

	if (!port) {
		return VMM_EFAIL;
	}

	/* Detach the port from its switch before deregistering it */
	rc = vmm_netswitch_port_remove(port);
	if (rc) {
		return rc;
	}

	cd = vmm_devdrv_find_classdev(VMM_NETPORT_CLASS_NAME, port->name);
	if (!cd) {
		return VMM_EFAIL;
	}

	rc = vmm_devdrv_unregister_classdev(VMM_NETPORT_CLASS_NAME, cd);
	if (!rc) {
		vmm_free(cd);
	}

	return rc;
}
struct netstack_socket *netstack_socket_alloc(enum netstack_socket_type type) { struct netstack_socket *sk; struct netconn *conn; sk = vmm_zalloc(sizeof(struct netstack_socket)); if (!sk) { return NULL; } switch (type) { case NETSTACK_SOCKET_TCP: conn = netconn_new(NETCONN_TCP); break; case NETSTACK_SOCKET_UDP: conn = netconn_new(NETCONN_UDP); break; default: conn = NULL; break; }; if (!conn) { vmm_free(sk); return NULL; } sk->priv = conn; return sk; }