Example #1
struct vmm_ringbuf *vmm_ringbuf_alloc(u32 key_size, u32 key_count)
{
	struct vmm_ringbuf *rb;

	rb = vmm_malloc(sizeof(struct vmm_ringbuf));
	if (!rb) {
		return NULL;
	}

	INIT_SPIN_LOCK(&rb->lock);
	rb->keys = vmm_malloc(key_size * key_count);
	if (!rb->keys) {
		goto rb_init_fail;
	}
	rb->key_size = key_size;
	rb->key_count = key_count;
	rb->read_pos = 0;
	rb->write_pos = 0;
	rb->avail_count = 0;

	return rb;

rb_init_fail:
	vmm_free(rb);
	return NULL;
}
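A matching release routine frees the key buffer before the ring buffer itself, mirroring the allocation order above. This is only an illustrative sketch: the function name is hypothetical, and vmm_free() is the release routine already used in the failure path above.

/* Illustrative counterpart of vmm_ringbuf_alloc() above. */
void example_ringbuf_free(struct vmm_ringbuf *rb)
{
	if (!rb) {
		return;
	}
	vmm_free(rb->keys);
	vmm_free(rb);
}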
Example #2
void libfdt_node_parse_recursive(vmm_devtree_node_t * node,
				 char **data_ptr, char *str_buf)
{
	vmm_devtree_node_t *child;
	vmm_devtree_attr_t *attr;

	if (LIBFDT_DATA32(*data_ptr) != FDT_BEGIN_NODE)
		return;

	*data_ptr += sizeof(u32);

	node->name = vmm_malloc(vmm_strlen(*data_ptr) + 1);
	vmm_strcpy(node->name, *data_ptr);
	node->type = VMM_DEVTREE_NODETYPE_UNKNOWN;
	node->priv = NULL;
	*data_ptr += vmm_strlen(*data_ptr) + 1;
	while ((u32) (*data_ptr) % sizeof(u32) != 0)
		(*data_ptr)++;

	while (LIBFDT_DATA32(*data_ptr) != FDT_END_NODE) {
		switch (LIBFDT_DATA32(*data_ptr)) {
		case FDT_PROP:
			*data_ptr += sizeof(u32);
			attr = vmm_malloc(sizeof(vmm_devtree_attr_t));
			INIT_LIST_HEAD(&attr->head);
			attr->len = LIBFDT_DATA32(*data_ptr);
			*data_ptr += sizeof(u32);
			attr->name = &str_buf[LIBFDT_DATA32(*data_ptr)];
			*data_ptr += sizeof(u32);
			attr->value = vmm_malloc(attr->len);
			vmm_memcpy(attr->value, *data_ptr, attr->len);
			*data_ptr += attr->len;
			while ((u32) (*data_ptr) % sizeof(u32) != 0)
				(*data_ptr)++;
			list_add_tail(&node->attr_list, &attr->head);
			break;
		case FDT_NOP:
			*data_ptr += sizeof(u32);
			break;
		case FDT_BEGIN_NODE:
			child = vmm_malloc(sizeof(vmm_devtree_node_t));
			INIT_LIST_HEAD(&child->head);
			INIT_LIST_HEAD(&child->attr_list);
			INIT_LIST_HEAD(&child->child_list);
			child->parent = node;
			libfdt_node_parse_recursive(child, data_ptr, str_buf);
			list_add_tail(&node->child_list, &child->head);
			break;
		default:
			return;
			break;
		};
	}

	*data_ptr += sizeof(u32);

	return;
}
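The parser aligns *data_ptr to a 4-byte boundary in two places with the same open-coded loop; factoring it into a helper makes the intent explicit. A sketch of such a helper, reusing the cast and loop from the example (the helper name is illustrative):

/* Hypothetical helper: advance *data_ptr to the next 4-byte boundary,
 * exactly as the inline alignment loops in the parser above do. */
static void libfdt_align_ptr(char **data_ptr)
{
	while ((u32)(*data_ptr) % sizeof(u32) != 0) {
		(*data_ptr)++;
	}
}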
Example #3
static int ne2k_driver_probe(struct vmm_driver *dev, const struct vmm_devid *devid)
{
	int rc;
	struct vmm_netdev *ndev;
	struct nic_priv_data *priv_data;
	
	ndev = vmm_malloc(sizeof(struct vmm_netdev));
	if(!ndev) {
		rc = VMM_EFAIL;
		goto free_nothing;
	}
	vmm_memset(ndev,0, sizeof(struct vmm_netdev));

	priv_data = vmm_malloc(sizeof(struct nic_priv_data));
	if(!priv_data) {
		rc = VMM_EFAIL;
		goto free_chardev;
	}
	vmm_memset(priv_data,0, sizeof(struct nic_priv_data));

	if (ne2k_init(priv_data)) {
		rc = VMM_EFAIL;
		goto free_port;
	}

	priv_data->txrx_thread = vmm_hyperthread_create("ne2k-isa-driver", dp83902a_poll, priv_data);

	if (priv_data->txrx_thread == NULL) {
		rc = VMM_EFAIL;
		goto free_port;
	}

	vmm_hyperthread_run(priv_data->txrx_thread);

	vmm_strcpy(ndev->name, dev->node->name);
	ndev->dev = dev;
	ndev->ioctl = NULL;
	ndev->read = ne2k_read;
	ndev->write = ne2k_write;
	ndev->priv = (void *)priv_data;

	rc = vmm_netdev_register(ndev);
	if(rc) {
		goto free_port;
	}

	dev->priv = (void *)ndev;

	return VMM_OK;

free_port:
	vmm_free(priv_data);
free_chardev:
	vmm_free(ndev);
free_nothing:
	return rc;
}
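The probe above uses the common goto-unwind idiom: each allocation gets a label that releases it, and every failure jumps to the label covering everything allocated so far, so cleanup runs in reverse order. A stripped-down sketch of the same idiom, using only vmm_malloc()/vmm_free() and error codes seen in these examples (all names are illustrative):

/* Illustrative skeleton of the unwind ladder used in the probe above.
 * In a real probe, a and b would be stowed in device private data
 * before returning VMM_OK. */
static int example_probe(void)
{
	int rc;
	void *a, *b;

	a = vmm_malloc(64);
	if (!a) {
		rc = VMM_ENOMEM;
		goto fail;
	}

	b = vmm_malloc(128);
	if (!b) {
		rc = VMM_ENOMEM;
		goto free_a;
	}

	/* ... register a and b with the rest of the system ... */

	return VMM_OK;

free_a:
	vmm_free(a);
fail:
	return rc;
}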
Example #4
int arch_guest_init(struct vmm_guest *guest)
{
	struct riscv_guest_priv *priv;

	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(struct riscv_guest_priv));
		if (!guest->arch_priv) {
			return VMM_ENOMEM;
		}
		priv = riscv_guest_priv(guest);

		priv->time_offset = vmm_manager_guest_reset_timestamp(guest);
		priv->time_offset =
			priv->time_offset * vmm_timer_clocksource_frequency();
		priv->time_offset = udiv64(priv->time_offset, 1000000000ULL);

		priv->pgtbl = cpu_mmu_pgtbl_alloc(PGTBL_STAGE2);
		if (!priv->pgtbl) {
			vmm_free(guest->arch_priv);
			guest->arch_priv = NULL;
			return VMM_ENOMEM;
		}
	}

	return VMM_OK;
}
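A matching teardown would undo the two allocations in reverse order. The sketch below is illustrative only: the hook name arch_guest_deinit and cpu_mmu_pgtbl_free() (assumed counterpart of cpu_mmu_pgtbl_alloc()) are not taken from these examples.

/* Illustrative teardown mirroring arch_guest_init() above. */
int arch_guest_deinit(struct vmm_guest *guest)
{
	if (guest->arch_priv) {
		cpu_mmu_pgtbl_free(riscv_guest_priv(guest)->pgtbl); /* assumed API */
		vmm_free(guest->arch_priv);
		guest->arch_priv = NULL;
	}

	return VMM_OK;
}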
Example #5
int vmm_netport_init(void)
{
	int rc;
	struct vmm_class *c;

	vmm_printf("Initialize Network Port Framework\n");

	c = vmm_malloc(sizeof(struct vmm_class));
	if (!c)
		return VMM_EFAIL;

	INIT_LIST_HEAD(&c->head);
	strcpy(c->name, VMM_NETPORT_CLASS_NAME);
	INIT_LIST_HEAD(&c->classdev_list);

	rc = vmm_devdrv_register_class(c);
	if (rc) {
		vmm_printf("Failed to register %s class\n",
			VMM_NETPORT_CLASS_NAME);
		vmm_free(c);
		return rc;
	}

	return VMM_OK;
}
Example #6
int vmm_netdev_register(struct vmm_netdev * ndev)
{
	struct vmm_classdev *cd;

	if (ndev == NULL) {
		return VMM_EFAIL;
	}
	if (ndev->read == NULL || ndev->write == NULL) {
		return VMM_EFAIL;
	}

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		return VMM_EFAIL;
	}

	INIT_LIST_HEAD(&cd->head);
	vmm_strcpy(cd->name, ndev->name);
	cd->dev = ndev->dev;
	cd->priv = ndev;

	vmm_devdrv_register_classdev(VMM_NETDEV_CLASS_NAME, cd);

	return VMM_OK;
}
Example #7
int arch_guest_init(struct vmm_guest *guest)
{
	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(struct arm_guest_priv));
		if (!guest->arch_priv) {
			return VMM_ENOMEM;
		}

		arm_guest_priv(guest)->ttbl = mmu_lpae_ttbl_alloc(TTBL_STAGE2);
		if (!arm_guest_priv(guest)->ttbl) {
			vmm_free(guest->arch_priv);
			guest->arch_priv = NULL;
			return VMM_ENOMEM;
		}

		if (vmm_devtree_read_u32(guest->node,
				"psci_version",
				&arm_guest_priv(guest)->psci_version)) {
			/* By default, assume PSCI v0.1 */
			arm_guest_priv(guest)->psci_version = 1;
		}
	}

	return VMM_OK;
}
Example #8
static int __init vmm_blockdev_init(void)
{
	int rc;
	struct vmm_class *c;

	vmm_printf("Initialize Block Device Framework\n");

	c = vmm_malloc(sizeof(struct vmm_class));
	if (!c) {
		rc = VMM_ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&c->head);

	if (strlcpy(c->name, VMM_BLOCKDEV_CLASS_NAME, sizeof(c->name)) >=
	    sizeof(c->name)) {
		rc = VMM_EOVERFLOW;
		goto free_class;
	}

	INIT_LIST_HEAD(&c->classdev_list);

	rc = vmm_devdrv_register_class(c);
	if (rc) {
		goto free_class;
	}

	return VMM_OK;

free_class:
	vmm_free(c);
fail:
	return rc;
}
Example #9
int vmm_blockdev_register(struct vmm_blockdev * bdev)
{
	int rc;
	struct vmm_classdev *cd;

	if (bdev == NULL) {
		return VMM_EFAIL;
	}
	if (bdev->readblk == NULL || bdev->writeblk == NULL) {
		return VMM_EFAIL;
	}

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		return VMM_EFAIL;
	}

	INIT_LIST_HEAD(&cd->head);
	strcpy(cd->name, bdev->name);
	cd->dev = bdev->dev;
	cd->priv = bdev;

	rc = vmm_devdrv_register_classdev(VMM_BLOCKDEV_CLASS_NAME, cd);
	if (rc) {
		cd->dev = NULL;
		cd->priv = NULL;
		vmm_free(cd);
		return rc;
	}

	return VMM_OK;
}
Example #10
int vmm_chardev_register(struct vmm_chardev * cdev)
{
	int rc;
	struct vmm_classdev *cd;

	if (!(cdev && cdev->read && cdev->write)) {
		return VMM_EFAIL;
	}

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		return VMM_EFAIL;
	}

	vmm_memset(cd, 0, sizeof(struct vmm_classdev));

	INIT_LIST_HEAD(&cd->head);
	vmm_strcpy(cd->name, cdev->name);
	cd->dev = cdev->dev;
	cd->priv = cdev;

	rc = vmm_devdrv_register_classdev(VMM_CHARDEV_CLASS_NAME, cd);
	if (rc != VMM_OK) {
		vmm_free(cd);
	}

	return rc;
}
Example #11
void *m_ext_get(struct vmm_mbuf *m, u32 size, enum vmm_mbuf_alloc_types how)
{
	void *buf;
	u32 slab;
	struct mempool *mp = NULL;

	if (VMM_MBUF_ALLOC_DMA == how) {
		buf = vmm_dma_malloc(size);
		if (!buf) {
			return NULL;
		}
		m->m_flags |= M_EXT_DMA;
		MEXTADD(m, buf, size, ext_dma_free, NULL);
	} else {
		for (slab = 0; slab < EPOOL_SLAB_COUNT; slab++) {
			if (size <= epool_slab_buf_size(slab)) {
				mp = mbpctrl.epool_slabs[slab];
				break;
			}
		}

		if (mp && (buf = mempool_malloc(mp))) {
			m->m_flags |= M_EXT_POOL;
			MEXTADD(m, buf, size, ext_pool_free, mp);
		} else if ((buf = vmm_malloc(size))) {
			m->m_flags |= M_EXT_HEAP;
			MEXTADD(m, buf, size, ext_heap_free, NULL);
		} else {
			return NULL;
		}
	}

	return m->m_extbuf;
}
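Callers normally pair this with m_get() from Example #29: allocate the mbuf header first, then attach an external buffer of the required size. A minimal usage sketch, assuming m_freem() as the release routine for the header (not shown in these examples) and an arbitrary 2048-byte buffer:

/* Illustrative caller: mbuf header plus a DMA-capable external buffer. */
static struct vmm_mbuf *example_alloc_dma_mbuf(void)
{
	struct vmm_mbuf *m = m_get(0, M_PKTHDR);

	if (!m) {
		return NULL;
	}
	if (!m_ext_get(m, 2048, VMM_MBUF_ALLOC_DMA)) {
		m_freem(m);	/* assumed counterpart of m_get() */
		return NULL;
	}

	return m;
}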
Example #12
int vmm_libfdt_parse(virtual_addr_t fdt_addr,
		     vmm_devtree_node_t ** root,
		     char **string_buffer, size_t * string_buffer_size)
{
	virtual_addr_t data_addr, str_addr;
	size_t data_size, str_size;
	char *data_ptr;
	struct vmm_fdt_header *header;

	header = (struct vmm_fdt_header *)fdt_addr;

	/* Check magic number of header for sanity */
	if (header->magic != FDT_MAGIC) {
		return VMM_EFAIL;
	}

	/* Compute data location & size */
	data_addr = fdt_addr;
	data_addr += sizeof(vmm_fdt_header_t);
	data_addr += sizeof(vmm_fdt_reserve_entry_t);
	data_size = header->size_dt_struct;

	/* Compute strings location & size */
	str_addr = data_addr + data_size;
	str_size = header->size_dt_strings;

	/* Allocate string buffer */
	*string_buffer = vmm_malloc(str_size);
	vmm_memcpy(*string_buffer, (char *)str_addr, str_size);
	*string_buffer_size = str_size;

	/* Setup root node */
	*root = vmm_malloc(sizeof(vmm_devtree_node_t));
	INIT_LIST_HEAD(&(*root)->head);
	INIT_LIST_HEAD(&(*root)->attr_list);
	INIT_LIST_HEAD(&(*root)->child_list);
	(*root)->name = NULL;
	(*root)->type = VMM_DEVTREE_NODETYPE_UNKNOWN;
	(*root)->priv = NULL;
	(*root)->parent = NULL;

	/* Parse FDT recursively */
	data_ptr = (char *)data_addr;
	libfdt_node_parse_recursive(*root, &data_ptr, *string_buffer);

	return VMM_OK;
}
Example #13
static int heap_init(struct vmm_heap_control *heap,
		     bool is_normal, const u32 size_kb, u32 mem_flags)
{
	int rc = VMM_OK;

	memset(heap, 0, sizeof(*heap));

	heap->heap_size = size_kb * 1024;
	heap->heap_start = (void *)vmm_host_alloc_pages(
					VMM_SIZE_TO_PAGE(heap->heap_size),
					mem_flags);
	if (!heap->heap_start) {
		return VMM_ENOMEM;
	}

	rc = vmm_host_va2pa((virtual_addr_t)heap->heap_start,
			    &heap->heap_start_pa);
	if (rc) {
		goto fail_free_pages;
	}

	/* 12.5 percent for house-keeping */
	heap->hk_size = (heap->heap_size) / 8;

	/* The book-keeping area for non-normal heaps
	 * always lives in the normal heap
	 */
	if (is_normal) {
		heap->hk_start = heap->heap_start;
		heap->mem_start = heap->heap_start + heap->hk_size;
		heap->mem_size = heap->heap_size - heap->hk_size;
	} else {
		heap->hk_start = vmm_malloc(heap->hk_size);
		if (!heap->hk_start) {
			rc = VMM_ENOMEM;
			goto fail_free_pages;
		}
		heap->mem_start = heap->heap_start;
		heap->mem_size = heap->heap_size;
	}

	rc = buddy_allocator_init(&heap->ba,
			  heap->hk_start, heap->hk_size,
			  (unsigned long)heap->mem_start, heap->mem_size,
			  HEAP_MIN_BIN, HEAP_MAX_BIN);
	if (rc) {
		goto fail_free_hk;
	}

	return VMM_OK;

fail_free_hk:
	if (!is_normal) {
		vmm_free(heap->hk_start);
	}
fail_free_pages:
	vmm_host_free_pages((virtual_addr_t)heap->heap_start,
			    VMM_SIZE_TO_PAGE(heap->heap_size));
	return rc;
}
Example #14
void *vmm_zalloc(virtual_size_t size)
{
	void *ret = vmm_malloc(size);

	if (ret) {
		memset(ret, 0, size);
	}

	return ret;
}
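vmm_zalloc() folds the vmm_malloc() plus zero-fill pair seen in several of these examples into one call. For instance, the two-step allocation of priv_data in Example #3 could be written as a single call; the fragment below is a sketch of that substitution and slots into the probe from Example #3:

	/* Replaces vmm_malloc() followed by vmm_memset(..., 0, ...) */
	priv_data = vmm_zalloc(sizeof(struct nic_priv_data));
	if (!priv_data) {
		rc = VMM_EFAIL;
		goto free_chardev;
	}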
Example #15
struct _VMCS_OBJECT * vmcs_act_create(GUEST_CPU_HANDLE gcpu)
{
    struct _VMCS_ACTUAL_OBJECT *p_vmcs;

#ifdef JLMDEBUG
    bprint("vmcs_act_create\n");
#endif
    p_vmcs = vmm_malloc(sizeof(*p_vmcs));
    if (NULL == p_vmcs) {
        VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
        return NULL;
    }
    p_vmcs->cache = cache64_create(VMCS_FIELD_COUNT);
    if (NULL == p_vmcs->cache) {
        vmm_mfree(p_vmcs);
        VMM_LOG(mask_anonymous, level_trace,"[vmcs] %s: Allocation failed\n", __FUNCTION__);
        return NULL;
    }
#ifdef JLMDEBUG
    bprint("about to set vmcs entries in vmcs create\n");
#endif
    p_vmcs->hva = vmcs_hw_allocate_region(&p_vmcs->hpa);    // validate it's ok TBD
    p_vmcs->flags|= NEVER_ACTIVATED_FLAG;
    p_vmcs->owning_host_cpu = CPU_NEVER_USED;
    p_vmcs->gcpu_owner = gcpu;
    p_vmcs->vmcs_base->vmcs_read = vmcs_act_read;
    p_vmcs->vmcs_base->vmcs_write = vmcs_act_write;
    p_vmcs->vmcs_base->vmcs_flush_to_cpu = vmcs_act_flush_to_cpu;
    p_vmcs->vmcs_base->vmcs_flush_to_memory = vmcs_act_flush_to_memory;
    p_vmcs->vmcs_base->vmcs_is_dirty = vmcs_act_is_dirty;
    p_vmcs->vmcs_base->vmcs_get_owner = vmcs_act_get_owner;
    p_vmcs->vmcs_base->vmcs_destroy = vmcs_act_destroy;
    p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_store_list = 
        vmcs_act_add_msr_to_vmexit_store_list;
    p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_load_list = 
        vmcs_act_add_msr_to_vmexit_load_list;
    p_vmcs->vmcs_base->vmcs_add_msr_to_vmenter_load_list = 
        vmcs_act_add_msr_to_vmenter_load_list;
    p_vmcs->vmcs_base->vmcs_add_msr_to_vmexit_store_and_vmenter_load_list  = 
        vmcs_act_add_msr_to_vmexit_store_and_vmenter_load_lists;
    p_vmcs->vmcs_base->vmcs_delete_msr_from_vmexit_store_list = 
        vmcs_act_delete_msr_from_vmexit_store_list;
    p_vmcs->vmcs_base->vmcs_delete_msr_from_vmexit_load_list = 
        vmcs_act_delete_msr_from_vmexit_load_list;
    p_vmcs->vmcs_base->vmcs_delete_msr_from_vmenter_load_list = 
        vmcs_act_delete_msr_from_vmenter_load_list;
    p_vmcs->vmcs_base->vmcs_delete_msr_from_vmexit_store_and_vmenter_load_list  = 
        vmcs_act_delete_msr_from_vmexit_store_and_vmenter_load_lists;
    p_vmcs->vmcs_base->level = VMCS_MERGED;
    p_vmcs->vmcs_base->skip_access_checking = FALSE;
    p_vmcs->vmcs_base->signature = VMCS_SIGNATURE;
    vmcs_init_all_msr_lists(p_vmcs->vmcs_base);
    return p_vmcs->vmcs_base;
}
Example #16
static int __init cmd_profile_init(void)
{
	count_array =
	    vmm_malloc(sizeof(struct count_record) * kallsyms_num_syms);

	if (count_array == NULL) {
		return VMM_EFAIL;
	}

	return vmm_cmdmgr_register_cmd(&cmd_profile);
}
Example #17
VMDB_THREAD_CONTEXT * vmdb_thread_context_create(void)
    {
    VMDB_THREAD_CONTEXT *vmdb = vmm_malloc(sizeof(*vmdb));

    if (NULL != vmdb) {
        vmdb->dr7 = DR7_MUST_ONE_BITS;
        }
    else {
        VMDB_LOG(level_error,"[vmdb] %s failed due to lack of memory\n", __FUNCTION__);
        }

    return vmdb;
    }
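The matching destroy path simply hands the context back to the allocator. An illustrative sketch, assuming vmm_mfree() is the release routine paired with vmm_malloc() here (it is used that way in Example #15); the function name is hypothetical.

/* Illustrative counterpart of vmdb_thread_context_create() above. */
void vmdb_thread_context_destroy(VMDB_THREAD_CONTEXT *vmdb)
    {
    if (NULL != vmdb) {
        vmm_mfree(vmdb);
        }
    }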
Example #18
static void vmexit_cpuid_filter_install( GUEST_HANDLE guest,
                    ADDRESS  cpuid, CPUID_FILTER_HANDLER handler)
{
    LIST_ELEMENT            *filter_desc_list = guest_get_cpuid_list(guest);
    CPUID_FILTER_DESCRIPTOR *p_filter_desc = vmm_malloc(sizeof(*p_filter_desc));

    VMM_ASSERT(NULL != p_filter_desc);
    if (NULL != p_filter_desc) {
        p_filter_desc->cpuid   = cpuid;
        p_filter_desc->handler = handler;
        list_add(filter_desc_list, &p_filter_desc->list);
    }
}
Example #19
int fb_alloc_cmap(struct fb_cmap *cmap, int len, int transp)
{
	int size = len * sizeof(u16);
	int ret = VMM_ENOMEM;

	if (cmap->len != len) {
		fb_dealloc_cmap(cmap);
		if (!len)
			return 0;

		cmap->red = vmm_malloc(size);
		if (!cmap->red)
			goto fail;
		cmap->green = vmm_malloc(size);
		if (!cmap->green)
			goto fail;
		cmap->blue = vmm_malloc(size);
		if (!cmap->blue)
			goto fail;
		if (transp) {
			cmap->transp = vmm_malloc(size);
			if (!cmap->transp)
				goto fail;
		} else {
			cmap->transp = NULL;
		}
	}
	cmap->start = 0;
	cmap->len = len;
	ret = fb_copy_cmap(fb_default_cmap(len), cmap);
	if (ret)
		goto fail;
	return 0;

fail:
	fb_dealloc_cmap(cmap);
	return ret;
}
Example #20
int vmm_blockdev_register(struct vmm_blockdev *bdev)
{
	int rc;
	struct vmm_classdev *cd;
	struct vmm_blockdev_event event;

	if (!bdev) {
		return VMM_EFAIL;
	}

	if (!(bdev->flags & VMM_BLOCKDEV_RDONLY) &&
	    !(bdev->flags & VMM_BLOCKDEV_RW)) {
		return VMM_EINVALID;
	}

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		rc = VMM_ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&cd->head);
	if (strlcpy(cd->name, bdev->name, sizeof(cd->name)) >=
	    sizeof(cd->name)) {
		rc = VMM_EOVERFLOW;
		goto free_classdev;
	}
	cd->dev = bdev->dev;
	cd->priv = bdev;

	rc = vmm_devdrv_register_classdev(VMM_BLOCKDEV_CLASS_NAME, cd);
	if (rc) {
		goto free_classdev;
	}

	/* Broadcast register event */
	event.bdev = bdev;
	event.data = NULL;
	vmm_blocking_notifier_call(&bdev_notifier_chain, 
				   VMM_BLOCKDEV_EVENT_REGISTER, 
				   &event);

	return VMM_OK;

free_classdev:
	vmm_free(cd);
fail:
	return rc;
}
Example #21
BOOLEAN gpci_guest_initialize(GUEST_ID guest_id)
{
    GUEST_PCI_DEVICES *gpci = NULL;

    gpci = (GUEST_PCI_DEVICES *)vmm_memory_alloc(sizeof(GUEST_PCI_DEVICES));
    VMM_ASSERT(gpci);
    if(gpci == NULL) {
        return FALSE;
    }
    gpci->guest_id = guest_id;
    list_add(guest_pci_devices, gpci->guests);
    gpci->gcpu_pci_access_address = (PCI_CONFIG_ADDRESS *) vmm_malloc(guest_gcpu_count(guest_handle(guest_id)) * sizeof(PCI_CONFIG_ADDRESS));
    VMM_ASSERT(gpci->gcpu_pci_access_address);
    apply_default_device_assignment(guest_id);
    return TRUE;
}
Example #22
int __init vmm_profiler_init(void)
{
	pctrl.stat =
	    vmm_malloc(sizeof(struct vmm_profiler_stat) * kallsyms_num_syms);

	if (pctrl.stat == NULL) {
		return VMM_EFAIL;
	}

	vmm_memset(pctrl.stat, 0, sizeof(struct vmm_profiler_stat) *
		   kallsyms_num_syms);

	INIT_SPIN_LOCK(&pctrl.lock);

	return VMM_OK;
}
Example #23
static int cmd_flash_write(struct vmm_chardev *cdev,
			   int  argc,
			   char **argv)
{
	int err = VMM_OK;
	int idx = 0;
	size_t retlen = 0;
	flash_op op;
	u_char *buf = NULL;

	if (VMM_OK != (err = flash_args_common(cdev, argc, argv, &op))) {
		return err;
	}

	vmm_cprintf(cdev, "Before writing, the %s block at 0x%08X must have "
		    "been erased?\n", op.mtd->name,
		    op.offset & ~op.mtd->erasesize_mask);
	if (!flash_question(cdev)) {
		vmm_cprintf(cdev, "Exiting...\n");
		return VMM_OK;
	}

	if (argc - 4 <= 0) {
		vmm_cprintf(cdev, "Nothing to write, exiting\n");
		return VMM_OK;
	}

	if (NULL == (buf = vmm_malloc(argc - 4))) {
		return VMM_ENOMEM;
	}

	for (idx = 0; idx < argc - 4; ++idx) {
		buf[idx] = strtoull(argv[idx + 4], NULL, 16);
		vmm_cprintf(cdev, "Writing at 0x%08X 0x%02X\n",
			    op.offset + idx, buf[idx]);
	}
	if (0 != mtd_write(op.mtd, op.offset, argc - 4, &retlen, buf)) {
		vmm_cprintf(cdev, "Failed to write %s at 0x%08X\n",
			    op.mtd->name, op.offset);
	}
	vmm_free(buf);

	return err;
}
Example #24
int vmm_netport_register(struct vmm_netport *port)
{
	struct vmm_classdev *cd;
	int rc;

	if (port == NULL)
		return VMM_EFAIL;

	/* If port has invalid mac, assign a random one */
	if (!is_valid_ether_addr(port->macaddr)) {
		random_ether_addr(port->macaddr);
	}

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		rc = VMM_EFAIL;
		goto ret;
	}

	INIT_LIST_HEAD(&cd->head);
	strcpy(cd->name, port->name);
	cd->dev = port->dev;
	cd->priv = port;

	rc = vmm_devdrv_register_classdev(VMM_NETPORT_CLASS_NAME, cd);
	if (rc != VMM_OK) {
		vmm_printf("%s: Failed to register %s %s (error %d)\n",
			   __func__, VMM_NETPORT_CLASS_NAME, port->name, rc);
		goto fail_port_reg;
	}

#ifdef CONFIG_VERBOSE_MODE
	vmm_printf("%s: Registered netport %s\n", __func__, port->name);
#endif

	return rc;

fail_port_reg:
	cd->dev = NULL;
	cd->priv = NULL;
	vmm_free(cd);
ret:
	return rc;
}
Example #25
int arch_guest_init(struct vmm_guest *guest)
{
	int rc;
	u32 ovect_flags;
	virtual_addr_t ovect_va;
	struct cpu_page pg;

	if (!guest->reset_count) {
		guest->arch_priv = vmm_malloc(sizeof(arm_guest_priv_t));
		if (!guest->arch_priv) {
			return VMM_EFAIL;
		}
		ovect_flags = 0x0;
		ovect_flags |= VMM_MEMORY_READABLE;
		ovect_flags |= VMM_MEMORY_WRITEABLE;
		ovect_flags |= VMM_MEMORY_CACHEABLE;
		ovect_flags |= VMM_MEMORY_EXECUTABLE;
		ovect_va = vmm_host_alloc_pages(1, ovect_flags);
		if (!ovect_va) {
			return VMM_EFAIL;
		}
		if ((rc = cpu_mmu_get_reserved_page(ovect_va, &pg))) {
			return rc;
		}
		if ((rc = cpu_mmu_unmap_reserved_page(&pg))) {
			return rc;
		}
#if defined(CONFIG_ARMV5)
		pg.ap = TTBL_AP_SRW_UR;
#else
		if (pg.ap == TTBL_AP_SR_U) {
			pg.ap = TTBL_AP_SR_UR;
		} else {
			pg.ap = TTBL_AP_SRW_UR;
		}
#endif
		if ((rc = cpu_mmu_map_reserved_page(&pg))) {
			return rc;
		}
		arm_guest_priv(guest)->ovect = (u32 *)ovect_va;
	}

	return VMM_OK;
}
Example #26
int image_load(const char *path,
	       struct format *output_format,
	       struct fb_image *image)
{
	int fd = 0;
	int err = 0;
	char *buf = NULL;
	size_t len = 0;
	struct stat stat;
	parser parse_func;

	if (NULL == path)
		return VMM_EFAIL;

	if (0 > (fd = vfs_open(path, O_RDONLY, 0)))
		return fd;

	if (VMM_OK != (err = vfs_fstat(fd, &stat))) {
		goto out;
	}

	if (NULL == (buf = vmm_malloc(stat.st_size))) {
		err = VMM_ENOMEM;
		goto out;
	}

	len = vfs_read(fd, buf, stat.st_size);

	if (NULL == (parse_func = parser_get(buf, len))) {
		vmm_printf("Unsupported format\n");
		err = VMM_EFAIL;
		goto out;
	}

	err = parse_func(buf, len, image, output_format);

out:
	if ((VMM_OK != err) && buf)
		vmm_free(buf);
	if (fd >= 0)
		vfs_close(fd);

	return err;
}
Example #27
int vmm_netport_register(struct vmm_netport *port)
{
	struct vmm_classdev *cd;
	int rc;

	if (port == NULL)
		return VMM_EFAIL;

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		rc = VMM_EFAIL;
		goto ret;
	}

	INIT_LIST_HEAD(&cd->head);
	strcpy(cd->name, port->name);
	cd->dev = port->dev;
	cd->priv = port;

	rc = vmm_devdrv_register_classdev(VMM_NETPORT_CLASS_NAME, cd);
	if (rc != VMM_OK) {
		vmm_printf("%s: Failed to register %s %s "
			   "with err 0x%x\n", __func__, 
			   VMM_NETPORT_CLASS_NAME,
			   port->name, rc);
		goto fail_port_reg;
	}

#ifdef CONFIG_VERBOSE_MODE
	vmm_printf("Successfully registered VMM netport: %s\n", port->name);
#endif

	return rc;

fail_port_reg:
	cd->dev = NULL;
	cd->priv = NULL;
	vmm_free(cd);
ret:
	return rc;
}
Example #28
struct mempool *mempool_heap_create(u32 entity_size,
				    u32 entity_count)
{
	u32 e;
	virtual_addr_t va;
	struct mempool *mp;

	if (!entity_size || !entity_count) {
		return NULL;
	}

	mp = vmm_zalloc(sizeof(struct mempool));
	if (!mp) {
		return NULL;
	}

	mp->type = MEMPOOL_TYPE_HEAP;
	mp->entity_size = entity_size;
	mp->entity_count = entity_count;

	mp->f = fifo_alloc(sizeof(virtual_addr_t), mp->entity_count);
	if (!mp->f) {
		vmm_free(mp);
		return NULL;
	}

	mp->entity_base =
		(virtual_addr_t)vmm_malloc(entity_size * entity_count);
	if (!mp->entity_base) {
		fifo_free(mp->f);
		vmm_free(mp);
		return NULL;
	}

	for (e = 0; e < mp->entity_count; e++) {
		va = mp->entity_base + e * entity_size;
		fifo_enqueue(mp->f, &va, FALSE);
	}

	return mp;
}
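A matching destroy routine releases the pieces in reverse order of creation. The sketch below is illustrative only: it assumes all entities have been returned to the pool and that fifo_free() and vmm_free() are the counterparts of fifo_alloc() and vmm_malloc()/vmm_zalloc() as used above; the function name is hypothetical.

/* Illustrative teardown for a MEMPOOL_TYPE_HEAP pool created above. */
static void example_mempool_heap_destroy(struct mempool *mp)
{
	if (!mp || (mp->type != MEMPOOL_TYPE_HEAP)) {
		return;
	}
	vmm_free((void *)mp->entity_base);
	fifo_free(mp->f);
	vmm_free(mp);
}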
Example #29
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct vmm_mbuf *m_get(int nowait, int flags)
{
	struct vmm_mbuf *m;
	
	/* TODO: implement non-blocking variant */

	m = vmm_malloc(sizeof(struct vmm_mbuf));
	if (m == NULL)
		return NULL;

	INIT_LIST_HEAD(&m->m_list);
	m->m_next = NULL;
	m->m_data = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	if(flags & M_PKTHDR)
		m->m_pktlen = 0;
	m->m_ref = 1;

	return m;
}
Example #30
struct vmm_netport *vmm_netport_alloc(char *name)
{
	struct vmm_netport *port;

	port = vmm_zalloc(sizeof(struct vmm_netport));
	if (!port) {
		vmm_printf("%s Failed to allocate net port\n", __func__);
		return NULL;
	}

	port->name = vmm_malloc(strlen(name)+1);
	if (!port->name) {
		vmm_printf("%s Failed to allocate net port name\n", __func__);
		vmm_free(port);
		return NULL;
	}

	strcpy(port->name, name);
	INIT_LIST_HEAD(&port->head);

	return port;
}