Exemple #1
0
/* Allocate a zero-initialized block device instance.
 * When alloc_rq is true, a request queue is allocated and attached as
 * well; on any allocation failure everything is released and NULL is
 * returned. Caller owns the returned device.
 */
static struct vmm_blockdev *__blockdev_alloc(bool alloc_rq)
{
	struct vmm_blockdev *bdev = vmm_zalloc(sizeof(struct vmm_blockdev));

	if (!bdev) {
		return NULL;
	}

	INIT_LIST_HEAD(&bdev->head);
	INIT_MUTEX(&bdev->child_lock);
	bdev->child_count = 0;
	INIT_LIST_HEAD(&bdev->child_list);

	bdev->rq = NULL;
	if (alloc_rq) {
		bdev->rq = vmm_zalloc(sizeof(struct vmm_request_queue));
		if (!bdev->rq) {
			vmm_free(bdev);
			return NULL;
		}
		INIT_SPIN_LOCK(&bdev->rq->lock);
	}

	return bdev;
}
Exemple #2
0
/* Allocate a netstack socket of the given type (TCP or UDP).
 * Returns NULL on allocation failure or unknown type; on success the
 * underlying lwIP netconn is stored in sk->priv and the caller owns
 * the returned socket.
 */
struct netstack_socket *netstack_socket_alloc(enum netstack_socket_type type)
{
	struct netstack_socket *sk;
	struct netconn *conn;

	sk = vmm_zalloc(sizeof(struct netstack_socket));
	if (!sk) {
		return NULL;
	}

	switch (type) {
	case NETSTACK_SOCKET_TCP:
		conn = netconn_new(NETCONN_TCP);
		break;
	case NETSTACK_SOCKET_UDP:
		conn = netconn_new(NETCONN_UDP);
		break;
	default:
		conn = NULL;
		break;
	} /* Fixed: stray ';' after switch closing brace removed */
	if (!conn) {
		vmm_free(sk);
		return NULL;
	}

	sk->priv = conn;

	return sk;
}
Exemple #3
0
/* Probe callback for the virtio-pci BAR emulator.
 * Allocates per-device state, fills in the generic virtio device fields,
 * reads the virtio type and interrupt number from the device tree node,
 * and registers the device with the virtio core.
 * Returns VMM_OK on success or a VMM error code (state freed on failure).
 */
static int virtio_pci_bar_probe(struct vmm_guest *guest,
				struct vmm_emudev *edev,
				const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	struct virtio_pci_dev *vdev;

	vdev = vmm_zalloc(sizeof(struct virtio_pci_dev));
	if (!vdev) {
		rc = VMM_ENOMEM;
		goto virtio_pci_probe_done;
	}

	vdev->guest = guest;

	/* Device name is "<guest>/<node>" */
	vmm_snprintf(vdev->dev.name, VMM_VIRTIO_DEVICE_MAX_NAME_LEN,
		     "%s/%s", guest->name, edev->node->name);
	vdev->dev.edev = edev;
	vdev->dev.tra = &pci_tra;
	vdev->dev.tra_data = vdev;
	vdev->dev.guest = guest;

	vdev->config = (struct vmm_virtio_pci_config) {
		.queue_num  = 256,
	};

	/* Mandatory device tree attributes: virtio type and interrupt */
	rc = vmm_devtree_read_u32(edev->node, "virtio_type",
				  &vdev->dev.id.type);
	if (rc) {
		goto virtio_pci_probe_freestate_fail;
	}

	rc = vmm_devtree_read_u32_atindex(edev->node,
					  VMM_DEVTREE_INTERRUPTS_ATTR_NAME,
					  &vdev->irq, 0);
	if (rc) {
		goto virtio_pci_probe_freestate_fail;
	}

	if ((rc = vmm_virtio_register_device(&vdev->dev))) {
		goto virtio_pci_probe_freestate_fail;
	}

	edev->priv = vdev;

	goto virtio_pci_probe_done;

virtio_pci_probe_freestate_fail:
	vmm_free(vdev);
virtio_pci_probe_done:
	return rc;
}

/* Device tree match table for the virtio-pci emulator;
 * terminated by an empty entry. */
static struct vmm_devtree_nodeid virtio_pci_emuid_table[] = {
	{
		.type = "virtio",
		.compatible = "virtio,pci",
	},
	{ /* end of list */ },
};
Exemple #4
0
/* Allocate a net port with the given name and queue size.
 * queue_size is clamped to VMM_NETPORT_MAX_QUEUE_SIZE and the xfer
 * pool entries are placed on the free list. Returns NULL on failure.
 */
struct vmm_netport *vmm_netport_alloc(char *name, u32 queue_size)
{
	u32 i;
	struct dlist *l;
	struct vmm_netport *port;

	port = vmm_zalloc(sizeof(struct vmm_netport));
	if (!port) {
		vmm_printf("%s Failed to allocate net port\n", __func__);
		return NULL;
	}

	INIT_LIST_HEAD(&port->head);
	/* strncpy() does not guarantee NUL termination when the source
	 * is at least as long as the bound; terminate explicitly. */
	strncpy(port->name, name, VMM_NETPORT_MAX_NAME_SIZE);
	port->name[VMM_NETPORT_MAX_NAME_SIZE - 1] = '\0';
	port->queue_size = (queue_size < VMM_NETPORT_MAX_QUEUE_SIZE) ?
				queue_size : VMM_NETPORT_MAX_QUEUE_SIZE;

	port->free_count = port->queue_size;
	INIT_SPIN_LOCK(&port->free_list_lock);
	INIT_LIST_HEAD(&port->free_list);

	/* NOTE(review): assumes port->xfer_pool is an in-struct array (or
	 * otherwise valid after zalloc) — confirm against struct vmm_netport */
	for (i = 0; i < port->queue_size; i++) {
		l = &((port->xfer_pool + i)->head);
		list_add_tail(l, &port->free_list);
	}

	return port;
}
Exemple #5
0
/*
 * Space allocation routines.
 * These are also available as macros
 * for critical paths.
 */
struct vmm_mbuf *m_get(int nowait, int flags)
{
	struct vmm_mbuf *m;

	/* TODO: implement non-blocking variant */

	m = mempool_zalloc(mbpctrl.mpool);
	if (m) {
		m->m_freefn = mbuf_pool_free;
	} else if (NULL != (m = vmm_zalloc(sizeof(struct vmm_mbuf)))) {
		m->m_freefn = mbuf_heap_free;
	} else {
		return NULL;
	}

	INIT_LIST_HEAD(&m->m_list);
	m->m_next = NULL;
	m->m_data = NULL;
	m->m_len = 0;
	m->m_flags = flags;
	if (flags & M_PKTHDR) {
		m->m_pktlen = 0;
	}
	m->m_ref = 1;

	return m;
}
Exemple #6
0
/**
 * clk_register_fixed_rate - register fixed-rate clock with the clock framework
 * @dev: device that is registering this clock
 * @name: name of this clock
 * @parent_name: name of clock's parent
 * @flags: framework-specific flags
 * @fixed_rate: non-adjustable clock rate
 */
struct clk *clk_register_fixed_rate(struct vmm_device *dev, const char *name,
		const char *parent_name, unsigned long flags,
		unsigned long fixed_rate)
{
	struct clk_fixed_rate *fixed;
	struct clk *clk;
	struct clk_init_data init;

	/* allocate fixed-rate clock */
	fixed = vmm_zalloc(sizeof(struct clk_fixed_rate));
	if (!fixed) {
		vmm_printf("%s: could not allocate fixed clk\n", __func__);
		return NULL;
	}

	/* NOTE(review): init lives on this stack frame; clk_register()
	 * is presumed to copy everything it needs before returning —
	 * confirm it does not keep the pointer. */
	init.name = name;
	init.ops = &clk_fixed_rate_ops;
	init.flags = flags | CLK_IS_BASIC;
	init.parent_names = (parent_name ? &parent_name: NULL);
	init.num_parents = (parent_name ? 1 : 0);

	/* struct clk_fixed_rate assignments */
	fixed->fixed_rate = fixed_rate;
	fixed->hw.init = &init;

	/* register the clock */
	clk = clk_register(dev, &fixed->hw);

	/* registration failed: release the wrapper we allocated */
	if (!clk)
		vmm_free(fixed);

	return clk;
}
Exemple #7
0
int vmm_rtcdev_register(struct vmm_rtcdev *rdev)
{
	int rc;
	struct vmm_classdev *cd;

	if (!(rdev && rdev->set_time && rdev->get_time)) {
		return VMM_EFAIL;
	}

	cd = vmm_zalloc(sizeof(struct vmm_classdev));
	if (!cd) {
		return VMM_EFAIL;
	}

	INIT_LIST_HEAD(&cd->head);
	strcpy(cd->name, rdev->name);
	cd->dev = rdev->dev;
	cd->priv = rdev;

	rc = vmm_devdrv_register_classdev(VMM_RTCDEV_CLASS_NAME, cd);
	if (rc != VMM_OK) {
		vmm_free(cd);
	}

	return rc;
}
/* Register the ARM generic timer counter as a clocksource.
 * Reads the counter frequency from the device tree node; fails if the
 * frequency cannot be determined. Returns the clocksource framework's
 * registration result.
 */
static int __init generic_timer_clocksource_init(struct vmm_devtree_node *node)
{
	struct vmm_clocksource *cs;

	generic_timer_get_freq(node);
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	cs = vmm_zalloc(sizeof(struct vmm_clocksource));
	if (!cs) {
		return VMM_EFAIL;
	}

	cs->name = "gen-timer";
	cs->rating = 400;
	cs->read = &generic_counter_read;
	/* Architectural counter is at least 56 bits wide */
	cs->mask = VMM_CLOCKSOURCE_MASK(56);
	vmm_clocks_calc_mult_shift(&cs->mult, &cs->shift,
				   generic_timer_hz, VMM_NSEC_PER_SEC, 10);
	/* Cache mult/shift for fast cycle-to-ns conversions elsewhere */
	generic_timer_mult = cs->mult;
	generic_timer_shift = cs->shift;
	cs->priv = NULL;

	return vmm_clocksource_register(cs);
}
Exemple #9
0
/* Allocate a framebuffer info structure plus 'size' bytes of private
 * driver data in one allocation. When size is non-zero, info->par
 * points at the private area, which is padded so it starts on a
 * long-aligned boundary. Returns NULL on allocation failure.
 */
struct vmm_fb_info *vmm_fb_alloc(size_t size, struct vmm_device *dev)
{
#define BYTES_PER_LONG (BITS_PER_LONG/8)
#define PADDING (BYTES_PER_LONG - (sizeof(struct vmm_fb_info) % BYTES_PER_LONG))
	int fb_info_size = sizeof(struct vmm_fb_info);
	struct vmm_fb_info *info;
	char *p;

	if (size) {
		/* Round the info struct up so ->par is long-aligned */
		fb_info_size += PADDING;
	}

	p = vmm_zalloc(fb_info_size + size);
	if (!p) {
		return NULL;
	}

	info = (struct vmm_fb_info *) p;

	if (size) {
		/* Private data lives immediately after the padded struct */
		info->par = p + fb_info_size;
	}

	info->dev = dev;

	return info;
#undef PADDING
#undef BYTES_PER_LONG
}
Exemple #10
0
/* Initialize an OMAP3 general-purpose timer as a free-running
 * clocksource. Returns VMM_OK on success or a VMM error code.
 */
int __init omap3_gpt_clocksource_init(u32 gpt_num, physical_addr_t prm_pa)
{
	int rc;
	struct omap3_gpt_clocksource *cs;

	if ((rc = omap3_gpt_instance_init(gpt_num, prm_pa, NULL))) {
		return rc;
	}

	/* Put the timer in continuous (free-running) mode */
	omap3_gpt_continuous(gpt_num);

	cs = vmm_zalloc(sizeof(struct omap3_gpt_clocksource));
	if (!cs) {
		return VMM_EFAIL;
	}

	cs->gpt_num = gpt_num;
	cs->clksrc.name = omap3_gpt_config[gpt_num].name;
	cs->clksrc.rating = 200;
	cs->clksrc.read = &omap3_gpt_clocksource_read;
	cs->clksrc.mask = 0xFFFFFFFF;
	cs->clksrc.mult = 
	vmm_clocksource_khz2mult((omap3_gpt_config[gpt_num].clk_hz)/1000, 24);
	cs->clksrc.shift = 24;
	cs->clksrc.priv = cs;

	/* NOTE(review): cs is not freed if registration fails —
	 * confirm whether vmm_clocksource_register() takes ownership */
	return vmm_clocksource_register(&cs->clksrc);
}
Exemple #11
0
/* Accept an incoming connection on a listening socket.
 * On success, *new_sk receives a freshly allocated socket that
 * inherits sk's settings and wraps the accepted netconn.
 */
int netstack_socket_accept(struct netstack_socket *sk, 
			   struct netstack_socket **new_sk)
{
	struct netstack_socket *child;
	struct netconn *conn;
	err_t err;

	if (!sk || !sk->priv || !new_sk) {
		return VMM_EINVALID;
	}

	child = vmm_zalloc(sizeof(struct netstack_socket));
	if (!child) {
		return VMM_ENOMEM;
	}

	/* Inherit settings from the listening socket, but start with
	 * no underlying connection of our own. */
	memcpy(child, sk, sizeof(struct netstack_socket));
	child->priv = NULL;

	err = netconn_accept(sk->priv, &conn);
	if (ERR_OK != err) {
		vmm_free(child);
		return VMM_EFAIL;
	}

	child->priv = conn;
	*new_sk = child;

	return VMM_OK;
}
Exemple #12
0
/* One-time initialization of the RTC device framework:
 * allocates and registers the RTC device class.
 */
static int __init vmm_rtcdev_init(void)
{
	struct vmm_class *c;
	int rc;

	vmm_printf("Initialize RTC Device Framework\n");

	c = vmm_zalloc(sizeof(struct vmm_class));
	if (!c) {
		return VMM_EFAIL;
	}

	INIT_LIST_HEAD(&c->head);

	/* Bounded copy of the class name; bail out on truncation */
	if (strlcpy(c->name, VMM_RTCDEV_CLASS_NAME, sizeof(c->name)) <
	    sizeof(c->name)) {
		INIT_LIST_HEAD(&c->classdev_list);
		rc = vmm_devdrv_register_class(c);
		if (!rc) {
			return rc;
		}
	} else {
		rc = VMM_EOVERFLOW;
	}

	/* Registration or name copy failed: release the class */
	vmm_free(c);
	return rc;
}
Exemple #13
0
int arch_vcpu_init(struct vmm_vcpu *vcpu)
{
	u64 stack_start;
	extern struct cpuinfo_x86 cpu_info;

	if (!vcpu->is_normal) {
		/* For orphan vcpu */
		stack_start = vcpu->stack_va + vcpu->stack_sz - sizeof(u64);
		vcpu->regs.rip = vcpu->start_pc;
		vcpu->regs.rip = vcpu->start_pc;
		vcpu->regs.rsp = stack_start;
		vcpu->regs.cs = VMM_CODE_SEG_SEL;
		vcpu->regs.ss = VMM_DATA_SEG_SEL;
		vcpu->regs.rflags = (X86_EFLAGS_IF | X86_EFLAGS_PF | X86_EFLAGS_CF);
	} else {
		if (!vcpu->reset_count) {
			vcpu->arch_priv = vmm_zalloc(sizeof(struct x86_vcpu_priv));

			if (!vcpu->arch_priv)
				return VMM_EFAIL;

			INIT_SPIN_LOCK(&x86_vcpu_priv(vcpu)->lock);

			init_cpu_capabilities(vcpu);

			x86_vcpu_priv(vcpu)->hw_context = vmm_zalloc(sizeof(struct vcpu_hw_context));
			x86_vcpu_priv(vcpu)->hw_context->assoc_vcpu = vcpu;

			x86_vcpu_priv(vcpu)->hw_context->vcpu_emergency_shutdown = arch_vcpu_emergency_shutdown;
			cpu_init_vcpu_hw_context(&cpu_info, x86_vcpu_priv(vcpu)->hw_context);

			/*
			 * This vcpu has to run VMM code before and after guest mode
			 * switch. Prepare for the same.
			 */
			stack_start = vcpu->stack_va + vcpu->stack_sz - sizeof(u64);
			vcpu->regs.rip = (u64)arch_guest_vcpu_trampoline;
			vcpu->regs.rsp = stack_start;
			vcpu->regs.cs = VMM_CODE_SEG_SEL;
			vcpu->regs.ss = VMM_DATA_SEG_SEL;
			vcpu->regs.rdi = (u64)vcpu; /* this VCPU as parameter */
			vcpu->regs.rflags = (X86_EFLAGS_IF | X86_EFLAGS_PF | X86_EFLAGS_CF);
		}
	}

	return VMM_OK;
}
Exemple #14
0
/* Create and register a RAM-backed block device covering [pa, pa+sz).
 * Allocates the rbd wrapper and a block device (with request queue),
 * registers it, and reserves the underlying RAM. Returns the rbd on
 * success or NULL with everything rolled back on failure.
 */
static struct rbd *__rbd_create(struct vmm_device *dev,
				const char *name, 
				physical_addr_t pa, 
				physical_size_t sz)
{
	struct rbd *d;

	if (!name) {
		return NULL;
	}

	d = vmm_zalloc(sizeof(struct rbd));
	if (!d) {
		goto free_nothing;
	}
	d->addr = pa;
	d->size = sz;

	/* presumably allocates the request queue too (d->bdev->rq is
	 * used below) — TODO confirm vmm_blockdev_alloc() behavior */
	d->bdev = vmm_blockdev_alloc();
	if (!d->bdev) {
		goto free_rbd;
	}

	/* Setup block device instance */
	/* NOTE(review): strncpy may leave name/desc unterminated if the
	 * source fills the field — confirm field sizes */
	strncpy(d->bdev->name, name, VMM_FIELD_NAME_SIZE);
	strncpy(d->bdev->desc, "RAM backed block device", 
		VMM_FIELD_DESC_SIZE);
	d->bdev->dev = dev;
	d->bdev->flags = VMM_BLOCKDEV_RW;
	d->bdev->start_lba = 0;
	d->bdev->num_blocks = udiv64(d->size, RBD_BLOCK_SIZE);
	d->bdev->block_size = RBD_BLOCK_SIZE;

	/* Setup request queue for block device instance */
	d->bdev->rq->make_request = rbd_make_request;
	d->bdev->rq->abort_request = rbd_abort_request;
	d->bdev->rq->priv = d;

	/* Register block device instance */
	if (vmm_blockdev_register(d->bdev)) {
		goto free_bdev;
	}

	/* Reserve RAM space */
	if (vmm_host_ram_reserve(d->addr, d->size)) {
		goto unreg_bdev;
	}

	return d;

unreg_bdev:
	vmm_blockdev_unregister(d->bdev);
free_bdev:
	vmm_blockdev_free(d->bdev);
free_rbd:
	vmm_free(d);
free_nothing:
	return NULL;
}
Exemple #15
0
/* Add a 64-bit little-endian value under the given fw_cfg key.
 * The heap copy is owned by the fw_cfg state after a successful add.
 * Returns VMM_ENOMEM on allocation failure (the allocation was
 * previously dereferenced without a check).
 */
int fw_cfg_add_i64(fw_cfg_state_t *s, u16 key, u64 value)
{
	u64 *copy;

	copy = vmm_zalloc(sizeof(value));
	if (!copy) {
		return VMM_ENOMEM;
	}
	*copy = vmm_cpu_to_le64(value);
	return fw_cfg_add_bytes(s, key, copy, sizeof(value));
}
Exemple #16
0
/* Add a 32-bit little-endian value under the given fw_cfg key.
 * Returns VMM_ENOMEM on allocation failure (the allocation was
 * previously dereferenced without a check).
 */
int fw_cfg_add_i32(fw_cfg_state_t*s, u16 key, u32 value)
{
	u32 *copy;

	copy = vmm_zalloc(sizeof(value));
	if (!copy) {
		return VMM_ENOMEM;
	}
	*copy = vmm_cpu_to_le32(value);
	return fw_cfg_add_bytes(s, key, copy, sizeof(value));
}
Exemple #17
0
/* Add a 16-bit little-endian value under the given fw_cfg key.
 * Returns VMM_ENOMEM on allocation failure (the allocation was
 * previously dereferenced without a check).
 */
int fw_cfg_add_i16(fw_cfg_state_t *s, u16 key, u16 value)
{
	u16 *copy;

	copy = vmm_zalloc(sizeof(value));
	if (!copy) {
		return VMM_ENOMEM;
	}
	*copy = vmm_cpu_to_le16(value);
	return fw_cfg_add_bytes(s, key, copy, sizeof(value));
}
Exemple #18
0
/* Probe callback for the ARM11 MPCore private-memory-region emulator.
 * Reads parent and timer interrupts from the device tree, then
 * allocates the MP timer and GIC sub-states. On any failure the
 * partially built state is torn down in reverse order.
 */
static int arm11mpcore_emulator_probe(struct vmm_guest *guest,
				      struct vmm_emudev *edev,
				      const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	struct arm11mpcore_priv_state *s;
	u32 parent_irq, timer_irq[2];

	s = vmm_zalloc(sizeof(struct arm11mpcore_priv_state));
	if (!s) {
		rc = VMM_ENOMEM;
		goto arm11mp_probe_done;
	}

	/* One private timer/GIC interface per guest VCPU */
	s->num_cpu = guest->vcpu_count;

	rc = vmm_devtree_read_u32(edev->node, "parent_irq", &parent_irq);
	if (rc) {
		goto arm11mp_probe_failed;
	}

	rc = vmm_devtree_read_u32_array(edev->node, "timer_irq",
					timer_irq, array_size(timer_irq));
	if (rc) {
		goto arm11mp_probe_failed;
	}

	/* Allocate and init MPT state */
	if (!(s->mpt = mptimer_state_alloc(guest, edev, s->num_cpu, 1000000,
				 	   timer_irq[0], timer_irq[1]))) {
		rc = VMM_ENOMEM;
		goto arm11mp_probe_failed;
	}

	/* Allocate and init GIC state */
	if (!(s->gic = gic_state_alloc(edev->node->name, guest, 
				       GIC_TYPE_ARM11MPCORE, s->num_cpu,
				       FALSE, 0, 96, parent_irq))) {
		rc = VMM_ENOMEM;
		goto arm11mp_gic_alloc_failed;
	}

	s->guest = guest;
	INIT_SPIN_LOCK(&s->lock);

	edev->priv = s;

	goto arm11mp_probe_done;

arm11mp_gic_alloc_failed:
	mptimer_state_free(s->mpt);

arm11mp_probe_failed:
	vmm_free(s);

arm11mp_probe_done:
	return rc;
}
Exemple #19
0
/* Probe callback for the virtio-mmio transport emulator (legacy
 * attrval-based variant). Allocates device state, fills the MMIO
 * config header, reads virtio type and interrupt from the device
 * tree, and registers the device with the virtio core.
 */
static int virtio_mmio_probe(struct vmm_guest *guest,
			     struct vmm_emudev *edev,
			     const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	const char *attr;
	struct virtio_mmio_dev *m;

	m = vmm_zalloc(sizeof(struct virtio_mmio_dev));
	if (!m) {
		rc = VMM_EFAIL;
		goto virtio_mmio_probe_done;
	}

	m->guest = guest;

	/* Device name is "<guest>/<node>" */
	vmm_snprintf(m->dev.name, VIRTIO_DEVICE_MAX_NAME_LEN, 
		     "%s/%s", guest->name, edev->node->name); 
	m->dev.edev = edev;
	m->dev.tra = &mmio_tra;
	m->dev.tra_data = m;
	m->dev.guest = guest;

	m->config = (struct virtio_mmio_config) {
		     .magic          = {'v', 'i', 'r', 't'},
		     .version        = 1,
		     .vendor_id      = 0x52535658, /* XVSR */
		     .queue_num_max  = 256,
	};

	/* NOTE(review): raw u32 read through the attr pointer assumes
	 * proper alignment of devtree attribute storage — confirm */
	attr = vmm_devtree_attrval(edev->node, "virtio_type");
	if (attr) {
		m->config.device_id = *((u32 *)attr);
	} else {
		rc = VMM_EFAIL;
		goto virtio_mmio_probe_freestate_fail;
	}

	m->dev.id.type = m->config.device_id;

	rc = vmm_devtree_irq_get(edev->node, &m->irq, 0);
	if (rc) {
		goto virtio_mmio_probe_freestate_fail;
	}

	if ((rc = virtio_register_device(&m->dev))) {
		goto virtio_mmio_probe_freestate_fail;
	}

	edev->priv = m;

	goto virtio_mmio_probe_done;

virtio_mmio_probe_freestate_fail:
	vmm_free(m);
virtio_mmio_probe_done:
	return rc;
}
Exemple #20
0
/* Add a NUL-terminated string (including its terminator) under the
 * given fw_cfg key. The heap copy is owned by the fw_cfg state after
 * a successful add. Returns VMM_ENOMEM on allocation failure (the
 * allocation was previously passed to memcpy without a check).
 */
int fw_cfg_add_string(fw_cfg_state_t *s, u16 key, const char *value)
{
	size_t sz = strlen(value) + 1;
	u8 *data_dup = vmm_zalloc(sz);

	if (!data_dup) {
		return VMM_ENOMEM;
	}

	memcpy(data_dup, value, sz);

	return fw_cfg_add_bytes(s, key, data_dup, sz);
}
Exemple #21
0
/* Probe callback for the virtio-mmio transport emulator (devtree
 * read-helper variant). Allocates device state, fills the MMIO config
 * header, reads the virtio type and interrupt from the device tree,
 * and registers the device with the virtio core.
 */
static int virtio_mmio_probe(struct vmm_guest *guest,
			     struct vmm_emudev *edev,
			     const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK;
	struct virtio_mmio_dev *m;

	m = vmm_zalloc(sizeof(struct virtio_mmio_dev));
	if (!m) {
		rc = VMM_ENOMEM;
		goto virtio_mmio_probe_done;
	}

	m->guest = guest;

	/* Device name is "<guest>/<node>" */
	vmm_snprintf(m->dev.name, VMM_VIRTIO_DEVICE_MAX_NAME_LEN, 
		     "%s/%s", guest->name, edev->node->name); 
	m->dev.edev = edev;
	m->dev.tra = &mmio_tra;
	m->dev.tra_data = m;
	m->dev.guest = guest;

	m->config = (struct vmm_virtio_mmio_config) {
		     .magic          = {'v', 'i', 'r', 't'},
		     .version        = 1,
		     .vendor_id      = 0x52535658, /* XVSR */
		     .queue_num_max  = 256,
	};

	/* Mandatory device tree attributes: virtio type and interrupt */
	rc = vmm_devtree_read_u32(edev->node, "virtio_type",
				  &m->config.device_id);
	if (rc) {
		goto virtio_mmio_probe_freestate_fail;
	}

	m->dev.id.type = m->config.device_id;

	rc = vmm_devtree_read_u32_atindex(edev->node,
					  VMM_DEVTREE_INTERRUPTS_ATTR_NAME,
					  &m->irq, 0);
	if (rc) {
		goto virtio_mmio_probe_freestate_fail;
	}

	if ((rc = vmm_virtio_register_device(&m->dev))) {
		goto virtio_mmio_probe_freestate_fail;
	}

	edev->priv = m;

	goto virtio_mmio_probe_done;

virtio_mmio_probe_freestate_fail:
	vmm_free(m);
virtio_mmio_probe_done:
	return rc;
}
Exemple #22
0
/* Register a Freescale EPIT timer as a clocksource.
 * Finds a compatible device tree node, reads its clock frequency,
 * maps its registers and registers the clocksource. Resources are
 * released in reverse order on failure (goto-cleanup pattern).
 */
int __init epit_clocksource_init(void)
{
	int rc = VMM_ENODEV;
	u32 clock;
	struct vmm_devtree_node *node;
	struct epit_clocksource *ecs;

	/* find a epit compatible node */
	node = vmm_devtree_find_compatible(NULL, NULL, "freescale,epit-timer");
	if (!node) {
		goto fail;
	}

	/* Read clock frequency from node */
	rc = vmm_devtree_clock_frequency(node, &clock);
	if (rc) {
		goto fail;
	}

	/* allocate our struct */
	ecs = vmm_zalloc(sizeof(struct epit_clocksource));
	if (!ecs) {
		rc = VMM_ENOMEM;
		goto fail;
	}

	/* Map timer registers */
	rc = vmm_devtree_regmap(node, &ecs->base, 0);
	if (rc) {
		goto regmap_fail;
	}

	/* Setup clocksource */
	ecs->clksrc.name = node->name;
	ecs->clksrc.rating = 300;
	ecs->clksrc.read = epit_clksrc_read;
	ecs->clksrc.mask = VMM_CLOCKSOURCE_MASK(32);
	vmm_clocks_calc_mult_shift(&ecs->clksrc.mult,
				   &ecs->clksrc.shift,
				   clock, VMM_NSEC_PER_SEC, 10);
	ecs->clksrc.priv = ecs;

	/* Register clocksource */
	rc = vmm_clocksource_register(&ecs->clksrc);
	if (rc) {
		goto register_fail;
	}

	return VMM_OK;

 register_fail:
	vmm_devtree_regunmap(node, ecs->base, 0);
 regmap_fail:
	vmm_free(ecs);
 fail:
	return rc;
}
Exemple #23
0
/* Print the CVT standard name for a mode (e.g. ".786M3" style) to the
 * console. Builds the string piecewise in a 256-byte heap buffer,
 * tracking remaining space (cnt) and write position (offset).
 * Silently returns if the buffer cannot be allocated.
 */
static void vmm_fb_cvt_print_name(struct vmm_fb_cvt_data *cvt)
{
	u32 pixcount, pixcount_mod;
	int cnt = 255, offset = 0, read = 0;
	char *buf = vmm_zalloc(256);

	if (!buf)
		return;

	/* Megapixel count split into integer and 3-digit fractional parts */
	pixcount = (cvt->xres * udiv32(cvt->yres, cvt->interlace))/1000000;
	pixcount_mod = (cvt->xres * udiv32(cvt->yres, cvt->interlace)) % 1000000;
	pixcount_mod /= 1000;

	read = vmm_snprintf(buf+offset, cnt, "fbcvt: %dx%d@%d: CVT Name - ",
			cvt->xres, cvt->yres, cvt->refresh);
	offset += read;
	cnt -= read;

	if (cvt->status)
		vmm_snprintf(buf+offset, cnt, "Not a CVT standard - %d.%03d Mega "
			 "Pixel Image\n", pixcount, pixcount_mod);
	else {
		if (pixcount) {
			read = vmm_snprintf(buf+offset, cnt, "%d", pixcount);
			cnt -= read;
			offset += read;
		}

		read = vmm_snprintf(buf+offset, cnt, ".%03dM", pixcount_mod);
		cnt -= read;
		offset += read;

		/* Aspect-ratio suffix character per CVT naming convention */
		if (cvt->aspect_ratio == 0)
			read = vmm_snprintf(buf+offset, cnt, "3");
		else if (cvt->aspect_ratio == 3)
			read = vmm_snprintf(buf+offset, cnt, "4");
		else if (cvt->aspect_ratio == 1 || cvt->aspect_ratio == 4)
			read = vmm_snprintf(buf+offset, cnt, "9");
		else if (cvt->aspect_ratio == 2)
			read = vmm_snprintf(buf+offset, cnt, "A");
		else
			read = 0;
		cnt -= read;
		offset += read;

		/* "-R" marks reduced-blanking modes */
		if (cvt->flags & FB_CVT_FLAG_REDUCED_BLANK) {
			read = vmm_snprintf(buf+offset, cnt, "-R");
			cnt -= read;
			offset += read;
		}
	}

	vmm_printf("%s\n", buf);
	vmm_free(buf);
}
/* Setup callback for the telnet transport of vsdaemon.
 * argv[0] is the TCP port to listen on. Allocates transport state,
 * creates/binds/listens a TCP socket, and attaches the state to vsd.
 * Resources are released in reverse order on failure.
 */
static int vsdaemon_telnet_setup(struct vsdaemon *vsd, int argc, char **argv)
{
	int rc = VMM_OK;
	u32 port;
	struct vsdaemon_telnet *tnet;

	if (argc < 1) {
		return VMM_EINVALID;
	}

	port = strtoul(argv[0], NULL, 0);
	if (!vsdaemon_valid_port(port)) {
		return VMM_EINVALID;
	}

	tnet = vmm_zalloc(sizeof(*tnet));
	if (!tnet) {
		return VMM_ENOMEM;
	}

	tnet->port = port;

	tnet->sk = netstack_socket_alloc(NETSTACK_SOCKET_TCP);
	if (!tnet->sk) {
		rc = VMM_ENOMEM;
		goto fail1;
	}

	/* Bind to the requested port on all addresses */
	rc = netstack_socket_bind(tnet->sk, NULL, tnet->port); 
	if (rc) {
		goto fail2;
	}

	rc = netstack_socket_listen(tnet->sk);
	if (rc) {
		goto fail3;
	}

	/* No client connected yet */
	tnet->active_sk = NULL;

	/* Empty transmit ring buffer */
	tnet->tx_buf_head = tnet->tx_buf_tail = tnet->tx_buf_count = 0;
	INIT_SPIN_LOCK(&tnet->tx_buf_lock);

	vsdaemon_transport_set_data(vsd, tnet);

	return VMM_OK;

fail3:
	netstack_socket_close(tnet->sk);
fail2:
	netstack_socket_free(tnet->sk);
fail1:
	vmm_free(tnet);
	return rc;
}
Exemple #25
0
/* Allocate a net device plus sizeof_priv bytes of driver-private
 * data (pointed to by ndev->priv). Returns NULL with a console
 * message on allocation failure.
 */
struct net_device *alloc_etherdev(int sizeof_priv)
{
	struct net_device *ndev = vmm_zalloc(sizeof(struct net_device));

	if (ndev == NULL) {
		vmm_printf("%s Failed to allocate net device\n", __func__);
		return NULL;
	}

	ndev->priv = (void *) vmm_zalloc(sizeof_priv);
	if (ndev->priv == NULL) {
		vmm_printf("%s Failed to allocate ndev->priv of size %d\n",
				__func__, sizeof_priv);
		vmm_free(ndev);
		return NULL;
	}

	/* Fresh device starts out uninitialized */
	ndev->state = NETDEV_UNINITIALIZED;

	return ndev;
}
Exemple #26
0
static int virtio_net_connect(struct virtio_device *dev, 
			      struct virtio_emulator *emu)
{
	int i, rc;
	char *attr;
	struct virtio_net_dev *ndev;
	struct vmm_netswitch *nsw;

	ndev = vmm_zalloc(sizeof(struct virtio_net_dev));
	if (!ndev) {
		vmm_printf("Failed to allocate virtio net device....\n");
		return VMM_EFAIL;
	}

	ndev->vdev = dev;
	vmm_snprintf(ndev->name, VIRTIO_DEVICE_MAX_NAME_LEN, "%s", dev->name);
	ndev->port = vmm_netport_alloc(ndev->name, VMM_NETPORT_DEF_QUEUE_SIZE);
	ndev->port->mtu = VIRTIO_NET_MTU;
	ndev->port->link_changed = virtio_net_set_link;
	ndev->port->can_receive = virtio_net_can_receive;
	ndev->port->switch2port_xfer = virtio_net_switch2port_xfer;
	ndev->port->priv = ndev;

	rc = vmm_netport_register(ndev->port);
	if (rc) {
		vmm_netport_free(ndev->port);
		vmm_free(ndev);
		return rc;
	}

	attr = vmm_devtree_attrval(dev->edev->node, "switch");
	if (attr) {
		nsw = vmm_netswitch_find((char *)attr);
		if (!nsw) {
			vmm_printf("%s: Cannot find netswitch \"%s\"\n",
					__func__, (char *)attr);
		} else {
			vmm_netswitch_port_add(nsw, ndev->port);
		}
	}

	for (i = 0; i < 6; i++) {
		ndev->config.mac[i] = vmm_netport_mac(ndev->port)[i];
	}

	ndev->config.status = VIRTIO_NET_S_LINK_UP;
	dev->emu_data = ndev;

	return VMM_OK;
}
Exemple #27
0
/* Register the ARM generic timer counter as a clocksource.
 * The counter frequency comes from the device tree when available
 * (and is programmed into the hardware if writeable); otherwise the
 * preconfigured hardware frequency register is used.
 */
int __init generic_timer_clocksource_init(void)
{
	int rc;
	struct vmm_clocksource *cs;
	struct vmm_devtree_node *node;

	node = vmm_devtree_find_matching(NULL, generic_timer_match);
	if (!node) {
		return VMM_ENODEV;
	}

	if (generic_timer_hz == 0) {
		rc =  vmm_devtree_clock_frequency(node, &generic_timer_hz);
		if (rc) {
			/* Use preconfigured counter frequency 
			 * in absence of dts node 
			 */
			generic_timer_hz = 
				generic_timer_reg_read(GENERIC_TIMER_REG_FREQ);
		} else {
			if (generic_timer_freq_writeable()) {
				/* Program the counter frequency 
				 * as per the dts node
				 */
				generic_timer_reg_write(GENERIC_TIMER_REG_FREQ,
							generic_timer_hz);
			}
		}
	}

	/* Still no usable frequency: give up */
	if (generic_timer_hz == 0) {
		return VMM_EFAIL;
	}

	cs = vmm_zalloc(sizeof(struct vmm_clocksource));
	if (!cs) {
		return VMM_EFAIL;
	}

	cs->name = "gen-timer";
	cs->rating = 400;
	cs->read = &generic_counter_read;
	/* Architectural counter is at least 56 bits wide */
	cs->mask = VMM_CLOCKSOURCE_MASK(56);
	vmm_clocks_calc_mult_shift(&cs->mult, &cs->shift, 
				   generic_timer_hz, VMM_NSEC_PER_SEC, 10);
	cs->priv = NULL;

	return vmm_clocksource_register(cs);
}
Exemple #28
0
/* Recursively resolve a '/'-separated path to its directory entry,
 * starting from pdentry and reading directory extents from mdev.
 * Returns the matching dentry or NULL on lookup/read failure.
 */
static struct dir_entry *path_to_dentry(struct vmm_blockdev *mdev,
					const char *path,
					struct dir_entry *pdentry)
{
	char dirname[VFS_MAX_NAME] = { 0 };
	struct dir_entry *dentry, *d_root;
	int i = 0, rd;
	int len = 0;
	char rpath[VFS_MAX_NAME];

	len = strlen(path);

	/* Need room for path + possible appended '/' + NUL */
	if (!len || (len + 2) > (int)sizeof(rpath)) return NULL;

	/* Copy the full path; the previous "len"-sized call silently
	 * dropped the last character (snprintf size includes the NUL). */
	vmm_snprintf(rpath, sizeof(rpath), "%s", path);
	if (rpath[len - 1] != '/') {
		rpath[len] = '/';
		rpath[len+1] = 0;
	}

	path = rpath;

	/* Leading '/' means "this directory itself" when nothing follows */
	if (*path == '/') {
		path++;
		if (!*path) return pdentry;
	}

	/* Extract the first path component into dirname */
	while (*path && *path != '/') {
		dirname[i] = *path;
		path++;
		i++;
	}

	dentry = lookup_dentry(dirname, pdentry);
	if (dentry) {
		d_root = vmm_zalloc(dentry->dlen.lsb);
		if (!d_root) {
			return NULL;
		}
		rd = vmm_blockdev_read(mdev, (u8 *)d_root,
				       (dentry->start_lba.lsb * 2048),
				       dentry->dlen.lsb);
		if (rd != dentry->dlen.lsb) {
			vmm_free(d_root);
			return NULL;
		}
		/* NOTE(review): d_root is not freed here because the
		 * returned dentry may point into it — confirm ownership */
		dentry = path_to_dentry(mdev, path, d_root);
	}

	return dentry;
}
Exemple #29
0
static int ext4fs_vget(struct mount *m, struct vnode *v)
{
	int rc;
	struct ext4fs_node *node;

	node = vmm_zalloc(sizeof(struct ext4fs_node));
	if (!node) {
		return VMM_ENOMEM;
	}

	rc = ext4fs_node_init(node);

	v->v_data = node;

	return rc;
}
Exemple #30
0
/* Connect callback for the virtio block emulator.
 * Allocates device state, registers a blockdev notification client,
 * optionally binds to the block device named by the "blkdev"
 * attribute, and fills in the virtio block config.
 */
static int virtio_blk_connect(struct virtio_device *dev, 
			      struct virtio_emulator *emu)
{
	int rc;
	char *attr;
	struct virtio_blk_dev *bdev;

	bdev = vmm_zalloc(sizeof(struct virtio_blk_dev));
	if (!bdev) {
		vmm_printf("Failed to allocate virtio block device....\n");
		return VMM_ENOMEM;
	}
	bdev->vdev = dev;

	bdev->blk_client.notifier_call = &virtio_blk_notification;
	bdev->blk_client.priority = 0;
	rc = vmm_blockdev_register_client(&bdev->blk_client);
	if (rc) {
		vmm_free(bdev);
		return rc;
	}

	INIT_SPIN_LOCK(&bdev->blk_lock);

	attr = vmm_devtree_attrval(dev->edev->node, "blkdev");
	if (attr) {
		if (strlcpy(bdev->blk_name,attr, sizeof(bdev->blk_name)) >=
		    sizeof(bdev->blk_name)) {
			/* Fixed: bdev was freed while its blk_client was
			 * still registered (dangling notifier) */
			vmm_blockdev_unregister_client(&bdev->blk_client);
			vmm_free(bdev);
			return VMM_EOVERFLOW;
		}
		bdev->blk = vmm_blockdev_find(bdev->blk_name);
	} else {
		bdev->blk_name[0] = 0;
		bdev->blk = NULL;
	}

	/* Advertise geometry of the backing device (0 when unbound) */
	bdev->config.capacity = (bdev->blk) ? bdev->blk->num_blocks : 0;
	bdev->config.seg_max = VIRTIO_BLK_DISK_SEG_MAX,
	bdev->config.blk_size = 
		(bdev->blk) ? bdev->blk->block_size : VIRTIO_BLK_SECTOR_SIZE;

	dev->emu_data = bdev;

	return VMM_OK;
}