Exemple #1
0
/* Close a framebuffer: invoke the driver's release hook, broadcast
 * FB_EVENT_CLOSED, then drop our reference.
 *
 * Returns VMM_OK on success, VMM_EFAIL if info is NULL.
 *
 * BUG FIX: the original called put_fb_info() BEFORE re-taking
 * info->lock for the CLOSED notification. put_fb_info() may free
 * info when the reference count reaches zero, making the later
 * lock/notify a use-after-free. The reference is now dropped last.
 */
int vmm_fb_close(struct vmm_fb_info *info)
{
	struct vmm_fb_event event;

	if (!info) {
		return VMM_EFAIL;
	}

	vmm_mutex_lock(&info->lock);
	if (info->fbops->fb_release) {
		/* Note: we don't have userspace so, 
		 * always call fb_release with user=0
		 */
		info->fbops->fb_release(info, 0);
	}
	vmm_mutex_unlock(&info->lock);

	/* Tell listeners the framebuffer is closed */
	vmm_mutex_lock(&info->lock);
	event.info = info;
	vmm_fb_notifier_call_chain(FB_EVENT_CLOSED, &event);
	vmm_mutex_unlock(&info->lock);

	/* Drop our reference only after the last use of info;
	 * info must not be touched past this point. */
	put_fb_info(info);

	return VMM_OK;
}
Exemple #2
0
/* Open a framebuffer: broadcast FB_EVENT_OPENED, take a reference,
 * then invoke the driver's open hook (user=0, no userspace here).
 *
 * Returns VMM_EFAIL if info is NULL, otherwise the driver's fb_open
 * result (0 when the driver provides no hook). The reference taken
 * by get_fb_info() is dropped again if the driver open fails.
 */
int vmm_fb_open(struct vmm_fb_info *info)
{
	struct vmm_fb_event event;
	int res = 0;

	if (!info) {
		return VMM_EFAIL;
	}

	/* Notify listeners first */
	event.info = info;
	vmm_mutex_lock(&info->lock);
	vmm_fb_notifier_call_chain(FB_EVENT_OPENED, &event);
	vmm_mutex_unlock(&info->lock);

	/* Pin the framebuffer while it is open */
	get_fb_info(info);

	vmm_mutex_lock(&info->lock);
	if (info->fbops->fb_open) {
		/* Note: we don't have userspace so, 
		 * always call fb_open with user=0
		 */
		res = info->fbops->fb_open(info, 0);
	}
	vmm_mutex_unlock(&info->lock);

	/* Driver refused the open: undo the reference */
	if (res) {
		put_fb_info(info);
	}

	return res;
}
Exemple #3
0
/* Create and register a child block device covering the LBA range
 * [start_lba, start_lba + num_blocks) of parent bdev.
 *
 * Returns VMM_OK on success; VMM_EFAIL for a NULL parent,
 * VMM_ERANGE if the range lies outside the parent, VMM_ENOMEM on
 * allocation failure, VMM_EOVERFLOW if the description is truncated,
 * or the error from vmm_blockdev_register().
 *
 * BUG FIXES vs. original:
 *  - __blockdev_alloc() result was dereferenced without a NULL check.
 *  - the strlcpy-overflow path jumped to free_blockdev with
 *    bdev->child_lock still held (mutex leak).
 */
int vmm_blockdev_add_child(struct vmm_blockdev *bdev, 
			   u64 start_lba, u64 num_blocks)
{
	int rc;
	struct vmm_blockdev *child_bdev;

	if (!bdev) {
		return VMM_EFAIL;
	}

	/* The child range must fit entirely inside the parent */
	if (bdev->num_blocks < num_blocks) {
		return VMM_ERANGE;
	}
	if ((start_lba < bdev->start_lba) ||
	    ((bdev->start_lba + bdev->num_blocks) <= start_lba)) {
		return VMM_ERANGE;
	}
	if ((bdev->start_lba + bdev->num_blocks) < (start_lba + num_blocks)) {
		return VMM_ERANGE;
	}

	child_bdev = __blockdev_alloc(FALSE);
	if (!child_bdev) {
		return VMM_ENOMEM;
	}
	child_bdev->parent = bdev;
	vmm_mutex_lock(&bdev->child_lock);
	/* Name: "<parent>p<index>" using the running child count */
	vmm_snprintf(child_bdev->name, sizeof(child_bdev->name),
			"%sp%d", bdev->name, bdev->child_count);
	if (strlcpy(child_bdev->desc, bdev->desc, sizeof(child_bdev->desc)) >=
	    sizeof(child_bdev->desc)) {
		rc = VMM_EOVERFLOW;
		/* Must release the lock before bailing out */
		vmm_mutex_unlock(&bdev->child_lock);
		goto free_blockdev;
	}
	bdev->child_count++;
	list_add_tail(&child_bdev->head, &bdev->child_list);
	vmm_mutex_unlock(&bdev->child_lock);
	/* Child inherits parent's flags, geometry, and request queue */
	child_bdev->flags = bdev->flags;
	child_bdev->start_lba = start_lba;
	child_bdev->num_blocks = num_blocks;
	child_bdev->block_size = bdev->block_size;
	child_bdev->rq = bdev->rq;

	rc = vmm_blockdev_register(child_bdev);
	if (rc) {
		goto remove_from_list;
	}

	return rc;

remove_from_list:
	vmm_mutex_lock(&bdev->child_lock);
	list_del(&child_bdev->head);
	vmm_mutex_unlock(&bdev->child_lock);
free_blockdev:
	__blockdev_free(child_bdev, FALSE);
	return rc;
}
Exemple #4
0
/* Guest address-space notifier for the GPEX emulator.
 *
 * On VMM_GUEST_ASPACE_EVENT_RESET, (re)registers the PCI host
 * controller for this guest. Returns NOTIFY_OK when the controller
 * was registered, NOTIFY_DONE for unhandled events or on failure.
 * gpex->lock is held across the registration attempt.
 */
static int gpex_guest_aspace_notification(struct vmm_notifier_block *nb,
					  unsigned long evt, void *data)
{
	struct gpex_state *gpex =
		container_of(nb, struct gpex_state, guest_aspace_client);
	int ret = NOTIFY_DONE;

	vmm_mutex_lock(&gpex->lock);

	if (evt == VMM_GUEST_ASPACE_EVENT_RESET) {
		int rc = pci_emu_register_controller(gpex->node,
						     gpex->guest,
						     gpex->controller);
		if (rc != VMM_OK) {
			GPEX_LOG(LVL_ERR,
				   "Failed to attach PCI controller.\n");
		} else {
			ret = NOTIFY_OK;
		}
	}

	vmm_mutex_unlock(&gpex->lock);

	return ret;
}
Exemple #5
0
/* Try to lock a framebuffer for use.
 *
 * Returns 1 with info->lock HELD when the framebuffer still has
 * operations attached (caller must unlock later); returns 0 with
 * the lock released when fbops is gone (device being torn down).
 */
int vmm_lock_fb_info(struct vmm_fb_info *info)
{
	vmm_mutex_lock(&info->lock);
	if (info->fbops) {
		/* Success: keep holding the lock for the caller */
		return 1;
	}
	vmm_mutex_unlock(&info->lock);
	return 0;
}
Exemple #6
0
/* Worker body for mutex test 4: repeatedly acquire and release the
 * shared wake mutex, then signal completion to the test controller.
 */
static int mutex4_worker_thread_main(void *data)
{
	int loops_left;

	/* Hammer the wake mutex NUM_LOOPS times */
	for (loops_left = NUM_LOOPS; loops_left > 0; loops_left--) {
		vmm_mutex_lock(&mutex1);
		vmm_mutex_unlock(&mutex1);
	}

	/* Tell the controller this worker has finished */
	vmm_completion_complete(&work_done);

	return 0;
}
Exemple #7
0
/* Worker body for mutex test 7: take the mutex, flag ownership via
 * shared_data, then sleep forever. The test harness stops this
 * thread externally; the final return is never reached.
 */
static int mutex7_worker_thread_main(void *data)
{
	/* Grab the mutex and never release it */
	vmm_mutex_lock(&mutex1);

	/* Flag that we believe we now own the mutex */
	shared_data = 1;

	/* Park here until the harness kills the thread */
	for (;;) {
		vmm_msleep(SLEEP_MSECS);
	}

	return 0;
}
Exemple #8
0
/* Unregister a block device: recursively unregister and free all
 * children, broadcast the UNREGISTER event, then remove the class
 * device entry.
 *
 * Returns VMM_OK on success; VMM_EFAIL for a NULL bdev or a missing
 * class device; otherwise the first error from a child unregister or
 * from vmm_devdrv_unregister_classdev().
 *
 * BUG FIX: on a child-unregister failure the original left the
 * popped child neither freed nor linked, losing it from the parent's
 * list. It is now re-linked before returning the error.
 */
int vmm_blockdev_unregister(struct vmm_blockdev *bdev)
{
	int rc;
	struct dlist *l;
	struct vmm_blockdev *child_bdev;
	struct vmm_blockdev_event event;
	struct vmm_classdev *cd;

	if (!bdev) {
		return VMM_EFAIL;
	}

	/* Unreg & free child block devices */
	vmm_mutex_lock(&bdev->child_lock);
	while (!list_empty(&bdev->child_list)) {
		l = list_pop(&bdev->child_list);
		child_bdev = list_entry(l, struct vmm_blockdev, head);
		if ((rc = vmm_blockdev_unregister(child_bdev))) {
			/* Re-link the child so the parent's list stays
			 * consistent on failure */
			list_add(&child_bdev->head, &bdev->child_list);
			vmm_mutex_unlock(&bdev->child_lock);
			return rc;
		}
		__blockdev_free(child_bdev, FALSE);
	}
	vmm_mutex_unlock(&bdev->child_lock);

	/* Broadcast unregister event */
	event.bdev = bdev;
	event.data = NULL;
	vmm_blocking_notifier_call(&bdev_notifier_chain, 
				   VMM_BLOCKDEV_EVENT_UNREGISTER, 
				   &event);

	cd = vmm_devdrv_find_classdev(VMM_BLOCKDEV_CLASS_NAME, bdev->name);
	if (!cd) {
		return VMM_EFAIL;
	}

	rc = vmm_devdrv_unregister_classdev(VMM_BLOCKDEV_CLASS_NAME, cd);
	if (!rc) {
		vmm_free(cd);
	}

	return rc;
}
Exemple #9
0
/* Driver for mutex test 4: hold mutex1 so all four workers block on
 * it, release it, then wait until every worker signals completion
 * before stopping them.
 */
static int mutex4_do_test(struct vmm_chardev *cdev)
{
	int completed = 0;

	/* Fresh completion for this run */
	INIT_COMPLETION(&work_done);

	/* Hold the mutex so the workers pile up behind it */
	vmm_mutex_lock(&mutex1);

	/* Launch all workers, then give them time to block */
	vmm_threads_start(workers[0]);
	vmm_threads_start(workers[1]);
	vmm_threads_start(workers[2]);
	vmm_threads_start(workers[3]);
	vmm_msleep(SLEEP_MSECS*40);

	/* Let the workers through */
	vmm_mutex_unlock(&mutex1);

	/* Wait until every worker has signalled work_done */
	while (completed < NUM_THREADS) {
		vmm_completion_wait(&work_done);
		completed++;
	}

	/* Tear the workers down in reverse start order */
	vmm_threads_stop(workers[3]);
	vmm_threads_stop(workers[2]);
	vmm_threads_stop(workers[1]);
	vmm_threads_stop(workers[0]);

	return 0;
}
Exemple #10
0
/* Probe/instantiate the GPEX (generic PCI express) emulator for a
 * guest: allocate the emulator state and host controller, read the
 * bus count from the guest device tree, attach the buses, and
 * register the controller.
 *
 * Returns VMM_OK on success, an error code otherwise; on failure all
 * allocations made here are released.
 *
 * BUG FIX: when the controller allocation failed, the original
 * jumped to _failed with rc still VMM_OK, so the probe freed its
 * state yet reported success to the caller.
 */
static int gpex_emulator_probe(struct vmm_guest *guest,
			       struct vmm_emudev *edev,
			       const struct vmm_devtree_nodeid *eid)
{
	int rc = VMM_OK, i;
	char name[64];
	struct gpex_state *s;
	struct pci_class *class;

	s = vmm_zalloc(sizeof(struct gpex_state));
	if (!s) {
		GPEX_LOG(LVL_ERR, "Failed to allocate gpex state.\n");
		rc = VMM_EFAIL;
		goto _failed;
	}

	s->node = edev->node;
	s->guest = guest;
	s->controller = vmm_zalloc(sizeof(struct pci_host_controller));
	if (!s->controller) {
		GPEX_LOG(LVL_ERR, "Failed to allocate pci host contoller"
					"for gpex.\n");
		rc = VMM_EFAIL;
		goto _failed;
	}
	INIT_MUTEX(&s->lock);
	INIT_LIST_HEAD(&s->controller->head);
	INIT_LIST_HEAD(&s->controller->attached_buses);
	INIT_SPIN_LOCK(&s->controller->lock);

	/* initialize class */
	class = PCI_CONTROLLER_TO_CLASS(s->controller);

	INIT_SPIN_LOCK(&class->lock);
	class->conf_header.vendor_id = PCI_VENDOR_ID_REDHAT;
	class->conf_header.device_id = PCI_DEVICE_ID_REDHAT_PCIE_HOST;
	class->config_read = gpex_config_read;
	class->config_write = gpex_config_write;

	rc = vmm_devtree_read_u32(edev->node, "nr_buses",
				  &s->controller->nr_buses);
	if (rc) {
		GPEX_LOG(LVL_ERR, "Failed to get fifo size in guest DTS.\n");
		goto _failed;
	}

	GPEX_LOG(LVL_VERBOSE, "%s: %d busses on this controller.\n",
		   __func__, s->controller->nr_buses);

	/* Attach one emulated bus per DTS-requested bus.
	 * NOTE(review): buses attached before a later failure are not
	 * detached here — presumably freed with the controller; confirm
	 * against pci_emu_attach_new_pci_bus() ownership semantics. */
	for (i = 0; i < s->controller->nr_buses; i++) {
		if ((rc = pci_emu_attach_new_pci_bus(s->controller, i))
		    != VMM_OK) {
			GPEX_LOG(LVL_ERR, "Failed to attach PCI bus %d\n",
				   i+1);
			goto _failed;
		}
	}

	/* Build "<guest>/<node>" display name; overflow is fatal */
	strlcpy(name, guest->name, sizeof(name));
	strlcat(name, "/", sizeof(name));
	if (strlcat(name, edev->node->name, sizeof(name)) >= sizeof(name)) {
		rc = VMM_EOVERFLOW;
		goto _failed;
	}

	edev->priv = s;

	vmm_mutex_lock(&s->lock);

	if ((rc = pci_emu_register_controller(s->node, s->guest,
				s->controller)) != VMM_OK) {
			GPEX_LOG(LVL_ERR,
				   "Failed to attach PCI controller.\n");
			goto _controller_failed;
	}

	vmm_mutex_unlock(&s->lock);

	GPEX_LOG(LVL_VERBOSE, "Success.\n");

	goto _done;

_controller_failed:
	vmm_mutex_unlock(&s->lock);

_failed:
	if (s && s->controller) vmm_free(s->controller);
	if (s) vmm_free(s);

_done:
	return rc;
}
Exemple #11
0
/* Send one ICMP echo request to ripaddr (4-byte IPv4 address) with
 * `size` payload bytes and sequence number `seqno`, then block until
 * the reply arrives or PING_DELAY_NS elapses. The reply handler
 * (elsewhere in this file) fills *reply and completes lns.ping_done.
 * Returns VMM_ENOMEM / VMM_EFAIL on pbuf problems, otherwise the
 * result of vmm_completion_wait_timeout().
 */
int netstack_send_echo(u8 *ripaddr, u16 size, u16 seqno, 
			struct netstack_echo_reply *reply)
{
	int i, rc;
	u64 timeout = PING_DELAY_NS;
	struct pbuf *p;
	struct icmp_echo_hdr *iecho;
	size_t len = sizeof(struct icmp_echo_hdr) + size;

	LWIP_ASSERT("ping_size <= 0xffff", len <= 0xffff);

	/* Lock ping context for atomicity */
	vmm_mutex_lock(&lns.ping_lock);

	/* Alloc ping pbuf */
	p = pbuf_alloc(PBUF_IP, (u16_t)len, PBUF_RAM);
	if (!p) {
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_ENOMEM;
	}
	/* The request must fit in a single contiguous pbuf segment */
	if ((p->len != p->tot_len) || (p->next != NULL)) {
		pbuf_free(p);
		vmm_mutex_unlock(&lns.ping_lock);
		return VMM_EFAIL;
	}

	/* Prepare ECHO request: header plus a 0,1,2,... byte pattern */
	iecho = (struct icmp_echo_hdr *)p->payload;
	ICMPH_TYPE_SET(iecho, ICMP_ECHO);
	ICMPH_CODE_SET(iecho, 0);
	iecho->chksum = 0;
	iecho->id     = PING_ID;
	iecho->seqno  = htons(seqno);
	for (i = 0; i < size; i++) {
		((char*)iecho)[sizeof(struct icmp_echo_hdr) + i] = (char)i;
	}
	/* Checksum computed over header + payload with chksum field zeroed */
	iecho->chksum = inet_chksum(iecho, len);

	/* Prepare target address */
	IP4_ADDR(&lns.ping_addr, ripaddr[0],ripaddr[1],ripaddr[2],ripaddr[3]);

	/* Save ping info for the receive callback.
	 * NOTE(review): the `= 0` below is a dead store — recv_tstamp is
	 * immediately re-seeded to send + PING_DELAY_NS two lines later,
	 * so an unanswered ping yields rtt == PING_DELAY_NS; the reply
	 * handler presumably overwrites recv_tstamp on success — confirm. */
	lns.ping_seq_num = seqno;
	lns.ping_reply = reply;
	lns.ping_recv_tstamp = 0;
	lns.ping_send_tstamp = vmm_timer_timestamp();
	lns.ping_recv_tstamp = lns.ping_send_tstamp + PING_DELAY_NS;

	/* Send ping packet */
	raw_sendto(lns.ping_pcb, p, &lns.ping_addr);

	/* Wait for ping to complete with timeout.
	 * timeout is recomputed after the wait because the reply handler
	 * updates recv_tstamp; rtt is reported in microseconds (ns/1000). */
	timeout = lns.ping_recv_tstamp - lns.ping_send_tstamp;
	rc = vmm_completion_wait_timeout(&lns.ping_done, &timeout);
	timeout = lns.ping_recv_tstamp - lns.ping_send_tstamp;
	lns.ping_reply->rtt = udiv64(timeout, 1000);

	/* Free ping pbuf */
	pbuf_free(p);

	/* Clear ping reply pointer */
	lns.ping_reply = NULL;

	/* Unlock ping context */
	vmm_mutex_unlock(&lns.ping_lock);

	return rc;
}
Exemple #12
0
/* Register a framebuffer with the framework: validate it, set up the
 * default pixmap and mode list, create the class device entry, and
 * broadcast FB_EVENT_FB_REGISTERED.
 *
 * Returns VMM_OK on success; VMM_EFAIL for NULL info/fbops or
 * allocation failure, VMM_EOVERFLOW if the device name does not fit,
 * or the error from foreignness check / classdev registration.
 *
 * BUG FIX: the original copied info->dev->node->name into the
 * fixed-size cd->name with an unbounded strcpy; now a bounded
 * strlcpy with overflow check, matching the style used elsewhere
 * in this codebase.
 */
int vmm_fb_register(struct vmm_fb_info *info)
{
	int rc;
	struct vmm_fb_event event;
	struct vmm_fb_videomode mode;
	struct vmm_classdev *cd;

	if (info == NULL) {
		return VMM_EFAIL;
	}
	if (info->fbops == NULL) {
		return VMM_EFAIL;
	}

	if ((rc = vmm_fb_check_foreignness(info))) {
		return rc;
	}

	vmm_fb_remove_conflicting_framebuffers(info->apertures, 
					       info->fix.id, FALSE);

	/* Fresh refcount and lock for this framebuffer */
	arch_atomic_write(&info->count, 1);
	INIT_MUTEX(&info->lock);

	/* Provide a default pixmap if the driver did not supply one.
	 * Allocation failure is non-fatal here: pixmap.addr stays NULL
	 * and FB_PIXMAP_DEFAULT is not set. */
	if (info->pixmap.addr == NULL) {
		info->pixmap.addr = vmm_malloc(FBPIXMAPSIZE);
		if (info->pixmap.addr) {
			info->pixmap.size = FBPIXMAPSIZE;
			info->pixmap.buf_align = 1;
			info->pixmap.scan_align = 1;
			info->pixmap.access_align = 32;
			info->pixmap.flags = FB_PIXMAP_DEFAULT;
		}
	}	
	info->pixmap.offset = 0;

	/* Unlimited blit dimensions unless the driver constrained them */
	if (!info->pixmap.blit_x)
		info->pixmap.blit_x = ~(u32)0;

	if (!info->pixmap.blit_y)
		info->pixmap.blit_y = ~(u32)0;

	/* Initialize the mode list if the driver left it unlinked */
	if (!info->modelist.prev || !info->modelist.next) {
		INIT_LIST_HEAD(&info->modelist);
	}

	/* Seed the mode list with the current video mode */
	vmm_fb_var_to_videomode(&mode, &info->var);
	vmm_fb_add_videomode(&mode, &info->modelist);

	cd = vmm_malloc(sizeof(struct vmm_classdev));
	if (!cd) {
		rc = VMM_EFAIL;
		goto free_pixmap;
	}

	INIT_LIST_HEAD(&cd->head);
	if (strlcpy(cd->name, info->dev->node->name, sizeof(cd->name)) >=
	    sizeof(cd->name)) {
		rc = VMM_EOVERFLOW;
		goto free_classdev;
	}
	cd->dev = info->dev;
	cd->priv = info;

	rc = vmm_devdrv_register_classdev(VMM_FB_CLASS_NAME, cd);
	if (rc) {
		goto free_classdev;
	}

	/* Tell listeners the framebuffer is live */
	vmm_mutex_lock(&info->lock);
	event.info = info;
	vmm_fb_notifier_call_chain(FB_EVENT_FB_REGISTERED, &event);
	vmm_mutex_unlock(&info->lock);

	return VMM_OK;

free_classdev:
	cd->dev = NULL;
	cd->priv = NULL;
	vmm_free(cd);
free_pixmap:
	/* Only free the pixmap if we allocated it above */
	if (info->pixmap.flags & FB_PIXMAP_DEFAULT) {
		vmm_free(info->pixmap.addr);
	}
	return rc;
}