Example #1
static void usb_release_device(struct vmm_device *ddev)
{
	irq_flags_t flags;
	struct usb_device *dev = to_usb_device(ddev);
	struct usb_device *parent = (ddev->parent) ?
				to_usb_device(ddev->parent) : NULL;

	/* Update HCD device number bitmap */
	vmm_spin_lock_irqsave(&dev->hcd->devicemap_lock, flags);
	__clear_bit(dev->devnum - 1, dev->hcd->devicemap);
	vmm_spin_unlock_irqrestore(&dev->hcd->devicemap_lock, flags);

	/* Update parent device */
	if (parent) {
		vmm_spin_lock_irqsave(&parent->children_lock, flags);
		parent->children[dev->portnum] = NULL;
		vmm_spin_unlock_irqrestore(&parent->children_lock, flags);
	}

	/* Root hubs aren't true devices, so don't free HCD resources for them */
	if (dev->hcd->driver->free_dev && parent) {
		dev->hcd->driver->free_dev(dev->hcd, dev);
	}

	/* Drop our HCD reference; this decrements the HCD reference count */
	usb_dref_hcd(dev->hcd);

	/* Release memory of the usb device */
	vmm_free(dev);
}
Example #2
int vmm_mutex_unlock(struct vmm_mutex *mut)
{
	int rc = VMM_EINVALID;
	irq_flags_t flags;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	if (mut->lock && mut->owner == current_vcpu) {
		mut->lock--;
		if (!mut->lock) {
			mut->owner = NULL;
			vmm_manager_vcpu_resource_remove(current_vcpu,
							 &mut->res);
			rc = __vmm_waitqueue_wakeall(&mut->wq);
			if (rc == VMM_ENOENT) {
				rc = VMM_OK;
			}
		} else {
			rc = VMM_OK;
		}
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return rc;
}
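The unlock path above only releases the mutex when the caller currently owns it, and pairs with the recursive acquisition logic shown later in mutex_lock_common() (Example #28). Below is a minimal usage sketch from orphan (thread) context; it assumes vmm_mutex_lock() is the public blocking wrapper around mutex_lock_common() and that shared_mutex has been initialized elsewhere, so treat names not appearing in the examples as assumptions.

/* Hedged sketch: serialize updates to a shared counter from orphan
 * (thread) context. shared_mutex is assumed to be initialized elsewhere
 * and vmm_mutex_lock() is assumed to wrap mutex_lock_common(). */
static struct vmm_mutex shared_mutex;
static u32 shared_counter;

static int shared_counter_increment(void)
{
	int rc;

	rc = vmm_mutex_lock(&shared_mutex);	/* may sleep, orphan context only */
	if (rc) {
		return rc;
	}

	shared_counter++;

	return vmm_mutex_unlock(&shared_mutex);
}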
Example #3
/* Process IRQ asserted in device emulation framework */
static int pl190_emulator_irq_handle(struct vmm_emupic *epic, 
				     u32 irq, int cpu, int level)
{
	irq_flags_t flags;
	struct pl190_emulator_state *s =
	    (struct pl190_emulator_state *)epic->priv;

	/* Ensure irq is in range [num_base_irq, num_base_irq + num_irq) */
	if ((irq < s->num_base_irq) || ((s->num_base_irq + s->num_irq) <= irq)) {
		return VMM_EMUPIC_IRQ_UNHANDLED;
	}

	irq -= s->num_base_irq;

	if (level == (s->level & (1u << irq))) {
		return VMM_EMUPIC_IRQ_HANDLED;
	}

	vmm_spin_lock_irqsave(&s->lock, flags);

	pl190_emulator_set_irq(s, irq, level);

	vmm_spin_unlock_irqrestore(&s->lock, flags);

	return VMM_EMUPIC_IRQ_HANDLED;
}
Example #4
static void vsdaemon_telnet_receive_char(struct vsdaemon *vsd, u8 ch)
{
	irq_flags_t flags;
	struct vsdaemon_telnet *tnet = vsdaemon_transport_get_data(vsd);

	vmm_spin_lock_irqsave(&tnet->tx_buf_lock, flags);

	/* If the Tx ring buffer is full, drop the oldest character
	 * by advancing the head */
	if (VSDAEMON_TXBUF_SIZE == tnet->tx_buf_count) {
		tnet->tx_buf_head++;
		if (tnet->tx_buf_head >= VSDAEMON_TXBUF_SIZE) {
			tnet->tx_buf_head = 0;
		}
		tnet->tx_buf_count--;
	}

	/* Enqueue the new character at the tail (with wrap-around) */
	tnet->tx_buf[tnet->tx_buf_tail] = ch;

	tnet->tx_buf_tail++;
	if (tnet->tx_buf_tail >= VSDAEMON_TXBUF_SIZE) {
		tnet->tx_buf_tail = 0;
	}

	tnet->tx_buf_count++;

	vmm_spin_unlock_irqrestore(&tnet->tx_buf_lock, flags);
}
Example #5
static void __notrace vmm_profile_enter(void *ip, void *parent_ip)
{
	int index;
	irq_flags_t flags;

	if (pctrl.is_in_trace)
		return;

	pctrl.is_in_trace = 1;

	index = kallsyms_get_symbol_pos((long unsigned int)ip, NULL, NULL);

	if (pctrl.stat[index].is_tracing == 1) {
		goto out;
	}

	if (pctrl.stat[index].time_in != 0) {
		goto out;
	}

	vmm_spin_lock_irqsave(&pctrl.lock, flags);

	pctrl.stat[index].counter++;
	pctrl.stat[index].is_tracing = 1;
	pctrl.stat[index].time_in = vmm_timer_timestamp_for_profile();

	vmm_spin_unlock_irqrestore(&pctrl.lock, flags);

 out:
	pctrl.is_in_trace = 0;
}
Example #6
static int uip_switch2port_xfer(struct vmm_netport *port,
			 	struct vmm_mbuf *mbuf)
{
	struct uip_port_state *s = &uip_port_state;
	int rc = VMM_OK;
	unsigned long flags;
#ifdef UIP_DEBUG
	char tname[30];
#endif
	u8 *dstmac = ether_dstmac(mtod(mbuf, u8 *));

	DPRINTF("UIP received frame with dst MAC[%s]",
			ethaddr_to_str(tname, dstmac));

	/* Do not accept frames which are addressed neither to
	 * our MAC nor to the broadcast MAC */
	if (compare_ether_addr(dstmac, port->macaddr)
		&& !is_broadcast_ether_addr(dstmac)) {
		/* Reject packets addressed to someone else */
		DPRINTF("  and rejected\n");
		return VMM_EFAIL;
	} else {
		DPRINTF("  and accepted\n");
	}
	vmm_spin_lock_irqsave(&s->lock, flags);
	list_add_tail(&mbuf->m_list, &s->rxbuf);
	vmm_spin_unlock_irqrestore(&s->lock, flags);
	vmm_completion_complete(&s->rx_possible);

	return rc;
}
Example #7
enum usb_device_state usb_get_device_state(struct usb_device *udev)
{
	irq_flags_t flags;
	enum usb_device_state ret;

	vmm_spin_lock_irqsave(&device_state_lock, flags);
	ret = udev->state;
	vmm_spin_unlock_irqrestore(&device_state_lock, flags);

	return ret;
}
Example #8
struct vmm_netport_xfer *vmm_netport_alloc_xfer(struct vmm_netport *port)
{
	struct dlist *l;
	irq_flags_t flags;

	if (!port) {
		return NULL;
	}

	vmm_spin_lock_irqsave(&port->free_list_lock, flags);
	if (list_empty(&port->free_list)) {
		vmm_spin_unlock_irqrestore(&port->free_list_lock, flags);
		return NULL;
	}
	l = list_pop(&port->free_list);
	port->free_count--;
	vmm_spin_unlock_irqrestore(&port->free_list_lock, flags);

	return list_entry(l, struct vmm_netport_xfer, head);
}
Example #9
int vmm_atomic_notifier_unregister(struct vmm_atomic_notifier_chain *nc,
				   struct vmm_notifier_block *n)
{
	irq_flags_t flags;
	int ret;

	vmm_spin_lock_irqsave(&nc->lock, flags);
	ret = notifier_chain_unregister(&nc->head, n);
	vmm_spin_unlock_irqrestore(&nc->lock, flags);

	return ret;
}
Example #10
/**
 *  Fills uip_buf with a packet from the RX queue. If the RX queue is
 *  empty, we wait for a while before giving up.
 */
int uip_netport_read(void)
{
	struct vmm_mbuf *mbuf;
	struct dlist *node;
	unsigned long flags;
	u64 timeout = 50000000;
	struct uip_port_state *s = &uip_port_state;

	/* Keep trying until the RX buffer is non-empty */
	vmm_spin_lock_irqsave(&s->lock, flags);
	while (list_empty(&s->rxbuf)) {
		vmm_spin_unlock_irqrestore(&s->lock, flags);
		if (timeout) {
			/* Still time left for timeout so we wait */
			vmm_completion_wait_timeout(&s->rx_possible, &timeout);
		} else {
			/* We timed-out and buffer is still empty, so return */
			uip_len = 0;
			return uip_len;
		}
		vmm_spin_lock_irqsave(&s->lock, flags);
	}
	/* At this point we are sure rxbuf is non-empty, so we just
	 * dequeue a packet */
	node = list_pop(&s->rxbuf);
	mbuf = m_list_entry(node);
	vmm_spin_unlock_irqrestore(&s->lock, flags);
	if (mbuf == NULL) {
		vmm_panic("%s: mbuf is null\n", __func__);
	}
	if (!uip_buf) {
		vmm_panic("%s: uip_buf is null\n", __func__);
	}
	/* Copy the data from mbuf to uip_buf */
	uip_len = min(UIP_BUFSIZE, mbuf->m_pktlen);
	m_copydata(mbuf, 0, uip_len, uip_buf);
	/* Free the mbuf */
	m_freem(mbuf);
	return uip_len;
}
Example #11
struct vmm_vcpu *vmm_mutex_owner(struct vmm_mutex *mut)
{
	struct vmm_vcpu *ret;
	irq_flags_t flags;

	BUG_ON(!mut);

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);
	ret = mut->owner;
	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return ret;
}
Example #12
static void recursively_mark_NOTATTACHED(struct usb_device *udev)
{
	int i;
	irq_flags_t flags;

	/* Mark all children NOTATTACHED; the lock is dropped across each
	 * recursive call so children_lock is never taken recursively */
	vmm_spin_lock_irqsave(&udev->children_lock, flags);
	for (i = 0; i < udev->maxchild; ++i) {
		if (!udev->children[i]) {
			continue;
		}
		vmm_spin_unlock_irqrestore(&udev->children_lock, flags);
		recursively_mark_NOTATTACHED(udev->children[i]);
		vmm_spin_lock_irqsave(&udev->children_lock, flags);
	}
	vmm_spin_unlock_irqrestore(&udev->children_lock, flags);

	if (udev->state == USB_STATE_SUSPENDED) {
		udev->active_duration -= vmm_timer_timestamp();
	}

	udev->state = USB_STATE_NOTATTACHED;
}
Example #13
bool vmm_mutex_avail(struct vmm_mutex *mut)
{
	bool ret;
	irq_flags_t flags;

	BUG_ON(!mut);

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);
	ret = (mut->lock) ? FALSE : TRUE;
	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return ret;
}
Example #14
int __vmm_atomic_notifier_call(struct vmm_atomic_notifier_chain *nc,
				unsigned long val, void *v,
				int nr_to_call, int *nr_calls)
{
	irq_flags_t flags;
	int ret;

	vmm_spin_lock_irqsave(&nc->lock, flags);
	ret = notifier_call_chain(&nc->head, val, v, nr_to_call, nr_calls);
	vmm_spin_unlock_irqrestore(&nc->lock, flags);

	return ret;
}
Example #15
int vmm_blockdev_submit_request(struct vmm_blockdev *bdev,
				struct vmm_request *r)
{
	int rc;
	irq_flags_t flags;

	if (!bdev || !r || !bdev->rq) {
		rc = VMM_EFAIL;
		goto failed;
	}

	if ((r->type == VMM_REQUEST_WRITE) &&
	   !(bdev->flags & VMM_BLOCKDEV_RW)) {
		rc = VMM_EINVALID;
		goto failed;
	}

	if (bdev->num_blocks < r->bcnt) {
		rc = VMM_ERANGE;
		goto failed;
	}
	if ((r->lba < bdev->start_lba) ||
	    ((bdev->start_lba + bdev->num_blocks) <= r->lba)) {
		rc = VMM_ERANGE;
		goto failed;
	}
	if ((bdev->start_lba + bdev->num_blocks) < (r->lba + r->bcnt)) {
		rc = VMM_ERANGE;
		goto failed;
	}

	if (bdev->rq->make_request) {
		r->bdev = bdev;
		vmm_spin_lock_irqsave(&r->bdev->rq->lock, flags);
		rc = r->bdev->rq->make_request(r->bdev->rq, r);
		vmm_spin_unlock_irqrestore(&r->bdev->rq->lock, flags);
		if (rc) {
			r->bdev = NULL;
			return rc;
		}
	} else {
		rc = VMM_EFAIL;
		goto failed;
	}

	return VMM_OK;

failed:
	vmm_blockdev_fail_request(r);
	return rc;
}
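For orientation, here is a hedged sketch of how a caller might populate and submit a request through the checks above. Only the struct vmm_request fields actually referenced by vmm_blockdev_submit_request() (type, lba, bcnt, bdev) appear in the examples; VMM_REQUEST_READ and whatever data-buffer or completion fields a real request carries are assumptions.

/* Hedged sketch: queue a read of "cnt" blocks starting at "lba".
 * VMM_REQUEST_READ is assumed as the counterpart of VMM_REQUEST_WRITE;
 * attaching data buffers and completion callbacks is not shown. */
static int read_blocks(struct vmm_blockdev *bdev,
		       struct vmm_request *r, u64 lba, u32 cnt)
{
	r->type = VMM_REQUEST_READ;
	r->lba = lba;	/* must lie within [start_lba, start_lba + num_blocks) */
	r->bcnt = cnt;	/* request must not run past the device end */

	/* On success the request has been handed to the request queue;
	 * completion is reported asynchronously. */
	return vmm_blockdev_submit_request(bdev, r);
}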
Example #16
struct usb_device *usb_find_child(struct usb_device *hdev, int port1)
{
	irq_flags_t flags;
	struct usb_device *ret;

	if (port1 < 1 || port1 > hdev->maxchild)
		return NULL;

	vmm_spin_lock_irqsave(&hdev->children_lock, flags);
	ret = hdev->children[port1 - 1];
	vmm_spin_unlock_irqrestore(&hdev->children_lock, flags);

	return ret;
}
Example #17
void vmm_netport_free_xfer(struct vmm_netport *port, 
			   struct vmm_netport_xfer *xfer)
{
	irq_flags_t flags;

	if (!port || !xfer) {
		return;
	}

	vmm_spin_lock_irqsave(&port->free_list_lock, flags);
	list_add_tail(&xfer->head, &port->free_list);
	port->free_count++;
	vmm_spin_unlock_irqrestore(&port->free_list_lock, flags);
}
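Examples #8 and #17 form an alloc/free pair over the port's pre-allocated free list. A minimal sketch of the intended pairing follows; what goes into the xfer between allocation and release is not shown in the examples and is left as a placeholder.

/* Hedged sketch: borrow an xfer object from the port's free list,
 * use it, and return it. vmm_netport_alloc_xfer() returns NULL when
 * the free list is empty; VMM_ENOMEM is assumed as the error code. */
static int send_with_pooled_xfer(struct vmm_netport *port)
{
	struct vmm_netport_xfer *xfer;

	xfer = vmm_netport_alloc_xfer(port);
	if (!xfer) {
		return VMM_ENOMEM;
	}

	/* ... fill the xfer and hand it off here ... */

	vmm_netport_free_xfer(port, xfer);

	return VMM_OK;
}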
Example #18
int vmm_completion_complete(struct vmm_completion *cmpl)
{
	int rc = VMM_OK;
	irq_flags_t flags;

	BUG_ON(!cmpl);

	vmm_spin_lock_irqsave(&cmpl->wq.lock, flags);

	cmpl->done++;
	rc = __vmm_waitqueue_wakefirst(&cmpl->wq);

	vmm_spin_unlock_irqrestore(&cmpl->wq.lock, flags);

	return rc;
}
Example #19
bool vmm_completion_done(struct vmm_completion *cmpl)
{
	bool ret = TRUE;
	irq_flags_t flags;

	BUG_ON(!cmpl);

	vmm_spin_lock_irqsave(&cmpl->wq.lock, flags);

	if (!cmpl->done) {
		ret = FALSE;
	}

	vmm_spin_unlock_irqrestore(&cmpl->wq.lock, flags);

	return ret;
}
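Examples #18 and #19, together with the vmm_completion_wait_timeout() call in Example #10, suggest the usual producer/consumer pairing around a completion. A hedged sketch, assuming done_cmpl has been initialized elsewhere:

/* Hedged sketch: the producer signals completion of some work and the
 * consumer waits for it with a bounded timeout, as in Example #10. */
static struct vmm_completion done_cmpl;

static void producer_finish(void)
{
	/* Increments done and wakes the first waiter, if any */
	vmm_completion_complete(&done_cmpl);
}

static int consumer_wait(void)
{
	u64 timeout = 50000000;	/* same timeout value as Example #10 */

	/* Sleeps until the producer completes or the timeout expires */
	return vmm_completion_wait_timeout(&done_cmpl, &timeout);
}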
Example #20
/* Process IRQ asserted via device emulation framework */
static void pl190_irq_handle(u32 irq, int cpu, int level, void *opaque)
{
	irq_flags_t flags;
	struct pl190_emulator_state *s = opaque;

	irq -= s->base_irq;

	if (level == (s->level & (1u << irq))) {
		return;
	}

	vmm_spin_lock_irqsave(&s->lock, flags);

	pl190_set_irq(s, irq, level);

	vmm_spin_unlock_irqrestore(&s->lock, flags);
}
Example #21
int arch_vcpu_irq_assert(struct vmm_vcpu *vcpu, u32 irq_no, u64 reason)
{
	u64 hcr;
	bool update_hcr;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);

	hcr = arm_priv(vcpu)->hcr;
	update_hcr = FALSE;

	switch(irq_no) {
	case CPU_EXTERNAL_IRQ:
		hcr |= HCR_VI_MASK;
		/* VI bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	case CPU_EXTERNAL_FIQ:
		hcr |= HCR_VF_MASK;
		/* VF bit will be cleared on deassertion */
		update_hcr = TRUE;
		break;
	default:
		break;
	}

	if (update_hcr) {
		arm_priv(vcpu)->hcr = hcr;
		if (vcpu == vmm_scheduler_current_vcpu()) {
			msr(hcr_el2, hcr);
		}
	}

	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return VMM_OK;
}
Example #22
static void __notrace vmm_profile_exit(void *ip, void *parent_ip)
{
	int index;
	u64 time;
	irq_flags_t flags;

	if (pctrl.is_in_trace) {
		return;
	}

	pctrl.is_in_trace = 1;

	index = kallsyms_get_symbol_pos((long unsigned int)ip, NULL, NULL);

	// If this function was not being traced yet, we just
	// return as we have no start time to measure against
	if (pctrl.stat[index].is_tracing != 1) {
		goto out;
	}

	if (pctrl.stat[index].time_in == 0) {
		goto out;
	}

	vmm_spin_lock_irqsave(&pctrl.lock, flags);

	time = vmm_timer_timestamp_for_profile();

	if (pctrl.stat[index].time_in < time) {
		pctrl.stat[index].time += time - pctrl.stat[index].time_in;
	} else {
		// Timestamp went backwards; ignore the negative delta
	}
	vmm_spin_unlock_irqrestore(&pctrl.lock, flags);

 out:
	pctrl.stat[index].time_in = 0;

	// We are no longer tracing this function
	pctrl.stat[index].is_tracing = 0;

	pctrl.is_in_trace = 0;
}
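The enter/exit pair in Examples #5 and #22 receives (ip, parent_ip) from some instrumentation hook that is not part of these examples. One common mechanism with a matching signature is GCC's -finstrument-functions, whose __cyg_profile_func_enter/exit callbacks take (this_fn, call_site); the forwarding below is only an assumption about how the hooks could be wired.

/* Hedged sketch: forward GCC -finstrument-functions callbacks to the
 * profiler. This wiring is an assumption; only vmm_profile_enter() and
 * vmm_profile_exit() themselves appear in the examples above. */
void __notrace __cyg_profile_func_enter(void *this_fn, void *call_site)
{
	vmm_profile_enter(this_fn, call_site);
}

void __notrace __cyg_profile_func_exit(void *this_fn, void *call_site)
{
	vmm_profile_exit(this_fn, call_site);
}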
Example #23
int vmm_mutex_unlock(struct vmm_mutex *mut)
{
	int rc = VMM_OK;
	irq_flags_t flags;

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	if (mut->lock && mut->owner == vmm_scheduler_current_vcpu()) {
		mut->lock = 0;
		mut->owner = NULL;
		rc = __vmm_waitqueue_wakeall(&mut->wq);
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return rc;
}
Example #24
int vmm_blockdev_abort_request(struct vmm_request *r)
{
	int rc;
	irq_flags_t flags;

	if (!r || !r->bdev || !r->bdev->rq) {
		return VMM_EFAIL;
	}

	if (r->bdev->rq->abort_request) {
		vmm_spin_lock_irqsave(&r->bdev->rq->lock, flags);
		rc = r->bdev->rq->abort_request(r->bdev->rq, r);
		vmm_spin_unlock_irqrestore(&r->bdev->rq->lock, flags);
		if (rc) {
			return rc;
		}
	}

	return vmm_blockdev_fail_request(r);
}
Example #25
int vmm_blockdev_flush_cache(struct vmm_blockdev *bdev)
{
	int rc;
	irq_flags_t flags;

	if (!bdev || !bdev->rq) {
		return VMM_EFAIL;
	}

	if (bdev->rq->flush_cache) {
		vmm_spin_lock_irqsave(&bdev->rq->lock, flags);
		rc = bdev->rq->flush_cache(bdev->rq);
		vmm_spin_unlock_irqrestore(&bdev->rq->lock, flags);
		if (rc) {
			return rc;
		}
	}

	return VMM_OK;
}
Example #26
void __vmm_mutex_cleanup(struct vmm_vcpu *vcpu,
			 struct vmm_vcpu_resource *vcpu_res)
{
	irq_flags_t flags;
	struct vmm_mutex *mut = container_of(vcpu_res, struct vmm_mutex, res);

	if (!vcpu || !vcpu_res) {
		return;
	}

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	if (mut->lock && mut->owner == vcpu) {
		mut->lock = 0;
		mut->owner = NULL;
		__vmm_waitqueue_wakeall(&mut->wq);
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);
}
Example #27
void usb_set_device_state(struct usb_device *udev,
			  enum usb_device_state new_state)
{
	irq_flags_t flags;

	vmm_spin_lock_irqsave(&device_state_lock, flags);
	if (udev->state == USB_STATE_NOTATTACHED) {
		;	/* do nothing */
	} else if (new_state != USB_STATE_NOTATTACHED) {
		if (udev->state == USB_STATE_SUSPENDED &&
			new_state != USB_STATE_SUSPENDED)
			udev->active_duration -= vmm_timer_timestamp();
		else if (new_state == USB_STATE_SUSPENDED &&
				udev->state != USB_STATE_SUSPENDED)
			udev->active_duration += vmm_timer_timestamp();
		udev->state = new_state;
	} else {
		recursively_mark_NOTATTACHED(udev);
	}
	vmm_spin_unlock_irqrestore(&device_state_lock, flags);
}
Example #28
static int mutex_lock_common(struct vmm_mutex *mut, u64 *timeout)
{
	int rc = VMM_OK;
	irq_flags_t flags;
	struct vmm_vcpu *current_vcpu = vmm_scheduler_current_vcpu();

	BUG_ON(!mut);
	BUG_ON(!vmm_scheduler_orphan_context());

	vmm_spin_lock_irqsave(&mut->wq.lock, flags);

	while (mut->lock) {
		/*
		 * If the VCPU owning the lock tries to acquire it again,
		 * let it acquire the lock multiple times (as per POSIX
		 * recursive mutex semantics).
		 */
		if (mut->owner == current_vcpu) {
			break;
		}
		rc = __vmm_waitqueue_sleep(&mut->wq, timeout);
		if (rc) {
			/* Timeout or some other failure */
			break;
		}
	}
	if (rc == VMM_OK) {
		if (!mut->lock) {
			mut->lock = 1;
			vmm_manager_vcpu_resource_add(current_vcpu,
						      &mut->res);
			mut->owner = current_vcpu;
		} else {
			mut->lock++;
		}
	}

	vmm_spin_unlock_irqrestore(&mut->wq.lock, flags);

	return rc;
}
Example #29
static void vsdaemon_flush_tx_buffer(struct vsdaemon_telnet *tnet)
{
	int rc;
	u32 tx_count;
	irq_flags_t flags;
	u8 tx_buf[VSDAEMON_MAX_FLUSH_SIZE];

	while (1) {
		/* Lock connection state */
		vmm_spin_lock_irqsave(&tnet->tx_buf_lock, flags);

		/* Get data from Tx buffer */
		tx_count = 0;
		while (tnet->tx_buf_count &&
		       (tx_count < VSDAEMON_MAX_FLUSH_SIZE)) {
			tx_buf[tx_count] = tnet->tx_buf[tnet->tx_buf_head];
			tnet->tx_buf_head++;
			if (tnet->tx_buf_head >= VSDAEMON_TXBUF_SIZE) {
				tnet->tx_buf_head = 0;
			}
			tnet->tx_buf_count--;
			tx_count++;
		}

		/* Unlock connection state */
		vmm_spin_unlock_irqrestore(&tnet->tx_buf_lock, flags);

		/* Transmit the pending Tx data */
		if (tx_count && tnet->active_sk) {
			rc = netstack_socket_write(tnet->active_sk, 
						   &tx_buf[0], tx_count);
			if (rc) {
				return;
			}
		} else {
			return;
		}
	}
}
Example #30
int arch_vcpu_irq_execute(struct vmm_vcpu *vcpu,
			  arch_regs_t *regs, 
			  u32 irq_no, u64 reason)
{
	int rc;
	irq_flags_t flags;

	/* Skip IRQ & FIQ if VGIC available */
	if (arm_vgic_avail(vcpu) &&
	    ((irq_no == CPU_EXTERNAL_IRQ) || 
	     (irq_no == CPU_EXTERNAL_FIQ))) {
		return VMM_OK;
	}

	/* Undefined, Data abort, and Prefetch abort 
	 * can only be emulated in normal context.
	 */
	switch(irq_no) {
	case CPU_UNDEF_INST_IRQ:
		rc = cpu_vcpu_inject_undef(vcpu, regs);
		break;
	case CPU_PREFETCH_ABORT_IRQ:
		rc = cpu_vcpu_inject_pabt(vcpu, regs);
		break;
	case CPU_DATA_ABORT_IRQ:
		rc = cpu_vcpu_inject_dabt(vcpu, regs, (virtual_addr_t)reason);
		break;
	default:
		rc = VMM_OK;
		break;
	}

	vmm_spin_lock_irqsave(&arm_priv(vcpu)->hcr_lock, flags);
	msr(hcr_el2, arm_priv(vcpu)->hcr);
	vmm_spin_unlock_irqrestore(&arm_priv(vcpu)->hcr_lock, flags);

	return rc;
}