Example 1
void evtchn_device_upcall(int port)
{
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	/* Mask the port so it cannot fire again until userspace re-enables it,
	 * then clear the pending bit. */
	mask_evtchn(port);
	clear_evtchn(port);

	if ((u = port_user[port]) != NULL) {
		if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
			u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
			wmb(); /* Ensure ring contents visible */
			if (u->ring_cons == u->ring_prod++) {
				wake_up_interruptible(&u->evtchn_wait);
				kill_fasync(&u->evtchn_async_queue,
					    SIGIO, POLL_IN);
			}
		} else {
			u->ring_overflow = 1;
		}
	}

	spin_unlock(&port_user_lock);
}
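The upcall and interrupt handlers in this section all push a port number into a small power-of-two ring and wake any sleeping reader. Below is a hedged sketch of the per-user state they assume; field and macro names follow the examples, the ring is sized to one page to match the PAGE_SIZE clamp in the read() paths later on, and the bind/lock bookkeeping the real structures carry is omitted.

/*
 * Hedged sketch of the per-user ring state used by the handlers in this
 * section.  Includes are indicative; the exact layout varies by version.
 */
#include <linux/fs.h>		/* struct fasync_struct */
#include <linux/mutex.h>
#include <linux/wait.h>
#include <xen/interface/event_channel.h>	/* evtchn_port_t */

#define EVTCHN_RING_SIZE     (PAGE_SIZE / sizeof(evtchn_port_t))	/* one page of ports */
#define EVTCHN_RING_MASK(_i) ((_i) & (EVTCHN_RING_SIZE - 1))		/* power-of-two wrap */

struct per_user_data {
	evtchn_port_t *ring;		/* queued port numbers */
	unsigned int ring_cons;		/* free-running consumer index */
	unsigned int ring_prod;		/* free-running producer index */
	unsigned int ring_overflow;	/* set when an event had to be dropped */
	struct mutex ring_cons_mutex;	/* serialises readers */
	wait_queue_head_t evtchn_wait;	/* blocked read()ers sleep here */
	struct fasync_struct *evtchn_async_queue;	/* SIGIO (fasync) delivery */
};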
Example 2
static irqreturn_t evtchn_interrupt(int irq, void *data)
{
	unsigned int port = (unsigned long)data;
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	u = get_port_user(port);

	WARN(!get_port_enabled(port),
	     "Interrupt for port %d, but apparently not enabled; per-user %p\n",
	     port, u);

	/* Keep this IRQ off until userspace has consumed the event and
	 * re-enables the port. */
	disable_irq_nosync(irq);
	set_port_enabled(port, false);

	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else {
		u->ring_overflow = 1;
	}

	spin_unlock(&port_user_lock);

	return IRQ_HANDLED;
}
Example 3
irqreturn_t evtchn_interrupt(int irq, void *data)
{
	unsigned int port = (unsigned long)data;
	struct per_user_data *u;

	spin_lock(&port_user_lock);

	u = port_user[port];

	disable_irq_nosync(irq);

	if ((u->ring_prod - u->ring_cons) < EVTCHN_RING_SIZE) {
		u->ring[EVTCHN_RING_MASK(u->ring_prod)] = port;
		wmb(); /* Ensure ring contents visible */
		if (u->ring_cons == u->ring_prod++) {
			wake_up_interruptible(&u->evtchn_wait);
			kill_fasync(&u->evtchn_async_queue,
				    SIGIO, POLL_IN);
		}
	} else {
		u->ring_overflow = 1;
	}

	spin_unlock(&port_user_lock);

	return IRQ_HANDLED;
}
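Examples 2 and 3 run as ordinary interrupt handlers rather than being called directly from the event-channel upcall, so each user-bound port needs a handler registration somewhere. The fragment below is a hedged sketch of how such a handler is typically attached, assuming the Linux Xen events API (bind_evtchn_to_irqhandler()); the device name and flags are placeholders, and the per-user bookkeeping around the call is not shown.

#include <linux/interrupt.h>
#include <xen/events.h>

static int evtchn_bind_port(evtchn_port_t port)
{
	int irq;

	/*
	 * bind_evtchn_to_irqhandler() allocates a Linux IRQ for the event
	 * channel and installs the handler; the port travels through dev_id,
	 * matching the (unsigned long)data cast at the top of the handlers
	 * above.  The "evtchn" name and the zero flags are placeholders.
	 */
	irq = bind_evtchn_to_irqhandler(port, evtchn_interrupt, 0,
					"evtchn", (void *)(unsigned long)port);
	if (irq < 0)
		return irq;

	return 0;
}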
Example 4
void
evtchn_device_upcall()
{
	struct evtsoftdata *ep;
	int port;

	/*
	 * This is quite gross: the evtchn that led to this invocation was
	 * left in a per-cpu mailbox, so retrieve it now.  We do this because
	 * the interface doesn't offer us a way to pass a dynamic argument up
	 * through the generic interrupt service layer.  The mailbox is safe
	 * since we run either with interrupts disabled or non-preemptible
	 * until we reach here.
	 */
	port = CPU->cpu_m.mcpu_ec_mbox;
	ASSERT(port != 0);
	CPU->cpu_m.mcpu_ec_mbox = 0;
	ec_clear_evtchn(port);
	mutex_enter(&port_user_lock);

	if ((ep = port_user[port]) != NULL) {
		mutex_enter(&ep->evtchn_lock);
		if ((ep->ring_prod - ep->ring_cons) < EVTCHN_RING_SIZE) {
			ep->ring[EVTCHN_RING_MASK(ep->ring_prod)] = port;
			/*
			 * Wake up reader when ring goes non-empty
			 */
			if (ep->ring_cons == ep->ring_prod++) {
				cv_signal(&ep->evtchn_wait);
				mutex_exit(&ep->evtchn_lock);
				pollwakeup(&ep->evtchn_pollhead,
				    POLLIN | POLLRDNORM);
				goto done;
			}
		} else {
			ep->ring_overflow = 1;
		}
		mutex_exit(&ep->evtchn_lock);
	}

done:
	mutex_exit(&port_user_lock);
}
Example 5
void 
evtchn_device_upcall(int port)
{
	mtx_lock(&upcall_lock);

	mask_evtchn(port);
	clear_evtchn(port);

	if ( ring != NULL ) {
		if ( (ring_prod - ring_cons) < EVTCHN_RING_SIZE ) {
			ring[EVTCHN_RING_MASK(ring_prod)] = (uint16_t)port;
			if ( ring_cons == ring_prod++ ) {
				wakeup(evtchn_waddr);
			}
		}
		else {
			ring_overflow = 1;
		}
	}

	mtx_unlock(&upcall_lock);
}
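All five producer paths above rely on the same index discipline: ring_prod and ring_cons are free-running unsigned counters that are only masked when used as array indices, so (ring_prod - ring_cons) is the fill level even after the counters wrap around, and ring_cons == ring_prod++ is true exactly on the empty-to-non-empty transition, the only point at which a sleeping reader needs to be woken. The standalone demonstration below uses a toy ring size, no locking, and hypothetical names.

#include <assert.h>
#include <stdio.h>

#define RING_SIZE 8u
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

int main(void)
{
	unsigned int prod = 0xfffffffeu, cons = 0xfffffffeu; /* start near the 2^32 wrap */
	unsigned int slot, fill;
	int i;

	for (i = 0; i < 5; i++) {
		fill = prod - cons;		/* fill level, correct across the wrap */
		assert(fill < RING_SIZE);	/* producer-side "room available" check */
		slot = RING_MASK(prod);		/* only the index is masked */
		if (cons == prod++)		/* empty -> non-empty: wake the reader */
			printf("slot %u queued, reader woken\n", slot);
		else
			printf("slot %u queued, reader already awake\n", slot);
	}
	printf("fill level now: %u\n", prod - cons);
	return 0;
}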
Example 6
static ssize_t evtchn_read(struct file *file, char __user *buf,
			   size_t count, loff_t *ppos)
{
	int rc;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct per_user_data *u = file->private_data;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	if (count == 0)
		return 0;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	for (;;) {
		mutex_lock(&u->ring_cons_mutex);

		rc = -EFBIG;
		if (u->ring_overflow)
			goto unlock_out;

		c = u->ring_cons;
		p = u->ring_prod;
		if (c != p)
			break;

		mutex_unlock(&u->ring_cons_mutex);

		if (file->f_flags & O_NONBLOCK)
			return -EAGAIN;

		rc = wait_event_interruptible(u->evtchn_wait,
					      u->ring_cons != u->ring_prod);
		if (rc)
			return rc;
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
			sizeof(evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof(evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	rc = -EFAULT;
	rmb(); /* Ensure that we see the port before we copy it. */
	if (copy_to_user(buf, &u->ring[EVTCHN_RING_MASK(c)], bytes1) ||
	    ((bytes2 != 0) &&
	     copy_to_user(&buf[bytes1], &u->ring[0], bytes2)))
		goto unlock_out;

	u->ring_cons += (bytes1 + bytes2) / sizeof(evtchn_port_t);
	rc = bytes1 + bytes2;

 unlock_out:
	mutex_unlock(&u->ring_cons_mutex);
	return rc;
}
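The read() side drains the ring in at most two copies because the pending range may wrap past the end of the storage. Since the indices are free-running and the fill level never exceeds EVTCHN_RING_SIZE, the test ((c ^ p) & EVTCHN_RING_SIZE) is non-zero exactly when the producer has crossed a ring boundary that the consumer has not, i.e. when the data wraps. The standalone demonstration below reproduces the split computation with a toy ring and hypothetical names, using 16-bit ports as in Example 7.

#include <stdint.h>
#include <stdio.h>

#define RING_SIZE 8u
#define RING_MASK(i) ((i) & (RING_SIZE - 1))

static void split(unsigned int c, unsigned int p)
{
	unsigned int bytes1, bytes2;

	if (((c ^ p) & RING_SIZE) != 0) {	/* pending range wraps the ring */
		bytes1 = (RING_SIZE - RING_MASK(c)) * sizeof(uint16_t);
		bytes2 = RING_MASK(p) * sizeof(uint16_t);
	} else {				/* one contiguous run */
		bytes1 = (p - c) * sizeof(uint16_t);
		bytes2 = 0;
	}
	printf("cons=%u prod=%u -> %u bytes from slot %u, then %u bytes from slot 0\n",
	       c, p, bytes1, RING_MASK(c), bytes2);
}

int main(void)
{
	split(2, 5);	/* no wrap: one chunk of three ports */
	split(6, 10);	/* wraps: two ports at the tail, then two from the start */
	return 0;
}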
Example 7
static int
evtchn_read(struct cdev *dev, struct uio *uio, int ioflag)
{
	int rc;
	unsigned int count, c, p, sst = 0, bytes1 = 0, bytes2 = 0;
	count = uio->uio_resid;

	count &= ~1; /* even number of bytes */

	if ( count == 0 )
	{
		rc = 0;
		goto out;
	}

	if ( count > PAGE_SIZE )
		count = PAGE_SIZE;

	for ( ; ; ) {
		if ( (c = ring_cons) != (p = ring_prod) )
			break;

		if ( ring_overflow ) {
			rc = EFBIG;
			goto out;
		}

		if (sst != 0) {
			rc = EINTR;
			goto out;
		}

		/* PCATCH == check for signals before and after sleeping 
		 * PWAIT == priority of waiting on resource 
		 */
		sst = tsleep(evtchn_waddr, PWAIT|PCATCH, "evchwt", 10);
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if ( ((c ^ p) & EVTCHN_RING_SIZE) != 0 ) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) * sizeof(uint16_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof(uint16_t);
	}
	else {
		bytes1 = (p - c) * sizeof(uint16_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if ( bytes1 > count ) {
		bytes1 = count;
		bytes2 = 0;
	}
	else if ( (bytes1 + bytes2) > count ) {
		bytes2 = count - bytes1;
	}
    
	/*
	 * Keeping this around as its replacement is not equivalent:
	 *	copyout(&ring[0], &buf[bytes1], bytes2)
	 */
	if ( uiomove(&ring[EVTCHN_RING_MASK(c)], bytes1, uio) ||
	     ((bytes2 != 0) && uiomove(&ring[0], bytes2, uio)) )
	{
		rc = EFAULT;
		goto out;
	}

	ring_cons += (bytes1 + bytes2) / sizeof(uint16_t);

	rc = bytes1 + bytes2;

 out:
	return rc;
}
Example 8
/* ARGSUSED */
static int
evtchndrv_read(dev_t dev, struct uio *uio, cred_t *cr)
{
	int rc = 0;
	ssize_t count;
	unsigned int c, p, bytes1 = 0, bytes2 = 0;
	struct evtsoftdata *ep;
	minor_t minor = getminor(dev);

	if (secpolicy_xvm_control(cr))
		return (EPERM);

	ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

	/* Whole number of ports. */
	count = uio->uio_resid;
	count &= ~(sizeof (evtchn_port_t) - 1);

	if (count == 0)
		return (0);

	if (count > PAGESIZE)
		count = PAGESIZE;

	mutex_enter(&ep->evtchn_lock);
	for (;;) {
		if (ep->ring_overflow) {
			rc = EFBIG;
			goto done;
		}

		if ((c = ep->ring_cons) != (p = ep->ring_prod))
			break;

		if (uio->uio_fmode & O_NONBLOCK) {
			rc = EAGAIN;
			goto done;
		}

		if (cv_wait_sig(&ep->evtchn_wait, &ep->evtchn_lock) == 0) {
			rc = EINTR;
			goto done;
		}
	}

	/* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
	if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
		bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
		    sizeof (evtchn_port_t);
		bytes2 = EVTCHN_RING_MASK(p) * sizeof (evtchn_port_t);
	} else {
		bytes1 = (p - c) * sizeof (evtchn_port_t);
		bytes2 = 0;
	}

	/* Truncate chunks according to caller's maximum byte count. */
	if (bytes1 > count) {
		bytes1 = count;
		bytes2 = 0;
	} else if ((bytes1 + bytes2) > count) {
		bytes2 = count - bytes1;
	}

	if (uiomove(&ep->ring[EVTCHN_RING_MASK(c)], bytes1, UIO_READ, uio) ||
	    ((bytes2 != 0) && uiomove(&ep->ring[0], bytes2, UIO_READ, uio))) {
		rc = EFAULT;
		goto done;
	}

	ep->ring_cons += (bytes1 + bytes2) / sizeof (evtchn_port_t);
done:
	mutex_exit(&ep->evtchn_lock);
	return (rc);
}
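For completeness, below is a hypothetical userspace consumer for the Linux-style read() path of Example 6. The /dev/xen/evtchn device path, the 32-bit port width, and the assumption that a port has already been bound through the driver's ioctl interface are all assumptions not taken from the examples above (Example 7, for instance, uses 16-bit ports).

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

typedef uint32_t evtchn_port_t;		/* assumed width; older drivers use 16 bits */

int main(void)
{
	evtchn_port_t ports[64];
	ssize_t n, i;
	int fd = open("/dev/xen/evtchn", O_RDWR);	/* assumed device path */

	if (fd < 0) {
		perror("open");
		return 1;
	}

	/*
	 * Blocks until at least one port has been queued by the interrupt
	 * side; the driver only returns whole ports, so n is a multiple of
	 * sizeof(evtchn_port_t).
	 */
	n = read(fd, ports, sizeof(ports));
	if (n < 0) {
		perror("read");
		close(fd);
		return 1;
	}

	for (i = 0; i < n / (ssize_t)sizeof(evtchn_port_t); i++)
		printf("event on port %u\n", (unsigned int)ports[i]);

	close(fd);
	return 0;
}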