Example #1
static void
xenevt_free(struct xenevt_d *d)
{
	int i;
	KASSERT(mutex_owned(&devevent_lock));
	KASSERT(mutex_owned(&d->lock));

	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (devevent[i] == d) {
			evtchn_op_t op = { .cmd = 0 };
			int error;

			hypervisor_mask_event(i);
			xen_atomic_clear_bit(&d->ci->ci_evtmask[0], i);
			devevent[i] = NULL;

			op.cmd = EVTCHNOP_close;
			op.u.close.port = i;
			if ((error = HYPERVISOR_event_channel_op(&op))) {
				printf("xenevt_fclose: error %d from "
				    "hypervisor\n", -error);
			}
		}
	}
	mutex_exit(&d->lock);
	seldestroy(&d->sel);
	cv_destroy(&d->cv);
	mutex_destroy(&d->lock);
	free(d, M_DEVBUF);
}
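The two KASSERT() calls at the top of xenevt_free() document its locking contract: the global devevent_lock and the per-instance d->lock must both be held on entry, and the function drops and destroys d->lock itself while leaving devevent_lock to the caller. A hypothetical caller, sketched here only to make that contract explicit (it is not part of the driver), might look like this:

static void
xenevt_destroy_instance(struct xenevt_d *d)	/* hypothetical helper, not from xenevt.c */
{
	/* Both locks must be held before xenevt_free() runs its KASSERTs. */
	mutex_enter(&devevent_lock);
	mutex_enter(&d->lock);

	/* Consumes d: d->lock is released and destroyed, d is freed. */
	xenevt_free(d);

	/* devevent_lock is still the caller's to drop. */
	mutex_exit(&devevent_lock);
}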
Example #2
void
xb_suspend_comms(device_t dev)
{
	int evtchn;

	evtchn = xen_start_info.store_evtchn;

	hypervisor_mask_event(evtchn);
	event_remove_handler(evtchn, wake_waiting, NULL);
	aprint_verbose_dev(dev, "removed event channel %d\n", evtchn);
}
Example #3
static bool
xencons_suspend(device_t dev, const pmf_qual_t *qual)
{
	int evtch;

	/* dom0 console should not be suspended */
	if (!xendomain_is_dom0()) {
		evtch = xen_start_info.console_evtchn;
		hypervisor_mask_event(evtch);
		if (event_remove_handler(evtch, xencons_handler,
		    xencons_console_device) != 0) {
			aprint_error_dev(dev,
			    "can't remove handler: xencons_handler\n");
		}

		aprint_verbose_dev(dev, "removed event channel %d\n", evtch);
	}

	return true;
}
Example #4
static int
xenevt_fioctl(struct file *fp, u_long cmd, void *addr)
{
	struct xenevt_d *d = fp->f_data;
	evtchn_op_t op = { .cmd = 0 };
	int error;

	switch (cmd) {
	case EVTCHN_RESET:
	case IOCTL_EVTCHN_RESET:
		mutex_enter(&d->lock);
		d->ring_read = d->ring_write = 0;
		d->flags = 0;
		mutex_exit(&d->lock);
		break;
	case IOCTL_EVTCHN_BIND_VIRQ:
	{
		struct ioctl_evtchn_bind_virq *bind_virq = addr;
		op.cmd = EVTCHNOP_bind_virq;
		op.u.bind_virq.virq = bind_virq->virq;
		op.u.bind_virq.vcpu = 0;
		if ((error = HYPERVISOR_event_channel_op(&op))) {
			printf("IOCTL_EVTCHN_BIND_VIRQ failed: virq %d error %d\n", bind_virq->virq, error);
			return -error;
		}
		bind_virq->port = op.u.bind_virq.port;
		mutex_enter(&devevent_lock);
		KASSERT(devevent[bind_virq->port] == NULL);
		devevent[bind_virq->port] = d;
		mutex_exit(&devevent_lock);
		xen_atomic_set_bit(&d->ci->ci_evtmask[0], bind_virq->port);
		hypervisor_unmask_event(bind_virq->port);
		break;
	}
	case IOCTL_EVTCHN_BIND_INTERDOMAIN:
	{
		struct ioctl_evtchn_bind_interdomain *bind_intd = addr;
		op.cmd = EVTCHNOP_bind_interdomain;
		op.u.bind_interdomain.remote_dom = bind_intd->remote_domain;
		op.u.bind_interdomain.remote_port = bind_intd->remote_port;
		if ((error = HYPERVISOR_event_channel_op(&op)))
			return -error;
		bind_intd->port = op.u.bind_interdomain.local_port;
		mutex_enter(&devevent_lock);
		KASSERT(devevent[bind_intd->port] == NULL);
		devevent[bind_intd->port] = d;
		mutex_exit(&devevent_lock);
		xen_atomic_set_bit(&d->ci->ci_evtmask[0], bind_intd->port);
		hypervisor_unmask_event(bind_intd->port);
		break;
	}
	case IOCTL_EVTCHN_BIND_UNBOUND_PORT:
	{
		struct ioctl_evtchn_bind_unbound_port *bind_unbound = addr;
		op.cmd = EVTCHNOP_alloc_unbound;
		op.u.alloc_unbound.dom = DOMID_SELF;
		op.u.alloc_unbound.remote_dom = bind_unbound->remote_domain;
		if ((error = HYPERVISOR_event_channel_op(&op)))
			return -error;
		bind_unbound->port = op.u.alloc_unbound.port;
		mutex_enter(&devevent_lock);
		KASSERT(devevent[bind_unbound->port] == NULL);
		devevent[bind_unbound->port] = d;
		mutex_exit(&devevent_lock);
		xen_atomic_set_bit(&d->ci->ci_evtmask[0], bind_unbound->port);
		hypervisor_unmask_event(bind_unbound->port);
		break;
	}
	case IOCTL_EVTCHN_UNBIND:
	{
		struct ioctl_evtchn_unbind *unbind = addr;

		if (unbind->port >= NR_EVENT_CHANNELS)
			return EINVAL;
		mutex_enter(&devevent_lock);
		if (devevent[unbind->port] != d) {
			mutex_exit(&devevent_lock);
			return ENOTCONN;
		}
		devevent[unbind->port] = NULL;
		mutex_exit(&devevent_lock);
		hypervisor_mask_event(unbind->port);
		xen_atomic_clear_bit(&d->ci->ci_evtmask[0], unbind->port);
		op.cmd = EVTCHNOP_close;
		op.u.close.port = unbind->port;
		if ((error = HYPERVISOR_event_channel_op(&op)))
			return -error;
		break;
	}
	case IOCTL_EVTCHN_NOTIFY:
	{
		struct ioctl_evtchn_notify *notify = addr;

		if (notify->port >= NR_EVENT_CHANNELS)
			return EINVAL;
		mutex_enter(&devevent_lock);
		if (devevent[notify->port] != d) {
			mutex_exit(&devevent_lock);
			return ENOTCONN;
		}
		hypervisor_notify_via_evtchn(notify->port);
		mutex_exit(&devevent_lock);
		break;
	}
	case FIONBIO:
		break;
	default:
		return EINVAL;
	}
	return 0;
}

/*
 * Support for poll() system call
 *
 * Return true if the specified operation will not block indefinitely.
 */

static int
xenevt_fpoll(struct file *fp, int events)
{
	struct xenevt_d *d = fp->f_data;
	int revents = events & (POLLOUT | POLLWRNORM); /* we can always write */

	mutex_enter(&d->lock);
	if (events & (POLLIN | POLLRDNORM)) {
		if (d->ring_read != d->ring_write) {
			revents |= events & (POLLIN | POLLRDNORM);
		} else {
			/* Record that someone is waiting */
			selrecord(curlwp, &d->sel);
		}
	}
	mutex_exit(&d->lock);
	return (revents);
}
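The xenevt_fioctl() and xenevt_fpoll() handlers in this example back a character device that guest userland can drive directly. The sketch below is a hypothetical illustration of that flow from a user process: it allocates an unbound port, waits for an event with poll(), and reads the pending port number back. The /dev/xenevt path, the <xen/xenio.h> header location, and the 16-bit width of the records returned by read() are assumptions made for the sketch, not facts taken from this page.

#include <sys/ioctl.h>

#include <err.h>
#include <fcntl.h>
#include <poll.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

#include <xen/xenio.h>	/* assumed home of IOCTL_EVTCHN_* and the ioctl structs */

int
main(void)
{
	struct ioctl_evtchn_bind_unbound_port bind = { .remote_domain = 0 };
	struct pollfd pfd;
	uint16_t port;		/* assumed width of one ring record */
	int fd;

	fd = open("/dev/xenevt", O_RDWR);	/* assumed device node */
	if (fd == -1)
		err(1, "open");

	/* Mirrors the IOCTL_EVTCHN_BIND_UNBOUND_PORT case of xenevt_fioctl(). */
	if (ioctl(fd, IOCTL_EVTCHN_BIND_UNBOUND_PORT, &bind) == -1)
		err(1, "IOCTL_EVTCHN_BIND_UNBOUND_PORT");
	printf("bound port %u\n", (unsigned int)bind.port);

	/* xenevt_fpoll() reports POLLIN once ring_read != ring_write. */
	pfd.fd = fd;
	pfd.events = POLLIN;
	if (poll(&pfd, 1, -1) == -1)
		err(1, "poll");

	/* Drain one pending port number from the device's ring. */
	if (read(fd, &port, sizeof(port)) == (ssize_t)sizeof(port))
		printf("event on port %u\n", (unsigned int)port);

	close(fd);
	return 0;
}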