/*
 * Final teardown of a xenevt descriptor: close every event channel still
 * bound to it, then destroy its synchronization objects and free it.
 *
 * Locking: called with both devevent_lock and d->lock held; d->lock is
 * released (and then destroyed) here.  devevent_lock is NOT released by
 * this function — the caller owns it.
 */
static void xenevt_free(struct xenevt_d *d) {
	int i;
	KASSERT(mutex_owned(&devevent_lock));
	KASSERT(mutex_owned(&d->lock));
	for (i = 0; i < NR_EVENT_CHANNELS; i++ ) {
		if (devevent[i] == d) {
			evtchn_op_t op = { .cmd = 0 };
			int error;

			/*
			 * Stop delivery before unhooking the binding, so
			 * no event can fire for a half-torn-down channel.
			 */
			hypervisor_mask_event(i);
			xen_atomic_clear_bit(&d->ci->ci_evtmask[0], i);
			devevent[i] = NULL;

			/* Tell the hypervisor to close the channel. */
			op.cmd = EVTCHNOP_close;
			op.u.close.port = i;
			if ((error = HYPERVISOR_event_channel_op(&op))) {
				/* Hypercall errors are negative; print positive. */
				printf("xenevt_fclose: error %d from "
				    "hypervisor\n", -error);
			}
		}
	}
	mutex_exit(&d->lock);
	seldestroy(&d->sel);
	cv_destroy(&d->cv);
	mutex_destroy(&d->lock);
	free(d, M_DEVBUF);
}
/*
 * Scan the Xen shared-info page for pending event channels and record
 * them as pending interrupts (via evt_set_pending) without actually
 * dispatching handlers.
 *
 * Returns nonzero when at least one event was found pending (the value
 * is accumulated into 'ret' by evt_set_pending).
 */
int stipending(void) {
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int ret;

	ret = 0;
	ci = curcpu();
	vci = ci->ci_vcpu;

#if 0
	if (HYPERVISOR_shared_info->events)
		printf("stipending events %08lx mask %08lx ilevel %d\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel);
#endif

#ifdef EARLY_DEBUG_EVENT
	/* Service the early-boot debug event channel directly. */
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	/*
	 * we're only called after STIC, so we know that we'll have to
	 * STI at the end
	 */
	while (vci->evtchn_upcall_pending) {
		/*
		 * Clear the upcall flag inside the cli()/sti() window,
		 * then walk the pending-selector bits; re-checking the
		 * flag catches events that arrive during the walk.
		 */
		cli();
		vci->evtchn_upcall_pending = 0;
		evt_iterate_bits(&vci->evtchn_pending_sel,
		    s->evtchn_pending, s->evtchn_mask,
		    evt_set_pending, &ret);
		sti();
	}

#if 0
	if (ci->ci_ipending & 0x1)
		printf("stipending events %08lx mask %08lx ilevel %d ipending %08x\n",
		    HYPERVISOR_shared_info->events,
		    HYPERVISOR_shared_info->events_mask, ci->ci_ilevel,
		    ci->ci_ipending);
#endif

	return (ret);
}
/*
 * Xen event upcall entry point: dispatch all pending event channels to
 * their handlers (via evt_do_hypervisor_callback), looping until the
 * vcpu's upcall-pending flag stays clear.
 *
 * 'regs' is the trapframe of the interrupted context, passed through to
 * the per-event dispatch callback.
 */
void do_hypervisor_callback(struct intrframe *regs) {
	volatile shared_info_t *s = HYPERVISOR_shared_info;
	struct cpu_info *ci;
	volatile struct vcpu_info *vci;
	int level;

	ci = curcpu();
	vci = ci->ci_vcpu;
	/* Remember entry IPL so we can verify it is restored on exit. */
	level = ci->ci_ilevel;

#ifdef EARLY_DEBUG_EVENT
	/* Service the early-boot debug event channel directly. */
	if (xen_atomic_test_bit(&s->evtchn_pending[0], debug_port)) {
		xen_debug_handler(NULL);
		xen_atomic_clear_bit(&s->evtchn_pending[0], debug_port);
	}
#endif

	/*
	 * Re-check the upcall flag after each sweep: new events may be
	 * posted while we are dispatching the previous batch.
	 */
	while (vci->evtchn_upcall_pending) {
		vci->evtchn_upcall_pending = 0;
		evt_iterate_bits(&vci->evtchn_pending_sel,
		    s->evtchn_pending, s->evtchn_mask,
		    evt_do_hypervisor_callback, regs);
	}

#ifdef DIAGNOSTIC
	/* Handlers must leave the IPL as they found it. */
	if (level != ci->ci_ilevel)
		printf("hypervisor done %08x level %d/%d ipending %08x\n",
		    (uint)vci->evtchn_pending_sel, level, ci->ci_ilevel,
		    ci->ci_ipending);
#endif
}
static int xenevt_fioctl(struct file *fp, u_long cmd, void *addr) { struct xenevt_d *d = fp->f_data; evtchn_op_t op = { .cmd = 0 }; int error; switch(cmd) { case EVTCHN_RESET: case IOCTL_EVTCHN_RESET: mutex_enter(&d->lock); d->ring_read = d->ring_write = 0; d->flags = 0; mutex_exit(&d->lock); break; case IOCTL_EVTCHN_BIND_VIRQ: { struct ioctl_evtchn_bind_virq *bind_virq = addr; op.cmd = EVTCHNOP_bind_virq; op.u.bind_virq.virq = bind_virq->virq; op.u.bind_virq.vcpu = 0; if ((error = HYPERVISOR_event_channel_op(&op))) { printf("IOCTL_EVTCHN_BIND_VIRQ failed: virq %d error %d\n", bind_virq->virq, error); return -error; } bind_virq->port = op.u.bind_virq.port; mutex_enter(&devevent_lock); KASSERT(devevent[bind_virq->port] == NULL); devevent[bind_virq->port] = d; mutex_exit(&devevent_lock); xen_atomic_set_bit(&d->ci->ci_evtmask[0], bind_virq->port); hypervisor_unmask_event(bind_virq->port); break; } case IOCTL_EVTCHN_BIND_INTERDOMAIN: { struct ioctl_evtchn_bind_interdomain *bind_intd = addr; op.cmd = EVTCHNOP_bind_interdomain; op.u.bind_interdomain.remote_dom = bind_intd->remote_domain; op.u.bind_interdomain.remote_port = bind_intd->remote_port; if ((error = HYPERVISOR_event_channel_op(&op))) return -error; bind_intd->port = op.u.bind_interdomain.local_port; mutex_enter(&devevent_lock); KASSERT(devevent[bind_intd->port] == NULL); devevent[bind_intd->port] = d; mutex_exit(&devevent_lock); xen_atomic_set_bit(&d->ci->ci_evtmask[0], bind_intd->port); hypervisor_unmask_event(bind_intd->port); break; } case IOCTL_EVTCHN_BIND_UNBOUND_PORT: { struct ioctl_evtchn_bind_unbound_port *bind_unbound = addr; op.cmd = EVTCHNOP_alloc_unbound; op.u.alloc_unbound.dom = DOMID_SELF; op.u.alloc_unbound.remote_dom = bind_unbound->remote_domain; if ((error = HYPERVISOR_event_channel_op(&op))) return -error; bind_unbound->port = op.u.alloc_unbound.port; mutex_enter(&devevent_lock); KASSERT(devevent[bind_unbound->port] == NULL); devevent[bind_unbound->port] = d; mutex_exit(&devevent_lock); 
xen_atomic_set_bit(&d->ci->ci_evtmask[0], bind_unbound->port); hypervisor_unmask_event(bind_unbound->port); break; } case IOCTL_EVTCHN_UNBIND: { struct ioctl_evtchn_unbind *unbind = addr; if (unbind->port > NR_EVENT_CHANNELS) return EINVAL; mutex_enter(&devevent_lock); if (devevent[unbind->port] != d) { mutex_exit(&devevent_lock); return ENOTCONN; } devevent[unbind->port] = NULL; mutex_exit(&devevent_lock); hypervisor_mask_event(unbind->port); xen_atomic_clear_bit(&d->ci->ci_evtmask[0], unbind->port); op.cmd = EVTCHNOP_close; op.u.close.port = unbind->port; if ((error = HYPERVISOR_event_channel_op(&op))) return -error; break; } case IOCTL_EVTCHN_NOTIFY: { struct ioctl_evtchn_notify *notify = addr; if (notify->port > NR_EVENT_CHANNELS) return EINVAL; mutex_enter(&devevent_lock); if (devevent[notify->port] != d) { mutex_exit(&devevent_lock); return ENOTCONN; } hypervisor_notify_via_evtchn(notify->port); mutex_exit(&devevent_lock); break; } case FIONBIO: break; default: return EINVAL; } return 0; } /* * Support for poll() system call * * Return true if the specific operation will not block indefinitely. */ static int xenevt_fpoll(struct file *fp, int events) { struct xenevt_d *d = fp->f_data; int revents = events & (POLLOUT | POLLWRNORM); /* we can always write */ mutex_enter(&d->lock); if (events & (POLLIN | POLLRDNORM)) { if (d->ring_read != d->ring_write) { revents |= events & (POLLIN | POLLRDNORM); } else { /* Record that someone is waiting */ selrecord(curlwp, &d->sel); } } mutex_exit(&d->lock); return (revents); }