/*ARGSUSED*/
static int
privcmd_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cr,
    int *rval)
{
        if (secpolicy_xvm_control(cr))
                return (EPERM);

        /*
         * Everything is a -native- data type.
         */
        if ((mode & FMODELS) != FNATIVE)
                return (EOVERFLOW);

        switch (cmd) {
        case IOCTL_PRIVCMD_HYPERCALL:
                return (do_privcmd_hypercall((void *)arg, mode, cr, rval));
        case IOCTL_PRIVCMD_MMAP:
                if (DOMAIN_IS_PRIVILEGED(xen_info))
                        return (do_privcmd_mmap((void *)arg, mode, cr));
                break;
        case IOCTL_PRIVCMD_MMAPBATCH:
                if (DOMAIN_IS_PRIVILEGED(xen_info))
                        return (do_privcmd_mmapbatch((void *)arg, mode, cr));
                break;
        default:
                break;
        }

        return (EINVAL);
}
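/*
 * Illustrative userland sketch (not part of the driver): issuing
 * IOCTL_PRIVCMD_HYPERCALL against an already-open descriptor on the privcmd
 * device node.  The privcmd_hypercall_t layout (an op plus an argument
 * array) and the hypercall/version constants are assumptions made for
 * illustration only; the real definitions live in the privcmd and
 * hypervisor headers.  Callers failing secpolicy_xvm_control() get EPERM.
 */
#include <sys/types.h>
#include <unistd.h>
#include <stropts.h>

static int
example_xen_version(int fd)
{
        privcmd_hypercall_t hc;         /* assumed: { op; arg[5]; } */

        hc.op = __HYPERVISOR_xen_version;       /* assumed constant name */
        hc.arg[0] = 0;                          /* XENVER_version */
        hc.arg[1] = 0;

        /* return value (or -1/errno) comes straight from the hypercall */
        return (ioctl(fd, IOCTL_PRIVCMD_HYPERCALL, &hc));
}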
/*ARGSUSED*/
static int
xpvtap_open(dev_t *devp, int flag, int otyp, cred_t *cred)
{
        xpvtap_state_t *state;
        int instance;

        if (secpolicy_xvm_control(cred)) {
                return (EPERM);
        }

        instance = getminor(*devp);
        state = ddi_get_soft_state(xpvtap_statep, instance);
        if (state == NULL) {
                return (ENXIO);
        }

        /* we should only be opened once */
        mutex_enter(&state->bt_open.bo_mutex);
        if (state->bt_open.bo_opened) {
                mutex_exit(&state->bt_open.bo_mutex);
                return (EBUSY);
        }
        state->bt_open.bo_opened = B_TRUE;
        mutex_exit(&state->bt_open.bo_mutex);

        /*
         * save the app's address space. we need it for mapping/unmapping
         * grefs since we'll be doing that from a separate kernel thread.
         */
        state->bt_map.um_as = curproc->p_as;

        return (0);
}
/* ARGSUSED */
static int
evtchndrv_write(dev_t dev, struct uio *uio, cred_t *cr)
{
        int rc, i;
        ssize_t count;
        evtchn_port_t *kbuf;
        struct evtsoftdata *ep;
        ulong_t flags;
        minor_t minor = getminor(dev);
        evtchn_port_t sbuf[32];

        if (secpolicy_xvm_control(cr))
                return (EPERM);

        ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

        /* Whole number of ports. */
        count = uio->uio_resid;
        count &= ~(sizeof (evtchn_port_t) - 1);

        if (count == 0)
                return (0);

        if (count > PAGESIZE)
                count = PAGESIZE;

        if (count <= sizeof (sbuf))
                kbuf = sbuf;
        else
                kbuf = kmem_alloc(PAGESIZE, KM_SLEEP);

        if ((rc = uiomove(kbuf, count, UIO_WRITE, uio)) != 0)
                goto out;

        mutex_enter(&port_user_lock);
        for (i = 0; i < (count / sizeof (evtchn_port_t)); i++)
                if ((kbuf[i] < NR_EVENT_CHANNELS) &&
                    (port_user[kbuf[i]] == ep)) {
                        flags = intr_clear();
                        ec_unmask_evtchn(kbuf[i]);
                        intr_restore(flags);
                }
        mutex_exit(&port_user_lock);

out:
        if (kbuf != sbuf)
                kmem_free(kbuf, PAGESIZE);

        return (rc);
}
/*ARGSUSED8*/
static int
privcmd_segmap(dev_t dev, off_t off, struct as *as, caddr_t *addrp,
    off_t len, uint_t prot, uint_t maxprot, uint_t flags, cred_t *cr)
{
        struct segmf_crargs a;
        int error;

        if (secpolicy_xvm_control(cr))
                return (EPERM);

        as_rangelock(as);
        if ((flags & MAP_FIXED) == 0) {
                map_addr(addrp, len, (offset_t)off, 0, flags);
                if (*addrp == NULL) {
                        error = ENOMEM;
                        goto rangeunlock;
                }
        } else {
                /*
                 * User specified address
                 */
                (void) as_unmap(as, *addrp, len);
        }

        /*
         * The mapping *must* be MAP_SHARED at offset 0.
         *
         * (Foreign pages are treated like device memory; the
         * ioctl interface allows the backing objects to be
         * arbitrarily redefined to point at any machine frame.)
         */
        if ((flags & MAP_TYPE) != MAP_SHARED || off != 0) {
                error = EINVAL;
                goto rangeunlock;
        }

        a.dev = dev;
        a.prot = (uchar_t)prot;
        a.maxprot = (uchar_t)maxprot;
        error = as_map(as, *addrp, len, segmf_create, &a);

rangeunlock:
        as_rangeunlock(as);
        return (error);
}
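/*
 * Illustrative userland sketch (not part of the driver): reserving the
 * MAP_SHARED, offset-0 range that privcmd_segmap() insists on.  The fd is
 * an already-open descriptor on the privcmd device node (path not shown,
 * as it is not defined here).  The segmf-backed range starts out empty;
 * the caller only populates its backing frames later through
 * IOCTL_PRIVCMD_MMAP / IOCTL_PRIVCMD_MMAPBATCH on the same descriptor.
 */
#include <sys/types.h>
#include <sys/mman.h>

static void *
example_privcmd_reserve(int fd, size_t len)
{
        void *va;

        /* anything other than MAP_SHARED at offset 0 fails with EINVAL */
        va = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        return (va == MAP_FAILED ? NULL : va);
}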
/*ARGSUSED*/
static int
xpvtap_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred,
    int *rval)
{
        xpvtap_state_t *state;
        int instance;

        if (secpolicy_xvm_control(cred)) {
                return (EPERM);
        }

        instance = getminor(dev);
        if (instance == -1) {
                return (EBADF);
        }
        state = ddi_get_soft_state(xpvtap_statep, instance);
        if (state == NULL) {
                return (EBADF);
        }

        switch (cmd) {
        case XPVTAP_IOCTL_RESP_PUSH:
                /*
                 * wake thread, thread handles guest requests and user app
                 * responses.
                 */
                mutex_enter(&state->bt_thread.ut_mutex);
                state->bt_thread.ut_wake = B_TRUE;
                cv_signal(&state->bt_thread.ut_wake_cv);
                mutex_exit(&state->bt_thread.ut_mutex);
                break;

        default:
                cmn_err(CE_WARN, "ioctl(%d) not supported\n", cmd);
                return (ENXIO);
        }

        return (0);
}
/* ARGSUSED */
static int
evtchndrv_ioctl(dev_t dev, int cmd, intptr_t data, int flag, cred_t *cr,
    int *rvalp)
{
        int err = 0;
        struct evtsoftdata *ep;
        minor_t minor = getminor(dev);

        if (secpolicy_xvm_control(cr))
                return (EPERM);

        ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

        *rvalp = 0;

        switch (cmd) {
        case IOCTL_EVTCHN_BIND_VIRQ: {
                struct ioctl_evtchn_bind_virq bind;

                if (copyin((void *)data, &bind, sizeof (bind))) {
                        err = EFAULT;
                        break;
                }

                if ((err = xen_bind_virq(bind.virq, 0, rvalp)) != 0)
                        break;

                evtchn_bind_to_user(ep, *rvalp);
                break;
        }

        case IOCTL_EVTCHN_BIND_INTERDOMAIN: {
                struct ioctl_evtchn_bind_interdomain bind;

                if (copyin((void *)data, &bind, sizeof (bind))) {
                        err = EFAULT;
                        break;
                }

                if ((err = xen_bind_interdomain(bind.remote_domain,
                    bind.remote_port, rvalp)) != 0)
                        break;

                ec_bind_vcpu(*rvalp, 0);
                evtchn_bind_to_user(ep, *rvalp);
                break;
        }

        case IOCTL_EVTCHN_BIND_UNBOUND_PORT: {
                struct ioctl_evtchn_bind_unbound_port bind;

                if (copyin((void *)data, &bind, sizeof (bind))) {
                        err = EFAULT;
                        break;
                }

                if ((err = xen_alloc_unbound_evtchn(bind.remote_domain,
                    rvalp)) != 0)
                        break;

                evtchn_bind_to_user(ep, *rvalp);
                break;
        }

        case IOCTL_EVTCHN_UNBIND: {
                struct ioctl_evtchn_unbind unbind;

                if (copyin((void *)data, &unbind, sizeof (unbind))) {
                        err = EFAULT;
                        break;
                }

                if (unbind.port >= NR_EVENT_CHANNELS) {
                        err = EFAULT;
                        break;
                }

                mutex_enter(&port_user_lock);

                if (port_user[unbind.port] != ep) {
                        mutex_exit(&port_user_lock);
                        err = ENOTCONN;
                        break;
                }

                evtchndrv_close_evtchn(unbind.port);
                mutex_exit(&port_user_lock);
                break;
        }

        case IOCTL_EVTCHN_NOTIFY: {
                struct ioctl_evtchn_notify notify;

                if (copyin((void *)data, &notify, sizeof (notify))) {
                        err = EFAULT;
                        break;
                }

                if (notify.port >= NR_EVENT_CHANNELS) {
                        err = EINVAL;
                } else if (port_user[notify.port] != ep) {
                        err = ENOTCONN;
                } else {
                        ec_notify_via_evtchn(notify.port);
                }
                break;
        }

        default:
                err = ENOSYS;
        }

        return (err);
}
/* ARGSUSED */
static int
evtchndrv_read(dev_t dev, struct uio *uio, cred_t *cr)
{
        int rc = 0;
        ssize_t count;
        unsigned int c, p, bytes1 = 0, bytes2 = 0;
        struct evtsoftdata *ep;
        minor_t minor = getminor(dev);

        if (secpolicy_xvm_control(cr))
                return (EPERM);

        ep = EVTCHNDRV_INST2SOFTS(EVTCHNDRV_MINOR2INST(minor));

        /* Whole number of ports. */
        count = uio->uio_resid;
        count &= ~(sizeof (evtchn_port_t) - 1);

        if (count == 0)
                return (0);

        if (count > PAGESIZE)
                count = PAGESIZE;

        mutex_enter(&ep->evtchn_lock);

        for (;;) {
                if (ep->ring_overflow) {
                        rc = EFBIG;
                        goto done;
                }

                if ((c = ep->ring_cons) != (p = ep->ring_prod))
                        break;

                if (uio->uio_fmode & O_NONBLOCK) {
                        rc = EAGAIN;
                        goto done;
                }

                if (cv_wait_sig(&ep->evtchn_wait, &ep->evtchn_lock) == 0) {
                        rc = EINTR;
                        goto done;
                }
        }

        /* Byte lengths of two chunks. Chunk split (if any) is at ring wrap. */
        if (((c ^ p) & EVTCHN_RING_SIZE) != 0) {
                bytes1 = (EVTCHN_RING_SIZE - EVTCHN_RING_MASK(c)) *
                    sizeof (evtchn_port_t);
                bytes2 = EVTCHN_RING_MASK(p) * sizeof (evtchn_port_t);
        } else {
                bytes1 = (p - c) * sizeof (evtchn_port_t);
                bytes2 = 0;
        }

        /* Truncate chunks according to caller's maximum byte count. */
        if (bytes1 > count) {
                bytes1 = count;
                bytes2 = 0;
        } else if ((bytes1 + bytes2) > count) {
                bytes2 = count - bytes1;
        }

        if (uiomove(&ep->ring[EVTCHN_RING_MASK(c)], bytes1, UIO_READ, uio) ||
            ((bytes2 != 0) && uiomove(&ep->ring[0], bytes2, UIO_READ, uio))) {
                rc = EFAULT;
                goto done;
        }

        ep->ring_cons += (bytes1 + bytes2) / sizeof (evtchn_port_t);

done:
        mutex_exit(&ep->evtchn_lock);
        return (rc);
}
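/*
 * Illustrative userland sketch (not part of the driver): the usage pattern
 * the evtchn entry points above imply.  An application binds an event
 * source with an ioctl (the bound port number is the ioctl return value),
 * read()s whole evtchn_port_t values as events fire, and write()s the
 * ports back to re-unmask them.  The device path and the VIRQ_DEBUG
 * constant are assumptions for illustration, and the ioctl structures are
 * assumed to be visible from the driver's public header.
 */
#include <sys/types.h>
#include <fcntl.h>
#include <unistd.h>
#include <stropts.h>

static int
example_evtchn_loop(void)
{
        struct ioctl_evtchn_bind_virq bind;
        evtchn_port_t port;
        ssize_t n;
        int fd, bound;

        fd = open("/dev/xen/evtchn", O_RDWR);   /* assumed device path */
        if (fd < 0)
                return (-1);

        bind.virq = VIRQ_DEBUG;                 /* assumed VIRQ number */
        bound = ioctl(fd, IOCTL_EVTCHN_BIND_VIRQ, &bind);
        if (bound < 0) {
                (void) close(fd);
                return (-1);
        }

        for (;;) {
                /* blocks until at least one bound port has fired */
                n = read(fd, &port, sizeof (port));
                if (n != sizeof (port))
                        break;

                /* ... handle the event signalled on 'port' ... */

                /* writing the port back re-unmasks it for the next event */
                (void) write(fd, &port, sizeof (port));
        }

        (void) close(fd);
        return (0);
}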
/*ARGSUSED*/
static int
xpvtap_segmap(dev_t dev, off_t off, struct as *asp, caddr_t *addrp,
    off_t len, unsigned int prot, unsigned int maxprot, unsigned int flags,
    cred_t *cred_p)
{
        struct segmf_crargs a;
        xpvtap_state_t *state;
        int instance;
        int e;

        if (secpolicy_xvm_control(cred_p)) {
                return (EPERM);
        }

        instance = getminor(dev);
        state = ddi_get_soft_state(xpvtap_statep, instance);
        if (state == NULL) {
                return (EBADF);
        }

        /* the user app should be doing a MAP_SHARED mapping */
        if ((flags & MAP_TYPE) != MAP_SHARED) {
                return (EINVAL);
        }

        /*
         * if this is the user ring (offset = 0), devmap it (which ends up in
         * xpvtap_devmap). devmap will alloc and map the ring into the
         * app's VA space.
         */
        if (off == 0) {
                e = devmap_setup(dev, (offset_t)off, asp, addrp, (size_t)len,
                    prot, maxprot, flags, cred_p);
                return (e);
        }

        /* this should be the mmap for the gref pages (offset = PAGESIZE) */
        if (off != PAGESIZE) {
                return (EINVAL);
        }

        /* make sure we get the size we're expecting */
        if (len != XPVTAP_GREF_BUFSIZE) {
                return (EINVAL);
        }

        /*
         * reserve user app VA space for the gref pages and use segmf to
         * manage the backing store for the physical memory. segmf will
         * map in/out the grefs and fault them in/out.
         */
        ASSERT(asp == state->bt_map.um_as);
        as_rangelock(asp);
        if ((flags & MAP_FIXED) == 0) {
                map_addr(addrp, len, 0, 0, flags);
                if (*addrp == NULL) {
                        as_rangeunlock(asp);
                        return (ENOMEM);
                }
        } else {
                /* User specified address */
                (void) as_unmap(asp, *addrp, len);
        }
        a.dev = dev;
        a.prot = (uchar_t)prot;
        a.maxprot = (uchar_t)maxprot;
        e = as_map(asp, *addrp, len, segmf_create, &a);
        if (e != 0) {
                as_rangeunlock(asp);
                return (e);
        }
        as_rangeunlock(asp);

        /* stash the user base address and size of the gref mapping */
        state->bt_map.um_guest_pages = (caddr_t)*addrp;
        state->bt_map.um_guest_size = (size_t)len;

        /* register an as callback so we can cleanup when the app goes away */
        e = as_add_callback(asp, xpvtap_segmf_unregister, state,
            AS_UNMAP_EVENT, *addrp, len, KM_SLEEP);
        if (e != 0) {
                (void) as_unmap(asp, *addrp, len);
                return (EINVAL);
        }

        /* wake thread to see if there are requests already queued up */
        mutex_enter(&state->bt_thread.ut_mutex);
        state->bt_thread.ut_wake = B_TRUE;
        cv_signal(&state->bt_thread.ut_wake_cv);
        mutex_exit(&state->bt_thread.ut_mutex);

        return (0);
}
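/*
 * Illustrative userland sketch (not part of the driver): the two-mapping
 * layout xpvtap_segmap() expects from the tap application.  Offset 0 is the
 * shared request/response ring (handled via devmap_setup()); offset
 * PAGESIZE is the XPVTAP_GREF_BUFSIZE window that segmf backs with guest
 * grant pages.  The device path, the one-page ring length, and the
 * visibility of XPVTAP_GREF_BUFSIZE and XPVTAP_IOCTL_RESP_PUSH from a
 * userland header are assumptions for illustration.
 */
#include <sys/types.h>
#include <sys/mman.h>
#include <fcntl.h>
#include <unistd.h>
#include <stropts.h>

static int
example_xpvtap_setup(void)
{
        long pgsz = sysconf(_SC_PAGESIZE);
        void *ring, *grefs;
        int fd;

        fd = open("/dev/xpvtap", O_RDWR);       /* assumed device path */
        if (fd < 0)
                return (-1);

        /* offset 0: the shared ring; both mappings must be MAP_SHARED */
        ring = mmap(NULL, pgsz, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);

        /* offset PAGESIZE: the gref window, exactly XPVTAP_GREF_BUFSIZE */
        grefs = mmap(NULL, XPVTAP_GREF_BUFSIZE, PROT_READ | PROT_WRITE,
            MAP_SHARED, fd, pgsz);

        if (ring == MAP_FAILED || grefs == MAP_FAILED) {
                (void) close(fd);
                return (-1);
        }

        /* ... consume guest requests from the ring, post responses ... */

        /* tell the driver's worker thread that responses were pushed */
        (void) ioctl(fd, XPVTAP_IOCTL_RESP_PUSH);

        (void) close(fd);
        return (0);
}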