/*
 * Release handler for /dev/xen/evtchn: called on the final close of the
 * file.  Tears down every event channel still owned by this per-user
 * instance and frees its resources.
 *
 * Pass 1 (under port_user_lock, IRQs off): free the event ring page and
 * disable the IRQ of every port owned by @u, so no further events are
 * delivered for them.
 * Pass 2 (lock dropped): unbind each owned port via
 * evtchn_unbind_from_user() — presumably done outside the spinlock
 * because the unbind path can sleep (NOTE(review): confirm against
 * unbind_from_irqhandler()).
 */
static int evtchn_release(struct inode *inode, struct file *filp)
{
	int i;
	struct per_user_data *u = filp->private_data;

	spin_lock_irq(&port_user_lock);

	free_page((unsigned long)u->ring);

	/* First pass: quiesce delivery on every port this user owns. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (get_port_user(i) != u)
			continue;

		disable_irq(irq_from_evtchn(i));
	}

	spin_unlock_irq(&port_user_lock);

	/* Second pass: actually unbind the ports, lock dropped. */
	for (i = 0; i < NR_EVENT_CHANNELS; i++) {
		if (get_port_user(i) != u)
			continue;

		evtchn_unbind_from_user(get_port_user(i), i);
	}

	kfree(u->name);
	kfree(u);

	return 0;
}
/*
 * Detach @port from @u: remove the kernel IRQ handler bound to the
 * event channel, then clear the port's ownership slot.  The handler
 * cookie is the port number itself, cast to a pointer.
 */
static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
	unbind_from_irqhandler(irq_from_evtchn(port),
			       (void *)(unsigned long)port);

	set_port_user(port, NULL);
}
/*
 * Detach @port from @u: remove the kernel IRQ handler bound to the
 * event channel, then clear the port's ownership slot.  This variant
 * writes port_user[] directly and uses a compiler barrier to keep the
 * unbind strictly before the slot clear.
 */
static void evtchn_unbind_from_user(struct per_user_data *u, int port)
{
	int irq = irq_from_evtchn(port);

	/* The IRQ-handler cookie is the port number cast to a pointer. */
	unbind_from_irqhandler(irq, (void *)(unsigned long)port);

	/* make sure we unbind the irq handler before clearing the port */
	barrier();

	port_user[port] = NULL;
}
/*
 * write() handler for /dev/xen/evtchn.  Userspace writes an array of
 * evtchn_port_t values; each port that belongs to this user and is
 * currently masked gets re-enabled (its IRQ unmasked).
 *
 * Returns the number of bytes consumed (rounded down to a whole number
 * of ports, capped at one page), 0 for an empty write, -ENOMEM if the
 * bounce page cannot be allocated, or -EFAULT on a bad user pointer.
 */
static ssize_t evtchn_write(struct file *file, const char __user *buf,
			    size_t count, loff_t *ppos)
{
	int rc, idx;
	evtchn_port_t *ports = (evtchn_port_t *)__get_free_page(GFP_KERNEL);
	struct per_user_data *u = file->private_data;

	if (ports == NULL)
		return -ENOMEM;

	/* Whole number of ports. */
	count &= ~(sizeof(evtchn_port_t)-1);

	rc = 0;
	if (count == 0)
		goto out;

	if (count > PAGE_SIZE)
		count = PAGE_SIZE;

	rc = -EFAULT;
	if (copy_from_user(ports, buf, count) != 0)
		goto out;

	spin_lock_irq(&port_user_lock);

	for (idx = 0; idx < (count/sizeof(evtchn_port_t)); idx++) {
		unsigned port = ports[idx];

		/* Skip anything out of range, not ours, or already live. */
		if (port >= NR_EVENT_CHANNELS)
			continue;
		if (get_port_user(port) != u || get_port_enabled(port))
			continue;

		set_port_enabled(port, true);
		enable_irq(irq_from_evtchn(port));
	}

	spin_unlock_irq(&port_user_lock);

	rc = count;

out:
	free_page((unsigned long)ports);
	return rc;
}
static long evtchn_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { int rc; struct per_user_data *u = file->private_data; void __user *uarg = (void __user *) arg; /* Prevent bind from racing with unbind */ mutex_lock(&u->bind_mutex); switch (cmd) { case IOCTL_EVTCHN_BIND_VIRQ: { struct ioctl_evtchn_bind_virq bind; struct evtchn_bind_virq bind_virq; rc = -EFAULT; if (copy_from_user(&bind, uarg, sizeof(bind))) break; bind_virq.virq = bind.virq; bind_virq.vcpu = 0; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_virq, &bind_virq); if (rc != 0) break; rc = evtchn_bind_to_user(u, bind_virq.port); if (rc == 0) rc = bind_virq.port; break; } case IOCTL_EVTCHN_BIND_INTERDOMAIN: { struct ioctl_evtchn_bind_interdomain bind; struct evtchn_bind_interdomain bind_interdomain; rc = -EFAULT; if (copy_from_user(&bind, uarg, sizeof(bind))) break; bind_interdomain.remote_dom = bind.remote_domain; bind_interdomain.remote_port = bind.remote_port; rc = HYPERVISOR_event_channel_op(EVTCHNOP_bind_interdomain, &bind_interdomain); if (rc != 0) break; rc = evtchn_bind_to_user(u, bind_interdomain.local_port); if (rc == 0) rc = bind_interdomain.local_port; break; } case IOCTL_EVTCHN_BIND_UNBOUND_PORT: { struct ioctl_evtchn_bind_unbound_port bind; struct evtchn_alloc_unbound alloc_unbound; rc = -EFAULT; if (copy_from_user(&bind, uarg, sizeof(bind))) break; alloc_unbound.dom = DOMID_SELF; alloc_unbound.remote_dom = bind.remote_domain; rc = HYPERVISOR_event_channel_op(EVTCHNOP_alloc_unbound, &alloc_unbound); if (rc != 0) break; rc = evtchn_bind_to_user(u, alloc_unbound.port); if (rc == 0) rc = alloc_unbound.port; break; } case IOCTL_EVTCHN_UNBIND: { struct ioctl_evtchn_unbind unbind; rc = -EFAULT; if (copy_from_user(&unbind, uarg, sizeof(unbind))) break; rc = -EINVAL; if (unbind.port >= NR_EVENT_CHANNELS) break; rc = -ENOTCONN; if (get_port_user(unbind.port) != u) break; disable_irq(irq_from_evtchn(unbind.port)); evtchn_unbind_from_user(u, unbind.port); rc = 0; break; } case 
IOCTL_EVTCHN_NOTIFY: { struct ioctl_evtchn_notify notify; rc = -EFAULT; if (copy_from_user(¬ify, uarg, sizeof(notify))) break; if (notify.port >= NR_EVENT_CHANNELS) { rc = -EINVAL; } else if (get_port_user(notify.port) != u) { rc = -ENOTCONN; } else { notify_remote_via_evtchn(notify.port); rc = 0; } break; } case IOCTL_EVTCHN_RESET: { /* Initialise the ring to empty. Clear errors. */ mutex_lock(&u->ring_cons_mutex); spin_lock_irq(&port_user_lock); u->ring_cons = u->ring_prod = u->ring_overflow = 0; spin_unlock_irq(&port_user_lock); mutex_unlock(&u->ring_cons_mutex); rc = 0; break; } default: rc = -ENOSYS; break; } mutex_unlock(&u->bind_mutex); return rc; }