long evtchn_bind_vcpu(unsigned int port, unsigned int vcpu_id) { struct domain *d = current->domain; struct evtchn *chn; long rc = 0; if ( (vcpu_id >= d->max_vcpus) || (d->vcpu[vcpu_id] == NULL) ) return -ENOENT; spin_lock(&d->event_lock); if ( !port_is_valid(d, port) ) { rc = -EINVAL; goto out; } chn = evtchn_from_port(d, port); /* Guest cannot re-bind a Xen-attached event channel. */ if ( unlikely(consumer_is_xen(chn)) ) { rc = -EINVAL; goto out; } switch ( chn->state ) { case ECS_VIRQ: if ( virq_is_global(chn->u.virq) ) chn->notify_vcpu_id = vcpu_id; else rc = -EINVAL; break; case ECS_UNBOUND: case ECS_INTERDOMAIN: chn->notify_vcpu_id = vcpu_id; break; case ECS_PIRQ: if ( chn->notify_vcpu_id == vcpu_id ) break; unlink_pirq_port(chn, d->vcpu[chn->notify_vcpu_id]); chn->notify_vcpu_id = vcpu_id; pirq_set_affinity(d, chn->u.pirq.irq, cpumask_of(d->vcpu[vcpu_id]->processor)); link_pirq_port(port, chn, d->vcpu[vcpu_id]); break; default: rc = -EINVAL; break; } out: spin_unlock(&d->event_lock); return rc; }
/*
 * Bind a physical IRQ (pirq) of the current domain to a freshly allocated
 * event channel, delivered to vcpu 0.
 *
 * @bind: in/out argument block; bind->pirq selects the IRQ, bind->port is
 *        filled in with the allocated event channel port on success.
 *
 * Returns 0 on success; -EINVAL for an out-of-range pirq, -EPERM if a PV
 * domain lacks access to the IRQ, -EEXIST if the pirq is already bound,
 * -ENOMEM if no pirq info structure could be obtained, or a negative error
 * from port allocation / pirq_guest_bind().
 *
 * NOTE(review): ERROR_EXIT is defined elsewhere in this file; it appears to
 * set rc and jump to the `out` label — confirm against the macro definition.
 */
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];  /* pirq channels initially notify vcpu 0 */
    struct pirq   *info;
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    /* PV guests need explicit IRQ access; HVM emulated pirqs do not. */
    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    /* A pirq may be bound to at most one event channel at a time. */
    if ( pirq_to_evtchn(d, pirq) != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    info = pirq_get_info(d, pirq);
    if ( !info )
        ERROR_EXIT(-ENOMEM);
    /* Record the mapping before binding so the IRQ handler can find it. */
    info->evtchn = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(v, info,
                            !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        /* Undo the mapping and release the pirq info if now unused. */
        info->evtchn = 0;
        pirq_cleanup_check(info, d);
        goto out;
    }

    chn->state = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);

    bind->port = port;

#ifdef CONFIG_X86
    /* Route an HVM pass-through pirq via the emulated interrupt path. */
    if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
        map_domain_emuirq_pirq(d, pirq, IRQ_PT);
#endif

 out:
    spin_unlock(&d->event_lock);
    return rc;
}
/*
 * Bind a physical IRQ (pirq) of the current domain to a freshly allocated
 * event channel, delivered to vcpu 0.
 *
 * This is an alternate revision of evtchn_bind_pirq() that tracks the
 * pirq -> port mapping directly in the d->pirq_to_evtchn[] array rather
 * than through a struct pirq.
 *
 * @bind: in/out argument block; bind->pirq selects the IRQ, bind->port is
 *        filled in with the allocated event channel port on success.
 *
 * Returns 0 on success; -EINVAL for an out-of-range pirq, -EPERM if a PV
 * domain lacks access to the IRQ, -EEXIST if the pirq is already bound,
 * or a negative error from port allocation / pirq_guest_bind().
 *
 * NOTE(review): ERROR_EXIT is defined elsewhere in this file; it appears to
 * set rc and jump to the `out` label — confirm against the macro definition.
 */
static long evtchn_bind_pirq(evtchn_bind_pirq_t *bind)
{
    struct evtchn *chn;
    struct domain *d = current->domain;
    struct vcpu   *v = d->vcpu[0];  /* pirq channels initially notify vcpu 0 */
    int            port, pirq = bind->pirq;
    long           rc;

    if ( (pirq < 0) || (pirq >= d->nr_pirqs) )
        return -EINVAL;

    /* PV guests need explicit IRQ access; HVM emulated pirqs do not. */
    if ( !is_hvm_domain(d) && !irq_access_permitted(d, pirq) )
        return -EPERM;

    spin_lock(&d->event_lock);

    /* A pirq may be bound to at most one event channel at a time. */
    if ( d->pirq_to_evtchn[pirq] != 0 )
        ERROR_EXIT(-EEXIST);

    if ( (port = get_free_port(d)) < 0 )
        ERROR_EXIT(port);

    chn = evtchn_from_port(d, port);

    /* Record the mapping before binding so the IRQ handler can find it. */
    d->pirq_to_evtchn[pirq] = port;
    rc = (!is_hvm_domain(d)
          ? pirq_guest_bind(
              v, pirq, !!(bind->flags & BIND_PIRQ__WILL_SHARE))
          : 0);
    if ( rc != 0 )
    {
        /* Binding failed: drop the provisional mapping. */
        d->pirq_to_evtchn[pirq] = 0;
        goto out;
    }

    chn->state = ECS_PIRQ;
    chn->u.pirq.irq = pirq;
    link_pirq_port(port, chn, v);

    bind->port = port;

    /* Route an HVM pass-through pirq via the emulated interrupt path. */
    if ( is_hvm_domain(d) && domain_pirq_to_irq(d, pirq) > 0 )
        map_domain_emuirq_pirq(d, pirq, IRQ_PT);

 out:
    spin_unlock(&d->event_lock);
    return rc;
}