static int get_free_port(struct domain *d) { struct evtchn *chn; int port; int i, j; if ( d->is_dying ) return -EINVAL; for ( port = 0; port_is_valid(d, port); port++ ) if ( evtchn_from_port(d, port)->state == ECS_FREE ) return port; if ( port == MAX_EVTCHNS(d) ) return -ENOSPC; chn = xzalloc_array(struct evtchn, EVTCHNS_PER_BUCKET); if ( unlikely(chn == NULL) ) return -ENOMEM; bucket_from_port(d, port) = chn; for ( i = 0; i < EVTCHNS_PER_BUCKET; i++ ) { if ( xsm_alloc_security_evtchn(&chn[i]) ) { for ( j = 0; j < i; j++ ) xsm_free_security_evtchn(&chn[j]); xfree(chn); return -ENOMEM; } } return port; }
static long do_poll(struct sched_poll *sched_poll) { struct vcpu *v = current; struct domain *d = v->domain; evtchn_port_t port; long rc = 0; unsigned int i; /* Fairly arbitrary limit. */ if ( sched_poll->nr_ports > 128 ) return -EINVAL; if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) ) return -EFAULT; set_bit(_VPF_blocked, &v->pause_flags); v->is_polling = 1; d->is_polling = 1; /* Check for events /after/ setting flags: avoids wakeup waiting race. */ smp_wmb(); for ( i = 0; i < sched_poll->nr_ports; i++ ) { rc = -EFAULT; if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) ) goto out; rc = -EINVAL; if ( port >= MAX_EVTCHNS(d) ) goto out; rc = 0; if ( test_bit(port, shared_info_addr(d, evtchn_pending)) ) goto out; } if ( sched_poll->timeout != 0 ) set_timer(&v->poll_timer, sched_poll->timeout); TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id); raise_softirq(SCHEDULE_SOFTIRQ); return 0; out: v->is_polling = 0; clear_bit(_VPF_blocked, &v->pause_flags); return rc; }
/*
 * SCHEDOP_poll: block the calling vCPU until one of up to 128 guest-supplied
 * event-channel ports becomes pending, optionally bounded by a timeout.
 *
 * Returns 0 on success (the vCPU will block via the scheduler softirq), or:
 *   -EINVAL  if too many ports were supplied or a port is out of range,
 *   -EFAULT  if the guest port array cannot be accessed.
 *
 * NOTE(review): the exact ordering of flag updates and checks below is
 * race-sensitive; do not reorder statements without understanding the
 * wakeup protocol against the scheduler's unblock path.
 */
static long do_poll(struct sched_poll *sched_poll)
{
    struct vcpu   *v = current;
    struct domain *d = v->domain;
    evtchn_port_t  port;
    long           rc;
    unsigned int   i;

    /* Fairly arbitrary limit. */
    if ( sched_poll->nr_ports > 128 )
        return -EINVAL;

    if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
        return -EFAULT;

    /*
     * Advertise our intent to block and poll.  poll_evtchn == -1 marks
     * "polling, specific port not yet known"; the check below implies a
     * waker resets it to 0 — confirm against the vCPU wakeup code.
     */
    set_bit(_VPF_blocked, &v->pause_flags);
    v->poll_evtchn = -1;
    set_bit(v->vcpu_id, d->poll_mask);

#ifndef CONFIG_X86 /* set_bit() implies mb() on x86 */
    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    smp_mb();

    /*
     * Someone may have seen we are blocked but not that we are polling, or
     * vice versa. We are certainly being woken, so clean up and bail. Beyond
     * this point others can be guaranteed to clean up for us if they wake us.
     */
    rc = 0;
    if ( (v->poll_evtchn == 0) ||
         !test_bit(_VPF_blocked, &v->pause_flags) ||
         !test_bit(v->vcpu_id, d->poll_mask) )
        goto out;
#endif

    /* An event is already pending for this vCPU: no need to block. */
    rc = 0;
    if ( local_events_need_delivery() )
        goto out;

    /* Scan the guest's port list; bail as soon as one is pending. */
    for ( i = 0; i < sched_poll->nr_ports; i++ )
    {
        rc = -EFAULT;
        if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
            goto out;

        rc = -EINVAL;
        if ( port >= MAX_EVTCHNS(d) )
            goto out;

        rc = 0;
        if ( test_bit(port, &shared_info(d, evtchn_pending)) )
            goto out;
    }

    /*
     * Single-port poll: record the port so wakers can match on it
     * (fast-path single-port polling).
     */
    if ( sched_poll->nr_ports == 1 )
        v->poll_evtchn = port;

    if ( sched_poll->timeout != 0 )
        set_timer(&v->poll_timer, sched_poll->timeout);

    TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);

    return 0;

 out:
    /* Undo the polling/blocked state advertised above before returning. */
    v->poll_evtchn = 0;
    clear_bit(v->vcpu_id, d->poll_mask);
    clear_bit(_VPF_blocked, &v->pause_flags);
    return rc;
}