static bool evtchn_2l_is_masked(const struct domain *d, evtchn_port_t port)
{
    unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);

    ASSERT(port < max_ports);
    return port >= max_ports ||
           test_bit(port, &shared_info(d, evtchn_mask));
}
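/*
 * Hedged sketch: the companion query for the pending bit is not part of this
 * excerpt. Mirroring the structure of evtchn_2l_is_masked() above and the
 * shared_info(d, evtchn_pending) array used throughout this file, it would
 * look roughly like this; the name and exact bounds handling are assumptions.
 */
static bool evtchn_2l_is_pending(const struct domain *d, evtchn_port_t port)
{
    unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);

    ASSERT(port < max_ports);
    return port < max_ports &&
           test_bit(port, &shared_info(d, evtchn_pending));
}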
int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct vcpu   *v;

    ASSERT(spin_is_locked(&d->event_lock));

    if ( unlikely(!port_is_valid(d, port)) )
        return -EINVAL;

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending().
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    return 0;
}
static void evtchn_2l_print_state(struct domain *d,
                                  const struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];

    printk("%d", !!test_bit(evtchn->port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)));
}
static void evtchn_2l_unmask(struct domain *d, struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
    unsigned int port = evtchn->port;

    /*
     * These operations must happen in strict order. Based on
     * evtchn_2l_set_pending() above.
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
}
static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}
static void evtchn_2l_set_pending(struct vcpu *v, struct evtchn *evtchn)
{
    struct domain *d = v->domain;
    unsigned int port = evtchn->port;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    evtchn_check_pollers(d, port);
}
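/*
 * Hedged sketch, not taken verbatim from this excerpt: evtchn_2l_set_pending()
 * above ends with a call to evtchn_check_pollers(), whose body is not shown
 * here. One plausible shape for that helper, reconstructed from the poll-wakeup
 * loop that is still inlined in evtchn_set_pending() above, is:
 */
void evtchn_check_pollers(struct domain *d, unsigned int port)
{
    struct vcpu *v;
    unsigned int vcpuid;

    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid + 1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }
}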
void evtchn_2l_init(struct domain *d)
{
    d->evtchn_port_ops = &evtchn_port_ops_2l;
    d->max_evtchns = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);
}
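/*
 * Hedged sketch: evtchn_2l_init() above installs an ops table named
 * evtchn_port_ops_2l, which is not part of this excerpt. Assuming the
 * struct evtchn_port_ops hooks correspond to the 2-level handlers defined
 * above, the table would look roughly like this; only hooks backed by
 * functions shown in this excerpt are listed, and the exact field names are
 * assumptions for illustration.
 */
static const struct evtchn_port_ops evtchn_port_ops_2l =
{
    .set_pending = evtchn_2l_set_pending,
    .unmask      = evtchn_2l_unmask,
    .is_masked   = evtchn_2l_is_masked,
    .print_state = evtchn_2l_print_state,
};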