Example #1
int evtchn_unmask(unsigned int port)
{
    struct domain *d = current->domain;
    struct vcpu   *v;

    ASSERT(spin_is_locked(&d->event_lock));

    if ( unlikely(!port_is_valid(d, port)) )
        return -EINVAL;

    v = d->vcpu[evtchn_from_port(d, port)->notify_vcpu_id];

    /*
     * These operations must happen in strict order. Based on
     * include/xen/event.h:evtchn_set_pending(). 
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    return 0;
}
Example #2
static bool evtchn_2l_is_masked(const struct domain *d, evtchn_port_t port)
{
    unsigned int max_ports = BITS_PER_EVTCHN_WORD(d) * BITS_PER_EVTCHN_WORD(d);

    ASSERT(port < max_ports);
    return port >= max_ports || test_bit(port, &shared_info(d, evtchn_mask));
}
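A note on the bound checked here: in the 2-level event-channel ABI the per-vCPU selector word carries one bit per word of the pending/mask bitmaps, so the addressable port space is BITS_PER_EVTCHN_WORD(d) squared rather than a flat word count. A minimal standalone sketch of that arithmetic (plain C, not Xen code; the 64-bit word size is an assumption for illustration):

#include <assert.h>
#include <stdio.h>

int main(void)
{
    /* Assumed word size: a 64-bit guest uses 64-bit evtchn bitmap words. */
    unsigned int bits_per_word = 64;

    /* One selector bit covers one whole bitmap word, so the 2-level
     * scheme spans word-size * word-size ports (64 * 64 = 4096 here). */
    unsigned int max_ports = bits_per_word * bits_per_word;

    assert(max_ports == 4096);
    printf("2-level ABI port limit: %u\n", max_ports);
    return 0;
}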
Example #3
static void evtchn_2l_unmask(struct domain *d, struct evtchn *evtchn)
{
    struct vcpu *v = d->vcpu[evtchn->notify_vcpu_id];
    unsigned int port = evtchn->port;

    /*
     * These operations must happen in strict order. Based on
     * evtchn_2l_set_pending() above.
     */
    if ( test_and_clear_bit(port, &shared_info(d, evtchn_mask)) &&
         test_bit          (port, &shared_info(d, evtchn_pending)) &&
         !test_and_set_bit (port / BITS_PER_EVTCHN_WORD(d),
                            &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
}
Example #4
File: time.c Project: Xilinx/xen
void update_domain_wallclock_time(struct domain *d)
{
    uint32_t *wc_version;
    uint64_t sec;

    spin_lock(&wc_lock);

    wc_version = &shared_info(d, wc_version);
    *wc_version = version_update_begin(*wc_version);
    smp_wmb();

    sec = wc_sec + d->time_offset_seconds;
    shared_info(d, wc_sec)    = sec;
    shared_info(d, wc_nsec)   = wc_nsec;
#ifdef CONFIG_X86
    if ( likely(!has_32bit_shinfo(d)) )
        d->shared_info->native.wc_sec_hi = sec >> 32;
    else
        d->shared_info->compat.arch.wc_sec_hi = sec >> 32;
#else
    shared_info(d, wc_sec_hi) = sec >> 32;
#endif

    smp_wmb();
    *wc_version = version_update_end(*wc_version);

    spin_unlock(&wc_lock);
}
Example #5
static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
    
    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}
Example #6
static void evtchn_2l_set_pending(struct vcpu *v, struct evtchn *evtchn)
{
    struct domain *d = v->domain;
    unsigned int port = evtchn->port;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return;

    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    evtchn_check_pollers(d, port);
}
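Taken together with the unmask paths in Examples 1 and 3, the ordering comment describes a handshake: set_pending publishes the pending bit, then checks the mask, then sets the selector bit and the upcall flag, while unmask replays the tail of that sequence after clearing the mask bit, so an event that arrived while masked is still delivered. A toy model of that interplay (C11 atomics standing in for Xen's bit operations; not Xen code, names are illustrative):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

/* One port's worth of state: pending/mask bits, the per-vCPU selector
 * bit covering that port's word, and the upcall flag. */
static atomic_bool pending, masked, selector, upcall;

/* Mirrors evtchn_2l_set_pending(): pending first, then mask, then selector. */
static void toy_set_pending(void)
{
    if (atomic_exchange(&pending, true))    /* test_and_set_bit(pending) */
        return;                             /* was already pending */
    if (!atomic_load(&masked) &&
        !atomic_exchange(&selector, true))  /* test_and_set_bit(selector) */
        atomic_store(&upcall, true);        /* vcpu_mark_events_pending() */
}

/* Mirrors evtchn_2l_unmask(): clear mask, re-check pending, raise selector. */
static void toy_unmask(void)
{
    if (atomic_exchange(&masked, false) &&  /* test_and_clear_bit(mask) */
        atomic_load(&pending) &&            /* arrived while masked? */
        !atomic_exchange(&selector, true))
        atomic_store(&upcall, true);        /* deliver the deferred event */
}

int main(void)
{
    atomic_store(&masked, true);
    toy_set_pending();      /* masked: pending recorded, no upcall yet */
    printf("upcall while masked: %d\n", atomic_load(&upcall));  /* 0 */
    toy_unmask();           /* unmask notices the pending bit */
    printf("upcall after unmask: %d\n", atomic_load(&upcall));  /* 1 */
    return 0;
}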
Example #7
struct vcpu *alloc_vcpu(
    struct domain *d, unsigned int vcpu_id, unsigned int cpu_id)
{
    struct vcpu *v;

    BUG_ON((!is_idle_domain(d) || vcpu_id) && d->vcpu[vcpu_id]);

    if ( (v = alloc_vcpu_struct()) == NULL )
        return NULL;

    v->domain = d;
    v->vcpu_id = vcpu_id;

    spin_lock_init(&v->virq_lock);

    tasklet_init(&v->continue_hypercall_tasklet, NULL, 0);

    if ( !zalloc_cpumask_var(&v->cpu_affinity) ||
         !zalloc_cpumask_var(&v->cpu_affinity_tmp) ||
         !zalloc_cpumask_var(&v->cpu_affinity_saved) ||
         !zalloc_cpumask_var(&v->vcpu_dirty_cpumask) )
        goto fail_free;

    if ( is_idle_domain(d) )
    {
        v->runstate.state = RUNSTATE_running;
    }
    else
    {
        v->runstate.state = RUNSTATE_offline;        
        v->runstate.state_entry_time = NOW();
        set_bit(_VPF_down, &v->pause_flags);
        v->vcpu_info = ((vcpu_id < XEN_LEGACY_MAX_VCPUS)
                        ? (vcpu_info_t *)&shared_info(d, vcpu_info[vcpu_id])
                        : &dummy_vcpu_info);
        v->vcpu_info_mfn = INVALID_MFN;
        init_waitqueue_vcpu(v);
    }

    if ( sched_init_vcpu(v, cpu_id) != 0 )
        goto fail_wq;

    if ( vcpu_initialise(v) != 0 )
    {
        sched_destroy_vcpu(v);
 fail_wq:
        destroy_waitqueue_vcpu(v);
 fail_free:
        free_cpumask_var(v->cpu_affinity);
        free_cpumask_var(v->cpu_affinity_tmp);
        free_cpumask_var(v->cpu_affinity_saved);
        free_cpumask_var(v->vcpu_dirty_cpumask);
        free_vcpu_struct(v);
        return NULL;
    }

    d->vcpu[vcpu_id] = v;
    if ( vcpu_id != 0 )
    {
        int prev_id = v->vcpu_id - 1;
        while ( (prev_id >= 0) && (d->vcpu[prev_id] == NULL) )
            prev_id--;
        BUG_ON(prev_id < 0);
        v->next_in_list = d->vcpu[prev_id]->next_in_list;
        d->vcpu[prev_id]->next_in_list = v;
    }

    /* Must be called after making new vcpu visible to for_each_vcpu(). */
    vcpu_check_shutdown(v);

    domain_update_node_affinity(d);

    return v;
}
Example #8
static long __evtchn_close(struct domain *d1, int port1)
{
    struct domain *d2 = NULL;
    struct vcpu   *v;
    struct evtchn *chn1, *chn2;
    int            port2;
    long           rc = 0;

 again:
    spin_lock(&d1->event_lock);

    if ( !port_is_valid(d1, port1) )
    {
        rc = -EINVAL;
        goto out;
    }

    chn1 = evtchn_from_port(d1, port1);

    /* Guest cannot close a Xen-attached event channel. */
    if ( unlikely(consumer_is_xen(chn1)) )
    {
        rc = -EINVAL;
        goto out;
    }

    switch ( chn1->state )
    {
    case ECS_FREE:
    case ECS_RESERVED:
        rc = -EINVAL;
        goto out;

    case ECS_UNBOUND:
        break;

    case ECS_PIRQ: {
        struct pirq *pirq = pirq_info(d1, chn1->u.pirq.irq);

        if ( !pirq )
            break;
        if ( !is_hvm_domain(d1) )
            pirq_guest_unbind(d1, pirq);
        pirq->evtchn = 0;
        pirq_cleanup_check(pirq, d1);
        unlink_pirq_port(chn1, d1->vcpu[chn1->notify_vcpu_id]);
#ifdef CONFIG_X86
        if ( is_hvm_domain(d1) && domain_pirq_to_irq(d1, pirq->pirq) > 0 )
            unmap_domain_pirq_emuirq(d1, pirq->pirq);
#endif
        break;
    }

    case ECS_VIRQ:
        for_each_vcpu ( d1, v )
        {
            if ( v->virq_to_evtchn[chn1->u.virq] != port1 )
                continue;
            v->virq_to_evtchn[chn1->u.virq] = 0;
            spin_barrier(&v->virq_lock);
        }
        break;

    case ECS_IPI:
        break;

    case ECS_INTERDOMAIN:
        if ( d2 == NULL )
        {
            d2 = chn1->u.interdomain.remote_dom;

            /* If we unlock d1 then we could lose d2. Must get a reference. */
            if ( unlikely(!get_domain(d2)) )
                BUG();

            if ( d1 < d2 )
            {
                spin_lock(&d2->event_lock);
            }
            else if ( d1 != d2 )
            {
                spin_unlock(&d1->event_lock);
                spin_lock(&d2->event_lock);
                goto again;
            }
        }
        else if ( d2 != chn1->u.interdomain.remote_dom )
        {
            /*
             * We can only get here if the port was closed and re-bound after
             * unlocking d1 but before locking d2 above. We could retry but
             * it is easier to return the same error as if we had seen the
             * port in ECS_CLOSED. It must have passed through that state for
             * us to end up here, so it's a valid error to return.
             */
            rc = -EINVAL;
            goto out;
        }

        port2 = chn1->u.interdomain.remote_port;
        BUG_ON(!port_is_valid(d2, port2));

        chn2 = evtchn_from_port(d2, port2);
        BUG_ON(chn2->state != ECS_INTERDOMAIN);
        BUG_ON(chn2->u.interdomain.remote_dom != d1);

        chn2->state = ECS_UNBOUND;
        chn2->u.unbound.remote_domid = d1->domain_id;
        break;

    default:
        BUG();
    }

    /* Clear pending event to avoid unexpected behavior on re-bind. */
    clear_bit(port1, &shared_info(d1, evtchn_pending));

    /* Reset binding to vcpu0 when the channel is freed. */
    chn1->state          = ECS_FREE;
    chn1->notify_vcpu_id = 0;

    xsm_evtchn_close_post(chn1);

 out:
    if ( d2 != NULL )
    {
        if ( d1 != d2 )
            spin_unlock(&d2->event_lock);
        put_domain(d2);
    }

    spin_unlock(&d1->event_lock);

    return rc;
}
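The ECS_INTERDOMAIN arm is also a compact example of deadlock avoidance: both domains' event locks are always taken in address order, and since d2 is only discovered while d1's lock is held, the d1 > d2 case drops d1 and retries from `again`. A generic sketch of the same discipline (plain pthreads rather than Xen spinlocks; lock_pair is a hypothetical helper):

#include <pthread.h>

/* Acquire two locks in a globally consistent (address) order so that two
 * threads locking the same pair in opposite roles cannot ABBA-deadlock. */
static void lock_pair(pthread_mutex_t *a, pthread_mutex_t *b)
{
    if (a == b)
        pthread_mutex_lock(a);      /* same object: lock once */
    else if (a < b) {
        pthread_mutex_lock(a);      /* lower address first */
        pthread_mutex_lock(b);
    } else {
        pthread_mutex_lock(b);
        pthread_mutex_lock(a);
    }
}

__evtchn_close() cannot use such a helper directly because d2 is only known once d1's lock is held, hence the unlock-relock-retry; after the retry it must revalidate everything (port validity, channel state), since the world may have changed while d1 was unlocked.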
Example #9
static long do_poll(struct sched_poll *sched_poll)
{
    struct vcpu   *v = current;
    struct domain *d = v->domain;
    evtchn_port_t  port;
    long           rc;
    unsigned int   i;

    /* Fairly arbitrary limit. */
    if ( sched_poll->nr_ports > 128 )
        return -EINVAL;

    if ( !guest_handle_okay(sched_poll->ports, sched_poll->nr_ports) )
        return -EFAULT;

    set_bit(_VPF_blocked, &v->pause_flags);
    v->poll_evtchn = -1;
    set_bit(v->vcpu_id, d->poll_mask);

#ifndef CONFIG_X86 /* set_bit() implies mb() on x86 */
    /* Check for events /after/ setting flags: avoids wakeup waiting race. */
    smp_mb();

    /*
     * Someone may have seen we are blocked but not that we are polling, or
     * vice versa. We are certainly being woken, so clean up and bail. Beyond
     * this point others can be guaranteed to clean up for us if they wake us.
     */
    rc = 0;
    if ( (v->poll_evtchn == 0) ||
         !test_bit(_VPF_blocked, &v->pause_flags) ||
         !test_bit(v->vcpu_id, d->poll_mask) )
        goto out;
#endif

    rc = 0;
    if ( local_events_need_delivery() )
        goto out;

    for ( i = 0; i < sched_poll->nr_ports; i++ )
    {
        rc = -EFAULT;
        if ( __copy_from_guest_offset(&port, sched_poll->ports, i, 1) )
            goto out;

        rc = -EINVAL;
        if ( port >= MAX_EVTCHNS(d) )
            goto out;

        rc = 0;
        if ( test_bit(port, &shared_info(d, evtchn_pending)) )
            goto out;
    }

    if ( sched_poll->nr_ports == 1 )
        v->poll_evtchn = port;

    if ( sched_poll->timeout != 0 )
        set_timer(&v->poll_timer, sched_poll->timeout);

    TRACE_2D(TRC_SCHED_BLOCK, d->domain_id, v->vcpu_id);
    raise_softirq(SCHEDULE_SOFTIRQ);

    return 0;

 out:
    v->poll_evtchn = 0;
    clear_bit(v->vcpu_id, d->poll_mask);
    clear_bit(_VPF_blocked, &v->pause_flags);
    return rc;
}
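For context, do_poll() is the handler for the SCHEDOP_poll hypercall. A hedged guest-side sketch of invoking it, modeled on the public sched.h interface as used by Linux's xen_poll_irq() (the helper name poll_one_port is ours):

#include <xen/interface/sched.h>    /* struct sched_poll, SCHEDOP_poll */

/* Block the calling vCPU until `port` is pending or `timeout` (absolute
 * Xen system time in ns; 0 means no timeout) expires. */
static void poll_one_port(evtchn_port_t port, uint64_t timeout)
{
    struct sched_poll poll;

    set_xen_guest_handle(poll.ports, &port);  /* one-entry port array */
    poll.nr_ports = 1;                        /* within the 128-port cap */
    poll.timeout  = timeout;                  /* arms v->poll_timer if set */

    /* 0 on wakeup; -EINVAL/-EFAULT mirror the checks in do_poll() above. */
    (void)HYPERVISOR_sched_op(SCHEDOP_poll, &poll);
}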
Example #10
static void evtchn_2l_clear_pending(struct domain *d, struct evtchn *evtchn)
{
    clear_bit(evtchn->port, &shared_info(d, evtchn_pending));
}
Example #11
static bool_t evtchn_2l_is_masked(struct domain *d,
                                  const struct evtchn *evtchn)
{
    return test_bit(evtchn->port, &shared_info(d, evtchn_mask));
}