/*
 * Mark event channel @port pending in @v's domain and, if the port is not
 * masked, flag @v as having events to deliver.  Finally, wake any VCPU in
 * the domain that is polling for events.
 *
 * NB. @v is reused as a scratch iterator by the polling loop at the end,
 * so it must not be referenced after that loop starts.
 */
void evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    shared_info_t *s = d->shared_info;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    /* Already pending: nothing more to do (events are level-triggered). */
    if ( test_and_set_bit(port, __shared_info_addr(d, s, evtchn_pending)) )
        return;

    /*
     * Only notify if the port is unmasked AND this is the first pending
     * port in its selector word (the selector bit was previously clear).
     */
    if ( !test_bit        (port, __shared_info_addr(d, s, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_GUEST_LONG(d),
                           vcpu_info_addr(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }

    /* Check if some VCPU might be polling for this event. */
    if ( unlikely(d->is_polling) )
    {
        d->is_polling = 0;
        smp_mb(); /* check vcpu poll-flags /after/ clearing domain poll-flag */
        for_each_vcpu ( d, v )
        {
            if ( !v->is_polling )
                continue;
            v->is_polling = 0;
            vcpu_unblock(v);
        }
    }
}
/* Example #2: a later revision of evtchn_set_pending follows below. */
/*
 * Firmware IPI hypercall handler: deliver an inter-processor interrupt
 * from the current domain to one of its own VCPUs.
 *
 * NOTE(review): assumes the hypercall arguments arrive in regs->r14
 * (target VCPU id) and regs->r15 (vector), per the firmware calling
 * convention — confirm against the hypercall dispatcher.
 *
 * XEN_SAL_BOOT_RENDEZ_VEC sent to an uninitialised or downed VCPU is the
 * SAL boot rendez-vous: (re)initialise the target and start it at the
 * recorded rendez-vous entry point.  Any other vector is delivered as an
 * ordinary pending interrupt.  Invalid/absent targets are silently ignored.
 */
static void
fw_hypercall_ipi (struct pt_regs *regs)
{
	int cpu = regs->r14;
	int vector = regs->r15;
	struct vcpu *targ;
	struct domain *d = current->domain;

	/* Be sure the target exists.  */
	if (cpu >= d->max_vcpus)
		return;
	targ = d->vcpu[cpu];
	if (targ == NULL)
		return;

  	if (vector == XEN_SAL_BOOT_RENDEZ_VEC
	    && (!targ->is_initialised
		|| test_bit(_VPF_down, &targ->pause_flags))) {

		/* First start: initialize the vcpu.  */
		if (!targ->is_initialised) {
			if (arch_set_info_guest (targ, NULL) != 0) {
				printk ("arch_boot_vcpu: failure\n");
				return;
			}
		}
			
		/*
		 * First or next rendez-vous: set registers so the target
		 * resumes at the domain's recorded boot rendez-vous entry
		 * (iip/r1), returning via the SAL-return firmware stub (b0).
		 */
		vcpu_init_regs (targ);
		vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
		vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
		vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;

		/*
		 * Atomically clear _VPF_down so only one sender wakes the
		 * target; a lost race means it was already brought up.
		 */
		if (test_and_clear_bit(_VPF_down,
				       &targ->pause_flags)) {
			vcpu_wake(targ);
			printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awaken\n",
			       targ->vcpu_id);
		}
		else
			printk ("arch_boot_vcpu: huu, already awaken!\n");
	}
	else {
		/*
		 * Normal IPI: queue the vector, unblock the target, and —
		 * if it was running when sampled — kick its physical CPU
		 * so the interrupt is noticed promptly.
		 */
		int running = targ->is_running;
		vcpu_pend_interrupt(targ, vector);
		vcpu_unblock(targ);
		if (running)
			smp_send_event_check_cpu(targ->processor);
	}
	return;
}
/*
 * Mark event channel @port pending in @v's domain and, if the port is not
 * masked, flag @v as having events to deliver.  Wakes any VCPU in the
 * domain polling for this (or any) event.
 *
 * Returns 1 if the port was already pending (the event is merged and no
 * new notification is raised), 0 otherwise.
 *
 * NB. @v is reused as a scratch iterator in the poll-wake loop below.
 */
static int evtchn_set_pending(struct vcpu *v, int port)
{
    struct domain *d = v->domain;
    int vcpuid;

    /*
     * The following bit operations must happen in strict order.
     * NB. On x86, the atomic bit operations also act as memory barriers.
     * There is therefore sufficiently strict ordering for this architecture --
     * others may require explicit memory barriers.
     */

    /* Already pending: the event is merged with the existing one. */
    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return 1;

    /*
     * Only notify if the port is unmasked AND this is the first pending
     * port in its selector word (the selector bit was previously clear).
     */
    if ( !test_bit        (port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
    {
        vcpu_mark_events_pending(v);
    }
    
    /* Check if some VCPU might be polling for this event. */
    if ( likely(bitmap_empty(d->poll_mask, d->max_vcpus)) )
        return 0;

    /* Wake any interested (or potentially interested) pollers. */
    for ( vcpuid = find_first_bit(d->poll_mask, d->max_vcpus);
          vcpuid < d->max_vcpus;
          vcpuid = find_next_bit(d->poll_mask, d->max_vcpus, vcpuid+1) )
    {
        v = d->vcpu[vcpuid];
        /*
         * poll_evtchn <= 0 means "polling for any event"; otherwise wake
         * only a poller waiting on exactly this port.  The atomic
         * test_and_clear ensures a racing waker unblocks the VCPU once.
         */
        if ( ((v->poll_evtchn <= 0) || (v->poll_evtchn == port)) &&
             test_and_clear_bit(vcpuid, d->poll_mask) )
        {
            v->poll_evtchn = 0;
            vcpu_unblock(v);
        }
    }

    return 0;
}