int mem_event_check_ring(struct domain *d)
{
    struct vcpu *curr = current;
    int free_requests;
    int ring_full;

    if ( !d->mem_event.ring_page )
        return -1;

    mem_event_ring_lock(d);

    free_requests = RING_FREE_REQUESTS(&d->mem_event.front_ring);
    if ( unlikely(free_requests < 2) )
    {
        gdprintk(XENLOG_INFO, "free request slots: %d\n", free_requests);
        WARN_ON(free_requests == 0);
    }
    ring_full = free_requests < MEM_EVENT_RING_THRESHOLD ? 1 : 0;

    if ( (curr->domain->domain_id == d->domain_id) && ring_full )
    {
        set_bit(_VPF_mem_event, &curr->pause_flags);
        vcpu_sleep_nosync(curr);
    }

    mem_event_ring_unlock(d);

    return ring_full;
}
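The pause taken above is one-sided: mem_event_check_ring() only puts the producer vcpu to sleep. A wake-side counterpart is needed once the consumer drains the ring. A minimal sketch follows; the helper name is hypothetical, and a vcpu_wake() primitive symmetric to vcpu_sleep_nosync() is assumed.

/* Sketch of the wake-side counterpart (hypothetical helper; assumes a
 * vcpu_wake() primitive symmetric to vcpu_sleep_nosync()): once the
 * ring has drained, clear the flag set above and reschedule the vcpu. */
static void sketch_mem_event_unpause_vcpus(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
        if ( test_and_clear_bit(_VPF_mem_event, &v->pause_flags) )
            vcpu_wake(v);
}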
Example #2
File: vpsci.c  Project: Fantu/Xen
int32_t do_psci_cpu_off(uint32_t power_state)
{
    struct vcpu *v = current;
    if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
        vcpu_sleep_nosync(v);
    return PSCI_SUCCESS;
}
Example #3
void vcpu_sleep_sync(struct vcpu *v)
{
    vcpu_sleep_nosync(v);

    while ( !vcpu_runnable(v) && v->is_running )
        cpu_relax();

    sync_vcpu_execstate(v);
}
Example #4
/*
 * Force a VCPU through a deschedule/reschedule path.
 * For example, using this when setting the periodic timer period means that
 * most periodic-timer state need only be touched from within the scheduler
 * which can thus be done without need for synchronisation.
 */
void vcpu_force_reschedule(struct vcpu *v)
{
    vcpu_schedule_lock_irq(v);
    if ( v->is_running )
        set_bit(_VPF_migrating, &v->pause_flags);
    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }
}
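Examples #7 and #8 below repeat the same three-step idiom seen here. Condensed into a hypothetical helper (with the caveat that the real callers set the flag under vcpu_schedule_lock_irq()):

/* Condensed sketch of the recurring migrate idiom (hypothetical helper;
 * the real callers take vcpu_schedule_lock_irq() around the flag set): */
static void sketch_evict_vcpu(struct vcpu *v)
{
    set_bit(_VPF_migrating, &v->pause_flags);  /* mark while it runs  */
    vcpu_sleep_nosync(v);                      /* request deschedule  */
    vcpu_migrate(v);                           /* pick a new pCPU     */
}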
Example #5
void domain_pause_for_debugger(void)
{
    struct domain *d = current->domain;
    struct vcpu *v;

    atomic_inc(&d->pause_count);
    if ( test_and_set_bool(d->is_paused_by_controller) )
        domain_unpause(d); /* race-free atomic_dec(&d->pause_count) */

    for_each_vcpu ( d, v )
        vcpu_sleep_nosync(v);

    send_guest_global_virq(dom0, VIRQ_DEBUGGER);
}
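The test_and_set_bool() dance above keeps the controller's pause reference race-free: if the domain was already controller-paused, the reference just taken is immediately dropped again via domain_unpause(). The matching release path would follow the same shape (a sketch, assuming a test_and_clear_bool() mirroring the test_and_set_bool() above):

/* Sketch of the matching release path (assumes test_and_clear_bool()
 * mirroring test_and_set_bool()): drop the controller's pause
 * reference exactly once, no matter how often this is called. */
void sketch_domain_unpause_by_controller(struct domain *d)
{
    if ( test_and_clear_bool(d->is_paused_by_controller) )
        domain_unpause(d);
}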
Example #6
void domain_shutdown(struct domain *d, u8 reason)
{
    struct vcpu *v;

    if ( d->domain_id == 0 )
    {
        extern void machine_restart(char *);
        extern void machine_halt(void);

        debugger_trap_immediate();

        if ( reason == SHUTDOWN_poweroff ) 
        {
            printk("Domain 0 halted: halting machine.\n");
            machine_halt();
        }
        else if ( reason == SHUTDOWN_crash )
        {
            printk("Domain 0 crashed: rebooting machine in 5 seconds.\n");
            watchdog_disable();
            //mdelay(5000);
            machine_restart(0);
        }
        else
        {
            printk("Domain 0 shutdown: rebooting machine.\n");
            machine_restart(0);
        }
    }

    /* Mark the domain as shutting down. */
    d->shutdown_code = reason;

    /* Put every vcpu to sleep, but don't wait (avoids inter-vcpu deadlock). */
    for_each_vcpu ( d, v )
    {
        atomic_inc(&v->pausecnt);
        vcpu_sleep_nosync(v);
    }
    /* (remainder of function elided in this excerpt) */
}
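The "don't wait" in the comment above is what makes this loop safe: a vcpu can put itself to sleep with the nosync variant, and peers shutting down concurrently cannot spin on each other. A sketch of the anti-pattern the comment guards against:

/* Anti-pattern sketch: using the synchronous variant here could spin
 * forever, e.g. on 'current' itself (which is running this very loop),
 * or on a peer vcpu stuck in the same loop waiting for us. */
for_each_vcpu ( d, v )
{
    atomic_inc(&v->pausecnt);
    vcpu_sleep_sync(v);   /* WRONG: may never see v go off-CPU */
}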
Example #7
static int __vcpu_set_affinity(
    struct vcpu *v, cpumask_t *affinity,
    bool_t old_lock_status, bool_t new_lock_status)
{
    cpumask_t online_affinity, old_affinity;

    cpus_and(online_affinity, *affinity, cpu_online_map);
    if ( cpus_empty(online_affinity) )
        return -EINVAL;

    vcpu_schedule_lock_irq(v);

    if ( v->affinity_locked != old_lock_status )
    {
        BUG_ON(!v->affinity_locked);
        vcpu_schedule_unlock_irq(v);
        return -EBUSY;
    }

    v->affinity_locked = new_lock_status;

    old_affinity = v->cpu_affinity;
    v->cpu_affinity = *affinity;
    *affinity = old_affinity;
    if ( !cpu_isset(v->processor, v->cpu_affinity) )
        set_bit(_VPF_migrating, &v->pause_flags);

    vcpu_schedule_unlock_irq(v);

    if ( test_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_sleep_nosync(v);
        vcpu_migrate(v);
    }

    return 0;
}
Example #8
File: schedule.c  Project: amodj/Utopia
/*
 * This function is used by cpu_hotplug code from stop_machine context.
 * Hence we can avoid needing to take the schedule_lock.
 */
void cpu_disable_scheduler(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int cpu = smp_processor_id();

    for_each_domain ( d )
    {
        for_each_vcpu ( d, v )
        {
            if ( is_idle_vcpu(v) )
                continue;

            if ( (cpus_weight(v->cpu_affinity) == 1) &&
                 cpu_isset(cpu, v->cpu_affinity) )
            {
                printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                        v->domain->domain_id, v->vcpu_id);
                cpus_setall(v->cpu_affinity);
            }

            /*
             * Migrate single-shot timers to CPU0. A new cpu will automatically
             * be chosen when the timer is next re-set.
             */
            if ( v->singleshot_timer.cpu == cpu )
                migrate_timer(&v->singleshot_timer, 0);

            if ( v->processor == cpu )
            {
                set_bit(_VPF_migrating, &v->pause_flags);
                vcpu_sleep_nosync(v);
                vcpu_migrate(v);
            }
        }
    }
}
Example #9
void mem_event_mark_and_pause(struct vcpu *v)
{
    set_bit(_VPF_mem_event, &v->pause_flags);
    vcpu_sleep_nosync(v);
}
Example #10
int hvm_load(struct domain *d, hvm_domain_context_t *h)
{
    char *c;
    uint64_t cset;
    struct hvm_save_header hdr;
    struct hvm_save_descriptor *desc;
    hvm_load_handler handler;
    struct vcpu *v;
    
    /* Read the save header, which must be first */
    if ( hvm_load_entry(HEADER, h, &hdr) != 0 ) 
        return -1;

    if ( arch_hvm_load(d, &hdr) )
        return -1;

    c = strrchr(xen_changeset(), ':');
    if ( hdr.changeset == -1ULL )
        gdprintk(XENLOG_WARNING, 
                 "HVM restore: Xen changeset was not saved.\n");
    else if ( c == NULL )
        gdprintk(XENLOG_WARNING, 
                 "HVM restore: Xen changeset is not available.\n");
    else
    {
        cset = simple_strtoll(c, NULL, 16);
        if ( hdr.changeset != cset )
            gdprintk(XENLOG_WARNING,
                     "HVM restore: saved Xen changeset (%#"PRIx64
                     ") does not match host (%#"PRIx64").\n",
                     hdr.changeset, cset);
    }

    /* Down all the vcpus: we only re-enable the ones that had state saved. */
    for_each_vcpu(d, v) 
        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
            vcpu_sleep_nosync(v);

    for ( ; ; )
    {
        if ( h->size - h->cur < sizeof(struct hvm_save_descriptor) )
        {
            /* Run out of data */
            gdprintk(XENLOG_ERR, 
                     "HVM restore: save did not end with a null entry\n");
            return -1;
        }
        
        /* Read the typecode of the next entry  and check for the end-marker */
        desc = (struct hvm_save_descriptor *)(&h->data[h->cur]);
        if ( desc->typecode == 0 )
            return 0; 
        
        /* Find the handler for this entry */
        if ( (desc->typecode > HVM_SAVE_CODE_MAX) ||
             ((handler = hvm_sr_handlers[desc->typecode].load) == NULL) )
        {
            gdprintk(XENLOG_ERR, 
                     "HVM restore: unknown entry typecode %u\n", 
                     desc->typecode);
            return -1;
        }

        /* Load the entry */
        gdprintk(XENLOG_INFO, "HVM restore: %s %"PRIu16"\n",  
                 hvm_sr_handlers[desc->typecode].name, desc->instance);
        if ( handler(d, h) != 0 ) 
        {
            gdprintk(XENLOG_ERR, 
                     "HVM restore: failed to load entry %u/%u\n", 
                     desc->typecode, desc->instance);
            return -1;
        }
    }

    /* Not reached */
}
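The loop near the top downs every vcpu; re-enabling happens inside the per-vcpu load handler, roughly as sketched below. The helper name is hypothetical (in the real tree this logic lives in hvm_load_cpu_ctxt), and a vcpu_wake() primitive is assumed as the counterpart to vcpu_sleep_nosync().

/* Sketch of the re-enable side referenced by the "Down all the vcpus"
 * comment: only vcpus that had state saved are brought back up. */
static int sketch_load_cpu_ctxt(struct domain *d, struct vcpu *v)
{
    /* ... restore v's register state from the save record ... */
    if ( test_and_clear_bit(_VPF_down, &v->pause_flags) )
        vcpu_wake(v);
    return 0;
}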
Example #11
IA64FAULT
ia64_hypercall(struct pt_regs *regs)
{
	struct vcpu *v = current;
	struct sal_ret_values x;
	efi_status_t efi_ret_value;
	fpswa_ret_t fpswa_ret;
	IA64FAULT fault; 
	unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;

	perfc_incra(fw_hypercall, index >> 8);
	switch (index) {
	case FW_HYPERCALL_XEN:
		return xen_hypercall(regs);

	case FW_HYPERCALL_XEN_FAST:
		return xen_fast_hypercall(regs);

	case FW_HYPERCALL_PAL_CALL:
		//printk("*** PAL hypercall: index=%d\n",regs->r28);
		//FIXME: This should call a C routine
#if 0
		// This is very conservative, but avoids a possible
		// (and deadly) freeze in paravirtualized domains due
		// to a yet-to-be-found bug where pending_interruption
		// is zero when it shouldn't be. Since PAL is called
		// in the idle loop, this should resolve it
		VCPU(v,pending_interruption) = 1;
#endif
		if (regs->r28 == PAL_HALT_LIGHT) {
			if (vcpu_deliverable_interrupts(v) ||
				event_pending(v)) {
				perfc_incr(idle_when_pending);
				vcpu_pend_unspecified_interrupt(v);
//printk("idle w/int#%d pending!\n",pi);
//this shouldn't happen, but it apparently does quite a bit!  so don't
//allow it to happen... i.e. if a domain has an interrupt pending and
//it tries to halt itself because it thinks it is idle, just return here
//as deliver_pending_interrupt is called on the way out and will deliver it
			}
			else {
				perfc_incr(pal_halt_light);
				migrate_timer(&v->arch.hlt_timer,
				              v->processor);
				set_timer(&v->arch.hlt_timer,
				          vcpu_get_next_timer_ns(v));
				do_sched_op_compat(SCHEDOP_block, 0);
				/* do_block only pends a softirq */
				do_softirq();
				stop_timer(&v->arch.hlt_timer);
				/* do_block() calls
				 * local_event_delivery_enable(),
				 * but PAL CALL must be called with
				 * psr.i = 0 and psr.i is unchanged.
				 * SDM vol.2 Part I 11.10.2
				 * PAL Calling Conventions.
				 */
				local_event_delivery_disable();
			}
			regs->r8 = 0;
			regs->r9 = 0;
			regs->r10 = 0;
			regs->r11 = 0;
		}
		else {
			struct ia64_pal_retval y;

			if (regs->r28 >= PAL_COPY_PAL)
				y = xen_pal_emulator
					(regs->r28, vcpu_get_gr (v, 33),
					 vcpu_get_gr (v, 34),
					 vcpu_get_gr (v, 35));
			else
				y = xen_pal_emulator(regs->r28,regs->r29,
						     regs->r30,regs->r31);
			regs->r8 = y.status; regs->r9 = y.v0;
			regs->r10 = y.v1; regs->r11 = y.v2;
		}
		break;
	case FW_HYPERCALL_SAL_CALL:
		x = sal_emulator(vcpu_get_gr(v,32),vcpu_get_gr(v,33),
			vcpu_get_gr(v,34),vcpu_get_gr(v,35),
			vcpu_get_gr(v,36),vcpu_get_gr(v,37),
			vcpu_get_gr(v,38),vcpu_get_gr(v,39));
		regs->r8 = x.r8; regs->r9 = x.r9;
		regs->r10 = x.r10; regs->r11 = x.r11;
		break;
	case FW_HYPERCALL_SAL_RETURN:
	        if ( !test_and_set_bit(_VPF_down, &v->pause_flags) )
			vcpu_sleep_nosync(v);
		break;
	case FW_HYPERCALL_EFI_CALL:
		efi_ret_value = efi_emulator (regs, &fault);
		if (fault != IA64_NO_FAULT) return fault;
		regs->r8 = efi_ret_value;
		break;
	case FW_HYPERCALL_IPI:
		fw_hypercall_ipi (regs);
		break;
	case FW_HYPERCALL_SET_SHARED_INFO_VA:
	        regs->r8 = domain_set_shared_info_va (regs->r28);
		break;
	case FW_HYPERCALL_FPSWA_BASE:
		switch (regs->r2) {
		case FW_HYPERCALL_FPSWA_BROKEN:
			gdprintk(XENLOG_WARNING,
				 "Old fpswa hypercall was called (0x%lx).\n"
				 "Please update your domain builder. ip 0x%lx\n",
				 FW_HYPERCALL_FPSWA_BROKEN, regs->cr_iip);
			fpswa_ret = fw_hypercall_fpswa_error();
			break;
		case FW_HYPERCALL_FPSWA:
			fpswa_ret = fw_hypercall_fpswa(v, regs);
			break;
		default:
			gdprintk(XENLOG_ERR, "unknown fpswa hypercall %lx\n",
				 regs->r2);
			fpswa_ret = fw_hypercall_fpswa_error();
			break;
		}
		regs->r8  = fpswa_ret.status;
		regs->r9  = fpswa_ret.err0;
		regs->r10 = fpswa_ret.err1;
		regs->r11 = fpswa_ret.err2;
		break;
	case __HYPERVISOR_opt_feature:
	{
		XEN_GUEST_HANDLE(void) arg;
		struct xen_ia64_opt_feature optf;
		set_xen_guest_handle(arg, (void*)(vcpu_get_gr(v, 32)));
		if (copy_from_guest(&optf, arg, 1) == 0)
			regs->r8 = domain_opt_feature(v->domain, &optf);
		else
			regs->r8 = -EFAULT;
		break;
	}
	case FW_HYPERCALL_SIOEMU:
		sioemu_hypercall(regs);
		break;
	default:
		printk("unknown ia64 fw hypercall %lx\n", regs->r2);
		regs->r8 = do_ni_hypercall();
	}
	return IA64_NO_FAULT;
}
Example #12
void vcpu_pause_nosync(struct vcpu *v)
{
    atomic_inc(&v->pause_count);
    vcpu_sleep_nosync(v);
}
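vcpu_pause_nosync() takes a pause reference without waiting for the target to stop running. Its natural counterpart drops the reference and wakes the vcpu only when the last pauser releases it (a sketch, assuming an atomic_dec_and_test() primitive and vcpu_wake()):

/* Sketch of the counterpart (assumes atomic_dec_and_test() and
 * vcpu_wake()): the vcpu becomes runnable again only after every
 * pauser has dropped its reference. */
void sketch_vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}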