/*
 * Move every vCPU of domain d over to the new cpupool/scheduler:
 * migrate the per-vCPU timers, widen the affinities, switch
 * v->processor and the scheduler-private data, then insert the vCPU
 * into the new scheduler and free the old private data.
 * NOTE(review): relies on vcpu_priv[] having been pre-allocated for
 * every vcpu_id before this loop runs -- confirm in enclosing function.
 */
for_each_vcpu ( d, v )
{
    spinlock_t *lock;

    /* Stash the old scheduler-private data so it can be freed below. */
    vcpudata = v->sched_priv;

    /* Per-vCPU timers must fire on a CPU belonging to the new pool. */
    migrate_timer(&v->periodic_timer, new_p);
    migrate_timer(&v->singleshot_timer, new_p);
    migrate_timer(&v->poll_timer, new_p);

    /*
     * Reset affinities to "all": the old masks may reference CPUs that
     * are not valid in the new pool.
     */
    cpumask_setall(v->cpu_hard_affinity);
    cpumask_setall(v->cpu_soft_affinity);

    lock = vcpu_schedule_lock_irq(v);
    v->processor = new_p;
    /*
     * With v->processor modified we must not
     * - make any further changes assuming we hold the scheduler lock,
     * - use vcpu_schedule_unlock_irq().
     */
    spin_unlock_irq(lock);

    v->sched_priv = vcpu_priv[v->vcpu_id];
    if ( !d->is_dying )
        sched_move_irqs(v);

    /* Spread vCPUs round-robin across the valid CPUs of the new pool. */
    new_p = cpumask_cycle(new_p, c->cpu_valid);

    SCHED_OP(c->sched, insert_vcpu, v);

    /* Old scheduler's per-vCPU data is no longer referenced; free it. */
    SCHED_OP(old_ops, free_vdata, vcpudata);
}
/*
 * Allocate and configure one dom0 vCPU.
 *
 * @d:        dom0's domain structure.
 * @vcpu_id:  ID of the vCPU to create.
 * @prev_cpu: CPU the previous vCPU was placed on; the new vCPU goes on
 *            the next CPU after it in dom0_cpus.
 *
 * Returns the new vCPU on success, NULL if allocation failed.
 */
struct vcpu *__init dom0_setup_vcpu(struct domain *d,
                                    unsigned int vcpu_id,
                                    unsigned int prev_cpu)
{
    unsigned int cpu = cpumask_cycle(prev_cpu, &dom0_cpus);
    struct vcpu *v = alloc_vcpu(d, vcpu_id, cpu);

    if ( v == NULL )
        return NULL;

    /*
     * Restrict the hard affinity to dom0_cpus unless dom0 is pinned or
     * relaxed affinity was requested; the soft affinity always tracks
     * dom0_cpus.
     */
    if ( !d->is_pinned && !dom0_affinity_relaxed )
        cpumask_copy(v->cpu_hard_affinity, &dom0_cpus);
    cpumask_copy(v->cpu_soft_affinity, &dom0_cpus);

    return v;
}