for_each_vcpu ( d, v )
{
    spinlock_t *lock;

    vcpudata = v->sched_priv;

    migrate_timer(&v->periodic_timer, new_p);
    migrate_timer(&v->singleshot_timer, new_p);
    migrate_timer(&v->poll_timer, new_p);

    cpumask_setall(v->cpu_hard_affinity);
    cpumask_setall(v->cpu_soft_affinity);

    lock = vcpu_schedule_lock_irq(v);
    v->processor = new_p;
    /*
     * With v->processor modified we must not
     * - make any further changes assuming we hold the scheduler lock,
     * - use vcpu_schedule_unlock_irq().
     */
    spin_unlock_irq(lock);

    v->sched_priv = vcpu_priv[v->vcpu_id];
    if ( !d->is_dying )
        sched_move_irqs(v);

    new_p = cpumask_cycle(new_p, c->cpu_valid);

    SCHED_OP(c->sched, insert_vcpu, v);

    SCHED_OP(old_ops, free_vdata, vcpudata);
}
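The comment above exists because the scheduler lock is not stored in the vcpu; it is looked up through v->processor. A simplified sketch of that lookup (modelled on the lock helpers in xen/common/schedule.c; the exact macros vary across Xen versions, so treat this as illustrative):

/*
 * Simplified sketch: find the scheduler lock through v->processor,
 * retrying until the lookup is stable.  Once v->processor has been
 * rewritten under the lock, vcpu_schedule_unlock_irq() would look up
 * (and release) the *new* CPU's lock, which is why the loop above
 * keeps the raw spinlock_t pointer and calls spin_unlock_irq() on it
 * directly.
 */
static inline spinlock_t *vcpu_schedule_lock_irq(struct vcpu *v)
{
    for ( ; ; )
    {
        spinlock_t *lock = per_cpu(schedule_data, v->processor).schedule_lock;

        spin_lock_irq(lock);
        if ( likely(lock == per_cpu(schedule_data, v->processor).schedule_lock) )
            return lock;
        spin_unlock_irq(lock);
    }
}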
void rtc_migrate_timers(struct vcpu *v)
{
    RTCState *s = vcpu_vrtc(v);

    if ( v->vcpu_id == 0 )
    {
        migrate_timer(&s->update_timer, v->processor);
        migrate_timer(&s->update_timer2, v->processor);
        migrate_timer(&s->alarm_timer, v->processor);
    }
}
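All of these call sites use Xen's common timer API (xen/include/xen/timer.h). A minimal, self-contained usage sketch follows; the callback name, timer variable, and 1ms timeout are illustrative, not taken from the source:

#include <xen/timer.h>
#include <xen/time.h>

/* Illustrative callback; any void (*)(void *) will do. */
static void my_timeout_fn(void *data)
{
    /* Runs in softirq context on whichever CPU currently owns the timer. */
}

static struct timer my_timer;

static void example(unsigned int new_cpu)
{
    /* Bind the timer to the current CPU's timer heap... */
    init_timer(&my_timer, my_timeout_fn, NULL, smp_processor_id());
    /* ...arm it to fire 1ms from now... */
    set_timer(&my_timer, NOW() + MILLISECS(1));
    /* ...and later re-home it, e.g. after the owning vcpu moves. */
    migrate_timer(&my_timer, new_cpu);
}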
int virt_timer_restore(struct vcpu *v)
{
    ASSERT(!is_idle_vcpu(v));

    stop_timer(&v->arch.virt_timer.timer);
    migrate_timer(&v->arch.virt_timer.timer, v->processor);
    migrate_timer(&v->arch.phys_timer.timer, v->processor);

    WRITE_SYSREG64(v->domain->arch.virt_timer_base.offset, CNTVOFF_EL2);
    WRITE_SYSREG64(v->arch.virt_timer.cval, CNTV_CVAL_EL0);
    WRITE_SYSREG32(v->arch.virt_timer.ctl, CNTV_CTL_EL0);
    return 0;
}
/*
 * This function is used by cpu_hotplug code from stop_machine context.
 * Hence we can avoid needing to take the schedule_lock.
 */
void cpu_disable_scheduler(void)
{
    struct domain *d;
    struct vcpu *v;
    unsigned int cpu = smp_processor_id();

    for_each_domain ( d )
    {
        for_each_vcpu ( d, v )
        {
            if ( is_idle_vcpu(v) )
                continue;

            if ( (cpus_weight(v->cpu_affinity) == 1) &&
                 cpu_isset(cpu, v->cpu_affinity) )
            {
                printk("Breaking vcpu affinity for domain %d vcpu %d\n",
                       v->domain->domain_id, v->vcpu_id);
                cpus_setall(v->cpu_affinity);
            }

            /*
             * Migrate single-shot timers to CPU0. A new cpu will automatically
             * be chosen when the timer is next re-set.
             */
            if ( v->singleshot_timer.cpu == cpu )
                migrate_timer(&v->singleshot_timer, 0);

            if ( v->processor == cpu )
            {
                set_bit(_VPF_migrating, &v->pause_flags);
                vcpu_sleep_nosync(v);
                vcpu_migrate(v);
            }
        }
    }
}
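The "next re-set" behaviour the comment relies on comes from the guest's set_timer_op hypercall path, which re-homes the singleshot timer onto the current CPU before arming it. A simplified sketch, modelled on do_set_timer_op() in xen/common/schedule.c (error and overflow handling omitted):

/*
 * Sketch of why parking the singleshot timer on CPU0 is safe: the next
 * set_timer_op from the guest migrates the timer back to whichever CPU
 * the vcpu is then running on before re-arming it.  (Simplified.)
 */
long do_set_timer_op(s_time_t timeout)
{
    struct vcpu *v = current;

    if ( timeout == 0 )
    {
        stop_timer(&v->singleshot_timer);
    }
    else
    {
        migrate_timer(&v->singleshot_timer, smp_processor_id());
        set_timer(&v->singleshot_timer, timeout);
    }

    return 0;
}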
IA64FAULT
ia64_hypercall(struct pt_regs *regs)
{
    struct vcpu *v = current;
    struct sal_ret_values x;
    efi_status_t efi_ret_value;
    fpswa_ret_t fpswa_ret;
    IA64FAULT fault;
    unsigned long index = regs->r2 & FW_HYPERCALL_NUM_MASK_HIGH;

    perfc_incra(fw_hypercall, index >> 8);
    switch (index) {
    case FW_HYPERCALL_XEN:
        return xen_hypercall(regs);

    case FW_HYPERCALL_XEN_FAST:
        return xen_fast_hypercall(regs);

    case FW_HYPERCALL_PAL_CALL:
        //printk("*** PAL hypercall: index=%d\n", regs->r28);
        //FIXME: This should call a C routine
#if 0
        // This is very conservative, but avoids a possible
        // (and deadly) freeze in paravirtualized domains due
        // to a yet-to-be-found bug where pending_interruption
        // is zero when it shouldn't be.  Since PAL is called
        // in the idle loop, this should resolve it
        VCPU(v, pending_interruption) = 1;
#endif
        if (regs->r28 == PAL_HALT_LIGHT) {
            if (vcpu_deliverable_interrupts(v) || event_pending(v)) {
                perfc_incr(idle_when_pending);
                vcpu_pend_unspecified_interrupt(v);
                //printk("idle w/int#%d pending!\n", pi);
                // This shouldn't happen, but it apparently does quite
                // a bit!  So don't allow it to happen... i.e. if a
                // domain has an interrupt pending and it tries to halt
                // itself because it thinks it is idle, just return here
                // as deliver_pending_interrupt is called on the way out
                // and will deliver it.
            } else {
                perfc_incr(pal_halt_light);
                migrate_timer(&v->arch.hlt_timer, v->processor);
                set_timer(&v->arch.hlt_timer,
                          vcpu_get_next_timer_ns(v));
                do_sched_op_compat(SCHEDOP_block, 0);
                /* do_block only pends a softirq */
                do_softirq();
                stop_timer(&v->arch.hlt_timer);
                /* do_block() calls
                 * local_event_delivery_enable(),
                 * but PAL CALL must be called with
                 * psr.i = 0 and psr.i is unchanged.
                 * SDM vol.2 Part I 11.10.2
                 * PAL Calling Conventions.
                 */
                local_event_delivery_disable();
            }
            regs->r8 = 0;
            regs->r9 = 0;
            regs->r10 = 0;
            regs->r11 = 0;
        } else {
            struct ia64_pal_retval y;

            if (regs->r28 >= PAL_COPY_PAL)
                y = xen_pal_emulator(regs->r28, vcpu_get_gr(v, 33),
                                     vcpu_get_gr(v, 34),
                                     vcpu_get_gr(v, 35));
            else
                y = xen_pal_emulator(regs->r28, regs->r29,
                                     regs->r30, regs->r31);
            regs->r8 = y.status;
            regs->r9 = y.v0;
            regs->r10 = y.v1;
            regs->r11 = y.v2;
        }
        break;

    case FW_HYPERCALL_SAL_CALL:
        x = sal_emulator(vcpu_get_gr(v, 32), vcpu_get_gr(v, 33),
                         vcpu_get_gr(v, 34), vcpu_get_gr(v, 35),
                         vcpu_get_gr(v, 36), vcpu_get_gr(v, 37),
                         vcpu_get_gr(v, 38), vcpu_get_gr(v, 39));
        regs->r8 = x.r8;
        regs->r9 = x.r9;
        regs->r10 = x.r10;
        regs->r11 = x.r11;
        break;

    case FW_HYPERCALL_SAL_RETURN:
        if (!test_and_set_bit(_VPF_down, &v->pause_flags))
            vcpu_sleep_nosync(v);
        break;

    case FW_HYPERCALL_EFI_CALL:
        efi_ret_value = efi_emulator(regs, &fault);
        if (fault != IA64_NO_FAULT)
            return fault;
        regs->r8 = efi_ret_value;
        break;

    case FW_HYPERCALL_IPI:
        fw_hypercall_ipi(regs);
        break;

    case FW_HYPERCALL_SET_SHARED_INFO_VA:
        regs->r8 = domain_set_shared_info_va(regs->r28);
        break;

    case FW_HYPERCALL_FPSWA_BASE:
        switch (regs->r2) {
        case FW_HYPERCALL_FPSWA_BROKEN:
            gdprintk(XENLOG_WARNING,
                     "Old fpswa hypercall was called (0x%lx).\n"
                     "Please update your domain builder. ip 0x%lx\n",
                     FW_HYPERCALL_FPSWA_BROKEN, regs->cr_iip);
            fpswa_ret = fw_hypercall_fpswa_error();
            break;
        case FW_HYPERCALL_FPSWA:
            fpswa_ret = fw_hypercall_fpswa(v, regs);
            break;
        default:
            gdprintk(XENLOG_ERR, "unknown fpswa hypercall %lx\n",
                     regs->r2);
            fpswa_ret = fw_hypercall_fpswa_error();
            break;
        }
        regs->r8 = fpswa_ret.status;
        regs->r9 = fpswa_ret.err0;
        regs->r10 = fpswa_ret.err1;
        regs->r11 = fpswa_ret.err2;
        break;

    case __HYPERVISOR_opt_feature: {
        XEN_GUEST_HANDLE(void) arg;
        struct xen_ia64_opt_feature optf;

        set_xen_guest_handle(arg, (void *)(vcpu_get_gr(v, 32)));
        if (copy_from_guest(&optf, arg, 1) == 0)
            regs->r8 = domain_opt_feature(v->domain, &optf);
        else
            regs->r8 = -EFAULT;
        break;
    }

    case FW_HYPERCALL_SIOEMU:
        sioemu_hypercall(regs);
        break;

    default:
        printk("unknown ia64 fw hypercall %lx\n", regs->r2);
        regs->r8 = do_ni_hypercall();
    }
    return IA64_NO_FAULT;
}