static void vcpu_migrate(struct vcpu *v)
{
    unsigned long flags;
    int old_cpu;

    vcpu_schedule_lock_irqsave(v, flags);

    /*
     * NB. Check of v->is_running happens /after/ setting migration flag
     * because they both happen in (different) spinlock regions, and those
     * regions are strictly serialised.
     */
    if ( v->is_running ||
         !test_and_clear_bit(_VPF_migrating, &v->pause_flags) )
    {
        vcpu_schedule_unlock_irqrestore(v, flags);
        return;
    }

    /* Switch to new CPU, then unlock old CPU. */
    old_cpu = v->processor;
    v->processor = SCHED_OP(pick_cpu, v);
    spin_unlock_irqrestore(
        &per_cpu(schedule_data, old_cpu).schedule_lock, flags);

    /* Wake on new CPU. */
    vcpu_wake(v);
}
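/*
 * Why the unlock above names old_cpu explicitly: v->processor can change
 * while the lock is held, so the acquiring side must re-check it after
 * taking the lock. A minimal sketch of that retry loop, assuming the
 * per_cpu(schedule_data, cpu).schedule_lock layout used above (the real
 * vcpu_schedule_lock* helpers follow this pattern; exact signatures
 * differ by tree):
 */
static inline void vcpu_schedule_lock_sketch(struct vcpu *v)
{
    unsigned int cpu;

    for ( ; ; )
    {
        /* Snapshot the CPU, take its lock, then confirm nothing moved. */
        cpu = v->processor;
        spin_lock(&per_cpu(schedule_data, cpu).schedule_lock);
        if ( likely(v->processor == cpu) )
            break;
        /* vcpu migrated between snapshot and lock: retry on new CPU. */
        spin_unlock(&per_cpu(schedule_data, cpu).schedule_lock);
    }
}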
void mem_event_unpause_vcpus(struct domain *d)
{
    struct vcpu *v;

    for_each_vcpu ( d, v )
        if ( test_and_clear_bit(_VPF_mem_event, &v->pause_flags) )
            vcpu_wake(v);
}
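/*
 * The pausing counterpart to the snippet above, for context: a vcpu is
 * flagged with _VPF_mem_event and put to sleep until the mem_event ring
 * drains. A minimal sketch modelled on Xen's mem_event_mark_and_pause();
 * treat it as illustrative, the helper's exact home varies by tree:
 */
void mem_event_mark_and_pause_sketch(struct vcpu *v)
{
    /* Flag first, then sleep; the waker clears the flag before waking. */
    set_bit(_VPF_mem_event, &v->pause_flags);
    vcpu_sleep_nosync(v);
}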
void domain_unpause(struct domain *d)
{
    struct vcpu *v;

    if ( atomic_dec_and_test(&d->pause_count) )
        for_each_vcpu( d, v )
            vcpu_wake(v);
}
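/*
 * domain_unpause() only wakes vcpus when the last pause reference is
 * dropped. For context, a minimal sketch of the matching pause side,
 * assuming the same atomic pause_count protocol (modelled on Xen's
 * domain_pause(); exact assertions vary by tree):
 */
void domain_pause_sketch(struct domain *d)
{
    struct vcpu *v;

    /* Pausing your own domain would deadlock in the sync below. */
    ASSERT(d != current->domain);

    /* Each pause takes a reference; unpause wakes only on the last drop. */
    atomic_inc(&d->pause_count);

    for_each_vcpu( d, v )
        vcpu_sleep_sync(v);
}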
int do_psci_cpu_on(uint32_t vcpuid, register_t entry_point)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;
    int is_thumb = entry_point & 1;

    /* vcpuid is unsigned, so only the upper bound needs checking. */
    if ( vcpuid >= MAX_VIRT_CPUS )
        return PSCI_EINVAL;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_EINVAL;

    /* THUMB set is not allowed with 64-bit domain */
    if ( is_pv64_domain(d) && is_thumb )
        return PSCI_EINVAL;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_GUEST_INIT;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */
    if ( is_pv32_domain(d) )
        ctxt->user_regs.cpsr = PSR_GUEST32_INIT;
#ifdef CONFIG_ARM_64
    else
        ctxt->user_regs.cpsr = PSR_GUEST64_INIT;
#endif

    /* Start the VCPU with THUMB set if it's requested by the kernel */
    if ( is_thumb )
        ctxt->user_regs.cpsr |= PSR_THUMB;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);

    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}
static void fw_hypercall_ipi (struct pt_regs *regs)
{
    int cpu = regs->r14;
    int vector = regs->r15;
    struct vcpu *targ;
    struct domain *d = current->domain;

    /* Be sure the target exists. */
    if (cpu >= d->max_vcpus)
        return;
    targ = d->vcpu[cpu];
    if (targ == NULL)
        return;

    if (vector == XEN_SAL_BOOT_RENDEZ_VEC
        && (!targ->is_initialised
            || test_bit(_VPF_down, &targ->pause_flags))) {

        /* First start: initialize vcpu. */
        if (!targ->is_initialised) {
            if (arch_set_info_guest (targ, NULL) != 0) {
                printk ("arch_boot_vcpu: failure\n");
                return;
            }
        }

        /* First or next rendez-vous: set registers. */
        vcpu_init_regs (targ);
        vcpu_regs (targ)->cr_iip = d->arch.sal_data->boot_rdv_ip;
        vcpu_regs (targ)->r1 = d->arch.sal_data->boot_rdv_r1;
        vcpu_regs (targ)->b0 = FW_HYPERCALL_SAL_RETURN_PADDR;

        if (test_and_clear_bit(_VPF_down, &targ->pause_flags)) {
            vcpu_wake(targ);
            printk(XENLOG_INFO "arch_boot_vcpu: vcpu %d awakened\n",
                   targ->vcpu_id);
        }
        else
            printk ("arch_boot_vcpu: already awake!\n");
    }
    else {
        int running = targ->is_running;
        vcpu_pend_interrupt(targ, vector);
        vcpu_unblock(targ);
        if (running)
            smp_send_event_check_cpu(targ->processor);
    }
    return;
}
int do_psci_cpu_on(uint32_t vcpuid, register_t entry_point)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;

    /* vcpuid is unsigned, so only the upper bound needs checking. */
    if ( vcpuid >= MAX_VIRT_CPUS )
        return PSCI_EINVAL;

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_EINVAL;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_BASE;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */
    ctxt->user_regs.cpsr = PSR_GUEST_INIT;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);

    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}
void vcpu_unblock(struct vcpu *v)
{
    if ( !test_and_clear_bit(_VPF_blocked, &v->pause_flags) )
        return;

    /* Polling period ends when a VCPU is unblocked. */
    if ( unlikely(v->poll_evtchn != 0) )
    {
        v->poll_evtchn = 0;
        /*
         * We *must* re-clear _VPF_blocked to avoid racing other wakeups of
         * this VCPU (and it then going back to sleep on poll_mask).
         * Test-and-clear is idiomatic here and ensures the clear_bit()
         * cannot be reordered ahead of the test.
         */
        if ( test_and_clear_bit(v->vcpu_id, v->domain->poll_mask) )
            clear_bit(_VPF_blocked, &v->pause_flags);
    }

    vcpu_wake(v);
}
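/*
 * The race vcpu_unblock() guards against comes from the polling side,
 * which publishes its state in the opposite order. A simplified sketch
 * of that side, loosely modelled on Xen's do_poll() for a single port
 * (error paths and multi-port handling trimmed; illustrative only):
 */
static long do_poll_sketch(struct vcpu *v, evtchn_port_t port)
{
    struct domain *d = v->domain;

    /* Block first, then advertise the poll, so a waker that clears our
     * poll_mask bit can safely re-clear _VPF_blocked behind us. */
    set_bit(_VPF_blocked, &v->pause_flags);
    v->poll_evtchn = port;
    set_bit(v->vcpu_id, d->poll_mask);

    smp_mb(); /* publish poll state /then/ re-check the port */

    /* Event arrived while we were setting up: undo and return. */
    if ( test_bit(port, &shared_info(d, evtchn_pending)) )
    {
        v->poll_evtchn = 0;
        clear_bit(v->vcpu_id, d->poll_mask);
        clear_bit(_VPF_blocked, &v->pause_flags);
        return 0;
    }

    raise_softirq(SCHEDULE_SOFTIRQ); /* go to sleep */
    return 0;
}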
long evtchn_send(unsigned int lport)
{
    struct evtchn *lchn, *rchn;
    struct domain *ld = current->domain, *rd;
    struct vcpu   *rvcpu;
    int            rport, ret = 0;

    spin_lock(&ld->evtchn_lock);

    if ( unlikely(!port_is_valid(ld, lport)) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    lchn = evtchn_from_port(ld, lport);

    /* Guest cannot send via a Xen-attached event channel. */
    if ( unlikely(lchn->consumer_is_xen) )
    {
        spin_unlock(&ld->evtchn_lock);
        return -EINVAL;
    }

    ret = xsm_evtchn_send(ld, lchn);
    if ( ret )
        goto out;

    switch ( lchn->state )
    {
    case ECS_INTERDOMAIN:
        rd    = lchn->u.interdomain.remote_dom;
        rport = lchn->u.interdomain.remote_port;
        rchn  = evtchn_from_port(rd, rport);
        rvcpu = rd->vcpu[rchn->notify_vcpu_id];
        if ( rchn->consumer_is_xen )
        {
            /* Xen consumers need notification only if they are blocked. */
            if ( test_and_clear_bit(_VPF_blocked_in_xen,
                                    &rvcpu->pause_flags) )
                vcpu_wake(rvcpu);
        }
        else
        {
            evtchn_set_pending(rvcpu, rport);
        }
        break;
    case ECS_IPI:
        evtchn_set_pending(ld->vcpu[lchn->notify_vcpu_id], lport);
        break;
    case ECS_UNBOUND:
        /* silently drop the notification */
        break;
    default:
        ret = -EINVAL;
    }

 out:
    spin_unlock(&ld->evtchn_lock);

    return ret;
}
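/*
 * What evtchn_set_pending() does for the non-Xen consumer paths above:
 * mark the port in a two-level pending bitmap in the shared info page,
 * then kick the vcpu. A simplified sketch of the classic 2-level ABI
 * (names follow later Xen trees and may differ; the ordering of the bit
 * operations is the essential part, and masked ports still get their
 * pending bit so an unmask can resend):
 */
static void evtchn_set_pending_sketch(struct vcpu *v, int port)
{
    struct domain *d = v->domain;

    /* Already pending: nothing more to do. */
    if ( test_and_set_bit(port, &shared_info(d, evtchn_pending)) )
        return;

    /* Not masked, and the per-vcpu selector bit was newly set: kick. */
    if ( !test_bit(port, &shared_info(d, evtchn_mask)) &&
         !test_and_set_bit(port / BITS_PER_EVTCHN_WORD(d),
                           &vcpu_info(v, evtchn_pending_sel)) )
        vcpu_mark_events_pending(v);
}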
/* Default notification action: wake up from wait_on_xen_event_channel(). */
static void default_xen_notification_fn(struct vcpu *v, unsigned int port)
{
    /* Consumer needs notification only if blocked. */
    if ( test_and_clear_bit(_VPF_blocked_in_xen, &v->pause_flags) )
        vcpu_wake(v);
}
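/*
 * The waiting side this notification function (and the Xen-consumer path
 * in evtchn_send() above) pairs with. A sketch close to Xen's
 * wait_on_xen_event_channel() macro, modulo exact barrier choice per
 * tree: set the blocked flag /before/ re-evaluating the wait condition,
 * so a waker that clears the flag in between cannot be lost.
 */
#define wait_on_xen_event_channel_sketch(port, condition)               \
    do {                                                                \
        if ( condition )                                                \
            break;                                                      \
        set_bit(_VPF_blocked_in_xen, &current->pause_flags);            \
        smp_mb(); /* set blocked status /then/ re-evaluate condition */ \
        if ( condition )                                                \
            clear_bit(_VPF_blocked_in_xen, &current->pause_flags);      \
        else                                                            \
            raise_softirq(SCHEDULE_SOFTIRQ);                            \
    } while ( 0 )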
static int do_common_cpu_on(register_t target_cpu, register_t entry_point,
                            register_t context_id, int ver)
{
    struct vcpu *v;
    struct domain *d = current->domain;
    struct vcpu_guest_context *ctxt;
    int rc;
    int is_thumb = entry_point & 1;
    register_t vcpuid;

    vcpuid = vaffinity_to_vcpuid(target_cpu);

    if ( vcpuid >= d->max_vcpus || (v = d->vcpu[vcpuid]) == NULL )
        return PSCI_INVALID_PARAMETERS;

    /* THUMB set is not allowed with 64-bit domain */
    if ( is_64bit_domain(d) && is_thumb )
        return PSCI_INVALID_PARAMETERS;

    if ( (ver == XEN_PSCI_V_0_2) &&
         !test_bit(_VPF_down, &v->pause_flags) )
        return PSCI_ALREADY_ON;

    if ( (ctxt = alloc_vcpu_guest_context()) == NULL )
        return PSCI_DENIED;

    vgic_clear_pending_irqs(v);

    memset(ctxt, 0, sizeof(*ctxt));
    ctxt->user_regs.pc64 = (u64) entry_point;
    ctxt->sctlr = SCTLR_GUEST_INIT;
    ctxt->ttbr0 = 0;
    ctxt->ttbr1 = 0;
    ctxt->ttbcr = 0; /* Defined Reset Value */
    if ( is_32bit_domain(d) )
    {
        ctxt->user_regs.cpsr = PSR_GUEST32_INIT;
        if ( ver == XEN_PSCI_V_0_2 )
            ctxt->user_regs.r0_usr = context_id;
    }
#ifdef CONFIG_ARM_64
    else
    {
        ctxt->user_regs.cpsr = PSR_GUEST64_INIT;
        if ( ver == XEN_PSCI_V_0_2 )
            ctxt->user_regs.x0 = context_id;
    }
#endif

    /* Start the VCPU with THUMB set if it's requested by the kernel */
    if ( is_thumb )
        ctxt->user_regs.cpsr |= PSR_THUMB;
    ctxt->flags = VGCF_online;

    domain_lock(d);
    rc = arch_set_info_guest(v, ctxt);
    free_vcpu_guest_context(ctxt);

    if ( rc < 0 )
    {
        domain_unlock(d);
        return PSCI_DENIED;
    }
    domain_unlock(d);

    vcpu_wake(v);

    return PSCI_SUCCESS;
}
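/*
 * For context, the guest side of the call above: per PSCI 0.2, CPU_ON in
 * the SMC64 calling convention is function ID 0xC4000003 with the target
 * MPIDR, entry point, and context_id in x1-x3, and the status returned
 * in x0. A hedged sketch of how an AArch64 guest kernel would invoke it
 * over the HVC conduit (psci_cpu_on_sketch is a hypothetical helper, not
 * part of the hypervisor sources above):
 */
#define PSCI_0_2_FN64_CPU_ON 0xC4000003UL

static inline long psci_cpu_on_sketch(uint64_t target_mpidr,
                                      uint64_t entry_point,
                                      uint64_t context_id)
{
    register uint64_t x0 asm("x0") = PSCI_0_2_FN64_CPU_ON;
    register uint64_t x1 asm("x1") = target_mpidr;
    register uint64_t x2 asm("x2") = entry_point;
    register uint64_t x3 asm("x3") = context_id;

    /* HVC traps to the hypervisor, which routes to do_common_cpu_on(). */
    asm volatile("hvc #0"
                 : "+r" (x0)
                 : "r" (x1), "r" (x2), "r" (x3)
                 : "memory");
    return (long)x0;
}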
void vcpu_unpause(struct vcpu *v)
{
    if ( atomic_dec_and_test(&v->pause_count) )
        vcpu_wake(v);
}
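/*
 * For symmetry, the pause side of the same refcount protocol: a minimal
 * sketch modelled on Xen's vcpu_pause() (exact assertions vary by tree).
 */
void vcpu_pause_sketch(struct vcpu *v)
{
    /* Pausing yourself would deadlock: the sync below waits for v to be
     * fully descheduled. */
    ASSERT(v != current);

    atomic_inc(&v->pause_count);
    vcpu_sleep_sync(v);
}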