static int
xen_vcpu_initialize(processorid_t id, vcpu_guest_context_t *vgc)
{
	int err;

	if ((err = HYPERVISOR_vcpu_op(VCPUOP_initialise, id, vgc)) != 0) {
		char *str;
		int level = CE_WARN;

		switch (err) {
		case -X_EINVAL:
			/*
			 * This interface squashes multiple error sources
			 * to one error code.  In particular, an X_EINVAL
			 * code can mean:
			 *
			 * -	the vcpu id is out of range
			 * -	cs or ss are in ring 0
			 * -	cr3 is wrong
			 * -	an entry in the new gdt is above the
			 *	reserved entry
			 * -	a frame underneath the new gdt is bad
			 */
			str = "something is wrong :(";
			break;
		case -X_ENOENT:
			str = "no such cpu";
			break;
		case -X_ENOMEM:
			str = "no mem to copy ctxt";
			break;
		case -X_EFAULT:
			str = "bad address";
			break;
		case -X_EEXIST:
			/*
			 * Hmm.  This error is returned if the vcpu has already
			 * been initialized once before in the lifetime of this
			 * domain.  This is a logic error in the kernel.
			 */
			level = CE_PANIC;
			str = "already initialized";
			break;
		default:
			level = CE_PANIC;
			str = "<unexpected>";
			break;
		}

		cmn_err(level, "vcpu%d: failed to init: error %d: %s",
		    id, -err, str);
	}
	return (err);
}
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct gdt_page *gdt = &per_cpu(gdt_page, cpu);

	if (cpu_test_and_set(cpu, cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = 0;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt->gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt->gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt->gdt);
	ctxt->gdt_ents = ARRAY_SIZE(gdt->gdt);

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

	ctxt->event_callback_cs = __KERNEL_CS;
	ctxt->event_callback_eip = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_cs = __KERNEL_CS;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);
	/* should set up a minimal gdt */

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}
void xen_timer_resume(void)
{
	int cpu;

	if (xen_clockevent != &xen_vcpuop_clockevent)
		return;

	for_each_online_cpu(cpu) {
		if (HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
			BUG();
	}
}
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < nr_cpu_ids; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			set_cpu_possible(i, true);
		}
	}
}
static void __init xen_fill_possible_map(void)
{
	int i, rc;

	for (i = 0; i < NR_CPUS; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0) {
			num_processors++;
			cpu_set(i, cpu_possible_map);
		}
	}
}
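/*
 * Illustrative sketch, not taken from either variant above.  Both probes
 * only test rc >= 0, i.e. "this vcpu exists"; the hypercall itself returns
 * a negative errno for a nonexistent vcpu, 0 for a vcpu that exists but is
 * down, and 1 for a running vcpu.  A caller that also wants to know how
 * many vcpus are already up could distinguish the two non-error cases as
 * below.  The function name and out-parameters are hypothetical.
 */
static void xen_probe_vcpus(unsigned int max, unsigned int *present,
			    unsigned int *online)
{
	unsigned int i;
	int rc;

	*present = *online = 0;
	for (i = 0; i < max; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc < 0)
			continue;	/* no such vcpu */
		(*present)++;
		if (rc)
			(*online)++;	/* vcpu exists and is running */
	}
}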
/*
 * On restore, set the vcpu placement up again.
 * If it fails, then we're in a bad state, since
 * we can't back out from using it...
 */
void xen_vcpu_restore(void)
{
	int cpu;

	for_each_online_cpu(cpu) {
		bool other_cpu = (cpu != smp_processor_id());

		if (other_cpu &&
		    HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL))
			BUG();

		xen_setup_runstate_info(cpu);

		if (have_vcpu_info_placement)
			xen_vcpu_setup(cpu);

		if (other_cpu &&
		    HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL))
			BUG();
	}
}
void init_smp(void)
{
	unsigned int cpu;
	int res;

	memset(percpu, 0, sizeof(struct cpu_private) * MAX_VIRT_CPUS);
	init_cpu_pda(0);

	/*
	 * Init of CPU0 is completed; smp_init_completed must be set before we
	 * initialise the remaining CPUs, because the smp_processor_id macro
	 * will not work properly otherwise.
	 */
	smp_init_completed = 1;

	/*
	 * We have now completed the init of cpu0.
	 */
	if (trace_smp())
		tprintk("Initing SMP cpus.\n");

	for (cpu = 1; cpu < MAX_VIRT_CPUS; cpu++) {
		per_cpu(cpu, cpu_state) = CPU_DOWN;
		res = HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL);
		if (res >= 0) {
			if (trace_smp())
				tprintk("Bringing up CPU=%d\n", cpu);
			cpu_initialize_context(cpu);
			BUG_ON(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
			spin_lock(&cpu_lock);
			smp_active++;
			spin_unlock(&cpu_lock);
		}
	}

	if (trace_smp()) {
		tprintk("SMP: %d CPUs active\n", smp_active);
		for (cpu = 0; cpu < MAX_VIRT_CPUS; cpu++) {
			tprintk("SMP: cpu_state %d %d\n",
				cpu, per_cpu(cpu, cpu_state));
		}
	}
	if (trace_sched())
		ttprintk("SMP %d\n", smp_active);
}
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#ifdef CONFIG_X86_64
	/* Allocate node local memory for AP pdas */
	WARN_ON(cpu == 0);
	if (cpu > 0) {
		rc = get_local_pda(cpu);
		if (rc)
			return rc;
	}
#endif

#ifdef CONFIG_X86_32
	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
#else
	cpu_pda(cpu)->pcurrent = idle;
	clear_tsk_thread_flag(idle, TIF_FORK);
#endif
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, 0);
		barrier();
	}

	return 0;
}
static int
mptable_probe_cpus(void)
{
	int i, rc;

	for (i = 0; i < MAXCPU; i++) {
		rc = HYPERVISOR_vcpu_op(VCPUOP_is_up, i, NULL);
		if (rc >= 0)
			cpu_add(i, (i == 0));
	}

	return (0);
}
static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects. The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
}
static void stop_self(void *v)
{
	int cpu = smp_processor_id();

	/* make sure we're not pinning something down */
	load_cr3(swapper_pg_dir);

	/* drop out of the online map before taking the vcpu down */
	set_cpu_online(cpu, false);

	HYPERVISOR_vcpu_op(VCPUOP_down, cpu, NULL);
	BUG();
}
static void __cpuinit xen_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);
	cpu_bringup();
	/*
	 * Balance out the preempt calls - as we are running in cpu_idle
	 * loop which has been called at bootup from cpu_bringup_and_idle.
	 * The cpu_bringup_and_idle called cpu_bringup which made a
	 * preempt_disable(), so this preempt_enable will balance it out.
	 */
	preempt_enable();
}
static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	/*
	 * This path is called twice on PVHVM - first during bootup via
	 * smp_init -> xen_hvm_cpu_notify, and then if the VCPU is being
	 * hotplugged: cpu_up -> xen_hvm_cpu_notify.
	 * As we can only do the VCPUOP_register_vcpu_info once lets
	 * not over-write its result.
	 *
	 * For PV it is called during restore (xen_vcpu_restore) and bootup
	 * (xen_setup_vcpu_info_placement). The hotplug mechanism does not
	 * use this function.
	 */
	if (xen_hvm_domain()) {
		if (per_cpu(xen_vcpu, cpu) == &per_cpu(xen_vcpu_info, cpu))
			return;
	}
	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	/*
	 * Check to see if the hypervisor will put the vcpu_info
	 * structure where we want it, which allows direct access via
	 * a percpu-variable.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/*
		 * This cpu is using the registered vcpu info, even if
		 * later ones fail to.
		 */
		per_cpu(xen_vcpu, cpu) = vcpup;
	}
}
static inline void play_dead(void)
{
	extern void idle_task_exit(void);	/* XXXAP find proper place */

	idle_task_exit();
	local_irq_disable();
	cpu_clear(smp_processor_id(), cpu_initialized);
	preempt_enable_no_resched();
	HYPERVISOR_vcpu_op(VCPUOP_down, smp_processor_id(), NULL);

	/* Same as arch/xen/kernel/smpboot.c:cpu_bringup(). */
	cpu_init();
	preempt_disable();
	local_irq_enable();
}
static void xen_cpu_die(unsigned int cpu)
{
	/* Wait for the vcpu to actually go down before tearing it apart. */
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(xen_resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_callfuncsingle_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(xen_irq_work, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);
}
static void xen_pv_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up,
				  xen_vcpu_nr(cpu), NULL)) {
		__set_current_state(TASK_UNINTERRUPTIBLE);
		schedule_timeout(HZ/10);
	}

	if (common_cpu_die(cpu) == 0) {
		xen_smp_intr_free(cpu);
		xen_uninit_lock_cpu(cpu);
		xen_teardown_timer(cpu);
		xen_pmu_finish(cpu);
	}
}
long
xen_vcpu_down(processorid_t id)
{
	long err;

	if ((err = HYPERVISOR_vcpu_op(VCPUOP_down, id, NULL)) != 0) {
		/*
		 * X_ENOENT:	no such cpu
		 * X_EINVAL:	bad cpuid
		 */
		panic("vcpu%d: failed to stop: error %d", id, -(int)err);
	}

	return (err);
}
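/*
 * Minimal sketch of the complementary bring-up path, written here in the
 * same style as xen_vcpu_down() above; it is not copied from the original
 * source, and the error-handling policy (panic on failure) is an assumption
 * that simply mirrors the shutdown path.  VCPUOP_up typically fails with
 * X_ENOENT for a nonexistent vcpu and X_EINVAL for a vcpu that has not been
 * initialised yet.
 */
long
xen_vcpu_up(processorid_t id)
{
	long err;

	if ((err = HYPERVISOR_vcpu_op(VCPUOP_up, id, NULL)) != 0) {
		/*
		 * X_ENOENT:	no such cpu
		 * X_EINVAL:	vcpu not initialised
		 */
		panic("vcpu%d: failed to start: error %d", id, -(int)err);
	}

	return (err);
}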
static void xen_pv_play_dead(void) /* used only with HOTPLUG_CPU */
{
	play_dead_common();
	HYPERVISOR_vcpu_op(VCPUOP_down, xen_vcpu_nr(smp_processor_id()), NULL);
	cpu_bringup();
	/*
	 * commit 4b0c0f294 (tick: Cleanup NOHZ per cpu data on cpu down)
	 * clears certain data that the cpu_idle loop (which called us
	 * and that we return from) expects. The only way to get that
	 * data back is to call:
	 */
	tick_nohz_idle_enter();
	tick_nohz_idle_stop_tick_protected();

	cpuhp_online_idle(CPUHP_AP_ONLINE_IDLE);
}
static void xen_cpu_die(unsigned int cpu)
{
	while (HYPERVISOR_vcpu_op(VCPUOP_is_up, cpu, NULL)) {
		current->state = TASK_UNINTERRUPTIBLE;
		schedule_timeout(HZ/10);
	}
	unbind_from_irqhandler(per_cpu(resched_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfunc_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(debug_irq, cpu), NULL);
	unbind_from_irqhandler(per_cpu(callfuncsingle_irq, cpu), NULL);
	xen_uninit_lock_cpu(cpu);
	xen_teardown_timer(cpu);

	if (num_online_cpus() == 1)
		alternatives_smp_switch(0);
}
void xen_send_IPI_one(unsigned int cpu, enum ipi_vector vector)
{
	int irq;

#ifdef CONFIG_X86
	if (unlikely(vector == XEN_NMI_VECTOR)) {
		int rc = HYPERVISOR_vcpu_op(VCPUOP_send_nmi, cpu, NULL);
		if (rc < 0)
			printk(KERN_WARNING "Sending nmi to CPU%d failed (rc:%d)\n",
			       cpu, rc);
		return;
	}
#endif
	irq = per_cpu(ipi_to_irq, cpu)[vector];
	BUG_ON(irq < 0);
	notify_remote_via_irq(irq);
}
static int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

	per_cpu(current_task, cpu) = idle;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE;
	per_cpu(kernel_stack8k, cpu) =
		(unsigned long)task_stack_page(idle) -
		KERNEL_STACK_OFFSET + THREAD_SIZE - 8192;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
/* taken from i386/kernel/time-xen.c */
static void xen_init_missing_ticks_accounting(int cpu)
{
	struct vcpu_register_runstate_memory_area area;
	struct vcpu_runstate_info *runstate = &per_cpu(xen_runstate, cpu);
	int rc;

	memset(runstate, 0, sizeof(*runstate));

	area.addr.v = runstate;
	rc = HYPERVISOR_vcpu_op(VCPUOP_register_runstate_memory_area,
				cpu, &area);
	WARN_ON(rc && rc != -ENOSYS);

	per_cpu(xen_blocked_time, cpu) = runstate->time[RUNSTATE_blocked];
	per_cpu(xen_stolen_time, cpu) = runstate->time[RUNSTATE_runnable] +
					runstate->time[RUNSTATE_offline];
}
static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	single.flags = VCPU_SSHOTTMR_future;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);

	BUG_ON(ret != 0 && ret != -ETIME);

	return ret;
}
static int xen_vcpuop_set_next_event(unsigned long delta,
				     struct clock_event_device *evt)
{
	int cpu = smp_processor_id();
	struct vcpu_set_singleshot_timer single;
	int ret;

	WARN_ON(!clockevent_state_oneshot(evt));

	single.timeout_abs_ns = get_abs_timeout(delta);
	/* Get an event anyway, even if the timeout is already expired */
	single.flags = 0;

	ret = HYPERVISOR_vcpu_op(VCPUOP_set_singleshot_timer, cpu, &single);
	BUG_ON(ret != 0);

	return ret;
}
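/*
 * Companion sketch (assumed, not part of the two variants above): the
 * matching clockevent shutdown hook for the vcpuop timer cancels both the
 * one-shot and the periodic hypervisor timers on the current vcpu.  The
 * function name follows the same xen_vcpuop_* convention and should be
 * treated as illustrative.
 */
static int xen_vcpuop_shutdown(struct clock_event_device *evt)
{
	int cpu = smp_processor_id();

	/* Cancel any pending single-shot and periodic timers for this vcpu. */
	if (HYPERVISOR_vcpu_op(VCPUOP_stop_singleshot_timer, cpu, NULL) ||
	    HYPERVISOR_vcpu_op(VCPUOP_stop_periodic_timer, cpu, NULL))
		BUG();

	return 0;
}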
void __ref setup_vcpu_info(unsigned int cpu)
{
	struct vcpu_info *v = &per_cpu(vcpu_info, cpu);
	struct vcpu_register_vcpu_info info;

#ifdef CONFIG_X86_64
	static bool first = true;

	if (first) {
		first = false;
		info.mfn = early_arbitrary_virt_to_mfn(v);
	} else
#endif
		info.mfn = arbitrary_virt_to_mfn(v);

	info.offset = offset_in_page(v);

	if (HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info))
		BUG();
}
static void __init xen_percpu_init(void *unused)
{
	struct vcpu_register_vcpu_info info;
	struct vcpu_info *vcpup;
	int err;
	int cpu = get_cpu();

	pr_info("Xen: initializing cpu%d\n", cpu);
	vcpup = per_cpu_ptr(xen_vcpu_info, cpu);

	info.mfn = __pa(vcpup) >> PAGE_SHIFT;
	info.offset = offset_in_page(vcpup);

	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);
	BUG_ON(err);
	per_cpu(xen_vcpu, cpu) = vcpup;

	enable_percpu_irq(xen_events_irq, 0);
}
static int __cpuinit xen_cpu_up(unsigned int cpu, struct task_struct *idle)
{
	int rc;

	per_cpu(current_task, cpu) = idle;
	per_cpu(current_tinfo, cpu) = &idle->tinfo;
#ifdef CONFIG_X86_32
	irq_ctx_init(cpu);
#else
	clear_tsk_thread_flag(idle, TIF_FORK);
	per_cpu(kernel_stack, cpu) =
		(unsigned long)task_stack_page(idle) - 16 + THREAD_SIZE;
#endif
	xen_setup_runstate_info(cpu);
	xen_setup_timer(cpu);
	xen_init_lock_cpu(cpu);

	per_cpu(cpu_state, cpu) = CPU_UP_PREPARE;

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		/* Just in case we booted with a single CPU. */
		alternatives_enable_smp();

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	while (per_cpu(cpu_state, cpu) != CPU_ONLINE) {
		HYPERVISOR_sched_op(SCHEDOP_yield, NULL);
		barrier();
	}

	return 0;
}
int __cpuinit xen_cpu_up(unsigned int cpu)
{
	struct task_struct *idle = idle_task(cpu);
	int rc;

#if 0
	rc = cpu_up_check(cpu);
	if (rc)
		return rc;
#endif

	init_gdt(cpu);
	per_cpu(current_task, cpu) = idle;
	irq_ctx_init(cpu);
	xen_setup_timer(cpu);

	/* make sure interrupts start blocked */
	per_cpu(xen_vcpu, cpu)->evtchn_upcall_mask = 1;

	rc = cpu_initialize_context(cpu, idle);
	if (rc)
		return rc;

	if (num_online_cpus() == 1)
		alternatives_smp_switch(1);

	rc = xen_smp_intr_init(cpu);
	if (rc)
		return rc;

	smp_store_cpu_info(cpu);
	set_cpu_sibling_map(cpu);
	/* This must be done before setting cpu_online_map */
	wmb();

	cpu_set(cpu, cpu_online_map);

	rc = HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL);
	BUG_ON(rc);

	return 0;
}
static void xen_vcpu_setup(int cpu)
{
	struct vcpu_register_vcpu_info info;
	int err;
	struct vcpu_info *vcpup;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (cpu < MAX_VIRT_CPUS)
		per_cpu(xen_vcpu, cpu) =
			&HYPERVISOR_shared_info->vcpu_info[cpu];

	if (!have_vcpu_info_placement) {
		if (cpu >= MAX_VIRT_CPUS)
			clamp_max_cpus();
		return;
	}

	vcpup = &per_cpu(xen_vcpu_info, cpu);
	info.mfn = arbitrary_virt_to_mfn(vcpup);
	info.offset = offset_in_page(vcpup);

	printk(KERN_DEBUG "trying to map vcpu_info %d at %p, mfn %llx, offset %d\n",
	       cpu, vcpup, info.mfn, info.offset);

	/*
	 * Check to see if the hypervisor will put the vcpu_info
	 * structure where we want it, which allows direct access via
	 * a percpu-variable.
	 */
	err = HYPERVISOR_vcpu_op(VCPUOP_register_vcpu_info, cpu, &info);

	if (err) {
		printk(KERN_DEBUG "register_vcpu_info failed: err=%d\n", err);
		have_vcpu_info_placement = 0;
		clamp_max_cpus();
	} else {
		/*
		 * This cpu is using the registered vcpu info, even if
		 * later ones fail to.
		 */
		per_cpu(xen_vcpu, cpu) = vcpup;
		printk(KERN_DEBUG "cpu %d using vcpu_info at %p\n",
		       cpu, vcpup);
	}
}