/*
 * First C code run on a secondary CPU brought up under Xen; executes
 * on the new CPU itself.  Initializes per-cpu state, publishes the CPU
 * as online, then enables interrupts.
 *
 * NOTE(review): statement order here is deliberate and delicate
 * (per-cpu init before use; online state published and flushed with
 * wmb() before interrupts are enabled) -- do not reorder.
 */
static __cpuinit void cpu_bringup(void)
{
	int cpu = smp_processor_id();

	cpu_init();
	touch_softlockup_watchdog();
	preempt_disable();

	/* Enable the fast syscall/sysenter entry points under Xen. */
	xen_enable_sysenter();
	xen_enable_syscall();

	/*
	 * NOTE(review): cpu was already read above; this re-read after
	 * cpu_init() is redundant but kept as-is.
	 */
	cpu = smp_processor_id();
	smp_store_cpu_info(cpu);
	cpu_data(cpu).x86_max_cores = 1;
	set_cpu_sibling_map(cpu);

	xen_setup_cpu_clockevents();

	/* Mark ourselves online and runnable before taking interrupts. */
	cpu_set(cpu, cpu_online_map);
	x86_write_percpu(cpu_state, CPU_ONLINE);
	wmb();

	/* We can take interrupts now: we're officially "up". */
	local_irq_enable();

	wmb();			/* make sure everything is out */
}
/*
 * Record that this CPU has entered the given paravirt lazy mode.
 *
 * Lazy modes do not nest, so the CPU must currently be in
 * PARAVIRT_LAZY_NONE; and because the mode lives in a per-cpu
 * variable, the caller must be non-preemptible.
 */
static inline void enter_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(preemptible());
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != PARAVIRT_LAZY_NONE);

	x86_write_percpu(paravirt_lazy_mode, mode);
}
/*
 * Leave the lazy mode previously entered via enter_lazy().
 *
 * The mode being left must match the mode currently recorded for this
 * CPU, and preemption must be disabled while the per-cpu state is
 * updated.
 */
void paravirt_leave_lazy(enum paravirt_lazy_mode mode)
{
	BUG_ON(preemptible());
	BUG_ON(x86_read_percpu(paravirt_lazy_mode) != mode);

	x86_write_percpu(paravirt_lazy_mode, PARAVIRT_LAZY_NONE);
}
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(&next->i387.fxsave);

	/*
	 * Reload esp0 so the next task's kernel stack is used on
	 * ring transitions.
	 */
	load_esp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	savesegment(gs, prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
		__switch_to_xtra(next_p, tss);

	disable_tsc(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_leave_lazy_cpu_mode();

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 */
	if (next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common).  The bitwise OR
	 * here is intentional: it is cheaper than || and reloads
	 * whenever either the saved or the incoming %gs is non-zero.
	 */
	if (prev->gs | next->gs)
		loadsegment(gs, next->gs);

	x86_write_percpu(current_task, next_p);

	return prev_p;
}