// proc_run - make process "proc" running on cpu
// NOTE: before calling switch_to, the base address of "proc"'s new PDT must be loaded (via lcr3)
void
proc_run(struct proc_struct *proc) {
    if (proc != current) {
        bool intr_flag;
        struct proc_struct *prev = current, *next = proc;
        // disable interrupts so the switch cannot be preempted halfway through
        local_intr_save(intr_flag);
        {
            current = proc;
            // point TSS esp0 at the top of next's kernel stack, so traps
            // from user mode land on the right stack
            load_esp0(next->kstack + KSTACKSIZE);
            // switch to next's page directory
            lcr3(next->cr3);
            // save prev's registers, restore next's
            switch_to(&(prev->context), &(next->context));
        }
        local_intr_restore(intr_flag);
    }
}
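The context passed to switch_to above is just the register set that ucore's switch.S saves and restores; in ucore it is defined (in kern/process/proc.h) as:

// Saved registers for kernel context switches (kern/process/proc.h).
// switch_to stores prev's values here and reloads next's; eip is the
// address switch_to will "return" to in the next process.
struct context {
    uint32_t eip;
    uint32_t esp;
    uint32_t ebx;
    uint32_t ecx;
    uint32_t edx;
    uint32_t esi;
    uint32_t edi;
    uint32_t ebp;
};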
// proc_run - make process "proc" running on cpu
// NOTE: before calling switch_to, the base address of "proc"'s new PDT must be loaded
// This variant uses Linux-style naming: local_irq_save/restore with an
// unsigned long flags word, and a load_page_dir helper in place of lcr3.
void
proc_run(struct proc_struct *proc) {
    if (proc != current) {
        unsigned long intr_flag;
        struct proc_struct *prev = current, *next = proc;
        local_irq_save(intr_flag);
        {
            current = proc;
            load_esp0(next->kstack + KSTACKSIZE);
            load_page_dir(next->pgdir_addr);
            switch_to(&(prev->context), &(next->context));
        }
        local_irq_restore(intr_flag);
    }
}
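load_page_dir and pgdir_addr in this variant are not stock ucore names; treating them as placeholders, a minimal sketch of such a helper on x86 would simply write the page directory's physical base into %cr3, exactly what ucore's lcr3() does:

#include <stdint.h>

// Hypothetical helper (placeholder name, not stock ucore): load the
// physical base address of a page directory into %cr3. Writing %cr3
// also flushes the TLB's non-global entries, so the new address
// space takes effect immediately.
static inline void
load_page_dir(uintptr_t pgdir_addr) {
    asm volatile ("movl %0, %%cr3" :: "r" (pgdir_addr) : "memory");
}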
// proc_run - make process "proc" running on cpu
// NOTE: before calling switch_to, the base address of "proc"'s new PDT must be loaded
void
proc_run(struct proc_struct *proc) {
    // debug trace: log which process is being scheduled onto the cpu
    cprintf("proc run : pid = %d, name = %s\n", proc->pid, proc->name);
    if (proc != current) {
        bool intr_flag;
        struct proc_struct *prev = current, *next = proc;
        local_intr_save(intr_flag);
        {
            current = proc;
            load_esp0(next->kstack + KSTACKSIZE);
            lcr3(next->cr3);
            switch_to(&(prev->context), &(next->context));
        }
        local_intr_restore(intr_flag);
    }
}
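All of the variants above bracket the switch with an interrupt save/restore pair. In ucore (kern/sync/sync.h) that pair is roughly the following: remember whether IF was set, disable interrupts, and re-enable only if they were on before.

// ucore-style interrupt save/restore (after kern/sync/sync.h).
static inline bool
__intr_save(void) {
    if (read_eflags() & FL_IF) {   // were interrupts enabled?
        intr_disable();            // cli
        return 1;
    }
    return 0;
}

static inline void
__intr_restore(bool flag) {
    if (flag) {
        intr_enable();             // sti, only if IF was set before
    }
}

#define local_intr_save(x)      do { x = __intr_save(); } while (0)
#define local_intr_restore(x)   __intr_restore(x)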
/*
 * switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
    struct thread_struct *prev = &prev_p->thread,
                         *next = &next_p->thread;
    int cpu = smp_processor_id();
    struct tss_struct *tss = &per_cpu(init_tss, cpu);

    /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

    __unlazy_fpu(prev_p);

    /* we're going to use this soon, after a few expensive things */
    if (next_p->fpu_counter > 5)
        prefetch(&next->i387.fxsave);

    /*
     * Reload esp0.
     */
    load_esp0(tss, next);

    /*
     * Save away %gs. No need to save %fs, as it was saved on the
     * stack on entry. No need to save %es and %ds, as those are
     * always kernel segments while inside the kernel. Doing this
     * before setting the new TLS descriptors avoids the situation
     * where we temporarily have non-reloadable segments in %fs
     * and %gs. This could be an issue if the NMI handler ever
     * used %fs or %gs (it does not today), or if the kernel is
     * running inside of a hypervisor layer.
     */
    savesegment(gs, prev->gs);

    /*
     * Load the per-thread Thread-Local Storage descriptor.
     */
    load_TLS(next, cpu);

    /*
     * Restore IOPL if needed. In normal use, the flags restore
     * in the switch assembly will handle this. But if the kernel
     * is running virtualized at a non-zero CPL, the popf will
     * not restore flags, so it must be done in a separate step.
     */
    if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
        set_iopl_mask(next->iopl);

    /*
     * Now maybe handle debug registers and/or IO bitmaps
     */
    if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW) ||
                 test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
        __switch_to_xtra(next_p, tss);

    disable_tsc(prev_p, next_p);

    /*
     * Leave lazy mode, flushing any hypercalls made here.
     * This must be done before restoring TLS segments so
     * the GDT and LDT are properly updated, and must be
     * done before math_state_restore, so the TS bit is up
     * to date.
     */
    arch_leave_lazy_cpu_mode();

    /* If the task has used fpu the last 5 timeslices, just do a full
     * restore of the math state immediately to avoid the trap; the
     * chances of needing FPU soon are obviously high now */
    if (next_p->fpu_counter > 5)
        math_state_restore();

    /*
     * Restore %gs if needed (which is common)
     */
    if (prev->gs | next->gs)
        loadsegment(gs, next->gs);

    x86_write_percpu(current_task, next_p);

    return prev_p;
}
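Note that __switch_to only swaps per-thread CPU state (TSS esp0, TLS, segments, FPU); the actual stack and instruction-pointer switch happens in the switch_to macro that jumps to it. For context, a sketch of that macro as it looked in the same i386 kernel generation (after include/asm-i386/system.h; exact details vary by version):

/*
 * Sketch of the i386 switch_to macro (2.6-era, details vary by
 * version). It saves prev's esp/eip, loads next's, and jumps to
 * __switch_to, which is fastcall: prev arrives in %eax, next in
 * %edx. "last" receives __switch_to's return value in %eax.
 */
#define switch_to(prev, next, last) do {                                \
    unsigned long esi, edi;                                             \
    asm volatile ("pushfl\n\t"              /* save flags */            \
                  "pushl %%ebp\n\t"                                     \
                  "movl %%esp,%0\n\t"       /* save ESP */              \
                  "movl %5,%%esp\n\t"       /* restore ESP */           \
                  "movl $1f,%1\n\t"         /* save EIP */              \
                  "pushl %6\n\t"            /* restore EIP */           \
                  "jmp __switch_to\n"                                   \
                  "1:\t"                                                \
                  "popl %%ebp\n\t"                                      \
                  "popfl"                                               \
                  : "=m" (prev->thread.esp), "=m" (prev->thread.eip),   \
                    "=a" (last), "=S" (esi), "=D" (edi)                 \
                  : "m" (next->thread.esp), "m" (next->thread.eip),     \
                    "2" (prev), "d" (next));                            \
} while (0)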