Code example #1
int
ia32_clone_tls (struct task_struct *child, struct pt_regs *childregs)
{
	struct desc_struct *desc;
	struct ia32_user_desc info;
	int idx;

	if (copy_from_user(&info, (void __user *)(childregs->r14 & 0xffffffff), sizeof(info)))
		return -EFAULT;
	if (LDT_empty(&info))
		return -EINVAL;

	idx = info.entry_number;
	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = child->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
	desc->a = LDT_entry_a(&info);
	desc->b = LDT_entry_b(&info);

	/* XXX: can this be done in a cleaner way ? */
	load_TLS(&child->thread, smp_processor_id());
	ia32_load_segment_descriptors(child);
	load_TLS(&current->thread, smp_processor_id());

	return 0;
}
Code example #2
void
ia32_load_state (struct task_struct *t)
{
	unsigned long eflag, fsr, fcr, fir, fdr, tssd;
	struct pt_regs *regs = task_pt_regs(t);

	eflag = t->thread.eflag;
	fsr = t->thread.fsr;
	fcr = t->thread.fcr;
	fir = t->thread.fir;
	fdr = t->thread.fdr;
	tssd = load_desc(_TSS);					/* TSSD */

	ia64_setreg(_IA64_REG_AR_EFLAG, eflag);
	ia64_setreg(_IA64_REG_AR_FSR, fsr);
	ia64_setreg(_IA64_REG_AR_FCR, fcr);
	ia64_setreg(_IA64_REG_AR_FIR, fir);
	ia64_setreg(_IA64_REG_AR_FDR, fdr);
	current->thread.old_iob = ia64_get_kr(IA64_KR_IO_BASE);
	current->thread.old_k1 = ia64_get_kr(IA64_KR_TSSD);
	ia64_set_kr(IA64_KR_IO_BASE, IA32_IOBASE);
	ia64_set_kr(IA64_KR_TSSD, tssd);

	regs->r17 = (_TSS << 48) | (_LDT << 32) | (__u32) regs->r17;
	regs->r30 = load_desc(_LDT);				/* LDTD */
	load_TLS(&t->thread, smp_processor_id());
}
Code example #3
static void set_tls_desc(struct task_struct *p, int idx,
			 const struct user_desc *info, int n)
{
	struct thread_struct *t = &p->thread;
	struct desc_struct *desc = &t->tls_array[idx - GDT_ENTRY_TLS_MIN];
	int cpu;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	while (n-- > 0) {
		if (LDT_empty(info))
			desc->a = desc->b = 0;
		else
			fill_ldt(desc, info);
		++info;
		++desc;
	}

	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
}
Code example #4
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	bool preload_fpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	/*
	 * If the task has used the FPU in its recent timeslices, preload the
	 * FPU state now to avoid taking a device-not-available trap later.
	 */
	preload_fpu = tsk_used_math(next_p) && next_p->fpu_counter > 5;

	__unlazy_fpu(prev_p);

	/* we're going to use this soon, after a few expensive things */
	if (preload_fpu)
		prefetch(next->xstate);

	/* Reload esp0. */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the stack
	 * on entry. Doing this before loading the new TLS descriptors avoids
	 * temporarily having a non-reloadable segment in %gs.
	 */
	lazy_save_gs(prev->gs);

	/* Load the per-thread Thread-Local Storage descriptors. */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed. In normal use the flags restore in the
	 * switch assembly handles this, but not when the kernel is running
	 * virtualized at a non-zero CPL.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/* Now maybe handle debug registers and/or IO bitmaps. */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * If we are preloading the FPU context, run clts() while CPU state
	 * updates are still being batched (before leaving lazy mode).
	 */
	if (preload_fpu)
		clts();

	/*
	 * Leave lazy mode, flushing any hypercalls made here. This must be
	 * done before __math_state_restore(), so the TS bit is up to date,
	 * and before %gs is reloaded below.
	 */
	arch_end_context_switch(next_p);

	if (preload_fpu)
		__math_state_restore();

	/* Restore %gs if needed (which is common). */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	percpu_write(current_task, next_p);

	return prev_p;
}
Code example #5
File: tls.c  Project: FatSunHYS/OSCourseDesign
int arch_switch_tls_tt(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	if (needs_TLS_update(to))
		return load_TLS(0, to);

	return 0;
}
Code example #6
int arch_switch_tls(struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}
Code example #7
File: thread.c  Project: amir9/longene
/*      
 * set teb on fs
 */     
int set_teb_selector(struct task_struct *tsk, long teb)
{
	int	cpu;

	set_tls_array(&tsk->thread, TEB_SELECTOR >> 3, teb, 1);
	cpu = get_cpu();
	load_TLS(&tsk->thread, cpu);
	put_cpu();

	return 0;
} /* end set_teb_selector */
Code example #8
File: tls.c  Project: FatSunHYS/OSCourseDesign
int arch_switch_tls_skas(struct task_struct *from, struct task_struct *to)
{
	if (!host_supports_tls)
		return 0;

	/* We have no need whatsoever to switch TLS for kernel threads; beyond
	 * that, that would also result in us calling os_set_thread_area with
	 * userspace_pid[cpu] == 0, which gives an error. */
	if (likely(to->mm))
		return load_TLS(O_FORCE, to);

	return 0;
}
Code example #9
File: restore.c  Project: virthub/virthub
int ckpt_restore_cpu(ckpt_desc_t desc)
{
    int i;
    ckpt_cpu_t cpu;
    unsigned long fs;
    unsigned long gs;
    struct pt_regs *regs = task_pt_regs(current);

    if (ckpt_read(desc, &cpu, sizeof(ckpt_cpu_t)) != sizeof(ckpt_cpu_t)) {
        log_err("failed to restore cpu");
        return -EIO;
    }

    log_restore_cpu("restoring regs ...");
    fs = cpu.regs.fs;
    gs = cpu.regs.gs;
    cpu.regs.fs = 0;
    cpu.regs.gs = 0;
    cpu.regs.cs = __USER_CS;
    cpu.regs.ds = __USER_DS;
    cpu.regs.es = __USER_DS;
    cpu.regs.ss = __USER_DS;
    cpu.regs.flags = 0x200 | (cpu.regs.flags & 0xff);
    *regs = cpu.regs;

    log_restore_cpu("restoring tls ...");
    for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++) {
        if ((cpu.tls_array[i].b & 0x00207000) != 0x00007000) {
            cpu.tls_array[i].a = 0;
            cpu.tls_array[i].b = 0;
        }
        current->thread.tls_array[i] = cpu.tls_array[i];
    }
    load_TLS(&current->thread, get_cpu());
    put_cpu();
    i = fs >> 3;
    if ((i < GDT_ENTRY_TLS_MIN) || (i > GDT_ENTRY_TLS_MAX) || ((fs & 7) != 3))
        fs = 0;
    i = gs >> 3;
    if ((i < GDT_ENTRY_TLS_MIN) || (i > GDT_ENTRY_TLS_MAX) || ((gs & 7) != 3))
        gs = 0;
    regs->fs = fs;
    regs->gs = gs;
    log_regs(regs);

    current_thread_info()->sysenter_return = cpu.sysenter_return;
    log_restore_pos(desc);
    return 0;
}
Code example #10
File: tls32.c  Project: 12019/hg556a_source
/*
 * Set a given TLS descriptor:
 * When you want addresses > 32bit use arch_prctl() 
 */
int do_set_thread_area(struct thread_struct *t, struct user_desc __user *u_info)
{
	struct user_desc info;
	struct n_desc_struct *desc;
	int cpu, idx;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	desc = ((struct n_desc_struct *)t->tls_array) + idx - GDT_ENTRY_TLS_MIN;

	/*
	 * We must not get preempted while modifying the TLS.
	 */
	cpu = get_cpu();

	if (LDT_empty(&info)) {
		desc->a = 0;
		desc->b = 0;
	} else {
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}
	if (t == &current->thread)
		load_TLS(t, cpu);

	put_cpu();
	return 0;
}
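
The handler above is the kernel side of the set_thread_area(2) system call. For context, here is a minimal user-space sketch of driving it; this is not one of the collected examples, and it assumes an x86-32 Linux target where <asm/ldt.h> provides struct user_desc and SYS_set_thread_area is available. Passing entry_number = -1 asks the kernel to pick a free TLS slot, which do_set_thread_area() writes back via put_user().

#include <asm/ldt.h>		/* struct user_desc */
#include <stdio.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

static unsigned int tls_area[256];	/* memory the new TLS segment will cover */

int main(void)
{
	struct user_desc desc;

	memset(&desc, 0, sizeof(desc));
	desc.entry_number = -1;			/* let the kernel pick a free GDT TLS slot */
	desc.base_addr = (unsigned long) tls_area;
	desc.limit = sizeof(tls_area) - 1;	/* byte-granular limit (limit_in_pages = 0) */
	desc.seg_32bit = 1;
	desc.useable = 1;

	if (syscall(SYS_set_thread_area, &desc) != 0) {
		perror("set_thread_area");
		return 1;
	}

	/* entry_number now holds the GDT index the kernel chose. */
	printf("TLS descriptor installed at GDT entry %u\n", desc.entry_number);
	return 0;
}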
Code example #11
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);
	fpu_switch_t fpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	load_sp0(tss, next);

	lazy_save_gs(prev->gs);

	load_TLS(next, cpu);

	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	arch_end_context_switch(next_p);

	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	switch_fpu_finish(next_p, fpu);

	percpu_write(current_task, next_p);

	return prev_p;
}
Code example #12
File: process_32.c  Project: xianjimli/datasafe
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(init_tss, cpu);

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	__unlazy_fpu(prev_p);
	if (next_p->mm)
		load_user_cs_desc(cpu, next_p->mm);

	/* we're going to use this soon, after a few expensive things */
	if (next_p->fpu_counter > 5)
		prefetch(next->xstate);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/* If the task has used fpu the last 5 timeslices, just do a full
	 * restore of the math state immediately to avoid the trap; the
	 * chances of needing FPU soon are obviously high now
	 *
	 * tsk_used_math() checks prevent calling math_state_restore(),
	 * which can sleep in the case of !tsk_used_math()
	 */
	if (tsk_used_math(next_p) && next_p->fpu_counter > 5)
		math_state_restore();

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	percpu_write(current_task, next_p);

	return prev_p;
}
Code example #13
int vmadump_restore_cpu(cr_rstrt_proc_req_t *ctx, struct file *file,
			struct pt_regs *regs) {
    struct vmadump_restore_tmps *x86tmps;
    struct thread_struct *threadtmp;
    struct pt_regs *regtmp;
    int r;
    int idx, i, cpu;
    uint16_t fsindex, gsindex;
#if HAVE_STRUCT_N_DESC_STRUCT
    struct n_desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#else
    struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#endif

    /* XXX: Note allocation assumes i387tmp and threadtmp are never active at the same time */
    x86tmps = kmalloc(sizeof(*x86tmps), GFP_KERNEL);
    if (!x86tmps) return -ENOMEM;
    regtmp = VMAD_REGTMP(x86tmps);
    threadtmp = VMAD_THREADTMP(x86tmps);

    r = read_kern(ctx, file, regtmp, sizeof(*regtmp));
    if (r != sizeof(*regtmp)) goto bad_read;

    /* Don't let the user pick funky segments */
    if ((regtmp->cs != __USER_CS && regtmp->cs != __USER32_CS) &&
	(regtmp->ss != __USER_DS && regtmp->ss != __USER32_DS)) {
	r = -EINVAL;
	goto bad_read;
    }

    /* Set our process type */
    if (regtmp->cs == __USER32_CS)
	set_thread_flag(TIF_IA32);
    else
	clear_thread_flag(TIF_IA32);	

    /* Only restore bottom 9 bits of eflags.  Restoring anything else
     * is bad bad mojo for security. (0x200 = interrupt enable) */
#if HAVE_PT_REGS_EFLAGS
    regtmp->eflags = 0x200 | (regtmp->eflags & 0x000000FF);
#elif HAVE_PT_REGS_FLAGS
    regtmp->flags = 0x200 | (regtmp->flags & 0x000000FF);
#else
    #error
#endif
    memcpy(regs, regtmp, sizeof(*regtmp));

    /* Restore FPU info (and later general "extended state") */
    r = vmadump_restore_i387(ctx, file, VMAD_I387TMP(x86tmps));
    if (r < 0) goto bad_read;
	
    /* XXX FIX ME: RESTORE DEBUG INFORMATION ?? */
    /* Here we read it but ignore it. */
    r = vmadump_restore_debugreg(ctx, file);
    if (r < 0) goto bad_read;

    /* user(r)sp, since we don't use the ptrace entry path in BLCR */
#if HAVE_THREAD_USERSP
    r = read_kern(ctx, file, &threadtmp->usersp, sizeof(threadtmp->usersp));
    if (r != sizeof(threadtmp->usersp)) goto bad_read;
    current->thread.usersp = threadtmp->usersp;
    vmad_write_oldrsp(threadtmp->usersp);
#elif HAVE_THREAD_USERRSP
    r = read_kern(ctx, file, &threadtmp->userrsp, sizeof(threadtmp->userrsp));
    if (r != sizeof(threadtmp->userrsp)) goto bad_read;
    current->thread.userrsp = threadtmp->userrsp;
    vmad_write_oldrsp(threadtmp->userrsp);
#else
    #error
#endif

    /*-- restore segmentation related stuff */

    /* Restore FS_BASE MSR */
    r = read_kern(ctx, file, &threadtmp->fs, sizeof(threadtmp->fs));
    if (r != sizeof(threadtmp->fs)) goto bad_read;
    if (threadtmp->fs >= TASK_SIZE) {
	r = -EINVAL;
	goto bad_read;
    }
    current->thread.fs = threadtmp->fs;
    if ((r = checking_wrmsrl(MSR_FS_BASE, threadtmp->fs)))
	goto bad_read;
	
    /* Restore GS_KERNEL_BASE MSR */
    r = read_kern(ctx, file, &threadtmp->gs, sizeof(threadtmp->gs));
    if (r != sizeof(threadtmp->gs)) goto bad_read;
    if (threadtmp->gs >= TASK_SIZE) {
	r = -EINVAL;
	goto bad_read;
    }
    current->thread.gs = threadtmp->gs;
    if ((r = checking_wrmsrl(MSR_KERNEL_GS_BASE, threadtmp->gs)))
	goto bad_read;

    /* Restore 32 bit segment stuff */
    r = read_kern(ctx, file, &fsindex, sizeof(fsindex));
    if (r != sizeof(fsindex)) goto bad_read;

    r = read_kern(ctx, file, &gsindex, sizeof(gsindex));
    if (r != sizeof(gsindex)) goto bad_read;

    r = read_kern(ctx, file, tls_array, sizeof(tls_array));
    if (r != sizeof(tls_array)) goto bad_read;

    /* Sanitize fs, gs.  These segment descriptors should load one
     * of the TLS entries and have DPL = 3.  If somebody is doing
     * some other LDT monkey business, I'm currently not
     * supporting that here.  Also, I'm presuming that the offsets
     * to the GDT_ENTRY_TLS_MIN is the same in both kernels. */
    idx = fsindex >> 3;
    if (idx<GDT_ENTRY_TLS_MIN || idx>GDT_ENTRY_TLS_MAX || (fsindex&7) != 3)
	fsindex = 0;
    idx = gsindex >> 3;
    if (idx<GDT_ENTRY_TLS_MIN || idx>GDT_ENTRY_TLS_MAX || (gsindex&7) != 3)
	gsindex = 0;

    /* Sanitize the TLS entries...
     * Make sure the following bits are set/not set:
     *  bit 12   : S    =  1    (code/data - not system)
     *  bit 13-14: DPL  = 11    (priv level = 3 (user))
     *  bit 21   :      =  0    (reserved)
     *
     * If the entry isn't valid, zero the whole descriptor.
     */
    for (i=0; i < GDT_ENTRY_TLS_ENTRIES; i++) {
	if (tls_array[i].b != 0 && 
	    (tls_array[i].b & 0x00207000) != 0x00007000) {
	    r = -EINVAL;
	    goto bad_read;
	}
    }

    /* Ok load this crap */
    cpu = get_cpu();	/* load_TLS can't get pre-empted. */
    memcpy(current->thread.tls_array, tls_array,
	   sizeof(current->thread.tls_array));
    current->thread.fsindex = fsindex;
    current->thread.gsindex = gsindex;
    load_TLS(&current->thread, cpu);

    loadsegment(fs, current->thread.fsindex);
    load_gs_index(current->thread.gsindex);
    put_cpu();

    /* In case cr_restart and child don't have same ABI */
    if (regtmp->cs == __USER32_CS) {
	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);
    } else {
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);
    }

#if HAVE_THREAD_INFO_SYSENTER_RETURN
    {
	void *sysenter_return;
	r = read_kern(ctx, file, &sysenter_return, sizeof(sysenter_return));
	if (r != sizeof(sysenter_return)) goto bad_read;
	current_thread_info()->sysenter_return = sysenter_return;
    }
#endif
    
    kfree(x86tmps);
    return 0;

 bad_read:
    kfree(x86tmps);
    if (r >= 0) r = -EIO;
    return r;
}
Code example #14
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__visible __notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
			     *next = &next_p->thread;
	struct fpu *prev_fpu = &prev->fpu;
	struct fpu *next_fpu = &next->fpu;
	int cpu = smp_processor_id();
	struct tss_struct *tss = &per_cpu(cpu_tss, cpu);
	fpu_switch_t fpu_switch;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	fpu_switch = switch_fpu_prepare(prev_fpu, next_fpu, cpu);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * If it were not for PREEMPT_ACTIVE we could guarantee that the
	 * preempt_count of all tasks was equal here and this would not be
	 * needed.
	 */
	task_thread_info(prev_p)->saved_preempt_count = this_cpu_read(__preempt_count);
	this_cpu_write(__preempt_count, task_thread_info(next_p)->saved_preempt_count);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before fpu__restore(), so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	/*
	 * Reload esp0 and cpu_current_top_of_stack.  This changes
	 * current_thread_info().
	 */
	load_sp0(tss, next);
	this_cpu_write(cpu_current_top_of_stack,
		       (unsigned long)task_stack_page(next_p) +
		       THREAD_SIZE);

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	switch_fpu_finish(next_fpu, fpu_switch);

	this_cpu_write(current_task, next_p);

	return prev_p;
}
Code example #15
File: process_32.c  Project: garyvan/openwrt-1.6
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPU's, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %ax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
__notrace_funcgraph struct task_struct *
__switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
	struct thread_struct *prev = &prev_p->thread,
				 *next = &next_p->thread;
	int cpu = smp_processor_id();
	struct tss_struct *tss = init_tss + cpu;
	fpu_switch_t fpu;

	/* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

	fpu = switch_fpu_prepare(prev_p, next_p, cpu);

	/*
	 * Reload esp0.
	 */
	load_sp0(tss, next);

	/*
	 * Save away %gs. No need to save %fs, as it was saved on the
	 * stack on entry.  No need to save %es and %ds, as those are
	 * always kernel segments while inside the kernel.  Doing this
	 * before setting the new TLS descriptors avoids the situation
	 * where we temporarily have non-reloadable segments in %fs
	 * and %gs.  This could be an issue if the NMI handler ever
	 * used %fs or %gs (it does not today), or if the kernel is
	 * running inside of a hypervisor layer.
	 */
	lazy_save_gs(prev->gs);

#ifdef CONFIG_PAX_MEMORY_UDEREF
	__set_fs(task_thread_info(next_p)->addr_limit);
#endif

	/*
	 * Load the per-thread Thread-Local Storage descriptor.
	 */
	load_TLS(next, cpu);

	/*
	 * Restore IOPL if needed.  In normal use, the flags restore
	 * in the switch assembly will handle this.  But if the kernel
	 * is running virtualized at a non-zero CPL, the popf will
	 * not restore flags, so it must be done in a separate step.
	 */
	if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
		set_iopl_mask(next->iopl);

	/*
	 * Now maybe handle debug registers and/or IO bitmaps
	 */
	if (unlikely(task_thread_info(prev_p)->flags & _TIF_WORK_CTXSW_PREV ||
		     task_thread_info(next_p)->flags & _TIF_WORK_CTXSW_NEXT))
		__switch_to_xtra(prev_p, next_p, tss);

	switch_kmaps(prev_p, next_p);

	/*
	 * Leave lazy mode, flushing any hypercalls made here.
	 * This must be done before restoring TLS segments so
	 * the GDT and LDT are properly updated, and must be
	 * done before math_state_restore, so the TS bit is up
	 * to date.
	 */
	arch_end_context_switch(next_p);

	this_cpu_write(current_task, next_p);
	this_cpu_write(current_tinfo, &next_p->tinfo);

	/*
	 * Restore %gs if needed (which is common)
	 */
	if (prev->gs | next->gs)
		lazy_load_gs(next->gs);

	switch_fpu_finish(next_p, fpu);

	return prev_p;
}
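
On x86, all of the examples above ultimately perform the same low-level operation: copying the task's thread.tls_array into the TLS slots of the per-CPU GDT (the UML examples #5, #6 and #8 call UML's own load_TLS(), which has a different signature). As a rough sketch of the native case, modeled on native_load_tls() in the kernel's desc.h (exact helper names such as get_cpu_gdt_table() vary across kernel versions), load_TLS() amounts to:

static inline void sketch_load_TLS(struct thread_struct *t, unsigned int cpu)
{
	/* The GDT of the CPU we are currently pinned to. */
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	unsigned int i;

	/* Copy the task's TLS descriptors into their reserved GDT slots. */
	for (i = 0; i < GDT_ENTRY_TLS_ENTRIES; i++)
		gdt[GDT_ENTRY_TLS_MIN + i] = t->tls_array[i];
}

Because the target GDT is per-CPU, every caller above pins itself to a CPU first, either with get_cpu()/put_cpu() or by running in a context where preemption is already disabled (such as __switch_to()).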