Example #1
static void __init xen_pv_smp_prepare_boot_cpu(void)
{
	BUG_ON(smp_processor_id() != 0);
	native_smp_prepare_boot_cpu();

	if (!xen_feature(XENFEAT_writable_page_tables))
		/* We've switched to the "real" per-cpu gdt, so make
		 * sure the old memory can be recycled. */
		make_lowmem_page_readwrite(xen_initial_gdt);

#ifdef CONFIG_X86_32
	/*
	 * Xen starts us with XEN_FLAT_RING1_DS, but linux code
	 * expects __USER_DS
	 */
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);
#endif

	xen_filter_cpu_maps();
	xen_setup_vcpu_info_placement();

	/*
	 * The alternative logic (which patches the unlock/lock) runs before
	 * the SMP bootup code is activated. Hence we need to set this up
	 * before the core kernel is patched. Otherwise we will have only
	 * modules patched but not core code.
	 */
	xen_init_spinlocks();
}
Example #2
void load_percpu_segment(int cpu)
{
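	/*
	 * The per-cpu area is reached through %fs on 32-bit kernels and
	 * through %gs on 64-bit kernels, where the segment base comes from
	 * the MSR_GS_BASE MSR rather than from a GDT descriptor.
	 */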
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
}
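All of these call sites use the kernel's loadsegment() helper to write a selector into a segment register. The sketch below is only an illustration of the core idea, not the kernel's actual macro, which additionally carries an exception-table fixup so that a faulting selector is replaced by the null selector instead of crashing.

/*
 * Illustrative sketch only -- not the real kernel macro, which also
 * recovers from an invalid selector by loading 0 instead of faulting.
 */
#define loadsegment_sketch(seg, value)				\
	asm volatile("movl %k0, %%" #seg			\
		     : /* no outputs */				\
		     : "r" (value)				\
		     : "memory")

/* Usage mirrors the examples above, e.g. loadsegment_sketch(es, __USER_DS). */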
Example #3
/* OK, I lied.  There are three "thread local storage" GDT entries which change
 * on every context switch (these three entries are how glibc implements
 * __thread variables).  So we have a hypercall specifically for this case. */
static void lguest_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/* There's one problem which normal hardware doesn't have: the Host
	 * can't handle us removing entries we're currently using.  So we clear
	 * the GS register here: if it's needed it'll be reloaded anyway. */
	loadsegment(gs, 0);
	lazy_hcall(LHCALL_LOAD_TLS, __pa(&t->tls_array), cpu, 0);
}
Example #4
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);

	/*
	 * segment registers
	 */
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();

	/*
	 * restore XCR0 for xsave capable cpu's.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();
	do_fpu_end();
	mtrr_ap_init();
	mcheck_init(&boot_cpu_data);
}
Example #5
void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
 	load_gdt(&ctxt->gdt_limit);
 	load_idt(&ctxt->idt_limit);

	/*
	 * segment registers
	 */
 	loadsegment(es, ctxt->es);
 	loadsegment(fs, ctxt->fs);
 	loadsegment(gs, ctxt->gs);
 	loadsegment(ss, ctxt->ss);

#ifdef CONFIG_SYSENTER
	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();
#endif

	fix_processor_context();
	do_fpu_end();
	mtrr_ap_init();
}
Example #6
void flush_thread(void)
{
	struct task_struct *tsk = current;

#if defined(CONFIG_X86_32) && !defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_PAX_MEMORY_UDEREF)
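	/*
	 * On 32-bit kernels %gs normally holds the (lazily managed) user
	 * TLS selector, so it is cleared for the fresh thread; when
	 * stack-protector or PaX UDEREF is configured, %gs is repurposed
	 * by the kernel and must not be cleared here.
	 */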
	loadsegment(gs, 0);
#endif
	flush_ptrace_hw_breakpoint(tsk);
	memset(tsk->thread.tls_array, 0, sizeof(tsk->thread.tls_array));
	/*
	 * Forget coprocessor state..
	 */
	tsk->fpu_counter = 0;
	clear_fpu(tsk);
	clear_used_math();
}
Example #7
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
Example #8
static void xen_load_tls(struct thread_struct *t, unsigned int cpu)
{
	/*
	 * XXX sleazy hack: If we're being called in a lazy-cpu zone
	 * and lazy gs handling is enabled, it means we're in a
	 * context switch, and %gs has just been saved.  This means we
	 * can zero it out to prevent faults on exit from the
	 * hypervisor if the next process has no %gs.  Either way, it
	 * has been saved, and the new value will get loaded properly.
	 * This will go away as soon as Xen has been modified to not
	 * save/restore %gs for normal hypercalls.
	 *
	 * On x86_64, this hack is not used for %gs, because gs points
	 * to KERNEL_GS_BASE (and uses it for PDA references), so we
	 * must not zero %gs on x86_64
	 *
	 * For x86_64, we need to zero %fs, otherwise we may get an
	 * exception between the new %fs descriptor being loaded and
	 * %fs being effectively cleared at __switch_to().
	 */
	if (paravirt_get_lazy_mode() == PARAVIRT_LAZY_CPU) {
#ifdef CONFIG_X86_32
		lazy_load_gs(0);
#else
		loadsegment(fs, 0);
#endif
	}

	xen_mc_batch();

	load_TLS_descriptor(t, cpu, 0);
	load_TLS_descriptor(t, cpu, 1);
	load_TLS_descriptor(t, cpu, 2);

	xen_mc_issue(PARAVIRT_LAZY_CPU);
}
Example #9
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* __copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,		/* movl $...,%eax (immediate follows) */
		__NR_ia32_rt_sigreturn,
		0x80cd,		/* int $0x80 */
		0
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
		err |= copy_siginfo_to_user32(&frame->info, info);

		/* Create the ucontext.  */
		if (cpu_has_xsave)
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
		put_user_ex(sas_ss_flags(regs->sp),
			    &frame->uc.uc_stack.ss_flags);
		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
		err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
					     regs, set->sig[0]);
		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

		if (ka->sa.sa_flags & SA_RESTORER)
			restorer = ka->sa.sa_restorer;
		else if (current->mm->context.vdso)
			/* Return stub is in 32bit vsyscall page */
			restorer = VDSO32_SYMBOL(current->mm->context.vdso, rt_sigreturn);
		else
			restorer = &frame->retcode;
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		/*
		 * Not actually used anymore, but left because some gdb
		 * versions need it.
		 */
		put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
Example #10
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs)
{
	struct sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u16 poplmovl;
		u32 val;
		u16 int80;
	} __attribute__((packed)) code = {
		0xb858,		 /* popl %eax ; movl $...,%eax */
		__NR_ia32_sigreturn,
		0x80cd,		/* int $0x80 */
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (_COMPAT_NSIG_WORDS > 1) {
		if (__copy_to_user(frame->extramask, &set->sig[1],
				   sizeof(frame->extramask)))
			return -EFAULT;
	}

	if (ka->sa.sa_flags & SA_RESTORER) {
		restorer = ka->sa.sa_restorer;
	} else {
		/* Return stub is in 32bit vsyscall page */
		if (current->mm->context.vdso)
			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
						 sigreturn);
		else
			restorer = &frame->retcode;
	}

	put_user_try {
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		/*
		 * These are actually not used anymore, but left because some
		 * gdb versions depend on them as a marker.
		 */
		put_user_ex(*((const u64 *)&code), (u64 __user *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = 0;
	regs->cx = 0;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
Example #11
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
		0x80cd,
		0,
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);
		err |= copy_siginfo_to_user32(&frame->info, info);

		
		if (cpu_has_xsave)
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		put_user_ex(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
		put_user_ex(sas_ss_flags(regs->sp),
			    &frame->uc.uc_stack.ss_flags);
		put_user_ex(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
		err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
					     regs, set->sig[0]);
		err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

		if (ka->sa.sa_flags & SA_RESTORER)
			restorer = ka->sa.sa_restorer;
		else
			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
						 rt_sigreturn);
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
Example #12
int ia32_setup_frame(int sig, struct k_sigaction *ka,
		     compat_sigset_t *set, struct pt_regs *regs)
{
	struct sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	
	static const struct {
		u16 poplmovl;
		u32 val;
		u16 int80;
	} __attribute__((packed)) code = {
		0xb858,		 
		__NR_ia32_sigreturn,
		0x80cd,		
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	if (__put_user(sig, &frame->sig))
		return -EFAULT;

	if (ia32_setup_sigcontext(&frame->sc, fpstate, regs, set->sig[0]))
		return -EFAULT;

	if (_COMPAT_NSIG_WORDS > 1) {
		if (__copy_to_user(frame->extramask, &set->sig[1],
				   sizeof(frame->extramask)))
			return -EFAULT;
	}

	if (ka->sa.sa_flags & SA_RESTORER) {
		restorer = ka->sa.sa_restorer;
	} else {
		
		if (current->mm->context.vdso)
			restorer = VDSO32_SYMBOL(current->mm->context.vdso,
						 sigreturn);
		else
			restorer = &frame->retcode;
	}

	put_user_try {
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		put_user_ex(*((u64 *)&code), (u64 *)frame->retcode);
	} put_user_catch(err);

	if (err)
		return -EFAULT;

	
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	
	regs->ax = sig;
	regs->dx = 0;
	regs->cx = 0;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
Example #13
/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;
	unsigned short __maybe_unused sel, modified_sel;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (!tls_desc_okay(&info))
		return -EINVAL;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	/*
	 * If DS, ES, FS, or GS points to the modified segment, forcibly
	 * refresh it.  Only needed on x86_64 because x86_32 reloads them
	 * on return to user mode.
	 */
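	/* A user-mode selector is the GDT index shifted left by 3 with RPL = 3. */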
	modified_sel = (idx << 3) | 3;

	if (p == current) {
#ifdef CONFIG_X86_64
		savesegment(ds, sel);
		if (sel == modified_sel)
			loadsegment(ds, sel);

		savesegment(es, sel);
		if (sel == modified_sel)
			loadsegment(es, sel);

		savesegment(fs, sel);
		if (sel == modified_sel)
			loadsegment(fs, sel);

		savesegment(gs, sel);
		if (sel == modified_sel)
			load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
		savesegment(gs, sel);
		if (sel == modified_sel)
			loadsegment(gs, sel);
#endif
	} else {
#ifdef CONFIG_X86_64
		if (p->thread.fsindex == modified_sel)
			p->thread.fsbase = info.base_addr;

		if (p->thread.gsindex == modified_sel)
			p->thread.gsbase = info.base_addr;
#endif
	}

	return 0;
}
Example #14
int vmadump_restore_cpu(cr_rstrt_proc_req_t *ctx, struct file *file,
			struct pt_regs *regs) {
    struct vmadump_restore_tmps *x86tmps;
    struct thread_struct *threadtmp;
    struct pt_regs *regtmp;
    int r;
    int idx, i, cpu;
    uint16_t fsindex, gsindex;
#if HAVE_STRUCT_N_DESC_STRUCT
    struct n_desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#else
    struct desc_struct tls_array[GDT_ENTRY_TLS_ENTRIES];
#endif

    /* XXX: Note allocation assumes i387tmp and threadtmp are never active at the same time */
    x86tmps = kmalloc(sizeof(*x86tmps), GFP_KERNEL);
    if (!x86tmps) return -ENOMEM;
    regtmp = VMAD_REGTMP(x86tmps);
    threadtmp = VMAD_THREADTMP(x86tmps);

    r = read_kern(ctx, file, regtmp, sizeof(*regtmp));
    if (r != sizeof(*regtmp)) goto bad_read;

    /* Don't let the user pick funky segments */
    if ((regtmp->cs != __USER_CS && regtmp->cs != __USER32_CS) &&
	(regtmp->ss != __USER_DS && regtmp->ss != __USER32_DS)) {
	r = -EINVAL;
	goto bad_read;
    }

    /* Set our process type */
    if (regtmp->cs == __USER32_CS)
	set_thread_flag(TIF_IA32);
    else
	clear_thread_flag(TIF_IA32);	

    /* Only restore bottom 9 bits of eflags.  Restoring anything else
     * is bad bad mojo for security. (0x200 = interrupt enable) */
#if HAVE_PT_REGS_EFLAGS
    regtmp->eflags = 0x200 | (regtmp->eflags & 0x000000FF);
#elif HAVE_PT_REGS_FLAGS
    regtmp->flags = 0x200 | (regtmp->flags & 0x000000FF);
#else
    #error
#endif
    memcpy(regs, regtmp, sizeof(*regtmp));

    /* Restore FPU info (and later general "extended state") */
    r = vmadump_restore_i387(ctx, file, VMAD_I387TMP(x86tmps));
    if (r < 0) goto bad_read;
	
    /* XXX FIX ME: RESTORE DEBUG INFORMATION ?? */
    /* Here we read it but ignore it. */
    r = vmadump_restore_debugreg(ctx, file);
    if (r < 0) goto bad_read;

    /* user(r)sp, since we don't use the ptrace entry path in BLCR */
#if HAVE_THREAD_USERSP
    r = read_kern(ctx, file, &threadtmp->usersp, sizeof(threadtmp->usersp));
    if (r != sizeof(threadtmp->usersp)) goto bad_read;
    current->thread.usersp = threadtmp->usersp;
    vmad_write_oldrsp(threadtmp->usersp);
#elif HAVE_THREAD_USERRSP
    r = read_kern(ctx, file, &threadtmp->userrsp, sizeof(threadtmp->userrsp));
    if (r != sizeof(threadtmp->userrsp)) goto bad_read;
    current->thread.userrsp = threadtmp->userrsp;
    vmad_write_oldrsp(threadtmp->userrsp);
#else
    #error
#endif

    /*-- restore segmentation related stuff */

    /* Restore FS_BASE MSR */
    r = read_kern(ctx, file, &threadtmp->fs, sizeof(threadtmp->fs));
    if (r != sizeof(threadtmp->fs)) goto bad_read;
    if (threadtmp->fs >= TASK_SIZE) {
	r = -EINVAL;
	goto bad_read;
    }
    current->thread.fs = threadtmp->fs;
    if ((r = checking_wrmsrl(MSR_FS_BASE, threadtmp->fs)))
	goto bad_read;
	
    /* Restore GS_KERNEL_BASE MSR */
    r = read_kern(ctx, file, &threadtmp->gs, sizeof(threadtmp->gs));
    if (r != sizeof(threadtmp->gs)) goto bad_read;
    if (threadtmp->gs >= TASK_SIZE) {
	r = -EINVAL;
	goto bad_read;
    }
    current->thread.gs = threadtmp->gs;
    if ((r = checking_wrmsrl(MSR_KERNEL_GS_BASE, threadtmp->gs)))
	goto bad_read;

    /* Restore 32 bit segment stuff */
    r = read_kern(ctx, file, &fsindex, sizeof(fsindex));
    if (r != sizeof(fsindex)) goto bad_read;

    r = read_kern(ctx, file, &gsindex, sizeof(gsindex));
    if (r != sizeof(gsindex)) goto bad_read;

    r = read_kern(ctx, file, tls_array, sizeof(tls_array));
    if (r != sizeof(tls_array)) goto bad_read;

    /* Sanitize fs, gs.  These segment descriptors should load one
     * of the TLS entries and have DPL = 3.  If somebody is doing
     * some other LDT monkey business, I'm currently not
     * supporting that here.  Also, I'm presuming that the offsets
     * to the GDT_ENTRY_TLS_MIN is the same in both kernels. */
    idx = fsindex >> 3;
    if (idx<GDT_ENTRY_TLS_MIN || idx>GDT_ENTRY_TLS_MAX || (fsindex&7) != 3)
	fsindex = 0;
    idx = gsindex >> 3;
    if (idx<GDT_ENTRY_TLS_MIN || idx>GDT_ENTRY_TLS_MAX || (gsindex&7) != 3)
	gsindex = 0;

    /* Sanitize the TLS entries...
     * Make sure the following bits are set/not set:
     *  bit 12   : S    =  1    (code/data - not system)
     *  bit 13-14: DPL  = 11    (priv level = 3 (user))
     *  bit 21   :      =  0    (reserved)
     *
     * If the entry isn't valid, zero the whole descriptor.
     */
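    /* tls_array[i].b is the descriptor's high word: 0x00207000 selects
     * bits 12-14 and 21, which must equal 0x00007000 (S=1, DPL=3,
     * bit 21 clear) for a valid user TLS entry. */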
    for (i=0; i < GDT_ENTRY_TLS_ENTRIES; i++) {
	if (tls_array[i].b != 0 && 
	    (tls_array[i].b & 0x00207000) != 0x00007000) {
	    r = -EINVAL;
	    goto bad_read;
	}
    }

    /* Ok load this crap */
    cpu = get_cpu();	/* load_TLS can't get pre-empted. */
    memcpy(current->thread.tls_array, tls_array,
	   sizeof(current->thread.tls_array));
    current->thread.fsindex = fsindex;
    current->thread.gsindex = gsindex;
    load_TLS(&current->thread, cpu);

    loadsegment(fs, current->thread.fsindex);
    load_gs_index(current->thread.gsindex);
    put_cpu();

    /* In case cr_restart and child don't have same ABI */
    if (regtmp->cs == __USER32_CS) {
	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);
    } else {
	loadsegment(ds, __USER_DS);
	loadsegment(es, __USER_DS);
    }

#if HAVE_THREAD_INFO_SYSENTER_RETURN
    {
	void *sysenter_return;
	r = read_kern(ctx, file, &sysenter_return, sizeof(sysenter_return));
	if (r != sizeof(sysenter_return)) goto bad_read;
	current_thread_info()->sysenter_return = sysenter_return;
    }
#endif
    
    kfree(x86tmps);
    return 0;

 bad_read:
    kfree(x86tmps);
    if (r >= 0) r = -EIO;
    return r;
}
Example #15
int ia32_setup_rt_frame(int sig, struct ksignal *ksig,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe_ia32 __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* __copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u8  pad;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
		0x80cd,
		0,
	};

	frame = get_sigframe(ksig, regs, sizeof(*frame), &fpstate);

	if (!access_ok(frame, sizeof(*frame)))
		return -EFAULT;

	put_user_try {
		put_user_ex(sig, &frame->sig);
		put_user_ex(ptr_to_compat(&frame->info), &frame->pinfo);
		put_user_ex(ptr_to_compat(&frame->uc), &frame->puc);

		/* Create the ucontext.  */
		if (boot_cpu_has(X86_FEATURE_XSAVE))
			put_user_ex(UC_FP_XSTATE, &frame->uc.uc_flags);
		else
			put_user_ex(0, &frame->uc.uc_flags);
		put_user_ex(0, &frame->uc.uc_link);
		compat_save_altstack_ex(&frame->uc.uc_stack, regs->sp);

		if (ksig->ka.sa.sa_flags & SA_RESTORER)
			restorer = ksig->ka.sa.sa_restorer;
		else
			restorer = current->mm->context.vdso +
				vdso_image_32.sym___kernel_rt_sigreturn;
		put_user_ex(ptr_to_compat(restorer), &frame->pretcode);

		/*
		 * Not actually used anymore, but left because some gdb
		 * versions need it.
		 */
		put_user_ex(*((u64 *)&code), (u64 __user *)frame->retcode);
	} put_user_catch(err);

	err |= __copy_siginfo_to_user32(&frame->info, &ksig->info, false);
	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				     regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));

	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ksig->ka.sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

	return 0;
}
Example #16
int ia32_setup_rt_frame(int sig, struct k_sigaction *ka, siginfo_t *info,
			compat_sigset_t *set, struct pt_regs *regs)
{
	struct rt_sigframe __user *frame;
	void __user *restorer;
	int err = 0;
	void __user *fpstate = NULL;

	/* __copy_to_user optimizes that into a single 8 byte store */
	static const struct {
		u8 movl;
		u32 val;
		u16 int80;
		u16 pad;
		u8  pad2;
	} __attribute__((packed)) code = {
		0xb8,
		__NR_ia32_rt_sigreturn,
		0x80cd,
		0,
		0
	};

	frame = get_sigframe(ka, regs, sizeof(*frame), &fpstate);

	if (!access_ok(VERIFY_WRITE, frame, sizeof(*frame)))
		return -EFAULT;

	err |= __put_user(sig, &frame->sig);
	err |= __put_user(ptr_to_compat(&frame->info), &frame->pinfo);
	err |= __put_user(ptr_to_compat(&frame->uc), &frame->puc);
	err |= copy_siginfo_to_user32(&frame->info, info);
	if (err)
		return -EFAULT;

	/* Create the ucontext.  */
	if (cpu_has_xsave)
		err |= __put_user(UC_FP_XSTATE, &frame->uc.uc_flags);
	else
		err |= __put_user(0, &frame->uc.uc_flags);
	err |= __put_user(0, &frame->uc.uc_link);
	err |= __put_user(current->sas_ss_sp, &frame->uc.uc_stack.ss_sp);
	err |= __put_user(sas_ss_flags(regs->sp),
			  &frame->uc.uc_stack.ss_flags);
	err |= __put_user(current->sas_ss_size, &frame->uc.uc_stack.ss_size);
	err |= ia32_setup_sigcontext(&frame->uc.uc_mcontext, fpstate,
				     regs, set->sig[0]);
	err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
	if (err)
		return -EFAULT;

	if (ka->sa.sa_flags & SA_RESTORER)
		restorer = ka->sa.sa_restorer;
	else
		restorer = VDSO32_SYMBOL(current->mm->context.vdso,
					 rt_sigreturn);
	err |= __put_user(ptr_to_compat(restorer), &frame->pretcode);

	/*
	 * Not actually used anymore, but left because some gdb
	 * versions need it.
	 */
	err |= __copy_to_user(frame->retcode, &code, 8);
	if (err)
		return -EFAULT;

	/* Set up registers for signal handler */
	regs->sp = (unsigned long) frame;
	regs->ip = (unsigned long) ka->sa.sa_handler;

	/* Make -mregparm=3 work */
	regs->ax = sig;
	regs->dx = (unsigned long) &frame->info;
	regs->cx = (unsigned long) &frame->uc;

	loadsegment(ds, __USER32_DS);
	loadsegment(es, __USER32_DS);

	regs->cs = __USER32_CS;
	regs->ss = __USER32_DS;

#if DEBUG_SIG
	printk(KERN_DEBUG "SIG deliver (%s:%d): sp=%p pc=%lx ra=%u\n",
	       current->comm, current->pid, frame, regs->ip, frame->pretcode);
#endif

	return 0;
}
Example #17
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
    struct thread_struct *prev = &prev_p->thread,
                              *next = &next_p->thread;
    int cpu = smp_processor_id();
    struct tss_struct *tss = &per_cpu(init_tss, cpu);

    /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

    __unlazy_fpu(prev_p);


    /* we're going to use this soon, after a few expensive things */
    if (next_p->fpu_counter > 5)
        prefetch(&next->i387.fxsave);

    /*
     * Reload esp0 so the TSS points at the incoming task's kernel stack
     * for the next ring transition.
     */
    load_esp0(tss, next);

    /*
     * Save away %gs. No need to save %fs, as it was saved on the
     * stack on entry.  No need to save %es and %ds, as those are
     * always kernel segments while inside the kernel.  Doing this
     * before setting the new TLS descriptors avoids the situation
     * where we temporarily have non-reloadable segments in %fs
     * and %gs.  This could be an issue if the NMI handler ever
     * used %fs or %gs (it does not today), or if the kernel is
     * running inside of a hypervisor layer.
     */
    savesegment(gs, prev->gs);

    /*
     * Load the per-thread Thread-Local Storage descriptor.
     */
    load_TLS(next, cpu);

    /*
     * Restore IOPL if needed.  In normal use, the flags restore
     * in the switch assembly will handle this.  But if the kernel
     * is running virtualized at a non-zero CPL, the popf will
     * not restore flags, so it must be done in a separate step.
     */
    if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
        set_iopl_mask(next->iopl);

    /*
     * Now maybe handle debug registers and/or IO bitmaps
     */
    if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
                 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
        __switch_to_xtra(next_p, tss);

    disable_tsc(prev_p, next_p);

    /*
     * Leave lazy mode, flushing any hypercalls made here.
     * This must be done before restoring TLS segments so
     * the GDT and LDT are properly updated, and must be
     * done before math_state_restore, so the TS bit is up
     * to date.
     */
    arch_leave_lazy_cpu_mode();

    /* If the task has used fpu the last 5 timeslices, just do a full
     * restore of the math state immediately to avoid the trap; the
     * chances of needing FPU soon are obviously high now
     */
    if (next_p->fpu_counter > 5)
        math_state_restore();

    /*
     * Restore %gs if needed (which is common)
     */
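    /* The bitwise OR is deliberate: it checks "either selector non-zero"
     * without introducing a second conditional branch. */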
    if (prev->gs | next->gs)
        loadsegment(gs, next->gs);

    x86_write_percpu(current_task, next_p);

    return prev_p;
}