Example #1
int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_DEFAULT
	       "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC ");
#endif
#ifdef CONFIG_KASAN
	printk("KASAN");
#endif
	printk("\n");
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
		return 1;

	/* FIXME: this should probably also print into the buffer */
	print_modules();
	show_regs(regs);
#ifdef CONFIG_X86_32
	if (user_mode(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	/* FIXME */
	printk_address(regs->ip);
	printk(" RSP <%016lx>\n", regs->sp);
#endif
	return 0;
}
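
Every example in this set revolves around the savesegment() macro, which copies a segment register into a C variable with a single MOV. For reference, a sketch close to its definition in arch/x86/include/asm/segment.h (exact constraints and the fault-tolerant loadsegment() counterpart vary by kernel version):

/*
 * Sketch of savesegment(); the real macro in
 * arch/x86/include/asm/segment.h looks essentially like this.
 */
#define savesegment(seg, value) \
	asm("mov %%" #seg ",%0" : "=r" (value) : : "memory")

/*
 * Hypothetical simplified write-side counterpart. The kernel's real
 * loadsegment() additionally carries an exception-table fixup so that
 * loading a stale selector faults safely instead of oopsing.
 */
#define loadsegment_sketch(seg, value) \
	asm volatile("mov %0,%%" #seg : : "rm" (value) : "memory")
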
Example #2
int copy_thread(int nr, unsigned long clone_flags, unsigned long sp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->ax = 0;
	childregs->sp = sp;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);

	p->thread.ip = (unsigned long) ret_from_fork;

	savesegment(gs, p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
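
Note the kmemdup() call above: it folds the kmalloc()-plus-memcpy() pair used by the older versions of this function (Examples #8 and #11 below) into a single helper. A sketch of what it does, matching the documented semantics of the real helper in mm/util.c:

#include <linux/slab.h>
#include <linux/string.h>

/*
 * Rough equivalent of kmemdup(src, len, gfp): allocate len bytes and
 * copy src into them, returning NULL if the allocation fails (sketch,
 * not the kernel's actual implementation).
 */
static void *kmemdup_sketch(const void *src, size_t len, gfp_t gfp)
{
	void *p = kmalloc(len, gfp);

	if (p)
		memcpy(p, src, len);
	return p;
}
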
Example #3
int __kprobes __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
	unsigned short ss;
	unsigned long sp;
#endif
	printk(KERN_EMERG "%s: %04lx [#%d] ", str, err & 0xffff, ++die_counter);
#ifdef CONFIG_PREEMPT
	printk("PREEMPT ");
#endif
#ifdef CONFIG_SMP
	printk("SMP ");
#endif
#ifdef CONFIG_DEBUG_PAGEALLOC
	printk("DEBUG_PAGEALLOC");
#endif
	printk("\n");
	sysfs_printk_last_file();
	if (notify_die(DIE_OOPS, str, regs, err,
			current->thread.trap_no, SIGSEGV) == NOTIFY_STOP)
		return 1;

	show_registers(regs);
#ifdef CONFIG_X86_32
	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
	} else {
		sp = kernel_stack_pointer(regs);
		savesegment(ss, ss);
	}
	printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
	print_symbol("%s", regs->ip);
	printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
	/* Executive summary in case the oops scrolled away */
	printk(KERN_ALERT "RIP ");
	printk_address(regs->ip, 1);
	printk(" RSP <%016lx>\n", regs->sp);
#endif
	return 0;
}
Example #4
static int
setup_sigcontext(struct sigcontext __user *sc, struct _fpstate __user *fpstate,
		 struct pt_regs *regs, unsigned long mask)
{
	int tmp, err = 0;

	err |= __put_user(regs->xfs, (unsigned int __user *)&sc->fs);
	savesegment(gs, tmp);
	err |= __put_user(tmp, (unsigned int __user *)&sc->gs);

	err |= __put_user(regs->xes, (unsigned int __user *)&sc->es);
	err |= __put_user(regs->xds, (unsigned int __user *)&sc->ds);
	err |= __put_user(regs->edi, &sc->edi);
	err |= __put_user(regs->esi, &sc->esi);
	err |= __put_user(regs->ebp, &sc->ebp);
	err |= __put_user(regs->esp, &sc->esp);
	err |= __put_user(regs->ebx, &sc->ebx);
	err |= __put_user(regs->edx, &sc->edx);
	err |= __put_user(regs->ecx, &sc->ecx);
	err |= __put_user(regs->eax, &sc->eax);
	err |= __put_user(current->thread.trap_no, &sc->trapno);
	err |= __put_user(current->thread.error_code, &sc->err);
	err |= __put_user(regs->eip, &sc->eip);
	err |= __put_user(regs->xcs, (unsigned int __user *)&sc->cs);
	err |= __put_user(regs->eflags, &sc->eflags);
	err |= __put_user(regs->esp, &sc->esp_at_signal);
	err |= __put_user(regs->xss, (unsigned int __user *)&sc->ss);

	tmp = save_i387(fpstate);
	if (tmp < 0)
		err = 1;
	else
		err |= __put_user(tmp ? fpstate : NULL, &sc->fpstate);

	/* non-iBCS2 extensions.. */
	err |= __put_user(mask, &sc->oldmask);
	err |= __put_user(current->thread.cr2, &sc->cr2);

	return err;
}
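
The err |= __put_user(...) chains above use a common idiom: __put_user() returns 0 on success and -EFAULT if the store to user space faults, so OR-ing the results lets the function perform every copy and test for failure once at the end. A minimal sketch of the idiom in isolation (the structure and its fields are invented for illustration):

#include <linux/uaccess.h>

/* Hypothetical frame type, for illustration only. */
struct example_frame {
	unsigned long a;
	unsigned long b;
};

static int fill_frame(struct example_frame __user *uf,
		      unsigned long a, unsigned long b)
{
	int err = 0;

	err |= __put_user(a, &uf->a);	/* each call: 0 or -EFAULT */
	err |= __put_user(b, &uf->b);

	return err;	/* nonzero if any of the stores faulted */
}
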
Example #5
int __die(const char *str, struct pt_regs *regs, long err)
{
#ifdef CONFIG_X86_32
    unsigned short ss;
    unsigned long sp;
#endif
    printk(KERN_DEFAULT
           "%s: %04lx [#%d]%s%s%s%s\n", str, err & 0xffff, ++die_counter,
           IS_ENABLED(CONFIG_PREEMPT) ? " PREEMPT"         : "",
           IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
           debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
           IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "");

    if (notify_die(DIE_OOPS, str, regs, err,
                   current->thread.trap_nr, SIGSEGV) == NOTIFY_STOP)
        return 1;

    print_modules();
    show_regs(regs);
#ifdef CONFIG_X86_32
    if (user_mode(regs)) {
        sp = regs->sp;
        ss = regs->ss & 0xffff;
    } else {
        sp = kernel_stack_pointer(regs);
        savesegment(ss, ss);
    }
    printk(KERN_EMERG "EIP: [<%08lx>] ", regs->ip);
    print_symbol("%s", regs->ip);
    printk(" SS:ESP %04x:%08lx\n", ss, sp);
#else
    /* Executive summary in case the oops scrolled away */
    printk(KERN_ALERT "RIP ");
    printk_address(regs->ip);
    printk(" RSP <%016lx>\n", regs->sp);
#endif
    return 0;
}
Example #6
/*
 * fill in the user structure for a core dump..
 */
void dump_thread(struct pt_regs * regs, struct user * dump)
{
    int i;

    /* changed the size calculations - should hopefully work better. lbt */
    dump->magic = CMAGIC;
    dump->start_code = 0;
    dump->start_stack = regs->esp & ~(PAGE_SIZE - 1);
    dump->u_tsize = ((unsigned long) current->mm->end_code) >> PAGE_SHIFT;
    dump->u_dsize = ((unsigned long) (current->mm->brk + (PAGE_SIZE-1))) >> PAGE_SHIFT;
    dump->u_dsize -= dump->u_tsize;
    dump->u_ssize = 0;
    for (i = 0; i < 8; i++)
        dump->u_debugreg[i] = current->thread.debugreg[i];

    if (dump->start_stack < TASK_SIZE)
        dump->u_ssize = ((unsigned long) (TASK_SIZE - dump->start_stack)) >> PAGE_SHIFT;

    dump->regs.ebx = regs->ebx;
    dump->regs.ecx = regs->ecx;
    dump->regs.edx = regs->edx;
    dump->regs.esi = regs->esi;
    dump->regs.edi = regs->edi;
    dump->regs.ebp = regs->ebp;
    dump->regs.eax = regs->eax;
    dump->regs.ds = regs->xds;
    dump->regs.es = regs->xes;
    dump->regs.fs = regs->xfs;
    savesegment(gs,dump->regs.gs);
    dump->regs.orig_eax = regs->orig_eax;
    dump->regs.eip = regs->eip;
    dump->regs.cs = regs->xcs;
    dump->regs.eflags = regs->eflags;
    dump->regs.esp = regs->esp;
    dump->regs.ss = regs->xss;

    dump->u_fpvalid = dump_fpu (regs, &dump->i387);
}
Example #7
void __show_regs(struct pt_regs *regs, int all)
{
	unsigned long cr0 = 0L, cr2 = 0L, cr3 = 0L, cr4 = 0L;
	unsigned long d0, d1, d2, d3, d6, d7;
	unsigned long sp;
	unsigned short ss, gs;
	const char *board;

	if (user_mode_vm(regs)) {
		sp = regs->sp;
		ss = regs->ss & 0xffff;
		gs = get_user_gs(regs);
	} else {
		sp = (unsigned long) (&regs->sp);
		savesegment(ss, ss);
		savesegment(gs, gs);
	}

	printk("\n");

	board = dmi_get_system_info(DMI_PRODUCT_NAME);
	if (!board)
		board = "";
	printk("Pid: %d, comm: %s %s (%s %.*s) %s\n",
			task_pid_nr(current), current->comm,
			print_tainted(), init_utsname()->release,
			(int)strcspn(init_utsname()->version, " "),
			init_utsname()->version, board);

	printk("EIP: %04x:[<%08lx>] EFLAGS: %08lx CPU: %d\n",
			(u16)regs->cs, regs->ip, regs->flags,
			smp_processor_id());
	print_symbol("EIP is at %s\n", regs->ip);

	printk("EAX: %08lx EBX: %08lx ECX: %08lx EDX: %08lx\n",
		regs->ax, regs->bx, regs->cx, regs->dx);
	printk("ESI: %08lx EDI: %08lx EBP: %08lx ESP: %08lx\n",
		regs->si, regs->di, regs->bp, sp);
	printk(" DS: %04x ES: %04x FS: %04x GS: %04x SS: %04x\n",
	       (u16)regs->ds, (u16)regs->es, (u16)regs->fs, gs, ss);

	if (!all)
		return;

	cr0 = read_cr0();
	cr2 = read_cr2();
	cr3 = read_cr3();
	cr4 = read_cr4_safe();
	printk("CR0: %08lx CR2: %08lx CR3: %08lx CR4: %08lx\n",
			cr0, cr2, cr3, cr4);

	get_debugreg(d0, 0);
	get_debugreg(d1, 1);
	get_debugreg(d2, 2);
	get_debugreg(d3, 3);
	printk("DR0: %08lx DR1: %08lx DR2: %08lx DR3: %08lx\n",
			d0, d1, d2, d3);

	get_debugreg(d6, 6);
	get_debugreg(d7, 7);
	printk("DR6: %08lx DR7: %08lx\n",
			d6, d7);
}
Example #8
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err;

	childregs = task_pt_regs(p);
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;

	p->thread.esp = (unsigned long) childregs;
	p->thread.esp0 = (unsigned long) (childregs+1);

	p->thread.eip = (unsigned long) ret_from_fork;

	savesegment(fs,p->thread.fs);
	savesegment(gs,p->thread.gs);

	tsk = current;
	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
		struct desc_struct *desc;
		struct user_desc info;
		int idx;

		err = -EFAULT;
		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
			goto out;
		err = -EINVAL;
		if (LDT_empty(&info))
			goto out;

		idx = info.entry_number;
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			goto out;

		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	p->thread.iopl = current->thread.iopl;

	err = 0;
 out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
Example #9
/*
 * Set a given TLS descriptor:
 */
int do_set_thread_area(struct task_struct *p, int idx,
		       struct user_desc __user *u_info,
		       int can_allocate)
{
	struct user_desc info;
	unsigned short __maybe_unused sel, modified_sel;

	if (copy_from_user(&info, u_info, sizeof(info)))
		return -EFAULT;

	if (!tls_desc_okay(&info))
		return -EINVAL;

	if (idx == -1)
		idx = info.entry_number;

	/*
	 * index -1 means the kernel should try to find and
	 * allocate an empty descriptor:
	 */
	if (idx == -1 && can_allocate) {
		idx = get_free_idx();
		if (idx < 0)
			return idx;
		if (put_user(idx, &u_info->entry_number))
			return -EFAULT;
	}

	if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
		return -EINVAL;

	set_tls_desc(p, idx, &info, 1);

	/*
	 * If DS, ES, FS, or GS points to the modified segment, forcibly
	 * refresh it.  Only needed on x86_64 because x86_32 reloads them
	 * on return to user mode.
	 */
	modified_sel = (idx << 3) | 3;

	if (p == current) {
#ifdef CONFIG_X86_64
		savesegment(ds, sel);
		if (sel == modified_sel)
			loadsegment(ds, sel);

		savesegment(es, sel);
		if (sel == modified_sel)
			loadsegment(es, sel);

		savesegment(fs, sel);
		if (sel == modified_sel)
			loadsegment(fs, sel);

		savesegment(gs, sel);
		if (sel == modified_sel)
			load_gs_index(sel);
#endif

#ifdef CONFIG_X86_32_LAZY_GS
		savesegment(gs, sel);
		if (sel == modified_sel)
			loadsegment(gs, sel);
#endif
	} else {
#ifdef CONFIG_X86_64
		if (p->thread.fsindex == modified_sel)
			p->thread.fsbase = info.base_addr;

		if (p->thread.gsindex == modified_sel)
			p->thread.gsbase = info.base_addr;
#endif
	}

	return 0;
}
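
do_set_thread_area() is the backend of the set_thread_area(2) syscall; as the comment in the code notes, an entry_number of -1 asks the kernel to find a free TLS slot in the GDT and write the chosen index back to user space. A minimal userspace sketch of that calling convention (x86-specific; error handling trimmed):

/* Userspace sketch: install a TLS segment, letting the kernel pick
 * the GDT slot, and return the allocated entry number. */
#include <asm/ldt.h>		/* struct user_desc */
#include <sys/syscall.h>
#include <unistd.h>

static int install_tls(void *base, unsigned int limit)
{
	struct user_desc desc = {
		.entry_number = -1,	/* ask the kernel to allocate */
		.base_addr    = (unsigned long)base,
		.limit        = limit,
		.seg_32bit    = 1,
		.useable      = 1,
	};

	if (syscall(SYS_set_thread_area, &desc) != 0)
		return -1;
	return desc.entry_number;	/* filled in by the kernel */
}
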
Example #10
long vmadump_store_cpu(cr_chkpt_proc_req_t *ctx, struct file *file,
		       struct pt_regs *regs) {
    long bytes = 0, r;

    /* Store struct pt_regs */
#ifdef CONFIG_XEN
    /* Ensure CS and SS are not Xen-modified, others restore based on CS */
    {   struct pt_regs regtmp = *regs;
        if (test_thread_flag(TIF_IA32)) {
            regtmp.cs = __USER32_CS;
            regtmp.ss = __USER32_DS;
        } else {
            regtmp.cs = __USER_CS;
            regtmp.ss = __USER_DS;
        }
        r = write_kern(ctx, file, &regtmp, sizeof(regtmp));
    }
#else
    r = write_kern(ctx, file, regs, sizeof(*regs));
#endif
    if (r != sizeof(*regs)) goto err;
    bytes += r;

    /* Store FPU info (and later general "extended state") */
    r = vmadump_store_i387(ctx, file);
    if (r <= 0) goto err;
    bytes += r;

    /* Store debugging state */
    r = vmadump_store_debugreg(ctx, file);
    if (r < 0) goto err;
    bytes += r;

    /* user(r)sp, since we don't use the ptrace entry path in BLCR */
#if HAVE_THREAD_USERSP
    current->thread.usersp = vmad_read_oldrsp();
    r = write_kern(ctx, file, &current->thread.usersp,
		   sizeof(current->thread.usersp));
    if (r != sizeof(current->thread.usersp)) goto err;
#elif HAVE_THREAD_USERRSP
    current->thread.userrsp = vmad_read_oldrsp();
    r = write_kern(ctx, file, &current->thread.userrsp,
		   sizeof(current->thread.userrsp));
    if (r != sizeof(current->thread.userrsp)) goto err;
#else
    #error
#endif
    bytes += r;

    /* Store all weird segmenty crap */

    /* 64-bit offsets for FS and GS */
    r = write_kern(ctx, file, &current->thread.fs,
		   sizeof(current->thread.fs));
    if (r != sizeof(current->thread.fs)) goto err;
    bytes += r;
    
    r = write_kern(ctx, file, &current->thread.gs,
		   sizeof(current->thread.gs));
    if (r != sizeof(current->thread.gs)) goto err;
    bytes += r;

    savesegment(fs,current->thread.fsindex);
    savesegment(gs,current->thread.gsindex);

    /* 16-bit segment selectors for FS and GS */
    r = write_kern(ctx, file, &current->thread.fsindex,
		   sizeof(current->thread.fsindex));
    if (r != sizeof(current->thread.fsindex)) goto err;
    bytes += r;
    
    r = write_kern(ctx, file, &current->thread.gsindex,
		   sizeof(current->thread.gsindex));
    if (r != sizeof(current->thread.gsindex)) goto err;
    bytes += r;

    /* TLS segment descriptors */
    r = write_kern(ctx, file, &current->thread.tls_array,
		   sizeof(current->thread.tls_array));
    if (r != sizeof(current->thread.tls_array)) goto err;
    bytes += r;

#if HAVE_THREAD_INFO_SYSENTER_RETURN
    {
	void *sysenter_return = current_thread_info()->sysenter_return;
	r = write_kern(ctx, file, &sysenter_return, sizeof(sysenter_return));
	if (r != sizeof(sysenter_return)) goto err;
    }
#endif
    
    return bytes;

 err:
    if (r >= 0) r = -EIO;
    return r;
}
Example #11
int copy_thread(int nr, unsigned long clone_flags, unsigned long esp,
	unsigned long unused,
	struct task_struct * p, struct pt_regs * regs)
{
	struct pt_regs * childregs;
	struct task_struct *tsk;
	int err, i;

	childregs = ((struct pt_regs *) (THREAD_SIZE + (unsigned long) p->thread_info)) - 1;
	*childregs = *regs;
	childregs->eax = 0;
	childregs->esp = esp;
	p->set_child_tid = p->clear_child_tid = NULL;

	p->thread.esp = (unsigned long) childregs;
	p->thread.esp0 = (unsigned long) (childregs+1);

	/*
	 * get the two stack pages, for the virtual stack.
	 *
	 * IMPORTANT: this code relies on the fact that the task
	 * structure is an THREAD_SIZE aligned piece of physical memory.
	 */
	for (i = 0; i < ARRAY_SIZE(p->thread.stack_page); i++)
		p->thread.stack_page[i] =
				virt_to_page((unsigned long)p->thread_info + (i*PAGE_SIZE));

	p->thread.eip = (unsigned long) ret_from_fork;
	p->thread_info->real_stack = p->thread_info;

	savesegment(fs,p->thread.fs);
	savesegment(gs,p->thread.gs);

	tsk = current;
	if (unlikely(NULL != tsk->thread.io_bitmap_ptr)) {
		p->thread.io_bitmap_ptr = kmalloc(IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		memcpy(p->thread.io_bitmap_ptr, tsk->thread.io_bitmap_ptr,
			IO_BITMAP_BYTES);
	}

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS) {
		struct desc_struct *desc;
		struct user_desc info;
		int idx;

		err = -EFAULT;
		if (copy_from_user(&info, (void __user *)childregs->esi, sizeof(info)))
			goto out;
		err = -EINVAL;
		if (LDT_empty(&info))
			goto out;

		idx = info.entry_number;
		if (idx < GDT_ENTRY_TLS_MIN || idx > GDT_ENTRY_TLS_MAX)
			goto out;

		desc = p->thread.tls_array + idx - GDT_ENTRY_TLS_MIN;
		desc->a = LDT_entry_a(&info);
		desc->b = LDT_entry_b(&info);
	}

	p->thread.io_pl = current->thread.io_pl;

	err = 0;
 out:
	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
Example #12
int copy_thread(unsigned long clone_flags, unsigned long sp,
	unsigned long arg, struct task_struct *p)
{
	struct pt_regs *childregs = task_stack_page(p) + THREAD_SIZE - sizeof(struct pt_regs) - 8;
	struct task_struct *tsk;
	int err;

	p->thread.sp = (unsigned long) childregs;
	p->thread.sp0 = (unsigned long) (childregs+1);
	p->tinfo.lowest_stack = (unsigned long)task_stack_page(p);

	if (unlikely(p->flags & PF_KTHREAD)) {
		/* kernel thread */
		memset(childregs, 0, sizeof(struct pt_regs));
		p->thread.ip = (unsigned long) ret_from_kernel_thread;
		savesegment(gs, childregs->gs);
		childregs->ds = __KERNEL_DS;
		childregs->es = __KERNEL_DS;
		childregs->fs = __KERNEL_PERCPU;
		childregs->bx = sp;	/* function */
		childregs->bp = arg;
		childregs->orig_ax = -1;
		childregs->cs = __KERNEL_CS | get_kernel_rpl();
		childregs->flags = X86_EFLAGS_IF | X86_EFLAGS_BIT1;
		p->fpu_counter = 0;
		p->thread.io_bitmap_ptr = NULL;
		memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));
		return 0;
	}
	*childregs = *current_pt_regs();
	childregs->ax = 0;
	if (sp)
		childregs->sp = sp;

	p->thread.ip = (unsigned long) ret_from_fork;
	task_user_gs(p) = get_user_gs(current_pt_regs());

	p->fpu_counter = 0;
	p->thread.io_bitmap_ptr = NULL;
	tsk = current;
	err = -ENOMEM;

	memset(p->thread.ptrace_bps, 0, sizeof(p->thread.ptrace_bps));

	if (unlikely(test_tsk_thread_flag(tsk, TIF_IO_BITMAP))) {
		p->thread.io_bitmap_ptr = kmemdup(tsk->thread.io_bitmap_ptr,
						IO_BITMAP_BYTES, GFP_KERNEL);
		if (!p->thread.io_bitmap_ptr) {
			p->thread.io_bitmap_max = 0;
			return -ENOMEM;
		}
		set_tsk_thread_flag(p, TIF_IO_BITMAP);
	}

	err = 0;

	/*
	 * Set a new TLS for the child thread?
	 */
	if (clone_flags & CLONE_SETTLS)
		err = do_set_thread_area(p, -1,
			(struct user_desc __user *)childregs->si, 0);

	if (err && p->thread.io_bitmap_ptr) {
		kfree(p->thread.io_bitmap_ptr);
		p->thread.io_bitmap_max = 0;
	}
	return err;
}
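
Example #12 is the first variant here with an explicit kernel-thread branch: when PF_KTHREAD is set there is no user register state to copy, so copy_thread() builds a synthetic frame in which bx carries the thread function and bp its argument, and execution starts at ret_from_kernel_thread. A sketch of how such a thread is typically created from elsewhere in the kernel, using the standard kthread API (the worker function is hypothetical):

#include <linux/kthread.h>
#include <linux/sched.h>

/* Hypothetical worker; spawning it via kthread_run() is what ends up
 * exercising the PF_KTHREAD path in copy_thread() above. */
static int my_worker(void *data)
{
	while (!kthread_should_stop()) {
		set_current_state(TASK_INTERRUPTIBLE);
		schedule();
	}
	return 0;
}

static struct task_struct *start_worker(void)
{
	return kthread_run(my_worker, NULL, "my_worker");
}
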
Example #13
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	savesegment(gs, ctxt->user_regs.gs);
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	{
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __KERNEL_DS;
		ctxt->user_regs.es = __KERNEL_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		ctxt->ldt_ents = 0;

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->gdt_frames[0] = gdt_mfn;
		ctxt->gdt_ents      = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs     = __KERNEL_CS;
		ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
		ctxt->event_callback_eip    =
					(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
					(unsigned long)xen_failsafe_callback;
	}
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
Example #14
/*
 *	switch_to(x,y) should switch tasks from x to y.
 *
 * We fsave/fwait so that an exception goes off at the right time
 * (as a call from the fsave or fwait in effect) rather than to
 * the wrong process. Lazy FP saving no longer makes any sense
 * with modern CPUs, and this simplifies a lot of things (SMP
 * and UP become the same).
 *
 * NOTE! We used to use the x86 hardware context switching. The
 * reason for not using it any more becomes apparent when you
 * try to recover gracefully from saved state that is no longer
 * valid (stale segment register values in particular). With the
 * hardware task-switch, there is no way to fix up bad state in
 * a reasonable manner.
 *
 * The fact that Intel documents the hardware task-switching to
 * be slow is a fairly red herring - this code is not noticeably
 * faster. However, there _is_ some room for improvement here,
 * so the performance issues may eventually be a valid point.
 * More important, however, is the fact that this allows us much
 * more flexibility.
 *
 * The return value (in %eax) will be the "prev" task after
 * the task-switch, and shows up in ret_from_fork in entry.S,
 * for example.
 */
struct task_struct fastcall * __switch_to(struct task_struct *prev_p, struct task_struct *next_p)
{
    struct thread_struct *prev = &prev_p->thread,
                              *next = &next_p->thread;
    int cpu = smp_processor_id();
    struct tss_struct *tss = &per_cpu(init_tss, cpu);

    /* never put a printk in __switch_to... printk() calls wake_up*() indirectly */

    __unlazy_fpu(prev_p);

    /* we're going to use this soon, after a few expensive things */
    if (next_p->fpu_counter > 5)
        prefetch(&next->i387.fxsave);

    /*
     * Reload esp0.
     */
    load_esp0(tss, next);

    /*
     * Save away %gs. No need to save %fs, as it was saved on the
     * stack on entry.  No need to save %es and %ds, as those are
     * always kernel segments while inside the kernel.  Doing this
     * before setting the new TLS descriptors avoids the situation
     * where we temporarily have non-reloadable segments in %fs
     * and %gs.  This could be an issue if the NMI handler ever
     * used %fs or %gs (it does not today), or if the kernel is
     * running inside of a hypervisor layer.
     */
    savesegment(gs, prev->gs);

    /*
     * Load the per-thread Thread-Local Storage descriptor.
     */
    load_TLS(next, cpu);

    /*
     * Restore IOPL if needed.  In normal use, the flags restore
     * in the switch assembly will handle this.  But if the kernel
     * is running virtualized at a non-zero CPL, the popf will
     * not restore flags, so it must be done in a separate step.
     */
    if (get_kernel_rpl() && unlikely(prev->iopl != next->iopl))
        set_iopl_mask(next->iopl);

    /*
     * Now maybe handle debug registers and/or IO bitmaps
     */
    if (unlikely((task_thread_info(next_p)->flags & _TIF_WORK_CTXSW)
                 || test_tsk_thread_flag(prev_p, TIF_IO_BITMAP)))
        __switch_to_xtra(next_p, tss);

    disable_tsc(prev_p, next_p);

    /*
     * Leave lazy mode, flushing any hypercalls made here.
     * This must be done before restoring TLS segments so
     * the GDT and LDT are properly updated, and must be
     * done before math_state_restore, so the TS bit is up
     * to date.
     */
    arch_leave_lazy_cpu_mode();

    /* If the task has used fpu the last 5 timeslices, just do a full
     * restore of the math state immediately to avoid the trap; the
     * chances of needing FPU soon are obviously high now
     */
    if (next_p->fpu_counter > 5)
        math_state_restore();

    /*
     * Restore %gs if needed (which is common)
     */
    if (prev->gs | next->gs)
        loadsegment(gs, next->gs);

    x86_write_percpu(current_task, next_p);

    return prev_p;
}
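
A closing note on the gs handling at the end of __switch_to() above: the test if (prev->gs | next->gs) uses a bitwise OR deliberately, so a single branch covers "either selector is nonzero". Spelled out, it is equivalent to:

	/* Reload %gs unless both the outgoing and incoming selectors
	 * are the null selector, in which case the load is skipped. */
	if (prev->gs != 0 || next->gs != 0)
		loadsegment(gs, next->gs);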