Example #1
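/*
 * Linux Xen PV guest SMP bringup (older __cpuinit-era variant): allocate a
 * vcpu_guest_context for the new CPU, point EIP at cpu_bringup_and_idle,
 * make the GDT page read-only as Xen requires, set CR3 to swapper_pg_dir,
 * and hand the context to the hypervisor with VCPUOP_initialise.
 */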
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __KERNEL_DS;
	ctxt->user_regs.es = __KERNEL_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
Example #2
File: p2m.c  Project: DenisLug/mptcp
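/*
 * Linux Xen p2m setup: publish the PFN-to-MFN translation structures in the
 * shared-info page so the hypervisor and toolstack can locate them; if the
 * toolstack advertises SIF_VIRT_P2M_4TOOLS the frame-list-list is not used
 * and only the virtually mapped p2m (p2m_vaddr/p2m_cr3) is exposed.
 * Auto-translated (HVM/PVH) guests skip this entirely.
 */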
void xen_setup_mfn_list_list(void)
{
	if (xen_feature(XENFEAT_auto_translated_physmap))
		return;

	BUG_ON(HYPERVISOR_shared_info == &xen_dummy_shared_info);

	if (xen_start_info->flags & SIF_VIRT_P2M_4TOOLS)
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list = ~0UL;
	else
		HYPERVISOR_shared_info->arch.pfn_to_mfn_frame_list_list =
			virt_to_mfn(p2m_top_mfn);
	HYPERVISOR_shared_info->arch.max_pfn = xen_p2m_last_pfn;
	HYPERVISOR_shared_info->arch.p2m_generation = 0;
	HYPERVISOR_shared_info->arch.p2m_vaddr = (unsigned long)xen_p2m_addr;
	HYPERVISOR_shared_info->arch.p2m_cr3 =
		xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));
}
Example #3
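/*
 * FreeBSD/Xen i386 PV AP bringup: allocate and wire fresh page-table pages
 * and a boot stack for the new CPU, copy cpu0's kernel mappings into the new
 * PTD, pin the PDPT, fill in a statically allocated vcpu_guest_context, then
 * start the VCPU with VCPUOP_initialise followed by VCPUOP_up.
 */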
static void
cpu_initialize_context(unsigned int cpu)
{
	/* vcpu_guest_context_t is too large to allocate on the stack.
	 * Hence we allocate statically and protect it with a lock */
	vm_page_t m[NPGPTD + 2];
	static vcpu_guest_context_t ctxt;
	vm_offset_t boot_stack;
	vm_offset_t newPTD;
	vm_paddr_t ma[NPGPTD];
	int i;

	/*
	 * Page 0,[0-3]	PTD
	 * Page 1, [4]	boot stack
	 * Page [5]	PDPT
	 *
	 */
	for (i = 0; i < NPGPTD + 2; i++) {
		m[i] = vm_page_alloc(NULL, 0,
		    VM_ALLOC_NORMAL | VM_ALLOC_NOOBJ | VM_ALLOC_WIRED |
		    VM_ALLOC_ZERO);

		pmap_zero_page(m[i]);

	}
	boot_stack = kmem_alloc_nofault(kernel_map, PAGE_SIZE);
	newPTD = kmem_alloc_nofault(kernel_map, NPGPTD * PAGE_SIZE);
	ma[0] = VM_PAGE_TO_MACH(m[0])|PG_V;

#ifdef PAE	
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD + 1]));
	for (i = 0; i < NPGPTD; i++) {
		((vm_paddr_t *)boot_stack)[i] =
		ma[i] = VM_PAGE_TO_MACH(m[i])|PG_V;
	}
#endif	

	/*
	 * Copy cpu0 IdlePTD to new IdlePTD - copying only
	 * kernel mappings
	 */
	pmap_qenter(newPTD, m, 4);
	
	memcpy((uint8_t *)newPTD + KPTDI*sizeof(vm_paddr_t),
	    (uint8_t *)PTOV(IdlePTD) + KPTDI*sizeof(vm_paddr_t),
	    nkpt*sizeof(vm_paddr_t));

	pmap_qremove(newPTD, 4);
	kmem_free(kernel_map, newPTD, 4 * PAGE_SIZE);
	/*
	 * map actual idle stack to boot_stack
	 */
	pmap_kenter(boot_stack, VM_PAGE_TO_PHYS(m[NPGPTD]));


	xen_pgdpt_pin(VM_PAGE_TO_MACH(m[NPGPTD + 1]));
	rw_wlock(&pvh_global_lock);
	for (i = 0; i < 4; i++) {
		int pdir = (PTDPTDI + i) / NPDEPG;
		int curoffset = (PTDPTDI + i) % NPDEPG;
		
		xen_queue_pt_update((vm_paddr_t)
		    ((ma[pdir] & ~PG_V) + (curoffset*sizeof(vm_paddr_t))), 
		    ma[i]);
	}
	PT_UPDATES_FLUSH();
	rw_wunlock(&pvh_global_lock);
	
	memset(&ctxt, 0, sizeof(ctxt));
	ctxt.flags = VGCF_IN_KERNEL;
	ctxt.user_regs.ds = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.es = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.fs = GSEL(GPRIV_SEL, SEL_KPL);
	ctxt.user_regs.gs = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.cs = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.user_regs.ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.user_regs.eip = (unsigned long)init_secondary;
	ctxt.user_regs.eflags = PSL_KERNEL | 0x1000; /* IOPL_RING1 */

	memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));

	smp_trap_init(ctxt.trap_ctxt);

	ctxt.ldt_ents = 0;
	ctxt.gdt_frames[0] = (uint32_t)((uint64_t)vtomach(bootAPgdt) >> PAGE_SHIFT);
	ctxt.gdt_ents      = 512;

#ifdef __i386__
	ctxt.user_regs.esp = boot_stack + PAGE_SIZE;

	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = boot_stack + PAGE_SIZE;

	ctxt.event_callback_cs     = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.event_callback_eip    = (unsigned long)Xhypervisor_callback;
	ctxt.failsafe_callback_cs  = GSEL(GCODE_SEL, SEL_KPL);
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;

	ctxt.ctrlreg[3] = VM_PAGE_TO_MACH(m[NPGPTD + 1]);
#else /* __x86_64__ */
	ctxt.user_regs.esp = idle->thread.rsp0 - sizeof(struct pt_regs);
	ctxt.kernel_ss = GSEL(GDATA_SEL, SEL_KPL);
	ctxt.kernel_sp = idle->thread.rsp0;

	ctxt.event_callback_eip    = (unsigned long)hypervisor_callback;
	ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
	ctxt.syscall_callback_eip  = (unsigned long)system_call;

	ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(init_level4_pgt));

	ctxt.gs_base_kernel = (unsigned long)(cpu_pda(cpu));
#endif

	printf("gdtpfn=%lx pdptpfn=%lx\n",
	    ctxt.gdt_frames[0],
	    ctxt.ctrlreg[3] >> PAGE_SHIFT);

	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt));
	DELAY(3000);
	PANIC_IF(HYPERVISOR_vcpu_op(VCPUOP_up, cpu, NULL));
}
Example #4
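/*
 * Solaris/illumos xVM: complete a caller-provided vcpu_guest_context with
 * segment selectors, IOPL (ring 1 I/O access for dom0 only), a trap table
 * derived from the IDT, the GDT frame, control registers and the event,
 * failsafe and syscall callbacks, then pass it to xen_vcpu_initialize().
 */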
/*
 * Fill in the remaining CPU context and initialize it.
 */
static int
mp_set_cpu_context(vcpu_guest_context_t *vgc, cpu_t *cp)
{
	uint_t vec, iopl;

	vgc->flags = VGCF_IN_KERNEL;

	/*
	 * fpu_ctx we leave as zero; on first fault we'll store
	 * sse_initial into it anyway.
	 */

#if defined(__amd64)
	vgc->user_regs.cs = KCS_SEL | SEL_KPL;	/* force to ring 3 */
#else
	vgc->user_regs.cs = KCS_SEL;
#endif
	vgc->user_regs.ds = KDS_SEL;
	vgc->user_regs.es = KDS_SEL;
	vgc->user_regs.ss = KDS_SEL;
	vgc->kernel_ss = KDS_SEL;

	/*
	 * Allow I/O privilege level for Dom0 kernel.
	 */
	if (DOMAIN_IS_INITDOMAIN(xen_info))
		iopl = (PS_IOPL & 0x1000); /* ring 1 */
	else
		iopl = 0;

#if defined(__amd64)
	vgc->user_regs.fs = 0;
	vgc->user_regs.gs = 0;
	vgc->user_regs.rflags = F_OFF | iopl;
#elif defined(__i386)
	vgc->user_regs.fs = KFS_SEL;
	vgc->user_regs.gs = KGS_SEL;
	vgc->user_regs.eflags = F_OFF | iopl;
	vgc->event_callback_cs = vgc->user_regs.cs;
	vgc->failsafe_callback_cs = vgc->user_regs.cs;
#endif

	/*
	 * Initialize the trap_info_t from the IDT
	 */
#if !defined(__lint)
	ASSERT(NIDT == sizeof (vgc->trap_ctxt) / sizeof (vgc->trap_ctxt[0]));
#endif
	for (vec = 0; vec < NIDT; vec++) {
		trap_info_t *ti = &vgc->trap_ctxt[vec];

		if (xen_idt_to_trap_info(vec,
		    &cp->cpu_m.mcpu_idt[vec], ti) == 0) {
			ti->cs = KCS_SEL;
			ti->vector = vec;
		}
	}

	/*
	 * No LDT
	 */

	/*
	 * (We assert in various places that the GDT is (a) aligned on a
	 * page boundary and (b) one page long, so this really should fit..)
	 */
#ifdef CRASH_XEN
	vgc->gdt_frames[0] = pa_to_ma(mmu_btop(cp->cpu_m.mcpu_gdtpa));
#else
	vgc->gdt_frames[0] = pfn_to_mfn(mmu_btop(cp->cpu_m.mcpu_gdtpa));
#endif
	vgc->gdt_ents = NGDT;

	vgc->ctrlreg[0] = CR0_ENABLE_FPU_FLAGS(getcr0());

#if defined(__i386)
	if (mmu.pae_hat)
		vgc->ctrlreg[3] =
		    xen_pfn_to_cr3(pfn_to_mfn(kas.a_hat->hat_htable->ht_pfn));
	else
#endif
		vgc->ctrlreg[3] =
		    pa_to_ma(mmu_ptob(kas.a_hat->hat_htable->ht_pfn));

	vgc->ctrlreg[4] = getcr4();

	vgc->event_callback_eip = (uintptr_t)xen_callback;
	vgc->failsafe_callback_eip = (uintptr_t)xen_failsafe_callback;
	vgc->flags |= VGCF_failsafe_disables_events;

#if defined(__amd64)
	/*
	 * XXPV should this be moved to init_cpu_syscall?
	 */
	vgc->syscall_callback_eip = (uintptr_t)sys_syscall;
	vgc->flags |= VGCF_syscall_disables_events;

	ASSERT(vgc->user_regs.gs == 0);
	vgc->gs_base_kernel = (uintptr_t)cp;
#endif

	return (xen_vcpu_initialize(cp->cpu_id, vgc));
}
Example #5
File: smp.c  Project: SnakeDoc/GuestVM
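/*
 * Mini-OS style guest (GuestVM): start the secondary VCPU directly in its
 * idle thread, reusing the boot page table (start_info.pt_base) for CR3,
 * and translate the error codes VCPUOP_initialise can return into
 * human-readable messages.
 */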
static void cpu_initialize_context(unsigned int cpu)
{
    vcpu_guest_context_t ctxt;
    struct thread *idle_thread;

    init_cpu_pda(cpu);

    idle_thread = per_cpu(cpu, idle_thread);
    memset(&ctxt, 0, sizeof(ctxt));
    ctxt.flags = VGCF_IN_KERNEL;
    ctxt.user_regs.ds = __KERNEL_DS;
    ctxt.user_regs.es = 0;
    ctxt.user_regs.fs = 0;
    ctxt.user_regs.gs = 0;
    ctxt.user_regs.ss = __KERNEL_SS;
    ctxt.user_regs.eip = idle_thread->ip;
    ctxt.user_regs.eflags = X86_EFLAGS_IF | 0x1000;	/* IOPL_RING1 */
    memset(&ctxt.fpu_ctxt, 0, sizeof(ctxt.fpu_ctxt));
    ctxt.ldt_ents = 0;
    ctxt.gdt_ents = 0;
#ifdef __i386__
    ctxt.user_regs.cs = __KERNEL_CS;
    ctxt.user_regs.esp = idle_thread->sp;
    ctxt.kernel_ss = __KERNEL_SS;
    ctxt.kernel_sp = ctxt.user_regs.esp;
    ctxt.event_callback_cs = __KERNEL_CS;
    ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
    ctxt.failsafe_callback_cs = __KERNEL_CS;
    ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
    ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(start_info.pt_base));
#else /* __x86_64__ */
    ctxt.user_regs.cs = __KERNEL_CS;
    ctxt.user_regs.esp = idle_thread->sp;
    ctxt.kernel_ss = __KERNEL_SS;
    ctxt.kernel_sp = ctxt.user_regs.esp;
    ctxt.event_callback_eip = (unsigned long)hypervisor_callback;
    ctxt.failsafe_callback_eip = (unsigned long)failsafe_callback;
    ctxt.syscall_callback_eip = 0;
    ctxt.ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(start_info.pt_base));
    ctxt.gs_base_kernel = (unsigned long)&percpu[cpu];
#endif
    int err = HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, &ctxt);
    if (err) {
	char *str;

	switch (err) {
	    case -EINVAL:
		/*
		 * This interface squashes multiple error sources
		 * to one error code.  In particular, an X_EINVAL
		 * code can mean:
		 *
		 * -	the vcpu id is out of range
		 * -	cs or ss are in ring 0
		 * -	cr3 is wrong
		 * -	an entry in the new gdt is above the
		 *	reserved entry
		 * -	a frame underneath the new gdt is bad
		 */
		str = "something is wrong :(";
		break;
	    case -ENOENT:
		str = "no such cpu";
		break;
	    case -ENOMEM:
		str = "no mem to copy ctxt";
		break;
	    case -EFAULT:
		str = "bad address";
		break;
	    case -EEXIST:
		/*
		 * Hmm.  This error is returned if the vcpu has already
		 * been initialized once before in the lifetime of this
		 * domain.  This is a logic error in the kernel.
		 */
		str = "already initialized";
		break;
	    default:
		str = "<unexpected>";
		break;
	}

	xprintk("vcpu%d: failed to init: error %d: %s",
		cpu, -err, str);
    }
}
Example #6
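/*
 * Newer Linux Xen PV variant: marks the CPU in cpu_callout_mask so
 * cpu_init() may proceed, uses get_cpu_gdt_rw()/arbitrary_virt_to_mfn() to
 * locate the GDT frame, takes the initial stack from task_pt_regs(idle) and
 * task_top_of_stack(idle), and addresses the VCPU via xen_vcpu_nr(cpu).
 */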
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_rw(cpu);

#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	/*
	 * Bring up the CPU in cpu_bringup_and_idle() with the stack
	 * pointing just below where pt_regs would be if it were a normal
	 * kernel entry.
	 */
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = (unsigned long)task_pt_regs(idle);

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	/*
	 * Set SS:SP that Xen will use when entering guest kernel mode
	 * from guest user mode.  Subsequent calls to load_sp0() can
	 * change this value.
	 */
	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = task_top_of_stack(idle);

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->event_callback_eip    =
		(unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
Example #7
File: smp.c  Project: mbgg/linux
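/*
 * Linux variant with an experimental PVH branch: when the guest is both
 * auto-translated and runs with a supervisor-mode kernel, the GDT is passed
 * by virtual address through ctxt->u.pvh instead of the PV gdt_frames
 * mechanism; otherwise the usual PV setup applies.
 */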
static int __cpuinit
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	if (xen_feature(XENFEAT_auto_translated_physmap) &&
	    xen_feature(XENFEAT_supervisor_mode_kernel)) {
		/* Note: PVH is not supported on x86_32. */
#ifdef CONFIG_X86_64
		ctxt->user_regs.ds = __KERNEL_DS;
		ctxt->user_regs.es = 0;
		ctxt->user_regs.gs = 0;

		/* GUEST_GDTR_BASE and */
		ctxt->u.pvh.gdtaddr = (unsigned long)gdt;
		/* GUEST_GDTR_LIMIT in the VMCS. */
		ctxt->u.pvh.gdtsz = (unsigned long)(GDT_SIZE - 1);

		ctxt->gs_base_user = (unsigned long)
					per_cpu(irq_stack_union.gs_base, cpu);
#endif
	} else {
		ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
		ctxt->user_regs.ds = __USER_DS;
		ctxt->user_regs.es = __USER_DS;

		xen_copy_trap_info(ctxt->trap_ctxt);

		ctxt->ldt_ents = 0;

		BUG_ON((unsigned long)gdt & ~PAGE_MASK);

		gdt_mfn = arbitrary_virt_to_mfn(gdt);
		make_lowmem_page_readonly(gdt);
		make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

		ctxt->u.pv.gdt_frames[0] = gdt_mfn;
		ctxt->u.pv.gdt_ents      = GDT_ENTRIES;

		ctxt->kernel_ss = __KERNEL_DS;
		ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
		ctxt->event_callback_cs     = __KERNEL_CS;
		ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
		ctxt->event_callback_eip    =
					(unsigned long)xen_hypervisor_callback;
		ctxt->failsafe_callback_eip =
					(unsigned long)xen_failsafe_callback;
	}
	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
Example #8
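/*
 * Another current Linux PV variant, a close sibling of example #6 with the
 * field assignments ordered slightly differently; it derives the initial
 * stack pointer from idle->thread.sp0 rather than task_pt_regs(idle) and
 * task_top_of_stack(idle), and again targets xen_vcpu_nr(cpu).
 */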
static int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;
	unsigned long gdt_mfn;

	/* used to tell cpu_init() that it can proceed with initialization */
	cpumask_set_cpu(cpu, cpu_callout_mask);
	if (cpumask_test_and_set_cpu(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_rw(cpu);

#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
	ctxt->user_regs.gs = __KERNEL_STACK_CANARY;
#endif
	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */
	ctxt->user_regs.ds = __USER_DS;
	ctxt->user_regs.es = __USER_DS;
	ctxt->user_regs.ss = __KERNEL_DS;

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);

	gdt_mfn = arbitrary_virt_to_mfn(gdt);
	make_lowmem_page_readonly(gdt);
	make_lowmem_page_readonly(mfn_to_virt(gdt_mfn));

	ctxt->gdt_frames[0] = gdt_mfn;
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#else
	ctxt->gs_base_kernel = per_cpu_offset(cpu);
#endif
	ctxt->event_callback_eip    =
		(unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip =
		(unsigned long)xen_failsafe_callback;
	ctxt->user_regs.cs = __KERNEL_CS;
	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);

	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_gfn(swapper_pg_dir));
	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, xen_vcpu_nr(cpu), ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}