Example #1
void __init efi_call_phys_epilog(void)
{
	struct desc_ptr gdt_descr;

#ifdef CONFIG_PAX_KERNEXEC
	struct desc_struct d;

	memset(&d, 0, sizeof d);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
#endif

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

#ifdef CONFIG_PAX_PER_CPU_PGD
	load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
#else
	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();

	local_irq_restore(efi_rt_eflags);
}
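A note on the load_gdt() pattern used throughout these examples: it takes a struct desc_ptr, the packed (limit, base) pair that the lgdt instruction expects. A minimal sketch of the layout, as declared in the desc_defs.h of this era:

struct desc_ptr {
	unsigned short size;	/* limit: GDT size in bytes, minus 1 */
	unsigned long address;	/* linear address of the first descriptor */
} __attribute__((packed));

This is why every example sets size to GDT_SIZE - 1 rather than GDT_SIZE.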
Example #2
void __init lguest_arch_host_init(void)
{
	int i;

	for (i = 0; i < IDT_ENTRIES; i++)
		default_idt_entries[i] += switcher_offset();

	for_each_possible_cpu(i) {
		struct lguest_pages *pages = lguest_pages(i);
		struct lguest_ro_state *state = &pages->state;

		state->host_gdt_desc.size = GDT_SIZE-1;
		state->host_gdt_desc.address = (long)get_cpu_gdt_table(i);

		store_idt(&state->host_idt_desc);

		state->guest_idt_desc.size = sizeof(state->guest_idt)-1;
		state->guest_idt_desc.address = (long)&state->guest_idt;
		state->guest_gdt_desc.size = sizeof(state->guest_gdt)-1;
		state->guest_gdt_desc.address = (long)&state->guest_gdt;

		state->guest_tss.sp0 = (long)(&pages->regs + 1);
		state->guest_tss.ss0 = LGUEST_DS;

		state->guest_tss.io_bitmap_base = sizeof(state->guest_tss);

		setup_default_gdt_entries(state);
		setup_default_idt_entries(state, default_idt_entries);

		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_CS] = FULL_EXEC_SEGMENT;
		get_cpu_gdt_table(i)[GDT_ENTRY_LGUEST_DS] = FULL_SEGMENT;
	}

	lguest_entry.offset = (long)switch_to_guest + switcher_offset();
	lguest_entry.segment = LGUEST_CS;

	get_online_cpus();
	if (cpu_has_pge) {
		/* Remember that PGE was enabled so it can be restored on unload. */
		cpu_had_pge = 1;
		/* adjust_pge(0) clears the PGE bit in %cr4 on each CPU. */
		on_each_cpu(adjust_pge, (void *)0, 1);
		clear_cpu_cap(&boot_cpu_data, X86_FEATURE_PGE);
	}
	put_online_cpus();
}
Example #3
void efi_call_phys_epilog(void)
{
	unsigned long cr4;
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	cr4 = read_cr4();

	if (cr4 & X86_CR4_PAE) {
		swapper_pg_dir[pgd_index(0)].pgd =
		    efi_bak_pg_dir_pointer[0].pgd;
	} else {
		swapper_pg_dir[pgd_index(0)].pgd =
		    efi_bak_pg_dir_pointer[0].pgd;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
		    efi_bak_pg_dir_pointer[1].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	local_irq_restore(efi_rt_eflags);
}
Example #4
static int __init root_init(void)
{
	int gdt_size, ecx;
	unsigned long cr4;
	struct task_struct *task = current; /* current process */
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	struct desc_ptr gdt_p;
	struct LIST_ENTRY gdt_addr;
	struct LIST_ENTRY *root;

	store_gdt(&gdt_p);

	root = (struct LIST_ENTRY *)gdt_p.address;
	gdt_size = gdt_p.size + 1;	/* size holds the limit: bytes minus 1 */
	printk(KERN_INFO "%d\n", task->pid);

	/* Walk the GDT entry by entry (gdt_size is in bytes, so divide by
	 * the entry size instead of iterating once per byte). */
	for (ecx = 0; ecx < gdt_size / sizeof(gdt_addr); ecx++) {
		gdt_addr = *(root + ecx);
		printk(KERN_INFO "GDT %.8x = %.8x\n",
		       (unsigned int)gdt_addr.next, (unsigned int)gdt_addr.prev);
	}

	printk(KERN_INFO "%lx\n", (unsigned long)gdt & 0xffffff00000);

	cr4 = native_read_cr4_safe();
	printk(KERN_INFO "cr4 %lx\n", cr4);
	return 0;
}
Example #5
/**
 *	__apm_bios_call_simple - Make an APM BIOS 32bit call (on CPU 0)
 *	@_call: pointer to struct apm_bios_call.
 *
 *	Make a BIOS call that returns one value only, or just status.
 *	If there is an error, then the error code is returned in AH
 *	(bits 8-15 of eax) and this function returns non-zero (it can
 *	also return -ENOMEM). This is used for simpler BIOS operations.
 *	This call may hold interrupts off for a long time on some laptops.
 *
 *	Note: this makes the call on the current CPU.
 */
static long __apm_bios_call_simple(void *_call)
{
	u8			error;
	APM_DECL_SEGS
	unsigned long		flags;
	int			cpu;
	struct desc_struct	save_desc_40;
	struct desc_struct	*gdt;
	struct apm_bios_call	*call = _call;

	cpu = get_cpu();
	BUG_ON(cpu != 0);
	gdt = get_cpu_gdt_table(cpu);
	save_desc_40 = gdt[0x40 / 8];
	gdt[0x40 / 8] = bad_bios_desc;

	apm_irq_save(flags);
	APM_DO_SAVE_SEGS;
	error = apm_bios_call_simple_asm(call->func, call->ebx, call->ecx,
					 &call->eax);
	APM_DO_RESTORE_SEGS;
	apm_irq_restore(flags);
	gdt[0x40 / 8] = save_desc_40;
	put_cpu();
	return error;
}
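The 0x40 / 8 indexing above is worth spelling out: a selector is a byte offset into the GDT and each descriptor is 8 bytes, so selector 0x40 is entry 8. Some APM BIOSes dereference segment 0x40 (a real-mode BIOS data area convention), which is why that entry is swapped for bad_bios_desc around the call. A hypothetical helper that makes the indexing explicit:

/* Hypothetical: map a selector to its GDT slot (8 bytes per descriptor). */
static inline struct desc_struct *gdt_slot(struct desc_struct *gdt, u16 selector)
{
	return &gdt[selector >> 3];	/* 0x40 >> 3 == 8 */
}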
Example #6
static __cpuinit int
cpu_initialize_context(unsigned int cpu, struct task_struct *idle)
{
	struct vcpu_guest_context *ctxt;
	struct desc_struct *gdt;

	if (cpu_test_and_set(cpu, xen_cpu_initialized_map))
		return 0;

	ctxt = kzalloc(sizeof(*ctxt), GFP_KERNEL);
	if (ctxt == NULL)
		return -ENOMEM;

	gdt = get_cpu_gdt_table(cpu);

	ctxt->flags = VGCF_IN_KERNEL;
	ctxt->user_regs.ds = __KERNEL_DS;
	ctxt->user_regs.es = __KERNEL_DS;
	ctxt->user_regs.ss = __KERNEL_DS;
#ifdef CONFIG_X86_32
	ctxt->user_regs.fs = __KERNEL_PERCPU;
#endif
	ctxt->user_regs.eip = (unsigned long)cpu_bringup_and_idle;
	ctxt->user_regs.eflags = 0x1000; /* IOPL_RING1 */

	memset(&ctxt->fpu_ctxt, 0, sizeof(ctxt->fpu_ctxt));

	xen_copy_trap_info(ctxt->trap_ctxt);

	ctxt->ldt_ents = 0;

	BUG_ON((unsigned long)gdt & ~PAGE_MASK);
	make_lowmem_page_readonly(gdt);

	ctxt->gdt_frames[0] = virt_to_mfn(gdt);
	ctxt->gdt_ents      = GDT_ENTRIES;

	ctxt->user_regs.cs = __KERNEL_CS;
	ctxt->user_regs.esp = idle->thread.sp0 - sizeof(struct pt_regs);

	ctxt->kernel_ss = __KERNEL_DS;
	ctxt->kernel_sp = idle->thread.sp0;

#ifdef CONFIG_X86_32
	ctxt->event_callback_cs     = __KERNEL_CS;
	ctxt->failsafe_callback_cs  = __KERNEL_CS;
#endif
	ctxt->event_callback_eip    = (unsigned long)xen_hypervisor_callback;
	ctxt->failsafe_callback_eip = (unsigned long)xen_failsafe_callback;

	per_cpu(xen_cr3, cpu) = __pa(swapper_pg_dir);
	ctxt->ctrlreg[3] = xen_pfn_to_cr3(virt_to_mfn(swapper_pg_dir));

	if (HYPERVISOR_vcpu_op(VCPUOP_initialise, cpu, ctxt))
		BUG();

	kfree(ctxt);
	return 0;
}
Example #7
static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);
	xmaddr_t maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	struct multicall_space mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}
Example #8
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	load_cr3(save_pgd);
	__flush_tlb_all();
}
Example #9
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);

	pack_descriptor(&gdt, per_cpu_offset(cpu), 0xFFFFF,
			0x2 | DESCTYPE_S | 0x40, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_MODULE_PERCPU, &gdt, DESCTYPE_S);
#endif
}
Example #10
/*
 * Current gdt points %fs at the "master" per-cpu area: after this,
 * it's on the real one.
 */
void switch_to_new_gdt(int cpu)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (long)get_cpu_gdt_table(cpu);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	/* Reload the per-cpu base */
	load_percpu_segment(cpu);
}
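load_percpu_segment() is what actually moves the segment register off the master per-cpu area; in kernels of this vintage it looks roughly like the following sketch (32-bit reloads %fs, 64-bit reprograms the %gs base MSR):

void load_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	loadsegment(fs, __KERNEL_PERCPU);	/* %fs -> this CPU's percpu entry */
#else
	loadsegment(gs, 0);
	wrmsrl(MSR_GS_BASE, (unsigned long)per_cpu(irq_stack_union.gs_base, cpu));
#endif
}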
Example #11
static inline void setup_percpu_segment(int cpu)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;
	unsigned long base = per_cpu_offset(cpu);

	pack_descriptor(&gdt, base, (VMALLOC_END - base - 1) >> PAGE_SHIFT,
			0x83 | DESCTYPE_S, 0xC);
	write_gdt_entry(get_cpu_gdt_table(cpu),
			GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#endif
}
Example #12
/*
 * lazy-check for CS validity on exec-shield binaries:
 *
 * the original non-exec stack patch was written by
 * Solar Designer <solar at openwall.com>. Thanks!
 */
static int
check_lazy_exec_limit(int cpu, struct pt_regs *regs, long error_code)
{
	struct desc_struct *desc1, *desc2;
	struct vm_area_struct *vma;
	unsigned long limit;

	if (current->mm == NULL)
		return 0;

	limit = -1UL;
	if (current->mm->context.exec_limit != -1UL) {
		limit = PAGE_SIZE;
		spin_lock(&current->mm->page_table_lock);
		for (vma = current->mm->mmap; vma; vma = vma->vm_next)
			if ((vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
				limit = vma->vm_end;
		vma = get_gate_vma(current->mm);
		if (vma && (vma->vm_flags & VM_EXEC) && (vma->vm_end > limit))
			limit = vma->vm_end;
		spin_unlock(&current->mm->page_table_lock);
		if (limit >= TASK_SIZE)
			limit = -1UL;
		current->mm->context.exec_limit = limit;
	}
	set_user_cs(&current->mm->context.user_cs, limit);

	desc1 = &current->mm->context.user_cs;
	desc2 = get_cpu_gdt_table(cpu) + GDT_ENTRY_DEFAULT_USER_CS;

	if (__compare_user_cs_desc(desc1, desc2)) {
		/*
		 * The CS was not in sync - reload it and retry the
		 * instruction. If the instruction still faults then
		 * we won't hit this branch next time around.
		 */
		if (print_fatal_signals >= 2) {
			printk(KERN_ERR "#GPF fixup (%ld[seg:%lx]) at %08lx, CPU#%d.\n",
				error_code, error_code/8, regs->ip,
				smp_processor_id());
			printk(KERN_ERR "exec_limit: %08lx, user_cs: %08x/%08x, CPU_cs: %08x/%08x.\n",
				current->mm->context.exec_limit,
				desc1->a, desc1->b, desc2->a, desc2->b);
		}

		load_user_cs_desc(cpu, current->mm);

		return 1;
	}

	return 0;
}
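set_user_cs() is the exec-shield helper that shrinks the user code segment so nothing above the executable limit can be fetched. As the out-of-tree patch defines it, it is roughly this sketch (base 0, limit rounded down to pages):

static inline void set_user_cs(struct desc_struct *desc, unsigned long limit)
{
	limit = (limit - 1) / PAGE_SIZE;
	desc->a = limit & 0xffff;			/* limit bits 15:0 */
	desc->b = (limit & 0xf0000) | 0x00c0fb00;	/* limit 19:16, DPL-3 code */
}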
Example #13
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	struct desc_struct ss;

	/* Set up base for espfix segment */
	pack_descriptor(&ss, base, lim_pages, 0x93, 0xC);
	write_gdt_entry(get_cpu_gdt_table(smp_processor_id()), GDT_ENTRY_ESPFIX_SS, &ss, DESCTYPE_S);

	return new_kesp;
}
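pack_descriptor() does the packing here; in the desc.h of this period it maps (base, limit, type, flags) onto the two descriptor words roughly as below, so 0x93 means a present, DPL-0, writable data segment and 0xC selects a 32-bit segment with page granularity:

static inline void pack_descriptor(struct desc_struct *desc, unsigned long base,
				   unsigned long limit, unsigned char type,
				   unsigned char flags)
{
	desc->a = ((base & 0xffff) << 16) | (limit & 0xffff);
	desc->b = (base & 0xff000000) | ((base & 0xff0000) >> 16) |
		  (limit & 0x000f0000) | ((type & 0xff) << 8) |
		  ((flags & 0xf) << 20);
	desc->p = 1;
}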
Example #14
/* Initialize the CPU's GDT.  This is either the boot CPU doing itself
   (still using the master per-cpu area), or a CPU doing it for a
   secondary which will soon come up. */
__cpuinit void init_gdt(int cpu)
{
	struct desc_struct *gdt = get_cpu_gdt_table(cpu);

	pack_descriptor(&gdt[GDT_ENTRY_PERCPU],
			__per_cpu_offset[cpu], 0xFFFFF,
			0x2 | DESCTYPE_S, 0x8);

	gdt[GDT_ENTRY_PERCPU].s = 1;

	per_cpu(this_cpu_off, cpu) = __per_cpu_offset[cpu];
	per_cpu(cpu_number, cpu) = cpu;
}
Example #15
static inline void setup_percpu_segment(int cpu, unsigned long base)
{
#ifdef CONFIG_X86_32
	struct desc_struct gdt;

	pack_descriptor(&gdt, base, 0xFFFFF,
		0x2 | DESCTYPE_S, 0x8);
	gdt.s = 1;
	write_gdt_entry(get_cpu_gdt_table(cpu),
		GDT_ENTRY_PERCPU, &gdt, DESCTYPE_S);
#else
#error "setup_percpu_segment() is only implemented for CONFIG_X86_32"
#endif
}
Example #16
/**
 * acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int acpi_suspend_lowlevel(void)
{
	struct wakeup_header *header =
		(struct wakeup_header *) __va(real_mode_header->wakeup_header);

	if (header->signature != WAKEUP_HEADER_SIGNATURE) {
		printk(KERN_ERR "wakeup header does not match\n");
		return -EINVAL;
	}

	header->video_mode = saved_video_mode;

#ifndef CONFIG_64BIT
	store_gdt((struct desc_ptr *)&header->pmode_gdt);

	if (rdmsr_safe(MSR_EFER, &header->pmode_efer_low,
		       &header->pmode_efer_high))
		header->pmode_efer_low = header->pmode_efer_high = 0;
#endif /* !CONFIG_64BIT */

	header->pmode_cr0 = read_cr0();
	header->pmode_cr4 = read_cr4_safe();
	header->pmode_behavior = 0;
	if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
			&header->pmode_misc_en_low,
			&header->pmode_misc_en_high))
		header->pmode_behavior |=
			(1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
	header->realmode_flags = acpi_realmode_flags;
	header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
	header->pmode_entry = (u32)&wakeup_pmode_return;
	header->pmode_cr3 = (u32)__pa(&initial_page_table);
	saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
	stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
	early_gdt_descr.address =
			(unsigned long)get_cpu_gdt_table(smp_processor_id());
	initial_gs = per_cpu_offset(smp_processor_id());
#endif
	initial_code = (unsigned long)wakeup_long64;
	saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

	do_suspend_lowlevel();
	return 0;
}
Example #17
void __init efi_call_phys_prolog(void)
{
	struct desc_ptr gdt_descr;

#ifdef CONFIG_PAX_KERNEXEC
	struct desc_struct d;
#endif

	local_irq_save(efi_rt_eflags);

	load_cr3(initial_page_table);
	__flush_tlb_all();

#ifdef CONFIG_PAX_KERNEXEC
	pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
	pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
#endif

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
Example #18
static void xen_load_user_cs_desc(int cpu, struct mm_struct *mm)
{
	void *gdt;
	xmaddr_t mgdt;
	u64 descriptor;
	struct desc_struct user_cs;

	gdt = &get_cpu_gdt_table(cpu)[GDT_ENTRY_DEFAULT_USER_CS];
	mgdt = virt_to_machine(gdt);

	user_cs = mm->context.user_cs;
	descriptor = (u64) user_cs.a | ((u64) user_cs.b) << 32;

	HYPERVISOR_update_descriptor(mgdt.maddr, descriptor);
}
Example #19
pgd_t * __init efi_call_phys_prolog(void)
{
	struct desc_ptr gdt_descr;
	pgd_t *save_pgd;

	/* Current pgd is swapper_pg_dir, we'll restore it later: */
	save_pgd = swapper_pg_dir;
	load_cr3(initial_page_table);
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	return save_pgd;
}
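Examples #8 and #19 pair up: the prolog returns the pgd that the epilog later restores. The caller wraps the physical-mode EFI call roughly like this sketch:

	pgd_t *save_pgd;

	save_pgd = efi_call_phys_prolog();
	/* ... make the EFI runtime call via the temporary 1:1 mapping ... */
	efi_call_phys_epilog(save_pgd);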
Example #20
void __init efi_call_phys_epilog(void)
{
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer, KERNEL_PGD_PTRS);

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	local_irq_restore(efi_rt_eflags);
}
Example #21
/*
 * Initialize the CPU's GDT.  This is either the boot CPU doing itself
 * (still using the master per-cpu area), or a CPU doing it for a
 * secondary which will soon come up.
 */
__cpuinit void init_gdt(int cpu)
{
	struct desc_struct d, *gdt = get_cpu_gdt_table(cpu);
	unsigned long base, limit;

	base = per_cpu_offset(cpu);
	limit = PERCPU_ENOUGH_ROOM - 1;
	if (limit < 64*1024)
		pack_descriptor(&d, base, limit, 0x80 | DESCTYPE_S | 0x3, 0x4);
	else
		pack_descriptor(&d, base, limit >> PAGE_SHIFT, 0x80 | DESCTYPE_S | 0x3, 0xC);

	write_gdt_entry(gdt, GDT_ENTRY_PERCPU, &d, DESCTYPE_S);

	per_cpu(this_cpu_off, cpu) = base;
	per_cpu(cpu_number, cpu) = cpu;
}
Example #22
unsigned long patch_espfix_desc(unsigned long uesp, unsigned long kesp)
{
	struct desc_struct *gdt = get_cpu_gdt_table(smp_processor_id());
	unsigned long base = (kesp - uesp) & -THREAD_SIZE;
	unsigned long new_kesp = kesp - base;
	unsigned long lim_pages = (new_kesp | (THREAD_SIZE - 1)) >> PAGE_SHIFT;
	__u64 desc = *(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS];

	/* Set up base for espfix segment */
	desc &= 0x00f0ff0000000000ULL;
	desc |=	((((__u64)base) << 16) & 0x000000ffffff0000ULL) |
		((((__u64)base) << 32) & 0xff00000000000000ULL) |
		((((__u64)lim_pages) << 32) & 0x000f000000000000ULL) |
		(lim_pages & 0xffff);
	*(__u64 *)&gdt[GDT_ENTRY_ESPFIX_SS] = desc;

	return new_kesp;
}
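Examples #13 and #22 are two generations of the same espfix fixup: the masking above open-codes the base/limit packing that Example #13 delegates to pack_descriptor(). Assuming the bits kept by the 0x00f0ff0000000000ULL mask (type and flags) are meant to survive, the same update can be sketched with the desc.h accessors:

	/* Sketch: equivalent update via set_desc_base()/set_desc_limit(). */
	struct desc_struct ss = gdt[GDT_ENTRY_ESPFIX_SS];

	set_desc_base(&ss, base);
	set_desc_limit(&ss, lim_pages);
	gdt[GDT_ENTRY_ESPFIX_SS] = ss;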
Example #23
static int __init setup_pax_nouderef(char *str)
{
	unsigned int cpu;

#ifdef CONFIG_PAX_KERNEXEC
	unsigned long cr0;

	pax_open_kernel(cr0);
#endif

	/* 0x00cf9300: flat writable kernel data segment (present, DPL 0,
	 * 4 GiB limit, page granularity). */
	for (cpu = 0; cpu < NR_CPUS; cpu++)
		get_cpu_gdt_table(cpu)[GDT_ENTRY_KERNEL_DS].b = 0x00cf9300;

#ifdef CONFIG_PAX_KERNEXEC
	pax_close_kernel(cr0);
#endif

	return 1;
}
Example #24
static void load_TLS_descriptor(struct thread_struct *t,
				unsigned int cpu, unsigned int i)
{
	struct desc_struct *shadow = &per_cpu(shadow_tls_desc, cpu).desc[i];
	struct desc_struct *gdt;
	xmaddr_t maddr;
	struct multicall_space mc;

	if (desc_equal(shadow, &t->tls_array[i]))
		return;

	*shadow = t->tls_array[i];

	gdt = get_cpu_gdt_table(cpu);
	maddr = arbitrary_virt_to_machine(&gdt[GDT_ENTRY_TLS_MIN+i]);
	mc = __xen_mc_entry(0);

	MULTI_update_descriptor(mc.mc, maddr.maddr, t->tls_array[i]);
}
Example #25
void __init efi_call_phys_prelog(void)
{
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir, KERNEL_PGD_PTRS);
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min_t(unsigned long, KERNEL_PGD_PTRS, KERNEL_PGD_BOUNDARY));

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
Example #26
void pnpbios_calls_init(union pnp_bios_install_struct *header)
{
	int i;

	spin_lock_init(&pnp_bios_lock);
	pnp_bios_callpoint.offset = header->fields.pm16offset;
	pnp_bios_callpoint.segment = PNP_CS16;

	for_each_possible_cpu(i) {
		struct desc_struct *gdt = get_cpu_gdt_table(i);
		if (!gdt)
			continue;
		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS32],
			 (unsigned long)&pnp_bios_callfunc);
		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_CS16],
			 (unsigned long)__va(header->fields.pm16cseg));
		set_desc_base(&gdt[GDT_ENTRY_PNPBIOS_DS],
			 (unsigned long)__va(header->fields.pm16dseg));
	}
}
Example #27
void efi_call_phys_prelog(void)
{
	unsigned long cr4;
	unsigned long temp;
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	/*
	 * If I don't have PAE, I should just duplicate two entries in page
	 * directory. If I have PAE, I just need to duplicate one entry in
	 * page directory.
	 */
	cr4 = read_cr4();

	if (cr4 & X86_CR4_PAE) {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		swapper_pg_dir[0].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
	} else {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		efi_bak_pg_dir_pointer[1].pgd =
		    swapper_pg_dir[pgd_index(0x400000)].pgd;
		swapper_pg_dir[pgd_index(0)].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
		temp = PAGE_OFFSET + 0x400000;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
		    swapper_pg_dir[pgd_index(temp)].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
Example #28
void pnpbios_calls_init(union pnp_bios_install_struct *header)
{
	int i;

	spin_lock_init(&pnp_bios_lock);
	pnp_bios_callpoint.offset = header->fields.pm16offset;
	pnp_bios_callpoint.segment = PNP_CS16;

	set_base(bad_bios_desc, __va((unsigned long)0x40 << 4));
	_set_limit((char *)&bad_bios_desc, 4095 - (0x40 << 4));
	for (i = 0; i < NR_CPUS; i++) {
		struct desc_struct *gdt = get_cpu_gdt_table(i);
		if (!gdt)
			continue;
		set_base(gdt[GDT_ENTRY_PNPBIOS_CS32], &pnp_bios_callfunc);
		set_base(gdt[GDT_ENTRY_PNPBIOS_CS16],
			 __va(header->fields.pm16cseg));
		set_base(gdt[GDT_ENTRY_PNPBIOS_DS],
			 __va(header->fields.pm16dseg));
	}
}
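Example #28 is the older set_base()/_set_limit() form of Example #26's pnpbios_calls_init(). The set_desc_base() accessor that replaced it writes the three scattered base fields directly; roughly:

static inline void set_desc_base(struct desc_struct *desc, unsigned long base)
{
	desc->base0 = base & 0xffff;		/* base bits 15:0  */
	desc->base1 = (base >> 16) & 0xff;	/* base bits 23:16 */
	desc->base2 = (base >> 24) & 0xff;	/* base bits 31:24 */
}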
Example #29
/*
 * Assume __initcall executes before all user space. Hopefully kmod
 * doesn't violate that. We'll find out if it does.
 */
static void vsyscall_set_cpu(int cpu)
{
	unsigned long d;
	unsigned long node = 0;
#ifdef CONFIG_NUMA
	node = cpu_to_node(cpu);
#endif
	if (cpu_has(&cpu_data(cpu), X86_FEATURE_RDTSCP))
		write_rdtscp_aux((node << 12) | cpu);

	/*
	 * Store cpu number in limit so that it can be loaded quickly
	 * in user space in vgetcpu. (12 bits for the CPU and 8 bits for the node)
	 */
	d = 0x0f40000000000ULL;
	d |= cpu;
	d |= (node & 0xf) << 12;
	d |= (node >> 4) << 48;

	write_gdt_entry(get_cpu_gdt_table(cpu), GDT_ENTRY_PER_CPU, &d, DESCTYPE_S);
}
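User space reads this encoding back in vgetcpu with the lsl instruction, which loads a segment's limit. A minimal decoding sketch, where __PER_CPU_SEG is the user selector for GDT_ENTRY_PER_CPU:

	unsigned int p, cpu, node;

	asm("lsl %1, %0" : "=r" (p) : "r" (__PER_CPU_SEG));
	cpu  = p & 0xfff;	/* low 12 bits: CPU number */
	node = p >> 12;		/* limit bits above that: node */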
Example #30
/**
 * x86_acpi_suspend_lowlevel - save kernel state
 *
 * Create an identity mapped page table and copy the wakeup routine to
 * low memory.
 */
int x86_acpi_suspend_lowlevel(void)
{
    struct wakeup_header *header =
        (struct wakeup_header *) __va(real_mode_header->wakeup_header);

    if (header->signature != WAKEUP_HEADER_SIGNATURE) {
        printk(KERN_ERR "wakeup header does not match\n");
        return -EINVAL;
    }

    header->video_mode = saved_video_mode;

    header->pmode_behavior = 0;

#ifndef CONFIG_64BIT
    native_store_gdt((struct desc_ptr *)&header->pmode_gdt);

    /*
     * We have to check that we can write back the value, and not
     * just read it.  At least on 90 nm Pentium M (Family 6, Model
     * 13), reading an invalid MSR is not guaranteed to trap, see
     * Erratum X4 in "Intel Pentium M Processor on 90 nm Process
     * with 2-MB L2 Cache and Intel® Processor A100 and A110 on 90
     * nm process with 512-KB L2 Cache Specification Update".
     */
    if (!rdmsr_safe(MSR_EFER,
                    &header->pmode_efer_low,
                    &header->pmode_efer_high) &&
            !wrmsr_safe(MSR_EFER,
                        header->pmode_efer_low,
                        header->pmode_efer_high))
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_EFER);
#endif /* !CONFIG_64BIT */

    header->pmode_cr0 = read_cr0();
    if (__this_cpu_read(cpu_info.cpuid_level) >= 0) {
        header->pmode_cr4 = read_cr4();
        header->pmode_behavior |= (1 << WAKEUP_BEHAVIOR_RESTORE_CR4);
    }
    if (!rdmsr_safe(MSR_IA32_MISC_ENABLE,
                    &header->pmode_misc_en_low,
                    &header->pmode_misc_en_high) &&
            !wrmsr_safe(MSR_IA32_MISC_ENABLE,
                        header->pmode_misc_en_low,
                        header->pmode_misc_en_high))
        header->pmode_behavior |=
            (1 << WAKEUP_BEHAVIOR_RESTORE_MISC_ENABLE);
    header->realmode_flags = acpi_realmode_flags;
    header->real_magic = 0x12345678;

#ifndef CONFIG_64BIT
    header->pmode_entry = (u32)&wakeup_pmode_return;
    header->pmode_cr3 = (u32)__pa_symbol(initial_page_table);
    saved_magic = 0x12345678;
#else /* CONFIG_64BIT */
#ifdef CONFIG_SMP
    stack_start = (unsigned long)temp_stack + sizeof(temp_stack);
    early_gdt_descr.address =
        (unsigned long)get_cpu_gdt_table(smp_processor_id());
    initial_gs = per_cpu_offset(smp_processor_id());
#endif
    initial_code = (unsigned long)wakeup_long64;
    saved_magic = 0x123456789abcdef0L;
#endif /* CONFIG_64BIT */

    do_suspend_lowlevel();
    return 0;
}