/*
 * Restore normal kernel state after an EFI physical-mode runtime call:
 * reload CPU 0's virtual-address GDT, switch back to the kernel page
 * tables, flush the TLB, and re-enable interrupts saved by the prolog.
 */
void __init efi_call_phys_epilog(void)
{
	struct desc_ptr gdt_descr;

#ifdef CONFIG_PAX_KERNEXEC
	struct desc_struct d;

	/* Clear the temporary KERNEXEC EFI code/data descriptors again. */
	memset(&d, 0, sizeof d);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
#endif

	/* The prolog loaded a physical-address GDT; go back to the
	 * kernel-virtual one for CPU 0. */
	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

#ifdef CONFIG_PAX_PER_CPU_PGD
	load_cr3(get_cpu_pgd(smp_processor_id(), kernel));
#else
	load_cr3(swapper_pg_dir);
#endif

	__flush_tlb_all();

	/* Matches the local_irq_save() done in the prolog. */
	local_irq_restore(efi_rt_eflags);
}
/*
 * Kernel bring-up sequence: console/serial first (so later stages can
 * print), then heap, descriptor tables, interrupts, timer, keyboard,
 * and finally paging. The ordering is deliberate and fragile.
 */
void init_kernel()
{
	terminal_initialize();
	init_serial(COM1);
	terminal_enable_serial_echo(COM1);
	terminal_printf("fOS version %s\n\n\n", KERNEL_VERSION);

	/* Heap: one page past the kernel image, 100 pages long.
	 * NOTE(review): assumes 4096-byte pages and that `end` marks the
	 * end of the kernel image -- confirm against the linker script. */
	kinit(end + 4096, end + 4096 + (100 * 4096));

	init_gdt();
	load_gdt();
	load_idt();
	load_isrs();
	irq_install();

	/* Interrupts must be on before the timer-driven sleep() below. */
	asm volatile ( "sti" );
	timer_install();
	sleep(1);
	keyboard_install();

	/* Paging last: everything above runs on physical addresses. */
	init_paging();
	switch_to_paging();
}
/*
 * Undo efi_call_phys_prelog(): reload the kernel's virtual-address GDT,
 * restore the low identity-mapping PGD entries that were overwritten
 * for the EFI call, flush the TLB, and re-enable interrupts.
 */
void efi_call_phys_epilog(void)
{
	unsigned long cr4;
	struct desc_ptr gdt_descr;

	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	cr4 = read_cr4();

	/* With PAE a single PGD entry covers the low mapping; without PAE
	 * two 4MB entries (at 0 and 0x400000) were saved by the prolog in
	 * efi_bak_pg_dir_pointer[] and both must be restored. */
	if (cr4 & X86_CR4_PAE) {
		swapper_pg_dir[pgd_index(0)].pgd =
		    efi_bak_pg_dir_pointer[0].pgd;
	} else {
		swapper_pg_dir[pgd_index(0)].pgd =
		    efi_bak_pg_dir_pointer[0].pgd;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
		    efi_bak_pg_dir_pointer[1].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();
	local_irq_restore(efi_rt_eflags);
}
/* * ReBuild the initial GDT separating the kernel from the user memory * area. * * See the memory layout in the linker script. * * Kernel code starts at 0x100000 (1MB), and is 1 MB long * Kernel data starts at 0x200000 (2MB), and is 1 MB long * Kernel stack starts at 0x400000 (4MB), and is 1 MB long * (Stack grows downwards from 0x400000 to 0x300000) * * But there are other memory areas, esp. low ones (e.g. Video * memory), that we need to access. So the kernel data _segment_ has * to start from lowest memory. :-). * * User area starts from 0x401000 (4MB start offset of stack, and 1 * page size length), and is to the end of the memory. Define two * segments that overlap. We need to control specific * allocation. :-). * * For the moment, merging stack and data segments ... * => kernel code seg starts at 0, and ends at 2 MB! * => kernel data+stack seg starts at 2MB and is 2 MB long */ int initialise_gdt_with_disjoint_kernel_and_user_memory (void) { int retval = FALSE; int i = 0; for (i = 0; i < 8192; i++) global_descriptor_table[i].descriptor = 0; i = set_total_gdt_entries (0); retval = make_descriptor(0, 0, NULL_DESCRIPTOR_FLAGS, 0 ); i = inc_total_gdt_entries (); /* RING_0, DESC_CODEDATA, SEG_EXECUTE_READ, */ retval = make_descriptor ((MEMORYORIGIN), ((KERNELCODESTART + KERNELCODELENGTH)), RING_0_CODEDATA_DESC_READ_EXECUTE, 1 ); i = inc_total_gdt_entries (); /* RING_0, DESC_CODEDATA, SEG_READ_WRITE, */ retval = make_descriptor ((MEMORYORIGIN), (TOTALKERNEL), RING_0_CODEDATA_DESC_READ_WRITE, 2 ); i = inc_total_gdt_entries (); /* RING_3, DESC_CODEDATA, SEG_EXECUTE_READ, */ retval = make_descriptor ((USERMEMORYSTART), MAXMEMPAGES, RING_3_CODEDATA_DESC_READ_EXECUTE, 3 ); i = inc_total_gdt_entries (); /* RING_3, DESC_CODEDATA, SEG_READ_WRITE, */ retval = make_descriptor ((USERMEMORYSTART), MAXMEMPAGES, RING_3_CODEDATA_DESC_READ_WRITE, 4 ); i = get_total_gdt_entries (); load_gdt (); /* Could some status flags tell us if we did ok? */ return retval; }
/*
 * Point the GDTR at the statically-built GDT.
 */
void gdt_init(void)
{
	struct gdt_ptr gdt_ptr;

	gdt_ptr.base = (unsigned long) GDT;
	/*
	 * The x86 GDTR limit is the offset of the LAST valid byte, i.e.
	 * table size minus one (Intel SDM vol. 3, "Segment Descriptor
	 * Tables"). The previous `sizeof(GDT)` claimed one byte past the
	 * table, making the CPU accept an out-of-bounds selector.
	 */
	gdt_ptr.limit = sizeof(GDT) - 1;
	load_gdt(&gdt_ptr);
}
/*
 * Load the GDTR with @newgdt / @limit.
 * ia32 supports unaligned loads & stores, so no alignment care needed.
 */
static void set_gdt(void *newgdt, __u16 limit)
{
	struct desc_ptr gdt;

	gdt.address = (unsigned long)newgdt;
	gdt.size = limit;
	load_gdt(&gdt);
}
/*
 * Undo efi_call_phys_prolog(): reload CPU 0's kernel GDT, switch back
 * to the saved page tables, and flush the TLB.
 */
void __init efi_call_phys_epilog(pgd_t *save_pgd)
{
	struct desc_ptr desc;

	desc.size = GDT_SIZE - 1;
	desc.address = (unsigned long)get_cpu_gdt_table(0);
	load_gdt(&desc);

	load_cr3(save_pgd);
	__flush_tlb_all();
}
/* * Current gdt points %fs at the "master" per-cpu area: after this, * it's on the real one. */ void switch_to_new_gdt(int cpu) { struct desc_ptr gdt_descr; gdt_descr.address = (long)get_cpu_gdt_table(cpu); gdt_descr.size = GDT_SIZE - 1; load_gdt(&gdt_descr); /* Reload the per-cpu base */ load_percpu_segment(cpu); }
void initialize_gdt() { struct desc_ptr desc_ptr; gdt_entries[GDT_ENTRY_KERNEL_CS] = FULL_EXEC_SEGMENT; gdt_entries[GDT_ENTRY_KERNEL_DS] = FULL_SEGMENT; desc_ptr.size = sizeof(gdt_entries); desc_ptr.address = (long)(&gdt_entries); load_gdt(&desc_ptr); }
/*
 * Build a minimal three-entry GDT (null, code, data) and load it.
 *
 * NOTE(review): set_gdt()'s ten positional parameters are not visible
 * from here -- confirm their meaning against its definition. Type
 * values 10 and 2 with 0xFFFFF limits look like execute/read code and
 * read/write data descriptors respectively, but that is inferred.
 */
void set_gdtr(void)
{
	set_gdt(&gdt[0], 0, 0, 0, 0, 0, 0, 0, 0, 0);	/* null descriptor */
	set_gdt(&gdt[1], 0, 0x0FFFFF, 1, 0, 1, 10, 1, 1, 1);
	set_gdt(&gdt[2], 0, 0x0FFFFF, 1, 0, 1, 2, 1, 1, 1);

	gdtr.size = NUM_GDT * sizeof( SEGMENT_DESCRIPTOR );
	gdtr.base = ( SEGMENT_DESCRIPTOR *)gdt;
	load_gdt();

	/* Presumably serializes execution so the new descriptors take
	 * effect -- confirm what flush_pipeline() actually does. */
	flush_pipeline();
}
/*
 * Prepare for an EFI call in physical mode: switch to the
 * identity-mapped initial_page_table and load a GDT descriptor holding
 * the *physical* address of CPU 0's GDT.
 *
 * Returns the pgd that efi_call_phys_epilog() must restore.
 */
pgd_t * __init efi_call_phys_prolog(void)
{
	struct desc_ptr gdt_descr;
	pgd_t *save_pgd;

	/* Current pgd is swapper_pg_dir, we'll restore it later: */
	save_pgd = swapper_pg_dir;
	load_cr3(initial_page_table);
	__flush_tlb_all();

	/* Physical address: the firmware call runs without our mappings. */
	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	return save_pgd;
}
/*
 * Undo efi_call_phys_prelog(): reload the kernel's virtual-address GDT,
 * copy the saved kernel PGD entries back into swapper_pg_dir, flush the
 * TLB, and re-enable interrupts.
 */
void __init efi_call_phys_epilog(void)
{
	struct desc_ptr gdt_descr;

	/*
	 * desc_ptr.address is an unsigned long; the pointer returned by
	 * get_cpu_gdt_table() needs an explicit conversion (assigning a
	 * pointer to an integer without a cast is a C constraint
	 * violation -- the sibling epilog/prolog functions in this file
	 * all carry the cast).
	 */
	gdt_descr.address = (unsigned long)get_cpu_gdt_table(0);
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);

	/* Restore the kernel PGD entries saved by the prelog. */
	clone_pgd_range(swapper_pg_dir, efi_bak_pg_dir_pointer,
			KERNEL_PGD_PTRS);

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();
	local_irq_restore(efi_rt_eflags);
}
/*
 * Restore the CPU state captured at suspend time: control registers,
 * descriptor tables, segment registers, sysenter MSRs, XCR0, FPU and
 * MTRR/MCE state. Order matters: control registers and descriptor
 * tables must be valid before segment registers are reloaded.
 */
static void __restore_processor_state(struct saved_context *ctxt)
{
	/*
	 * control registers
	 */
	/* cr4 was introduced in the Pentium CPU */
	if (ctxt->cr4)
		write_cr4(ctxt->cr4);
	write_cr3(ctxt->cr3);
	write_cr2(ctxt->cr2);
	write_cr0(ctxt->cr0);

	/*
	 * now restore the descriptor tables to their proper values
	 * ltr is done in fix_processor_context().
	 */
	load_gdt(&ctxt->gdt);
	load_idt(&ctxt->idt);

	/*
	 * segment registers
	 */
	loadsegment(es, ctxt->es);
	loadsegment(fs, ctxt->fs);
	loadsegment(gs, ctxt->gs);
	loadsegment(ss, ctxt->ss);

	/*
	 * sysenter MSRs
	 */
	if (boot_cpu_has(X86_FEATURE_SEP))
		enable_sep_cpu();

	/*
	 * restore XCR0 for xsave capable cpu's.
	 */
	if (cpu_has_xsave)
		xsetbv(XCR_XFEATURE_ENABLED_MASK, pcntxt_mask);

	fix_processor_context();
	do_fpu_end();
	mtrr_ap_init();
	mcheck_init(&boot_cpu_data);
}
/*
 * Prepare for an EFI physical-mode call: save the kernel PGD entries,
 * alias the kernel mappings into the low (user) part of swapper_pg_dir
 * so physical addresses resolve while paging is on, then load a GDT
 * descriptor holding the physical address of CPU 0's GDT. Interrupts
 * stay disabled until the epilog restores efi_rt_eflags.
 */
void __init efi_call_phys_prelog(void)
{
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	/* Back up the kernel PGD entries for the epilog to restore. */
	clone_pgd_range(efi_bak_pg_dir_pointer, swapper_pg_dir,
			KERNEL_PGD_PTRS);
	/* Mirror the kernel mappings down to virtual address 0. */
	clone_pgd_range(swapper_pg_dir, swapper_pg_dir + KERNEL_PGD_BOUNDARY,
			min_t(unsigned long, KERNEL_PGD_PTRS,
			      KERNEL_PGD_BOUNDARY));

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	gdt_descr.address = (struct desc_struct *)__pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
/*
 * Prepare for an EFI physical-mode call on 32-bit: duplicate the kernel
 * mapping into the low part of swapper_pg_dir -- one PGD entry with
 * PAE, two 4MB entries without -- saving the overwritten entries in
 * efi_bak_pg_dir_pointer[] for the epilog, then load a GDT descriptor
 * holding the physical address of CPU 0's GDT. Interrupts stay off
 * until the matching epilog.
 */
void efi_call_phys_prelog(void)
{
	unsigned long cr4;
	unsigned long temp;
	struct desc_ptr gdt_descr;

	local_irq_save(efi_rt_eflags);

	/*
	 * If I don't have PAE, I should just duplicate two entries in page
	 * directory. If I have PAE, I just need to duplicate one entry in
	 * page directory.
	 */
	cr4 = read_cr4();

	if (cr4 & X86_CR4_PAE) {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		swapper_pg_dir[0].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
	} else {
		efi_bak_pg_dir_pointer[0].pgd =
		    swapper_pg_dir[pgd_index(0)].pgd;
		efi_bak_pg_dir_pointer[1].pgd =
		    swapper_pg_dir[pgd_index(0x400000)].pgd;
		/* Alias the kernel image mapping at address 0 ... */
		swapper_pg_dir[pgd_index(0)].pgd =
		    swapper_pg_dir[pgd_index(PAGE_OFFSET)].pgd;
		/* ... and the next 4MB chunk at 0x400000. */
		temp = PAGE_OFFSET + 0x400000;
		swapper_pg_dir[pgd_index(0x400000)].pgd =
		    swapper_pg_dir[pgd_index(temp)].pgd;
	}

	/*
	 * After the lock is released, the original page table is restored.
	 */
	__flush_tlb_all();

	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
// Name: initialize_gdt // Description: This function will create the global descriptor table and then loads the GDTR // Parameter: - // Return: - void initialize_gdt() { struct descriptor null_entry, cs_entry, ds_entry; gdptr.base_address = (unsigned int)&global_desc_table; gdptr.size =(unsigned short int) sizeof(struct descriptor_table_entry)*3 - 1; null_entry = getDescriptor(0, 0, 0, 0, 0, 0, 0, 0, 0, 0); global_desc_table[0] = descriptortotableentry(null_entry); cs_entry = getDescriptor(0, 0xFFFFFFFF, 10, 1, 0, 0, 1, 1, 0, 1 ); global_desc_table[1] = descriptortotableentry(cs_entry); ds_entry = getDescriptor(0, 0xFFFFFFFF, 2, 1, 0, 0, 1, 1, 0, 1 ); global_desc_table[2] = descriptortotableentry(ds_entry); //putc('x'); load_gdt(); return; }
void __restore_processor_state(struct saved_context *ctxt) { /* * control registers */ write_cr4(ctxt->cr4); write_cr3(ctxt->cr3); write_cr2(ctxt->cr2); write_cr2(ctxt->cr0); /* * now restore the descriptor tables to their proper values * ltr is done i fix_processor_context(). */ load_gdt(&ctxt->gdt_limit); load_idt(&ctxt->idt_limit); /* * segment registers */ loadsegment(es, ctxt->es); loadsegment(fs, ctxt->fs); loadsegment(gs, ctxt->gs); loadsegment(ss, ctxt->ss); #ifdef CONFIG_SYSENTER /* * sysenter MSRs */ if (boot_cpu_has(X86_FEATURE_SEP)) enable_sep_cpu(); #endif fix_processor_context(); do_fpu_end(); mtrr_ap_init(); }
/*
 * Prepare for an EFI physical-mode call: switch to the identity-mapped
 * initial_page_table and load CPU 0's GDT by physical address. Under
 * PAX_KERNEXEC, temporary flat descriptors are installed for the
 * firmware's use; the matching epilog clears them again. Interrupts
 * stay disabled until the epilog restores efi_rt_eflags.
 */
void __init efi_call_phys_prolog(void)
{
	struct desc_ptr gdt_descr;

#ifdef CONFIG_PAX_KERNEXEC
	struct desc_struct d;
#endif

	local_irq_save(efi_rt_eflags);

	load_cr3(initial_page_table);
	__flush_tlb_all();

#ifdef CONFIG_PAX_KERNEXEC
	/* 0x9B/0xC: present ring-0 execute/read code, 4KB-granular 32-bit. */
	pack_descriptor(&d, 0, 0xFFFFF, 0x9B, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_CS, &d, DESCTYPE_S);
	/* 0x93/0xC: present ring-0 read/write data segment. */
	pack_descriptor(&d, 0, 0xFFFFF, 0x93, 0xC);
	write_gdt_entry(get_cpu_gdt_table(0), GDT_ENTRY_KERNEXEC_EFI_DS, &d, DESCTYPE_S);
#endif

	/* Physical address: the firmware call runs without our mappings. */
	gdt_descr.address = __pa(get_cpu_gdt_table(0));
	gdt_descr.size = GDT_SIZE - 1;
	load_gdt(&gdt_descr);
}
/*
 * Load the boot GDT.
 *
 * NOTE(review): load_gdt(0) passes a zero/null argument, while other
 * load_gdt() callers in this tree pass a struct desc_ptr *. Confirm
 * that this platform's load_gdt is a macro/function that ignores its
 * argument (or treats 0 specially) rather than dereferencing it.
 */
static void __init init_gdt(void)
{
	load_gdt(0);
}