/*
 *	Now running in a thread.  Kick off other services,
 *	invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
	processor_t		processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
	kernel_bootstrap_thread_kprintf("calling idle_thread_create\n");
	/*
	 * Create the idle processor thread.
	 */
	idle_thread_create(processor);

	/*
	 * N.B. Do not stick anything else
	 * before this point.
	 *
	 * Start up the scheduler services.
	 */
	kernel_bootstrap_thread_kprintf("calling sched_startup\n");
	sched_startup();

	/*
	 * Thread lifecycle maintenance (teardown, stack allocation)
	 */
	kernel_bootstrap_thread_kprintf("calling thread_daemon_init\n");
	thread_daemon_init();

	/*
	 * Thread callout service.
	 */
	kernel_bootstrap_thread_kprintf("calling thread_call_initialize\n");
	thread_call_initialize();

	/*
	 * Remain on current processor as
	 * additional processors come online.
	 */
	kernel_bootstrap_thread_kprintf("calling thread_bind\n");
	thread_bind(processor);

	/*
	 * Kick off memory mapping adjustments.
	 */
	kernel_bootstrap_thread_kprintf("calling mapping_adjust\n");
	mapping_adjust();

	/*
	 *	Create the clock service.
	 */
	kernel_bootstrap_thread_kprintf("calling clock_service_create\n");
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the physical copy window for processor 0.
	 * This is required before starting IOKit.
	 */
	cpu_physwindow_init(0);
#endif

	vm_kernel_reserved_entry_init();

#if MACH_KDP
	kernel_bootstrap_kprintf("calling kdp_init\n");
	kdp_init();
#endif

#if CONFIG_COUNTERS
	pmc_bootstrap();
#endif

#if (defined(__i386__) || defined(__x86_64__))
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	start_kern_tracing(new_nkdbufs);
	if (turn_on_log_leaks)
		log_leaks = 1;
#endif

#ifdef	IOKIT
	PE_init_iokit();
#endif

	(void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the copy window for processor 0.
	 * This also allocates window space for all other processors.
	 * However, this is dependent on the number of processors - so this call
	 * must be after IOKit has been started because IOKit performs processor
	 * discovery.
	 */
	cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	start_kern_tracing(new_nkdbufs);
	if (turn_on_log_leaks)
		log_leaks = 1;
#endif

	/*
	 *	Initialize the shared region module.
	 */
	vm_shared_region_init();
	vm_commpage_init();
	vm_commpage_text_init();

#if CONFIG_MACF
	mac_policy_initmach();
#endif

	/*
	 * Initialize the global used for permuting kernel
	 * addresses that may be exported to userland as tokens
	 * using VM_KERNEL_ADDRPERM(). Force the random number
	 * to be odd to avoid mapping a non-zero
	 * word-aligned address to zero via addition.
	 */
	vm_kernel_addrperm = (vm_offset_t)early_random() | 1;

	/*
	 *	Start the user bootstrap.
	 */
#ifdef	MACH_BSD
	bsd_init();
#endif

	/*
	 * Get rid of segments used to bootstrap kext loading. This removes
	 * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
	 */
#if 0
	OSKextRemoveKextBootstrap();
#endif

	serial_keyboard_init();		/* Start serial keyboard if wanted */

	vm_page_init_local_q();

	thread_bind(PROCESSOR_NULL);

	/*
	 *	Become the pageout daemon.
	 */
	vm_pageout();
	/*NOTREACHED*/
}
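/*
 * Illustrative sketch (added for exposition; not part of the original
 * source): why vm_kernel_addrperm is forced odd above.  VM_KERNEL_ADDRPERM()
 * obscures an exported kernel pointer essentially by adding the random
 * constant, with 0 passed through unchanged.  A word-aligned address is
 * even, and even + odd is always odd, so no non-zero aligned address can
 * wrap around to 0 or be confused with the NULL token after permutation.
 * The names demo_addrperm and demo_addrperm_token are hypothetical
 * stand-ins, not kernel symbols.
 */
static unsigned long demo_addrperm;	/* stand-in for vm_kernel_addrperm */

static unsigned long
demo_addrperm_token(unsigned long addr)
{
	/* NULL stays NULL so userland can still test for "no object". */
	if (addr == 0)
		return 0;
	/* Wrap-around addition; an odd perm keeps even inputs away from 0. */
	return addr + demo_addrperm;
}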
void
cpu_desc_init(
	cpu_data_t	*cdp,
	boolean_t	is_boot_cpu)
{
	cpu_desc_table_t	*cdt = cdp->cpu_desc_tablep;
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	if (is_boot_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the index pointers to the high shared-mapping space.
		 * Note that the sysenter stack uses empty space above the ktss
		 * in the HIGH_FIXED_KTSS page. In this case we don't map the
		 * real master_sstk in low memory.
		 */
		cdi->cdi_ktss = (struct i386_tss *)
				pmap_index_to_virt(HIGH_FIXED_KTSS);
		cdi->cdi_sstk = (vm_offset_t) (cdi->cdi_ktss + 1) +
				(vm_offset_t) &master_sstk.top -
				(vm_offset_t) &master_sstk;
#if	MACH_KDB
		cdi->cdi_dbtss = (struct i386_tss *)
				pmap_index_to_virt(HIGH_FIXED_DBTSS);
#endif	/* MACH_KDB */
		cdi->cdi_gdt = (struct fake_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_GDT);
		cdi->cdi_idt = (struct fake_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_IDT);
		cdi->cdi_ldt = (struct fake_descriptor *)
				pmap_index_to_virt(HIGH_FIXED_LDT_BEGIN);
	} else {
		vm_offset_t	cpu_hi_desc;

		cpu_hi_desc = pmap_cpu_high_shared_remap(cdp->cpu_number,
							 HIGH_CPU_DESC,
							 (vm_offset_t) cdt, 1);

		/*
		 * Per-cpu GDT, IDT, LDT, KTSS descriptors are allocated in one
		 * block (cpu_desc_table) and double-mapped into high shared space
		 * in one page window.
		 * Also, a transient stack for the fast sysenter path, the top of
		 * which is set at context switch time to point to the PCB using
		 * the high address.
		 */
		cdi->cdi_gdt  = (struct fake_descriptor *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, gdt[0]));
		cdi->cdi_idt  = (struct fake_descriptor *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, idt[0]));
		cdi->cdi_ktss = (struct i386_tss *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, ktss));
		cdi->cdi_sstk = cpu_hi_desc + offsetof(cpu_desc_table_t, sstk.top);

		/*
		 * LDT descriptors are mapped into a separate area.
		 */
		cdi->cdi_ldt = (struct fake_descriptor *)
				pmap_cpu_high_shared_remap(
					cdp->cpu_number,
					HIGH_CPU_LDT_BEGIN,
					(vm_offset_t) cdp->cpu_ldtp,
					HIGH_CPU_LDT_END - HIGH_CPU_LDT_BEGIN + 1);

		/*
		 * Copy the tables
		 */
		bcopy((char *)master_idt, (char *)cdt->idt, sizeof(master_idt));
		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
		bzero((char *)&cdt->ktss, sizeof(struct i386_tss));
#if	MACH_KDB
		cdi->cdi_dbtss = (struct i386_tss *) (cpu_hi_desc +
				offsetof(cpu_desc_table_t, dbtss));
		bcopy((char *)&master_dbtss,
		      (char *)&cdt->dbtss,
		      sizeof(struct i386_tss));
#endif	/* MACH_KDB */

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 */
		cdt->gdt[sel_idx(KERNEL_LDT)] = ldt_desc_pattern;
		cdt->gdt[sel_idx(KERNEL_LDT)].offset = (vm_offset_t) cdi->cdi_ldt;
		fix_desc(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

		cdt->gdt[sel_idx(USER_LDT)] = ldt_desc_pattern;
		cdt->gdt[sel_idx(USER_LDT)].offset = (vm_offset_t) cdi->cdi_ldt;
		fix_desc(&cdt->gdt[sel_idx(USER_LDT)], 1);

		cdt->gdt[sel_idx(KERNEL_TSS)] = tss_desc_pattern;
		cdt->gdt[sel_idx(KERNEL_TSS)].offset = (vm_offset_t) cdi->cdi_ktss;
		fix_desc(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

		cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
		cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
		fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);

#if	MACH_KDB
		cdt->gdt[sel_idx(DEBUG_TSS)] = tss_desc_pattern;
		cdt->gdt[sel_idx(DEBUG_TSS)].offset = (vm_offset_t) cdi->cdi_dbtss;
		fix_desc(&cdt->gdt[sel_idx(DEBUG_TSS)], 1);

		cdt->dbtss.esp0 = (int)(db_task_stack_store +
				(INTSTACK_SIZE * (cdp->cpu_number)) -
				sizeof (natural_t));
		cdt->dbtss.esp = cdt->dbtss.esp0;
		cdt->dbtss.eip = (int)&db_task_start;
#endif	/* MACH_KDB */

		cdt->ktss.ss0 = KERNEL_DS;
		cdt->ktss.io_bit_map_offset = 0x0FFF;	/* no IO bitmap */

		cpu_userwindow_init(cdp->cpu_number);
		cpu_physwindow_init(cdp->cpu_number);
	}
}
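/*
 * Illustrative sketch (added for exposition; not the original fix_desc()):
 * the GDT entries above are first written as "fake" descriptors whose
 * offset/limit/access live in flat, easy-to-assign fields, and fix_desc()
 * then converts each entry in place into the scattered layout the CPU
 * expects.  This hypothetical helper shows that hardware layout for a
 * 32-bit segment descriptor.
 */
static unsigned long long
demo_pack_desc32(unsigned int base, unsigned int limit,
		 unsigned char access, unsigned char flags)
{
	unsigned long long d = 0;

	d |= (unsigned long long)(limit & 0xFFFF);		/* limit 15:0  -> bits  0..15 */
	d |= (unsigned long long)(base  & 0xFFFF) << 16;	/* base  15:0  -> bits 16..31 */
	d |= (unsigned long long)((base >> 16) & 0xFF) << 32;	/* base  23:16 -> bits 32..39 */
	d |= (unsigned long long)access << 40;			/* type/DPL/P  -> bits 40..47 */
	d |= (unsigned long long)((limit >> 16) & 0xF) << 48;	/* limit 19:16 -> bits 48..51 */
	d |= (unsigned long long)(flags & 0xF) << 52;		/* AVL/L/D/G   -> bits 52..55 */
	d |= (unsigned long long)((base >> 24) & 0xFF) << 56;	/* base  31:24 -> bits 56..63 */
	return d;
}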
void
cpu_desc_init64(
	cpu_data_t	*cdp,
	boolean_t	is_boot_cpu)
{
	cpu_desc_table64_t	*cdt = (cpu_desc_table64_t *)
					cdp->cpu_desc_tablep;
	cpu_desc_index_t	*cdi = &cdp->cpu_desc_index;

	if (is_boot_cpu) {
		/*
		 * Master CPU uses the tables built at boot time.
		 * Just set the index pointers to the low memory space.
		 * Note that in 64-bit mode these are addressed in the
		 * double-mapped window (uber-space).
		 */
		cdi->cdi_ktss = (struct i386_tss *) &master_ktss64;
		cdi->cdi_sstk = (vm_offset_t) &master_sstk.top;
		cdi->cdi_gdt  = master_gdt;
		cdi->cdi_idt  = (struct fake_descriptor *) &master_idt64;
		cdi->cdi_ldt  = (struct fake_descriptor *) &master_ldt;

		/* Replace the expanded LDT and TSS slots in the GDT: */
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		*(struct fake_descriptor64 *) &master_gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;

		/*
		 * Fix up the expanded descriptors for 64-bit.
		 */
		fix_desc64((void *) &master_idt64, IDTSZ);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_LDT)], 1);
		fix_desc64((void *) &master_gdt[sel_idx(KERNEL_TSS)], 1);

		/*
		 * Set the double-fault stack as IST1 in the 64-bit TSS
		 */
		master_ktss64.ist1 = UBER64(df_task_stack_end);
	} else {
		/*
		 * Per-cpu GDT, IDT, KTSS descriptors are allocated in kernel
		 * heap (cpu_desc_table) and double-mapped in uber-space
		 * (over 4GB).
		 * LDT descriptors are mapped into a separate area.
		 */
		cdi->cdi_gdt  = (struct fake_descriptor *)cdt->gdt;
		cdi->cdi_idt  = (struct fake_descriptor *)cdt->idt;
		cdi->cdi_ktss = (struct i386_tss *)&cdt->ktss;
		cdi->cdi_sstk = (vm_offset_t)&cdt->sstk.top;
		cdi->cdi_ldt  = cdp->cpu_ldtp;

		/*
		 * Copy the tables
		 */
		bcopy((char *)master_idt64, (char *)cdt->idt, sizeof(master_idt64));
		bcopy((char *)master_gdt, (char *)cdt->gdt, sizeof(master_gdt));
		bcopy((char *)master_ldt, (char *)cdp->cpu_ldtp, sizeof(master_ldt));
		bcopy((char *)&master_ktss64,
		      (char *)&cdt->ktss,
		      sizeof(struct x86_64_tss));

		/*
		 * Fix up the entries in the GDT to point to
		 * this LDT and this TSS.
		 */
		kernel_ldt_desc64.offset[0] = (vm_offset_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_LDT)], 1);

		kernel_ldt_desc64.offset[0] = (vm_offset_t) cdi->cdi_ldt;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(USER_LDT)] =
			kernel_ldt_desc64;
		fix_desc64(&cdt->gdt[sel_idx(USER_LDT)], 1);

		kernel_tss_desc64.offset[0] = (vm_offset_t) cdi->cdi_ktss;
		*(struct fake_descriptor64 *) &cdt->gdt[sel_idx(KERNEL_TSS)] =
			kernel_tss_desc64;
		fix_desc64(&cdt->gdt[sel_idx(KERNEL_TSS)], 1);

		cdt->gdt[sel_idx(CPU_DATA_GS)] = cpudata_desc_pattern;
		cdt->gdt[sel_idx(CPU_DATA_GS)].offset = (vm_offset_t) cdp;
		fix_desc(&cdt->gdt[sel_idx(CPU_DATA_GS)], 1);

		/* Set double-fault stack as IST1 */
		cdt->ktss.ist1 = UBER64((unsigned long)cdt->dfstk +
					sizeof(cdt->dfstk));

		/*
		 * Allocate copyio windows.
		 */
		cpu_userwindow_init(cdp->cpu_number);
		cpu_physwindow_init(cdp->cpu_number);
	}

	/* Require that the top of the sysenter stack is 16-byte aligned */
	if ((cdi->cdi_sstk % 16) != 0)
		panic("cpu_desc_init64() sysenter stack not 16-byte aligned");
}
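/*
 * Illustrative sketch (added for exposition; field names are hypothetical):
 * why cpu_desc_init64() writes KERNEL_LDT and KERNEL_TSS through a
 * fake_descriptor64 and fixes them up with fix_desc64().  In long mode,
 * LDT and TSS descriptors expand to 16 bytes, i.e. two consecutive 8-byte
 * GDT slots, so the full 64-bit base can be held.  The layout is roughly:
 */
struct demo_sys_desc64 {
	unsigned short	limit_lo;	/* limit 15:0 */
	unsigned short	base_lo;	/* base 15:0 */
	unsigned char	base_mid;	/* base 23:16 */
	unsigned char	access;		/* type, DPL, present */
	unsigned char	gran_limit;	/* limit 19:16 plus flags */
	unsigned char	base_hi;	/* base 31:24 */
	unsigned int	base_top;	/* base 63:32 (the second GDT slot) */
	unsigned int	reserved;	/* must be zero */
};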
/*
 *	Now running in a thread.  Kick off other services,
 *	invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
	processor_t		processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
	kernel_bootstrap_thread_log("idle_thread_create");
	/*
	 * Create the idle processor thread.
	 */
	idle_thread_create(processor);

	/*
	 * N.B. Do not stick anything else
	 * before this point.
	 *
	 * Start up the scheduler services.
	 */
	kernel_bootstrap_thread_log("sched_startup");
	sched_startup();

	/*
	 * Thread lifecycle maintenance (teardown, stack allocation)
	 */
	kernel_bootstrap_thread_log("thread_daemon_init");
	thread_daemon_init();

	/* Create kernel map entry reserve */
	vm_kernel_reserved_entry_init();

	/*
	 * Thread callout service.
	 */
	kernel_bootstrap_thread_log("thread_call_initialize");
	thread_call_initialize();

	/*
	 * Remain on current processor as
	 * additional processors come online.
	 */
	kernel_bootstrap_thread_log("thread_bind");
	thread_bind(processor);

	/*
	 * Initialize ipc thread call support.
	 */
	kernel_bootstrap_thread_log("ipc_thread_call_init");
	ipc_thread_call_init();

	/*
	 * Kick off memory mapping adjustments.
	 */
	kernel_bootstrap_thread_log("mapping_adjust");
	mapping_adjust();

	/*
	 *	Create the clock service.
	 */
	kernel_bootstrap_thread_log("clock_service_create");
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the physical copy window for processor 0.
	 * This is required before starting IOKit.
	 */
	cpu_physwindow_init(0);
#endif

#if MACH_KDP
	kernel_bootstrap_log("kdp_init");
	kdp_init();
#endif

#if ALTERNATE_DEBUGGER
	alternate_debugger_init();
#endif

#if KPC
	kpc_init();
#endif

#if CONFIG_ECC_LOGGING
	ecc_log_init();
#endif

#if KPERF
	kperf_bootstrap();
#endif

#if HYPERVISOR
	hv_support_init();
#endif

#if CONFIG_TELEMETRY
	kernel_bootstrap_log("bootprofile_init");
	bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
	vmx_init();
#endif

#if (defined(__i386__) || defined(__x86_64__))
	if (kdebug_serial) {
		new_nkdbufs = 1;
		if (trace_typefilter == 0)
			trace_typefilter = 1;
	}
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs,
						   FALSE,
						   trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;
#endif

	kernel_bootstrap_log("prng_init");
	prng_cpu_init(master_cpu);

#ifdef	IOKIT
	PE_init_iokit();
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
	(void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the copy window for processor 0.
	 * This also allocates window space for all other processors.
	 * However, this is dependent on the number of processors - so this call
	 * must be after IOKit has been started because IOKit performs processor
	 * discovery.
	 */
	cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs,
						   FALSE,
						   trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;
#endif

	/*
	 *	Initialize the shared region module.
	 */
	vm_shared_region_init();
	vm_commpage_init();
	vm_commpage_text_init();

#if CONFIG_MACF
	kernel_bootstrap_log("mac_policy_initmach");
	mac_policy_initmach();
#endif

#if CONFIG_SCHED_SFI
	kernel_bootstrap_log("sfi_init");
	sfi_init();
#endif

	/*
	 * Initialize the globals used for permuting kernel
	 * addresses that may be exported to userland as tokens
	 * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
	 * Force the random number to be odd to avoid mapping a non-zero
	 * word-aligned address to zero via addition.
	 * Note: at this stage we can use the cryptographically secure PRNG
	 * rather than early_random().
	 */
	read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
	vm_kernel_addrperm |= 1;
	read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
	buf_kernel_addrperm |= 1;
	read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
	vm_kernel_addrperm_ext |= 1;

	vm_set_restrictions();

	/*
	 *	Start the user bootstrap.
	 */
#ifdef	MACH_BSD
	bsd_init();
#endif

	/*
	 * Get rid of segments used to bootstrap kext loading. This removes
	 * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
	 */
	OSKextRemoveKextBootstrap();

	serial_keyboard_init();		/* Start serial keyboard if wanted */

	vm_page_init_local_q();

	thread_bind(PROCESSOR_NULL);

	/*
	 *	Become the pageout daemon.
	 */
	vm_pageout();
	/*NOTREACHED*/
}
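/*
 * Illustrative sketch (added for exposition): the thread_bind() bracket
 * used in kernel_bootstrap_thread() above.  Binding to the boot processor
 * keeps the bootstrap thread in place while secondary processors come
 * online; passing PROCESSOR_NULL releases the binding so the thread may
 * migrate again before it becomes the pageout daemon.  The helper name
 * demo_cpu_sensitive_setup is a hypothetical placeholder.
 */
extern void demo_cpu_sensitive_setup(void);	/* hypothetical per-CPU work */

static void
demo_bind_bracket(processor_t processor)
{
	thread_bind(processor);		/* pin the calling thread here */
	demo_cpu_sensitive_setup();	/* guaranteed to run on "processor" */
	thread_bind(PROCESSOR_NULL);	/* release: may migrate again */
}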