Example #1
/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
	processor_t		processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
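	/*
	 * Note: the kprintf() body above is commented out, so this per-step
	 * logging macro normally expands to nothing; re-enable it to trace
	 * the bootstrap thread's progress via kprintf().
	 */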
	kernel_bootstrap_thread_log("idle_thread_create");
	/*
	 * Create the idle processor thread.
	 */
	idle_thread_create(processor);

	/*
	 * N.B. Do not stick anything else
	 * before this point.
	 *
	 * Start up the scheduler services.
	 */
	kernel_bootstrap_thread_log("sched_startup");
	sched_startup();

	/*
	 * Thread lifecycle maintenance (teardown, stack allocation)
	 */
	kernel_bootstrap_thread_log("thread_daemon_init");
	thread_daemon_init();

	/* Create kernel map entry reserve */
	vm_kernel_reserved_entry_init();

	/*
	 * Thread callout service.
	 */
	kernel_bootstrap_thread_log("thread_call_initialize");
	thread_call_initialize();

	/*
	 * Remain on current processor as
	 * additional processors come online.
	 */
	kernel_bootstrap_thread_log("thread_bind");
	thread_bind(processor);

	/*
	 * Initialize ipc thread call support.
	 */
	kernel_bootstrap_thread_log("ipc_thread_call_init");
	ipc_thread_call_init();

	/*
	 * Kick off memory mapping adjustments.
	 */
	kernel_bootstrap_thread_log("mapping_adjust");
	mapping_adjust();

	/*
	 *	Create the clock service.
	 */
	kernel_bootstrap_thread_log("clock_service_create");
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	kth_started = 1;
		
#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the physical copy window for processor 0.
	 * This is required before kicking off IOKit.
	 */
	cpu_physwindow_init(0);
#endif

#if MACH_KDP
	kernel_bootstrap_log("kdp_init");
	kdp_init();
#endif

#if ALTERNATE_DEBUGGER
	alternate_debugger_init();
#endif

#if KPC
	kpc_init();
#endif

#if CONFIG_ECC_LOGGING
	ecc_log_init();
#endif 

#if KPERF
	kperf_bootstrap();
#endif

#if HYPERVISOR
	hv_support_init();
#endif

#if CONFIG_TELEMETRY
	kernel_bootstrap_log("bootprofile_init");
	bootprofile_init();
#endif

#if (defined(__i386__) || defined(__x86_64__)) && CONFIG_VMX
	vmx_init();
#endif

#if (defined(__i386__) || defined(__x86_64__))
	if (kdebug_serial) {
		new_nkdbufs = 1;
		if (trace_typefilter == 0)
			trace_typefilter = 1;
	}
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs,
						   FALSE,
						   trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;

#endif

	kernel_bootstrap_log("prng_init");
	prng_cpu_init(master_cpu);

#ifdef	IOKIT
	PE_init_iokit();
#endif

	assert(ml_get_interrupts_enabled() == FALSE);
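	/*
	 * Interrupts have stayed disabled throughout bootstrap; spllo()
	 * lowers the interrupt priority level so interrupts can be taken
	 * from here on.
	 */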
	(void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
	/*
	 * Create and initialize the copy window for processor 0
	 * This also allocates window space for all other processors.
	 * However, this is dependent on the number of processors - so this call
	 * must be after IOKit has been started because IOKit performs processor
	 * discovery.
	 */
	cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
	if (turn_on_log_leaks && !new_nkdbufs)
		new_nkdbufs = 200000;
	if (trace_typefilter)
		start_kern_tracing_with_typefilter(new_nkdbufs, FALSE, trace_typefilter);
	else
		start_kern_tracing(new_nkdbufs, FALSE);
	if (turn_on_log_leaks)
		log_leaks = 1;
#endif

	/*
	 *	Initialize the shared region module.
	 */
	vm_shared_region_init();
	vm_commpage_init();
	vm_commpage_text_init();


#if CONFIG_MACF
	kernel_bootstrap_log("mac_policy_initmach");
	mac_policy_initmach();
#endif

#if CONFIG_SCHED_SFI
	kernel_bootstrap_log("sfi_init");
	sfi_init();
#endif

	/*
	 * Initialize the globals used for permuting kernel
	 * addresses that may be exported to userland as tokens
	 * using VM_KERNEL_ADDRPERM()/VM_KERNEL_ADDRPERM_EXTERNAL().
	 * Force the random number to be odd to avoid mapping a non-zero
	 * word-aligned address to zero via addition.
	 * Note: at this stage we can use the cryptographically secure PRNG
	 * rather than early_random().
	 */
	read_random(&vm_kernel_addrperm, sizeof(vm_kernel_addrperm));
	vm_kernel_addrperm |= 1;
	read_random(&buf_kernel_addrperm, sizeof(buf_kernel_addrperm));
	buf_kernel_addrperm |= 1;
	read_random(&vm_kernel_addrperm_ext, sizeof(vm_kernel_addrperm_ext));
	vm_kernel_addrperm_ext |= 1;
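	/*
	 * A minimal sketch of how such an additive permutation macro might
	 * look (illustrative only; the real macro is defined elsewhere in
	 * the kernel headers):
	 *
	 *	#define VM_KERNEL_ADDRPERM(addr)			\
	 *		(((vm_offset_t)(addr) == 0) ? (vm_offset_t)0 :	\
	 *		 (vm_offset_t)(addr) + vm_kernel_addrperm)
	 *
	 * With an odd vm_kernel_addrperm, a non-zero word-aligned address
	 * can never sum to zero, so a zero token still unambiguously
	 * denotes a NULL pointer.
	 */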

	vm_set_restrictions();



	/*
	 *	Start the user bootstrap.
	 */
#ifdef	MACH_BSD
	bsd_init();
#endif

	/*
	 * Get rid of segments used to bootstrap kext loading. This removes
	 * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
	 */
	OSKextRemoveKextBootstrap();

	serial_keyboard_init();		/* Start serial keyboard if wanted */

	vm_page_init_local_q();

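	/*
	 * Undo the earlier thread_bind(processor): from this point on the
	 * bootstrap thread may run on any processor.
	 */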
	thread_bind(PROCESSOR_NULL);

	/*
	 *	Become the pageout daemon.
	 */
	vm_pageout();
	/*NOTREACHED*/
}
Example #2
/*
 * Now running in a thread.  Kick off other services,
 * invoke user bootstrap, enter pageout loop.
 */
static void
kernel_bootstrap_thread(void)
{
    processor_t		processor = current_processor();

#define kernel_bootstrap_thread_kprintf(x...) /* kprintf("kernel_bootstrap_thread: " x) */
    kernel_bootstrap_thread_kprintf("calling idle_thread_create\n");
    /*
     * Create the idle processor thread.
     */
    idle_thread_create(processor);

    /*
     * N.B. Do not stick anything else
     * before this point.
     *
     * Start up the scheduler services.
     */
    kernel_bootstrap_thread_kprintf("calling sched_startup\n");
    sched_startup();

    /*
     * Thread lifecycle maintenance (teardown, stack allocation)
     */
    kernel_bootstrap_thread_kprintf("calling thread_daemon_init\n");
    thread_daemon_init();

    /*
     * Thread callout service.
     */
    kernel_bootstrap_thread_kprintf("calling thread_call_initialize\n");
    thread_call_initialize();

    /*
     * Remain on current processor as
     * additional processors come online.
     */
    kernel_bootstrap_thread_kprintf("calling thread_bind\n");
    thread_bind(processor);

    /*
     * Kick off memory mapping adjustments.
     */
    kernel_bootstrap_thread_kprintf("calling mapping_adjust\n");
    mapping_adjust();

    /*
     *	Create the clock service.
     */
    kernel_bootstrap_thread_kprintf("calling clock_service_create\n");
    clock_service_create();

    /*
     *	Create the device service.
     */
    device_service_create();

    kth_started = 1;

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the physical copy window for processor 0.
     * This is required before kicking off IOKit.
     */
    cpu_physwindow_init(0);
#endif

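    /* Create the kernel map entry reserve. */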
    vm_kernel_reserved_entry_init();

#if MACH_KDP
    kernel_bootstrap_kprintf("calling kdp_init\n");
    kdp_init();
#endif

#if CONFIG_COUNTERS
    pmc_bootstrap();
#endif

#if (defined(__i386__) || defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

#ifdef	IOKIT
    PE_init_iokit();
#endif

    (void) spllo();		/* Allow interruptions */

#if (defined(__i386__) || defined(__x86_64__)) && NCOPY_WINDOWS > 0
    /*
     * Create and initialize the copy window for processor 0
     * This also allocates window space for all other processors.
     * However, this is dependent on the number of processors - so this call
     * must be after IOKit has been started because IOKit performs processor
     * discovery.
     */
    cpu_userwindow_init(0);
#endif

#if (!defined(__i386__) && !defined(__x86_64__))
    if (turn_on_log_leaks && !new_nkdbufs)
        new_nkdbufs = 200000;
    start_kern_tracing(new_nkdbufs);
    if (turn_on_log_leaks)
        log_leaks = 1;
#endif

    /*
     *	Initialize the shared region module.
     */
    vm_shared_region_init();
    vm_commpage_init();
    vm_commpage_text_init();

#if CONFIG_MACF
    mac_policy_initmach();
#endif

    /*
     * Initialize the global used for permuting kernel
     * addresses that may be exported to userland as tokens
     * using VM_KERNEL_ADDRPERM(). Force the random number
     * to be odd to avoid mapping a non-zero
     * word-aligned address to zero via addition.
     */
    vm_kernel_addrperm = (vm_offset_t)early_random() | 1;

    /*
     *	Start the user bootstrap.
     */
#ifdef	MACH_BSD
    bsd_init();
#endif

    /*
     * Get rid of segments used to bootstrap kext loading. This removes
     * the KLD, PRELINK symtab, LINKEDIT, and symtab segments/load commands.
     */
#if 0
    OSKextRemoveKextBootstrap();
#endif

    serial_keyboard_init();		/* Start serial keyboard if wanted */

    vm_page_init_local_q();

    thread_bind(PROCESSOR_NULL);

    /*
     *	Become the pageout daemon.
     */
    vm_pageout();
    /*NOTREACHED*/
}
Example #3
/*
 * Now running in a thread.  Create the rest of the kernel threads
 * and the bootstrap task.
 */
void
start_kernel_threads(void)
{
	register int	i;

	/*
	 *	Create the idle threads and the other
	 *	service threads.
	 */
	for (i = 0; i < NCPUS; i++) {
	    if (machine_slot[i].is_cpu) {
		thread_t	th;
		spl_t	s;
		processor_t processor = cpu_to_processor(i);

		(void) thread_create_at(kernel_task, &th, idle_thread);
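		/*
		 * Bind the new idle thread to its processor and make it
		 * runnable: raise to splsched, take the thread lock, bind
		 * the thread, record it as the processor's idle thread,
		 * mark it TH_RUN, and enqueue it at the tail of the run
		 * queue.
		 */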
		s = splsched();
		thread_lock(th);
		thread_bind_locked(th, processor);
		processor->idle_thread = th;
		/*(void) thread_resume(th->top_act);*/
		th->state |= TH_RUN;
		thread_setrun(th, TRUE, TAIL_Q);
		thread_unlock(th);
		splx(s);
	    }
	}

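	/*
	 * Spin up the remaining kernel service threads: the reaper, the
	 * optional thread/task swappers, the scheduler thread, the timeout
	 * thread, and (under NORMA_VM) the VM object thread.
	 */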
	(void) kernel_thread(kernel_task, reaper_thread, (char *) 0);
#if	THREAD_SWAPPER
	(void) kernel_thread(kernel_task, swapin_thread, (char *) 0);
	(void) kernel_thread(kernel_task, swapout_thread, (char *) 0);
#endif	/* THREAD_SWAPPER */
#if	TASK_SWAPPER
	if (task_swap_on) {
		(void) kernel_thread(kernel_task, task_swapper, (char *) 0);
		(void) kernel_thread(kernel_task, task_swap_swapout_thread,
				     (char *) 0);
	}
#endif	/* TASK_SWAPPER */
	(void) kernel_thread(kernel_task, sched_thread, (char *) 0);
	(void) kernel_thread(kernel_task, timeout_thread, (char *) 0);
#if	NORMA_VM
	(void) kernel_thread(kernel_task, vm_object_thread, (char *) 0);
#endif	/* NORMA_VM */

	/*
	 *	Create the clock service.
	 */
	clock_service_create();

	/*
	 *	Create the device service.
	 */
	device_service_create();

	/*
	 *	Initialize distributed services, starting
	 *	with distributed IPC and progressing to any
	 *	services layered on top of that.
	 *
	 *	This stub exists even in non-NORMA systems.
	 */
	norma_bootstrap();

	/*
	 *	Initialize any testing services, blocking the main kernel
	 *	thread so that the in-kernel tests run without interference
	 *	from other boot-time activities. We will resume this thread
	 *	in kernel_test_thread().
	 */

#if	KERNEL_TEST

	/*
	 *	Initialize the lock that will be used to guard
	 *	variables that will be used in the test synchronization
	 *	scheme.
	 */ 
	simple_lock_init(&kernel_test_lock, ETAP_MISC_KERNEL_TEST);

#if	PARAGON860
	{
		char		*s;
		unsigned int	firstnode;

		/*
		 *	Only start up loopback tests on boot node.
		 */
		if ((s = (char *) getbootenv("BOOT_FIRST_NODE")) == 0)
			panic("startup");
		firstnode = atoi(s);
		(void) kernel_thread(kernel_task, kernel_test_thread,
				     (char * )(dipc_node_self() == 
						       (node_name) firstnode));
	}
#else	/* PARAGON860 */
	(void) kernel_thread(kernel_task, kernel_test_thread, (char *) 0);
#endif	/* PARAGON860 */
	{

		/*
		 *	The synchronization scheme uses a simple lock, two
		 *	booleans and the wakeup event. The wakeup event will
		 *	be posted by kernel_test_thread().
		 */
		spl_t s;
		s = splsched();
		simple_lock(&kernel_test_lock);
		while(!kernel_test_thread_sync_done){
			assert_wait((event_t) &start_kernel_threads, FALSE);
			start_kernel_threads_blocked = TRUE;
			simple_unlock(&kernel_test_lock);
			splx(s);
			thread_block((void (*)(void)) 0);
			s = splsched();
			simple_lock(&kernel_test_lock);
			start_kernel_threads_blocked = FALSE;
		}
		kernel_test_thread_sync_done = FALSE; /* Reset for next use */
		simple_unlock(&kernel_test_lock);
		splx(s);
	}


#endif	/* KERNEL_TEST */
	/*
	 *	Start the user bootstrap.
	 */
	bootstrap_create();

#if	XPR_DEBUG
	xprinit();		/* XXX */
#endif	/* XPR_DEBUG */

#if	NCPUS > 1
	/*
	 *	Create the shutdown thread.
	 */
	(void) kernel_thread(kernel_task, action_thread, (char *) 0);

	/*
	 *	Allow other CPUs to run.
	 *
	 * (this must be last, to allow bootstrap_create to fiddle with
	 *  its child thread before some cpu tries to run it)
	 */
	start_other_cpus();
#endif	/* NCPUS > 1 */

	/*
	 *	Become the pageout daemon.
	 */
	(void) spllo();
	vm_pageout();
	/*NOTREACHED*/
}