/*------------------------------------------------------------------------
 * The init process, this is where it all begins...
 *------------------------------------------------------------------------
 */
void initproc( void )               /* The beginning */
{
    kprintf( "\n\nCPSC 415, Jan 2012\n32 Bit Xeros 1.1\nLocated at: %x to %x\n",
        &entry, &end );

    /* Your code goes here */
    kprintf("Max addr is %d %x\n", maxaddr, maxaddr);

    kmeminit();
    kprintf("memory inited\n");

    dispatchinit();
    kprintf("dispatcher inited\n");

    contextinit();
    kprintf("context inited\n");

    create( root, PROC_STACK );
    kprintf("root process created\n");

    dispatch();

    kprintf("Returned to init, you should never get here!\n");

    /* This code should never be reached after you are done */
    for(;;); /* loop forever */
}
/*------------------------------------------------------------------------
 * The init process, this is where it all begins...
 *------------------------------------------------------------------------
 */
void initproc( void )               /* The beginning */
{
    int i;

    kprintf( "\n\nCPSC 415, 2015W1 \n32 Bit Xeros 1.1\nLocated at: %x to %x\n",
        &entry, &end);

    /* Add your code below this line and before next comment */

    // init memory
    kmeminit();

    void (*funcPtr)(void);
    funcPtr = &root;
    create(funcPtr, 8000);

    //dispatch();

    for (i = 0; i < 2000000; i++);

    /* Add all of your code before this comment and after the previous comment */

    /* This code should never be reached after you are done */
    kprintf("\n\nWhen the kernel is working properly ");
    kprintf("this line should never be printed!\n");
    for(;;) ; /* loop forever */
}
/*------------------------------------------------------------------------
 * The init process, this is where it all begins...
 *------------------------------------------------------------------------
 */
void initproc( void )               /* The beginning */
{
    kprintf( "\n\nCPSC 415, 2012W1\nA2 Solution Kernel\n32 Bit Xeros 1.1\nLocated at: %x to %x\n",
        &entry, &end );

    /* Your code goes here */
    kprintf("Max addr is %d %x\n", maxaddr, maxaddr);

    kmeminit();
    kprintf("Memory initialized.\n");

    dispatchinit();
    kprintf("Dispatcher initialized.\n");

    contextinit();
    kprintf("Context initialized.\n");

    deviceinit();
    kprintf("Devices initialized.\n");

    kprintf("Creating Idle Process\n");
    create(idleproc, PROC_STACK);

    kprintf("Creating Root Process\n");
    create( root, PROC_STACK );

    kprintf("System initialization completed\nSystem Starting\n");

    dispatch();

    kprintf("Returned to init, you should never get here!\n");

    /* This code should never be reached after you are done */
    for(;;); /* loop forever */
}
void entry()
{
    __asm_initialize__();
    kmeminit();
    shell();
    while(1);
}
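kmeminit() itself is not shown in any of the listings here. As a hedged sketch only (the _sketch names, the 16-byte alignment, and the free-list layout are assumptions, not code taken from any kernel above), a first-fit free-list allocator over a fixed heap region typically has this shape:

/* Hypothetical sketch of a free-list kernel heap; not any kernel's actual kmeminit(). */
typedef struct memheader {
    unsigned long     size;   /* bytes in this free block, header included */
    struct memheader *next;   /* next free block, in address order         */
} memheader_t;

static memheader_t *freelist;

void kmeminit_sketch( unsigned long heap_start, unsigned long heap_end )
{
    /* Align the managed region to 16-byte boundaries. */
    heap_start = (heap_start + 15UL) & ~15UL;
    heap_end   = heap_end & ~15UL;

    freelist       = (memheader_t *) heap_start;
    freelist->size = heap_end - heap_start;
    freelist->next = 0;
}

void *kmalloc_sketch( unsigned long nbytes )
{
    memheader_t **prev = &freelist;
    memheader_t  *blk;

    /* Reserve room for the header and keep allocations 16-byte aligned. */
    nbytes = (nbytes + sizeof(memheader_t) + 15UL) & ~15UL;

    for( blk = freelist; blk != 0; prev = &blk->next, blk = blk->next ) {
        if( blk->size >= nbytes ) {                  /* first fit */
            if( blk->size - nbytes >= 2 * sizeof(memheader_t) ) {
                /* Split: hand out the front of the block, keep the rest free. */
                memheader_t *rest = (memheader_t *) ((char *) blk + nbytes);
                rest->size = blk->size - nbytes;
                rest->next = blk->next;
                blk->size  = nbytes;
                *prev      = rest;
            } else {
                *prev = blk->next;                   /* use the whole block */
            }
            return (void *) (blk + 1);               /* memory starts after the header */
        }
    }

    return 0;                                        /* kernel heap exhausted */
}

A matching kfree() in such a scheme would push the block back onto the list, ideally coalescing with its neighbours; that part is omitted to keep the sketch short.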
/*------------------------------------------------------------------------
 * The init process, this is where it all begins...
 *------------------------------------------------------------------------
 */
initproc()                          /* The beginning */
{
    kprintf( "\n\nCPSC 415, Jan 2012\n32 Bit Xeros 1.1\nLocated at: %x to %x\n",
        &entry, &end );

    /* Your code goes here */
    kprintf("Max addr is %d %x\n", maxaddr, maxaddr);

    kmeminit();
    //kprintf("memory inited\n");

    dispatchinit();
    //kprintf("dispatcher inited\n");

    contextinit();
    //kprintf("context inited inited\n");

    //kprintf("Creating Idle Process\n");
    create(idleproc, PROC_STACK);

    // kprintf("Creating Root Process\n");
    //create( root, PROC_STACK );
    // create( test1, PROC_STACK );
    // create( test2, PROC_STACK );
    // create( test3, PROC_STACK );
    // create( test4, PROC_STACK );
    // create( test5, PROC_STACK );
    // create( test6, PROC_STACK );
    // create( test7, PROC_STACK );
    create( test8, PROC_STACK );

    kprintf("System initialization completed\n");

    device_init();

    dispatch();

    kprintf("Returned to init, you should never get here!\n");

    /* This code should never be reached after you are done */
    for(;;); /* loop forever */
}
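Several of the variants above create an idle process that is not included in this listing. A minimal sketch, assuming the usual convention that the idle process is always runnable and simply halts the CPU until the next interrupt (the hlt instruction assumes an x86 target):

/* Hypothetical sketch; the real idleproc is not part of these listings. */
void idleproc( void )
{
    for(;;) {
        __asm__ volatile("hlt");    /* assumption: x86, wait for the next interrupt */
    }
}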
void
uvm_init()
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */
	memset(&uvm, 0, sizeof(uvm));
#ifndef OSKIT
	averunnable.fscale = FSCALE;
#endif

	/*
	 * step 2: init the page sub-system. this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks). available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system. allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures. this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module. the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator. after this call the
	 * kernel memory allocator (malloc) can be used.
	 */
	kmeminit();

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory systems (both amap and anons)
	 */
	amap_init();		/* init amap module */
	uvm_anon_init();	/* allocate initial anons */

	/*
	 * the VM system is now up! now that malloc is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uvm_page_rehash();
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * done!
	 */
	return;
}
void
uvm_init()
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */
	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;

	/*
	 * step 2: init the page sub-system. this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks). available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system. allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures. this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module. the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator. after this call the
	 * kernel memory allocator (malloc) can be used.
	 */
	uvm_km_page_init();
	kmeminit();
#if !defined(__HAVE_PMAP_DIRECT)
	kthread_create_deferred(uvm_km_createthread, NULL);
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory system
	 */
	amap_init();		/* init amap module */

	/*
	 * the VM system is now up! now that malloc is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uvm_page_rehash();
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * reserve some unmapped space for malloc/pool use after free usage
	 */
#ifdef DEADBEEF0
	kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF0);
#endif
#ifdef DEADBEEF1
	kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x\n", DEADBEEF1);
#endif

	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();
}
void bsd_init(void) { struct uthread *ut; unsigned int i; struct vfs_context context; kern_return_t ret; struct ucred temp_cred; struct posix_cred temp_pcred; #if NFSCLIENT || CONFIG_IMAGEBOOT boolean_t netboot = FALSE; #endif #define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */ throttle_init(); printf(copyright); bsd_init_kprintf("calling kmeminit\n"); kmeminit(); bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); #if CONFIG_DEV_KMEM bsd_init_kprintf("calling dev_kmem_init\n"); dev_kmem_init(); #endif /* Initialize kauth subsystem before instancing the first credential */ bsd_init_kprintf("calling kauth_init\n"); kauth_init(); /* Initialize process and pgrp structures. */ bsd_init_kprintf("calling procinit\n"); procinit(); /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); kernproc = &proc0; /* implicitly bzero'ed */ /* kernel_task->proc = kernproc; */ set_bsdtask_info(kernel_task,(void *)kernproc); /* give kernproc a name */ bsd_init_kprintf("calling process_name\n"); process_name("kernel_task", kernproc); /* allocate proc lock group attribute and group */ bsd_init_kprintf("calling lck_grp_attr_alloc_init\n"); proc_lck_grp_attr= lck_grp_attr_alloc_init(); proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); #if CONFIG_FINE_LOCK_GROUPS proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock", proc_lck_grp_attr); proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); #endif /* Allocate proc lock attribute */ proc_lck_attr = lck_attr_alloc_init(); #if 0 #if __PROC_INTERNAL_DEBUG lck_attr_setdebug(proc_lck_attr); #endif #endif #if CONFIG_FINE_LOCK_GROUPS proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr); #else proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr); #endif assert(bsd_simul_execs != 0); execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); execargs_cache_size = bsd_simul_execs; execargs_free_count = bsd_simul_execs; execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); if (current_task() != kernel_task) printf("bsd_init: We have a problem, " "current task is not kernel task\n"); bsd_init_kprintf("calling get_bsdthread_info\n"); ut = (uthread_t)get_bsdthread_info(current_thread()); #if CONFIG_MACF /* * Initialize the MAC Framework */ mac_policy_initbsd(); kernproc->p_mac_enforce = 0; #if defined (__i386__) || defined (__x86_64__) /* * We currently only support this on i386/x86_64, as that is the * only lock code we have instrumented so far. 
*/ check_policy_init(policy_check_flags); #endif #endif /* MAC */ /* Initialize System Override call */ init_system_override(); /* * Create process 0. */ proc_list_lock(); LIST_INSERT_HEAD(&allproc, kernproc, p_list); kernproc->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); #ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr); #else lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr); #endif /* There is no other bsd thread this point and is safe without pgrp lock */ LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist); kernproc->p_listflag |= P_LIST_INPGRP; kernproc->p_pgrpid = 0; kernproc->p_uniqueid = 0; pgrp0.pg_session = &session0; pgrp0.pg_membercnt = 1; session0.s_count = 1; session0.s_leader = kernproc; session0.s_listflags = 0; #ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr); #else lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr); #endif LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash); proc_list_unlock(); kernproc->task = kernel_task; kernproc->p_stat = SRUN; kernproc->p_flag = P_SYSTEM; kernproc->p_lflag = 0; kernproc->p_ladvflag = 0; #if DEVELOPMENT || DEBUG if (bootarg_disable_aslr) kernproc->p_flag |= P_DISABLE_ASLR; #endif kernproc->p_nice = NZERO; kernproc->p_pptr = kernproc; TAILQ_INIT(&kernproc->p_uthlist); TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list); kernproc->sigwait = FALSE; kernproc->sigwait_thread = THREAD_NULL; kernproc->exit_thread = THREAD_NULL; kernproc->p_csflags = CS_VALID; /* * Create credential. This also Initializes the audit information. */ bsd_init_kprintf("calling bzero\n"); bzero(&temp_cred, sizeof(temp_cred)); bzero(&temp_pcred, sizeof(temp_pcred)); temp_pcred.cr_ngroups = 1; /* kern_proc, shouldn't call up to DS for group membership */ temp_pcred.cr_flags = CRF_NOMEMBERD; temp_cred.cr_audit.as_aia_p = audit_default_aia_p; bsd_init_kprintf("calling kauth_cred_create\n"); /* * We have to label the temp cred before we create from it to * properly set cr_ngroups, or the create will fail. */ posix_cred_label(&temp_cred, &temp_pcred); kernproc->p_ucred = kauth_cred_create(&temp_cred); /* update cred on proc */ PROC_UPDATE_CREDS_ONPROC(kernproc); /* give the (already exisiting) initial thread a reference on it */ bsd_init_kprintf("calling kauth_cred_ref\n"); kauth_cred_ref(kernproc->p_ucred); ut->uu_context.vc_ucred = kernproc->p_ucred; ut->uu_context.vc_thread = current_thread(); TAILQ_INIT(&kernproc->p_aio_activeq); TAILQ_INIT(&kernproc->p_aio_doneq); kernproc->p_aio_total_count = 0; kernproc->p_aio_active_count = 0; bsd_init_kprintf("calling file_lock_init\n"); file_lock_init(); #if CONFIG_MACF mac_cred_label_associate_kernel(kernproc->p_ucred); #endif /* Create the file descriptor table. */ kernproc->p_fd = &filedesc0; filedesc0.fd_cmask = cmask; filedesc0.fd_knlistsize = -1; filedesc0.fd_knlist = NULL; filedesc0.fd_knhash = NULL; filedesc0.fd_knhashmask = 0; /* Create the limits structures. 
*/ kernproc->p_limit = &limit0; for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++) limit0.pl_rlimit[i].rlim_cur = limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid; limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; limit0.pl_refcnt = 1; kernproc->p_stats = &pstats0; kernproc->p_sigacts = &sigacts0; /* * Charge root for one process: launchd. */ bsd_init_kprintf("calling chgproccnt\n"); (void)chgproccnt(0, 1); /* * Allocate a kernel submap for pageable memory * for temporary copying (execve()). */ { vm_offset_t minimum; bsd_init_kprintf("calling kmem_suballoc\n"); assert(bsd_pageable_map_size != 0); ret = kmem_suballoc(kernel_map, &minimum, (vm_size_t)bsd_pageable_map_size, TRUE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD), &bsd_pageable_map); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to allocate bsd pageable map"); } /* * Initialize buffers and hash links for buffers * * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must * happen after a credential has been associated with * the kernel task. */ bsd_init_kprintf("calling bsd_bufferinit\n"); bsd_bufferinit(); /* Initialize the execve() semaphore */ bsd_init_kprintf("calling semaphore_create\n"); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to create execve semaphore"); /* * Initialize the calendar. */ bsd_init_kprintf("calling IOKitInitializeTime\n"); IOKitInitializeTime(); bsd_init_kprintf("calling ubc_init\n"); ubc_init(); /* * Initialize device-switches. */ bsd_init_kprintf("calling devsw_init() \n"); devsw_init(); /* Initialize the file systems. */ bsd_init_kprintf("calling vfsinit\n"); vfsinit(); #if CONFIG_PROC_UUID_POLICY /* Initial proc_uuid_policy subsystem */ bsd_init_kprintf("calling proc_uuid_policy_init()\n"); proc_uuid_policy_init(); #endif #if SOCKETS /* Initialize per-CPU cache allocator */ mcache_init(); /* Initialize mbuf's. */ bsd_init_kprintf("calling mbinit\n"); mbinit(); net_str_id_init(); /* for mbuf tags */ #endif /* SOCKETS */ /* * Initializes security event auditing. * XXX: Should/could this occur later? */ #if CONFIG_AUDIT bsd_init_kprintf("calling audit_init\n"); audit_init(); #endif /* Initialize kqueues */ bsd_init_kprintf("calling knote_init\n"); knote_init(); /* Initialize for async IO */ bsd_init_kprintf("calling aio_init\n"); aio_init(); /* Initialize pipes */ bsd_init_kprintf("calling pipeinit\n"); pipeinit(); /* Initialize SysV shm subsystem locks; the subsystem proper is * initialized through a sysctl. */ #if SYSV_SHM bsd_init_kprintf("calling sysv_shm_lock_init\n"); sysv_shm_lock_init(); #endif #if SYSV_SEM bsd_init_kprintf("calling sysv_sem_lock_init\n"); sysv_sem_lock_init(); #endif #if SYSV_MSG bsd_init_kprintf("sysv_msg_lock_init\n"); sysv_msg_lock_init(); #endif bsd_init_kprintf("calling pshm_lock_init\n"); pshm_lock_init(); bsd_init_kprintf("calling psem_lock_init\n"); psem_lock_init(); pthread_init(); /* POSIX Shm and Sem */ bsd_init_kprintf("calling pshm_cache_init\n"); pshm_cache_init(); bsd_init_kprintf("calling psem_cache_init\n"); psem_cache_init(); bsd_init_kprintf("calling time_zone_slock_init\n"); time_zone_slock_init(); bsd_init_kprintf("calling select_waitq_init\n"); select_waitq_init(); /* * Initialize protocols. 
Block reception of incoming packets * until everything is ready. */ bsd_init_kprintf("calling sysctl_register_fixed\n"); sysctl_register_fixed(); bsd_init_kprintf("calling sysctl_mib_init\n"); sysctl_mib_init(); #if NETWORKING bsd_init_kprintf("calling dlil_init\n"); dlil_init(); bsd_init_kprintf("calling proto_kpi_init\n"); proto_kpi_init(); #endif /* NETWORKING */ #if SOCKETS bsd_init_kprintf("calling socketinit\n"); socketinit(); bsd_init_kprintf("calling domaininit\n"); domaininit(); iptap_init(); #if FLOW_DIVERT flow_divert_init(); #endif /* FLOW_DIVERT */ #endif /* SOCKETS */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; #if CONFIG_FREEZE #ifndef CONFIG_MEMORYSTATUS #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS" #endif /* Initialise background freezing */ bsd_init_kprintf("calling memorystatus_freeze_init\n"); memorystatus_freeze_init(); #endif #if CONFIG_MEMORYSTATUS /* Initialize kernel memory status notifications */ bsd_init_kprintf("calling memorystatus_init\n"); memorystatus_init(); #endif /* CONFIG_MEMORYSTATUS */ bsd_init_kprintf("calling macx_init\n"); macx_init(); bsd_init_kprintf("calling acct_init\n"); acct_init(); #ifdef GPROF /* Initialize kernel profiling. */ kmstartup(); #endif bsd_init_kprintf("calling bsd_autoconf\n"); bsd_autoconf(); #if CONFIG_DTRACE dtrace_postinit(); #endif /* * We attach the loopback interface *way* down here to ensure * it happens after autoconf(), otherwise it becomes the * "primary" interface. */ #include <loop.h> #if NLOOP > 0 bsd_init_kprintf("calling loopattach\n"); loopattach(); /* XXX */ #endif #if NGIF /* Initialize gif interface (after lo0) */ gif_init(); #endif #if PFLOG /* Initialize packet filter log interface */ pfloginit(); #endif /* PFLOG */ #if NETHER > 0 /* Register the built-in dlil ethernet interface family */ bsd_init_kprintf("calling ether_family_init\n"); ether_family_init(); #endif /* ETHER */ #if NETWORKING /* Call any kext code that wants to run just after network init */ bsd_init_kprintf("calling net_init_run\n"); net_init_run(); #if CONTENT_FILTER cfil_init(); #endif #if PACKET_MANGLER pkt_mnglr_init(); #endif #if NECP /* Initialize Network Extension Control Policies */ necp_init(); #endif netagent_init(); /* register user tunnel kernel control handler */ utun_register_control(); #if IPSEC ipsec_register_control(); #endif /* IPSEC */ netsrc_init(); nstat_init(); tcp_cc_init(); #if MPTCP mptcp_control_register(); #endif /* MPTCP */ #endif /* NETWORKING */ bsd_init_kprintf("calling vnode_pager_bootstrap\n"); vnode_pager_bootstrap(); bsd_init_kprintf("calling inittodr\n"); inittodr(0); /* Mount the root file system. */ while( TRUE) { int err; bsd_init_kprintf("calling setconf\n"); setconf(); #if NFSCLIENT netboot = (mountroot == netboot_mountroot); #endif bsd_init_kprintf("vfs_mountroot\n"); if (0 == (err = vfs_mountroot())) break; rootdevice[0] = '\0'; #if NFSCLIENT if (netboot) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: failed to mount network root, error %d, %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } #endif printf("cannot mount root, errno = %d\n", err); boothowto |= RB_ASKNAME; } IOSecureBSDRoot(rootdevice); context.vc_thread = current_thread(); context.vc_ucred = kernproc->p_ucred; mountlist.tqh_first->mnt_flag |= MNT_ROOTFS; bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. 
Set fdp->fd_fd.fd_cdir to reference it. */ if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); rootvnode->v_flag |= VROOT; (void)vnode_ref(rootvnode); (void)vnode_put(rootvnode); filedesc0.fd_cdir = rootvnode; #if NFSCLIENT if (netboot) { int err; netboot = TRUE; /* post mount setup */ if ((err = netboot_setup()) != 0) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: NetBoot could not find root, error %d: %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } } #endif #if CONFIG_IMAGEBOOT /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it */ if (netboot == FALSE && imageboot_needed()) { /* * An image was found. No turning back: we're booted * with a kernel from the disk image. */ imageboot_setup(); } #endif /* CONFIG_IMAGEBOOT */ /* set initial time; all other resource data is already zero'ed */ microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start); #if DEVFS { char mounthere[] = "/dev"; /* !const because of internal casting */ bsd_init_kprintf("calling devfs_kernel_mount\n"); devfs_kernel_mount(mounthere); } #endif /* DEVFS */ /* Initialize signal state for process 0. */ bsd_init_kprintf("calling siginit\n"); siginit(kernproc); bsd_init_kprintf("calling bsd_utaskbootstrap\n"); bsd_utaskbootstrap(); #if defined(__LP64__) kernproc->p_flag |= P_LP64; #endif pal_kernel_announce(); bsd_init_kprintf("calling mountroot_post_hook\n"); /* invoke post-root-mount hook */ if (mountroot_post_hook != NULL) mountroot_post_hook(); #if 0 /* not yet */ consider_zone_gc(FALSE); #endif bsd_init_kprintf("done\n"); }
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: zero the uvm structure
	 */
	memset(&uvm, 0, sizeof(uvm));
	averunnable.fscale = FSCALE;

	uvm_amap_init();

	/*
	 * step 2: init the page sub-system. this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks). available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system. allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures. this
	 * includes setting up the kernel_map/kernel_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module. the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator. after this call the
	 * kernel memory allocator (malloc) can be used. this includes
	 * setting up the kmem_map.
	 */
	kmeminit();

#ifdef DEBUG
	debug_init();
#endif

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init the uvm_loan() facility.
	 */
	uvm_loan_init();

	/*
	 * Initialize pools. This must be done before anyone manipulates
	 * any vm_maps because we use a pool for some map entry structures.
	 */
	pool_subsystem_init();

	/*
	 * init slab memory allocator kmem(9).
	 */
	kmem_init();

	/*
	 * the VM system is now up! now that kmem is up we can resize the
	 * <obj,off> => <page> hash table for general use and enable paging
	 * of kernel objects.
	 */
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	uvmpdpol_reinit();

	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();

	uvm_uarea_init();

	/*
	 * init readahead module
	 */
	uvm_ra_init();
}
void
uvm_init(void)
{
	vaddr_t kvm_start, kvm_end;

	/*
	 * step 0: ensure that the hardware set the page size
	 */
	if (uvmexp.pagesize == 0) {
		panic("uvm_init: page size not set");
	}

	/*
	 * step 1: set up stats.
	 */
	averunnable.fscale = FSCALE;

	/*
	 * step 2: init the page sub-system. this includes allocating the
	 * vm_page structures, and setting up all the page queues (and
	 * locks). available memory will be put in the "free" queue.
	 * kvm_start and kvm_end will be set to the area of kernel virtual
	 * memory which is available for general use.
	 */
	uvm_page_init(&kvm_start, &kvm_end);

	/*
	 * step 3: init the map sub-system. allocates the static pool of
	 * vm_map_entry structures that are used for "special" kernel maps
	 * (e.g. kernel_map, kmem_map, etc...).
	 */
	uvm_map_init();

	/*
	 * step 4: setup the kernel's virtual memory data structures. this
	 * includes setting up the kernel_map/kernel_object and the kmem_map/
	 * kmem_object.
	 */
	uvm_km_init(kvm_start, kvm_end);

	/*
	 * step 5: init the pmap module. the pmap module is free to allocate
	 * memory for its private use (e.g. pvlists).
	 */
	pmap_init();

	/*
	 * step 6: init the kernel memory allocator. after this call the
	 * kernel memory allocator (malloc) can be used.
	 */
	kmeminit();

	/*
	 * step 6.5: init the dma allocator, which is backed by pools.
	 */
	dma_alloc_init();

	/*
	 * step 7: init all pagers and the pager_map.
	 */
	uvm_pager_init();

	/*
	 * step 8: init anonymous memory system
	 */
	amap_init();		/* init amap module */

	/*
	 * step 9: init uvm_km_page allocator memory.
	 */
	uvm_km_page_init();

	/*
	 * the VM system is now up! now that malloc is up we can
	 * enable paging of kernel objects.
	 */
	uao_create(VM_MAX_KERNEL_ADDRESS - VM_MIN_KERNEL_ADDRESS,
	    UAO_FLAG_KERNSWAP);

	/*
	 * reserve some unmapped space for malloc/pool use after free usage
	 */
#ifdef DEADBEEF0
	kvm_start = trunc_page(DEADBEEF0) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF0);
#endif
#ifdef DEADBEEF1
	kvm_start = trunc_page(DEADBEEF1) - PAGE_SIZE;
	if (uvm_map(kernel_map, &kvm_start, 3 * PAGE_SIZE,
	    NULL, UVM_UNKNOWN_OFFSET, 0, UVM_MAPFLAG(UVM_PROT_NONE,
	    UVM_PROT_NONE, UVM_INH_NONE, UVM_ADV_RANDOM, UVM_FLAG_FIXED)))
		panic("uvm_init: cannot reserve dead beef @0x%x", DEADBEEF1);
#endif

	/*
	 * init anonymous memory systems
	 */
	uvm_anon_init();

#ifndef SMALL_KERNEL
	/*
	 * Switch kernel and kmem_map over to a best-fit allocator,
	 * instead of walking the tree.
	 */
	uvm_map_set_uaddr(kernel_map, &kernel_map->uaddr_any[3],
	    uaddr_bestfit_create(vm_map_min(kernel_map),
	    vm_map_max(kernel_map)));
	uvm_map_set_uaddr(kmem_map, &kmem_map->uaddr_any[3],
	    uaddr_bestfit_create(vm_map_min(kmem_map),
	    vm_map_max(kmem_map)));
#endif /* !SMALL_KERNEL */
}
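The DEADBEEF reservations above depend on trunc_page() rounding an address down to a page boundary. A self-contained illustration of that arithmetic (the 4 KB page size and the _sketch names are assumptions; the real macros come from the machine-dependent headers):

#include <stdio.h>

/* Illustrative only; not the kernel's actual definitions. */
#define PAGE_SIZE_SKETCH 4096UL
#define PAGE_MASK_SKETCH (PAGE_SIZE_SKETCH - 1)
#define trunc_page_sketch(va) ((va) & ~PAGE_MASK_SKETCH)

int main(void)
{
    unsigned long va = 0xdeadbeefUL;

    /* 0xdeadbeef rounds down to 0xdeadb000, so reserving 3 * PAGE_SIZE
     * starting one page below that covers the page holding 0xdeadbeef
     * plus a guard page on either side. */
    printf("%#lx -> %#lx\n", va, trunc_page_sketch(va));
    return 0;
}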
void _main()
{
    mem_extent_t *ramext;
    u8 sn[6];
    u32 cpu_clk_hz = 0;
    rtc_time_t tm;
    s32 ret;

    /* This section runs with interrupts disabled. The boot console is not
       available in this section. */
    preempt_disable();

    /* Copy kernel read/write data areas into kernel RAM */
    memcpy(&_sdata, &_etext, &_edata - &_sdata);    /* Copy .data section to kernel RAM */
    bzero(&_sbss, &_ebss - &_sbss);                 /* Initialise .bss section          */

    /* Begin platform initialisation */
    if(plat_init() != SUCCESS)
        boot_early_fail(1);

    if(plat_mem_detect() != SUCCESS)    /* Detect installed RAM, initialise memory extents */
        boot_early_fail(2);

    /* Initialise kernel slabs */
    slab_init(&_ebss);                  /* Slabs sit after the .bss section */

    /* Initialise kernel heap */
    kmeminit(g_slab_end,
             mem_get_highest_addr(MEM_EXTENT_KERN | MEM_EXTENT_RAM) - KERNEL_STACK_LEN);

    /* Initialise user heap. Place it in the largest user RAM extent. */
    ramext = mem_get_largest_extent(MEM_EXTENT_USER | MEM_EXTENT_RAM);
    umeminit(ramext->base, ramext->base + ramext->len);

    /* By default, all exceptions cause a context-dump followed by a halt. */
    cpu_irq_init_table();

    /* Initialise device tree */
    if(dev_init() != SUCCESS)
        boot_early_fail(3);

    /* It's not yet possible to initialise the real (platform) console because devices
       haven't been enumerated and interrupts are disabled. In the meantime, create a
       temporary in-memory kernel console device to capture output from the boot
       process. */
    if(early_boot_console_init() != SUCCESS)
        boot_early_fail(4);

    printf("%s\nplatform: %s\n", g_warmup_message, plat_get_name());
    printf("%uMB RAM detected\n",
           (mem_get_total_size(MEM_EXTENT_USER | MEM_EXTENT_RAM)
            + mem_get_total_size(MEM_EXTENT_KERN | MEM_EXTENT_RAM)) >> 20);

    /* === Initialise peripherals - phase 2 === */
    if(dev_enumerate() != SUCCESS)
        boot_early_fail(5);

    /* Initialise the console */
    if(plat_console_init() != SUCCESS)
        boot_early_fail(6);

    ret = sched_init("[sys]");      /* Init scheduler and create system process */

    /* Enable interrupts and continue booting */
    preempt_enable();

    /* Copy the contents of the temporary console to the real console; close the
       temp console. */
    early_boot_console_close();

    /* Activate red LED while the boot process continues */
    plat_led_off(LED_ALL);
    plat_led_on(LED_RED);

    /* Device enumeration is done; interrupts are enabled, and the console should be
       functional. Booting continues... */

    /* Zero any user RAM extents. This happens after init'ing the DUART, because
       beeper. */
    /*
    put("Clearing user RAM: ");
    mem_zero_extents(MEM_EXTENT_USER | MEM_EXTENT_RAM);
    puts("done");
    */

    /* Initialise the block cache, then scan mass-storage devices for partitions */
    block_cache_init(2039);
    partition_init();

    boot_list_mass_storage();
    boot_list_partitions();

    /* ret is set by the call to sched_init(), above */
    if(ret != SUCCESS)
        printf("sched: init failed: %s\n", kstrerror(ret));

    ret = vfs_init();
    if(ret != SUCCESS)
        printf("vfs: init failed: %s\n", kstrerror(ret));

    /* Display approximate CPU clock speed */
    if(plat_get_cpu_clock(&cpu_clk_hz) == SUCCESS)
        printf("\nCPU fclk ~%2u.%uMHz\n", cpu_clk_hz / 1000000,
               (cpu_clk_hz % 1000000) / 100000);

    /* Initialise tick handler */
    tick_init();

    /* Display memory information */
    printf("%u bytes of kernel heap memory available\n"
           "%u bytes of user memory available\n", kfreemem(), ufreemem());

    /* Display platform serial number */
    if(plat_get_serial_number(sn) == SUCCESS)
    {
        printf("Hardware serial number %02X%02X%02X%02X%02X%02X\n",
               sn[0], sn[1], sn[2], sn[3], sn[4], sn[5]);
    }

    /* Display the current date and time */
    if(get_time(&tm) == SUCCESS)
    {
        char timebuf[12], datebuf[32];

        if((time_iso8601(&tm, timebuf, sizeof(timebuf)) == SUCCESS)
           && (date_long(&tm, datebuf, sizeof(datebuf)) == SUCCESS))
            printf("%s %s\n", timebuf, datebuf);
        else
            puts("Date/time invalid - please set clock");
    }

    /* Create housekeeper process */
    // proc_create(0, 0, "[hk]", NULL, housekeeper, 0, 0, PROC_TYPE_KERNEL, NULL, NULL);

    /* Initialise networking system */
    ret = net_init();
    if(ret != SUCCESS)
        printf("net: init failed: %s\n", kstrerror(ret));

    /* Startup complete - activate green LED */
    plat_led_off(LED_RED);
    plat_led_on(LED_GREEN);

    monitor();      /* start interactive "shell" thing */

    cpu_halt();     /* should never be reached */
}
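As a worked example of the clock-speed line in _main() above (the 7372800 Hz figure is only an illustration): cpu_clk_hz / 1000000 = 7 and (cpu_clk_hz % 1000000) / 100000 = 3, so the kernel would print "CPU fclk ~ 7.3MHz", with the %2u conversion padding the whole-MHz figure to two characters.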
/* * This function is called very early on in the Mach startup, from the * function start_kernel_threads() in osfmk/kern/startup.c. It's called * in the context of the current (startup) task using a call to the * function kernel_thread_create() to jump into start_kernel_threads(). * Internally, kernel_thread_create() calls thread_create_internal(), * which calls uthread_alloc(). The function of uthread_alloc() is * normally to allocate a uthread structure, and fill out the uu_sigmask, * uu_context fields. It skips filling these out in the case of the "task" * being "kernel_task", because the order of operation is inverted. To * account for that, we need to manually fill in at least the contents * of the uu_context.vc_ucred field so that the uthread structure can be * used like any other. */ void bsd_init(void) { struct uthread *ut; unsigned int i; #if __i386__ || __x86_64__ int error; #endif struct vfs_context context; kern_return_t ret; struct ucred temp_cred; #define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */ kernel_flock = funnel_alloc(KERNEL_FUNNEL); if (kernel_flock == (funnel_t *)0 ) { panic("bsd_init: Failed to allocate kernel funnel"); } printf(copyright); bsd_init_kprintf("calling kmeminit\n"); kmeminit(); bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); /* Initialize kauth subsystem before instancing the first credential */ bsd_init_kprintf("calling kauth_init\n"); kauth_init(); /* Initialize process and pgrp structures. */ bsd_init_kprintf("calling procinit\n"); procinit(); /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); kernproc = &proc0; /* implicitly bzero'ed */ /* kernel_task->proc = kernproc; */ set_bsdtask_info(kernel_task,(void *)kernproc); /* give kernproc a name */ bsd_init_kprintf("calling process_name\n"); process_name("kernel_task", kernproc); /* allocate proc lock group attribute and group */ bsd_init_kprintf("calling lck_grp_attr_alloc_init\n"); proc_lck_grp_attr= lck_grp_attr_alloc_init(); proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); #ifndef CONFIG_EMBEDDED proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); #endif /* Allocate proc lock attribute */ proc_lck_attr = lck_attr_alloc_init(); #if 0 #if __PROC_INTERNAL_DEBUG lck_attr_setdebug(proc_lck_attr); #endif #endif #ifdef CONFIG_EMBEDDED proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr); #else proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr); #endif execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); execargs_cache_size = bsd_simul_execs; execargs_free_count = bsd_simul_execs; execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); if (current_task() != kernel_task) printf("bsd_init: We have a problem, " "current task is not 
kernel task\n"); bsd_init_kprintf("calling get_bsdthread_info\n"); ut = (uthread_t)get_bsdthread_info(current_thread()); #if CONFIG_MACF /* * Initialize the MAC Framework */ mac_policy_initbsd(); kernproc->p_mac_enforce = 0; #endif /* MAC */ /* * Create process 0. */ proc_list_lock(); LIST_INSERT_HEAD(&allproc, kernproc, p_list); kernproc->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); #ifdef CONFIG_EMBEDDED lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr); #else lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr); #endif /* There is no other bsd thread this point and is safe without pgrp lock */ LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist); kernproc->p_listflag |= P_LIST_INPGRP; kernproc->p_pgrpid = 0; pgrp0.pg_session = &session0; pgrp0.pg_membercnt = 1; session0.s_count = 1; session0.s_leader = kernproc; session0.s_listflags = 0; #ifdef CONFIG_EMBEDDED lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr); #else lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr); #endif LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash); proc_list_unlock(); #if CONFIG_LCTX kernproc->p_lctx = NULL; #endif kernproc->task = kernel_task; kernproc->p_stat = SRUN; kernproc->p_flag = P_SYSTEM; kernproc->p_nice = NZERO; kernproc->p_pptr = kernproc; TAILQ_INIT(&kernproc->p_uthlist); TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list); kernproc->sigwait = FALSE; kernproc->sigwait_thread = THREAD_NULL; kernproc->exit_thread = THREAD_NULL; kernproc->p_csflags = CS_VALID; /* * Create credential. This also Initializes the audit information. */ bsd_init_kprintf("calling bzero\n"); bzero(&temp_cred, sizeof(temp_cred)); temp_cred.cr_ngroups = 1; temp_cred.cr_audit.as_aia_p = &audit_default_aia; /* XXX the following will go away with cr_au */ temp_cred.cr_au.ai_auid = AU_DEFAUDITID; bsd_init_kprintf("calling kauth_cred_create\n"); kernproc->p_ucred = kauth_cred_create(&temp_cred); /* give the (already exisiting) initial thread a reference on it */ bsd_init_kprintf("calling kauth_cred_ref\n"); kauth_cred_ref(kernproc->p_ucred); ut->uu_context.vc_ucred = kernproc->p_ucred; ut->uu_context.vc_thread = current_thread(); TAILQ_INIT(&kernproc->p_aio_activeq); TAILQ_INIT(&kernproc->p_aio_doneq); kernproc->p_aio_total_count = 0; kernproc->p_aio_active_count = 0; bsd_init_kprintf("calling file_lock_init\n"); file_lock_init(); #if CONFIG_MACF mac_cred_label_associate_kernel(kernproc->p_ucred); mac_task_label_update_cred (kernproc->p_ucred, (struct task *) kernproc->task); #endif /* Create the file descriptor table. */ filedesc0.fd_refcnt = 1+1; /* +1 so shutdown will not _FREE_ZONE */ kernproc->p_fd = &filedesc0; filedesc0.fd_cmask = cmask; filedesc0.fd_knlistsize = -1; filedesc0.fd_knlist = NULL; filedesc0.fd_knhash = NULL; filedesc0.fd_knhashmask = 0; /* Create the limits structures. */ kernproc->p_limit = &limit0; for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++) limit0.pl_rlimit[i].rlim_cur = limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid; limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; limit0.pl_refcnt = 1; kernproc->p_stats = &pstats0; kernproc->p_sigacts = &sigacts0; /* * Charge root for two processes: init and mach_init. 
*/ bsd_init_kprintf("calling chgproccnt\n"); (void)chgproccnt(0, 1); /* * Allocate a kernel submap for pageable memory * for temporary copying (execve()). */ { vm_offset_t minimum; bsd_init_kprintf("calling kmem_suballoc\n"); ret = kmem_suballoc(kernel_map, &minimum, (vm_size_t)bsd_pageable_map_size, TRUE, VM_FLAGS_ANYWHERE, &bsd_pageable_map); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to allocate bsd pageable map"); } /* * Initialize buffers and hash links for buffers * * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must * happen after a credential has been associated with * the kernel task. */ bsd_init_kprintf("calling bsd_bufferinit\n"); bsd_bufferinit(); /* Initialize the execve() semaphore */ bsd_init_kprintf("calling semaphore_create\n"); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to create execve semaphore"); /* * Initialize the calendar. */ bsd_init_kprintf("calling IOKitInitializeTime\n"); IOKitInitializeTime(); if (turn_on_log_leaks && !new_nkdbufs) new_nkdbufs = 200000; start_kern_tracing(new_nkdbufs); if (turn_on_log_leaks) log_leaks = 1; bsd_init_kprintf("calling ubc_init\n"); ubc_init(); /* Initialize the file systems. */ bsd_init_kprintf("calling vfsinit\n"); vfsinit(); #if SOCKETS /* Initialize per-CPU cache allocator */ mcache_init(); /* Initialize mbuf's. */ bsd_init_kprintf("calling mbinit\n"); mbinit(); net_str_id_init(); /* for mbuf tags */ #endif /* SOCKETS */ /* * Initializes security event auditing. * XXX: Should/could this occur later? */ #if CONFIG_AUDIT bsd_init_kprintf("calling audit_init\n"); audit_init(); #endif /* Initialize kqueues */ bsd_init_kprintf("calling knote_init\n"); knote_init(); /* Initialize for async IO */ bsd_init_kprintf("calling aio_init\n"); aio_init(); /* Initialize pipes */ bsd_init_kprintf("calling pipeinit\n"); pipeinit(); /* Initialize SysV shm subsystem locks; the subsystem proper is * initialized through a sysctl. */ #if SYSV_SHM bsd_init_kprintf("calling sysv_shm_lock_init\n"); sysv_shm_lock_init(); #endif #if SYSV_SEM bsd_init_kprintf("calling sysv_sem_lock_init\n"); sysv_sem_lock_init(); #endif #if SYSV_MSG bsd_init_kprintf("sysv_msg_lock_init\n"); sysv_msg_lock_init(); #endif bsd_init_kprintf("calling pshm_lock_init\n"); pshm_lock_init(); bsd_init_kprintf("calling psem_lock_init\n"); psem_lock_init(); pthread_init(); /* POSIX Shm and Sem */ bsd_init_kprintf("calling pshm_cache_init\n"); pshm_cache_init(); bsd_init_kprintf("calling psem_cache_init\n"); psem_cache_init(); bsd_init_kprintf("calling time_zone_slock_init\n"); time_zone_slock_init(); /* Stack snapshot facility lock */ stackshot_lock_init(); /* * Initialize protocols. Block reception of incoming packets * until everything is ready. */ bsd_init_kprintf("calling sysctl_register_fixed\n"); sysctl_register_fixed(); bsd_init_kprintf("calling sysctl_mib_init\n"); sysctl_mib_init(); #if NETWORKING bsd_init_kprintf("calling dlil_init\n"); dlil_init(); bsd_init_kprintf("calling proto_kpi_init\n"); proto_kpi_init(); #endif /* NETWORKING */ #if SOCKETS bsd_init_kprintf("calling socketinit\n"); socketinit(); bsd_init_kprintf("calling domaininit\n"); domaininit(); #endif /* SOCKETS */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; #if CONFIG_EMBEDDED /* Initialize kernel memory status notifications */ bsd_init_kprintf("calling kern_memorystatus_init\n"); kern_memorystatus_init(); #endif #ifdef GPROF /* Initialize kernel profiling. 
*/ kmstartup(); #endif /* kick off timeout driven events by calling first time */ thread_wakeup(&lbolt); timeout(lightning_bolt, 0, hz); bsd_init_kprintf("calling bsd_autoconf\n"); bsd_autoconf(); #if CONFIG_DTRACE dtrace_postinit(); #endif /* * We attach the loopback interface *way* down here to ensure * it happens after autoconf(), otherwise it becomes the * "primary" interface. */ #include <loop.h> #if NLOOP > 0 bsd_init_kprintf("calling loopattach\n"); loopattach(); /* XXX */ #endif #if PFLOG /* Initialize packet filter log interface */ pfloginit(); #endif /* PFLOG */ #if NETHER > 0 /* Register the built-in dlil ethernet interface family */ bsd_init_kprintf("calling ether_family_init\n"); ether_family_init(); #endif /* ETHER */ #if NETWORKING /* Call any kext code that wants to run just after network init */ bsd_init_kprintf("calling net_init_run\n"); net_init_run(); /* register user tunnel kernel control handler */ utun_register_control(); #endif /* NETWORKING */ bsd_init_kprintf("calling vnode_pager_bootstrap\n"); vnode_pager_bootstrap(); #if 0 /* XXX Hack for early debug stop */ printf("\nabout to sleep for 10 seconds\n"); IOSleep( 10 * 1000 ); /* Debugger("hello"); */ #endif bsd_init_kprintf("calling inittodr\n"); inittodr(0); #if CONFIG_EMBEDDED { /* print out early VM statistics */ kern_return_t kr1; vm_statistics_data_t stat; mach_msg_type_number_t count; count = HOST_VM_INFO_COUNT; kr1 = host_statistics(host_self(), HOST_VM_INFO, (host_info_t)&stat, &count); kprintf("Mach Virtual Memory Statistics (page size of 4096) bytes\n" "Pages free:\t\t\t%u.\n" "Pages active:\t\t\t%u.\n" "Pages inactive:\t\t\t%u.\n" "Pages wired down:\t\t%u.\n" "\"Translation faults\":\t\t%u.\n" "Pages copy-on-write:\t\t%u.\n" "Pages zero filled:\t\t%u.\n" "Pages reactivated:\t\t%u.\n" "Pageins:\t\t\t%u.\n" "Pageouts:\t\t\t%u.\n" "Object cache: %u hits of %u lookups (%d%% hit rate)\n", stat.free_count, stat.active_count, stat.inactive_count, stat.wire_count, stat.faults, stat.cow_faults, stat.zero_fill_count, stat.reactivations, stat.pageins, stat.pageouts, stat.hits, stat.lookups, (stat.hits == 0) ? 100 : ((stat.lookups * 100) / stat.hits)); } #endif /* CONFIG_EMBEDDED */ /* Mount the root file system. */ while( TRUE) { int err; bsd_init_kprintf("calling setconf\n"); setconf(); bsd_init_kprintf("vfs_mountroot\n"); if (0 == (err = vfs_mountroot())) break; rootdevice[0] = '\0'; #if NFSCLIENT if (mountroot == netboot_mountroot) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: failed to mount network root, error %d, %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } #endif printf("cannot mount root, errno = %d\n", err); boothowto |= RB_ASKNAME; } IOSecureBSDRoot(rootdevice); context.vc_thread = current_thread(); context.vc_ucred = kernproc->p_ucred; mountlist.tqh_first->mnt_flag |= MNT_ROOTFS; bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. Set fdp->fd_fd.fd_cdir to reference it. 
*/ if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); rootvnode->v_flag |= VROOT; (void)vnode_ref(rootvnode); (void)vnode_put(rootvnode); filedesc0.fd_cdir = rootvnode; #if NFSCLIENT if (mountroot == netboot_mountroot) { int err; /* post mount setup */ if ((err = netboot_setup()) != 0) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: NetBoot could not find root, error %d: %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } } #endif #if CONFIG_IMAGEBOOT /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it */ if(imageboot_needed()) { int err; /* An image was found */ if((err = imageboot_setup())) { /* * this is not fatal. Keep trying to root * off the original media */ printf("%s: imageboot could not find root, %d\n", __FUNCTION__, err); } } #endif /* CONFIG_IMAGEBOOT */ /* set initial time; all other resource data is already zero'ed */ microtime(&kernproc->p_start); kernproc->p_stats->p_start = kernproc->p_start; /* for compat */ #if DEVFS { char mounthere[] = "/dev"; /* !const because of internal casting */ bsd_init_kprintf("calling devfs_kernel_mount\n"); devfs_kernel_mount(mounthere); } #endif /* DEVFS */ /* Initialize signal state for process 0. */ bsd_init_kprintf("calling siginit\n"); siginit(kernproc); bsd_init_kprintf("calling bsd_utaskbootstrap\n"); bsd_utaskbootstrap(); #if defined(__LP64__) kernproc->p_flag |= P_LP64; printf("Kernel is LP64\n"); #endif #if __i386__ || __x86_64__ /* this should be done after the root filesystem is mounted */ error = set_archhandler(kernproc, CPU_TYPE_POWERPC); // 10/30/08 - gab: <rdar://problem/6324501> // if default 'translate' can't be found, see if the understudy is available if (ENOENT == error) { strlcpy(exec_archhandler_ppc.path, kRosettaStandIn_str, MAXPATHLEN); error = set_archhandler(kernproc, CPU_TYPE_POWERPC); } if (error) /* XXX make more generic */ exec_archhandler_ppc.path[0] = 0; #endif bsd_init_kprintf("calling mountroot_post_hook\n"); /* invoke post-root-mount hook */ if (mountroot_post_hook != NULL) mountroot_post_hook(); #if 0 /* not yet */ consider_zone_gc(FALSE); #endif bsd_init_kprintf("done\n"); }