/*
 * Initialize the eventhandler mutex and list.
 */
void
eventhandler_init(void)
{
    eventhandler_mutex_grp_attr = lck_grp_attr_alloc_init();
    eventhandler_mutex_grp = lck_grp_alloc_init("eventhandler",
        eventhandler_mutex_grp_attr);
    eventhandler_mutex_attr = lck_attr_alloc_init();

    el_lock_grp_attr = lck_grp_attr_alloc_init();
    el_lock_grp = lck_grp_alloc_init("eventhandler list", el_lock_grp_attr);
    el_lock_attr = lck_attr_alloc_init();

    eventhandler_lists_ctxt_init(&evthdlr_lists_ctxt_glb);
}
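/*
 * eventhandler_init() above follows the canonical XNU locking KPI sequence
 * from <kern/locks.h>: allocate a group attribute, a group, and a lock
 * attribute, then create the lock from them.  Below is a minimal,
 * self-contained sketch of that sequence together with the matching teardown
 * calls in reverse order; the my_subsys_* names are hypothetical and only
 * illustrate the call order, they do not appear in the snippet above.
 */
static lck_grp_attr_t   *my_subsys_grp_attr;
static lck_grp_t        *my_subsys_grp;
static lck_attr_t       *my_subsys_attr;
static lck_mtx_t        *my_subsys_mtx;

static void
my_subsys_lock_init(void)
{
    /* group attribute -> group -> lock attribute -> lock */
    my_subsys_grp_attr = lck_grp_attr_alloc_init();
    my_subsys_grp = lck_grp_alloc_init("my_subsys", my_subsys_grp_attr);
    my_subsys_attr = lck_attr_alloc_init();
    my_subsys_mtx = lck_mtx_alloc_init(my_subsys_grp, my_subsys_attr);
}

static void
my_subsys_lock_fini(void)
{
    /* release in the reverse order of allocation */
    lck_mtx_free(my_subsys_mtx, my_subsys_grp);
    lck_attr_free(my_subsys_attr);
    lck_grp_free(my_subsys_grp);
    lck_grp_attr_free(my_subsys_grp_attr);
}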
funnel_t *
funnel_alloc(int type)
{
    lck_mtx_t   *m;
    funnel_t    *fnl;

    if (funnel_lck_grp == LCK_GRP_NULL) {
        funnel_lck_grp_attr = lck_grp_attr_alloc_init();

        funnel_lck_grp = lck_grp_alloc_init("Funnel", funnel_lck_grp_attr);

        funnel_lck_attr = lck_attr_alloc_init();
    }
    if ((fnl = (funnel_t *)kalloc(sizeof(funnel_t))) != 0) {
        bzero((void *)fnl, sizeof(funnel_t));
        if ((m = lck_mtx_alloc_init(funnel_lck_grp, funnel_lck_attr)) ==
            (lck_mtx_t *)NULL) {
            kfree(fnl, sizeof(funnel_t));
            return(THR_FUNNEL_NULL);
        }
        fnl->fnl_mutex = m;
        fnl->fnl_type = type;
    }
    return(fnl);
}
/* initial setup done at time of sysinit */
void
pipeinit(void)
{
    nbigpipe = 0;
    vm_size_t zone_size;

    zone_size = 8192 * sizeof(struct pipe);
    pipe_zone = zinit(sizeof(struct pipe), zone_size, 4096, "pipe zone");

    /* allocate lock group attribute and group for pipe mutexes */
    pipe_mtx_grp_attr = lck_grp_attr_alloc_init();
    pipe_mtx_grp = lck_grp_alloc_init("pipe", pipe_mtx_grp_attr);

    /* allocate the lock attribute for pipe mutexes */
    pipe_mtx_attr = lck_attr_alloc_init();

    /*
     * Set up garbage collection for dead pipes
     */
    zone_size = (PIPE_GARBAGE_QUEUE_LIMIT + 20) *
        sizeof(struct pipe_garbage);
    pipe_garbage_zone = (zone_t)zinit(sizeof(struct pipe_garbage),
        zone_size, 4096, "pipe garbage zone");
    pipe_garbage_lock = lck_mtx_alloc_init(pipe_mtx_grp, pipe_mtx_attr);
}
void
os_reason_init()
{
    int reasons_allocated = 0;

    /*
     * Initialize OS reason group and lock attributes
     */
    os_reason_lock_grp_attr = lck_grp_attr_alloc_init();
    os_reason_lock_grp = lck_grp_alloc_init("os_reason_lock",
        os_reason_lock_grp_attr);
    os_reason_lock_attr = lck_attr_alloc_init();

    /*
     * Create OS reason zone.
     */
    os_reason_zone = zinit(sizeof(struct os_reason),
        OS_REASON_MAX_COUNT * sizeof(struct os_reason),
        OS_REASON_MAX_COUNT, "os reasons");
    if (os_reason_zone == NULL) {
        panic("failed to initialize os_reason_zone");
    }

    /*
     * We pre-fill the OS reason zone to reduce the likelihood that
     * the jetsam thread and others block when they create an exit
     * reason. This pre-filled memory is not-collectable since it's
     * foreign memory crammed in as part of zfill().
     */
    reasons_allocated = zfill(os_reason_zone, OS_REASON_RESERVE_COUNT);
    assert(reasons_allocated > 0);
}
void
tcp_lro_init(void)
{
    int i;

    bzero(lro_flow_list, sizeof (struct lro_flow) * TCP_LRO_NUM_FLOWS);
    for (i = 0; i < TCP_LRO_FLOW_MAP; i++) {
        lro_flow_map[i] = TCP_LRO_FLOW_UNINIT;
    }

    /*
     * allocate lock group attribute, group and attribute for tcp_lro_lock
     */
    tcp_lro_mtx_grp_attr = lck_grp_attr_alloc_init();
    tcp_lro_mtx_grp = lck_grp_alloc_init("tcplro", tcp_lro_mtx_grp_attr);
    tcp_lro_mtx_attr = lck_attr_alloc_init();
    lck_mtx_init(&tcp_lro_lock, tcp_lro_mtx_grp, tcp_lro_mtx_attr);

    tcp_lro_timer = thread_call_allocate(tcp_lro_timer_proc, NULL);
    if (tcp_lro_timer == NULL) {
        panic_plain("%s: unable to allocate lro timer", __func__);
    }

    return;
}
/*
 * Initialize cnode hash table.
 */
__private_extern__
void
hfs_chashinit()
{
    chash_lck_grp_attr = lck_grp_attr_alloc_init();
    chash_lck_grp = lck_grp_alloc_init("cnode_hash", chash_lck_grp_attr);
    chash_lck_attr = lck_attr_alloc_init();
}
void
init_system_override()
{
    sys_override_mtx_grp_attr = lck_grp_attr_alloc_init();
    sys_override_mtx_grp = lck_grp_alloc_init("system_override",
        sys_override_mtx_grp_attr);
    sys_override_mtx_attr = lck_attr_alloc_init();
    lck_mtx_init(&sys_override_lock, sys_override_mtx_grp,
        sys_override_mtx_attr);
    io_throttle_assert_cnt = cpu_throttle_assert_cnt = 0;
}
/* Initialize the mutex governing access to the SysV msg subsystem */
__private_extern__ void
sysv_msg_lock_init(void)
{
    sysv_msg_subsys_lck_grp_attr = lck_grp_attr_alloc_init();
    sysv_msg_subsys_lck_grp = lck_grp_alloc_init("sysv_msg_subsys_lock",
        sysv_msg_subsys_lck_grp_attr);
    sysv_msg_subsys_lck_attr = lck_attr_alloc_init();
    lck_mtx_init(&sysv_msg_subsys_mutex, sysv_msg_subsys_lck_grp,
        sysv_msg_subsys_lck_attr);
}
static inline void setup_locks() {
  /* Create locks. Cannot be done on the stack. */
  osquery.lck_grp_attr = lck_grp_attr_alloc_init();
  lck_grp_attr_setstat(osquery.lck_grp_attr);
  osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);
  osquery.lck_attr = lck_attr_alloc_init();
  osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
static inline void setup_locks() {
  // Create locks. Cannot be done on the stack.
  osquery.lck_grp_attr = lck_grp_attr_alloc_init();
  lck_grp_attr_setstat(osquery.lck_grp_attr);
  osquery.lck_grp = lck_grp_alloc_init("osquery", osquery.lck_grp_attr);
  osquery.lck_attr = lck_attr_alloc_init();

  // MTX is the IOCTL API handling lock.
  // This assures only one daemon will use the kernel API simultaneously.
  osquery.mtx = lck_mtx_alloc_init(osquery.lck_grp, osquery.lck_attr);
}
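// A matching teardown is the mirror image of setup_locks(): free the mutex
// first, then the attribute, the group, and the group attribute it was built
// from.  This is a sketch assuming the same global `osquery` struct; the
// function name is illustrative and not taken from the snippet above.
static inline void tear_down_locks() {
  lck_mtx_free(osquery.mtx, osquery.lck_grp);
  lck_attr_free(osquery.lck_attr);
  lck_grp_free(osquery.lck_grp);
  lck_grp_attr_free(osquery.lck_grp_attr);
}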
void
bpf_init(__unused void *unused)
{
#ifdef __APPLE__
    int i;
    int maj;

    if (bpf_devsw_installed == 0) {
        bpf_devsw_installed = 1;

        bpf_mlock_grp_attr = lck_grp_attr_alloc_init();

        bpf_mlock_grp = lck_grp_alloc_init("bpf", bpf_mlock_grp_attr);

        bpf_mlock_attr = lck_attr_alloc_init();

        bpf_mlock = lck_mtx_alloc_init(bpf_mlock_grp, bpf_mlock_attr);

        if (bpf_mlock == 0) {
            printf("bpf_init: failed to allocate bpf_mlock\n");
            bpf_devsw_installed = 0;
            return;
        }

        maj = cdevsw_add(CDEV_MAJOR, &bpf_cdevsw);
        if (maj == -1) {
            if (bpf_mlock)
                lck_mtx_free(bpf_mlock, bpf_mlock_grp);
            if (bpf_mlock_attr)
                lck_attr_free(bpf_mlock_attr);
            if (bpf_mlock_grp)
                lck_grp_free(bpf_mlock_grp);
            if (bpf_mlock_grp_attr)
                lck_grp_attr_free(bpf_mlock_grp_attr);

            bpf_mlock = NULL;
            bpf_mlock_attr = NULL;
            bpf_mlock_grp = NULL;
            bpf_mlock_grp_attr = NULL;
            bpf_devsw_installed = 0;
            printf("bpf_init: failed to allocate a major number!\n");
            return;
        }

        for (i = 0; i < NBPFILTER; i++)
            bpf_make_dev_t(maj);
    }
#else
    cdevsw_add(&bpf_cdevsw);
#endif
}
rMutex
rpal_mutex_create()
{
    lck_mtx_t* mutex = NULL;
    lck_grp_attr_t* gattr = NULL;
    lck_attr_t* lattr = NULL;

    if( 0 == g_lck_group )
    {
        rpal_debug_info( "mutex group not created, creating" );

        gattr = lck_grp_attr_alloc_init();

        if( NULL == gattr )
        {
            rpal_debug_critical( "could not create mutex group" );
            return NULL;
        }

        lck_grp_attr_setstat( gattr );

        g_lck_group = lck_grp_alloc_init( "hcphbs", gattr );

        lck_grp_attr_free( gattr );
    }

    if( NULL == g_lck_group )
    {
        return NULL;
    }

    lattr = lck_attr_alloc_init();

    if( NULL != lattr )
    {
        mutex = lck_mtx_alloc_init( g_lck_group, lattr );
        lck_attr_free( lattr );
    }
    else
    {
        rpal_debug_critical( "could not create mutex attributes" );
    }

    return mutex;
}
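/*
 * The mutex returned above is the only allocation the caller has to release:
 * the attribute is freed right after lck_mtx_alloc_init() and the group is
 * kept in the global g_lck_group for reuse.  A sketch of the corresponding
 * release helper; the name rpal_mutex_free is illustrative and assumed, not
 * taken from the snippet above.
 */
void
rpal_mutex_free( rMutex mutex )
{
    if( NULL != mutex )
    {
        /* the group stays allocated for other mutexes created later */
        lck_mtx_free( (lck_mtx_t*)mutex, g_lck_group );
    }
}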
/*
 * Initialize raw connection block q.
 */
void
rip_init(struct protosw *pp, struct domain *dp)
{
#pragma unused(dp)
    static int rip_initialized = 0;
    struct inpcbinfo *pcbinfo;

    VERIFY((pp->pr_flags & (PR_INITIALIZED|PR_ATTACHED)) == PR_ATTACHED);

    if (rip_initialized)
        return;
    rip_initialized = 1;

    LIST_INIT(&ripcb);
    ripcbinfo.ipi_listhead = &ripcb;
    /*
     * XXX We don't use the hash list for raw IP, but it's easier
     * to allocate a one entry hash list than it is to check all
     * over the place for ipi_hashbase == NULL.
     */
    ripcbinfo.ipi_hashbase = hashinit(1, M_PCB, &ripcbinfo.ipi_hashmask);
    ripcbinfo.ipi_porthashbase = hashinit(1, M_PCB,
        &ripcbinfo.ipi_porthashmask);

    ripcbinfo.ipi_zone = zinit(sizeof(struct inpcb),
        (4096 * sizeof(struct inpcb)), 4096, "ripzone");

    pcbinfo = &ripcbinfo;
    /*
     * allocate lock group attribute and group for raw IP pcb mutexes
     */
    pcbinfo->ipi_lock_grp_attr = lck_grp_attr_alloc_init();
    pcbinfo->ipi_lock_grp = lck_grp_alloc_init("ripcb",
        pcbinfo->ipi_lock_grp_attr);

    /*
     * allocate the lock attribute for raw IP pcb mutexes
     */
    pcbinfo->ipi_lock_attr = lck_attr_alloc_init();
    if ((pcbinfo->ipi_lock = lck_rw_alloc_init(pcbinfo->ipi_lock_grp,
        pcbinfo->ipi_lock_attr)) == NULL) {
        panic("%s: unable to allocate PCB lock\n", __func__);
        /* NOTREACHED */
    }

    in_pcbinfo_attach(&ripcbinfo);
}
void
lpx_datagram_init()
{
    DEBUG_PRINT(DEBUG_MASK_DGRAM_TRACE, ("lpx_datagram_init: Entered.\n"));

    // Init Lock.
    datagram_mtx_grp_attr = lck_grp_attr_alloc_init();
    lck_grp_attr_setdefault(datagram_mtx_grp_attr);

    datagram_mtx_grp = lck_grp_alloc_init("datagrampcb", datagram_mtx_grp_attr);

    datagram_mtx_attr = lck_attr_alloc_init();
    lck_attr_setdefault(datagram_mtx_attr);

    if ((lpx_datagram_pcb.lpxp_list_rw = lck_rw_alloc_init(datagram_mtx_grp,
        datagram_mtx_attr)) == NULL) {
        DEBUG_PRINT(DEBUG_MASK_STREAM_ERROR,
            ("lpx_datagram_init: Can't alloc mtx\n"));
    }

    return;
}
static kern_return_t
register_locks(void)
{
    /* already allocated? */
    if (ucode_slock_grp_attr && ucode_slock_grp && ucode_slock_attr &&
        ucode_slock)
        return KERN_SUCCESS;

    /* allocate lock group attribute and group */
    if (!(ucode_slock_grp_attr = lck_grp_attr_alloc_init()))
        goto nomem_out;

    lck_grp_attr_setstat(ucode_slock_grp_attr);

    if (!(ucode_slock_grp = lck_grp_alloc_init("ucode_lock",
        ucode_slock_grp_attr)))
        goto nomem_out;

    /* Allocate lock attribute */
    if (!(ucode_slock_attr = lck_attr_alloc_init()))
        goto nomem_out;

    /* Allocate the spin lock */
    /* We keep one global spin-lock. We could have one per update
     * request... but srsly, why would you update microcode like that? */
    if (!(ucode_slock = lck_spin_alloc_init(ucode_slock_grp,
        ucode_slock_attr)))
        goto nomem_out;

    return KERN_SUCCESS;

nomem_out:
    /* clean up */
    if (ucode_slock)
        lck_spin_free(ucode_slock, ucode_slock_grp);
    if (ucode_slock_attr)
        lck_attr_free(ucode_slock_attr);
    if (ucode_slock_grp)
        lck_grp_free(ucode_slock_grp);
    if (ucode_slock_grp_attr)
        lck_grp_attr_free(ucode_slock_grp_attr);

    return KERN_NO_SPACE;
}
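/*
 * Once register_locks() succeeds, the spin lock is used like any other XNU
 * spin lock: take it around a short critical section and drop it before doing
 * anything that may block.  A minimal usage sketch, assuming a caller that
 * serializes on ucode_slock; the helper name is illustrative only.
 */
static void
ucode_critical_section_sketch(void)
{
    lck_spin_lock(ucode_slock);
    /* ... touch state shared with other microcode-update callers ... */
    lck_spin_unlock(ucode_slock);
}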
ADT_LOCK
_adt_xnu_rw_lock_init(int max_threads)
{
    ADT_LOCK rwlock;

    rwlock = adt_malloc(sizeof(adt_lock_t));
    if (rwlock == NULL) {
        goto end;
    }

    rwlock->rw_lock_grp_attr = lck_grp_attr_alloc_init();
    // TODO: not sure if the name needs to be unique
    rwlock->rw_lock_grp = lck_grp_alloc_init("adt_rw_lock",
        rwlock->rw_lock_grp_attr);
    rwlock->rw_lock_attr = lck_attr_alloc_init();
    rwlock->rw_lock = lck_rw_alloc_init(rwlock->rw_lock_grp,
        rwlock->rw_lock_attr);

end:
    return rwlock;
}
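/*
 * The ADT_LOCK built above wraps an XNU read-write lock, so readers and
 * writers would go through the shared/exclusive entry points, and teardown
 * frees the members in reverse order of allocation.  This is a sketch assuming
 * the same adt_lock_t layout as the initializer; the helper names are
 * illustrative, and adt_free is assumed to be the counterpart of adt_malloc.
 */
void
_adt_xnu_rw_lock_read(ADT_LOCK rwlock)
{
    lck_rw_lock_shared(rwlock->rw_lock);
}

void
_adt_xnu_rw_unlock_read(ADT_LOCK rwlock)
{
    lck_rw_unlock_shared(rwlock->rw_lock);
}

void
_adt_xnu_rw_lock_destroy(ADT_LOCK rwlock)
{
    lck_rw_free(rwlock->rw_lock, rwlock->rw_lock_grp);
    lck_attr_free(rwlock->rw_lock_attr);
    lck_grp_free(rwlock->rw_lock_grp);
    lck_grp_attr_free(rwlock->rw_lock_grp_attr);
    adt_free(rwlock);
}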
/* * Initialize the framework; this is currently called as part of BSD init. */ __private_extern__ void mcache_init(void) { mcache_bkttype_t *btp; unsigned int i; char name[32]; ncpu = ml_get_max_cpus(); mcache_llock_grp_attr = lck_grp_attr_alloc_init(); mcache_llock_grp = lck_grp_alloc_init("mcache.list", mcache_llock_grp_attr); mcache_llock_attr = lck_attr_alloc_init(); mcache_llock = lck_mtx_alloc_init(mcache_llock_grp, mcache_llock_attr); mcache_zone = zinit(MCACHE_ALLOC_SIZE, 256 * MCACHE_ALLOC_SIZE, PAGE_SIZE, "mcache"); if (mcache_zone == NULL) panic("mcache_init: failed to allocate mcache zone\n"); zone_change(mcache_zone, Z_CALLERACCT, FALSE); LIST_INIT(&mcache_head); for (i = 0; i < sizeof (mcache_bkttype) / sizeof (*btp); i++) { btp = &mcache_bkttype[i]; (void) snprintf(name, sizeof (name), "bkt_%d", btp->bt_bktsize); btp->bt_cache = mcache_create(name, (btp->bt_bktsize + 1) * sizeof (void *), 0, 0, MCR_SLEEP); } PE_parse_boot_argn("mcache_flags", &mcache_flags, sizeof (mcache_flags)); mcache_flags &= MCF_FLAGS_MASK; mcache_audit_cache = mcache_create("audit", sizeof (mcache_audit_t), 0, 0, MCR_SLEEP); mcache_reap_interval = 15 * hz; mcache_applyall(mcache_cache_bkt_enable); mcache_ready = 1; }
void zfs_context_init(void) { uint64_t kern_mem_size; zfs_lock_attr = lck_attr_alloc_init(); zfs_group_attr = lck_grp_attr_alloc_init(); #if 0 lck_attr_setdebug(zfs_lock_attr); #endif zfs_mutex_group = lck_grp_alloc_init("zfs-mutex", zfs_group_attr); zfs_rwlock_group = lck_grp_alloc_init("zfs-rwlock", zfs_group_attr); zfs_spinlock_group = lck_grp_alloc_init("zfs-spinlock", zfs_group_attr); zfs_kmem_alloc_tag = OSMalloc_Tagalloc("ZFS general purpose", OSMT_DEFAULT); max_ncpus = 1; /* kernel memory space is 4 GB max */ kern_mem_size = MIN(max_mem, (uint64_t)0x0FFFFFFFFULL); /* Calculate number of pages of memory on the system */ physmem = kern_mem_size / PAGE_SIZE; /* Constrain our memory use on smaller memory systems */ if (kern_mem_size <= 0x20000000) zfs_footprint.maximum = kern_mem_size / 7; /* 512MB: ~15 % */ else if (kern_mem_size <= 0x30000000) zfs_footprint.maximum = kern_mem_size / 5; /* 768MB: ~20 % */ else if (kern_mem_size <= 0x40000000) zfs_footprint.maximum = kern_mem_size / 3; /* 1GB: ~33 % */ else /* set to 1GB limit maximum*/ zfs_footprint.maximum = MIN((kern_mem_size / 2), 0x40000000); recalc_target_footprint(100); printf("zfs_context_init: footprint.maximum=%lu, footprint.target=%lu\n", zfs_footprint.maximum, zfs_footprint.target); }
/* * Initialise cache headers */ int nullfs_init(__unused struct vfsconf * vfsp) { NULLFSDEBUG("%s\n", __FUNCTION__); /* assuming for now that this happens immediately and by default after fs * installation */ null_hashlck_grp_attr = lck_grp_attr_alloc_init(); if (null_hashlck_grp_attr == NULL) { goto error; } null_hashlck_grp = lck_grp_alloc_init("com.apple.filesystems.nullfs", null_hashlck_grp_attr); if (null_hashlck_grp == NULL) { goto error; } null_hashlck_attr = lck_attr_alloc_init(); if (null_hashlck_attr == NULL) { goto error; } lck_mtx_init(&null_hashmtx, null_hashlck_grp, null_hashlck_attr); null_node_hashtbl = hashinit(NULL_HASH_SIZE, M_TEMP, &null_hash_mask); NULLFSDEBUG("%s finished\n", __FUNCTION__); return (0); error: printf("NULLFS: failed to get lock element\n"); if (null_hashlck_grp_attr) { lck_grp_attr_free(null_hashlck_grp_attr); null_hashlck_grp_attr = NULL; } if (null_hashlck_grp) { lck_grp_free(null_hashlck_grp); null_hashlck_grp = NULL; } if (null_hashlck_attr) { lck_attr_free(null_hashlck_attr); null_hashlck_attr = NULL; } return KERN_FAILURE; }
int
union_init(__unused struct vfsconf *vfsp)
{
    int i;

    union_lck_grp_attr = lck_grp_attr_alloc_init();
#if DIAGNOSTIC
    lck_grp_attr_setstat(union_lck_grp_attr);
#endif
    union_lck_grp = lck_grp_alloc_init("union", union_lck_grp_attr);
    union_lck_attr = lck_attr_alloc_init();
#if DIAGNOSTIC
    lck_attr_setdebug(union_lck_attr);
#endif
    union_mtxp = lck_mtx_alloc_init(union_lck_grp, union_lck_attr);

    for (i = 0; i < NHASH; i++)
        LIST_INIT(&unhead[i]);
    bzero((caddr_t)unvplock, sizeof(unvplock));

    /* add the hook for getdirentries */
    union_dircheckp = union_dircheck;

    return (0);
}
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
    /* ip6q_alloc() uses mbufs for IPv6 fragment queue structures */
    _CASSERT(sizeof (struct ip6q) <= _MLEN);
    /* ip6af_alloc() uses mbufs for IPv6 fragment queue structures */
    _CASSERT(sizeof (struct ip6asfrag) <= _MLEN);

    /* IPv6 fragment reassembly queue lock */
    ip6qlock_grp_attr = lck_grp_attr_alloc_init();
    ip6qlock_grp = lck_grp_alloc_init("ip6qlock", ip6qlock_grp_attr);
    ip6qlock_attr = lck_attr_alloc_init();
    lck_mtx_init(&ip6qlock, ip6qlock_grp, ip6qlock_attr);

    lck_mtx_lock(&ip6qlock);
    /* Initialize IPv6 reassembly queue. */
    ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;

    /* same limits as IPv4 */
    ip6_maxfragpackets = nmbclusters / 32;
    ip6_maxfrags = ip6_maxfragpackets * 2;
    ip6q_updateparams();
    lck_mtx_unlock(&ip6qlock);
}
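/*
 * frag6_init() shows the second common shape of the pattern: instead of
 * allocating the mutex with lck_mtx_alloc_init(), an lck_mtx_t embedded in a
 * global (here ip6qlock) is initialized in place with lck_mtx_init() and then
 * taken around the critical section.  A minimal sketch of that shape with the
 * matching lck_mtx_destroy() teardown; the my_* names are illustrative only.
 */
static lck_grp_attr_t   *my_grp_attr;
static lck_grp_t        *my_grp;
static lck_attr_t       *my_attr;
static lck_mtx_t        my_lock;    /* embedded, not heap-allocated */

static void
my_lock_setup(void)
{
    my_grp_attr = lck_grp_attr_alloc_init();
    my_grp = lck_grp_alloc_init("my_lock", my_grp_attr);
    my_attr = lck_attr_alloc_init();
    lck_mtx_init(&my_lock, my_grp, my_attr);
}

static void
my_lock_teardown(void)
{
    /* lck_mtx_destroy() for embedded locks, lck_mtx_free() for allocated ones */
    lck_mtx_destroy(&my_lock, my_grp);
    lck_attr_free(my_attr);
    lck_grp_free(my_grp);
    lck_grp_attr_free(my_grp_attr);
}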
void udp_init() { vm_size_t str_size; struct inpcbinfo *pcbinfo; LIST_INIT(&udb); udbinfo.listhead = &udb; udbinfo.hashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.hashmask); udbinfo.porthashbase = hashinit(UDBHASHSIZE, M_PCB, &udbinfo.porthashmask); #ifdef __APPLE__ str_size = (vm_size_t) sizeof(struct inpcb); udbinfo.ipi_zone = (void *) zinit(str_size, 80000*str_size, 8192, "udpcb"); pcbinfo = &udbinfo; /* * allocate lock group attribute and group for udp pcb mutexes */ pcbinfo->mtx_grp_attr = lck_grp_attr_alloc_init(); pcbinfo->mtx_grp = lck_grp_alloc_init("udppcb", pcbinfo->mtx_grp_attr); pcbinfo->mtx_attr = lck_attr_alloc_init(); if ((pcbinfo->mtx = lck_rw_alloc_init(pcbinfo->mtx_grp, pcbinfo->mtx_attr)) == NULL) return; /* pretty much dead if this fails... */ in_pcb_nat_init(&udbinfo, AF_INET, IPPROTO_UDP, SOCK_DGRAM); #else udbinfo.ipi_zone = zinit("udpcb", sizeof(struct inpcb), maxsockets, ZONE_INTERRUPT, 0); #endif #if 0 /* for pcb sharing testing only */ stat = in_pcb_new_share_client(&udbinfo, &fake_owner); kprintf("udp_init in_pcb_new_share_client - stat = %d\n", stat); laddr.s_addr = 0x11646464; faddr.s_addr = 0x11646465; lport = 1500; in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner); kprintf("udp_init in_pcb_grab_port - stat = %d\n", stat); stat = in_pcb_rem_share_client(&udbinfo, fake_owner); kprintf("udp_init in_pcb_rem_share_client - stat = %d\n", stat); stat = in_pcb_new_share_client(&udbinfo, &fake_owner); kprintf("udp_init in_pcb_new_share_client(2) - stat = %d\n", stat); laddr.s_addr = 0x11646464; faddr.s_addr = 0x11646465; lport = 1500; stat = in_pcb_grab_port(&udbinfo, 0, laddr, &lport, faddr, 1600, 0, fake_owner); kprintf("udp_init in_pcb_grab_port(2) - stat = %d\n", stat); #endif }
/* * This function is called very early on in the Mach startup, from the * function start_kernel_threads() in osfmk/kern/startup.c. It's called * in the context of the current (startup) task using a call to the * function kernel_thread_create() to jump into start_kernel_threads(). * Internally, kernel_thread_create() calls thread_create_internal(), * which calls uthread_alloc(). The function of uthread_alloc() is * normally to allocate a uthread structure, and fill out the uu_sigmask, * uu_context fields. It skips filling these out in the case of the "task" * being "kernel_task", because the order of operation is inverted. To * account for that, we need to manually fill in at least the contents * of the uu_context.vc_ucred field so that the uthread structure can be * used like any other. */ void bsd_init(void) { struct uthread *ut; unsigned int i; #if __i386__ || __x86_64__ int error; #endif struct vfs_context context; kern_return_t ret; struct ucred temp_cred; #define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */ kernel_flock = funnel_alloc(KERNEL_FUNNEL); if (kernel_flock == (funnel_t *)0 ) { panic("bsd_init: Failed to allocate kernel funnel"); } printf(copyright); bsd_init_kprintf("calling kmeminit\n"); kmeminit(); bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); /* Initialize kauth subsystem before instancing the first credential */ bsd_init_kprintf("calling kauth_init\n"); kauth_init(); /* Initialize process and pgrp structures. */ bsd_init_kprintf("calling procinit\n"); procinit(); /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); kernproc = &proc0; /* implicitly bzero'ed */ /* kernel_task->proc = kernproc; */ set_bsdtask_info(kernel_task,(void *)kernproc); /* give kernproc a name */ bsd_init_kprintf("calling process_name\n"); process_name("kernel_task", kernproc); /* allocate proc lock group attribute and group */ bsd_init_kprintf("calling lck_grp_attr_alloc_init\n"); proc_lck_grp_attr= lck_grp_attr_alloc_init(); proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); #ifndef CONFIG_EMBEDDED proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); #endif /* Allocate proc lock attribute */ proc_lck_attr = lck_attr_alloc_init(); #if 0 #if __PROC_INTERNAL_DEBUG lck_attr_setdebug(proc_lck_attr); #endif #endif #ifdef CONFIG_EMBEDDED proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr); #else proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr); #endif execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); execargs_cache_size = bsd_simul_execs; execargs_free_count = bsd_simul_execs; execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); if (current_task() != kernel_task) printf("bsd_init: We have a problem, " "current task is not 
kernel task\n"); bsd_init_kprintf("calling get_bsdthread_info\n"); ut = (uthread_t)get_bsdthread_info(current_thread()); #if CONFIG_MACF /* * Initialize the MAC Framework */ mac_policy_initbsd(); kernproc->p_mac_enforce = 0; #endif /* MAC */ /* * Create process 0. */ proc_list_lock(); LIST_INSERT_HEAD(&allproc, kernproc, p_list); kernproc->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); #ifdef CONFIG_EMBEDDED lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr); #else lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr); #endif /* There is no other bsd thread this point and is safe without pgrp lock */ LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist); kernproc->p_listflag |= P_LIST_INPGRP; kernproc->p_pgrpid = 0; pgrp0.pg_session = &session0; pgrp0.pg_membercnt = 1; session0.s_count = 1; session0.s_leader = kernproc; session0.s_listflags = 0; #ifdef CONFIG_EMBEDDED lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr); #else lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr); #endif LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash); proc_list_unlock(); #if CONFIG_LCTX kernproc->p_lctx = NULL; #endif kernproc->task = kernel_task; kernproc->p_stat = SRUN; kernproc->p_flag = P_SYSTEM; kernproc->p_nice = NZERO; kernproc->p_pptr = kernproc; TAILQ_INIT(&kernproc->p_uthlist); TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list); kernproc->sigwait = FALSE; kernproc->sigwait_thread = THREAD_NULL; kernproc->exit_thread = THREAD_NULL; kernproc->p_csflags = CS_VALID; /* * Create credential. This also Initializes the audit information. */ bsd_init_kprintf("calling bzero\n"); bzero(&temp_cred, sizeof(temp_cred)); temp_cred.cr_ngroups = 1; temp_cred.cr_audit.as_aia_p = &audit_default_aia; /* XXX the following will go away with cr_au */ temp_cred.cr_au.ai_auid = AU_DEFAUDITID; bsd_init_kprintf("calling kauth_cred_create\n"); kernproc->p_ucred = kauth_cred_create(&temp_cred); /* give the (already exisiting) initial thread a reference on it */ bsd_init_kprintf("calling kauth_cred_ref\n"); kauth_cred_ref(kernproc->p_ucred); ut->uu_context.vc_ucred = kernproc->p_ucred; ut->uu_context.vc_thread = current_thread(); TAILQ_INIT(&kernproc->p_aio_activeq); TAILQ_INIT(&kernproc->p_aio_doneq); kernproc->p_aio_total_count = 0; kernproc->p_aio_active_count = 0; bsd_init_kprintf("calling file_lock_init\n"); file_lock_init(); #if CONFIG_MACF mac_cred_label_associate_kernel(kernproc->p_ucred); mac_task_label_update_cred (kernproc->p_ucred, (struct task *) kernproc->task); #endif /* Create the file descriptor table. */ filedesc0.fd_refcnt = 1+1; /* +1 so shutdown will not _FREE_ZONE */ kernproc->p_fd = &filedesc0; filedesc0.fd_cmask = cmask; filedesc0.fd_knlistsize = -1; filedesc0.fd_knlist = NULL; filedesc0.fd_knhash = NULL; filedesc0.fd_knhashmask = 0; /* Create the limits structures. */ kernproc->p_limit = &limit0; for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++) limit0.pl_rlimit[i].rlim_cur = limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid; limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; limit0.pl_refcnt = 1; kernproc->p_stats = &pstats0; kernproc->p_sigacts = &sigacts0; /* * Charge root for two processes: init and mach_init. 
*/ bsd_init_kprintf("calling chgproccnt\n"); (void)chgproccnt(0, 1); /* * Allocate a kernel submap for pageable memory * for temporary copying (execve()). */ { vm_offset_t minimum; bsd_init_kprintf("calling kmem_suballoc\n"); ret = kmem_suballoc(kernel_map, &minimum, (vm_size_t)bsd_pageable_map_size, TRUE, VM_FLAGS_ANYWHERE, &bsd_pageable_map); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to allocate bsd pageable map"); } /* * Initialize buffers and hash links for buffers * * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must * happen after a credential has been associated with * the kernel task. */ bsd_init_kprintf("calling bsd_bufferinit\n"); bsd_bufferinit(); /* Initialize the execve() semaphore */ bsd_init_kprintf("calling semaphore_create\n"); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to create execve semaphore"); /* * Initialize the calendar. */ bsd_init_kprintf("calling IOKitInitializeTime\n"); IOKitInitializeTime(); if (turn_on_log_leaks && !new_nkdbufs) new_nkdbufs = 200000; start_kern_tracing(new_nkdbufs); if (turn_on_log_leaks) log_leaks = 1; bsd_init_kprintf("calling ubc_init\n"); ubc_init(); /* Initialize the file systems. */ bsd_init_kprintf("calling vfsinit\n"); vfsinit(); #if SOCKETS /* Initialize per-CPU cache allocator */ mcache_init(); /* Initialize mbuf's. */ bsd_init_kprintf("calling mbinit\n"); mbinit(); net_str_id_init(); /* for mbuf tags */ #endif /* SOCKETS */ /* * Initializes security event auditing. * XXX: Should/could this occur later? */ #if CONFIG_AUDIT bsd_init_kprintf("calling audit_init\n"); audit_init(); #endif /* Initialize kqueues */ bsd_init_kprintf("calling knote_init\n"); knote_init(); /* Initialize for async IO */ bsd_init_kprintf("calling aio_init\n"); aio_init(); /* Initialize pipes */ bsd_init_kprintf("calling pipeinit\n"); pipeinit(); /* Initialize SysV shm subsystem locks; the subsystem proper is * initialized through a sysctl. */ #if SYSV_SHM bsd_init_kprintf("calling sysv_shm_lock_init\n"); sysv_shm_lock_init(); #endif #if SYSV_SEM bsd_init_kprintf("calling sysv_sem_lock_init\n"); sysv_sem_lock_init(); #endif #if SYSV_MSG bsd_init_kprintf("sysv_msg_lock_init\n"); sysv_msg_lock_init(); #endif bsd_init_kprintf("calling pshm_lock_init\n"); pshm_lock_init(); bsd_init_kprintf("calling psem_lock_init\n"); psem_lock_init(); pthread_init(); /* POSIX Shm and Sem */ bsd_init_kprintf("calling pshm_cache_init\n"); pshm_cache_init(); bsd_init_kprintf("calling psem_cache_init\n"); psem_cache_init(); bsd_init_kprintf("calling time_zone_slock_init\n"); time_zone_slock_init(); /* Stack snapshot facility lock */ stackshot_lock_init(); /* * Initialize protocols. Block reception of incoming packets * until everything is ready. */ bsd_init_kprintf("calling sysctl_register_fixed\n"); sysctl_register_fixed(); bsd_init_kprintf("calling sysctl_mib_init\n"); sysctl_mib_init(); #if NETWORKING bsd_init_kprintf("calling dlil_init\n"); dlil_init(); bsd_init_kprintf("calling proto_kpi_init\n"); proto_kpi_init(); #endif /* NETWORKING */ #if SOCKETS bsd_init_kprintf("calling socketinit\n"); socketinit(); bsd_init_kprintf("calling domaininit\n"); domaininit(); #endif /* SOCKETS */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; #if CONFIG_EMBEDDED /* Initialize kernel memory status notifications */ bsd_init_kprintf("calling kern_memorystatus_init\n"); kern_memorystatus_init(); #endif #ifdef GPROF /* Initialize kernel profiling. 
*/ kmstartup(); #endif /* kick off timeout driven events by calling first time */ thread_wakeup(&lbolt); timeout(lightning_bolt, 0, hz); bsd_init_kprintf("calling bsd_autoconf\n"); bsd_autoconf(); #if CONFIG_DTRACE dtrace_postinit(); #endif /* * We attach the loopback interface *way* down here to ensure * it happens after autoconf(), otherwise it becomes the * "primary" interface. */ #include <loop.h> #if NLOOP > 0 bsd_init_kprintf("calling loopattach\n"); loopattach(); /* XXX */ #endif #if PFLOG /* Initialize packet filter log interface */ pfloginit(); #endif /* PFLOG */ #if NETHER > 0 /* Register the built-in dlil ethernet interface family */ bsd_init_kprintf("calling ether_family_init\n"); ether_family_init(); #endif /* ETHER */ #if NETWORKING /* Call any kext code that wants to run just after network init */ bsd_init_kprintf("calling net_init_run\n"); net_init_run(); /* register user tunnel kernel control handler */ utun_register_control(); #endif /* NETWORKING */ bsd_init_kprintf("calling vnode_pager_bootstrap\n"); vnode_pager_bootstrap(); #if 0 /* XXX Hack for early debug stop */ printf("\nabout to sleep for 10 seconds\n"); IOSleep( 10 * 1000 ); /* Debugger("hello"); */ #endif bsd_init_kprintf("calling inittodr\n"); inittodr(0); #if CONFIG_EMBEDDED { /* print out early VM statistics */ kern_return_t kr1; vm_statistics_data_t stat; mach_msg_type_number_t count; count = HOST_VM_INFO_COUNT; kr1 = host_statistics(host_self(), HOST_VM_INFO, (host_info_t)&stat, &count); kprintf("Mach Virtual Memory Statistics (page size of 4096) bytes\n" "Pages free:\t\t\t%u.\n" "Pages active:\t\t\t%u.\n" "Pages inactive:\t\t\t%u.\n" "Pages wired down:\t\t%u.\n" "\"Translation faults\":\t\t%u.\n" "Pages copy-on-write:\t\t%u.\n" "Pages zero filled:\t\t%u.\n" "Pages reactivated:\t\t%u.\n" "Pageins:\t\t\t%u.\n" "Pageouts:\t\t\t%u.\n" "Object cache: %u hits of %u lookups (%d%% hit rate)\n", stat.free_count, stat.active_count, stat.inactive_count, stat.wire_count, stat.faults, stat.cow_faults, stat.zero_fill_count, stat.reactivations, stat.pageins, stat.pageouts, stat.hits, stat.lookups, (stat.hits == 0) ? 100 : ((stat.lookups * 100) / stat.hits)); } #endif /* CONFIG_EMBEDDED */ /* Mount the root file system. */ while( TRUE) { int err; bsd_init_kprintf("calling setconf\n"); setconf(); bsd_init_kprintf("vfs_mountroot\n"); if (0 == (err = vfs_mountroot())) break; rootdevice[0] = '\0'; #if NFSCLIENT if (mountroot == netboot_mountroot) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: failed to mount network root, error %d, %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } #endif printf("cannot mount root, errno = %d\n", err); boothowto |= RB_ASKNAME; } IOSecureBSDRoot(rootdevice); context.vc_thread = current_thread(); context.vc_ucred = kernproc->p_ucred; mountlist.tqh_first->mnt_flag |= MNT_ROOTFS; bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. Set fdp->fd_fd.fd_cdir to reference it. 
*/ if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); rootvnode->v_flag |= VROOT; (void)vnode_ref(rootvnode); (void)vnode_put(rootvnode); filedesc0.fd_cdir = rootvnode; #if NFSCLIENT if (mountroot == netboot_mountroot) { int err; /* post mount setup */ if ((err = netboot_setup()) != 0) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: NetBoot could not find root, error %d: %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } } #endif #if CONFIG_IMAGEBOOT /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it */ if(imageboot_needed()) { int err; /* An image was found */ if((err = imageboot_setup())) { /* * this is not fatal. Keep trying to root * off the original media */ printf("%s: imageboot could not find root, %d\n", __FUNCTION__, err); } } #endif /* CONFIG_IMAGEBOOT */ /* set initial time; all other resource data is already zero'ed */ microtime(&kernproc->p_start); kernproc->p_stats->p_start = kernproc->p_start; /* for compat */ #if DEVFS { char mounthere[] = "/dev"; /* !const because of internal casting */ bsd_init_kprintf("calling devfs_kernel_mount\n"); devfs_kernel_mount(mounthere); } #endif /* DEVFS */ /* Initialize signal state for process 0. */ bsd_init_kprintf("calling siginit\n"); siginit(kernproc); bsd_init_kprintf("calling bsd_utaskbootstrap\n"); bsd_utaskbootstrap(); #if defined(__LP64__) kernproc->p_flag |= P_LP64; printf("Kernel is LP64\n"); #endif #if __i386__ || __x86_64__ /* this should be done after the root filesystem is mounted */ error = set_archhandler(kernproc, CPU_TYPE_POWERPC); // 10/30/08 - gab: <rdar://problem/6324501> // if default 'translate' can't be found, see if the understudy is available if (ENOENT == error) { strlcpy(exec_archhandler_ppc.path, kRosettaStandIn_str, MAXPATHLEN); error = set_archhandler(kernproc, CPU_TYPE_POWERPC); } if (error) /* XXX make more generic */ exec_archhandler_ppc.path[0] = 0; #endif bsd_init_kprintf("calling mountroot_post_hook\n"); /* invoke post-root-mount hook */ if (mountroot_post_hook != NULL) mountroot_post_hook(); #if 0 /* not yet */ consider_zone_gc(FALSE); #endif bsd_init_kprintf("done\n"); }
void domaininit(void) { register struct domain *dp; /* * allocate lock group attribute and group for domain mutexes */ domain_proto_mtx_grp_attr = lck_grp_attr_alloc_init(); domain_proto_mtx_grp = lck_grp_alloc_init("domain", domain_proto_mtx_grp_attr); /* * allocate the lock attribute for per domain mutexes */ domain_proto_mtx_attr = lck_attr_alloc_init(); if ((domain_proto_mtx = lck_mtx_alloc_init(domain_proto_mtx_grp, domain_proto_mtx_attr)) == NULL) { printf("domaininit: can't init domain mtx for domain list\n"); return; /* we have a problem... */ } /* * Add all the static domains to the domains list */ lck_mtx_lock(domain_proto_mtx); concat_domain(&localdomain); concat_domain(&routedomain); concat_domain(&inetdomain); #if NETAT concat_domain(&atalkdomain); #endif #if INET6 concat_domain(&inet6domain); #endif #if IPSEC concat_domain(&keydomain); #endif #if NS concat_domain(&nsdomain); #endif #if ISO concat_domain(&isodomain); #endif #if CCITT concat_domain(&ccittdomain); #endif concat_domain(&ndrvdomain); concat_domain(&systemdomain); /* * Now ask them all to init (XXX including the routing domain, * see above) */ for (dp = domains; dp; dp = dp->dom_next) init_domain(dp); lck_mtx_unlock(domain_proto_mtx); timeout(pffasttimo, NULL, 1); timeout(pfslowtimo, NULL, 1); }
/* * IP6 initialization: fill in IP6 protocol switch table. * All protocols not implemented in kernel go to raw IP6 protocol handler. */ void ip6_init() { struct ip6protosw *pr; int i; struct timeval tv; #if DIAGNOSTIC if (sizeof(struct protosw) != sizeof(struct ip6protosw)) panic("sizeof(protosw) != sizeof(ip6protosw)"); #endif pr = (struct ip6protosw *)pffindproto_locked(PF_INET6, IPPROTO_RAW, SOCK_RAW); if (pr == 0) panic("ip6_init"); for (i = 0; i < IPPROTO_MAX; i++) ip6_protox[i] = pr; for (pr = (struct ip6protosw*)inet6domain.dom_protosw; pr; pr = pr->pr_next) { if(!(pr->pr_domain)) continue; /* If uninitialized, skip */ if (pr->pr_domain->dom_family == PF_INET6 && pr->pr_protocol && pr->pr_protocol != IPPROTO_RAW) { ip6_protox[pr->pr_protocol] = pr; } } ip6_mutex_grp_attr = lck_grp_attr_alloc_init(); ip6_mutex_grp = lck_grp_alloc_init("ip6", ip6_mutex_grp_attr); ip6_mutex_attr = lck_attr_alloc_init(); if ((ip6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) { panic("ip6_init: can't alloc ip6_mutex\n"); } if ((dad6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) { panic("ip6_init: can't alloc dad6_mutex\n"); } if ((nd6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) { panic("ip6_init: can't alloc nd6_mutex\n"); } if ((prefix6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) { panic("ip6_init: can't alloc prefix6_mutex\n"); } if ((scope6_mutex = lck_mtx_alloc_init(ip6_mutex_grp, ip6_mutex_attr)) == NULL) { panic("ip6_init: can't alloc scope6_mutex\n"); } inet6domain.dom_flags = DOM_REENTRANT; ip6intrq.ifq_maxlen = ip6qmaxlen; in6_ifaddr_init(); nd6_init(); frag6_init(); icmp6_init(); /* * in many cases, random() here does NOT return random number * as initialization during bootstrap time occur in fixed order. */ microtime(&tv); ip6_flow_seq = random() ^ tv.tv_usec; microtime(&tv); ip6_desync_factor = (random() ^ tv.tv_usec) % MAX_TEMP_DESYNC_FACTOR; timeout(ip6_init2, (caddr_t)0, 1 * hz); lck_mtx_unlock(domain_proto_mtx); proto_register_input(PF_INET6, ip6_proto_input, NULL, 0); lck_mtx_lock(domain_proto_mtx); }
/** * Helper function to create XNU VFS vnode object. * * @param mp Mount data structure * @param type vnode type (directory, regular file, etc) * @param pParent Parent vnode object (NULL for VBoxVFS root vnode) * @param fIsRoot Flag that indicates if created vnode object is * VBoxVFS root vnode (TRUE for VBoxVFS root vnode, FALSE * for all aother vnodes) * @param Path within Shared Folder * @param ret Returned newly created vnode * * @return 0 on success, error code otherwise */ int vboxvfs_create_vnode_internal(struct mount *mp, enum vtype type, vnode_t pParent, int fIsRoot, PSHFLSTRING Path, vnode_t *ret) { int rc; vnode_t vnode; vboxvfs_vnode_t *pVnodeData; vboxvfs_mount_t *pMount; AssertReturn(mp, EINVAL); pMount = (vboxvfs_mount_t *)vfs_fsprivate(mp); AssertReturn(pMount, EINVAL); AssertReturn(pMount->pLockGroup, EINVAL); AssertReturn(Path, EINVAL); pVnodeData = (vboxvfs_vnode_t *)RTMemAllocZ(sizeof(vboxvfs_vnode_t)); AssertReturn(pVnodeData, ENOMEM); /* Initialize private data */ pVnodeData->pHandle = SHFL_HANDLE_NIL; pVnodeData->pPath = Path; pVnodeData->pLockAttr = lck_attr_alloc_init(); if (pVnodeData->pLockAttr) { pVnodeData->pLock = lck_rw_alloc_init(pMount->pLockGroup, pVnodeData->pLockAttr); if (pVnodeData->pLock) { struct vnode_fsparam vnode_params; vnode_params.vnfs_mp = mp; vnode_params.vnfs_vtype = type; vnode_params.vnfs_str = NULL; vnode_params.vnfs_dvp = pParent; vnode_params.vnfs_fsnode = pVnodeData; /** Private data attached per xnu's vnode object */ vnode_params.vnfs_vops = g_VBoxVFSVnodeDirOpsVector; vnode_params.vnfs_markroot = fIsRoot; vnode_params.vnfs_marksystem = FALSE; vnode_params.vnfs_rdev = 0; vnode_params.vnfs_filesize = 0; vnode_params.vnfs_cnp = NULL; vnode_params.vnfs_flags = VNFS_ADDFSREF | VNFS_NOCACHE; rc = vnode_create(VNCREATE_FLAVOR, sizeof(vnode_params), &vnode_params, &vnode); if (rc == 0) *ret = vnode; return 0; } else { PDEBUG("Unable to allocate lock"); rc = ENOMEM; } lck_attr_free(pVnodeData->pLockAttr); } else { PDEBUG("Unable to allocate lock attr"); rc = ENOMEM; } return rc; }
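/*
 * vboxvfs_create_vnode_internal() above allocates one lock attribute and one
 * rw-lock per vnode, both charged to the mount's shared lock group
 * (pMount->pLockGroup).  The per-vnode teardown therefore only releases those
 * two objects plus the private data, and must not free the shared group.  A
 * sketch of that reclaim path; the function name is illustrative, RTMemFree is
 * the IPRT counterpart of RTMemAllocZ, and ownership of pPath is not addressed
 * here.
 */
static void vboxvfs_release_vnode_internal(vboxvfs_vnode_t *pVnodeData, vboxvfs_mount_t *pMount)
{
    if (pVnodeData->pLock)
        lck_rw_free(pVnodeData->pLock, pMount->pLockGroup);
    if (pVnodeData->pLockAttr)
        lck_attr_free(pVnodeData->pLockAttr);
    RTMemFree(pVnodeData);
}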
void bsd_init(void) { struct uthread *ut; unsigned int i; struct vfs_context context; kern_return_t ret; struct ucred temp_cred; struct posix_cred temp_pcred; #if NFSCLIENT || CONFIG_IMAGEBOOT boolean_t netboot = FALSE; #endif #define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */ throttle_init(); printf(copyright); bsd_init_kprintf("calling kmeminit\n"); kmeminit(); bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); #if CONFIG_DEV_KMEM bsd_init_kprintf("calling dev_kmem_init\n"); dev_kmem_init(); #endif /* Initialize kauth subsystem before instancing the first credential */ bsd_init_kprintf("calling kauth_init\n"); kauth_init(); /* Initialize process and pgrp structures. */ bsd_init_kprintf("calling procinit\n"); procinit(); /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); kernproc = &proc0; /* implicitly bzero'ed */ /* kernel_task->proc = kernproc; */ set_bsdtask_info(kernel_task,(void *)kernproc); /* give kernproc a name */ bsd_init_kprintf("calling process_name\n"); process_name("kernel_task", kernproc); /* allocate proc lock group attribute and group */ bsd_init_kprintf("calling lck_grp_attr_alloc_init\n"); proc_lck_grp_attr= lck_grp_attr_alloc_init(); proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); #if CONFIG_FINE_LOCK_GROUPS proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock", proc_lck_grp_attr); proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); #endif /* Allocate proc lock attribute */ proc_lck_attr = lck_attr_alloc_init(); #if 0 #if __PROC_INTERNAL_DEBUG lck_attr_setdebug(proc_lck_attr); #endif #endif #if CONFIG_FINE_LOCK_GROUPS proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr); #else proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr); #endif assert(bsd_simul_execs != 0); execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); execargs_cache_size = bsd_simul_execs; execargs_free_count = bsd_simul_execs; execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); if (current_task() != kernel_task) printf("bsd_init: We have a problem, " "current task is not kernel task\n"); bsd_init_kprintf("calling get_bsdthread_info\n"); ut = (uthread_t)get_bsdthread_info(current_thread()); #if CONFIG_MACF /* * Initialize the MAC Framework */ mac_policy_initbsd(); kernproc->p_mac_enforce = 0; #if defined (__i386__) || defined (__x86_64__) /* * We currently only support this on i386/x86_64, as that is the * only lock code we have instrumented so far. 
*/ check_policy_init(policy_check_flags); #endif #endif /* MAC */ /* Initialize System Override call */ init_system_override(); /* * Create process 0. */ proc_list_lock(); LIST_INSERT_HEAD(&allproc, kernproc, p_list); kernproc->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); #ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr); #else lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr); #endif /* There is no other bsd thread this point and is safe without pgrp lock */ LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist); kernproc->p_listflag |= P_LIST_INPGRP; kernproc->p_pgrpid = 0; kernproc->p_uniqueid = 0; pgrp0.pg_session = &session0; pgrp0.pg_membercnt = 1; session0.s_count = 1; session0.s_leader = kernproc; session0.s_listflags = 0; #ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr); #else lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr); #endif LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash); proc_list_unlock(); kernproc->task = kernel_task; kernproc->p_stat = SRUN; kernproc->p_flag = P_SYSTEM; kernproc->p_lflag = 0; kernproc->p_ladvflag = 0; #if DEVELOPMENT || DEBUG if (bootarg_disable_aslr) kernproc->p_flag |= P_DISABLE_ASLR; #endif kernproc->p_nice = NZERO; kernproc->p_pptr = kernproc; TAILQ_INIT(&kernproc->p_uthlist); TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list); kernproc->sigwait = FALSE; kernproc->sigwait_thread = THREAD_NULL; kernproc->exit_thread = THREAD_NULL; kernproc->p_csflags = CS_VALID; /* * Create credential. This also Initializes the audit information. */ bsd_init_kprintf("calling bzero\n"); bzero(&temp_cred, sizeof(temp_cred)); bzero(&temp_pcred, sizeof(temp_pcred)); temp_pcred.cr_ngroups = 1; /* kern_proc, shouldn't call up to DS for group membership */ temp_pcred.cr_flags = CRF_NOMEMBERD; temp_cred.cr_audit.as_aia_p = audit_default_aia_p; bsd_init_kprintf("calling kauth_cred_create\n"); /* * We have to label the temp cred before we create from it to * properly set cr_ngroups, or the create will fail. */ posix_cred_label(&temp_cred, &temp_pcred); kernproc->p_ucred = kauth_cred_create(&temp_cred); /* update cred on proc */ PROC_UPDATE_CREDS_ONPROC(kernproc); /* give the (already exisiting) initial thread a reference on it */ bsd_init_kprintf("calling kauth_cred_ref\n"); kauth_cred_ref(kernproc->p_ucred); ut->uu_context.vc_ucred = kernproc->p_ucred; ut->uu_context.vc_thread = current_thread(); TAILQ_INIT(&kernproc->p_aio_activeq); TAILQ_INIT(&kernproc->p_aio_doneq); kernproc->p_aio_total_count = 0; kernproc->p_aio_active_count = 0; bsd_init_kprintf("calling file_lock_init\n"); file_lock_init(); #if CONFIG_MACF mac_cred_label_associate_kernel(kernproc->p_ucred); #endif /* Create the file descriptor table. */ kernproc->p_fd = &filedesc0; filedesc0.fd_cmask = cmask; filedesc0.fd_knlistsize = -1; filedesc0.fd_knlist = NULL; filedesc0.fd_knhash = NULL; filedesc0.fd_knhashmask = 0; /* Create the limits structures. 
*/ kernproc->p_limit = &limit0; for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++) limit0.pl_rlimit[i].rlim_cur = limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid; limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; limit0.pl_refcnt = 1; kernproc->p_stats = &pstats0; kernproc->p_sigacts = &sigacts0; /* * Charge root for one process: launchd. */ bsd_init_kprintf("calling chgproccnt\n"); (void)chgproccnt(0, 1); /* * Allocate a kernel submap for pageable memory * for temporary copying (execve()). */ { vm_offset_t minimum; bsd_init_kprintf("calling kmem_suballoc\n"); assert(bsd_pageable_map_size != 0); ret = kmem_suballoc(kernel_map, &minimum, (vm_size_t)bsd_pageable_map_size, TRUE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD), &bsd_pageable_map); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to allocate bsd pageable map"); } /* * Initialize buffers and hash links for buffers * * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must * happen after a credential has been associated with * the kernel task. */ bsd_init_kprintf("calling bsd_bufferinit\n"); bsd_bufferinit(); /* Initialize the execve() semaphore */ bsd_init_kprintf("calling semaphore_create\n"); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to create execve semaphore"); /* * Initialize the calendar. */ bsd_init_kprintf("calling IOKitInitializeTime\n"); IOKitInitializeTime(); bsd_init_kprintf("calling ubc_init\n"); ubc_init(); /* * Initialize device-switches. */ bsd_init_kprintf("calling devsw_init() \n"); devsw_init(); /* Initialize the file systems. */ bsd_init_kprintf("calling vfsinit\n"); vfsinit(); #if CONFIG_PROC_UUID_POLICY /* Initial proc_uuid_policy subsystem */ bsd_init_kprintf("calling proc_uuid_policy_init()\n"); proc_uuid_policy_init(); #endif #if SOCKETS /* Initialize per-CPU cache allocator */ mcache_init(); /* Initialize mbuf's. */ bsd_init_kprintf("calling mbinit\n"); mbinit(); net_str_id_init(); /* for mbuf tags */ #endif /* SOCKETS */ /* * Initializes security event auditing. * XXX: Should/could this occur later? */ #if CONFIG_AUDIT bsd_init_kprintf("calling audit_init\n"); audit_init(); #endif /* Initialize kqueues */ bsd_init_kprintf("calling knote_init\n"); knote_init(); /* Initialize for async IO */ bsd_init_kprintf("calling aio_init\n"); aio_init(); /* Initialize pipes */ bsd_init_kprintf("calling pipeinit\n"); pipeinit(); /* Initialize SysV shm subsystem locks; the subsystem proper is * initialized through a sysctl. */ #if SYSV_SHM bsd_init_kprintf("calling sysv_shm_lock_init\n"); sysv_shm_lock_init(); #endif #if SYSV_SEM bsd_init_kprintf("calling sysv_sem_lock_init\n"); sysv_sem_lock_init(); #endif #if SYSV_MSG bsd_init_kprintf("sysv_msg_lock_init\n"); sysv_msg_lock_init(); #endif bsd_init_kprintf("calling pshm_lock_init\n"); pshm_lock_init(); bsd_init_kprintf("calling psem_lock_init\n"); psem_lock_init(); pthread_init(); /* POSIX Shm and Sem */ bsd_init_kprintf("calling pshm_cache_init\n"); pshm_cache_init(); bsd_init_kprintf("calling psem_cache_init\n"); psem_cache_init(); bsd_init_kprintf("calling time_zone_slock_init\n"); time_zone_slock_init(); bsd_init_kprintf("calling select_waitq_init\n"); select_waitq_init(); /* * Initialize protocols. 
Block reception of incoming packets * until everything is ready. */ bsd_init_kprintf("calling sysctl_register_fixed\n"); sysctl_register_fixed(); bsd_init_kprintf("calling sysctl_mib_init\n"); sysctl_mib_init(); #if NETWORKING bsd_init_kprintf("calling dlil_init\n"); dlil_init(); bsd_init_kprintf("calling proto_kpi_init\n"); proto_kpi_init(); #endif /* NETWORKING */ #if SOCKETS bsd_init_kprintf("calling socketinit\n"); socketinit(); bsd_init_kprintf("calling domaininit\n"); domaininit(); iptap_init(); #if FLOW_DIVERT flow_divert_init(); #endif /* FLOW_DIVERT */ #endif /* SOCKETS */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; #if CONFIG_FREEZE #ifndef CONFIG_MEMORYSTATUS #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS" #endif /* Initialise background freezing */ bsd_init_kprintf("calling memorystatus_freeze_init\n"); memorystatus_freeze_init(); #endif #if CONFIG_MEMORYSTATUS /* Initialize kernel memory status notifications */ bsd_init_kprintf("calling memorystatus_init\n"); memorystatus_init(); #endif /* CONFIG_MEMORYSTATUS */ bsd_init_kprintf("calling macx_init\n"); macx_init(); bsd_init_kprintf("calling acct_init\n"); acct_init(); #ifdef GPROF /* Initialize kernel profiling. */ kmstartup(); #endif bsd_init_kprintf("calling bsd_autoconf\n"); bsd_autoconf(); #if CONFIG_DTRACE dtrace_postinit(); #endif /* * We attach the loopback interface *way* down here to ensure * it happens after autoconf(), otherwise it becomes the * "primary" interface. */ #include <loop.h> #if NLOOP > 0 bsd_init_kprintf("calling loopattach\n"); loopattach(); /* XXX */ #endif #if NGIF /* Initialize gif interface (after lo0) */ gif_init(); #endif #if PFLOG /* Initialize packet filter log interface */ pfloginit(); #endif /* PFLOG */ #if NETHER > 0 /* Register the built-in dlil ethernet interface family */ bsd_init_kprintf("calling ether_family_init\n"); ether_family_init(); #endif /* ETHER */ #if NETWORKING /* Call any kext code that wants to run just after network init */ bsd_init_kprintf("calling net_init_run\n"); net_init_run(); #if CONTENT_FILTER cfil_init(); #endif #if PACKET_MANGLER pkt_mnglr_init(); #endif #if NECP /* Initialize Network Extension Control Policies */ necp_init(); #endif netagent_init(); /* register user tunnel kernel control handler */ utun_register_control(); #if IPSEC ipsec_register_control(); #endif /* IPSEC */ netsrc_init(); nstat_init(); tcp_cc_init(); #if MPTCP mptcp_control_register(); #endif /* MPTCP */ #endif /* NETWORKING */ bsd_init_kprintf("calling vnode_pager_bootstrap\n"); vnode_pager_bootstrap(); bsd_init_kprintf("calling inittodr\n"); inittodr(0); /* Mount the root file system. */ while( TRUE) { int err; bsd_init_kprintf("calling setconf\n"); setconf(); #if NFSCLIENT netboot = (mountroot == netboot_mountroot); #endif bsd_init_kprintf("vfs_mountroot\n"); if (0 == (err = vfs_mountroot())) break; rootdevice[0] = '\0'; #if NFSCLIENT if (netboot) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: failed to mount network root, error %d, %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } #endif printf("cannot mount root, errno = %d\n", err); boothowto |= RB_ASKNAME; } IOSecureBSDRoot(rootdevice); context.vc_thread = current_thread(); context.vc_ucred = kernproc->p_ucred; mountlist.tqh_first->mnt_flag |= MNT_ROOTFS; bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. 
Set fdp->fd_fd.fd_cdir to reference it. */ if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); rootvnode->v_flag |= VROOT; (void)vnode_ref(rootvnode); (void)vnode_put(rootvnode); filedesc0.fd_cdir = rootvnode; #if NFSCLIENT if (netboot) { int err; netboot = TRUE; /* post mount setup */ if ((err = netboot_setup()) != 0) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: NetBoot could not find root, error %d: %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } } #endif #if CONFIG_IMAGEBOOT /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it */ if (netboot == FALSE && imageboot_needed()) { /* * An image was found. No turning back: we're booted * with a kernel from the disk image. */ imageboot_setup(); } #endif /* CONFIG_IMAGEBOOT */ /* set initial time; all other resource data is already zero'ed */ microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start); #if DEVFS { char mounthere[] = "/dev"; /* !const because of internal casting */ bsd_init_kprintf("calling devfs_kernel_mount\n"); devfs_kernel_mount(mounthere); } #endif /* DEVFS */ /* Initialize signal state for process 0. */ bsd_init_kprintf("calling siginit\n"); siginit(kernproc); bsd_init_kprintf("calling bsd_utaskbootstrap\n"); bsd_utaskbootstrap(); #if defined(__LP64__) kernproc->p_flag |= P_LP64; #endif pal_kernel_announce(); bsd_init_kprintf("calling mountroot_post_hook\n"); /* invoke post-root-mount hook */ if (mountroot_post_hook != NULL) mountroot_post_hook(); #if 0 /* not yet */ consider_zone_gc(FALSE); #endif bsd_init_kprintf("done\n"); }