void
tcp_lro_init(void)
{
    int i;

    bzero(lro_flow_list, sizeof (struct lro_flow) * TCP_LRO_NUM_FLOWS);
    for (i = 0; i < TCP_LRO_FLOW_MAP; i++) {
        lro_flow_map[i] = TCP_LRO_FLOW_UNINIT;
    }

    /*
     * allocate lock group attribute, group and attribute for tcp_lro_lock
     */
    tcp_lro_mtx_grp_attr = lck_grp_attr_alloc_init();
    tcp_lro_mtx_grp = lck_grp_alloc_init("tcplro", tcp_lro_mtx_grp_attr);
    tcp_lro_mtx_attr = lck_attr_alloc_init();
    lck_mtx_init(&tcp_lro_lock, tcp_lro_mtx_grp, tcp_lro_mtx_attr);

    tcp_lro_timer = thread_call_allocate(tcp_lro_timer_proc, NULL);
    if (tcp_lro_timer == NULL) {
        panic_plain("%s: unable to allocate lro timer", __func__);
    }

    return;
}
void
flowadv_init(void)
{
    STAILQ_INIT(&fadv_list);

    /* Setup lock group and attribute for fadv_lock */
    fadv_lock_grp_attr = lck_grp_attr_alloc_init();
    fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr);
    lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL);

    fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry),
        sizeof (u_int64_t));
    fadv_zone = zinit(fadv_zone_size,
        FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME);
    if (fadv_zone == NULL) {
        panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME);
        /* NOTREACHED */
    }
    zone_change(fadv_zone, Z_EXPAND, TRUE);
    zone_change(fadv_zone, Z_CALLERACCT, FALSE);

    if (kernel_thread_start(flowadv_thread_func, NULL, &fadv_thread) !=
        KERN_SUCCESS) {
        panic("%s: couldn't create flow event advisory thread",
            __func__);
        /* NOTREACHED */
    }
    thread_deallocate(fadv_thread);
}
void
kpc_thread_init(void)
{
    kpc_thread_lckgrp_attr = lck_grp_attr_alloc_init();
    kpc_thread_lckgrp = lck_grp_alloc_init("kpc", kpc_thread_lckgrp_attr);
    lck_mtx_init(&kpc_thread_lock, kpc_thread_lckgrp, LCK_ATTR_NULL);
}
void
mutex_init_EXT(
    lck_mtx_t               *mutex,
    __unused unsigned short tag)
{
    lck_mtx_init(mutex, &LockCompatGroup, LCK_ATTR_NULL);
}
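The initializers in this collection all follow the same lck_* setup sequence. The sketch below is illustrative only; the example_* names are assumptions, not taken from any of the snippets here. It allocates a group attribute, a group, and a lock attribute, initializes a mutex with lck_mtx_init(), and tears everything down with the matching destroy/free calls that appear further down (lck_mtx_destroy in thread_create_internal, lck_grp_free/lck_attr_free/lck_grp_attr_free in nullfs_init's error path).

/* Illustrative sketch only; all example_* identifiers are hypothetical. */
static lck_grp_attr_t   *example_grp_attr;
static lck_grp_t        *example_grp;
static lck_attr_t       *example_attr;
static lck_mtx_t        example_lock;

static void
example_lock_init(void)
{
    /* Allocate group attribute, group and lock attribute, then init the mutex. */
    example_grp_attr = lck_grp_attr_alloc_init();
    example_grp = lck_grp_alloc_init("example", example_grp_attr);
    example_attr = lck_attr_alloc_init();
    lck_mtx_init(&example_lock, example_grp, example_attr);
}

static void
example_lock_fini(void)
{
    /* Tear down in reverse order of initialization. */
    lck_mtx_destroy(&example_lock, example_grp);
    lck_attr_free(example_attr);
    lck_grp_free(example_grp);
    lck_grp_attr_free(example_grp_attr);
}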
/* * "Decode" rv for use in the call to dtrace_probe() */ if (rval == ERESTART) { munged_rv0 = -1LL; /* System call will be reissued in user mode. Make DTrace report a -1 return. */ munged_rv1 = -1LL; } else if (rval != EJUSTRETURN) { if (rval) { munged_rv0 = -1LL; /* Mimic what libc will do. */ munged_rv1 = -1LL; } else { switch (sy->stsy_return_type) { case _SYSCALL_RET_INT_T: munged_rv0 = rv[0]; munged_rv1 = rv[1]; break; case _SYSCALL_RET_UINT_T: munged_rv0 = ((u_int)rv[0]); munged_rv1 = ((u_int)rv[1]); break; case _SYSCALL_RET_OFF_T: case _SYSCALL_RET_UINT64_T: munged_rv0 = *(u_int64_t *)rv; munged_rv1 = 0LL; break; case _SYSCALL_RET_ADDR_T: case _SYSCALL_RET_SIZE_T: case _SYSCALL_RET_SSIZE_T: munged_rv0 = *(user_addr_t *)rv; munged_rv1 = 0LL; break; case _SYSCALL_RET_NONE: munged_rv0 = 0LL; munged_rv1 = 0LL; break; default: munged_rv0 = 0LL; munged_rv1 = 0LL; break; } } } else { munged_rv0 = 0LL; munged_rv1 = 0LL; } (*systrace_probe)(id, munged_rv0, munged_rv0, munged_rv1, (uint64_t)rval, 0); } } #endif /* __APPLE__ */ #define SYSTRACE_SHIFT 16 #define SYSTRACE_ISENTRY(x) ((int)(x) >> SYSTRACE_SHIFT) #define SYSTRACE_SYSNUM(x) ((int)(x) & ((1 << SYSTRACE_SHIFT) - 1)) #define SYSTRACE_ENTRY(id) ((1 << SYSTRACE_SHIFT) | (id)) #define SYSTRACE_RETURN(id) (id) #if ((1 << SYSTRACE_SHIFT) <= NSYSCALL) #error 1 << SYSTRACE_SHIFT must exceed number of system calls #endif static dev_info_t *systrace_devi; static dtrace_provider_id_t systrace_id; #if !defined (__APPLE__) static void systrace_init(struct sysent *actual, systrace_sysent_t **interposed) { systrace_sysent_t *sysent = *interposed; int i; if (sysent == NULL) { *interposed = sysent = kmem_zalloc(sizeof (systrace_sysent_t) * NSYSCALL, KM_SLEEP); } for (i = 0; i < NSYSCALL; i++) { struct sysent *a = &actual[i]; systrace_sysent_t *s = &sysent[i]; if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a)) continue; if (a->sy_callc == dtrace_systrace_syscall) continue; #ifdef _SYSCALL32_IMPL if (a->sy_callc == dtrace_systrace_syscall32) continue; #endif s->stsy_underlying = a->sy_callc; } } #else #define systrace_init _systrace_init /* Avoid name clash with Darwin automagic conf symbol */ static void systrace_init(struct sysent *actual, systrace_sysent_t **interposed) { systrace_sysent_t *ssysent = *interposed; /* Avoid sysent shadow warning from bsd/sys/sysent.h */ int i; if (ssysent == NULL) { *interposed = ssysent = kmem_zalloc(sizeof (systrace_sysent_t) * NSYSCALL, KM_SLEEP); } for (i = 0; i < NSYSCALL; i++) { struct sysent *a = &actual[i]; systrace_sysent_t *s = &ssysent[i]; if (LOADABLE_SYSCALL(a) && !LOADED_SYSCALL(a)) continue; if (a->sy_callc == dtrace_systrace_syscall) continue; #ifdef _SYSCALL32_IMPL if (a->sy_callc == dtrace_systrace_syscall32) continue; #endif s->stsy_underlying = a->sy_callc; s->stsy_return_type = a->sy_return_type; } lck_mtx_init(&dtrace_systrace_lock, dtrace_lck_grp, dtrace_lck_attr); }
void
kpc_common_init(void)
{
    kpc_config_lckgrp_attr = lck_grp_attr_alloc_init();
    kpc_config_lckgrp = lck_grp_alloc_init("kpc", kpc_config_lckgrp_attr);
    lck_mtx_init(&kpc_config_lock, kpc_config_lckgrp, LCK_ATTR_NULL);
}
__private_extern__
void
hfs_chashinit_finish(struct hfsmount *hfsmp)
{
    lck_mtx_init(&hfsmp->hfs_chash_mutex, chash_lck_grp, chash_lck_attr);

    hfsmp->hfs_cnodehashtbl = hashinit(desiredvnodes / 4, M_HFSMNT,
        &hfsmp->hfs_cnodehash);
}
void
init_system_override()
{
    sys_override_mtx_grp_attr = lck_grp_attr_alloc_init();
    sys_override_mtx_grp = lck_grp_alloc_init("system_override",
        sys_override_mtx_grp_attr);
    sys_override_mtx_attr = lck_attr_alloc_init();
    lck_mtx_init(&sys_override_lock, sys_override_mtx_grp,
        sys_override_mtx_attr);
    io_throttle_assert_cnt = cpu_throttle_assert_cnt = 0;
}
void
OSMalloc_init(
    void)
{
    queue_init(&OSMalloc_tag_list);

    OSMalloc_tag_lck_grp = lck_grp_alloc_init("OSMalloc_tag", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&OSMalloc_tag_lock, OSMalloc_tag_lck_grp, LCK_ATTR_NULL);
}
/*
 * thread_call_initialize:
 *
 * Initialize this module, called
 * early during system initialization.
 */
void
thread_call_initialize(void)
{
    thread_call_t           call;
    thread_call_group_t     group = &thread_call_group0;
    kern_return_t           result;
    thread_t                thread;
    int                     i;
    spl_t                   s;

    i = sizeof (thread_call_data_t);
    thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
    zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    lck_attr_setdefault(&thread_call_lck_attr);
    lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
    lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
    lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

    queue_init(&group->pending_queue);
    queue_init(&group->delayed_queue);

    s = splsched();
    thread_call_lock_spin();

    timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

    wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
    wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

    queue_init(&thread_call_internal_queue);
    for (
            call = internal_call_storage;
            call < &internal_call_storage[internal_call_count];
            call++) {

        enqueue_tail(&thread_call_internal_queue, qe(call));
    }

    thread_call_daemon_awake = TRUE;

    thread_call_unlock();
    splx(s);

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_call_initialize");

    thread_deallocate(thread);
}
void
telemetry_init(void)
{
    kern_return_t ret;
    uint32_t      telemetry_notification_leeway;

    lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL);
    lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);

    if (!PE_parse_boot_argn("telemetry_buffer_size",
        &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
        telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
    }

    if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE)
        telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;

    ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size, VM_KERN_MEMORY_DIAG);
    if (ret != KERN_SUCCESS) {
        kprintf("Telemetry: Allocation failed: %d\n", ret);
        return;
    }
    bzero((void *) telemetry_buffer.buffer, telemetry_buffer.size);

    if (!PE_parse_boot_argn("telemetry_notification_leeway",
        &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
        /*
         * By default, notify the user to collect the buffer when there is this much space left in the buffer.
         */
        telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
    }
    if (telemetry_notification_leeway >= telemetry_buffer.size) {
        printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
            telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
        telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
    }
    telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;

    if (!PE_parse_boot_argn("telemetry_sample_rate",
        &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
        telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
    }

    /*
     * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
     */
    if (!PE_parse_boot_argn("telemetry_sample_all_tasks",
        &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {
#if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG)
        telemetry_sample_all_tasks = FALSE;
#else
        telemetry_sample_all_tasks = TRUE;
#endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */
    }

    kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
        (telemetry_sample_all_tasks) ? "all " : "",
        telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s");
}
/* Initialize the mutex governing access to the SysV msg subsystem */
__private_extern__ void
sysv_msg_lock_init( void )
{
    sysv_msg_subsys_lck_grp_attr = lck_grp_attr_alloc_init();

    sysv_msg_subsys_lck_grp = lck_grp_alloc_init("sysv_msg_subsys_lock",
        sysv_msg_subsys_lck_grp_attr);

    sysv_msg_subsys_lck_attr = lck_attr_alloc_init();
    lck_mtx_init(&sysv_msg_subsys_mutex, sysv_msg_subsys_lck_grp,
        sysv_msg_subsys_lck_attr);
}
lck_mtx_t *
lck_mtx_alloc_init(lck_grp_t *grp, lck_attr_t *attr)
{
    lck_mtx_t *lck;

    if ((lck = (lck_mtx_t *)kalloc(sizeof(lck_mtx_t))) != 0)
        lck_mtx_init(lck, grp, attr);

    return(lck);
}
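A minimal usage sketch for lck_mtx_alloc_init(): because it returns NULL when kalloc() fails, callers normally check the result before using the mutex. The my_* names below and the lck_mtx_free() teardown pairing are assumptions for illustration, not taken from the snippet above.

/* Hypothetical caller; my_grp and my_attr are assumed to have been set up elsewhere. */
static lck_mtx_t *
my_subsystem_lock_create(lck_grp_t *my_grp, lck_attr_t *my_attr)
{
    lck_mtx_t *my_mtx = lck_mtx_alloc_init(my_grp, my_attr);

    if (my_mtx == NULL)
        panic("%s: unable to allocate mutex", __func__);

    lck_mtx_lock(my_mtx);
    /* ... initialize state protected by the lock ... */
    lck_mtx_unlock(my_mtx);

    return my_mtx;
}

/* Assumed teardown counterpart: lck_mtx_free(my_mtx, my_grp); */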
int
nullfs_init_lck(lck_mtx_t * lck)
{
    int error = 1;
    if (lck && null_hashlck_grp && null_hashlck_attr) {
        lck_mtx_init(lck, null_hashlck_grp, null_hashlck_attr);
        error = 0;
    }
    return error;
}
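Illustrative only: nullfs_init_lck() reports failure (nonzero) when nullfs_init() further down has not yet created null_hashlck_grp and null_hashlck_attr, so a hypothetical caller would check its return value before relying on the mutex. All names in the sketch are assumptions.

/* Hypothetical caller (names are assumptions, not from the nullfs sources). */
struct my_null_mount_state {
    lck_mtx_t nms_lock;
};

static int
my_null_mount_state_init(struct my_null_mount_state *nms)
{
    if (nullfs_init_lck(&nms->nms_lock) != 0) {
        /* lock group/attr not initialized yet */
        return ENOTSUP;
    }
    return 0;
}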
void
kperf_bootstrap(void)
{
    kperf_cfg_lckgrp_attr = lck_grp_attr_alloc_init();
    kperf_cfg_lckgrp = lck_grp_alloc_init("kperf cfg",
        kperf_cfg_lckgrp_attr);
    lck_mtx_init(&kperf_cfg_lock, kperf_cfg_lckgrp, LCK_ATTR_NULL);

    kperf_cfg_initted = TRUE;
}
void
eventhandler_lists_ctxt_init(struct eventhandler_lists_ctxt *evthdlr_lists_ctxt)
{
    VERIFY(evthdlr_lists_ctxt != NULL);

    TAILQ_INIT(&evthdlr_lists_ctxt->eventhandler_lists);
    evthdlr_lists_ctxt->eventhandler_lists_initted = 1;
    lck_mtx_init(&evthdlr_lists_ctxt->eventhandler_mutex,
        eventhandler_mutex_grp, eventhandler_mutex_attr);
}
/*
 * thread_call_initialize:
 *
 * Initialize this module, called
 * early during system initialization.
 */
void
thread_call_initialize(void)
{
    thread_call_t           call;
    kern_return_t           result;
    thread_t                thread;
    int                     i;

    i = sizeof (thread_call_data_t);
    thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
    zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
    zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

    lck_attr_setdefault(&thread_call_lck_attr);
    lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
    lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
    lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
    lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

    nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
    wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
    thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

    disable_ints_and_lock();

    queue_init(&thread_call_internal_queue);
    for (
            call = internal_call_storage;
            call < &internal_call_storage[INTERNAL_CALL_COUNT];
            call++) {

        enqueue_tail(&thread_call_internal_queue, qe(call));
    }

    thread_call_daemon_awake = TRUE;

    enable_ints_and_unlock();

    result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
    if (result != KERN_SUCCESS)
        panic("thread_call_initialize");

    thread_deallocate(thread);
}
void
ipc_host_init(void)
{
    ipc_port_t      port;
    int i;

    lck_mtx_init(&realhost.lock, &host_notify_lock_grp, &host_notify_lock_attr);

    /*
     * Allocate and set up the host ports
     * (security, host and host-priv).
     */
    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_SECURITY);
    kernel_set_special_port(&realhost, HOST_SECURITY_PORT,
        ipc_port_make_send(port));

    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST);
    kernel_set_special_port(&realhost, HOST_PORT,
        ipc_port_make_send(port));

    port = ipc_port_alloc_kernel();
    if (port == IP_NULL)
        panic("ipc_host_init");

    ipc_kobject_set(port, (ipc_kobject_t) &realhost, IKOT_HOST_PRIV);
    kernel_set_special_port(&realhost, HOST_PRIV_PORT,
        ipc_port_make_send(port));

    /* the rest of the special ports will be set up later */

    for (i = FIRST_EXCEPTION; i < EXC_TYPES_COUNT; i++) {
        realhost.exc_actions[i].port = IP_NULL;
    }/* for */

    /*
     * Set up ipc for default processor set.
     */
    ipc_pset_init(&pset0);
    ipc_pset_enable(&pset0);

    /*
     * And for master processor
     */
    ipc_processor_init(master_processor);
    ipc_processor_enable(master_processor);
}
/*
 * Initialise cache headers
 */
int
nullfs_init(__unused struct vfsconf * vfsp)
{
    NULLFSDEBUG("%s\n", __FUNCTION__);

    /* assuming for now that this happens immediately and by default after fs
     * installation */
    null_hashlck_grp_attr = lck_grp_attr_alloc_init();
    if (null_hashlck_grp_attr == NULL) {
        goto error;
    }
    null_hashlck_grp = lck_grp_alloc_init("com.apple.filesystems.nullfs", null_hashlck_grp_attr);
    if (null_hashlck_grp == NULL) {
        goto error;
    }
    null_hashlck_attr = lck_attr_alloc_init();
    if (null_hashlck_attr == NULL) {
        goto error;
    }

    lck_mtx_init(&null_hashmtx, null_hashlck_grp, null_hashlck_attr);
    null_node_hashtbl = hashinit(NULL_HASH_SIZE, M_TEMP, &null_hash_mask);
    NULLFSDEBUG("%s finished\n", __FUNCTION__);
    return (0);
error:
    printf("NULLFS: failed to get lock element\n");
    if (null_hashlck_grp_attr) {
        lck_grp_attr_free(null_hashlck_grp_attr);
        null_hashlck_grp_attr = NULL;
    }
    if (null_hashlck_grp) {
        lck_grp_free(null_hashlck_grp);
        null_hashlck_grp = NULL;
    }
    if (null_hashlck_attr) {
        lck_attr_free(null_hashlck_attr);
        null_hashlck_attr = NULL;
    }
    return KERN_FAILURE;
}
/*
 * Creates a new reason and initializes it with the provided reason
 * namespace and code. Also sets up the buffer and kcdata_descriptor
 * associated with the reason. Returns a pointer to the newly created
 * reason.
 *
 * Returns:
 * OS_REASON_NULL if unable to allocate a reason or initialize the nested buffer
 * a pointer to the reason otherwise
 */
os_reason_t
os_reason_create(uint32_t osr_namespace, uint64_t osr_code)
{
    os_reason_t new_reason = OS_REASON_NULL;

    new_reason = (os_reason_t) zalloc(os_reason_zone);
    if (new_reason == OS_REASON_NULL) {
#if OS_REASON_DEBUG
        /*
         * We rely on OS reasons to communicate important things such
         * as process exit reason information, we should be aware
         * when issues prevent us from allocating them.
         */
        if (os_reason_debug_disabled) {
            kprintf("os_reason_create: failed to allocate reason with namespace: %u, code : %llu\n",
                osr_namespace, osr_code);
        } else {
            panic("os_reason_create: failed to allocate reason with namespace: %u, code: %llu\n",
                osr_namespace, osr_code);
        }
#endif
        return new_reason;
    }

    bzero(new_reason, sizeof(*new_reason));

    new_reason->osr_namespace = osr_namespace;
    new_reason->osr_code = osr_code;
    new_reason->osr_flags = 0;
    new_reason->osr_bufsize = 0;
    new_reason->osr_kcd_buf = NULL;

    lck_mtx_init(&new_reason->osr_lock, os_reason_lock_grp, os_reason_lock_attr);
    new_reason->osr_refcount = 1;

    return new_reason;
}
/*
 * Initialise reassembly queue and fragment identifier.
 */
void
frag6_init(void)
{
    /* ip6q_alloc() uses mbufs for IPv6 fragment queue structures */
    _CASSERT(sizeof (struct ip6q) <= _MLEN);
    /* ip6af_alloc() uses mbufs for IPv6 fragment queue structures */
    _CASSERT(sizeof (struct ip6asfrag) <= _MLEN);

    /* IPv6 fragment reassembly queue lock */
    ip6qlock_grp_attr = lck_grp_attr_alloc_init();
    ip6qlock_grp = lck_grp_alloc_init("ip6qlock", ip6qlock_grp_attr);
    ip6qlock_attr = lck_attr_alloc_init();
    lck_mtx_init(&ip6qlock, ip6qlock_grp, ip6qlock_attr);

    lck_mtx_lock(&ip6qlock);
    /* Initialize IPv6 reassembly queue. */
    ip6q.ip6q_next = ip6q.ip6q_prev = &ip6q;

    /* same limits as IPv4 */
    ip6_maxfragpackets = nmbclusters / 32;
    ip6_maxfrags = ip6_maxfragpackets * 2;
    ip6q_updateparams();
    lck_mtx_unlock(&ip6qlock);
}
/* * This function is called very early on in the Mach startup, from the * function start_kernel_threads() in osfmk/kern/startup.c. It's called * in the context of the current (startup) task using a call to the * function kernel_thread_create() to jump into start_kernel_threads(). * Internally, kernel_thread_create() calls thread_create_internal(), * which calls uthread_alloc(). The function of uthread_alloc() is * normally to allocate a uthread structure, and fill out the uu_sigmask, * uu_context fields. It skips filling these out in the case of the "task" * being "kernel_task", because the order of operation is inverted. To * account for that, we need to manually fill in at least the contents * of the uu_context.vc_ucred field so that the uthread structure can be * used like any other. */ void bsd_init(void) { struct uthread *ut; unsigned int i; #if __i386__ || __x86_64__ int error; #endif struct vfs_context context; kern_return_t ret; struct ucred temp_cred; #define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */ kernel_flock = funnel_alloc(KERNEL_FUNNEL); if (kernel_flock == (funnel_t *)0 ) { panic("bsd_init: Failed to allocate kernel funnel"); } printf(copyright); bsd_init_kprintf("calling kmeminit\n"); kmeminit(); bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); /* Initialize kauth subsystem before instancing the first credential */ bsd_init_kprintf("calling kauth_init\n"); kauth_init(); /* Initialize process and pgrp structures. */ bsd_init_kprintf("calling procinit\n"); procinit(); /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); kernproc = &proc0; /* implicitly bzero'ed */ /* kernel_task->proc = kernproc; */ set_bsdtask_info(kernel_task,(void *)kernproc); /* give kernproc a name */ bsd_init_kprintf("calling process_name\n"); process_name("kernel_task", kernproc); /* allocate proc lock group attribute and group */ bsd_init_kprintf("calling lck_grp_attr_alloc_init\n"); proc_lck_grp_attr= lck_grp_attr_alloc_init(); proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); #ifndef CONFIG_EMBEDDED proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); #endif /* Allocate proc lock attribute */ proc_lck_attr = lck_attr_alloc_init(); #if 0 #if __PROC_INTERNAL_DEBUG lck_attr_setdebug(proc_lck_attr); #endif #endif #ifdef CONFIG_EMBEDDED proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr); #else proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr); #endif execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); execargs_cache_size = bsd_simul_execs; execargs_free_count = bsd_simul_execs; execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); if (current_task() != kernel_task) printf("bsd_init: We have a problem, " "current task is not 
kernel task\n"); bsd_init_kprintf("calling get_bsdthread_info\n"); ut = (uthread_t)get_bsdthread_info(current_thread()); #if CONFIG_MACF /* * Initialize the MAC Framework */ mac_policy_initbsd(); kernproc->p_mac_enforce = 0; #endif /* MAC */ /* * Create process 0. */ proc_list_lock(); LIST_INSERT_HEAD(&allproc, kernproc, p_list); kernproc->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); #ifdef CONFIG_EMBEDDED lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr); #else lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr); #endif /* There is no other bsd thread this point and is safe without pgrp lock */ LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist); kernproc->p_listflag |= P_LIST_INPGRP; kernproc->p_pgrpid = 0; pgrp0.pg_session = &session0; pgrp0.pg_membercnt = 1; session0.s_count = 1; session0.s_leader = kernproc; session0.s_listflags = 0; #ifdef CONFIG_EMBEDDED lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr); #else lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr); #endif LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash); proc_list_unlock(); #if CONFIG_LCTX kernproc->p_lctx = NULL; #endif kernproc->task = kernel_task; kernproc->p_stat = SRUN; kernproc->p_flag = P_SYSTEM; kernproc->p_nice = NZERO; kernproc->p_pptr = kernproc; TAILQ_INIT(&kernproc->p_uthlist); TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list); kernproc->sigwait = FALSE; kernproc->sigwait_thread = THREAD_NULL; kernproc->exit_thread = THREAD_NULL; kernproc->p_csflags = CS_VALID; /* * Create credential. This also Initializes the audit information. */ bsd_init_kprintf("calling bzero\n"); bzero(&temp_cred, sizeof(temp_cred)); temp_cred.cr_ngroups = 1; temp_cred.cr_audit.as_aia_p = &audit_default_aia; /* XXX the following will go away with cr_au */ temp_cred.cr_au.ai_auid = AU_DEFAUDITID; bsd_init_kprintf("calling kauth_cred_create\n"); kernproc->p_ucred = kauth_cred_create(&temp_cred); /* give the (already exisiting) initial thread a reference on it */ bsd_init_kprintf("calling kauth_cred_ref\n"); kauth_cred_ref(kernproc->p_ucred); ut->uu_context.vc_ucred = kernproc->p_ucred; ut->uu_context.vc_thread = current_thread(); TAILQ_INIT(&kernproc->p_aio_activeq); TAILQ_INIT(&kernproc->p_aio_doneq); kernproc->p_aio_total_count = 0; kernproc->p_aio_active_count = 0; bsd_init_kprintf("calling file_lock_init\n"); file_lock_init(); #if CONFIG_MACF mac_cred_label_associate_kernel(kernproc->p_ucred); mac_task_label_update_cred (kernproc->p_ucred, (struct task *) kernproc->task); #endif /* Create the file descriptor table. */ filedesc0.fd_refcnt = 1+1; /* +1 so shutdown will not _FREE_ZONE */ kernproc->p_fd = &filedesc0; filedesc0.fd_cmask = cmask; filedesc0.fd_knlistsize = -1; filedesc0.fd_knlist = NULL; filedesc0.fd_knhash = NULL; filedesc0.fd_knhashmask = 0; /* Create the limits structures. */ kernproc->p_limit = &limit0; for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++) limit0.pl_rlimit[i].rlim_cur = limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid; limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; limit0.pl_refcnt = 1; kernproc->p_stats = &pstats0; kernproc->p_sigacts = &sigacts0; /* * Charge root for two processes: init and mach_init. 
*/ bsd_init_kprintf("calling chgproccnt\n"); (void)chgproccnt(0, 1); /* * Allocate a kernel submap for pageable memory * for temporary copying (execve()). */ { vm_offset_t minimum; bsd_init_kprintf("calling kmem_suballoc\n"); ret = kmem_suballoc(kernel_map, &minimum, (vm_size_t)bsd_pageable_map_size, TRUE, VM_FLAGS_ANYWHERE, &bsd_pageable_map); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to allocate bsd pageable map"); } /* * Initialize buffers and hash links for buffers * * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must * happen after a credential has been associated with * the kernel task. */ bsd_init_kprintf("calling bsd_bufferinit\n"); bsd_bufferinit(); /* Initialize the execve() semaphore */ bsd_init_kprintf("calling semaphore_create\n"); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to create execve semaphore"); /* * Initialize the calendar. */ bsd_init_kprintf("calling IOKitInitializeTime\n"); IOKitInitializeTime(); if (turn_on_log_leaks && !new_nkdbufs) new_nkdbufs = 200000; start_kern_tracing(new_nkdbufs); if (turn_on_log_leaks) log_leaks = 1; bsd_init_kprintf("calling ubc_init\n"); ubc_init(); /* Initialize the file systems. */ bsd_init_kprintf("calling vfsinit\n"); vfsinit(); #if SOCKETS /* Initialize per-CPU cache allocator */ mcache_init(); /* Initialize mbuf's. */ bsd_init_kprintf("calling mbinit\n"); mbinit(); net_str_id_init(); /* for mbuf tags */ #endif /* SOCKETS */ /* * Initializes security event auditing. * XXX: Should/could this occur later? */ #if CONFIG_AUDIT bsd_init_kprintf("calling audit_init\n"); audit_init(); #endif /* Initialize kqueues */ bsd_init_kprintf("calling knote_init\n"); knote_init(); /* Initialize for async IO */ bsd_init_kprintf("calling aio_init\n"); aio_init(); /* Initialize pipes */ bsd_init_kprintf("calling pipeinit\n"); pipeinit(); /* Initialize SysV shm subsystem locks; the subsystem proper is * initialized through a sysctl. */ #if SYSV_SHM bsd_init_kprintf("calling sysv_shm_lock_init\n"); sysv_shm_lock_init(); #endif #if SYSV_SEM bsd_init_kprintf("calling sysv_sem_lock_init\n"); sysv_sem_lock_init(); #endif #if SYSV_MSG bsd_init_kprintf("sysv_msg_lock_init\n"); sysv_msg_lock_init(); #endif bsd_init_kprintf("calling pshm_lock_init\n"); pshm_lock_init(); bsd_init_kprintf("calling psem_lock_init\n"); psem_lock_init(); pthread_init(); /* POSIX Shm and Sem */ bsd_init_kprintf("calling pshm_cache_init\n"); pshm_cache_init(); bsd_init_kprintf("calling psem_cache_init\n"); psem_cache_init(); bsd_init_kprintf("calling time_zone_slock_init\n"); time_zone_slock_init(); /* Stack snapshot facility lock */ stackshot_lock_init(); /* * Initialize protocols. Block reception of incoming packets * until everything is ready. */ bsd_init_kprintf("calling sysctl_register_fixed\n"); sysctl_register_fixed(); bsd_init_kprintf("calling sysctl_mib_init\n"); sysctl_mib_init(); #if NETWORKING bsd_init_kprintf("calling dlil_init\n"); dlil_init(); bsd_init_kprintf("calling proto_kpi_init\n"); proto_kpi_init(); #endif /* NETWORKING */ #if SOCKETS bsd_init_kprintf("calling socketinit\n"); socketinit(); bsd_init_kprintf("calling domaininit\n"); domaininit(); #endif /* SOCKETS */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; #if CONFIG_EMBEDDED /* Initialize kernel memory status notifications */ bsd_init_kprintf("calling kern_memorystatus_init\n"); kern_memorystatus_init(); #endif #ifdef GPROF /* Initialize kernel profiling. 
*/ kmstartup(); #endif /* kick off timeout driven events by calling first time */ thread_wakeup(&lbolt); timeout(lightning_bolt, 0, hz); bsd_init_kprintf("calling bsd_autoconf\n"); bsd_autoconf(); #if CONFIG_DTRACE dtrace_postinit(); #endif /* * We attach the loopback interface *way* down here to ensure * it happens after autoconf(), otherwise it becomes the * "primary" interface. */ #include <loop.h> #if NLOOP > 0 bsd_init_kprintf("calling loopattach\n"); loopattach(); /* XXX */ #endif #if PFLOG /* Initialize packet filter log interface */ pfloginit(); #endif /* PFLOG */ #if NETHER > 0 /* Register the built-in dlil ethernet interface family */ bsd_init_kprintf("calling ether_family_init\n"); ether_family_init(); #endif /* ETHER */ #if NETWORKING /* Call any kext code that wants to run just after network init */ bsd_init_kprintf("calling net_init_run\n"); net_init_run(); /* register user tunnel kernel control handler */ utun_register_control(); #endif /* NETWORKING */ bsd_init_kprintf("calling vnode_pager_bootstrap\n"); vnode_pager_bootstrap(); #if 0 /* XXX Hack for early debug stop */ printf("\nabout to sleep for 10 seconds\n"); IOSleep( 10 * 1000 ); /* Debugger("hello"); */ #endif bsd_init_kprintf("calling inittodr\n"); inittodr(0); #if CONFIG_EMBEDDED { /* print out early VM statistics */ kern_return_t kr1; vm_statistics_data_t stat; mach_msg_type_number_t count; count = HOST_VM_INFO_COUNT; kr1 = host_statistics(host_self(), HOST_VM_INFO, (host_info_t)&stat, &count); kprintf("Mach Virtual Memory Statistics (page size of 4096) bytes\n" "Pages free:\t\t\t%u.\n" "Pages active:\t\t\t%u.\n" "Pages inactive:\t\t\t%u.\n" "Pages wired down:\t\t%u.\n" "\"Translation faults\":\t\t%u.\n" "Pages copy-on-write:\t\t%u.\n" "Pages zero filled:\t\t%u.\n" "Pages reactivated:\t\t%u.\n" "Pageins:\t\t\t%u.\n" "Pageouts:\t\t\t%u.\n" "Object cache: %u hits of %u lookups (%d%% hit rate)\n", stat.free_count, stat.active_count, stat.inactive_count, stat.wire_count, stat.faults, stat.cow_faults, stat.zero_fill_count, stat.reactivations, stat.pageins, stat.pageouts, stat.hits, stat.lookups, (stat.hits == 0) ? 100 : ((stat.lookups * 100) / stat.hits)); } #endif /* CONFIG_EMBEDDED */ /* Mount the root file system. */ while( TRUE) { int err; bsd_init_kprintf("calling setconf\n"); setconf(); bsd_init_kprintf("vfs_mountroot\n"); if (0 == (err = vfs_mountroot())) break; rootdevice[0] = '\0'; #if NFSCLIENT if (mountroot == netboot_mountroot) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: failed to mount network root, error %d, %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } #endif printf("cannot mount root, errno = %d\n", err); boothowto |= RB_ASKNAME; } IOSecureBSDRoot(rootdevice); context.vc_thread = current_thread(); context.vc_ucred = kernproc->p_ucred; mountlist.tqh_first->mnt_flag |= MNT_ROOTFS; bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. Set fdp->fd_fd.fd_cdir to reference it. 
*/ if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); rootvnode->v_flag |= VROOT; (void)vnode_ref(rootvnode); (void)vnode_put(rootvnode); filedesc0.fd_cdir = rootvnode; #if NFSCLIENT if (mountroot == netboot_mountroot) { int err; /* post mount setup */ if ((err = netboot_setup()) != 0) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: NetBoot could not find root, error %d: %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } } #endif #if CONFIG_IMAGEBOOT /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it */ if(imageboot_needed()) { int err; /* An image was found */ if((err = imageboot_setup())) { /* * this is not fatal. Keep trying to root * off the original media */ printf("%s: imageboot could not find root, %d\n", __FUNCTION__, err); } } #endif /* CONFIG_IMAGEBOOT */ /* set initial time; all other resource data is already zero'ed */ microtime(&kernproc->p_start); kernproc->p_stats->p_start = kernproc->p_start; /* for compat */ #if DEVFS { char mounthere[] = "/dev"; /* !const because of internal casting */ bsd_init_kprintf("calling devfs_kernel_mount\n"); devfs_kernel_mount(mounthere); } #endif /* DEVFS */ /* Initialize signal state for process 0. */ bsd_init_kprintf("calling siginit\n"); siginit(kernproc); bsd_init_kprintf("calling bsd_utaskbootstrap\n"); bsd_utaskbootstrap(); #if defined(__LP64__) kernproc->p_flag |= P_LP64; printf("Kernel is LP64\n"); #endif #if __i386__ || __x86_64__ /* this should be done after the root filesystem is mounted */ error = set_archhandler(kernproc, CPU_TYPE_POWERPC); // 10/30/08 - gab: <rdar://problem/6324501> // if default 'translate' can't be found, see if the understudy is available if (ENOENT == error) { strlcpy(exec_archhandler_ppc.path, kRosettaStandIn_str, MAXPATHLEN); error = set_archhandler(kernproc, CPU_TYPE_POWERPC); } if (error) /* XXX make more generic */ exec_archhandler_ppc.path[0] = 0; #endif bsd_init_kprintf("calling mountroot_post_hook\n"); /* invoke post-root-mount hook */ if (mountroot_post_hook != NULL) mountroot_post_hook(); #if 0 /* not yet */ consider_zone_gc(FALSE); #endif bsd_init_kprintf("done\n"); }
/* * Create a new thread. * Doesn't start the thread running. */ static kern_return_t thread_create_internal( task_t parent_task, integer_t priority, thread_continue_t continuation, int options, #define TH_OPTION_NONE 0x00 #define TH_OPTION_NOCRED 0x01 #define TH_OPTION_NOSUSP 0x02 thread_t *out_thread) { thread_t new_thread; static thread_t first_thread = THREAD_NULL; /* * Allocate a thread and initialize static fields */ if (first_thread == THREAD_NULL) new_thread = first_thread = current_thread(); new_thread = (thread_t)zalloc(thread_zone); if (new_thread == THREAD_NULL) return (KERN_RESOURCE_SHORTAGE); if (new_thread != first_thread) *new_thread = thread_template; #ifdef MACH_BSD new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0); if (new_thread->uthread == NULL) { zfree(thread_zone, new_thread); return (KERN_RESOURCE_SHORTAGE); } #endif /* MACH_BSD */ if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) { #ifdef MACH_BSD void *ut = new_thread->uthread; new_thread->uthread = NULL; /* cred free may not be necessary */ uthread_cleanup(parent_task, ut, parent_task->bsd_info); uthread_cred_free(ut); uthread_zone_free(ut); #endif /* MACH_BSD */ zfree(thread_zone, new_thread); return (KERN_FAILURE); } new_thread->task = parent_task; thread_lock_init(new_thread); wake_lock_init(new_thread); lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr); ipc_thread_init(new_thread); queue_init(&new_thread->held_ulocks); new_thread->continuation = continuation; lck_mtx_lock(&tasks_threads_lock); task_lock(parent_task); if ( !parent_task->active || parent_task->halting || ((options & TH_OPTION_NOSUSP) != 0 && parent_task->suspend_count > 0) || (parent_task->thread_count >= task_threadmax && parent_task != kernel_task) ) { task_unlock(parent_task); lck_mtx_unlock(&tasks_threads_lock); #ifdef MACH_BSD { void *ut = new_thread->uthread; new_thread->uthread = NULL; uthread_cleanup(parent_task, ut, parent_task->bsd_info); /* cred free may not be necessary */ uthread_cred_free(ut); uthread_zone_free(ut); } #endif /* MACH_BSD */ ipc_thread_disable(new_thread); ipc_thread_terminate(new_thread); lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp); machine_thread_destroy(new_thread); zfree(thread_zone, new_thread); return (KERN_FAILURE); } /* New threads inherit any default state on the task */ machine_thread_inherit_taskwide(new_thread, parent_task); task_reference_internal(parent_task); if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) { /* * This task has a per-thread CPU limit; make sure this new thread * gets its limit set too, before it gets out of the kernel. 
*/ set_astledger(new_thread); } new_thread->t_threadledger = LEDGER_NULL; /* per thread ledger is not inherited */ new_thread->t_ledger = new_thread->task->ledger; if (new_thread->t_ledger) ledger_reference(new_thread->t_ledger); /* Cache the task's map */ new_thread->map = parent_task->map; /* Chain the thread onto the task's list */ queue_enter(&parent_task->threads, new_thread, thread_t, task_threads); parent_task->thread_count++; /* So terminating threads don't need to take the task lock to decrement */ hw_atomic_add(&parent_task->active_thread_count, 1); /* Protected by the tasks_threads_lock */ new_thread->thread_id = ++thread_unique_id; queue_enter(&threads, new_thread, thread_t, threads); threads_count++; timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread); timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread); #if CONFIG_COUNTERS /* * If parent task has any reservations, they need to be propagated to this * thread. */ new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? THREAD_PMC_FLAG : 0U; #endif /* Set the thread's scheduling parameters */ new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task); new_thread->sched_flags = 0; new_thread->max_priority = parent_task->max_priority; new_thread->task_priority = parent_task->priority; new_thread->priority = (priority < 0)? parent_task->priority: priority; if (new_thread->priority > new_thread->max_priority) new_thread->priority = new_thread->max_priority; #if CONFIG_EMBEDDED if (new_thread->priority < MAXPRI_THROTTLE) { new_thread->priority = MAXPRI_THROTTLE; } #endif /* CONFIG_EMBEDDED */ new_thread->importance = new_thread->priority - new_thread->task_priority; #if CONFIG_EMBEDDED new_thread->saved_importance = new_thread->importance; /* apple ios daemon starts all threads in darwin background */ if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) { /* Cannot use generic routines here so apply darwin bacground directly */ new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL; /* set thread self backgrounding */ new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg; /* priority will get recomputed suitably bit later */ new_thread->importance = INT_MIN; /* to avoid changes to many pri compute routines, set the effect of those here */ new_thread->priority = MAXPRI_THROTTLE; } #endif /* CONFIG_EMBEDDED */ #if defined(CONFIG_SCHED_TRADITIONAL) new_thread->sched_stamp = sched_tick; new_thread->pri_shift = sched_pri_shift; #endif SCHED(compute_priority)(new_thread, FALSE); new_thread->active = TRUE; *out_thread = new_thread; { long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; kdbg_trace_data(parent_task->bsd_info, &dbg_arg2); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE, (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0); kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE, dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); } DTRACE_PROC1(lwp__create, thread_t, *out_thread); return (KERN_SUCCESS); }
void kalloc_init( void) { kern_return_t retval; vm_offset_t min; vm_size_t size, kalloc_map_size; register int i; /* * Scale the kalloc_map_size to physical memory size: stay below * 1/8th the total zone map size, or 128 MB (for a 32-bit kernel). */ kalloc_map_size = (vm_size_t)(sane_size >> 5); #if !__LP64__ if (kalloc_map_size > KALLOC_MAP_SIZE_MAX) kalloc_map_size = KALLOC_MAP_SIZE_MAX; #endif /* !__LP64__ */ if (kalloc_map_size < KALLOC_MAP_SIZE_MIN) kalloc_map_size = KALLOC_MAP_SIZE_MIN; retval = kmem_suballoc(kernel_map, &min, kalloc_map_size, FALSE, VM_FLAGS_ANYWHERE | VM_FLAGS_PERMANENT, &kalloc_map); if (retval != KERN_SUCCESS) panic("kalloc_init: kmem_suballoc failed"); kalloc_map_min = min; kalloc_map_max = min + kalloc_map_size - 1; /* * Ensure that zones up to size 8192 bytes exist. * This is desirable because messages are allocated * with kalloc, and messages up through size 8192 are common. */ if (PAGE_SIZE < 16*1024) kalloc_max = 16*1024; else kalloc_max = PAGE_SIZE; kalloc_max_prerounded = kalloc_max / 2 + 1; /* size it to be more than 16 times kalloc_max (256k) for allocations from kernel map */ kalloc_kernmap_size = (kalloc_max * 16) + 1; kalloc_largest_allocated = kalloc_kernmap_size; /* * Allocate a zone for each size we are going to handle. * We specify non-paged memory. Don't charge the caller * for the allocation, as we aren't sure how the memory * will be handled. */ for (i = 0; (size = k_zone_size[i]) < kalloc_max; i++) { k_zone[i] = zinit(size, k_zone_max[i] * size, size, k_zone_name[i]); zone_change(k_zone[i], Z_CALLERACCT, FALSE); } /* * Build the Direct LookUp Table for small allocations */ for (i = 0, size = 0; i <= N_K_ZDLUT; i++, size += KALLOC_MINALIGN) { int zindex = 0; while ((vm_size_t)k_zone_size[zindex] < size) zindex++; if (i == N_K_ZDLUT) { k_zindex_start = zindex; break; } k_zone_dlut[i] = (int8_t)zindex; } #ifdef KALLOC_DEBUG printf("kalloc_init: k_zindex_start %d\n", k_zindex_start); /* * Do a quick synthesis to see how well/badly we can * find-a-zone for a given size. * Useful when debugging/tweaking the array of zone sizes. * Cache misses probably more critical than compare-branches! */ for (i = 0; i < (int)N_K_ZONE; i++) { vm_size_t testsize = (vm_size_t)k_zone_size[i] - 1; int compare = 0; int zindex; if (testsize < MAX_SIZE_ZDLUT) { compare += 1; /* 'if' (T) */ long dindex = INDEX_ZDLUT(testsize); zindex = (int)k_zone_dlut[dindex]; } else if (testsize < kalloc_max_prerounded) { compare += 2; /* 'if' (F), 'if' (T) */ zindex = k_zindex_start; while ((vm_size_t)k_zone_size[zindex] < testsize) { zindex++; compare++; /* 'while' (T) */ } compare++; /* 'while' (F) */ } else break; /* not zone-backed */ zone_t z = k_zone[zindex]; printf("kalloc_init: req size %4lu: %11s took %d compare%s\n", (unsigned long)testsize, z->zone_name, compare, compare == 1 ? "" : "s"); } #endif kalloc_lck_grp = lck_grp_alloc_init("kalloc.large", LCK_GRP_ATTR_NULL); lck_mtx_init(&kalloc_lock, kalloc_lck_grp, LCK_ATTR_NULL); OSMalloc_init(); #ifdef MUTEX_ZONE lck_mtx_zone = zinit(sizeof(struct _lck_mtx_), 1024*256, 4096, "lck_mtx"); #endif }
/* * forkproc * * Description: Create a new process structure, given a parent process * structure. * * Parameters: parent_proc The parent process * * Returns: !NULL The new process structure * NULL Error (insufficient free memory) * * Note: When successful, the newly created process structure is * partially initialized; if a caller needs to deconstruct the * returned structure, they must call forkproc_free() to do so. */ proc_t forkproc(proc_t parent_proc) { proc_t child_proc; /* Our new process */ static int nextpid = 0, pidwrap = 0, nextpidversion = 0; int error = 0; struct session *sessp; uthread_t parent_uthread = (uthread_t)get_bsdthread_info(current_thread()); MALLOC_ZONE(child_proc, proc_t , sizeof *child_proc, M_PROC, M_WAITOK); if (child_proc == NULL) { printf("forkproc: M_PROC zone exhausted\n"); goto bad; } /* zero it out as we need to insert in hash */ bzero(child_proc, sizeof *child_proc); MALLOC_ZONE(child_proc->p_stats, struct pstats *, sizeof *child_proc->p_stats, M_PSTATS, M_WAITOK); if (child_proc->p_stats == NULL) { printf("forkproc: M_SUBPROC zone exhausted (p_stats)\n"); FREE_ZONE(child_proc, sizeof *child_proc, M_PROC); child_proc = NULL; goto bad; } MALLOC_ZONE(child_proc->p_sigacts, struct sigacts *, sizeof *child_proc->p_sigacts, M_SIGACTS, M_WAITOK); if (child_proc->p_sigacts == NULL) { printf("forkproc: M_SUBPROC zone exhausted (p_sigacts)\n"); FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS); FREE_ZONE(child_proc, sizeof *child_proc, M_PROC); child_proc = NULL; goto bad; } /* allocate a callout for use by interval timers */ child_proc->p_rcall = thread_call_allocate((thread_call_func_t)realitexpire, child_proc); if (child_proc->p_rcall == NULL) { FREE_ZONE(child_proc->p_sigacts, sizeof *child_proc->p_sigacts, M_SIGACTS); FREE_ZONE(child_proc->p_stats, sizeof *child_proc->p_stats, M_PSTATS); FREE_ZONE(child_proc, sizeof *child_proc, M_PROC); child_proc = NULL; goto bad; } /* * Find an unused PID. */ proc_list_lock(); nextpid++; retry: /* * If the process ID prototype has wrapped around, * restart somewhat above 0, as the low-numbered procs * tend to include daemons that don't exit. */ if (nextpid >= PID_MAX) { nextpid = 100; pidwrap = 1; } if (pidwrap != 0) { /* if the pid stays in hash both for zombie and runniing state */ if (pfind_locked(nextpid) != PROC_NULL) { nextpid++; goto retry; } if (pgfind_internal(nextpid) != PGRP_NULL) { nextpid++; goto retry; } if (session_find_internal(nextpid) != SESSION_NULL) { nextpid++; goto retry; } } nprocs++; child_proc->p_pid = nextpid; child_proc->p_idversion = nextpidversion++; #if 1 if (child_proc->p_pid != 0) { if (pfind_locked(child_proc->p_pid) != PROC_NULL) panic("proc in the list already\n"); } #endif /* Insert in the hash */ child_proc->p_listflag |= (P_LIST_INHASH | P_LIST_INCREATE); LIST_INSERT_HEAD(PIDHASH(child_proc->p_pid), child_proc, p_hash); proc_list_unlock(); /* * We've identified the PID we are going to use; initialize the new * process structure. */ child_proc->p_stat = SIDL; child_proc->p_pgrpid = PGRPID_DEAD; /* * The zero'ing of the proc was at the allocation time due to need * for insertion to hash. Copy the section that is to be copied * directly from the parent. */ bcopy(&parent_proc->p_startcopy, &child_proc->p_startcopy, (unsigned) ((caddr_t)&child_proc->p_endcopy - (caddr_t)&child_proc->p_startcopy)); /* * Some flags are inherited from the parent. * Duplicate sub-structures as needed. * Increase reference counts on shared objects. 
* The p_stats and p_sigacts substructs are set in vm_fork. */ child_proc->p_flag = (parent_proc->p_flag & (P_LP64 | P_TRANSLATED | P_AFFINITY)); if (parent_proc->p_flag & P_PROFIL) startprofclock(child_proc); /* * Note that if the current thread has an assumed identity, this * credential will be granted to the new process. */ child_proc->p_ucred = kauth_cred_get_with_ref(); #ifdef CONFIG_EMBEDDED lck_mtx_init(&child_proc->p_mlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&child_proc->p_fdmlock, proc_lck_grp, proc_lck_attr); #if CONFIG_DTRACE lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr); #endif lck_spin_init(&child_proc->p_slock, proc_lck_grp, proc_lck_attr); #else /* !CONFIG_EMBEDDED */ lck_mtx_init(&child_proc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&child_proc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); #if CONFIG_DTRACE lck_mtx_init(&child_proc->p_dtrace_sprlock, proc_lck_grp, proc_lck_attr); #endif lck_spin_init(&child_proc->p_slock, proc_slock_grp, proc_lck_attr); #endif /* !CONFIG_EMBEDDED */ klist_init(&child_proc->p_klist); if (child_proc->p_textvp != NULLVP) { /* bump references to the text vnode */ /* Need to hold iocount across the ref call */ if (vnode_getwithref(child_proc->p_textvp) == 0) { error = vnode_ref(child_proc->p_textvp); vnode_put(child_proc->p_textvp); if (error != 0) child_proc->p_textvp = NULLVP; } } /* * Copy the parents per process open file table to the child; if * there is a per-thread current working directory, set the childs * per-process current working directory to that instead of the * parents. * * XXX may fail to copy descriptors to child */ child_proc->p_fd = fdcopy(parent_proc, parent_uthread->uu_cdir); #if SYSV_SHM if (parent_proc->vm_shm) { /* XXX may fail to attach shm to child */ (void)shmfork(parent_proc, child_proc); } #endif /* * inherit the limit structure to child */ proc_limitfork(parent_proc, child_proc); if (child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur != RLIM_INFINITY) { uint64_t rlim_cur = child_proc->p_limit->pl_rlimit[RLIMIT_CPU].rlim_cur; child_proc->p_rlim_cpu.tv_sec = (rlim_cur > __INT_MAX__) ? __INT_MAX__ : rlim_cur; } /* Intialize new process stats, including start time */ /* <rdar://6640543> non-zeroed portion contains garbage AFAICT */ bzero(&child_proc->p_stats->pstat_startzero, (unsigned) ((caddr_t)&child_proc->p_stats->pstat_endzero - (caddr_t)&child_proc->p_stats->pstat_startzero)); bzero(&child_proc->p_stats->user_p_prof, sizeof(struct user_uprof)); microtime(&child_proc->p_start); child_proc->p_stats->p_start = child_proc->p_start; /* for compat */ if (parent_proc->p_sigacts != NULL) (void)memcpy(child_proc->p_sigacts, parent_proc->p_sigacts, sizeof *child_proc->p_sigacts); else (void)memset(child_proc->p_sigacts, 0, sizeof *child_proc->p_sigacts); sessp = proc_session(parent_proc); if (sessp->s_ttyvp != NULL && parent_proc->p_flag & P_CONTROLT) OSBitOrAtomic(P_CONTROLT, &child_proc->p_flag); session_rele(sessp); /* * block all signals to reach the process. * no transition race should be occuring with the child yet, * but indicate that the process is in (the creation) transition. 
*/ proc_signalstart(child_proc, 0); proc_transstart(child_proc, 0); child_proc->p_pcaction = (parent_proc->p_pcaction) & P_PCMAX; TAILQ_INIT(&child_proc->p_uthlist); TAILQ_INIT(&child_proc->p_aio_activeq); TAILQ_INIT(&child_proc->p_aio_doneq); /* Inherit the parent flags for code sign */ child_proc->p_csflags = parent_proc->p_csflags; /* * All processes have work queue locks; cleaned up by * reap_child_locked() */ workqueue_init_lock(child_proc); /* * Copy work queue information * * Note: This should probably only happen in the case where we are * creating a child that is a copy of the parent; since this * routine is called in the non-duplication case of vfork() * or posix_spawn(), then this information should likely not * be duplicated. * * <rdar://6640553> Work queue pointers that no longer point to code */ child_proc->p_wqthread = parent_proc->p_wqthread; child_proc->p_threadstart = parent_proc->p_threadstart; child_proc->p_pthsize = parent_proc->p_pthsize; child_proc->p_targconc = parent_proc->p_targconc; if ((parent_proc->p_lflag & P_LREGISTER) != 0) { child_proc->p_lflag |= P_LREGISTER; } child_proc->p_dispatchqueue_offset = parent_proc->p_dispatchqueue_offset; #if PSYNCH pth_proc_hashinit(child_proc); #endif /* PSYNCH */ #if CONFIG_LCTX child_proc->p_lctx = NULL; /* Add new process to login context (if any). */ if (parent_proc->p_lctx != NULL) { /* * <rdar://6640564> This should probably be delayed in the * vfork() or posix_spawn() cases. */ LCTX_LOCK(parent_proc->p_lctx); enterlctx(child_proc, parent_proc->p_lctx, 0); } #endif bad: return(child_proc); }
void bsd_init(void) { struct uthread *ut; unsigned int i; struct vfs_context context; kern_return_t ret; struct ucred temp_cred; struct posix_cred temp_pcred; #if NFSCLIENT || CONFIG_IMAGEBOOT boolean_t netboot = FALSE; #endif #define bsd_init_kprintf(x...) /* kprintf("bsd_init: " x) */ throttle_init(); printf(copyright); bsd_init_kprintf("calling kmeminit\n"); kmeminit(); bsd_init_kprintf("calling parse_bsd_args\n"); parse_bsd_args(); #if CONFIG_DEV_KMEM bsd_init_kprintf("calling dev_kmem_init\n"); dev_kmem_init(); #endif /* Initialize kauth subsystem before instancing the first credential */ bsd_init_kprintf("calling kauth_init\n"); kauth_init(); /* Initialize process and pgrp structures. */ bsd_init_kprintf("calling procinit\n"); procinit(); /* Initialize the ttys (MUST be before kminit()/bsd_autoconf()!)*/ tty_init(); kernproc = &proc0; /* implicitly bzero'ed */ /* kernel_task->proc = kernproc; */ set_bsdtask_info(kernel_task,(void *)kernproc); /* give kernproc a name */ bsd_init_kprintf("calling process_name\n"); process_name("kernel_task", kernproc); /* allocate proc lock group attribute and group */ bsd_init_kprintf("calling lck_grp_attr_alloc_init\n"); proc_lck_grp_attr= lck_grp_attr_alloc_init(); proc_lck_grp = lck_grp_alloc_init("proc", proc_lck_grp_attr); #if CONFIG_FINE_LOCK_GROUPS proc_slock_grp = lck_grp_alloc_init("proc-slock", proc_lck_grp_attr); proc_fdmlock_grp = lck_grp_alloc_init("proc-fdmlock", proc_lck_grp_attr); proc_ucred_mlock_grp = lck_grp_alloc_init("proc-ucred-mlock", proc_lck_grp_attr); proc_mlock_grp = lck_grp_alloc_init("proc-mlock", proc_lck_grp_attr); #endif /* Allocate proc lock attribute */ proc_lck_attr = lck_attr_alloc_init(); #if 0 #if __PROC_INTERNAL_DEBUG lck_attr_setdebug(proc_lck_attr); #endif #endif #if CONFIG_FINE_LOCK_GROUPS proc_list_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_mlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_fdmlock_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_ucred_mlock, proc_ucred_mlock_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_slock_grp, proc_lck_attr); #else proc_list_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); proc_klist_mlock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_mlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_fdmlock, proc_lck_grp, proc_lck_attr); lck_mtx_init(&kernproc->p_ucred_mlock, proc_lck_grp, proc_lck_attr); lck_spin_init(&kernproc->p_slock, proc_lck_grp, proc_lck_attr); #endif assert(bsd_simul_execs != 0); execargs_cache_lock = lck_mtx_alloc_init(proc_lck_grp, proc_lck_attr); execargs_cache_size = bsd_simul_execs; execargs_free_count = bsd_simul_execs; execargs_cache = (vm_offset_t *)kalloc(bsd_simul_execs * sizeof(vm_offset_t)); bzero(execargs_cache, bsd_simul_execs * sizeof(vm_offset_t)); if (current_task() != kernel_task) printf("bsd_init: We have a problem, " "current task is not kernel task\n"); bsd_init_kprintf("calling get_bsdthread_info\n"); ut = (uthread_t)get_bsdthread_info(current_thread()); #if CONFIG_MACF /* * Initialize the MAC Framework */ mac_policy_initbsd(); kernproc->p_mac_enforce = 0; #if defined (__i386__) || defined (__x86_64__) /* * We currently only support this on i386/x86_64, as that is the * only lock code we have instrumented so far. 
*/ check_policy_init(policy_check_flags); #endif #endif /* MAC */ /* Initialize System Override call */ init_system_override(); /* * Create process 0. */ proc_list_lock(); LIST_INSERT_HEAD(&allproc, kernproc, p_list); kernproc->p_pgrp = &pgrp0; LIST_INSERT_HEAD(PGRPHASH(0), &pgrp0, pg_hash); LIST_INIT(&pgrp0.pg_members); #ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&pgrp0.pg_mlock, proc_mlock_grp, proc_lck_attr); #else lck_mtx_init(&pgrp0.pg_mlock, proc_lck_grp, proc_lck_attr); #endif /* There is no other bsd thread this point and is safe without pgrp lock */ LIST_INSERT_HEAD(&pgrp0.pg_members, kernproc, p_pglist); kernproc->p_listflag |= P_LIST_INPGRP; kernproc->p_pgrpid = 0; kernproc->p_uniqueid = 0; pgrp0.pg_session = &session0; pgrp0.pg_membercnt = 1; session0.s_count = 1; session0.s_leader = kernproc; session0.s_listflags = 0; #ifdef CONFIG_FINE_LOCK_GROUPS lck_mtx_init(&session0.s_mlock, proc_mlock_grp, proc_lck_attr); #else lck_mtx_init(&session0.s_mlock, proc_lck_grp, proc_lck_attr); #endif LIST_INSERT_HEAD(SESSHASH(0), &session0, s_hash); proc_list_unlock(); kernproc->task = kernel_task; kernproc->p_stat = SRUN; kernproc->p_flag = P_SYSTEM; kernproc->p_lflag = 0; kernproc->p_ladvflag = 0; #if DEVELOPMENT || DEBUG if (bootarg_disable_aslr) kernproc->p_flag |= P_DISABLE_ASLR; #endif kernproc->p_nice = NZERO; kernproc->p_pptr = kernproc; TAILQ_INIT(&kernproc->p_uthlist); TAILQ_INSERT_TAIL(&kernproc->p_uthlist, ut, uu_list); kernproc->sigwait = FALSE; kernproc->sigwait_thread = THREAD_NULL; kernproc->exit_thread = THREAD_NULL; kernproc->p_csflags = CS_VALID; /* * Create credential. This also Initializes the audit information. */ bsd_init_kprintf("calling bzero\n"); bzero(&temp_cred, sizeof(temp_cred)); bzero(&temp_pcred, sizeof(temp_pcred)); temp_pcred.cr_ngroups = 1; /* kern_proc, shouldn't call up to DS for group membership */ temp_pcred.cr_flags = CRF_NOMEMBERD; temp_cred.cr_audit.as_aia_p = audit_default_aia_p; bsd_init_kprintf("calling kauth_cred_create\n"); /* * We have to label the temp cred before we create from it to * properly set cr_ngroups, or the create will fail. */ posix_cred_label(&temp_cred, &temp_pcred); kernproc->p_ucred = kauth_cred_create(&temp_cred); /* update cred on proc */ PROC_UPDATE_CREDS_ONPROC(kernproc); /* give the (already exisiting) initial thread a reference on it */ bsd_init_kprintf("calling kauth_cred_ref\n"); kauth_cred_ref(kernproc->p_ucred); ut->uu_context.vc_ucred = kernproc->p_ucred; ut->uu_context.vc_thread = current_thread(); TAILQ_INIT(&kernproc->p_aio_activeq); TAILQ_INIT(&kernproc->p_aio_doneq); kernproc->p_aio_total_count = 0; kernproc->p_aio_active_count = 0; bsd_init_kprintf("calling file_lock_init\n"); file_lock_init(); #if CONFIG_MACF mac_cred_label_associate_kernel(kernproc->p_ucred); #endif /* Create the file descriptor table. */ kernproc->p_fd = &filedesc0; filedesc0.fd_cmask = cmask; filedesc0.fd_knlistsize = -1; filedesc0.fd_knlist = NULL; filedesc0.fd_knhash = NULL; filedesc0.fd_knhashmask = 0; /* Create the limits structures. 
*/ kernproc->p_limit = &limit0; for (i = 0; i < sizeof(kernproc->p_rlimit)/sizeof(kernproc->p_rlimit[0]); i++) limit0.pl_rlimit[i].rlim_cur = limit0.pl_rlimit[i].rlim_max = RLIM_INFINITY; limit0.pl_rlimit[RLIMIT_NOFILE].rlim_cur = NOFILE; limit0.pl_rlimit[RLIMIT_NPROC].rlim_cur = maxprocperuid; limit0.pl_rlimit[RLIMIT_NPROC].rlim_max = maxproc; limit0.pl_rlimit[RLIMIT_STACK] = vm_initial_limit_stack; limit0.pl_rlimit[RLIMIT_DATA] = vm_initial_limit_data; limit0.pl_rlimit[RLIMIT_CORE] = vm_initial_limit_core; limit0.pl_refcnt = 1; kernproc->p_stats = &pstats0; kernproc->p_sigacts = &sigacts0; /* * Charge root for one process: launchd. */ bsd_init_kprintf("calling chgproccnt\n"); (void)chgproccnt(0, 1); /* * Allocate a kernel submap for pageable memory * for temporary copying (execve()). */ { vm_offset_t minimum; bsd_init_kprintf("calling kmem_suballoc\n"); assert(bsd_pageable_map_size != 0); ret = kmem_suballoc(kernel_map, &minimum, (vm_size_t)bsd_pageable_map_size, TRUE, VM_FLAGS_ANYWHERE | VM_MAKE_TAG(VM_KERN_MEMORY_BSD), &bsd_pageable_map); if (ret != KERN_SUCCESS) panic("bsd_init: Failed to allocate bsd pageable map"); } /* * Initialize buffers and hash links for buffers * * SIDE EFFECT: Starts a thread for bcleanbuf_thread(), so must * happen after a credential has been associated with * the kernel task. */ bsd_init_kprintf("calling bsd_bufferinit\n"); bsd_bufferinit(); /* Initialize the execve() semaphore */ bsd_init_kprintf("calling semaphore_create\n"); /* XXX: no semaphore is created here; ret still holds the kmem_suballoc() result, which was already checked above, so this test can never fire */ if (ret != KERN_SUCCESS) panic("bsd_init: Failed to create execve semaphore"); /* * Initialize the calendar. */ bsd_init_kprintf("calling IOKitInitializeTime\n"); IOKitInitializeTime(); bsd_init_kprintf("calling ubc_init\n"); ubc_init(); /* * Initialize device-switches. */ bsd_init_kprintf("calling devsw_init() \n"); devsw_init(); /* Initialize the file systems. */ bsd_init_kprintf("calling vfsinit\n"); vfsinit(); #if CONFIG_PROC_UUID_POLICY /* Initialize the proc_uuid_policy subsystem */ bsd_init_kprintf("calling proc_uuid_policy_init()\n"); proc_uuid_policy_init(); #endif #if SOCKETS /* Initialize per-CPU cache allocator */ mcache_init(); /* Initialize mbuf's. */ bsd_init_kprintf("calling mbinit\n"); mbinit(); net_str_id_init(); /* for mbuf tags */ #endif /* SOCKETS */ /* * Initializes security event auditing. * XXX: Should/could this occur later? */ #if CONFIG_AUDIT bsd_init_kprintf("calling audit_init\n"); audit_init(); #endif /* Initialize kqueues */ bsd_init_kprintf("calling knote_init\n"); knote_init(); /* Initialize for async IO */ bsd_init_kprintf("calling aio_init\n"); aio_init(); /* Initialize pipes */ bsd_init_kprintf("calling pipeinit\n"); pipeinit(); /* Initialize SysV shm subsystem locks; the subsystem proper is * initialized through a sysctl. */ #if SYSV_SHM bsd_init_kprintf("calling sysv_shm_lock_init\n"); sysv_shm_lock_init(); #endif #if SYSV_SEM bsd_init_kprintf("calling sysv_sem_lock_init\n"); sysv_sem_lock_init(); #endif #if SYSV_MSG bsd_init_kprintf("calling sysv_msg_lock_init\n"); sysv_msg_lock_init(); #endif bsd_init_kprintf("calling pshm_lock_init\n"); pshm_lock_init(); bsd_init_kprintf("calling psem_lock_init\n"); psem_lock_init(); pthread_init(); /* POSIX Shm and Sem */ bsd_init_kprintf("calling pshm_cache_init\n"); pshm_cache_init(); bsd_init_kprintf("calling psem_cache_init\n"); psem_cache_init(); bsd_init_kprintf("calling time_zone_slock_init\n"); time_zone_slock_init(); bsd_init_kprintf("calling select_waitq_init\n"); select_waitq_init(); /* * Initialize protocols.
Block reception of incoming packets * until everything is ready. */ bsd_init_kprintf("calling sysctl_register_fixed\n"); sysctl_register_fixed(); bsd_init_kprintf("calling sysctl_mib_init\n"); sysctl_mib_init(); #if NETWORKING bsd_init_kprintf("calling dlil_init\n"); dlil_init(); bsd_init_kprintf("calling proto_kpi_init\n"); proto_kpi_init(); #endif /* NETWORKING */ #if SOCKETS bsd_init_kprintf("calling socketinit\n"); socketinit(); bsd_init_kprintf("calling domaininit\n"); domaininit(); iptap_init(); #if FLOW_DIVERT flow_divert_init(); #endif /* FLOW_DIVERT */ #endif /* SOCKETS */ kernproc->p_fd->fd_cdir = NULL; kernproc->p_fd->fd_rdir = NULL; #if CONFIG_FREEZE #ifndef CONFIG_MEMORYSTATUS #error "CONFIG_FREEZE defined without matching CONFIG_MEMORYSTATUS" #endif /* Initialise background freezing */ bsd_init_kprintf("calling memorystatus_freeze_init\n"); memorystatus_freeze_init(); #endif #if CONFIG_MEMORYSTATUS /* Initialize kernel memory status notifications */ bsd_init_kprintf("calling memorystatus_init\n"); memorystatus_init(); #endif /* CONFIG_MEMORYSTATUS */ bsd_init_kprintf("calling macx_init\n"); macx_init(); bsd_init_kprintf("calling acct_init\n"); acct_init(); #ifdef GPROF /* Initialize kernel profiling. */ kmstartup(); #endif bsd_init_kprintf("calling bsd_autoconf\n"); bsd_autoconf(); #if CONFIG_DTRACE dtrace_postinit(); #endif /* * We attach the loopback interface *way* down here to ensure * it happens after autoconf(), otherwise it becomes the * "primary" interface. */ #include <loop.h> #if NLOOP > 0 bsd_init_kprintf("calling loopattach\n"); loopattach(); /* XXX */ #endif #if NGIF /* Initialize gif interface (after lo0) */ gif_init(); #endif #if PFLOG /* Initialize packet filter log interface */ pfloginit(); #endif /* PFLOG */ #if NETHER > 0 /* Register the built-in dlil ethernet interface family */ bsd_init_kprintf("calling ether_family_init\n"); ether_family_init(); #endif /* ETHER */ #if NETWORKING /* Call any kext code that wants to run just after network init */ bsd_init_kprintf("calling net_init_run\n"); net_init_run(); #if CONTENT_FILTER cfil_init(); #endif #if PACKET_MANGLER pkt_mnglr_init(); #endif #if NECP /* Initialize Network Extension Control Policies */ necp_init(); #endif netagent_init(); /* register user tunnel kernel control handler */ utun_register_control(); #if IPSEC ipsec_register_control(); #endif /* IPSEC */ netsrc_init(); nstat_init(); tcp_cc_init(); #if MPTCP mptcp_control_register(); #endif /* MPTCP */ #endif /* NETWORKING */ bsd_init_kprintf("calling vnode_pager_bootstrap\n"); vnode_pager_bootstrap(); bsd_init_kprintf("calling inittodr\n"); inittodr(0); /* Mount the root file system. */ while( TRUE) { int err; bsd_init_kprintf("calling setconf\n"); setconf(); #if NFSCLIENT netboot = (mountroot == netboot_mountroot); #endif bsd_init_kprintf("vfs_mountroot\n"); if (0 == (err = vfs_mountroot())) break; rootdevice[0] = '\0'; #if NFSCLIENT if (netboot) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: failed to mount network root, error %d, %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } #endif printf("cannot mount root, errno = %d\n", err); boothowto |= RB_ASKNAME; } IOSecureBSDRoot(rootdevice); context.vc_thread = current_thread(); context.vc_ucred = kernproc->p_ucred; mountlist.tqh_first->mnt_flag |= MNT_ROOTFS; bsd_init_kprintf("calling VFS_ROOT\n"); /* Get the vnode for '/'. 
Set fdp->fd_fd.fd_cdir to reference it. */ if (VFS_ROOT(mountlist.tqh_first, &rootvnode, &context)) panic("bsd_init: cannot find root vnode: %s", PE_boot_args()); rootvnode->v_flag |= VROOT; (void)vnode_ref(rootvnode); (void)vnode_put(rootvnode); filedesc0.fd_cdir = rootvnode; #if NFSCLIENT if (netboot) { int err; netboot = TRUE; /* post mount setup */ if ((err = netboot_setup()) != 0) { PE_display_icon( 0, "noroot"); /* XXX a netboot-specific icon would be nicer */ vc_progress_set(FALSE, 0); for (i=1; 1; i*=2) { printf("bsd_init: NetBoot could not find root, error %d: %s\n", err, PE_boot_args()); printf("We are hanging here...\n"); IOSleep(i*60*1000); } /*NOTREACHED*/ } } #endif #if CONFIG_IMAGEBOOT /* * See if a system disk image is present. If so, mount it and * switch the root vnode to point to it */ if (netboot == FALSE && imageboot_needed()) { /* * An image was found. No turning back: we're booted * with a kernel from the disk image. */ imageboot_setup(); } #endif /* CONFIG_IMAGEBOOT */ /* set initial time; all other resource data is already zero'ed */ microtime_with_abstime(&kernproc->p_start, &kernproc->p_stats->ps_start); #if DEVFS { char mounthere[] = "/dev"; /* !const because of internal casting */ bsd_init_kprintf("calling devfs_kernel_mount\n"); devfs_kernel_mount(mounthere); } #endif /* DEVFS */ /* Initialize signal state for process 0. */ bsd_init_kprintf("calling siginit\n"); siginit(kernproc); bsd_init_kprintf("calling bsd_utaskbootstrap\n"); bsd_utaskbootstrap(); #if defined(__LP64__) kernproc->p_flag |= P_LP64; #endif pal_kernel_announce(); bsd_init_kprintf("calling mountroot_post_hook\n"); /* invoke post-root-mount hook */ if (mountroot_post_hook != NULL) mountroot_post_hook(); #if 0 /* not yet */ consider_zone_gc(FALSE); #endif bsd_init_kprintf("done\n"); }
void scope6_init(lck_grp_t *grp, lck_attr_t *attr) { bzero(&sid_default, sizeof(sid_default)); lck_mtx_init(&scope6_lock, grp, attr); }
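/*
 * Usage sketch (hypothetical caller, not the real IPv6 init path): unlike the
 * subsystems above that allocate their own lock groups, scope6_init() borrows
 * whatever group/attribute the caller passes in, so the caller owns them and
 * would typically share one pair across all of its locks.
 */
static void
example_inet6_locks_init(void)
{
	lck_grp_attr_t	*grp_attr = lck_grp_attr_alloc_init();
	lck_grp_t	*grp = lck_grp_alloc_init("example_inet6", grp_attr);
	lck_attr_t	*attr = lck_attr_alloc_init();

	scope6_init(grp, attr);	/* clears sid_default and initializes scope6_lock */
}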
/* * Create a new thread. * Doesn't start the thread running. */ static kern_return_t thread_create_internal( task_t parent_task, integer_t priority, thread_continue_t continuation, int options, #define TH_OPTION_NONE 0x00 #define TH_OPTION_NOCRED 0x01 #define TH_OPTION_NOSUSP 0x02 thread_t *out_thread) { thread_t new_thread; static thread_t first_thread; /* * Allocate a thread and initialize static fields */ if (first_thread == THREAD_NULL) new_thread = first_thread = current_thread(); else new_thread = (thread_t)zalloc(thread_zone); if (new_thread == THREAD_NULL) return (KERN_RESOURCE_SHORTAGE); if (new_thread != first_thread) *new_thread = thread_template; #ifdef MACH_BSD new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0); if (new_thread->uthread == NULL) { zfree(thread_zone, new_thread); return (KERN_RESOURCE_SHORTAGE); } #endif /* MACH_BSD */ if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) { #ifdef MACH_BSD void *ut = new_thread->uthread; new_thread->uthread = NULL; /* cred free may not be necessary */ uthread_cleanup(parent_task, ut, parent_task->bsd_info); uthread_cred_free(ut); uthread_zone_free(ut); #endif /* MACH_BSD */ zfree(thread_zone, new_thread); return (KERN_FAILURE); } new_thread->task = parent_task; thread_lock_init(new_thread); wake_lock_init(new_thread); lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr); ipc_thread_init(new_thread); queue_init(&new_thread->held_ulocks); new_thread->continuation = continuation; lck_mtx_lock(&tasks_threads_lock); task_lock(parent_task); if ( !parent_task->active || parent_task->halting || ((options & TH_OPTION_NOSUSP) != 0 && parent_task->suspend_count > 0) || (parent_task->thread_count >= task_threadmax && parent_task != kernel_task) ) { task_unlock(parent_task); lck_mtx_unlock(&tasks_threads_lock); #ifdef MACH_BSD { void *ut = new_thread->uthread; new_thread->uthread = NULL; uthread_cleanup(parent_task, ut, parent_task->bsd_info); /* cred free may not be necessary */ uthread_cred_free(ut); uthread_zone_free(ut); } #endif /* MACH_BSD */ ipc_thread_disable(new_thread); ipc_thread_terminate(new_thread); lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp); machine_thread_destroy(new_thread); zfree(thread_zone, new_thread); return (KERN_FAILURE); } /* New threads inherit any default state on the task */ machine_thread_inherit_taskwide(new_thread, parent_task); task_reference_internal(parent_task); /* Cache the task's map */ new_thread->map = parent_task->map; /* Chain the thread onto the task's list */ queue_enter(&parent_task->threads, new_thread, thread_t, task_threads); parent_task->thread_count++; /* So terminating threads don't need to take the task lock to decrement */ hw_atomic_add(&parent_task->active_thread_count, 1); /* Protected by the tasks_threads_lock */ new_thread->thread_id = ++thread_unique_id; queue_enter(&threads, new_thread, thread_t, threads); threads_count++; timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread); timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread); #if CONFIG_COUNTERS /* * If parent task has any reservations, they need to be propagated to this * thread. */ new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? 
THREAD_PMC_FLAG : 0U; #endif /* Set the thread's scheduling parameters */ if (parent_task != kernel_task) new_thread->sched_mode |= TH_MODE_TIMESHARE; new_thread->max_priority = parent_task->max_priority; new_thread->task_priority = parent_task->priority; new_thread->priority = (priority < 0)? parent_task->priority: priority; if (new_thread->priority > new_thread->max_priority) new_thread->priority = new_thread->max_priority; new_thread->importance = new_thread->priority - new_thread->task_priority; new_thread->sched_stamp = sched_tick; new_thread->pri_shift = sched_pri_shift; compute_priority(new_thread, FALSE); new_thread->active = TRUE; *out_thread = new_thread; { long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4; kdbg_trace_data(parent_task->bsd_info, &dbg_arg2); KERNEL_DEBUG_CONSTANT( TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE, (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0); kdbg_trace_string(parent_task->bsd_info, &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4); KERNEL_DEBUG_CONSTANT( TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE, dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0); } DTRACE_PROC1(lwp__create, thread_t, *out_thread); return (KERN_SUCCESS); }
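/*
 * A minimal caller sketch (hypothetical, not the actual thread_create() wrapper):
 * thread_create_internal() returns on success with both tasks_threads_lock and
 * the parent task lock still held, and the new thread is not yet running, so the
 * caller must drop those locks before doing anything that might block or before
 * starting the thread.
 */
static kern_return_t
example_create_thread(task_t task, thread_t *out_thread)
{
	kern_return_t	result;
	thread_t	thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	/* A negative priority means "inherit the task's priority". */
	result = thread_create_internal(task, -1,
	    (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	/* These locks were taken inside thread_create_internal() on the success path. */
	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*out_thread = thread;
	return (KERN_SUCCESS);
}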