/*
 *	ROUTINE:	lock_set_init	[private]
 *
 *	Initialize the lock_set subsystem.
 *
 *	Sets up the default group attribute, the "lock_set" lock group,
 *	and the default lock attribute used by all lock sets.  Called
 *	once, presumably during early system startup — confirm with caller.
 */
void
lock_set_init(void)
{
	lck_grp_attr_setdefault(&lock_set_grp_attr);
	lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr);
	lck_attr_setdefault(&lock_set_attr);
}
/* setup interrupt sample buffers */ int kperf_init(void) { static lck_grp_attr_t lck_grp_attr; lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED); unsigned ncpus = 0; int err; if (kperf_initted) { return 0; } lck_grp_attr_setdefault(&lck_grp_attr); lck_grp_init(&kperf_lck_grp, "kperf", &lck_grp_attr); ncpus = machine_info.logical_cpu_max; /* create buffers to remember which threads don't need to be sampled by PET */ kperf_thread_on_cpus = kalloc_tag(ncpus * sizeof(*kperf_thread_on_cpus), VM_KERN_MEMORY_DIAG); if (kperf_thread_on_cpus == NULL) { err = ENOMEM; goto error; } bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus)); /* create the interrupt buffers */ intr_samplec = ncpus; intr_samplev = kalloc_tag(ncpus * sizeof(*intr_samplev), VM_KERN_MEMORY_DIAG); if (intr_samplev == NULL) { err = ENOMEM; goto error; } bzero(intr_samplev, ncpus * sizeof(*intr_samplev)); /* create kdebug trigger filter buffers */ if ((err = kperf_kdebug_init())) { goto error; } kperf_initted = TRUE; return 0; error: if (intr_samplev) { kfree(intr_samplev, ncpus * sizeof(*intr_samplev)); intr_samplev = NULL; intr_samplec = 0; } if (kperf_thread_on_cpus) { kfree(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus)); kperf_thread_on_cpus = NULL; } return err; }
/*
 * Initialize the timer_call module: set up its lock attribute/group
 * and precompute the absolute-time form of the past-deadline
 * adjustment constant.
 */
void
timer_call_initialize(void)
{
	lck_attr_setdefault(&timer_call_lck_attr);
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);

	/* convert the nanosecond adjustment once, up front */
	nanotime_to_absolutetime(0, PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
}
/* * thread_call_initialize: * * Initialize this module, called * early during system initialization. */ void thread_call_initialize(void) { thread_call_t call; thread_call_group_t group = &thread_call_group0; kern_return_t result; thread_t thread; int i; spl_t s; i = sizeof (thread_call_data_t); thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call"); zone_change(thread_call_zone, Z_CALLERACCT, FALSE); zone_change(thread_call_zone, Z_NOENCRYPT, TRUE); lck_attr_setdefault(&thread_call_lck_attr); lck_grp_attr_setdefault(&thread_call_lck_grp_attr); lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr); lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr); #if defined(__i386__) || defined(__x86_64__) lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr); #else lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr); #endif queue_init(&group->pending_queue); queue_init(&group->delayed_queue); s = splsched(); thread_call_lock_spin(); timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group); wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO); wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO); queue_init(&thread_call_internal_queue); for ( call = internal_call_storage; call < &internal_call_storage[internal_call_count]; call++) { enqueue_tail(&thread_call_internal_queue, qe(call)); } thread_call_daemon_awake = TRUE; thread_call_unlock(); splx(s); result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread); if (result != KERN_SUCCESS) panic("thread_call_initialize"); thread_deallocate(thread); }
/*
 * Initialize the timer_call module: set up its lock attribute/group,
 * then initialize the long-term timer subsystem and the module's
 * precomputed absolute-time constants.
 */
void
timer_call_init(void)
{
	lck_attr_setdefault(&timer_call_lck_attr);
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);

	timer_longterm_init();
	timer_call_init_abstime();
}
lck_grp_attr_t * lck_grp_attr_alloc_init( void) { lck_grp_attr_t *attr; if ((attr = (lck_grp_attr_t *)kalloc(sizeof(lck_grp_attr_t))) != 0) lck_grp_attr_setdefault(attr); return(attr); }
/*
 * One-time bootstrap of the lock-group module: initialize the global
 * group list and its mutex, then set up the default group attribute,
 * the "Compatibility APIs" group, and the default lock attribute.
 */
void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault( &LockDefaultGroupAttr);
	/*
	 * NOTE(review): LockCompatGroup is created with LCK_GRP_ATTR_NULL
	 * rather than &LockDefaultGroupAttr — presumably intentional
	 * (compat group takes no custom attributes); confirm.
	 */
	lck_grp_init( &LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}
/* * thread_call_initialize: * * Initialize this module, called * early during system initialization. */ void thread_call_initialize(void) { thread_call_t call; kern_return_t result; thread_t thread; int i; i = sizeof (thread_call_data_t); thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call"); zone_change(thread_call_zone, Z_CALLERACCT, FALSE); zone_change(thread_call_zone, Z_NOENCRYPT, TRUE); lck_attr_setdefault(&thread_call_lck_attr); lck_grp_attr_setdefault(&thread_call_lck_grp_attr); lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr); lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr); #if defined(__i386__) || defined(__x86_64__) lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr); #else lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr); #endif nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs); wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO); thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE); thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE); thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE); thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE); disable_ints_and_lock(); queue_init(&thread_call_internal_queue); for ( call = internal_call_storage; call < &internal_call_storage[INTERNAL_CALL_COUNT]; call++) { enqueue_tail(&thread_call_internal_queue, qe(call)); } thread_call_daemon_awake = TRUE; enable_ints_and_unlock(); result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread); if (result != KERN_SUCCESS) panic("thread_call_initialize"); thread_deallocate(thread); }
void host_notify_init(void) { int i; for (i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++) queue_init(&host_notify_queue[i]); lck_grp_attr_setdefault(&host_notify_lock_grp_attr); lck_grp_init(&host_notify_lock_grp, "host_notify", &host_notify_lock_grp_attr); lck_attr_setdefault(&host_notify_lock_attr); lck_mtx_init_ext(&host_notify_lock, &host_notify_lock_ext, &host_notify_lock_grp, &host_notify_lock_attr); i = sizeof (struct host_notify_entry); host_notify_zone = zinit(i, (4096 * i), (16 * i), "host_notify"); }
void lpx_datagram_init() { DEBUG_PRINT(DEBUG_MASK_DGRAM_TRACE, ("lpx_datagram_init: Entered.\n")); // Init Lock. datagram_mtx_grp_attr = lck_grp_attr_alloc_init(); lck_grp_attr_setdefault(datagram_mtx_grp_attr); datagram_mtx_grp = lck_grp_alloc_init("datagrampcb", datagram_mtx_grp_attr); datagram_mtx_attr = lck_attr_alloc_init(); lck_attr_setdefault(datagram_mtx_attr); if ((lpx_datagram_pcb.lpxp_list_rw = lck_rw_alloc_init(datagram_mtx_grp, datagram_mtx_attr)) == NULL) { DEBUG_PRINT(DEBUG_MASK_STREAM_ERROR, ("lpx_datagram_init: Can't alloc mtx\n")); } return; }
/*
 * One-time bootstrap of the lock-group module.
 *
 * Parses lock-related boot-args, hand-constructs the LockCompatGroup
 * (lck_grp_init() cannot be used yet because it would take
 * lck_grp_lock, which is only initialized at the end of this routine),
 * then sets up the default group/lock attributes and finally the
 * group-list mutex itself.
 */
void
lck_mod_init(
	void)
{
	/*
	 * Obtain "lcks" options:this currently controls lock statistics
	 */
	if (!PE_parse_boot_argn("lcks", &LcksOpts, sizeof (LcksOpts)))
		LcksOpts = 0;

#if (DEVELOPMENT || DEBUG) && defined(__x86_64__)
	/* debug builds: allow disabling mutex preemption checks via boot-arg */
	if (!PE_parse_boot_argn("-disable_mtx_chk", &LckDisablePreemptCheck, sizeof (LckDisablePreemptCheck)))
		LckDisablePreemptCheck = 0;
#endif /* (DEVELOPMENT || DEBUG) && defined(__x86_64__) */

	queue_init(&lck_grp_queue);

	/*
	 * Need to bootstrap the LockCompatGroup instead of calling lck_grp_init() here. This avoids
	 * grabbing the lck_grp_lock before it is initialized.
	 */
	bzero(&LockCompatGroup, sizeof(lck_grp_t));
	(void) strncpy(LockCompatGroup.lck_grp_name, "Compatibility APIs", LCK_GRP_MAX_NAME);

	/*
	 * NOTE(review): the else-branch assigns LCK_ATTR_NONE (a lock-attr
	 * constant) to the group-attr field — presumably both are 0;
	 * confirm against the lck_grp_attr definitions.
	 */
	if (LcksOpts & enaLkStat)
		LockCompatGroup.lck_grp_attr = LCK_GRP_ATTR_STAT;
	else
		LockCompatGroup.lck_grp_attr = LCK_ATTR_NONE;

	LockCompatGroup.lck_grp_refcnt = 1;

	enqueue_tail(&lck_grp_queue, (queue_entry_t)&LockCompatGroup);
	lck_grp_cnt = 1;

	lck_grp_attr_setdefault(&LockDefaultGroupAttr);
	lck_attr_setdefault(&LockDefaultLckAttr);

	/* now that LockCompatGroup exists, the group-list mutex can be set up */
	lck_mtx_init_ext(&lck_grp_lock, &lck_grp_lock_ext, &LockCompatGroup, &LockDefaultLckAttr);
}
/*
 * Initialize the thread module: create the zone for thread structures,
 * set up the thread lock group/attributes, initialize kernel stacks,
 * and perform machine-dependent per-thread setup.
 */
void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			thread_max * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");

	lck_grp_attr_setdefault(&thread_lck_grp_attr);
	lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
	lck_attr_setdefault(&thread_lck_attr);

	stack_init();

	/*
	 * Initialize any machine-dependent
	 * per-thread structures necessary.
	 */
	machine_thread_init();
}
/*
 * Initialize the default pager: lock machinery, VM page constants
 * (non-MACH_KERNEL builds), the vstruct zone and global list/stats,
 * backing-store state, and finally register this pager as the host's
 * default memory manager (or with the name server under USER_PAGER).
 */
void
default_pager_initialize(void)
{
	kern_return_t		kr;
	__unused static char	here[] = "default_pager_initialize";

	lck_grp_attr_setdefault(&default_pager_lck_grp_attr);
	lck_grp_init(&default_pager_lck_grp, "default_pager", &default_pager_lck_grp_attr);
	lck_attr_setdefault(&default_pager_lck_attr);

	/*
	 * Vm variables.
	 */
#ifndef MACH_KERNEL
	/* derive page mask/shift from vm_page_size (power of two assumed) */
	vm_page_mask = vm_page_size - 1;
	assert((unsigned int) vm_page_size == vm_page_size);
	vm_page_shift = local_log2((unsigned int) vm_page_size);
#endif

	/*
	 * List of all vstructs.
	 */
	vstruct_zone = zinit(sizeof(struct vstruct),
			     10000 * sizeof(struct vstruct),
			     8192, "vstruct zone");
	zone_change(vstruct_zone, Z_CALLERACCT, FALSE);
	zone_change(vstruct_zone, Z_NOENCRYPT, TRUE);

	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();

	/*
	 * Exported DMM port.
	 */
	default_pager_object = ipc_port_alloc_kernel();

	/*
	 * Export pager interfaces.
	 */
#ifdef	USER_PAGER
	/* user-space pager: register with the name server */
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_object))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x\n", kr));
		exit(1);
	}
#else	/* USER_PAGER */
	{
		unsigned int clsize;
		memory_object_default_t dmm;

		dmm = default_pager_object;
		assert((unsigned int) vm_page_size == vm_page_size);
		/* cluster size = page size << default cluster shift */
		clsize = ((unsigned int) vm_page_size << vstruct_def_clshift);
		kr = host_default_memory_manager(host_priv_self(), &dmm, clsize);
		/* a non-null dmm on return means another manager was already set */
		if ((kr != KERN_SUCCESS) ||
		    (dmm != MEMORY_OBJECT_DEFAULT_NULL))
			Panic("default memory manager");
	}
#endif	/* USER_PAGER */
}