Example #1
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t				thread;
	int						i;
	spl_t					s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
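
For context, this initialization backs the public thread_call interface. The sketch below shows how a kernel client typically consumes the module once it is up; the callback name, parameter, and one-second interval are hypothetical, and the calls used (thread_call_allocate, thread_call_enter_delayed, thread_call_cancel, thread_call_free, clock_interval_to_deadline) are the standard XNU APIs rather than anything defined in this example.

/*
 * Hypothetical client of the thread_call module initialized above.
 * my_callback and the one-second delay are illustrative only.
 */
static void
my_callback(__unused thread_call_param_t param0, __unused thread_call_param_t param1)
{
	/* deferred work runs here on a thread_call worker thread */
}

static void
my_subsystem_start(void)
{
	thread_call_t	call;
	uint64_t	deadline;

	/* bind the callback and its first parameter to a new call entry */
	call = thread_call_allocate(my_callback, NULL);

	/* schedule it roughly one second from now */
	clock_interval_to_deadline(1, NSEC_PER_SEC, &deadline);
	thread_call_enter_delayed(call, deadline);

	/* ... later: cancel if still pending, then release the entry */
	thread_call_cancel(call);
	thread_call_free(call);
}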
Example #2
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	kern_return_t			result;
	thread_t			thread;
	int				i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
	wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[INTERNAL_CALL_COUNT];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock();

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
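
Example #2 replaces Example #1's explicit splsched()/thread_call_lock_spin() pair with disable_ints_and_lock()/enable_ints_and_unlock() helpers whose bodies are not shown here. A plausible reading, assuming they simply wrap the same sequence seen in Example #1, is sketched below; the saved-spl variable is an assumption for illustration, not taken from the source.

/*
 * Assumed shape of the helpers used above, based on the explicit
 * sequence in Example #1; not taken from this example's source.
 */
static spl_t	thread_call_spl;	/* hypothetical storage for the saved level */

static void
disable_ints_and_lock(void)
{
	thread_call_spl = splsched();	/* mask interrupts to scheduler level */
	thread_call_lock_spin();	/* then take the thread_call lock */
}

static void
enable_ints_and_unlock(void)
{
	thread_call_unlock();		/* release in the reverse order */
	splx(thread_call_spl);
}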
Example #3
/*
 *	ROUTINE:	lock_set_init		[private]
 *
 *	Initialize the lock_set subsystem.
 */
void
lock_set_init(void)
{
	lck_grp_attr_setdefault(&lock_set_grp_attr);
	lck_grp_init(&lock_set_grp, "lock_set", &lock_set_grp_attr);
	lck_attr_setdefault(&lock_set_attr);
}
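
The group and attribute initialized here exist so that individual lock_set locks can be created against them later. A hypothetical consumer would look roughly like this; the lock variable and function name are illustrative only.

/* Hypothetical use of the group/attr set up in lock_set_init(). */
static lck_mtx_t	example_lock;

static void
example_lock_setup(void)
{
	/* every mutex created this way is accounted to the "lock_set" group */
	lck_mtx_init(&example_lock, &lock_set_grp, &lock_set_attr);

	lck_mtx_lock(&example_lock);
	/* ... critical section ... */
	lck_mtx_unlock(&example_lock);
}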
Example #4
/* setup interrupt sample buffers */
int
kperf_init(void)
{
	static lck_grp_attr_t lck_grp_attr;

	lck_mtx_assert(ktrace_lock, LCK_MTX_ASSERT_OWNED);

	unsigned ncpus = 0;
	int err;

	if (kperf_initted) {
		return 0;
	}

	lck_grp_attr_setdefault(&lck_grp_attr);
	lck_grp_init(&kperf_lck_grp, "kperf", &lck_grp_attr);

	ncpus = machine_info.logical_cpu_max;

	/* create buffers to remember which threads don't need to be sampled by PET */
	kperf_thread_on_cpus = kalloc_tag(ncpus * sizeof(*kperf_thread_on_cpus),
	                                  VM_KERN_MEMORY_DIAG);
	if (kperf_thread_on_cpus == NULL) {
		err = ENOMEM;
		goto error;
	}
	bzero(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));

	/* create the interrupt buffers */
	intr_samplec = ncpus;
	intr_samplev = kalloc_tag(ncpus * sizeof(*intr_samplev),
	                          VM_KERN_MEMORY_DIAG);
	if (intr_samplev == NULL) {
		err = ENOMEM;
		goto error;
	}
	bzero(intr_samplev, ncpus * sizeof(*intr_samplev));

	/* create kdebug trigger filter buffers */
	if ((err = kperf_kdebug_init())) {
		goto error;
	}

	kperf_initted = TRUE;
	return 0;

error:
	if (intr_samplev) {
		kfree(intr_samplev, ncpus * sizeof(*intr_samplev));
		intr_samplev = NULL;
		intr_samplec = 0;
	}

	if (kperf_thread_on_cpus) {
		kfree(kperf_thread_on_cpus, ncpus * sizeof(*kperf_thread_on_cpus));
		kperf_thread_on_cpus = NULL;
	}

	return err;
}
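
The lck_mtx_assert at the top of kperf_init() documents a precondition: the caller must already hold ktrace_lock. A minimal caller-side sketch, assuming the usual lock/unlock entry points and a hypothetical wrapper name:

/* Sketch of a caller honoring kperf_init()'s locking precondition. */
static int
example_enable_kperf(void)
{
	int err;

	lck_mtx_lock(ktrace_lock);	/* satisfy the LCK_MTX_ASSERT_OWNED check */
	err = kperf_init();
	lck_mtx_unlock(ktrace_lock);

	return err;
}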
Example #5
void
timer_call_initialize(void)
{
	lck_attr_setdefault(&timer_call_lck_attr);
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);
	nanotime_to_absolutetime(0, PAST_DEADLINE_TIMER_ADJUSTMENT_NS, &past_deadline_timer_adjustment);
}
Example #6
void telemetry_init(void)
{
	kern_return_t ret;
	uint32_t	  telemetry_notification_leeway;

	lck_grp_init(&telemetry_lck_grp, "telemetry group", LCK_GRP_ATTR_NULL);
	lck_mtx_init(&telemetry_mtx, &telemetry_lck_grp, LCK_ATTR_NULL);

	if (!PE_parse_boot_argn("telemetry_buffer_size", &telemetry_buffer.size, sizeof(telemetry_buffer.size))) {
		telemetry_buffer.size = TELEMETRY_DEFAULT_BUFFER_SIZE;
	}

	if (telemetry_buffer.size > TELEMETRY_MAX_BUFFER_SIZE)
		telemetry_buffer.size = TELEMETRY_MAX_BUFFER_SIZE;

	ret = kmem_alloc(kernel_map, &telemetry_buffer.buffer, telemetry_buffer.size, VM_KERN_MEMORY_DIAG);
	if (ret != KERN_SUCCESS) {
		kprintf("Telemetry: Allocation failed: %d\n", ret);
		return;
	}
	bzero((void *) telemetry_buffer.buffer, telemetry_buffer.size);

	if (!PE_parse_boot_argn("telemetry_notification_leeway", &telemetry_notification_leeway, sizeof(telemetry_notification_leeway))) {
		/*
		 * By default, notify the user to collect the buffer when there is this much space left in the buffer.
		 */
		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
	}
	if (telemetry_notification_leeway >= telemetry_buffer.size) {
		printf("telemetry: nonsensical telemetry_notification_leeway boot-arg %d changed to %d\n",
		       telemetry_notification_leeway, TELEMETRY_DEFAULT_NOTIFY_LEEWAY);
		telemetry_notification_leeway = TELEMETRY_DEFAULT_NOTIFY_LEEWAY;
	}
	telemetry_buffer_notify_at = telemetry_buffer.size - telemetry_notification_leeway;

	if (!PE_parse_boot_argn("telemetry_sample_rate", &telemetry_sample_rate, sizeof(telemetry_sample_rate))) {
		telemetry_sample_rate = TELEMETRY_DEFAULT_SAMPLE_RATE;
	}

	/*
	 * To enable telemetry for all tasks, include "telemetry_sample_all_tasks=1" in boot-args.
	 */
	if (!PE_parse_boot_argn("telemetry_sample_all_tasks", &telemetry_sample_all_tasks, sizeof(telemetry_sample_all_tasks))) {

#if CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG)
		telemetry_sample_all_tasks = FALSE;
#else
		telemetry_sample_all_tasks = TRUE;
#endif /* CONFIG_EMBEDDED && !(DEVELOPMENT || DEBUG) */

	}

	kprintf("Telemetry: Sampling %stasks once per %u second%s\n",
		(telemetry_sample_all_tasks) ? "all " : "",
		telemetry_sample_rate, telemetry_sample_rate == 1 ? "" : "s");
}
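
telemetry_init() repeats one idiom several times: try a boot-arg, fall back to a default, and clamp or sanity-check the result. Factored out, the pattern looks like the helper below; the helper itself is not part of the example and its name is hypothetical.

/*
 * Illustrative helper capturing the boot-arg pattern used above:
 * parse "name", fall back to def, clamp to max. Not in the original.
 */
static uint32_t
parse_bootarg_clamped(const char *name, uint32_t def, uint32_t max)
{
	uint32_t value;

	if (!PE_parse_boot_argn(name, &value, sizeof(value)))
		value = def;

	if (value > max)
		value = max;

	return value;
}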
Example #7
void
timer_call_init(void)
{
	lck_attr_setdefault(&timer_call_lck_attr);
	lck_grp_attr_setdefault(&timer_call_lck_grp_attr);
	lck_grp_init(&timer_call_lck_grp, "timer_call", &timer_call_lck_grp_attr);

	timer_longterm_init();
	timer_call_init_abstime();
}
Example #8
void
lck_mod_init(
	void)
{
	queue_init(&lck_grp_queue);
	mutex_init(&lck_grp_lock, 0);
	lck_grp_cnt = 0;
	lck_grp_attr_setdefault( &LockDefaultGroupAttr);
	lck_grp_init( &LockCompatGroup, "Compatibility APIs", LCK_GRP_ATTR_NULL);
	lck_attr_setdefault(&LockDefaultLckAttr);
}
Example #9
lck_grp_t *
lck_grp_alloc_init(
	const char*	grp_name,
	lck_grp_attr_t	*attr)
{
	lck_grp_t	*grp;

	if ((grp = (lck_grp_t *)kalloc(sizeof(lck_grp_t))) != 0)
		lck_grp_init(grp, grp_name, attr);

	return(grp);
}
Example #10
void
host_notify_init(void)
{
	int		i;

	for (i = 0; i <= HOST_NOTIFY_TYPE_MAX; i++)
		queue_init(&host_notify_queue[i]);

	lck_grp_attr_setdefault(&host_notify_lock_grp_attr);
	lck_grp_init(&host_notify_lock_grp, "host_notify", &host_notify_lock_grp_attr);
	lck_attr_setdefault(&host_notify_lock_attr);

	lck_mtx_init_ext(&host_notify_lock, &host_notify_lock_ext, &host_notify_lock_grp, &host_notify_lock_attr);

	i = sizeof (struct host_notify_entry);
	host_notify_zone =
			zinit(i, (4096 * i), (16 * i), "host_notify");
}
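
The zinit() call at the end of host_notify_init() creates a fixed-size zone whose elements are later carved out with zalloc()/zfree(). A minimal sketch of that allocation pattern, assuming the usual zone API; the surrounding function is hypothetical.

/* Sketch of allocating and releasing one entry from host_notify_zone. */
static void
example_host_notify_alloc(void)
{
	struct host_notify_entry	*entry;

	entry = (struct host_notify_entry *)zalloc(host_notify_zone);
	if (entry == NULL)
		return;

	/* ... populate and enqueue the entry ... */

	zfree(host_notify_zone, entry);
}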
Example #11
void
thread_init(void)
{
	thread_zone = zinit(
			sizeof(struct thread),
			thread_max * sizeof(struct thread),
			THREAD_CHUNK * sizeof(struct thread),
			"threads");
	
	lck_grp_attr_setdefault(&thread_lck_grp_attr);
	lck_grp_init(&thread_lck_grp, "thread", &thread_lck_grp_attr);
	lck_attr_setdefault(&thread_lck_attr);
	
	stack_init();

	/*
	 *	Initialize any machine-dependent
	 *	per-thread structures necessary.
	 */
	machine_thread_init();
}
Example #12
void
default_pager_initialize(void)
{
	kern_return_t		kr;
	__unused static char	here[] = "default_pager_initialize";

	lck_grp_attr_setdefault(&default_pager_lck_grp_attr);
	lck_grp_init(&default_pager_lck_grp, "default_pager", &default_pager_lck_grp_attr);
	lck_attr_setdefault(&default_pager_lck_attr);	

	/*
	 * Vm variables.
	 */
#ifndef MACH_KERNEL
	vm_page_mask = vm_page_size - 1;
	assert((unsigned int) vm_page_size == vm_page_size);
	vm_page_shift = local_log2((unsigned int) vm_page_size);
#endif

	/*
	 * List of all vstructs.
	 */
	vstruct_zone = zinit(sizeof(struct vstruct),
			     10000 * sizeof(struct vstruct),
			     8192, "vstruct zone");
	zone_change(vstruct_zone, Z_CALLERACCT, FALSE);
	zone_change(vstruct_zone, Z_NOENCRYPT, TRUE);

	VSL_LOCK_INIT();
	queue_init(&vstruct_list.vsl_queue);
	vstruct_list.vsl_count = 0;

	VSTATS_LOCK_INIT(&global_stats.gs_lock);

	bs_initialize();

	/*
	 * Exported DMM port.
	 */
	default_pager_object = ipc_port_alloc_kernel();

	/*
	 * Export pager interfaces.
	 */
#ifdef	USER_PAGER
	if ((kr = netname_check_in(name_server_port, "UserPager",
				   default_pager_self,
				   default_pager_object))
	    != KERN_SUCCESS) {
		dprintf(("netname_check_in returned 0x%x\n", kr));
		exit(1);
	}
#else	/* USER_PAGER */
	{
		unsigned int clsize;
		memory_object_default_t dmm;

		dmm = default_pager_object;
		assert((unsigned int) vm_page_size == vm_page_size);
		clsize = ((unsigned int) vm_page_size << vstruct_def_clshift);
		kr = host_default_memory_manager(host_priv_self(), &dmm, clsize);
		if ((kr != KERN_SUCCESS) ||
		    (dmm != MEMORY_OBJECT_DEFAULT_NULL))
			Panic("default memory manager");

	}
#endif	/* USER_PAGER */

}