__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_enter(
	chudxnu_cpu_timer_callback_func_t	func,
	uint32_t				time,
	uint32_t				units)
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	// cancel any existing callback for this cpu
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));

	chud_proc_info->cpu_timer_callback_fn = func;

	clock_interval_to_deadline(time, units, &(chud_proc_info->t_deadline));
	timer_call_setup(&(chud_proc_info->cpu_timer_call),
			 chudxnu_private_cpu_timer_callback, NULL);
	timer_call_enter(&(chud_proc_info->cpu_timer_call),
			 chud_proc_info->t_deadline);

	KERNEL_DEBUG_CONSTANT(
		MACHDBG_CODE(DBG_MACH_CHUD,
			     CHUD_TIMER_CALLBACK_ENTER) | DBG_FUNC_NONE,
		(uint32_t) func, time, units, 0, 0);

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}
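
For reference, a minimal sketch of what the matching cancel routine could look like under the same per-CPU chudcpu_data_t layout; this is an illustrative reconstruction, not quoted source:

__private_extern__ kern_return_t
chudxnu_cpu_timer_callback_cancel(void)	/* illustrative counterpart to the enter call above */
{
	chudcpu_data_t	*chud_proc_info;
	boolean_t	oldlevel;

	oldlevel = ml_set_interrupts_enabled(FALSE);
	chud_proc_info = (chudcpu_data_t *)(current_cpu_datap()->cpu_chud);

	/* stop any pending expiration and forget the registered callback */
	timer_call_cancel(&(chud_proc_info->cpu_timer_call));
	chud_proc_info->cpu_timer_callback_fn = NULL;
	chud_proc_info->t_deadline = 0;

	ml_set_interrupts_enabled(oldlevel);
	return KERN_SUCCESS;
}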
Example #2
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_id,
	processor_set_t		pset)
{
	run_queue_init(&processor->runq);

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor->current_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->deadline = UINT64_MAX;
	processor->timeslice = 0;
	processor->processor_meta = PROCESSOR_META_NULL;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	simple_unlock(&processor_list_lock);
}
Example #3
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t				thread;
	int						i;
	spl_t					s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
	lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
	lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (call = internal_call_storage;
	     call < &internal_call_storage[internal_call_count];
	     call++) {
		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
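
The module initialized above backs the public thread_call API; below is a minimal sketch of typical deferred-work usage, with the function names and the 100 ms delay chosen purely for illustration:

/* illustrative example of the thread_call interface this module provides */
static void
example_deferred_work(thread_call_param_t param0, thread_call_param_t param1)
{
	/* runs later on a thread_call worker thread */
}

static void
example_schedule_deferred_work(void)
{
	thread_call_t	call;
	uint64_t	deadline;

	call = thread_call_allocate(example_deferred_work, NULL);

	/* arrange for the callout roughly 100 ms from now */
	clock_interval_to_deadline(100, NSEC_PER_MSEC, &deadline);
	thread_call_enter_delayed(call, deadline);
}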
Example #4
/*
 *	clock_config:
 *
 *	Called once at boot to configure the clock subsystem.
 */
void
clock_config(void)
{
	clock_lock_init();

	timer_call_setup(&calend_adjcall, (timer_call_func_t)calend_adjust_call, NULL);
	thread_call_setup(&calend_wakecall, (thread_call_func_t)IOKitResetTime, NULL);

	clock_oldconfig();
}
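
Once set up this way, calend_adjcall can later be armed by converting a relative interval into an absolute deadline; a minimal sketch follows, where the helper name and millisecond interval are assumptions for illustration:

static void
example_arm_calend_adjust(uint32_t interval_ms)	/* illustrative helper, not part of the original file */
{
	uint64_t	deadline;

	/* turn the relative interval into an absolute deadline */
	clock_interval_to_deadline(interval_ms, NSEC_PER_MSEC, &deadline);

	/* (re-)arm the timer configured in clock_config() */
	timer_call_enter(&calend_adjcall, deadline);
}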
Example #5
/*
 *	Initialize the given processor for the cpu
 *	indicated by cpu_id, and assign to the
 *	specified processor set.
 */
void
processor_init(
	processor_t			processor,
	int					cpu_id,
	processor_set_t		pset)
{
	spl_t		s;

	if (processor != master_processor) {
		/* Scheduler state for master_processor initialized in sched_init() */
		SCHED(processor_init)(processor);
	}

	processor->state = PROCESSOR_OFF_LINE;
	processor->active_thread = processor->next_thread = processor->idle_thread = THREAD_NULL;
	processor->processor_set = pset;
	processor_state_update_idle(processor);
	processor->starting_pri = MINPRI;
	processor->cpu_id = cpu_id;
	timer_call_setup(&processor->quantum_timer, thread_quantum_expire, processor);
	processor->quantum_end = UINT64_MAX;
	processor->deadline = UINT64_MAX;
	processor->first_timeslice = FALSE;
	processor->processor_primary = processor; /* no SMT relationship known at this point */
	processor->processor_secondary = NULL;
	processor->is_SMT = FALSE;
	processor->is_recommended = (pset->recommended_bitmask & (1ULL << cpu_id)) ? TRUE : FALSE;
	processor->processor_self = IP_NULL;
	processor_data_init(processor);
	processor->processor_list = NULL;

	s = splsched();
	pset_lock(pset);
	bit_set(pset->cpu_bitmask, cpu_id);
	if (pset->cpu_set_count++ == 0)
		pset->cpu_set_low = pset->cpu_set_hi = cpu_id;
	else {
		pset->cpu_set_low = (cpu_id < pset->cpu_set_low)? cpu_id: pset->cpu_set_low;
		pset->cpu_set_hi = (cpu_id > pset->cpu_set_hi)? cpu_id: pset->cpu_set_hi;
	}
	pset_unlock(pset);
	splx(s);

	simple_lock(&processor_list_lock);
	if (processor_list == NULL)
		processor_list = processor;
	else
		processor_list_tail->processor_list = processor;
	processor_list_tail = processor;
	processor_count++;
	assert(cpu_id < MAX_SCHED_CPUS);
	processor_array[cpu_id] = processor;
	simple_unlock(&processor_list_lock);
}
Example #6
static void
thread_call_group_setup(
		thread_call_group_t 		group, 
		thread_call_priority_t		pri,
		uint32_t			target_thread_count,
		boolean_t			parallel)
{
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);
	timer_call_setup(&group->dealloc_timer, thread_call_dealloc_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);

	group->target_thread_count = target_thread_count;
	group->pri = thread_call_priority_to_sched_pri(pri);

	group->sched_call = sched_call_thread; 
	if (parallel) {
		group->flags |= TCG_PARALLEL;
		group->sched_call = NULL;
	} 
}
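
A caller would invoke this once per priority band during initialization; one such call might look like the following sketch, where the group array name and the target thread count are illustrative assumptions:

	/* illustrative: configure a high-priority group with up to 4 worker threads, serialized callouts */
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH],
				THREAD_CALL_PRIORITY_HIGH, 4, FALSE);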
static cyclic_id_t
timer_call_add_cyclic(wrap_timer_call_t *wrapTC, cyc_handler_t *handler, cyc_time_t *when)
{
	uint64_t now;

	timer_call_setup( &(wrapTC->call),  _timer_call_apply_cyclic, NULL );
	wrapTC->hdlr = *handler;
	wrapTC->when = *when;

	nanoseconds_to_absolutetime( wrapTC->when.cyt_interval, (uint64_t *)&wrapTC->when.cyt_interval );

	now = mach_absolute_time();
	wrapTC->deadline = now;

	clock_deadline_for_periodic_event( wrapTC->when.cyt_interval, now, &(wrapTC->deadline) );
	timer_call_enter1( &(wrapTC->call), (void *)wrapTC, wrapTC->deadline );

	return (cyclic_id_t)wrapTC;
}
__private_extern__
kern_return_t chudxnu_cpu_timer_callback_enter(chudxnu_cpu_timer_callback_func_t func, uint32_t time, uint32_t units)
{
    int cpu;
    boolean_t oldlevel;

    oldlevel = ml_set_interrupts_enabled(FALSE);
    cpu = cpu_number();

    timer_call_cancel(&(cpu_timer_call[cpu])); // cancel any existing callback for this cpu

    cpu_timer_callback_fn[cpu] = func;

    clock_interval_to_deadline(time, units, &(t_deadline[cpu]));
    timer_call_setup(&(cpu_timer_call[cpu]), chudxnu_private_cpu_timer_callback, NULL);
    timer_call_enter(&(cpu_timer_call[cpu]), t_deadline[cpu]);

    ml_set_interrupts_enabled(oldlevel);
    return KERN_SUCCESS;
}
Example #9
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	int						options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t				*out_thread)
{
	thread_t				new_thread;
	static thread_t			first_thread = THREAD_NULL;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif  /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
			 	parent_task->suspend_count > 0)	||
			(parent_task->thread_count >= task_threadmax &&
				parent_task != kernel_task)		) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
		/*
		 * This task has a per-thread CPU limit; make sure this new thread
		 * gets its limit set too, before it gets out of the kernel.
		 */
		set_astledger(new_thread);
	}
	new_thread->t_threadledger = LEDGER_NULL;	/* per thread ledger is not inherited */
	new_thread->t_ledger = new_thread->task->ledger;
	if (new_thread->t_ledger)
		ledger_reference(new_thread->t_ledger);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;
	
	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? 
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
	new_thread->sched_flags = 0;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
#if CONFIG_EMBEDDED 
	if (new_thread->priority < MAXPRI_THROTTLE) {
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
#if CONFIG_EMBEDDED
	new_thread->saved_importance = new_thread->importance;
	/* apple ios daemon starts all threads in darwin background */
	if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
		/* Cannot use generic routines here, so apply darwin background directly */
		new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
		/* set thread self backgrounding */
		new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg;
		/* priority will get recomputed suitably a bit later */
		new_thread->importance = INT_MIN;
		/* to avoid changes to many pri compute routines, set the effect of those here */
		new_thread->priority = MAXPRI_THROTTLE;
	}
#endif /* CONFIG_EMBEDDED */

#if defined(CONFIG_SCHED_TRADITIONAL)
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
#endif
	SCHED(compute_priority)(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
			(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE, 
			TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
			dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
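
Since the routine returns with the task lock and tasks_threads_lock still held on success, a caller is expected to finish its own setup and then drop them; the following is a simplified sketch of such a wrapper, omitting the extra IPC and suspend-count handling a real entry point performs (the wrapper name is illustrative):

kern_return_t
example_thread_create(task_t task, thread_t *new_thread)	/* illustrative wrapper, not the kernel's public API */
{
	kern_return_t	result;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return,
					TH_OPTION_NONE, new_thread);
	if (result != KERN_SUCCESS)
		return (result);

	/* thread_create_internal() succeeded with both locks held; release them */
	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	return (KERN_SUCCESS);
}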
Example #10
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	thread_t				*out_thread)
{
	thread_t				new_thread;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
    {
		new_thread->uthread = uthread_alloc(parent_task, new_thread);
		if (new_thread->uthread == NULL) {
			zfree(thread_zone, new_thread);
			return (KERN_RESOURCE_SHORTAGE);
		}
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			/* cred free may not be necessary */
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	mutex_init(&new_thread->mutex, 0);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	mutex_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active							||
			(parent_task->thread_count >= THREAD_MAX	&&
			 parent_task != kernel_task)) {
		task_unlock(parent_task);
		mutex_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;
	
	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
							(vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
							dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
Example #11
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
	task_t					parent_task,
	integer_t				priority,
	thread_continue_t		continuation,
	int						options,
#define TH_OPTION_NONE		0x00
#define TH_OPTION_NOCRED	0x01
#define TH_OPTION_NOSUSP	0x02
	thread_t				*out_thread)
{
	thread_t				new_thread;
	static thread_t			first_thread;

	/*
	 *	Allocate a thread and initialize static fields
	 */
	if (first_thread == THREAD_NULL)
		new_thread = first_thread = current_thread();
	else
		new_thread = (thread_t)zalloc(thread_zone);
	if (new_thread == THREAD_NULL)
		return (KERN_RESOURCE_SHORTAGE);

	if (new_thread != first_thread)
		*new_thread = thread_template;

#ifdef MACH_BSD
	new_thread->uthread = uthread_alloc(parent_task, new_thread, (options & TH_OPTION_NOCRED) != 0);
	if (new_thread->uthread == NULL) {
		zfree(thread_zone, new_thread);
		return (KERN_RESOURCE_SHORTAGE);
	}
#endif  /* MACH_BSD */

	if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
		void *ut = new_thread->uthread;

		new_thread->uthread = NULL;
		/* cred free may not be necessary */
		uthread_cleanup(parent_task, ut, parent_task->bsd_info);
		uthread_cred_free(ut);
		uthread_zone_free(ut);
#endif  /* MACH_BSD */

		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	new_thread->task = parent_task;

	thread_lock_init(new_thread);
	wake_lock_init(new_thread);

	lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

	ipc_thread_init(new_thread);
	queue_init(&new_thread->held_ulocks);

	new_thread->continuation = continuation;

	lck_mtx_lock(&tasks_threads_lock);
	task_lock(parent_task);

	if (	!parent_task->active || parent_task->halting ||
			((options & TH_OPTION_NOSUSP) != 0 &&
			 	parent_task->suspend_count > 0)	||
			(parent_task->thread_count >= task_threadmax &&
				parent_task != kernel_task)		) {
		task_unlock(parent_task);
		lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
		{
			void *ut = new_thread->uthread;

			new_thread->uthread = NULL;
			uthread_cleanup(parent_task, ut, parent_task->bsd_info);
			/* cred free may not be necessary */
			uthread_cred_free(ut);
			uthread_zone_free(ut);
		}
#endif  /* MACH_BSD */
		ipc_thread_disable(new_thread);
		ipc_thread_terminate(new_thread);
		lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
		machine_thread_destroy(new_thread);
		zfree(thread_zone, new_thread);
		return (KERN_FAILURE);
	}

	/* New threads inherit any default state on the task */
	machine_thread_inherit_taskwide(new_thread, parent_task);

	task_reference_internal(parent_task);

	/* Cache the task's map */
	new_thread->map = parent_task->map;

	/* Chain the thread onto the task's list */
	queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
	parent_task->thread_count++;
	
	/* So terminating threads don't need to take the task lock to decrement */
	hw_atomic_add(&parent_task->active_thread_count, 1);

	/* Protected by the tasks_threads_lock */
	new_thread->thread_id = ++thread_unique_id;

	queue_enter(&threads, new_thread, thread_t, threads);
	threads_count++;

	timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
	timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
	/*
	 * If parent task has any reservations, they need to be propagated to this
	 * thread.
	 */
	new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ? 
		THREAD_PMC_FLAG : 0U;
#endif

	/* Set the thread's scheduling parameters */
	if (parent_task != kernel_task)
		new_thread->sched_mode |= TH_MODE_TIMESHARE;
	new_thread->max_priority = parent_task->max_priority;
	new_thread->task_priority = parent_task->priority;
	new_thread->priority = (priority < 0)? parent_task->priority: priority;
	if (new_thread->priority > new_thread->max_priority)
		new_thread->priority = new_thread->max_priority;
	new_thread->importance =
					new_thread->priority - new_thread->task_priority;
	new_thread->sched_stamp = sched_tick;
	new_thread->pri_shift = sched_pri_shift;
	compute_priority(new_thread, FALSE);

	new_thread->active = TRUE;

	*out_thread = new_thread;

	{
		long	dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

		kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
							(vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

		kdbg_trace_string(parent_task->bsd_info,
							&dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

		KERNEL_DEBUG_CONSTANT(
					TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
							dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
	}

	DTRACE_PROC1(lwp__create, thread_t, *out_thread);

	return (KERN_SUCCESS);
}
Example #12
extern int
kperf_timer_set_count(unsigned int count)
{
	struct kperf_timer *new_timerv = NULL, *old_timerv = NULL;
	unsigned int old_count;

	if (min_period_abstime == 0) {
		nanoseconds_to_absolutetime(MIN_PERIOD_NS, &min_period_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_BG_NS, &min_period_bg_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_NS, &min_period_pet_abstime);
		nanoseconds_to_absolutetime(MIN_PERIOD_PET_BG_NS,
			&min_period_pet_bg_abstime);
		assert(min_period_abstime > 0);
	}

	if (count == kperf_timerc) {
		return 0;
	}
	if (count > TIMER_MAX) {
		return EINVAL;
	}

	/* TODO: allow shrinking? */
	if (count < kperf_timerc) {
		return EINVAL;
	}

	/*
	 * Make sure kperf is initialized when creating the array for the first
	 * time.
	 */
	if (kperf_timerc == 0) {
		int r;

		/* main kperf */
		if ((r = kperf_init())) {
			return r;
		}
	}

	/*
	 * Shut down any running timers since we will be messing with the timer
	 * call structures.
	 */
	kperf_timer_stop();

	/* create a new array */
	new_timerv = kalloc_tag(count * sizeof(struct kperf_timer),
		VM_KERN_MEMORY_DIAG);
	if (new_timerv == NULL) {
		return ENOMEM;
	}
	old_timerv = kperf_timerv;
	old_count = kperf_timerc;

	if (old_timerv != NULL) {
		bcopy(kperf_timerv, new_timerv,
			kperf_timerc * sizeof(struct kperf_timer));
	}

	/* zero the new entries */
	bzero(&(new_timerv[kperf_timerc]),
		(count - old_count) * sizeof(struct kperf_timer));

	/* (re-)setup the timer call info for all entries */
	for (unsigned int i = 0; i < count; i++) {
		timer_call_setup(&(new_timerv[i].tcall), kperf_timer_handler, &(new_timerv[i]));
	}

	kperf_timerv = new_timerv;
	kperf_timerc = count;

	if (old_timerv != NULL) {
		kfree(old_timerv, old_count * sizeof(struct kperf_timer));
	}

	return 0;
}
Example #13
static void
setup_timer_call( struct time_trigger *trigger )
{
	timer_call_setup( &trigger->tcall, kperf_timer_handler, trigger );
}
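
After this setup the trigger's timer call still has to be armed for its first expiration; a minimal sketch follows, assuming the period is already expressed in absolute time units (the helper and parameter names are illustrative):

static void
example_start_timer_call(struct time_trigger *trigger, uint64_t period_abs)	/* illustrative */
{
	uint64_t	deadline;

	/* schedule the first expiration one period from now */
	clock_deadline_for_periodic_event(period_abs, mach_absolute_time(), &deadline);
	timer_call_enter(&trigger->tcall, deadline);
}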