Example #1
void
flowadv_init(void)
{
	STAILQ_INIT(&fadv_list);

	/* Setup lock group and attribute for fadv_lock */
	fadv_lock_grp_attr = lck_grp_attr_alloc_init();
	fadv_lock_grp = lck_grp_alloc_init("fadv_lock", fadv_lock_grp_attr);
	lck_mtx_init(&fadv_lock, fadv_lock_grp, NULL);

	fadv_zone_size = P2ROUNDUP(sizeof (struct flowadv_fcentry),
	    sizeof (u_int64_t));
	fadv_zone = zinit(fadv_zone_size,
	    FADV_ZONE_MAX * fadv_zone_size, 0, FADV_ZONE_NAME);
	if (fadv_zone == NULL) {
		panic("%s: failed allocating %s", __func__, FADV_ZONE_NAME);
		/* NOTREACHED */
	}
	zone_change(fadv_zone, Z_EXPAND, TRUE);
	zone_change(fadv_zone, Z_CALLERACCT, FALSE);

	if (kernel_thread_start(flowadv_thread_func, NULL, &fadv_thread) !=
	    KERN_SUCCESS) {
		panic("%s: couldn't create flow event advisory thread",
		    __func__);
		/* NOTREACHED */
	}
	thread_deallocate(fadv_thread);
}
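Most of the examples on this page follow the idiom shown here: kernel_thread_start() returns with an extra reference on the new thread, and the caller drops it with thread_deallocate() once it no longer needs the handle. A minimal sketch of that idiom, assuming the standard xnu signatures; my_worker and start_my_worker are hypothetical names, not part of the example above:

static void
my_worker(void *parameter, wait_result_t wr)	/* hypothetical worker */
{
	/* ... thread body ... */
}

static void
start_my_worker(void)
{
	thread_t thread;

	if (kernel_thread_start(my_worker, NULL, &thread) != KERN_SUCCESS)
		return;

	/* The thread runs on its own reference; drop the extra one
	 * kernel_thread_start() handed back. */
	thread_deallocate(thread);
}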
Example #2
void BrcmPatchRAM::processWorkQueue(IOInterruptEventSource*, int)
{
    IOLockLock(mWorkLock);

    // start firmware loading process in a non-workloop thread
    if (mWorkPending & kWorkLoadFirmware)
    {
        DebugLog("_workPending kWorkLoadFirmware\n");
        mWorkPending &= ~kWorkLoadFirmware;
        retain();
        kern_return_t result = kernel_thread_start(&BrcmPatchRAM::uploadFirmwareThread, this, &mWorker);
        if (KERN_SUCCESS == result)
            DebugLog("Success creating firmware uploader thread\n");
        else
        {
            AlwaysLog("ERROR creating firmware uploader thread.\n");
            release();
        }
    }

    // firmware loading thread is finished
    if (mWorkPending & kWorkFinished)
    {
        DebugLog("_workPending kWorkFinished\n");
        mWorkPending &= ~kWorkFinished;
        thread_deallocate(mWorker);
        mWorker = 0;
        release();  // matching retain when thread created successfully
    }

    IOLockUnlock(mWorkLock);
}
Example #3
/*
 *	Routine:	semaphore_signal_thread_trap
 *
 *	Trap interface to the semaphore_signal_thread function.
 */
kern_return_t
semaphore_signal_thread_trap(
	struct semaphore_signal_thread_trap_args *args)
{
	mach_port_name_t sema_name = args->signal_name;
	mach_port_name_t thread_name = args->thread_name;
	semaphore_t	semaphore;
	thread_t	thread;
	kern_return_t	kr;

	/* 
	 * MACH_PORT_NULL is not an error. It means that we want to
	 * select any one thread that is already waiting, but not to
	 * pre-post the semaphore.
	 */
	if (thread_name != MACH_PORT_NULL) {
		thread = port_name_to_thread(thread_name);
		if (thread == THREAD_NULL)
			return KERN_INVALID_ARGUMENT;
	} else
		thread = THREAD_NULL;

	kr = port_name_to_semaphore(sema_name, &semaphore);
	if (kr == KERN_SUCCESS) {
		kr = semaphore_signal_internal(semaphore,
				thread,
				SEMAPHORE_OPTION_NONE);
		semaphore_dereference(semaphore);
	}
	if (thread != THREAD_NULL) {
		thread_deallocate(thread);
	}
	return kr;
}
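port_name_to_thread() is another producer of thread references: on success the caller owns a reference and must balance it with thread_deallocate() on every path, as the trap above does. A minimal sketch of that discipline; the helper name is hypothetical:

static kern_return_t
act_on_thread_port(mach_port_name_t name)	/* hypothetical helper */
{
	thread_t thread = port_name_to_thread(name);

	if (thread == THREAD_NULL)
		return KERN_INVALID_ARGUMENT;

	/* ... operate on the thread ... */

	thread_deallocate(thread);	/* balance the lookup */
	return KERN_SUCCESS;
}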
Example #4
/*
 *	thread_call_daemon_continue:
 */
static void
thread_call_daemon_continue(
	thread_call_group_t		group)
{
	kern_return_t	result;
	thread_t		thread;

	(void) splsched();
	thread_call_lock_spin();

	while (group->active_count == 0	&& group->pending_count > 0) {
		group->active_count++;

		thread_call_unlock();
		(void) spllo();
	
		result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, BASEPRI_PREEMPT, &thread);
		if (result != KERN_SUCCESS)
			panic("thread_call_daemon");

		thread_deallocate(thread);

		(void) splsched();
		thread_call_lock_spin();
	}

	thread_call_daemon_awake = FALSE;
	wait_queue_assert_wait(&group->daemon_wqueue, NO_EVENT, THREAD_UNINT, 0);

	thread_call_unlock();
	(void) spllo();

	thread_block_parameter((thread_continue_t)thread_call_daemon_continue, group);
	/* NOTREACHED */
}
Example #5
/* virtual */ void IOWorkLoop::threadMain()
{
restartThread:
    do {
	if ( !runEventSources() )
	    goto exitThread;

	IOInterruptState is = IOSimpleLockLockDisableInterrupt(workToDoLock);
        if ( !ISSETP(&fFlags, kLoopTerminate) && !workToDo) {
	    assert_wait((void *) &workToDo, false);
	    IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
	    thread_continue_t cptr = NULL;
	    if (!reserved || !(kPreciousStack & reserved->options))
		cptr = OSMemberFunctionCast(
			thread_continue_t, this, &IOWorkLoop::threadMain);
	    thread_block_parameter(cptr, this);
	    goto restartThread;
	    /* NOTREACHED */
	}

	// At this point we either have work to do or we need
	// to commit suicide.  But no matter which:
	// clear the simple lock and restore the interrupt state
	IOSimpleLockUnlockEnableInterrupt(workToDoLock, is);
    } while(workToDo);

exitThread:
	thread_t thread = workThread;
    workThread = 0;	// Say we don't have a loop and free ourselves
    free();

	thread_deallocate(thread);
    (void) thread_terminate(thread);
}
Example #6
static thread_t thr_new(thr_callback_t f, void *p, int a) {
	thread_t thr;
	if (KERN_SUCCESS != (kernel_thread_start(f, p, &thr)))
		return NULL;
	thread_deallocate(thr);
	
	return thr;
}
Example #7
inline void myExitThread(THREAD_T *thread)
{
	THREAD_T	thread_ptr = *thread;
	
	*thread = NULL;
	
	thread_deallocate(thread_ptr);
	thread_terminate(thread_ptr);
}
Example #8
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	thread_call_group_t		group = &thread_call_group0;
	kern_return_t			result;
	thread_t				thread;
	int						i;
	spl_t					s;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
        lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
        lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif
	queue_init(&group->pending_queue);
	queue_init(&group->delayed_queue);

	s = splsched();
	thread_call_lock_spin();

	timer_call_setup(&group->delayed_timer, thread_call_delayed_timer, group);

	wait_queue_init(&group->idle_wqueue, SYNC_POLICY_FIFO);
	wait_queue_init(&group->daemon_wqueue, SYNC_POLICY_FIFO);

	queue_init(&thread_call_internal_queue);
	for (
	    	call = internal_call_storage;
			call < &internal_call_storage[internal_call_count];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	thread_call_unlock();
	splx(s);

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, group, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
Example #9
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/* 
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example #10
/*
 *	thread_call_initialize:
 *
 *	Initialize this module, called
 *	early during system initialization.
 */
void
thread_call_initialize(void)
{
	thread_call_t			call;
	kern_return_t			result;
	thread_t			thread;
	int				i;

	i = sizeof (thread_call_data_t);
	thread_call_zone = zinit(i, 4096 * i, 16 * i, "thread_call");
	zone_change(thread_call_zone, Z_CALLERACCT, FALSE);
	zone_change(thread_call_zone, Z_NOENCRYPT, TRUE);

	lck_attr_setdefault(&thread_call_lck_attr);
	lck_grp_attr_setdefault(&thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_queues_lck_grp, "thread_call_queues", &thread_call_lck_grp_attr);
	lck_grp_init(&thread_call_lck_grp, "thread_call", &thread_call_lck_grp_attr);

#if defined(__i386__) || defined(__x86_64__)
        lck_mtx_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#else
        lck_spin_init(&thread_call_lock_data, &thread_call_lck_grp, &thread_call_lck_attr);
#endif

	nanotime_to_absolutetime(0, THREAD_CALL_DEALLOC_INTERVAL_NS, &thread_call_dealloc_interval_abs);
	wait_queue_init(&daemon_wqueue, SYNC_POLICY_FIFO);

	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_LOW], THREAD_CALL_PRIORITY_LOW, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_USER], THREAD_CALL_PRIORITY_USER, 0, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_KERNEL], THREAD_CALL_PRIORITY_KERNEL, 1, TRUE);
	thread_call_group_setup(&thread_call_groups[THREAD_CALL_PRIORITY_HIGH], THREAD_CALL_PRIORITY_HIGH, THREAD_CALL_THREAD_MIN, FALSE);

	disable_ints_and_lock();

	queue_init(&thread_call_internal_queue);
	for (
			call = internal_call_storage;
			call < &internal_call_storage[INTERNAL_CALL_COUNT];
			call++) {

		enqueue_tail(&thread_call_internal_queue, qe(call));
	}

	thread_call_daemon_awake = TRUE;

	enable_ints_and_unlock();

	result = kernel_thread_start_priority((thread_continue_t)thread_call_daemon, NULL, BASEPRI_PREEMPT + 1, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_call_initialize");

	thread_deallocate(thread);
}
Example #11
IOThread IOCreateThread(IOThreadFunc fcn, void *arg)
{
	kern_return_t	result;
	thread_t		thread;

	result = kernel_thread_start((thread_continue_t)fcn, arg, &thread);
	if (result != KERN_SUCCESS)
		return (NULL);

	thread_deallocate(thread);

	return (thread);
}
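Since the wrapper has already dropped the creation reference, the returned IOThread is best treated as an opaque identifier. A hedged usage sketch; the entry point and caller are made-up names:

static void
my_entry(void *arg)			/* hypothetical entry point */
{
	/* ... thread body ... */
}

static void
spawn_worker(void *context)		/* hypothetical caller */
{
	IOThread t = IOCreateThread(my_entry, context);
	if (t == NULL) {
		/* creation failed; nothing to clean up */
		return;
	}
	/* t carries no reference of its own here -- IOCreateThread()
	 * already called thread_deallocate() on it. */
}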
Example #12
/* -----------------------------------------------------------------------------
Called when we need to add the L2TP protocol to the domain.
Typically, l2tp_add is called by ppp_domain when we add the domain,
but we can add the protocol anytime later, if the domain is present.
----------------------------------------------------------------------------- */
int l2tp_add(struct domain *domain)
{
    int 	 err;
    thread_t l2tp_timer_thread = NULL;

    bzero(&l2tp_usr, sizeof(struct pr_usrreqs));
    l2tp_usr.pru_abort 		= pru_abort_notsupp;
    l2tp_usr.pru_accept 	= pru_accept_notsupp;
    l2tp_usr.pru_attach 	= l2tp_attach;
    l2tp_usr.pru_bind 		= pru_bind_notsupp;
    l2tp_usr.pru_connect 	= pru_connect_notsupp;
    l2tp_usr.pru_connect2 	= pru_connect2_notsupp;
    l2tp_usr.pru_control 	= l2tp_control;
    l2tp_usr.pru_detach 	= l2tp_detach;
    l2tp_usr.pru_disconnect	= pru_disconnect_notsupp;
    l2tp_usr.pru_listen 	= pru_listen_notsupp;
    l2tp_usr.pru_peeraddr 	= pru_peeraddr_notsupp;
    l2tp_usr.pru_rcvd 		= pru_rcvd_notsupp;
    l2tp_usr.pru_rcvoob 	= pru_rcvoob_notsupp;
    l2tp_usr.pru_send 		= (int	(*)(struct socket *, int, struct mbuf *, 
				 struct sockaddr *, struct mbuf *, struct proc *))l2tp_send;
    l2tp_usr.pru_sense 		= pru_sense_null;
    l2tp_usr.pru_shutdown 	= pru_shutdown_notsupp;
    l2tp_usr.pru_sockaddr 	= pru_sockaddr_notsupp;
    l2tp_usr.pru_sosend 	= sosend;
    l2tp_usr.pru_soreceive 	= soreceive;
    l2tp_usr.pru_sopoll 	= pru_sopoll_notsupp;


    bzero(&l2tp, sizeof(struct protosw));
    l2tp.pr_type		= SOCK_DGRAM;
    l2tp.pr_domain		= domain;
    l2tp.pr_protocol 	= PPPPROTO_L2TP;
    l2tp.pr_flags		= PR_ATOMIC | PR_ADDR | PR_PROTOLOCK;
    l2tp.pr_ctloutput 	= l2tp_ctloutput;
    l2tp.pr_init		= l2tp_init;

    l2tp.pr_usrreqs 	= &l2tp_usr;

    /* Start timer thread */
    l2tp_timer_thread_is_dying = 0;
    if (kernel_thread_start((thread_continue_t)l2tp_timer, NULL, &l2tp_timer_thread) == KERN_SUCCESS) {
        thread_deallocate(l2tp_timer_thread);
    }
    
    err = net_add_proto(&l2tp, domain);
    if (err)
        return err;

    return KERN_SUCCESS;
}
Example #13
void
ux_handler_init(void)
{
	thread_t	thread = THREAD_NULL;

	ux_exception_port = MACH_PORT_NULL;
	(void) kernel_thread_start((thread_continue_t)ux_handler, NULL, &thread);
	thread_deallocate(thread);
	proc_list_lock();
	if (ux_exception_port == MACH_PORT_NULL)  {
		(void)msleep(&ux_exception_port, proc_list_mlock, 0, "ux_handler_wait", 0);
	}
	proc_list_unlock();
}
Example #14
DECLHIDDEN(int) rtThreadNativeCreate(PRTTHREADINT pThreadInt, PRTNATIVETHREAD pNativeThread)
{
    RT_ASSERT_PREEMPTIBLE();

    thread_t NativeThread;
    kern_return_t kr = kernel_thread_start(rtThreadNativeMain, pThreadInt, &NativeThread);
    if (kr == KERN_SUCCESS)
    {
        *pNativeThread = (RTNATIVETHREAD)NativeThread;
        thread_deallocate(NativeThread);
        return VINF_SUCCESS;
    }
    return RTErrConvertFromMachKernReturn(kr);
}
Example #15
void
thread_daemon_init(void)
{
	kern_return_t	result;
	thread_t	thread = NULL;

	simple_lock_init(&thread_terminate_lock, 0);
	queue_init(&thread_terminate_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_terminate_daemon, NULL, MINPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_terminate_daemon");

	thread_deallocate(thread);

	simple_lock_init(&thread_stack_lock, 0);
	queue_init(&thread_stack_queue);

	result = kernel_thread_start_priority((thread_continue_t)thread_stack_daemon, NULL, BASEPRI_PREEMPT, &thread);
	if (result != KERN_SUCCESS)
		panic("thread_daemon_init: thread_stack_daemon");

	thread_deallocate(thread);
}
Example #16
void
serial_keyboard_init(void)
{
	kern_return_t	result;
	thread_t		thread;

	if(!(serialmode & SERIALMODE_INPUT)) /* Leave if we do not want a serial console */
		return;

	kprintf("Serial keyboard started\n");
	result = kernel_thread_start_priority((thread_continue_t)serial_keyboard_start, NULL, MAXPRI_KERNEL, &thread);
	if (result != KERN_SUCCESS)
		panic("serial_keyboard_init");

	thread_deallocate(thread);
}
Example #17
/*
 * fork
 *
 * Description:	fork system call.
 *
 * Parameters:	parent			Parent process to fork
 *		uap (void)		[unused]
 *		retval			Return value
 *
 * Returns:	0			Success
 *		EAGAIN			Resource unavailable, try again
 *
 * Notes:	Attempts to create a new child process which inherits state
 *		from the parent process.  If successful, the call returns
 *		having created an initially suspended child process with an
 *		extra Mach task and thread reference, for which the thread
 *		is initially suspended.  Until we resume the child process,
 *		it is not yet running.
 *
 *		The return information to the child is contained in the
 *		thread state structure of the new child, and does not
 *		become visible to the child through a normal return process,
 *		since it never made the call into the kernel itself in the
 *		first place.
 *
 *		After resuming the thread, this function returns directly to
 *		the parent process which invoked the fork() system call.
 *
 * Important:	The child thread_resume occurs before the parent returns;
 *		depending on scheduling latency, this means that it is not
 *		deterministic as to whether the parent or child is scheduled
 *		to run first.  It is entirely possible that the child could
 *		run to completion prior to the parent running.
 */
int
fork(proc_t parent_proc, __unused struct fork_args *uap, int32_t *retval)
{
	thread_t child_thread;
	int err;

	retval[1] = 0;		/* flag parent return for user space */

	if ((err = fork1(parent_proc, &child_thread, PROC_CREATE_FORK, NULL)) == 0) {
		task_t child_task;
		proc_t child_proc;

		/* Return to the parent */
		child_proc = (proc_t)get_bsdthreadtask_info(child_thread);
		retval[0] = child_proc->p_pid;

		/*
		 * Drop the signal lock on the child which was taken on our
		 * behalf by forkproc()/cloneproc() to prevent signals being
		 * received by the child in a partially constructed state.
		 */
		proc_signalend(child_proc, 0);
		proc_transend(child_proc, 0);

		/* flag the fork has occurred */
		proc_knote(parent_proc, NOTE_FORK | child_proc->p_pid);
		DTRACE_PROC1(create, proc_t, child_proc);

#if CONFIG_DTRACE
		if ((dtrace_proc_waitfor_hook = dtrace_proc_waitfor_exec_ptr) != NULL)
			(*dtrace_proc_waitfor_hook)(child_proc);
#endif

		/* "Return" to the child */
		proc_clear_return_wait(child_proc, child_thread);

		/* drop the extra references we got during the creation */
		if ((child_task = (task_t)get_threadtask(child_thread)) != NULL) {
			task_deallocate(child_task);
		}
		thread_deallocate(child_thread);
	}

	return(err);
}
Example #18
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t			thread;
	task_t				task;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		task->total_system_time += timer_grab(&thread->system_timer);

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;
		task_unlock(task);

		mutex_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		mutex_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
Example #19
static kern_return_t
thread_create_running_internal2(
	register task_t         task,
	int                     flavor,
	thread_state_t          new_state,
	mach_msg_type_number_t  new_state_count,
	thread_t				*new_thread,
	boolean_t				from_user)
{
	register kern_return_t  result;
	thread_t				thread;

	if (task == TASK_NULL || task == kernel_task)
		return (KERN_INVALID_ARGUMENT);

	result = thread_create_internal(task, -1, (thread_continue_t)thread_bootstrap_return, TH_OPTION_NONE, &thread);
	if (result != KERN_SUCCESS)
		return (result);

	result = machine_thread_set_state(
						thread, flavor, new_state, new_state_count);
	if (result != KERN_SUCCESS) {
		task_unlock(task);
		lck_mtx_unlock(&tasks_threads_lock);

		thread_terminate(thread);
		thread_deallocate(thread);
		return (result);
	}

	thread_mtx_lock(thread);
	thread_start_internal(thread);
	thread_mtx_unlock(thread);

	if (from_user)
		extmod_statistics_incr_thread_create(task);

	task_unlock(task);
	lck_mtx_unlock(&tasks_threads_lock);

	*new_thread = thread;

	return (result);
}
Example #20
/* memory pressure monitor thread */
static void
hv_mp_notify(void) {
	while (1) {
		mach_vm_pressure_monitor(TRUE, 0, NULL, NULL);

		lck_mtx_lock(hv_support_lck_mtx);
		if (hv_mp_notify_destroy == 1) {
			hv_mp_notify_destroy = 0;
			hv_mp_notify_enabled = 0;
			lck_mtx_unlock(hv_support_lck_mtx);
			break;
		} else {
			hv_callbacks.memory_pressure(NULL);
		}
		lck_mtx_unlock(hv_support_lck_mtx);
	}

	thread_deallocate(current_thread());
}
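This example shows the inverse discipline: the creator keeps the reference returned by kernel_thread_start(), and the worker releases it on exit via thread_deallocate(current_thread()). A sketch of the creator side under that assumption; the function name is hypothetical:

static void
hv_mp_notify_start(void)		/* hypothetical creator */
{
	thread_t thread;

	if (kernel_thread_start((thread_continue_t)hv_mp_notify, NULL,
	    &thread) != KERN_SUCCESS)
		return;

	/* Deliberately no thread_deallocate() here: the worker drops
	 * this reference itself just before it exits. */
}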
Example #21
thread_t
kernel_thread(
	task_t			task,
	void			(*start)(void))
{
	kern_return_t	result;
	thread_t		thread;

	if (task != kernel_task)
		panic("kernel_thread");

	result = kernel_thread_start_priority((thread_continue_t)start, NULL, -1, &thread);
	if (result != KERN_SUCCESS)
		return (THREAD_NULL);

	thread_deallocate(thread);

	return (thread);
}
Example #22
ipc_port_t
convert_thread_to_port(
	thread_t		thread)
{
	ipc_port_t		port;

	thread_mtx_lock(thread);

	if (thread->ith_self != IP_NULL)
		port = ipc_port_make_send(thread->ith_self);
	else
		port = IP_NULL;

	thread_mtx_unlock(thread);

	thread_deallocate(thread);

	return (port);
}
Example #23
__private_extern__ kern_return_t
chudxnu_free_thread_list(
	thread_array_t	*thread_list,
	mach_msg_type_number_t	*count)
{
	vm_size_t size = (*count)*sizeof(mach_port_t);
	void *addr = *thread_list;

	if(addr) {
		int i, maxCount = *count;
		for(i=0; i<maxCount; i++) {
			thread_deallocate((*thread_list)[i]);
		}		
		kfree(addr, size);
		*thread_list = NULL;
		*count = 0;
		return KERN_SUCCESS;
	} else {
		return KERN_FAILURE;
	}
}
Example #24
// IOFireWireIRMAllocation::handleBusReset
//
//
void IOFireWireIRMAllocation::handleBusReset(UInt32 generation)
{
	// Take the lock
	IORecursiveLockLock(fLock);

	if (!isAllocated)
	{
		IORecursiveLockUnlock(fLock);
		return;
	}
	
	if (fAllocationGeneration == generation)
	{
		IORecursiveLockUnlock(fLock);
		return;
	}
	
	// Spawn a thread to do the reallocation
	IRMAllocationThreadInfo * threadInfo = (IRMAllocationThreadInfo *)IOMalloc( sizeof(IRMAllocationThreadInfo) );
	if( threadInfo ) 
	{
		threadInfo->fGeneration = generation;
		threadInfo->fIRMAllocation = this;
		threadInfo->fControl = fControl;
		threadInfo->fLock = fLock;
		threadInfo->fIsochChannel = fIsochChannel; 
		threadInfo->fBandwidthUnits = fBandwidthUnits;

		retain();	// retain ourself for the thread to use

		thread_t		thread;
		if( kernel_thread_start((thread_continue_t)threadFunc, threadInfo, &thread ) == KERN_SUCCESS )
		{
			thread_deallocate(thread);
		}
	}
	
	// Unlock the lock
	IORecursiveLockUnlock(fLock);
}
Example #25
void
fuse_sysctl_stop(void)
{
    int i;

    for (i = 0; fuse_sysctl_list[i]; i++) {
       sysctl_unregister_oid(fuse_sysctl_list[i]);
    }
    sysctl_unregister_oid(&sysctl__osxfuse);

#if OSXFUSE_ENABLE_MACFUSE_MODE
    lck_mtx_lock(osxfuse_sysctl_lock);

    thread_deallocate(osxfuse_sysctl_macfuse_thread);
    if (fuse_macfuse_mode) {
        fuse_sysctl_macfuse_stop();
    }

    lck_mtx_unlock(osxfuse_sysctl_lock);

    lck_mtx_free(osxfuse_sysctl_lock, osxfuse_lock_group);
    lck_grp_free(osxfuse_lock_group);
#endif /* OSXFUSE_ENABLE_MACFUSE_MODE */
}
Example #26
/*
 * Simple wrapper for creating threads bound to 
 * thread call groups.
 */
static kern_return_t
thread_call_thread_create(
		thread_call_group_t             group)
{
	thread_t thread;
	kern_return_t result;

	result = kernel_thread_start_priority((thread_continue_t)thread_call_thread, group, group->pri, &thread);
	if (result != KERN_SUCCESS) {
		return result;
	}

	if (group->pri < BASEPRI_PREEMPT) {
		/*
		 * New style doesn't get to run to completion in 
		 * kernel if there are higher priority threads 
		 * available.
		 */
		thread_set_eager_preempt(thread);
	}

	thread_deallocate(thread);
	return KERN_SUCCESS;
}
Example #27
int
ptrace(struct proc *p, struct ptrace_args *uap, register_t *retval)
{
	struct proc *t = current_proc();	/* target process */
	task_t		task;
	thread_t	th_act;
	struct uthread 	*ut;
	int tr_sigexc = 0;
	int error = 0;
	int stopped = 0;

	AUDIT_ARG(cmd, uap->req);
	AUDIT_ARG(pid, uap->pid);
	AUDIT_ARG(addr, uap->addr);
	AUDIT_ARG(value, uap->data);

	if (uap->req == PT_DENY_ATTACH) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			proc_unlock(p);
			exit1(p, W_EXITCODE(ENOTSUP, 0), retval);
			/* drop funnel before we return */
			thread_exception_return();
			/* NOTREACHED */
		}
		SET(p->p_lflag, P_LNOATTACH);
		proc_unlock(p);

		return(0);
	}

	if (uap->req == PT_FORCEQUOTA) {
		if (is_suser()) {
			OSBitOrAtomic(P_FORCEQUOTA, (UInt32 *)&t->p_flag);
			return (0);
		} else
			return (EPERM);
	}

	/*
	 *	Intercept and deal with "please trace me" request.
	 */	 
	if (uap->req == PT_TRACE_ME) {
		proc_lock(p);
		SET(p->p_lflag, P_LTRACED);
		/* Non-attached case, our tracer is our parent. */
		p->p_oppid = p->p_ppid;
		proc_unlock(p);
		return(0);
	}
	if (uap->req == PT_SIGEXC) {
		proc_lock(p);
		if (ISSET(p->p_lflag, P_LTRACED)) {
			SET(p->p_lflag, P_LSIGEXC);
			proc_unlock(p);
			return(0);
		} else {
			proc_unlock(p);
			return(EINVAL);
		}
	}

	/* 
	 * We do not want ptrace to do anything with kernel or launchd 
	 */
	if (uap->pid < 2) {
		return(EPERM);
	}

	/*
	 *	Locate victim, and make sure it is traceable.
	 */
	if ((t = proc_find(uap->pid)) == NULL)
			return (ESRCH);

	AUDIT_ARG(process, t);

	task = t->task;
	if (uap->req == PT_ATTACHEXC) {
		uap->req = PT_ATTACH;
		tr_sigexc = 1;
	}
	if (uap->req == PT_ATTACH) {
		int		err;
		
		if ( kauth_authorize_process(proc_ucred(p), KAUTH_PROCESS_CANTRACE, 
									 t, (uintptr_t)&err, 0, 0) == 0 ) {
			/* it's OK to attach */
			proc_lock(t);
			SET(t->p_lflag, P_LTRACED);
			if (tr_sigexc) 
				SET(t->p_lflag, P_LSIGEXC);
	
			t->p_oppid = t->p_ppid;
			proc_unlock(t);
			if (t->p_pptr != p)
				proc_reparentlocked(t, p, 1, 0);
	
			proc_lock(t);
			if (get_task_userstop(task) > 0 ) {
				stopped = 1;
			}
			t->p_xstat = 0;
			proc_unlock(t);
			psignal(t, SIGSTOP);
			/*
			 * If the process was stopped, wake up and run through
			 * issignal() again to properly connect to the tracing
			 * process.
			 */
			if (stopped)
				task_resume(task);       
			error = 0;
			goto out;
		}
		else {
			/* not allowed to attach, proper error code returned by kauth_authorize_process */
			if (ISSET(t->p_lflag, P_LNOATTACH)) {
				psignal(p, SIGSEGV);
			}
			
			error = err;
			goto out;
		}
	}

	/*
	 * You can't do what you want to the process if:
	 *	(1) It's not being traced at all,
	 */
	proc_lock(t);
	if (!ISSET(t->p_lflag, P_LTRACED)) {
		proc_unlock(t);
		error = EPERM;
		goto out;
	}

	/*
	 *	(2) it's not being traced by _you_, or
	 */
	if (t->p_pptr != p) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	(3) it's not currently stopped.
	 */
	if (t->p_stat != SSTOP) {
		proc_unlock(t);
		error = EBUSY;
		goto out;
	}

	/*
	 *	Mach version of ptrace executes request directly here,
	 *	thus simplifying the interaction of ptrace and signals.
	 */
	/* proc lock is held here */
	switch (uap->req) {

	case PT_DETACH:
		if (t->p_oppid != t->p_ppid) {
			struct proc *pp;

			proc_unlock(t);
			pp = proc_find(t->p_oppid);
			proc_reparentlocked(t, pp ? pp : initproc, 1, 0);
			if (pp != PROC_NULL)
				proc_rele(pp);
			proc_lock(t);
			
		}

		t->p_oppid = 0;
		CLR(t->p_lflag, P_LTRACED);
		CLR(t->p_lflag, P_LSIGEXC);
		proc_unlock(t);
		goto resume;
		
	case PT_KILL:
		/*
		 *	Tell child process to kill itself after it
		 *	is resumed by adding NSIG to p_cursig. [see issig]
		 */
		proc_unlock(t);
		psignal(t, SIGKILL);
		goto resume;

	case PT_STEP:			/* single step the child */
	case PT_CONTINUE:		/* continue the child */
		proc_unlock(t);
		th_act = (thread_t)get_firstthread(task);
		if (th_act == THREAD_NULL) {
			error = EINVAL;
			goto out;
		}

		if (uap->addr != (user_addr_t)1) {
#if defined(ppc)
#define ALIGNED(addr,size)	(((unsigned)(addr)&((size)-1))==0)
			if (!ALIGNED((int)uap->addr, sizeof(int)))
				return (ERESTART);
#undef 	ALIGNED
#endif
			thread_setentrypoint(th_act, uap->addr);
		}

		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}

		if (uap->data != 0) {
			psignal(t, uap->data);
                }

		if (uap->req == PT_STEP) {
		        /*
			 * set trace bit
			 */
			if (thread_setsinglestep(th_act, 1) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		} else {
		        /*
			 * clear trace bit if on
			 */
			if (thread_setsinglestep(th_act, 0) != KERN_SUCCESS) {
				error = ENOTSUP;
				goto out;
			}
		}	
	resume:
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		if (t->sigwait) {
			wakeup((caddr_t)&(t->sigwait));
			proc_unlock(t);
			if ((t->p_lflag & P_LSIGEXC) == 0) {
				task_resume(task);
			}
		} else
			proc_unlock(t);
			
		break;
		
	case PT_THUPDATE:  {
		proc_unlock(t);
		if ((unsigned)uap->data >= NSIG) {
			error = EINVAL;
			goto out;
		}
		th_act = port_name_to_thread(CAST_DOWN(mach_port_name_t, uap->addr));
		if (th_act == THREAD_NULL)
			return (ESRCH);
		ut = (uthread_t)get_bsdthread_info(th_act);
		if (uap->data)
			ut->uu_siglist |= sigmask(uap->data);
		proc_lock(t);
		t->p_xstat = uap->data;
		t->p_stat = SRUN;
		proc_unlock(t);
		thread_deallocate(th_act);
		error = 0;
		}
		break;
	default:
		proc_unlock(t);
		error = EINVAL;
		goto out;
	}

	error = 0;
out:
	proc_rele(t);
	return(error);
}
Example #28
void
kernel_bootstrap(void)
{
    kern_return_t	result;
    thread_t	thread;
    char		namep[16];

    printf("%s\n", version); /* log kernel version */

#define kernel_bootstrap_kprintf(x...) /* kprintf("kernel_bootstrap: " x) */

    if (PE_parse_boot_argn("-l", namep, sizeof (namep))) /* leaks logging */
        turn_on_log_leaks = 1;

    PE_parse_boot_argn("trace", &new_nkdbufs, sizeof (new_nkdbufs));

    /* i386_vm_init already checks for this; do it again anyway */
    if (PE_parse_boot_argn("serverperfmode", &serverperfmode, sizeof (serverperfmode))) {
        serverperfmode = 1;
    }
    scale_setup();

    kernel_bootstrap_kprintf("calling vm_mem_bootstrap\n");
    vm_mem_bootstrap();

    kernel_bootstrap_kprintf("calling vm_mem_init\n");
    vm_mem_init();

    machine_info.memory_size = (uint32_t)mem_size;
    machine_info.max_mem = max_mem;
    machine_info.major_version = version_major;
    machine_info.minor_version = version_minor;

    kernel_bootstrap_kprintf("calling sched_init\n");
    sched_init();

    kernel_bootstrap_kprintf("calling wait_queue_bootstrap\n");
    wait_queue_bootstrap();

    kernel_bootstrap_kprintf("calling ipc_bootstrap\n");
    ipc_bootstrap();

#if CONFIG_MACF
    mac_policy_init();
#endif
    kernel_bootstrap_kprintf("calling ipc_init\n");
    ipc_init();

    /*
     * As soon as the virtual memory system is up, we record
     * that this CPU is using the kernel pmap.
     */
    kernel_bootstrap_kprintf("calling PMAP_ACTIVATE_KERNEL\n");
    PMAP_ACTIVATE_KERNEL(master_cpu);

    kernel_bootstrap_kprintf("calling mapping_free_prime\n");
    mapping_free_prime();						/* Load up with temporary mapping blocks */

    kernel_bootstrap_kprintf("calling machine_init\n");
    machine_init();

    kernel_bootstrap_kprintf("calling clock_init\n");
    clock_init();

    ledger_init();

    /*
     *	Initialize the IPC, task, and thread subsystems.
     */
    kernel_bootstrap_kprintf("calling task_init\n");
    task_init();

    kernel_bootstrap_kprintf("calling thread_init\n");
    thread_init();

    /*
     *	Create a kernel thread to execute the kernel bootstrap.
     */
    kernel_bootstrap_kprintf("calling kernel_thread_create\n");
    result = kernel_thread_create((thread_continue_t)kernel_bootstrap_thread, NULL, MAXPRI_KERNEL, &thread);

    if (result != KERN_SUCCESS) panic("kernel_bootstrap: result = %08X\n", result);

    thread->state = TH_RUN;
    thread_deallocate(thread);

    kernel_bootstrap_kprintf("calling load_context - done\n");
    load_context(thread);
    /*NOTREACHED*/
}
Example #29
void
processor_doaction(
	processor_t	processor)
{
	thread_t			this_thread;
	spl_t				s;
	register processor_set_t	pset;
#if	MACH_HOST
	register processor_set_t	new_pset;
	register thread_t		thread;
	register thread_t		prev_thread = THREAD_NULL;
	thread_act_t			thr_act;
	boolean_t			have_pset_ref = FALSE;
#endif	/* MACH_HOST */

	/*
	 *	Get onto the processor to shutdown
	 */
	this_thread = current_thread();
	thread_bind(this_thread, processor);
	thread_block((void (*)(void)) 0);

	pset = processor->processor_set;
#if	MACH_HOST
	/*
	 *	If this is the last processor in the processor_set,
	 *	stop all the threads first.
	 */
	pset_lock(pset);
	if (pset->processor_count == 1) {
		thread = (thread_t) queue_first(&pset->threads);
		prev_thread = THREAD_NULL;
		pset->ref_count++;
		have_pset_ref = TRUE;
		pset->empty = TRUE;

		/*
		 * loop through freezing the processor set assignment
		 * and reference counting the threads;
		 */
		while (!queue_end(&pset->threads, (queue_entry_t) thread)) {
		    thread_reference(thread);
		    pset_unlock(pset);

		    /*
		     * Freeze the thread on the processor set.
		     * If it's moved, just release the reference.
		     * Get the next thread in the processor set list
		     * from the last one which was frozen.
		     */
		    if( thread_stop_freeze(thread, pset) )
		        prev_thread = thread;
		    else
			thread_deallocate(thread);

		    pset_lock(pset);
		    if( prev_thread != THREAD_NULL ) 
		        thread = (thread_t)queue_next(&prev_thread->pset_threads);
		    else
			thread = (thread_t) queue_first(&pset->threads);
		}

		/*
		 * Remove the processor from the set so that when the threads
		 * are unstopped below the ones blocked in the kernel don't
		 * start running again.
		 */
		s = splsched();
		processor_lock(processor);
		pset_remove_processor(pset, processor);

		/*
		 * Prevent race with another processor being added to the set
		 * See code after Restart_pset:
		 *   while(new_pset->empty && new_pset->processor_count > 0)
		 *
		 * ... it tests for the condition where a new processor is
		 * added to the set while the last one is still being removed.
		 */
		pset->processor_count++;	/* block new processors being added */
		assert( pset->processor_count == 1 );

		/*
		 * Release the thread assignment locks, unstop the threads and
		 * release the thread references which were taken above.
		 */
		thread = (thread_t) queue_first(&pset->threads);
		while( !queue_empty( &pset->threads) && (thread != THREAD_NULL) ) {
		    prev_thread = thread;
		    if( queue_end(&pset->threads, (queue_entry_t) thread) )
			thread = THREAD_NULL;
		    else
		        thread = (thread_t) queue_next(&prev_thread->pset_threads);
		    pset_unlock(pset);
		    thread_unfreeze(prev_thread);
		    thread_unstop(prev_thread);
		    thread_deallocate(prev_thread);
		    pset_lock(pset);
		}
		/*
		 * allow a processor to be added to the empty pset
		 */
		pset->processor_count--;
	}
	else { 
		/* not last processor in set */
#endif	/* MACH_HOST */
		/*
		 * At this point, it is ok to remove the processor from the pset.
		 */
		s = splsched();
		processor_lock(processor);
		pset_remove_processor(pset, processor);
#if	MACH_HOST
	}
	pset_unlock(pset);

	/*
	 *	Copy the next pset pointer into a local variable and clear
	 *	it because we are taking over its reference.
	 */
	new_pset = processor->processor_set_next;
	processor->processor_set_next = PROCESSOR_SET_NULL;

	if (processor->state == PROCESSOR_ASSIGN) {

Restart_pset:
	    /*
	     *	Nasty problem: we want to lock the target pset, but
	     *	we have to enable interrupts to do that which requires
	     *  dropping the processor lock.  While the processor
	     *  is unlocked, it could be reassigned or shutdown.
	     */
	    processor_unlock(processor);
	    splx(s);

	    /*
	     *  Lock target pset and handle remove last / assign first race.
	     *	Only happens if there is more than one action thread.
	     */
	    pset_lock(new_pset);
	    while (new_pset->empty && new_pset->processor_count > 0) {
		pset_unlock(new_pset);
		while (*(volatile boolean_t *)&new_pset->empty &&
		       *(volatile int *)&new_pset->processor_count > 0)
			/* spin */;
		pset_lock(new_pset);
	    }

	    /*
	     *	Finally relock the processor and see if something changed.
	     *	The only possibilities are assignment to a different pset
	     *	and shutdown.
	     */
	    s = splsched();
	    processor_lock(processor);

	    if (processor->state == PROCESSOR_SHUTDOWN) {
		pset_unlock(new_pset);
		goto shutdown; /* will release pset reference */
	    }

	    if (processor->processor_set_next != PROCESSOR_SET_NULL) {
		/*
		 *	Processor was reassigned.  Drop the reference
		 *	we have on the wrong new_pset, and get the
		 *	right one.  Involves lots of lock juggling.
		 */
		processor_unlock(processor);
		splx(s);
		pset_unlock(new_pset);
		pset_deallocate(new_pset);
		s = splsched();
	        processor_lock(processor);
		new_pset = processor->processor_set_next;
		processor->processor_set_next = PROCESSOR_SET_NULL;
		goto Restart_pset;
	    }

	    /*
	     *	If the pset has been deactivated since the operation
	     *	was requested, redirect to the default pset.
	     */
	    if (!(new_pset->active)) {
		pset_unlock(new_pset);
		pset_deallocate(new_pset);
		new_pset = &default_pset;
		pset_lock(new_pset);
		new_pset->ref_count++;
	    }

	    /*
	     *	Do assignment, then wakeup anyone waiting for it.
	     *	Finally context switch to have it take effect.
	     */
	    pset_add_processor(new_pset, processor);
	    if (new_pset->empty) {
		/*
		 *	Set all the threads loose
		 */
		thread = (thread_t) queue_first(&new_pset->threads);
		while (!queue_end(&new_pset->threads,(queue_entry_t)thread)) {
		    thr_act = thread_lock_act(thread);
		    thread_release(thread->top_act);
		    act_unlock_thread(thr_act);
		    thread = (thread_t) queue_next(&thread->pset_threads);
		}
		new_pset->empty = FALSE;
	    }
	    processor->processor_set_next = PROCESSOR_SET_NULL;
	    processor->state = PROCESSOR_RUNNING;
	    thread_wakeup((event_t)processor);
	    processor_unlock(processor);
	    splx(s);
	    pset_unlock(new_pset);

	    /*
	     *	Clean up dangling references, and release our binding.
	     */
	    pset_deallocate(new_pset);
	    if (have_pset_ref)
		pset_deallocate(pset);
	    if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
	    thread_bind(this_thread, PROCESSOR_NULL);

	    thread_block((void (*)(void)) 0);
	    return;
	}

shutdown:
#endif	/* MACH_HOST */
	
	/*
	 *	Do shutdown, make sure we live when processor dies.
	 */
	if (processor->state != PROCESSOR_SHUTDOWN) {
		printf("state: %d\n", processor->state);
	    	panic("action_thread -- bad processor state");
	}
	processor_unlock(processor);
	/*
	 *	Clean up dangling references, and release our binding.
	 */
#if	MACH_HOST
	if (new_pset != PROCESSOR_SET_NULL)
		pset_deallocate(new_pset);
	if (have_pset_ref)
		pset_deallocate(pset);
	if (prev_thread != THREAD_NULL)
		thread_deallocate(prev_thread);
#endif	/* MACH_HOST */

	thread_bind(this_thread, PROCESSOR_NULL);
	switch_to_shutdown_context(this_thread,
				   processor_doshutdown,
				   processor);
	splx(s);
}
Example #30
/*
 *	thread_terminate_daemon:
 *
 *	Perform final clean up for terminating threads.
 */
static void
thread_terminate_daemon(void)
{
	thread_t	self, thread;
	task_t		task;

	self = current_thread();
	self->options |= TH_OPT_SYSTEM_CRITICAL;

	(void)splsched();
	simple_lock(&thread_terminate_lock);

	while ((thread = (thread_t)dequeue_head(&thread_terminate_queue)) != THREAD_NULL) {
		simple_unlock(&thread_terminate_lock);
		(void)spllo();

		task = thread->task;

		task_lock(task);
		task->total_user_time += timer_grab(&thread->user_timer);
		if (thread->precise_user_kernel_time) {
			task->total_system_time += timer_grab(&thread->system_timer);
		} else {
			task->total_user_time += timer_grab(&thread->system_timer);
		}

		task->c_switch += thread->c_switch;
		task->p_switch += thread->p_switch;
		task->ps_switch += thread->ps_switch;

		task->syscalls_unix += thread->syscalls_unix;
		task->syscalls_mach += thread->syscalls_mach;

		task->task_timer_wakeups_bin_1 += thread->thread_timer_wakeups_bin_1;
		task->task_timer_wakeups_bin_2 += thread->thread_timer_wakeups_bin_2;
		queue_remove(&task->threads, thread, thread_t, task_threads);
		task->thread_count--;

		/* 
		 * If the task is being halted, and there is only one thread
		 * left in the task after this one, then wakeup that thread.
		 */
		if (task->thread_count == 1 && task->halting)
			thread_wakeup((event_t)&task->halting);

		task_unlock(task);

		lck_mtx_lock(&tasks_threads_lock);
		queue_remove(&threads, thread, thread_t, threads);
		threads_count--;
		lck_mtx_unlock(&tasks_threads_lock);

		thread_deallocate(thread);

		(void)splsched();
		simple_lock(&thread_terminate_lock);
	}

	assert_wait((event_t)&thread_terminate_queue, THREAD_UNINT);
	simple_unlock(&thread_terminate_lock);
	/* splsched */

	self->options &= ~TH_OPT_SYSTEM_CRITICAL;
	thread_block((thread_continue_t)thread_terminate_daemon);
	/*NOTREACHED*/
}
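For context, the producer side of this queue enqueues the dying thread and wakes the daemon on the same event the daemon waits on above. A simplified sketch of that enqueue path, reconstructed from the surrounding code rather than quoted verbatim; the function name is hypothetical:

static void
terminate_enqueue_sketch(thread_t thread)	/* simplified sketch */
{
	spl_t s = splsched();

	simple_lock(&thread_terminate_lock);
	enqueue_tail(&thread_terminate_queue, (queue_entry_t)thread);
	simple_unlock(&thread_terminate_lock);
	splx(s);

	/* Pairs with the assert_wait() in the daemon above. */
	thread_wakeup((event_t)&thread_terminate_queue);
}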