/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
    task_t              parent_task,
    integer_t           priority,
    thread_continue_t   continuation,
    int                 options,
#define TH_OPTION_NONE      0x00
#define TH_OPTION_NOCRED    0x01
#define TH_OPTION_NOSUSP    0x02
    thread_t            *out_thread)
{
    thread_t        new_thread;
    static thread_t first_thread = THREAD_NULL;

    /*
     * Allocate a thread and initialize static fields
     */
    if (first_thread == THREAD_NULL)
        new_thread = first_thread = current_thread();
    else
        new_thread = (thread_t)zalloc(thread_zone);
    if (new_thread == THREAD_NULL)
        return (KERN_RESOURCE_SHORTAGE);

    if (new_thread != first_thread)
        *new_thread = thread_template;

#ifdef MACH_BSD
    new_thread->uthread = uthread_alloc(parent_task, new_thread,
                                        (options & TH_OPTION_NOCRED) != 0);
    if (new_thread->uthread == NULL) {
        zfree(thread_zone, new_thread);
        return (KERN_RESOURCE_SHORTAGE);
    }
#endif /* MACH_BSD */

    if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
        void *ut = new_thread->uthread;

        new_thread->uthread = NULL;
        /* cred free may not be necessary */
        uthread_cleanup(parent_task, ut, parent_task->bsd_info);
        uthread_cred_free(ut);
        uthread_zone_free(ut);
#endif /* MACH_BSD */
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    new_thread->task = parent_task;

    thread_lock_init(new_thread);
    wake_lock_init(new_thread);

    lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

    ipc_thread_init(new_thread);
    queue_init(&new_thread->held_ulocks);

    new_thread->continuation = continuation;

    lck_mtx_lock(&tasks_threads_lock);
    task_lock(parent_task);

    if ( !parent_task->active || parent_task->halting ||
         ((options & TH_OPTION_NOSUSP) != 0 &&
          parent_task->suspend_count > 0) ||
         (parent_task->thread_count >= task_threadmax &&
          parent_task != kernel_task) ) {
        task_unlock(parent_task);
        lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            /* cred free may not be necessary */
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif /* MACH_BSD */
        ipc_thread_disable(new_thread);
        ipc_thread_terminate(new_thread);
        lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
        machine_thread_destroy(new_thread);
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    /* New threads inherit any default state on the task */
    machine_thread_inherit_taskwide(new_thread, parent_task);

    task_reference_internal(parent_task);

    if (new_thread->task->rusage_cpu_flags & TASK_RUSECPU_FLAGS_PERTHR_LIMIT) {
        /*
         * This task has a per-thread CPU limit; make sure this new thread
         * gets its limit set too, before it gets out of the kernel.
         */
        set_astledger(new_thread);
    }
    new_thread->t_threadledger = LEDGER_NULL;   /* per thread ledger is not inherited */

    new_thread->t_ledger = new_thread->task->ledger;
    if (new_thread->t_ledger)
        ledger_reference(new_thread->t_ledger);

    /* Cache the task's map */
    new_thread->map = parent_task->map;

    /* Chain the thread onto the task's list */
    queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
    parent_task->thread_count++;

    /* So terminating threads don't need to take the task lock to decrement */
    hw_atomic_add(&parent_task->active_thread_count, 1);

    /* Protected by the tasks_threads_lock */
    new_thread->thread_id = ++thread_unique_id;

    queue_enter(&threads, new_thread, thread_t, threads);
    threads_count++;

    timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
    timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
    /*
     * If parent task has any reservations, they need to be propagated to this
     * thread.
     */
    new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
        THREAD_PMC_FLAG : 0U;
#endif

    /* Set the thread's scheduling parameters */
    new_thread->sched_mode = SCHED(initial_thread_sched_mode)(parent_task);
    new_thread->sched_flags = 0;
    new_thread->max_priority = parent_task->max_priority;
    new_thread->task_priority = parent_task->priority;
    new_thread->priority = (priority < 0)? parent_task->priority: priority;
    if (new_thread->priority > new_thread->max_priority)
        new_thread->priority = new_thread->max_priority;
#if CONFIG_EMBEDDED
    if (new_thread->priority < MAXPRI_THROTTLE) {
        new_thread->priority = MAXPRI_THROTTLE;
    }
#endif /* CONFIG_EMBEDDED */
    new_thread->importance = new_thread->priority - new_thread->task_priority;
#if CONFIG_EMBEDDED
    new_thread->saved_importance = new_thread->importance;
    /* apple ios daemon starts all threads in darwin background */
    if (parent_task->ext_appliedstate.apptype == PROC_POLICY_IOS_APPLE_DAEMON) {
        /* Cannot use generic routines here so apply darwin background directly */
        new_thread->policystate.hw_bg = TASK_POLICY_BACKGROUND_ATTRIBUTE_ALL;
        /* set thread self backgrounding */
        new_thread->appliedstate.hw_bg = new_thread->policystate.hw_bg;
        /* priority will get recomputed suitably a bit later */
        new_thread->importance = INT_MIN;
        /* to avoid changes to many pri compute routines, set the effect of those here */
        new_thread->priority = MAXPRI_THROTTLE;
    }
#endif /* CONFIG_EMBEDDED */

#if defined(CONFIG_SCHED_TRADITIONAL)
    new_thread->sched_stamp = sched_tick;
    new_thread->pri_shift = sched_pri_shift;
#endif
    SCHED(compute_priority)(new_thread, FALSE);

    new_thread->active = TRUE;

    *out_thread = new_thread;

    {
        long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

        kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
            (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

        kdbg_trace_string(parent_task->bsd_info,
            &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

        KERNEL_DEBUG_CONSTANT_IST(KDEBUG_TRACE,
            TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
            dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
    }

    DTRACE_PROC1(lwp__create, thread_t, *out_thread);

    return (KERN_SUCCESS);
}
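/*
 * For context, a minimal caller sketch -- an illustrative assumption, not
 * part of the source above.  It shows how thread_create_internal() is
 * typically driven: note that the success path above deliberately returns
 * with task_lock(parent_task) and the global tasks_threads_lock still
 * held, so the caller is the one that drops them.  The wrapper name
 * example_thread_create() and the thread_bootstrap_return continuation
 * are hypothetical; the real in-tree wrappers differ in detail.
 */
static kern_return_t
example_thread_create(
    task_t      parent_task,
    thread_t    *out_thread)
{
    kern_return_t   result;
    thread_t        thread;

    if (parent_task == TASK_NULL)
        return (KERN_INVALID_ARGUMENT);

    /* priority -1 means "inherit the task's priority" (see above) */
    result = thread_create_internal(parent_task, -1,
                (thread_continue_t)thread_bootstrap_return,
                TH_OPTION_NONE, &thread);
    if (result != KERN_SUCCESS)
        return (result);

    /* thread_create_internal() succeeded: release the locks it left held */
    task_unlock(parent_task);
    lck_mtx_unlock(&tasks_threads_lock);

    *out_thread = thread;

    return (KERN_SUCCESS);
}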
/*
 * thread_terminate_self:
 */
void
thread_terminate_self(void)
{
    thread_t    thread = current_thread();
    task_t      task;
    spl_t       s;
    int         threadcnt;

    DTRACE_PROC(lwp__exit);

    thread_mtx_lock(thread);

    ulock_release_all(thread);

    ipc_thread_disable(thread);

    thread_mtx_unlock(thread);

    s = splsched();
    thread_lock(thread);

    /*
     * Cancel priority depression, wait for concurrent expirations
     * on other processors.
     */
    if (thread->sched_flags & TH_SFLAG_DEPRESSED_MASK) {
        thread->sched_flags &= ~TH_SFLAG_DEPRESSED_MASK;

        if (timer_call_cancel(&thread->depress_timer))
            thread->depress_timer_active--;
    }

    while (thread->depress_timer_active > 0) {
        thread_unlock(thread);
        splx(s);

        delay(1);

        s = splsched();
        thread_lock(thread);
    }

    thread_sched_call(thread, NULL);

    thread_unlock(thread);
    splx(s);

    thread_policy_reset(thread);

#if CONFIG_EMBEDDED
    thead_remove_taskwatch(thread);
#endif /* CONFIG_EMBEDDED */

    task = thread->task;
    uthread_cleanup(task, thread->uthread, task->bsd_info);
    threadcnt = hw_atomic_sub(&task->active_thread_count, 1);

    /*
     * If we are the last thread to terminate and the task is
     * associated with a BSD process, perform BSD process exit.
     */
    if (threadcnt == 0 && task->bsd_info != NULL)
        proc_exit(task->bsd_info);

    uthread_cred_free(thread->uthread);

    s = splsched();
    thread_lock(thread);

    /*
     * Cancel wait timer, and wait for
     * concurrent expirations.
     */
    if (thread->wait_timer_is_set) {
        thread->wait_timer_is_set = FALSE;

        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
    }

    while (thread->wait_timer_active > 0) {
        thread_unlock(thread);
        splx(s);

        delay(1);

        s = splsched();
        thread_lock(thread);
    }

    /*
     * If there is a reserved stack, release it.
     */
    if (thread->reserved_stack != 0) {
        stack_free_reserved(thread);
        thread->reserved_stack = 0;
    }

    /*
     * Mark thread as terminating, and block.
     */
    thread->state |= TH_TERMINATE;
    thread_mark_wait_locked(thread, THREAD_UNINT);
    thread_unlock(thread);
    /* splsched */

    thread_block((thread_continue_t)thread_terminate_continue);
    /*NOTREACHED*/
}
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
    task_t              parent_task,
    integer_t           priority,
    thread_continue_t   continuation,
    thread_t            *out_thread)
{
    thread_t        new_thread;
    static thread_t first_thread;

    /*
     * Allocate a thread and initialize static fields
     */
    if (first_thread == NULL)
        new_thread = first_thread = current_thread();
    else
        new_thread = (thread_t)zalloc(thread_zone);
    if (new_thread == NULL)
        return (KERN_RESOURCE_SHORTAGE);

    if (new_thread != first_thread)
        *new_thread = thread_template;

#ifdef MACH_BSD
    {
        new_thread->uthread = uthread_alloc(parent_task, new_thread);
        if (new_thread->uthread == NULL) {
            zfree(thread_zone, new_thread);
            return (KERN_RESOURCE_SHORTAGE);
        }
    }
#endif /* MACH_BSD */

    if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            /* cred free may not be necessary */
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif /* MACH_BSD */
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    new_thread->task = parent_task;

    thread_lock_init(new_thread);
    wake_lock_init(new_thread);

    mutex_init(&new_thread->mutex, 0);

    ipc_thread_init(new_thread);
    queue_init(&new_thread->held_ulocks);

    new_thread->continuation = continuation;

    mutex_lock(&tasks_threads_lock);
    task_lock(parent_task);

    if ( !parent_task->active ||
         (parent_task->thread_count >= THREAD_MAX &&
          parent_task != kernel_task)) {
        task_unlock(parent_task);
        mutex_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            /* cred free may not be necessary */
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif /* MACH_BSD */
        ipc_thread_disable(new_thread);
        ipc_thread_terminate(new_thread);
        machine_thread_destroy(new_thread);
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    task_reference_internal(parent_task);

    /* Cache the task's map */
    new_thread->map = parent_task->map;

    /* Chain the thread onto the task's list */
    queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
    parent_task->thread_count++;

    /* So terminating threads don't need to take the task lock to decrement */
    hw_atomic_add(&parent_task->active_thread_count, 1);

    queue_enter(&threads, new_thread, thread_t, threads);
    threads_count++;

    timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
    timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

    /* Set the thread's scheduling parameters */
    if (parent_task != kernel_task)
        new_thread->sched_mode |= TH_MODE_TIMESHARE;
    new_thread->max_priority = parent_task->max_priority;
    new_thread->task_priority = parent_task->priority;
    new_thread->priority = (priority < 0)? parent_task->priority: priority;
    if (new_thread->priority > new_thread->max_priority)
        new_thread->priority = new_thread->max_priority;
    new_thread->importance = new_thread->priority - new_thread->task_priority;
    new_thread->sched_stamp = sched_tick;
    new_thread->pri_shift = sched_pri_shift;

    compute_priority(new_thread, FALSE);

    new_thread->active = TRUE;

    *out_thread = new_thread;

    {
        long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

        kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
            (vm_address_t)new_thread, dbg_arg2, 0, 0, 0);

        kdbg_trace_string(parent_task->bsd_info,
            &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
            dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
    }

    DTRACE_PROC1(lwp__create, thread_t, *out_thread);

    return (KERN_SUCCESS);
}
/*
 * thread_terminate_self:
 */
void
thread_terminate_self(void)
{
    thread_t    thread = current_thread();
    task_t      task;
    spl_t       s;
    int         lastthread = 0;

    thread_mtx_lock(thread);

    ulock_release_all(thread);

    ipc_thread_disable(thread);

    thread_mtx_unlock(thread);

    s = splsched();
    thread_lock(thread);

    /*
     * Cancel priority depression, wait for concurrent expirations
     * on other processors.
     */
    if (thread->sched_mode & TH_MODE_ISDEPRESSED) {
        thread->sched_mode &= ~TH_MODE_ISDEPRESSED;

        if (timer_call_cancel(&thread->depress_timer))
            thread->depress_timer_active--;
    }

    while (thread->depress_timer_active > 0) {
        thread_unlock(thread);
        splx(s);

        delay(1);

        s = splsched();
        thread_lock(thread);
    }

    thread_unlock(thread);
    splx(s);

    thread_policy_reset(thread);

    /*
     * If we are the last thread to terminate and the task is
     * associated with a BSD process, perform BSD process exit.
     */
    task = thread->task;
    uthread_cleanup(task, thread->uthread, task->bsd_info);
    if (hw_atomic_sub(&task->active_thread_count, 1) == 0 &&
        task->bsd_info != NULL) {
        lastthread = 1;
    }

    if (lastthread != 0)
        proc_exit(task->bsd_info);

    uthread_cred_free(thread->uthread);

    s = splsched();
    thread_lock(thread);

    /*
     * Cancel wait timer, and wait for
     * concurrent expirations.
     */
    if (thread->wait_timer_is_set) {
        thread->wait_timer_is_set = FALSE;

        if (timer_call_cancel(&thread->wait_timer))
            thread->wait_timer_active--;
    }

    while (thread->wait_timer_active > 0) {
        thread_unlock(thread);
        splx(s);

        delay(1);

        s = splsched();
        thread_lock(thread);
    }

    /*
     * If there is a reserved stack, release it.
     */
    if (thread->reserved_stack != 0) {
        if (thread->reserved_stack != thread->kernel_stack)
            stack_free_stack(thread->reserved_stack);
        thread->reserved_stack = 0;
    }

    /*
     * Mark thread as terminating, and block.
     */
    thread->state |= TH_TERMINATE;
    thread_mark_wait_locked(thread, THREAD_UNINT);
    assert(thread->promotions == 0);
    thread_unlock(thread);
    /* splsched */

    thread_block((thread_continue_t)thread_terminate_continue);
    /*NOTREACHED*/
}
/* to trick uthread_create() */
int main(int argc, char** argv)
{
    uint32_t vcoreid;
    int retval;

    /* Initialize our barrier. */
    mcs_barrier_init(&b, max_vcores());

    /* vcore_context test */
    assert(!in_vcore_context());

    /* prep indirect ev_q.  Note we grab a big one */
    indirect_q = get_eventq(EV_MBOX_UCQ);
    indirect_q->ev_flags = EVENT_IPI;
    indirect_q->ev_vcore = 1;           /* IPI core 1 */
    indirect_q->ev_handler = 0;
    printf("Registering %08p for event type %d\n", indirect_q,
           EV_FREE_APPLE_PIE);
    register_kevent_q(indirect_q, EV_FREE_APPLE_PIE);

    /* handle events: just want to print out what we get.  This is just a
     * quick set of handlers, not a registration for a kevent. */
    for (int i = 0; i < MAX_NR_EVENT; i++)
        register_ev_handler(i, handle_generic, 0);
    /* Want to use the default ev_ev (which we just overwrote) */
    register_ev_handler(EV_EVENT, handle_ev_ev, 0);
    /* vcore_lib_init() done in vcore_request() now. */
    /* Set up event reception.  For example, this will allow us to receive an
     * event and IPI for USER_IPIs on vcore 0.  Check event.c for more stuff.
     * Note you don't have to register for USER_IPIs to receive ones you send
     * yourself with sys_self_notify(). */
    enable_kevent(EV_USER_IPI, 0, EVENT_IPI | EVENT_VCORE_PRIVATE);
    /* Receive pending preemption events.  (though there's no PP handler) */
    struct event_queue *ev_q = get_eventq_vcpd(0, EVENT_VCORE_PRIVATE);
    ev_q->ev_flags = EVENT_IPI | EVENT_VCORE_APPRO;
    register_kevent_q(ev_q, EV_PREEMPT_PENDING);
    /* We also receive preemption events, it is set up in uthread.c */

    /* Inits a thread for us, though we won't use it.  Just a hack to get into
     * _M mode.  Note this requests one vcore for us */
    struct uthread dummy = {0};
    uthread_2ls_init(&dummy, &ghetto_sched_ops);
    uthread_mcp_init();
    /* Reset the blockon to be the spinner...  This is really shitty.  Any
     * blocking calls after we become an MCP and before this will fail.  This
     * is just mhello showing its warts due to trying to work outside
     * uthread.c */
    ros_syscall_blockon = __ros_syscall_spinon;

    if ((vcoreid = vcore_id())) {
        printf("Should never see me! (from vcore %d)\n", vcoreid);
    } else {    // core 0
        temp = 0xdeadbeef;
        printf("Hello from vcore %d with temp addr = %p and temp = %p\n",
               vcoreid, &temp, temp);
        printf("Multi-Goodbye, world, from PID: %d!\n", sys_getpid());
        printf("Requesting %d vcores\n", max_vcores() - 1);
        retval = vcore_request(max_vcores() - 1);   /* since we already have 1 */
        //retval = vcore_request(5);
        printf("This is vcore0, right after vcore_request, retval=%d\n",
               retval);
        /* vcore_context test */
        assert(!in_vcore_context());
    }

    //#if 0
    /* test notifying my vcore2 */
    udelay(5000000);
    printf("Vcore 0 self-notifying vcore 2 with notif 4!\n");
    struct event_msg msg;
    msg.ev_type = 4;
    sys_self_notify(2, 4, &msg, TRUE);
    udelay(5000000);
    printf("Vcore 0 notifying itself with notif 6!\n");
    msg.ev_type = 6;
    sys_notify(sys_getpid(), 6, &msg);
    udelay(1000000);
    //#endif

    /* test loop for restarting a uthread_ctx */
    if (vcoreid == 0) {
        int ctr = 0;
        while (1) {
            printf("Vcore %d Spinning (%d), temp = %08x!\n",
                   vcoreid, ctr++, temp);
            udelay(5000000);
            //exit(0);
        }
    }

    printf("Vcore %d Done!\n", vcoreid);
    //mcs_barrier_wait(&b,vcore_id());

    printf("All Cores Done!\n", vcoreid);
    while (1);  // manually kill from the monitor
    /* since everyone should cleanup their uthreads, even if they don't plan on
     * calling their code or want uthreads in the first place.  <3 */
    uthread_cleanup(&dummy);
    return 0;
}
/*
 * Create a new thread.
 * Doesn't start the thread running.
 */
static kern_return_t
thread_create_internal(
    task_t              parent_task,
    integer_t           priority,
    thread_continue_t   continuation,
    int                 options,
#define TH_OPTION_NONE      0x00
#define TH_OPTION_NOCRED    0x01
#define TH_OPTION_NOSUSP    0x02
    thread_t            *out_thread)
{
    thread_t        new_thread;
    static thread_t first_thread;

    /*
     * Allocate a thread and initialize static fields
     */
    if (first_thread == THREAD_NULL)
        new_thread = first_thread = current_thread();
    else
        new_thread = (thread_t)zalloc(thread_zone);
    if (new_thread == THREAD_NULL)
        return (KERN_RESOURCE_SHORTAGE);

    if (new_thread != first_thread)
        *new_thread = thread_template;

#ifdef MACH_BSD
    new_thread->uthread = uthread_alloc(parent_task, new_thread,
                                        (options & TH_OPTION_NOCRED) != 0);
    if (new_thread->uthread == NULL) {
        zfree(thread_zone, new_thread);
        return (KERN_RESOURCE_SHORTAGE);
    }
#endif /* MACH_BSD */

    if (machine_thread_create(new_thread, parent_task) != KERN_SUCCESS) {
#ifdef MACH_BSD
        void *ut = new_thread->uthread;

        new_thread->uthread = NULL;
        /* cred free may not be necessary */
        uthread_cleanup(parent_task, ut, parent_task->bsd_info);
        uthread_cred_free(ut);
        uthread_zone_free(ut);
#endif /* MACH_BSD */
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    new_thread->task = parent_task;

    thread_lock_init(new_thread);
    wake_lock_init(new_thread);

    lck_mtx_init(&new_thread->mutex, &thread_lck_grp, &thread_lck_attr);

    ipc_thread_init(new_thread);
    queue_init(&new_thread->held_ulocks);

    new_thread->continuation = continuation;

    lck_mtx_lock(&tasks_threads_lock);
    task_lock(parent_task);

    if ( !parent_task->active || parent_task->halting ||
         ((options & TH_OPTION_NOSUSP) != 0 &&
          parent_task->suspend_count > 0) ||
         (parent_task->thread_count >= task_threadmax &&
          parent_task != kernel_task) ) {
        task_unlock(parent_task);
        lck_mtx_unlock(&tasks_threads_lock);

#ifdef MACH_BSD
        {
            void *ut = new_thread->uthread;

            new_thread->uthread = NULL;
            uthread_cleanup(parent_task, ut, parent_task->bsd_info);
            /* cred free may not be necessary */
            uthread_cred_free(ut);
            uthread_zone_free(ut);
        }
#endif /* MACH_BSD */
        ipc_thread_disable(new_thread);
        ipc_thread_terminate(new_thread);
        lck_mtx_destroy(&new_thread->mutex, &thread_lck_grp);
        machine_thread_destroy(new_thread);
        zfree(thread_zone, new_thread);
        return (KERN_FAILURE);
    }

    /* New threads inherit any default state on the task */
    machine_thread_inherit_taskwide(new_thread, parent_task);

    task_reference_internal(parent_task);

    /* Cache the task's map */
    new_thread->map = parent_task->map;

    /* Chain the thread onto the task's list */
    queue_enter(&parent_task->threads, new_thread, thread_t, task_threads);
    parent_task->thread_count++;

    /* So terminating threads don't need to take the task lock to decrement */
    hw_atomic_add(&parent_task->active_thread_count, 1);

    /* Protected by the tasks_threads_lock */
    new_thread->thread_id = ++thread_unique_id;

    queue_enter(&threads, new_thread, thread_t, threads);
    threads_count++;

    timer_call_setup(&new_thread->wait_timer, thread_timer_expire, new_thread);
    timer_call_setup(&new_thread->depress_timer, thread_depress_expire, new_thread);

#if CONFIG_COUNTERS
    /*
     * If parent task has any reservations, they need to be propagated to this
     * thread.
     */
    new_thread->t_chud = (TASK_PMC_FLAG == (parent_task->t_chud & TASK_PMC_FLAG)) ?
        THREAD_PMC_FLAG : 0U;
#endif

    /* Set the thread's scheduling parameters */
    if (parent_task != kernel_task)
        new_thread->sched_mode |= TH_MODE_TIMESHARE;
    new_thread->max_priority = parent_task->max_priority;
    new_thread->task_priority = parent_task->priority;
    new_thread->priority = (priority < 0)? parent_task->priority: priority;
    if (new_thread->priority > new_thread->max_priority)
        new_thread->priority = new_thread->max_priority;
    new_thread->importance = new_thread->priority - new_thread->task_priority;
    new_thread->sched_stamp = sched_tick;
    new_thread->pri_shift = sched_pri_shift;

    compute_priority(new_thread, FALSE);

    new_thread->active = TRUE;

    *out_thread = new_thread;

    {
        long dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4;

        kdbg_trace_data(parent_task->bsd_info, &dbg_arg2);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_DATA, 1) | DBG_FUNC_NONE,
            (vm_address_t)(uintptr_t)thread_tid(new_thread), dbg_arg2, 0, 0, 0);

        kdbg_trace_string(parent_task->bsd_info,
            &dbg_arg1, &dbg_arg2, &dbg_arg3, &dbg_arg4);

        KERNEL_DEBUG_CONSTANT(
            TRACEDBG_CODE(DBG_TRACE_STRING, 1) | DBG_FUNC_NONE,
            dbg_arg1, dbg_arg2, dbg_arg3, dbg_arg4, 0);
    }

    DTRACE_PROC1(lwp__create, thread_t, *out_thread);

    return (KERN_SUCCESS);
}