/*
 * nk_thread_run
 *
 * Set up the initial stack of a previously created thread and place it
 * on the run queue of its bound CPU, optionally kicking that CPU.
 *
 * @t: the thread to run (as returned by nk_thread_create)
 *
 * @returns: 0 on success
 */
int
nk_thread_run (nk_thread_id_t t)
{
    nk_thread_t * newthread = (nk_thread_t*)t;

    printk("Trying to execute thread %p (tid %lu)\n", newthread, newthread->tid);
    /* a function pointer must be printed via %p (with a cast), not %llu */
    printk("RUN: Function: %p\n", (void*)newthread->fun);
    printk("RUN: Bound_CPU: %d\n", (int)newthread->bound_cpu);

    thread_setup_init_stack(newthread, newthread->fun, newthread->input);

    nk_enqueue_thread_on_runq(newthread, newthread->bound_cpu);

#ifdef NAUT_CONFIG_DEBUG_THREADS
    if (newthread->bound_cpu == CPU_ANY) {
        SCHED_DEBUG("Running thread (%p, tid=%u) on [ANY CPU]\n", newthread, newthread->tid);
    } else {
        SCHED_DEBUG("Newthread thread (%p, tid=%u) on cpu %u\n", newthread, newthread->tid, newthread->bound_cpu);
    }
#endif

#ifdef NAUT_CONFIG_KICK_SCHEDULE
    // kick the target core so it notices the new thread.
    // CPU_ANY is not a valid index into sys.cpus[], so it must be
    // excluded explicitly before we dereference the cpu entry.
    if (newthread->bound_cpu != CPU_ANY &&
        newthread->bound_cpu != my_cpu_id()) {
        apic_ipi(per_cpu_get(apic),
                 nk_get_nautilus_info()->sys.cpus[newthread->bound_cpu]->lapic_id,
                 APIC_NULL_KICK_VEC);
    }
#endif

    return 0;
}
/* Remove an entire scheduled cycle (a ring of mutually dependent engine
 * nodes) from the schedule: the ring is detached from its leaf-level list
 * and every node in it is untagged and reset.
 * NOTE(review): sched->n_items is decremented once for the whole ring, so
 * a cycle apparently counts as a single schedule item -- confirm against
 * the scheduling side. */
static inline void
unschedule_cycle (EngineSchedule *sched,
                  GslRing        *ring)
{
  guint leaf_level;
  GslRing *walk;

  /* the ring's representative node must currently be scheduled */
  g_return_if_fail (ENGINE_NODE_IS_SCHEDULED (ENGINE_NODE (ring->data)) == TRUE);
  leaf_level = ENGINE_NODE (ring->data)->sched_leaf_level;
  g_return_if_fail (leaf_level <= sched->leaf_levels);
  g_return_if_fail (sched->n_items > 0);

  SCHED_DEBUG ("unschedule_cycle(%p,%u,%p)", ring->data, leaf_level, ring);
  /* detach the whole ring from the leaf level it was queued on */
  sched->nodes[leaf_level] = gsl_ring_remove (sched->nodes[leaf_level], ring);
  /* reset scheduling state of every node in the cycle */
  for (walk = ring; walk; walk = gsl_ring_walk (ring, walk))
    {
      EngineNode *node = walk->data;

      /* all members of a scheduled cycle should carry the sched tag */
      if (!ENGINE_NODE_IS_SCHEDULED (node))
        g_warning ("node(%p) in schedule ring(%p) is untagged", node, ring);
      node->sched_leaf_level = 0;
      node->sched_tag = FALSE;
      /* nodes with pending flow jobs need their list position refreshed */
      if (node->flow_jobs)
        _engine_mnl_reorder (node);
    }
  sched->n_items--;
}
/* NOTE: this does not delete the threads in the queue, just
 * their entries in the queue */
void
nk_thread_queue_destroy (nk_thread_queue_t * q)
{
    SCHED_DEBUG("Destroying thread queue\n");

    /* second argument asks nk_queue_destroy to release any
     * remaining queue entries while tearing the queue down */
    nk_queue_destroy(q, 1);
}
/*
 * nk_thread_exit
 *
 * exit from this thread
 *
 * @retval: the value to return to the parent
 *
 * If there is someone waiting on this thread, this
 * function will wake them up. This will also call
 * any destructors for thread local storage
 *
 */
void
nk_thread_exit (void * retval)
{
    nk_thread_t * me = get_cur_thread();

    /* clear any thread local storage that may have been allocated */
    tls_exit();

    /* spin until we acquire the thread's lock (test-and-set returns the
     * previous value; 0 means we got it) */
    while (__sync_lock_test_and_set(&me->lock, 1));

    /* wait for my children to finish
     * NOTE(review): this blocks while holding me->lock -- confirm that
     * nk_join_all_children() can never need this lock, otherwise this
     * can deadlock */
    nk_join_all_children(NULL);

    /* publish the return value and mark ourselves exited before waking
     * anyone who is joining on us */
    me->output = retval;
    me->status = NK_THR_EXITED;

    /* wake up everyone who is waiting on me */
    nk_wake_waiters();

    /* drop our self-reference; ordinary decrement is safe here because
     * we still hold me->lock */
    me->refcount--;

    SCHED_DEBUG("Thread %p (tid=%u) exiting, joining with children\n", me, me->tid);

    __sync_lock_release(&me->lock);

    /* interrupts stay off from here on; we hand the CPU back to the
     * scheduler and never return */
    cli();

    nk_schedule();

    /* we should never get here! */
    panic("Should never get here!\n");
}
/* Remove a single scheduled node from the schedule: detach it from the
 * leaf-level list it was queued on and reset its scheduling state. */
static inline void
unschedule_node (EngineSchedule *sched,
                 EngineNode     *node)
{
  guint leaf_level;

  /* only nodes that are actually scheduled may be unscheduled */
  g_return_if_fail (ENGINE_NODE_IS_SCHEDULED (node) == TRUE);
  leaf_level = node->sched_leaf_level;
  g_return_if_fail (leaf_level <= sched->leaf_levels);
  g_return_if_fail (sched->n_items > 0);

  SCHED_DEBUG ("unschedule_node(%p,%u)", node, leaf_level);
  /* detach from the per-leaf-level ring */
  sched->nodes[leaf_level] = gsl_ring_remove (sched->nodes[leaf_level], node);
  node->sched_leaf_level = 0;
  node->sched_tag = FALSE;
  /* nodes with pending flow jobs need their list position refreshed */
  if (node->flow_jobs)
    _engine_mnl_reorder (node);
  sched->n_items--;
}
/*
 * nk_thread_destroy
 *
 * destroys a thread and reclaims its memory (its stack page mostly)
 * interrupts should be off
 *
 * @t: the thread to destroy
 *
 */
void
nk_thread_destroy (nk_thread_id_t t)
{
    nk_thread_t * thethread = (nk_thread_t*)t;

    SCHED_DEBUG("Destroying thread (%p, tid=%lu)\n", (void*)thethread, thethread->tid);

    /* the caller is required to have disabled interrupts already */
    ASSERT(!irqs_enabled());

    /* unlink the thread from the run queue and the global thread list
     * before freeing any of its memory */
    nk_dequeue_thread_from_runq(thethread);
    dequeue_thread_from_tlist(thethread);

    /* remove it from any wait queues */
    nk_dequeue_entry(&(thethread->wait_node));

    /* remove its own wait queue
     * (waiters should already have been notified) */
    nk_thread_queue_destroy(thethread->waitq);

    free(thethread->stack);
    free(thethread);
}
int nk_thread_start (nk_thread_fun_t fun, void *input, void **output, uint8_t is_detached, nk_stack_size_t stack_size, nk_thread_id_t *tid, int cpu, int rt_type, rt_constraints *rt_constraints, uint64_t rt_deadline) #endif { nk_thread_id_t newtid = NULL; nk_thread_t * newthread = NULL; /* put it on the current CPU */ if (cpu == CPU_ANY) { cpu = my_cpu_id(); } if (nk_thread_create(fun, input, output, is_detached, stack_size, &newtid, cpu) < 0) { ERROR_PRINT("Could not create thread\n"); return -1; } newthread = (nk_thread_t*)newtid; if (tid) { *tid = newtid; } thread_setup_init_stack(newthread, fun, input); #ifdef NAUT_CONFIG_USE_RT_SCHEDULER rt_thread *rt = rt_thread_init(rt_type, rt_constraints, rt_deadline, newthread); RT_THREAD_DEBUG("rt_deadline is %llu\n", rt->deadline); struct sys_info *sys = per_cpu_get(system); if (sys->cpus[cpu]->rt_sched) { if (rt_admit(sys->cpus[cpu]->rt_sched, rt)) { if (rt_type == PERIODIC || rt_type == SPORADIC) { enqueue_thread(sys->cpus[cpu]->rt_sched->runnable, rt); RT_THREAD_DEBUG("THREAD DEADLINE ON RUN QUEUE IS: %llu\n", sys->cpus[cpu]->rt_sched->runnable->threads[0]->deadline); } else { enqueue_thread(sys->cpus[cpu]->rt_sched->aperiodic, rt); } } else { RT_THREAD_DEBUG("FAILED TO START THREAD. ADMISSION CONTROL DENYING ENTRY.\n"); } } nk_schedule(); #else nk_enqueue_thread_on_runq(newthread, cpu); #endif #ifdef NAUT_CONFIG_DEBUG_THREADS if (cpu == CPU_ANY) { SCHED_DEBUG("Started thread (%p, tid=%u) on [ANY CPU]\n", newthread, newthread->tid); } else { SCHED_DEBUG("Started thread (%p, tid=%u) on cpu %u\n", newthread, newthread->tid, cpu); } #endif #ifdef NAUT_CONFIG_KICK_SCHEDULE // kick it if (cpu != my_cpu_id()) { apic_ipi(per_cpu_get(apic), nk_get_nautilus_info()->sys.cpus[cpu]->lapic_id, APIC_NULL_KICK_VEC); } #endif return 0; }
/*
 * nk_thread_create
 *
 * Allocate and initialize a thread (struct + stack) and add it to the
 * global thread list. The thread is NOT placed on a run queue; the
 * caller does that (see nk_thread_start / nk_thread_run).
 *
 * @fun: the thread entry function
 * @input: argument passed to @fun
 * @output: where the thread's return value is stored
 * @is_detached: nonzero to create the thread detached
 * @stack_size: stack size in bytes (0 selects PAGE_SIZE)
 * @tid: out parameter receiving the new thread id (may be NULL)
 * @cpu: CPU to bind the thread to, or CPU_ANY for the current CPU
 *
 * @returns: 0 on success, negative on failure
 */
int
nk_thread_create (nk_thread_fun_t fun,
                  void * input,
                  void ** output,
                  uint8_t is_detached,
                  nk_stack_size_t stack_size,
                  nk_thread_id_t * tid,
                  int cpu)
{
    nk_thread_t * t = NULL;
    void * stack    = NULL;

    if (cpu == CPU_ANY) {
        cpu = my_cpu_id();
    }

#ifndef NAUT_CONFIG_THREAD_OPTIMIZE
    ASSERT(cpu < per_cpu_get(system)->num_cpus);
    if (cpu >= per_cpu_get(system)->num_cpus) {
        ERROR_PRINT("thread create received invalid CPU id (%u)\n", cpu);
        return -EINVAL;
    }
#endif

    t = malloc(sizeof(nk_thread_t));

    /* check the allocation in ALL builds: the optimized build previously
     * dereferenced t (t->stack_size below) without any NULL check */
    ASSERT(t);
    if (!t) {
        ERROR_PRINT("Could not allocate thread struct\n");
        return -EINVAL;
    }

#ifndef NAUT_CONFIG_THREAD_OPTIMIZE
    /* zeroing is deliberately skipped in optimized builds */
    memset(t, 0, sizeof(nk_thread_t));
#endif

#ifndef NAUT_CONFIG_THREAD_OPTIMIZE
    if (stack_size) {
        stack         = (void*)malloc(stack_size);
        t->stack_size = stack_size;
    } else {
        stack         = (void*)malloc(PAGE_SIZE);
        t->stack_size = PAGE_SIZE;
    }
#else
    stack         = malloc(PAGE_SIZE_4KB);
    t->stack_size = PAGE_SIZE_4KB;
#endif

    /* don't hand a NULL stack to thread_init in release builds */
    ASSERT(stack);
    if (!stack) {
        ERROR_PRINT("Could not allocate thread stack\n");
        goto out_err1;
    }

    if (thread_init(t, stack, is_detached, cpu, get_cur_thread()) < 0) {
        ERROR_PRINT("Could not initialize thread\n");
        goto out_err1;
    }

    t->status = NK_THR_INIT;
    t->fun    = fun;
    t->input  = input;
    t->output = output;

    enqueue_thread_on_tlist(t);

    if (tid) {
        *tid = (nk_thread_id_t)t;
    }

    SCHED_DEBUG("Thread create creating new thread with t=%p, tid=%lu\n", t, t->tid);

    return 0;

out_err1:
    free(stack);   /* free(NULL) is a no-op */
    free(t);
    return -1;
}
/*
 * thread_cleanup
 *
 * Landing point for a thread whose entry function has returned:
 * logs the event and exits the thread with a NULL return value.
 */
static void
thread_cleanup (void)
{
    /* tid is printed with %lu elsewhere in this file (nk_thread_create,
     * nk_thread_destroy); %d here was a format-specifier mismatch */
    SCHED_DEBUG("Thread (%lu) exiting on core %d\n", get_cur_thread()->tid, my_cpu_id());
    nk_thread_exit(0);
}