static int
error_int_handler (excp_entry_t * excp, excp_vec_t v)
{
    struct apic_dev * apic = per_cpu_get(apic);
    char * s     = "[Unknown Error]";
    uint8_t i    = 0;
    uint32_t err = 0;

    apic_write(apic, APIC_REG_ESR, 0);
    err = apic_read(apic, APIC_REG_ESR);
    apic_do_eoi();

    apic->err_int_cnt++;
    err &= 0xff;

    APIC_WARN("Error interrupt received from local APIC (ID=0x%x) on Core %u (error=0x%x):\n",
              per_cpu_get(apic)->id, my_cpu_id(), err);

    /* walk the error bits, printing a message for each one that is set */
    while (err) {
        if (err & 0x1) {
            s = (char*)apic_err_codes[i];
            APIC_WARN("\t%s\n", s);
        }
        ++i;
        err >>= 1;
    }

    return 0;
}
inline void
nk_enqueue_thread_on_runq (nk_thread_t * t, int cpu)
{
    NK_PROFILE_ENTRY();

    nk_thread_queue_t * q = NULL;
    struct sys_info * sys = per_cpu_get(system);

    /* TODO: these checks should only occur at creation time */
    if (unlikely(cpu <= CPU_ANY || cpu >= sys->num_cpus)) {
        q = per_cpu_get(run_q);
    } else {
        q = sys->cpus[cpu]->run_q;
    }

    /* bail if the run queue hasn't been created yet */
    ASSERT(q);

    t->cur_run_q = q;
    t->status    = NK_THR_SUSPENDED;

    nk_enqueue_entry_atomic(q, &(t->runq_node));

    NK_PROFILE_EXIT();
}
static int
spur_int_handler (excp_entry_t * excp, excp_vec_t v)
{
    APIC_WARN("APIC (ID=0x%x) Received Spurious Interrupt on core %u\n",
              per_cpu_get(apic)->id, my_cpu_id());

    struct apic_dev * a = per_cpu_get(apic);
    a->spur_int_cnt++;

    /* we don't need to EOI here */
    return 0;
}
void
apic_do_eoi (void)
{
    struct apic_dev * apic = (struct apic_dev*)per_cpu_get(apic);
    ASSERT(apic);
    apic_write(apic, APIC_REG_EOR, 0);
}
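/*
 * Sketch (not from the original source): the handlers above illustrate the EOI
 * protocol — a fixed-vector handler acknowledges the local APIC exactly once via
 * apic_do_eoi() before returning, while the spurious vector skips it. The handler
 * below is a hypothetical example following that pattern; its name and the local
 * tick counter are illustrative only.
 */
static int
example_tick_int_handler (excp_entry_t * excp, excp_vec_t v)
{
    /* illustrative per-handler counter standing in for real device work */
    static uint64_t ticks = 0;
    ticks++;

    /* acknowledge the interrupt so the local APIC can deliver further vectors */
    apic_do_eoi();

    return 0;
}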
int
nk_thread_run (nk_thread_id_t t)
{
    nk_thread_t * newthread = (nk_thread_t*)t;

    printk("Trying to execute thread %p (tid %lu)\n", newthread, newthread->tid);
    printk("RUN: Function: %p\n", (void*)newthread->fun);
    printk("RUN: Bound_CPU: %d\n", newthread->bound_cpu);

    thread_setup_init_stack(newthread, newthread->fun, newthread->input);

    nk_enqueue_thread_on_runq(newthread, newthread->bound_cpu);

#ifdef NAUT_CONFIG_DEBUG_THREADS
    if (newthread->bound_cpu == CPU_ANY) {
        SCHED_DEBUG("Running thread (%p, tid=%u) on [ANY CPU]\n", newthread, newthread->tid);
    } else {
        SCHED_DEBUG("Running thread (%p, tid=%u) on cpu %u\n", newthread, newthread->tid, newthread->bound_cpu);
    }
#endif

#ifdef NAUT_CONFIG_KICK_SCHEDULE
    // kick it
    // this really should not fire on CPU_ANY....
    if (newthread->bound_cpu != my_cpu_id()) {
        apic_ipi(per_cpu_get(apic),
                 nk_get_nautilus_info()->sys.cpus[newthread->bound_cpu]->lapic_id,
                 APIC_NULL_KICK_VEC);
    }
#endif

    return 0;
}
static int
thermal_int_handler (excp_entry_t * excp, excp_vec_t v)
{
    panic("Received a thermal interrupt from the LAPIC (0x%x) on core %u (Should be masked)\n",
          per_cpu_get(apic)->id, my_cpu_id());
    return 0;
}
static int
dummy_int_handler (excp_entry_t * excp, excp_vec_t v)
{
    panic("Received an interrupt from an Extended LVT vector on LAPIC (0x%x) on core %u (Should be masked)\n",
          per_cpu_get(apic)->id, my_cpu_id());
    return 0;
}
int
nk_thread_start (nk_thread_fun_t fun,
                 void * input,
                 void ** output,
                 uint8_t is_detached,
                 nk_stack_size_t stack_size,
                 nk_thread_id_t * tid,
                 int cpu,
                 int rt_type,
                 rt_constraints * rt_constraints,
                 uint64_t rt_deadline)
{
    nk_thread_id_t newtid   = NULL;
    nk_thread_t * newthread = NULL;

    /* put it on the current CPU */
    if (cpu == CPU_ANY) {
        cpu = my_cpu_id();
    }

    if (nk_thread_create(fun, input, output, is_detached, stack_size, &newtid, cpu) < 0) {
        ERROR_PRINT("Could not create thread\n");
        return -1;
    }

    newthread = (nk_thread_t*)newtid;

    if (tid) {
        *tid = newtid;
    }

    thread_setup_init_stack(newthread, fun, input);

#ifdef NAUT_CONFIG_USE_RT_SCHEDULER
    rt_thread * rt = rt_thread_init(rt_type, rt_constraints, rt_deadline, newthread);
    RT_THREAD_DEBUG("rt_deadline is %llu\n", rt->deadline);

    struct sys_info * sys = per_cpu_get(system);
    if (sys->cpus[cpu]->rt_sched) {
        if (rt_admit(sys->cpus[cpu]->rt_sched, rt)) {
            if (rt_type == PERIODIC || rt_type == SPORADIC) {
                enqueue_thread(sys->cpus[cpu]->rt_sched->runnable, rt);
                RT_THREAD_DEBUG("THREAD DEADLINE ON RUN QUEUE IS: %llu\n",
                                sys->cpus[cpu]->rt_sched->runnable->threads[0]->deadline);
            } else {
                enqueue_thread(sys->cpus[cpu]->rt_sched->aperiodic, rt);
            }
        } else {
            RT_THREAD_DEBUG("FAILED TO START THREAD. ADMISSION CONTROL DENYING ENTRY.\n");
        }
    }

    nk_schedule();
#else
    nk_enqueue_thread_on_runq(newthread, cpu);
#endif

#ifdef NAUT_CONFIG_DEBUG_THREADS
    if (cpu == CPU_ANY) {
        SCHED_DEBUG("Started thread (%p, tid=%u) on [ANY CPU]\n", newthread, newthread->tid);
    } else {
        SCHED_DEBUG("Started thread (%p, tid=%u) on cpu %u\n", newthread, newthread->tid, cpu);
    }
#endif

#ifdef NAUT_CONFIG_KICK_SCHEDULE
    // kick it
    if (cpu != my_cpu_id()) {
        apic_ipi(per_cpu_get(apic),
                 nk_get_nautilus_info()->sys.cpus[cpu]->lapic_id,
                 APIC_NULL_KICK_VEC);
    }
#endif

    return 0;
}
int
nk_thread_create (nk_thread_fun_t fun,
                  void * input,
                  void ** output,
                  uint8_t is_detached,
                  nk_stack_size_t stack_size,
                  nk_thread_id_t * tid,
                  int cpu)
{
    nk_thread_t * t = NULL;
    void * stack    = NULL;

    if (cpu == CPU_ANY) {
        cpu = my_cpu_id();
    }

#ifndef NAUT_CONFIG_THREAD_OPTIMIZE
    ASSERT(cpu < per_cpu_get(system)->num_cpus);
    if (cpu >= per_cpu_get(system)->num_cpus) {
        ERROR_PRINT("thread create received invalid CPU id (%u)\n", cpu);
        return -EINVAL;
    }
#endif

    t = malloc(sizeof(nk_thread_t));

#ifndef NAUT_CONFIG_THREAD_OPTIMIZE
    ASSERT(t);
    if (!t) {
        ERROR_PRINT("Could not allocate thread struct\n");
        return -EINVAL;
    }
    memset(t, 0, sizeof(nk_thread_t));
#endif

#ifndef NAUT_CONFIG_THREAD_OPTIMIZE
    if (stack_size) {
        stack         = (void*)malloc(stack_size);
        t->stack_size = stack_size;
    } else {
        stack         = (void*)malloc(PAGE_SIZE);
        t->stack_size = PAGE_SIZE;
    }
#else
    stack         = malloc(PAGE_SIZE_4KB);
    t->stack_size = PAGE_SIZE_4KB;
#endif

    ASSERT(stack);

    if (thread_init(t, stack, is_detached, cpu, get_cur_thread()) < 0) {
        ERROR_PRINT("Could not initialize thread\n");
        goto out_err1;
    }

    t->status = NK_THR_INIT;
    t->fun    = fun;
    t->input  = input;
    t->output = output;

    enqueue_thread_on_tlist(t);

    if (tid) {
        *tid = (nk_thread_id_t)t;
    }

    SCHED_DEBUG("Thread create creating new thread with t=%p, tid=%lu\n", t, t->tid);

    return 0;

out_err1:
    free(stack);
    free(t);
    return -1;
}
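/*
 * Sketch (not part of the original source): how a caller might combine the
 * two-step interface above — nk_thread_create() allocates and registers the
 * thread, then nk_thread_run() sets up its initial stack and places it on a
 * run queue. The worker function and its argument are illustrative only.
 */
static void
example_worker (void * in, void ** out)
{
    /* hypothetical payload: hand the input pointer straight back as output */
    if (out) {
        *out = in;
    }
}

static int
example_spawn (void)
{
    nk_thread_id_t tid;

    /* detached thread, default stack size, bound to the current CPU */
    if (nk_thread_create(example_worker, NULL, NULL, 1, 0, &tid, CPU_ANY) < 0) {
        return -1;
    }

    return nk_thread_run(tid);
}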
/*
 * get_runnable_thread
 *
 * get the next runnable thread on the specified CPU
 *
 * NOTE: assumes that this thread *will* be run after this
 *
 */
static nk_thread_t *
get_runnable_thread (uint32_t cpu)
{
    nk_thread_t * runnable   = NULL;
    nk_thread_queue_t * runq = NULL;
    nk_queue_entry_t * elm   = NULL;
    struct sys_info * sys    = per_cpu_get(system);
    uint8_t flags;

    if (unlikely(cpu >= sys->num_cpus || !sys->cpus[cpu])) {
        ERROR_PRINT("Attempt to get thread on invalid CPU (%u)\n", cpu);
        return NULL;
    }

    runq = sys->cpus[cpu]->run_q;

    ASSERT(runq);

    if (nk_queue_empty(runq)) {
        return NULL;
    }

    flags = spin_lock_irq_save(&runq->lock);

    elm = nk_dequeue_first(runq);
    ASSERT(elm);
    runnable = container_of(elm, nk_thread_t, runq_node);

    if (!get_cur_thread()->is_idle && get_cur_thread()->status == NK_THR_RUNNING) {

        /* the next thing is an idle thread, but do we have something else to run? */
        if (runnable->is_idle) {

            if (!nk_queue_empty(runq)) {
                nk_thread_t * idle = runnable;
                elm = nk_dequeue_first(runq);
                ASSERT(elm);
                runnable = container_of(elm, nk_thread_t, runq_node);
                ASSERT(runnable);
                idle->status = NK_THR_SUSPENDED;
                nk_enqueue_entry(runq, &(idle->runq_node));
                //nk_enqueue_thread_on_runq(idle, cpu);
            } else {
                /* we put the idle thread back when it is the only thing on the queue */
                runnable->status = NK_THR_SUSPENDED;
                nk_enqueue_entry(runq, &(runnable->runq_node));
                //nk_enqueue_thread_on_runq(runnable, cpu);
                runnable = NULL;
            }

        } else {
            /* all good, we switch to runnable */
        }

    } else {
        /* if we're the idle thread, we *ALWAYS* run the next thing */
    }

    if (runnable) {
        runnable->status = NK_THR_RUNNING;
    }

    spin_unlock_irq_restore(&runq->lock, flags);
    //irq_enable_restore(flags);

    return runnable;
}
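/*
 * Sketch (not from the original source): one way a per-CPU schedule routine
 * could sit on top of get_runnable_thread(). The context-switch primitive
 * example_switch_to() is a placeholder — the real kernel's switch routine and
 * its preemption/interrupt handling are not shown here.
 */
extern void example_switch_to (nk_thread_t * t);   /* hypothetical low-level switch */

static void
example_schedule (void)
{
    nk_thread_t * next = get_runnable_thread(my_cpu_id());

    /* nothing better to run: stay on the current thread */
    if (!next) {
        return;
    }

    /* hand the CPU to the chosen thread's saved context */
    example_switch_to(next);
}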