/* Unlock the sync object */ int syscall_sync_unlock(void *ptr, int sync_id) { struct sync *sn; thread_t thr; sn = find_sync(current->proc->pid, sync_id); if(!sn) return -EINVAL; list_dequeue(&sn->wq, thr, runq, thread_t); thr->states = THREAD_RUNNING; enqueue_thread(thr); return 0; }
struct thread *spawn_thread(struct vm_translation_map *map, void (*start_function)(void *param), void *param) { struct thread *th = slab_alloc(&thread_slab); th->kernel_stack = (unsigned char*) kmalloc(0x2000) + 0x2000; th->current_stack = (unsigned char*) th->kernel_stack - 0x840; th->map = map; ((unsigned int*) th->current_stack)[0x814 / 4] = (unsigned int) thread_start; th->start_function = start_function; th->param = param; acquire_spinlock(&thread_q_lock); enqueue_thread(&ready_q, th); release_spinlock(&thread_q_lock); }
void recompute_priority(void) { int i; thread_t *t1, *t2; // For all the queues qs[], select one thread at a time, // compute its pririty, insert it into tqs[] queue for (i=0; i<32; i++) { while (qs[i] != NULL) { t1 = qs[i]; dequeue_thread(i, t1, qs); t1->thr_cpupri /= 2; //edited t1->thr_pri = t1->thr_usrpri + (t1-> thr_cpupri / 4) + (2 * t1->thr_nice); enqueue_thread(t1, tqs); } } for (i=0; i<32; i++) qs[i] = tqs[i]; }
// Yield the CPU on the current hardware thread: put the running software
// thread back on the ready queue and switch to the next one dequeued
// from it. If the same thread comes back out, no switch happens.
//
// NOTE(review): thread_q_lock is acquired before context_switch, and the
// matching release only executes once this thread is eventually switched
// back in (or immediately, when old == next). The lock is effectively
// handed across context_switch -- the resumed thread performs the
// release. Confirm this convention against context_switch's contract.
void reschedule(void)
{
    int hwthread = __builtin_nyuzi_read_control_reg(CR_CURRENT_THREAD);
    struct thread *old_thread;
    struct thread *next_thread;

    // Put current thread back on ready queue
    acquire_spinlock(&thread_q_lock);
    old_thread = cur_thread[hwthread];
    enqueue_thread(&ready_q, old_thread);
    next_thread = dequeue_thread(&ready_q);
    if (old_thread != next_thread)
    {
        cur_thread[hwthread] = next_thread;
        // Switch stacks and address space (page directory + ASID).
        context_switch(&old_thread->current_stack,
            next_thread->current_stack,
            next_thread->map->page_dir,
            next_thread->map->asid);
    }

    release_spinlock(&thread_q_lock);
}
void schedule() { int id; thread_t *t, *t1; ucontext_t dummy; // Select the first thread in first non-empty queue // dequeue it and context switch for (id=0; id < 32; id++) if (whichqs & (1 << id)) break; // Check if non-empty queue is found? if (id < 32) { t = qs[id]; dequeue_thread(id, t, qs); // If current thread is active then enqueue it. if (current_thread != NULL) { enqueue_thread(current_thread, qs); t1 = current_thread; current_thread = t; swapcontext(&t1->thr_context, &t->thr_context); } else { current_thread = t; swapcontext(&dummy, &t->thr_context); } } else { // Deadlock situtation is not addressed. // Assuming that at least one runnable thread must be available. if (current_thread != NULL) return; else exit(0); } }
/*
 * nk_thread_start -- create a thread, set up its initial stack, place it
 * on a run queue, and (when configured) kick the target CPU's scheduler.
 *
 * NOTE(review): the #endif immediately after the parameter list closes a
 * conditional that begins above this excerpt (presumably selecting an
 * alternate prototype when the RT scheduler is disabled) -- kept intact.
 *
 * fun/input/output : thread entry point, its argument, and where the
 *                    thread's result is stored.
 * is_detached      : nonzero to detach the thread.
 * stack_size       : requested stack size for the new thread.
 * tid              : out parameter; receives the new thread id if non-NULL.
 * cpu              : target CPU, or CPU_ANY to use the calling CPU.
 * rt_type / rt_constraints / rt_deadline : real-time scheduling
 *                    parameters, used only under NAUT_CONFIG_USE_RT_SCHEDULER.
 *
 * Returns 0 on success, -1 if nk_thread_create fails.
 */
int
nk_thread_start (nk_thread_fun_t fun,
                 void *input,
                 void **output,
                 uint8_t is_detached,
                 nk_stack_size_t stack_size,
                 nk_thread_id_t *tid,
                 int cpu,
                 int rt_type,
                 rt_constraints *rt_constraints,
                 uint64_t rt_deadline)
#endif
{
    nk_thread_id_t newtid = NULL;
    nk_thread_t * newthread = NULL;

    /* put it on the current CPU */
    if (cpu == CPU_ANY) {
        cpu = my_cpu_id();
    }

    if (nk_thread_create(fun, input, output, is_detached, stack_size, &newtid, cpu) < 0) {
        ERROR_PRINT("Could not create thread\n");
        return -1;
    }

    newthread = (nk_thread_t*)newtid;

    if (tid) {
        *tid = newtid;
    }

    /* Build the initial stack frame so the first dispatch enters fun(input). */
    thread_setup_init_stack(newthread, fun, input);

#ifdef NAUT_CONFIG_USE_RT_SCHEDULER
    rt_thread *rt = rt_thread_init(rt_type, rt_constraints, rt_deadline, newthread);
    RT_THREAD_DEBUG("rt_deadline is %llu\n", rt->deadline);
    struct sys_info *sys = per_cpu_get(system);
    if (sys->cpus[cpu]->rt_sched) {
        /* Admission control: enqueue only if the RT scheduler accepts it. */
        if (rt_admit(sys->cpus[cpu]->rt_sched, rt)) {
            /* Periodic/sporadic threads go on the deadline-ordered
             * runnable queue; everything else is aperiodic. */
            if (rt_type == PERIODIC || rt_type == SPORADIC) {
                enqueue_thread(sys->cpus[cpu]->rt_sched->runnable, rt);
                RT_THREAD_DEBUG("THREAD DEADLINE ON RUN QUEUE IS: %llu\n", sys->cpus[cpu]->rt_sched->runnable->threads[0]->deadline);
            } else {
                enqueue_thread(sys->cpus[cpu]->rt_sched->aperiodic, rt);
            }
        } else {
            RT_THREAD_DEBUG("FAILED TO START THREAD. ADMISSION CONTROL DENYING ENTRY.\n");
        }
    }
    nk_schedule();
#else
    nk_enqueue_thread_on_runq(newthread, cpu);
#endif

#ifdef NAUT_CONFIG_DEBUG_THREADS
    if (cpu == CPU_ANY) {
        SCHED_DEBUG("Started thread (%p, tid=%u) on [ANY CPU]\n", newthread, newthread->tid);
    } else {
        SCHED_DEBUG("Started thread (%p, tid=%u) on cpu %u\n", newthread, newthread->tid, cpu);
    }
#endif

#ifdef NAUT_CONFIG_KICK_SCHEDULE
    // kick it
    /* Cross-CPU start: send a null-kick IPI so the target reschedules. */
    if (cpu != my_cpu_id()) {
        apic_ipi(per_cpu_get(apic),
                 nk_get_nautilus_info()->sys.cpus[cpu]->lapic_id,
                 APIC_NULL_KICK_VEC);
    }
#endif

    return 0;
}