// The kernel thread needs another stack to delete its own stack. static void kthread_do_kill_thread(void* user) { Thread* thread = (Thread*) user; while ( thread->state != ThreadState::DEAD ) kthread_yield(); FreeThread(thread); }
/*
 * Initialise the kernel threading subsystem.
 *
 * ss describes the stack the boot code is currently executing on; it is
 * recorded so the already-running boot context can be represented as a
 * normal kthread (run-queue slot `boot_task`).
 *
 * Returns the result of _sched_next_task(NULL) on success, or -1 after
 * _BUG() on any failure.
 *
 * NOTE(review): on the failure paths the allocated run_queue and boot
 * kthread are not released — acceptable only because _BUG() presumably
 * halts the system; verify.
 */
int kthread_init(const struct stack_struct *ss)
{
    const int idle_task=0;  /* run-queue slot reserved for the idle task */
    const int boot_task=1;  /* run-queue slot for the currently running boot context */

    /* Allocate the zero-filled global run-queue. */
    if((run_queue = kmalloc(sizeof *run_queue, GFP_KERNEL | GFP_ZERO)))
    {
        DEBUG_TRACE("%d = stack_check %x %x", stack_check(ss), ss->stack_base, ss->stack_size);
        spinlock_init(&run_queue->spinlock);
        run_queue->running = boot_task;  /* we ARE the boot task right now */

        // create an empty kthread for the boot-task! UGLY!
        run_queue->kthreads[boot_task] = _kmalloc_kthread();

        if(run_queue->kthreads[boot_task])
        {
            DEBUG_TRACE("%d = stack_check", stack_check(ss));

            // store boot_stack info.
            run_queue->kthreads[boot_task]->stack = *ss;

            irq_itf irq;
            /* Start the scheduler tick source and hook its IRQ into the
               interrupt controller before any task switching can occur. */
            if(timer_open(&run_queue->timer, &irq, 0)==0)
            {
                DEBUG_TRACE("");
                interrupt_controller_itf intc;
                if(interrupt_controller(&intc) == 0)
                {
                    DEBUG_TRACE("");
                    INVOKE(intc, register_handler, irq);
                    INVOKE(intc, unmask, irq);
                    goto success;
                }
            }
        }
    }
    /* Any failure above funnels here; skips over the success path. */
    goto err;

success:
    // start idle-task.
    if(_kthread_create(&run_queue->kthreads[idle_task], GFP_KERNEL, &_asm_idle_task, 0)==0)
    {
        DEBUG_TRACE("");
        _BUG_ON(!run_queue->kthreads[idle_task]);

        // UGLY - yield to self! current task is first, and only runnable thread right now.
        // we NEED to do this to populate the empty kthread we allocated for ourselves earlier
        kthread_yield();

        return _sched_next_task(NULL);
    }
err:
    _BUG();
    return -1;
}
/*
 * Block until THREAD has finished executing, then release its resources.
 * A NULL thread is a no-op.  The exiting thread signals completion by
 * setting KTHREAD_JOINABLE in its flags.
 */
void kthread_join(kthread_t thread)
{
    if (thread == NULL)
        return;

    /* Spin-yield until the exiting thread marks itself joinable. */
    for (;;) {
        if (thread->flags & KTHREAD_JOINABLE)
            break;
        kthread_yield();
    }

    _free_kthread(thread);
}
/*
 * Acquire LOCK for writing.
 *
 * The lock word appears to be bit-packed (inferred from the masks below —
 * TODO confirm against the krwlock header):
 *   bits  0-14 : active reader count
 *   bit     15 : writer-intent flag (claimed via test-and-set)
 *   bits 16-30 : writer count
 *   bit     31 : presumably used by kspinlock_* as the spin bit; verify.
 *
 * The same word is used both as the spinlock and as the rwlock state, so
 * all state inspection happens with the spin bit held, and we drop the
 * spinlock around kthread_yield() while waiting.
 */
kfunction void krwlock_write_lock(volatile kuint* lock)
{
    kuint temp = 0;

    kspinlock_lock(lock);

    /* Try to claim writer intent (bit 15).  Non-zero return means another
       writer already holds the intent bit. */
    if(katomic_bit_test_and_set(lock, 15))
    {
        /* Intent already claimed by someone else: wait for BOTH readers
           (bits 0-14) and writers (bits 16-30) to drain. */
        while((temp = (*lock & 0x7FFF7FFF)))
        {
            kspinlock_unlock(lock);
            kthread_yield();          /* let other threads make progress */
            kspinlock_lock(lock);
        }
    }
    else
    {
        /* We claimed the intent bit: only active readers can block us. */
        while((temp = (*lock & 0x00007FFF)))
        {
            kspinlock_unlock(lock);
            kthread_yield();
            kspinlock_lock(lock);
        }
    }

    *lock += 0x00010000;   /* register ourselves in the writer-count field */
    kspinlock_unlock(lock);
}
/*
 * Acquire LOCK for reading.
 *
 * Optimistically bumps the low half-word of the lock (presumably the
 * reader count — verify against krwlock_write_lock's bit layout).  If a
 * writer is present, bit 15 of that half-word is set, making the signed
 * kint16 value non-positive after the increment; in that case the
 * increment is rolled back and we yield before retrying.
 *
 * NOTE(review): accessing the kuint lock word through a kint16* cast is a
 * strict-aliasing violation and depends on the target's endianness for
 * which half-word is touched — confirm this is intentional for this port.
 */
kfunction void krwlock_read_lock(volatile kuint* lock)
{
    kspinlock_lock(lock);
    while(1)
    {
        /* Optimistically take a reader slot. */
        (*((kint16*)lock))++;
        if((*((kint16*)lock)) > 0)
        {
            break;  /* count is positive: no writer bit set, read lock held */
        }
        /* Writer present (value went non-positive): undo and retry later. */
        (*((kint16*)lock))--;
        kspinlock_unlock(lock);
        kthread_yield();
        kspinlock_lock(lock);
    }
    kspinlock_unlock(lock);
}
/*
 * Termination path run when a kthread's entry function returns.
 *
 * Removes the current thread from the run-queue and marks it joinable
 * (waking any kthread_join() spinner), then yields away for good.
 * Control must never return here; if it does, _BUG() fires and we fall
 * into the idle loop as a last resort.
 */
static void _exited_kthread()
{
    spinlock_lock(&run_queue->spinlock);

    _BUG_ON(run_queue->running==0); // CANT QUIT IDLE TASK!

    struct kthread * c = run_queue_current();
    if(c)
    {
        run_queue_remove(c);           /* no longer schedulable */
        c->flags |= KTHREAD_JOINABLE;  /* signal kthread_join() waiters */
    }
    spinlock_unlock(&run_queue->spinlock);

    /* Switch to another thread; having been removed from the run-queue,
       the scheduler should never pick us again. */
    kthread_yield();

    /* Unreachable in a correct scheduler. */
    _BUG();
    _asm_idle_task(NULL);
}