void _arch_irq_task_switch(void *_cpu_state)
{
	if (run_queue) {

		spinlock_lock(&run_queue->spinlock);

		get_system_time(&run_queue->sched_time);

		struct kthread *c = run_queue_current();
		struct kthread *n = run_queue_next();

		spinlock_unlock(&run_queue->spinlock);

		_BUG_ON(!n);
		_BUG_ON(!c);

		if (stack_check(&(c->stack)) < 0)
			_BUG(); /* the task we just put to sleep blew its stack! */

		_switch(c, n, _cpu_state);

		/* schedule the next switch */
		_sched_next_task(NULL);
	}
}
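/*
 * A minimal sketch of the kind of stack_check() the switch path above
 * relies on: verify that a canary word written at the stack limit is
 * still intact. This is an assumption about the real implementation -
 * the canary value, the stack_struct field layout, and the
 * downward-growing stack are illustrative, not taken from the
 * original source.
 */
#define STACK_CANARY 0xDEADBEEFu /* assumed canary pattern */

static int stack_check_sketch(const struct stack_struct *ss)
{
	/* stacks grow down, so the canary sits at the lowest address */
	const uint32_t *canary = (const uint32_t *)ss->stack_base;

	return (*canary == STACK_CANARY) ? 0 : -1; /* <0 means the stack blew */
}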
static struct kthread *run_queue_next()
{
	struct kthread *next = 0;
	const int idle_task = 0;
	const int current = run_queue->running;
	int running = current;

	for (;;) {

		running++;
		running %= (sizeof run_queue->kthreads / sizeof run_queue->kthreads[0]);

		/* find the next runnable task */
		if (running != idle_task) {
			if (_is_runnable((next = run_queue->kthreads[running]))) {
				run_queue->running = running;
				return next;
			}
		}

		/* if no tasks are runnable, run the idle-task */
		if (running == current) {
			run_queue->running = idle_task;
			_BUG_ON(!run_queue->kthreads[idle_task]);
			return run_queue->kthreads[idle_task];
		}
	}
}
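/*
 * A sketch of the run-queue layout that _arch_irq_task_switch() and
 * run_queue_next() assume. Field names match their uses above; the
 * array size and the timer/timestamp types are assumptions, not taken
 * from the original source. Slot 0 is reserved for the idle task,
 * which run_queue_next() skips unless nothing else is runnable.
 */
#define RUN_QUEUE_LENGTH 16 /* assumed capacity, including the idle task */

struct run_queue_struct {
	spinlock_t spinlock;                        /* guards the fields below */
	struct timespec sched_time;                 /* assumed type: time of last switch */
	int running;                                /* index of the current task */
	timer_itf timer;                            /* preemption timer (see kthread_init) */
	struct kthread *kthreads[RUN_QUEUE_LENGTH]; /* slot 0: idle task, slot 1: boot task */
};

static struct run_queue_struct *run_queue;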
int kthread_init(const struct stack_struct *ss)
{
	const int idle_task = 0;
	const int boot_task = 1;

	if ((run_queue = kmalloc(sizeof *run_queue, GFP_KERNEL | GFP_ZERO))) {

		DEBUG_TRACE("%d = stack_check %x %x", stack_check(ss), ss->stack_base, ss->stack_size);

		spinlock_init(&run_queue->spinlock);
		run_queue->running = boot_task;

		/* create an empty kthread for the boot-task! UGLY! */
		run_queue->kthreads[boot_task] = _kmalloc_kthread();

		if (run_queue->kthreads[boot_task]) {

			DEBUG_TRACE("%d = stack_check", stack_check(ss));

			/* store boot-stack info */
			run_queue->kthreads[boot_task]->stack = *ss;

			irq_itf irq;
			if (timer_open(&run_queue->timer, &irq, 0) == 0) {

				DEBUG_TRACE("");

				interrupt_controller_itf intc;
				if (interrupt_controller(&intc) == 0) {

					DEBUG_TRACE("");

					INVOKE(intc, register_handler, irq);
					INVOKE(intc, unmask, irq);

					goto success;
				}
			}
		}
	}
	goto err;

success:
	/* start the idle-task */
	if (_kthread_create(&run_queue->kthreads[idle_task], GFP_KERNEL, &_asm_idle_task, 0) == 0) {

		DEBUG_TRACE("");

		_BUG_ON(!run_queue->kthreads[idle_task]);

		/*
		 * UGLY - yield to self! The current task is the first, and only,
		 * runnable thread right now. We NEED to do this to populate the
		 * empty kthread we allocated for ourselves earlier.
		 */
		kthread_yield();

		return _sched_next_task(NULL);
	}
err:
	_BUG();
	return -1;
}
static void isp1362_write_ptd(struct isp1362_hcd *isp1362_hcd, struct isp1362_ep *ep,
			      struct isp1362_ep_queue *epq)
{
	struct ptd *ptd = &ep->ptd;
	int len = PTD_GET_DIR(ptd) == PTD_DIR_IN ? 0 : ep->length;

	_BUG_ON(ep->ptd_offset < 0);

	prefetch(ptd);
	isp1362_write_buffer(isp1362_hcd, ptd, ep->ptd_offset, PTD_HEADER_SIZE);
	if (len)
		isp1362_write_buffer(isp1362_hcd, ep->data,
				     ep->ptd_offset + PTD_HEADER_SIZE, len);

	dump_ptd(ptd);
	dump_ptd_out_data(ptd, ep->data);
}
static void _exited_kthread()
{
	spinlock_lock(&run_queue->spinlock);

	_BUG_ON(run_queue->running == 0); /* can't quit the idle task! */

	struct kthread *c = run_queue_current();
	if (c) {
		run_queue_remove(c);
		c->flags |= KTHREAD_JOINABLE;
	}

	spinlock_unlock(&run_queue->spinlock);

	kthread_yield();

	/* never reached - the yield above must not return to a removed task */
	_BUG();
	_asm_idle_task(NULL);
}
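/*
 * An illustrative counterpart to _exited_kthread(): a kthread_join()
 * style wait that spins until the exiting thread has flagged itself
 * KTHREAD_JOINABLE. This helper is an assumption for illustration
 * only; just the KTHREAD_JOINABLE flag and kthread_yield() come from
 * the code above.
 */
static void example_kthread_join(struct kthread *t)
{
	/* the exiting thread sets the flag under run_queue->spinlock */
	while (!(t->flags & KTHREAD_JOINABLE))
		kthread_yield(); /* give the exiting thread time to finish */

	/* t is off the run-queue now and could be reclaimed by the caller */
}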
int cpu_state_build(struct cpu_state_struct *cpuss, void *(*start)(void *),
		    void *args, void *stack, void (*end)())
{
	const size_t ef_size = sizeof(struct exception_frame);
	const size_t ts_size = sizeof(struct task_state_struct);

	/*
	 * Set the stack pointer, with space allocated for the exception
	 * frame and other state. SEE context.S
	 */
	cpuss->SP = (size_t)stack - (ef_size + ts_size);

	/* extra cpu state */
	struct task_state_struct *ts = (struct task_state_struct *)cpuss->SP;

	/* create an exception frame */
	struct exception_frame *ef = (struct exception_frame *)(cpuss->SP + ts_size);

	_BUG_ON(((size_t)ef) & 7); /* exception frame must be 8-byte aligned */

	ef->PC = (uint32_t)(start);
	ef->LR = (uint32_t)(end);
	ef->R0 = (uint32_t)(args);
	ef->xPSR_fpalign = 1 << 24; /* EPSR - Thumb-mode bit */

	/*
	 * 0xFFFFFFF1 // return to handler mode
	 * 0xFFFFFFF9 // return to thread mode (main stack)
	 * 0xFFFFFFFD // return to thread mode (process stack)
	 */
	ts->exception_return = 0xFFFFFFF9;

	return 0;
}
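/*
 * A hedged usage sketch: how a creation path like _kthread_create()
 * might call cpu_state_build(). The kthread fields (cpu_state, stack)
 * and the helper name are illustrative assumptions; only
 * cpu_state_build() and _exited_kthread() come from the code above.
 * Note the initial SP must leave the exception frame 8-byte aligned,
 * or the _BUG_ON() in cpu_state_build() fires.
 */
static int example_build_initial_state(struct kthread *t,
				       void *(*entry)(void *), void *args)
{
	/* stacks grow down: start from the top of the allocation */
	void *stack_top = (char *)t->stack.stack_base + t->stack.stack_size;

	/* route the entry function's eventual return through _exited_kthread() */
	return cpu_state_build(&t->cpu_state, entry, args, stack_top, &_exited_kthread);
}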