static void _signal_deliver(rt_thread_t tid)
{
    rt_ubase_t level;

    /* thread is not interested in pending signals */
    if (!(tid->sig_pending & tid->sig_mask)) return;

    level = rt_hw_interrupt_disable();
    if (tid->stat == RT_THREAD_SUSPEND)
    {
        /* resume thread to handle signal */
        rt_thread_resume(tid);

        /* add signal state */
        tid->stat |= RT_THREAD_STAT_SIGNAL;
        rt_hw_interrupt_enable(level);

        /* re-schedule */
        rt_schedule();
    }
    else
    {
        if (tid == rt_thread_self())
        {
            /* add signal state */
            tid->stat |= RT_THREAD_STAT_SIGNAL;
            rt_hw_interrupt_enable(level);

            /* handle the signal in the current thread context */
            rt_thread_handle_sig(RT_TRUE);
        }
        else if (!(tid->stat & RT_THREAD_STAT_SIGNAL))
        {
            tid->stat |= RT_THREAD_STAT_SIGNAL;

            /* point the stack to the signal handler entry */
            tid->sig_ret = tid->sp;
            tid->sp = rt_hw_stack_init((void *)_signal_entry, RT_NULL,
                                       (void *)((char *)tid->sig_ret - 32), RT_NULL);

            rt_hw_interrupt_enable(level);
            dbg_log(DBG_LOG, "signal stack pointer @ 0x%08x\n", tid->sp);

            /* re-schedule */
            rt_schedule();
        }
        else
        {
            rt_hw_interrupt_enable(level);
        }
    }
}
static void rt_thread_exit(void)
{
    struct rt_thread *thread;
    register rt_base_t level;

    /* get current thread */
    thread = rt_current_thread;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* remove from schedule */
    rt_schedule_remove_thread(thread);
    /* change stat */
    thread->stat = RT_THREAD_CLOSE;

    /* remove it from timer list */
    rt_list_remove(&(thread->thread_timer.list));
    rt_object_detach((rt_object_t)&(thread->thread_timer));

    if ((rt_object_is_systemobject((rt_object_t)thread) == RT_EOK) &&
        thread->cleanup == RT_NULL)
    {
        rt_object_detach((rt_object_t)thread);
    }
    else
    {
        /* insert to defunct thread list */
        rt_list_insert_after(&rt_thread_defunct, &(thread->tlist));
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    /* switch to next task */
    rt_schedule();
}
/**
 * This function will let the current thread yield the processor; the scheduler
 * will then choose the highest-priority ready thread to run. After yielding,
 * the current thread is still in the READY state.
 *
 * @return RT_EOK
 */
rt_err_t rt_thread_yield(void)
{
    register rt_base_t level;
    struct rt_thread *thread;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* set to current thread */
    thread = rt_current_thread;

    /* if the thread stat is READY and on ready queue list */
    if (thread->stat == RT_THREAD_READY &&
        thread->tlist.next != thread->tlist.prev)
    {
        /* remove thread from thread list */
        rt_list_remove(&(thread->tlist));

        /* put thread to end of ready queue */
        rt_list_insert_before(&(rt_thread_priority_table[thread->current_priority]),
                              &(thread->tlist));

        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        rt_schedule();

        return RT_EOK;
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    return RT_EOK;
}
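/*
 * Usage sketch (not from the source above; worker_entry and its string
 * parameter are hypothetical): two threads created at the same priority can
 * share the CPU cooperatively by yielding after each unit of work. Each call
 * keeps the caller READY but moves it to the tail of its priority's ready
 * queue, so rt_schedule() picks the other thread next.
 */
static void worker_entry(void *parameter)
{
    while (1)
    {
        rt_kprintf("%s: one unit of work done\n", (char *)parameter);
        /* stay READY, but let same-priority peers run */
        rt_thread_yield();
    }
}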
static inline int tbx_wait_room_until(TBX *tbx, int *fravbs, int msgsize,
                                      RTIME time, RT_TASK *rt_current)
{
    int timed = 0;
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if ((*fravbs) < msgsize) {
        tbx->waiting_nr++;
        rt_current->blocked_on = SOMETHING;
        rt_current->resume_time = time;
        rt_current->state |= DELAYED;
        rt_rem_ready_current(rt_current);
        tbx->waiting_task = rt_current;
        rt_enq_timed_task(rt_current);
        rt_schedule();
        if (rt_current->blocked_on) {
            tbx->waiting_nr--;
            rt_current->blocked_on = NOTHING;
            tbx->waiting_task = NOTHING;
            timed = 1;
        }
    }
    rt_global_restore_flags(flags);

    return timed;
}
/**
 * This function will release a semaphore. If there are threads suspended on
 * the semaphore, the first of them will be woken up.
 *
 * @param sem the semaphore object
 *
 * @return the error code
 */
rt_err_t rt_sem_release(rt_sem_t sem)
{
    register rt_base_t temp;
    register rt_bool_t need_schedule;

#ifdef RT_USING_HOOK
    if (rt_object_put_hook != RT_NULL)
        rt_object_put_hook(&(sem->parent.parent));
#endif

    need_schedule = RT_FALSE;

    /* disable interrupt */
    temp = rt_hw_interrupt_disable();

#ifdef RT_IPC_DEBUG
    rt_kprintf("thread %s releases sem:%s, which value is: %d\n",
               rt_thread_self()->name,
               ((struct rt_object *)sem)->name,
               sem->value);
#endif

    if (sem->parent.suspend_thread_count > 0)
    {
        /* resume the suspended thread */
        rt_ipc_object_resume(&(sem->parent));
        need_schedule = RT_TRUE;
    }
    else
        sem->value ++; /* increase value */

    /* enable interrupt */
    rt_hw_interrupt_enable(temp);

    /* resumed a thread, so re-schedule */
    if (need_schedule == RT_TRUE)
        rt_schedule();

    return RT_EOK;
}
static inline int tbx_smx_wait_until(TBX *tbx, SEM *smx, RTIME time,
                                     RT_TASK *rt_current)
{
    int timed = 0;
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if (!(smx->count)) {
        tbx->waiting_nr++;
        rt_current->blocked_on = &smx->queue;
        rt_current->resume_time = time;
        rt_current->state |= (SEMAPHORE | DELAYED);
        rt_rem_ready_current(rt_current);
        enqueue_blocked(rt_current, &smx->queue, smx->qtype);
        rt_enq_timed_task(rt_current);
        rt_schedule();
        if (rt_current->blocked_on) {
            dequeue_blocked(rt_current);
            timed = 1;
            tbx->waiting_nr--;
        }
    } else {
        smx->count = 0;
    }
    rt_global_restore_flags(flags);

    return timed;
}
/**
 * This function will release a semaphore. If there are threads suspended on
 * the semaphore, the first of them will be woken up.
 *
 * @param sem the semaphore object
 *
 * @return the error code
 */
rt_err_t rt_sem_release(rt_sem_t sem)
{
    register rt_base_t temp;
    register rt_bool_t need_schedule;

    RT_OBJECT_HOOK_CALL(rt_object_put_hook, (&(sem->parent.parent)));

    need_schedule = RT_FALSE;

    /* disable interrupt */
    temp = rt_hw_interrupt_disable();

    RT_DEBUG_LOG(RT_DEBUG_IPC, ("thread %s releases sem:%s, which value is: %d\n",
                                rt_thread_self()->name,
                                ((struct rt_object *)sem)->name,
                                sem->value));

    if (!rt_list_isempty(&sem->parent.suspend_thread))
    {
        /* resume the suspended thread */
        rt_ipc_list_resume(&(sem->parent.suspend_thread));
        need_schedule = RT_TRUE;
    }
    else
        sem->value ++; /* increase value */

    /* enable interrupt */
    rt_hw_interrupt_enable(temp);

    /* resumed a thread, so re-schedule */
    if (need_schedule == RT_TRUE)
        rt_schedule();

    return RT_EOK;
}
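/*
 * A minimal producer/consumer sketch for rt_sem_release(); demo_sem and both
 * entry functions are hypothetical, and the semaphore is assumed to have been
 * created elsewhere with rt_sem_create("demo", 0, RT_IPC_FLAG_FIFO). A release
 * either wakes the first suspended consumer (and reschedules) or just
 * increments the count.
 */
static rt_sem_t demo_sem; /* assumed: created with an initial value of 0 */

static void producer_entry(void *parameter)
{
    while (1)
    {
        /* ... produce one item ... */
        rt_sem_release(demo_sem); /* wake a waiter or bump the count */
    }
}

static void consumer_entry(void *parameter)
{
    while (1)
    {
        if (rt_sem_take(demo_sem, RT_WAITING_FOREVER) == RT_EOK)
        {
            /* ... consume one item ... */
        }
    }
}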
static int mbx_wait_until(MBX *mbx, int *fravbs, RTIME time, RT_TASK *rt_current)
{
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if (!(*fravbs)) {
        void *retp;

        rt_current->blocked_on = (void *)mbx;
        mbx->waiting_task = rt_current;
        if ((rt_current->resume_time = time) > rt_smp_time_h[rtai_cpuid()]) {
            rt_current->state |= (RT_SCHED_MBXSUSP | RT_SCHED_DELAYED);
            rem_ready_current(rt_current);
            enq_timed_task(rt_current);
            rt_schedule();
        }
        if (unlikely((retp = rt_current->blocked_on) != NULL)) {
            mbx->waiting_task = NULL;
            rt_global_restore_flags(flags);
            return likely(retp > RTP_HIGERR) ? RTE_TIMOUT
                                             : (retp == RTP_UNBLKD ? RTE_UNBLKD
                                                                   : RTE_OBJREM);
        }
    }
    rt_global_restore_flags(flags);

    return 0;
}
/**
 * This function will start an RMS task and put it into the system ready queue.
 */
rt_rms_t rt_rms_startup(rt_rms_t rms)
{
    /* parameter check */
    RT_ASSERT(rms != RT_NULL);
    RT_ASSERT(rms->thread->stat == RT_RMS_INIT);

    /* set current priority to init priority */
    rms->thread->current_priority = rms->thread->init_priority;

#if RT_THREAD_PRIORITY_MAX > 32
    rms->thread->number      = rms->thread->current_priority >> 3;
    rms->thread->number_mask = 1L << rms->thread->number;
    rms->thread->high_mask   = 1L << (rms->thread->current_priority & 0x07);
#else
    rms->thread->number_mask = 1L << rms->thread->current_priority;
#endif

    /* change rms stat */
    rms->thread->stat = RT_RMS_SLEEP;
    rms->deadline = rt_tick_get() + rms->period;

    /* resume rms task */
    rt_rms_resume(rms);
    if (rt_rms_self() != RT_NULL)
    {
        /* do a scheduling */
        rt_schedule();
    }

    return RT_RMS_EOK;
}
/**
 * This function will let the current thread sleep for some ticks.
 *
 * @param tick the sleep ticks
 *
 * @return RT_EOK
 */
rt_err_t rt_thread_sleep(rt_tick_t tick)
{
    register rt_base_t temp;
    struct rt_thread *thread;

    /* disable interrupt */
    temp = rt_hw_interrupt_disable();

    /* set to current thread */
    thread = rt_current_thread;
    RT_ASSERT(thread != RT_NULL);

    /* suspend thread */
    rt_thread_suspend(thread);

    /* reset the timeout of thread timer and start it */
    rt_timer_control(&(thread->thread_timer), RT_TIMER_CTRL_SET_TIME, &tick);
    rt_timer_start(&(thread->thread_timer));

    /* enable interrupt */
    rt_hw_interrupt_enable(temp);

    rt_schedule();

    /* clear error number of this thread to RT_EOK */
    if (thread->error == -RT_ETIMEOUT)
        thread->error = RT_EOK;

    return RT_EOK;
}
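/*
 * A minimal usage sketch for rt_thread_sleep(); the print loop is
 * hypothetical. The call suspends the current thread, arms its per-thread
 * timer, and reschedules; the timer wakes the thread `tick` ticks later.
 */
static void blink_entry(void *parameter)
{
    while (1)
    {
        rt_kprintf("tick %d\n", rt_tick_get());
        rt_thread_sleep(100); /* block this thread for 100 ticks */
    }
}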
void rt_rms_exit(void)
{
    struct rt_rms *rms;
    register rt_base_t level;

    /* get current rms */
    rms = rt_current_rms;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* remove from schedule */
    rt_schedule_remove_rms(rms);
    /* change stat */
    rms->thread->stat = RT_RMS_CLOSE;

    /* remove it from timer list */
    rt_timer_detach(&rms->rms_timer);
    rt_timer_detach(&rms->thread->thread_timer);

    if (rt_object_is_systemobject((rt_object_t)rms->thread) == RT_TRUE)
    {
        rt_object_detach((rt_object_t)rms->thread);
    }

    rt_list_remove(&(rms->thread->tlist));
    rt_list_remove(&(rms->rlist));

    /* enable interrupt */
    rt_hw_interrupt_enable(level);

    /* switch to next task */
    rt_schedule();
}
void rt_completion_done(struct rt_completion *completion)
{
    rt_base_t level;

    RT_ASSERT(completion != RT_NULL);

    if (completion->flag == RT_COMPLETED)
        return;

    level = rt_hw_interrupt_disable();
    completion->flag = RT_COMPLETED;

    if (!rt_list_isempty(&(completion->suspended_list)))
    {
        /* there is one thread in suspended list */
        struct rt_thread *thread;

        /* get thread entry */
        thread = rt_list_entry(completion->suspended_list.next,
                               struct rt_thread,
                               tlist);

        /* resume it */
        rt_thread_resume(thread);
        rt_hw_interrupt_enable(level);

        /* perform a schedule */
        rt_schedule();
    }
    else
    {
        /* no waiter: only the completed flag was recorded */
        rt_hw_interrupt_enable(level);
    }
}
rt_err_t rt_completion_wait(struct rt_completion *completion,
                            rt_int32_t timeout)
{
    rt_err_t result;
    rt_base_t level;
    rt_thread_t thread;

    RT_ASSERT(completion != RT_NULL);

    result = RT_EOK;
    thread = rt_thread_self();

    level = rt_hw_interrupt_disable();
    if (completion->flag != RT_COMPLETED)
    {
        /* only one thread can suspend on a completion */
        RT_ASSERT(rt_list_isempty(&(completion->suspended_list)));

        if (timeout == 0)
        {
            result = -RT_ETIMEOUT;
            goto __exit;
        }
        else
        {
            /* reset thread error number */
            thread->error = RT_EOK;

            /* suspend thread */
            rt_thread_suspend(thread);
            /* add to suspended list */
            rt_list_insert_before(&(completion->suspended_list),
                                  &(thread->tlist));

            /* current context checking */
            RT_DEBUG_NOT_IN_INTERRUPT;

            /* start timer */
            if (timeout > 0)
            {
                /* reset the timeout of thread timer and start it */
                rt_timer_control(&(thread->thread_timer),
                                 RT_TIMER_CTRL_SET_TIME,
                                 &timeout);
                rt_timer_start(&(thread->thread_timer));
            }
            /* enable interrupt */
            rt_hw_interrupt_enable(level);

            /* do schedule */
            rt_schedule();

            /* thread is woken up */
            result = thread->error;

            level = rt_hw_interrupt_disable();
            /* clear completed flag */
            completion->flag = RT_UNCOMPLETED;
        }
    }

__exit:
    rt_hw_interrupt_enable(level);

    return result;
}
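/*
 * A done/wait pairing sketch; rx_done, rx_isr and rx_thread_entry are
 * hypothetical names. Exactly one thread may wait on a completion at a time
 * (the RT_ASSERT above enforces this), so the pattern is one signaller waking
 * one dedicated waiter.
 */
static struct rt_completion rx_done; /* assumed: rt_completion_init(&rx_done) at startup */

void rx_isr(void)
{
    rt_completion_done(&rx_done); /* resume the waiter and reschedule */
}

static void rx_thread_entry(void *parameter)
{
    while (1)
    {
        /* wait up to 1000 ticks for the signal */
        if (rt_completion_wait(&rx_done, 1000) == RT_EOK)
            rt_kprintf("data ready\n");
        else
            rt_kprintf("wait timed out\n");
    }
}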
/**
 * This function will start a thread and put it into the system ready queue.
 *
 * @param thread the thread to be started
 *
 * @return the operation status, RT_EOK on OK, -RT_ERROR on error
 */
rt_err_t rt_thread_startup(rt_thread_t thread)
{
    /* thread check */
    RT_ASSERT(thread != RT_NULL);
    RT_ASSERT(thread->stat == RT_THREAD_INIT);

    /* set current priority to init priority */
    thread->current_priority = thread->init_priority;

    /* calculate priority attribute */
#if RT_THREAD_PRIORITY_MAX > 32
    thread->number      = thread->current_priority >> 3;           /* 5 bits */
    thread->number_mask = 1L << thread->number;
    thread->high_mask   = 1L << (thread->current_priority & 0x07); /* 3 bits */
#else
    thread->number_mask = 1L << thread->current_priority;
#endif

    RT_DEBUG_LOG(RT_DEBUG_THREAD, ("startup a thread:%s with priority:%d\n",
                                   thread->name, thread->init_priority));

    /* change thread stat */
    thread->stat = RT_THREAD_SUSPEND;
    /* then resume it */
    rt_thread_resume(thread);
    if (rt_thread_self() != RT_NULL)
    {
        /* do a scheduling */
        rt_schedule();
    }

    return RT_EOK;
}
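/*
 * A minimal create-then-start sketch; demo_entry, the stack size, priority
 * and time slice are illustrative assumptions. rt_thread_startup() moves the
 * thread from INIT to SUSPEND, resumes it onto the ready queue, and
 * reschedules when called from thread context.
 */
static void demo_entry(void *parameter)
{
    rt_kprintf("demo thread running\n");
}

int demo_init(void)
{
    rt_thread_t tid;

    tid = rt_thread_create("demo", demo_entry, RT_NULL,
                           512, /* stack size */
                           20,  /* priority   */
                           10); /* time slice */
    if (tid == RT_NULL)
        return -RT_ERROR;

    return rt_thread_startup(tid);
}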
void pthread_exit(void *value)
{
    _pthread_data_t *ptd;
    _pthread_cleanup_t *cleanup;
    extern _pthread_key_data_t _thread_keys[PTHREAD_KEY_MAX];

    ptd = _pthread_get_data(rt_thread_self());

    rt_enter_critical();
    /* disable cancel */
    ptd->cancelstate = PTHREAD_CANCEL_DISABLE;
    /* set return value */
    ptd->return_value = value;
    rt_exit_critical();

    /* invoke pushed cleanup handlers */
    while (ptd->cleanup != RT_NULL)
    {
        cleanup = ptd->cleanup;
        ptd->cleanup = cleanup->next;

        cleanup->cleanup_func(cleanup->parameter);
        /* release this cleanup function */
        rt_free(cleanup);
    }

    /* run destructors for thread-local storage keys */
    if (ptd->tls != RT_NULL)
    {
        void *data;
        rt_uint32_t index;

        for (index = 0; index < PTHREAD_KEY_MAX; index ++)
        {
            if (_thread_keys[index].is_used)
            {
                data = ptd->tls[index];
                if (data)
                    _thread_keys[index].destructor(data);
            }
        }

        /* release tls area */
        rt_free(ptd->tls);
        ptd->tls = RT_NULL;
    }

    if (ptd->attr.detachstate == PTHREAD_CREATE_JOINABLE)
    {
        /* release the joinable pthread */
        rt_sem_release(ptd->joinable_sem);
    }

    /* detach thread */
    rt_thread_detach(ptd->tid);
    /* reschedule */
    rt_schedule();
}
RTAI_SYSCALL_MODE int rt_insert_timer(struct rt_tasklet_struct *timer,
                                      int priority, RTIME firing_time,
                                      RTIME period,
                                      void (*handler)(unsigned long),
                                      unsigned long data, int pid)
{
    spinlock_t *lock;
    unsigned long flags, cpuid;
    RT_TASK *timer_manager;

    // timer initialization
    timer->uses_fpu = 0;

    if (pid >= 0) {
        if (!handler) {
            return -EINVAL;
        }
        timer->handler = handler;
        timer->data = data;
    } else {
        if (timer->handler != NULL || timer->handler == (void *)1) {
            timer->handler = (void *)1;
            timer->data = data;
        }
    }
    timer->priority = priority;
    REALTIME2COUNT(firing_time)
    timer->firing_time = firing_time;
    timer->period = period;

    if (!pid) {
        timer->task = 0;
        timer->cpuid = cpuid = NUM_CPUS > 1 ? rtai_cpuid() : 0;
    } else {
        timer->cpuid = cpuid = NUM_CPUS > 1 ? (timer->task)->runnable_on_cpus : 0;
        (timer->task)->priority = priority;
        rt_copy_to_user(timer->usptasklet, timer,
                        sizeof(struct rt_usp_tasklet_struct));
    }

    // timer insertion in timers_list
    flags = rt_spin_lock_irqsave(lock = &timers_lock[LIST_CPUID]);
    enq_timer(timer);
    rt_spin_unlock_irqrestore(flags, lock);

    // timers_manager priority inheritance
    if (timer->priority < (timer_manager = &timers_manager[LIST_CPUID])->priority) {
        timer_manager->priority = timer->priority;
    }

    // timers_manager deadline inheritance
    flags = rt_global_save_flags_and_cli();
    if (timers_list[LIST_CPUID].next == timer &&
        (timer_manager->state & RT_SCHED_DELAYED) &&
        firing_time < timer_manager->resume_time) {
        timer_manager->resume_time = firing_time;
        rem_timed_task(timer_manager);
        enq_timed_task(timer_manager);
        rt_schedule();
    }
    rt_global_restore_flags(flags);

    return 0;
}
RTAI_SYSCALL_MODE int _rt_bits_wait(BITS *bits, int testfun,
                                    unsigned long testmasks, int exitfun,
                                    unsigned long exitmasks,
                                    unsigned long *resulting_mask, int space)
{
    RT_TASK *rt_current;
    unsigned long flags, mask = 0;
    int retval;

    CHECK_BITS_MAGIC(bits);

    flags = rt_global_save_flags_and_cli();
    if (!test_fun[testfun](bits, testmasks)) {
        void *retpnt;
        long bits_test[2];

        rt_current = RT_CURRENT;
        TEST_BUF(rt_current, bits_test);
        TEST_FUN(rt_current) = testfun;
        TEST_MASK(rt_current) = testmasks;
        rt_current->state |= RT_SCHED_SEMAPHORE;
        rem_ready_current(rt_current);
        enqueue_blocked(rt_current, &bits->queue, 1);
        rt_schedule();
        if (unlikely((retpnt = rt_current->blocked_on) != NULL)) {
            if (likely(retpnt != RTP_OBJREM)) {
                dequeue_blocked(rt_current);
                retval = RTE_UNBLKD;
            } else {
                rt_current->prio_passed_to = NULL;
                retval = RTE_OBJREM;
            }
            goto retmask;
        }
    }
    retval = 0;
    mask = bits->mask;
    exec_fun[exitfun](bits, exitmasks);
retmask:
    rt_global_restore_flags(flags);
    if (resulting_mask) {
        if (space) {
            *resulting_mask = mask;
        } else {
            rt_copy_to_user(resulting_mask, &mask, sizeof(mask));
        }
    }
    return retval;
}
/* thread1 entry */
static void thread1_entry(void *parameter)
{
    /* the low-priority thread1 starts running */
    rt_kprintf("thread1 startup\n");

    /* suspend itself */
    rt_kprintf("suspend thread self\n");
    rt_thread_suspend(tid1);
    /* voluntarily perform thread scheduling */
    rt_schedule();

    /* when thread1 is resumed */
    rt_kprintf("thread1 resumed\n");
}
RTAI_SYSCALL_MODE void rt_set_timer_firing_time(struct rt_tasklet_struct *timer,
                                                RTIME firing_time)
{
    unsigned long flags;
    RT_TASK *timer_manager;

    set_timer_firing_time(timer, firing_time);
    flags = rt_global_save_flags_and_cli();
    if (timers_list[TIMER_CPUID].next == timer &&
        ((timer_manager = &timers_manager[TIMER_CPUID])->state & RT_SCHED_DELAYED) &&
        firing_time < timer_manager->resume_time) {
        timer_manager->resume_time = firing_time;
        rem_timed_task(timer_manager);
        enq_timed_task(timer_manager);
        rt_schedule();
    }
    rt_global_restore_flags(flags);
}
/**
 * This function will release a memory block back to its memory pool.
 *
 * @param block the address of the memory block to be released
 */
void rt_mp_free(void *block)
{
    rt_uint8_t **block_ptr;
    struct rt_mempool *mp;
    struct rt_thread *thread;
    register rt_base_t level;

    /* get the control block of the pool which the block belongs to */
    block_ptr = (rt_uint8_t **)((rt_uint8_t *)block - sizeof(rt_uint8_t *));
    mp = (struct rt_mempool *)*block_ptr;

#ifdef RT_USING_HOOK
    if (rt_mp_free_hook != RT_NULL)
        rt_mp_free_hook(mp, block);
#endif

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    /* increase the free block count */
    mp->block_free_count ++;

    /* link the block into the block list */
    *block_ptr = mp->block_list;
    mp->block_list = (rt_uint8_t *)block_ptr;

    if (mp->suspend_thread_count > 0)
    {
        /* get the suspended thread */
        thread = rt_list_entry(mp->suspend_thread.next,
                               struct rt_thread,
                               tlist);

        /* set error */
        thread->error = RT_EOK;

        /* resume thread */
        rt_thread_resume(thread);
        /* decrease suspended thread count */
        mp->suspend_thread_count --;

        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        /* do a schedule */
        rt_schedule();

        return;
    }

    /* enable interrupt */
    rt_hw_interrupt_enable(level);
}
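/*
 * A minimal alloc/free sketch for the mempool API; the pool geometry and
 * names are hypothetical assumptions. rt_mp_free() above is what wakes a
 * thread that blocked inside rt_mp_alloc() when the pool was empty.
 */
static rt_uint8_t pool_buf[1024];
static struct rt_mempool demo_mp;

void demo_mp_use(void)
{
    void *blk;

    rt_mp_init(&demo_mp, "demo", pool_buf, sizeof(pool_buf), 32);

    /* wait forever until a 32-byte block is available */
    blk = rt_mp_alloc(&demo_mp, RT_WAITING_FOREVER);
    if (blk != RT_NULL)
    {
        /* ... use the block ... */
        rt_mp_free(blk); /* may resume a blocked allocator and reschedule */
    }
}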
static inline int tbx_smx_wait(TBX *tbx, SEM *smx, RT_TASK *rt_current)
{
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if (!(smx->count)) {
        tbx->waiting_nr++;
        rt_current->state |= SEMAPHORE;
        rt_rem_ready_current(rt_current);
        enqueue_blocked(rt_current, &smx->queue, smx->qtype);
        rt_schedule();
    } else {
        smx->count = 0;
    }
    rt_global_restore_flags(flags);

    return (int)(rt_current->blocked_on);
}
static inline int tbx_wait_room(TBX *tbx, int *fravbs, int msgsize,
                                RT_TASK *rt_current)
{
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if ((*fravbs) < msgsize) {
        tbx->waiting_nr++;
        rt_current->suspdepth = 1;
        rt_current->state |= SUSPENDED;
        rt_rem_ready_current(rt_current);
        rt_current->blocked_on = SOMETHING;
        tbx->waiting_task = rt_current;
        rt_schedule();
    }
    rt_global_restore_flags(flags);

    return (int)(rt_current->blocked_on);
}
static void _rt_pipe_resume_writer(struct rt_audio_pipe *pipe)
{
    if (!rt_list_isempty(&pipe->suspended_write_list))
    {
        rt_thread_t thread;

        RT_ASSERT(pipe->flag & RT_PIPE_FLAG_BLOCK_WR);

        /* get suspended thread */
        thread = rt_list_entry(pipe->suspended_write_list.next,
                               struct rt_thread,
                               tlist);

        /* resume the write thread */
        rt_thread_resume(thread);

        rt_schedule();
    }
}
static inline void tbx_signal(TBX *tbx)
{
    unsigned long flags;
    RT_TASK *task;

    flags = rt_global_save_flags_and_cli();
    if ((task = tbx->waiting_task)) {
        tbx->waiting_nr--;
        rt_rem_timed_task(task);
        task->blocked_on = NOTHING;
        tbx->waiting_task = NOTHING;
        if ((task->state &= ~(SUSPENDED | DELAYED)) == READY) {
            rt_enq_ready_task(task);
            rt_schedule();
        }
    }
    rt_global_restore_flags(flags);
}
static inline void tbx_smx_signal(TBX *tbx, SEM *smx)
{
    unsigned long flags;
    RT_TASK *task;

    flags = rt_global_save_flags_and_cli();
    if ((task = (smx->queue.next)->task)) {
        tbx->waiting_nr--;
        dequeue_blocked(task);
        rt_rem_timed_task(task);
        if ((task->state &= ~(SEMAPHORE | DELAYED)) == READY) {
            rt_enq_ready_task(task);
            rt_schedule();
        }
    } else {
        smx->count = 1;
    }
    rt_global_restore_flags(flags);
}
RTAI_SYSCALL_MODE int rt_wait_signal(RT_TASK *sigtask, RT_TASK *task)
{
    unsigned long flags;

    if (sigtask->rt_signals != NULL) {
        flags = rt_global_save_flags_and_cli();
        if (!sigtask->suspdepth++) {
            sigtask->state |= RT_SCHED_SIGSUSP;
            rem_ready_current(sigtask);
            if (task->pstate > 0 && !(--task->pstate) &&
                (task->state &= ~RT_SCHED_SIGSUSP) == RT_SCHED_READY) {
                enq_ready_task(task);
            }
            rt_schedule();
        }
        rt_global_restore_flags(flags);
        return sigtask->retval;
    }
    return 0;
}
rt_err_t rt_prio_queue_push(struct rt_prio_queue *que,
                            rt_uint8_t prio,
                            void *data,
                            rt_int32_t timeout)
{
    rt_ubase_t level;
    struct rt_prio_queue_item *item;

    RT_ASSERT(que);

    if (prio >= RT_PRIO_QUEUE_PRIO_MAX)
        return -RT_ERROR;

    item = rt_mp_alloc(&que->pool, timeout);
    if (item == RT_NULL)
        return -RT_ENOMEM;

    rt_memcpy(item + 1, data, que->item_sz);
    item->next = RT_NULL;

    level = rt_hw_interrupt_disable();

    _do_push(que, prio, item);

    if (!rt_list_isempty(&(que->suspended_pop_list)))
    {
        rt_thread_t thread;

        /* get thread entry */
        thread = rt_list_entry(que->suspended_pop_list.next,
                               struct rt_thread,
                               tlist);
        /* resume it */
        rt_thread_resume(thread);
        rt_hw_interrupt_enable(level);

        /* perform a schedule */
        rt_schedule();

        return RT_EOK;
    }

    rt_hw_interrupt_enable(level);

    return RT_EOK;
}
static int mbx_wait(MBX *mbx, int *fravbs, RT_TASK *rt_current)
{
    unsigned long flags;

    flags = rt_global_save_flags_and_cli();
    if (!(*fravbs)) {
        unsigned long retval;

        rt_current->state |= RT_SCHED_MBXSUSP;
        rem_ready_current(rt_current);
        rt_current->blocked_on = (void *)mbx;
        mbx->waiting_task = rt_current;
        rt_schedule();
        if (unlikely(retval = (unsigned long)rt_current->blocked_on)) {
            mbx->waiting_task = NULL;
            rt_global_restore_flags(flags);
            return retval;
        }
    }
    rt_global_restore_flags(flags);

    return 0;
}
/**
 * RM scheduling clock ISR: wake idle RMS tasks whose next period has started
 * and reap zombie tasks whose deadline has expired.
 */
void rt_rms_wakeup(void *parameter)
{
    register struct rt_rms *rms;
    rt_int8_t count = 0;
    rt_tick_t tick = 0;
    rt_ubase_t priority = (rt_ubase_t)parameter;

    tick = rt_tick_get();

    /* wake idle tasks */
    rms = rt_list_entry(rt_rms_idle_table[priority].next, struct rt_rms, rlist);
    if (rms != RT_NULL && rms->deadline <= tick &&
        rms->thread->stat == RT_RMS_IDLE)
    {
        rms->deadline += rms->period;
        rms->thread->stat = RT_RMS_READY;
        rt_list_remove(&(rt_rms_idle_table[priority]));
        rt_schedule_insert_rms(rms);
        count++;
    }

    /* remove zombie tasks whose deadline has expired */
    rms = rt_list_entry(rt_rms_zombie_table[priority].next, struct rt_rms, rlist);
    if (rms != RT_NULL && rms->deadline <= tick &&
        rms->thread->stat == RT_RMS_ZOMBIE)
    {
        utilization -= rms->utilization;
        rms->thread->stat = RT_RMS_CLOSE;
        rt_list_remove(&(rt_rms_zombie_table[priority]));
        rt_rms_delete(rms);
        count ++;
    }

    /* if at least one task has been woken up, call the scheduler */
    if (count > 0)
    {
        rt_schedule();
    }
}
/**
 * This function will unlock the thread scheduler.
 */
void rt_exit_critical(void)
{
    register rt_base_t level;

    /* disable interrupt */
    level = rt_hw_interrupt_disable();

    rt_scheduler_lock_nest --;

    if (rt_scheduler_lock_nest <= 0)
    {
        rt_scheduler_lock_nest = 0;
        /* enable interrupt */
        rt_hw_interrupt_enable(level);

        rt_schedule();
    }
    else
    {
        /* enable interrupt */
        rt_hw_interrupt_enable(level);
    }
}
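/*
 * A minimal pairing sketch for rt_enter_critical()/rt_exit_critical();
 * shared_counter is a hypothetical stand-in for any state shared between
 * threads. While the scheduler lock nest is non-zero no thread switch occurs;
 * the rt_schedule() inside rt_exit_critical() runs any switch that became
 * necessary while the lock was held.
 */
static volatile rt_uint32_t shared_counter;

void bump_counter(void)
{
    rt_enter_critical();  /* lock the scheduler: no preemption by threads */
    shared_counter++;
    rt_exit_critical();   /* unlock; deferred reschedule happens here */
}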