/*!
 * Deactivate thread because:
 * 1. higher priority thread becomes active
 * 2. this thread time slice is expired
 * 3. this thread blocks on some queue
 *
 * \param kthread Thread being deactivated
 * \return 0 (always)
 */
static int rr_thread_deactivate ( kthread_t *kthread )
{
	/* Get current time and recalculate remainder */
	time_t t;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( tsched->sched_policy );

	/* nonzero remainder => slice was interrupted before it expired */
	if (tsched->params.rr.remainder.sec + tsched->params.rr.remainder.nsec)
	{
		/*
		 * "slice interrupted"
		 * recalculate remainder
		 */
		k_get_time ( &t );

		/* remainder = slice_end - now (slice_end overwritten here) */
		time_sub ( &tsched->params.rr.slice_end, &t );
		tsched->params.rr.remainder = tsched->params.rr.slice_end;

		if ( kthread_is_ready ( kthread ) )
		{
			/* is remainder too small or not? */
			if ( time_cmp ( &tsched->params.rr.remainder,
				&gsched->params.rr.threshold ) <= 0 )
			{
				/* too small: treat slice as spent, requeue last */
				kthread_move_to_ready ( kthread, LAST );
			}
			else {
				/* significant remainder: let it finish slice first */
				kthread_move_to_ready ( kthread, FIRST );
			}
		}
	}
	/* else = remainder is zero, thread is already enqueued in ready queue*/

	return 0;
}
/*!
 * Close a message queue
 * \param mqdes Queue descriptor address (user level descriptor)
 * \return 0 if successful, -1 otherwise and appropriate error number is set
 */
int sys__mq_close ( void *p )
{
	mqd_t *mqdes;
	kmq_queue_t *kq_queue;
	kobject_t *kobj;
	kmq_msg_t *kmq_msg;
	kthread_t *kthread;

	mqdes = *( (mqd_t **) p );
	ASSERT_ERRNO_AND_EXIT ( mqdes, EBADF );

	/* validate user descriptor against kernel object list */
	kobj = mqdes->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj, EBADF );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj->list ),
				EBADF );

	kq_queue = kobj->kobject;
	/* NOTE(review): list_find_and_remove appears to take the queue out of
	 * 'kmq_queue' even when ref_cnt stays > 0 below, so other open
	 * descriptors could no longer find it; and the list_remove further
	 * down then removes the same node a second time — verify the list
	 * API semantics and whether the queue should be re-inserted when
	 * ref_cnt > 0. */
	kq_queue = list_find_and_remove ( &kmq_queue, &kq_queue->list );
	if ( !kq_queue || kq_queue->id != mqdes->id )
		EXIT2 ( EBADF, EXIT_FAILURE );

	kq_queue->ref_cnt--;

	/* last reference closed => destroy queue and wake blocked threads */
	if ( !kq_queue->ref_cnt )
	{
		/* remove messages */
		while( (kmq_msg = list_remove(&kq_queue->msg_list,FIRST,NULL)) )
			kfree (kmq_msg);

		/* remove blocked threads; they fail with EBADF */
		while ( (kthread = kthreadq_remove (&kq_queue->send_q, NULL)) )
		{
			kthread_move_to_ready ( kthread, LAST );
			kthread_set_errno ( kthread, EBADF );
			kthread_set_syscall_retval ( kthread, EXIT_FAILURE );
		}
		while ( (kthread = kthreadq_remove (&kq_queue->recv_q, NULL)) )
		{
			kthread_move_to_ready ( kthread, LAST );
			kthread_set_errno ( kthread, EBADF );
			kthread_set_syscall_retval ( kthread, EXIT_FAILURE );
		}

		list_remove ( &kmq_queue, 0, &kq_queue->list );
		k_free_id ( kq_queue->id );
		kfree ( kq_queue->name );
		kfree ( kq_queue );
	}

	/* remove kernel object descriptor */
	kfree_kobject ( kobj );

	EXIT2 ( EXIT_SUCCESS, EXIT_SUCCESS );
}
/*!
 * Simple Round-Robin scheduler tick handler:
 * on timer tick move the active thread into the ready queue and let the
 * scheduler pick the next ready task
 */
static void ksched_rr_tick ( sigval_t sigval )
{
	kthread_t *curr;

	/* do nothing while RR scheduling feature is turned off */
	if ( !sys__feature ( FEATURE_SCHED_RR, FEATURE_GET, 0 ) )
		return;

	curr = kthread_get_active ();

	if ( !kthread_is_active ( curr ) )
		return;

	/* demote current thread to end of its ready queue and reschedule */
	kthread_move_to_ready ( curr, LAST );
	kthreads_schedule ();
}
/*!
 * Set thread scheduling priority
 * \param kthread Target thread (NULL => currently active thread)
 * \param prio New priority
 * \return Previous priority of the thread
 */
int kthread_set_prio ( kthread_t *kthread, int prio )
{
	kthread_t *t = kthread ? kthread : active_thread;
	int prev_prio = t->sched_priority;

	/*
	 * change thread priority:
	 * (i)   active: change priority and move to ready
	 * (ii)  ready: remove from queue, change priority, put back
	 * (iii) blocked: if queue were sorted by priority, same as (ii);
	 *       currently only a FIFO queue exists, so just set it
	 * (iv)  passive: report error or just change priority?
	 */
	switch ( t->state.state )
	{
	case THR_STATE_READY:
		kthread_remove_from_ready ( t );
		/* fall through - requeue with new priority */

	case THR_STATE_ACTIVE:
		t->sched_priority = prio;
		kthread_move_to_ready ( t, LAST );
		kthreads_schedule ();
		break;

	case THR_STATE_WAIT:	/* as now there is only FIFO queue */
	case THR_STATE_PASSIVE:
		t->sched_priority = prio;
		break;
	}

	return prev_prio;
}
/*!
 * Release thread(s) blocked on a condition variable
 * \param p Pointer to user-space condition variable descriptor
 * \param release_all FALSE => release first waiter; TRUE => release all
 * \return EXIT_SUCCESS
 */
static int cond_release ( void *p, int release_all )
{
	pthread_cond_t *cond;
	kpthread_cond_t *kcond;
	kpthread_mutex_t *kmutex;
	kobject_t *kobj_cond, *kobj_mutex;
	kthread_t *kthread;
	int retval = 0;

	cond = *( (pthread_cond_t **) p );
	ASSERT_ERRNO_AND_EXIT ( cond, EINVAL );

	/* validate user descriptor against kernel object list */
	kobj_cond = cond->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj_cond, EINVAL );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj_cond->list ),
				EINVAL );
	kcond = kobj_cond->kobject;
	ASSERT_ERRNO_AND_EXIT ( kcond && kcond->id == cond->id, EINVAL );

	kthread_set_errno ( kthread_get_active (), EXIT_SUCCESS );

	/* first waiter: try to hand it the mutex it used with cond_wait */
	if ( (kthread = kthreadq_remove ( &kcond->queue, NULL )) )
	{
		/* mutex object was stored as private param when blocking */
		kobj_mutex = kthread_get_private_param ( kthread );
		kmutex = kobj_mutex->kobject;

		/* retval == 0 => mutex acquired, waiter may resume */
		retval = mutex_lock ( kmutex, kthread );

		if ( retval == 0 )
			kthread_move_to_ready ( kthread, LAST );

		/* process other threads in queue (broadcast case):
		 * they compete for their mutex instead of running now */
		while ( release_all &&
			(kthread = kthreadq_remove ( &kcond->queue, NULL )) )
		{
			kthread_set_errno ( kthread, EXIT_SUCCESS );
			kobj_mutex = kthread_get_private_param ( kthread );
			kmutex = kobj_mutex->kobject;

			kthread_enqueue(kthread, &kmutex->queue, 0, NULL, NULL);
		}
	}

	if ( retval > -1 )
		kthreads_schedule ();

	return EXIT_SUCCESS;
}
/*! * Create new thread * \param start_routine Starting function for new thread * \param arg Parameter sent to starting function * \param sched_policy Thread scheduling policy * \param sched_priority Thread priority * \param stackaddr Address of thread stack (if not NULL) * \param stacksize Stack size * \param proc Process descriptor thread belongs to * \return Pointer to descriptor of created kernel thread */ kthread_t *kthread_create ( void *start_routine, void *arg, uint flags, int sched_policy, int sched_priority, sched_supp_t *sched_param, void *stackaddr, size_t stacksize, kprocess_t *proc ) { ASSERT ( proc ); kthread_t *kthread; /* thread descriptor */ kthread = kmalloc ( sizeof (kthread_t) ); ASSERT ( kthread ); /* initialize thread descriptor */ kthread->id = k_new_id (); kthread->proc = proc; kthread->proc->thread_count++; kthread->queue = NULL; kthreadq_init ( &kthread->join_queue ); kthread_create_new_state ( kthread, start_routine, arg, stackaddr, stacksize, FALSE ); kthread->state.flags = flags; list_init ( &kthread->states ); /* connect signal mask in descriptor with state */ kthread->sig_handling.mask = &kthread->state.sigmask; ksignal_thread_init ( kthread ); list_append ( &all_threads, kthread, &kthread->all ); kthread->sched_policy = sched_policy; if ( sched_priority < 0 ) sched_priority = 0; if ( sched_priority >= PRIO_LEVELS ) sched_priority = PRIO_LEVELS - 1; kthread->sched_priority = sched_priority; kthread->ref_cnt = 1; kthread_move_to_ready ( kthread, LAST ); ksched2_thread_add (kthread, sched_policy, sched_priority, sched_param); return kthread; }
/*!
 * Release single thread from given queue (if queue not empty)
 * \param q Queue
 * \return 1 if thread was released, 0 if queue was empty
 */
int kthreadq_release ( kthread_q *q )
{
	kthread_t *t;

	ASSERT ( q );

	t = kthreadq_remove ( q, NULL );
	if ( !t )
		return 0;	/* queue was empty */

	kthread_move_to_ready ( t, LAST );

	return 1;
}
/*!
 * Select ready thread with highest priority as active
 * - if different from current, move current into ready queue (id not NULL) and
 *   move selected thread from ready queue to active queue
 */
void kthreads_schedule ()
{
	kthread_t *curr, *next = NULL;

	curr = kthread_get_active();
	next = get_first_ready ();

	/* must exist an thread to return to, 'curr' or first from 'ready' */
	ASSERT ( ( curr && kthread_is_active ( curr ) ) || next );

	if ( !sys__feature ( FEATURE_SCHEDULER, FEATURE_GET, 0 ) &&
		curr && kthread_is_active ( curr ) )
		return;/*scheduler disabled, don't switch from current thread */

	/* switch only if current can't continue or 'next' has higher prio */
	if ( !curr || !kthread_is_active ( curr ) ||
		kthread_get_prio ( curr ) < kthread_get_prio ( next ) )
	{
		if ( curr && !kthread_is_passive (curr) ) /* deactivate curr */
		{
			/* move last active to ready queue, if still ready */
			if ( kthread_is_active ( curr ) )
				kthread_move_to_ready ( curr, LAST );

			/* deactivation might change ready thread list */
			next = get_first_ready ();
			ASSERT (next);
		}

		/* activate next */
		next = kthread_remove_from_ready ( next );
		ASSERT ( next );
		kthread_set_active ( next );
	}

	/* process pending signals (if any) */
	ksignal_process_pending ( kthread_get_active() );

	/* perform the context switch only when the active thread changed */
	if ( curr != kthread_get_active() )
		kthread_switch_to_thread ( curr, kthread_get_active() );
	/* else => continue with current thread */
}
/*! Timer interrupt for Round Robin */ static void rr_timer ( void *p ) { kthread_t *kthread = p; kthread_sched_data_t *tsched; if ( kthread_get_active () != kthread ) { /* bug or rr thread got canceled! Let asume second :) */ return; } tsched = kthread_get_sched_param ( kthread ); /* given time is elapsed, set remainder to zero */ tsched->params.rr.remainder.sec = tsched->params.rr.remainder.nsec = 0; /* move thread to ready queue - as last in coresponding queue */ kthread_move_to_ready ( kthread, LAST ); kthreads_schedule (); }
/*!
 * Resume suspended thread (called on timer activation)
 * \param sigval Thread that should be released
 */
static void kclock_wake_thread ( sigval_t sigval )
{
	kthread_t *target;
	ktimer_t *timer;
	timespec_t *remain;
	int rc;

	target = sigval.sival_ptr;
	ASSERT ( target );
	ASSERT ( kthread_check_kthread ( target ) ); /* is this valid thread */
	ASSERT ( kthread_is_suspended ( target, NULL, NULL ) );

	timer = kthread_get_private_param ( target );

	/* timer expired => no remaining sleep time to report */
	remain = timer->param;
	if ( remain )
		TIME_RESET ( remain );

	kthread_move_to_ready ( target, LAST );

	rc = ktimer_delete ( timer );
	ASSERT ( rc == EXIT_SUCCESS );

	kthreads_schedule ();
}
/*!
 * Receive a message from a message queue
 * \param p Packed syscall parameters: mqdes, msg_ptr, msg_len, msg_prio
 * \param receiver Thread performing the receive
 * \return message length on success, negative error number otherwise
 */
static int kmq_receive ( void *p, kthread_t *receiver )
{
	mqd_t *mqdes;
	char *msg_ptr;
	size_t msg_len;
	uint *msg_prio;

	kmq_queue_t *kq_queue;
	kobject_t *kobj;
	kmq_msg_t *kmq_msg;
	kthread_t *kthread;
	int retval;

	/* unpack packed syscall parameters */
	mqdes = *( (mqd_t **) p );	p += sizeof (mqd_t *);
	msg_ptr = *( (char **) p );	p += sizeof (char *);
	msg_len = *( (size_t *) p );	p += sizeof (size_t);
	msg_prio = *( (uint **) p );

	ASSERT_ERRNO_AND_EXIT ( mqdes && msg_ptr, -EINVAL );

	/* validate descriptor against kernel object and queue lists */
	kobj = mqdes->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj, -EBADF );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj->list ),
				-EBADF );
	kq_queue = kobj->kobject;
	ASSERT_ERRNO_AND_EXIT ( kq_queue->id == mqdes->id, -EBADF );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kmq_queue, &kq_queue->list ),
				-EBADF );

	if ( kq_queue->attr.mq_curmsgs == 0 ) /* queue empty */
	{
		if ( (kobj->flags & O_NONBLOCK) )
			return -EAGAIN;

		/* block thread; a future sender reruns this receive and
		 * sets the syscall return value for us */
		kthread_enqueue ( receiver, &kq_queue->recv_q, 1, NULL, NULL );
		kthreads_schedule ();

		return -EAGAIN;
	}

	/* buffer must hold largest possible message;
	 * NOTE(review): this check runs only after the blocking path
	 * above — confirm a too-small buffer can't block first */
	if ( msg_len < kq_queue->attr.mq_msgsize )
		return -EMSGSIZE;

	/* take first message (list is kept sorted on send) */
	kmq_msg = list_remove ( &kq_queue->msg_list, FIRST, NULL );

	memcpy ( msg_ptr, &kmq_msg->msg_data[0], kmq_msg->msg_size );
	msg_len = kmq_msg->msg_size;
	if ( msg_prio )
		*msg_prio = kmq_msg->msg_prio;

	kfree (kmq_msg);
	kq_queue->attr.mq_curmsgs--;

	/* is there a blocked sender? */
	if ( (kthread = kthreadq_remove ( &kq_queue->send_q, NULL )) )
	{
		/* "save" receiver thread */
		kthread_move_to_ready ( receiver, FIRST );
		kthread_set_errno ( receiver, EXIT_SUCCESS );
		kthread_set_syscall_retval ( receiver, msg_len );

		/* unblock sender: rerun its send with its own parameters */
		kthread_set_active ( kthread ); /* temporary */
		p = arch_syscall_get_params ( kthread_get_context (kthread) );
		retval = kmq_send ( p, kthread );

		if ( retval == EXIT_SUCCESS )
		{
			kthread_set_errno ( kthread, EXIT_SUCCESS );
			kthread_set_syscall_retval ( kthread, retval );
		}
		else {
			kthread_set_errno ( kthread, retval );
			kthread_set_syscall_retval ( kthread, EXIT_FAILURE );
		}

		kthreads_schedule ();
	}

	return msg_len;
}
/*!
 * EDF scheduler: pick the EDF thread with the earliest active deadline as
 * edf.active and promote it into the global ready queue
 * \param ksched EDF scheduler descriptor
 * \return 0
 */
static int edf_schedule ( ksched_t *ksched )
{
	kthread_t *first, *next, *edf_active;
	kthread_sched2_t *sch_first, *sch_next, *ea;

	edf_active = ksched->params.edf.active;

	/* forget stale active thread (no longer ready) */
	if ( edf_active && !kthread_is_ready ( edf_active ) ) {
		ksched->params.edf.active = edf_active = NULL;
	}

	first = kthreadq_get ( &ksched->params.edf.ready );

	EDF_LOG ( "%x %x [active, first in queue]", edf_active, first );

	if ( !first ) {
		kthreads_schedule ();
		return 0; /* no threads in edf.ready queue, edf.active unch. */
	}

	/* search for earliest deadline among edf.active (if any)
	 * and all members of edf.ready */
	if ( edf_active ) {
		next = first;
		first = edf_active;
	}
	else {
		next = kthreadq_get_next ( first );
	}

	while ( first && next )
	{
		sch_first = kthread_get_sched2_param ( first );
		sch_next = kthread_get_sched2_param ( next );
		if ( time_cmp ( &sch_first->params.edf.active_deadline,
			&sch_next->params.edf.active_deadline ) > 0 )
		{
			first = next;
		}
		next = kthreadq_get_next ( next );
	}

	/* 'first' now has earliest deadline; switch only if it changed */
	if ( first && first != edf_active )
	{
		next = kthreadq_remove ( &ksched->params.edf.ready, first );
		EDF_LOG ( "%x removed, %x is now first", next,
			  kthreadq_get ( &ksched->params.edf.ready ) );

		if ( edf_active )
		{
			EDF_LOG ( "%x=>%x [EDF_SCHED_PREEMPT]",
				  edf_active, first );
			/*
			 * change active EDF thread:
			 * -remove it from active/ready list
			 * -put it into edf.ready list
			 */
			if ( kthread_is_ready (edf_active) )
			{
				if ( !kthread_is_active (edf_active) )
				{
					kthread_remove_from_ready (edf_active);
					/*
					 * set "deactivated" flag, don't need
					 * another call to "edf_schedule"
					 */
				}
				else {
					ea = kthread_get_sched2_param
						(edf_active);
					ea->activated = 0;
				}
				kthread_enqueue ( edf_active,
						  &ksched->params.edf.ready );
			}
			/* else = thread is blocked - leave it there */
		}

		ksched->params.edf.active = first;
		EDF_LOG ( "%x [new active]", first );
		kthread_move_to_ready ( first, LAST );
	}

	kthreads_schedule ();

	return 0;
}
/*!
 * Cancel thread (or restore it to previous state)
 * \param kthread Thread descriptor
 * \param exit_status Status the thread exits with
 * \param force TRUE => destroy all saved states; FALSE => restore previous
 * \return EXIT_SUCCESS, or ESRCH when descriptor looks corrupted
 */
int kthread_exit ( kthread_t *kthread, void *exit_status, int force )
{
	kthread_state_t *prev;
	int restored = FALSE;
	kthread_t *released;
	kthread_q *q;
	void **p;

	ASSERT ( kthread );

	/* pop saved states; with 'force' (cancel) destroy them all */
	do {
		prev = list_get ( &kthread->states, FIRST );
		if ( prev )
			restored = kthread_restore_state ( kthread );
	}
	while ( prev && force ); /* if cancel is called, destroy all states */

	if ( restored && !force ) /* restored to previous state */
	{
		if ( kthread == active_thread )
			kthread->state.state = THR_STATE_ACTIVE;
		kthreads_schedule ();
		return EXIT_SUCCESS;
	}

	if ( kthread->state.state == THR_STATE_PASSIVE )
		return EXIT_SUCCESS; /* thread is already finished */

	/* detach thread from wherever its current state keeps it */
	if ( kthread->state.state == THR_STATE_READY )
	{
		/* remove target 'thread' from its queue */
		if ( !kthread_remove_from_ready ( kthread ) )
			ASSERT ( FALSE );
	}
	else if ( kthread->state.state == THR_STATE_WAIT )
	{
		/* remove target 'thread' from its queue */
		if ( !kthreadq_remove ( kthread->queue, kthread ) )
			ASSERT ( FALSE );
	}
	else if ( kthread->state.state == THR_STATE_SUSPENDED )
	{
		/* cancellation routine */
		if ( kthread->state.cancel_suspend_handler )
			kthread->state.cancel_suspend_handler ( kthread,
				kthread->state.cancel_suspend_param );
	}
	else if ( kthread->state.state == THR_STATE_ACTIVE )
	{
		if ( kthread != active_thread )
			return ESRCH; /* thread descriptor corrupted ! */
	}
	else {
		return ESRCH; /* thread descriptor corrupted ! */
	}

	kthread->state.state = THR_STATE_PASSIVE;
	kthread->ref_cnt--;
	kthread->state.exit_status = exit_status;
	kthread->proc->thread_count--;

	arch_destroy_thread_context ( &kthread->state.context );

	kthread_restore_state ( kthread );

	/* release all threads joined on this one, handing them exit status */
	q = &kthread->join_queue;
	while ( (released = kthreadq_remove ( q, NULL )) != NULL )
	{
		/* save exit status to all waiting threads */
		p = kthread_get_private_param ( released );
		if ( p )
			*p = exit_status;
		kthread_move_to_ready ( released, LAST );
		kthread->ref_cnt--;
	}

	/* no references left => free descriptor */
	if ( !kthread->ref_cnt )
		kthread_remove_descriptor ( kthread );

	if ( kthread == active_thread )
	{
		active_thread = NULL;
		kthreads_schedule ();
	}

	return EXIT_SUCCESS;
}
/*!
 * Send signal to target thread
 * \param kthread Target thread
 * \param sig Signal information
 * \return EXIT_SUCCESS, EAGAIN (queued), ESRCH or ENOTSUP
 */
int ksignal_queue ( kthread_t *kthread, siginfo_t *sig )
{
	int enqueue = FALSE;
	int retval = EXIT_SUCCESS;
	int schedule = FALSE;
	ksignal_handling_t *sh;
	sigaction_t *act;
	void (*func) (kthread_t *, void *), *param;
	kprocess_t *proc;
	siginfo_t *us;
	param_t param1, param2, param3;

	ASSERT ( kthread );
	ASSERT ( kthread_check_kthread ( kthread ) );
	ASSERT ( sig->si_signo > 0 && sig->si_signo <= SIGMAX );

	if ( !kthread_is_alive ( kthread ) )
		return ESRCH;

	sh = kthread_get_sigparams ( kthread );

	/* is thread suspended and waits for this signal? */
	if ( kthread_is_suspended ( kthread, (void **) &func, &param ) &&
		((void *) func) == ((void *) ksignal_received_signal) )
	{
		/* thread is waiting for signal */
		if ( !ksignal_received_signal ( kthread, sig ) )
		{
			/* waited for this signal */
			/* do not process this signal further */
			return EXIT_SUCCESS;
		}
	}

	/*
	 * If thread is in interruptable state and signal is not masked:
	 * - process signal
	 * - otherwise queue it
	 */
	if ( kthread_get_interruptable ( kthread ) &&
		!sigtestset ( sh->mask, sig->si_signo ) )
	{
		act = &sh->act[sig->si_signo];

		if ( act->sa_flags != SA_SIGINFO )
			return ENOTSUP; /* not supported without SA_SIGINFO! */

		if (	act->sa_sigaction == SIG_ERR ||
			act->sa_sigaction == SIG_DFL ||
			act->sa_sigaction == SIG_IGN ||
			act->sa_sigaction == SIG_HOLD )
		{
			return ENOTSUP; /* not yet supported */
		}

		if ( !kthread_is_ready ( kthread ) )
		{
			/*
			 * thread is suspended/blocked on something else
			 *
			 * 1. handle interruption:
			 *    a) we break suspend/wait state
			 *       (call cancel function)
			 *    b) set errno = EINTR
			 *    c) set return value = FAILURE
			 * 2. create new thread state
			 *    a) process signal in this state
			 */
			void (*func) (kthread_t *, void *), *param;

			kthread_is_suspended (kthread, (void **) &func,
					      &param);
			if ( func )
				func ( kthread, param );

			kthread_move_to_ready ( kthread, LAST );
			kthread_set_errno ( kthread, EINTR );
			kthread_set_syscall_retval ( kthread, EXIT_FAILURE );

			/* thread is unsuspended, but signal
			 * handler will be added first */
			schedule = TRUE;
		}

		/* copy sig to user space */
		proc = kthread_get_process ( kthread );
		us = kprocess_stack_alloc ( proc );
		ASSERT (us); /*if ( !us ) return ENOMEM;*/
		*us = *sig;

		/* run handler in a new thread state stacked on the current */
		kthread_create_new_state ( kthread, act->sa_sigaction,
					   K2U_GET_ADR ( us, proc ),
					   NULL, HANDLER_STACK_SIZE, TRUE );

		/* free the user-space copy when the handler state exits */
		param1.p_ptr = proc;
		param2.p_ptr = us;
		param3.p_ptr = NULL;
		kthread_add_cleanup ( kthread, kprocess_stack_free,
				      param1, param2, param3 );

		/* mask signal in thread mask */
		sigaddset ( sh->mask, sig->si_signo );
		/* mask additional signals in thread mask */
		sigaddsets ( sh->mask, &act->sa_mask );
	}
	else {
		enqueue = TRUE;
	}

	if ( enqueue ) {
		ksignal_add_to_pending ( sh, sig );
		retval = EAGAIN;
	}

	if ( schedule )
		kthreads_schedule ();

	return retval;
}
/*!
 * Callback function called when a signal is delivered to suspended thread
 * \param kthread Suspended thread
 * \param param Delivered signal (siginfo_t *) or NULL for other wake events
 * \return EXIT_SUCCESS if the thread waited for this signal and was resumed,
 *         EXIT_FAILURE otherwise
 */
static int ksignal_received_signal ( kthread_t *kthread, void *param )
{
	siginfo_t *sig;
	context_t *context;
	uint sysid;
	void *p;
	sigset_t *set;
	siginfo_t *info;
	int retval = EXIT_SUCCESS;

	ASSERT ( kthread );

	/* thread waked by signal or other event? */
	if ( param == NULL )
	{
		kthread_set_errno ( kthread, EINTR );
		kthread_set_syscall_retval ( kthread, EXIT_FAILURE );
		return EXIT_FAILURE; /* other event interrupted thread */
	}

	/* signal interrupted, but did thread waited for this signal? */
	sig = param;

	/* get syscall which caused thread to be suspend */
	context = kthread_get_context ( kthread );
	sysid = arch_syscall_get_id ( context );

	switch ( sysid )
	{
	case SIGWAITINFO: /* sigwaitinfo */
		/* unpack sigwaitinfo parameters: (sigset_t *, siginfo_t *) */
		p = arch_syscall_get_params ( context );
		set = *( (sigset_t **) p );	p += sizeof (sigset_t *);
		info = *( (siginfo_t **) p );	p += sizeof (siginfo_t *);

		ASSERT ( set );
		/* translate user-space addresses to kernel addresses */
		set = U2K_GET_ADR ( set, kthread_get_process (NULL) );
		ASSERT ( set );
		if ( info )
			info = U2K_GET_ADR ( info,
					     kthread_get_process (NULL) );

		retval = EXIT_FAILURE;

		if ( sigtestset ( set, sig->si_signo ) )
		{
			/* thread waited for exactly this signal */
			retval = sig->si_signo;
			kthread_set_syscall_retval ( kthread, retval );
			if ( info )
				*info = *sig;
			kthread_set_errno ( kthread, EXIT_SUCCESS );

			/* resume with thread */
			kthread_move_to_ready ( kthread, LAST );
			kthreads_schedule ();

			return EXIT_SUCCESS;
		}
		else {
			/* not waiting for this signal */
			return EXIT_FAILURE;
		}

	default:
		return EXIT_FAILURE;
	}
}
/*!
 * Deadline alarm handler (alarm interface) for an EDF thread:
 * fires when the thread's absolute deadline expires
 * \param p Thread whose deadline expired (kthread_t *)
 */
static void edf_deadline_timer ( void *p )
{
	alarm_t alarm;
	kthread_t *kthread = p, *test;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );

	ASSERT ( kthread );

	/* was the thread still waiting for its next period? */
	test = kthreadq_remove ( &ksched_edf.params.edf.wait, kthread );

	LOG( DEBUG, "%x %x [Deadline alarm]", kthread, test );

	if( test == kthread )
	{
		if ( edf_check_deadline ( kthread ) )
		{
			LOG( DEBUG, "%x [Waked, but too late]", kthread );
			kthread_set_syscall_retval ( kthread, -1 );
			kthread_move_to_ready ( kthread, LAST );

			if ( tsched->params.edf.flags & EDF_TERMINATE )
			{
				LOG( DEBUG, "%x [EDF_TERMINATE]", kthread );
				tsched = kthread_get_sched_param ( kthread );
				k_alarm_remove (
					tsched->params.edf.edf_period_alarm );
				k_alarm_remove (
					tsched->params.edf.edf_deadline_alarm );
				tsched->params.edf.edf_period_alarm = NULL;
				kthread_cancel ( kthread, -E_DEADLINE );
			}
			kthreads_schedule ();
		}
	}
	else {
		/*
		 * thread is not in edf.wait queue, but might be running or its
		 * blocked - it is probable (sure?) it missed deadline
		 */
		LOG( DEBUG, "%x [Not in edf.wait. Missed deadline?]",
		     kthread );

		if ( edf_check_deadline ( kthread ) )
		{
			/* what to do if its missed? kill thread? */
			tsched = kthread_get_sched_param ( kthread );

			if ( tsched->params.edf.flags & EDF_TERMINATE )
			{
				LOG( DEBUG, "%x [EDF_TERMINATE]", kthread );
				k_alarm_remove (
					tsched->params.edf.edf_period_alarm );
				k_alarm_remove (
					tsched->params.edf.edf_deadline_alarm );
				tsched->params.edf.edf_period_alarm = NULL;
				kthread_cancel ( kthread, -E_DEADLINE );
			}
			else if ( tsched->params.edf.flags & EDF_CONTINUE )
			{
				/* continue as deadline is not missed */
				LOG( DEBUG, "%x [EDF_CONTINUE]", kthread );
			}
			else if ( tsched->params.edf.flags & EDF_SKIP )
			{
				/* skip deadline */
				/* set times for next period */
				LOG( DEBUG, "%x [EDF_SKIP]", kthread );

				time_add ( &tsched->params.edf.next_run,
					   &tsched->params.edf.period );
				tsched->params.edf.active_deadline =
					tsched->params.edf.next_run;
				time_add (
					&tsched->params.edf.active_deadline,
					&tsched->params.edf.relative_deadline );

				if ( kthread == ksched_edf.params.edf.active )
					ksched_edf.params.edf.active = NULL;

				/* rearm one-shot deadline alarm (absolute) */
				alarm.action = edf_deadline_timer;
				alarm.param = kthread;
				alarm.flags = 0;
				alarm.period.sec = alarm.period.nsec = 0;
				alarm.exp_time =
					tsched->params.edf.active_deadline;
				k_alarm_set (
					tsched->params.edf.edf_deadline_alarm,
					&alarm );

				/* rearm periodic alarm for next activation */
				alarm.action = edf_period_timer;
				alarm.param = kthread;
				alarm.flags = ALARM_PERIODIC;
				alarm.period = tsched->params.edf.period;
				alarm.exp_time = tsched->params.edf.next_run;
				k_alarm_set (
					tsched->params.edf.edf_period_alarm,
					&alarm );

				kthread_enqueue ( kthread,
					&ksched_edf.params.edf.ready );
				kthreads_schedule ();
				/* will call edf_schedule() */
			}
		}
	}
}
/*!
 * Deadline alarm handler (ktimer interface) for an EDF thread:
 * fires when the thread's absolute deadline expires
 * \param sigev_value Thread whose deadline expired (in sival_ptr)
 */
static void edf_deadline_alarm ( sigval_t sigev_value )
{
	kthread_t *kthread = sigev_value.sival_ptr, *test;
	kthread_sched2_t *tsched;
	ksched_t *ksched;
	itimerspec_t alarm;

	ASSERT ( kthread );

	ksched = ksched2_get ( kthread_get_sched_policy (kthread) );
	tsched = kthread_get_sched2_param ( kthread );

	/* was the thread still waiting for its next period? */
	test = kthreadq_remove ( &ksched->params.edf.wait, kthread );

	EDF_LOG ( "%x %x [Deadline alarm]", kthread, test );

	if( test == kthread )
	{
		EDF_LOG ( "%x [Waked, but too late]", kthread );
		kthread_set_syscall_retval ( kthread, EXIT_FAILURE );
		kthread_move_to_ready ( kthread, LAST );

		if ( tsched->params.edf.flags & EDF_TERMINATE )
		{
			EDF_LOG ( "%x [EDF_TERMINATE]", kthread );
			ktimer_delete ( tsched->params.edf.period_alarm );
			tsched->params.edf.period_alarm = NULL;
			ktimer_delete ( tsched->params.edf.deadline_alarm );
			tsched->params.edf.deadline_alarm = NULL;
			kthread_set_errno ( kthread, ETIMEDOUT );
			kthread_exit ( kthread, NULL, TRUE );
		}
		else {
			edf_schedule (ksched);
		}
	}
	else {
		/*
		 * thread is not in edf.wait queue, but might be running or its
		 * blocked - it is probable (almost certain) that it missed
		 * deadline
		 */
		EDF_LOG ( "%x [Not in edf.wait. Missed deadline?]", kthread );

		if ( edf_check_deadline ( kthread ) )
		{
			/* what to do if its missed? kill thread? */
			if ( tsched->params.edf.flags & EDF_TERMINATE )
			{
				EDF_LOG ( "%x [EDF_TERMINATE]", kthread );
				ktimer_delete (tsched->params.edf.period_alarm);
				tsched->params.edf.period_alarm = NULL;
				ktimer_delete (
					tsched->params.edf.deadline_alarm );
				tsched->params.edf.deadline_alarm = NULL;
				kthread_set_errno ( kthread, ETIMEDOUT );
				kthread_exit ( kthread, NULL, TRUE );
			}
			else if ( tsched->params.edf.flags & EDF_CONTINUE )
			{
				/* continue as deadline is not missed */
				EDF_LOG ( "%x [EDF_CONTINUE]", kthread );
			}
			else if ( tsched->params.edf.flags & EDF_SKIP )
			{
				/* skip deadline */
				/* set times for next period */
				EDF_LOG ( "%x [EDF_SKIP]", kthread );

				time_add ( &tsched->params.edf.next_run,
					   &tsched->params.edf.period );
				tsched->params.edf.active_deadline =
					tsched->params.edf.next_run;
				time_add (
					&tsched->params.edf.active_deadline,
					&tsched->params.edf.relative_deadline );

				if ( kthread == ksched->params.edf.active )
					ksched->params.edf.active = NULL;

				/* rearm one-shot absolute deadline timer */
				TIME_RESET ( &alarm.it_interval );
				alarm.it_value =
					tsched->params.edf.active_deadline;
				ktimer_settime (
					tsched->params.edf.deadline_alarm,
					TIMER_ABSTIME, &alarm, NULL );

				/* rearm periodic timer for next activation */
				alarm.it_interval = tsched->params.edf.period;
				alarm.it_value = tsched->params.edf.next_run;
				ktimer_settime (
					tsched->params.edf.period_alarm,
					TIMER_ABSTIME, &alarm, NULL );

				kthread_enqueue (kthread,
						 &ksched->params.edf.ready);
				edf_schedule (ksched);
			}
		}
		/* moved 1 tab left for readability */
	}
}
/*!
 * Cancel thread (or restore it to previous state)
 * \param kthread Thread descriptor
 * \param exit_status Status the thread exits with
 * \param force TRUE => destroy all saved states; FALSE => restore previous
 * \return EXIT_SUCCESS, or ESRCH when descriptor looks corrupted
 */
int kthread_exit ( kthread_t *kthread, void *exit_status, int force )
{
	kthread_state_t *prev;
	int restored = FALSE;
	kthread_t *released;
	kthread_q *q;
	void **p;

	ASSERT ( kthread );

	/* pop saved states; with 'force' (cancel) destroy them all */
	do {
		prev = list_get ( &kthread->states, FIRST );
		if ( prev )
			restored = kthread_restore_state ( kthread );
	}
	while ( prev && force ); /* if cancel is called, destroy all states */

	if ( restored && !force ) /* restored to previous state */
	{
		if ( kthread == active_thread ) {
			kthread->state.state = THR_STATE_ACTIVE;
			arch_switch_to_thread ( NULL,
						&kthread->state.context );
		}
		else {
			ASSERT (FALSE); /* hum, rethink this */
			/* move to ready? */
		}
		kthreads_schedule ();
		return EXIT_SUCCESS;
	}

	if ( kthread->state.state == THR_STATE_PASSIVE )
		return EXIT_SUCCESS; /* thread is already finished */

	/* detach thread from wherever its current state keeps it */
	if ( kthread->state.state == THR_STATE_READY )
	{
		/* remove target 'thread' from its queue */
		if ( !kthread_remove_from_ready ( kthread ) )
			ASSERT ( FALSE );
	}
	else if ( kthread->state.state == THR_STATE_WAIT )
	{
		/* remove target 'thread' from its queue */
		if ( !kthreadq_remove ( kthread->queue, kthread ) )
			ASSERT ( FALSE );
	}
	else if ( kthread->state.state == THR_STATE_SUSPENDED )
	{
		/* cancellation routine */
		if ( kthread->state.cancel_suspend_handler )
			kthread->state.cancel_suspend_handler ( kthread,
				kthread->state.cancel_suspend_param );
	}
	else if ( kthread->state.state == THR_STATE_ACTIVE )
	{
		if ( kthread != active_thread )
			return ESRCH; /* thread descriptor corrupted ! */
	}
	else {
		return ESRCH; /* thread descriptor corrupted ! */
	}

	kthread->state.state = THR_STATE_PASSIVE;
	kthread->state.exit_status = exit_status;

	/* remove it from its scheduler */
	ksched2_thread_remove ( kthread );

	/* any thread waiting on this? */
	q = &kthread->join_queue;
	while ( (released = kthreadq_remove ( q, NULL )) != NULL )
	{
		/* save exit status to all waiting threads */
		p = kthread_get_private_param ( released );
		if ( p )
			*p = exit_status;
		kthread_move_to_ready ( released, LAST );
		kthread->ref_cnt--;
	}

	/* defer removing thread resources until last moment */
	kthread->state.flags |= THR_FLAG_DELETE;

	if ( kthread == active_thread )
	{
		//active_thread = NULL;
		kthreads_schedule ();
	}

	return EXIT_SUCCESS;
}
/*!
 * Send signal to target thread
 * \param kthread Target thread
 * \param sig Signal information
 * \return EXIT_SUCCESS, EAGAIN (queued), ESRCH or ENOTSUP
 */
int ksignal_queue ( kthread_t *kthread, siginfo_t *sig )
{
	int enqueue = FALSE;
	int retval = EXIT_SUCCESS;
	int schedule = FALSE;
	ksignal_handling_t *sh;
	sigaction_t *act;
	void (*func) (kthread_t *, void *), *param;
	kprocess_t *proc;
	siginfo_t *us;
	param_t param1, param2, param3;
	ksiginfo_t *ksig;

	ASSERT ( kthread );
	ASSERT ( kthread_check_kthread ( kthread ) );
	ASSERT ( sig->si_signo > 0 && sig->si_signo <= SIGMAX );

	if ( !kthread_is_alive ( kthread ) )
		return ESRCH;

	sh = kthread_get_sigparams ( kthread );

	/* is thread suspended and waits for this signal? */
	if ( kthread_is_suspended ( kthread, (void **) &func, &param ) )
	{
		if ( ((void *) func) == ((void *) ksignal_received_signal) )
		{
			/* thread is waiting for signal */
			if ( !ksignal_received_signal ( kthread, sig ) )
			{
				/* waited for this signal */
				/* should continue with signal handler also? */
				/* do not process this signal further */
				return EXIT_SUCCESS;
			}
		}
		/* else {
		 * thread is waiting for something else;
		 * deal with this later (in next "if")
		 * } */
	}

	/* if signal is not masked in thread signal mask, deliver signal */
	if ( !sigtestset ( sh->mask, sig->si_signo ) )
	{
		act = &sh->act[sig->si_signo];

		if ( act->sa_flags != SA_SIGINFO )
			return ENOTSUP; /* not supported without SA_SIGINFO! */

		if (	act->sa_sigaction == SIG_ERR ||
			act->sa_sigaction == SIG_DFL ||
			act->sa_sigaction == SIG_IGN ||
			act->sa_sigaction == SIG_HOLD )
		{
			return ENOTSUP; /* not yet supported */
		}

		/* current implementation
		 * - if thread is active or ready
		 *   -- save old context in list
		 *   -- create new context
		 * - else
		 *   -- enqueue signal or cancel wait state (todo)
		 *
		 * on sys__exit check if its handler or thread!!!
		 */
		if ( !kthread_is_ready ( kthread ) )
		{
			void (*func) (kthread_t *, void *), *param;

			if ( kthread_is_suspended ( kthread, (void **) &func,
						    &param ) )
			{
				/*
				 * thread is suspended on something
				 * else; interrupt it or not?
				 *
				 * -handle interruption (kernel part)
				 * -interrupt it (resume)
				 * -process signal
				 *
				 * to do above just move thread to
				 * ready, set errno & retval
				 */
				if ( func )
					func ( kthread, param );

				kthread_move_to_ready ( kthread, LAST );
				kthread_set_errno ( kthread, EINTR );
				kthread_set_syscall_retval(kthread,EXIT_FAILURE);

				/* thread is unsuspended, but signal
				 * handler will be added first */
				schedule = TRUE;
			}
			else {
				/* what else? this is error */
				ASSERT ( FALSE );
			}
		}

		/* copy sig to user space */
		proc = kthread_get_process ( kthread );
		us = ffs_alloc(proc->stack_pool, sizeof (siginfo_t));
		ASSERT (us); /*if ( !us ) return ENOMEM;*/
		*us = *sig;

		/* run handler in a new thread state stacked on the current */
		kthread_create_new_state ( kthread, act->sa_sigaction,
					   K2U_GET_ADR ( us, proc ),
					   NULL, HANDLER_STACK_SIZE, TRUE );

		/* free the user-space copy when the handler state exits */
		param1.p_ptr = proc->stack_pool;
		param2.p_ptr = us;
		param3.p_ptr = NULL;
		kthread_add_cleanup ( kthread, kthread_param_free,
				      param1, param2, param3 );

		/* mask signal in thread mask */
		sigaddset ( sh->mask, sig->si_signo );
		/* mask additional signals in thread mask */
		sigaddsets ( sh->mask, &act->sa_mask );
	}
	else {
		enqueue = TRUE;
	}

	if ( enqueue )
	{
		/* mask signal in thread mask */
		sigaddset ( sh->mask, sig->si_signo );

		/* add signal to list of pending signals */
		ksig = kmalloc ( sizeof (ksiginfo_t) );
		ksig->siginfo = *sig;
		list_append ( &sh->pending_signals, ksig, &ksig->list );
		/* list_sort_add ( &sh->pending_signals, ksig, &ksig->list,
				   ksignal_compare ); */

		retval = EAGAIN;
	}

	if ( schedule )
		kthreads_schedule ();

	return retval;
}
/*!
 * Send a message to a message queue
 * \param p Packed syscall parameters: mqdes, msg_ptr, msg_len, msg_prio
 * \param sender Thread performing the send
 * \return EXIT_SUCCESS or a positive error number
 */
static int kmq_send ( void *p, kthread_t *sender )
{
	mqd_t *mqdes;
	char *msg_ptr;
	size_t msg_len;
	uint msg_prio;

	kmq_queue_t *kq_queue;
	kobject_t *kobj;
	kmq_msg_t *kmq_msg;
	kthread_t *kthread;
	int retval;

	/* unpack packed syscall parameters */
	mqdes = *( (mqd_t **) p );	p += sizeof (mqd_t *);
	msg_ptr = *( (char **) p );	p += sizeof (char *);
	msg_len = *( (size_t *) p );	p += sizeof (size_t);
	msg_prio = *( (uint *) p );

	ASSERT_ERRNO_AND_EXIT ( mqdes && msg_ptr, EINVAL );
	ASSERT_ERRNO_AND_EXIT ( msg_prio <= MQ_PRIO_MAX, EINVAL );

	/* validate descriptor against kernel object and queue lists */
	kobj = mqdes->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj, EBADF );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj->list ),
				EBADF );
	kq_queue = kobj->kobject;
	ASSERT_ERRNO_AND_EXIT ( kq_queue->id == mqdes->id, EBADF );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kmq_queue, &kq_queue->list ),
				EBADF );

	if ( kq_queue->attr.mq_curmsgs >= kq_queue->attr.mq_maxmsg )
	{
		/* queue full */
		if ( (kobj->flags & O_NONBLOCK) )
			return EAGAIN;

		/* block thread; a future receiver reruns this send and
		 * sets the syscall return value for us */
		kthread_enqueue ( sender, &kq_queue->send_q, 1, NULL, NULL );
		kthreads_schedule ();

		return EAGAIN;
	}

	if ( msg_len > kq_queue->attr.mq_msgsize )
		return EMSGSIZE;

	kmq_msg = kmalloc ( sizeof (kmq_msg_t) + msg_len );
	ASSERT_ERRNO_AND_EXIT ( kmq_msg, ENOMEM );

	/* create message */
	kmq_msg->msg_size = msg_len;
	kmq_msg->msg_prio = msg_prio;
	memcpy ( &kmq_msg->msg_data[0], msg_ptr, msg_len );

	/* keep message list sorted by message priority */
	list_sort_add ( &kq_queue->msg_list, kmq_msg, &kmq_msg->list,
			( int (*)(void *, void *) ) cmp_mq_msg );

	kq_queue->attr.mq_curmsgs++;

	/* is there a blocked receiver? */
	if ( (kthread = kthreadq_remove ( &kq_queue->recv_q, NULL )) )
	{
		/* "save" sender thread */
		kthread_move_to_ready ( sender, FIRST );
		kthread_set_errno ( sender, EXIT_SUCCESS );
		kthread_set_syscall_retval ( sender, EXIT_SUCCESS );

		/* unblock receiver: rerun its receive with its parameters */
		kthread_set_active ( kthread ); /* temporary */
		p = arch_syscall_get_params ( kthread_get_context (kthread) );
		retval = kmq_receive ( p, kthread );

		if ( retval >= 0 )
		{
			kthread_set_errno ( kthread, EXIT_SUCCESS );
			kthread_set_syscall_retval ( kthread, retval );
		}
		else {
			kthread_set_errno ( kthread, -retval );
			kthread_set_syscall_retval ( kthread, EXIT_FAILURE );
		}

		kthreads_schedule ();
	}

	return EXIT_SUCCESS;
}
/*!
 * EDF scheduler (global variant): pick the EDF thread with the earliest
 * active deadline as edf.active and promote it into the ready queue
 * \return 1 if a new active EDF thread was selected, 0 otherwise
 */
static int k_edf_schedule ()
{
	kthread_t *first, *next, *edf_active;
	kthread_sched_data_t *sch_first, *sch_next;
	ksched_t *gsched = ksched_get ( SCHED_EDF );
	int retval = 0;

	edf_active = gsched->params.edf.active;
	first = kthreadq_get ( &gsched->params.edf.ready );

	LOG( DEBUG, "%x [active]", edf_active );
	LOG( DEBUG, "%x [first]", first );
	//LOG( DEBUG, "%x [next]", next );

	if ( !first )
		return 0; /* no threads in edf.ready queue, edf.active unch. */

	/* search for earliest deadline among edf.active (if any)
	 * and all members of edf.ready */
	if ( edf_active ) {
		next = first;
		first = edf_active;
		LOG( DEBUG, "%x [next]", kthreadq_get_next ( next ) );
	}
	else {
		next = kthreadq_get_next ( first );
		LOG( DEBUG, "%x [next]", next );
	}

	while ( first && next )
	{
		sch_first = kthread_get_sched_param ( first );
		sch_next = kthread_get_sched_param ( next );
		if ( time_cmp ( &sch_first->params.edf.active_deadline,
			&sch_next->params.edf.active_deadline ) > 0 )
		{
			first = next;
		}
		next = kthreadq_get_next ( next );
	}

	/* 'first' now has earliest deadline; switch only if it changed */
	if ( first && first != edf_active )
	{
		next = kthreadq_remove ( &gsched->params.edf.ready, first );
		LOG ( DEBUG, "%x removed, %x is now first", next,
		      kthreadq_get ( &gsched->params.edf.ready ) );

		if ( edf_active )
		{
			LOG( DEBUG, "%x=>%x [EDF_SCHED_PREEMPT]",
			     edf_active, first );
			/*
			 * change active EDF thread:
			 * -remove it from active/ready list
			 * -put it into edf.ready list
			 */
			if ( kthread_is_ready (edf_active) )
			{
				if ( !kthread_is_active (edf_active) )
				{
					kthread_remove_from_ready (edf_active);
					/*
					 * set "deactivated" flag, don't need
					 * another call to "edf_schedule"
					 */
				}
				else {
					kthread_get_sched_param (edf_active)
						->activated = 0;
				}
				kthread_enqueue ( edf_active,
						  &gsched->params.edf.ready );
			}
			/* else = thread is blocked - leave it there */
		}

		gsched->params.edf.active = first;
		LOG( DEBUG, "%x [new active]", first );
		kthread_move_to_ready ( first, LAST );
		retval = 1;
	}

	return retval;
}