/*!
 * Take a thread out of the running state, triggered when:
 *  1. a higher priority thread becomes active
 *  2. this thread's time slice expired
 *  3. this thread blocks on some queue
 */
static int rr_thread_deactivate ( kthread_t *kthread )
{
	time_t now;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( tsched->sched_policy );

	/* remainder == 0 means the slice fully expired and the thread is
	 * already enqueued in the ready queue -- nothing to recalculate */
	if ( tsched->params.rr.remainder.sec + tsched->params.rr.remainder.nsec == 0 )
		return 0;

	/* "slice interrupted": recalculate how much of the slice is left */
	k_get_time ( &now );
	time_sub ( &tsched->params.rr.slice_end, &now );
	tsched->params.rr.remainder = tsched->params.rr.slice_end;

	if ( kthread_is_ready ( kthread ) )
	{
		/* if the remainder is too small, go to the back of the
		 * ready queue; otherwise keep the front position */
		if ( time_cmp ( &tsched->params.rr.remainder,
				&gsched->params.rr.threshold ) <= 0 )
			kthread_move_to_ready ( kthread, LAST );
		else
			kthread_move_to_ready ( kthread, FIRST );
	}

	return 0;
}
int k_delayed_send(int destination_proc_id, void *message_envelope, int delay) { msg_metadata *metadata = reserve_message_metadata(message_envelope); if (metadata == NULL) { return RTX_ERR; } if (message_envelope == NULL) { uart1_put_string("k_send_message is NULL. Bad!\n"); return RTX_ERR; } __disable_irq(); metadata->sender_pid = gp_current_process->pid; metadata->destination_pid = destination_proc_id; metadata->send_time = k_get_time() + delay; if (g_delayed_messages_count == (BLOCK_SIZE / sizeof(void *))) { // Overflow is... unlikely. ASSERT(0) } g_delayed_messages[g_delayed_messages_count++] = message_envelope; __enable_irq(); return RTX_OK; }
/*! Begin a time slice for a thread (or continue an interrupted one) */
static int rr_thread_activate ( kthread_t *kthread )
{
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( tsched->sched_policy );

	/* replenish the remainder when what is left falls under the threshold */
	if ( time_cmp ( &tsched->params.rr.remainder,
			&gsched->params.rr.threshold ) <= 0 )
		time_add ( &tsched->params.rr.remainder,
			   &gsched->params.rr.time_slice );

	/* record when this slice starts ... */
	k_get_time ( &tsched->params.rr.slice_start );

	/* ... and compute when it should end */
	tsched->params.rr.slice_end = tsched->params.rr.slice_start;
	time_add ( &tsched->params.rr.slice_end, &tsched->params.rr.remainder );

	/* program the scheduler alarm to fire at the end of the slice */
	gsched->params.rr.alarm.exp_time = tsched->params.rr.slice_end;
	gsched->params.rr.alarm.param = kthread;
	k_alarm_set ( gsched->params.rr.rr_alarm, &gsched->params.rr.alarm );

	return 0;
}
/*!
 * Get current time (syscall handler)
 * \param time Pointer where to store time
 * \return status
 */
int sys__get_time ( time_t *time )
{
	/* reject a NULL destination pointer */
	ASSERT_ERRNO_AND_EXIT ( time, E_INVALID_HANDLE );

	/* read the kernel clock with interrupts disabled so the value
	 * cannot change mid-read */
	disable_interrupts ();
	k_get_time ( time );
	enable_interrupts ();

	EXIT ( SUCCESS );
}
/*!
 * Get current time (syscall handler)
 * \param p Syscall parameter block; its first element is the caller's
 *          time_t* destination
 * \returns status
 */
int sys__get_time ( void *p )
{
	time_t *time;

	/* unpack the destination pointer from the parameter block */
	time = *( (void **) p );
	ASSERT_ERRNO_AND_EXIT ( time, E_INVALID_HANDLE );

	k_get_time ( time );

	EXIT ( SUCCESS );
}
//int sys__get_time ( time_t *time ) int sys__get_time ( void *p ) { time_t *time; time = *( (void **) p ); ASSERT_ERRNO_AND_EXIT ( time, E_INVALID_HANDLE ); time = U2K_GET_ADR ( time, k_get_active_process() ); k_get_time ( time ); EXIT ( SUCCESS ); }
/*!
 * Check whether the task has overrun its deadline at its start.
 * \param kthread Thread to check
 * \return 0 when still within the deadline, -1 on overrun
 */
static int edf_check_deadline ( kthread_t *kthread )
{
	time_t now;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );

	k_get_time ( &now );

	/* still on time? */
	if ( time_cmp ( &now, &tsched->params.edf.active_deadline ) <= 0 )
		return 0;

	/* "now" is past "active_deadline" */
	LOG( DEBUG, "%x [DEADLINE OVERRUN]", kthread );

	return -1;
}
/*!
 * Set/update EDF scheduling parameters for a thread and perform the
 * matching state transition; params->edf.flags selects the action:
 * EDF_SET (define period/deadline), EDF_WAIT (wait for next period),
 * EDF_EXIT (leave EDF scheduling, fall back to SCHED_FIFO).
 * \param kthread Thread descriptor
 * \param params  Requested EDF parameters
 * \return 0 on success, -1 on deadline overrun
 */
static int edf_set_thread_sched_parameters (kthread_t *kthread, sched_t *params)
{
	time_t now;
	alarm_t alarm;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( SCHED_EDF );

	/* this thread may no longer be considered the active EDF thread */
	if ( gsched->params.edf.active == kthread )
		gsched->params.edf.active = NULL;

	k_get_time ( &now );

	if ( params->edf.flags & EDF_SET )
	{
		/*LOG( DEBUG, "%x [SET]", kthread ); */
		/* store the thread's EDF parameters */
		tsched->params.edf.period = params->edf.period;
		tsched->params.edf.relative_deadline = params->edf.deadline;
		tsched->params.edf.flags = params->edf.flags;

		/* set periodic alarm */
		tsched->params.edf.next_run = now;
		time_add ( &tsched->params.edf.next_run, &params->edf.period );

		edf_arm_deadline ( kthread );
		edf_arm_period ( kthread );

		/*
		 * adjust "next_run" and "deadline" for "0" period
		 * - first "edf_wait" will set correct values for first period
		 */
		tsched->params.edf.next_run = now;
		time_sub ( &tsched->params.edf.next_run, &params->edf.period );
		tsched->params.edf.active_deadline = now;
		time_add ( &tsched->params.edf.active_deadline, &params->edf.deadline );
	}
	else if ( params->edf.flags & EDF_WAIT )
	{
		/* bail out if the previous period's deadline was overrun */
		if ( edf_check_deadline ( kthread ) )
			return -1;

		/* set times for next period */
		if ( time_cmp ( &now, &tsched->params.edf.next_run ) > 0 )
		{
			/* advance next_run and deadline by one period */
			time_add ( &tsched->params.edf.next_run,
				   &tsched->params.edf.period );
			tsched->params.edf.active_deadline = tsched->params.edf.next_run;
			time_add ( &tsched->params.edf.active_deadline,
				   &tsched->params.edf.relative_deadline );

			if ( kthread == gsched->params.edf.active )
				gsched->params.edf.active = NULL;

			/* set (separate) alarm for deadline
			 * (one-shot: period zeroed, fires at active_deadline) */
			alarm.action = edf_deadline_timer;
			alarm.param = kthread;
			alarm.flags = 0;
			alarm.period.sec = alarm.period.nsec = 0;
			alarm.exp_time = tsched->params.edf.active_deadline;
			k_alarm_set ( tsched->params.edf.edf_deadline_alarm, &alarm );
		}

		/* is task ready for execution, or must wait until next period */
		if ( time_cmp ( &tsched->params.edf.next_run, &now ) > 0 )
		{
			/* wait till "next_run" */
			LOG( DEBUG, "%x [EDF WAIT]", kthread );
			kthread_enqueue ( kthread, &gsched->params.edf.wait );
			kthreads_schedule (); /* will call edf_schedule() */
		}
		else {
			/* "next_run" has already come,
			 * activate task => move it to "EDF ready tasks" */
			LOG( DEBUG, "%x [EDF READY]", kthread );
			LOG( DEBUG, "%x [1st READY]",
			     kthreadq_get ( &gsched->params.edf.ready ) );
			kthread_enqueue ( kthread, &gsched->params.edf.ready );
			kthreads_schedule (); /* will call edf_schedule() */
		}
	}
	else if ( params->edf.flags & EDF_EXIT )
	{
		if ( kthread == gsched->params.edf.active )
			gsched->params.edf.active = NULL;

		//LOG( DEBUG, "%x [EXIT]", kthread );
		if ( edf_check_deadline ( kthread ) )
		{
			LOG( DEBUG, "%x [EXIT-error]", kthread );
			return -1;
		}
		LOG( DEBUG, "%x [EXIT-normal]", kthread );

		/* cancel this thread's period and deadline alarms */
		if ( tsched->params.edf.edf_period_alarm )
			k_alarm_remove ( tsched->params.edf.edf_period_alarm );
		if ( tsched->params.edf.edf_deadline_alarm )
			k_alarm_remove ( tsched->params.edf.edf_deadline_alarm );

		/* leave EDF: fall back to FIFO scheduling */
		tsched->sched_policy = SCHED_FIFO;
		LOG( DEBUG, "%x [EXIT]", kthread );
		if ( k_edf_schedule () )
		{
			LOG( DEBUG, "%x [EXIT]", kthread );
			kthreads_schedule (); /* will NOT call edf_schedule() */
		}
		LOG( DEBUG, "%x [EXIT]", kthread );
	}

	return 0;
}