/*! * Destroy semaphore object * \param sem Semaphore descriptor (user level descriptor) * \return 0 if successful, -1 otherwise and appropriate error number is set */ int sys__sem_destroy ( void *p ) { sem_t *sem; ksem_t *ksem; kobject_t *kobj; sem = *( (sem_t **) p ); ASSERT_ERRNO_AND_EXIT ( sem, EINVAL ); kobj = sem->ptr; ASSERT_ERRNO_AND_EXIT ( kobj, EINVAL ); ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj->list ), EINVAL ); ksem = kobj->kobject; ASSERT_ERRNO_AND_EXIT ( ksem && ksem->id == sem->id, EINVAL ); ASSERT_ERRNO_AND_EXIT (kthreadq_get (&ksem->queue) == NULL, ENOTEMPTY); ksem->ref_cnt--; /* additional cleanup here (e.g. if semaphore is shared leave it) */ if ( ksem->ref_cnt ) EXIT2 ( EBUSY, EXIT_FAILURE ); kfree_kobject ( kobj ); sem->ptr = NULL; sem->id = 0; EXIT2 ( EXIT_SUCCESS, EXIT_SUCCESS ); }
/*! * Unlock mutex object * \param mutex Mutex descriptor (user level descriptor) * \return 0 if successful, -1 otherwise and appropriate error number is set */ int sys__pthread_mutex_unlock ( void *p ) { pthread_mutex_t *mutex; kpthread_mutex_t *kmutex; kobject_t *kobj; mutex = *( (pthread_mutex_t **) p ); ASSERT_ERRNO_AND_EXIT ( mutex, EINVAL ); kobj = mutex->ptr; ASSERT_ERRNO_AND_EXIT ( kobj, EINVAL ); ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj->list ), EINVAL ); kmutex = kobj->kobject; ASSERT_ERRNO_AND_EXIT ( kmutex && kmutex->id == mutex->id, EINVAL ); if ( kmutex->owner != kthread_get_active() ) { SET_ERRNO ( EPERM ); return EXIT_FAILURE; } SET_ERRNO ( EXIT_SUCCESS ); kmutex->owner = kthreadq_get ( &kmutex->queue ); if ( kmutex->owner ) { kthreadq_release ( &kmutex->queue ); kthreads_schedule (); } return EXIT_SUCCESS; }
/*! Find and return highest priority thread in ready list (NULL if none) */
static kthread_t *get_first_ready ()
{
	int word, prio;

	/* scan bitmask words from the highest priority group downwards */
	for ( word = ready.mask_len - 1; word >= 0; word-- )
	{
		if ( !ready.mask[word] )
			continue; /* no ready thread in this group */

		/* most significant set bit = highest ready priority */
		prio = word * UINT_SIZE + msb_index ( ready.mask[word] );

		return kthreadq_get ( &ready.rq[prio] );
	}

	return NULL;
}
/*!
 * Wait on conditional variable (atomically release mutex and block)
 * \param p Pointer to syscall parameter block:
 *          { pthread_cond_t *cond, pthread_mutex_t *mutex }
 *          cond = conditional variable descriptor (user level descriptor),
 *          mutex = mutex descriptor (user level descriptor), must be held
 *          by the calling thread
 * \return 0 if successful, -1 otherwise and appropriate error number is set
 */
int sys__pthread_cond_wait ( void *p )
{
	pthread_cond_t *cond;
	pthread_mutex_t *mutex;
	kpthread_cond_t *kcond;
	kpthread_mutex_t *kmutex;
	kobject_t *kobj_cond, *kobj_mutex;
	int retval = EXIT_SUCCESS;

	/* unpack the two pointer parameters from the parameter block;
	 * NOTE(review): arithmetic on void* is a GCC extension — confirm
	 * project always builds with gcc/clang */
	cond = *( (pthread_cond_t **) p );
	p += sizeof (pthread_cond_t *);
	mutex = *( (pthread_mutex_t **) p );
	ASSERT_ERRNO_AND_EXIT ( cond && mutex, EINVAL );

	/* validate cond descriptor: live kernel object with matching id */
	kobj_cond = cond->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj_cond, EINVAL );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj_cond->list ),
				EINVAL );
	kcond = kobj_cond->kobject;
	ASSERT_ERRNO_AND_EXIT ( kcond && kcond->id == cond->id, EINVAL );

	/* validate mutex descriptor the same way */
	kobj_mutex = mutex->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj_mutex, EINVAL );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj_mutex->list),
				EINVAL );
	kmutex = kobj_mutex->kobject;
	ASSERT_ERRNO_AND_EXIT ( kmutex && kmutex->id == mutex->id, EINVAL );

	/* caller must hold the mutex it is releasing */
	ASSERT_ERRNO_AND_EXIT ( kmutex->owner == kthread_get_active(), EPERM );

	SET_ERRNO ( EXIT_SUCCESS );

	/* move thread in conditional variable queue */
	kthread_enqueue ( NULL, &kcond->queue, 0, NULL, NULL );

	/* save reference to mutex object (used when this thread is woken) */
	kthread_set_private_param ( NULL, kobj_mutex );

	/* release mutex: pass ownership to first waiter, if any */
	kmutex->owner = kthreadq_get ( &kmutex->queue );
	if ( kmutex->owner )
		kthreadq_release ( &kmutex->queue );

	/* current thread is now blocked on cond queue; pick next to run */
	kthreads_schedule ();

	return retval;
}
/*!
 * Wait on conditional variable (atomically release mutex and block)
 * \param cond Conditional variable descriptor (user level descriptor)
 * \param mutex Mutex descriptor (user level descriptor); must be held by
 *              the calling thread
 * \return 0 if successful, -1 otherwise and appropriate error number is set
 */
int sys__pthread_cond_wait ( pthread_cond_t *cond, pthread_mutex_t *mutex )
{
	kpthread_cond_t *kcond;
	kpthread_mutex_t *kmutex;
	kobject_t *kobj_cond, *kobj_mutex;

	SYS_ENTRY();

	ASSERT_ERRNO_AND_EXIT ( cond && mutex, EINVAL );

	/* validate cond descriptor: live kernel object with matching id */
	kobj_cond = cond->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj_cond, EINVAL );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj_cond->list ),
				EINVAL );
	kcond = kobj_cond->kobject;
	ASSERT_ERRNO_AND_EXIT ( kcond && kcond->id == cond->id, EINVAL );

	/* validate mutex descriptor the same way */
	kobj_mutex = mutex->ptr;
	ASSERT_ERRNO_AND_EXIT ( kobj_mutex, EINVAL );
	ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj_mutex->list),
				EINVAL );
	kmutex = kobj_mutex->kobject;
	ASSERT_ERRNO_AND_EXIT ( kmutex && kmutex->id == mutex->id, EINVAL );

	/* caller must hold the mutex it is releasing */
	ASSERT_ERRNO_AND_EXIT ( kmutex->owner == kthread_get_active(), EPERM );

	/* set success before blocking - thread resumes with these values */
	kthread_set_errno ( NULL, EXIT_SUCCESS );
	kthread_set_syscall_retval ( NULL, EXIT_SUCCESS );

	/* move thread in conditional variable queue */
	kthread_enqueue ( NULL, &kcond->queue );

	/* save reference to mutex object (used when this thread is woken) */
	kthread_set_private_param ( NULL, kobj_mutex );

	/* release mutex: pass ownership to first waiter, if any */
	kmutex->owner = kthreadq_get ( &kmutex->queue );
	if ( kmutex->owner )
		kthreadq_release ( &kmutex->queue );

	/* current thread is now blocked on cond queue; pick next to run */
	kthreads_schedule ();

	SYS_EXIT ( kthread_get_errno(NULL), kthread_get_syscall_retval(NULL) );
}
/*! * Increment (lock) semaphore value by 1 (or unblock one thread that is blocked) * \param sem Semaphore descriptor (user level descriptor) * \return 0 if successful, -1 otherwise and appropriate error number is set */ int sys__sem_post ( void *p ) { sem_t *sem; ksem_t *ksem; kobject_t *kobj; kthread_t *kthread, *released; sem = *( (sem_t **) p ); ASSERT_ERRNO_AND_EXIT ( sem, EINVAL ); kthread = kthread_get_active (); kobj = sem->ptr; ASSERT_ERRNO_AND_EXIT ( kobj, EINVAL ); ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj->list ), EINVAL ); ksem = kobj->kobject; ASSERT_ERRNO_AND_EXIT ( ksem && ksem->id == sem->id, EINVAL ); kthread_set_errno ( kthread, EXIT_SUCCESS ); released = kthreadq_get ( &ksem->queue ); /* first to release */ if ( !released || ksem->sem_value < 0 ) { /* if initial semaphore value (set by sem_init) was negative, * semaphore will not release threads until until its value * reaches zero (small extension of POSIX semaphore) */ ksem->sem_value++; } else { kthreadq_release ( &ksem->queue ); kthreads_schedule (); } return EXIT_SUCCESS; }
/*!
 * Remove given thread (its descriptor) from ready threads
 * \param kthread Thread descriptor to remove
 * \return kthread when removed, NULL when not found (or kthread is NULL)
 */
kthread_t *kthread_remove_from_ready ( kthread_t *kthread )
{
	int prio, word, bit;

	if ( !kthread )
		return NULL;

	prio = kthread_get_prio ( kthread );

	/* thread must actually be present in its priority's ready queue */
	if ( kthreadq_remove ( &ready.rq[prio], kthread ) != kthread )
		return NULL;

	/* queue for this priority became empty => clear its mask bit */
	if ( kthreadq_get ( &ready.rq[prio] ) == NULL )
	{
		word = prio / UINT_SIZE;
		bit = prio % UINT_SIZE;
		ready.mask[word] &= ~( (uint) ( 1 << bit ) );
	}

	return kthread;
}
/*! * Destroy mutex object * \param mutex Mutex descriptor (user level descriptor) * \return 0 if successful, -1 otherwise and appropriate error number is set */ int sys__pthread_mutex_destroy ( void *p ) { pthread_mutex_t *mutex; kpthread_mutex_t *kmutex; kobject_t *kobj; mutex = *( (pthread_mutex_t **) p ); ASSERT_ERRNO_AND_EXIT ( mutex, EINVAL ); kobj = mutex->ptr; ASSERT_ERRNO_AND_EXIT ( kobj, EINVAL ); ASSERT_ERRNO_AND_EXIT ( list_find ( &kobjects, &kobj->list ), EINVAL ); kmutex = kobj->kobject; ASSERT_ERRNO_AND_EXIT ( kmutex && kmutex->id == mutex->id, EINVAL ); ASSERT_ERRNO_AND_EXIT ( kmutex->owner == NULL /* mutex locked! */ && kthreadq_get ( &kmutex->queue ) == NULL, ENOTEMPTY ); kmutex->ref_cnt--; /* additional cleanup here (e.g. if mutex is shared leave it) */ if ( kmutex->ref_cnt ) EXIT2 ( EBUSY, EXIT_FAILURE ); kfree_kobject ( kobj ); mutex->ptr = NULL; mutex->id = 0; EXIT2 ( EXIT_SUCCESS, EXIT_SUCCESS ); }
/*!
 * Select EDF thread with the earliest active deadline and make it active
 * (possibly preempting the currently active EDF thread).
 * \return 1 when a new EDF thread was activated, 0 otherwise
 */
static int k_edf_schedule ()
{
	kthread_t *first, *next, *edf_active;
	kthread_sched_data_t *sch_first, *sch_next;
	ksched_t *gsched = ksched_get ( SCHED_EDF );
	int retval = 0;

	edf_active = gsched->params.edf.active;
	first = kthreadq_get ( &gsched->params.edf.ready );

	LOG( DEBUG, "%x [active]", edf_active );
	LOG( DEBUG, "%x [first]", first );
	//LOG( DEBUG, "%x [next]", next );

	if ( !first )
		return 0; /* no threads in edf.ready queue, edf.active unch. */

	/* start the earliest-deadline scan from the currently active thread
	 * (if any), so it competes against all queued EDF threads */
	if ( edf_active ) {
		next = first;
		first = edf_active;
		LOG( DEBUG, "%x [next]", kthreadq_get_next ( next ) );
	}
	else {
		next = kthreadq_get_next ( first );
		LOG( DEBUG, "%x [next]", next );
	}

	/* linear scan: keep in "first" the thread with earliest deadline */
	while ( first && next )
	{
		sch_first = kthread_get_sched_param ( first );
		sch_next = kthread_get_sched_param ( next );

		if ( time_cmp ( &sch_first->params.edf.active_deadline,
			&sch_next->params.edf.active_deadline ) > 0 )
		{
			first = next;
		}

		next = kthreadq_get_next ( next );
	}

	/* a different thread won => preempt current active EDF thread */
	if ( first && first != edf_active )
	{
		next = kthreadq_remove ( &gsched->params.edf.ready, first );
		LOG ( DEBUG, "%x removed, %x is now first", next,
		      kthreadq_get ( &gsched->params.edf.ready ) );

		if ( edf_active )
		{
			LOG( DEBUG, "%x=>%x [EDF_SCHED_PREEMPT]",
			     edf_active, first );
			/*
			 * change active EDF thread:
			 * -remove it from active/ready list
			 * -put it into edf.ready list
			 */
			if ( kthread_is_ready (edf_active) )
			{
				if ( !kthread_is_active (edf_active) )
				{
					kthread_remove_from_ready (edf_active);
				}
				else {
					/*
					 * set "deactivated" flag, don't need
					 * another call to "edf_schedule"
					 */
					kthread_get_sched_param (edf_active)
						->activated = 0;
				}

				kthread_enqueue ( edf_active,
						  &gsched->params.edf.ready );
			}
			/* else = thread is blocked - leave it there */
		}

		gsched->params.edf.active = first;
		LOG( DEBUG, "%x [new active]", first );

		/* put winner into the global ready structure */
		kthread_move_to_ready ( first, LAST );
		retval = 1;
	}

	return retval;
}
/*!
 * Set EDF scheduling parameters for a thread, or process its EDF_WAIT /
 * EDF_EXIT requests.
 *
 * Fixes over previous revision: three occurrences of "&params" were
 * corrupted into the mis-encoded sequence "¶ms" (HTML-entity residue),
 * and one LOG format string was split by a stray line break — both broke
 * compilation; reconstructed the intended tokens.
 *
 * \param kthread Thread descriptor
 * \param params EDF parameters; params->edf.flags selects the operation
 *               (EDF_SET, EDF_WAIT or EDF_EXIT)
 * \return 0 if successful, -1 on deadline miss (EDF_WAIT/EDF_EXIT checks)
 */
static int edf_set_thread_sched_parameters (kthread_t *kthread, sched_t *params)
{
	time_t now;
	alarm_t alarm;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( SCHED_EDF );

	/* this thread's EDF state is being redefined: deactivate it */
	if ( gsched->params.edf.active == kthread )
		gsched->params.edf.active = NULL;

	k_get_time ( &now );

	if ( params->edf.flags & EDF_SET )
	{
		/*LOG( DEBUG, "%x [SET]", kthread ); */
		tsched->params.edf.period = params->edf.period;
		tsched->params.edf.relative_deadline = params->edf.deadline;
		tsched->params.edf.flags = params->edf.flags;

		/* set periodic alarm */
		tsched->params.edf.next_run = now;
		time_add ( &tsched->params.edf.next_run, &params->edf.period );
		edf_arm_deadline ( kthread );
		edf_arm_period ( kthread );

		/*
		 * adjust "next_run" and "deadline" for "0" period
		 * - first "edf_wait" will set correct values for first period
		 */
		tsched->params.edf.next_run = now;
		time_sub ( &tsched->params.edf.next_run, &params->edf.period );

		tsched->params.edf.active_deadline = now;
		time_add ( &tsched->params.edf.active_deadline,
			   &params->edf.deadline );
	}
	else if ( params->edf.flags & EDF_WAIT )
	{
		/* deadline already missed => report error to caller */
		if ( edf_check_deadline ( kthread ) )
			return -1;

		/* set times for next period */
		if ( time_cmp ( &now, &tsched->params.edf.next_run ) > 0 )
		{
			time_add ( &tsched->params.edf.next_run,
				   &tsched->params.edf.period );

			tsched->params.edf.active_deadline =
				tsched->params.edf.next_run;
			time_add ( &tsched->params.edf.active_deadline,
				   &tsched->params.edf.relative_deadline );

			if ( kthread == gsched->params.edf.active )
				gsched->params.edf.active = NULL;

			/* set (separate) alarm for deadline */
			alarm.action = edf_deadline_timer;
			alarm.param = kthread;
			alarm.flags = 0;
			alarm.period.sec = alarm.period.nsec = 0;
			alarm.exp_time = tsched->params.edf.active_deadline;
			k_alarm_set ( tsched->params.edf.edf_deadline_alarm,
				      &alarm );
		}

		/* is task ready for execution, or must wait till next period */
		if ( time_cmp ( &tsched->params.edf.next_run, &now ) > 0 )
		{
			/* wait till "next_run" */
			LOG( DEBUG, "%x [EDF WAIT]", kthread );
			kthread_enqueue ( kthread, &gsched->params.edf.wait );
			kthreads_schedule (); /* will call edf_schedule() */
		}
		else {
			/* "next_run" has already come,
			 * activate task => move it to "EDF ready tasks" */
			LOG( DEBUG, "%x [EDF READY]", kthread );
			LOG( DEBUG, "%x [1st READY]",
			     kthreadq_get ( &gsched->params.edf.ready ) );
			kthread_enqueue ( kthread, &gsched->params.edf.ready );
			kthreads_schedule (); /* will call edf_schedule() */
		}
	}
	else if ( params->edf.flags & EDF_EXIT )
	{
		if ( kthread == gsched->params.edf.active )
			gsched->params.edf.active = NULL;

		//LOG( DEBUG, "%x [EXIT]", kthread );
		if ( edf_check_deadline ( kthread ) ) {
			LOG( DEBUG, "%x [EXIT-error]", kthread );
			return -1;
		}
		LOG( DEBUG, "%x [EXIT-normal]", kthread );

		/* cancel outstanding period/deadline alarms */
		if ( tsched->params.edf.edf_period_alarm )
			k_alarm_remove ( tsched->params.edf.edf_period_alarm );
		if ( tsched->params.edf.edf_deadline_alarm )
			k_alarm_remove ( tsched->params.edf.edf_deadline_alarm );

		/* thread leaves EDF scheduling and falls back to FIFO */
		tsched->sched_policy = SCHED_FIFO;
		LOG( DEBUG, "%x [EXIT]", kthread );
		if ( k_edf_schedule () ) {
			LOG( DEBUG, "%x [EXIT]", kthread );
			kthreads_schedule (); /* will NOT call edf_schedule() */
		}
		LOG( DEBUG, "%x [EXIT]", kthread );
	}

	return 0;
}
/*!
 * Select EDF thread with the earliest active deadline and make it active
 * (possibly preempting the currently active EDF thread), then run the
 * global scheduler.
 * \param ksched EDF scheduler state
 * \return 0
 */
static int edf_schedule ( ksched_t *ksched )
{
	kthread_t *first, *next, *edf_active;
	kthread_sched2_t *sch_first, *sch_next, *ea;

	edf_active = ksched->params.edf.active;

	/* active thread that is no longer ready (blocked) loses its slot */
	if ( edf_active && !kthread_is_ready ( edf_active ) ) {
		ksched->params.edf.active = edf_active = NULL;
	}

	first = kthreadq_get ( &ksched->params.edf.ready );

	EDF_LOG ( "%x %x [active, first in queue]", edf_active, first );

	if ( !first ) {
		kthreads_schedule ();
		return 0; /* no threads in edf.ready queue, edf.active unch. */
	}

	/* start the earliest-deadline scan from the currently active thread
	 * (if any), so it competes against all queued EDF threads */
	if ( edf_active ) {
		next = first;
		first = edf_active;
	}
	else {
		next = kthreadq_get_next ( first );
	}

	/* linear scan: keep in "first" the thread with earliest deadline */
	while ( first && next )
	{
		sch_first = kthread_get_sched2_param ( first );
		sch_next = kthread_get_sched2_param ( next );

		if ( time_cmp ( &sch_first->params.edf.active_deadline,
			&sch_next->params.edf.active_deadline ) > 0 )
		{
			first = next;
		}

		next = kthreadq_get_next ( next );
	}

	/* a different thread won => preempt current active EDF thread */
	if ( first && first != edf_active )
	{
		next = kthreadq_remove ( &ksched->params.edf.ready, first );
		EDF_LOG ( "%x removed, %x is now first", next,
			  kthreadq_get ( &ksched->params.edf.ready ) );

		if ( edf_active )
		{
			EDF_LOG ( "%x=>%x [EDF_SCHED_PREEMPT]",
				  edf_active, first );
			/*
			 * change active EDF thread:
			 * -remove it from active/ready list
			 * -put it into edf.ready list
			 */
			if ( kthread_is_ready (edf_active) )
			{
				if ( !kthread_is_active (edf_active) )
				{
					kthread_remove_from_ready (edf_active);
				}
				else {
					/*
					 * set "deactivated" flag, don't need
					 * another call to "edf_schedule"
					 */
					ea = kthread_get_sched2_param
						(edf_active);
					ea->activated = 0;
				}

				kthread_enqueue ( edf_active,
						  &ksched->params.edf.ready );
			}
			/* else = thread is blocked - leave it there */
		}

		ksched->params.edf.active = first;
		EDF_LOG ( "%x [new active]", first );

		/* put winner into the global ready structure */
		kthread_move_to_ready ( first, LAST );
	}

	kthreads_schedule ();

	return 0;
}