/*!
 * Registered 'arch' handler for timer interrupts;
 * update system time and forward interrupt to kernel if its timer is expired
 */
static void arch_timer_handler ()
{
	void (*k_handler) ();

	/* account the interval that just elapsed (last_load) */
	time_add ( &clock, &last_load );  /* advance system clock */
	time_sub ( &delay, &last_load );  /* reduce remaining kernel delay */
	last_load = timer->max_interval;  /* default next programmed interval */

	if ( time_cmp ( &delay, &threshold ) <= 0 )
	{
		/* kernel delay expired (or within threshold): re-arm with
		 * the maximum interval and forward the tick to the kernel */
		delay = timer->max_interval;
		timer->set_interval ( &last_load );

		k_handler = alarm_handler;
		alarm_handler = NULL; /* reset kernel callback function */

		if ( k_handler )
			k_handler (); /* forward interrupt to kernel */
	}
	else {
		/* kernel delay not yet expired: program the shorter of the
		 * remaining delay and the hardware maximum */
		if ( time_cmp ( &delay, &last_load ) < 0 )
			last_load = delay;

		timer->set_interval ( &last_load );
	}
}
/*
 * Fetch the sleep time from the control and clamp it into the valid
 * range [00:03 .. 24:00], also capping a stray minute field at 59.
 * Returns TRUE when the value had to be corrected, FALSE otherwise.
 */
static BOOL sleep_time_is_invalid(control_t *p_time, utc_time_t *p_sleep_time)
{
    utc_time_t upper = {0};
    utc_time_t lower = {0};

    tbox_get_time(p_time, p_sleep_time);

    upper.hour = 24;    /* latest allowed: 24:00 */
    lower.minute = 3;   /* earliest allowed: 00:03 */

    if(time_cmp(p_sleep_time, &upper, TRUE) > 0)
    {
        p_sleep_time->hour = upper.hour;
        p_sleep_time->minute = upper.minute;
        return TRUE;
    }

    if(time_cmp(p_sleep_time, &lower, TRUE) < 0)
    {
        p_sleep_time->hour = lower.hour;
        p_sleep_time->minute = lower.minute;
        return TRUE;
    }

    if(p_sleep_time->minute >= 60)
    {
        p_sleep_time->minute = 59;
        return TRUE;
    }

    return FALSE;
}
/*
 * An animation is in progress when the render interval [t0, t1) is
 * non-empty and the current wall-clock time has not yet reached t1.
 */
bool gfx_render_is_animating(struct gfx_window *w)
{
	struct gfx_render *render = w->render;
	struct timespec now;

	clock_gettime(CLOCK_REALTIME, &now);

	return time_cmp(render->t0, render->t1) < 0 &&
	       time_cmp(now, render->t1) < 0;
}
/* REPEATEDLY ATTEMPT TO OPEN FILE */
/*
 * Open 'fname' with mode 'access', retrying up to 'max_attempts' times,
 * one retry per wall-clock second tick (polls until the time string
 * changes). Returns the open stream, or NULL after giving up.
 */
FILE *xfopen( char *fname, char *access, int max_attempts )
{
	int attempts = 1;
	char tstring[100];
	TIME tlast, tnow;
	FILE *f;

	if ( NULL != ( f = fopen( fname, access ) ) )
	{
		return( f ); /* IMMEDIATE RETURN IF NO PROBLEMS */
	}

	time_set_now( &tlast );
	/* FIX: initialize tnow as well - it was read uninitialized by the
	 * final time_to_string() below whenever max_attempts <= 1 */
	tnow = tlast;
	time_to_string( tstring, &tlast, 0, NULL, 1 );
	aside( "xfopen: \"%s\"\n PROBLEM/CONFLICT, access \"%s\" at %s",
	       fname, access, tstring );

	while ( attempts < max_attempts )
	{
		attempts++;
		do /* WAIT TILL SECOND HAS ALTERED */
		{
			time_set_now( &tnow );
		} while ( 0 == time_cmp( &tlast, &tnow ) );

		if ( NULL != ( f = fopen( fname, access ) ) )
		{
			time_to_string( tstring, &tnow, 0, NULL, 1 );
			aside( "xfopen: file \"%s\"\n OK on attempt %d at %s",
			       fname, attempts, tstring );
			return( f );
		}
		time_copy( &tlast, &tnow );
	}

	time_to_string( tstring, &tnow, 0, NULL, 1 );
	/* FIX: corrected typo "ABANDONDED" -> "ABANDONED" in log message */
	aside( "xfopen: file \"%s\"\n ABANDONED after attempt %d at %s",
	       fname, attempts, tstring );
	return( NULL );
}
/*!
 * Deactivate thread because:
 * 1. higher priority thread becomes active
 * 2. this thread time slice is expired
 * 3. this thread blocks on some queue
 */
static int rr_thread_deactivate ( kthread_t *kthread )
{
	/* Get current time and recalculate remainder */
	time_t t;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( tsched->sched_policy );

	/* non-zero remainder => slice was interrupted before it ran out */
	if (tsched->params.rr.remainder.sec + tsched->params.rr.remainder.nsec)
	{
		/*
		 * "slice interrupted"
		 * recalculate remainder
		 */
		k_get_time ( &t );

		/* remainder = slice_end - now */
		time_sub ( &tsched->params.rr.slice_end, &t );
		tsched->params.rr.remainder = tsched->params.rr.slice_end;

		if ( kthread_is_ready ( kthread ) )
		{
			/* is remainder too small or not? */
			if ( time_cmp ( &tsched->params.rr.remainder,
				&gsched->params.rr.threshold ) <= 0 )
			{
				/* too small: forfeit the rest, go to back */
				kthread_move_to_ready ( kthread, LAST );
			}
			else {
				/* still has time left: resume at queue head */
				kthread_move_to_ready ( kthread, FIRST );
			}
		}
	}
	/* else = remainder is zero, thread is already enqueued in ready queue*/

	return 0;
}
/*!
 * Modify existing alarm (change its values)
 * \param id    Alarm identifier (a kalarm_t pointer)
 * \param alarm Alarm parameters
 * \return status (0 for success)
 */
int k_alarm_set ( void *id, alarm_t *alarm )
{
	kalarm_t *kalarm = id;

	ASSERT ( kalarm && kalarm->magic == ALARM_MAGIC );

	/* copy new parameters */
	kalarm->alarm.action = alarm->action;
	kalarm->alarm.param = alarm->param;
	kalarm->alarm.flags = alarm->flags;
	kalarm->alarm.period = alarm->period;

	/* is activation time changed? */
	if ( time_cmp ( &kalarm->alarm.exp_time, &alarm->exp_time ) )
	{
		/* remove from active alarms */
		if ( kalarm->active )
			list_remove ( &kalarms, FIRST, &kalarm->list );

		/* re-insert with the new expiration time */
		kalarm->alarm.exp_time = alarm->exp_time;
		k_alarm_add ( kalarm );
	}

	return SUCCESS;
}
/*
 * Queue 'handler(arg)' to run after 'delay' microseconds.
 * If this exact handler/arg pair currently has a pending cancellation
 * request, consume the request instead of posting the event.
 */
void post_event_with_delay(EventCallBack * handler, void * arg, unsigned long delay)
{
    event_node * ev;
    event_node * qp;
    event_node ** qpp;

    check_error(pthread_mutex_lock(&event_lock));
    if (cancel_handler == handler && cancel_arg == arg) {
        /* a cancel was requested for this event: acknowledge and skip */
        cancel_handler = NULL;
        check_error(pthread_cond_signal(&cancel_cond));
        check_error(pthread_mutex_unlock(&event_lock));
        return;
    }
    ev = alloc_node(handler, arg);
    if (clock_gettime(CLOCK_REALTIME, &ev->runtime)) {
        check_error(errno);
    }
    time_add_usec(&ev->runtime, delay);  /* absolute run time */

    /* insert into timer_queue, kept sorted by runtime
     * (>= keeps insertion stable: new event goes after equal runtimes) */
    qpp = &timer_queue;
    while ((qp = *qpp) != 0 && time_cmp(&ev->runtime, &qp->runtime) >= 0) {
        qpp = &qp->next;
    }
    ev->next = qp;
    *qpp = ev;
    if (timer_queue == ev) {
        /* new queue head: wake the dispatcher to re-evaluate its timeout */
        check_error(pthread_cond_signal(&event_cond));
    }
    trace(LOG_EVENTCORE, "post_event: event %#lx, handler %#lx, arg %#lx, runtime %02d%02d.%03d",
          ev, ev->handler, ev->arg,
          ev->runtime.tv_sec / 60 % 60, ev->runtime.tv_sec % 60,
          ev->runtime.tv_nsec / 1000000);
    check_error(pthread_mutex_unlock(&event_lock));
}
/*! Start time slice for thread (or continue an interrupted one) */
static int rr_thread_activate ( kthread_t *kthread )
{
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( tsched->sched_policy );

	/* check remainder if needs to be replenished */
	if ( time_cmp ( &tsched->params.rr.remainder,
		&gsched->params.rr.threshold ) <= 0 )
	{
		/* remainder at or below threshold: add a fresh time slice */
		time_add ( &tsched->params.rr.remainder,
			   &gsched->params.rr.time_slice );
	}

	/* Get current time and store it */
	k_get_time ( &tsched->params.rr.slice_start );

	/* When to wake up? slice_end = slice_start + remainder */
	tsched->params.rr.slice_end = tsched->params.rr.slice_start;
	time_add ( &tsched->params.rr.slice_end, &tsched->params.rr.remainder );

	/* Set alarm for remainder time */
	gsched->params.rr.alarm.exp_time = tsched->params.rr.slice_end;
	gsched->params.rr.alarm.param = kthread;
	k_alarm_set ( gsched->params.rr.rr_alarm, &gsched->params.rr.alarm );

	return 0;
}
/* Busy-wait (spin) until the local clock reaches 'time'. */
static inline void time_wait_until(odp_time_t time)
{
	odp_time_t now = time_local();

	while (time_cmp(time, now) > 0)
		now = time_local();
}
/*
 * Check whether the task has passed its current absolute deadline.
 * Increments the task's dmiss counter on a miss.
 * \param tp task descriptor holding the absolute deadline in tp->dl
 * \return 1 on deadline miss, 0 otherwise
 */
int deadline_miss(task_par_t * tp)
{
	/* FIX: 'now' was declared 'static', making the function
	 * non-reentrant and racy when called from multiple task threads;
	 * an automatic variable is all that is needed here. */
	struct timespec now;

	clock_gettime(CLOCK_MONOTONIC, &now);

	if (time_cmp(now, tp->dl) > 0) {
		tp->dmiss++;
		return 1;
	}
	return 0;
}
/*
 * Compute the absolute difference |t1 - t2| into res.
 * Returns the result of time_cmp(t1, t2) (sign of t1 - t2).
 */
int time_diff(struct time *res, const struct time *t1, const struct time *t2)
{
	const int order = time_cmp(t1, t2);

	if (order < 0)
		_time_diff(res, t2, t1);  /* t2 is later: swap operands */
	else
		_time_diff(res, t1, t2);

	return order;
}
/** * true if task has missed its current deadline * @param i task index * @return true if task has missed its current deadline */ int has_deadline_miss(int i) { struct timespec now; clock_gettime(CLOCK_MONOTONIC, &now); if (time_cmp(now, tp[i].dl) > 0) { tp[i].dmiss++; return 1; } return 0; }
/*
 * If the trigger time has been reached, re-arm the trigger one period
 * past the current time and report TRUE. Returns FAILURE on null
 * arguments, FALSE when the trigger has not fired yet.
 */
uint8_t check_time(time_ref period, time_ref trigger)
{
	if(!period || !trigger)
		return FAILURE;

	if(time_cmp(global_time(), trigger) < 0)
		return FALSE;  /* not yet due */

	/* trigger = now + period */
	time_set_to_sum(trigger, period, global_time());
	return TRUE;
}
/*!
 * Set next timer activation
 * \param time Time of next activation (relative delay)
 * \param alarm_func Function to call upon timer expiration
 */
void arch_timer_set ( time_t *time, void *alarm_func )
{
	time_t remainder;

	/* account for the part of the previous interval that has already
	 * elapsed: elapsed = last_load - remainder */
	timer->get_interval_remainder ( &remainder );
	time_sub ( &last_load, &remainder );
	time_add ( &clock, &last_load );

	delay = *time;

	/* clamp the requested delay to the hardware minimum */
	if ( time_cmp ( &delay, &timer->min_interval ) < 0 )
		delay = timer->min_interval;

	alarm_handler = alarm_func;

	/* program at most the hardware maximum; a longer delay is carried
	 * in 'delay' and completed across subsequent interrupts */
	if ( time_cmp ( &delay, &timer->max_interval ) > 0 )
		last_load = timer->max_interval;
	else
		last_load = delay;

	timer->set_interval ( &last_load );
}
/*! Iterate through active alarms and activate newly expired ones */
static void k_schedule_alarms ()
{
	kalarm_t *first;
	time_t time, ref_time;

	arch_get_time ( &time );

	/* alarms expiring within 'threshold' from now count as expired */
	ref_time = time;
	time_add ( &ref_time, &threshold );

	/* should any alarm be activated? */
	first = list_get ( &kalarms, FIRST );
	while ( first != NULL )
	{
		if ( time_cmp ( &first->alarm.exp_time, &ref_time ) <= 0 )
		{
			/* 'activate' alarm */

			/* but first remove alarm from list */
			first = list_remove ( &kalarms, FIRST, NULL );
			if ( first->alarm.flags & ALARM_PERIODIC )
			{
				/* calculate next activation time */
				time_add ( &first->alarm.exp_time,
					   &first->alarm.period );
				/* put back into list */
				list_sort_add ( &kalarms, first,
						&first->list, alarm_cmp );
			}
			else {
				first->active = 0;
			}

			if ( first->alarm.action ) /* activate alarm */
				first->alarm.action ( first->alarm.param );

			first = list_get ( &kalarms, FIRST );
		}
		else {
			/* list is sorted: remaining alarms expire later */
			break;
		}
	}

	/* program the hardware timer for the earliest remaining alarm */
	first = list_get ( &kalarms, FIRST );
	if ( first )
	{
		ref_time = first->alarm.exp_time;
		time_sub ( &ref_time, &time );
		arch_timer_set ( &ref_time, k_timer_interrupt );
	}
}
/*
 * Dispatch-thread main loop: execute posted events and expired timer
 * events until 'process_events' is cleared. event_lock is held except
 * while a handler runs. The timer queue is polled when no plain events
 * are pending, and additionally every 64th event so timers cannot be
 * starved by a steady stream of plain events.
 */
void run_event_loop(void) {
    unsigned event_cnt = 0;
    assert(is_dispatch_thread());
    check_error(pthread_mutex_lock(&event_lock));
    while (process_events) {
        event_node * ev = NULL;
        /* consider an expired timer first when idle, or every 64 events */
        if (timer_queue != NULL && (event_queue == NULL || (event_cnt & 0x3fu) == 0)) {
            struct timespec timenow;
            if (clock_gettime(CLOCK_REALTIME, &timenow)) {
                check_error(errno);
            }
            if (time_cmp(&timer_queue->runtime, &timenow) <= 0) {
                ev = timer_queue;
                timer_queue = ev->next;
            }
        }
        /* otherwise take the next plain event (FIFO) */
        if (ev == NULL && event_queue != NULL) {
            ev = event_queue;
            event_queue = ev->next;
            if (event_queue == NULL) {
                assert(event_last == ev);
                event_last = NULL;
            }
        }
        if (ev == NULL) {
            /* nothing runnable: sleep until the next timer expires or
             * a new event is posted (condition signalled) */
            if (timer_queue != NULL) {
                int error = pthread_cond_timedwait(&event_cond, &event_lock, &timer_queue->runtime);
                if (error && error != ETIMEDOUT) check_error(error);
            }
            else {
                check_error(pthread_cond_wait(&event_cond, &event_lock));
            }
        }
        else {
            /* run the handler without holding the lock */
            check_error(pthread_mutex_unlock(&event_lock));
            trace(LOG_EVENTCORE, "run_event_loop: event %#lx, handler %#lx, arg %#lx",
                  ev, ev->handler, ev->arg);
            ev->handler(ev->arg);
            check_error(pthread_mutex_lock(&event_lock));
            free_node(ev);
            event_cnt++;
        }
    }
}
/*
 * Compare the cached time in the impl data against the current time;
 * update the cache and return TRUE when it changed, FALSE otherwise.
 * (para1 carries a time_impl_data_t pointer; para2 is unused.)
 */
static BOOL ap_check_time_update(u32 para1, u32 para2)
{
    time_impl_data_t *p_data = (time_impl_data_t *)para1;
    utc_time_t current = {0};

    time_get(&current, TRUE);

    if(time_cmp(&(p_data->old_time), &current, FALSE) == 0)
        return FALSE;  /* unchanged */

    p_data->old_time = current;
    return TRUE;
}
/*! Check if task hasn't overrun its deadline */
static int edf_check_deadline ( kthread_t *kthread )
{
	timespec_t current;
	kthread_sched2_t *tsched = kthread_get_sched2_param ( kthread );

	kclock_gettime ( CLOCK_REALTIME, &current );

	/* overrun <=> "now" is past "active_deadline" */
	if ( time_cmp ( &current, &tsched->params.edf.active_deadline ) <= 0 )
		return 0;

	EDF_LOG ( "%x [DEADLINE OVERRUN]", kthread );

	return EXIT_FAILURE;
}
static void _time_diff(struct time *res, const struct time *t1, const struct time *t2) { assert(t1->secs >= t2->secs); assert(time_cmp(t1, t2) >= 0); res->secs = t1->secs - t2->secs; if (t1->nsecs >= t2->nsecs) { res->nsecs = t1->nsecs - t2->nsecs; } else { assert(t1->nsecs + SEC_TO_NSEC >= t2->nsecs); res->nsecs = t1->nsecs + SEC_TO_NSEC - t2->nsecs; res->secs -= 1; } }
/*!
 * Check if task hasn't overrun its deadline at its start
 * Handle deadline overrun, based on flags
 */
static int edf_check_deadline ( kthread_t *kthread )
{
	time_t current;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );

	k_get_time ( &current );

	/* overrun <=> current time is past "active_deadline" */
	if ( time_cmp ( &current, &tsched->params.edf.active_deadline ) <= 0 )
		return 0;

	LOG( DEBUG, "%x [DEADLINE OVERRUN]", kthread );

	return -1;
}
/*
 * Receive an SRI (stream metadata) update with its timestamp.
 * Stores (H, T) keyed by streamID in currentHs and raises sriChanged
 * when the stream is new or when the registered comparison callbacks
 * report a difference. Guarded by sriUpdateLock.
 */
void InSDDSPort::pushSRI(const BULKIO::StreamSRI& H, const BULKIO::PrecisionUTCTime& T)
{
    TRACE_ENTER(logger, "InSDDSPort::pushSRI" );

    boost::mutex::scoped_lock lock(sriUpdateLock);

    // linear search for an existing entry with this streamID
    bool foundSRI = false;
    BULKIO::StreamSRI tmpH = H;  // NOTE(review): tmpH appears unused — confirm
    SriMap::iterator sriIter;

    sriIter = currentHs.begin();
    while (sriIter != currentHs.end()) {
        if (strcmp(H.streamID, (*sriIter).first.c_str()) == 0) {
            foundSRI = true;
            break;
        }
        sriIter++;
    }
    if (!foundSRI) {
        // first SRI seen for this stream
        currentHs.insert(std::make_pair(CORBA::string_dup(H.streamID), std::make_pair(H, T)));
        sriChanged = true;
    } else {
        // Compare against the stored pair via the optional callbacks.
        // NOTE(review): presumably sri_cmp/time_cmp return true when the
        // values are EQUAL — "!schanged || !tchanged" then flags a change
        // when either comparison finds a difference; confirm the callback
        // contract before relying on this.
        bool schanged = false;
        if ( sri_cmp != NULL ) {
            schanged = sri_cmp( (*sriIter).second.first, H );
        }
        bool tchanged = false;
        if ( time_cmp != NULL ) {
            tchanged = time_cmp( (*sriIter).second.second, T );
        }
        sriChanged = !schanged || !tchanged;

        // overwrite the stored SRI/time with the new values
        (*sriIter).second = std::make_pair(H, T);
    }

    TRACE_EXIT(logger, "InSDDSPort::pushSRI" );
}
/*
 * Create a DCE-style (version 1) UUID from the current time, a clock
 * sequence and the node hardware address. A file-scope counter extends
 * gettimeofday() resolution to 100ns ticks and handles clock stalls
 * and backward steps.
 */
void uuid_create(afsUUID * uuid)
{
    static int uuid_inited = 0;
    struct timeval tv;
    int ret, got_time;
    uint64_t dce_time;

    if (uuid_inited == 0) {
        /* one-time init: seed last_time, random clock sequence,
         * cache the node address */
        gettimeofday(&last_time, NULL);
        seq_num = arc4random();
        get_node_addr(nodeaddr);
        uuid_inited = 1;
    }
    gettimeofday(&tv, NULL);
    got_time = 0;
    do {
        ret = time_cmp(&tv, &last_time);
        if (ret < 0) {
            /* Time went backward, just inc seq_num and be done.
             * seq_num is 6 + 8 bit field in the uuid, so let it wrap
             * around. don't let it be zero. */
            seq_num = (seq_num + 1) & 0x3fff;
            if (seq_num == 0)
                seq_num++;
            got_time = 1;
            counter = 0;
            last_time = tv;
        } else if (ret > 0) {
            /* time went forward, reset counter and be happy */
            last_time = tv;
            counter = 0;
            got_time = 1;
        } else {
#define UUID_MAX_HZ (1)	/* make this bigger if you have a larger tickrate */
#define MULTIPLIER_100_NANO_SEC 10
            /* same clock tick as the previous call: use 'counter' as a
             * 100ns sub-tick offset, up to the tick-rate budget */
            if (++counter < UUID_MAX_HZ * MULTIPLIER_100_NANO_SEC)
                got_time = 1;
        }
    } while (!got_time);
    /*
     * now shift time to dce_time, epoch 00:00:00:00, 15 October 1582
     * dce time ends year ~3400, so start to worry now
     */
    dce_time = tv.tv_usec * MULTIPLIER_100_NANO_SEC + counter;
    dce_time += ((uint64_t) tv.tv_sec) * 10000000;
    dce_time += (((uint64_t) 0x01b21dd2) << 32) + 0x13814000;
    uuid->time_low = dce_time & 0xffffffff;
    uuid->time_mid = 0xffff & (dce_time >> 32);
    uuid->time_hi_and_version = 0x0fff & (dce_time >> 48);
    uuid->time_hi_and_version |= (1 << 12);	/* version 1 (time-based) */
    uuid->clock_seq_low = seq_num & 0xff;
    uuid->clock_seq_hi_and_reserved = (seq_num >> 8) & 0x3f;
    uuid->clock_seq_hi_and_reserved |= 0x80;	/* dce variant */
    memcpy(uuid->node, nodeaddr, 6);
}
/*!
 * Fill rows [start, start+size) of the EPG event list control with the
 * current programme's schedule events: time range string, event name,
 * a book icon for booked events that have not started yet, and an info
 * icon when a short text is attached.
 * \return SUCCESS, or ERR_FAILURE when no schedule events exist
 */
static RET_CODE epg_event_list_update(control_t* p_list, u16 start, u16 size, u32 context)
{
  event_node_t *p_evt_node = NULL;
  u16 total_evt = 0;
  utc_time_t curn_time = {0};
  s8 time_cmp_result = 0;
  u8 i, pos;
  u8 ascstr[32];
  u16 cnt = list_get_count(p_list);
  utc_time_t start_time = {0};
  utc_time_t end_time = {0};
  book_pg_t temp_node = {0};

  p_evt_node = mul_epg_get_sch_event(&g_prog_info, &total_evt);
  if(p_evt_node == NULL)
  {
    return ERR_FAILURE;
  }

  /*! found start event node. */
  if(start > 0)
  {
    p_evt_node = mul_epg_get_sch_event_by_pos(&g_prog_info, p_evt_node, start);
  }

  for (i = 0; i < size; i++)
  {
    pos = (u8)(i + start);
    if((pos < cnt) && (pos < total_evt))
    {
      if(p_evt_node != NULL)
      {
        /* build "hh:mm-hh:mm" from local start time and duration */
        time_to_local(&(p_evt_node->start_time), &start_time);
        memcpy(&end_time, &start_time, sizeof(utc_time_t));
        time_add(&end_time, &(p_evt_node->drt_time));
        sprintf((char*)ascstr,"%.2d:%.2d-%.2d:%.2d", \
          start_time.hour, start_time.minute,\
          end_time.hour, end_time.minute);

        /* booking descriptor for this event, used to look it up in
         * the book list */
        memset(&temp_node, 0, sizeof(book_pg_t));
        temp_node.pgid = db_dvbs_get_id_by_view_pos(ui_dbase_get_pg_view_id(), prog_focus);
        memcpy(&(temp_node.start_time), &start_time, sizeof(utc_time_t));
        memcpy(&(temp_node.drt_time), &(p_evt_node->drt_time), sizeof(utc_time_t));
        temp_node.book_mode = BOOK_TMR_ONCE;

        /* show the book icon only for booked events in the future */
        time_get(&curn_time, FALSE);
        time_cmp_result = time_cmp(&start_time, &curn_time, FALSE);
        if((book_get_match_node(&temp_node) < MAX_BOOK_PG)
          && (time_cmp_result >= 0))
        {
          list_set_field_content_by_icon(p_list, pos, 0, IM_EPG_BOOK);
        }
        else
        {
          list_set_field_content_by_icon(p_list, pos, 0, 0);
        }
        list_set_field_content_by_ascstr(p_list, pos, 1, ascstr);
        list_set_field_content_by_unistr(p_list, pos, 2, p_evt_node->event_name);

        /* info icon when the event carries a short text */
        if(p_evt_node != NULL && p_evt_node->p_sht_text != NULL)
        {
          list_set_field_content_by_icon(p_list, pos, 3, IM_EPG_INFOR);
        }
        else
        {
          list_set_field_content_by_icon(p_list, pos, 3, 0);
        }

        /* advance to the next schedule event */
        p_evt_node = mul_epg_get_sch_event_by_pos(&g_prog_info, p_evt_node, 1);
      }
    }
  }
  return SUCCESS;
}
/*! Activate timers and reschedule threads if required */
static void ktimer_schedule ()
{
	ktimer_t *first;
	timespec_t time, ref_time;
	int resched = 0;

	kclock_gettime ( CLOCK_REALTIME, &time );
	/* should have separate "scheduler" for each clock */

	/* timers expiring within 'threshold' from now count as expired */
	ref_time = time;
	time_add ( &ref_time, &threshold );
	/* use "ref_time" instead of "time" when looking timers to activate */

	/* should any timer be activated? */
	first = list_get ( &ktimers, FIRST );
	while ( first != NULL )
	{
		/* timers have absolute values in 'it_value' */
		if ( time_cmp ( &first->itimer.it_value, &ref_time ) <= 0 )
		{
			/* 'activate' timer */

			/* but first remove timer from list */
			first = list_remove ( &ktimers, FIRST, NULL );

			/* and add to list if period is given */
			if ( TIME_IS_SET ( &first->itimer.it_interval) )
			{
				/* calculate next activation time */
				time_add ( &first->itimer.it_value,
					   &first->itimer.it_interval );
				/* put back into list */
				list_sort_add ( &ktimers, first,
						&first->list, ktimer_cmp );
			}
			else {
				TIMER_DISARM ( first );
			}

			if ( first->owner == NULL )
			{
				/* timer set by kernel - call now, directly */
				if ( first->evp.sigev_notify_function )
					first->evp.sigev_notify_function (
						first->evp.sigev_value );
			}
			else {
				/* timer set by thread: deliver the event;
				 * request reschedule if a thread became ready */
				if ( !ksignal_process_event (
					&first->evp, first->owner, SI_TIMER ) )
				{
					resched++;
				}
			}

			first = list_get ( &ktimers, FIRST );
		}
		else {
			/* sorted list: the remaining timers expire later */
			break;
		}
	}

	/* program the hardware timer for the earliest remaining timer */
	first = list_get ( &ktimers, FIRST );
	if ( first )
	{
		ref_time = first->itimer.it_value;
		time_sub ( &ref_time, &time );
		arch_timer_set ( &ref_time, ktimer_schedule );
	}

	if ( resched )
		kthreads_schedule ();
}
/*!
 * Compare timers by expiration times (used when inserting new timer in list)
 * \param _a First timer
 * \param _b Second timer
 * \return -1 when a < b, 0 when a == b, 1 when a > b
 */
static int ktimer_cmp ( void *_a, void *_b )
{
	ktimer_t *lhs = _a;
	ktimer_t *rhs = _b;

	return time_cmp ( &lhs->itimer.it_value, &rhs->itimer.it_value );
}
/*
 * Task body multiplexing a NULL-terminated table of periodic functions:
 * runs each enabled function's proc(), collects the earliest requested
 * wakeup time and the union of requested signal masks, then sleeps
 * until the next event or signal.
 */
static void periodic_task()
{
    nrk_sig_mask_t wait_mask, func_wait_mask;
    nrk_time_t next_event, func_next_event;
    periodic_func_t **funcp;
    periodic_func_t *func;
    nrk_time_t now, sleep_time;
    int8_t rc;

    /* one-time init pass over the function table */
    funcp = &functions[0];
    while (*funcp) {
        func = *funcp;
        LOG("init: ");
        LOGF(func->name);
        LOGNL();
        if (func->init)
            func->init();
        funcp++;
    }

    rc = nrk_signal_register(func_signal);
    if (rc == NRK_ERROR)
        ABORT("reg sig: func\r\n");

    while (1) {
        LOG("awake\r\n");

        TIME_CLEAR(next_event);
        wait_mask = SIG(func_signal);

        funcp = &functions[0];
        while (*funcp) {
            func = *funcp;

            TIME_CLEAR(func_next_event);
            func_wait_mask = 0;

            /* run when enabled, and once more after an enable/disable
             * transition so the function can react to the change */
            if (func->enabled || func->enabled != func->last_enabled) {
                LOG("proc: ");
                LOGF(func->name);
                LOGNL();
                ASSERT(func->proc);
                func->proc(func->enabled, &func_next_event, &func_wait_mask);
            }
            func->last_enabled = func->enabled;

            wait_mask |= func_wait_mask;

            /* keep the earliest valid wakeup across all functions */
            if (IS_VALID_TIME(func_next_event) &&
                (!IS_VALID_TIME(next_event) ||
                 time_cmp(&func_next_event, &next_event) < 0)) {
                next_event = func_next_event;
            }

            funcp++;
        }

        if (IS_VALID_TIME(next_event)) {
            nrk_time_get(&now);
            rc = nrk_time_sub(&sleep_time, next_event, now);
            if (rc != NRK_OK) {
                /* requested wakeup is already in the past: loop now */
                LOG("next event in the past\r\n");
                continue;
            }
            LOG("sleeping for: ");
            LOGP("%lu ms\r\n", TIME_TO_MS(sleep_time));
            nrk_set_next_wakeup(sleep_time);
            wait_mask |= SIG(nrk_wakeup_signal);
        }

        LOG("waiting\r\n");
        nrk_event_wait( wait_mask );
    }

    ABORT("periodic task exited\r\n");
}
/*!
 * Set EDF scheduling parameters for a thread; behavior dispatches on
 * flags: EDF_SET (define period/deadline and arm alarms), EDF_WAIT
 * (wait for the start of the next period) or EDF_EXIT (leave EDF).
 * \return 0 on success, -1 on deadline overrun
 */
static int edf_set_thread_sched_parameters (kthread_t *kthread, sched_t *params)
{
	time_t now;
	alarm_t alarm;
	kthread_sched_data_t *tsched = kthread_get_sched_param ( kthread );
	ksched_t *gsched = ksched_get ( SCHED_EDF );

	if ( gsched->params.edf.active == kthread )
		gsched->params.edf.active = NULL;

	k_get_time ( &now );

	if ( params->edf.flags & EDF_SET )
	{
		/*LOG( DEBUG, "%x [SET]", kthread ); */
		tsched->params.edf.period = params->edf.period;
		tsched->params.edf.relative_deadline = params->edf.deadline;
		tsched->params.edf.flags = params->edf.flags;

		/* set periodic alarm */
		tsched->params.edf.next_run = now;
		time_add ( &tsched->params.edf.next_run, &params->edf.period );
		edf_arm_deadline ( kthread );
		edf_arm_period ( kthread );

		/*
		 * adjust "next_run" and "deadline" for "0" period
		 * - first "edf_wait" will set correct values for first period
		 */
		tsched->params.edf.next_run = now;
		time_sub ( &tsched->params.edf.next_run, &params->edf.period );
		tsched->params.edf.active_deadline = now;
		time_add ( &tsched->params.edf.active_deadline,
			   &params->edf.deadline );
	}
	else if ( params->edf.flags & EDF_WAIT )
	{
		if ( edf_check_deadline ( kthread ) )
			return -1;

		/* set times for next period */
		if ( time_cmp ( &now, &tsched->params.edf.next_run ) > 0 )
		{
			/* period boundary passed: advance next_run and
			 * recompute the absolute deadline */
			time_add ( &tsched->params.edf.next_run,
				   &tsched->params.edf.period );
			tsched->params.edf.active_deadline =
				tsched->params.edf.next_run;
			time_add ( &tsched->params.edf.active_deadline,
				   &tsched->params.edf.relative_deadline );

			if ( kthread == gsched->params.edf.active )
				gsched->params.edf.active = NULL;

			/* set (separate) alarm for deadline */
			alarm.action = edf_deadline_timer;
			alarm.param = kthread;
			alarm.flags = 0;
			alarm.period.sec = alarm.period.nsec = 0;
			alarm.exp_time = tsched->params.edf.active_deadline;
			k_alarm_set ( tsched->params.edf.edf_deadline_alarm,
				      &alarm );
		}

		/* is task ready for execution, or must wait until next period */
		if ( time_cmp ( &tsched->params.edf.next_run, &now ) > 0 )
		{
			/* wait till "next_run" */
			/* NOTE(review): this log string appears garbled in
			 * the source (literal split by a newline) — confirm
			 * the intended text, likely "%x [EDF WAIT]" */
			LOG( DEBUG, "%x [EDF 
WAIT]", kthread );
			kthread_enqueue ( kthread, &gsched->params.edf.wait );
			kthreads_schedule (); /* will call edf_schedule() */
		}
		else {
			/* "next_run" has already come,
			 * activate task => move it to "EDF ready tasks" */
			LOG( DEBUG, "%x [EDF READY]", kthread );
			LOG( DEBUG, "%x [1st READY]",
			     kthreadq_get ( &gsched->params.edf.ready ) );
			kthread_enqueue ( kthread, &gsched->params.edf.ready );
			kthreads_schedule (); /* will call edf_schedule() */
		}
	}
	else if ( params->edf.flags & EDF_EXIT )
	{
		if ( kthread == gsched->params.edf.active )
			gsched->params.edf.active = NULL;

		//LOG( DEBUG, "%x [EXIT]", kthread );
		if ( edf_check_deadline ( kthread ) )
		{
			LOG( DEBUG, "%x [EXIT-error]", kthread );
			return -1;
		}
		LOG( DEBUG, "%x [EXIT-normal]", kthread );

		/* tear down this thread's EDF alarms and fall back to FIFO */
		if ( tsched->params.edf.edf_period_alarm )
			k_alarm_remove ( tsched->params.edf.edf_period_alarm );
		if ( tsched->params.edf.edf_deadline_alarm )
			k_alarm_remove ( tsched->params.edf.edf_deadline_alarm );

		tsched->sched_policy = SCHED_FIFO;
		LOG( DEBUG, "%x [EXIT]", kthread );
		if ( k_edf_schedule () )
		{
			LOG( DEBUG, "%x [EXIT]", kthread );
			kthreads_schedule (); /* will NOT call edf_schedule() */
		}
		LOG( DEBUG, "%x [EXIT]", kthread );
	}

	return 0;
}
/*!
 * Pick the EDF thread with the earliest active deadline among the
 * currently active EDF thread and the edf.ready queue, and make it
 * the active one (preempting the previous active thread if needed).
 * \return 1 when the active EDF thread changed, 0 otherwise
 */
static int k_edf_schedule ()
{
	kthread_t *first, *next, *edf_active;
	kthread_sched_data_t *sch_first, *sch_next;
	ksched_t *gsched = ksched_get ( SCHED_EDF );
	int retval = 0;

	edf_active = gsched->params.edf.active;
	first = kthreadq_get ( &gsched->params.edf.ready );

	LOG( DEBUG, "%x [active]", edf_active );
	LOG( DEBUG, "%x [first]", first );
	//LOG( DEBUG, "%x [next]", next );

	if ( !first )
		return 0; /* no threads in edf.ready queue, edf.active unch. */

	/* start the min-deadline scan from the active thread when set */
	if ( edf_active )
	{
		next = first;
		first = edf_active;
		LOG( DEBUG, "%x [next]", kthreadq_get_next ( next ) );
	}
	else {
		next = kthreadq_get_next ( first );
		LOG( DEBUG, "%x [next]", next );
	}

	/* linear scan: keep the earliest-deadline thread in 'first' */
	while ( first && next )
	{
		sch_first = kthread_get_sched_param ( first );
		sch_next = kthread_get_sched_param ( next );
		if ( time_cmp ( &sch_first->params.edf.active_deadline,
			&sch_next->params.edf.active_deadline ) > 0 )
		{
			first = next;
		}
		next = kthreadq_get_next ( next );
	}

	if ( first && first != edf_active )
	{
		/* a different thread won: promote it to active */
		next = kthreadq_remove ( &gsched->params.edf.ready, first );
		LOG ( DEBUG, "%x removed, %x is now first", next,
		      kthreadq_get ( &gsched->params.edf.ready ) );

		if ( edf_active )
		{
			LOG( DEBUG, "%x=>%x [EDF_SCHED_PREEMPT]",
			     edf_active, first );
			/*
			 * change active EDF thread:
			 * -remove it from active/ready list
			 * -put it into edf.ready list
			 */
			if ( kthread_is_ready (edf_active) )
			{
				if ( !kthread_is_active (edf_active) )
				{
					kthread_remove_from_ready (edf_active);
					/*
					 * set "deactivated" flag, don't need
					 * another call to "edf_schedule"
					 */
				}
				else {
					kthread_get_sched_param (edf_active)
						->activated = 0;
				}
				kthread_enqueue ( edf_active,
						  &gsched->params.edf.ready );
			}
			/* else = thread is blocked - leave it there */
		}

		gsched->params.edf.active = first;
		LOG( DEBUG, "%x [new active]", first );
		kthread_move_to_ready ( first, LAST );
		retval = 1;
	}

	return retval;
}
/*
 * Create (or reuse) the temperature sensor for location 'I' (internal),
 * 'A' or 'B' (external ports). If a sensor already exists with the same
 * period and size it is returned unchanged; otherwise the old one is
 * deleted and a new sensor is built with its lookup and transmit
 * actions. Returns the sensor, or FAILURE on bad location or on any
 * allocation failure (partially built objects are torn down).
 */
sensor_ref create_temperature_sensor(uint8_t loc, time_ref period, uint16_t size)
{
    sensor_ref * temperature_sensor;
    uint8_t (*delete_func)();
    uint8_t (*enable_func)();
    uint8_t (*disable_func)();
    uint8_t channel;

    /* select the per-location singleton slot and its callbacks */
    if(loc == 'I')
    {
        temperature_sensor = &internal_temperature_sensor;
        delete_func = &delete_internal_temperature_sensor;
        enable_func = &enable_internal_temperature_sensor;
        disable_func = &disable_internal_temperature_sensor;
        channel = TEMPERATURE_ADC;
    }
    else if (loc == 'A')
    {
        temperature_sensor = &external_temperature_sensor_A;
        delete_func = &delete_external_temperature_sensor_A;
        enable_func = &enable_external_temperature_sensor_A;
        disable_func = &disable_external_temperature_sensor_A;
        channel = PORT_A_ADC;
    }
    else if (loc == 'B')
    {
        temperature_sensor = &external_temperature_sensor_B;
        delete_func = &delete_external_temperature_sensor_B;
        enable_func = &enable_external_temperature_sensor_B;
        disable_func = &disable_external_temperature_sensor_B;
        channel = PORT_B_ADC;
    }
    else
    {
        return FAILURE;
    }

    if(*temperature_sensor)
    {
        // We already have one there
        if((time_cmp(period, sensor_get_period(*temperature_sensor)) == 0)
            && size == sensor_get_size(*temperature_sensor))
        {
            // If they are the same...do nothing
            return *temperature_sensor;
        }
        else
        {
            // They are different, so delete the old one
            delete_func();
        }
    }

    action_ref transmit_action = 0;
    action_ref lookup_temp_action = 0;
    node_ref temperature_node = 0;

    /* single-iteration for(;;): 'break' jumps to the shared cleanup
     * below, emulating goto-based error handling */
    for(;;)
    {
        *temperature_sensor = new_sensor('T', channel, period, size);
        if(!*temperature_sensor)
            break;

        // Create the lookup_temp_action
        lookup_temp_action = new_action();
        if(!lookup_temp_action)
            break;
        action_set_func(lookup_temp_action, &fix_temperature);
        temperature_node = new_node(*temperature_sensor, 0);
        if(!temperature_node)
            break;
        action_set_args(lookup_temp_action, temperature_node);
        sensor_add_action_on_data_full(*temperature_sensor, lookup_temp_action);

        // Create the transmit action
        transmit_action = new_transmit_action(*temperature_sensor);
        if(!transmit_action)
            break;
        sensor_add_action_on_data_full(*temperature_sensor, transmit_action);

        sensor_set_delete_func(*temperature_sensor, delete_func);
        sensor_set_enable_func(*temperature_sensor, enable_func);
        sensor_set_disable_func(*temperature_sensor, disable_func);

        return *temperature_sensor;
    }

    /* error path: undo whatever was built before the failure */
    delete_func();
    node_delete(&temperature_node);
    action_delete(&lookup_temp_action);
    action_delete(&transmit_action);

    return FAILURE;
}
/*
 * Create a DCE-style (version 1) UUID from the OS time, a clock
 * sequence and the hardware network address. Handles backward clock
 * steps via the clock sequence and sub-tick collisions via the
 * uuid_time_adjust fudge factor.
 * \return 0 on success, or an error code from uuid_get_address /
 *         Winsock initialization
 */
afs_int32 afs_uuid_create(afsUUID * uuid)
{
    uuid_address_t eaddr;
    afs_int32 got_no_time = 0, code;

    if (!uuid_init_done) {
	uuid_time_t t;
	u_short *seedp, seed = 0;

	rand_m = 971;;
	rand_ia = 11113;
	rand_ib = 104322;
	rand_irand = 4181;
	/*
	 * Generating our 'seed' value
	 *
	 * We start with the current time, but, since the resolution of clocks is
	 * system hardware dependent (eg. Ultrix is 10 msec.) and most likely
	 * coarser than our resolution (10 usec) we 'mixup' the bits by xor'ing
	 * all the bits together.  This will have the effect of involving all of
	 * the bits in the determination of the seed value while remaining system
	 * independent.  Then for good measure to ensure a unique seed when there
	 * are multiple processes creating UUID's on a system, we add in the PID.
	 */
	uuid__get_os_time(&t);
	seedp = (u_short *) (&t);
	seed ^= *seedp++;
	seed ^= *seedp++;
	seed ^= *seedp++;
	seed ^= *seedp++;
#if defined(KERNEL) && defined(AFS_XBSD_ENV)
	rand_irand += seed + (afs_uint32) curproc->p_pid;
#else
	rand_irand += seed + (afs_uint32) getpid();
#endif
	uuid__get_os_time(&time_last);
	clock_seq = true_random();
#ifdef AFS_NT40_ENV
	if (afs_winsockInit() < 0) {
	    return WSAGetLastError();
	}
#endif
	uuid_init_done = 1;
    }
    if ((code = uuid_get_address(&eaddr)))
	return code;		/* get our hardware network address */
    do {
	/* get the current time */
	uuid__get_os_time(&time_now);
	/*
	 * check that our clock hasn't gone backwards and handle it
	 * accordingly with clock_seq
	 * check that we're not generating uuid's faster than we
	 * can accommodate with our uuid_time_adjust fudge factor
	 */
	if ((code = time_cmp(&time_now, &time_last)) == -1) {
	    /* clock went backwards: bump the clock sequence instead.
	     * A clock_seq value of 0 indicates that it hasn't been
	     * initialized. */
	    if (clock_seq == 0) {
		clock_seq = true_random();
	    }
	    clock_seq = (clock_seq + 1) & 0x3fff;
	    if (clock_seq == 0)
		clock_seq = clock_seq + 1;
	    uuid_time_adjust = 0;
	} else if (code == 1) {
	    /* time moved forward: no adjustment needed */
	    uuid_time_adjust = 0;
	} else {
	    /* same tick as last call: use the adjust factor as a
	     * sub-tick offset, spinning once the budget is exhausted */
	    if (uuid_time_adjust == 0x7fff)
		/* spin while we wait for the clock to tick */
		got_no_time = 1;
	    else
		uuid_time_adjust++;
	}
    } while (got_no_time);
    time_last.lo = time_now.lo;
    time_last.hi = time_now.hi;
    if (uuid_time_adjust != 0) {
	/* add the sub-tick adjustment, propagating carry into .hi */
	if (time_now.lo & 0x80000000) {
	    time_now.lo += uuid_time_adjust;
	    if (!(time_now.lo & 0x80000000))
		time_now.hi++;
	} else
	    time_now.lo += uuid_time_adjust;
    }
    /* pack time, version, clock sequence, variant and node address */
    uuid->time_low = time_now.lo;
    uuid->time_mid = time_now.hi & 0x0000ffff;
    uuid->time_hi_and_version = (time_now.hi & 0x0fff0000) >> 16;
    uuid->time_hi_and_version |= (1 << 12);	/* version 1 (time-based) */
    uuid->clock_seq_low = clock_seq & 0xff;
    uuid->clock_seq_hi_and_reserved = (clock_seq & 0x3f00) >> 8;
    uuid->clock_seq_hi_and_reserved |= 0x80;	/* dce variant */
    uuid_memcpy((void *)uuid->node, (void *)&eaddr, sizeof(uuid_address_t));
    return 0;
}