static void tick_second_handler( struct tm *time, const TimeUnits unit ) { /* Bail out here if the timezone isn't set. */ if ( have_tz_offset() == false ) return; /* Get the UTC time and update the timers with it. */ struct tm utc = *time; time_convert_local_to_utc(&utc); update_event_times(&utc); /* Reload the menu in case row counts change. */ menu_layer_reload_data(event_menu); /* Stuff to do on the first tick. */ if ( first_tick == true ) { /* If the time zone message exists, we can remove it now. */ if ( tz_message != NULL ) { text_layer_destroy(tz_message); tz_message = NULL; } /* Set the selection to the first menu item on startup. */ menu_layer_set_selected_index(event_menu, (MenuIndex) { 0, 0 }, MenuRowAlignBottom, false); /* The menu layer is created hidden so we don't see * all the timers set to 0:00 before the first tick. */ layer_set_hidden(menu_layer_get_layer(event_menu), false); first_tick = false; } layer_mark_dirty(menu_layer_get_layer(event_menu)); }
/* Select the next process to run after 'current' has finished its time
 * slice (yielded, blocked, or exited).
 *
 * Accounts the reductions the slice consumed, disposes of 'current'
 * according to its slice result (requeue, wait-list, or terminate),
 * services expired receive timeouts and pending events, then picks a
 * runnable process by priority.  When nothing is runnable it sleeps in
 * events_poll() until the nearest timeout, looping via 'do_pending'.
 *
 * current   - the process whose slice just ended; must not be on any
 *             queue (asserted below).
 * reds_left - reductions 'current' did NOT consume out of its
 *             SLICE_REDUCTIONS budget.
 *
 * Returns the next process to execute; does not return until one is
 * available. */
proc_t *scheduler_next(proc_t *current, int reds_left)
{
	set_phase(PHASE_NEXT);

	/* Account the reductions actually consumed by the finished slice. */
	uint32_t reds_used = SLICE_REDUCTIONS - reds_left;
	ssi(SYS_STATS_CTX_SWITCHES);
	ssa(SYS_STATS_REDUCTIONS, reds_used);
	current->total_reds += reds_used;

	proc_t *next_proc = 0;
	uint64_t ticks = monotonic_clock(); // freeze time

	/* The outgoing process must not be sitting on any queue. */
	assert(current->my_queue == MY_QUEUE_NONE);

#ifdef PROFILE_HARNESS
	/* Persists across calls: when the previously selected process
	 * started running (0 until the first slice completes). */
	static uint64_t proc_started_ns = 0;
	if (proc_started_ns != 0)
		prof_slice_complete(current->pid,
			current->result.what, current->cap.ip, proc_started_ns, ticks);
#endif

	/* Wake processes whose receive timeout has expired, jumping each
	 * to its timeout continuation.  Failure to requeue is fatal for
	 * that process (out of memory). */
	proc_t *expired;
	while ((expired = wait_list_expired(&queues.on_timed_receive, ticks)) != 0)
	{
		expired->cap.ip = expired->result.jump_to;
		if (scheduler_park_runnable_N(expired) < 0)
			scheduler_exit_process(expired, A_NO_MEMORY);
	}

	/* Dispose of 'current' according to how its slice ended.  Queue
	 * insertions may fail on out-of-memory; that is collected into a
	 * flag and the process is killed after the switch. */
	int memory_exhausted = 0;
	switch (current->result.what)
	{
	case SLICE_RESULT_YIELD:
		/* Still runnable - back onto its priority queue. */
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		break;

	case SLICE_RESULT_WAIT:
		/* Blocked in receive: either forever or until a deadline. */
		if (current->result.until_when == LING_INFINITY)
		{
			if (proc_list_put_N(&queues.on_infinite_receive, current) < 0)
				memory_exhausted = 1;
			else
				current->my_queue = MY_QUEUE_INF_WAIT;
		}
		else
		{
			if (wait_list_put_N(&queues.on_timed_receive,
					current, current->result.until_when) < 0)
				memory_exhausted = 1;
			else
				current->my_queue = MY_QUEUE_TIMED_WAIT;
		}
		break;

	case SLICE_RESULT_DONE:
		/* The process ran to completion - normal exit. */
		scheduler_exit_process(current, A_NORMAL);
		break;

	case SLICE_RESULT_PURGE_PROCS:
		// purge_module() call may have detected processes lingering on the old
		// code - terminate them
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		for (int i = 0; i < num_purged; i++)
			if (scheduler_signal_exit_N(purgatory[i], current->pid, A_KILL) < 0)
				memory_exhausted = 1;
		num_purged = 0;
		break;

	case SLICE_RESULT_EXIT:
		scheduler_exit_process(current, current->result.reason);
		// what about the returned value when main function just returns?
		break;

	case SLICE_RESULT_EXIT2:
		// only needed to implement erlang:exit/2
		/* 'current' keeps running; the exit signal goes to the victim.
		 * Note the short-circuit: if parking fails, the signal is not
		 * sent. */
		if (scheduler_park_runnable_N(current) < 0 ||
			(scheduler_signal_exit_N(current->result.victim,
				current->pid, current->result.reason2) < 0))
			memory_exhausted = 1;
		break;

	case SLICE_RESULT_ERROR:
		scheduler_exit_process(current, current->result.reason);
		// how is this different from SLICE_RESULT_EXIT?
		break;

	case SLICE_RESULT_THROW:
		scheduler_exit_process(current, current->result.reason);
		// how is this different from SLICE_RESULT_EXIT?
		break;

	default:
	{
		/* The only remaining slice result is an outlet close request. */
		assert(current->result.what == SLICE_RESULT_OUTLET_CLOSE);
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		outlet_t *closing = current->result.closing;
		//assert(is_atom(current->result.why));
		outlet_close(closing, current->result.why);
		break;
	}
	}

	if (memory_exhausted)
		scheduler_exit_process(current, A_NO_MEMORY);

do_pending:
	ticks = monotonic_clock();

	/* Timeouts may have expired while we were busy above or while
	 * polling for events below - drain them again. */
	while ((expired = wait_list_expired(&queues.on_timed_receive, ticks)) != 0)
	{
		expired->cap.ip = expired->result.jump_to;
		if (scheduler_park_runnable_N(expired) < 0)
			scheduler_exit_process(expired, A_NO_MEMORY);
	}

	set_phase(PHASE_EVENTS);
	// software events/timeouts
	net_check_timeouts();
	etimer_expired(ticks);
	// 'hardware' events
	int nr_fired = events_do_pending();
	update_event_times(nr_fired, ticks);
	set_phase(PHASE_NEXT);

	// select_runnable
	/* High priority always wins.  Normal beats low for NORMAL_ADVANTAGE
	 * consecutive picks, then low gets preference once - prevents
	 * starvation of the low-priority queue. */
	if (!proc_queue_is_empty(&queues.high_prio))
		next_proc = proc_queue_get(&queues.high_prio);
	else if (normal_count < NORMAL_ADVANTAGE)
	{
		if (!proc_queue_is_empty(&queues.normal_prio))
			next_proc = proc_queue_get(&queues.normal_prio);
		else if (!proc_queue_is_empty(&queues.low_prio))
			next_proc = proc_queue_get(&queues.low_prio);
		normal_count++;
	}
	else
	{
		if (!proc_queue_is_empty(&queues.low_prio))
			next_proc = proc_queue_get(&queues.low_prio);
		else if (!proc_queue_is_empty(&queues.normal_prio))
			next_proc = proc_queue_get(&queues.normal_prio);
		normal_count = 0;
	}

	if (next_proc == 0)
	{
		// no runnable processes; poll for events from all three sources
		// Beware that events_poll() reports events 5us after they occur. If
		// a new event is expected very soon we are better off polling event
		// bits manually (using events_do_pending())

		// Devote a portion of time until the next event to gc waiting processes
		/* NOTE(review): expect_event_in_ns is never assigned in this
		 * function - presumably a file-scope variable maintained by
		 * update_event_times(); confirm. */
		garbage_collect_waiting_processes(expect_event_in_ns / 2);

		if (expect_event_in_ns < MANUAL_POLLING_THRESHOLD)
			goto do_pending;

		/* Sleep until the earliest of: nearest receive timeout, nearest
		 * Erlang timer, or nearest lwIP network timeout. */
		uint64_t next_ticks = wait_list_timeout(&queues.on_timed_receive);
		uint64_t closest_timeout = etimer_closest_timeout();
		if (closest_timeout < next_ticks)
			next_ticks = closest_timeout;
		closest_timeout = lwip_closest_timeout();
		if (closest_timeout < next_ticks)
			next_ticks = closest_timeout;
		scheduler_runtime_update();
		events_poll(next_ticks); // LING_INFINITY is big enough
		scheduler_runtime_start();
		goto do_pending;
	}

	/* The chosen process is off its queue and about to run. */
	next_proc->my_queue = MY_QUEUE_NONE;

	//TODO: update stats

#ifdef PROFILE_HARNESS
	/* Remember when this slice starts for the next prof_slice_complete. */
	proc_started_ns = ticks;
#endif

	set_phase(PHASE_ERLANG);
	return next_proc;
}