/*
 * Unlinks the given pcb from an arbitrary position in the queue.
 *
 * Returns the removed pcb (with its next pointer cleared), or NULL when
 * queue/pcb is NULL, the queue is empty, or the pcb is not in the queue.
 */
pcb_t * proc_queue_remove(proc_queue_t * queue, pcb_t *pcb)
{
	if (queue == NULL || pcb == NULL || proc_queue_is_empty(queue))
		return NULL;

	// removing the head is exactly a dequeue
	if (queue->head == pcb)
		return proc_queue_dequeue(queue);

	pcb_t *prev_node = queue->head;
	pcb_t *curr_node = queue->head->next;
	while (curr_node)
	{
		if (curr_node == pcb)
		{
			prev_node->next = curr_node->next;
			// BUG FIX: when the removed node is the tail, the tail must
			// be pulled back to its predecessor; otherwise a later
			// enqueue appends through the detached node's next pointer
			// and corrupts the queue.
			if (queue->tail == curr_node)
				queue->tail = prev_node;
			curr_node->next = NULL;
			return curr_node;
		}
		prev_node = curr_node;
		curr_node = curr_node->next;
	}

	// pcb was not linked into this queue
	return NULL;
}
/*
 * Detaches and returns the head of the queue; NULL when the queue is
 * NULL or empty. The returned pcb has its next pointer cleared.
 */
pcb_t * proc_queue_dequeue(proc_queue_t* queue)
{
	if (queue == NULL || proc_queue_is_empty(queue))
		return NULL;

	pcb_t * pcb = queue->head;
	queue->head = pcb->next;

	// BUG FIX: the original tested 'queue->head == NULL' *before*
	// advancing the head, which is unreachable after the non-empty
	// check above; the tail was left dangling on the removed node when
	// the last element was dequeued. Reset the tail once the queue
	// becomes empty.
	if (queue->head == NULL)
		queue->tail = NULL;

	pcb->next = NULL;
	return pcb;
}
/*
 * Appends pcb at the tail of the queue. The pcb must not already be
 * linked into a queue (its next pointer must be NULL).
 *
 * Returns CODE_SUCCESS, or ERROR_NULL_ARG when either argument is NULL.
 */
int proc_queue_enqueue(proc_queue_t * queue, pcb_t * pcb)
{
	if (!queue || !pcb)
		return ERROR_NULL_ARG;

	assert(pcb->next == NULL);

	// link after the current tail, or install as the head of an empty queue
	pcb_t *old_tail = proc_queue_is_empty(queue) ? NULL : queue->tail;
	if (old_tail)
		old_tail->next = pcb;
	else
		queue->head = pcb;

	queue->tail = pcb;
	pcb->next = NULL;	// defensive: keep the new tail terminated

	return CODE_SUCCESS;
}
/*
 * Selects the next process to run.
 *
 * 'current' has just finished its time slice with 'reds_left' reductions
 * unspent; current->result.what encodes why the slice ended and drives the
 * dispatch below (re-park, put on a wait list, or terminate). The function
 * then fires due timers/events and picks a runnable process, giving the
 * normal priority queue an advantage of NORMAL_ADVANTAGE picks over the
 * low priority queue. When nothing is runnable it polls for events
 * (events_poll) and loops via do_pending until a process becomes runnable,
 * so it never returns NULL.
 */
proc_t *scheduler_next(proc_t *current, int reds_left)
{
	set_phase(PHASE_NEXT);

	// account the reductions actually consumed by the finished slice
	uint32_t reds_used = SLICE_REDUCTIONS - reds_left;
	ssi(SYS_STATS_CTX_SWITCHES);
	ssa(SYS_STATS_REDUCTIONS, reds_used);
	current->total_reds += reds_used;

	proc_t *next_proc = 0;
	uint64_t ticks = monotonic_clock(); // freeze time

	// the caller must have unlinked 'current' from any scheduler queue
	assert(current->my_queue == MY_QUEUE_NONE);

#ifdef PROFILE_HARNESS
	static uint64_t proc_started_ns = 0;
	if (proc_started_ns != 0)
		prof_slice_complete(current->pid,
			current->result.what, current->cap.ip, proc_started_ns, ticks);
#endif

	// wake processes whose receive timeout has fired: redirect them to
	// their timeout continuation and make them runnable
	proc_t *expired;
	while ((expired = wait_list_expired(&queues.on_timed_receive, ticks)) != 0)
	{
		expired->cap.ip = expired->result.jump_to;
		if (scheduler_park_runnable_N(expired) < 0)
			scheduler_exit_process(expired, A_NO_MEMORY);
	}

	// dispatch on the reason the slice ended; any _N call may fail on
	// out-of-memory, which is collected into memory_exhausted and
	// handled once after the switch
	int memory_exhausted = 0;
	switch (current->result.what)
	{
	case SLICE_RESULT_YIELD:
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		break;

	case SLICE_RESULT_WAIT:
		// receive with no timeout goes to the infinite-wait list,
		// otherwise to the timed wait list keyed by the deadline
		if (current->result.until_when == LING_INFINITY)
		{
			if (proc_list_put_N(&queues.on_infinite_receive, current) < 0)
				memory_exhausted = 1;
			else
				current->my_queue = MY_QUEUE_INF_WAIT;
		}
		else
		{
			if (wait_list_put_N(&queues.on_timed_receive,
						current, current->result.until_when) < 0)
				memory_exhausted = 1;
			else
				current->my_queue = MY_QUEUE_TIMED_WAIT;
		}
		break;

	case SLICE_RESULT_DONE:
		scheduler_exit_process(current, A_NORMAL);
		break;

	case SLICE_RESULT_PURGE_PROCS:
		// purge_module() call may have detected processes lingering on the old
		// code - terminate them
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		for (int i = 0; i < num_purged; i++)
			if (scheduler_signal_exit_N(purgatory[i],
						current->pid, A_KILL) < 0)
				memory_exhausted = 1;
		num_purged = 0;
		break;

	case SLICE_RESULT_EXIT:
		scheduler_exit_process(current, current->result.reason);
		// what about the returned value when main function just returns?
		break;

	case SLICE_RESULT_EXIT2:
		// only needed to implement erlang:exit/2
		if (scheduler_park_runnable_N(current) < 0 ||
				(scheduler_signal_exit_N(current->result.victim,
					current->pid, current->result.reason2) < 0))
			memory_exhausted = 1;
		break;

	case SLICE_RESULT_ERROR:
		scheduler_exit_process(current, current->result.reason);
		// how is this different from SLICE_RESULT_EXIT?
		break;

	case SLICE_RESULT_THROW:
		scheduler_exit_process(current, current->result.reason);
		// how is this different from SLICE_RESULT_EXIT?
		break;

	default:
	{
		assert(current->result.what == SLICE_RESULT_OUTLET_CLOSE);
		if (scheduler_park_runnable_N(current) < 0)
			memory_exhausted = 1;
		outlet_t *closing = current->result.closing;
		//assert(is_atom(current->result.why));
		outlet_close(closing, current->result.why);
		break;
	}
	}

	if (memory_exhausted)
		scheduler_exit_process(current, A_NO_MEMORY);

do_pending:
	// re-read the clock and drain any receive timeouts that fired while
	// we were dispatching or polling
	ticks = monotonic_clock();
	while ((expired = wait_list_expired(&queues.on_timed_receive, ticks)) != 0)
	{
		expired->cap.ip = expired->result.jump_to;
		if (scheduler_park_runnable_N(expired) < 0)
			scheduler_exit_process(expired, A_NO_MEMORY);
	}

	set_phase(PHASE_EVENTS);
	// software events/timeouts
	net_check_timeouts();
	etimer_expired(ticks);
	// 'hardware' events
	int nr_fired = events_do_pending();
	update_event_times(nr_fired, ticks);
	set_phase(PHASE_NEXT);

	// select_runnable: high priority always wins; normal gets
	// NORMAL_ADVANTAGE consecutive picks before low gets a turn
	if (!proc_queue_is_empty(&queues.high_prio))
		next_proc = proc_queue_get(&queues.high_prio);
	else if (normal_count < NORMAL_ADVANTAGE)
	{
		if (!proc_queue_is_empty(&queues.normal_prio))
			next_proc = proc_queue_get(&queues.normal_prio);
		else if (!proc_queue_is_empty(&queues.low_prio))
			next_proc = proc_queue_get(&queues.low_prio);
		normal_count++;
	}
	else
	{
		if (!proc_queue_is_empty(&queues.low_prio))
			next_proc = proc_queue_get(&queues.low_prio);
		else if (!proc_queue_is_empty(&queues.normal_prio))
			next_proc = proc_queue_get(&queues.normal_prio);
		normal_count = 0;
	}

	if (next_proc == 0)
	{
		// no runnable processes; poll for events from all three sources

		// Beware that events_poll() reports events 5us after they occur. If
		// a new event is expected very soon we are better off polling event
		// bits manually (using events_do_pending())

		// Devote a portion of time until the next event to gc waiting processes
		// NOTE(review): expect_event_in_ns is not defined in this view —
		// presumably a file-scope value maintained by update_event_times();
		// confirm it is refreshed before each pass through this branch.
		garbage_collect_waiting_processes(expect_event_in_ns / 2);
		if (expect_event_in_ns < MANUAL_POLLING_THRESHOLD)
			goto do_pending;

		// sleep until the earliest of: receive timeouts, erlang timers,
		// lwip (network stack) timeouts
		uint64_t next_ticks = wait_list_timeout(&queues.on_timed_receive);
		uint64_t closest_timeout = etimer_closest_timeout();
		if (closest_timeout < next_ticks)
			next_ticks = closest_timeout;
		closest_timeout = lwip_closest_timeout();
		if (closest_timeout < next_ticks)
			next_ticks = closest_timeout;
		scheduler_runtime_update();
		events_poll(next_ticks);	// LING_INFINITY is big enough
		scheduler_runtime_start();
		goto do_pending;
	}

	next_proc->my_queue = MY_QUEUE_NONE;

	//TODO: update stats

#ifdef PROFILE_HARNESS
	proc_started_ns = ticks;
#endif

	set_phase(PHASE_ERLANG);
	return next_proc;
}