//------------------------------------------------------------------------------ //"sc_method_process::enable_process" // // This method enables the execution of this process, and if requested, its // descendants. If the process was suspended and has a resumption pending it // will be dispatched in the next delta cycle. Otherwise the state will be // adjusted to indicate it is no longer suspended, but no immediate execution // will occur. //------------------------------------------------------------------------------ void sc_method_process::enable_process( sc_descendant_inclusion_info descendants ) { // IF NEEDED PROPOGATE THE RESUME REQUEST THROUGH OUR DESCENDANTS: if ( descendants == SC_INCLUDE_DESCENDANTS ) { const std::vector<sc_object*>& children = get_child_objects(); int child_n = children.size(); for ( int child_i = 0; child_i < child_n; child_i++ ) { sc_process_b* child_p = DCAST<sc_process_b*>(children[child_i]); if ( child_p ) child_p->enable_process(descendants); } } // ENABLE THIS OBJECT INSTANCE: // // If it was disabled and ready to run then put it on the run queue. m_state = m_state & ~ps_bit_disabled; if ( m_state == ps_bit_ready_to_run ) { m_state = ps_normal; if ( next_runnable() == 0 ) simcontext()->push_runnable_method(this); } }
//------------------------------------------------------------------------------ //"sc_method_process::kill_process" // // This method removes throws a kill for this object instance. It calls the // sc_process_b::kill_process() method to perform low level clean up. //------------------------------------------------------------------------------ void sc_method_process::kill_process(sc_descendant_inclusion_info descendants) { // IF THE SIMULATION HAS NOT BEEN INITIALIZED YET THAT IS AN ERROR: if ( sc_get_status() == SC_ELABORATION ) { report_error( SC_ID_KILL_PROCESS_WHILE_UNITIALIZED_ ); } // IF NEEDED, PROPOGATE THE KILL REQUEST THROUGH OUR DESCENDANTS: if ( descendants == SC_INCLUDE_DESCENDANTS ) { const std::vector<sc_object*> children = get_child_objects(); int child_n = children.size(); for ( int child_i = 0; child_i < child_n; child_i++ ) { sc_process_b* child_p = DCAST<sc_process_b*>(children[child_i]); if ( child_p ) child_p->kill_process(descendants); } } // IF THE PROCESS IS CURRENTLY UNWINDING OR IS ALREADY A ZOMBIE // IGNORE THE KILL: if ( m_unwinding ) { SC_REPORT_WARNING( SC_ID_PROCESS_ALREADY_UNWINDING_, name() ); return; } if ( m_state & ps_bit_zombie ) return; // REMOVE OUR PROCESS FROM EVENTS, ETC., AND IF ITS THE ACTIVE PROCESS // THROW ITS KILL. // // Note we set the throw status to kill regardless if we throw or not. // That lets check_for_throws stumble across it if we were in the call // chain when the kill call occurred. if ( next_runnable() != 0 ) simcontext()->remove_runnable_method( this ); disconnect_process(); m_throw_status = THROW_KILL; if ( sc_get_current_process_b() == this ) { throw sc_unwind_exception( this, false ); } }
//------------------------------------------------------------------------------ //"sc_method_process::suspend_process" // // This virtual method suspends this process and its children if requested to. // descendants = indicator of whether this process' children should also // be suspended //------------------------------------------------------------------------------ void sc_method_process::suspend_process( sc_descendant_inclusion_info descendants ) { // IF NEEDED PROPOGATE THE SUSPEND REQUEST THROUGH OUR DESCENDANTS: if ( descendants == SC_INCLUDE_DESCENDANTS ) { const std::vector<sc_object*>& children = get_child_objects(); int child_n = children.size(); for ( int child_i = 0; child_i < child_n; child_i++ ) { sc_process_b* child_p = DCAST<sc_process_b*>(children[child_i]); if ( child_p ) child_p->suspend_process(descendants); } } // CORNER CASE CHECKS, THE FOLLOWING ARE ERRORS: // (a) if this method has a reset_signal_is specification // (b) if this method is in synchronous reset if ( !sc_allow_process_control_corners && m_has_reset_signal ) { report_error(SC_ID_PROCESS_CONTROL_CORNER_CASE_, "attempt to suspend a method that has a reset signal"); } else if ( !sc_allow_process_control_corners && m_sticky_reset ) { report_error(SC_ID_PROCESS_CONTROL_CORNER_CASE_, "attempt to suspend a method in synchronous reset"); } // SUSPEND OUR OBJECT INSTANCE: // // (1) If we are on the runnable queue then set suspended and ready_to_run, // and remove ourselves from the run queue. // (2) If this is a self-suspension then a resume should cause immediate // scheduling of the process. m_state = m_state | ps_bit_suspended; if ( next_runnable() != 0 ) { m_state = m_state | ps_bit_ready_to_run; simcontext()->remove_runnable_method( this ); } if ( sc_get_current_process_b() == DCAST<sc_process_b*>(this) ) { m_state = m_state | ps_bit_ready_to_run; } }
/*
 * Initialization.
 * Initializes reaper and idle threads, starts and initializes main thread.
 * Also creates the scheduler and other data.
 *
 * mainproc = procedure run by the main thread
 * mainarg  = argument handed to mainproc
 *
 * This function never returns to its caller: after switching to the main
 * thread it falls into the kernel-level idle loop below.
 */
void minithread_system_initialize(proc_t mainproc, arg_t mainarg) {
    void* dequeued = NULL;  /* scratch out-parameter for the dequeue below */

    /* Allocate room for schedule data (global). */
    schedule_data = (scheduler *) malloc(sizeof(scheduler));
    if (schedule_data == NULL) {
        exit(1); /* OOM */
    }
    schedule_data->cleanup_queue = queue_new();
    schedule_data->multi_run_queue = multilevel_queue_new(num_levels);
    /* Check the queue allocations too, not just the scheduler struct. */
    if (schedule_data->cleanup_queue == NULL ||
        schedule_data->multi_run_queue == NULL) {
        exit(1); /* OOM */
    }

    reaper_sema = semaphore_create();
    semaphore_initialize(reaper_sema, 0);

    /* Create main thread (presumably minithread_fork also makes it
       runnable; the dequeue below removes it again — TODO confirm). */
    minithread_t* main_thread = minithread_fork(mainproc, mainarg);

    /* Initialize idle thread; it never runs user code, so it has no real
       stack until the first switch saves context into it. */
    idle_thread = (minithread_t *) malloc(sizeof(minithread_t));
    if (idle_thread == NULL) {
        exit(1); /* OOM */
    }
    idle_thread->stacktop = NULL;
    idle_thread->thread_id = -1;

    /* Initialize alarm bookkeeping data structure (priority queue). */
    alarm_init();

    /* Make the main thread current and remove it from the run queue.
       BUG FIX: the old code passed (void *) main_thread as the dequeue
       out-parameter, so the queue would write the dequeued element over
       the first word of the thread struct. Pass a scratch pointer and
       discard the result instead (the element is main_thread itself). */
    schedule_data->running_thread = main_thread;
    main_thread->status = RUNNING;
    multilevel_queue_dequeue(schedule_data->multi_run_queue, 0, &dequeued);

    /* Reaper thread init: cleans up exited threads' resources. */
    reaper_thread = minithread_create(reaper_queue_cleanup, NULL);
    minithread_start(reaper_thread);

    /* Start clock. */
    minithread_clock_init(clock_period, clock_handler);

    /* Initialize network. */
    network_initialize(network_handler);

    /* START MAIN PROC.
       minithread_switch also enables clock interrupts. */
    minithread_switch(&idle_thread->stacktop, &main_thread->stacktop);

    /* Always comes back here to idle in the kernel level (allows freeing
       resources). NOTE(review): next_runnable() is called with interrupts
       enabled and its result is dereferenced unconditionally — it is
       assumed to block/spin until a thread is available; confirm. */
    while (1) {
        minithread_t* next = next_runnable();
        set_interrupt_level(DISABLED);
        next->status = RUNNING;
        schedule_data->running_thread = next;
        minithread_switch(&idle_thread->stacktop, &next->stacktop);
    }
}
//------------------------------------------------------------------------------ //"sc_method_process::resume_process" // // This method resumes the execution of this process, and if requested, its // descendants. If the process was suspended and has a resumption pending it // will be dispatched in the next delta cycle. Otherwise the state will be // adjusted to indicate it is no longer suspended, but no immediate execution // will occur. //------------------------------------------------------------------------------ void sc_method_process::resume_process( sc_descendant_inclusion_info descendants ) { // IF NEEDED PROPOGATE THE RESUME REQUEST THROUGH OUR DESCENDANTS: if ( descendants == SC_INCLUDE_DESCENDANTS ) { const std::vector<sc_object*>& children = get_child_objects(); int child_n = children.size(); for ( int child_i = 0; child_i < child_n; child_i++ ) { sc_process_b* child_p = DCAST<sc_process_b*>(children[child_i]); if ( child_p ) child_p->resume_process(descendants); } } // BY DEFAULT THE CORNER CASE IS AN ERROR: if ( !sc_allow_process_control_corners && (m_state & ps_bit_disabled) && (m_state & ps_bit_suspended) ) { m_state = m_state & ~ps_bit_suspended; report_error( SC_ID_PROCESS_CONTROL_CORNER_CASE_, "call to resume() on a disabled suspended method"); } // CLEAR THE SUSPENDED BIT: m_state = m_state & ~ps_bit_suspended; // RESUME OBJECT INSTANCE: // // If this is not a self-resume and the method is ready to run then // put it on the runnable queue. if ( m_state & ps_bit_ready_to_run ) { m_state = m_state & ~ps_bit_ready_to_run; if ( next_runnable() == 0 && ( sc_get_current_process_b() != DCAST<sc_process_b*>(this) ) ) { simcontext()->push_runnable_method(this); remove_dynamic_events(); } } }
/*
 * Voluntarily gives up the processor: the calling thread re-enqueues itself
 * at the tail of its current level of the multilevel run queue and switches
 * to the next runnable thread. If no other thread is runnable the call is a
 * no-op and the caller keeps running.
 */
void minithread_yield() {
    /* Disable interrupts so the queue inspection and the switch below are
       atomic with respect to the clock/network handlers. */
    interrupt_level_t old_level = set_interrupt_level(DISABLED);
    if (multilevel_queue_total_elements(schedule_data->multi_run_queue) == 0) {
        /* Nobody to yield to: restore the previous interrupt level and
           continue running. */
        set_interrupt_level(old_level);
        return;
    }
    minithread_t* old = schedule_data->running_thread;
    old->status = RUNNABLE;

    /* Adds itself to the end of the run queue (same level it was at). */
    multilevel_queue_enqueue(schedule_data->multi_run_queue, old->level, old);

    /* Yields to the next minithread in the run queue. Interrupts are not
       explicitly restored here: minithread_switch re-enables them (see the
       system-initialization comment). */
    minithread_t* next = next_runnable();
    next->status = RUNNING;
    schedule_data->running_thread = next;
    minithread_switch(&old->stacktop, &next->stacktop);
}
/* Stops execution of the current minithread (marking it WAITING — it is NOT
 * re-enqueued, so something else must later make it runnable again), then
 * proceeds to get the next RUNNABLE minithread and begins execution from
 * where that minithread left off. If nothing is runnable, control switches
 * to the kernel-level idle thread instead.
 */
void minithread_stop() {
    /* Keep the pick-next/switch sequence atomic. Interrupts are re-enabled
       by minithread_switch (see the system-initialization comment). */
    set_interrupt_level(DISABLED);
    minithread_t* curr = schedule_data->running_thread;
    curr->status = WAITING;
    /* Default to the idle thread; overwritten below if a real thread is
       runnable. */
    schedule_data->running_thread = idle_thread;
    if (multilevel_queue_total_elements(schedule_data->multi_run_queue) > 0) {
        minithread_t* next = next_runnable();
        next->status = RUNNING;
        schedule_data->running_thread = next;
        /* Switch to the new RUNNING minithread. */
        minithread_switch(&curr->stacktop, &next->stacktop);
    } else {
        /* Idle in the kernel level. */
        minithread_switch(&curr->stacktop, &idle_thread->stacktop);
    }
}