Code example #1
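NodeManager::prepare_nodes() initializes the buffers of all thread-local nodes and calibrates them before a simulation starts, counting how many active nodes (and how many waveform-relaxation nodes) will be updated; post_run_cleanup() gives every node a chance to clean up after a run. Both iterate over each thread's nodes in an OpenMP parallel region when OpenMP is available and fall back to a serial loop over threads otherwise.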
void
NodeManager::prepare_nodes()
{
  assert( kernel().is_initialized() );

  /* We initialize each node's buffers and calibrate it. */

  size_t num_active_nodes = 0;     // counts nodes that will be updated
  size_t num_active_wfr_nodes = 0; // counts nodes that use waveform relaxation

  std::vector< lockPTR< WrappedThreadException > > exceptions_raised(
    kernel().vp_manager.get_num_threads() );

#ifdef _OPENMP
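// The reduction clause gives each thread a private copy of the two
// counters and adds them to the shared totals when the region ends.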
#pragma omp parallel reduction( + : num_active_nodes, num_active_wfr_nodes )
  {
    size_t t = kernel().vp_manager.get_thread_id();
#else
    for ( index t = 0; t < kernel().vp_manager.get_num_threads(); ++t )
    {
#endif

    // We prepare nodes in a parallel region. Therefore, we need to catch
    // exceptions here and then handle them after the parallel region.
    try
    {
      for ( std::vector< Node* >::iterator it = nodes_vec_[ t ].begin();
            it != nodes_vec_[ t ].end();
            ++it )
      {
        prepare_node_( *it );
        if ( not( *it )->is_frozen() )
        {
          ++num_active_nodes;
          if ( ( *it )->node_uses_wfr() )
          {
            ++num_active_wfr_nodes;
          }
        }
      }
    }
    catch ( std::exception& e )
    {
      // store the exception; it is rethrown after the parallel region
      exceptions_raised.at( t ) =
        lockPTR< WrappedThreadException >( new WrappedThreadException( e ) );
    }

  } // end of parallel section / end of for threads

  // check if any exceptions have been raised
  for ( index thr = 0; thr < kernel().vp_manager.get_num_threads(); ++thr )
  {
    if ( exceptions_raised.at( thr ).valid() )
    {
      throw WrappedThreadException( *( exceptions_raised.at( thr ) ) );
    }
  }

  std::ostringstream os;
  std::string tmp_str = num_active_nodes == 1 ? " node" : " nodes";
  os << "Preparing " << num_active_nodes << tmp_str << " for simulation.";

  if ( num_active_wfr_nodes != 0 )
  {
    tmp_str = num_active_wfr_nodes == 1 ? " uses " : " use ";
    os << " " << num_active_wfr_nodes << " of them" << tmp_str
       << "iterative solution techniques.";
  }

  num_active_nodes_ = num_active_nodes;
  LOG( M_INFO, "NodeManager::prepare_nodes", os.str() );
}

void
NodeManager::post_run_cleanup()
{
#ifdef _OPENMP
#pragma omp parallel
  {
    index t = kernel().vp_manager.get_thread_id();
#else // clang-format off
  for ( index t = 0; t < kernel().vp_manager.get_num_threads(); ++t )
  {
#endif // clang-format on
    for ( size_t idx = 0; idx < local_nodes_.size(); ++idx )
    {
      Node* node = local_nodes_.get_node_by_index( idx );
      if ( node != 0 )
      {
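        // A node replicated across threads exposes per-thread siblings,
        // and each thread cleans up its own sibling; otherwise only the
        // thread that owns the node performs the cleanup.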
        if ( node->num_thread_siblings() > 0 )
        {
          node->get_thread_sibling( t )->post_run_cleanup();
        }
        else
        {
          if ( static_cast< index >( node->get_thread() ) == t )
          {
            node->post_run_cleanup();
          }
        }
      }
    }
  }
}
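
Both functions above rely on the same OpenMP idiom: an exception must not escape a parallel region, so each thread stores what it caught and the sequential code rethrows it afterwards. The listing uses NEST's lockPTR and WrappedThreadException helpers for this; a minimal, self-contained sketch of the same pattern using only the standard library (std::exception_ptr from C++11; per_thread_work and parallel_with_exceptions are illustrative names, not NEST API) could look like this:

#include <exception>
#include <stdexcept>
#include <vector>

#include <omp.h>

// Illustrative stand-in for the per-thread body, e.g. the prepare_node_ loop.
void
per_thread_work( int t )
{
  if ( t == 0 )
  {
    throw std::runtime_error( "node preparation failed" );
  }
}

void
parallel_with_exceptions()
{
  // one slot per thread, like exceptions_raised above
  std::vector< std::exception_ptr > exceptions_raised( omp_get_max_threads() );

#pragma omp parallel
  {
    const int t = omp_get_thread_num();
    try
    {
      per_thread_work( t );
    }
    catch ( ... )
    {
      // letting the exception propagate out of the parallel region would
      // terminate the program, so store it and rethrow sequentially below
      exceptions_raised[ t ] = std::current_exception();
    }
  }

  // back on a single thread: rethrow the first stored exception
  for ( const std::exception_ptr& ep : exceptions_raised )
  {
    if ( ep )
    {
      std::rethrow_exception( ep );
    }
  }
}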
Code example #2
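nest::SimulationManager::update_() is the main simulation loop. Each pass over the do-while body advances one slice: it updates structural plasticity at the configured interval, delivers events at the beginning of the slice (advancing MUSIC time where enabled), runs preliminary waveform-relaxation iterations until all threads report convergence, updates all non-frozen nodes, and finally gathers events and advances the clock in a master section. As in the first example, exceptions are stored per thread and rethrown after the parallel region.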
void
nest::SimulationManager::update_()
{
    // stores the done values of the individual threads
    std::vector< bool > done;
    bool done_all = true;
    delay old_to_step;

    std::vector< lockPTR< WrappedThreadException > > exceptions_raised(
        kernel().vp_manager.get_num_threads() );
// parallel section begins
    #pragma omp parallel
    {
        const int thrd = kernel().vp_manager.get_thread_id();

        do
        {
            if ( print_time_ )
                gettimeofday( &t_slice_begin_, NULL );

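            // Structural plasticity: at the configured interval, first let
            // every node update its synaptic elements, then have a single
            // thread create and delete connections globally.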
            if ( kernel().sp_manager.is_structural_plasticity_enabled()
                    && ( clock_.get_steps() + from_step_ )
                    % kernel().sp_manager.get_structural_plasticity_update_interval()
                    == 0 )
            {
                for ( std::vector< Node* >::const_iterator i =
                            kernel().node_manager.get_nodes_on_thread( thrd ).begin();
                        i != kernel().node_manager.get_nodes_on_thread( thrd ).end();
                        ++i )
                {
                    ( *i )->update_synaptic_elements(
                        Time( Time::step( clock_.get_steps() + from_step_ ) ).get_ms() );
                }
                #pragma omp barrier
                #pragma omp single
                {
                    kernel().sp_manager.update_structural_plasticity();
                }
                // Remove 10% of the vacant elements
                for ( std::vector< Node* >::const_iterator i =
                            kernel().node_manager.get_nodes_on_thread( thrd ).begin();
                        i != kernel().node_manager.get_nodes_on_thread( thrd ).end();
                        ++i )
                {
                    ( *i )->decay_synaptic_elements_vacant();
                }
            }


            if ( from_step_ == 0 ) // deliver only at beginning of slice
            {
                kernel().event_delivery_manager.deliver_events( thrd );
#ifdef HAVE_MUSIC
// wait until all threads are done -> synchronize
                #pragma omp barrier
// the following block is executed by the master thread only
// the other threads are enforced to wait at the end of the block
                #pragma omp master
                {
                    // advance the time of music by one step (min_delay * h) must
                    // be done after deliver_events_() since it calls
                    // music_event_out_proxy::handle(), which hands the spikes over to
                    // MUSIC *before* MUSIC time is advanced
                    if ( slice_ > 0 )
                        kernel().music_manager.advance_music_time();

                    // the following could be made thread-safe
                    kernel().music_manager.update_music_event_handlers(
                        clock_, from_step_, to_step_ );
                }
// end of master section, all threads have to synchronize at this point
                #pragma omp barrier
#endif
            }

            // preliminary update of nodes that use waveform relaxation
            if ( kernel().node_manager.any_node_uses_wfr() )
            {
                #pragma omp single
                {
                    // if the end of the simulation is in the middle
                    // of a min_delay_ step, we need to make a complete
                    // step in the wfr_update and only do
                    // the partial step in the final update
                    // needs to be done in omp single since to_step_ is a scheduler
                    // variable
                    old_to_step = to_step_;
                    if ( to_step_ < kernel().connection_manager.get_min_delay() )
                        to_step_ = kernel().connection_manager.get_min_delay();
                }

                bool max_iterations_reached = true;
                const std::vector< Node* >& thread_local_wfr_nodes =
                    kernel().node_manager.get_wfr_nodes_on_thread( thrd );
                for ( long_t n = 0; n < wfr_max_iterations_; ++n )
                {
                    bool done_p = true;

                    // this loop may be empty for those threads
                    // that do not have any nodes requiring wfr_update
                    for ( std::vector< Node* >::const_iterator i =
                                thread_local_wfr_nodes.begin();
                            i != thread_local_wfr_nodes.end();
                            ++i )
                        done_p = wfr_update_( *i ) && done_p;

// add this thread's done value to the shared done vector
                    #pragma omp critical
                    done.push_back( done_p );
// wait until all threads are done -> synchronize
                    #pragma omp barrier

// the following block is executed by a single thread
// the other threads wait at the end of the block
                    #pragma omp single
                    {
                        // set done_all
                        for ( size_t i = 0; i < done.size(); i++ )
                            done_all = done[ i ] && done_all;

                        // gather SecondaryEvents (e.g. GapJunctionEvents)
                        kernel().event_delivery_manager.gather_events( done_all );

                        // reset done and done_all
                        //(needs to be in the single threaded part)
                        done_all = true;
                        done.clear();
                    }

                    // deliver SecondaryEvents generated during wfr_update
                    // returns the done value over all threads
                    done_p = kernel().event_delivery_manager.deliver_events( thrd );

                    if ( done_p )
                    {
                        max_iterations_reached = false;
                        break;
                    }
                } // of for (wfr_max_iterations) ...

                #pragma omp single
                {
                    to_step_ = old_to_step;
                    if ( max_iterations_reached )
                    {
                        std::string msg = String::compose(
                                              "Maximum number of iterations reached at interval %1-%2 ms",
                                              clock_.get_ms(),
                                              clock_.get_ms() + to_step_ * Time::get_resolution().get_ms() );
                        LOG( M_WARNING, "SimulationManager::wfr_update", msg );
                    }
                }

            } // of if(any_node_uses_wfr)
            // end of preliminary update

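            // Main update: advance every non-frozen node on this thread
            // from from_step_ to to_step_ of the current slice.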
            const std::vector< Node* >& thread_local_nodes =
                kernel().node_manager.get_nodes_on_thread( thrd );
            for (
                std::vector< Node* >::const_iterator node = thread_local_nodes.begin();
                node != thread_local_nodes.end();
                ++node )
            {
                // We update in a parallel region. Therefore, we need to catch
                // exceptions here and then handle them after the parallel region.
                try
                {
                    if ( not( *node )->is_frozen() )
                        ( *node )->update( clock_, from_step_, to_step_ );
                }
                catch ( std::exception& e )
                {
                    // store the exception; it is rethrown after the parallel region
                    exceptions_raised.at( thrd ) = lockPTR< WrappedThreadException >(
                                                       new WrappedThreadException( e ) );
                    terminate_ = true;
                }
            }

// wait until all threads are done -> synchronize
            #pragma omp barrier

// the following block is executed by the master thread only
// the other threads are enforced to wait at the end of the block
            #pragma omp master
            {
                // gather only at end of slice
                if ( to_step_ == kernel().connection_manager.get_min_delay() )
                    kernel().event_delivery_manager.gather_events( true );

                advance_time_();

                if ( SLIsignalflag != 0 )
                {
                    LOG( M_INFO,
                         "SimulationManager::update",
                         "Simulation exiting on user signal." );
                    terminate_ = true;
                }

                if ( print_time_ )
                {
                    gettimeofday( &t_slice_end_, NULL );
                    print_progress_();
                }
            }
// end of master section, all threads have to synchronize at this point
            #pragma omp barrier

        } while ( ( to_do_ != 0 ) && ( !terminate_ ) );

        // At the end of the slice, we update the number of synaptic elements.
        for ( std::vector< Node* >::const_iterator i =
                    kernel().node_manager.get_nodes_on_thread( thrd ).begin();
                i != kernel().node_manager.get_nodes_on_thread( thrd ).end();
                ++i )
        {
            ( *i )->update_synaptic_elements(
                Time( Time::step( clock_.get_steps() + to_step_ ) ).get_ms() );
        }

    } // end of #pragma omp parallel

    // check if any exceptions have been raised
    for ( index thrd = 0; thrd < kernel().vp_manager.get_num_threads(); ++thrd )
        if ( exceptions_raised.at( thrd ).valid() )
            throw WrappedThreadException( *( exceptions_raised.at( thrd ) ) );
}
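
The waveform-relaxation block combines three OpenMP building blocks: a per-thread convergence flag, an omp critical push into a shared vector, and an omp single section that folds the flags into one global decision (the implicit barrier at the end of omp single publishes the result to all threads before any of them tests it). A stripped-down, self-contained sketch of that convergence loop, with local_sweep_converged() standing in for the per-node wfr_update_ calls (all names here are illustrative, not NEST API), could look like this:

#include <cstdio>
#include <vector>

#include <omp.h>

// Illustrative stand-in for one relaxation sweep over a thread's nodes;
// returns true once this thread's nodes have converged.
bool
local_sweep_converged( int iteration )
{
  return iteration >= 3; // pretend convergence after four sweeps
}

int
main()
{
  const int max_iterations = 10;
  std::vector< bool > done; // collects one flag per thread and iteration
  bool done_all = true;     // fold of the per-thread flags
  bool converged = false;   // shared loop-exit decision

#pragma omp parallel
  {
    for ( int n = 0; n < max_iterations; ++n )
    {
      const bool done_p = local_sweep_converged( n );

// every thread appends its flag; omp critical serializes the push_back
#pragma omp critical
      done.push_back( done_p );
#pragma omp barrier

// a single thread folds the flags and resets the shared state
#pragma omp single
      {
        done_all = true;
        for ( size_t i = 0; i < done.size(); ++i )
        {
          done_all = done[ i ] && done_all;
        }
        converged = done_all;
        done.clear();
      } // implicit barrier here publishes 'converged' to all threads

      if ( converged )
      {
        break; // every thread sees the same value, so all leave together
      }
    }
  }

  std::printf( "converged: %s\n", converged ? "yes" : "no" );
  return 0;
}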