void
nest::SimulationManager::cleanup()
{
  if ( not prepared_ )
  {
    std::string msg = "Cleanup called without calling Prepare.";
    LOG( M_ERROR, "SimulationManager::cleanup", msg );
    throw KernelException();
  }

  if ( not simulated_ )
  {
    return;
  }

  // Check for synchronicity of global rngs over processes
  if ( kernel().mpi_manager.get_num_processes() > 1 )
  {
    if ( not kernel().mpi_manager.grng_synchrony(
           kernel().rng_manager.get_grng()->ulrand( 100000 ) ) )
    {
      throw KernelException(
        "In SimulationManager::cleanup(): "
        "Global Random Number Generators are not "
        "in sync at end of simulation." );
    }
  }

  kernel().node_manager.finalize_nodes();
  prepared_ = false;
}
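
The grng_synchrony() check above verifies that every MPI rank has drawn the same value from the global RNG. A minimal standalone sketch of that idea, using plain MPI rather than NEST's MpiManager (function name and message text below are illustrative, not NEST's):

// Sketch only: a value is "in sync" if its global minimum equals its global
// maximum across all ranks (stand-in for MpiManager::grng_synchrony()).
#include <mpi.h>
#include <cstdio>

static bool
all_ranks_agree( unsigned long value )
{
  unsigned long min_val = 0;
  unsigned long max_val = 0;
  MPI_Allreduce( &value, &min_val, 1, MPI_UNSIGNED_LONG, MPI_MIN, MPI_COMM_WORLD );
  MPI_Allreduce( &value, &max_val, 1, MPI_UNSIGNED_LONG, MPI_MAX, MPI_COMM_WORLD );
  return min_val == max_val;
}

int
main( int argc, char** argv )
{
  MPI_Init( &argc, &argv );
  const unsigned long draw = 12345UL; // in NEST this would be grng->ulrand( 100000 )
  if ( not all_ranks_agree( draw ) )
  {
    std::fprintf( stderr, "global RNGs out of sync\n" );
  }
  MPI_Finalize();
  return 0;
}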
void
nest::RNGManager::create_grng_()
{
  // create new grng
  LOG( M_INFO, "Network::create_grng_", "Creating new default global RNG" );

// create default RNG with default seed
#ifdef HAVE_GSL
  grng_ = librandom::RngPtr( new librandom::GslRandomGen(
    gsl_rng_knuthran2002, librandom::RandomGen::DefaultSeed ) );
#else
  grng_ = librandom::RandomGen::create_knuthlfg_rng(
    librandom::RandomGen::DefaultSeed );
#endif

  if ( !grng_ )
  {
    LOG( M_ERROR, "Network::create_grng_", "Error initializing knuthlfg" );

    throw KernelException();
  }

  /*
   The seed for the global rng should be different from the seeds
   of the local rngs_, which are seeded with 1, ..., n_vps (one per
   thread).
   */
  long s = 0;
  grng_seed_ = s;
  grng_->seed( s );
}
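
For reference, a minimal standalone sketch of the HAVE_GSL branch above, calling GSL directly instead of going through librandom::GslRandomGen (seed and draw range chosen only for illustration):

// Sketch only: create, seed and use a gsl_rng_knuthran2002 generator.
#include <gsl/gsl_rng.h>
#include <cstdio>

int
main()
{
  gsl_rng* rng = gsl_rng_alloc( gsl_rng_knuthran2002 );
  gsl_rng_set( rng, 0 ); // default global seed, cf. grng_seed_ = 0 above
  const unsigned long draw = gsl_rng_uniform_int( rng, 100000 ); // like grng_->ulrand( 100000 )
  std::printf( "first draw: %lu\n", draw );
  gsl_rng_free( rng );
  return 0;
}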
synindex
ConnectionManager::copy_synapse_prototype( synindex old_id, std::string new_name )
{
  // we can assert here, as nestmodule checks this for us
  assert( !synapsedict_->known( new_name ) );

  int new_id = prototypes_[ 0 ].size();

  if ( new_id == invalid_synindex ) // we wrapped around (=255), maximal id of synapse_model = 254
  {
    net_.message( SLIInterpreter::M_ERROR,
      "ConnectionManager::copy_synapse_prototype",
      "CopyModel cannot generate another synapse. Maximal synapse model count of 255 exceeded." );
    throw KernelException( "Synapse model count exceeded" );
  }
  assert( new_id != invalid_synindex );

  // if the copied synapse is a secondary connector model the synid of the copy has to
  // be mapped to the corresponding secondary event type
  if ( not get_synapse_prototype( old_id ).is_primary() )
  {
    ( get_synapse_prototype( old_id ).get_event() )->add_syn_id( new_id );
  }

  for ( thread t = 0; t < net_.get_num_threads(); ++t )
  {
    prototypes_[ t ].push_back( get_synapse_prototype( old_id ).clone( new_name ) );
    prototypes_[ t ][ new_id ]->set_syn_id( new_id );
  }

  synapsedict_->insert( new_name, new_id );
  return new_id;
}
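
The guard above works because new_id reaches invalid_synindex once 255 synapse models have been registered. A small self-contained sketch of that limit check; the type width and sentinel value are assumptions for illustration, not NEST's exact definitions:

// Sketch only: ids 0..254 are valid, 255 is the invalid sentinel.
#include <cstdint>
#include <stdexcept>
#include <vector>

using synindex = std::uint8_t;         // assumed width
const synindex invalid_synindex = 255; // assumed sentinel

synindex
register_model( std::vector< int >& prototypes )
{
  const std::size_t new_id = prototypes.size();
  if ( new_id == invalid_synindex ) // the 256th registration would wrap around
  {
    throw std::runtime_error( "Synapse model count exceeded" );
  }
  prototypes.push_back( 0 ); // placeholder for a real prototype
  return static_cast< synindex >( new_id );
}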
index
ModelManager::copy_synapse_model_( index old_id, Name new_name )
{
  size_t new_id = prototypes_[ 0 ].size();

  if ( new_id == invalid_synindex ) // we wrapped around (=255), maximal id of
                                    // synapse_model = 254
  {
    LOG( M_ERROR,
      "ModelManager::copy_synapse_model_",
      "CopyModel cannot generate another synapse. Maximal synapse model count "
      "of 255 exceeded." );
    throw KernelException( "Synapse model count exceeded" );
  }
  assert( new_id != invalid_synindex );

  // if the copied synapse is a secondary connector model the synid of the copy
  // has to be mapped to the corresponding secondary event type
  if ( not get_synapse_prototype( old_id ).is_primary() )
  {
    ( get_synapse_prototype( old_id ).get_event() )->add_syn_id( new_id );
  }

  for ( thread t = 0;
        t < static_cast< thread >( kernel().vp_manager.get_num_threads() );
        ++t )
  {
    prototypes_[ t ].push_back(
      get_synapse_prototype( old_id ).clone( new_name.toString() ) );
    prototypes_[ t ][ new_id ]->set_syn_id( new_id );
  }

  synapsedict_->insert( new_name, new_id );
  return new_id;
}
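
The per-thread loop above keeps one clone of the prototype for every thread, all registered under the same new id. A hedged sketch of that clone-per-thread pattern with standard-library types (the Prototype type below is a simplified stand-in for NEST's ConnectorModel):

// Sketch only: prototypes[t] holds the clones owned by thread t; the new
// clone lands at the same index new_id in every per-thread vector.
#include <memory>
#include <string>
#include <vector>

struct Prototype
{
  explicit Prototype( std::string name )
    : name_( std::move( name ) )
  {
  }
  std::unique_ptr< Prototype >
  clone( const std::string& new_name ) const
  {
    return std::unique_ptr< Prototype >( new Prototype( new_name ) );
  }
  std::string name_;
};

std::size_t
copy_prototype( std::vector< std::vector< std::unique_ptr< Prototype > > >& prototypes,
  const std::size_t old_id,
  const std::string& new_name )
{
  const std::size_t new_id = prototypes[ 0 ].size();
  for ( std::size_t t = 0; t < prototypes.size(); ++t ) // one slot per thread
  {
    prototypes[ t ].push_back( prototypes[ t ][ old_id ]->clone( new_name ) );
  }
  return new_id;
}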
void
nest::RNGManager::create_rngs_()
{
  // if old generators exist, remove them; since rng_ contains
  // lockPTRs, we don't have to worry about deletion
  if ( !rng_.empty() )
  {
    LOG( M_INFO,
      "Network::create_rngs_",
      "Deleting existing random number generators" );

    rng_.clear();
  }

  LOG( M_INFO, "Network::create_rngs_", "Creating default RNGs" );

  rng_seeds_.resize( kernel().vp_manager.get_num_virtual_processes() );

  for ( index i = 0; i < static_cast< index >(
                           kernel().vp_manager.get_num_virtual_processes() );
        ++i )
  {
    unsigned long s = i + 1;
    if ( kernel().vp_manager.is_local_vp( i ) )
    {
/*
 We have to ensure that each thread is provided with a different
 stream of random numbers.  The seeding method for Knuth's LFG
 generator guarantees that different seeds yield non-overlapping
 random number sequences.

 We therefore have to seed with known numbers: using random
 seeds here would run the risk of using the same seed twice.
 For simplicity, we use 1 .. n_vps.
 */
#ifdef HAVE_GSL
      librandom::RngPtr rng(
        new librandom::GslRandomGen( gsl_rng_knuthran2002, s ) );
#else
      librandom::RngPtr rng = librandom::RandomGen::create_knuthlfg_rng( s );
#endif

      if ( !rng )
      {
        LOG( M_ERROR, "Network::create_rngs_", "Error initializing knuthlfg" );

        throw KernelException();
      }

      rng_.push_back( rng );
    }

    rng_seeds_[ i ] = s;
  }
}
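
The loop above hands out the known seeds 1, ..., n_vps and instantiates a generator only for virtual processes local to this rank. A standalone sketch of that scheme; the round-robin ownership rule is an assumption for illustration, not necessarily VPManager::is_local_vp():

// Sketch only: deterministic per-VP seeds; each rank keeps generators only
// for the VPs it owns (round-robin ownership assumed).
#include <random>
#include <vector>

std::vector< std::mt19937 >
create_local_rngs( const int rank, const int num_processes, const int n_vps )
{
  std::vector< std::mt19937 > rngs;
  for ( int vp = 0; vp < n_vps; ++vp )
  {
    const unsigned long seed = vp + 1; // known, non-overlapping seeds 1..n_vps
    if ( vp % num_processes == rank )  // assumed stand-in for is_local_vp()
    {
      rngs.push_back( std::mt19937( seed ) );
    }
  }
  return rngs;
}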
void
nest::SimulationManager::assert_valid_simtime( Time const& t )
{
  if ( t == Time::ms( 0.0 ) )
  {
    return;
  }

  if ( t < Time::step( 1 ) )
  {
    LOG( M_ERROR,
      "SimulationManager::run",
      String::compose( "Simulation time must be >= %1 ms (one time step).",
           Time::get_resolution().get_ms() ) );
    throw KernelException();
  }

  if ( t.is_finite() )
  {
    Time time1 = clock_ + t;
    if ( not time1.is_finite() )
    {
      std::string msg = String::compose(
        "A clock overflow will occur after %1 of %2 ms. Please reset network "
        "clock first!",
        ( Time::max() - clock_ ).get_ms(),
        t.get_ms() );
      LOG( M_ERROR, "SimulationManager::run", msg );
      throw KernelException();
    }
  }
  else
  {
    std::string msg = String::compose(
      "The requested simulation time exceeds the largest time NEST can handle "
      "(T_max = %1 ms). Please use a shorter time!",
      Time::max().get_ms() );
    LOG( M_ERROR, "SimulationManager::run", msg );
    throw KernelException();
  }
}
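
The overflow branch above amounts to refusing a simulation time t if clock_ + t would exceed the largest representable time. A standalone sketch of that guard expressed on plain step counters (the step representation is an assumption for illustration):

// Sketch only: reject a request whose end time would overflow the clock.
#include <limits>
#include <stdexcept>

void
check_simtime_steps( const long clock_steps, const long requested_steps )
{
  const long max_steps = std::numeric_limits< long >::max();
  if ( requested_steps > max_steps - clock_steps ) // clock_ + t would overflow
  {
    throw std::runtime_error( "clock overflow: please reset the network clock first" );
  }
}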
File: model.cpp (Project: QJonny/CyNest)
  void Model::set_threads_(thread t)
  {
    for (size_t i = 0; i < memory_.size(); ++i)
      if ( memory_[i].get_instantiations() > 0 )
        throw KernelException();

    std::vector<sli::pool> tmp(t); 
    memory_.swap(tmp);

    for (size_t i = 0; i < memory_.size(); ++i)
      init_memory_(memory_[i]);
  }
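
set_threads_() above refuses to resize while any instances are alive and then swaps in freshly constructed per-thread pools. A hedged sketch of the same swap idiom with standard-library types (sli::pool is replaced by a simplified stand-in):

// Sketch only: replace the per-thread pools by swapping with a temporary;
// the old pools are destroyed when tmp goes out of scope.
#include <stdexcept>
#include <vector>

struct Pool
{
  std::size_t instantiations = 0; // stand-in for sli::pool bookkeeping
};

void
set_threads( std::vector< Pool >& memory, const std::size_t n_threads )
{
  for ( const Pool& p : memory )
  {
    if ( p.instantiations > 0 )
    {
      throw std::runtime_error( "cannot change thread count while instances exist" );
    }
  }
  std::vector< Pool > tmp( n_threads ); // one empty pool per thread
  memory.swap( tmp );
}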
nest::index
nest::SourceTable::get_gid( const thread tid,
  const synindex syn_id,
  const index lcid ) const
{
  if ( not kernel().connection_manager.get_keep_source_table() )
  {
    throw KernelException(
      "Cannot use SourceTable::get_gid when get_keep_source_table is false" );
  }
  return sources_[ tid ][ syn_id ][ lcid ].get_gid();
}
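
get_gid() above indexes a three-level structure by thread, synapse type, and local connection id. A hedged sketch of that layout; the Source element below is a simplified stand-in for NEST's internal type:

// Sketch only: nested layout addressed as sources[ tid ][ syn_id ][ lcid ].
#include <cstddef>
#include <vector>

struct Source
{
  std::size_t gid; // global id of the sending node
};

using SourceTableLayout = std::vector<  // one entry per thread (tid)
  std::vector<                          // one entry per synapse type (syn_id)
    std::vector< Source > > >;          // one entry per local connection (lcid)

std::size_t
get_gid( const SourceTableLayout& sources,
  const std::size_t tid,
  const std::size_t syn_id,
  const std::size_t lcid )
{
  return sources[ tid ][ syn_id ][ lcid ].gid;
}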
size_t
nest::SimulationManager::prepare_simulation_()
{
    assert( to_do_ != 0 ); // This is checked in simulate()

    // find shortest and longest delay across all MPI processes
    // this call sets the member variables
    kernel().connection_manager.update_delay_extrema_();
    kernel().event_delivery_manager.init_moduli();

    // Check for synchronicity of global rngs over processes.
    // We need to do this ahead of any simulation in case random numbers
    // have been consumed on the SLI level.
    if ( kernel().mpi_manager.get_num_processes() > 1 )
    {
        if ( !kernel().mpi_manager.grng_synchrony(
                    kernel().rng_manager.get_grng()->ulrand( 100000 ) ) )
        {
            LOG( M_ERROR,
                 "SimulationManager::simulate",
                 "Global Random Number Generators are not synchronized prior to "
                 "simulation." );
            throw KernelException();
        }
    }

    // if at the beginning of a simulation, set up spike buffers
    if ( !simulated_ )
        kernel().event_delivery_manager.configure_spike_buffers();

    kernel().node_manager.ensure_valid_thread_local_ids();
    const size_t num_active_nodes = kernel().node_manager.prepare_nodes();

    kernel().model_manager.create_secondary_events_prototypes();

    // we have to do enter_runtime after prepare_nodes, since we use
    // calibrate to map the ports of MUSIC devices, which has to be done
    // before enter_runtime
    if ( !simulated_ ) // only enter the runtime mode once
    {
        double tick = Time::get_resolution().get_ms()
                      * kernel().connection_manager.get_min_delay();
        kernel().music_manager.enter_runtime( tick );
    }

    return num_active_nodes;
}
void
nest::SimulationManager::finalize_simulation_()
{
  if ( not simulated_ )
    return;

  // Check for synchronicity of global rngs over processes
  // TODO: This check seems to be duplicated; there is such a test at the end of simulate()
  if ( kernel().mpi_manager.get_num_processes() > 1 )
    if ( !kernel().mpi_manager.grng_synchrony( kernel().rng_manager.get_grng()->ulrand( 100000 ) ) )
    {
      LOG( M_ERROR,
        "SimulationManager::simulate",
        "Global Random Number Generators are not synchronized after simulation." );
      throw KernelException();
    }

  kernel().node_manager.finalize_nodes();
}
index NodeManager::add_node( index mod, long n ) // no_p
{
  assert( current_ != 0 );
  assert( root_ != 0 );

  if ( mod >= kernel().model_manager.get_num_node_models() )
  {
    throw UnknownModelID( mod );
  }

  if ( n < 1 )
  {
    throw BadProperty();
  }

  const thread n_threads = kernel().vp_manager.get_num_threads();
  assert( n_threads > 0 );

  const index min_gid = local_nodes_.get_max_gid() + 1;
  const index max_gid = min_gid + n;

  Model* model = kernel().model_manager.get_model( mod );
  assert( model != 0 );

  model->deprecation_warning( "Create" );

  /* current_ points to the instance of the current subnet on thread 0.
     The following code makes subnet a pointer to the wrapper container
     containing the instances of the current subnet on all threads.
   */
  const index subnet_gid = current_->get_gid();
  Node* subnet_node = local_nodes_.get_node_by_gid( subnet_gid );
  assert( subnet_node != 0 );

  SiblingContainer* subnet_container =
    dynamic_cast< SiblingContainer* >( subnet_node );
  assert( subnet_container != 0 );
  assert( subnet_container->num_thread_siblings()
    == static_cast< size_t >( n_threads ) );
  assert( subnet_container->get_thread_sibling( 0 ) == current_ );

  if ( max_gid > local_nodes_.max_size() || max_gid < min_gid )
  {
    LOG( M_ERROR,
      "NodeManager::add:node",
      "Requested number of nodes will overflow the memory." );
    LOG( M_ERROR, "NodeManager::add:node", "No nodes were created." );
    throw KernelException( "OutOfMemory" );
  }
  kernel().modelrange_manager.add_range( mod, min_gid, max_gid - 1 );

  if ( model->potential_global_receiver()
    and kernel().mpi_manager.get_num_rec_processes() > 0 )
  {
    // In this branch we create nodes for global receivers
    const int n_per_process = n / kernel().mpi_manager.get_num_rec_processes();
    const int n_per_thread = n_per_process / n_threads + 1;

    // We only need to reserve memory on the ranks on which we
    // actually create nodes. In this if-branch ---> Only on recording
    // processes
    if ( kernel().mpi_manager.get_rank()
      >= kernel().mpi_manager.get_num_sim_processes() )
    {
      local_nodes_.reserve( std::ceil( static_cast< double >( max_gid )
        / kernel().mpi_manager.get_num_sim_processes() ) );
      for ( thread t = 0; t < n_threads; ++t )
      {
        // Model::reserve() reserves memory for n ADDITIONAL nodes on thread t
        model->reserve_additional( t, n_per_thread );
      }
    }

    for ( size_t gid = min_gid; gid < max_gid; ++gid )
    {
      const thread vp = kernel().vp_manager.suggest_rec_vp( get_n_gsd() );
      const thread t = kernel().vp_manager.vp_to_thread( vp );

      if ( kernel().vp_manager.is_local_vp( vp ) )
      {
        Node* newnode = model->allocate( t );
        newnode->set_gid_( gid );
        newnode->set_model_id( mod );
        newnode->set_thread( t );
        newnode->set_vp( vp );
        newnode->set_has_proxies( true );
        newnode->set_local_receiver( false );

        local_nodes_.add_local_node( *newnode ); // put into local nodes list

        current_->add_node( newnode ); // and into current subnet, thread 0.
      }
      else
      {
        local_nodes_.add_remote_node( gid ); // ensures max_gid is correct
        current_->add_remote_node( gid, mod );
      }
      increment_n_gsd();
    }
  }

  else if ( model->has_proxies() )
  {
    // In this branch we create nodes for all GIDs which are on a local thread
    const int n_per_process = n / kernel().mpi_manager.get_num_sim_processes();
    const int n_per_thread = n_per_process / n_threads + 1;

    // We only need to reserve memory on the ranks on which we
    // actually create nodes. In this if-branch ---> Only on
    // simulation processes
    if ( kernel().mpi_manager.get_rank()
      < kernel().mpi_manager.get_num_sim_processes() )
    {
      // TODO: This will work reasonably for round-robin. The extra 50 entries
      //       are for subnets and devices.
      local_nodes_.reserve(
        std::ceil( static_cast< double >( max_gid )
          / kernel().mpi_manager.get_num_sim_processes() ) + 50 );
      for ( thread t = 0; t < n_threads; ++t )
      {
        // Model::reserve() reserves memory for n ADDITIONAL nodes on thread t
        // reserves at least one entry on each thread, nobody knows why
        model->reserve_additional( t, n_per_thread );
      }
    }

    size_t gid;
    if ( kernel().vp_manager.is_local_vp(
           kernel().vp_manager.suggest_vp( min_gid ) ) )
    {
      gid = min_gid;
    }
    else
    {
      gid = next_local_gid_( min_gid );
    }
    size_t next_lid = current_->global_size() + gid - min_gid;
    // The next loop will not visit every node if more than one rank is
    // present. Since we already know what range of gids will be created,
    // we can tell the current subnet the range, and subsequent calls to
    // `current_->add_remote_node()` become irrelevant.
    current_->add_gid_range( min_gid, max_gid - 1 );

    // min_gid is the first valid gid we should create, hence we ask for the
    // first local gid after min_gid - 1
    while ( gid < max_gid )
    {
      const thread vp = kernel().vp_manager.suggest_vp( gid );
      const thread t = kernel().vp_manager.vp_to_thread( vp );

      if ( kernel().vp_manager.is_local_vp( vp ) )
      {
        Node* newnode = model->allocate( t );
        newnode->set_gid_( gid );
        newnode->set_model_id( mod );
        newnode->set_thread( t );
        newnode->set_vp( vp );

        local_nodes_.add_local_node( *newnode ); // put into local nodes list
        current_->add_node( newnode ); // and into current subnet, thread 0.

        // lid setting is wrong if a range is set, as the subnet already
        // assumes the nodes are available.
        newnode->set_lid_( next_lid );
        const size_t next_gid = next_local_gid_( gid );
        next_lid += next_gid - gid;
        gid = next_gid;
      }
      else
      {
        ++gid; // brutal fix, next_lid has been set in if-branch
      }
    }
    // if last gid is not on this process, we need to add it as a remote node
    if ( not kernel().vp_manager.is_local_vp(
           kernel().vp_manager.suggest_vp( max_gid - 1 ) ) )
    {
      local_nodes_.add_remote_node( max_gid - 1 ); // ensures max_gid is correct
      current_->add_remote_node( max_gid - 1, mod );
    }
  }
  else if ( not model->one_node_per_process() )
  {
    // We allocate space for n containers which will hold the threads
    // sorted. We use SiblingContainers to store the instances for
    // each thread to exploit the very efficient memory allocation for
    // nodes.
    //
    // These containers are registered in the global nodes_ array to
    // provide access to the instances both for manipulation by SLI
    // functions and so that NodeManager::calibrate() can discover the
    // instances and register them for updating.
    //
    // The instances are also registered with the instance of the
    // current subnet for the thread to which the created instance
    // belongs. This is mainly important so that the subnet structure
    // is preserved on all VPs.  Node enumeration is done by the
    // registration with the per-thread instances.
    //
    // The wrapper container can be addressed under the GID assigned
    // to the no-proxy node created. If this no-proxy node is NOT a
    // container (e.g. a device), then each instance can be retrieved
    // by giving the respective thread-id to get_node(). Instances of
    // SiblingContainers cannot be addressed individually.
    //
    // The allocation of the wrapper containers is spread over threads
    // to balance memory load.
    size_t container_per_thread = n / n_threads + 1;

    // since we create the n nodes on each thread, we reserve the full load.
    for ( thread t = 0; t < n_threads; ++t )
    {
      model->reserve_additional( t, n );
      siblingcontainer_model_->reserve_additional( t, container_per_thread );
      static_cast< Subnet* >( subnet_container->get_thread_sibling( t ) )
        ->reserve( n );
    }

    // The following loop creates n nodes. For each node, a wrapper is created
    // and filled with one instance per thread, in total n * n_thread nodes in
    // n wrappers.
    local_nodes_.reserve(
      std::ceil( static_cast< double >( max_gid )
        / kernel().mpi_manager.get_num_sim_processes() ) + 50 );
    for ( index gid = min_gid; gid < max_gid; ++gid )
    {
      thread thread_id = kernel().vp_manager.vp_to_thread(
        kernel().vp_manager.suggest_vp( gid ) );

      // Create wrapper and register with nodes_ array.
      SiblingContainer* container = static_cast< SiblingContainer* >(
        siblingcontainer_model_->allocate( thread_id ) );
      container->set_model_id(
        -1 ); // mark as pseudo-container wrapping replicas, see reset_network()
      container->reserve( n_threads ); // space for one instance per thread
      container->set_gid_( gid );
      local_nodes_.add_local_node( *container );

      // Generate one instance of desired model per thread
      for ( thread t = 0; t < n_threads; ++t )
      {
        Node* newnode = model->allocate( t );
        newnode->set_gid_( gid ); // all instances get the same global id.
        newnode->set_model_id( mod );
        newnode->set_thread( t );
        newnode->set_vp( kernel().vp_manager.thread_to_vp( t ) );

        // Register instance with wrapper
        // container has one entry for each thread
        container->push_back( newnode );

        // Register instance with per-thread instance of enclosing subnet.
        static_cast< Subnet* >( subnet_container->get_thread_sibling( t ) )
          ->add_node( newnode );
      }
    }
  }
  else
  {
    // no proxies and one node per process
    // this is used by MUSIC proxies
    // Per r9700, this case is only relevant for music_*_proxy models,
    // which have a single instance per MPI process.
    for ( index gid = min_gid; gid < max_gid; ++gid )
    {
      Node* newnode = model->allocate( 0 );
      newnode->set_gid_( gid );
      newnode->set_model_id( mod );
      newnode->set_thread( 0 );
      newnode->set_vp( kernel().vp_manager.thread_to_vp( 0 ) );

      // Register instance
      local_nodes_.add_local_node( *newnode );

      // and into current subnet, thread 0.
      current_->add_node( newnode );
    }
  }

  // set off-grid spike communication if necessary
  if ( model->is_off_grid() )
  {
    kernel().event_delivery_manager.set_off_grid_communication( true );
    LOG( M_INFO,
      "NodeManager::add_node",
      "Neuron models emitting precisely timed spikes exist: "
      "the kernel property off_grid_spiking has been set to true.\n\n"
      "NOTE: Mixing precise-spiking and normal neuron models may "
      "lead to inconsistent results." );
  }

  return max_gid - 1;
}
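
add_node() above repeatedly consults suggest_vp(), vp_to_thread() and is_local_vp() to decide which rank and thread own a GID. The sketch below shows the round-robin distribution these decisions are commonly based on (cf. the round-robin TODO near the reserve() call); the exact formulas are assumptions for illustration, not NEST's implementation:

// Sketch only: assumed round-robin gid -> vp -> (rank, thread) mapping.
struct VpLayout
{
  int num_processes;
  int threads_per_process;

  int
  num_vps() const
  {
    return num_processes * threads_per_process;
  }
  int
  suggest_vp( const long gid ) const
  {
    return static_cast< int >( gid % num_vps() );
  }
  int
  vp_to_rank( const int vp ) const
  {
    return vp % num_processes;
  }
  int
  vp_to_thread( const int vp ) const
  {
    return vp / num_processes;
  }
  bool
  is_local_vp( const int vp, const int rank ) const
  {
    return vp_to_rank( vp ) == rank;
  }
};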
void
nest::SimulationManager::set_status( const DictionaryDatum& d )
{
    // Create an instance of time converter here to capture the current
    // representation of time objects: TICS_PER_MS and TICS_PER_STEP
    // will be stored in time_converter.
    // This object can then be used to convert times in steps
    // (e.g. Connection::delay_) or tics to the new representation.
    // We pass this object to ConnectionManager::calibrate to update
    // all time objects in the connection system to the new representation.
    // MH 08-04-14
    TimeConverter time_converter;

    double_t time;
    if ( updateValue< double_t >( d, "time", time ) )
    {
        if ( time != 0.0 )
            throw BadProperty( "The simulation time can only be set to 0.0." );

        if ( clock_ > TimeZero )
        {
            // reset only if time has passed
            LOG( M_WARNING,
                 "SimulationManager::set_status",
                 "Simulation time reset to t=0.0. Resetting the simulation time is not "
                 "fully supported in NEST at present. Some spikes may be lost, and "
                 "stimulating devices may behave unexpectedly. PLEASE REVIEW YOUR "
                 "SIMULATION OUTPUT CAREFULLY!" );

            clock_ = Time::step( 0 );
            from_step_ = 0;
            slice_ = 0;
            // clear all old spikes
            kernel().event_delivery_manager.configure_spike_buffers();
        }
    }

    updateValue< bool >( d, "print_time", print_time_ );

    // tics_per_ms and resolution must come after local_num_thread /
    // total_num_threads because they might reset the network and the time
    // representation
    nest::double_t tics_per_ms = 0.0;
    bool tics_per_ms_updated =
        updateValue< nest::double_t >( d, "tics_per_ms", tics_per_ms );
    double_t resd = 0.0;
    bool res_updated = updateValue< double_t >( d, "resolution", resd );

    if ( tics_per_ms_updated || res_updated )
    {
        if ( kernel().node_manager.size() > 1 ) // root always exists
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Cannot change time representation after nodes have been created. "
                 "Please call ResetKernel first." );
            throw KernelException();
        }
        else if ( has_been_simulated() ) // someone may have simulated empty network
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Cannot change time representation after the network has been "
                 "simulated. Please call ResetKernel first." );
            throw KernelException();
        }
        else if ( kernel().connection_manager.get_num_connections() != 0 )
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Cannot change time representation after connections have been "
                 "created. Please call ResetKernel first." );
            throw KernelException();
        }
        else if ( res_updated && tics_per_ms_updated ) // only allow TICS_PER_MS to
            // be changed together with
            // resolution
        {
            if ( resd < 1.0 / tics_per_ms )
            {
                LOG( M_ERROR,
                     "SimulationManager::set_status",
                     "Resolution must be greater than or equal to one tic. Value "
                     "unchanged." );
                throw KernelException();
            }
            else
            {
                nest::Time::set_resolution( tics_per_ms, resd );
                // adjust to new resolution
                clock_.calibrate();
                // adjust delays in the connection system to new resolution
                kernel().connection_manager.calibrate( time_converter );
                kernel().model_manager.calibrate( time_converter );
                LOG( M_INFO,
                     "SimulationManager::set_status",
                     "tics per ms and resolution changed." );

                // make sure that the wfr communication interval is always greater
                // than or equal to the resolution; if no wfr is used, explicitly set
                // wfr_comm_interval to the resolution, because communication in
                // every step is needed
                if ( wfr_comm_interval_ < Time::get_resolution().get_ms()
                        || not use_wfr_ )
                {
                    wfr_comm_interval_ = Time::get_resolution().get_ms();
                }
            }
        }
        else if ( res_updated ) // only resolution changed
        {
            if ( resd < Time::get_ms_per_tic() )
            {
                LOG( M_ERROR,
                     "SimulationManager::set_status",
                     "Resolution must be greater than or equal to one tic. Value "
                     "unchanged." );
                throw KernelException();
            }
            else
            {
                Time::set_resolution( resd );
                clock_.calibrate(); // adjust to new resolution
                // adjust delays in the connection system to new resolution
                kernel().connection_manager.calibrate( time_converter );
                kernel().model_manager.calibrate( time_converter );
                LOG( M_INFO,
                     "SimulationManager::set_status",
                     "Temporal resolution changed." );

                // make sure that the wfr communication interval is always greater
                // than or equal to the resolution; if no wfr is used, explicitly set
                // wfr_comm_interval to the resolution, because communication in
                // every step is needed
                if ( wfr_comm_interval_ < Time::get_resolution().get_ms()
                        || not use_wfr_ )
                {
                    wfr_comm_interval_ = Time::get_resolution().get_ms();
                }
            }
        }
        else
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "change of tics_per_step requires simultaneous specification of "
                 "resolution." );
            throw KernelException();
        }
    }

    // The decision whether the waveform relaxation is used
    // must be set before nodes are created.
    // Important: wfr_comm_interval_ may change depending on use_wfr_
    bool wfr;
    if ( updateValue< bool >( d, "use_wfr", wfr ) )
    {
        if ( kernel().node_manager.size() > 1 )
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Cannot enable/disable usage of waveform relaxation after nodes have "
                 "been created. Please call ResetKernel first." );
            throw KernelException();
        }
        else
        {
            use_wfr_ = wfr;
            // if no wfr is used explicitly set wfr_comm_interval to resolution
            // because communication in every step is needed
            if ( not use_wfr_ )
            {
                wfr_comm_interval_ = Time::get_resolution().get_ms();
            }
        }
    }

    // wfr_comm_interval_ can only be changed if use_wfr_ is true and before
    // connections are created. If use_wfr_ is false wfr_comm_interval_ is set to
    // the resolution whenever the resolution changes.
    double_t wfr_interval;
    if ( updateValue< double_t >( d, "wfr_comm_interval", wfr_interval ) )
    {
        if ( not use_wfr_ )
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Cannot set waveform communication interval when usage of waveform "
                 "relaxation is disabled. Set use_wfr to true first." );
            throw KernelException();
        }
        else if ( kernel().connection_manager.get_num_connections() != 0 )
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Cannot change waveform communication interval after connections have "
                 "been created. Please call ResetKernel first." );
            throw KernelException();
        }
        else if ( wfr_interval < Time::get_resolution().get_ms() )
        {
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Communication interval of the waveform relaxation must be greater or "
                 "equal to the resolution of the simulation." );
            throw KernelException();
        }
        else
        {
            LOG( M_INFO,
                 "SimulationManager::set_status",
                 "Waveform communication interval changed successfully. " );
            wfr_comm_interval_ = wfr_interval;
        }
    }

    // set the convergence tolerance for the waveform relaxation method
    double_t tol;
    if ( updateValue< double_t >( d, "wfr_tol", tol ) )
    {
        if ( tol < 0.0 )
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Tolerance must be zero or positive" );
        else
            wfr_tol_ = tol;
    }

    // set the maximal number of iterations for the waveform relaxation method
    long max_iter;
    if ( updateValue< long >( d, "wfr_max_iterations", max_iter ) )
    {
        if ( max_iter <= 0 )
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Maximal number of iterations  for the waveform relaxation must be "
                 "positive. To disable waveform relaxation set use_wfr instead." );
        else
            wfr_max_iterations_ = max_iter;
    }

    // set the interpolation order for the waveform relaxation method
    long interp_order;
    if ( updateValue< long >( d, "wfr_interpolation_order", interp_order ) )
    {
        if ( ( interp_order < 0 ) || ( interp_order == 2 ) || ( interp_order > 3 ) )
            LOG( M_ERROR,
                 "SimulationManager::set_status",
                 "Interpolation order must be 0, 1, or 3." );
        else
            wfr_interpolation_order_ = interp_order;
    }
}
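
The resolution branches above all enforce that the resolution is at least one tic, i.e. resd >= 1.0 / tics_per_ms. A short worked sketch of that check in isolation; the numbers in the comments are chosen only for illustration:

// Sketch only: with tics_per_ms = 1000 one tic is 0.001 ms, so a resolution
// of 0.1 ms (100 tics per step) passes, while 0.0005 ms would be rejected.
#include <stdexcept>

void
check_resolution( const double tics_per_ms, const double resolution_ms )
{
  const double ms_per_tic = 1.0 / tics_per_ms;
  if ( resolution_ms < ms_per_tic )
  {
    throw std::runtime_error(
      "Resolution must be greater than or equal to one tic." );
  }
}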
void
nest::SimulationManager::simulate( Time const& t )
{
    assert( kernel().is_initialized() );

    t_real_ = 0;
    t_slice_begin_ = timeval();
    t_slice_end_ = timeval();

    if ( t == Time::ms( 0.0 ) )
        return;

    if ( t < Time::step( 1 ) )
    {
        LOG( M_ERROR,
             "SimulationManager::simulate",
             String::compose( "Simulation time must be >= %1 ms (one time step).",
                              Time::get_resolution().get_ms() ) );
        throw KernelException();
    }

    if ( t.is_finite() )
    {
        Time time1 = clock_ + t;
        if ( !time1.is_finite() )
        {
            std::string msg = String::compose(
                                  "A clock overflow will occur after %1 of %2 ms. Please reset network "
                                  "clock first!",
                                  ( Time::max() - clock_ ).get_ms(),
                                  t.get_ms() );
            LOG( M_ERROR, "SimulationManager::simulate", msg );
            throw KernelException();
        }
    }
    else
    {
        std::string msg = String::compose(
                              "The requested simulation time exceeds the largest time NEST can handle "
                              "(T_max = %1 ms). Please use a shorter time!",
                              Time::max().get_ms() );
        LOG( M_ERROR, "SimulationManager::simulate", msg );
        throw KernelException();
    }

    to_do_ += t.get_steps();
    to_do_total_ = to_do_;

    const size_t num_active_nodes = prepare_simulation_();

    // from_step_ is not touched here.  If we are at the beginning
    // of a simulation, it has been reset properly elsewhere.  If
    // a simulation was ended and is now continued, from_step_ will
    // have the proper value.  to_step_ is set as in advance_time().

    delay end_sim = from_step_ + to_do_;
    if ( kernel().connection_manager.get_min_delay() < end_sim )
        to_step_ =
            kernel()
            .connection_manager.get_min_delay(); // update to end of time slice
    else
        to_step_ = end_sim; // update to end of simulation time

    // Warn about possible inconsistencies, see #504.
    // This test cannot come any earlier, because we first need to compute
    // min_delay_
    // above.
    if ( t.get_steps() % kernel().connection_manager.get_min_delay() != 0 )
        LOG( M_WARNING,
             "SimulationManager::simulate",
             "The requested simulation time is not an integer multiple of the minimal "
             "delay in the network. This may result in inconsistent results under the "
             "following conditions: (i) A network contains more than one source of "
             "randomness, e.g., two different poisson_generators, and (ii) Simulate "
             "is called repeatedly with simulation times that are not multiples of "
             "the minimal delay." );

    resume_( num_active_nodes );

    finalize_simulation_();
}
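
simulate() above advances in slices of at most min_delay steps (via from_step_ and to_step_) and warns when the requested time is not a multiple of the minimal delay. A small standalone sketch of that slicing arithmetic, with illustrative numbers in the trailing comment:

// Sketch only: split `to_do` steps into chunks of at most `min_delay` steps.
#include <algorithm>
#include <cstdio>

void
print_slices( const long to_do, const long min_delay )
{
  long from_step = 0;
  while ( from_step < to_do )
  {
    const long to_step = std::min( from_step + min_delay, to_do );
    std::printf( "update slice [%ld, %ld)\n", from_step, to_step );
    from_step = to_step;
  }
  if ( to_do % min_delay != 0 )
  {
    std::printf( "warning: %ld is not a multiple of min_delay %ld\n", to_do, min_delay );
  }
}

// print_slices( 25, 10 ) would report [0, 10), [10, 20), [20, 25) plus the warning.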
void
nest::SimulationManager::set_status( const DictionaryDatum& d )
{
  // Create an instance of time converter here to capture the current
  // representation of time objects: TICS_PER_MS and TICS_PER_STEP
  // will be stored in time_converter.
  // This object can then be used to convert times in steps
  // (e.g. Connection::delay_) or tics to the new representation.
  // We pass this object to ConnectionManager::calibrate to update
  // all time objects in the connection system to the new representation.
  // MH 08-04-14
  TimeConverter time_converter;

  double_t time;
  if ( updateValue< double_t >( d, "time", time ) )
  {
    if ( time != 0.0 )
      throw BadProperty( "The simulation time can only be set to 0.0." );

    if ( clock_ > TimeZero )
    {
      // reset only if time has passed
      LOG( M_WARNING,
        "SimulationManager::set_status",
        "Simulation time reset to t=0.0. Resetting the simulation time is not "
        "fully supported in NEST at present. Some spikes may be lost, and "
        "stimulating devices may behave unexpectedly. PLEASE REVIEW YOUR "
        "SIMULATION OUTPUT CAREFULLY!" );

      clock_ = Time::step( 0 );
      from_step_ = 0;
      slice_ = 0;
      kernel().event_delivery_manager.configure_spike_buffers(); // clear all old spikes
    }
  }

  updateValue< bool >( d, "print_time", print_time_ );

  // tics_per_ms and resolution must come after local_num_thread / total_num_threads
  // because they might reset the network and the time representation
  nest::double_t tics_per_ms;
  bool tics_per_ms_updated = updateValue< nest::double_t >( d, "tics_per_ms", tics_per_ms );
  double_t resd;
  bool res_updated = updateValue< double_t >( d, "resolution", resd );

  if ( tics_per_ms_updated || res_updated )
  {
    if ( kernel().node_manager.size() > 1 ) // root always exists
    {
      LOG( M_ERROR,
        "SimulationManager::set_status",
        "Cannot change time representation after nodes have been created. Please call ResetKernel "
        "first." );
      throw KernelException();
    }
    else if ( has_been_simulated() ) // someone may have simulated empty network
    {
      LOG( M_ERROR,
        "SimulationManager::set_status",
        "Cannot change time representation after the network has been simulated. Please call "
        "ResetKernel first." );
      throw KernelException();
    }
    else if ( kernel().connection_builder_manager.get_num_connections() != 0 )
    {
      LOG( M_ERROR,
        "SimulationManager::set_status",
        "Cannot change time representation after connections have been created. Please call "
        "ResetKernel first." );
      throw KernelException();
    }
    else if ( res_updated
      && tics_per_ms_updated ) // only allow TICS_PER_MS to be changed together with resolution
    {
      if ( resd < 1.0 / tics_per_ms )
      {
        LOG( M_ERROR,
          "SimulationManager::set_status",
          "Resolution must be greater than or equal to one tic. Value unchanged." );
        throw KernelException();
      }
      else
      {
        nest::Time::set_resolution( tics_per_ms, resd );
        clock_.calibrate(); // adjust to new resolution
        // adjust delays in the connection system to new resolution
        kernel().connection_builder_manager.calibrate( time_converter );
        kernel().model_manager.calibrate( time_converter );
        LOG( M_INFO, "SimulationManager::set_status", "tics per ms and resolution changed." );
      }
    }
    else if ( res_updated ) // only resolution changed
    {
      if ( resd < Time::get_ms_per_tic() )
      {
        LOG( M_ERROR,
          "SimulationManager::set_status",
          "Resolution must be greater than or equal to one tic. Value unchanged." );
        throw KernelException();
      }
      else
      {
        Time::set_resolution( resd );
        clock_.calibrate(); // adjust to new resolution
        // adjust delays in the connection system to new resolution
        kernel().connection_builder_manager.calibrate( time_converter );
        kernel().model_manager.calibrate( time_converter );
        LOG( M_INFO, "SimulationManager::set_status", "Temporal resolution changed." );
      }
    }
    else
    {
      LOG( M_ERROR,
        "SimulationManager::set_status",
        "change of tics_per_step requires simultaneous specification of resolution." );
      throw KernelException();
    }
  }

  // set the number of preliminary update cycles
  // e.g. for the implementation of gap junctions
  long nprelim;
  if ( updateValue< long >( d, "max_num_prelim_iterations", nprelim ) )
  {
    if ( nprelim < 0 )
      LOG( M_ERROR,
        "SimulationManager::set_status",
        "Number of preliminary update iterations must be zero or positive." );
    else
      max_num_prelim_iterations_ = nprelim;
  }

  double_t tol;
  if ( updateValue< double_t >( d, "prelim_tol", tol ) )
  {
    if ( tol < 0.0 )
      LOG( M_ERROR, "SimulationManager::set_status", "Tolerance must be zero or positive" );
    else
      prelim_tol_ = tol;
  }

  long interp_order;
  if ( updateValue< long >( d, "prelim_interpolation_order", interp_order ) )
  {
    if ( ( interp_order < 0 ) || ( interp_order == 2 ) || ( interp_order > 3 ) )
      LOG( M_ERROR, "SimulationManager::set_status", "Interpolation order must be 0, 1, or 3." );
    else
      prelim_interpolation_order_ = interp_order;
  }
}
void
nest::SimulationManager::run( Time const& t )
{
  assert_valid_simtime( t );

  if ( not prepared_ )
  {
    std::string msg = "Run called without calling Prepare.";
    LOG( M_ERROR, "SimulationManager::run", msg );
    throw KernelException();
  }

  to_do_ += t.get_steps();
  to_do_total_ = to_do_;

  if ( to_do_ == 0 )
  {
    return;
  }

  // Reset profiling timers and counters within event_delivery_manager
  kernel().event_delivery_manager.reset_timers_counters();

  // Check whether waveform relaxation is used on any MPI process
  kernel().node_manager.check_wfr_use();

  // from_step_ is not touched here.  If we are at the beginning
  // of a simulation, it has been reset properly elsewhere.  If
  // a simulation was ended and is now continued, from_step_ will
  // have the proper value.  to_step_ is set as in advance_time().

  delay end_sim = from_step_ + to_do_;
  if ( kernel().connection_manager.get_min_delay() < end_sim )
  {
    to_step_ =
      kernel()
        .connection_manager.get_min_delay(); // update to end of time slice
  }
  else
  {
    to_step_ = end_sim; // update to end of simulation time
  }

  // Warn about possible inconsistencies, see #504.
  // This test cannot come any earlier, because we first need to compute
  // min_delay_
  // above.
  if ( t.get_steps() % kernel().connection_manager.get_min_delay() != 0 )
  {
    LOG( M_WARNING,
      "SimulationManager::run",
      "The requested simulation time is not an integer multiple of the minimal "
      "delay in the network. This may result in inconsistent results under the "
      "following conditions: (i) A network contains more than one source of "
      "randomness, e.g., two different poisson_generators, and (ii) Simulate "
      "is called repeatedly with simulation times that are not multiples of "
      "the minimal delay." );
  }

  call_update_();

  kernel().node_manager.post_run_cleanup();
}
void
nest::SimulationManager::prepare()
{
  assert( kernel().is_initialized() );

  if ( prepared_ )
  {
    std::string msg = "Prepare called twice.";
    LOG( M_ERROR, "SimulationManager::prepare", msg );
    throw KernelException();
  }

  if ( inconsistent_state_ )
  {
    throw KernelException(
      "Kernel is in inconsistent state after an "
      "earlier error. Please run ResetKernel first." );
  }

  t_real_ = 0;
  t_slice_begin_ = timeval(); // set to timeval{0, 0} as unset flag
  t_slice_end_ = timeval();   // set to timeval{0, 0} as unset flag

  // find shortest and longest delay across all MPI processes
  // this call sets the member variables
  kernel().connection_manager.update_delay_extrema_();
  kernel().event_delivery_manager.init_moduli();

  // Check for synchronicity of global rngs over processes.
  // We need to do this ahead of any simulation in case random numbers
  // have been consumed on the SLI level.
  if ( kernel().mpi_manager.get_num_processes() > 1 )
  {
    if ( not kernel().mpi_manager.grng_synchrony(
           kernel().rng_manager.get_grng()->ulrand( 100000 ) ) )
    {
      LOG( M_ERROR,
        "SimulationManager::prepare",
        "Global Random Number Generators are not synchronized prior to "
        "simulation." );
      throw KernelException();
    }
  }

  // if at the beginning of a simulation, set up spike buffers
  if ( not simulated_ )
  {
    kernel().event_delivery_manager.configure_spike_buffers();
  }

  kernel().node_manager.ensure_valid_thread_local_ids();
  kernel().node_manager.prepare_nodes();

  kernel().model_manager.create_secondary_events_prototypes();

  // we have to do enter_runtime after prepare_nodes, since we use
  // calibrate to map the ports of MUSIC devices, which has to be done
  // before enter_runtime
  if ( not simulated_ ) // only enter the runtime mode once
  {
    double tick = Time::get_resolution().get_ms()
      * kernel().connection_manager.get_min_delay();
    kernel().music_manager.enter_runtime( tick );
  }
  prepared_ = true;
}
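
Taken together, the prepared_ and simulated_ flags enforce a Prepare, then Run (possibly repeatedly), then Cleanup ordering. A hedged usage sketch of that call order, assuming the kernel() accessor exposes the managers as in the snippets above and that the kernel has already been initialized:

// Sketch only: the call order the prepared_ guards in prepare(), run() and
// cleanup() enforce; kernel().simulation_manager is assumed from context.
void
simulate_in_chunks()
{
  kernel().simulation_manager.prepare();               // must come first
  kernel().simulation_manager.run( Time::ms( 50.0 ) ); // run() may be called
  kernel().simulation_manager.run( Time::ms( 50.0 ) ); // repeatedly
  kernel().simulation_manager.cleanup();               // requires a prior prepare()
}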