Code Example #1
Model& Node::get_model_() const
{
  if ( net_ == 0 || model_id_ < 0 )
    throw UnknownModelID( model_id_ );

  return *net_->get_model( model_id_ );
}
Code Example #2
File: node.cpp Project: gewaltig/nest-simulator
Model&
Node::get_model_() const
{
  if ( model_id_ < 0 )
    throw UnknownModelID( model_id_ );

  return *kernel().model_manager.get_model( model_id_ );
}
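
Examples #1 and #2 are two versions of the same accessor: the first retrieves the model from the owning Network object, the second from kernel().model_manager, but both validate model_id_ and throw UnknownModelID before dereferencing. The stand-alone sketch below illustrates that guard-then-dereference pattern; the ModelRegistry class and its exception type are invented for this illustration and are not part of the NEST API.

// Illustrative sketch only: ModelRegistry and Model are stand-ins for
// kernel().model_manager; only the guard-then-throw pattern mirrors the
// examples above.
#include <cstdio>
#include <stdexcept>
#include <string>
#include <vector>

struct Model
{
  std::string name;
};

// Thrown when a model index is negative or out of range, in the spirit of
// NEST's UnknownModelID.
struct UnknownModelID : std::runtime_error
{
  explicit UnknownModelID( long id )
    : std::runtime_error( "Unknown model id: " + std::to_string( id ) )
  {
  }
};

class ModelRegistry
{
public:
  long add_model( const std::string& name )
  {
    models_.push_back( Model{ name } );
    return static_cast< long >( models_.size() ) - 1;
  }

  // Same structure as Node::get_model_(): validate the id, then dereference.
  Model& get_model( long id )
  {
    if ( id < 0 || id >= static_cast< long >( models_.size() ) )
      throw UnknownModelID( id );

    return models_[ id ];
  }

private:
  std::vector< Model > models_;
};

int main()
{
  ModelRegistry registry;
  const long iaf = registry.add_model( "iaf_psc_alpha" );
  std::printf( "model %ld: %s\n", iaf, registry.get_model( iaf ).name.c_str() );

  try
  {
    registry.get_model( 42 ); // invalid id: triggers the guard
  }
  catch ( const UnknownModelID& e )
  {
    std::printf( "caught: %s\n", e.what() );
  }
  return 0;
}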
Code Example #3
index NodeManager::add_node( index mod, long n ) // no_p
{
  assert( current_ != 0 );
  assert( root_ != 0 );

  if ( mod >= kernel().model_manager.get_num_node_models() )
  {
    throw UnknownModelID( mod );
  }

  if ( n < 1 )
  {
    throw BadProperty();
  }

  const thread n_threads = kernel().vp_manager.get_num_threads();
  assert( n_threads > 0 );

  const index min_gid = local_nodes_.get_max_gid() + 1;
  const index max_gid = min_gid + n;
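  // Worked example: if get_max_gid() returned 10 and n == 5, the new nodes
  // receive gids 11..15; max_gid == 16 is one past the last new gid, and
  // max_gid - 1 is what add_node() returns below.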

  Model* model = kernel().model_manager.get_model( mod );
  assert( model != 0 );

  model->deprecation_warning( "Create" );

  /* current_ points to the instance of the current subnet on thread 0.
     The following code makes subnet a pointer to the wrapper container
     containing the instances of the current subnet on all threads.
   */
  const index subnet_gid = current_->get_gid();
  Node* subnet_node = local_nodes_.get_node_by_gid( subnet_gid );
  assert( subnet_node != 0 );

  SiblingContainer* subnet_container =
    dynamic_cast< SiblingContainer* >( subnet_node );
  assert( subnet_container != 0 );
  assert( subnet_container->num_thread_siblings()
    == static_cast< size_t >( n_threads ) );
  assert( subnet_container->get_thread_sibling( 0 ) == current_ );

  if ( max_gid > local_nodes_.max_size() || max_gid < min_gid )
  {
    LOG( M_ERROR,
      "NodeManager::add_node",
      "Requested number of nodes will overflow the memory." );
    LOG( M_ERROR, "NodeManager::add_node", "No nodes were created." );
    throw KernelException( "OutOfMemory" );
  }
  kernel().modelrange_manager.add_range( mod, min_gid, max_gid - 1 );

  if ( model->potential_global_receiver()
    and kernel().mpi_manager.get_num_rec_processes() > 0 )
  {
    // In this branch we create nodes for global receivers
    const int n_per_process = n / kernel().mpi_manager.get_num_rec_processes();
    const int n_per_thread = n_per_process / n_threads + 1;
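    // Worked example: n == 10 with 2 recording processes gives n_per_process
    // == 5; with 4 threads, n_per_thread == 5 / 4 + 1 == 2, so each thread
    // reserves a little more than strictly needed (integer division rounds
    // down, the + 1 compensates).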

    // We only need to reserve memory on the ranks on which we actually
    // create nodes; in this branch, only on the recording processes.
    if ( kernel().mpi_manager.get_rank()
      >= kernel().mpi_manager.get_num_sim_processes() )
    {
      local_nodes_.reserve( std::ceil( static_cast< double >( max_gid )
        / kernel().mpi_manager.get_num_sim_processes() ) );
      for ( thread t = 0; t < n_threads; ++t )
      {
        // Model::reserve_additional() reserves memory for n ADDITIONAL nodes on thread t
        model->reserve_additional( t, n_per_thread );
      }
    }

    for ( size_t gid = min_gid; gid < max_gid; ++gid )
    {
      const thread vp = kernel().vp_manager.suggest_rec_vp( get_n_gsd() );
      const thread t = kernel().vp_manager.vp_to_thread( vp );

      if ( kernel().vp_manager.is_local_vp( vp ) )
      {
        Node* newnode = model->allocate( t );
        newnode->set_gid_( gid );
        newnode->set_model_id( mod );
        newnode->set_thread( t );
        newnode->set_vp( vp );
        newnode->set_has_proxies( true );
        newnode->set_local_receiver( false );

        local_nodes_.add_local_node( *newnode ); // put into local nodes list

        current_->add_node( newnode ); // and into current subnet, thread 0.
      }
      else
      {
        local_nodes_.add_remote_node( gid ); // ensures max_gid is correct
        current_->add_remote_node( gid, mod );
      }
      increment_n_gsd();
    }
  }

  else if ( model->has_proxies() )
  {
    // In this branch we create nodes for all GIDs which are on a local thread
    const int n_per_process = n / kernel().mpi_manager.get_num_sim_processes();
    const int n_per_thread = n_per_process / n_threads + 1;

    // We only need to reserve memory on the ranks on which we actually
    // create nodes; in this branch, only on the simulation processes.
    if ( kernel().mpi_manager.get_rank()
      < kernel().mpi_manager.get_num_sim_processes() )
    {
      // TODO: This will work reasonably for round-robin. The extra 50 entries
      //       are for subnets and devices.
      local_nodes_.reserve(
        std::ceil( static_cast< double >( max_gid )
          / kernel().mpi_manager.get_num_sim_processes() ) + 50 );
      for ( thread t = 0; t < n_threads; ++t )
      {
        // Model::reserve_additional() reserves memory for n ADDITIONAL nodes on thread t
        // reserves at least one entry on each thread, nobody knows why
        model->reserve_additional( t, n_per_thread );
      }
    }

    size_t gid;
    if ( kernel().vp_manager.is_local_vp(
           kernel().vp_manager.suggest_vp( min_gid ) ) )
    {
      gid = min_gid;
    }
    else
    {
      gid = next_local_gid_( min_gid );
    }
    size_t next_lid = current_->global_size() + gid - min_gid;
    // The next loop will not visit every node if more than one rank is
    // present. Since we already know what range of gids will be created, we
    // can tell the current subnet the range, and subsequent calls to
    // `current_->add_remote_node()` become irrelevant.
    current_->add_gid_range( min_gid, max_gid - 1 );

    // min_gid is the first valid gid this rank should create, hence ask for
    // the first local gid after min_gid - 1.
    while ( gid < max_gid )
    {
      const thread vp = kernel().vp_manager.suggest_vp( gid );
      const thread t = kernel().vp_manager.vp_to_thread( vp );

      if ( kernel().vp_manager.is_local_vp( vp ) )
      {
        Node* newnode = model->allocate( t );
        newnode->set_gid_( gid );
        newnode->set_model_id( mod );
        newnode->set_thread( t );
        newnode->set_vp( vp );

        local_nodes_.add_local_node( *newnode ); // put into local nodes list
        current_->add_node( newnode ); // and into current subnet, thread 0.

        // lid setting is wrong if a range is set, as the subnet already
        // assumes the nodes are available.
        newnode->set_lid_( next_lid );
        const size_t next_gid = next_local_gid_( gid );
        next_lid += next_gid - gid;
        gid = next_gid;
      }
      else
      {
        ++gid; // brutal fix, next_lid has been set in if-branch
      }
    }
    // if last gid is not on this process, we need to add it as a remote node
    if ( not kernel().vp_manager.is_local_vp(
           kernel().vp_manager.suggest_vp( max_gid - 1 ) ) )
    {
      local_nodes_.add_remote_node( max_gid - 1 ); // ensures max_gid is correct
      current_->add_remote_node( max_gid - 1, mod );
    }
  }
  else if ( not model->one_node_per_process() )
  {
    // We allocate space for n containers which will hold the threads
    // sorted. We use SiblingContainers to store the instances for
    // each thread to exploit the very efficient memory allocation for
    // nodes.
    //
    // These containers are registered in the global nodes_ array to
    // provide access to the instances both for manipulation by SLI
    // functions and so that NodeManager::calibrate() can discover the
    // instances and register them for updating.
    //
    // The instances are also registered with the instance of the
    // current subnet for the thread to which the created instance
    // belongs. This is mainly important so that the subnet structure
    // is preserved on all VPs. Node enumeration is done by the
    // registration with the per-thread instances.
    //
    // The wrapper container can be addressed under the GID assigned
    // to the no-proxy node created. If this no-proxy node is NOT a
    // container (e.g. a device), then each instance can be retrieved
    // by giving the respective thread-id to get_node(). Instances of
    // SiblingContainers cannot be addressed individually.
    //
    // The allocation of the wrapper containers is spread over threads
    // to balance memory load.
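    //
    // Illustrative layout: with n == 3 and 2 threads, three SiblingContainer
    // wrappers are created (one per new gid), each holding one instance per
    // thread, and container_per_thread == 3 / 2 + 1 == 2 wrapper slots are
    // reserved on each thread.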
    size_t container_per_thread = n / n_threads + 1;

    // since we create the n nodes on each thread, we reserve the full load.
    for ( thread t = 0; t < n_threads; ++t )
    {
      model->reserve_additional( t, n );
      siblingcontainer_model_->reserve_additional( t, container_per_thread );
      static_cast< Subnet* >( subnet_container->get_thread_sibling( t ) )
        ->reserve( n );
    }

    // The following loop creates n nodes. For each node, a wrapper is created
    // and filled with one instance per thread, in total n * n_thread nodes in
    // n wrappers.
    local_nodes_.reserve(
      std::ceil( static_cast< double >( max_gid )
        / kernel().mpi_manager.get_num_sim_processes() ) + 50 );
    for ( index gid = min_gid; gid < max_gid; ++gid )
    {
      thread thread_id = kernel().vp_manager.vp_to_thread(
        kernel().vp_manager.suggest_vp( gid ) );

      // Create wrapper and register with nodes_ array.
      SiblingContainer* container = static_cast< SiblingContainer* >(
        siblingcontainer_model_->allocate( thread_id ) );
      // mark as pseudo-container wrapping replicas, see reset_network()
      container->set_model_id( -1 );
      container->reserve( n_threads ); // space for one instance per thread
      container->set_gid_( gid );
      local_nodes_.add_local_node( *container );

      // Generate one instance of desired model per thread
      for ( thread t = 0; t < n_threads; ++t )
      {
        Node* newnode = model->allocate( t );
        newnode->set_gid_( gid ); // all instances get the same global id.
        newnode->set_model_id( mod );
        newnode->set_thread( t );
        newnode->set_vp( kernel().vp_manager.thread_to_vp( t ) );

        // Register instance with wrapper
        // container has one entry for each thread
        container->push_back( newnode );

        // Register instance with per-thread instance of enclosing subnet.
        static_cast< Subnet* >( subnet_container->get_thread_sibling( t ) )
          ->add_node( newnode );
      }
    }
  }
  else
  {
    // no proxies and one node per process
    // this is used by MUSIC proxies
    // Per r9700, this case is only relevant for music_*_proxy models,
    // which have a single instance per MPI process.
    for ( index gid = min_gid; gid < max_gid; ++gid )
    {
      Node* newnode = model->allocate( 0 );
      newnode->set_gid_( gid );
      newnode->set_model_id( mod );
      newnode->set_thread( 0 );
      newnode->set_vp( kernel().vp_manager.thread_to_vp( 0 ) );

      // Register instance
      local_nodes_.add_local_node( *newnode );

      // and into current subnet, thread 0.
      current_->add_node( newnode );
    }
  }

  // set off-grid spike communication if necessary
  if ( model->is_off_grid() )
  {
    kernel().event_delivery_manager.set_off_grid_communication( true );
    LOG( M_INFO,
      "NodeManager::add_node",
      "Neuron models emitting precisely timed spikes exist: "
      "the kernel property off_grid_spiking has been set to true.\n\n"
      "NOTE: Mixing precise-spiking and normal neuron models may "
      "lead to inconsistent results." );
  }

  return max_gid - 1;
}
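
Every branch of add_node() relies on the VPManager's mapping from global id to virtual process and thread (suggest_vp(), vp_to_thread(), is_local_vp()), which decides whether a given gid is instantiated locally or only recorded as a remote node. That mapping is not part of this listing, so the sketch below uses an assumed round-robin scheme (vp = gid % total_vps) purely to illustrate how the loops above partition a gid range across ranks and threads; it is not NEST's actual implementation.

// Illustrative only: the round-robin mapping below is an assumption made for
// demonstration, not the library's definitive implementation.
#include <cstdio>

struct VpLayout
{
  int num_processes;       // MPI ranks
  int threads_per_process; // threads on each rank
  int rank;                // this process' rank

  int total_vps() const { return num_processes * threads_per_process; }

  // Round-robin assignment of global ids to virtual processes (assumed).
  int suggest_vp( long gid ) const { return static_cast< int >( gid % total_vps() ); }

  // Which rank and which local thread a virtual process lives on (assumed).
  int vp_to_rank( int vp ) const { return vp % num_processes; }
  int vp_to_thread( int vp ) const { return vp / num_processes; }

  bool is_local_vp( int vp ) const { return vp_to_rank( vp ) == rank; }
};

int main()
{
  // 2 ranks x 3 threads = 6 virtual processes; pretend to be rank 0.
  const VpLayout layout{ 2, 3, 0 };

  // Walk a small gid range the way the allocation loops above do and report
  // which gids this rank would instantiate locally.
  for ( long gid = 1; gid <= 12; ++gid )
  {
    const int vp = layout.suggest_vp( gid );
    if ( layout.is_local_vp( vp ) )
      std::printf( "gid %2ld -> vp %d, thread %d (local)\n", gid, vp, layout.vp_to_thread( vp ) );
    else
      std::printf( "gid %2ld -> vp %d (remote)\n", gid, vp );
  }
  return 0;
}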