Example #1
  void cg_connect(ConnectionGeneratorDatum& cg, RangeSet& sources, std::vector<long>& source_gids, RangeSet& targets, std::vector<long>& target_gids, DictionaryDatum params_map, index syn)
  {
    cg_set_masks(cg, sources, targets);
    cg->start();

    int source, target, num_parameters = cg->arity();
    if (num_parameters == 0)
    {
      // connect each (source, target) pair reported by the connection generator
      while (cg->next(source, target, NULL))
        ConnectionGeneratorModule::get_network().connect(source_gids.at(source), target_gids.at(target), syn);
    }
    else if (num_parameters == 2)
    {
      if (!params_map->known(names::weight) || !params_map->known(names::delay))
        throw BadProperty("The parameter map has to contain the indices of weight and delay.");  

      long w_idx = (*params_map)[names::weight];
      long d_idx = (*params_map)[names::delay];
      std::vector<double> params(2);

      // connect each pair, taking weight and delay from the connection generator's value set
      while (cg->next(source, target, &params[0]))
        ConnectionGeneratorModule::get_network().connect(source_gids.at(source), target_gids.at(target), params[w_idx], params[d_idx], syn);
    }
    else
    {
      ConnectionGeneratorModule::get_network().message(SLIInterpreter::M_ERROR, "Connect", "Either two or no parameters in the Connection Set expected.");
      throw DimensionMismatch();  
    }
  }
Example #2
/**
 * This function is not thread-safe and has to be called inside an OpenMP
 * critical region.
 */
int
nest::sli_neuron::execute_sli_protected( DictionaryDatum state, Name cmd )
{
  SLIInterpreter& i = get_engine();

  i.DStack->push( state ); // push state dictionary as top namespace
  size_t exitlevel = i.EStack.load();
  i.EStack.push( new NameDatum( cmd ) );
  int result = i.execute_( exitlevel );
  i.DStack->pop(); // pop neuron's namespace

  if ( state->known( "error" ) )
  {
    assert( state->known( names::global_id ) );
    index g_id = ( *state )[ names::global_id ];
    std::string model = getValue< std::string >( ( *state )[ names::model ] );
    std::string msg =
      String::compose( "Error in %1 with global id %2.", model, g_id );

    LOG( M_ERROR, cmd.toString().c_str(), msg.c_str() );
    LOG( M_ERROR, "execute_sli_protected", "Terminating." );

    kernel().simulation_manager.terminate();
  }

  return result;
}
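The comment above means every call site must serialize access to the shared SLI interpreter. A minimal sketch of such a call site (illustrative only, not taken from the NEST sources; it assumes it runs inside an sli_neuron member function that already holds a status dictionary state_ and a command name update_cmd_):

#pragma omp critical( sli_neuron )
{
  // Only one thread at a time may drive the shared SLI interpreter.
  const int result = execute_sli_protected( state_, update_cmd_ );
  ( void ) result; // the SLI return code could be checked here
}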
Example #3
void
cg_connect( ConnectionGeneratorDatum& cg,
  RangeSet& sources,
  std::vector< long >& source_gids,
  RangeSet& targets,
  std::vector< long >& target_gids,
  DictionaryDatum params_map,
  index syn )
{
  cg_set_masks( cg, sources, targets );
  cg->start();

  int source, target, num_parameters = cg->arity();
  if ( num_parameters == 0 )
  {
    // connect source to target
    while ( cg->next( source, target, NULL ) )
    {
      if ( kernel().node_manager.is_local_gid( target_gids.at( target ) ) )
      {
        Node* const target_node = kernel().node_manager.get_node( target_gids.at( target ) );
        const thread target_thread = target_node->get_thread();
        kernel().connection_builder_manager.connect(
          source_gids.at( source ), target_node, target_thread, syn );
      }
    }
  }
  else if ( num_parameters == 2 )
  {
    if ( !params_map->known( names::weight ) || !params_map->known( names::delay ) )
      throw BadProperty( "The parameter map has to contain the indices of weight and delay." );

    long w_idx = ( *params_map )[ names::weight ];
    long d_idx = ( *params_map )[ names::delay ];
    std::vector< double > params( 2 );

    // connect source to target with weight and delay
    while ( cg->next( source, target, &params[ 0 ] ) )
    {
      if ( kernel().node_manager.is_local_gid( target_gids.at( target ) ) )
      {
        Node* const target_node = kernel().node_manager.get_node( target_gids.at( target ) );
        const thread target_thread = target_node->get_thread();
        kernel().connection_builder_manager.connect( source_gids.at( source ),
          target_node,
          target_thread,
          syn,
          params[ d_idx ],
          params[ w_idx ] );
      }
    }
  }
  else
  {
    LOG( M_ERROR, "Connect", "Either two or no parameters in the Connection Set expected." );
    throw DimensionMismatch();
  }
}
Example #4
ArrayDatum
ConnectionManager::get_connections( DictionaryDatum params ) const
{
  ArrayDatum connectome;

  const Token& source_t = params->lookup( names::source );
  const Token& target_t = params->lookup( names::target );
  const Token& syn_model_t = params->lookup( names::synapse_model );
  const TokenArray* source_a = 0;
  const TokenArray* target_a = 0;

  if ( not source_t.empty() )
    source_a = dynamic_cast< TokenArray const* >( source_t.datum() );
  if ( not target_t.empty() )
    target_a = dynamic_cast< TokenArray const* >( target_t.datum() );

  size_t syn_id = 0;

#ifdef _OPENMP
  std::string msg;
  msg = String::compose( "Setting OpenMP num_threads to %1.", net_.get_num_threads() );
  net_.message( SLIInterpreter::M_DEBUG, "ConnectionManager::get_connections", msg );
  omp_set_num_threads( net_.get_num_threads() );
#endif

  // First we check, whether a synapse model is given.
  // If not, we will iterate all.
  if ( not syn_model_t.empty() )
  {
    Name synmodel_name = getValue< Name >( syn_model_t );
    const Token synmodel = synapsedict_->lookup( synmodel_name );
    if ( !synmodel.empty() )
      syn_id = static_cast< size_t >( synmodel );
    else
      throw UnknownModelName( synmodel_name.toString() );
    get_connections( connectome, source_a, target_a, syn_id );
  }
  else
  {
    for ( syn_id = 0; syn_id < prototypes_[ 0 ].size(); ++syn_id )
    {
      ArrayDatum conn;
      get_connections( conn, source_a, target_a, syn_id );
      if ( conn.size() > 0 )
        connectome.push_back( new ArrayDatum( conn ) );
    }
  }

  return connectome;
}
Example #5
  /**
   * Set properties of this connection from position p in the properties
   * array given in dictionary.
   */
  void STDPRSNNSpikePairingConnectionHom::set_status(const DictionaryDatum & d, index p, ConnectorModel &cm)
  {
    ConnectionHetWD::set_status(d, p, cm);

     if (d->known("tau_pluss") ||
         d->known("lambds") ||
         d->known("alphas") ||
         d->known("mu_pluss") ||
         d->known("mu_minuss") ||
         d->known("Wmaxs") )
     {
       cm.network().message(SLIInterpreter::M_ERROR, "STDPRSNNSpikePairingConnectionHom::set_status()", "you are trying to set common properties via an individual synapse.");
     }
  }
Example #6
void
nest::weight_recorder::Parameters_::set( const DictionaryDatum& d )
{
  if ( d->known( names::senders ) )
  {
    senders_ = getValue< std::vector< long > >( d->lookup( names::senders ) );
    std::sort( senders_.begin(), senders_.end() );
  }

  if ( d->known( names::targets ) )
  {
    targets_ = getValue< std::vector< long > >( d->lookup( names::targets ) );
    std::sort( targets_.begin(), targets_.end() );
  }
}
Example #7
ArrayDatum
get_children( const index node_id, const DictionaryDatum& params, const bool include_remotes )
{
  Subnet* subnet = dynamic_cast< Subnet* >( kernel().node_manager.get_node( node_id ) );
  if ( subnet == NULL )
  {
    throw SubnetExpected();
  }

  LocalChildList localnodes( *subnet );
  ArrayDatum result;

  std::vector< MPIManager::NodeAddressingData > globalnodes;
  if ( params->empty() )
  {
    kernel().mpi_manager.communicate( localnodes, globalnodes, include_remotes );
  }
  else
  {
    kernel().mpi_manager.communicate( localnodes, globalnodes, params, include_remotes );
  }
  result.reserve( globalnodes.size() );
  for ( std::vector< MPIManager::NodeAddressingData >::iterator n = globalnodes.begin();
        n != globalnodes.end();
        ++n )
  {
    result.push_back( new IntegerDatum( n->get_gid() ) );
  }

  return result;
}
Example #8
template < typename NumberGenerator >
void
register_rng( const std::string& name, DictionaryDatum& dict )
{
  Token rngfactory =
    new librandom::RngFactoryDatum( new librandom::BuiltinRNGFactory< NumberGenerator > );
  dict->insert_move( Name( name ), rngfactory );
}
Example #9
void
nest::sinusoidal_poisson_generator::Parameters_::set( const DictionaryDatum& d,
  const sinusoidal_poisson_generator& n )
{
  if ( not n.is_model_prototype()
    && d->known( names::individual_spike_trains ) )
  {
    throw BadProperty(
      "The individual_spike_trains property can only be set as"
      " a model default using SetDefaults or upon CopyModel." );
  }

  updateValue< bool >(
    d, names::individual_spike_trains, individual_spike_trains_ );

  if ( updateValue< double >( d, names::rate, rate_ ) )
  {
    rate_ /= 1000.0; // scale to ms^-1
  }

  if ( updateValue< double >( d, names::frequency, om_ ) )
  {
    om_ *= 2.0 * numerics::pi / 1000.0;
  }

  if ( updateValue< double >( d, names::phase, phi_ ) )
  {
    phi_ *= numerics::pi / 180.0;
  }

  if ( updateValue< double >( d, names::amplitude, amplitude_ ) )
  {
    amplitude_ /= 1000.0;
  }
}
Example #10
void
nest::spin_detector::set_status( const DictionaryDatum& d )
{
  if ( d->known( names::precise_times ) )
    user_set_precise_times_ = true;

  device_.set_status( d );
}
Example #11
  void Node::set_status_base(const DictionaryDatum &dict)
  {
    assert(dict.valid());

    // We call the child's set_status first, so that the Node remains
    // unchanged if the child should throw an exception.
    set_status(dict);

    if(dict->known(names::frozen))
    {
      bool frozen_val=(*dict)[names::frozen];

      if( frozen_val == true )
        set(frozen);
      else
        unset(frozen);
    }
  }
Example #12
DictionaryDatum
Node::get_status_base()
{
  DictionaryDatum dict = get_status_dict_();

  assert( dict.valid() );

  // add information available for all nodes
  ( *dict )[ names::local ] = is_local();
  ( *dict )[ names::model ] = LiteralDatum( get_name() );

  // add information available only for local nodes
  if ( is_local() )
  {
    ( *dict )[ names::global_id ] = get_gid();
    ( *dict )[ names::frozen ] = is_frozen();
    ( *dict )[ names::node_uses_wfr ] = node_uses_wfr();
    ( *dict )[ names::thread ] = get_thread();
    ( *dict )[ names::vp ] = get_vp();
    if ( parent_ )
    {
      ( *dict )[ names::parent ] = parent_->get_gid();

      // LIDs are only sensible for nodes with parents.
      // Add 1 as we count lids internally from 0, but from
      // 1 in the user interface.
      ( *dict )[ names::local_id ] = get_lid() + 1;
    }
  }

  ( *dict )[ names::thread_local_id ] = get_thread_lid();
  ( *dict )[ names::supports_precise_spikes ] = is_off_grid();

  // This is overwritten with a corresponding value in the
  // base classes for stimulating and recording devices, and
  // in other special node classes
  ( *dict )[ names::element_type ] = LiteralDatum( names::neuron );

  // now call the child class' hook
  get_status( dict );

  assert( dict.valid() );
  return dict;
}
Example #13
ArrayDatum
get_connections( const DictionaryDatum& dict )
{
  dict->clear_access_flags();

  ArrayDatum array = kernel().connection_builder_manager.get_connections( dict );

  ALL_ENTRIES_ACCESSED( *dict, "GetConnections", "Unread dictionary entries: " );

  return array;
}
Example #14
void
nest::iaf_cond_alpha_mc::State_::set( const DictionaryDatum& d, const Parameters_& )
{
  // extract from sub-dictionaries
  for ( size_t n = 0; n < NCOMP; ++n )
    if ( d->known( comp_names_[ n ] ) )
    {
      DictionaryDatum dd = getValue< DictionaryDatum >( d, comp_names_[ n ] );
      updateValue< double >( dd, names::V_m, y_[ idx( n, V_M ) ] );
    }
}
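State_::set() above reads the membrane potential of each compartment from a sub-dictionary keyed by the compartment name. A dictionary of that shape can be built with the generic Dictionary/Token API, roughly like this (a sketch; the key names are illustrative and the usual dictdatum.h/dictutils.h includes are assumed):

// Build the SLI equivalent of << /soma << /V_m -65.0 >> >> programmatically.
DictionaryDatum soma( new Dictionary );
( *soma )[ Name( "V_m" ) ] = -65.0;

DictionaryDatum d( new Dictionary );
( *d )[ Name( "soma" ) ] = soma; // one sub-dictionary per compartment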
Example #15
void
nest::weight_recorder::set_status( const DictionaryDatum& d )
{
  if ( d->known( names::precise_times ) )
  {
    user_set_precise_times_ = true;
  }

  device_.set_status( d );

  P_.set( d );
}
Example #16
void
ModelManager::set_node_defaults_( index model_id,
  const DictionaryDatum& params )
{
  params->clear_access_flags();

  get_model( model_id )->set_status( params );

  ALL_ENTRIES_ACCESSED( *params,
    "ModelManager::set_node_defaults_",
    "Unread dictionary entries: " );
}
Example #17
void
nest::iaf_cond_alpha_mc::Parameters_::set( const DictionaryDatum& d )
{
  // allow setting the membrane potential
  updateValue< double >( d, names::V_th, V_th );
  updateValue< double >( d, names::V_reset, V_reset );
  updateValue< double >( d, names::t_ref, t_ref );

  updateValue< double >( d, Name( names::g_sp ), g_conn[ SOMA ] );
  updateValue< double >( d, Name( names::g_pd ), g_conn[ PROX ] );

  // extract from sub-dictionaries
  for ( size_t n = 0; n < NCOMP; ++n )
  {
    if ( d->known( comp_names_[ n ] ) )
    {
      DictionaryDatum dd = getValue< DictionaryDatum >( d, comp_names_[ n ] );

      updateValue< double >( dd, names::E_L, E_L[ n ] );
      updateValue< double >( dd, names::E_ex, E_ex[ n ] );
      updateValue< double >( dd, names::E_in, E_in[ n ] );
      updateValue< double >( dd, names::C_m, C_m[ n ] );
      updateValue< double >( dd, names::g_L, g_L[ n ] );
      updateValue< double >( dd, names::tau_syn_ex, tau_synE[ n ] );
      updateValue< double >( dd, names::tau_syn_in, tau_synI[ n ] );
      updateValue< double >( dd, names::I_e, I_e[ n ] );
    }
  }
  if ( V_reset >= V_th )
  {
    throw BadProperty( "Reset potential must be smaller than threshold." );
  }
  if ( t_ref < 0 )
  {
    throw BadProperty( "Refractory time cannot be negative." );
  }

  // apply checks compartment-wise
  for ( size_t n = 0; n < NCOMP; ++n )
  {
    if ( C_m[ n ] <= 0 )
    {
      throw BadProperty( "Capacitance (" + comp_names_[ n ].toString()
        + ") must be strictly positive." );
    }
    if ( tau_synE[ n ] <= 0 || tau_synI[ n ] <= 0 )
    {
      throw BadProperty( "All time constants (" + comp_names_[ n ].toString()
        + ") must be strictly positive." );
    }
  }
}
Example #18
void
nest::iaf_cond_alpha_mc::State_::get( DictionaryDatum& d ) const
{
  // we assume here that State_::get() always is called after
  // Parameters_::get(), so that the per-compartment dictionaries exist
  for ( size_t n = 0; n < NCOMP; ++n )
  {
    assert( d->known( comp_names_[ n ] ) );
    DictionaryDatum dd = getValue< DictionaryDatum >( d, comp_names_[ n ] );

    def< double >( dd, names::V_m, y_[ idx( n, V_M ) ] ); // Membrane potential
  }
}
Example #19
void
Node::set_status_base( const DictionaryDatum& dict )
{
  assert( dict.valid() );
  try
  {
    set_status( dict );
  }
  catch ( BadProperty& e )
  {
    throw BadProperty( String::compose(
      "Setting status of a '%1' with GID %2: %3", get_name(), get_gid(), e.message() ) );
  }

  updateValue< bool >( dict, names::frozen, frozen_ );
}
Example #20
nest::RandomParameter::RandomParameter( const DictionaryDatum& rdv_spec, const size_t )
  : rdv_( 0 )
{
  if ( !rdv_spec->known( names::distribution ) )
    throw BadProperty( "Random distribution spec must contain distribution name." );

  const std::string rdv_name = ( *rdv_spec )[ names::distribution ];
  if ( !RandomNumbers::get_rdvdict().known( rdv_name ) )
    throw BadProperty( "Unknown random deviate: " + rdv_name );

  librandom::RdvFactoryDatum factory =
    getValue< librandom::RdvFactoryDatum >( RandomNumbers::get_rdvdict()[ rdv_name ] );

  rdv_ = factory->create();
  rdv_->set_status( rdv_spec );
}
Example #21
void
nest::sinusoidal_gamma_generator::Parameters_::set( const DictionaryDatum& d,
  const sinusoidal_gamma_generator& n )
{
  if ( not n.is_model_prototype()
    && d->known( names::individual_spike_trains ) )
    throw BadProperty(
      "The individual_spike_trains property can only be set as"
      " a model default using SetDefaults or upon CopyModel." );

  if ( updateValue< bool >(
         d, names::individual_spike_trains, individual_spike_trains_ ) )
  {
    // this can happen only on model prototypes
    if ( individual_spike_trains_ )
      num_trains_ = 0; // will be counted up as connections are made
    else
      num_trains_ = 1; // fixed
  }

  if ( updateValue< double >( d, names::frequency, om_ ) )
    om_ *= 2.0 * numerics::pi / 1000.0;

  if ( updateValue< double >( d, names::phase, phi_ ) )
    phi_ *= numerics::pi / 180.0;

  if ( updateValue< double >( d, names::order, order_ ) )
  {
    if ( order_ < 1.0 )
      throw BadProperty( "The gamma order must be at least 1." );
  }

  /* The *_unscaled variables here are introduced to avoid spurious
     floating-point comparison issues under 32-bit Linux.
  */
  double dc_unscaled = 1e3 * rate_;
  if ( updateValue< double >( d, names::rate, dc_unscaled ) )
    rate_ = 1e-3 * dc_unscaled; // scale to 1/ms

  double ac_unscaled = 1e3 * amplitude_;
  if ( updateValue< double >( d, names::amplitude, ac_unscaled ) )
    amplitude_ = 1e-3 * ac_unscaled; // scale to 1/ms

  if ( not( 0.0 <= ac_unscaled and ac_unscaled <= dc_unscaled ) )
    throw BadProperty( "Rate parameters must fulfill 0 <= amplitude <= rate." );
}
Example #22
ArrayDatum
get_nodes( const index node_id,
  const DictionaryDatum& params,
  const bool include_remotes,
  const bool return_gids_only )
{
  Subnet* subnet = dynamic_cast< Subnet* >( kernel().node_manager.get_node( node_id ) );
  if ( subnet == NULL )
    throw SubnetExpected();

  LocalNodeList localnodes( *subnet );
  std::vector< MPIManager::NodeAddressingData > globalnodes;
  if ( params->empty() )
  {
    kernel().mpi_manager.communicate( localnodes, globalnodes, include_remotes );
  }
  else
  {
    kernel().mpi_manager.communicate( localnodes, globalnodes, params, include_remotes );
  }

  ArrayDatum result;
  result.reserve( globalnodes.size() );
  for ( std::vector< MPIManager::NodeAddressingData >::iterator n = globalnodes.begin();
        n != globalnodes.end();
        ++n )
  {
    if ( return_gids_only )
    {
      result.push_back( new IntegerDatum( n->get_gid() ) );
    }
    else
    {
      DictionaryDatum* node_info = new DictionaryDatum( new Dictionary );
      ( **node_info )[ names::global_id ] = n->get_gid();
      ( **node_info )[ names::vp ] = n->get_vp();
      ( **node_info )[ names::parent ] = n->get_parent_gid();
      result.push_back( node_info );
    }
  }

  return result;
}
Example #23
void
set_connection_status( const ConnectionDatum& conn, const DictionaryDatum& dict )
{
  DictionaryDatum conn_dict = conn.get_dict();
  long synapse_id = getValue< long >( conn_dict, nest::names::synapse_modelid );
  long port = getValue< long >( conn_dict, nest::names::port );
  long gid = getValue< long >( conn_dict, nest::names::source );
  thread tid = getValue< long >( conn_dict, nest::names::target_thread );
  kernel().node_manager.get_node( gid ); // Just to check if the node exists

  dict->clear_access_flags();

  kernel().connection_builder_manager.set_synapse_status( gid, synapse_id, port, tid, dict );

  ALL_ENTRIES_ACCESSED2( *dict,
    "SetStatus",
    "Unread dictionary entries: ",
    "Maybe you tried to set common synapse properties through an individual synapse?" );
}
Example #24
void
NodeManager::set_status_single_node_( Node& target,
  const DictionaryDatum& d,
  bool clear_flags )
{
  // proxies have no properties
  if ( not target.is_proxy() )
  {
    if ( clear_flags )
    {
      d->clear_access_flags();
    }
    target.set_status_base( d );

    // TODO: Not sure this check should be at single neuron level; advantage is
    // it stops after first failure.
    ALL_ENTRIES_ACCESSED(
      *d, "NodeManager::set_status", "Unread dictionary entries: " );
  }
}
Example #25
void
aeif_cond_alpha_multisynapse::State_::set( const DictionaryDatum& d )
{
  updateValue< double >( d, names::V_m, y_[ V_M ] );

  if ( ( d->known( names::g_ex ) ) && ( d->known( names::dg_ex ) ) && ( d->known( names::g_in ) )
    && ( d->known( names::dg_in ) ) )
  {
    const std::vector< double_t > g_exc =
      getValue< std::vector< double_t > >( d->lookup( names::g_ex ) );
    const std::vector< double_t > dg_exc =
      getValue< std::vector< double_t > >( d->lookup( names::dg_ex ) );
    const std::vector< double_t > g_inh =
      getValue< std::vector< double_t > >( d->lookup( names::g_in ) );
    const std::vector< double_t > dg_inh =
      getValue< std::vector< double_t > >( d->lookup( names::dg_in ) );

    if ( ( g_exc.size() != dg_exc.size() ) || ( g_exc.size() != g_inh.size() )
      || ( g_exc.size() != dg_inh.size() ) )
    {
      throw BadProperty( "Conductances must have the same sizes." );
    }

    for ( size_t i = 0; i < g_exc.size(); ++i )
    {
      if ( ( g_exc[ i ] < 0 ) || ( dg_exc[ i ] < 0 ) || ( g_inh[ i ] < 0 ) || ( dg_inh[ i ] < 0 ) )
      {
        throw BadProperty( "Conductances must not be negative." );
      }

      y_[ State_::G_EXC + ( State_::NUMBER_OF_STATES_ELEMENTS_PER_RECEPTOR * i ) ] = g_exc[ i ];
      y_[ State_::DG_EXC + ( State_::NUMBER_OF_STATES_ELEMENTS_PER_RECEPTOR * i ) ] = dg_exc[ i ];
      y_[ State_::G_INH + ( State_::NUMBER_OF_STATES_ELEMENTS_PER_RECEPTOR * i ) ] = g_inh[ i ];
      y_[ State_::DG_INH + ( State_::NUMBER_OF_STATES_ELEMENTS_PER_RECEPTOR * i ) ] = dg_inh[ i ];
    }
  }

  updateValue< double >( d, names::w, y_[ W ] );
}
Example #26
bool nest::ac_poisson_generator::
Parameters_::extract_array_(const DictionaryDatum &d, 
			    const std::string& dname,
			    std::valarray<double>& data) const
{
  if ( d->known(dname) )
  {
    ArrayDatum *ad = dynamic_cast<ArrayDatum *>((*d)[dname].datum());
    if ( ad == 0 )
      throw BadProperty();

    const size_t nd = ad->size();
    data.resize(nd);
    for ( size_t n = 0 ; n < nd ; ++n )
    {
      data[n] = getValue<double>((*ad)[n]);
    }

    return true;
  }
  else
    return false;
}
Example #27
void
NodeManager::set_status( const DictionaryDatum& d )
{
  std::string tmp;
  // proceed only if there are unaccessed items left
  if ( not d->all_accessed( tmp ) )
  {
    // Fetch the target pointer here. We cannot do it above, since
    // Network::set_status() may modify the root compound if the number
    // of threads changes. HEP, 2008-10-20
    Node* target = local_nodes_.get_node_by_gid( 0 );
    assert( target != 0 );

    for ( size_t t = 0; t < target->num_thread_siblings(); ++t )
    {
      // Root container for per-thread subnets. We must prevent clearing of
      // access flags before each compound's properties are set by passing
      // false as the last argument; we iterate over all threads.
      assert( target->get_thread_sibling( t ) != 0 );
      set_status_single_node_( *( target->get_thread_sibling( t ) ), d, false );
    }
  }
}
Example #28
nest::ConnBuilder::ConnBuilder( const GIDCollection& sources,
  const GIDCollection& targets,
  const DictionaryDatum& conn_spec,
  const DictionaryDatum& syn_spec )
  : sources_( &sources )
  , targets_( &targets )
  , autapses_( true )
  , multapses_( true )
  , symmetric_( false )
  , exceptions_raised_( kernel().vp_manager.get_num_threads() )
  , synapse_model_( kernel().model_manager.get_synapsedict()->lookup(
      "static_synapse" ) )
  , weight_( 0 )
  , delay_( 0 )
  , param_dicts_()
  , parameters_requiring_skipping_()
{
  // read out rule-related parameters -------------------------
  //  - /rule has been taken care of above
  //  - rule-specific params are handled by subclass c'tor
  updateValue< bool >( conn_spec, names::autapses, autapses_ );
  updateValue< bool >( conn_spec, names::multapses, multapses_ );
  updateValue< bool >( conn_spec, names::symmetric, symmetric_ );

  // read out synapse-related parameters ----------------------
  if ( !syn_spec->known( names::model ) )
    throw BadProperty( "Synapse spec must contain synapse model." );
  const std::string syn_name = ( *syn_spec )[ names::model ];
  if ( not kernel().model_manager.get_synapsedict()->known( syn_name ) )
    throw UnknownSynapseType( syn_name );

  // if a synapse other than static_synapse is given, we need to make
  // sure that Connect can process all parameters specified
  if ( syn_name != "static_synapse" )
    check_synapse_params_( syn_name, syn_spec );

  synapse_model_ = kernel().model_manager.get_synapsedict()->lookup( syn_name );

  DictionaryDatum syn_defaults =
    kernel().model_manager.get_connector_defaults( synapse_model_ );

  // All synapse models have the possibility to set the delay (see
  // SynIdDelay), but some have homogeneous weights, hence it should
  // be possible to set the delay without the weight.
  default_weight_ = !syn_spec->known( names::weight );

  default_delay_ = !syn_spec->known( names::delay );

  // If neither weight nor delay is given in the dict, we handle this
  // separately. Important for hom_w synapses, on which weight cannot
  // be set. However, we use default weight and delay for _all_ types
  // of synapses.
  default_weight_and_delay_ = ( default_weight_ && default_delay_ );

#ifdef HAVE_MUSIC
  // We allow music_channel as alias for receptor_type during
  // connection setup
  ( *syn_defaults )[ names::music_channel ] = 0;
#endif

  if ( !default_weight_and_delay_ )
  {
    weight_ = syn_spec->known( names::weight )
      ? ConnParameter::create( ( *syn_spec )[ names::weight ],
          kernel().vp_manager.get_num_threads() )
      : ConnParameter::create( ( *syn_defaults )[ names::weight ],
          kernel().vp_manager.get_num_threads() );
    register_parameters_requiring_skipping_( *weight_ );
    delay_ = syn_spec->known( names::delay )
      ? ConnParameter::create(
          ( *syn_spec )[ names::delay ], kernel().vp_manager.get_num_threads() )
      : ConnParameter::create( ( *syn_defaults )[ names::delay ],
          kernel().vp_manager.get_num_threads() );
  }
  else if ( default_weight_ )
  {
    delay_ = syn_spec->known( names::delay )
      ? ConnParameter::create(
          ( *syn_spec )[ names::delay ], kernel().vp_manager.get_num_threads() )
      : ConnParameter::create( ( *syn_defaults )[ names::delay ],
          kernel().vp_manager.get_num_threads() );
  }
  register_parameters_requiring_skipping_( *delay_ );
  // Structural plasticity parameters
  // Check if both pre and post synaptic element are provided
  if ( syn_spec->known( names::pre_synaptic_element )
    && syn_spec->known( names::post_synaptic_element ) )
  {
    pre_synaptic_element_name =
      getValue< std::string >( syn_spec, names::pre_synaptic_element );
    post_synaptic_element_name =
      getValue< std::string >( syn_spec, names::post_synaptic_element );
  }
  else
  {
    if ( syn_spec->known( names::pre_synaptic_element )
      || syn_spec->known( names::post_synaptic_element ) )
    {
      throw BadProperty(
        "In order to use structural plasticity, both a pre and post synaptic "
        "element must be specified" );
    }
    pre_synaptic_element_name = "";
    post_synaptic_element_name = "";
  }

  // synapse-specific parameters
  // TODO: Can we create this set once and for all?
  //       Should not be done as static initialization, since
  //       that might conflict with static initialization of
  //       Name system.
  std::set< Name > skip_set;
  skip_set.insert( names::weight );
  skip_set.insert( names::delay );
  skip_set.insert( Name( "min_delay" ) );
  skip_set.insert( Name( "max_delay" ) );
  skip_set.insert( Name( "num_connections" ) );
  skip_set.insert( Name( "num_connectors" ) );
  skip_set.insert( Name( "property_object" ) );
  skip_set.insert( Name( "synapsemodel" ) );

  for ( Dictionary::const_iterator default_it = syn_defaults->begin();
        default_it != syn_defaults->end();
        ++default_it )
  {
    const Name param_name = default_it->first;
    if ( skip_set.find( param_name ) != skip_set.end() )
      continue; // weight, delay or not-settable parameter

    if ( syn_spec->known( param_name ) )
    {
      synapse_params_[ param_name ] = ConnParameter::create(
        ( *syn_spec )[ param_name ], kernel().vp_manager.get_num_threads() );
      register_parameters_requiring_skipping_( *synapse_params_[ param_name ] );
    }
  }

  // Now create dictionary with dummy values that we will use
  // to pass settings to the synapses created. We create it here
  // once to avoid re-creating the object over and over again.
  if ( synapse_params_.size() > 0 )
  {
    for ( index t = 0; t < kernel().vp_manager.get_num_threads(); ++t )
    {
      param_dicts_.push_back( new Dictionary() );

      for ( ConnParameterMap::const_iterator it = synapse_params_.begin();
            it != synapse_params_.end();
            ++it )
      {
        if ( it->first == names::receptor_type
          || it->first == names::music_channel
          || it->first == names::synapse_label )
          ( *param_dicts_[ t ] )[ it->first ] = Token( new IntegerDatum( 0 ) );
        else
          ( *param_dicts_[ t ] )[ it->first ] = Token( new DoubleDatum( 0.0 ) );
      }
    }
  }

  // If symmetric_ is requested call reset on all parameters in order
  // to check if all parameters support symmetric connections
  if ( symmetric_ )
  {
    if ( weight_ )
    {
      weight_->reset();
    }
    if ( delay_ )
    {
      delay_->reset();
    }
    for ( ConnParameterMap::const_iterator it = synapse_params_.begin();
          it != synapse_params_.end();
          ++it )
    {
      it->second->reset();
    }
  }
}
Example #29
inline void
nest::ConnBuilder::check_synapse_params_( std::string syn_name,
  const DictionaryDatum& syn_spec )
{
  // throw error if weight is specified with static_synapse_hom_w
  if ( syn_name == "static_synapse_hom_w" )
  {
    if ( syn_spec->known( names::weight ) )
      throw BadProperty(
        "Weight cannot be specified since it needs to be equal "
        "for all connections when static_synapse_hom_w is used." );
    return;
  }


  // throw an error if n or a is set in quantal_stp_synapse; Connect cannot
  // handle them since they are integer-valued
  if ( syn_name == "quantal_stp_synapse" )
  {
    if ( syn_spec->known( names::n ) )
      throw NotImplemented(
        "Connect doesn't support the setting of parameter "
        "n in quantal_stp_synapse. Use SetDefaults() or CopyModel()." );
    if ( syn_spec->known( names::a ) )
      throw NotImplemented(
        "Connect doesn't support the setting of parameter "
        "a in quantal_stp_synapse. Use SetDefaults() or CopyModel()." );
    return;
  }

  // warn if the delay is given via Connect rather than being defined within cont_delay_synapse
  if ( syn_name == "cont_delay_synapse" )
  {
    if ( syn_spec->known( names::delay ) )
      LOG( M_WARNING,
        "Connect",
        "The delay will be rounded to the next multiple of the time step. "
        "To use a more precise time delay it needs to be defined within "
        "the synapse, e.g. with CopyModel()." );
    return;
  }

  // throw error if no volume transmitter is defined or parameters are specified
  // that need to be introduced via CopyModel or SetDefaults
  if ( syn_name == "stdp_dopamine_synapse" )
  {
    if ( syn_spec->known( "vt" ) )
      throw NotImplemented(
        "Connect doesn't support the direct specification of the "
        "volume transmitter of stdp_dopamine_synapse in syn_spec."
        "Use SetDefaults() or CopyModel()." );
    // setting parameters c and n is not thread-safe
    if ( kernel().vp_manager.get_num_threads() > 1 )
    {
      if ( syn_spec->known( names::c ) )
        throw NotImplemented(
          "For multi-threading Connect doesn't support the setting "
          "of parameter c in stdp_dopamine_synapse. "
          "Use SetDefaults() or CopyModel()." );
      if ( syn_spec->known( names::n ) )
        throw NotImplemented(
          "For multi-threading Connect doesn't support the setting "
          "of parameter n in stdp_dopamine_synapse. "
          "Use SetDefaults() or CopyModel()." );
    }
    std::string param_arr[] = {
      "A_minus", "A_plus", "Wmax", "Wmin", "b", "tau_c", "tau_n", "tau_plus"
    };
    std::vector< std::string > param_vec( param_arr, param_arr + 8 );
    for ( std::vector< std::string >::iterator it = param_vec.begin();
          it != param_vec.end();
          it++ )
    {
      if ( syn_spec->known( *it ) )
        throw NotImplemented(
          "Connect doesn't support the setting of parameter " + *it
          + " in stdp_dopamine_synapse. Use SetDefaults() or CopyModel()." );
    }
    return;
  }
}
Example #30
MaskDatum
TopologyModule::create_mask( const Token& t )
{
  // t can be either an existing MaskDatum, or a Dictionary containing
  // mask parameters
  MaskDatum* maskd = dynamic_cast< MaskDatum* >( t.datum() );
  if ( maskd )
  {
    return *maskd;
  }
  else
  {

    DictionaryDatum* dd = dynamic_cast< DictionaryDatum* >( t.datum() );
    if ( dd == 0 )
    {
      throw BadProperty( "Mask must be masktype or dictionary." );
    }

    // The dictionary should contain one key which is the name of the
    // mask type, and optionally the key 'anchor'. To find the unknown
    // mask type key, we must loop through all keys. The value for the
    // anchor key will be stored in the anchor_token variable.
    Token anchor_token;
    bool has_anchor = false;
    AbstractMask* mask = 0;

    for ( Dictionary::iterator dit = ( *dd )->begin(); dit != ( *dd )->end();
          ++dit )
    {

      if ( dit->first == names::anchor )
      {

        anchor_token = dit->second;
        has_anchor = true;
      }
      else
      {

        if ( mask != 0 )
        { // mask has already been defined
          throw BadProperty(
            "Mask definition dictionary contains extraneous items." );
        }
        mask =
          create_mask( dit->first, getValue< DictionaryDatum >( dit->second ) );
      }
    }

    if ( has_anchor )
    {

      // The anchor may be an array of doubles (a spatial position), or a
      // dictionary containing the keys 'column' and 'row' (for grid
      // masks only)
      try
      {

        std::vector< double > anchor =
          getValue< std::vector< double > >( anchor_token );
        AbstractMask* amask;

        switch ( anchor.size() )
        {
        case 2:
          amask = new AnchoredMask< 2 >(
            dynamic_cast< Mask< 2 >& >( *mask ), anchor );
          break;
        case 3:
          amask = new AnchoredMask< 3 >(
            dynamic_cast< Mask< 3 >& >( *mask ), anchor );
          break;
        default:
          throw BadProperty( "Anchor must be 2- or 3-dimensional." );
        }

        delete mask;
        mask = amask;
      }
      catch ( TypeMismatch& e )
      {

        DictionaryDatum ad = getValue< DictionaryDatum >( anchor_token );

        int dim = 2;
        int column = getValue< long >( ad, names::column );
        int row = getValue< long >( ad, names::row );
        int layer;
        if ( ad->known( names::layer ) )
        {
          layer = getValue< long >( ad, names::layer );
          dim = 3;
        }
        switch ( dim )
        {
        case 2:
          try
          {
            GridMask< 2 >& grid_mask_2d =
              dynamic_cast< GridMask< 2 >& >( *mask );
            grid_mask_2d.set_anchor( Position< 2, int >( column, row ) );
          }
          catch ( std::bad_cast& e )
          {
            throw BadProperty( "Mask must be 2-dimensional grid mask." );
          }
          break;
        case 3:
          try
          {
            GridMask< 3 >& grid_mask_3d =
              dynamic_cast< GridMask< 3 >& >( *mask );
            grid_mask_3d.set_anchor( Position< 3, int >( column, row, layer ) );
          }
          catch ( std::bad_cast& e )
          {
            throw BadProperty( "Mask must be 3-dimensional grid mask." );
          }
          break;
        }
      }
    }

    return mask;
  }
}
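create_mask() above expects a dictionary with exactly one key naming the mask type, plus an optional /anchor entry. Built with the same Dictionary/Token API, such a specification looks roughly like the following sketch (hedged; the circular mask with a /radius parameter is used only as an illustration):

// C++ counterpart of the SLI spec << /circular << /radius 0.5 >> /anchor [ 0.0 0.0 ] >>
DictionaryDatum circular( new Dictionary );
( *circular )[ Name( "radius" ) ] = 0.5;

ArrayDatum anchor;
anchor.push_back( new DoubleDatum( 0.0 ) );
anchor.push_back( new DoubleDatum( 0.0 ) );

DictionaryDatum mask_spec( new Dictionary );
( *mask_spec )[ Name( "circular" ) ] = circular;
( *mask_spec )[ Name( "anchor" ) ] = anchor;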