/* Initialize model state at t = 0 (the finitialize sequence).
 * setv != 0: every node's membrane potential in every thread is set to v.
 * The ordering of the phases below is deliberate and must not change:
 * Vector.play init and its t=0 events, BEFORE_INITIAL blocks, mechanism
 * INITIAL blocks, net-event init, AFTER_INITIAL blocks, delivery of events
 * sent from INITIAL, tree-matrix setup, t=0 record events, spike exchange. */
void nrn_finitialize(int setv, double v) {
    int i;
    NrnThread* _nt;
    t = 0.;
    dt2thread(-1.);
    nrn_thread_table_check();
    clear_event_queue();
    nrn_spike_exchange_init();
#if VECTORIZE
    nrn_play_init(); /* Vector.play */
    /* Play events should be executed before initializing events */
    for (i = 0; i < nrn_nthread; ++i) {
        nrn_deliver_events(nrn_threads + i); /* The play events at t=0 */
    }
    if (setv) {
        /* set the voltage of every compartment in every thread to v */
        for (_nt = nrn_threads; _nt < nrn_threads + nrn_nthread; ++_nt) {
            for (i = 0; i < _nt->end; ++i) {
                /* NOTE(review): VEC_V presumably indexes the voltage array of
                 * the local _nt — macro defined elsewhere; confirm before
                 * renaming _nt. */
                VEC_V(i) = v;
            }
        }
    }
    for (i = 0; i < nrn_nthread; ++i) {
        nrn_ba(nrn_threads + i, BEFORE_INITIAL);
    }
    /* the INITIAL blocks are ordered so that mechanisms that write
       concentrations are after ions and before mechanisms that read
       concentrations. */
    /* the memblist list in NrnThread is already so ordered */
    for (i = 0; i < nrn_nthread; ++i) {
        NrnThread* nt = nrn_threads + i;
        NrnThreadMembList* tml;
        /* walk each thread's mechanism list and run its INITIAL callback */
        for (tml = nt->tml; tml; tml = tml->next) {
            mod_f_t s = memb_func[tml->index].initialize;
            if (s) {
                (*s)(nt, tml->ml, tml->index);
            }
        }
    }
#endif
    init_net_events();
    for (i = 0; i < nrn_nthread; ++i) {
        nrn_ba(nrn_threads + i, AFTER_INITIAL);
    }
    for (i = 0; i < nrn_nthread; ++i) {
        nrn_deliver_events(nrn_threads + i); /* The INITIAL sent events at t=0 */
    }
    for (i = 0; i < nrn_nthread; ++i) {
        setup_tree_matrix_minimal(nrn_threads + i);
    }
    for (i = 0; i < nrn_nthread; ++i) {
        nrn_deliver_events(nrn_threads + i); /* The record events at t=0 */
    }
#if NRNMPI
    nrn_spike_exchange(nrn_threads);
#endif
}
/* Second half of a fixed time step for one thread: advance t by dt/2,
 * apply continuous play, integrate states (nonvint), run AFTER_SOLVE
 * blocks, record, and deliver pending events up to the new t.
 * Always returns NULL (thread-worker signature). */
void* nrn_fixed_step_lastpart(NrnThread* nth) {
    CTBEGIN /* NOTE(review): timing macro, defined elsewhere — pairs with CTADD below */
#if NRN_DAQ
    nrn_daq_ao();
#endif
#if ELIMINATE_T_ROUNDOFF
    /* recompute t from a half-step counter to avoid accumulated roundoff */
    nth->nrn_ndt_ += .5;
    nth->_t = nrn_tbase_ + nth->nrn_ndt_ * nrn_dt_;
#else
    nth->_t += .5 * nth->_dt;
#endif
    fixed_play_continuous(nth);
#if NRN_DAQ
    nrn_daq_scanstart();
#endif
    nrn_extra_scatter_gather(0, nth->id);
    nonvint(nth);
    nrn_ba(nth, AFTER_SOLVE);
#if NRN_DAQ
    nrn_daq_ai();
#endif
    fixed_record_continuous(nth);
    CTADD
    nrn_deliver_events(nth); /* up to but not past texit */
    return (void*)0;
}