Example #1
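/*
 * Voltage update step of the fixed-step integration: the solver has left the
 * voltage increment in RHS, so each node's membrane potential is advanced by
 * RHS (implicit Euler) or by 2*RHS when secondorder (Crank-Nicolson) is set.
 * The CACHEVEC branch works on the cache-efficient flat arrays (VEC_V, VEC_RHS);
 * the fallback walks the Node pointers, with ivdep hints for the Cray compiler.
 */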
static void update(NrnThread* _nt)
{
	int i, i1, i2;
	i1 = 0;
	i2 = _nt->end;
#if CACHEVEC
    if (use_cachevec) {
	/* do not need to worry about linmod or extracellular */
	if (secondorder) {
		for (i=i1; i < i2; ++i) {
			VEC_V(i) += 2.*VEC_RHS(i);
		}
	}else{
		for (i=i1; i < i2; ++i) {
			VEC_V(i) += VEC_RHS(i);
		}
	}
    }else
#endif
    {	/* use original non-vectorized update */
 	if (secondorder) {
#if _CRAY
#pragma _CRI ivdep
#endif
		for (i=i1; i < i2; ++i) {
			NODEV(_nt->_v_node[i]) += 2.*NODERHS(_nt->_v_node[i]);
		}
	}else{
#if _CRAY
#pragma _CRI ivdep
#endif
		for (i=i1; i < i2; ++i) {
			NODEV(_nt->_v_node[i]) += NODERHS(_nt->_v_node[i]);
		}
		if (use_sparse13) {
			nrndae_update();
		}
	}
    } /* end of non-vectorized update */

#if EXTRACELLULAR
	nrn_update_2d(_nt);
#endif

#if I_MEMBRANE
	if (_nt->tml) {
		assert(_nt->tml->index == CAP);
		nrn_capacity_current(_nt, _nt->tml->ml);
	}
#endif

}
Example #2
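/*
 * Generated (nocmodl-style) current routine for a membrane mechanism:
 * _nrn_current() is evaluated at _v + .001 and at _v, so _g ends up holding
 * the conductance d(i)/d(v) by forward difference with a 0.001 mV
 * perturbation, while _rhs holds the current at _v, which is then subtracted
 * from the node's right-hand side. _g, _nrn_current() and the VEC_RHS and
 * NODERHS macros are defined elsewhere in the generated file.
 */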
static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type) {
double* _p; Datum* _ppvar; Datum* _thread;
Node *_nd; int* _ni; double _rhs, _v; int _iml, _cntml;
#if CACHEVEC
    _ni = _ml->_nodeindices;
#endif
_cntml = _ml->_nodecount;
_thread = _ml->_thread;
for (_iml = 0; _iml < _cntml; ++_iml) {
 _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];
#if CACHEVEC
  if (use_cachevec) {
    _v = VEC_V(_ni[_iml]);
  }else
#endif
  {
    _nd = _ml->_nodelist[_iml];
    _v = NODEV(_nd);
  }
 _g = _nrn_current(_p, _ppvar, _thread, _nt, _v + .001);
 	{ _rhs = _nrn_current(_p, _ppvar, _thread, _nt, _v);
 	}
 _g = (_g - _rhs)/.001;
#if CACHEVEC
  if (use_cachevec) {
	VEC_RHS(_ni[_iml]) -= _rhs;
  }else
#endif
  {
	NODERHS(_nd) -= _rhs;
  }
 
}}
Example #3
/* triangularization of the matrix equations */
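/* The tree-topology matrix is eliminated from the leaves toward the roots:
   the loop visits nodes from i3 - 1 down to the first non-root node i2 and
   folds each node's off-diagonal contribution into its parent row through
   _v_parent_index. The loop is marked seq because an iteration writes to the
   parent's d and rhs entries, which later iterations (and siblings sharing
   that parent) still need. */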
static void triang(NrnThread* _nt) {
    double p;
    int i, i2, i3;
    i2 = _nt->ncell;
    i3 = _nt->end;

    double* vec_a = &(VEC_A(0));
    double* vec_b = &(VEC_B(0));
    double* vec_d = &(VEC_D(0));
    double* vec_rhs = &(VEC_RHS(0));
    int* parent_index = _nt->_v_parent_index;
#if defined(_OPENACC)
    int stream_id = _nt->stream_id;
#endif

/** @todo: just for benchmarking, otherwise produces wrong results */
// clang-format off
    #pragma acc parallel loop seq present(      \
        vec_a[0:i3], vec_b[0:i3], vec_d[0:i3],  \
        vec_rhs[0:i3], parent_index[0:i3])      \
        async(stream_id) if (_nt->compute_gpu)
    // clang-format on
    for (i = i3 - 1; i >= i2; --i) {
        p = vec_a[i] / vec_d[i];
        vec_d[parent_index[i]] -= p * vec_b[i];
        vec_rhs[parent_index[i]] -= p * vec_rhs[i];
    }
}
Example #4
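/*
 * Generated current routine for a mechanism writing Ca, K and Na currents:
 * ion concentrations are copied in from the ion storage (_ion_cai, _ion_ki,
 * _ion_nai, ...), _nrn_current() is evaluated at _v + .001 and at _v to form
 * the total conductance _g and the per-ion derivative terms (_ion_dicadv,
 * _ion_dikdv, _ion_dinadv), and the resulting currents are accumulated into
 * the ion variables and subtracted from the node's right-hand side. _p,
 * _ppvar, _g, the ion globals and _nrn_current() are defined elsewhere in
 * the generated file.
 */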
static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type){
Node *_nd; int* _ni; double _rhs, _v; int _iml, _cntml;
#if CACHEVEC
    _ni = _ml->_nodeindices;
#endif
_cntml = _ml->_nodecount;
for (_iml = 0; _iml < _cntml; ++_iml) {
 _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];
#if CACHEVEC
  if (use_cachevec) {
    _v = VEC_V(_ni[_iml]);
  }else
#endif
  {
    _nd = _ml->_nodelist[_iml];
    _v = NODEV(_nd);
  }
  cai = _ion_cai;
  cao = _ion_cao;
  ki = _ion_ki;
  ko = _ion_ko;
  nai = _ion_nai;
  nao = _ion_nao;
 _g = _nrn_current(_v + .001);
 	{ double _dina;
 double _dik;
 double _dica;
  _dica = ica;
  _dik = ik;
  _dina = ina;
 _rhs = _nrn_current(_v);
  _ion_dicadv += (_dica - ica)/.001 ;
  _ion_dikdv += (_dik - ik)/.001 ;
  _ion_dinadv += (_dina - ina)/.001 ;
 	}
 _g = (_g - _rhs)/.001;
  _ion_ica += ica ;
  _ion_cai = cai;
  _ion_ik += ik ;
  _ion_ina += ina ;
#if CACHEVEC
  if (use_cachevec) {
	VEC_RHS(_ni[_iml]) -= _rhs;
  }else
#endif
  {
	NODERHS(_nd) -= _rhs;
  }
 
}}
Example #5
/* back substitution to finish solving the matrix equations */
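/* First loop: the root node of every cell (indices i1..i2) is solved directly
   with rhs /= d. Second loop: nodes i2..i3 are visited from the roots toward
   the leaves; each one substitutes its parent's already-solved value via
   _v_parent_index before dividing by its diagonal. Both loops are marked seq,
   and the final acc wait blocks until the asynchronous work queued on
   stream_id has finished. */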
static void bksub(NrnThread* _nt) {
    int i, i1, i2, i3;
    i1 = 0;
    i2 = i1 + _nt->ncell;
    i3 = _nt->end;

    double* vec_b = &(VEC_B(0));
    double* vec_d = &(VEC_D(0));
    double* vec_rhs = &(VEC_RHS(0));
    int* parent_index = _nt->_v_parent_index;
#if defined(_OPENACC)
    int stream_id = _nt->stream_id;
#endif

/** @todo: just for benchmarking, otherwise produces wrong results */
// clang-format off
    #pragma acc parallel loop seq present(      \
        vec_d[0:i2], vec_rhs[0:i2])             \
        async(stream_id) if (_nt->compute_gpu)
    // clang-format on
    for (i = i1; i < i2; ++i) {
        vec_rhs[i] /= vec_d[i];
    }

/** @todo: just for benchmarking, otherwise produces wrong results */
// clang-format off
    #pragma acc parallel loop seq present(          \
        vec_b[0:i3], vec_d[0:i3], vec_rhs[0:i3],    \
        parent_index[0:i3]) async(stream_id)        \
        if (_nt->compute_gpu)
    for (i = i2; i < i3; ++i) {
        vec_rhs[i] -= vec_b[i] * vec_rhs[parent_index[i]];
        vec_rhs[i] /= vec_d[i];
    }

    #pragma acc wait(stream_id)
    // clang-format on
}
Example #6
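/*
 * Generated current routine for a mechanism using an ion named Ca
 * (Cai, Cao, iCa): the structure mirrors Example #4, with _nrn_current()
 * evaluated at _v + .001 and at _v to form _g and the derivative term
 * _ion_diCadv, and iCa accumulated into _ion_iCa and subtracted from the
 * node's right-hand side.
 */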
static void nrn_cur(_NrnThread* _nt, _Memb_list* _ml, int _type){
Node *_nd; int* _ni; double _rhs, _v; int _iml, _cntml;
#if CACHEVEC
    _ni = _ml->_nodeindices;
#endif
_cntml = _ml->_nodecount;
for (_iml = 0; _iml < _cntml; ++_iml) {
 _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];
#if CACHEVEC
  if (use_cachevec) {
    _v = VEC_V(_ni[_iml]);
  }else
#endif
  {
    _nd = _ml->_nodelist[_iml];
    _v = NODEV(_nd);
  }
  Cai = _ion_Cai;
  Cao = _ion_Cao;
 _g = _nrn_current(_v + .001);
 	{ double _diCa;
  _diCa = iCa;
 _rhs = _nrn_current(_v);
  _ion_diCadv += (_diCa - iCa)/.001 ;
 	}
 _g = (_g - _rhs)/.001;
  _ion_iCa += iCa ;
#if CACHEVEC
  if (use_cachevec) {
	VEC_RHS(_ni[_iml]) -= _rhs;
  }else
#endif
  {
	NODERHS(_nd) -= _rhs;
  }
 
}}