void MiniTensorVector<T, N>::
applyUnary(Elementwise::UnaryFunction<T> const & f)
{
  // Apply f to every entry of the wrapped MiniTensor vector, in place.
  auto const num_entries = vector_.get_dimension();

  for (minitensor::Index idx{0}; idx < num_entries; ++idx) {
    vector_(idx) = f.apply(vector_(idx));
  }
}
void MiniTensorVector<T, N>::
applyBinary(Elementwise::BinaryFunction<T> const & f, Vector<T> const & x)
{
  // Combine each entry of the wrapped vector with the corresponding entry
  // of x via f: vector_(i) = f(vector_(i), x(i)), in place.
  minitensor::Vector<T, N> const xval = MTfromROL<T, N>(x);

  auto const dim = vector_.get_dimension();

  // Guard against mismatched dimensions before indexing xval(i),
  // consistent with the checks in axpy() and plus().
  assert(xval.get_dimension() == dim);

  for (minitensor::Index i{0}; i < dim; ++i) {
    vector_(i) = f.apply(vector_(i), xval(i));
  }
}
void MiniTensorVector<T, N>::
scale(T const alpha)
{
  // In-place scalar multiplication: vector_ <- alpha * vector_.
  auto const num_entries = vector_.get_dimension();

  for (minitensor::Index idx{0}; idx < num_entries; ++idx) {
    vector_(idx) *= alpha;
  }
}
void SurfaceVectorJump<EvalT, Traits>::evaluateFields(
    typename Traits::EvalData workset)
{
  // Compute the jump of the vector field across the surface element:
  // interpolate the field on the bottom plane and on the top plane at each
  // quadrature point (using the mid-plane reference shape functions), then
  // store jump = top - bottom, component by component.
  Intrepid2::Vector<ScalarT> bottom(0, 0, 0), top(0, 0, 0), delta(0, 0, 0);

  for (int cell = 0; cell < workset.numCells; ++cell) {
    for (int pt = 0; pt < num_qps_; ++pt) {
      bottom.fill(Intrepid2::ZEROS);
      top.fill(Intrepid2::ZEROS);

      for (int node = 0; node < num_plane_nodes_; ++node) {
        // Top-plane node paired with this bottom-plane node.
        int const mirror = node + num_plane_nodes_;
        // Shape-function weight is the same for both planes at this pt.
        auto const w = ref_values_(node, pt);

        bottom += Intrepid2::Vector<ScalarT>(
            w * vector_(cell, node, 0),
            w * vector_(cell, node, 1),
            w * vector_(cell, node, 2));

        top += Intrepid2::Vector<ScalarT>(
            w * vector_(cell, mirror, 0),
            w * vector_(cell, mirror, 1),
            w * vector_(cell, mirror, 2));
      }

      delta = top - bottom;

      jump_(cell, pt, 0) = delta(0);
      jump_(cell, pt, 1) = delta(1);
      jump_(cell, pt, 2) = delta(2);
    }
  }
}
T MiniTensorVector<T, N>::
reduce(Elementwise::ReductionOp<T> const & r) const
{
  // Fold every entry of the wrapped vector into a single value using the
  // reduction operator r, starting from r's identity element.
  T accumulator = r.initialValue();

  auto const num_entries = vector_.get_dimension();

  for (minitensor::Index idx{0}; idx < num_entries; ++idx) {
    r.reduce(vector_(idx), accumulator);
  }

  return accumulator;
}
void MiniTensorVector<T, N>::
axpy(T const alpha, Vector<T> const & x)
{
  // BLAS-style update: vector_ <- alpha * x + vector_.
  minitensor::Vector<T, N> const xval = MTfromROL<T, N>(x);

  auto const num_entries = xval.get_dimension();

  // Both vectors must have the same length.
  assert(vector_.get_dimension() == num_entries);

  for (minitensor::Index idx{0}; idx < num_entries; ++idx) {
    vector_(idx) += alpha * xval(idx);
  }
}
void MiniTensorVector<T, N>::
plus(Vector<T> const & x)
{
  // Element-wise addition: vector_ <- vector_ + x.
  // Uses the minitensor namespace, consistent with the sibling methods
  // (applyUnary, applyBinary, scale, reduce, axpy); the Intrepid2::
  // qualification here was a stale alias for the same types.
  minitensor::Vector<T, N> const xval = MTfromROL<T, N>(x);

  auto const dim = xval.get_dimension();

  // Both vectors must have the same length.
  assert(vector_.get_dimension() == dim);

  for (minitensor::Index i{0}; i < dim; ++i) {
    vector_(i) += xval(i);
  }
}
//void fdjac(n,x,fvec,df,vecfunc) //float **df,fvec[],x[]; //int n; //void (*vecfunc)(); void fdjac( int n, float x[], float fvec[], float **df, void (*vecfunc)(int, float [], float [])) { int i,j; float h,temp,*f; f=vector_(1,n); for (j=1; j<=n; j++) { temp=x[j]; h=(float)EPS*(float)fabs(temp); if (h == 0.0) h=(float)EPS; x[j]=temp+h; h=x[j]-temp; (*vecfunc)(n,x,f); x[j]=temp; for (i=1; i<=n; i++) df[i][j]=(f[i]-fvec[i])/h; } free_vector(f,1,n); }