void out_of_place(BE *backend, const_Tensor<InT, Block0> in, Tensor<OutT, Block1> out)
{
  {
    // Expose raw pointers and strides of the input and output blocks to the
    // backend through direct data access handles.
    dda::Data<Block0, dda::in> in_data(in.block());
    dda::Data<Block1, dda::out> out_data(out.block());
    backend->out_of_place(in_data.ptr(),
                          in_data.stride(0), in_data.stride(1), in_data.stride(2),
                          out_data.ptr(),
                          out_data.stride(0), out_data.stride(1), out_data.stride(2),
                          select_fft_size<InT, OutT>(in_data.size(0), out_data.size(0)),
                          select_fft_size<InT, OutT>(in_data.size(1), out_data.size(1)),
                          select_fft_size<InT, OutT>(in_data.size(2), out_data.size(2)));
    // The inner scope ends here, releasing the handles and synchronizing the
    // output block before any view-level scaling below.
  }
  // Scale the data if not already done by the backend.
  if (!backend->supports_scale() && !almost_equal(scale_, scalar_type(1.)))
    out *= scale_;
}
void by_reference(BE *backend, const_Tensor<InT, Block0> in, Tensor<OutT, Block1> out)
{
  {
    // Expose raw pointers and strides through external data access handles;
    // SYNC_IN makes the input block's data available to the backend, SYNC_OUT
    // writes results back to the output block when the handle is released.
    Ext_data<Block0> in_ext (in.block(), SYNC_IN);
    Ext_data<Block1> out_ext(out.block(), SYNC_OUT);
    backend->by_reference(in_ext.data(),
                          in_ext.stride(0), in_ext.stride(1), in_ext.stride(2),
                          out_ext.data(),
                          out_ext.stride(0), out_ext.stride(1), out_ext.stride(2),
                          select_fft_size<InT, OutT>(in_ext.size(0), out_ext.size(0)),
                          select_fft_size<InT, OutT>(in_ext.size(1), out_ext.size(1)),
                          select_fft_size<InT, OutT>(in_ext.size(2), out_ext.size(2)));
  }
  // Scale the data if not already done by the backend.
  if (!backend->supports_scale() && !almost_equal(scale_, scalar_type(1.)))
    out *= scale_;
}
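Both helpers skip the explicit scaling pass when the backend already applies the scale factor or when scale_ is effectively one. The almost_equal predicate used in that check is not defined in this section; the following is only a minimal, hypothetical stand-in based on a combined relative/absolute tolerance, not the library's actual implementation.

#include <algorithm>
#include <cmath>

// Hypothetical stand-in for the almost_equal() predicate used above: true when
// a and b differ by less than a relative tolerance (scaled by the larger
// magnitude) or by less than an absolute tolerance near zero.
template <typename T>
inline bool almost_equal(T a, T b, T rel_tol = T(1e-5), T abs_tol = T(1e-8))
{
  T diff = std::abs(a - b);
  return diff <= abs_tol ||
         diff <= rel_tol * std::max(std::abs(a), std::abs(b));
}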
inline bool
equal(const_Tensor<T1, B1> v, const_Tensor<T2, B2> w)
{
  if (v.size(0) != w.size(0) ||
      v.size(1) != w.size(1) ||
      v.size(2) != w.size(2))
    return false;
  for (length_type i = 0; i != v.size(0); ++i)
    for (length_type j = 0; j != v.size(1); ++j)
      for (length_type k = 0; k != v.size(2); ++k)
        if (!equal(v.get(i, j, k), w.get(i, j, k)))
          return false;
  return true;
}
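The element-wise comparison above bottoms out in a scalar equal() overload, which for floating-point types is typically tolerance-based in the same spirit as the almost_equal sketch. Below is a minimal usage sketch, assuming the standard VSIPL++ Tensor interface (three-dimensional constructor, size, put and get) and that the equal() overload above is visible in the calling scope; it is an illustration, not part of the library.

#include <cassert>
#include <vsip/initfin.hpp>
#include <vsip/tensor.hpp>

int main(int argc, char **argv)
{
  vsip::vsipl init(argc, argv);   // library initialization/finalization

  vsip::Tensor<float> a(2, 3, 4);
  vsip::Tensor<float> b(2, 3, 4);

  // Fill both tensors with identical values, element by element.
  for (vsip::length_type i = 0; i != a.size(0); ++i)
    for (vsip::length_type j = 0; j != a.size(1); ++j)
      for (vsip::length_type k = 0; k != a.size(2); ++k)
      {
        float value = float(i + 10 * j + 100 * k);
        a.put(i, j, k, value);
        b.put(i, j, k, value);
      }

  assert(equal(a, b));            // same shape, same values
  b.put(0, 0, 0, -1.f);
  assert(!equal(a, b));           // one element now differs
  return 0;
}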