// Assign the sub-array of `out` selected by `index` from `in`, converting
// in's element type to T (imaginary part dropped via the real copy path).
// Throws on dimension mismatch, size mismatch, or unsupported source dtype.
static void assign(af_array &out, const unsigned &ndims, const af_seq *index, const af_array &in)
{
    ArrayInfo iInfo = getInfo(in);
    ArrayInfo oInfo = getInfo(out);
    af_dtype iType  = iInfo.getType();

    dim4 const outDs = oInfo.dims();
    dim4 const iDims = iInfo.dims();

    ARG_ASSERT(0, (outDs.ndims()>=iDims.ndims()));
    ARG_ASSERT(1, (outDs.ndims()>=(int)ndims));

    // Materialize any pending JIT evaluation before writing through a view.
    AF_CHECK(af_eval(out));

    vector<af_seq> index_(index, index+ndims);

    dim4 const oStrides = af::toStride(index_, outDs);
    dim4 oDims    = af::toDims(index_, outDs);
    dim4 oOffsets = af::toOffset(index_, outDs);

    // Validate sizes BEFORE allocating the reference array: the original code
    // allocated `dst` first and leaked it when AF_ERROR threw here.
    for (int i = 0; i < 4; i++) {
        if (oDims[i] != iDims[i])
            AF_ERROR("Size mismatch between input and output", AF_ERR_SIZE);
    }

    // Non-owning view into `out` describing the indexed region.
    Array<T> *dst = createRefArray<T>(getArray<T>(out), oDims, oOffsets, oStrides);

    bool noCaseExecuted = true;
    if (isComplex) {
        noCaseExecuted = false;
        switch(iType) {
            case c64: copy<cdouble, T>(*dst, getArray<cdouble>(in), scalar<T>(0), 1.0); break;
            case c32: copy<cfloat , T>(*dst, getArray<cfloat >(in), scalar<T>(0), 1.0); break;
            default : noCaseExecuted = true; break;
        }
    }

    static const T ZERO = scalar<T>(0);
    if(noCaseExecuted) {
        noCaseExecuted = false;
        switch(iType) {
            case f64: copy<double , T>(*dst, getArray<double>(in), ZERO, 1.0); break;
            case f32: copy<float  , T>(*dst, getArray<float >(in), ZERO, 1.0); break;
            case s32: copy<int    , T>(*dst, getArray<int   >(in), ZERO, 1.0); break;
            case u32: copy<uint   , T>(*dst, getArray<uint  >(in), ZERO, 1.0); break;
            case u8 : copy<uchar  , T>(*dst, getArray<uchar >(in), ZERO, 1.0); break;
            case b8 : copy<char   , T>(*dst, getArray<char  >(in), ZERO, 1.0); break;
            default : noCaseExecuted = true; break;
        }
    }

    if (noCaseExecuted) {
        delete dst;              // release the view before throwing (was leaked before)
        TYPE_ERROR(1, iType);
    }
    delete dst;
}
// Assign the region of `out` selected by `index` from `in_`, converting the
// element type from Tin to Tout. Handles the special case where the selected
// region is a vector (all leading output dims == 1) by reshaping or tiling the
// input to match, so a row vector can be assigned to a column slice, etc.
static void assign(Array<Tout> &out, const unsigned &ndims, const af_seq *index, const Array<Tin> &in_)
{
    dim4 const outDs = out.dims();
    dim4 const iDims = in_.dims();

    DIM_ASSERT(0, (outDs.ndims()>=iDims.ndims()));
    DIM_ASSERT(0, (outDs.ndims()>=(dim_t)ndims));

    // Force pending computation before writing into a sub-array view.
    out.eval();

    vector<af_seq> index_(index, index+ndims);

    // Shape of the indexed destination region.
    dim4 oDims = toDims(index_, outDs);

    // The destination counts as a vector when every dim but the last is 1 ...
    bool is_vector = true;
    for (int i = 0; is_vector && i < (int)oDims.ndims() - 1; i++) {
        is_vector &= oDims[i] == 1;
    }
    // ... and the source itself is a vector or a scalar.
    is_vector &= in_.isVector() || in_.isScalar();

    // Collapse trailing dims beyond the number of indices supplied.
    for (dim_t i = ndims; i < (int)in_.ndims(); i++) { oDims[i] = 1; }

    if (is_vector) {
        // Element counts must agree unless the source is a scalar (broadcast).
        if (oDims.elements() != (dim_t)in_.elements() && in_.elements() != 1) {
            AF_ERROR("Size mismatch between input and output", AF_ERR_SIZE);
        }
        // If both out and in are vectors of equal elements, reshape in to out dims
        // (scalar sources are tiled to fill the region instead).
        Array<Tin> in = in_.elements() == 1 ? tile(in_, oDims) : modDims(in_, oDims);
        Array<Tout> dst = createSubArray<Tout>(out, index_, false);
        copyArray<Tin , Tout>(dst, in);
    } else {
        // General case: every dimension must match exactly.
        for (int i = 0; i < 4; i++) {
            if (oDims[i] != iDims[i]) {
                AF_ERROR("Size mismatch between input and output", AF_ERR_SIZE);
            }
        }
        Array<Tout> dst = createSubArray<Tout>(out, index_, false);
        copyArray<Tin , Tout>(dst, in_);
    }
}
/// Build a c32 array of shape `dims` with every element set to the complex
/// value `val`.
array constant(cfloat val, const dim4 &dims)
{
    af_array handle;
    AF_THROW(af_constant_complex(&handle, real(val), imag(val),
                                 dims.ndims(), dims.get(), c32));
    return array(handle);
}
/// Build an array of shape `dims` and dtype `type` filled with `val`.
/// 64-bit integer types go through the dedicated long/ulong C entry points so
/// the value is not narrowed through double.
array constant(T val, const dim4 &dims, const af::dtype type)
{
    af_array handle;
    if (type == s64) {
        AF_THROW(af_constant_long (&handle, ( intl)val, dims.ndims(), dims.get()));
    } else if (type == u64) {
        AF_THROW(af_constant_ulong(&handle, (uintl)val, dims.ndims(), dims.get()));
    } else {
        AF_THROW(af_constant(&handle, (double)val, dims.ndims(), dims.get(), type));
    }
    return array(handle);
}
/// Build an array of shape `dims` filled with the complex value `val`.
/// For non-complex target types the imaginary part is dropped and the real
/// overload is used instead.
AFAPI array constant(cdouble val, const dim4 &dims, const af::dtype type)
{
    const bool complexTarget = (type == c32 || type == c64);
    if (!complexTarget) {
        return constant(real(val), dims, type);
    }
    af_array handle;
    AF_THROW(af_constant_complex(&handle, real(val), imag(val),
                                 dims.ndims(), dims.get(), type));
    return array(handle);
}
/// Classify how a signal/filter pair should be batched, given that the
/// operation itself works on the first `baseDim` dimensions.
AF_BATCH_KIND identifyBatchKind(const dim4 &sDims, const dim4 &fDims)
{
    const dim_t sn = sDims.ndims();
    const dim_t fn = fDims.ndims();

    const bool sIsBase  = (sn == baseDim);
    const bool fIsBase  = (fn == baseDim);
    const bool sIsBatch = (sn > baseDim && sn <= 4);
    const bool fIsBatch = (fn > baseDim && fn <= 4);

    if (sIsBase && fIsBase)  return AF_BATCH_NONE;
    if (sIsBase && fIsBatch) return AF_BATCH_RHS;   // only the filter is batched
    if (sIsBatch && fIsBase) return AF_BATCH_LHS;   // only the signal is batched

    if (sIsBatch && fIsBatch) {
        // Both are batched: decide between identical batch shapes,
        // broadcast-compatible ("interleaved") shapes, and unsupported.
        bool sameShape   = true;
        bool interleaved = true;
        for (dim_t d = baseDim; d < 4; ++d) {
            sameShape   &= (sDims[d] == fDims[d]);
            interleaved &= (sDims[d] == 1 || fDims[d] == 1 || sDims[d] == fDims[d]);
        }
        if (sameShape) return AF_BATCH_SAME;
        return interleaved ? AF_BATCH_DIFF : AF_BATCH_UNSUPPORTED;
    }

    return AF_BATCH_UNSUPPORTED;
}
/// Classify the convolution batching mode for a signal/filter pair, where the
/// convolution itself spans the first `baseDim` dimensions.
ConvolveBatchKind identifyBatchKind(const dim4 &sDims, const dim4 &fDims)
{
    const dim_t sn = sDims.ndims();
    const dim_t fn = fDims.ndims();

    const bool sIsBase  = (sn == baseDim);
    const bool fIsBase  = (fn == baseDim);
    const bool sIsBatch = (sn > baseDim && sn <= 4);
    const bool fIsBatch = (fn > baseDim && fn <= 4);

    if (sIsBase && fIsBase)  return ONE2ONE;
    if (sIsBase && fIsBatch) return ONE2MANY;   // one signal, many filters
    if (sIsBatch && fIsBase) return MANY2ONE;   // many signals, one filter

    if (sIsBatch && fIsBatch) {
        // Both batched: supported only when the batch dimensions agree exactly.
        bool sameShape = true;
        for (dim_t d = baseDim; d < 4; ++d) {
            sameShape &= (sDims[d] == fDims[d]);
        }
        return sameShape ? MANY2MANY : CONVOLVE_UNSUPPORTED_BATCH_MODE;
    }

    return CONVOLVE_UNSUPPORTED_BATCH_MODE;
}
/// Gather elements of `input` along dimension `dim` using `indices`.
/// The output matches the input shape except along `dim`, which has one
/// entry per index.
Array<in_t> lookup(const Array<in_t> &input, const Array<idx_t> &indices, const unsigned dim)
{
    const dim4 inDims = input.dims();

    dim4 outDims(1);
    for (int d = 0; d < 4; ++d) {
        outDims[d] = (d == int(dim)) ? indices.elements() : inDims[d];
    }

    Array<in_t> out = createEmptyArray<in_t>(outDims);
    const dim_t nDims = inDims.ndims();

    // The kernel takes the lookup dimension as a compile-time parameter,
    // hence the explicit dispatch.
    switch (dim) {
        case 0: kernel::lookup<in_t, idx_t, 0>(out, input, indices, nDims); break;
        case 1: kernel::lookup<in_t, idx_t, 1>(out, input, indices, nDims); break;
        case 2: kernel::lookup<in_t, idx_t, 2>(out, input, indices, nDims); break;
        case 3: kernel::lookup<in_t, idx_t, 3>(out, input, indices, nDims); break;
    }

    return out;
}
/// Generate a sequence array of shape `dims`, then tile it by `tile_dims`.
array iota(const dim4 &dims, const dim4 &tile_dims, const af::dtype ty)
{
    af_array handle;
    AF_THROW(af_iota(&handle, dims.ndims(), dims.get(),
                     tile_dims.ndims(), tile_dims.get(), ty));
    return array(handle);
}
// Assign values to an array array::array_proxy& af::array::array_proxy::operator=(const array &other) { unsigned nd = numDims(impl->parent_->get()); const dim4 this_dims = getDims(impl->parent_->get()); const dim4 other_dims = other.dims(); int dim = gforDim(impl->indices_); af_array other_arr = other.get(); bool batch_assign = false; bool is_reordered = false; if (dim >= 0) { //FIXME: Figure out a faster, cleaner way to do this dim4 out_dims = seqToDims(impl->indices_, this_dims, false); batch_assign = true; for (int i = 0; i < AF_MAX_DIMS; i++) { if (this->impl->indices_[i].isBatch) batch_assign &= (other_dims[i] == 1); else batch_assign &= (other_dims[i] == out_dims[i]); } if (batch_assign) { af_array out; AF_THROW(af_tile(&out, other_arr, out_dims[0] / other_dims[0], out_dims[1] / other_dims[1], out_dims[2] / other_dims[2], out_dims[3] / other_dims[3])); other_arr = out; } else if (out_dims != other_dims) { // HACK: This is a quick check to see if other has been reordered inside gfor // TODO: Figure out if this breaks and implement a cleaner method other_arr = gforReorder(other_arr, dim); is_reordered = true; } } af_array par_arr = 0; if (impl->is_linear_) { AF_THROW(af_flat(&par_arr, impl->parent_->get())); nd = 1; } else { par_arr = impl->parent_->get(); } af_array tmp = 0; AF_THROW(af_assign_gen(&tmp, par_arr, nd, impl->indices_, other_arr)); af_array res = 0; if (impl->is_linear_) { AF_THROW(af_moddims(&res, tmp, this_dims.ndims(), this_dims.get())); AF_THROW(af_release_array(par_arr)); AF_THROW(af_release_array(tmp)); } else { res = tmp; } impl->parent_->set(res); if (dim >= 0 && (is_reordered || batch_assign)) { if (other_arr) AF_THROW(af_release_array(other_arr)); } return *this; }
/// Build an array of shape `dims` and dtype `type` filled with `val`.
array constant(double val, const dim4 &dims, af_dtype type)
{
    af_array handle;
    AF_THROW(af_constant(&handle, val, dims.ndims(), dims.get(), type));
    return array(handle);
}
/// Generate a sequence array of shape `dims`, repeated `rep` times.
array iota(const dim4 &dims, const unsigned rep, af_dtype ty)
{
    af_array handle;
    AF_THROW(af_iota(&handle, dims.ndims(), dims.get(), rep, ty));
    return array(handle);
}
// Interpolate `yi` at the positions in `xo` along dimension `xdim`, where the
// input samples are assumed uniformly spaced starting at `xi_beg` with step
// `xi_step`. Writes into *yo, allocating it when the caller passes 0;
// out-of-range positions take the value `offGrid`.
af_err af_approx1_uniform(af_array *yo, const af_array yi, const af_array xo, const int xdim, const double xi_beg, const double xi_step, const af_interp_type method, const float offGrid)
{
    try {
        const ArrayInfo& yi_info = getInfo(yi);
        const ArrayInfo& xo_info = getInfo(xo);

        const dim4 yi_dims = yi_info.dims();
        const dim4 xo_dims = xo_info.dims();

        ARG_ASSERT(1, yi_info.isFloating());                     // Only floating and complex types
        ARG_ASSERT(2, xo_info.isRealFloating()) ;                // Only floating types
        ARG_ASSERT(1, yi_info.isSingle() == xo_info.isSingle()); // Must have same precision
        ARG_ASSERT(1, yi_info.isDouble() == xo_info.isDouble()); // Must have same precision
        ARG_ASSERT(3, xdim >= 0 && xdim < 4);

        // POS should either be (x, 1, 1, 1) or (1, yi_dims[1], yi_dims[2], yi_dims[3])
        if (xo_dims[xdim] != xo_dims.elements()) {
            // xo is not a plain vector along xdim, so its other dims must
            // match yi exactly.
            for (int i = 0; i < 4; i++) {
                if (xdim != i) DIM_ASSERT(2, xo_dims[i] == yi_dims[i]);
            }
        }

        ARG_ASSERT(5, xi_step != 0);
        ARG_ASSERT(6, (method == AF_INTERP_CUBIC         ||
                       method == AF_INTERP_CUBIC_SPLINE  ||
                       method == AF_INTERP_LINEAR        ||
                       method == AF_INTERP_LINEAR_COSINE ||
                       method == AF_INTERP_LOWER         ||
                       method == AF_INTERP_NEAREST));

        // Empty input: hand back an empty array of the right type.
        if (yi_dims.ndims() == 0 || xo_dims.ndims() == 0) {
            *yo = createHandle(dim4(0,0,0,0), yi_info.getType());
            return AF_SUCCESS;
        }

        // Output matches yi except along xdim, which follows xo.
        dim4 yo_dims = yi_dims;
        yo_dims[xdim] = xo_dims[xdim];
        if (*yo == 0) {
            // Caller did not pre-allocate the output.
            *yo = createHandle(yo_dims, yi_info.getType());
        }
        DIM_ASSERT(1, getInfo(*yo).dims() == yo_dims);

        switch(yi_info.getType()) {
            case f32: approx1<float  , float >(yo, yi, xo, xdim, xi_beg, xi_step, method, offGrid); break;
            case f64: approx1<double , double>(yo, yi, xo, xdim, xi_beg, xi_step, method, offGrid); break;
            case c32: approx1<cfloat , float >(yo, yi, xo, xdim, xi_beg, xi_step, method, offGrid); break;
            case c64: approx1<cdouble, double>(yo, yi, xo, xdim, xi_beg, xi_step, method, offGrid); break;
            default:  TYPE_ERROR(1, yi_info.getType());
        }
    }
    CATCHALL;

    return AF_SUCCESS;
}
/// Build an array of shape `dims` with normally-distributed random values
/// from the default random engine.
array randn(const dim4 &dims, const af::dtype type)
{
    af_array handle;
    AF_THROW(af_randn(&handle, dims.ndims(), dims.get(), type));
    return array(handle);
}
/// Reshape `in` to the shape described by `dims` (element count unchanged).
array moddims(const array& in, const dim4& dims)
{
    // Forward to the (ndims, dim_t*) overload.
    return af::moddims(in, dims.ndims(), dims.get());
}
/// Build an identity matrix (batched over trailing dims) of shape `dims`.
array identity(const dim4 &dims, const af::dtype type)
{
    af_array handle;
    AF_THROW(af_identity(&handle, dims.ndims(), dims.get(), type));
    return array(handle);
}
/// Build an array of shape `dims` with normally-distributed random values
/// drawn from the supplied random engine `r`.
array randn(const dim4 &dims, const dtype ty, randomEngine &r)
{
    af_array handle;
    AF_THROW(af_random_normal(&handle, dims.ndims(), dims.get(), ty, r.get()));
    return array(handle);
}
/// Build an array of shape `dims` whose values increase along `seq_dim`.
array range(const dim4 &dims, const int seq_dim, const af::dtype ty)
{
    af_array handle;
    AF_THROW(af_range(&handle, dims.ndims(), dims.get(), seq_dim, ty));
    return array(handle);
}