// Invokes the callable: initializes the per-call data buffer, resolves a
// symbolic destination type, allocates the destination array, then builds
// and executes a ckernel to fill it. Returns the populated destination.
//
// NOTE(review): `kernreq` is passed to instantiate() below but is not a
// parameter of this operator — presumably a member or an enclosing-scope
// name; confirm it is actually in scope at this definition.
nd::array callable_type_data::operator()(ndt::type &dst_tp, intptr_t nsrc, const ndt::type *src_tp,
                                         const char *const *src_arrmeta, char **const *src_data, intptr_t nkwd,
                                         const nd::array *kwds, const std::map<std::string, ndt::type> &tp_vars)
{
  // Allocate, then initialize, the data
  std::unique_ptr<char[]> data(new char[data_size]);
  if (data_size > 0) {
    data_init(static_data, data_size, data.get(), dst_tp, nsrc, src_tp, nkwd, kwds, tp_vars);
  }
  // Resolve the destination type
  if (dst_tp.is_symbolic()) {
    if (resolve_dst_type == NULL) {
      throw std::runtime_error("dst_tp is symbolic, but resolve_dst_type is NULL");
    }
    resolve_dst_type(static_data, data_size, data.get(), dst_tp, nsrc, src_tp, nkwd, kwds, tp_vars);
  }
  // Allocate the destination array
  nd::array dst = nd::empty(dst_tp);
  // Generate and evaluate the ckernel
  ckernel_builder<kernel_request_host> ckb;
  instantiate(static_data, data_size, data.get(), &ckb, 0, dst_tp, dst.get_arrmeta(), nsrc, src_tp, src_arrmeta,
              kernreq, &eval::default_eval_context, nkwd, kwds, tp_vars);
  // Fetch the single-execution entry point from the built kernel and run it,
  // writing into the freshly allocated destination's data pointer.
  expr_metadata_single_t fn = ckb.get()->get_function<expr_metadata_single_t>();
  fn(ckb.get(), dst.get_arrmeta(), &dst.get_ndo()->data.ptr, const_cast<char *const *>(src_arrmeta), src_data);
  return dst;
}
// Executes this callable end-to-end: prepare per-call data, make the
// destination type concrete, allocate the result, then build and run the
// call kernel. Returns the filled result array.
nd::array nd::base_callable::call(ndt::type &dst_tp, intptr_t nsrc, const ndt::type *src_tp,
                                  const char *const *src_arrmeta, const array *src_data, intptr_t nkwd,
                                  const array *kwds, const std::map<std::string, ndt::type> &tp_vars)
{
  // Set up the per-call data buffer first.
  char *call_data = data_init(static_data(), dst_tp, nsrc, src_tp, nkwd, kwds, tp_vars);

  // A symbolic destination type must be resolved to a concrete one before
  // the result can be allocated.
  if (dst_tp.is_symbolic()) {
    if (resolve_dst_type == nullptr) {
      throw std::runtime_error("dst_tp is symbolic, but resolve_dst_type is NULL");
    }
    resolve_dst_type(static_data(), call_data, dst_tp, nsrc, src_tp, nkwd, kwds, tp_vars);
  }

  // With a concrete type in hand, make room for the result.
  array result = empty(dst_tp);

  // Build the ckernel for this call, then execute it against the result.
  kernel_builder kb;
  instantiate(static_data(), call_data, &kb, dst_tp, result.get()->metadata(), nsrc, src_tp, src_arrmeta,
              kernel_request_call, nkwd, kwds, tp_vars);
  kernel_call_t call_fn = kb.get()->get_function<kernel_call_t>();
  call_fn(kb.get(), &result, src_data);

  return result;
}
// Produces a zero-copy view of `arr` as type `tp`, trying (in order): an
// exact type match, a bytes-view in either direction, then a same-ndim
// concrete view. Throws type_error when no view is possible.
nd::array nd::view(const nd::array &arr, const ndt::type &tp)
{
  // Identical types: the array itself is already the requested view.
  if (arr.get_type() == tp) {
    return arr;
  }

  if (tp.get_type_id() == bytes_type_id) {
    // Viewing arbitrary data as raw bytes
    nd::array res = view_as_bytes(arr, tp);
    if (!res.is_null()) {
      return res;
    }
  } else if (arr.get_type().get_type_id() == bytes_type_id) {
    // Viewing raw bytes as some other type
    nd::array res = view_from_bytes(arr, tp);
    if (!res.is_null()) {
      return res;
    }
  } else if (arr.get_ndim() == tp.get_ndim()) {
    // Same dimensionality: view element-for-element. A symbolic target
    // (e.g. one with a "Fixed" dimension) first gets the array's concrete
    // shape substituted in.
    if (!tp.is_symbolic()) {
      return view_concrete(arr, tp);
    }
    dimvector shape(arr.get_ndim());
    arr.get_shape(shape.get());
    return view_concrete(arr, substitute_shape(tp, arr.get_ndim(), shape.get()));
  }

  stringstream ss;
  ss << "Unable to view nd::array of type " << arr.get_type();
  ss << " as type " << tp;
  throw type_error(ss.str());
}
// Resolves by dispatching to the specialization selected for the given
// destination/source types, then delegating resolution to that child.
ndt::type resolve(base_callable *DYND_UNUSED(caller), char *DYND_UNUSED(data), call_graph &cg, const ndt::type &dst_tp,
                  size_t nsrc, const ndt::type *src_tp, size_t nkwd, const array *kwds,
                  const std::map<std::string, ndt::type> &tp_vars)
{
  const callable &child = specialize(dst_tp, nsrc, src_tp);
  // A symbolic destination falls back to the child's declared return type.
  ndt::type effective_dst = dst_tp;
  if (dst_tp.is_symbolic()) {
    effective_dst = child->get_ret_type();
  }
  return child->resolve(this, nullptr, cg, effective_dst, nsrc, src_tp, nkwd, kwds, tp_vars);
}
/**
 * Performs typevar substitution on a pattern type.
 *
 * \param pattern  The type within which typevars are substituted.
 * \param typevars  Maps typevar names to the types that replace them.
 * \param concrete  When true, the substituted result must be concrete.
 */
inline ndt::type substitute(const ndt::type &pattern, const std::map<nd::string, ndt::type> &typevars, bool concrete)
{
  // Fast path, kept inline to skip the out-of-line call: a pattern that is
  // neither symbolic nor a callable type needs no substitution at all.
  if (pattern.is_symbolic() || pattern.get_type_id() == callable_type_id) {
    return detail::internal_substitute(pattern, typevars, concrete);
  }
  return pattern;
}
// Returns the CUDA device data alignment for `tp`, or 0 when the type is
// symbolic or its dtype is not a builtin.
size_t dynd::ndt::get_cuda_device_data_alignment(const ndt::type &tp)
{
  // A symbolic type has no concrete layout to align against.
  if (tp.is_symbolic()) {
    return 0;
  }

  const ndt::type &data_tp = tp.without_memory_type().get_dtype();
  if (!data_tp.is_builtin()) {
    // TODO: Return the data size of the largest built-in component
    return 0;
  }
  // Builtin dtypes align to their own data size.
  return data_tp.get_data_size();
}
// Resolves this NA-forwarding callable: emits a forward_na_kernel node into
// the call graph that wires together (1) the child kernel operating on the
// option types' value types, (2) one is_na check per forwarded operand
// index in I..., and (3) an assign_na kernel for the missing-value path.
// Returns the option-wrapped result type.
ndt::type resolve(base_callable *caller, char *DYND_UNUSED(data), call_graph &cg, const ndt::type &dst_tp,
                  size_t DYND_UNUSED(nsrc), const ndt::type *src_tp, size_t nkwd, const array *kwds,
                  const std::map<std::string, ndt::type> &tp_vars)
{
  cg.emplace_back([](kernel_builder &kb, kernel_request_t kernreq, char *DYND_UNUSED(data), const char *dst_arrmeta,
                     size_t nsrc, const char *const *src_arrmeta) {
    // Child kernel offsets are recorded relative to this kernel's own
    // position (kb.size() at emplace time), so they stay valid if the
    // builder's buffer reallocates.
    size_t self_offset = kb.size();
    kb.emplace_back<forward_na_kernel<I...>>(kernreq);
    // The wrapped value kernel, operating on all operands.
    kb(kernel_request_single, nullptr, dst_arrmeta, nsrc, src_arrmeta);
    // One is_na child kernel per forwarded operand index.
    for (intptr_t i : std::array<index_t, sizeof...(I)>({I...})) {
      size_t is_na_offset = kb.size() - self_offset;
      kb(kernel_request_single, nullptr, nullptr, 1, src_arrmeta + i);
      kb.get_at<forward_na_kernel<I...>>(self_offset)->is_na_offset[i] = is_na_offset;
    }
    // Trailing assign_na child kernel for when an operand is missing.
    size_t assign_na_offset = kb.size() - self_offset;
    kb(kernel_request_single, nullptr, nullptr, 0, nullptr);
    kb.get_at<forward_na_kernel<I...>>(self_offset)->assign_na_offset = assign_na_offset;
  });
  // Strip the option wrapper from each forwarded operand type to obtain the
  // child's argument types.
  // NOTE(review): hard-codes exactly 2 source operands — confirm this
  // callable is only ever constructed for binary children.
  ndt::type src_value_tp[2];
  for (intptr_t i = 0; i < 2; ++i) {
    src_value_tp[i] = src_tp[i];
  }
  for (intptr_t i : std::array<index_t, sizeof...(I)>({I...})) {
    src_value_tp[i] = src_value_tp[i].extended<ndt::option_type>()->get_value_type();
  }
  // When no explicit child was configured, forward back to the caller.
  base_callable *child;
  if (m_child.is_null()) {
    child = caller;
  } else {
    child = m_child.get();
  }
  // Resolve the child on the unwrapped value types; a symbolic destination
  // falls back to the child's declared return type.
  ndt::type res_value_tp = child->resolve(this, nullptr, cg, dst_tp.is_symbolic() ? child->get_ret_type() : dst_tp, 2,
                                          src_value_tp, nkwd, kwds, tp_vars);
  // Resolve the is_na checks and the final assign_na into the graph; the
  // overall result type is the option-wrapped child result.
  for (index_t i : std::array<index_t, sizeof...(I)>({I...})) {
    is_na->resolve(this, nullptr, cg, ndt::make_type<bool>(), 1, src_tp + i, 0, nullptr, tp_vars);
  }
  return assign_na->resolve(this, nullptr, cg, ndt::make_type<ndt::option_type>(res_value_tp), 0, nullptr, nkwd, kwds,
                            tp_vars);
}
// Counts the leading non-ragged dimensions of `tp`, capped at `max_count`.
// Dimension types contribute 1 plus their element's count; struct/tuple
// types are only as deep as their shallowest field; other scalars are 0.
size_t pydynd::get_nonragged_dim_count(const ndt::type &tp, size_t max_count)
{
  // A symbolic scalar contributes no dimensions at all.
  if (tp.is_symbolic() && tp.is_scalar()) {
    return 0;
  }

  // Dimension type: count it and recurse into the element type, staying
  // within the max_count cap.
  if (!tp.is_scalar()) {
    if (max_count <= 1) {
      return max_count;
    }
    const ndt::type &elem = static_cast<const ndt::base_dim_type *>(tp.extended())->get_element_type();
    return min(max_count, 1 + get_nonragged_dim_count(elem, max_count - 1));
  }

  switch (tp.get_id()) {
  case struct_id:
  case tuple_id: {
    if (max_count <= 1) {
      return max_count;
    }
    // The struct/tuple as a whole is only as non-ragged as its shallowest
    // field, so take the minimum over all fields (with early exit).
    auto bsd = tp.extended<ndt::tuple_type>();
    size_t field_count = bsd->get_field_count();
    for (size_t i = 0; i != field_count; ++i) {
      size_t candidate = 1 + get_nonragged_dim_count(bsd->get_field_type(i), max_count - 1);
      if (candidate < max_count) {
        max_count = candidate;
        if (max_count <= 1) {
          return max_count;
        }
      }
    }
    return max_count;
  }
  default:
    return 0;
  }
}
void nd::functional::old_multidispatch_ck::resolve_dst_type( char *static_data, size_t data_size, char *data, ndt::type &dst_tp, intptr_t nsrc, const ndt::type *src_tp, const nd::array &kwds, const std::map<nd::string, ndt::type> &tp_vars) { const vector<nd::arrfunc> *icd = reinterpret_cast<const vector<nd::arrfunc> *>(static_data); for (intptr_t i = 0; i < (intptr_t)icd->size(); ++i) { const nd::arrfunc &child = (*icd)[i]; if (nsrc == child.get_type()->get_npos()) { intptr_t isrc; std::map<nd::string, ndt::type> typevars; for (isrc = 0; isrc < nsrc; ++isrc) { if (!can_implicitly_convert( src_tp[isrc], child.get_type()->get_pos_type(isrc), typevars)) { break; } } if (isrc == nsrc) { dst_tp = child.get_type()->get_return_type(); if (dst_tp.is_symbolic()) { child.get()->resolve_dst_type( const_cast<char *>(child.get()->static_data), data_size, data, dst_tp, nsrc, src_tp, kwds, tp_vars); } return; } } } stringstream ss; ss << "Failed to find suitable signature in multidispatch resolution " "with " "input types ("; for (intptr_t isrc = 0; isrc < nsrc; ++isrc) { ss << src_tp[isrc]; if (isrc != nsrc - 1) { ss << ", "; } } ss << ")"; throw type_error(ss.str()); }
static inline bool broadcast_tagged_dims_from_type(intptr_t ndim, ndt::type tp, const intptr_t *tagged_dims, intptr_t *out_tagged_dims) { tp = tp.without_memory_type(); for (intptr_t i = 0; i < ndim; ++i) { intptr_t tagged_dim = tagged_dims[i], dim_size; switch (tp.get_id()) { case fixed_dim_id: if (tp.is_symbolic()) { if (tagged_dim < 0) { out_tagged_dims[i] = -2; } } else { dim_size = tp.extended<ndt::fixed_dim_type>()->get_fixed_dim_size(); if (tagged_dim < 0 || tagged_dim == 1) { out_tagged_dims[i] = dim_size; } else if (tagged_dim != dim_size && dim_size != 1) { return false; } } break; case var_dim_id: // All broadcasting is done dynamically for var break; default: { stringstream ss; ss << "dim_fragment_type failed to get shape from type " << tp; throw type_error(ss.str()); } } tp = tp.extended<ndt::base_dim_type>()->get_element_type(); } return true; }