// Ensures `a` is immutable and contiguous by delegating to the detail
// helper (T is the enclosing template's element type — see declaration).
// A null array cannot be made immutable/contiguous, so it reports failure.
bool ensure_immutable_contig(nd::array &a)
{
  if (a.is_null()) {
    return false;
  }
  return detail::ensure_immutable_contig<T>::run(a);
}
// Constructs a business-date type: stores the roll policy, copies the
// 7-entry weekday mask, counts its business days, and (optionally) keeps
// an immutable copy of the holidays array cast to the date type.
ndt::busdate_type::busdate_type(busdate_roll_t roll, const bool *weekmask,
                                const nd::array &holidays)
    : base_type(busdate_type_id, datetime_kind, 4, 4, type_flag_scalar, 0, 0, 0),
      m_roll(roll)
{
  memcpy(m_workweek, weekmask, sizeof(m_workweek));
  // Count the days flagged as business days in the weekmask
  int busday_count = 0;
  for (int day = 0; day < 7; ++day) {
    if (weekmask[day]) {
      ++busday_count;
    }
  }
  m_busdays_in_weekmask = busday_count;
  if (!holidays.is_null()) {
    nd::array hol = holidays.ucast(make_date()).eval_immutable();
    // TODO: Make sure hol is contiguous and one-dimensional
    m_holidays = hol;
  }
}
// Wraps an nd::array as a callable. A null array yields a null callable;
// otherwise the array must have callable type and a non-NULL instantiate
// function pointer, or a descriptive exception is thrown.
nd::callable::callable(const nd::array &rhs)
{
  if (rhs.is_null()) {
    // Leave m_value null: a null array produces a null callable
    return;
  }
  if (rhs.get_type().get_type_id() != callable_type_id) {
    stringstream ss;
    ss << "Cannot implicitly convert nd::array of type "
       << rhs.get_type().value_type() << " to callable";
    throw type_error(ss.str());
  }
  const callable_type_data *af =
      reinterpret_cast<const callable_type_data *>(rhs.cdata());
  if (af->instantiate == NULL) {
    throw invalid_argument("Require a non-empty callable, "
                           "provided callable has NULL "
                           "instantiate function");
  }
  // It's valid: callable type, contains instantiate function.
  m_value = rhs;
}
/** * Adds a ckernel layer for processing one dimension of the reduction. * This is for a strided dimension which is being broadcast, and is * the final dimension before the accumulation operation. */ static size_t make_strided_inner_broadcast_dimension_kernel( const callable_type_data *elwise_reduction_const, const ndt::callable_type *elwise_reduction_tp, const callable_type_data *dst_initialization_const, const ndt::callable_type *dst_initialization_tp, void *ckb, intptr_t ckb_offset, intptr_t dst_stride, intptr_t src_stride, intptr_t src_size, const ndt::type &dst_tp, const char *dst_arrmeta, const ndt::type &src_tp, const char *src_arrmeta, bool right_associative, const nd::array &reduction_identity, kernel_request_t kernreq, const eval::eval_context *ectx) { callable_type_data *elwise_reduction = const_cast<callable_type_data *>(elwise_reduction_const); callable_type_data *dst_initialization = const_cast<callable_type_data *>(dst_initialization_const); intptr_t root_ckb_offset = ckb_offset; strided_inner_broadcast_kernel_extra *e = reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb) ->alloc_ck<strided_inner_broadcast_kernel_extra>(ckb_offset); e->destructor = &strided_inner_broadcast_kernel_extra::destruct; // Cannot have both a dst_initialization kernel and a reduction identity if (dst_initialization != NULL && !reduction_identity.is_null()) { throw invalid_argument( "make_lifted_reduction_ckernel: cannot specify" " both a dst_initialization kernel and a reduction_identity"); } if (reduction_identity.is_null()) { // Get the function pointer for the first_call, for the case with // no reduction identity if (kernreq == kernel_request_single) { e->set_first_call_function( &strided_inner_broadcast_kernel_extra::single_first); } else if (kernreq == kernel_request_strided) { e->set_first_call_function( &strided_inner_broadcast_kernel_extra::strided_first); } else { stringstream ss; ss << "make_lifted_reduction_ckernel: unrecognized request " << 
(int)kernreq; throw runtime_error(ss.str()); } } else { // Get the function pointer for the first_call, for the case with // a reduction identity if (kernreq == kernel_request_single) { e->set_first_call_function( &strided_inner_broadcast_kernel_extra::single_first_with_ident); } else if (kernreq == kernel_request_strided) { e->set_first_call_function( &strided_inner_broadcast_kernel_extra::strided_first_with_ident); } else { stringstream ss; ss << "make_lifted_reduction_ckernel: unrecognized request " << (int)kernreq; throw runtime_error(ss.str()); } if (reduction_identity.get_type() != dst_tp) { stringstream ss; ss << "make_lifted_reduction_ckernel: reduction identity type "; ss << reduction_identity.get_type() << " does not match dst type "; ss << dst_tp; throw runtime_error(ss.str()); } e->ident_data = reduction_identity.get_readonly_originptr(); e->ident_ref = reduction_identity.get_memblock().release(); } // The function pointer for followup accumulation calls e->set_followup_call_function( &strided_inner_broadcast_kernel_extra::strided_followup); // The striding parameters e->dst_stride = dst_stride; e->src_stride = src_stride; e->size = src_size; // Validate that the provided callables are unary operations, // and have the correct types if (elwise_reduction_tp->get_npos() != 1 && elwise_reduction_tp->get_npos() != 2) { stringstream ss; ss << "make_lifted_reduction_ckernel: elwise reduction ckernel "; ss << "funcproto must be unary or a binary expr with all equal types"; throw runtime_error(ss.str()); } if (elwise_reduction_tp->get_return_type() != dst_tp) { stringstream ss; ss << "make_lifted_reduction_ckernel: elwise reduction ckernel "; ss << "dst type is " << elwise_reduction_tp->get_return_type(); ss << ", expected " << dst_tp; throw type_error(ss.str()); } if (elwise_reduction_tp->get_pos_type(0) != src_tp) { stringstream ss; ss << "make_lifted_reduction_ckernel: elwise reduction ckernel "; ss << "src type is " << 
elwise_reduction_tp->get_return_type(); ss << ", expected " << src_tp; throw type_error(ss.str()); } if (dst_initialization != NULL) { check_dst_initialization(dst_initialization_tp, dst_tp, src_tp); } if (elwise_reduction_tp->get_npos() == 2) { ckb_offset = kernels::wrap_binary_as_unary_reduction_ckernel( ckb, ckb_offset, right_associative, kernel_request_strided); ndt::type src_tp_doubled[2] = {src_tp, src_tp}; const char *src_arrmeta_doubled[2] = {src_arrmeta, src_arrmeta}; ckb_offset = elwise_reduction->instantiate( elwise_reduction->static_data, 0, NULL, ckb, ckb_offset, dst_tp, dst_arrmeta, elwise_reduction_tp->get_npos(), src_tp_doubled, src_arrmeta_doubled, kernel_request_strided, ectx, nd::array(), std::map<nd::string, ndt::type>()); } else { ckb_offset = elwise_reduction->instantiate( elwise_reduction->static_data, 0, NULL, ckb, ckb_offset, dst_tp, dst_arrmeta, elwise_reduction_tp->get_npos(), &src_tp, &src_arrmeta, kernel_request_strided, ectx, nd::array(), std::map<nd::string, ndt::type>()); } // Make sure there's capacity for the next ckernel reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb) ->reserve(ckb_offset + sizeof(ckernel_prefix)); // Need to retrieve 'e' again because it may have moved e = reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb) ->get_at<strided_inner_broadcast_kernel_extra>(root_ckb_offset); e->dst_init_kernel_offset = ckb_offset - root_ckb_offset; if (dst_initialization != NULL) { ckb_offset = dst_initialization->instantiate( dst_initialization->static_data, 0, NULL, ckb, ckb_offset, dst_tp, dst_arrmeta, elwise_reduction_tp->get_npos(), &src_tp, &src_arrmeta, kernel_request_strided, ectx, nd::array(), std::map<nd::string, ndt::type>()); } else if (reduction_identity.is_null()) { ckb_offset = make_assignment_kernel(ckb, ckb_offset, dst_tp, dst_arrmeta, src_tp, src_arrmeta, kernel_request_strided, ectx); } else { ckb_offset = make_assignment_kernel( ckb, ckb_offset, dst_tp, dst_arrmeta, 
reduction_identity.get_type(), reduction_identity.get_arrmeta(), kernel_request_strided, ectx); } return ckb_offset; }
/**
 * Builds a lifted reduction ckernel in `ckb`, walking the `reduction_ndim`
 * outer dimensions of `src_tp` and, per `reduction_dimflags`, layering
 * either a reduction-dimension kernel or a broadcast-dimension kernel.
 * The innermost flagged/broadcast dimension delegates to the inner-kernel
 * builders, which also attach the elwise reduction and dst-initialization
 * children. Returns the ckb offset past the constructed kernels; throws
 * on inconsistent shapes, types, or unsupported configurations.
 */
size_t dynd::make_lifted_reduction_ckernel(
    const callable_type_data *elwise_reduction_const,
    const ndt::callable_type *elwise_reduction_tp,
    const callable_type_data *dst_initialization_const,
    const ndt::callable_type *dst_initialization_tp, void *ckb,
    intptr_t ckb_offset, const ndt::type &dst_tp, const char *dst_arrmeta,
    const ndt::type &src_tp, const char *src_arrmeta, intptr_t reduction_ndim,
    const bool *reduction_dimflags, bool associative, bool commutative,
    bool right_associative, const nd::array &reduction_identity,
    dynd::kernel_request_t kernreq, const eval::eval_context *ectx)
{
  callable_type_data *elwise_reduction =
      const_cast<callable_type_data *>(elwise_reduction_const);
  callable_type_data *dst_initialization =
      const_cast<callable_type_data *>(dst_initialization_const);

  // Count the number of dimensions being reduced
  intptr_t reducedim_count = 0;
  for (intptr_t i = 0; i < reduction_ndim; ++i) {
    reducedim_count += reduction_dimflags[i];
  }
  if (reducedim_count == 0) {
    if (reduction_ndim == 0) {
      // If there are no dimensions to reduce, it's
      // just a dst_initialization operation, so create
      // that ckernel directly
      if (dst_initialization != NULL) {
        return dst_initialization->instantiate(
            dst_initialization->static_data, 0, NULL, ckb, ckb_offset, dst_tp,
            dst_arrmeta, elwise_reduction_tp->get_npos(), &src_tp,
            &src_arrmeta, kernreq, ectx, nd::array(),
            std::map<nd::string, ndt::type>());
      } else if (reduction_identity.is_null()) {
        // No initialization child and no identity: plain assignment
        return make_assignment_kernel(ckb, ckb_offset, dst_tp, dst_arrmeta,
                                      src_tp, src_arrmeta, kernreq, ectx);
      } else {
        // Create the kernel which copies the identity and then
        // does one reduction
        return make_strided_inner_reduction_dimension_kernel(
            elwise_reduction, elwise_reduction_tp, dst_initialization,
            dst_initialization_tp, ckb, ckb_offset, 0, 1, dst_tp, dst_arrmeta,
            src_tp, src_arrmeta, right_associative, reduction_identity,
            kernreq, ectx);
      }
    }
    // reduction_ndim > 0 but no flags set is a caller error
    throw runtime_error("make_lifted_reduction_ckernel: no dimensions were "
                        "flagged for reduction");
  }

  // Multiple reduced dimensions are only valid when reordering the
  // accumulation is legal
  if (!(reducedim_count == 1 || (associative && commutative))) {
    throw runtime_error(
        "make_lifted_reduction_ckernel: for reducing along multiple dimensions,"
        " the reduction function must be both associative and commutative");
  }
  if (right_associative) {
    throw runtime_error("make_lifted_reduction_ckernel: right_associative is "
                        "not yet supported");
  }

  ndt::type dst_el_tp = elwise_reduction_tp->get_return_type();
  ndt::type src_el_tp = elwise_reduction_tp->get_pos_type(0);

  // This is the number of dimensions being processed by the reduction
  if (reduction_ndim != src_tp.get_ndim() - src_el_tp.get_ndim()) {
    stringstream ss;
    ss << "make_lifted_reduction_ckernel: wrong number of reduction "
          "dimensions, ";
    ss << "requested " << reduction_ndim << ", but types have ";
    ss << (src_tp.get_ndim() - src_el_tp.get_ndim());
    ss << " lifting from " << src_el_tp << " to " << src_tp;
    throw runtime_error(ss.str());
  }
  // Determine whether reduced dimensions are being kept or not
  bool keep_dims;
  if (reduction_ndim == dst_tp.get_ndim() - dst_el_tp.get_ndim()) {
    keep_dims = true;
  } else if (reduction_ndim - reducedim_count ==
             dst_tp.get_ndim() - dst_el_tp.get_ndim()) {
    keep_dims = false;
  } else {
    stringstream ss;
    ss << "make_lifted_reduction_ckernel: The number of dimensions flagged for "
          "reduction, ";
    ss << reducedim_count << ", is not consistent with the destination type ";
    ss << "reducing " << dst_tp << " with element " << dst_el_tp;
    throw runtime_error(ss.str());
  }

  // Walk the dimensions outermost-first, peeling one strided dimension of
  // src (and, when applicable, dst) per iteration
  ndt::type dst_i_tp = dst_tp, src_i_tp = src_tp;
  for (intptr_t i = 0; i < reduction_ndim; ++i) {
    intptr_t dst_stride, dst_size, src_stride, src_size;
    // Get the striding parameters for the source dimension
    if (!src_i_tp.get_as_strided(src_arrmeta, &src_size, &src_stride,
                                 &src_i_tp, &src_arrmeta)) {
      stringstream ss;
      ss << "make_lifted_reduction_ckernel: type " << src_i_tp
         << " not supported as source";
      throw type_error(ss.str());
    }
    if (reduction_dimflags[i]) {
      // This dimension is being reduced
      if (src_size == 0 && reduction_identity.is_null()) {
        // If the size of the src is 0, a reduction identity is required to get
        // a value
        stringstream ss;
        ss << "cannot reduce a zero-sized dimension (axis ";
        ss << i << " of " << src_i_tp << ") because the operation";
        ss << " has no identity";
        throw invalid_argument(ss.str());
      }
      if (keep_dims) {
        // If the dimensions are being kept, the output should be a
        // a strided dimension of size one
        if (dst_i_tp.get_as_strided(dst_arrmeta, &dst_size, &dst_stride,
                                    &dst_i_tp, &dst_arrmeta)) {
          if (dst_size != 1 || dst_stride != 0) {
            stringstream ss;
            ss << "make_lifted_reduction_ckernel: destination of a reduction "
                  "dimension ";
            ss << "must have size 1, not size" << dst_size << "/stride "
               << dst_stride;
            ss << " in type " << dst_i_tp;
            throw type_error(ss.str());
          }
        } else {
          stringstream ss;
          ss << "make_lifted_reduction_ckernel: type " << dst_i_tp;
          ss << " not supported the destination of a dimension being reduced";
          throw type_error(ss.str());
        }
      }
      if (i < reduction_ndim - 1) {
        // An initial dimension being reduced
        ckb_offset = make_strided_initial_reduction_dimension_kernel(
            ckb, ckb_offset, src_stride, src_size, kernreq);
        // The next request should be single, as that's the kind of
        // ckernel the 'first_call' should be in this case
        kernreq = kernel_request_single;
      } else {
        // The innermost dimension being reduced
        return make_strided_inner_reduction_dimension_kernel(
            elwise_reduction, elwise_reduction_tp, dst_initialization,
            dst_initialization_tp, ckb, ckb_offset, src_stride, src_size,
            dst_i_tp, dst_arrmeta, src_i_tp, src_arrmeta, right_associative,
            reduction_identity, kernreq, ectx);
      }
    } else {
      // This dimension is being broadcast, not reduced
      if (!dst_i_tp.get_as_strided(dst_arrmeta, &dst_size, &dst_stride,
                                   &dst_i_tp, &dst_arrmeta)) {
        stringstream ss;
        ss << "make_lifted_reduction_ckernel: type " << dst_i_tp
           << " not supported as destination";
        throw type_error(ss.str());
      }
      if (dst_size != src_size) {
        stringstream ss;
        ss << "make_lifted_reduction_ckernel: the dst dimension size "
           << dst_size;
        ss << " must equal the src dimension size " << src_size
           << " for broadcast dimensions";
        throw runtime_error(ss.str());
      }
      if (i < reduction_ndim - 1) {
        // An initial dimension being broadcast
        ckb_offset = make_strided_initial_broadcast_dimension_kernel(
            ckb, ckb_offset, dst_stride, src_stride, src_size, kernreq);
        // The next request should be strided, as that's the kind of
        // ckernel the 'first_call' should be in this case
        kernreq = kernel_request_strided;
      } else {
        // The innermost dimension being broadcast
        return make_strided_inner_broadcast_dimension_kernel(
            elwise_reduction, elwise_reduction_tp, dst_initialization,
            dst_initialization_tp, ckb, ckb_offset, dst_stride, src_stride,
            src_size, dst_i_tp, dst_arrmeta, src_i_tp, src_arrmeta,
            right_associative, reduction_identity, kernreq, ectx);
      }
    }
  }

  // The loop above must return from its innermost (last) iteration
  throw runtime_error("make_lifted_reduction_ckernel: internal error, "
                      "should have returned in the loop");
}
/**
 * Lifts an element-wise reduction arrfunc into one that reduces the
 * dimensions of `lifted_arr_type` flagged in `reduction_dimflags`,
 * filling `out_ar` with a lifted_reduction_arrfunc_data payload, the
 * instantiate function, and the derived funcproto.
 *
 * `elwise_reduction_arr` must be non-null and either a unary operation or
 * a binary expr with all three types equal. The reduction identity, when
 * provided, is stored as an immutable array of the reduction's return
 * type (copied if needed). Throws on invalid inputs or dimension types
 * that cannot be lifted.
 */
void dynd::lift_reduction_arrfunc(arrfunc_type_data *out_ar,
                                  const nd::arrfunc &elwise_reduction_arr,
                                  const ndt::type &lifted_arr_type,
                                  const nd::arrfunc &dst_initialization_arr,
                                  bool keepdims, intptr_t reduction_ndim,
                                  const bool *reduction_dimflags,
                                  bool associative, bool commutative,
                                  bool right_associative,
                                  const nd::array &reduction_identity)
{
  // Validate the input elwise_reduction arrfunc
  if (elwise_reduction_arr.is_null()) {
    throw runtime_error("lift_reduction_arrfunc: 'elwise_reduction' may not be empty");
  }
  const arrfunc_type_data *elwise_reduction = elwise_reduction_arr.get();
  // Accept either a unary op, or a binary expr whose two params and
  // return type are all the same type
  if (elwise_reduction->get_param_count() != 1 &&
      !(elwise_reduction->get_param_count() == 2 &&
        elwise_reduction->get_param_type(0) ==
            elwise_reduction->get_param_type(1) &&
        elwise_reduction->get_param_type(0) ==
            elwise_reduction->get_return_type())) {
    stringstream ss;
    ss << "lift_reduction_arrfunc: 'elwise_reduction' must contain a"
          " unary operation ckernel or a binary expr ckernel with all "
          "equal types, its prototype is "
       << elwise_reduction->func_proto;
    throw invalid_argument(ss.str());
  }

  // out_ar takes ownership of `self` via its free_func
  lifted_reduction_arrfunc_data *self = new lifted_reduction_arrfunc_data;
  *out_ar->get_data_as<lifted_reduction_arrfunc_data *>() = self;
  out_ar->free_func = &delete_lifted_reduction_arrfunc_data;
  self->child_elwise_reduction = elwise_reduction_arr;
  self->child_dst_initialization = dst_initialization_arr;
  if (!reduction_identity.is_null()) {
    if (reduction_identity.is_immutable() &&
        reduction_identity.get_type() == elwise_reduction->get_return_type()) {
      // Already immutable and the right type: share it directly
      self->reduction_identity = reduction_identity;
    } else {
      // Copy into a fresh immutable array of the reduction's return type
      self->reduction_identity =
          nd::empty(elwise_reduction->get_return_type());
      self->reduction_identity.vals() = reduction_identity;
      self->reduction_identity.flag_as_immutable();
    }
  }

  // Figure out the result type: walk the lifted dims innermost-first,
  // dropping (or keeping as strided) the reduced ones and mirroring the
  // dimension kind of the non-reduced ones
  ndt::type lifted_dst_type = elwise_reduction->get_return_type();
  for (intptr_t i = reduction_ndim - 1; i >= 0; --i) {
    if (reduction_dimflags[i]) {
      if (keepdims) {
        lifted_dst_type = ndt::make_strided_dim(lifted_dst_type);
      }
    } else {
      ndt::type subtype = lifted_arr_type.get_type_at_dimension(NULL, i);
      switch (subtype.get_type_id()) {
      case strided_dim_type_id:
      case cfixed_dim_type_id:
        lifted_dst_type = ndt::make_strided_dim(lifted_dst_type);
        break;
      case var_dim_type_id:
        lifted_dst_type = ndt::make_var_dim(lifted_dst_type);
        break;
      default: {
        stringstream ss;
        ss << "lift_reduction_arrfunc: don't know how to process ";
        ss << "dimension of type " << subtype;
        throw type_error(ss.str());
      }
      }
    }
  }
  self->data_types[0] = lifted_dst_type;
  self->data_types[1] = lifted_arr_type;
  self->reduction_ndim = reduction_ndim;
  self->associative = associative;
  self->commutative = commutative;
  self->right_associative = right_associative;
  self->reduction_dimflags.init(reduction_ndim);
  memcpy(self->reduction_dimflags.get(), reduction_dimflags,
         sizeof(bool) * reduction_ndim);
  out_ar->instantiate = &instantiate_lifted_reduction_arrfunc_data;
  out_ar->func_proto = ndt::make_funcproto(lifted_arr_type, lifted_dst_type);
}
// Returns a pointer to the wrapped arrfunc data, or NULL when this
// object holds no array value.
inline const arrfunc_type_data *get() const
{
  if (m_value.is_null()) {
    return NULL;
  }
  return reinterpret_cast<const arrfunc_type_data *>(
      m_value.get_readonly_originptr());
}
/** Returns true when this object wraps no underlying array value. */
inline bool is_null() const { return m_value.is_null(); }
/**
 * Concatenates two struct arrays into a single struct whose fields are
 * lhs's fields followed by rhs's fields. A null lhs or rhs returns the
 * other argument unchanged. Both non-null inputs must have struct kind;
 * otherwise invalid_argument is thrown.
 *
 * The result is built as an empty shell of the concatenated struct type,
 * then its arrmeta and data are copy-constructed/copied field by field
 * from the corresponding input.
 *
 * NOTE(review): duplicate field names between lhs and rhs are not
 * checked here — presumably ndt::struct_type::make rejects them; verify.
 */
nd::array dynd::struct_concat(nd::array lhs, nd::array rhs)
{
  nd::array res;
  // Null inputs degenerate to returning the other side
  if (lhs.is_null()) {
    res = rhs;
    return res;
  }
  if (rhs.is_null()) {
    res = lhs;
    return res;
  }
  const ndt::type &lhs_tp = lhs.get_type(), &rhs_tp = rhs.get_type();
  if (lhs_tp.get_kind() != struct_kind) {
    stringstream ss;
    ss << "Cannot concatenate array with type " << lhs_tp << " as a struct";
    throw invalid_argument(ss.str());
  }
  if (rhs_tp.get_kind() != struct_kind) {
    stringstream ss;
    ss << "Cannot concatenate array with type " << rhs_tp << " as a struct";
    throw invalid_argument(ss.str());
  }

  // Make an empty shell struct by concatenating the fields together
  intptr_t lhs_n = lhs_tp.extended<ndt::base_struct_type>()->get_field_count();
  intptr_t rhs_n = rhs_tp.extended<ndt::base_struct_type>()->get_field_count();
  intptr_t res_n = lhs_n + rhs_n;
  nd::array res_field_names = nd::empty(res_n, ndt::string_type::make());
  nd::array res_field_types = nd::empty(res_n, ndt::make_type());
  res_field_names(irange(0, lhs_n)).vals() =
      lhs_tp.extended<ndt::base_struct_type>()->get_field_names();
  res_field_names(irange(lhs_n, res_n)).vals() =
      rhs_tp.extended<ndt::base_struct_type>()->get_field_names();
  res_field_types(irange(0, lhs_n)).vals() =
      lhs_tp.extended<ndt::base_struct_type>()->get_field_types();
  res_field_types(irange(lhs_n, res_n)).vals() =
      rhs_tp.extended<ndt::base_struct_type>()->get_field_types();
  ndt::type res_tp = ndt::struct_type::make(res_field_names, res_field_types);
  const ndt::type *res_field_tps =
      res_tp.extended<ndt::base_struct_type>()->get_field_types_raw();
  res = nd::empty_shell(res_tp);

  // Initialize the default data offsets for the struct arrmeta
  ndt::struct_type::fill_default_data_offsets(
      res_n, res_tp.extended<ndt::base_struct_type>()->get_field_types_raw(),
      reinterpret_cast<uintptr_t *>(res.get_arrmeta()));

  // Get information about the arrmeta layout of the input and res
  const uintptr_t *lhs_arrmeta_offsets =
      lhs_tp.extended<ndt::base_struct_type>()->get_arrmeta_offsets_raw();
  const uintptr_t *rhs_arrmeta_offsets =
      rhs_tp.extended<ndt::base_struct_type>()->get_arrmeta_offsets_raw();
  const uintptr_t *res_arrmeta_offsets =
      res_tp.extended<ndt::base_struct_type>()->get_arrmeta_offsets_raw();
  const char *lhs_arrmeta = lhs.get_arrmeta();
  const char *rhs_arrmeta = rhs.get_arrmeta();
  char *res_arrmeta = res.get_arrmeta();

  // Copy the arrmeta from the input arrays (builtin types carry none)
  for (intptr_t i = 0; i < lhs_n; ++i) {
    const ndt::type &tp = res_field_tps[i];
    if (!tp.is_builtin()) {
      tp.extended()->arrmeta_copy_construct(
          res_arrmeta + res_arrmeta_offsets[i],
          lhs_arrmeta + lhs_arrmeta_offsets[i], lhs.get_data_memblock().get());
    }
  }
  for (intptr_t i = 0; i < rhs_n; ++i) {
    const ndt::type &tp = res_field_tps[i + lhs_n];
    if (!tp.is_builtin()) {
      tp.extended()->arrmeta_copy_construct(
          res_arrmeta + res_arrmeta_offsets[i + lhs_n],
          rhs_arrmeta + rhs_arrmeta_offsets[i], rhs.get_data_memblock().get());
    }
  }

  // Get information about the data layout of the input and res
  const uintptr_t *lhs_data_offsets =
      lhs_tp.extended<ndt::base_struct_type>()->get_data_offsets(
          lhs.get_arrmeta());
  const uintptr_t *rhs_data_offsets =
      rhs_tp.extended<ndt::base_struct_type>()->get_data_offsets(
          rhs.get_arrmeta());
  const uintptr_t *res_data_offsets =
      res_tp.extended<ndt::base_struct_type>()->get_data_offsets(
          res.get_arrmeta());
  const char *lhs_data = lhs.get_readonly_originptr();
  const char *rhs_data = rhs.get_readonly_originptr();
  char *res_data = res.get_readwrite_originptr();

  // Copy the data from the input arrays field by field
  for (intptr_t i = 0; i < lhs_n; ++i) {
    const ndt::type &tp = res_field_tps[i];
    typed_data_copy(tp, res_arrmeta + res_arrmeta_offsets[i],
                    res_data + res_data_offsets[i],
                    lhs_arrmeta + lhs_arrmeta_offsets[i],
                    lhs_data + lhs_data_offsets[i]);
  }
  for (intptr_t i = 0; i < rhs_n; ++i) {
    const ndt::type &tp = res_field_tps[i + lhs_n];
    typed_data_copy(tp, res_arrmeta + res_arrmeta_offsets[i + lhs_n],
                    res_data + res_data_offsets[i + lhs_n],
                    rhs_arrmeta + rhs_arrmeta_offsets[i],
                    rhs_data + rhs_data_offsets[i]);
  }
  return res;
}