/// Builds an assignment kernel that copies a var dimension into a
/// strided/fixed destination dimension.
///
/// \param ckb  The ckernel builder the kernel is appended to.
/// \param ckb_offset  Offset within ckb at which to place this kernel.
/// \param dst_strided_dim_tp  Destination type; must be viewable as a
///                            strided dimension.
/// \param dst_arrmeta  Arrmeta describing the destination.
/// \param src_var_dim_tp  Source type; must be a var_dim, else throws.
/// \param src_arrmeta  Arrmeta describing the source var_dim.
/// \param kernreq  The kind of kernel function being requested.
/// \param ectx  Evaluation context forwarded to the child kernel.
/// \returns The ckb offset just past the constructed kernel hierarchy.
size_t dynd::make_var_to_fixed_dim_assignment_kernel(
    void *ckb, intptr_t ckb_offset, const ndt::type &dst_strided_dim_tp,
    const char *dst_arrmeta, const ndt::type &src_var_dim_tp,
    const char *src_arrmeta, kernel_request_t kernreq,
    const eval::eval_context *ectx)
{
  typedef var_to_strided_assign_ck self_type;
  // Only a var_dim source is supported by this kernel
  if (src_var_dim_tp.get_type_id() != var_dim_type_id) {
    stringstream ss;
    ss << "make_var_to_fixed_dim_assignment_kernel: provided source type "
       << src_var_dim_tp << " is not a var_dim";
    throw runtime_error(ss.str());
  }
  const ndt::var_dim_type *src_vad =
      src_var_dim_tp.extended<ndt::var_dim_type>();
  const var_dim_type_arrmeta *src_md =
      reinterpret_cast<const var_dim_type_arrmeta *>(src_arrmeta);
  self_type *self = self_type::make(ckb, kernreq, ckb_offset);
  ndt::type dst_element_tp;
  const char *dst_element_arrmeta;
  // Populate the destination dim size/stride fields directly on the kernel
  if (!dst_strided_dim_tp.get_as_strided(dst_arrmeta, &self->m_dst_dim_size,
                                         &self->m_dst_stride, &dst_element_tp,
                                         &dst_element_arrmeta)) {
    stringstream ss;
    ss << "make_var_to_fixed_dim_assignment_kernel: provided destination "
          "type "
       << dst_strided_dim_tp << " is not a strided_dim or fixed_array";
    throw runtime_error(ss.str());
  }
  self->m_src_md = src_md;
  // Recursively build the element assignment kernel; the source element
  // arrmeta begins immediately after the var_dim arrmeta header
  return ::make_assignment_kernel(
      ckb, ckb_offset, dst_element_tp, dst_element_arrmeta,
      src_vad->get_element_type(), src_arrmeta + sizeof(var_dim_type_arrmeta),
      kernel_request_strided, ectx);
}
/// Parses one JSON list into a strided dimension of `tp`, writing each
/// parsed element into `out_data` at the dimension's stride. `begin` is
/// advanced past every consumed token; `end` bounds the input. Throws
/// json_parse_error whenever the JSON shape disagrees with the type.
static void parse_strided_dim_json(const ndt::type &tp, const char *arrmeta,
                                   char *out_data, const char *&begin,
                                   const char *end,
                                   const eval::eval_context *ectx)
{
  intptr_t count, elem_stride;
  ndt::type elem_tp;
  const char *elem_arrmeta;
  if (!tp.get_as_strided(arrmeta, &count, &elem_stride, &elem_tp,
                         &elem_arrmeta)) {
    throw json_parse_error(begin, "expected a strided dimension", tp);
  }
  if (!parse_token(begin, end, "[")) {
    throw json_parse_error(begin, "expected list starting with '['", tp);
  }
  for (intptr_t idx = 0; idx != count; ++idx) {
    // Every element after the first must be preceded by a ',' separator
    if (idx > 0 && !parse_token(begin, end, ",")) {
      throw json_parse_error(
          begin, "array is too short, expected ',' list item separator", tp);
    }
    parse_json(elem_tp, elem_arrmeta, out_data + idx * elem_stride, begin,
               end, ectx);
  }
  if (!parse_token(begin, end, "]")) {
    throw json_parse_error(
        begin, "array is too long, expected list terminator ']'", tp);
  }
}
// TODO This should handle both strided and var cases
/// Instantiates the rolling-window ckernel: validates that dst and src are
/// matching strided dimensions, records their sizes/strides on the kernel,
/// then instantiates the wrapped window operation as a child kernel over a
/// synthesized fixed-dim view of the source window.
intptr_t nd::functional::rolling_ck::instantiate(
    char *_static_data, char *data, void *ckb, intptr_t ckb_offset,
    const ndt::type &dst_tp, const char *dst_arrmeta, intptr_t nsrc,
    const ndt::type *src_tp, const char *const *src_arrmeta,
    kernel_request_t kernreq, intptr_t nkwd, const nd::array *kwds,
    const std::map<std::string, ndt::type> &tp_vars)
{
  typedef dynd::nd::functional::strided_rolling_ck self_type;
  rolling_callable_data *static_data =
      *reinterpret_cast<rolling_callable_data **>(_static_data);
  // Remember where this kernel starts so `self` can be re-fetched after
  // child kernels possibly reallocate the ckernel buffer
  intptr_t root_ckb_offset = ckb_offset;
  self_type *self = self_type::make(ckb, kernreq, ckb_offset);
  const base_callable *window_af = static_data->window_op.get();
  ndt::type dst_el_tp, src_el_tp;
  const char *dst_el_arrmeta, *src_el_arrmeta;
  if (!dst_tp.get_as_strided(dst_arrmeta, &self->m_dim_size,
                             &self->m_dst_stride, &dst_el_tp,
                             &dst_el_arrmeta)) {
    stringstream ss;
    ss << "rolling window ckernel: could not process type " << dst_tp;
    ss << " as a strided dimension";
    throw type_error(ss.str());
  }
  intptr_t src_dim_size;
  if (!src_tp[0].get_as_strided(src_arrmeta[0], &src_dim_size,
                                &self->m_src_stride, &src_el_tp,
                                &src_el_arrmeta)) {
    stringstream ss;
    ss << "rolling window ckernel: could not process type " << src_tp[0];
    ss << " as a strided dimension";
    throw type_error(ss.str());
  }
  // Rolling requires equal-length source and destination dimensions
  if (src_dim_size != self->m_dim_size) {
    stringstream ss;
    ss << "rolling window ckernel: source dimension size " << src_dim_size
       << " for type " << src_tp[0]
       << " does not match dest dimension size " << self->m_dim_size
       << " for type " << dst_tp;
    throw type_error(ss.str());
  }
  self->m_window_size = static_data->window_size;
  // Create the NA-filling child ckernel
  // TODO: Need to fix this
  // NOTE(review): the NA-fill child is disabled here (unlike the older
  // instantiate_strided path) — leading window positions may be left
  // unwritten; confirm intended behavior.
  // ckb_offset = kernels::make_constant_value_assignment_ckernel(
  //     ckb, ckb_offset, dst_el_tp, dst_el_arrmeta,
  //     numeric_limits<double>::quiet_NaN(), kernel_request_strided, ectx);
  // Re-retrieve the self pointer, because it may be at a new memory location
  // now
  self = reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb)
             ->get_at<self_type>(root_ckb_offset);
  // Create the window op child ckernel
  self->m_window_op_offset = ckb_offset - root_ckb_offset;
  // We construct array arrmeta for the window op ckernel to use,
  // without actually creating an nd::array to hold it.
  arrmeta_holder(ndt::make_fixed_dim(static_data->window_size, src_el_tp))
      .swap(self->m_src_winop_meta);
  self->m_src_winop_meta.get_at<fixed_dim_type_arrmeta>(0)->dim_size =
      self->m_window_size;
  self->m_src_winop_meta.get_at<fixed_dim_type_arrmeta>(0)->stride =
      self->m_src_stride;
  // Copy the element arrmeta to sit just after the fixed-dim header
  if (src_el_tp.get_arrmeta_size() > 0) {
    src_el_tp.extended()->arrmeta_copy_construct(
        self->m_src_winop_meta.get() + sizeof(fixed_dim_type_arrmeta),
        src_el_arrmeta, intrusive_ptr<memory_block_data>());
  }
  const char *src_winop_meta = self->m_src_winop_meta.get();
  return window_af->instantiate(
      const_cast<char *>(window_af->static_data()), data, ckb, ckb_offset,
      dst_el_tp, dst_el_arrmeta, nsrc, &self->m_src_winop_meta.get_type(),
      &src_winop_meta, kernel_request_strided, nkwd, kwds, tp_vars);
}
/// Builds an assignment kernel whose destination is this cfixed_dim type.
/// Dispatch order: (1) src has fewer dims -> broadcast it with a zero
/// stride; (2) src is strided -> size-checked strided copy; (3) otherwise
/// defer to the src type's own kernel factory; string destinations and
/// dimension-count broadcast errors are handled when dst is not this type.
size_t cfixed_dim_type::make_assignment_kernel(
    ckernel_builder *ckb, intptr_t ckb_offset, const ndt::type &dst_tp,
    const char *dst_arrmeta, const ndt::type &src_tp, const char *src_arrmeta,
    kernel_request_t kernreq, const eval::eval_context *ectx) const
{
  if (this == dst_tp.extended()) {
    intptr_t src_size, src_stride;
    ndt::type src_el_tp;
    const char *src_el_arrmeta;
    if (src_tp.get_ndim() < dst_tp.get_ndim()) {
      kernels::strided_assign_ck *self =
          kernels::strided_assign_ck::create(ckb, kernreq, ckb_offset);
      self->m_size = get_fixed_dim_size();
      self->m_dst_stride = get_fixed_stride();
      // If the src has fewer dimensions, broadcast it across this one
      self->m_src_stride = 0;
      // Child kernel assigns each element; dst element arrmeta starts just
      // past the cfixed_dim arrmeta header
      return ::make_assignment_kernel(
          ckb, ckb_offset, m_element_tp,
          dst_arrmeta + sizeof(cfixed_dim_type_arrmeta), src_tp, src_arrmeta,
          kernel_request_strided, ectx);
    }
    else if (src_tp.get_as_strided(src_arrmeta, &src_size, &src_stride,
                                   &src_el_tp, &src_el_arrmeta)) {
      kernels::strided_assign_ck *self =
          kernels::strided_assign_ck::create(ckb, kernreq, ckb_offset);
      self->m_size = get_fixed_dim_size();
      self->m_dst_stride = get_fixed_stride();
      self->m_src_stride = src_stride;
      // Check for a broadcasting error (src size 1 broadcasts, anything
      // else must match the fixed dim size exactly)
      if (src_size != 1 && get_fixed_dim_size() != src_size) {
        throw broadcast_error(dst_tp, dst_arrmeta, src_tp, src_arrmeta);
      }
      return ::make_assignment_kernel(
          ckb, ckb_offset, m_element_tp,
          dst_arrmeta + sizeof(cfixed_dim_type_arrmeta), src_el_tp,
          src_el_arrmeta, kernel_request_strided, ectx);
    }
    else if (!src_tp.is_builtin()) {
      // Give the src type a chance to make a kernel
      return src_tp.extended()->make_assignment_kernel(
          ckb, ckb_offset, dst_tp, dst_arrmeta, src_tp, src_arrmeta, kernreq,
          ectx);
    }
    else {
      stringstream ss;
      ss << "Cannot assign from " << src_tp << " to " << dst_tp;
      throw dynd::type_error(ss.str());
    }
  }
  else if (dst_tp.get_kind() == string_kind) {
    // Any type can be converted to a string representation
    return make_any_to_string_assignment_kernel(ckb, ckb_offset, dst_tp,
                                                dst_arrmeta, src_tp,
                                                src_arrmeta, kernreq, ectx);
  }
  else if (dst_tp.get_ndim() < src_tp.get_ndim()) {
    // Cannot broadcast a higher-dimensional src into a lower-dim dst
    throw broadcast_error(dst_tp, dst_arrmeta, src_tp, src_arrmeta);
  }
  else {
    stringstream ss;
    ss << "Cannot assign from " << src_tp << " to " << dst_tp;
    throw dynd::type_error(ss.str());
  }
}
intptr_t nd::indexed_take_ck::instantiate(char *DYND_UNUSED(static_data), char *DYND_UNUSED(data), void *ckb, intptr_t ckb_offset, const ndt::type &dst_tp, const char *dst_arrmeta, intptr_t DYND_UNUSED(nsrc), const ndt::type *src_tp, const char *const *src_arrmeta, kernel_request_t kernreq, intptr_t DYND_UNUSED(nkwd), const nd::array *DYND_UNUSED(kwds), const std::map<std::string, ndt::type> &DYND_UNUSED(tp_vars)) { typedef nd::indexed_take_ck self_type; self_type *self = self_type::make(ckb, kernreq, ckb_offset); ndt::type dst_el_tp; const char *dst_el_meta; if (!dst_tp.get_as_strided(dst_arrmeta, &self->m_dst_dim_size, &self->m_dst_stride, &dst_el_tp, &dst_el_meta)) { stringstream ss; ss << "indexed take arrfunc: could not process type " << dst_tp; ss << " as a strided dimension"; throw type_error(ss.str()); } intptr_t index_dim_size; ndt::type src0_el_tp, index_el_tp; const char *src0_el_meta, *index_el_meta; if (!src_tp[0].get_as_strided(src_arrmeta[0], &self->m_src0_dim_size, &self->m_src0_stride, &src0_el_tp, &src0_el_meta)) { stringstream ss; ss << "indexed take arrfunc: could not process type " << src_tp[0]; ss << " as a strided dimension"; throw type_error(ss.str()); } if (!src_tp[1].get_as_strided(src_arrmeta[1], &index_dim_size, &self->m_index_stride, &index_el_tp, &index_el_meta)) { stringstream ss; ss << "take arrfunc: could not process type " << src_tp[1]; ss << " as a strided dimension"; throw type_error(ss.str()); } if (self->m_dst_dim_size != index_dim_size) { stringstream ss; ss << "indexed take arrfunc: index data and dest have different sizes, "; ss << index_dim_size << " and " << self->m_dst_dim_size; throw invalid_argument(ss.str()); } if (index_el_tp.get_type_id() != (type_id_t)type_id_of<intptr_t>::value) { stringstream ss; ss << "indexed take arrfunc: index type should be intptr, not "; ss << index_el_tp; throw type_error(ss.str()); } // Create the child element assignment ckernel return make_assignment_kernel(ckb, ckb_offset, dst_el_tp, 
dst_el_meta, src0_el_tp, src0_el_meta, kernel_request_single, &eval::default_eval_context); }
/// Instantiates the take_by_pointer ckernel chain: one outer kernel driven
/// by the dst/index dimensions, plus one inner kernel per source dimension,
/// followed by the element assignment kernel.
static intptr_t instantiate(
    char *DYND_UNUSED(static_data), size_t DYND_UNUSED(data_size),
    char *DYND_UNUSED(data), void *ckb, intptr_t ckb_offset,
    const ndt::type &dst_tp, const char *dst_arrmeta,
    intptr_t DYND_UNUSED(nsrc), const ndt::type *src_tp,
    const char *const *src_arrmeta, kernel_request_t kernreq,
    const eval::eval_context *ectx, const nd::array &DYND_UNUSED(kwds),
    const std::map<nd::string, ndt::type> &DYND_UNUSED(tp_vars))
{
  intptr_t ndim = src_tp[0].get_ndim();
  ndt::type dst_el_tp;
  const char *dst_el_meta;
  const size_stride_t *dst_size_stride;
  // Destination is viewed as a single strided dimension
  if (!dst_tp.get_as_strided(dst_arrmeta, 1, &dst_size_stride, &dst_el_tp,
                             &dst_el_meta)) {
    stringstream ss;
    ss << "take_by_pointer callable: could not process type " << dst_tp;
    ss << " as a strided dimension";
    throw type_error(ss.str());
  }
  // src[0] is the value array (all ndim dims), src[1] is the index array
  ndt::type src_el_tp[2];
  const char *src_el_meta[2];
  const size_stride_t *src_size_stride[2];
  for (intptr_t i = 0; i < 2; ++i) {
    if (!src_tp[i].get_as_strided(src_arrmeta[i], src_tp[i].get_ndim(),
                                  &src_size_stride[i], &src_el_tp[i],
                                  &src_el_meta[i])) {
      stringstream ss;
      ss << "take_by_pointer callable: could not process type " << src_tp[i];
      ss << " as a strided dimension";
      throw type_error(ss.str());
    }
  }
  // Outer kernel iterates the dst dimension using the index array's
  // leading stride
  take_by_pointer_outer_ck::make(
      ckb, kernreq, ckb_offset, dst_size_stride[0].dim_size,
      dst_size_stride[0].stride, src_size_stride[1][0].stride);
  // One inner kernel per source dimension.
  // NOTE(review): src_size_stride[1][1].stride is the same for every i —
  // presumably the index array is 2-d (entries x coords) so its inner
  // stride is constant; confirm it is not meant to be [1][i].
  for (intptr_t i = 0; i < ndim; ++i) {
    take_by_pointer_ck::make(ckb, kernel_request_single, ckb_offset,
                             src_size_stride[0][i].dim_size,
                             src_size_stride[0][i].stride,
                             src_size_stride[1][1].stride);
  }
  return make_assignment_kernel(ckb, ckb_offset, dst_el_tp, dst_el_meta,
                                src_el_tp[0], src_el_meta[0],
                                kernel_request_single, ectx);
}
/// Builds an elementwise expression kernel over one strided destination
/// dimension with N sources, where each source may be strided, var, or
/// lower-dimensional (broadcast with zero stride).
static size_t make_elwise_strided_or_var_to_strided_dimension_expr_kernel_for_N(
    void *ckb, intptr_t ckb_offset, const ndt::type &dst_tp,
    const char *dst_arrmeta, size_t DYND_UNUSED(src_count),
    const ndt::type *src_tp, const char *const *src_arrmeta,
    kernel_request_t kernreq, const eval::eval_context *ectx,
    const expr_kernel_generator *elwise_handler)
{
  intptr_t undim = dst_tp.get_ndim();
  const char *dst_child_arrmeta;
  const char *src_child_arrmeta[N];
  ndt::type dst_child_dt;
  ndt::type src_child_dt[N];
  // NOTE(review): alloc_ck is immediately followed by make below — this
  // looks like it may allocate/construct twice; confirm against the other
  // *_for_N variant which only calls make.
  strided_or_var_to_strided_expr_kernel_extra<N> *e =
      reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb)
          ->alloc_ck<strided_or_var_to_strided_expr_kernel_extra<N>>(
              ckb_offset);
  strided_or_var_to_strided_expr_kernel_extra<N>::make(ckb, kernreq,
                                                       ckb_offset);
  // The dst strided parameters
  if (!dst_tp.get_as_strided(dst_arrmeta, &e->size, &e->dst_stride,
                             &dst_child_dt, &dst_child_arrmeta)) {
    throw type_error("make_elwise_strided_dimension_expr_kernel: dst was not "
                     "strided as expected");
  }
  for (int i = 0; i < N; ++i) {
    intptr_t src_size;
    // The src[i] strided parameters
    if (src_tp[i].get_ndim() < undim) {
      // This src value is getting broadcasted
      e->src_stride[i] = 0;
      e->src_offset[i] = 0;
      e->is_src_var[i] = false;
      src_child_arrmeta[i] = src_arrmeta[i];
      src_child_dt[i] = src_tp[i];
    }
    else if (src_tp[i].get_as_strided(src_arrmeta[i], &src_size,
                                      &e->src_stride[i], &src_child_dt[i],
                                      &src_child_arrmeta[i])) {
      // Check for a broadcasting error
      if (src_size != 1 && e->size != src_size) {
        throw broadcast_error(dst_tp, dst_arrmeta, src_tp[i], src_arrmeta[i]);
      }
      e->src_offset[i] = 0;
      e->is_src_var[i] = false;
    }
    else {
      // Not strided and not lower-dim: treat as a var dimension, reading
      // its stride/offset from the var_dim arrmeta
      const ndt::var_dim_type *vdd =
          static_cast<const ndt::var_dim_type *>(src_tp[i].extended());
      const var_dim_type_arrmeta *src_md =
          reinterpret_cast<const var_dim_type_arrmeta *>(src_arrmeta[i]);
      e->src_stride[i] = src_md->stride;
      e->src_offset[i] = src_md->offset;
      e->is_src_var[i] = true;
      src_child_arrmeta[i] = src_arrmeta[i] + sizeof(var_dim_type_arrmeta);
      src_child_dt[i] = vdd->get_element_type();
    }
  }
  // Build the child kernel over the element types
  return elwise_handler->make_expr_kernel(
      ckb, ckb_offset, dst_child_dt, dst_child_arrmeta, N, src_child_dt,
      src_child_arrmeta, kernel_request_strided, ectx);
}
/// Builds an elementwise expression kernel over one strided destination
/// dimension with N sources; every source must either be strided or have
/// fewer dimensions than the destination (in which case it is broadcast
/// with a zero stride).
static size_t make_elwise_strided_dimension_expr_kernel_for_N(
    void *ckb, intptr_t ckb_offset, const ndt::type &dst_tp,
    const char *dst_arrmeta, size_t DYND_UNUSED(src_count),
    const ndt::type *src_tp, const char *const *src_arrmeta,
    kernel_request_t kernreq, const eval::eval_context *ectx,
    const expr_kernel_generator *elwise_handler)
{
  const intptr_t dst_ndim = dst_tp.get_ndim();
  ndt::type child_dst_tp;
  const char *child_dst_arrmeta;
  ndt::type child_src_tp[N];
  const char *child_src_arrmeta[N];
  strided_expr_kernel_extra<N> *ck =
      strided_expr_kernel_extra<N>::make(ckb, kernreq, ckb_offset);
  // The destination must present itself as a strided dimension
  if (!dst_tp.get_as_strided(dst_arrmeta, &ck->size, &ck->dst_stride,
                             &child_dst_tp, &child_dst_arrmeta)) {
    throw type_error("make_elwise_strided_dimension_expr_kernel: dst was not "
                     "strided as expected");
  }
  for (int i = 0; i < N; ++i) {
    if (src_tp[i].get_ndim() < dst_ndim) {
      // Fewer dims than dst: broadcast this operand with a zero stride
      ck->src_stride[i] = 0;
      child_src_arrmeta[i] = src_arrmeta[i];
      child_src_tp[i] = src_tp[i];
      continue;
    }
    intptr_t src_dim_size;
    if (!src_tp[i].get_as_strided(src_arrmeta[i], &src_dim_size,
                                  &ck->src_stride[i], &child_src_tp[i],
                                  &child_src_arrmeta[i])) {
      throw type_error("make_elwise_strided_dimension_expr_kernel: src was "
                       "not strided as expected");
    }
    // Sizes must match, except a size-one operand which broadcasts
    if (src_dim_size != 1 && src_dim_size != ck->size) {
      throw broadcast_error(dst_tp, dst_arrmeta, src_tp[i], src_arrmeta[i]);
    }
  }
  // Hand off to the element-level kernel generator
  return elwise_handler->make_expr_kernel(
      ckb, ckb_offset, child_dst_tp, child_dst_arrmeta, N, child_src_tp,
      child_src_arrmeta, kernel_request_strided, ectx);
}
// TODO This should handle both strided and var cases
/// Instantiates the strided rolling-window ckernel (arrfunc API): validates
/// matching strided dst/src dimensions, appends an NaN-filling child kernel
/// for the leading window positions, then instantiates the wrapped window
/// operation over a synthesized fixed-dim view of the source window.
static intptr_t instantiate_strided(
    const arrfunc_type_data *af_self, const arrfunc_type *DYND_UNUSED(af_tp),
    void *ckb, intptr_t ckb_offset, const ndt::type &dst_tp,
    const char *dst_arrmeta, const ndt::type *src_tp,
    const char *const *src_arrmeta, kernel_request_t kernreq,
    const eval::eval_context *ectx, const nd::array &kwds)
{
  typedef strided_rolling_ck self_type;
  rolling_arrfunc_data *data = *af_self->get_data_as<rolling_arrfunc_data *>();
  // Remember where this kernel starts so `self` can be re-fetched after
  // child kernel creation possibly reallocates the ckernel buffer
  intptr_t root_ckb_offset = ckb_offset;
  self_type *self = self_type::create(ckb, kernreq, ckb_offset);
  const arrfunc_type_data *window_af = data->window_op.get();
  const arrfunc_type *window_af_tp = data->window_op.get_type();
  ndt::type dst_el_tp, src_el_tp;
  const char *dst_el_arrmeta, *src_el_arrmeta;
  if (!dst_tp.get_as_strided(dst_arrmeta, &self->m_dim_size,
                             &self->m_dst_stride, &dst_el_tp,
                             &dst_el_arrmeta)) {
    stringstream ss;
    ss << "rolling window ckernel: could not process type " << dst_tp;
    ss << " as a strided dimension";
    throw type_error(ss.str());
  }
  intptr_t src_dim_size;
  if (!src_tp[0].get_as_strided(src_arrmeta[0], &src_dim_size,
                                &self->m_src_stride, &src_el_tp,
                                &src_el_arrmeta)) {
    stringstream ss;
    ss << "rolling window ckernel: could not process type " << src_tp[0];
    ss << " as a strided dimension";
    throw type_error(ss.str());
  }
  // Rolling requires equal-length source and destination dimensions
  if (src_dim_size != self->m_dim_size) {
    stringstream ss;
    ss << "rolling window ckernel: source dimension size " << src_dim_size
       << " for type " << src_tp[0]
       << " does not match dest dimension size " << self->m_dim_size
       << " for type " << dst_tp;
    throw type_error(ss.str());
  }
  self->m_window_size = data->window_size;
  // Create the NA-filling child ckernel
  ckb_offset = kernels::make_constant_value_assignment_ckernel(
      ckb, ckb_offset, dst_el_tp, dst_el_arrmeta,
      numeric_limits<double>::quiet_NaN(), kernel_request_strided, ectx);
  // Re-retrieve the self pointer, because it may be at a new memory location
  // now
  self = reinterpret_cast<ckernel_builder<kernel_request_host> *>(ckb)
             ->get_at<self_type>(root_ckb_offset);
  // Create the window op child ckernel
  self->m_window_op_offset = ckb_offset - root_ckb_offset;
  // We construct array arrmeta for the window op ckernel to use,
  // without actually creating an nd::array to hold it.
  arrmeta_holder(ndt::make_fixed_dim(data->window_size, src_el_tp))
      .swap(self->m_src_winop_meta);
  self->m_src_winop_meta.get_at<fixed_dim_type_arrmeta>(0)->dim_size =
      self->m_window_size;
  self->m_src_winop_meta.get_at<fixed_dim_type_arrmeta>(0)->stride =
      self->m_src_stride;
  // Copy the element arrmeta to sit just after the fixed-dim header
  if (src_el_tp.get_arrmeta_size() > 0) {
    src_el_tp.extended()->arrmeta_copy_construct(
        self->m_src_winop_meta.get() + sizeof(fixed_dim_type_arrmeta),
        src_el_arrmeta, NULL);
  }
  const char *src_winop_meta = self->m_src_winop_meta.get();
  return window_af->instantiate(
      window_af, window_af_tp, ckb, ckb_offset, dst_el_tp, dst_el_arrmeta,
      &self->m_src_winop_meta.get_type(), &src_winop_meta,
      kernel_request_strided, ectx, kwds);
}
/// Incrementally refines a (type, arrmeta, data) triple toward a flat bytes
/// view, peeling one layer per call. All parameters are in/out state:
/// `data_dim_size == -1` means "no dimension accumulated yet"; on success
/// `data_tp` is set to the default (empty) type; on failure `data_tp` is
/// set to the empty type AND `data_dim_size` is reset to -1.
static void refine_bytes_view(memory_block_ptr &data_ref, char *&data_ptr,
                              ndt::type &data_tp, const char *&data_meta,
                              intptr_t &data_dim_size, intptr_t &data_stride)
{
  // Handle sequence of strided dims
  intptr_t dim_size, stride;
  ndt::type el_tp;
  const char *el_meta;
  if (data_tp.get_as_strided(data_meta, &dim_size, &stride, &el_tp,
                             &el_meta)) {
    dimvector shape(data_tp.get_ndim());
    dimvector strides(data_tp.get_ndim());
    intptr_t ndim = 1;
    shape[0] = dim_size;
    strides[0] = stride;
    bool csorted = true;
    // Get all the strided dimensions we can in a row
    while (el_tp.get_as_strided(el_meta, &dim_size, &stride, &el_tp,
                                &el_meta)) {
      shape[ndim] = dim_size;
      strides[ndim] = stride;
      // A stride larger than the previous one means non-C order
      if (stride > strides[ndim - 1]) {
        csorted = false;
      }
      ++ndim;
    }
    if (!csorted) {
      // If the strides weren't sorted in C order, sort them
      shortvector<int> axis_perm(ndim);
      strides_to_axis_perm(ndim, strides.get(), axis_perm.get());
      dimvector shape_sorted(ndim);
      dimvector strides_sorted(ndim);
      for (intptr_t i = 0; i < ndim; ++i) {
        int i_perm = axis_perm[i];
        // axis_perm is ascending-stride order, so reverse it for C order
        shape_sorted[ndim - i - 1] = shape[i_perm];
        strides_sorted[ndim - i - 1] = strides[i_perm];
      }
      shape.swap(shape_sorted);
      strides.swap(strides_sorted);
    }
    // Try to collapse the shape/strides into a single strided array
    intptr_t i = 0;
    while (data_dim_size == -1 && i < ndim) {
      // If there's not already a dim_size/stride, start one
      // (size-1 dims are skipped; they contribute nothing)
      if (shape[i] != 1) {
        data_dim_size = shape[i];
        data_stride = strides[i];
      }
      ++i;
    }
    for (; i < ndim; ++i) {
      if (shape[i] != 1) {
        // Dimensions collapse only when they are exactly contiguous:
        // inner size * inner stride must equal the outer stride
        if (shape[i] * strides[i] != data_stride) {
          // Indicate we couldn't view this as bytes
          data_tp = ndt::type();
          data_dim_size = -1;
          return;
        }
        data_dim_size *= shape[i];
        data_stride = strides[i];
      }
    }
    data_tp = el_tp;
    data_meta = el_meta;
    return;
  }
  switch (data_tp.get_type_id()) {
  case var_dim_type_id: {
    // We can only allow leading var_dim
    if (data_dim_size != -1) {
      data_tp = ndt::type();
      data_dim_size = -1;
      return;
    }
    const var_dim_type_arrmeta *meta =
        reinterpret_cast<const var_dim_type_arrmeta *>(data_meta);
    // Track the memory block that actually owns the var data
    if (meta->blockref != NULL) {
      data_ref = meta->blockref;
    }
    var_dim_type_data *d = reinterpret_cast<var_dim_type_data *>(data_ptr);
    data_ptr = d->begin + meta->offset;
    // A size-1 var dim contributes no dimension (stays -1)
    if (d->size != 1) {
      data_dim_size = d->size;
      data_stride = meta->stride;
    }
    data_tp = data_tp.extended<ndt::var_dim_type>()->get_element_type();
    data_meta += sizeof(var_dim_type_arrmeta);
    return;
  }
  case pointer_type_id: {
    // We can only strip away leading pointers
    if (data_dim_size != -1) {
      data_tp = ndt::type();
      data_dim_size = -1;
      return;
    }
    const pointer_type_arrmeta *meta =
        reinterpret_cast<const pointer_type_arrmeta *>(data_meta);
    if (meta->blockref != NULL) {
      data_ref = meta->blockref;
    }
    // Dereference the pointer and continue with the target type
    data_ptr = *reinterpret_cast<char **>(data_ptr) + meta->offset;
    data_tp = data_tp.extended<ndt::pointer_type>()->get_target_type();
    data_meta += sizeof(pointer_type_arrmeta);
    return;
  }
  case string_type_id: {
    // We can only view leading strings
    if (data_dim_size != -1) {
      data_tp = ndt::type();
      data_dim_size = -1;
      return;
    }
    // Look at the actual string data, not the pointer to it
    const string_type_arrmeta *meta =
        reinterpret_cast<const string_type_arrmeta *>(data_meta);
    if (meta->blockref != NULL) {
      data_ref = meta->blockref;
    }
    const dynd::string *str_ptr =
        reinterpret_cast<const dynd::string *>(data_ptr);
    data_ptr = str_ptr->begin;
    // The string's bytes become the final view: one byte per element
    data_tp = ndt::type();
    data_dim_size = str_ptr->end - str_ptr->begin;
    data_stride = 1;
    return;
  }
  default:
    break;
  }
  // If the data type has a fixed size, check if it fits the strides
  size_t data_tp_size = data_tp.get_data_size();
  if (data_tp_size > 0) {
    if (data_dim_size == -1) {
      // Indicate success (one item)
      data_tp = ndt::type();
      data_dim_size = data_tp_size;
      data_stride = 1;
      return;
    }
    else if ((intptr_t)data_tp_size == data_stride) {
      // Elements are packed back-to-back, so the dim collapses into bytes
      data_tp = ndt::type();
      data_dim_size *= data_tp_size;
      data_stride = 1;
      return;
    }
  }
  // Indicate we couldn't view this as bytes
  data_tp = ndt::type();
  data_dim_size = -1;
}