/**
 * Follows any chain of pointer types at the front of 'self', producing an
 * equivalent array with the pointer indirections stripped off. The result
 * shares the final target data rather than copying it.
 */
static nd::array array_function_dereference(const nd::array &self)
{
  // Follow the pointers to eliminate them
  ndt::type dt = self.get_type();
  const char *arrmeta = self.get_arrmeta();
  char *data = self.get_ndo()->m_data_pointer;
  memory_block_data *dataref = self.get_ndo()->m_data_reference;
  if (dataref == NULL) {
    // Embedded data: the array's own memory block is the data owner
    dataref = self.get_memblock().get();
  }
  uint64_t flags = self.get_ndo()->m_flags;
  while (dt.get_type_id() == pointer_type_id) {
    const pointer_type_arrmeta *md = reinterpret_cast<const pointer_type_arrmeta *>(arrmeta);
    dt = dt.extended<ndt::pointer_type>()->get_target_type();
    arrmeta += sizeof(pointer_type_arrmeta);
    // Dereference one level: follow the stored pointer plus the arrmeta offset
    data = *reinterpret_cast<char **>(data) + md->offset;
    // NOTE(review): assumes md->blockref is non-NULL for every pointer level;
    // if it can be NULL, the memory_block_incref below would fault — confirm
    // against the pointer type's arrmeta invariants
    dataref = md->blockref;
  }
  // Create an array without the pointers
  nd::array result(make_array_memory_block(dt.get_arrmeta_size()));
  if (!dt.is_builtin()) {
    // Copy the remaining (post-pointer) arrmeta into the result
    dt.extended()->arrmeta_copy_construct(result.get_arrmeta(), arrmeta,
                                          &self.get_ndo()->m_memblockdata);
  }
  result.get_ndo()->m_type = dt.release();
  result.get_ndo()->m_data_pointer = data;
  result.get_ndo()->m_data_reference = dataref;
  // Take a new reference for the result's data ownership
  memory_block_incref(result.get_ndo()->m_data_reference);
  result.get_ndo()->m_flags = flags;
  return result;
}
/**
 * Views the array 'arr' as the type 'tp', reusing the underlying data
 * without copying when the two types have compatible layouts.
 *
 * \param arr  The array to view.
 * \param tp   The type to view it as.
 *
 * \returns A new array sharing arr's data, or 'arr' itself when the
 *          types already match exactly.
 * \throws type_error if no view of 'arr' as 'tp' can be constructed.
 */
nd::array nd::view(const nd::array& arr, const ndt::type& tp)
{
  // If the types match exactly, simply return 'arr'
  if (arr.get_type() == tp) {
    return arr;
  } else if (arr.get_ndim() == tp.get_ndim()) {
    // Allocate a result array to attempt the view in it
    array result(make_array_memory_block(tp.get_metadata_size()));
    // Copy the fields
    result.get_ndo()->m_data_pointer = arr.get_ndo()->m_data_pointer;
    if (arr.get_ndo()->m_data_reference == NULL) {
      // Embedded data, need reference to the array
      result.get_ndo()->m_data_reference = arr.get_memblock().release();
    } else {
      // Use the same data reference, avoid producing a chain
      result.get_ndo()->m_data_reference = arr.get_data_memblock().release();
    }
    result.get_ndo()->m_type = ndt::type(tp).release();
    result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
    // Now try to copy the metadata as a view
    if (try_view(arr.get_type(), arr.get_ndo_meta(), tp, result.get_ndo_meta(),
                 arr.get_memblock().get())) {
      // If it succeeded, return it
      return result;
    }
    // Otherwise fall through, let it get destructed, and raise an error
  }
  stringstream ss;
  ss << "Unable to view nd::array of type " << arr.get_type();
  // FIX: was "as type " — missing leading space fused the two type names
  // together in the message; now matches the wording used by view_concrete
  ss << " as type " << tp;
  throw type_error(ss.str());
}
/**
 * Attempts to view 'arr' as the concrete type 'tp' without copying data,
 * additionally handling the special case of viewing an outermost "var"
 * dimension as a "fixed[#]" dimension whose size matches the var data.
 *
 * \throws type_error if the view cannot be constructed.
 */
static nd::array view_concrete(const nd::array &arr, const ndt::type &tp)
{
  // Allocate a result array to attempt the view in it
  nd::array result(make_array_memory_block(tp.get_arrmeta_size()));
  // Copy the fields
  result.get_ndo()->data.ptr = arr.get_ndo()->data.ptr;
  if (arr.get_ndo()->data.ref == NULL) {
    // Embedded data, need reference to the array
    result.get_ndo()->data.ref = arr.get_memblock().release();
  } else {
    // Use the same data reference, avoid producing a chain
    result.get_ndo()->data.ref = arr.get_data_memblock().release();
  }
  result.get_ndo()->m_type = ndt::type(tp).release();
  result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
  // First handle a special case of viewing outermost "var" as "fixed[#]"
  if (arr.get_type().get_type_id() == var_dim_type_id &&
      tp.get_type_id() == fixed_dim_type_id) {
    const var_dim_type_arrmeta *in_am =
        reinterpret_cast<const var_dim_type_arrmeta *>(arr.get_arrmeta());
    const var_dim_type_data *in_dat =
        reinterpret_cast<const var_dim_type_data *>(arr.get_readonly_originptr());
    fixed_dim_type_arrmeta *out_am =
        reinterpret_cast<fixed_dim_type_arrmeta *>(result.get_arrmeta());
    out_am->dim_size = tp.extended<ndt::fixed_dim_type>()->get_fixed_dim_size();
    out_am->stride = in_am->stride;
    // The view only works if the var dim's actual size equals the fixed size
    if ((intptr_t)in_dat->size == out_am->dim_size) {
      // Use the more specific data reference from the var arrmeta if possible
      if (in_am->blockref != NULL) {
        // Swap the result's data reference: drop the one installed above,
        // take a new reference on the var dim's block instead
        memory_block_decref(result.get_ndo()->data.ref);
        memory_block_incref(in_am->blockref);
        result.get_ndo()->data.ref = in_am->blockref;
      }
      result.get_ndo()->data.ptr = in_dat->begin + in_am->offset;
      // Try to copy the rest of the arrmeta as a view
      if (try_view(arr.get_type().extended<ndt::base_dim_type>()->get_element_type(),
                   arr.get_arrmeta() + sizeof(var_dim_type_arrmeta),
                   tp.extended<ndt::base_dim_type>()->get_element_type(),
                   result.get_arrmeta() + sizeof(fixed_dim_type_arrmeta),
                   arr.get_memblock().get())) {
        return result;
      }
    }
  }
  // Otherwise try to copy the arrmeta as a view
  else if (try_view(arr.get_type(), arr.get_arrmeta(), tp, result.get_arrmeta(),
                    arr.get_memblock().get())) {
    // If it succeeded, return it
    return result;
  }
  stringstream ss;
  ss << "Unable to view nd::array of type " << arr.get_type();
  ss << " as type " << tp;
  throw type_error(ss.str());
}
/**
 * Stores 'value' into a parameter slot described by (paramtype, metadata,
 * data). A void-pointer parameter receives the array's preamble pointer
 * directly; every other parameter type goes through a typed assignment.
 */
static void set(const ndt::type& paramtype, char *metadata, char *data, const nd::array& value)
{
    if (paramtype.get_type_id() == void_pointer_type_id) {
        // TODO: switch to a better mechanism for passing nd::array references
        *reinterpret_cast<const array_preamble **>(data) = value.get_ndo();
        return;
    }
    typed_data_assign(paramtype, metadata, data,
                      value.get_type(), value.get_ndo_meta(),
                      value.get_ndo()->m_data_pointer);
}
/**
 * Given a buffer array of type "strided * T" which was
 * created by nd::empty, resets it so it can be used
 * as a buffer again.
 *
 * NOTE: If the array is not of type "strided * T" and default
 * initialized by nd::empty, undefined behavior will result.
 */
inline void reset_strided_buffer_array(const nd::array& buf)
{
    const ndt::type &buf_tp = buf.get_type();
    base_type_members::flags_type buf_flags = buf_tp.extended()->get_flags();
    if (!(buf_flags & (type_flag_blockref | type_flag_zeroinit | type_flag_destructor))) {
        // Plain POD contents: nothing needs resetting
        return;
    }
    char *arrmeta_ptr = buf.get_ndo()->get_arrmeta();
    char *data_ptr = buf.get_readwrite_originptr();
    // Release any buffers referenced from the arrmeta
    buf_tp.extended()->arrmeta_reset_buffers(buf.get_ndo()->get_arrmeta());
    strided_dim_type_arrmeta *dim_meta =
        reinterpret_cast<strided_dim_type_arrmeta *>(arrmeta_ptr);
    if (buf_flags & type_flag_destructor) {
        // Run element destructors before wiping the memory
        buf_tp.extended()->data_destruct(arrmeta_ptr, data_ptr);
    }
    // Zero the whole strided extent so the buffer is default-initialized again
    memset(data_ptr, 0, dim_meta->dim_size * dim_meta->stride);
}
/**
 * Attempts to view the data of 'arr' as one contiguous chunk of bytes with
 * the bytes type 'tp'. Returns a default-constructed (NULL) array when the
 * data cannot be viewed that way.
 */
static nd::array view_as_bytes(const nd::array &arr, const ndt::type &tp)
{
  if (arr.get_type().get_flags() & type_flag_destructor) {
    // Can't view arrays of object type
    return nd::array();
  }

  // Get the essential components of the array to analyze
  memory_block_ptr data_ref = arr.get_data_memblock();
  char *data_ptr = arr.get_ndo()->data.ptr;
  ndt::type data_tp = arr.get_type();
  const char *data_meta = arr.get_arrmeta();
  intptr_t data_dim_size = -1, data_stride = 0;
  // Repeatedly refine the data
  // NOTE(review): relies on refine_bytes_view eventually driving data_tp to
  // uninitialized_type_id; otherwise this loop would not terminate — confirm
  // against refine_bytes_view's contract
  while (data_tp.get_type_id() != uninitialized_type_id) {
    refine_bytes_view(data_ref, data_ptr, data_tp, data_meta, data_dim_size, data_stride);
  }

  // Check that it worked, and that the resulting data pointer is aligned
  if (data_dim_size < 0 ||
      !offset_is_aligned(reinterpret_cast<size_t>(data_ptr),
                         tp.extended<ndt::bytes_type>()->get_target_alignment())) {
    // This signals we could not view the data as a
    // contiguous chunk of bytes
    return nd::array();
  }

  char *result_data_ptr = NULL;
  nd::array result(make_array_memory_block(tp.extended()->get_arrmeta_size(),
                                           tp.get_data_size(),
                                           tp.get_data_alignment(), &result_data_ptr));
  // Set the bytes extents: [0] is the begin pointer, [1] the end pointer
  ((char **)result_data_ptr)[0] = data_ptr;
  ((char **)result_data_ptr)[1] = data_ptr + data_dim_size;
  // Set the array arrmeta
  array_preamble *ndo = result.get_ndo();
  ndo->m_type = ndt::type(tp).release();
  ndo->data.ptr = result_data_ptr;
  ndo->data.ref = NULL;
  ndo->m_flags = arr.get_flags();
  // Set the bytes arrmeta, transferring ownership of the refined data
  // reference into the blockref slot
  bytes_type_arrmeta *ndo_meta =
      reinterpret_cast<bytes_type_arrmeta *>(result.get_arrmeta());
  ndo_meta->blockref = data_ref.release();
  return result;
}
/**
 * Attempts to view the bytes array 'arr' as the type 'tp': either as a
 * single POD value whose size exactly matches the byte buffer, or as a
 * fixed dim of POD elements which evenly divide the buffer. Returns a
 * default-constructed (NULL) array when no such view is possible.
 */
static nd::array view_from_bytes(const nd::array &arr, const ndt::type &tp)
{
  if (tp.get_flags() & (type_flag_blockref | type_flag_destructor |
                        type_flag_not_host_readable)) {
    // Bytes cannot be viewed as blockref types, types which require
    // destruction, or types not on host memory.
    return nd::array();
  }

  const bytes_type_arrmeta *bytes_meta =
      reinterpret_cast<const bytes_type_arrmeta *>(arr.get_arrmeta());
  bytes_type_data *bytes_d =
      reinterpret_cast<bytes_type_data *>(arr.get_ndo()->data.ptr);
  memory_block_ptr data_ref;
  if (bytes_meta->blockref != NULL) {
    // Prefer the block that actually owns the bytes data
    data_ref = bytes_meta->blockref;
  } else {
    data_ref = arr.get_data_memblock();
  }
  char *data_ptr = bytes_d->begin;
  intptr_t data_size = bytes_d->end - data_ptr;

  size_t tp_data_size = tp.get_data_size();
  if (tp_data_size > 0) {
    // If the data type has a single chunk of POD memory, it's ok
    if ((intptr_t)tp_data_size == data_size &&
        offset_is_aligned(reinterpret_cast<size_t>(data_ptr), tp.get_data_alignment())) {
      // Allocate a result array to attempt the view in it
      nd::array result(make_array_memory_block(tp.get_arrmeta_size()));
      // Initialize the fields
      result.get_ndo()->data.ptr = data_ptr;
      result.get_ndo()->data.ref = data_ref.release();
      result.get_ndo()->m_type = ndt::type(tp).release();
      result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
      if (tp.get_arrmeta_size() > 0) {
        tp.extended()->arrmeta_default_construct(result.get_arrmeta(), true);
      }
      return result;
    }
  } else if (tp.get_type_id() == fixed_dim_type_id) {
    ndt::type arr_tp = tp;
    ndt::type el_tp = arr_tp.extended<ndt::base_dim_type>()->get_element_type();
    size_t el_data_size = el_tp.get_data_size();
    // If the element type has a single chunk of POD memory, and
    // it divides into the memory size, it's ok
    // NOTE(review): assumes el_data_size != 0; if a fixed dim element with
    // zero data size can reach this path, the modulo below divides by
    // zero — confirm
    if (data_size % (intptr_t)el_data_size == 0 &&
        offset_is_aligned(reinterpret_cast<size_t>(data_ptr),
                          arr_tp.get_data_alignment())) {
      intptr_t dim_size = data_size / el_data_size;
      if (arr_tp.get_kind() != kind_kind) {
        // Concrete fixed dim: the declared size must match exactly
        if (arr_tp.extended<ndt::fixed_dim_type>()->get_fixed_dim_size() != dim_size) {
          return nd::array();
        }
      } else {
        // Transform the symbolic fixed type into a concrete one
        arr_tp = ndt::make_fixed_dim(dim_size, el_tp);
      }
      // Allocate a result array to attempt the view in it
      nd::array result(make_array_memory_block(arr_tp.get_arrmeta_size()));
      // Initialize the fields
      result.get_ndo()->data.ptr = data_ptr;
      result.get_ndo()->data.ref = data_ref.release();
      result.get_ndo()->m_type = ndt::type(arr_tp).release();
      result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
      if (el_tp.get_arrmeta_size() > 0) {
        // Construct the element arrmeta just past the fixed dim arrmeta
        el_tp.extended()->arrmeta_default_construct(
            result.get_arrmeta() + sizeof(fixed_dim_type_arrmeta), true);
      }
      fixed_dim_type_arrmeta *fixed_meta =
          reinterpret_cast<fixed_dim_type_arrmeta *>(result.get_arrmeta());
      fixed_meta->dim_size = dim_size;
      fixed_meta->stride = el_data_size;
      return result;
    }
  }

  // No view could be produced
  return nd::array();
}
/**
 * Implements nd.fields: selects a subset of the fields of a struct-kind
 * dynd array 'n', building a new array that views (shares) the original
 * data rather than copying it.
 *
 * \param n           The struct-kind dynd array to select fields from.
 * \param field_list  A Python object convertible to a vector of field
 *                    name strings.
 *
 * \throws runtime_error if 'n' is not struct kind, if no fields were
 *                       requested, or if a requested field name does
 *                       not exist in the struct.
 */
dynd::nd::array pydynd::nd_fields(const nd::array& n, PyObject *field_list)
{
  vector<string> selected_fields;
  pyobject_as_vector_string(field_list, selected_fields);

  // TODO: Move this implementation into dynd
  ndt::type fdt = n.get_dtype();
  if (fdt.get_kind() != struct_kind) {
    stringstream ss;
    ss << "nd.fields must be given a dynd array of 'struct' kind, not ";
    ss << fdt;
    throw runtime_error(ss.str());
  }
  const base_struct_type *bsd = static_cast<const base_struct_type *>(fdt.extended());
  const ndt::type *field_types = bsd->get_field_types();

  if (selected_fields.empty()) {
    throw runtime_error("nd.fields requires at least one field name to be specified");
  }
  // Construct the field mapping and output field types
  vector<intptr_t> selected_index(selected_fields.size());
  vector<ndt::type> selected_ndt_types(selected_fields.size());
  for (size_t i = 0; i != selected_fields.size(); ++i) {
    selected_index[i] = bsd->get_field_index(selected_fields[i]);
    if (selected_index[i] < 0) {
      stringstream ss;
      ss << "field name ";
      print_escaped_utf8_string(ss, selected_fields[i]);
      ss << " does not exist in dynd type " << fdt;
      throw runtime_error(ss.str());
    }
    selected_ndt_types[i] = field_types[selected_index[i]];
  }
  // Create the result udt (the struct with only the selected fields)
  ndt::type rudt = ndt::make_struct(selected_ndt_types, selected_fields);
  ndt::type result_tp = n.get_type().with_replaced_dtype(rudt);
  const base_struct_type *rudt_bsd = static_cast<const base_struct_type *>(rudt.extended());

  // Allocate the new memory block.
  size_t metadata_size = result_tp.get_metadata_size();
  nd::array result(make_array_memory_block(metadata_size));
  // Clone the data pointer
  result.get_ndo()->m_data_pointer = n.get_ndo()->m_data_pointer;
  result.get_ndo()->m_data_reference = n.get_ndo()->m_data_reference;
  if (result.get_ndo()->m_data_reference == NULL) {
    // Embedded data: reference the source array's own memory block
    result.get_ndo()->m_data_reference = n.get_memblock().get();
  }
  memory_block_incref(result.get_ndo()->m_data_reference);
  // Copy the flags
  result.get_ndo()->m_flags = n.get_ndo()->m_flags;
  // Set the type and transform the metadata
  result.get_ndo()->m_type = ndt::type(result_tp).release();
  // First copy all the array data type metadata, one dimension at a time,
  // advancing both metadata cursors by the per-dimension size
  ndt::type tmp_dt = result_tp;
  char *dst_metadata = result.get_ndo_meta();
  const char *src_metadata = n.get_ndo_meta();
  while (tmp_dt.get_ndim() > 0) {
    if (tmp_dt.get_kind() != uniform_dim_kind) {
      throw runtime_error("nd.fields doesn't support dimensions with pointers yet");
    }
    const base_uniform_dim_type *budd = static_cast<const base_uniform_dim_type *>(
        tmp_dt.extended());
    size_t offset = budd->metadata_copy_construct_onedim(dst_metadata, src_metadata,
                                                         n.get_memblock().get());
    dst_metadata += offset;
    src_metadata += offset;
    tmp_dt = budd->get_element_type();
  }
  // Then create the metadata for the new struct
  const size_t *metadata_offsets = bsd->get_metadata_offsets();
  const size_t *result_metadata_offsets = rudt_bsd->get_metadata_offsets();
  const size_t *data_offsets = bsd->get_data_offsets(src_metadata);
  size_t *result_data_offsets = reinterpret_cast<size_t *>(dst_metadata);
  for (size_t i = 0; i != selected_fields.size(); ++i) {
    const ndt::type& dt = selected_ndt_types[i];
    // Copy the data offset so each selected field aliases the original storage
    result_data_offsets[i] = data_offsets[selected_index[i]];
    // Copy the metadata for this field
    if (dt.get_metadata_size() > 0) {
      dt.extended()->metadata_copy_construct(dst_metadata + result_metadata_offsets[i],
                                             src_metadata + metadata_offsets[selected_index[i]],
                                             n.get_memblock().get());
    }
  }
  return result;
}