/**
 * Creates a result array for an elementwise
 * reduce operation.
 */
static ndarray_node_ptr make_elwise_reduce_result(const ndt::type& result_dt, uint32_t access_flags, bool keepdims,
                            int ndim, const dynd_bool *reduce_axes, const intptr_t *src_shape, const int *src_axis_perm,
                            char *&result_originptr, intptr_t *result_strides)
{
    dimvector result_shape(ndim);

    // Calculate the shape and strides of the reduction result
    // without removing the dimensions
    intptr_t num_elements = 1;
    intptr_t stride = result_dt.get_data_size();
    for (int i = 0; i < ndim; ++i) {
        int p = src_axis_perm[i];
        if (reduce_axes[p]) {
            result_shape[p] = 1;
            result_strides[p] = 0;
        } else {
            intptr_t size = src_shape[p];
            result_shape[p] = size;
            if (size == 1) {
                result_strides[p] = 0;
            } else {
                result_strides[p] = stride;
                stride *= size;
                num_elements *= size;
            }
        }
    }

    // Allocate the memoryblock for the data
    char *originptr = NULL;
    memory_block_ptr memblock = make_fixed_size_pod_memory_block(result_dt.get_data_size() * num_elements,
                    result_dt.get_data_alignment(), &originptr,
                    NULL, NULL);

    ndarray_node_ptr result;

    // Create the strided ndarray node, compressing the dimensions if requested
    if (!keepdims) {
        dimvector compressed_shape(ndim), compressed_strides(ndim);
        int compressed_ndim = 0;
        for (int i = 0; i < ndim; ++i) {
            if (!reduce_axes[i]) {
                compressed_shape[compressed_ndim] = result_shape[i];
                compressed_strides[compressed_ndim] = result_strides[i];
                ++compressed_ndim;
            }
        }
        result = make_strided_ndarray_node(result_dt, compressed_ndim,
                    compressed_shape.get(), compressed_strides.get(), originptr, access_flags, memblock);
    } else {
        result = make_strided_ndarray_node(result_dt, ndim,
                    result_shape.get(), result_strides, originptr, access_flags, memblock);
    }
    // Because we just allocated this buffer, we can write to it even though
    // it may be marked readonly (the readonly flag is inherited from the src
    // memory block)
    result_originptr = const_cast<char *>(result->get_readonly_originptr());

    return DYND_MOVE(result);
}
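// A minimal standalone sketch (plain C++, hypothetical names, no dynd
// dependencies) of the layout the loop above produces for a keepdims
// reduction: reduced axes get extent 1 and stride 0, and strides follow the
// source's axis permutation from innermost to outermost.
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

static void reduce_result_layout(const std::vector<std::intptr_t> &src_shape,
                                 const std::vector<bool> &reduce_axes,
                                 const std::vector<int> &axis_perm,
                                 std::intptr_t element_size,
                                 std::vector<std::intptr_t> &shape,
                                 std::vector<std::intptr_t> &strides)
{
    const std::size_t ndim = src_shape.size();
    shape.assign(ndim, 1);
    strides.assign(ndim, 0);
    std::intptr_t stride = element_size;
    for (std::size_t i = 0; i < ndim; ++i) {
        int p = axis_perm[i]; // innermost (smallest-stride) axis comes first
        if (!reduce_axes[p] && src_shape[p] != 1) {
            shape[p] = src_shape[p];
            strides[p] = stride;
            stride *= src_shape[p];
        }
    }
}

int main()
{
    // Reduce axis 1 of a C-order 3x4 float64 array (axis_perm == {1, 0}).
    std::vector<std::intptr_t> shape, strides;
    reduce_result_layout({3, 4}, {false, true}, {1, 0}, 8, shape, strides);
    assert(shape[0] == 3 && shape[1] == 1);
    assert(strides[0] == 8 && strides[1] == 0);
    return 0;
}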
Example #2
byteswap_type::byteswap_type(const ndt::type& value_type)
    : base_expr_type(byteswap_type_id, expr_kind, value_type.get_data_size(),
                            value_type.get_data_alignment(), type_flag_scalar, 0),
                    m_value_type(value_type),
                    m_operand_type(ndt::make_fixedbytes(value_type.get_data_size(), value_type.get_data_alignment()))
{
    if (!value_type.is_builtin()) {
        throw dynd::type_error("byteswap_type: Only built-in types are supported presently");
    }
}
Example #3
/**
 * Scans through the types, and tries to view data
 * for 'tp'/'arrmeta' as 'view_tp'. For this to be
 * possible, one must be able to construct
 * arrmeta for 'tp' corresponding to the same data.
 *
 * \param tp  The type of the data.
 * \param arrmeta  The array arrmeta of the data.
 * \param view_tp  The type the data should be viewed as.
 * \param view_arrmeta The array arrmeta of the view, which should be populated.
 * \param embedded_reference  The containing memory block in case the data was embedded.
 *
 * \returns If it worked, returns true, otherwise false.
 */
static bool try_view(const ndt::type &tp, const char *arrmeta, const ndt::type &view_tp, char *view_arrmeta,
                     dynd::memory_block_data *embedded_reference)
{
  switch (tp.get_type_id()) {
  case fixed_dim_type_id: {
    // All the strided dim types share the same arrmeta, so can be
    // treated uniformly here
    const ndt::base_dim_type *sdt = tp.extended<ndt::base_dim_type>();
    const fixed_dim_type_arrmeta *md = reinterpret_cast<const fixed_dim_type_arrmeta *>(arrmeta);
    switch (view_tp.get_type_id()) {
    case fixed_dim_type_id: { // strided as fixed
      const ndt::fixed_dim_type *view_fdt = view_tp.extended<ndt::fixed_dim_type>();
      // The size must match exactly in this case
      if (md->dim_size != view_fdt->get_fixed_dim_size()) {
        return false;
      }
      fixed_dim_type_arrmeta *view_md = reinterpret_cast<fixed_dim_type_arrmeta *>(view_arrmeta);
      if (try_view(sdt->get_element_type(), arrmeta + sizeof(fixed_dim_type_arrmeta), view_fdt->get_element_type(),
                   view_arrmeta + sizeof(fixed_dim_type_arrmeta), embedded_reference)) {
        *view_md = *md;
        return true;
      } else {
        return false;
      }
    }
    default: // other cases cannot be handled
      return false;
    }
  }
  default:
    if (tp == view_tp) {
      // require equal types otherwise
      if (tp.get_arrmeta_size() > 0) {
        tp.extended()->arrmeta_copy_construct(view_arrmeta, arrmeta, embedded_reference);
      }
      return true;
    } else if (tp.is_pod() && view_tp.is_pod() && tp.get_data_size() == view_tp.get_data_size() &&
               tp.get_data_alignment() >= view_tp.get_data_alignment()) {
      // POD types with matching properties
      if (view_tp.get_arrmeta_size() > 0) {
        view_tp.extended()->arrmeta_default_construct(view_arrmeta, true);
      }
      return true;
    } else {
      return false;
    }
  }
}
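// The recursion in try_view can be modeled without dynd's type system: a view
// is allowed when every fixed dimension matches exactly and the leaf types
// are size-compatible POD with no stricter alignment requirement. A
// simplified standalone sketch of that rule (hypothetical struct, not dynd's
// API):
#include <cstddef>
#include <vector>

struct simple_type {
    std::vector<std::size_t> dims; // fixed dimension sizes, outermost first
    std::size_t leaf_size;         // size of the POD leaf type
    std::size_t leaf_align;        // alignment of the POD leaf type
};

static bool can_view(const simple_type &tp, const simple_type &view_tp,
                     std::size_t level = 0)
{
    if (level < tp.dims.size() || level < view_tp.dims.size()) {
        // Dimension level: both types need a dimension of the same size here,
        // then the check recurses into the element type.
        if (level >= tp.dims.size() || level >= view_tp.dims.size() ||
            tp.dims[level] != view_tp.dims[level]) {
            return false;
        }
        return can_view(tp, view_tp, level + 1);
    }
    // Leaf level: equal data size and no stricter alignment requirement.
    return tp.leaf_size == view_tp.leaf_size &&
           tp.leaf_align >= view_tp.leaf_align;
}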
static void array_getbuffer_pep3118_bytes(const ndt::type &tp, const char *arrmeta, char *data, Py_buffer *buffer,
                                          int flags)
{
  buffer->itemsize = 1;
  if (flags & PyBUF_FORMAT) {
    buffer->format = (char *)"c";
  }
  else {
    buffer->format = NULL;
  }
  buffer->ndim = 1;
#if PY_VERSION_HEX == 0x02070000
  buffer->internal = NULL;
  buffer->shape = &buffer->smalltable[0];
  buffer->strides = &buffer->smalltable[1];
#else
  buffer->internal = malloc(2 * sizeof(intptr_t));
  buffer->shape = reinterpret_cast<Py_ssize_t *>(buffer->internal);
  buffer->strides = buffer->shape + 1;
#endif
  buffer->strides[0] = 1;

  if (tp.get_id() == bytes_id) {
    // Variable-length bytes type
    buffer->buf = reinterpret_cast<bytes *>(data)->begin();
    buffer->len = reinterpret_cast<bytes *>(data)->size();
  }
  else {
    // Fixed-length bytes type
    buffer->len = tp.get_data_size();
  }
  buffer->shape[0] = buffer->len;
}
Example #5
ndt::type dynd::ndt::make_fixed_dim(size_t ndim, const intptr_t *shape,
                const ndt::type& uniform_tp, const int *axis_perm)
{
    if (axis_perm == NULL) {
        // Build a C-order fixed array type
        ndt::type result = uniform_tp;
        for (ptrdiff_t i = (ptrdiff_t)ndim-1; i >= 0; --i) {
            result = ndt::make_fixed_dim(shape[i], result);
        }
        return result;
    } else {
        // Create strides with the axis permutation
        dimvector strides(ndim);
        intptr_t stride = uniform_tp.get_data_size();
        for (size_t i = 0; i < ndim; ++i) {
            int i_perm = axis_perm[i];
            size_t dim_size = shape[i_perm];
            strides[i_perm] = dim_size > 1 ? stride : 0;
            stride *= dim_size;
        }
        // Build the fixed array type
        ndt::type result = uniform_tp;
        for (ptrdiff_t i = (ptrdiff_t)ndim-1; i >= 0; --i) {
            result = ndt::make_fixed_dim(shape[i], result, strides[i]);
        }
        return result;
    }
}
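// A standalone sketch (plain C++, hypothetical names) of the stride
// computation above: axis_perm lists the axes from innermost
// (fastest-varying) to outermost, and size-1 dimensions get stride 0.
#include <cassert>
#include <cstdint>
#include <vector>

static std::vector<std::intptr_t>
permuted_strides(const std::vector<std::intptr_t> &shape,
                 const std::vector<int> &axis_perm,
                 std::intptr_t element_size)
{
    std::vector<std::intptr_t> strides(shape.size(), 0);
    std::intptr_t stride = element_size;
    for (std::size_t i = 0; i < shape.size(); ++i) {
        int p = axis_perm[i];
        strides[p] = shape[p] > 1 ? stride : 0;
        stride *= shape[p];
    }
    return strides;
}

int main()
{
    // A 3x4 array of 4-byte elements in Fortran order: axis 0 varies fastest.
    std::vector<std::intptr_t> s = permuted_strides({3, 4}, {0, 1}, 4);
    assert(s[0] == 4 && s[1] == 12);
    return 0;
}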
Example #6
fixed_dim_type::fixed_dim_type(size_t dimension_size, const ndt::type& element_tp, intptr_t stride)
    : base_uniform_dim_type(fixed_dim_type_id, element_tp, 0, element_tp.get_data_alignment(),
                    0, type_flag_none),
            m_stride(stride), m_dim_size(dimension_size)
{
    size_t child_element_size = element_tp.get_data_size();
    if (child_element_size == 0) {
        stringstream ss;
        ss << "Cannot create dynd fixed_dim type with element type " << element_tp;
        ss << ", as it does not have a fixed size";
        throw runtime_error(ss.str());
    }
    if (dimension_size <= 1 && stride != 0) {
        stringstream ss;
        ss << "Cannot create dynd fixed_dim type with size " << dimension_size;
        ss << " and stride " << stride << ", as the stride must be zero when the dimension size is 1";
        throw runtime_error(ss.str());
    }
    if (dimension_size > 1 && stride == 0) {
        stringstream ss;
        ss << "Cannot create dynd fixed_dim type with size " << dimension_size;
        ss << " and stride 0, as the stride must be non-zero when the dimension size is > 1";
        throw runtime_error(ss.str());
    }
    m_members.data_size = m_stride * (m_dim_size-1) + child_element_size;
    // Propagate the zeroinit flag from the element
    m_members.flags |= (element_tp.get_flags()&type_flag_zeroinit);

    // Copy ndobject properties and functions from the first non-array dimension
    get_scalar_properties_and_functions(m_array_properties, m_array_functions);
}
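// The data_size formula above only needs to cover the last element, hence
// stride * (dim_size - 1) + element_size. A small standalone check
// (hypothetical helper, not dynd's API):
#include <cassert>
#include <cstdint>

static std::intptr_t fixed_dim_data_size(std::intptr_t dim_size,
                                         std::intptr_t stride,
                                         std::intptr_t element_size)
{
    return dim_size > 0 ? stride * (dim_size - 1) + element_size : 0;
}

int main()
{
    // Five contiguous float64 elements: 4 * 8 + 8 == 40 bytes.
    assert(fixed_dim_data_size(5, 8, 8) == 40);
    // A size-1 dimension has stride 0 and occupies a single element.
    assert(fixed_dim_data_size(1, 0, 8) == 8);
    return 0;
}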
Example #7
view_type::view_type(const ndt::type& value_type, const ndt::type& operand_type)
    : base_expression_type(view_type_id, expression_kind, operand_type.get_data_size(),
                    operand_type.get_data_alignment(),
                    inherited_flags(value_type.get_flags(), operand_type.get_flags()),
                    operand_type.get_metadata_size()),
            m_value_type(value_type), m_operand_type(operand_type)
{
    if (value_type.get_data_size() != operand_type.value_type().get_data_size()) {
        std::stringstream ss;
        ss << "view_type: Cannot view " << operand_type.value_type() << " as " << value_type << " because they have different sizes";
        throw std::runtime_error(ss.str());
    }
    if (!value_type.is_pod()) {
        throw std::runtime_error("view_type: Only POD types are supported");
    }
}
Example #8
size_t bytes_type::make_assignment_kernel(
    void *ckb, intptr_t ckb_offset, const ndt::type &dst_tp,
    const char *dst_arrmeta, const ndt::type &src_tp, const char *src_arrmeta,
    kernel_request_t kernreq, const eval::eval_context *ectx) const
{
    if (this == dst_tp.extended()) {
        switch (src_tp.get_type_id()) {
            case bytes_type_id: {
                return make_blockref_bytes_assignment_kernel(ckb, ckb_offset,
                                get_data_alignment(), dst_arrmeta,
                                src_tp.get_data_alignment(), src_arrmeta,
                                kernreq, ectx);
            }
            case fixedbytes_type_id: {
                return make_fixedbytes_to_blockref_bytes_assignment_kernel(ckb, ckb_offset,
                                get_data_alignment(), dst_arrmeta,
                                src_tp.get_data_size(), src_tp.get_data_alignment(),
                                kernreq, ectx);
            }
            default: {
                if (!src_tp.is_builtin()) {
                    return src_tp.extended()->make_assignment_kernel(
                        ckb, ckb_offset, dst_tp, dst_arrmeta, src_tp,
                        src_arrmeta, kernreq, ectx);
                }
                break;
            }
        }
    }

    stringstream ss;
    ss << "Cannot assign from " << src_tp << " to " << dst_tp;
    throw runtime_error(ss.str());
}
Example #9
expr_type::expr_type(const ndt::type& value_type, const ndt::type& operand_type,
                const expr_kernel_generator *kgen)
    : base_expression_type(expr_type_id, expression_kind,
                        operand_type.get_data_size(), operand_type.get_data_alignment(),
                        inherited_flags(value_type.get_flags(), operand_type.get_flags()),
                        operand_type.get_metadata_size(), value_type.get_ndim()),
                    m_value_type(value_type), m_operand_type(operand_type),
                    m_kgen(kgen)
{
    if (operand_type.get_type_id() != cstruct_type_id) {
        stringstream ss;
        ss << "expr_type can only be constructed with a cstruct as its operand, given ";
        ss << operand_type;
        throw runtime_error(ss.str());
    }
    const cstruct_type *fsd = static_cast<const cstruct_type *>(operand_type.extended());
    size_t field_count = fsd->get_field_count();
    if (field_count == 1) {
        throw runtime_error("expr_type is for 2 or more operands, use unary_expr_type for 1 operand");
    }
    const ndt::type *field_types = fsd->get_field_types();
    for (size_t i = 0; i != field_count; ++i) {
        if (field_types[i].get_type_id() != pointer_type_id) {
            stringstream ss;
            ss << "each field of the expr_type's operand must be a pointer, field " << i;
            ss << " is " << field_types[i];
            throw runtime_error(ss.str());
        }
    }
}
Example #10
adapt_type::adapt_type(const ndt::type &operand_type,
                       const ndt::type &value_type, const nd::string &op)
    : base_expr_type(
          adapt_type_id, expr_kind, operand_type.get_data_size(),
          operand_type.get_data_alignment(),
          inherited_flags(value_type.get_flags(), operand_type.get_flags()), 0),
      m_value_type(value_type), m_operand_type(operand_type), m_op(op)
{
  if (!value_type.is_builtin() &&
      value_type.extended()->adapt_type(operand_type.value_type(), op,
                                        m_forward, m_reverse)) {
  } else if (!operand_type.value_type().is_builtin() &&
             operand_type.value_type().extended()->reverse_adapt_type(
                 value_type, op, m_forward, m_reverse)) {
  } else {
    stringstream ss;
    ss << "Cannot create type ";
    print_type(ss);
    throw type_error(ss.str());
  }

  // If the operand is an expression, make a buffering arrfunc
  if (m_operand_type.get_kind() == expr_kind && !m_forward.is_null() &&
      m_operand_type != m_forward.get_type()->get_arg_type(0)) {
    m_forward = make_chain_arrfunc(
        make_arrfunc_from_assignment(m_forward.get_type()->get_arg_type(0),
                                     m_operand_type, assign_error_default),
        m_forward, m_forward.get_type()->get_arg_type(0));
  }
}
Example #11
intptr_t dynd::make_cuda_to_device_builtin_type_assignment_kernel(
    const callable_type_data *DYND_UNUSED(self),
    const ndt::callable_type *DYND_UNUSED(af_tp), char *DYND_UNUSED(data),
    void *ckb, intptr_t ckb_offset, const ndt::type &dst_tp,
    const char *DYND_UNUSED(dst_arrmeta), intptr_t DYND_UNUSED(nsrc),
    const ndt::type *src_tp, const char *const *DYND_UNUSED(src_arrmeta),
    kernel_request_t kernreq, const eval::eval_context *ectx,
    const nd::array &DYND_UNUSED(kwds),
    const std::map<std::string, ndt::type> &DYND_UNUSED(tp_vars))
{
  assign_error_mode errmode = ectx->errmode;

  if (errmode != assign_error_nocheck &&
      is_lossless_assignment(dst_tp, *src_tp)) {
    errmode = assign_error_nocheck;
  }

  if (!dst_tp.is_builtin() || !src_tp->is_builtin() ||
      errmode == assign_error_default) {
    stringstream ss;
    ss << "cannot assign to CUDA device with types " << *src_tp << " to "
       << dst_tp;
    throw runtime_error(ss.str());
  }

  nd::cuda_host_to_device_assign_ck::make(ckb, kernreq, ckb_offset,
                                          dst_tp.get_data_size());
  return make_builtin_type_assignment_kernel(
      ckb, ckb_offset, dst_tp.get_type_id(), src_tp->get_type_id(),
      kernel_request_single, errmode);
}
Example #12
ndt::adapt_type::adapt_type(const ndt::type &value_tp, const ndt::type &storage_tp, const nd::callable &forward,
                            const nd::callable &inverse)
    : base_expr_type(adapt_id, storage_tp.get_data_size(), storage_tp.get_data_alignment(), type_flag_none,
                     storage_tp.get_arrmeta_size(), storage_tp.get_ndim()),
      m_value_tp(value_tp), m_storage_tp(storage_tp), m_forward(forward), m_inverse(inverse)
{
}
Example #13
byteswap_type::byteswap_type(const ndt::type& value_type, const ndt::type& operand_type)
    : base_expr_type(byteswap_type_id, expr_kind, operand_type.get_data_size(),
                    operand_type.get_data_alignment(), type_flag_scalar, 0),
            m_value_type(value_type), m_operand_type(operand_type)
{
    // Only a bytes type may be the operand to the byteswap
    if (operand_type.value_type().get_type_id() != fixedbytes_type_id) {
        std::stringstream ss;
        ss << "byteswap_type: The operand to the type must have a value type of bytes, not " << operand_type.value_type();
        throw dynd::type_error(ss.str());
    }
    // Automatically realign if needed
    if (operand_type.value_type().get_data_alignment() < value_type.get_data_alignment()) {
        m_operand_type = ndt::make_view(operand_type,
                        ndt::make_fixedbytes(operand_type.get_data_size(), value_type.get_data_alignment()));
    }
}
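// What the byteswap expression ultimately does to each fixed-size value can
// be illustrated standalone: reverse the value's bytes in place. A minimal
// sketch (plain C++, not dynd's kernel machinery):
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>

static void byteswap_bytes(char *data, std::size_t size)
{
    std::reverse(data, data + size);
}

int main()
{
    std::uint32_t x = 0x11223344u;
    byteswap_bytes(reinterpret_cast<char *>(&x), sizeof(x));
    assert(x == 0x44332211u);
    return 0;
}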
Example #14
unary_expr_type::unary_expr_type(const ndt::type& value_type, const ndt::type& operand_type,
                const expr_kernel_generator *kgen)
    : base_expression_type(unary_expr_type_id, expression_kind,
                        operand_type.get_data_size(), operand_type.get_data_alignment(),
                        inherited_flags(value_type.get_flags(), operand_type.get_flags()),
                        operand_type.get_metadata_size(), value_type.get_ndim()),
                    m_value_type(value_type), m_operand_type(operand_type),
                    m_kgen(kgen)
{
}
Example #15
fixed_dim_type::fixed_dim_type(size_t dimension_size, const ndt::type& element_tp)
    : base_uniform_dim_type(fixed_dim_type_id, element_tp, 0, element_tp.get_data_alignment(),
                    0, type_flag_none),
            m_dim_size(dimension_size)
{
    size_t child_element_size = element_tp.get_data_size();
    if (child_element_size == 0) {
        stringstream ss;
        ss << "Cannot create dynd fixed_dim type with element type " << element_tp;
        ss << ", as it does not have a fixed size";
        throw runtime_error(ss.str());
    }
    m_stride = m_dim_size > 1 ? element_tp.get_data_size() : 0;
    m_members.data_size = m_stride * (m_dim_size-1) + child_element_size;
    // Propagate the operand flags from the element
    m_members.flags |= (element_tp.get_flags()&type_flags_operand_inherited);

    // Copy ndobject properties and functions from the first non-array dimension
    get_scalar_properties_and_functions(m_array_properties, m_array_functions);
}
ndarray_node_ptr dynd::eval::evaluate_strided_with_unary_kernel(ndarray_node *node, const eval::eval_context *DYND_UNUSED(ectx),
                                bool copy, uint32_t access_flags,
                                const ndt::type& dst_tp, kernel_instance<unary_operation_pair_t>& operation)
{
    const ndt::type& src_tp = node->get_type();
    ndarray_node_ptr result;
    int ndim = node->get_ndim();

    // Adjust the access flags, and force a copy if the access flags require it
    eval::process_access_flags(access_flags, node->get_access_flags(), copy);

    // For blockref result dtypes, this is the memblock
    // where the variable sized data goes
    memory_block_ptr dst_memblock;

    // Generate the axis_perm from the input strides, and use it to allocate the output
    shortvector<int> axis_perm(ndim);
    const intptr_t *node_strides = node->get_strides();
    char *result_originptr;
    strides_to_axis_perm(ndim, node_strides, axis_perm.get());

    result = initialize_dst_memblock(copy, dst_tp, ndim, node->get_shape(), axis_perm.get(),
                        access_flags, operation, node->get_data_memory_block(), dst_memblock, result_originptr);

    // Execute the kernel for all the elements
    raw_ndarray_iter<1,1> iter(node->get_ndim(), node->get_shape(),
                    result_originptr, result->get_strides(),
                    node->get_readonly_originptr(), node->get_strides());
    
    intptr_t innersize = iter.innersize();
    intptr_t dst_stride = iter.innerstride<0>();
    intptr_t src0_stride = iter.innerstride<1>();
    unary_specialization_t uspec = get_unary_specialization(dst_stride, dst_tp.get_data_size(),
                                                                src0_stride, src_tp.get_data_size());
    unary_operation_t kfunc = operation.specializations[uspec];
    if (innersize > 0) {
        do {
            kfunc(iter.data<0>(), dst_stride,
                        iter.data<1>(), src0_stride,
                        innersize, operation.auxdata);
        } while (iter.iternext());
    }

    // Finalize the destination memory block if it was a blockref dtype
    if (dst_memblock.get() != NULL) {
        memory_block_pod_allocator_api *api = get_memory_block_pod_allocator_api(dst_memblock.get());
        api->finalize(dst_memblock.get());
    }

    return DYND_MOVE(result);
}
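// The kernel invoked in the loop above follows a strided unary calling
// convention: destination pointer and byte stride, source pointer and byte
// stride, and an element count. A minimal standalone sketch of such a kernel
// (hypothetical int32 negation, not dynd's kernel API):
#include <cassert>
#include <cstdint>

static void negate_int32_strided(char *dst, std::intptr_t dst_stride,
                                 const char *src, std::intptr_t src_stride,
                                 std::intptr_t count)
{
    for (std::intptr_t i = 0; i < count; ++i) {
        *reinterpret_cast<std::int32_t *>(dst) =
            -*reinterpret_cast<const std::int32_t *>(src);
        dst += dst_stride;
        src += src_stride;
    }
}

int main()
{
    std::int32_t src[3] = {1, 2, 3}, dst[3] = {0, 0, 0};
    negate_int32_strided(reinterpret_cast<char *>(dst), sizeof(std::int32_t),
                         reinterpret_cast<const char *>(src),
                         sizeof(std::int32_t), 3);
    assert(dst[0] == -1 && dst[1] == -2 && dst[2] == -3);
    return 0;
}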
Example #17
size_t string_type::make_assignment_kernel(
                ckernel_builder *out, size_t offset_out,
                const ndt::type& dst_tp, const char *dst_metadata,
                const ndt::type& src_tp, const char *src_metadata,
                kernel_request_t kernreq, assign_error_mode errmode,
                const eval::eval_context *ectx) const
{
    if (this == dst_tp.extended()) {
        switch (src_tp.get_type_id()) {
            case string_type_id: {
                return make_blockref_string_assignment_kernel(out, offset_out,
                                dst_metadata, get_encoding(),
                                src_metadata, static_cast<const base_string_type *>(src_tp.extended())->get_encoding(),
                                kernreq, errmode, ectx);
            }
            case fixedstring_type_id: {
                return make_fixedstring_to_blockref_string_assignment_kernel(out, offset_out,
                                dst_metadata, get_encoding(),
                                src_tp.get_data_size(),
                                static_cast<const base_string_type *>(src_tp.extended())->get_encoding(),
                                kernreq, errmode, ectx);
            }
            default: {
                if (!src_tp.is_builtin()) {
                    return src_tp.extended()->make_assignment_kernel(out, offset_out,
                                    dst_tp, dst_metadata,
                                    src_tp, src_metadata,
                                    kernreq, errmode, ectx);
                } else {
                    return make_builtin_to_string_assignment_kernel(out, offset_out,
                                dst_tp, dst_metadata,
                                src_tp.get_type_id(),
                                kernreq, errmode, ectx);
                }
            }
        }
    } else {
        if (dst_tp.is_builtin()) {
            return make_string_to_builtin_assignment_kernel(out, offset_out,
                            dst_tp.get_type_id(),
                            src_tp, src_metadata,
                            kernreq, errmode, ectx);
        } else {
            stringstream ss;
            ss << "Cannot assign from " << src_tp << " to " << dst_tp;
            throw dynd::type_error(ss.str());
        }
    }
}
void dynd::typed_data_copy(const ndt::type& tp,
                const char *dst_arrmeta, char *dst_data,
                const char *src_arrmeta, const char *src_data)
{
    size_t data_size = tp.get_data_size();
    if (tp.is_pod()) {
        memcpy(dst_data, src_data, data_size);
    } else {
        unary_ckernel_builder k;
        make_assignment_kernel(&k, 0, tp, dst_arrmeta, tp, src_arrmeta,
                               kernel_request_single,
                               &eval::default_eval_context);
        k(dst_data, src_data);
    }
}
Example #19
ndt::type ndt::make_unaligned(const ndt::type& value_type)
{
  if (value_type.get_data_alignment() > 1) {
    // Only do something if it requires alignment
    if (value_type.get_kind() != expr_kind) {
      return ndt::make_view(
          value_type, ndt::make_fixed_bytes(value_type.get_data_size(), 1));
    } else {
      const ndt::type &sdt = value_type.storage_type();
      return ndt::type(
          value_type.extended<base_expr_type>()->with_replaced_storage_type(
              ndt::make_view(sdt,
                             ndt::make_fixed_bytes(sdt.get_data_size(), 1))));
    }
  } else {
    return value_type;
  }
}
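// Why an unaligned view matters: once the storage is viewed as fixedbytes
// with alignment 1, reads must go through memcpy rather than a direct
// dereference. A standalone illustration (plain C++, hypothetical names):
#include <cassert>
#include <cstring>

static double read_unaligned_double(const char *p)
{
    double value;
    std::memcpy(&value, p, sizeof(value));
    return value;
}

int main()
{
    char buffer[sizeof(double) + 1] = {0};
    double x = 3.5;
    std::memcpy(buffer + 1, &x, sizeof(x)); // deliberately misaligned store
    assert(read_unaligned_double(buffer + 1) == 3.5);
    return 0;
}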
Example #20
nd::array dynd::parse_json(const ndt::type &tp, const char *json_begin,
                           const char *json_end, const eval::eval_context *ectx)
{
    nd::array result;
    if (tp.get_data_size() != 0) {
        result = nd::empty(tp);
        parse_json(result, json_begin, json_end, ectx);
        if (!tp.is_builtin()) {
            tp.extended()->metadata_finalize_buffers(result.get_ndo_meta());
        }
        result.flag_as_immutable();
        return result;
    } else {
        stringstream ss;
        ss << "The dynd type provided to parse_json, " << tp << ", cannot be used because it requires additional shape information";
        throw runtime_error(ss.str());
    }
}
Example #21
convert_type::convert_type(const ndt::type &value_type,
                           const ndt::type &operand_type)
    : base_expr_type(
          convert_type_id, expr_kind, operand_type.get_data_size(),
          operand_type.get_data_alignment(),
          inherited_flags(value_type.get_flags(), operand_type.get_flags()),
          operand_type.get_arrmeta_size(), value_type.get_ndim()),
      m_value_type(value_type), m_operand_type(operand_type)
{
    // An alternative to this error would be to use value_type.value_type(), cutting
    // away the expression part of the given value_type.
    if (m_value_type.get_kind() == expr_kind) {
        std::stringstream ss;
        ss << "convert_type: The destination type " << m_value_type;
        ss << " should not be an expr_kind";
        throw dynd::type_error(ss.str());
    }
}
Example #22
static nd::array view_as_bytes(const nd::array &arr, const ndt::type &tp)
{
  if (arr.get_type().get_flags() & type_flag_destructor) {
    // Can't view arrays of object type
    return nd::array();
  }

  // Get the essential components of the array to analyze
  memory_block_ptr data_ref = arr.get_data_memblock();
  char *data_ptr = arr.get_ndo()->data.ptr;
  ndt::type data_tp = arr.get_type();
  const char *data_meta = arr.get_arrmeta();
  intptr_t data_dim_size = -1, data_stride = 0;
  // Repeatedly refine the data
  while (data_tp.get_type_id() != uninitialized_type_id) {
    refine_bytes_view(data_ref, data_ptr, data_tp, data_meta, data_dim_size, data_stride);
  }
  // Check that it worked, and that the resulting data pointer is aligned
  if (data_dim_size < 0 ||
      !offset_is_aligned(reinterpret_cast<size_t>(data_ptr), tp.extended<ndt::bytes_type>()->get_target_alignment())) {
    // This signals we could not view the data as a
    // contiguous chunk of bytes
    return nd::array();
  }

  char *result_data_ptr = NULL;
  nd::array result(make_array_memory_block(tp.extended()->get_arrmeta_size(), tp.get_data_size(),
                                           tp.get_data_alignment(), &result_data_ptr));
  // Set the bytes extents
  ((char **)result_data_ptr)[0] = data_ptr;
  ((char **)result_data_ptr)[1] = data_ptr + data_dim_size;
  // Set the array arrmeta
  array_preamble *ndo = result.get_ndo();
  ndo->m_type = ndt::type(tp).release();
  ndo->data.ptr = result_data_ptr;
  ndo->data.ref = NULL;
  ndo->m_flags = arr.get_flags();
  // Set the bytes arrmeta
  bytes_type_arrmeta *ndo_meta = reinterpret_cast<bytes_type_arrmeta *>(result.get_arrmeta());
  ndo_meta->blockref = data_ref.release();
  return result;
}
Example #23
void dynd::typed_data_copy(const ndt::type &tp, const char *dst_arrmeta,
                           char *dst_data, const char *src_arrmeta,
                           const char *src_data)
{
  if (tp.get_type_id() == option_type_id) {
    return typed_data_copy(tp.extended<ndt::option_type>()->get_value_type(),
                           dst_arrmeta, dst_data, src_arrmeta, src_data);
  }

  size_t data_size = tp.get_data_size();
  if (tp.is_pod()) {
    memcpy(dst_data, src_data, data_size);
  } else {
    ckernel_builder<kernel_request_host> k;
    make_assignment_kernel(&k, 0, tp, dst_arrmeta, tp, src_arrmeta,
                           kernel_request_single, &eval::default_eval_context);
    expr_single_t fn = k.get()->get_function<expr_single_t>();
    char *src = const_cast<char *>(src_data);
    fn(dst_data, &src, k.get());
  }
}
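// The dispatch above in miniature: trivially copyable ("POD") data takes the
// raw memcpy fast path, anything else goes through a real assignment standing
// in for the generated kernel. A standalone sketch (hypothetical helper):
#include <cassert>
#include <cstring>
#include <string>
#include <type_traits>

template <typename T>
static void copy_value(T &dst, const T &src)
{
    if (std::is_trivially_copyable<T>::value) {
        std::memcpy(static_cast<void *>(&dst), &src, sizeof(T));
    } else {
        dst = src; // stands in for the assignment-kernel path
    }
}

int main()
{
    int a = 0;
    copy_value(a, 7);                // memcpy fast path
    std::string s;
    copy_value(s, std::string("x")); // assignment path
    assert(a == 7 && s == "x");
    return 0;
}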
Example #24
 void internal_allocate()
 {
   m_stride = m_type.get_data_size();
   m_storage = new char[DYND_BUFFER_CHUNK_SIZE * m_stride];
   m_arrmeta = NULL;
   size_t metasize =
       m_type.is_builtin() ? 0 : m_type.extended()->get_arrmeta_size();
   if (metasize != 0) {
     try
     {
       m_arrmeta = new char[metasize];
       m_type.extended()->arrmeta_default_construct(m_arrmeta, 0, NULL, true);
     }
     catch (const std::exception &)
     {
       delete[] m_storage;
       delete[] m_arrmeta;
       throw;
     }
   }
 }
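// The try/catch above exists so that a throwing arrmeta_default_construct
// cannot leak either buffer. The same guarantee can be sketched with RAII
// (hypothetical names, std::unique_ptr instead of raw new/delete):
#include <cstddef>
#include <memory>

struct temp_buffers {
    std::unique_ptr<char[]> storage;
    std::unique_ptr<char[]> arrmeta;

    temp_buffers(std::size_t storage_size, std::size_t arrmeta_size)
        : storage(new char[storage_size]),
          arrmeta(arrmeta_size != 0 ? new char[arrmeta_size] : nullptr)
    {
        // Any initialization that throws after this point is cleaned up by
        // the unique_ptr members' destructors.
    }
};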
Example #25
size_t dynd::make_assignment_kernel(
                ckernel_builder *out, size_t offset_out,
                const ndt::type& dst_tp, const char *dst_metadata,
                const ndt::type& src_tp, const char *src_metadata,
                kernel_request_t kernreq, assign_error_mode errmode,
                const eval::eval_context *ectx)
{
    if (errmode == assign_error_default && ectx != NULL) {
        errmode = ectx->default_errmode;
    }

    if (dst_tp.is_builtin()) {
        if (src_tp.is_builtin()) {
            // If the casting can be done losslessly, disable the error check to find faster code paths
            if (errmode != assign_error_none && is_lossless_assignment(dst_tp, src_tp)) {
                errmode = assign_error_none;
            }

            if (dst_tp.extended() == src_tp.extended()) {
                return make_pod_typed_data_assignment_kernel(out, offset_out,
                                dst_tp.get_data_size(), dst_tp.get_data_alignment(),
                                kernreq);
            } else {
                return make_builtin_type_assignment_kernel(out, offset_out,
                                dst_tp.get_type_id(), src_tp.get_type_id(),
                                kernreq, errmode);
            }
        } else {
            return src_tp.extended()->make_assignment_kernel(out, offset_out,
                            dst_tp, dst_metadata,
                            src_tp, src_metadata,
                            kernreq, errmode, ectx);
        }
    } else {
        return dst_tp.extended()->make_assignment_kernel(out, offset_out,
                        dst_tp, dst_metadata,
                        src_tp, src_metadata,
                        kernreq, errmode, ectx);
    }
}
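// The dispatch in make_assignment_kernel reduces to four outcomes: a raw POD
// copy, a builtin conversion kernel, or delegation to the extended source or
// destination type. A simplified standalone model (hypothetical names):
enum class kernel_choice {
    pod_copy,        // same builtin type on both sides: plain memory copy
    builtin_convert, // differing builtin types: conversion kernel
    delegate_to_src, // extended source type builds the kernel
    delegate_to_dst  // extended destination type builds the kernel
};

static kernel_choice choose_assignment_kernel(bool dst_builtin,
                                              bool src_builtin, bool same_type)
{
    if (dst_builtin) {
        if (src_builtin) {
            return same_type ? kernel_choice::pod_copy
                             : kernel_choice::builtin_convert;
        }
        return kernel_choice::delegate_to_src;
    }
    return kernel_choice::delegate_to_dst;
}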
size_t dynd::make_tuple_identical_assignment_kernel(
    void *ckb, intptr_t ckb_offset, const ndt::type &val_tup_tp,
    const char *dst_arrmeta, const char *src_arrmeta, kernel_request_t kernreq,
    const eval::eval_context *ectx)
{
  if (val_tup_tp.get_kind() != tuple_kind &&
      val_tup_tp.get_kind() != struct_kind) {
    stringstream ss;
    ss << "make_tuple_identical_assignment_kernel: provided type " << val_tup_tp
       << " is not of tuple or struct kind";
    throw runtime_error(ss.str());
  }
  if (val_tup_tp.is_pod()) {
    // For POD structs, get a trivial memory copy kernel
    return make_pod_typed_data_assignment_kernel(
        ckb, ckb_offset, val_tup_tp.get_data_size(),
        val_tup_tp.get_data_alignment(), kernreq);
  }

  auto sd = val_tup_tp.extended<ndt::base_tuple_type>();
  intptr_t field_count = sd->get_field_count();
  const uintptr_t *arrmeta_offsets = sd->get_arrmeta_offsets_raw();
  shortvector<const char *> dst_fields_arrmeta(field_count);
  for (intptr_t i = 0; i != field_count; ++i) {
    dst_fields_arrmeta[i] = dst_arrmeta + arrmeta_offsets[i];
  }
  shortvector<const char *> src_fields_arrmeta(field_count);
  for (intptr_t i = 0; i != field_count; ++i) {
    src_fields_arrmeta[i] = src_arrmeta + arrmeta_offsets[i];
  }

  return make_tuple_unary_op_ckernel(
      nd::copy::get().get(), nd::copy::get().get_type(), ckb, ckb_offset,
      field_count, sd->get_data_offsets(dst_arrmeta), sd->get_field_types_raw(),
      dst_fields_arrmeta.get(), sd->get_data_offsets(src_arrmeta),
      sd->get_field_types_raw(), src_fields_arrmeta.get(), kernreq, ectx);
}
Example #27
// Returns true if the destination type can represent *all* the values
// of the source type, false otherwise. This is used, for example,
// to skip any overflow checks when doing value assignments between differing
// types.
bool dynd::is_lossless_assignment(const ndt::type &dst_tp,
                                  const ndt::type &src_tp)
{
  if (dst_tp.is_builtin() && src_tp.is_builtin()) {
    switch (src_tp.get_kind()) {
    case kind_kind: // TODO: raise an error?
      return true;
    case pattern_kind: // TODO: raise an error?
      return true;
    case bool_kind:
      switch (dst_tp.get_kind()) {
      case bool_kind:
      case sint_kind:
      case uint_kind:
      case real_kind:
      case complex_kind:
        return true;
      case bytes_kind:
        return false;
      default:
        break;
      }
      break;
    case sint_kind:
      switch (dst_tp.get_kind()) {
      case bool_kind:
        return false;
      case sint_kind:
        return dst_tp.get_data_size() >= src_tp.get_data_size();
      case uint_kind:
        return false;
      case real_kind:
        return dst_tp.get_data_size() > src_tp.get_data_size();
      case complex_kind:
        return dst_tp.get_data_size() > 2 * src_tp.get_data_size();
      case bytes_kind:
        return false;
      default:
        break;
      }
      break;
    case uint_kind:
      switch (dst_tp.get_kind()) {
      case bool_kind:
        return false;
      case sint_kind:
        return dst_tp.get_data_size() > src_tp.get_data_size();
      case uint_kind:
        return dst_tp.get_data_size() >= src_tp.get_data_size();
      case real_kind:
        return dst_tp.get_data_size() > src_tp.get_data_size();
      case complex_kind:
        return dst_tp.get_data_size() > 2 * src_tp.get_data_size();
      case bytes_kind:
        return false;
      default:
        break;
      }
      break;
    case real_kind:
      switch (dst_tp.get_kind()) {
      case bool_kind:
      case sint_kind:
      case uint_kind:
        return false;
      case real_kind:
        return dst_tp.get_data_size() >= src_tp.get_data_size();
      case complex_kind:
        return dst_tp.get_data_size() >= 2 * src_tp.get_data_size();
      case bytes_kind:
        return false;
      default:
        break;
      }
    case complex_kind:
      switch (dst_tp.get_kind()) {
      case bool_kind:
      case sint_kind:
      case uint_kind:
      case real_kind:
        return false;
      case complex_kind:
        return dst_tp.get_data_size() >= src_tp.get_data_size();
      case bytes_kind:
        return false;
      default:
        break;
      }
    case string_kind:
      switch (dst_tp.get_kind()) {
      case bool_kind:
      case sint_kind:
      case uint_kind:
      case real_kind:
      case complex_kind:
        return false;
      case bytes_kind:
        return false;
      default:
        break;
      }
    case bytes_kind:
      return dst_tp.get_kind() == bytes_kind &&
             dst_tp.get_data_size() == src_tp.get_data_size();
    default:
      break;
    }

    throw std::runtime_error(
        "unhandled built-in case in is_lossless_assignmently");
  }

  // Use the available base_type to check the casting
  if (!dst_tp.is_builtin()) {
    // Call with dst_dt (the first parameter) first
    return dst_tp.extended()->is_lossless_assignment(dst_tp, src_tp);
  } else {
    // Fall back to src_dt if the dst's extended is NULL
    return src_tp.extended()->is_lossless_assignment(dst_tp, src_tp);
  }
}
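// Two concrete instances of the size rules above, checked directly against
// the types rather than through dynd: int32 -> float64 is lossless because
// the float is strictly larger (8 > 4 bytes), int32 -> float32 is not.
#include <cassert>
#include <cstdint>

int main()
{
    // float64 represents every int32 exactly...
    assert(static_cast<double>(INT32_MAX) == 2147483647.0);
    // ...but float32 cannot (INT32_MAX rounds to 2147483648.0f).
    assert(static_cast<double>(static_cast<float>(INT32_MAX)) != 2147483647.0);
    return 0;
}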
Example #28
static void append_pep3118_format(intptr_t &out_itemsize, const ndt::type &tp, const char *arrmeta,
                                  std::stringstream &o)
{
  switch (tp.get_id()) {
  case bool_id:
    o << "?";
    out_itemsize = 1;
    return;
  case int8_id:
    o << "b";
    out_itemsize = 1;
    return;
  case int16_id:
    o << "h";
    out_itemsize = 2;
    return;
  case int32_id:
    o << "i";
    out_itemsize = 4;
    return;
  case int64_id:
    o << "q";
    out_itemsize = 8;
    return;
  case uint8_id:
    o << "B";
    out_itemsize = 1;
    return;
  case uint16_id:
    o << "H";
    out_itemsize = 2;
    return;
  case uint32_id:
    o << "I";
    out_itemsize = 4;
    return;
  case uint64_id:
    o << "Q";
    out_itemsize = 8;
    return;
  case float32_id:
    o << "f";
    out_itemsize = 4;
    return;
  case float64_id:
    o << "d";
    out_itemsize = 8;
    return;
  case complex_float32_id:
    o << "Zf";
    out_itemsize = 8;
    return;
  case complex_float64_id:
    o << "Zd";
    out_itemsize = 16;
    return;
  case fixed_string_id:
    switch (tp.extended<ndt::fixed_string_type>()->get_encoding()) {
    case string_encoding_ascii: {
      intptr_t element_size = tp.get_data_size();
      o << element_size << "s";
      out_itemsize = element_size;
      return;
    }
    // TODO: Couldn't find documentation for UCS-2 character code?
    case string_encoding_utf_32: {
      intptr_t element_size = tp.get_data_size();
      o << (element_size / 4) << "w";
      out_itemsize = element_size;
      return;
    }
    default:
      break;
    }
    // Pass through to error
    break;
  case fixed_dim_id: {
    ndt::type child_tp = tp;
    o << "(";
    do {
      const ndt::fixed_dim_type *tdt = child_tp.extended<ndt::fixed_dim_type>();
      intptr_t dim_size = tdt->get_fixed_dim_size();
      o << dim_size;
      if (child_tp.get_data_size() != tdt->get_element_type().get_data_size() * dim_size) {
        stringstream ss;
        ss << "Cannot convert dynd type " << tp << " into a PEP 3118 format because it is not C-order";
        throw dynd::type_error(ss.str());
      }
      o << ")";
      child_tp = tdt->get_element_type();
    } while (child_tp.get_id() == fixed_dim_id && (o << ","));
    append_pep3118_format(out_itemsize, child_tp, arrmeta, o);
    out_itemsize = tp.get_data_size();
    return;
  }
  case struct_id: {
    o << "T{";
    const ndt::struct_type *tdt = tp.extended<ndt::struct_type>();
    size_t num_fields = tdt->get_field_count();
    const uintptr_t *offsets = reinterpret_cast<const uintptr_t *>(arrmeta);
    const uintptr_t *arrmeta_offsets = tdt->get_arrmeta_offsets_raw();
    size_t format_offset = 0;
    for (size_t i = 0; i != num_fields; ++i) {
      size_t offset = offsets[i];
      // Add padding bytes
      while (offset > format_offset) {
        o << "x";
        ++format_offset;
      }
      if (offset < format_offset) {
        // DyND allows the order of fields in memory to differ from the logical
        // order, something not supported by PEP 3118
        stringstream ss;
        ss << "Cannot convert dynd type " << tp << " with out of order data layout into a PEP 3118 format string";
        throw type_error(ss.str());
      }
      // The field's type
      append_pep3118_format(out_itemsize, tdt->get_field_type(i), arrmeta ? (arrmeta + arrmeta_offsets[i]) : NULL, o);
      format_offset += out_itemsize;
      // Append the name
      o << ":" << tdt->get_field_name(i) << ":";
    }
    out_itemsize = format_offset;
    o << "}";
    return;
  }
  default:
    break;
  }
  stringstream ss;
  ss << "Cannot convert dynd type " << tp << " into a PEP 3118 format string";
  throw dynd::type_error(ss.str());
}
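// A worked instance of the struct branch above: an aligned struct of
// { int32 a; float64 b; } places b at offset 8, so four padding bytes ('x')
// are emitted between the two fields and the itemsize comes out as 16.
#include <cassert>
#include <sstream>

int main()
{
    std::ostringstream o;
    o << "T{";
    o << "i" << ":a:"; // int32 at offset 0, itemsize 4
    o << "xxxx";       // pad from offset 4 up to offset 8
    o << "d" << ":b:"; // float64 at offset 8, itemsize 8
    o << "}";
    assert(o.str() == "T{i:a:xxxxd:b:}");
    return 0;
}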
Example #29
size_t json_type::make_assignment_kernel(
                ckernel_builder *out, size_t offset_out,
                const ndt::type& dst_tp, const char *dst_metadata,
                const ndt::type& src_tp, const char *src_metadata,
                kernel_request_t kernreq, assign_error_mode errmode,
                const eval::eval_context *ectx) const
{
    if (this == dst_tp.extended()) {
        switch (src_tp.get_type_id()) {
            case json_type_id: {
                // Assume the input is valid JSON when copying from json to json types
                return make_blockref_string_assignment_kernel(out, offset_out,
                                dst_metadata, string_encoding_utf_8,
                                src_metadata, string_encoding_utf_8,
                                kernreq, errmode, ectx);
            }
            case string_type_id:
            case fixedstring_type_id: {
                offset_out = make_kernreq_to_single_kernel_adapter(out, offset_out, kernreq);
                out->ensure_capacity(offset_out + sizeof(string_to_json_kernel_extra));
                string_to_json_kernel_extra *e = out->get_at<string_to_json_kernel_extra>(offset_out);
                e->base.set_function<unary_single_operation_t>(&string_to_json_kernel_extra::single);
                e->base.destructor = &string_to_json_kernel_extra::destruct;
                e->dst_metadata = dst_metadata;
                e->validate = (errmode != assign_error_none);
                if (src_tp.get_type_id() == string_type_id) {
                    return make_blockref_string_assignment_kernel(
                                    out, offset_out + sizeof(string_to_json_kernel_extra),
                                    dst_metadata, string_encoding_utf_8,
                                    src_metadata,
                                    static_cast<const base_string_type *>(src_tp.extended())->get_encoding(),
                                    kernel_request_single, errmode, ectx);
                } else {
                    return make_fixedstring_to_blockref_string_assignment_kernel(
                                    out, offset_out + sizeof(string_to_json_kernel_extra),
                                    dst_metadata, string_encoding_utf_8,
                                    src_tp.get_data_size(),
                                    static_cast<const base_string_type *>(src_tp.extended())->get_encoding(),
                                    kernel_request_single, errmode, ectx);
                }
            }
            default: {
                if (!src_tp.is_builtin()) {
                    return src_tp.extended()->make_assignment_kernel(out, offset_out,
                                    dst_tp, dst_metadata,
                                    src_tp, src_metadata,
                                    kernreq, errmode, ectx);
                } else {
                    return make_builtin_to_string_assignment_kernel(out, offset_out,
                                dst_tp, dst_metadata,
                                src_tp.get_type_id(),
                                kernreq, errmode, ectx);
                }
            }
        }
    } else {
        if (dst_tp.is_builtin()) {
            return make_string_to_builtin_assignment_kernel(out, offset_out,
                            dst_tp.get_type_id(),
                            src_tp, src_metadata,
                            kernreq, errmode, ectx);
        } else {
            stringstream ss;
            ss << "Cannot assign from " << src_tp << " to " << dst_tp;
            throw runtime_error(ss.str());
        }
    }
}
Example #30
cuda_host_type::cuda_host_type(const ndt::type& storage_tp, unsigned int cuda_host_flags)
    : base_memory_type(cuda_host_type_id, storage_tp, storage_tp.get_data_size(),
                       get_cuda_device_data_alignment(storage_tp), 0, storage_tp.get_flags()),
      m_cuda_host_flags(cuda_host_flags)
{
}