Example #1
 arrmeta_holder(const ndt::type &tp)
     : m_arrmeta(malloc(sizeof(ndt::type) + tp.get_arrmeta_size()))
 {
     if (!m_arrmeta) {
         throw std::bad_alloc();
     }
     // Zero-initialize the arrmeta region that follows the type header
     memset(reinterpret_cast<char *>(m_arrmeta) + sizeof(ndt::type), 0,
            tp.get_arrmeta_size());
     try {
         // Placement-new the type into the front of the allocation
         new (m_arrmeta) ndt::type(tp);
     } catch(...) {
         // Construction failed, so release the raw allocation before rethrowing
         free(m_arrmeta);
         throw;
     }
 }
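The same exception-safety pattern (one raw allocation holding an in-place constructed header plus zeroed trailing bytes, freed if construction throws) can be shown without any dynd dependencies. The sketch below is hypothetical: Header and header_holder are stand-ins for illustration, not dynd's arrmeta_holder.

#include <cstddef>
#include <cstdlib>
#include <cstring>
#include <new>
#include <string>
#include <utility>

struct Header {                      // hypothetical stand-in for ndt::type
  std::string name;
  explicit Header(std::string n) : name(std::move(n)) {}
};

class header_holder {
  void *m_mem;

public:
  header_holder(const std::string &name, std::size_t trailing_bytes)
      : m_mem(std::malloc(sizeof(Header) + trailing_bytes))
  {
    if (!m_mem) {
      throw std::bad_alloc();
    }
    // Zero the trailing region that follows the header in the same block
    std::memset(static_cast<char *>(m_mem) + sizeof(Header), 0, trailing_bytes);
    try {
      new (m_mem) Header(name);      // construct the header in place
    } catch (...) {
      std::free(m_mem);              // undo the allocation if construction throws
      throw;
    }
  }

  ~header_holder()
  {
    static_cast<Header *>(m_mem)->~Header();  // destroy before releasing memory
    std::free(m_mem);
  }

  header_holder(const header_holder &) = delete;
  header_holder &operator=(const header_holder &) = delete;
};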
Example #2
/**
 * Scans through the types, and tries to view data
 * for 'tp'/'arrmeta' as 'view_tp'. For this to be
 * possible, one must be able to construct
 * arrmeta for 'view_tp' corresponding to the same data.
 *
 * \param tp  The type of the data.
 * \param arrmeta  The array arrmeta of the data.
 * \param view_tp  The type the data should be viewed as.
 * \param view_arrmeta The array arrmeta of the view, which should be populated.
 * \param embedded_reference  The containing memory block in case the data was embedded.
 *
 * \returns  True if the view was possible, false otherwise.
 */
static bool try_view(const ndt::type &tp, const char *arrmeta, const ndt::type &view_tp, char *view_arrmeta,
                     dynd::memory_block_data *embedded_reference)
{
  switch (tp.get_type_id()) {
  case fixed_dim_type_id: {
    // All the strided dim types share the same arrmeta, so can be
    // treated uniformly here
    const ndt::base_dim_type *sdt = tp.extended<ndt::base_dim_type>();
    const fixed_dim_type_arrmeta *md = reinterpret_cast<const fixed_dim_type_arrmeta *>(arrmeta);
    switch (view_tp.get_type_id()) {
    case fixed_dim_type_id: { // strided as fixed
      const ndt::fixed_dim_type *view_fdt = view_tp.extended<ndt::fixed_dim_type>();
      // The size must match exactly in this case
      if (md->dim_size != view_fdt->get_fixed_dim_size()) {
        return false;
      }
      fixed_dim_type_arrmeta *view_md = reinterpret_cast<fixed_dim_type_arrmeta *>(view_arrmeta);
      if (try_view(sdt->get_element_type(), arrmeta + sizeof(fixed_dim_type_arrmeta), view_fdt->get_element_type(),
                   view_arrmeta + sizeof(fixed_dim_type_arrmeta), embedded_reference)) {
        *view_md = *md;
        return true;
      } else {
        return false;
      }
    }
    default: // other cases cannot be handled
      return false;
    }
  }
  default:
    if (tp == view_tp) {
      // require equal types otherwise
      if (tp.get_arrmeta_size() > 0) {
        tp.extended()->arrmeta_copy_construct(view_arrmeta, arrmeta, embedded_reference);
      }
      return true;
    } else if (tp.is_pod() && view_tp.is_pod() && tp.get_data_size() == view_tp.get_data_size() &&
               tp.get_data_alignment() >= view_tp.get_data_alignment()) {
      // POD types with matching properties
      if (view_tp.get_arrmeta_size() > 0) {
        view_tp.extended()->arrmeta_default_construct(view_arrmeta, true);
      }
      return true;
    } else {
      return false;
    }
  }
}
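The structure of try_view is a switch on the outer type id that recurses into the element type and only commits the outer arrmeta once the child view succeeds. A minimal sketch of that dispatch-then-recurse shape over a toy descriptor follows; ToyType, Kind, and toy_try_view are all made up for illustration, not dynd API.

#include <cstddef>
#include <cstdint>

enum class Kind { fixed_dim, scalar };      // hypothetical toy kinds

struct ToyType {                             // hypothetical toy descriptor
  Kind kind;
  std::int64_t dim_size;                     // used when kind == fixed_dim
  const ToyType *element;                    // element type, nullptr for scalars
  std::size_t data_size;                     // used when kind == scalar
};

// Returns true if data described by 'tp' could be reinterpreted as 'view_tp'
// without copying, mirroring try_view's recursive structure.
static bool toy_try_view(const ToyType &tp, const ToyType &view_tp)
{
  switch (tp.kind) {
  case Kind::fixed_dim:
    // Outer dimension sizes must match exactly, then recurse on the element
    return view_tp.kind == Kind::fixed_dim && tp.dim_size == view_tp.dim_size &&
           toy_try_view(*tp.element, *view_tp.element);
  default:
    // Scalars: require identical storage size, a stand-in for the POD checks
    return view_tp.kind == Kind::scalar && tp.data_size == view_tp.data_size;
  }
}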
Example #3
ndt::adapt_type::adapt_type(const ndt::type &value_tp, const ndt::type &storage_tp, const nd::callable &forward,
                            const nd::callable &inverse)
    : base_expr_type(adapt_id, storage_tp.get_data_size(), storage_tp.get_data_alignment(), type_flag_none,
                     storage_tp.get_arrmeta_size(), storage_tp.get_ndim()),
      m_value_tp(value_tp), m_storage_tp(storage_tp), m_forward(forward), m_inverse(inverse)
{
}
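All this constructor does is forward the storage type's physical layout (data size, alignment, arrmeta size, ndim) to base_expr_type while keeping the value type and the forward/inverse callables for later use. A stripped-down, hypothetical sketch of that "layout comes from storage, presentation comes from value" split:

#include <cstddef>
#include <functional>
#include <utility>

// Hypothetical minimal "type" carrying only layout information
struct toy_type {
  std::size_t data_size;
  std::size_t data_alignment;
};

// Hypothetical adapter: data is stored as 'storage_tp', presented as 'value_tp'
struct toy_adapt_type {
  toy_type value_tp;
  toy_type storage_tp;
  std::function<double(long)> forward;   // storage -> value
  std::function<long(double)> inverse;   // value -> storage

  toy_adapt_type(toy_type value, toy_type storage,
                 std::function<double(long)> fwd,
                 std::function<long(double)> inv)
      : value_tp(value), storage_tp(storage), forward(std::move(fwd)),
        inverse(std::move(inv))
  {
    // As in adapt_type, the physical layout that matters for storage is
    // storage_tp's; value_tp only describes how the values are presented.
  }
};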
Example #4
static nd::array view_concrete(const nd::array &arr, const ndt::type &tp)
{
  // Allocate a result array to attempt the view in it
  nd::array result(make_array_memory_block(tp.get_arrmeta_size()));
  // Copy the fields
  result.get_ndo()->data.ptr = arr.get_ndo()->data.ptr;
  if (arr.get_ndo()->data.ref == NULL) {
    // Embedded data, need reference to the array
    result.get_ndo()->data.ref = arr.get_memblock().release();
  } else {
    // Use the same data reference, avoid producing a chain
    result.get_ndo()->data.ref = arr.get_data_memblock().release();
  }
  result.get_ndo()->m_type = ndt::type(tp).release();
  result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
  // First handle a special case of viewing outermost "var" as "fixed[#]"
  if (arr.get_type().get_type_id() == var_dim_type_id && tp.get_type_id() == fixed_dim_type_id) {
    const var_dim_type_arrmeta *in_am = reinterpret_cast<const var_dim_type_arrmeta *>(arr.get_arrmeta());
    const var_dim_type_data *in_dat = reinterpret_cast<const var_dim_type_data *>(arr.get_readonly_originptr());
    fixed_dim_type_arrmeta *out_am = reinterpret_cast<fixed_dim_type_arrmeta *>(result.get_arrmeta());
    out_am->dim_size = tp.extended<ndt::fixed_dim_type>()->get_fixed_dim_size();
    out_am->stride = in_am->stride;
    if ((intptr_t)in_dat->size == out_am->dim_size) {
      // Use the more specific data reference from the var arrmeta if possible
      if (in_am->blockref != NULL) {
        memory_block_decref(result.get_ndo()->data.ref);
        memory_block_incref(in_am->blockref);
        result.get_ndo()->data.ref = in_am->blockref;
      }
      result.get_ndo()->data.ptr = in_dat->begin + in_am->offset;
      // Try to copy the rest of the arrmeta as a view
      if (try_view(arr.get_type().extended<ndt::base_dim_type>()->get_element_type(),
                   arr.get_arrmeta() + sizeof(var_dim_type_arrmeta),
                   tp.extended<ndt::base_dim_type>()->get_element_type(),
                   result.get_arrmeta() + sizeof(fixed_dim_type_arrmeta), arr.get_memblock().get())) {
        return result;
      }
    }
  }
  // Otherwise try to copy the arrmeta as a view
  else if (try_view(arr.get_type(), arr.get_arrmeta(), tp, result.get_arrmeta(), arr.get_memblock().get())) {
    // If it succeeded, return it
    return result;
  }

  stringstream ss;
  ss << "Unable to view nd::array of type " << arr.get_type();
  ss << " as type " << tp;
  throw type_error(ss.str());
}
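The reference handling above keeps ownership flat: the new view should reference the block that owns the data, not the source array that merely wraps it, so repeated viewing never builds a chain of references. A hypothetical sketch of that idea, using std::shared_ptr in place of dynd's memory blocks:

#include <memory>
#include <vector>

// Hypothetical stand-ins: a data block and a view that keeps it alive
using data_block = std::vector<char>;

struct toy_view {
  char *ptr;                          // where the viewed data starts
  std::shared_ptr<data_block> owner;  // what keeps that data alive
};

// Build a new view over an existing one. Copying 'src.owner' (the reference
// to the underlying block) rather than holding a reference to 'src' itself
// keeps the ownership depth at one level no matter how many times a view is
// re-viewed, which mirrors view_concrete preferring get_data_memblock() over
// get_memblock() when a data reference exists.
static toy_view reinterpret_view(const toy_view &src)
{
  toy_view result;
  result.ptr = src.ptr;
  result.owner = src.owner;
  return result;
}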
Example #5
 base_memory_type(type_id_t type_id, const ndt::type &element_tp,
                  size_t data_size, size_t alignment,
                  size_t storage_arrmeta_offset, flags_type flags)
     : base_type(type_id, memory_kind, data_size, alignment, flags,
                 storage_arrmeta_offset + element_tp.get_arrmeta_size(),
                 element_tp.get_ndim(), 0),
       m_element_tp(element_tp),
       m_storage_arrmeta_offset(storage_arrmeta_offset)
 {
   if (element_tp.get_kind() == memory_kind ||
       element_tp.get_kind() == symbolic_kind) {
     stringstream ss;
     ss << "a memory space cannot be specified for type " << element_tp;
     throw runtime_error(ss.str());
   }
 }
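The arrmeta layout assumed here is that the memory type's own arrmeta occupies the first storage_arrmeta_offset bytes and the wrapped element type's arrmeta follows it, which is why base_type receives storage_arrmeta_offset + element_tp.get_arrmeta_size(). A hypothetical sketch of that arithmetic, including the guard against nesting memory kinds:

#include <cstddef>
#include <stdexcept>

// Hypothetical arrmeta layout for a memory type wrapping an element type:
//
//   [ memory-type arrmeta  : storage_arrmeta_offset bytes ]
//   [ element-type arrmeta : element_arrmeta_size bytes   ]
//
struct toy_memory_arrmeta_layout {
  std::size_t storage_arrmeta_offset;
  std::size_t element_arrmeta_size;

  toy_memory_arrmeta_layout(std::size_t offset, std::size_t elem_size,
                            bool element_is_memory_kind)
      : storage_arrmeta_offset(offset), element_arrmeta_size(elem_size)
  {
    // Mirrors base_memory_type's guard: nesting one memory space inside
    // another is rejected up front.
    if (element_is_memory_kind) {
      throw std::runtime_error("a memory space cannot be nested in another");
    }
  }

  // Total arrmeta size reported to the base class
  std::size_t total_size() const { return storage_arrmeta_offset + element_arrmeta_size; }
  // Where the wrapped element type's arrmeta begins
  std::size_t element_offset() const { return storage_arrmeta_offset; }
};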
Example #6
convert_type::convert_type(const ndt::type &value_type,
                           const ndt::type &operand_type)
    : base_expr_type(
          convert_type_id, expr_kind, operand_type.get_data_size(),
          operand_type.get_data_alignment(),
          inherited_flags(value_type.get_flags(), operand_type.get_flags()),
          operand_type.get_arrmeta_size(), value_type.get_ndim()),
      m_value_type(value_type), m_operand_type(operand_type)
{
    // An alternative to this error would be to use value_type.value_type(), cutting
    // away the expression part of the given value_type.
    if (m_value_type.get_kind() == expr_kind) {
        std::stringstream ss;
        ss << "convert_type: The destination type " << m_value_type;
        ss << " should not be an expr_kind";
        throw dynd::type_error(ss.str());
    }
}
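As with adapt_type, the physical layout is taken from the operand type (that is what is actually stored), while the value type only names the converted view, and the guard rejects a value type that is itself an expression. A self-contained, hypothetical sketch of the same invariant:

#include <cstddef>
#include <sstream>
#include <stdexcept>
#include <string>
#include <utility>

// Hypothetical toy descriptor; 'is_expr' marks expression types
struct toy_type_desc {
  std::string name;
  bool is_expr;
  std::size_t data_size;
  std::size_t data_alignment;
};

struct toy_convert_type {
  toy_type_desc value_type;
  toy_type_desc operand_type;

  toy_convert_type(toy_type_desc value, toy_type_desc operand)
      : value_type(std::move(value)), operand_type(std::move(operand))
  {
    // Physical layout comes from the operand type (that is what is stored);
    // the value type is only the logical view, so it must not itself be an
    // expression type -- the same guard convert_type enforces.
    if (value_type.is_expr) {
      std::stringstream ss;
      ss << "toy_convert_type: the destination type " << value_type.name
         << " should not be an expression type";
      throw std::runtime_error(ss.str());
    }
  }
};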
Example #7
pointer_type::pointer_type(const ndt::type& target_tp)
    : base_expr_type(pointer_type_id, expr_kind, sizeof(void *),
                    sizeof(void *),
                    inherited_flags(target_tp.get_flags(), type_flag_zeroinit|type_flag_blockref),
                    sizeof(pointer_type_arrmeta) + target_tp.get_arrmeta_size(),
                    target_tp.get_ndim()),
                    m_target_tp(target_tp)
{
    // I'm not 100% sure how blockref pointer types should interact with
    // the computational subsystem; the details will have to shake out
    // when we want to actually do something with them.
    if (target_tp.get_kind() == expr_kind && target_tp.get_type_id() != pointer_type_id) {
        stringstream ss;
        ss << "A dynd pointer type's target cannot be the expression type ";
        ss << target_tp;
        throw dynd::type_error(ss.str());
    }
}
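A pointer type's data is a single pointer-sized slot (hence sizeof(void *) for both size and alignment), and its own arrmeta, which records which memory block owns the pointed-to data, sits in front of the target type's arrmeta. A hypothetical sketch of that shape, with std::shared_ptr standing in for a memory block reference:

#include <cstddef>
#include <memory>
#include <vector>

// Hypothetical stand-in for a memory block that owns the pointed-to data
using toy_block = std::shared_ptr<std::vector<char>>;

// Arrmeta a pointer-like type would carry in front of the target type's
// arrmeta: a reference keeping the target storage alive, plus an offset.
struct toy_pointer_arrmeta {
  toy_block blockref;      // keeps the pointed-to storage alive
  std::ptrdiff_t offset;   // where inside that block the target data begins
};

// The data itself is just a pointer-sized slot, matching the
// sizeof(void *) used for both data size and alignment in pointer_type.
struct toy_pointer_data {
  const char *ptr;
};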
Example #8
static nd::array view_from_bytes(const nd::array &arr, const ndt::type &tp)
{
  if (tp.get_flags() & (type_flag_blockref | type_flag_destructor | type_flag_not_host_readable)) {
    // Bytes cannot be viewed as blockref types, types which require
    // destruction, or types not on host memory.
    return nd::array();
  }

  const bytes_type_arrmeta *bytes_meta = reinterpret_cast<const bytes_type_arrmeta *>(arr.get_arrmeta());
  bytes_type_data *bytes_d = reinterpret_cast<bytes_type_data *>(arr.get_ndo()->data.ptr);
  memory_block_ptr data_ref;
  if (bytes_meta->blockref != NULL) {
    data_ref = bytes_meta->blockref;
  } else {
    data_ref = arr.get_data_memblock();
  }
  char *data_ptr = bytes_d->begin;
  intptr_t data_size = bytes_d->end - data_ptr;

  size_t tp_data_size = tp.get_data_size();
  if (tp_data_size > 0) {
    // If the data type has a single chunk of POD memory, it's ok
    if ((intptr_t)tp_data_size == data_size &&
        offset_is_aligned(reinterpret_cast<size_t>(data_ptr), tp.get_data_alignment())) {
      // Allocate a result array to attempt the view in it
      nd::array result(make_array_memory_block(tp.get_arrmeta_size()));
      // Initialize the fields
      result.get_ndo()->data.ptr = data_ptr;
      result.get_ndo()->data.ref = data_ref.release();
      result.get_ndo()->m_type = ndt::type(tp).release();
      result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
      if (tp.get_arrmeta_size() > 0) {
        tp.extended()->arrmeta_default_construct(result.get_arrmeta(), true);
      }
      return result;
    }
  } else if (tp.get_type_id() == fixed_dim_type_id) {
    ndt::type arr_tp = tp;
    ndt::type el_tp = arr_tp.extended<ndt::base_dim_type>()->get_element_type();
    size_t el_data_size = el_tp.get_data_size();
    // If the element type has a single chunk of POD memory, and
    // it divides into the memory size, it's ok
    if (data_size % (intptr_t)el_data_size == 0 &&
        offset_is_aligned(reinterpret_cast<size_t>(data_ptr), arr_tp.get_data_alignment())) {
      intptr_t dim_size = data_size / el_data_size;
      if (arr_tp.get_kind() != kind_kind) {
        if (arr_tp.extended<ndt::fixed_dim_type>()->get_fixed_dim_size() != dim_size) {
          return nd::array();
        }
      } else {
        // Transform the symbolic fixed type into a concrete one
        arr_tp = ndt::make_fixed_dim(dim_size, el_tp);
      }
      // Allocate a result array to attempt the view in it
      nd::array result(make_array_memory_block(arr_tp.get_arrmeta_size()));
      // Initialize the fields
      result.get_ndo()->data.ptr = data_ptr;
      result.get_ndo()->data.ref = data_ref.release();
      result.get_ndo()->m_type = ndt::type(arr_tp).release();
      result.get_ndo()->m_flags = arr.get_ndo()->m_flags;
      if (el_tp.get_arrmeta_size() > 0) {
        el_tp.extended()->arrmeta_default_construct(result.get_arrmeta() + sizeof(fixed_dim_type_arrmeta), true);
      }
      fixed_dim_type_arrmeta *fixed_meta = reinterpret_cast<fixed_dim_type_arrmeta *>(result.get_arrmeta());
      fixed_meta->dim_size = dim_size;
      fixed_meta->stride = el_data_size;
      return result;
    }
  }

  // No view could be produced
  return nd::array();
}
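The decision in the fixed-dim branch above reduces to divisibility plus alignment: the byte buffer can be viewed as 'fixed[N] * T' only if its length is a whole multiple of T's data size and its start address satisfies T's alignment, with N the quotient. A self-contained sketch of that check (the names are hypothetical):

#include <cstddef>
#include <cstdint>

// Returns the dimension size if 'data_size' bytes starting at 'data_ptr' can
// be viewed as a contiguous array of elements with the given size and
// alignment, or -1 if they cannot.
static std::ptrdiff_t bytes_as_fixed_dim_size(const char *data_ptr,
                                              std::ptrdiff_t data_size,
                                              std::size_t el_data_size,
                                              std::size_t el_alignment)
{
  if (el_data_size == 0) {
    return -1;  // nothing sensible to divide by
  }
  // The start address must satisfy the element alignment...
  if (reinterpret_cast<std::uintptr_t>(data_ptr) % el_alignment != 0) {
    return -1;
  }
  // ...and the buffer must hold a whole number of elements.
  if (data_size % static_cast<std::ptrdiff_t>(el_data_size) != 0) {
    return -1;
  }
  return data_size / static_cast<std::ptrdiff_t>(el_data_size);
}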
size_t dynd::make_struct_comparison_kernel(
                ckernel_builder *ckb, intptr_t ckb_offset,
                const ndt::type& src_tp,
                const char *src0_arrmeta, const char *src1_arrmeta,
                comparison_type_t comptype,
                const eval::eval_context *ectx)
{
  intptr_t root_ckb_offset = ckb_offset;
  const base_struct_type *bsd = src_tp.tcast<base_struct_type>();
  size_t field_count = bsd->get_field_count();
  if (comptype == comparison_type_sorting_less) {
    if (src0_arrmeta == src1_arrmeta || src_tp.get_arrmeta_size() == 0 ||
        memcmp(src0_arrmeta, src1_arrmeta, src_tp.get_arrmeta_size()) == 0) {
      // The arrmeta is identical, so can use a more specialized comparison
      // function
      kernels::inc_ckb_offset(
          ckb_offset,
          sizeof(struct_compare_sorting_less_matching_arrmeta_kernel) +
              field_count * sizeof(size_t));
      ckb->ensure_capacity(ckb_offset);
      struct_compare_sorting_less_matching_arrmeta_kernel *e =
          ckb->get_at<struct_compare_sorting_less_matching_arrmeta_kernel>(
              root_ckb_offset);
      e->base.set_function<expr_predicate_t>(
          &struct_compare_sorting_less_matching_arrmeta_kernel::sorting_less);
      e->base.destructor =
          &struct_compare_sorting_less_matching_arrmeta_kernel::destruct;
      e->field_count = field_count;
      e->src_data_offsets = bsd->get_data_offsets(src0_arrmeta);
      size_t *field_kernel_offsets;
      const uintptr_t *arrmeta_offsets = bsd->get_arrmeta_offsets_raw();
      for (size_t i = 0; i != field_count; ++i) {
        // Reserve space for the child, and save the offset to this
        // field comparison kernel. Have to re-get
        // the pointer because creating the field comparison kernel may
        // move the memory.
        ckb->ensure_capacity(ckb_offset);
        e = ckb->get_at<struct_compare_sorting_less_matching_arrmeta_kernel>(
            root_ckb_offset);
        field_kernel_offsets = reinterpret_cast<size_t *>(e + 1);
        field_kernel_offsets[i] = ckb_offset - root_ckb_offset;
        const char *field_arrmeta = src0_arrmeta + arrmeta_offsets[i];
        const ndt::type &ft = bsd->get_field_type(i);
        ckb_offset = make_comparison_kernel(
            ckb, ckb_offset, ft, field_arrmeta, ft, field_arrmeta,
            comparison_type_sorting_less, ectx);
      }
      return ckb_offset;
    } else {
      // The arrmeta is different, so have to get the kernels both ways for the
      // fields
      kernels::inc_ckb_offset(
          ckb_offset, sizeof(struct_compare_sorting_less_diff_arrmeta_kernel) +
                          2 * field_count * sizeof(size_t));
      ckb->ensure_capacity(ckb_offset);
      struct_compare_sorting_less_diff_arrmeta_kernel *e =
          ckb->get_at<struct_compare_sorting_less_diff_arrmeta_kernel>(
              root_ckb_offset);
      e->base.set_function<expr_predicate_t>(
          &struct_compare_sorting_less_diff_arrmeta_kernel::sorting_less);
      e->base.destructor =
          &struct_compare_sorting_less_diff_arrmeta_kernel::destruct;
      e->field_count = field_count;
      e->src0_data_offsets = bsd->get_data_offsets(src0_arrmeta);
      e->src1_data_offsets = bsd->get_data_offsets(src1_arrmeta);
      size_t *field_kernel_offsets;
      const uintptr_t *arrmeta_offsets = bsd->get_arrmeta_offsets_raw();
      for (size_t i = 0; i != field_count; ++i) {
        const ndt::type &ft = bsd->get_field_type(i);
        // Reserve space for the child, and save the offset to this
        // field comparison kernel. Have to re-get
        // the pointer because creating the field comparison kernel may
        // move the memory.
        ckb->ensure_capacity(ckb_offset);
        e = ckb->get_at<struct_compare_sorting_less_diff_arrmeta_kernel>(
            root_ckb_offset);
        field_kernel_offsets = reinterpret_cast<size_t *>(e + 1);
        field_kernel_offsets[2 * i] = ckb_offset - root_ckb_offset;
        ckb_offset = make_comparison_kernel(
            ckb, ckb_offset, ft, src0_arrmeta + arrmeta_offsets[i], ft,
            src1_arrmeta + arrmeta_offsets[i], comparison_type_sorting_less,
            ectx);
        // Repeat for comparing the other way
        ckb->ensure_capacity(ckb_offset);
        e = ckb->get_at<struct_compare_sorting_less_diff_arrmeta_kernel>(
            root_ckb_offset);
        field_kernel_offsets = reinterpret_cast<size_t *>(e + 1);
        field_kernel_offsets[2 * i + 1] = ckb_offset - root_ckb_offset;
        ckb_offset = make_comparison_kernel(
            ckb, ckb_offset, ft, src1_arrmeta + arrmeta_offsets[i], ft,
            src0_arrmeta + arrmeta_offsets[i], comparison_type_sorting_less,
            ectx);
      }
      return ckb_offset;
    }
  } else if (comptype == comparison_type_equal ||
             comptype == comparison_type_not_equal) {
    kernels::inc_ckb_offset(ckb_offset, sizeof(struct_compare_equality_kernel) +
                                            field_count * sizeof(size_t));
    ckb->ensure_capacity(ckb_offset);
    struct_compare_equality_kernel *e =
        ckb->get_at<struct_compare_equality_kernel>(root_ckb_offset);
    if (comptype == comparison_type_equal) {
      e->base.set_function<expr_predicate_t>(
          &struct_compare_equality_kernel::equal);
    } else {
      e->base.set_function<expr_predicate_t>(
          &struct_compare_equality_kernel::not_equal);
    }
    e->base.destructor = &struct_compare_equality_kernel::destruct;
    e->field_count = field_count;
    e->src0_data_offsets = bsd->get_data_offsets(src0_arrmeta);
    e->src1_data_offsets = bsd->get_data_offsets(src1_arrmeta);
    size_t *field_kernel_offsets;
    const uintptr_t *arrmeta_offsets = bsd->get_arrmeta_offsets_raw();
    for (size_t i = 0; i != field_count; ++i) {
      const ndt::type &ft = bsd->get_field_type(i);
      // Reserve space for the child, and save the offset to this
      // field comparison kernel. Have to re-get
      // the pointer because creating the field comparison kernel may
      // move the memory.
      ckb->ensure_capacity(ckb_offset);
      e = ckb->get_at<struct_compare_equality_kernel>(root_ckb_offset);
      field_kernel_offsets = reinterpret_cast<size_t *>(e + 1);
      field_kernel_offsets[i] = ckb_offset - root_ckb_offset;
      const char *field_arrmeta = src0_arrmeta + arrmeta_offsets[i];
      ckb_offset = make_comparison_kernel(ckb, ckb_offset, ft, field_arrmeta,
                                          ft, field_arrmeta, comptype, ectx);
    }
    return ckb_offset;
  } else {
    throw not_comparable_error(src_tp, src_tp, comptype);
  }
}
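The idiom repeated throughout this builder is that parent and child kernels live in one growable buffer, so the parent stores byte offsets to its children and its pointer is re-fetched after every ensure_capacity, because growth may reallocate the buffer. A hypothetical minimal sketch of the same pattern over a std::vector<char> (alignment and strict-aliasing details are glossed over here):

#include <cstddef>
#include <vector>

// Hypothetical parent record; an array of field_count child offsets
// (std::size_t each) immediately follows it in the buffer.
struct toy_parent {
  std::size_t field_count;
};

static std::size_t build_toy_kernel(std::vector<char> &ckb, std::size_t field_count)
{
  std::size_t root_offset = ckb.size();
  // Reserve the parent record plus its child-offset table in one shot
  std::size_t offset = root_offset + sizeof(toy_parent) + field_count * sizeof(std::size_t);
  ckb.resize(offset);
  reinterpret_cast<toy_parent *>(&ckb[root_offset])->field_count = field_count;

  for (std::size_t i = 0; i != field_count; ++i) {
    // Record where the child will start, relative to the parent; absolute
    // pointers into the buffer would dangle after the next resize().
    std::size_t child_offset = ckb.size() - root_offset;
    // Re-fetch the parent pointer each iteration, just as the real code
    // re-gets it after ensure_capacity, because earlier growth may have
    // moved the buffer.
    toy_parent *parent = reinterpret_cast<toy_parent *>(&ckb[root_offset]);
    std::size_t *child_offsets = reinterpret_cast<std::size_t *>(parent + 1);
    child_offsets[i] = child_offset;
    // "Construct" a fixed-size child; in the real code this is a recursive
    // make_comparison_kernel call that can grow the buffer arbitrarily.
    ckb.resize(ckb.size() + 16, 0);
  }
  return ckb.size();
}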